content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Script setup: clear the workspace and attach required packages.
# NOTE(review): rm(list = ls()) and setting a fixed host/port are
# process-global side effects; acceptable for this standalone app,
# but avoid in reusable code.
rm(list= ls())
library(DT)
library(shiny)
#install.packages("readxl")
library(readxl)
#install.packages("dplyr")
library("dplyr")
#install.packages("openxlsx")
library("openxlsx")
#install.packages("reshape2")
library("reshape2")
#install.packages("tidyr")
library("tidyr")
# install.packages("zoo")
# Serve on all network interfaces at port 8100.
options(shiny.port = 8100, shiny.host='0.0.0.0')
# UI: a single page with a title (the date is maintained by hand) and one
# DT table output that the server fills in.
ui <- basicPage(
titlePanel(sprintf("MPE Limits aktualisiert am %s", "22/07/2020")),
DT::dataTableOutput("mytable")
)
# Server: polls three Excel files, reshapes the bin statistics into one row
# per lot with one column per test bin, merges limits and comments, and
# renders a conditionally colored DT table.
server <- function(input, output, session) {
  # NOTE(review): setwd() mutates process-global state; kept as-is because all
  # relative paths below depend on this working directory.
  setwd("W://Technology//NY-FMMT614-7-55//FT")
  # Re-read the Excel inputs every 5000 ms so the table refreshes on its own.
  df_zero <- reactiveFileReader(5000, session, filePath = "FT_statistic//NY_bin_statistics.xlsx", readFunc = read_excel)
  df_limit_zero <- reactiveFileReader(5000, session, filePath = "FT_statistic//MPE_limit_update_limit.xlsx", readFunc = read_excel)
  df_comment <- reactiveFileReader(5000, session, filePath = "FT_statistic//NY_bin_comment.xlsx", readFunc = read_excel)
  output$mytable <- DT::renderDataTable({
    ##################### Data generation ######################
    #------------------ categorical variables: lot, sub lot, bin name/number
    df <- df_zero()
    df_limit <- df_limit_zero()
    df_comment <- df_comment()
    # Bug fix: the original called as.data.frame(df_comment) without assigning
    # the result (a no-op); assign it so the conversion takes effect.
    df_comment <- as.data.frame(df_comment)
    df$lot <- factor(df$lot)
    df$sub_lot <- factor(df$sub_lot)
    df$bin_name <- factor(df$bin_name)
    df$bin_number <- factor(df$bin_number)
    #------------------ drop "Leer" (empty) and "Kelvin" rows
    df <- df[df$bin_name != "Leer" & df$bin_name != "Kelvin", ]
    df$lot <- paste(df$lot, "_", df$sub_lot, sep = "")
    #------------------ remove meaningless columns
    df$Spalte1 <- NULL
    df$Spalte2 <- NULL
    df$sub_lot <- NULL
    #------------------ per-lot percentage of each bin count
    df$percentage <- df %>%
      group_by(lot) %>%
      summarise(100*count/sum(count))
    #------------------ bind identifier columns with the percentage values
    # NOTE(review): the backticked name is summarise()'s auto-generated column
    # name and must match the expression above exactly.
    df0 <- cbind(df[c("lot", "bin_name", "bin_number", "date")], df$percentage %>% .$`100 * count/sum(count)`)
    colnames(df0) <- c("lot", "bin_name", "bin_number", "date", "percentage")
    #------------------ drop bin name (bin_number remains as the identifier)
    df0$bin_name <- NULL
    #------------------ split df0 into three layouts
    # NOTE(review): hard-coded row ranges assume a fixed file layout (lots
    # with 11, 12 and 14 test bins respectively) -- confirm whenever the
    # statistics file changes shape.
    df1 <- df0[1:33, ] # 11 test bins
    df2 <- df0[34:585, ] # 12 test bins
    df3 <- df0[586:nrow(df0), ] # 14 test bins
    # pivot: one column per bin number
    df1_1 <- spread(data = df1, bin_number, percentage)
    df2_1 <- spread(data = df2, bin_number, percentage)
    df3_1 <- spread(data = df3, bin_number, percentage)
    # Shared target column names for all three layouts.
    bin_cols <- c("lot", "date", "FMMT614", "Grenzwert", "Totalausfall", "Blown on Test",
                  "Offen", "Kurzschluss", "ICBO", "ICES", "IEBO", "VCESAT",
                  "VBESAT", "RTH")
    #------------------ df1_1: add a placeholder column for RTH
    dummy <- rep(NA, 3)
    df1_1 <- cbind(df1_1, dummy)
    colnames(df1_1) <- bin_cols
    #------------------ df2_1: already has all columns
    colnames(df2_1) <- bin_cols
    #------------------ df3_1: fold bins 2 and 3 into bin 1
    df3_1$"1" <- df3_1$"1" + df3_1$"2" + df3_1$"3"
    df3_1$"2" <- NULL
    df3_1$"3" <- NULL
    colnames(df3_1) <- bin_cols
    #------------------ stack the three parts
    df0_1 <- rbind(df1_1, df2_1, df3_1)
    #------------------ drop Grenzwert, Totalausfall and Blown on Test
    df0_1$Grenzwert <- NULL
    df0_1$Totalausfall <- NULL
    df0_1$`Blown on Test` <- NULL
    rm(df0, df1, df2, df3, df1_1, df2_1, df3_1, dummy)
    #------------------ fill the leading missing RTH values with the mean
    # (appending a vector's mean to it leaves the mean unchanged, so
    # computing it once is equivalent to the original row-by-row recompute)
    rth_mean <- mean(df0_1$RTH[complete.cases(df0_1$RTH)])
    df0_1$RTH[1] <- rth_mean
    df0_1$RTH[2] <- rth_mean
    df0_1$RTH[3] <- rth_mean
    #------------------ format dates and round percentages
    df0_1$date <- strftime(df0_1$date, format = "%d/%m/%Y")
    # NOTE(review): hard-coded row removal -- verify when the data grows.
    df0_1 <- df0_1[-c(188, 189, 190, 191), ]
    value_cols <- c("FMMT614", "Offen", "Kurzschluss", "ICBO", "ICES",
                    "IEBO", "VCESAT", "VBESAT", "RTH")
    df0_1[value_cols] <- round(df0_1[value_cols], 3)
    #------------------ final (report) column names
    colnames(df0_1) <- c("Charge", "Testdatum", value_cols)
    rownames(df0_1) <- NULL
    #------------------ attach comment columns by lot ("Charge")
    df0_1 <- merge(df0_1, df_comment, by = "Charge")
    # NOTE(review): hard-coded cutoff keeps rows 235..end -- confirm.
    df0_1 <- df0_1[235:nrow(df0_1), ]
    df0_1 <- df0_1[seq(dim(df0_1)[1], 1), ] # newest lots first
    #------------------ prepend the MPE-limit row with dummy key columns
    df_limit <- cbind(Charge = 0, Testdatum = 0, df_limit)
    df0_1 <- rbind(df_limit, df0_1)
    df0_1$Charge[1] <- 'MPE Limits'
    ###################### Data generation done ######################
    # Render the table: values beyond their MPE limit turn red. FMMT614 is a
    # yield column, so it turns red when it drops BELOW its limit. The limit
    # row is green; rows flagged with Comment_L == 1 are red.
    # (Removed the original's preceding bare datatable(df0_1) call, whose
    # result was discarded.)
    datatable(df0_1) %>% formatStyle(
      'FMMT614',
      backgroundColor = styleInterval(c(df_limit$FMMT614[1]) - 0.001, c('#d63447', 'white'))
    ) %>% formatStyle(
      'Offen',
      backgroundColor = styleInterval(c(df_limit$Offen[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'Kurzschluss',
      backgroundColor = styleInterval(c(df_limit$Kurzschluss[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'ICBO',
      backgroundColor = styleInterval(c(df_limit$ICBO[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'ICES',
      backgroundColor = styleInterval(c(df_limit$ICES[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'IEBO',
      backgroundColor = styleInterval(c(df_limit$IEBO[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'VCESAT',
      backgroundColor = styleInterval(c(df_limit$VCESAT[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'VBESAT',
      backgroundColor = styleInterval(c(df_limit$VBESAT[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'RTH',
      backgroundColor = styleInterval(c(df_limit$RTH[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'Charge',
      backgroundColor = styleEqual(c('MPE Limits'), c('#a2de96'))
    ) %>% formatStyle(
      'Comment_L',
      target = 'row',
      backgroundColor = styleEqual(1, c('#d63447'))
    )
  })
}
# Run the application (host/port were configured via options() above).
shinyApp(ui = ui, server = server)
|
/Neuer Ordner/Code5/app_4.R
|
no_license
|
decemberkms/Plotly-Dashboards-with-Dash
|
R
| false
| false
| 8,339
|
r
|
# remove all the variables in the work space
rm(list= ls())
library(DT)
library(shiny)
#install.packages("readxl")
library(readxl)
#install.packages("dplyr")
library("dplyr")
#install.packages("openxlsx")
library("openxlsx")
#install.packages("reshape2")
library("reshape2")
#install.packages("tidyr")
library("tidyr")
# install.packages("zoo")
options(shiny.port = 8100, shiny.host='0.0.0.0')
# UI: a single page with a title (the date is maintained by hand) and one
# DT table output that the server fills in.
ui <- basicPage(
titlePanel(sprintf("MPE Limits aktualisiert am %s", "22/07/2020")),
DT::dataTableOutput("mytable")
)
# Server: polls three Excel files, reshapes the bin statistics into one row
# per lot with one column per test bin, merges limits and comments, and
# renders a conditionally colored DT table.
server <- function(input, output, session) {
  # NOTE(review): setwd() mutates process-global state; kept as-is because all
  # relative paths below depend on this working directory.
  setwd("W://Technology//NY-FMMT614-7-55//FT")
  # Re-read the Excel inputs every 5000 ms so the table refreshes on its own.
  df_zero <- reactiveFileReader(5000, session, filePath = "FT_statistic//NY_bin_statistics.xlsx", readFunc = read_excel)
  df_limit_zero <- reactiveFileReader(5000, session, filePath = "FT_statistic//MPE_limit_update_limit.xlsx", readFunc = read_excel)
  df_comment <- reactiveFileReader(5000, session, filePath = "FT_statistic//NY_bin_comment.xlsx", readFunc = read_excel)
  output$mytable <- DT::renderDataTable({
    ##################### Data generation ######################
    #------------------ categorical variables: lot, sub lot, bin name/number
    df <- df_zero()
    df_limit <- df_limit_zero()
    df_comment <- df_comment()
    # Bug fix: the original called as.data.frame(df_comment) without assigning
    # the result (a no-op); assign it so the conversion takes effect.
    df_comment <- as.data.frame(df_comment)
    df$lot <- factor(df$lot)
    df$sub_lot <- factor(df$sub_lot)
    df$bin_name <- factor(df$bin_name)
    df$bin_number <- factor(df$bin_number)
    #------------------ drop "Leer" (empty) and "Kelvin" rows
    df <- df[df$bin_name != "Leer" & df$bin_name != "Kelvin", ]
    df$lot <- paste(df$lot, "_", df$sub_lot, sep = "")
    #------------------ remove meaningless columns
    df$Spalte1 <- NULL
    df$Spalte2 <- NULL
    df$sub_lot <- NULL
    #------------------ per-lot percentage of each bin count
    df$percentage <- df %>%
      group_by(lot) %>%
      summarise(100*count/sum(count))
    #------------------ bind identifier columns with the percentage values
    # NOTE(review): the backticked name is summarise()'s auto-generated column
    # name and must match the expression above exactly.
    df0 <- cbind(df[c("lot", "bin_name", "bin_number", "date")], df$percentage %>% .$`100 * count/sum(count)`)
    colnames(df0) <- c("lot", "bin_name", "bin_number", "date", "percentage")
    #------------------ drop bin name (bin_number remains as the identifier)
    df0$bin_name <- NULL
    #------------------ split df0 into three layouts
    # NOTE(review): hard-coded row ranges assume a fixed file layout (lots
    # with 11, 12 and 14 test bins respectively) -- confirm whenever the
    # statistics file changes shape.
    df1 <- df0[1:33, ] # 11 test bins
    df2 <- df0[34:585, ] # 12 test bins
    df3 <- df0[586:nrow(df0), ] # 14 test bins
    # pivot: one column per bin number
    df1_1 <- spread(data = df1, bin_number, percentage)
    df2_1 <- spread(data = df2, bin_number, percentage)
    df3_1 <- spread(data = df3, bin_number, percentage)
    # Shared target column names for all three layouts.
    bin_cols <- c("lot", "date", "FMMT614", "Grenzwert", "Totalausfall", "Blown on Test",
                  "Offen", "Kurzschluss", "ICBO", "ICES", "IEBO", "VCESAT",
                  "VBESAT", "RTH")
    #------------------ df1_1: add a placeholder column for RTH
    dummy <- rep(NA, 3)
    df1_1 <- cbind(df1_1, dummy)
    colnames(df1_1) <- bin_cols
    #------------------ df2_1: already has all columns
    colnames(df2_1) <- bin_cols
    #------------------ df3_1: fold bins 2 and 3 into bin 1
    df3_1$"1" <- df3_1$"1" + df3_1$"2" + df3_1$"3"
    df3_1$"2" <- NULL
    df3_1$"3" <- NULL
    colnames(df3_1) <- bin_cols
    #------------------ stack the three parts
    df0_1 <- rbind(df1_1, df2_1, df3_1)
    #------------------ drop Grenzwert, Totalausfall and Blown on Test
    df0_1$Grenzwert <- NULL
    df0_1$Totalausfall <- NULL
    df0_1$`Blown on Test` <- NULL
    rm(df0, df1, df2, df3, df1_1, df2_1, df3_1, dummy)
    #------------------ fill the leading missing RTH values with the mean
    # (appending a vector's mean to it leaves the mean unchanged, so
    # computing it once is equivalent to the original row-by-row recompute)
    rth_mean <- mean(df0_1$RTH[complete.cases(df0_1$RTH)])
    df0_1$RTH[1] <- rth_mean
    df0_1$RTH[2] <- rth_mean
    df0_1$RTH[3] <- rth_mean
    #------------------ format dates and round percentages
    df0_1$date <- strftime(df0_1$date, format = "%d/%m/%Y")
    # NOTE(review): hard-coded row removal -- verify when the data grows.
    df0_1 <- df0_1[-c(188, 189, 190, 191), ]
    value_cols <- c("FMMT614", "Offen", "Kurzschluss", "ICBO", "ICES",
                    "IEBO", "VCESAT", "VBESAT", "RTH")
    df0_1[value_cols] <- round(df0_1[value_cols], 3)
    #------------------ final (report) column names
    colnames(df0_1) <- c("Charge", "Testdatum", value_cols)
    rownames(df0_1) <- NULL
    #------------------ attach comment columns by lot ("Charge")
    df0_1 <- merge(df0_1, df_comment, by = "Charge")
    # NOTE(review): hard-coded cutoff keeps rows 235..end -- confirm.
    df0_1 <- df0_1[235:nrow(df0_1), ]
    df0_1 <- df0_1[seq(dim(df0_1)[1], 1), ] # newest lots first
    #------------------ prepend the MPE-limit row with dummy key columns
    df_limit <- cbind(Charge = 0, Testdatum = 0, df_limit)
    df0_1 <- rbind(df_limit, df0_1)
    df0_1$Charge[1] <- 'MPE Limits'
    ###################### Data generation done ######################
    # Render the table: values beyond their MPE limit turn red. FMMT614 is a
    # yield column, so it turns red when it drops BELOW its limit. The limit
    # row is green; rows flagged with Comment_L == 1 are red.
    # (Removed the original's preceding bare datatable(df0_1) call, whose
    # result was discarded.)
    datatable(df0_1) %>% formatStyle(
      'FMMT614',
      backgroundColor = styleInterval(c(df_limit$FMMT614[1]) - 0.001, c('#d63447', 'white'))
    ) %>% formatStyle(
      'Offen',
      backgroundColor = styleInterval(c(df_limit$Offen[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'Kurzschluss',
      backgroundColor = styleInterval(c(df_limit$Kurzschluss[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'ICBO',
      backgroundColor = styleInterval(c(df_limit$ICBO[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'ICES',
      backgroundColor = styleInterval(c(df_limit$ICES[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'IEBO',
      backgroundColor = styleInterval(c(df_limit$IEBO[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'VCESAT',
      backgroundColor = styleInterval(c(df_limit$VCESAT[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'VBESAT',
      backgroundColor = styleInterval(c(df_limit$VBESAT[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'RTH',
      backgroundColor = styleInterval(c(df_limit$RTH[1]), c('white', '#d63447'))
    ) %>% formatStyle(
      'Charge',
      backgroundColor = styleEqual(c('MPE Limits'), c('#a2de96'))
    ) %>% formatStyle(
      'Comment_L',
      target = 'row',
      backgroundColor = styleEqual(1, c('#d63447'))
    )
  })
}
# Run the application (host/port were configured via options() above).
shinyApp(ui = ui, server = server)
|
##' @name predict_dlm_lnorm
##' @title predict_dlm_lnorm
##' @export
##' @author Mike Dietze and Andrew Tredennick
##' @description make predictions from fit dlm for new data
##' @param fit fit_dlm output list
##' @param newdata array of driver data organized with dimensions [ensemble,time,variable]
##' @param n.iter number of samples
##' @param steps number of steps
##' @param start.time time for start of forecast. Defaults to end of fit
##' @param include which source of uncertainty to include (vector of strings). Options are
##' \itemize{
##' \item{I}{Initial Conditions}
##' \item{P}{Parameters}
##' \item{D}{Drivers}
##' \item{E}{Process Error}
##' }
##' @return list
##' \itemize{
##' \item{predict}{matrix of predictions [ens,time]}
##' \item{index}{data.frame of the parameter (P) and driver (D) indices used for each ensemble member}
##' }
predict_dlm_lnorm <- function(fit,newdata=NULL,n.iter=5000,steps=NULL,start.time=NULL,include=c("I","P","D","E")){
## checks: need at least one ensemble member
if(n.iter < 1){
print("n.iter must be > 1")
return(NULL)
}
## set up variables: infer the forecast horizon from the driver array if absent
if(is.null(steps)){
if(is.null(newdata)){
print("either newdata or steps needs to be provided")
return(NULL)
} else {
steps = dim(newdata)[2] # newdata dims are [ensemble, time, variable]
}
}
## driver uncertainty excluded: collapse the driver ensemble to its mean
if(!("D" %in% include)){
my.dims = dim(newdata)
my.dims[1] = 1
my.dimnames <- dimnames(newdata)
my.dimnames[[1]] = 1
newdata <- array(apply(newdata,3,apply,2,mean),
dim=my.dims,dimnames = my.dimnames)
}
params = as.matrix(fit$params)
## parameter uncertainty excluded: use posterior medians only
if(!("P" %in% include)){
params <- as.matrix(apply(params,2,median))
}
if(ncol(params)==1) params <- t(params) # keep one posterior sample per row
IC = as.matrix(fit$predict)
## default forecast start: the last time column of the fitted states
if(is.null(start.time)){
start.time = ncol(IC)
}
IC = IC[,start.time]
## initial-condition uncertainty excluded: start every member at the median
if(!("I" %in% include)){
IC = median(IC)
}
## set up storage for the forecast [iteration, time]
predict = matrix(NA,n.iter,steps)
## sample indices from newdata (plus params and initial conditions)
index = data.frame(P = sample.int(nrow(params),n.iter,replace=TRUE),
D = sample.int(dim(newdata)[1],n.iter,replace=TRUE),
I = sample.int(length(IC),n.iter,replace=TRUE))
x = IC[index$I]
beta_IC = params[index$P,"beta_IC"]
# beta = params[index$P,paste0("beta",dimnames(newdata)[[3]])]
## process error: convert precision to SD, or zero when excluded
if("E" %in% include){
tau_add = 1/sqrt(params[index$P,"tau_add"]) ## convert from precision to SD
} else {
tau_add = 0
}
## simulate forward: state x is on the log scale; predictions are exponentiated
for(t in 1:steps){
Z = newdata[index$D,t,] # NOTE(review): Z is currently unused -- the driver term below is commented out
mu = beta_IC*x #+ apply( Z * beta,1,sum)
x = rnorm(n.iter,mu,tau_add)
predict[,t] = exp(x)
}
## output: forecasts plus the per-member indices and (possibly collapsed) drivers
return(list(predict=predict,index=index,newdata=newdata))
}
|
/R/predict_dlm_lnorm.R
|
permissive
|
atredennick/ecoforecastR
|
R
| false
| false
| 2,669
|
r
|
##' @name predict_dlm_lnorm
##' @title predict_dlm_lnorm
##' @export
##' @author Mike Dietze and Andrew Tredennick
##' @description make predictions from fit dlm for new data
##' @param fit fit_dlm output list
##' @param newdata array of driver data organized with dimensions [ensemble,time,variable]
##' @param n.iter number of samples
##' @param steps number of steps
##' @param start.time time for start of forecast. Defaults to end of fit
##' @param include which source of uncertainty to include (vector of strings). Options are
##' \itemize{
##' \item{I}{Initial Conditions}
##' \item{P}{Parameters}
##' \item{D}{Drivers}
##' \item{E}{Process Error}
##' }
##' @return list
##' \itemize{
##' \item{predict}{matrix of predictions [ens,time]}
##' \item{index}{data.frame of the parameter (P) and driver (D) indices used for each ensemble member}
##' }
predict_dlm_lnorm <- function(fit,newdata=NULL,n.iter=5000,steps=NULL,start.time=NULL,include=c("I","P","D","E")){
## checks: need at least one ensemble member
if(n.iter < 1){
print("n.iter must be > 1")
return(NULL)
}
## set up variables: infer the forecast horizon from the driver array if absent
if(is.null(steps)){
if(is.null(newdata)){
print("either newdata or steps needs to be provided")
return(NULL)
} else {
steps = dim(newdata)[2] # newdata dims are [ensemble, time, variable]
}
}
## driver uncertainty excluded: collapse the driver ensemble to its mean
if(!("D" %in% include)){
my.dims = dim(newdata)
my.dims[1] = 1
my.dimnames <- dimnames(newdata)
my.dimnames[[1]] = 1
newdata <- array(apply(newdata,3,apply,2,mean),
dim=my.dims,dimnames = my.dimnames)
}
params = as.matrix(fit$params)
## parameter uncertainty excluded: use posterior medians only
if(!("P" %in% include)){
params <- as.matrix(apply(params,2,median))
}
if(ncol(params)==1) params <- t(params) # keep one posterior sample per row
IC = as.matrix(fit$predict)
## default forecast start: the last time column of the fitted states
if(is.null(start.time)){
start.time = ncol(IC)
}
IC = IC[,start.time]
## initial-condition uncertainty excluded: start every member at the median
if(!("I" %in% include)){
IC = median(IC)
}
## set up storage for the forecast [iteration, time]
predict = matrix(NA,n.iter,steps)
## sample indices from newdata (plus params and initial conditions)
index = data.frame(P = sample.int(nrow(params),n.iter,replace=TRUE),
D = sample.int(dim(newdata)[1],n.iter,replace=TRUE),
I = sample.int(length(IC),n.iter,replace=TRUE))
x = IC[index$I]
beta_IC = params[index$P,"beta_IC"]
# beta = params[index$P,paste0("beta",dimnames(newdata)[[3]])]
## process error: convert precision to SD, or zero when excluded
if("E" %in% include){
tau_add = 1/sqrt(params[index$P,"tau_add"]) ## convert from precision to SD
} else {
tau_add = 0
}
## simulate forward: state x is on the log scale; predictions are exponentiated
for(t in 1:steps){
Z = newdata[index$D,t,] # NOTE(review): Z is currently unused -- the driver term below is commented out
mu = beta_IC*x #+ apply( Z * beta,1,sum)
x = rnorm(n.iter,mu,tau_add)
predict[,t] = exp(x)
}
## output: forecasts plus the per-member indices and (possibly collapsed) drivers
return(list(predict=predict,index=index,newdata=newdata))
}
|
# Copyright (c) 2014 Clear Channel Broadcasting, Inc.
# https://github.com/iheartradio/ShinyBuilder
# Licensed under the MIT License (MIT)
#' Update All Dashboards
#'
#' @param dashboards a vector of dashboard names. By default, all dashboards in the dashboards directory are updated
#' @export
#' @examples
#' \dontrun{
#' #All Dashboards
#' updateDashboards()
#' #Selected dashboards
#' updateDashboards(c('dashboard_1', 'dashboard_2'))
#' }
# Refresh the cached chart data of every (or the selected) saved dashboard.
# Bug fix: the original tryCatch() was a syntax error -- the comma between the
# error handler and the next handler was missing, and the handler was
# misspelled `warnng` (so warnings were never caught even with the comma).
updateDashboards <- function(dashboards = NULL) {
  sbd_path <- system.file('dashboards', package = 'ShinyBuilder')
  # Check/set permissions so the dashboard directory is traversable
  Sys.chmod(sbd_path, mode = "0755")
  if (is.null(dashboards)) {
    dashboards <- list.files(path = sbd_path, full.names = TRUE)
  }
  db_list <- dbListInit()
  print(db_list)
  for (dashboard_file in dashboards) {
    # Load current dashboard (restores `dashboard_state` into this scope)
    load(dashboard_file)
    print(paste0('Updating: ', dashboard_file))
    # Update chart data for every plot item on the dashboard
    for (i in seq_along(dashboard_state)) {
      if (grepl('gItemPlot', dashboard_state[[i]]$id)) {
        input_query <- dashboard_state[[i]]$query
        db_obj <- db_list[[dashboard_state[[i]]$db_name]]
        tryCatch(
          {
            dashboard_state[[i]]$data <- do.call(db_obj$query_fn, list(db_obj$db, input_query))
          },
          error = function(cond) {
            message("Dashboard threw an error updating:")
            message(cond)
          },
          warning = function(cond) {
            message("Dashboard threw a warning updating:")
            message(cond)
          }
        )
      }
    }
    # Save current dashboard with the refreshed data
    save(dashboard_state, file = dashboard_file)
  }
}
|
/R/updateDashboards.R
|
permissive
|
sshivaji/ShinyBuilder
|
R
| false
| false
| 1,766
|
r
|
# Copyright (c) 2014 Clear Channel Broadcasting, Inc.
# https://github.com/iheartradio/ShinyBuilder
# Licensed under the MIT License (MIT)
#' Update All Dashboards
#'
#' @param dashboards a vector of dashboard names. By default, all dashboards in the dashboards directory are updated
#' @export
#' @examples
#' \dontrun{
#' #All Dashboards
#' updateDashboards()
#' #Selected dashboards
#' updateDashboards(c('dashboard_1', 'dashboard_2'))
#' }
# Refresh the cached chart data of every (or the selected) saved dashboard.
# Bug fix: the original tryCatch() was a syntax error -- the comma between the
# error handler and the next handler was missing, and the handler was
# misspelled `warnng` (so warnings were never caught even with the comma).
updateDashboards <- function(dashboards = NULL) {
  sbd_path <- system.file('dashboards', package = 'ShinyBuilder')
  # Check/set permissions so the dashboard directory is traversable
  Sys.chmod(sbd_path, mode = "0755")
  if (is.null(dashboards)) {
    dashboards <- list.files(path = sbd_path, full.names = TRUE)
  }
  db_list <- dbListInit()
  print(db_list)
  for (dashboard_file in dashboards) {
    # Load current dashboard (restores `dashboard_state` into this scope)
    load(dashboard_file)
    print(paste0('Updating: ', dashboard_file))
    # Update chart data for every plot item on the dashboard
    for (i in seq_along(dashboard_state)) {
      if (grepl('gItemPlot', dashboard_state[[i]]$id)) {
        input_query <- dashboard_state[[i]]$query
        db_obj <- db_list[[dashboard_state[[i]]$db_name]]
        tryCatch(
          {
            dashboard_state[[i]]$data <- do.call(db_obj$query_fn, list(db_obj$db, input_query))
          },
          error = function(cond) {
            message("Dashboard threw an error updating:")
            message(cond)
          },
          warning = function(cond) {
            message("Dashboard threw a warning updating:")
            message(cond)
          }
        )
      }
    }
    # Save current dashboard with the refreshed data
    save(dashboard_state, file = dashboard_file)
  }
}
|
context("test-rx_uppercase")
test_that("uppercase works", {
# rx_uppercase() should match an uppercase letter
expect_true(grepl(rx_uppercase(), "A"))
# ...and should not match lowercase letters or punctuation
expect_false(grepl(rx_uppercase(), "a"))
expect_false(grepl(rx_uppercase(), "!"))
# pipe functionality: composes with rx_start_of_line()
expect_equal(rx() %>% rx_start_of_line() %>% rx_uppercase(), "^[A-Z]")
# inverse = TRUE matches any non-uppercase character
expect_true(grepl(rx_uppercase(inverse = TRUE), "abc"))
expect_true(grepl(rx_uppercase(inverse = TRUE), "!"))
expect_false(grepl(rx_uppercase(inverse = TRUE), "ABC"))
# a non-logical `inverse` argument is rejected
expect_error(rx_uppercase(inverse = "x"))
})
|
/tests/testthat/test-uppercase.R
|
permissive
|
FcoCarlosBarbosaMartins/RVerbalExpressions
|
R
| false
| false
| 627
|
r
|
context("test-rx_uppercase")
test_that("uppercase works", {
# rx_uppercase() should match an uppercase letter
expect_true(grepl(rx_uppercase(), "A"))
# ...and should not match lowercase letters or punctuation
expect_false(grepl(rx_uppercase(), "a"))
expect_false(grepl(rx_uppercase(), "!"))
# pipe functionality: composes with rx_start_of_line()
expect_equal(rx() %>% rx_start_of_line() %>% rx_uppercase(), "^[A-Z]")
# inverse = TRUE matches any non-uppercase character
expect_true(grepl(rx_uppercase(inverse = TRUE), "abc"))
expect_true(grepl(rx_uppercase(inverse = TRUE), "!"))
expect_false(grepl(rx_uppercase(inverse = TRUE), "ABC"))
# a non-logical `inverse` argument is rejected
expect_error(rx_uppercase(inverse = "x"))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iam.R
\name{iam_get_user}
\alias{iam_get_user}
\title{Retrieves information about the specified IAM user, including the user's creation date, path, unique ID, and ARN}
\usage{
iam_get_user(...)
}
\arguments{
\item{...}{optional extra arguments passed}
}
\value{
list
}
\description{
Retrieves information about the specified IAM user, including the user's creation date, path, unique ID, and ARN
}
\references{
\url{https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.get_user}
}
|
/man/iam_get_user.Rd
|
no_license
|
cran/botor
|
R
| false
| true
| 601
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iam.R
\name{iam_get_user}
\alias{iam_get_user}
\title{Retrieves information about the specified IAM user, including the user's creation date, path, unique ID, and ARN}
\usage{
iam_get_user(...)
}
\arguments{
\item{...}{optional extra arguments passed}
}
\value{
list
}
\description{
Retrieves information about the specified IAM user, including the user's creation date, path, unique ID, and ARN
}
\references{
\url{https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.get_user}
}
|
library("DESeq2")
library("pheatmap")
library("RColorBrewer")
library("ggplot2")
library("genefilter")
## DESeq2 analysis of Rett vs control RNA-seq, adjusting for a bias
## covariate (bias_3) and brain region. Three passes:
##   1) ~bias_3 + region + dx  -- overall Rett vs control
##   2) ~bias_3 + region_dx    -- per-region Rett vs control contrasts
##   3) pass (1) repeated on counts from a modified GTF
## NOTE(review): file paths and hard-coded row counts (19623 / 20413)
## are dataset-specific -- confirm before reuse.
workingDir = "/data/atimms/kim_rett_rnaseq_0119";
setwd(workingDir);
###all samples
##read in count and metadata
countData1 <- read.table('kim_rett_rnaseq_0119.star_fc.counts.txt', header=T, row.names=1)
colData1 <- read.table('kim_rett_rnaseq_0119_bias.star_fc.metadata.txt', header=T, row.names=1)
head(countData1)
head(colData1)
##add to deseq, give countdata and metadata and then design information i.e. info on sample types
#dds <- DESeqDataSetFromMatrix(countData = countData1, colData = colData1, ~region + dx + region_dx)
#dds <- DESeqDataSetFromMatrix(countData = countData1, colData = colData1, ~bias + region_dx)
dds <- DESeqDataSetFromMatrix(countData = countData1, colData = colData1, ~bias_3 + region + dx)
dds
##keep only genes with more than 4 total counts across all samples
nrow(dds)
dds <- dds[ rowSums(counts(dds)) > 4, ]
nrow(dds)
##write normalized counts
dds_norm <- estimateSizeFactors(dds)
count_data <- counts(dds_norm, normalized=TRUE)
#write.csv(count_data, file="kim_rett_rnaseq_0119_bias.norm_counts.csv")
##vst transform data -- new version
rld <- vst(dds, blind=FALSE)
##check
head(assay(rld), 3)
head(assay(dds),3)
##and write to csv file
#write.csv(assay(rld), file="kim_rett_rnaseq_0119.deseq.vst_counts.csv")
##calculate sample distances from the vst-transformed data
sampleDists <- dist( t( assay(rld) ) )
sampleDists
##and plot as heat map
sampleDistMatrix <- as.matrix( sampleDists )
rownames(sampleDistMatrix) <- paste( rld$region, rld$dx, rld$bias_3 , sep="-" )
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
pheatmap(sampleDistMatrix,
clustering_distance_rows=sampleDists,
clustering_distance_cols=sampleDists,
col=colors)
dev.copy2pdf(file='kim_rett_rnaseq_0119_bias.sample_heatmap.pdf', width = 7, height = 5)
##principal components analysis, colored by each covariate in turn
plotPCA(rld, intgroup = c("region"))
ggsave('kim_rett_rnaseq_0119_bias.region_pca.pdf', width=6, height = 6)
plotPCA(rld, intgroup = c("dx"))
ggsave('kim_rett_rnaseq_0119_bias.dx_pca.pdf', width=6, height = 6)
plotPCA(rld, intgroup = c("bias_3"))
ggsave('kim_rett_rnaseq_0119_bias.bias_pca.pdf', width=6, height = 6)
##get just the PCA coordinates as a data frame
pcaData_bias <- plotPCA(rld, intgroup = c("bias_3"), returnData = TRUE)
pcaData_bias
write.csv(pcaData_bias,file='kim_rett_rnaseq_0119_bias.bias_pca_data.csv')
plotPCA(rld, intgroup = c("sample_name"))
ggsave('kim_rett_rnaseq_0119_bias.sample_pca.pdf', width=6, height = 6)
##gene clustering
##take the 25 most variable genes, centered on their row means
topVarGenes <- head(order(rowVars(assay(rld)),decreasing=TRUE),25)
mat <- assay(rld)[ topVarGenes, ]
mat <- mat - rowMeans(mat)
newdf <- as.data.frame(colData(rld)[c("region", "dx", "bias_3")])
pheatmap(mat, annotation_col=newdf, fontsize_row=8, fontsize_col=8)
dev.copy2pdf(file='kim_rett_rnaseq_0119_bias.25_var_gene_clustering.pdf', width = 7, height = 5)
##differential expression --- just rett vs control
##do the test
dds <- DESeq(dds)
##get results and summary
##this just gets the last test
(res <- results(dds))
##to get a specific test:
res2 <- results(dds, contrast=c("dx", "Rett", "control"))
##get summary
summary(res2) #lots of significant genes
##save differentially expression results
##sort results by adjusted p-value
resOrdered2 <- res2[order(res2$padj),]
head(resOrdered2)
##save results as dataframe and take top 20k results, then write csv file
## NOTE(review): 19623 is the post-filter gene count -- update if the
## count filter or input data change
resOrdered2DF <- as.data.frame(resOrdered2)[1:19623,]
write.csv(resOrdered2DF, file="kim_rett_rnaseq_0119_bias.Rett_vs_control.csv")
#redo, so splitting the data into region and dx (combined region_dx factor)
dds <- DESeqDataSetFromMatrix(countData = countData1, colData = colData1, ~bias_3 + region_dx)
dds
nrow(dds)
dds <- dds[ rowSums(counts(dds)) > 4, ]
nrow(dds)
##differential expression
##do the test
dds <- DESeq(dds)
##to get a specific test: Rett vs control within temporal cortex
res2 <- results(dds, contrast=c("region_dx", "temporal_Rett", "temporal_control"))
##get summary
summary(res2) #lots of significant genes
##save differentially expression results
##sort results by adjusted p-value
resOrdered2 <- res2[order(res2$padj),]
head(resOrdered2)
##save results as dataframe and take top 20k results, then write csv file
resOrdered2DF <- as.data.frame(resOrdered2)[1:19623,]
write.csv(resOrdered2DF, file="kim_rett_rnaseq_0119_bias.temporal.Rett_vs_control.csv")
##Rett vs control within cingulate cortex
res2 <- results(dds, contrast=c("region_dx", "cingulate_Rett", "cingulate_control"))
##get summary
summary(res2) #lots of significant genes
##save differentially expression results
##sort results by adjusted p-value
resOrdered2 <- res2[order(res2$padj),]
head(resOrdered2)
##save results as dataframe and take top 20k results, then write csv file
resOrdered2DF <- as.data.frame(resOrdered2)[1:19623,]
write.csv(resOrdered2DF, file="kim_rett_rnaseq_0119_bias.cingulate.Rett_vs_control.csv")
###for a slightly different gtf which differtiates
##read in count and metadata
countData1 <- read.table('kim_rett_rnaseq_mecp1_0319.star_fc.counts.txt', header=T, row.names=1)
colData1 <- read.table('kim_rett_rnaseq_mecp1_0319_bias.star_fc.metadata.txt', header=T, row.names=1)
head(countData1)
head(colData1)
##add to deseq, give countdata and metadata and then design information i.e. info on sample types
dds <- DESeqDataSetFromMatrix(countData = countData1, colData = colData1, ~ bias_3 + region + dx)
dds
##keep only genes with more than 4 total counts across all samples
nrow(dds)
dds <- dds[ rowSums(counts(dds)) > 4, ]
nrow(dds)
##write normalized counts
dds_norm <- estimateSizeFactors(dds)
count_data <- counts(dds_norm, normalized=TRUE)
write.csv(count_data, file="kim_rett_rnaseq_mecp1_0319_bias.norm_counts.csv")
##differential expression --- just rett vs control
##do the test
dds <- DESeq(dds)
##get results and summary
##this just gets the last test
(res <- results(dds))
##to get a specific test:
res2 <- results(dds, contrast=c("dx", "Rett", "control"))
##get summary
summary(res2) #lots of significant genes
##save differentially expression results
##sort results by adjusted p-value
resOrdered2 <- res2[order(res2$padj),]
head(resOrdered2)
##save results as dataframe and take top 20k results, then write csv file
resOrdered2DF <- as.data.frame(resOrdered2)[1:20413,]
write.csv(resOrdered2DF, file="kim_rett_rnaseq_mecp1_0319_bias.Rett_vs_control.csv")
|
/r_scripts/kim_rett_rnaseq_0220.R
|
no_license
|
atimms/ratchet_scripts
|
R
| false
| false
| 6,414
|
r
|
library("DESeq2")
library("pheatmap")
library("RColorBrewer")
library("ggplot2")
library("genefilter")
workingDir = "/data/atimms/kim_rett_rnaseq_0119";
setwd(workingDir);
###all samples
##read in count and metadata
countData1 <- read.table('kim_rett_rnaseq_0119.star_fc.counts.txt', header=T, row.names=1)
colData1 <- read.table('kim_rett_rnaseq_0119_bias.star_fc.metadata.txt', header=T, row.names=1)
head(countData1)
head(colData1)
##add to deseq, give countdata and metadata and then design information i.e. info on sample types
#dds <- DESeqDataSetFromMatrix(countData = countData1, colData = colData1, ~region + dx + region_dx)
#dds <- DESeqDataSetFromMatrix(countData = countData1, colData = colData1, ~bias + region_dx)
dds <- DESeqDataSetFromMatrix(countData = countData1, colData = colData1, ~bias_3 + region + dx)
dds
##remove rows of the DESeqDataSet that have no counts, or only a single count across all samples
nrow(dds)
dds <- dds[ rowSums(counts(dds)) > 4, ]
nrow(dds)
##write normalized counts
dds_norm <- estimateSizeFactors(dds)
count_data <- counts(dds_norm, normalized=TRUE)
#write.csv(count_data, file="kim_rett_rnaseq_0119_bias.norm_counts.csv")
##vst transform data -- new version (variance-stabilizing transform; blind=FALSE uses the design)
rld <- vst(dds, blind=FALSE)
##check transformed vs raw counts
head(assay(rld), 3)
head(assay(dds),3)
##and write to csv file
#write.csv(assay(rld), file="kim_rett_rnaseq_0119.deseq.vst_counts.csv")
##calculate sample distances from the transformed counts
##(NOTE: 'rld' actually holds vst output, not rlog, despite the name)
sampleDists <- dist( t( assay(rld) ) )
sampleDists
##and plot as heat map, samples labelled region-dx-bias
sampleDistMatrix <- as.matrix( sampleDists )
rownames(sampleDistMatrix) <- paste( rld$region, rld$dx, rld$bias_3 , sep="-" )
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
pheatmap(sampleDistMatrix,
clustering_distance_rows=sampleDists,
clustering_distance_cols=sampleDists,
col=colors)
dev.copy2pdf(file='kim_rett_rnaseq_0119_bias.sample_heatmap.pdf', width = 7, height = 5)
##principal components analysis, colored by each covariate in turn
plotPCA(rld, intgroup = c("region"))
ggsave('kim_rett_rnaseq_0119_bias.region_pca.pdf', width=6, height = 6)
plotPCA(rld, intgroup = c("dx"))
ggsave('kim_rett_rnaseq_0119_bias.dx_pca.pdf', width=6, height = 6)
plotPCA(rld, intgroup = c("bias_3"))
ggsave('kim_rett_rnaseq_0119_bias.bias_pca.pdf', width=6, height = 6)
##get just the PCA coordinates as a data frame and save them
pcaData_bias <- plotPCA(rld, intgroup = c("bias_3"), returnData = TRUE)
pcaData_bias
write.csv(pcaData_bias,file='kim_rett_rnaseq_0119_bias.bias_pca_data.csv')
plotPCA(rld, intgroup = c("sample_name"))
ggsave('kim_rett_rnaseq_0119_bias.sample_pca.pdf', width=6, height = 6)
##gene clustering
##take 25 most variable genes (centered on their row means) and heatmap them
topVarGenes <- head(order(rowVars(assay(rld)),decreasing=TRUE),25)
mat <- assay(rld)[ topVarGenes, ]
mat <- mat - rowMeans(mat)
newdf <- as.data.frame(colData(rld)[c("region", "dx", "bias_3")])
pheatmap(mat, annotation_col=newdf, fontsize_row=8, fontsize_col=8)
dev.copy2pdf(file='kim_rett_rnaseq_0119_bias.25_var_gene_clustering.pdf', width = 7, height = 5)
##differential expression --- just rett vs control
##do the test (fits the model for every gene)
dds <- DESeq(dds)
##get results and summary
##this just gets the last test (last term of the design)
(res <- results(dds))
##to get a specific test: dx, Rett relative to control
res2 <- results(dds, contrast=c("dx", "Rett", "control"))
##get summary
summary(res2) #lots of significant genes
##save differential expression results
##sort results by adjusted p-value
resOrdered2 <- res2[order(res2$padj),]
head(resOrdered2)
##save results as dataframe and take top 20k results, then write csv file
##NOTE(review): 19623 is a hard-coded row count -- presumably nrow after filtering;
##confirm it still matches if the input counts or the >4 filter change
resOrdered2DF <- as.data.frame(resOrdered2)[1:19623,]
write.csv(resOrdered2DF, file="kim_rett_rnaseq_0119_bias.Rett_vs_control.csv")
#redo, so splitting the data into region and dx (combined 'region_dx' factor)
dds <- DESeqDataSetFromMatrix(countData = countData1, colData = colData1, ~bias_3 + region_dx)
dds
nrow(dds)
dds <- dds[ rowSums(counts(dds)) > 4, ]
nrow(dds)
##differential expression
##do the test
dds <- DESeq(dds)
##to get a specific test: Rett vs control within temporal cortex
res2 <- results(dds, contrast=c("region_dx", "temporal_Rett", "temporal_control"))
##get summary
summary(res2) #lots of significant genes
##save differential expression results
##sort results by adjusted p-value
resOrdered2 <- res2[order(res2$padj),]
head(resOrdered2)
##save results as dataframe and take top 20k results, then write csv file
resOrdered2DF <- as.data.frame(resOrdered2)[1:19623,]
write.csv(resOrdered2DF, file="kim_rett_rnaseq_0119_bias.temporal.Rett_vs_control.csv")
##same contrast within cingulate cortex
res2 <- results(dds, contrast=c("region_dx", "cingulate_Rett", "cingulate_control"))
##get summary
summary(res2) #lots of significant genes
##save differential expression results
##sort results by adjusted p-value
resOrdered2 <- res2[order(res2$padj),]
head(resOrdered2)
##save results as dataframe and take top 20k results, then write csv file
resOrdered2DF <- as.data.frame(resOrdered2)[1:19623,]
write.csv(resOrdered2DF, file="kim_rett_rnaseq_0119_bias.cingulate.Rett_vs_control.csv")
###re-run with a slightly different gtf which differentiates transcripts
###(presumably separates MECP2 isoforms given the 'mecp1' file names -- TODO confirm against the gtf)
##read in count and metadata
countData1 <- read.table('kim_rett_rnaseq_mecp1_0319.star_fc.counts.txt', header=T, row.names=1)
colData1 <- read.table('kim_rett_rnaseq_mecp1_0319_bias.star_fc.metadata.txt', header=T, row.names=1)
head(countData1)
head(colData1)
##add to deseq, give countdata and metadata and then design information i.e. info on sample types
dds <- DESeqDataSetFromMatrix(countData = countData1, colData = colData1, ~ bias_3 + region + dx)
dds
##remove rows of the DESeqDataSet that have no counts, or only a single count across all samples
nrow(dds)
dds <- dds[ rowSums(counts(dds)) > 4, ]
nrow(dds)
##write normalized counts (this time the csv IS written)
dds_norm <- estimateSizeFactors(dds)
count_data <- counts(dds_norm, normalized=TRUE)
write.csv(count_data, file="kim_rett_rnaseq_mecp1_0319_bias.norm_counts.csv")
##differential expression --- just rett vs control
##do the test
dds <- DESeq(dds)
##get results and summary
##this just gets the last test
(res <- results(dds))
##to get a specific test:
res2 <- results(dds, contrast=c("dx", "Rett", "control"))
##get summary
summary(res2) #lots of significant genes
##save differential expression results
##sort results by adjusted p-value
resOrdered2 <- res2[order(res2$padj),]
head(resOrdered2)
##save results as dataframe and take top 20k results, then write csv file
##NOTE(review): 20413 is hard-coded -- presumably nrow after filtering for this gtf; verify
resOrdered2DF <- as.data.frame(resOrdered2)[1:20413,]
write.csv(resOrdered2DF, file="kim_rett_rnaseq_mecp1_0319_bias.Rett_vs_control.csv")
|
changePropertyInput <- function(newInputs, groupVar, sequenceFileXML, sequenceXML, sequenceXMLpath, newSequenceFileXML,
                                newsequenceXMLpath) {
  # Apply a batch of new property arguments to an in-memory DART sequence
  # XML document, first renaming the sequence when the target file differs,
  # then refreshing the 'numberParallelThreads' attribute.
  require(xml2)
  # pull the group / property / argument triplet out of the input list
  # (unlist keeps the element names, matching the original extraction)
  target_group <- unlist(newInputs['Groupnames'])
  target_property <- unlist(newInputs['Propertynames'])
  replacement_arg <- unlist(newInputs['Newargs'])
  # validate the request before touching the document
  checkErrors(groupVar, target_property, target_group, replacement_arg, sequenceXML, newSequenceFileXML)
  # update the stored sequence name only when it actually changes
  if (sequenceFileXML != newSequenceFileXML) {
    changeSeqName(sequenceXML, newSequenceFileXML)
  }
  # swap in the new argument value on the matching property entry
  changeArg(sequenceXML, target_group, target_property, replacement_arg)
  # keep 'numberParallelThreads' consistent with the edited args
  changeNParThre(sequenceXML)
}
changeNParThre <- function(sequenceXML) {
  # Recompute the 'numberParallelThreads' attribute (product of the number
  # of args across groups, via numberParallelThreads(), defined elsewhere)
  # and write it back onto the sequencer preferences node.
  require(xml2)
  prefs_xpath <- '/DartFile/DartSequencerDescriptor/DartSequencerPreferences'
  prefs_node <- xml2::xml_find_all(sequenceXML, prefs_xpath)
  xml2::xml_attr(prefs_node, 'numberParallelThreads') <- numberParallelThreads(sequenceXML)
}
changeArg <- function(sequenceXML, groupName, propertyName, newArg) {
  # Overwrite the 'args' attribute of one property entry, located by its
  # group name and property name.
  require(xml2)
  group_xpath <- '/DartFile/DartSequencerDescriptor/DartSequencerDescriptorEntries/DartSequencerDescriptorGroup'
  group_nodes <- xml2::xml_find_all(sequenceXML, group_xpath)
  # getGroupIndex() (defined elsewhere) maps the group name to its position
  target_group <- group_nodes[getGroupIndex(sequenceXML, groupName)]
  # entries within the group; select the one whose propertyName matches
  entries <- xml2::xml_find_all(target_group, 'DartSequencerDescriptorEntry')
  matching <- which(xml2::xml_attr(entries, 'propertyName') == as.character(propertyName))
  xml2::xml_attr(entries[matching], 'args') <- newArg
}
changeSeqName <- function(sequenceXML, newSequenceFileXML) {
  # Set the 'sequenceName' attribute on the DartSequencerDescriptor node to
  # 'sequence;;<file name without its .xml extension>'.
  require(xml2)
  require(stringr)
  seqDescPath <- '/DartFile/DartSequencerDescriptor'
  seqDesc <- xml2::xml_find_all(sequenceXML, seqDescPath)
  # Strip a literal trailing ".xml". The original pattern '.xml' was an
  # unanchored regex in which '.' matched ANY character, so it removed the
  # first <any-char>xml run (e.g. 'myxmlseq.xml' -> 'mseq.xml').
  nf <- stringr::str_remove(newSequenceFileXML, '\\.xml$')
  newSeqName <- paste0('sequence;;', nf)
  xml2::xml_attr(seqDesc, 'sequenceName') <- newSeqName
}
|
/R/changePropInputs.R
|
no_license
|
kitbenjamin/daRtInput
|
R
| false
| false
| 2,371
|
r
|
changePropertyInput <- function(newInputs, groupVar, sequenceFileXML, sequenceXML, sequenceXMLpath, newSequenceFileXML,
                                newsequenceXMLpath) {
  # Apply a batch of new property arguments to an in-memory DART sequence
  # XML document, first renaming the sequence when the target file differs,
  # then refreshing the 'numberParallelThreads' attribute.
  require(xml2)
  # pull the group / property / argument triplet out of the input list
  # (unlist keeps the element names, matching the original extraction)
  target_group <- unlist(newInputs['Groupnames'])
  target_property <- unlist(newInputs['Propertynames'])
  replacement_arg <- unlist(newInputs['Newargs'])
  # validate the request before touching the document
  checkErrors(groupVar, target_property, target_group, replacement_arg, sequenceXML, newSequenceFileXML)
  # update the stored sequence name only when it actually changes
  if (sequenceFileXML != newSequenceFileXML) {
    changeSeqName(sequenceXML, newSequenceFileXML)
  }
  # swap in the new argument value on the matching property entry
  changeArg(sequenceXML, target_group, target_property, replacement_arg)
  # keep 'numberParallelThreads' consistent with the edited args
  changeNParThre(sequenceXML)
}
changeNParThre <- function(sequenceXML) {
  # Recompute the 'numberParallelThreads' attribute (product of the number
  # of args across groups, via numberParallelThreads(), defined elsewhere)
  # and write it back onto the sequencer preferences node.
  require(xml2)
  prefs_xpath <- '/DartFile/DartSequencerDescriptor/DartSequencerPreferences'
  prefs_node <- xml2::xml_find_all(sequenceXML, prefs_xpath)
  xml2::xml_attr(prefs_node, 'numberParallelThreads') <- numberParallelThreads(sequenceXML)
}
changeArg <- function(sequenceXML, groupName, propertyName, newArg) {
  # Overwrite the 'args' attribute of one property entry, located by its
  # group name and property name.
  require(xml2)
  group_xpath <- '/DartFile/DartSequencerDescriptor/DartSequencerDescriptorEntries/DartSequencerDescriptorGroup'
  group_nodes <- xml2::xml_find_all(sequenceXML, group_xpath)
  # getGroupIndex() (defined elsewhere) maps the group name to its position
  target_group <- group_nodes[getGroupIndex(sequenceXML, groupName)]
  # entries within the group; select the one whose propertyName matches
  entries <- xml2::xml_find_all(target_group, 'DartSequencerDescriptorEntry')
  matching <- which(xml2::xml_attr(entries, 'propertyName') == as.character(propertyName))
  xml2::xml_attr(entries[matching], 'args') <- newArg
}
changeSeqName <- function(sequenceXML, newSequenceFileXML) {
  # Set the 'sequenceName' attribute on the DartSequencerDescriptor node to
  # 'sequence;;<file name without its .xml extension>'.
  require(xml2)
  require(stringr)
  seqDescPath <- '/DartFile/DartSequencerDescriptor'
  seqDesc <- xml2::xml_find_all(sequenceXML, seqDescPath)
  # Strip a literal trailing ".xml". The original pattern '.xml' was an
  # unanchored regex in which '.' matched ANY character, so it removed the
  # first <any-char>xml run (e.g. 'myxmlseq.xml' -> 'mseq.xml').
  nf <- stringr::str_remove(newSequenceFileXML, '\\.xml$')
  newSeqName <- paste0('sequence;;', nf)
  xml2::xml_attr(seqDesc, 'sequenceName') <- newSeqName
}
|
.lpp <- function(x, a, b, u, d, e) {
  # Logistic power peak (LPP) depth function, evaluated at depths x.
  # a = baseline, a + b ~ amplitude, u = depth of the peak,
  # d and e control the abruptness of the peak. Vectorized over x.
  shifted <- exp((x + d * log(e) - u) / d)
  # decaying term and peak normalization constant
  decay <- (1 + shifted)^((-e - 1) / e)
  peak_scale <- (e + 1)^((e + 1) / e)
  a + (b / u) * decay * shifted * peak_scale
}
#' @title Random Profile
#'
#' @description Generate a random soil profile according to set criteria, with correlated
#' depth trends.
#'
#' The random walk method produces profiles with considerable variation between
#' horizons and is based on values from the normal distribution seeded with
#' means and standard deviations drawn from the uniform distribution of \[0,
#' 10].
#'
#' The logistic power peak (LPP) function can be used to generate random soil
#' property depth functions that are sharply peaked. LPP parameters can be
#' hard-coded using the optional arguments: "lpp.a", "lpp.b", "lpp.u", "lpp.d",
#' "lpp.e". Amplitude of the peak is controlled by ("lpp.a + "lpp.b"), depth of
#' the peak by "lpp.u", and abruptness by "lpp.d" and "lpp.e". Further
#' description of the method is outlined in (Brenton et al, 2011). Simulated
#' horizon distinctness codes are based on the USDA-NCSS field description
#' methods.
#' Simulated distinctness codes are constrained according to horizon thickness,
#' i.e. a gradual boundary (+/- 5cm) will not be simulated for horizons that
#' are thinner than 3x this vertical distance
#'
#' @aliases random_profile .lpp
#' @param id a character or numeric id used for this profile
#' @param n vector of possible number of horizons, or the exact number of
#' horizons (see below)
#' @param min_thick minimum thickness criteria for a simulated horizon
#' @param max_thick maximum thickness criteria for a simulated horizon
#' @param n_prop number of simulated soil properties (columns in the returned
#' dataframe)
#' @param exact should the exact number of requested horizons be generated?
#' (defaults to FALSE)
#' @param method named method used to synthesize depth function ('random_walk'
#' or 'LPP'), see details
#' @param HzDistinctSim optionally simulate horizon boundary distinctness codes
#' @param SPC result is a \code{SoilProfileCollection} object, otherwise a
#' \code{data.frame} object
#' @param \dots additional parameters passed-in to the LPP (\code{.lpp})
#' function
#' @return A \code{data.frame} or \code{SoilProfileCollection} object.
#' @note See examples for ideas on simulating several profiles at once.
#' @author Dylan E. Beaudette
#' @seealso \code{\link{profile_compare}, \link{hzDistinctnessCodeToOffset}}
#' @references Myers, D. B.; Kitchen, N. R.; Sudduth, K. A.; Miles, R. J.;
#' Sadler, E. J. & Grunwald, S. Peak functions for modeling high resolution
#' soil profile data Geoderma, 2011, 166, 74-83.
#' @keywords manip
#' @export
#' @examples
#'
#'
#' # generate 10 random profiles, result is a list of SoilProfileCollection objects
#' d <- lapply(1:10, random_profile, SPC=TRUE)
#'
#' # combine
#' d <- combine(d)
#'
#' # plot
#' opar <- par(mar=c(0,0,3,2))
#' plotSPC(d, color='p1', name='name', cex.names=0.75)
#' par(opar)
#'
#' # simulate horizon boundary distinctness codes:
#' d <- lapply(1:10, random_profile, SPC=TRUE, HzDistinctSim=TRUE)
#' d <- combine(d)
#'
#' d$HzD <- hzDistinctnessCodeToOffset(d$HzDistinctCode)
#'
#' opar <- par(mar=c(0,0,3,2))
#' plotSPC(d, name='name', color='p1', hz.distinctness.offset='HzD')
#' par(opar)
#'
#'
#' # depth functions are generated using the LPP function
#' opar <- par(mfrow=c(2,1), mar=c(0,0,3,0))
#'
#' # generate data
#' d.1 <- lapply(1:10, random_profile, SPC=TRUE, n=c(6, 7, 8), n_prop=1, method='LPP')
#' d.1 <- combine(d.1)
#'
#' # plot
#' plotSPC(d.1, name='name', color='p1', col.label = 'LPP Defaults')
#'
#'
#' # do this again, this time set all of the LPP parameters
#' d.2 <- lapply(1:10, random_profile, SPC=TRUE, n=c(6, 7, 8), n_prop=1, method='LPP',
#' lpp.a=5, lpp.b=10, lpp.d=5, lpp.e=5, lpp.u=25)
#' d.2 <- combine(d.2)
#'
#' # plot
#' plotSPC(d.2, name='name', color='p1', col.label = 'Custom LPP Parameters')
#'
#'
#' # reset plotting defaults
#' par(opar)
#'
#'
#'
#' # try plotting the LPP-derived simulated data
#' # aggregated over all profiles
#' a <- slab(d.2, fm= ~ p1)
#' a$mid <- with(a, (top + bottom) / 2)
#'
#' library(lattice)
#' (p1 <- xyplot(mid ~ p.q50, data=a,
#' lower=a$p.q25, upper=a$p.q75, ylim=c(150,-5), alpha=0.5,
#' panel=panel.depth_function, prepanel=prepanel.depth_function,
#' cf=a$contributing_fraction, xlab='Simulated Data', ylab='Depth',
#' main='LPP(a=5, b=10, d=5, e=5, u=25)',
#' par.settings=list(superpose.line=list(col='black', lwd=2))
#' ))
#'
#' # optionally add original data as step-functions
#' if(require(latticeExtra)) {
#' h <- horizons(d.2)
#' p1 + as.layer(xyplot(top ~ p1, groups=id, data=h,
#' horizontal=TRUE, type='S',
#' par.settings=list(superpose.line=list(col='blue', lwd=1, lty=2))))
#' }
#'
#'
#'
random_profile <- function(id, n = c(3,4,5,6), min_thick = 5, max_thick = 30, n_prop = 5, exact = FALSE, method= 'random_walk', HzDistinctSim = FALSE, SPC = FALSE, ...) {
  ## Simulate one soil profile: n_hz horizons with random thicknesses and
  ## n_prop depth-correlated properties (random walk or LPP depth function).
  ## Interface unchanged; see the roxygen block for full parameter docs.
  # sanity checks on arguments
  if(missing(id))
    stop('must specify an id')
  if(max_thick < min_thick)
    stop('illogical horizon thickness constraints')
  if(! method %in% c('random_walk', 'LPP'))
    stop('invalid method')
  # optional LPP parameters arrive via ...
  dots <- list(...)
  # if requested, give back the exact number of horizons
  # FIX: scalar short-circuit `&&` instead of elementwise `&` in an if()
  if(length(n) == 1 && exact)
    n_hz <- n
  # otherwise randomly choose from suggestions
  else
    n_hz <- sample(n, 1)
  # generate horizon thicknesses
  # FIX: seq_len() is empty when n_hz == 1; the original 1:(n_hz-1)
  # iterated over c(1, 0) and produced a spurious extra horizon
  tops <- integer(n_hz-1)
  for(i in seq_len(n_hz - 1))
    tops[i] <- sample(min_thick:max_thick, 1)
  # add 0, then generate bottom bnd
  tops <- as.integer(c(0, tops))
  bottoms <- as.integer(c(tops[-1], sample(min_thick:max_thick, 1)))
  # combine into a df
  # always treat ID as a character: "solves" some cases of SPC corruption due to re-ordering of integers cast to character:
  # https://github.com/ncss-tech/aqp/issues/90
  d <- data.frame(id=as.character(id), top=cumsum(tops), bottom=cumsum(bottoms), name=paste('H',1:n_hz,sep=''), stringsAsFactors = FALSE)
  # generate several properties with different random means / sds
  for(i in seq_len(n_prop)) {
    # init storage
    p <- numeric(n_hz)
    if(method == 'random_walk') {
      # each horizon value = previous value + normal noise
      p[1] <- rnorm(1, mean=runif(n=1, min=-10, max=10), sd=runif(n=1, min=1, max=10))
      # FIX: guard the walk for single-horizon profiles; 2:n_hz with
      # n_hz == 1 iterated over c(2, 1) and errored on p[0] assignment
      if(n_hz > 1) {
        for(j in 2:n_hz)
          p[j] <- p[j-1] + rnorm(1, mean=runif(n=1, min=-10, max=10), sd=runif(n=1, min=1, max=10))
      }
    }
    if(method == 'LPP') {
      # generate synthetic values at horizon mid-points
      mids <- with(d, (top + bottom)/2)
      # LPP parameters: user-supplied via ... or drawn from uniform dists
      lpp.a <- if(is.null(dots[['lpp.a']])) runif(n=1, min=5, max=25) else dots[['lpp.a']]
      lpp.b <- if(is.null(dots[['lpp.b']])) runif(n=1, min=20, max=60) else dots[['lpp.b']]
      lpp.u <- if(is.null(dots[['lpp.u']])) runif(n=1, min=10, max=90) else dots[['lpp.u']]
      lpp.d <- if(is.null(dots[['lpp.d']])) runif(n=1, min=1, max=10) else dots[['lpp.d']]
      lpp.e <- if(is.null(dots[['lpp.e']])) runif(n=1, min=5, max=20) else dots[['lpp.e']]
      # generate vector of synthetic values based on LPP
      p <- .lpp(mids, a=lpp.a, b=lpp.b, u=lpp.u, d=lpp.d, e=lpp.e)
    }
    # add generated depth profile to horizons as p1, p2, ...
    new_col <- paste('p',i, sep='')
    d[, new_col] <- p
  }
  # optionally add horizon distinctness codes:
  # these are based on USDA-NCSS codes and approximate vertical offsets;
  # codes are constrained to (1/3) of the horizon thickness
  if(HzDistinctSim) {
    # standard codes and offsets
    codes <- c('A','C','G','D')
    offsets <- hzDistinctnessCodeToOffset(codes)
    # compute horizon thickness vector
    thick <- with(d, bottom-top)
    # sampling weights: 1 when the code's offset fits the horizon, 0 otherwise
    prob.matrix <- t(sapply(thick, function(i) (i/3) >= offsets))
    prob.matrix[which(prob.matrix)] <- 1
    d.codes <- vector(mode='character', length=n_hz)
    for(i in seq_len(n_hz)) {
      d.codes[i] <- sample(codes, size=1, prob=prob.matrix[i, ])
    }
    d$HzDistinctCode <- d.codes
  }
  # note: 3-4x performance hit when calling from lapply(1:big.number, ...)
  # optionally promote to SoilProfileCollection (requires aqp's depths<-)
  if(SPC) {
    depths(d) <- id ~ top + bottom
  }
  # all done
  return(d)
}
|
/R/random_profile.R
|
no_license
|
ncss-tech/aqp
|
R
| false
| false
| 8,493
|
r
|
.lpp <- function(x, a, b, u, d, e) {
  # Logistic power peak (LPP) depth function, evaluated at depths x.
  # a = baseline, a + b ~ amplitude, u = depth of the peak,
  # d and e control the abruptness of the peak. Vectorized over x.
  shifted <- exp((x + d * log(e) - u) / d)
  # decaying term and peak normalization constant
  decay <- (1 + shifted)^((-e - 1) / e)
  peak_scale <- (e + 1)^((e + 1) / e)
  a + (b / u) * decay * shifted * peak_scale
}
#' @title Random Profile
#'
#' @description Generate a random soil profile according to set criteria, with correlated
#' depth trends.
#'
#' The random walk method produces profiles with considerable variation between
#' horizons and is based on values from the normal distribution seeded with
#' means and standard deviations drawn from the uniform distribution of \[0,
#' 10].
#'
#' The logistic power peak (LPP) function can be used to generate random soil
#' property depth functions that are sharply peaked. LPP parameters can be
#' hard-coded using the optional arguments: "lpp.a", "lpp.b", "lpp.u", "lpp.d",
#' "lpp.e". Amplitude of the peak is controlled by ("lpp.a + "lpp.b"), depth of
#' the peak by "lpp.u", and abruptness by "lpp.d" and "lpp.e". Further
#' description of the method is outlined in (Brenton et al, 2011). Simulated
#' horizon distinctness codes are based on the USDA-NCSS field description
#' methods.
#' Simulated distinctness codes are constrained according to horizon thickness,
#' i.e. a gradual boundary (+/- 5cm) will not be simulated for horizons that
#' are thinner than 3x this vertical distance
#'
#' @aliases random_profile .lpp
#' @param id a character or numeric id used for this profile
#' @param n vector of possible number of horizons, or the exact number of
#' horizons (see below)
#' @param min_thick minimum thickness criteria for a simulated horizon
#' @param max_thick maximum thickness criteria for a simulated horizon
#' @param n_prop number of simulated soil properties (columns in the returned
#' dataframe)
#' @param exact should the exact number of requested horizons be generated?
#' (defaults to FALSE)
#' @param method named method used to synthesize depth function ('random_walk'
#' or 'LPP'), see details
#' @param HzDistinctSim optionally simulate horizon boundary distinctness codes
#' @param SPC result is a \code{SoilProfileCollection} object, otherwise a
#' \code{data.frame} object
#' @param \dots additional parameters passed-in to the LPP (\code{.lpp})
#' function
#' @return A \code{data.frame} or \code{SoilProfileCollection} object.
#' @note See examples for ideas on simulating several profiles at once.
#' @author Dylan E. Beaudette
#' @seealso \code{\link{profile_compare}, \link{hzDistinctnessCodeToOffset}}
#' @references Myers, D. B.; Kitchen, N. R.; Sudduth, K. A.; Miles, R. J.;
#' Sadler, E. J. & Grunwald, S. Peak functions for modeling high resolution
#' soil profile data Geoderma, 2011, 166, 74-83.
#' @keywords manip
#' @export
#' @examples
#'
#'
#' # generate 10 random profiles, result is a list of SoilProfileCollection objects
#' d <- lapply(1:10, random_profile, SPC=TRUE)
#'
#' # combine
#' d <- combine(d)
#'
#' # plot
#' opar <- par(mar=c(0,0,3,2))
#' plotSPC(d, color='p1', name='name', cex.names=0.75)
#' par(opar)
#'
#' # simulate horizon boundary distinctness codes:
#' d <- lapply(1:10, random_profile, SPC=TRUE, HzDistinctSim=TRUE)
#' d <- combine(d)
#'
#' d$HzD <- hzDistinctnessCodeToOffset(d$HzDistinctCode)
#'
#' opar <- par(mar=c(0,0,3,2))
#' plotSPC(d, name='name', color='p1', hz.distinctness.offset='HzD')
#' par(opar)
#'
#'
#' # depth functions are generated using the LPP function
#' opar <- par(mfrow=c(2,1), mar=c(0,0,3,0))
#'
#' # generate data
#' d.1 <- lapply(1:10, random_profile, SPC=TRUE, n=c(6, 7, 8), n_prop=1, method='LPP')
#' d.1 <- combine(d.1)
#'
#' # plot
#' plotSPC(d.1, name='name', color='p1', col.label = 'LPP Defaults')
#'
#'
#' # do this again, this time set all of the LPP parameters
#' d.2 <- lapply(1:10, random_profile, SPC=TRUE, n=c(6, 7, 8), n_prop=1, method='LPP',
#' lpp.a=5, lpp.b=10, lpp.d=5, lpp.e=5, lpp.u=25)
#' d.2 <- combine(d.2)
#'
#' # plot
#' plotSPC(d.2, name='name', color='p1', col.label = 'Custom LPP Parameters')
#'
#'
#' # reset plotting defaults
#' par(opar)
#'
#'
#'
#' # try plotting the LPP-derived simulated data
#' # aggregated over all profiles
#' a <- slab(d.2, fm= ~ p1)
#' a$mid <- with(a, (top + bottom) / 2)
#'
#' library(lattice)
#' (p1 <- xyplot(mid ~ p.q50, data=a,
#' lower=a$p.q25, upper=a$p.q75, ylim=c(150,-5), alpha=0.5,
#' panel=panel.depth_function, prepanel=prepanel.depth_function,
#' cf=a$contributing_fraction, xlab='Simulated Data', ylab='Depth',
#' main='LPP(a=5, b=10, d=5, e=5, u=25)',
#' par.settings=list(superpose.line=list(col='black', lwd=2))
#' ))
#'
#' # optionally add original data as step-functions
#' if(require(latticeExtra)) {
#' h <- horizons(d.2)
#' p1 + as.layer(xyplot(top ~ p1, groups=id, data=h,
#' horizontal=TRUE, type='S',
#' par.settings=list(superpose.line=list(col='blue', lwd=1, lty=2))))
#' }
#'
#'
#'
random_profile <- function(id, n = c(3,4,5,6), min_thick = 5, max_thick = 30, n_prop = 5, exact = FALSE, method= 'random_walk', HzDistinctSim = FALSE, SPC = FALSE, ...) {
  ## Simulate one soil profile: n_hz horizons with random thicknesses and
  ## n_prop depth-correlated properties (random walk or LPP depth function).
  ## Interface unchanged; see the roxygen block for full parameter docs.
  # sanity checks on arguments
  if(missing(id))
    stop('must specify an id')
  if(max_thick < min_thick)
    stop('illogical horizon thickness constraints')
  if(! method %in% c('random_walk', 'LPP'))
    stop('invalid method')
  # optional LPP parameters arrive via ...
  dots <- list(...)
  # if requested, give back the exact number of horizons
  # FIX: scalar short-circuit `&&` instead of elementwise `&` in an if()
  if(length(n) == 1 && exact)
    n_hz <- n
  # otherwise randomly choose from suggestions
  else
    n_hz <- sample(n, 1)
  # generate horizon thicknesses
  # FIX: seq_len() is empty when n_hz == 1; the original 1:(n_hz-1)
  # iterated over c(1, 0) and produced a spurious extra horizon
  tops <- integer(n_hz-1)
  for(i in seq_len(n_hz - 1))
    tops[i] <- sample(min_thick:max_thick, 1)
  # add 0, then generate bottom bnd
  tops <- as.integer(c(0, tops))
  bottoms <- as.integer(c(tops[-1], sample(min_thick:max_thick, 1)))
  # combine into a df
  # always treat ID as a character: "solves" some cases of SPC corruption due to re-ordering of integers cast to character:
  # https://github.com/ncss-tech/aqp/issues/90
  d <- data.frame(id=as.character(id), top=cumsum(tops), bottom=cumsum(bottoms), name=paste('H',1:n_hz,sep=''), stringsAsFactors = FALSE)
  # generate several properties with different random means / sds
  for(i in seq_len(n_prop)) {
    # init storage
    p <- numeric(n_hz)
    if(method == 'random_walk') {
      # each horizon value = previous value + normal noise
      p[1] <- rnorm(1, mean=runif(n=1, min=-10, max=10), sd=runif(n=1, min=1, max=10))
      # FIX: guard the walk for single-horizon profiles; 2:n_hz with
      # n_hz == 1 iterated over c(2, 1) and errored on p[0] assignment
      if(n_hz > 1) {
        for(j in 2:n_hz)
          p[j] <- p[j-1] + rnorm(1, mean=runif(n=1, min=-10, max=10), sd=runif(n=1, min=1, max=10))
      }
    }
    if(method == 'LPP') {
      # generate synthetic values at horizon mid-points
      mids <- with(d, (top + bottom)/2)
      # LPP parameters: user-supplied via ... or drawn from uniform dists
      lpp.a <- if(is.null(dots[['lpp.a']])) runif(n=1, min=5, max=25) else dots[['lpp.a']]
      lpp.b <- if(is.null(dots[['lpp.b']])) runif(n=1, min=20, max=60) else dots[['lpp.b']]
      lpp.u <- if(is.null(dots[['lpp.u']])) runif(n=1, min=10, max=90) else dots[['lpp.u']]
      lpp.d <- if(is.null(dots[['lpp.d']])) runif(n=1, min=1, max=10) else dots[['lpp.d']]
      lpp.e <- if(is.null(dots[['lpp.e']])) runif(n=1, min=5, max=20) else dots[['lpp.e']]
      # generate vector of synthetic values based on LPP
      p <- .lpp(mids, a=lpp.a, b=lpp.b, u=lpp.u, d=lpp.d, e=lpp.e)
    }
    # add generated depth profile to horizons as p1, p2, ...
    new_col <- paste('p',i, sep='')
    d[, new_col] <- p
  }
  # optionally add horizon distinctness codes:
  # these are based on USDA-NCSS codes and approximate vertical offsets;
  # codes are constrained to (1/3) of the horizon thickness
  if(HzDistinctSim) {
    # standard codes and offsets
    codes <- c('A','C','G','D')
    offsets <- hzDistinctnessCodeToOffset(codes)
    # compute horizon thickness vector
    thick <- with(d, bottom-top)
    # sampling weights: 1 when the code's offset fits the horizon, 0 otherwise
    prob.matrix <- t(sapply(thick, function(i) (i/3) >= offsets))
    prob.matrix[which(prob.matrix)] <- 1
    d.codes <- vector(mode='character', length=n_hz)
    for(i in seq_len(n_hz)) {
      d.codes[i] <- sample(codes, size=1, prob=prob.matrix[i, ])
    }
    d$HzDistinctCode <- d.codes
  }
  # note: 3-4x performance hit when calling from lapply(1:big.number, ...)
  # optionally promote to SoilProfileCollection (requires aqp's depths<-)
  if(SPC) {
    depths(d) <- id ~ top + bottom
  }
  # all done
  return(d)
}
|
#### Test 1 answers
## Question 1 -- descriptive statistics on disposable income (dpi)
# LifeCycleSavings built-in data
x <- LifeCycleSavings$dpi #a x is dpi data
x
median(x) #b median of x
IQR(x) #c Interquartile range
summary(x)[5]-summary(x)[2] #c another way. just ignore the name
quantile(x,.75)-quantile(x,.25) #c another way
(ex <-mean(x)) #d sample mean
(s <- sd(x)) #e sample standard deviation
c(ex-1*s,ex+1*s) # interval within 1 sd of mean
xinside <- x[x>ex-1*s & x <ex + 1*s ]
# NOTE(review): tibble()/%>% below assume the tidyverse is already attached;
# this script never calls library(tidyverse) -- confirm it is loaded first
tibble(x = x) %>% #the tidy way
filter(x<ex+s,x>ex-s) %>%
nrow()
38/length(x) #answer to f (38 = count from the tidy pipeline above)
length(xinside) # Number of elements within 1 sd of mean
length(xinside)/length(x) #f proportion within 1 sd from mean
sqrt(sum(x)) #g square root of sum of values in x
max(x) #h maximum of x
## Question 2 -- probability rules for three independent events a, b, c
pa <- .3
pb <- .1
pc <- .2
(paandb<-pa*pb) #a probability of a and b multiply by independence
(pagb <- pa) #b By definition of conditional probability and independence
(paandbandc <-pa*pb*pc) #c P(a and b and c) - multiply because of independence
(paorb <- pa + pb - paandb) #d P(A union B) = P(A) + P(B) - P(A intersect B)
# by deMorgans law a' intersect b' = (A union B)' so
1 - paorb #e demorgans law and P((A union B)')
(l3<-(1-pa)*(1-pb)*(1-pc)) #f Probability of losing all 3 independent bets.
pa+pb+pc-pa*pb-pa*pc-pb*pc+pa*pb*pc #g. Formula for P(A union B union C)
1-l3 #g another way. Want complement of losing all 3
### Question 3 -- Bayes' theorem: creative in math/music
# m creative in math nm Not creative in math
# u creative in music nu Not creative in music.
pu <- .29 # Probability of creative in music
(pnu<- 1-pu) # Probability of not creative in music
(pmgu <- .30) # Probability of cr. math given cr. music
(pmgnu<- .15) # Probability of cr. math given not cr. music
(pnmgu <- 1 - pmgu) # Probability of not cr. math given cr. music
(pnmgnu <- 1 - pmgnu) # Probability of not cr. math given not cr. music
(pm <- pmgu*pu + pmgnu*pnu)
# P(m) = P(m and u) + P(m and nu) = P(m|u)*P(u) + P(m|nu)*P(nu)
(pm <- pmgu*pu + pmgnu*pnu) #a) Law of total probability above
pmgu*pu #b P(m and u) = p(m|u)*pu : Note 1st term on right in a - multiplication principle
(pugm <- pmgu*pu / pm) # c Bayes Theorem p(u|m) = p(m and u)/p(m)
pu^5 #d Prob all 5 creative in music - multiply- independence
(1-pu)^5. #e None of the 5 creative in music. NOTE(review): '5.' parses as 5; trailing dot is a harmless typo
### Question 4 -- hypergeometric: drawing math books from a shelf
m <- 6 # number of math books
n <- 9 # number statistics books
k<- 4 # number of books TA picks
dhyper(0,m,n,k) # a P(X=0)
phyper(1,m,n,k) # b P(X<2), i.e. P(X<=1)
(p<-m/(m+n)) # proportion of math books on shelf
k*(m/(m+n))#expected value where k = n or the size of your sample.
#Variance of X if batch size is k=4 (finite population correction factor included)
(vx <- ((m+n-k)/(m+n-1))*k*p*(1-p))
(ex <- k*p) # c expected value for hypergeometric
(sdx<-sqrt(vx)) # d standard deviation of X
c(ex-sdx,ex+sdx) # e within 1sd
dhyper(1,m,n,k)+dhyper(2,m,n,k) #e P(X=1)+P(X=2): the integer values inside that interval
##Question 5 -- Poisson boat arrivals, rate 2.3 arrivals per 30 minutes
# x number of arrivals in an hour
# y = number of arrivals in a day.
(60/30)*2.3 #a Number of arrivals in 60 minutes. Convert to hour units
lambda <- (60/30)*2.3 #b Poisson parameter for X - arrivals in an hour is average arrivals per hour
(lambday<-24*lambda) #c parameter is proportional to time in Poisson
(sdx <- sqrt(lambda)) #d for Poisson, sd is square root of variance & variance = parameter
(vy <-lambday) #e variance is the parameter for poisson
dpois(0,lambda) #f probability of no boats arrive in hour
1 -ppois(8,lambda) #g prob more than 8 boats arrive in an hour
ppois(110,lambday)-ppois(89,lambday) #h P(90 <= Y <= 110)
sqrt(7)^2*lambda #i Variance of aX is a^2*V(X) and v(x) is lambda. NOTE(review): sqrt(7)^2 is just 7 -- if a = 7 this should be 7^2*lambda; presumably a = sqrt(7) here -- confirm against the question
(1 - dpois(0,lambda))^24 # j Use independence. True for hour 1 and hour 2 and...and hour 24
##Question 6 -- discrete distribution of ticket demand
x <- c(8,10,12,14,16,18,20) # demand for tickets
x
px <- c(.05,.10, .35, .25, .15, .05, .05) # must add to 1
px
sum(px)
rbind(x,px)
sum(px[5:7]) #a Probability X> 15 - p(16)+p(18)+p(20)
(ex <- sum(x*px)) #b expected value
(ex2<- sum(x^2*px)) #c Expected value of X^2
(vx <- ex2 - ex^2) #d variance is E(X^2) - E(X)^2
(sdx <- sqrt(vx)) #e standard deviation is sqrt of variance
7^2*vx #f V(aX+b) = a^2*V(X). our a= 7, V(X)=vx
Fx <- cumsum(px) # CDF of X
rbind(x,px,Fx)
Fx[4] # Probability x <=15 on a single week (X <= 14)
Fx[4]^7 #g. By independence <15 on each of 7 days
## Question 7 -- binomial: snow on March 3 over 12 years
# Binomial distribution - .29 probability of snowing on March 3
n <- 12. # The 12 years from 2019 - 2030 (NOTE: '12.' parses as 12; trailing dot is a typo)
p <- .29 # probability of snowing on March 3
1-p # probability of not snowing on March 3
# x is number of times snowing March 3's, This is binomial(x, 12, .29)
(ex <- n*p) #a expected value of binomial
(vx <- n*p*(1-p)) #b variance of binomial
(sdx <- sqrt(n*p*(1-p))) #c standard deviation = sqrt of variance
5*ex +2 #d E(ax + b) = ae(x) + b
5^2*vx #e. V(ax+b) = a^2*v(x)
x011<-0:11
# NOTE(review): with n = 12 the support is 0:12; x011 omits x = 12, so this
# sum slightly underestimates E(X^2) (dbinom(12, 12, .29) is tiny but nonzero)
sum(x011^2*dbinom(x011,n,p)) #f E(x^2) using definition of expected value
dbinom(3, n, p) #g P( x = 3)
dbinom(0,n,p) #h P(x=0). No snow 2019-2030
#i Prob x within 1 sd of its expected value
c(ex - sdx, ex+sdx) # x must be in this interval
# We see x within 1 sd of expected value if x is 2 to 5
pbinom(5,n,p) - pbinom(1,n,p) #i
pbinom(ex+sdx,n,p)-pbinom(ex-sdx,n,p) #i another way. Does not work if ex-sdx is integer
|
/testanswers/quiz1answers.R
|
permissive
|
amirhmstu/math301
|
R
| false
| false
| 5,189
|
r
|
#### Test 1 answers
## Question 1
# LifeCycleSavings built-in data
x <- LifeCycleSavings$dpi #a x is dpi data
x
median(x) #b median of x
IQR(x) #c Interquartile range
summary(x)[5]-summary(x)[2] #c another way. just ignore the name
quantile(x,.75)-quantile(x,.25) #c another way
(ex <-mean(x)) #d sample mean
(s <- sd(x)) #e sample standard deviation
c(ex-1*s,ex+1*s) # interval within 1 sd of mean
xinside <- x[x>ex-1*s & x <ex + 1*s ]
tibble(x = x) %>% #the tidy way
filter(x<ex+s,x>ex-s) %>%
nrow()
38/length(x) #answer to f
length(xinside) # Number of elements withing 1 sd of mean
length(xinside)/length(x) #f proportion within 1 sd from mean
sqrt(sum(x)) #g square root of sum of values in x
max(x) #h maximum of x
## Question 2
pa <- .3
pb <- .1
pc <- .2
(paandb<-pa*pb) #a probability of a and b multiply by independence
(pagb <- pa) #b By definition of conditional probability and independence
(paandbandc <-pa*pb*pc) #c P(a and b and c) - multiply because of independence
(paorb <- pa + pb - paandb) #d P(A union B) = P(A) + P(B) - P(A intersect B)
# by deMorgans law a' intersect b' = (A union B)' so
1 - paorb #e demorgans law and P((A intersect B)')
(l3<-(1-pa)*(1-pb)*(1-pc)) #f Probability of losing all 3 independent bets.
pa+pb+pc-pa*pb-pa*pc-pb*pc+pa*pb*pc #g. Formula for P(A union B union C)
1-l3 #g another way Want complement of losing all 3
### Question 3: Bayes theorem - creative in math / music
# m  = creative in math,  nm = not creative in math
# u  = creative in music, nu = not creative in music
pu <- 0.29            # P(u)
(pnu <- 1 - pu)       # P(nu), complement
(pmgu <- 0.30)        # P(m | u)
(pmgnu <- 0.15)       # P(m | nu)
(pnmgu <- 1 - pmgu)   # P(nm | u)
(pnmgnu <- 1 - pmgnu) # P(nm | nu)
(pm <- pmgu * pu + pmgnu * pnu)
# Law of total probability:
# P(m) = P(m and u) + P(m and nu) = P(m|u)P(u) + P(m|nu)P(nu)
(pm <- pmgu * pu + pmgnu * pnu)  # a) same computation, shown with the formula
pmgu * pu                        # b) multiplication rule: P(m and u) = P(m|u)P(u)
(pugm <- pmgu * pu / pm)         # c) Bayes: P(u|m) = P(m and u) / P(m)
pu^5                             # d) all 5 creative in music (independence)
(1 - pu)^5                       # e) none of the 5 creative in music
### Question 4: hypergeometric - TA picks books off a shelf
m <- 6  # number of math books
n <- 9  # number of statistics books
k <- 4  # number of books the TA picks
dhyper(0, m, n, k)  # a) P(X = 0)
phyper(1, m, n, k)  # b) P(X < 2) = P(X <= 1)
(p <- m / (m + n))  # proportion of math books on the shelf
k * (m / (m + n))   # E(X) = k * p, with k the sample (batch) size
# variance of X when the batch size is k = 4
(vx <- ((m + n - k) / (m + n - 1)) * k * p * (1 - p))
(ex <- k * p)       # c) expected value for a hypergeometric
(sdx <- sqrt(vx))   # d) standard deviation of X
c(ex - sdx, ex + sdx)                    # e) interval within 1 sd of the mean
dhyper(1, m, n, k) + dhyper(2, m, n, k)  # e) P(X is 1 or 2), the integers in that interval
## Question 5: Poisson boat arrivals, rate 2.3 arrivals per 30 minutes
# x = number of arrivals in an hour, y = number of arrivals in a day
(60 / 30) * 2.3            # a) arrivals per 60 minutes, converted to hour units
lambda <- (60 / 30) * 2.3  # b) Poisson parameter for X, average arrivals per hour
(lambday <- 24 * lambda)   # c) the Poisson parameter scales with time
(sdx <- sqrt(lambda))      # d) sd = sqrt(variance), and variance = parameter
(vy <- lambday)            # e) Poisson variance equals its parameter
dpois(0, lambda)           # f) probability no boats arrive in an hour
1 - ppois(8, lambda)       # g) probability more than 8 boats arrive in an hour
ppois(110, lambday) - ppois(89, lambday)  # h) P(90 <= Y <= 110)
sqrt(7)^2*lambda           # i) V(aX) = a^2 V(X), and V(X) = lambda
(1 - dpois(0, lambda))^24  # j) by independence: at least one boat in each of 24 hours
## Question 6: discrete distribution of ticket demand
x <- c(8, 10, 12, 14, 16, 18, 20)  # demand for tickets
x
px <- c(0.05, 0.10, 0.35, 0.25, 0.15, 0.05, 0.05)  # pmf, must add to 1
px
sum(px)       # sanity check: probabilities total 1
rbind(x, px)  # table view of the pmf
sum(px[5:7])  # a) P(X > 15) = p(16) + p(18) + p(20)
(ex <- sum(x * px))     # b) expected value
(ex2 <- sum(x^2 * px))  # c) expected value of X^2
(vx <- ex2 - ex^2)      # d) variance is E(X^2) - E(X)^2
(sdx <- sqrt(vx))       # e) standard deviation is sqrt of variance
7^2 * vx                # f) V(aX + b) = a^2 V(X), here a = 7
Fx <- cumsum(px)        # CDF of X
rbind(x, px, Fx)
Fx[4]    # probability X <= 15 in a single week (no mass between 14 and 16)
Fx[4]^7  # g) by independence, X <= 15 on each of the 7 days
## Question 7
# Binomial distribution - .29 probability of snowing on March 3
n <- 12   # the 12 years from 2019 - 2030
p <- .29  # probability of snowing on March 3
1 - p     # probability of not snowing on March 3
# x is the number of snowy March 3rds; x ~ Binomial(n = 12, p = .29)
(ex <- n*p)                    # a) expected value of binomial
(vx <- n*p*(1-p))              # b) variance of binomial
(sdx <- sqrt(n*p*(1-p)))       # c) standard deviation = sqrt of variance
5*ex +2  # d) E(aX + b) = a E(X) + b
5^2*vx   # e) V(aX + b) = a^2 V(X)
# FIX: the support of Binomial(12, p) is 0:12, not 0:11 — summing over
# 0:11 silently dropped the x = 12 term of E(X^2).
xs <- 0:n
sum(xs^2*dbinom(xs,n,p))  # f) E(X^2) using the definition of expected value
dbinom(3, n, p)  # g) P(X = 3)
dbinom(0,n,p)    # h) P(X = 0): no snow 2019-2030
# i) probability X is within 1 sd of its expected value
c(ex - sdx, ex+sdx)  # X must land in this interval
# X is within 1 sd of the expected value exactly when X is 2 to 5
pbinom(5,n,p) - pbinom(1,n,p)  # i)
pbinom(ex+sdx,n,p)-pbinom(ex-sdx,n,p)  # i) another way; fails when ex - sdx is an integer
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/textstat_dist.R, R/textstat_simil.R
\name{textstat_dist}
\alias{textstat_dist}
\alias{textstat_simil}
\title{Similarity and distance computation between documents or features}
\usage{
textstat_dist(x, selection = NULL, n = NULL, margin = c("documents",
"features"), method = "euclidean", upper = FALSE, diag = FALSE, p = 2)
textstat_simil(x, selection = NULL, n = NULL, margin = c("documents",
"features"), method = "correlation", upper = FALSE, diag = FALSE)
}
\arguments{
\item{x}{a \link{dfm} object}
\item{selection}{character vector of document names or feature labels from
\code{x}. A \code{"dist"} object is returned if selection is \code{NULL},
otherwise, a matrix is returned.}
\item{n}{the top \code{n} highest-ranking items will be returned. If n is
\code{NULL}, return all items. Useful if the output object will be coerced
into a list, for instance if the top \code{n} most similar features to a
target feature is desired. (See examples.)}
\item{margin}{identifies the margin of the dfm on which similarity or
difference will be computed: \code{documents} for documents or
\code{features} for word/term features.}
\item{method}{method the similarity or distance measure to be used; see
Details}
\item{upper}{whether the upper triangle of the symmetric \eqn{V \times V}
matrix is recorded}
\item{diag}{whether the diagonal of the distance matrix should be recorded}
\item{p}{The power of the Minkowski distance.}
}
\description{
These functions compute matrixes of distances and similarities between
documents or features from a \code{\link{dfm}} and return a
\code{\link[stats]{dist}} object (or a matrix if specific targets are
selected).
}
\details{
\code{textstat_dist} options are: \code{"euclidean"} (default),
\code{"Chisquared"}, \code{"Chisquared2"}, \code{"hamming"},
\code{"kullback"}, \code{"manhattan"}, \code{"maximum"}, \code{"canberra"},
and \code{"minkowski"}.
\code{textstat_simil} options are: \code{"correlation"} (default),
\code{"cosine"}, \code{"jaccard"}, \code{"eJaccard"}, \code{"dice"},
\code{"eDice"}, \code{"simple matching"}, \code{"hamann"}, and
\code{"faith"}.
}
\note{
If you want to compute similarity on a "normalized" dfm object
(controlling for variable document lengths, for methods such as correlation
for which different document lengths matter), then wrap the input dfm in
\code{\link{weight}(x, "relFreq")}.
}
\examples{
# create a dfm from inaugural addresses from Reagan onwards
presDfm <- dfm(corpus_subset(data_corpus_inaugural, Year > 1990),
remove = stopwords("english"), stem = TRUE, remove_punct = TRUE)
# distances for documents
(d1 <- textstat_dist(presDfm, margin = "documents"))
as.matrix(d1)
# distances for specific documents
textstat_dist(presDfm, "2017-Trump", margin = "documents")
textstat_dist(presDfm, "2005-Bush", margin = "documents", method = "eJaccard")
(d2 <- textstat_dist(presDfm, c("2009-Obama" , "2013-Obama"), margin = "documents"))
as.list(d1)
# similarities for documents
(s1 <- textstat_simil(presDfm, method = "cosine", margin = "documents"))
as.matrix(s1)
as.list(s1)
# similarities for specific documents
textstat_simil(presDfm, "2017-Trump", margin = "documents")
textstat_simil(presDfm, "2017-Trump", method = "cosine", margin = "documents")
textstat_simil(presDfm, c("2009-Obama" , "2013-Obama"), margin = "documents")
# compute some term similarities
(s2 <- textstat_simil(presDfm, c("fair", "health", "terror"), method = "cosine",
margin = "features", n = 8))
as.list(s2)
}
\seealso{
\code{\link{textstat_dist}}, \code{\link{as.list.dist}},
\code{\link{dist}}
}
\author{
Kenneth Benoit, Haiyan Wang
}
|
/man/textstat_simil.Rd
|
no_license
|
leeper/quanteda
|
R
| false
| true
| 3,789
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/textstat_dist.R, R/textstat_simil.R
\name{textstat_dist}
\alias{textstat_dist}
\alias{textstat_simil}
\title{Similarity and distance computation between documents or features}
\usage{
textstat_dist(x, selection = NULL, n = NULL, margin = c("documents",
"features"), method = "euclidean", upper = FALSE, diag = FALSE, p = 2)
textstat_simil(x, selection = NULL, n = NULL, margin = c("documents",
"features"), method = "correlation", upper = FALSE, diag = FALSE)
}
\arguments{
\item{x}{a \link{dfm} object}
\item{selection}{character vector of document names or feature labels from
\code{x}. A \code{"dist"} object is returned if selection is \code{NULL},
otherwise, a matrix is returned.}
\item{n}{the top \code{n} highest-ranking items will be returned. If n is
\code{NULL}, return all items. Useful if the output object will be coerced
into a list, for instance if the top \code{n} most similar features to a
target feature is desired. (See examples.)}
\item{margin}{identifies the margin of the dfm on which similarity or
difference will be computed: \code{documents} for documents or
\code{features} for word/term features.}
\item{method}{method the similarity or distance measure to be used; see
Details}
\item{upper}{whether the upper triangle of the symmetric \eqn{V \times V}
matrix is recorded}
\item{diag}{whether the diagonal of the distance matrix should be recorded}
\item{p}{The power of the Minkowski distance.}
}
\description{
These functions compute matrixes of distances and similarities between
documents or features from a \code{\link{dfm}} and return a
\code{\link[stats]{dist}} object (or a matrix if specific targets are
selected).
}
\details{
\code{textstat_dist} options are: \code{"euclidean"} (default),
\code{"Chisquared"}, \code{"Chisquared2"}, \code{"hamming"},
\code{"kullback"}, \code{"manhattan"}, \code{"maximum"}, \code{"canberra"},
and \code{"minkowski"}.
\code{textstat_simil} options are: \code{"correlation"} (default),
\code{"cosine"}, \code{"jaccard"}, \code{"eJaccard"}, \code{"dice"},
\code{"eDice"}, \code{"simple matching"}, \code{"hamann"}, and
\code{"faith"}.
}
\note{
If you want to compute similarity on a "normalized" dfm object
(controlling for variable document lengths, for methods such as correlation
for which different document lengths matter), then wrap the input dfm in
\code{\link{weight}(x, "relFreq")}.
}
\examples{
# create a dfm from inaugural addresses from Reagan onwards
presDfm <- dfm(corpus_subset(data_corpus_inaugural, Year > 1990),
remove = stopwords("english"), stem = TRUE, remove_punct = TRUE)
# distances for documents
(d1 <- textstat_dist(presDfm, margin = "documents"))
as.matrix(d1)
# distances for specific documents
textstat_dist(presDfm, "2017-Trump", margin = "documents")
textstat_dist(presDfm, "2005-Bush", margin = "documents", method = "eJaccard")
(d2 <- textstat_dist(presDfm, c("2009-Obama" , "2013-Obama"), margin = "documents"))
as.list(d1)
# similarities for documents
(s1 <- textstat_simil(presDfm, method = "cosine", margin = "documents"))
as.matrix(s1)
as.list(s1)
# similarities for specific documents
textstat_simil(presDfm, "2017-Trump", margin = "documents")
textstat_simil(presDfm, "2017-Trump", method = "cosine", margin = "documents")
textstat_simil(presDfm, c("2009-Obama" , "2013-Obama"), margin = "documents")
# compute some term similarities
(s2 <- textstat_simil(presDfm, c("fair", "health", "terror"), method = "cosine",
margin = "features", n = 8))
as.list(s2)
}
\seealso{
\code{\link{textstat_dist}}, \code{\link{as.list.dist}},
\code{\link{dist}}
}
\author{
Kenneth Benoit, Haiyan Wang
}
|
#' Class Auth
#'
#' Auth object
#'
#' Every object could be requested from this Auth object and any action
#' could start from this object using cascading style. Please check
#' \code{vignette("api")} for more information.
#'
#' @field from [character] Authentication method. Could be \code{"direct"}
#' (pass the credential information to the arguments directly),
#' \code{"env"} (read from pre-set system environment variables),
#' or \code{"file"} (read configurations from a credentials file).
#' Default is \code{"direct"}.
#' @field platform [character] Which platform you want to use,
#' if platform and url are both not specified, the default is
#' \code{"cgc"} (Cancer Genomics Cloud). Possible values include
#' \code{"cgc"}, \code{"aws-us"}, \code{"aws-eu"}, \code{"gcp"},
#' and \code{"cavatica"}.
#' @field url [character] Base URL for API. Please only use this when you
#' want to specify a platform that is not in the \code{platform} list
#' above, and also leaving \code{platform} unspecified.
#' @field token [character] Your authentication token.
#' @field sysenv_url Name of the system environment variable storing
#' the API base URL. By default: \code{"SB_API_ENDPOINT"}.
#' @field sysenv_token Name of the system environment variable storing
#' the auth token. By default: \code{"SB_AUTH_TOKEN"}.
#' @field config_file [character] Location of the user configuration file.
#' By default: \code{"~/.sevenbridges/credential"}.
#' @field profile_name [character] Profile name in the user configuration file.
#' The default value is \code{"default"}.
#' @field fs FS object, for mount and unmount file system.
#'
#' @importFrom stringr str_match
#'
#' @export Auth
#' @exportClass Auth
#' @examples
#' # Direct authentication (default)
#' # replace with your auth token
#' token <- "aef7e9e3f6c54fb1b338ac4ecddf1a56"
#' a <- Auth(platform = "cgc", token = token)
#'
#' \dontrun{
#' # Authentication with environment variables
#' # This will read system environments variables
#' # `SB_API_ENDPOINT` and `SB_AUTH_TOKEN` by default
#' a <- Auth(from = "env")
#'
#' # Authentication with user configuration file
#' # This will load profile `default` from config
#' # file `~/.sevenbridges/credential` by default
#' a <- Auth(from = "file")}
Auth <- setRefClass("Auth", fields = list(from = "character",
platform = "characterORNULL",
url = "character",
token = "character",
sysenv_url = "characterORNULL",
sysenv_token = "characterORNULL",
config_file = "characterORNULL",
profile_name = "characterORNULL",
fs = "FSORNULL"),
methods = list(
initialize = function(
  from = c("direct", "env", "file"),
  platform = NULL,
  url = NULL,
  token = NULL,
  sysenv_url = NULL,
  sysenv_token = NULL,
  config_file = NULL,
  profile_name = NULL,
  fs = NULL, ...) {
  # Authentication Logic
  #
  # 0x01. If `from == "direct"` (default)
  #       then use on-the-fly configuration.
  #
  # Four cases:
  #
  # 1. `platform` and `url` are both provided:
  #    throw error: platform and URL cannot coexist
  # 2. `platform` and `url` are both not provided:
  #    use `.sbg_default_platform` and throw a warning
  # 3. `platform` != NULL, `url` = NULL:
  #    use platform + token, throw message
  # 4. `platform` = NULL, `url` != NULL:
  #    use URL + token, throw message
  #
  # 0x02. If `from == "env"`
  #       then read from environment variables.
  #
  # One step:
  #
  # 1. Read environment variables `sysenv_url` and `sysenv_token`
  #    throw message indicating environment variable names
  #    use url + token
  #
  # 0x03. If `from == "file"`
  #       then use configuration file.
  #
  # Two steps:
  #
  # 1. Load ini format file at location `config_file`
  #    throw message indicating file location
  # 2. In loaded config list, look for `profile_name`
  #    throw message indicating profile name
  #    get url + token from this profile

  # For backward compatibility only: reject the removed `username`
  # argument with a pointer to the new auth methods.
  # (remove this block when enough time has passed)
  auth_call = as.list(match.call())
  if (!is.null(auth_call[["username"]]))
    stop("This authentication parameter is deprecated, please refer to: https://sbg.github.io/sevenbridges-r/articles/api.html#create-auth-object for the new authentication methods")
  fs <<- fs
  .from = match.arg(from)
  from <<- .from
  if (.from == "direct") {
    # In this case, `sysenv_url`, `sysenv_token`, `config_file`, and
    # `profile_name` should all be `NULL` even if they are assigned values.
    .sysenv_url = NULL
    .sysenv_token = NULL
    .config_file = NULL
    .profile_name = NULL
    sysenv_url <<- .sysenv_url
    sysenv_token <<- .sysenv_token
    config_file <<- .config_file
    profile_name <<- .profile_name
    # Four cases depending on `platform` and `url`
    # Case 1: platform and url are both provided
    if (!is.null(platform) & !is.null(url))
      stop("`platform` and `url` cannot be set simultaneously", call. = FALSE)
    # Case 2: platform and url are both *not* provided
    if (is.null(platform) & is.null(url)) {
      warning("`platform` and `url` are not set, will use the default platform: ",
              .sbg_default_platform, call. = FALSE)
      .platform = .sbg_default_platform
      .url = .sbg_baseurl[[.sbg_default_platform]]
      platform <<- .platform
      url <<- .url
    }
    # Case 3: platform is provided, url is not provided
    if (!is.null(platform) & is.null(url)) {
      # platform name sanity check against the known platform table
      .platform = platform
      if (.platform %in% names(.sbg_baseurl))
        .url = .sbg_baseurl[[.platform]] else
        stop("Platform does not exist, please check its spelling (case-sensitive)", call. = FALSE)
      message("Using platform: ", .platform)
      platform <<- .platform
      url <<- .url
    }
    # Case 4: platform is not provided, url is provided
    if (is.null(platform) & !is.null(url)) {
      .url = normalize_url(url)
      # lookup an accurate platform name for the given URL
      .platform = sbg_platform_lookup(.url)
      platform <<- .platform
      url <<- .url
    }
    if (is.null(token))
      stop('`token` must be set when `from = "direct"`', call. = FALSE)
    token <<- token
  }
  if (.from == "env") {
    # In this case, `config_file` and `profile_name`
    # should be `NULL` even if they are assigned values.
    .config_file = NULL
    .profile_name = NULL
    config_file <<- .config_file
    profile_name <<- .profile_name
    # resolve the environment variable *names* (fall back to defaults)
    if (is.null(sysenv_url))
      .sysenv_url = .sbg_default_sysenv_url else
      .sysenv_url = sysenv_url
    if (is.null(sysenv_token))
      .sysenv_token = .sbg_default_sysenv_token else
      .sysenv_token = sysenv_token
    message("Authenticating with system environment variables: ",
            .sysenv_url, ' and ', .sysenv_token)
    sysenv_url <<- .sysenv_url
    sysenv_token <<- .sysenv_token
    # extract url + token from the environment variables themselves
    .url = normalize_url(sbg_get_env(.sysenv_url))
    .token = sbg_get_env(.sysenv_token)
    url <<- .url
    token <<- .token
    # lookup an accurate platform name instead of simply `NULL`
    .platform = sbg_platform_lookup(.url)
    platform <<- .platform
  }
  if (.from == "file") {
    # In this case, `sysenv_url` and `sysenv_token`
    # should be `NULL` even if they are assigned values.
    .sysenv_url = NULL
    .sysenv_token = NULL
    sysenv_url <<- .sysenv_url
    sysenv_token <<- .sysenv_token
    # parse user config file (default location if not given)
    if (is.null(config_file))
      .config_file = .sbg_default_config_file else
      .config_file = config_file
    config_list = sbg_parse_config(.config_file)
    message("Authenticating with user configuration file: ", .config_file)
    config_file <<- .config_file
    # locate the user profile holding url + token
    if (is.null(profile_name))
      .profile_name = .sbg_default_profile_name else
      .profile_name = profile_name
    # extract url + token from the selected profile
    .url = normalize_url(config_list[[.profile_name]][["api_endpoint"]])
    .token = config_list[[.profile_name]][["auth_token"]]
    if (is.null(.url) | is.null(.token))
      stop("`The field api_endpoint` or `auth_token` is missing in profile:",
           .profile_name, call. = FALSE)
    message("Authenticating with user profile: ", .profile_name)
    profile_name <<- .profile_name
    url <<- .url
    token <<- .token
    # lookup an accurate platform name instead of simply `NULL`
    .platform = sbg_platform_lookup(.url)
    platform <<- .platform
  }
},
project_owner = function(owner = NULL, ...) {
  '
  List the projects owned by and accessible to a particular user.
  Each project\'s ID and URL will be returned.'
  # `owner` is required: the API endpoint is projects/{owner}
  if (is.null(owner)) {
    stop("owner must be provided. For example, Nate.")
  }
  req = api(token = token,
            base_url = url,
            path = paste0("projects/", owner),
            method = "GET", ...)
  res = status_check(req)
  if (hasItems(res)) {
    rp = parseItem(res)
    obj = .asProjectList(rp)
  } else {
    message("not found")
    # NOTE(review): the raw response is returned when no items match —
    # confirm callers expect this rather than NULL
    obj = res
  }
  # attach this Auth object so returned Projects can issue further calls
  obj = setAuth(obj, .self, "Project")
},
project_new = function(name = NULL,
                       billing_group_id = NULL,
                       description = name,
                       tags = list(),
                       type = "v2", ...) {
  '
  Create a new project. Required parameters: name and
  billing_group_id. Optional parameters: tags, description
  (defaults to name), and type.'
  if (is.null(name) || is.null(billing_group_id))
    stop("name, description, and billing_group_id must be provided")
  # the API expects tags as a list; coerce a plain character vector
  if (is.character(tags)) tags = as.list(tags)
  body = list("name" = name,
              "type" = type,
              "description" = description,
              "tags" = tags,
              "billing_group" = billing_group_id)
  res = api(path = "projects", body = body,
            method = "POST", ...)
  res = .asProject(res)
  # attach this Auth object so the new Project can issue further calls
  res = setAuth(res, .self, "Project")
},
# Project call
project = function(name = NULL,
                   id = NULL,
                   index = NULL,
                   ignore.case = TRUE,
                   exact = FALSE,
                   owner = NULL,
                   detail = FALSE, ...) {
  '
  If no id or name provided, this call returns a
  list of all projects you are a member of.
  Each project\'s project_id and URL on the CGC
  will be returned. If name or id provided,
  we did a match search the list'
  # NOTE(review): the `index` argument appears unused in this body — confirm
  # Fast path: an explicit id fetches exactly one project.
  if (!is.null(id)) {
    req = api(path = paste0("projects/", id), method = "GET", ...)
    res = .asProject(req)
    res = setAuth(res, .self, "Project")
    return(res)
  }
  # check owner: list either all projects or just that owner's
  if (is.null(owner)) {
    # show all projects
    req = api(path = "projects", method = "GET", ...)
    res = .asProjectList(req)
  } else {
    message("Owner: ", owner)
    req = api(path = paste0("projects/", owner),
              method = "GET", ...)
    res = .asProjectList(req)
  }
  # filter the listing by name/id pattern
  res = m.match(res, id = id, name = name,
                exact = exact,
                ignore.case = ignore.case)
  if (!length(res)) return(NULL)
  # if (length(res) == 1) {
  #     .id = res$id
  #     req = api(path = paste0("projects/", .id), method = "GET", ...)
  #     res = .asProject(req)
  #     res = setAuth(res, .self, "Project")
  #     return(res)
  # }
  # optionally re-fetch each match individually for full detail
  if (detail && length(res)) {
    if (is(res, "SimpleList")) {
      ids = sapply(res, function(x){ x$id })
    } else {
      ids = res$id
    }
    lst = lapply(ids, function(id) {
      req = api(path = paste0("projects/", id), method = "GET", ...)
      .asProject(req)
    })
    res = ProjectList(lst)
  }
  # double check: collapse a single-element list to the element itself
  if (length(res) == 1 && is(res, "SimpleList")) {
    res = res[[1]]
  }
  res = setAuth(res, .self, "Project")
  res
},
billing = function(id = NULL, breakdown = FALSE, ...) {
  '
  If no id provided, This call returns a list of
  paths used to access billing information via the
  API. else, This call lists all your billing groups,
  including groups that are pending or have been disabled.
  if breakdown = TRUE, This call returns a breakdown
  of spending per-project for the billing group
  specified by billing_group. For each project that
  the billing group is associated with, information
  is shown on the tasks run, including their
  initiating user (the runner), start and end times,
  and cost.'
  if (is.null(id)) {
    # show api
    req = api(path = "billing/groups", method = "GET", ...)
    req = .asBillingList(req)
    # collapse a single-element list to the element itself
    if (length(req) == 1 && is(req, "SimpleList")) {
      req = req[[1]]
    }
    return(req)
  } else {
    # a specific billing group, optionally with per-project breakdown
    if (breakdown) {
      req = api(path = paste0("billing/groups/", id, "/breakdown"),
                method = "GET", ...)
    } else {
      req = api(path = paste0("billing/groups/", id), method = "GET", ...)
    }
    req = .asBilling(req)
    return(req)
  }
},
invoice = function(id = NULL, ...) {
  '
  Without an id, return a list of all available invoices, with
  information about each, including whether the invoice is pending
  and the billing period it covers (use the query parameter bg_id
  to restrict to a single billing group). With an id, retrieve
  information about that invoice, including the costs for analysis
  and storage, and the invoice period.'
  # Build the endpoint path first, then issue a single GET request.
  endpoint = if (is.null(id)) {
    "billing/invoices"
  } else {
    paste0("billing/invoices/", id)
  }
  api(path = endpoint, method = "GET", ...)
},
api = function(...,
               limit = getOption("sevenbridges")$"limit",
               offset = getOption("sevenbridges")$"offset",
               complete = FALSE) {
  '
  This call returns all API paths, and pass arguments
  to api() function and input token and url automatically.
  When complete = TRUE, page through all matching items
  (100 per request) and return the combined result.'
  # issue the request with this Auth object token and base URL filled in
  req = sevenbridges::api(token, base_url = url, limit = limit, offset = offset, ...)
  req = status_check(req)
  if (complete) {
    # total number of matching items, as reported by the response header
    N = as.numeric(headers(response(req))[["x-total-matching-query"]])
    # FIX: `.item` was previously assigned only when the header was present,
    # so a missing header made the `.item < N` comparison fail with
    # "object '.item' not found". Assign unconditionally and guard N.
    .item = length(req$items)
    if (length(N) && !is.na(N) && .item < N) {
      # fetch the remaining pages, 100 items at a time
      pb = txtProgressBar(min = 1, max = N %/% 100 + 1, style = 3)
      res = NULL
      for (i in 1:(N %/% 100 + 1)) {
        .limit = 100
        .offset = (i - 1) * 100
        req = sevenbridges::api(token, base_url = url,
                                limit = .limit, offset = .offset, ...)
        req = status_check(req)
        res$items = c(res$items, req$items)
        setTxtProgressBar(pb, i)
      }
      cat("\n")
      res$href = NULL
    } else {
      # already complete (or total unknown): return the first page as-is
      return(req)
    }
    return(res)
  } else {
    return(req)
  }
},
show = function() {
  # Compact printout of this Auth object: show only the fields
  # a user normally needs to verify (endpoint and token).
  visible_fields = c("url", "token")
  .showFields(.self, "== Auth ==", values = visible_fields)
},
# v2 only feature
rate_limit = function(...) {
  '
  Return information about your current rate limit:
  the number of API calls you can make in one hour.'
  # single GET, converted straight to a Rate object
  .asRate(api(path = "rate_limit", method = "GET", ...))
},
user = function(username = NULL, ...) {
  '
  This call returns a list of the resources, such as projects,
  billing groups, and organizations, that are accessible to you.
  If you are not an administrator, this call will only return a
  successful response if {username} is replaced with your own
  username. If you are an administrator, you can replace
  {username} with the username of any CGC user, to return
  information on their resources.'
  if (is.null(username)) {
    # no username given: fall back to the authenticated user's own record
    req = api(token = token,
              path = "user/",
              method = "GET", ...)
    message("username is not provided, show run user information instead")
  } else {
    req = api(token = token,
              path = paste0("users/", username),
              method = "GET", ...)
  }
  .asUser(req)
},
# File API
file = function(name = NULL,
                id = NULL,
                project = NULL,
                exact = FALSE,
                detail = FALSE,
                metadata = list(),
                origin.task = NULL,
                tag = NULL,
                complete = FALSE,
                search.engine = c("server", "brute"), ...) {
  '
  This call returns a list of all files in a specified
  project that you can access. For each file, the call
  returns: 1) Its ID 2) Its filename The project is
  specified as a query parameter in the call.'
  search.engine = match.arg(search.engine)
  if (is.null(id)) {
    # listing/searching requires a project to scope the query
    if (is.null(project)) {
      stop("When file id is not provided, project id need to be provided.")
    }
  } else {
    # id given: fetch directly; a vector of ids is handled one by one
    if (length(id) > 1) {
      res = iterId(id, .self$file, exact = exact, ...)
      return(res)
    }
    req = api(path = paste0("files/", id), method = "GET", ...)
    res = .asFiles(req)
    res = setAuth(res, .self, "Files")
    return(res)
  }
  # build query: project scope plus optional metadata/origin.task/tag filters
  .query = list(project = project)
  if (length(metadata)) {
    # metadata filters are passed as metadata.<key> query parameters
    new.meta = unlist(metadata)
    names(new.meta) = sapply(names(new.meta),
                             function(nm) paste("metadata", nm, sep = "."))
    .query = c(.query, as.list(new.meta))
  }
  if (!is.null(origin.task)) {
    .query = c(.query, list(origin.task = origin.task))
  }
  # helper: turn a scalar or vector into a named list suitable for the query
  .split_item = function(x, list_name = NULL) {
    if (length(x) > 1) {
      names(x) = rep(list_name, length(x))
      x
    } else {
      if (is.list(x)) {
        x = x[[1]]
      }
      res = list(x)
      names(res) = list_name
      res
    }
  }
  if (!is.null(tag)) {
    .new_tag = .split_item(tag, "tag")
    # encode the tag for cases like "#1"
    .new_tag = lapply(.new_tag, URLencode, TRUE)
    .query = c(.query, .new_tag)
  }
  if (is.null(name)) {
    # if no id, no name, list all matching the filters
    if (length(metadata) || length(origin.task) || length(tag)) {
      complete = FALSE
    }
    req = api(path = "files", method = "GET",
              query = .query, complete = complete, ...)
    res = .asFilesList(req)
    res = setAuth(res, .self, "Files")
    if (length(res) == 1) {
      return(res[[1]])
    } else {
      return(res)
    }
  }
  # search now by name or multiple names
  # "server": let the API filter by name; "brute": fetch then match locally
  switch(search.engine,
         server = {
           if (exact) {
             .query = c(.split_item(name, "name"), .query)
             req = api(path = "files", method = "GET",
                       query = .query, complete = FALSE, ...)
             res = .asFilesList(req)
             if (length(res) == 1) res = res[[1]]
           } else {
             # use brute-force matching for non-exact name search
             req = api(path = "files", method = "GET",
                       query = .query, complete = complete, ...)
             res = .asFilesList(req)
             res = m.match(res, id = id, name = name, exact = exact)
           }
         },
         brute = {
           req = api(path = "files", method = "GET",
                     query = .query, complete = complete, ...)
           res = .asFilesList(req)
           res = m.match(res, id = id, name = name, exact = exact)
         })
  if (length(res)) {
    # optionally re-fetch each match individually for full detail
    if (detail) {
      if (is(res, "FilesList")) {
        ids = sapply(res, function(x){ x$id })
      } else {
        ids = res$id
      }
      lst = lapply(ids, function(id) {
        req = api(path = paste0("files/", id), method = "GET", ...)
        .asFiles(req)
      })
      res = FilesList(lst)
    }
  } else {
    return(NULL)
  }
  res = setAuth(res, .self, "Files")
  res
},
public_file = function(...) {
  # Convenience wrapper: search/list files in the public data project.
  file(project = "admin/sbg-public-data", ...)
},
copyFile = function(id, project = NULL, name = "") {
  # Copy one or more files (by id) into `project`.
  # `name` optionally renames the copy; "" keeps the original name.
  if (is.null(project))
    stop("project ID need to be provided, to which the file is copied to")
  if (length(id) > 1) {
    # vector of ids: copy each file in turn, announcing progress
    for (one_id in as.character(id)) {
      message("copying: ", one_id)
      copyFile(one_id, project = project, name = name)
    }
  } else {
    payload = list(project = project,
                   name = name)
    resp = api(path = paste0("files/", id, "/actions/copy"),
               body = payload, method = "POST")
    # return the copied file with this Auth attached
    setAuth(.asFiles(resp), .self, "Files")
  }
},
copy_file = function(id, project = NULL, name = "") {
  # Snake-case alias for copyFile(); same arguments, same behavior.
  copyFile(id = id, project = project, name = name)
},
# App API
app = function(name = NULL,
               id = NULL,
               exact = FALSE,
               ignore.case = TRUE,
               detail = FALSE,
               project = NULL,
               query = NULL,
               visibility = c("project", "public"),
               revision = NULL,
               complete = FALSE, ...) {
  '
  Find apps by id or by name. Without id or name, list apps
  (optionally scoped to a project, or restricted to publicly
  visible apps via visibility = "public").'
  visibility = match.arg(visibility)
  if (visibility == "public")
    query = c(query, list(visibility = "public"))
  # if id specified, does not have to list all
  if (!is.null(id)) {
    req = api(path = paste0("apps/", .update_revision(id, revision)),
              method = "GET", query = query, ...)
    return(setAuth(.asApp(req), .self, "App"))
  }
  # list all apps first, optionally scoped to a project
  if (is.null(project)) {
    req = api(path = "apps", method = "GET",
              query = query, complete = complete, ...)
    # browser()
    # if (complete) {
    #     res = lapply(req$it, function(x) {
    #         as.list(.asAppList(x))
    #     })
    #     res = do.call(c, res)
    #     res = do.call(AppList, res)
    # } else {
    #     res = .asAppList(req)
    # }
  } else {
    req = api(path = "apps", method = "GET",
              query = c(list(project = project), query),
              complete = complete, ...)
    # if (complete) {
    #     res = lapply(req, function(x) {
    #         as.list(.asAppList(x))
    #     })
    #     res = do.call(c, res)
    #     res = do.call(AppList, res)
    # } else {
    #     res = .asAppList(req)
    # }
  }
  res = .asAppList(req)
  # match the listing against the requested name/id
  res = m.match(res, id = id, name = name, exact = exact,
                ignore.case = ignore.case)
  # exactly one match: re-fetch it for full detail and return
  if (length(res) == 1) {
    .id = res$id
    req = api(path = paste0("apps/", .update_revision(.id, revision)),
              method = "GET", query = query, ...)
    res = .asApp(req)
    return(setAuth(res, .self, "App"))
  }
  # multiple matches: optionally re-fetch each for full detail
  if (detail && length(res)) {
    if (is(res, "AppList")) {
      ids = sapply(res, function(x){ x$id })
    } else {
      ids = res$id
    }
    lst = lapply(ids, function(id) {
      if (is.null(project)) {
        req = api(path = paste0("apps/", id),
                  query = query,
                  method = "GET", ...)
      } else {
        req = api(path = paste0("apps/", id), method = "GET",
                  query = c(list(project = project), query), ...)
      }
      .asApp(req)
    })
    res = AppList(lst)
  }
  if (!length(res)) return(NULL)
  setAuth(res, .self, "App")
},
public_app = function(...) {
  # Convenience wrapper: query only publicly visible apps.
  app(visibility = "public", ...)
},
copyApp = function(id, project = NULL, name = "") {
  # Copy one or more apps (by id) into `project`.
  # `name` optionally renames the copy; "" keeps the original name.
  if (is.null(project))
    stop("project ID need to be provided, to which the file is copied to")
  if (length(id) > 1) {
    # vector of ids: copy each app in turn, announcing progress
    for (one_id in as.character(id)) {
      message("copying: ", one_id)
      copyApp(one_id, project = project, name = name)
    }
  } else {
    payload = list(project = project,
                   name = name)
    resp = api(path = paste0("apps/", id, "/actions/copy"),
               body = payload, method = "POST")
    # return the copied app with this Auth attached
    setAuth(.asApp(resp), .self, "App")
  }
},
copy_app = function(id, project = NULL, name = "") {
  # Snake-case alias for copyApp(); same arguments, same behavior.
  copyApp(id = id, project = project, name = name)
},
task = function(name = NULL,
                id = NULL, project = NULL,
                parent = NULL,
                exact = FALSE, detail = FALSE,
                status = c("all", "queued", "draft",
                           "running", "completed",
                           "aborted", "failed"), ...) {
  '
  Find tasks by id or name, or list tasks filtered by parent
  task, project, and/or status.'
  status = match.arg(status)
  # id given: fetch the single task directly
  if (!is.null(id)) {
    req = api(path = paste0("tasks/", id), method = "GET", ...)
    res = .asTask(req)
    res = setAuth(res, .self, "Task")
    return(res)
  }
  # list children of a parent task, optionally filtered by status
  if (!is.null(parent)) {
    if (status == "all") {
      req = api(path = "tasks", method = "GET", query = list(parent = parent), ...)
    } else {
      req = api(path = "tasks", method = "GET",
                query = list(status = status, parent = parent), ...)
    }
  } else {
    if (is.null(project)) {
      # list all tasks visible to the user, optionally filtered by status
      if (status == "all") {
        req = api(path = "tasks", method = "GET", ...)
      } else {
        req = api(path = "tasks", method = "GET", query = list(status = status), ...)
      }
    } else {
      # list all tasks in the given project, optionally filtered by status
      if (status == "all") {
        req = api(path = paste0("projects/", project, "/tasks"),
                  method = "GET", ...)
        # req = api(path = "tasks", method = "GET", query = list(project = project), ...)
      } else {
        req = api(path = paste0("projects/", project, "/tasks"),
                  method = "GET",
                  query = list(status = status), ...)
      }
    }
  }
  res = .asTaskList(req)
  # matching the listing against the requested name/id
  res = m.match(res, id = id, name = name, exact = exact)
  # if (length(res) == 1) {
  #     .id = res$id
  #     req = api(path = paste0("tasks/", .id), method = "GET", ...)
  #     res = .asTask(req)
  #     res = setAuth(res, .self, "Task")
  #     return(res)
  # }
  if (length(res)) {
    # optionally re-fetch each match individually for full detail
    if (detail) {
      if (is(res, "TaskList")) {
        ids = sapply(res, function(x){ x$id })
      } else {
        ids = res$id
      }
      lst = lapply(ids, function(id) {
        req = api(path = paste0("tasks/", id), method = "GET", ...)
        .asTask(req)
      })
      res = TaskList(lst)
    }
  } else {
    return(NULL)
  }
  res = setAuth(res, .self, "Task")
  res
},
mount = function(mountPoint = NULL,
                 projectId = NULL,
                 ignore.stdout = TRUE,
                 sudo = TRUE, ...) {
  # Create an FS object with this Auth token, store it in the `fs`
  # field (so unmount() can find it later), then mount the project
  # file system at `mountPoint`.
  fs <<- FS(authToken = token, ...)
  fs$mount(mountPoint = mountPoint,
           projectId = projectId,
           ignore.stdout = ignore.stdout,
           sudo = sudo)
},
unmount = function(...) {
  # Unmount the file system held in the `fs` field.
  # NOTE(review): assumes mount() (or an FS passed to the constructor)
  # set `fs` first — errors otherwise; confirm intended.
  fs$unmount(...)
},
get_id_from_path = function(p) {
  '
  Resolve file paths to file ids via the API. Returns the vector of
  resolved ids, or NULL when none of the paths resolve. Paths that
  cannot be resolved are reported via message().'
  # FIX: previously called `a$api(...)`, which depended on a global Auth
  # object named `a`; call this object\'s own api() method instead.
  ids = api(path = "action/files/get_ids",
            method = "POST",
            body = as.list(p))
  # idx marks inputs the server could not resolve (NULL entries)
  idx = unlist(lapply(ids, is.null))
  if (sum(idx)) {
    # FIX: previously referenced the undefined `df.path`; report the
    # actual unresolved inputs from `p`.
    message("no id for following file: \n", paste(p[idx], collapse = "\n"))
  }
  if (sum(!idx)) {
    id.valid = unlist(ids[!idx])
  } else {
    id.valid = NULL
  }
  id.valid
},
add_volume = function(name = NULL,
                      type = c("s3", "gcs"),
                      root_url = NULL,
                      bucket = NULL,
                      prefix = "",
                      access_key_id = NULL,
                      secret_access_key = NULL,
                      client_email = NULL,
                      private_key = NULL,
                      sse_algorithm = "AES256",
                      aws_canned_acl = NULL,
                      access_mode = c("RW", "RO")) {
  '
  Register a cloud storage bucket (AWS S3 or Google Cloud Storage)
  as a volume. S3 requires access_key_id + secret_access_key;
  GCS requires client_email + private_key.'
  # NOTE(review): `aws_canned_acl` appears unused in this body — confirm
  if (is.null(name))
    stop("Please provide name, the name of the volume. It must be unique from all other volumes for this user.")
  type = match.arg(type)
  access_mode = match.arg(access_mode)
  # default service endpoint per storage provider
  if (is.null(root_url)) {
    root_url = switch(type,
                      s3 = "https://s3.amazonaws.com",
                      gcs = "https://www.googleapis.com/")
  }
  # credentials: each provider requires its own credential pair
  if (type == "s3" && !is.null(access_key_id) && !is.null(secret_access_key)) {
    credentials = list(
      access_key_id = access_key_id,
      secret_access_key = secret_access_key
    )
  } else if (type == "gcs" && !is.null(client_email) && !is.null(private_key)) {
    credentials = list(
      client_email = client_email,
      private_key = private_key
    )
  } else {
    stop("credentials are needed")
  }
  # NOTE(review): sse_algorithm is sent for both providers though it looks
  # S3-specific — confirm the API ignores it for gcs
  body = list(
    name = name,
    service = list(
      type = type,
      bucket = bucket,
      root_url = root_url,
      prefix = prefix,
      credentials = credentials,
      properties = list(sse_algorithm = sse_algorithm)
    ),
    access_mode = access_mode
  )
  res = api(path = "storage/volumes", body = body, method = "POST")
  res = .asVolume(res)
  res = setAuth(res, .self, "Volume")
  res
},
volume = function(name = NULL, id = NULL,
                  index = NULL, ignore.case = TRUE,
                  exact = FALSE, detail = FALSE, ...) {
  '
  If no id or name provided, this call returns a list
  of all volumes you are a member of. If name or id
  provided, we did a match search the list'
  # NOTE(review): `index` is accepted but never used in this body --
  # confirm whether it was meant to subset the listing.
  # Direct id lookup short-circuits the full listing.
  if (!is.null(id)) {
    req = api(path = paste0("storage/volumes/", id), method = "GET", ...)
    res = .asVolume(req)
    res = setAuth(res, .self, "Volume")
    return(res)
  }
  # list "all"
  req = api(path = "storage/volumes", method = "GET", ...)
  res = .asVolumeList(req)
  # With no name to match, return the whole listing.
  if (is.null(name)) {
    res = setAuth(res, .self, "Volume")
    return(res)
  }
  # Match the listing against `name` (exact or fuzzy).
  res = m.match(res, id = id, name = name, exact = exact,
                ignore.case = ignore.case)
  if (!length(res)) return(NULL)
  # Optionally fetch the full record for every match.
  if (detail && length(res)) {
    if (is(res, "SimpleList")) {
      ids = sapply(res, function(x){ x$id })
    } else {
      ids = res$id
    }
    lst = lapply(ids, function(id) {
      req = api(path = paste0("storage/volumes/", id), method = "GET", ...)
      .asVolume(req)
    })
    res = VolumeList(lst)
  }
  # double check
  # A single match is unwrapped from its list container.
  if (length(res) == 1 && is(res, "SimpleList")) {
    res = res[[1]]
  }
  res = setAuth(res, .self, "Volume")
  res
}
))
setClassUnion("AuthORNULL", c("Auth", "NULL"))
|
/R/class-auth.R
|
permissive
|
gmdzy2010/sevenbridges-r
|
R
| false
| false
| 53,022
|
r
|
#' Class Auth
#'
#' Auth object
#'
#' Every object could be requested from this Auth object and any action
#' could start from this object using cascading style. Please check
#' \code{vignette("api")} for more information.
#'
#' @field from [character] Authentication method. Could be \code{"direct"}
#' (pass the credential information to the arguments directly),
#' \code{"env"} (read from pre-set system environment variables),
#' or \code{"file"} (read configurations from a credentials file).
#' Default is \code{"direct"}.
#' @field platform [character] Which platform you want to use,
#' if platform and url are both not specified, the default is
#' \code{"cgc"} (Cancer Genomics Cloud). Possible values include
#' \code{"cgc"}, \code{"aws-us"}, \code{"aws-eu"}, \code{"gcp"},
#' and \code{"cavatica"}.
#' @field url [character] Base URL for API. Please only use this when you
#' want to specify a platform that is not in the \code{platform} list
#' above, and also leaving \code{platform} unspecified.
#' @field token [character] Your authentication token.
#' @field sysenv_url Name of the system environment variable storing
#' the API base URL. By default: \code{"SB_API_ENDPOINT"}.
#' @field sysenv_token Name of the system environment variable storing
#' the auth token. By default: \code{"SB_AUTH_TOKEN"}.
#' @field config_file [character] Location of the user configuration file.
#' By default: \code{"~/.sevenbridges/credential"}.
#' @field profile_name [character] Profile name in the user configuration file.
#' The default value is \code{"default"}.
#' @field fs FS object, for mount and unmount file system.
#'
#' @importFrom stringr str_match
#'
#' @export Auth
#' @exportClass Auth
#' @examples
#' # Direct authentication (default)
#' # replace with your auth token
#' token <- "aef7e9e3f6c54fb1b338ac4ecddf1a56"
#' a <- Auth(platform = "cgc", token = token)
#'
#' \dontrun{
#' # Authentication with environment variables
#' # This will read system environments variables
#' # `SB_API_ENDPOINT` and `SB_AUTH_TOKEN` by default
#' a <- Auth(from = "env")
#'
#' # Authentication with user configuration file
#' # This will load profile `default` from config
#' # file `~/.sevenbridges/credential` by default
#' a <- Auth(from = "file")}
Auth <- setRefClass("Auth", fields = list(from = "character",
platform = "characterORNULL",
url = "character",
token = "character",
sysenv_url = "characterORNULL",
sysenv_token = "characterORNULL",
config_file = "characterORNULL",
profile_name = "characterORNULL",
fs = "FSORNULL"),
methods = list(
initialize = function(
from = c("direct", "env", "file"),
platform = NULL,
url = NULL,
token = NULL,
sysenv_url = NULL,
sysenv_token = NULL,
config_file = NULL,
profile_name = NULL,
fs = NULL, ...) {
# Authentication Logic
#
# 0x01. If `from == "direct"` (default)
# then use on-the-fly configuration.
#
# Four cases:
#
# 1. `platform` and `url` are both provided:
# throw error: platform and URL cannot coexist
# 2. `platform` and `url` are both not provided:
# use `.sbg_default_platform` and throw a warning
# 3. `platform` != NULL, `url` = NULL:
# use platform + token, throw message
# 4. `platform` = NULL, `url` != NULL:
# use URL + token, throw message
#
# 0x02. If `from == "env"`
# then read from environment variables.
#
# One step:
#
# 1. Read environment variables `sysenv_url`
# and `sysenv_token`
# throw message indicating environment
# variable names
# use url + token
#
# 0x03. If `from == "file"`
# then use configuration file.
#
# Two steps:
#
# 1. Load ini format file at location `config_file`
# throw message indicating file location
# 2. In loaded config list, look for `profile_name`
# throw message indicating profile name
# get url + token from this profile
# For backward compatibility only:
# remove this block when enough time has passed
auth_call = as.list(match.call())
if (!is.null(auth_call[["username"]]))
stop("This authentication parameter is deprecated, please refer to: https://sbg.github.io/sevenbridges-r/articles/api.html#create-auth-object for the new authentication methods")
fs <<- fs
.from = match.arg(from)
from <<- .from
if (.from == "direct") {
# In this case, `sysenv_url`, `sysenv_token`,
# `config_file`, and `profile_name`
# should all be `NULL` even if they
# are assigned values
.sysenv_url = NULL
.sysenv_token = NULL
.config_file = NULL
.profile_name = NULL
sysenv_url <<- .sysenv_url
sysenv_token <<- .sysenv_token
config_file <<- .config_file
profile_name <<- .profile_name
# Four cases depending on `platform` and `url`
# Case 1: platform and url are both provided
if (!is.null(platform) & !is.null(url))
stop("`platform` and `url` cannot be set simultaneously", call. = FALSE)
# Case 2: platform and url are both *not* provided
if (is.null(platform) & is.null(url)) {
warning("`platform` and `url` are not set, will use the default platform: ",
.sbg_default_platform, call. = FALSE)
.platform = .sbg_default_platform
.url = .sbg_baseurl[[.sbg_default_platform]]
platform <<- .platform
url <<- .url
}
# Case 3: platform is provided, url is not provided
if (!is.null(platform) & is.null(url)) {
# platform name sanity check
.platform = platform
if (.platform %in% names(.sbg_baseurl))
.url = .sbg_baseurl[[.platform]] else
stop("Platform does not exist, please check its spelling (case-sensitive)", call. = FALSE)
message("Using platform: ", .platform)
platform <<- .platform
url <<- .url
}
# Case 4: platform is not provided, url is provided
if (is.null(platform) & !is.null(url)) {
.url = normalize_url(url)
# lookup an accurate platform name
.platform = sbg_platform_lookup(.url)
platform <<- .platform
url <<- .url
}
if (is.null(token))
stop('`token` must be set when `from = "direct"`', call. = FALSE)
token <<- token
}
if (.from == "env") {
# In this case, `config_file` and `profile_name`
# should be `NULL` even if they
# are assigned values
.config_file = NULL
.profile_name = NULL
config_file <<- .config_file
profile_name <<- .profile_name
# get system environment variables
if (is.null(sysenv_url))
.sysenv_url = .sbg_default_sysenv_url else
.sysenv_url = sysenv_url
if (is.null(sysenv_token))
.sysenv_token = .sbg_default_sysenv_token else
.sysenv_token = sysenv_token
message("Authenticating with system environment variables: ",
.sysenv_url, ' and ', .sysenv_token)
sysenv_url <<- .sysenv_url
sysenv_token <<- .sysenv_token
# extract url + token from environment variables
.url = normalize_url(sbg_get_env(.sysenv_url))
.token = sbg_get_env(.sysenv_token)
url <<- .url
token <<- .token
# lookup an accurate platform name instead of simply `NULL`
.platform = sbg_platform_lookup(.url)
platform <<- .platform
}
if (.from == "file") {
# In this case, `sysenv_url`, `sysenv_token`,
# should be `NULL` even if they
# are assigned values
.sysenv_url = NULL
.sysenv_token = NULL
sysenv_url <<- .sysenv_url
sysenv_token <<- .sysenv_token
# parse user config file
if (is.null(config_file))
.config_file = .sbg_default_config_file else
.config_file = config_file
config_list = sbg_parse_config(.config_file)
message("Authenticating with user configuration file: ", .config_file)
config_file <<- .config_file
# locate user profile with url + token
if (is.null(profile_name))
.profile_name = .sbg_default_profile_name else
.profile_name = profile_name
# extract url + token from profile
.url = normalize_url(config_list[[.profile_name]][["api_endpoint"]])
.token = config_list[[.profile_name]][["auth_token"]]
if (is.null(.url) | is.null(.token))
stop("`The field api_endpoint` or `auth_token` is missing in profile:",
.profile_name, call. = FALSE)
message("Authenticating with user profile: ", .profile_name)
profile_name <<- .profile_name
url <<- .url
token <<- .token
# lookup an accurate platform name instead of simply `NULL`
.platform = sbg_platform_lookup(.url)
platform <<- .platform
}
},
project_owner = function(owner = NULL, ...) {
  '
  List the projects owned by and accessible to a particular user.
  Each project\'s ID and URL will be returned.'
  # owner: platform username whose projects to list (required).
  if (is.null(owner)) {
    stop("owner must be provided. For example, Nate.")
  }
  req = api(token = token,
            base_url = url,
            path = paste0("projects/", owner),
            method = "GET", ...)
  res = status_check(req)
  if (hasItems(res)) {
    rp = parseItem(res)
    obj = .asProjectList(rp)
  } else {
    message("not found")
    obj = res
  }
  # Fix: the method previously ended in an assignment
  # (`obj = setAuth(...)`), so its value was returned invisibly;
  # return the authenticated object explicitly.
  setAuth(obj, .self, "Project")
},
project_new = function(name = NULL,
                       billing_group_id = NULL,
                       description = name,
                       tags = list(),
                       type = "v2", ...) {
  '
  Create new projects, required parameters: name,
  billing_group_id, optional parameters: tags and
  description, type.'
  if (is.null(name) || is.null(billing_group_id))
    stop("name, description, and billing_group_id must be provided")
  # check tags: a plain character vector must become a list so it
  # serializes as a JSON array.
  if (is.character(tags)) tags = as.list(tags)
  body = list("name" = name,
              "type" = type,
              "description" = description,
              "tags" = tags,
              "billing_group" = billing_group_id)
  res = api(path = "projects", body = body,
            method = "POST", ...)
  res = .asProject(res)
  # Fix: previously ended in `res = setAuth(...)`, so the newly
  # created Project was returned invisibly.
  setAuth(res, .self, "Project")
},
# Project call
project = function(name = NULL,
id = NULL,
index = NULL,
ignore.case = TRUE,
exact = FALSE,
owner = NULL,
detail = FALSE, ...) {
'
If no id or name provided, this call returns a
list of all projects you are a member of.
Each project\'s project_id and URL on the CGC
will be returned. If name or id provided,
we did a match search the list'
if (!is.null(id)) {
req = api(path = paste0("projects/", id), method = "GET", ...)
res = .asProject(req)
res = setAuth(res, .self, "Project")
return(res)
}
# check owner
if (is.null(owner)) {
# show all projects
req = api(path = "projects", method = "GET", ...)
res = .asProjectList(req)
} else {
message("Owner: ", owner)
req = api(path = paste0("projects/", owner),
method = "GET", ...)
res = .asProjectList(req)
}
res = m.match(res, id = id, name = name,
exact = exact,
ignore.case = ignore.case)
if (!length(res)) return(NULL)
# if (length(res) == 1) {
# .id = res$id
# req = api(path = paste0("projects/", .id), method = "GET", ...)
# res = .asProject(req)
# res = setAuth(res, .self, "Project")
# return(res)
# }
if (detail && length(res)) {
if (is(res, "SimpleList")) {
ids = sapply(res, function(x){ x$id })
} else {
ids = res$id
}
lst = lapply(ids, function(id) {
req = api(path = paste0("projects/", id), method = "GET", ...)
.asProject(req)
})
res = ProjectList(lst)
}
# double check
if (length(res) == 1 && is(res, "SimpleList")) {
res = res[[1]]
}
res = setAuth(res, .self, "Project")
res
},
billing = function(id = NULL, breakdown = FALSE, ...) {
'
If no id provided, This call returns a list of
paths used to access billing information via the
API. else, This call lists all your billing groups,
including groups that are pending or have been disabled.
if breakdown = TRUE, This call returns a breakdown
of spending per-project for the billing group
specified by billing_group. For each project that
the billing group is associated with, information
is shown on the tasks run, including their
initiating user (the runner), start and end times,
and cost.'
if (is.null(id)) {
# show api
req = api(path = "billing/groups", method = "GET", ...)
req = .asBillingList(req)
if (length(req) == 1 && is(req, "SimpleList")) {
req = req[[1]]
}
return(req)
} else {
if (breakdown) {
req = api(path = paste0("billing/groups/", id, "/breakdown"),
method = "GET", ...)
} else {
req = api(path = paste0("billing/groups/", id), method = "GET", ...)
}
req = .asBilling(req)
return(req)
}
},
invoice = function(id = NULL, ...) {
'
If no id provided, This call returns a list of invoices,
with information about each, including whether or not
the invoice is pending and the billing period it covers.
The call returns information about all your available
invoices, unless you use the query parameter bg_id to
specify the ID of a particular billing group, in which
case it will return the invoice incurred by that billing
group only.
If id was provided, This call retrieves information about
a selected invoice, including the costs for analysis
and storage, and the invoice period.'
if (is.null(id)) {
req = api(path = "billing/invoices", method = "GET", ...)
} else {
req = api(path = paste0("billing/invoices/", id), method = "GET", ...)
}
req
},
api = function(...,
limit = getOption("sevenbridges")$"limit",
offset = getOption("sevenbridges")$"offset",
complete = FALSE) {
'
This call returns all API paths, and pass arguments
to api() function and input token and url automatically'
req = sevenbridges::api(token, base_url = url, limit = limit, offset = offset, ...)
req = status_check(req)
if (complete) {
N = as.numeric(headers(response(req))[["x-total-matching-query"]])
if (length(N)) .item = length(req$items)
if (.item < N) {
pb = txtProgressBar(min = 1, max = N %/% 100 + 1, style = 3)
res = NULL
for (i in 1:(N %/% 100 + 1)) {
.limit = 100
.offset = (i - 1) * 100
req = sevenbridges::api(token, base_url = url,
limit = .limit, offset = .offset, ...)
req = status_check(req)
res$items = c(res$items, req$items)
setTxtProgressBar(pb, i)
}
cat("\n")
res$href = NULL
} else {
return(req)
}
return(res)
} else {
return(req)
}
},
# Compact printout of the Auth object: shows the API base URL and token.
show = function() {
  .showFields(.self, "== Auth ==",
              values = c("url", "token"))
},
# v2 only feature
# Query the API rate-limit endpoint and wrap the response as a Rate object.
rate_limit = function(...) {
  '
  This call returns information about your current
  rate limit. This is the number of API calls you can
  make in one hour.'
  req = api(path = "rate_limit", method = "GET", ...)
  .asRate(req)
},
user = function(username = NULL, ...) {
'
This call returns a list of the resources, such as projects,
billing groups, and organizations, that are accessible to you.
If you are not an administrator, this call will only return a
successful response if {username} is replaced with your own
username. If you are an administrator, you can replace
{username} with the username of any CGC user, to return
information on their resources.'
if (is.null(username)) {
req = api(token = token,
path = "user/",
method = "GET", ...)
message("username is not provided, show run user information instead")
} else {
req = api(token = token,
path = paste0("users/", username),
method = "GET", ...)
}
.asUser(req)
},
# File API
file = function(name = NULL,
id = NULL,
project = NULL,
exact = FALSE,
detail = FALSE,
metadata = list(),
origin.task = NULL,
tag = NULL,
complete = FALSE,
search.engine = c("server", "brute"), ...) {
'
This call returns a list of all files in a specified
project that you can access. For each file, the call
returns: 1) Its ID 2) Its filename The project is
specified as a query parameter in the call.'
search.engine = match.arg(search.engine)
if (is.null(id)) {
if (is.null(project)) {
stop("When file id is not provided, project id need to be provided.")
}
} else {
if (length(id) > 1) {
res = iterId(id, .self$file, exact = exact, ...)
return(res)
}
req = api(path = paste0("files/", id), method = "GET", ...)
res = .asFiles(req)
res = setAuth(res, .self, "Files")
return(res)
}
# build query
.query = list(project = project)
if (length(metadata)) {
new.meta = unlist(metadata)
names(new.meta) = sapply(names(new.meta),
function(nm) paste("metadata", nm, sep = "."))
.query = c(.query, as.list(new.meta))
}
if (!is.null(origin.task)) {
.query = c(.query, list(origin.task = origin.task))
}
.split_item = function(x, list_name = NULL) {
if (length(x) > 1) {
names(x) = rep(list_name, length(x))
x
} else {
if (is.list(x)) {
x = x[[1]]
}
res = list(x)
names(res) = list_name
res
}
}
if (!is.null(tag)) {
.new_tag = .split_item(tag, "tag")
# encode the tag for cases like "#1"
.new_tag = lapply(.new_tag, URLencode, TRUE)
.query = c(.query, .new_tag)
}
if (is.null(name)) {
# if no id, no name, list all
if (length(metadata) || length(origin.task) || length(tag)) {
complete = FALSE
}
req = api(path = "files", method = "GET",
query = .query, complete = complete, ...)
res = .asFilesList(req)
res = setAuth(res, .self, "Files")
if (length(res) == 1) {
return(res[[1]])
} else {
return(res)
}
}
# search now by name or multiple names
# get all files
switch(search.engine,
server = {
if (exact) {
.query = c(.split_item(name, "name"), .query)
req = api(path = "files", method = "GET",
query = .query, complete = FALSE, ...)
res = .asFilesList(req)
if (length(res) == 1) res = res[[1]]
} else {
# use brute
req = api(path = "files", method = "GET",
query = .query, complete = complete, ...)
res = .asFilesList(req)
res = m.match(res, id = id, name = name, exact = exact)
}
},
brute = {
req = api(path = "files", method = "GET",
query = .query, complete = complete, ...)
res = .asFilesList(req)
res = m.match(res, id = id, name = name, exact = exact)
})
if (length(res)) {
if (detail) {
if (is(res, "FilesList")) {
ids = sapply(res, function(x){ x$id })
} else {
ids = res$id
}
lst = lapply(ids, function(id) {
req = api(path = paste0("files/", id), method = "GET", ...)
.asFiles(req)
})
res = FilesList(lst)
}
} else {
return(NULL)
}
res = setAuth(res, .self, "Files")
res
},
# Convenience wrapper: search files in the public reference-data
# project "admin/sbg-public-data"; all arguments forwarded to file().
public_file = function(...) {
  file(project = "admin/sbg-public-data", ...)
},
copyFile = function(id, project = NULL, name = "") {
if (is.null(project))
stop("project ID need to be provided, to which the file is copied to")
# iteratively
if (length(id) > 1) {
ids = as.character(id)
for (i in ids) {
message("copying: ", i)
copyFile(i, project = project, name = name)
}
} else {
body = list(project = project,
name = name)
res = api(path = paste0("files/", id, "/actions/copy"),
body = body, method = "POST")
res = .asFiles(res)
setAuth(res, .self, "Files")
}
},
# snake_case alias for copyFile(); forwards all arguments unchanged.
copy_file = function(id, project = NULL, name = "") {
  copyFile(id = id, project = project, name = name)
},
# App API
app = function(name = NULL,
id = NULL,
exact = FALSE,
ignore.case = TRUE,
detail = FALSE,
project = NULL,
query = NULL,
visibility = c("project", "public"),
revision = NULL,
complete = FALSE, ...) {
visibility = match.arg(visibility)
if (visibility == "public")
query = c(query, list(visibility = "public"))
# if id specified, does not have to list all
if (!is.null(id)) {
req = api(path = paste0("apps/", .update_revision(id, revision)),
method = "GET", query = query, ...)
return(setAuth(.asApp(req), .self, "App"))
}
# list all apps first
if (is.null(project)) {
req = api(path = "apps", method = "GET",
query = query, complete = complete, ...)
# browser()
# if (complete) {
# res = lapply(req$it, function(x) {
# as.list(.asAppList(x))
# })
# res = do.call(c, res)
# res = do.call(AppList, res)
# } else {
# res = .asAppList(req)
# }
} else {
req = api(path = "apps", method = "GET",
query = c(list(project = project), query),
complete = complete, ...)
# if (complete) {
# res = lapply(req, function(x) {
# as.list(.asAppList(x))
# })
# res = do.call(c, res)
# res = do.call(AppList, res)
# } else {
# res = .asAppList(req)
# }
}
res = .asAppList(req)
# match
res = m.match(res, id = id, name = name, exact = exact,
ignore.case = ignore.case)
if (length(res) == 1) {
.id = res$id
req = api(path = paste0("apps/", .update_revision(.id, revision)),
method = "GET", query = query, ...)
res = .asApp(req)
return(setAuth(res, .self, "App"))
}
if (detail && length(res)) {
if (is(res, "AppList")) {
ids = sapply(res, function(x){ x$id })
} else {
ids = res$id
}
lst = lapply(ids, function(id) {
if (is.null(project)) {
req = api(path = paste0("apps/", id),
query = query,
method = "GET", ...)
} else {
req = api(path = paste0("apps/", id), method = "GET",
query = c(list(project = project), query), ...)
}
.asApp(req)
})
res = AppList(lst)
}
if (!length(res)) return(NULL)
setAuth(res, .self, "App")
},
# Convenience wrapper: search apps with public visibility;
# all arguments forwarded to app().
public_app = function(...) {
  app(visibility = "public", ...)
},
copyApp = function(id, project = NULL, name = "") {
  # Copy one or more apps (by id) into `project`; a vector of ids is
  # handled one at a time via recursion. Returns the copied App
  # (single-id case) with this Auth attached.
  if (is.null(project))
    # Fix: the message referred to copying a "file"; this method copies an app.
    stop("project ID need to be provided, to which the app is copied to")
  # iteratively
  if (length(id) > 1) {
    ids = as.character(id)
    for (i in ids) {
      message("copying: ", i)
      copyApp(i, project = project, name = name)
    }
  } else {
    body = list(project = project,
                name = name)
    res = api(path = paste0("apps/", id, "/actions/copy"),
              body = body, method = "POST")
    res = .asApp(res)
    setAuth(res, .self, "App")
  }
},
# snake_case alias for copyApp(); forwards all arguments unchanged.
copy_app = function(id, project = NULL, name = "") {
  copyApp(id = id, project = project, name = name)
},
task = function(name = NULL,
id = NULL, project = NULL,
parent = NULL,
exact = FALSE, detail = FALSE,
status = c("all", "queued", "draft",
"running", "completed",
"aborted", "failed"), ...) {
status = match.arg(status)
if (!is.null(id)) {
req = api(path = paste0("tasks/", id), method = "GET", ...)
res = .asTask(req)
res = setAuth(res, .self, "Task")
return(res)
}
if (!is.null(parent)) {
if (status == "all") {
req = api(path = "tasks", method = "GET", query = list(parent = parent), ...)
} else {
req = api(path = "tasks", method = "GET",
query = list(status = status, parent = parent), ...)
}
} else {
if (is.null(project)) {
# list all files
if (status == "all") {
req = api(path = "tasks", method = "GET", ...)
} else {
req = api(path = "tasks", method = "GET", query = list(status = status), ...)
}
} else {
# list all files
if (status == "all") {
req = api(path = paste0("projects/", project, "/tasks"),
method = "GET", ...)
# req = api(path = "tasks", method = "GET", query = list(project = project), ...)
} else {
req = api(path = paste0("projects/", project, "/tasks"),
method = "GET",
query = list(status = status), ...)
}
}
}
res = .asTaskList(req)
# matching
res = m.match(res, id = id, name = name, exact = exact)
# if (length(res) == 1) {
# .id = res$id
# req = api(path = paste0("tasks/", .id), method = "GET", ...)
# res = .asTask(req)
# res = setAuth(res, .self, "Task")
# return(res)
# }
if (length(res)) {
if (detail) {
if (is(res, "TaskList")) {
ids = sapply(res, function(x){ x$id })
} else {
ids = res$id
}
lst = lapply(ids, function(id) {
req = api(path = paste0("tasks/", id), method = "GET", ...)
.asTask(req)
})
res = TaskList(lst)
}
} else {
return(NULL)
}
res = setAuth(res, .self, "Task")
res
},
mount = function(mountPoint = NULL,
projectId = NULL,
ignore.stdout = TRUE,
sudo = TRUE, ...) {
fs <<- FS(authToken = token, ...)
fs$mount(mountPoint = mountPoint,
projectId = projectId,
ignore.stdout = ignore.stdout,
sudo = sudo)
},
unmount = function(...) {
fs$unmount(...)
},
get_id_from_path = function(p) {
  # Resolve platform file ids for a character vector of file paths `p`
  # via the bulk "get_ids" action endpoint.
  # Returns the ids that resolved (NULL when none); emits a message
  # listing paths for which no id was found.
  #
  # Fix: was `a$api(...)` where `a` is an undefined global -- use this
  # object's own api() method instead.
  ids = api(path = "action/files/get_ids",
            method = "POST",
            body = as.list(p))
  idx = unlist(lapply(ids, is.null))
  if (sum(idx)) {
    # Fix: was `df.path[idx]` (undefined name); report the unresolved
    # input paths `p[idx]` instead.
    message("no id for following file: \n", paste(p[idx], collapse = "\n"))
  }
  if (sum(!idx)) {
    id.valid = unlist(ids[!idx])
  } else {
    id.valid = NULL
  }
  id.valid
},
add_volume = function(name = NULL,
type = c("s3", "gcs"),
root_url = NULL,
bucket = NULL,
prefix = "",
access_key_id = NULL,
secret_access_key = NULL,
client_email = NULL,
private_key = NULL,
sse_algorithm = "AES256",
aws_canned_acl = NULL,
access_mode = c("RW", "RO")) {
if (is.null(name))
stop("Please provide name, the name of the volume. It must be unique from all other volumes for this user.")
type = match.arg(type)
access_mode = match.arg(access_mode)
if (is.null(root_url)) {
root_url = switch(type,
s3 = "https://s3.amazonaws.com",
gcs = "https://www.googleapis.com/")
}
if (type == "s3" && !is.null(access_key_id) && !is.null(secret_access_key)) {
credentials = list(
access_key_id = access_key_id,
secret_access_key = secret_access_key
)
} else if (type == "gcs" && !is.null(client_email) && !is.null(private_key)) {
credentials = list(
client_email = client_email,
private_key = private_key
)
} else {
stop("credentials are needed")
}
body = list(
name = name,
service = list(
type = type,
bucket = bucket,
root_url = root_url,
prefix = prefix,
credentials = credentials,
properties = list(sse_algorithm = sse_algorithm)
),
access_mode = access_mode
)
res = api(path = "storage/volumes", body = body, method = "POST")
res = .asVolume(res)
res = setAuth(res, .self, "Volume")
res
},
volume = function(name = NULL, id = NULL,
index = NULL, ignore.case = TRUE,
exact = FALSE, detail = FALSE, ...) {
'
If no id or name provided, this call returns a list
of all volumes you are a member of. If name or id
provided, we did a match search the list'
if (!is.null(id)) {
req = api(path = paste0("storage/volumes/", id), method = "GET", ...)
res = .asVolume(req)
res = setAuth(res, .self, "Volume")
return(res)
}
# list "all"
req = api(path = "storage/volumes", method = "GET", ...)
res = .asVolumeList(req)
if (is.null(name)) {
res = setAuth(res, .self, "Volume")
return(res)
}
res = m.match(res, id = id, name = name, exact = exact,
ignore.case = ignore.case)
if (!length(res)) return(NULL)
if (detail && length(res)) {
if (is(res, "SimpleList")) {
ids = sapply(res, function(x){ x$id })
} else {
ids = res$id
}
lst = lapply(ids, function(id) {
req = api(path = paste0("storage/volumes/", id), method = "GET", ...)
.asVolume(req)
})
res = VolumeList(lst)
}
# double check
if (length(res) == 1 && is(res, "SimpleList")) {
res = res[[1]]
}
res = setAuth(res, .self, "Volume")
res
}
))
setClassUnion("AuthORNULL", c("Auth", "NULL"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GWexPrec_lib.r
\name{cor.emp.int}
\alias{cor.emp.int}
\title{cor.emp.int}
\usage{
cor.emp.int(zeta, nChainFit, Xt, parMargin, typeMargin)
}
\arguments{
\item{zeta}{correlation of Gaussian multivariates}
\item{nChainFit}{number of simulated variates}
\item{Xt}{simulated occurrences, n x 2 matrix}
\item{parMargin}{parameters of the margins 2 x 3}
\item{typeMargin}{type of marginal distribution: 'EGPD' or 'mixExp'}
}
\value{
\item{scalar}{correlation between simulated intensities}
}
\description{
Finds observed correlations between intensities corresponding
to a degree of correlation of Gaussian multivariate random numbers
}
\author{
Guillaume Evin
}
|
/man/cor.emp.int.Rd
|
no_license
|
guillaumeevin/GWEX
|
R
| false
| true
| 738
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GWexPrec_lib.r
\name{cor.emp.int}
\alias{cor.emp.int}
\title{cor.emp.int}
\usage{
cor.emp.int(zeta, nChainFit, Xt, parMargin, typeMargin)
}
\arguments{
\item{zeta}{correlation of Gaussian multivariates}
\item{nChainFit}{number of simulated variates}
\item{Xt}{simulated occurrences, n x 2 matrix}
\item{parMargin}{parameters of the margins 2 x 3}
\item{typeMargin}{type of marginal distribution: 'EGPD' or 'mixExp'}
}
\value{
\item{scalar}{correlation between simulated intensities}
}
\description{
Finds observed correlations between intensities corresponding
to a degree of correlation of Gaussian multivariate random numbers
}
\author{
Guillaume Evin
}
|
\name{blockvector}
\alias{as.blockvector}
\alias{as.bvector}
\alias{blockvector}
\alias{is.blockvector}
\alias{is.bvector}
\title{Block Vector}
\usage{
blockvector(data, parts = NULL, dims = NULL)
is.blockvector(x)
as.blockvector(x)
}
\arguments{
\item{data}{a vector}
\item{parts}{vector of partitions}
\item{dims}{integer indicating the number of blocks}
\item{x}{an R object}
}
\value{
An object of class \code{"blockvector"}
}
\description{
\code{blockvector} creates a block-vector from the given
set of values \cr \code{as.bvector} attempts to turn its
argument into a block-vector \cr \code{is.bvector} tests
if its argument is a (strict) block-vector
}
\examples{
# some vectors
vnum = runif(10)
vlog = sample(c(TRUE, FALSE), size=10, replace=TRUE)
vstr = letters[1:10]
# convert vectors into block-vectors
bnum = blockvector(vnum, 10, 1)
blog = blockvector(vlog, c(5,5), 2)
bstr = blockvector(vstr, c(3,3,4), 3)
# test if objects are blockvectors
is.bvector(vnum) # FALSE
is.bvector(bnum) # TRUE
# generate a vector
v = 1:10
# convert 'v' into a block-vector (one block)
bv = as.bvector(v)
bv
}
\seealso{
\code{\link{blockmatrix}}, \code{\link{vector}}
}
|
/man/blockvector.Rd
|
no_license
|
gastonstat/blockberry
|
R
| false
| false
| 1,202
|
rd
|
\name{blockvector}
\alias{as.blockvector}
\alias{as.bvector}
\alias{blockvector}
\alias{is.blockvector}
\alias{is.bvector}
\title{Block Vector}
\usage{
blockvector(data, parts = NULL, dims = NULL)
is.blockvector(x)
as.blockvector(x)
}
\arguments{
\item{data}{a vector}
\item{parts}{vector of partitions}
\item{dims}{integer indicating the number of blocks}
\item{x}{an R object}
}
\value{
An object of class \code{"blockvector"}
}
\description{
\code{blockvector} creates a block-vector from the given
set of values \cr \code{as.bvector} attempts to turn its
argument into a block-vector \cr \code{is.bvector} tests
if its argument is a (strict) block-vector
}
\examples{
# some vectors
vnum = runif(10)
vlog = sample(c(TRUE, FALSE), size=10, replace=TRUE)
vstr = letters[1:10]
# convert vectors into block-vectors
bnum = blockvector(vnum, 10, 1)
blog = blockvector(vlog, c(5,5), 2)
bstr = blockvector(vstr, c(3,3,4), 3)
# test if objects are blockvectors
is.bvector(vnum) # FALSE
is.bvector(bnum) # TRUE
# generate a vector
v = 1:10
# convert 'v' into a block-vector (one block)
bv = as.bvector(v)
bv
}
\seealso{
\code{\link{blockmatrix}}, \code{\link{vector}}
}
|
library(roxygen2)
#' Draws the result of the variance analysis
#' @param stock_bunch stacked or single matrix of multiple stocks' information with only one price column per stock (not all OHLC)
#' @return histogram of variance analysis
#' @export
#' @examples draw_var_explanation(stock_bunch)
#' @details gives the result, as a scree/histogram table, of the PCA analysis for the current market in the given period
draw_var_explanation = function(stock_bunch){
  # Run a PCA on the (centered, scaled) price matrix and return a scree
  # plot of the variance explained by each principal component.
  #
  # stock_bunch: numeric matrix / data frame of stock price series,
  #   one price column per stock (not full OHLC).
  # Returns: the plot object produced by fviz_eig().
  temp_pca = prcomp(stock_bunch, center = TRUE, scale. = TRUE)
  # Fix: fviz_eig() comes from the factoextra package, which is never
  # attached in this file; namespace-qualify the call so it resolves.
  res = factoextra::fviz_eig(temp_pca)
  return(res)
}
|
/R/draw_var_explanation.R
|
no_license
|
muiPomeranian/private_Rpackage
|
R
| false
| false
| 555
|
r
|
library(roxygen2)
#' # draws result of the variance analysis
#' @param stock_bunch stacked or one matrix of multiple stock informations with only one single price information(not all OHLC)
#' @return histogram of variance analysis
#' @export
#' @examples draw_var_explanation(stock_bunch)
#' @details this gives result by histogram table of PCA analysis information for current market in given period
draw_var_explanation = function(stock_bunch){
temp_pca = prcomp(stock_bunch, center = TRUE ,scale. = TRUE)
res = fviz_eig(temp_pca)
return(res)
}
|
# Exploratory analysis of the Jester joke ratings: plot the empirical
# distribution, then check how well a beta distribution fits via a Q-Q plot.
ratings = scan('data/ratings.txt', sep = '\n')

# Density plot of the raw ratings.
# FIX: filenames previously lacked the ".png" extension.
png(filename = "data/figs/ratings-density-plot.png")
plot(density(ratings), main = "Jester Rating Distribution Density")
dev.off()

# Histogram of the raw ratings.
png(filename = "data/figs/ratings-histogram.png")
hist(ratings, main = "Jester Rating Distribution Histogram")
dev.off()

# Cullen-Frey graph of skewness/kurtosis.
# NOTE(review): descdist() is from 'fitdistrplus', which is not loaded in
# this script -- confirm it is attached by the caller.
descdist(ratings)

# Method-of-moments estimates of beta distribution parameters.
#
# Args:
#   mu:  sample mean of data scaled to [0, 1].
#   var: sample variance of the scaled data.
# Returns: list with components 'alpha' and 'beta'.
estBetaParams <- function(mu, var) {
  alpha <- ((1 - mu) / var - 1 / mu) * mu ^ 2
  beta <- alpha * (1 / mu - 1)
  list(alpha = alpha, beta = beta)
}

# Rescale ratings to [0, 1] so a beta distribution is applicable.
scaledRatings = (ratings - min(ratings)) / diff(range(ratings))
# BUG FIX: estBetaParams() takes (mu, var); it was previously called with the
# whole ratings vector, silently leaving 'var' missing and erroring (or, with
# partial evaluation, producing nonsense parameters).
params = estBetaParams(mean(scaledRatings), var(scaledRatings))

n = length(ratings)
probs = (1:n) / (n + 1)
betaQuants = qbeta(probs, shape1 = params$alpha, shape2 = params$beta)

# Q-Q plot: theoretical beta quantiles vs. empirical scaled ratings;
# points on the identity line indicate a good fit.
plot(sort(betaQuants), sort(scaledRatings),
     xlab = "Theoretical Quantiles for Beta Dist.",
     ylab = "Sample Quantiles: Jester Joke Ratings",
     main = "Beta Q-Q Plot of Jester Joke Ratings")
abline(0, 1)
|
/exploration.R
|
no_license
|
macks22/recsys
|
R
| false
| false
| 1,092
|
r
|
# Load jester dataset ratings
ratings = scan('data/ratings.txt',sep='\n')
png(filename="data/figs/ratings-density-plot")
plot(density(ratings),main="Jester Rating Distribution Density")
dev.off()
png(filename="data/figs/ratings-histogram")
hist(ratings,main="Jester Rating Distribution Histogram")
dev.off()
descdist(ratings)
# Estimate parameters for beta distribution to fit to data
estBetaParams <- function(mu, var) {
alpha <- ((1 - mu) / var - 1 / mu) * mu ^ 2
beta <- alpha * (1 / mu - 1)
return(params = list(alpha = alpha, beta = beta))
}
# find params and get quantiles for Q-Q plot
scaledRatings = (ratings - min(ratings)) / diff(range(ratings))
params = estBetaParams(scaledRatings)
n = length(ratings)
probs = (1:n)/(n+1)
betaQuants = qbeta(probs, shape1=params$alpha, shape2=params$beta)
# Finally plot the theoretical vs. empirical on q-q plot for comparison
plot(sort(betaQuants), sort(scaledRatings),
xlab="Theoretical Quantiles for Beta Dist.",
ylab="Sample Quantiles: Jester Joke Ratings",
main="Beta Q-Q Plot of Jester Joke Ratings")
abline(0,1)
|
/DM_FactorsDecoupling.R
|
no_license
|
lxyandy66/YangtzeProject
|
R
| false
| false
| 35,652
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataDocumentation.R
\docType{data}
\name{Detail_CommodityCodeName_2012}
\alias{Detail_CommodityCodeName_2012}
\title{Detail Commodity Code and Name (2012 schema)}
\format{
A dataframe with 405 obs. and 2 variables
}
\source{
\url{https://edap-ord-data-commons.s3.amazonaws.com/useeior/AllTablesIO.zip}
}
\usage{
Detail_CommodityCodeName_2012
}
\description{
Detail Commodity Code and Name (2012 schema)
}
\keyword{datasets}
|
/man/Detail_CommodityCodeName_2012.Rd
|
permissive
|
USEPA/useeior
|
R
| false
| true
| 502
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataDocumentation.R
\docType{data}
\name{Detail_CommodityCodeName_2012}
\alias{Detail_CommodityCodeName_2012}
\title{Detail Commodity Code and Name (2012 schema)}
\format{
A dataframe with 405 obs. and 2 variables
}
\source{
\url{https://edap-ord-data-commons.s3.amazonaws.com/useeior/AllTablesIO.zip}
}
\usage{
Detail_CommodityCodeName_2012
}
\description{
Detail Commodity Code and Name (2012 schema)
}
\keyword{datasets}
|
#' @name createNoise
#'
#' @aliases createZ
#'
#' @description Creating a normally distributed vector of \code{noise} given \code{sampleSize} (length), \code{mean} and \code{sd}.
#'
#' @param sampleSize time, a.k.a. \code{N} of the sample of \code{X}.
#' @param mean the mean.
#' @param sd standard deviation.
#'
#' @return A numeric vector of length \code{sampleSize} drawn from
#'   N(\code{mean}, \code{sd}^2), returned visibly.
#'
#' @examples
#' createNoise(sampleSize = 10, mean = 0, sd = 1)
#' createNoise(sampleSize = 20, mean = 5, sd = 10)
createNoise <- function(sampleSize,
                        mean,
                        sd) {
  # FIX: the draw was previously captured in a dead local variable whose
  # value was returned invisibly (assignment as last expression); return
  # the draw directly so interactive calls print as the examples suggest.
  rnorm(n = sampleSize,
        mean = mean,
        sd = sd)
}
|
/SCB/R/createNoise.R
|
no_license
|
GlinKate/scb
|
R
| false
| false
| 646
|
r
|
#' @name createNoise
#'
#' @aliases createZ
#'
#' @description Creating a normally distributed vector of \code{noise} given \code{sampleSize} (length), \code{mean} and \code{sd}.
#'
#' @param sampleSize time, a.k.a. \code{N} of the sample of \code{X}.
#' @param mean the mean.
#' @param sd standard deviation.
#'
#' @return The vector of numbers.
#'
#' @examples
#' createNoise(sampleSize = 10, mean = 0, sd = 1)
#' createNoise(sampleSize = 20, mean = 5, sd = 10)
createNoise <- function(sampleSize,
mean,
sd) {
noise <- rnorm(n = sampleSize,
mean = mean,
sd = sd)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_list_data_quality_job_definitions}
\alias{sagemaker_list_data_quality_job_definitions}
\title{Lists the data quality job definitions in your account}
\usage{
sagemaker_list_data_quality_job_definitions(EndpointName, SortBy,
SortOrder, NextToken, MaxResults, NameContains, CreationTimeBefore,
CreationTimeAfter)
}
\arguments{
\item{EndpointName}{A filter that lists the data quality job definitions associated with the
specified endpoint.}
\item{SortBy}{The field to sort results by. The default is \code{CreationTime}.}
\item{SortOrder}{The sort order for results. The default is \code{Descending}.}
\item{NextToken}{If the result of the previous \code{ListDataQualityJobDefinitions} request
was truncated, the response includes a \code{NextToken}. To retrieve the next
set of transform jobs, use the token in the next request.>}
\item{MaxResults}{The maximum number of data quality monitoring job definitions to return
in the response.}
\item{NameContains}{A string in the data quality monitoring job definition name. This filter
returns only data quality monitoring job definitions whose name contains
the specified string.}
\item{CreationTimeBefore}{A filter that returns only data quality monitoring job definitions
created before the specified time.}
\item{CreationTimeAfter}{A filter that returns only data quality monitoring job definitions
created after the specified time.}
}
\description{
Lists the data quality job definitions in your account.
}
\section{Request syntax}{
\preformatted{svc$list_data_quality_job_definitions(
EndpointName = "string",
SortBy = "Name"|"CreationTime",
SortOrder = "Ascending"|"Descending",
NextToken = "string",
MaxResults = 123,
NameContains = "string",
CreationTimeBefore = as.POSIXct(
"2015-01-01"
),
CreationTimeAfter = as.POSIXct(
"2015-01-01"
)
)
}
}
\keyword{internal}
|
/cran/paws.machine.learning/man/sagemaker_list_data_quality_job_definitions.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 1,974
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_list_data_quality_job_definitions}
\alias{sagemaker_list_data_quality_job_definitions}
\title{Lists the data quality job definitions in your account}
\usage{
sagemaker_list_data_quality_job_definitions(EndpointName, SortBy,
SortOrder, NextToken, MaxResults, NameContains, CreationTimeBefore,
CreationTimeAfter)
}
\arguments{
\item{EndpointName}{A filter that lists the data quality job definitions associated with the
specified endpoint.}
\item{SortBy}{The field to sort results by. The default is \code{CreationTime}.}
\item{SortOrder}{The sort order for results. The default is \code{Descending}.}
\item{NextToken}{If the result of the previous \code{ListDataQualityJobDefinitions} request
was truncated, the response includes a \code{NextToken}. To retrieve the next
set of transform jobs, use the token in the next request.>}
\item{MaxResults}{The maximum number of data quality monitoring job definitions to return
in the response.}
\item{NameContains}{A string in the data quality monitoring job definition name. This filter
returns only data quality monitoring job definitions whose name contains
the specified string.}
\item{CreationTimeBefore}{A filter that returns only data quality monitoring job definitions
created before the specified time.}
\item{CreationTimeAfter}{A filter that returns only data quality monitoring job definitions
created after the specified time.}
}
\description{
Lists the data quality job definitions in your account.
}
\section{Request syntax}{
\preformatted{svc$list_data_quality_job_definitions(
EndpointName = "string",
SortBy = "Name"|"CreationTime",
SortOrder = "Ascending"|"Descending",
NextToken = "string",
MaxResults = 123,
NameContains = "string",
CreationTimeBefore = as.POSIXct(
"2015-01-01"
),
CreationTimeAfter = as.POSIXct(
"2015-01-01"
)
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/player.R
\name{player}
\alias{player}
\title{Player data}
\description{
TODO shooting data
}
\keyword{internal}
|
/man/player.Rd
|
permissive
|
stephematician/statsnbaR
|
R
| false
| true
| 191
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/player.R
\name{player}
\alias{player}
\title{Player data}
\description{
TODO shooting data
}
\keyword{internal}
|
# Tests for the show()/printer methods of the compboost C++ bindings.
# Pattern throughout: construct an object silently, expect show() to print,
# and expect show() to return the object's "*Printer" class tag.
context("Printer works")
# InMemoryData printer: filled source vs. empty target behave identically.
test_that("data printer works", {
X = as.matrix(1:10)
expect_silent({ data.source = InMemoryData$new(X, "x") })
expect_silent({ data.target = InMemoryData$new() })
expect_output({ test.source = show(data.source) })
expect_output({ test.target = show(data.target) })
expect_equal(test.source, "InMemoryDataPrinter")
expect_equal(test.target, "InMemoryDataPrinter")
})
# BlearnerFactoryList printer on an empty factory list.
test_that("factory list printer works", {
expect_silent({ factory.list = BlearnerFactoryList$new() })
expect_output({ test.factory.list.printer = show(factory.list) })
expect_equal(test.factory.list.printer, "BlearnerFactoryListPrinter")
})
# Printers of all loss flavors: built-in (quadratic/absolute/binomial),
# custom R-function loss, and custom C++ loss compiled on the fly.
test_that("Loss printer works", {
expect_silent({ quadratic.loss = LossQuadratic$new() })
expect_silent({ absolute.loss = LossAbsolute$new() })
expect_silent({ binomial.loss = LossBinomial$new() })
expect_silent({ Rcpp::sourceCpp(code = getCustomCppExample(example = "loss", silent = TRUE)) })
# Dummy R callbacks; LossCustom only stores them, so NULL bodies suffice here.
myLossFun = function (true.value, prediction) NULL
myGradientFun = function (true.value, prediction) NULL
myConstantInitializerFun = function (true.value) NULL
expect_silent({ custom.cpp.loss = LossCustomCpp$new(lossFunSetter(), gradFunSetter(), constInitFunSetter()) })
expect_silent({ custom.loss = LossCustom$new(myLossFun, myGradientFun, myConstantInitializerFun) })
expect_output({ test.quadratic.printer = show(quadratic.loss) })
expect_output({ test.absolute.printer = show(absolute.loss) })
expect_output({ test.custom.printer = show(custom.loss) })
expect_output({ test.custom.cpp.printer = show(custom.cpp.loss) })
expect_output({ test.binomialprinter = show(binomial.loss) })
expect_equal(test.quadratic.printer, "LossQuadraticPrinter")
expect_equal(test.absolute.printer, "LossAbsolutePrinter")
expect_equal(test.binomialprinter, "LossBinomialPrinter")
expect_equal(test.custom.cpp.printer, "LossCustomCppPrinter")
expect_equal(test.custom.printer, "LossCustomPrinter")
})
# Base-learner factory printers: polynomial degrees 1-4, P-spline,
# custom R-function learner and custom C++ learner.
test_that("Baselearner factory printer works", {
df = mtcars
X.hp = cbind(1, df[["hp"]])
X.hp.sp = as.matrix(df[["hp"]])
expect_silent({ data.source = InMemoryData$new(X.hp, "hp") })
expect_silent({ data.source.sp = InMemoryData$new(X.hp.sp, "hp") })
expect_silent({ data.target = InMemoryData$new() })
expect_silent({ linear.factory.hp = BaselearnerPolynomial$new(data.source, data.target,
list(degree = 1, intercept = FALSE)) })
expect_output({ linear.factory.hp.printer = show(linear.factory.hp) })
expect_equal(linear.factory.hp.printer, "BaselearnerPolynomialPrinter")
expect_silent({ quad.factory.hp = BaselearnerPolynomial$new(data.source, data.target,
list(degree = 2, intercept = FALSE)) })
expect_output({ quad.factory.hp.printer = show(quad.factory.hp) })
expect_equal(quad.factory.hp.printer, "BaselearnerPolynomialPrinter")
expect_silent({ cubic.factory.hp = BaselearnerPolynomial$new(data.source, data.target,
list(degree = 3, intercept = FALSE)) })
expect_output({ cubic.factory.hp.printer = show(cubic.factory.hp) })
expect_equal(cubic.factory.hp.printer, "BaselearnerPolynomialPrinter")
expect_silent({ poly.factory.hp = BaselearnerPolynomial$new(data.source, data.target,
list(degree = 4, intercept = FALSE)) })
expect_output({ poly.factory.hp.printer = show(poly.factory.hp) })
expect_equal(poly.factory.hp.printer, "BaselearnerPolynomialPrinter")
expect_silent({ spline.factory = BaselearnerPSpline$new(data.source.sp, data.target,
list(degree = 3, n.knots = 5, penalty = 2.5, differences = 2)) })
expect_output({ spline.printer = show(spline.factory) })
expect_equal(spline.printer, "BaselearnerPSplinePrinter")
# Minimal OLS-style callbacks for the custom R base-learner.
instantiateData = function (X)
{
return(X);
}
trainFun = function (y, X) {
return(solve(t(X) %*% X) %*% t(X) %*% y)
}
predictFun = function (model, newdata) {
return(newdata %*% model)
}
extractParameter = function (model) {
return(model)
}
expect_silent({
custom.factory = BaselearnerCustom$new(data.source, data.target,
list(instantiate.fun = instantiateData, train.fun = trainFun,
predict.fun = predictFun, param.fun = extractParameter))
})
expect_output({ custom.factory.printer = show(custom.factory) })
expect_equal(custom.factory.printer, "BaselearnerCustomPrinter")
# Compiling the C++ example prints compiler output, hence expect_output.
expect_output(Rcpp::sourceCpp(code = getCustomCppExample()))
expect_silent({
custom.cpp.factory = BaselearnerCustomCpp$new(data.source, data.target,
list(instantiate.ptr = dataFunSetter(), train.ptr = trainFunSetter(),
predict.ptr = predictFunSetter()))
})
expect_output({ custom.cpp.factory.printer = show(custom.cpp.factory) })
expect_equal(custom.cpp.factory.printer, "BaselearnerCustomCppPrinter")
})
# Optimizer printers: coordinate descent with and without line search.
test_that("Optimizer printer works", {
expect_silent({ greedy.optimizer = OptimizerCoordinateDescent$new() })
expect_output({ greedy.optimizer.printer = show(greedy.optimizer) })
expect_equal(greedy.optimizer.printer, "OptimizerCoordinateDescentPrinter")
expect_silent({ greedy.optimizer.ls = OptimizerCoordinateDescentLineSearch$new() })
expect_output({ greedy.optimizer.printer.ls = show(greedy.optimizer.ls) })
expect_equal(greedy.optimizer.printer.ls, "OptimizerCoordinateDescentLineSearchPrinter")
})
# Printers for each logger type and for the LoggerList container,
# before and after registering loggers.
test_that("Logger(List) printer works", {
expect_silent({ loss.quadratic = LossQuadratic$new() })
expect_silent({
eval.oob.test = list(
InMemoryData$new(as.matrix(NA_real_), "hp"),
InMemoryData$new(as.matrix(NA_real_), "wt")
)
})
y = NA_real_
expect_silent({ log.iterations = LoggerIteration$new("iterations", TRUE, 500) })
expect_silent({ log.time = LoggerTime$new("time", FALSE, 500, "microseconds") })
expect_silent({ log.inbag = LoggerInbagRisk$new("inbag.risk", FALSE, loss.quadratic, 0.05) })
expect_silent({ log.oob = LoggerOobRisk$new("oob.risk", FALSE, loss.quadratic, 0.05, eval.oob.test, y) })
expect_silent({ logger.list = LoggerList$new() })
expect_output({ logger.list.printer = show(logger.list) })
expect_equal(logger.list.printer, "LoggerListPrinter")
expect_silent(logger.list$registerLogger(log.iterations))
expect_silent(logger.list$registerLogger(log.time))
expect_silent(logger.list$registerLogger(log.inbag))
expect_silent(logger.list$registerLogger(log.oob))
expect_output({ log.iterations.printer = show(log.iterations) })
expect_output({ log.time.printer = show(log.time) })
# NOTE(review): the logger variables are rebound to show()'s return value
# here, shadowing the logger objects; intentional for the assertions below.
expect_output({ log.inbag = show(log.inbag) })
expect_output({ log.oob = show(log.oob) })
expect_output({ logger.list.printer = show(logger.list) })
expect_equal(log.iterations.printer, "LoggerIterationPrinter")
expect_equal(log.time.printer, "LoggerTimePrinter")
expect_equal(log.inbag, "LoggerInbagRiskPrinter")
expect_equal(log.oob, "LoggerOobRiskPrinter")
expect_equal(logger.list.printer, "LoggerListPrinter")
})
# End-to-end: wire factories, loss, loggers and optimizer into
# Compboost_internal, train it, and check its printer tag.
test_that("Compboost printer works", {
df = mtcars
df$hp2 = df[["hp"]]^2
X.hp = as.matrix(df[["hp"]], ncol = 1)
X.wt = as.matrix(df[["wt"]], ncol = 1)
y = df[["mpg"]]
expect_silent({ data.source.hp = InMemoryData$new(X.hp, "hp") })
expect_silent({ data.source.wt = InMemoryData$new(X.wt, "wt") })
expect_silent({ data.target.hp1 = InMemoryData$new() })
expect_silent({ data.target.hp2 = InMemoryData$new() })
expect_silent({ data.target.wt = InMemoryData$new() })
eval.oob.test = list(data.source.hp, data.source.wt)
learning.rate = 0.05
iter.max = 500
expect_silent({ linear.factory.hp = BaselearnerPolynomial$new(data.source.hp, data.target.hp1,
list(degree = 1, intercept = FALSE)) })
expect_silent({ linear.factory.wt = BaselearnerPolynomial$new(data.source.wt, data.target.wt,
list(degree = 1, intercept = FALSE)) })
expect_silent({ quadratic.factory.hp = BaselearnerPolynomial$new(data.source.hp, data.target.hp2,
list(degree = 2, intercept = FALSE)) })
expect_silent({ factory.list = BlearnerFactoryList$new() })
expect_silent(factory.list$registerFactory(linear.factory.hp))
expect_silent(factory.list$registerFactory(linear.factory.wt))
expect_silent(factory.list$registerFactory(quadratic.factory.hp))
expect_silent({ loss.quadratic = LossQuadratic$new() })
expect_silent({ optimizer = OptimizerCoordinateDescent$new() })
expect_silent({ log.iterations = LoggerIteration$new("iterations", TRUE, iter.max) })
expect_silent({ log.time.ms = LoggerTime$new("time.ms", TRUE, 50000, "microseconds") })
expect_silent({ log.time.sec = LoggerTime$new("time.sec", TRUE, 2, "seconds") })
expect_silent({ log.time.min = LoggerTime$new("time.min", TRUE, 1, "minutes") })
expect_silent({ log.inbag = LoggerInbagRisk$new("inbag.risk", FALSE, loss.quadratic, 0.01) })
expect_silent({ log.oob = LoggerOobRisk$new("oob.risk", FALSE, loss.quadratic, 0.01, eval.oob.test, y) })
expect_silent({ logger.list = LoggerList$new() })
expect_silent({ logger.list$registerLogger(log.iterations) })
expect_silent({ logger.list$registerLogger(log.time.ms) })
expect_silent({ logger.list$registerLogger(log.time.sec) })
expect_silent({ logger.list$registerLogger(log.time.min) })
expect_silent({ logger.list$registerLogger(log.inbag) })
expect_silent({ logger.list$registerLogger(log.oob) })
expect_silent({
cboost = Compboost_internal$new(
response = y,
learning_rate = learning.rate,
stop_if_all_stopper_fulfilled = FALSE,
factory_list = factory.list,
loss = loss.quadratic,
logger_list = logger.list,
optimizer = optimizer
)
})
expect_output(cboost$train(trace = 0))
expect_output({ cboost.printer = show(cboost) })
expect_equal(cboost.printer, "CompboostInternalPrinter")
})
|
/tests/testthat/test_printer.R
|
permissive
|
mllg/compboost
|
R
| false
| false
| 9,825
|
r
|
context("Printer works")
test_that("data printer works", {
X = as.matrix(1:10)
expect_silent({ data.source = InMemoryData$new(X, "x") })
expect_silent({ data.target = InMemoryData$new() })
expect_output({ test.source = show(data.source) })
expect_output({ test.target = show(data.target) })
expect_equal(test.source, "InMemoryDataPrinter")
expect_equal(test.target, "InMemoryDataPrinter")
})
test_that("factory list printer works", {
expect_silent({ factory.list = BlearnerFactoryList$new() })
expect_output({ test.factory.list.printer = show(factory.list) })
expect_equal(test.factory.list.printer, "BlearnerFactoryListPrinter")
})
test_that("Loss printer works", {
expect_silent({ quadratic.loss = LossQuadratic$new() })
expect_silent({ absolute.loss = LossAbsolute$new() })
expect_silent({ binomial.loss = LossBinomial$new() })
expect_silent({ Rcpp::sourceCpp(code = getCustomCppExample(example = "loss", silent = TRUE)) })
myLossFun = function (true.value, prediction) NULL
myGradientFun = function (true.value, prediction) NULL
myConstantInitializerFun = function (true.value) NULL
expect_silent({ custom.cpp.loss = LossCustomCpp$new(lossFunSetter(), gradFunSetter(), constInitFunSetter()) })
expect_silent({ custom.loss = LossCustom$new(myLossFun, myGradientFun, myConstantInitializerFun) })
expect_output({ test.quadratic.printer = show(quadratic.loss) })
expect_output({ test.absolute.printer = show(absolute.loss) })
expect_output({ test.custom.printer = show(custom.loss) })
expect_output({ test.custom.cpp.printer = show(custom.cpp.loss) })
expect_output({ test.binomialprinter = show(binomial.loss) })
expect_equal(test.quadratic.printer, "LossQuadraticPrinter")
expect_equal(test.absolute.printer, "LossAbsolutePrinter")
expect_equal(test.binomialprinter, "LossBinomialPrinter")
expect_equal(test.custom.cpp.printer, "LossCustomCppPrinter")
expect_equal(test.custom.printer, "LossCustomPrinter")
})
test_that("Baselearner factory printer works", {
df = mtcars
X.hp = cbind(1, df[["hp"]])
X.hp.sp = as.matrix(df[["hp"]])
expect_silent({ data.source = InMemoryData$new(X.hp, "hp") })
expect_silent({ data.source.sp = InMemoryData$new(X.hp.sp, "hp") })
expect_silent({ data.target = InMemoryData$new() })
expect_silent({ linear.factory.hp = BaselearnerPolynomial$new(data.source, data.target,
list(degree = 1, intercept = FALSE)) })
expect_output({ linear.factory.hp.printer = show(linear.factory.hp) })
expect_equal(linear.factory.hp.printer, "BaselearnerPolynomialPrinter")
expect_silent({ quad.factory.hp = BaselearnerPolynomial$new(data.source, data.target,
list(degree = 2, intercept = FALSE)) })
expect_output({ quad.factory.hp.printer = show(quad.factory.hp) })
expect_equal(quad.factory.hp.printer, "BaselearnerPolynomialPrinter")
expect_silent({ cubic.factory.hp = BaselearnerPolynomial$new(data.source, data.target,
list(degree = 3, intercept = FALSE)) })
expect_output({ cubic.factory.hp.printer = show(cubic.factory.hp) })
expect_equal(cubic.factory.hp.printer, "BaselearnerPolynomialPrinter")
expect_silent({ poly.factory.hp = BaselearnerPolynomial$new(data.source, data.target,
list(degree = 4, intercept = FALSE)) })
expect_output({ poly.factory.hp.printer = show(poly.factory.hp) })
expect_equal(poly.factory.hp.printer, "BaselearnerPolynomialPrinter")
expect_silent({ spline.factory = BaselearnerPSpline$new(data.source.sp, data.target,
list(degree = 3, n.knots = 5, penalty = 2.5, differences = 2)) })
expect_output({ spline.printer = show(spline.factory) })
expect_equal(spline.printer, "BaselearnerPSplinePrinter")
instantiateData = function (X)
{
return(X);
}
trainFun = function (y, X) {
return(solve(t(X) %*% X) %*% t(X) %*% y)
}
predictFun = function (model, newdata) {
return(newdata %*% model)
}
extractParameter = function (model) {
return(model)
}
expect_silent({
custom.factory = BaselearnerCustom$new(data.source, data.target,
list(instantiate.fun = instantiateData, train.fun = trainFun,
predict.fun = predictFun, param.fun = extractParameter))
})
expect_output({ custom.factory.printer = show(custom.factory) })
expect_equal(custom.factory.printer, "BaselearnerCustomPrinter")
expect_output(Rcpp::sourceCpp(code = getCustomCppExample()))
expect_silent({
custom.cpp.factory = BaselearnerCustomCpp$new(data.source, data.target,
list(instantiate.ptr = dataFunSetter(), train.ptr = trainFunSetter(),
predict.ptr = predictFunSetter()))
})
expect_output({ custom.cpp.factory.printer = show(custom.cpp.factory) })
expect_equal(custom.cpp.factory.printer, "BaselearnerCustomCppPrinter")
})
test_that("Optimizer printer works", {
expect_silent({ greedy.optimizer = OptimizerCoordinateDescent$new() })
expect_output({ greedy.optimizer.printer = show(greedy.optimizer) })
expect_equal(greedy.optimizer.printer, "OptimizerCoordinateDescentPrinter")
expect_silent({ greedy.optimizer.ls = OptimizerCoordinateDescentLineSearch$new() })
expect_output({ greedy.optimizer.printer.ls = show(greedy.optimizer.ls) })
expect_equal(greedy.optimizer.printer.ls, "OptimizerCoordinateDescentLineSearchPrinter")
})
test_that("Logger(List) printer works", {
expect_silent({ loss.quadratic = LossQuadratic$new() })
expect_silent({
eval.oob.test = list(
InMemoryData$new(as.matrix(NA_real_), "hp"),
InMemoryData$new(as.matrix(NA_real_), "wt")
)
})
y = NA_real_
expect_silent({ log.iterations = LoggerIteration$new("iterations", TRUE, 500) })
expect_silent({ log.time = LoggerTime$new("time", FALSE, 500, "microseconds") })
expect_silent({ log.inbag = LoggerInbagRisk$new("inbag.risk", FALSE, loss.quadratic, 0.05) })
expect_silent({ log.oob = LoggerOobRisk$new("oob.risk", FALSE, loss.quadratic, 0.05, eval.oob.test, y) })
expect_silent({ logger.list = LoggerList$new() })
expect_output({ logger.list.printer = show(logger.list) })
expect_equal(logger.list.printer, "LoggerListPrinter")
expect_silent(logger.list$registerLogger(log.iterations))
expect_silent(logger.list$registerLogger(log.time))
expect_silent(logger.list$registerLogger(log.inbag))
expect_silent(logger.list$registerLogger(log.oob))
expect_output({ log.iterations.printer = show(log.iterations) })
expect_output({ log.time.printer = show(log.time) })
expect_output({ log.inbag = show(log.inbag) })
expect_output({ log.oob = show(log.oob) })
expect_output({ logger.list.printer = show(logger.list) })
expect_equal(log.iterations.printer, "LoggerIterationPrinter")
expect_equal(log.time.printer, "LoggerTimePrinter")
expect_equal(log.inbag, "LoggerInbagRiskPrinter")
expect_equal(log.oob, "LoggerOobRiskPrinter")
expect_equal(logger.list.printer, "LoggerListPrinter")
})
test_that("Compboost printer works", {
df = mtcars
df$hp2 = df[["hp"]]^2
X.hp = as.matrix(df[["hp"]], ncol = 1)
X.wt = as.matrix(df[["wt"]], ncol = 1)
y = df[["mpg"]]
expect_silent({ data.source.hp = InMemoryData$new(X.hp, "hp") })
expect_silent({ data.source.wt = InMemoryData$new(X.wt, "wt") })
expect_silent({ data.target.hp1 = InMemoryData$new() })
expect_silent({ data.target.hp2 = InMemoryData$new() })
expect_silent({ data.target.wt = InMemoryData$new() })
eval.oob.test = list(data.source.hp, data.source.wt)
learning.rate = 0.05
iter.max = 500
expect_silent({ linear.factory.hp = BaselearnerPolynomial$new(data.source.hp, data.target.hp1,
list(degree = 1, intercept = FALSE)) })
expect_silent({ linear.factory.wt = BaselearnerPolynomial$new(data.source.wt, data.target.wt,
list(degree = 1, intercept = FALSE)) })
expect_silent({ quadratic.factory.hp = BaselearnerPolynomial$new(data.source.hp, data.target.hp2,
list(degree = 2, intercept = FALSE)) })
expect_silent({ factory.list = BlearnerFactoryList$new() })
expect_silent(factory.list$registerFactory(linear.factory.hp))
expect_silent(factory.list$registerFactory(linear.factory.wt))
expect_silent(factory.list$registerFactory(quadratic.factory.hp))
expect_silent({ loss.quadratic = LossQuadratic$new() })
expect_silent({ optimizer = OptimizerCoordinateDescent$new() })
expect_silent({ log.iterations = LoggerIteration$new("iterations", TRUE, iter.max) })
expect_silent({ log.time.ms = LoggerTime$new("time.ms", TRUE, 50000, "microseconds") })
expect_silent({ log.time.sec = LoggerTime$new("time.sec", TRUE, 2, "seconds") })
expect_silent({ log.time.min = LoggerTime$new("time.min", TRUE, 1, "minutes") })
expect_silent({ log.inbag = LoggerInbagRisk$new("inbag.risk", FALSE, loss.quadratic, 0.01) })
expect_silent({ log.oob = LoggerOobRisk$new("oob.risk", FALSE, loss.quadratic, 0.01, eval.oob.test, y) })
expect_silent({ logger.list = LoggerList$new() })
expect_silent({ logger.list$registerLogger(log.iterations) })
expect_silent({ logger.list$registerLogger(log.time.ms) })
expect_silent({ logger.list$registerLogger(log.time.sec) })
expect_silent({ logger.list$registerLogger(log.time.min) })
expect_silent({ logger.list$registerLogger(log.inbag) })
expect_silent({ logger.list$registerLogger(log.oob) })
expect_silent({
cboost = Compboost_internal$new(
response = y,
learning_rate = learning.rate,
stop_if_all_stopper_fulfilled = FALSE,
factory_list = factory.list,
loss = loss.quadratic,
logger_list = logger.list,
optimizer = optimizer
)
})
expect_output(cboost$train(trace = 0))
expect_output({ cboost.printer = show(cboost) })
expect_equal(cboost.printer, "CompboostInternalPrinter")
})
|
CAP <- function(x, transform = NULL) {
  # Cumulative abundance profile: for every element of a stratified
  # vegetation data set, each column becomes the sum of itself and all
  # columns to its right (accumulated right-to-left).
  if (!inherits(x, "stratifiedvegdata")) stop("Input should be of class 'stratifiedvegdata'")
  accumulate <- function(m) {
    profile <- as.data.frame(m)
    n_cols <- ncol(profile)
    if (n_cols > 1) {
      for (j in seq(n_cols - 1, 1)) {
        profile[, j] <- profile[, j] + profile[, j + 1]
      }
    }
    profile
  }
  Y <- lapply(x, FUN = accumulate)
  # Optionally post-process each accumulated profile.
  if (!is.null(transform)) Y <- lapply(Y, FUN = transform)
  class(Y) <- c("list", "CAP")
  Y
}
|
/vegclust/R/CAP.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 408
|
r
|
CAP<-function(x, transform=NULL) {
cap<-function(x) {
y = as.data.frame(x)
if(ncol(y)>1) {
for(i in (ncol(y)-1):1) {
y[,i] = y[,i]+y[,i+1]
}
}
return(y)
}
if(!inherits(x,"stratifiedvegdata")) stop("Input should be of class 'stratifiedvegdata'")
Y = lapply(x, FUN=cap)
if(!is.null(transform)) Y = lapply(Y, FUN=transform)
class(Y)<-c("list","CAP")
return(Y)
}
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------#
##' Convert BioCro output to netCDF
##'
##' Converts all output contained in a folder to netCDF.
##' Modified from on model2netcdf.sipnet and model2netcdf.ED2 by
##' Shawn Serbin and Mike Dietze
##' @name model2netcdf.BIOCRO
##' @title Function to convert biocro model output to standard netCDF format
##' @param outdir Location of biocro model output (must contain result.csv);
##'   the netCDF file is written next to it as <start year>.nc
##' @export
##' @author David LeBauer, Deepak Jaiswal
model2netcdf.BIOCRO <- function(outdir) {
  ### Read in model output in biocro format
  outfile <- file.path(outdir, "result.csv")
  result <- read.csv(outfile)

  ## Single time dimension; the Stem/Leaf/Root series are written along it.
  t <- ncdim_def("time", "seconds", 3600)

  var <- list()
  var[["Stem"]] <- ncvar_def("Stem", "Mg ha-1", t, -999, "Stem Biomass")
  var[["Leaf"]] <- ncvar_def("Leaf", "Mg ha-1", t, -999,
                             "Leaf Biomass")
  var[["Root"]] <- ncvar_def("Root", "Mg ha-1", t, -999,
                             "Root Biomass")
  ## var[["Rhizome"]] <- ncvar_def("Rhizome", "Mg ha-1", t, -999,
  ##                               "Rhizome Biomass")
  ## var[["LAI"]] <- ncvar_def("LAI", "m2/m2", t, -999,
  ##                           "Leaf Area Index")
  ## var[["Transpiration"]] <- ncvar_def("Assim", "?", t, -999,
  ##                                     "Canopy Transpiration")
  ##******************** Declare netCDF variables ********************#
  ## NOTE(review): relies on a global 'settings' object for the run start
  ## date -- confirm it is defined by the PEcAn workflow before this is called.
  start_year <- format(as.Date(settings$run$start.date), "%Y")
  nc.outfile <- file.path(outdir, paste0(start_year, ".nc"))
  nc <- nc_create(filename = nc.outfile, var)

  ## Output netCDF data
  ## BUG FIX: ncatt_put() writes *attributes*; the model output values must
  ## be written with ncvar_put() so they land in the variables themselves.
  for (name in names(var)) {
    ncvar_put(nc, var[[name]], result[[name]])
  }
  nc_close(nc)
}
####################################################################################################
### EOF. End of R script file.
####################################################################################################
|
/models/biocro/R/model2netcdf.BIOCRO.R
|
permissive
|
jingxia/pecan
|
R
| false
| false
| 2,557
|
r
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------#
##' Convert BioCro output to netCDF
##'
##' Converts all output contained in a folder to netCDF.
##' Modified from model2netcdf.sipnet and model2netcdf.ED2 by
##' Shawn Serbin and Mike Dietze
##' @name model2netcdf.BIOCRO
##' @title Function to convert biocro model output to standard netCDF format
##' @param outdir Location of biocro model output; must contain "result.csv"
##' @export
##' @author David LeBauer, Deepak Jaiswal
model2netcdf.BIOCRO <- function(outdir) {
  ### Read in model output in biocro format
  outfile <- file.path(outdir, "result.csv")
  result <- read.csv(outfile)
  ##******************** Declare netCDF variables ********************#
  t <- ncdim_def("time", "seconds", 3600)
  var <- list()
  var[["Stem"]] <- ncvar_def("Stem", "Mg ha-1", t, -999, "Stem Biomass")
  var[["Leaf"]] <- ncvar_def("Leaf", "Mg ha-1", t, -999,
                             "Leaf Biomass")
  var[["Root"]] <- ncvar_def("Root", "Mg ha-1", t, -999,
                             "Root Biomass")
  ## var[["Rhizome"]] <- ncvar_def("Rhizome", "Mg ha-1", t, -999,
  ##                               "Rhizome Biomass")
  ## var[["LAI"]] <- ncvar_def("LAI", "m2/m2", t, -999,
  ##                           "Leaf Area Index")
  ## var[["Transpiration"]] <- ncvar_def("Assim", "?", t, -999,
  ##                                     "Canopy Transpiration")
  ## NOTE(review): `settings` is not a parameter -- this relies on a global
  ## `settings` object in the calling environment; TODO confirm.
  start_year <- format(as.Date(settings$run$start.date), "%Y")
  nc.outfile <- file.path(outdir, paste0(start_year, ".nc"))
  nc <- nc_create(filename = nc.outfile, var)
  ## Output netCDF data. Use ncvar_put to write the variable *data*;
  ## the original called ncatt_put, which stores the values as netCDF
  ## attributes instead of as variable data.
  for (name in names(var)) {
    ncvar_put(nc, var[[name]], result[[name]])
  }
  nc_close(nc)
}
####################################################################################################
### EOF. End of R script file.
####################################################################################################
|
# required packages
library("GenomicFeatures")
library('Gviz')
library('TxDb.Hsapiens.UCSC.hg19.knownGene')
library('annmap')
library("biomaRt")
# Connect to the annmap web service for the Ensembl 74 (hg19) annotation
annmapConnect(use.webservice = T, name = 'homo_sapiens.74') #hg19
# Use the GRCh37 (hg19) archive BioMart build
ensembl54=useMart(biomart="ENSEMBL_MART_ENSEMBL", host="grch37.ensembl.org", path="/biomart/martservice", dataset="hsapiens_gene_ensembl")
# Look up genomic coordinates for a single gene (FTO) by HGNC symbol
gene.search <- getBM(attributes = c("hgnc_symbol","entrezgene", "chromosome_name", "start_position", "end_position"),
                     filters = c("hgnc_symbol"), values = 'FTO', mart = ensembl54)
# Prefix with "chr" so the name matches UCSC-style chromosome naming
gene.search$chromosome_name <- paste0('chr', gene.search$chromosome_name)
# Locate the BAM file for sample DG1051 in the bam directory
bam.dir <- '/media/disk1part1/PostDoc/Neven/bam'
bam.list <- list.files(bam.dir, pattern = '.bam$', full.names = T, include.dirs = T)
bam.file <- bam.list[grep('DG1051', bam.list)]
# Assign bam file and prepare coverage and alignment tracks
alTrack=AlignmentsTrack(bam.file, isPaired=F) #Read bam file
# Genome axis (coordinate ruler) track
gtrack <- GenomeAxisTrack()
# Coverage histogram track
# NOTE(review): dtrack is built but never passed to plotTracks below --
# presumably it was meant to be included in the plotted track list; confirm.
dtrack <- DataTrack(range=bam.file, genome="hg19", name="Coverage", chromosome=gene.search$chromosome_name,
                    type = "histogram", col.histogram= "#377EB8", fill="#377EB8")
# Chromosome ideogram track
itrack <- IdeogramTrack(genome="hg19", chromosome=gene.search$chromosome_name) #requires internet connection
# Known-gene transcript model track
grtrack <- GeneRegionTrack(TxDb.Hsapiens.UCSC.hg19.knownGene, genome = "hg19", chromosome=gene.search$chromosome_name,
                           name="TxDb.Hsapiens.UCSC.hg19")
# Transcript extent for the gene: start of the first transcript and
# end of the last one
gene = symbolToGene(gene.search$hgnc_symbol)
transcript.out <- geneToTranscript(gene)
tran.start <- transcript.out@ranges@start[1]
tran.end <- data.frame(transcript.out@ranges[length(transcript.out)])[[2]]
# Plot ideogram, axis, transcript models and read alignments over the region
plotTracks(list(itrack, gtrack, grtrack, alTrack), from = tran.start, to = tran.end)
|
/www/bam_testing.R
|
permissive
|
mikblack/WES_ShinyDiscover
|
R
| false
| false
| 1,968
|
r
|
# required packages
library("GenomicFeatures")
library('Gviz')
library('TxDb.Hsapiens.UCSC.hg19.knownGene')
library('annmap')
library("biomaRt")
# Connect to the annmap web service for the Ensembl 74 (hg19) annotation
annmapConnect(use.webservice = T, name = 'homo_sapiens.74') #hg19
# Use the GRCh37 (hg19) archive BioMart build
ensembl54=useMart(biomart="ENSEMBL_MART_ENSEMBL", host="grch37.ensembl.org", path="/biomart/martservice", dataset="hsapiens_gene_ensembl")
# Look up genomic coordinates for a single gene (FTO) by HGNC symbol
gene.search <- getBM(attributes = c("hgnc_symbol","entrezgene", "chromosome_name", "start_position", "end_position"),
                     filters = c("hgnc_symbol"), values = 'FTO', mart = ensembl54)
# Prefix with "chr" so the name matches UCSC-style chromosome naming
gene.search$chromosome_name <- paste0('chr', gene.search$chromosome_name)
# Locate the BAM file for sample DG1051 in the bam directory
bam.dir <- '/media/disk1part1/PostDoc/Neven/bam'
bam.list <- list.files(bam.dir, pattern = '.bam$', full.names = T, include.dirs = T)
bam.file <- bam.list[grep('DG1051', bam.list)]
# Assign bam file and prepare coverage and alignment tracks
alTrack=AlignmentsTrack(bam.file, isPaired=F) #Read bam file
# Genome axis (coordinate ruler) track
gtrack <- GenomeAxisTrack()
# Coverage histogram track
# NOTE(review): dtrack is built but never passed to plotTracks below --
# presumably it was meant to be included in the plotted track list; confirm.
dtrack <- DataTrack(range=bam.file, genome="hg19", name="Coverage", chromosome=gene.search$chromosome_name,
                    type = "histogram", col.histogram= "#377EB8", fill="#377EB8")
# Chromosome ideogram track
itrack <- IdeogramTrack(genome="hg19", chromosome=gene.search$chromosome_name) #requires internet connection
# Known-gene transcript model track
grtrack <- GeneRegionTrack(TxDb.Hsapiens.UCSC.hg19.knownGene, genome = "hg19", chromosome=gene.search$chromosome_name,
                           name="TxDb.Hsapiens.UCSC.hg19")
# Transcript extent for the gene: start of the first transcript and
# end of the last one
gene = symbolToGene(gene.search$hgnc_symbol)
transcript.out <- geneToTranscript(gene)
tran.start <- transcript.out@ranges@start[1]
tran.end <- data.frame(transcript.out@ranges[length(transcript.out)])[[2]]
# Plot ideogram, axis, transcript models and read alignments over the region
plotTracks(list(itrack, gtrack, grtrack, alTrack), from = tran.start, to = tran.end)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' @include arrow-package.R
#' @title FileSystem entry info
#' @usage NULL
#' @format NULL
#'
#' @section Methods:
#'
#' - `base_name()` : The file base name (component after the last directory
#' separator).
#' - `extension()` : The file extension
#'
#' @section Active bindings:
#'
#' - `$type`: The file type
#' - `$path`: The full file path in the filesystem
#' - `$size`: The size in bytes, if available. Only regular files are
#' guaranteed to have a size.
#' - `$mtime`: The time of last modification, if available.
#'
#' @rdname FileInfo
#' @export
FileInfo <- R6Class("FileInfo",
  inherit = ArrowObject,
  public = list(
    # Component of the path after the last directory separator
    base_name = function() fs___FileInfo__base_name(self),
    # File extension
    extension = function() fs___FileInfo__extension(self)
  ),
  active = list(
    # File type; assigning a value sets the type
    type = function(type) {
      if (missing(type)) {
        fs___FileInfo__type(self)
      } else {
        fs___FileInfo__set_type(self, type)
      }
    },
    # Full path in the filesystem; assigning a value sets the path
    path = function(path) {
      if (missing(path)) {
        fs___FileInfo__path(self)
      } else {
        # NOTE(review): the new `path` value is not forwarded to the setter,
        # so the assignment is silently dropped. Presumably this should be
        # fs___FileInfo__set_path(self, path); confirm against the C++
        # binding before changing.
        invisible(fs___FileInfo__set_path(self))
      }
    },
    # Size in bytes; only regular files are guaranteed to have one
    size = function(size) {
      if (missing(size)) {
        fs___FileInfo__size(self)
      } else {
        invisible(fs___FileInfo__set_size(self, size))
      }
    },
    # Time of last modification; assignment requires a length-1 POSIXct
    mtime = function(time) {
      if (missing(time)) {
        fs___FileInfo__mtime(self)
      } else {
        # Reject anything that is not a scalar POSIXct. The original
        # condition (`!inherits(time, "POSIXct") && length(time) == 1L`)
        # only rejected *scalar* non-POSIXct values and let non-scalar
        # invalid input through to the C++ setter.
        if (!inherits(time, "POSIXct") || length(time) != 1L) {
          abort("invalid time")
        }
        invisible(fs___FileInfo__set_mtime(self, time))
      }
    }
  )
)
#' @title file selector
#' @format NULL
#'
#' @section Factory:
#'
#' The `$create()` factory method instantiates a `FileSelector` given the 3 fields
#' described below.
#'
#' @section Fields:
#'
#' - `base_dir`: The directory in which to select files. If the path exists but
#' doesn't point to a directory, this should be an error.
#' - `allow_not_found`: The behavior if `base_dir` doesn't exist in the
#' filesystem. If `FALSE`, an error is returned. If `TRUE`, an empty
#' selection is returned
#' - `recursive`: Whether to recurse into subdirectories.
#'
#' @rdname FileSelector
#' @export
# Read-only view of the parameters used to select files within a filesystem.
FileSelector <- R6Class("FileSelector",
  inherit = ArrowObject,
  active = list(
    # Directory in which files are selected
    base_dir = function() fs___FileSelector__base_dir(self),
    # TRUE: a missing base_dir yields an empty selection instead of an error
    allow_not_found = function() fs___FileSelector__allow_not_found(self),
    # Whether to recurse into subdirectories
    recursive = function() fs___FileSelector__recursive(self)
  )
)
# Construct a FileSelector for base_dir (path separators normalized to "/").
FileSelector$create <- function(base_dir, allow_not_found = FALSE, recursive = FALSE) {
  xp <- fs___FileSelector__create(clean_path_rel(base_dir), allow_not_found, recursive)
  shared_ptr(FileSelector, xp)
}
#' @title FileSystem classes
#' @description `FileSystem` is an abstract file system API,
#' `LocalFileSystem` is an implementation accessing files
#' on the local machine. `SubTreeFileSystem` is an implementation that delegates
#' to another implementation after prepending a fixed base path
#'
#' @section Factory:
#'
#' The `$create()` factory methods instantiate the `FileSystem` object and
#' take the following arguments, depending on the subclass:
#'
#' - no argument is needed for instantiating a `LocalFileSystem`
#' - `base_path` and `base_fs` for instantiating a `SubTreeFileSystem`
#'
#' @section Methods:
#'
#' - `$GetFileInfo(x)`: `x` may be a [FileSelector][FileSelector] or a character
#' vector of paths. Returns a list of [FileInfo][FileInfo]
#' - `$CreateDir(path, recursive = TRUE)`: Create a directory and subdirectories.
#' - `$DeleteDir(path)`: Delete a directory and its contents, recursively.
#' - `$DeleteDirContents(path)`: Delete a directory's contents, recursively.
#' Like `$DeleteDir()`,
#' but doesn't delete the directory itself. Passing an empty path (`""`) will
#' wipe the entire filesystem tree.
#' - `$DeleteFile(path)` : Delete a file.
#' - `$DeleteFiles(paths)` : Delete many files. The default implementation
#' issues individual delete operations in sequence.
#' - `$Move(src, dest)`: Move / rename a file or directory. If the destination
#' exists:
#' if it is a non-empty directory, an error is returned
#' otherwise, if it has the same type as the source, it is replaced
#' otherwise, behavior is unspecified (implementation-dependent).
#' - `$CopyFile(src, dest)`: Copy a file. If the destination exists and is a
#' directory, an error is returned. Otherwise, it is replaced.
#' - `$OpenInputStream(path)`: Open an [input stream][InputStream] for
#' sequential reading.
#' - `$OpenInputFile(path)`: Open an [input file][RandomAccessFile] for random
#' access reading.
#' - `$OpenOutputStream(path)`: Open an [output stream][OutputStream] for
#' sequential writing.
#' - `$OpenAppendStream(path)`: Open an [output stream][OutputStream] for
#' appending.
#'
#' @usage NULL
#' @format NULL
#' @docType class
#'
#' @rdname FileSystem
#' @name FileSystem
#' @export
# Abstract filesystem API; concrete behavior is provided by the C++ layer.
FileSystem <- R6Class("FileSystem", inherit = ArrowObject,
  public = list(
    # Re-wrap this filesystem pointer as the concrete R6 subclass matching
    # its C++ type name (local/s3/subtree); returns self for unknown types.
    ..dispatch = function() {
      type_name <- self$type_name
      if (type_name == "local") {
        shared_ptr(LocalFileSystem, self$pointer())
      } else if (type_name == "s3") {
        shared_ptr(S3FileSystem, self$pointer())
      } else if (type_name == "subtree") {
        shared_ptr(SubTreeFileSystem, self$pointer())
      } else {
        self
      }
    },
    # Stat files: x is a FileSelector or a character vector of paths;
    # returns a list of FileInfo objects. Errors on any other type.
    GetFileInfo = function(x) {
      if (inherits(x, "FileSelector")) {
        map(
          fs___FileSystem__GetTargetInfos_FileSelector(self, x),
          shared_ptr,
          class = FileInfo
        )
      } else if (is.character(x)){
        map(
          fs___FileSystem__GetTargetInfos_Paths(self, clean_path_rel(x)),
          shared_ptr,
          class = FileInfo
        )
      } else {
        abort("incompatible type for FileSystem$GetFileInfo()")
      }
    },
    # Create a directory (and, by default, any missing parents)
    CreateDir = function(path, recursive = TRUE) {
      fs___FileSystem__CreateDir(self, clean_path_rel(path), isTRUE(recursive))
    },
    # Delete a directory and its contents, recursively
    DeleteDir = function(path) {
      fs___FileSystem__DeleteDir(self, clean_path_rel(path))
    },
    # Delete a directory's contents but keep the directory itself
    DeleteDirContents = function(path) {
      fs___FileSystem__DeleteDirContents(self, clean_path_rel(path))
    },
    # Delete a single file
    DeleteFile = function(path) {
      fs___FileSystem__DeleteFile(self, clean_path_rel(path))
    },
    # Delete several files
    DeleteFiles = function(paths) {
      fs___FileSystem__DeleteFiles(self, clean_path_rel(paths))
    },
    # Move / rename a file or directory
    Move = function(src, dest) {
      fs___FileSystem__Move(self, clean_path_rel(src), clean_path_rel(dest))
    },
    # Copy a file
    CopyFile = function(src, dest) {
      fs___FileSystem__CopyFile(self, clean_path_rel(src), clean_path_rel(dest))
    },
    # Open an input stream for sequential reading
    OpenInputStream = function(path) {
      shared_ptr(InputStream, fs___FileSystem__OpenInputStream(self, clean_path_rel(path)))
    },
    # Open an input file for random-access reading
    # NOTE(review): result is wrapped as InputStream, not RandomAccessFile --
    # confirm this matches the class callers expect.
    OpenInputFile = function(path) {
      shared_ptr(InputStream, fs___FileSystem__OpenInputFile(self, clean_path_rel(path)))
    },
    # Open an output stream for sequential writing
    OpenOutputStream = function(path) {
      shared_ptr(OutputStream, fs___FileSystem__OpenOutputStream(self, clean_path_rel(path)))
    },
    # Open an output stream in append mode
    OpenAppendStream = function(path) {
      shared_ptr(OutputStream, fs___FileSystem__OpenAppendStream(self, clean_path_rel(path)))
    }
  ),
  active = list(
    # C++ type name of the underlying filesystem implementation
    type_name = function() fs___FileSystem__type_name(self)
  )
)
# Resolve a URI into a concrete FileSystem instance plus its path component.
FileSystem$from_uri <- function(uri) {
  res <- fs___FileSystemFromUri(uri)
  wrapped <- shared_ptr(FileSystem, res$fs)
  res$fs <- wrapped$..dispatch()
  res
}
#' @usage NULL
#' @format NULL
#' @rdname FileSystem
#' @export
LocalFileSystem <- R6Class("LocalFileSystem", inherit = FileSystem)
# Instantiate a filesystem backed by the local machine.
LocalFileSystem$create <- function() {
  xp <- fs___LocalFileSystem__create()
  shared_ptr(LocalFileSystem, xp)
}
#' @usage NULL
#' @format NULL
#' @rdname FileSystem
#' @export
S3FileSystem <- R6Class("S3FileSystem", inherit = FileSystem)
# Instantiate an S3-backed filesystem; ensures the AWS S3 subsystem is
# initialized before creating the handle.
S3FileSystem$create <- function() {
  fs___EnsureS3Initialized()
  shared_ptr(S3FileSystem, fs___S3FileSystem__create())
}
# TRUE when this build of the package was compiled with S3 support
arrow_with_s3 <- function() {
  .Call(`_s3_available`)
}
#' @usage NULL
#' @format NULL
#' @rdname FileSystem
#' @export
SubTreeFileSystem <- R6Class("SubTreeFileSystem", inherit = FileSystem)
# Wrap base_fs so that all paths are resolved relative to base_path
# (separators normalized to "/").
SubTreeFileSystem$create <- function(base_path, base_fs) {
  shared_ptr(
    SubTreeFileSystem,
    fs___SubTreeFileSystem__create(clean_path_rel(base_path), base_fs)
  )
}
clean_path_abs <- function(path) {
  # Absolute, forward-slashed path suitable for passing to Arrow.
  # mustWork = FALSE: do not error when the path does not (yet) exist.
  normalizePath(path, mustWork = FALSE, winslash = "/")
}
clean_path_rel <- function(path) {
  # Normalize path separators to "/" ("\" only occurs on Windows).
  # Use a plain `if`/`else` for the scalar platform check: `ifelse()` is
  # a vectorized construct and is the wrong tool (and slower) for scalars.
  path_sep <- if (tolower(Sys.info()[["sysname"]]) == "windows") "\\\\" else "/"
  gsub(path_sep, "/", path)
}
|
/r/R/filesystem.R
|
permissive
|
haaami01/arrow
|
R
| false
| false
| 9,495
|
r
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' @include arrow-package.R
#' @title FileSystem entry info
#' @usage NULL
#' @format NULL
#'
#' @section Methods:
#'
#' - `base_name()` : The file base name (component after the last directory
#' separator).
#' - `extension()` : The file extension
#'
#' @section Active bindings:
#'
#' - `$type`: The file type
#' - `$path`: The full file path in the filesystem
#' - `$size`: The size in bytes, if available. Only regular files are
#' guaranteed to have a size.
#' - `$mtime`: The time of last modification, if available.
#'
#' @rdname FileInfo
#' @export
FileInfo <- R6Class("FileInfo",
  inherit = ArrowObject,
  public = list(
    # Component of the path after the last directory separator
    base_name = function() fs___FileInfo__base_name(self),
    # File extension
    extension = function() fs___FileInfo__extension(self)
  ),
  active = list(
    # File type; assigning a value sets the type
    type = function(type) {
      if (missing(type)) {
        fs___FileInfo__type(self)
      } else {
        fs___FileInfo__set_type(self, type)
      }
    },
    # Full path in the filesystem; assigning a value sets the path
    path = function(path) {
      if (missing(path)) {
        fs___FileInfo__path(self)
      } else {
        # NOTE(review): the new `path` value is not forwarded to the setter,
        # so the assignment is silently dropped. Presumably this should be
        # fs___FileInfo__set_path(self, path); confirm against the C++
        # binding before changing.
        invisible(fs___FileInfo__set_path(self))
      }
    },
    # Size in bytes; only regular files are guaranteed to have one
    size = function(size) {
      if (missing(size)) {
        fs___FileInfo__size(self)
      } else {
        invisible(fs___FileInfo__set_size(self, size))
      }
    },
    # Time of last modification; assignment requires a length-1 POSIXct
    mtime = function(time) {
      if (missing(time)) {
        fs___FileInfo__mtime(self)
      } else {
        # Reject anything that is not a scalar POSIXct. The original
        # condition (`!inherits(time, "POSIXct") && length(time) == 1L`)
        # only rejected *scalar* non-POSIXct values and let non-scalar
        # invalid input through to the C++ setter.
        if (!inherits(time, "POSIXct") || length(time) != 1L) {
          abort("invalid time")
        }
        invisible(fs___FileInfo__set_mtime(self, time))
      }
    }
  )
)
#' @title file selector
#' @format NULL
#'
#' @section Factory:
#'
#' The `$create()` factory method instantiates a `FileSelector` given the 3 fields
#' described below.
#'
#' @section Fields:
#'
#' - `base_dir`: The directory in which to select files. If the path exists but
#' doesn't point to a directory, this should be an error.
#' - `allow_not_found`: The behavior if `base_dir` doesn't exist in the
#' filesystem. If `FALSE`, an error is returned. If `TRUE`, an empty
#' selection is returned
#' - `recursive`: Whether to recurse into subdirectories.
#'
#' @rdname FileSelector
#' @export
# Read-only view of the parameters used to select files within a filesystem.
FileSelector <- R6Class("FileSelector",
  inherit = ArrowObject,
  active = list(
    # Directory in which files are selected
    base_dir = function() fs___FileSelector__base_dir(self),
    # TRUE: a missing base_dir yields an empty selection instead of an error
    allow_not_found = function() fs___FileSelector__allow_not_found(self),
    # Whether to recurse into subdirectories
    recursive = function() fs___FileSelector__recursive(self)
  )
)
# Construct a FileSelector for base_dir (path separators normalized to "/").
FileSelector$create <- function(base_dir, allow_not_found = FALSE, recursive = FALSE) {
  xp <- fs___FileSelector__create(clean_path_rel(base_dir), allow_not_found, recursive)
  shared_ptr(FileSelector, xp)
}
#' @title FileSystem classes
#' @description `FileSystem` is an abstract file system API,
#' `LocalFileSystem` is an implementation accessing files
#' on the local machine. `SubTreeFileSystem` is an implementation that delegates
#' to another implementation after prepending a fixed base path
#'
#' @section Factory:
#'
#' The `$create()` factory methods instantiate the `FileSystem` object and
#' take the following arguments, depending on the subclass:
#'
#' - no argument is needed for instantiating a `LocalFileSystem`
#' - `base_path` and `base_fs` for instantiating a `SubTreeFileSystem`
#'
#' @section Methods:
#'
#' - `$GetFileInfo(x)`: `x` may be a [FileSelector][FileSelector] or a character
#' vector of paths. Returns a list of [FileInfo][FileInfo]
#' - `$CreateDir(path, recursive = TRUE)`: Create a directory and subdirectories.
#' - `$DeleteDir(path)`: Delete a directory and its contents, recursively.
#' - `$DeleteDirContents(path)`: Delete a directory's contents, recursively.
#' Like `$DeleteDir()`,
#' but doesn't delete the directory itself. Passing an empty path (`""`) will
#' wipe the entire filesystem tree.
#' - `$DeleteFile(path)` : Delete a file.
#' - `$DeleteFiles(paths)` : Delete many files. The default implementation
#' issues individual delete operations in sequence.
#' - `$Move(src, dest)`: Move / rename a file or directory. If the destination
#' exists:
#' if it is a non-empty directory, an error is returned
#' otherwise, if it has the same type as the source, it is replaced
#' otherwise, behavior is unspecified (implementation-dependent).
#' - `$CopyFile(src, dest)`: Copy a file. If the destination exists and is a
#' directory, an error is returned. Otherwise, it is replaced.
#' - `$OpenInputStream(path)`: Open an [input stream][InputStream] for
#' sequential reading.
#' - `$OpenInputFile(path)`: Open an [input file][RandomAccessFile] for random
#' access reading.
#' - `$OpenOutputStream(path)`: Open an [output stream][OutputStream] for
#' sequential writing.
#' - `$OpenAppendStream(path)`: Open an [output stream][OutputStream] for
#' appending.
#'
#' @usage NULL
#' @format NULL
#' @docType class
#'
#' @rdname FileSystem
#' @name FileSystem
#' @export
# Abstract filesystem API; concrete behavior is provided by the C++ layer.
FileSystem <- R6Class("FileSystem", inherit = ArrowObject,
  public = list(
    # Re-wrap this filesystem pointer as the concrete R6 subclass matching
    # its C++ type name (local/s3/subtree); returns self for unknown types.
    ..dispatch = function() {
      type_name <- self$type_name
      if (type_name == "local") {
        shared_ptr(LocalFileSystem, self$pointer())
      } else if (type_name == "s3") {
        shared_ptr(S3FileSystem, self$pointer())
      } else if (type_name == "subtree") {
        shared_ptr(SubTreeFileSystem, self$pointer())
      } else {
        self
      }
    },
    # Stat files: x is a FileSelector or a character vector of paths;
    # returns a list of FileInfo objects. Errors on any other type.
    GetFileInfo = function(x) {
      if (inherits(x, "FileSelector")) {
        map(
          fs___FileSystem__GetTargetInfos_FileSelector(self, x),
          shared_ptr,
          class = FileInfo
        )
      } else if (is.character(x)){
        map(
          fs___FileSystem__GetTargetInfos_Paths(self, clean_path_rel(x)),
          shared_ptr,
          class = FileInfo
        )
      } else {
        abort("incompatible type for FileSystem$GetFileInfo()")
      }
    },
    # Create a directory (and, by default, any missing parents)
    CreateDir = function(path, recursive = TRUE) {
      fs___FileSystem__CreateDir(self, clean_path_rel(path), isTRUE(recursive))
    },
    # Delete a directory and its contents, recursively
    DeleteDir = function(path) {
      fs___FileSystem__DeleteDir(self, clean_path_rel(path))
    },
    # Delete a directory's contents but keep the directory itself
    DeleteDirContents = function(path) {
      fs___FileSystem__DeleteDirContents(self, clean_path_rel(path))
    },
    # Delete a single file
    DeleteFile = function(path) {
      fs___FileSystem__DeleteFile(self, clean_path_rel(path))
    },
    # Delete several files
    DeleteFiles = function(paths) {
      fs___FileSystem__DeleteFiles(self, clean_path_rel(paths))
    },
    # Move / rename a file or directory
    Move = function(src, dest) {
      fs___FileSystem__Move(self, clean_path_rel(src), clean_path_rel(dest))
    },
    # Copy a file
    CopyFile = function(src, dest) {
      fs___FileSystem__CopyFile(self, clean_path_rel(src), clean_path_rel(dest))
    },
    # Open an input stream for sequential reading
    OpenInputStream = function(path) {
      shared_ptr(InputStream, fs___FileSystem__OpenInputStream(self, clean_path_rel(path)))
    },
    # Open an input file for random-access reading
    # NOTE(review): result is wrapped as InputStream, not RandomAccessFile --
    # confirm this matches the class callers expect.
    OpenInputFile = function(path) {
      shared_ptr(InputStream, fs___FileSystem__OpenInputFile(self, clean_path_rel(path)))
    },
    # Open an output stream for sequential writing
    OpenOutputStream = function(path) {
      shared_ptr(OutputStream, fs___FileSystem__OpenOutputStream(self, clean_path_rel(path)))
    },
    # Open an output stream in append mode
    OpenAppendStream = function(path) {
      shared_ptr(OutputStream, fs___FileSystem__OpenAppendStream(self, clean_path_rel(path)))
    }
  ),
  active = list(
    # C++ type name of the underlying filesystem implementation
    type_name = function() fs___FileSystem__type_name(self)
  )
)
# Resolve a URI into a concrete FileSystem instance plus its path component.
FileSystem$from_uri <- function(uri) {
  res <- fs___FileSystemFromUri(uri)
  wrapped <- shared_ptr(FileSystem, res$fs)
  res$fs <- wrapped$..dispatch()
  res
}
#' @usage NULL
#' @format NULL
#' @rdname FileSystem
#' @export
LocalFileSystem <- R6Class("LocalFileSystem", inherit = FileSystem)
# Instantiate a filesystem backed by the local machine.
LocalFileSystem$create <- function() {
  xp <- fs___LocalFileSystem__create()
  shared_ptr(LocalFileSystem, xp)
}
#' @usage NULL
#' @format NULL
#' @rdname FileSystem
#' @export
S3FileSystem <- R6Class("S3FileSystem", inherit = FileSystem)
# Instantiate an S3-backed filesystem; ensures the AWS S3 subsystem is
# initialized before creating the handle.
S3FileSystem$create <- function() {
  fs___EnsureS3Initialized()
  shared_ptr(S3FileSystem, fs___S3FileSystem__create())
}
# TRUE when this build of the package was compiled with S3 support
arrow_with_s3 <- function() {
  .Call(`_s3_available`)
}
#' @usage NULL
#' @format NULL
#' @rdname FileSystem
#' @export
SubTreeFileSystem <- R6Class("SubTreeFileSystem", inherit = FileSystem)
# Wrap base_fs so that all paths are resolved relative to base_path
# (separators normalized to "/").
SubTreeFileSystem$create <- function(base_path, base_fs) {
  shared_ptr(
    SubTreeFileSystem,
    fs___SubTreeFileSystem__create(clean_path_rel(base_path), base_fs)
  )
}
clean_path_abs <- function(path) {
  # Absolute, forward-slashed path suitable for passing to Arrow.
  # mustWork = FALSE: do not error when the path does not (yet) exist.
  normalizePath(path, mustWork = FALSE, winslash = "/")
}
clean_path_rel <- function(path) {
  # Normalize path separators to "/" ("\" only occurs on Windows).
  # Use a plain `if`/`else` for the scalar platform check: `ifelse()` is
  # a vectorized construct and is the wrong tool (and slower) for scalars.
  path_sep <- if (tolower(Sys.info()[["sysname"]]) == "windows") "\\\\" else "/"
  gsub(path_sep, "/", path)
}
|
# Partition Functions
library(scales)
library(RColorBrewer)
col_list <- rev(brewer.pal(n = 5, name = "RdBu"))
# TRUE when both vectors contain the same values (ignoring order),
# after coercion to numeric.
sorted_equal <- function(v1, v2){
  a <- sort(as.numeric(v1))
  b <- sort(as.numeric(v2))
  identical(a, b)
}
# TRUE when x occurs in vector v.
# Thin wrapper around %in%, intended for use with apply-style iteration.
index_in <- function(x, v){
  x %in% v
}
# Configuration of the partition: cluster sizes, singleton clusters, the
# pairwise co-allocation matrix Z and the cluster-level adjacency matrix.
partition_config <- function(gamma, A_block){
  # gamma: partition as a list of index vectors
  # A_block: adjacency matrix of the block groups
  sizes <- sapply(gamma, FUN = length)
  n_clusters <- length(gamma)       # number of clusters
  n_units <- nrow(A_block)          # number of block groups
  single_idx <- which(sizes == 1)
  multi_idx <- which(sizes != 1)
  n_single <- length(single_idx)
  n_multi <- length(multi_idx)
  # Z[i, j] = 1 iff units i and j are allocated to the same cluster
  Z <- matrix(0, nrow = n_units, ncol = n_units)
  for (k in seq_len(n_clusters)) {
    members <- gamma[[k]]
    Z[members, members] <- 1
  }
  if (n_clusters > 1) {
    # Cluster-level adjacency: 1 iff any pair of members is adjacent
    A_cluster <- matrix(0, nrow = n_clusters, ncol = n_clusters)
    for (k in 1:(n_clusters - 1)) {
      for (kk in (k + 1):n_clusters) {
        touching <- any(A_block[gamma[[k]], gamma[[kk]]] == 1)
        A_cluster[k, kk] <- 1 * touching
        A_cluster[kk, k] <- A_cluster[k, kk]
      }
    }
  } else {
    A_cluster <- NULL
  }
  list(K = n_clusters, config = sizes,
       singletons = single_idx, non_singletons = multi_idx,
       A_cluster = A_cluster, Z = Z)
}
# Ensures that the clusters are spatially connected: splits any spatially
# disconnected cluster of `gamma` into its connected components (with respect
# to the adjacency matrix `A_block`) and returns the resulting partition.
# NOTE(review): graph_from_adjacency_matrix() and components() come from the
# igraph package, which is not loaded in this file -- confirm the caller
# attaches it.
partition_modify <- function(gamma, A_block){
  gamma_new <- gamma
  K <- length(gamma)
  count <- K + 1 # index of the next free slot at the end of gamma_new
  to_be_removed <- c()
  for(k in 1:K){
    if(length(gamma[[k]])>1){
      cl <- gamma[[k]]
      g <- graph_from_adjacency_matrix(A_block[cl,cl]) # adjacency matrix of the cluster
      tmp <- components(g)$membership # finds the connected components of the cluster
      if(length(unique(tmp))>1){ # the cluster splits into more than one connected component
        for(x in unique(tmp)){ # loops over the component labels
          gamma_new[[count]] <- cl[which(tmp == x)] # forms a new cluster out of the connected component; adds to end of gamma
          count <- count + 1
        }
        to_be_removed <- c(to_be_removed, k) # will eventually delete the original cluster k
      }
    }
  }
  # Drop the clusters that were replaced by their components
  if(length(to_be_removed) > 0) gamma_new <- gamma_new[-to_be_removed]
  return(gamma_new)
}
# Test whether two partitions of the same units are equal as sets of sets.
partition_equal <- function(gamma1, gamma2, A_block){
  tmp_1 <- partition_config(gamma1, A_block)
  tmp_2 <- partition_config(gamma2, A_block)
  K_1 <- tmp_1$K
  config_1 <- sort(tmp_1$config)
  config_2 <- sort(tmp_2$config)
  if(! identical(config_1, config_2)){ # if the cluster configurations are different, stop
    flag <- FALSE
  } else { # now check individual partition elements
    flag <- TRUE
    i <- 0
    # `&&` (scalar, short-circuiting) instead of elementwise `&` for the
    # loop condition; also dropped the unused K_2 local.
    while(i < K_1 && flag){
      i <- i + 1
      # Does cluster i of gamma1 match (up to element order) any cluster of gamma2?
      flag <- any(sapply(gamma2, FUN = sorted_equal, gamma1[[i]]))
    }
  }
  return(flag)
}
# Variation-of-information (VI) distance between two partitions of N units.
vi_distance <- function(N, gamma1, gamma2){
  # counts[k, kk]: number of units shared by cluster k of gamma1 and
  # cluster kk of gamma2
  k1 <- length(gamma1)
  k2 <- length(gamma2)
  counts <- matrix(0, nrow = k1, ncol = k2)
  for(k in 1:k1){
    for(kk in 1:k2){
      counts[k,kk] <- length(intersect(gamma1[[k]], gamma2[[kk]]))
    }
  }
  row_sums <- rowSums(counts)
  col_sums <- colSums(counts)
  # VI = H(gamma1) + H(gamma2) - 2 * I(gamma1, gamma2)
  dist <- sum(row_sums/N * log(row_sums/N)) + sum(col_sums/N * log(col_sums/N))
  for(k in 1:k1){
    for(kk in 1:k2){
      if(counts[k,kk] > 0){
        dist <- dist - 2 * counts[k,kk]/N * log(counts[k,kk]/N)
      }
    }
  }
  # Clamp floating-point noise to exactly zero. The original tested
  # `abs(diff)` -- `diff` is undefined here and resolves to base::diff
  # (a function), so that line always raised an error.
  if(abs(dist) < 1e-14) dist <- 0
  return(dist)
}
# Pairwise VI distances among an ensemble (list) of partitions.
# Returns the upper-triangular distances as a vector (0.0 for one particle).
vi_ensemble <- function(N, particles){
  n_particles <- length(particles)
  if (n_particles == 1) {
    return(0.0)
  }
  dists <- matrix(0, nrow = n_particles, ncol = n_particles)
  for (a in 1:(n_particles - 1)) {
    for (b in (a + 1):n_particles) {
      d <- vi_distance(N, particles[[a]], particles[[b]])
      dists[a, b] <- d
      dists[b, a] <- d
    }
  }
  dists[upper.tri(dists)]
}
# Binder loss between two partitions of N units.
binder_loss <- function(N, gamma1, gamma2){
  # overlap[i, j]: number of units in cluster i of gamma1 and cluster j of gamma2
  k1 <- length(gamma1)
  k2 <- length(gamma2)
  overlap <- matrix(nrow = k1, ncol = k2)
  for (i in 1:k1) {
    for (j in 1:k2) {
      overlap[i, j] <- length(intersect(gamma1[[i]], gamma2[[j]]))
    }
  }
  within_1 <- sum(rowSums(overlap)^2)
  within_2 <- sum(colSums(overlap)^2)
  shared <- sum(overlap^2)
  0.5 * (within_1 + within_2 - 2 * shared)
}
# Rand index between two partitions: 1 minus the normalized Binder loss.
rand_index <- function(N, gamma1, gamma2){
  # Compute binder_loss once (the original called it twice and discarded
  # the first result).
  tmp_binder <- binder_loss(N, gamma1, gamma2)
  tmp_rand <- 1 - tmp_binder * 2/(N * (N-1))
  return(tmp_rand)
}
# Pairwise Binder losses among an ensemble (list) of partitions.
# Returns the upper-triangular losses as a vector (0.0 for one particle).
binder_ensemble <- function(N, particles){
  n_particles <- length(particles)
  if (n_particles == 1) {
    return(0.0)
  }
  dists <- matrix(0, nrow = n_particles, ncol = n_particles)
  for (a in 1:(n_particles - 1)) {
    for (b in (a + 1):n_particles) {
      d <- binder_loss(N, particles[[a]], particles[[b]])
      dists[a, b] <- d
      dists[b, a] <- d
    }
  }
  dists[upper.tri(dists)]
}
|
/one_partition/scripts/partition_functions.R
|
no_license
|
cecilia-balocchi/particle-optimization
|
R
| false
| false
| 4,918
|
r
|
# Partition Functions
library(scales)
library(RColorBrewer)
col_list <- rev(brewer.pal(n = 5, name = "RdBu"))
# TRUE when both vectors contain the same values (ignoring order),
# after coercion to numeric.
sorted_equal <- function(v1, v2){
  a <- sort(as.numeric(v1))
  b <- sort(as.numeric(v2))
  identical(a, b)
}
# TRUE when x occurs in vector v.
# Thin wrapper around %in%, intended for use with apply-style iteration.
index_in <- function(x, v){
  x %in% v
}
# Configuration of the partition: cluster sizes, singleton clusters, the
# pairwise co-allocation matrix Z and the cluster-level adjacency matrix.
partition_config <- function(gamma, A_block){
  # gamma: partition as a list of index vectors
  # A_block: adjacency matrix of the block groups
  sizes <- sapply(gamma, FUN = length)
  n_clusters <- length(gamma)       # number of clusters
  n_units <- nrow(A_block)          # number of block groups
  single_idx <- which(sizes == 1)
  multi_idx <- which(sizes != 1)
  n_single <- length(single_idx)
  n_multi <- length(multi_idx)
  # Z[i, j] = 1 iff units i and j are allocated to the same cluster
  Z <- matrix(0, nrow = n_units, ncol = n_units)
  for (k in seq_len(n_clusters)) {
    members <- gamma[[k]]
    Z[members, members] <- 1
  }
  if (n_clusters > 1) {
    # Cluster-level adjacency: 1 iff any pair of members is adjacent
    A_cluster <- matrix(0, nrow = n_clusters, ncol = n_clusters)
    for (k in 1:(n_clusters - 1)) {
      for (kk in (k + 1):n_clusters) {
        touching <- any(A_block[gamma[[k]], gamma[[kk]]] == 1)
        A_cluster[k, kk] <- 1 * touching
        A_cluster[kk, k] <- A_cluster[k, kk]
      }
    }
  } else {
    A_cluster <- NULL
  }
  list(K = n_clusters, config = sizes,
       singletons = single_idx, non_singletons = multi_idx,
       A_cluster = A_cluster, Z = Z)
}
# Ensures that the clusters are spatially connected: splits any spatially
# disconnected cluster of `gamma` into its connected components (with respect
# to the adjacency matrix `A_block`) and returns the resulting partition.
# NOTE(review): graph_from_adjacency_matrix() and components() come from the
# igraph package, which is not loaded in this file -- confirm the caller
# attaches it.
partition_modify <- function(gamma, A_block){
  gamma_new <- gamma
  K <- length(gamma)
  count <- K + 1 # index of the next free slot at the end of gamma_new
  to_be_removed <- c()
  for(k in 1:K){
    if(length(gamma[[k]])>1){
      cl <- gamma[[k]]
      g <- graph_from_adjacency_matrix(A_block[cl,cl]) # adjacency matrix of the cluster
      tmp <- components(g)$membership # finds the connected components of the cluster
      if(length(unique(tmp))>1){ # the cluster splits into more than one connected component
        for(x in unique(tmp)){ # loops over the component labels
          gamma_new[[count]] <- cl[which(tmp == x)] # forms a new cluster out of the connected component; adds to end of gamma
          count <- count + 1
        }
        to_be_removed <- c(to_be_removed, k) # will eventually delete the original cluster k
      }
    }
  }
  # Drop the clusters that were replaced by their components
  if(length(to_be_removed) > 0) gamma_new <- gamma_new[-to_be_removed]
  return(gamma_new)
}
# Test whether two partitions of the same units are equal as sets of sets.
partition_equal <- function(gamma1, gamma2, A_block){
  tmp_1 <- partition_config(gamma1, A_block)
  tmp_2 <- partition_config(gamma2, A_block)
  K_1 <- tmp_1$K
  config_1 <- sort(tmp_1$config)
  config_2 <- sort(tmp_2$config)
  if(! identical(config_1, config_2)){ # if the cluster configurations are different, stop
    flag <- FALSE
  } else { # now check individual partition elements
    flag <- TRUE
    i <- 0
    # `&&` (scalar, short-circuiting) instead of elementwise `&` for the
    # loop condition; also dropped the unused K_2 local.
    while(i < K_1 && flag){
      i <- i + 1
      # Does cluster i of gamma1 match (up to element order) any cluster of gamma2?
      flag <- any(sapply(gamma2, FUN = sorted_equal, gamma1[[i]]))
    }
  }
  return(flag)
}
# Variation-of-information (VI) distance between two partitions of N units.
vi_distance <- function(N, gamma1, gamma2){
  # counts[k, kk]: number of units shared by cluster k of gamma1 and
  # cluster kk of gamma2
  k1 <- length(gamma1)
  k2 <- length(gamma2)
  counts <- matrix(0, nrow = k1, ncol = k2)
  for(k in 1:k1){
    for(kk in 1:k2){
      counts[k,kk] <- length(intersect(gamma1[[k]], gamma2[[kk]]))
    }
  }
  row_sums <- rowSums(counts)
  col_sums <- colSums(counts)
  # VI = H(gamma1) + H(gamma2) - 2 * I(gamma1, gamma2)
  dist <- sum(row_sums/N * log(row_sums/N)) + sum(col_sums/N * log(col_sums/N))
  for(k in 1:k1){
    for(kk in 1:k2){
      if(counts[k,kk] > 0){
        dist <- dist - 2 * counts[k,kk]/N * log(counts[k,kk]/N)
      }
    }
  }
  # Clamp floating-point noise to exactly zero. The original tested
  # `abs(diff)` -- `diff` is undefined here and resolves to base::diff
  # (a function), so that line always raised an error.
  if(abs(dist) < 1e-14) dist <- 0
  return(dist)
}
# Pairwise VI distances among an ensemble (list) of partitions.
# Returns the upper-triangular distances as a vector (0.0 for one particle).
vi_ensemble <- function(N, particles){
  n_particles <- length(particles)
  if (n_particles == 1) {
    return(0.0)
  }
  dists <- matrix(0, nrow = n_particles, ncol = n_particles)
  for (a in 1:(n_particles - 1)) {
    for (b in (a + 1):n_particles) {
      d <- vi_distance(N, particles[[a]], particles[[b]])
      dists[a, b] <- d
      dists[b, a] <- d
    }
  }
  dists[upper.tri(dists)]
}
# Binder loss between two partitions of N units.
binder_loss <- function(N, gamma1, gamma2){
  # overlap[i, j]: number of units in cluster i of gamma1 and cluster j of gamma2
  k1 <- length(gamma1)
  k2 <- length(gamma2)
  overlap <- matrix(nrow = k1, ncol = k2)
  for (i in 1:k1) {
    for (j in 1:k2) {
      overlap[i, j] <- length(intersect(gamma1[[i]], gamma2[[j]]))
    }
  }
  within_1 <- sum(rowSums(overlap)^2)
  within_2 <- sum(colSums(overlap)^2)
  shared <- sum(overlap^2)
  0.5 * (within_1 + within_2 - 2 * shared)
}
# Rand index between two partitions: 1 minus the normalized Binder loss.
rand_index <- function(N, gamma1, gamma2){
  # Compute binder_loss once (the original called it twice and discarded
  # the first result).
  tmp_binder <- binder_loss(N, gamma1, gamma2)
  tmp_rand <- 1 - tmp_binder * 2/(N * (N-1))
  return(tmp_rand)
}
# Pairwise Binder losses among an ensemble (list) of partitions.
# Returns the upper-triangular losses as a vector (0.0 for one particle).
binder_ensemble <- function(N, particles){
  n_particles <- length(particles)
  if (n_particles == 1) {
    return(0.0)
  }
  dists <- matrix(0, nrow = n_particles, ncol = n_particles)
  for (a in 1:(n_particles - 1)) {
    for (b in (a + 1):n_particles) {
      d <- binder_loss(N, particles[[a]], particles[[b]])
      dists[a, b] <- d
      dists[b, a] <- d
    }
  }
  dists[upper.tri(dists)]
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nemo_circle.R
\name{nemo_circle}
\alias{nemo_circle}
\title{Computes the largest empty circle which doesn't contain any points, inside a defined hull.}
\usage{
nemo_circle(points, hull, strict_inclusion, nmax_circles)
}
\arguments{
\item{points}{a set of points (an `sf` object).}
\item{hull}{external limits of the set of points (a polygon type `sf` object). Can be imported or computed with `nemo_hull` function, with the coordinate reference system as points object.}
\item{strict_inclusion}{TRUE if empty circle has to be entirely within the hull, FALSE otherwise.}
\item{nmax_circles}{number of empty circles in output.}
}
\value{
an `sf` object.
}
\description{
Computes the largest empty circle(s) — circles containing none of the input points — constrained to lie within the supplied hull.
}
\examples{
\donttest{
data(points)
nemo_pts <-
nemo_circle(points = points \%>\% st_transform(2154),
hull = hull_pts,
strict_inclusion = T,
nmax_circles = 1)
}
}
|
/man/nemo_circle.Rd
|
permissive
|
mtmx/nemo
|
R
| false
| true
| 948
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nemo_circle.R
\name{nemo_circle}
\alias{nemo_circle}
\title{Computes the largest empty circle which doesn't contain any points, inside a defined hull.}
\usage{
nemo_circle(points, hull, strict_inclusion, nmax_circles)
}
\arguments{
\item{points}{a set of points (an `sf` object).}
\item{hull}{external limits of the set of points (a polygon type `sf` object). Can be imported or computed with `nemo_hull` function, with the coordinate reference system as points object.}
\item{strict_inclusion}{TRUE if empty circle has to be entirely within the hull, FALSE otherwise.}
\item{nmax_circles}{number of empty circles in output.}
}
\value{
an `sf` object.
}
\description{
Computes the largest empty circle(s) — circles containing none of the input points — constrained to lie within the supplied hull.
}
\examples{
\donttest{
data(points)
nemo_pts <-
nemo_circle(points = points \%>\% st_transform(2154),
hull = hull_pts,
strict_inclusion = T,
nmax_circles = 1)
}
}
|
library(RCurl)
library(jsonlite)
#' get.scid.sid
#' @title Get the source compound ids from another source compound id
#' @description a list of all src_compound_ids from all sources which are
#' CURRENTLY assigned to the same structure as a currently assigned
#' query src_compound_id.
#' The output will include query src_compound_id if it is a valid src_compound_id
#' with a current assignment.
#' @name get.scid.sid
#' @docType package
#' @param x : Input string Source compound id
#' @param y : Input integer Source id
#' @export
#' @examples
#' \donttest{
#' # Get source compound ids and source information
#' # Using ChEMBL ID and source
#' get.scid.sid("CHEMBL12",1)
#' # Using drugbank id and source
#' get.scid.sid("DB00789",2)
#' }
get.scid.sid <- function(x,y) {
  # All src_compound_ids (from every source) currently assigned to the same
  # structure as the query src_compound_id `x` from source `y`.
  # Returns a data frame merged with source metadata, or NULL on failure.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/src_compound_id/%s/%d", x, y)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  data(SourceNames)
  # Attach human-readable source information to the UniChem result.
  merge(fromJSON(payload), SourceNames, by.x = "src_id", by.y = "src_id")
}
#' get.sAll.sid
#' @title Get the all source compound ids from another source compound id
#' @description Obtain a list of all src_compound_ids from all sources
#' (including BOTH current AND obsolete assignments) to the same structure
#' as a currently assigned query
#' src_compound_id.
#' @name get.sAll.sid
#' @docType package
#' @param x : Input chemblid
#' @param y : Input integer source id
#' @export
#' @examples
#' \donttest{
#' # Get all source ids using ChEMBL id and source
#' get.sAll.sid("CHEMBL12",1)
#' # Using drugbank id and source
#' get.sAll.sid("DB00789",2)
#' }
get.sAll.sid <- function(x,y) {
  # All src_compound_ids (current AND obsolete assignments) sharing the
  # structure of query compound `x` from source `y`.
  # Returns a data frame merged with source metadata, or NULL on failure.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/src_compound_id_all/%s/%d", x, y)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  data(SourceNames)
  # Attach human-readable source information to the UniChem result.
  merge(fromJSON(payload), SourceNames, by.x = "src_id", by.y = "src_id")
}
#' get.mapping.full
#' @title Get full mapping between two sources.
#' @description Obtain a full mapping between two sources. Uses only currently
#' assigned src_compound_ids from both sources.
#' @name get.mapping.full
#' @docType package
#' @param x : Input integer source id
#' @param y : Input integer source id
#' @export
#' @examples
#' \donttest{
#' # Get full mapping of PDBe and ChEMBL
#' get.mapping.full(3,1)
#' # Get full mapping of ZINC and ChEMBL
#' get.mapping.full(9,1)
#' }
get.mapping.full <- function(x,y) {
  # Full mapping of currently-assigned src_compound_ids between source `x`
  # and source `y`. Returns parsed JSON, or NULL on a non-200 response.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/mapping/%d/%d", x, y)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)
}
#' get.src_id.InCHIKey
#' @title Get source compound ids
#' @description Obtain a list of src_compound_ids (from all sources) which
#' are CURRENTLY assigned to a query InChI Key. Returns a list of data from
#' Unichem and ChEMBL databases.
#' @name get.src_id.InCHIKey
#' @docType package
#' @param x : Input string InCHI Key
#' @export
#' @examples
#' \donttest{
#' # Get source compound ids from InCHIKey
#' get.sid.InCHIKey("AAOVKJBEBIDNHE-UHFFFAOYSA-N")
#'
#' data<-get.sid.InCHIKey("BSYNRYMUTXBXSQ-UHFFFAOYSA-N")
#' # to get chembl data
#' data$Chem
#' # to get Unichem data
#' data$Uni
#' }
get.sid.InCHIKey <- function(x){
  # Queries both UniChem and ChEMBL for an InChIKey and returns both results
  # as list(uni = <UniChem data frame>, Chem = <ChEMBL data frame>),
  # or NULL when the (last) request does not return HTTP 200.
  uni_query  <- sprintf("https://www.ebi.ac.uk/unichem/rest/inchikey/%s", x)
  chem_query <- sprintf("https://www.ebi.ac.uk/chemblws/compounds/stdinchikey/%s.json", x)
  handle <- getCurlHandle()
  uni_payload  <- getURL(uni_query, curl = handle)
  chem_payload <- getURL(chem_query, curl = handle)
  # NOTE(review): the status reflects only the second (ChEMBL) request,
  # as in the original implementation.
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  data(SourceNames)
  uni_df  <- merge(fromJSON(uni_payload), SourceNames, by.x = "src_id", by.y = "src_id")
  chem_df <- do.call(rbind, lapply(fromJSON(chem_payload), data.frame))
  list(uni = uni_df, Chem = chem_df)
}
#' get.sAll.InCHIKey
#' @title Get all src_compound_ids.
#' @description Get a list of all src_compound_ids (from all sources) which
#' have current AND obsolete assignments to a query InChIKey
#' @name get.sAll.InCHIKey
#' @docType package
#' @param x : Input string InCHI Key
#' @export
#' @examples
#' \donttest{
#' # Get all the IDs using InCHIKey
#' get.sAll.InCHIKey("AAOVKJBEBIDNHE-UHFFFAOYSA-N")
#' }
get.sAll.InCHIKey <- function(x){
  # All src_compound_ids (current AND obsolete assignments) for the query
  # InChIKey. Returns a data frame merged with source metadata, or NULL.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/inchikey_all/%s", x)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  data(SourceNames)
  # Attach human-readable source information to the UniChem result.
  merge(fromJSON(payload), SourceNames, by.x = "src_id", by.y = "src_id")
}
#' get.structure
#' @title Get structure
#' @description Get structure(s) currently assigned to a query src_compound_id.
#' @name get.structure
#' @docType package
#' @param x : Input string chemblid
#' @param s : Input integer source id (default is 1)
#' @export
#' @examples
#' \donttest{
#' # Get Standard inhci and InCHIKey from drugbank compound and source
#' get.structure("DB00321",2)
#' # Using ChEMBL compound and source id
#' get.structure("CHEMBL1231",1)
#' }
get.structure <- function(x,s=1){
  # Structure(s) (standard InChI / InChIKey) currently assigned to the query
  # src_compound_id `x` from source `s` (defaults to ChEMBL, source 1).
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/structure/%s/%d", x, s)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)
}
#' get.struc.all
#' @title Get Structures for source compound id
#' @description Get the standard InCHI and standard InCHI Key for the source
#' compound id
#' @name get.struc.all
#' @docType package
#' @param x : Input string chemblid
#' @param s : Input integer source id (default is 1)
#' @export
#' @examples
#' \donttest{
#' # Get all the structure information using ChEMBL id and source.
#' get.struc.all("CHEMBL1231",1)
#' #using drugbank id and source
#' get.structure("DB00321",2)
#' }
get.struc.all <- function(x,s=1){
  # All structure records (standard InChI / InChIKey) for the query
  # src_compound_id `x` from source `s` (defaults to ChEMBL, source 1).
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/structure_all/%s/%d", x, s)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)
}
#' get.url.sid
#' @title Get url for the query compound
#' @description Get a list of URLs for all src_compound_ids, from a specified
#' source .
#' @name get.url.sid
#' @docType package
#' @param x : Input string source compound id
#' @param y : Input integer source id
#' @param z : Input integer to source id
#' @export
#' @examples
#' \donttest{
#' # get urls of compounds using source compound id, source id
#' # get drugbank url from ChEMBL source id and ChEMBL source
#' get.url.sid("ChEMBL490",1,2)
#'
#' # get chembl url from drugbank id and source
#' get.url.sid("DB00715",2,1)
#' }
get.url.sid <- function(x,y,z){
  # URLs for all src_compound_ids equivalent to compound `x` from source `y`,
  # resolved in target source `z`. Returns parsed JSON, or NULL on failure.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/src_compound_id_url/%s/%d/%d", x, y, z)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)
}
#' get.sAll.obs
#' @title Get source compound id from obsolete source compound id
#' @description Get a list of all src_compound_ids from all sources with BOTH
#' current AND obsolete to the same structure with an obsolete assignment to the #' query src_compound_id.
#' @name get.SrcAll.obs
#' @docType package
#' @param x : Input string source compound id
#' @param y : Input integer to source id
#' @export
#' @examples
#' \donttest{
#' #get for drugbank compound and source
#' get.sAll.obs("DB07699",2)
#' #get for chembl compound and source
#' get.sAll.obs("CHEMBL12",1)
#' }
get.sAll.obs <- function(x,y){
  # All src_compound_ids (current AND obsolete) sharing the structure that
  # the obsolete query src_compound_id `x` (source `y`) was assigned to.
  # Returns a data frame merged with source metadata, or NULL on failure.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/src_compound_id_all_obsolete/%s/%d", x, y)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  data(SourceNames)
  # Attach human-readable source information to the UniChem result.
  merge(fromJSON(payload), SourceNames, by.x = "src_id", by.y = "src_id")
}
#' get.verbose.InCHIkey
#' @title Get all src_compound_ids to a query InChIKey
#' @description Returns a dataframe containing src_id (the src_id for this source),
#' src_url (the main home page of the source), name (the unique name for the source in
#' UniChem, always lower case), name_long (the full name of the source, as defined by the
#' source),name_label (A name for the source
#' suitable for use as a 'label' for the source within a web-page. Correct case setting
#' for source, and always less than 30 characters), description (a description of the
#' content of the source), base_id_url_available (an flag indicating whether this source
#' provides a valid base_id_url for creating cpd-specific links [1=yes, 0=no]),base_id_url
#' (the base url for constructing hyperlinks to this source [append an identifier from
#' this source to the end of this url to create a valid url to a specific page for this
#' cpd], unless aux_for_url=1), aux_for_url (A flag to indicate whether the aux_src field
#' should be used to create hyperlinks instead of the src_compound_id [1=yes, 0=no] ,
#' src_compound_id (a list of src_compound_ids from this source which are currently
#' assigned to the query InChIKey, aux_src (a list of src-compound_id keys mapping to
#' corresponding auxiliary data (url_id:value), for creating links if aux_for_url=1. Only
#' shown if aux_for_url=1).
#' @name get.verbose.InCHIkey
#' @docType package
#' @param x : Input string InCHI Key
#' @export
#' @examples
#' \donttest{
#' # get for InCHIkey
#' get.verbose.InCHIkey("GZUITABIAKMVPG-UHFFFAOYSA-N")
#'
#' get.verbose.InCHIkey("AAOVKJBEBIDNHE-UHFFFAOYSA-N")
#' }
get.verbose.InCHIkey <- function(x){
  # Verbose per-source record (source metadata + assigned src_compound_ids)
  # for a query InChIKey. Returns parsed JSON, or NULL on failure.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/verbose_inchikey/%s", x)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)
}
#' get.cmp.inf
#' @title Get compound information from ChEMBL
#' @description These functions allow one to retrieve compounds information from ChEMBL
#' compounds are identified either by a ChEMBL ID or by a standard InChI key.
#' @name get.cmp.inf
#' @docType package
#' @param x : String representing chemblid or standard InCHI key for the molecule.
#' @param type : For \code{get.compound}, one of \code{chemblid} or
#' \code{stdinchi} to indicate the nature of the molecule id.
#' @export
#' @examples
#' \donttest{
#' #get information for chembl compound id
#' get.compound("CHEMBL12")
#'
#' #get information for standard inchi
#' get.compound("QFFGVLORLPOAEC-SNVBAGLBSA-N",type='stdinchi')
#' }
get.cmp.inf <- function(x, type='chemblid') {
  # Retrieve compound information from ChEMBL.
  #
  # x    : compound identifier — a ChEMBL ID or a standard InChIKey.
  # type : 'chemblid' or 'stdinchi' (partial matching via pmatch()).
  # Returns the parsed JSON record, or NULL on a non-200 response.
  types <- c('chemblid', 'stdinchi')
  type <- pmatch(type, types)
  if (is.na(type)) stop("Invalid type given")
  # switch() on an integer selects the alternative positionally; the
  # original's duplicated `url =` names were ignored and only confusing.
  base <- switch(type,
                 'https://www.ebi.ac.uk/chemblws/compounds/',
                 'https://www.ebi.ac.uk/chemblws/compounds/stdinchikey/')
  # BUG FIX: the original interpolated an undefined variable `id`; the
  # query identifier is the `x` argument.
  url <- sprintf('%s%s.json', base, x)
  h <- getCurlHandle()
  d <- getURL(url, curl=h)
  status <- getCurlInfo(h)$response.code
  rm(h)
  if (status == 200) {
    return(fromJSON(d))
  } else {
    return(NULL)
  }
}
#' get.cmp.sim
#' @title Retrive similar compounds from ChEMBL database.
#' @description This function retrieves a dataframe of similar compounds
#' from ChEMBL database given a smiles string as query and also given a similarity score above 70.
#' @name get.cmp.sim
#' @docType package
#' @param mol : String representing smiles of the moelcule
#' @param sim : Integer representing for percentage of similarity
#' for the query compound and the database molecules. Values ranges
#' from 70 to 100.
#' @export
get.cmp.sim <- function(mol,sim=70) {
  # Compounds similar to `mol` (SMILES) at or above `sim` percent similarity
  # (70-100). Returns the first element of the parsed JSON (the compound
  # table), or NULL on a non-200 response.
  query <- sprintf('https://www.ebi.ac.uk/chemblws/compounds/similarity/%s/%d.json', mol, sim)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)[[1]]
}
#' get.cmp.substruct
#' @title Get compound information from substructure query smiles.
#' @description This function retrieves a dataframe of all compounds from ChEMBL database
#' containing the substructure represented by the given Canonical SMILES and their
#' chemical properties.
#' @name get.compound.substruct
#' @docType package
#' @param mol : String representing smiles of the moelcule
#' @export
#' @examples
#' \donttest{
#' #get compounds by substructure
#' get.cmp.subsruct("CN(CCCN)c1cccc2ccccc12")
#' }
get.cmp.substruct <- function(mol){
  # ChEMBL compounds containing the substructure given by `mol` (SMILES).
  # Returns the `compounds` element of the parsed JSON, or NULL on failure.
  query <- sprintf('https://www.ebi.ac.uk/chemblws/compounds/substructure/%s.json', mol)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  parsed <- fromJSON(payload)
  parsed$compounds
}
#' get.appDrugs
#' @title Get approved drugs for target.
#' @description This function retrieves a dataframe of all approved drug compounds from
#' ChEMBL database given a string of ChEMBL target ID.
#' @name get.appDrugs
#' @docType package
#' @param x : string ChEMBL target ID.
#' @export
#' @examples
#' \donttest{
#' #get chembl ids of approved drugs
#' get.appDrugs("CHEMBL1824")
#' }
get.appDrugs <- function(x){
  # Approved drug compounds for the ChEMBL target ID `x`.
  # Returns the first element of the parsed JSON (the drug table),
  # or NULL on a non-200 response.
  url <- sprintf('https://www.ebi.ac.uk/chemblws/targets/%s/approvedDrug.json', x)
  h <- getCurlHandle()
  d <- getURL(url, curl=h)
  status <- getCurlInfo(h)$response.code
  rm(h)
  if (status == 200) {
    # BUG FIX: the original wrote `return(fromJSON(d))[[1]]` — return()
    # exits immediately, so the trailing `[[1]]` was never evaluated and
    # the whole list was returned. The extraction belongs inside return().
    return(fromJSON(d)[[1]])
  } else {
    return(NULL)
  }
}
#' get.bioactivity.
#' @title Get Bioactivity Information for Compounds, Targets or Assays.
#' @description This method retrieves bioactivity information for a compound
#' across multiple targets/assays or else for a target across multiple compounds.
#' The function can also be used to retrieve all activities within a given assay.
#' In all cases, ChEMBL identifiers must be used.
#' @name get.bioactivity
#' @docType package
#' @param x : Input string chemblid
#' @param type : Input string \code{'compound'},\code{'target'},\code{'assay'}. Default is
#' \code{'compound'}.
#' @export
#' @examples
#' \donttest{
#' # get bioactivities of compounds
#' get.bioactivity("CHEMBL12",type='compound')
#'
#' # get compound bioactivities for targets
#' get.bioactivity("CHEMBL240",type="target")
#'
#' # get bioactivities by assay
#' get.bioactivity("CHEMBL1217643",type='assay')
#' }
get.bioactivity <- function(x, type='compound') {
  # Bioactivity records for a ChEMBL compound, target, or assay identifier.
  # `type` selects which endpoint is queried (partial matching allowed).
  # Returns a data frame of activities, or NULL on a non-200 response.
  types <- c('compound', 'target', 'assay')
  type <- pmatch(type, types)
  if (is.na(type)) stop("Invalid type given")
  # Pick the endpoint template by the matched type index.
  templates <- c('https://www.ebi.ac.uk/chemblws/compounds/%s/bioactivities.json',
                 'https://www.ebi.ac.uk/chemblws/targets/%s/bioactivities.json',
                 'https://www.ebi.ac.uk/chemblws/assays/%s/bioactivities.json')
  query <- sprintf(templates[type], x)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  # Flatten the list of JSON records into one data frame.
  do.call(rbind, lapply(fromJSON(payload), data.frame))
}
#' get.moa
#' @title Get mechanism of action
#' @description This function retrieves a data frame of compounds and its
#' mode of action for a compound (where compound is a drug) and drug targets.
#' @name get.moa
#' @docType package
#' @param x : Input string chemblid
#' @export
#' @examples
#' \donttest{
#' # get moa of drug
#' get.moa("CHEMBL1642")
#' }
get.moa <- function(x){
  # Drug mechanism-of-action records for a ChEMBL compound ID `x`.
  # Returns a data frame, or NULL on a non-200 response.
  query <- sprintf('https://www.ebi.ac.uk/chemblws/compounds/%s/drugMechanism.json', x)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  # Flatten the list of JSON records into one data frame.
  do.call(rbind, lapply(fromJSON(payload), data.frame))
}
#' get.targets
#' @title Get target information.
#' @description This function retrieves the target information by chembl id and
#' uniprot id and also retrieves all the names of targets by organisms. When
#' \code{org="Homo sapiens"} subsets the data frame by organism homo sapiens and retrieves
#' all the Homo sapiens taregts
#' @name get.targets
#' @docType package
#' @param x : Input string chemblid
#' @param type : Input string 'chemblid' or 'uniprot'
#' @param org : Input string species name like "Homo sapiens","Plasmodium falciparum" and etc.
#' @export
#' @examples
#' \donttest{
#' #get target information by chembl ids
#' get.targets("CHEMBL1862",type='chemblid')
#'
#' #get target information by uniprot ids
#' get.targets("Q13936",type='uniprot')
#'
#' #get all the target information using organism name
#' get.targets(org="Homo Sapiens")
#' }
get.targets <- function(x,type='chemblid',org=NULL){
  # Target information from ChEMBL.
  #
  # x    : ChEMBL target ID or UniProt accession (ignored when `org` given).
  # type : 'chemblid' or 'uniprot' (partial matching via pmatch()).
  # org  : organism name; when supplied, all targets for that organism are
  #        returned instead of a single-target lookup.
  # Returns a data frame, or NULL on a non-200 response.
  types <- c('chemblid', 'uniprot')
  type <- pmatch(type, types)
  if (is.na(type)) stop("Invalid type given")
  if(is.null(org)){
    # switch() on an integer selects the alternative positionally.
    url<-switch(type,'https://www.ebi.ac.uk/chemblws/targets/%s.json',
                'https://www.ebi.ac.uk/chemblws/targets/uniprot/%s.json')
    url <- sprintf(url,x)
    h <- getCurlHandle()
    d <- getURL(url, curl=h)
    status <- getCurlInfo(h)$response.code
    rm(h)
    if (status == 200) {
      data<- do.call(rbind, lapply(fromJSON(d), data.frame))
      return(data)
    } else {
      return(NULL)
    }
  }else{
    url<-'https://www.ebi.ac.uk/chemblws/targets.json'
    h <- getCurlHandle()
    d <- getURL(url, curl=h)
    status <- getCurlInfo(h)$response.code
    rm(h)
    if (status == 200) {
      df <- do.call(rbind, lapply(fromJSON(d), data.frame))
      # BUG FIX: the original `df[which(df$organism == org)]` omitted the
      # row/column comma, so it selected COLUMNS by those indices instead
      # of filtering rows by organism.
      data <- df[which(df$organism == org), ]
      return(data)
    } else {
      return(NULL)
    }
  }
}
|
/R/rUniChembl.r
|
no_license
|
woodhaha/rUniChEMBL
|
R
| false
| false
| 18,742
|
r
|
library(RCurl)
library(jsonlite)
#' get.scid.sid
#' @title Get the source compound ids from another source compound id
#' @description a list of all src_compound_ids from all sources which are
#' CURRENTLY assigned to the same structure as a currently assigned
#' query src_compound_id.
#' The output will include query src_compound_id if it is a valid src_compound_id
#' with a current assignment.
#' @name get.scid.sid
#' @docType package
#' @param x : Input string Source compound id
#' @param y : Input integer Source id
#' @export
#' @examples
#' \donttest{
#' # Get source compound ids and source information
#' # Using ChEMBL ID and source
#' get.scid.sid("CHEMBL12",1)
#' # Using drugbank id and source
#' get.scid.sid("DB00789",2)
#' }
get.scid.sid <- function(x,y) {
  # All src_compound_ids (from every source) currently assigned to the same
  # structure as the query src_compound_id `x` from source `y`.
  # Returns a data frame merged with source metadata, or NULL on failure.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/src_compound_id/%s/%d", x, y)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  data(SourceNames)
  # Attach human-readable source information to the UniChem result.
  merge(fromJSON(payload), SourceNames, by.x = "src_id", by.y = "src_id")
}
#' get.sAll.sid
#' @title Get the all source compound ids from another source compound id
#' @description Obtain a list of all src_compound_ids from all sources
#' (including BOTH current AND obsolete assignments) to the same structure
#' as a currently assigned query
#' src_compound_id.
#' @name get.sAll.sid
#' @docType package
#' @param x : Input chemblid
#' @param y : Input integer source id
#' @export
#' @examples
#' \donttest{
#' # Get all source ids using ChEMBL id and source
#' get.sAll.sid("CHEMBL12",1)
#' # Using drugbank id and source
#' get.sAll.sid("DB00789",2)
#' }
get.sAll.sid <- function(x,y) {
  # All src_compound_ids (current AND obsolete assignments) sharing the
  # structure of query compound `x` from source `y`.
  # Returns a data frame merged with source metadata, or NULL on failure.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/src_compound_id_all/%s/%d", x, y)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  data(SourceNames)
  # Attach human-readable source information to the UniChem result.
  merge(fromJSON(payload), SourceNames, by.x = "src_id", by.y = "src_id")
}
#' get.mapping.full
#' @title Get full mapping between two sources.
#' @description Obtain a full mapping between two sources. Uses only currently
#' assigned src_compound_ids from both sources.
#' @name get.mapping.full
#' @docType package
#' @param x : Input integer source id
#' @param y : Input integer source id
#' @export
#' @examples
#' \donttest{
#' # Get full mapping of PDBe and ChEMBL
#' get.mapping.full(3,1)
#' # Get full mapping of ZINC and ChEMBL
#' get.mapping.full(9,1)
#' }
get.mapping.full <- function(x,y) {
  # Full mapping of currently-assigned src_compound_ids between source `x`
  # and source `y`. Returns parsed JSON, or NULL on a non-200 response.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/mapping/%d/%d", x, y)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)
}
#' get.src_id.InCHIKey
#' @title Get source compound ids
#' @description Obtain a list of src_compound_ids (from all sources) which
#' are CURRENTLY assigned to a query InChI Key. Returns a list of data from
#' Unichem and ChEMBL databases.
#' @name get.src_id.InCHIKey
#' @docType package
#' @param x : Input string InCHI Key
#' @export
#' @examples
#' \donttest{
#' # Get source compound ids from InCHIKey
#' get.sid.InCHIKey("AAOVKJBEBIDNHE-UHFFFAOYSA-N")
#'
#' data<-get.sid.InCHIKey("BSYNRYMUTXBXSQ-UHFFFAOYSA-N")
#' # to get chembl data
#' data$Chem
#' # to get Unichem data
#' data$Uni
#' }
get.sid.InCHIKey <- function(x){
  # Queries both UniChem and ChEMBL for an InChIKey and returns both results
  # as list(uni = <UniChem data frame>, Chem = <ChEMBL data frame>),
  # or NULL when the (last) request does not return HTTP 200.
  uni_query  <- sprintf("https://www.ebi.ac.uk/unichem/rest/inchikey/%s", x)
  chem_query <- sprintf("https://www.ebi.ac.uk/chemblws/compounds/stdinchikey/%s.json", x)
  handle <- getCurlHandle()
  uni_payload  <- getURL(uni_query, curl = handle)
  chem_payload <- getURL(chem_query, curl = handle)
  # NOTE(review): the status reflects only the second (ChEMBL) request,
  # as in the original implementation.
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  data(SourceNames)
  uni_df  <- merge(fromJSON(uni_payload), SourceNames, by.x = "src_id", by.y = "src_id")
  chem_df <- do.call(rbind, lapply(fromJSON(chem_payload), data.frame))
  list(uni = uni_df, Chem = chem_df)
}
#' get.sAll.InCHIKey
#' @title Get all src_compound_ids.
#' @description Get a list of all src_compound_ids (from all sources) which
#' have current AND obsolete assignments to a query InChIKey
#' @name get.sAll.InCHIKey
#' @docType package
#' @param x : Input string InCHI Key
#' @export
#' @examples
#' \donttest{
#' # Get all the IDs using InCHIKey
#' get.sAll.InCHIKey("AAOVKJBEBIDNHE-UHFFFAOYSA-N")
#' }
get.sAll.InCHIKey <- function(x){
  # All src_compound_ids (current AND obsolete assignments) for the query
  # InChIKey. Returns a data frame merged with source metadata, or NULL.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/inchikey_all/%s", x)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  data(SourceNames)
  # Attach human-readable source information to the UniChem result.
  merge(fromJSON(payload), SourceNames, by.x = "src_id", by.y = "src_id")
}
#' get.structure
#' @title Get structure
#' @description Get structure(s) currently assigned to a query src_compound_id.
#' @name get.structure
#' @docType package
#' @param x : Input string chemblid
#' @param s : Input integer source id (default is 1)
#' @export
#' @examples
#' \donttest{
#' # Get Standard inhci and InCHIKey from drugbank compound and source
#' get.structure("DB00321",2)
#' # Using ChEMBL compound and source id
#' get.structure("CHEMBL1231",1)
#' }
get.structure <- function(x,s=1){
  # Structure(s) (standard InChI / InChIKey) currently assigned to the query
  # src_compound_id `x` from source `s` (defaults to ChEMBL, source 1).
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/structure/%s/%d", x, s)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)
}
#' get.struc.all
#' @title Get Structures for source compound id
#' @description Get the standard InCHI and standard InCHI Key for the source
#' compound id
#' @name get.struc.all
#' @docType package
#' @param x : Input string chemblid
#' @param s : Input integer source id (default is 1)
#' @export
#' @examples
#' \donttest{
#' # Get all the structure information using ChEMBL id and source.
#' get.struc.all("CHEMBL1231",1)
#' #using drugbank id and source
#' get.structure("DB00321",2)
#' }
get.struc.all <- function(x,s=1){
  # All structure records (standard InChI / InChIKey) for the query
  # src_compound_id `x` from source `s` (defaults to ChEMBL, source 1).
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/structure_all/%s/%d", x, s)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)
}
#' get.url.sid
#' @title Get url for the query compound
#' @description Get a list of URLs for all src_compound_ids, from a specified
#' source .
#' @name get.url.sid
#' @docType package
#' @param x : Input string source compound id
#' @param y : Input integer source id
#' @param z : Input integer to source id
#' @export
#' @examples
#' \donttest{
#' # get urls of compounds using source compound id, source id
#' # get drugbank url from ChEMBL source id and ChEMBL source
#' get.url.sid("ChEMBL490",1,2)
#'
#' # get chembl url from drugbank id and source
#' get.url.sid("DB00715",2,1)
#' }
get.url.sid <- function(x,y,z){
  # URLs for all src_compound_ids equivalent to compound `x` from source `y`,
  # resolved in target source `z`. Returns parsed JSON, or NULL on failure.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/src_compound_id_url/%s/%d/%d", x, y, z)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)
}
#' get.sAll.obs
#' @title Get source compound id from obsolete source compound id
#' @description Get a list of all src_compound_ids from all sources with BOTH
#' current AND obsolete to the same structure with an obsolete assignment to the #' query src_compound_id.
#' @name get.SrcAll.obs
#' @docType package
#' @param x : Input string source compound id
#' @param y : Input integer to source id
#' @export
#' @examples
#' \donttest{
#' #get for drugbank compound and source
#' get.sAll.obs("DB07699",2)
#' #get for chembl compound and source
#' get.sAll.obs("CHEMBL12",1)
#' }
get.sAll.obs <- function(x,y){
  # All src_compound_ids (current AND obsolete) sharing the structure that
  # the obsolete query src_compound_id `x` (source `y`) was assigned to.
  # Returns a data frame merged with source metadata, or NULL on failure.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/src_compound_id_all_obsolete/%s/%d", x, y)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  data(SourceNames)
  # Attach human-readable source information to the UniChem result.
  merge(fromJSON(payload), SourceNames, by.x = "src_id", by.y = "src_id")
}
#' get.verbose.InCHIkey
#' @title Get all src_compound_ids to a query InChIKey
#' @description Returns a dataframe containing src_id (the src_id for this source),
#' src_url (the main home page of the source), name (the unique name for the source in
#' UniChem, always lower case), name_long (the full name of the source, as defined by the
#' source),name_label (A name for the source
#' suitable for use as a 'label' for the source within a web-page. Correct case setting
#' for source, and always less than 30 characters), description (a description of the
#' content of the source), base_id_url_available (an flag indicating whether this source
#' provides a valid base_id_url for creating cpd-specific links [1=yes, 0=no]),base_id_url
#' (the base url for constructing hyperlinks to this source [append an identifier from
#' this source to the end of this url to create a valid url to a specific page for this
#' cpd], unless aux_for_url=1), aux_for_url (A flag to indicate whether the aux_src field
#' should be used to create hyperlinks instead of the src_compound_id [1=yes, 0=no] ,
#' src_compound_id (a list of src_compound_ids from this source which are currently
#' assigned to the query InChIKey, aux_src (a list of src-compound_id keys mapping to
#' corresponding auxiliary data (url_id:value), for creating links if aux_for_url=1. Only
#' shown if aux_for_url=1).
#' @name get.verbose.InCHIkey
#' @docType package
#' @param x : Input string InCHI Key
#' @export
#' @examples
#' \donttest{
#' # get for InCHIkey
#' get.verbose.InCHIkey("GZUITABIAKMVPG-UHFFFAOYSA-N")
#'
#' get.verbose.InCHIkey("AAOVKJBEBIDNHE-UHFFFAOYSA-N")
#' }
get.verbose.InCHIkey <- function(x){
  # Verbose per-source record (source metadata + assigned src_compound_ids)
  # for a query InChIKey. Returns parsed JSON, or NULL on failure.
  query <- sprintf("https://www.ebi.ac.uk/unichem/rest/verbose_inchikey/%s", x)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)
}
#' get.cmp.inf
#' @title Get compound information from ChEMBL
#' @description These functions allow one to retrieve compounds information from ChEMBL
#' compounds are identified either by a ChEMBL ID or by a standard InChI key.
#' @name get.cmp.inf
#' @docType package
#' @param x : String representing chemblid or standard InCHI key for the molecule.
#' @param type : For \code{get.compound}, one of \code{chemblid} or
#' \code{stdinchi} to indicate the nature of the molecule id.
#' @export
#' @examples
#' \donttest{
#' #get information for chembl compound id
#' get.compound("CHEMBL12")
#'
#' #get information for standard inchi
#' get.compound("QFFGVLORLPOAEC-SNVBAGLBSA-N",type='stdinchi')
#' }
get.cmp.inf <- function(x, type='chemblid') {
  # Retrieve compound information from ChEMBL.
  #
  # x    : compound identifier — a ChEMBL ID or a standard InChIKey.
  # type : 'chemblid' or 'stdinchi' (partial matching via pmatch()).
  # Returns the parsed JSON record, or NULL on a non-200 response.
  types <- c('chemblid', 'stdinchi')
  type <- pmatch(type, types)
  if (is.na(type)) stop("Invalid type given")
  # switch() on an integer selects the alternative positionally; the
  # original's duplicated `url =` names were ignored and only confusing.
  base <- switch(type,
                 'https://www.ebi.ac.uk/chemblws/compounds/',
                 'https://www.ebi.ac.uk/chemblws/compounds/stdinchikey/')
  # BUG FIX: the original interpolated an undefined variable `id`; the
  # query identifier is the `x` argument.
  url <- sprintf('%s%s.json', base, x)
  h <- getCurlHandle()
  d <- getURL(url, curl=h)
  status <- getCurlInfo(h)$response.code
  rm(h)
  if (status == 200) {
    return(fromJSON(d))
  } else {
    return(NULL)
  }
}
#' get.cmp.sim
#' @title Retrive similar compounds from ChEMBL database.
#' @description This function retrieves a dataframe of similar compounds
#' from ChEMBL database given a smiles string as query and also given a similarity score above 70.
#' @name get.cmp.sim
#' @docType package
#' @param mol : String representing smiles of the moelcule
#' @param sim : Integer representing for percentage of similarity
#' for the query compound and the database molecules. Values ranges
#' from 70 to 100.
#' @export
get.cmp.sim <- function(mol,sim=70) {
  # Compounds similar to `mol` (SMILES) at or above `sim` percent similarity
  # (70-100). Returns the first element of the parsed JSON (the compound
  # table), or NULL on a non-200 response.
  query <- sprintf('https://www.ebi.ac.uk/chemblws/compounds/similarity/%s/%d.json', mol, sim)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  fromJSON(payload)[[1]]
}
#' get.cmp.substruct
#' @title Get compound information from substructure query smiles.
#' @description This function retrieves a dataframe of all compounds from ChEMBL database
#' containing the substructure represented by the given Canonical SMILES and their
#' chemical properties.
#' @name get.compound.substruct
#' @docType package
#' @param mol : String representing smiles of the moelcule
#' @export
#' @examples
#' \donttest{
#' #get compounds by substructure
#' get.cmp.subsruct("CN(CCCN)c1cccc2ccccc12")
#' }
get.cmp.substruct <- function(mol){
  # ChEMBL compounds containing the substructure given by `mol` (SMILES).
  # Returns the `compounds` element of the parsed JSON, or NULL on failure.
  query <- sprintf('https://www.ebi.ac.uk/chemblws/compounds/substructure/%s.json', mol)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  parsed <- fromJSON(payload)
  parsed$compounds
}
#' get.appDrugs
#' @title Get approved drugs for target.
#' @description This function retrieves a dataframe of all approved drug compounds from
#' ChEMBL database given a string of ChEMBL target ID.
#' @name get.appDrugs
#' @docType package
#' @param x : string ChEMBL target ID.
#' @export
#' @examples
#' \donttest{
#' #get chembl ids of approved drugs
#' get.appDrugs("CHEMBL1824")
#' }
get.appDrugs <- function(x){
  # Approved drug compounds for the ChEMBL target ID `x`.
  # Returns the first element of the parsed JSON (the drug table),
  # or NULL on a non-200 response.
  url <- sprintf('https://www.ebi.ac.uk/chemblws/targets/%s/approvedDrug.json', x)
  h <- getCurlHandle()
  d <- getURL(url, curl=h)
  status <- getCurlInfo(h)$response.code
  rm(h)
  if (status == 200) {
    # BUG FIX: the original wrote `return(fromJSON(d))[[1]]` — return()
    # exits immediately, so the trailing `[[1]]` was never evaluated and
    # the whole list was returned. The extraction belongs inside return().
    return(fromJSON(d)[[1]])
  } else {
    return(NULL)
  }
}
#' get.bioactivity.
#' @title Get Bioactivity Information for Compounds, Targets or Assays.
#' @description This method retrieves bioactivity information for a compound
#' across multiple targets/assays or else for a target across multiple compounds.
#' The function can also be used to retrieve all activities within a given assay.
#' In all cases, ChEMBL identifiers must be used.
#' @name get.bioactivity
#' @docType package
#' @param x : Input string chemblid
#' @param type : Input string \code{'compound'},\code{'target'},\code{'assay'}. Default is
#' \code{'compound'}.
#' @export
#' @examples
#' \donttest{
#' # get bioactivities of compounds
#' get.bioactivity("CHEMBL12",type='compound')
#'
#' # get compound bioactivities for targets
#' get.bioactivity("CHEMBL240",type="target")
#'
#' # get bioactivities by assay
#' get.bioactivity("CHEMBL1217643",type='assay')
#' }
get.bioactivity <- function(x, type='compound') {
  # Bioactivity records for a ChEMBL compound, target, or assay identifier.
  # `type` selects which endpoint is queried (partial matching allowed).
  # Returns a data frame of activities, or NULL on a non-200 response.
  types <- c('compound', 'target', 'assay')
  type <- pmatch(type, types)
  if (is.na(type)) stop("Invalid type given")
  # Pick the endpoint template by the matched type index.
  templates <- c('https://www.ebi.ac.uk/chemblws/compounds/%s/bioactivities.json',
                 'https://www.ebi.ac.uk/chemblws/targets/%s/bioactivities.json',
                 'https://www.ebi.ac.uk/chemblws/assays/%s/bioactivities.json')
  query <- sprintf(templates[type], x)
  handle <- getCurlHandle()
  payload <- getURL(query, curl = handle)
  code <- getCurlInfo(handle)$response.code
  rm(handle)
  if (code != 200) {
    return(NULL)
  }
  # Flatten the list of JSON records into one data frame.
  do.call(rbind, lapply(fromJSON(payload), data.frame))
}
#' get.moa
#' @title Get mechanism of action
#' @description Retrieves a data frame describing the mechanism of action for a
#' compound (where the compound is a drug) together with its drug targets.
#' @name get.moa
#' @docType package
#' @param x : Input string chemblid
#' @export
#' @examples
#' \donttest{
#' # get moa of drug
#' get.moa("CHEMBL1642")
#' }
get.moa<-function(x){
  # Build the drugMechanism endpoint URL for the given ChEMBL compound id
  endpoint <- sprintf('%s%s/drugMechanism.json',
                      'https://www.ebi.ac.uk/chemblws/compounds/', x)
  handle <- getCurlHandle()
  response <- getURL(endpoint, curl = handle)
  http_status <- getCurlInfo(handle)$response.code
  rm(handle)
  if (http_status != 200) {
    return(NULL)
  }
  # Flatten the JSON list-of-records into a single data frame
  do.call(rbind, lapply(fromJSON(response), data.frame))
}
#' get.targets
#' @title Get target information.
#' @description This function retrieves the target information by chembl id and
#' uniprot id and also retrieves all the names of targets by organisms. When
#' \code{org="Homo sapiens"} subsets the data frame by organism homo sapiens and retrieves
#' all the Homo sapiens targets
#' @name get.targets
#' @docType package
#' @param x : Input string chemblid
#' @param type : Input string 'chemblid' or 'uniprot'
#' @param org : Input string species name like "Homo sapiens","Plasmodium falciparum" and etc.
#' @export
#' @examples
#' \donttest{
#' #get target information by chembl ids
#' get.targets("CHEMBL1862",type='chemblid')
#'
#' #get target information by uniprot ids
#' get.targets("Q13936",type='uniprot')
#'
#' #get all the target information using organism name
#' get.targets(org="Homo sapiens")
#' }
get.targets <- function(x,type='chemblid',org=NULL){
  types <- c('chemblid', 'uniprot')
  type <- pmatch(type, types)
  if (is.na(type)) stop("Invalid type given")
  if (is.null(org)) {
    # Look up a single target by ChEMBL or UniProt identifier
    url <- switch(type, 'https://www.ebi.ac.uk/chemblws/targets/%s.json',
                  'https://www.ebi.ac.uk/chemblws/targets/uniprot/%s.json')
    url <- sprintf(url, x)
    h <- getCurlHandle()
    d <- getURL(url, curl = h)
    status <- getCurlInfo(h)$response.code
    rm(h)
    if (status == 200) {
      data <- do.call(rbind, lapply(fromJSON(d), data.frame))
      return(data)
    } else {
      return(NULL)
    }
  } else {
    # Download the full target list and subset the rows by organism
    url <- 'https://www.ebi.ac.uk/chemblws/targets.json'
    h <- getCurlHandle()
    d <- getURL(url, curl = h)
    status <- getCurlInfo(h)$response.code
    rm(h)
    if (status == 200) {
      df <- do.call(rbind, lapply(fromJSON(d), data.frame))
      # BUG FIX: the original `df[which(df$organism == org)]` was missing the
      # row/column comma, so it selected COLUMNS by position instead of
      # filtering rows by organism.
      data <- df[which(df$organism == org), , drop = FALSE]
      return(data)
    } else {
      return(NULL)
    }
  }
}
|
## Analysis of 2016 US presidential election results by county
## (county-level vote shares vs. demographics, plus choropleth maps).
library(lubridate)
library(tidyverse)
library(zoo)
library(tidyr)
library(choroplethr)
library(choroplethrMaps)
library(gridExtra)
library(magrittr)
#THE INSTALL ZONE
#install.packages("jbmisc") # package installations are only needed the first time you use it
#Import Data
uspres_results = read.csv("D:/CodingDocs/DataCamp/AnalyzingElectionDataInR/us_pres_2016_by_county.csv")
# Drop the national-level summary variables; keep county-level counts only
uspres_results.slim <- uspres_results %>%
  select(-c(is.national.winner, national.count, national.party.percent))
# Spread party and votes to their own columns (one row per county)
uspres_county <- uspres_results.slim %>%
  spread(key = party, value = vote.count)
# Add each party's share of the total county vote
uspres_county <- uspres_county %>%
  mutate(Dem.pct = D / county.total.count) %>%
  mutate(Rep.pct = R / county.total.count)
# Load and inspect the county demographic data (shipped with choroplethr)
data(df_county_demographics)
head(df_county_demographics)
# Rename 'region' to "county.fips" so it matches the results data
df_county_demographics <- df_county_demographics %>%
  rename("county.fips" = region)
# Join county demographics with vote-share data via the FIPS code
county_merged <- left_join(df_county_demographics,
                           uspres_county, by = "county.fips")
# Democratic vote share vs. percent white, with linear trend line
ggplot(county_merged, aes(x = percent_white, y = Dem.pct)) +
  geom_point() +
  geom_smooth(method = "lm")
# Democratic vote share vs. percent black
ggplot(county_merged, aes(x = percent_black, y = Dem.pct)) +
  geom_point() +
  geom_smooth(method = "lm")
# Democratic vote share vs. percent Hispanic
ggplot(county_merged, aes(x = percent_hispanic, y = Dem.pct)) +
  geom_point() +
  geom_smooth(method = "lm")
# Democratic vote share vs. per-capita income
ggplot(county_merged, aes(x = per_capita_income, y = Dem.pct)) +
  geom_point() +
  geom_smooth(method = "lm")
# Republican vote share vs. percent white
ggplot(county_merged, aes(x = percent_white, y = Rep.pct)) +
  geom_point() +
  geom_smooth(method = "lm")
# Rename columns to the region/value pair county_choropleth() expects
democratic_map <- county_merged %>%
  dplyr::rename("region" = county.fips,
                "value" = Dem.pct)
republican_map <- county_merged %>%
  dplyr::rename("region" = county.fips,
                "value" = Rep.pct)
white_map <- county_merged %>%
  dplyr::rename("region" = county.fips,
                "value" = percent_white)
hispanic_map <- county_merged %>%
  dplyr::rename("region" = county.fips,
                "value" = percent_hispanic)
black_map <- county_merged %>%
  dplyr::rename("region" = county.fips,
                "value" = percent_black)
county_choropleth(white_map)
county_choropleth(black_map)
county_choropleth(hispanic_map)
# BUG FIX: `ggplot(whiteplot)` referenced an object that is never defined
# anywhere in this script and would error at runtime; removed.
# Fit a linear model to predict Dem.pct from percent_white in each county
fit <- lm(Dem.pct ~ percent_white, data = county_merged)
# Evaluate the model
summary(fit)
# Add per-capita income as a second predictor
fit <- lm(Dem.pct ~ percent_white + per_capita_income, data = county_merged)
# BUG FIX: a mangled line "e model" (truncated "# Evaluate the model"
# comment) was a syntax error; restored as a comment.
summary(fit)
|
/AnalyzingElectionDataInR/Worksheets/Chapter 3.R
|
no_license
|
BenChurchus/R-Learning
|
R
| false
| false
| 3,481
|
r
|
## Analysis of 2016 US presidential election results by county
## (county-level vote shares vs. demographics, plus choropleth maps).
library(lubridate)
library(tidyverse)
library(zoo)
library(tidyr)
library(choroplethr)
library(choroplethrMaps)
library(gridExtra)
library(magrittr)
#THE INSTALL ZONE
#install.packages("jbmisc") # package installations are only needed the first time you use it
#Import Data
uspres_results = read.csv("D:/CodingDocs/DataCamp/AnalyzingElectionDataInR/us_pres_2016_by_county.csv")
# Drop the national-level summary variables; keep county-level counts only
uspres_results.slim <- uspres_results %>%
  select(-c(is.national.winner, national.count, national.party.percent))
# Spread party and votes to their own columns (one row per county)
uspres_county <- uspres_results.slim %>%
  spread(key = party, value = vote.count)
# Add each party's share of the total county vote
uspres_county <- uspres_county %>%
  mutate(Dem.pct = D / county.total.count) %>%
  mutate(Rep.pct = R / county.total.count)
# Load and inspect the county demographic data (shipped with choroplethr)
data(df_county_demographics)
head(df_county_demographics)
# Rename 'region' to "county.fips" so it matches the results data
df_county_demographics <- df_county_demographics %>%
  rename("county.fips" = region)
# Join county demographics with vote-share data via the FIPS code
county_merged <- left_join(df_county_demographics,
                           uspres_county, by = "county.fips")
# Democratic vote share vs. percent white, with linear trend line
ggplot(county_merged, aes(x = percent_white, y = Dem.pct)) +
  geom_point() +
  geom_smooth(method = "lm")
# Democratic vote share vs. percent black
ggplot(county_merged, aes(x = percent_black, y = Dem.pct)) +
  geom_point() +
  geom_smooth(method = "lm")
# Democratic vote share vs. percent Hispanic
ggplot(county_merged, aes(x = percent_hispanic, y = Dem.pct)) +
  geom_point() +
  geom_smooth(method = "lm")
# Democratic vote share vs. per-capita income
ggplot(county_merged, aes(x = per_capita_income, y = Dem.pct)) +
  geom_point() +
  geom_smooth(method = "lm")
# Republican vote share vs. percent white
ggplot(county_merged, aes(x = percent_white, y = Rep.pct)) +
  geom_point() +
  geom_smooth(method = "lm")
# Rename columns to the region/value pair county_choropleth() expects
democratic_map <- county_merged %>%
  dplyr::rename("region" = county.fips,
                "value" = Dem.pct)
republican_map <- county_merged %>%
  dplyr::rename("region" = county.fips,
                "value" = Rep.pct)
white_map <- county_merged %>%
  dplyr::rename("region" = county.fips,
                "value" = percent_white)
hispanic_map <- county_merged %>%
  dplyr::rename("region" = county.fips,
                "value" = percent_hispanic)
black_map <- county_merged %>%
  dplyr::rename("region" = county.fips,
                "value" = percent_black)
county_choropleth(white_map)
county_choropleth(black_map)
county_choropleth(hispanic_map)
# BUG FIX: `ggplot(whiteplot)` referenced an object that is never defined
# anywhere in this script and would error at runtime; removed.
# Fit a linear model to predict Dem.pct from percent_white in each county
fit <- lm(Dem.pct ~ percent_white, data = county_merged)
# Evaluate the model
summary(fit)
# Add per-capita income as a second predictor
fit <- lm(Dem.pct ~ percent_white + per_capita_income, data = county_merged)
# BUG FIX: a mangled line "e model" (truncated "# Evaluate the model"
# comment) was a syntax error; restored as a comment.
summary(fit)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutationRatio.R
\name{readMPFile_localMutRate}
\alias{readMPFile_localMutRate}
\title{read the mutation position format file and extract the genomic position as a mutation feature}
\usage{
readMPFile_localMutRate(infile)
}
\arguments{
\item{infile}{the path for the input mutation position format file}
}
\description{
read the mutation position format file and extract the genomic position as a mutation feature
}
|
/man/readMPFile_localMutRate.Rd
|
no_license
|
kojimaryuta/pmsignature
|
R
| false
| true
| 494
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutationRatio.R
\name{readMPFile_localMutRate}
\alias{readMPFile_localMutRate}
\title{read the mutation position format file and extract the genomic position as a mutation feature}
\usage{
readMPFile_localMutRate(infile)
}
\arguments{
\item{infile}{the path for the input mutation position format file}
}
\description{
read the mutation position format file and extract the genomic position as a mutation feature
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rasterLocalSums.R
\name{rasterLocalSums}
\alias{rasterLocalSums}
\title{Local sums for an in memory raster image.}
\usage{
rasterLocalSums(r, W)
}
\arguments{
\item{r}{An in memory raster image.}
\item{W}{A matrix of weights. The sums will be applied at each centroid.
Dimensions must be non-zero and odd. Only non-missing neighbors are used in
the sum.}
}
\value{
An in memory raster image of local sums.
}
\description{
\code{rasterLocalSums} finds the local sum within the weighted neighborhood of W.
}
\details{
A spatial neighborhood is calculated for each pixel in \code{r}.
The spatial neighborhood for each pixel is defined by the weight matrix
\code{W}, where the center of the odd dimensioned matrix \code{W} is identified
with the target pixel. The target pixel value is replaced with the sum of
all pixels within the neighborhood weighted by \code{W}. Only non-missing
or neighbors with non-zero weights are used in the calculation.
}
\examples{
r <- raster::raster( matrix(rnorm(36),6,6))
W <- matrix(1,3,3)
sumR <- rasterLocalSums(r,W)
}
|
/man/rasterLocalSums.Rd
|
no_license
|
cran/rasterKernelEstimates
|
R
| false
| true
| 1,151
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rasterLocalSums.R
\name{rasterLocalSums}
\alias{rasterLocalSums}
\title{Local sums for an in memory raster image.}
\usage{
rasterLocalSums(r, W)
}
\arguments{
\item{r}{An in memory raster image.}
\item{W}{A matrix of weights. The sums will be applied at each centroid.
Dimensions must be non-zero and odd. Only non-missing neighbors are used in
the sum.}
}
\value{
An in memory raster image of local sums.
}
\description{
\code{rasterLocalSums} finds the local sum within the weighted neighborhood of W.
}
\details{
A spatial neighborhood is calculated for each pixel in \code{r}.
The spatial neighborhood for each pixel is defined by the weight matrix
\code{W}, where the center of the odd dimensioned matrix \code{W} is identified
with the target pixel. The target pixel value is replaced with the sum of
all pixels within the neighborhood weighted by \code{W}. Only non-missing
or neighbors with non-zero weights are used in the calculation.
}
\examples{
r <- raster::raster( matrix(rnorm(36),6,6))
W <- matrix(1,3,3)
sumR <- rasterLocalSums(r,W)
}
|
# Auto-generated (AFL fuzzing) regression input for the CNull package.
# `m` is deliberately NULL and `in_m` is an 8x3 matrix containing extreme
# double values, to exercise edge-case handling in the compiled routine.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 6.04642905895392e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
# Call the internal function with the fuzzed argument list; str() prints
# whatever comes back so the (valgrind) run has observable output.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615782537-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 329
|
r
|
# Auto-generated (AFL fuzzing) regression input for the CNull package.
# `m` is deliberately NULL and `in_m` is an 8x3 matrix containing extreme
# double values, to exercise edge-case handling in the compiled routine.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 6.04642905895392e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
# Call the internal function with the fuzzed argument list; str() prints
# whatever comes back so the (valgrind) run has observable output.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
# Auto-extracted example code for the `pveg` dataset shipped with the
# `dave` package; prints a summary of the Soppensee pollen data.
library(dave)
### Name: pveg
### Title: Soppensee pollen data
### Aliases: pveg
### Keywords: datasets
### ** Examples
summary(pveg)
|
/data/genthat_extracted_code/dave/examples/pveg.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 140
|
r
|
# Auto-extracted example code for the `pveg` dataset shipped with the
# `dave` package; prints a summary of the Soppensee pollen data.
library(dave)
### Name: pveg
### Title: Soppensee pollen data
### Aliases: pveg
### Keywords: datasets
### ** Examples
summary(pveg)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\encoding{UTF-8}
\name{poblacion}
\alias{poblacion}
\title{Datos de poblacion por seccion censal para las ciudades MEDEA3
(periodo 2006-2016)}
\format{Un objeto de clase \code{poblaciones_ine} donde las filas representan
la combinación de las distintas secciones censales de MEDEA3, sexos y años
del periodo de estudio. Las cuatro primeras columnas son: \describe{
\item{seccion}{Código de la sección censal.} \item{sexo}{0 hombres; 1
mujeres.} \item{year}{Año.} \item{medea3}{Pertenencia de la sección al
proyecto MEDEA3.} } El resto de columnas representan los distintos grupos
de edad. Todo objeto de la clase \code{poblaciones_ine} deberá tener este
formato.}
\usage{
poblacion
}
\description{
Datos de población por sexo (0=Hombres, 1=Mujeres), edad (grupos
quinquenales) y año (periodo 2006-2016) a nivel de sección censal para las
ciudades de MEDEA3. Estos datos han sido descargados de la web del INE, que
los publica de forma libre, y se pueden obtener usando la función
\code{\link{descarga_poblaciones}} de este paquete.
}
\details{
Los códigos de sección censal (columna \code{seccion} del
\code{data.frame} \code{poblacion}) se corresponden con el identificador
habitual de secciones censales según el INE, es decir: los primeros dos
dígitos identifican la provincia, los siguientes tres dígitos el municipio,
los próximos dos dígitos el distrito y los últimos tres la sección censal.
Los 5 primeros dígitos de este identificador se corresponden con el código
INE del respectivo municipio.
Hasta el año 2010 (inclusive) el INE agrupa la última categoría de edad
como 85 y más, mientras que desde el año 2011 llega hasta 100 y más. Los
últimas columnas de \code{poblacion} tienen información detallada de los
grupos de edad mayores para los años posteriores a 2010, por si esta
pudiera ser de utilidad en algún momento. En cualquier caso, la casilla
correspondiente al grupo de edad 85 y más para dichos años también tiene la
información agregada para los grupos de edad mayores de 85, de la misma
forma que los años anteriores.
El paquete \code{medear} dispone también de los datos para todo el periodo
1996-2016 pero estos están encriptados ya que los datos para el periodo
1996-2005 son propiedad del INE, que han sido adquiridos para uso exclusivo
del proyecto MEDEA3. Estos datos son accesibles mediante la función
\code{\link{carga_datos}} que necesita una contraseña de desencriptación,
que se hará disponible a todos los grupos del proyecto MEDEA. La llamada a
\code{\link{carga_datos}} produce un data.frame con exactamente el mismo
formato que \code{poblacion}, de hecho machaca dicho objeto, pero con la
información adicional del periodo 1996-2005.
Notar que las poblaciones corresponden al seccionado censal de cada año por
lo que algunas de las secciones censales consideradas pueden no tener
información para todo el periodo 2006-2016 si es que dicha sección no ha
existido durante todo este periodo. Este comentario también aplica a la
función \code{\link{carga_datos}}.
}
\examples{
\dontrun{
library(medear)
data("poblacion")
# Información de poblaciones de la sección censal 01001 de Valencia (código INE 46250)
poblacion[poblacion$seccion == "4625001001", ]
# Información de poblaciones de toda la ciudad de Valencia
poblacion[substring(poblacion$seccion, 1, 5) == "46250", ]
}
}
\references{
\href{http://www.ine.es/}{Sitio web del INE}.
\href{http://www.ine.es/dyngs/INEbase/es/operacion.htm?c=Estadistica_C&cid=1254736177012&menu=resultados&secc=1254736195461&idp=1254734710990}{
Poblaciones}.
}
\seealso{
\code{\link{descarga_poblaciones}}, \code{\link{carga_datos}}
}
\keyword{datasets}
|
/man/poblacion.Rd
|
no_license
|
pcorpas/medear
|
R
| false
| true
| 3,874
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\encoding{UTF-8}
\name{poblacion}
\alias{poblacion}
\title{Datos de poblacion por seccion censal para las ciudades MEDEA3
(periodo 2006-2016)}
\format{Un objeto de clase \code{poblaciones_ine} donde las filas representan
la combinación de las distintas secciones censales de MEDEA3, sexos y años
del periodo de estudio. Las cuatro primeras columnas son: \describe{
\item{seccion}{Código de la sección censal.} \item{sexo}{0 hombres; 1
mujeres.} \item{year}{Año.} \item{medea3}{Pertenencia de la sección al
proyecto MEDEA3.} } El resto de columnas representan los distintos grupos
de edad. Todo objeto de la clase \code{poblaciones_ine} deberá tener este
formato.}
\usage{
poblacion
}
\description{
Datos de población por sexo (0=Hombres, 1=Mujeres), edad (grupos
quinquenales) y año (periodo 2006-2016) a nivel de sección censal para las
ciudades de MEDEA3. Estos datos han sido descargados de la web del INE, que
los publica de forma libre, y se pueden obtener usando la función
\code{\link{descarga_poblaciones}} de este paquete.
}
\details{
Los códigos de sección censal (columna \code{seccion} del
\code{data.frame} \code{poblacion}) se corresponden con el identificador
habitual de secciones censales según el INE, es decir: los primeros dos
dígitos identifican la provincia, los siguientes tres dígitos el municipio,
los próximos dos dígitos el distrito y los últimos tres la sección censal.
Los 5 primeros dígitos de este identificador se corresponden con el código
INE del respectivo municipio.
Hasta el año 2010 (inclusive) el INE agrupa la última categoría de edad
como 85 y más, mientras que desde el año 2011 llega hasta 100 y más. Los
últimas columnas de \code{poblacion} tienen información detallada de los
grupos de edad mayores para los años posteriores a 2010, por si esta
pudiera ser de utilidad en algún momento. En cualquier caso, la casilla
correspondiente al grupo de edad 85 y más para dichos años también tiene la
información agregada para los grupos de edad mayores de 85, de la misma
forma que los años anteriores.
El paquete \code{medear} dispone también de los datos para todo el periodo
1996-2016 pero estos están encriptados ya que los datos para el periodo
1996-2005 son propiedad del INE, que han sido adquiridos para uso exclusivo
del proyecto MEDEA3. Estos datos son accesibles mediante la función
\code{\link{carga_datos}} que necesita una contraseña de desencriptación,
que se hará disponible a todos los grupos del proyecto MEDEA. La llamada a
\code{\link{carga_datos}} produce un data.frame con exactamente el mismo
formato que \code{poblacion}, de hecho machaca dicho objeto, pero con la
información adicional del periodo 1996-2005.
Notar que las poblaciones corresponden al seccionado censal de cada año por
lo que algunas de las secciones censales consideradas pueden no tener
información para todo el periodo 2006-2016 si es que dicha sección no ha
existido durante todo este periodo. Este comentario también aplica a la
función \code{\link{carga_datos}}.
}
\examples{
\dontrun{
library(medear)
data("poblacion")
# Información de poblaciones de la sección censal 01001 de Valencia (código INE 46250)
poblacion[poblacion$seccion == "4625001001", ]
# Información de poblaciones de toda la ciudad de Valencia
poblacion[substring(poblacion$seccion, 1, 5) == "46250", ]
}
}
\references{
\href{http://www.ine.es/}{Sitio web del INE}.
\href{http://www.ine.es/dyngs/INEbase/es/operacion.htm?c=Estadistica_C&cid=1254736177012&menu=resultados&secc=1254736195461&idp=1254734710990}{
Poblaciones}.
}
\seealso{
\code{\link{descarga_poblaciones}}, \code{\link{carga_datos}}
}
\keyword{datasets}
|
## Exploratory Data Analysis, plot 3: energy sub-metering over time for
## 2007-02-01 and 2007-02-02, one line per sub-meter, written to plot3.png.
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                    stringsAsFactors = FALSE, na.strings = "?")
# Keep only the two days of interest
power <- subset(power, power$Date == "1/2/2007" | power$Date == "2/2/2007")
# Combine the date and time columns into POSIX timestamps for the x axis
timestamps <- strptime(paste(power$Date, power$Time, sep = " "),
                       "%d/%m/%Y %H:%M:%S")
meter1 <- as.numeric(power$Sub_metering_1)
meter2 <- as.numeric(power$Sub_metering_2)
meter3 <- as.numeric(power$Sub_metering_3)
png("plot3.png", width = 480, height = 480)
plot(timestamps, meter1, type = "l", ylab = "Energy Submetering", xlab = "")
lines(timestamps, meter2, type = "l", col = "red")
lines(timestamps, meter3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
dev.off()
|
/plot3.R
|
no_license
|
David-Wang-NJU/ExData_Plotting1
|
R
| false
| false
| 702
|
r
|
## Exploratory Data Analysis, plot 3: energy sub-metering over time for
## 2007-02-01 and 2007-02-02, one line per sub-meter, written to plot3.png.
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                    stringsAsFactors = FALSE, na.strings = "?")
# Keep only the two days of interest
power <- subset(power, power$Date == "1/2/2007" | power$Date == "2/2/2007")
# Combine the date and time columns into POSIX timestamps for the x axis
timestamps <- strptime(paste(power$Date, power$Time, sep = " "),
                       "%d/%m/%Y %H:%M:%S")
meter1 <- as.numeric(power$Sub_metering_1)
meter2 <- as.numeric(power$Sub_metering_2)
meter3 <- as.numeric(power$Sub_metering_3)
png("plot3.png", width = 480, height = 480)
plot(timestamps, meter1, type = "l", ylab = "Energy Submetering", xlab = "")
lines(timestamps, meter2, type = "l", col = "red")
lines(timestamps, meter3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
dev.off()
|
#' Launch the Shiny application from the package source tree ("inst/app").
#'
#' NOTE(review): the `...` arguments are accepted but currently ignored —
#' the commented-out call shows the intended forwarding to shiny::runApp().
#' Confirm whether forwarding should be restored before relying on `...`.
#' @export
startApplicationLocal <- function(...) {
  #shiny::runApp("inst/app", ...)
  shiny::runApp("inst/app")
}
|
/R/startApplication.R
|
no_license
|
farrierworks/Availability-Report-R
|
R
| false
| false
| 116
|
r
|
#' Launch the Shiny application from the package source tree ("inst/app").
#'
#' NOTE(review): the `...` arguments are accepted but currently ignored —
#' the commented-out call shows the intended forwarding to shiny::runApp().
#' Confirm whether forwarding should be restored before relying on `...`.
#' @export
startApplicationLocal <- function(...) {
  #shiny::runApp("inst/app", ...)
  shiny::runApp("inst/app")
}
|
## GO enrichment (via gProfiler) of over-/under-expressed proteins for one
## gastric-cancer tumor/normal pair; writes one result table per direction.
options(stringsAsFactors = FALSE)
args = commandArgs(trailingOnly=TRUE)
f = '~/GoogleDrive/gastric_cancer_samples/Tables/DEP_blastp/table.DEP_blastp.N13T236.txt'
# f = args[1]
# Sample identifier is the third dot-separated token of the file name
sample = strsplit(f, '.', fixed = TRUE)[[1]][3]
library(tidyr)
library(dplyr)
library(gProfileR)
## Load the differential-expression table
dep_table <- as.data.frame(read.delim(f))
## Significant genes, split by the direction of the t statistic
over_genes <- dep_table %>% filter(adj.P.Val < 0.05 & t > 0) %>% pull(symbol)
under_genes <- dep_table %>% filter(adj.P.Val < 0.05 & t < 0) %>% pull(symbol)
## GO enrichment for each gene list
enrich_over <- gprofiler(over_genes, organism = "hsapiens")
enrich_under <- gprofiler(under_genes, organism = "hsapiens")
## Write the enrichment results, one tab-separated file per direction
over_out <- paste("~/GoogleDrive/gastric_cancer_samples/Tables/GO/overexpressed_gp/overexpressed", sample, 'txt', sep = ".")
under_out <- paste("~/GoogleDrive/gastric_cancer_samples/Tables/GO/underexpressed_gp/underexpressed", sample, 'txt', sep = ".")
write.table(enrich_over, over_out, sep = '\t', quote = FALSE, row.names = FALSE, col.names = TRUE)
write.table(enrich_under, under_out, sep = '\t', quote = FALSE, row.names = FALSE, col.names = TRUE)
|
/GO_analysis_EOGC.R
|
no_license
|
dahae-lee/gastric_cancer_samples
|
R
| false
| false
| 1,080
|
r
|
## GO enrichment (via gProfiler) of over-/under-expressed proteins for one
## gastric-cancer tumor/normal pair; writes one result table per direction.
options(stringsAsFactors = FALSE)
args = commandArgs(trailingOnly=TRUE)
f = '~/GoogleDrive/gastric_cancer_samples/Tables/DEP_blastp/table.DEP_blastp.N13T236.txt'
# f = args[1]
# Sample identifier is the third dot-separated token of the file name
sample = strsplit(f, '.', fixed = TRUE)[[1]][3]
library(tidyr)
library(dplyr)
library(gProfileR)
## Load the differential-expression table
dep_table <- as.data.frame(read.delim(f))
## Significant genes, split by the direction of the t statistic
over_genes <- dep_table %>% filter(adj.P.Val < 0.05 & t > 0) %>% pull(symbol)
under_genes <- dep_table %>% filter(adj.P.Val < 0.05 & t < 0) %>% pull(symbol)
## GO enrichment for each gene list
enrich_over <- gprofiler(over_genes, organism = "hsapiens")
enrich_under <- gprofiler(under_genes, organism = "hsapiens")
## Write the enrichment results, one tab-separated file per direction
over_out <- paste("~/GoogleDrive/gastric_cancer_samples/Tables/GO/overexpressed_gp/overexpressed", sample, 'txt', sep = ".")
under_out <- paste("~/GoogleDrive/gastric_cancer_samples/Tables/GO/underexpressed_gp/underexpressed", sample, 'txt', sep = ".")
write.table(enrich_over, over_out, sep = '\t', quote = FALSE, row.names = FALSE, col.names = TRUE)
write.table(enrich_under, under_out, sep = '\t', quote = FALSE, row.names = FALSE, col.names = TRUE)
|
# Bioconductor packages required for the genome-analysis image.
pkgs <- c("ensembldb", "qvalue", "plyranges", "ComplexHeatmap", "gtrellis")
# Install non-interactively: no update of existing packages, no prompts.
BiocManager::install(pkgs, update = FALSE, ask = FALSE)
|
/install_genome_pkgs.R
|
no_license
|
ccwang002/rocker-genome
|
R
| false
| false
| 150
|
r
|
# Bioconductor packages required for the genome-analysis image.
pkgs <- c("ensembldb", "qvalue", "plyranges", "ComplexHeatmap", "gtrellis")
# Install non-interactively: no update of existing packages, no prompts.
BiocManager::install(pkgs, update = FALSE, ask = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rsquared.R
\name{rsquared.glmmPQL}
\alias{rsquared.glmmPQL}
\title{R^2 for glmmPQL objects}
\usage{
rsquared.glmmPQL(model, method = "trigamma")
}
\description{
R^2 for glmmPQL objects
}
\keyword{internal}
|
/man/rsquared.glmmPQL.Rd
|
no_license
|
cran/piecewiseSEM
|
R
| false
| true
| 296
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rsquared.R
\name{rsquared.glmmPQL}
\alias{rsquared.glmmPQL}
\title{R^2 for glmmPQL objects}
\usage{
rsquared.glmmPQL(model, method = "trigamma")
}
\description{
R^2 for glmmPQL objects
}
\keyword{internal}
|
#'Sort out sampling times, coalescent times and sampling lineages from a phylogenetic tree
#'
##' \code{att} sorts out sampling times, coalescent times and sampling lineages from a phylogenetic tree.
##'
##' @param phy A phylogenetic tree.
##' @param eps Difference parameter to separate coalescent and sampling event.
##'
##' @return A data frame with one row per event: \code{t} the event time and
##'   \code{A} the number of active lineages immediately after that event.
##'
##' @references Palacios JA and Minin VN. Integrated nested Laplace approximation for Bayesian nonparametric phylodynamics, in Proceedings of the Twenty-Eighth Conference on Uncertainty in Artificial Intelligence, 2012.
##' @examples
##' library(ape)
##' t1=rcoal(20)
##' att(t1)
##'
##'
##' @author Simon Frost (\email{sdwfrost@@gmail.com})
##'
##'
##' @export
att <- function(phy, eps = 1e-6) {
  # Event times from the tree: internal nodes carry negative names
  # (coalescences), tips carry positive names (samples).
  b.s.times <- branching.sampling.times(phy)
  int.ind <- which(as.numeric(names(b.s.times)) < 0)
  tip.ind <- which(as.numeric(names(b.s.times)) > 0)
  # CLEANUP: removed dead locals from the original (num.tips,
  # num.coal.events, sampl.suf.stat, coal.interval, coal.lineages) — they
  # were assigned and never used.
  sorted.coal.times <- sort(b.s.times[int.ind])
  names(sorted.coal.times) <- NULL
  sampling.times <- sort(b.s.times[tip.ind])
  # Merge sampling times that are closer than eps into one sampling event
  for (i in 2:length(sampling.times)) {
    if ((sampling.times[i] - sampling.times[i - 1]) < eps) {
      sampling.times[i] <- sampling.times[i - 1]
    }
  }
  unique.sampling.times <- unique(sampling.times)
  # Count how many lineages are sampled at each unique sampling time
  sampled.lineages <- NULL
  for (sample.time in unique.sampling.times) {
    sampled.lineages <- c(sampled.lineages,
                          sum(sampling.times == sample.time))
  }
  if (sum(sorted.coal.times %in% unique.sampling.times) > 0) {
    stop("Simultaneous sample and coalescence time")
  }
  all.times <- sort(unique(c(unique.sampling.times, sorted.coal.times)))
  # The first event (in reverse time) must be a sampling event
  if (!(all.times[1] %in% unique.sampling.times)) {
    stop("Samples must be first (in reverse time)")
  }
  # Walk through the events: a sampling event adds its lineages, a
  # coalescence removes exactly one lineage.
  A <- rep(0, length(all.times))
  lastA <- 0
  for (i in seq_along(all.times)) {
    is.sample <- match(all.times[i], unique.sampling.times)
    if (!is.na(is.sample)) {
      A[i] <- lastA + sampled.lineages[is.sample]
    } else {
      A[i] <- lastA - 1
    }
    lastA <- A[i]
  }
  data.frame(t = all.times, A = A)
}
|
/R/att.R
|
permissive
|
pboesu/GenieR
|
R
| false
| false
| 2,360
|
r
|
#'Sort out sampling times, coalescent times and sampling lineages from a phylogenetic tree
#'
##' \code{att} sorts out sampling times, coalescent times and sampling lineages from a phylogenetic tree.
##'
##' @param phy A phylogenetic tree.
##' @param eps Difference parameter to separate coalescent and sampling event.
##'
##' @return A data frame with one row per event: \code{t} the event time and
##'   \code{A} the number of active lineages immediately after that event.
##'
##' @references Palacios JA and Minin VN. Integrated nested Laplace approximation for Bayesian nonparametric phylodynamics, in Proceedings of the Twenty-Eighth Conference on Uncertainty in Artificial Intelligence, 2012.
##' @examples
##' library(ape)
##' t1=rcoal(20)
##' att(t1)
##'
##'
##' @author Simon Frost (\email{sdwfrost@@gmail.com})
##'
##'
##' @export
att <- function(phy, eps = 1e-6) {
  # Event times from the tree: internal nodes carry negative names
  # (coalescences), tips carry positive names (samples).
  b.s.times <- branching.sampling.times(phy)
  int.ind <- which(as.numeric(names(b.s.times)) < 0)
  tip.ind <- which(as.numeric(names(b.s.times)) > 0)
  # CLEANUP: removed dead locals from the original (num.tips,
  # num.coal.events, sampl.suf.stat, coal.interval, coal.lineages) — they
  # were assigned and never used.
  sorted.coal.times <- sort(b.s.times[int.ind])
  names(sorted.coal.times) <- NULL
  sampling.times <- sort(b.s.times[tip.ind])
  # Merge sampling times that are closer than eps into one sampling event
  for (i in 2:length(sampling.times)) {
    if ((sampling.times[i] - sampling.times[i - 1]) < eps) {
      sampling.times[i] <- sampling.times[i - 1]
    }
  }
  unique.sampling.times <- unique(sampling.times)
  # Count how many lineages are sampled at each unique sampling time
  sampled.lineages <- NULL
  for (sample.time in unique.sampling.times) {
    sampled.lineages <- c(sampled.lineages,
                          sum(sampling.times == sample.time))
  }
  if (sum(sorted.coal.times %in% unique.sampling.times) > 0) {
    stop("Simultaneous sample and coalescence time")
  }
  all.times <- sort(unique(c(unique.sampling.times, sorted.coal.times)))
  # The first event (in reverse time) must be a sampling event
  if (!(all.times[1] %in% unique.sampling.times)) {
    stop("Samples must be first (in reverse time)")
  }
  # Walk through the events: a sampling event adds its lineages, a
  # coalescence removes exactly one lineage.
  A <- rep(0, length(all.times))
  lastA <- 0
  for (i in seq_along(all.times)) {
    is.sample <- match(all.times[i], unique.sampling.times)
    if (!is.na(is.sample)) {
      A[i] <- lastA + sampled.lineages[is.sample]
    } else {
      A[i] <- lastA - 1
    }
    lastA <- A[i]
  }
  data.frame(t = all.times, A = A)
}
|
# Exploratory ggplot2 charts of the Titanic passenger data
# (`titanic` is assumed to already be in the workspace).
# Inspect the structure of the data
str(titanic)
# Passenger class broken down by sex, bars side by side
ggplot(titanic, aes(x = Pclass, fill = Sex)) +
  geom_bar(position = "dodge")
# Same chart, additionally faceted by survival status
ggplot(titanic, aes(x = Pclass, fill = Sex)) +
  geom_bar(position = "dodge") +
  facet_grid(. ~ Survived)
# Jitter-dodge position object: horizontal jitter only, dodged by sex
jitter_dodge <- position_jitterdodge(jitter.width = 0.5,
                                     jitter.height = 0,
                                     dodge.width = 0.6)
# Age by class and sex, faceted by survival, using the position object
ggplot(titanic, aes(x = Pclass, y = Age, color = Sex)) +
  geom_point(size = 3, alpha = 0.5, position = jitter_dodge) +
  facet_grid(. ~ Survived)
|
/titanicggPlotexercise.R
|
no_license
|
ramsubra1/Titanic
|
R
| false
| false
| 681
|
r
|
# Exploratory ggplot2 charts of the Titanic passenger data
# (`titanic` is assumed to already be in the workspace).
# Inspect the structure of the data
str(titanic)
# Passenger class broken down by sex, bars side by side
ggplot(titanic, aes(x = Pclass, fill = Sex)) +
  geom_bar(position = "dodge")
# Same chart, additionally faceted by survival status
ggplot(titanic, aes(x = Pclass, fill = Sex)) +
  geom_bar(position = "dodge") +
  facet_grid(. ~ Survived)
# Jitter-dodge position object: horizontal jitter only, dodged by sex
jitter_dodge <- position_jitterdodge(jitter.width = 0.5,
                                     jitter.height = 0,
                                     dodge.width = 0.6)
# Age by class and sex, faceted by survival, using the position object
ggplot(titanic, aes(x = Pclass, y = Age, color = Sex)) +
  geom_point(size = 3, alpha = 0.5, position = jitter_dodge) +
  facet_grid(. ~ Survived)
|
plot2 <- function() {
  ## Answer: have total PM2.5 emissions decreased in Baltimore City,
  ## Maryland (fips == "24510") from 1999 to 2008? Produces a base-graphics
  ## bar plot of total emissions per year, written to plot2.png.
  NEI <- readRDS("summarySCC_PM25.rds")
  # Split records by county FIPS code, keep Baltimore, total by year
  DF <- split(NEI, as.factor(NEI$fips))
  EBY <- aggregate(Emissions ~ year, DF$'24510', sum)
  png('plot2.png')
  # BUG FIX: the y-axis label read "PM20"; these are PM2.5 emissions.
  barplot(EBY$Emissions, names.arg = EBY$year,
          main = "Total PM2.5 Emission from Baltimore",
          xlab = "Year", ylab = "PM2.5 Emission in tons")
  dev.off()
}
|
/ExploratoryAnalysisAssignment2/plot2.R
|
no_license
|
gyuen922/CourseProject2
|
R
| false
| false
| 531
|
r
|
plot2 <- function() {
  ## Answer: have total PM2.5 emissions decreased in Baltimore City,
  ## Maryland (fips == "24510") from 1999 to 2008? Produces a base-graphics
  ## bar plot of total emissions per year, written to plot2.png.
  NEI <- readRDS("summarySCC_PM25.rds")
  # Split records by county FIPS code, keep Baltimore, total by year
  DF <- split(NEI, as.factor(NEI$fips))
  EBY <- aggregate(Emissions ~ year, DF$'24510', sum)
  png('plot2.png')
  # BUG FIX: the y-axis label read "PM20"; these are PM2.5 emissions.
  barplot(EBY$Emissions, names.arg = EBY$year,
          main = "Total PM2.5 Emission from Baltimore",
          xlab = "Year", ylab = "PM2.5 Emission in tons")
  dev.off()
}
|
# Load editor-integration packages only in interactive sessions, so that
# Rscript/batch runs are not slowed down or broken by missing packages.
if(interactive()){
library(setwidth)  # keep console width in sync with the terminal size
options(vimcom.verbose = 0) # optional
library(vimcom)  # Vim <-> R communication bridge
}
|
/.Rprofile
|
no_license
|
grgurev/dotfiles
|
R
| false
| false
| 106
|
rprofile
|
# Load editor-integration packages only in interactive sessions, so that
# Rscript/batch runs are not slowed down or broken by missing packages.
if(interactive()){
library(setwidth)  # keep console width in sync with the terminal size
options(vimcom.verbose = 0) # optional
library(vimcom)  # Vim <-> R communication bridge
}
|
# Pipeline: predict DNA methylation from pseudobulk scRNA-seq expression,
# using a model trained on TCGA 450k data, then evaluate against WGBS
# measurements from 14 AML patients. Paths are hard-coded to a cluster
# filesystem; this script is not portable as written.
# -------------------------------------------
# common CpG between training and AML samples
# -------------------------------------------
d <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/data/tcga_450k/proc/hg38/combine/me.rds')
loc <- readRDS('/home-4/zji4@jhu.edu/scratch/metpred/data/tcga_450k/proc/hg38/loc/hg38.rds')
# Map 450k probe IDs to genomic coordinates (loc is a named lookup vector).
rownames(d) <- loc[rownames(d)]
amlme.cs <- readRDS('/home-4/whou10@jhu.edu/data2/whou10/aml_feinberg/wgbs/processed/hg38/all_cpg_by_14_patients_completecases.rds')
int <- intersect(rownames(d), rownames(amlme.cs)) ## [1:156912]
amlme.cs <- amlme.cs[int, ]
# ----------------------------------
# highly variable CpG in AML samples
# ----------------------------------
cm <- rowMeans(amlme.cs)
# Row-wise sample standard deviation: rowMeans(x^2 - mean^2) is the
# population variance; the n/(n-1) factor makes it the sample variance.
csv <- sqrt(rowMeans(amlme.cs * amlme.cs - cm^2) / (ncol(amlme.cs) - 1) * ncol(amlme.cs))
# > summary(csv)
# Keep only CpGs with SD >= 0.2 across the 14 patients.
amlme.cs <- amlme.cs[csv >= 0.2, ]
# > str(amlme.cs)
# Fixed seed so the 10k-CpG subsample is reproducible.
set.seed(12345)
v = sample(rownames(amlme.cs), 1e4)
saveRDS(d[v, ],file='/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/data/tcga_450k/proc/hg38/combine/sampleme_match_aml.rds')
## ---------------------------
## load, process training data
## ---------------------------
library(data.table)
# trainmodel() and predict() below come from these sourced files.
source('/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/software/predict.R')
source('/home-4//whou10@jhu.edu/scratch/Wenpin/metpred/software/trainmodel.R')
meth <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/data/tcga_450k/proc/hg38/combine/sampleme_match_aml.rds') ## [1:10000, 1:8578]
expr <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/data/tcga_450k/proc/hg38/combine/ge.rds') ## [1:58560, 1:8578]
project <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/data/tcga_450k/proc/hg38/combine/project.rds')
# # select LAML samples as training only <<
# meth <- meth[, names(project)[project=='TCGA-LAML']]
# meth = meth[complete.cases(meth), ]
# expr <- expr[, colnames(meth)]
# # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# ds <- sub(':.*','',colnames(meth)) ##
# avoid 0 and 1 to enter logit function
# Clamp exact 0/1 beta values to the nearest observed interior value so the
# logit transform below stays finite.
meth[meth==0] <- min(meth[meth>0])
meth[meth==1] <- max(meth[meth<1])
meth <- log(meth/(1-meth))
# Filtering low expressed genes and quantile normalization
# Keep genes expressed (>0) in at least 1% of samples.
expr <- expr[rowMeans(expr > 0) >= 0.01,]
# qnem <- rowMeans(apply(expr,2,function(i) sort(i)))
# gn <- row.names(expr)
# expr <- apply(expr,2,function(i) qnem[frank(i,ties.method='min')])
# row.names(expr) <- gn
# Filtering low variable genes
# Keep genes whose SD exceeds the loess-smoothed mean-SD trend
# (i.e. genes more variable than expected for their expression level).
m <- rowMeans(expr)
s <- sqrt((rowMeans(expr*expr) - m^2) * ncol(expr)/(ncol(expr)-1))
mod <- loess(s~m)
expr <- expr[resid(mod) > 0,]
## ---------------------------
## load, process testing data
## ---------------------------
seur <- readRDS('/home-4/whou10@jhu.edu/data2/whou10/aml_feinberg/scrna/AML_integrated_scRNA_seurat.rds')
predexpr <- as.matrix(seur@assays$RNA@counts)
meta <- seur@meta.data$sample
# Pseudobulk: sum counts over all cells of each sample.
# NOTE(review): if a sample has exactly one cell, predexpr[,meta==i] drops
# to a vector and rowSums() errors — confirm every sample has >1 cell.
predexpr <- sapply(unique(meta),function(i) rowSums(predexpr[,meta==i]))
gidgn <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/resource/hg38_geneid_genename_genelength.rds')
# Translate Ensembl gene IDs (version suffix stripped) to gene symbols.
rownames(expr) <- gidgn[match(rownames(expr), gsub('\\..*', '', gidgn$geneid)), 2]
expr <- expr[!duplicated(rownames(expr)), ]
## match genes
intgene <- intersect(rownames(predexpr), rownames(expr))
predexpr <- predexpr[intgene, ]
expr <- expr[intgene, ]
## train model
# Note: 'm' (earlier the row-mean vector) is reused here for the model.
m <- trainmodel(expr, meth, log.transform = F, filter.low.express.gene = F, filter.low_var.gene = F)
saveRDS(m, '/home-4/whou10@jhu.edu/scratch/Wenpin/aml_feinberg/pred_dnam/result/pseudobulk/tcga_training/all_trainmodel.rds')
## predict
pred <- predict(predexpr, m)
saveRDS(pred, '/home-4/whou10@jhu.edu/scratch/Wenpin/aml_feinberg/pred_dnam/result/pseudobulk/tcga_training/all_pred.rds')
## evaluate
amlme <- readRDS('/home-4/whou10@jhu.edu/data2/whou10/aml_feinberg/wgbs/processed/hg38/all_cpg_by_14_patients_completecases.rds')
# Align measured and predicted matrices on common CpGs and samples.
amlme <- amlme[rownames(meth), intersect(colnames(pred), colnames(amlme))]
pred <- pred[, colnames(amlme)]
# Row-wise z-scoring: centre each row at its mean and divide by its sample
# standard deviation. rowMeans(x^2) - mean^2 is the population variance;
# the n/(n-1) factor converts it to the unbiased sample variance.
scalematrix <- function(data) {
  row_mean <- rowMeans(data)
  n_cols <- ncol(data)
  row_var <- (rowMeans(data * data) - row_mean^2) / (n_cols - 1) * n_cols
  (data - row_mean) / sqrt(row_var)
}
# Correlation helper used for the evaluation below.
#  - type == 'concordant': per-row Pearson correlation between matching rows
#    of m1 and m2 (vector of length nrow(m1)); each row is z-scored with
#    scalematrix() and the dot product divided by (ncol - 1).
#  - otherwise: full cross-correlation matrix between the columns of m1 and
#    the columns of m2 (columns z-scored via scalematrix(t(.))).
# Assumes matching dimensions; zero-variance rows yield NaN (0/0 in the sd).
corfunc <- function(m1,m2,type='concordant') {
if (type=='concordant') {
rowSums(scalematrix(m1) * scalematrix(m2))/(ncol(m1)-1)
} else {
scalematrix(t(m1)) %*% t(scalematrix(t(m2)))/(nrow(m1)-1)
}
}
# Cross-sample agreement: one correlation per CpG site (predicted vs
# measured profile across samples).
cv1 <- corfunc(pred, amlme)
print(summary(cv1))
# Cross-site agreement: one correlation per sample (across CpG sites).
cv2 <- corfunc(t(pred),t(amlme))
print(summary(cv2))
# Persist both evaluation vectors together.
saveRDS(list(crosssample = cv1, crosssite = cv2), '/home-4/whou10@jhu.edu/scratch/Wenpin/aml_feinberg/pred_dnam/result/pseudobulk/tcga_training/all_perf.rds')
|
/pred_dnam/code/06_tcga_all_training_pseudobulk.R
|
no_license
|
Winnie09/aml_feinberg
|
R
| false
| false
| 4,549
|
r
|
# NOTE(review): duplicate copy of the pipeline above. Predicts DNA
# methylation from pseudobulk scRNA expression with a TCGA-450k-trained
# model, then evaluates against WGBS data for 14 AML patients. All paths
# are hard-coded to a cluster filesystem.
# -------------------------------------------
# common CpG between training and AML samples
# -------------------------------------------
d <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/data/tcga_450k/proc/hg38/combine/me.rds')
loc <- readRDS('/home-4/zji4@jhu.edu/scratch/metpred/data/tcga_450k/proc/hg38/loc/hg38.rds')
rownames(d) <- loc[rownames(d)]
amlme.cs <- readRDS('/home-4/whou10@jhu.edu/data2/whou10/aml_feinberg/wgbs/processed/hg38/all_cpg_by_14_patients_completecases.rds')
int <- intersect(rownames(d), rownames(amlme.cs)) ## [1:156912]
amlme.cs <- amlme.cs[int, ]
# ----------------------------------
# highly variable CpG in AML samples
# ----------------------------------
cm <- rowMeans(amlme.cs)
# Row-wise sample standard deviation (population variance * n/(n-1)).
csv <- sqrt(rowMeans(amlme.cs * amlme.cs - cm^2) / (ncol(amlme.cs) - 1) * ncol(amlme.cs))
# > summary(csv)
amlme.cs <- amlme.cs[csv >= 0.2, ]
# > str(amlme.cs)
# Fixed seed: the 10k-CpG subsample is reproducible.
set.seed(12345)
v = sample(rownames(amlme.cs), 1e4)
saveRDS(d[v, ],file='/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/data/tcga_450k/proc/hg38/combine/sampleme_match_aml.rds')
## ---------------------------
## load, process training data
## ---------------------------
library(data.table)
source('/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/software/predict.R')
source('/home-4//whou10@jhu.edu/scratch/Wenpin/metpred/software/trainmodel.R')
meth <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/data/tcga_450k/proc/hg38/combine/sampleme_match_aml.rds') ## [1:10000, 1:8578]
expr <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/data/tcga_450k/proc/hg38/combine/ge.rds') ## [1:58560, 1:8578]
project <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/metpred/data/tcga_450k/proc/hg38/combine/project.rds')
# # select LAML samples as training only <<
# meth <- meth[, names(project)[project=='TCGA-LAML']]
# meth = meth[complete.cases(meth), ]
# expr <- expr[, colnames(meth)]
# # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# ds <- sub(':.*','',colnames(meth)) ##
# avoid 0 and 1 to enter logit function
meth[meth==0] <- min(meth[meth>0])
meth[meth==1] <- max(meth[meth<1])
meth <- log(meth/(1-meth))
# Filtering low expressed genes and quantile normalization
expr <- expr[rowMeans(expr > 0) >= 0.01,]
# qnem <- rowMeans(apply(expr,2,function(i) sort(i)))
# gn <- row.names(expr)
# expr <- apply(expr,2,function(i) qnem[frank(i,ties.method='min')])
# row.names(expr) <- gn
# Filtering low variable genes
m <- rowMeans(expr)
s <- sqrt((rowMeans(expr*expr) - m^2) * ncol(expr)/(ncol(expr)-1))
mod <- loess(s~m)
expr <- expr[resid(mod) > 0,]
## ---------------------------
## load, process testing data
## ---------------------------
seur <- readRDS('/home-4/whou10@jhu.edu/data2/whou10/aml_feinberg/scrna/AML_integrated_scRNA_seurat.rds')
predexpr <- as.matrix(seur@assays$RNA@counts)
meta <- seur@meta.data$sample
# Pseudobulk: sum counts over all cells of each sample.
predexpr <- sapply(unique(meta),function(i) rowSums(predexpr[,meta==i]))
gidgn <- readRDS('/home-4/whou10@jhu.edu/scratch/Wenpin/resource/hg38_geneid_genename_genelength.rds')
rownames(expr) <- gidgn[match(rownames(expr), gsub('\\..*', '', gidgn$geneid)), 2]
expr <- expr[!duplicated(rownames(expr)), ]
## match genes
intgene <- intersect(rownames(predexpr), rownames(expr))
predexpr <- predexpr[intgene, ]
expr <- expr[intgene, ]
## train model
m <- trainmodel(expr, meth, log.transform = F, filter.low.express.gene = F, filter.low_var.gene = F)
saveRDS(m, '/home-4/whou10@jhu.edu/scratch/Wenpin/aml_feinberg/pred_dnam/result/pseudobulk/tcga_training/all_trainmodel.rds')
## predict
pred <- predict(predexpr, m)
saveRDS(pred, '/home-4/whou10@jhu.edu/scratch/Wenpin/aml_feinberg/pred_dnam/result/pseudobulk/tcga_training/all_pred.rds')
## evaluate
amlme <- readRDS('/home-4/whou10@jhu.edu/data2/whou10/aml_feinberg/wgbs/processed/hg38/all_cpg_by_14_patients_completecases.rds')
amlme <- amlme[rownames(meth), intersect(colnames(pred), colnames(amlme))]
pred <- pred[, colnames(amlme)]
# Row-wise z-scoring helper (sample standard deviation).
scalematrix <- function(data) {
cm <- rowMeans(data)
csd <- sqrt((rowMeans(data*data) - cm^2) / (ncol(data) - 1) * ncol(data))
(data - cm) / csd
}
# 'concordant': per-row Pearson correlation between matching rows of m1/m2;
# otherwise a full column-by-column cross-correlation matrix.
corfunc <- function(m1,m2,type='concordant') {
if (type=='concordant') {
rowSums(scalematrix(m1) * scalematrix(m2))/(ncol(m1)-1)
} else {
scalematrix(t(m1)) %*% t(scalematrix(t(m2)))/(nrow(m1)-1)
}
}
# Cross-sample (per CpG) and cross-site (per sample) agreement.
cv1 <- corfunc(pred, amlme)
print(summary(cv1))
cv2 <- corfunc(t(pred),t(amlme))
print(summary(cv2))
saveRDS(list(crosssample = cv1, crosssite = cv2), '/home-4/whou10@jhu.edu/scratch/Wenpin/aml_feinberg/pred_dnam/result/pseudobulk/tcga_training/all_perf.rds')
|
## Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
## The required data is "Electric Power Consumption". It can be downloaded from
## 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
## It is a zip file. It should be unzipped and saved in the working directory.
## Please save it as "household_power_consumption.txt".
library(datasets)
library(lubridate)
## Minutes between the first observation in the file and the start of the
## window of interest; the data is sampled once per minute, so this equals
## the number of data rows to skip. ('units' spelled out — 'unit=' relied
## on partial argument matching.)
time_diff <- difftime(strptime("2007-02-01 00:00:00", format = "%Y-%m-%d %H:%M:%S"),
                      strptime("2006-12-16 17:24:00", format = "%Y-%m-%d %H:%M:%S"),
                      units = "mins")
## Skip one extra row for the header; coerce the difftime to a plain number
## so read.table() receives an ordinary row count.
skip_rows <- as.numeric(time_diff) + 1
## Read only the two days of interest: 2 * 24 * 60 = 2880 one-minute rows.
## The dataset codes missing values as "?", so declare that explicitly to
## keep the power column numeric even if missing values appear.
data_of_int <- read.table("household_power_consumption.txt", sep = ";",
                          skip = skip_rows, nrows = 2880, na.strings = "?")
## Re-attach the column names from the file's header row.
col_names <- colnames(read.table("household_power_consumption.txt", sep = ";",
                                 nrows = 1, header = TRUE))
colnames(data_of_int) <- col_names
## 'mar' is a par() setting, not a hist() argument: passing it to hist()
## only triggered "not a graphical parameter" warnings and was ignored.
## Set it via par() and restore the previous margins afterwards.
old_par <- par(mar = c(5, 4, 4, 2))
hist(data_of_int$Global_active_power, col = "red",
     xlab = "Global Active Power(kilowatt)",
     main = "Global Active Power", cex.lab = .75, cex.axis = .5)
par(old_par)
|
/plot1.R
|
no_license
|
skusum/ExData_Plotting1
|
R
| false
| false
| 1,453
|
r
|
## Plot 1 (duplicate copy): histogram of Global Active Power for two days.
## The required data is "Electric Power Consumption". It can be downloaded from
## 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
## It is a zip file. It should be unzipped and saved in the working directory.
## Please save it as "household_power_consumption.txt".
library(datasets)
library(lubridate)
## to find the number of rows to skip so as to get data of interest
## time difference between the start time of the original data and
## start time of our interest
## NOTE(review): 'unit=' works only via partial matching of 'units'.
time_diff <- difftime(strptime("2007-02-01 00:00:00", format="%Y-%m-%d %H:%M:%S"),
strptime("2006-12-16 17:24:00", format="%Y-%m-%d %H:%M:%S"),
unit="mins")
skip_rows <- time_diff+1 ## skip one extra row for row of colnames
## reads only subset of data for 2007/02/01 and 2007/02/01,
## two days= 2*24*60=2880 rows
data_of_int <- read.table("household_power_consumption.txt", sep=";",
skip=skip_rows, nrows=2880)
## assigns colnames of the original data to the subset data
col_names <- colnames(read.table("household_power_consumption.txt", sep=";",
nrows=1, header=TRUE))
colnames(data_of_int) <- col_names
## draws histogram of "Global_active_power"
## NOTE(review): 'mar' is a par() graphical parameter, not a hist()
## argument — here it only raises a warning and is ignored; it should be
## set with par(mar = ...) before the hist() call.
hist(data_of_int$Global_active_power, col="red",
xlab="Global Active Power(kilowatt)",
mar=c(5, 4, 4, 2),
main="Global Active Power", cex.lab=.75, cex.axis=.5)
|
# Fit a competitive substrate-preference enzyme model to fluorescence data.
#
# data: data.frame with (at least) the columns time, Product, Substrate,
#   InhibitorSRP, Porg, Catchment and Horizon.
# Returns a list with the optimised Parameters (Vmax, Kmf, Kmorg), a
# Goodness list (predictions plus R2/AIC/ll fit statistics) and the raw
# MCMC object.
#
# NOTE(review): relies on ode() (deSolve), modMCMC() (FME) and abc_optim()
# (ABCoptim) being attached by the caller; none are loaded here.
Substrate_preference <- function(data) {
  # --- Model definition -----------------------------------------------------
  # Competitive Michaelis-Menten kinetics: the fluorescent substrate S and
  # the organic pool Porg compete for the same enzyme, so each appears as a
  # competitive inhibitor in the other's rate law.
  Substr_model <- function(time, state, pars) {
    with(as.list(c(state, pars)), {
      # Fluorescent product accumulates exactly as fast as S is consumed.
      dPf <- Vmax*S/(Kmf*(1 + Porg/Kmorg) + S)
      dS <- -Vmax*S/(Kmf*(1 + Porg/Kmorg) + S)
      # Organic pool hydrolysis, inhibited by S.
      dPorg <- -Vmax*Porg/(Kmorg*(1 + S/Kmf) + Porg)
      return(list(c(dPf, dS, dPorg)))
    })
  }
  # --- Cost function --------------------------------------------------------
  # x = c(Vmax, Kmf, Kmorg). Runs the ODE model for every combination of
  # initial Substrate and InhibitorSRP level present in the data and sums
  # the squared substrate-scaled residuals.
  # (Despite the local name 'RMSE', the value returned is an SSR.)
  cost <- function(x) {
    yhat <- data.frame(Pred = numeric(), Product = numeric(), Substrate = numeric())
    for (i in unique(data$Substrate)) {
      for (n in unique(data$InhibitorSRP)) {
        out <- as.data.frame(ode(
          y = c(Pf = 0, S = i,
                Porg = mean(data[(data$Substrate == i & data$InhibitorSRP == n), "Porg"])),
          parms = c(Vmax = x[1], Kmf = x[2], Kmorg = x[3]),
          Substr_model,
          times = sort(unique((data[(data$Substrate == i & data$InhibitorSRP == n), "time"])))))
        out <- out[, c("time", "Pf")]
        colnames(out) <- c("time", "Pred")
        outm <- merge(out,
                      data[(data$Substrate == i & data$InhibitorSRP == n),
                           c("time", "Product", "Substrate")],
                      by = c("time"))[, -1]
        yhat <- rbind(yhat, outm)
      }
    }
    RMSE <- with(yhat, sum((((Pred - Product)/Substrate)^2), na.rm = T))
    return(RMSE)
  }
  # --- Optimisation ---------------------------------------------------------
  # MCMC first, to map the plausible parameter ranges ...
  par_mcmc <- modMCMC(f = cost, p = c(1e-2, 20, 20),
                      lower = c(1e-3, 1e-2, 1e-2),
                      upper = c(100, 500, 500), niter = 10000)
  # ... then a global search within the MCMC min/max bounds.
  pl <- as.numeric(summary(par_mcmc)["min", ])
  pu <- as.numeric(summary(par_mcmc)["max", ])
  # Alternative optimisers kept for reference:
  # opt_par<-DEoptim(fn=cost, lower=pl, upper=pu,
  #                  control = c(itermax = 10000, steptol = 50, reltol = 1e-8,
  #                  trace=FALSE, strategy=3, NP=250))
  # opt_par<-genoud(fn=cost, print.level = 0, pop.size=1e6, max=FALSE, nvars=6, Domains = cbind(pl, pu),
  #                 boundary.enforcement = 2)
  # Artificial bee colony search, seeded at the MCMC posterior mean.
  opt_par <- abc_optim(fn = cost, par = as.numeric(summary(par_mcmc)["mean", ]),
                       lb = pl, ub = pu, maxCycle = 1e6)
  # --- Goodness of fit ------------------------------------------------------
  # Re-runs the model at parameters x and returns the predictions plus fit
  # statistics, all computed on substrate-scaled residuals.
  goodness <- function(x) {
    yhat <- data.frame(time = numeric(), Pred = numeric(), Product = numeric(),
                       Substrate = numeric(), InhibitorSRP = numeric(),
                       Catchment = character(), Horizon = character(), Porg = numeric())
    for (i in unique(data$Substrate)) {
      for (n in unique(data$InhibitorSRP)) {
        out <- as.data.frame(ode(
          y = c(Pf = 0, S = i,
                Porg = mean(data[(data$Substrate == i & data$InhibitorSRP == n), "Porg"])),
          parms = c(Vmax = x[1], Kmf = x[2], Kmorg = x[3]),
          Substr_model,
          times = sort(unique((data[(data$Substrate == i & data$InhibitorSRP == n), "time"])))))
        out <- out[, c("time", "Pf", "Porg")]
        colnames(out) <- c("time", "Pred", "Porg")
        outm <- merge(out,
                      data[(data$Substrate == i & data$InhibitorSRP == n), c("time", "Product")],
                      by = c("time"))
        outm$Substrate <- rep(i, times = nrow(outm))
        # BUG FIX: this column previously copied the Substrate level
        # ('rep(i, ...)'); it must record the InhibitorSRP level 'n'.
        outm$InhibitorSRP <- rep(n, times = nrow(outm))
        yhat <- rbind(yhat, outm)
      }
    }
    yhat$Catchment <- rep(data$Catchment[1], times = nrow(yhat))
    yhat$Horizon <- rep(data$Horizon[1], times = nrow(yhat))
    SSres = with(yhat, sum((((Product - Pred)/Substrate)^2), na.rm = T))
    SStot = with(yhat, sum((((Product - mean(Product, na.rm = T))/Substrate)^2), na.rm = T))
    ll = with(yhat, -sum((((Product - Pred)/Substrate)^2), na.rm = T)/2/(sd(Product/Substrate, na.rm = T)^2))
    R2 <- 1 - SSres/SStot
    N <- length(x)
    AIC <- 2*N - 2*ll
    Gfit <- c(R2 = R2, N = N, AIC = AIC, ll = ll, SSres = SSres, SStot = SStot)
    goodness_out <- list(Yhat = yhat, Gfit = Gfit)
    return(goodness_out)
  }
  # ABC/genoud expose the optimum as $par (DEoptim uses $optim$bestmem).
  # Parameters<-opt_par$optim$bestmem  # DEoptim algorithm
  Parameters <- opt_par$par
  names(Parameters) <- c("Vmax", "Kmf", "Kmorg")
  out_all <- list(Parameters = Parameters,
                  # Goodness = goodness(as.numeric(opt_par$optim$bestmem)),  # DEoptim
                  Goodness = goodness(as.numeric(opt_par$par)),
                  MCMC = par_mcmc)
  return(out_all)
}
|
/EE_data_analysis/Substrate_preference.R
|
no_license
|
petacapek/Enzyme_inhibition
|
R
| false
| false
| 4,500
|
r
|
# Duplicate copy of Substrate_preference(): fits a competitive
# Michaelis-Menten substrate-preference model by MCMC (modMCMC) followed by
# an artificial-bee-colony global search (abc_optim), then reports fit
# statistics. Depends on deSolve/FME/ABCoptim being attached by the caller.
Substrate_preference<-function(data){
#Define the model
Substr_model<-function(time, state, pars){
with(as.list(c(state, pars)),{
#Fluorescent product/substrate
dPf<-Vmax*S/(Kmf*(1 + Porg/Kmorg) + S)#fluorescent product
dS<--Vmax*S/(Kmf*(1 + Porg/Kmorg) + S)
#Porg
dPorg<--Vmax*Porg/(Kmorg*(1 + S/Kmf) + Porg)
return(list(c(dPf, dS, dPorg)))
})
}
#Define the cost function
# Returns the sum of squared substrate-scaled residuals (an SSR, despite
# the local variable name 'RMSE').
cost<-function(x){
##the ODE models is run for each initial Substrate and Product concentration
##the model runs are stored here
yhat<-data.frame(Pred=numeric(), Product=numeric(), Substrate=numeric())
for(i in unique(data$Substrate)){
for(n in unique(data$InhibitorSRP)){
out<-as.data.frame(ode(y=c(Pf=0, S=i,
Porg=mean(data[(data$Substrate==i & data$InhibitorSRP==n), "Porg"])), parms = c(Vmax=x[1], Kmf=x[2], Kmorg=x[3]),
Substr_model, times=sort(unique((data[(data$Substrate==i & data$InhibitorSRP==n), "time"])))))
out<-out[, c("time", "Pf")]
colnames(out)<-c("time", "Pred")
outm<-merge(out, data[(data$Substrate==i & data$InhibitorSRP==n), c("time", "Product", "Substrate")], by = c("time"))[,-1]
yhat<-rbind(yhat, outm)
}
}
RMSE<-with(yhat, sum((((Pred-Product)/Substrate)^2), na.rm = T))
return(RMSE)
}
#Use MCMC to define ranges of possible model parameters
par_mcmc<-modMCMC(f=cost, p=c(1e-2, 20, 20),
lower=c(1e-3, 1e-2, 1e-2),
upper=c(100, 500, 500), niter=10000)
#lower and upper limits for parameters are extracted
pl<-as.numeric(summary(par_mcmc)["min",])
pu<-as.numeric(summary(par_mcmc)["max",])
#these limits are used to find global optimum by DEoptim
# opt_par<-DEoptim(fn=cost, lower=pl, upper=pu,
# control = c(itermax = 10000, steptol = 50, reltol = 1e-8,
# trace=FALSE, strategy=3, NP=250))
#these limits are used to find global optimum by rgenoud
# opt_par<-genoud(fn=cost, print.level = 0, pop.size=1e6, max=FALSE, nvars=6, Domains = cbind(pl, pu),
# boundary.enforcement = 2)
#these limits are used to find global optimum by ABCotpim
opt_par<-abc_optim(fn=cost, par = as.numeric(summary(par_mcmc)["mean",]), lb=pl, ub=pu, maxCycle = 1e6)
#Calculate goodness of correspondence
goodness<-function(x){
yhat<-data.frame(time = numeric(), Pred=numeric(), Product=numeric(), Substrate=numeric(), InhibitorSRP=numeric(),
Catchment=character(), Horizon=character(), Porg=numeric())
for(i in unique(data$Substrate)){
for(n in unique(data$InhibitorSRP)){
out<-as.data.frame(ode(y=c(Pf=0, S=i,
Porg=mean(data[(data$Substrate==i & data$InhibitorSRP==n), "Porg"])), parms = c(Vmax=x[1], Kmf=x[2], Kmorg=x[3]),
Substr_model, times=sort(unique((data[(data$Substrate==i & data$InhibitorSRP==n), "time"])))))
out<-out[, c("time", "Pf", "Porg")]
colnames(out)<-c("time", "Pred", "Porg")
outm<-merge(out, data[(data$Substrate==i & data$InhibitorSRP==n), c("time", "Product")], by = c("time"))
outm$Substrate<-rep(i, times=nrow(outm))
# NOTE(review): 'rep(i, ...)' here almost certainly should be 'rep(n, ...)'
# — as written the InhibitorSRP column duplicates the Substrate level.
outm$InhibitorSRP<-rep(i, times=nrow(outm))
yhat<-rbind(yhat, outm)
}
}
yhat$Catchment<-rep(data$Catchment[1], times=nrow(yhat))
yhat$Horizon<-rep(data$Horizon[1], times=nrow(yhat))
SSres=with(yhat, sum((((Product-Pred)/Substrate)^2), na.rm = T))
SStot=with(yhat, sum((((Product-mean(Product, na.rm = T))/Substrate)^2), na.rm = T))
ll=with(yhat, -sum((((Product-Pred)/Substrate)^2), na.rm = T)/2/(sd(Product/Substrate, na.rm = T)^2))
R2<-1-SSres/SStot
N<-length(x)
AIC<-2*N-2*ll
Gfit<-c(R2=R2, N=N, AIC=AIC, ll=ll, SSres=SSres, SStot=SStot)
goodness_out<-list(Yhat=yhat, Gfit=Gfit)
return(goodness_out)
}
#Parameters<-opt_par$optim$bestmem#Deoptim algorithm
Parameters<-opt_par$par#genoud/ABC algorithm
names(Parameters)<-c("Vmax", "Kmf", "Kmorg")
out_all<-list(Parameters = Parameters,
#Goodness = goodness(as.numeric(opt_par$optim$bestmem)),#DEoptim algorithm
Goodness = goodness(as.numeric(opt_par$par)),#genoud/ABC algorithm
MCMC = par_mcmc)
return(out_all)
}
|
# iRODS rule (EUDAT B2SAFE), not R despite the .r extension.
# End-to-end smoke test of replication plus integrity checking:
# create a test object -> mint a PID -> replicate with PID registration ->
# read back the replica and its PID -> run the EUDAT integrity check ->
# clean up replica, original and both PIDs.
# INPUT *home: collection to work in; defaults to the client's home below.
eudatReplCheckIntegrity{
writeLine("stdout", "userNameClient: $userNameClient");
writeLine("stdout", "rodsZoneClient: $rodsZoneClient");
if (*home == '') {
*home="/$rodsZoneClient/home/$userNameClient";
}
# Create the source object and write a known payload.
msiDataObjCreate("*home/test_data.txt", "", *fd);
msiDataObjWrite(*fd, "Hello World!", "");
msiDataObjClose(*fd, *status1);
writeLine("stdout", "Object *home/test_data.txt written with success!");
writeLine("stdout", "Its content is: Hello World!");
# PID creation
# EUDATCreatePID(*parent_pid, *path, *ror, *fio, *fixed, *newPID)
EUDATCreatePID("None", "*home/test_data.txt", "None", "None", "true", *newPID);
writeLine("stdout", "The Object *home/test_data.txt has PID = *newPID");
# Data set replication
# with PID registration (3rd argument "true")
# and not recursive (4th argument "false")
*res = EUDATReplication("*home/test_data.txt", "*home/test_data2.txt", "true", "false", *response);
if (*res) {
writeLine("stdout", "Object *home/test_data.txt replicated to Object *home/test_data2.txt!");
writeLine("stdout", "The content of the replica is:");
# Read back the 12-byte payload to prove the replica is intact.
msiDataObjOpen("*home/test_data2.txt", *S_FD);
msiDataObjRead(*S_FD, 12,*R_BUF);
writeBytesBuf("stdout", *R_BUF);
msiDataObjClose(*S_FD, *status2);
writeLine("stdout", "");
EUDATiFieldVALUEretrieve("*home/test_data2.txt", "PID", *value);
writeLine("stdout", "The Replica *home/test_data2.txt has PID = *value");
# Data set integrity check
# with logging enabled (3rd argument "true")
# EUDATCheckIntegrityDO(*source, *destination, *logEnabled, *response);
*status_check = EUDATCheckIntegrityDO("*home/test_data.txt", "*home/test_data2.txt", bool("true"), *response);
if (*status_check) {
writeLine("stdout", "Integrity check after replication: successful!");
}
else {
writeLine("stdout", "Integrity check after replication: failed: *response");
}
# Clean up the replica and its PID.
EUDATePIDremove("*home/test_data2.txt", "true");
writeLine("stdout", "PID *value removed");
msiDataObjUnlink("*home/test_data2.txt",*status3);
writeLine("stdout", "Replica removed");
}
else {
writeLine("stdout", "Replication failed: *response");
}
# Clean up the original and its PID (runs whether or not replication worked).
EUDATePIDremove("*home/test_data.txt", "true");
writeLine("stdout", "PID *newPID removed");
msiDataObjUnlink("*home/test_data.txt",*status4);
writeLine("stdout", "Object *home/test_data.txt removed");
}
INPUT *home=''
OUTPUT ruleExecOut
|
/rules/eudatRepl_checkIntegrity.r
|
permissive
|
alexal14/B2SAFE-core
|
R
| false
| false
| 2,616
|
r
|
# Duplicate copy of the EUDAT B2SAFE replication/integrity smoke-test rule
# (iRODS rule language, not R). Creates a test object, mints a PID,
# replicates with PID registration, verifies content and integrity, then
# removes everything it created.
eudatReplCheckIntegrity{
writeLine("stdout", "userNameClient: $userNameClient");
writeLine("stdout", "rodsZoneClient: $rodsZoneClient");
# Default the working collection to the client's home.
if (*home == '') {
*home="/$rodsZoneClient/home/$userNameClient";
}
msiDataObjCreate("*home/test_data.txt", "", *fd);
msiDataObjWrite(*fd, "Hello World!", "");
msiDataObjClose(*fd, *status1);
writeLine("stdout", "Object *home/test_data.txt written with success!");
writeLine("stdout", "Its content is: Hello World!");
# PID creation
# EUDATCreatePID(*parent_pid, *path, *ror, *fio, *fixed, *newPID)
EUDATCreatePID("None", "*home/test_data.txt", "None", "None", "true", *newPID);
writeLine("stdout", "The Object *home/test_data.txt has PID = *newPID");
# Data set replication
# with PID registration (3rd argument "true")
# and not recursive (4th argument "false")
*res = EUDATReplication("*home/test_data.txt", "*home/test_data2.txt", "true", "false", *response);
if (*res) {
writeLine("stdout", "Object *home/test_data.txt replicated to Object *home/test_data2.txt!");
writeLine("stdout", "The content of the replica is:");
msiDataObjOpen("*home/test_data2.txt", *S_FD);
msiDataObjRead(*S_FD, 12,*R_BUF);
writeBytesBuf("stdout", *R_BUF);
msiDataObjClose(*S_FD, *status2);
writeLine("stdout", "");
EUDATiFieldVALUEretrieve("*home/test_data2.txt", "PID", *value);
writeLine("stdout", "The Replica *home/test_data2.txt has PID = *value");
# Data set integrity check
# with logging enabled (3rd argument "true")
# EUDATCheckIntegrityDO(*source, *destination, *logEnabled, *response);
*status_check = EUDATCheckIntegrityDO("*home/test_data.txt", "*home/test_data2.txt", bool("true"), *response);
if (*status_check) {
writeLine("stdout", "Integrity check after replication: successful!");
}
else {
writeLine("stdout", "Integrity check after replication: failed: *response");
}
EUDATePIDremove("*home/test_data2.txt", "true");
writeLine("stdout", "PID *value removed");
msiDataObjUnlink("*home/test_data2.txt",*status3);
writeLine("stdout", "Replica removed");
}
else {
writeLine("stdout", "Replication failed: *response");
}
EUDATePIDremove("*home/test_data.txt", "true");
writeLine("stdout", "PID *newPID removed");
msiDataObjUnlink("*home/test_data.txt",*status4);
writeLine("stdout", "Object *home/test_data.txt removed");
}
INPUT *home=''
OUTPUT ruleExecOut
|
#' Plots character changes on branches
#'
#' @description
#'
#' Plots character changes in boxes on branches.
#'
#' @param character_changes A matrix of character changes.
#' @param time_tree Tree on which character changes occur.
#' @param label_size The size of the text for the branch labels. Default is 0.5.
#'
#' @details
#'
#' Takes the \code{character_changes} output from \link{test_rates} and plots it on the tree used to generate it.
#'
#' @return A plot of character changes on a tree.
#'
#' @author Graeme T. Lloyd \email{graemetlloyd@@gmail.com}
#'
#' @examples
#'
#' # Set random seed:
#' set.seed(17)
#'
#' # Get first MPT for the Michaux data set:
#' time_tree <- ape::read.tree(text = paste0("(Ancilla:31.6,(Turrancilla:102.7,",
#'   "(Ancillista:1,Amalda:63.5):1):1);"))
#'
#' # Set root time for tree:
#' time_tree$root.time <- 103.7
#'
#' # Generate two equal length time bins:
#' time_bins <- matrix(data = c(seq(time_tree$root.time, 0, length.out = 3)[1:2],
#'   seq(time_tree$root.time, 0, length.out = 3)[2:3]), ncol = 2, dimnames = list(LETTERS[1:2],
#'   c("fad", "lad")))
#'
#' # Set class as timeBins:
#' class(time_bins) <- "timeBins"
#'
#' # Get discrete character rates (includes changes):
#' out <- test_rates(
#'   time_tree = time_tree,
#'   cladistic_matrix = michaux_1989,
#'   time_bins = time_bins,
#'   branch_partitions = list(list(1)),
#'   alpha = 0.01
#' )
#'
#' # Plot character changes on the tree:
#' plot_changes_on_tree(
#'   character_changes = out$inferred_character_changes,
#'   time_tree = time_tree
#' )
#' @export plot_changes_on_tree
plot_changes_on_tree <- function(character_changes, time_tree, label_size = 0.5) {
  # Hoist the (loop-invariant) edge count.
  n_edges <- nrow(time_tree$edge)
  # Replace branch lengths with change counts: appending every edge index
  # once guarantees each edge appears in the tally (so zero-change edges
  # still get a positive length of 0.5 after the -0.5), and rle() over the
  # sorted indices counts occurrences per edge. seq_len() is safe when the
  # tree has no edges, unlike 1:nrow(...).
  time_tree$edge.length <- rle(sort(x = c(character_changes[, "edge"], seq_len(n_edges))))$lengths - 0.5
  # One label per edge; NA means "no changes" and plots as no box.
  edge_labels <- rep(NA, n_edges)
  # For each edge:
  for (i in seq_len(n_edges)) {
    # Get rows for where changes occur:
    change_rows <- which(x = character_changes[, "edge"] == i)
    # If there are changes on edge, compile one "char: from -> to" line per
    # change, stacked with newlines:
    if (length(change_rows) > 0) {
      edge_labels[i] <- paste(paste(character_changes[change_rows, "character"], ": ", character_changes[change_rows, "from"], " -> ", character_changes[change_rows, "to"], sep = ""), collapse = "\n")
    }
  }
  # ADD DOT DOT DOT.....
  # Plot tree:
  plot(time_tree, direction = "upwards")
  # Add edge labels for changes:
  edgelabels(text = edge_labels, bg = "white", cex = label_size)
  # NEED TO LADDERISE LEFT IF WRITING ON RIGHT OF BRANCHES...
}
|
/R/plot_changes_on_tree.R
|
no_license
|
graemetlloyd/Claddis
|
R
| false
| false
| 2,673
|
r
|
#' Plots character changes on branches
#'
#' @description
#'
#' Plots character changes in boxes on branches.
#'
#' @param character_changes A matrix of character changes.
#' @param time_tree Tree on which character changes occur.
#' @param label_size The size of the text for the branch labels. Default is 0.5.
#'
#' @details
#'
#' Takes the \code{character_changes} output from \link{test_rates} and plots it on the tree used to generate it.
#'
#' @return A plot of character changes on a tree.
#'
#' @author Graeme T. Lloyd \email{graemetlloyd@@gmail.com}
#'
#' @examples
#'
#' # Set random seed:
#' set.seed(17)
#'
#' # Get first MPT for the Michaux data set:
#' time_tree <- ape::read.tree(text = paste0("(Ancilla:31.6,(Turrancilla:102.7,",
#'   "(Ancillista:1,Amalda:63.5):1):1);"))
#'
#' # Set root time for tree:
#' time_tree$root.time <- 103.7
#'
#' # Generate two equal length time bins:
#' time_bins <- matrix(data = c(seq(time_tree$root.time, 0, length.out = 3)[1:2],
#'   seq(time_tree$root.time, 0, length.out = 3)[2:3]), ncol = 2, dimnames = list(LETTERS[1:2],
#'   c("fad", "lad")))
#'
#' # Set class as timeBins:
#' class(time_bins) <- "timeBins"
#'
#' # Get discrete character rates (includes changes):
#' out <- test_rates(
#'   time_tree = time_tree,
#'   cladistic_matrix = michaux_1989,
#'   time_bins = time_bins,
#'   branch_partitions = list(list(1)),
#'   alpha = 0.01
#' )
#'
#' # Plot character changes on the tree:
#' plot_changes_on_tree(
#'   character_changes = out$inferred_character_changes,
#'   time_tree = time_tree
#' )
#' @export plot_changes_on_tree
plot_changes_on_tree <- function(character_changes, time_tree, label_size = 0.5) {
# Update tree edge lengths to number of character changes:
# (Each edge index is appended once so zero-change edges still appear in
# the rle tally; count + 1 - 0.5 leaves every edge a positive length.)
# NOTE(review): 1:nrow(...) misbehaves for an edgeless tree — prefer seq_len.
time_tree$edge.length <- rle(sort(x = c(character_changes[, "edge"], 1:nrow(time_tree$edge))))$lengths - 0.5
# Create empty edge labels vector (NA = no changes, plots as no box):
edge_labels <- rep(NA, nrow(time_tree$edge))
# For each edge:
for (i in 1:nrow(time_tree$edge)) {
# Get rows for where changes occur:
change_rows <- which(x = character_changes[, "edge"] == i)
# If there are changes on edge:
if (length(x = change_rows) > 0) {
# Compile all changes into edge label, one "char: from -> to" per line:
edge_labels[i] <- paste(paste(character_changes[change_rows, "character"], ": ", character_changes[change_rows, "from"], " -> ", character_changes[change_rows, "to"], sep = ""), collapse = "\n")
}
}
# ADD DOT DOT DOT.....
# Plot tree:
plot(time_tree, direction = "upwards")
# Add edge labels for changes:
edgelabels(text = edge_labels, bg = "white", cex = label_size)
# NEED TO LADDERISE LEFT IF WRITING ON RIGHT OF BRANCHES...
}
|
# Generates simulated dataset 03 and fits it with three model types
# (kinetic, spectral, spectral-temporal), writing fit summaries and plots
# into the simData03/ directory. Expects to be source()d from
# generateTestData.R, which supplies initial_wd, times_no_IRF, spectral,
# global_sigma, tmax_sim and the simulate/read/plot helper functions.
# The top level script generateTestData.R defines:
# initial_wd <- getwd()
# simulateAndExportDatasetParamGUI
setwd(file.path(initial_wd, "simData03"))
show_plot = FALSE # if TRUE: show on screen, else: write to png
simFilename <- "simData03.ascii"
# global_sigma <- .001
# Experimental parameters
kinpar_sim <- c(0.025, 0.001)
amplitudes_sim <- c(1, 1)
specpar_sim <- list(c(14285, 800, 0.4), c(13700, 650, -0.3))
irf_sim <- FALSE
irfpar_sim <- vector()
seqmod_sim <- FALSE
# Fitting parameters
# Starting guesses are deliberately offset from the simulation truth so the
# optimiser has real work to do.
kinpar_guess <- c(0.02, 0.002)
specpar_guess <- list(c(14285-50, 800+50, 0.4-0.1), c(13700+200, 650-50, -0.3+0.1))
# Simulate some data!
simulateAndExportDatasetCustomAxes(simFilename,
kinpar = kinpar_sim ,
amplitudes = c(1, 1) ,
times = times_no_IRF,
specpar= specpar_sim ,
spectral = spectral,
sigma= global_sigma ,
irf = irf_sim ,
irfpar = irfpar_sim ,
seqmod = seqmod_sim )
# Test we can read the data back (it's a valid data file)
test_simData<- readData(simFilename)
## Run a kinetic model
kinModel<- TIMP::initModel(mod_type = "kin",
kinpar = kinpar_guess,
seqmod = seqmod_sim)
kinFit<- TIMP::fitModel(
data = list(test_simData),
modspec = list(kinModel),
opt = kinopt(iter = 99,
plot = FALSE)
)
kinFitSummary<- summary(
kinFit$currModel@fit@nlsres[[1]],
currModel = kinFit$currModel,
currTheta = kinFit$currTheta,
correlation = TRUE
)
# Capture the printed summary into a text file.
sink("kinFitSummary.txt")
print(kinFitSummary)
sink()
# Unbraced one-statement guards: png()/dev.off() bracket the plot call
# only when writing to file.
if (!show_plot)
png('kinFit.png', width = 1024, height = 768, res=100)
plotterforGUI(
modtype = "kin",
data = test_simData,
model = kinModel,
result = kinFit,
lin = tmax_sim
)
if (!show_plot)
dev.off()
## Run a spectral model
specModel<- TIMP::initModel(mod_type = "spec",
specpar = specpar_guess,
nupow=1,
specfun="gaus")
specFit<- TIMP::fitModel(
data = list(test_simData),
modspec = list(specModel),
opt = specopt(iter = 99,
plot = FALSE)
)
specFitSummary<- summary(
specFit$currModel@fit@nlsres[[1]],
currModel = specFit$currModel,
currTheta = specFit$currTheta,
correlation = TRUE
)
sink("specFitSummary.txt")
print(specFitSummary)
sink()
if (!show_plot)
png('specFit.png', width = 1024, height = 768, res=100)
plotterforGUI(
modtype = "spec",
data = test_simData,
model = specModel,
result = specFit,
lin = tmax_sim
)
if (!show_plot)
dev.off()
# Run a spectral temporal model
specTempModel<- TIMP::initModel(mod_type = "kin",
kinpar = kinpar_guess,
irfpar = irfpar_sim,
seqmod = seqmod_sim)
# here's the crucial bit!
specTempModel@specpar <- specpar_guess
# Spectral-temporal model only supported in paramGUI, not in TIMP
specTempFit<- paramGUI::spectemp(
sim = test_simData,
model = specTempModel,
iter = 99,
kroncol = FALSE,
lin = tmax_sim,
l_posk = FALSE
)
specTempFitSummary<- summary(specTempFit$onls)
sink("specTempFitSummary.txt")
print(specTempFitSummary)
sink()
if (!show_plot)
png('specTempFit.png', width = 1024, height = 768, res=100)
# NOTE(review): this call passes model = kinModel while plotting the
# spectemp result — confirm that is intended and not a leftover from the
# kinetic block above.
plotterforGUI(
modtype = "spectemp",
data = test_simData,
model = kinModel,
result = specTempFit$onls,
theta = specTempFit$theta,
lin = tmax_sim
)
if (!show_plot)
dev.off()
# Reset wd
setwd(file.path(initial_wd))
|
/paramGUI/simData03/simData03.R
|
permissive
|
s-weigand/pyglotaran-validation-data-paramGUI
|
R
| false
| false
| 3,754
|
r
|
# The top level script generateTestData.R defines:
# initial_wd <- getwd()
# simulateAndExportDatasetParamGUI
# NOTE(review): the comment above mentions simulateAndExportDatasetParamGUI,
# but the script actually calls simulateAndExportDatasetCustomAxes; also
# global_sigma, times_no_IRF, spectral and tmax_sim come from the parent
# script -- confirm against generateTestData.R.
setwd(file.path(initial_wd, "simData03"))
show_plot = FALSE # if TRUE: show on screen, else: write to png
simFilename <- "simData03.ascii"
# global_sigma <- .001
# Experimental parameters
# Two-compartment decay rates and Gaussian spectral shapes; no IRF.
kinpar_sim <- c(0.025, 0.001)
amplitudes_sim <- c(1, 1)
specpar_sim <- list(c(14285, 800, 0.4), c(13700, 650, -0.3))
irf_sim <- FALSE
irfpar_sim <- vector()
seqmod_sim <- FALSE
# Fitting parameters
# Starting guesses deliberately offset from the simulation truth.
kinpar_guess <- c(0.02, 0.002)
specpar_guess <- list(c(14285-50, 800+50, 0.4-0.1), c(13700+200, 650-50, -0.3+0.1))
# Simulate some data!
# NOTE(review): `amplitudes` is passed the literal c(1, 1) rather than the
# amplitudes_sim defined above -- same value today, but easy to drift.
simulateAndExportDatasetCustomAxes(simFilename,
kinpar = kinpar_sim ,
amplitudes = c(1, 1) ,
times = times_no_IRF,
specpar= specpar_sim ,
spectral = spectral,
sigma= global_sigma ,
irf = irf_sim ,
irfpar = irfpar_sim ,
seqmod = seqmod_sim )
# Test we can read the data back (it's a valid data file)
test_simData<- readData(simFilename)
## Run a kinetic model
kinModel<- TIMP::initModel(mod_type = "kin",
kinpar = kinpar_guess,
seqmod = seqmod_sim)
# Fit, dump the fit summary to a text file via sink(), then plot.
kinFit<- TIMP::fitModel(
data = list(test_simData),
modspec = list(kinModel),
opt = kinopt(iter = 99,
plot = FALSE)
)
kinFitSummary<- summary(
kinFit$currModel@fit@nlsres[[1]],
currModel = kinFit$currModel,
currTheta = kinFit$currTheta,
correlation = TRUE
)
sink("kinFitSummary.txt")
print(kinFitSummary)
sink()
# The unbraced `if` guards only the png() call: when show_plot is FALSE the
# plot goes to a file, otherwise to the screen; plotterforGUI() always runs.
if (!show_plot)
png('kinFit.png', width = 1024, height = 768, res=100)
plotterforGUI(
modtype = "kin",
data = test_simData,
model = kinModel,
result = kinFit,
lin = tmax_sim
)
if (!show_plot)
dev.off()
## Run a spectral model
specModel<- TIMP::initModel(mod_type = "spec",
specpar = specpar_guess,
nupow=1,
specfun="gaus")
specFit<- TIMP::fitModel(
data = list(test_simData),
modspec = list(specModel),
opt = specopt(iter = 99,
plot = FALSE)
)
specFitSummary<- summary(
specFit$currModel@fit@nlsres[[1]],
currModel = specFit$currModel,
currTheta = specFit$currTheta,
correlation = TRUE
)
sink("specFitSummary.txt")
print(specFitSummary)
sink()
if (!show_plot)
png('specFit.png', width = 1024, height = 768, res=100)
plotterforGUI(
modtype = "spec",
data = test_simData,
model = specModel,
result = specFit,
lin = tmax_sim
)
if (!show_plot)
dev.off()
# Run a spectral temporal model
specTempModel<- TIMP::initModel(mod_type = "kin",
kinpar = kinpar_guess,
irfpar = irfpar_sim,
seqmod = seqmod_sim)
# here's the crucial bit!
specTempModel@specpar <- specpar_guess
# Spectral-temporal model only supported in paramGUI, not in TIMP
specTempFit<- paramGUI::spectemp(
sim = test_simData,
model = specTempModel,
iter = 99,
kroncol = FALSE,
lin = tmax_sim,
l_posk = FALSE
)
specTempFitSummary<- summary(specTempFit$onls)
sink("specTempFitSummary.txt")
print(specTempFitSummary)
sink()
if (!show_plot)
png('specTempFit.png', width = 1024, height = 768, res=100)
# NOTE(review): `model = kinModel` (not specTempModel) is passed here --
# presumably intentional for the plotting axes, but worth confirming.
plotterforGUI(
modtype = "spectemp",
data = test_simData,
model = kinModel,
result = specTempFit$onls,
theta = specTempFit$theta,
lin = tmax_sim
)
if (!show_plot)
dev.off()
# Reset wd
setwd(file.path(initial_wd))
|
# IMPORT DATA
# Homework: domestic gross % by genre (Section 6 dataset).
# FIX: the script uses ggplot()/aes()/theme() but never attached ggplot2 in
# the visible file -- load it explicitly so the script runs standalone.
library(ggplot2)
movies <- read.csv("Section6-Homework-Data.csv")
str(movies)
# SUBSETTING DATA
# Columns 3, 6, 8, 18 are genre, studio, budget and gross % (renamed below).
m <- movies[, c(3, 6, 8, 18)]
colnames(m) <- c("Genre", "Studio", "BudgetMillions", "GrossPercentage")
# Restrict to the genres and studios requested by the assignment.
genre_plot <- c("action", "adventure", "animation", "comedy", "drama")
studio_plot <- c("Buena Vista Studios", "Fox", "Paramount Pictures", "Sony", "Universal", "WB")
m <- m[m$Genre %in% genre_plot & m$Studio %in% studio_plot, ]
# Jittered points coloured by studio and sized by budget, with red boxplots
# on top (outliers suppressed since the jitter already shows every point).
p <- ggplot(data = m, aes(x = Genre, y = GrossPercentage))
q <- p +
  geom_jitter(aes(color = Studio, size = BudgetMillions)) +
  geom_boxplot(alpha = 0.7, outlier.color = NA, color = "red")
q <- q +
  xlab("Genre") +
  ylab("Gross % US") +
  ggtitle("Domestic Gross % By Genre") +
  theme(
    text = element_text(family = "Comic Sans MS"),
    # axes title
    axis.title = element_text(color = "blue", size = 20),
    # tick
    axis.text = element_text(size = 12),
    # title
    plot.title = element_text(color = "black", size = 20),
    # legend
    legend.title = element_text(size = 15),
    legend.text = element_text(size = 12)
  )
# Rename the size legend by editing the labels directly (the course predates
# labs(size = ...)); use `<-`, not `=`, for assignment.
q$labels$size <- "Budget $M"
q
|
/Section 6 GGPlot2/section 6 - homework.R
|
no_license
|
tomytjandra/r-programming-a-to-z
|
R
| false
| false
| 1,164
|
r
|
# IMPORT DATA
# Homework: domestic gross % by genre (Section 6 dataset).
# FIX: the script uses ggplot()/aes()/theme() but never attached ggplot2 in
# the visible file -- load it explicitly so the script runs standalone.
library(ggplot2)
movies <- read.csv("Section6-Homework-Data.csv")
str(movies)
# SUBSETTING DATA
# Columns 3, 6, 8, 18 are genre, studio, budget and gross % (renamed below).
m <- movies[, c(3, 6, 8, 18)]
colnames(m) <- c("Genre", "Studio", "BudgetMillions", "GrossPercentage")
# Restrict to the genres and studios requested by the assignment.
genre_plot <- c("action", "adventure", "animation", "comedy", "drama")
studio_plot <- c("Buena Vista Studios", "Fox", "Paramount Pictures", "Sony", "Universal", "WB")
m <- m[m$Genre %in% genre_plot & m$Studio %in% studio_plot, ]
# Jittered points coloured by studio and sized by budget, with red boxplots
# on top (outliers suppressed since the jitter already shows every point).
p <- ggplot(data = m, aes(x = Genre, y = GrossPercentage))
q <- p +
  geom_jitter(aes(color = Studio, size = BudgetMillions)) +
  geom_boxplot(alpha = 0.7, outlier.color = NA, color = "red")
q <- q +
  xlab("Genre") +
  ylab("Gross % US") +
  ggtitle("Domestic Gross % By Genre") +
  theme(
    text = element_text(family = "Comic Sans MS"),
    # axes title
    axis.title = element_text(color = "blue", size = 20),
    # tick
    axis.text = element_text(size = 12),
    # title
    plot.title = element_text(color = "black", size = 20),
    # legend
    legend.title = element_text(size = 15),
    legend.text = element_text(size = 12)
  )
# Rename the size legend by editing the labels directly (the course predates
# labs(size = ...)); use `<-`, not `=`, for assignment.
q$labels$size <- "Budget $M"
q
|
# Standard testthat bootstrap (tests/testthat.R): run by R CMD check to
# execute all unit tests under tests/testthat/ for the grieman package.
library(testthat)
library(grieman)
test_check("grieman")
|
/tests/testthat.R
|
no_license
|
grieman/grieman
|
R
| false
| false
| 59
|
r
|
# Standard testthat bootstrap (tests/testthat.R): run by R CMD check to
# execute all unit tests under tests/testthat/ for the grieman package.
library(testthat)
library(grieman)
test_check("grieman")
|
#' @title Metropolis-Hastings sampler for lasso estimator under the fixed active set.
#'
#' @description Metropolis-Hastings sampler for lasso estimator
#' under the fixed active set.
#'
#' @param X predictor matrix.
#' @param PE,sig2,lbd parameters of target distribution.
#' (point estimate of beta or \code{E(y)} depends on \code{PEtype}, variance estimate of error and lambda).
#' @param weights weight vector with length \code{p}(the number of covariates).
#' Default is \code{weights = rep(1, p)}.
#' @param B0 numeric vector with length \code{p}.
#' Initial value of lasso estimator.
#' @param S0 numeric vector with length \code{p}.
#' Initial value of subgradients.
#' If not given, this will be generated in a default way.
#' @param A numeric vector. Active coefficient index.
#' Every active coefficient index in \code{B0} must be included.
#' Default is \code{A = which(B0 != 0)}.
#' @param tau numeric vector with length \code{p}.
#' Standard deviation of proposal distribution for each coefficient.
#' @param niter integer. The number of iterations. Default is \code{niter = 2000}
#' @param burnin integer. The length of burn-in periods. Default is \code{burnin = 0}
#' @param updateS.itv integer. Update subgradients every \code{updateS.itv} iterations.
#' Set this value larger than \code{niter} if one wants to skip updating subgradients.
#' @param PEtype Type of \code{PE} which is needed to characterize the target distribution.
#' Users can choose either \code{"coeff"} or \code{"mu"}.
#' @param verbose logical. If true, print out the progress step.
#' @param ... complementary arguments.
#' \itemize{
#' \item{\code{FlipSA :}}{ optional parameter.
#' This has to be a subset of active set, A. If the index is not listed in FlipSA,
#' the sign of coefficients which correspond to the listed index will remain fixed.
#' The default is \code{FlipSA=A}}
#' \item{\code{SFindex :}}{ optional parameter. subgradient index for the free coordinate.}
#' \item{\code{randomSFindex :}}{ logical. If \code{true}, resample \code{SFindex} every
#' \code{updateSF.itv} iterations.}
#' \item{\code{updateSF.itv :}}{ integer. In every \code{updateSF.itv} iterations,
#' randomize \code{SFindex}.}
#' }
#'
#' @details Given appropriate initial value, provides Metropolis-Hastings samples
#' under the fixed active set.
#'
#' @return \code{MHLS} returns an object of class \code{"MHLS"}.
#' The functions \code{\link{summary.MHLS}} and \code{\link{plot.MHLS}}
#' provide a brief summary and generate plots.
#' \item{beta}{lasso samples.}
#' \item{subgrad}{subgradient samples.}
#' \item{acceptHistory}{numbers of acceptance and proposal.}
#' \item{niter, burnin, PE, type}{same as function arguments.}
#'
#' @references
#' Zhou, Q. (2014), "Monte Carlo simulation for Lasso-type problems by estimator augmentation,"
#' Journal of the American Statistical Association, 109, 1495-1516.
#'
#' Zhou, Q. and Min, S. (2017), "Estimator augmentation with applications in
#' high-dimensional group inference," Electronic Journal of Statistics, 11(2), 3039-3080.
#'
#' @examples
#' #-------------------------
#' # Low dim
#' #-------------------------
#' set.seed(123)
#' n <- 10
#' p <- 5
#' X <- matrix(rnorm(n * p), n)
#' Y <- X %*% rep(1, p) + rnorm(n)
#' sigma2 <- 1
#' lbd <- .37
#' weights <- rep(1, p)
#' LassoResult <- Lasso.MHLS(X = X, Y = Y, lbd = lbd, type = "lasso", weights = weights)
#' B0 <- LassoResult$B0
#' S0 <- LassoResult$S0
#' MHLS(X = X, PE = rep(0, p), sig2 = 1, lbd = 1,
#' weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#' PEtype = "coeff")
#' MHLS(X = X, PE = rep(0, n), sig2 = 1, lbd = 1,
#' weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#' PEtype = "mu")
#'
#' #-------------------------
#' # High dim
#' #-------------------------
#' set.seed(123)
#' n <- 5
#' p <- 10
#' X <- matrix(rnorm(n*p),n)
#' Y <- X %*% rep(1,p) + rnorm(n)
#' weights <- rep(1,p)
#' LassoResult <- Lasso.MHLS(X = X,Y = Y,lbd = lbd, type = "lasso", weights = weights)
#' B0 <- LassoResult$B0
#' S0 <- LassoResult$S0
#' MHLS(X = X, PE = rep(0, p), sig2 = 1, lbd = 1,
#' weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#' PEtype = "coeff")
#' MHLS(X = X, PE = rep(0, n), sig2 = 1, lbd = 1,
#' weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#' PEtype = "mu")
#' @export
# User-facing entry point (documented in the roxygen block above).  Performs
# no work itself: every argument is forwarded *by name* to MHLSmain(), which
# does the validation and runs the sampler.  Forwarding by name matters here
# because a user-omitted S0 must stay "missing" in the callee -- MHLSswp()
# tests missing(S0) to decide whether to generate a default subgradient.
MHLS <- function(X, PE, sig2, lbd,
weights = rep(1, ncol(X)), B0, S0, A = which(B0 != 0),
tau = rep(1, ncol(X)), niter = 2000, burnin = 0,
PEtype = "coeff", updateS.itv = 1, verbose = FALSE, ...)
{
MHLSmain(X = X, PE = PE, sig2 = sig2, lbd = lbd,
weights = weights, B0 = B0, S0 = S0, A = A,
tau = tau, niter = niter, burnin = burnin, PEtype = PEtype,
updateS.itv = updateS.itv, verbose = verbose, ...)
}
# Input-validation layer for MHLS(): checks every user-supplied argument
# (dimensions, uniqueness of the active set A, positivity of weights/sig2/lbd,
# burnin < niter) before dispatching to the sampler workhorse MHLSswp(), then
# tags the result with class "MHLS" and the run metadata consumed by the
# print/summary/plot methods.  S0 is forwarded by name so that a missing S0
# remains missing in MHLSswp() (which then generates a default subgradient).
MHLSmain <- function (X, PE, sig2, lbd,
                      weights, B0, S0, A, tau, niter, burnin, PEtype, updateS.itv, verbose, ...)
{
  #------------------
  # Error handling
  #------------------
  n <- nrow(X)
  p <- ncol(X)
  # Check that the required distribution parameters are present *first* --
  # the original performed this check after already evaluating sig2/lbd, so a
  # missing sig2 raised an unhelpful low-level error instead of this message.
  if (any(missing(PE), missing(sig2), missing(lbd))) {
    stop("provide all the parameters for the distribution")
  }
  if (PEtype == "coeff" && length(PE) != p) {
    stop("length(PE) must be the same with ncol(X), if PEtype = \"coeff\"")
  }
  if (PEtype == "mu" && length(PE) != n) {
    stop("length(PE) must be the same with nrow(X), if PEtype = \"mu\"")
  }
  if (length(B0) != p || (!missing(S0) && length(S0) != p)) {
    stop("length(B0) and/or length(S0) has to be the same with ncol(X)")
  }
  if (n < p && length(A) > n) {
    stop("Invalid active set index, A. Cannot be larger than min(nrow(X),ncol(X)).")
  }
  if (length(A) != length(unique(A))) {
    stop("Invalid active set index, A.")
  }
  if (!PEtype %in% c("coeff", "mu")) {
    stop("Invalid PEtype input.")  # typo fix: was "Invalide"
  }
  if (length(weights) != p) {
    stop("length(weights) has to be the same with the number of coefficients")
  }
  if (any(weights <= 0)) {
    stop("weights should be positive.")
  }
  if (sig2 <= 0 || lbd <= 0) {
    stop("sig2 and/or lbd have to be positive.")
  }
  if (burnin >= niter) {
    # Message fixed: the condition rejects burnin >= niter, so burnin has to
    # be *smaller* than niter (the original message said "greater").
    stop("burnin has to be smaller than niter")
  }
  if (niter <= 1) {
    stop("niter should be an integer greater than 1.")
  }
  # All checks passed: run the sampler and attach metadata for the S3 methods.
  est <- MHLSswp(X = X, PE = PE, sig2 = sig2,
                 lbd = lbd, weights = weights, B0 = B0, S0 = S0, A = A,
                 tau = tau, niter = niter, burnin = burnin, PEtype = PEtype,
                 updateS.itv = updateS.itv, verbose = verbose, ...)
  class(est) <- "MHLS"
  est$niter <- niter
  est$burnin <- burnin
  est$PE <- PE
  est$PEtype <- PEtype
  return(est)
}
# Workhorse Metropolis-Hastings sweep behind MHLS()/MHLSmain().
#
# Draws `niter` correlated samples of the lasso estimator (B) and its
# subgradient (S) under a *fixed* active set A, via coordinate-wise MH moves:
#   n >= p : direct updates using the inverse Gram matrix (density of U).
#   n <  p : updates in the row space of X; the inactive subgradient is
#            parameterized through a set of "free" coordinates (SFindex),
#            following Zhou (2014).
# FlipSA lists the active coordinates whose sign may flip; active coordinates
# in A but not in FlipSA keep their sign (truncated-normal proposals).
# Returns a list(beta, subgrad, acceptHistory) with post-burn-in samples.
MHLSswp <- function(X, PE, sig2, lbd, weights,
                    B0, S0, A, tau, niter,
                    burnin, PEtype, FlipSA = A, SFindex,
                    randomSFindex = TRUE, updateSF.itv = round(niter/20), updateS.itv,
                    verbose, ...)
{
  X <- as.matrix(X)
  n <- nrow(X)
  p <- ncol(X)
  NN <- floor((1:10) * niter / 10) # verbose progress milestones (10%, 20%, ...)
  A <- unique(A)        # active set
  Ac <- setdiff(1:p, A) # inactive set
  nA <- length(A)       # size of the active set
  nI <- length(Ac)      # size of the inactive set
  C <- crossprod(X) / n # Gram matrix
  if (!all(which(B0 != 0) %in% A)) {
    stop("Invalid active set index, A. The active set, A, has to include every index of nonzero B0.")
  }
  if (any(!A %in% 1:p)) {
    stop("Invalid active set index, A. The active set, A, has to be a subset of 1:ncol(X)")
  }
  if (!missing(S0) && !all(round(S0[which(B0 != 0)], 3) == sign(B0[B0 != 0]))) {
    stop("Invalid S0. Leave S0 blank, if S0 is unknown.")
  }
  if (length(tau) != ncol(X)) {
    # Message fixed: the condition compares against ncol(X), not length(A).
    stop("tau must have the same length as ncol(X).")
  }
  if (n >= p) { # Low-dim MH
    # Precalculation.  (If X were centered here, B0/S0 would need recomputing.)
    Cinv <- solve(C) # inverse Gram matrix
    logdiagC <- log(diag(C))
    Vinv <- n / (2 * sig2) * Cinv # non-exponential part of the density of U
    if (PEtype == "coeff") {
      CB <- C %*% PE               # PE is the plug-in beta
    } else {
      CB <- crossprod(X, PE) / n   # PE is the plug-in E(y)
    }
    lbdwgt <- lbd * weights
    loglbd <- log(lbdwgt)
    # Initialization.
    B <- matrix(0, niter, p) # coefficient samples
    S <- matrix(0, niter, p) # subgradient samples
    B[1, ] <- B0
    if (missing(S0)) {
      # Default subgradient: signs on the active set, uniform(-1,1) elsewhere.
      S[1, A] <- sign(B[1, A])
      S[1, Ac] <- runif(length(Ac), -1, 1)
    } else {
      S[1, ] <- S0
    }
    Ucur <- C %*% B[1, ] + lbdwgt * S[1, ] - CB # current U
    ldetDRatio <- 0
    if (nA >= 1) {negCAAinv <- -solve(C[A, A])} else {negCAAinv <- NULL}
    nAccept <- numeric(2)
    nProp <- (niter - burnin) * c(nA, nI)
    if (length(setdiff(FlipSA, A)) != 0)
      stop("FlipSA has to be a subset of active set, A.")
    A2 <- setdiff(A, FlipSA) # active coordinates with frozen sign
    if (length(A2) != 0) {
      # Truncation bounds keeping each frozen coordinate on its initial sign.
      LUbounds <- matrix(0, p, 2)
      LUbounds[B0 < 0, 1] <- -Inf
      LUbounds[B0 > 0, 2] <- Inf
    }
    for (t in 2:niter)
    {
      if (nA >= 1) {
        if (length(FlipSA) != 0) {
          # Sign-flippable active coordinates: Gaussian random-walk proposal;
          # the subgradient entry follows the proposed sign.
          for (j in FlipSA) {
            b_prop <- rnorm(1, mean = B[t - 1, j], sd = tau[j])
            s_prop <- sign(b_prop)
            DiffU <- (b_prop - B[t - 1, j]) * C[, j]
            DiffU[j] <- DiffU[j] + lbdwgt[j] * (s_prop - S[t - 1, j])
            Uprop <- Ucur + DiffU
            logMH <- -t(Ucur + Uprop) %*% Vinv %*% DiffU
            u <- runif(1)
            if (log(u) < logMH)
            {
              B[t, j] <- b_prop
              S[t, j] <- s_prop
              Ucur <- Uprop
              if (t > burnin) {nAccept[1] <- nAccept[1] + 1}
            } else {
              B[t, j] <- B[t - 1, j]
              S[t, j] <- S[t - 1, j]
            }
          }
        }
        if (length(A2) != 0) {
          # Frozen-sign active coordinates: truncated-normal proposal that
          # cannot cross zero; MH ratio corrected for proposal asymmetry.
          for (j in A2) {
            b_prop <- rtnorm(1, mean = B[t - 1, j], sd = tau[j],
                             lower = LUbounds[j, 1],
                             upper = LUbounds[j, 2])
            Ccur <- pnorm(0, mean = B[t-1, j], sd = tau[j], lower.tail = (B[t-1, j] < 0), log.p = FALSE)
            Cnew <- pnorm(0, mean = b_prop, sd = tau[j], lower.tail = (b_prop < 0), log.p = FALSE)
            lqratio <- log(Ccur/Cnew)
            DiffU <- (b_prop - B[t - 1, j]) * C[, j]
            Uprop <- Ucur + DiffU
            logMH <- -t(Ucur + Uprop) %*% Vinv %*% DiffU + lqratio
            u <- runif(1)
            if (log(u) < logMH)
            {
              B[t, j] <- b_prop
              # BUGFIX: the original assigned `s_prop` here, but no s_prop is
              # drawn in this branch -- it reused a stale value from the
              # FlipSA loop (or was undefined when FlipSA is empty).  The
              # truncated proposal cannot change sign, so the subgradient
              # entry is simply carried over.
              S[t, j] <- S[t - 1, j]
              Ucur <- Uprop
              if (t > burnin) {nAccept[1] <- nAccept[1] + 1}
            } else {
              B[t, j] <- B[t - 1, j]
              S[t, j] <- S[t - 1, j]
            }
          }
        }
      }
      # Inactive subgradients: independent uniform(-1,1) proposals, refreshed
      # every updateS.itv iterations; otherwise carried over unchanged.
      if (nI >= 1 && (t %% updateS.itv == 0)) {
        for (j in Ac)
        {
          s_prop <- runif(1, -1, 1)
          diffu <- lbdwgt[j] * (s_prop - S[t - 1, j])
          Uprop <- Ucur
          Uprop[j] <- Ucur[j] + diffu
          logMH <- -t(Ucur + Uprop) %*% Vinv[, j] * diffu
          u <- runif(1)
          if (log(u) < logMH)
          {
            S[t, j] <- s_prop
            Ucur <- Uprop
            if (t > burnin) {nAccept[2] <- nAccept[2] + 1}
          } else {
            S[t, j] <- S[t - 1, j]
          }
        }
      } else {
        S[t, Ac] <- S[t - 1, Ac]
      }
      if (verbose && sum(t == NN) == 1) {
        aa <- which(NN == t)
        cat(paste("Updating : ", aa * 10, "%", sep = ""), "\n")
      }
    }
  }
  if (n < p) {
    # Precalculation.  Work in the eigenbasis of the Gram matrix: R indexes
    # the n nonzero eigen-directions (row space), N the p-n null directions.
    C <- t(X) %*% X / n
    egC <- eigen(C)
    V <- egC$vectors
    R <- 1:n
    N <- (n + 1):p
    InvVarR <- 1 / (egC$values[R] * sig2 / n) # inverse of (sig2*Lambda_i/n)
    VR <- matrix(V[, R], p, n)
    VRC <- t(VR) %*% C
    W <- diag(weights)
    LBD <- diag(egC$values[R])
    lbdVRW <- lbd * t(VR) %*% W
    if (PEtype == "coeff") {
      VRCB <- t(VR) %*% C %*% PE
    } else {
      VRCB <- t(VR) %*% crossprod(X, PE) / n
    }
    if (length(setdiff(FlipSA, A)) != 0)
      stop("FlipSA has to be a subset of active set, A.")
    A2 <- setdiff(A, FlipSA)
    V_AN <- V[A, N, drop = FALSE]
    V_IN <- V[Ac, N, drop = FALSE]
    V_AR <- V[A, R, drop = FALSE]
    V_IR <- V[Ac, R, drop = FALSE]
    BB <- t(V_IN) %*% W[Ac, Ac]
    tVAN.WA <- t(V_AN) %*% W[A, A]
    # A user-supplied S0 must be orthogonal to the null directions of X.
    # BUGFIX: all.equal() returns a character vector (not FALSE) on mismatch,
    # so the original `!all.equal(...)` raised a type error instead of the
    # intended warning; wrap in isTRUE().
    if (!missing(S0) && !isTRUE(all.equal(t(V[, N]) %*% S0, matrix(0, length(N), 1)))) {
      warning("Invalid S0. Regenerate S0 with a default way")
      S0 <- NULL
    }
    if (missing(S0) || is.null(S0)) {
      # Generate a feasible S0: least squares with equality constraint BB %*%
      # s = F1 and box constraint |s| <= 1 on the inactive coordinates.
      E <- BB
      H <- rep(-1, 2*nI)
      G <- rbind(diag(rep(1, nI)), diag(rep(-1, nI)))
      F1 <- -tVAN.WA %*% sign(B0[A])
      S0.prop <- limSolve::lsei(G = G, H = H, E = E, F = F1)
      if (S0.prop$IsError) {
        stop("There exist no solution for the given 'B0'.")
      } else {
        S0 <- rep(0, p)
        S0[A] <- sign(B0[A])
        S0[-A] <- S0.prop$X
      }
    }
    if (n - nA != 0) {
      # SFindex : indices of the "free" subgradient coordinates S_F.
      if (missing(SFindex)) {SFindex <- 1:(n - nA)}
      if (length(SFindex) != n - nA) {
        warning("Length of SFindex has to be same as 'n-length(A)'. Automatically set 'SFindex <- 1:(n-nA)'.")
        SFindex <- 1:(n - nA)
      }
      B_F <- BB[, SFindex, drop = FALSE]
      B_D <- BB[, -SFindex, drop = FALSE]
      invB_D <- solve(B_D)
      BDF <- invB_D %*% B_F
    } else if (n - nA == 0) {
      if (missing(SFindex)) {SFindex <- NULL}
      if (!is.null(SFindex)) {
        warning("If 'n-nA == 0', SFindex has to be set to NULL. Automatically set 'SFindex <- NULL'.")
        SFindex <- NULL
      }
      BDF <- invB_D <- solve(BB) # NEED TO CHECK (kept from original)
      B_F <- NULL
    }
    # Initialization.
    B <- matrix(0, niter, p)
    S <- matrix(0, niter, p)
    B[1, ] <- B0
    S[1, ] <- S0
    Rcur <- VRC[, A, drop = FALSE] %*% t(B[1, A, drop = FALSE]) + lbdVRW %*% S[1, ] - VRCB
    logfRcur <- -0.5 * sum(Rcur^2 * InvVarR)
    Tcur <- CalTmat(p, n, V, LBD, W, lbd, R, N, A, Ac)
    logdensitycur <- logfRcur + Tcur$logdetT
    # Acceptance-rate bookkeeping.
    nAccept <- numeric(2)
    nProp <- c(nA*(niter - max(1, burnin)), (n - nA)*(niter - max(1, burnin)))
    # Sign-change counts (collected from UpdateBA).
    nSignChange <- numeric(3)
    for (t in 2:niter)
    {
      # P1: update the active coefficients b_A.
      if (nA >= 1)
      {
        S[t, ] <- S[t-1, ]
        if (length(FlipSA) != 0) {
          MoveBA <- UpdateBA(B[t-1, ], S[t-1, ], tau, A, Ac, Rcur, logfRcur, VRC, lbdVRW, InvVarR,
                             tVAN.WA, invB_D, B_F, FlipSA, SFindex)
          B[t, ] <- MoveBA$B
          S[t, ] <- MoveBA$S
          nSignChange <- nSignChange + MoveBA$nChangeSign
          Rcur <- MoveBA$Rvec
          logfRcur <- MoveBA$logf
          nAccept[1] <- nAccept[1] + MoveBA$nAccept
        }
        if (length(A2) != 0) {
          # Frozen-sign coordinates are updated by the fixed-sign variant.
          MoveBA <- UpdateBA.fixedSA(B[t-1, ], tau, A2, Rcur, logfRcur, VRC, InvVarR)
          B[t, A2] <- MoveBA$B[A2]
          Rcur <- MoveBA$Rvec
          logfRcur <- MoveBA$logf
          nAccept[1] <- nAccept[1] + MoveBA$nAccept
        }
      } else {B[t, ] <- B[t-1, ]; S[t, ] <- S[t-1, ]}
      # P2: update the inactive subgradient S_I.
      if (nA < n && (t %% updateS.itv == 0))
      {
        MoveSI <- UpdateSI(S[t, ], A, Ac, Rcur, n, p, logfRcur, lbdVRW, InvVarR,
                           tVAN.WA, invB_D, BDF, B_F, SFindex, ...)
        S[t, ] <- MoveSI$S
        Rcur <- MoveSI$Rvec
        logfRcur <- MoveSI$logf
        nAccept[2] <- nAccept[2] + MoveSI$nAccept
      }
      # Optionally re-randomize the free coordinate set every updateSF.itv.
      if (!is.null(SFindex) && randomSFindex && (t %% updateSF.itv == 0)) {
        SFindex <- sort(sample(1:(p - nA), n - nA))
        if (verbose) {cat("New SFindex : [", paste(SFindex, collapse = ", "), "]\n")}
        B_F <- BB[, SFindex, drop = FALSE]
        B_D <- BB[, -SFindex, drop = FALSE]
        invB_D <- solve(B_D)
        BDF <- invB_D %*% B_F
      }
      if (verbose && sum(t == NN) == 1) {
        aa <- which(NN == t)
        cat(paste("Updating : ", aa*10, "%", sep = ""), "\n")
      }
    }
  }
  # Drop the burn-in rows (if any) and return samples plus acceptance counts.
  return(list(beta = B[if (burnin != 0){-c(1:burnin)} else {1:niter}, ],
              subgrad = S[if (burnin != 0){-c(1:burnin)} else {1:niter}, ],
              acceptHistory = rbind(nAccept, nProp)))
}
# MHLSgroup <- function(X, PE, sig2, lbd,
# weights, group, B0, S0, A, tau, niter, burnin, PEtype = "coeff", updateS.itv, verbose)
# {
# if ( all.equal(group.norm2(S0, group)[A], rep(1, length(A)), tolerance = 1e-04) != TRUE ) {
# stop("Invalid S0. Use LassoMHLS for a good initial value.")
# }
# n <- nrow(X)
# p <- ncol(X)
#
# K <- 10
# W <- rep(weights,table(group))
# Psi <- 1/n * crossprod(X)
# if (n > p) {
# inv.Var <- n/sig2 * solve(Psi)
# }
# nA <- length(A)
#
# r.seq <- matrix(, niter, max(group))
# S.seq <- matrix(, niter ,p)
# nAccept <- numeric(2)
# nProp <- c(nA*(niter-max(1,burnin)), max(group)*(niter-max(1,burnin)))
#
# rcur <- group.norm2(B0,group)
# r.seq[1, ] <- rcur
# S.seq[1, ] <- Scur <- S0
#
# if (PEtype == "coeff") {
# Hcur <- drop(Psi %*% drop(B0 - PE) + lbd * W * drop(S0))
# } else {
# Hcur <- drop(Psi %*% drop(B0) - t(X) %*% PE / n + lbd * W * drop(S0))
# }
#
# if (n >= p) {
# for (i in 2:niter) {
# r.new <- ld.Update.r(rcur,Scur,A,Hcur,X,PE,Psi,W,lbd,group,inv.Var,tau,PEtype,n,p)
# r.seq[i,] <- rcur <- r.new$r
# Hcur <- r.new$Hcur
# if (i > burnin) {nAccept[1] <- nAccept[1] + r.new$nrUpdate}
#
# if (i %% updateS.itv == 0) {
# S.new <- ld.Update.S (rcur,Scur,A,Hcur,X,PE,Psi,W,lbd,group,inv.Var,PEtype,n,p)
# S.seq[i,] <- Scur <- S.new$S
# Hcur <- S.new$Hcur
# } else {
# S.seq[i,] <- Scur
# }
# if (i > burnin) {nAccept[2] <- nAccept[2] + S.new$nSUpdate}
#
# if (verbose && (i %% round(niter/10) == 0)) {
# cat("MCMC step,", K, "% Finished\n")
# K <- K+10
# }
# }
# # } else {
# # for (i in 2:niter) {
# # r.new <- hd.Update.r(rcur,Scur,A,Hcur,X,PE,Psi,W,lbd,group,inv.Var,1)
# # r.seq[i,] <- rcur <- r.new$r
# # Hcur <- r.new$Hcur
# # nAccept[1] <- nAccept[1] + r.new$nrUpdate
# #
# # S.new <- hd.Update.S (rcur,Scur,A,Hcur,X,PE,Psi,W,lbd,group,inv.Var,p)
# # S.seq[i,] <- Scur <- S.new$S
# # Hcur <- S.new$Hcur
# # nAccept[2] <- nAccept[2] + S.new$nSUpdate
# #
# # if (verbose && (i %% round(niter/10) == 0)) {
# # cat("MCMC step,", K, "% Finished\n")
# # K <- K+10
# # }
# # }
# }
# return(list(
# group.l2.norm = r.seq[if (burnin != 0){-c(1:burnin)} else {1:niter}, ],
# subgrad = S.seq[if (burnin != 0){-c(1:burnin)} else {1:niter}, ],
# acceptHistory = rbind(nAccept, nProp)))
# }
#' @method print MHLS
#' @title Print Metropolis-Hastings sampler outputs
#'
#' @description Print a brief summary of the MH sampler outputs.
#'
#' @param x an object of class "MHLS", which is a result of \code{\link{MHLS}}.
#' @param ... additional print arguments.
#' @details
#' \code{\link{print.MHLS}} prints out last 10 iterations and a brief summary
#' of the simulation; number of iterations, number of burn-in periods, PE, PEtype and
#' acceptance rate.
#'
#' @return Above results are silently returned.
#'
#' @examples
#' set.seed(123)
#' n <- 10
#' p <- 5
#' X <- matrix(rnorm(n * p), n)
#' Y <- X %*% rep(1, p) + rnorm(n)
#' sigma2 <- 1
#' lbd <- .37
#' weights <- rep(1, p)
#' LassoResult <- Lasso.MHLS(X = X, Y = Y, lbd = lbd, type="lasso", weights = weights)
#' B0 <- LassoResult$B0
#' S0 <- LassoResult$S0
#' Result <- MHLS(X = X, PE = rep(0, p), sig2 = sigma2, lbd = lbd,
#' weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#' PEtype = "coeff")
#' print(Result)
#' @export
# S3 print method for "MHLS" objects (see roxygen block above): shows the run
# metadata, the last 10 sampled beta/subgradient rows, and acceptance rates.
# The commented-out branches are leftovers from a group-lasso variant.
print.MHLS <- function (x, ...) {
cat ("===========================\n")
cat ("Number of iteration: ", x$niter,"\n\n")
cat ("Burn-in period: ", x$burnin,"\n\n")
cat ("Plug-in PE: \n")
print(x$PE)
cat ("PEtype: \n")
print(x$PEtype)
# if (inherits(x,"Group")) {
# Group.matrix <- matrix(0, length(unique(x$group)), p)
# Group.matrix[cbind(x$group,1:p)] <- 1
# beta <- (x$group.l2.norm %*% Group.matrix) * x$subgrad
# }
cat ("\nLast 10 steps of beta's:\n")
# if (inherits(x,"GroupLasso")) {
# if (x$niter-x$burnin <= 9) {
# print(x$group.l2.norm)
# } else {
# print(x$group.l2.norm[(x$niter-x$burnin-9):(x$niter-x$burnin),])
# }
# }
# Fewer than 10 retained draws: print them all; otherwise the trailing 10.
if (x$niter-x$burnin <= 9) {
# if (inherits(x,"Group")) {
# #print(x$group.l2.norm)
# print(beta)
# } else {
print(x$beta)
# }
} else {
# if (inherits(x,"Group")) {
# print(beta[(x$niter-x$burnin-9):(x$niter-x$burnin),])
# #print(x$group.l2.norm[(x$niter-x$burnin-9):(x$niter-x$burnin),])
# } else {
print(x$beta[(x$niter-x$burnin-9):(x$niter-x$burnin),])
# }
}
cat ("\nlast 10 steps of subgradients:\n")
if (x$niter-x$burnin <= 9) {
print(x$subgrad)
} else {
print(x$subgrad[(x$niter-x$burnin-9):(x$niter-x$burnin),])
}
# acceptHistory row 1 = accepted moves, row 2 = proposed moves,
# columns = (beta, subgradient); see MHLSswp().
cat ("\nAcceptance rate:\n")
cat("-----------------------------\n")
cat("\t \t \t beta \t subgrad\n")
# if (inherits(x,"GroupLasso")) {
# cat("\t \t l_2 group norm\t subgrad\n")
# } else {
# cat("\t \t \t beta \t subgrad\n")
# }
cat("# Accepted\t : \t", paste(x$acceptHistory[1,],"\t"),"\n")
cat("# Moved\t\t : \t", paste(x$acceptHistory[2,],"\t"),"\n")
cat("Acceptance rate\t : \t", paste(round(x$acceptHistory[1,]/x$acceptHistory[2,],3),"\t"),"\n")
# cat ("\nSignChange rate:\n")
# cat("-----------------------------\n")
# cat("# Accepted\t : \t", paste(x$signchange[1],"\t"),"\n")
# cat("# Moved\t\t : \t", paste(x$signchange[2],"\t"),"\n")
# cat("# Cdt Accept \t : \t", paste(x$signchange[3],"\t"),"\n")
# cat("Acceptance rate\t : \t", paste(round(x$signchange[1]/x$signchange[2],3),"\t"),"\n")
}
#' @method summary MHLS
#' @title Summarizing Metropolis-Hastings sampler outputs
#'
#' @description Summary method for class "MHLS".
#'
#' @param object an object of class "MHLS", which is a result of \code{\link{MHLS}}.
#' @param ... additional arguments affecting the summary produced.
#'
#' @details
#' This function provides a summary of each sampled beta and subgradient.
#' @return mean, median, standard deviation, 2.5\% quantile and 97.5\% quantile
#' for each beta and its subgradient.
#' @examples
#' set.seed(123)
#' n <- 10
#' p <- 5
#' X <- matrix(rnorm(n * p), n)
#' Y <- X %*% rep(1, p) + rnorm(n)
#' sigma2 <- 1
#' lbd <- .37
#' weights <- rep(1, p)
#' LassoResult <- Lasso.MHLS(X = X, Y = Y, lbd = lbd, type = "lasso", weights = weights)
#' B0 <- LassoResult$B0
#' S0 <- LassoResult$S0
#' summary(MHLS(X = X, PE = rep(0, p), sig2 = sigma2, lbd = lbd,
#' weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#' PEtype = "coeff"))
#' @export
# S3 summary method for "MHLS" objects (see roxygen block above): reduces the
# beta and subgradient chains to per-coordinate summary statistics.
summary.MHLS <- function (object, ...) {
  # Apply the shared per-column summarizer SummBeta down each coordinate and
  # transpose so rows correspond to coordinates.
  col_summary <- function(samples) t(apply(samples, 2, SummBeta))
  out <- list(
    beta = col_summary(object$beta),
    subgradient = col_summary(object$subgrad)
  )
  class(out) <- "summary.MHLS"
  out
}
#' @method plot MHLS
#' @title Plot Metropolis-Hastings sampler outputs
#'
#' @description Provides six plots for each covariate index;
#' histogram, path plot and acf plot for beta and for its subgradient.
#'
#' @param x an object of class "MHLS", which is an output of \code{\link{MHLS}}.
#' @param index an index of covariates to plot.
#' @param skipS logical. If \code{skipS = TRUE}, plots beta only.
#' @param ... additional arguments passed to or from other methods.
#' @details
#' \code{\link{plot.MHLS}} provides summary plots of beta and subgradient.
#' The first column provides histogram of beta and subgradient, while the second
#' and the third columns provide path and acf plots, respectively.
#' If \code{skipS = TRUE}, this function provides summary plots for beta only.
#' @examples
#' set.seed(123)
#' n <- 10
#' p <- 5
#' X <- matrix(rnorm(n * p), n)
#' Y <- X %*% rep(1, p) + rnorm(n)
#' sigma2 <- 1
#' lbd <- .37
#' weights <- rep(1, p)
#' LassoResult <- Lasso.MHLS(X = X, Y = Y, lbd = lbd, type="lasso", weights = weights)
#' B0 <- LassoResult$B0
#' S0 <- LassoResult$S0
#' plot(MHLS(X = X, PE = rep(0, p), sig2 = 1, lbd = 1,
#' weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#' PEtype = "coeff"))
#' @export
# S3 plot method for "MHLS" objects (see roxygen block above).  For each
# requested coordinate it draws histogram / trace / acf of the beta chain and,
# unless skipS, the same three panels for the subgradient chain, pausing with
# readline() between coordinates.  The original duplicated the beta panels in
# both branches of `if (skipS)`; this version factors the common part out.
plot.MHLS <- function(x, index = 1:ncol(x$beta), skipS = FALSE, ...) {
  if (any(!index %in% 1:ncol(x$beta))) {
    stop("Invalid index.")
  }
  niter <- x$niter
  burnin <- x$burnin
  # 2x3 grid when subgradient panels are drawn, 1x3 otherwise.
  if (!skipS) {par(mfrow = c(2, 3))} else {par(mfrow = c(1, 3))}
  for (i in index) {
    # --- beta: histogram / trace / acf ---
    hist(x$beta[, i], breaks = 20, prob = TRUE,
         xlab = paste("Beta_", i, sep = ""), ylab = "Density", main = "")
    plot((burnin + 1):niter, x$beta[, i], xlab = "Iterations", ylab = "Path", type = "l")
    # A constant chain has no defined autocorrelation; show a placeholder.
    if (sum(abs(diff(x$beta[, i]))) == 0) {
      plot(0, type = "n", axes = FALSE, xlab = "", ylab = "")
      text(1, 0, "Auto correlation plot \n not available", cex = 1)
    } else {
      acf(x$beta[, i], xlab = "Lag", main = "")
    }
    if (!skipS) {
      # --- subgradient: histogram / trace / acf (support is [-1, 1]) ---
      hist(x$subgrad[, i], breaks = seq(-1 - 1/10, 1, by = 1/10) + 1/20,
           prob = TRUE, xlim = c(-1 - 1/20, 1 + 1/20),
           xlab = paste("Subgradient_", i, sep = ""), ylab = "Density", main = "")
      plot((burnin + 1):niter, x$subgrad[, i], xlab = "Iterations", ylab = "Samples", type = "l")
      if (sum(abs(diff(x$subgrad[, i]))) == 0) {
        plot(0, type = "n", axes = FALSE, xlab = "", ylab = "")
        text(1, 0, "Auto correlation plot \n not available", cex = 1)
      } else {
        acf(x$subgrad[, i], xlab = "Lag", main = "")
      }
    }
    readline("Hit <Return> to see the next plot: ")
  }
}
|
/R/MHLS.R
|
no_license
|
cran/EAlasso
|
R
| false
| false
| 27,264
|
r
|
#' @title Metropolis-Hastings sampler for lasso estimator under the fixed active set.
#'
#' @description Metropolis-Hastings sampler for lasso estimator
#' under the fixed active set.
#'
#' @param X predictor matrix.
#' @param PE,sig2,lbd parameters of target distribution.
#' (point estimate of beta or \code{E(y)} depends on \code{PEtype}, variance estimate of error and lambda).
#' @param weights weight vector with length \code{p}(the number of covariates).
#' Default is \code{weights = rep(1, p)}.
#' @param B0 numeric vector with length \code{p}.
#' Initial value of lasso estimator.
#' @param S0 numeric vector with length \code{p}.
#' Initial value of subgradients.
#' If not given, this will be generated in a default way.
#' @param A numeric vector. Active coefficient index.
#' Every active coefficient index in \code{B0} must be included.
#' Default is \code{A = which(B0 != 0)}.
#' @param tau numeric vector with length \code{p}.
#' Standard deviation of proposal distribution for each coefficient.
#' @param niter integer. The number of iterations. Default is \code{niter = 2000}
#' @param burnin integer. The length of burn-in periods. Default is \code{burnin = 0}
#' @param updateS.itv integer. Update subgradients every \code{updateS.itv} iterations.
#' Set this value larger than \code{niter} if one wants to skip updating subgradients.
#' @param PEtype Type of \code{PE} which is needed to characterize the target distribution.
#' Users can choose either \code{"coeff"} or \code{"mu"}.
#' @param verbose logical. If true, print out the progress step.
#' @param ... complementary arguments.
#' \itemize{
#' \item{\code{FlipSA :}}{ optional parameter.
#' This has to be a subset of active set, A. If the index is not listed in FlipSA,
#' the sign of coefficients which correspond to the listed index will remain fixed.
#' The default is \code{FlipSA=A}}
#' \item{\code{SFindex :}}{ optional parameter. subgradient index for the free coordinate.}
#' \item{\code{randomSFindex :}}{ logical. If \code{true}, resample \code{SFindex} every
#' \code{updateSF.itv} iterations.}
#' \item{\code{updateSF.itv :}}{ integer. In every \code{updateSF.itv} iterations,
#' randomize \code{SFindex}.}
#' }
#'
#' @details Given appropriate initial value, provides Metropolis-Hastings samples
#' under the fixed active set.
#'
#' @return \code{MHLS} returns an object of class \code{"MHLS"}.
#' The functions \code{\link{summary.MHLS}} and \code{\link{plot.MHLS}}
#' provide a brief summary and generate plots.
#' \item{beta}{lasso samples.}
#' \item{subgrad}{subgradient samples.}
#' \item{acceptHistory}{numbers of acceptance and proposal.}
#' \item{niter, burnin, PE, type}{same as function arguments.}
#'
#' @references
#' Zhou, Q. (2014), "Monte Carlo simulation for Lasso-type problems by estimator augmentation,"
#' Journal of the American Statistical Association, 109, 1495-1516.
#'
#' Zhou, Q. and Min, S. (2017), "Estimator augmentation with applications in
#' high-dimensional group inference," Electronic Journal of Statistics, 11(2), 3039-3080.
#'
#' @examples
#' #-------------------------
#' # Low dim
#' #-------------------------
#' set.seed(123)
#' n <- 10
#' p <- 5
#' X <- matrix(rnorm(n * p), n)
#' Y <- X %*% rep(1, p) + rnorm(n)
#' sigma2 <- 1
#' lbd <- .37
#' weights <- rep(1, p)
#' LassoResult <- Lasso.MHLS(X = X, Y = Y, lbd = lbd, type = "lasso", weights = weights)
#' B0 <- LassoResult$B0
#' S0 <- LassoResult$S0
#' MHLS(X = X, PE = rep(0, p), sig2 = 1, lbd = 1,
#' weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#' PEtype = "coeff")
#' MHLS(X = X, PE = rep(0, n), sig2 = 1, lbd = 1,
#' weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#' PEtype = "mu")
#'
#' #-------------------------
#' # High dim
#' #-------------------------
#' set.seed(123)
#' n <- 5
#' p <- 10
#' X <- matrix(rnorm(n*p),n)
#' Y <- X %*% rep(1,p) + rnorm(n)
#' weights <- rep(1,p)
#' LassoResult <- Lasso.MHLS(X = X,Y = Y,lbd = lbd, type = "lasso", weights = weights)
#' B0 <- LassoResult$B0
#' S0 <- LassoResult$S0
#' MHLS(X = X, PE = rep(0, p), sig2 = 1, lbd = 1,
#' weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#' PEtype = "coeff")
#' MHLS(X = X, PE = rep(0, n), sig2 = 1, lbd = 1,
#' weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#' PEtype = "mu")
#' @export
MHLS <- function(X, PE, sig2, lbd,
                 weights = rep(1, ncol(X)), B0, S0, A = which(B0 != 0),
                 tau = rep(1, ncol(X)), niter = 2000, burnin = 0,
                 PEtype = "coeff", updateS.itv = 1, verbose = FALSE, ...)
{
  # Thin exported wrapper: all validation and sampling happens in the
  # internal worker MHLSmain(). Arguments are forwarded by name so that
  # a missing S0 stays missing inside the worker (missingness propagates
  # through a direct named call in R).
  MHLSmain(X = X, PE = PE, sig2 = sig2, lbd = lbd, weights = weights,
           B0 = B0, S0 = S0, A = A, tau = tau,
           niter = niter, burnin = burnin, PEtype = PEtype,
           updateS.itv = updateS.itv, verbose = verbose, ...)
}
# Internal entry point behind MHLS(): validates every argument, runs the
# sampler (MHLSswp) and wraps the result as an object of class "MHLS"
# carrying beta/subgrad samples, acceptance history, and the call's
# niter, burnin, PE and PEtype.
MHLSmain <- function (X, PE, sig2, lbd,
  weights, B0, S0, A, tau, niter, burnin, PEtype, updateS.itv, verbose, ...)
{
  #------------------
  # Error handling
  #------------------
  n <- nrow(X)
  p <- ncol(X)
  # PE is interpreted as a coefficient vector (length p) or as E(y)
  # (length n) depending on PEtype.
  if (PEtype == "coeff" && length(PE) != p) {
    stop("length(PE) must be the same with ncol(X), if PEtype = \"coeff\"")
  }
  if (PEtype == "mu" && length(PE) != n) {
    stop("length(PE) must be the same with nrow(X), if PEtype = \"mu\"")
  }
  if (length(B0) != p || (!missing(S0) && length(S0) != p)) {
    stop("length(B0) and/or length(S0) has to be the same with ncol(X)")
  }
  if (n < p && length(A) > n) {
    stop("Invalid active set index, A. Cannot be larger than min(nrow(X),ncol(X)).")
  }
  if (length(A) != length(unique(A))) {
    stop("Invalid active set index, A.")
  }
  if (!PEtype %in% c("coeff", "mu")) {
    # Fixed typo in message ("Invalide" -> "Invalid").
    stop("Invalid PEtype input.")
  }
  if (length(weights) != p) {
    stop("length(weights) has to be the same with the number of coefficients")
  }
  if (any(weights <= 0)) {
    stop("weights should be positive.")
  }
  if (sig2 <=0 || lbd <= 0) {
    stop("sig2 and/or lbd have to be positive.")
  }
  if (any(missing(PE), missing(sig2), missing(lbd))) {
    stop("provide all the parameters for the distribution")
  }
  if (burnin >= niter) {
    # Fixed inverted message: the guard rejects burnin >= niter, so
    # burnin must be SMALLER than niter (the old text said "greater").
    stop("burnin has to be smaller than niter")
  }
  if (niter <= 1) {
    stop("niter should be an integer greater than 1.")
  }
  # All checks passed; run the Metropolis-Hastings sweep.
  est <- MHLSswp(X = X, PE = PE, sig2 = sig2,
                 lbd = lbd, weights = weights, B0 = B0, S0 = S0, A = A,
                 tau = tau, niter = niter, burnin = burnin, PEtype = PEtype,
                 updateS.itv = updateS.itv, verbose = verbose, ...)
  class(est) <- "MHLS"
  est$niter <- niter
  est$burnin <- burnin
  est$PE <- PE
  est$PEtype <- PEtype
  return(est)
}
# Core Metropolis-Hastings sweep behind MHLS()/MHLSmain().
# Dispatches between a low-dimensional sampler (n >= p) and a
# high-dimensional sampler (n < p). In the low-dimensional branch the
# chain is updated coordinate-wise in this file; in the high-dimensional
# branch it delegates to helpers defined elsewhere in the package
# (rtnorm, CalTmat, UpdateBA, UpdateBA.fixedSA, UpdateSI) and to
# limSolve::lsei for generating a feasible initial subgradient.
# Returns post-burn-in beta samples, subgradient samples and
# acceptance/proposal counts.
MHLSswp <- function(X, PE, sig2, lbd, weights,
                    B0, S0, A, tau, niter,
                    burnin, PEtype, FlipSA = A, SFindex,
                    randomSFindex = TRUE, updateSF.itv = round(niter/20), updateS.itv,
                    verbose, ...)
{
  X <- as.matrix(X)
  n <- nrow(X)
  p <- ncol(X)
  NN <- floor((1:10) * niter / 10) # verbose index
  A <- unique(A)  # active set
  Ac <- setdiff(1:p,A)  # inactive set
  nA <- length(A)  # size of the active set
  nI <- length(Ac) # size of the inactive set
  C <- crossprod(X) / n  #Gram matrix
  if (!all(which(B0 != 0) %in% A)) {
    stop("Invalid active set index, A. The active set, A, has to include every index of nonzero B0.")
  }
  if (any(!A %in% 1:p)) {
    stop("Invalid active set index, A. The active set, A, has to be a subset of 1:ncol(X)")
  }
  if (!missing(S0) && !all(round(S0[which(B0 != 0)], 3) == sign(B0[B0 != 0]))) {
    stop("Invalid S0. Leave S0 blank, if S0 is unknown.")
  }
  # NOTE(review): the condition compares length(tau) to ncol(X), but the
  # message talks about the active set A -- the message looks stale.
  if (length(tau) != ncol(X)) {
    stop("tau must have a same length with the active set, A.")
  }
  if (n >= p) { # Low-dim MH
    #precalculation
    #for(j in 1:p){X[,j]=X[,j]-mean(X[,j])}
    #If X to be centered, we need to re-compute B0 and S0 using centered X.
    Cinv <- solve(C) #Inverse Gram matrix
    logdiagC <- log(diag(C))
    Vinv <- n / (2 * sig2) * Cinv # non exponential part of pdf of U
    if (PEtype == "coeff") {
      CB <- C %*% PE # PE : True beta
    } else {
      CB <- crossprod(X, PE) / n # PE : True beta
    }
    lbdwgt <- lbd * weights
    loglbd <- log(lbdwgt)
    #initialization
    B <- matrix(0, niter, p) #Store Coefficient
    S <- matrix(0, niter, p) #Store Sub-grad
    B[1,] <- B0 #B0 initial value of beta
    if (missing(S0)) {
      # Default S0: signs on the active set, uniform noise elsewhere.
      S[1, A] <- sign(B[1, A])
      S[1, Ac] <- runif(length(Ac), -1, 1)
    } else {
      S[1, ] <- S0
    }
    Ucur <- C %*% B[1, ] + lbdwgt * S[1, ] - CB #Current U
    ldetDRatio <- 0
    if (nA >= 1) {negCAAinv <- -solve(C[A, A])} else {negCAAinv <- NULL}
    # nAccept[1]: accepted beta moves, nAccept[2]: accepted subgrad moves.
    nAccept <- numeric(2)
    nProp <- (niter - burnin) * c(nA, nI)
    if (length(setdiff(FlipSA, A))!=0)
      stop("FlipSA has to be a subset of active set, A.")
    # A2: active coordinates whose sign is held fixed during sampling.
    A2 <- setdiff(A,FlipSA)
    # if (any(B0[A2]==0))
    #   stop("To fix the sign of beta_j, use non-zero B0_j.")
    if (length(A2) != 0) {
      # Truncation bounds keep each fixed-sign coordinate on its side of 0.
      LUbounds <- matrix(0, p, 2);
      LUbounds[B0 < 0, 1] <- -Inf;
      LUbounds[B0 > 0, 2] <- Inf;
    }
    for(t in 2:niter)
    {
      if(nA >= 1){
        if (length(FlipSA)!=0) {
          # Sign-flippable active coordinates: Gaussian random-walk proposal.
          for (j in FlipSA) {
            b_prop <- rnorm(1, mean = B[t - 1, j], sd = tau[j])
            s_prop <- sign(b_prop)
            DiffU <- (b_prop - B[t - 1, j]) * C[, j]
            DiffU[j] <- DiffU[j] + lbdwgt[j] * (s_prop - S[t - 1, j])
            Uprop <- Ucur + DiffU
            logMH <- -t(Ucur + Uprop) %*% Vinv %*% DiffU
            u <- runif(1)
            if(log(u) < logMH)
            {
              B[t, j] <- b_prop
              S[t, j] <- s_prop
              Ucur <- Uprop
              if (t > burnin) {nAccept[1] <- nAccept[1] + 1}
              #nAccept[2]=nAccept[2]+1
            }else{
              B[t, j] <- B[t - 1, j]
              S[t, j] <- S[t - 1, j]
            }
          }
        }
        if (length(A2)!=0) {
          # Fixed-sign active coordinates: truncated-normal proposal plus
          # the proposal-asymmetry correction lqratio.
          for (j in A2) {
            b_prop <- rtnorm(1, mean = B[t - 1, j], sd = tau[j],
                             lower = LUbounds[j, 1],
                             upper = LUbounds[j, 2])
            Ccur <- pnorm(0,mean=B[t-1, j],sd=tau[j],lower.tail=(B[t-1,j]<0),log.p=FALSE);
            Cnew <- pnorm(0,mean=b_prop,sd=tau[j],lower.tail=(b_prop<0),log.p=FALSE);
            lqratio=log(Ccur/Cnew);
            DiffU <- (b_prop - B[t - 1, j]) * C[, j]
            Uprop <- Ucur + DiffU
            logMH <- -t(Ucur + Uprop) %*% Vinv %*% DiffU + lqratio
            u <- runif(1)
            if(log(u) < logMH)
            {
              B[t, j] <- b_prop
              # NOTE(review): s_prop is never assigned inside this
              # fixed-sign loop -- the value used here is stale from the
              # FlipSA loop above (or undefined if FlipSA is empty).
              # Since signs are fixed on A2, S[t - 1, j] appears intended.
              S[t, j] <- s_prop
              Ucur <- Uprop
              if (t > burnin) {nAccept[1] <- nAccept[1] + 1}
              #nAccept[2]=nAccept[2]+1
            }else{
              B[t, j] <- B[t - 1, j]
              S[t, j] <- S[t - 1, j]
            }
          }
        }
      }
      # Update the inactive-set subgradients every updateS.itv iterations.
      if(nI >= 1 && (t %% updateS.itv == 0)){
        for(j in Ac)
        {
          s_prop <- runif(1, -1, 1)
          diffu <- lbdwgt[j] * (s_prop - S[t - 1, j])
          Uprop <- Ucur
          Uprop[j] <- Ucur[j] + diffu
          logMH <- -t(Ucur + Uprop) %*% Vinv[, j] * diffu
          u <- runif(1)
          if(log(u) < logMH)
          {
            S[t,j] <- s_prop
            Ucur <- Uprop
            if (t > burnin) {nAccept[2] <- nAccept[2] + 1}
            #nAccept[3]=nAccept[3]+1
          } else {
            S[t, j] <- S[t - 1, j]
          }
        }
      } else {
        S[t, Ac] <- S[t - 1, Ac]
      }
      if (verbose && sum(t == NN)==1) {
        aa <- which(NN==t)
        cat(paste("Updating : ", aa * 10,"%" ,sep = ""), "\n")
      }
    }
    #nAccept=nAccept/c((niter-1)*selectsize,nProp)
    #nAccept=nAccept/nProp
  }
  if (n < p) {
    #precalculation---------------------------
    #for (j in 1:p) {X[,j]=X[,j]-mean(X[,j])}
    #If X to be centered, we need to recompute B0 and S0 using centered X.
    # Work in the eigenbasis of the Gram matrix: R indexes the n nonzero
    # eigenvalues, N the p - n null directions.
    C <- t(X) %*% X / n
    egC <- eigen(C)
    V <- egC$vectors
    R <- 1:n
    N <- (n + 1):p
    InvVarR <- 1 / (egC$values[R] * sig2 / n) #inverse of (sig2*Lambda_i/n)
    VR <- matrix(V[,R], p, n)
    VRC <- t(VR) %*% C
    W <- diag(weights)
    LBD <- diag(egC$values[R])
    lbdVRW <- lbd * t(VR) %*% W
    if (PEtype == "coeff") {
      VRCB <- t(VR) %*% C %*% PE
    } else {
      VRCB <- t(VR) %*% crossprod(X, PE) / n
    }
    # if (is.missing(B0)) {
    #  if (signBA == NULL)
    #    stop("If initial value of 'B0' is not given, 'signBA' has to be given.")
    #  B0 = rep(0,p)
    #  B0[A] = abs(rnorm(nA)) * signBA
    # }
    if (length(setdiff(FlipSA, A))!=0)
      stop("FlipSA has to be a subset of active set, A.")
    A2 <- setdiff(A,FlipSA)
    V_AN <- V[A,N,drop=FALSE]
    V_IN <- V[Ac,N,drop=FALSE]
    V_AR <- V[A,R,drop=FALSE]
    V_IR <- V[Ac,R,drop=FALSE]
    BB <- t(V_IN) %*% W[Ac,Ac]
    tVAN.WA <- t(V_AN)%*%W[A,A]
    # NOTE(review): all.equal() returns a character vector (not FALSE)
    # when its arguments differ, so `!all.equal(...)` errors in that
    # case; `!isTRUE(all.equal(...))` looks intended.
    if (!missing(S0) && !all.equal(t(V[,N])%*%S0,matrix(0,length(N),1))) {
      warning("Invalid S0. Regenerate S0 with a default way")
      S0 <- NULL
    }
    if (missing(S0) || is.null(S0)) {
      # Solve a constrained least-squares problem (limSolve::lsei) for a
      # subgradient consistent with B0: equality BB %*% s = F1 and
      # box constraints -1 <= s <= 1 on the inactive coordinates.
      E <- BB
      H <- rep(-1,2*nI)
      G <- rbind(diag(rep(1,nI)),diag(rep(-1,nI)))
      F1 <- -tVAN.WA%*%sign(B0[A])
      S0.prop <- limSolve::lsei(G=G,H=H,E=E,F=F1)
      if (S0.prop$IsError) {
        stop("There exist no solution for the given 'B0'.")
      } else {
        S0 <- rep(0,p)
        S0[A] <- sign(B0[A])
        S0[-A] <- S0.prop$X
      }
    }
    if (n-nA != 0) {
      # SFindex : S_F, free coordinate, index
      if (missing(SFindex)) {SFindex <- 1:(n-nA)}
      if (length(SFindex) != n-nA) {
        warning("Length of SFindex has to be same as 'n-length(A)'. Automatically set 'SFindex <- 1:(n-nA)'.")
        SFindex <- 1:(n-nA)
      }
      B_F <- BB[, SFindex, drop=FALSE]
      B_D <- BB[,-SFindex, drop=FALSE]
      invB_D <- solve(B_D)
      BDF <- invB_D%*%B_F
    } else if (n-nA == 0) {
      if (missing(SFindex)) {SFindex <- NULL}
      if (!is.null(SFindex)) {
        warning("If 'n-nA == 0', SFindex has to be set to NULL. Automatically set 'SFindex <- NULL'.")
        SFindex <- NULL
      }
      BDF <- invB_D <- solve(BB) # NEED TO CHECK
      B_F <- NULL
    }
    #initialization--------------------------
    B <- matrix(0,niter,p)
    S <- matrix(0,niter,p)
    B[1,] <- B0
    S[1,] <- S0
    Rcur <- VRC[ , A, drop=FALSE] %*% t(B[1, A, drop=FALSE]) + lbdVRW %*% S[1, ] - VRCB
    logfRcur <- -0.5 * sum(Rcur^2 * InvVarR)
    Tcur <- CalTmat(p,n,V,LBD,W,lbd,R,N,A,Ac)
    logdensitycur <- logfRcur+Tcur$logdetT
    #record acceptance rates
    nAccept <- numeric(2)
    #nProp=numeric(2)
    nProp <- c(nA*(niter-max(1,burnin)),(n-nA)*(niter-max(1,burnin)))
    #Change sign count
    nSignChange <- numeric(3)
    for(t in 2:niter )
    {
      #P1: update b_A
      if(nA>=1)
      {
        S[t,] <- S[t-1,]
        if (length(FlipSA)!=0) {
          MoveBA <- UpdateBA(B[t-1,],S[t-1,],tau,A,Ac,Rcur,logfRcur,VRC,lbdVRW,InvVarR,
                             tVAN.WA,invB_D,B_F,FlipSA,SFindex)
          B[t,] <- MoveBA$B
          S[t,] <- MoveBA$S
          nSignChange <- nSignChange+MoveBA$nChangeSign
          Rcur <- MoveBA$Rvec
          logfRcur <- MoveBA$logf
          nAccept[1] <- nAccept[1]+MoveBA$nAccept
          #nProp[1]=nProp[1]+nA
        }
        if (length(A2)!=0) {
          MoveBA <- UpdateBA.fixedSA(B[t-1,],tau,A2,Rcur,logfRcur,VRC,InvVarR)
          B[t,A2] <- MoveBA$B[A2]
          #S[t,]=S[t-1,]
          Rcur <- MoveBA$Rvec
          logfRcur <- MoveBA$logf
          nAccept[1] <- nAccept[1] + MoveBA$nAccept
        }
      }else{ B[t,] <- B[t-1,]; S[t,] <- S[t-1,]}
      # P2: update S_I
      if(nA<n && (t %% updateS.itv == 0))
      {
        MoveSI <- UpdateSI(S[t,],A,Ac,Rcur,n,p,logfRcur,lbdVRW,InvVarR,
                           tVAN.WA,invB_D,BDF,B_F,SFindex,...)
        S[t,] <- MoveSI$S
        Rcur <- MoveSI$Rvec
        logfRcur <- MoveSI$logf
        nAccept[2] <- nAccept[2]+MoveSI$nAccept
        #nProp[2]=nProp[2]+(n-nA)
      }#else{S[t,]=S[t-1,];S[t,A]=sign(B[t,A])}
      # Periodically re-randomize which subgradient coordinates are free.
      if (!is.null(SFindex) && randomSFindex && (t%%updateSF.itv==0) ) {
        SFindex <- sort(sample(1:(p-nA),n-nA))
        if (verbose) {cat("New SFindex : [",paste(SFindex,collapse=", "),"]\n")}
        B_F <- BB[,SFindex,drop=FALSE]
        B_D <- BB[,-SFindex,drop=FALSE]
        invB_D <- solve(B_D)
        BDF <- invB_D%*%B_F
      }
      if (verbose && sum(t == NN)==1) {
        aa <- which(NN==t)
        cat(paste("Updating : ", aa*10 ,"%",sep=""),"\n")
      }
    }
  }
  # Drop the burn-in rows (if any) before returning.
  return(list(beta = B[if (burnin != 0){-c(1:burnin)} else {1:niter}, ],
              subgrad = S[if (burnin != 0){-c(1:burnin)} else {1:niter}, ],
              acceptHistory = rbind(nAccept, nProp)))
  #
  # return(list(beta = B[if (burnin != 0){-c(1:burnin)} else {1:niter}, ],
  #             subgrad = S[if (burnin != 0){-c(1:burnin)} else {1:niter}, ],
  #             PE = PE,
  #             signchange=nSignChange,
  #             acceptHistory = rbind(nAccept, nProp)))
}
# MHLSgroup <- function(X, PE, sig2, lbd,
# weights, group, B0, S0, A, tau, niter, burnin, PEtype = "coeff", updateS.itv, verbose)
# {
# if ( all.equal(group.norm2(S0, group)[A], rep(1, length(A)), tolerance = 1e-04) != TRUE ) {
# stop("Invalid S0. Use LassoMHLS for a good initial value.")
# }
# n <- nrow(X)
# p <- ncol(X)
#
# K <- 10
# W <- rep(weights,table(group))
# Psi <- 1/n * crossprod(X)
# if (n > p) {
# inv.Var <- n/sig2 * solve(Psi)
# }
# nA <- length(A)
#
# r.seq <- matrix(, niter, max(group))
# S.seq <- matrix(, niter ,p)
# nAccept <- numeric(2)
# nProp <- c(nA*(niter-max(1,burnin)), max(group)*(niter-max(1,burnin)))
#
# rcur <- group.norm2(B0,group)
# r.seq[1, ] <- rcur
# S.seq[1, ] <- Scur <- S0
#
# if (PEtype == "coeff") {
# Hcur <- drop(Psi %*% drop(B0 - PE) + lbd * W * drop(S0))
# } else {
# Hcur <- drop(Psi %*% drop(B0) - t(X) %*% PE / n + lbd * W * drop(S0))
# }
#
# if (n >= p) {
# for (i in 2:niter) {
# r.new <- ld.Update.r(rcur,Scur,A,Hcur,X,PE,Psi,W,lbd,group,inv.Var,tau,PEtype,n,p)
# r.seq[i,] <- rcur <- r.new$r
# Hcur <- r.new$Hcur
# if (i > burnin) {nAccept[1] <- nAccept[1] + r.new$nrUpdate}
#
# if (i %% updateS.itv == 0) {
# S.new <- ld.Update.S (rcur,Scur,A,Hcur,X,PE,Psi,W,lbd,group,inv.Var,PEtype,n,p)
# S.seq[i,] <- Scur <- S.new$S
# Hcur <- S.new$Hcur
# } else {
# S.seq[i,] <- Scur
# }
# if (i > burnin) {nAccept[2] <- nAccept[2] + S.new$nSUpdate}
#
# if (verbose && (i %% round(niter/10) == 0)) {
# cat("MCMC step,", K, "% Finished\n")
# K <- K+10
# }
# }
# # } else {
# # for (i in 2:niter) {
# # r.new <- hd.Update.r(rcur,Scur,A,Hcur,X,PE,Psi,W,lbd,group,inv.Var,1)
# # r.seq[i,] <- rcur <- r.new$r
# # Hcur <- r.new$Hcur
# # nAccept[1] <- nAccept[1] + r.new$nrUpdate
# #
# # S.new <- hd.Update.S (rcur,Scur,A,Hcur,X,PE,Psi,W,lbd,group,inv.Var,p)
# # S.seq[i,] <- Scur <- S.new$S
# # Hcur <- S.new$Hcur
# # nAccept[2] <- nAccept[2] + S.new$nSUpdate
# #
# # if (verbose && (i %% round(niter/10) == 0)) {
# # cat("MCMC step,", K, "% Finished\n")
# # K <- K+10
# # }
# # }
# }
# return(list(
# group.l2.norm = r.seq[if (burnin != 0){-c(1:burnin)} else {1:niter}, ],
# subgrad = S.seq[if (burnin != 0){-c(1:burnin)} else {1:niter}, ],
# acceptHistory = rbind(nAccept, nProp)))
# }
#' @method print MHLS
#' @title Print Metropolis-Hastings sampler outputs
#'
#' @description Print a brief summary of the MH sampler outputs.
#'
#' @param x an object of class "MHLS", which is a result of \code{\link{MHLS}}.
#' @param ... ... addtional print arguments.
#' @details
#' \code{\link{print.MHLS}} prints out last 10 iterations and a brief summary
#' of the simulation; number of iterations, number of burn-in periods, PE, PEtype and
#' acceptance rate.
#'
#' @return Above results are silently returned.
#'
#' @examples
#' set.seed(123)
#' n <- 10
#' p <- 5
#' X <- matrix(rnorm(n * p), n)
#' Y <- X %*% rep(1, p) + rnorm(n)
#' sigma2 <- 1
#' lbd <- .37
#' weights <- rep(1, p)
#' LassoResult <- Lasso.MHLS(X = X, Y = Y, lbd = lbd, type="lasso", weights = weights)
#' B0 <- LassoResult$B0
#' S0 <- LassoResult$S0
#' Result <- MHLS(X = X, PE = rep(0, p), sig2 = sigma2, lbd = lbd,
#'                weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#'                PEtype = "coeff")
#' print(Result)
#' @export
print.MHLS <- function (x, ...) {
  # Number of retained (post-burn-in) draws.
  nkeep <- x$niter - x$burnin
  # Print the last (up to) ten retained rows of a sample matrix.
  showLast10 <- function(draws) {
    if (nkeep <= 9) {
      print(draws)
    } else {
      print(draws[(nkeep - 9):nkeep, ])
    }
  }
  cat("===========================\n")
  cat("Number of iteration: ", x$niter, "\n\n")
  cat("Burn-in period: ", x$burnin, "\n\n")
  cat("Plug-in PE: \n")
  print(x$PE)
  cat("PEtype: \n")
  print(x$PEtype)
  cat("\nLast 10 steps of beta's:\n")
  showLast10(x$beta)
  cat("\nlast 10 steps of subgradients:\n")
  showLast10(x$subgrad)
  # Acceptance summary: row 1 of acceptHistory holds accepted counts,
  # row 2 the number of proposals, for beta and subgradient moves.
  cat("\nAcceptance rate:\n")
  cat("-----------------------------\n")
  cat("\t \t \t beta \t subgrad\n")
  cat("# Accepted\t : \t", paste(x$acceptHistory[1, ], "\t"), "\n")
  cat("# Moved\t\t : \t", paste(x$acceptHistory[2, ], "\t"), "\n")
  cat("Acceptance rate\t : \t",
      paste(round(x$acceptHistory[1, ] / x$acceptHistory[2, ], 3), "\t"), "\n")
}
#' @method summary MHLS
#' @title Summarizing Metropolis-Hastings sampler outputs
#'
#' @description Summary method for class "MHLS".
#'
#' @param object an object of class "MHLS", which is a result of \code{\link{MHLS}}.
#' @param ... additional arguments affecting the summary produced.
#'
#' @details
#' This function provides a summary of each sampled beta and subgradient.
#' @return mean, median, standard deviation, 2.5\% quantile and 97.5\% quantile
#' for each beta and its subgradient.
#' @examples
#' set.seed(123)
#' n <- 10
#' p <- 5
#' X <- matrix(rnorm(n * p), n)
#' Y <- X %*% rep(1, p) + rnorm(n)
#' sigma2 <- 1
#' lbd <- .37
#' weights <- rep(1, p)
#' LassoResult <- Lasso.MHLS(X = X, Y = Y, lbd = lbd, type = "lasso", weights = weights)
#' B0 <- LassoResult$B0
#' S0 <- LassoResult$S0
#' summary(MHLS(X = X, PE = rep(0, p), sig2 = sigma2, lbd = lbd,
#'              weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#'              PEtype = "coeff"))
#' @export
summary.MHLS <- function (object, ...) {
  # Column-wise summaries (one row per coefficient) of the sampled
  # betas and their subgradients, computed by the package-internal
  # helper SummBeta applied to each column.
  out <- list(beta        = t(apply(object$beta, 2, SummBeta)),
              subgradient = t(apply(object$subgrad, 2, SummBeta)))
  class(out) <- "summary.MHLS"
  out
}
#' @method plot MHLS
#' @title Plot Metropolis-Hastings sampler outputs
#'
#' @description Provides six plots for each covariate index;
#' histogram, path plot and acf plot for beta and for its subgradient.
#'
#' @param x an object of class "MHLS", which is an output of \code{\link{MHLS}}.
#' @param index an index of covariates to plot.
#' @param skipS logical. If \code{skipS = TRUE}, plots beta only.
#' @param ... addtional arguments passed to or from other methods.
#' @details
#' \code{\link{plot.MHLS}} provides summary plots of beta and subgradient.
#' The first column provides histogram of beta and subgradient, while the second
#' and the third columns provide path and acf plots, respectively.
#' If \code{skipS = TRUE}, this function provides summary plots for beta only.
#' @examples
#' set.seed(123)
#' n <- 10
#' p <- 5
#' X <- matrix(rnorm(n * p), n)
#' Y <- X %*% rep(1, p) + rnorm(n)
#' sigma2 <- 1
#' lbd <- .37
#' weights <- rep(1, p)
#' LassoResult <- Lasso.MHLS(X = X, Y = Y, lbd = lbd, type="lasso", weights = weights)
#' B0 <- LassoResult$B0
#' S0 <- LassoResult$S0
#' plot(MHLS(X = X, PE = rep(0, p), sig2 = 1, lbd = 1,
#'      weights = weights, B0 = B0, S0 = S0, niter = 50, burnin = 0,
#'      PEtype = "coeff"))
#' @export
plot.MHLS <- function(x, index = 1:ncol(x$beta), skipS = FALSE, ...) {
  # Diagnostic plots for MHLS samples: for each requested coefficient,
  # a histogram, a trace plot, and (when the chain moved) an acf plot,
  # for beta and -- unless skipS -- for its subgradient.
  if (any(!index %in% 1:ncol(x$beta))) {
    stop("Invalid index.")
  }
  niter <- x$niter
  burnin <- x$burnin
  # Restore the caller's graphical parameters on exit; par(mfrow) is
  # modified below (the original left it changed).
  oldpar <- par(no.readonly = TRUE)
  on.exit(par(oldpar), add = TRUE)
  # Draw one row of diagnostics for a single sampled chain.
  drawRow <- function(samples, hist.xlab, path.ylab,
                      breaks = 20, xlim = NULL) {
    if (is.null(xlim)) {
      hist(samples, breaks = breaks, prob = TRUE, xlab = hist.xlab,
           ylab = "Density", main = "")
    } else {
      hist(samples, breaks = breaks, prob = TRUE, xlim = xlim,
           xlab = hist.xlab, ylab = "Density", main = "")
    }
    plot((burnin + 1):niter, samples, xlab = "Iterations",
         ylab = path.ylab, type = "l")
    if (sum(abs(diff(samples))) == 0) {
      # A constant chain has zero variance, so acf() would fail.
      plot(0, type = "n", axes = FALSE, xlab = "", ylab = "")
      text(1, 0, "Auto correlation plot \n not available", cex = 1)
    } else {
      acf(samples, xlab = "Lag", main = "")
    }
  }
  # Subgradients live in [-1, 1]; use bins of width 1/10 centered so the
  # boundary values fall inside the outermost bins.
  subgradBreaks <- seq(-1 - 1 / 10, 1, by = 1 / 10) + 1 / 20
  subgradXlim <- c(-1 - 1 / 20, 1 + 1 / 20)
  par(mfrow = if (!skipS) c(2, 3) else c(1, 3))
  for (i in index) {
    drawRow(x$beta[, i], paste("Beta_", i, sep = ""), "Path")
    if (!skipS) {
      drawRow(x$subgrad[, i], paste("Subgradient_", i, sep = ""), "Samples",
              breaks = subgradBreaks, xlim = subgradXlim)
    }
    readline("Hit <Return> to see the next plot: ")
  }
}
|
#
# File: qualityControl.R
# Created Date: Monday, September 28th 2020
# Author: Debora Antunes
# -----
# Last Modified: Wednesday, September 30th 2020, 10:52:14 am
# -----
#
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
# BiocManager::install("GWASTools")
# BiocManager::install("SNPRelate")
# Quality-control pipeline for SNP genotype data:
#   (1) convert a VCF file to GDS format,
#   (2) recode X/Y chromosome labels to numeric codes
#       (presumably those expected by GWASTools -- TODO confirm),
#   (3) plot per-chromosome missingness and heterozygosity,
#   (4) estimate pairwise relatedness with the KING IBD method.
library(GWASTools)
library(SNPRelate)
library(plyr)
####### CONVERTING VCF TO GDS ###########
vcf.fn <- "files/data/vcf/cases/All_PT.vcf.gz"
gdsfile <- "files/data/variants/snps.gds"
snpgdsVCF2GDS(vcf.fn, gdsfile, verbose=TRUE)
##### CONVERT CHR VALUES ######
# Opened read/write so the recoded chromosome vector can be written back.
genofile = snpgdsOpen(gdsfile, readonly=FALSE)
chr = read.gdsn(index.gdsn(genofile, "snp.chromosome"))
unique(chr)
chr = revalue(chr, c('X'='23', 'Y'='25'))
unique(chr)
add.gdsn(genofile,'snp.chromosome',chr,replace=TRUE)
# NOTE(review): genofile is never closed before the same file is
# reopened below via GdsGenotypeReader -- consider snpgdsClose(genofile)
# here to avoid a stale open handle on the GDS file.
# open the GDS file and create a GenotypeData object
gdsfile <- "files/data/variants/snps.gds"
gds <- GdsGenotypeReader(gdsfile)
# Quick sanity checks on the imported data (printed interactively).
nscan(gds)
nsnp(gds)
head(getScanID(gds))
head(getSnpID(gds))
tail(getChromosome(gds))
head(getPosition(gds))
########## Missingness and heterozygosity within samples ################
## Missingness
genoData <- GenotypeData(gds)
miss <- missingGenotypeByScanChrom(genoData)
# Examine the results
# Per-sample missing call counts divided by SNPs per chromosome.
miss.rate <- t(apply(miss$missing.counts, 1, function(x) {x / miss$snps.per.chr}))
miss.rate <- as.data.frame(miss.rate)
cols <- names(miss.rate) %in% c(1:22, "X", "Y")
boxplot(miss.rate[,cols],
        main="Missingness by Chromosome",
        ylab="Proportion Missing",
        xlab="Chromosome")
## Heterozygosity
het.results <- hetByScanChrom(genoData)
cols <- colnames(het.results) %in% c(1:22, "X", "Y")
boxplot(het.results[,cols],
        main="Heterozygoty by Chromosome",
        ylab="Heterozygoty rate",
        xlab="Chromosome")
close(genoData)
############# Relatedness and IBD Estimation ############
gdsobj = snpgdsOpen(gdsfile, readonly=FALSE)
ibdobj <- snpgdsIBDKING(gdsobj)
# Count of pairs with KING kinship below 0.0625 (the author's
# "fourth degree" cutoff -- TODO confirm against KING documentation).
length(ibdobj$kinship[ibdobj$kinship < 0.0625])#fourth degree
length(ibdobj$snp.id)
|
/R/qualityControl.R
|
no_license
|
Daantunes/ComplexDiseases_Pipeline
|
R
| false
| false
| 2,042
|
r
|
#
# File: qualityControl.R
# Created Date: Monday, September 28th 2020
# Author: Debora Antunes
# -----
# Last Modified: Wednesday, September 30th 2020, 10:52:14 am
# -----
#
# if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
# BiocManager::install("GWASTools")
# BiocManager::install("SNPRelate")
# Quality-control pipeline for SNP genotype data:
#   (1) convert a VCF file to GDS format,
#   (2) recode X/Y chromosome labels to numeric codes
#       (presumably those expected by GWASTools -- TODO confirm),
#   (3) plot per-chromosome missingness and heterozygosity,
#   (4) estimate pairwise relatedness with the KING IBD method.
library(GWASTools)
library(SNPRelate)
library(plyr)
####### CONVERTING VCF TO GDS ###########
vcf.fn <- "files/data/vcf/cases/All_PT.vcf.gz"
gdsfile <- "files/data/variants/snps.gds"
snpgdsVCF2GDS(vcf.fn, gdsfile, verbose=TRUE)
##### CONVERT CHR VALUES ######
# Opened read/write so the recoded chromosome vector can be written back.
genofile = snpgdsOpen(gdsfile, readonly=FALSE)
chr = read.gdsn(index.gdsn(genofile, "snp.chromosome"))
unique(chr)
chr = revalue(chr, c('X'='23', 'Y'='25'))
unique(chr)
add.gdsn(genofile,'snp.chromosome',chr,replace=TRUE)
# NOTE(review): genofile is never closed before the same file is
# reopened below via GdsGenotypeReader -- consider snpgdsClose(genofile)
# here to avoid a stale open handle on the GDS file.
# open the GDS file and create a GenotypeData object
gdsfile <- "files/data/variants/snps.gds"
gds <- GdsGenotypeReader(gdsfile)
# Quick sanity checks on the imported data (printed interactively).
nscan(gds)
nsnp(gds)
head(getScanID(gds))
head(getSnpID(gds))
tail(getChromosome(gds))
head(getPosition(gds))
########## Missingness and heterozygosity within samples ################
## Missingness
genoData <- GenotypeData(gds)
miss <- missingGenotypeByScanChrom(genoData)
# Examine the results
# Per-sample missing call counts divided by SNPs per chromosome.
miss.rate <- t(apply(miss$missing.counts, 1, function(x) {x / miss$snps.per.chr}))
miss.rate <- as.data.frame(miss.rate)
cols <- names(miss.rate) %in% c(1:22, "X", "Y")
boxplot(miss.rate[,cols],
        main="Missingness by Chromosome",
        ylab="Proportion Missing",
        xlab="Chromosome")
## Heterozygosity
het.results <- hetByScanChrom(genoData)
cols <- colnames(het.results) %in% c(1:22, "X", "Y")
boxplot(het.results[,cols],
        main="Heterozygoty by Chromosome",
        ylab="Heterozygoty rate",
        xlab="Chromosome")
close(genoData)
############# Relatedness and IBD Estimation ############
gdsobj = snpgdsOpen(gdsfile, readonly=FALSE)
ibdobj <- snpgdsIBDKING(gdsobj)
# Count of pairs with KING kinship below 0.0625 (the author's
# "fourth degree" cutoff -- TODO confirm against KING documentation).
length(ibdobj$kinship[ibdobj$kinship < 0.0625])#fourth degree
length(ibdobj$snp.id)
|
# Fit a slot model and a resource model of change detection by maximum
# likelihood (objective functions defined in objective.R), then score
# both with deviance, AIC and BIC.
source("objective.R")
#the data
# Hits/false alarms (K) and trial counts (N) for "change" and "same"
# trials at each set size.
changeK = c(35,25); changeN = c(40,40)
sameK = c(5,15); sameN = c(40,40)
setSize = c(4,8)
#set up best-fitting parameters for slot model
startpars = c(3.5, 0.5)
# control = list(fnscale = -1) turns optim() into a maximizer, so
# out$value is the maximized objective returned by slotObjective.
out = optim(par = startpars, fn = slotObjective, changeN = changeN, sameN = sameN, changeK = changeK, sameK = sameK, setSize = setSize, lower = c(0,0), upper = c(8,1), method = c('L-BFGS-B'), control = list(fnscale = -1))
slotBestPars = out$par
slotMaxLikelihood = out$value
# NOTE(review): this treats out$value as a raw likelihood. If
# slotObjective returns a LOG-likelihood, the deviance should be
# -2 * out$value rather than -2 * log(out$value) -- confirm in
# objective.R.
slotDeviance = - 2 * log(slotMaxLikelihood)
slotAIC = slotDeviance + 2 * length(slotBestPars)
slotBIC = slotDeviance + log(sum(changeN,sameN)) * length(slotBestPars)
#set up best-fitting parameters for resource model
startpars = c(2, 1, 1)
out = optim(par = startpars, fn = resourceObjective, changeN = changeN, sameN = sameN, changeK = changeK, sameK = sameK, setSize = setSize, lower = c(0,0,0), upper = c(5,5,20), method = c('L-BFGS-B'), control = list(fnscale = -1))
resourceBestPars = out$par
resourceMaxLikelihood = out$value
# NOTE(review): same likelihood-vs-log-likelihood concern as above.
resourceDeviance = - 2 * log(resourceMaxLikelihood)
resourceAIC = resourceDeviance + 2 * length(resourceBestPars)
resourceBIC = resourceDeviance + log(sum(changeN,sameN)) * length(resourceBestPars)
|
/Classes/Day_06/slotResource/final/bic.R
|
no_license
|
danieljwilson/CMMC-2018
|
R
| false
| false
| 1,202
|
r
|
# Fit a slot model and a resource model of change detection by maximum
# likelihood (objective functions defined in objective.R), then score
# both with deviance, AIC and BIC.
source("objective.R")
#the data
# Hits/false alarms (K) and trial counts (N) for "change" and "same"
# trials at each set size.
changeK = c(35,25); changeN = c(40,40)
sameK = c(5,15); sameN = c(40,40)
setSize = c(4,8)
#set up best-fitting parameters for slot model
startpars = c(3.5, 0.5)
# control = list(fnscale = -1) turns optim() into a maximizer, so
# out$value is the maximized objective returned by slotObjective.
out = optim(par = startpars, fn = slotObjective, changeN = changeN, sameN = sameN, changeK = changeK, sameK = sameK, setSize = setSize, lower = c(0,0), upper = c(8,1), method = c('L-BFGS-B'), control = list(fnscale = -1))
slotBestPars = out$par
slotMaxLikelihood = out$value
# NOTE(review): this treats out$value as a raw likelihood. If
# slotObjective returns a LOG-likelihood, the deviance should be
# -2 * out$value rather than -2 * log(out$value) -- confirm in
# objective.R.
slotDeviance = - 2 * log(slotMaxLikelihood)
slotAIC = slotDeviance + 2 * length(slotBestPars)
slotBIC = slotDeviance + log(sum(changeN,sameN)) * length(slotBestPars)
#set up best-fitting parameters for resource model
startpars = c(2, 1, 1)
out = optim(par = startpars, fn = resourceObjective, changeN = changeN, sameN = sameN, changeK = changeK, sameK = sameK, setSize = setSize, lower = c(0,0,0), upper = c(5,5,20), method = c('L-BFGS-B'), control = list(fnscale = -1))
resourceBestPars = out$par
resourceMaxLikelihood = out$value
# NOTE(review): same likelihood-vs-log-likelihood concern as above.
resourceDeviance = - 2 * log(resourceMaxLikelihood)
resourceAIC = resourceDeviance + 2 * length(resourceBestPars)
resourceBIC = resourceDeviance + log(sum(changeN,sameN)) * length(resourceBestPars)
|
# ETL template: clean HallOfFame.csv and emit (a) a reformatted CSV and
# (b) a CREATE TABLE statement for loading the data into a database.
library(tidyr)
library(dplyr)
library(ggplot2)
library(lubridate)
setwd("C:/DataViz/6. Final Project/DV_FinalProject/01 Data")
file_path <- "HallOfFame.csv"
df <- read.csv(file_path, stringsAsFactors = FALSE)
# Replace "." (i.e., period) with "_" in the column names.
names(df) <- gsub("\\.+", "_", names(df))
# summary(df) # Uncomment this and run just the lines to here to get column types to use for getting the list of measures.
# Columns treated as numeric measures; every remaining column is a dimension.
measures <- c("yearid", "ballots", "needed", "votes")
# Get rid of special (non-printable-ASCII) characters in each column.
for (n in names(df)) {
  df[n] <- data.frame(lapply(df[n], gsub, pattern = "[^ -~]", replacement = ""))
}
dimensions <- setdiff(names(df), measures)
# NOTE: was `length(measures) > 1 || !is.na(dimensions)`, which tests the
# wrong vector and relies on || short-circuiting; test emptiness directly.
if (length(dimensions) > 0) {
  for (d in dimensions) {
    # Get rid of " and ' in dimensions.
    df[d] <- data.frame(lapply(df[d], gsub, pattern = "[\"']", replacement = ""))
    # Change & to and in dimensions.
    df[d] <- data.frame(lapply(df[d], gsub, pattern = "&", replacement = " and "))
    # Change : to ; in dimensions.
    df[d] <- data.frame(lapply(df[d], gsub, pattern = ":", replacement = ";"))
  }
}
# Fix date columns; this needs to be done by hand because the format must
# match each file. HallOfFame.csv has no Order_Date/Ship_Date columns, so
# guard: assigning a zero-length result into df would otherwise error
# ("replacement has 0 rows").
if (!is.null(df$Order_Date)) {
  df$Order_Date <- gsub(" [0-9]+:.*", "", gsub(" UTC", "", mdy(as.character(df$Order_Date), tz = "UTC")))
}
if (!is.null(df$Ship_Date)) {
  df$Ship_Date <- gsub(" [0-9]+:.*", "", gsub(" UTC", "", mdy(as.character(df$Ship_Date), tz = "UTC")))
}
# The following is an example of dealing with special cases like making state abbreviations be all upper case.
# df["State"] <- data.frame(lapply(df["State"], toupper))
# Keep only numbers, the - sign, and the period in measure columns.
# (Class was written [^--.0-9]; the - to . range is just "-." so simplify.)
if (length(measures) > 0) {
  for (m in measures) {
    df[m] <- data.frame(lapply(df[m], gsub, pattern = "[^-.0-9]", replacement = ""))
  }
}
# fixed = TRUE: strip the literal ".csv" suffix, not "any char + csv".
base_name <- gsub(".csv", "", file_path, fixed = TRUE)
write.csv(df, paste0(base_name, ".reformatted.csv"), row.names = FALSE, na = "")
# Build the CREATE TABLE statement: dimensions as varchar2, measures as
# number. (Was [^A-z, 0-9, ]: the A-z range wrongly spans "[\]^_`" and the
# class kept commas, which are invalid in SQL identifiers.)
tableName <- gsub(" +", "_", gsub("[^A-Za-z0-9 ]", "", base_name))
sql <- paste("CREATE TABLE", tableName, "(\n-- Change table_name to the table name you want.\n")
if (length(dimensions) > 0) {
  for (d in dimensions) {
    sql <- paste(sql, paste(d, "varchar2(4000),\n"))
  }
}
if (length(measures) > 0) {
  for (m in measures) {
    # No trailing comma after the last column definition.
    if (m != tail(measures, n = 1)) {
      sql <- paste(sql, paste(m, "number(38,4),\n"))
    } else {
      sql <- paste(sql, paste(m, "number(38,4)\n"))
    }
  }
}
sql <- paste(sql, ");")
cat(sql)
|
/DV_FinalProject/01 Data/HallOfFameETL.R
|
no_license
|
germanmtz93/DV_FinalProject
|
R
| false
| false
| 2,604
|
r
|
# ETL template: clean HallOfFame.csv and emit (a) a reformatted CSV and
# (b) a CREATE TABLE statement for loading the data into a database.
library(tidyr)
library(dplyr)
library(ggplot2)
library(lubridate)
setwd("C:/DataViz/6. Final Project/DV_FinalProject/01 Data")
file_path <- "HallOfFame.csv"
df <- read.csv(file_path, stringsAsFactors = FALSE)
# Replace "." (i.e., period) with "_" in the column names.
names(df) <- gsub("\\.+", "_", names(df))
# summary(df) # Uncomment this and run just the lines to here to get column types to use for getting the list of measures.
# Columns treated as numeric measures; every remaining column is a dimension.
measures <- c("yearid", "ballots", "needed", "votes")
# Get rid of special (non-printable-ASCII) characters in each column.
for (n in names(df)) {
  df[n] <- data.frame(lapply(df[n], gsub, pattern = "[^ -~]", replacement = ""))
}
dimensions <- setdiff(names(df), measures)
# NOTE: was `length(measures) > 1 || !is.na(dimensions)`, which tests the
# wrong vector and relies on || short-circuiting; test emptiness directly.
if (length(dimensions) > 0) {
  for (d in dimensions) {
    # Get rid of " and ' in dimensions.
    df[d] <- data.frame(lapply(df[d], gsub, pattern = "[\"']", replacement = ""))
    # Change & to and in dimensions.
    df[d] <- data.frame(lapply(df[d], gsub, pattern = "&", replacement = " and "))
    # Change : to ; in dimensions.
    df[d] <- data.frame(lapply(df[d], gsub, pattern = ":", replacement = ";"))
  }
}
# Fix date columns; this needs to be done by hand because the format must
# match each file. HallOfFame.csv has no Order_Date/Ship_Date columns, so
# guard: assigning a zero-length result into df would otherwise error
# ("replacement has 0 rows").
if (!is.null(df$Order_Date)) {
  df$Order_Date <- gsub(" [0-9]+:.*", "", gsub(" UTC", "", mdy(as.character(df$Order_Date), tz = "UTC")))
}
if (!is.null(df$Ship_Date)) {
  df$Ship_Date <- gsub(" [0-9]+:.*", "", gsub(" UTC", "", mdy(as.character(df$Ship_Date), tz = "UTC")))
}
# The following is an example of dealing with special cases like making state abbreviations be all upper case.
# df["State"] <- data.frame(lapply(df["State"], toupper))
# Keep only numbers, the - sign, and the period in measure columns.
# (Class was written [^--.0-9]; the - to . range is just "-." so simplify.)
if (length(measures) > 0) {
  for (m in measures) {
    df[m] <- data.frame(lapply(df[m], gsub, pattern = "[^-.0-9]", replacement = ""))
  }
}
# fixed = TRUE: strip the literal ".csv" suffix, not "any char + csv".
base_name <- gsub(".csv", "", file_path, fixed = TRUE)
write.csv(df, paste0(base_name, ".reformatted.csv"), row.names = FALSE, na = "")
# Build the CREATE TABLE statement: dimensions as varchar2, measures as
# number. (Was [^A-z, 0-9, ]: the A-z range wrongly spans "[\]^_`" and the
# class kept commas, which are invalid in SQL identifiers.)
tableName <- gsub(" +", "_", gsub("[^A-Za-z0-9 ]", "", base_name))
sql <- paste("CREATE TABLE", tableName, "(\n-- Change table_name to the table name you want.\n")
if (length(dimensions) > 0) {
  for (d in dimensions) {
    sql <- paste(sql, paste(d, "varchar2(4000),\n"))
  }
}
if (length(measures) > 0) {
  for (m in measures) {
    # No trailing comma after the last column definition.
    if (m != tail(measures, n = 1)) {
      sql <- paste(sql, paste(m, "number(38,4),\n"))
    } else {
      sql <- paste(sql, paste(m, "number(38,4)\n"))
    }
  }
}
sql <- paste(sql, ");")
cat(sql)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{servicecatalog}
\alias{servicecatalog}
\title{AWS Service Catalog}
\usage{
servicecatalog(
config = list(),
credentials = list(),
endpoint = NULL,
region = NULL
)
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{credentials}:} {\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
}}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e. \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
\item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
}}
\item{credentials}{Optional credentials shorthand for the config parameter
\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
}}
\item{endpoint}{Optional shorthand for complete URL to use for the constructed client.}
\item{region}{Optional shorthand for AWS Region used in instantiating the client.}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
Service Catalog
\href{https://aws.amazon.com/servicecatalog/}{Service Catalog} enables
organizations to create and manage catalogs of IT services that are
approved for Amazon Web Services. To get the most out of this
documentation, you should be familiar with the terminology discussed in
\href{https://docs.aws.amazon.com/servicecatalog/latest/adminguide/what-is_concepts.html}{Service Catalog Concepts}.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- servicecatalog(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical",
sts_regional_endpoint = "string"
),
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string"
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[paws.management:servicecatalog_accept_portfolio_share]{accept_portfolio_share} \tab Accepts an offer to share the specified portfolio\cr
\link[paws.management:servicecatalog_associate_budget_with_resource]{associate_budget_with_resource} \tab Associates the specified budget with the specified resource\cr
\link[paws.management:servicecatalog_associate_principal_with_portfolio]{associate_principal_with_portfolio} \tab Associates the specified principal ARN with the specified portfolio\cr
\link[paws.management:servicecatalog_associate_product_with_portfolio]{associate_product_with_portfolio} \tab Associates the specified product with the specified portfolio\cr
\link[paws.management:servicecatalog_associate_service_action_with_provisioning_artifact]{associate_service_action_with_provisioning_artifact} \tab Associates a self-service action with a provisioning artifact\cr
\link[paws.management:servicecatalog_associate_tag_option_with_resource]{associate_tag_option_with_resource} \tab Associate the specified TagOption with the specified portfolio or product\cr
\link[paws.management:servicecatalog_batch_associate_service_action_with_provisioning_artifact]{batch_associate_service_action_with_provisioning_artifact} \tab Associates multiple self-service actions with provisioning artifacts\cr
\link[paws.management:servicecatalog_batch_disassociate_service_action_from_provisioning_artifact]{batch_disassociate_service_action_from_provisioning_artifact} \tab Disassociates a batch of self-service actions from the specified provisioning artifact\cr
\link[paws.management:servicecatalog_copy_product]{copy_product} \tab Copies the specified source product to the specified target product or a new product\cr
\link[paws.management:servicecatalog_create_constraint]{create_constraint} \tab Creates a constraint\cr
\link[paws.management:servicecatalog_create_portfolio]{create_portfolio} \tab Creates a portfolio\cr
\link[paws.management:servicecatalog_create_portfolio_share]{create_portfolio_share} \tab Shares the specified portfolio with the specified account or organization node\cr
\link[paws.management:servicecatalog_create_product]{create_product} \tab Creates a product\cr
\link[paws.management:servicecatalog_create_provisioned_product_plan]{create_provisioned_product_plan} \tab Creates a plan\cr
\link[paws.management:servicecatalog_create_provisioning_artifact]{create_provisioning_artifact} \tab Creates a provisioning artifact (also known as a version) for the specified product\cr
\link[paws.management:servicecatalog_create_service_action]{create_service_action} \tab Creates a self-service action\cr
\link[paws.management:servicecatalog_create_tag_option]{create_tag_option} \tab Creates a TagOption\cr
\link[paws.management:servicecatalog_delete_constraint]{delete_constraint} \tab Deletes the specified constraint\cr
\link[paws.management:servicecatalog_delete_portfolio]{delete_portfolio} \tab Deletes the specified portfolio\cr
\link[paws.management:servicecatalog_delete_portfolio_share]{delete_portfolio_share} \tab Stops sharing the specified portfolio with the specified account or organization node\cr
\link[paws.management:servicecatalog_delete_product]{delete_product} \tab Deletes the specified product\cr
\link[paws.management:servicecatalog_delete_provisioned_product_plan]{delete_provisioned_product_plan} \tab Deletes the specified plan\cr
\link[paws.management:servicecatalog_delete_provisioning_artifact]{delete_provisioning_artifact} \tab Deletes the specified provisioning artifact (also known as a version) for the specified product\cr
\link[paws.management:servicecatalog_delete_service_action]{delete_service_action} \tab Deletes a self-service action\cr
\link[paws.management:servicecatalog_delete_tag_option]{delete_tag_option} \tab Deletes the specified TagOption\cr
\link[paws.management:servicecatalog_describe_constraint]{describe_constraint} \tab Gets information about the specified constraint\cr
\link[paws.management:servicecatalog_describe_copy_product_status]{describe_copy_product_status} \tab Gets the status of the specified copy product operation\cr
\link[paws.management:servicecatalog_describe_portfolio]{describe_portfolio} \tab Gets information about the specified portfolio\cr
\link[paws.management:servicecatalog_describe_portfolio_shares]{describe_portfolio_shares} \tab Returns a summary of each of the portfolio shares that were created for the specified portfolio\cr
\link[paws.management:servicecatalog_describe_portfolio_share_status]{describe_portfolio_share_status} \tab Gets the status of the specified portfolio share operation\cr
\link[paws.management:servicecatalog_describe_product]{describe_product} \tab Gets information about the specified product\cr
\link[paws.management:servicecatalog_describe_product_as_admin]{describe_product_as_admin} \tab Gets information about the specified product\cr
\link[paws.management:servicecatalog_describe_product_view]{describe_product_view} \tab Gets information about the specified product\cr
\link[paws.management:servicecatalog_describe_provisioned_product]{describe_provisioned_product} \tab Gets information about the specified provisioned product\cr
\link[paws.management:servicecatalog_describe_provisioned_product_plan]{describe_provisioned_product_plan} \tab Gets information about the resource changes for the specified plan\cr
\link[paws.management:servicecatalog_describe_provisioning_artifact]{describe_provisioning_artifact} \tab Gets information about the specified provisioning artifact (also known as a version) for the specified product\cr
\link[paws.management:servicecatalog_describe_provisioning_parameters]{describe_provisioning_parameters} \tab Gets information about the configuration required to provision the specified product using the specified provisioning artifact\cr
\link[paws.management:servicecatalog_describe_record]{describe_record} \tab Gets information about the specified request operation\cr
\link[paws.management:servicecatalog_describe_service_action]{describe_service_action} \tab Describes a self-service action\cr
\link[paws.management:servicecatalog_describe_service_action_execution_parameters]{describe_service_action_execution_parameters} \tab Finds the default parameters for a specific self-service action on a specific provisioned product and returns a map of the results to the user\cr
\link[paws.management:servicecatalog_describe_tag_option]{describe_tag_option} \tab Gets information about the specified TagOption\cr
\link[paws.management:servicecatalog_disable_aws_organizations_access]{disable_aws_organizations_access} \tab Disable portfolio sharing through the Organizations service\cr
\link[paws.management:servicecatalog_disassociate_budget_from_resource]{disassociate_budget_from_resource} \tab Disassociates the specified budget from the specified resource\cr
\link[paws.management:servicecatalog_disassociate_principal_from_portfolio]{disassociate_principal_from_portfolio} \tab Disassociates a previously associated principal ARN from a specified portfolio\cr
\link[paws.management:servicecatalog_disassociate_product_from_portfolio]{disassociate_product_from_portfolio} \tab Disassociates the specified product from the specified portfolio\cr
\link[paws.management:servicecatalog_disassociate_service_action_from_provisioning_artifact]{disassociate_service_action_from_provisioning_artifact} \tab Disassociates the specified self-service action association from the specified provisioning artifact\cr
\link[paws.management:servicecatalog_disassociate_tag_option_from_resource]{disassociate_tag_option_from_resource} \tab Disassociates the specified TagOption from the specified resource\cr
\link[paws.management:servicecatalog_enable_aws_organizations_access]{enable_aws_organizations_access} \tab Enable portfolio sharing feature through Organizations\cr
\link[paws.management:servicecatalog_execute_provisioned_product_plan]{execute_provisioned_product_plan} \tab Provisions or modifies a product based on the resource changes for the specified plan\cr
\link[paws.management:servicecatalog_execute_provisioned_product_service_action]{execute_provisioned_product_service_action} \tab Executes a self-service action against a provisioned product\cr
\link[paws.management:servicecatalog_get_aws_organizations_access_status]{get_aws_organizations_access_status} \tab Get the Access Status for Organizations portfolio share feature\cr
\link[paws.management:servicecatalog_get_provisioned_product_outputs]{get_provisioned_product_outputs} \tab This API takes either a ProvisionedProductId or a ProvisionedProductName, along with a list of one or more output keys, and responds with the key/value pairs of those outputs\cr
\link[paws.management:servicecatalog_import_as_provisioned_product]{import_as_provisioned_product} \tab Requests the import of a resource as an Service Catalog provisioned product that is associated to an Service Catalog product and provisioning artifact\cr
\link[paws.management:servicecatalog_list_accepted_portfolio_shares]{list_accepted_portfolio_shares} \tab Lists all imported portfolios for which account-to-account shares were accepted by this account\cr
\link[paws.management:servicecatalog_list_budgets_for_resource]{list_budgets_for_resource} \tab Lists all the budgets associated to the specified resource\cr
\link[paws.management:servicecatalog_list_constraints_for_portfolio]{list_constraints_for_portfolio} \tab Lists the constraints for the specified portfolio and product\cr
\link[paws.management:servicecatalog_list_launch_paths]{list_launch_paths} \tab Lists the paths to the specified product\cr
\link[paws.management:servicecatalog_list_organization_portfolio_access]{list_organization_portfolio_access} \tab Lists the organization nodes that have access to the specified portfolio\cr
\link[paws.management:servicecatalog_list_portfolio_access]{list_portfolio_access} \tab Lists the account IDs that have access to the specified portfolio\cr
\link[paws.management:servicecatalog_list_portfolios]{list_portfolios} \tab Lists all portfolios in the catalog\cr
\link[paws.management:servicecatalog_list_portfolios_for_product]{list_portfolios_for_product} \tab Lists all portfolios that the specified product is associated with\cr
\link[paws.management:servicecatalog_list_principals_for_portfolio]{list_principals_for_portfolio} \tab Lists all PrincipalARNs and corresponding PrincipalTypes associated with the specified portfolio\cr
\link[paws.management:servicecatalog_list_provisioned_product_plans]{list_provisioned_product_plans} \tab Lists the plans for the specified provisioned product or all plans to which the user has access\cr
\link[paws.management:servicecatalog_list_provisioning_artifacts]{list_provisioning_artifacts} \tab Lists all provisioning artifacts (also known as versions) for the specified product\cr
\link[paws.management:servicecatalog_list_provisioning_artifacts_for_service_action]{list_provisioning_artifacts_for_service_action} \tab Lists all provisioning artifacts (also known as versions) for the specified self-service action\cr
\link[paws.management:servicecatalog_list_record_history]{list_record_history} \tab Lists the specified requests or all performed requests\cr
\link[paws.management:servicecatalog_list_resources_for_tag_option]{list_resources_for_tag_option} \tab Lists the resources associated with the specified TagOption\cr
\link[paws.management:servicecatalog_list_service_actions]{list_service_actions} \tab Lists all self-service actions\cr
\link[paws.management:servicecatalog_list_service_actions_for_provisioning_artifact]{list_service_actions_for_provisioning_artifact} \tab Returns a paginated list of self-service actions associated with the specified Product ID and Provisioning Artifact ID\cr
\link[paws.management:servicecatalog_list_stack_instances_for_provisioned_product]{list_stack_instances_for_provisioned_product} \tab Returns summary information about stack instances that are associated with the specified CFN_STACKSET type provisioned product\cr
\link[paws.management:servicecatalog_list_tag_options]{list_tag_options} \tab Lists the specified TagOptions or all TagOptions\cr
\link[paws.management:servicecatalog_notify_provision_product_engine_workflow_result]{notify_provision_product_engine_workflow_result} \tab Notifies the result of the provisioning engine execution\cr
\link[paws.management:servicecatalog_notify_terminate_provisioned_product_engine_workflow_result]{notify_terminate_provisioned_product_engine_workflow_result} \tab Notifies the result of the terminate engine execution\cr
\link[paws.management:servicecatalog_notify_update_provisioned_product_engine_workflow_result]{notify_update_provisioned_product_engine_workflow_result} \tab Notifies the result of the update engine execution\cr
\link[paws.management:servicecatalog_provision_product]{provision_product} \tab Provisions the specified product\cr
\link[paws.management:servicecatalog_reject_portfolio_share]{reject_portfolio_share} \tab Rejects an offer to share the specified portfolio\cr
\link[paws.management:servicecatalog_scan_provisioned_products]{scan_provisioned_products} \tab Lists the provisioned products that are available (not terminated)\cr
\link[paws.management:servicecatalog_search_products]{search_products} \tab Gets information about the products to which the caller has access\cr
\link[paws.management:servicecatalog_search_products_as_admin]{search_products_as_admin} \tab Gets information about the products for the specified portfolio or all products\cr
\link[paws.management:servicecatalog_search_provisioned_products]{search_provisioned_products} \tab Gets information about the provisioned products that meet the specified criteria\cr
\link[paws.management:servicecatalog_terminate_provisioned_product]{terminate_provisioned_product} \tab Terminates the specified provisioned product\cr
\link[paws.management:servicecatalog_update_constraint]{update_constraint} \tab Updates the specified constraint\cr
\link[paws.management:servicecatalog_update_portfolio]{update_portfolio} \tab Updates the specified portfolio\cr
\link[paws.management:servicecatalog_update_portfolio_share]{update_portfolio_share} \tab Updates the specified portfolio share\cr
\link[paws.management:servicecatalog_update_product]{update_product} \tab Updates the specified product\cr
\link[paws.management:servicecatalog_update_provisioned_product]{update_provisioned_product} \tab Requests updates to the configuration of the specified provisioned product\cr
\link[paws.management:servicecatalog_update_provisioned_product_properties]{update_provisioned_product_properties} \tab Requests updates to the properties of the specified provisioned product\cr
\link[paws.management:servicecatalog_update_provisioning_artifact]{update_provisioning_artifact} \tab Updates the specified provisioning artifact (also known as a version) for the specified product\cr
\link[paws.management:servicecatalog_update_service_action]{update_service_action} \tab Updates a self-service action\cr
\link[paws.management:servicecatalog_update_tag_option]{update_tag_option} \tab Updates the specified TagOption
}
}
\examples{
\dontrun{
svc <- servicecatalog()
svc$accept_portfolio_share(
Foo = 123
)
}
}
|
/cran/paws/man/servicecatalog.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 18,910
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{servicecatalog}
\alias{servicecatalog}
\title{AWS Service Catalog}
\usage{
servicecatalog(
config = list(),
credentials = list(),
endpoint = NULL,
region = NULL
)
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{credentials}:} {\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
}}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e. \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
\item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
}}
\item{credentials}{Optional credentials shorthand for the config parameter
\itemize{
\item{\strong{creds}:} {\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
}}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
}}
\item{endpoint}{Optional shorthand for complete URL to use for the constructed client.}
\item{region}{Optional shorthand for AWS Region used in instantiating the client.}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
Service Catalog
\href{https://aws.amazon.com/servicecatalog/}{Service Catalog} enables
organizations to create and manage catalogs of IT services that are
approved for Amazon Web Services. To get the most out of this
documentation, you should be familiar with the terminology discussed in
\href{https://docs.aws.amazon.com/servicecatalog/latest/adminguide/what-is_concepts.html}{Service Catalog Concepts}.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- servicecatalog(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical",
sts_regional_endpoint = "string"
),
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string"
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[paws.management:servicecatalog_accept_portfolio_share]{accept_portfolio_share} \tab Accepts an offer to share the specified portfolio\cr
\link[paws.management:servicecatalog_associate_budget_with_resource]{associate_budget_with_resource} \tab Associates the specified budget with the specified resource\cr
\link[paws.management:servicecatalog_associate_principal_with_portfolio]{associate_principal_with_portfolio} \tab Associates the specified principal ARN with the specified portfolio\cr
\link[paws.management:servicecatalog_associate_product_with_portfolio]{associate_product_with_portfolio} \tab Associates the specified product with the specified portfolio\cr
\link[paws.management:servicecatalog_associate_service_action_with_provisioning_artifact]{associate_service_action_with_provisioning_artifact} \tab Associates a self-service action with a provisioning artifact\cr
\link[paws.management:servicecatalog_associate_tag_option_with_resource]{associate_tag_option_with_resource} \tab Associate the specified TagOption with the specified portfolio or product\cr
\link[paws.management:servicecatalog_batch_associate_service_action_with_provisioning_artifact]{batch_associate_service_action_with_provisioning_artifact} \tab Associates multiple self-service actions with provisioning artifacts\cr
\link[paws.management:servicecatalog_batch_disassociate_service_action_from_provisioning_artifact]{batch_disassociate_service_action_from_provisioning_artifact} \tab Disassociates a batch of self-service actions from the specified provisioning artifact\cr
\link[paws.management:servicecatalog_copy_product]{copy_product} \tab Copies the specified source product to the specified target product or a new product\cr
\link[paws.management:servicecatalog_create_constraint]{create_constraint} \tab Creates a constraint\cr
\link[paws.management:servicecatalog_create_portfolio]{create_portfolio} \tab Creates a portfolio\cr
\link[paws.management:servicecatalog_create_portfolio_share]{create_portfolio_share} \tab Shares the specified portfolio with the specified account or organization node\cr
\link[paws.management:servicecatalog_create_product]{create_product} \tab Creates a product\cr
\link[paws.management:servicecatalog_create_provisioned_product_plan]{create_provisioned_product_plan} \tab Creates a plan\cr
\link[paws.management:servicecatalog_create_provisioning_artifact]{create_provisioning_artifact} \tab Creates a provisioning artifact (also known as a version) for the specified product\cr
\link[paws.management:servicecatalog_create_service_action]{create_service_action} \tab Creates a self-service action\cr
\link[paws.management:servicecatalog_create_tag_option]{create_tag_option} \tab Creates a TagOption\cr
\link[paws.management:servicecatalog_delete_constraint]{delete_constraint} \tab Deletes the specified constraint\cr
\link[paws.management:servicecatalog_delete_portfolio]{delete_portfolio} \tab Deletes the specified portfolio\cr
\link[paws.management:servicecatalog_delete_portfolio_share]{delete_portfolio_share} \tab Stops sharing the specified portfolio with the specified account or organization node\cr
\link[paws.management:servicecatalog_delete_product]{delete_product} \tab Deletes the specified product\cr
\link[paws.management:servicecatalog_delete_provisioned_product_plan]{delete_provisioned_product_plan} \tab Deletes the specified plan\cr
\link[paws.management:servicecatalog_delete_provisioning_artifact]{delete_provisioning_artifact} \tab Deletes the specified provisioning artifact (also known as a version) for the specified product\cr
\link[paws.management:servicecatalog_delete_service_action]{delete_service_action} \tab Deletes a self-service action\cr
\link[paws.management:servicecatalog_delete_tag_option]{delete_tag_option} \tab Deletes the specified TagOption\cr
\link[paws.management:servicecatalog_describe_constraint]{describe_constraint} \tab Gets information about the specified constraint\cr
\link[paws.management:servicecatalog_describe_copy_product_status]{describe_copy_product_status} \tab Gets the status of the specified copy product operation\cr
\link[paws.management:servicecatalog_describe_portfolio]{describe_portfolio} \tab Gets information about the specified portfolio\cr
\link[paws.management:servicecatalog_describe_portfolio_shares]{describe_portfolio_shares} \tab Returns a summary of each of the portfolio shares that were created for the specified portfolio\cr
\link[paws.management:servicecatalog_describe_portfolio_share_status]{describe_portfolio_share_status} \tab Gets the status of the specified portfolio share operation\cr
\link[paws.management:servicecatalog_describe_product]{describe_product} \tab Gets information about the specified product\cr
\link[paws.management:servicecatalog_describe_product_as_admin]{describe_product_as_admin} \tab Gets information about the specified product\cr
\link[paws.management:servicecatalog_describe_product_view]{describe_product_view} \tab Gets information about the specified product\cr
\link[paws.management:servicecatalog_describe_provisioned_product]{describe_provisioned_product} \tab Gets information about the specified provisioned product\cr
\link[paws.management:servicecatalog_describe_provisioned_product_plan]{describe_provisioned_product_plan} \tab Gets information about the resource changes for the specified plan\cr
\link[paws.management:servicecatalog_describe_provisioning_artifact]{describe_provisioning_artifact} \tab Gets information about the specified provisioning artifact (also known as a version) for the specified product\cr
\link[paws.management:servicecatalog_describe_provisioning_parameters]{describe_provisioning_parameters} \tab Gets information about the configuration required to provision the specified product using the specified provisioning artifact\cr
\link[paws.management:servicecatalog_describe_record]{describe_record} \tab Gets information about the specified request operation\cr
\link[paws.management:servicecatalog_describe_service_action]{describe_service_action} \tab Describes a self-service action\cr
\link[paws.management:servicecatalog_describe_service_action_execution_parameters]{describe_service_action_execution_parameters} \tab Finds the default parameters for a specific self-service action on a specific provisioned product and returns a map of the results to the user\cr
\link[paws.management:servicecatalog_describe_tag_option]{describe_tag_option} \tab Gets information about the specified TagOption\cr
\link[paws.management:servicecatalog_disable_aws_organizations_access]{disable_aws_organizations_access} \tab Disable portfolio sharing through the Organizations service\cr
\link[paws.management:servicecatalog_disassociate_budget_from_resource]{disassociate_budget_from_resource} \tab Disassociates the specified budget from the specified resource\cr
\link[paws.management:servicecatalog_disassociate_principal_from_portfolio]{disassociate_principal_from_portfolio} \tab Disassociates a previously associated principal ARN from a specified portfolio\cr
\link[paws.management:servicecatalog_disassociate_product_from_portfolio]{disassociate_product_from_portfolio} \tab Disassociates the specified product from the specified portfolio\cr
\link[paws.management:servicecatalog_disassociate_service_action_from_provisioning_artifact]{disassociate_service_action_from_provisioning_artifact} \tab Disassociates the specified self-service action association from the specified provisioning artifact\cr
\link[paws.management:servicecatalog_disassociate_tag_option_from_resource]{disassociate_tag_option_from_resource} \tab Disassociates the specified TagOption from the specified resource\cr
\link[paws.management:servicecatalog_enable_aws_organizations_access]{enable_aws_organizations_access} \tab Enable portfolio sharing feature through Organizations\cr
\link[paws.management:servicecatalog_execute_provisioned_product_plan]{execute_provisioned_product_plan} \tab Provisions or modifies a product based on the resource changes for the specified plan\cr
\link[paws.management:servicecatalog_execute_provisioned_product_service_action]{execute_provisioned_product_service_action} \tab Executes a self-service action against a provisioned product\cr
\link[paws.management:servicecatalog_get_aws_organizations_access_status]{get_aws_organizations_access_status} \tab Get the Access Status for Organizations portfolio share feature\cr
\link[paws.management:servicecatalog_get_provisioned_product_outputs]{get_provisioned_product_outputs} \tab This API takes either a ProvisionedProductId or a ProvisionedProductName, along with a list of one or more output keys, and responds with the key/value pairs of those outputs\cr
\link[paws.management:servicecatalog_import_as_provisioned_product]{import_as_provisioned_product} \tab Requests the import of a resource as a Service Catalog provisioned product that is associated with a Service Catalog product and provisioning artifact\cr
\link[paws.management:servicecatalog_list_accepted_portfolio_shares]{list_accepted_portfolio_shares} \tab Lists all imported portfolios for which account-to-account shares were accepted by this account\cr
\link[paws.management:servicecatalog_list_budgets_for_resource]{list_budgets_for_resource} \tab Lists all the budgets associated to the specified resource\cr
\link[paws.management:servicecatalog_list_constraints_for_portfolio]{list_constraints_for_portfolio} \tab Lists the constraints for the specified portfolio and product\cr
\link[paws.management:servicecatalog_list_launch_paths]{list_launch_paths} \tab Lists the paths to the specified product\cr
\link[paws.management:servicecatalog_list_organization_portfolio_access]{list_organization_portfolio_access} \tab Lists the organization nodes that have access to the specified portfolio\cr
\link[paws.management:servicecatalog_list_portfolio_access]{list_portfolio_access} \tab Lists the account IDs that have access to the specified portfolio\cr
\link[paws.management:servicecatalog_list_portfolios]{list_portfolios} \tab Lists all portfolios in the catalog\cr
\link[paws.management:servicecatalog_list_portfolios_for_product]{list_portfolios_for_product} \tab Lists all portfolios that the specified product is associated with\cr
\link[paws.management:servicecatalog_list_principals_for_portfolio]{list_principals_for_portfolio} \tab Lists all PrincipalARNs and corresponding PrincipalTypes associated with the specified portfolio\cr
\link[paws.management:servicecatalog_list_provisioned_product_plans]{list_provisioned_product_plans} \tab Lists the plans for the specified provisioned product or all plans to which the user has access\cr
\link[paws.management:servicecatalog_list_provisioning_artifacts]{list_provisioning_artifacts} \tab Lists all provisioning artifacts (also known as versions) for the specified product\cr
\link[paws.management:servicecatalog_list_provisioning_artifacts_for_service_action]{list_provisioning_artifacts_for_service_action} \tab Lists all provisioning artifacts (also known as versions) for the specified self-service action\cr
\link[paws.management:servicecatalog_list_record_history]{list_record_history} \tab Lists the specified requests or all performed requests\cr
\link[paws.management:servicecatalog_list_resources_for_tag_option]{list_resources_for_tag_option} \tab Lists the resources associated with the specified TagOption\cr
\link[paws.management:servicecatalog_list_service_actions]{list_service_actions} \tab Lists all self-service actions\cr
\link[paws.management:servicecatalog_list_service_actions_for_provisioning_artifact]{list_service_actions_for_provisioning_artifact} \tab Returns a paginated list of self-service actions associated with the specified Product ID and Provisioning Artifact ID\cr
\link[paws.management:servicecatalog_list_stack_instances_for_provisioned_product]{list_stack_instances_for_provisioned_product} \tab Returns summary information about stack instances that are associated with the specified CFN_STACKSET type provisioned product\cr
\link[paws.management:servicecatalog_list_tag_options]{list_tag_options} \tab Lists the specified TagOptions or all TagOptions\cr
\link[paws.management:servicecatalog_notify_provision_product_engine_workflow_result]{notify_provision_product_engine_workflow_result} \tab Notifies the result of the provisioning engine execution\cr
\link[paws.management:servicecatalog_notify_terminate_provisioned_product_engine_workflow_result]{notify_terminate_provisioned_product_engine_workflow_result} \tab Notifies the result of the terminate engine execution\cr
\link[paws.management:servicecatalog_notify_update_provisioned_product_engine_workflow_result]{notify_update_provisioned_product_engine_workflow_result} \tab Notifies the result of the update engine execution\cr
\link[paws.management:servicecatalog_provision_product]{provision_product} \tab Provisions the specified product\cr
\link[paws.management:servicecatalog_reject_portfolio_share]{reject_portfolio_share} \tab Rejects an offer to share the specified portfolio\cr
\link[paws.management:servicecatalog_scan_provisioned_products]{scan_provisioned_products} \tab Lists the provisioned products that are available (not terminated)\cr
\link[paws.management:servicecatalog_search_products]{search_products} \tab Gets information about the products to which the caller has access\cr
\link[paws.management:servicecatalog_search_products_as_admin]{search_products_as_admin} \tab Gets information about the products for the specified portfolio or all products\cr
\link[paws.management:servicecatalog_search_provisioned_products]{search_provisioned_products} \tab Gets information about the provisioned products that meet the specified criteria\cr
\link[paws.management:servicecatalog_terminate_provisioned_product]{terminate_provisioned_product} \tab Terminates the specified provisioned product\cr
\link[paws.management:servicecatalog_update_constraint]{update_constraint} \tab Updates the specified constraint\cr
\link[paws.management:servicecatalog_update_portfolio]{update_portfolio} \tab Updates the specified portfolio\cr
\link[paws.management:servicecatalog_update_portfolio_share]{update_portfolio_share} \tab Updates the specified portfolio share\cr
\link[paws.management:servicecatalog_update_product]{update_product} \tab Updates the specified product\cr
\link[paws.management:servicecatalog_update_provisioned_product]{update_provisioned_product} \tab Requests updates to the configuration of the specified provisioned product\cr
\link[paws.management:servicecatalog_update_provisioned_product_properties]{update_provisioned_product_properties} \tab Requests updates to the properties of the specified provisioned product\cr
\link[paws.management:servicecatalog_update_provisioning_artifact]{update_provisioning_artifact} \tab Updates the specified provisioning artifact (also known as a version) for the specified product\cr
\link[paws.management:servicecatalog_update_service_action]{update_service_action} \tab Updates a self-service action\cr
\link[paws.management:servicecatalog_update_tag_option]{update_tag_option} \tab Updates the specified TagOption
}
}
\examples{
\dontrun{
svc <- servicecatalog()
svc$accept_portfolio_share(
Foo = 123
)
}
}
|
# SpaceFilling: build the association (associate-class) matrix of a
# space-filling design from a named association scheme.
#
# Interactive: prompts on the console (via readline) for the dimensions of
# the association-scheme array(s).  Treatments 1..s (or 1..S when there are
# w arrays) are laid out row-by-row in n x l arrays, and SF[d, dd] is filled
# with an integer class label (diagonal = 1) depending on whether d and dd
# share a row, a column, and/or an array.
#
# Argument:
#   asch - character scalar naming the scheme: "Div", "Rect", "Nestdiv",
#          "RightAng", "GrectRightAng4", "GrectRightAng5", "GrectRightAng7".
#          NOTE(review): any other value leaves SF undefined and the final
#          max(SF) errors.
#
# Returns: list(SFDesign = SF, Runs = nrow(SF), Factors = nrow(SF),
#          Levels = max(SF)).  Runs and Factors are both set to nrow(SF).
SpaceFilling <-
function(asch){
# fun1: prompt for rows (n) and columns (l) of one array; returns c(n, l).
fun1<-function() {
n<-readline("Number of lines of association schemes array :\n")
l<-readline("Number of columns of association schemes array :\n")
n<-as.integer(n);l<-as.integer(l)
return(c(n,l))}
# fun2: as fun1, plus the number of arrays w; returns c(n, l, w).
fun2<-function() {
n<-readline("Number of lines of association schemes array :\n")
l<-readline("Number of columns of association schemes array :\n")
w<-readline("Number of the association scheme arrays :\n")
n<-as.integer(n);l<-as.integer(l);w<-as.integer(w)
return(c(n,l,w))}
## Case "Div" (divisible): class 1 = same row of A, class 2 = otherwise.
if (asch == "Div"){
V<-fun1();n<-V[1];l<-V[2]
s<-n*l;A<-matrix(1:s, ncol = V[2], byrow=TRUE)
SF<-matrix(ncol=s,nrow=s)
for (d in 1:s) {
SF[d,d]<-1
for (dd in 1:s){
# d1/d2 = row of d/dd in A: which() indexes column-major over n rows,
# so the position %% n is the row (0 mapped back to n).
D<-which(A==d); d1<-D%%n ; if (d1==0){d1<-n};DD<-which(A==dd); d2<-DD%%n ; if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
else {SF[d,dd]<-2;SF[dd,d]<-2}}}}
## Case "Rect" (rectangular): same row = 1, same column = 2, neither = 3.
if (asch == "Rect"){
V<-fun1();n<-V[1];l<-V[2];s<-n*l;A<-matrix(1:s, ncol =l, byrow=TRUE)
SF<-matrix(ncol=s,nrow=s)
for (d in 1:s) {
SF[d,d]<-1
for (dd in 1:s){
B<-t(A)
# d1/d2 = row of d/dd in A; d11/d12 = row in t(A), i.e. column of A.
D<-which(A==d); d1<-D%%n ; if (d1==0){d1<-n};DD<-which(A==dd); d2<-DD%%n
if (d2==0){d2<-n}
D1<-which(B==d); d11<-D1%%l ; if (d11==0){d11<-l};DD1<-which(B==dd); d12<-DD1%%l
if (d12==0){d12<-l}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
if (is.na(SF[d,dd])==TRUE){
if (d11==d12) {SF[d,dd]<-2;SF[dd,d]<-2}}}}
# Pairs left NA share neither row nor column: class 3.
for (d in 1:s) {
for (dd in 1:s){
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-3;SF[dd,d]<-3}}}}
## Case "Nestdiv" (nested divisible): w disjoint arrays; within an array
## same row = 1, different row = 2; pairs from different arrays = 3.
if (asch == "Nestdiv"){
V<-fun2();n<-V[1];l<-V[2];w<-V[3]
s<-l*n;A<-NULL;S<-l*n*w
SF<-matrix(ncol=S,nrow=S)
# Array i holds treatments (i-1)*s + 1 .. i*s.
for (i in 1:w){
A[[i]]<-matrix(1:s, ncol=l, byrow=TRUE)
z<-(i-1)*s
A[[i]]<-A[[i]]+z};B<-Reduce("rbind",A)
for (i in 1:w) {
a<-A[[i]];mi<-min(a);ma<-max(a)
for (d in mi:ma){
for (dd in mi:ma){
D<-which(a==d); d1<-D%%n ; if (d1==0){d1<-n}
DD<-which(a==dd); d2<-DD%%n ; if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
else {SF[d,dd]<-2;SF[dd,d]<-2}}}}
# Cross-array pairs were never visited above: class 3.
for (d in 1:S) {
for (dd in 1:S){
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-3;SF[dd,d]<-3}}}}
## Case "RightAng" (right angular): within-array classes 1/2 as in "Div",
## plus classes 3/4 against the next array (same row / different row).
## NOTE(review): the inner 'for (i in 1:w)' below shadows the outer loop
## variable i and runs inside the dd loop; kept byte-identical here -
## confirm the intended nesting against the package sources.
if (asch == "RightAng"){
V<-fun2();n<-V[1];l<-V[2];w<-V[3];s<-l*n;A<-NULL;S<-l*n*w
SF<-matrix(ncol=S,nrow=S)
for (i in 1:w){
A[[i]]<-matrix(1:s, ncol=l, byrow=TRUE)
z<-(i-1)*s
A[[i]]<-A[[i]]+z};B<-Reduce("rbind",A)
for (i in 1:w) {
a<-A[[i]];mi<-min(a);ma<-max(a)
for (d in mi:ma){
for (dd in mi:ma){
D<-which(a==d); d1<-D%%n ; if (d1==0){d1<-n}
DD<-which(a==dd); d2<-DD%%n ; if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
else {SF[d,dd]<-2;SF[dd,d]<-2}}
# Compare d against every treatment db of the following array:
# same row -> class 3, otherwise -> class 4 (first write wins via the
# is.na() guards).
for (i in 1:w) {
if (i < w){
b<-A[[i+1]];mib<-min(b);mab<-max(b)
for (db in mib:mab){
DB<-which(b==db); db2<-DB%%n ; if (db2==0){db2<-n}
if (d1==db2) {if (is.na(SF[d,db])==TRUE){
SF[d,db]<-3;SF[db,d]<-3}}
else {if (is.na(SF[d,db])==TRUE){
SF[d,db]<-4;SF[db,d]<-4}}}}}}}}
## Case "GrectRightAng4": within an array, same row = 1, then a second
## position test on t(a) = 2, other within-array pairs = 3; everything
## still NA afterwards (cross-array pairs) = 4.
if (asch == "GrectRightAng4"){
V<-fun2();n<-V[1];l<-V[2];w<-V[3];s<-l*n;A<-NULL;S<-l*n*w;SF<-matrix(ncol=S,nrow=S)
for (i in 1:w){
A[[i]]<-matrix(1:s, ncol=l, byrow=TRUE);z<-(i-1)*s
A[[i]]<-A[[i]]+z};B<-Reduce("rbind",A)
for (i in 1:w) {
a<-A[[i]];mi<-min(a);ma<-max(a)
B<-t(a)
for (d in mi:ma){
for (dd in mi:ma){
D<-which(a==d); d1<-D%%n ; if (d1==0){d1<-n}
DD<-which(a==dd); d2<-DD%%n ; if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
# d11/d21 take positions in t(a) modulo n.  NOTE(review): t(a) has l
# rows, so the divisor n (not l) looks suspicious when n != l; kept
# byte-identical - verify against the original algorithm.
D1<-which(B==d); d11<-D1%%n ; if (d11==0){d11<-n}
DD1<-which(B==dd); d21<-DD1%%n ; if (d21==0){d21<-n}
if (d11==d21)
if (is.na(SF[d,dd])==TRUE){
{SF[d,dd]<-2;SF[dd,d]<-2}}
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-3;SF[dd,d]<-3}}}}
for (d in 1:S) {
for (dd in 1:S){
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-4;SF[dd,d]<-4}}}}
## Case "GrectRightAng5": classes 1-3 within an array as in GrectRightAng4,
## plus classes 4/5 against the next array (same row / otherwise).
## NOTE(review): same inner-loop shadowing of i as in "RightAng".
if (asch == "GrectRightAng5"){
V<-fun2();n<-V[1];l<-V[2];w<-V[3];s<-l*n;A<-NULL;S<-l*n*w;SF<-matrix(ncol=S,nrow=S)
for (i in 1:w){
A[[i]]<-matrix(1:s, ncol=l, byrow=TRUE);z<-(i-1)*s
A[[i]]<-A[[i]]+z};B<-Reduce("rbind",A);SF<-matrix(ncol=S,nrow=S)
for (i in 1:w) {
a<-A[[i]];mi<-min(a);ma<-max(a);B<-t(a)
for (d in mi:ma){
for (dd in mi:ma){
D<-which(a==d); d1<-D%%n ; if (d1==0){d1<-n}
DD<-which(a==dd); d2<-DD%%n ; if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
D1<-which(B==d); d11<-D1%%n ; if (d11==0){d11<-n}
DD1<-which(B==dd); d21<-DD1%%n ; if (d21==0){d21<-n}
if (d11==d21)
if (is.na(SF[d,dd])==TRUE){
{SF[d,dd]<-2;SF[dd,d]<-2}}
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-3;SF[dd,d]<-3}}
for (i in 1:w) {
if (i < w){
bb<-A[[i+1]];mib<-min(bb);mab<-max(bb)
for (db in mib:mab){
DB<-which(bb==db); db2<-DB%%n ; if (db2==0){db2<-n}
if (d1==db2) {if (is.na(SF[d,db])==TRUE){SF[d,db]<-4;SF[db,d]<-4}}
else {if (is.na(SF[d,db])==TRUE){SF[d,db]<-5;SF[db,d]<-5}}}}}}}}
## Case "GrectRightAng7": classes 1-3 within an array as above, then
## classes 4-6 against the next array via three position tests, and 7
## for every pair still unclassified at the end.
if (asch == "GrectRightAng7"){
V<-fun2();n<-V[1];l<-V[2];w<-V[3];s<-l*n;S<-l*n*w;A<-NULL
for (i in 1:w){
A[[i]]<-matrix(1:s, ncol=l, byrow=TRUE);z<-(i-1)*s
A[[i]]<-A[[i]]+z};B<-Reduce("rbind",A);SF<-matrix(ncol=S,nrow=S)
for (i in 1:w) {
a<-A[[i]];mi<-min(a);ma<-max(a);B<-t(a)
for (d in mi:ma){
for (dd in mi:ma){
D<-which(a==d); d1<-D%%n ; if (d1==0){d1<-n};DD<-which(a==dd); d2<-DD%%n
if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
D1<-which(B==d); d11<-D1%%n ; if (d11==0){d11<-n};DD1<-which(B==dd);d21<-DD1%%n
if (d21==0){d21<-n}
if (d11==d21)
if (is.na(SF[d,dd])==TRUE){
{SF[d,dd]<-2;SF[dd,d]<-2}}
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-3;SF[dd,d]<-3}}
for (i in 1:w) {
if (i < w){
bb<-A[[i+1]];mib<-min(bb);mab<-max(bb)
for (db in mib:mab){
# D==DB below compares linear positions, i.e. d and db occupy the
# same cell of their respective arrays.
B2<-t(bb);DB<-which(bb==db); db2<-DB%%n ; if (db2==0){db2<-n}
n1<-which(B2==db); n21<-n1%%n ; if (n21==0){n21<-n}
if (D==DB) {if (is.na(SF[d,db])==TRUE){SF[d,db]<-4;SF[db,d]<-4}}
if (d11==db2) {if (is.na(SF[d,db])==TRUE){SF[d,db]<-5;SF[db,d]<-5}}
if (d1==n21)
if (is.na(SF[d,db])==TRUE){
{SF[d,db]<-6;SF[db,d]<-6}}}}}}}
for (d in 1:S) {
for (dd in 1:S){
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-7;SF[dd,d]<-7}}}}
# Levels = number of associate classes actually used; Runs = order of SF.
NN<-max(SF)
RR<-dim(SF)[1]
return(list(SFDesign=SF,Runs=RR,Factors=RR,Levels=NN))}
|
/RPPairwiseDesign/R/SpaceFilling.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 5,999
|
r
|
# SpaceFilling: build the association (associate-class) matrix of a
# space-filling design from a named association scheme.
#
# Interactive: prompts on the console (via readline) for the dimensions of
# the association-scheme array(s).  Treatments 1..s (or 1..S when there are
# w arrays) are laid out row-by-row in n x l arrays, and SF[d, dd] is filled
# with an integer class label (diagonal = 1) depending on whether d and dd
# share a row, a column, and/or an array.
#
# Argument:
#   asch - character scalar naming the scheme: "Div", "Rect", "Nestdiv",
#          "RightAng", "GrectRightAng4", "GrectRightAng5", "GrectRightAng7".
#          NOTE(review): any other value leaves SF undefined and the final
#          max(SF) errors.
#
# Returns: list(SFDesign = SF, Runs = nrow(SF), Factors = nrow(SF),
#          Levels = max(SF)).  Runs and Factors are both set to nrow(SF).
SpaceFilling <-
function(asch){
# fun1: prompt for rows (n) and columns (l) of one array; returns c(n, l).
fun1<-function() {
n<-readline("Number of lines of association schemes array :\n")
l<-readline("Number of columns of association schemes array :\n")
n<-as.integer(n);l<-as.integer(l)
return(c(n,l))}
# fun2: as fun1, plus the number of arrays w; returns c(n, l, w).
fun2<-function() {
n<-readline("Number of lines of association schemes array :\n")
l<-readline("Number of columns of association schemes array :\n")
w<-readline("Number of the association scheme arrays :\n")
n<-as.integer(n);l<-as.integer(l);w<-as.integer(w)
return(c(n,l,w))}
## Case "Div" (divisible): class 1 = same row of A, class 2 = otherwise.
if (asch == "Div"){
V<-fun1();n<-V[1];l<-V[2]
s<-n*l;A<-matrix(1:s, ncol = V[2], byrow=TRUE)
SF<-matrix(ncol=s,nrow=s)
for (d in 1:s) {
SF[d,d]<-1
for (dd in 1:s){
# d1/d2 = row of d/dd in A: which() indexes column-major over n rows,
# so the position %% n is the row (0 mapped back to n).
D<-which(A==d); d1<-D%%n ; if (d1==0){d1<-n};DD<-which(A==dd); d2<-DD%%n ; if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
else {SF[d,dd]<-2;SF[dd,d]<-2}}}}
## Case "Rect" (rectangular): same row = 1, same column = 2, neither = 3.
if (asch == "Rect"){
V<-fun1();n<-V[1];l<-V[2];s<-n*l;A<-matrix(1:s, ncol =l, byrow=TRUE)
SF<-matrix(ncol=s,nrow=s)
for (d in 1:s) {
SF[d,d]<-1
for (dd in 1:s){
B<-t(A)
# d1/d2 = row of d/dd in A; d11/d12 = row in t(A), i.e. column of A.
D<-which(A==d); d1<-D%%n ; if (d1==0){d1<-n};DD<-which(A==dd); d2<-DD%%n
if (d2==0){d2<-n}
D1<-which(B==d); d11<-D1%%l ; if (d11==0){d11<-l};DD1<-which(B==dd); d12<-DD1%%l
if (d12==0){d12<-l}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
if (is.na(SF[d,dd])==TRUE){
if (d11==d12) {SF[d,dd]<-2;SF[dd,d]<-2}}}}
# Pairs left NA share neither row nor column: class 3.
for (d in 1:s) {
for (dd in 1:s){
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-3;SF[dd,d]<-3}}}}
## Case "Nestdiv" (nested divisible): w disjoint arrays; within an array
## same row = 1, different row = 2; pairs from different arrays = 3.
if (asch == "Nestdiv"){
V<-fun2();n<-V[1];l<-V[2];w<-V[3]
s<-l*n;A<-NULL;S<-l*n*w
SF<-matrix(ncol=S,nrow=S)
# Array i holds treatments (i-1)*s + 1 .. i*s.
for (i in 1:w){
A[[i]]<-matrix(1:s, ncol=l, byrow=TRUE)
z<-(i-1)*s
A[[i]]<-A[[i]]+z};B<-Reduce("rbind",A)
for (i in 1:w) {
a<-A[[i]];mi<-min(a);ma<-max(a)
for (d in mi:ma){
for (dd in mi:ma){
D<-which(a==d); d1<-D%%n ; if (d1==0){d1<-n}
DD<-which(a==dd); d2<-DD%%n ; if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
else {SF[d,dd]<-2;SF[dd,d]<-2}}}}
# Cross-array pairs were never visited above: class 3.
for (d in 1:S) {
for (dd in 1:S){
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-3;SF[dd,d]<-3}}}}
## Case "RightAng" (right angular): within-array classes 1/2 as in "Div",
## plus classes 3/4 against the next array (same row / different row).
## NOTE(review): the inner 'for (i in 1:w)' below shadows the outer loop
## variable i and runs inside the dd loop; kept byte-identical here -
## confirm the intended nesting against the package sources.
if (asch == "RightAng"){
V<-fun2();n<-V[1];l<-V[2];w<-V[3];s<-l*n;A<-NULL;S<-l*n*w
SF<-matrix(ncol=S,nrow=S)
for (i in 1:w){
A[[i]]<-matrix(1:s, ncol=l, byrow=TRUE)
z<-(i-1)*s
A[[i]]<-A[[i]]+z};B<-Reduce("rbind",A)
for (i in 1:w) {
a<-A[[i]];mi<-min(a);ma<-max(a)
for (d in mi:ma){
for (dd in mi:ma){
D<-which(a==d); d1<-D%%n ; if (d1==0){d1<-n}
DD<-which(a==dd); d2<-DD%%n ; if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
else {SF[d,dd]<-2;SF[dd,d]<-2}}
# Compare d against every treatment db of the following array:
# same row -> class 3, otherwise -> class 4 (first write wins via the
# is.na() guards).
for (i in 1:w) {
if (i < w){
b<-A[[i+1]];mib<-min(b);mab<-max(b)
for (db in mib:mab){
DB<-which(b==db); db2<-DB%%n ; if (db2==0){db2<-n}
if (d1==db2) {if (is.na(SF[d,db])==TRUE){
SF[d,db]<-3;SF[db,d]<-3}}
else {if (is.na(SF[d,db])==TRUE){
SF[d,db]<-4;SF[db,d]<-4}}}}}}}}
## Case "GrectRightAng4": within an array, same row = 1, then a second
## position test on t(a) = 2, other within-array pairs = 3; everything
## still NA afterwards (cross-array pairs) = 4.
if (asch == "GrectRightAng4"){
V<-fun2();n<-V[1];l<-V[2];w<-V[3];s<-l*n;A<-NULL;S<-l*n*w;SF<-matrix(ncol=S,nrow=S)
for (i in 1:w){
A[[i]]<-matrix(1:s, ncol=l, byrow=TRUE);z<-(i-1)*s
A[[i]]<-A[[i]]+z};B<-Reduce("rbind",A)
for (i in 1:w) {
a<-A[[i]];mi<-min(a);ma<-max(a)
B<-t(a)
for (d in mi:ma){
for (dd in mi:ma){
D<-which(a==d); d1<-D%%n ; if (d1==0){d1<-n}
DD<-which(a==dd); d2<-DD%%n ; if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
# d11/d21 take positions in t(a) modulo n.  NOTE(review): t(a) has l
# rows, so the divisor n (not l) looks suspicious when n != l; kept
# byte-identical - verify against the original algorithm.
D1<-which(B==d); d11<-D1%%n ; if (d11==0){d11<-n}
DD1<-which(B==dd); d21<-DD1%%n ; if (d21==0){d21<-n}
if (d11==d21)
if (is.na(SF[d,dd])==TRUE){
{SF[d,dd]<-2;SF[dd,d]<-2}}
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-3;SF[dd,d]<-3}}}}
for (d in 1:S) {
for (dd in 1:S){
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-4;SF[dd,d]<-4}}}}
## Case "GrectRightAng5": classes 1-3 within an array as in GrectRightAng4,
## plus classes 4/5 against the next array (same row / otherwise).
## NOTE(review): same inner-loop shadowing of i as in "RightAng".
if (asch == "GrectRightAng5"){
V<-fun2();n<-V[1];l<-V[2];w<-V[3];s<-l*n;A<-NULL;S<-l*n*w;SF<-matrix(ncol=S,nrow=S)
for (i in 1:w){
A[[i]]<-matrix(1:s, ncol=l, byrow=TRUE);z<-(i-1)*s
A[[i]]<-A[[i]]+z};B<-Reduce("rbind",A);SF<-matrix(ncol=S,nrow=S)
for (i in 1:w) {
a<-A[[i]];mi<-min(a);ma<-max(a);B<-t(a)
for (d in mi:ma){
for (dd in mi:ma){
D<-which(a==d); d1<-D%%n ; if (d1==0){d1<-n}
DD<-which(a==dd); d2<-DD%%n ; if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
D1<-which(B==d); d11<-D1%%n ; if (d11==0){d11<-n}
DD1<-which(B==dd); d21<-DD1%%n ; if (d21==0){d21<-n}
if (d11==d21)
if (is.na(SF[d,dd])==TRUE){
{SF[d,dd]<-2;SF[dd,d]<-2}}
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-3;SF[dd,d]<-3}}
for (i in 1:w) {
if (i < w){
bb<-A[[i+1]];mib<-min(bb);mab<-max(bb)
for (db in mib:mab){
DB<-which(bb==db); db2<-DB%%n ; if (db2==0){db2<-n}
if (d1==db2) {if (is.na(SF[d,db])==TRUE){SF[d,db]<-4;SF[db,d]<-4}}
else {if (is.na(SF[d,db])==TRUE){SF[d,db]<-5;SF[db,d]<-5}}}}}}}}
## Case "GrectRightAng7": classes 1-3 within an array as above, then
## classes 4-6 against the next array via three position tests, and 7
## for every pair still unclassified at the end.
if (asch == "GrectRightAng7"){
V<-fun2();n<-V[1];l<-V[2];w<-V[3];s<-l*n;S<-l*n*w;A<-NULL
for (i in 1:w){
A[[i]]<-matrix(1:s, ncol=l, byrow=TRUE);z<-(i-1)*s
A[[i]]<-A[[i]]+z};B<-Reduce("rbind",A);SF<-matrix(ncol=S,nrow=S)
for (i in 1:w) {
a<-A[[i]];mi<-min(a);ma<-max(a);B<-t(a)
for (d in mi:ma){
for (dd in mi:ma){
D<-which(a==d); d1<-D%%n ; if (d1==0){d1<-n};DD<-which(a==dd); d2<-DD%%n
if (d2==0){d2<-n}
if (d1==d2) {SF[d,dd]<-1;SF[dd,d]<-1}
D1<-which(B==d); d11<-D1%%n ; if (d11==0){d11<-n};DD1<-which(B==dd);d21<-DD1%%n
if (d21==0){d21<-n}
if (d11==d21)
if (is.na(SF[d,dd])==TRUE){
{SF[d,dd]<-2;SF[dd,d]<-2}}
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-3;SF[dd,d]<-3}}
for (i in 1:w) {
if (i < w){
bb<-A[[i+1]];mib<-min(bb);mab<-max(bb)
for (db in mib:mab){
# D==DB below compares linear positions, i.e. d and db occupy the
# same cell of their respective arrays.
B2<-t(bb);DB<-which(bb==db); db2<-DB%%n ; if (db2==0){db2<-n}
n1<-which(B2==db); n21<-n1%%n ; if (n21==0){n21<-n}
if (D==DB) {if (is.na(SF[d,db])==TRUE){SF[d,db]<-4;SF[db,d]<-4}}
if (d11==db2) {if (is.na(SF[d,db])==TRUE){SF[d,db]<-5;SF[db,d]<-5}}
if (d1==n21)
if (is.na(SF[d,db])==TRUE){
{SF[d,db]<-6;SF[db,d]<-6}}}}}}}
for (d in 1:S) {
for (dd in 1:S){
if (is.na(SF[d,dd])==TRUE){
SF[d,dd]<-7;SF[dd,d]<-7}}}}
# Levels = number of associate classes actually used; Runs = order of SF.
NN<-max(SF)
RR<-dim(SF)[1]
return(list(SFDesign=SF,Runs=RR,Factors=RR,Levels=NN))}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discretize_exprs_supervised.R
\name{discretize_exprs_supervised}
\alias{discretize_exprs_supervised}
\title{supervised_disc_df}
\usage{
discretize_exprs_supervised(expression_table, target, parallel = FALSE)
}
\arguments{
\item{expression_table}{A previously normalized expression table}
\item{target}{A series of labels matching each of the values in the gene vector
(genes in rows, cells/samples in columns)}
\item{parallel}{Run calculations in parallel. May be worth it if the number of rows and columns is really large. Do watch out for memory overload.}
}
\value{
A data frame with the discretized features in the same order as previously
}
\description{
Uses several discretizations and selects the one that is best for a given variable (gene)
in comparison to a target class by equivocation
}
\examples{
data(scDengue)
exprs <- as.data.frame(SummarizedExperiment::assay(scDengue, 'logcounts'))
exprs <- exprs [1:200, 1:120]
infection <- SummarizedExperiment::colData(scDengue)
target <- infection$infection
discrete_expression <- as.data.frame(discretize_exprs_supervised(exprs,target))
fcbf(discrete_expression,target, thresh = 0.05, verbose = TRUE)
}
|
/man/discretize_exprs_supervised.Rd
|
no_license
|
AndiPauli/FCBF
|
R
| false
| true
| 1,239
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discretize_exprs_supervised.R
\name{discretize_exprs_supervised}
\alias{discretize_exprs_supervised}
\title{supervised_disc_df}
\usage{
discretize_exprs_supervised(expression_table, target, parallel = FALSE)
}
\arguments{
\item{expression_table}{A previously normalized expression table}
\item{target}{A series of labels matching each of the values in the gene vector
(genes in rows, cells/samples in columns)}
\item{parallel}{Run calculations in parallel. May be worth it if the number of rows and columns is really large. Do watch out for memory overload.}
}
\value{
A data frame with the discretized features in the same order as previously
}
\description{
Uses several discretizations and selects the one that is best for a given variable (gene)
in comparison to a target class by equivocation
}
\examples{
data(scDengue)
exprs <- as.data.frame(SummarizedExperiment::assay(scDengue, 'logcounts'))
exprs <- exprs [1:200, 1:120]
infection <- SummarizedExperiment::colData(scDengue)
target <- infection$infection
discrete_expression <- as.data.frame(discretize_exprs_supervised(exprs,target))
fcbf(discrete_expression,target, thresh = 0.05, verbose = TRUE)
}
|
# S4 generic and method 'foodUpdate' (B.PP.GenBms.FoodUpdate.01, v0.01).
# Adds the per-polygon environment signal of a related Environment element
# to the biomass of a primary-production (general biomass) element, making
# it available as food for secondary producers.  The result is stored back
# into the element via setState(); nothing useful is returned to the caller.
if (!isGeneric("foodUpdate"))
  setGeneric("foodUpdate", function(element, universe) standardGeneric("foodUpdate"))
setMethod("foodUpdate", signature(element="PPgenBMS", universe="Universe"),
  function(element, universe)
  {
    # Runtime bookkeeping pulled from the universe.
    # act (ActionMat row): [1] module, [2] element, [3] period,
    # [4] reference day in year, [5] action number in period (NA if none),
    # [6] 1 = before / 2 = during / 3 = after (NA if none).
    act <- getRTState(universe, "currentAction")
    # period: info about the active time period (Number, Day, KnifeEdge,
    # YearPropn, PeriodStart, PeriodEnd).  Fetched for parity with other
    # actions; not used below.
    period <- getRTState(universe, "currentPeriodInfo")
    state <- getState(element)
    steps <- getTimestep(element)
    # Descriptor of the action currently being executed for this element.
    curAction <- steps[[act[3]]]$actions[[act[5]]]
    envIdx <- curAction$relatedIndexes      # (module, element) indexes of the Environment element
    envNames <- curAction$relatedElements   # human-readable names for error reporting
    # dSet could be used to transform the environment variable; at present
    # no transformation is undertaken.
    dSet <- curAction$dset
    # Halt if the related Environment element is missing from the universe.
    if (is.null(universe$modules[[envIdx[1]]][[envIdx[2]]])) {
      epocErrorMessage(element, "Missing element '", envNames[2], "' from ", envNames[1],
        " module as required by '", getSignature(element, "Name.short"), "' relationship.", halt=TRUE)
    }
    # Per-polygon environment vector becomes the food increment.
    food <- getState(universe$modules[[envIdx[1]]][[envIdx[2]]], "PolygonEnv")
    state$Abundance$mass <- state$Abundance$mass + food
    # Persist the updated state for this element of the universe.
    setState(element, value=state)
  }
)
|
/2 Biota/1 Primary production/Food/B.PP.GenBms.FoodUpdate.01.R
|
no_license
|
AndrewJConstable/EPOCuniverse
|
R
| false
| false
| 3,067
|
r
|
# S4 generic and method 'foodUpdate' (B.PP.GenBms.FoodUpdate.01, v0.01).
# Adds the per-polygon environment signal of a related Environment element
# to the biomass of a primary-production (general biomass) element, making
# it available as food for secondary producers.  The result is stored back
# into the element via setState(); nothing useful is returned to the caller.
if (!isGeneric("foodUpdate"))
  setGeneric("foodUpdate", function(element, universe) standardGeneric("foodUpdate"))
setMethod("foodUpdate", signature(element="PPgenBMS", universe="Universe"),
  function(element, universe)
  {
    # Runtime bookkeeping pulled from the universe.
    # act (ActionMat row): [1] module, [2] element, [3] period,
    # [4] reference day in year, [5] action number in period (NA if none),
    # [6] 1 = before / 2 = during / 3 = after (NA if none).
    act <- getRTState(universe, "currentAction")
    # period: info about the active time period (Number, Day, KnifeEdge,
    # YearPropn, PeriodStart, PeriodEnd).  Fetched for parity with other
    # actions; not used below.
    period <- getRTState(universe, "currentPeriodInfo")
    state <- getState(element)
    steps <- getTimestep(element)
    # Descriptor of the action currently being executed for this element.
    curAction <- steps[[act[3]]]$actions[[act[5]]]
    envIdx <- curAction$relatedIndexes      # (module, element) indexes of the Environment element
    envNames <- curAction$relatedElements   # human-readable names for error reporting
    # dSet could be used to transform the environment variable; at present
    # no transformation is undertaken.
    dSet <- curAction$dset
    # Halt if the related Environment element is missing from the universe.
    if (is.null(universe$modules[[envIdx[1]]][[envIdx[2]]])) {
      epocErrorMessage(element, "Missing element '", envNames[2], "' from ", envNames[1],
        " module as required by '", getSignature(element, "Name.short"), "' relationship.", halt=TRUE)
    }
    # Per-polygon environment vector becomes the food increment.
    food <- getState(universe$modules[[envIdx[1]]][[envIdx[2]]], "PolygonEnv")
    state$Abundance$mass <- state$Abundance$mass + food
    # Persist the updated state for this element of the universe.
    setState(element, value=state)
  }
)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dimple.R
\name{renderdimple}
\alias{renderdimple}
\title{Widget render function for use in Shiny}
\usage{
renderdimple(expr, env = parent.frame(), quoted = FALSE)
}
\description{
Widget render function for use in Shiny
}
|
/man/renderdimple.Rd
|
no_license
|
levmorgan/dimple
|
R
| false
| false
| 308
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dimple.R
\name{renderdimple}
\alias{renderdimple}
\title{Widget render function for use in Shiny}
\usage{
renderdimple(expr, env = parent.frame(), quoted = FALSE)
}
\description{
Widget render function for use in Shiny
}
|
run_analysis <- function () {
  # Build a tidy summary of the UCI HAR (Human Activity Recognition) dataset.
  #
  # Reads the raw training and test sets from "UCI HAR Dataset/" under the
  # current working directory, merges them, names the measurement columns
  # after features.txt, attaches descriptive activity names, and writes
  # "tidy.txt" containing the mean of every mean()/std() measurement for
  # each (Activity, Subject) pair.
  #
  # Fixes vs. the previous version:
  #  - drops the dependency on dplyr's long-defunct summarise_each()/funs(),
  #    which were called without dplyr ever being attached by this function;
  #    base aggregate() is used instead.
  #  - removes the no-op "allmerged <- allmerged[,]" line (it never removed
  #    anything).
  #
  # Returns: (invisibly) the tidy data frame that was written.
  # Side effects: writes "tidy.txt" in the working directory.

  # Read training data.
  trainingset <- read.table("UCI HAR Dataset/train/X_train.txt")
  traininglabels <- read.table("UCI HAR Dataset/train/y_train.txt")
  trainingsubject <- read.table("UCI HAR Dataset/train/subject_train.txt")
  # Read test data.
  testset <- read.table("UCI HAR Dataset/test/X_test.txt")
  testlabels <- read.table("UCI HAR Dataset/test/y_test.txt")
  testsubject <- read.table("UCI HAR Dataset/test/subject_test.txt")
  # Read feature names and activity label lookup.
  features <- read.table("UCI HAR Dataset/features.txt")
  activitylabels <- read.table("UCI HAR Dataset/activity_labels.txt")

  # Stack training on top of test (rows are observations).
  mergedset <- rbind(trainingset, testset)
  mergedlabels <- rbind(traininglabels, testlabels)
  mergedsubject <- rbind(trainingsubject, testsubject)

  # Name the measurement columns after the features; label the id columns.
  colnames(mergedset) <- features[, 2]
  colnames(mergedlabels) <- "Activity"
  colnames(mergedsubject) <- "Subject"

  # Select measurement columns by POSITION (features.txt contains duplicated
  # names, so name-based selection would be ambiguous).  Keep only features
  # whose name contains the literal strings "mean()" or "std()".
  meas_idx <- grep("mean\\(\\)|std\\(\\)", colnames(mergedset))

  # Merge measurements with the Activity and Subject identifier columns.
  allmerged <- cbind(mergedset, mergedlabels, mergedsubject)

  # Replace numeric activity codes 1..6 with their descriptive names.
  allmerged$Activity <- factor(allmerged$Activity, levels = c(1:6), labels = (activitylabels$V2))

  # Average every selected measurement per (Activity, Subject) group.
  tidy <- aggregate(allmerged[, meas_idx],
                    by = list(Activity = allmerged$Activity,
                              Subject = allmerged$Subject),
                    FUN = mean)

  # Save the finished data set.
  write.table(tidy, "tidy.txt", row.names = FALSE)
  invisible(tidy)
}
|
/run_analysis.R
|
no_license
|
mateomtb/getting-and-cleaning-data
|
R
| false
| false
| 1,815
|
r
|
run_analysis <- function () {
  # Produce a tidy summary of the UCI HAR dataset: merge the train/test
  # splits, attach descriptive activity labels, and write the per
  # (Activity, Subject) means of every mean()/std() feature to "tidy.txt".
  # Assumes the "UCI HAR Dataset" directory sits under the working
  # directory and that dplyr (>= 1.0, for across()) is attached by the caller.

  #Read training data
  trainingset <- read.table("UCI HAR Dataset/train/X_train.txt")
  traininglabels <- read.table("UCI HAR Dataset/train/y_train.txt")
  trainingsubject <- read.table("UCI HAR Dataset/train/subject_train.txt")
  #Read test data
  testset <- read.table("UCI HAR Dataset/test/X_test.txt")
  testlabels <- read.table("UCI HAR Dataset/test/y_test.txt")
  testsubject <- read.table("UCI HAR Dataset/test/subject_test.txt")
  #Read feature names and activity labels
  features <- read.table("UCI HAR Dataset/features.txt")
  activitylabels <- read.table("UCI HAR Dataset/activity_labels.txt")
  #Merge training and test measurements, labels, and subjects row-wise
  mergedset <- rbind(trainingset, testset)
  mergedlabels <- rbind(traininglabels, testlabels)
  mergedsubject <- rbind(trainingsubject, testsubject)
  #Add variable names to the feature columns (second column of features.txt)
  colnames(mergedset) <- features[, 2]
  #Label the activity and subject columns
  colnames(mergedlabels) <- "Activity"
  colnames(mergedsubject) <- "Subject"
  #Combine measurements, activity, and subject into one data frame.
  #(A former `allmerged <- allmerged[,]` "deduplication" step was a no-op
  # and has been removed.)
  allmerged <- cbind(mergedset, mergedlabels, mergedsubject)
  #Replace numeric activity codes with descriptive activity names
  allmerged$Activity <- factor(allmerged$Activity, levels = 1:6,
                               labels = activitylabels$V2)
  #Group by activity and subject and average only the columns whose names
  #contain "mean()" or "std()".  across() replaces the long-deprecated
  #summarise_each()/funs() idiom; the selected column order is unchanged.
  allmerged <- allmerged %>%
    group_by(Activity, Subject) %>%
    summarise(across(c(contains("mean()"), contains("std()")), mean),
              .groups = "drop")
  #Save the finished tidy data set
  write.table(allmerged, "tidy.txt", row.names = FALSE)
}
|
#counting number of observations by trait.
#Using MCMCglmm to fit PGLS models.
#zero model selection performed, just straight up sending model structures based on a priori hypotheses.
rm(list=ls())  # intentional: this analysis script starts from a clean workspace
source('paths.r')
source('functions/pgls_glmm_no_selection.r')
source('functions/tic_toc.r')
library(data.table)
library(phytools)
library(caper)
#set output path.----
output.path <- trait_N_table.path
#load data.----
d <- readRDS(inter_specific_analysis_data.path)
phy <- read.tree(phylogeny_raw.path) #'colin_2018-12--2.tre'
d$biome_trop <- ifelse(d$biome3 == 'b_tropical', 1, 0)
d$biome_bore <- ifelse(d$biome3 == 'c_boreal', 1, 0)
#Some data prep.----
#Capitalize the genus and replace underscores so tip labels match d$Species.
phy$tip.label <- paste0(toupper(substr(phy$tip.label, 1, 1)), substr(phy$tip.label, 2, nchar(phy$tip.label)))
phy$tip.label <- gsub('_', ' ', phy$tip.label)
phy$node.label <- NULL
d <- d[d$Species %in% phy$tip.label,]
d$MYCO_ASSO <- droplevels(d$MYCO_ASSO)
#specify traits, count observations.----
traits <- c('Ngreen','Nsenes','Nroots','Pgreen','Psenes','Proots','log.LL','root_lifespan')
preds  <- c('tpl.Species','MYCO_ASSO','nfix','pgf','mat.c','map.c','biome_bore','biome_trop')
#counts: one row of sample sizes per trait; spp: species contributing to each
#trait model.  (Renamed from 'sum' and 'return', which shadowed base R
#functions of the same names.)
counts <- list()
spp <- list()
for(i in seq_along(traits)){
  dat <- d[,colnames(d) %in% c(traits[i],preds)]
  dat <- dat[complete.cases(dat),]
  #we did not analyze tropical or boreal root lifespan observations.
  if(traits[i] == 'root_lifespan'){
    dat <- dat[dat$biome_bore == 0,]
    dat <- dat[dat$biome_trop == 0,]
  }
  N     <- nrow(dat)
  AM    <- nrow(dat[dat$MYCO_ASSO == 'AM',])
  EM    <- nrow(dat[dat$MYCO_ASSO == 'ECM',])
  nfix  <- nrow(dat[dat$nfix == 1,])
  angio <- nrow(dat[dat$pgf == 'angio',])
  gymno <- nrow(dat[dat$pgf == 'gymno',])
  bore  <- nrow(dat[dat$biome_bore == 1,])
  trop  <- nrow(dat[dat$biome_trop == 1,])
  temp  <- N - (bore + trop)  #temperate = everything neither boreal nor tropical
  counts[[i]] <- c(N,AM,EM,nfix,angio,gymno,bore,temp,trop)
  spp[[i]] <- dat$tpl.Species
}
counts <- do.call(rbind, counts)
spp <- unlist(spp)
traits <- c('green foliar N','senescent foliar N','root N',
            'green foliar P','senescent foliar P','root P',
            'leaf lifespan','root lifespan')
counts <- data.frame(cbind(traits, counts))
colnames(counts) <- c('Trait','N','AM','EM','N-fixer','angiosperm','gymnosperm','boreal','temperate','tropical')
#cbind() coerced the count columns to character; convert them back to numeric.
for(i in 2:ncol(counts)){
  counts[,i] <- as.numeric(as.character(counts[,i]))
}
#save output as .csv----
write.csv(counts, output.path)
#How many total unique species?
n.spp <- length(unique(spp))
#how many trait observations?
#myc type, nfix, angio/gymno for every species, as well as the traits.
dat <- d[,c('Ngreen','Nsenes','Nroots','Pgreen','Psenes','Proots','log.LL','root_lifespan')]
n.trait <- length(unique(spp)) * 3 + sum(!is.na(dat))
cat('You observed',n.trait,'traits across',n.spp,'unique species.\n')
|
/figure_scripts/trait_N_table.r
|
no_license
|
colinaverill/myco_trait_analysis
|
R
| false
| false
| 2,775
|
r
|
#counting number of observations by trait.
#Using MCMCglmm to fit PGLS models.
#zero model selection performed, just straight up sending model structures based on a priori hypotheses.
rm(list=ls())  # intentional: this analysis script starts from a clean workspace
source('paths.r')
source('functions/pgls_glmm_no_selection.r')
source('functions/tic_toc.r')
library(data.table)
library(phytools)
library(caper)
#set output path.----
output.path <- trait_N_table.path
#load data.----
d <- readRDS(inter_specific_analysis_data.path)
phy <- read.tree(phylogeny_raw.path) #'colin_2018-12--2.tre'
d$biome_trop <- ifelse(d$biome3 == 'b_tropical', 1, 0)
d$biome_bore <- ifelse(d$biome3 == 'c_boreal', 1, 0)
#Some data prep.----
#Capitalize the genus and replace underscores so tip labels match d$Species.
phy$tip.label <- paste0(toupper(substr(phy$tip.label, 1, 1)), substr(phy$tip.label, 2, nchar(phy$tip.label)))
phy$tip.label <- gsub('_', ' ', phy$tip.label)
phy$node.label <- NULL
d <- d[d$Species %in% phy$tip.label,]
d$MYCO_ASSO <- droplevels(d$MYCO_ASSO)
#specify traits, count observations.----
traits <- c('Ngreen','Nsenes','Nroots','Pgreen','Psenes','Proots','log.LL','root_lifespan')
preds  <- c('tpl.Species','MYCO_ASSO','nfix','pgf','mat.c','map.c','biome_bore','biome_trop')
#counts: one row of sample sizes per trait; spp: species contributing to each
#trait model.  (Renamed from 'sum' and 'return', which shadowed base R
#functions of the same names.)
counts <- list()
spp <- list()
for(i in seq_along(traits)){
  dat <- d[,colnames(d) %in% c(traits[i],preds)]
  dat <- dat[complete.cases(dat),]
  #we did not analyze tropical or boreal root lifespan observations.
  if(traits[i] == 'root_lifespan'){
    dat <- dat[dat$biome_bore == 0,]
    dat <- dat[dat$biome_trop == 0,]
  }
  N     <- nrow(dat)
  AM    <- nrow(dat[dat$MYCO_ASSO == 'AM',])
  EM    <- nrow(dat[dat$MYCO_ASSO == 'ECM',])
  nfix  <- nrow(dat[dat$nfix == 1,])
  angio <- nrow(dat[dat$pgf == 'angio',])
  gymno <- nrow(dat[dat$pgf == 'gymno',])
  bore  <- nrow(dat[dat$biome_bore == 1,])
  trop  <- nrow(dat[dat$biome_trop == 1,])
  temp  <- N - (bore + trop)  #temperate = everything neither boreal nor tropical
  counts[[i]] <- c(N,AM,EM,nfix,angio,gymno,bore,temp,trop)
  spp[[i]] <- dat$tpl.Species
}
counts <- do.call(rbind, counts)
spp <- unlist(spp)
traits <- c('green foliar N','senescent foliar N','root N',
            'green foliar P','senescent foliar P','root P',
            'leaf lifespan','root lifespan')
counts <- data.frame(cbind(traits, counts))
colnames(counts) <- c('Trait','N','AM','EM','N-fixer','angiosperm','gymnosperm','boreal','temperate','tropical')
#cbind() coerced the count columns to character; convert them back to numeric.
for(i in 2:ncol(counts)){
  counts[,i] <- as.numeric(as.character(counts[,i]))
}
#save output as .csv----
write.csv(counts, output.path)
#How many total unique species?
n.spp <- length(unique(spp))
#how many trait observations?
#myc type, nfix, angio/gymno for every species, as well as the traits.
dat <- d[,c('Ngreen','Nsenes','Nroots','Pgreen','Psenes','Proots','log.LL','root_lifespan')]
n.trait <- length(unique(spp)) * 3 + sum(!is.na(dat))
cat('You observed',n.trait,'traits across',n.spp,'unique species.\n')
|
.libPaths(new="~/R")
rm(list=ls())  # intentional: fresh workspace for this batch job
setwd("~/hdi_path")
require(LOCOpath)
# source("~/hdi_path/bin/pathwise_power.R")
#######################################################################################################################################
# Set simulation parameters (to be done with command-line arguments)
# Execute this from within the directory containing this R script:
############################################################################
options(echo=TRUE)
args <- commandArgs(trailingOnly = TRUE)
print(args)
# args <- c("1000","2","1","20","3","5","3",".92",".96",".95",".98","1")
n <- as.numeric(args[1])
p <- as.numeric(args[2])
iter <- as.numeric(args[3]) # goes from 1 to 12
B <- as.numeric(args[4])
# path.method <- args[6]
# beta.init <- args[7]
save.name <- args[5]
beta_i <- as.numeric(args[6])
####
###################################################
# Map the command-line beta_i code onto a correlation setting.
###################################################
if (beta_i == 0) {
  rho <- 0
} else if (beta_i == 1) {
  rho <- 0.5
} else if (beta_i == 2) {
  rho <- 0.9
} else if (beta_i == 3) {
  rho <- 'equl'
} else if (beta_i == 4) {
  rho <- 'weak_equl'
} else {
  stop("wrong rho")
}
# Run the resampling power simulation for one norm and save the result.
# 'tag' is the label used in the output file name; note that the
# 'L2.squared' norm is saved under the shorter 'L2' tag, preserving the
# original file-naming scheme.  The three invocations below replace three
# verbatim-duplicated call/print/save sequences.
run_power <- function(norm, tag) {
  results <- Path.Resample.Power(n = n, p = p, beta = c(0, rep(1, 2), rep(0, 9)),
                                 rho = rho, iter = iter, B = B, setting = 'dep',
                                 which.covariate = 1, betaNull = 0,
                                 multiTest = FALSE, parallel = TRUE, norm = norm,
                                 path.method = 'lars', beta.init = 'adaptive')
  print(results)
  f1 <- paste0("~/hdi_simu/results/", save.name, "_", tag, "_", rho, ".RData")
  print(f1)
  save(results, file = f1)  # the saved object is still named 'results'
}
run_power('L2.squared', 'L2')
run_power('L1', 'L1')
run_power('L_inf', 'L_inf')
|
/slurm_cluster_code/power_simu_12.R
|
no_license
|
devcao/LOCOpath_repo
|
R
| false
| false
| 2,365
|
r
|
.libPaths(new="~/R")
rm(list=ls())  # intentional: fresh workspace for this batch job
setwd("~/hdi_path")
require(LOCOpath)
# source("~/hdi_path/bin/pathwise_power.R")
#######################################################################################################################################
# Set simulation parameters (to be done with command-line arguments)
# Execute this from within the directory containing this R script:
############################################################################
options(echo=TRUE)
args <- commandArgs(trailingOnly = TRUE)
print(args)
# args <- c("1000","2","1","20","3","5","3",".92",".96",".95",".98","1")
n <- as.numeric(args[1])
p <- as.numeric(args[2])
iter <- as.numeric(args[3]) # goes from 1 to 12
B <- as.numeric(args[4])
# path.method <- args[6]
# beta.init <- args[7]
save.name <- args[5]
beta_i <- as.numeric(args[6])
####
###################################################
# Map the command-line beta_i code onto a correlation setting.
###################################################
if (beta_i == 0) {
  rho <- 0
} else if (beta_i == 1) {
  rho <- 0.5
} else if (beta_i == 2) {
  rho <- 0.9
} else if (beta_i == 3) {
  rho <- 'equl'
} else if (beta_i == 4) {
  rho <- 'weak_equl'
} else {
  stop("wrong rho")
}
# Run the resampling power simulation for one norm and save the result.
# 'tag' is the label used in the output file name; note that the
# 'L2.squared' norm is saved under the shorter 'L2' tag, preserving the
# original file-naming scheme.  The three invocations below replace three
# verbatim-duplicated call/print/save sequences.
run_power <- function(norm, tag) {
  results <- Path.Resample.Power(n = n, p = p, beta = c(0, rep(1, 2), rep(0, 9)),
                                 rho = rho, iter = iter, B = B, setting = 'dep',
                                 which.covariate = 1, betaNull = 0,
                                 multiTest = FALSE, parallel = TRUE, norm = norm,
                                 path.method = 'lars', beta.init = 'adaptive')
  print(results)
  f1 <- paste0("~/hdi_simu/results/", save.name, "_", tag, "_", rho, ".RData")
  print(f1)
  save(results, file = f1)  # the saved object is still named 'results'
}
run_power('L2.squared', 'L2')
run_power('L1', 'L1')
run_power('L_inf', 'L_inf')
|
working.compvariogmodels1 <-
function(gammadat,models,range0)
{
	## Fit several candidate variogram models by weighted least squares and
	## return the best-fitting admissible one.
	## gammadat : data frame with columns x (distance), gamma (empirical
	##            semivariance) and n (number of point pairs, used as weights)
	## models   : character vector of model name suffixes; each must have a
	##            corresponding function named paste0('v', model) in scope
	## range0   : first peak or range parameter handed to every model function
	## value    : list(parms = c(nugget, partial sill), model = chosen name)
	## note to Dong July 29: add nugget effects
	lsout <- vector('list', length(models))
	lsfit <- rep(NA, length(models))        # WLS deviance per candidate model
	coefs <- matrix(NA, length(models), 2)  # intercept (nugget) + slope (sill)
	for (i in seq_along(models)) {
		f <- get(paste0('v', models[i]))
		xx <- with(gammadat, f(x, 1, range0, 0))
		## fit with an intercept so a nugget effect is estimated;
		## weight observations by the number of contributing point pairs
		catch <- try(lm(gamma ~ xx, data = gammadat, weights = gammadat$n), silent = TRUE)
		if (!inherits(catch, 'try-error')) {
			lsout[[i]] <- catch
			lsfit[i] <- deviance(lsout[[i]])
			coefs[i,] <- coef(catch)
		}
	}
	## admissible models: both coefficients estimated and strictly positive
	tmp <- apply(coefs, 1, function(v)
		{ out <- all(!is.na(v))
		  if (out) {
			out <- all(v > 0)
		  }
		  out }
	)
	vii <- which(tmp)
	ii <- vii[which.min(lsfit[tmp])]
	if (length(ii) == 0) {
		## no admissible model: fall back to an exponential fit through
		## the origin, i.e. a zero-nugget exponential variogram
		ii <- 1
		xx <- vexpn(gammadat$x, 1, range0, 0)
		lsout[[ii]] <- lm(gamma ~ xx - 1, data = gammadat, weights = gammadat$n)
		coefs[ii,] <- c(0, coef(lsout[[ii]]))
	}
	list(parms = coefs[ii,], model = models[ii])
}
|
/R/working.compvariogmodels1.R
|
no_license
|
cran/ltsk
|
R
| false
| false
| 1,219
|
r
|
working.compvariogmodels1 <-
function(gammadat,models,range0)
{
	## Fit several candidate variogram models by weighted least squares and
	## return the best-fitting admissible one.
	## gammadat : data frame with columns x (distance), gamma (empirical
	##            semivariance) and n (number of point pairs, used as weights)
	## models   : character vector of model name suffixes; each must have a
	##            corresponding function named paste0('v', model) in scope
	## range0   : first peak or range parameter handed to every model function
	## value    : list(parms = c(nugget, partial sill), model = chosen name)
	## note to Dong July 29: add nugget effects
	lsout <- vector('list', length(models))
	lsfit <- rep(NA, length(models))        # WLS deviance per candidate model
	coefs <- matrix(NA, length(models), 2)  # intercept (nugget) + slope (sill)
	for (i in seq_along(models)) {
		f <- get(paste0('v', models[i]))
		xx <- with(gammadat, f(x, 1, range0, 0))
		## fit with an intercept so a nugget effect is estimated;
		## weight observations by the number of contributing point pairs
		catch <- try(lm(gamma ~ xx, data = gammadat, weights = gammadat$n), silent = TRUE)
		if (!inherits(catch, 'try-error')) {
			lsout[[i]] <- catch
			lsfit[i] <- deviance(lsout[[i]])
			coefs[i,] <- coef(catch)
		}
	}
	## admissible models: both coefficients estimated and strictly positive
	tmp <- apply(coefs, 1, function(v)
		{ out <- all(!is.na(v))
		  if (out) {
			out <- all(v > 0)
		  }
		  out }
	)
	vii <- which(tmp)
	ii <- vii[which.min(lsfit[tmp])]
	if (length(ii) == 0) {
		## no admissible model: fall back to an exponential fit through
		## the origin, i.e. a zero-nugget exponential variogram
		ii <- 1
		xx <- vexpn(gammadat$x, 1, range0, 0)
		lsout[[ii]] <- lm(gamma ~ xx - 1, data = gammadat, weights = gammadat$n)
		coefs[ii,] <- c(0, coef(lsout[[ii]]))
	}
	list(parms = coefs[ii,], model = models[ii])
}
|
rm(list = ls())
library(rmarkdown)
library(dplyr)
library(rgdal)
library(htmlwidgets)
library(tools)
library (leaflet)
library(shiny)
library(ggplot2)
library(webshot)
server <- shinyServer(function(input, output){
  # Shiny server for the 2016 election explorer.  Builds leaflet maps of
  # counties / congressional districts colored by drug-overdose rates,
  # election results and regression residuals, plus summary plots and
  # downloadable map snapshots / data extracts.
  webshot::install_phantomjs()

  # ---- Static page text ----
  output$text1 <- renderText({ "research the 2016 election using custom metrics and color schemes"})
  # FIX: corrected typos in this user-facing string ("customt", "sdie wrok").
  output$text2 <- renderText({ "for the custom tab, select your area for data and see the heatmap on custom side work"})
  output$text3 <- renderText({ "labels will be added in the label heatmap"})
  output$text4 <- renderText({ "By James Hennessy and Benjamin Berger"})
  output$text6 <- renderText({ "James Hennessy is a handsome and very talented dancer, who one day dreamed of being a Pokemon, but had to settle on something else due to copyright issues "})
  output$text7 <- renderText({"Benjamin Berger simply can't stack up to James at all! Here he is pictured below"})

  # State name -> FIPS code lookup, shared by the reactives below.
  st_fips <- read.csv("st_fips.csv")

  # Midpoint of a binned age-adjusted drug-death-rate category.
  # NOTE(review): returns NULL invisibly for an unrecognized label --
  # confirm callers only ever pass the eleven labels below.
  drugDeathBin <- function(x){
    if(x == "0-2") return(1)
    if(x == "2.1-4") return(3)
    if(x == "4.1-6") return(5)
    if(x == "6.1-8") return(7)
    if(x == "8.1-10") return(9)
    if(x == "10.1-12") return(11)
    if(x == "12.1-14") return(13)
    if(x == "14.1-16") return(15)
    if(x == "16.1-18") return(17)
    if(x == "18.1-20") return(19)
    if(x == ">20") return(21)
  }

  # Congressional district polygons (114th Congress).  Drops Puerto Rico
  # (FIPS 72) and FIPS 02; optionally restricted to the chosen state.
  getCongress <- reactive({
    cong <- readOGR(dsn="cb_2014_us_cd114_20m", layer="cb_2014_us_cd114_20m")
    cong <- cong[cong$STATEFP != 72,]
    cong <- cong[as.character(cong$STATEFP) != "02",]
    # District key = state FIPS + district number, e.g. "0612".
    cong$NAME <- paste0(cong$STATEFP, cong$CD114FP)
    if(input$individualState){
      number <- st_fips[st_fips$State == input$chooseStates,]$FIPS
      if(number < 10){
        # zero-pad single-digit FIPS codes to match the shapefile levels
        number <- paste0("0", as.character(number))
        cong <- cong[cong$STATEFP == number,]
      }
      else{
        cong <- cong[cong$STATEFP == number,]
      }
    }
    cong
  })

  # Per-district election results keyed by "<state FIPS><district>".
  getCongResults <- reactive({
    kos <- read.csv("Results.csv")
    statesAbv <- read.csv("statesAbv.csv")
    names <- substr(kos$CD, 1, 2)  # state abbreviation, e.g. "CA"
    dist <- substr(kos$CD, 4, 5)   # district number, e.g. "12"
    for(i in seq_along(names)){
      number <- statesAbv[which(names[i] == statesAbv$ABV),]$FIPS
      if(number < 10){
        number <- paste0("0", as.character(number))
        numberDist <- paste0(number, dist[i])
        kos$CDfull[i] <- numberDist
        kos$CDstate[i] <- number
        kos$CDdist[i] <- dist[i]
      }
      else{
        number <- as.character(number)
        numberDist <- paste0(number, dist[i])
        kos$CDfull[i] <- numberDist
        kos$CDstate[i] <- number
        kos$CDdist[i] <- dist[i]
      }
      # newWinner: "TRUE" when Trump out-polled Clinton in 2016, "NA" when
      # no 2016 result is available (stored as strings throughout).
      if(!is.na(kos$Clinton.2016[i])){
        if((kos$Clinton.2016[i]) < (kos$Trump.2016[i])){
          kos$newWinner[i] <- "TRUE"
        }
        else{
          kos$newWinner[i] <- "FALSE"
        }
      }
      else{
        kos$newWinner[i] <- "NA"
      }
    }
    if(input$individualState){
      number <- st_fips[st_fips$State == input$chooseStates,]$FIPS
      if(number < 10){
        number <- paste0("0", as.character(number))
        kos <- kos[which(kos$CDstate == number),]
      }
      else{
        kos <- kos[which(kos$CDstate == number),]
      }
    }
    kos
  })

  # County polygons, minus Puerto Rico, FIPS 02 and Kalawao County;
  # optionally restricted to the chosen state.
  getStates <- reactive({
    states <- readOGR(dsn="cb_2015_us_county_20m", layer="cb_2015_us_county_20m")
    states <- states[states$STATEFP != 72,]
    states <- states[as.character(states$STATEFP) != "02",]
    states <- states[states$NAME != "Kalawao",]
    states$FULLFP <- paste0(states$STATEFP, states$COUNTYFP)
    if(input$individualState){
      number <- st_fips[st_fips$State == input$chooseStates,]$FIPS
      if(number < 10){
        number <- paste0("0", as.character(number))
        states <- states[states$STATEFP == number,]
      }
      else{
        states <- states[states$STATEFP == number,]
      }
    }
    states
  })

  # County-level election + drug-death data, with residual columns from
  # several simple regressions against the 2016 GOP margin.
  getData <- reactive({
    data <- read.csv("data.csv", header = TRUE, sep = ",")
    states <- getStates()
    if(input$individualState){
      # FIX: states$STATEFP is one value per county; take the (single,
      # repeated) state code rather than feeding a vector to if().
      num <- as.numeric(as.character(states$STATEFP))[1]
      data <- data[data$StateFIPS == num,]
    }
    ncounty <- length(states$COUNTYFP)
    m1 <- lm(gop_margin_2016 ~ DrugDeathRate, data)
    data$m1.residuals <- resid(m1)
    m2 <- lm(gop_margin_2016 ~ gop_margin_2012, data)
    data$m2.residuals <- resid(m2)
    # residuals against pure noise, used as a visual baseline map
    data$rnorm <- rnorm(ncounty)
    m3 <- lm(gop_margin_2016 ~ rnorm, data)
    data$m3.residuals <- resid(m3)
    m4 <- lm(gop_margin_2016 ~ BlackShare, data)
    data$m4.residuals <- resid(m4)
    data$winner <- "Hillary"
    data$winner[data$TrumpWin == 1] <- "Trump"
    data
  })

  output$CongMap <- renderLeaflet({
    finalCongMap()
  })

  # District map: joins result columns onto the district polygons and
  # colors by Obama 2012 vote share, with optional labels and legend.
  finalCongMap <- reactive({
    cong <- getCongress()
    congResults <- getCongResults()
    for(i in seq_along(cong)){
      index <- match(cong$NAME[i], congResults$CDfull)
      cong$Incumbent[i] <- congResults$Incumbent[index]
      cong$Party[i] <- congResults$Party[index]
      cong$Csix[i] <- congResults$Clinton.2016[index]
      cong$Tsix[i] <- congResults$Trump.2016[index]
      cong$Ot[i] <- congResults$Obama.2012[index]
      cong$Rt[i] <- congResults$Romney.2012[index]
    }
    data <- getData()
    states <- getStates()
    data <- data[order(order(as.numeric(as.character(states$GEOID)))),]
    color <- colorBin(input$chooseColor, cong$Ot, bins = 8)(cong$Ot)
    congMapper <- leaflet(cong) %>%
      addPolygons(stroke = TRUE, fillOpacity = .7, smoothFactor = 0, color = "black",
                  weight = .5, fill = TRUE, fillColor = ~color)
    if(input$labelYes){
      longLat <- read.csv("us_cty_area.csv")
      abbStates <- read.csv("states.csv")
      if(input$individualState){
        intial <- abbStates$Abbreviation[which(abbStates$State == input$chooseStates)]
        long <- data$long[data$StateCode == toString(intial)]
        lat <- data$lat[data$StateCode == toString(intial)]
      }
      else{
        long <- data$long
        lat <- data$lat
      }
      congMapper <- congMapper %>%
        addLabelOnlyMarkers(~lat, ~long, label = ~as.character(congResults$CD),
                            labelOptions = labelOptions(noHide = TRUE, direction = 'top', textOnly = TRUE))
    }
    if(input$legendYes){
      if(input$whatData == "2016Results"){
        pal <- colorFactor(c("blue","red"),
                           domain = c("Hillary","Trump"))
        value <- c("Hillary","Trump")
      }
      else{
        pal <- colorNumeric(
          palette = input$chooseColor,
          domain = data$DrugDeathRate
        )
        value <- data$DrugDeathRate
      }
      congMapper <- congMapper %>%
        addLegend("bottomright", pal = pal, values = value,
                  title = input$whatData,
                  opacity = 1
        )
    }
    congMapper
  })

  ## ---- Drug death rates ----
  output$drugMap <- renderLeaflet({
    finalMap()
  })

  # County map colored by the metric chosen in input$whatData, with
  # optional labels and legend.
  finalMap <- reactive({
    data <- getData()
    states <- getStates()
    data <- data[order(order(as.numeric(as.character(states$GEOID)))),]
    if(input$individualState){
      # FIX: same vector-condition repair as in getData().
      num <- as.numeric(as.character(states$STATEFP))[1]
      data <- data[data$StateFIPS == num,]
    }
    ncounty <- length(states$COUNTYFP)
    if(input$whatData == "drugs"){
      color <- colorBin(input$chooseColor, data$DrugDeathRate, bins = 8)(data$DrugDeathRate)
    }
    else if(input$whatData == "2016Results"){
      color <- rep('blue', ncounty)
      color[data$TrumpWin == 1] <- 'red'
    }
    else if(input$whatData == "RomTrump"){
      color <- colorBin(input$chooseColor, data$m2.residuals^2, bins = 5)(data$m2.residuals^2)
    }
    else if(input$whatData == "blackPop"){
      color <- colorBin(input$chooseColor, data$BlackShare, bins = 8)(data$BlackShare)
    }
    else if(input$whatData == "PopDensity"){
      color <- colorBin(input$chooseColor, data$PopDensity, bins = 8)(data$PopDensity)
    }
    # (a second "2016Results" block that repeated the branch above verbatim
    # has been removed)
    map <- leaflet(states) %>%
      addPolygons(stroke = TRUE, fillOpacity = .7, smoothFactor = 0, color = "black",
                  weight = .5, fill = TRUE, fillColor = ~color)
    if(input$labelYes){
      longLat <- read.csv("us_cty_area.csv")
      abbStates <- read.csv("states.csv")
      if(input$individualState){
        intial <- abbStates$Abbreviation[which(abbStates$State == input$chooseStates)]
        long <- data$long[data$StateCode == toString(intial)]
        lat <- data$lat[data$StateCode == toString(intial)]
      }
      else{
        long <- data$long
        lat <- data$lat
      }
      map <- map %>%
        addLabelOnlyMarkers(~lat, ~long, label = ~as.character(states$NAME),
                            labelOptions = labelOptions(noHide = TRUE, direction = 'top', textOnly = TRUE))
    }
    if(input$legendYes){
      if(input$whatData == "2016Results"){
        pal <- colorFactor(c("blue","red"),
                           domain = c("Hillary","Trump"))
        value <- c("Hillary","Trump")
      }
      else{
        pal <- colorNumeric(
          palette = input$chooseColor,
          domain = data$DrugDeathRate
        )
        value <- data$DrugDeathRate
      }
      map <- map %>%
        addLegend("bottomright", pal = pal, values = value,
                  title = input$whatData,
                  opacity = 1
        )
    }
    map
  })

  # Standalone drug-death-rate map builder (not wired to an output slot).
  getMap <- function(){
    data <- getData()
    states <- getStates()
    color <- colorBin(input$chooseColor, data$DrugDeathRate, bins = 8)(data$DrugDeathRate)
    leaflet(states) %>%
      addPolygons(stroke = TRUE, fillOpacity = .7, smoothFactor = 0, color = "black",
                  weight = .5, fill = TRUE, fillColor = ~color
      )
  }

  # Correlation-of-residuals map, with county-name labels.
  output$genMap <- renderLeaflet({
    map <- getGenMap()
    states <- getStates()
    data <- getData()
    abbStates <- read.csv("states.csv")
    if(input$individualState){
      intial <- abbStates$Abbreviation[which(abbStates$State == input$chooseStates)]
      long <- data$long[data$StateCode == toString(intial)]
      lat <- data$lat[data$StateCode == toString(intial)]
    }
    else{
      long <- data$long
      lat <- data$lat
    }
    map %>%
      addLabelOnlyMarkers(~lat, ~long, label = ~as.character(states$NAME),
                          labelOptions = labelOptions(noHide = TRUE, direction = 'top', textOnly = TRUE))
  })

  # Map of squared residuals from regressing the 2016 margin on noise.
  getGenMap <- reactive({
    data <- getData()
    states <- getStates()
    data <- data[order(order(as.numeric(as.character(states$GEOID)))),]
    color <- colorBin(input$chooseColor, data$m3.residuals^2, bins = 5)(data$m3.residuals^2)
    leaflet(states) %>%
      addPolygons(stroke = TRUE, fillOpacity = .7, smoothFactor = 0, color = "black",
                  weight = .5, fill = TRUE, fillColor = ~color
      )
  })

  output$mapRandomer <- renderLeaflet({
    # FIX: 'data' and 'states' were referenced here without being defined
    # in this scope; pull them from the shared reactives.
    data <- getData()
    states <- getStates()
    color <- colorBin(input$chooseColor, data$m3.residuals^2, bins = 5)(data$m3.residuals^2)
    leaflet(states) %>%
      addPolygons(stroke = TRUE, fillOpacity = .7, smoothFactor = 0, color = "black",
                  weight = .5, fill = TRUE, fillColor = ~color
      )
  })

  ## ---- BOXPLOTS ----
  output$graphTwo <- renderPlot({
    data <- getData()
    bp <- ggplot(data = data, aes(x = data$DrugDeathRate, y = gop_margin_2016), order(as.numeric(data$DrugDeathRate))) + geom_boxplot(aes(fill = DrugDeathRate))
    bp <- bp + xlab("Age-Adjusted drug deaths per 100,000 people") +
      ylab("Trump Victory Margin")
    bp <- bp + scale_fill_discrete(breaks = c("6.1-8","8.1-10","10.1-12","12.1-14","14.1-16","16.1-18","18.1-20",">20"))
    # FIX: the title was previously added to a discarded copy of the plot
    # ('bp + ggtitle(...)' without reassignment), so it never displayed.
    bp <- bp + ggtitle("Trump victory margin in North Carolina counties, by county drug overdose rate ")
    print(bp)
  })

  ## ---- BAR GRAPH ----
  output$graphThree <- renderPlot({
    data <- getData()
    bp2 <- ggplot(data, aes(DrugDeathRateCategory, fill = winner, order = as.numeric(DrugDeathRateCategory))) +
      geom_bar()
    bp2 <- bp2 + xlab("Age-Adjusted drug deaths per 100,000 people") +
      ylab("Number of Counties")
    # last expression is the returned (and therefore rendered) plot
    bp2 + ggtitle("2016 Election victor in State counties by county drug overdose rate")
  })

  ## ---- REGRESSIONS ----
  # NOTE(review): never assigned to an output slot, so this is dead code;
  # kept for reference.
  getSummary <- renderText({
    # FIX: 'data' was referenced without being defined in this scope.
    data <- getData()
    summary(lm(TrumpPctVictory ~ RomneyPctVictory + DDR, data))
    cor(data$TrumpPctVictory, data$DDR)
    summary(lm(TrumpPctVictory ~ DDR, data[data$RomneyWin == FALSE,])) ##effect of drug death on obama counties
  })

  # Screenshot the currently displayed drug map (via phantomjs), recentred
  # on the user's viewport, and serve it as a downloadable image.
  output$downloadMap <- downloadHandler(
    filename = function() {
      paste0(input$chooseStates, input$whatFormat)
    },
    content = function(file) {
      here <- finalMap()
      long <- ((input$drugMap_bounds$north) + input$drugMap_bounds$south) / 2
      latt <- ((input$drugMap_bounds$west) + input$drugMap_bounds$east) / 2
      heres <- here %>% setView(lng = latt, lat = long, zoom = input$drugMap_zoom)
      owd <- setwd(tempdir())
      on.exit(setwd(owd))
      saveWidget(heres, file = "temp.html", selfcontained = FALSE)
      webshot("temp.html", file = file,
              cliprect = "viewport")
    }
  )

  output$downloadData <- downloadHandler(
    # FIX: filename/content were previously wrapped together in one `{ }`
    # block, so downloadHandler() received a single malformed argument and
    # the download always failed at runtime.
    filename = function() {
      # FIX: was paste(x, ".csv", sep = "."), which produced "State..csv".
      paste0(input$chooseStates, ".csv")
    },
    content = function(file) {
      # FIX: previously wrote the literal string "data.csv" as the table;
      # write the (possibly state-filtered) data instead.
      write.table(getData(), file, sep = ",",
                  row.names = FALSE)
    }
  )
})
|
/server.r
|
no_license
|
jamesthesnake/Election2016Project
|
R
| false
| false
| 14,738
|
r
|
rm(list = ls())
library(rmarkdown)
library(dplyr)
library(rgdal)
library(htmlwidgets)
library(tools)
library (leaflet)
library(shiny)
library(ggplot2)
library(webshot)
server<-shinyServer(function(input, output){
webshot::install_phantomjs()
#IMPORT DATA
output$text1<-renderText({ "research the 2016 election using custom metrics and color schemes"})
output$text2<-renderText({ "for the customt tab , select your area for data and see the heatmap on custom sdie wrok"})
output$text3<-renderText({ "labels will be added in the label heatmap"})
output$text4<-renderText({ "By James Hennessy and Benjamin Berger"})
output$text6<-renderText({ "James Hennessy is a handsome and very talented dancer, who one day dreamed of being a Pokemon, but had to settle on something else due to copyright issues "})
output$text7<-renderText({"Benjamin Berger simply can't stack up to James at all! Here he is pictured below"})
#IMPORT ELECTION DATA 2016
#IMPORT ELECTION DATA 2012
st_fips <- read.csv("st_fips.csv")
#IMPORT AND MERGE DRUG DATA
drugDeathBin <- function(x){
if(x == "0-2") return(1)
if(x == "2.1-4") return(3)
if(x == "4.1-6") return(5)
if(x == "6.1-8") return(7)
if(x == "8.1-10") return(9)
if(x == "10.1-12") return(11)
if(x == "12.1-14") return(13)
if(x == "14.1-16") return(15)
if(x == "16.1-18") return(17)
if(x == "18.1-20") return(19)
if(x == ">20") return(21)
}
getCongress<-reactive({
cong<-readOGR(dsn="cb_2014_us_cd114_20m",layer="cb_2014_us_cd114_20m")
cong<-cong[cong$STATEFP!=72,]
cong<-cong[as.character(cong$STATEFP)!="02",]
cong$NAME<-paste0(cong$STATEFP,cong$CD114FP)
if(input$individualState){
number<-st_fips[st_fips$State== input$chooseStates,]$FIPS
if(number<10){
number<-as.character(number)
number<-paste0("0",number)
cong <- cong[cong$STATEFP == number,]
}
else{
cong <- cong[cong$STATEFP == number,]
}
}
cong<-cong
})
getCongResults<-reactive({
kos<-read.csv("Results.csv")
statesAbv <- read.csv("statesAbv.csv")
names<-substr(kos$CD,1,2)
dist<-substr(kos$CD,4,5)
for(i in 1:length(names)){
number<-statesAbv[which(names[i]==statesAbv$ABV),]$FIPS
if(number<10){
number<-as.character(number)
number<-paste0("0",number)
numberDist<-paste0(number,dist[i])
kos$CDfull[i]<-numberDist
kos$CDstate[i]<-number
kos$CDdist[i]<-dist[i]
}
else{
number<-as.character(number)
numberDist<-paste0(number,dist[i])
kos$CDfull[i]<-numberDist
kos$CDstate[i]<-number
kos$CDdist[i]<-dist[i]
}
if(!is.na(kos$Clinton.2016[i] )){
if((kos$Clinton.2016[i])<(kos$Trump.2016[i])){
kos$newWinner[i]<-"TRUE"
}
else{
kos$newWinner[i]<-"FALSE"
}
}
else{
kos$newWinner[i]<-"NA"
}
}
if(input$individualState){
number<-st_fips[st_fips$State== input$chooseStates,]$FIPS
if(number<10){
number<-as.character(number)
number<-paste0("0",number)
kos<-kos[which(kos$CDstate==number),]
}
else{
kos<-kos[which(kos$CDstate==number),]
}
}
kos<-kos
})
getStates<-reactive({
states <- readOGR(dsn="cb_2015_us_county_20m",layer="cb_2015_us_county_20m")
states<-states[states$STATEFP!=72,]
states<-states[as.character(states$STATEFP)!="02",]
states<-states[states$NAME!="Kalawao",]
states$FULLFP<-paste0(states$STATEFP,states$COUNTYFP)
if(input$individualState){
number<-st_fips[st_fips$State== input$chooseStates,]$FIPS
if(number<10){
number<-as.character(number)
number<-paste0("0",number)
states <- states[states$STATEFP == number,]
}
else{
states <- states[states$STATEFP == number,]
}
}
states<-states
})
getData<-reactive({
data <- read.csv("data.csv", header = T, sep = ",")
states<-getStates()
if(input$individualState){
num<-as.numeric(as.character(states$STATEFP))
if(num<10){
data<- data[data$StateFIPS==num,]
}
else{
data <- data[data$StateFIPS==states$STATEFP,]
}
data<-data
}
ncounty <- length(states$COUNTYFP)
m1 <- lm(gop_margin_2016 ~ DrugDeathRate, data)
data$m1.residuals <- resid(m1)
m2 <- lm(gop_margin_2016 ~ gop_margin_2012, data)
data$m2.residuals <- resid(m2)
data$rnorm <- rnorm(ncounty)
m3 <- lm(gop_margin_2016 ~ rnorm, data)
data$m3.residuals <- resid(m3)
m4<-lm(gop_margin_2016~BlackShare,data)
data$m4.residuals<- resid(m4)
data$winner <- "Hillary"
data$winner[data$TrumpWin==1] <- "Trump"
data<-data
})
output$CongMap<- renderLeaflet({
finalCongMap()
})
finalCongMap<-reactive({
# Build the congressional-district leaflet map: merge 2012/2016 results
# onto district polygons, shade by Obama 2012 vote, then optionally add
# district labels and a legend, per the UI toggles.
cong<-getCongress()
congResults<-getCongResults()
# copy per-district results onto the polygon attributes, matched by name
for(i in 1:length(cong)){
index<-match(cong$NAME[i],congResults$CDfull)
cong$Incumbent[i]<-congResults$Incumbent[index]
cong$Party[i]<-congResults$Party[index]
cong$Csix[i]<-congResults$Clinton.2016[index]
cong$Tsix[i]<-congResults$Trump.2016[index]
cong$Ot[i]<-congResults$Obama.2012[index]
cong$Rt[i]<-congResults$Romney.2012[index]
}
data<-getData()
states<-getStates()
# NOTE(review): order(order(x)) returns the ranks of x, not the sort
# permutation — confirm this reordering of `data` is the intended one.
data <- data[order(order(as.numeric(as.character(states$GEOID)))),]
#color <- rep('blue',length(congResults))
#color[congResults$newWinner=="TRUE"]<- 'red'
# shade districts by the Obama 2012 vote using the user-chosen palette
color <- colorBin(input$chooseColor, cong$Ot , bins = 8)(cong$Ot)
congMapper<-{
leaflet(cong) %>%
addPolygons( stroke = T, fillOpacity =.7, smoothFactor = 0, color = "black",
weight = .5, fill = T, fillColor = ~color)
}
if(input$labelYes){
# optional district labels placed at stored centroid coordinates
longLat<-read.csv("us_cty_area.csv")
abbStates<-read.csv("states.csv")
if(input$individualState){
intial<-abbStates$Abbreviation[which(abbStates$State==input$chooseStates)]
long<-data$long[data$StateCode==toString(intial)]
lat<-data$lat[data$StateCode==toString(intial)]
}
else{
long<-data$long
lat<-data$lat
}
# NOTE(review): addLabelOnlyMarkers takes (lng, lat); the first argument
# here is ~lat — confirm the lat/long columns are not swapped.
congMapper<-{ congMapper %>%
addLabelOnlyMarkers(~lat, ~long, label = ~as.character(congResults$CD),
labelOptions = labelOptions(noHide = T, direction = 'top', textOnly = T))
}
}
if(input$legendYes){
if(input$whatData=="2016Results"){
# two-candidate categorical legend
pal<- colorFactor(c("blue","red"),
domain= c("Hillary","Trump"))
value<-c("Hillary","Trump")
}
else{
# continuous legend over county drug death rates
pal <- colorNumeric(
palette = input$chooseColor,
domain = data$DrugDeathRate
)
value<-data$DrugDeathRate
}
value<-value
pal<-pal
congMapper<-congMapper%>%
addLegend("bottomright", pal = pal, values = value,
title = input$whatData,
opacity = 1
)
}
# return the assembled map widget
congMapper<-congMapper
})
##Drug death rates
# Render the pre-assembled drug/results county map.
output$drugMap <- renderLeaflet(finalMap())
finalMap<-reactive({
# Build the main county-level leaflet map: pick a fill colouring based on
# the selected data layer, then optionally add county labels and a legend.
data<-getData()
states<-getStates()
# NOTE(review): order(order(x)) yields ranks, not a sort permutation —
# confirm this reordering aligns `data` rows with the polygon order.
data <- data[order(order(as.numeric(as.character(states$GEOID)))),]
if(input$individualState){
# NOTE(review): states$STATEFP is a vector here, so if(num<10) uses only
# a length-1 condition by accident (errors on R >= 4.3); the else branch
# also recycles a vector in ==. getData() already filters per state.
num<-as.numeric(as.character(states$STATEFP))
if(num<10){
data<- data[data$StateFIPS==num,]
}
else{
data <- data[data$StateFIPS==states$STATEFP,]
}
data<-data
}
ncounty <- length(states$COUNTYFP)
# fill colouring per selected data layer
if(input$whatData=="drugs"){
color <- colorBin(input$chooseColor, data$DrugDeathRate , bins = 8)(data$DrugDeathRate)
}
else if(input$whatData=="2016Results"){
color <- rep('blue',ncounty)
color[data$TrumpWin == 1]<- 'red'
}
else if (input$whatData == "RomTrump"){
color <- colorBin(input$chooseColor, data$m2.residuals^2 , bins = 5)(data$m2.residuals^2)
}
else if(input$whatData=="blackPop"){
color <- colorBin(input$chooseColor, data$BlackShare , bins = 8)(data$BlackShare)
}
else if(input$whatData=="PopDensity"){
color <- colorBin(input$chooseColor, data$PopDensity , bins = 8)(data$PopDensity)
}
# NOTE(review): this repeats the 2016Results branch above verbatim — the
# block is redundant (same assignment) and can likely be removed.
if(input$whatData=="2016Results"){
color <- rep('blue',ncounty)
color[data$TrumpWin == 1]<- 'red'
}
map<-{
leaflet(states) %>%
addPolygons( stroke = T, fillOpacity =.7, smoothFactor = 0, color = "black",
weight = .5, fill = T, fillColor = ~color)
}
if(input$labelYes){
# optional county-name labels at stored centroid coordinates
longLat<-read.csv("us_cty_area.csv")
abbStates<-read.csv("states.csv")
if(input$individualState){
intial<-abbStates$Abbreviation[which(abbStates$State==input$chooseStates)]
long<-data$long[data$StateCode==toString(intial)]
lat<-data$lat[data$StateCode==toString(intial)]
}
else{
long<-data$long
lat<-data$lat
}
# NOTE(review): addLabelOnlyMarkers takes (lng, lat); ~lat is passed
# first — confirm the columns are not swapped.
map<-map%>%
addLabelOnlyMarkers(~lat, ~long, label = ~as.character(states$NAME),
labelOptions = labelOptions(noHide = T, direction = 'top', textOnly = T))
}
if(input$legendYes){
if(input$whatData=="2016Results"){
# two-candidate categorical legend
pal<- colorFactor(c("blue","red"),
domain= c("Hillary","Trump"))
value<-c("Hillary","Trump")
}
else{
# continuous legend; NOTE(review): always uses DrugDeathRate even when
# another layer (blackPop, PopDensity, ...) is being displayed.
pal <- colorNumeric(
palette = input$chooseColor,
domain = data$DrugDeathRate
)
value<-data$DrugDeathRate
}
value<-value
pal<-pal
map<-map%>%
addLegend("bottomright", pal = pal, values = value,
title = input$whatData,
opacity = 1
)
}
else{
# no legend requested: map unchanged (dead else kept for fidelity)
map<-map
}
# return the assembled map widget
map<-map
})
# Build a plain county-level leaflet map shaded by drug death rate.
# (Identical behavior to the original `function()({ ... })` form.)
getMap <- function() {
map_data <- getData()
state_shapes <- getStates()
if (input$individualState) {
# per-state filtering of map_data was disabled in the original:
# data<-data[data$StateFIPS==states$STATEFP,]
}
county_count <- length(state_shapes$COUNTYFP)
fill_colors <- colorBin(input$chooseColor, map_data$DrugDeathRate, bins = 8)(map_data$DrugDeathRate)
leaflet(state_shapes) %>%
addPolygons(stroke = T, fillOpacity = .7, smoothFactor = 0, color = "black",
weight = .5, fill = T, fillColor = ~fill_colors)
}
#Correlation of drug death rates & trump victory margin
output$genMap<-renderLeaflet({
# Render the placebo-residual map with county-name labels overlaid.
map<-getGenMap()
states<-getStates()
data<-getData()
if(input$individualState){
# per-state filtering was disabled in the original
# data<-data[data$StateFIPS==states$STATEFP,]
}
abbStates<-read.csv("states.csv")
if(input$individualState){
# centroid coordinates for the selected state only
intial<-abbStates$Abbreviation[which(abbStates$State==input$chooseStates)]
long<-data$long[data$StateCode==toString(intial)]
lat<-data$lat[data$StateCode==toString(intial)]
}
else{
long<-data$long
lat<-data$lat
}
# NOTE(review): addLabelOnlyMarkers takes (lng, lat); ~lat is passed
# first — confirm the columns are not swapped.
map%>%
addLabelOnlyMarkers(~lat, ~long, label = ~as.character(states$NAME),
labelOptions = labelOptions(noHide = T, direction = 'top', textOnly = T))
})
getGenMap <- reactive({
# Map of squared residuals from the random-covariate (placebo) model m3.
df <- getData()
shp <- getStates()
if (input$individualState) {
# per-state filtering of df was disabled in the original:
# data<-data[data$StateFIPS==states$STATEFP,]
}
df <- df[order(order(as.numeric(as.character(shp$GEOID)))), ]
county_count <- length(shp$COUNTYFP)
# correlation-with-random-number shading
bin_fun <- colorBin(input$chooseColor, df$m3.residuals^2, bins = 5)
fill_cols <- bin_fun(df$m3.residuals^2)
leaflet(shp) %>%
addPolygons(stroke = T, fillOpacity = .7, smoothFactor = 0, color = "black",
weight = .5, fill = T, fillColor = ~fill_cols)
})
output$mapRandomer<-renderLeaflet({
# Placebo map: counties shaded by squared residuals of the 2016 GOP margin
# regressed on a random covariate (model m3 built in getData()).
# BUG FIX: the original referenced free variables `data` and `states` that
# are not defined in this render scope; fetch them from the shared
# reactives, matching every other map in this server.
data <- getData()
states <- getStates()
color <- colorBin(input$chooseColor, data$m3.residuals^2 , bins = 5)(data$m3.residuals^2)
leaflet(states) %>%
addPolygons( stroke = T, fillOpacity =.7, smoothFactor = 0, color = "black",
weight = .5, fill = T, fillColor = ~color
)
})
##BOXPLOTS
output$graphTwo<-renderPlot({
# Boxplot of Trump 2016 victory margin by county drug-overdose-rate bin.
data <- getData()
# BUG FIXES vs the original:
# - aes() referenced data$DrugDeathRate (breaks ggplot's data masking);
#   use the bare column name instead.
# - a stray order(...) expression was passed as ggplot()'s third
#   positional argument, where it was silently ignored; dropped.
# - `bp + ggtitle(...)` was evaluated but never assigned, so the title
#   was lost before print(bp); the title is now part of the returned plot.
bp <- ggplot(data, aes(x = DrugDeathRate, y = gop_margin_2016)) +
geom_boxplot(aes(fill = DrugDeathRate)) +
xlab("Age-Adjusted drug deaths per 100,000 people") +
ylab("Trump Victory Margin") +
scale_fill_discrete(breaks = c("6.1-8", "8.1-10", "10.1-12", "12.1-14",
"14.1-16", "16.1-18", "18.1-20", ">20")) +
ggtitle("Trump victory margin in North Carolina counties, by county drug overdose rate ")
bp
})
##BAR GRAPH
output$graphThree <-renderPlot({
# Stacked bar chart: number of counties per drug-overdose-rate bin,
# filled by the 2016 winner ("Hillary"/"Trump" from getData()).
# data$winner16 <- factor(data$winner16)
#data$winner16 <- factor(data$winner16, levels = rev(levels(data$winner16)))
data<-getData()
# NOTE(review): the `order` aesthetic was removed from ggplot2 >= 2.0 and
# is ignored here — confirm the bin ordering still comes out as intended.
bp2 <- ggplot(data, aes(DrugDeathRateCategory, fill = winner, order = as.numeric(DrugDeathRateCategory))) +
geom_bar()
bp2 <- bp2 + xlab( "Age-Adjusted drug deaths per 100,000 people") +
ylab("Number of Counties")
# the titled plot is the last expression, so it is what gets rendered
bp2 + ggtitle("2016 Election victor in State counties by county drug overdose rate")
})
##REGRESSIONS
getSummary<-renderText({
# NOTE(review): this render function is assigned to a plain local variable
# rather than to output$..., so it is never displayed by the UI. `data` is
# also a free variable here (presumably getData() was intended — confirm).
# Only the last expression's value would be rendered; the first two model
# summaries are computed and discarded.
summary(lm(TrumpPctVictory ~ RomneyPctVictory + DDR, data))
# summary(glm(TrumpWin ~ RomneyWin + DDR,data,family="DDRomial"))
cor(data$TrumpPctVictory, data$DDR)
summary(lm(TrumpPctVictory ~ DDR, data[data$RomneyWin == F,])) ##effect of drug death on obama counties
})
# Export the currently displayed map as an image (via htmlwidget + webshot).
output$downloadMap <- downloadHandler(
filename = function() {
# e.g. "North Carolina.png" depending on the chosen format
paste(input$chooseStates,input$whatFormat, sep='')
},
content = function(file) {
# src <- normalizePath('report.Rmd')
here<-finalMap()
# Center the exported view on the middle of the current map bounds.
# NOTE(review): the names are swapped (`long` holds a latitude midpoint,
# `latt` a longitude midpoint), but they are passed back consistently to
# setView(lng=latt, lat=long), so the resulting view is correct.
long<-((input$drugMap_bounds$north)+input$drugMap_bounds$south)/2
latt<-((input$drugMap_bounds$west)+input$drugMap_bounds$east)/2
heres<-here%>% setView(lng=latt, lat=long,zoom=input$drugMap_zoom)
# render the widget to a temp html file, screenshot it into `file`,
# and restore the working directory afterwards
owd <- setwd(tempdir())
on.exit(setwd(owd))
saveWidget(heres, file="temp.html", selfcontained = F)
webshot("temp.html", file = file,
cliprect = "viewport")
}
)
# Export the underlying county dataset as a CSV named after the chosen state.
# BUG FIXES vs the original:
# - downloadHandler was called with a single `{ ... }` block, which passed
#   one anonymous value as the `filename` argument and left `content`
#   missing entirely; the two functions must be named arguments.
# - paste(input$chooseStates, ".csv", sep = ".") produced "State..csv".
# - write.table("data.csv", ...) wrote the literal string "data.csv" to the
#   output instead of the data; the CSV contents are now exported.
output$downloadData <- downloadHandler(
filename = function() {
# browser-suggested file name, e.g. "North Carolina.csv"
paste0(input$chooseStates, ".csv")
},
content = function(file) {
# write the dataset to the temp file Shiny hands us
write.table(read.csv("data.csv", header = TRUE, sep = ","),
file, sep = ",", row.names = FALSE)
}
)
})
|
#!/usr/bin/env r
# Install an R package from a local path using devtools, with a docopt CLI.
# suppressMessages() keeps the startup chatter out of the console.
suppressMessages(library(docopt))
suppressMessages(library(devtools))
# docopt usage string: defines the single -p/--path option (default ".")
doc <- "Usage: installPackage.r [-p PATH]
Options:
-p --path PATH package path [default: .]
Example:
installPackage.r -p ./epivizFileServer
"
opt <- docopt(doc)
# install quietly; invisible() suppresses printing the return value
invisible(devtools::install(pkg=opt$path))
|
/installPackage.r
|
no_license
|
epiviz/docker_rbase
|
R
| false
| false
| 301
|
r
|
#!/usr/bin/env r
# Thin CLI wrapper: install the package at --path via devtools::install().
suppressMessages(library(docopt))
suppressMessages(library(devtools))
# docopt usage string (single -p/--path option, default ".")
doc <- "Usage: installPackage.r [-p PATH]
Options:
-p --path PATH package path [default: .]
Example:
installPackage.r -p ./epivizFileServer
"
cli_args <- docopt(doc)
invisible(devtools::install(pkg = cli_args$path))
|
# --- kknn training script: library loading and data selection ---
# NOTE(review): require() is used for loading; library() would fail loudly
# if a package is missing instead of returning FALSE silently.
require(caret)
require(caretEnsemble)
require(pROC)
require(doParallel)
require(kknn)
source("script/R/fun/summaryResult.R")
# accumulated kknn results from previous runs
result.kknn.df <- readRDS("result/result.kknn.df.data")
#
# Preprocessing
#
source("./Data-pre-processing.R")
my_preProcess <- c("center", "scale")
# NOTE(review): only the last assignment takes effect; the three earlier
# assignments are dead and look like manual toggles left in place.
data_preProcess <- "none"
data_preProcess <- "nzv"
data_preProcess <- "dummy"
data_preProcess <- "dummy.nzv.highlyCorDescr"
# select the train/test splits matching the chosen preprocessing variant
# (the data sets themselves are created by Data-pre-processing.R)
if ( data_preProcess == "none") {
TRAIN <- all.train
TRAIN.TRAIN <- train.train
TRAIN.TEST <- train.test
TEST <- test
} else if ( data_preProcess == "nzv") {
TRAIN <- all.nzv.train
TRAIN.TRAIN <- train.nzv.train
TRAIN.TEST <- train.nzv.test
TEST <- test
} else if ( data_preProcess == "dummy") {
TRAIN <- train.dummy
TRAIN.TRAIN <- train.dummy.train
TRAIN.TEST <- train.dummy.test
TEST <- test.dummy
} else if ( data_preProcess == "dummy.nzv.highlyCorDescr") {
TRAIN <- train.dummy.nzv.highlyCorDescr
TRAIN.TRAIN <- train.dummy.nzv.highlyCorDescr.train
TRAIN.TEST <- train.dummy.nzv.highlyCorDescr.test
TEST <- test.dummy.nzv.highlyCorDescr
}
#
# kknn
#
# build the list of explanatory variable names (everything except response)
explanation_variable <- names(subset(TRAIN, select = -c(response)))
# fix the resampling seeds so the CV folds are reproducible
set.seed(123)
seeds <- vector(mode = "list", length = 51)
for(i in 1:50) seeds[[i]] <- sample.int(1000, 500)
seeds[[51]] <- sample.int(1000, 1)
# 10-fold CV optimising ROC on class probabilities
my_control <- trainControl(
method = "cv"
,number = 10
,summaryFunction = twoClassSummary
,classProbs = TRUE
,verbose = TRUE
,savePredictions = "final"
,index = createResample(TRAIN.TRAIN$response, 10)
,seeds = seeds
)
# earlier single-model train() call, kept for reference:
# fit.kknn.dummy <-
# train(
# x = TRAIN.TRAIN[,explanation_variable]
# ,y = TRAIN.TRAIN$response
# ,trControl = my_control
# ,method = "kknn"
# ,metric = "ROC"
# ,tuneGrid = expand.grid(mtry = 5)
# ,importance = 'impurity'
# )
# fit in parallel on all local cores
cl <- makeCluster(detectCores(), type = 'PSOCK', outfile = " ")
registerDoParallel(cl)
model_list <- caretList(
x = TRAIN.TRAIN[,explanation_variable]
,y = TRAIN.TRAIN$response
,trControl = my_control
,tuneList = list(
fit.kknn.dummy = caretModelSpec(
method = "kknn"
,metric = "ROC"
,tuneGrid = expand.grid(
kmax = 1
,distance = 1
,kernel = "rectangular"
)
)
)
)
stopCluster(cl)
registerDoSEQ()
fit.kknn <- model_list[[1]]
fit.kknn$times
# timing notes from earlier runs:
# tuneGrid = expand.grid(kmax = 1,distance = 1,kernel = "rectangular")
# $everything
# user system elapsed
# 205.239 6.891 1708.582
# Fitting kmax = 5, distance = 5, kernel = rectangular on full training set
# $everything
# user system elapsed
# 252.280 7.936 13221.714
# best hyper-parameters selected by CV
fit.kknn$bestTune$kmax
fit.kknn$bestTune$distance
fit.kknn$bestTune$kernel
# inspect variable importance
caret::varImp(fit.kknn)
#
# Model comparison
#
# predicted class probabilities on train and held-out test sets
allProb <- caret::extractProb(
list(fit.kknn)
,testX = subset(TRAIN.TEST, select = -c(response))
,testY = unlist(subset(TRAIN.TEST, select = c(response)))
)
# keep only rows whose dataType column is "Test"
testProb <- subset(allProb, dataType == "Test")
tp <- subset(testProb, object == "Object1")
# compare via confusionMatrix (overall accuracy only)
confusionMatrix(tp$pred, tp$obs)$overall[1]
# ROC
pROC::roc(tp$obs, tp$yes)
# append this run's summary and persist the results
result.kknn.df <- rbind(result.kknn.df, summaryResult(model_list[[1]]))
saveRDS(result.kknn.df, "result/result.kknn.df.data")
# Cross-check the extractProb() predictions using predict() directly.
# BUG FIX: the condition tested fit.nnet$preProcess — a leftover from the
# nnet variant of this script; fit.nnet is undefined here, so this branch
# errored at runtime. It must test fit.kknn.
if (is.null(fit.kknn$preProcess)){
# preProcess was not specified: predict from the raw held-out features
pred_test.verification <- predict(
fit.kknn$finalModel
,subset(TRAIN.TEST, select = -c(response))
,type = "prob"
)
} else {
# preProcess was specified: apply the same transform before predicting
pred_test.verification <- preProcess(
subset(TRAIN.TEST, select = -c(response))
,method = my_preProcess
) %>%
predict(., subset(TRAIN.TEST, select = -c(response))) %>%
predict(fit.kknn$finalModel, . ,type = "prob")
}
# ROC on the verification predictions
pROC::roc(TRAIN.TEST[,"response"], pred_test.verification[,"yes"])
#
# Apply the model to the prediction (submission) data
#
if (is.null(fit.kknn$preProcess)){
# preProcess was not specified: predict straight from TEST
pred_test <- predict(fit.kknn$finalModel, TEST, type = "prob")[,2]
PREPROCESS <- "no_preProcess"
} else {
# preProcess was specified: transform TEST the same way first
pred_test <- preProcess(TEST, method = my_preProcess) %>%
predict(., TEST) %>%
predict(fit.kknn$finalModel, ., type = "prob")
pred_test <- pred_test[,2]
PREPROCESS <- paste(my_preProcess, collapse = "_")
}
# output in the submission format (CSV)
# shape the data: id column plus predicted probability
out <- data.frame(test$id, pred_test)
# save predictions under the first unused suffix 1..10 for today's date
for(NUM in 1:10){
DATE <- format(jrvFinance::edate(from = Sys.Date(), 0), "%Y%m%d")
SUBMIT_FILENAME <- paste("./submit/submit_", DATE, "_", NUM, "_", PREPROCESS, "_kknn.csv", sep = "")
if ( !file.exists(SUBMIT_FILENAME) ) {
write.table(out, #output data
SUBMIT_FILENAME, #destination file
quote = FALSE, #do not quote strings
col.names = FALSE, #no column names
row.names = FALSE, #no row numbers
sep = "," #field separator
)
break
}
}
|
/caret_kknn.R
|
no_license
|
ryoogata/BankCustomersTargeting
|
R
| false
| false
| 5,456
|
r
|
require(caret)
require(caretEnsemble)
require(pROC)
require(doParallel)
require(kknn)
source("script/R/fun/summaryResult.R")
result.kknn.df <- readRDS("result/result.kknn.df.data")
#
# 前処理
#
source("./Data-pre-processing.R")
my_preProcess <- c("center", "scale")
data_preProcess <- "none"
data_preProcess <- "nzv"
data_preProcess <- "dummy"
data_preProcess <- "dummy.nzv.highlyCorDescr"
if ( data_preProcess == "none") {
TRAIN <- all.train
TRAIN.TRAIN <- train.train
TRAIN.TEST <- train.test
TEST <- test
} else if ( data_preProcess == "nzv") {
TRAIN <- all.nzv.train
TRAIN.TRAIN <- train.nzv.train
TRAIN.TEST <- train.nzv.test
TEST <- test
} else if ( data_preProcess == "dummy") {
TRAIN <- train.dummy
TRAIN.TRAIN <- train.dummy.train
TRAIN.TEST <- train.dummy.test
TEST <- test.dummy
} else if ( data_preProcess == "dummy.nzv.highlyCorDescr") {
TRAIN <- train.dummy.nzv.highlyCorDescr
TRAIN.TRAIN <- train.dummy.nzv.highlyCorDescr.train
TRAIN.TEST <- train.dummy.nzv.highlyCorDescr.test
TEST <- test.dummy.nzv.highlyCorDescr
}
#
# kknn
#
# 説明変数一覧の作成
explanation_variable <- names(subset(TRAIN, select = -c(response)))
# seeds の決定
set.seed(123)
seeds <- vector(mode = "list", length = 51)
for(i in 1:50) seeds[[i]] <- sample.int(1000, 500)
seeds[[51]] <- sample.int(1000, 1)
my_control <- trainControl(
method = "cv"
,number = 10
,summaryFunction = twoClassSummary
,classProbs = TRUE
,verbose = TRUE
,savePredictions = "final"
,index = createResample(TRAIN.TRAIN$response, 10)
,seeds = seeds
)
# fit.kknn.dummy <-
# train(
# x = TRAIN.TRAIN[,explanation_variable]
# ,y = TRAIN.TRAIN$response
# ,trControl = my_control
# ,method = "kknn"
# ,metric = "ROC"
# ,tuneGrid = expand.grid(mtry = 5)
# ,importance = 'impurity'
# )
cl <- makeCluster(detectCores(), type = 'PSOCK', outfile = " ")
registerDoParallel(cl)
model_list <- caretList(
x = TRAIN.TRAIN[,explanation_variable]
,y = TRAIN.TRAIN$response
,trControl = my_control
,tuneList = list(
fit.kknn.dummy = caretModelSpec(
method = "kknn"
,metric = "ROC"
,tuneGrid = expand.grid(
kmax = 1
,distance = 1
,kernel = "rectangular"
)
)
)
)
stopCluster(cl)
registerDoSEQ()
fit.kknn <- model_list[[1]]
fit.kknn$times
# tuneGrid = expand.grid(kmax = 1,distance = 1,kernel = "rectangular")
# $everything
# ユーザ システム 経過
# 205.239 6.891 1708.582
# Fitting kmax = 5, distance = 5, kernel = rectangular on full training set
# $everything
# user system elapsed
# 252.280 7.936 13221.714
fit.kknn$bestTune$kmax
fit.kknn$bestTune$distance
fit.kknn$bestTune$kernel
# 特徴量の確認
caret::varImp(fit.kknn)
#
# モデル比較
#
allProb <- caret::extractProb(
list(fit.kknn)
,testX = subset(TRAIN.TEST, select = -c(response))
,testY = unlist(subset(TRAIN.TEST, select = c(response)))
)
# dataType 列に Test と入っているもののみを抜き出す
testProb <- subset(allProb, dataType == "Test")
tp <- subset(testProb, object == "Object1")
# confusionMatrix で比較
confusionMatrix(tp$pred, tp$obs)$overall[1]
# ROC
pROC::roc(tp$obs, tp$yes)
# 結果の保存
result.kknn.df <- rbind(result.kknn.df, summaryResult(model_list[[1]]))
saveRDS(result.kknn.df, "result/result.kknn.df.data")
# Cross-check the extractProb() predictions using predict() directly.
# BUG FIX: the condition tested fit.nnet$preProcess — a leftover from the
# nnet variant of this script; fit.nnet is undefined here, so this branch
# errored at runtime. It must test fit.kknn.
if (is.null(fit.kknn$preProcess)){
# preProcess was not specified: predict from the raw held-out features
pred_test.verification <- predict(
fit.kknn$finalModel
,subset(TRAIN.TEST, select = -c(response))
,type = "prob"
)
} else {
# preProcess was specified: apply the same transform before predicting
pred_test.verification <- preProcess(
subset(TRAIN.TEST, select = -c(response))
,method = my_preProcess
) %>%
predict(., subset(TRAIN.TEST, select = -c(response))) %>%
predict(fit.kknn$finalModel, . ,type = "prob")
}
#ROC
pROC::roc(TRAIN.TEST[,"response"], pred_test.verification[,"yes"])
#
# 予測データにモデルの当てはめ
#
if (is.null(fit.kknn$preProcess)){
# preProcess を指定していない場合
pred_test <- predict(fit.kknn$finalModel, TEST, type = "prob")[,2]
PREPROCESS <- "no_preProcess"
} else {
# preProcess を指定している場合
pred_test <- preProcess(TEST, method = my_preProcess) %>%
predict(., TEST) %>%
predict(fit.kknn$finalModel, ., type = "prob")
pred_test <- pred_test[,2]
PREPROCESS <- paste(my_preProcess, collapse = "_")
}
#submitの形式で出力(CSV)
#データ加工
out <- data.frame(test$id, pred_test)
# 予測データを保存
for(NUM in 1:10){
DATE <- format(jrvFinance::edate(from = Sys.Date(), 0), "%Y%m%d")
SUBMIT_FILENAME <- paste("./submit/submit_", DATE, "_", NUM, "_", PREPROCESS, "_kknn.csv", sep = "")
if ( !file.exists(SUBMIT_FILENAME) ) {
write.table(out, #出力データ
SUBMIT_FILENAME, #出力先
quote = FALSE, #文字列を「"」で囲む有無
col.names = FALSE, #変数名(列名)の有無
row.names = FALSE, #行番号の有無
sep = "," #区切り文字の指定
)
break
}
}
|
# functions that allow to calculate emissions factors from EDGAR
# EF in EDGAR are in kton/TJ
# https://edgar.jrc.ec.europa.eu/overview.php?v=432_AP&SECURE=123
# Crippa, M., Guizzardi, D., Muntean, M., Schaaf, E., Dentener, F.,
# van Aardenne, J. A., Monni, S., Doering, U., Olivier, J. G. J.,
# Pagliari, V., and Janssens-Maenhout, G.: Gridded Emissions of Air
# Pollutants for the period 1970–2012 within EDGAR v4.3.2,
# Earth Syst. Sci. Data Discuss., https://doi.org/10.5194/essd-2018-31,
# in review, 2018.
# contact: emanuela.peduzzi@gmail.com, enrico.pisoni@ec.europa.eu
library(reshape)
# Merge Covenant-of-Mayors (CoM) energy data with EDGAR implied emission
# factors and compute pollutant emissions per inventory (BEI/MEI) year.
#
# beimei_f : CoM energy/emissions data (grouped tbl; ungrouped below)
# subDir   : output directory for CSVs and the log file
# path_e   : path to the EDGAR Excel workbook ("implied_EF" sheet)
# polls    : character vector of pollutant names to expand over
# returns  : data.table grouped by (cou, name, poll, ms) with summed
#            energies and emissions; also writes two CSV summaries.
com_edgar_merge<- function(beimei_f, subDir, path_e, polls){
#read data edgar, activity levels, IEF and emissions
print('Reading EDGAR File')
cat('Reading EDGAR File', file = paste0(subDir,"/test.log"), append = TRUE, sep = "\n")
dataedgar <- read_excel(paste0(path_e), sheet='implied_EF')
# putting all com data in a data.frame and adding the pollutant column
df_tmp <-beimei_f %>% ungroup() # CoM data
df_final <- data.frame() # final dataframe
# NOTE(review): rbind-in-a-loop grows the frame quadratically; fine for a
# handful of pollutants, slow if `polls` ever gets large.
for (poll in polls){
df_tmp$poll <- poll
df_final<-rbind(df_final, df_tmp)}
# I included all countries, because EDGAR can potentially give us all world countries.
# country-name -> ISO3 lookup used to build the EDGAR country code
n_list_newdataset_edgar <- (list(
Spain = 'ESP',
Germany = 'DEU',
Austria = 'AUT',
Bulgaria = 'BGR',
BosniaandHerzegovina = 'BIH',
Belgium = 'BEL',
Finland = 'FIN',
France = 'FRA',
Italy = 'ITA',
Greece = 'GRC',
Luxembourg = 'LUX',
Norway = 'NOR',
Portugal = 'PRT',
Ireland = 'IRL',
Romania = 'ROU',
Hungary = 'HUN',
UnitedKingdom = 'GBR',
Estonia = 'EST',
Poland = 'POL',
Croatia = 'HRV',
TheNetherlands = 'NLD',
Denmark = 'DNK',
Latvia = 'LVA',
Sweden = 'SWE',
Iceland = 'ISL',
Switzerland = 'CHE',
Slovakia = 'SVK',
Cyprus = 'CYP',
Slovenia = 'SVN',
CzechRepublic = 'CZE',
Lithuania = 'LTU',
Georgia = 'GEO',
Ukraine = 'UKR',
Turkey = 'TUR'
))
# normalise country names (strip spaces/hyphens) then map to ISO3 codes
df_final <- df_final %>% mutate(cou4 = gsub(" ", "", cou)) %>% mutate(cou5 = gsub("-", "", cou4)) %>%
mutate(edgar_country = ifelse(cou5 %in% names(n_list_newdataset_edgar), as.character(n_list_newdataset_edgar[cou5]), cou5)) %>%
select(-matches('cou4')) %>% select(-matches('cou5'))
# restrict to inventory years covered by the EDGAR time series
df_final <- df_final %>% filter(BeiYear>=1990 & BeiYear<=2015) %>% filter(MeiYear>=1990 & MeiYear<=2015)
# if you do not have all the time series
beiyears <- unique(df_final$BeiYear)
meiyears <- unique(df_final$MeiYear)
years <- c(beiyears, meiyears)
# if you do not have all the time series
# years <- 1990:2015
# EDGAR IEF columns are named DER_<year>
coliefnames <- character(length = length(years))
for (i in seq_along(years)){
coliefnames[i] <- paste0('DER_',years[i])
}
# NOTE(review): this bare names() call is a no-op in a sourced script
# (leftover interactive inspection) and can be removed.
names(dataedgar)
coltokeep<-c(names(dataedgar)[1:4], coliefnames)
# reshape IEFs from wide (one column per year) to long format
wideedgar <- dataedgar %>% select(coltokeep)
longedgar <- melt(as.data.frame(wideedgar), id=names(wideedgar)[1:4])
longedgar <- longedgar %>% mutate(year = gsub("DER_", "", variable)) %>% select(-c(variable)) #%>%
#dplyr::rename(ief=value, edgar_country=Country_code_A3,poll=substance, edgar_sector=SECTOR, edgar_carrier=category)
print('Calculating emissions of air pollutants precursors')
cat('Calculating emissions of air pollutants precursors', file = paste0(subDir,"/test.log"), append = TRUE, sep= "\n")
# merging with the original dataframe: one merge per inventory year
df_m <- merge(df_final, longedgar,
by.x = c("edgar_country","edgar_sector", "edgar_carrier", "poll", "BeiYear"),
by.y = c("Country_code_A3", "SECTOR","category", "substance", "year"), all.x=TRUE) %>% dplyr::rename(ief_bei=value)
df_m <- merge(df_m, longedgar,
by.x = c("edgar_country","edgar_sector", "edgar_carrier", "poll", "MeiYear"),
by.y = c("Country_code_A3", "SECTOR","category", "substance", "year"), all.x=TRUE) %>% dplyr::rename(ief_mei=value)
# map pollutant display names to the short codes used downstream
n_poll <- (list(
PM2.5 = 'pm25',
PM10 = 'pm10',
NOx = 'nox'))
df_m <- df_m %>% mutate(poll = ifelse(poll %in% names(n_poll), as.character(n_poll[poll]), poll))
# VERY IMPORTANT: changing units! CAREFUL!
df_m_units <- df_m %>% mutate(ief_bei=ief_bei *3.6, ief_mei= ief_mei *3.6)# 1kton/TJ -> 3.6 ton/MWh
# emissions = energy * implied emission factor, for three combinations
df_m_final <- df_m_units %>% mutate(emi_e_bei_ef_bei=ener_bei*ief_bei)
df_m_final <- df_m_final %>% mutate(emi_e_mei_ef_mei=ener_mei*ief_mei)
df_m_final <- df_m_final %>% mutate(emi_e_mei_ef_bei=ener_mei*ief_bei)
write.csv(df_m_final, file = paste0(subDir,'/summary_edgar_Com.csv'))
# aggregate over sectors/carriers per (country, city, pollutant, sector)
df_m_final_grouped <- df_m_final %>% group_by(cou, name, poll, ms) %>% summarise(ener_bei=sum(ener_bei, na.rm = TRUE),
ener_mei=sum(ener_mei, na.rm = TRUE),
emi_bei_co2=sum(emi_bei_co2, na.rm = TRUE),
emi_mei_co2=sum(emi_mei_co2, na.rm = TRUE),
emi_e_bei_ef_bei=sum(emi_e_bei_ef_bei, na.rm = TRUE),
emi_e_mei_ef_mei=sum(emi_e_mei_ef_mei, na.rm = TRUE),
emi_e_mei_ef_bei=sum(emi_e_mei_ef_bei, na.rm = TRUE),
BeiYear=min(BeiYear),
MeiYear=min(MeiYear),
pop_bei=min(pop_bei),
pop_mei=min(pop_mei))
write.csv(df_m_final_grouped, file = paste0(subDir,'/summary_edgar_Com_sel_ms.csv'))
return(df_m_final_grouped)
}
|
/edgar_ef.R
|
permissive
|
esperluette/airpollutants_script_for_com
|
R
| false
| false
| 5,836
|
r
|
# functions that allow to calculate emissions factors from EDGAR
# EF in EDGAR are in kton/TJ
# https://edgar.jrc.ec.europa.eu/overview.php?v=432_AP&SECURE=123
# Crippa, M., Guizzardi, D., Muntean, M., Schaaf, E., Dentener, F.,
# van Aardenne, J. A., Monni, S., Doering, U., Olivier, J. G. J.,
# Pagliari, V., and Janssens-Maenhout, G.: Gridded Emissions of Air
# Pollutants for the period 1970–2012 within EDGAR v4.3.2,
# Earth Syst. Sci. Data Discuss., https://doi.org/10.5194/essd-2018-31,
# in review, 2018.
# contact: emanuela.peduzzi@gmail.com, enrico.pisoni@ec.europa.eu
library(reshape)
com_edgar_merge<- function(beimei_f, subDir, path_e, polls){
#read data edgar, activity levels, IEF and emissions
print('Reading EDGAR File')
cat('Reading EDGAR File', file = paste0(subDir,"/test.log"), append = TRUE, sep = "\n")
dataedgar <- read_excel(paste0(path_e), sheet='implied_EF')
# putting all com data in a data.frame and adding the pollutant column
df_tmp <-beimei_f %>% ungroup() # CoM data
df_final <- data.frame() # final dataframe
for (poll in polls){
df_tmp$poll <- poll
df_final<-rbind(df_final, df_tmp)}
# I included all countries, because EDGAR can potentially give us all world countries.
n_list_newdataset_edgar <- (list(
Spain = 'ESP',
Germany = 'DEU',
Austria = 'AUT',
Bulgaria = 'BGR',
BosniaandHerzegovina = 'BIH',
Belgium = 'BEL',
Finland = 'FIN',
France = 'FRA',
Italy = 'ITA',
Greece = 'GRC',
Luxembourg = 'LUX',
Norway = 'NOR',
Portugal = 'PRT',
Ireland = 'IRL',
Romania = 'ROU',
Hungary = 'HUN',
UnitedKingdom = 'GBR',
Estonia = 'EST',
Poland = 'POL',
Croatia = 'HRV',
TheNetherlands = 'NLD',
Denmark = 'DNK',
Latvia = 'LVA',
Sweden = 'SWE',
Iceland = 'ISL',
Switzerland = 'CHE',
Slovakia = 'SVK',
Cyprus = 'CYP',
Slovenia = 'SVN',
CzechRepublic = 'CZE',
Lithuania = 'LTU',
Georgia = 'GEO',
Ukraine = 'UKR',
Turkey = 'TUR'
))
df_final <- df_final %>% mutate(cou4 = gsub(" ", "", cou)) %>% mutate(cou5 = gsub("-", "", cou4)) %>%
mutate(edgar_country = ifelse(cou5 %in% names(n_list_newdataset_edgar), as.character(n_list_newdataset_edgar[cou5]), cou5)) %>%
select(-matches('cou4')) %>% select(-matches('cou5'))
df_final <- df_final %>% filter(BeiYear>=1990 & BeiYear<=2015) %>% filter(MeiYear>=1990 & MeiYear<=2015)
# if you do not have all the time series
beiyears <- unique(df_final$BeiYear)
meiyears <- unique(df_final$MeiYear)
years <- c(beiyears, meiyears)
# if you do not have all the time series
# years <- 1990:2015
coliefnames <- character(length = length(years))
for (i in seq_along(years)){
coliefnames[i] <- paste0('DER_',years[i])
}
names(dataedgar)
coltokeep<-c(names(dataedgar)[1:4], coliefnames)
wideedgar <- dataedgar %>% select(coltokeep)
longedgar <- melt(as.data.frame(wideedgar), id=names(wideedgar)[1:4])
longedgar <- longedgar %>% mutate(year = gsub("DER_", "", variable)) %>% select(-c(variable)) #%>%
#dplyr::rename(ief=value, edgar_country=Country_code_A3,poll=substance, edgar_sector=SECTOR, edgar_carrier=category)
print('Calculating emissions of air pollutants precursors')
cat('Calculating emissions of air pollutants precursors', file = paste0(subDir,"/test.log"), append = TRUE, sep= "\n")
# merging with the original dataframe
df_m <- merge(df_final, longedgar,
by.x = c("edgar_country","edgar_sector", "edgar_carrier", "poll", "BeiYear"),
by.y = c("Country_code_A3", "SECTOR","category", "substance", "year"), all.x=TRUE) %>% dplyr::rename(ief_bei=value)
df_m <- merge(df_m, longedgar,
by.x = c("edgar_country","edgar_sector", "edgar_carrier", "poll", "MeiYear"),
by.y = c("Country_code_A3", "SECTOR","category", "substance", "year"), all.x=TRUE) %>% dplyr::rename(ief_mei=value)
n_poll <- (list(
PM2.5 = 'pm25',
PM10 = 'pm10',
NOx = 'nox'))
df_m <- df_m %>% mutate(poll = ifelse(poll %in% names(n_poll), as.character(n_poll[poll]), poll))
# VERY IMPORTANT: changing units! CAREFUL!
df_m_units <- df_m %>% mutate(ief_bei=ief_bei *3.6, ief_mei= ief_mei *3.6)# 1kton/TJ -> 3.6 ton/MWh
df_m_final <- df_m_units %>% mutate(emi_e_bei_ef_bei=ener_bei*ief_bei)
df_m_final <- df_m_final %>% mutate(emi_e_mei_ef_mei=ener_mei*ief_mei)
df_m_final <- df_m_final %>% mutate(emi_e_mei_ef_bei=ener_mei*ief_bei)
write.csv(df_m_final, file = paste0(subDir,'/summary_edgar_Com.csv'))
df_m_final_grouped <- df_m_final %>% group_by(cou, name, poll, ms) %>% summarise(ener_bei=sum(ener_bei, na.rm = TRUE),
ener_mei=sum(ener_mei, na.rm = TRUE),
emi_bei_co2=sum(emi_bei_co2, na.rm = TRUE),
emi_mei_co2=sum(emi_mei_co2, na.rm = TRUE),
emi_e_bei_ef_bei=sum(emi_e_bei_ef_bei, na.rm = TRUE),
emi_e_mei_ef_mei=sum(emi_e_mei_ef_mei, na.rm = TRUE),
emi_e_mei_ef_bei=sum(emi_e_mei_ef_bei, na.rm = TRUE),
BeiYear=min(BeiYear),
MeiYear=min(MeiYear),
pop_bei=min(pop_bei),
pop_mei=min(pop_mei))
write.csv(df_m_final_grouped, file = paste0(subDir,'/summary_edgar_Com_sel_ms.csv'))
return(df_m_final_grouped)
}
|
##
# The five essential tasks to complete the Course Project are as follows.
#
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of
# each variable for each activity and each subject.
#
#--------------------------------------------------------------------------------------------
##1. Merges the training and the test sets to create one data set
# load raw feature matrices for the train and test partitions
X_train <- read.table("UCI HAR Dataset/train/X_train.txt")
X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
# merge the two raw data tables together, row-wise.
X <- rbind(X_train, X_test)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
# merge the two label sets; the labels correspond to activities; these are coded, as integers
Y <- rbind(y_train, y_test)
s_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
s_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
# merge the two subject codes lists
S <- rbind(s_train, s_test)
##2.Extracts only the measurements on the mean and standard deviation for each measurement.
# Read in the feature names from the features.txt file.
features <- read.table("UCI HAR Dataset/features.txt")
# identify all the features that are either standard deviations or means of measurements.
# (matches "-mean()" / "-std()" exactly, so "-meanFreq()" is excluded)
meanstddevfeatures <- grep("(-std\\(\\)|-mean\\(\\))",features$V2)
X2 <- X[, meanstddevfeatures]
names(X2) <- features[meanstddevfeatures, 2]
# strip parentheses and lower-case the variable names
names(X2) <- gsub("\\(|\\)", "", names(X2))
names(X2) <- tolower(names(X2))
## 3. Uses descriptive activity names to name the activities in the data set.
activities <- read.table("UCI HAR Dataset/activity_labels.txt")
activities[, 2] = gsub("_", "", tolower(as.character(activities[, 2])))
# replace the integer activity codes with their descriptive labels
Y[,1] = activities[Y[,1], 2]
names(Y) <- "activity"
## 4. Appropriately labels the data set with descriptive activity names.
names(S) <- "subject"
# assemble subject + activity + selected measurements, then persist
final <- cbind(S, Y, X2)
write.table(final, "final_data.txt", row.names = FALSE)
## 5. From the data set in step 4, creates a second, independent tidy data set with the average of
# each variable for each activity and each subject.
temp <- final[, 3:dim(final)[2]]
# mean of every measurement column per (subject, activity) pair
average_final <- aggregate(temp,list(final$subject, final$activity), mean)
names(average_final)[1] <- "Subject"
names(average_final)[2] <- "Activity"
write.table(average_final, "average_final_data.txt",row.names = FALSE)
|
/run_analysis.R
|
no_license
|
keithtan87/Coursera_Getting_And_Cleaning_Data
|
R
| false
| false
| 2,702
|
r
|
##
# The five essential tasks to complete the Course Project are as follows.
#
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of
# each variable for each activity and each subject.
#
#--------------------------------------------------------------------------------------------
##1. Merges the training and the test sets to create one data set
X_train <- read.table("UCI HAR Dataset/train/X_train.txt")
X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
# merge the two raw data tables together, row-wise.
X <- rbind(X_train, X_test)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
# merge the two label sets; the labels correspond to activities; these are coded, as integers
Y <- rbind(y_train, y_test)
s_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
s_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
# merge the two subject codes lists
S <- rbind(s_train, s_test)
##2.Extracts only the measurements on the mean and standard deviation for each measurement.
# Read in the feature names from the features.txt file.
features <- read.table("UCI HAR Dataset/features.txt")
# identify all the features that are either standard deviations or means of measurements.
meanstddevfeatures <- grep("(-std\\(\\)|-mean\\(\\))",features$V2)
X2 <- X[, meanstddevfeatures]
names(X2) <- features[meanstddevfeatures, 2]
names(X2) <- gsub("\\(|\\)", "", names(X2))
names(X2) <- tolower(names(X2))
## 3. Uses descriptive activity names to name the activities in the data set.
activities <- read.table("UCI HAR Dataset/activity_labels.txt")
activities[, 2] = gsub("_", "", tolower(as.character(activities[, 2])))
Y[,1] = activities[Y[,1], 2]
names(Y) <- "activity"
## 4. Appropriately labels the data set with descriptive activity names.
names(S) <- "subject"
final <- cbind(S, Y, X2)
write.table(final, "final_data.txt", row.names = FALSE)
## 5. From the data set in step 4, creates a second, independent tidy data set with the average of
# each variable for each activity and each subject.
temp <- final[, 3:dim(final)[2]]
average_final <- aggregate(temp,list(final$subject, final$activity), mean)
names(average_final)[1] <- "Subject"
names(average_final)[2] <- "Activity"
write.table(average_final, "average_final_data.txt",row.names = FALSE)
|
#----------------------------------------------------------------------------------------------#
# Revised from /proj/yunligrp/users/lagler/SIP/R/ATACseqPlot.R
# Figure 3a-b: SIPs and ATAC-seq Peak Regions
# requires SIP_*.txt, fraser.rda, and ATAC-seq peak files
# /proj/yunligrp/users/lagler/SIP/figures/ATACseqPlot.png is the table for Figure 3a.
#----------------------------------------------------------------------------------------------#
library(data.table)
library(ggplot2)
library(ggpubr)
library(dplyr)
library(kableExtra)
# NOTE(review): setwd() inside a script makes it non-portable; prefer
# project-relative paths.
setwd("/proj/yunligrp/users/jwen/SIP/revise/2_4")
# promoter-capture Hi-C interaction table with per-cell-type CHiCAGO scores
load("/proj/yunligrp/users/jwen/SIP/Fig.3a_combine_mac_mon/fraser.rda")
fraser <- data.table(fraser)
# keep autosomes only
fraser <- fraser[baitChr %in% 1:22]
# NOTE(review): positional rename of column 31 is fragile — confirm it is
# the combined macrophage/monocyte score column.
colnames(fraser)[31] <- "MacMon"
# ATAC-seq peak coordinates (first three columns: chr, start, end) per cell type
atac_ery <- fread("/proj/yunligrp/users/jwen/ATAC-seq_collection/ATAC-seq/2019-08/Ery_peaks.narrowPeak.gz", select=c(1:3))
atac_mon <- fread("/proj/yunligrp/users/jwen/ATAC-seq_collection/ATAC-seq/2019-08/Mono_peaks.narrowPeak.gz", select=c(1:3))
atac_ncd4 <- fread("/proj/yunligrp/users/jwen/ATAC-seq_collection/ATAC-seq/2019-08/CD4_peaks.narrowPeak.gz", select=c(1:3))
atac_mk <- fread("/proj/yunligrp/users/jwen/ATAC-seq_collection/ATAC-seq/2019-08/Mega_peaks.narrowPeak.gz", select=c(1:3))
getData <- function (cell, seqdat){
# Build, for one cell type, the table of significant promoter interactions
# annotated with SIP status and overlap against that cell type's ATAC-seq
# peaks.
#
# cell   : name of the CHiCAGO-score column in `fraser` (e.g. "Ery", "MK")
# seqdat : data.table of ATAC-seq peak regions (chr, start, end columns)
# returns: data.table of interactions with an `atac` 0/1 overlap flag and
#          the overlapping peak's `size` (NA when no overlap)
# select significant interactions (CHiCAGO score >= 5 in this cell type)
hic <- fraser %>% filter(!!as.name(cell) >= 5) %>% data.table
hic <- hic[,1:11] # drop expression data
# SIP calls for this cell type
sip <- fread(paste0("/proj/yunligrp/users/lagler/SIP/data/SIP_", cell, ".txt"))
# merge with hic data to get other ends and add SIP indicator
sipdata <- merge(hic, sip, by="baitID", allow.cartesian = T)
# add "chr" prefix to match the ATAC-seq chromosome naming
setDT(sipdata)[, chr2 := paste0("chr", baitChr)]
# format ATAC-seq data
colnames(seqdat) <- c("chr", "start", "end")
seqdat$size <- seqdat$end - seqdat$start+1
# IDIOM FIX: was seq(1:nrow(seqdat)) — same values, clearer intent
seqdat$row <- seq_len(nrow(seqdat))
setkey(seqdat, chr, start, end)
# first overlapping peak (if any) for each other-end fragment
overlap <- foverlaps(sipdata, seqdat, by.x=c("chr2", "oeStart", "oeEnd"),
type="any", mult="first", which=T)
# add to sip data
sipdata$atacRow <- overlap # row number of ATAC-seq overlap for size merging
sipdata$atac <- as.numeric(is.na(overlap)==F) # recode to 0/1 (false/true) overlap
sipdata <- merge(sipdata, seqdat[, c("row", "size")],
by.x="atacRow", by.y="row", all.x=T, all.y=F)
sipdata$chr2 <- NULL; sipdata$atacRow <- NULL
return(as.data.table(sipdata))
}
#----------------------------------------------------------------------------------#
# 2x2 contigency tables of overlaps - SIP vs non-SIP - bait level
# calls overlap function
#----------------------------------------------------------------------------------#
getCountsbait <- function(cell, seqdat){
# Per-bait ATAC-seq overlap summary: for each bait, the fraction of its
# PIRs overlapping an ATAC-seq peak, compared between SIP and non-SIP
# baits with one-sided (alternative = "less") Wilcoxon and t tests.
#
# cell   : CHiCAGO-score column name passed through to getData()
# seqdat : ATAC-seq peak data.table for the same cell type
# returns: list(data = list(SIP ratios, non-SIP ratios), table = summary)
CT2 <- getData(cell, seqdat)
# per-bait interaction and overlap counts
ovlps <- CT2[, .("nInteractions"=.N,
"nOverlaps"=sum(atac)), by=baitID]
ovlps <- unique(merge(ovlps, CT2[, c("baitID", "SIP")], by="baitID"))
# add cell type indicator
setDT(ovlps)[, CT := cell]
# per-bait overlap ratios for SIP and non-SIP baits
yy <- ovlps[SIP==1]$nOverlaps/ovlps[SIP==1]$nInteractions
mean_sip_inter <- sum(ovlps[SIP==1]$nInteractions)/nrow(ovlps[SIP==1])
ny <- ovlps[SIP==0]$nOverlaps/ovlps[SIP==0]$nInteractions
# BUG FIX: the numerator previously summed SIP==1 interactions while
# dividing by the non-SIP bait count, so the reported mean non-SIP
# interaction number was wrong.
mean_nonsip_inter <- sum(ovlps[SIP==0]$nInteractions)/nrow(ovlps[SIP==0])
# summary statistics (min/quartiles/median/mean/max) per group
t1 <- summary(yy)
t2 <- summary(ny)
counts <- cbind(c(paste0(cell,"_SIP PIR ratio"),paste0(cell,"_non_SIP PIR ratio")),rbind(c(t1,mean_sip_inter),c(t2,mean_nonsip_inter)))
counts <- data.table(counts)
# one-sided Wilcoxon test on the overlap ratios (SIP < non-SIP)
w <- wilcox.test(yy, ny,alternative="less")$p.value
setDT(counts)[, wilcox.pval.onesided := w]
# one-sided t test on the overlap ratios
t <- t.test(yy, ny,alternative="less")$p.value
setDT(counts)[, t.pval.onesided := t]
return(list("data"=list(yy,ny),
"table"=counts))
}
# Build the summary table and per-bait ratio data for all four cell types.
# PERF FIX: call getCountsbait() ONCE per cell type and reuse the result.
# The original called it twice per cell (once for $table, once for $data),
# redoing every file read, overlap join and statistical test. The function
# is deterministic, so caching does not change any output.
res_ery  <- getCountsbait("Ery", atac_ery)
res_mac  <- getCountsbait("MacMon", atac_mon)
res_mk   <- getCountsbait("MK", atac_mk)
res_ncd4 <- getCountsbait("nCD4", atac_ncd4)
ratios <- data.table(rbind(res_ery$table,
                           res_mac$table,
                           res_mk$table,
                           res_ncd4$table))
colnames(ratios)[8] <- "Mean of the SIP/non-SIP PIR number"
ovlpData <- list(res_ery$data, res_mac$data, res_mk$data, res_ncd4$data)
# long format: one row per bait ratio, tagged with cell type (X2) and SIP status (X3)
dat1 <- rbind(rbind(cbind(ovlpData[[1]][[1]],"Ery","SIP"),cbind(ovlpData[[1]][[2]],"Ery","non-SIP")),
              rbind(cbind(ovlpData[[2]][[1]],"MacMon","SIP"),cbind(ovlpData[[2]][[2]],"MacMon","non-SIP")),
              rbind(cbind(ovlpData[[3]][[1]],"MK","SIP"),cbind(ovlpData[[3]][[2]],"MK","non-SIP")),
              rbind(cbind(ovlpData[[4]][[1]],"nCD4","SIP"),cbind(ovlpData[[4]][[2]],"nCD4","non-SIP")))
dat1 <- data.frame(dat1)
dat1$X1 <- as.numeric(as.character(dat1$X1))
# Renamed from `mean` / `median` so base::mean and base::median (used in the
# plot aggregations below) are not shadowed by data objects. These frames are
# kept for parity with the original script; the plot recomputes group
# means/medians directly from dat1.
mean_df <- data.frame(cbind(ratios$Mean,c("Ery","Ery","MacMon","MacMon","MK","MK","nCD4","nCD4"),c("SIP PIRs","no-SIP PIRs", "SIP PIRs","no-SIP PIRs","SIP PIRs","no-SIP PIRs","SIP PIRs","no-SIP PIRs")))
median_df <- data.frame(cbind(ratios$Median,c("Ery","Ery","MacMon","MacMon","MK","MK","nCD4","nCD4"),c("SIP PIRs","no-SIP PIRs", "SIP PIRs","no-SIP PIRs","SIP PIRs","no-SIP PIRs","SIP PIRs","no-SIP PIRs")))
mean_df$X1 <- as.numeric(as.character(mean_df$X1))
median_df$X1 <- as.numeric(as.character(median_df$X1))
dat1 <- data.table(dat1)
# Violin plots of the per-bait overlap ratio, faceted by cell type;
# circles mark the group median, triangles (shape 17) the group mean.
plotd <- ggplot(dat1, aes(x=X3, y = X1, fill=X3)) +
  geom_violin() + scale_fill_brewer(palette="Paired") + ylab("% of PIRs overlapping ATAC-seq peaks") +
  xlab("") +
  geom_point(data=dat1[, .("median"=median(X1)), by=X3], aes(y=median), show.legend = F,size = 3)+
  geom_point(data=dat1[, .("mean"=mean(X1)), by=X3], aes(y=mean), show.legend = F,size = 3,shape = 17)+ facet_wrap(.~X2) +
  theme_bw() +
  theme(axis.text=element_text(size=20),axis.title=element_text(size=20)) +
  theme(legend.title = element_blank(),legend.text = element_text(size=20))+
  theme(strip.text.x = element_text(size = 15))
ggsave("ATAC.png", height = 9, width=12, units="in")
ggsave("ATAC.pdf", height =9, width=12, units="in")
write.table(ratios,"pir_atac_seq_ratio_comp_includingNonOlap",quote =F,sep="\t",col.names = T,row.names = F)
|
/revision/2_4.R
|
no_license
|
Jia21/SIP
|
R
| false
| false
| 6,536
|
r
|
#----------------------------------------------------------------------------------------------#
# Revised from /proj/yunligrp/users/lagler/SIP/R/ATACseqPlot.R
# Figure 3a-b: SIPs and ATAC-seq Peak Regions
# requires SIP_*.txt, fraser.rda, and ATAC-seq peak files
# /proj/yunligrp/users/lagler/SIP/figures/ATACseqPlot.png is the table for Figure 3a.
#----------------------------------------------------------------------------------------------#
# Libraries: data.table for fast I/O, ggplot2/ggpubr for figures.
library(data.table)
library(ggplot2)
library(ggpubr)
library(dplyr)
library(kableExtra)
setwd("/proj/yunligrp/users/jwen/SIP/revise/2_4")
# Interaction table `fraser`; keep autosomes only and rename column 31,
# which is filtered on below as the combined "MacMon" cell-type score.
load("/proj/yunligrp/users/jwen/SIP/Fig.3a_combine_mac_mon/fraser.rda")
fraser <- data.table(fraser)
fraser <- fraser[baitChr %in% 1:22]
colnames(fraser)[31] <- "MacMon"
# ATAC-seq peak coordinates (first 3 columns: chr, start, end) per cell type.
atac_ery <- fread("/proj/yunligrp/users/jwen/ATAC-seq_collection/ATAC-seq/2019-08/Ery_peaks.narrowPeak.gz", select=c(1:3))
atac_mon <- fread("/proj/yunligrp/users/jwen/ATAC-seq_collection/ATAC-seq/2019-08/Mono_peaks.narrowPeak.gz", select=c(1:3))
atac_ncd4 <- fread("/proj/yunligrp/users/jwen/ATAC-seq_collection/ATAC-seq/2019-08/CD4_peaks.narrowPeak.gz", select=c(1:3))
atac_mk <- fread("/proj/yunligrp/users/jwen/ATAC-seq_collection/ATAC-seq/2019-08/Mega_peaks.narrowPeak.gz", select=c(1:3))
# Build a bait-level table linking significant interactions for one cell type
# to ATAC-seq peak overlaps at the promoter-interacting region (other end).
# Relies on globals: `fraser` (interaction table) and SIP_<cell>.txt files.
# NOTE(review): modifies `seqdat` in place (setkey / added columns) via
# data.table reference semantics - callers see the mutated table.
getData <- function (cell, seqdat){
# keep rows where this cell type's score column is >= 5 (significant interactions)
hic <- fraser %>% filter(!!as.name(cell) >= 5) %>% data.table
hic <- hic[,1:11] # drop expression data
# per-cell SIP classification table (keyed by baitID)
sip <- fread(paste0("/proj/yunligrp/users/lagler/SIP/data/SIP_", cell, ".txt"))
# merge with hic data to get other ends and add SIP indicator
sipdata <- merge(hic, sip, by="baitID", allow.cartesian = T)
# prefix "chr" so chromosome labels match the ATAC-seq peak coordinates
setDT(sipdata)[, chr2 := paste0("chr", baitChr)]
# format ATAC-seq data: standardise names, add peak size and a row id
colnames(seqdat) <- c("chr", "start", "end")
seqdat$size <- seqdat$end - seqdat$start+1
seqdat$row <- seq(1:nrow(seqdat))
setkey(seqdat, chr, start, end)
# index of the FIRST ATAC peak (if any) overlapping each other-end fragment
overlap <- foverlaps(sipdata, seqdat, by.x=c("chr2", "oeStart", "oeEnd"),
type="any", mult="first", which=T)
sipdata$atacRow <- overlap # row number of ATAC-seq overlap for size merging
sipdata$atac <- as.numeric(is.na(overlap)==F) # recode to 0/1 (false/true) overlap
# attach the size of the overlapping peak (NA-safe left join)
sipdata <- merge(sipdata, seqdat[, c("row", "size")],
by.x="atacRow", by.y="row", all.x=T, all.y=F)
sipdata$chr2 <- NULL; sipdata$atacRow <- NULL
return(as.data.table(sipdata))
}
#----------------------------------------------------------------------------------#
# 2x2 contigency tables of overlaps - SIP vs non-SIP - bait level
# calls overlap function
#----------------------------------------------------------------------------------#
# Per-bait ATAC-seq overlap summary and SIP vs non-SIP comparison for one cell type.
#
# Args:
#   cell:   cell-type column name in `fraser` (also used in labels / file paths).
#   seqdat: ATAC-seq peak table (chr/start/end) forwarded to getData().
# Returns a list:
#   data:  list(yy, ny) - per-bait fraction of PIRs overlapping an ATAC peak,
#          for SIP baits (yy) and non-SIP baits (ny), including zero-overlap baits.
#   table: per-group summary stats plus one-sided Wilcoxon and t-test p-values.
getCountsbait <- function(cell, seqdat){
CT2 <- getData(cell, seqdat)
# per bait: number of interactions and number overlapping an ATAC peak
ovlps <- CT2[, .("nInteractions"=.N,
"nOverlaps"=sum(atac)), by=baitID]
ovlps <- unique(merge(ovlps, CT2[, c("baitID", "SIP")], by="baitID"))
# add cell type indicator
setDT(ovlps)[, CT := cell]
# per-bait overlap ratio within each group (all baits, incl. zero overlaps)
yy <- ovlps[SIP==1]$nOverlaps/ovlps[SIP==1]$nInteractions
mean_sip_inter <- sum(ovlps[SIP==1]$nInteractions)/nrow(ovlps[SIP==1])
ny <- ovlps[SIP==0]$nOverlaps/ovlps[SIP==0]$nInteractions
# BUG FIX: the numerator previously summed the SIP==1 interactions while
# dividing by the SIP==0 bait count, so the mean interactions per non-SIP
# bait was wrong; sum over the SIP==0 baits instead.
mean_nonsip_inter <- sum(ovlps[SIP==0]$nInteractions)/nrow(ovlps[SIP==0])
# summary statistics (Min/Q1/Median/Mean/Q3/Max) of the overlap ratios
t1 <- summary(yy)
t2 <- summary(ny)
counts <- cbind(c(paste0(cell,"_SIP PIR ratio"),paste0(cell,"_non_SIP PIR ratio")),rbind(c(t1,mean_sip_inter),c(t2,mean_nonsip_inter)))
counts <- data.table(counts)
# one-sided tests: is the SIP overlap ratio distribution smaller than non-SIP?
w <- wilcox.test(yy, ny,alternative="less")$p.value
setDT(counts)[, wilcox.pval.onesided := w]
t <- t.test(yy, ny,alternative="less")$p.value
setDT(counts)[, t.pval.onesided := t]
return(list("data"=list(yy,ny),
"table"=counts))
}
# Build the summary table and per-bait ratio data for all four cell types.
# PERF FIX: call getCountsbait() ONCE per cell type and reuse the result.
# The original called it twice per cell (once for $table, once for $data),
# redoing every file read, overlap join and statistical test. The function
# is deterministic, so caching does not change any output.
res_ery  <- getCountsbait("Ery", atac_ery)
res_mac  <- getCountsbait("MacMon", atac_mon)
res_mk   <- getCountsbait("MK", atac_mk)
res_ncd4 <- getCountsbait("nCD4", atac_ncd4)
ratios <- data.table(rbind(res_ery$table,
                           res_mac$table,
                           res_mk$table,
                           res_ncd4$table))
colnames(ratios)[8] <- "Mean of the SIP/non-SIP PIR number"
ovlpData <- list(res_ery$data, res_mac$data, res_mk$data, res_ncd4$data)
# long format: one row per bait ratio, tagged with cell type (X2) and SIP status (X3)
dat1 <- rbind(rbind(cbind(ovlpData[[1]][[1]],"Ery","SIP"),cbind(ovlpData[[1]][[2]],"Ery","non-SIP")),
              rbind(cbind(ovlpData[[2]][[1]],"MacMon","SIP"),cbind(ovlpData[[2]][[2]],"MacMon","non-SIP")),
              rbind(cbind(ovlpData[[3]][[1]],"MK","SIP"),cbind(ovlpData[[3]][[2]],"MK","non-SIP")),
              rbind(cbind(ovlpData[[4]][[1]],"nCD4","SIP"),cbind(ovlpData[[4]][[2]],"nCD4","non-SIP")))
dat1 <- data.frame(dat1)
dat1$X1 <- as.numeric(as.character(dat1$X1))
# Renamed from `mean` / `median` so base::mean and base::median (used in the
# plot aggregations below) are not shadowed by data objects. These frames are
# kept for parity with the original script; the plot recomputes group
# means/medians directly from dat1.
mean_df <- data.frame(cbind(ratios$Mean,c("Ery","Ery","MacMon","MacMon","MK","MK","nCD4","nCD4"),c("SIP PIRs","no-SIP PIRs", "SIP PIRs","no-SIP PIRs","SIP PIRs","no-SIP PIRs","SIP PIRs","no-SIP PIRs")))
median_df <- data.frame(cbind(ratios$Median,c("Ery","Ery","MacMon","MacMon","MK","MK","nCD4","nCD4"),c("SIP PIRs","no-SIP PIRs", "SIP PIRs","no-SIP PIRs","SIP PIRs","no-SIP PIRs","SIP PIRs","no-SIP PIRs")))
mean_df$X1 <- as.numeric(as.character(mean_df$X1))
median_df$X1 <- as.numeric(as.character(median_df$X1))
dat1 <- data.table(dat1)
# Violin plots of the per-bait overlap ratio, faceted by cell type;
# circles mark the group median, triangles (shape 17) the group mean.
plotd <- ggplot(dat1, aes(x=X3, y = X1, fill=X3)) +
  geom_violin() + scale_fill_brewer(palette="Paired") + ylab("% of PIRs overlapping ATAC-seq peaks") +
  xlab("") +
  geom_point(data=dat1[, .("median"=median(X1)), by=X3], aes(y=median), show.legend = F,size = 3)+
  geom_point(data=dat1[, .("mean"=mean(X1)), by=X3], aes(y=mean), show.legend = F,size = 3,shape = 17)+ facet_wrap(.~X2) +
  theme_bw() +
  theme(axis.text=element_text(size=20),axis.title=element_text(size=20)) +
  theme(legend.title = element_blank(),legend.text = element_text(size=20))+
  theme(strip.text.x = element_text(size = 15))
ggsave("ATAC.png", height = 9, width=12, units="in")
ggsave("ATAC.pdf", height =9, width=12, units="in")
write.table(ratios,"pir_atac_seq_ratio_comp_includingNonOlap",quote =F,sep="\t",col.names = T,row.names = F)
|
###
#
# Script to parametrise the RangeShiftR transfer sub-modue CorrRW (correlated random walk)
#
###
# 1.) define function to calculate distribution of settlement and death events per each step
# Per-step settlement / mortality distribution for a correlated random walk.
#   pStepMort: probability of dying during a step.
#   pSettle:   probability of settling after surviving a step.
#   MaxSteps:  number of steps considered.
# Returns a list with `dist` (matrix, columns step / sett / mort giving the
# proportion settling resp. dying within each step) and `mortMaxStep`, the
# probability mass not accounted for within MaxSteps.
pSteps <- function(pStepMort, pSettle, MaxSteps = 10){
  steps <- seq_len(MaxSteps)
  # probability a walker survives a step AND keeps walking
  p_continue <- (1 - pStepMort) * (1 - pSettle)
  carry <- p_continue^(steps - 1)            # still walking when the step starts
  sett  <- carry * (1 - pStepMort) * pSettle # settles within this step
  mort  <- carry * pStepMort                 # dies within this step
  accounted <- sum(sett + mort)              # total mass within MaxSteps
  list(dist = cbind(step = steps, sett = sett, mort = mort),
       mortMaxStep = 1 - accounted)
}
# 2.) use function on an example correlated random walk (CorrRW) and plot results
# get step-wise stats
stepstats <- pSteps(pStepMort = 0.01, pSettle = 0.8, MaxSteps = 6)
# per-step proportions: settling (blue) vs dying (red)
plot( stepstats$dist[,'step'], stepstats$dist[,'sett'], type = 'b', col = "blue", ylim = c(0,0.5), xlab = "step", ylab = "per-step proportion" )
lines(stepstats$dist[,'step'], stepstats$dist[,'mort'], type = 'b', col = "red")
legend("topright", legend = c("settled","died"), col = c("blue","red"), lty = 1)
# cumulative proportions over steps (running totals of columns 2:3)
cum_stepstats <- cbind(step = stepstats$dist[,'step'],
cumsum(as.data.frame(stepstats$dist[,2:3])))
# plot cumulatives
plot( cum_stepstats[,'step'], cum_stepstats[,'sett'], type = 'b', col = "blue", ylim = c(0,1), xlab = "step", ylab = "cumulative proportion" )
lines(cum_stepstats[,'step'], cum_stepstats[,'mort'], type = 'b', col = "red")
legend("topleft", legend = c("settled","died"), col = c("blue","red"), lty = 1)
# 3.) get an estimate of expected dispersal distance (assuming constant settlement probability and step mortality)
# CorrRW parameters:
StepLength <- 4000 # given in meters
Rho <- 0.5 # step-to-step directional correlation of the walk
# expected number of steps: sum over steps of step * P(settle within that step)
(E_nsteps <- as.numeric(stepstats$dist[,'step']%*%stepstats$dist[,'sett']))
# estimate expected dispersal distance
(E_dispdist <- sqrt(E_nsteps*(1+Rho*(E_nsteps-1))*StepLength^2))
|
/scripts/4_wildboar_CorrRW_distances.R
|
no_license
|
FelixNoessler/Individuals-based-modelling-with-RangeShiftR
|
R
| false
| false
| 2,259
|
r
|
###
#
# Script to parametrise the RangeShiftR transfer sub-modue CorrRW (correlated random walk)
#
###
# 1.) define function to calculate distribution of settlement and death events per each step
# Per-step settlement / mortality distribution for a correlated random walk.
#   pStepMort: probability of dying during a step.
#   pSettle:   probability of settling after surviving a step.
#   MaxSteps:  number of steps considered.
# Returns a list with `dist` (matrix, columns step / sett / mort giving the
# proportion settling resp. dying within each step) and `mortMaxStep`, the
# probability mass not accounted for within MaxSteps.
pSteps <- function(pStepMort, pSettle, MaxSteps = 10){
  steps <- seq_len(MaxSteps)
  # probability a walker survives a step AND keeps walking
  p_continue <- (1 - pStepMort) * (1 - pSettle)
  carry <- p_continue^(steps - 1)            # still walking when the step starts
  sett  <- carry * (1 - pStepMort) * pSettle # settles within this step
  mort  <- carry * pStepMort                 # dies within this step
  accounted <- sum(sett + mort)              # total mass within MaxSteps
  list(dist = cbind(step = steps, sett = sett, mort = mort),
       mortMaxStep = 1 - accounted)
}
# 2.) use function on an example correlated random walk (CorrRW) and plot results
# get step-wise stats
stepstats <- pSteps(pStepMort = 0.01, pSettle = 0.8, MaxSteps = 6)
# per-step proportions: settling (blue) vs dying (red)
plot( stepstats$dist[,'step'], stepstats$dist[,'sett'], type = 'b', col = "blue", ylim = c(0,0.5), xlab = "step", ylab = "per-step proportion" )
lines(stepstats$dist[,'step'], stepstats$dist[,'mort'], type = 'b', col = "red")
legend("topright", legend = c("settled","died"), col = c("blue","red"), lty = 1)
# cumulative proportions over steps (running totals of columns 2:3)
cum_stepstats <- cbind(step = stepstats$dist[,'step'],
cumsum(as.data.frame(stepstats$dist[,2:3])))
# plot cumulatives
plot( cum_stepstats[,'step'], cum_stepstats[,'sett'], type = 'b', col = "blue", ylim = c(0,1), xlab = "step", ylab = "cumulative proportion" )
lines(cum_stepstats[,'step'], cum_stepstats[,'mort'], type = 'b', col = "red")
legend("topleft", legend = c("settled","died"), col = c("blue","red"), lty = 1)
# 3.) get an estimate of expected dispersal distance (assuming constant settlement probability and step mortality)
# CorrRW parameters:
StepLength <- 4000 # given in meters
Rho <- 0.5 # step-to-step directional correlation of the walk
# expected number of steps: sum over steps of step * P(settle within that step)
(E_nsteps <- as.numeric(stepstats$dist[,'step']%*%stepstats$dist[,'sett']))
# estimate expected dispersal distance
(E_dispdist <- sqrt(E_nsteps*(1+Rho*(E_nsteps-1))*StepLength^2))
|
# Individual household electric power consumption Data Set
# https://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption
# Downloads the data once, reads only the rows for 2007-02-01 / 2007-02-02
# (2 days x 1440 minutes = 2880 rows), and saves a histogram as plot1.png.
if(!file.exists("./data")){
  print("Directory not found. Creating Data Directory")
  dir.create("./data")}
if(!file.exists("./data/HPC.txt")){
  print("Data file not found. Downloading File...")
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl,destfile = "./data/HPC.zip")
  unzip("./data/HPC.zip",exdir = "./data")
  file.rename("./data/household_power_consumption.txt","./data/HPC.txt")
  file.remove("./data/HPC.zip") }
if(!exists("HPC")){
  print("Reading Household Power Consumption file")
  # read the header row separately so column names survive the skip below
  n1 <- read.table("./data/HPC.txt", sep=";", header = TRUE, nrows=1, stringsAsFactors=FALSE)
  # BUG FIX: `skip` must be (first matching line - 1); skipping the matched
  # line itself dropped the 2007-02-01 00:00 row and pulled in one row of
  # 2007-02-03 at the end. The pattern is anchored to the start of the line
  # (the date is the first ";"-separated field) so no other date can match.
  first_row <- grep("^1/2/2007;", readLines("./data/HPC.txt"))[1]
  HPC <- read.table("./data/HPC.txt", sep = ";", stringsAsFactors=FALSE,
                    na.strings = "?", skip = first_row - 1, nrows = 2880)
  names(HPC) <- names(n1)
  rm(n1) }
HPC[,1] <- as.Date(HPC[,1],"%d/%m/%Y")
# Histogram of global active power (kilowatts)
png(file="plot1.png",width = 480, height = 480)
hist(HPC$Global_active_power,col="red", main = "Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
/Plot1.R
|
no_license
|
Porphyrytic/ExData_Plotting1
|
R
| false
| false
| 1,330
|
r
|
# Individual household electric power consumption Data Set
# https://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption
# Downloads the data once, reads only the rows for 2007-02-01 / 2007-02-02
# (2 days x 1440 minutes = 2880 rows), and saves a histogram as plot1.png.
if(!file.exists("./data")){
  print("Directory not found. Creating Data Directory")
  dir.create("./data")}
if(!file.exists("./data/HPC.txt")){
  print("Data file not found. Downloading File...")
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl,destfile = "./data/HPC.zip")
  unzip("./data/HPC.zip",exdir = "./data")
  file.rename("./data/household_power_consumption.txt","./data/HPC.txt")
  file.remove("./data/HPC.zip") }
if(!exists("HPC")){
  print("Reading Household Power Consumption file")
  # read the header row separately so column names survive the skip below
  n1 <- read.table("./data/HPC.txt", sep=";", header = TRUE, nrows=1, stringsAsFactors=FALSE)
  # BUG FIX: `skip` must be (first matching line - 1); skipping the matched
  # line itself dropped the 2007-02-01 00:00 row and pulled in one row of
  # 2007-02-03 at the end. The pattern is anchored to the start of the line
  # (the date is the first ";"-separated field) so no other date can match.
  first_row <- grep("^1/2/2007;", readLines("./data/HPC.txt"))[1]
  HPC <- read.table("./data/HPC.txt", sep = ";", stringsAsFactors=FALSE,
                    na.strings = "?", skip = first_row - 1, nrows = 2880)
  names(HPC) <- names(n1)
  rm(n1) }
HPC[,1] <- as.Date(HPC[,1],"%d/%m/%Y")
# Histogram of global active power (kilowatts)
png(file="plot1.png",width = 480, height = 480)
hist(HPC$Global_active_power,col="red", main = "Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
# Bayesian optimisation of the "ArbolesAzarosos" (random forest of rpart trees)
# hyperparameters. Works automatically with EXPERIMENT bookkeeping
# (maestro.yaml) and incrementally writes Kaggle submission files.
# Clear the workspace (intentional script-style reset).
rm( list=ls() ) #remove all objects
gc() #garbage collection
require("data.table")
require("rlist")
require("yaml")
require("rpart")
require("parallel")
# packages needed for the Bayesian Optimization
require("DiceKriging")
require("mlrMBO")
# working directory (cloud bucket mount)
setwd( "~/buckets/b1/crudo/" )
kexperimento <- NA # NA on a first run; a concrete value to resume processing
kscript <- "460_arboles_azarosos_BO"
karch_generacion <- "./datasetsOri/paquete_premium_202011.csv"
karch_aplicacion <- "./datasetsOri/paquete_premium_202101.csv"
kBO_iter <- 150 # number of Bayesian-optimisation iterations
# hyperparameter search space (rpart controls + ensemble parameters)
hs <- makeParamSet(
makeNumericParam("cp" , lower= -1 , upper= 0.1),
makeIntegerParam("minsplit" , lower= 1L , upper= 8000L), # trailing L means INTEGER
makeIntegerParam("minbucket", lower= 1L , upper= 2000L),
makeIntegerParam("maxdepth" , lower= 3L , upper= 20L),
makeNumericParam("feature_fraction" , lower= 0.1 , upper= 0.9),
makeIntegerParam("num_trees" , lower= 2L , upper= 100L), # capped at 100 trees
forbidden = quote( minbucket > 0.5*minsplit ) )
ksemilla_azar <- 102191 # put your own seed here
#------------------------------------------------------------------------------
# Keeps the experiment counter: reads ./maestro.yaml, returns the current
# experiment id and writes back the incremented value. The file is left
# read-only between runs, so it is chmod'ed writable first and read-only after.
get_experimento <- function()
{
if( !file.exists( "./maestro.yaml" ) ) cat( file="./maestro.yaml", "experimento: 1000" )
exp <- read_yaml( "./maestro.yaml" )
experimento_actual <- exp$experimento
exp$experimento <- as.integer(exp$experimento + 1)
Sys.chmod( "./maestro.yaml", mode = "0644", use_umask = TRUE)
write_yaml( exp, "./maestro.yaml" )
Sys.chmod( "./maestro.yaml", mode = "0444", use_umask = TRUE) # leave the file read-only
return( experimento_actual )
}
#------------------------------------------------------------------------------
# Appends one record (named list `reg`) as a tab-separated line to a log file,
# writing a header row of field names when the file is first created.
# arch: explicit file path; when NA the variable name of `reg` is used
# under `folder` with extension `ext`.
loguear <- function( reg, arch=NA, folder="./work/", ext=".txt", verbose=TRUE )
{
archivo <- arch
if( is.na(arch) ) archivo <- paste0( folder, substitute( reg), ext )
if( !file.exists( archivo ) ) # write the column titles first
{
linea <- paste0( "fecha\t",
paste( list.names(reg), collapse="\t" ), "\n" )
cat( linea, file=archivo )
}
linea <- paste0( format(Sys.time(), "%Y%m%d %H%M%S"), "\t", # date-time stamp
gsub( ", ", "\t", toString( reg ) ), "\n" )
cat( linea, file=archivo, append=TRUE ) # append the record to the file
if( verbose ) cat( linea ) # echo to the console
}
#------------------------------------------------------------------------------
# Randomly assigns a fold label to each row of `data` (a data.table, modified
# BY REFERENCE via `:=`). `division` gives the relative size of each fold,
# `agrupa` stratifies the sampling within groups (e.g. by class).
particionar <- function( data, division, agrupa="", campo="fold", start=1, seed=NA )
{
if( !is.na( seed) ) set.seed( seed )
bloque <- unlist( mapply( function(x,y) { rep( y, x ) }, division, seq( from=start, length.out=length(division) ) ) )
data[ , (campo) := sample( rep( bloque, ceiling(.N/length(bloque))) )[1:.N],
by= agrupa ]
}
#------------------------------------------------------------------------------
# Trains an ensemble of `param$num_trees` rpart trees, each on a random subset
# of the candidate columns, and returns the profit ("ganancia") obtained on the
# held-out fold when flagging rows whose ensemble "BAJA+2" probability
# exceeds 0.025. Uses global ksemilla_azar as the RNG seed; `param` supplies
# the rpart control values plus feature_fraction and num_trees.
ArbolesAzarosos_Simple <- function( fold_test, data, param, pcampos_buenos )
{
  set.seed(ksemilla_azar)
  # number of columns each tree sees
  qty_campos_a_utilizar <- as.integer( length(pcampos_buenos) * param$feature_fraction )
  # accumulator for the ensemble probabilities on the testing fold
  probabilidad_ensemble <- rep( 0, nrow(data[ fold==fold_test, ]) )
  for( i in 1:param$num_trees )
  {
    campos_random <- sample( pcampos_buenos, qty_campos_a_utilizar )
    formulita <- paste0( "clase_ternaria ~ ", paste( campos_random, collapse=" + ") )
    modelo <- rpart(formulita,
                    data= data[ fold != fold_test, ],
                    xval= 0,
                    control= param )
    # apply the tree to the testing fold and accumulate its probabilities
    prediccion <- predict( modelo, data[ fold==fold_test, ], type = "prob")
    probabilidad_ensemble <- probabilidad_ensemble + prediccion[, "BAJA+2"]
  }
  # probabilities were summed over trees; divide to get the ensemble average
  probabilidad_ensemble <- probabilidad_ensemble / param$num_trees
  # BUG FIX: the gain was previously computed from `prediccion` (the LAST
  # tree only), ignoring the ensemble average built above; score with the
  # ensemble probability instead.
  ganancia_testing <- sum( data[ fold==fold_test ][ probabilidad_ensemble > 0.025,
                                                    ifelse( clase_ternaria=="BAJA+2", 48750, -1250 ) ] )
  return( ganancia_testing )
}
#------------------------------------------------------------------------------
# Cross-validated gain for one hyperparameter set: partitions `data` into
# `qfolds` equal folds (stratified by `pagrupa`) and evaluates each fold
# in parallel with ArbolesAzarosos_Simple.
ArbolesAzarosos_CrossValidation <- function( data, param, pcampos_buenos, qfolds, pagrupa, semilla )
{
divi <- rep( 1, qfolds )
particionar( data, divi, seed=semilla, agrupa=pagrupa )
ganancias <- mcmapply( ArbolesAzarosos_Simple,
seq(qfolds), # 1 2 3 4 5
MoreArgs= list( data, param, pcampos_buenos),
SIMPLIFY= FALSE,
mc.cores= 5 ) # up to 5 on Linux or Mac OS;
# on Windows this MUST be 1 (no forking)
data[ , fold := NULL ]
# return the mean fold gain scaled back up to a full-dataset gain
return( mean( unlist( ganancias )) * qfolds ) # normalise the gain here
}
#------------------------------------------------------------------------------
# Objective function for the Bayesian optimisation. Receives ONLY the
# hyperparameters being tuned (`x`); everything else comes from globals
# (dataset, dapply, GLOBAL_* bookkeeping, klog, kkaggle, ksemilla_azar).
# Returns the cross-validated gain; as a side effect it logs every evaluation
# and writes a Kaggle submission whenever a new best gain is reached.
EstimarGanancia_ArbolesAzarosos <- function( x )
{
campos_buenos <- setdiff( colnames(dataset) , c("clase_ternaria") )
GLOBAL_iteracion <<- GLOBAL_iteracion + 1
xval_folds <- 5
ganancia <- ArbolesAzarosos_CrossValidation( dataset,
param= x,
pcampos_buenos= campos_buenos,
qfolds= xval_folds,
pagrupa= "clase_ternaria",
semilla= ksemilla_azar )
# if the gain beats the best so far, generate the Kaggle submission file
if( ganancia > GLOBAL_ganancia_max )
{
GLOBAL_ganancia_max <<- ganancia # record the new best gain
set.seed(ksemilla_azar)
qty_campos_a_utilizar <- as.integer( length(campos_buenos) * x$feature_fraction )
# accumulator for the ensemble probabilities on the scoring data
probabilidad_ensemble <- rep( 0, nrow(dapply) )
for( i in 1:x$num_trees )
{
campos_random <- sample( campos_buenos, qty_campos_a_utilizar )
campos_random <- paste( campos_random, collapse=" + ")
formulita <- paste0( "clase_ternaria ~ ", campos_random )
modelo <- rpart(formulita,
data= dataset,
xval= 0,
control= x )
# probability of the positive class for each scoring row
prediccion <- predict( modelo, dapply)
probabilidad_ensemble <- probabilidad_ensemble + prediccion[, "BAJA+2"]
}
# probabilities were summed over trees; divide to get the ensemble average
probabilidad_ensemble <- probabilidad_ensemble / x$num_trees
Predicted <- ifelse( probabilidad_ensemble > 0.025, 1, 0 )
entrega <- as.data.table( list( "numero_de_cliente"=dapply$numero_de_cliente,
"Predicted"= Predicted) )
# write the Kaggle submission file
fwrite( entrega,
file= paste0(kkaggle, GLOBAL_iteracion, ".csv" ),
sep= "," )
}
# log this evaluation (hyperparameters + fold count + gain)
xx <- x
xx$xval_folds <- xval_folds
xx$ganancia <- ganancia
loguear( xx, arch= klog )
return( ganancia )
}
#------------------------------------------------------------------------------
# Main program starts here
if( is.na(kexperimento ) ) kexperimento <- get_experimento() # create/number the experiment
# output files for this experiment
kbayesiana <- paste0("./work/E", kexperimento, "_rpart.RDATA" )
klog <- paste0("./work/E", kexperimento, "_rpart_log.txt" )
kkaggle <- paste0("./kaggle/E",kexperimento, "_", kscript, "_" )
GLOBAL_ganancia_max <- -Inf
GLOBAL_iteracion <- 0
# if a log already exists we are resuming: restore iteration count and best gain
if( file.exists(klog) )
{
tabla_log <- fread( klog)
GLOBAL_iteracion <- nrow( tabla_log ) -1
GLOBAL_ganancia_max <- tabla_log[ , max(ganancia) ]
}
# load the datasets
dataset <- fread(karch_generacion) # training data
dapply <- fread(karch_aplicacion) # data the final model is applied to
# Bayesian Optimization configuration starts here
configureMlr( show.learner.output = FALSE)
funcion_optimizar <- EstimarGanancia_ArbolesAzarosos
# configure the Bayesian search over the hyperparameters defined in `hs`
obj.fun <- makeSingleObjectiveFunction(
fn= funcion_optimizar,
minimize= FALSE, # we are MAXIMISING the gain
noisy= TRUE,
par.set= hs,
has.simple.signature = FALSE
)
ctrl <- makeMBOControl( save.on.disk.at.time= 600, save.file.path= kbayesiana)
ctrl <- setMBOControlTermination(ctrl, iters= kBO_iter )
ctrl <- setMBOControlInfill(ctrl, crit= makeMBOInfillCritEI())
surr.km <- makeLearner("regr.km", predict.type= "se", covtype= "matern3_2", control= list(trace= TRUE))
# start (or resume) the Bayesian optimisation
if(!file.exists(kbayesiana)) {
run <- mbo(obj.fun, learner = surr.km, control = ctrl)
} else run <- mboContinue( kbayesiana ) # resume if a saved state exists
quit( save="no" )
|
/src/arbolesazarosos/460_arboles_azarosos_BO.r
|
no_license
|
Chapamar/dm2021b
|
R
| false
| false
| 10,202
|
r
|
# Bayesian optimisation of the "ArbolesAzarosos" (random forest of rpart trees)
# hyperparameters. Works automatically with EXPERIMENT bookkeeping
# (maestro.yaml) and incrementally writes Kaggle submission files.
# Clear the workspace (intentional script-style reset).
rm( list=ls() ) #remove all objects
gc() #garbage collection
require("data.table")
require("rlist")
require("yaml")
require("rpart")
require("parallel")
# packages needed for the Bayesian Optimization
require("DiceKriging")
require("mlrMBO")
# working directory (cloud bucket mount)
setwd( "~/buckets/b1/crudo/" )
kexperimento <- NA # NA on a first run; a concrete value to resume processing
kscript <- "460_arboles_azarosos_BO"
karch_generacion <- "./datasetsOri/paquete_premium_202011.csv"
karch_aplicacion <- "./datasetsOri/paquete_premium_202101.csv"
kBO_iter <- 150 # number of Bayesian-optimisation iterations
# hyperparameter search space (rpart controls + ensemble parameters)
hs <- makeParamSet(
makeNumericParam("cp" , lower= -1 , upper= 0.1),
makeIntegerParam("minsplit" , lower= 1L , upper= 8000L), # trailing L means INTEGER
makeIntegerParam("minbucket", lower= 1L , upper= 2000L),
makeIntegerParam("maxdepth" , lower= 3L , upper= 20L),
makeNumericParam("feature_fraction" , lower= 0.1 , upper= 0.9),
makeIntegerParam("num_trees" , lower= 2L , upper= 100L), # capped at 100 trees
forbidden = quote( minbucket > 0.5*minsplit ) )
ksemilla_azar <- 102191 # put your own seed here
#------------------------------------------------------------------------------
# Keeps the experiment counter: reads ./maestro.yaml, returns the current
# experiment id and writes back the incremented value. The file is left
# read-only between runs, so it is chmod'ed writable first and read-only after.
get_experimento <- function()
{
if( !file.exists( "./maestro.yaml" ) ) cat( file="./maestro.yaml", "experimento: 1000" )
exp <- read_yaml( "./maestro.yaml" )
experimento_actual <- exp$experimento
exp$experimento <- as.integer(exp$experimento + 1)
Sys.chmod( "./maestro.yaml", mode = "0644", use_umask = TRUE)
write_yaml( exp, "./maestro.yaml" )
Sys.chmod( "./maestro.yaml", mode = "0444", use_umask = TRUE) # leave the file read-only
return( experimento_actual )
}
#------------------------------------------------------------------------------
# Appends one record (named list `reg`) as a tab-separated line to a log file,
# writing a header row of field names when the file is first created.
# arch: explicit file path; when NA the variable name of `reg` is used
# under `folder` with extension `ext`.
loguear <- function( reg, arch=NA, folder="./work/", ext=".txt", verbose=TRUE )
{
archivo <- arch
if( is.na(arch) ) archivo <- paste0( folder, substitute( reg), ext )
if( !file.exists( archivo ) ) # write the column titles first
{
linea <- paste0( "fecha\t",
paste( list.names(reg), collapse="\t" ), "\n" )
cat( linea, file=archivo )
}
linea <- paste0( format(Sys.time(), "%Y%m%d %H%M%S"), "\t", # date-time stamp
gsub( ", ", "\t", toString( reg ) ), "\n" )
cat( linea, file=archivo, append=TRUE ) # append the record to the file
if( verbose ) cat( linea ) # echo to the console
}
#------------------------------------------------------------------------------
# Randomly assigns a fold label to each row of `data` (a data.table, modified
# BY REFERENCE via `:=`). `division` gives the relative size of each fold,
# `agrupa` stratifies the sampling within groups (e.g. by class).
particionar <- function( data, division, agrupa="", campo="fold", start=1, seed=NA )
{
if( !is.na( seed) ) set.seed( seed )
bloque <- unlist( mapply( function(x,y) { rep( y, x ) }, division, seq( from=start, length.out=length(division) ) ) )
data[ , (campo) := sample( rep( bloque, ceiling(.N/length(bloque))) )[1:.N],
by= agrupa ]
}
#------------------------------------------------------------------------------
# Trains an ensemble of `param$num_trees` rpart trees, each on a random subset
# of the candidate columns, and returns the profit ("ganancia") obtained on the
# held-out fold when flagging rows whose ensemble "BAJA+2" probability
# exceeds 0.025. Uses global ksemilla_azar as the RNG seed; `param` supplies
# the rpart control values plus feature_fraction and num_trees.
ArbolesAzarosos_Simple <- function( fold_test, data, param, pcampos_buenos )
{
  set.seed(ksemilla_azar)
  # number of columns each tree sees
  qty_campos_a_utilizar <- as.integer( length(pcampos_buenos) * param$feature_fraction )
  # accumulator for the ensemble probabilities on the testing fold
  probabilidad_ensemble <- rep( 0, nrow(data[ fold==fold_test, ]) )
  for( i in 1:param$num_trees )
  {
    campos_random <- sample( pcampos_buenos, qty_campos_a_utilizar )
    formulita <- paste0( "clase_ternaria ~ ", paste( campos_random, collapse=" + ") )
    modelo <- rpart(formulita,
                    data= data[ fold != fold_test, ],
                    xval= 0,
                    control= param )
    # apply the tree to the testing fold and accumulate its probabilities
    prediccion <- predict( modelo, data[ fold==fold_test, ], type = "prob")
    probabilidad_ensemble <- probabilidad_ensemble + prediccion[, "BAJA+2"]
  }
  # probabilities were summed over trees; divide to get the ensemble average
  probabilidad_ensemble <- probabilidad_ensemble / param$num_trees
  # BUG FIX: the gain was previously computed from `prediccion` (the LAST
  # tree only), ignoring the ensemble average built above; score with the
  # ensemble probability instead.
  ganancia_testing <- sum( data[ fold==fold_test ][ probabilidad_ensemble > 0.025,
                                                    ifelse( clase_ternaria=="BAJA+2", 48750, -1250 ) ] )
  return( ganancia_testing )
}
#------------------------------------------------------------------------------
# Cross-validated gain for one hyperparameter set: partitions `data` into
# `qfolds` equal folds (stratified by `pagrupa`) and evaluates each fold
# in parallel with ArbolesAzarosos_Simple.
ArbolesAzarosos_CrossValidation <- function( data, param, pcampos_buenos, qfolds, pagrupa, semilla )
{
divi <- rep( 1, qfolds )
particionar( data, divi, seed=semilla, agrupa=pagrupa )
ganancias <- mcmapply( ArbolesAzarosos_Simple,
seq(qfolds), # 1 2 3 4 5
MoreArgs= list( data, param, pcampos_buenos),
SIMPLIFY= FALSE,
mc.cores= 5 ) # up to 5 on Linux or Mac OS;
# on Windows this MUST be 1 (no forking)
data[ , fold := NULL ]
# return the mean fold gain scaled back up to a full-dataset gain
return( mean( unlist( ganancias )) * qfolds ) # normalise the gain here
}
#------------------------------------------------------------------------------
# Objective function for the Bayesian optimisation. Receives ONLY the
# hyperparameters being tuned (`x`); everything else comes from globals
# (dataset, dapply, GLOBAL_* bookkeeping, klog, kkaggle, ksemilla_azar).
# Returns the cross-validated gain; as a side effect it logs every evaluation
# and writes a Kaggle submission whenever a new best gain is reached.
EstimarGanancia_ArbolesAzarosos <- function( x )
{
campos_buenos <- setdiff( colnames(dataset) , c("clase_ternaria") )
GLOBAL_iteracion <<- GLOBAL_iteracion + 1
xval_folds <- 5
ganancia <- ArbolesAzarosos_CrossValidation( dataset,
param= x,
pcampos_buenos= campos_buenos,
qfolds= xval_folds,
pagrupa= "clase_ternaria",
semilla= ksemilla_azar )
# if the gain beats the best so far, generate the Kaggle submission file
if( ganancia > GLOBAL_ganancia_max )
{
GLOBAL_ganancia_max <<- ganancia # record the new best gain
set.seed(ksemilla_azar)
qty_campos_a_utilizar <- as.integer( length(campos_buenos) * x$feature_fraction )
# accumulator for the ensemble probabilities on the scoring data
probabilidad_ensemble <- rep( 0, nrow(dapply) )
for( i in 1:x$num_trees )
{
campos_random <- sample( campos_buenos, qty_campos_a_utilizar )
campos_random <- paste( campos_random, collapse=" + ")
formulita <- paste0( "clase_ternaria ~ ", campos_random )
modelo <- rpart(formulita,
data= dataset,
xval= 0,
control= x )
# probability of the positive class for each scoring row
prediccion <- predict( modelo, dapply)
probabilidad_ensemble <- probabilidad_ensemble + prediccion[, "BAJA+2"]
}
# probabilities were summed over trees; divide to get the ensemble average
probabilidad_ensemble <- probabilidad_ensemble / x$num_trees
Predicted <- ifelse( probabilidad_ensemble > 0.025, 1, 0 )
entrega <- as.data.table( list( "numero_de_cliente"=dapply$numero_de_cliente,
"Predicted"= Predicted) )
# write the Kaggle submission file
fwrite( entrega,
file= paste0(kkaggle, GLOBAL_iteracion, ".csv" ),
sep= "," )
}
# log this evaluation (hyperparameters + fold count + gain)
xx <- x
xx$xval_folds <- xval_folds
xx$ganancia <- ganancia
loguear( xx, arch= klog )
return( ganancia )
}
#------------------------------------------------------------------------------
# Main program starts here
if( is.na(kexperimento ) ) kexperimento <- get_experimento() # create/number the experiment
# output files for this experiment
kbayesiana <- paste0("./work/E", kexperimento, "_rpart.RDATA" )
klog <- paste0("./work/E", kexperimento, "_rpart_log.txt" )
kkaggle <- paste0("./kaggle/E",kexperimento, "_", kscript, "_" )
GLOBAL_ganancia_max <- -Inf
GLOBAL_iteracion <- 0
# if a log already exists we are resuming: restore iteration count and best gain
if( file.exists(klog) )
{
tabla_log <- fread( klog)
GLOBAL_iteracion <- nrow( tabla_log ) -1
GLOBAL_ganancia_max <- tabla_log[ , max(ganancia) ]
}
# load the datasets
dataset <- fread(karch_generacion) # training data
dapply <- fread(karch_aplicacion) # data the final model is applied to
# Bayesian Optimization configuration starts here
configureMlr( show.learner.output = FALSE)
funcion_optimizar <- EstimarGanancia_ArbolesAzarosos
# configure the Bayesian search over the hyperparameters defined in `hs`
obj.fun <- makeSingleObjectiveFunction(
fn= funcion_optimizar,
minimize= FALSE, # we are MAXIMISING the gain
noisy= TRUE,
par.set= hs,
has.simple.signature = FALSE
)
ctrl <- makeMBOControl( save.on.disk.at.time= 600, save.file.path= kbayesiana)
ctrl <- setMBOControlTermination(ctrl, iters= kBO_iter )
ctrl <- setMBOControlInfill(ctrl, crit= makeMBOInfillCritEI())
surr.km <- makeLearner("regr.km", predict.type= "se", covtype= "matern3_2", control= list(trace= TRUE))
# start (or resume) the Bayesian optimisation
if(!file.exists(kbayesiana)) {
run <- mbo(obj.fun, learner = surr.km, control = ctrl)
} else run <- mboContinue( kbayesiana ) # resume if a saved state exists
quit( save="no" )
|
# Population-level MHC-I presentation analysis on 1000 Genomes HLA genotypes:
# (1) derives "prototype" HLA genotypes per population from the most frequent
# alleles, (2) derives genotypes built from alleles never called in each
# population; for both it computes per-mutation PHBR scores from netMHCpan
# output files and fits logistic regression models via do_logreg().
source("scripts/functions/harmonic_mean.R")
vp_mut_matrix<- readRDS(file="data/vp_mut_matrix.rds")
muts<- readRDS("data/driver_muts13.rds")
driver_muts<- colnames(readRDS("data/mut_matrix.rds"))
source("scripts/functions/do_logreg.R")
# Load genotype data 1kg
pop_HLA<- read.table("downloads/1kg/1000_genomes_hla.tsv",sep="\t",header = T,row.names = 3,stringsAsFactors = F)
idx_amb<- unique(as.numeric(unlist(apply(pop_HLA[,3:8],2,function(x) grep("\\/|\\*|None",x))))) # identify ambiguous samples (e.g. 02/03 format - * labelled - "none"): 116 in total
pop_HLA<- pop_HLA[-idx_amb,]
pops<- pop_HLA$Region
pop_HLA<- pop_HLA[,3:8] # Focus on MHC-I only (MHC-II not complete)
table(pops)
# AFR AMR EAS EUR SAS
# 674 359 511 512 521
####################################
# (1) PROTOTYPES PER POPULATION
####################################
# Get prototypes per population
##################################
# For each population, keep the two most frequent alleles at HLA-A, -B and -C
# (a 6-allele "prototype" genotype) plus their allele frequencies.
proto_1kg<- as.data.frame(matrix(NA,length(unique(pops)),6,dimnames = list(unique(pops),c("A1","A2","B1","B2","C1","C2"))))
proto_1kg_freq<- proto_1kg
for(pop in unique(pops)){
pop_HLA_sel<- pop_HLA[pops==pop,]
freq_A<- sort(prop.table(table(c(pop_HLA_sel$HLA.A.1,pop_HLA_sel$HLA.A.2))),decreasing = T)[1:2]
names(freq_A)<- paste0("HLA-A",names(freq_A))
freq_B<- sort(prop.table(table(c(pop_HLA_sel$HLA.B.1,pop_HLA_sel$HLA.B.2))),decreasing = T)[1:2]
names(freq_B)<- paste0("HLA-B",names(freq_B))
freq_C<- sort(prop.table(table(c(pop_HLA_sel$HLA.C.1,pop_HLA_sel$HLA.C.2))),decreasing = T)[1:2]
names(freq_C)<- paste0("HLA-C",names(freq_C))
proto_1kg[pop,]<- c(names(freq_A),names(freq_B),names(freq_C))
proto_1kg_freq[pop,]<- c(freq_A,freq_B,freq_C)
}
proto_1kg
# A1 A2 B1 B2 C1 C2
# AFR HLA-A02:01 HLA-A23:01 HLA-B53:01 HLA-B35:01 HLA-C04:01 HLA-C16:01
# AMR HLA-A02:01 HLA-A24:02 HLA-B44:03 HLA-B07:02 HLA-C04:01 HLA-C07:02
# EAS HLA-A11:01 HLA-A24:02 HLA-B46:01 HLA-B40:01 HLA-C01:02 HLA-C07:02
# EUR HLA-A02:01 HLA-A03:01 HLA-B07:02 HLA-B44:02 HLA-C04:01 HLA-C07:01
# SAS HLA-A11:01 HLA-A01:01 HLA-B40:06 HLA-B52:01 HLA-C06:02 HLA-C07:02
proto_1kg_freq
# AFR 0.1142433 0.09347181 0.16246291 0.08976261 0.2462908 0.1201780
# AMR 0.2200557 0.14066852 0.07381616 0.05849582 0.1922006 0.1100279
# EAS 0.2338552 0.19373777 0.15264188 0.10958904 0.1937378 0.1712329
# EUR 0.3007812 0.15625000 0.11621094 0.08691406 0.1318359 0.1250000
# SAS 0.1660269 0.16314779 0.09596929 0.09213052 0.1420345 0.1238004
# Calculate matrices & log reg models
######################################
pop_PHBR_ls<- list()
for(pop in rownames(proto_1kg)){
cat(pop," ...","\n")
# Get proto HLA genotype
HLA_proto_mhc1<- as.character(proto_1kg[pop,])
# Calculate PHBR
PHBR_proto_mhc1_mut<- rep(NA,length(driver_muts))
names(PHBR_proto_mhc1_mut)<- driver_muts
for(mut_tmp in driver_muts){
# i indexes the three netMHCpan output files per mutation
for(i in c(1:3)){
# NOTE(review): the .xls files are tab-separated text, not real Excel files
gene_mhc1_filename<- paste0("temp/netMHCPan40/output/",mut_tmp,i,'.xls')
if(!file.exists(gene_mhc1_filename)) next
gene_mhc1<- read.table(gene_mhc1_filename,header = T, stringsAsFactors = F,sep ="\t",quote = "",skip=1)
# Only select peptides with actual mutation present
idx_core<- which(gene_mhc1$Pos%in%0:10) # Can never start at position 11 (12) or higher
idx_core<- intersect(idx_core,which(nchar(gene_mhc1$Peptide)+gene_mhc1$Pos>10)) # Remove pos 0 (1) for 8/9/10-mers, ...
gene_mhc1<- gene_mhc1[idx_core,]
# Get ranks
gene_mhc1_rank<- gene_mhc1[,grep("Rank",colnames(gene_mhc1))]
# allele names come from the first (skipped) header line of the file
colnames(gene_mhc1_rank)<- setdiff(unlist(strsplit(readLines(gene_mhc1_filename,n=1),"\t")),"")
# Calculate PBR: the best (minimum) rank per allele, accumulated over files
if(i==1) PBR_tmp<- apply(gene_mhc1_rank,MARGIN = 2, "min")
else PBR_tmp<- c(PBR_tmp,apply(gene_mhc1_rank,MARGIN = 2, "min"))
}
# NOTE(review): gene_mhc1_filename still holds the i==3 path here, so the
# mutation is skipped only when that last file is missing -- confirm intended
if(!file.exists(gene_mhc1_filename)) next
# restrict the per-allele best ranks to this population's 6-allele genotype
PBR_tmp<- PBR_tmp[HLA_proto_mhc1]
PHBR_proto_mhc1_mut[mut_tmp]<- harmonic_mean(PBR_tmp) # 9.566061 for prototypical (1.91 for wt)
}
# Put proto in matrix: broadcast the same per-mutation PHBR to every row
vp_HLA1_mut_matrix<- vp_mut_matrix
vp_HLA1_mut_matrix[,]<- NA
common_muts<- intersect(names(PHBR_proto_mhc1_mut),colnames(vp_HLA1_mut_matrix))
for(i in 1:nrow(vp_HLA1_mut_matrix)){
vp_HLA1_mut_matrix[i,common_muts]<- PHBR_proto_mhc1_mut[common_muts]
}
# Calculate LR models: once on all mutations, once excluding the 13 drivers
logreg_wp<- do_logreg(vp_mut_matrix,vp_HLA1_mut_matrix,"wp")
idx_mut13<- which(colnames(vp_mut_matrix)%in%muts)
logreg_wp_excl<- do_logreg(vp_mut_matrix[,-idx_mut13],vp_HLA1_mut_matrix[,-idx_mut13],"wp")
logreg<- rbind(wp=logreg_wp,wp_excl=logreg_wp_excl)
# Data in list
pop_PHBR_ls[[pop]]<- list(vp_HLA1_mut_matrix=vp_HLA1_mut_matrix,logreg=logreg)
}
saveRDS(pop_PHBR_ls,"results/data/1kg_LR.rds")
saveRDS(proto_1kg,"results/data/1kg_proto.rds")
#############################################
# (2) LOWEST AF GENOTYPES PER POPULATION
#############################################
# Get genotypes for lowest AF per population
##############################################
# Get TCGA alleles: the allele universe is taken from the header line of the
# BRAF_V600E netMHCpan outputs
TCGA_alleles<- NULL
for(i in 1:3){
gene_mhc1_filename<- paste0("temp/netMHCPan40/output/BRAF_V600E",i,'.xls')
TCGA_alleles<- c(TCGA_alleles,setdiff(unlist(strsplit(readLines(gene_mhc1_filename,n=1),"\t")),""))
}
# Get random alleles that were never called in a population
# NOTE(review): sample() is not seeded here, so these genotypes (and the
# commented output below) are not reproducible across runs
proto_1kg<- as.data.frame(matrix(NA,length(unique(pops)),6,dimnames = list(unique(pops),c("A1","A2","B1","B2","C1","C2"))))
for(pop in unique(pops)){
pop_HLA_sel<- pop_HLA[pops==pop,]
HLA_A<- sample(setdiff(TCGA_alleles[grep("HLA-A",TCGA_alleles)],paste0("HLA-A",c(pop_HLA_sel$HLA.A.1,pop_HLA_sel$HLA.A.2))),2)
HLA_B<- sample(setdiff(TCGA_alleles[grep("HLA-B",TCGA_alleles)],paste0("HLA-B",c(pop_HLA_sel$HLA.B.1,pop_HLA_sel$HLA.B.2))),2)
HLA_C<- sample(setdiff(TCGA_alleles[grep("HLA-C",TCGA_alleles)],paste0("HLA-C",c(pop_HLA_sel$HLA.C.1,pop_HLA_sel$HLA.C.2))),2)
proto_1kg[pop,]<- c(HLA_A,HLA_B,HLA_C)
}
proto_1kg
# A1 A2 B1 B2 C1 C2
# AFR HLA-A11:02 HLA-A02:07 HLA-B15:20 HLA-B55:04 HLA-C07:05 HLA-C16:02
# AMR HLA-A02:03 HLA-A69:01 HLA-B15:58 HLA-B15:12 HLA-C07:17 HLA-C18:01
# EAS HLA-A11:04 HLA-A33:01 HLA-B15:08 HLA-B39:10 HLA-C18:01 HLA-C08:04
# EUR HLA-A11:02 HLA-A02:10 HLA-B15:21 HLA-B73:01 HLA-C14:03 HLA-C07:05
# SAS HLA-A30:04 HLA-A29:02 HLA-B27:03 HLA-B73:01 HLA-C04:04 HLA-C02:10
# Calculate matrices & log reg models
# (same PHBR + logistic regression procedure as section 1, applied to the
# low-frequency genotypes)
######################################
pop_PHBR_ls<- list()
for(pop in rownames(proto_1kg)){
cat(pop," ...","\n")
# Get proto HLA genotype
HLA_proto_mhc1<- as.character(proto_1kg[pop,])
# Calculate PHBR
PHBR_proto_mhc1_mut<- rep(NA,length(driver_muts))
names(PHBR_proto_mhc1_mut)<- driver_muts
for(mut_tmp in driver_muts){
for(i in c(1:3)){
gene_mhc1_filename<- paste0("temp/netMHCPan40/output/",mut_tmp,i,'.xls')
if(!file.exists(gene_mhc1_filename)) next
gene_mhc1<- read.table(gene_mhc1_filename,header = T, stringsAsFactors = F,sep ="\t",quote = "",skip=1)
# Only select peptides with actual mutation present
idx_core<- which(gene_mhc1$Pos%in%0:10) # Can never start at position 11 (12) or higher
idx_core<- intersect(idx_core,which(nchar(gene_mhc1$Peptide)+gene_mhc1$Pos>10)) # Remove pos 0 (1) for 8/9/10-mers, ...
gene_mhc1<- gene_mhc1[idx_core,]
# Get ranks
gene_mhc1_rank<- gene_mhc1[,grep("Rank",colnames(gene_mhc1))]
colnames(gene_mhc1_rank)<- setdiff(unlist(strsplit(readLines(gene_mhc1_filename,n=1),"\t")),"")
# Calculate PBR
if(i==1) PBR_tmp<- apply(gene_mhc1_rank,MARGIN = 2, "min")
else PBR_tmp<- c(PBR_tmp,apply(gene_mhc1_rank,MARGIN = 2, "min"))
}
# NOTE(review): same last-filename-only skip as in section 1 -- confirm intended
if(!file.exists(gene_mhc1_filename)) next
PBR_tmp<- PBR_tmp[HLA_proto_mhc1]
PHBR_proto_mhc1_mut[mut_tmp]<- harmonic_mean(PBR_tmp) # 9.566061 for prototypical (1.91 for wt)
}
# Put proto in matrix
vp_HLA1_mut_matrix<- vp_mut_matrix
vp_HLA1_mut_matrix[,]<- NA
common_muts<- intersect(names(PHBR_proto_mhc1_mut),colnames(vp_HLA1_mut_matrix))
for(i in 1:nrow(vp_HLA1_mut_matrix)){
vp_HLA1_mut_matrix[i,common_muts]<- PHBR_proto_mhc1_mut[common_muts]
}
# Calculate LR models
logreg_wp<- do_logreg(vp_mut_matrix,vp_HLA1_mut_matrix,"wp")
idx_mut13<- which(colnames(vp_mut_matrix)%in%muts)
logreg_wp_excl<- do_logreg(vp_mut_matrix[,-idx_mut13],vp_HLA1_mut_matrix[,-idx_mut13],"wp")
logreg<- rbind(wp=logreg_wp,wp_excl=logreg_wp_excl)
# Data in list
pop_PHBR_ls[[pop]]<- list(vp_HLA1_mut_matrix=vp_HLA1_mut_matrix,logreg=logreg)
}
saveRDS(pop_PHBR_ls,"results/data/1kg_LR_lowFreq.rds")
saveRDS(proto_1kg,"results/data/1kg_proto_lowFreq.rds")
|
/scripts/manuscript_1kg.R
|
no_license
|
CCGGlab/mhc_driver
|
R
| false
| false
| 8,643
|
r
|
# Population-level MHC-I presentation analysis on 1000 Genomes HLA genotypes:
# (1) derives "prototype" HLA genotypes per population from the most frequent
# alleles, (2) derives genotypes built from alleles never called in each
# population; for both it computes per-mutation PHBR scores from netMHCpan
# output files and fits logistic regression models via do_logreg().
source("scripts/functions/harmonic_mean.R")
vp_mut_matrix<- readRDS(file="data/vp_mut_matrix.rds")
muts<- readRDS("data/driver_muts13.rds")
driver_muts<- colnames(readRDS("data/mut_matrix.rds"))
source("scripts/functions/do_logreg.R")
# Load genotype data 1kg
pop_HLA<- read.table("downloads/1kg/1000_genomes_hla.tsv",sep="\t",header = T,row.names = 3,stringsAsFactors = F)
idx_amb<- unique(as.numeric(unlist(apply(pop_HLA[,3:8],2,function(x) grep("\\/|\\*|None",x))))) # identify ambiguous samples (e.g. 02/03 format - * labelled - "none"): 116 in total
pop_HLA<- pop_HLA[-idx_amb,]
pops<- pop_HLA$Region
pop_HLA<- pop_HLA[,3:8] # Focus on MHC-I only (MHC-II not complete)
table(pops)
# AFR AMR EAS EUR SAS
# 674 359 511 512 521
####################################
# (1) PROTOTYPES PER POPULATION
####################################
# Get prototypes per population
##################################
# For each population, keep the two most frequent alleles at HLA-A, -B and -C
# (a 6-allele "prototype" genotype) plus their allele frequencies.
proto_1kg<- as.data.frame(matrix(NA,length(unique(pops)),6,dimnames = list(unique(pops),c("A1","A2","B1","B2","C1","C2"))))
proto_1kg_freq<- proto_1kg
for(pop in unique(pops)){
pop_HLA_sel<- pop_HLA[pops==pop,]
freq_A<- sort(prop.table(table(c(pop_HLA_sel$HLA.A.1,pop_HLA_sel$HLA.A.2))),decreasing = T)[1:2]
names(freq_A)<- paste0("HLA-A",names(freq_A))
freq_B<- sort(prop.table(table(c(pop_HLA_sel$HLA.B.1,pop_HLA_sel$HLA.B.2))),decreasing = T)[1:2]
names(freq_B)<- paste0("HLA-B",names(freq_B))
freq_C<- sort(prop.table(table(c(pop_HLA_sel$HLA.C.1,pop_HLA_sel$HLA.C.2))),decreasing = T)[1:2]
names(freq_C)<- paste0("HLA-C",names(freq_C))
proto_1kg[pop,]<- c(names(freq_A),names(freq_B),names(freq_C))
proto_1kg_freq[pop,]<- c(freq_A,freq_B,freq_C)
}
proto_1kg
# A1 A2 B1 B2 C1 C2
# AFR HLA-A02:01 HLA-A23:01 HLA-B53:01 HLA-B35:01 HLA-C04:01 HLA-C16:01
# AMR HLA-A02:01 HLA-A24:02 HLA-B44:03 HLA-B07:02 HLA-C04:01 HLA-C07:02
# EAS HLA-A11:01 HLA-A24:02 HLA-B46:01 HLA-B40:01 HLA-C01:02 HLA-C07:02
# EUR HLA-A02:01 HLA-A03:01 HLA-B07:02 HLA-B44:02 HLA-C04:01 HLA-C07:01
# SAS HLA-A11:01 HLA-A01:01 HLA-B40:06 HLA-B52:01 HLA-C06:02 HLA-C07:02
proto_1kg_freq
# AFR 0.1142433 0.09347181 0.16246291 0.08976261 0.2462908 0.1201780
# AMR 0.2200557 0.14066852 0.07381616 0.05849582 0.1922006 0.1100279
# EAS 0.2338552 0.19373777 0.15264188 0.10958904 0.1937378 0.1712329
# EUR 0.3007812 0.15625000 0.11621094 0.08691406 0.1318359 0.1250000
# SAS 0.1660269 0.16314779 0.09596929 0.09213052 0.1420345 0.1238004
# Calculate matrices & log reg models
######################################
pop_PHBR_ls<- list()
for(pop in rownames(proto_1kg)){
cat(pop," ...","\n")
# Get proto HLA genotype
HLA_proto_mhc1<- as.character(proto_1kg[pop,])
# Calculate PHBR
PHBR_proto_mhc1_mut<- rep(NA,length(driver_muts))
names(PHBR_proto_mhc1_mut)<- driver_muts
for(mut_tmp in driver_muts){
# i indexes the three netMHCpan output files per mutation
for(i in c(1:3)){
# NOTE(review): the .xls files are tab-separated text, not real Excel files
gene_mhc1_filename<- paste0("temp/netMHCPan40/output/",mut_tmp,i,'.xls')
if(!file.exists(gene_mhc1_filename)) next
gene_mhc1<- read.table(gene_mhc1_filename,header = T, stringsAsFactors = F,sep ="\t",quote = "",skip=1)
# Only select peptides with actual mutation present
idx_core<- which(gene_mhc1$Pos%in%0:10) # Can never start at position 11 (12) or higher
idx_core<- intersect(idx_core,which(nchar(gene_mhc1$Peptide)+gene_mhc1$Pos>10)) # Remove pos 0 (1) for 8/9/10-mers, ...
gene_mhc1<- gene_mhc1[idx_core,]
# Get ranks
gene_mhc1_rank<- gene_mhc1[,grep("Rank",colnames(gene_mhc1))]
# allele names come from the first (skipped) header line of the file
colnames(gene_mhc1_rank)<- setdiff(unlist(strsplit(readLines(gene_mhc1_filename,n=1),"\t")),"")
# Calculate PBR: the best (minimum) rank per allele, accumulated over files
if(i==1) PBR_tmp<- apply(gene_mhc1_rank,MARGIN = 2, "min")
else PBR_tmp<- c(PBR_tmp,apply(gene_mhc1_rank,MARGIN = 2, "min"))
}
# NOTE(review): gene_mhc1_filename still holds the i==3 path here, so the
# mutation is skipped only when that last file is missing -- confirm intended
if(!file.exists(gene_mhc1_filename)) next
# restrict the per-allele best ranks to this population's 6-allele genotype
PBR_tmp<- PBR_tmp[HLA_proto_mhc1]
PHBR_proto_mhc1_mut[mut_tmp]<- harmonic_mean(PBR_tmp) # 9.566061 for prototypical (1.91 for wt)
}
# Put proto in matrix: broadcast the same per-mutation PHBR to every row
vp_HLA1_mut_matrix<- vp_mut_matrix
vp_HLA1_mut_matrix[,]<- NA
common_muts<- intersect(names(PHBR_proto_mhc1_mut),colnames(vp_HLA1_mut_matrix))
for(i in 1:nrow(vp_HLA1_mut_matrix)){
vp_HLA1_mut_matrix[i,common_muts]<- PHBR_proto_mhc1_mut[common_muts]
}
# Calculate LR models: once on all mutations, once excluding the 13 drivers
logreg_wp<- do_logreg(vp_mut_matrix,vp_HLA1_mut_matrix,"wp")
idx_mut13<- which(colnames(vp_mut_matrix)%in%muts)
logreg_wp_excl<- do_logreg(vp_mut_matrix[,-idx_mut13],vp_HLA1_mut_matrix[,-idx_mut13],"wp")
logreg<- rbind(wp=logreg_wp,wp_excl=logreg_wp_excl)
# Data in list
pop_PHBR_ls[[pop]]<- list(vp_HLA1_mut_matrix=vp_HLA1_mut_matrix,logreg=logreg)
}
saveRDS(pop_PHBR_ls,"results/data/1kg_LR.rds")
saveRDS(proto_1kg,"results/data/1kg_proto.rds")
#############################################
# (2) LOWEST AF GENOTYPES PER POPULATION
#############################################
# Get genotypes for lowest AF per population
##############################################
# Get TCGA alleles: the allele universe is taken from the header line of the
# BRAF_V600E netMHCpan outputs
TCGA_alleles<- NULL
for(i in 1:3){
gene_mhc1_filename<- paste0("temp/netMHCPan40/output/BRAF_V600E",i,'.xls')
TCGA_alleles<- c(TCGA_alleles,setdiff(unlist(strsplit(readLines(gene_mhc1_filename,n=1),"\t")),""))
}
# Get random alleles that were never called in a population
# NOTE(review): sample() is not seeded here, so these genotypes (and the
# commented output below) are not reproducible across runs
proto_1kg<- as.data.frame(matrix(NA,length(unique(pops)),6,dimnames = list(unique(pops),c("A1","A2","B1","B2","C1","C2"))))
for(pop in unique(pops)){
pop_HLA_sel<- pop_HLA[pops==pop,]
HLA_A<- sample(setdiff(TCGA_alleles[grep("HLA-A",TCGA_alleles)],paste0("HLA-A",c(pop_HLA_sel$HLA.A.1,pop_HLA_sel$HLA.A.2))),2)
HLA_B<- sample(setdiff(TCGA_alleles[grep("HLA-B",TCGA_alleles)],paste0("HLA-B",c(pop_HLA_sel$HLA.B.1,pop_HLA_sel$HLA.B.2))),2)
HLA_C<- sample(setdiff(TCGA_alleles[grep("HLA-C",TCGA_alleles)],paste0("HLA-C",c(pop_HLA_sel$HLA.C.1,pop_HLA_sel$HLA.C.2))),2)
proto_1kg[pop,]<- c(HLA_A,HLA_B,HLA_C)
}
proto_1kg
# A1 A2 B1 B2 C1 C2
# AFR HLA-A11:02 HLA-A02:07 HLA-B15:20 HLA-B55:04 HLA-C07:05 HLA-C16:02
# AMR HLA-A02:03 HLA-A69:01 HLA-B15:58 HLA-B15:12 HLA-C07:17 HLA-C18:01
# EAS HLA-A11:04 HLA-A33:01 HLA-B15:08 HLA-B39:10 HLA-C18:01 HLA-C08:04
# EUR HLA-A11:02 HLA-A02:10 HLA-B15:21 HLA-B73:01 HLA-C14:03 HLA-C07:05
# SAS HLA-A30:04 HLA-A29:02 HLA-B27:03 HLA-B73:01 HLA-C04:04 HLA-C02:10
# Calculate matrices & log reg models
# (same PHBR + logistic regression procedure as section 1, applied to the
# low-frequency genotypes)
######################################
pop_PHBR_ls<- list()
for(pop in rownames(proto_1kg)){
cat(pop," ...","\n")
# Get proto HLA genotype
HLA_proto_mhc1<- as.character(proto_1kg[pop,])
# Calculate PHBR
PHBR_proto_mhc1_mut<- rep(NA,length(driver_muts))
names(PHBR_proto_mhc1_mut)<- driver_muts
for(mut_tmp in driver_muts){
for(i in c(1:3)){
gene_mhc1_filename<- paste0("temp/netMHCPan40/output/",mut_tmp,i,'.xls')
if(!file.exists(gene_mhc1_filename)) next
gene_mhc1<- read.table(gene_mhc1_filename,header = T, stringsAsFactors = F,sep ="\t",quote = "",skip=1)
# Only select peptides with actual mutation present
idx_core<- which(gene_mhc1$Pos%in%0:10) # Can never start at position 11 (12) or higher
idx_core<- intersect(idx_core,which(nchar(gene_mhc1$Peptide)+gene_mhc1$Pos>10)) # Remove pos 0 (1) for 8/9/10-mers, ...
gene_mhc1<- gene_mhc1[idx_core,]
# Get ranks
gene_mhc1_rank<- gene_mhc1[,grep("Rank",colnames(gene_mhc1))]
colnames(gene_mhc1_rank)<- setdiff(unlist(strsplit(readLines(gene_mhc1_filename,n=1),"\t")),"")
# Calculate PBR
if(i==1) PBR_tmp<- apply(gene_mhc1_rank,MARGIN = 2, "min")
else PBR_tmp<- c(PBR_tmp,apply(gene_mhc1_rank,MARGIN = 2, "min"))
}
# NOTE(review): same last-filename-only skip as in section 1 -- confirm intended
if(!file.exists(gene_mhc1_filename)) next
PBR_tmp<- PBR_tmp[HLA_proto_mhc1]
PHBR_proto_mhc1_mut[mut_tmp]<- harmonic_mean(PBR_tmp) # 9.566061 for prototypical (1.91 for wt)
}
# Put proto in matrix
vp_HLA1_mut_matrix<- vp_mut_matrix
vp_HLA1_mut_matrix[,]<- NA
common_muts<- intersect(names(PHBR_proto_mhc1_mut),colnames(vp_HLA1_mut_matrix))
for(i in 1:nrow(vp_HLA1_mut_matrix)){
vp_HLA1_mut_matrix[i,common_muts]<- PHBR_proto_mhc1_mut[common_muts]
}
# Calculate LR models
logreg_wp<- do_logreg(vp_mut_matrix,vp_HLA1_mut_matrix,"wp")
idx_mut13<- which(colnames(vp_mut_matrix)%in%muts)
logreg_wp_excl<- do_logreg(vp_mut_matrix[,-idx_mut13],vp_HLA1_mut_matrix[,-idx_mut13],"wp")
logreg<- rbind(wp=logreg_wp,wp_excl=logreg_wp_excl)
# Data in list
pop_PHBR_ls[[pop]]<- list(vp_HLA1_mut_matrix=vp_HLA1_mut_matrix,logreg=logreg)
}
saveRDS(pop_PHBR_ls,"results/data/1kg_LR_lowFreq.rds")
saveRDS(proto_1kg,"results/data/1kg_proto_lowFreq.rds")
|
# Lab 2
# Created by Ye (Eric) Wang
# Modified largely from the code by Prof. Jerry Reiter
# on 09/01/2015
# Teaching script: exploratory data analysis of CPS survey data
# (tables, histograms, box plots, QQ plots, ANOVA, scatter plots).
# 0. Optional Assignment of Lab 1
# 1. Install R Packages
#******************************************************
# Install from CRAN ("randomForest")
# or install from local files ("lrpd")
library("randomForest")
# NOTE(review): "lrpd" is a local (non-CRAN) package; install it from file first
library("lrpd")
#******************************************************
# 2. Set working directory
#******************************************************
# This returns the current directory
getwd()
# Change the directory
# NOTE(review): hard-coded user-specific path; adjust for your own machine
setwd("C:/Users/Eric Wang/Dropbox/DukePhD Semester 3/STA 201 (TA)/Lab 2")
#******************************************************
# 3. Read data
#******************************************************
cpsdata = read.csv("cpsdata.txt", header = T)
#******************************************************
# 4. Exploratory Data Analysis (EDA)
#******************************************************
#Let's look at the first 5 rows of the data file.
#Let's delete the first column which is redundant
#Let's look at the first 5 values of the first column (total alimony payments in the house)
#You can get summaries of all the variables at once
summary(cpsdata)
#Let's list the names of the variables in the file -- the actual definitions are in the Sakai folder
#Let's get some basics about the file. First, how many rows (people) does it have.
nrow(cpsdata)
# Now the number of columns
ncol(cpsdata)
#Let's see how many people are males (ASEX==1) and females (ASEX==2) on the file.
table(cpsdata$ASEX)
#Really, we want percentages rather than counts. So, we divide by the sample size.
table(cpsdata$ASEX)/nrow(cpsdata)
#Note how we divide every element of the vector by the sample size--R is smart enough to do so automatically!
#This result has a messy appearance. Let's round the values to the nearest .001
round(table(cpsdata$ASEX)/nrow(cpsdata), 3)
#Let's also take into account the counts for marital status (1 = married, 4 = widowed, 5 = divorced, 6 = separated, 7 = single;
mytable <- table(cpsdata$ASEX,cpsdata$AMARITL) # A contingency table
margin.table(mytable, 1)
margin.table(mytable, 2)
round(prop.table(mytable),3) # cell percentages
round(prop.table(mytable, 1),3) # row percentages
round(prop.table(mytable, 2),3) # column percentages
#We'll look a lot at income (HTOTVAL). Let's make a histogram.
# NOTE(review): attach() is discouraged; prefer cpsdata$HTOTVAL or with(cpsdata, ...)
attach(cpsdata)
hist(HTOTVAL)
#It is always good practice to add titles to graphs
hist(HTOTVAL, xlab="Income", main = "Histogram of income")
#If you want the vertical axis of the histogram to report density rather than counts
hist(HTOTVAL, xlab="Income", main = "Histogram of income", prob=T)
#It is a good idea to examine histograms with greater numbers of bins than the default, so
#that you can see more detail about the distribution. Here is a histogram with 50 bins.
hist(HTOTVAL, xlab="Income", main = "Histogram of income", prob=T, breaks = 50)
#Now 100 bins
hist(HTOTVAL, xlab="Income", main = "Histogram of income", prob=T, breaks = 100)
#You can estimate the density of income from the data using
incdens = density(HTOTVAL)
#You can plot this curve on top of the histogram (in orange) using
lines(incdens, col = "orange")
#The density estimator is based on collections of normal distributions. If you change the
#standard deviation of those normal distributions, you can get different pictures. This can
#be useful if the density is ragged and you want to see a smoother version. Here is the
#density with twice the standard deviation as the default. You can change the number after
#adjust to get other pictures.
incdens = density(HTOTVAL, adjust = 2)
lines(incdens, col = "red")
#Box plots are useful for seeing distributions of data. Here is a box plot of income.
boxplot(HTOTVAL, xlab = "Income")
#They are even more useful for comparing distributions for people in different categories.
#Here is a side-by-side box plot of income for men and women
boxplot(HTOTVAL~ASEX, xlab = "Sex (1 = male, 2 = female)")
#Now for marital status
boxplot(HTOTVAL~AMARITL, xlab = "Marital Status")
#If you ever want to close the graphics window and start over, type
dev.off()
#Quantile-quantile plots are especially useful for comparing distributions
#The most common QQ plot is a comparison of the variable against a normal distribution.
qqnorm(HTOTVAL)
#If the variable follows a normal, the QQ plot should show a straight line. Otherwise, it
#will show a curve, just like the plot for income. You can add the straight line indicating
#what should appear if the data were normally distributed using
qqline(HTOTVAL)
# Remove HTOTVAL that are no greater than 0 and do a power 0.3 transformation
# then do the QQ plot again
qqnorm((HTOTVAL[HTOTVAL>0])^.3)
qqline((HTOTVAL[HTOTVAL>0])^.3)
# Group the data by sex and do the above once again
qqnorm((HTOTVAL[HTOTVAL>0&ASEX==1])^.3)
qqline((HTOTVAL[HTOTVAL>0&ASEX==1])^.3)
qqnorm((HTOTVAL[HTOTVAL>0&ASEX==2])^.3)
qqline((HTOTVAL[HTOTVAL>0&ASEX==2])^.3)
# ANOVA
# NOTE(review): ASEX is numeric (coded 1/2) throughout this script; wrap it in
# factor() for a proper one-way ANOVA -- confirm intended
summary(aov(HTOTVAL~ASEX))
#For two continuous variables, the scatter plot is an effective EDA tool. Let's plot income
#versus property taxes.
plot(y=HTOTVAL, x=PROPTAX, xlab="Property Tax", ylab="Income", main = "Income versus property tax")
#Let's remove all the people who don't own a home (i.e., have taxes == 0)
plot(y=HTOTVAL[PROPTAX>0], x=PROPTAX[PROPTAX>0], ylab="Income", xlab = "Property tax", main = "Income versus property tax for homeowners")
#We can add a smooth trend to the data using the lowess command.
lines(lowess(HTOTVAL[PROPTAX>0]~PROPTAX[PROPTAX>0]))
#to get a matrix of scatter plots involving multiple variables, use
pairs(HTOTVAL~PROPTAX+SSVAL+AHGA)
#what if we wanted the relationship between income and education for males only?
plot(HTOTVAL[ASEX==1] ~ AHGA[ASEX==1], xlab = "Education", ylab = "Income", main = "Income and education for males only")
#now for females only
plot(HTOTVAL[ASEX==2] ~ AHGA[ASEX==2], xlab = "Education", ylab = "Income", main = "Income and education for females only")
#we can put 2 graphs on the same page (as 2 rows and 1 column) by using
par(mfcol=c(2,1))
#then, we call for the two graphs
plot(HTOTVAL[ASEX==1] ~ AHGA[ASEX==1], xlab = "Education", ylab = "Income", main = "Income and education for males")
plot(HTOTVAL[ASEX==2] ~ AHGA[ASEX==2], xlab = "Education", ylab = "Income", main = "Income and education for females")
#get back to one graph per page
par(mfcol = c(1,1))
#******************************************************
# Practice
#******************************************************
# 1. Draw boxplots of HTOTVAL for different ARACE
# 2. Check the normality of HTOTVAL for each ARACE (race), draw the QQ plots
# 3. Summarize the results of the one-way ANOVA of HTOTVAL against ARACE
# 4. Explain the quantities in the results
#******************************************************
|
/Lab2_R_Script.R
|
no_license
|
ericyewang/Duke-STA-210
|
R
| false
| false
| 6,878
|
r
|
# Lab 2
# Created by Ye (Eric) Wang
# Modified largely from the code by Prof. Jerry Reiter
# on 09/01/2015
# Teaching script: exploratory data analysis of CPS survey data
# (tables, histograms, box plots, QQ plots, ANOVA, scatter plots).
# 0. Optional Assignment of Lab 1
# 1. Install R Packages
#******************************************************
# Install from CRAN ("randomForest")
# or install from local files ("lrpd")
library("randomForest")
# NOTE(review): "lrpd" is a local (non-CRAN) package; install it from file first
library("lrpd")
#******************************************************
# 2. Set working directory
#******************************************************
# This returns the current directory
getwd()
# Change the directory
# NOTE(review): hard-coded user-specific path; adjust for your own machine
setwd("C:/Users/Eric Wang/Dropbox/DukePhD Semester 3/STA 201 (TA)/Lab 2")
#******************************************************
# 3. Read data
#******************************************************
cpsdata = read.csv("cpsdata.txt", header = T)
#******************************************************
# 4. Exploratory Data Analysis (EDA)
#******************************************************
#Let's look at the first 5 rows of the data file.
#Let's delete the first column which is redundant
#Let's look at the first 5 values of the first column (total alimony payments in the house)
#You can get summaries of all the variables at once
summary(cpsdata)
#Let's list the names of the variables in the file -- the actual definitions are in the Sakai folder
#Let's get some basics about the file. First, how many rows (people) does it have.
nrow(cpsdata)
# Now the number of columns
ncol(cpsdata)
#Let's see how many people are males (ASEX==1) and females (ASEX==2) on the file.
table(cpsdata$ASEX)
#Really, we want percentages rather than counts. So, we divide by the sample size.
table(cpsdata$ASEX)/nrow(cpsdata)
#Note how we divide every element of the vector by the sample size--R is smart enough to do so automatically!
#This result has a messy appearance. Let's round the values to the nearest .001
round(table(cpsdata$ASEX)/nrow(cpsdata), 3)
#Let's also take into account the counts for marital status (1 = married, 4 = widowed, 5 = divorced, 6 = separated, 7 = single;
mytable <- table(cpsdata$ASEX,cpsdata$AMARITL) # A contingency table
margin.table(mytable, 1)
margin.table(mytable, 2)
round(prop.table(mytable),3) # cell percentages
round(prop.table(mytable, 1),3) # row percentages
round(prop.table(mytable, 2),3) # column percentages
#We'll look a lot at income (HTOTVAL). Let's make a histogram.
# NOTE(review): attach() is discouraged; prefer cpsdata$HTOTVAL or with(cpsdata, ...)
attach(cpsdata)
hist(HTOTVAL)
#It is always good practice to add titles to graphs
hist(HTOTVAL, xlab="Income", main = "Histogram of income")
#If you want the vertical axis of the histogram to report density rather than counts
hist(HTOTVAL, xlab="Income", main = "Histogram of income", prob=T)
#It is a good idea to examine histograms with greater numbers of bins than the default, so
#that you can see more detail about the distribution. Here is a histogram with 50 bins.
hist(HTOTVAL, xlab="Income", main = "Histogram of income", prob=T, breaks = 50)
#Now 100 bins
hist(HTOTVAL, xlab="Income", main = "Histogram of income", prob=T, breaks = 100)
#You can estimate the density of income from the data using
incdens = density(HTOTVAL)
#You can plot this curve on top of the histogram (in orange) using
lines(incdens, col = "orange")
#The density estimator is based on collections of normal distributions. If you change the
#standard deviation of those normal distributions, you can get different pictures. This can
#be useful if the density is ragged and you want to see a smoother version. Here is the
#density with twice the standard deviation as the default. You can change the number after
#adjust to get other pictures.
incdens = density(HTOTVAL, adjust = 2)
lines(incdens, col = "red")
#Box plots are useful for seeing distributions of data. Here is a box plot of income.
boxplot(HTOTVAL, xlab = "Income")
#They are even more useful for comparing distributions for people in different categories.
#Here is a side-by-side box plot of income for men and women
boxplot(HTOTVAL~ASEX, xlab = "Sex (1 = male, 2 = female)")
#Now for marital status
boxplot(HTOTVAL~AMARITL, xlab = "Marital Status")
#If you ever want to close the graphics window and start over, type
dev.off()
#Quantile-quantile plots are especially useful for comparing distributions
#The most common QQ plot is a comparison of the variable against a normal distribution.
qqnorm(HTOTVAL)
#If the variable follows a normal, the QQ plot should show a straight line. Otherwise, it
#will show a curve, just like the plot for income. You can add the straight line indicating
#what should appear if the data were normally distributed using
qqline(HTOTVAL)
# Remove HTOTVAL that are no greater than 0 and do a power 0.3 transformation
# then do the QQ plot again
qqnorm((HTOTVAL[HTOTVAL>0])^.3)
qqline((HTOTVAL[HTOTVAL>0])^.3)
# Group the data by sex and do the above once again
qqnorm((HTOTVAL[HTOTVAL>0&ASEX==1])^.3)
qqline((HTOTVAL[HTOTVAL>0&ASEX==1])^.3)
qqnorm((HTOTVAL[HTOTVAL>0&ASEX==2])^.3)
qqline((HTOTVAL[HTOTVAL>0&ASEX==2])^.3)
# ANOVA
# NOTE(review): ASEX is numeric (coded 1/2) throughout this script; wrap it in
# factor() for a proper one-way ANOVA -- confirm intended
summary(aov(HTOTVAL~ASEX))
#For two continuous variables, the scatter plot is an effective EDA tool. Let's plot income
#versus property taxes.
plot(y=HTOTVAL, x=PROPTAX, xlab="Property Tax", ylab="Income", main = "Income versus property tax")
#Let's remove all the people who don't own a home (i.e., have taxes == 0)
plot(y=HTOTVAL[PROPTAX>0], x=PROPTAX[PROPTAX>0], ylab="Income", xlab = "Property tax", main = "Income versus property tax for homeowners")
#We can add a smooth trend to the data using the lowess command.
lines(lowess(HTOTVAL[PROPTAX>0]~PROPTAX[PROPTAX>0]))
#to get a matrix of scatter plots involving multiple variables, use
pairs(HTOTVAL~PROPTAX+SSVAL+AHGA)
#what if we wanted the relationship between income and education for males only?
plot(HTOTVAL[ASEX==1] ~ AHGA[ASEX==1], xlab = "Education", ylab = "Income", main = "Income and education for males only")
#now for females only
plot(HTOTVAL[ASEX==2] ~ AHGA[ASEX==2], xlab = "Education", ylab = "Income", main = "Income and education for females only")
#we can put 2 graphs on the same page (as 2 rows and 1 column) by using
par(mfcol=c(2,1))
#then, we call for the two graphs
plot(HTOTVAL[ASEX==1] ~ AHGA[ASEX==1], xlab = "Education", ylab = "Income", main = "Income and education for males")
plot(HTOTVAL[ASEX==2] ~ AHGA[ASEX==2], xlab = "Education", ylab = "Income", main = "Income and education for females")
#get back to one graph per page
par(mfcol = c(1,1))
#******************************************************
# Practice
#******************************************************
# 1. Draw boxplots of HTOTVAL for different ARACE
# 2. Check the normality of HTOTVAL for each ARACE (race), draw the QQ plots
# 3. Summarize the results of the one-way ANOVA of HTOTVAL against ARACE
# 4. Explain the quantities in the results
#******************************************************
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gbmExtras.R
\name{relative.influence.noprint}
\alias{relative.influence.noprint}
\title{relative.influence.noprint
Get relative influence from gbm fit}
\usage{
relative.influence.noprint(object, n.trees, scale. = FALSE, sort. = FALSE)
}
\arguments{
\item{object}{a gbm fit object}
\item{n.trees}{number of trees to be used}
\item{scale.}{boolean specifying if importance should be scaled}
\item{sort.}{boolean specifying if importance should be sorted}
}
\description{
relative.influence.noprint
Get relative influence from gbm fit
}
|
/man/relative.influence.noprint.Rd
|
no_license
|
kaixinhuaihuai/OncoCast
|
R
| false
| true
| 615
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gbmExtras.R
\name{relative.influence.noprint}
\alias{relative.influence.noprint}
\title{relative.influence.noprint
Get relative influence from gbm fit}
\usage{
relative.influence.noprint(object, n.trees, scale. = FALSE, sort. = FALSE)
}
\arguments{
\item{object}{a gbm fit object}
\item{n.trees}{number of trees to be used}
\item{scale.}{boolean specifying if importance should be scaled}
\item{sort.}{boolean specifying if importance should be sorted}
}
\description{
relative.influence.noprint
Get relative influence from gbm fit
}
|
# Generate a random string for test fixtures.
#
# n:   number of random bytes to draw.
# hex: if TRUE, return the lowercase hex encoding of n random bytes
#      (2 * n characters); if FALSE, return n random printable ASCII
#      characters (codes 32..126) as a single string.
rand_str <- function(n = 16L, hex = TRUE) {
  if (!hex) {
    return(rawToChar(sample(as.raw(32:126), n, replace = TRUE)))
  }
  random_bytes <- sample(as.raw(0:255), n, replace = TRUE)
  paste(random_bytes, collapse = "")
}
# Skip the current testthat test unless long-running tests are enabled.
#
# Long tests run only when the environment variable STORR_RUN_LONG_TESTS is
# exactly "true"; otherwise testthat::skip() signals a skip condition and this
# function does not return normally.  Returns TRUE invisibly when tests run.
skip_long_test <- function() {
  run_long <- Sys.getenv("STORR_RUN_LONG_TESTS")
  if (!identical(run_long, "true")) {
    testthat::skip("Skipping long running test")
  }
  invisible(TRUE)
}
# Copy a file or directory into a fresh temporary directory and return the
# path of the copy.
#
# src: path to an existing file or directory (copied recursively).
# Returns the full path of the copy inside the new temporary directory.
#
# Fix: file.copy() always creates basename(src) inside the destination
# directory, so the returned path must use basename(src).  The previous
# file.path(path, src) only worked when src was a bare name in the working
# directory and broke for absolute paths or paths with directory components.
copy_to_tmp <- function(src) {
  path <- tempfile()
  dir.create(path)
  file.copy(src, path, recursive = TRUE)
  file.path(path, basename(src))
}
# Check whether a PostgreSQL connection can be established.
#
# ctor: zero-argument function returning a DBI driver object.
# Returns TRUE if DBI::dbConnect(ctor()) succeeds, FALSE on any error.
has_postgres <- function(ctor) {
  con <- tryCatch(DBI::dbConnect(ctor()), error = function(e) NULL)
  !is.null(con)
}
|
/tests/testthat/helper-storr.R
|
no_license
|
mpadge/storr
|
R
| false
| false
| 619
|
r
|
rand_str <- function(n = 16L, hex = TRUE) {
if (hex) {
paste(sample(as.raw(0:255), n, replace = TRUE), collapse = "")
} else {
rawToChar(sample(as.raw(32:126), n, replace = TRUE))
}
}
skip_long_test <- function() {
if (identical(Sys.getenv("STORR_RUN_LONG_TESTS"), "true")) {
return(invisible(TRUE))
}
testthat::skip("Skipping long running test")
}
copy_to_tmp <- function(src) {
path <- tempfile()
dir.create(path)
file.copy(src, path, recursive = TRUE)
file.path(path, src)
}
has_postgres <- function(ctor) {
!is.null(tryCatch(DBI::dbConnect(ctor()), error = function(e) NULL))
}
|
# ---------------------------------------------------------------------------
# Animal State Prediction: random-forest baseline.
# Reads train/test CSVs, engineers breed / colour / time-of-day features,
# fits a random forest, writes predictions, and plots variable importance.
# ---------------------------------------------------------------------------

# Loading required libraries
library(caTools)
library(xgboost)
library(rpart)
library(randomForest)
library(dplyr)
library(ggplot2)   # fix: ggplot() is used below but ggplot2 was never loaded
library(ggthemes)  # fix: theme_few() comes from ggthemes, also never loaded

# Loading the train and test set (hard-coded local paths)
train <- read.csv("C:/Users/Vinay/Downloads/Animal State Prediction - dataset/train.csv")
test <- read.csv("C:/Users/Vinay/Downloads/Animal State Prediction - dataset/test.csv")
# Looking at the structure of the data
str(train)
# Combine both datasets so feature engineering is applied consistently
complete=bind_rows(train,test)
# Drop columns that duplicate information or are unused downstream
complete$dob_year=complete$age_upon_intake=complete$dob_month=complete$count=complete$age_upon_intake_.years.=NULL
complete$intake_datetime=complete$intake_monthyear=complete$time_in_shelter=complete$date_of_birth=NULL
complete$age_upon_outcome=complete$age_upon_outcome_.years.=complete$outcome_datetime=complete$outcome_monthyear=NULL

######### Feature engineering #############
# Deal with 1847 breeds & 443 colors by reducing cardinality.
# New indicator: is the animal a mixed breed?
complete$Mix_Or_Not <- ifelse(grepl('Mix', complete$breed), 1, 0)
# Major reduction on breed: strip " Mix" and keep only the primary breed
complete$breed_reduced <- sapply(complete$breed,
                                 function(x) gsub(' Mix', '',
                                                  strsplit(x, split = '/')[[1]][1]))
# Use only the primary color
complete$NewColor <- sapply(complete$color,
                            function(x) strsplit(x, split = '/')[[1]][1])
# Frequency tables used to keep the 31 most common levels (+ "Other" = 32)
breed_table = data.frame(table(complete$breed_reduced))
color_table = data.frame(table(complete$NewColor))
# Order the tables in descending order of frequency
breed_table = breed_table[order(-breed_table$Freq),]
color_table = color_table[order(-color_table$Freq),]
breeds <- breed_table$Var1[1:31]
colors <- color_table$Var1[1:31]
# Finalize breed and color to 32 different kinds
complete$breed_redcd <- (ifelse(complete$breed %in% breeds, complete$breed, "Other"))
complete$color_redcd <- (ifelse(complete$color %in% colors, complete$color, "Other"))
# Drop the intermediate breed/color columns
complete$breed=complete$color=complete$NewColor=complete$breed_reduced=NULL
# Convert intake_hour / outcome_hour to coarse times of day.
# NOTE(review): the strict inequalities leave hours 11, 16 and 20 classified
# as 'night' -- confirm whether >= was intended at the boundaries.
complete$intake_time = ifelse(complete$intake_hour > 5 & complete$intake_hour < 11, 'morning', ifelse(complete$intake_hour > 11 & complete$intake_hour < 16, 'midday', ifelse (complete$intake_hour > 16 & complete$intake_hour < 20, 'evening', 'night')))
complete$outcome_time = ifelse(complete$outcome_hour > 5 & complete$outcome_hour < 11, 'morning', ifelse(complete$outcome_hour > 11 & complete$outcome_hour < 16, 'midday', ifelse (complete$outcome_hour > 16 & complete$outcome_hour < 20, 'evening', 'night')))
# Drop the raw hour variables
complete$intake_hour=complete$outcome_hour=NULL
# Convert the engineered variables back to factors for randomForest
factorize = c('sex_upon_intake','age_upon_intake_age_group','intake_month','intake_year','intake_number','sex_upon_outcome','age_upon_outcome_age_group','outcome_month','outcome_year','outcome_number','Mix_Or_Not','breed_redcd','color_redcd','intake_time','outcome_time')
complete[factorize] <- lapply(complete[factorize], function(x) as.factor(x))
# Split the data back into train and test sets (train has 47803 rows)
new_train = complete[1:47803, ]
new_test = complete[47804:nrow(complete), ]
# Set a seed for reproducibility
set.seed(786)
Model1RF=randomForest(outcome_type~.-animal_id_outcome,data=new_train,ntree=500,importance =T)
# Predict class-vote fractions on the test data
prediction <- predict(Model1RF, new_test, type = 'vote')
# Take the column name (class) with the highest vote share per row
pred_value=colnames(prediction)[max.col(prediction,ties.method="first")]
# Save the solution to a dataframe
solution <- data.frame('animal_id_outcome' = new_test$animal_id_outcome,'outcome_type'= pred_value)
# Write it to file
write.csv(solution, 'solution1.csv', row.names = F,quote = F)
# OOB error trace per class
plot(Model1RF, ylim=c(0,1))
legend('topright', colnames(Model1RF$err.rate), col=1:6, fill=1:6)
# Variable importance (mean decrease in Gini)
importance <- importance(Model1RF)
varImportance <- data.frame(Variables = row.names(importance),
                            Importance = round(importance[ ,'MeanDecreaseGini'],2))
# Create a rank variable based on importance
rankImportance <- varImportance %>%
  mutate(Rank = paste0('#',dense_rank(desc(Importance))))
# Use ggplot2 to visualize the relative importance of variables
ggplot(rankImportance, aes(x = reorder(Variables, Importance),
                           y = Importance)) +
  geom_bar(stat='identity', colour = 'black') +
  geom_text(aes(x = Variables, y = 0.5, label = Rank),
            hjust=0, vjust=0.55, size = 4, colour = 'lavender',
            fontface = 'bold') +
  labs(x = 'Variables', title = 'Relative Variable Importance') +
  coord_flip() +
  theme_few()
|
/script1_RF.R
|
no_license
|
V-S-D/fff
|
R
| false
| false
| 4,894
|
r
|
#loading required libraries
library(caTools)
library(xgboost)
library(rpart)
library(randomForest)
library(dplyr)
#Loading the train and test set
train <- read.csv("C:/Users/Vinay/Downloads/Animal State Prediction - dataset/train.csv")
test <- read.csv("C:/Users/Vinay/Downloads/Animal State Prediction - dataset/test.csv")
#Lookiong at the structure of the data
str(train)
#combining both the datasets for relevant changes & feature engineering
complete=bind_rows(train,test)
#Dropping the unwanted columns
complete$dob_year=complete$age_upon_intake=complete$dob_month=complete$count=complete$age_upon_intake_.years.=NULL
complete$intake_datetime=complete$intake_monthyear=complete$time_in_shelter=complete$date_of_birth=NULL
complete$age_upon_outcome=complete$age_upon_outcome_.years.=complete$outcome_datetime=complete$outcome_monthyear=NULL
#########Feature engineering#############
# Dealing with 1847 breeds & 443 colors
# Adding new feature wrt Mix
complete$Mix_Or_Not <- ifelse(grepl('Mix', complete$breed), 1, 0)
#Major reduction on breed
complete$breed_reduced <- sapply(complete$breed,
function(x) gsub(' Mix', '',
strsplit(x, split = '/')[[1]][1]))
#USing only the primary color
complete$NewColor <- sapply(complete$color,
function(x) strsplit(x, split = '/')[[1]][1])
# Creating table to finally reduce the number of Breed and Color to 32 factors for RF model
breed_table = data.frame(table(complete$breed_reduced))
color_table = data.frame(table(complete$NewColor))
# Order the table in descending order of frequency
breed_table = breed_table[order(-breed_table$Freq),]
color_table = color_table[order(-color_table$Freq),]
breeds <- breed_table$Var1[1:31]
colors <- color_table$Var1[1:31]
#Finalizing breed and color to 32 different kinds
complete$breed_redcd <- (ifelse(complete$breed %in% breeds, complete$breed, "Other"))
complete$color_redcd <- (ifelse(complete$color %in% colors, complete$color, "Other"))
#Dropping unnecessary columns for breed and color
complete$breed=complete$color=complete$NewColor=complete$breed_reduced=NULL
#COnverting intake_hour and outcome_hour variable to specific times of day
complete$intake_time = ifelse(complete$intake_hour > 5 & complete$intake_hour < 11, 'morning', ifelse(complete$intake_hour > 11 & complete$intake_hour < 16, 'midday', ifelse (complete$intake_hour > 16 & complete$intake_hour < 20, 'evening', 'night')))
complete$outcome_time = ifelse(complete$outcome_hour > 5 & complete$outcome_hour < 11, 'morning', ifelse(complete$outcome_hour > 11 & complete$outcome_hour < 16, 'midday', ifelse (complete$outcome_hour > 16 & complete$outcome_hour < 20, 'evening', 'night')))
#Drop unnecessary time variables
complete$intake_hour=complete$outcome_hour=NULL
#Time to convert variables back to factors
factorize = c('sex_upon_intake','age_upon_intake_age_group','intake_month','intake_year','intake_number','sex_upon_outcome','age_upon_outcome_age_group','outcome_month','outcome_year','outcome_number','Mix_Or_Not','breed_redcd','color_redcd','intake_time','outcome_time')
complete[factorize] <- lapply(complete[factorize], function(x) as.factor(x))
#splitting the data back to train and test set
new_train = complete[1:47803, ]
new_test = complete[47804:nrow(complete), ]
#Setting a seed
set.seed(786)
Model1RF=randomForest(outcome_type~.-animal_id_outcome,data=new_train,ntree=500,importance =T)
#Making predictions on test data
prediction <- predict(Model1RF, new_test, type = 'vote')
#Taking the header of column with highest value
pred_value=colnames(prediction)[max.col(prediction,ties.method="first")]
# Save the solution to a dataframe
solution <- data.frame('animal_id_outcome' = new_test$animal_id_outcome,'outcome_type'= pred_value)
# Write it to file
write.csv(solution, 'solution1.csv', row.names = F,quote = F)
plot(Model1RF, ylim=c(0,1))
legend('topright', colnames(Model1RF$err.rate), col=1:6, fill=1:6)
importance <- importance(Model1RF)
varImportance <- data.frame(Variables = row.names(importance),
Importance = round(importance[ ,'MeanDecreaseGini'],2))
# Create a rank variable based on importance
rankImportance <- varImportance %>%
mutate(Rank = paste0('#',dense_rank(desc(Importance))))
# Use ggplot2 to visualize the relative importance of variables
ggplot(rankImportance, aes(x = reorder(Variables, Importance),
y = Importance)) +
geom_bar(stat='identity', colour = 'black') +
geom_text(aes(x = Variables, y = 0.5, label = Rank),
hjust=0, vjust=0.55, size = 4, colour = 'lavender',
fontface = 'bold') +
labs(x = 'Variables', title = 'Relative Variable Importance') +
coord_flip() +
theme_few()
|
# ui.R -- shinydashboard front-end for the visual-inference survey.
# Three tabs: demographics ("About you"), the survey itself ("Questions"),
# and a closing "Thank you" page.
library(shiny)
library(shinydashboard)
library(googlesheets4)
# Load survey questions (defines the `questions` list: $scene, $demographics)
source("questions.R")
# Sidebar: tab menu plus the question panel, which is shown only while the
# "Questions" tab is active (conditionalPanel on input.tabs).
sidebar <- dashboardSidebar(
  hr(),
  sidebarMenu(
    id="tabs",
    menuItem("About you", tabName = "About_you", icon = icon("id-card")),
    menuItem("Questions", tabName = "Questions", icon = icon("images")),
    menuItem("Thank you", tabName = "Thank_you", icon = icon("smile-wink"))
  ),
  hr(),
  conditionalPanel(
    condition = "input.tabs == 'Questions'",
    fluidRow(
      # NOTE(review): column widths 1 + 12 = 13 exceed Bootstrap's 12-column
      # grid, so the second column wraps -- confirm this layout is intended.
      column(width = 1),
      column(width = 12,
             h4("Questions:"),
             hr(),
             questions$scene,
             div(
               uiOutput("ui_btn_next")
             ),
             status = "info")
    )
  )
)
shinyUI(
  dashboardPage(
    dashboardHeader(
      title = "Visual Inference Study : Reading Plots",
      # Header-bar save/submit action link
      tags$li(class = "dropdown", actionLink("btn_export", span(icon("save"), "Submit Survey")))
    ),
    sidebar,
    dashboardBody(
      tabItems(
        tabItem(tabName = "About_you",
                box(
                  title = "Demographics",
                  questions$demographics,
                  div(uiOutput("ui_d_save")),
                  # NOTE(review): box width 14 exceeds the 12-column grid
                  # (renders as an undefined col-sm-14 class) -- confirm.
                  width = 14,
                  status = "info",
                  solidHeader = TRUE,
                  collapsible = TRUE
                )
        ),
        tabItem(tabName = "Questions",
                # Custom CSS/JS for the image-annotation (taipan) widget
                includeCSS("www/taipan.css"),
                includeScript("www/img_size.js"),
                box(
                  title = textOutput("out_img_info"),
                  div(class = "taipan_image_div",
                      imageOutput("out_img",
                                  inline = T)),
                  width = NULL,
                  status = "primary",
                  solidHeader = TRUE,
                  collapsible = F
                )),
        tabItem(tabName = "Thank_you",
                box(
                  title = "Thank you",
                  div(
                    uiOutput("validation")),
                  width = 12,
                  status = "info",
                  solidHeader = TRUE,
                  collapsible = TRUE
                )
        )
      )
    )
  )
)
|
/experiment/ui.R
|
permissive
|
numbats/summer-vis-inf
|
R
| false
| false
| 2,225
|
r
|
library(shiny)
library(shinydashboard)
library(googlesheets4)
# Load survey questions
source("questions.R")
sidebar <- dashboardSidebar(
hr(),
sidebarMenu(
id="tabs",
menuItem("About you", tabName = "About_you", icon = icon("id-card")),
menuItem("Questions", tabName = "Questions", icon = icon("images")),
menuItem("Thank you", tabName = "Thank_you", icon = icon("smile-wink"))
),
hr(),
conditionalPanel(
condition = "input.tabs == 'Questions'",
fluidRow(
column(width = 1),
column(width = 12,
h4("Questions:"),
hr(),
questions$scene,
div(
uiOutput("ui_btn_next")
),
status = "info")
)
)
)
shinyUI(
dashboardPage(
dashboardHeader(
title = "Visual Inference Study : Reading Plots",
tags$li(class = "dropdown", actionLink("btn_export", span(icon("save"), "Submit Survey")))
),
sidebar,
dashboardBody(
tabItems(
tabItem(tabName = "About_you",
box(
title = "Demographics",
questions$demographics,
div(uiOutput("ui_d_save")),
width = 14,
status = "info",
solidHeader = TRUE,
collapsible = TRUE
)
),
tabItem(tabName = "Questions",
includeCSS("www/taipan.css"),
includeScript("www/img_size.js"),
box(
title = textOutput("out_img_info"),
div(class = "taipan_image_div",
imageOutput("out_img",
inline = T)),
width = NULL,
status = "primary",
solidHeader = TRUE,
collapsible = F
)),
tabItem(tabName = "Thank_you",
box(
title = "Thank you",
div(
uiOutput("validation")),
width = 12,
status = "info",
solidHeader = TRUE,
collapsible = TRUE
)
)
)
)
)
)
|
# Profile basic test -- bootstrap the profiling environment.
source("setup.R")
# Ensure profvis (and its widget dependencies) are installed in the user
# library.  require() is used deliberately here: it returns FALSE instead of
# erroring when the package is missing, which lets us fall through to
# install.packages() and retry.
# NOTE(review): user_path must be defined by setup.R (sourced above) -- confirm.
local({
  if(!require(profvis,lib.loc = user_path)) {
    install.packages("profvis",lib = user_path)
    if(!require(profvis,lib.loc = user_path)) {
      stop("Failed to install profiler")
    }
  }
  # Packages profvis needs to serialize/save its HTML widgets.
  library(htmlwidgets,lib.loc = user_path)
  library(jsonlite,lib.loc = user_path)
  library(yaml,lib.loc = user_path)
  invisible(T)
})
# Echo the selac revision under test (set by setup.R).
print(selac_release)
# setup_selac_for_profiling()
# Evaluate the HMM (evolving-amino-acid) selac likelihood on a random
# 10-column subset of a fasta alignment.
#
# phy:        phylogeny whose tip labels index the alignment rows.
# fasta.file: path to the codon alignment (fasta format).
# nuc.model:  nucleotide model; "HKY" is rejected (not implemented downstream).
# gamma.type: rate-variation model; "none" disables gamma entirely.
# nCores:     forwarded as n.cores.by.gene.by.site.
# Returns the log-likelihood value from the selac internal likelihood function.
# Relies on globals defined later in this file: hmm.params, std.base.freq,
# std.nuc.params, std.gamma.shape, std.sel.reg, std.gamma.
test_selac_hmm <- function(phy,
                           fasta.file,
                           nuc.model=c("JC", "GTR", "HKY", "UNREST"),
                           gamma.type=c("none", "median","quadrature","lognormal" ),
                           nCores=1){
  nuc.model=match.arg(nuc.model)
  gamma.type=match.arg(gamma.type)
  if(nuc.model == "HKY") stop("HKY model not implemented for GetLikelihoodSAC_CodonForManyCharGivenAllParams.")
  include.gamma = (gamma.type != "none")
  # When gamma is disabled, a placeholder type is still required downstream.
  if(!include.gamma) gamma.type = "quadrature"
  tmp.gene <- read.dna(fasta.file, format="fasta")
  tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
  chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
  # Keep the taxon-name column (1) plus 10 randomly sampled codon sites;
  # the sample() call makes this function depend on the RNG seed.
  codon.data <- chars[phy$tip.label,c(1,1+sample(ncol(chars)-1,10))]
  codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
  codon.data <- selac:::SitePattern(codon.data)
  codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
  # Assemble the parameter vector in the order the likelihood expects:
  # base params, [base freqs unless UNREST], nuc rates, [gamma shape], sel reg.
  model.params = hmm.params
  if(nuc.model != "UNREST")
    model.params=c(model.params,std.base.freq)
  model.params=c(model.params,std.nuc.params[[nuc.model]])
  lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
  if(include.gamma)
    model.params=c(model.params,std.gamma.shape)
  model.params = c(model.params,std.sel.reg)
  # Parameters are passed in log space (logspace=TRUE below).
  lSAC.c4mc.full(log(model.params),
                 codon.data=codon.data,
                 phy=phy,
                 codon.freq.by.aa=NULL,
                 codon.freq.by.gene=codon.freq.by.gene,
                 numcode=1,
                 diploid=TRUE,
                 aa.properties=NULL,
                 volume.fixed.value=std.gamma,
                 nuc.model=nuc.model,
                 codon.index.matrix=codon.index.matrix,
                 include.gamma=include.gamma,
                 gamma.type=gamma.type,
                 ncats=4,
                 k.levels=0,
                 logspace=TRUE,
                 verbose=TRUE,
                 n.cores.by.gene.by.site=nCores,
                 estimate.importance=FALSE) -> res
  return(res)
}
# Basic loader used while building further tests: prepares the Rokas yeast
# tree/alignment and the selac data structures the likelihood tests need.
# NOTE(review): every object built here is a function-local variable; only the
# value of the final assignment (codon.index.matrix) is returned, invisibly.
# phy, codon.data, aa.optim, codon.freq.* are all discarded on return.
# Compare load_rokasYeast() below, which returns a named list -- this function
# probably should too; confirm before relying on it.
load_inputs <- function(){
  tree <- read.tree("rokasYeast.tre")
  phy <- drop.tip(tree, "Calb")
  yeast.gene <- read.dna("gene1Yeast.fasta", format="fasta")
  yeast.gene <- as.list(as.matrix(cbind(yeast.gene))[1:7,])
  chars <- selac:::DNAbinToCodonNumeric(yeast.gene)
  codon.data <- chars[phy$tip.label,]
  aa.data <- selac:::ConvertCodonNumericDataToAAData(codon.data, numcode=1)
  # Majority-rule optimal amino acid per site: starting values for all
  # methods, final values for majrule.
  aa.optim <- apply(aa.data[, -1], 2, selac:::GetMaxName)
  aa.optim.full.list <- aa.optim
  codon.freq.by.aa <- selac:::GetCodonFreqsByAA(codon.data[,-1], aa.optim, numcode=1)
  codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
  # Append the optimal-aa row so SitePattern() can carry it alongside the data.
  aa.optim.frame.to.add <- matrix(c("optimal", aa.optim), 1, dim(codon.data)[2])
  colnames(aa.optim.frame.to.add) <- colnames(codon.data)
  codon.data <- rbind(codon.data, aa.optim.frame.to.add)
  codon.data <- selac:::SitePattern(codon.data, includes.optimal.aa=TRUE)
  aa.optim = codon.data$optimal.aa
  codon.index.matrix = selac:::CreateCodonMutationMatrixIndex()
}
# Load the Rokas yeast dataset for profiling runs.
#
# Reads the tree (dropping the "Calb" tip) and the first 7 sequences of the
# gene-1 alignment, converts them to selac's numeric codon coding, and returns
# a named list: input.key (dataset label), phy (tree), codon.data (rows
# ordered by the tree's tip labels).
load_rokasYeast <- function(){
  phy <- drop.tip(read.tree("rokasYeast.tre"), "Calb")
  gene <- read.dna("gene1Yeast.fasta", format="fasta")
  gene <- as.list(as.matrix(cbind(gene))[1:7,])
  numeric.chars <- selac:::DNAbinToCodonNumeric(gene)
  list(input.key="rokasYeast",
       phy=phy,
       codon.data=numeric.chars[phy$tip.label,])
}
# Likelihood smoke test: GTR + quadrature-categorised gamma.
# Relies on globals prepared elsewhere (lSAC.c4mc.full, codon.data, phy,
# aa.optim, codon.freq.by.aa, codon.freq.by.gene, codon.index.matrix).
# Parameter vector (log space): C.Phi.q.Ne, alpha, beta, 3 base freqs,
# 5 GTR rates, gamma shape.
test_selac.gamma.quadrature <- function(){
  lSAC.c4mc.full(log(c(4*4e-7*.5*5e6, 1.829272, 0.101799, .25, .25, .25, rep(1,5), 5)),
                 codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
                 codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
                 numcode=1, diploid=TRUE, aa.properties=NULL,
                 volume.fixed.value=0.0003990333,
                 nuc.model="GTR",
                 codon.index.matrix=codon.index.matrix,
                 include.gamma=TRUE,
                 gamma.type="quadrature",
                 ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
                 n.cores.by.gene.by.site=1)
}
# Likelihood smoke test: GTR + median-categorised gamma.
# Relies on globals prepared elsewhere (lSAC.c4mc.full, codon.data, phy,
# aa.optim, codon.freq.by.aa, codon.freq.by.gene, codon.index.matrix).
#
# Fix: the data argument was spelled `codon_data=` (underscore); every sibling
# test and the likelihood function itself use `codon.data=`, so the call would
# fail with an unused-argument error.  Corrected to `codon.data=`.
test_selac.gamma.median <- function(){
  lSAC.c4mc.full(log(c(4*4e-7*.5*5e6, 1.829272, 0.101799, .25, .25, .25, rep(1,5), 5)),
                 codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
                 codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
                 numcode=1, diploid=TRUE, aa.properties=NULL,
                 volume.fixed.value=0.0003990333,
                 nuc.model="GTR",
                 codon.index.matrix=codon.index.matrix,
                 include.gamma=TRUE,
                 gamma.type="median",
                 ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE)
}
# Likelihood smoke test: UNREST nucleotide model, no gamma.
# UNREST takes 11 free rates and no explicit base frequencies.
# Relies on the same globals as the other test_selac.* helpers.
test_selac.unrest <- function(){
  lSAC.c4mc.full(log(c(4*4e-7*.5*5e6, 1.829272, 0.101799, rep(1,11))),
                 codon.data, phy, aa.optim_array=aa.optim,
                 codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
                 numcode=1, diploid=TRUE, aa.properties=NULL,
                 volume.fixed.value=0.0003990333,
                 nuc.model="UNREST",
                 codon.index.matrix,
                 include.gamma=FALSE,
                 ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
                 n.cores.by.gene.by.site=1)
}
# Likelihood smoke test: GTR nucleotide model, no gamma.
# Parameter vector (log space): C.Phi.q.Ne, alpha, beta, 3 base freqs,
# 5 GTR rates.  Relies on the same globals as the other test_selac.* helpers.
test_selac.gtr <- function(){
  lSAC.c4mc.full(log(c(4*4e-7*.5*5e6, 1.829272, 0.101799, .25, .25, .25, rep(1,5))),
                 codon.data, phy, aa.optim_array=aa.optim,
                 codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
                 numcode=1, diploid=TRUE, aa.properties=NULL,
                 volume.fixed.value=0.0003990333,
                 nuc.model="GTR",
                 codon.index.matrix,
                 include.gamma=FALSE,
                 ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
                 n.cores.by.gene.by.site=1)
}
# -- Shared starting-parameter sets for the test_selac* helpers --------------
# Composite mutation-rate scale (C * Phi * q * Ne) plus alpha/beta.
std.params = c(C.Phi.q.Ne = 4*4e-7*.5*5e6,
               alpha=1.829272,
               beta=0.101799)
# Fixed value passed as volume.fixed.value in the likelihood calls.
std.gamma=0.0003990333
# Equilibrium base frequencies for A, C, G (T implied as the remainder).
std.base.freq = c(A=0.25,C=0.25,G=0.25)
std.poly.params = c(NA,NA)
# Gamma shape appended when a gamma rate-variation model is requested.
std.gamma.shape = 5
# Parameter set for the HMM (evolving-amino-acid) likelihood variant.
hmm.params = c(C.Phi.q.Ne = 2,
               alpha=1.829272,
               beta=0.101799)
# Selection-regime parameter appended last for the HMM variant.
std.sel.reg = 0.01
## Notes on nuc.mutation.params:
# used as rates value in selac:::CreateNucleotideMutationMatrix(rates, model, base.freqs)->res
# either: length(base.freqs) == 4 && sum(base.freqs) == 1
# or: is.null(base.freqs) == TRUE
# dim(res) == c(4,4)
# rowSums(res) == rep(1,4)
## selac:::CreateNucleotideMutationMatrix with JC model
# rates = rates[1] (ie just uses first value)
## selac:::CreateNucleotideMutationMatrix with GTR model
# rates = rates[1:5] (ie just uses first 5 values)
## selac:::CreateNucleotideMutationMatrix with HKY model
# rates = rates[1:2] (ie just uses first 2 values)
## selac:::CreateNucleotideMutationMatrix with UNREST model
# rates = rates[1:11] (ie just uses first 11 values)
#
# std.nuc.mutation.paramsA = c(1,1,1,1,1)
# std.nuc.mutation.paramsB = rep(1,11)
# std.nuc.mutation.paramsC = c(1,1,1,1,1)
# Named list of unit nucleotide-rate vectors, one per model, with the length
# each model consumes (JC=1, GTR=5, HKY=2, UNREST=11).
std.nuc.params = mapply(rep_len,length.out=c(JC=1,GTR=5,HKY=2,UNREST=11),x=rep(1,4),
                        USE.NAMES = T,SIMPLIFY = F)
# Evaluate the standard selac likelihood once with canned parameter values.
#
# phy:        phylogeny whose tip labels index codon.data.
# codon.data: numeric codon matrix (first column = taxon names), e.g. from
#             load_rokasYeast().
# nuc.model:  nucleotide model; "HKY" is rejected (not implemented downstream).
# gamma.type: rate-variation model; "none" disables gamma.
# nCores:     forwarded as n.cores.by.gene.by.site.
# Returns the log-likelihood.  Uses the std.* globals defined above.
test_selac_std <- function(phy, codon.data,
                           nuc.model=c("JC", "GTR", "HKY", "UNREST"),
                           gamma.type=c("none", "median","quadrature","lognormal" ),
                           nCores=1){
  nuc.model=match.arg(nuc.model)
  gamma.type=match.arg(gamma.type)
  if(nuc.model == "HKY") stop("HKY model not implemented for GetLikelihoodSAC_CodonForManyCharGivenAllParams.")
  include.gamma = (gamma.type != "none")
  aa.data <- selac:::ConvertCodonNumericDataToAAData(codon.data, numcode=1)
  # Majority-rule optimal amino acid per site (starting values for all,
  # final values for majrule).
  aa.optim <- apply(aa.data[, -1], 2, selac:::GetMaxName)
  aa.optim.full.list <- aa.optim
  codon.freq.by.aa <- selac:::GetCodonFreqsByAA(codon.data[,-1], aa.optim, numcode=1)
  codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
  # Append the optimal-aa row so SitePattern() can carry it with the data.
  aa.optim.frame.to.add <- matrix(c("optimal", aa.optim), 1, dim(codon.data)[2])
  colnames(aa.optim.frame.to.add) <- colnames(codon.data)
  codon.data <- rbind(codon.data, aa.optim.frame.to.add)
  codon.data <- selac:::SitePattern(codon.data, includes.optimal.aa=TRUE)
  aa.optim = codon.data$optimal.aa
  codon.index.matrix = selac:::CreateCodonMutationMatrixIndex()
  # Parameter packing order matters: base params, [base freqs unless UNREST],
  # nuc rates, [gamma shape when gamma is on].
  model.params = std.params
  if(nuc.model != "UNREST")
    model.params=c(model.params,std.base.freq)
  model.params=c(model.params,std.nuc.params[[nuc.model]])
  lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParams
  # The two branches differ only in gamma arguments; kept separate because the
  # no-gamma call deliberately omits gamma.type.
  if(include.gamma){
    model.params=c(model.params,std.gamma.shape)
    lSAC.c4mc.full(log(model.params),
                   codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
                   codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
                   numcode=1, diploid=TRUE, aa.properties=NULL,
                   volume.fixed.value=std.gamma,
                   nuc.model=nuc.model,
                   codon.index.matrix=codon.index.matrix,
                   include.gamma=TRUE, gamma.type=gamma.type,
                   ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
                   n.cores.by.gene.by.site=nCores)->res
  }else{
    lSAC.c4mc.full(log(model.params),
                   codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
                   codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
                   numcode=1, diploid=TRUE, aa.properties=NULL,
                   volume.fixed.value=std.gamma,
                   nuc.model=nuc.model,
                   codon.index.matrix=codon.index.matrix,
                   include.gamma=FALSE,
                   ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
                   n.cores.by.gene.by.site=nCores)->res
  }
  return(res)
}
#round(selac.gtr, 3)
#get_test_key <- function(phy.source, nuc.model, gamma.type, nCores, seed)
# Run one profiled likelihood evaluation and append the result to a CSV log.
#
# src_data:    list with $input.key, $phy, $codon.data (see load_rokasYeast()).
# nuc.model / gamma.model: forwarded to test_selac_std().
# seed:        RNG seed set before the run.
# nCores:      per-site core count forwarded to the likelihood call.
# Side effects: writes <prefix>.Rprof and <prefix>.Rprofvis.html, appends a
# row to <input.key>_LL_log.csv.  Returns the log-likelihood (NA on failure).
run_profile <- function(src_data,nuc.model,gamma.model,seed,nCores){
  set.seed(seed)
  cat(sprintf("Start: %s_%s_%s_%s_%i_%i\n",
              src_data$input.key,
              nuc.model,
              gamma.model,
              selac_release,
              nCores,
              seed))
  # Shared prefix names the .Rprof dump and the profvis widget file.
  profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
                         src_data$input.key,
                         nuc.model,
                         gamma.model,
                         selac_release,
                         nCores,
                         seed)
  model.LL=NA
  # try() keeps a failed run from aborting a batch; model.LL then stays NA.
  try({
    prof_obj <- profvis({
      model.LL=test_selac_std(src_data$phy,
                              src_data$codon.data,
                              nuc.model = nuc.model,
                              gamma.type = gamma.model,
                              nCores = nCores)
    }, prof_output = paste0(profile_prefix,".Rprof"))
    htmlwidgets::saveWidget(prof_obj,
                            file=paste0(profile_prefix,".Rprofvis.html"))
  })
  cat(sprintf("End: %s_%s_%s_%s_%i_%i\tLL: %0.3f\n",
              src_data$input.key,
              nuc.model,
              gamma.model,
              selac_release,
              nCores,
              seed,
              model.LL))
  # Write the CSV header once, then append one row per run.
  if(!file.exists(paste0(src_data$input.key,"_LL_log.csv")))
    cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
        file=paste0(src_data$input.key,"_LL_log.csv"),
        append = T)
  cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
              src_data$input.key,
              nuc.model,
              gamma.model,
              selac_release,
              nCores,
              seed,
              model.LL),
      file=paste0(src_data$input.key,"_LL_log.csv"),
      append = T)
  model.LL
}
# Profile a small 3-partition SelacOptimize() run (GTR, no gamma).
#
# seed: RNG seed (random by default).
# ref:  selac git ref to install/load via setup_selac_for_profiling().
# Side effects: writes <prefix>.Rprof / .Rprofvis.html, appends to
# selac19XX_LL_log.csv, saves the fit object under selac_paper_output/.
# Returns the optimized log-likelihood (NA if the run failed).
run_simple_selac_optimize <- function(seed=sample.int(1e6,1),ref="v1.6.1-rc1"){
  setup_selac_for_profiling(ref=ref)
  # "noneXquadrature" is a label only: gamma is off (include.gamma=FALSE) but
  # gamma.type='quadrature' is still passed below.
  profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
                         "selac19XX",
                         "GTR",
                         "noneXquadrature",
                         selac_release,
                         3,
                         seed)
  src.key="selac19XX"
  set.seed(seed)
  cat(sprintf("Start: %s\n",
              profile_prefix))
  tree<-read.tree('selac_paper_data/SalichosRokas.tre')
  result=list(loglik=NA)
  nuc.model = 'GTR'
  gamma.type="noneXquadrature"
  nCores=3
  # try() keeps a failed optimization from aborting the batch.
  try({
    prof_obj <- profvis({
      ## start.from.mle set to allow manual specification of fasta files
      # requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
      # mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
      result <- SelacOptimize(codon.data.path = 'tmp_data/', phy = tree, n.partitions=3,
                              edge.length = 'optimize', optimal.aa = 'none', data.type='codon',
                              codon.model = 'GY94', nuc.model = 'GTR',
                              include.gamma = FALSE, gamma.type='quadrature', ncats = 4, numcode = 1,
                              diploid = FALSE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
                              n.cores.by.gene = 3, max.restarts = 1, max.evals=20)
    }, prof_output = paste0(profile_prefix,".Rprof"))
    htmlwidgets::saveWidget(prof_obj,
                            file=paste0(profile_prefix,".Rprofvis.html"))
  })
  cat(sprintf("End: %s\tLL: %0.3f\n",
              profile_prefix,
              result$loglik))
  # Write the CSV header once, then append one row per run.
  if(!file.exists(paste0(src.key,"_LL_log.csv")))
    cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
        file=paste0(src.key,"_LL_log.csv"),
        append = T)
  cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
              src.key,
              nuc.model,
              gamma.type,
              selac_release,
              nCores,
              seed,
              result$loglik),
      file=paste0(src.key,"_LL_log.csv"),
      append = T)
  save(result,file=sprintf('selac_paper_output/yeastSalRokSelacGTRG_quad_%s.Rdata',profile_prefix))
  result$loglik
}
# Profile a full-dataset SelacOptimize() run (GTR, no gamma) over all
# partitions in selac_paper_data/.
#
# seed:   RNG seed (random by default).
# ref:    selac git ref to install/load via setup_selac_for_profiling().
# nCores: cores per gene for the optimizer.
# Side effects: writes <prefix>.Rprof / .Rprofvis.RData / .Rprofvis.html,
# appends to selacFULLb_LL_log.csv, saves the fit under selac_paper_output/.
# Returns the optimized log-likelihood (NA if the run failed).
run_full_selac_optimize <- function(seed=sample.int(1e6,1),ref="v1.6.1-rc1", nCores=3){
  setup_selac_for_profiling(ref=ref)
  # "noneXquadrature" is a label only (gamma off, quadrature type passed).
  profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
                         "selacFULLb",
                         "GTR",
                         "noneXquadrature",
                         selac_release,
                         nCores,
                         seed)
  src.key="selacFULLb"
  set.seed(seed)
  cat(sprintf("Start: %s\n",
              profile_prefix))
  tree<-read.tree('selac_paper_data/SalichosRokas.tre')
  result=list(loglik=NA)
  nuc.model = 'GTR'
  gamma.type="noneXquadrature"
  #nCores=3
  try({
    # interval=0.5 coarsens the profiler sampling for this long run.
    prof_obj <- profvis({
      ## start.from.mle set to allow manual specification of fasta files
      # requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
      # mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
      result <- SelacOptimize(codon.data.path = 'selac_paper_data/', phy = tree,
                              edge.length = 'optimize', optimal.aa = 'none', data.type='codon',
                              codon.model = 'GY94', nuc.model = 'GTR',
                              include.gamma = FALSE, gamma.type='quadrature', ncats = 4, numcode = 1,
                              diploid = FALSE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
                              n.cores.by.gene = nCores, max.restarts = 1, max.evals=20)
    }, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
    save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
    htmlwidgets::saveWidget(prof_obj,
                            file=paste0(profile_prefix,".Rprofvis.html"))
  })
  cat(sprintf("End: %s\tLL: %0.3f\n",
              profile_prefix,
              result$loglik))
  # Write the CSV header once, then append one row per run.
  if(!file.exists(paste0(src.key,"_LL_log.csv")))
    cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
        file=paste0(src.key,"_LL_log.csv"),
        append = T)
  cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
              src.key,
              nuc.model,
              gamma.type,
              selac_release,
              nCores,
              seed,
              result$loglik),
      file=paste0(src.key,"_LL_log.csv"),
      append = T)
  save(result,file=sprintf('selac_paper_output/yeastSalRokSelacGTRG_quad_%s.Rdata',profile_prefix))
  result$loglik
}
# Profile a capped E. coli TEM SelacOptimize() run (UNREST + quadrature gamma)
# with random starting parameter values and branch lengths.
#
# seed:   RNG seed (random by default); controls starting.vals and edge lengths.
# ref:    selac git ref to install/load via setup_selac_for_profiling().
# nCores: cores per gene for the optimizer.
# Side effects: writes <prefix>.Rprof / .Rprofvis.RData / .Rprofvis.html,
# appends to ecoliTEST_LL_log.csv, saves the result under ecoli_output/.
# Returns the optimized log-likelihood (NA if the run failed).
run_test_ecoli_optimize <- function(seed=sample.int(1e6,1),ref="v1.6.1-rc1", nCores=3){
  setup_selac_for_profiling(ref=ref)
  src.key="ecoliTEST"
  nuc.model = 'UNREST'
  gamma.type="quadrature"
  profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
                         src.key,
                         nuc.model,
                         gamma.type,
                         selac_release,
                         nCores,
                         seed)
  set.seed(seed)
  cat(sprintf("Start: %s\n",
              profile_prefix))
  tree<-read.tree('kosi07_data/kosi07_codonphyml_tree_TEM.newick')
  fasta.file="kosi07_data/aligned_KOSI07_TEM.fasta"
  output.file.name=sprintf('ecoli_output/%s_restart.Rdata',profile_prefix)
  result=list(loglik=NA)
  opt.aa.type <- "optimize"
  # Random starting values for the 15 model parameters and branch lengths.
  # NOTE(review): starting.vals is only saved into the result below; with
  # start.from.mle commented out it does not seed the optimizer -- confirm.
  starting.vals <- matrix(runif(n = 15, min = 0.01, max = 5), ncol = 15, nrow = 1)
  tree$edge.length <- runif(nrow(tree$edge), 0.01, 3)
  #nCores=3
  try({
    # interval=0.5 coarsens the profiler sampling; the run itself is capped
    # via max.evals/max.iterations/max.tol to keep the profile short.
    prof_obj <- profvis({
      ## start.from.mle set to allow manual specification of fasta files
      # requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
      # mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
      result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
                              edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
                              codon.model = 'selac', nuc.model = nuc.model, edge.linked=TRUE,
                              include.gamma = TRUE, gamma.type='quadrature', ncats = 4, numcode = 1,
                              diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
                              n.cores.by.gene = nCores, n.cores.by.gene.by.site=1,
                              max.restarts = 1, max.evals=20, max.tol=1e-2, max.iterations = 15,
                              fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
                              output.restart.filename=output.file.name)
      # output.restart.filename=output.file.name, start.from.mle = TRUE,
      # mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
    }, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
    save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
    htmlwidgets::saveWidget(prof_obj,
                            file=paste0(profile_prefix,".Rprofvis.html"))
  })
  cat(sprintf("End: %s\tLL: %0.3f\n",
              profile_prefix,
              result$loglik))
  # Write the CSV header once, then append one row per run.
  if(!file.exists(paste0(src.key,"_LL_log.csv")))
    cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
        file=paste0(src.key,"_LL_log.csv"),
        append = T)
  cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
              src.key,
              nuc.model,
              gamma.type,
              selac_release,
              nCores,
              seed,
              result$loglik),
      file=paste0(src.key,"_LL_log.csv"),
      append = T)
  cat("SELAC Done. saving results\n")
  # Record the seed and random starting state alongside the fit.
  result$seed <- seed
  result$startingValues <- starting.vals
  result$startingTree <- tree
  save(result,file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
  result$loglik
}
# Run one SELAC / SELAC-HMM profiling scenario on the E. coli TEM data set.
#
# mode selects the scenario: full or shortened SelacOptimize runs ("TEST",
# "SHORT*"), SelacHMMOptimize runs ("*HMM"), direct likelihood evaluations of
# the evolving-AA HMM likelihood ("FASTHMMTEST", "HMMEVAL50", "HMMEVALFULL"),
# and un-profiled debug walks through the likelihood ("FASTHMM*DEBUG").
#
# Side effects: writes profvis output (<prefix>.Rprof / .Rprofvis.RData),
# caches the fit in ecoli_output/<prefix>_result.Rdata, and appends a row to a
# per-source CSV log. A cached finite log-likelihood short-circuits the run.
#
# Depends on names defined elsewhere in this file / setup.R: selac_release,
# hmm.params, std.base.freq, std.nuc.params, std.gamma.shape, std.sel.reg,
# std.gamma, setup_selac_for_profiling(), and attached packages
# (profvis, ape, selac).
#
# Returns: the model log-likelihood (result$loglik).
run_ecoli_profile_mode <- function(mode=c("SHORTTEST","TEST","SHORT",
                                          "SHORTTESTHMM","SHORTHMM","LONGHMM",
                                          "FASTHMMTEST","HMMEVAL50","HMMEVALFULL",
                                          "FASTHMMDEBUG","FASTHMMSINGLEDEBUG"),
                                   seed=sample.int(1e6,1),
                                   codon.model=c("selac","none","GY94","YN98"),
                                   nuc.model=c("GTR","UNREST","JC"),
                                   ref="v1.6.1-rc1",
                                   include.gamma=T,
                                   gamma.type=c("quadrature","median","lognormal","none"),
                                   nCores=1){
  # Check out / load the requested selac revision before doing anything else.
  setup_selac_for_profiling(ref=ref)
  mode=match.arg(mode)
  src.key=paste0("ecoli",mode)
  codon.model = match.arg(codon.model)
  nuc.model = match.arg(nuc.model)
  # Reconcile include.gamma with gamma.type: gamma.mode is the label used in
  # file names; gamma.type is what is actually passed to the optimizers
  # (forced to "quadrature" when gamma is disabled).
  if(!include.gamma)
  { gamma.type="quadrature"; gamma.mode="none";}
  else {
    gamma.mode=gamma.type=match.arg(gamma.type)
  }
  if(gamma.type=="none"){
    include.gamma=F
    gamma.type="quadrature"
    gamma.mode="none"
  }
  # Unique prefix identifying this configuration; used for all output files.
  profile_prefix=sprintf("%s_%s_%s_%s_%s_%i_%i",
                         src.key,
                         codon.model,
                         nuc.model,
                         gamma.mode,
                         selac_release,
                         nCores,
                         seed)
  # Skip recomputation when a cached result with a finite loglik exists.
  if(file.exists(sprintf('ecoli_output/%s_result.Rdata',profile_prefix))){
    try({
      load(file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
      if(!is.null(result$loglik) && is.finite(result$loglik)) {
        cat(sprintf("Skip: %s\n",
                    profile_prefix))
        return(result$loglik)
      }
    })
    cat(sprintf("Rebuilding: %s\n",
                profile_prefix))
  }
  set.seed(seed)
  cat(sprintf("Start: %s\n",
              profile_prefix))
  tree<-read.tree('kosi07_data/kosi07_codonphyml_tree_TEM.newick')
  fasta.file="kosi07_data/aligned_KOSI07_TEM.fasta"
  output.file.name=sprintf('ecoli_output/%s_restart.Rdata',profile_prefix)
  result=list(loglik=NA)
  opt.aa.type <- "optimize"
  # random starting values
  starting.vals <- matrix(runif(n = 15, min = 0.01, max = 5), ncol = 15, nrow = 1)
  tree$edge.length <- runif(nrow(tree$edge), 0.01, 3)
  #nCores=3
  # "TEST": moderately long SelacOptimize run under profvis.
  if(mode=="TEST"){
    try({
      prof_obj <- profvis({
        ## start.from.mle set to allow manual specification of fasta files
        # requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
        # mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
        result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
                                edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
                                codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
                                include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
                                diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
                                n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
                                max.restarts = 1, max.evals=20, max.tol=1e-2, max.iterations = 15,
                                fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
                                output.restart.filename=output.file.name)
        # output.restart.filename=output.file.name, start.from.mle = TRUE,
        # mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
      }, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
      save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
      # htmlwidgets::saveWidget(prof_obj,
      #                         file=paste0(profile_prefix,".Rprofvis.html"))
    })
  # "SHORTTEST": single-eval smoke test of SelacOptimize.
  } else if(mode=="SHORTTEST"){
    try({
      prof_obj <- profvis({
        ## start.from.mle set to allow manual specification of fasta files
        # requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
        # mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
        result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
                                edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
                                codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
                                include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
                                diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
                                n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
                                max.restarts = 1, max.evals=1, max.tol=1e-2, max.iterations = 1,
                                fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
                                output.restart.filename=output.file.name)
        # output.restart.filename=output.file.name, start.from.mle = TRUE,
        # mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
      }, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
      save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
      # htmlwidgets::saveWidget(prof_obj,
      #                         file=paste0(profile_prefix,".Rprofvis.html"))
    })
  # "SHORTTESTHMM": single-eval smoke test of SelacHMMOptimize.
  }else if(mode=="SHORTTESTHMM"){
    # HMM code requires starting edge length < 0.5 and > 1e-8
    tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
    try({
      prof_obj <- profvis({
        ## start.from.mle set to allow manual specification of fasta files
        # requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
        # mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
        result <- SelacHMMOptimize(codon.data.path = 'kosi07_data/', phy = tree,
                                   edge.length = 'optimize', data.type='codon',
                                   codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
                                   include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
                                   diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = TRUE,
                                   n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
                                   max.restarts = 1, max.evals=1, max.tol=1e-2,
                                   fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
                                   output.restart.filename=output.file.name, max.iterations=1)
        # output.restart.filename=output.file.name, start.from.mle = TRUE,
        # mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
      }, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
      save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
      # htmlwidgets::saveWidget(prof_obj,
      #                         file=paste0(profile_prefix,".Rprofvis.html"))
    })
  # "SHORT": same as SHORTTEST but with a finer profvis sampling interval.
  }else if(mode=="SHORT"){
    try({
      prof_obj <- profvis({
        ## start.from.mle set to allow manual specification of fasta files
        # requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
        # mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
        result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
                                edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
                                codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
                                include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
                                diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
                                n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
                                max.restarts = 1, max.evals=1, max.tol=1e-2, max.iterations = 1,
                                fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
                                output.restart.filename=output.file.name)
        # output.restart.filename=output.file.name, start.from.mle = TRUE,
        # mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
      }, prof_output = paste0(profile_prefix,".Rprof"),interval=0.05)
      save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
      # htmlwidgets::saveWidget(prof_obj,
      #                         file=paste0(profile_prefix,".Rprofvis.html"))
    })
  # "SHORTHMM": single-eval SelacHMMOptimize, non-verbose.
  } else if(mode=="SHORTHMM"){
    # HMM code requires starting edge length < 0.5 and > 1e-8
    tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
    try({
      prof_obj <- profvis({
        ## start.from.mle set to allow manual specification of fasta files
        # requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
        # mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
        result <- SelacHMMOptimize(codon.data.path = 'kosi07_data/', phy = tree,
                                   edge.length = 'optimize',data.type='codon',
                                   codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
                                   include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
                                   diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
                                   n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
                                   max.restarts = 1, max.evals=1, max.tol=1e-2,
                                   fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
                                   output.restart.filename=output.file.name, max.iterations=1)
        # output.restart.filename=output.file.name, start.from.mle = TRUE,
        # mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
      }, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
      save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
      # htmlwidgets::saveWidget(prof_obj,
      #                         file=paste0(profile_prefix,".Rprofvis.html"))
    })
  # "LONGHMM": SelacHMMOptimize with more evals/iterations.
  } else if(mode=="LONGHMM"){
    # HMM code requires starting edge length < 0.5 and > 1e-8
    tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
    try({
      prof_obj <- profvis({
        ## start.from.mle set to allow manual specification of fasta files
        # requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
        # mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
        result <- SelacHMMOptimize(codon.data.path = 'kosi07_data/', phy = tree,
                                   edge.length = 'optimize',data.type='codon',
                                   codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
                                   include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
                                   diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
                                   n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
                                   max.restarts = 1, max.evals=5, max.tol=1e-2,
                                   fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
                                   output.restart.filename=output.file.name, max.iterations=5)
        # output.restart.filename=output.file.name, start.from.mle = TRUE,
        # mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
      }, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
      save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
      # htmlwidgets::saveWidget(prof_obj,
      #                         file=paste0(profile_prefix,".Rprofvis.html"))
    })
  # "FASTHMMDEBUG": one direct likelihood evaluation on a random 10-site
  # subsample, no profvis, using the selac-internal evolving-AA likelihood.
  } else if(mode=="FASTHMMDEBUG") {
    # Cache the numeric codon recoding of the alignment on first use.
    if(!file.exists("ecoli_chars.RData")){
      tmp.gene <- read.dna(fasta.file, format="fasta")
      tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
      chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
      save(chars,file="ecoli_chars.RData")
    } else {
      load(file="ecoli_chars.RData")
    }
    # Column 1 is the label column; keep it plus 10 random sites.
    codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,10))]
    codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
    codon.data <- selac:::SitePattern(codon.data)
    codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
    # Assemble the parameter vector in the order the likelihood expects.
    model.params = hmm.params
    if(nuc.model != "UNREST")
      model.params=c(model.params,std.base.freq)
    model.params=c(model.params,std.nuc.params[[nuc.model]])
    lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
    if(include.gamma)
      model.params=c(model.params,std.gamma.shape)
    model.params = c(model.params,std.sel.reg)
    tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
    # try({
    lSAC.c4mc.full(log(model.params),
                   codon.data=codon.data,
                   phy=tree,
                   codon.freq.by.aa=NULL,
                   codon.freq.by.gene=codon.freq.by.gene,
                   numcode=1,
                   diploid=TRUE,
                   aa.properties=NULL,
                   volume.fixed.value=std.gamma,
                   nuc.model=nuc.model,
                   codon.index.matrix=codon.index.matrix,
                   include.gamma=include.gamma,
                   gamma.type=gamma.type,
                   ncats=4,
                   k.levels=0,
                   logspace=TRUE,
                   verbose=TRUE,
                   n.cores.by.gene.by.site=nCores,
                   estimate.importance=FALSE) -> result$loglik
    # })
  # "FASTHMMSINGLEDEBUG": an unrolled, in-function copy of the likelihood
  # internals so individual steps can be stepped through in a debugger.
  # NOTE(review): this branch appears to be an unfinished copy-paste from the
  # selac internals; several names it uses are unbound here (see notes below).
  }else if(mode=="FASTHMMSINGLEDEBUG") {
    if(!file.exists("ecoli_chars.RData")){
      tmp.gene <- read.dna(fasta.file, format="fasta")
      tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
      chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
      save(chars,file="ecoli_chars.RData")
    } else {
      load(file="ecoli_chars.RData")
    }
    codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,10))]
    codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
    codon.data <- selac:::SitePattern(codon.data)
    codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
    model.params = hmm.params
    if(nuc.model != "UNREST")
      model.params=c(model.params,std.base.freq)
    model.params=c(model.params,std.nuc.params[[nuc.model]])
    # lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
    # Local debug copy of the selac-internal per-site likelihood driver.
    GetLikelihoodSAC_CodonForManyCharVaryingBySiteEvolvingAA <- function(codon.data, phy, Q_codon_array, codon.freq.by.aa=NULL, codon.freq.by.gene=NULL, aa.optim_array, codon_mutation_matrix, Ne, rates, numcode, diploid, n.cores.by.gene.by.site=1, verbose=FALSE){
      nsites.unique <- dim(codon.data$unique.site.patterns)[2]-1
      final.likelihood.vector <- rep(NA, nsites.unique)
      #We rescale the codon matrix only:
      diag(codon_mutation_matrix) = 0
      diag(codon_mutation_matrix) = -rowSums(codon_mutation_matrix)
      scale.factor <- -sum(diag(codon_mutation_matrix) * codon.freq.by.gene, na.rm=TRUE)
      codon_mutation_matrix_scaled = codon_mutation_matrix * (1/scale.factor)
      #Finish the Q_array codon mutation matrix multiplication here:
      if(diploid == TRUE){
        Q_codon_array = (2 * Ne) * codon_mutation_matrix_scaled * Q_codon_array
      }else{
        Q_codon_array = Ne * codon_mutation_matrix_scaled * Q_codon_array
      }
      diag(Q_codon_array) = 0
      diag(Q_codon_array) = -rowSums(Q_codon_array)
      #Put the na.rm=TRUE bit here just in case -- when the amino acid is a stop codon, there is a bunch of NaNs. Should be fixed now.
      #scale.factor <- -sum(Q_codon_array[DiagArray(dim(Q_codon_array))] * equilibrium.codon.freq, na.rm=TRUE)
      ## This is obviously not very elegant, but not sure how else to code it to store this stuff in this way -- WORK IN PROGRESS:
      #expQt <- GetExpQt(phy=phy, Q=Q_codon_array, scale.factor=NULL, rates=rates)
      #Generate matrix of root frequencies for each optimal AA:
      root.p_array <- codon.freq.by.gene
      #root.p_array <- t(root.p_array)
      #root.p_array <- root.p_array / rowSums(root.p_array)
      #rownames(root.p_array) <- .unique.aa
      phy.sort <- reorder(phy, "pruningwise")
      # Q_codon_array_vectored <- c(t(Q_codon_array)) # has to be transposed
      # Q_codon_array_vectored <- Q_codon_array_vectored[.non_zero_pos]
      anc.indices <- unique(phy.sort$edge[,1])
      if(verbose){
        MultiCoreLikelihoodBySite <- function(nsite.index){
          tmp <- selac:::GetLikelihoodSAC_CodonForSingleCharGivenOptimumHMMScoring(charnum=nsite.index, codon.data=codon.data$unique.site.patterns,
                                                                                  phy=phy.sort, Q_codon_array=Q_codon_array,
                                                                                  root.p=root.p_array, scale.factor=scale.factor,
                                                                                  anc.indices=anc.indices, return.all=FALSE)
          cat(".")
          return(tmp)
        }
      } else {
        # NOTE(review): Q_codon_array_vectored is never defined in this scope
        # (the assignments above are commented out), so this non-verbose path
        # would fail with an object-not-found error -- confirm.
        MultiCoreLikelihoodBySite <- function(nsite.index){
          tmp <- selac:::GetLikelihoodSAC_CodonForSingleCharGivenOptimumHMMScoring(charnum=nsite.index, codon.data=codon.data$unique.site.patterns, phy=phy.sort, Q_codon_array_vectored=Q_codon_array_vectored, root.p=root.p_array, scale.factor=scale.factor, anc.indices=anc.indices, return.all=FALSE)
          return(tmp)
        }
      }
      final.likelihood.vector <- unlist(lapply(1:nsites.unique, MultiCoreLikelihoodBySite))
      if(verbose) cat("|\n")
      return(final.likelihood.vector)
    }
    if(include.gamma)
      model.params=c(model.params,std.gamma.shape)
    model.params = c(model.params,std.sel.reg)
    tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
    # Unpack the parameter vector by hand, mirroring the selac internals.
    phy=tree; verbose=T;
    diploid=T; numcode=1
    x <- model.params
    k.levels =0
    importance.of.aa.dist.in.selective.environment.change = 1
    rate.for.selective.environment.change = x[length(x)]
    x = x[-length(x)]
    aa.properties=NULL
    if(include.gamma == TRUE){
      shape = x[length(x)]
      x = x[-length(x)]
    }
    # Fixed population-genetic constants used to back out Phi from C.Phi.q.Ne.
    C.Phi.q.Ne <- x[1]
    C <- 4
    q <- 4e-7
    Ne <- 5e6
    Phi.q.Ne <- C.Phi.q.Ne / C
    Phi.Ne <- Phi.q.Ne / q
    Phi <- Phi.Ne / Ne
    alpha <- x[2]
    beta <- x[3]
    gamma <- 0.0003990333 #volume.fixed.value
    if(k.levels > 0){
      if(nuc.model == "JC") {
        base.freqs=c(x[4:6], 1-sum(x[4:6]))
        #During the early stages of the optimization process it will try weird values for the base frequencies.
        stopifnot(!any(base.freqs < 0))
        if(any(base.freqs < 0)){
          return(1000000)
        }
        nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(1, model=nuc.model, base.freqs=base.freqs)
        poly.params <- x[7:8]
      }
      if(nuc.model == "GTR") {
        base.freqs=c(x[4:6], 1-sum(x[4:6]))
        #During the early stages of the optimization process it will try weird values for the base frequencies.
        stopifnot(!any(base.freqs < 0))
        if(any(base.freqs < 0)){
          return(1000000)
        }
        nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[9:length(x)], model=nuc.model, base.freqs=base.freqs)
        poly.params <- x[7:8]
      }
      if(nuc.model == "UNREST") {
        nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[6:length(x)], model=nuc.model, base.freqs=NULL)
        poly.params <- x[4:5]
      }
    }else{
      if(nuc.model == "JC") {
        base.freqs=c(x[4:6], 1-sum(x[4:6]))
        #During the early stages of the optimization process it will try weird values for the base frequencies.
        stopifnot(!any(base.freqs < 0))
        if(any(base.freqs < 0)){
          return(1000000)
        }
        nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(1, model=nuc.model, base.freqs=base.freqs)
      }
      if(nuc.model == "GTR") {
        base.freqs=c(x[4:6], 1-sum(x[4:6]))
        #During the early stages of the optimization process it will try weird values for the base frequencies.
        stopifnot(!any(base.freqs < 0))
        if(any(base.freqs < 0)){
          return(1000000)
        }
        nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[7:length(x)], model=nuc.model, base.freqs=base.freqs)
      }
      if(nuc.model == "UNREST") {
        nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[4:length(x)], model=nuc.model, base.freqs=NULL)
      }
    }
    nuc.mutation.rates.vector <- c(nuc.mutation.rates, rate.for.selective.environment.change)
    codon_mutation_matrix <- matrix(nuc.mutation.rates.vector[codon.index.matrix], dim(codon.index.matrix))
    codon_mutation_matrix[is.na(codon_mutation_matrix)]=0
    nsites.unique <- dim(codon.data$unique.site.patterns)[2]-1
    nsites <- sum(codon.data$site.pattern.counts)
    if(include.gamma==TRUE){
      # NOTE(review): ncats, DiscreteGamma, LaguerreQuad and LogNormalQuad are
      # not defined in this function's scope -- presumably they resolve to
      # globals / attached-package exports; confirm before relying on this
      # branch.
      if(gamma.type == "median"){
        rates.k <- DiscreteGamma(shape=shape, ncats=ncats)
        weights.k <- rep(1/ncats, ncats)
      }
      if(gamma.type == "quadrature"){
        rates.and.weights <- LaguerreQuad(shape=shape, ncats=ncats)
        rates.k <- rates.and.weights[1:ncats]
        weights.k <- rates.and.weights[(ncats+1):(ncats*2)]
      }
      if(gamma.type == "lognormal"){
        rates.and.weights <- LogNormalQuad(shape=shape, ncats=ncats)
        rates.k <- rates.and.weights[1:ncats]
        weights.k <- rates.and.weights[(ncats+1):(ncats*2)]
      }
      #ttmmpp <- c(nuc.mutation.rates.vector, nsites.unique, nsites, C, Phi, rates.k, q, Ne, shape, importance.of.aa.dist.in.selective.environment.change)
      #writeLines(text = paste(ttmmpp), con = "~/Desktop/selac_parameter.txt", sep = "\t")
      final.likelihood.mat = matrix(0, nrow=ncats, ncol=nsites.unique)
      for(k.cat in sequence(ncats)){
        if(k.levels > 0){
          aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=poly.params, k=k.levels)
        }else{
          aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=NULL, k=k.levels)
        }
        Q_codon_array <- selac:::FastCreateEvolveAACodonFixationProbabilityMatrix(aa.distances=aa.distances, nsites=nsites, C=C, Phi=Phi*rates.k[k.cat], q=q, Ne=Ne, include.stop.codon=TRUE, numcode=numcode, diploid=diploid, flee.stop.codon.rate=0.9999999, importance.of.aa.dist.in.selective.environment.change) #Cedric: added importance
        # NOTE(review): codon.freq.by.aa is not defined anywhere in this
        # branch before this use -- confirm.
        final.likelihood.mat[k.cat,] = GetLikelihoodSAC_CodonForManyCharVaryingBySiteEvolvingAA(codon.data, phy, Q_codon_array,
                                                                                               codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene, codon_mutation_matrix=codon_mutation_matrix,
                                                                                               Ne=Ne, rates=NULL, numcode=numcode, diploid=diploid, n.cores.by.gene.by.site=n.cores.by.gene.by.site, verbose=verbose)
      }
      likelihood <- sum(log(colSums(exp(final.likelihood.mat)*weights.k)) * codon.data$site.pattern.counts)
    }else{
      if(k.levels > 0){
        aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=poly.params, k=k.levels)
      }else{
        aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=NULL, k=k.levels)
      }
      Q_codon_array <- selac:::FastCreateEvolveAACodonFixationProbabilityMatrix(aa.distances=aa.distances, nsites=nsites, C=C, Phi=Phi, q=q, Ne=Ne, include.stop.codon=TRUE, numcode=numcode, diploid=diploid, flee.stop.codon.rate=0.9999999, importance.of.aa.dist.in.selective.environment.change) #Cedric: added importance
      final.likelihood = GetLikelihoodSAC_CodonForManyCharVaryingBySiteEvolvingAA(codon.data, phy, Q_codon_array, codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene, codon_mutation_matrix=codon_mutation_matrix, Ne=Ne, rates=NULL, numcode=numcode, diploid=diploid, n.cores.by.gene.by.site=n.cores.by.gene.by.site, verbose=verbose)
      likelihood <- sum(final.likelihood * codon.data$site.pattern.counts)
    }
    # NOTE(review): neglnl is never defined in this scope; this line would
    # error when reached -- confirm it was meant to be a parameter (the selac
    # internal this was copied from takes neglnl as an argument).
    if(neglnl) {
      likelihood <- -1 * likelihood
    }
    if(verbose > 1) {
      results.vector <- c(likelihood, C*Phi*q, alpha, beta, gamma, Ne, ape::write.tree(phy))
      names(results.vector) <- c("likelihood", "C.Phi.q.Ne", "alpha", "beta", "gamma", "Ne", "phy")
      print(results.vector)
    }else if(verbose){
      results.vector <- c(likelihood, alpha, beta, gamma)
      names(results.vector) <- c("likelihood", "alpha", "beta", "gamma")
      print(results.vector)
    }
    if(is.na(likelihood) || is.nan(likelihood)){
      res <-1000000
    }else{
      res <- likelihood
    }
    result$loglik <-res
    # NOTE(review): lSAC.c4mc.full is never assigned in this branch (the
    # assignment above is commented out), so this call would fail and clobber
    # the result computed just above -- confirm intent.
    # try({
    lSAC.c4mc.full(log(model.params),
                   codon.data=codon.data,
                   phy=tree,
                   codon.freq.by.aa=NULL,
                   codon.freq.by.gene=codon.freq.by.gene,
                   numcode=1,
                   diploid=TRUE,
                   aa.properties=NULL,
                   volume.fixed.value=std.gamma,
                   nuc.model=nuc.model,
                   codon.index.matrix=codon.index.matrix,
                   include.gamma=include.gamma,
                   gamma.type=gamma.type,
                   ncats=4,
                   k.levels=0,
                   logspace=TRUE,
                   verbose=TRUE,
                   n.cores.by.gene.by.site=nCores,
                   estimate.importance=FALSE) -> result$loglik
    # })
  # "FASTHMMTEST": profile one likelihood evaluation on 10 random sites.
  }else if(mode=="FASTHMMTEST") {
    if(!file.exists("ecoli_chars.RData")){
      tmp.gene <- read.dna(fasta.file, format="fasta")
      tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
      chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
      save(chars,file="ecoli_chars.RData")
    } else {
      load(file="ecoli_chars.RData")
    }
    codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,10))]
    codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
    codon.data <- selac:::SitePattern(codon.data)
    codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
    model.params = hmm.params
    if(nuc.model != "UNREST")
      model.params=c(model.params,std.base.freq)
    model.params=c(model.params,std.nuc.params[[nuc.model]])
    lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
    if(include.gamma)
      model.params=c(model.params,std.gamma.shape)
    model.params = c(model.params,std.sel.reg)
    # tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
    try({
      prof_obj <- profvis({
        lSAC.c4mc.full(log(model.params),
                       codon.data=codon.data,
                       phy=tree,
                       codon.freq.by.aa=NULL,
                       codon.freq.by.gene=codon.freq.by.gene,
                       numcode=1,
                       diploid=TRUE,
                       aa.properties=NULL,
                       volume.fixed.value=std.gamma,
                       nuc.model=nuc.model,
                       codon.index.matrix=codon.index.matrix,
                       include.gamma=include.gamma,
                       gamma.type=gamma.type,
                       ncats=4,
                       k.levels=0,
                       logspace=TRUE,
                       verbose=TRUE,
                       n.cores.by.gene.by.site=nCores,
                       estimate.importance=FALSE) -> result$loglik
      }, prof_output = paste0(profile_prefix,".Rprof"),interval=0.05)
      save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
      # htmlwidgets::saveWidget(prof_obj,
      #                         file=paste0(profile_prefix,".Rprofvis.html"))
    })
  # "HMMEVAL50": same as FASTHMMTEST but with 50 random sites.
  } else if(mode=="HMMEVAL50") {
    if(!file.exists("ecoli_chars.RData")){
      tmp.gene <- read.dna(fasta.file, format="fasta")
      tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
      chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
      save(chars,file="ecoli_chars.RData")
    } else {
      load(file="ecoli_chars.RData")
    }
    codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,50))]
    codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
    codon.data <- selac:::SitePattern(codon.data)
    codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
    model.params = hmm.params
    if(nuc.model != "UNREST")
      model.params=c(model.params,std.base.freq)
    model.params=c(model.params,std.nuc.params[[nuc.model]])
    lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
    if(include.gamma)
      model.params=c(model.params,std.gamma.shape)
    model.params = c(model.params,std.sel.reg)
    # tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
    try({
      prof_obj <- profvis({
        lSAC.c4mc.full(log(model.params),
                       codon.data=codon.data,
                       phy=tree,
                       codon.freq.by.aa=NULL,
                       codon.freq.by.gene=codon.freq.by.gene,
                       numcode=1,
                       diploid=TRUE,
                       aa.properties=NULL,
                       volume.fixed.value=std.gamma,
                       nuc.model=nuc.model,
                       codon.index.matrix=codon.index.matrix,
                       include.gamma=include.gamma,
                       gamma.type=gamma.type,
                       ncats=4,
                       k.levels=0,
                       logspace=TRUE,
                       verbose=TRUE,
                       n.cores.by.gene.by.site=nCores,
                       estimate.importance=FALSE) -> result$loglik
      }, prof_output = paste0(profile_prefix,".Rprof"),interval=0.05)
      save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
      # htmlwidgets::saveWidget(prof_obj,
      #                         file=paste0(profile_prefix,".Rprofvis.html"))
    })
  # "HMMEVALFULL": likelihood evaluation on the full alignment.
  } else if(mode=="HMMEVALFULL") {
    if(!file.exists("ecoli_chars.RData")){
      tmp.gene <- read.dna(fasta.file, format="fasta")
      tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
      chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
      save(chars,file="ecoli_chars.RData")
    } else {
      load(file="ecoli_chars.RData")
    }
    codon.data <- chars[tree$tip.label,]
    codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
    codon.data <- selac:::SitePattern(codon.data)
    codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
    model.params = hmm.params
    if(nuc.model != "UNREST")
      model.params=c(model.params,std.base.freq)
    model.params=c(model.params,std.nuc.params[[nuc.model]])
    lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
    if(include.gamma)
      model.params=c(model.params,std.gamma.shape)
    model.params = c(model.params,std.sel.reg)
    # tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
    try({
      prof_obj <- profvis({
        lSAC.c4mc.full(log(model.params),
                       codon.data=codon.data,
                       phy=tree,
                       codon.freq.by.aa=NULL,
                       codon.freq.by.gene=codon.freq.by.gene,
                       numcode=1,
                       diploid=TRUE,
                       aa.properties=NULL,
                       volume.fixed.value=std.gamma,
                       nuc.model=nuc.model,
                       codon.index.matrix=codon.index.matrix,
                       include.gamma=include.gamma,
                       gamma.type=gamma.type,
                       ncats=4,
                       k.levels=0,
                       logspace=TRUE,
                       verbose=TRUE,
                       n.cores.by.gene.by.site=nCores,
                       estimate.importance=FALSE) -> result$loglik
      }, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
      save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
      # htmlwidgets::saveWidget(prof_obj,
      #                         file=paste0(profile_prefix,".Rprofvis.html"))
    })
  } else {
    cat(sprintf("Request for %s mode not understood.\n",as.character(mode)))
  }
  # Log the run to the per-source CSV (writing the header on first use) and
  # cache the full result object for the skip check above.
  cat(sprintf("End: %s\tLL: %0.3f\n",
              profile_prefix,
              result$loglik))
  if(!file.exists(paste0(src.key,"_",codon.model,"_LL_log.csv")))
    cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
        file=paste0(src.key,"_",codon.model,"_LL_log.csv"),
        append = T)
  cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
              src.key,
              nuc.model,
              gamma.mode,
              selac_release,
              nCores,
              seed,
              result$loglik),
      file=paste0(src.key,"_",codon.model,"_LL_log.csv"),
      append = T)
  cat("SELAC Done. saving results\n")
  result$seed <- seed
  result$startingValues <- starting.vals
  result$startingTree <- tree
  save(result,file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
  result$loglik
}
# Debug variant: run a single SelacOptimize fit on the E. coli TEM data set
# WITHOUT profvis wrapping, so errors surface directly. Uses the same
# disk-caching, seeding and CSV-logging conventions as run_ecoli_profile_mode.
#
# seed:   RNG seed (random by default); ref: selac git revision to load;
# nCores: parallel workers passed to n.cores.by.gene.
# NOTE(review): auto.skip is accepted but never used in the body -- presumably
# it was meant to gate the cached-result skip below; confirm.
#
# Returns: the model log-likelihood (result$loglik).
run_test_ecoli_optimize_no_profile <- function(seed=sample.int(1e6,1),ref="v1.6.1-rc1", nCores=1, auto.skip=T){
  setup_selac_for_profiling(ref=ref)
  src.key="ecoliDEBUG"
  nuc.model = 'UNREST'
  gamma.type="quadrature"
  profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
                         src.key,
                         nuc.model,
                         gamma.type,
                         selac_release,
                         nCores,
                         seed)
  # Skip recomputation when a cached result with a finite loglik exists.
  if(file.exists(sprintf('ecoli_output/%s_result.Rdata',profile_prefix))){
    try({
      load(file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
      if(!is.null(result$loglik) && is.finite(result$loglik)) {
        cat(sprintf("Skip: %s\n",
                    profile_prefix))
        return(result$loglik)
      }
    })
    cat(sprintf("Rebuilding: %s\n",
                profile_prefix))
  }
  set.seed(seed)
  cat(sprintf("Start: %s\n",
              profile_prefix))
  tree<-read.tree('kosi07_data/kosi07_codonphyml_tree_TEM.newick')
  fasta.file="kosi07_data/aligned_KOSI07_TEM.fasta"
  output.file.name=sprintf('ecoli_output/%s_restart.Rdata',profile_prefix)
  result=list(loglik=NA)
  opt.aa.type <- "optimize"
  # random starting values
  starting.vals <- matrix(runif(n = 15, min = 0.01, max = 5), ncol = 15, nrow = 1)
  tree$edge.length <- runif(nrow(tree$edge), 0.01, 3)
  #nCores=3
  # try({
  #   prof_obj <- profvis({
  ## start.from.mle set to allow manual specification of fasta files
  # requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
  # mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
  # Full optimization run (verbose, no profiler). Note: here parallelism is
  # per-gene (n.cores.by.gene=nCores), unlike the per-site setting used in
  # run_ecoli_profile_mode.
  result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
                          edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
                          codon.model = 'selac', nuc.model = nuc.model, edge.linked=TRUE,
                          include.gamma = TRUE, gamma.type='quadrature', ncats = 4, numcode = 1,
                          diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = TRUE,
                          n.cores.by.gene = nCores, n.cores.by.gene.by.site=1,
                          max.restarts = 1, max.evals=20, max.tol=1e-2, max.iterations = 15,
                          fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
                          output.restart.filename=output.file.name)
  # output.restart.filename=output.file.name, start.from.mle = TRUE,
  # mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
  # }, prof_output = paste0(profile_prefix,".Rprof"),interval=1)
  # save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
  # htmlwidgets::saveWidget(prof_obj,
  #                         file=paste0(profile_prefix,".Rprofvis.html"))
  # })
  # Log the run to the CSV (writing the header on first use) and cache the
  # full result object for the skip check above.
  cat(sprintf("End: %s\tLL: %0.3f\n",
              profile_prefix,
              result$loglik))
  if(!file.exists(paste0(src.key,"_LL_log.csv")))
    cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
        file=paste0(src.key,"_LL_log.csv"),
        append = T)
  cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
              src.key,
              nuc.model,
              gamma.type,
              selac_release,
              nCores,
              seed,
              result$loglik),
      file=paste0(src.key,"_LL_log.csv"),
      append = T)
  cat("SELAC Done. saving results\n")
  result$seed <- seed
  result$startingValues <- starting.vals
  result$startingTree <- tree
  save(result,file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
  result$loglik
}
|
/R_profile_helper.R
|
no_license
|
GrahamDB/testing_selac
|
R
| false
| false
| 58,407
|
r
|
#Profile basic test
# Shared configuration: defines user_path, selac_release and
# setup_selac_for_profiling() used throughout this file.
source("setup.R")
# Make sure the profiling/report packages are available in the user library.
# NOTE(review): require() is used deliberately as an availability probe; on
# failure the package is installed into `user_path` (defined in setup.R —
# TODO confirm) and probed once more before giving up.
local({
if(!require(profvis,lib.loc = user_path)) {
install.packages("profvis",lib = user_path)
if(!require(profvis,lib.loc = user_path)) {
stop("Failed to install profiler")
}
}
# Needed to render/save profvis output as self-contained HTML widgets.
library(htmlwidgets,lib.loc = user_path)
library(jsonlite,lib.loc = user_path)
library(yaml,lib.loc = user_path)
invisible(T)
})
# Echo which selac revision is being profiled (set by setup.R).
print(selac_release)
# setup_selac_for_profiling()
# Evaluate the SELAC HMM (evolving amino-acid) log-likelihood once, at the
# hmm.params/std.* reference point defined later in this file, on a random
# 10-site subsample of the alignment in `fasta.file`. No optimization happens.
#
# phy        : phylo tree; tip labels must match the fasta sequence names.
# fasta.file : path to a codon alignment in fasta format.
# nuc.model  : nucleotide mutation model (HKY is rejected below).
# gamma.type : "none" disables rate heterogeneity; the type is then reset to
#              "quadrature" because the downstream call still needs a valid value.
# nCores     : forwarded as n.cores.by.gene.by.site.
# Returns whatever GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
# returns for this single evaluation.
test_selac_hmm <- function(phy,
                           fasta.file,
                           nuc.model=c("JC", "GTR", "HKY", "UNREST"),
                           gamma.type=c("none", "median","quadrature","lognormal" ),
                           nCores=1){
nuc.model=match.arg(nuc.model)
gamma.type=match.arg(gamma.type)
if(nuc.model == "HKY") stop("HKY model not implemented for GetLikelihoodSAC_CodonForManyCharGivenAllParams.")
include.gamma = (gamma.type != "none")
if(!include.gamma) gamma.type = "quadrature"
# Read the alignment and convert it to selac's numeric codon coding.
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
# Keep the label column (1) plus 10 randomly sampled codon sites. This
# consumes RNG state, so call set.seed() beforehand for reproducibility.
codon.data <- chars[phy$tip.label,c(1,1+sample(ncol(chars)-1,10))]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
# Assemble the parameter vector in the order the likelihood expects:
# hmm.params, [base freqs unless UNREST], nucleotide rates, [gamma shape],
# selective-regime switching rate.
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
# Parameters are handed over in log space (logspace=TRUE below).
lSAC.c4mc.full(log(model.params),
               codon.data=codon.data,
               phy=phy,
               codon.freq.by.aa=NULL,
               codon.freq.by.gene=codon.freq.by.gene,
               numcode=1,
               diploid=TRUE,
               aa.properties=NULL,
               volume.fixed.value=std.gamma,
               nuc.model=nuc.model,
               codon.index.matrix=codon.index.matrix,
               include.gamma=include.gamma,
               gamma.type=gamma.type,
               ncats=4,
               k.levels=0,
               logspace=TRUE,
               verbose=TRUE,
               n.cores.by.gene.by.site=nCores,
               estimate.importance=FALSE) -> res
return(res)
}
# basic loader to build further tests
# Prepares the Rokas yeast gene-1 inputs (pruned tree, numeric codon data,
# majority-rule optimal amino acids, codon frequencies, site patterns).
# NOTE(review): every intermediate here is a local that is discarded on
# return; only the LAST expression — the codon mutation index matrix from
# CreateCodonMutationMatrixIndex() — is actually returned. It looks like this
# was meant to return a list of all prepared objects; confirm before relying
# on it (load_rokasYeast() below is the loader the runners actually use).
load_inputs <- function(){
tree <- read.tree("rokasYeast.tre")
# Drop the Candida albicans outgroup.
phy <- drop.tip(tree, "Calb")
yeast.gene <- read.dna("gene1Yeast.fasta", format="fasta")
# Keep the first seven sequences as a list of DNAbin rows.
yeast.gene <- as.list(as.matrix(cbind(yeast.gene))[1:7,])
chars <- selac:::DNAbinToCodonNumeric(yeast.gene)
codon.data <- chars[phy$tip.label,]
aa.data <- selac:::ConvertCodonNumericDataToAAData(codon.data, numcode=1)
aa.optim <- apply(aa.data[, -1], 2, selac:::GetMaxName) #starting values for all, final values for majrule
aa.optim.full.list <- aa.optim
codon.freq.by.aa <- selac:::GetCodonFreqsByAA(codon.data[,-1], aa.optim, numcode=1)
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
# Append the optimal-amino-acid row before building site patterns.
aa.optim.frame.to.add <- matrix(c("optimal", aa.optim), 1, dim(codon.data)[2])
colnames(aa.optim.frame.to.add) <- colnames(codon.data)
codon.data <- rbind(codon.data, aa.optim.frame.to.add)
codon.data <- selac:::SitePattern(codon.data, includes.optimal.aa=TRUE)
aa.optim = codon.data$optimal.aa
codon.index.matrix = selac:::CreateCodonMutationMatrixIndex()
}
# Load the Rokas yeast example data set: the tree pruned of the Candida
# albicans outgroup plus the gene-1 alignment in selac's numeric codon
# coding, packaged as the (input.key, phy, codon.data) list run_profile()
# expects.
load_rokasYeast <- function(){
  # Tree with the "Calb" outgroup tip removed.
  pruned.tree <- drop.tip(read.tree("rokasYeast.tre"), "Calb")
  # First seven sequences of the alignment, as a list of DNAbin rows.
  seqs <- read.dna("gene1Yeast.fasta", format="fasta")
  seqs <- as.list(as.matrix(cbind(seqs))[1:7,])
  # Numeric codon states, reordered to match the tip labels.
  numeric.codons <- selac:::DNAbinToCodonNumeric(seqs)
  list(input.key="rokasYeast",
       phy=pruned.tree,
       codon.data=numeric.codons[pruned.tree$tip.label,])
}
# Likelihood smoke test: GTR model with quadrature gamma heterogeneity,
# evaluated at the reference parameter point. Relies on globals prepared
# beforehand (lSAC.c4mc.full, codon.data, phy, aa.optim, codon.freq.by.aa,
# codon.freq.by.gene, codon.index.matrix).
test_selac.gamma.quadrature <- function(){
  # Parameter point: C*Phi*q*Ne, alpha, beta, base freqs (A,C,G),
  # five GTR rates, gamma shape — passed in log space.
  par.point <- c(4*4e-7*.5*5e6, 1.829272, 0.101799, .25, .25, .25, rep(1,5), 5)
  lSAC.c4mc.full(log(par.point),
    codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
    codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
    numcode=1, diploid=TRUE, aa.properties=NULL,
    volume.fixed.value=0.0003990333,
    nuc.model="GTR",
    codon.index.matrix=codon.index.matrix,
    include.gamma=TRUE,
    gamma.type="quadrature",
    ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
    n.cores.by.gene.by.site=1)
}
# Likelihood smoke test: GTR model with median gamma heterogeneity, evaluated
# at the reference parameter point. Mirrors test_selac.gamma.quadrature with
# gamma.type="median". Relies on globals prepared beforehand (lSAC.c4mc.full,
# codon.data, phy, aa.optim, codon.freq.by.aa, codon.freq.by.gene,
# codon.index.matrix).
test_selac.gamma.median <- function(){
  lSAC.c4mc.full(log(c(4*4e-7*.5*5e6, 1.829272, 0.101799, .25, .25, .25, rep(1,5), 5)),
    # FIX: was `codon_data=` (underscore). That name cannot match the
    # `codon.data` formal (R matches argument names exactly or by prefix,
    # and "codon_data" is neither), so the alignment was never bound to
    # the likelihood's data argument.
    codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
    codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
    numcode=1, diploid=TRUE, aa.properties=NULL,
    volume.fixed.value=0.0003990333,
    nuc.model="GTR",
    codon.index.matrix=codon.index.matrix,
    include.gamma=TRUE,
    gamma.type="median",
    ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE)
}
# Likelihood smoke test: UNREST nucleotide model, no gamma heterogeneity,
# at the reference parameter point. Relies on globals prepared beforehand
# (lSAC.c4mc.full, codon.data, phy, aa.optim, codon.freq.by.aa,
# codon.freq.by.gene, codon.index.matrix).
test_selac.unrest <- function(){
  # Parameter point: C*Phi*q*Ne, alpha, beta, then 11 UNREST rates —
  # UNREST carries no explicit base frequencies.
  par.point <- log(c(4*4e-7*.5*5e6, 1.829272, 0.101799, rep(1,11)))
  lSAC.c4mc.full(par.point,
    codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
    codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
    numcode=1, diploid=TRUE, aa.properties=NULL,
    volume.fixed.value=0.0003990333,
    nuc.model="UNREST",
    codon.index.matrix=codon.index.matrix,
    include.gamma=FALSE,
    ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
    n.cores.by.gene.by.site=1)
}
# Likelihood smoke test: GTR nucleotide model, no gamma heterogeneity,
# at the reference parameter point. Relies on globals prepared beforehand
# (lSAC.c4mc.full, codon.data, phy, aa.optim, codon.freq.by.aa,
# codon.freq.by.gene, codon.index.matrix).
test_selac.gtr <- function(){
  # Parameter point: C*Phi*q*Ne, alpha, beta, base freqs (A,C,G),
  # five GTR rates — passed in log space.
  par.point <- log(c(4*4e-7*.5*5e6, 1.829272, 0.101799, .25, .25, .25, rep(1,5)))
  lSAC.c4mc.full(par.point,
    codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
    codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
    numcode=1, diploid=TRUE, aa.properties=NULL,
    volume.fixed.value=0.0003990333,
    nuc.model="GTR",
    codon.index.matrix=codon.index.matrix,
    include.gamma=FALSE,
    ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
    n.cores.by.gene.by.site=1)
}
# --- Reference parameter points shared by the test_selac* helpers -----------
# Composite SELAC parameter C*Phi*q*Ne plus the Grantham-distance weights
# alpha and beta.
std.params = c(C.Phi.q.Ne = 4*4e-7*.5*5e6,
               alpha=1.829272,
               beta=0.101799)
# Fixed Grantham "volume" weight (passed as volume.fixed.value).
std.gamma=0.0003990333
# Equilibrium base frequencies for A, C, G (T is 1 - sum, derived downstream).
std.base.freq = c(A=0.25,C=0.25,G=0.25)
# Placeholder polymorphism parameters (only relevant when k.levels > 0).
std.poly.params = c(NA,NA)
# Gamma rate-heterogeneity shape used when include.gamma=TRUE.
std.gamma.shape = 5
# Parameter point for the HMM (evolving amino-acid) variant.
hmm.params = c(C.Phi.q.Ne = 2,
               alpha=1.829272,
               beta=0.101799)
# Switching rate of the selective environment for the HMM variant.
std.sel.reg = 0.01
## Notes on nuc.mutation.params:
# used as rates value in selac:::CreateNucleotideMutationMatrix(rates, model, base.freqs)->res
# either: length(base.freqs) == 4 && sum(base.freqs) == 1
# or: is.null(base.freqs) == TRUE
# dim(res) == c(4,4)
# rowSums(res) == rep(1,4)
## selac:::CreateNucleotideMutationMatrix with JC model
# rates = rates[1] (ie just uses first value)
## selac:::CreateNucleotideMutationMatrix with GTR model
# rates = rates[1:5] (ie just uses first 5 values)
## selac:::CreateNucleotideMutationMatrix with HKY model
# rates = rates[1:2] (ie just uses first 2 values)
## selac:::CreateNucleotideMutationMatrix with UNREST model
# rates = rates[1:11] (ie just uses first 11 values)
#
# Per-model vectors of unit nucleotide rates, sized per the table above:
# JC=1, GTR=5, HKY=2, UNREST=11. (FIX: spell out TRUE/FALSE instead of the
# reassignable shorthands T/F.)
std.nuc.params = mapply(rep_len,length.out=c(JC=1,GTR=5,HKY=2,UNREST=11),x=rep(1,4),
                        USE.NAMES = TRUE,SIMPLIFY = FALSE)
# Evaluate the standard (non-HMM) SELAC log-likelihood once at the std.*
# reference point, after preparing majority-rule optimal amino acids and
# site patterns from the supplied numeric codon data. No optimization.
#
# phy        : phylo tree; tip labels must index rows of codon.data.
# codon.data : numeric codon data as produced by load_rokasYeast().
# nuc.model  : nucleotide mutation model (HKY is rejected below).
# gamma.type : "none" disables rate heterogeneity (include.gamma=FALSE).
# nCores     : forwarded as n.cores.by.gene.by.site.
# Returns the value of GetLikelihoodSAC_CodonForManyCharGivenAllParams.
test_selac_std <- function(phy, codon.data,
                           nuc.model=c("JC", "GTR", "HKY", "UNREST"),
                           gamma.type=c("none", "median","quadrature","lognormal" ),
                           nCores=1){
nuc.model=match.arg(nuc.model)
gamma.type=match.arg(gamma.type)
if(nuc.model == "HKY") stop("HKY model not implemented for GetLikelihoodSAC_CodonForManyCharGivenAllParams.")
include.gamma = (gamma.type != "none")
# Majority-rule optimal amino acid per site, plus codon frequency tables.
aa.data <- selac:::ConvertCodonNumericDataToAAData(codon.data, numcode=1)
aa.optim <- apply(aa.data[, -1], 2, selac:::GetMaxName) #starting values for all, final values for majrule
aa.optim.full.list <- aa.optim
codon.freq.by.aa <- selac:::GetCodonFreqsByAA(codon.data[,-1], aa.optim, numcode=1)
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
# Append the optimal-amino-acid row before collapsing to site patterns.
aa.optim.frame.to.add <- matrix(c("optimal", aa.optim), 1, dim(codon.data)[2])
colnames(aa.optim.frame.to.add) <- colnames(codon.data)
codon.data <- rbind(codon.data, aa.optim.frame.to.add)
codon.data <- selac:::SitePattern(codon.data, includes.optimal.aa=TRUE)
aa.optim = codon.data$optimal.aa
codon.index.matrix = selac:::CreateCodonMutationMatrixIndex()
# Assemble the parameter vector in the order the likelihood expects:
# std.params, [base freqs unless UNREST], nucleotide rates, [gamma shape].
model.params = std.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParams
if(include.gamma){
model.params=c(model.params,std.gamma.shape)
# Parameters are handed over in log space (logspace=TRUE).
lSAC.c4mc.full(log(model.params),
               codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
               codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
               numcode=1, diploid=TRUE, aa.properties=NULL,
               volume.fixed.value=std.gamma,
               nuc.model=nuc.model,
               codon.index.matrix=codon.index.matrix,
               include.gamma=TRUE, gamma.type=gamma.type,
               ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
               n.cores.by.gene.by.site=nCores)->res
}else{
# Same call without the gamma shape parameter; gamma.type is irrelevant here.
lSAC.c4mc.full(log(model.params),
               codon.data=codon.data, phy=phy, aa.optim_array=aa.optim,
               codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene,
               numcode=1, diploid=TRUE, aa.properties=NULL,
               volume.fixed.value=std.gamma,
               nuc.model=nuc.model,
               codon.index.matrix=codon.index.matrix,
               include.gamma=FALSE,
               ncats=4, k.levels=0, logspace=TRUE, verbose=FALSE,
               n.cores.by.gene.by.site=nCores)->res
}
return(res)
}
#round(selac.gtr, 3)
#get_test_key <- function(phy.source, nuc.model, gamma.type, nCores, seed)
# Profile one test_selac_std() evaluation and append its log-likelihood to a
# per-dataset CSV log.
#
# src_data    : list as returned by load_rokasYeast(): input.key, phy, codon.data.
# nuc.model   : nucleotide model forwarded to test_selac_std().
# gamma.model : gamma type forwarded to test_selac_std() ("none" disables gamma).
# seed        : RNG seed, recorded in the run key and the CSV row.
# nCores      : forwarded as nCores (n.cores.by.gene.by.site downstream).
# Returns the model log-likelihood, or NA if the profiled run failed.
run_profile <- function(src_data,nuc.model,gamma.model,seed,nCores){
  set.seed(seed)
  # One shared run identifier for messages, profiler output files and the log.
  # FIX(consistency): compute it once and reuse it, as the other run_*
  # functions in this file do, instead of rebuilding the same string with a
  # second sprintf in each cat() — output is unchanged.
  profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
                         src_data$input.key,
                         nuc.model,
                         gamma.model,
                         selac_release,
                         nCores,
                         seed)
  cat(sprintf("Start: %s\n", profile_prefix))
  # Default so a failed run is still logged (as NA) below.
  model.LL=NA
  # try(): a crash in the likelihood must not abort a parameter sweep.
  try({
    prof_obj <- profvis({
      model.LL=test_selac_std(src_data$phy,
                              src_data$codon.data,
                              nuc.model = nuc.model,
                              gamma.type = gamma.model,
                              nCores = nCores)
    }, prof_output = paste0(profile_prefix,".Rprof"))
    htmlwidgets::saveWidget(prof_obj,
                            file=paste0(profile_prefix,".Rprofvis.html"))
  })
  cat(sprintf("End: %s\tLL: %0.3f\n", profile_prefix, model.LL))
  # Create the CSV header on first use, then append one row per run.
  if(!file.exists(paste0(src_data$input.key,"_LL_log.csv")))
    cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
        file=paste0(src_data$input.key,"_LL_log.csv"),
        append = TRUE)
  cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
              src_data$input.key,
              nuc.model,
              gamma.model,
              selac_release,
              nCores,
              seed,
              model.LL),
      file=paste0(src_data$input.key,"_LL_log.csv"),
      append = TRUE)
  model.LL
}
# Profile a short GY94/GTR SelacOptimize() run (3 partitions under tmp_data/)
# on the Salichos & Rokas yeast tree, then log the resulting log-likelihood
# to a per-dataset CSV and save the full result object.
# seed : RNG seed, part of the run key and CSV row.
# ref  : selac git ref handed to setup_selac_for_profiling().
# Returns result$loglik (NA if the optimization failed).
run_simple_selac_optimize <- function(seed=sample.int(1e6,1),ref="v1.6.1-rc1"){
setup_selac_for_profiling(ref=ref)
# Run identifier for messages, profiler files and the log. The gamma label
# "noneXquadrature" records that include.gamma=FALSE while
# gamma.type='quadrature' is still supplied to the optimizer below.
profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
                       "selac19XX",
                       "GTR",
                       "noneXquadrature",
                       selac_release,
                       3,
                       seed)
src.key="selac19XX"
set.seed(seed)
cat(sprintf("Start: %s\n",
            profile_prefix))
tree<-read.tree('selac_paper_data/SalichosRokas.tre')
# Default so a failed run is still logged as NA below.
result=list(loglik=NA)
nuc.model = 'GTR'
gamma.type="noneXquadrature"
nCores=3
# try(): an optimizer failure must not abort the calling sweep.
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'tmp_data/', phy = tree, n.partitions=3,
                        edge.length = 'optimize', optimal.aa = 'none', data.type='codon',
                        codon.model = 'GY94', nuc.model = 'GTR',
                        include.gamma = FALSE, gamma.type='quadrature', ncats = 4, numcode = 1,
                        diploid = FALSE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
                        n.cores.by.gene = 3, max.restarts = 1, max.evals=20)
}, prof_output = paste0(profile_prefix,".Rprof"))
htmlwidgets::saveWidget(prof_obj,
                        file=paste0(profile_prefix,".Rprofvis.html"))
})
cat(sprintf("End: %s\tLL: %0.3f\n",
            profile_prefix,
            result$loglik))
# Create the CSV header on first use, then append one row per run.
if(!file.exists(paste0(src.key,"_LL_log.csv")))
cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
    file=paste0(src.key,"_LL_log.csv"),
    append = T)
cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
            src.key,
            nuc.model,
            gamma.type,
            selac_release,
            nCores,
            seed,
            result$loglik),
    file=paste0(src.key,"_LL_log.csv"),
    append = T)
save(result,file=sprintf('selac_paper_output/yeastSalRokSelacGTRG_quad_%s.Rdata',profile_prefix))
result$loglik
}
# Profile a full GY94/GTR SelacOptimize() run over the data in
# selac_paper_data/, then log the resulting log-likelihood to a per-dataset
# CSV and save both the raw profvis object and the result.
# seed   : RNG seed, part of the run key and CSV row.
# ref    : selac git ref handed to setup_selac_for_profiling().
# nCores : n.cores.by.gene for the optimizer.
# Returns result$loglik (NA if the optimization failed).
run_full_selac_optimize <- function(seed=sample.int(1e6,1),ref="v1.6.1-rc1", nCores=3){
setup_selac_for_profiling(ref=ref)
# Run identifier for messages, profiler files and the log. The gamma label
# "noneXquadrature" records that include.gamma=FALSE while
# gamma.type='quadrature' is still supplied to the optimizer below.
profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
                       "selacFULLb",
                       "GTR",
                       "noneXquadrature",
                       selac_release,
                       nCores,
                       seed)
src.key="selacFULLb"
set.seed(seed)
cat(sprintf("Start: %s\n",
            profile_prefix))
tree<-read.tree('selac_paper_data/SalichosRokas.tre')
# Default so a failed run is still logged as NA below.
result=list(loglik=NA)
nuc.model = 'GTR'
gamma.type="noneXquadrature"
#nCores=3
# try(): an optimizer failure must not abort the calling sweep.
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'selac_paper_data/', phy = tree,
                        edge.length = 'optimize', optimal.aa = 'none', data.type='codon',
                        codon.model = 'GY94', nuc.model = 'GTR',
                        include.gamma = FALSE, gamma.type='quadrature', ncats = 4, numcode = 1,
                        diploid = FALSE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
                        n.cores.by.gene = nCores, max.restarts = 1, max.evals=20)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
# Keep the raw profvis object as well as the rendered HTML report.
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
htmlwidgets::saveWidget(prof_obj,
                        file=paste0(profile_prefix,".Rprofvis.html"))
})
cat(sprintf("End: %s\tLL: %0.3f\n",
            profile_prefix,
            result$loglik))
# Create the CSV header on first use, then append one row per run.
if(!file.exists(paste0(src.key,"_LL_log.csv")))
cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
    file=paste0(src.key,"_LL_log.csv"),
    append = T)
cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
            src.key,
            nuc.model,
            gamma.type,
            selac_release,
            nCores,
            seed,
            result$loglik),
    file=paste0(src.key,"_LL_log.csv"),
    append = T)
save(result,file=sprintf('selac_paper_output/yeastSalRokSelacGTRG_quad_%s.Rdata',profile_prefix))
result$loglik
}
# Profile a SELAC (codon.model='selac', UNREST + quadrature gamma)
# SelacOptimize() run on the E. coli TEM data under kosi07_data/, log the
# log-likelihood to a per-dataset CSV and save the result with the seed and
# the randomized starting tree/values.
# seed   : RNG seed, part of the run key and CSV row.
# ref    : selac git ref handed to setup_selac_for_profiling().
# nCores : n.cores.by.gene for the optimizer.
# Returns result$loglik (NA if the optimization failed).
run_test_ecoli_optimize <- function(seed=sample.int(1e6,1),ref="v1.6.1-rc1", nCores=3){
setup_selac_for_profiling(ref=ref)
src.key="ecoliTEST"
nuc.model = 'UNREST'
gamma.type="quadrature"
# Run identifier for messages, profiler files and the log.
profile_prefix=sprintf("%s_%s_%s_%s_%i_%i",
                       src.key,
                       nuc.model,
                       gamma.type,
                       selac_release,
                       nCores,
                       seed)
set.seed(seed)
cat(sprintf("Start: %s\n",
            profile_prefix))
tree<-read.tree('kosi07_data/kosi07_codonphyml_tree_TEM.newick')
# NOTE(review): fasta.file is only referenced by the commented-out
# partition.order argument below; the active call reads all of kosi07_data/.
fasta.file="kosi07_data/aligned_KOSI07_TEM.fasta"
output.file.name=sprintf('ecoli_output/%s_restart.Rdata',profile_prefix)
# Default so a failed run is still logged as NA below.
result=list(loglik=NA)
opt.aa.type <- "optimize"
# random starting values
# NOTE(review): starting.vals consumes RNG state and is saved into the result
# for the record, but is NOT passed to SelacOptimize here — the
# start.from.mle/mle.matrix arguments are commented out.
starting.vals <- matrix(runif(n = 15, min = 0.01, max = 5), ncol = 15, nrow = 1)
# Randomize branch lengths so each seed starts the search elsewhere.
tree$edge.length <- runif(nrow(tree$edge), 0.01, 3)
#nCores=3
# try(): an optimizer failure must not abort the calling sweep.
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
                        edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
                        codon.model = 'selac', nuc.model = nuc.model, edge.linked=TRUE,
                        include.gamma = TRUE, gamma.type='quadrature', ncats = 4, numcode = 1,
                        diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
                        n.cores.by.gene = nCores, n.cores.by.gene.by.site=1,
                        max.restarts = 1, max.evals=20, max.tol=1e-2, max.iterations = 15,
                        fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
                        output.restart.filename=output.file.name)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
# Keep the raw profvis object as well as the rendered HTML report.
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
htmlwidgets::saveWidget(prof_obj,
                        file=paste0(profile_prefix,".Rprofvis.html"))
})
cat(sprintf("End: %s\tLL: %0.3f\n",
            profile_prefix,
            result$loglik))
# Create the CSV header on first use, then append one row per run.
if(!file.exists(paste0(src.key,"_LL_log.csv")))
cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
    file=paste0(src.key,"_LL_log.csv"),
    append = T)
cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
            src.key,
            nuc.model,
            gamma.type,
            selac_release,
            nCores,
            seed,
            result$loglik),
    file=paste0(src.key,"_LL_log.csv"),
    append = T)
cat("SELAC Done. saving results\n")
# Record the inputs alongside the fit so the run can be reproduced.
result$seed <- seed
result$startingValues <- starting.vals
result$startingTree <- tree
save(result,file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
result$loglik
}
run_ecoli_profile_mode <- function(mode=c("SHORTTEST","TEST","SHORT",
"SHORTTESTHMM","SHORTHMM","LONGHMM",
"FASTHMMTEST","HMMEVAL50","HMMEVALFULL",
"FASTHMMDEBUG","FASTHMMSINGLEDEBUG"),
seed=sample.int(1e6,1),
codon.model=c("selac","none","GY94","YN98"),
nuc.model=c("GTR","UNREST","JC"),
ref="v1.6.1-rc1",
include.gamma=T,
gamma.type=c("quadrature","median","lognormal","none"),
nCores=1){
setup_selac_for_profiling(ref=ref)
mode=match.arg(mode)
src.key=paste0("ecoli",mode)
codon.model = match.arg(codon.model)
nuc.model = match.arg(nuc.model)
if(!include.gamma)
{ gamma.type="quadrature"; gamma.mode="none";}
else {
gamma.mode=gamma.type=match.arg(gamma.type)
}
if(gamma.type=="none"){
include.gamma=F
gamma.type="quadrature"
gamma.mode="none"
}
profile_prefix=sprintf("%s_%s_%s_%s_%s_%i_%i",
src.key,
codon.model,
nuc.model,
gamma.mode,
selac_release,
nCores,
seed)
if(file.exists(sprintf('ecoli_output/%s_result.Rdata',profile_prefix))){
try({
load(file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
if(!is.null(result$loglik) && is.finite(result$loglik)) {
cat(sprintf("Skip: %s\n",
profile_prefix))
return(result$loglik)
}
})
cat(sprintf("Rebuilding: %s\n",
profile_prefix))
}
set.seed(seed)
cat(sprintf("Start: %s\n",
profile_prefix))
tree<-read.tree('kosi07_data/kosi07_codonphyml_tree_TEM.newick')
fasta.file="kosi07_data/aligned_KOSI07_TEM.fasta"
output.file.name=sprintf('ecoli_output/%s_restart.Rdata',profile_prefix)
result=list(loglik=NA)
opt.aa.type <- "optimize"
# random starting values
starting.vals <- matrix(runif(n = 15, min = 0.01, max = 5), ncol = 15, nrow = 1)
tree$edge.length <- runif(nrow(tree$edge), 0.01, 3)
#nCores=3
if(mode=="TEST"){
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=20, max.tol=1e-2, max.iterations = 15,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="SHORTTEST"){
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=1, max.tol=1e-2, max.iterations = 1,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
}else if(mode=="SHORTTESTHMM"){
# HMM code requires starting edge length < 0.5 and > 1e-8
tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacHMMOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize', data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = TRUE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=1, max.tol=1e-2,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name, max.iterations=1)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
}else if(mode=="SHORT"){
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize', optimal.aa = opt.aa.type, data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=1, max.tol=1e-2, max.iterations = 1,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.05)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="SHORTHMM"){
# HMM code requires starting edge length < 0.5 and > 1e-8
tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacHMMOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize',data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=1, max.tol=1e-2,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name, max.iterations=1)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="LONGHMM"){
# HMM code requires starting edge length < 0.5 and > 1e-8
tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
## start.from.mle set to allow manual specification of fasta files
# requires mle.matrix to be set, setting to start.from.mle==FALSE values for now
# mle.matrix[1,] = c(selac.starting.vals[1,1:3], 0.25, 0.25, 0.25, nuc.ip)
result <- SelacHMMOptimize(codon.data.path = 'kosi07_data/', phy = tree,
edge.length = 'optimize',data.type='codon',
codon.model = codon.model, nuc.model = nuc.model, edge.linked=TRUE,
include.gamma = include.gamma, gamma.type=gamma.type, ncats = 4, numcode = 1,
diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = FALSE,
n.cores.by.gene = 1, n.cores.by.gene.by.site=nCores,
max.restarts = 1, max.evals=5, max.tol=1e-2,
fasta.rows.to.keep=NULL, recalculate.starting.brlen=FALSE, output.by.restart=FALSE,
output.restart.filename=output.file.name, max.iterations=5)
# output.restart.filename=output.file.name, start.from.mle = TRUE,
# mle.matrix=starting.vals, tol.step=1, partition.order = fasta.file)
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="FASTHMMDEBUG") {
if(!file.exists("ecoli_chars.RData")){
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
save(chars,file="ecoli_chars.RData")
} else {
load(file="ecoli_chars.RData")
}
codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,10))]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
# try({
lSAC.c4mc.full(log(model.params),
codon.data=codon.data,
phy=tree,
codon.freq.by.aa=NULL,
codon.freq.by.gene=codon.freq.by.gene,
numcode=1,
diploid=TRUE,
aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=include.gamma,
gamma.type=gamma.type,
ncats=4,
k.levels=0,
logspace=TRUE,
verbose=TRUE,
n.cores.by.gene.by.site=nCores,
estimate.importance=FALSE) -> result$loglik
# })
}else if(mode=="FASTHMMSINGLEDEBUG") {
if(!file.exists("ecoli_chars.RData")){
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
save(chars,file="ecoli_chars.RData")
} else {
load(file="ecoli_chars.RData")
}
codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,10))]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
# lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
GetLikelihoodSAC_CodonForManyCharVaryingBySiteEvolvingAA <- function(codon.data, phy, Q_codon_array, codon.freq.by.aa=NULL, codon.freq.by.gene=NULL, aa.optim_array, codon_mutation_matrix, Ne, rates, numcode, diploid, n.cores.by.gene.by.site=1, verbose=FALSE){
nsites.unique <- dim(codon.data$unique.site.patterns)[2]-1
final.likelihood.vector <- rep(NA, nsites.unique)
#We rescale the codon matrix only:
diag(codon_mutation_matrix) = 0
diag(codon_mutation_matrix) = -rowSums(codon_mutation_matrix)
scale.factor <- -sum(diag(codon_mutation_matrix) * codon.freq.by.gene, na.rm=TRUE)
codon_mutation_matrix_scaled = codon_mutation_matrix * (1/scale.factor)
#Finish the Q_array codon mutation matrix multiplication here:
if(diploid == TRUE){
Q_codon_array = (2 * Ne) * codon_mutation_matrix_scaled * Q_codon_array
}else{
Q_codon_array = Ne * codon_mutation_matrix_scaled * Q_codon_array
}
diag(Q_codon_array) = 0
diag(Q_codon_array) = -rowSums(Q_codon_array)
#Put the na.rm=TRUE bit here just in case -- when the amino acid is a stop codon, there is a bunch of NaNs. Should be fixed now.
#scale.factor <- -sum(Q_codon_array[DiagArray(dim(Q_codon_array))] * equilibrium.codon.freq, na.rm=TRUE)
## This is obviously not very elegant, but not sure how else to code it to store this stuff in this way -- WORK IN PROGRESS:
#expQt <- GetExpQt(phy=phy, Q=Q_codon_array, scale.factor=NULL, rates=rates)
#Generate matrix of root frequencies for each optimal AA:
root.p_array <- codon.freq.by.gene
#root.p_array <- t(root.p_array)
#root.p_array <- root.p_array / rowSums(root.p_array)
#rownames(root.p_array) <- .unique.aa
phy.sort <- reorder(phy, "pruningwise")
# Q_codon_array_vectored <- c(t(Q_codon_array)) # has to be transposed
# Q_codon_array_vectored <- Q_codon_array_vectored[.non_zero_pos]
anc.indices <- unique(phy.sort$edge[,1])
if(verbose){
MultiCoreLikelihoodBySite <- function(nsite.index){
tmp <- selac:::GetLikelihoodSAC_CodonForSingleCharGivenOptimumHMMScoring(charnum=nsite.index, codon.data=codon.data$unique.site.patterns,
phy=phy.sort, Q_codon_array=Q_codon_array,
root.p=root.p_array, scale.factor=scale.factor,
anc.indices=anc.indices, return.all=FALSE)
cat(".")
return(tmp)
}
} else {
MultiCoreLikelihoodBySite <- function(nsite.index){
tmp <- selac:::GetLikelihoodSAC_CodonForSingleCharGivenOptimumHMMScoring(charnum=nsite.index, codon.data=codon.data$unique.site.patterns, phy=phy.sort, Q_codon_array_vectored=Q_codon_array_vectored, root.p=root.p_array, scale.factor=scale.factor, anc.indices=anc.indices, return.all=FALSE)
return(tmp)
}
}
final.likelihood.vector <- unlist(lapply(1:nsites.unique, MultiCoreLikelihoodBySite))
if(verbose) cat("|\n")
return(final.likelihood.vector)
}
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
phy=tree; verbose=T;
diploid=T; numcode=1
x <- model.params
k.levels =0
importance.of.aa.dist.in.selective.environment.change = 1
rate.for.selective.environment.change = x[length(x)]
x = x[-length(x)]
aa.properties=NULL
if(include.gamma == TRUE){
shape = x[length(x)]
x = x[-length(x)]
}
C.Phi.q.Ne <- x[1]
C <- 4
q <- 4e-7
Ne <- 5e6
Phi.q.Ne <- C.Phi.q.Ne / C
Phi.Ne <- Phi.q.Ne / q
Phi <- Phi.Ne / Ne
alpha <- x[2]
beta <- x[3]
gamma <- 0.0003990333 #volume.fixed.value
if(k.levels > 0){
if(nuc.model == "JC") {
base.freqs=c(x[4:6], 1-sum(x[4:6]))
#During the early stages of the optimization process it will try weird values for the base frequencies.
stopifnot(!any(base.freqs < 0))
if(any(base.freqs < 0)){
return(1000000)
}
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(1, model=nuc.model, base.freqs=base.freqs)
poly.params <- x[7:8]
}
if(nuc.model == "GTR") {
base.freqs=c(x[4:6], 1-sum(x[4:6]))
#During the early stages of the optimization process it will try weird values for the base frequencies.
stopifnot(!any(base.freqs < 0))
if(any(base.freqs < 0)){
return(1000000)
}
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[9:length(x)], model=nuc.model, base.freqs=base.freqs)
poly.params <- x[7:8]
}
if(nuc.model == "UNREST") {
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[6:length(x)], model=nuc.model, base.freqs=NULL)
poly.params <- x[4:5]
}
}else{
if(nuc.model == "JC") {
base.freqs=c(x[4:6], 1-sum(x[4:6]))
#During the early stages of the optimization process it will try weird values for the base frequencies.
stopifnot(!any(base.freqs < 0))
if(any(base.freqs < 0)){
return(1000000)
}
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(1, model=nuc.model, base.freqs=base.freqs)
}
if(nuc.model == "GTR") {
base.freqs=c(x[4:6], 1-sum(x[4:6]))
#During the early stages of the optimization process it will try weird values for the base frequencies.
stopifnot(!any(base.freqs < 0))
if(any(base.freqs < 0)){
return(1000000)
}
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[7:length(x)], model=nuc.model, base.freqs=base.freqs)
}
if(nuc.model == "UNREST") {
nuc.mutation.rates <- selac:::CreateNucleotideMutationMatrix(x[4:length(x)], model=nuc.model, base.freqs=NULL)
}
}
nuc.mutation.rates.vector <- c(nuc.mutation.rates, rate.for.selective.environment.change)
codon_mutation_matrix <- matrix(nuc.mutation.rates.vector[codon.index.matrix], dim(codon.index.matrix))
codon_mutation_matrix[is.na(codon_mutation_matrix)]=0
nsites.unique <- dim(codon.data$unique.site.patterns)[2]-1
nsites <- sum(codon.data$site.pattern.counts)
if(include.gamma==TRUE){
if(gamma.type == "median"){
rates.k <- DiscreteGamma(shape=shape, ncats=ncats)
weights.k <- rep(1/ncats, ncats)
}
if(gamma.type == "quadrature"){
rates.and.weights <- LaguerreQuad(shape=shape, ncats=ncats)
rates.k <- rates.and.weights[1:ncats]
weights.k <- rates.and.weights[(ncats+1):(ncats*2)]
}
if(gamma.type == "lognormal"){
rates.and.weights <- LogNormalQuad(shape=shape, ncats=ncats)
rates.k <- rates.and.weights[1:ncats]
weights.k <- rates.and.weights[(ncats+1):(ncats*2)]
}
#ttmmpp <- c(nuc.mutation.rates.vector, nsites.unique, nsites, C, Phi, rates.k, q, Ne, shape, importance.of.aa.dist.in.selective.environment.change)
#writeLines(text = paste(ttmmpp), con = "~/Desktop/selac_parameter.txt", sep = "\t")
final.likelihood.mat = matrix(0, nrow=ncats, ncol=nsites.unique)
for(k.cat in sequence(ncats)){
if(k.levels > 0){
aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=poly.params, k=k.levels)
}else{
aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=NULL, k=k.levels)
}
Q_codon_array <- selac:::FastCreateEvolveAACodonFixationProbabilityMatrix(aa.distances=aa.distances, nsites=nsites, C=C, Phi=Phi*rates.k[k.cat], q=q, Ne=Ne, include.stop.codon=TRUE, numcode=numcode, diploid=diploid, flee.stop.codon.rate=0.9999999, importance.of.aa.dist.in.selective.environment.change) #Cedric: added importance
final.likelihood.mat[k.cat,] = GetLikelihoodSAC_CodonForManyCharVaryingBySiteEvolvingAA(codon.data, phy, Q_codon_array,
codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene, codon_mutation_matrix=codon_mutation_matrix,
Ne=Ne, rates=NULL, numcode=numcode, diploid=diploid, n.cores.by.gene.by.site=n.cores.by.gene.by.site, verbose=verbose)
}
likelihood <- sum(log(colSums(exp(final.likelihood.mat)*weights.k)) * codon.data$site.pattern.counts)
}else{
if(k.levels > 0){
aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=poly.params, k=k.levels)
}else{
aa.distances <- selac:::CreateAADistanceMatrix(alpha=alpha, beta=beta, gamma=gamma, aa.properties=aa.properties, normalize=FALSE, poly.params=NULL, k=k.levels)
}
Q_codon_array <- selac:::FastCreateEvolveAACodonFixationProbabilityMatrix(aa.distances=aa.distances, nsites=nsites, C=C, Phi=Phi, q=q, Ne=Ne, include.stop.codon=TRUE, numcode=numcode, diploid=diploid, flee.stop.codon.rate=0.9999999, importance.of.aa.dist.in.selective.environment.change) #Cedric: added importance
final.likelihood = GetLikelihoodSAC_CodonForManyCharVaryingBySiteEvolvingAA(codon.data, phy, Q_codon_array, codon.freq.by.aa=codon.freq.by.aa, codon.freq.by.gene=codon.freq.by.gene, codon_mutation_matrix=codon_mutation_matrix, Ne=Ne, rates=NULL, numcode=numcode, diploid=diploid, n.cores.by.gene.by.site=n.cores.by.gene.by.site, verbose=verbose)
likelihood <- sum(final.likelihood * codon.data$site.pattern.counts)
}
if(neglnl) {
likelihood <- -1 * likelihood
}
if(verbose > 1) {
results.vector <- c(likelihood, C*Phi*q, alpha, beta, gamma, Ne, ape::write.tree(phy))
names(results.vector) <- c("likelihood", "C.Phi.q.Ne", "alpha", "beta", "gamma", "Ne", "phy")
print(results.vector)
}else if(verbose){
results.vector <- c(likelihood, alpha, beta, gamma)
names(results.vector) <- c("likelihood", "alpha", "beta", "gamma")
print(results.vector)
}
if(is.na(likelihood) || is.nan(likelihood)){
res <-1000000
}else{
res <- likelihood
}
result$loglik <-res
# try({
lSAC.c4mc.full(log(model.params),
codon.data=codon.data,
phy=tree,
codon.freq.by.aa=NULL,
codon.freq.by.gene=codon.freq.by.gene,
numcode=1,
diploid=TRUE,
aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=include.gamma,
gamma.type=gamma.type,
ncats=4,
k.levels=0,
logspace=TRUE,
verbose=TRUE,
n.cores.by.gene.by.site=nCores,
estimate.importance=FALSE) -> result$loglik
# })
}else if(mode=="FASTHMMTEST") {
if(!file.exists("ecoli_chars.RData")){
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
save(chars,file="ecoli_chars.RData")
} else {
load(file="ecoli_chars.RData")
}
codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,10))]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
# tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
lSAC.c4mc.full(log(model.params),
codon.data=codon.data,
phy=tree,
codon.freq.by.aa=NULL,
codon.freq.by.gene=codon.freq.by.gene,
numcode=1,
diploid=TRUE,
aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=include.gamma,
gamma.type=gamma.type,
ncats=4,
k.levels=0,
logspace=TRUE,
verbose=TRUE,
n.cores.by.gene.by.site=nCores,
estimate.importance=FALSE) -> result$loglik
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.05)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="HMMEVAL50") {
if(!file.exists("ecoli_chars.RData")){
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
save(chars,file="ecoli_chars.RData")
} else {
load(file="ecoli_chars.RData")
}
codon.data <- chars[tree$tip.label,c(1,1+sample(ncol(chars)-1,50))]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
# tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
lSAC.c4mc.full(log(model.params),
codon.data=codon.data,
phy=tree,
codon.freq.by.aa=NULL,
codon.freq.by.gene=codon.freq.by.gene,
numcode=1,
diploid=TRUE,
aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=include.gamma,
gamma.type=gamma.type,
ncats=4,
k.levels=0,
logspace=TRUE,
verbose=TRUE,
n.cores.by.gene.by.site=nCores,
estimate.importance=FALSE) -> result$loglik
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.05)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else if(mode=="HMMEVALFULL") {
if(!file.exists("ecoli_chars.RData")){
tmp.gene <- read.dna(fasta.file, format="fasta")
tmp.gene <- as.list(as.matrix(cbind(tmp.gene)))
chars <- selac:::DNAbinToCodonNumeric(tmp.gene)
save(chars,file="ecoli_chars.RData")
} else {
load(file="ecoli_chars.RData")
}
codon.data <- chars[tree$tip.label,]
codon.freq.by.gene <- selac:::GetCodonFreqsByGene(codon.data[,-1])
codon.data <- selac:::SitePattern(codon.data)
codon.index.matrix = selac:::CreateCodonMutationMatrixIndexEvolveAA()
model.params = hmm.params
if(nuc.model != "UNREST")
model.params=c(model.params,std.base.freq)
model.params=c(model.params,std.nuc.params[[nuc.model]])
lSAC.c4mc.full <- selac:::GetLikelihoodSAC_CodonForManyCharGivenAllParamsEvolvingAA
if(include.gamma)
model.params=c(model.params,std.gamma.shape)
model.params = c(model.params,std.sel.reg)
# tree$edge.length <- runif(nrow(tree$edge), 0.01, 0.45)
try({
prof_obj <- profvis({
lSAC.c4mc.full(log(model.params),
codon.data=codon.data,
phy=tree,
codon.freq.by.aa=NULL,
codon.freq.by.gene=codon.freq.by.gene,
numcode=1,
diploid=TRUE,
aa.properties=NULL,
volume.fixed.value=std.gamma,
nuc.model=nuc.model,
codon.index.matrix=codon.index.matrix,
include.gamma=include.gamma,
gamma.type=gamma.type,
ncats=4,
k.levels=0,
logspace=TRUE,
verbose=TRUE,
n.cores.by.gene.by.site=nCores,
estimate.importance=FALSE) -> result$loglik
}, prof_output = paste0(profile_prefix,".Rprof"),interval=0.5)
save(prof_obj, file=paste0(profile_prefix,".Rprofvis.RData"))
# htmlwidgets::saveWidget(prof_obj,
# file=paste0(profile_prefix,".Rprofvis.html"))
})
} else {
cat(sprintf("Request for %s mode not understood.\n",as.character(mode)))
}
cat(sprintf("End: %s\tLL: %0.3f\n",
profile_prefix,
result$loglik))
if(!file.exists(paste0(src.key,"_",codon.model,"_LL_log.csv")))
cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
file=paste0(src.key,"_",codon.model,"_LL_log.csv"),
append = T)
cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
src.key,
nuc.model,
gamma.mode,
selac_release,
nCores,
seed,
result$loglik),
file=paste0(src.key,"_",codon.model,"_LL_log.csv"),
append = T)
cat("SELAC Done. saving results\n")
result$seed <- seed
result$startingValues <- starting.vals
result$startingTree <- tree
save(result,file=sprintf('ecoli_output/%s_result.Rdata',profile_prefix))
result$loglik
}
#' Run a single SELAC optimization on the E. coli KOSI07 test data set,
#' without profvis instrumentation.
#'
#' Side effects: sets up the requested selac revision, reads the KOSI07 tree
#' and alignment from disk, appends one summary row to "<src.key>_LL_log.csv",
#' and saves the fitted result under ecoli_output/.
#'
#' @param seed      RNG seed; also embedded in the output file prefix.
#' @param ref       Git ref of selac passed to setup_selac_for_profiling().
#' @param nCores    Number of cores for SelacOptimize (parallel by gene).
#' @param auto.skip Currently unused; retained for interface compatibility.
#' @return The model log-likelihood (last expression).
run_test_ecoli_optimize_no_profile <- function(seed = sample.int(1e6, 1),
                                               ref = "v1.6.1-rc1",
                                               nCores = 1,
                                               auto.skip = TRUE) {
  setup_selac_for_profiling(ref = ref)
  src.key <- "ecoliDEBUG"
  nuc.model <- 'UNREST'
  gamma.type <- "quadrature"
  profile_prefix <- sprintf("%s_%s_%s_%s_%i_%i",
                            src.key,
                            nuc.model,
                            gamma.type,
                            selac_release,
                            nCores,
                            seed)
  # Resume support: if a finite log-likelihood is already saved for this
  # prefix, skip recomputation; `load()` restores `result` from the .Rdata.
  if (file.exists(sprintf('ecoli_output/%s_result.Rdata', profile_prefix))) {
    try({
      load(file = sprintf('ecoli_output/%s_result.Rdata', profile_prefix))
      if (!is.null(result$loglik) && is.finite(result$loglik)) {
        cat(sprintf("Skip: %s\n", profile_prefix))
        return(result$loglik)
      }
    })
    cat(sprintf("Rebuilding: %s\n", profile_prefix))
  }
  set.seed(seed)
  cat(sprintf("Start: %s\n", profile_prefix))
  tree <- read.tree('kosi07_data/kosi07_codonphyml_tree_TEM.newick')
  fasta.file <- "kosi07_data/aligned_KOSI07_TEM.fasta"
  output.file.name <- sprintf('ecoli_output/%s_restart.Rdata', profile_prefix)
  result <- list(loglik = NA)
  opt.aa.type <- "optimize"
  # Random starting values and branch lengths; saved with the result so the
  # run can be reproduced exactly.
  starting.vals <- matrix(runif(n = 15, min = 0.01, max = 5), ncol = 15, nrow = 1)
  tree$edge.length <- runif(nrow(tree$edge), 0.01, 3)
  result <- SelacOptimize(codon.data.path = 'kosi07_data/', phy = tree,
                          edge.length = 'optimize', optimal.aa = opt.aa.type, data.type = 'codon',
                          codon.model = 'selac', nuc.model = nuc.model, edge.linked = TRUE,
                          include.gamma = TRUE, gamma.type = 'quadrature', ncats = 4, numcode = 1,
                          diploid = TRUE, k.levels = 0, aa.properties = NULL, verbose = TRUE,
                          n.cores.by.gene = nCores, n.cores.by.gene.by.site = 1,
                          max.restarts = 1, max.evals = 20, max.tol = 1e-2, max.iterations = 15,
                          fasta.rows.to.keep = NULL, recalculate.starting.brlen = FALSE,
                          output.by.restart = FALSE,
                          output.restart.filename = output.file.name)
  cat(sprintf("End: %s\tLL: %0.3f\n",
              profile_prefix,
              result$loglik))
  # Write the CSV header once, then append this run's summary row.
  if (!file.exists(paste0(src.key, "_LL_log.csv")))
    cat("SRC,Nuc.Model,Gamma.model,Revision,nCores,seed,model.LL\n",
        file = paste0(src.key, "_LL_log.csv"),
        append = TRUE)
  cat(sprintf("\"%s\",\"%s\",\"%s\",\"%s\",%i,%i,%0.3f\n",
              src.key,
              nuc.model,
              gamma.type,
              selac_release,
              nCores,
              seed,
              result$loglik),
      file = paste0(src.key, "_LL_log.csv"),
      append = TRUE)
  cat("SELAC Done. saving results\n")
  result$seed <- seed
  result$startingValues <- starting.vals
  result$startingTree <- tree
  save(result, file = sprintf('ecoli_output/%s_result.Rdata', profile_prefix))
  result$loglik
}
|
# Plot temperature obs and reanalysis - along part of the voyage
library(grid)
library(chron)

# Ship observations matched against reanalysis output:
#   o  - 20CR comparison, o2 - ERA20C (loaded but not plotted here),
#   o3 - 20CR run 354 variant of the same comparison.
o  <- read.table('../Endurance.comparisons')
o2 <- read.table('../Endurance.comparisons.ERA20C')
o3 <- read.table('../Endurance.comparisons.354')

# Observation timestamps: columns V1-V4 hold year/month/day/hour.
dates <- chron(dates = sprintf("%04d/%02d/%02d", o$V1, o$V2, o$V3),
               times = sprintf("%02d:00:00", o$V4),
               format = c(dates = "y/m/d", times = "h:m:s"))

# Pick a time range to display
p.x <- chron(dates = c("1914/08/08", "1915/08/08"), times = "12:00:00",
             format = c(dates = "y/m/d", times = "h:m:s"))
tics <- pretty(p.x, min.n = 7)
ticl <- attr(tics, 'labels')
w <- which(dates >= p.x[1] & dates < p.x[2])

pdf(file="Endurance_temperature.pdf",
    width=15, height=7, family='Helvetica',
    paper='special', pointsize=18)

pushViewport(viewport(width=1.0, height=1.0, x=0.0, y=0.0,
                      just=c("left","bottom"), name="Page", clip='off'))
pushViewport(plotViewport(margins=c(4,6,0,0)))
pushViewport(dataViewport(p.x, c(-45,30)))

grid.xaxis(at=as.numeric(tics), label=ticl, main=TRUE)
grid.text('Date (1914-15)', y=unit(-3,'lines'))
grid.yaxis(main=TRUE)
# Label fixed: this figure shows temperature (y range -45..30), not
# sea-level pressure -- the old label was copied from the pressure plot.
grid.text('Temperature (C)', x=unit(-4,'lines'), rot=90)

# 20CR analysis spreads: for each hour draw a bar spanning
# mean +/- 2 standard deviations (columns V9 and V10).
gp <- gpar(col=rgb(0.8,0.8,1,1), fill=rgb(0.8,0.8,1,1))
for (i in w) {
  x <- c(dates[i]-0.125, dates[i]+0.125,
         dates[i]+0.125, dates[i]-0.125)
  y <- c(o$V9[i]-(o$V10[i])*2,
         o$V9[i]-(o$V10[i])*2,
         o$V9[i]+(o$V10[i])*2,
         o$V9[i]+(o$V10[i])*2)
  grid.polygon(x=unit(x,'native'),
               y=unit(y,'native'),
               gp=gp)
}
# Same spread bars for the run-354 comparison, in a darker blue on top.
gp <- gpar(col=rgb(0.4,0.4,1,1), fill=rgb(0.4,0.4,1,1))
for (i in w) {
  x <- c(dates[i]-0.125, dates[i]+0.125,
         dates[i]+0.125, dates[i]-0.125)
  y <- c(o3$V9[i]-(o3$V10[i])*2,
         o3$V9[i]-(o3$V10[i])*2,
         o3$V9[i]+(o3$V10[i])*2,
         o3$V9[i]+(o3$V10[i])*2)
  grid.polygon(x=unit(x,'native'),
               y=unit(y,'native'),
               gp=gp)
}

# Observations (column V8) as black points.
gp <- gpar(col=rgb(0,0,0,1), fill=rgb(0,0,0,1))
grid.points(x=unit(dates[w],'native'),
            y=unit(o$V8[w],'native'),
            size=unit(0.005,'npc'),
            pch=20,
            gp=gp)

popViewport()
popViewport()
popViewport()
# Close the PDF device so the file is finalized even in interactive use.
dev.off()
|
/voyages/endurance_1914-6/scripts/paper_figures/Endurance_temperatures_plot.R
|
no_license
|
alxbutterworth/Expeditions
|
R
| false
| false
| 2,559
|
r
|
# Plot temperature obs and reanalysis - along part of the voyage
library(grid)
library(chron)

# Ship observations matched against reanalysis output:
#   o  - 20CR comparison, o2 - ERA20C (loaded but not plotted here),
#   o3 - 20CR run 354 variant of the same comparison.
o  <- read.table('../Endurance.comparisons')
o2 <- read.table('../Endurance.comparisons.ERA20C')
o3 <- read.table('../Endurance.comparisons.354')

# Observation timestamps: columns V1-V4 hold year/month/day/hour.
dates <- chron(dates = sprintf("%04d/%02d/%02d", o$V1, o$V2, o$V3),
               times = sprintf("%02d:00:00", o$V4),
               format = c(dates = "y/m/d", times = "h:m:s"))

# Pick a time range to display
p.x <- chron(dates = c("1914/08/08", "1915/08/08"), times = "12:00:00",
             format = c(dates = "y/m/d", times = "h:m:s"))
tics <- pretty(p.x, min.n = 7)
ticl <- attr(tics, 'labels')
w <- which(dates >= p.x[1] & dates < p.x[2])

pdf(file="Endurance_temperature.pdf",
    width=15, height=7, family='Helvetica',
    paper='special', pointsize=18)

pushViewport(viewport(width=1.0, height=1.0, x=0.0, y=0.0,
                      just=c("left","bottom"), name="Page", clip='off'))
pushViewport(plotViewport(margins=c(4,6,0,0)))
pushViewport(dataViewport(p.x, c(-45,30)))

grid.xaxis(at=as.numeric(tics), label=ticl, main=TRUE)
grid.text('Date (1914-15)', y=unit(-3,'lines'))
grid.yaxis(main=TRUE)
# Label fixed: this figure shows temperature (y range -45..30), not
# sea-level pressure -- the old label was copied from the pressure plot.
grid.text('Temperature (C)', x=unit(-4,'lines'), rot=90)

# 20CR analysis spreads: for each hour draw a bar spanning
# mean +/- 2 standard deviations (columns V9 and V10).
gp <- gpar(col=rgb(0.8,0.8,1,1), fill=rgb(0.8,0.8,1,1))
for (i in w) {
  x <- c(dates[i]-0.125, dates[i]+0.125,
         dates[i]+0.125, dates[i]-0.125)
  y <- c(o$V9[i]-(o$V10[i])*2,
         o$V9[i]-(o$V10[i])*2,
         o$V9[i]+(o$V10[i])*2,
         o$V9[i]+(o$V10[i])*2)
  grid.polygon(x=unit(x,'native'),
               y=unit(y,'native'),
               gp=gp)
}
# Same spread bars for the run-354 comparison, in a darker blue on top.
gp <- gpar(col=rgb(0.4,0.4,1,1), fill=rgb(0.4,0.4,1,1))
for (i in w) {
  x <- c(dates[i]-0.125, dates[i]+0.125,
         dates[i]+0.125, dates[i]-0.125)
  y <- c(o3$V9[i]-(o3$V10[i])*2,
         o3$V9[i]-(o3$V10[i])*2,
         o3$V9[i]+(o3$V10[i])*2,
         o3$V9[i]+(o3$V10[i])*2)
  grid.polygon(x=unit(x,'native'),
               y=unit(y,'native'),
               gp=gp)
}

# Observations (column V8) as black points.
gp <- gpar(col=rgb(0,0,0,1), fill=rgb(0,0,0,1))
grid.points(x=unit(dates[w],'native'),
            y=unit(o$V8[w],'native'),
            size=unit(0.005,'npc'),
            pch=20,
            gp=gp)

popViewport()
popViewport()
popViewport()
# Close the PDF device so the file is finalized even in interactive use.
dev.off()
|
# AFL fuzz-regression case: decode a fixed integer byte vector with mcga's
# internal byte-to-double converter and print the decoded structure.
byte_input <- c(-1667457875L, -1667458046L, 67964173L, 67964173L, -1835887972L)
result <- mcga:::ByteVectorToDoubles(b = byte_input)
str(result)
|
/mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613108506-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 155
|
r
|
# AFL fuzz-regression case: decode a fixed integer byte vector with mcga's
# internal byte-to-double converter and print the decoded structure.
byte_input <- c(-1667457875L, -1667458046L, 67964173L, 67964173L, -1835887972L)
result <- mcga:::ByteVectorToDoubles(b = byte_input)
str(result)
|
# Tests for the latent-variable plotting helpers.
# NOTE(review): relies on fitted lavaan fixtures (`small`, `small_fa`)
# defined elsewhere in the package's test setup -- confirm they are loaded
# before this file runs.
context("helper_plot_latent works")
set.seed(2323)
options(warn=-1)  # suppress warnings for the whole file; restored below

require(lavaan)

test_that("beta_to_flexplot works", {
  expect_equal(beta_to_flexplot(small)[[1]], formula(z~f1 | f2))
  expect_equal(beta_to_flexplot(small, return_dvs = TRUE), 3)
  expect_equal(beta_to_flexplot(small_fa), formula(f1~f2))
  expect_equal(beta_to_flexplot(small_fa, TRUE), c(1,2))
})

test_that("get_endogenous_names works", {
  expect_true(get_endogenous_names(small)[3] == "z")
  expect_equal(get_endogenous_names(small_fa), c("f1", "f2"))
})

test_that("get_dv_iv works", {
  # beta matrix of the fitted model; get_dv_iv should recover the
  # predictor indices for endogenous variable 3.
  beta <- small@Model@GLIST$beta
  expect_equal(get_dv_iv(3, beta), c(1,2))
})

options(warn=0)  # restore default warning behavior
|
/tests/testthat/test-helper_plot_latent.R
|
no_license
|
dustinfife/flexplavaan
|
R
| false
| false
| 655
|
r
|
# Tests for the latent-variable plotting helpers.
# NOTE(review): relies on fitted lavaan fixtures (`small`, `small_fa`)
# defined elsewhere in the package's test setup -- confirm they are loaded
# before this file runs.
context("helper_plot_latent works")
set.seed(2323)
options(warn=-1)  # suppress warnings for the whole file; restored below

require(lavaan)

test_that("beta_to_flexplot works", {
  expect_equal(beta_to_flexplot(small)[[1]], formula(z~f1 | f2))
  expect_equal(beta_to_flexplot(small, return_dvs = TRUE), 3)
  expect_equal(beta_to_flexplot(small_fa), formula(f1~f2))
  expect_equal(beta_to_flexplot(small_fa, TRUE), c(1,2))
})

test_that("get_endogenous_names works", {
  expect_true(get_endogenous_names(small)[3] == "z")
  expect_equal(get_endogenous_names(small_fa), c("f1", "f2"))
})

test_that("get_dv_iv works", {
  # beta matrix of the fitted model; get_dv_iv should recover the
  # predictor indices for endogenous variable 3.
  beta <- small@Model@GLIST$beta
  expect_equal(get_dv_iv(3, beta), c(1,2))
})

options(warn=0)  # restore default warning behavior
|
# AFL fuzz-regression case: call meteor's internal Thornthwaite-Wilmott
# ET0 routine with a fixed pathological input and print what it returns.
fuzz_case <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 4.81717963470483e+135, 2.3317908961407e-93, 2.16562581831091e+161))
result <- meteor:::ET0_ThornthwaiteWilmott(doy = fuzz_case$doy,
                                           latitude = fuzz_case$latitude,
                                           temp = fuzz_case$temp)
str(result)
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615828613-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 734
|
r
|
# AFL fuzz-regression case: call meteor's internal Thornthwaite-Wilmott
# ET0 routine with a fixed pathological input and print what it returns.
fuzz_case <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 4.81717963470483e+135, 2.3317908961407e-93, 2.16562581831091e+161))
result <- meteor:::ET0_ThornthwaiteWilmott(doy = fuzz_case$doy,
                                           latitude = fuzz_case$latitude,
                                           temp = fuzz_case$temp)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/halfmatrix.R
\name{is.square}
\alias{is.square}
\title{Test If Something is Square}
\usage{
is.square(x, ...)
}
\arguments{
\item{x}{object}
\item{...}{passed arguments}
}
\description{
Tests if something is square. Generic, with method for matrix.
}
\seealso{
Other halfmatrix:
\code{\link{as.data.frame.halfmatrix}()},
\code{\link{as.halfmatrix.default}()},
\code{\link{as.halfmatrix.halfmatrix}()},
\code{\link{as.halfmatrix}()},
\code{\link{as.matrix.halfmatrix}()},
\code{\link{half.matrix}()},
\code{\link{half}()},
\code{\link{is.square.matrix}()},
\code{\link{offdiag.halfmatrix}()},
\code{\link{offdiag}()},
\code{\link{ord.halfmatrix}()},
\code{\link{ord.matrix}()},
\code{\link{ord}()},
\code{\link{print.halfmatrix}()}
}
\concept{halfmatrix}
\keyword{internal}
|
/man/is.square.Rd
|
no_license
|
cran/nonmemica
|
R
| false
| true
| 889
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/halfmatrix.R
\name{is.square}
\alias{is.square}
\title{Test If Something is Square}
\usage{
is.square(x, ...)
}
\arguments{
\item{x}{object}
\item{...}{passed arguments}
}
\description{
Tests if something is square. Generic, with method for matrix.
}
\seealso{
Other halfmatrix:
\code{\link{as.data.frame.halfmatrix}()},
\code{\link{as.halfmatrix.default}()},
\code{\link{as.halfmatrix.halfmatrix}()},
\code{\link{as.halfmatrix}()},
\code{\link{as.matrix.halfmatrix}()},
\code{\link{half.matrix}()},
\code{\link{half}()},
\code{\link{is.square.matrix}()},
\code{\link{offdiag.halfmatrix}()},
\code{\link{offdiag}()},
\code{\link{ord.halfmatrix}()},
\code{\link{ord.matrix}()},
\code{\link{ord}()},
\code{\link{print.halfmatrix}()}
}
\concept{halfmatrix}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exception.R
\name{assert}
\alias{assert}
\title{Assert}
\usage{
assert(condition, message = "assert error")
}
\arguments{
\item{condition}{Condition for assert.}
\item{message}{Exception message if the condition is not satisfied.}
}
\description{
Assert: throw exception if the condition is not satisfied.
}
|
/man/assert.Rd
|
permissive
|
hmito/hmRLib
|
R
| false
| true
| 387
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exception.R
\name{assert}
\alias{assert}
\title{Assert}
\usage{
assert(condition, message = "assert error")
}
\arguments{
\item{condition}{Condition for assert.}
\item{message}{Exception message if the condition is not satisfied.}
}
\description{
Assert: throw exception if the condition is not satisfied.
}
|
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-#
# Short Term Load Forecasting Competiion - Tao Hong's Energy Analytics Course
#
# Prepare and manage data sets used in forecasting
#
# Author: Jon T Farland <jonfarland@gmail.com>
#
# Copywright September 2015
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-#

#plotting and visual libraries
library("ggplot2")
library("lattice")
library("rworldmap")

#data management libraries
library("dplyr")
library("tidyr")
library("gdata")
library("reshape2")
#date/time handling -- required for mdy_h(), hour(), month(), duration(),
#floor_date() and ymd_h() used below (was missing, so those calls failed)
library("lubridate")

#modeling and forecast libraries
library("forecast")

#weather daya forecasts
library("weatherData")

#-----------------------------------------------------------------------------#
#
# Setup / Options
#
#-----------------------------------------------------------------------------#

# Current Directory
getwd()

#set the raw data as the current directory
setwd("/home/rstudio/projects/comp-2015/data/rawdat")

#-----------------------------------------------------------------------------#
#
# Load Inputs
#
#-----------------------------------------------------------------------------#

#uncomment the next command to run a Python script to download PJM load data for the last 5 years
#system('python /home/rstudio/projects/comp-2015/data/rawdat/00-pull-historical-load-data.py')

#download just the 2015 data as the competition ensues
system('python /home/rstudio/projects/comp-2015/data/rawdat/00-pull-2015-load-data.py')

#Read in only the dominion tab of the excel spreadsheets (sheet 22)
load11 <- read.xls("load11.xls", sheet=22) %>%
  select(DATE:HE24)
load12 <- read.xls("load12.xls", sheet=22) %>%
  select(DATE:HE24)
load13 <- read.xls("load13.xls", sheet=22) %>%
  select(DATE:HE24)
load14 <- read.xls("load14.xls", sheet=22) %>%
  select(DATE:HE24)
load15 <- read.xls("load15.xls", sheet=22) %>%
  select(DATE:HE24)

#2015 data goes up until 10/1. We're going to need to download the other preliminary files as well
#(skip=2 drops the report header rows above the column names)
str02 <- readLines("20151002_dailyload.csv")
prelim02 <- read.csv(text=str02,skip=2)
str03 <- readLines("20151003_dailyload.csv")
prelim03 <- read.csv(text=str03,skip=2)
str04 <- readLines("20151004_dailyload.csv")
prelim04 <- read.csv(text=str04,skip=2)
str05 <- readLines("20151005_dailyload.csv")
prelim05 <- read.csv(text=str05,skip=2)
str06 <- readLines("20151006_dailyload.csv")
prelim06 <- read.csv(text=str06,skip=2)
str07 <- readLines("20151007_dailyload.csv")
prelim07 <- read.csv(text=str07,skip=2)

#-----------------------------------------------------------------------------#
#
# Processing
#
#-----------------------------------------------------------------------------#

load.data <- rbind(load11, load12, load13, load14, load15)

#go from wide to long; HE columns are hour-ending, so subtract one hour to
#index by hour-beginning
load.long <- melt(load.data, id=c("DATE", "COMP")) %>%
  rename(hour = variable, load = value) %>%
  mutate(tindx = mdy_h(paste(DATE, substr(hour, 3, 4)))-duration(1,"hours"),
         hindx = hour(tindx),
         dindx = as.Date(tindx),
         mindx = month(tindx),
         dow = weekdays(tindx)) %>%
  select(tindx, hindx, dindx, mindx, load, dow) %>%
  arrange(dindx, hindx)

#stack preliminary data, shifted to hour-beginning the same way
prelim.data <- rbind(prelim02, prelim03, prelim04, prelim05, prelim06, prelim07) %>%
  select(Date, HourEnd, LoadAvgHourlyDOM) %>%
  rename(hour = HourEnd, load = LoadAvgHourlyDOM) %>%
  mutate(tindx = mdy_h(paste(Date, hour))-duration(1,"hours"),
         hindx = hour(tindx),
         dindx = as.Date(tindx),
         mindx = month(tindx),
         dow = weekdays(tindx)) %>%
  select(tindx, hindx, dindx, mindx, load, dow) %>%
  arrange(dindx, hindx)

#stack historical and preliminary metering
load.long <- rbind(load.long, prelim.data)

#quick checks
summary(load.long)

#-----------------------------------------------------------------------------#
#
# Graphics
#
#-----------------------------------------------------------------------------#

# load over time
plot1 <- plot(load.long$load ~ load.long$tindx)
plot2 <- plot(load.long$load ~ load.long$hindx)
plot3 <- plot(load.long$load ~ load.long$dindx)

#histograms and conditional histograms
histogram(~load | mindx, data = load.long, xlab="Load (MW)", ylab ="Density", col=c("red"))
histogram(~load | hindx, data = load.long, xlab="Load (MW)", ylab ="Density", col=c("red"))
histogram(~load , data = load.long, xlab="Load (MW)", ylab ="Density", col=c("red"))

#-----------------------------------------------------------------------------#
#
# Weather Data
#
#-----------------------------------------------------------------------------#

#use weatherData to pull data from Weather Underground
View(USAirportWeatherStations)
VA_stat <-
  subset(USAirportWeatherStations, State=="VA")
View(VA_stat)

#map weather stations
newmap<- getMap(resolution="low")
plot(newmap, xlim=c(-81,-70),ylim=c(30,40))
points(VA_stat$Lon, VA_stat$Lat, col ="red")

#pull weather data, one day per request, for the full history
beg <- as.Date('2011/01/01',format= "%Y/%m/%d")
end <- as.Date('2015/10/08',format= "%Y/%m/%d")
s <- seq(beg, to = end, by = 'days')

wx_df <- list()
for ( i in seq_along(s))
{
  print(i)
  print(s[i])
  wx_df[[i]]<-getDetailedWeather("RIC", s[i], opt_temperature_columns = T)
}

#unpack the list; sentinel readings below -1000 are replaced with the
#previous hour's value, then readings are averaged up to hourly
weather <- bind_rows(wx_df) %>%
  mutate(tindx = floor_date(Time, "hour"),
         hindx = hour(tindx),
         dindx = as.Date(Time),
         TemperatureF = replace(TemperatureF, TemperatureF < -1000, lag(TemperatureF, n=1))) %>%
  group_by(dindx,hindx) %>%
  summarize(TemperatureF = mean(TemperatureF)) %>%
  as.data.frame

summary(weather)
class(weather)
plot(weather$TemperatureF)

forecast <- read.csv("temp-forecasts-2015-10-08.csv") %>%
  mutate(tindx = ISOdatetime(year,mindx,dindx,hindx,0,0))

#quick plot
plot(forecast$tindx,forecast$temp)

#trim data and stack: drop the overlapping day so actuals and forecasts
#do not duplicate hours
temp_act <-
  subset(weather, dindx != "2015-10-09") %>%
  rename(temp = TemperatureF) %>%
  mutate(type = "act",
         tindx = ymd_h(paste(dindx, hindx))) %>%
  select(temp, type, tindx)

temp_fcst <-
  subset(forecast, dindx != 8 | mindx != 10 ) %>%
  mutate(type = "fcst") %>%
  select(temp, type, tindx)

temp_final <-
  rbind(temp_act, temp_fcst)

#-----------------------------------------------------------------------------#
#
# Outputs
#
#-----------------------------------------------------------------------------#

#save out the data
setwd("/home/rstudio/projects/comp-2015/data")
save(load.long,file="load-long.Rda")
save(temp_act,file="temp-act.Rda")
save(temp_fcst,file="temp-fcst.Rda")
# NOTE(review): the script previously ended with bare `write.csv()` and
# `writeRDS()` calls; both error when called with no arguments, and
# `writeRDS` does not exist (the base function is `saveRDS`). Removed.

#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-#
|
/code/reference-code/01-manage-data.R
|
no_license
|
jfarland/prob-comp-2015
|
R
| false
| false
| 7,082
|
r
|
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-#
# Short Term Load Forecasting Competiion - Tao Hong's Energy Analytics Course
#
# Prepare and manage data sets used in forecasting
#
# Author: Jon T Farland <jonfarland@gmail.com>
#
# Copywright September 2015
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-#
#plotting and visual libraries
library("ggplot2")
library("lattice")
library("rworldmap")
#data management libraries
library("dplyr")
library("tidyr")
library("gdata")
library("reshape2")
#modeling and forecast libraries
library("forecast")
#weather daya forecasts
library("weatherData")
#-----------------------------------------------------------------------------#
#
# Setup / Options
#
#-----------------------------------------------------------------------------#
# Current Directory
getwd()
#set the raw data as the current directory
setwd("/home/rstudio/projects/comp-2015/data/rawdat")
#-----------------------------------------------------------------------------#
#
# Load Inputs
#
#-----------------------------------------------------------------------------#
#uncomment the next command to run a Python script to download PJM load data for the last 5 years
#system('python /home/rstudio/projects/comp-2015/data/rawdat/00-pull-historical-load-data.py')
#download just the 2015 data as the competition ensues
system('python /home/rstudio/projects/comp-2015/data/rawdat/00-pull-2015-load-data.py')
#system('python /home/rstudio/projects/comp-2015/data/rawdat/00-pull-2015-load-data.py')
#Read in only the dominion tab of the excel spreadsheets
load11 <- read.xls("load11.xls", sheet=22) %>%
select(DATE:HE24)
load12 <- read.xls("load12.xls", sheet=22) %>%
select(DATE:HE24)
load13 <- read.xls("load13.xls", sheet=22) %>%
select(DATE:HE24)
load14 <- read.xls("load14.xls", sheet=22) %>%
select(DATE:HE24)
load15 <- read.xls("load15.xls", sheet=22) %>%
select(DATE:HE24)
#2015 data goes up until 10/1. We're going to need to download the other preliminary files as well
str02 <- readLines("20151002_dailyload.csv")
prelim02 <- read.csv(text=str02,skip=2)
str03 <- readLines("20151003_dailyload.csv")
prelim03 <- read.csv(text=str03,skip=2)
str04 <- readLines("20151004_dailyload.csv")
prelim04 <- read.csv(text=str04,skip=2)
str05 <- readLines("20151005_dailyload.csv")
prelim05 <- read.csv(text=str05,skip=2)
str06 <- readLines("20151006_dailyload.csv")
prelim06 <- read.csv(text=str06,skip=2)
str07 <- readLines("20151007_dailyload.csv")
prelim07 <- read.csv(text=str07,skip=2)
#-----------------------------------------------------------------------------#
#
# Processing
#
#-----------------------------------------------------------------------------#
# Stack the five annual wide-format load files into one table.
load.data=rbind(load11, load12, load13, load14, load15)
#go from wide to long
# melt() turns the HE01..HE24 columns into (variable, value) pairs; the
# hour number is recovered from characters 3-4 of the "HExx" label.
# mdy_h()/hour()/month()/duration() are lubridate functions.
load.long <- melt(load.data, id=c("DATE", "COMP")) %>%
rename(hour = variable, load = value) %>%
mutate(tindx = mdy_h(paste(DATE, substr(hour, 3, 4)))-duration(1,"hours"),
hindx = hour(tindx),
dindx = as.Date(tindx),
mindx = month(tindx),
dow = weekdays(tindx)) %>%
select(tindx, hindx, dindx, mindx, load, dow) %>%
arrange(dindx, hindx)
#stack preliminary data
# The daily preliminary CSVs are already long; align their columns and
# time index with load.long so the two can be row-bound.
prelim.data = rbind(prelim02, prelim03, prelim04, prelim05, prelim06, prelim07) %>%
select(Date, HourEnd, LoadAvgHourlyDOM) %>%
rename(hour = HourEnd, load = LoadAvgHourlyDOM) %>%
mutate(tindx = mdy_h(paste(Date, hour))-duration(1,"hours"),
hindx = hour(tindx),
dindx = as.Date(tindx),
mindx = month(tindx),
dow = weekdays(tindx)) %>%
select(tindx, hindx, dindx, mindx, load, dow) %>%
arrange(dindx, hindx)
#stack historical and preliminary metering
load.long = rbind(load.long, prelim.data)
#shifted to hour beginning rather than hour ending
#quick checks
summary(load.long)
#-----------------------------------------------------------------------------#
#
# Graphics
#
#-----------------------------------------------------------------------------#
# load over time
plot1 <- plot(load.long$load ~ load.long$tindx)
plot2 <- plot(load.long$load ~ load.long$hindx)
plot3 <- plot(load.long$load ~ load.long$dindx)
#histograms and conditional histograms
histogram(~load | mindx, data = load.long, xlab="Load (MW)", ylab ="Density", col=c("red"))
histogram(~load | hindx, data = load.long, xlab="Load (MW)", ylab ="Density", col=c("red"))
histogram(~load , data = load.long, xlab="Load (MW)", ylab ="Density", col=c("red"))
#-----------------------------------------------------------------------------#
#
# Weather Data
#
#-----------------------------------------------------------------------------#
#use weatherData to pull data from Weather Underground
View(USAirportWeatherStations)
VA_stat <-
subset(USAirportWeatherStations, State=="VA")
View(VA_stat)
#map weather stations
newmap<- getMap(resolution="low")
plot(newmap, xlim=c(-81,-70),ylim=c(30,40))
points(VA_stat$Lon, VA_stat$Lat, col ="red")
#pull weather data
beg <- as.Date('2011/01/01',format= "%Y/%m/%d")
end <- as.Date('2015/10/08',format= "%Y/%m/%d")
s <- seq(beg, to = end, by = 'days')
wx_df <- list()
#wx_df <- getDetailedWeather("RIC", "2015-01-01", opt_all_columns = T)
# for ( i in seq_along(s))
# {
# print(i)
# print(s[i])
# wx_df[[i]]<-getDetailedWeather("RIC", s[i], opt_all_columns = T)
# wx_df[[i]]$Wind_SpeedMPH[wx_df[[i]]$Wind_SpeedMPH %in% ("Calm")] = 0
# wx_df[[i]]$Wind_SpeedMPH = as.numeric(wx_df[[i]]$Wind_SpeedMPH)
# }
for ( i in seq_along(s))
{
print(i)
print(s[i])
wx_df[[i]]<-getDetailedWeather("RIC", s[i], opt_temperature_columns = T)
}
#unpack the list
weather <- bind_rows(wx_df) %>%
mutate(tindx = floor_date(Time, "hour"),
hindx = hour(tindx),
dindx = as.Date(Time),
TemperatureF = replace(TemperatureF, TemperatureF < -1000, lag(TemperatureF, n=1))) %>%
group_by(dindx,hindx) %>%
summarize(TemperatureF = mean(TemperatureF)) %>%
as.data.frame
summary(weather)
class(weather)
plot(weather$TemperatureF)
forecast <- read.csv("temp-forecasts-2015-10-08.csv") %>%
mutate(tindx = ISOdatetime(year,mindx,dindx,hindx,0,0))
#quick plot
plot(forecast$tindx,forecast$temp)
#trim data and stack
temp_act <-
subset(weather, dindx != "2015-10-09") %>%
rename(temp = TemperatureF) %>%
mutate(type = "act",
tindx = ymd_h(paste(dindx, hindx))) %>%
select(temp, type, tindx)
temp_fcst <-
subset(forecast, dindx != 8 | mindx != 10 ) %>%
mutate(type = "fcst") %>%
select(temp, type, tindx)
temp_final <-
rbind(temp_act, temp_fcst)
#-----------------------------------------------------------------------------#
#
# Outputs
#
#-----------------------------------------------------------------------------#
#save out the data
setwd("/home/rstudio/projects/comp-2015/data")
save(load.long,file="load-long.Rda")
save(temp_act,file="temp-act.Rda")
save(temp_fcst,file="temp-fcst.Rda")
# BUG FIX: removed the trailing `write.csv()` and `writeRDS()` calls.
# write.csv() errors when called with no arguments, and writeRDS() does
# not exist in R (the base function is saveRDS()); both calls would have
# aborted the script at this point.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-#
|
require(gdata)
datesep <- function(datestring){
  # Split an ostensibly-date string into its parts, trying hyphen, slash
  # and period as delimiters (in that order).
  # Returns a character vector of the parts.
  dlist <- unlist(strsplit(datestring, '-'))
  if (length(dlist)<3) dlist <- unlist(strsplit(datestring, '/'))
  # BUG FIX: '.' is a regex metacharacter matching ANY character, so the
  # original call split period-delimited dates into a vector of empty
  # strings; fixed = TRUE splits on a literal period.
  if (length(dlist)<3) dlist <- unlist(strsplit(datestring, '.', fixed = TRUE))
  return(dlist)
}
check.date.quality <- function(datestring){
  # Returns 1 when datesep() yields exactly three parts (i.e. the string
  # looks like a parseable date), otherwise 0.
  n.parts <- length(datesep(datestring))
  if (n.parts == 3) 1 else 0
}
get.date.part <- function(datecol, part.of.date){
  # Takes a date column and a part of date ('year', 'month' or 'day')
  # and returns a column of the specified part,
  # e.g. data$month <- get.date.part(data$date, 'month')
  #
  # Heuristic: the three separated components are disambiguated by their
  # maxima across the whole column -- the largest maximum is the year,
  # the smallest the month, the median the day. This requires the three
  # maxima to be distinct (enforced by stopifnot below).
  datecol[is.na(datecol)] <- '0-0-0' # get rid of NAs
  datecheck <- sapply(datecol,check.date.quality) # check for too-short dates
  if (0 %in% datecheck) {
    print(table(datecheck))
    print(datecol[datecheck==0])
    # BUG FIX: the original used a bare `stop` (just the function object,
    # a no-op), so execution continued past unparseable dates; stop()
    # must actually be called to halt here.
    stop('date(s) cannot be parsed! sapply check.date.quality and see what went wrong!')
  }
  datematrix <- sapply(datecol, datesep)
  # Bind each component vector to a variable named after its maximum so
  # it can be recovered below with get().
  d1 <- as.numeric(datematrix[1,])
  m1 <- max(d1)
  assign(as.character(m1),d1)
  d2 <- as.numeric(datematrix[2,])
  m2 <- max(d2)
  assign(as.character(m2),d2)
  d3 <- as.numeric(datematrix[3,])
  m3 <- max(d3)
  assign(as.character(m3),d3)
  stopifnot (m1!=m2 & m1!=m3 & m2!=m3)
  if (part.of.date=='month'){
    month <- as.character(min(c(m1,m2,m3)))
    return(as.numeric(get(month)))
  }
  else if (part.of.date=='year'){
    year <- as.character(max(c(m1,m2,m3)))
    return(as.numeric(get(year)))
  }
  else {
    # any other value (e.g. 'day') returns the median-maximum component
    month <- as.character(median(c(m1,m2,m3)))
    return(as.numeric(get(month)))
  }
}
##########################################
get.canonical.vocab <- function(keystring, vocablist, gen.or.specific){
  # Map a dash-delimited key string onto canonical vocabulary.
  # keystring:       dash-delimited raw labels, e.g. 'armyx-navyx'
  # vocablist:       data frame with columns old, new, new.specific
  # gen.or.specific: 'general' -> broad categories for matching
  #                  (e.g. "Salvadoran Army");
  #                  'specific' -> canonical fine label
  #                  (e.g. "Second Brigade")
  # Keys not found in vocablist$old map to 'Unknown'.
  new.vocab <- ''
  key <- unlist(strsplit(keystring, '-'))
  if (gen.or.specific=='specific') {
    for (k in key){
      if (k %in% vocablist$old){
        n <- setdiff(vocablist$new.specific[vocablist$old==k],NA)
      }
      else n <- 'Unknown'
      new.vocab <- paste(new.vocab,n,sep='-')
    }
  }
  else if (gen.or.specific=='general'){
    for (k in key){
      if (k %in% vocablist$old){
        n <- setdiff(vocablist$new[vocablist$old==k],NA)
      }
      else n <- 'Unknown'
      new.vocab <- paste(new.vocab,n,sep='-')
    }
  }
  else {
    # BUG FIX: the original printed a message and then evaluated a bare
    # `stop` (a no-op), falling through and returning '' for a misspelled
    # mode; stop() now actually raises the error.
    stop('cannot recognize general or specific command. only general or specific are accepted. did you misspell?')
  }
  new.vocab <- sub('-','',new.vocab) # drop the leading separator
  return(new.vocab)
}
##############################################
# function generators
# perp.vocab and viol.vocab are in individual/share/src,
# and can be linked to any individual/* project in SV
# they should always be called AS perp.vocab and viol.vocab, otherwise
# the code won't work, e.g.
# viol.vocab <- read.table('share/src/viols-vocab.csv', etc. etc.)
# Thin wrappers over get.canonical.vocab() with the vocabulary table and
# mode baked in. They read the globals perp.vocab / viol.vocab, which must
# be loaded before use (see the note above about share/src/*-vocab.csv).
get.perp <- function(keystring){
return(get.canonical.vocab(keystring,perp.vocab,'general'))
}
# Canonical *specific* perpetrator label (e.g. "Second Brigade").
get.perp.specific <- function(keystring){
return(get.canonical.vocab(keystring,perp.vocab,'specific'))
}
# General violation category from viol.vocab.
get.viol <- function(keystring){
return(get.canonical.vocab(keystring,viol.vocab,'general'))
}
# Canonical specific violation label from viol.vocab.
get.viol.specific <- function(keystring){
return(get.canonical.vocab(keystring,viol.vocab,'specific'))
}
# NOTE(review): get.perp is redefined later in this file with a different
# (two-argument) signature; whichever definition is sourced last wins.
## general purpose string splitters
# Factory: given a delimiter, build a function that splits strings on
# that literal delimiter and returns the flattened vector of parts.
split.general <- function(splitter){
  function(stringlist) {
    unlist(strsplit(stringlist, splitter, fixed = TRUE))
  }
}
# Splits on commas, e.g. comma.split("a,b") -> c("a", "b")
comma.split <- split.general(',')
split.and.find <- function(stringlist, splitter, n){
  # Split `stringlist` on `splitter` (a regex) and return the n-th part.
  parts <- unlist(strsplit(stringlist, splitter))
  parts[n]
}
make.split.and.find <- function(splitter, n){
  # Factory returning a one-argument version of split.and.find with the
  # delimiter and position fixed (handy for lapply).
  function(stringlist) {
    split.and.find(stringlist, splitter, n)
  }
}
get.last <- function(placestr){
# specific to CDHES, for now
# takes a comma-delimited place string and returns the final location
# usually the department, in CDHES
# Depends on globals: geo (data frame with a DEPT column), depts (vector
# of department names), and trim() from the gdata package -- all must be
# defined before calling.
parts <- unlist(strsplit(placestr,','))
last <- length(parts)
candidate <- trim(parts[last])
# exact match against the known department list
if (candidate %in% unique(geo$DEPT)) {
return(candidate)
}
else {
# otherwise look for department names contained inside the candidate
ok <- c()
for (d in depts){
if (length(grep(d,candidate))>0) ok <- c(ok,d)
else next
}
# multiple matches: ambiguous, flag as TRAVELING; none: unknown (NA)
if (length(ok)>1) return('TRAVELING')
else if (length(ok)<1) return(NA)
else return(trim(ok))
}
# NOTE(review): unreachable -- both branches above return.
print(candidate)
return(NA)
}
# Return the last whitespace-delimited token of a name string, or NA if
# the name has fewer than two tokens. space.split is presumably a
# split.general(' ') closure defined elsewhere -- TODO confirm.
get.last.rescate <- function(namestr){
namelist <- space.split(unlist(namestr))
n <- length(namelist)
if(n<2) return(NA)
else return(namelist[n])
}
# Return everything between the first and last tokens, space-joined, or
# NA if there are fewer than three tokens.
get.middle.names.rescate <- function(namestr){
namelist <- space.split(namestr)
n <- length(namelist)
if(n<3) return (NA)
else return(paste(namelist[2:(n-1)],collapse=' '))
}
get.nth <- function(character.vector, n){
  # Return the n-th element of a character vector.
  character.vector[n]
}
make.get.nth <- function(n){
  # Factory: fix the position, return a one-argument extractor.
  function(character.vector) {
    get.nth(character.vector, n)
  }
}
unlist.and.get.nth <- function(listitem, n){
  # Flatten a list, then take the n-th element of the result.
  get.nth(unlist(listitem), n)
}
make.get.fn <- function(n, listtype){
  # Factory dispatching on container type: 'char' yields an extractor
  # over character vectors; anything else yields one that unlists first.
  # (Duplicates make.get.nth for the 'char' case.)
  if (listtype == 'char') {
    function(character.vector) get.nth(character.vector, n)
  } else {
    function(listitem) unlist.and.get.nth(listitem, n)
  }
}
# Splits on a literal period, e.g. "victim.name" -> c("victim", "name")
dot.split <- split.general('.')
fix.db.colnames <- function(dset){
  # Keep only the part of each column name before the first period and
  # upper-case it (e.g. "victim.name" -> "VICTIM").
  raw <- colnames(dset)
  split.names <- lapply(raw, dot.split)
  heads <- unlist(lapply(split.names, function(parts) parts[1]))
  colnames(dset) <- toupper(heads)
  dset
}
get.muni <- function(dept,mlist,placestring){
  # Return a comma-joined string of every municipality in `mlist` whose
  # name appears (as a regex match) inside `placestring`. The `dept`
  # argument is kept for interface compatibility; it is not used in the
  # lookup itself.
  hits <- Filter(function(m) length(grep(m, placestring)) > 0, mlist)
  paste(unique(hits), collapse = ',')
}
make.get.muni <- function(dept, mlist){
  # Factory: fix the department and its municipality list, returning a
  # one-argument matcher suitable for lapply over place strings.
  function(placestring) {
    get.muni(dept, mlist, placestring)
  }
}
fix.specific.error <- function(col, error, correction){
# Unfinished stub: intended to find entries of `col` matching `error`
# and replace them with `correction`. Currently a no-op returning NULL.
#rows.with.error <- grep(error,col)
#col[rows.with.error] <-
### still working on this one ###
}
#########################################
# Left-pad a value with zeros until its printed width reaches `width`.
# Values already at or above the target width are returned unchanged
# (numerics stay numeric in that case, matching the original behaviour).
zpad <- function(number, width){
  while(nchar(number) < width) number <- paste("0", number, sep="")
  return(number)
}
# The three historical entry points (widths 5, 2 and 4) kept their names
# and signatures for backward compatibility; the triplicated loop now
# lives once in zpad().
zfill <- function(number){
  # Pad to 5 characters, e.g. 7 -> "00007".
  zpad(number, 5)
}
zfill2 <- function(number){
  # Pad to 2 characters, e.g. 7 -> "07".
  zpad(number, 2)
}
zfill4 <- function(number){
  # Pad to 4 characters, e.g. 7 -> "0007".
  zpad(number, 4)
}
###########################################
fix.colnames <- function(dataset){
  # rescate-specific: truncate each column name at the first comma and
  # upper-case the remainder.
  old.names <- colnames(dataset)
  new.names <- vapply(
    old.names,
    function(nm) toupper(unlist(strsplit(nm, ','))[1]),
    character(1),
    USE.NAMES = FALSE)
  colnames(dataset) <- new.names
  return(dataset)
}
strsplit.dash <- function(string){
  # Dash-split a string and whitespace-trim each part.
  # trim() comes from the gdata package loaded at the top of the file.
  parts <- unlist(strsplit(string, '-'))
  trim(parts)
}
# takes a list represented as a string with dashes as delimiters
# returns a sorted, unique list, also as a string
# Empty and NA parts are dropped; an input that yields no parts at all
# returns 'Unknown'.
# NOTE(review): this masks base::make.unique(); any later code expecting
# the base function will silently get this one instead.
make.unique <- function(stringlist){
new <- strsplit.dash(stringlist)
new <- new[new!='' & is.na(new)==FALSE]
if (length(new)==0) new <- 'Unknown'
new <- sort(unique(new))
new <- paste(new,collapse='-')
return(new)
}
# Takes a slash-delimited list in a single string and returns its
# UNSORTED unique entries, also slash-joined.
make.unique.slash <- function(stringlist){
  parts <- unique(unlist(strsplit(stringlist, '/')))
  paste(parts, collapse = '/')
}
######################################################
# Look up a single old perpetrator label in the global perp.vocab table.
# spec: 'general' returns the broad category column (new); anything else
# returns the specific canonical label (new.specific). Labels with no
# match yield 'Unknown'.
# NOTE(review): this REDEFINES the one-argument get.perp(keystring)
# wrapper declared earlier in this file with an incompatible signature;
# whichever definition is sourced last wins.
get.perp <- function(old.perp,spec){
row <- which(perp.vocab$old==old.perp)
if (spec=='general') {
new <- unique(perp.vocab$new[row])
if (length(new)<1) return('Unknown')
else return(new)
}
else {
new <- unique(perp.vocab$new.specific[row])
if (length(new)<1) return('Unknown')
else return(new)
}
}
# Convenience wrappers over the two-argument get.perp() defined above.
get.perp.gen <- function(old.perp){
return(get.perp(old.perp,'general'))
}
get.perp.spec <- function(old.perp){
return(get.perp(old.perp,'specific'))
}
##########################################################
# Dash-split a string (via strsplit.dash) and return its n-th part.
# NOTE(review): this REDEFINES the earlier get.nth(character.vector, n)
# with different semantics (the earlier one indexed an existing vector).
# Functions defined earlier that call get.nth -- e.g. closures built by
# make.get.nth -- resolve the name at call time and will therefore get
# THIS definition once the file is fully sourced.
get.nth <- function(strlist,n){
new <- strsplit.dash(strlist)
return(new[n])
}
# Positional convenience wrappers over get.nth().
get.1st <- function(strlist){
return(get.nth(strlist,1))
}
get.2nd <- function(strlist){
return(get.nth(strlist,2))
}
get.3rd <- function(strlist){
return(get.nth(strlist,3))
}
get.4th <- function(strlist){
return(get.nth(strlist,4))
}
|
/individual/Rescate/x-canonical/src/canonicalization-functions.R
|
no_license
|
AndreStephens/UWCHR
|
R
| false
| false
| 9,445
|
r
|
require(gdata)
datesep <- function(datestring){
  # Split an ostensibly-date string into its parts, trying hyphen, slash
  # and period as delimiters (in that order).
  # Returns a character vector of the parts.
  dlist <- unlist(strsplit(datestring, '-'))
  if (length(dlist)<3) dlist <- unlist(strsplit(datestring, '/'))
  # BUG FIX: '.' is a regex metacharacter matching ANY character, so the
  # original call split period-delimited dates into a vector of empty
  # strings; fixed = TRUE splits on a literal period.
  if (length(dlist)<3) dlist <- unlist(strsplit(datestring, '.', fixed = TRUE))
  return(dlist)
}
check.date.quality <- function(datestring){
# takes an ostensibly-date string
# returns 1 if datesep(datestring) has three parts, 0 if not
datelength <- length(datesep(datestring))
return(ifelse(datelength==3,1,0))
}
get.date.part <- function(datecol, part.of.date){
  # Takes a date column and a part of date ('year', 'month' or 'day')
  # and returns a column of the specified part,
  # e.g. data$month <- get.date.part(data$date, 'month')
  #
  # Heuristic: the three separated components are disambiguated by their
  # maxima across the whole column -- the largest maximum is the year,
  # the smallest the month, the median the day. This requires the three
  # maxima to be distinct (enforced by stopifnot below).
  datecol[is.na(datecol)] <- '0-0-0' # get rid of NAs
  datecheck <- sapply(datecol,check.date.quality) # check for too-short dates
  if (0 %in% datecheck) {
    print(table(datecheck))
    print(datecol[datecheck==0])
    # BUG FIX: the original used a bare `stop` (just the function object,
    # a no-op), so execution continued past unparseable dates; stop()
    # must actually be called to halt here.
    stop('date(s) cannot be parsed! sapply check.date.quality and see what went wrong!')
  }
  datematrix <- sapply(datecol, datesep)
  # Bind each component vector to a variable named after its maximum so
  # it can be recovered below with get().
  d1 <- as.numeric(datematrix[1,])
  m1 <- max(d1)
  assign(as.character(m1),d1)
  d2 <- as.numeric(datematrix[2,])
  m2 <- max(d2)
  assign(as.character(m2),d2)
  d3 <- as.numeric(datematrix[3,])
  m3 <- max(d3)
  assign(as.character(m3),d3)
  stopifnot (m1!=m2 & m1!=m3 & m2!=m3)
  if (part.of.date=='month'){
    month <- as.character(min(c(m1,m2,m3)))
    return(as.numeric(get(month)))
  }
  else if (part.of.date=='year'){
    year <- as.character(max(c(m1,m2,m3)))
    return(as.numeric(get(year)))
  }
  else {
    # any other value (e.g. 'day') returns the median-maximum component
    month <- as.character(median(c(m1,m2,m3)))
    return(as.numeric(get(month)))
  }
}
##########################################
get.canonical.vocab <- function(keystring, vocablist, gen.or.specific){
  # Map a dash-delimited key string onto canonical vocabulary.
  # keystring:       dash-delimited raw labels, e.g. 'armyx-navyx'
  # vocablist:       data frame with columns old, new, new.specific
  # gen.or.specific: 'general' -> broad categories for matching
  #                  (e.g. "Salvadoran Army");
  #                  'specific' -> canonical fine label
  #                  (e.g. "Second Brigade")
  # Keys not found in vocablist$old map to 'Unknown'.
  new.vocab <- ''
  key <- unlist(strsplit(keystring, '-'))
  if (gen.or.specific=='specific') {
    for (k in key){
      if (k %in% vocablist$old){
        n <- setdiff(vocablist$new.specific[vocablist$old==k],NA)
      }
      else n <- 'Unknown'
      new.vocab <- paste(new.vocab,n,sep='-')
    }
  }
  else if (gen.or.specific=='general'){
    for (k in key){
      if (k %in% vocablist$old){
        n <- setdiff(vocablist$new[vocablist$old==k],NA)
      }
      else n <- 'Unknown'
      new.vocab <- paste(new.vocab,n,sep='-')
    }
  }
  else {
    # BUG FIX: the original printed a message and then evaluated a bare
    # `stop` (a no-op), falling through and returning '' for a misspelled
    # mode; stop() now actually raises the error.
    stop('cannot recognize general or specific command. only general or specific are accepted. did you misspell?')
  }
  new.vocab <- sub('-','',new.vocab) # drop the leading separator
  return(new.vocab)
}
##############################################
# function generators
# perp.vocab and viol.vocab are in individual/share/src,
# and can be linked to any individual/* project in SV
# they should always be called AS perp.vocab and viol.vocab, otherwise
# the code won't work, e.g.
# viol.vocab <- read.table('share/src/viols-vocab.csv', etc. etc.)
get.perp <- function(keystring){
return(get.canonical.vocab(keystring,perp.vocab,'general'))
}
get.perp.specific <- function(keystring){
return(get.canonical.vocab(keystring,perp.vocab,'specific'))
}
get.viol <- function(keystring){
return(get.canonical.vocab(keystring,viol.vocab,'general'))
}
get.viol.specific <- function(keystring){
return(get.canonical.vocab(keystring,viol.vocab,'specific'))
}
## general purpose string splitters
split.general <- function(splitter){
# takes splitter
# returns function to return vector of string parts
return(
function(stringlist){
return(unlist(strsplit(stringlist,splitter,fixed=TRUE)))
}
)
}
comma.split <- split.general(',') # returns vector of string parts as split by commas
split.and.find <- function(stringlist,splitter,n){
# takes string, splitter, and n
# returns nth string part from string, as split by splitter
new <- unlist(strsplit(stringlist,splitter))
new.n <- new[n]
return(new.n)
}
make.split.and.find <- function(splitter,n){
# returns specific versions of split.and.find
return(
function(stringlist){
return(split.and.find(stringlist,splitter,n))
}
)
}
get.last <- function(placestr){
# specific to CDHES, for now
# takes a place string and returns the final location
# usually the department, in CDHES
parts <- unlist(strsplit(placestr,','))
last <- length(parts)
candidate <- trim(parts[last])
if (candidate %in% unique(geo$DEPT)) {
return(candidate)
}
else {
ok <- c()
for (d in depts){
if (length(grep(d,candidate))>0) ok <- c(ok,d)
else next
}
if (length(ok)>1) return('TRAVELING')
else if (length(ok)<1) return(NA)
else return(trim(ok))
}
print(candidate)
return(NA)
}
get.last.rescate <- function(namestr){
namelist <- space.split(unlist(namestr))
n <- length(namelist)
if(n<2) return(NA)
else return(namelist[n])
}
get.middle.names.rescate <- function(namestr){
namelist <- space.split(namestr)
n <- length(namelist)
if(n<3) return (NA)
else return(paste(namelist[2:(n-1)],collapse=' '))
}
get.nth <- function(character.vector,n){
return(character.vector[n])
}
make.get.nth <- function(n){
return(function(character.vector){
return(get.nth(character.vector,n))
}
)
}
unlist.and.get.nth <- function(listitem,n){
listitem <- unlist(listitem)
return(get.nth(listitem,n))
}
make.get.fn <- function(n,listtype){
if (listtype=='char'){
return(function(character.vector){
return(get.nth(character.vector,n))
}
)
}
else {
return(function(listitem){
return(unlist.and.get.nth(listitem,n))
}
)
}
}
dot.split <- split.general('.')
fix.db.colnames <- function(dset){
cols <- colnames(dset)
cols2 <- lapply(cols,dot.split)
cols3 <- unlist(lapply(cols2,make.get.nth(1)))
colnames(dset) <- toupper(cols3)
return(dset)
}
get.muni <- function(dept,mlist,placestring){
# takes department name, list of munis in that dept, and lugar string
# returns character string with munis from the list that appear in the given string
munis <- c()
for (m in mlist){
if(length(grep(m,placestring))>0) {
munis <- c(munis,m)
}
}
munis <- paste(unique(munis),collapse=',')
return(munis)
}
make.get.muni <- function(dept, mlist){
# wrapper for get.muni, above
# makes an (l-apply-able) function of a string specific to one dept.
return(
function(placestring){
return(get.muni(dept,mlist,placestring))
}
)
}
fix.specific.error <- function(col, error, correction){
#rows.with.error <- grep(error,col)
#col[rows.with.error] <-
### still working on this one ###
}
#########################################
zfill <- function(number){
while(nchar(number)<5) number <- paste("0", number, sep="")
return(number)
}
zfill2 <- function(number){
while(nchar(number)<2) number <- paste('0',number,sep='')
return(number)
}
zfill4 <- function(number){
while(nchar(number)<4) number <- paste('0',number,sep='')
return(number)
}
###########################################
fix.colnames <- function(dataset){
# rescate-specific
cols <- colnames(dataset)
cols.fixed <- c()
for (c in cols){
c.fixed <- toupper(unlist(strsplit(c,','))[1])
cols.fixed <- c(cols.fixed,c.fixed)
}
colnames(dataset) <- cols.fixed
return(dataset)
}
strsplit.dash <- function(string){
new <- trim(unlist(strsplit(string,'-')))
return(new)
}
# takes a list represented as a string with dashes as delimiters
# returns a sorted, unique list, also as a string
make.unique <- function(stringlist){
new <- strsplit.dash(stringlist)
new <- new[new!='' & is.na(new)==FALSE]
if (length(new)==0) new <- 'Unknown'
new <- sort(unique(new))
new <- paste(new,collapse='-')
return(new)
}
# takes a list represented as a string with slashes as delimiters
# returns UNSORTED, unique list, also as a string
make.unique.slash <- function(stringlist){
new <- unlist(strsplit(stringlist,'/'))
new <- unique(new)
new <- paste(new,collapse='/')
return(new)
}
######################################################
get.perp <- function(old.perp,spec){
row <- which(perp.vocab$old==old.perp)
if (spec=='general') {
new <- unique(perp.vocab$new[row])
if (length(new)<1) return('Unknown')
else return(new)
}
else {
new <- unique(perp.vocab$new.specific[row])
if (length(new)<1) return('Unknown')
else return(new)
}
}
get.perp.gen <- function(old.perp){
return(get.perp(old.perp,'general'))
}
get.perp.spec <- function(old.perp){
return(get.perp(old.perp,'specific'))
}
##########################################################
get.nth <- function(strlist,n){
new <- strsplit.dash(strlist)
return(new[n])
}
get.1st <- function(strlist){
return(get.nth(strlist,1))
}
get.2nd <- function(strlist){
return(get.nth(strlist,2))
}
get.3rd <- function(strlist){
return(get.nth(strlist,3))
}
get.4th <- function(strlist){
return(get.nth(strlist,4))
}
|
library(logcondens)
### Name: isoMean
### Title: Pool-Adjacent Violaters Algorithm: Least Square Fit under
### Monotonicity Constraint
### Aliases: isoMean
### Keywords: htest nonparametric
### ** Examples
## simple regression model
# n noisy observations of the monotone curve y = x^2 on [0, 1]
n <- 50
x <- sort(runif(n, 0, 1))
y <- x ^ 2 + rnorm(n, 0, 0.2)
s <- seq(0, 1, by = 0.01)
# true curve in red; rug marks show the observed x locations
plot(s, s ^ 2, col = 2, type = 'l', xlim = range(c(0, 1, x)),
ylim = range(c(0, 1 , y))); rug(x)
## plot pava result
# isotonic (monotone) least-squares fit with equal weights 1/n, drawn as
# a step function
lines(x, isoMean(y, rep(1 / n, n)), type = 's')
|
/data/genthat_extracted_code/logcondens/examples/isoMean.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 503
|
r
|
library(logcondens)
### Name: isoMean
### Title: Pool-Adjacent Violaters Algorithm: Least Square Fit under
### Monotonicity Constraint
### Aliases: isoMean
### Keywords: htest nonparametric
### ** Examples
## simple regression model
n <- 50
x <- sort(runif(n, 0, 1))
y <- x ^ 2 + rnorm(n, 0, 0.2)
s <- seq(0, 1, by = 0.01)
plot(s, s ^ 2, col = 2, type = 'l', xlim = range(c(0, 1, x)),
ylim = range(c(0, 1 , y))); rug(x)
## plot pava result
lines(x, isoMean(y, rep(1 / n, n)), type = 's')
|
require(keras)
setwd("/users/jelle/documents/github/LoreSeekerAI/models")
source("../Queries/read_ls_data.R")
setwd("/users/jelle/documents/github/helloworldcpp/data")
l1reglambda <- 0.00005
lambda <- 0.001
# layers:
# 60 -> 10 got about 0.774 in 1 run
# 60 -> 17 got about 0.78 in 1 run
# 60 -> 20 got about 0.78 in 1 run
# 60 -> 30 got about 0.76 in 1 run
# 40 -> 17 got about 0.774 in 1 run
# 55 -> 17 got about 0.771 in 1 run
# 80 -> 17 got about 0.7745 in 1 run
# 120 -> 17 got about 0.77 in 1 run
# dropouts 0.15 -> 0.07 got 0.78 in 1 run
# dropouts 0.1 -> 0.07 got 0.7725 in 1 run
# dropouts 0.07 -> 0.07 got 0.7799 in 1 run
# dropouts 0.15 -> 0.15 got 0.768 in 1 run
# dropouts 0.15 -> 0.04 got 0.780 in run
model <- keras_model_sequential()
model %>%
layer_dense(
units = 6,
activation = "relu",
kernel_regularizer = regularizer_l1_l2(
l1 = l1reglambda,
l2 = lambda)) %>%
layer_dense(
units = 1,
activation = 'sigmoid',
kernel_regularizer = regularizer_l1_l2(
l1 = l1reglambda,
l2 = lambda))
# on 1 training set, we're getting about 0.78 validation accuracy
# lr = 0.00008, rho = 0.9 did well on 1 training set
model %>% compile(
optimizer = optimizer_rmsprop(
lr = 0.0012,
rho = 0.9,
epsilon = NULL,
decay = 0,
clipnorm = NULL,
clipvalue = NULL),
loss = loss_binary_crossentropy,
metrics = metric_binary_accuracy)
data_list <- read_loreseeker_data("summary.csv")
# 105 epochs of 512 batches gave 0.762 in 1 run
# 220 epochs of 256 batches gave 0.78 in 1 run
# 220 epochs of 124 batches gave 0.771 in 1 run
# 440 epochs of 124 batches gave 0.773 in 1 run
# best result we've had was epochs 220 x batch_size 256 for 0.78 in 1 run
model %>% fit(
data_list$train_x,
data_list$train_y,
epochs = round(nrow(data_list$train_x) / 32),
batch_size= 64,
validation_data = list(data_list$test_x, data_list$test_y),
verbose = 2)
setwd("/users/jelle/documents/github/LoreSeekerAI/models")
model %>% save_model_tf("kerasdeepnn")
# Export every layer's parameters to CSV: odd entries of get_weights()
# are weight matrices, even entries are bias/intercept vectors -- TODO
# confirm this pairing holds for all layer types used here.
weights <- get_weights(model)
# FIX: seq_along() instead of 1:length() -- the latter iterates over
# c(1, 0) when the weight list is empty; also TRUE/FALSE instead of the
# reassignable T/F shorthands.
for (i in seq_along(weights)) {
  description <- "weights"
  if (i %% 2 == 0) { description <- "intercepts" }
  layer <- ceiling(i / 2)
  filename <- paste0(
    "AI_",
    description,
    "_layer",
    layer,
    ".csv")
  write.table(
    weights[i],
    file = filename,
    sep = ",",
    row.names = FALSE,
    col.names = FALSE)
}
|
/Exploration/Keras.Rd
|
no_license
|
ArtOfBBQ/LoreSeekerAI
|
R
| false
| false
| 2,537
|
rd
|
require(keras)
setwd("/users/jelle/documents/github/LoreSeekerAI/models")
source("../Queries/read_ls_data.R")
setwd("/users/jelle/documents/github/helloworldcpp/data")
l1reglambda <- 0.00005
lambda <- 0.001
# layers:
# 60 -> 10 got about 0.774 in 1 run
# 60 -> 17 got about 0.78 in 1 run
# 60 -> 20 got about 0.78 in 1 run
# 60 -> 30 got about 0.76 in 1 run
# 40 -> 17 got about 0.774 in 1 run
# 55 -> 17 got about 0.771 in 1 run
# 80 -> 17 got about 0.7745 in 1 run
# 120 -> 17 got about 0.77 in 1 run
# dropouts 0.15 -> 0.07 got 0.78 in 1 run
# dropouts 0.1 -> 0.07 got 0.7725 in 1 run
# dropouts 0.07 -> 0.07 got 0.7799 in 1 run
# dropouts 0.15 -> 0.15 got 0.768 in 1 run
# dropouts 0.15 -> 0.04 got 0.780 in run
model <- keras_model_sequential()
model %>%
layer_dense(
units = 6,
activation = "relu",
kernel_regularizer = regularizer_l1_l2(
l1 = l1reglambda,
l2 = lambda)) %>%
layer_dense(
units = 1,
activation = 'sigmoid',
kernel_regularizer = regularizer_l1_l2(
l1 = l1reglambda,
l2 = lambda))
# on 1 training set, we're getting about 0.78 validation accuracy
# lr = 0.00008, rho = 0.9 did well on 1 training set
model %>% compile(
optimizer = optimizer_rmsprop(
lr = 0.0012,
rho = 0.9,
epsilon = NULL,
decay = 0,
clipnorm = NULL,
clipvalue = NULL),
loss = loss_binary_crossentropy,
metrics = metric_binary_accuracy)
data_list <- read_loreseeker_data("summary.csv")
# 105 epochs of 512 batches gave 0.762 in 1 run
# 220 epochs of 256 batches gave 0.78 in 1 run
# 220 epochs of 124 batches gave 0.771 in 1 run
# 440 epochs of 124 batches gave 0.773 in 1 run
# best result we've had was epochs 220 x batch_size 256 for 0.78 in 1 run
model %>% fit(
data_list$train_x,
data_list$train_y,
epochs = round(nrow(data_list$train_x) / 32),
batch_size= 64,
validation_data = list(data_list$test_x, data_list$test_y),
verbose = 2)
setwd("/users/jelle/documents/github/LoreSeekerAI/models")
model %>% save_model_tf("kerasdeepnn")
# Export every layer's parameters to CSV: odd entries of get_weights()
# are weight matrices, even entries are bias/intercept vectors -- TODO
# confirm this pairing holds for all layer types used here.
weights <- get_weights(model)
# FIX: seq_along() instead of 1:length() -- the latter iterates over
# c(1, 0) when the weight list is empty; also TRUE/FALSE instead of the
# reassignable T/F shorthands.
for (i in seq_along(weights)) {
  description <- "weights"
  if (i %% 2 == 0) { description <- "intercepts" }
  layer <- ceiling(i / 2)
  filename <- paste0(
    "AI_",
    description,
    "_layer",
    layer,
    ".csv")
  write.table(
    weights[i],
    file = filename,
    sep = ",",
    row.names = FALSE,
    col.names = FALSE)
}
|
\name{PolarConcentrationAndPhase}
\alias{PolarConcentrationAndPhase}
\alias{PolarConcentrationAndPhase.RasterBrick}
\alias{PolarConcentrationAndPhase.RasterStack}
\title{
Polar Concentration And Phase
}
\description{
Calculates the concentration and phase of a polar data
}
\usage{
PolarConcentrationAndPhase(cdata, phase_units = "radians", n = 12,
disag = FALSE, justPhase = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{cdata}{
Either a vector or a matrix. If a vector, each element represents equally
spaced points along a polar coordinate system. If a matrix, each column
represents an equally spaced position along a polar coordinate system and
each row a new series of measurements.
}
\item{phase_units}{
Units of the phase outputs. Default is \code{"radians"} (\eqn{- \pi} to
\eqn{\pi}), but there is also a choice of \code{"degrees"} (-180 to 180)
and \code{"months"} (-6 months to 6 months)
}
\item{n}{
length of the cycle that phase and concentration are calculated over.
Defaults to the number of columns for a matrix, or 12 (i.e., months in a
year) for a raster stack or brick. If dat is longer (i.e., if
\code{ncol(dat)} for a matrix or the number of layers for raster objects
is > n), a 'climatology' is calculated at base n.
}
\item{disagFact}{
Only used if dat is a raster brick or stack. Disaggregation factor used
by \code{\link{disaggregate}} to smooth raster outputs. Useful for neater
plotting. Default is NaN, which does not perform disaggregation.
}
\item{justPhase}{
Logical; only used if dat is a raster brick or stack. If \code{TRUE}, just
returns the phase metric.
}
}
\details{
Each simulated or observed timestep (e.g month) with in the period
(i.e year) is represented by a vector in the complex plane, the length of
the vector corresponding to the magnitude of the variable for each period
and the directions of the vector corresponding to the timeing within the
period. It is assumed each timestep is uniformly distributed:
\deqn{\theta_{t} = 2 . \pi ( t - 1 ) / n}
where \eqn{n} is the number of timesteps \eqn{t} in the period.
A mean vector L is calculated by averaging the real and imaginary parts of
the n vectors, \eqn{x}.
\deqn{Lx = \Sigma x cos ( \theta )}
and
\deqn{Ly = \Sigma x sin ( \theta )}
The length of the mean vector divided by the annual value stands for
seasonal concentration, \eqn{C}; its direction stands for phase, \eqn{P}:
\deqn{C = ( Lx^2 + Ly^2 ) / \Sigma x}
\deqn{P = atan (Lx / Ly)}
Thus, if the variable is concentrated all in one point within the polar
coordinates, seasonal concentration is equal to 1 and the phase corresponds
to that month. If the variable is evenly spread over all coordinates, then
concentration is equal to zero and phase is undefined.
}
\value{
Two components are returned, each of the length of the first dimension of
cdata input, or length 1 if cdata is a vector
\item{phase }{the phase timing of each row of the inputs (see details above)}
\item{conc }{the concentration around the phase of each row of the inputs
(see details above)}
}
\references{
Kelley, D. I., Prentice, I. C., Harrison, S. P., Wang, H., Simard, M.,
Fisher, J. B., & Willis, K. O. (2013). A comprehensive benchmarking system
for evaluating global vegetation models. Biogeosciences, 10(5), 3313-3340.
doi:10.5194/bg-10-3313-2013
}
\author{Douglas Kelley \email{douglas.i.kelley@gmail.com}}
\seealso{
\code{\link{MPD}}
}
\examples{
require(plotrix)
##############################################################################
## matrix ##
##############################################################################
## Average Monthly Temperatures at Nottingham, 1920-1939
## Anderson, O. D. (1976) Time Series Analysis and Forecasting: The
## Box-Jenkins approach. Butterworths. Series R.
## see ?nottem
## Load T
T = t(matrix(nottem,nrow=12))
## Calculate seasonal climatology and angle of each month in degrees
climT = apply(T, 2, mean)
climT[1:6] = climT[1:6]
periods = head(seq(0, 360, length.out = 13), -1)
## Plot climatology
polar.plot(climT, periods,
labels = c('J','F','M','A','M','J','J','A','S','O','N','D'),
label.pos = periods, radial.labels = '', radial.lim = c(25,62),
rp.type = 'p', poly.col = '#FF0000AA')
scaleConc <- function(i) min(climT) + i * diff(range(climT))
## Calculate phase and concentraion.
pc = PolarConcentrationAndPhase(climT, phase_units = "degrees")
phase = pc[[1]][1]
## Convert concentration to point on temperature plot
conc = scaleConc(pc[[2]][1])
## Plot climatology phase on concentration on plot
polar.plot(conc, phase, point.symbol = 4,radial.lim = c(25,62),
rp.type = 'rs', cex = 2, lwd = 2, add = TRUE)
## same calculation and plot or each year.
pc = PolarConcentrationAndPhase(T, phase_units = "degrees")
phase = pc[[1]]
conc = scaleConc(pc[[2]])
polar.plot(conc, phase, point.symbol = 16, radial.lim = c(25,62),
rp.type = 'rs', cex = 1, add = TRUE, line.col = 'black')
##############################################################################
## Raster ##
##############################################################################
require(raster)
b = brick(system.file("external/rlogo.grd", package = "raster"))
b = PolarConcentrationAndPhase(b)
dev.new()
plot(b)
b = b[[2]]
b0 = b*2*pi
for (i in 1:12) {
bi = cos(pi *i/12 + b0)
b = addLayer(b, bi)
}
b = dropLayer(b, 1)
maxb = which.max(b)
phsb = PolarConcentrationAndPhase(b, phase_units = 'months', justPhase = TRUE)
dev.new()
par(mfrow = c(3, 1))
plot(maxb, main = 'max layer')
plot(phsb, main = 'phase')
plot(values(maxb), values(phsb), pch = 4)
}
\keyword{ ~polar }
\keyword{ ~phase }
\keyword{ ~concentration }
|
/benchmarkMetrics/man/PolarConcentrationAndPhase.Rd
|
no_license
|
douglask3/benchmarkmetrics
|
R
| false
| false
| 6,367
|
rd
|
\name{PolarConcentrationAndPhase}
\alias{PolarConcentrationAndPhase}
\alias{PolarConcentrationAndPhase.RasterBrick}
\alias{PolarConcentrationAndPhase.RasterStack}
\title{
Polar Concentration And Phase
}
\description{
Calculates the concentration and phase of a polar data
}
\usage{
PolarConcentrationAndPhase(cdata, phase_units = "radians", n = 12,
disag = FALSE, justPhase = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{cdata}{
Either vector or matrix. If vector, each element represents equally spaced
points along a polar coordinate system. If matrix, each column represents
equally spaced positions along a polar coordinate system and each row a new
series of measurements.
}
\item{phase_units}{
Units of the phase outputs. Default is \code{"radians"} (\eqn{- \pi} to
\eqn{\pi}), but there is also a choice of \code{"degrees"} (-180 to 180)
and \code{"months"} (-6 months to 6 months)
}
\item{n}{
length of cycle phase and concentration is calculated over. Default number
of columns for matrix or 12 (i.e, 12 in a year) for raster stack or brick.
If dat is longer (i.e, if \code{ncol(dat)} for matrix or number of layers
for raster objects is > n), a 'climatology' is calculated at base n.
}
\item{disagFact}{
Only used if dat is a raster brick or stack. Disaggregation factor used
by \code{\link{disaggregate}} to smooth raster outputs. Useful for neater
plotting. Default is NaN which does not perform disaggregation.
}
\item{justPhase}{
Logical only used if dat is a raster brick or stack. . If \code{TRUE}, just
returns the phase metric.
}
}
\details{
Each simulated or observed timestep (e.g month) with in the period
(i.e year) is represented by a vector in the complex plane, the length of
the vector corresponding to the magnitude of the variable for each period
and the directions of the vector corresponding to the timing within the
period. It is assumed each timestep is uniformly distributed:
\deqn{\theta_{t} = 2 . \pi ( t - 1 ) / n}
where \eqn{n} is the number of timesteps \eqn{t} in the period.
A mean vector L is calculated by averaging the real and imaginary parts of
the n vectors, \eqn{x}.
\deqn{Lx = \Sigma x cos ( \theta )}
and
\deqn{Ly = \Sigma x sin ( \theta )}
The length of the mean vector divided by the annual value stands for
seasonal concentration, \eqn{C}; its direction stands for phase, \eqn{P}:
\deqn{C = ( Lx^2 + Ly^2 ) / \Sigma x}
\deqn{P = atan (Lx / Ly)}
Thus, if the variable is concentrated all in one point within the polar
coordinates, seasonal concentration is equal to 1 and the phase corresponds
to that month. If the variable is evenly spread over all coordinates, then
concentration is equal to zero and phase is undefined.
}
\value{
Two components are returned, each of the length of the first dimension of
cdata input, or length 1 if cdata is a vector
\item{phase }{the phase timing of each row of the inputs (see details above)}
\item{conc }{the concentration around the phase of each row of the inputs
(see details above)}
}
\references{
Kelley, D. I., Prentice, I. C., Harrison, S. P., Wang, H., Simard, M.,
Fisher, J. B., & Willis, K. O. (2013). A comprehensive benchmarking system
for evaluating global vegetation models. Biogeosciences, 10(5), 3313-3340.
doi:10.5194/bg-10-3313-2013
}
\author{Douglas Kelley \email{douglas.i.kelley@gmail.com}}
\seealso{
\code{\link{MPD}}
}
\examples{
require(plotrix)
##############################################################################
## matrix ##
##############################################################################
## Average Monthly Temperatures at Nottingham, 1920-1939
## Anderson, O. D. (1976) Time Series Analysis and Forecasting: The
## Box-Jenkins approach. Butterworths. Series R.
## see ?nottem
## Load T
T = t(matrix(nottem,nrow=12))
## Calculate seasonal climatology and angle of each month in degrees
climT = apply(T, 2, mean)
climT[1:6] = climT[1:6]
periods = head(seq(0, 360, length.out = 13), -1)
## Plot climatology
polar.plot(climT, periods,
labels = c('J','F','M','A','M','J','J','A','S','O','N','D'),
label.pos = periods, radial.labels = '', radial.lim = c(25,62),
rp.type = 'p', poly.col = '#FF0000AA')
scaleConc <- function(i) min(climT) + i * diff(range(climT))
## Calculate phase and concentraion.
pc = PolarConcentrationAndPhase(climT, phase_units = "degrees")
phase = pc[[1]][1]
## Convert concentration to point on temperature plot
conc = scaleConc(pc[[2]][1])
## Plot climatology phase on concentration on plot
polar.plot(conc, phase, point.symbol = 4,radial.lim = c(25,62),
rp.type = 'rs', cex = 2, lwd = 2, add = TRUE)
## same calculation and plot or each year.
pc = PolarConcentrationAndPhase(T, phase_units = "degrees")
phase = pc[[1]]
conc = scaleConc(pc[[2]])
polar.plot(conc, phase, point.symbol = 16, radial.lim = c(25,62),
rp.type = 'rs', cex = 1, add = TRUE, line.col = 'black')
##############################################################################
## Raster ##
##############################################################################
require(raster)
b = brick(system.file("external/rlogo.grd", package = "raster"))
b = PolarConcentrationAndPhase(b)
dev.new()
plot(b)
b = b[[2]]
b0 = b*2*pi
for (i in 1:12) {
bi = cos(pi *i/12 + b0)
b = addLayer(b, bi)
}
b = dropLayer(b, 1)
maxb = which.max(b)
phsb = PolarConcentrationAndPhase(b, phase_units = 'months', justPhase = TRUE)
dev.new()
par(mfrow = c(3, 1))
plot(maxb, main = 'max layer')
plot(phsb, main = 'phase')
plot(values(maxb), values(phsb), pch = 4)
}
\keyword{ ~polar }
\keyword{ ~phase }
\keyword{ ~concentration }
|
source("RTwitterUtils.r")
args <- commandArgs(trailingOnly = TRUE)

# Fetch tweets matching `searchterm`, score their sentiment, and persist
# the scored tweets to the database.
#
# Args:
#   searchterm: Twitter query string.
#   qty: number of tweets to fetch (forwarded to TweetFrame()).
#
# Relies on helpers from RTwitterUtils.r: LoadPosWordSet(), LoadNegWordSet(),
# TweetFrame(), GetConnection(), GetScore(), SaveTweetToDB(), CloseConnection().
RunTwitterSentiment <- function(searchterm, qty) {
  print(paste("Searching for: ", searchterm))
  pos.words <- LoadPosWordSet()
  neg.words <- LoadNegWordSet()
  tweets <- TweetFrame(searchterm, qty)
  db <- GetConnection()
  by(tweets, seq_len(nrow(tweets)), function(row) {
    print(row$text)
    # BUG FIX: the original set `sentimentOkay = FALSE` inside the error
    # handler, which only created a variable local to the handler's own
    # environment -- the outer flag never changed, so tweets whose scoring
    # failed were still saved with a score of 0. Let tryCatch() return the
    # score (or NULL on failure) and test that instead.
    tweetScore <- tryCatch(
      GetScore(row$text, pos.words, neg.words),
      error = function(e) NULL
    )
    if (!is.null(tweetScore)) {
      SaveTweetToDB(db, row$id, row$screenName, row$text, tweetScore)
    }
  })
  CloseConnection(db)
}

# Invoked as: Rscript RunTwitterSentiment.r <searchterm> <qty>
RunTwitterSentiment(args[1], args[2])
|
/RunTwitterSentiment.r
|
no_license
|
PereiraM/RPISentimentServer
|
R
| false
| false
| 754
|
r
|
# Entry-point script: score tweets for a search term and store them in a DB.
# Helper functions come from RTwitterUtils.r (sourced below).
source("RTwitterUtils.r")
args<-commandArgs(trailingOnly=TRUE)
# searchterm: Twitter query string; qty: number of tweets to fetch.
RunTwitterSentiment<-function(searchterm, qty){
print(paste("Searching for: ",searchterm))
pos.words = LoadPosWordSet()
neg.words = LoadNegWordSet()
tweets<-TweetFrame(searchterm, qty)
db<-GetConnection()
# Score and save each tweet row.
by(tweets, 1:nrow(tweets), function(row){
print(row$text)
tweetScore = 0
sentimentOkay = TRUE
# NOTE(review): `sentimentOkay = FALSE` inside the handler below only
# creates a variable local to the handler's environment; the outer flag
# is never updated, so a failed GetScore() still saves the tweet with
# score 0. Confirm and fix (e.g. have tryCatch return the score).
tryCatch(
tweetScore<-GetScore(row$text, pos.words, neg.words)
, error=function(e) {
sentimentOkay = FALSE
})
if(sentimentOkay) {
SaveTweetToDB(db, row$id, row$screenName, row$text, tweetScore)
}
}
)
CloseConnection(db)
}
# Invoked as: Rscript RunTwitterSentiment.r <searchterm> <qty>
RunTwitterSentiment(args[1], args[2])
|
# Point SPARK_HOME at a local Spark installation if it is not already set.
if (nchar(Sys.getenv("SPARK_HOME"))<1) {
Sys.setenv(SPARK_HOME = "C:\\Spark\\spark-2.1")
}
#Sys.setenv(JAVA_HOME= "C:/Program Files/java/jre1.8.0_121/")
# Load SparkR from the Spark distribution's bundled R library.
library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"),"R", "lib")))
#sc <- sparkR.session(master = "local")
# Start a local Spark session using all cores and 1.5 GB of driver memory.
sc <- sparkR.session(master = "local[*]", sparkEnvir = list(spark.driver.memory = "1.5g"))
sqlContext <- sparkR.session(sc)
# Read the review dump as a Spark DataFrame and inspect it.
# NOTE(review): SparkR documents this option as `inferSchema` (capital S);
# confirm whether the lowercase `inferschema` spelling is honoured.
music <- read.df("/E:/capstone_project/Musical_Instruments_5.json", "json", inferschema = TRUE)
printSchema(music)
head(music)
count(music)
#library(rJava)
|
/spark r in R.R
|
no_license
|
avk1/VJ
|
R
| false
| false
| 562
|
r
|
# Point SPARK_HOME at a local Spark installation if it is not already set.
if (nchar(Sys.getenv("SPARK_HOME"))<1) {
Sys.setenv(SPARK_HOME = "C:\\Spark\\spark-2.1")
}
#Sys.setenv(JAVA_HOME= "C:/Program Files/java/jre1.8.0_121/")
# Load SparkR from the Spark distribution's bundled R library.
library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"),"R", "lib")))
#sc <- sparkR.session(master = "local")
# Start a local Spark session using all cores and 1.5 GB of driver memory.
sc <- sparkR.session(master = "local[*]", sparkEnvir = list(spark.driver.memory = "1.5g"))
sqlContext <- sparkR.session(sc)
# Read the review dump as a Spark DataFrame and inspect it.
# NOTE(review): SparkR documents this option as `inferSchema` (capital S);
# confirm whether the lowercase `inferschema` spelling is honoured.
music <- read.df("/E:/capstone_project/Musical_Instruments_5.json", "json", inferschema = TRUE)
printSchema(music)
head(music)
count(music)
#library(rJava)
|
qat_call_save_histogram_test <-
function(resultlist_part, element = -999, dim_mv=1, time = NULL, height = NULL, lat = NULL, lon = NULL, vec1 = NULL, vec2 = NULL, vec3 = NULL, vec4 = NULL, baseunit = NULL, savelist = list(), savelistcounter = 1) {
## functionality: calling function for qat_save_distribution_1d
## author: André Düsterhus
## date: 12.04.2011
## version: A0.1
## input: measurement_vector, workflowlist element, number of actual element, time vector (optional), latitude vector (optional), longitude vector (optional), 4 optional vectors, resultlist (optional), counter of resultlist (optional)
## output: list with the results and parameters of the lim rule analysis
# Add the histogram-test save bundle to savelist.
# NOTE(review): the counter is incremented *before* the assignment, so with
# the default savelistcounter = 1 the first entry lands in slot 2 and slot 1
# stays NULL -- confirm callers pass savelistcounter = length(savelist).
savelist[[savelistcounter <- savelistcounter+1]] <- list(element=element, tosave = qat_save_histogram_test(resultlist_part, baseunit=""))
return(savelist)
}
|
/R/qat_call_save_histogram_test.R
|
no_license
|
cran/qat
|
R
| false
| false
| 876
|
r
|
qat_call_save_histogram_test <-
function(resultlist_part, element = -999, dim_mv=1, time = NULL, height = NULL, lat = NULL, lon = NULL, vec1 = NULL, vec2 = NULL, vec3 = NULL, vec4 = NULL, baseunit = NULL, savelist = list(), savelistcounter = 1) {
  ## functionality: calling function for qat_save_distribution_1d
  ## author: André Düsterhus
  ## date: 12.04.2011
  ## version: A0.1
  ## input: measurement_vector, workflowlist element, number of actual element, time vector (optional), latitude vector (optional), longitude vector (optional), 4 optional vectors, resultlist (optional), counter of resultlist (optional)
  ## output: list with the results and parameters of the lim rule analysis
  # Advance the slot index first, then store the save bundle (same net
  # effect as the original combined `[[counter <- counter + 1]]` form).
  savelistcounter <- savelistcounter + 1
  entry <- list(element = element,
                tosave = qat_save_histogram_test(resultlist_part, baseunit = ""))
  savelist[[savelistcounter]] <- entry
  return(savelist)
}
|
# Launch the interactive CIplot demo as a Shiny app.
library(shiny)
library(HH)
# height = "auto" lets the plot region size itself to the browser window.
# NOTE(review): shiny.CIplot() is assumed to come from the HH package -- confirm.
shiny.CIplot(height = "auto")
|
/inst/shiny/CIplot/app.R
|
no_license
|
cran/HH
|
R
| false
| false
| 58
|
r
|
# Launch the interactive CIplot demo as a Shiny app.
library(shiny)
library(HH)
# height = "auto" lets the plot region size itself to the browser window.
# NOTE(review): shiny.CIplot() is assumed to come from the HH package -- confirm.
shiny.CIplot(height = "auto")
|
# NOTE(review): clearing the workspace and hard-coding setwd() make this
# script non-portable; consider relative paths or an RStudio project.
rm(list = ls())
setwd("/Users/dylandavis/Documents/Math 370/CDC_10_Years_of_Data")
library(plyr)
# Read every file in the directory into a list of data frames.
E = lapply(list.files(),function(x) read.csv(x,header = T))
e = E
# For each data set, collect the column names of interest: drug IDs,
# controlled-substance columns, diagnosis codes and reason-for-visit codes
# (data sets 9-10 carry five DIAG/RFV columns instead of three).
w = list(
lapply(E,function(x) names(x)[grep("DRUGID", names(x))]),
lapply(E,function(x) names(x)[grep("CONTSUB", names(x))]),
lapply(1:10,function(x) if(x<=8){c("DIAG1","DIAG2","DIAG3")}else{c("DIAG1","DIAG2","DIAG3","DIAG4","DIAG5")} ),
lapply(1:10,function(x) if(x<=8){c("RFV1","RFV2","RFV3")}else{c("RFV1","RFV2","RFV3","RFV4","RFV5")} )
)
# Pull the named columns out of each data frame in `df_L` (per the parallel
# list of column-name vectors `col_L`), then stack the pieces row-wise.
# rbind.fill (plyr) pads columns missing from a given piece with NA.
combine_columns = function(df_L,col_L){
  pieces = mapply(function(frame, wanted) frame[, wanted], df_L, col_L)
  do.call(rbind.fill, pieces)
}
# Build one wide data frame: column-bind the four stacked code groups.
newDF = do.call(cbind, lapply(w,function(x) combine_columns(e,x) ))
# Replace empty or whitespace-only entries of `vec` with NA, restoring the
# vector's original class (via the matching as.<class> coercion) on the way
# back out.
whitespace2NA = function(vec){
  original_class = class(vec)
  chars = as.character(vec)
  chars[trimws(chars) == ""] = NA
  restore = get(paste0("as.", original_class))
  restore(chars)
}
# Coerce every column, mapping whitespace-only entries to NA.
newDF = as.data.frame(lapply(newDF,function(x) whitespace2NA(x)))
# Vector of column indices 1..ncol(newDF).
v = 1:length(newDF)
# Replace the missing-value sentinel -9 (numeric or character) with NA.
for(i in v){
(newDF[[i]][newDF[[i]] == -9] = NA)
(newDF[[i]][newDF[[i]] == "-9"] = NA)
}
contains_dash = list()
# For each column, record the indices of values containing a "-".
for(i in v){
contains_dash[i] = list(grep("-", newDF[[i]]))
}
contains_dash_ind = which(sapply(contains_dash, function(x) length(x) ) != 0)
# Strip the "-" characters from those columns. NOTE(review): the original
# comment said "replace with 0", but gsub substitutes the empty string.
for(i in contains_dash_ind){
newDF[[i]] = as.factor(as.character(gsub("-","",newDF[[i]])))
}
setwd("/Users/dylandavis/Documents/Math 370/Assigment 4 Resources")
op_codes = as.vector(read.csv("OpioidCodesOnly.csv",header=F)[,1])
# Eliminate duplicate values from op_codes.
op_codes = unique(op_codes)
# Drop a handful of specific codes from op_codes.
for(i in c("d04766","a11242","d03826","n09045","n11008")){op_codes=op_codes[-grep(i,op_codes)]}
load(file = "ICD9_codes.rda")
load(file = "RFV_codes.rda")
practice_newDF = newDF[c(1,2),]
# All ICD9 code groups of interest, flattened into one vector.
ICD9_list = c(Alcohol_ICD9,Diabetes_ICD9,Mental_ICD9,Pain_ICD9)
op_list = list(OP = op_codes)
schedule_list = list(S2=2,S3=3,S4=4,S5=5,S6=6)
# Build one 0/1 indicator column per entry of `myList`: a row of `myDF`
# scores 1 for an entry when any of the row's values appears in that
# entry's code vector, 0 otherwise. (apply() coerces the data frame to a
# matrix row-by-row, matching the original implementation.)
bicols = function(myDF, myList){
  flag_rows = function(codes) apply(myDF, 1, function(row) any(row %in% codes))
  as.data.frame(lapply(myList, flag_rows))*1
}
# Indicator frames for diagnoses, reasons-for-visit, opioid drug IDs and
# controlled-substance schedules, combined into one analysis data frame.
diags = bicols(newDF[names(newDF)[grep("DIAG", names(newDF))]],ICD9_list)
rfvs = bicols(newDF[names(newDF)[grep("RFV", names(newDF))]],codes_RFV)
ops = bicols(newDF[names(newDF)[grep("DRUGID", names(newDF))]],op_list)
constubs = bicols(newDF[names(newDF)[grep("CONTSUB", names(newDF))]],schedule_list)
biDF = cbind(diags,rfvs,ops,constubs)
setwd("/Users/dylandavis/Documents/Math 370")
write.csv(biDF, file = "biDF.csv", row.names = FALSE)
#=============================test_cases==========================================#
# Smoke tests: run bicols against small data frames of each column type.
load("testdf.Rda")
test = bicols(df_char, list("1or2"=c(1,2),"3"=3))
testy = cbind(df_char,test)
test = bicols(df_fac, list("1or2"=c(1,2),"3"=3))
testy = cbind(df_fac,test)
test = bicols(df_num, list("1or2"=c("1","2"),"3"="3"))
testy = cbind(df_num,test)
test = bicols(df_int, list("1or2"=c("1","2"),"3"="3"))
testy = cbind(df_int,test)
|
/Optimized for Logistic Regression/Dylan_Davis_HW4.R
|
no_license
|
dd238/Data-Mgmt.-Analysis-with-R
|
R
| false
| false
| 3,323
|
r
|
# NOTE(review): clearing the workspace and hard-coding setwd() make this
# script non-portable; consider relative paths or an RStudio project.
rm(list = ls())
setwd("/Users/dylandavis/Documents/Math 370/CDC_10_Years_of_Data")
library(plyr)
# Read every file in the directory into a list of data frames.
E = lapply(list.files(),function(x) read.csv(x,header = T))
e = E
# Column names of interest per data set: drug IDs, controlled-substance
# columns, diagnosis codes and reason-for-visit codes (sets 9-10 carry
# five DIAG/RFV columns instead of three).
w = list(
lapply(E,function(x) names(x)[grep("DRUGID", names(x))]),
lapply(E,function(x) names(x)[grep("CONTSUB", names(x))]),
lapply(1:10,function(x) if(x<=8){c("DIAG1","DIAG2","DIAG3")}else{c("DIAG1","DIAG2","DIAG3","DIAG4","DIAG5")} ),
lapply(1:10,function(x) if(x<=8){c("RFV1","RFV2","RFV3")}else{c("RFV1","RFV2","RFV3","RFV4","RFV5")} )
)
# Extract the named columns from each data frame and stack them row-wise;
# rbind.fill pads missing columns with NA.
combine_columns = function(df_L,col_L){
sub_df_L = mapply(function(x,y) x[,y] ,df_L,col_L )
new_df = do.call(rbind.fill,sub_df_L)
return(new_df)
}
# Build one wide data frame: column-bind the four stacked code groups.
newDF = do.call(cbind, lapply(w,function(x) combine_columns(e,x) ))
# Replace empty or whitespace-only entries with NA, restoring the
# vector's original class via the matching as.<class> coercion.
whitespace2NA = function(vec){
myclass = class(vec)
vec = as.character(vec)
vec[trimws(vec)==""] = NA
func = get( paste0("as.",myclass) )
vec = func(vec)
return(vec)
}
# Coerce every column, mapping whitespace-only entries to NA.
newDF = as.data.frame(lapply(newDF,function(x) whitespace2NA(x)))
# Vector of column indices 1..ncol(newDF).
v = 1:length(newDF)
# Replace the missing-value sentinel -9 (numeric or character) with NA.
for(i in v){
(newDF[[i]][newDF[[i]] == -9] = NA)
(newDF[[i]][newDF[[i]] == "-9"] = NA)
}
contains_dash = list()
# For each column, record the indices of values containing a "-".
for(i in v){
contains_dash[i] = list(grep("-", newDF[[i]]))
}
contains_dash_ind = which(sapply(contains_dash, function(x) length(x) ) != 0)
# Strip the "-" characters from those columns. NOTE(review): the original
# comment said "replace with 0", but gsub substitutes the empty string.
for(i in contains_dash_ind){
newDF[[i]] = as.factor(as.character(gsub("-","",newDF[[i]])))
}
setwd("/Users/dylandavis/Documents/Math 370/Assigment 4 Resources")
op_codes = as.vector(read.csv("OpioidCodesOnly.csv",header=F)[,1])
# Eliminate duplicate values from op_codes.
op_codes = unique(op_codes)
# Drop a handful of specific codes from op_codes.
for(i in c("d04766","a11242","d03826","n09045","n11008")){op_codes=op_codes[-grep(i,op_codes)]}
load(file = "ICD9_codes.rda")
load(file = "RFV_codes.rda")
practice_newDF = newDF[c(1,2),]
# All ICD9 code groups of interest, flattened into one vector.
ICD9_list = c(Alcohol_ICD9,Diabetes_ICD9,Mental_ICD9,Pain_ICD9)
op_list = list(OP = op_codes)
schedule_list = list(S2=2,S3=3,S4=4,S5=5,S6=6)
# One 0/1 indicator column per entry of myList: a row scores 1 when any of
# its values appears in that entry's code vector.
bicols = function(myDF, myList){
bicol = function(myDF, myVector){return(apply(myDF, 1, function(x) any(x %in% myVector)))}
biDF = as.data.frame(lapply(myList, function(x) bicol(myDF,x)))*1
return(biDF)
}
# Indicator frames for diagnoses, reasons-for-visit, opioid drug IDs and
# controlled-substance schedules, combined into one analysis data frame.
diags = bicols(newDF[names(newDF)[grep("DIAG", names(newDF))]],ICD9_list)
rfvs = bicols(newDF[names(newDF)[grep("RFV", names(newDF))]],codes_RFV)
ops = bicols(newDF[names(newDF)[grep("DRUGID", names(newDF))]],op_list)
constubs = bicols(newDF[names(newDF)[grep("CONTSUB", names(newDF))]],schedule_list)
biDF = cbind(diags,rfvs,ops,constubs)
setwd("/Users/dylandavis/Documents/Math 370")
write.csv(biDF, file = "biDF.csv", row.names = FALSE)
#=============================test_cases==========================================#
# Smoke tests: run bicols against small data frames of each column type.
load("testdf.Rda")
test = bicols(df_char, list("1or2"=c(1,2),"3"=3))
testy = cbind(df_char,test)
test = bicols(df_fac, list("1or2"=c(1,2),"3"=3))
testy = cbind(df_fac,test)
test = bicols(df_num, list("1or2"=c("1","2"),"3"="3"))
testy = cbind(df_num,test)
test = bicols(df_int, list("1or2"=c("1","2"),"3"="3"))
testy = cbind(df_int,test)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{interviews}
\alias{interviews}
\title{Descriptions of Interviews with Patients}
\format{A character vector of length 10 containing the terms separated by \code{", "}.}
\usage{
interviews
}
\description{
An artificial dataset containing terms from generated
descriptions of interviews with patients.
(cf. \link{examinations} and \link{recommendations}).
}
\details{
The structure of the visit, i.e. the partition to interview, examination
and recommendations is based on real data collected from Polish
health centers. The procedure of extracting medical terms from the real
medical data is described in the paper Dobrakowski et al., 2019.
}
\references{
Dobrakowski, A., A. Mykowiecka, M. Marciniak, W. Jaworski, and P. Biecek 2019.
Interpretable Segmentation of Medical Free-Text
Records Based on Word Embeddings. arXiv preprint arXiv:1907.04152.
}
\seealso{
\code{\link{examinations}}, \code{\link{recommendations}}
}
\keyword{datasets}
|
/man/interviews.Rd
|
permissive
|
karthik/memr
|
R
| false
| true
| 1,044
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{interviews}
\alias{interviews}
\title{Descriptions of Interviews with Patients}
\format{A character vector of legth 10 containing the terms separated by \code{", "}.}
\usage{
interviews
}
\description{
An artificial dataset containing terms from generated
descriptions of interviews with patients.
(cf. \link{examinations} and \link{recommendations}).
}
\details{
The structure of the visit, i.e. the partition to interview, examination
and recommendations is based on real data collected from Polish
health centers. The procedure of extracting medical terms from the real
medical data is described in the paper Dobrakowski et al., 2019.
}
\references{
Dobrakowski, A., A. Mykowiecka, M. Marciniak, W. Jaworski, and P. Biecek 2019.
Interpretable Segmentation of Medical Free-Text
Records Based on Word Embeddings. arXiv preprint arXiv:1907.04152.
}
\seealso{
\code{\link{examinations}}, \code{\link{recommendations}}
}
\keyword{datasets}
|
\name{cubinf}
\alias{cubinf}
\title{
Conditionally unbiased bounded influence estimates of discrete Generalized Linear Models
}
\description{
Conditionally unbiased bounded influence estimates as described in Kuensch et al.
(1989) in three special GLM cases: Bernoulli, Binomial, and Poisson distributed responses.
The result is an object of class "cubinf".
}
\usage{
cubinf(x, y, weights = NULL, start=NULL, etastart=NULL, mustart=NULL,
offset = NULL, family = binomial(), control = cubinf.control(...),
intercept = FALSE, ...)
}
\arguments{
\item{x}{Vector or matrix of explanatory variable(s).
Columns represent variables and rows are observations.
}
\item{y}{Vector of observed responses.
In the case of Binomial responses, y is a two column matrix:
the 1st column contains the number of successes,
the 2nd column the number of failures.
The Bernoulli case, is treated as a special Binomial case.
However, the response y is a categorical variable (not a matrix with two columns) with two levels.
}
\item{weights}{Optional weights for weighted regression. Components must be non negative integers.
}
\item{start}{Starting values for the parameters in the linear predictor.
Not used but required for compatibility with the glm function.
}
\item{etastart}{Starting values for the linear predictor.
Not used but required for compatibility with the glm function.
}
\item{mustart}{Starting values for the vector of means.
Not used but required for compatibility with the glm function.
}
\item{offset}{Optional offset added to the linear predictor.
}
\item{family}{A family object. Only two options are available for cubinf:
'family=binomial()' and 'family=poisson()'.
}
\item{control}{A list of control parameters for the numerical algorithms.
See cubinf.control for the possible control parameters and their defaults.
}
\item{intercept}{Logical flag: if TRUE, an intercept term is added to the model.
}
\item{\dots }{Further named control arguments as singular.ok or qr.out used in the case
where the x matrix is singular.}
}
\value{
A list with the following components:
\item{coefficients}{Coefficient estimates. }
\item{residuals}{Working residuals. }
\item{rsdev}{Deviance residuals. }
\item{fitted.values }{Fitted values. }
\item{cov}{Estimated covariance matrix of the coefficients. }
\item{rank }{Rank of the model matrix. }
\item{df.residuals}{Degrees of freedom in the residuals. }
\item{ci}{Vector of final bias corrections. }
\item{A}{Final value of the matrix A. }
\item{ai}{Vector with components a_i=ufact/|Ax_i| (where x_i^T denotes the ith row of the model matrix) }
\item{converged}{A logical value. FALSE if the maximum number of iterations was reached. }
\item{control}{Control parameters. }
\item{prior.weights}{Input vector w (when some of its components are different from 1). }
\item{family}{The family object used in the call to cubinf
'ics=1' for the Bernoulli case.
'ics=2' for the Binomial case.
'ics=3' for the Poisson case. }
\item{linear.predictors}{Components of the linear predictor (the model matrix
multiplied by the coefficient vector). }
\item{iter}{Number of iterations required in the main algorithm. }
\item{y }{Coded value of the response. }
\item{gradient }{Vector of the final unscaled negative gradient of the objective
function. }
\item{inv.hessian }{Vector of the final inverse of the Hessian matrix in compact
storage mode. }
}
\details{
The initial values of the coefficients (theta), the matrix A and the bias correction c are
computed using the ROBETH subroutine GINTAC (Marazzi, 1993). Then an initial covariance
matrix (for the convergence criterion) is computed by means of the ROBETH subroutines
GFEDCA and KTASKW.
Finally, the main algorithm (subroutine GYMAIN) alternates between improving values of
- theta, for fixed A and c (theta-step, subroutine GYTSTP),
- c, for fixed theta and A (c-step, subroutine GYCSTP),
- A, for fixed theta and c (A-step, subroutine GYASTP).
For the different available options see the function cubinf.control.
}
\references{
Kuensch, H.R., Stefanski L.A., Carroll R.J. (1989).
Conditionally unbiased bounded-influence estimation in general regression models,
with application to generalized linear models.
Journal of the American Statistical Association, 84, 460-466.
Marazzi, A. (1993).
Algorithms, Routines, and S-functions for robust Statistics.
Chapman and Hall, New York.
}
\seealso{
glm(..., method="cubinf"), \code{\link{cubinf.control}}
}
\examples{
library(robcbi)
y <- c(5,10,15,20,30,40,60,80,100)
x <- matrix(
c(0,1,0,0,1,0,0,1,0,0,0,1,0,0,1,0,0,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1),
nrow=9,byrow=FALSE)
z <- cubinf(x,y, family=poisson, control=list(ufact=3.2), intercept=TRUE)
z$iter
z$coeff
z <- cubinf(x,y, family=poisson, control=list(ufact=30), intercept=TRUE)
z$iter
z$coeff
}
\keyword{ stats }
\keyword{ robust }
|
/man/cubinf.Rd
|
no_license
|
cran/robcbi
|
R
| false
| false
| 5,203
|
rd
|
\name{cubinf}
\alias{cubinf}
\title{
Conditionally unbiased bounded influence estimates of discrete Generalized Linear Models
}
\description{
Conditionally unbiased bounded influence estimates as described in Kuensch et al.
(1989) in three special GLM cases: Bernoulli, Binomial, and Poisson distributed responses.
The result is an object of class "cubinf".
}
\usage{
cubinf(x, y, weights = NULL, start=NULL, etastart=NULL, mustart=NULL,
offset = NULL, family = binomial(), control = cubinf.control(...),
intercept = FALSE, ...)
}
\arguments{
\item{x}{Vector or matrix of explanatory variable(s).
Columns represent variables and rows are observations.
}
\item{y}{Vector of observed responses.
In the case of Binomial responses, y is a two column matrix:
the 1st column contains the number of successes,
the 2nd column the number of failures.
The Bernoulli case, is treated as a special Binomial case.
However, the response y is a categorical variable (not a matrix with two columns) with two levels.
}
\item{weights}{Optional weights for weighted regression. Components must be non negative integers.
}
\item{start}{Starting values for the parameters in the linear predictor.
Not used but required for compatibility with the glm function.
}
\item{etastart}{Starting values for the linear predictor.
Not used but required for compatibility with the glm function.
}
\item{mustart}{Starting values for the vector of means.
Not used but required for compatibility with the glm function.
}
\item{offset}{Optional offset added to the linear predictor.
}
\item{family}{A family object. Only two options are available for cubinf:
'family=binomial()' and 'family=poisson()'.
}
\item{control}{A list of control parameters for the numerical algorithms.
See cubinf.control for the possible control parameters and their defaults.
}
\item{intercept}{Logical flag: if TRUE, an intercept term is added to the model.
}
\item{\dots }{Further named control arguments as singular.ok or qr.out used in the case
where the x matrix is singular.}
}
\value{
A list with the following components:
\item{coefficients}{Coefficient estimates. }
\item{residuals}{Working residuals. }
\item{rsdev}{Deviance residuals. }
\item{fitted.values }{Fitted values. }
\item{cov}{Estimated covariance matrix of the coefficients. }
\item{rank }{Rank of the model matrix. }
\item{df.residuals}{Degrees of freedom in the residuals. }
\item{ci}{Vector of final bias corrections. }
\item{A}{Final value of the matrix A. }
\item{ai}{Vector with components a_i=ufact/|Ax_i| (where x_i^T denotes the ith row of the model matrix) }
\item{converged}{A logical value. FALSE if the maximum number of iterations was reached. }
\item{control}{Control parameters. }
\item{prior.weights}{Input vector w (when some of its components are different from 1). }
\item{family}{The family object used in the call to cubinf
'ics=1' for the Bernoulli case.
'ics=2' for the Binomial case.
'ics=3' for the Poisson case. }
\item{linear.predictors}{Components of the linear predictor (the model matrix
multiplied by the coefficient vector). }
\item{iter}{Number of iterations required in the main algorithm. }
\item{y }{Coded value of the response. }
\item{gradient }{Vector of the final unscaled negative gradient of the objective
function. }
\item{inv.hessian }{Vector of the final inverse of the Hessian matrix in compact
storage mode. }
}
\details{
The initial values of the coefficients (theta), the matrix A and the bias correction c are
computed using the ROBETH subroutine GINTAC (Marazzi, 1993). Then an initial covariance
matrix (for the convergence criterion) is computed by means of the ROBETH subroutines
GFEDCA and KTASKW.
Finally, the main algorithm (subroutine GYMAIN) alternates between improving values of
- theta, for fixed A and c (theta-step, subroutine GYTSTP),
- c, for fixed theta and A (c-step, subroutine GYCSTP),
- A, for fixed theta and c (A-step, subroutine GYASTP).
For the different available options see the function cubinf.control.
}
\references{
Kuensch, H.R., Stefanski L.A., Carroll R.J. (1989).
Conditionally unbiased bounded-influence estimation in general regression models,
with application to generalized linear models.
Journal of the American Statistical Association, 84, 460-466.
Marazzi, A. (1993).
Algorithms, Routines, and S-functions for robust Statistics.
Chapman and Hall, New York.
}
\seealso{
glm(..., method="cubinf"), \code{\link{cubinf.control}}
}
\examples{
library(robcbi)
y <- c(5,10,15,20,30,40,60,80,100)
x <- matrix(
c(0,1,0,0,1,0,0,1,0,0,0,1,0,0,1,0,0,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1),
nrow=9,byrow=FALSE)
z <- cubinf(x,y, family=poisson, control=list(ufact=3.2), intercept=TRUE)
z$iter
z$coeff
z <- cubinf(x,y, family=poisson, control=list(ufact=30), intercept=TRUE)
z$iter
z$coeff
}
\keyword{ stats }
\keyword{ robust }
|
#' Display regression model results in table
#'
#' This function takes a regression model object and returns a formatted table
#' that is publication-ready. The function is highly customizable
#' allowing the user to obtain a bespoke summary table of the
#' regression model results. Review the
#' \href{https://www.danieldsjoberg.com/gtsummary/articles/tbl_regression.html}{tbl_regression vignette}
#' for detailed examples.
#'
#' @section Methods:
#'
#' The default method for `tbl_regression()` model summary uses `broom::tidy(x)`
#' to perform the initial tidying of the model object. There are, however,
#' a few models that use [modifications][tbl_regression_methods].
#'
#' - `"parsnip/workflows"`: If the model was prepared using parsnip/workflows,
#' the original model fit is extracted and the original `x=` argument
#'   is replaced with the model fit. This will typically go unnoticed; however, if you've
#' provided a custom tidier in `tidy_fun=` the tidier will be applied to the model
#' fit object and not the parsnip/workflows object.
#' - `"survreg"`: The scale parameter is removed, `broom::tidy(x) %>% dplyr::filter(term != "Log(scale)")`
#' - `"multinom"`: This multinomial outcome is complex, with one line per covariate per outcome (less the reference group)
#' - `"gam"`: Uses the internal tidier `tidy_gam()` to print both parametric and smooth terms.
#' - `"tidycrr"`: Uses the tidier `tidycmprsk::tidy()` to print the model terms.
#' - `"lmerMod"`, `"glmerMod"`, `"glmmTMB"`, `"glmmadmb"`, `"stanreg"`, `"brmsfit"`: These mixed effects
#' models use `broom.mixed::tidy(x, effects = "fixed")`. Specify `tidy_fun = broom.mixed::tidy`
#' to print the random components.
#'
#' @param x Regression model object
#' @param exponentiate Logical indicating whether to exponentiate the
#' coefficient estimates. Default is `FALSE`.
#' @param label List of formulas specifying variables labels,
#' e.g. `list(age ~ "Age", stage ~ "Path T Stage")`
#' @param include Variables to include in output. Input may be a vector of
#' quoted variable names, unquoted variable names, or tidyselect select helper
#' functions. Default is `everything()`.
#' @param conf.level Must be strictly greater than 0 and less than 1.
#' Defaults to 0.95, which corresponds to a 95 percent confidence interval.
#' @param intercept Logical argument indicating whether to include the intercept
#' in the output. Default is `FALSE`
#' @param show_single_row By default categorical variables are printed on
#' multiple rows. If a variable is dichotomous (e.g. Yes/No) and you wish to print
#' the regression coefficient on a single row, include the variable name(s)
#' here--quoted and unquoted variable name accepted.
#' @param estimate_fun Function to round and format coefficient estimates.
#' Default is [style_sigfig] when the coefficients are not transformed, and
#' [style_ratio] when the coefficients have been exponentiated.
#' @param pvalue_fun Function to round and format p-values.
#' Default is [style_pvalue].
#' The function must have a numeric vector input (the numeric, exact p-value),
#' and return a string that is the rounded/formatted p-value (e.g.
#' `pvalue_fun = function(x) style_pvalue(x, digits = 2)` or equivalently,
#' `purrr::partial(style_pvalue, digits = 2)`).
#' @param tidy_fun Option to specify a particular tidier function for the
#' model. Default is to use `broom::tidy()`, but if an error occurs
#' then tidying of the model is attempted with `parameters::model_parameters()`,
#' if installed.
#' @param add_estimate_to_reference_rows add a reference value. Default is FALSE
#' @param conf.int Logical indicating whether or not to include a confidence
#' interval in the output. Defaults to `TRUE`.
#' @param ... \lifecycle{experimental}Additional arguments passed to [broom.helpers::tidy_plus_plus()].
#' See `?tidy_plus_plus_dots` for details.
#' @author Daniel D. Sjoberg
#' @seealso See tbl_regression \href{https://www.danieldsjoberg.com/gtsummary/articles/tbl_regression.html}{vignette} for detailed examples
#' @seealso Review [list, formula, and selector syntax][syntax] used throughout gtsummary
#' @family tbl_regression tools
#' @export
#' @rdname tbl_regression
#' @return A `tbl_regression` object
#' @examples
#' \donttest{
#' # Example 1 ----------------------------------
#' library(survival)
#' tbl_regression_ex1 <-
#' coxph(Surv(ttdeath, death) ~ age + marker, trial) %>%
#' tbl_regression(exponentiate = TRUE)
#'
#' # Example 2 ----------------------------------
#' tbl_regression_ex2 <-
#' glm(response ~ age + grade, trial, family = binomial(link = "logit")) %>%
#' tbl_regression(exponentiate = TRUE)
#'
#' # Example 3 ----------------------------------
#' # round all estimates to 3 decimal places
#' suppressMessages(library(lme4))
#' tbl_regression_ex3 <-
#' lmer(hp ~ am + (1 | gear), data = mtcars) %>%
#' tbl_regression(estimate_fun = function(x) style_number(x, digits = 3))
#' }
#' @section Example Output:
#' \if{html}{Example 1}
#'
#' \if{html}{\out{
#' `r man_create_image_tag(file = "tbl_regression_ex1.png", width = "64")`
#' }}
#'
#' \if{html}{Example 2}
#'
#' \if{html}{\out{
#' `r man_create_image_tag(file = "tbl_regression_ex2.png", width = "50")`
#' }}
#'
#' \if{html}{Example 3}
#'
#' \if{html}{\out{
#' `r man_create_image_tag(file = "tbl_regression_ex3.png", width = "50")`
#' }}
tbl_regression <- function(x, ...) {
UseMethod("tbl_regression")
}
#' @export
#' @rdname tbl_regression
tbl_regression.default <- function(x, label = NULL, exponentiate = FALSE,
include = everything(), show_single_row = NULL,
conf.level = NULL, intercept = FALSE,
estimate_fun = NULL, pvalue_fun = NULL,
tidy_fun = NULL,
add_estimate_to_reference_rows = FALSE,
conf.int = NULL, ...) {
# setting defaults -----------------------------------------------------------
tidy_fun <- tidy_fun %||% broom.helpers::tidy_with_broom_or_parameters
pvalue_fun <-
pvalue_fun %||%
get_theme_element("tbl_regression-arg:pvalue_fun") %||%
get_theme_element("pkgwide-fn:pvalue_fun") %||%
.get_deprecated_option("gtsummary.pvalue_fun", default = style_pvalue) %>%
gts_mapper("tbl_regression(pvalue_fun=)")
estimate_fun <-
estimate_fun %||%
get_theme_element("tbl_regression-arg:estimate_fun") %||%
.get_deprecated_option(
"gtsummary.tbl_regression.estimate_fun",
default = ifelse(exponentiate == TRUE, style_ratio, style_sigfig)
) %>%
gts_mapper("tbl_regression(estimate_fun=)")
conf.level <-
conf.level %||%
get_theme_element("tbl_regression-arg:conf.level") %||%
.get_deprecated_option("gtsummary.conf.level", default = 0.95)
conf.int <-
conf.int %||%
get_theme_element("tbl_regression-arg:conf.int", default = TRUE)
add_estimate_to_reference_rows <-
add_estimate_to_reference_rows %||%
get_theme_element("tbl_regression-arg:add_estimate_to_reference_rows", default = FALSE)
# checking estimate_fun and pvalue_fun are functions
if (!purrr::every(list(estimate_fun, pvalue_fun, tidy_fun %||% pvalue_fun), is.function)) {
stop("Inputs `estimate_fun`, `pvalue_fun`, `tidy_fun` must be functions.",
call. = FALSE
)
}
include <- rlang::enquo(include)
show_single_row <- rlang::enquo(show_single_row)
# will return call, and all object passed to in tbl_regression call
# the object func_inputs is a list of every object passed to the function
func_inputs <- as.list(environment())
table_body <-
tidy_prep(x,
tidy_fun = tidy_fun, exponentiate = exponentiate,
conf.level = conf.level, intercept = intercept,
label = label, show_single_row = !!show_single_row,
include = !!include,
add_estimate_to_reference_rows = add_estimate_to_reference_rows,
conf.int = conf.int,
...
)
# saving evaluated `label`, `show_single_row`, and `include` -----------------
func_inputs$label <-
.formula_list_to_named_list(
x = label,
var_info = table_body,
arg_name = "label",
type_check = chuck(type_check, "is_string", "fn"),
type_check_msg = chuck(type_check, "is_string", "msg")
)
func_inputs$show_single_row <-
.select_to_varnames(
select = !!show_single_row,
var_info = table_body,
arg_name = "show_single_row"
)
func_inputs$include <- unique(table_body$variable)
# adding character CI
if (all(c("conf.low", "conf.high") %in% names(table_body))) {
ci.sep <- get_theme_element("pkgwide-str:ci.sep", default = ", ")
table_body <-
table_body %>%
mutate( # adding character CI
ci = if_else(
!is.na(.data$conf.low),
paste0(estimate_fun(.data$conf.low), ci.sep, estimate_fun(.data$conf.high)),
NA_character_
)
) %>%
dplyr::relocate(any_of("ci"), .after = "conf.high")
}
# re-ordering columns
table_body <-
table_body %>%
dplyr::relocate(any_of(c("conf.low", "conf.high", "ci", "p.value")), .after = last_col())
# table of column headers
x <-
.create_gtsummary_object(table_body = table_body) %>%
purrr::list_modify(
N = pluck(table_body, "N_obs", 1),
n = pluck(table_body, "N_obs", 1), # i want to remove this eventually
N_event = pluck(table_body, "N_event", 1), model_obj = x,
inputs = func_inputs,
call_list = list(tbl_regression = match.call())
) %>%
purrr::discard(is.null)
# assigning a class of tbl_regression (for special printing in R markdown)
class(x) <- c("tbl_regression", "gtsummary")
# setting column headers, and print instructions
tidy_columns_to_report <-
get_theme_element("tbl_regression-chr:tidy_columns",
default = c("conf.low", "conf.high", "p.value")
) %>%
union("estimate") %>%
intersect(names(table_body))
# setting default table_header values
x <-
.tbl_regression_default_table_header(
x,
exponentiate = exponentiate,
tidy_columns_to_report = tidy_columns_to_report,
estimate_fun = estimate_fun,
pvalue_fun = pvalue_fun,
conf.level = conf.level
)
# adding the Ns to the `x$table_styling$header`
if (!rlang::is_empty(x[c("N", "n", "N_event")] %>% purrr::compact())) {
x$table_styling$header <-
x[c("N", "n", "N_event")] %>%
purrr::compact() %>%
as_tibble() %>%
dplyr::rename_with(.fn = ~ vec_paste0("modify_stat_", .), .cols = everything()) %>%
dplyr::cross_join(
x$table_styling$header
) %>%
dplyr::relocate(starts_with("modify_stat_"), .after = last_col())
}
# running any additional mods ------------------------------------------------
x <-
get_theme_element("tbl_regression-fn:addnl-fn-to-run", default = identity) %>%
do.call(list(x))
# return results -------------------------------------------------------------
x
}
|
/R/tbl_regression.R
|
permissive
|
ddsjoberg/gtsummary
|
R
| false
| false
| 11,048
|
r
|
#' Display regression model results in table
#'
#' This function takes a regression model object and returns a formatted table
#' that is publication-ready. The function is highly customizable
#' allowing the user to obtain a bespoke summary table of the
#' regression model results. Review the
#' \href{https://www.danieldsjoberg.com/gtsummary/articles/tbl_regression.html}{tbl_regression vignette}
#' for detailed examples.
#'
#' @section Methods:
#'
#' The default method for `tbl_regression()` model summary uses `broom::tidy(x)`
#' to perform the initial tidying of the model object. There are, however,
#' a few models that use [modifications][tbl_regression_methods].
#'
#' - `"parsnip/workflows"`: If the model was prepared using parsnip/workflows,
#' the original model fit is extracted and the original `x=` argument
#' is replaced with the model fit. This will typically go unnoticed; however, if you've
#' provided a custom tidier in `tidy_fun=` the tidier will be applied to the model
#' fit object and not the parsnip/workflows object.
#' - `"survreg"`: The scale parameter is removed, `broom::tidy(x) %>% dplyr::filter(term != "Log(scale)")`
#' - `"multinom"`: This multinomial outcome is complex, with one line per covariate per outcome (less the reference group)
#' - `"gam"`: Uses the internal tidier `tidy_gam()` to print both parametric and smooth terms.
#' - `"tidycrr"`: Uses the tidier `tidycmprsk::tidy()` to print the model terms.
#' - `"lmerMod"`, `"glmerMod"`, `"glmmTMB"`, `"glmmadmb"`, `"stanreg"`, `"brmsfit"`: These mixed effects
#' models use `broom.mixed::tidy(x, effects = "fixed")`. Specify `tidy_fun = broom.mixed::tidy`
#' to print the random components.
#'
#' @param x Regression model object
#' @param exponentiate Logical indicating whether to exponentiate the
#' coefficient estimates. Default is `FALSE`.
#' @param label List of formulas specifying variables labels,
#' e.g. `list(age ~ "Age", stage ~ "Path T Stage")`
#' @param include Variables to include in output. Input may be a vector of
#' quoted variable names, unquoted variable names, or tidyselect select helper
#' functions. Default is `everything()`.
#' @param conf.level Must be strictly greater than 0 and less than 1.
#' Defaults to 0.95, which corresponds to a 95 percent confidence interval.
#' @param intercept Logical argument indicating whether to include the intercept
#' in the output. Default is `FALSE`
#' @param show_single_row By default categorical variables are printed on
#' multiple rows. If a variable is dichotomous (e.g. Yes/No) and you wish to print
#' the regression coefficient on a single row, include the variable name(s)
#' here--quoted and unquoted variable name accepted.
#' @param estimate_fun Function to round and format coefficient estimates.
#' Default is [style_sigfig] when the coefficients are not transformed, and
#' [style_ratio] when the coefficients have been exponentiated.
#' @param pvalue_fun Function to round and format p-values.
#' Default is [style_pvalue].
#' The function must have a numeric vector input (the numeric, exact p-value),
#' and return a string that is the rounded/formatted p-value (e.g.
#' `pvalue_fun = function(x) style_pvalue(x, digits = 2)` or equivalently,
#' `purrr::partial(style_pvalue, digits = 2)`).
#' @param tidy_fun Option to specify a particular tidier function for the
#' model. Default is to use `broom::tidy()`, but if an error occurs
#' then tidying of the model is attempted with `parameters::model_parameters()`,
#' if installed.
#' @param add_estimate_to_reference_rows add a reference value. Default is FALSE
#' @param conf.int Logical indicating whether or not to include a confidence
#' interval in the output. Defaults to `TRUE`.
#' @param ... \lifecycle{experimental}Additional arguments passed to [broom.helpers::tidy_plus_plus()].
#' See `?tidy_plus_plus_dots` for details.
#' @author Daniel D. Sjoberg
#' @seealso See tbl_regression \href{https://www.danieldsjoberg.com/gtsummary/articles/tbl_regression.html}{vignette} for detailed examples
#' @seealso Review [list, formula, and selector syntax][syntax] used throughout gtsummary
#' @family tbl_regression tools
#' @export
#' @rdname tbl_regression
#' @return A `tbl_regression` object
#' @examples
#' \donttest{
#' # Example 1 ----------------------------------
#' library(survival)
#' tbl_regression_ex1 <-
#' coxph(Surv(ttdeath, death) ~ age + marker, trial) %>%
#' tbl_regression(exponentiate = TRUE)
#'
#' # Example 2 ----------------------------------
#' tbl_regression_ex2 <-
#' glm(response ~ age + grade, trial, family = binomial(link = "logit")) %>%
#' tbl_regression(exponentiate = TRUE)
#'
#' # Example 3 ----------------------------------
#' # round all estimates to 3 decimal places
#' suppressMessages(library(lme4))
#' tbl_regression_ex3 <-
#' lmer(hp ~ am + (1 | gear), data = mtcars) %>%
#' tbl_regression(estimate_fun = function(x) style_number(x, digits = 3))
#' }
#' @section Example Output:
#' \if{html}{Example 1}
#'
#' \if{html}{\out{
#' `r man_create_image_tag(file = "tbl_regression_ex1.png", width = "64")`
#' }}
#'
#' \if{html}{Example 2}
#'
#' \if{html}{\out{
#' `r man_create_image_tag(file = "tbl_regression_ex2.png", width = "50")`
#' }}
#'
#' \if{html}{Example 3}
#'
#' \if{html}{\out{
#' `r man_create_image_tag(file = "tbl_regression_ex3.png", width = "50")`
#' }}
tbl_regression <- function(x, ...) {
UseMethod("tbl_regression")
}
#' @export
#' @rdname tbl_regression
tbl_regression.default <- function(x, label = NULL, exponentiate = FALSE,
include = everything(), show_single_row = NULL,
conf.level = NULL, intercept = FALSE,
estimate_fun = NULL, pvalue_fun = NULL,
tidy_fun = NULL,
add_estimate_to_reference_rows = FALSE,
conf.int = NULL, ...) {
# setting defaults -----------------------------------------------------------
tidy_fun <- tidy_fun %||% broom.helpers::tidy_with_broom_or_parameters
pvalue_fun <-
pvalue_fun %||%
get_theme_element("tbl_regression-arg:pvalue_fun") %||%
get_theme_element("pkgwide-fn:pvalue_fun") %||%
.get_deprecated_option("gtsummary.pvalue_fun", default = style_pvalue) %>%
gts_mapper("tbl_regression(pvalue_fun=)")
estimate_fun <-
estimate_fun %||%
get_theme_element("tbl_regression-arg:estimate_fun") %||%
.get_deprecated_option(
"gtsummary.tbl_regression.estimate_fun",
default = ifelse(exponentiate == TRUE, style_ratio, style_sigfig)
) %>%
gts_mapper("tbl_regression(estimate_fun=)")
conf.level <-
conf.level %||%
get_theme_element("tbl_regression-arg:conf.level") %||%
.get_deprecated_option("gtsummary.conf.level", default = 0.95)
conf.int <-
conf.int %||%
get_theme_element("tbl_regression-arg:conf.int", default = TRUE)
add_estimate_to_reference_rows <-
add_estimate_to_reference_rows %||%
get_theme_element("tbl_regression-arg:add_estimate_to_reference_rows", default = FALSE)
# checking estimate_fun and pvalue_fun are functions
if (!purrr::every(list(estimate_fun, pvalue_fun, tidy_fun %||% pvalue_fun), is.function)) {
stop("Inputs `estimate_fun`, `pvalue_fun`, `tidy_fun` must be functions.",
call. = FALSE
)
}
include <- rlang::enquo(include)
show_single_row <- rlang::enquo(show_single_row)
# will return call, and all object passed to in tbl_regression call
# the object func_inputs is a list of every object passed to the function
func_inputs <- as.list(environment())
table_body <-
tidy_prep(x,
tidy_fun = tidy_fun, exponentiate = exponentiate,
conf.level = conf.level, intercept = intercept,
label = label, show_single_row = !!show_single_row,
include = !!include,
add_estimate_to_reference_rows = add_estimate_to_reference_rows,
conf.int = conf.int,
...
)
# saving evaluated `label`, `show_single_row`, and `include` -----------------
func_inputs$label <-
.formula_list_to_named_list(
x = label,
var_info = table_body,
arg_name = "label",
type_check = chuck(type_check, "is_string", "fn"),
type_check_msg = chuck(type_check, "is_string", "msg")
)
func_inputs$show_single_row <-
.select_to_varnames(
select = !!show_single_row,
var_info = table_body,
arg_name = "show_single_row"
)
func_inputs$include <- unique(table_body$variable)
# adding character CI
if (all(c("conf.low", "conf.high") %in% names(table_body))) {
ci.sep <- get_theme_element("pkgwide-str:ci.sep", default = ", ")
table_body <-
table_body %>%
mutate( # adding character CI
ci = if_else(
!is.na(.data$conf.low),
paste0(estimate_fun(.data$conf.low), ci.sep, estimate_fun(.data$conf.high)),
NA_character_
)
) %>%
dplyr::relocate(any_of("ci"), .after = "conf.high")
}
# re-ordering columns
table_body <-
table_body %>%
dplyr::relocate(any_of(c("conf.low", "conf.high", "ci", "p.value")), .after = last_col())
# table of column headers
x <-
.create_gtsummary_object(table_body = table_body) %>%
purrr::list_modify(
N = pluck(table_body, "N_obs", 1),
n = pluck(table_body, "N_obs", 1), # i want to remove this eventually
N_event = pluck(table_body, "N_event", 1), model_obj = x,
inputs = func_inputs,
call_list = list(tbl_regression = match.call())
) %>%
purrr::discard(is.null)
# assigning a class of tbl_regression (for special printing in R markdown)
class(x) <- c("tbl_regression", "gtsummary")
# setting column headers, and print instructions
tidy_columns_to_report <-
get_theme_element("tbl_regression-chr:tidy_columns",
default = c("conf.low", "conf.high", "p.value")
) %>%
union("estimate") %>%
intersect(names(table_body))
# setting default table_header values
x <-
.tbl_regression_default_table_header(
x,
exponentiate = exponentiate,
tidy_columns_to_report = tidy_columns_to_report,
estimate_fun = estimate_fun,
pvalue_fun = pvalue_fun,
conf.level = conf.level
)
# adding the Ns to the `x$table_styling$header`
if (!rlang::is_empty(x[c("N", "n", "N_event")] %>% purrr::compact())) {
x$table_styling$header <-
x[c("N", "n", "N_event")] %>%
purrr::compact() %>%
as_tibble() %>%
dplyr::rename_with(.fn = ~ vec_paste0("modify_stat_", .), .cols = everything()) %>%
dplyr::cross_join(
x$table_styling$header
) %>%
dplyr::relocate(starts_with("modify_stat_"), .after = last_col())
}
# running any additional mods ------------------------------------------------
x <-
get_theme_element("tbl_regression-fn:addnl-fn-to-run", default = identity) %>%
do.call(list(x))
# return results -------------------------------------------------------------
x
}
|
library(rjson)
library(jsonlite)
my_path <- "C:\\Users\\Monica\\Data_Analytics\\DA_Assignment2.2"
j_files <- list.files(path = my_path, pattern = ".json$", include.dirs = FALSE, recursive = FALSE)
myJSON <- lapply(j_files, function(x) fromJSON(x))
d_data <- data.frame(unlist(myJSON))
print(d_data)
|
/Assignment2.2_PS1.R
|
no_license
|
msam04/DA_Assignment2.2
|
R
| false
| false
| 300
|
r
|
library(rjson)
library(jsonlite)
my_path <- "C:\\Users\\Monica\\Data_Analytics\\DA_Assignment2.2"
j_files <- list.files(path = my_path, pattern = ".json$", include.dirs = FALSE, recursive = FALSE)
myJSON <- lapply(j_files, function(x) fromJSON(x))
d_data <- data.frame(unlist(myJSON))
print(d_data)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.