content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Select or listen to a bird's voice on R (interactive spectrogram annotation)
# @author woodiw2wopper <woodie2wopper@gmail.com>
# Copyright (C) 2019 woodiw2wopper
# version########################################################
programname <- "toriR0.83.R"; # 191006 HO: code tidied for public release
## User setting 1 | source sound file ##############################
path_wav <- "/Users/osaka/Desktop/toriR_demo/test_山田彩子/190917/190917_030000-040000.wav";
if( ! file.exists(path_wav) ) {stop(paste("No file:",path_wav))};
# path_wavfile <- "C:/Users/〇〇/190501_230000-0000.wav"; # Windows-style path example
## User setting 2 | menu entries / bird species names ##########################
# NOTE(review): "spices" is presumably a typo for "species"; kept because the
# name is used throughout the script. The first two entries are menu actions
# ("再生" = play the clip, "WAY|保存" = save the clip), not species.
spices <- c(
"再生", "WAY|保存", "雑音",
"フクロウ", "トラツグミ","ヨタカ", "ホトトギス",
"ヤブサメ","ミゾゴイ", "アオサギ", "ゴイサギ"
);
## User setting 3 | playback parameters #############################
volume <- 8; # play volume passed to sox/play via -v
length_preplay <- 1.;# seconds played before the clicked position
length_play <- 4.;# seconds played after the clicked position
## User setting 4 | install packages (first run only) #############################
# install.packages("png", dependencies = TRUE);
#install.packages("dplyr", dependencies = TRUE);
#install.packages("stringr", dependencies = TRUE);
# R script from here ###########################################
library(png)
library(dplyr)
library(stringr)
library(tools)
## directory and files ##############
dir_dist <- dirname(path_wav)
basename_wav <- basename(path_wav)
dirname_wav <- dirname(path_wav)
filebody_wav <- tools::file_path_sans_ext(basename_wav)
setwd(dir_dist) # work in the folder that holds the wav / psgrm / png files
### parameter file load ####
# NOTE(review): str_replace treats ".wav" as a regex where "." matches any
# character; harmless for these names, but "\\.wav$" would be safer.
file_param <- stringr::str_replace(path_wav, ".wav", ".psgrm")
if( ! file.exists(file_param) ){
stop(paste("No file:", file_param))
}else{
# Loads the spectrogram parameters used below (window_time, windows_a_page,
# f_lcf, f_hcf, width_image, height_image are all referenced later).
# TODO(review): confirm the exact variable set saved in the .psgrm file.
load(file = file_param)
};
### output file (csv) ####
csvfile_output <- stringr::str_replace(path_wav, ".wav", ".csv")
if(file.exists(csvfile_output)) {cat("ファイルに追記します:", csvfile_output)};
### Recording date and time ####
# File name pattern "YYMMDD_HHMMSS-HHMMSS" -> date, start time, end time.
file_info <- stringr::str_split(filebody_wav, "[_-]") %>% unlist;
date_record <- file_info[1]; # recording date (YYMMDD)
time_start <- paste(date_record, file_info[2], sep =" ")
time_start <- strptime(time_start, format="%y%m%d %H%M%S"); # recording start
time_end <- paste(date_record, file_info[3], sep = " ");
time_end <- strptime( time_end, format="%y%m%d %H%M%S"); # recording end
### png file setting ######
# Spectrogram pages exported by the companion tool as "<wavname>_Pnn.png".
pngs <- list.files(pattern = paste0(filebody_wav, "_P[0-9][0-9].png"))
num_png <- length(pngs)
### time setting of toriR ######
now <- Sys.time()
# CSV header: program name, analysis start time, recording-start marker row.
txt_kikimimi <- c(
paste0("# プログラム:programname=", programname, "\n",
"# 解析開始時間:StartTime_toriR=", now, "\n",
format(time_start,"%Y.%m.%d,%H:%M:%S,R,,\n")
)
)
cat(txt_kikimimi, file = csvfile_output, append = TRUE)
# OS detection: first token of osVersion, e.g. "Windows" or "macOS"
os <- strsplit(osVersion, " ")[[1]][1];
# Build the shell command that plays a snippet of `file_song`.
#
# os           : first token of osVersion; only "Windows" (sox) and
#                "macOS" (play) are supported
# volume       : playback volume, passed to sox/play via -v
# file_song    : path to the wav file to play
# second_start : offset in seconds where playback starts
# length_play  : duration in seconds to play
# Returns the command string. Stops with a clear error on an unsupported OS
# (previously `res` was left undefined there, causing the confusing
# "object 'res' not found" error at return time).
str_play_trim <- function(os, volume, file_song, second_start=0, length_play=5){
  if (os == "Windows" ) {
    res <- sprintf("sox -V0 -v %d %s -t waveaudio trim %s %d", volume, file_song, second_start, length_play)
  } else if (os == "macOS"){
    res <- sprintf("play -V0 -v %d %s trim %s %d", volume, file_song, second_start, length_play)
  } else {
    stop(paste("Unsupported OS for audio playback:", os))
  }
  return(res)
}
# Unused variant that plays the whole file (no trim); kept for reference.
# str_play <- function(os, volume, file_song){
# if (os == "Windows" ) {
# res <- sprintf("sox -V0 -v %d %s -t waveaudio", volume, file_song)
# }
# if (os == "macOS"){
# res <- sprintf("play -V0 -v %d %s", volume, file_song)
# }
# return (res)
# }
### Screen positions of the spectrogram strips on the plot (plot fractions) ####
# xl/xr: left/right edges; yb/yt: bottom/top of the topmost strip;
# dy: vertical spacing between consecutive strips. Only the 4-strips-per-page
# layout (windows_a_page == 4, loaded from the .psgrm file) is configured here.
if (windows_a_page == 4){
xl <- 0.044; xr <- 0.93;
yb <- 0.805; yt <- 0.978;
dy <- 0.25;
}
### Display settings: Japanese-capable plot fonts per OS ####
if (os == "macOS") {
par(family = "HiraKakuProN-W3")
}
if (os == "Windows"){ # was "windows": never matched, since osVersion starts with "Windows" (cf. the "Windows" check in str_play_trim)
windowsFonts(MEI = windowsFont("Meiryo"))
par(family = "MEI") # was par(family = MEI): family takes the registered font name as a string; MEI is not an R object
}
# Preallocate one list slot per page, then read every spectrogram PNG into
# memory. (Previously `list(1:num_png)` built a length-1 list holding the
# vector 1:num_png and the loop grew/overwrote it; `1:num_png` also iterated
# c(1, 0) when no PNGs were found. vector("list", n) + seq_len(n) fix both.)
images <- vector("list", num_png)
for (num_page in seq_len(num_png)){
images[[num_page]] <- readPNG(pngs[num_page])
}
### toriR result database ####
# One row per annotated click: page index, click coordinates (plot
# fractions), chosen label, and the label colour. Seeded with a dummy row
# that is dropped (db[-1, ]) before results are drawn back onto the images.
# NOTE(review): rows are later appended with rbind(db, c(...)), which
# coerces every column to character — hence the as.numeric() calls when the
# coordinates are reused for plotting.
db <- tribble(
~num_png, ~x, ~y, ~label,~col_text,
1, 0, 0, "temp", "white"
)
### Main annotation loop: one iteration per spectrogram page ###############
# For each page: draw the PNG, outline the strips, then repeatedly let the
# user click a voice print, optionally play/save the clip, pick a species
# from a menu, and append the result to the CSV log and the `db` table.
for (num_page in 1:num_png){
# for (num_page in 1:2){ # debug variant: first two pages only
flag_page_end <- FALSE;
pngfile_input <- pngs[num_page];
cat("R> num_page = ",num_page,"\n")
cat("R> ", pngfile_input, "\n")
cat(paste0("# スペクトログラム:spectrogram=", pngfile_input, "\n"), file = csvfile_output, append =TRUE)
# Re-derive the page number from the file name.
# NOTE(review): the hard-coded positions 23-24 assume a fixed-length name
# ("YYMMDD_HHMMSS-HHMMSS_Pnn.png"); verify if the naming scheme changes.
num_page <- stringr::str_sub(pngfile_input, start = 23, end = 24) %>% as.numeric()
par(mar=c(0, 0, 0, 0)); # zero outer margins so the image fills the device
plot(NULL
, xlim = c(0,1), ylim = c(0, 1)
, xlab = "", ylab = "", yaxt = "n", xaxt = "n",xaxs = 'i', yaxs = 'i'
)
rasterImage(images[[num_page]], 0, 0, 1, 1)
# Outline each of the windows_a_page horizontal strips in red.
for( i in 0:(windows_a_page-1)){
rect( xl, yb - i * dy, xr, yt - i * dy, col="transparent", border = "red")
}
# Click loop for this page; exits on ESC, on menu choice 0, or on a click
# outside every strip. (The prompt text says "ESP" — a typo for "ESC" in
# the user-facing message; left unchanged here.)
while (flag_page_end == FALSE){
cat("R> 声紋をクリック。無ければ範囲外(白い部分)をクリック。またはESPキー");
z <- locator(n=1);
if (is.null(z) == TRUE){# ESC was pressed: locator() returns NULL
flag_page_end <- TRUE
break;
}
x <- z$x;
y <- z$y;
# Determine which strip (0-based index from the top) was clicked.
y_position <- "out_of_range"
for( k in 0:(windows_a_page - 1)) {# classify the locator() position
if(yb - k * dy <= y && y < yt - k * dy) {y_position <- k}
}
if(y_position == "out_of_range"){break};# outside every strip: next page
points(x, y, col="RED", pch = 20);
col_text <- "RED"; # colour of the species label drawn on the plot
# Seconds from the start of the recording, interpolated from the click.
time_offset <- (num_page - 1) * window_time * windows_a_page + window_time * y_position
second_locator <- (time_offset + (x - xl) * (window_time)/(xr - xl))
# NOTE(review): this sets length_preplay to 0 for the REST of the session
# once any click lands before second 0 — confirm that is intended.
if(second_locator < 0)(length_preplay <- 0)
txt_play <- str_play_trim(os, volume, path_wav, second_locator - length_preplay, length_play)
time_locator <- (time_offset + (x - xl) * (window_time)/(xr - xl) + time_start) %>% format("%Y.%m.%d,%H:%M:%S")
# Frequency at the click, interpolated between the low/high cut-off
# frequencies f_lcf / f_hcf loaded from the .psgrm parameter file.
freq_locator <- (f_lcf + (y - (yb - y_position * dy)) * (f_hcf - f_lcf)/(yt - yb))
answer <- menu(spices, title="\nR> 種類を選択してください:")
# cat(paste0("Select:", spices[answer]))
while(spices[answer] == "再生") {# "再生" = play the clip, then re-ask
cat (txt_play);
cat (paste0("# 再生:", txt_play, "\n"), file = csvfile_output, append = TRUE);
system(txt_play);
answer <- menu(spices, title="R> 種類を再選択してください");
cat(paste0("Reselection = ", spices[answer],"\n"))
}
if(spices[answer] == "WAY|保存") {# save the clicked clip as a wav file
time_locator_file <- (time_offset + (x - xl) * (window_time)/(xr - xl) + time_start) %>% format("%y%m%d_%H%M%S")
# NOTE(review): "|" is not a legal file-name character on Windows.
file_sox_save <- paste0(time_locator_file, "|WAY.wav")
txt_sox_save <- sprintf("sox -v %d %s %s trim %s %d", volume, path_wav, file_sox_save, second_locator - length_preplay, length_play)
system(txt_sox_save);
col_text <- "YELLOW"
}
if (answer == "0") {# menu() returns 0 when the user selects "exit"
flag_page_end <- TRUE;
break;
}else{
# Record "timestamp,species,F=<freq>[Hz]" to console, CSV and db.
txt_kikimimi <- sprintf("%s,%s,F=%d[Hz]\n", time_locator , spices[answer], as.integer(freq_locator))
cat(paste0("\nkikimimi>",txt_kikimimi))
cat(txt_kikimimi, file = csvfile_output, append = TRUE)
db <- rbind(db,c(num_page, x, y, spices[answer], col_text))
text(x, y + 0.03, label = spices[answer], col= col_text); # show the chosen label on the plot
}
}
# After the last page: close the device and append timing info to the CSV.
if(num_page == num_png){
dev.off()
before <- now
now <- Sys.time()
txt_ending <- paste0(
format(time_end,"%Y.%m.%d,%H:%M:%S,STOP,,\n"),
"# 終了解析時間:StopTime_toriR=", now, "\n",
"# 分析時間:AnalysisTime_toriR=", format(as.numeric(difftime(now, before, unit="min")), digit=2), "min.\n"
)
cat(txt_ending, file = csvfile_output, append = TRUE)
cat(txt_ending)
}
}
## Write the annotation results back onto the spectrogram images ####
answer <- menu(c("上書き", "終了"), title="\nR> 画像ファイルの処理")
if (answer == 1){
db <- db[-1,]; # drop the dummy seed row
cat("画像ファイルに結果を出力します")
for (num_page in 1:num_png){
# for (num_page in 1:2){ # debug variant: first two pages only
pngfile_input <- pngs[num_page];
pngfile_output <- stringr::str_replace(pngfile_input, ".png", "_toriR.png");
pngfile_output <- pngfile_input; # NOTE(review): overrides the "_toriR" name above, so the original PNG is overwritten — consistent with the "上書き" (overwrite) menu choice, but it makes the previous line dead code
png(pngfile_output, width = width_image, height = windows_a_page * height_image, bg="white")
par(mar=c(0, 0, 0, 0)); # zero margins so the raster fills the device
plot(NULL, xlim = c(0,1), ylim = c(0, 1), xlab = "a", ylab = "", yaxt = "n", xaxt = "n",xaxs = 'i', yaxs = 'i')
rasterImage(images[[num_page]], 0, 0, 1, 1)
text(0.9,0.99,label="toriR",col="RED")
# Redraw every annotation that belongs to this page.
# (db columns are character after rbind(c(...)), hence as.numeric below.)
for (ii in 1:nrow(db)){
w <- db[ii,];
if ( num_page == w$num_png ){
points(w$x, w$y, col=w$col_text, pch = 20);
text(as.numeric(w$x), as.numeric(w$y) + 0.03, label = w$label, col= w$col_text);
}
}
dev.off()
}
}
| /toriR0.83.R | permissive | marltake/toriR | R | false | false | 8,986 | r | # Select or linsten bird's voice on R
# @author woodiw2wopper <woodie2wopper@gmail.com>
# Copyright (C) 2019 woodiw2wopper
# version########################################################
programname <- "toriR0.83.R"; # 191006 HO 公開用にコードを整える
## ユーザ設定1|音源ファイル ##############################
path_wav <- "/Users/osaka/Desktop/toriR_demo/test_山田彩子/190917/190917_030000-040000.wav";
if( ! file.exists(path_wav) ) {stop(paste("No file:",path_wav))};
# path_wavfile <- "C:/Users/〇〇/190501_230000-0000.wav"; # Windowsの書き方
## ユーザ設定2|鳥の名前 ########################################
spices <- c(
"再生", "WAY|保存", "雑音",
"フクロウ", "トラツグミ","ヨタカ", "ホトトギス",
"ヤブサメ","ミゾゴイ", "アオサギ", "ゴイサギ"
);
## ユーザ設定3|再生パラメータの設定 #############################
volume <- 8; # play volume |再生音量
length_preplay <- 1.;# play time befor click|クリック前の再生時間
length_play <- 4.;# play time after click|クリック後の再生時間
## ユーザ設定4|ライブラリの読み込み(初回のみ) #############################
# install.packages("png", dependencies = TRUE);
#install.packages("dplyr", dependencies = TRUE);
#install.packages("stringr", dependencies = TRUE);
# R script from here ###########################################
library(png)
library(dplyr)
library(stringr)
library(tools)
## directory and files ##############
dir_dist <- dirname(path_wav)
basename_wav <- basename(path_wav)
dirname_wav <- dirname(path_wav)
filebody_wav <- tools::file_path_sans_ext(basename_wav)
setwd(dir_dist)
### parameter file laod ####
file_param <- stringr::str_replace(path_wav, ".wav", ".psgrm")
if( ! file.exists(file_param) ){
stop(paste("No file:", file_param))
}else{
load(file = file_param)
};
### output file csv) ####
csvfile_output <- stringr::str_replace(path_wav, ".wav", ".csv")
if(file.exists(csvfile_output)) {cat("ファイルに追記します:", csvfile_output)};
### Recordint date and time ####
file_info <- stringr::str_split(filebody_wav, "[_-]") %>% unlist;
date_record <- file_info[1]; # 録音日
time_start <- paste(date_record, file_info[2], sep =" ")
time_start <- strptime(time_start, format="%y%m%d %H%M%S");
time_end <- paste(date_record, file_info[3], sep = " "); # 録音終了時間
time_end <- strptime( time_end, format="%y%m%d %H%M%S"); # 録音開始時刻
### png file setting ######
pngs <- list.files(pattern = paste0(filebody_wav, "_P[0-9][0-9].png"))
num_png <- length(pngs)
### time setting of toriR ######
now <- Sys.time()
txt_kikimimi <- c(
paste0("# プログラム:programname=", programname, "\n",
"# 解析開始時間:StartTime_toriR=", now, "\n",
format(time_start,"%Y.%m.%d,%H:%M:%S,R,,\n")
)
)
cat(txt_kikimimi, file = csvfile_output, append = TRUE)
# OSによる使い分け
os <- strsplit(osVersion, " ")[[1]][1];
# Build the shell command that plays a snippet of `file_song`.
#
# os           : first token of osVersion; only "Windows" (sox) and
#                "macOS" (play) are supported
# volume       : playback volume, passed to sox/play via -v
# file_song    : path to the wav file to play
# second_start : offset in seconds where playback starts
# length_play  : duration in seconds to play
# Returns the command string. Stops with a clear error on an unsupported OS
# (previously `res` was left undefined there, causing the confusing
# "object 'res' not found" error at return time).
str_play_trim <- function(os, volume, file_song, second_start=0, length_play=5){
  if (os == "Windows" ) {
    res <- sprintf("sox -V0 -v %d %s -t waveaudio trim %s %d", volume, file_song, second_start, length_play)
  } else if (os == "macOS"){
    res <- sprintf("play -V0 -v %d %s trim %s %d", volume, file_song, second_start, length_play)
  } else {
    stop(paste("Unsupported OS for audio playback:", os))
  }
  return(res)
}
# str_play <- function(os, volume, file_song){
# if (os == "Windows" ) {
# res <- sprintf("sox -V0 -v %d %s -t waveaudio", volume, file_song)
# }
# if (os == "macOS"){
# res <- sprintf("play -V0 -v %d %s", volume, file_song)
# }
# return (res)
# }
### windowの表示画面上の場所指定 ####
if (windows_a_page == 4){
xl <- 0.044; xr <- 0.93;
yb <- 0.805; yt <- 0.978;
dy <- 0.25;
}
### Display settings: Japanese-capable plot fonts per OS ####
if (os == "macOS") {
par(family = "HiraKakuProN-W3")
}
if (os == "Windows"){ # was "windows": never matched, since osVersion starts with "Windows" (cf. the "Windows" check in str_play_trim)
windowsFonts(MEI = windowsFont("Meiryo"))
par(family = "MEI") # was par(family = MEI): family takes the registered font name as a string; MEI is not an R object
}
images <- list(1:num_png)
for (num_page in 1:num_png){
images[[num_page]] <- readPNG(pngs[num_page])
}
### toriRの結果データベース####
db <- tribble(
~num_png, ~x, ~y, ~label,~col_text,
1, 0, 0, "temp", "white"
)
for (num_page in 1:num_png){
# for (num_page in 1:2){
flag_page_end <- FALSE;
pngfile_input <- pngs[num_page];
cat("R> num_page = ",num_page,"\n")
cat("R> ", pngfile_input, "\n")
cat(paste0("# スペクトログラム:spectrogram=", pngfile_input, "\n"), file = csvfile_output, append =TRUE)
num_page <- stringr::str_sub(pngfile_input, start = 23, end = 24) %>% as.numeric()
par(mar=c(0, 0, 0, 0)); #外側のマージンをゼロにする
plot(NULL
, xlim = c(0,1), ylim = c(0, 1)
, xlab = "", ylab = "", yaxt = "n", xaxt = "n",xaxs = 'i', yaxs = 'i'
)
rasterImage(images[[num_page]], 0, 0, 1, 1)
for( i in 0:(windows_a_page-1)){
rect( xl, yb - i * dy, xr, yt - i * dy, col="transparent", border = "red")
}
while (flag_page_end == FALSE){
cat("R> 声紋をクリック。無ければ範囲外(白い部分)をクリック。またはESPキー");
z <- locator(n=1);
if (is.null(z) == TRUE){# ESCが押されたなら
flag_page_end <- TRUE
break;
}
x <- z$x;
y <- z$y;
y_position <- "out_of_range"
for( k in 0:(windows_a_page - 1)) {# locator()の場所判定
if(yb - k * dy <= y && y < yt - k * dy) {y_position <- k}
}
if(y_position == "out_of_range"){break};# 範囲外選択
points(x, y, col="RED", pch = 20);
col_text <- "RED"; #抽出種名の文字の色
time_offset <- (num_page - 1) * window_time * windows_a_page + window_time * y_position
second_locator <- (time_offset + (x - xl) * (window_time)/(xr - xl))
if(second_locator < 0)(length_preplay <- 0)
txt_play <- str_play_trim(os, volume, path_wav, second_locator - length_preplay, length_play)
time_locator <- (time_offset + (x - xl) * (window_time)/(xr - xl) + time_start) %>% format("%Y.%m.%d,%H:%M:%S")
freq_locator <- (f_lcf + (y - (yb - y_position * dy)) * (f_hcf - f_lcf)/(yt - yb))
answer <- menu(spices, title="\nR> 種類を選択してください:")
# cat(paste0("Select:", spices[answer]))
while(spices[answer] == "再生") {#再生
cat (txt_play);
cat (paste0("# 再生:", txt_play, "\n"), file = csvfile_output, append = TRUE);
system(txt_play);
answer <- menu(spices, title="R> 種類を再選択してください");
cat(paste0("Reselection = ", spices[answer],"\n"))
}
if(spices[answer] == "WAY|保存") {#
time_locator_file <- (time_offset + (x - xl) * (window_time)/(xr - xl) + time_start) %>% format("%y%m%d_%H%M%S")
file_sox_save <- paste0(time_locator_file, "|WAY.wav")
txt_sox_save <- sprintf("sox -v %d %s %s trim %s %d", volume, path_wav, file_sox_save, second_locator - length_preplay, length_play)
system(txt_sox_save);
col_text <- "YELLOW"
}
if (answer == "0") {
flag_page_end <- TRUE;
break;
}else{
txt_kikimimi <- sprintf("%s,%s,F=%d[Hz]\n", time_locator , spices[answer], as.integer(freq_locator))
cat(paste0("\nkikimimi>",txt_kikimimi))
cat(txt_kikimimi, file = csvfile_output, append = TRUE)
db <- rbind(db,c(num_page, x, y, spices[answer], col_text))
text(x, y + 0.03, label = spices[answer], col= col_text); # 選択した文字列を図中に表示
}
}
if(num_page == num_png){
dev.off()
before <- now
now <- Sys.time()
txt_ending <- paste0(
format(time_end,"%Y.%m.%d,%H:%M:%S,STOP,,\n"),
"# 終了解析時間:StopTime_toriR=", now, "\n",
"# 分析時間:AnalysisTime_toriR=", format(as.numeric(difftime(now, before, unit="min")), digit=2), "min.\n"
)
cat(txt_ending, file = csvfile_output, append = TRUE)
cat(txt_ending)
}
}
##画像に抽出結果を出力する ####
answer <- menu(c("上書き", "終了"), title="\nR> 画像ファイルの処理")
if (answer == 1){
db <- db[-1,];
cat("画像ファイルに結果を出力します")
for (num_page in 1:num_png){
# for (num_page in 1:2){
pngfile_input <- pngs[num_page];
pngfile_output <- stringr::str_replace(pngfile_input, ".png", "_toriR.png");
pngfile_output <- pngfile_input;
png(pngfile_output, width = width_image, height = windows_a_page * height_image, bg="white")
par(mar=c(0, 0, 0, 0)); #外側のマージンをゼロにする
plot(NULL, xlim = c(0,1), ylim = c(0, 1), xlab = "a", ylab = "", yaxt = "n", xaxt = "n",xaxs = 'i', yaxs = 'i')
rasterImage(images[[num_page]], 0, 0, 1, 1)
text(0.9,0.99,label="toriR",col="RED")
for (ii in 1:nrow(db)){
w <- db[ii,];
if ( num_page == w$num_png ){
points(w$x, w$y, col=w$col_text, pch = 20);
text(as.numeric(w$x), as.numeric(w$y) + 0.03, label = w$label, col= w$col_text);
}
}
dev.off()
}
}
|
## The following two functions can be used to cache the inverse of a matrix.
## To save costly computation, if the inverse of a particular matrix has been
## calculated earlier, the second function will pick up the result from the cache.
## If not, a new calculation will run.
## This function creates a special object of matrix type that can cache its inverse
## Create a "cache matrix": a list of closures that store a matrix and
## lazily cache its inverse in the enclosing environment.
##
## Returned interface (unchanged):
##   set(y)          : replace the stored matrix and invalidate the cache
##   get()           : return the stored matrix
##   setinverse(inv) : store a computed inverse in the cache
##   getinverse()    : return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL # cached inverse; NULL means "not computed yet" (was `i = NULL`: use <- for assignment)
  set <- function(y) {
    x <<- y      # rebind the matrix in the enclosing environment
    inv <<- NULL # any previously cached inverse is now stale
  }
  get <- function() x
  # The argument used to be called `solve`, shadowing base::solve() and
  # suggesting a computation; renamed — this closure only stores a value.
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## This function returns the inverse of the matrix created above.
## If the inverse has already been calculated once before, then the function retrieves the result from the cache.
## Else, it will be recalculated
## Return the inverse of the matrix wrapped by a cache-matrix object `x`.
## On a cache hit the stored inverse is returned (with a message); on a
## miss the inverse is computed via solve(), stored, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, remember, and return the fresh inverse.
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | ShubhadeepSarkar/ProgrammingAssignment2 | R | false | false | 1,702 | r | ## The following two functions can be used to cache the inverse of a matrix.
## To save costly computation, if the inverse of a particular matrix has been
## calculated earlier, the second function will pick up the result from the cache.
## If not, a new calculation will run.
## This function creates a special object of matrix type that can cache its inverse
makeCacheMatrix <- function(x = matrix()) { # input a matrix
i = NULL # reset the inverse
set <- function(y) { # save the input matrix in this environment
x <<- y
i<<- NULL
}
get <- function() x # return the value of the original matrix
setinverse <- function(solve) i <<- solve # calculate the inverse first time
getinverse <- function() i # get the inverse from cache if already calculated
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse) # return all functions of this object that can be accessed
}
## This function returns the inverse of the matrix created above.
## If the inverse has already been calculated once before, then the function retrieves the result from the cache.
## Else, it will be recalculated
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
## Return a matrix that is the inverse of 'x'
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define the UI: static headers plus a sidebar of plot controls and a main
# panel showing a scatter plot and the matching data table.
ui <- fluidPage(
h1("Datacamp"),
h2("Uses cases cource"),
em("Shiny"),
strong("is fun"),
# Sidebar inputs drive both the plot and the table outputs below
sidebarLayout(
sidebarPanel(
h4("Plot parameters"),
textInput("title", "Plot title", "Car speed vs distance to stop"),
numericInput("num", "Number of cars to show", 30, 1, nrow(cars)),
sliderInput("size", "Point size", 1, 5, 2, 0.5)
),
mainPanel(
plotOutput("plot"),
tableOutput("table")
)
)
)
# Server logic: outputs re-render whenever the inputs they read change.
server <- function(input, output) {
# Scatter plot of the first `num` rows of the built-in `cars` data set.
output$plot <- renderPlot({
plot(cars[1:input$num, ], main = input$title, cex = input$size)
})
# Table of the same rows currently shown in the plot.
output$table <- renderTable({
cars[1:input$num, ]
})
}
# Run the application
shinyApp(ui = ui, server = server)
| /SingleFileShiny/app.R | no_license | servassheldon/myfirstapp | R | false | false | 1,030 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
ui <- fluidPage(
h1("Datacamp"),
h2("Uses cases cource"),
em("Shiny"),
strong("is fun"),
# Show a plot of the generated distribution
sidebarLayout(
sidebarPanel(
h4("Plot parameters"),
textInput("title", "Plot title", "Car speed vs distance to stop"),
numericInput("num", "Number of cars to show", 30, 1, nrow(cars)),
sliderInput("size", "Point size", 1, 5, 2, 0.5)
),
mainPanel(
plotOutput("plot"),
tableOutput("table")
)
)
)
# Define the server logic
server <- function(input, output) {
output$plot <- renderPlot({
plot(cars[1:input$num, ], main = input$title, cex = input$size)
})
output$table <- renderTable({
cars[1:input$num, ]
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
#' ---
#' title: "Factorial ANOVA"
#' author: "Veronique Storme"
#' date: "17/04/2019"
#' ---
#'
#' R Code for Chapter 12 of:
#'
#' Field, A. P., Miles, J. N. V., & Field, Z. C. (2012). Discovering Statistics Using R: and Sex and Drugs and Rock 'N' Roll. #London Sage
#'
#' (c) 2011 Andy P. Field, Jeremy N. V. Miles & Zoe C. Field
#'
#' with adaptations from Veronique Storme
#'
#' and additions of http://rcompanion.org/handbook/
#'
## Set the working directory
## ------------------------------------------------------------------------
# NOTE(review): a hard-coded absolute setwd() only works on the author's
# machine; an RStudio project or relative paths would be more portable.
setwd("C:/Users/vesto/Documents/myCOURSES/basic-stat-R/april2019/data")
## Install and Load Packages
# install.packages() calls are one-time setup, kept commented out.
#install.packages("car")
#install.packages("ggplot2")
#install.packages("mosaic")
#install.packages("plyr")
#install.packages("dplyr")
#install.packages("lsmeans")
library(car)
library(ggplot2)
library(mosaic)
library(plyr)
library(dplyr)
library(emmeans) # successor of lsmeans; also provides lsmeans()/lsmip() used below
###################
## two-way ANOVA ##
###################
## import the goggles data
##--------------------------
gogglesData <- read.csv("goggles.csv", header = TRUE)
head(gogglesData)
## data exploration
##--------------------------
### retrieve the levels of the factors
### Note: if categorical variables are coded numerically, you should set the categorical var as factors
levels(gogglesData$alcohol)
levels(gogglesData$gender)
### reset the order of the levels of alcohol and gender (default is
### alphabetical; ordering them makes plots and contrasts read naturally)
gogglesData$alcohol <- factor(gogglesData$alcohol, levels = c("None", "2 Pints", "4 Pints"))
levels(gogglesData$alcohol)
gogglesData$gender <- factor(gogglesData$gender, levels = c("Male", "Female"))
levels(gogglesData$gender)
### frequency table: cell counts per gender x alcohol combination
table(gogglesData$gender, gogglesData$alcohol)
### descriptives: per-cell N, missing count, mean, sd, standard error and
### 95% confidence-interval bounds for attractiveness
summ <- ddply(gogglesData, .(gender, alcohol), summarise,
Nobs = sum(!is.na(attractiveness)),
Nmiss = sum(is.na(attractiveness)),
mean = mean(attractiveness, na.rm=TRUE),
sd = sd(attractiveness, na.rm=TRUE),
se = sd/sqrt(Nobs),
t = qt(0.975, Nobs-1), # two-sided 95% critical value, per-cell df
lower = mean - t*se,
upper = mean + t*se)
summ
## graphical display
##------------------
### boxplots by gender and alcohol consumption
plot <- ggplot(gogglesData, aes(alcohol, attractiveness))
plot + geom_boxplot() +
facet_wrap(~gender) +
labs(x = "Alcohol Consumption", y = "Mean Attractiveness of Date (%)")
### bar charts by gender and alcohol consumption
plot +
stat_summary(fun.data = mean_cl_normal, geom = "errorbar", position=position_dodge(width=0.90), width = 0.2) +
stat_summary(fun.y = mean, geom = "bar", position="dodge") +
facet_wrap(~gender) +
labs(x = "Alcohol Consumption", y = "Mean Attractiveness of Date (%)")
### interaction plot
ggplot(summ, aes(x=alcohol, y=mean, group=gender, color=gender)) +
geom_errorbar(aes(ymin=lower, ymax=upper), width=.1,
position=position_dodge(0.05)) +
geom_line() +
geom_point() +
labs(x = "Alcohol Consumption", y = "Mean Attractiveness of Date (%)") +
scale_color_brewer(palette="Paired")
### the lack of parallelism between lines reveals how one factor
### changes the effect of the other one.
## analysis with a linear model
##------------------------------
### to illustrate the analysis with a linear model,
### we keep only the None and 4 Pints group
### subset the data
subset = filter(gogglesData, alcohol!="2 Pints")
### reset the factor levels
subset = droplevels(subset)
levels(subset$alcohol)
model.lm = lm(attractiveness ~ gender*alcohol,data=subset)
summary(model.lm)
Anova(model.lm, type="III")
### testing the assumptions
par(mfcol=c(1,2))
plot(model.lm,1:2)
par(mfcol=c(1,1))
### see slides for the interpretation
## For 2 factors with 2 levels each, the interaction effect is given by b3
## or:
### user defined contrasts in function of the lsmeans
lsm.lm <- emmeans (model.lm, ~ gender * alcohol)
### get the order of the groups
lsm.lm
### the order is:
### M0 F0 M4 F4
### c3: comparing differences (=interaction)
### example: compare Female-Male between 4 pints and None
### H0: (F4-F0 = M4-M0) or (F4-F0)-(M4-M0)=0 or F4-F0-M4+M0=0
contrast(lsm.lm,
list(c3=c(1, -1, -1, 1)),
adjust="none",
by = NULL)
## Two-way ANOVA on the complete data set
##------------------------------------
# Full factorial model: main effects of gender and alcohol plus their
# interaction (equivalent to attractiveness ~ gender*alcohol).
fit.int <- lm(attractiveness ~ gender + alcohol + gender:alcohol, data = gogglesData)
summary(fit.int)
# Type III tests for each term.
# NOTE(review): type III sums of squares are only meaningful with
# sum-to-zero contrasts; with R's default treatment contrasts consider
# options(contrasts = c("contr.sum", "contr.poly")) first — confirm intent.
Anova(fit.int, type="III")
### The F-test for the two-way interaction term tests for:
### H0: all group means are equal (there are 6 groups)
### HA: at least one of the group means is not equal
### The F-test indicates that there is enough evidence to reject H0 (p<0.05)
### Another way to test significance is an extra sum-of-squares F test
### comparing the interaction model with the main-effects model. This only
### makes sense for nested models fitted to the SAME data (beware of rows
### dropped by na.action differences). Call: anova(reduced, full).
fit.main <- lm(attractiveness ~ gender + alcohol, data = gogglesData) # was `=`: use `<-` for assignment, consistent with the rest of the script
anova(fit.main, fit.int)
### testing the assumptions: residuals-vs-fitted and normal Q-Q plots
par(mfcol=c(1,2))
plot(fit.int,1:2)
par(mfcol=c(1,1))
### getting the least-square means estimates
### population average predicted values according to the model
### This equals the average values in a balanced situation
lsm <- emmeans (fit.int, ~ gender * alcohol)
lsm
plot(lsm)
lsmip(lsm, gender ~ alcohol, ylab = "Observed attractiveness")
### post-hoc tests
#### test all pairwise comparisons:
C1 = contrast(lsm, method="pairwise", adjust="tukey")
plot(C1)
confint(C1)
### simple tests of effects:
### returns partial F-tests
### evaluate contrasts across the levels of one factor while the values
### of the other interaction factor is kept fixed at certain levels
### example:
### H01: for gender=M: none=2pints=4pints
### H02: for gender=F: none=2pints=4pints
diffs.alc = lsmeans(fit.int, pairwise ~ alcohol | gender,glhargs=list())
diffs.alc[[2]]
confint(diffs.alc[[2]])
plot(diffs.alc[[2]])
diffs.g = lsmeans(fit.int, pairwise ~ gender | alcohol, glhargs=list())
diffs.g[[2]]
confint(diffs.g[[2]])
plot(diffs.g[[2]])
### user defined contrasts in function of the lsmeans
### c1: difference between alcohol=4pints and alcohol=2pints for gender=F
### c2: difference between alcohol=2pints and alcohol=placebo for gender=F
### H01: F4 - F2=0
### H02: F2 - F0 = 0
### get the order of the groups
lsm
### the order is:
### M0 F0 M2 F2 M4 F4
contrast(lsm,
list(c1=c(0, 0, 0, -1, 0, 1), c2=c(0, -1, 0, 1, 0, 0)),
by = NULL)
### c3: comparing differences (=interaction)
### example: compare Female-Male between 4 pints and None
### H0: (F4-F0 = M4-M0) or (F4-F0)-(M4-M0)=0 or F4-F0-M4+M0=0
contrast(lsm,
list(c1=c(0, -1, 0, 0, 0, 1),
c2=c(-1, 0, 0, 0, 1, 0),
c3=c(1, -1, 0, 0, -1, 1)),
adjust="sidak",
by = NULL)
| /demo/Ch12_glm2.R | no_license | vstorme/basicstat | R | false | false | 7,438 | r | #' ---
#' title: "Factorial ANOVA"
#' author: "Veronique Storme"
#' date: "17/04/2019"
#' ---
#'
#' R Code for Chapter 12 of:
#'
#' Field, A. P., Miles, J. N. V., & Field, Z. C. (2012). Discovering Statistics Using R: and Sex and Drugs and Rock 'N' Roll. #London Sage
#'
#' (c) 2011 Andy P. Field, Jeremy N. V. Miles & Zoe C. Field
#'
#' with adaptations from Veronique Storme
#'
#' and additions of http://rcompanion.org/handbook/
#'
## Set the working directory
## ------------------------------------------------------------------------
# NOTE(review): machine-specific absolute path -- every other user must edit
# this line (or switch to relative paths) before the script will run.
setwd("C:/Users/vesto/Documents/myCOURSES/basic-stat-R/april2019/data")
## Install and Load Packages
#install.packages("car")
#install.packages("ggplot2")
#install.packages("mosaic")
#install.packages("plyr")
#install.packages("dplyr")
#install.packages("lsmeans")
library(car)      # Anova() with type-III sums of squares
library(ggplot2)  # plotting
library(mosaic)
library(plyr)     # ddply(); attached before dplyr to limit masking surprises
library(dplyr)
# emmeans supersedes the retired lsmeans package; it also exports
# lsmeans()/lsmip() as compatibility aliases used later in this script
library(emmeans)
###################
## two-way ANOVA ##
###################
## import the goggles data
##--------------------------
gogglesData <- read.csv("goggles.csv", header = TRUE)
head(gogglesData)
## data exploration
##--------------------------
### retrieve the levels of the factors
### Note: if categorical variables are coded numerically, you should set the categorical var as factors
# NOTE(review): since R 4.0 read.csv() no longer converts strings to factors,
# so these two calls return NULL until the factor() conversions below --
# confirm against the R version this course targets.
levels(gogglesData$alcohol)
levels(gogglesData$gender)
### reset the order of the levels of alcohol and gender
gogglesData$alcohol <- factor(gogglesData$alcohol, levels = c("None", "2 Pints", "4 Pints"))
levels(gogglesData$alcohol)
gogglesData$gender <- factor(gogglesData$gender, levels = c("Male", "Female"))
levels(gogglesData$gender)
### frequency table
table(gogglesData$gender, gogglesData$alcohol)
### descriptives
# Cell-wise summary per gender x alcohol cell: counts, mean, sd, and a
# 95% t-based confidence interval (reused by the interaction plot below).
summ <- ddply(gogglesData, .(gender, alcohol), summarise,
Nobs = sum(!is.na(attractiveness)),
Nmiss = sum(is.na(attractiveness)),
mean = mean(attractiveness, na.rm=TRUE),
sd = sd(attractiveness, na.rm=TRUE),
se = sd/sqrt(Nobs),
t = qt(0.975, Nobs-1),
lower = mean - t*se,
upper = mean + t*se)
summ
## graphical display
##------------------
### boxplots by gender and alcohol consumption
# NOTE(review): `plot` shadows base::plot; later calls like plot(lsm) still
# dispatch to the function (R skips non-functions in call position), but a
# different object name would be safer.
plot <- ggplot(gogglesData, aes(alcohol, attractiveness))
plot + geom_boxplot() +
facet_wrap(~gender) +
labs(x = "Alcohol Consumption", y = "Mean Attractiveness of Date (%)")
### bar charts by gender and alcohol consumption
plot +
stat_summary(fun.data = mean_cl_normal, geom = "errorbar", position=position_dodge(width=0.90), width = 0.2) +
# NOTE(review): fun.y= is deprecated since ggplot2 3.3 (use fun=) -- confirm
# the ggplot2 version the course targets.
stat_summary(fun.y = mean, geom = "bar", position="dodge") +
facet_wrap(~gender) +
labs(x = "Alcohol Consumption", y = "Mean Attractiveness of Date (%)")
### interaction plot
# cell means with their 95% CIs, one line per gender
ggplot(summ, aes(x=alcohol, y=mean, group=gender, color=gender)) +
geom_errorbar(aes(ymin=lower, ymax=upper), width=.1,
position=position_dodge(0.05)) +
geom_line() +
geom_point() +
labs(x = "Alcohol Consumption", y = "Mean Attractiveness of Date (%)") +
scale_color_brewer(palette="Paired")
### the lack of parallelism between lines reveals how one factor
### changes the effect of the other one.
## analysis with a linear model
##------------------------------
### to illustrate the analysis with a linear model,
### we keep only the None and 4 Pints group
### subset the data
# NOTE(review): `subset` shadows base::subset -- harmless in this script but
# a different name would avoid confusion.
subset = filter(gogglesData, alcohol!="2 Pints")
### reset the factor levels
subset = droplevels(subset)
levels(subset$alcohol)
# 2x2 factorial model; with treatment contrasts the interaction is b3
model.lm = lm(attractiveness ~ gender*alcohol,data=subset)
summary(model.lm)
Anova(model.lm, type="III")
### testing the assumptions
par(mfcol=c(1,2))
plot(model.lm,1:2)   # residuals-vs-fitted and normal Q-Q
par(mfcol=c(1,1))
### see slides for the interpretation
## For 2 factors with 2 levels each, the interaction effect is given by b3
## or:
### user defined contrasts in function of the lsmeans
lsm.lm <- emmeans (model.lm, ~ gender * alcohol)
### get the order of the groups
lsm.lm
### the order is:
### M0 F0 M4 F4
### c3: comparing differences (=interaction)
### example: compare Female-Male between 4 pints and None
### H0: (F4-F0 = M4-M0) or (F4-F0)-(M4-M0)=0 or F4-F0-M4+M0=0
contrast(lsm.lm,
list(c3=c(1, -1, -1, 1)),
adjust="none",
by = NULL)
## two-way anova on complete data set
##------------------------------------
fit.int <- lm(attractiveness ~ gender + alcohol + gender:alcohol, data = gogglesData)
summary(fit.int)
Anova(fit.int, type="III")
### The F-test for the two-way interaction term tests for:
### H0: all group means are equal (there are 6 groups)
### HA: at least one of the group means is not equal
### The F-test indicates that there is enough evidence to reject H0 (p<0.05)
### Another way to test for significance is by performing an extra sum-of-squares F test to compare the interaction model with the main effect model (i.e. it tests whether reduction in the residual sum of squares are statistically significant or not).
### Note that this makes sense only if the two models are nested models.
### Note also that the comparison between two or more models will only be valid if they are fitted to the same dataset. This may be a problem if there are missing values and R's default of na.action = na.omit is used.
### the function call is: anova(reduced model, full model)
fit.main = lm(attractiveness ~ gender + alcohol, data = gogglesData)
anova(fit.main,fit.int)
### testing the assumptions
par(mfcol=c(1,2))
plot(fit.int,1:2)
par(mfcol=c(1,1))
### getting the least-square means estimates
### population average predicted values according to the model
### This equals the average values in a balanced situation
lsm <- emmeans (fit.int, ~ gender * alcohol)
lsm
plot(lsm)
# lsmip() is the emmeans compatibility alias for emmip() -- interaction plot
# of the model-based means
lsmip(lsm, gender ~ alcohol, ylab = "Observed attractiveness")
### post-hoc tests
#### test all pairwise comparisons:
C1 = contrast(lsm, method="pairwise", adjust="tukey")
plot(C1)
confint(C1)
### simple tests of effects:
### returns partial F-tests
### evaluate contrasts across the levels of one factor while the values
### of the other interaction factor is kept fixed at certain levels
### example:
### H01: for gender=M: none=2pints=4pints
### H02: for gender=F: none=2pints=4pints
# NOTE(review): lsmeans() is presumably the compatibility alias exported by
# emmeans; glhargs= is a legacy lsmeans-era argument that appears to be
# silently swallowed by ... -- confirm and consider removing it.
diffs.alc = lsmeans(fit.int, pairwise ~ alcohol | gender,glhargs=list())
diffs.alc[[2]]          # the $contrasts component: pairwise alcohol tests per gender
confint(diffs.alc[[2]])
plot(diffs.alc[[2]])
diffs.g = lsmeans(fit.int, pairwise ~ gender | alcohol, glhargs=list())
diffs.g[[2]]            # pairwise gender tests within each alcohol level
confint(diffs.g[[2]])
plot(diffs.g[[2]])
### user defined contrasts in function of the lsmeans
### c1: difference between alcohol=4pints and alcohol=2pints for gender=F
### c2: difference between alcohol=2pints and alcohol=placebo for gender=F
### H01: F4 - F2=0
### H02: F2 - F0 = 0
### get the order of the groups
lsm
### the order is:
### M0 F0 M2 F2 M4 F4
# each contrast vector below is in that M0 F0 M2 F2 M4 F4 order
contrast(lsm,
list(c1=c(0, 0, 0, -1, 0, 1), c2=c(0, -1, 0, 1, 0, 0)),
by = NULL)
### c3: comparing differences (=interaction)
### example: compare Female-Male between 4 pints and None
### H0: (F4-F0 = M4-M0) or (F4-F0)-(M4-M0)=0 or F4-F0-M4+M0=0
contrast(lsm,
list(c1=c(0, -1, 0, 0, 0, 1),
c2=c(-1, 0, 0, 0, 1, 0),
c3=c(1, -1, 0, 0, -1, 1)),
adjust="sidak",
by = NULL)
|
#' @title Log Likelihood for Fitting Cdfquantile Distributions
#' @aliases qrLogLik
#' @description Function to give the log likelihood for fitting cdfquantile distributions (note: the returned value is not negated; negate it yourself if an optimiser requires the negative log likelihood).
#'
#' @param y the vector to be evaluated.
#' @param mu mean of the distribution.
#' @param sigma sigma of the distribution.
#' @param fd A string that specifies the parent distribution.
#' @param sd A string that specifies the sub-family distribution.
#' @param total whether the sum of logliklihood is calculated
#'
#' @return The log likelihood for fitting the data with a cdfquantile distribution: the sum over observations if \code{total = TRUE}, otherwise a vector with one element per observation.
#'
#' @export
#' @import pracma
#' @examples
#' y <- rbeta(20, 0.5, 0.5)
#' qrLogLik(y, mu = 0.5, sigma = 1, 't2','t2')
#'
qrLogLik <- function(y, mu, sigma, fd, sd, total = TRUE) {
  # Per-observation log-likelihood for the cdf-quantile family selected by the
  # parent distribution `fd` and the sub-family `sd`; each branch below fills
  # the vector `loglik`. sech(), csc() and cot() come from pracma (@import).
  # NOTE(review): most branches use mu/sigma unindexed (a scalar recycles over
  # y), but the burr7-t2 and burr8-t2 loops index mu[i]/sigma[i] and therefore
  # expect mu and sigma of length(y) -- confirm with callers.
  # NOTE(review): an unrecognised fd/sd pair leaves `loglik` undefined and the
  # final line errors with "object 'loglik' not found".
  # arcsinh-XX-----
  if (fd == "arcsinh") {
    ## arcsinh-arcsinh----
    if (sd == "arcsinh") {
      loglik <- asinh(((1 - 2 * y)/(2 * (-1 + y) * y) - mu)/sigma) -
        2 * log(1 + exp(asinh(((1 - 2 * y)/(2 * (-1 + y) * y) - mu)/sigma))) -
        log(1 - y) - log(y) + log(1 - 2 * y + 2 * y^2) - (1/2) *
        log(1 + 4 * y * (-1 + mu) + 4 * y^4 * (mu^2 + sigma^2) +
              4 * y^2 * (1 - 3 * mu + mu^2 + sigma^2) - 8 * y^3 * (-mu + mu^2 + sigma^2))
    }
    # arcsinh-Cauchy----
    if (sd == "cauchy") {
      loglik <- log(pi) + asinh((mu + cot(pi * y))/sigma) -
        2 * log(1 + exp(asinh((mu + cot(pi * y))/sigma))) -
        (1/2) * log(mu^2 + sigma^2 + 2 * mu * cot(pi * y) + cot(pi * y)^2) +
        2 * log(csc(pi * y))
    }
    # arcsinh-t2----
    if (sd == "t2") {
      # a1 = signed t2 quantile transform of y, a2 = Jacobian factor
      a1 <- a2 <- y
      for (i in seq_along(y)) {
        c1 <- ((y[i] >= 0) & (y[i] < 1/2))  # 1st situation
        c2 <- (y[i] >= 1/2 & y[i] <= 1)     # 2nd situation
        if (c1) {
          a1[i] <- -sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else if (c2) {
          a1[i] <- sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else {
          a1[i] <- 0
        }
        c3 <- (y[i] > 0 & y[i] < 1/2)   # 3rd situation
        c4 <- (y[i] > 1/2 & y[i] < 1)   # 4th situation
        c5 <- (y[i] == 1 | y[i] == 0)   # boundaries: density undefined
        if (y[i] == 0.5) {
          a2[i] <- 2 * sqrt(2)
        } else if (c3) {
          a2[i] <- (1 - 2 * y[i])/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c4) {
          a2[i] <- (2 * y[i] - 1)/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c5) {
          a2[i] <- NA  # dropped by na.rm = TRUE in the final sum
        } else {
          a2[i] <- 0
        }
      }
      lik <- (exp(asinh((-mu + a1)/sigma)) * a2)/((1 + exp(asinh((-mu + a1)/sigma)))^2 * sigma *
        sqrt(1 + (mu - a1)^2/sigma^2))
      loglik <- log(lik)
    }
    # arcsinh-burr7----
    if (sd == "burr7") {
      loglik <- -log(2) - log(sigma) + asinh((mu + atanh(1 - 2 * y))/sigma) -
        2 * log(1 + exp(asinh((mu + atanh(1 - 2 * y))/sigma))) -
        (1/2) * log(1 + (mu + atanh(1 - 2 * y))^2/sigma^2) - log(1 - y) - log(y)
    }
    # arcsinh-burr8----
    if (sd == "burr8") {
      loglik <- log(pi) - log(sigma) + asinh((-mu + log(tan((pi * y)/2)))/sigma) -
        2 * log(1 + exp(asinh((-mu + log(tan((pi * y)/2)))/sigma))) +
        log(csc(pi * y)) - (1/2) * log(1 + (mu - log(tan((pi * y)/2)))^2/sigma^2)
    }
    # arcsinh-logistic----
    if (sd == "logistic") {
      loglik <- -log(sigma) + asinh((-mu - log(1 - y) + log(y))/sigma) -
        2 * log(1 + exp(asinh((-mu - log(1 - y) + log(y))/sigma))) -
        (1/2) * log(1 + (mu + log(1 - y) - log(y))^2/sigma^2) - log(1 - y) - log(y)
    }
  }
  # burr7-XX-----
  if (fd == "burr7") {
    # burr7-arcsinh----
    if (sd == 'arcsinh') {
      loglik <- -log(4) - log(sigma) + 2 * log(sech((-mu - (1 - 2 * y)/(2 * (1 - y) * y))/sigma)) -
        2 * log(1 - y) - 2 * log(y) + log(1 - 2 * (1 - y) * y)
    }
    # burr7-burr7----
    if (sd == 'burr7') {
      loglik <- 2 * log(sech((mu + atanh(1 - 2 * y))/sigma)) - log(4 * sigma * y - 4 * sigma *
        y^2)
    }
    # burr7-burr8----
    if (sd == 'burr8') {
      # complex-valued intermediate; only the real part is the log-likelihood
      # FIX: one factor was written as (0 + 1) * pi * y -- a typo for the
      # imaginary unit (0 + 1i) used in every sibling term of this formula.
      loglik <- Re((1/(2 * sigma)) * (-2 * (0 + 1i) * pi + 4 * mu + (0 + 1i) * pi * sigma + sigma *
        log(16) + 2 * sigma * log(pi) - 2 * sigma * log(sigma) + 4 * log(-1 + exp((0 + 1i) *
        pi * y)) - 4 * log(1 + exp((0 + 1i) * pi * y)) - 2 * sigma * log(-1 + exp(2 * (0 + 1i) *
        pi * y)) - 4 * sigma * log(exp((2 * mu)/sigma) + ((-(0 + 1i))^(2/sigma) * (-1 + exp((0 +
        1i) * pi * y))^(2/sigma))/(1 + exp((0 + 1i) * pi * y))^(2/sigma)) + 2 * (0 + 1i) * pi *
        sigma * y))
    }
    # burr7-cauchy----
    if (sd == 'cauchy') {
      loglik <- -log((2 * sigma)/pi) + 2 * log(csc(pi * y)) + 2 * log(sech((mu + cot(pi * y))/sigma))
    }
    # burr7-logistic----
    if (sd == 'logistic') {
      loglik <- -log(2) - log(sigma) + 2 * log(sech((-mu - log(1 - y) + log(y))/sigma)) - log(1 -
        y) - log(y)
    }
    # burr7-t2----
    if (sd == 't2') {
      a1 <- y
      for (i in seq_along(y)) {
        # FIX: boundary handling -- c1 used y[i] >= 0 and c2 used the
        # impossible (y[i] == 0 & y[i] == 1), so y = 0 / y = 1 produced
        # NaN / log(0) instead of NA (compare c5 in the other t2 loops).
        c1 <- ((y[i] > 0) & (y[i] < 1))   # interior of (0, 1)
        c2 <- (y[i] == 0 | y[i] == 1)     # boundaries: density undefined
        if (c1) {
          a1[i] <- sech(((-1 + 2 * y[i])/(sqrt(2) * sqrt((-(-1 + y[i])) * y[i])) - mu[i])/sigma[i])^2/(4 *
            sqrt(2) * ((-(-1 + y[i])) * y[i])^(3/2) * sigma[i])
        } else if (c2) {
          a1[i] <- NA  # dropped by na.rm = TRUE in the final sum
        } else {
          a1[i] <- 0
        }
      }
      loglik <- log(a1)
    }
  }
  # burr8-XX-----
  if (fd == "burr8") {
    # burr8 arcsinh-----
    if (sd == 'arcsinh') {
      loglik <- (-(1/(2 * sigma))) * (2 * sigma * log(pi) + 2 * sigma * log(sigma) + 2 * sigma *
        log(exp((2 * (mu - 1/(1 - y)))/sigma) + exp(1/(sigma * (-1 + y) * y))) + 4 * sigma *
        log(1 - y) + 4 * sigma * log(y) - 2 * sigma * log(1 - 2 * (1 - y) * y) + 2 * (1/(1 -
        y)) - 2 * mu * (1/(1 - y)) + 1/((1 - y) * y) + 2 * mu * (y/(1 - y)))
    }
    # burr8 Cauchy-----
    if (sd == "cauchy") {
      loglik <- (mu + sigma * log(2) - sigma * log(sigma) + cot(pi * y) -
        sigma * log(1 + exp((2 * (mu + cot(pi * y)))/sigma)) +
        2 * sigma * log(csc(pi * y)))/sigma
    }
    # burr8-logistic----
    if (sd == 'logistic') {
      loglik <- -((-mu - sigma * log(2) + sigma * log(pi) + sigma * log(sigma) + (1 + sigma) *
        log(1 - y) + (-1 + sigma) * log(y) + sigma * log(exp((2 * mu)/sigma) + y^(2/sigma)/(1 -
        y)^(2/sigma)))/sigma)
    }
    # burr8 t2-----
    if (sd == "t2") {
      a1 <- y
      for (i in seq_along(y)) {
        # FIX: same boundary repair as burr7-t2 (c2 used & instead of |,
        # and c1 admitted y == 0).
        c1 <- ((y[i] > 0) & (y[i] < 1))   # interior of (0, 1)
        c2 <- (y[i] == 0 | y[i] == 1)     # boundaries: density undefined
        if (c1) {
          a1[i] <- sech(((1 - 2 * y[i])/(sqrt(2) * sqrt((-(-1 + y[i])) * y[i])) + mu[i])/sigma[i])/(2 * sqrt(2) * pi * ((-(-1 + y[i])) * y[i])^(3/2) * sigma[i])
          # sech((mu[i] + (1 - 2*y[i])/(sqrt(2)*sqrt(1 - y[i])*sqrt(y[i])))/sigma[i])/
          # (2*sqrt(2)*pi*sigma[i]*(1 - y[i])^(3/2)*y[i]^(3/2))
        } else if (c2) {
          a1[i] <- NA  # dropped by na.rm = TRUE in the final sum
        } else {
          a1[i] <- 0
        }
      }
      loglik <- log(a1)
    }
    # burr8 burr7-----
    if (sd == "burr7") {
      loglik <- -log(2 * pi * y * sigma - 2 * pi * y^2 * sigma) +
        log(sech((mu + atanh(1 - 2 * y))/sigma))
    }
    # burr8 burr8-----
    if (sd == "burr8") {
      loglik <- (mu + sigma * log(2) + sigma * log(csc(pi * y)) + log(tan(pi * y/2)) - sigma *
        log(exp((2 * mu)/sigma) * sigma + sigma * tan((pi * y)/2)^(2/sigma)))/sigma
    }
  }
  # Cauchit-XX-----
  if (fd == "cauchit") {
    # cauchit-arcsinh-----
    if (sd == "arcsinh") {
      loglik <- log((2 * sigma)/pi) + log(1 + 2 * (-1 + y) * y) - log(4 * sigma^2 * (-1 + y)^2 *
        y^2 + (-1 + 2 * y + 2 * mu * (-1 + y) * y)^2)
    }
    # cauchit-burr7-----
    if (sd == "burr7") {
      loglik <- -log(2) - log(pi) + log(sigma) - log(mu^2 + sigma^2 + 2 * mu * atanh(1 - 2 *
        y) + atanh(1 - 2 * y)^2) - log(1 - y) - log(y)
    }
    # cauchit-burr8-----
    if (sd == "burr8") {
      loglik <- log(sigma) + log(csc(pi * y)) - log(mu^2 + sigma^2 - 2 * mu * log(tan((pi * y)/2)) +
        log(tan((pi * y)/2))^2)
    }
    # cauchit-Cauchy-----
    if (sd == "cauchy") {
      loglik <- log(sigma) - log(mu^2 + sigma^2 + 2 * mu * cot(pi * y) + cot(pi * y)^2) + 2 *
        log(csc(pi * y))
    }
    # cauchit-Logistic-----
    if (sd == "logistic") {
      loglik <- log(sigma) - log(mu^2 + sigma^2 - 2 * mu * (-log(1 - y) + log(y)) + (-log(1 -
        y) + log(y))^2) - log(1 - y) - log(y)
    }
    # cauchit-t2----
    if (sd == 't2') {
      # a1 = signed t2 quantile transform of y, a2 = Jacobian factor
      a1 <- a2 <- y
      for (i in seq_along(y)) {
        c1 <- ((y[i] >= 0) & (y[i] < 1/2))  # 1st situation
        c2 <- (y[i] >= 1/2 & y[i] <= 1)     # 2nd situation
        if (c1) {
          a1[i] <- -sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else if (c2) {
          a1[i] <- sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else {
          a1[i] <- 0
        }
        c3 <- (y[i] > 0 & y[i] < 1/2)   # 3rd situation
        c4 <- (y[i] > 1/2 & y[i] < 1)   # 4th situation
        c5 <- (y[i] == 1 | y[i] == 0)   # boundaries: density undefined
        if (y[i] == 0.5) {
          a2[i] <- 2 * sqrt(2)
        } else if (c3) {
          a2[i] <- (1 - 2 * y[i])/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c4) {
          a2[i] <- (2 * y[i] - 1)/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c5) {
          a2[i] <- NA  # dropped by na.rm = TRUE in the final sum
        } else {
          a2[i] <- 0
        }
      }
      lik <- a2/(pi * sigma * (1 + (mu - a1)^2/sigma^2))
      loglik <- log(lik)
    }
  }
  # logit-XX-----
  if (fd == "logit") {
    # logit-arcsinh-----
    if (sd == "arcsinh") {
      loglik <- -log(2) - log(sigma) - 2 * log(exp(1/(sigma - sigma * y)) + exp(mu/sigma + 1/(2 *
        sigma * y - 2 * sigma * y^2))) - 2 * log(1 - y) - 2 * log(y) + log(1 - 2 * y + 2 *
        y^2) + 1/(2 * sigma * y - 2 * sigma * y^2) + 2 * (y/(2 * sigma * y - 2 * sigma * y^2)) +
        2 * mu * (y/(2 * sigma * y - 2 * sigma * y^2)) - 2 * mu * (y^2/(2 * sigma * y - 2 *
        sigma * y^2))
    }
    # logit-burr7-----
    if (sd == "burr7") {
      loglik <- 2 * log(sech((mu + atanh(1 - 2 * y))/(2 * sigma))) -
        log(8 * sigma * y - 8 * sigma * y^2)
    }
    # logit-burr8-----
    if (sd == "burr8") {
      loglik <- log(csc(pi * y)) + ((mu + sigma * log(pi) - sigma * log(sigma))
        + log(tan((pi * y)/2)) - 2 * sigma * log(exp(mu/sigma) +
        tan((pi * y)/2)^(1/sigma)))/sigma
    }
    # logit-Cauchy-----
    if (sd == "cauchy") {
      loglik <- (mu + sigma * log(pi) - sigma * log(sigma) + cot(pi * y) -
        2 * sigma * log(1 + exp((mu + cot(pi * y))/sigma)) +
        2 * sigma * log(csc(pi * y)))/sigma
    }
    # logit-logistic-----
    if (sd == "logistic") {
      loglik <- ((mu - sigma * log(sigma)) - (-1 + sigma) * log(-1 + 1/y) -
        2 * sigma * log(y + exp(mu/sigma) * (-1 + 1/y)^(1/sigma) * y))/sigma
    }
    # logit-t2-----
    if (sd == "t2") {
      # a1 = signed t2 quantile transform of y, a2 = Jacobian factor
      a1 <- a2 <- y
      for (i in seq_along(y)) {
        c1 <- ((y[i] >= 0) & (y[i] < 1/2))  # 1st situation
        c2 <- (y[i] >= 1/2 & y[i] <= 1)     # 2nd situation
        if (c1) {
          a1[i] <- -sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else if (c2) {
          a1[i] <- sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else {
          a1[i] <- 0
        }
        c3 <- (y[i] > 0 & y[i] < 1/2)   # 3rd situation
        c4 <- (y[i] > 1/2 & y[i] < 1)   # 4th situation
        c5 <- (y[i] == 1 | y[i] == 0)   # boundaries: density undefined
        if (y[i] == 0.5) {
          a2[i] <- 2 * sqrt(2)
        } else if (c3) {
          a2[i] <- (1 - 2 * y[i])/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c4) {
          a2[i] <- (2 * y[i] - 1)/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c5) {
          a2[i] <- NA  # dropped by na.rm = TRUE in the final sum
        } else {
          a2[i] <- 0
        }
      }
      lik <- (exp((mu + a1)/sigma) * a2)/((exp(mu/sigma) + exp(a1/sigma))^2 * sigma)
      loglik <- log(lik)
    }
  }
  # t2-XX-----
  if (fd == "t2") {
    # t2-ArcSinh-----
    if (sd == "arcsinh") {
      loglik <- log(4) + 2 * log(sigma) + 3 * log(1 - y) + log(y) - (3/2) * log(1 + 4 * (-1 +
        mu) * y + 4 * (1 - 3 * mu + mu^2 + 2 * sigma^2) * y^2 - 8 * (-mu + mu^2 + 2 * sigma^2) *
        y^3 + 4 * (mu^2 + 2 * sigma^2) * y^4)
    }
    # t2-burr7-----
    if (sd == "burr7") {
      loglik <- -log(2) + 2 * log(sigma) - (3/2) * log(mu^2 + 2 * sigma^2 +
        2 * mu * atanh(1 - 2 * y) + atanh(1 - 2 * y)^2) - log(1 - y) - log(y)
    }
    # t2-burr8
    if (sd == "burr8") {
      loglik <- (log(pi) + 2 * log(sigma)) + log(csc(pi * y)) -
        (3/2) * log(mu^2 + 2 * sigma^2 - 2 * mu * log(tan((pi * y)/2)) + log(tan((pi * y)/2))^2)
    }
    # t2-Cauchy-----
    if (sd == "cauchy") {
      loglik <- log(pi) + log(sigma) - log(mu^2 + 2 * sigma^2 + 2 * mu * cot(pi * y)
        + cot(pi * y)^2) - (1/2) * log(2 + (mu + cot(pi * y))^2/sigma^2) + 2 * log(csc(pi * y))
    }
    # t2-t2-----
    if (sd == "t2") {
      loglik <- 2 * log(sigma) - (3/2) * log(1 + 2 * sqrt(2) * mu * sqrt(1 - y) * sqrt(y) + 2 *
        (-2 + mu^2 + 2 * sigma^2) * y -
        4 * mu * sqrt(2 - 2 * y) * y^(3/2) - 2 * (-2 + mu^2 + 2 * sigma^2) * y^2)
    }
    # t2-logistic-----
    if (sd == "logistic") {
      loglik <- log(sigma) - 1/2 * log(2 + (mu + log(1 - y) - log(y))^2/sigma^2) -
        log(mu^2 + 2 * sigma^2 - 2 * mu * (-log(1 - y) + log(y)) + (-log(1 - y) + log(y))^2) - log(1 - y) - log(y)
    }
  }
  # Kumaraswamy: here mu/sigma play the roles of the shape parameters a and b
  if (fd == "km" | sd == "km") {
    a <- mu
    b <- sigma
    loglik <- log(a) + log(b) + (a - 1) * log(y) + (b - 1) * log(1 - y^a)
  }
  # NA/NaN observations (boundary values of y) are excluded from the total
  if (total) sum(loglik, na.rm = TRUE) else loglik
}
| /R/qrLogLik.R | no_license | Bhanditz/cdfquantreg | R | false | false | 14,861 | r | #' @title Log Likelihood for Fitting Cdfquantile Distributions
#' @aliases qrLogLik
#' @description Function to give the log likelihood for fitting cdfquantile distributions (note: the returned value is not negated; negate it yourself if an optimiser requires the negative log likelihood).
#'
#' @param y the vector to be evaluated.
#' @param mu mean of the distribution.
#' @param sigma sigma of the distribution.
#' @param fd A string that specifies the parent distribution.
#' @param sd A string that specifies the sub-family distribution.
#' @param total whether the sum of logliklihood is calculated
#'
#' @return The log likelihood for fitting the data with a cdfquantile distribution: the sum over observations if \code{total = TRUE}, otherwise a vector with one element per observation.
#'
#' @export
#' @import pracma
#' @examples
#' y <- rbeta(20, 0.5, 0.5)
#' qrLogLik(y, mu = 0.5, sigma = 1, 't2','t2')
#'
qrLogLik <- function(y, mu, sigma, fd, sd, total = TRUE) {
  # Per-observation log-likelihood for the cdf-quantile family selected by the
  # parent distribution `fd` and the sub-family `sd`; each branch below fills
  # the vector `loglik`. sech(), csc() and cot() come from pracma (@import).
  # NOTE(review): most branches use mu/sigma unindexed (a scalar recycles over
  # y), but the burr7-t2 and burr8-t2 loops index mu[i]/sigma[i] and therefore
  # expect mu and sigma of length(y) -- confirm with callers.
  # NOTE(review): an unrecognised fd/sd pair leaves `loglik` undefined and the
  # final line errors with "object 'loglik' not found".
  # arcsinh-XX-----
  if (fd == "arcsinh") {
    ## arcsinh-arcsinh----
    if (sd == "arcsinh") {
      loglik <- asinh(((1 - 2 * y)/(2 * (-1 + y) * y) - mu)/sigma) -
        2 * log(1 + exp(asinh(((1 - 2 * y)/(2 * (-1 + y) * y) - mu)/sigma))) -
        log(1 - y) - log(y) + log(1 - 2 * y + 2 * y^2) - (1/2) *
        log(1 + 4 * y * (-1 + mu) + 4 * y^4 * (mu^2 + sigma^2) +
              4 * y^2 * (1 - 3 * mu + mu^2 + sigma^2) - 8 * y^3 * (-mu + mu^2 + sigma^2))
    }
    # arcsinh-Cauchy----
    if (sd == "cauchy") {
      loglik <- log(pi) + asinh((mu + cot(pi * y))/sigma) -
        2 * log(1 + exp(asinh((mu + cot(pi * y))/sigma))) -
        (1/2) * log(mu^2 + sigma^2 + 2 * mu * cot(pi * y) + cot(pi * y)^2) +
        2 * log(csc(pi * y))
    }
    # arcsinh-t2----
    if (sd == "t2") {
      # a1 = signed t2 quantile transform of y, a2 = Jacobian factor
      a1 <- a2 <- y
      for (i in seq_along(y)) {
        c1 <- ((y[i] >= 0) & (y[i] < 1/2))  # 1st situation
        c2 <- (y[i] >= 1/2 & y[i] <= 1)     # 2nd situation
        if (c1) {
          a1[i] <- -sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else if (c2) {
          a1[i] <- sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else {
          a1[i] <- 0
        }
        c3 <- (y[i] > 0 & y[i] < 1/2)   # 3rd situation
        c4 <- (y[i] > 1/2 & y[i] < 1)   # 4th situation
        c5 <- (y[i] == 1 | y[i] == 0)   # boundaries: density undefined
        if (y[i] == 0.5) {
          a2[i] <- 2 * sqrt(2)
        } else if (c3) {
          a2[i] <- (1 - 2 * y[i])/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c4) {
          a2[i] <- (2 * y[i] - 1)/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c5) {
          a2[i] <- NA  # dropped by na.rm = TRUE in the final sum
        } else {
          a2[i] <- 0
        }
      }
      lik <- (exp(asinh((-mu + a1)/sigma)) * a2)/((1 + exp(asinh((-mu + a1)/sigma)))^2 * sigma *
        sqrt(1 + (mu - a1)^2/sigma^2))
      loglik <- log(lik)
    }
    # arcsinh-burr7----
    if (sd == "burr7") {
      loglik <- -log(2) - log(sigma) + asinh((mu + atanh(1 - 2 * y))/sigma) -
        2 * log(1 + exp(asinh((mu + atanh(1 - 2 * y))/sigma))) -
        (1/2) * log(1 + (mu + atanh(1 - 2 * y))^2/sigma^2) - log(1 - y) - log(y)
    }
    # arcsinh-burr8----
    if (sd == "burr8") {
      loglik <- log(pi) - log(sigma) + asinh((-mu + log(tan((pi * y)/2)))/sigma) -
        2 * log(1 + exp(asinh((-mu + log(tan((pi * y)/2)))/sigma))) +
        log(csc(pi * y)) - (1/2) * log(1 + (mu - log(tan((pi * y)/2)))^2/sigma^2)
    }
    # arcsinh-logistic----
    if (sd == "logistic") {
      loglik <- -log(sigma) + asinh((-mu - log(1 - y) + log(y))/sigma) -
        2 * log(1 + exp(asinh((-mu - log(1 - y) + log(y))/sigma))) -
        (1/2) * log(1 + (mu + log(1 - y) - log(y))^2/sigma^2) - log(1 - y) - log(y)
    }
  }
  # burr7-XX-----
  if (fd == "burr7") {
    # burr7-arcsinh----
    if (sd == 'arcsinh') {
      loglik <- -log(4) - log(sigma) + 2 * log(sech((-mu - (1 - 2 * y)/(2 * (1 - y) * y))/sigma)) -
        2 * log(1 - y) - 2 * log(y) + log(1 - 2 * (1 - y) * y)
    }
    # burr7-burr7----
    if (sd == 'burr7') {
      loglik <- 2 * log(sech((mu + atanh(1 - 2 * y))/sigma)) - log(4 * sigma * y - 4 * sigma *
        y^2)
    }
    # burr7-burr8----
    if (sd == 'burr8') {
      # complex-valued intermediate; only the real part is the log-likelihood
      # FIX: one factor was written as (0 + 1) * pi * y -- a typo for the
      # imaginary unit (0 + 1i) used in every sibling term of this formula.
      loglik <- Re((1/(2 * sigma)) * (-2 * (0 + 1i) * pi + 4 * mu + (0 + 1i) * pi * sigma + sigma *
        log(16) + 2 * sigma * log(pi) - 2 * sigma * log(sigma) + 4 * log(-1 + exp((0 + 1i) *
        pi * y)) - 4 * log(1 + exp((0 + 1i) * pi * y)) - 2 * sigma * log(-1 + exp(2 * (0 + 1i) *
        pi * y)) - 4 * sigma * log(exp((2 * mu)/sigma) + ((-(0 + 1i))^(2/sigma) * (-1 + exp((0 +
        1i) * pi * y))^(2/sigma))/(1 + exp((0 + 1i) * pi * y))^(2/sigma)) + 2 * (0 + 1i) * pi *
        sigma * y))
    }
    # burr7-cauchy----
    if (sd == 'cauchy') {
      loglik <- -log((2 * sigma)/pi) + 2 * log(csc(pi * y)) + 2 * log(sech((mu + cot(pi * y))/sigma))
    }
    # burr7-logistic----
    if (sd == 'logistic') {
      loglik <- -log(2) - log(sigma) + 2 * log(sech((-mu - log(1 - y) + log(y))/sigma)) - log(1 -
        y) - log(y)
    }
    # burr7-t2----
    if (sd == 't2') {
      a1 <- y
      for (i in seq_along(y)) {
        # FIX: boundary handling -- c1 used y[i] >= 0 and c2 used the
        # impossible (y[i] == 0 & y[i] == 1), so y = 0 / y = 1 produced
        # NaN / log(0) instead of NA (compare c5 in the other t2 loops).
        c1 <- ((y[i] > 0) & (y[i] < 1))   # interior of (0, 1)
        c2 <- (y[i] == 0 | y[i] == 1)     # boundaries: density undefined
        if (c1) {
          a1[i] <- sech(((-1 + 2 * y[i])/(sqrt(2) * sqrt((-(-1 + y[i])) * y[i])) - mu[i])/sigma[i])^2/(4 *
            sqrt(2) * ((-(-1 + y[i])) * y[i])^(3/2) * sigma[i])
        } else if (c2) {
          a1[i] <- NA  # dropped by na.rm = TRUE in the final sum
        } else {
          a1[i] <- 0
        }
      }
      loglik <- log(a1)
    }
  }
  # burr8-XX-----
  if (fd == "burr8") {
    # burr8 arcsinh-----
    if (sd == 'arcsinh') {
      loglik <- (-(1/(2 * sigma))) * (2 * sigma * log(pi) + 2 * sigma * log(sigma) + 2 * sigma *
        log(exp((2 * (mu - 1/(1 - y)))/sigma) + exp(1/(sigma * (-1 + y) * y))) + 4 * sigma *
        log(1 - y) + 4 * sigma * log(y) - 2 * sigma * log(1 - 2 * (1 - y) * y) + 2 * (1/(1 -
        y)) - 2 * mu * (1/(1 - y)) + 1/((1 - y) * y) + 2 * mu * (y/(1 - y)))
    }
    # burr8 Cauchy-----
    if (sd == "cauchy") {
      loglik <- (mu + sigma * log(2) - sigma * log(sigma) + cot(pi * y) -
        sigma * log(1 + exp((2 * (mu + cot(pi * y)))/sigma)) +
        2 * sigma * log(csc(pi * y)))/sigma
    }
    # burr8-logistic----
    if (sd == 'logistic') {
      loglik <- -((-mu - sigma * log(2) + sigma * log(pi) + sigma * log(sigma) + (1 + sigma) *
        log(1 - y) + (-1 + sigma) * log(y) + sigma * log(exp((2 * mu)/sigma) + y^(2/sigma)/(1 -
        y)^(2/sigma)))/sigma)
    }
    # burr8 t2-----
    if (sd == "t2") {
      a1 <- y
      for (i in seq_along(y)) {
        # FIX: same boundary repair as burr7-t2 (c2 used & instead of |,
        # and c1 admitted y == 0).
        c1 <- ((y[i] > 0) & (y[i] < 1))   # interior of (0, 1)
        c2 <- (y[i] == 0 | y[i] == 1)     # boundaries: density undefined
        if (c1) {
          a1[i] <- sech(((1 - 2 * y[i])/(sqrt(2) * sqrt((-(-1 + y[i])) * y[i])) + mu[i])/sigma[i])/(2 * sqrt(2) * pi * ((-(-1 + y[i])) * y[i])^(3/2) * sigma[i])
          # sech((mu[i] + (1 - 2*y[i])/(sqrt(2)*sqrt(1 - y[i])*sqrt(y[i])))/sigma[i])/
          # (2*sqrt(2)*pi*sigma[i]*(1 - y[i])^(3/2)*y[i]^(3/2))
        } else if (c2) {
          a1[i] <- NA  # dropped by na.rm = TRUE in the final sum
        } else {
          a1[i] <- 0
        }
      }
      loglik <- log(a1)
    }
    # burr8 burr7-----
    if (sd == "burr7") {
      loglik <- -log(2 * pi * y * sigma - 2 * pi * y^2 * sigma) +
        log(sech((mu + atanh(1 - 2 * y))/sigma))
    }
    # burr8 burr8-----
    if (sd == "burr8") {
      loglik <- (mu + sigma * log(2) + sigma * log(csc(pi * y)) + log(tan(pi * y/2)) - sigma *
        log(exp((2 * mu)/sigma) * sigma + sigma * tan((pi * y)/2)^(2/sigma)))/sigma
    }
  }
  # Cauchit-XX-----
  if (fd == "cauchit") {
    # cauchit-arcsinh-----
    if (sd == "arcsinh") {
      loglik <- log((2 * sigma)/pi) + log(1 + 2 * (-1 + y) * y) - log(4 * sigma^2 * (-1 + y)^2 *
        y^2 + (-1 + 2 * y + 2 * mu * (-1 + y) * y)^2)
    }
    # cauchit-burr7-----
    if (sd == "burr7") {
      loglik <- -log(2) - log(pi) + log(sigma) - log(mu^2 + sigma^2 + 2 * mu * atanh(1 - 2 *
        y) + atanh(1 - 2 * y)^2) - log(1 - y) - log(y)
    }
    # cauchit-burr8-----
    if (sd == "burr8") {
      loglik <- log(sigma) + log(csc(pi * y)) - log(mu^2 + sigma^2 - 2 * mu * log(tan((pi * y)/2)) +
        log(tan((pi * y)/2))^2)
    }
    # cauchit-Cauchy-----
    if (sd == "cauchy") {
      loglik <- log(sigma) - log(mu^2 + sigma^2 + 2 * mu * cot(pi * y) + cot(pi * y)^2) + 2 *
        log(csc(pi * y))
    }
    # cauchit-Logistic-----
    if (sd == "logistic") {
      loglik <- log(sigma) - log(mu^2 + sigma^2 - 2 * mu * (-log(1 - y) + log(y)) + (-log(1 -
        y) + log(y))^2) - log(1 - y) - log(y)
    }
    # cauchit-t2----
    if (sd == 't2') {
      # a1 = signed t2 quantile transform of y, a2 = Jacobian factor
      a1 <- a2 <- y
      for (i in seq_along(y)) {
        c1 <- ((y[i] >= 0) & (y[i] < 1/2))  # 1st situation
        c2 <- (y[i] >= 1/2 & y[i] <= 1)     # 2nd situation
        if (c1) {
          a1[i] <- -sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else if (c2) {
          a1[i] <- sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else {
          a1[i] <- 0
        }
        c3 <- (y[i] > 0 & y[i] < 1/2)   # 3rd situation
        c4 <- (y[i] > 1/2 & y[i] < 1)   # 4th situation
        c5 <- (y[i] == 1 | y[i] == 0)   # boundaries: density undefined
        if (y[i] == 0.5) {
          a2[i] <- 2 * sqrt(2)
        } else if (c3) {
          a2[i] <- (1 - 2 * y[i])/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c4) {
          a2[i] <- (2 * y[i] - 1)/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c5) {
          a2[i] <- NA  # dropped by na.rm = TRUE in the final sum
        } else {
          a2[i] <- 0
        }
      }
      lik <- a2/(pi * sigma * (1 + (mu - a1)^2/sigma^2))
      loglik <- log(lik)
    }
  }
  # logit-XX-----
  if (fd == "logit") {
    # logit-arcsinh-----
    if (sd == "arcsinh") {
      loglik <- -log(2) - log(sigma) - 2 * log(exp(1/(sigma - sigma * y)) + exp(mu/sigma + 1/(2 *
        sigma * y - 2 * sigma * y^2))) - 2 * log(1 - y) - 2 * log(y) + log(1 - 2 * y + 2 *
        y^2) + 1/(2 * sigma * y - 2 * sigma * y^2) + 2 * (y/(2 * sigma * y - 2 * sigma * y^2)) +
        2 * mu * (y/(2 * sigma * y - 2 * sigma * y^2)) - 2 * mu * (y^2/(2 * sigma * y - 2 *
        sigma * y^2))
    }
    # logit-burr7-----
    if (sd == "burr7") {
      loglik <- 2 * log(sech((mu + atanh(1 - 2 * y))/(2 * sigma))) -
        log(8 * sigma * y - 8 * sigma * y^2)
    }
    # logit-burr8-----
    if (sd == "burr8") {
      loglik <- log(csc(pi * y)) + ((mu + sigma * log(pi) - sigma * log(sigma))
        + log(tan((pi * y)/2)) - 2 * sigma * log(exp(mu/sigma) +
        tan((pi * y)/2)^(1/sigma)))/sigma
    }
    # logit-Cauchy-----
    if (sd == "cauchy") {
      loglik <- (mu + sigma * log(pi) - sigma * log(sigma) + cot(pi * y) -
        2 * sigma * log(1 + exp((mu + cot(pi * y))/sigma)) +
        2 * sigma * log(csc(pi * y)))/sigma
    }
    # logit-logistic-----
    if (sd == "logistic") {
      loglik <- ((mu - sigma * log(sigma)) - (-1 + sigma) * log(-1 + 1/y) -
        2 * sigma * log(y + exp(mu/sigma) * (-1 + 1/y)^(1/sigma) * y))/sigma
    }
    # logit-t2-----
    if (sd == "t2") {
      # a1 = signed t2 quantile transform of y, a2 = Jacobian factor
      a1 <- a2 <- y
      for (i in seq_along(y)) {
        c1 <- ((y[i] >= 0) & (y[i] < 1/2))  # 1st situation
        c2 <- (y[i] >= 1/2 & y[i] <= 1)     # 2nd situation
        if (c1) {
          a1[i] <- -sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else if (c2) {
          a1[i] <- sqrt((1 - 2 * y[i])^2/(2 * (1 - y[i]) * y[i]))
        } else {
          a1[i] <- 0
        }
        c3 <- (y[i] > 0 & y[i] < 1/2)   # 3rd situation
        c4 <- (y[i] > 1/2 & y[i] < 1)   # 4th situation
        c5 <- (y[i] == 1 | y[i] == 0)   # boundaries: density undefined
        if (y[i] == 0.5) {
          a2[i] <- 2 * sqrt(2)
        } else if (c3) {
          a2[i] <- (1 - 2 * y[i])/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c4) {
          a2[i] <- (2 * y[i] - 1)/sqrt(8 * (1 - 2 * y[i])^2 * ((1 - y[i]) * y[i])^3)
        } else if (c5) {
          a2[i] <- NA  # dropped by na.rm = TRUE in the final sum
        } else {
          a2[i] <- 0
        }
      }
      lik <- (exp((mu + a1)/sigma) * a2)/((exp(mu/sigma) + exp(a1/sigma))^2 * sigma)
      loglik <- log(lik)
    }
  }
  # t2-XX-----
  if (fd == "t2") {
    # t2-ArcSinh-----
    if (sd == "arcsinh") {
      loglik <- log(4) + 2 * log(sigma) + 3 * log(1 - y) + log(y) - (3/2) * log(1 + 4 * (-1 +
        mu) * y + 4 * (1 - 3 * mu + mu^2 + 2 * sigma^2) * y^2 - 8 * (-mu + mu^2 + 2 * sigma^2) *
        y^3 + 4 * (mu^2 + 2 * sigma^2) * y^4)
    }
    # t2-burr7-----
    if (sd == "burr7") {
      loglik <- -log(2) + 2 * log(sigma) - (3/2) * log(mu^2 + 2 * sigma^2 +
        2 * mu * atanh(1 - 2 * y) + atanh(1 - 2 * y)^2) - log(1 - y) - log(y)
    }
    # t2-burr8
    if (sd == "burr8") {
      loglik <- (log(pi) + 2 * log(sigma)) + log(csc(pi * y)) -
        (3/2) * log(mu^2 + 2 * sigma^2 - 2 * mu * log(tan((pi * y)/2)) + log(tan((pi * y)/2))^2)
    }
    # t2-Cauchy-----
    if (sd == "cauchy") {
      loglik <- log(pi) + log(sigma) - log(mu^2 + 2 * sigma^2 + 2 * mu * cot(pi * y)
        + cot(pi * y)^2) - (1/2) * log(2 + (mu + cot(pi * y))^2/sigma^2) + 2 * log(csc(pi * y))
    }
    # t2-t2-----
    if (sd == "t2") {
      loglik <- 2 * log(sigma) - (3/2) * log(1 + 2 * sqrt(2) * mu * sqrt(1 - y) * sqrt(y) + 2 *
        (-2 + mu^2 + 2 * sigma^2) * y -
        4 * mu * sqrt(2 - 2 * y) * y^(3/2) - 2 * (-2 + mu^2 + 2 * sigma^2) * y^2)
    }
    # t2-logistic-----
    if (sd == "logistic") {
      loglik <- log(sigma) - 1/2 * log(2 + (mu + log(1 - y) - log(y))^2/sigma^2) -
        log(mu^2 + 2 * sigma^2 - 2 * mu * (-log(1 - y) + log(y)) + (-log(1 - y) + log(y))^2) - log(1 - y) - log(y)
    }
  }
  # Kumaraswamy: here mu/sigma play the roles of the shape parameters a and b
  if (fd == "km" | sd == "km") {
    a <- mu
    b <- sigma
    loglik <- log(a) + log(b) + (a - 1) * log(y) + (b - 1) * log(1 - y^a)
  }
  # NA/NaN observations (boundary values of y) are excluded from the total
  if (total) sum(loglik, na.rm = TRUE) else loglik
}
|
# Regression smoke test for getImputedNAs_GeneSymbol_RnaSeq2(): the imputed
# RNASeq2 matrix for the first reported gene symbol must have exactly 1 row
# and 10464 sample columns. When the test data (base dir / zip file) are not
# available the test is skipped with a message and TRUE is returned.
library(GeneSurvey)
#################################################################
#################################################################
base_dir <- getBaseDir()
zip_file <- getZipDir()
if (is.null(base_dir) || is.null(zip_file)) {
  message("No test data. Skip test.")
  TRUE
} else {
  initGeneReport("-Xmx4800m")
  # example identifier: "hsa-let-7a.MIMAT0000062"
  first_symbol <- getNames_GeneSymbol_RnaSeq2(theZipFile = zip_file)[1]
  imputed <- getImputedNAs_GeneSymbol_RnaSeq2(first_symbol, theZipFile = zip_file)
  (nrow(imputed) == 1) &&
    (ncol(imputed) == 10464)
}
| /tests/getImputedNAs_GeneSymbol_RnaSeq2.R | no_license | minghao2016/GeneSurvey | R | false | false | 524 | r | library(GeneSurvey)
#################################################################
#################################################################
# Smoke test: the imputed RNASeq2 matrix for the first available gene symbol
# should be 1 x 10464; skips (returning TRUE) when test data are unavailable.
baseDir <- getBaseDir()
zipFile <- getZipDir()
if ((!is.null(baseDir))&&(!is.null(zipFile)))
{
initGeneReport("-Xmx4800m")
# example identifier: "hsa-let-7a.MIMAT0000062"
foo <- getImputedNAs_GeneSymbol_RnaSeq2(getNames_GeneSymbol_RnaSeq2(theZipFile=zipFile)[1], theZipFile=zipFile)
# test result: one row and 10464 sample columns
(1==dim(foo)[1])&&
(10464==dim(foo)[2])
} else {
message("No test data. Skip test.")
TRUE
}
|
# P(X = 3) errors on a single page, modelling typos as Poisson events with
# rate lambda = total errors / total pages.
np <- 500      # number of pages in the book
ne <- 200      # total number of errors
x <- 3         # errors on one page: the event of interest
l <- ne / np   # lambda = mean errors per page (0.4)
P <- dpois(x, l)
# report the probability as a rounded percentage
n <- paste("Probability that there are three errors in a page is less than a percentage of", round(P * 100))
n | /Elementary_Statistics:_A_Step_By_Step_Approach_by_Allan_G._Bluman/CH5/EX5.27/ex5_27.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 144 | r | np=500
# Poisson model for typo counts; lambda = errors per page.
# NOTE(review): np (total pages, 500) is assigned on the preceding line of
# the original file.
ne=200        # total number of errors
x=3           # errors on one page: the event of interest
l=ne/np       # lambda = 0.4 errors per page
P=dpois(x,l)  # P(X = 3)
n<-paste("Probability that there are three errors in a page is less than a percentage of",round(P*100))
n |
#install.packages("RCurl")
#library(RCurl)
#https://www.r-bloggers.com/getting-data-from-an-online-source/
## get sorted filelist in the path / location
# https://stackoverflow.com/questions/14496325/natural-sort-order-human-sort-order-in-r-list-files
library(gtools)  # provides mixedsort() for natural ("human") ordering
# NOTE(review): machine-specific absolute path; edit before running elsewhere.
filepath <- "C:/Users/Desktop/data"
setwd(filepath)  # NOTE(review): redundant -- list.files(filepath) below takes the path directly
details <- list.files(filepath)
# natural sort by file NAME (e.g. file2 before file10); sorting by
# modification date (the links below) is NOT implemented here
details <- mixedsort(details, decreasing=FALSE)
# http://r.789695.n4.nabble.com/How-to-get-the-last-modified-time-of-a-file-from-R-td3772025.html
# https://stackoverflow.com/questions/13762224/how-to-sort-files-list-by-date
| /1_read_data.r | no_license | sreetamadas/sample_R_code | R | false | false | 581 | r | #install.packages("RCurl")
#library(RCurl)
#https://www.r-bloggers.com/getting-data-from-an-online-source/
## get sorted filelist in the path / location
# https://stackoverflow.com/questions/14496325/natural-sort-order-human-sort-order-in-r-list-files
library(gtools)
filepath <- "C:/Users/Desktop/data"
setwd(filepath)
details <- list.files(filepath)
details <- mixedsort(details, decreasing=FALSE)
# http://r.789695.n4.nabble.com/How-to-get-the-last-modified-time-of-a-file-from-R-td3772025.html
# https://stackoverflow.com/questions/13762224/how-to-sort-files-list-by-date
|
#' Convert coordinates from degrees, minutes and seconds to decimal degrees
#'
#' @param degrees Degrees component (numeric, or a string coercible to numeric).
#' @param minutes Minutes component (numeric, or a string coercible to numeric).
#' @param seconds Seconds component (numeric, or a string coercible to numeric).
#' @param direction Hemisphere letter, one of "N", "S", "E", "W".
#'   "W" and "S" produce a negative decimal-degree value.
#'
#' @return A numeric decimal-degree value.
#' @export
#'
#' @examples
#' from_dms_to_dd(45, 30, 0, "N")  # 45.5
#' from_dms_to_dd(45, 30, 0, "W")  # -45.5
from_dms_to_dd <- function(degrees, minutes, seconds, direction){
  dd <- as.numeric(degrees) + (as.numeric(minutes)/60) + (as.numeric(seconds)/(60*60))
  # %in% is the scalar-safe membership test; the original used the
  # elementwise `|` inside a scalar `if`.
  if (direction %in% c("W", "S")){
    dd <- dd * -1
  }
  return(dd)
}
# https://stackoverflow.com/questions/33997361/how-to-convert-degree-minute-second-to-degree-decimal
| /R/from_dms_to_dd.R | no_license | kguidonimartins/monitora-derramamento-oleo | R | false | false | 534 | r | #' Convert coordinates from degrees, minutes and seconds to decimal degrees
#'
#' @param degrees
#' @param minutes
#' @param seconds
#' @param direction
#'
#' @return
#' @export
#'
#' @examples
from_dms_to_dd <- function(degrees, minutes, seconds, direction){
dd <- as.numeric(degrees) + (as.numeric(minutes)/60) + (as.numeric(seconds)/(60*60))
if (direction == "W" | direction == "S"){
dd <- dd * -1
}
return(dd)
}
# https://stackoverflow.com/questions/33997361/how-to-convert-degree-minute-second-to-degree-decimal
|
library(leaps);
# Forward stepwise selection between the intercept-only model and the
# full model (all predictors of `bug` in data_training).
null <- lm(bug~1,data=data_training);
full <- lm(bug~.,data = data_training);
finalModel <- step(null,scope=list(lower=null,upper=full),direction="forward")
# NOTE(review): `<<-` writes to the global environment; at top level a
# plain `<-` would behave identically.
predictions <<- predict.lm(finalModel,data_testing);
# Rank test records by predicted bug count, descending, then drop the
# temporary ranking column from data_testing.
data_testing$target <- predictions;
orderedData <- data_testing[order(-data_testing$target),];
data_testing$target <- NULL;
write.csv(orderedData,"E:\\STUDY\\THESIS\\STEP-WISE REGRESSION\\testDataOrderedByPredictions.csv",row.names=F);
# Cost-effectiveness curve: cumulative %LOC inspected vs %bugs found
# when modules are inspected in predicted-risk order.
cumulativeLOC <- cumsum(orderedData$loc);
cumulativeBugs <- cumsum(orderedData$bug);
totalLOC <- cumulativeLOC[NROW(cumulativeLOC)];
totalBugs <- cumulativeBugs[NROW(cumulativeBugs)];
percentLOC <- (cumulativeLOC/totalLOC)*100;
percentBugs <- (cumulativeBugs/totalBugs)*100;
# NOTE(review): lines() requires an already-open plot device/plot.
lines(percentLOC,percentBugs,type="o",col="red",lwd = 0.5,pch=".",cex=0.8)
| /predicted.r | permissive | sriramdhanvi/step-wise-linear-regression | R | false | false | 825 | r | library(leaps);
null <- lm(bug~1,data=data_training);
full <- lm(bug~.,data = data_training);
finalModel <- step(null,scope=list(lower=null,upper=full),direction="forward")
predictions <<- predict.lm(finalModel,data_testing);
data_testing$target <- predictions;
orderedData <- data_testing[order(-data_testing$target),];
data_testing$target <- NULL;
write.csv(orderedData,"E:\\STUDY\\THESIS\\STEP-WISE REGRESSION\\testDataOrderedByPredictions.csv",row.names=F);
cumulativeLOC <- cumsum(orderedData$loc);
cumulativeBugs <- cumsum(orderedData$bug);
totalLOC <- cumulativeLOC[NROW(cumulativeLOC)];
totalBugs <- cumulativeBugs[NROW(cumulativeBugs)];
percentLOC <- (cumulativeLOC/totalLOC)*100;
percentBugs <- (cumulativeBugs/totalBugs)*100;
lines(percentLOC,percentBugs,type="o",col="red",lwd = 0.5,pch=".",cex=0.8)
|
/exercise-2/exercise.R | permissive | rsr3rs/ch8-lists | R | false | false | 2,184 | r | ||
#' Fast replacement for the \%in\% operator
#'
#' Drop-in substitute for \code{\link[base]{\%in\%}} built on the hashed
#' lookup of \code{\link{fmatch}} (the hash table is cached on \code{table},
#' so repeated calls are fast).
#' @param x Values to be matched.
#' @param table Values to be matched against.
#' @return A logical vector as long as \code{x}, \code{TRUE} where the
#'   corresponding element of \code{x} occurs in \code{table}.
#' @seealso \code{\link{fmatch}} and \code{\link[base]{match}}
#' @examples
#' x = sample(1e7, 1e7, replace = TRUE)
#' s = 1:100
#' system.time(s %in% x)
#' system.time(s %fin% x)  # faster; subsequent calls reuse the hash
#' @export
`%fin%` <- function(x, table) {
  # A non-NA match index means membership.
  !is.na(fmatch(x, table))
}
| /R/operators.R | no_license | richierocks/fastmatch | R | false | false | 688 | r | #' Fast replacment for the \%in\% operator
#'
#' Replacement for \code{\link[base]{\%in\%}}, based upon \code{\link{fmatch}}.
#' @param x Values to be matched.
#' @param table Values to be matched against.
#' @return A logical vector with the same length as \code{x}. It is \code{TRUE}
#' wherever an element of \code{x} is a member of \code{table}.
#' @seealso \code{\link{fmatch}} and \code{\link[base]{match}}
#' @examples
#' x = sample(1e7, 1e7, replace = TRUE)
#' s = 1:100
#' system.time(s %in% x)
#' # A bit faster
#' system.time(s %fin% x)
#' # Subsequent call is very fast!
#' system.time(s %fin% x)
#' @export
`%fin%` <- function(x, table) fmatch(x, table, nomatch = 0L) > 0L
|
# Mehrnoosh Oghbaie
## self
## 09/21/2020
## Preparing data from MaxQuant or ...
## Data preparation consists of four stages:
## 1. Remove contaminants and reverse proteins
## 2. Log transformation
## t-test and fold change analysis ...
set.seed(123)  # fixed seed so the stochastic imputation below is reproducible
###########################################################################
### Proteomic template
###########################################################################
# R6 container for one MaxQuant proteomics analysis run. Fields are filled
# progressively by the $set(...) methods below:
#   input.dir/input ......... proteinGroups.txt location and raw table
#   df/df_log/df_log_revised  filtered, log2-transformed, de-duplicated data
#   df_imputed .............. per-comparison imputed tables
#   df_significant(_woI) .... per-comparison test results (with / without imputation)
#   dp/dp_log ............... phospho-site table (raw / log2)
#   count_records ........... non-zero intensity counts per LFQ column
#   regulation_table/enrichment_table  downstream regulation and GO summaries
TemplateProtein <- R6Class("TemplateProtein",
                           list(
                             input.dir = NA,
                             input = NA,
                             df = NA,
                             df_log = NA,
                             df_log_revised = NA,
                             df_imputed = list(),
                             df_significant = list(),
                             df_significant_woI = list(),
                             condition = NA,
                             dp = NA,
                             dp_log = NA,
                             count_records = NA,
                             regulation_table= list(),
                             enrichment_table =list()
                           )
)
# Read the MaxQuant outputs from `input.dir`:
#   proteinGroups.txt        -> self$input (protein-level table)
#   Phospho (STY)Sites.txt   -> self$dp   (phospho-site table)
# Condition names are derived from the "LFQ.intensity.*" column names.
TemplateProtein$set("public","importInput", function(input.dir){
  print("Importing Maxquant output")
  self$input.dir <- input.dir
  self$input <- read.delim(file.path(self$input.dir, 'proteinGroups.txt'), header = TRUE)
  self$condition <- gsub("LFQ.intensity.","",unique(colnames(self$input)[grepl("LFQ.intensity",colnames(self$input))]))
  self$dp <- read.delim(file.path(self$input.dir,"Phospho (STY)Sites.txt"))
  invisible(self)
})
# Filter out contaminant ("+"), reversed ("+") and low-confidence
# (Q.value > 0.05) protein groups, keeping identifier + LFQ columns; also
# drop contaminants from the phospho table.
TemplateProtein$set("public","removeContaminant", function(){
  print("Removing contaminant proteins and reversed sequences")
  cols =colnames(self$input)
  dl = self[["input"]]
  # MaxQuant packs multiple IDs per row separated by ";": keep the leading one.
  dl$Peptide.counts..unique. <- apply(dl,1,function(x) strsplit(as.character(x["Peptide.counts..unique."]),";")[[1]][1])
  dl$Protein.ID <- apply(dl,1,function(x) strsplit(as.character(x["Protein.IDs"]),";")[[1]][1])
  dl$Gene.name <- apply(dl,1,function(x) strsplit(as.character(x["Gene.names"]),";")[[1]][1])
  # Fall back to the protein ID when no gene name is annotated.
  dl$Gene.name[is.na(dl$Gene.name)] <- dl$Protein.ID[is.na(dl$Gene.name)]
  self$df <- dl[!(dl$Potential.contaminant=="+"|dl$Reverse=="+"|dl[["Q.value"]]>0.05) ,
                c("Protein.ID","Gene.name","Peptide.counts..unique.","Fasta.headers",cols[grepl("LFQ.intensity",cols)])]
  colp <- colnames(self$dp)
  dlp <- self$dp
  dlp <- dlp %>% dplyr::mutate(Protein = as.character(Leading.proteins),
                               Gene.names = as.character(Gene.names),
                               Potential.contaminant = as.character(Potential.contaminant))
  np <- dim(dlp)[1]
  # Phospho table: only contaminants are removed here (no Reverse/Q filter).
  self$dp <- dlp[!(dlp$Potential.contaminant=="+") ,]
  print(paste((np-dim(self$dp)[1]),"records out of",np,"records were contaminants or reversed sequences and were removed from monoclonal phospho table."))
  invisible(self)
})
# log2-transform all LFQ intensities (zeros stay zero) into df_log, and all
# phospho "Intensity.*" columns (excluding the "___"-suffixed multiplicity
# columns) into dp_log.
TemplateProtein$set("public","transformData", function(){
  print("Log transformation from intensities")
  self[["df_log"]] <- self$df
  # ifelse keeps exact zeros as 0 instead of log2(0) = -Inf.
  self[["df_log"]][,colnames(self[["df_log"]])[grepl("LFQ.intensity.",colnames(self[["df_log"]]))]] <- data.frame(apply(self$df[,colnames(self$df)[grepl("LFQ.intensity.",colnames(self$df))]],2, function(x) ifelse(x>0,log2(as.numeric(x)),0)))
  dlp <- self$dp[,c("Gene.names", "Protein" ,"PEP","Fasta.headers",
                    colnames(self$dp)[grepl(paste0("Intensity."), colnames(self$dp))])]
  colp <- colnames(dlp)
  dlp[,grepl("Intensity.",colp)&!grepl("___", colp)] <- apply(dlp[,grepl("Intensity.",colp)&!grepl("___", colp)],2,function(x) ifelse(x!=0, log2(x),0))
  self[["dp_log"]] <- dlp
  invisible(self)
})
# For every comparison/replicate slot, keep only the technical re-run
# ("_revB"/"_revC"/"_revD"-suffixed column) with the most non-zero
# intensities, then strip the rerun suffix from the surviving columns.
#
# Args:
#   Comparison: table whose columns 1, 3, 2 combine into the sample prefix
#               used in the LFQ column names (e.g. "cell_extract_cond_1").
# Side effects: fills self$count_records and self$df_log_revised.
TemplateProtein$set("public","choosingConditions", function(Comparison){
  print("Removing redundant replicates")
  dx <- self$df_log
  # Number of proteins with non-zero intensity per LFQ column.
  self$count_records <- data.frame(apply(dx[,grepl("LFQ.intensity",colnames(dx))],2, function(x) length(x[x!=0])))
  for(i in seq_len(dim(Comparison)[1])){
    for(j in 1:4){
      col <- paste0(c(Comparison[i,c(1,3,2)],j), collapse="_")
      cols <- colnames(dx)[grepl(col, colnames(dx))]
      if(length(cols)>1){
        # Multiple runs match this slot: drop every column that does not
        # achieve the maximum count of non-zero intensities.
        tableCount <- data.frame(apply(dx[,cols],2, function(x) length(x[x!=0])))
        dx <- dx[,!colnames(dx) %in% rownames(tableCount)[tableCount[,1]!=max(tableCount[,1])]]
      }
    }
  }
  # BUG FIX: the original pattern "_revB|_revC|revD" was missing the
  # underscore before revD, leaving a stray trailing "_" on revD columns.
  # The equivalent cleanup in visualize() uses "_revB|_revC|_revD".
  colnames(dx) <- gsub("_revB|_revC|_revD","",colnames(dx))
  self$df_log_revised <- dx
  invisible(self)
})
# Per cell-line x extract comparison: drop all-zero rows, impute missing
# intensities per group, compute fold changes and BH-adjusted t-test
# p-values both without imputation (df_significant_woI) and with imputation
# (df_significant, which additionally gets a Bayes-factor criterion).
TemplateProtein$set("public","anovaAnalysis", function(Comparison){
  print("Performing imputation and t-test")
  df <- self$df_log_revised
  cells <- unique(Comparison[,1])
  extract <- unique(Comparison[,3])
  grid <- expand.grid(cells,extract)
  for(i in 1:dim(grid)[1]){
    grid[i,]
    # Rows of `Comparison` for this cell/extract: row 1 = case, row 2 = control.
    comp <- Comparison[Comparison[,1]==grid[i,1] &Comparison[,3]==grid[i,2],]
    case <- paste0(comp[1,c(1,3,2)], collapse="_")
    control <- paste0(comp[2,c(1,3,2)], collapse="_")
    case_reps <- colnames(df)[grepl(case, colnames(df))]
    control_reps <- colnames(df)[grepl(control, colnames(df))]
    ## Removing records with zero intensities in all replicates
    dfnz <- df[apply(df[,c(case_reps, control_reps)],1,sum)!=0,]
    print(paste('imputing',paste(grid[i,])))
    # Impute each group separately (impute() args amm/pmm configured elsewhere).
    dfi <- dfnz[,!grepl("LFQ.intensity", colnames( dfnz))]
    cols2 <- c(colnames(dfi), case_reps, control_reps)
    dfi <- cbind(dfi, matrix(NA, ncol=length(c(case_reps, control_reps)), nrow= dim(dfi)[1]))
    colnames(dfi) <- cols2
    dfi[,case_reps] <- impute(dfnz[case_reps], amm = "2", pmm = "6")
    dfi[,control_reps] <- impute(dfnz[control_reps], amm = "2", pmm = "6")
    self$df_imputed[[paste0(as.character(unlist(grid[i,])), collapse="_")]] <- dfi
    # --- Statistics WITHOUT imputation (means over non-zero reps only) ---
    dff <- dfnz[,!grepl("LFQ.intensity", colnames( dfnz))]
    dff[["avg.Case.log.intensity"]] <- apply(dfnz[,case_reps],1, function(x) ifelse(!is.na(mean(x[x!=0])), mean(x[x!=0]),0))
    dff[["avg.Control.log.intensity"]] <-apply(dfnz[,control_reps],1, function(x) ifelse(!is.na(mean(x[x!=0])), mean(x[x!=0]),0))
    dff[["avg.Case.num.reps"]] <- apply(dfnz[,case_reps],1, function(x) length(x[x!=0]))
    dff[["avg.Control.num.reps"]] <-apply(dfnz[,control_reps],1, function(x) length(x[x!=0]))
    dff[["log2foldChange"]] <- as.numeric(dff[["avg.Case.log.intensity"]]) - as.numeric(dff[["avg.Control.log.intensity"]])
    dff["p.value"] <- NA
    # Row-wise Welch t-test on the raw (zero-containing) values.
    tt <- foreach(k=1:dim(dff)[1], .combine=rbind)%do%{
      t.test(dfnz[k,case_reps],dfnz[k,control_reps])$p.value
    }
    dff[["p.value"]] <- data.frame(tt)$tt
    dff[["p.adjust.value"]] <- p.adjust(dff[["p.value"]] ,method="BH")
    # Significant: |log2FC| > 1 with >= 2 supporting reps, BH p < 0.05.
    dff[["Significant"]] <- ifelse(((dff[['log2foldChange']] >1 &dff[["avg.Case.num.reps"]]>1)|(dff[['log2foldChange']] < -1 &dff[["avg.Control.num.reps"]]>1))&dff[["p.adjust.value"]]<0.05,'Yes','No')
    self$df_significant_woI[[paste0(as.character(unlist(grid[i,])), collapse="_")]] <- dff
    # --- Statistics WITH imputation ---
    dfx <- dfi[,!grepl("LFQ.intensity", colnames( dfi))]
    dfx[["avg.Case.log.intensity"]] <- apply(dfi[,case_reps],1, function(x) ifelse(!is.na(mean(x[x!=0])), mean(x[x!=0]),0))
    dfx[["avg.Control.log.intensity"]] <-apply(dfi[,control_reps],1, function(x) ifelse(!is.na(mean(x[x!=0])), mean(x[x!=0]),0))
    # NOTE(review): rep counts intentionally come from the PRE-imputation
    # table dfnz (number of real observations) -- confirm this is intended.
    dfx[["avg.Case.num.reps"]] <- apply(dfnz[,case_reps],1, function(x) length(x[x!=0]))
    dfx[["avg.Control.num.reps"]] <-apply(dfnz[,control_reps],1, function(x) length(x[x!=0]))
    dfx[["log2foldChange"]] <- as.numeric(dfx[["avg.Case.log.intensity"]]) - as.numeric(dfx[["avg.Control.log.intensity"]])
    # Median-scaled fold change to correct for global intensity offsets.
    dfx[["log2foldChange.adjusted"]] <- as.numeric(dfx[["avg.Case.log.intensity"]]) - as.numeric(dfx[["avg.Control.log.intensity"]])*(median(as.numeric(dfx[["avg.Case.log.intensity"]]))/median(as.numeric(dfx[["avg.Control.log.intensity"]])))
    dfx["p.value"] <- NA
    tt2 <- foreach(k=1:dim(dff)[1], .combine=rbind)%do%{
      t.test(dfi[k,case_reps],dfi[k,control_reps])$p.value
    }
    dfx[["p.value"]] <- data.frame(tt2)$tt2
    dfx[["p.adjust.value"]] <- p.adjust(dfx[["p.value"]] ,method="BH")
    dfx[["Significant"]] <- ifelse(((dfx[['log2foldChange']] >1 &dfx[["avg.Case.num.reps"]]>1)|(dfx[['log2foldChange']] < -1 &dfx[["avg.Control.num.reps"]]>1))&dfx[["p.adjust.value"]]<0.05,'Yes','No')
    dfx[["Significant.adjusted"]] <- ifelse(((dfx[['log2foldChange.adjusted']] >1 &dfx[["avg.Case.num.reps"]]>1)|(dfx[['log2foldChange.adjusted']] < -1 &dfx[["avg.Control.num.reps"]]>1))&dfx[["p.adjust.value"]]<0.05,'Yes','No')
    # Bayes-factor alternative: 1/BF (evidence against the null); small is strong.
    bfe <- foreach(k=1:dim(dfi)[1], .combine=rbind)%do%{
      dh <- data.frame(rbind(cbind(unlist(dfi[k,case_reps]),rep("case",length(dfi[k,case_reps]))),
                             cbind(unlist(dfi[k,control_reps]),rep("control",length(dfi[k,control_reps])))))
      colnames(dh) <- c("intensity", "condition")
      dh$intensity <- as.numeric(dh$intensity)
      return(abs(1/ttestBF(formula = intensity ~ condition, data = dh)@bayesFactor$bf))
    }
    dfx[["bf.error"]] <- data.frame(bfe)$bfe
    dfx[["SignificantB"]] <- ifelse((dfx$bf.error < (1/3)) & (dfx$log2foldChange > 1 & dfx$avg.Case.num.reps > 1), "Yes","No")
    dfx[["SignificantB.adjusted"]] <- ifelse((dfx$bf.error < (1/3)) & (dfx$log2foldChange.adjusted > 1 & dfx$avg.Case.num.reps > 1), "Yes","No")
    self$df_significant[[paste0(as.character(unlist(grid[i,])), collapse="_")]] <- dfx
  }
  invisible(self)
})
# Produce all QC / result figures under ../img/proteomics/:
#   1. barplot of non-zero-intensity protein counts per sample,
#   2. pre/post-imputation intensity density ridges per comparison,
#   3. volcano plots for imputed and un-imputed ("woI") results,
#   4. pre- vs post-imputation p-value distribution histograms.
# `Comparison` supplies the case/control labels used in plot titles/filenames.
TemplateProtein$set("public","visualize", function(Comparison){
  dt <- self$count_records
  colnames(dt) <- 'count'
  dt$replicate <- gsub("_revB|_revC|_revD","",rownames(dt))
  # Collapse reruns of the same replicate to their best (max) count.
  dt1 <- dt%>%group_by(replicate)%>% summarize(count = max(count))
  dt1$replicate <- gsub("LFQ.intensity.","",dt1$replicate)
  p1 <- ggplot(dt1, (aes(x = replicate, y = count))) +
    geom_bar(aes(fill = count), stat = "identity") +
    scale_color_gradient(low="blue", high="green")+
    xlab("sample") +
    ylab("Number of proteins with non zero intensity") +
    ggtitle("Number of proteins with intensity > 0") +
    theme_classic() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1))+
    scale_fill_viridis_b()
  ggsave(file= file.path("../img/proteomics/intensity.barplot.pdf"), p1, width=9, height=9, dpi=100)
  ggsave(file= file.path("../img/proteomics/intensity.barplot.png"), p1, width=9, height=9, dpi=100)
  # Density ridges: log-intensity distribution before vs after imputation.
  for(cell in names(self$df_imputed)){
    cols <- colnames(self$df_imputed[[cell]])[grepl("LFQ.",colnames(self$df_imputed[[cell]]))]
    dz <- self$df_log_revised[apply(self$df_log_revised[,cols], 1,sum )>0, cols]
    dw <- self$df_imputed[[cell]][,grepl("LFQ", colnames(self$df_imputed[[cell]]))]
    dzm <- melt(dz)
    dwm <- melt(dw)
    dzm[["category"]] <- "PreImputation"
    dwm[["category"]] <- "PostImputation"
    dm <- rbind(dzm,dwm)
    colnames(dm) <- c("condition","value", "category")
    dm$condition <- gsub("LFQ.intensity.","", dm$condition)
    pd <- ggplot(dm, aes(x = value, y = condition, color = category, point_color = category, fill = category)) +
      geom_density_ridges(
        jittered_points = TRUE, scale = .95, rel_min_height = .01,
        point_shape = "|", point_size = 3, size = 0.25,
        position = position_points_jitter(height = 0)
      ) +
      scale_y_discrete(expand = c(0, 0)) +
      scale_x_continuous(expand = c(0, 0), name = "log LFQ intensity") +
      scale_fill_manual(values = c("#D55E0050", "#0072B250"), labels = c("Post Imputation", "Pre Imputation")) +
      scale_color_manual(values = c("#D55E00", "#0072B2"), guide = "none") +
      scale_discrete_manual("point_color", values = c("#D55E00", "#0072B2"), guide = "none") +
      coord_cartesian(clip = "off") +
      guides(fill = guide_legend(
        override.aes = list(
          fill = c("#D55E00A0", "#0072B2A0"),
          color = NA, point_color = NA)
      )
      ) +
      ggtitle(paste("Density plot before and after imputation", cell)) +
      theme_ridges(center = TRUE)+
      theme(axis.text.y = element_text(size = 10))
    ggsave(file= file.path("../img/proteomics/",paste0(cell,".density.imputation.pdf")), pd, width=8, height=9, dpi=100)
    ggsave(file= file.path("../img/proteomics/",paste0(cell,".density.imputation.png")), pd, width=8, height=9, dpi=100)
  }
  # Volcano plots for the imputed results (gene labels on significant hits).
  for(cell in names(self$df_significant)){
    dx <- self$df_significant[[cell]]
    shade = data.frame(x1=c(-1, 1),
                       x2=c(-Inf, Inf),
                       y1=c(-log10(0.05), -log10(0.05)),
                       y2=c(Inf, Inf))
    fold_cutoff = 1
    pvalue_cutoff = 0.05
    # Case/control labels for this comparison's title and filename.
    order <- c(Comparison[Comparison[,1]==strsplit(cell,"_")[[1]][1]&Comparison[,3]==strsplit(cell,"_")[[1]][2],2])
    p <- ggplot(dx) +
      theme_bw()+
      geom_rect(data=shade,
                mapping=aes(xmin=x1, xmax=x2, ymin=y1, ymax=y2), fill='grey95')+
      geom_point(data = subset(dx, (Significant=="No")), aes(x =log2foldChange, y = -log10(p.adjust.value),colour=Significant), alpha = 0.5,size=2)+
      geom_point(data = subset(dx, (Significant=="Yes")), aes(x =log2foldChange, y = -log10(p.adjust.value),colour=Significant), alpha = 0.8,size=3)+
      geom_vline(xintercept = fold_cutoff, col = "blue")+
      geom_vline(xintercept = -fold_cutoff, col = "blue")+
      geom_hline(yintercept = -log10(pvalue_cutoff), col = "green")+
      ggtitle(paste(cell, "(",order[[2]],"~",order[[1]],")"))+
      scale_colour_manual(values=c("gray","red"))+
      geom_text_repel(data = subset(dx, (Significant=="Yes")),
                      aes(x=log2foldChange, y=-log10(p.adjust.value),label=Gene.name),
                      segment.alpha =0.35,
                      size = 2.5 )
    ggsave(file=paste0("../img/proteomics/",paste0(cell,"_", paste0(order, collapse=".")),"_volcanoplot.pdf"), p, width=11, height=9, dpi=200)
    ggsave(file=paste0("../img/proteomics/",paste0(cell,"_", paste0(order, collapse=".")),"_volcanoplot.png"), p, width=11, height=9, dpi=200)
  }
  # Volcano plots for the un-imputed results, without gene labels.
  for(cell in names(self$df_significant_woI)){
    dx <- self$df_significant_woI[[cell]]
    shade = data.frame(x1=c(-1, 1),
                       x2=c(-Inf, Inf),
                       y1=c(-log10(0.05), -log10(0.05)),
                       y2=c(Inf, Inf))
    fold_cutoff = 1
    pvalue_cutoff = 0.05
    order <- c(Comparison[Comparison[,1]==strsplit(cell,"_")[[1]][1]&Comparison[,3]==strsplit(cell,"_")[[1]][2],2])
    # BUG FIX: the original left a dangling "+" after scale_colour_manual()
    # before a commented-out geom_text_repel(), so the ggplot expression
    # continued into ggsave() and `p` was never built correctly here.
    p <- ggplot(dx) +
      theme_bw()+
      geom_rect(data=shade,
                mapping=aes(xmin=x1, xmax=x2, ymin=y1, ymax=y2), fill='grey95')+
      geom_point(data = subset(dx, (Significant=="No")), aes(x =log2foldChange, y = -log10(p.adjust.value),colour=Significant), alpha = 0.5,size=2)+
      geom_point(data = subset(dx, (Significant=="Yes")), aes(x =log2foldChange, y = -log10(p.adjust.value),colour=Significant), alpha = 0.8,size=3)+
      geom_vline(xintercept = fold_cutoff, col = "blue")+
      geom_vline(xintercept = -fold_cutoff, col = "blue")+
      geom_hline(yintercept = -log10(pvalue_cutoff), col = "green")+
      ggtitle(paste(cell, "(",order[[2]],"~",order[[1]],")"))+
      scale_colour_manual(values=c("gray","red"))
    # Gene labels intentionally disabled for the un-imputed volcano plots.
    ggsave(file=paste0("../img/proteomics/",paste0(cell,"_", paste0(order, collapse=".")),"_volcanoplotWOI.pdf"), p, width=11, height=9, dpi=200)
    ggsave(file=paste0("../img/proteomics/",paste0(cell,"_", paste0(order, collapse=".")),"_volcanoplotWOI.png"), p, width=11, height=9, dpi=200)
  }
  # p-value distributions, one pre/post-imputation panel pair per comparison.
  png(file=paste0("../img/proteomics/pvalue.distribution.png"), width=800, height=900)
  par(mfrow=c(4,2))
  for(cell in names(self$df_significant)){
    order <- c(Comparison[Comparison[,1]==strsplit(cell,"_")[[1]][1]&Comparison[,3]==strsplit(cell,"_")[[1]][2],2])
    hist(self$df_significant_woI[[cell]]$p.value, breaks="FD", main=paste("Pre imputation", cell,paste0(order, collapse=".vs.")), xlab="")
    hist(self$df_significant_woI[[cell]]$p.adjust.value, breaks="FD", add=T, col='red')
    hist(self$df_significant[[cell]]$p.value, breaks="FD", main=paste("Post imputation",cell,paste0(order, collapse=".vs.")), xlab="")
    hist(self$df_significant[[cell]]$p.adjust.value, breaks="FD", add=T, col='red')
    legend("top", inset=.02, title="",
           c("Pre Imputation","Post Imputation"), fill=c("grey","red"), horiz=TRUE, cex=1.5)
  }
  dev.off()
  invisible(self)
})
# GO enrichment (Cellular Component and Biological Process) of up-regulated
# proteins per comparison, via clusterProfiler; saves barplots and stores
# the simplified term tables in self$enrichment_table.
TemplateProtein$set("public","enrichment", function(){
  self$enrichment_table[["CC"]] <- list()
  self$enrichment_table[["BP"]] <- list()
  for(name in names(self$df_significant)){
    print(name)
    dp1 <- self$df_significant[[name]]
    # regulate: +1 up, -1 down, 0 unchanged (by Significant + fold change sign).
    dp1$regulate <- ifelse((dp1$Significant=="Yes"&dp1$log2foldChange>= 1),1,ifelse((dp1$Significant=="Yes"&dp1$log2foldChange < -1),-1,0))
    self$regulation_table[[name]] <-dp1
    for(pro in c("CC","BP")){
      # Map up-regulated gene symbols to Entrez IDs (recomputed per ontology).
      geneID <- mapIds(org.Hs.eg.db, dp1$Gene.name[dp1$regulate==1], 'ENTREZID', 'SYMBOL')
      gene.df <- bitr(geneID , fromType = "ENTREZID",
                      toType = c("ENSEMBL", "SYMBOL"),
                      OrgDb = org.Hs.eg.db)
      ego <- enrichGO(gene = gene.df$ENTREZID,
                      # universe = gene.df$SYMBOL,
                      OrgDb = org.Hs.eg.db,
                      ont = pro,
                      pAdjustMethod = "BH",
                      pvalueCutoff = 0.01,
                      qvalueCutoff = 0.05)
      # Collapse redundant GO terms (semantic similarity > 0.7).
      egos <- clusterProfiler::simplify(ego, cutoff=0.7, by="p.adjust", select_fun=min)
      if(dim(egos@result)[1]>1){
        egoList <- egos@result%>% filter(qvalue<=0.05&p.adjust<=0.01)
        # "k/n" GeneRatio string -> numeric fraction.
        egoList$GeneRatio <- round(unlist(lapply(egoList$GeneRatio, function(x) as.numeric(strsplit(x,"/")[[1]][1])/as.numeric(strsplit(x,"/")[[1]][2]))),2)
        egoList$Symbol <- NA
        for(i in 1:dim(egoList)[1]){
          # Translate the "/"-separated Entrez IDs back to gene symbols.
          geneID <- strsplit(egoList$geneID[i],"/")[[1]]
          symbols <- mapIds(org.Hs.eg.db, geneID, 'SYMBOL', 'ENTREZID')
          egoList$Symbol[i] <- paste(unname(symbols), collapse="|")
        }
        self$enrichment_table[[pro]][[name]] <- egoList
        qe <- ggplot(egoList, aes(x=Description, y=GeneRatio)) +
          geom_bar(stat='identity', aes(fill=p.adjust)) +
          labs(title=paste(""),
               subtitle=ifelse(pro=="CC", paste("Cellular component",name),paste("Biological Process", name))) +
          scale_fill_gradient(low="blue", high="red")+
          coord_flip()+
          theme_bw()+
          theme(axis.text.x = element_text(angle = 90))
        # Figure size scales with the number of enriched terms.
        ggsave(file=paste0("../img/proteomics/",name,".Enrichment.",pro,".pdf"), qe, width=5+(dim(egoList)[1]/30), height=7+(dim(egoList)[1]/10), dpi=200)
        ggsave(file=paste0("../img/proteomics/",name,".Enrichment.",pro,".png"), qe, width=5+(dim(egoList)[1]/30), height=7+(dim(egoList)[1]/10), dpi=200)
      }
    }
  }
  invisible(self)
})
# Export every result list to a multi-sheet xlsx workbook under ../table/,
# one sheet per comparison, replacing any existing file.
TemplateProtein$set("public","writeFiles", function(){
  # Write each element of a named list of data frames as one sheet of `path`.
  # Replaces the original five copy-pasted remove/append loops. (The original
  # wrote the WOI workbook using names(df_significant); the two lists share
  # the same comparison names, so iterating each list's own names is equivalent.)
  write_sheets <- function(tbl_list, path){
    if (file.exists(path)) {
      file.remove(path)
    }
    for (name in names(tbl_list)) {
      xlsx::write.xlsx(as.data.frame(tbl_list[[name]]), file = path,
                       sheetName = name, row.names = FALSE, append = TRUE)
    }
  }
  write_sheets(self$df_significant,      "../table/significant_table.xlsx")
  write_sheets(self$df_significant_woI,  "../table/significant_tableWOI.xlsx")
  write_sheets(self$enrichment_table$CC, "../table/enrichment_tableCC.xlsx")
  write_sheets(self$enrichment_table$BP, "../table/enrichment_tableBP.xlsx")
  write_sheets(self$regulation_table,    "../table/regulation_table.xlsx")
  invisible(self)
})
# Scatterplot of insoluble-fraction vs whole-cell-lysate log2 fold change per
# cell line; proteins significant in either comparison are highlighted and
# labelled. Writes <stem>_scatterplot.{pdf,png} under ../img/proteomics/.
TemplateProtein$set("public","drawScatterplot", function(){
  # One scatterplot per cell line; `file_stem` preserves the historical
  # output file names byte-for-byte (note the original "Hek293T" casing).
  plot_cell <- function(cell, file_stem){
    insol <- self$df_significant[[paste0(cell, "_insol")]]
    wcl   <- self$df_significant[[paste0(cell, "_wcl")]]
    sig <- c(insol$Gene.name[insol$Significant=="Yes"],
             wcl$Gene.name[wcl$Significant=="Yes"])
    dx <- merge(insol[,c("Gene.name","log2foldChange")],
                wcl[,c("Gene.name","log2foldChange")], by="Gene.name", all=T)
    colnames(dx) <- c("Gene.name","insol","wcl")
    p <- ggplot(dx, aes(x = insol, y = wcl)) +
      geom_point(data =subset(dx, !Gene.name %in% sig) ,colour="gray", size=2)+
      geom_point(data =subset(dx, Gene.name %in% sig) ,colour="red", size=3)+
      geom_text_repel(data = subset(dx, Gene.name %in% sig),
                      aes(x = insol, y = wcl,label = Gene.name),
                      segment.alpha =0.35,
                      size = 2.5 ) +
      ggtitle(cell)
    ggsave(file=paste0("../img/proteomics/",file_stem,"_scatterplot.pdf"), p, width=11, height=9, dpi=200)
    ggsave(file=paste0("../img/proteomics/",file_stem,"_scatterplot.png"), p, width=11, height=9, dpi=200)
  }
  plot_cell("HEK293T", "Hek293T")
  plot_cell("U2OS", "U2OS")
  invisible(self)
})
}) | /src/functions/Anova_Analysis.R | no_license | moghbaie/Wouter | R | false | false | 23,238 | r | # Mehrnoosh Oghbaie
## self
## 09/21/2020
## Preparing data from MaxQuant or ...
## Data preparation consists of four stages:
## 1. Remove contaminants and reverse proteins
## 2. Log transformation
## t-test and fold change analysis ...
set.seed(123)  # fixed seed so the stochastic imputation below is reproducible
###########################################################################
### Proteomic template
###########################################################################
# R6 container for one MaxQuant proteomics analysis run; fields are
# populated in order by the $set(...) methods defined below.
TemplateProtein <- R6Class("TemplateProtein",
                           list(
                             input.dir = NA,
                             input = NA,
                             df = NA,
                             df_log = NA,
                             df_log_revised = NA,
                             df_imputed = list(),
                             df_significant = list(),
                             df_significant_woI = list(),
                             condition = NA,
                             dp = NA,
                             dp_log = NA,
                             count_records = NA,
                             regulation_table= list(),
                             enrichment_table =list()
                           )
)
# Read proteinGroups.txt and the phospho-site table from `input.dir`;
# derive condition names from the "LFQ.intensity.*" columns.
TemplateProtein$set("public","importInput", function(input.dir){
  print("Importing Maxquant output")
  self$input.dir <- input.dir
  self$input <- read.delim(file.path(self$input.dir, 'proteinGroups.txt'), header = TRUE)
  self$condition <- gsub("LFQ.intensity.","",unique(colnames(self$input)[grepl("LFQ.intensity",colnames(self$input))]))
  self$dp <- read.delim(file.path(self$input.dir,"Phospho (STY)Sites.txt"))
  invisible(self)
})
# Drop contaminant/reversed/low-confidence protein groups (and contaminant
# phospho sites); keep leading ID per ";"-separated field.
TemplateProtein$set("public","removeContaminant", function(){
  print("Removing contaminant proteins and reversed sequences")
  cols =colnames(self$input)
  dl = self[["input"]]
  dl$Peptide.counts..unique. <- apply(dl,1,function(x) strsplit(as.character(x["Peptide.counts..unique."]),";")[[1]][1])
  dl$Protein.ID <- apply(dl,1,function(x) strsplit(as.character(x["Protein.IDs"]),";")[[1]][1])
  dl$Gene.name <- apply(dl,1,function(x) strsplit(as.character(x["Gene.names"]),";")[[1]][1])
  # Fall back to the protein ID when no gene name is annotated.
  dl$Gene.name[is.na(dl$Gene.name)] <- dl$Protein.ID[is.na(dl$Gene.name)]
  self$df <- dl[!(dl$Potential.contaminant=="+"|dl$Reverse=="+"|dl[["Q.value"]]>0.05) ,
                c("Protein.ID","Gene.name","Peptide.counts..unique.","Fasta.headers",cols[grepl("LFQ.intensity",cols)])]
  colp <- colnames(self$dp)
  dlp <- self$dp
  dlp <- dlp %>% dplyr::mutate(Protein = as.character(Leading.proteins),
                               Gene.names = as.character(Gene.names),
                               Potential.contaminant = as.character(Potential.contaminant))
  np <- dim(dlp)[1]
  self$dp <- dlp[!(dlp$Potential.contaminant=="+") ,]
  print(paste((np-dim(self$dp)[1]),"records out of",np,"records were contaminants or reversed sequences and were removed from monoclonal phospho table."))
  invisible(self)
})
# log2-transform LFQ and phospho intensities; zeros stay zero (no -Inf).
TemplateProtein$set("public","transformData", function(){
  print("Log transformation from intensities")
  self[["df_log"]] <- self$df
  self[["df_log"]][,colnames(self[["df_log"]])[grepl("LFQ.intensity.",colnames(self[["df_log"]]))]] <- data.frame(apply(self$df[,colnames(self$df)[grepl("LFQ.intensity.",colnames(self$df))]],2, function(x) ifelse(x>0,log2(as.numeric(x)),0)))
  dlp <- self$dp[,c("Gene.names", "Protein" ,"PEP","Fasta.headers",
                    colnames(self$dp)[grepl(paste0("Intensity."), colnames(self$dp))])]
  colp <- colnames(dlp)
  # Skip the "___"-suffixed multiplicity columns.
  dlp[,grepl("Intensity.",colp)&!grepl("___", colp)] <- apply(dlp[,grepl("Intensity.",colp)&!grepl("___", colp)],2,function(x) ifelse(x!=0, log2(x),0))
  self[["dp_log"]] <- dlp
  invisible(self)
})
# Keep only the rerun column with the most non-zero intensities for each
# comparison/replicate slot, then strip the rerun suffix.
# Side effects: fills self$count_records and self$df_log_revised.
TemplateProtein$set("public","choosingConditions", function(Comparison){
  print("Removing redundant replicates")
  dx <- self$df_log
  # Non-zero intensity counts per LFQ column.
  self$count_records <- data.frame(apply(dx[,grepl("LFQ.intensity",colnames(dx))],2, function(x) length(x[x!=0])))
  for(i in seq_len(dim(Comparison)[1])){
    for(j in 1:4){
      col <- paste0(c(Comparison[i,c(1,3,2)],j), collapse="_")
      cols <- colnames(dx)[grepl(col, colnames(dx))]
      if(length(cols)>1){
        # Drop duplicate columns that do not achieve the maximum count.
        tableCount <- data.frame(apply(dx[,cols],2, function(x) length(x[x!=0])))
        dx <- dx[,!colnames(dx) %in% rownames(tableCount)[tableCount[,1]!=max(tableCount[,1])]]
      }
    }
  }
  # BUG FIX: original pattern "_revB|_revC|revD" was missing the underscore
  # before revD, leaving a stray trailing "_" on revD columns.
  colnames(dx) <- gsub("_revB|_revC|_revD","",colnames(dx))
  self$df_log_revised <- dx
  invisible(self)
})
# Per comparison: drop all-zero rows, impute per group, compute fold changes,
# BH-adjusted t-tests (with and without imputation) and Bayes-factor calls.
TemplateProtein$set("public","anovaAnalysis", function(Comparison){
  print("Performing imputation and t-test")
  df <- self$df_log_revised
  cells <- unique(Comparison[,1])
  extract <- unique(Comparison[,3])
  grid <- expand.grid(cells,extract)
  for(i in 1:dim(grid)[1]){
    grid[i,]
    # Row 1 of `comp` is the case group, row 2 the control group.
    comp <- Comparison[Comparison[,1]==grid[i,1] &Comparison[,3]==grid[i,2],]
    case <- paste0(comp[1,c(1,3,2)], collapse="_")
    control <- paste0(comp[2,c(1,3,2)], collapse="_")
    case_reps <- colnames(df)[grepl(case, colnames(df))]
    control_reps <- colnames(df)[grepl(control, colnames(df))]
    ## Removing records with zero intensities in all replicates
    dfnz <- df[apply(df[,c(case_reps, control_reps)],1,sum)!=0,]
    print(paste('imputing',paste(grid[i,])))
    # Impute case and control groups separately.
    dfi <- dfnz[,!grepl("LFQ.intensity", colnames( dfnz))]
    cols2 <- c(colnames(dfi), case_reps, control_reps)
    dfi <- cbind(dfi, matrix(NA, ncol=length(c(case_reps, control_reps)), nrow= dim(dfi)[1]))
    colnames(dfi) <- cols2
    dfi[,case_reps] <- impute(dfnz[case_reps], amm = "2", pmm = "6")
    dfi[,control_reps] <- impute(dfnz[control_reps], amm = "2", pmm = "6")
    self$df_imputed[[paste0(as.character(unlist(grid[i,])), collapse="_")]] <- dfi
    # --- Statistics WITHOUT imputation (means over non-zero reps only) ---
    dff <- dfnz[,!grepl("LFQ.intensity", colnames( dfnz))]
    dff[["avg.Case.log.intensity"]] <- apply(dfnz[,case_reps],1, function(x) ifelse(!is.na(mean(x[x!=0])), mean(x[x!=0]),0))
    dff[["avg.Control.log.intensity"]] <-apply(dfnz[,control_reps],1, function(x) ifelse(!is.na(mean(x[x!=0])), mean(x[x!=0]),0))
    dff[["avg.Case.num.reps"]] <- apply(dfnz[,case_reps],1, function(x) length(x[x!=0]))
    dff[["avg.Control.num.reps"]] <-apply(dfnz[,control_reps],1, function(x) length(x[x!=0]))
    dff[["log2foldChange"]] <- as.numeric(dff[["avg.Case.log.intensity"]]) - as.numeric(dff[["avg.Control.log.intensity"]])
    dff["p.value"] <- NA
    # Row-wise Welch t-test on the raw (zero-containing) values.
    tt <- foreach(k=1:dim(dff)[1], .combine=rbind)%do%{
      t.test(dfnz[k,case_reps],dfnz[k,control_reps])$p.value
    }
    dff[["p.value"]] <- data.frame(tt)$tt
    dff[["p.adjust.value"]] <- p.adjust(dff[["p.value"]] ,method="BH")
    dff[["Significant"]] <- ifelse(((dff[['log2foldChange']] >1 &dff[["avg.Case.num.reps"]]>1)|(dff[['log2foldChange']] < -1 &dff[["avg.Control.num.reps"]]>1))&dff[["p.adjust.value"]]<0.05,'Yes','No')
    self$df_significant_woI[[paste0(as.character(unlist(grid[i,])), collapse="_")]] <- dff
    # --- Statistics WITH imputation ---
    dfx <- dfi[,!grepl("LFQ.intensity", colnames( dfi))]
    dfx[["avg.Case.log.intensity"]] <- apply(dfi[,case_reps],1, function(x) ifelse(!is.na(mean(x[x!=0])), mean(x[x!=0]),0))
    dfx[["avg.Control.log.intensity"]] <-apply(dfi[,control_reps],1, function(x) ifelse(!is.na(mean(x[x!=0])), mean(x[x!=0]),0))
    # NOTE(review): rep counts intentionally come from pre-imputation dfnz.
    dfx[["avg.Case.num.reps"]] <- apply(dfnz[,case_reps],1, function(x) length(x[x!=0]))
    dfx[["avg.Control.num.reps"]] <-apply(dfnz[,control_reps],1, function(x) length(x[x!=0]))
    dfx[["log2foldChange"]] <- as.numeric(dfx[["avg.Case.log.intensity"]]) - as.numeric(dfx[["avg.Control.log.intensity"]])
    # Median-scaled fold change corrects for global intensity offsets.
    dfx[["log2foldChange.adjusted"]] <- as.numeric(dfx[["avg.Case.log.intensity"]]) - as.numeric(dfx[["avg.Control.log.intensity"]])*(median(as.numeric(dfx[["avg.Case.log.intensity"]]))/median(as.numeric(dfx[["avg.Control.log.intensity"]])))
    dfx["p.value"] <- NA
    tt2 <- foreach(k=1:dim(dff)[1], .combine=rbind)%do%{
      t.test(dfi[k,case_reps],dfi[k,control_reps])$p.value
    }
    dfx[["p.value"]] <- data.frame(tt2)$tt2
    dfx[["p.adjust.value"]] <- p.adjust(dfx[["p.value"]] ,method="BH")
    dfx[["Significant"]] <- ifelse(((dfx[['log2foldChange']] >1 &dfx[["avg.Case.num.reps"]]>1)|(dfx[['log2foldChange']] < -1 &dfx[["avg.Control.num.reps"]]>1))&dfx[["p.adjust.value"]]<0.05,'Yes','No')
    dfx[["Significant.adjusted"]] <- ifelse(((dfx[['log2foldChange.adjusted']] >1 &dfx[["avg.Case.num.reps"]]>1)|(dfx[['log2foldChange.adjusted']] < -1 &dfx[["avg.Control.num.reps"]]>1))&dfx[["p.adjust.value"]]<0.05,'Yes','No')
    # Bayes-factor alternative: 1/BF, small values = strong evidence.
    bfe <- foreach(k=1:dim(dfi)[1], .combine=rbind)%do%{
      dh <- data.frame(rbind(cbind(unlist(dfi[k,case_reps]),rep("case",length(dfi[k,case_reps]))),
                             cbind(unlist(dfi[k,control_reps]),rep("control",length(dfi[k,control_reps])))))
      colnames(dh) <- c("intensity", "condition")
      dh$intensity <- as.numeric(dh$intensity)
      return(abs(1/ttestBF(formula = intensity ~ condition, data = dh)@bayesFactor$bf))
    }
    dfx[["bf.error"]] <- data.frame(bfe)$bfe
    dfx[["SignificantB"]] <- ifelse((dfx$bf.error < (1/3)) & (dfx$log2foldChange > 1 & dfx$avg.Case.num.reps > 1), "Yes","No")
    dfx[["SignificantB.adjusted"]] <- ifelse((dfx$bf.error < (1/3)) & (dfx$log2foldChange.adjusted > 1 & dfx$avg.Case.num.reps > 1), "Yes","No")
    self$df_significant[[paste0(as.character(unlist(grid[i,])), collapse="_")]] <- dfx
  }
  invisible(self)
})
TemplateProtein$set("public", "visualize", function(Comparison) {
  # Render and save every QC / result figure for this analysis:
  #   1. barplot of proteins with non-zero intensity per sample,
  #   2. pre/post-imputation density ridgelines per condition,
  #   3. volcano plots for the post-imputation and without-imputation tables,
  #   4. p-value distribution histograms.
  # `Comparison`: table whose columns 1 and 3 hold the two halves of each
  # comparison key ("<cell>_<fraction>") and column 2 the condition labels
  # used in titles/filenames.  All output goes to ../img/proteomics/.

  ## 1) Barplot: proteins with intensity > 0 per replicate -------------------
  dt <- self$count_records
  colnames(dt) <- 'count'
  # Collapse technical re-runs (_revB/_revC/_revD) onto their base replicate
  # and keep the maximum count per replicate.
  dt$replicate <- gsub("_revB|_revC|_revD", "", rownames(dt))
  dt1 <- dt %>% group_by(replicate) %>% summarize(count = max(count))
  dt1$replicate <- gsub("LFQ.intensity.", "", dt1$replicate)
  p1 <- ggplot(dt1, aes(x = replicate, y = count)) +
    geom_bar(aes(fill = count), stat = "identity") +
    scale_color_gradient(low = "blue", high = "green") +
    xlab("sample") +
    ylab("Number of proteins with non zero intensity") +
    ggtitle("Number of proteins with intensity > 0") +
    theme_classic() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    scale_fill_viridis_b()
  ggsave(file = file.path("../img/proteomics/intensity.barplot.pdf"), p1, width = 9, height = 9, dpi = 100)
  ggsave(file = file.path("../img/proteomics/intensity.barplot.png"), p1, width = 9, height = 9, dpi = 100)

  ## 2) Density ridgelines: intensity distributions before/after imputation --
  for (cell in names(self$df_imputed)) {
    cols <- colnames(self$df_imputed[[cell]])[grepl("LFQ.", colnames(self$df_imputed[[cell]]))]
    # Pre-imputation values: rows with at least one non-zero intensity.
    dz <- self$df_log_revised[apply(self$df_log_revised[, cols], 1, sum) > 0, cols]
    dw <- self$df_imputed[[cell]][, grepl("LFQ", colnames(self$df_imputed[[cell]]))]
    dzm <- melt(dz)
    dwm <- melt(dw)
    dzm[["category"]] <- "PreImputation"
    dwm[["category"]] <- "PostImputation"
    dm <- rbind(dzm, dwm)
    colnames(dm) <- c("condition", "value", "category")
    dm$condition <- gsub("LFQ.intensity.", "", dm$condition)
    pd <- ggplot(dm, aes(x = value, y = condition, color = category, point_color = category, fill = category)) +
      geom_density_ridges(
        jittered_points = TRUE, scale = .95, rel_min_height = .01,
        point_shape = "|", point_size = 3, size = 0.25,
        position = position_points_jitter(height = 0)
      ) +
      scale_y_discrete(expand = c(0, 0)) +
      scale_x_continuous(expand = c(0, 0), name = "log LFQ intensity") +
      scale_fill_manual(values = c("#D55E0050", "#0072B250"), labels = c("Post Imputation", "Pre Imputation")) +
      scale_color_manual(values = c("#D55E00", "#0072B2"), guide = "none") +
      scale_discrete_manual("point_color", values = c("#D55E00", "#0072B2"), guide = "none") +
      coord_cartesian(clip = "off") +
      guides(fill = guide_legend(
        override.aes = list(
          fill = c("#D55E00A0", "#0072B2A0"),
          color = NA, point_color = NA)
      )
      ) +
      ggtitle(paste("Density plot before and after imputation", cell)) +
      theme_ridges(center = TRUE) +
      theme(axis.text.y = element_text(size = 10))
    ggsave(file = file.path("../img/proteomics/", paste0(cell, ".density.imputation.pdf")), pd, width = 8, height = 9, dpi = 100)
    ggsave(file = file.path("../img/proteomics/", paste0(cell, ".density.imputation.png")), pd, width = 8, height = 9, dpi = 100)
  }

  ## 3a) Volcano plots, post-imputation significance tables ------------------
  for (cell in names(self$df_significant)) {
    dx <- self$df_significant[[cell]]
    # Grey rectangles marking the "significant" region:
    # |log2FC| > 1 and adjusted p-value < 0.05.
    shade <- data.frame(x1 = c(-1, 1),
                        x2 = c(-Inf, Inf),
                        y1 = c(-log10(0.05), -log10(0.05)),
                        y2 = c(Inf, Inf))
    fold_cutoff <- 1
    pvalue_cutoff <- 0.05
    # Human-readable condition labels for this comparison key.
    order <- c(Comparison[Comparison[, 1] == strsplit(cell, "_")[[1]][1] & Comparison[, 3] == strsplit(cell, "_")[[1]][2], 2])
    p <- ggplot(dx) +
      theme_bw() +
      geom_rect(data = shade,
                mapping = aes(xmin = x1, xmax = x2, ymin = y1, ymax = y2), fill = 'grey95') +
      geom_point(data = subset(dx, (Significant == "No")), aes(x = log2foldChange, y = -log10(p.adjust.value), colour = Significant), alpha = 0.5, size = 2) +
      geom_point(data = subset(dx, (Significant == "Yes")), aes(x = log2foldChange, y = -log10(p.adjust.value), colour = Significant), alpha = 0.8, size = 3) +
      geom_vline(xintercept = fold_cutoff, col = "blue") +
      geom_vline(xintercept = -fold_cutoff, col = "blue") +
      geom_hline(yintercept = -log10(pvalue_cutoff), col = "green") +
      ggtitle(paste(cell, "(", order[[2]], "~", order[[1]], ")")) +
      scale_colour_manual(values = c("gray", "red")) +
      geom_text_repel(data = subset(dx, (Significant == "Yes")),
                      aes(x = log2foldChange, y = -log10(p.adjust.value), label = Gene.name),
                      segment.alpha = 0.35,
                      size = 2.5)
    ggsave(file = paste0("../img/proteomics/", paste0(cell, "_", paste0(order, collapse = ".")), "_volcanoplot.pdf"), p, width = 11, height = 9, dpi = 200)
    ggsave(file = paste0("../img/proteomics/", paste0(cell, "_", paste0(order, collapse = ".")), "_volcanoplot.png"), p, width = 11, height = 9, dpi = 200)
  }

  ## 3b) Volcano plots, without-imputation tables ----------------------------
  for (cell in names(self$df_significant_woI)) {
    dx <- self$df_significant_woI[[cell]]
    shade <- data.frame(x1 = c(-1, 1),
                        x2 = c(-Inf, Inf),
                        y1 = c(-log10(0.05), -log10(0.05)),
                        y2 = c(Inf, Inf))
    fold_cutoff <- 1
    pvalue_cutoff <- 0.05
    order <- c(Comparison[Comparison[, 1] == strsplit(cell, "_")[[1]][1] & Comparison[, 3] == strsplit(cell, "_")[[1]][2], 2])
    p <- ggplot(dx) +
      theme_bw() +
      geom_rect(data = shade,
                mapping = aes(xmin = x1, xmax = x2, ymin = y1, ymax = y2), fill = 'grey95') +
      geom_point(data = subset(dx, (Significant == "No")), aes(x = log2foldChange, y = -log10(p.adjust.value), colour = Significant), alpha = 0.5, size = 2) +
      geom_point(data = subset(dx, (Significant == "Yes")), aes(x = log2foldChange, y = -log10(p.adjust.value), colour = Significant), alpha = 0.8, size = 3) +
      geom_vline(xintercept = fold_cutoff, col = "blue") +
      geom_vline(xintercept = -fold_cutoff, col = "blue") +
      geom_hline(yintercept = -log10(pvalue_cutoff), col = "green") +
      ggtitle(paste(cell, "(", order[[2]], "~", order[[1]], ")")) +
      scale_colour_manual(values = c("gray", "red"))
    # Gene labels are intentionally disabled for the without-imputation plots.
    # BUG FIX: the original kept a trailing `+` before this commented-out
    # layer, so the following ggsave() call became part of the plot
    # expression and broke the assignment.
    # geom_text_repel(data = subset(dx, (Significant=="Yes")),
    #                 aes(x=log2foldChange, y=-log10(p.adjust.value),label=Gene.name),
    #                 segment.alpha =0.35,
    #                 size = 2.5 )
    ggsave(file = paste0("../img/proteomics/", paste0(cell, "_", paste0(order, collapse = ".")), "_volcanoplotWOI.pdf"), p, width = 11, height = 9, dpi = 200)
    ggsave(file = paste0("../img/proteomics/", paste0(cell, "_", paste0(order, collapse = ".")), "_volcanoplotWOI.png"), p, width = 11, height = 9, dpi = 200)
  }

  ## 4) p-value distributions, pre vs post imputation ------------------------
  png(file = paste0("../img/proteomics/pvalue.distribution.png"), width = 800, height = 900)
  par(mfrow = c(4, 2))
  for (cell in names(self$df_significant)) {
    order <- c(Comparison[Comparison[, 1] == strsplit(cell, "_")[[1]][1] & Comparison[, 3] == strsplit(cell, "_")[[1]][2], 2])
    # Grey histogram = raw p-values; red overlay = BH-adjusted p-values.
    hist(self$df_significant_woI[[cell]]$p.value, breaks = "FD", main = paste("Pre imputation", cell, paste0(order, collapse = ".vs.")), xlab = "")
    hist(self$df_significant_woI[[cell]]$p.adjust.value, breaks = "FD", add = TRUE, col = 'red')
    hist(self$df_significant[[cell]]$p.value, breaks = "FD", main = paste("Post imputation", cell, paste0(order, collapse = ".vs.")), xlab = "")
    hist(self$df_significant[[cell]]$p.adjust.value, breaks = "FD", add = TRUE, col = 'red')
    # BUG FIX: the legend previously repeated the panel titles
    # ("Pre/Post Imputation"); the colours actually distinguish raw vs
    # BH-adjusted p-values within each panel.
    legend("top", inset = .02, title = "",
           c("p.value", "BH adjusted p.value"), fill = c("grey", "red"), horiz = TRUE, cex = 1.5)
  }
  dev.off()
  invisible(self)
})
TemplateProtein$set("public", "enrichment", function() {
  # GO over-representation analysis (CC and BP ontologies) on the
  # up-regulated proteins of every significance table.  Enriched-term
  # tables are cached in self$enrichment_table[[ontology]][[comparison]],
  # the per-protein regulation calls in self$regulation_table, and a
  # gene-ratio barplot is written per ontology/comparison.
  self$enrichment_table[["CC"]] <- list()
  self$enrichment_table[["BP"]] <- list()
  for (name in names(self$df_significant)) {
    print(name)  # progress marker: each comparison can take a while
    dp1 <- self$df_significant[[name]]
    # regulate: 1 = significantly up (log2FC >= 1), -1 = significantly down
    # (log2FC < -1), 0 = everything else.
    dp1$regulate <- ifelse((dp1$Significant == "Yes" & dp1$log2foldChange >= 1), 1,
                           ifelse((dp1$Significant == "Yes" & dp1$log2foldChange < -1), -1, 0))
    self$regulation_table[[name]] <- dp1
    for (pro in c("CC", "BP")) {
      # Map the up-regulated gene symbols to Entrez IDs for enrichGO().
      geneID <- mapIds(org.Hs.eg.db, dp1$Gene.name[dp1$regulate == 1], 'ENTREZID', 'SYMBOL')
      gene.df <- bitr(geneID, fromType = "ENTREZID",
                      toType = c("ENSEMBL", "SYMBOL"),
                      OrgDb = org.Hs.eg.db)
      ego <- enrichGO(gene = gene.df$ENTREZID,
                      # universe = gene.df$SYMBOL,
                      OrgDb = org.Hs.eg.db,
                      ont = pro,
                      pAdjustMethod = "BH",
                      pvalueCutoff = 0.01,
                      qvalueCutoff = 0.05)
      # Collapse redundant GO terms before reporting.
      egos <- clusterProfiler::simplify(ego, cutoff = 0.7, by = "p.adjust", select_fun = min)
      if (nrow(egos@result) > 1) {
        egoList <- egos@result %>% filter(qvalue <= 0.05 & p.adjust <= 0.01)
        # The q/p filter can leave zero rows; skip this ontology in that
        # case.  (BUG FIX: the original `1:dim(egoList)[1]` loop ran with
        # i = 1, 0 on an empty table and crashed.)
        if (nrow(egoList) == 0) next
        # Convert "k/n" GeneRatio strings to numeric ratios.
        egoList$GeneRatio <- round(unlist(lapply(egoList$GeneRatio, function(x) as.numeric(strsplit(x, "/")[[1]][1]) / as.numeric(strsplit(x, "/")[[1]][2]))), 2)
        egoList$Symbol <- NA
        # Translate each term's Entrez gene list back to "|"-joined symbols.
        for (i in seq_len(nrow(egoList))) {
          ids <- strsplit(egoList$geneID[i], "/")[[1]]
          symbols <- mapIds(org.Hs.eg.db, ids, 'SYMBOL', 'ENTREZID')
          egoList$Symbol[i] <- paste(unname(symbols), collapse = "|")
        }
        self$enrichment_table[[pro]][[name]] <- egoList
        qe <- ggplot(egoList, aes(x = Description, y = GeneRatio)) +
          geom_bar(stat = 'identity', aes(fill = p.adjust)) +
          labs(title = paste(""),
               subtitle = ifelse(pro == "CC", paste("Cellular component", name), paste("Biological Process", name))) +
          scale_fill_gradient(low = "blue", high = "red") +
          coord_flip() +
          theme_bw() +
          theme(axis.text.x = element_text(angle = 90))
        # Plot dimensions grow with the number of enriched terms.
        ggsave(file = paste0("../img/proteomics/", name, ".Enrichment.", pro, ".pdf"), qe, width = 5 + (dim(egoList)[1] / 30), height = 7 + (dim(egoList)[1] / 10), dpi = 200)
        ggsave(file = paste0("../img/proteomics/", name, ".Enrichment.", pro, ".png"), qe, width = 5 + (dim(egoList)[1] / 30), height = 7 + (dim(egoList)[1] / 10), dpi = 200)
      }
    }
  }
  invisible(self)
})
TemplateProtein$set("public", "writeFiles", function() {
  # Export all cached result tables as multi-sheet xlsx workbooks under
  # ../table/, one sheet per comparison/ontology name.
  #
  # Helper: write a named list of data frames to one workbook (one sheet
  # per element).  Any pre-existing workbook is removed first, because
  # write.xlsx(append = TRUE) would otherwise keep stale sheets around.
  write_workbook <- function(tables, path) {
    if (file.exists(path)) {
      file.remove(path)
    }
    for (name in names(tables)) {
      xlsx::write.xlsx(as.data.frame(tables[[name]]), file = path,
                       sheetName = name, row.names = FALSE, append = TRUE)
    }
  }
  # df_significant and df_significant_woI are filled together under the
  # same comparison keys, so iterating each list's own names is equivalent
  # to the original shared-names loop.
  write_workbook(self$df_significant, "../table/significant_table.xlsx")
  write_workbook(self$df_significant_woI, "../table/significant_tableWOI.xlsx")
  write_workbook(self$enrichment_table$CC, "../table/enrichment_tableCC.xlsx")
  write_workbook(self$enrichment_table$BP, "../table/enrichment_tableBP.xlsx")
  write_workbook(self$regulation_table, "../table/regulation_table.xlsx")
  invisible(self)
})
TemplateProtein$set("public", "drawScatterplot", function() {
  # Scatterplots of log2 fold changes per cell line: insoluble fraction (x)
  # vs whole cell lysate (y).  Proteins significant in either fraction are
  # drawn in red and labelled; all others in grey.  Saves pdf + png.
  #
  # Helper: build and save the plot for one cell line.  `file_stem` is the
  # filename prefix (kept separate because the historical HEK293T output
  # used the "Hek293T" capitalisation).
  plot_cell <- function(cell, file_stem) {
    insol <- self$df_significant[[paste0(cell, "_insol")]]
    wcl <- self$df_significant[[paste0(cell, "_wcl")]]
    sig <- c(insol$Gene.name[insol$Significant == "Yes"],
             wcl$Gene.name[wcl$Significant == "Yes"])
    dx <- merge(insol[, c("Gene.name", "log2foldChange")],
                wcl[, c("Gene.name", "log2foldChange")],
                by = "Gene.name", all = TRUE)
    colnames(dx) <- c("Gene.name", "insol", "wcl")
    pS <- ggplot(dx, aes(x = insol, y = wcl)) +
      geom_point(data = subset(dx, !Gene.name %in% sig), colour = "gray", size = 2) +
      geom_point(data = subset(dx, Gene.name %in% sig), colour = "red", size = 3) +
      geom_text_repel(data = subset(dx, Gene.name %in% sig),
                      aes(x = insol, y = wcl, label = Gene.name),
                      segment.alpha = 0.35,
                      size = 2.5) +
      ggtitle(cell)
    ggsave(file = paste0("../img/proteomics/", file_stem, "_scatterplot.pdf"), pS, width = 11, height = 9, dpi = 200)
    ggsave(file = paste0("../img/proteomics/", file_stem, "_scatterplot.png"), pS, width = 11, height = 9, dpi = 200)
  }
  plot_cell("HEK293T", "Hek293T")
  plot_cell("U2OS", "U2OS")
  invisible(self)
})
\name{plotDim}
\alias{plotDim}
\title{ Determine the Plot Layout}
\description{
Determine the "optimal" layout for the plots of MCMB sequences based on the number of
parameters.}
\usage{
plotDim(p)
}
\arguments{
\item{p}{the number of parameters}
}
\value{
Dimensions for plot layout.
}
\keyword{hplot}
| /man/plotDim.Rd | no_license | cran/rqmcmb2 | R | false | false | 334 | rd | \name{plotDim}
\alias{plotDim}
\title{ Determine the Plot Layout}
\description{
Determine the "optimal" layout for the plots of MCMB sequences based on the number of
parameters.}
\usage{
plotDim(p)
}
\arguments{
\item{p}{the number of parameters}
}
\value{
Dimensions for plot layout.
}
\keyword{hplot}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{handle_user_prefix_param}
\alias{handle_user_prefix_param}
\title{handle_user_prefix_param}
\usage{
handle_user_prefix_param(stringvector)
}
\arguments{
\item{stringvector}{vector of strings with attribute names}
}
\value{
stringvector adjusted with required namespace (Linux)
}
\description{
handle_user_prefix_param
}
\keyword{internal}
| /man/handle_user_prefix_param.Rd | permissive | rayiik/xattrs | R | false | true | 430 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{handle_user_prefix_param}
\alias{handle_user_prefix_param}
\title{handle_user_prefix_param}
\usage{
handle_user_prefix_param(stringvector)
}
\arguments{
\item{stringvector}{vector of strings with attribute names}
}
\value{
stringvector adjusted with required namespace (Linux)
}
\description{
handle_user_prefix_param
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/round_robin.R
\name{rrmlg}
\alias{rrmlg}
\title{Round Robin Multilocus Genotypes}
\usage{
rrmlg(gid)
}
\arguments{
\item{gid}{a genind, genclone, or loci object.}
}
\value{
a matrix of multilocus genotype assignments by masked locus. There
will be n rows and m columns where n = number of samples and m = number of
loci.
}
\description{
This function will mask each locus one by one and then calculate multilocus
genotypes from the remaining loci in a round-robin fashion. This is used for
calculating the round robin allele frequencies for pgen and psex.
}
\examples{
# Find out the round-robin multilocus genotype assignments for P. ramorum
data(Pram)
pmlg_rr <- rrmlg(Pram)
head(pmlg_rr)
\dontrun{
# You can find out how many unique genotypes are found without each locus:
colSums(!apply(pmlg_rr, 2, duplicated))
}
}
\author{
Zhian N. Kamvar, Jonah Brooks, Stacy A. Krueger-Hadfield, Erik Sotka
}
\references{
Arnaud-Haond, S., Duarte, C. M., Alberto, F., & Serrão, E. A. 2007.
Standardizing methods to address clonality in population studies.
\emph{Molecular Ecology}, 16(24), 5115-5139.
Parks, J. C., & Werth, C. R. 1993. A study of spatial features of clones in a
population of bracken fern, \emph{Pteridium aquilinum} (Dennstaedtiaceae).
\emph{American Journal of Botany}, 537-544.
}
\seealso{
\code{\link{rraf}}, \code{\link{pgen}}, \code{\link{psex}}
}
| /man/rrmlg.Rd | no_license | knausb/poppr | R | false | true | 1,450 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/round_robin.R
\name{rrmlg}
\alias{rrmlg}
\title{Round Robin Multilocus Genotypes}
\usage{
rrmlg(gid)
}
\arguments{
\item{gid}{a genind, genclone, or loci object.}
}
\value{
a matrix of multilocus genotype assignments by masked locus. There
will be n rows and m columns where n = number of samples and m = number of
loci.
}
\description{
This function will mask each locus one by one and then calculate multilocus
genotypes from the remaining loci in a round-robin fashion. This is used for
calculating the round robin allele frequencies for pgen and psex.
}
\examples{
# Find out the round-robin multilocus genotype assignments for P. ramorum
data(Pram)
pmlg_rr <- rrmlg(Pram)
head(pmlg_rr)
\dontrun{
# You can find out how many unique genotypes are found without each locus:
colSums(!apply(pmlg_rr, 2, duplicated))
}
}
\author{
Zhian N. Kamvar, Jonah Brooks, Stacy A. Krueger-Hadfield, Erik Sotka
}
\references{
Arnaud-Haond, S., Duarte, C. M., Alberto, F., & Serrão, E. A. 2007.
Standardizing methods to address clonality in population studies.
\emph{Molecular Ecology}, 16(24), 5115-5139.
Parks, J. C., & Werth, C. R. 1993. A study of spatial features of clones in a
population of bracken fern, \emph{Pteridium aquilinum} (Dennstaedtiaceae).
\emph{American Journal of Botany}, 537-544.
}
\seealso{
\code{\link{rraf}}, \code{\link{pgen}}, \code{\link{psex}}
}
|
# Point estimate of the KDE mode via the boot package.
# NOTE(review): `$t0` is the statistic evaluated on the ORIGINAL sample, so
# the `reps` bootstrap replicates are computed and then discarded — confirm
# whether a summary of `$t` (the replicates) was intended before relying on
# the `reps` argument.
kdeModeBoot <- function(x,
reps = 100) { # Suggested by Adam
boot::boot(x, kdeMode, R = reps)$t0
} | /r_modules/trade_prevalidation/R/kdeModeBoot.R | no_license | mkao006/sws_r_api | R | false | false | 124 | r | kdeModeBoot <- function(x,
reps = 100) { # Suggested by Adam
boot::boot(x, kdeMode, R = reps)$t0
} |
##Rule6
##This player correctly matched all of the reasoning wheel
##sentences in the last case played.
##rsnwhl: 2 = attempt and correct; 1 = attempt and incorrect; 0 = no attempt
# `inData` is supplied by the calling environment (per-event rows keyed by
# student id `sid`, case play order `CaseOrder`, and the rsnwhl code above).
aw <- inData
# r6set1: per student, keep only the rsnwhl records of the LAST case played
# (CaseOrder == max within each sid group).
r6set1 <- aw %>%
group_by(sid) %>%
select(sid, CaseOrder, rsnwhl) %>%
filter(CaseOrder==max(CaseOrder))
list(r6set1)
#str(aw)
#r3set2 <- aw %>%
# group_by(sid) %>%
# select(sid, CaseOrder, rsnwhl) %>%
# filter(CaseOrder==max(CaseOrder)) %>%
# summarise_each(funs(max), rsnwhl)
#r3set2 <- mutate(r3set2, rsnwhl1 = rsnwhl == 1)
#list(r3set2)
str(aw)
# r6set2: within the last case, keep rows that were NOT fully correct
# (rsnwhl < 2), reduce to the max remaining rsnwhl per student, and flag
# students whose best remaining value is 0 (i.e. no attempt at all).
# NOTE(review): the rule header says "correctly matched all", but this set
# appears to identify the opposite (students with incomplete/absent
# attempts) — confirm the intended semantics of `rsnwhl1` before use.
r6set2 <- aw %>%
group_by(sid) %>%
select(sid, CaseOrder, rsnwhl) %>%
filter(rsnwhl < 2 & CaseOrder==max(CaseOrder)) %>%
summarise_each(funs(max), rsnwhl)
r6set2 <- mutate(r6set2, rsnwhl1 = rsnwhl == 0)
list(r6set2)
# Join the raw last-case rows with the per-student flags by student id.
r6set3 <- merge(r6set1, r6set2, by=c("sid"))
list(r6set3)
| /servers/lib/aeng/engines/r/games/AW/aw_job8_rule6.r | no_license | GlasslabGames/Assessment | R | false | false | 841 | r | ##Rule6
##This player correctly matched all of the reasoning wheel
##sentences in the last case played.
##rsnwhl: 2 = attempt and correct; 1 = attempt and incorrect; 0 = no attempt
aw <- inData
r6set1 <- aw %>%
group_by(sid) %>%
select(sid, CaseOrder, rsnwhl) %>%
filter(CaseOrder==max(CaseOrder))
list(r6set1)
#str(aw)
#r3set2 <- aw %>%
# group_by(sid) %>%
# select(sid, CaseOrder, rsnwhl) %>%
# filter(CaseOrder==max(CaseOrder)) %>%
# summarise_each(funs(max), rsnwhl)
#r3set2 <- mutate(r3set2, rsnwhl1 = rsnwhl == 1)
#list(r3set2)
str(aw)
r6set2 <- aw %>%
group_by(sid) %>%
select(sid, CaseOrder, rsnwhl) %>%
filter(rsnwhl < 2 & CaseOrder==max(CaseOrder)) %>%
summarise_each(funs(max), rsnwhl)
r6set2 <- mutate(r6set2, rsnwhl1 = rsnwhl == 0)
list(r6set2)
r6set3 <- merge(r6set1, r6set2, by=c("sid"))
list(r6set3)
|
# Shiny UI for GraphExplorer: upload a CSV edge list, filter nodes by a
# minimum-degree slider, and inspect the resulting graph plot plus its
# adjacency matrix and summary statistics.
# Bootstrap the shiny dependency on first run.
# NOTE(review): `if (!require(...)) install.packages(...)` installs at load
# time; acceptable for a demo app, but avoid this pattern in reusable code.
if (!require("shiny")) {
install.packages("shiny", repos="http://cran.rstudio.com/")
library("shiny")
}
shinyUI(pageWithSidebar(
headerPanel( "GraphExplorer", "GraphExplorer"),
# Left column: demo-file link, CSV upload, separator choice, degree filter.
sidebarPanel(
HTML('<a target=_blank href="https://raw.githubusercontent.com/digistam/ShinyProject/master/GraphExplorer/testgraph.csv">click here to download a demo csv file</a>'),
p(),
fileInput('file1', 'Select csv file', accept=c('text/csv')),
#checkboxInput('header', 'Header', TRUE),
radioButtons('sep', 'CSV separator:',
c(Comma=',', Tab='\t', Semicolon=';' )
),
h4('Degree: '),
sliderInput('nodes', 'select minimum degree', 0, min =0, max = 5, step = 1),
tableOutput('degreeList')#,
#h4('Node connections: '),
#tableOutput('edgeList')
),
# Main area: three tabs — usage instructions, the graph plot, and numeric
# details (node/edge counts, density, adjacency matrix).
mainPanel(
tabsetPanel(
tabPanel("Introduction",
h4('How does it work ...'),
HTML('First, click <b>"Choose File"</b> in the left column.'),
HTML('Take great care of the <b>separator</b> which has been used in the csv file. '),
HTML('After loading the CSV file, you can select the <b>"Graph"</b> tab to see the Graph output, '),
HTML('or the "Details" tab to see the Adjacency matrix etc. '),
p(),
HTML('You can use the <b>"Degree slider"</b> (see left column) to select nodes with more or less edges.'),
p(),
HTML('<img src="graphs.jpg" />')
),
tabPanel("Graph",
plotOutput('newGraph', height = 600, width = 600),
#tableOutput('adjacency'),
tableOutput("contents")#,
#h2('Legenda: '),
#img(src="yellow.png", height = 25, width = 25),
#img(src="purple.png", height = 25, width = 25),
#img(src="lblue.png", height = 25, width = 25),
#img(src="blue.png", height = 25, width = 25),
#img(src="green.png", height = 25, width = 25),
#img(src="red.png", height = 25, width = 25),
#p('degrees: 6, 5, 4, 3, 2, 1')
),
tabPanel("Details",
p('Unique nodes in graph: '),
verbatimTextOutput("nodeCount"),
p('Unique edges in graph: '),
verbatimTextOutput("edgeCount"),
p('Graph density: '),
verbatimTextOutput("density"),
p('Adjacency matrix'),
p(),
tableOutput('adjacency'),
HTML('Please use the slider to change the minimum degree value')
)
)
)
)) | /GraphExplorer/ui.R | no_license | digistam/ShinyProject | R | false | false | 2,601 | r | if (!require("shiny")) {
install.packages("shiny", repos="http://cran.rstudio.com/")
library("shiny")
}
# UI for the GraphExplorer Shiny app: upload a CSV edge list, filter nodes
# by minimum degree, and inspect the graph, its adjacency matrix and summary
# statistics in a tabbed main panel.
shinyUI(pageWithSidebar(
# Browser-window title and page header text.
headerPanel( "GraphExplorer", "GraphExplorer"),
# Left column: demo-file link, CSV upload, separator choice, degree filter.
sidebarPanel(
HTML('<a target=_blank href="https://raw.githubusercontent.com/digistam/ShinyProject/master/GraphExplorer/testgraph.csv">click here to download a demo csv file</a>'),
p(),
fileInput('file1', 'Select csv file', accept=c('text/csv')),
#checkboxInput('header', 'Header', TRUE),
radioButtons('sep', 'CSV separator:',
c(Comma=',', Tab='\t', Semicolon=';' )
),
h4('Degree: '),
# Slider value drives the server-side minimum-degree node filter.
sliderInput('nodes', 'select minimum degree', 0, min =0, max = 5, step = 1),
tableOutput('degreeList')#,
#h4('Node connections: '),
#tableOutput('edgeList')
),
# Main panel: three tabs -- usage help, the rendered graph, numeric details.
mainPanel(
tabsetPanel(
tabPanel("Introduction",
h4('How does it work ...'),
HTML('First, click <b>"Choose File"</b> in the left column.'),
HTML('Take great care of the <b>separator</b> which has been used in the csv file. '),
HTML('After loading the CSV file, you can select the <b>"Graph"</b> tab to see the Graph output, '),
HTML('or the "Details" tab to see the Adjacency matrix etc. '),
p(),
HTML('You can use the <b>"Degree slider"</b> (see left column) to select nodes with more or less edges.'),
p(),
HTML('<img src="graphs.jpg" />')
),
tabPanel("Graph",
# Graph plot rendered by output$newGraph on the server side.
plotOutput('newGraph', height = 600, width = 600),
#tableOutput('adjacency'),
tableOutput("contents")#,
#h2('Legenda: '),
#img(src="yellow.png", height = 25, width = 25),
#img(src="purple.png", height = 25, width = 25),
#img(src="lblue.png", height = 25, width = 25),
#img(src="blue.png", height = 25, width = 25),
#img(src="green.png", height = 25, width = 25),
#img(src="red.png", height = 25, width = 25),
#p('degrees: 6, 5, 4, 3, 2, 1')
),
# Summary statistics and the adjacency matrix of the filtered graph.
tabPanel("Details",
p('Unique nodes in graph: '),
verbatimTextOutput("nodeCount"),
p('Unique edges in graph: '),
verbatimTextOutput("edgeCount"),
p('Graph density: '),
verbatimTextOutput("density"),
p('Adjacency matrix'),
p(),
tableOutput('adjacency'),
HTML('Please use the slider to change the minimum degree value')
)
)
)
))
library(metacoder)
library(testthat)
context("Input parsing")
test_that("Mothur classify.seqs *.taxonomy parsing", {
raw_data <-
"AY457915 Bacteria;Firmicutes;Clostridiales;Johnsonella_et_rel.;Johnsonella_et_rel.;Johnsonella_et_rel.;Eubacterium_eligens_et_rel.;Lachnospira_pectinoschiza;
AY457914 Bacteria;Firmicutes;Clostridiales;Johnsonella_et_rel.;Johnsonella_et_rel.;Johnsonella_et_rel.;Eubacterium_eligens_et_rel.;Eubacterium_eligens;Eubacterium_eligens;
AY457913 Bacteria;Firmicutes;Clostridiales;Johnsonella_et_rel.;Johnsonella_et_rel.;Roseoburia_et_rel.;Roseoburia_et_rel.;Eubacterium_ramulus_et_rel.;uncultured;
AY457912 Bacteria;Firmicutes;Clostridiales;Johnsonella_et_rel.;Johnsonella_et_rel.;
AY457911 Bacteria;Firmicutes;Clostridiales;Ruminococcus_et_rel.;Anaerofilum-Faecalibacterium;Faecalibacterium;Faecalibacterium_prausnitzii;
"
result <- parse_mothur_taxonomy(text = raw_data)
expect_equal(length(result$taxa), 18)
expect_equal(length(roots(result)), 1)
expect_true(all(c("Bacteria", "Firmicutes") %in% result$taxon_names()))
# Check that the input can be replicated
out_path <- "test_mothur_tax_output.txt"
write_mothur_taxonomy(result, file = out_path)
expect_equal(readLines(out_path), strsplit(raw_data, split = "\n")[[1]])
expect_error(write_mothur_taxonomy(result))
# Delete files used for tests
file.remove(out_path)
})
# parse_mothur_taxonomy() on classify.seqs output carrying bootstrap scores
# in parentheses: the scores should appear in a "score" column of
# result$data$class_data, and writing the object back should reproduce the
# input text exactly.
test_that("Mothur classify.seqs *.taxonomy parsing w/ scores", {
raw_data <-
"AY457915\tBacteria(100);Firmicutes(99);Clostridiales(99);Johnsonella_et_rel.(99);Johnsonella_et_rel.(99);Johnsonella_et_rel.(91);Eubacterium_eligens_et_rel.(89);Lachnospira_pectinoschiza(80);
AY457914\tBacteria(100);Firmicutes(100);Clostridiales(100);Johnsonella_et_rel.(100);Johnsonella_et_rel.(100);Johnsonella_et_rel.(95);Eubacterium_eligens_et_rel.(92);Eubacterium_eligens(84);Eubacterium_eligens(81);
AY457913\tBacteria(100);Firmicutes(100);Clostridiales(100);Johnsonella_et_rel.(100);Johnsonella_et_rel.(100);Roseoburia_et_rel.(97);Roseoburia_et_rel.(97);Eubacterium_ramulus_et_rel.(90);uncultured(90);
AY457912\tBacteria(100);Firmicutes(99);Clostridiales(99);Johnsonella_et_rel.(99);Johnsonella_et_rel.(99);
AY457911\tBacteria(100);Firmicutes(99);Clostridiales(98);Ruminococcus_et_rel.(96);Anaerofilum-Faecalibacterium(92);Faecalibacterium(92);Faecalibacterium_prausnitzii(90);
"
result <- parse_mothur_taxonomy(text = raw_data)
# 18 unique taxa across the 5 sequences, all under a single root.
expect_equal(length(result$taxa), 18)
expect_equal(length(roots(result)), 1)
expect_true(all(c("Bacteria", "Firmicutes") %in% result$taxon_names()))
# One classification row per rank assignment, i.e. per ";" in the input.
expect_equal(nrow(result$data$class_data), stringr::str_count(raw_data, ";"))
expect_true("score" %in% colnames(result$data$class_data))
# Check that the input can be replicated (round trip through the writer).
out_path <- "test_mothur_tax_output.txt"
write_mothur_taxonomy(result, file = out_path)
expect_equal(readLines(out_path), strsplit(raw_data, split = "\n")[[1]])
# Calling the writer without a file argument must error, not silently write.
expect_error(write_mothur_taxonomy(result))
# Delete files used for tests
file.remove(out_path)
})
test_that("Mothur classify.seqs *.tax.summary detailed parsing", {
raw_data <-
"taxlevel rankID taxon daughterlevels total A B C
0 0 Root 2 242 84 84 74
1 0.1 Bacteria 50 242 84 84 74
2 0.1.2 Actinobacteria 38 13 0 13 0
3 0.1.2.3 Actinomycetaceae-Bifidobacteriaceae 10 13 0 13 0
4 0.1.2.3.7 Bifidobacteriaceae 6 13 0 13 0
5 0.1.2.3.7.2 Bifidobacterium_choerinum_et_rel. 8 13 0 13 0
6 0.1.2.3.7.2.1 Bifidobacterium_angulatum_et_rel. 1 11 0 11 0
7 0.1.2.3.7.2.1.1 unclassified 1 11 0 11 0
8 0.1.2.3.7.2.1.1.1 unclassified 1 11 0 11 0
9 0.1.2.3.7.2.1.1.1.1 unclassified 1 11 0 11 0
10 0.1.2.3.7.2.1.1.1.1.1 unclassified 1 11 0 11 0
11 0.1.2.3.7.2.1.1.1.1.1.1 unclassified 1 11 0 11 0
12 0.1.2.3.7.2.1.1.1.1.1.1.1 unclassified 1 11 0 11 0
6 0.1.2.3.7.2.5 Bifidobacterium_longum_et_rel. 1 2 0 2 0
7 0.1.2.3.7.2.5.1 unclassified 1 2 0 2 0
8 0.1.2.3.7.2.5.1.1 unclassified 1 2 0 2 0
9 0.1.2.3.7.2.5.1.1.1 unclassified 1 2 0 2 0"
result <- parse_mothur_tax_summary(text = raw_data)
result_from_file <- parse_mothur_tax_summary(file = "example_data/mothur_summary.txt")
expect_equal(result, result_from_file)
expect_equal(length(result$taxa), 17)
expect_equal(length(roots(result)), 1)
expect_true(all(c("Bacteria", "Actinobacteria") %in% result$taxon_names()))
})
test_that("Mothur classify.seqs *.tax.summary simple parsing", {
raw_data <-
'taxon total A B C
"k__Bacteria";"p__Actinobacteria";"c__Actinobacteria";"o__Bifidobacteriales";"f__Bifidobacteriaceae";"g__Bifidobacterium";"s__"; 1 0 1 0
"k__Bacteria";"p__Actinobacteria";"c__Actinobacteria";"o__Bifidobacteriales";"f__Bifidobacteriaceae";"g__Bifidobacterium";"s__adolescentis"; 1 0 1 0
"k__Bacteria";"p__Actinobacteria";"c__Actinobacteria";"o__Bifidobacteriales";"f__Bifidobacteriaceae";"g__Bifidobacterium";"s__longum"; 1 0 1 0
'
result <- parse_mothur_tax_summary(text = raw_data)
expect_equal(length(result$taxa), 9)
expect_equal(length(roots(result)), 1)
expect_true(all(c("k__Bacteria", "p__Actinobacteria") %in% result$taxon_names()))
})
# parse_newick() on an example file: parsing should raise a warning, yield
# 21 taxa with 2 roots, and include auto-generated node names
# ("node_1", "node_2", ...).
test_that("Newick parsing", {
expect_warning(result <- parse_newick("example_data/newick_example_1.txt"))
expect_equal(length(result$taxa), 21)
expect_equal(length(roots(result)), 2)
expect_true(all(c("node_1", "node_2") %in% result$taxon_names()))
})
# parse_unite_general() on the bundled UNITE example fasta: checks taxa
# counts, an example organism, and its full sequence; then round-trips the
# object through write_unite_general() and cleans up.
test_that("Parsing the UNITE general release fasta", {
# Reading
seq_in_path <- "example_data/unite_general.fa"
result <- parse_unite_general(file = seq_in_path)
expect_equal(length(result$taxa), 183)
expect_equal(length(roots(result)), 1)
# Spot-check the 5th sequence record and its assigned taxon.
expect_equivalent(result$taxon_names()[result$data$tax_data$taxon_id[5]], "Orbilia_sp")
expect_equal(result$data$tax_data$organism[5], "Orbilia_sp")
expect_equal(result$data$tax_data$unite_seq[5], "CCAAATCATGTCTCCCGGCCGCAAGGCAGGTGCAGGCGTTTAACCCTTTGTGAACCAAAAAACCTTTCGCTTCGGCAGCAGCTCGGTTGGAGACAGCCTCTGTGTCAGCCTGCCGCTAGCACCAATTATCAAAACTTGCGGTTAGCAACATTGTCTGATTACCAAATTTTCGAATGAAAATCAAAACTTTCAACAACGGATCTCTTGGTTCCCGCATCGATGAAGAACGCAGCGAAACGCGATAGTTAATGTGAATTGCAGAATTCAGTGAATCATCGAGTCTTTGAACGCACATTGCGCCCATTGGTATTCCATTGGGCATGTCTGTTTGAGCGTCATTACAACCCTCGGTCACCACCGGTTTTGAGCGAGCAGGGTCTTCGGATCCAGCTGGCTTTAAAGTTGTAAGCTCTGCTGGCTGCTCGGCCCAACCAGAACATAGTAAAATCATGCTTGTTCAAGGTTCGCGGTCGAAGCGGTACGGCCTGAACAATACCTACCACCTCTTAGG")
# Check that the input can be replicated (byte-identical round trip).
seq_out_path <- "test_unite_output.fa"
write_unite_general(result, file = seq_out_path)
expect_equal(readLines(seq_out_path), readLines(seq_in_path))
# Writer must error when no output file is given.
expect_error(write_unite_general(result))
# Delete files used for tests
file.remove(seq_out_path)
})
# parse_rdp() on the bundled RDP example fasta: checks taxa counts, an
# example taxon/accession and the start of its sequence, then round-trips
# through write_rdp() and cleans up.
test_that("Parsing the RDP fasta release", {
# Reading
seq_in_path <- "example_data/rdp_example.fa"
result <- parse_rdp(file = seq_in_path)
expect_equal(length(result$taxa), 26)
expect_equal(length(roots(result)), 1)
# Spot-check the 3rd sequence record and its assigned taxon.
expect_equivalent(result$taxon_names()[result$data$tax_data$taxon_id[3]], "Saccharomyces")
expect_equal(result$data$tax_data$rdp_id[3], "S004468774")
expect_true(startsWith(result$data$tax_data$rdp_seq[3], "gtttgacctcaaatcaggtaggagtacccgctgaacttaagcatatcaataagcggaggaaaagaaaccaaccgggattg"))
# Check that the input can be replicated (byte-identical round trip).
seq_out_path <- "test_rdp_output.fa"
write_rdp(result, file = seq_out_path)
expect_equal(readLines(seq_out_path), readLines(seq_in_path))
# NOTE(review): this calls write_greengenes() although the test exercises
# write_rdp(); it still passes because both error without file arguments,
# but write_rdp() was probably intended -- confirm before changing.
expect_error(write_greengenes(result))
# Delete files used for tests
file.remove(seq_out_path)
})
# parse_silva_fasta() on the bundled SILVA example fasta: checks taxa
# counts, an example taxon/accession and the start of its sequence, then
# writes the parsed object back out and cleans up the temporary file.
test_that("Parsing the SILVA fasta release", {
# Reading
seq_in_path <- "example_data/silva_example.fa"
result <- parse_silva_fasta(file = seq_in_path)
expect_equal(length(result$taxa), 164)
expect_equal(length(roots(result)), 2)
# Spot-check the 5th sequence record and its assigned taxon.
expect_equivalent(result$taxon_names()[result$data$tax_data$taxon_id[5]], "Physalis peruviana")
expect_equal(result$data$tax_data$ncbi_id[5], "GEET01005309")
expect_true(startsWith(result$data$tax_data$silva_seq[5], "GAUGGAUGCCUUGGCUUCAUCAGGCGAAGAAGGACGCAGCAAGCUGCGAUAAGCUUCGGGGAGCGGCACGCACGCUUUGA"))
# Check that the input can be replicated
# Fixed: the temp file used to be "test_rdp_output.fa" (copy-paste from the
# RDP test), which collides with that test's output; use a SILVA-specific
# name instead. The file is created and removed within this test.
seq_out_path <- "test_silva_output.fa"
write_silva_fasta(result, file = seq_out_path)
# expect_equal(readLines(seq_out_path)[c(-89, -2580)],
#              readLines(seq_in_path)[c(-89, -2580)])
# NOTE(review): write_greengenes() here looks like a copy-paste of the
# greengenes test; write_silva_fasta() without a file argument was probably
# the intended error case -- confirm before changing.
expect_error(write_greengenes(result))
# Delete files used for tests
file.remove(seq_out_path)
})
# parse_greengenes() takes paired taxonomy + sequence files: checks taxa
# counts, an example taxon/id and the start of its sequence, then
# round-trips both files through write_greengenes() and cleans up.
test_that("Parsing/writing the greengenes database", {
# Reading
tax_in_path <- "example_data/gg_tax_example.txt"
seq_in_path <- "example_data/gg_seq_example.fa"
result <- parse_greengenes(tax_file = tax_in_path, seq_file = seq_in_path)
expect_equal(length(result$taxa), 119)
expect_equal(length(roots(result)), 1)
# Spot-check the 5th record and its assigned taxon.
expect_equivalent(result$taxon_names()[result$data$tax_data$taxon_id[5]], "Rhodobacteraceae")
expect_equal(result$data$tax_data$gg_id[5], "1111758")
expect_true(startsWith(result$data$tax_data$gg_seq[5], "TTAGAGTTTGATCCTGGCTCAGAACGAACGCTGGCGGCAGGCCTAACACATGCAAGTCGAGCGCGCCCTTCGGGGTGAGCGGCGGACGGGTGAGTAACGCGTGGGAACGTGCCCTCTTCTGCGGGATAGCC"))
# Check that the input can be replicated (both files, byte-identical).
tax_out_path <- "test_gg_output.txt"
seq_out_path <- "test_gg_output.fa"
write_greengenes(result, tax_file = tax_out_path, seq_file = seq_out_path)
expect_equal(readLines(tax_out_path), readLines(tax_in_path))
expect_equal(readLines(seq_out_path), readLines(seq_in_path))
# Writer must error when no output files are given.
expect_error(write_greengenes(result))
# Delete files used for tests
file.remove(tax_out_path)
file.remove(seq_out_path)
})
test_that("Converting to phyloseq", {
# test round-trip
library(phyloseq)
data(enterotype)
expect_warning(x <- parse_phyloseq(enterotype))
y <- as_phyloseq(x)
expect_equivalent(enterotype, y)
}) | /tests/testthat/test--parsers_and_writers.R | permissive | agronomist/metacoder | R | false | false | 9,696 | r | library(metacoder)
library(testthat)
context("Input parsing")
test_that("Mothur classify.seqs *.taxonomy parsing", {
raw_data <-
"AY457915 Bacteria;Firmicutes;Clostridiales;Johnsonella_et_rel.;Johnsonella_et_rel.;Johnsonella_et_rel.;Eubacterium_eligens_et_rel.;Lachnospira_pectinoschiza;
AY457914 Bacteria;Firmicutes;Clostridiales;Johnsonella_et_rel.;Johnsonella_et_rel.;Johnsonella_et_rel.;Eubacterium_eligens_et_rel.;Eubacterium_eligens;Eubacterium_eligens;
AY457913 Bacteria;Firmicutes;Clostridiales;Johnsonella_et_rel.;Johnsonella_et_rel.;Roseoburia_et_rel.;Roseoburia_et_rel.;Eubacterium_ramulus_et_rel.;uncultured;
AY457912 Bacteria;Firmicutes;Clostridiales;Johnsonella_et_rel.;Johnsonella_et_rel.;
AY457911 Bacteria;Firmicutes;Clostridiales;Ruminococcus_et_rel.;Anaerofilum-Faecalibacterium;Faecalibacterium;Faecalibacterium_prausnitzii;
"
result <- parse_mothur_taxonomy(text = raw_data)
expect_equal(length(result$taxa), 18)
expect_equal(length(roots(result)), 1)
expect_true(all(c("Bacteria", "Firmicutes") %in% result$taxon_names()))
# Check that the input can be replicated
out_path <- "test_mothur_tax_output.txt"
write_mothur_taxonomy(result, file = out_path)
expect_equal(readLines(out_path), strsplit(raw_data, split = "\n")[[1]])
expect_error(write_mothur_taxonomy(result))
# Delete files used for tests
file.remove(out_path)
})
test_that("Mothur classify.seqs *.taxonomy parsing w/ scores", {
raw_data <-
"AY457915\tBacteria(100);Firmicutes(99);Clostridiales(99);Johnsonella_et_rel.(99);Johnsonella_et_rel.(99);Johnsonella_et_rel.(91);Eubacterium_eligens_et_rel.(89);Lachnospira_pectinoschiza(80);
AY457914\tBacteria(100);Firmicutes(100);Clostridiales(100);Johnsonella_et_rel.(100);Johnsonella_et_rel.(100);Johnsonella_et_rel.(95);Eubacterium_eligens_et_rel.(92);Eubacterium_eligens(84);Eubacterium_eligens(81);
AY457913\tBacteria(100);Firmicutes(100);Clostridiales(100);Johnsonella_et_rel.(100);Johnsonella_et_rel.(100);Roseoburia_et_rel.(97);Roseoburia_et_rel.(97);Eubacterium_ramulus_et_rel.(90);uncultured(90);
AY457912\tBacteria(100);Firmicutes(99);Clostridiales(99);Johnsonella_et_rel.(99);Johnsonella_et_rel.(99);
AY457911\tBacteria(100);Firmicutes(99);Clostridiales(98);Ruminococcus_et_rel.(96);Anaerofilum-Faecalibacterium(92);Faecalibacterium(92);Faecalibacterium_prausnitzii(90);
"
result <- parse_mothur_taxonomy(text = raw_data)
expect_equal(length(result$taxa), 18)
expect_equal(length(roots(result)), 1)
expect_true(all(c("Bacteria", "Firmicutes") %in% result$taxon_names()))
expect_equal(nrow(result$data$class_data), stringr::str_count(raw_data, ";"))
expect_true("score" %in% colnames(result$data$class_data))
# Check that the input can be replicated
out_path <- "test_mothur_tax_output.txt"
write_mothur_taxonomy(result, file = out_path)
expect_equal(readLines(out_path), strsplit(raw_data, split = "\n")[[1]])
expect_error(write_mothur_taxonomy(result))
# Delete files used for tests
file.remove(out_path)
})
test_that("Mothur classify.seqs *.tax.summary detailed parsing", {
raw_data <-
"taxlevel rankID taxon daughterlevels total A B C
0 0 Root 2 242 84 84 74
1 0.1 Bacteria 50 242 84 84 74
2 0.1.2 Actinobacteria 38 13 0 13 0
3 0.1.2.3 Actinomycetaceae-Bifidobacteriaceae 10 13 0 13 0
4 0.1.2.3.7 Bifidobacteriaceae 6 13 0 13 0
5 0.1.2.3.7.2 Bifidobacterium_choerinum_et_rel. 8 13 0 13 0
6 0.1.2.3.7.2.1 Bifidobacterium_angulatum_et_rel. 1 11 0 11 0
7 0.1.2.3.7.2.1.1 unclassified 1 11 0 11 0
8 0.1.2.3.7.2.1.1.1 unclassified 1 11 0 11 0
9 0.1.2.3.7.2.1.1.1.1 unclassified 1 11 0 11 0
10 0.1.2.3.7.2.1.1.1.1.1 unclassified 1 11 0 11 0
11 0.1.2.3.7.2.1.1.1.1.1.1 unclassified 1 11 0 11 0
12 0.1.2.3.7.2.1.1.1.1.1.1.1 unclassified 1 11 0 11 0
6 0.1.2.3.7.2.5 Bifidobacterium_longum_et_rel. 1 2 0 2 0
7 0.1.2.3.7.2.5.1 unclassified 1 2 0 2 0
8 0.1.2.3.7.2.5.1.1 unclassified 1 2 0 2 0
9 0.1.2.3.7.2.5.1.1.1 unclassified 1 2 0 2 0"
result <- parse_mothur_tax_summary(text = raw_data)
result_from_file <- parse_mothur_tax_summary(file = "example_data/mothur_summary.txt")
expect_equal(result, result_from_file)
expect_equal(length(result$taxa), 17)
expect_equal(length(roots(result)), 1)
expect_true(all(c("Bacteria", "Actinobacteria") %in% result$taxon_names()))
})
test_that("Mothur classify.seqs *.tax.summary simple parsing", {
raw_data <-
'taxon total A B C
"k__Bacteria";"p__Actinobacteria";"c__Actinobacteria";"o__Bifidobacteriales";"f__Bifidobacteriaceae";"g__Bifidobacterium";"s__"; 1 0 1 0
"k__Bacteria";"p__Actinobacteria";"c__Actinobacteria";"o__Bifidobacteriales";"f__Bifidobacteriaceae";"g__Bifidobacterium";"s__adolescentis"; 1 0 1 0
"k__Bacteria";"p__Actinobacteria";"c__Actinobacteria";"o__Bifidobacteriales";"f__Bifidobacteriaceae";"g__Bifidobacterium";"s__longum"; 1 0 1 0
'
result <- parse_mothur_tax_summary(text = raw_data)
expect_equal(length(result$taxa), 9)
expect_equal(length(roots(result)), 1)
expect_true(all(c("k__Bacteria", "p__Actinobacteria") %in% result$taxon_names()))
})
test_that("Newick parsing", {
expect_warning(result <- parse_newick("example_data/newick_example_1.txt"))
expect_equal(length(result$taxa), 21)
expect_equal(length(roots(result)), 2)
expect_true(all(c("node_1", "node_2") %in% result$taxon_names()))
})
test_that("Parsing the UNITE general release fasta", {
# Reading
seq_in_path <- "example_data/unite_general.fa"
result <- parse_unite_general(file = seq_in_path)
expect_equal(length(result$taxa), 183)
expect_equal(length(roots(result)), 1)
expect_equivalent(result$taxon_names()[result$data$tax_data$taxon_id[5]], "Orbilia_sp")
expect_equal(result$data$tax_data$organism[5], "Orbilia_sp")
expect_equal(result$data$tax_data$unite_seq[5], "CCAAATCATGTCTCCCGGCCGCAAGGCAGGTGCAGGCGTTTAACCCTTTGTGAACCAAAAAACCTTTCGCTTCGGCAGCAGCTCGGTTGGAGACAGCCTCTGTGTCAGCCTGCCGCTAGCACCAATTATCAAAACTTGCGGTTAGCAACATTGTCTGATTACCAAATTTTCGAATGAAAATCAAAACTTTCAACAACGGATCTCTTGGTTCCCGCATCGATGAAGAACGCAGCGAAACGCGATAGTTAATGTGAATTGCAGAATTCAGTGAATCATCGAGTCTTTGAACGCACATTGCGCCCATTGGTATTCCATTGGGCATGTCTGTTTGAGCGTCATTACAACCCTCGGTCACCACCGGTTTTGAGCGAGCAGGGTCTTCGGATCCAGCTGGCTTTAAAGTTGTAAGCTCTGCTGGCTGCTCGGCCCAACCAGAACATAGTAAAATCATGCTTGTTCAAGGTTCGCGGTCGAAGCGGTACGGCCTGAACAATACCTACCACCTCTTAGG")
# Check that the input can be replicated
seq_out_path <- "test_unite_output.fa"
write_unite_general(result, file = seq_out_path)
expect_equal(readLines(seq_out_path), readLines(seq_in_path))
expect_error(write_unite_general(result))
# Delete files used for tests
file.remove(seq_out_path)
})
test_that("Parsing the RDP fasta release", {
# Reading
seq_in_path <- "example_data/rdp_example.fa"
result <- parse_rdp(file = seq_in_path)
expect_equal(length(result$taxa), 26)
expect_equal(length(roots(result)), 1)
expect_equivalent(result$taxon_names()[result$data$tax_data$taxon_id[3]], "Saccharomyces")
expect_equal(result$data$tax_data$rdp_id[3], "S004468774")
expect_true(startsWith(result$data$tax_data$rdp_seq[3], "gtttgacctcaaatcaggtaggagtacccgctgaacttaagcatatcaataagcggaggaaaagaaaccaaccgggattg"))
# Check that the input can be replicated
seq_out_path <- "test_rdp_output.fa"
write_rdp(result, file = seq_out_path)
expect_equal(readLines(seq_out_path), readLines(seq_in_path))
expect_error(write_greengenes(result))
# Delete files used for tests
file.remove(seq_out_path)
})
test_that("Parsing the SILVA fasta release", {
# Reading
seq_in_path <- "example_data/silva_example.fa"
result <- parse_silva_fasta(file = seq_in_path)
expect_equal(length(result$taxa), 164)
expect_equal(length(roots(result)), 2)
expect_equivalent(result$taxon_names()[result$data$tax_data$taxon_id[5]], "Physalis peruviana")
expect_equal(result$data$tax_data$ncbi_id[5], "GEET01005309")
expect_true(startsWith(result$data$tax_data$silva_seq[5], "GAUGGAUGCCUUGGCUUCAUCAGGCGAAGAAGGACGCAGCAAGCUGCGAUAAGCUUCGGGGAGCGGCACGCACGCUUUGA"))
# Check that the input can be replicated
seq_out_path <- "test_rdp_output.fa"
write_silva_fasta(result, file = seq_out_path)
# expect_equal(readLines(seq_out_path)[c(-89, -2580)],
# readLines(seq_in_path)[c(-89, -2580)])
expect_error(write_greengenes(result))
# Delete files used for tests
file.remove(seq_out_path)
})
test_that("Parsing/writing the greengenes database", {
# Reading
tax_in_path <- "example_data/gg_tax_example.txt"
seq_in_path <- "example_data/gg_seq_example.fa"
result <- parse_greengenes(tax_file = tax_in_path, seq_file = seq_in_path)
expect_equal(length(result$taxa), 119)
expect_equal(length(roots(result)), 1)
expect_equivalent(result$taxon_names()[result$data$tax_data$taxon_id[5]], "Rhodobacteraceae")
expect_equal(result$data$tax_data$gg_id[5], "1111758")
expect_true(startsWith(result$data$tax_data$gg_seq[5], "TTAGAGTTTGATCCTGGCTCAGAACGAACGCTGGCGGCAGGCCTAACACATGCAAGTCGAGCGCGCCCTTCGGGGTGAGCGGCGGACGGGTGAGTAACGCGTGGGAACGTGCCCTCTTCTGCGGGATAGCC"))
# Check that the input can be replicated
tax_out_path <- "test_gg_output.txt"
seq_out_path <- "test_gg_output.fa"
write_greengenes(result, tax_file = tax_out_path, seq_file = seq_out_path)
expect_equal(readLines(tax_out_path), readLines(tax_in_path))
expect_equal(readLines(seq_out_path), readLines(seq_in_path))
expect_error(write_greengenes(result))
# Delete files used for tests
file.remove(tax_out_path)
file.remove(seq_out_path)
})
test_that("Converting to phyloseq", {
# test round-trip
library(phyloseq)
data(enterotype)
expect_warning(x <- parse_phyloseq(enterotype))
y <- as_phyloseq(x)
expect_equivalent(enterotype, y)
}) |
# Set working directory to the folder which contains results of
# computational approaches on SBS1-SBS5-correlated data sets
# before running this script.
#
# PATH <- paste0("<path_to_results_on_SBS1-SBS5-correlated_datasets>")
#
# setwd(PATH)
topLevelFolder4Data <- "./0.Input_datasets"
topLevelFolder4Run <- "./2a.Full_output_K_unspecified"
## Load required packages
library(ICAMS)
library(SynSigRun)
library(hdp)
## Specify slopes and Rsqs for the datasets
slopes <- c(0.1, 0.5, 1, 2, 10)
Rsqs <- c(0.1, 0.2, 0.3, 0.6)
## Dataset names "S.<slope>.Rsq.<Rsq>" for every slope/Rsq pair, with Rsq
## varying fastest -- same order as the original nested loop, but built
## vectorized instead of growing a vector with c() inside a loop.
datasetNames <- paste0("S.",
                       rep(slopes, each = length(Rsqs)),
                       ".Rsq.",
                       rep(Rsqs, times = length(slopes)))
## Specify 20 seeds used in software running
seedsInUse <- c(1, 691, 1999, 3511, 8009,
                9902, 10163, 10509, 14476, 20897,
                27847, 34637, 49081, 75679, 103333,
                145879, 200437, 310111, 528401, 1076753)
## One hdp run per (seed, dataset) pair. Finished runs (those that already
## wrote inferred.exposures.csv) are skipped, so the script can be safely
## restarted after an interruption.
for (seedInUse in seedsInUse) {
  for (datasetName in datasetNames) {
    out.dir <- paste0(topLevelFolder4Run, "/hdp.results/", datasetName, "/seed.", seedInUse)
    if (file.exists(paste0(out.dir, "/inferred.exposures.csv"))) next
    message("\n\n########################################################\n\n")
    ## Fixed: the original message pasted the dataset name directly after
    ## "K.guess = 10" with no separator.
    message(paste0("Begin running catalog with K.guess = 10 on dataset ", datasetName, " using seed ", seedInUse, "...\n"))
    message("\n\n########################################################\n\n")
    Runhdp(
      input.catalog = paste0(topLevelFolder4Data, "/", datasetName, "/ground.truth.syn.catalog.csv"),
      out.dir = out.dir,
      CPU.cores = 4,
      K.guess = 10,
      multi.types = FALSE,
      remove.noise = TRUE,
      seedNumber = seedInUse,
      post.burnin = 20000,
      post.n = 1000,
      post.space = 50,
      overwrite = TRUE) # TRUE, not T: T is reassignable and unsafe
  }
}
| /data-raw/Wu_2022/1_scripts.for.SBS1SBS5/2_running_approaches_without_knowing_K/Run.hdp.R | no_license | WuyangFF95/SynSigRun | R | false | false | 1,777 | r |
# Set working directory to the folder which contains results of
# computational approaches on SBS1-SBS5-correlated data sets
# before running this script.
#
# PATH <- paste0("<path_to_results_on_SBS1-SBS5-correlated_datasets>")
#
# setwd(PATH)
topLevelFolder4Data <- "./0.Input_datasets"
topLevelFolder4Run <- "./2a.Full_output_K_unspecified"
## Load required packages
library(ICAMS)
library(SynSigRun)
library(hdp)
## Specify slopes and Rsqs for the datasets
slopes <- c(0.1, 0.5, 1, 2, 10)
Rsqs <- c(0.1, 0.2, 0.3, 0.6)
## Dataset names "S.<slope>.Rsq.<Rsq>" for every slope/Rsq pair, with Rsq
## varying fastest -- same order as the original nested loop, but built
## vectorized instead of growing a vector with c() inside a loop.
datasetNames <- paste0("S.",
                       rep(slopes, each = length(Rsqs)),
                       ".Rsq.",
                       rep(Rsqs, times = length(slopes)))
## Specify 20 seeds used in software running
seedsInUse <- c(1, 691, 1999, 3511, 8009,
                9902, 10163, 10509, 14476, 20897,
                27847, 34637, 49081, 75679, 103333,
                145879, 200437, 310111, 528401, 1076753)
## One hdp run per (seed, dataset) pair. Finished runs (those that already
## wrote inferred.exposures.csv) are skipped, so the script can be safely
## restarted after an interruption.
for (seedInUse in seedsInUse) {
  for (datasetName in datasetNames) {
    out.dir <- paste0(topLevelFolder4Run, "/hdp.results/", datasetName, "/seed.", seedInUse)
    if (file.exists(paste0(out.dir, "/inferred.exposures.csv"))) next
    message("\n\n########################################################\n\n")
    ## Fixed: the original message pasted the dataset name directly after
    ## "K.guess = 10" with no separator.
    message(paste0("Begin running catalog with K.guess = 10 on dataset ", datasetName, " using seed ", seedInUse, "...\n"))
    message("\n\n########################################################\n\n")
    Runhdp(
      input.catalog = paste0(topLevelFolder4Data, "/", datasetName, "/ground.truth.syn.catalog.csv"),
      out.dir = out.dir,
      CPU.cores = 4,
      K.guess = 10,
      multi.types = FALSE,
      remove.noise = TRUE,
      seedNumber = seedInUse,
      post.burnin = 20000,
      post.n = 1000,
      post.space = 50,
      overwrite = TRUE) # TRUE, not T: T is reassignable and unsafe
  }
}
|
## Monte Carlo estimation of the degrees of freedom (df) of PLSR models
## with 1..ncomp latent variables, following the covariance-penalty /
## parametric-bootstrap approach of Efron (2004): df = sum_i cov(y_i,
## yhat_i) / s2, with the covariances estimated from B simulated response
## vectors built around a low-bias ("big") model with maxlv LVs.
## Arguments: X, Y = training data; ncomp = max LVs evaluated; algo = PLS
## algorithm (default pls_kernel); maxlv = size of the low-bias model used
## to estimate mu and s2; B = bootstrap replicates; seed = RNG seed for the
## simulated noise; print = show progress; ... passed to plsr().
## Returns list(df, cov), where df has length ncomp + 1 (the leading 1 is
## for the 0-LV model).
dfplsr_cov <- function(
X, Y, ncomp, algo = NULL,
maxlv = 50,
B = 30, seed = NULL,
print = TRUE,
...
) {
## Default PLS algorithm.
if(is.null(algo))
algo <- pls_kernel
X <- .matrix(X)
zdim <- dim(X)
n <- zdim[1]
p <- zdim[2]
## Cap the requested model sizes by what the data can support.
ncomp <- min(ncomp, n, p)
maxlv <- min(maxlv, n, p)
#fm <- plsr(X, Y, X, Y, ncomp = ncomp, algo = algo, ...)
#z <- mse(fm, ~ ncomp, digits = 25)
#ssr <- z$nbpred * z$msep
## Same as
## fm <- algo(X, Y, ncomp = ncomp, ...)
## ssr <- numeric()
## for(a in seq_len(ncomp))
##   ssr[a] <- sum(.resid.pls(fm, Y, ncomp = a)$r^2)
## End
## Computation of mu and s2 for the parametric bootstrap
## ==> estimated from a low biased model
## ----------- s2
## Below s2 is not an unbiased estimate of sigma2 for the model
## (this unbiased estimate would need to know df, which is actually unknown)
## This is not important here, since the amount put in
## the simulated variations is counter-balanced by the covariances.
## Efron 2004 p. 620 is not clear how he calculates s2
## "obtained from residuals of some 'big' model presumed
## to have negligible bias"
k <- maxlv
fm <- plsr(X, Y, X, Y, ncomp = k, algo = algo, ...)
z <- mse(fm, ~ ncomp, digits = 25)
ssr <- z$nbpred * z$msep
## ssr[k + 1]: the index offset suggests position 1 holds the 0-LV model
## (consistent with the z$ncomp >= 1 filter further below).
s2 <- ssr[k + 1] / (n - k - 1)
## ----------- mu
## In Efron 2004, mu is estimated for each number of LV
## This is a simplification here: mu is computed only one time
## from a low-biased model
#k <- min(20, maxlv)
mu <- .resid.pls(fm, Y, ncomp = k)$fit
## End
## B simulated response vectors: mu plus iid Gaussian noise of variance s2.
zY <- matrix(rep(mu, B), nrow = n, ncol = B, byrow = FALSE)
set.seed(seed = seed)
zE <- matrix(rnorm(n * B, sd = s2^.5), nrow = n, ncol = B)
## Re-randomize the RNG state so the seed does not leak to the caller.
set.seed(seed = NULL)
zY <- zY + zE
## Fit[, j, a] = fitted values for bootstrap replicate j with a LVs.
Fit <- array(dim = c(n, B, ncomp))
for(j in seq_len(B)) {
if(print)
cat(j, " ")
z <- plsr(X, zY[, j], X, zY[, j], ncomp = ncomp, algo = algo, ...)$fit
zfit <- z[z$ncomp >= 1, ]
Fit[, j, ] <- zfit[, ncol(zfit)]
}
if(print)
cat("\n\n")
## Per-observation covariance between simulated responses and fitted
## values, for each model size a.
Cov <- matrix(nrow = n, ncol = ncomp)
for(a in seq_len(ncomp))
for(i in seq_len(n))
Cov[i, a] <- cov(zY[i, ], Fit[i, , a])
## Note: `cov` below shadows stats::cov (its last use is just above).
cov <- colSums(Cov)
df <- c(1, cov / s2)
list(df = df, cov = cov)
}
| /R/dfplsr_cov.R | no_license | mlesnoff/rnirs | R | false | false | 2,447 | r | dfplsr_cov <- function(
X, Y, ncomp, algo = NULL,
maxlv = 50,
B = 30, seed = NULL,
print = TRUE,
...
) {
if(is.null(algo))
algo <- pls_kernel
X <- .matrix(X)
zdim <- dim(X)
n <- zdim[1]
p <- zdim[2]
ncomp <- min(ncomp, n, p)
maxlv <- min(maxlv, n, p)
#fm <- plsr(X, Y, X, Y, ncomp = ncomp, algo = algo, ...)
#z <- mse(fm, ~ ncomp, digits = 25)
#ssr <- z$nbpred * z$msep
## Same as
## fm <- algo(X, Y, ncomp = ncomp, ...)
## ssr <- numeric()
## for(a in seq_len(ncomp))
## ssr[a] <- sum(.resid.pls(fm, Y, ncomp = a)$r^2)
## End
## COmputation of mu and s2 for the parametric bootstrap
## ==> estimated from a low biased model
## ----------- s2
## Below s2 is not an unbiased estimate of sigma2 for the model
## (this unbiased estimate would need to know df, which is actually unknown)
## This is not important here, since the amount put in
## the simulated variations is counter-balanced by the covariances.
## Efron 2004 p. 620 is not clear how he calculates s2
## "obtained from residuals of some 'big' model presumed
## to have negligible bias"
k <- maxlv
fm <- plsr(X, Y, X, Y, ncomp = k, algo = algo, ...)
z <- mse(fm, ~ ncomp, digits = 25)
ssr <- z$nbpred * z$msep
s2 <- ssr[k + 1] / (n - k - 1)
## ----------- mu
## In Efron 2004, mu is estimated for each number of LV
## This is a simplification here: mu is computed only one time
## from a low-biased model
#k <- min(20, maxlv)
mu <- .resid.pls(fm, Y, ncomp = k)$fit
## End
zY <- matrix(rep(mu, B), nrow = n, ncol = B, byrow = FALSE)
set.seed(seed = seed)
zE <- matrix(rnorm(n * B, sd = s2^.5), nrow = n, ncol = B)
set.seed(seed = NULL)
zY <- zY + zE
Fit <- array(dim = c(n, B, ncomp))
for(j in seq_len(B)) {
if(print)
cat(j, " ")
z <- plsr(X, zY[, j], X, zY[, j], ncomp = ncomp, algo = algo, ...)$fit
zfit <- z[z$ncomp >= 1, ]
Fit[, j, ] <- zfit[, ncol(zfit)]
}
if(print)
cat("\n\n")
Cov <- matrix(nrow = n, ncol = ncomp)
for(a in seq_len(ncomp))
for(i in seq_len(n))
Cov[i, a] <- cov(zY[i, ], Fit[i, , a])
cov <- colSums(Cov)
df <- c(1, cov / s2)
list(df = df, cov = cov)
}
|
\name{xmp15.06}
\alias{xmp15.06}
\docType{data}
\title{data from Example 15.6}
\description{
The \code{xmp15.06} data frame has 28 rows and 1 column.
}
\format{
This data frame contains the following columns:
\describe{
\item{metabolc}{
a numeric vector
}
}
}
\source{
Devore, J. L. (2003) \emph{Probability and Statistics for Engineering and the Sciences (6th ed)}, Duxbury
}
\examples{
str(xmp15.06)
}
\keyword{datasets}
| /man/xmp15.06.Rd | no_license | dmbates/Devore6 | R | false | false | 448 | rd | \name{xmp15.06}
\alias{xmp15.06}
\docType{data}
\title{data from Example 15.6}
\description{
The \code{xmp15.06} data frame has 28 rows and 1 columns.
}
\format{
This data frame contains the following columns:
\describe{
\item{metabolc}{
a numeric vector
}
}
}
\source{
Devore, J. L. (2003) \emph{Probability and Statistics for Engineering and the Sciences (6th ed)}, Duxbury
}
\examples{
str(xmp15.06)
}
\keyword{datasets}
|
# This function calculates the Schwarz information criterion (SIC/BIC).
#' Schwarz Information Criterion (SIC)
#'
#' Computes the Schwarz (Bayesian) information criterion for a fitted
#' vector autoregression: n * (log(det(covmat)) + m) + p * m^2 * log(n).
#' Note the log(n) penalty term: this is SIC/BIC, not AIC as the previous
#' documentation stated.
#'
#' @param covmat covariance matrix of innovations
#' @param p order of autoregression
#' @param m dimension of the variable
#' @param n number of observations
#'
#' @return A single numeric value; smaller values indicate a preferred model.
#' @export
#'
#' @examples
#' SIC(diag(2), p = 1, m = 2, n = 100)
SIC <- function(covmat, p, m, n) {
  n * (log(det(covmat)) + m) + p * m^2 * log(n)
}
| /R/SIC.R | no_license | ruizhima/AICC | R | false | false | 339 | r | # This function calculate AIC
#' Schwarz Information Criterion (SIC)
#'
#' Computes the Schwarz (Bayesian) information criterion for a fitted
#' vector autoregression: n * (log(det(covmat)) + m) + p * m^2 * log(n).
#' Note the log(n) penalty term: this is SIC/BIC, not AIC as the previous
#' documentation stated.
#'
#' @param covmat covariance matrix of innovations
#' @param p order of autoregression
#' @param m dimension of the variable
#' @param n number of observations
#'
#' @return A single numeric value; smaller values indicate a preferred model.
#' @export
#'
#' @examples
#' SIC(diag(2), p = 1, m = 2, n = 100)
SIC <- function(covmat, p, m, n) {
  n * (log(det(covmat)) + m) + p * m^2 * log(n)
}
|
# R source for the parallel simulation driver, stored as a single string
# (presumably written to a file or evaluated by the surrounding package --
# confirm usage before relying on it).
# NOTE(review): the name `body` shadows base::body() while this object is
# in scope.
body <- '
#---------------------------------------\
# Log file |
#---------------------------------------/
writeLines(c(""), "log.txt")
wlog("Starting simulations")
wlog("Cluster intialized successfully!")
#---------------------------------------|
#---------------------------------------\
# Global libraries |
#---------------------------------------/
packages = c("base")
sapply(packages, function(l){library(l, character.only = TRUE)})
#---------------------------------------|
#---------------------------------------\
# Parameters |
#---------------------------------------/
set.seed(0)
R = 3 # runs
methods = c("mean", "median", "sd")
N = c(20, 25, 30, 100)
#---------------------------------------|
#---------------------------------------\
# Generate data |
#---------------------------------------/
wlog("Generating data...")
generate_data(R, N) #This
#---------------------------------------|
#---------------------------------------\
# Parallel loop |
#---------------------------------------/
wlog("Entering foreach loop")
results = foreach(run = 1:R, .packages = packages, .combine = "rbind")%:%
foreach(n = N, .packages = packages, .combine = "rbind")%:%
foreach(method = methods, .packages=packages, .combine = "rbind")%dopar%{
# Load data
load(paste0("data/", run, "_", n,".RData"))
out = compute(x, method)
wlog("method: ", method, " run: ", run, " output: ", out)
data.frame(method = method,
run = run,
N = n,
output = out)
}
wlog("Simulations finished!")
#---------------------------------------|
'
| /R/body.R | no_license | jlaria/p4R | R | false | false | 1,730 | r | body = '
#---------------------------------------\
# Log file |
#---------------------------------------/
writeLines(c(""), "log.txt")
wlog("Starting simulations")
wlog("Cluster intialized successfully!")
#---------------------------------------|
#---------------------------------------\
# Global libraries |
#---------------------------------------/
packages = c("base")
sapply(packages, function(l){library(l, character.only = TRUE)})
#---------------------------------------|
#---------------------------------------\
# Parameters |
#---------------------------------------/
set.seed(0)
R = 3 # runs
methods = c("mean", "median", "sd")
N = c(20, 25, 30, 100)
#---------------------------------------|
#---------------------------------------\
# Generate data |
#---------------------------------------/
wlog("Generating data...")
generate_data(R, N) #This
#---------------------------------------|
#---------------------------------------\
# Parallel loop |
#---------------------------------------/
wlog("Entering foreach loop")
results = foreach(run = 1:R, .packages = packages, .combine = "rbind")%:%
foreach(n = N, .packages = packages, .combine = "rbind")%:%
foreach(method = methods, .packages=packages, .combine = "rbind")%dopar%{
# Load data
load(paste0("data/", run, "_", n,".RData"))
out = compute(x, method)
wlog("method: ", method, " run: ", run, " output: ", out)
data.frame(method = method,
run = run,
N = n,
output = out)
}
wlog("Simulations finished!")
#---------------------------------------|
'
|
#' Get a dataset with flags indicating if patient has a diagnosis
#'
#' Builds per-patient 0/1 flags for each diagnosis group based on icd9,
#' icd10 and snomed codes. A flag of 1 indicates the patient had at least
#' one qualifying diagnosis, 0 indicates they did not. A missing flag means
#' the patient had no diagnosis at all in the time frame specified.
#' All arguments are strings.
#'
#' Relies on a live database connection object `sm` and a `drop()` helper
#' being available in the calling environment.
#'
#' @param supermart Name of the supermart where the data is stored. Eg. supermart_392
#' @param sandbox Name of the writable location usually sandbox where cohort is saved
#' @param cohort Name of the cohort with explorys_patient_id's.
#' @param dx_codes_df Name of the dataframe containing the icd 9/10/snomed codes.
#'   This dataframe must be available in the local environment as well as on the sandbox.
#' @param dx_codes_col Name of the column in the dx_codes_df specified. This column has the icd9/10/snomed codes
#' @param grouping_col Name of the column that has the groups of diagnoses of interest
#' @param index_col Name of the column with the index dates
#' @param date_condition The sql format date condition. For example "DATE(diagnosis_date) BETWEEN '2019-01-01' AND '2020-01-01'"
#' @param extra_grouping_vars Character string of additional variables separated by comma to grouping variables
#' @return A dataframe with indicators of diagnoses
#' @export
#'
get_diagnosis <- function(supermart, sandbox, cohort, dx_codes_df, dx_codes_col, grouping_col, index_col,
date_condition, extra_grouping_vars = ''){
# Optional extra grouping columns, appended to every SELECT / GROUP BY.
if (extra_grouping_vars == '') {
group_var <- ''
} else if (!is.na(extra_grouping_vars)) {
group_var <- paste0(", ", extra_grouping_vars)
}
# NOTE(review): all SQL below is built by glue string interpolation, so
# argument values are injected verbatim into the queries. Only trusted
# identifiers/conditions may be passed by callers.
# First get snomed and icd codes into one column so it is easy to search.
sql1 <- glue::glue("
CREATE TABLE {sandbox}.temp_diags AS
SELECT d.explorys_patient_id {group_var}, DATE(d.diagnosis_date) AS diagnosis_date, REPLACE(icd_code, '.', '') AS diag_code,
{index_col}, 'icd' AS code_type
FROM {supermart}.v_diagnosis AS d
INNER JOIN {sandbox}.{cohort} AS c
ON d.explorys_patient_id = c.explorys_patient_id
WHERE d.icd_code IS NOT NULL
UNION
SELECT vd.explorys_patient_id {group_var}, DATE(vd.diagnosis_date) AS diagnosis_date,
vtsi.snomed_id AS diag_code, {index_col}, 'snomed' AS code_type
FROM {supermart}.v_diagnosis vd
INNER JOIN {supermart}.v_tokenized_snomed_ids vtsi
ON vtsi.snomed_join_id = vd.snomed_join_id
INNER JOIN {sandbox}.{cohort} AS c
ON vd.explorys_patient_id = c.explorys_patient_id",
sandbox = sandbox,
cohort = cohort,
index_col = index_col,
supermart = supermart)
# Recreate the staging table of all diagnosis events for the cohort.
drop(sandbox, table = "temp_diags")
dbSendUpdate(sm, sql1)
# Builds "MAX(grp1) AS grp1, MAX(grp2) AS grp2, ..." for the outer SELECT,
# one term per unique value of the grouping column.
select_max <- function(){
groups <- get(dx_codes_df)[grouping_col]
vec <- as.list(unique(groups))
paste0('MAX(', unlist(vec, use.names = FALSE), ') AS ', unlist(vec, use.names = FALSE), collapse = ', ')
}
# Returns a quoted, comma-separated list of codes for one group, e.g.
# "'4280','I50'". `grouping_col` resolves from the enclosing function call.
select_sql_str <- function(table, var, str) {
sql <- glue::glue(
"SELECT DISTINCT {var} FROM {sandbox_name}.{tbl} WHERE {grouping_col} = '{string}' ",
string = str,
tbl = table,
var = var,
sandbox_name = sandbox
)
codes <- dbGetQuery(sm, sql)
paste0("'",
unlist(codes, use.names = FALSE),
"'", collapse = ",")
}
# Builds one "CASE WHEN diag_code IN (...) THEN 1 ELSE 0 END AS <grp>"
# clause per diagnosis group.
make_case <- function(){
groups <- get(dx_codes_df)[grouping_col]
vec <- pull(unique(groups))
statement <- sapply(vec, function(vec) {
paste0('CASE WHEN diag_code IN (',
select_sql_str(dx_codes_df, dx_codes_col, vec),
") THEN 1 ELSE 0 END AS ",
vec, collapse = ', ')}
)
paste0(statement, collapse = ' ,')
}
# One row per patient (plus any extra grouping vars), with a 0/1 flag per
# diagnosis group, restricted to events matching date_condition.
sql3 <- glue::glue("
SELECT explorys_patient_id {group_var},
{select_max_statement}
FROM
(SELECT DISTINCT explorys_patient_id {group_var},
{case}
FROM {sandbox}.temp_diags
WHERE {date_condition}) AS a
GROUP BY explorys_patient_id {group_var}
",
case = make_case(),
select_max_statement = select_max(),
date_condition = date_condition)
results <- dbGetQuery(sm, sql3)
return(results)
}
# Testing
# library(RJDBC)
# library(magrittr)
# library(tidyselect)
# library(tidyr)
# library(dplyr)
# library(stringr)
# library(data.table)
#
# supermart = 'supermart_472'
# sandbox = 'SANDBOX_GENESIS_BMS_COVID'
# cohort = 'cohort_pts'
# dx_codes_df = 'dx_codes'
# dx_codes_col = 'code_clean'
# grouping_col = 'grp'
# index_col = 'index_dt'
# date_condition = '(DATE(diagnosis_date) BETWEEN CAST(index_dt AS DATE) - 180 AND CAST(index_dt AS DATE))'
# extra_grouping_vars = 'pt_group'
#
# dx_codes = readxl::read_excel('Y:/genesis-research_global/BMS_COVID/data/Charlson_Comorbidity.xlsx',
# sheet = 'Sheet2',
# col_names = TRUE) %>%
# mutate(x = str_replace(Code, '\\.','')) %>%
# mutate(code_clean = str_replace(x, '%', ''))
#
#
# drop("SANDBOX_GENESIS_BMS_COVID", "dx_codes")
# dbWriteTable(sm, "SANDBOX_GENESIS_BMS_COVID.dx_codes", dx_codes)
| /R/get_diagnosis.R | no_license | aminyakubu/explorys | R | false | false | 6,122 | r | #' Get a dataset with flags indicating if patient has a diagnosis
#'
#' This function provides a dataframe with flags indicating if a patient has a diagnosis or not based on icd9, icd10 and snomed codes.
#' The flags are 1, which indicates the patient had the diagnosis, or 0, which indicates the patient did not. A missing flag means that
#' the patient did not get any diagnosis at all in the time frame specified. All arguments are strings.
#'
#' @param supermart Name of the supermart where the data is stored. Eg. supermart_392
#' @param sandbox Name of the writable location usually sandbox where cohort is saved
#' @param cohort Name of the cohort with explorys_patient_id's.
#' @param dx_codes_df Name of the dataframe containing the icd 9/10/snomed codes. This dataframe must be available in local environment as well as
#' on the sandbox
#' @param dx_codes_col Name of the column in the dx_codes_df specified. This column has the icd9/10/snomed codes
#' @param grouping_col Name of the column that has the groups of diagnoses of interest
#' @param index_col Name of the column with the index dates
#' @param extra_grouping_vars Character string of additional variables separated by comma to grouping variables
#' @param date_condition The sql format date condition. For example "DATE(diagnosis_date) BETWEEN '2019-01-01' AND '2020-01-01'"
#' @return A dataframe with indicators of diagnoses
#' @export
#'
get_diagnosis <- function(supermart, sandbox, cohort, dx_codes_df, dx_codes_col, grouping_col, index_col,
                          date_condition, extra_grouping_vars = ''){
  ## Optional extra grouping clause appended to every SELECT / GROUP BY below.
  ## NA is treated the same as '' so that `group_var` is always defined (the
  ## original if / else-if chain assigned nothing when the value was NA,
  ## which caused an "object 'group_var' not found" error later on).
  if (is.na(extra_grouping_vars) || extra_grouping_vars == '') {
    group_var <- ''
  } else {
    group_var <- paste0(", ", extra_grouping_vars)
  }
  ## Stage 1: put ICD and SNOMED codes into one column (diag_code) so a
  ## single IN (...) search covers both coding systems.
  sql1 <- glue::glue("
  CREATE TABLE {sandbox}.temp_diags AS
  SELECT d.explorys_patient_id {group_var}, DATE(d.diagnosis_date) AS diagnosis_date, REPLACE(icd_code, '.', '') AS diag_code,
  {index_col}, 'icd' AS code_type
  FROM {supermart}.v_diagnosis AS d
  INNER JOIN {sandbox}.{cohort} AS c
  ON d.explorys_patient_id = c.explorys_patient_id
  WHERE d.icd_code IS NOT NULL
  UNION
  SELECT vd.explorys_patient_id {group_var}, DATE(vd.diagnosis_date) AS diagnosis_date,
  vtsi.snomed_id AS diag_code, {index_col}, 'snomed' AS code_type
  FROM {supermart}.v_diagnosis vd
  INNER JOIN {supermart}.v_tokenized_snomed_ids vtsi
  ON vtsi.snomed_join_id = vd.snomed_join_id
  INNER JOIN {sandbox}.{cohort} AS c
  ON vd.explorys_patient_id = c.explorys_patient_id",
  sandbox = sandbox,
  cohort = cohort,
  index_col = index_col,
  supermart = supermart)
  ## `drop()` and the connection object `sm` are defined elsewhere in the
  ## package / session.  Remove any stale temp table before recreating it.
  drop(sandbox, table = "temp_diags")
  dbSendUpdate(sm, sql1)
  ## Build "MAX(grp) AS grp, ..." — one term per unique value of grouping_col.
  select_max <- function(){
    groups <- get(dx_codes_df)[grouping_col]
    vec <- as.list(unique(groups))
    statement <- paste0('MAX(', unlist(vec, use.names = FALSE), ') AS ', unlist(vec, use.names = FALSE), collapse = ', ')
    return(statement)
  }
  ## Fetch the codes belonging to one group from the sandbox copy of
  ## dx_codes_df and quote them for a SQL IN (...) list.
  ## Note: `grouping_col` is interpolated from the enclosing environment.
  select_sql_str <- function(table, var, str) {
    sql <- glue::glue(
      "SELECT DISTINCT {var} FROM {sandbox_name}.{tbl} WHERE {grouping_col} = '{string}' ",
      string = str,
      tbl = table,
      var = var,
      sandbox_name = sandbox
    )
    list <- dbGetQuery(sm, sql)
    paste0("'",
           unlist(list, use.names = FALSE),
           "'", collapse = ",")
  }
  ## Build one "CASE WHEN diag_code IN (...) THEN 1 ELSE 0 END AS grp"
  ## expression per code group.
  make_case <- function(){
    groups <- get(dx_codes_df)[grouping_col]
    vec <- pull(unique(groups))
    # Lambda argument is named `grp` so it does not shadow `vec`.
    statement <- sapply(vec, function(grp) {
      paste0('CASE WHEN diag_code IN (',
             select_sql_str(dx_codes_df, dx_codes_col, grp),
             ") THEN 1 ELSE 0 END AS ",
             grp, collapse = ', ')
    })
    return(paste0(statement, collapse = ' ,'))
  }
  ## Stage 2: one row per patient (plus extra grouping vars) carrying a 0/1
  ## flag per diagnosis group, restricted to the requested date window.
  sql3 <- glue::glue("
  SELECT explorys_patient_id {group_var},
  {select_max_statement}
  FROM
  (SELECT DISTINCT explorys_patient_id {group_var},
  {case}
  FROM {sandbox}.temp_diags
  WHERE {date_condition}) AS a
  GROUP BY explorys_patient_id {group_var}
  ",
  case = make_case(),
  select_max_statement = select_max(),
  date_condition = date_condition)
  results <- dbGetQuery(sm, sql3)
  return(results)
}
# Testing
# library(RJDBC)
# library(magrittr)
# library(tidyselect)
# library(tidyr)
# library(dplyr)
# library(stringr)
# library(data.table)
#
# supermart = 'supermart_472'
# sandbox = 'SANDBOX_GENESIS_BMS_COVID'
# cohort = 'cohort_pts'
# dx_codes_df = 'dx_codes'
# dx_codes_col = 'code_clean'
# grouping_col = 'grp'
# index_col = 'index_dt'
# date_condition = '(DATE(diagnosis_date) BETWEEN CAST(index_dt AS DATE) - 180 AND CAST(index_dt AS DATE))'
# extra_grouping_vars = 'pt_group'
#
# dx_codes = readxl::read_excel('Y:/genesis-research_global/BMS_COVID/data/Charlson_Comorbidity.xlsx',
# sheet = 'Sheet2',
# col_names = TRUE) %>%
# mutate(x = str_replace(Code, '\\.','')) %>%
# mutate(code_clean = str_replace(x, '%', ''))
#
#
# drop("SANDBOX_GENESIS_BMS_COVID", "dx_codes")
# dbWriteTable(sm, "SANDBOX_GENESIS_BMS_COVID.dx_codes", dx_codes)
|
# Purpose : Fit/predict distribution of soil types (memberships);
# Maintainer : Tomislav Hengl (tom.hengl@wur.nl)
# Contributions : Bas Kempen (bas.kempen@wur.nl); Dainius Masiliūnas (dainius.masiliunas@wur.nl)
# Dev Status : Pre-Alpha
# Note : if the regression model is difficult to fit, it might lead to artifacts;
# Fit a supervised fuzzy kmeans model and predict memberships:
setMethod("spfkm", signature(formulaString = "formula"), function(formulaString, observations, covariates, class.c = NULL, class.sd = NULL, fuzzy.e = 1.2){
  ## generate formula if missing:
  if(missing(formulaString)) {
    formulaString <- as.formula(paste(names(observations)[1], "~", paste(names(covariates), collapse="+"), sep=""))
  }
  ## check the formula string:
  if(!plyr::is.formula(formulaString)){
    stop("'formulaString' object of class 'formula' required")
  }
  ## get regular data.frames from the input;
  ## inherits() is used instead of `class(x) == "..."`, which is unreliable
  ## whenever class() returns more than one element.
  if (inherits(observations, "SpatialPointsDataFrame"))
  {
    obs.df = observations@data
  } else if (is.data.frame(observations)) {
    obs.df = observations
  } else {
    stop("'observations' must be a SpatialPointsDataFrame or a regular data.frame")
  }
  if (inherits(covariates, "SpatialPixelsDataFrame"))
  {
    cov.df = covariates@data
  } else if (is.data.frame(covariates)) {
    cov.df = covariates
  } else {
    stop("'covariates' must be a SpatialPixelsDataFrame or a regular data.frame")
  }
  ## selected variables: target variable and the covariate columns it uses
  tv = all.vars(formulaString)[1]
  sel = names(cov.df) %in% all.vars(formulaString)[-1]
  if(all(sel==FALSE)|length(sel)==0){
    stop("None of the covariates in the 'formulaString' matches the column names in the 'covariates' object")
  }
  ## if available, use class centres:
  check_tc <- !is.null(class.c)&!is.null(class.sd)
  if(check_tc){
    ## is.matrix() instead of class(x)=="matrix": since R 4.0 class() on a
    ## matrix returns c("matrix", "array"), which breaks the if() condition.
    if(!is.matrix(class.c)){ stop("Object of type 'matrix' with column names for covariates and row names corresponding to the class names required") }
    if(!is.matrix(class.sd)){ stop("Object of type 'matrix' with column names for covariates and row names corresponding to the class names required") }
    mout = list(NULL)
  }
  ## otherwise, estimate class centres using the multinomial logistic regression:
  else {
    message("Trying to estimate the class centres using the 'multinom' method...")
    ## multinomial logistic regression:
    rout <- spmultinom(formulaString=formulaString, observations, covariates, class.stats=TRUE, predict.probs=FALSE)
    mout = rout$model
    if(length(unique(rout$fit))<2){ stop("Predictions resulted in <2 classes. See ?multinom for more info") }
    class.c = rout$class.c
    class.sd = rout$class.sd
  }
  cl <- as.list(row.names(class.c))
  dsf <- NULL
  ## derive standardized squared distances to every class centre in feature space:
  for(c in unlist(cl)){
    dsf[[c]] <- data.frame(lapply(names(cov.df)[sel], FUN=function(x){rep(NA, length(cov.df[,1]))}))
    names(dsf[[c]]) <- names(cov.df)[sel]
    for(j in names(cov.df)[sel]){
      dsf[[c]][,j] <- ((cov.df[,j]-class.c[c,j])/class.sd[c,j])^2
    }
  }
  ## sum up distances per class:
  ds <- NULL
  ds <- lapply(dsf, FUN=function(x){sqrt(rowSums(x, na.rm=TRUE, dims=1))})
  names(ds) <- unlist(cl)
  ds <- data.frame(ds)
  ## total sum (normalizing constant of the fuzzy-kmeans membership formula):
  tt <- rowSums(ds^(-2/(fuzzy.e-1)), na.rm=TRUE, dims=1)
  ## derive the fuzzy membership:
  mm <- cov.df[1]
  for(c in unlist(cl)){
    mm[,c] <- (ds[,c]^(-2/(fuzzy.e-1))/tt)
  }
  mm[,names(cov.df)[1]] <- NULL
  ## Derive the dominant class:
  maxm <- sapply(data.frame(t(as.matrix(mm))), FUN=function(x){max(x, na.rm=TRUE)})
  ## class having the highest membership
  cout <- NULL
  for(c in unlist(cl)){
    cout[which(mm[,c] == maxm)] <- c
  }
  cout <- as.factor(cout)
  ## construct a map: overlay observations and covariates:
  if (inherits(observations, "SpatialPointsDataFrame"))
  {
    pm <- covariates[1]
    pm@data[,tv] <- cout
    pm@data[,names(covariates)[1]] <- NULL
    ov <- over(observations, pm)
  } else {
    pm <- data.frame(cout)
    names(pm) <- tv
    ov <- cbind(obs.df, cov.df[complete.cases(cov.df),])
  }
  sel.c <- !is.na(ov[,tv]) & !is.na(obs.df[,tv])
  ## kappa statistics (only when the optional packages are installed):
  if(requireNamespace("mda", quietly = TRUE)&requireNamespace("psych", quietly = TRUE)){
    cf <- mda::confusion(ov[sel.c,tv], as.character(obs.df[sel.c,tv]))
    ## remove missing classes:
    a <- attr(cf, "dimnames")[[1]] %in% attr(cf, "dimnames")[[2]]
    b <- attr(cf, "dimnames")[[2]] %in% attr(cf, "dimnames")[[1]]
    c.kappa = psych::cohen.kappa(cf[a,b])
    message(paste("Estimated Cohen Kappa (weighted):", signif(c.kappa$weighted.kappa, 4)))
  } else {
    cf <- NULL
  }
  ## create the output object:
  out <- new("SpatialMemberships", predicted = pm, model = mout, mu = mm, class.c = class.c, class.sd = class.sd, confusion = cf)
  return(out)
})
# end of script;
| /R/fkmeans.R | no_license | GreatEmerald/GSIF | R | false | false | 5,009 | r | # Purpose : Fit/predict distribution of soil types (memberships);
# Maintainer : Tomislav Hengl (tom.hengl@wur.nl)
# Contributions : Bas Kempen (bas.kempen@wur.nl); Dainius Masiliūnas (dainius.masiliunas@wur.nl)
# Dev Status : Pre-Alpha
# Note : if the regression model is difficult to fit, it might lead to artifacts;
# Fit a supervised fuzzy kmeans model and predict memberships:
setMethod("spfkm", signature(formulaString = "formula"), function(formulaString, observations, covariates, class.c = NULL, class.sd = NULL, fuzzy.e = 1.2){
  ## generate formula if missing:
  if(missing(formulaString)) {
    formulaString <- as.formula(paste(names(observations)[1], "~", paste(names(covariates), collapse="+"), sep=""))
  }
  ## check the formula string:
  if(!plyr::is.formula(formulaString)){
    stop("'formulaString' object of class 'formula' required")
  }
  ## get regular data.frames from the input;
  ## inherits() is used instead of `class(x) == "..."`, which is unreliable
  ## whenever class() returns more than one element.
  if (inherits(observations, "SpatialPointsDataFrame"))
  {
    obs.df = observations@data
  } else if (is.data.frame(observations)) {
    obs.df = observations
  } else {
    stop("'observations' must be a SpatialPointsDataFrame or a regular data.frame")
  }
  if (inherits(covariates, "SpatialPixelsDataFrame"))
  {
    cov.df = covariates@data
  } else if (is.data.frame(covariates)) {
    cov.df = covariates
  } else {
    stop("'covariates' must be a SpatialPixelsDataFrame or a regular data.frame")
  }
  ## selected variables: target variable and the covariate columns it uses
  tv = all.vars(formulaString)[1]
  sel = names(cov.df) %in% all.vars(formulaString)[-1]
  if(all(sel==FALSE)|length(sel)==0){
    stop("None of the covariates in the 'formulaString' matches the column names in the 'covariates' object")
  }
  ## if available, use class centres:
  check_tc <- !is.null(class.c)&!is.null(class.sd)
  if(check_tc){
    ## is.matrix() instead of class(x)=="matrix": since R 4.0 class() on a
    ## matrix returns c("matrix", "array"), which breaks the if() condition.
    if(!is.matrix(class.c)){ stop("Object of type 'matrix' with column names for covariates and row names corresponding to the class names required") }
    if(!is.matrix(class.sd)){ stop("Object of type 'matrix' with column names for covariates and row names corresponding to the class names required") }
    mout = list(NULL)
  }
  ## otherwise, estimate class centres using the multinomial logistic regression:
  else {
    message("Trying to estimate the class centres using the 'multinom' method...")
    ## multinomial logistic regression:
    rout <- spmultinom(formulaString=formulaString, observations, covariates, class.stats=TRUE, predict.probs=FALSE)
    mout = rout$model
    if(length(unique(rout$fit))<2){ stop("Predictions resulted in <2 classes. See ?multinom for more info") }
    class.c = rout$class.c
    class.sd = rout$class.sd
  }
  cl <- as.list(row.names(class.c))
  dsf <- NULL
  ## derive standardized squared distances to every class centre in feature space:
  for(c in unlist(cl)){
    dsf[[c]] <- data.frame(lapply(names(cov.df)[sel], FUN=function(x){rep(NA, length(cov.df[,1]))}))
    names(dsf[[c]]) <- names(cov.df)[sel]
    for(j in names(cov.df)[sel]){
      dsf[[c]][,j] <- ((cov.df[,j]-class.c[c,j])/class.sd[c,j])^2
    }
  }
  ## sum up distances per class:
  ds <- NULL
  ds <- lapply(dsf, FUN=function(x){sqrt(rowSums(x, na.rm=TRUE, dims=1))})
  names(ds) <- unlist(cl)
  ds <- data.frame(ds)
  ## total sum (normalizing constant of the fuzzy-kmeans membership formula):
  tt <- rowSums(ds^(-2/(fuzzy.e-1)), na.rm=TRUE, dims=1)
  ## derive the fuzzy membership:
  mm <- cov.df[1]
  for(c in unlist(cl)){
    mm[,c] <- (ds[,c]^(-2/(fuzzy.e-1))/tt)
  }
  mm[,names(cov.df)[1]] <- NULL
  ## Derive the dominant class:
  maxm <- sapply(data.frame(t(as.matrix(mm))), FUN=function(x){max(x, na.rm=TRUE)})
  ## class having the highest membership
  cout <- NULL
  for(c in unlist(cl)){
    cout[which(mm[,c] == maxm)] <- c
  }
  cout <- as.factor(cout)
  ## construct a map: overlay observations and covariates:
  if (inherits(observations, "SpatialPointsDataFrame"))
  {
    pm <- covariates[1]
    pm@data[,tv] <- cout
    pm@data[,names(covariates)[1]] <- NULL
    ov <- over(observations, pm)
  } else {
    pm <- data.frame(cout)
    names(pm) <- tv
    ov <- cbind(obs.df, cov.df[complete.cases(cov.df),])
  }
  sel.c <- !is.na(ov[,tv]) & !is.na(obs.df[,tv])
  ## kappa statistics (only when the optional packages are installed):
  if(requireNamespace("mda", quietly = TRUE)&requireNamespace("psych", quietly = TRUE)){
    cf <- mda::confusion(ov[sel.c,tv], as.character(obs.df[sel.c,tv]))
    ## remove missing classes:
    a <- attr(cf, "dimnames")[[1]] %in% attr(cf, "dimnames")[[2]]
    b <- attr(cf, "dimnames")[[2]] %in% attr(cf, "dimnames")[[1]]
    c.kappa = psych::cohen.kappa(cf[a,b])
    message(paste("Estimated Cohen Kappa (weighted):", signif(c.kappa$weighted.kappa, 4)))
  } else {
    cf <- NULL
  }
  ## create the output object:
  out <- new("SpatialMemberships", predicted = pm, model = mout, mu = mm, class.c = class.c, class.sd = class.sd, confusion = cf)
  return(out)
})
# end of script;
|
\name{GP-package}
\alias{GP-package}
\alias{GP}
\docType{package}
\title{
What the package does (short line)
~~ package title ~~
}
\description{
More about what it does (maybe more than one line)
~~ A concise (1-5 lines) description of the package ~~
}
\details{
\tabular{ll}{
Package: \tab GP\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2013-08-26\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important ~~
~~ functions ~~
}
\author{
Who wrote it
Maintainer: Who to complain to <yourfault@somewhere.net>
~~ The author and/or maintainer of the package ~~
}
\references{
~~ Literature or other references for background information ~~
}
% ~~ Optionally other standard keywords, one per line, from file KEYWORDS in ~~
% ~~ the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
| /man_/GP-package.Rd | permissive | mcrucifix/gp | R | false | false | 1,019 | rd | \name{GP-package}
\alias{GP-package}
\alias{GP}
\docType{package}
\title{
What the package does (short line)
~~ package title ~~
}
\description{
More about what it does (maybe more than one line)
~~ A concise (1-5 lines) description of the package ~~
}
\details{
\tabular{ll}{
Package: \tab GP\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2013-08-26\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important ~~
~~ functions ~~
}
\author{
Who wrote it
Maintainer: Who to complain to <yourfault@somewhere.net>
~~ The author and/or maintainer of the package ~~
}
\references{
~~ Literature or other references for background information ~~
}
% ~~ Optionally other standard keywords, one per line, from file KEYWORDS in ~~
% ~~ the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
#' Summary results from dtree.
#'
#' @param object An object from dtree.
#' @param ... Other arguments.
#' @method summary dtree
#' @export
summary.dtree <- function(object, ...) {
  # Collect the pieces callers care about into a classed summary object.
  out <- structure(
    list(
      results = object$return.matrix,
      response.type = object$response.type,
      call = object$call
    ),
    class = "summary.dtree"
  )
  # print() returns its argument invisibly, so the summary object is also
  # (invisibly) returned — same contract as before.
  print(out)
}
| /R/summary.R | no_license | Rjacobucci/dtree | R | false | false | 363 | r | #' Summary results from dtree.
#'
#' @param object An object from dtree.
#' @param ... Other arguments.
#' @method summary dtree
#' @export
# S3 summary method for "dtree" objects: bundles the results matrix, the
# response type and the original call into a "summary.dtree" object and
# prints it.  print() returns its argument invisibly, so the summary
# object is also (invisibly) returned to the caller.
summary.dtree <- function(object,...){
  ret <- list(results = object$return.matrix,
              response.type = object$response.type,
              call = object$call)
  class(ret) <- "summary.dtree"
  print(ret)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotDiffHeatmap.R
\name{plotDiffHeatmap}
\alias{plotDiffHeatmap}
\title{Plot differential heatmap}
\usage{
plotDiffHeatmap(
x,
y,
k = NULL,
top_n = 20,
fdr = 0.05,
lfc = 1,
all = FALSE,
sort_by = c("padj", "lfc", "none"),
y_cols = list(padj = "p_adj", lfc = "logFC", target = "marker_id"),
assay = "exprs",
fun = c("median", "mean", "sum"),
normalize = TRUE,
col_anno = TRUE,
row_anno = TRUE,
hm_pal = NULL,
fdr_pal = c("lightgrey", "lightgreen"),
lfc_pal = c("blue3", "white", "red3")
)
}
\arguments{
\item{x}{a \code{\link[SingleCellExperiment]{SingleCellExperiment}}.}
\item{y}{a \code{SummarizedExperiment} containing differential testing
results as returned by one of \code{\link[diffcyt]{testDA_edgeR}},
\code{\link[diffcyt]{testDA_voom}}, \code{\link[diffcyt]{testDA_GLMM}},
\code{\link[diffcyt]{testDS_limma}}, or \code{\link[diffcyt]{testDS_LMM}}.
Alternatively, a list as returned by \code{\link[diffcyt]{diffcyt}}.}
\item{k}{character string specifying
the clustering in \code{x} from which \code{y} was obtained.
If NULL, \code{plotDiffHeatmap} will try and guess it,
which will be inaccurate if multiple clusterings share the same levels.}
\item{top_n}{numeric. Number of top clusters (if \code{type = "DA"})
or cluster-marker combinations (if \code{type = "DS"}) to display.}
\item{fdr}{numeric threshold on adjusted p-values below which
results should be retained and considered to be significant.}
\item{lfc}{numeric threshold on logFCs above which to retain results.}
\item{all}{logical specifying whether all \code{top_n} results should
be displayed. If \code{TRUE}, \code{fdr,lfc} filtering is skipped.}
\item{sort_by}{character string specifying the \code{y} column to sort by;
\code{"none"} to retain original ordering. Adj. p-values will increase,
logFCs will decreasing from top to bottom.}
\item{y_cols}{named list specifying columns in \code{y} that contain
adjusted p-values (\code{padj}), logFCs (\code{lfc}) and,
for DS results, feature names (\code{target}).
When only some \code{y_cols} differ from the defaults,
specifying only these is sufficient.}
\item{assay}{character string specifying which assay
data to use; valid values are \code{assayNames(x)}.}
\item{fun}{character string specifying the function to use
as summary statistic for aggregation of \code{assay} data.}
\item{normalize}{logical specifying whether Z-score normalized values
should be plotted. If \code{y} contains DA analysis results,
frequencies will be arcsine-square-root scaled prior to normalization.}
\item{col_anno}{logical specifying whether to include column annotations
for all non-numeric cell metadata variables; or a character vector
in \code{names(colData(x))} to include only a subset of annotations.
(Only variables that map uniquely to each sample will be included)}
\item{row_anno}{logical specifying whether to include a row annotation
indicating whether cluster (DA) or cluster-marker combinations (DS)
are significant, labeled with adjusted p-values, as well as logFCs.}
\item{hm_pal}{character vector of colors to interpolate for the heatmap.
Defaults to \code{\link[RColorBrewer:RColorBrewer]{brewer.pal}}'s
\code{"RdYlBu"} for DS, \code{"RdBu"} for DA results heatmaps.}
\item{fdr_pal, lfc_pal}{character vector of colors to use for row annotations
\itemize{
\item{\code{fdr_pal}}{length 2 for (non-)significant at given \code{fdr}}
\item{\code{lfc_pal}}{length 3 for negative, zero and positive}}}
}
\value{
a \code{\link[ComplexHeatmap]{Heatmap-class}} object.
}
\description{
Heatmaps summarizing differential abundance
& differential state testing results.
}
\examples{
# construct SCE & run clustering
data(PBMC_fs, PBMC_panel, PBMC_md)
sce <- prepData(PBMC_fs, PBMC_panel, PBMC_md)
sce <- cluster(sce)
## differential analysis
library(diffcyt)
# create design & constrast matrix
design <- createDesignMatrix(PBMC_md, cols_design=3:4)
contrast <- createContrast(c(0, 1, 0, 0, 0))
# test for
# - differential abundance (DA) of clusters
# - differential states (DS) within clusters
da <- diffcyt(sce, design = design, contrast = contrast,
analysis_type = "DA", method_DA = "diffcyt-DA-edgeR",
clustering_to_use = "meta20")
ds <- diffcyt(sce, design = design, contrast = contrast,
analysis_type = "DS", method_DS = "diffcyt-DS-limma",
clustering_to_use = "meta20")
# extract result tables
da <- rowData(da$res)
ds <- rowData(ds$res)
# display test results for
# - top DA clusters
# - top DS cluster-marker combinations
plotDiffHeatmap(sce, da)
plotDiffHeatmap(sce, ds)
# visualize results for subset of clusters
sub <- filterSCE(sce, cluster_id \%in\% seq_len(5), k = "meta20")
plotDiffHeatmap(sub, da, all = TRUE, sort_by = "none")
# visualize results for selected feature
# & include only selected annotation
plotDiffHeatmap(sce["pp38", ], ds, col_anno = "condition", all = TRUE)
}
\author{
Lukas M Weber & Helena L Crowell \email{helena.crowell@uzh.ch}
}
| /man/plotDiffHeatmap.Rd | no_license | MeganQiu/CATALYST | R | false | true | 5,069 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotDiffHeatmap.R
\name{plotDiffHeatmap}
\alias{plotDiffHeatmap}
\title{Plot differential heatmap}
\usage{
plotDiffHeatmap(
x,
y,
k = NULL,
top_n = 20,
fdr = 0.05,
lfc = 1,
all = FALSE,
sort_by = c("padj", "lfc", "none"),
y_cols = list(padj = "p_adj", lfc = "logFC", target = "marker_id"),
assay = "exprs",
fun = c("median", "mean", "sum"),
normalize = TRUE,
col_anno = TRUE,
row_anno = TRUE,
hm_pal = NULL,
fdr_pal = c("lightgrey", "lightgreen"),
lfc_pal = c("blue3", "white", "red3")
)
}
\arguments{
\item{x}{a \code{\link[SingleCellExperiment]{SingleCellExperiment}}.}
\item{y}{a \code{SummarizedExperiment} containing differential testing
results as returned by one of \code{\link[diffcyt]{testDA_edgeR}},
\code{\link[diffcyt]{testDA_voom}}, \code{\link[diffcyt]{testDA_GLMM}},
\code{\link[diffcyt]{testDS_limma}}, or \code{\link[diffcyt]{testDS_LMM}}.
Alternatively, a list as returned by \code{\link[diffcyt]{diffcyt}}.}
\item{k}{character string specifying
the clustering in \code{x} from which \code{y} was obtained.
If NULL, \code{plotDiffHeatmap} will try and guess it,
which will be inaccurate if multiple clusterings share the same levels.}
\item{top_n}{numeric. Number of top clusters (if \code{type = "DA"})
or cluster-marker combinations (if \code{type = "DS"}) to display.}
\item{fdr}{numeric threshold on adjusted p-values below which
results should be retained and considered to be significant.}
\item{lfc}{numeric threshold on logFCs above which to retain results.}
\item{all}{logical specifying whether all \code{top_n} results should
be displayed. If \code{TRUE}, \code{fdr,lfc} filtering is skipped.}
\item{sort_by}{character string specifying the \code{y} column to sort by;
\code{"none"} to retain original ordering. Adj. p-values will increase,
logFCs will decreasing from top to bottom.}
\item{y_cols}{named list specifying columns in \code{y} that contain
adjusted p-values (\code{padj}), logFCs (\code{lfc}) and,
for DS results, feature names (\code{target}).
When only some \code{y_cols} differ from the defaults,
specifying only these is sufficient.}
\item{assay}{character string specifying which assay
data to use; valid values are \code{assayNames(x)}.}
\item{fun}{character string specifying the function to use
as summary statistic for aggregation of \code{assay} data.}
\item{normalize}{logical specifying whether Z-score normalized values
should be plotted. If \code{y} contains DA analysis results,
frequencies will be arcsine-square-root scaled prior to normalization.}
\item{col_anno}{logical specifying whether to include column annotations
for all non-numeric cell metadata variables; or a character vector
in \code{names(colData(x))} to include only a subset of annotations.
(Only variables that map uniquely to each sample will be included)}
\item{row_anno}{logical specifying whether to include a row annotation
indicating whether cluster (DA) or cluster-marker combinations (DS)
are significant, labeled with adjusted p-values, as well as logFCs.}
\item{hm_pal}{character vector of colors to interpolate for the heatmap.
Defaults to \code{\link[RColorBrewer:RColorBrewer]{brewer.pal}}'s
\code{"RdYlBu"} for DS, \code{"RdBu"} for DA results heatmaps.}
\item{fdr_pal, lfc_pal}{character vector of colors to use for row annotations
\itemize{
\item{\code{fdr_pal}}{length 2 for (non-)significant at given \code{fdr}}
\item{\code{lfc_pal}}{length 3 for negative, zero and positive}}}
}
\value{
a \code{\link[ComplexHeatmap]{Heatmap-class}} object.
}
\description{
Heatmaps summarizing differential abundance
& differential state testing results.
}
\examples{
# construct SCE & run clustering
data(PBMC_fs, PBMC_panel, PBMC_md)
sce <- prepData(PBMC_fs, PBMC_panel, PBMC_md)
sce <- cluster(sce)
## differential analysis
library(diffcyt)
# create design & constrast matrix
design <- createDesignMatrix(PBMC_md, cols_design=3:4)
contrast <- createContrast(c(0, 1, 0, 0, 0))
# test for
# - differential abundance (DA) of clusters
# - differential states (DS) within clusters
da <- diffcyt(sce, design = design, contrast = contrast,
analysis_type = "DA", method_DA = "diffcyt-DA-edgeR",
clustering_to_use = "meta20")
ds <- diffcyt(sce, design = design, contrast = contrast,
analysis_type = "DS", method_DS = "diffcyt-DS-limma",
clustering_to_use = "meta20")
# extract result tables
da <- rowData(da$res)
ds <- rowData(ds$res)
# display test results for
# - top DA clusters
# - top DS cluster-marker combinations
plotDiffHeatmap(sce, da)
plotDiffHeatmap(sce, ds)
# visualize results for subset of clusters
sub <- filterSCE(sce, cluster_id \%in\% seq_len(5), k = "meta20")
plotDiffHeatmap(sub, da, all = TRUE, sort_by = "none")
# visualize results for selected feature
# & include only selected annotation
plotDiffHeatmap(sce["pp38", ], ds, col_anno = "condition", all = TRUE)
}
\author{
Lukas M Weber & Helena L Crowell \email{helena.crowell@uzh.ch}
}
|
context("Make API")
# make_() must accept the constructor description and its extra arguments
# in either order and yield the same graph as the direct make_* call.
test_that("make_ works, order of arguments does not matter", {
  g0 <- make_undirected_graph(1:10)
  g1 <- make_(undirected_graph(1:10))
  g2 <- make_(undirected_graph(), 1:10)
  g3 <- make_(1:10, undirected_graph())
  expect_true(identical_graphs(g0, g1))
  expect_true(identical_graphs(g0, g2))
  expect_true(identical_graphs(g0, g3))
})
# sample_() and graph_() should behave exactly like make_() for a
# deterministic constructor such as undirected_graph().
test_that("sample_, graph_ also work", {
  g0 <- make_undirected_graph(1:10)
  g1 <- sample_(undirected_graph(1:10))
  g2 <- sample_(undirected_graph(), 1:10)
  g3 <- sample_(1:10, undirected_graph())
  expect_true(identical_graphs(g0, g1))
  expect_true(identical_graphs(g0, g2))
  expect_true(identical_graphs(g0, g3))
  g4 <- graph_(undirected_graph(1:10))
  g5 <- graph_(undirected_graph(), 1:10)
  g6 <- graph_(1:10, undirected_graph())
  expect_true(identical_graphs(g0, g4))
  expect_true(identical_graphs(g0, g5))
  expect_true(identical_graphs(g0, g6))
})
# Each of make_/graph_/sample_ must signal a "Don't know how to ..." error
# when given no constructor, a bare argument, or two constructors.
test_that("error messages are proper", {
  expect_error(make_(), "Don't know how to make_")
  expect_error(make_(1:10), "Don't know how to make_")
  expect_error(graph_(), "Don't know how to graph_")
  expect_error(graph_(1:10), "Don't know how to graph_")
  expect_error(graph_(directed_graph(), directed_graph()),
               "Don't know how to graph_")
  expect_error(sample_(), "Don't know how to sample_")
  expect_error(sample_(1:10), "Don't know how to sample_")
  expect_error(sample_(directed_graph(), directed_graph()),
               "Don't know how to sample_")
})
# graph_(from_literal(...)) must forward its arguments unevaluated so the
# literal notation reaches graph_from_literal() intact.
test_that("we pass arguments unevaluated", {
  g0 <- graph_from_literal(A -+ B:C)
  g1 <- graph_(from_literal(A -+ B:C))
  expect_true(identical_graphs(g0, g1))
})
| /R-Portable/App/R-Portable/library/igraph/tests/test-make.R | permissive | voltek62/seo-viz-install | R | false | false | 1,692 | r |
context("Make API")
test_that("make_ works, order of arguments does not matter", {
g0 <- make_undirected_graph(1:10)
g1 <- make_(undirected_graph(1:10))
g2 <- make_(undirected_graph(), 1:10)
g3 <- make_(1:10, undirected_graph())
expect_true(identical_graphs(g0, g1))
expect_true(identical_graphs(g0, g2))
expect_true(identical_graphs(g0, g3))
})
test_that("sample_, graph_ also work", {
g0 <- make_undirected_graph(1:10)
g1 <- sample_(undirected_graph(1:10))
g2 <- sample_(undirected_graph(), 1:10)
g3 <- sample_(1:10, undirected_graph())
expect_true(identical_graphs(g0, g1))
expect_true(identical_graphs(g0, g2))
expect_true(identical_graphs(g0, g3))
g4 <- graph_(undirected_graph(1:10))
g5 <- graph_(undirected_graph(), 1:10)
g6 <- graph_(1:10, undirected_graph())
expect_true(identical_graphs(g0, g4))
expect_true(identical_graphs(g0, g5))
expect_true(identical_graphs(g0, g6))
})
test_that("error messages are proper", {
expect_error(make_(), "Don't know how to make_")
expect_error(make_(1:10), "Don't know how to make_")
expect_error(graph_(), "Don't know how to graph_")
expect_error(graph_(1:10), "Don't know how to graph_")
expect_error(graph_(directed_graph(), directed_graph()),
"Don't know how to graph_")
expect_error(sample_(), "Don't know how to sample_")
expect_error(sample_(1:10), "Don't know how to sample_")
expect_error(sample_(directed_graph(), directed_graph()),
"Don't know how to sample_")
})
test_that("we pass arguments unevaluated", {
g0 <- graph_from_literal(A -+ B:C)
g1 <- graph_(from_literal(A -+ B:C))
expect_true(identical_graphs(g0, g1))
})
|
# Utils and Environment variables
# Package-private environment holding the mutable state used by the
# getters/setters below: the DCF fields written to the PACKAGES index and
# the repository location/name configured by the user.
.cranium <- new.env(hash = TRUE)
# Default fields emitted into PACKAGES; extend or replace via
# set_packages_fields().
.cranium[['package_fields']] <- c("Package", "Version", "Priority", "Depends", "Imports",
"LinkingTo", "Suggests", "Enhances", "License", "License_is_FOSS",
"License_restricts_use", "OS_type", "Archs", "MD5sum", "NeedsCompilation",
"Path",'Repository','Header')
# Repository location and name start unset; populated by
# set_repo_location() / set_repo_name().
.cranium[['repo']] <- NULL
.cranium[['repo_name']] <- NULL
#' Modify Default Fields in PACKAGES file
#'
#' @param fields fields to add (must be present in package descriptions)
#' @param append should this be appended to existing fields
#' @return NULL (invisibly, via `message`); the new field set is echoed.
#' @export
set_packages_fields <- function(fields, append = TRUE) {
  # Either extend or replace the stored field set, then echo the result.
  updated <- if (append) c(.cranium[['package_fields']], fields) else fields
  .cranium[['package_fields']] <- updated
  message('Fields displayed: ', paste(updated, collapse = '\n'))
}
#' Get Current package fields
#'
#' Read-only accessor for the fields written to the PACKAGES file.
#'
#' @return current fields used in the PACKAGES file
#' @export
get_packages_fields <- function() {
  .cranium[["package_fields"]]
}
#' set repo location (normalizes path)
#'
#' @param x a relative filepath
#'
#' @return the normalized path (invisible)
#' @export
set_repo_location <- function(x) {
  normalized <- normalizePath(x)
  .cranium[["repo"]] <- normalized
  invisible(normalized)
}
#' Get Currently set repo location
#'
#' Fails with an informative error when no repo has been configured yet.
#'
#' @return the filepath of the current repo
#' @export
get_repo_location <- function() {
  repo <- .cranium[["repo"]]
  if (is.null(repo)) {
    stop('No repo location set, use set_repo_location.')
  }
  repo
}
#' set repo name (optional)
#'
#' @param x a name for the repo, this will be used by packrat when searching for a repository
#'
#' @return the value of x (invisible)
#' @export
set_repo_name <- function(x) {
  .cranium[["repo_name"]] <- x
  invisible(x)
}
#' get repo name (optional)
#'
#' Returns `NULL` when no name has been set.
#'
#' @return the name of the current repo
#' @export
get_repo_name <- function() {
  .cranium[["repo_name"]]
}
# Get the base directory of a tarball: the first path component of the
# archive's first entry (e.g. "mypkg/DESCRIPTION" -> "mypkg").
getbasedir <- function(x) {
  first_entry <- untar(x, list = TRUE)[[1]]
  strsplit(first_entry, '/')[[1]][1]
}
# Strip the ".tar.gz" extension from a package tarball path,
# e.g. "dir/pkg_1.0.0.tar.gz" -> "pkg_1.0.0".
noEXT <- function(x) {
  tarball_name <- basename(x)
  gsub('(^.*_[0-9]+.*?)\\.tar\\.gz$', '\\1', tarball_name)
}
# Get Max Version of several versions.
# Reduces over the (character) versions, keeping the newer of each pair as
# judged by utils::compareVersion(); ties keep the earlier element.
# (Replaces the original `if (compareVersion(x, y) + 1L)` numeric-truthiness
# trick with an explicit, equivalent comparison.)
max_ver <- function(z) {
  z <- as.character(z)
  Reduce(function(best, candidate) {
    # compareVersion() returns 1 (best newer), 0 (equal) or -1 (best older)
    if (utils::compareVersion(best, candidate) >= 0) best else candidate
  }, z)
}
# Is maximum version: logical mask marking every element equal to the newest.
is_max_ver <- function(z) {
  as.character(z) == max_ver(z)
}
# Is the shortest string: TRUE for each element tied for the minimum length.
is_shortest_string <- function(z) {
  string_lengths <- nchar(z)
  string_lengths == min(string_lengths)
}
# Create a hardlink name that meets the requirements of download.packages:
# inserts "_<Version>" immediately before the ".tar.gz" extension of Path.
hardlink_name <- function(Path, Version) {
  name_template <- gsub('(.*)(\\.tar\\.gz)', '\\1_%s\\2', Path)
  sprintf(name_template, Version)
}
# Checks the base address of an url against a string to see if they match,
# i.e. whether each element of `x` starts with `url`. Uses startsWith()
# (base R >= 3.3), which is equivalent to the original
# `substr(x, 1, nchar(url)) == url` comparison but clearer and faster.
check_base_address <- function(x, url) {
  startsWith(x, url)
}
# Sets attribute, by reference. Appends `value` to any existing attribute
# `name` of `x` via data.table::setattr(), which mutates `x` in place (no
# copy is taken) and returns it invisibly -- callers must not assume value
# semantics here.
addAttr <- function(x, name, value) {
  data.table::setattr(x, name, c(attr(x,name,exact = TRUE), value))
}
# Checks if an address is a git remote, i.e. whether it ends in ".git".
# Replaces the original substr() trick (with an inline `y <- nchar(x) - 3`
# assignment) by the equivalent, clearer endsWith() (base R >= 3.3).
is_git <- function(x) {
  endsWith(x, ".git")
}
# Defines intended location: classifies an address by tagging its "class"
# attribute with one or more of 'git', 'github', 'web', 'ssl', 'local'.
# Classes accumulate (e.g. a GitHub https URL gets 'github', 'web' and 'ssl').
# NOTE(review): `copy()` is presumably data.table::copy (data.table must be
# attached) -- it shields the caller's object because addAttr() mutates its
# argument by reference. Confirm if refactoring.
addr_class <- function(x) {
  x <- copy(x)
  # Check ends
  if(is_git(x)) addAttr(x, 'class', 'git')
  # Check known addresses
  if(check_base_address(x, 'https://github.com')) addAttr(x, 'class', 'github')
  # Check protocols
  if(check_base_address(x, 'https://')) addAttr(x, 'class', c('web','ssl'))
  # `|` is the elementwise OR; x is expected to be a single address here.
  if(check_base_address(x, 'http://') |
     check_base_address(x, 'ftp://')) addAttr(x, 'class', 'web')
  # Anything that is not reachable over the web is treated as a local path.
  if(!inherits(x, 'web')) addAttr(x, 'class', 'local')
  x
}
| /R/utils.R | no_license | atheriel/cranium | R | false | false | 3,709 | r | # Utils and Environment variables
# Package-private mutable state: PACKAGES fields, repo path and repo name.
.cranium <- new.env(hash = TRUE)
# Default DESCRIPTION fields written to the PACKAGES file.
.cranium[['package_fields']] <- c("Package", "Version", "Priority", "Depends", "Imports",
                                  "LinkingTo", "Suggests", "Enhances", "License", "License_is_FOSS",
                                  "License_restricts_use", "OS_type", "Archs", "MD5sum", "NeedsCompilation",
                                  "Path",'Repository','Header')
.cranium[['repo']] <- NULL       # repo path; populated by set_repo_location()
.cranium[['repo_name']] <- NULL  # optional repo name; populated by set_repo_name()

#' Modify Default Fields in PACKAGES file
#'
#' @param fields fields to add (must be present in package descriptions)
#' @param append should this be appended to existing fields
#' @return NULL
#' @export
set_packages_fields <- function(fields, append = TRUE) {
  if(append) {
    .cranium[['package_fields']] <- c(.cranium[['package_fields']], fields)
  } else {
    .cranium[['package_fields']] <- fields
  }
  message('Fields displayed: ', paste(.cranium[['package_fields']], collapse = '\n'))
}

#' Get Current package fields
#'
#' @return current fields used in the PACKAGES file
#' @export
get_packages_fields <- function() {
  .cranium[['package_fields']]
}

#' set repo location (normalizes path)
#'
#' @param x a relative filepath
#'
#' @return the value of x (invisible)
#' @export
set_repo_location <- function(x) {
  .cranium[['repo']] <- normalizePath(x)
  invisible(.cranium[['repo']])
}

#' Get Currently set repo location
#'
#' @return the filepath of the current repo
#' @export
get_repo_location <- function() {
  if(is.null(.cranium[['repo']])) stop('No repo location set, use set_repo_location.')
  .cranium[['repo']]
}

#' set repo name (optional)
#'
#' @param x a name for the repo, this will be used by packrat when searching for a repository
#'
#' @return the value of x (invisible)
#' @export
set_repo_name <- function(x) {
  .cranium[['repo_name']] <- x
  invisible(.cranium[['repo_name']])
}

#' get repo name (optional)
#'
#' @return the name of the current repo
#' @export
get_repo_name <- function() {
  .cranium[['repo_name']]
}

# Get the base directory of a tarball (first path component of its first entry).
getbasedir <- function(x) {
  strsplit(untar(x, list = TRUE)[[1]],'/')[[1]][1]
}

# Remove extension from tar.gz, e.g. "pkg_1.0.0.tar.gz" -> "pkg_1.0.0".
noEXT <- function(x) {
  gsub('(^.*_[0-9]+.*?)\\.tar\\.gz$', '\\1', basename(x))
}

# Get Max Version of several versions.
# compareVersion() returns -1/0/1; adding 1L makes the `if` condition falsy
# only when x is strictly older than y, so the reduction keeps the newest
# version seen so far (first element wins on ties).
max_ver <- function(z) {
  z <- as.character(z)
  Reduce(function(x,y) {
    if(utils::compareVersion(x,y) + 1L) x else y
  },z)
}

# Is maximum version (logical mask marking elements equal to the newest).
is_max_ver <- function(z) {
  max_ver(z) == as.character(z)
}

# Is the shortest string (TRUE for every element tied for minimum length).
is_shortest_string <- function(z) {
  nchar(z) == min(nchar(z))
}

# Create a hardlink that meets requirements of download.packages:
# inserts "_<Version>" before the ".tar.gz" extension of Path.
hardlink_name <- function(Path, Version) {
  sprintf(gsub('(.*)(\\.tar\\.gz)','\\1_%s\\2',Path),Version)
}

# Checks the base address of an url against a string to see if they match
# (i.e. does `x` start with `url`?).
check_base_address <- function(x,url) {
  m = nchar(url)
  substr(x,1,m) == url
}

# Sets attribute, by reference: appends `value` to the existing attribute
# `name` of `x`. data.table::setattr() modifies `x` in place (no copy).
addAttr <- function(x, name, value) {
  data.table::setattr(x, name, c(attr(x,name,exact = TRUE), value))
}

# Checks if is git: TRUE when the last four characters of x are ".git".
is_git <- function(x) {
  substr(x, y <- nchar(x) - 3, y + 3) == '.git'
}

# Defines intended location: tags the "class" attribute of an address with one
# or more of 'git', 'github', 'web', 'ssl', 'local' (classes accumulate).
# NOTE(review): copy() is presumably data.table::copy; it shields the caller's
# object because addAttr() mutates by reference.
addr_class <- function(x) {
  x <- copy(x)
  # Check ends
  if(is_git(x)) addAttr(x, 'class', 'git')
  # Check known addresses
  if(check_base_address(x, 'https://github.com')) addAttr(x, 'class', 'github')
  # Check protocols
  if(check_base_address(x, 'https://')) addAttr(x, 'class', c('web','ssl'))
  if(check_base_address(x, 'http://') |
     check_base_address(x, 'ftp://')) addAttr(x, 'class', 'web')
  if(!inherits(x, 'web')) addAttr(x, 'class', 'local')
  x
}
|
#' Write a pedigree to file
#'
#' @param x A `ped` object
#' @param prefix A character string giving the prefix of the files. For
#'   instance, if `prefix = "myped"` and `what = c("ped", "map")`, the output
#'   files are "myped.ped" and "myped.map" in the current directory. Paths to
#'   other folder may be included, e.g. `prefix = "path-to-my-dir/myped"`.
#' @param what A subset of the character vector `c("ped", "map", "dat",
#'   "freq")`, indicating which files should be created. By default only the
#'   "ped" file is created. This option is ignored if `merlin = TRUE`.
#' @param famid A logical indicating if family ID should be included as the
#'   first column in the ped file. The family ID is taken from `famid(x)`. If
#'   `x` is a list of pedigrees, the family IDs are taken from `names(x)`, or if
#'   this is NULL, the component-wise `famid()` values. Missing values are
#'   replaced by natural numbers. This option is ignored if `merlin = TRUE`.
#' @param header A logical indicating if column names should be included in the
#'   ped file. This option is ignored if `merlin = TRUE`.
#' @param merlin A logical. If TRUE, "ped", "map", "dat" and "freq" files are
#'   written in a format readable by the MERLIN software. In particular MERLIN
#'   requires non-numerical allele labels in the frequency file.
#' @param verbose A logical.
#'
#' @return A character vector with the file names.
#' @examples
#'
#' x = nuclearPed(1)
#' x = addMarker(x, "3" = "a/b", name = "m1")
#'
#' # Write to file
#' fn = writePed(x, prefix = tempfile("test"))
#'
#' # Read
#' y = readPed(fn)
#'
#' stopifnot(identical(x, y))
#'
#' @importFrom utils write.table
#' @export
writePed = function(x, prefix, what = "ped", famid = is.pedList(x),
                    header = TRUE, merlin = FALSE, verbose = TRUE) {
  # MERLIN mode delegates entirely to writeMerlin(); `what`, `famid` and
  # `header` are ignored in that case (as documented above).
  if(merlin)
    return(writeMerlin(x, prefix = prefix, verbose = verbose))

  # Named vector of output paths, e.g. c(ped = "prefix.ped", ...).
  fnames = setNames(paste(prefix, what, sep = "."), what)

  if(is.pedList(x)) {
    # Stack the per-component data frames row-wise.
    pedmatr = do.call(rbind, lapply(x, as.data.frame.ped))
    if(famid) {
      # NOTE(review): the logical parameter `famid` shadows the famid()
      # accessor; lapply() still finds the function because match.fun() looks
      # the symbol up with mode = "function". Subtle -- confirm if refactoring.
      famids = names(x) %||% unlist(lapply(x, famid))
      # Replace empty/missing family IDs with consecutive natural numbers.
      if(any(miss <- famids == "" | is.na(famids)))
        famids[miss] = seq_along(which(miss))
      famvec = rep(famids, pedsize(x))
      pedmatr = cbind(famid = famvec, pedmatr)
    }
    x = x[[1]] # for later use
  }
  else {
    pedmatr = as.data.frame(x)
    if(famid) {
      # Default to family ID "1" when the pedigree carries none.
      fam = if(x$FAMID == "") "1" else x$FAMID
      pedmatr = cbind(famid = fam, pedmatr)
    }
  }

  if ("ped" %in% what) {
    # TODO: This is slow; should use matrix objects.
    write.table(pedmatr, file = fnames[["ped"]], sep = "\t", col.names = header, row.names = FALSE, quote = FALSE)
    if(verbose) message("File written: ", fnames[["ped"]])
    # Faster, but excludes column names
    # write(t.default(pedmatr), fnames[["ped"]], ncolumns = ncol(pedmatr))
  }

  if ("freq" %in% what) {
    writeFreqDatabase(x, filename = fnames[["freq"]], format = "list")
    if(verbose) message("File written: ", fnames[["freq"]])
  }

  # The map is needed for both the "map" and "dat" outputs.
  if (any(c("map", "dat") %in% what)) {
    mapmatr = getMap(x, na.action = 1, verbose = FALSE)
  }

  if ("map" %in% what) {
    write.table(mapmatr, fnames[["map"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
    if(verbose) message("File written: ", fnames[["map"]])
  }

  if ("dat" %in% what) {
    datmatr = cbind("M", mapmatr$MARKER)
    write.table(datmatr, fnames[["dat"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
    if(verbose) message("File written: ", fnames[["dat"]])
  }

  # Return the created file paths invisibly.
  invisible(unname(fnames))
}
#' @importFrom utils write.table
# Legacy MERLIN exporter. NOTE(review): writePed() dispatches to writeMerlin()
# below, not to this function; this variant uses the afreq()/nAlleles()
# accessors instead of raw attributes. Writes <prefix>.ped/.map/.dat/.freq.
writePed_merlin = function(x, prefix, verbose = TRUE) {
  what = c("ped", "map", "dat", "freq")
  fnames = setNames(paste(prefix, what, sep = "."), what)

  ### ped file
  # Stack pedigree components and prepend a numeric family-ID column
  # (a single pedigree gets family ID 1).
  if(is.pedList(x)) {
    pedmatr = do.call(rbind, lapply(x, as.matrix.ped, include.attrs = FALSE))
    pedmatr = cbind(rep.int(seq_along(x), pedsize(x)), pedmatr)
    x = x[[1]]
  } else {
    pedmatr = cbind(1, as.matrix(x, include.attrs = FALSE))
  }
  write(t.default(pedmatr), file = fnames[["ped"]], ncolumns = ncol(pedmatr))
  if(verbose) message("File written: ", fnames[["ped"]])

  ### map file
  mapmatr = getMap(x, na.action = 1, verbose = FALSE)
  write.table(mapmatr, file = fnames[["map"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["map"]])

  ### dat file
  datmatr = cbind("M", mapmatr$MARKER)
  write.table(datmatr, file = fnames[["dat"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["dat"]])

  ### freq file
  # Layout: one "M <marker>" header row per marker followed by
  # "A <allele> <freq>" rows; `cum` holds the index of each header row.
  nalls = nAlleles(x)
  L = sum(nalls) + length(nalls)
  cum = cumsum(c(1, nalls + 1))
  length(cum) = length(nalls) #remove last
  col1 = rep("A", L)
  col1[cum] = "M"
  col2 = character(L)
  col2[cum] = mapmatr$MARKER
  allalleles = unlist(lapply(nalls, seq_len)) # numerical allele names for merlin!
  col2[-cum] = allalleles
  col3 = character(L)
  allfreqs = unlist(lapply(x$MARKERS, afreq))
  # BUG FIX: the argument was misspelled `scientifit`; since that is not a
  # prefix of "scientific", R silently dropped it into `...`, so frequencies
  # could be written in scientific notation, which MERLIN does not accept.
  col3[-cum] = format(allfreqs, scientific = FALSE, digits = 6)
  freqmatr = cbind(col1, col2, col3)
  write.table(freqmatr, file = fnames[["freq"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["freq"]])

  invisible(unname(fnames))
}
#' @importFrom utils write.table
# Export a pedigree (or list of pedigrees) in the four-file format read by the
# MERLIN linkage software: <prefix>.ped, .map, .dat and .freq.
writeMerlin = function(x, prefix, verbose = TRUE) {
  what = c("ped", "map", "dat", "freq")
  fnames = setNames(paste(prefix, what, sep = "."), what)

  ### ped file
  # Stack pedigree components and prepend an integer family-ID column
  # (a single pedigree gets family ID 1).
  if(is.pedList(x)) {
    pedmatr = do.call(rbind, lapply(x, as.matrix.ped, include.attrs = FALSE))
    pedmatr = cbind(rep.int(seq_along(x), pedsize(x)), pedmatr)
    x = x[[1]]
  } else {
    pedmatr = cbind(1L, as.matrix.ped(x, include.attrs = FALSE))
  }

  # writeLines is faster here than write() and write.table()
  nr = nrow(pedmatr)
  lines = vapply(seq_len(nr), function(i) paste(pedmatr[i, ], collapse = " "), "")
  writeLines(lines, fnames[["ped"]])
  if(verbose) message("File written: ", fnames[["ped"]])

  ### map file
  mapmatr = getMap(x, merlin = TRUE, verbose = FALSE)
  write.table(mapmatr, file = fnames[["map"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["map"]])

  ### dat file
  markernames = mapmatr[,"MARKER"]
  datmatr = cbind("M", markernames)
  write.table(datmatr, file = fnames[["dat"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["dat"]])

  ### freq file
  # Layout: one "M <marker>" header row per marker followed by
  # "A <allele> <freq>" rows; `cum` holds the index of each header row.
  nalls = nAlleles.ped(x)
  L = sum(nalls) + length(nalls)
  cum = cumsum(c(1, nalls + 1))
  length(cum) = length(nalls) #remove last
  col1 = rep("A", L)
  col1[cum] = "M"
  col2 = character(L)
  col2[cum] = markernames
  allalleles = unlist(lapply(nalls, seq_len)) # numerical allele names for merlin!
  col2[-cum] = allalleles
  col3 = character(L)
  allfreqs = unlist(lapply(x$MARKERS, attr, "afreq"))
  # BUG FIX: the argument was misspelled `scientifit`; since that is not a
  # prefix of "scientific", R silently dropped it into `...`, so frequencies
  # could be written in scientific notation, which MERLIN does not accept.
  col3[-cum] = format(allfreqs, scientific = FALSE, digits = 6)
  freqmatr = cbind(col1, col2, col3)
  write.table(freqmatr, file = fnames[["freq"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["freq"]])

  invisible(unname(fnames))
}
| /R/writePed.R | no_license | cran/pedtools | R | false | false | 7,422 | r | #' Write a pedigree to file
#'
#' @param x A `ped` object
#' @param prefix A character string giving the prefix of the files. For
#' instance, if `prefix = "myped"` and `what = c("ped", "map")`, the output
#' files are "myped.ped" and "myped.map" in the current directory. Paths to
#' other folder may be included, e.g. `prefix = "path-to-my-dir/myped"`.
#' @param what A subset of the character vector `c("ped", "map", "dat",
#' "freq")`, indicating which files should be created. By default only the
#' "ped" file is created. This option is ignored if `merlin = TRUE`.
#' @param famid A logical indicating if family ID should be included as the
#' first column in the ped file. The family ID is taken from `famid(x)`. If
#' `x` is a list of pedigrees, the family IDs are taken from `names(x)`, or if
#' this is NULL, the component-wise `famid()` values. Missing values are
#' replaced by natural numbers. This option is ignored if `merlin = TRUE`.
#' @param header A logical indicating if column names should be included in the
#' ped file. This option is ignored if `merlin = TRUE`.
#' @param merlin A logical. If TRUE, "ped", "map", "dat" and "freq" files are
#' written in a format readable by the MERLIN software. In particular MERLIN
#' requires non-numerical allele labels in the frequency file.
#' @param verbose A logical.
#'
#' @return A character vector with the file names.
#' @examples
#'
#' x = nuclearPed(1)
#' x = addMarker(x, "3" = "a/b", name = "m1")
#'
#' # Write to file
#' fn = writePed(x, prefix = tempfile("test"))
#'
#' # Read
#' y = readPed(fn)
#'
#' stopifnot(identical(x, y))
#'
#' @importFrom utils write.table
#' @export
writePed = function(x, prefix, what = "ped", famid = is.pedList(x),
header = TRUE, merlin = FALSE, verbose = TRUE) {
if(merlin)
return(writeMerlin(x, prefix = prefix, verbose = verbose))
fnames = setNames(paste(prefix, what, sep = "."), what)
if(is.pedList(x)) {
pedmatr = do.call(rbind, lapply(x, as.data.frame.ped))
if(famid) {
famids = names(x) %||% unlist(lapply(x, famid))
if(any(miss <- famids == "" | is.na(famids)))
famids[miss] = seq_along(which(miss))
famvec = rep(famids, pedsize(x))
pedmatr = cbind(famid = famvec, pedmatr)
}
x = x[[1]] # for later use
}
else {
pedmatr = as.data.frame(x)
if(famid) {
fam = if(x$FAMID == "") "1" else x$FAMID
pedmatr = cbind(famid = fam, pedmatr)
}
}
if ("ped" %in% what) {
# TODO: This is slow; should use matrix objects.
write.table(pedmatr, file = fnames[["ped"]], sep = "\t", col.names = header, row.names = FALSE, quote = FALSE)
if(verbose) message("File written: ", fnames[["ped"]])
# Faster, but excludes column names
# write(t.default(pedmatr), fnames[["ped"]], ncolumns = ncol(pedmatr))
}
if ("freq" %in% what) {
writeFreqDatabase(x, filename = fnames[["freq"]], format = "list")
if(verbose) message("File written: ", fnames[["freq"]])
}
if (any(c("map", "dat") %in% what)) {
mapmatr = getMap(x, na.action = 1, verbose = FALSE)
}
if ("map" %in% what) {
write.table(mapmatr, fnames[["map"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
if(verbose) message("File written: ", fnames[["map"]])
}
if ("dat" %in% what) {
datmatr = cbind("M", mapmatr$MARKER)
write.table(datmatr, fnames[["dat"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
if(verbose) message("File written: ", fnames[["dat"]])
}
invisible(unname(fnames))
}
#' @importFrom utils write.table
# Legacy MERLIN exporter. NOTE(review): writePed() dispatches to writeMerlin()
# below, not to this function; this variant uses the afreq()/nAlleles()
# accessors instead of raw attributes. Writes <prefix>.ped/.map/.dat/.freq.
writePed_merlin = function(x, prefix, verbose = TRUE) {
  what = c("ped", "map", "dat", "freq")
  fnames = setNames(paste(prefix, what, sep = "."), what)

  ### ped file
  # Stack pedigree components and prepend a numeric family-ID column
  # (a single pedigree gets family ID 1).
  if(is.pedList(x)) {
    pedmatr = do.call(rbind, lapply(x, as.matrix.ped, include.attrs = FALSE))
    pedmatr = cbind(rep.int(seq_along(x), pedsize(x)), pedmatr)
    x = x[[1]]
  } else {
    pedmatr = cbind(1, as.matrix(x, include.attrs = FALSE))
  }
  write(t.default(pedmatr), file = fnames[["ped"]], ncolumns = ncol(pedmatr))
  if(verbose) message("File written: ", fnames[["ped"]])

  ### map file
  mapmatr = getMap(x, na.action = 1, verbose = FALSE)
  write.table(mapmatr, file = fnames[["map"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["map"]])

  ### dat file
  datmatr = cbind("M", mapmatr$MARKER)
  write.table(datmatr, file = fnames[["dat"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["dat"]])

  ### freq file
  # Layout: one "M <marker>" header row per marker followed by
  # "A <allele> <freq>" rows; `cum` holds the index of each header row.
  nalls = nAlleles(x)
  L = sum(nalls) + length(nalls)
  cum = cumsum(c(1, nalls + 1))
  length(cum) = length(nalls) #remove last
  col1 = rep("A", L)
  col1[cum] = "M"
  col2 = character(L)
  col2[cum] = mapmatr$MARKER
  allalleles = unlist(lapply(nalls, seq_len)) # numerical allele names for merlin!
  col2[-cum] = allalleles
  col3 = character(L)
  allfreqs = unlist(lapply(x$MARKERS, afreq))
  # BUG FIX: the argument was misspelled `scientifit`; since that is not a
  # prefix of "scientific", R silently dropped it into `...`, so frequencies
  # could be written in scientific notation, which MERLIN does not accept.
  col3[-cum] = format(allfreqs, scientific = FALSE, digits = 6)
  freqmatr = cbind(col1, col2, col3)
  write.table(freqmatr, file = fnames[["freq"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["freq"]])

  invisible(unname(fnames))
}
#' @importFrom utils write.table
# Export a pedigree (or list of pedigrees) in the four-file format read by the
# MERLIN linkage software: <prefix>.ped, .map, .dat and .freq.
writeMerlin = function(x, prefix, verbose = TRUE) {
  what = c("ped", "map", "dat", "freq")
  fnames = setNames(paste(prefix, what, sep = "."), what)

  ### ped file
  # Stack pedigree components and prepend an integer family-ID column
  # (a single pedigree gets family ID 1).
  if(is.pedList(x)) {
    pedmatr = do.call(rbind, lapply(x, as.matrix.ped, include.attrs = FALSE))
    pedmatr = cbind(rep.int(seq_along(x), pedsize(x)), pedmatr)
    x = x[[1]]
  } else {
    pedmatr = cbind(1L, as.matrix.ped(x, include.attrs = FALSE))
  }

  # writeLines is faster here than write() and write.table()
  nr = nrow(pedmatr)
  lines = vapply(seq_len(nr), function(i) paste(pedmatr[i, ], collapse = " "), "")
  writeLines(lines, fnames[["ped"]])
  if(verbose) message("File written: ", fnames[["ped"]])

  ### map file
  mapmatr = getMap(x, merlin = TRUE, verbose = FALSE)
  write.table(mapmatr, file = fnames[["map"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["map"]])

  ### dat file
  markernames = mapmatr[,"MARKER"]
  datmatr = cbind("M", markernames)
  write.table(datmatr, file = fnames[["dat"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["dat"]])

  ### freq file
  # Layout: one "M <marker>" header row per marker followed by
  # "A <allele> <freq>" rows; `cum` holds the index of each header row.
  nalls = nAlleles.ped(x)
  L = sum(nalls) + length(nalls)
  cum = cumsum(c(1, nalls + 1))
  length(cum) = length(nalls) #remove last
  col1 = rep("A", L)
  col1[cum] = "M"
  col2 = character(L)
  col2[cum] = markernames
  allalleles = unlist(lapply(nalls, seq_len)) # numerical allele names for merlin!
  col2[-cum] = allalleles
  col3 = character(L)
  allfreqs = unlist(lapply(x$MARKERS, attr, "afreq"))
  # BUG FIX: the argument was misspelled `scientifit`; since that is not a
  # prefix of "scientific", R silently dropped it into `...`, so frequencies
  # could be written in scientific notation, which MERLIN does not accept.
  col3[-cum] = format(allfreqs, scientific = FALSE, digits = 6)
  freqmatr = cbind(col1, col2, col3)
  write.table(freqmatr, file = fnames[["freq"]], col.names = FALSE, row.names = FALSE, quote = FALSE)
  if(verbose) message("File written: ", fnames[["freq"]])

  invisible(unname(fnames))
}
|
library(cholera)
### Name: walkingDistance
### Title: Compute the shortest walking distance between cases and/or
### pumps.
### Aliases: walkingDistance
### ** Examples
## Not run:
##D
##D # distance from case 1 to nearest pump.
##D walkingDistance(1)
##D
##D # distance from case 1 to pump 6.
##D walkingDistance(1, 6)
##D
##D # exclude pump 7 from consideration.
##D walkingDistance(1, -7)
##D
##D # distance from case 1 to case 6.
##D walkingDistance(1, 6, type = "cases")
##D
##D # distance from pump 1 to pump 6.
##D walkingDistance(1, 6, type = "pumps")
## End(Not run)
| /data/genthat_extracted_code/cholera/examples/walkingDistance.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 591 | r | library(cholera)
### Name: walkingDistance
### Title: Compute the shortest walking distance between cases and/or
### pumps.
### Aliases: walkingDistance
### ** Examples
## Not run:
##D
##D # distance from case 1 to nearest pump.
##D walkingDistance(1)
##D
##D # distance from case 1 to pump 6.
##D walkingDistance(1, 6)
##D
##D # exclude pump 7 from consideration.
##D walkingDistance(1, -7)
##D
##D # distance from case 1 to case 6.
##D walkingDistance(1, 6, type = "cases")
##D
##D # distance from pump 1 to pump 6.
##D walkingDistance(1, 6, type = "pumps")
## End(Not run)
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32564188180072e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, -1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, 
-4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) | /meteor/inst/testfiles/ET0_PriestleyTaylor/AFL_ET0_PriestleyTaylor/ET0_PriestleyTaylor_valgrind_files/1615844216-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 2,232 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32564188180072e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, 
-1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, -4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) |
# Original code from
# Tidying the new Johns Hopkins Covid-19 time-series datasets (old code before 30.03.2020)
# March 24, 2020 in R
# https://joachim-gassen.github.io/2020/03/tidying-the-new-johns-hopkins-covid-19-datasests/
# adapted for daily use on different platforms and include graph for Slovenia
# Matjaz Jeran 02.04.2020

# visualize data
# input:
#   jh_covid19_data.RData   (loaded below as `jh_covid19_data`)
#   jh_add_wbank_data.RData (loaded below as `wb_cs`)
# output:
#   various graphs left opened

# NOTE(review): clearing the workspace and calling setwd() are whole-session
# side effects; kept as in the original script.
rm (list = ls (all = TRUE))

### Warning: adjust working directory as needed for operating systems Windows, macOS, linux
if (Sys.info () ["sysname"] == "Windows") setwd ("C:/Moji Dokumenti/Corona/Podatki")
if (Sys.info () ["sysname"] == "Darwin") setwd ("/Users/matjaz/Corona/Podatki")
if (Sys.info () ["sysname"] == "Linux") setwd ("/home/matjaz/Corona/Podatki")

# Input data files (relative to the working directory set above).
covid.dfile <- "jh_covid19_data.RData"
wbank.dfile <- "jh_add_wbank_data.RData"

# Countries shown in the Slovenia-focused comparison plots.
country.selection <- c ("China", "US", "Italy", "Germany", "Austria", "Switzerland", "Croatia", "Hungary", "Singapore", "Japan", "Iceland", "Slovenia")

suppressPackageStartupMessages({
  library(tidyverse)
  library(lubridate)
  library(gghighlight)
  library(ggrepel)
})

load (file = covid.dfile) # jh_covid19_data
dta <- jh_covid19_data
load (file = wbank.dfile) # wb_cs
# I define event time zero where, for a given country, the confirmed
# cases match or exceed the Chinese case number at the beginning of the
# data so that all countries can be compared across event time.
# Also I require each country to have at least 7 days post event day 0.

# First day on which each country reached the initial Chinese case count.
dta %>%
  dplyr::group_by(country) %>%
  dplyr::filter(confirmed >= min(dta$confirmed[dta$country == "China"])) %>%
  dplyr::summarise(edate_confirmed = min(date)) -> edates_confirmed

# Event-time data set: days since event day 0, restricted to countries with
# at least 7 event days, joined with World Bank data, plus cases per 100,000.
dta %>%
  dplyr::left_join(edates_confirmed, by = "country") %>%
  dplyr::mutate(
    edate_confirmed = as.numeric(date - edate_confirmed)
  ) %>%
  dplyr::filter(edate_confirmed >= 0) %>%
  dplyr::group_by(country) %>%
  dplyr::filter (dplyr::n() >= 7) %>%
  dplyr::ungroup() %>%
  dplyr::left_join(wb_cs, by = "iso3c") %>%
  dplyr::mutate(
    confirmed_1e5pop = 1e5*confirmed/population
  ) -> df

# data selection for Slovenia and neighbourhood
# Here event time zero is fixed at 2020-03-01 for every selected country,
# and again each country needs at least 7 days of data past day 0.
dta %>%
  dplyr::group_by(country) %>%
  dplyr::filter(date >= '2020-03-01' ) %>%
  dplyr::summarise(edate_confirmed = min(date)) -> edates_confirmed_SVN

dta %>%
  dplyr::left_join(edates_confirmed_SVN, by = "country") %>%
  dplyr::filter (country %in% country.selection) %>%
  dplyr::mutate(
    edate_confirmed = as.numeric(date - edate_confirmed)
  ) %>%
  dplyr::filter(edate_confirmed >= 0) %>%
  dplyr::group_by(country) %>%
  dplyr::filter (dplyr::n() >= 7) %>%
  dplyr::ungroup() %>%
  dplyr::left_join(wb_cs, by = "iso3c") %>%
  dplyr::mutate(
    confirmed_1e5pop = 1e5*confirmed/population
  ) -> dSVN
# Caption and axis labels for the world-wide event-time plot.
lab_notes <- paste0(
  "Data as provided by Johns Hopkins University Center for Systems Science ",
  "and Engineering (JHU CSSE)\nand obtained on March 25, 2020. ",
  "The sample is limited to countries with at least seven days of positive\n",
  "event days data. Code and walk-through: https://joachim-gassen.github.io."
)

lab_x_axis_confirmed <- sprintf(paste(
  "Days since confirmed cases matched or exceeded\n",
  "initial value reported for China (%d cases)\n"
), min(dta$confirmed[dta$country == "China"]))

lab_x_axis_confirmed_SVN <- sprintf(paste(
  "Days since 2020-03-01"))

# Shared ggplot components: log10 y-axis, minimal theme, caption and direct
# country labels via gghighlight.
gg_my_blob <- list(
  scale_y_continuous(trans='log10', labels = scales::comma),
  theme_minimal(),
  theme(
    plot.title.position = "plot",
    plot.caption.position = "plot",
    plot.caption = element_text(hjust = 0),
    axis.title.x = element_text(hjust = 1),
    axis.title.y = element_text(hjust = 1),
  ),
  labs(caption = lab_notes,
       x = lab_x_axis_confirmed,
       y = "Confirmed cases (logarithmic scale)"),
  gghighlight::gghighlight(TRUE, label_key = country, use_direct_label = TRUE,
                           label_params = list(segment.color = NA, nudge_x = 1))
)

# World-wide confirmed cases over the first 40 event days (log scale).
dev.new ()
ggplot2::ggplot(df %>% filter (edate_confirmed <= 40),
                aes(x = edate_confirmed, color = country, y = confirmed)) +
  geom_line() +
  labs(
    title = "Focus on the first month: Confirmed Cases\n"
  ) +
  gg_my_blob
# add graph for Slovenia and neighbours
lab_notes_SVN <- paste0(
  "Data as provided by Johns Hopkins University Center for Systems Science ",
  "and Engineering (JHU CSSE)\nand obtained on March 25, 2020. ",
  "The selection of countries versus Slovenia.\n",
  "Code and walk-through: https://joachim-gassen.github.io. and adaptation by MJ"
)

# Same shared components as gg_my_blob, with the Slovenia caption/x-label.
gg_my_blob_SVN <- list(
  scale_y_continuous(trans='log10', labels = scales::comma),
  theme_minimal(),
  theme(
    plot.title.position = "plot",
    plot.caption.position = "plot",
    plot.caption = element_text(hjust = 0),
    axis.title.x = element_text(hjust = 1),
    axis.title.y = element_text(hjust = 1),
  ),
  labs(caption = lab_notes_SVN,
       x = lab_x_axis_confirmed_SVN,
       y = "Confirmed cases (logarithmic scale)"),
  gghighlight::gghighlight(TRUE, label_key = country, use_direct_label = TRUE,
                           label_params = list(segment.color = NA, nudge_x = 1))
)

# Selected countries: confirmed cases per 100,000 population (log scale).
# NOTE(review): the filter allows up to 200 event days although the title
# string still says "first month"; the string is kept as in the original.
dev.new ()
ggplot2::ggplot(dSVN %>% filter (edate_confirmed <= 200),
                aes(x = edate_confirmed, color = country, y = confirmed_1e5pop)) +
  geom_line() +
  labs(
    title = "Focus on the first month: Confirmed Cases per 10^5 population\n"
  ) +
  gg_my_blob_SVN
# add graph of deaths versus infected - world
lab_notes_linear <- paste0(
  "Data as provided by Johns Hopkins University Center for Systems Science ",
  "and Engineering (JHU CSSE)\nand obtained on March 25, 2020. ",
  "The sample is limited to countries with at least seven days of positive\n",
  "event days data. Code and walk-through: https://joachim-gassen.github.io. and adaptation by MJ"
)

# Same shared components as gg_my_blob, but with a linear y-axis for ratios.
gg_my_blob_linear <- list(
  scale_y_continuous(labels = scales::comma),
  theme_minimal(),
  theme(
    plot.title.position = "plot",
    plot.caption.position = "plot",
    plot.caption = element_text(hjust = 0),
    axis.title.x = element_text(hjust = 1),
    axis.title.y = element_text(hjust = 1),
  ),
  labs(caption = lab_notes_linear,
       x = lab_x_axis_confirmed,
       y = "Confirmed deaths / cases (linear scale)"),
  gghighlight::gghighlight(TRUE, label_key = country, use_direct_label = TRUE,
                           label_params = list(segment.color = NA, nudge_x = 1))
)

# World-wide ratio of cumulative deaths to cumulative confirmed cases.
dev.new ()
ggplot2::ggplot(df %>% filter (edate_confirmed <= 200),
                aes(x = edate_confirmed, color = country, y = deaths/confirmed)) +
  geom_line() +
  labs(
    title = "Focus on the first month: daily total deaths / infected ratio"
  ) +
  gg_my_blob_linear
# add graph of deaths versus infected - Slovenia and neighbours

# Linear-scale layer list for the Slovenia comparison (reuses lab_notes_SVN).
# FIX: removed the trailing comma after the last theme() argument -- it
# created an empty argument, which theme() rejects ("argument is empty").
gg_my_blob_linear_SVN <- list(
  scale_y_continuous(labels = scales::comma),
  theme_minimal(),
  theme(
    plot.title.position = "plot",
    plot.caption.position = "plot",
    plot.caption = element_text(hjust = 0),
    axis.title.x = element_text(hjust = 1),
    axis.title.y = element_text(hjust = 1)
  ),
  labs(caption = lab_notes_SVN,
       x = lab_x_axis_confirmed_SVN,
       y = "Confirmed deaths / cases (linear scale)"),
  gghighlight::gghighlight(TRUE, label_key = country, use_direct_label = TRUE,
                           label_params = list(segment.color = NA, nudge_x = 1))
)

# Plot: cumulative deaths / confirmed ratio, Slovenia and neighbours.
dev.new()
ggplot2::ggplot(dSVN %>% filter(edate_confirmed <= 200),
                aes(x = edate_confirmed, color = country, y = deaths/confirmed)) +
  geom_line() +
  labs(
    title = "Focus on the first month: daily total deaths / infected ratio"
  ) +
  gg_my_blob_linear_SVN
# remove all graph windows
# graphics.off ()
| /Podatki/R_04_vizualize_data_20200402.R | no_license | matjazjeran/Covid-19 | R | false | false | 8,032 | r | # Original code from
# Tidying the new Johns Hopkins Covid-19 time-series datasets (old code before 30.03.2020)
# March 24, 2020 in R
# https://joachim-gassen.github.io/2020/03/tidying-the-new-johns-hopkins-covid-19-datasests/
# adapted for daily use on different platforms and include graph for Slovenia
# Matjaz Jeran 02.04.2020
# visualize data
# input:
# jh_covid19_data.RData
# jh_add_wbank_data.RData
# output:
# various graphs left opened
# NOTE(review): clearing the global environment and calling setwd() inside a
# script are discouraged practices; kept as-is since the script relies on them.
rm (list = ls (all = TRUE))
### Warning: adjust working directory as needed for operating systems Windows, macOS, linux
if (Sys.info () ["sysname"] == "Windows") setwd ("C:/Moji Dokumenti/Corona/Podatki")
if (Sys.info () ["sysname"] == "Darwin") setwd ("/Users/matjaz/Corona/Podatki")
if (Sys.info () ["sysname"] == "Linux") setwd ("/home/matjaz/Corona/Podatki")
# Input files produced by the upstream download/tidy scripts.
covid.dfile <- "jh_covid19_data.RData"
wbank.dfile <- "jh_add_wbank_data.RData"
# Countries compared against Slovenia in the SVN plots below.
country.selection <- c ("China", "US", "Italy", "Germany", "Austria", "Switzerland", "Croatia", "Hungary", "Singapore", "Japan", "Iceland", "Slovenia")
suppressPackageStartupMessages({
  library(tidyverse)
  library(lubridate)
  library(gghighlight)
  library(ggrepel)
})
# Loads `jh_covid19_data` (case counts) and `wb_cs` (World Bank country data).
load (file = covid.dfile) # jh_covid19_data
dta <- jh_covid19_data
load (file = wbank.dfile) # wb_cs
# Event time zero is defined, per country, as the first date on which its
# confirmed cases matched or exceeded the Chinese case number at the start of
# the data, so all countries can be compared across event time. Each country
# must also have at least 7 days of data after event day 0.
dta %>%
  dplyr::group_by(country) %>%
  dplyr::filter(confirmed >= min(dta$confirmed[dta$country == "China"])) %>%
  dplyr::summarise(edate_confirmed = min(date)) -> edates_confirmed

# Convert calendar dates to event days, drop pre-event rows, keep countries
# with >= 7 event days, join World Bank data and derive cases per 10^5 pop.
dta %>%
  dplyr::left_join(edates_confirmed, by = "country") %>%
  dplyr::mutate(
    edate_confirmed = as.numeric(date - edate_confirmed)
  ) %>%
  dplyr::filter(edate_confirmed >= 0) %>%
  dplyr::group_by(country) %>%
  dplyr::filter (dplyr::n() >= 7) %>%
  dplyr::ungroup() %>%
  dplyr::left_join(wb_cs, by = "iso3c") %>%
  dplyr::mutate(
    confirmed_1e5pop = 1e5*confirmed/population
  ) -> df
# Data selection for Slovenia and its neighbourhood. Unlike `df` above,
# event day 0 here is simply the first date on or after 2020-03-01 (the same
# anchor date for every country), and rows are restricted to the hand-picked
# `country.selection`. Each country still needs >= 7 days of data.
dta %>%
  dplyr::group_by(country) %>%
  dplyr::filter(date >= '2020-03-01' ) %>%
  dplyr::summarise(edate_confirmed = min(date)) -> edates_confirmed_SVN

# Same event-day / per-capita derivation as for `df`, on the selected subset.
dta %>%
  dplyr::left_join(edates_confirmed_SVN, by = "country") %>%
  dplyr::filter (country %in% country.selection) %>%
  dplyr::mutate(
    edate_confirmed = as.numeric(date - edate_confirmed)
  ) %>%
  dplyr::filter(edate_confirmed >= 0) %>%
  dplyr::group_by(country) %>%
  dplyr::filter (dplyr::n() >= 7) %>%
  dplyr::ungroup() %>%
  dplyr::left_join(wb_cs, by = "iso3c") %>%
  dplyr::mutate(
    confirmed_1e5pop = 1e5*confirmed/population
  ) -> dSVN
# Caption shared by the world-wide plots.
lab_notes <- paste0(
  "Data as provided by Johns Hopkins University Center for Systems Science ",
  "and Engineering (JHU CSSE)\nand obtained on March 25, 2020. ",
  "The sample is limited to countries with at least seven days of positive\n",
  "event days data. Code and walk-through: https://joachim-gassen.github.io."
)

# X-axis label for the world plots; %d is filled with China's initial count.
lab_x_axis_confirmed <- sprintf(paste(
  "Days since confirmed cases matched or exceeded\n",
  "initial value reported for China (%d cases)\n"
), min(dta$confirmed[dta$country == "China"]))

# X-axis label for the Slovenia plots. FIX: the original wrapped this constant
# in sprintf(paste(...)), which is a no-op for a single plain string and would
# misbehave if the label ever contained a '%'.
lab_x_axis_confirmed_SVN <- "Days since 2020-03-01"
# Reusable ggplot layers for the world-wide plots: log10 y-axis, minimal
# theme, shared caption/axis labels and direct country labels via gghighlight.
# FIX: removed the trailing comma after the last theme() argument -- it
# created an empty argument, which theme() rejects ("argument is empty").
gg_my_blob <- list(
  scale_y_continuous(trans = 'log10', labels = scales::comma),
  theme_minimal(),
  theme(
    plot.title.position = "plot",
    plot.caption.position = "plot",
    plot.caption = element_text(hjust = 0),
    axis.title.x = element_text(hjust = 1),
    axis.title.y = element_text(hjust = 1)
  ),
  labs(caption = lab_notes,
       x = lab_x_axis_confirmed,
       y = "Confirmed cases (logarithmic scale)"),
  gghighlight::gghighlight(TRUE, label_key = country, use_direct_label = TRUE,
                           label_params = list(segment.color = NA, nudge_x = 1))
)

# Plot 1 (world): confirmed cases per country over the first 40 event days.
dev.new()
ggplot2::ggplot(df %>% filter(edate_confirmed <= 40),
                aes(x = edate_confirmed, color = country, y = confirmed)) +
  geom_line() +
  labs(
    title = "Focus on the first month: Confirmed Cases\n"
  ) +
  gg_my_blob
# add graph for Slovenia and neighbours

# Caption shared by the Slovenia-focused plots.
lab_notes_SVN <- paste0(
  "Data as provided by Johns Hopkins University Center for Systems Science ",
  "and Engineering (JHU CSSE)\nand obtained on March 25, 2020. ",
  "The selection of countries versus Slovenia.\n",
  "Code and walk-through: https://joachim-gassen.github.io. and adaptation by MJ"
)

# Reusable ggplot layers for the Slovenia comparison (log10 y-axis).
# FIX: removed the trailing comma after the last theme() argument -- it
# created an empty argument, which theme() rejects ("argument is empty").
gg_my_blob_SVN <- list(
  scale_y_continuous(trans = 'log10', labels = scales::comma),
  theme_minimal(),
  theme(
    plot.title.position = "plot",
    plot.caption.position = "plot",
    plot.caption = element_text(hjust = 0),
    axis.title.x = element_text(hjust = 1),
    axis.title.y = element_text(hjust = 1)
  ),
  labs(caption = lab_notes_SVN,
       x = lab_x_axis_confirmed_SVN,
       y = "Confirmed cases (logarithmic scale)"),
  gghighlight::gghighlight(TRUE, label_key = country, use_direct_label = TRUE,
                           label_params = list(segment.color = NA, nudge_x = 1))
)

# Plot: confirmed cases per 10^5 population, Slovenia and neighbours.
dev.new()
ggplot2::ggplot(dSVN %>% filter(edate_confirmed <= 200),
                aes(x = edate_confirmed, color = country, y = confirmed_1e5pop)) +
  geom_line() +
  labs(
    title = "Focus on the first month: Confirmed Cases per 10^5 population\n"
  ) +
  gg_my_blob_SVN
# add graph of deaths versus infected - world

# Caption for the linear-scale (deaths / cases ratio) plots.
lab_notes_linear <- paste0(
  "Data as provided by Johns Hopkins University Center for Systems Science ",
  "and Engineering (JHU CSSE)\nand obtained on March 25, 2020. ",
  "The sample is limited to countries with at least seven days of positive\n",
  "event days data. Code and walk-through: https://joachim-gassen.github.io. and adaptation by MJ"
)

# Same layer list as gg_my_blob, but with a linear (untransformed) y-axis.
# FIX: removed the trailing comma after the last theme() argument -- it
# created an empty argument, which theme() rejects ("argument is empty").
gg_my_blob_linear <- list(
  scale_y_continuous(labels = scales::comma),
  theme_minimal(),
  theme(
    plot.title.position = "plot",
    plot.caption.position = "plot",
    plot.caption = element_text(hjust = 0),
    axis.title.x = element_text(hjust = 1),
    axis.title.y = element_text(hjust = 1)
  ),
  labs(caption = lab_notes_linear,
       x = lab_x_axis_confirmed,
       y = "Confirmed deaths / cases (linear scale)"),
  gghighlight::gghighlight(TRUE, label_key = country, use_direct_label = TRUE,
                           label_params = list(segment.color = NA, nudge_x = 1))
)

# Plot: cumulative deaths / confirmed ratio per country (world sample).
dev.new()
ggplot2::ggplot(df %>% filter(edate_confirmed <= 200),
                aes(x = edate_confirmed, color = country, y = deaths/confirmed)) +
  geom_line() +
  labs(
    title = "Focus on the first month: daily total deaths / infected ratio"
  ) +
  gg_my_blob_linear
# add graph of deaths versus infected - Slovenia and neighbours

# Linear-scale layer list for the Slovenia comparison (reuses lab_notes_SVN).
# FIX: removed the trailing comma after the last theme() argument -- it
# created an empty argument, which theme() rejects ("argument is empty").
gg_my_blob_linear_SVN <- list(
  scale_y_continuous(labels = scales::comma),
  theme_minimal(),
  theme(
    plot.title.position = "plot",
    plot.caption.position = "plot",
    plot.caption = element_text(hjust = 0),
    axis.title.x = element_text(hjust = 1),
    axis.title.y = element_text(hjust = 1)
  ),
  labs(caption = lab_notes_SVN,
       x = lab_x_axis_confirmed_SVN,
       y = "Confirmed deaths / cases (linear scale)"),
  gghighlight::gghighlight(TRUE, label_key = country, use_direct_label = TRUE,
                           label_params = list(segment.color = NA, nudge_x = 1))
)

# Plot: cumulative deaths / confirmed ratio, Slovenia and neighbours.
dev.new()
ggplot2::ggplot(dSVN %>% filter(edate_confirmed <= 200),
                aes(x = edate_confirmed, color = country, y = deaths/confirmed)) +
  geom_line() +
  labs(
    title = "Focus on the first month: daily total deaths / infected ratio"
  ) +
  gg_my_blob_linear_SVN

# remove all graph windows
# graphics.off ()
|
### ====================
### read data functions
### ====================
## higher level read functions
## ===========================
#' Read Data function
#'
#' This is the core function to read and parse raw data from a config file.
#' At the moment only the BAM format is supported. It is not intended to be
#' used by the user directly, as it is called internally by the GenoGAMDataSet
#' constructor. However it is exported if people wish to separately assemble
#' their data and construct the GenoGAMDataSet from SummarizedExperiment
#' afterwards. It also offers the possibility to use the HDF5 backend.
#'
#' @param config A data.frame containing the experiment design of the model
#' to be computed with the first three columns fixed. See the 'experimentDesign'
#' parameter in \code{\link{GenoGAMDataSet}} or details here.
#' @param hdf5 Should the data be stored on HDD in HDF5 format? By default this
#' is disabled, as the Rle representation of count data already provides a
#' decent compression of the data. However in case of large organisms, a complex
#' experiment design or just limited memory, this might further decrease the
#' memory footprint.
#' @param split If TRUE the data will be stored as a list of DataFrames by
#' chromosome instead of one big DataFrame. This is only necessary if organisms
#' with a genome size bigger than 2^31 (approx. 2.14Gbp) are analyzed,
#' in which case Rs lack of long integers prevents having a well compressed Rle
#' of sufficient size.
#' @param settings A GenoGAMSettings object. Not needed by default, but might
#' be of use if only specific regions should be read in.
#' See \code{\link{GenoGAMSettings}}.
#' @param ... Further parameters that can be passed to low-level functions.
#' Mostly to pass arguments to custom process functions. In case the default
#' process functions are used, i.e. the default settings paramenter,
#' the most interesting parameters might be fragment length estimator method
#' from ?chipseq::estimate.mean.fraglen for single-end data.
#' @return A DataFrame of counts for each sample and position.
#' Or if split = TRUE, a list of DataFrames by chromosomes
#' @details
#' The config data.frame contains the actual experiment design. It must
#' contain at least three columns with fixed names: 'ID', 'file' and 'paired'.
#'
#' The field 'ID' stores a unique identifier for each alignment file.
#' It is recommended to use short and easy to understand identifiers because
#' they are subsequently used for labelling data and plots.
#'
#' The field 'file' stores the complete path to the BAM file.
#'
#' The field 'paired', values TRUE for paired-end sequencing data, and FALSE for
#' single-end sequencing data.
#'
#' Other columns will be ignored by this function.
#' @examples
#' # Read data
#'
#' ## Set config file
#' config <- system.file("extdata/Set1", "experimentDesign.txt", package = "GenoGAM")
#' config <- read.table(config, header = TRUE, sep = '\t', stringsAsFactors = FALSE)
#' for(ii in 1:nrow(config)) {
#' absPath <- system.file("extdata/Set1/bam", config$file[ii], package = "GenoGAM")
#' config$file[ii] <- absPath
#' }
#'
#' ## Read all data
#' df <- readData(config)
#' df
#'
#' ## Read data of a particular chromosome
#' settings <- GenoGAMSettings(chromosomeList = "chrI")
#' df <- readData(config, settings = settings)
#' df
#'
#' ## Read data of particular range
#' region <- GenomicRanges::GRanges("chrI", IRanges(10000, 20000))
#' params <- Rsamtools::ScanBamParam(which = region)
#' settings <- GenoGAMSettings(bamParams = params)
#' df <- readData(config, settings = settings)
#' df
#' @author Georg Stricker \email{georg.stricker@@in.tum.de}
#' @export
readData <- function(config, hdf5 = FALSE, split = FALSE,
                     settings = GenoGAMSettings(), ...) {

    futile.logger::flog.info("Reading in data")
    fixedInput <- paste0("Using the following parameters:\n",
                         " Split: ", split, "\n",
                         " HDF5: ", hdf5, "\n")
    futile.logger::flog.debug(fixedInput)
    futile.logger::flog.debug(show(list(...)))
    futile.logger::flog.debug(show(settings))
    args <- list(...)

    ## Determine chromosome lengths from the first BAM header and restrict
    ## them to the chromosomes requested in the settings (if any).
    header <- Rsamtools::scanBamHeader(config$file[1])
    chromosomeLengths <- header[[1]]$targets
    chromosomeList <- slot(settings, "chromosomeList")
    if(!is.null(chromosomeList)) {
        chromosomeLengths <- chromosomeLengths[names(chromosomeLengths) %in% chromosomeList]
        if(length(chromosomeLengths) == 0) {
            futile.logger::flog.error("The data does not match the region specification in the bamParams settings.")
            return(S4Vectors::DataFrame())
        }
    }

    ## Fall back to the default process function if none was supplied.
    center <- slot(settings, "center")
    processFun <- slot(settings, "processFunction")
    if(is.null(processFun)) {
        slot(settings, "processFunction") <- .processCountChunks
        processFun <- slot(settings, "processFunction")
    }
    params <- slot(settings, "bamParams")

    ## Read in each sample of the experiment design.
    ## FIX: seq_len() instead of 1:nrow() -- 1:nrow(config) would iterate over
    ## c(1, 0) for an empty config instead of not iterating at all.
    rawData <- lapply(seq_len(nrow(config)), function(ii) {
        if(!is.null(center)) {
            ## local copy of args; only forwarded to the process function
            args <- c(args, center = center)
        }
        futile.logger::flog.info(paste("Reading in", config$ID[ii]))
        futile.logger::flog.debug(paste(config$ID[ii], "is located at", config$file[ii],
                                        "and is paired end =", config$paired[ii]))
        res <- do.call(.readRawData, c(list(path = config$file[ii], processFUN = processFun,
                                            asMates = config$paired[ii], params = params,
                                            chromosomeLengths = chromosomeLengths), args))
        return(res)
    })

    ## Combine the per-chromosome coverage of each sample into one column of
    ## the resulting DataFrame (one column per sample ID).
    names(rawData) <- config$ID
    ans <- lapply(rawData, unlist)
    names(ans) <- names(rawData)
    ans <- S4Vectors::DataFrame(ans)
    futile.logger::flog.info("Finished reading in data")
    return(ans)
}
## The intermediate function calling the correct
## function to read in data based on the suffix and
## computing the coverage from the GRanges output.
## Returns a list of coverage Rles per chromosome
## Dispatch to the correct reader based on the file suffix, then turn the
## returned per-chromosome GRanges into coverage Rles. Returns a list of
## coverage Rles per chromosome, trimmed to the requested regions (if any).
.readRawData <- function(path, ...) {
    elements <- strsplit(path, "\\.")[[1]]
    suffix <- elements[length(elements)]

    res <- NULL
    if (suffix == "bam") {
        futile.logger::flog.debug(paste(path, "identified as BAM file"))
        res <- .readBAM(path = path, ...)
    }
    ## for the time being throw error if no BAM file
    else {
        stop("The input file doesn't seem to be in BAM format. At the moment only BAM format is supported")
    }

    ## compute coverage over all chromosomes actually in use
    grl <- GenomicRanges::GRangesList(res)
    GenomeInfoDb::seqlevels(grl) <- GenomeInfoDb::seqlevelsInUse(grl)
    coverageRle <- IRanges::coverage(grl)
    GenomeInfoDb::seqlengths(coverageRle) <- GenomeInfoDb::seqlengths(grl)

    ## If specific regions were requested, restrict the coverage to them.
    ## Extract 'params' from ... once instead of re-building the list twice.
    params <- list(...)$params
    if(length(Rsamtools::bamWhich(params)) != 0) {
        ## FIX: namespace seqlevels() explicitly, consistent with the rest of
        ## the file (the bare call relied on GenomeInfoDb being attached).
        chroms <- GenomeInfoDb::seqlevels(coverageRle)
        regions <- Rsamtools::bamWhich(params)
        for(chr in chroms) {
            coverageRle[[chr]] <- coverageRle[[chr]][regions[[chr]]]
        }
    }
    return(coverageRle)
}
## Low level read in functions
## ===========================
#' A function to read BAM files.
#'
#' The functions reads in BAM files and processes them according to a given
#' process function. It returns a list of GRanges, one element per chromosome.
#'
#' @param path A character object indicating the path to the BAM file.
#' @param indexFile A character object indicating the path to the BAM Index
#' file. By default it is assumed to be in the same directory as the BAM
#' file.
#' @param processFUN A function specifying how to process the raw data.
#' @param chromosomeLengths A named numeric vector of chromosome lengths.
#' @param params A 'ScanBamParam' object defining the parameters to be used.
#' If NULL 'what' is defined as the columns 'pos' and 'qwidth'. 'Which' is
#' set according to the chunkSize parameter, but at most covers the entire
#' chromosome.
#' @param asMates A logical value indicating if mates are present in the BAM
#' file (paired-end) or not (single-end mostly).
#' @param ... Further parameters that are passed to the process functions.
#' @return A GRanges list of intervals
#' @noRd
.readBAM <- function(path, indexFile = path, processFUN, asMates,
                     chromosomeLengths, params = NULL, ...){

    ## stop if package for shifting in case of single-end data is not installed
    if(!asMates) {
        if(!requireNamespace("chipseq", quietly = TRUE)) {
            stop("The 'chipseq' package is required to estimate the fragment
length of single-end data. Please install it from Bioconductor or use
asMates = TRUE for paired-end data", call. = FALSE)
        }
    }

    args <- list(...)
    if(is.null(params)) {
        params <- Rsamtools::ScanBamParam(what = c("pos", "qwidth"))
    }

    ## Build the GRanges of regions to read: either the 'which' regions from
    ## the params (trimmed/reduced), or whole chromosomes.
    if(length(Rsamtools::bamWhich(params)) != 0) {
        regions <- GenomicRanges::GRanges(Rsamtools::bamWhich(params))
        lengths <- chromosomeLengths[GenomeInfoDb::seqlevels(regions)]
        suppressWarnings(GenomeInfoDb::seqlengths(regions) <- lengths)
        regions <- GenomicRanges::trim(regions)
        regions <- GenomicRanges::reduce(regions)
    }
    else {
        starts <- rep(1, length(chromosomeLengths))
        ends <- chromosomeLengths
        regions <- GenomicRanges::GRanges(names(chromosomeLengths),
                                          IRanges::IRanges(starts, ends))
    }
    futile.logger::flog.debug("The following regions will be read in:")
    futile.logger::flog.debug(show(regions))

    ## Worker: read one chromosome and apply the process function.
    .local <- function(chromName, chromosomeCoords, asMates, path, indexFile,
                       params, processFUN, args) {
        ## load package for SnowParam or BatchJobs backend
        suppressPackageStartupMessages(require(GenoGAM, quietly = TRUE))

        ## FIX: use the chromosomeCoords parameter instead of the free
        ## variable 'regions'. The free variable relied on the closure
        ## environment, which is not available on BiocParallel backends that
        ## serialize the worker function to separate processes.
        coords <- chromosomeCoords[GenomeInfoDb::seqnames(chromosomeCoords) == chromName,]
        Rsamtools::bamWhich(params) <- coords
        if (asMates) {
            reads <- GenomicAlignments::readGAlignmentPairs(path, index = indexFile,
                                                            param = params)
        }
        else {
            reads <- GenomicAlignments::readGAlignments(path, index = indexFile,
                                                        param = params)
        }
        ## FIX: namespace GRanges() explicitly for consistency; the bare call
        ## relied on GenomicRanges being attached on the worker.
        if(length(reads) == 0L) return(GenomicRanges::GRanges())

        ## ensure sequence levels are correct
        GenomeInfoDb::seqlevels(reads, pruning.mode="coarse") <- GenomeInfoDb::seqlevelsInUse(reads)
        ans <- do.call(processFUN, c(list(reads), args))
        return(ans)
    }

    ## run BAM reading in parallel, one job per chromosome
    res <- BiocParallel::bplapply(names(chromosomeLengths), .local,
                                  chromosomeCoords = regions, asMates = asMates,
                                  path = path, indexFile = indexFile, params = params,
                                  processFUN = processFUN, args = args)
    return(res)
}
## Process functions for chunks of data
## ====================================
#' Processing raw data and convert to GRanges of intervals
#'
#' @param chunk An object of type GAlignments or GAlignmentPairs
#' @param center Logical value indicating if the fragments should be centered
#' or not. If centered, the fragment is reduced to only one data point,
#' which is the center of the fragment. If center = FALSE, then the complete
#' fragment is taken to compute the coverage. In single-end cases the
#' fragment size is estimated by one of the three methods: coverage,
#' correlation or SISSR. See ?chipseq::estimate.mean.fraglen.
#' @return A GRanges object of intervals.
#' @noRd
.processCountChunks <- function(chunk, center, ...) {
    ## NOTE(review): switch() on class() assumes a length-one class vector;
    ## for any other class 'paired' becomes NULL and the if() below errors.
    paired <- switch(class(chunk),
                     GAlignmentPairs = TRUE,
                     GAlignments = FALSE)

    ## full extent of each chromosome, used to bound the fragments below
    coords <- GenomicRanges::GRanges(GenomeInfoDb::seqlevels(chunk), IRanges::IRanges(1, GenomeInfoDb::seqlengths(chunk)))

    ## correct for unspecified reads and chunks containing only those
    if(!paired) {
        chunk <- chunk[IRanges::width(chunk) <= 2*median(IRanges::width(chunk))]
        strands <- droplevels(GenomicRanges::strand(chunk))
        if(!all(c("+", "-") %in% levels(strands))) {
            return(NULL)
        }
    }

    ## either collapse fragments to their centers or keep the full extent
    if(center) {
        fragments <- .centerFragments(chunk, asMates = paired, ...)
    }
    else {
        fragments <- .countFragments(chunk, asMates = paired, ...)
    }

    ## keep only fragments that lie within the chromosome bounds
    ## (assumes a single seqlevel per chunk, as produced by .readBAM)
    validFragments <- fragments[IRanges::start(fragments) >= IRanges::start(coords) &
                                IRanges::end(fragments) <= IRanges::end(coords)]
    ## FIX: return the filtered fragments; the original returned the
    ## unfiltered 'fragments', making the validity filter dead code.
    return(validFragments)
}
#' A function to center fragments.
#'
#' This functions centers fragments with regard to the strand and returns a
#' GRanges object with centered ranges, that is ranges of width one.
#'
#' @param reads A GAlignment object as returned by the 'readGAlignments'
#' functions.
#' @param asMates A logical value indicating if mates are present in the BAM
#' file (paired-end) or not (single-end mostly).
#' @param shiftMethods The method to be used when estimating the fragment
#' length for single-end reads (see ?chipseq::estimate.mean.fraglen).
#' Other methods are 'SISSR' and 'correlation'.
#' @param ... Further parameters that can be passed on to
#' chipseq::estimate.mean.fraglen.
#' @return A GRanges object of centered fragments.
#' @noRd
.centerFragments <- function(reads, asMates, shiftMethod = c("coverage", "correlation", "SISSR"), ...) {
    if(is.null(reads)) return(NULL)
    ## split by strand so paired and single-end handling can shift per strand
    plusStrand <- reads[GenomicRanges::strand(reads) == "+",]
    negStrand <- reads[GenomicRanges::strand(reads) == "-",]
    if (asMates) {
        ## paired-end: the fragment is fully defined, take its midpoint
        plusMidpoints <- .getPairedCenters(plusStrand)
        negMidpoints <- .getPairedCenters(negStrand)
    }
    else {
        ## estimate fragment length for shift
        granges_reads <- GenomicRanges::granges(reads)
        fraglen <- chipseq::estimate.mean.fraglen(granges_reads, method =
                                                      match.arg(shiftMethod),
                                                  ...)
        ## Chained replacement assignments are order-dependent: for '+' the
        ## end is set before the start (new end >= old start, so the width
        ## never goes negative in between); the '-' branch mirrors this.
        ## NOTE(review): fraglen/2 may be fractional -- presumably IRanges
        ## coerces the positions; confirm no off-by-one is introduced.
        plusMidpoints <- GenomicRanges::granges(plusStrand)
        IRanges::start(plusMidpoints) <- IRanges::end(plusMidpoints) <- IRanges::start(plusMidpoints) + fraglen/2
        negMidpoints <- GenomicRanges::granges(negStrand)
        IRanges::end(negMidpoints) <- IRanges::start(negMidpoints) <- IRanges::end(negMidpoints) - fraglen/2
    }
    ## merge both strands back into one sorted GRanges of width-1 midpoints
    midpoints <- sort(c(plusMidpoints,negMidpoints))
    return(midpoints)
}
#' A function to get the fragment centers
#'
#' @param chunk A GAligmnents or GAlignmentPairs object.
#' @return A GRanges object of centered fragments, that is of lenght one
#' @noRd
## Collapse each paired-end fragment to its midpoint and drop fragments that
## are implausibly long (more than twice the median fragment size), which
## indicates wrongly mapped read pairs.
.getPairedCenters <- function(chunk) {
    if(length(chunk) == 0) return(GenomicRanges::GRanges())

    fragments <- GenomicRanges::GRanges(chunk)
    sizes <- IRanges::width(fragments)

    ## shrink every range to its (possibly fractional) center position;
    ## the end is assigned first, then the start, matching IRanges semantics
    centers <- (IRanges::start(fragments) + IRanges::end(fragments))/2
    IRanges::start(fragments) <- IRanges::end(fragments) <- centers

    ## filter for artefacts caused by wrongly mapped reads
    sizeLimit <- 2*median(sizes)
    tooLong <- sizes > sizeLimit
    futile.logger::flog.debug(paste(sum(tooLong), "dropped due to maximum allowed fragment size of:",
                                    sizeLimit, "on the following GRange:"))
    futile.logger::flog.debug(show(fragments))
    if (any(tooLong)) {
        fragments <- fragments[!tooLong]
    }
    return(fragments)
}
#' Extracting fragments given reads from a BAM file.
#'
#' @param reads A GAligments or GAlignmenPairs object as returned by the
#' 'readGAlignments' functions.
#' @param asMates A logical value indicating if mates are present in the BAM
#' file (paired-end) or not (single-end mostly)
#' @param shiftMethod The method to be used when estimating the fragment
#' length for single-end reads (see ?chipseq::estimate.mean.fraglen). Other
#' methods are 'SISSR' and 'correlation'.
#' @param ... Further parameters that can be passed on to.
#' chipseq::estimate.mean.fraglen.
#' @return A GRanges object of full fragments
#' @noRd
.countFragments <- function(reads, asMates, shiftMethod =
                                c("coverage", "correlation", "SISSR"), ...) {
    if(is.null(reads)) return(NULL)
    ## split by strand: single-end reads must be extended in strand direction
    plusStrand <- reads[GenomicRanges::strand(reads) == "+",]
    negStrand <- reads[GenomicRanges::strand(reads) == "-",]
    if (asMates) {
        ## paired-end: the full fragment is known, just sanity-filter it
        plusFragments <- .getFragments(plusStrand)
        negFragments <- .getFragments(negStrand)
    }
    else {
        ## single-end: estimate the mean fragment length, then extend each
        ## read to that length in the direction of its strand
        granges_reads <- GenomicRanges::granges(reads)
        fraglen <- chipseq::estimate.mean.fraglen(granges_reads, method =
                                                      match.arg(shiftMethod),
                                                  ...)
        ## NOTE(review): setting end <- start + fraglen yields a width of
        ## fraglen + 1 (IRanges widths are inclusive) -- possibly a subtle
        ## off-by-one versus the intended fragment length; confirm intent.
        plusFragments <- GenomicRanges::granges(plusStrand)
        IRanges::end(plusFragments) <- IRanges::start(plusFragments) + fraglen
        negFragments <- GenomicRanges::granges(negStrand)
        IRanges::start(negFragments) <- IRanges::end(negFragments) - fraglen
    }
    ## merge both strands back into one sorted GRanges of full fragments
    allFragments <- sort(c(plusFragments,negFragments))
    return(allFragments)
}
#' A function to get the fragments
#'
#' @param chunk A GAligmnents or GAlignmentPairs object.
#' @return A GRanges object of fragments
#' @noRd
## Convert paired-end alignments to full fragment ranges, dropping fragments
## longer than twice the median size (artefacts of wrongly mapped pairs).
.getFragments <- function(chunk){
    if(length(chunk) == 0) return(GenomicRanges::GRanges())

    fragments <- GenomicRanges::GRanges(chunk)
    sizeLimit <- 2*median(IRanges::width(fragments))
    tooLong <- IRanges::width(fragments) > sizeLimit
    futile.logger::flog.debug(paste(sum(tooLong), "dropped due to maximum allowed fragment size of:",
                                    sizeLimit, "on the following GRange:"))
    futile.logger::flog.debug(show(fragments))
    if (any(tooLong)) {
        fragments <- fragments[!tooLong]
    }
    return(fragments)
}
| /R/readData.R | no_license | gstricker/GenoGAM | R | false | false | 18,223 | r | ### ====================
### read data functions
### ====================
## higher level read functions
## ===========================
#' Read Data function
#'
#' This is the core function to read and parse raw data from a config file.
#' At the moment only the BAM format is supported. It is not intended to be
#' used by the user directly, as it is called internally by the GenoGAMDataSet
#' constructor. However it is exported if people wish to separately assemble
#' their data and construct the GenoGAMDataSet from SummarizedExperiment
#' afterwards. It also offers the possibility to use the HDF5 backend.
#'
#' @param config A data.frame containing the experiment design of the model
#' to be computed with the first three columns fixed. See the 'experimentDesign'
#' parameter in \code{\link{GenoGAMDataSet}} or details here.
#' @param hdf5 Should the data be stored on HDD in HDF5 format? By default this
#' is disabled, as the Rle representation of count data already provides a
#' decent compression of the data. However in case of large organisms, a complex
#' experiment design or just limited memory, this might further decrease the
#' memory footprint.
#' @param split If TRUE the data will be stored as a list of DataFrames by
#' chromosome instead of one big DataFrame. This is only necessary if organisms
#' with a genome size bigger than 2^31 (approx. 2.14Gbp) are analyzed,
#' in which case Rs lack of long integers prevents having a well compressed Rle
#' of sufficient size.
#' @param settings A GenoGAMSettings object. Not needed by default, but might
#' be of use if only specific regions should be read in.
#' See \code{\link{GenoGAMSettings}}.
#' @param ... Further parameters that can be passed to low-level functions.
#' Mostly to pass arguments to custom process functions. In case the default
#' process functions are used, i.e. the default settings paramenter,
#' the most interesting parameters might be fragment length estimator method
#' from ?chipseq::estimate.mean.fraglen for single-end data.
#' @return A DataFrame of counts for each sample and position.
#' Or if split = TRUE, a list of DataFrames by chromosomes
#' @details
#' The config data.frame contains the actual experiment design. It must
#' contain at least three columns with fixed names: 'ID', 'file' and 'paired'.
#'
#' The field 'ID' stores a unique identifier for each alignment file.
#' It is recommended to use short and easy to understand identifiers because
#' they are subsequently used for labelling data and plots.
#'
#' The field 'file' stores the complete path to the BAM file.
#'
#' The field 'paired', values TRUE for paired-end sequencing data, and FALSE for
#' single-end sequencing data.
#'
#' Other columns will be ignored by this function.
#' @examples
#' # Read data
#'
#' ## Set config file
#' config <- system.file("extdata/Set1", "experimentDesign.txt", package = "GenoGAM")
#' config <- read.table(config, header = TRUE, sep = '\t', stringsAsFactors = FALSE)
#' for(ii in 1:nrow(config)) {
#' absPath <- system.file("extdata/Set1/bam", config$file[ii], package = "GenoGAM")
#' config$file[ii] <- absPath
#' }
#'
#' ## Read all data
#' df <- readData(config)
#' df
#'
#' ## Read data of a particular chromosome
#' settings <- GenoGAMSettings(chromosomeList = "chrI")
#' df <- readData(config, settings = settings)
#' df
#'
#' ## Read data of particular range
#' region <- GenomicRanges::GRanges("chrI", IRanges(10000, 20000))
#' params <- Rsamtools::ScanBamParam(which = region)
#' settings <- GenoGAMSettings(bamParams = params)
#' df <- readData(config, settings = settings)
#' df
#' @author Georg Stricker \email{georg.stricker@@in.tum.de}
#' @export
readData <- function(config, hdf5 = FALSE, split = FALSE,
                     settings = GenoGAMSettings(), ...) {

    futile.logger::flog.info("Reading in data")
    fixedInput <- paste0("Using the following parameters:\n",
                         " Split: ", split, "\n",
                         " HDF5: ", hdf5, "\n")
    futile.logger::flog.debug(fixedInput)
    futile.logger::flog.debug(show(list(...)))
    futile.logger::flog.debug(show(settings))
    args <- list(...)

    ## Determine chromosome lengths from the first BAM header and restrict
    ## them to the chromosomes requested in the settings (if any).
    header <- Rsamtools::scanBamHeader(config$file[1])
    chromosomeLengths <- header[[1]]$targets
    chromosomeList <- slot(settings, "chromosomeList")
    if(!is.null(chromosomeList)) {
        chromosomeLengths <- chromosomeLengths[names(chromosomeLengths) %in% chromosomeList]
        if(length(chromosomeLengths) == 0) {
            futile.logger::flog.error("The data does not match the region specification in the bamParams settings.")
            return(S4Vectors::DataFrame())
        }
    }

    ## Fall back to the default process function if none was supplied.
    center <- slot(settings, "center")
    processFun <- slot(settings, "processFunction")
    if(is.null(processFun)) {
        slot(settings, "processFunction") <- .processCountChunks
        processFun <- slot(settings, "processFunction")
    }
    params <- slot(settings, "bamParams")

    ## Read in each sample of the experiment design.
    ## FIX: seq_len() instead of 1:nrow() -- 1:nrow(config) would iterate over
    ## c(1, 0) for an empty config instead of not iterating at all.
    rawData <- lapply(seq_len(nrow(config)), function(ii) {
        if(!is.null(center)) {
            ## local copy of args; only forwarded to the process function
            args <- c(args, center = center)
        }
        futile.logger::flog.info(paste("Reading in", config$ID[ii]))
        futile.logger::flog.debug(paste(config$ID[ii], "is located at", config$file[ii],
                                        "and is paired end =", config$paired[ii]))
        res <- do.call(.readRawData, c(list(path = config$file[ii], processFUN = processFun,
                                            asMates = config$paired[ii], params = params,
                                            chromosomeLengths = chromosomeLengths), args))
        return(res)
    })

    ## Combine the per-chromosome coverage of each sample into one column of
    ## the resulting DataFrame (one column per sample ID).
    names(rawData) <- config$ID
    ans <- lapply(rawData, unlist)
    names(ans) <- names(rawData)
    ans <- S4Vectors::DataFrame(ans)
    futile.logger::flog.info("Finished reading in data")
    return(ans)
}
## The intermediate function calling the correct
## function to read in data based on the suffix and
## computing the coverage from the GRanges output.
## Returns a list of coverage Rles per chromosome
## Dispatch to the correct reader based on the file suffix, then turn the
## returned per-chromosome GRanges into coverage Rles. Returns a list of
## coverage Rles per chromosome, trimmed to the requested regions (if any).
.readRawData <- function(path, ...) {
    elements <- strsplit(path, "\\.")[[1]]
    suffix <- elements[length(elements)]

    res <- NULL
    if (suffix == "bam") {
        futile.logger::flog.debug(paste(path, "identified as BAM file"))
        res <- .readBAM(path = path, ...)
    }
    ## for the time being throw error if no BAM file
    else {
        stop("The input file doesn't seem to be in BAM format. At the moment only BAM format is supported")
    }

    ## compute coverage over all chromosomes actually in use
    grl <- GenomicRanges::GRangesList(res)
    GenomeInfoDb::seqlevels(grl) <- GenomeInfoDb::seqlevelsInUse(grl)
    coverageRle <- IRanges::coverage(grl)
    GenomeInfoDb::seqlengths(coverageRle) <- GenomeInfoDb::seqlengths(grl)

    ## If specific regions were requested, restrict the coverage to them.
    ## Extract 'params' from ... once instead of re-building the list twice.
    params <- list(...)$params
    if(length(Rsamtools::bamWhich(params)) != 0) {
        ## FIX: namespace seqlevels() explicitly, consistent with the rest of
        ## the file (the bare call relied on GenomeInfoDb being attached).
        chroms <- GenomeInfoDb::seqlevels(coverageRle)
        regions <- Rsamtools::bamWhich(params)
        for(chr in chroms) {
            coverageRle[[chr]] <- coverageRle[[chr]][regions[[chr]]]
        }
    }
    return(coverageRle)
}
## Low level read in functions
## ===========================
#' A function to read BAM files.
#'
#' The functions reads in BAM files and processes them according to a given
#' process function. It returns a list of GRanges, one element per chromosome.
#'
#' @param path A character object indicating the path to the BAM file.
#' @param indexFile A character object indicating the path to the BAM Index
#' file. By default it is assumed to be in the same directory as the BAM
#' file.
#' @param processFUN A function specifying how to process the raw data.
#' @param chromosomeLengths A named numeric vector of chromosome lengths.
#' @param params A 'ScanBamParam' object defining the parameters to be used.
#' If NULL 'what' is defined as the columns 'pos' and 'qwidth'. 'Which' is
#' set according to the chunkSize parameter, but at most covers the entire
#' chromosome.
#' @param asMates A logical value indicating if mates are present in the BAM
#' file (paired-end) or not (single-end mostly).
#' @param ... Further parameters that are passed to the process functions.
#' @return A GRanges list of intervals
#' @noRd
.readBAM <- function(path, indexFile = path, processFUN, asMates,
                     chromosomeLengths, params = NULL, ...){
    ## stop if package for shifting in case of single-end data is not installed
    if(!asMates) {
        if(!requireNamespace("chipseq", quietly = TRUE)) {
            stop("The 'chipseq' package is required to estimate the fragment
            length of single-end data. Please install it from Bioconductor or use
            asMates = TRUE for paired-end data", call. = FALSE)
        }
    }
    args <- list(...)
    if(is.null(params)) {
        params <- Rsamtools::ScanBamParam(what = c("pos", "qwidth"))
    }
    ## convert which params to GRanges; clip the requested regions to the
    ## chromosome lengths and merge overlaps
    if(length(Rsamtools::bamWhich(params)) != 0) {
        regions <- GenomicRanges::GRanges(Rsamtools::bamWhich(params))
        lengths <- chromosomeLengths[GenomeInfoDb::seqlevels(regions)]
        suppressWarnings(GenomeInfoDb::seqlengths(regions) <- lengths)
        regions <- GenomicRanges::trim(regions)
        regions <- GenomicRanges::reduce(regions)
    }
    else {
        ## no 'which' given: read every chromosome completely
        starts <- rep(1, length(chromosomeLengths))
        ends <- chromosomeLengths
        regions <- GenomicRanges::GRanges(names(chromosomeLengths),
                                          IRanges::IRanges(starts, ends))
    }
    futile.logger::flog.debug("The following regions will be read in:")
    futile.logger::flog.debug(show(regions))
    .local <- function(chromName, chromosomeCoords, asMates, path, indexFile,
                       params, processFUN, args) {
        ## load package for SnowParam or BatchJobs backend
        suppressPackageStartupMessages(require(GenoGAM, quietly = TRUE))
        ## BUGFIX: use the 'chromosomeCoords' argument rather than the free
        ## variable 'regions' from the enclosing frame -- free variables are
        ## not exported to workers on all BiocParallel backends.
        coords <- chromosomeCoords[GenomeInfoDb::seqnames(chromosomeCoords) == chromName,]
        Rsamtools::bamWhich(params) <- coords
        if (asMates) {
            reads <- GenomicAlignments::readGAlignmentPairs(path, index = indexFile,
                                                            param = params)
        }
        else {
            reads <- GenomicAlignments::readGAlignments(path, index = indexFile,
                                                        param = params)
        }
        ## qualified constructor: GenomicRanges may not be attached on workers
        if(length(reads) == 0L) return(GenomicRanges::GRanges())
        ## ensure sequence levels are correct
        GenomeInfoDb::seqlevels(reads, pruning.mode="coarse") <- GenomeInfoDb::seqlevelsInUse(reads)
        ans <- do.call(processFUN, c(list(reads), args))
        return(ans)
    }
    ## run BAM reading in parallel, one task per chromosome.
    res <- BiocParallel::bplapply(names(chromosomeLengths), .local,
                                  chromosomeCoords = regions, asMates = asMates,
                                  path = path, indexFile = indexFile, params = params,
                                  processFUN = processFUN, args = args)
    return(res)
}
## Process functions for chunks of data
## ====================================
#' Processing raw data and convert to GRanges of intervals
#'
#' @param chunk An object of type GAlignments or GAlignmentPairs
#' @param center Logical value indicating if the fragments should be centered
#'   or not. If centered, the fragment is reduced to only one data point,
#'   which is the center of the fragment. If center = FALSE, then the complete
#'   fragment is taken to compute the coverage. In single-end cases the
#'   fragment size is estimated by one of the three methods: coverage,
#'   correlation or SISSR. See ?chipseq::estimate.mean.fraglen.
#' @param ... Further parameters passed on to the centering/counting helpers.
#' @return A GRanges object of intervals.
#' @noRd
.processCountChunks <- function(chunk, center, ...) {
    ## paired-end data arrives as GAlignmentPairs, single-end as GAlignments
    paired <- switch(class(chunk),
                     GAlignmentPairs = TRUE,
                     GAlignments = FALSE)
    ## full chromosome extents, used below to clip out-of-bounds fragments
    coords <- GenomicRanges::GRanges(GenomeInfoDb::seqlevels(chunk), IRanges::IRanges(1, GenomeInfoDb::seqlengths(chunk)))
    ## correct for unspecified reads and chunks containing only those
    if(!paired) {
        chunk <- chunk[IRanges::width(chunk) <= 2*median(IRanges::width(chunk))]
        strands <- droplevels(GenomicRanges::strand(chunk))
        if(!all(c("+", "-") %in% levels(strands))) {
            return(NULL)
        }
    }
    if(center) {
        fragments <- .centerFragments(chunk, asMates = paired, ...)
    }
    else {
        fragments <- .countFragments(chunk, asMates = paired, ...)
    }
    ## keep only fragments lying completely within the chromosome bounds.
    ## NOTE(review): with multiple seqlevels the start/end vectors of 'coords'
    ## are recycled against the fragments -- confirm single-chromosome input.
    validFragments <- fragments[IRanges::start(fragments) >= IRanges::start(coords) &
                                IRanges::end(fragments) <= IRanges::end(coords)]
    ## BUGFIX: previously the unfiltered 'fragments' object was returned,
    ## silently discarding the boundary filtering computed above.
    return(validFragments)
}
#' A function to center fragments.
#'
#' This functions centers fragments with regard to the strand and returns a
#' GRanges object with centered ranges, that is ranges of width one.
#'
#' @param reads A GAlignment object as returned by the 'readGAlignments'
#' functions.
#' @param asMates A logical value indicating if mates are present in the BAM
#' file (paired-end) or not (single-end mostly).
#' @param shiftMethod The method to be used when estimating the fragment
#' length for single-end reads (see ?chipseq::estimate.mean.fraglen).
#' Other methods are 'SISSR' and 'correlation'.
#' @param ... Further parameters that can be passed on to
#' chipseq::estimate.mean.fraglen.
#' @return A GRanges object of centered fragments.
#' @noRd
.centerFragments <- function(reads, asMates, shiftMethod = c("coverage", "correlation", "SISSR"), ...) {
    if(is.null(reads)) return(NULL)
    ## handle the two strands separately: the shift direction depends on strand
    plusStrand <- reads[GenomicRanges::strand(reads) == "+",]
    negStrand <- reads[GenomicRanges::strand(reads) == "-",]
    if (asMates) {
        ## paired-end: the mate pair defines the fragment, take its midpoint
        plusMidpoints <- .getPairedCenters(plusStrand)
        negMidpoints <- .getPairedCenters(negStrand)
    }
    else {
        ## estimate fragment length for shift
        granges_reads <- GenomicRanges::granges(reads)
        fraglen <- chipseq::estimate.mean.fraglen(granges_reads, method =
                                                      match.arg(shiftMethod),
                                                  ...)
        ## shift the 5' end by half the estimated fragment length and collapse
        ## to a width-one range; the chained assignment intentionally sets
        ## both boundaries to the same value (end is assigned first).
        plusMidpoints <- GenomicRanges::granges(plusStrand)
        IRanges::start(plusMidpoints) <- IRanges::end(plusMidpoints) <- IRanges::start(plusMidpoints) + fraglen/2
        negMidpoints <- GenomicRanges::granges(negStrand)
        IRanges::end(negMidpoints) <- IRanges::start(negMidpoints) <- IRanges::end(negMidpoints) - fraglen/2
    }
    midpoints <- sort(c(plusMidpoints,negMidpoints))
    return(midpoints)
}
#' Compute the center point of each paired-end fragment.
#'
#' @param chunk A GAlignments or GAlignmentPairs object.
#' @return A GRanges object of width-one ranges, one per retained fragment.
#' @noRd
.getPairedCenters <- function(chunk) {
    if(length(chunk) == 0) return(GenomicRanges::GRanges())
    fragments <- GenomicRanges::GRanges(chunk)
    sizes <- IRanges::width(fragments)
    ## collapse each fragment to its (possibly fractional) midpoint; the
    ## chained assignment sets both boundaries to the same value
    centers <- (IRanges::start(fragments) + IRanges::end(fragments))/2
    IRanges::start(fragments) <- IRanges::end(fragments) <- centers
    ## filter for artefacts because of wrongly mapped reads: anything wider
    ## than twice the median fragment size is considered spurious
    sizeLimit <- 2*median(sizes)
    tooLarge <- which(sizes > sizeLimit)
    futile.logger::flog.debug(paste(length(tooLarge), "dropped due to maximum allowed fragment size of:",
                                    sizeLimit, "on the following GRange:"))
    futile.logger::flog.debug(show(fragments))
    if (length(tooLarge) > 0) {
        fragments <- fragments[-tooLarge]
    }
    return(fragments)
}
#' Extracting fragments given reads from a BAM file.
#'
#' @param reads A GAlignments or GAlignmentPairs object as returned by the
#' 'readGAlignments' functions.
#' @param asMates A logical value indicating if mates are present in the BAM
#' file (paired-end) or not (single-end mostly)
#' @param shiftMethod The method used by chipseq::estimate.mean.fraglen to
#' estimate the fragment length of single-end reads. Other methods are
#' 'SISSR' and 'correlation'.
#' @param ... Further parameters that can be passed on to
#' chipseq::estimate.mean.fraglen.
#' @return A GRanges object of full fragments
#' @noRd
.countFragments <- function(reads, asMates, shiftMethod =
                                c("coverage", "correlation", "SISSR"), ...) {
    if(is.null(reads)) return(NULL)
    plusReads <- reads[GenomicRanges::strand(reads) == "+",]
    minusReads <- reads[GenomicRanges::strand(reads) == "-",]
    if (asMates) {
        ## paired-end: the mate pair directly defines the fragment
        plusFragments <- .getFragments(plusReads)
        minusFragments <- .getFragments(minusReads)
    }
    else {
        ## single-end: extend each read downstream (with respect to its
        ## strand) by the estimated mean fragment length
        fraglen <- chipseq::estimate.mean.fraglen(GenomicRanges::granges(reads),
                                                  method = match.arg(shiftMethod),
                                                  ...)
        plusFragments <- GenomicRanges::granges(plusReads)
        IRanges::end(plusFragments) <- IRanges::start(plusFragments) + fraglen
        minusFragments <- GenomicRanges::granges(minusReads)
        IRanges::start(minusFragments) <- IRanges::end(minusFragments) - fraglen
    }
    return(sort(c(plusFragments, minusFragments)))
}
#' Extract fragments from paired-end alignments, dropping mapping artefacts.
#'
#' @param chunk A GAlignments or GAlignmentPairs object.
#' @return A GRanges object of fragments
#' @noRd
.getFragments <- function(chunk){
    if(length(chunk) == 0) return(GenomicRanges::GRanges())
    fragments <- GenomicRanges::GRanges(chunk)
    sizes <- IRanges::width(fragments)
    ## fragments wider than twice the median size are treated as artefacts
    ## caused by wrongly mapped reads
    sizeLimit <- 2*median(sizes)
    tooLarge <- which(sizes > sizeLimit)
    futile.logger::flog.debug(paste(length(tooLarge), "dropped due to maximum allowed fragment size of:",
                                    sizeLimit, "on the following GRange:"))
    futile.logger::flog.debug(show(fragments))
    if (length(tooLarge) > 0) {
        fragments <- fragments[-tooLarge]
    }
    return(fragments)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correlationHC.details.R
\name{correlationHC.details}
\alias{correlationHC.details}
\title{To explain how hierarchical correlation algorithm works.}
\usage{
correlationHC.details(
data,
target = NULL,
weight = c(),
distance = "EUC",
normalize = TRUE,
labels = NULL
)
}
\arguments{
\item{data}{is a data frame with the main data.}
\item{target}{is a data frame, a numeric vector, or a matrix. Default value = NULL.}
\item{weight}{is a numeric vector. Default value = empty vector.}
\item{distance}{is a string. The distance type. Default value = Euclidean distance.}
\item{normalize}{is a boolean parameter. If the user wants to normalize weights. Default value = TRUE.}
\item{labels}{is a string vector. For the graphical solution. Default value = NULL.}
}
\value{
R object with a dendrogram, the sorted distances and the list with every cluster. Explanation.
}
\description{
To explain how the hierarchical correlation algorithm works.
}
\details{
This function explains the complete hierarchical correlation method.
It explains the theoretical algorithm step by step.
1 - The function transforms data in useful object to be used.
2 - It creates the clusters.
3 - It calculates the distance from the target to every cluster applying the distance type given.
4 - It orders the distance in an increasing way.
5 - It orders the clusters according to their distance from the previous step.
6 - It shows the clusters sorted and the distance used.
}
\examples{
data <- matrix(c(1,2,1,4,5,1,8,2,9,6,3,5,8,5,4),ncol= 3)
dataFrame <- data.frame(data)
target1 <- c(1,2,3)
target2 <- dataFrame[1,]
weight1 <- c(1,6,3)
weight2 <- c(0.1,0.6,0.3)
correlationHC.details(dataFrame, target1)
correlationHC.details(dataFrame, target1, weight1)
correlationHC.details(dataFrame, target1, weight1, normalize = FALSE)
correlationHC.details(dataFrame, target1, weight2, 'CAN', FALSE)
}
\author{
Roberto Alcántara \email{roberto.alcantara@edu.uah.es}
Juan José Cuadrado \email{jjcg@uah.es}
Universidad de Alcalá de Henares
}
| /man/correlationHC.details.Rd | no_license | cran/LearnClust | R | false | true | 2,117 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correlationHC.details.R
\name{correlationHC.details}
\alias{correlationHC.details}
\title{To explain how hierarchical correlation algorithm works.}
\usage{
correlationHC.details(
data,
target = NULL,
weight = c(),
distance = "EUC",
normalize = TRUE,
labels = NULL
)
}
\arguments{
\item{data}{is a data frame with the main data.}
\item{target}{is a data frame , a numeric vector or a matrix. Default value = NULL.}
\item{weight}{is a numeric vector. Default value = empty vector.}
\item{distance}{is a string. The distance type. Default value = Euclidean distance.}
\item{normalize}{is a boolean parameter. If the user wants to normalize weights. Default value = TRUE.}
\item{labels}{is a string vector. For the graphical solution. Default value = NULL.}
}
\value{
R object with a dendrogram, the sorted distances and the list with every cluster. Explanation.
}
\description{
To explain how the hierarchical correlation algorithm works.
}
\details{
This function explains the complete hierarchical correlation method.
It explains the theoretical algorithm step by step.
1 - The function transforms data in useful object to be used.
2 - It creates the clusters.
3 - It calculates the distance from the target to every cluster applying the distance type given.
4 - It orders the distance in an increasing way.
5 - It orders the clusters according to their distance from the previous step.
6 - It shows the clusters sorted and the distance used.
}
\examples{
data <- matrix(c(1,2,1,4,5,1,8,2,9,6,3,5,8,5,4),ncol= 3)
dataFrame <- data.frame(data)
target1 <- c(1,2,3)
target2 <- dataFrame[1,]
weight1 <- c(1,6,3)
weight2 <- c(0.1,0.6,0.3)
correlationHC.details(dataFrame, target1)
correlationHC.details(dataFrame, target1, weight1)
correlationHC.details(dataFrame, target1, weight1, normalize = FALSE)
correlationHC.details(dataFrame, target1, weight2, 'CAN', FALSE)
}
\author{
Roberto Alcántara \email{roberto.alcantara@edu.uah.es}
Juan José Cuadrado \email{jjcg@uah.es}
Universidad de Alcalá de Henares
}
|
# Fitting Auto ARIMA model
library(forecast)
library(tidyverse)
# Use training1, testing and df generated from main_dataprep.R
# get list of states
all_states = unique(df$state)
# create new columns for ARIMA:
#   arima_trained -- 1 for weeks in the training window, 0 for test weeks
#   arima_fit     -- placeholder value (999), overwritten below with the
#                    in-sample fits / out-of-sample forecasts
df = df %>%
  mutate(
    arima_trained = as.numeric(week <= test_wk_min),
    arima_fit = 999
  ) %>%
  ungroup()
# assign the regressor names for the arima models
regset = c("time","time_sq","prev_log_new_death","prev_log_prevalence_country")
# fit arima to every state separately; keep each fitted model for later use
all_arima_models = list()
# seq_along() is safe if the state vector is ever empty, unlike 1:length()
for (i in seq_along(all_states)){
  # row indices of the training / test weeks for this state
  idx1 = which(df$state == all_states[i] & df$arima_trained == 1)
  idx2 = which(df$state == all_states[i] & df$arima_trained == 0)
  # external-regressor matrices: explicit intercept column plus regset
  reg_train = cbind(rep(1,length(idx1)),df[idx1,regset])
  reg_test = cbind(rep(1,length(idx2)),df[idx2,regset])
  colnames(reg_train) = colnames(reg_test) = c("beta0",regset)
  fit_auto = auto.arima(df$log_prevalence[idx1],
                        max.p = 3,max.P = 3,max.q = 3,max.Q = 3,max.d = 1,max.D = 0,
                        seasonal = FALSE,
                        trace = FALSE,
                        approximation = FALSE,
                        allowdrift = FALSE,
                        allowmean = FALSE,
                        stepwise = TRUE,
                        biasadj = FALSE,
                        ic = "aicc",
                        lambda = NULL,
                        xreg = as.matrix(reg_train))
  all_arima_models[[i]] = fit_auto
  # forecast over the test window; store in-sample fits and forecasts
  y_pred = forecast::forecast(fit_auto,xreg = as.matrix(reg_test))
  df$arima_fit[idx1] = fitted(fit_auto)
  df$arima_fit[idx2] = y_pred$mean
}
# find out the errors (per-row error metrics on the log-prevalence scale)
df = df %>%
  mutate(
    arima_error = arima_fit - log_prevalence,
    arima_mape = 100*abs(arima_error/log_prevalence),
    # NOTE(review): this SMAPE variant divides by |A|+|F| without the usual
    # factor of 2 -- confirm this is the intended definition
    arima_smape = 100*abs(arima_error/(abs(log_prevalence)+abs(arima_fit))),
    arima_mse = arima_error^2#,
    # our_error = log_prev_model_values - log_prevalence,
    # our_mape = 100*abs(our_error/log_prevalence),
    # our_smape = 100*abs(our_error/(abs(log_prev_model_values) + abs(log_prevalence))),
    # our_mse = our_error^2
  )
# check state-wise prediction error (test weeks only)
error_summary = df %>%
  filter(arima_trained == 0) %>%
  group_by(state) %>%
  summarize(
    arima_MAE = mean(abs(arima_error)),
    arima_MAPE = mean(arima_mape),
    arima_SMAPE = mean(arima_smape),
    arima_MSE = mean(arima_mse)
  ) %>%
  ungroup() %>%
  mutate_if(is.numeric,round,digits = 3)
# View() is interactive-only; it is a no-op / error in non-interactive runs
View(error_summary)
# check total errors (unweighted average of the per-state metrics)
mean(error_summary$arima_MAPE)
mean(error_summary$arima_MSE)
mean(error_summary$arima_SMAPE)
# see the model performance for a state (actual vs. fitted, with the
# dashed line marking the start of the test window)
df %>%
  filter(state == 'Wisconsin') %>%
  ggplot(aes(x = week)) +
  geom_line(aes(y = log_prevalence,col = "Actual")) +
  geom_line(aes(y = arima_fit,col = "ARIMA")) +
  # geom_line(aes(y = log_prev_model_values,col = "Our model")) +
  geom_vline(aes(xintercept = test_wk_min),lty = 2) +
  scale_color_manual(values = c("Actual" = "black","ARIMA" = "red","Our model" = "blue")) +
  theme_minimal() +
  theme(legend.title = element_blank(),
        legend.position = "bottom",
        axis.title = element_text(size = 20))
#::::: OLD CODES FROM SIDDHARTH
# NOTE(review): legacy block kept for reference only; it depends on objects
# (full_training2, testing, y_testing, ...) that are not created in this file.
Xreg = cbind(rep(1, n_full_training2), full_training2$time, full_training2$time_sq) #rep(1, n_full_training2),
y_pred_arima_mat = vector()
for (loc in (1:n_sp_full_training2)) {
  #fitting auto arima
  fit_auto = auto.arima(y_full_training2[((loc - 1)*n_tmp_full_training2+1):(loc*n_tmp_full_training2)],
                        max.p = 7,max.P = 7,max.q = 7,max.Q = 7,max.d = 1,max.D = 0,
                        seasonal = FALSE,
                        trace = FALSE,
                        approximation = FALSE,
                        allowdrift = FALSE,
                        allowmean = FALSE,
                        stepwise = TRUE,
                        biasadj = FALSE,
                        ic = "aicc",
                        lambda = NULL,
                        xreg = as.matrix(Xreg[(((loc - 1)*n_tmp_full_training2+1):(loc*n_tmp_full_training2)),]))
  #Forecasting using the fitted model on forecast at a time
  y_pred_arima_vec = vector()
  for (pred_i in (1:n_tmp_testing)) {
    # NOTE(review): both branches of this conditional are identical, so the
    # if/else is dead code
    if(pred_i %% n_tmp_testing == 1){
      X_vec_tmp = t(c(1, testing$time[pred_i], testing$time_sq[pred_i]))
    }else{
      X_vec_tmp = t(c(1, testing$time[pred_i], testing$time_sq[pred_i]))
    }
    y_pred_arima = forecast(fit_auto, xreg = X_vec_tmp)
    y_pred_arima_vec = c(y_pred_arima_vec, unname(y_pred_arima$mean[1]))
  }
  y_pred_arima_mat = cbind(y_pred_arima_mat, y_pred_arima_vec)
}
#Mape for predicted values
mape_pred_arima = (sum(abs( (y_testing - y_pred_arima_mat[1:(n_sp_full_training2*n_tmp_testing)])/y_testing )))/n_testing
library(forecast)
library(tidyverse)
# Use training1, testing and df generated from main_dataprep.R
# get list of states
all_states = unique(df$state)
# create new columns for ARIMA
df = df %>%
mutate(
arima_trained = as.numeric(week <= test_wk_min),
arima_fit = 999
) %>%
ungroup()
# assign the regressor names for the arima models
regset = c("time","time_sq","prev_log_new_death","prev_log_prevalence_country")
# fit arima to every state separately
all_arima_models = list()
for (i in 1:length(all_states)){
idx1 = which(df$state == all_states[i] & df$arima_trained == 1)
idx2 = which(df$state == all_states[i] & df$arima_trained == 0)
reg_train = cbind(rep(1,length(idx1)),df[idx1,regset])
reg_test = cbind(rep(1,length(idx2)),df[idx2,regset])
colnames(reg_train) = colnames(reg_test) = c("beta0",regset)
fit_auto = auto.arima(df$log_prevalence[idx1],
max.p = 3,max.P = 3,max.q = 3,max.Q = 3,max.d = 1,max.D = 0,
seasonal = FALSE,
trace = FALSE,
approximation = FALSE,
allowdrift = FALSE,
allowmean = FALSE,
stepwise = TRUE,
biasadj = FALSE,
ic = "aicc",
lambda = NULL,
xreg = as.matrix(reg_train))
all_arima_models[[i]] = fit_auto
y_pred = forecast::forecast(fit_auto,xreg = as.matrix(reg_test))
df$arima_fit[idx1] = fitted(fit_auto)
df$arima_fit[idx2] = y_pred$mean
}
# find out the errors
df = df %>%
mutate(
arima_error = arima_fit - log_prevalence,
arima_mape = 100*abs(arima_error/log_prevalence),
arima_smape = 100*abs(arima_error/(abs(log_prevalence)+abs(arima_fit))),
arima_mse = arima_error^2#,
# our_error = log_prev_model_values - log_prevalence,
# our_mape = 100*abs(our_error/log_prevalence),
# our_smape = 100*abs(our_error/(abs(log_prev_model_values) + abs(log_prevalence))),
# our_mse = our_error^2
)
# check state-wise prediction error
error_summary = df %>%
filter(arima_trained == 0) %>%
group_by(state) %>%
summarize(
arima_MAE = mean(abs(arima_error)),
arima_MAPE = mean(arima_mape),
arima_SMAPE = mean(arima_smape),
arima_MSE = mean(arima_mse)
) %>%
ungroup() %>%
mutate_if(is.numeric,round,digits = 3)
View(error_summary)
# check total errors
mean(error_summary$arima_MAPE)
mean(error_summary$arima_MSE)
mean(error_summary$arima_SMAPE)
# see the model performance for a state
df %>%
filter(state == 'Wisconsin') %>%
ggplot(aes(x = week)) +
geom_line(aes(y = log_prevalence,col = "Actual")) +
geom_line(aes(y = arima_fit,col = "ARIMA")) +
# geom_line(aes(y = log_prev_model_values,col = "Our model")) +
geom_vline(aes(xintercept = test_wk_min),lty = 2) +
scale_color_manual(values = c("Actual" = "black","ARIMA" = "red","Our model" = "blue")) +
theme_minimal() +
theme(legend.title = element_blank(),
legend.position = "bottom",
axis.title = element_text(size = 20))
#::::: OLD CODES FROM SIDDHARTH
Xreg = cbind(rep(1, n_full_training2), full_training2$time, full_training2$time_sq) #rep(1, n_full_training2),
y_pred_arima_mat = vector()
for (loc in (1:n_sp_full_training2)) {
#fitting auto arima
fit_auto = auto.arima(y_full_training2[((loc - 1)*n_tmp_full_training2+1):(loc*n_tmp_full_training2)],
max.p = 7,max.P = 7,max.q = 7,max.Q = 7,max.d = 1,max.D = 0,
seasonal = FALSE,
trace = FALSE,
approximation = FALSE,
allowdrift = FALSE,
allowmean = FALSE,
stepwise = TRUE,
biasadj = FALSE,
ic = "aicc",
lambda = NULL,
xreg = as.matrix(Xreg[(((loc - 1)*n_tmp_full_training2+1):(loc*n_tmp_full_training2)),]))
#Forecasting using the fitted model on forecast at a time
y_pred_arima_vec = vector()
for (pred_i in (1:n_tmp_testing)) {
if(pred_i %% n_tmp_testing == 1){
X_vec_tmp = t(c(1, testing$time[pred_i], testing$time_sq[pred_i]))
}else{
X_vec_tmp = t(c(1, testing$time[pred_i], testing$time_sq[pred_i]))
}
y_pred_arima = forecast(fit_auto, xreg = X_vec_tmp)
y_pred_arima_vec = c(y_pred_arima_vec, unname(y_pred_arima$mean[1]))
}
y_pred_arima_mat = cbind(y_pred_arima_mat, y_pred_arima_vec)
}
#Mape for predicted values
mape_pred_arima = (sum(abs( (y_testing - y_pred_arima_mat[1:(n_sp_full_training2*n_tmp_testing)])/y_testing )))/n_testing
|
# ---
# title: "Project-DM2017"
# author: "Salman Lashkarara,Hina,Behzad"
# date: "May 5, 2017"
# output: Library for calling svm, decision tree, and logestic regresion
# ---
library(randomForest)
library(e1071)
library(rpart)
library(ggplot2)
library(ROCR)
library(caret)
# Area under the ROC curve for a score vector against binary targets
# (wraps ROCR's prediction/performance objects).
AUC <- function(prediction, target) {
  pred_obj <- prediction(prediction, target, label.ordering = NULL)
  perf <- performance(pred_obj, "auc")
  as.numeric(perf@y.values)
}
# Simple ensemble: average the predictions of the three base learners.
combiner <- function(train, test) {
  preds <- list(
    logestic.regresion(train, test),
    sVM(train, test),
    decision.tree(train, test)
  )
  Reduce(`+`, preds) / length(preds)
}
# Logistic regression base learner.
# BUGFIX: the original fitted a *linear* model (lm) despite the function's
# name. A binomial GLM is the actual logistic regression, and
# type = "response" returns probabilities on the [0, 1] scale, comparable
# with the probability outputs of the other base learners.
logestic.regresion <- function(train, test) {
  model <- glm(target ~ ., data = train, family = binomial)
  predict(model, test, type = "response")
}
# CART base learner; returns the predicted probability of the second class
# (column 2 of rpart's class-probability matrix).
decision.tree <- function(train, test) {
  model <- rpart(target ~ ., method = "class", data = train)
  probs <- predict(model, test)
  probs[, 2]
}
# Support vector machine base learner (e1071 defaults).
sVM <- function(train, test) {
  fitted_svm <- svm(target ~ ., train)
  predict(fitted_svm, test)
}
# SVM base learner with an RBF kernel and fixed hyperparameters
# (cost = 1, gamma = 0.5).
radial.svm <- function(train, test) {
  fitted_svm <- svm(target ~ ., train, kernel = "radial", cost = 1, gamma = 0.5)
  predict(fitted_svm, test)
}
# ROC curve coordinates (TPR vs. FPR) as a data frame for plotting.
rocCurve <- function(prediction, real) {
  pred_obj <- prediction(prediction, real, label.ordering = NULL)
  perf <- performance(pred_obj, measure = "tpr", x.measure = "fpr")
  data.frame(
    y = as.vector(unlist(perf@y.values)),
    x = as.vector(unlist(perf@x.values))
  )
}
| /library.R | no_license | hina86/DM2017_UT | R | false | false | 1,320 | r |
# ---
# title: "Project-DM2017"
# author: "Salman Lashkarara,Hina,Behzad"
# date: "May 5, 2017"
# output: Library for calling svm, decision tree, and logestic regresion
# ---
library(randomForest)
library(e1071)
library(rpart)
library(ggplot2)
library(ROCR)
library(caret)
AUC <-function(prediction,target){
res<-prediction(prediction, target, label.ordering = NULL)
auc.tmp <- performance(res,"auc");
auc <- as.numeric(auc.tmp@y.values)
auc
}
combiner<-function(train,test){
reg<-logestic.regresion(train,test)
svm<-sVM(train,test)
tree<-decision.tree(train,test)
(reg+svm+tree)/3
}
logestic.regresion<-function(train,test){
model<-lm(target~. ,train)
predict(model,test)
}
decision.tree<-function(train,test){
fit <- rpart(target~. , method="class", data=train)
tree<-predict(fit, test)
tree[,2]
}
sVM <- function(train,test){
model <- svm(target ~ . , train)
predict(model, test)
}
radial.svm <- function(train,test){
model <- svm(target ~ . , train, kernel="radial", cost=1, gamma=0.5)
predict(model, test)
}
rocCurve <-function(prediction,real){
res<-prediction(prediction, real, label.ordering = NULL)
roc.perf = performance(res, measure = "tpr", x.measure = "fpr")
data.frame(y=as.vector(unlist(roc.perf@y.values)),x=as.vector(unlist(roc.perf@x.values)))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariance-functions.R
\name{mu3.unbiased}
\alias{mu3.unbiased}
\title{Given the distance matrix, the unbiased estimate for mu3 is computed}
\usage{
mu3.unbiased(B, b2ob = sum(tcrossprod(B) * B))
}
\description{
given the distance matrix the unbiased estimate for mu3 is computed
}
\keyword{internal}
| /fuzzedpackages/multivariance/man/mu3.unbiased.Rd | no_license | akhikolla/testpackages | R | false | true | 393 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariance-functions.R
\name{mu3.unbiased}
\alias{mu3.unbiased}
\title{given the distance matrix the unbiased estimate for mu3 is computed}
\usage{
mu3.unbiased(B, b2ob = sum(tcrossprod(B) * B))
}
\description{
given the distance matrix the unbiased estimate for mu3 is computed
}
\keyword{internal}
|
################################
##
## test RF VIS v. LASSO
##
################################
set.seed(2000)
require(parallel)
source('simulateData.R')
source('variableModelFunctions.R')
source('helper_functions.R')
source('../../binomialRF/R/k.binomialRF.R')
#source('~/Desktop/shiny/test/helper.R')
require(data.table)
require(randomForest)
require(devtools)
# load_all("~/Dropbox/Samir/SMART_VIS/SMART_VIS/SMARTVIS")
load_all('~/Dropbox/Samir/binomialRF/')
numSamples=100
dimX=10
BetaSignal=3
##### Generate simulated data
### Generate multivariate normal data in R10
X = matrix(rnorm(numSamples*dimX), ncol=dimX)
### let half of the coefficients be 0, the other be 10
trueBeta= c(rep(BetaSignal,5), rep(0,dimX-5))
### do logistic transform and generate the labels
z = X %*% trueBeta
pr = 1/(1+exp(-z))
y = factor(rbinom(numSamples,1,pr))
candidateModels <- list(
m1=c(paste('X',c(1,4,5,6),sep='')),
m2=c(paste('X',c(2,3,5,6,9),sep='')),
m3=c(paste("X", c(2,5:10),sep='')),
m4= c(paste("X", 3:9,sep='')),
m5= c(paste("X", c(1,4,5,7,9),sep='')),
m6= c(paste("X", c(2,5,6,8,9),sep='')),
m7= c(paste("X", c(5,7,9,10),sep='')),
m8= c(paste("X", c(3,4,5,8,10),sep='')),
m9= c(paste("X", c(1,4,5,7,9,10),sep='')),
m10= c(paste("X",1:10,sep=''))
)
tab = evaluateCandidateModels(candidateModels, X,y,percent_features = 0.3, ntrees = 2000)
tab
| /code/old code/modelAveraging.R | no_license | yadevi/binomialRF_simulationStudy | R | false | false | 1,431 | r | ################################
##
## test RF VIS v. LASSO
##
################################
set.seed(2000)
require(parallel)
source('simulateData.R')
source('variableModelFunctions.R')
source('helper_functions.R')
source('../../binomialRF/R/k.binomialRF.R')
#source('~/Desktop/shiny/test/helper.R')
require(data.table)
require(randomForest)
require(devtools)
# load_all("~/Dropbox/Samir/SMART_VIS/SMART_VIS/SMARTVIS")
load_all('~/Dropbox/Samir/binomialRF/')
numSamples=100
dimX=10
BetaSignal=3
##### Generate simulated data
### Generate multivariate normal data in R10
X = matrix(rnorm(numSamples*dimX), ncol=dimX)
### let half of the coefficients be 0, the other be 10
trueBeta= c(rep(BetaSignal,5), rep(0,dimX-5))
### do logistic transform and generate the labels
z = X %*% trueBeta
pr = 1/(1+exp(-z))
y = factor(rbinom(numSamples,1,pr))
candidateModels <- list(
m1=c(paste('X',c(1,4,5,6),sep='')),
m2=c(paste('X',c(2,3,5,6,9),sep='')),
m3=c(paste("X", c(2,5:10),sep='')),
m4= c(paste("X", 3:9,sep='')),
m5= c(paste("X", c(1,4,5,7,9),sep='')),
m6= c(paste("X", c(2,5,6,8,9),sep='')),
m7= c(paste("X", c(5,7,9,10),sep='')),
m8= c(paste("X", c(3,4,5,8,10),sep='')),
m9= c(paste("X", c(1,4,5,7,9,10),sep='')),
m10= c(paste("X",1:10,sep=''))
)
tab = evaluateCandidateModels(candidateModels, X,y,percent_features = 0.3, ntrees = 2000)
tab
|
#!/usr/bin/Rscript
# For every test subject (row of testing.csv), run two-sample KS and
# Wilcoxon tests of each training row against the test row, adjust the
# p-values (Holm, p.adjust's default) and print summary statistics.
Args <- commandArgs()
#
if ( length(Args) < 7 ){
  fnm<-Args[4]
  fnm<-substring(fnm,8,nchar(as.character(fnm)))
  print(paste("Usage - RScript training.csv testing.csv "))
  print(paste(" .... " ))
  q()
}
ARGIND<-6
training<-c(as.character(Args[ARGIND]))
ARGIND<-ARGIND+1
testing<-c(as.character(Args[ARGIND]))
ARGIND<-ARGIND+1
filereadable<-file.access(training,mode=4)
if ( filereadable == -1 ) {
  print(paste(" Cannot find the training set ",training," quitting. "))
  q()
}
filereadable<-file.access(testing,mode=4)
if ( filereadable == -1 ) {
  print(paste(" Cannot find the testing set ",testing," quitting. "))
  q()
}
# train<-t(scale(t(as.matrix(read.csv(training,h=T)))))
# test<-t(scale(t(as.matrix(read.csv(testing,h=T)))))
# train<-scale(as.matrix(read.csv(training,h=T)))
train<-(as.matrix(read.csv(training,h=T)))
test<-(as.matrix(read.csv(testing,h=T)))
nt<-as.numeric(dim(train)[1])
# seq_len() avoids the 1:n trap when a matrix has zero rows
for ( y in seq_len(nrow(test)) )
{
  wpvals<-rep(NA,nt)
  kpvals<-rep(NA,nt)
  # globalp<-(ks.test(c(train),test[y,]))$p.value
  for ( x in seq_len(nt) )
  {
    kpvals[x]<-(ks.test(train[x,],test[y,]))$p.value
    wpvals[x]<-(wilcox.test(train[x,],test[y,]))$p.value
  }
  kqvals<-p.adjust(kpvals)
  wqvals<-p.adjust(wpvals)
  print(paste("KS-Summary for subject: ",y,"---mean:",mean(kqvals),"min",min(kqvals),"max",max(kqvals))) # ," global ",globalp))
  print(paste("Wilcox-Summary for subject: ",y,"---mean:",mean(wqvals),"min",min(wqvals),"max",max(wqvals))) # ," global ",globalp))
}
# print(warnings())
q()
Args <- commandArgs()
#
if ( length(Args) < 7 ){
fnm<-Args[4]
fnm<-substring(fnm,8,nchar(as.character(fnm)))
print(paste("Usage - RScript training.csv testing.csv "))
print(paste(" .... " ))
q()
}
ARGIND<-6
training<-c(as.character(Args[ARGIND]))
ARGIND<-ARGIND+1
testing<-c(as.character(Args[ARGIND]))
ARGIND<-ARGIND+1
filereadable<-file.access(training,mode=4)
if ( filereadable == -1 ) {
print(paste(" Cannot find the training set ",training," quitting. "))
q()
}
filereadable<-file.access(testing,mode=4)
if ( filereadable == -1 ) {
print(paste(" Cannot find the testing set ",testing," quitting. "))
q()
}
# train<-t(scale(t(as.matrix(read.csv(training,h=T)))))
# test<-t(scale(t(as.matrix(read.csv(testing,h=T)))))
# train<-scale(as.matrix(read.csv(training,h=T)))
train<-(as.matrix(read.csv(training,h=T)))
test<-(as.matrix(read.csv(testing,h=T)))
nt<-as.numeric(dim(train)[1])
for ( y in c(1:dim(test)[1] ) )
{
wpvals<-rep(NA,nt)
kpvals<-rep(NA,nt)
# globalp<-(ks.test(c(train),test[y,]))$p.value
for ( x in c(1:nt) )
{
kpvals[x]<-(ks.test(train[x,],test[y,]))$p.value
wpvals[x]<-(wilcox.test(train[x,],test[y,]))$p.value
}
kqvals<-p.adjust(kpvals)
wqvals<-p.adjust(wpvals)
print(paste("KS-Summary for subject: ",y,"---mean:",mean(kqvals),"min",min(kqvals),"max",max(kqvals))) # ," global ",globalp))
print(paste("Wilcox-Summary for subject: ",y,"---mean:",mean(wqvals),"min",min(wqvals),"max",max(wqvals))) # ," global ",globalp))
}
# print(warnings())
q()
| |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plumbing.R
\name{cached_install}
\alias{cached_install}
\title{Install and/or load a version of a package}
\usage{
cached_install(package, version, return = c("namespace", "path"),
cache = TRUE, quiet = TRUE, partial = TRUE, ...)
}
\arguments{
\item{package}{Package name.}
\item{version}{Version as a character string.}
\item{return}{Return the file "path" to the installed package, or the "namespace" object?}
\item{cache}{If \code{FALSE}, always reinstall the package.}
\item{quiet}{Logical. Try to minimize output from package installation. (Some output comes
from \code{R CMD INSTALL} and may be unavoidable.)}
\item{partial}{Default \code{TRUE}. Passed to \code{\link[=loadNamespace]{loadNamespace()}}.}
\item{...}{Arguments passed to \code{\link[versions:install.versions]{versions::install.versions()}} or
\code{\link[remotes:install_version]{remotes::install_version()}}, and thence to \code{\link[=install.packages]{install.packages()}}. \code{Ncpus} may be useful.}
}
\value{
The namespace object or directory where the package is installed.
}
\description{
\code{cached_install} checks the package cache, installs the specified version if it is not
already installed, and loads the versioned package namespace.
}
\details{
If the package is already loaded, \code{cached_install} will first attempt
to unload it with a warning. This may not always work!
Note that the namespace is not attached. Partial loading is faster and safer when
you are (un)loading multiple versions, but does not export functions etc.
}
\examples{
\dontrun{
cached_install("clipr", "0.4.0")
}
}
| /man/cached_install.Rd | permissive | hughjonesd/apicheck | R | false | true | 1,668 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plumbing.R
\name{cached_install}
\alias{cached_install}
\title{Install and/or load a version of a package}
\usage{
cached_install(package, version, return = c("namespace", "path"),
cache = TRUE, quiet = TRUE, partial = TRUE, ...)
}
\arguments{
\item{package}{Package name.}
\item{version}{Version as a character string.}
\item{return}{Return the file "path" to the installed package, or the "namespace" object?}
\item{cache}{If \code{FALSE}, always reinstall the package.}
\item{quiet}{Logical. Try to minimize output from package installation. (Some output comes
from \code{R CMD INSTALL} and may be unavoidable.)}
\item{partial}{Default \code{TRUE}. Passed to \code{\link[=loadNamespace]{loadNamespace()}}.}
\item{...}{Arguments passed to \code{\link[versions:install.versions]{versions::install.versions()}} or
\code{\link[remotes:install_version]{remotes::install_version()}}, and thence to \code{\link[=install.packages]{install.packages()}}. \code{Ncpus} may be useful.}
}
\value{
The namespace object or directory where the package is installed.
}
\description{
\code{cached_install} checks the package cache, installs the specified version if it is not
already installed, and loads the versioned package namespace.
}
\details{
If the package is already loaded, \code{cached_install} will first attempt
to unload it with a warning. This may not always work!
Note that the namespace is not attached. Partial loading is faster and safer when
you are (un)loading multiple versions, but does not export functions etc.
}
\examples{
\dontrun{
cached_install("clipr", "0.4.0")
}
}
|
# Tests for HTTP HEAD requests made through HttpClient.
context("request: head")
test_that("head request works", {
  skip_on_cran()
  client <- HttpClient$new(url = "https://www.google.com")
  res <- client$head()
  # The response object and its main fields have the expected classes.
  expect_is(res, "HttpResponse")
  expect_is(res$handle, 'curl_handle')
  expect_is(res$content, "raw")
  expect_is(res$method, "character")
  expect_equal(res$method, "head")
  expect_is(res$parse, "function")
  expect_is(res$parse(), "character")
  expect_true(res$success())
  # A HEAD response carries no body.
  expect_equal(res$content, raw(0))
})
| /tests/testthat/test-head.R | permissive | graceli8/crul | R | false | false | 484 | r | context("request: head")
test_that("head request works", {
skip_on_cran()
cli <- HttpClient$new(url = "https://www.google.com")
aa <- cli$head()
expect_is(aa, "HttpResponse")
expect_is(aa$handle, 'curl_handle')
expect_is(aa$content, "raw")
expect_is(aa$method, "character")
expect_equal(aa$method, "head")
expect_is(aa$parse, "function")
expect_is(aa$parse(), "character")
expect_true(aa$success())
# content is empty
expect_equal(aa$content, raw(0))
})
|
context("classif_LiblineaRL2SVC")
test_that("classif_LiblineaRL2SVC", {
  # Skip the whole test when the LiblineaR package cannot be loaded.
  requirePackagesOrSkip("LiblineaR", default.method = "load")
  # Parameter sets used to fit reference models directly via LiblineaR::LiblineaR.
  parset.list = list(
    list(type = 1),
    list(type = 2),
    list(type = 1, cost = 5L),
    list(type = 2, cost = 5L)
  )
  old.predicts.list = list()
  old.probs.list = list()  # unused below; presumably kept from the test template -- confirm
  # Fit one reference model per parameter set and record its test-set predictions.
  for (i in 1L:length(parset.list)) {
    parset = parset.list[[i]]
    # Training features (class column dropped) and the target vector.
    pars = list(data = binaryclass.train[, -binaryclass.class.col],
      target = binaryclass.train[, binaryclass.target])
    pars = c(pars, parset)
    # Seed before fitting and again before predicting so the direct run is
    # reproducible against the mlr-driven run below.
    set.seed(getOption("mlr.debug.seed"))
    m = do.call(LiblineaR::LiblineaR, pars)
    set.seed(getOption("mlr.debug.seed"))
    p = predict(m, newx = binaryclass.test[, -binaryclass.class.col])
    old.predicts.list[[i]] = as.factor(p$predictions)
  }
  # Matching parameter sets for the mlr learner; the empty entries rely on the
  # learner's own defaults (presumably corresponding to the types fitted above).
  parset.list = list(
    list(type = 1),
    list(),
    list(type = 1, cost = 5L),
    list(cost = 5L)
  )
  # Compare mlr's predictions against the reference predictions.
  testSimpleParsets("classif.LiblineaRL2SVC", binaryclass.df, binaryclass.target,
    binaryclass.train.inds, old.predicts.list, parset.list)
})
| /tests/testthat/test_classif_LiblineaRL2SVC.R | no_license | jimhester/mlr | R | false | false | 1,051 | r | context("classif_LiblineaRL2SVC")
test_that("classif_LiblineaRL2SVC", {
requirePackagesOrSkip("LiblineaR", default.method = "load")
parset.list = list(
list(type = 1),
list(type = 2),
list(type = 1, cost = 5L),
list(type = 2, cost = 5L)
)
old.predicts.list = list()
old.probs.list = list()
for (i in 1L:length(parset.list)) {
parset = parset.list[[i]]
pars = list(data = binaryclass.train[, -binaryclass.class.col],
target = binaryclass.train[, binaryclass.target])
pars = c(pars, parset)
set.seed(getOption("mlr.debug.seed"))
m = do.call(LiblineaR::LiblineaR, pars)
set.seed(getOption("mlr.debug.seed"))
p = predict(m, newx = binaryclass.test[, -binaryclass.class.col])
old.predicts.list[[i]] = as.factor(p$predictions)
}
parset.list = list(
list(type = 1),
list(),
list(type = 1, cost = 5L),
list(cost = 5L)
)
testSimpleParsets("classif.LiblineaRL2SVC", binaryclass.df, binaryclass.target,
binaryclass.train.inds, old.predicts.list, parset.list)
})
|
\name{twNlme-package}
\alias{twNlme-package}
\alias{twNlme}
\title{Prediction intervals for nlme and gnls models}
\description{Prediction intervals for nlme and gnls models}
\author{Thomas Wutzler <twutz@bgc-jena.mpg.de>}
\details{
The main purpose of the package is to provide standard errors for predictions of
simple models fitted by \code{\link{nlme}} or \code{\link{gnls}}.
The main function is \code{\link{varPredictNlmeGnls}} using function \code{\link{attachVarPrep}}.
\describe{\item{Further functionality of package \code{twNlme} includes}{
\itemize{
\item Variance of sum of predictions \code{\link{varSumPredictNlmeGnls}}
\item treating gnls models as special random effects models with zero random effects, i.e accessing fixed (coefficients) and random (none) of gls models: \code{\link{fixef.gls}}, \code{\link{ranef.gls}}
\item extracting Variance Covariance matrix of fixed and random effects: \code{\link{varRanef.lme}}, \code{\link{varFixef.lme}} for both lme and gls models
\item extracting the standard error of fixed effects: \code{\link{seFixef.lme}} for both lme and gls models
}
}}
\describe{\item{Data}{
\itemize{
\item tree weights in relation to diameter and height: \code{\link{Wutzler08BeechStem}}
\item nlme model fit to the tree weight data: \code{\link{modExampleStem}}
}
}}
}
\keyword{ package }
| /man/twNlme-package.Rd | no_license | mattocci27/twNlme | R | false | false | 1,347 | rd | \name{twNlme-package}
\alias{twNlme-package}
\alias{twNlme}
\title{Prediction intervals for nlme and gnls models}
\description{Prediction intervals for nlme and gnls models}
\author{Thomas Wutzler <twutz@bgc-jena.mpg.de>}
\details{
The main purpose of the package is to provide standard errors for predictions of
simple models fitted by \code{\link{nlme}} or \code{\link{gnls}}.
The main function is \code{\link{varPredictNlmeGnls}} using function \code{\link{attachVarPrep}}.
\describe{\item{Further functionality of package \code{twNlme} includes}{
\itemize{
\item Variance of sum of predictions \code{\link{varSumPredictNlmeGnls}}
\item treating gnls models as special random effects models with zero random effects, i.e accessing fixed (coefficients) and random (none) of gls models: \code{\link{fixef.gls}}, \code{\link{ranef.gls}}
\item extracting Variance Covariance matrix of fixed and random effects: \code{\link{varRanef.lme}}, \code{\link{varFixef.lme}} for both lme and gls models
\item extracting the standard error of fixed effects: \code{\link{seFixef.lme}} for both lme and gls models
}
}}
\describe{\item{Data}{
\itemize{
\item tree weights in relation to diameter and height: \code{\link{Wutzler08BeechStem}}
\item nlme model fit to the tree weight data: \code{\link{modExampleStem}}
}
}}
}
\keyword{ package }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mbtommHG.r
\name{mbtommHG}
\alias{mbtommHG}
\title{mbtommHG}
\usage{
mbtommHG(mb)
}
\arguments{
\item{mb}{numeric Air pressure in Millibar [hPa].}
}
\value{
mmHG
}
\description{
Conversion from Millibar [hPa] to mmHG.
}
\author{
Istituto per la Bioeconomia CNR Firenze Italy Alfonso Crisci \email{alfonso.crisci@ibe.cnr.it}
}
\keyword{mbtommHG}
| /man/mbtommHG.Rd | permissive | alfcrisci/rBiometeo | R | false | true | 426 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mbtommHG.r
\name{mbtommHG}
\alias{mbtommHG}
\title{mbtommHG}
\usage{
mbtommHG(mb)
}
\arguments{
\item{mb}{numeric Air pressure in Millibar [hPa].}
}
\value{
mmHG
}
\description{
Conversion from Millibar [hPa] to mmHG.
}
\author{
Istituto per la Bioeconomia CNR Firenze Italy Alfonso Crisci \email{alfonso.crisci@ibe.cnr.it}
}
\keyword{mbtommHG}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Papp.R
\name{z.Papp}
\alias{z.Papp}
\title{Papp correlation}
\usage{
z.Papp(pres.pr, temp.pr, tolerance = 1e-13, verbose = FALSE)
}
\arguments{
\item{pres.pr}{pseudo-reduced pressure}
\item{temp.pr}{pseudo-reduced temperature}
\item{tolerance}{rounding tolerance to avoid rounding readings that are in
the middle of the grid. "tolerance" adds flexibility in deciding point closeness.}
\item{verbose}{print internal}
}
\description{
Calculate the Z factor with the Papp correlation
}
\examples{
# Example 1
## one single z calculation
z.Papp(pres.pr = 1.5, temp.pr = 2.0)
# Example 2
## calculate z for multiple values of Tpr and Ppr
ppr <- c(0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5)
tpr <- c(1.3, 1.5, 1.7, 2)
z.Papp(pres.pr = ppr, temp.pr = tpr)
}
| /man/Papp.Rd | no_license | cran/zFactor | R | false | true | 824 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Papp.R
\name{z.Papp}
\alias{z.Papp}
\title{Papp correlation}
\usage{
z.Papp(pres.pr, temp.pr, tolerance = 1e-13, verbose = FALSE)
}
\arguments{
\item{pres.pr}{pseudo-reduced pressure}
\item{temp.pr}{pseudo-reduced temperature}
\item{tolerance}{rounding tolerance to avoid rounding readings that are in
the middle of the grid. "tolerance" adds flexibility in deciding point closeness.}
\item{verbose}{print internal}
}
\description{
Calculate the Z factor with the Papp correlation
}
\examples{
# Example 1
## one single z calculation
z.Papp(pres.pr = 1.5, temp.pr = 2.0)
# Example 2
## calculate z for multiple values of Tpr and Ppr
ppr <- c(0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5)
tpr <- c(1.3, 1.5, 1.7, 2)
z.Papp(pres.pr = ppr, temp.pr = tpr)
}
|
# Download the raw UCI HAR data set from the course link before running.
# This script opens the data, merges the test and train sets, and writes
# tidy summaries to disk.
library(dplyr)
# Load both the test and train measurement data.
testData<- read.table('test/X_test.txt')
trainData<- read.table('train/X_train.txt')
# Load the subject identifiers for both splits.
testSubject<- read.table('test/subject_test.txt')
trainSubject<- read.table('train/subject_train.txt')
# Combine test and train rows into single tables.
allData<- rbind(testData,trainData)
allSubject<- rbind(testSubject,trainSubject)
# Load the feature names and use them as the measurement column names.
feature<- read.table('features.txt')
names(allData)<- feature$V2
# Add a column named subject to allData.
allData<- cbind(allData,subject=allSubject$V1)
# Load the numeric activity labels for both splits and combine them.
testLable<- read.table('test/y_test.txt')
trainLable<- read.table('train/y_train.txt')
allLable<- rbind(testLable,trainLable)
# Add the numeric activity label column (note: spelled "lable" in the code).
allData<- cbind(allData,lable=allLable$V1)
# Load the activity-name lookup table.
activity<- read.table('activity_labels.txt')
names(activity)<-c('V1','activity')
# Merge allData with the activity-name lookup on the numeric label.
allData<- merge(x=allData,y=activity,by.y='V1',by.x='lable')
allData<- tbl_df(allData)
# Keep only mean() and std() measurement columns (plus activity/subject),
# strip punctuation from the column names, and write them to meanAndStd.csv.
mean_std_Col<- grep('std\\(\\)|mean\\(\\)',names(allData),value = TRUE)
meanAndStd<- allData[,c(mean_std_Col,'activity',"subject")]
newNames<- gsub('[-()]','',names(meanAndStd))
names(meanAndStd)<- newNames
write.csv(meanAndStd,'meanAndStd.csv')
# Group meanAndStd by subject and activity, average every measurement
# column per group, and write the tidy summary to meanData.txt.
meanData<- group_by(meanAndStd,subject,activity)
meanData<- summarize_each(meanData,funs(mean))
write.table(meanData,'meanData.txt',row.names = FALSE)
| /run_analysis.R | no_license | woodyzc/getting-cleaning-data | R | false | false | 1,814 | r | #down load the data from the link.
#open data and merge two data sets together.First commit.
library(dplyr)
# load both test and train data.
testData<- read.table('test/X_test.txt')
trainData<- read.table('train/X_train.txt')
# load both subject data
testSubject<- read.table('test/subject_test.txt')
trainSubject<- read.table('train/subject_train.txt')
# combind them together
allData<- rbind(testData,trainData)
allSubject<- rbind(testSubject,trainSubject)
# load all the features and change the variable names to feature names.
feature<- read.table('features.txt')
names(allData)<- feature$V2
# add a column named subject to allData
allData<- cbind(allData,subject=allSubject$V1)
# load both labels and combind them together
testLable<- read.table('test/y_test.txt')
trainLable<- read.table('train/y_train.txt')
allLable<- rbind(testLable,trainLable)
# update the allData with a column named label
allData<- cbind(allData,lable=allLable$V1)
# load activity labels
activity<- read.table('activity_labels.txt')
names(activity)<-c('V1','activity')
#merge allData and activies labels data frame.
allData<- merge(x=allData,y=activity,by.y='V1',by.x='lable')
allData<- tbl_df(allData)
# find variables with only meansurements on the mean and std and write the data to meanAndStd.csv
mean_std_Col<- grep('std\\(\\)|mean\\(\\)',names(allData),value = TRUE)
meanAndStd<- allData[,c(mean_std_Col,'activity',"subject")]
newNames<- gsub('[-()]','',names(meanAndStd))
names(meanAndStd)<- newNames
write.csv(meanAndStd,'meanAndStd.csv')
#group meanAndStd by subject and activity and store it as meanData, then get the mean for each activity
# and subject.
meanData<- group_by(meanAndStd,subject,activity)
meanData<- summarize_each(meanData,funs(mean))
write.table(meanData,'meanData.txt',row.names = FALSE)
|
## The functions contained here allow for caching the result of
## a Matrix inversion, which can allow for more efficient use
## of resources in this costly computation
## Thi function creates a special "matrix" object that can cache
## its inverse.
## Construct a caching wrapper around a matrix. The returned list of
## accessor functions stores the matrix together with a lazily computed,
## memoised inverse (populated by cacheSolve).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  ## Replace the stored matrix; any cached inverse is no longer valid.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  ## Return the stored matrix.
  get <- function() x
  ## Store a computed inverse for later reuse.
  setinverse <- function(solved) cached_inverse <<- solved
  ## Return the cached inverse (NULL when not yet computed).
  getinverse <- function() cached_inverse
  ## Expose the accessors as a named list.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it at most once: subsequent calls reuse the cached value.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  ## Fast path: an inverse was already computed and cached.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Slow path: invert the stored matrix and cache the result.
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
| /cachematrix.R | no_license | RushRy/ProgrammingAssignment2 | R | false | false | 1,889 | r | ## The functions contained here allow for caching the result of
## a Matrix inversion, which can allow for more efficient use
## of resources in this costly computation
## Thi function creates a special "matrix" object that can cache
## its inverse.
## Construct a caching wrapper around a matrix. The returned list of
## accessor functions stores the matrix together with a lazily computed,
## memoised inverse (populated by cacheSolve).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  ## Replace the stored matrix; any cached inverse is no longer valid.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  ## Return the stored matrix.
  get <- function() x
  ## Store a computed inverse for later reuse.
  setinverse <- function(solved) cached_inverse <<- solved
  ## Return the cached inverse (NULL when not yet computed).
  getinverse <- function() cached_inverse
  ## Expose the accessors as a named list.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it at most once: subsequent calls reuse the cached value.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  ## Fast path: an inverse was already computed and cached.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Slow path: invert the stored matrix and cache the result.
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
# Tutorial: https://www.r-bloggers.com/rselenium-a-wonderful-tool-for-web-scraping/
# Tutorial 2: https://ropensci.org/tutorials/rselenium_tutorial/
# Install the package (only needed once).
install.packages('RSelenium')
# Load the package.
library(RSelenium)
# Run this instead of checkForServer() to set up the Selenium server
# (believed to be needed only once after installing, but not confirmed).
rsDriver()
# Create the remote driver that will control the browser session.
remDr <- remoteDriver(remoteServerAddr = "localhost", port = 4567, browserName = "firefox")
# Open a browser window controlled by the driver.
remDr$open()
# Print some info about the server/browser session.
remDr$getStatus()
# Basic commands:
# Navigate to a website; Google should now be visible in the browser tab.
remDr$navigate("https://www.google.com/")
# Locate the search bar: right-click it in the browser, "Inspect Element",
# and match the input's class attribute with an XPath query.
webElem <- remDr$findElement(using = 'xpath', value = "//*/input[@class='gLFyf gsfi']")
# Inspect attributes of the located element: its id ...
webElem$getElementAttribute("id")
# ... and its class.
webElem$getElementAttribute("class")
# Type into the search bar.
webElem$sendKeysToElement(list("test"))
# Locate the search button the same way (inspect element on the button).
webElem <- remDr$findElement(using = 'xpath', value = "//*/input[@class='gNO89b']")
# Click it; search results should now be shown.
webElem$clickElement()
# Dump the full page source into R.
remDr$getPageSource()[[1]]
# Parse the source with the XML package for easier handling.
library(XML)
htmlParse(remDr$getPageSource()[[1]])
| /R/RSeleniumStartup.R | no_license | marshareb/R-and-Probability | R | false | false | 1,913 | r | # TUTORIAL: https://www.r-bloggers.com/rselenium-a-wonderful-tool-for-web-scraping/
# TUTORIAL2: https://ropensci.org/tutorials/rselenium_tutorial/
# INSTALL THE LIBRARY
install.packages('RSelenium')
# IMPORT THE LIBRARY
library(RSelenium)
# RUN THIS INSTEAD OF checkForServer()
# (I BELIEVE YOU ONLY NEED TO RUN THIS ONCE AFTER INSTALLING, BUT NOT CONFIDENT)
rsDriver()
# START THE DRIVER
# (THIS WILL BE HOW YOU RUN THE BROWSER)
remDr <- remoteDriver(remoteServerAddr = "localhost", port = 4567, browserName = "firefox")
# OPENS A COPY OF YOUR BROWSER
remDr$open()
# GIVES YOU SOME INFO ON YOUR SERVER/BROWSER SESSION
remDr$getStatus()
# SOME BASIC COMMANDS:
# NAVIGATE TO WEBSITE
remDr$navigate("https://www.google.com/")
# YOU SHOULD NOW SEE GOOGLE IN YOUR BROWSER TAB
# LET'S TRY NOW CLICKING ON THE SEARCH BAR
# FIRST WE NEED TO FIND IT
# ASSIGN ELEMENT TO A VARIABLE CALLED webElem
# TO FIND THIS, RIGHT CLICK THE SEARCHBAR IN YOUR BROWSER AND INSPECT ELEMENT
# WE SHOULD SEE AN INPUT CLASS. WE CAN USE XPATH TO FIND THIS SPECIFIC ONE
webElem <- remDr$findElement(using = 'xpath', value = "//*/input[@class='gLFyf gsfi']")
# WE CAN NOW LOOK AT THE ATTRIBUTES OF THIS ELEMENT. LET'S LOOK AT THE DIV ID
webElem$getElementAttribute("id")
# LET'S LOOK AT THE DIV CLASS
webElem$getElementAttribute("class")
# WE CAN NOW TYPE IN IT AS WELL
webElem$sendKeysToElement(list("test"))
# WE NEED TO NOW PRESS THE SEARCH BUTTON. AGAIN, RIGHT CLICK AND INSPECT ELEMENT ON THE BUTTON.
webElem <- remDr$findElement(using = 'xpath', value = "//*/input[@class='gNO89b']")
# WE NOW NEED TO CLICK IT
webElem$clickElement()
# WE NOW SEE SEARCH RESULTS!
# LET'S NOW LOOK AT THE PAGE SOURCE IN R
# THIS WILL SPIT OUT THE PAGE SOURCE IN FULL
remDr$getPageSource()[[1]]
# WE CAN NOW PARSE THIS TO MAKE IT A LITTLE MORE READABLE
# WE WILL NEED THE XML LIBRARY TO DO SO
library(XML)
htmlParse(remDr$getPageSource()[[1]])
|
# Inspiration: https://www.flickr.com/photos/stringrbelle/49656406813/
#' @rdname rosemary
#' @export
social_distance_02 <- function(dir = NULL, ...) {
  # Resolve/validate the output directory (check_dir is defined elsewhere
  # in the package) and build the PNG path.
  dir <- check_dir(dir)
  file <- file.path(dir, "social_distance_02.png")
  # Build the scene: a seeded mix of circles pushed through a tempest and a
  # warp unfold, rescaled, then keeping only the tail of the simulation
  # (time > 10, re-based to start at 1).
  dat <- jasmines::use_seed(255) %>%
    jasmines::scene_mix(
      n = 500, xpos = 1:80, ypos = 1:80,
      grain = 2, size = 1, entity = "circle"
    ) %>%
    jasmines::unfold_tempest(iterations = 50) %>%
    dplyr::mutate(x = x/10, y = y/10) %>%
    jasmines::unfold_warp(iterations = 30) %>%
    dplyr::mutate(sigh = id + ind) %>%
    dplyr::filter(time > 10) %>%
    dplyr::mutate(time = time - 10)
  # Tone down the "rainbow" palette (halve red, dim blue/green).
  pal <- jasmines::palette_adjust(
    name = "rainbow",
    prefix = NULL,
    red.f = .5,
    blue.f = .8,
    green.f = .8
  )
  # Render as translucent ribbons coloured by `sigh` and write the PNG.
  dat %>%
    jasmines::style_ribbon(
      colour = "sigh",
      palette = pal,
      background = "wheat",
      alpha = c(.1, .1)
    ) %>%
    jasmines::export_image(
      filename = file
    )
  cat("image written to:", file, "\n")
  # Called for its file-writing side effect; returns nothing visibly.
  return(invisible(NULL))
}
| /R/social_distance_02.R | permissive | Shornone/rosemary | R | false | false | 1,024 | r | # https://www.flickr.com/photos/stringrbelle/49656406813/
#' @rdname rosemary
#' @export
social_distance_02 <- function(dir = NULL, ...) {
dir <- check_dir(dir)
file <- file.path(dir, "social_distance_02.png")
dat <- jasmines::use_seed(255) %>%
jasmines::scene_mix(
n = 500, xpos = 1:80, ypos = 1:80,
grain = 2, size = 1, entity = "circle"
) %>%
jasmines::unfold_tempest(iterations = 50) %>%
dplyr::mutate(x = x/10, y = y/10) %>%
jasmines::unfold_warp(iterations = 30) %>%
dplyr::mutate(sigh = id + ind) %>%
dplyr::filter(time > 10) %>%
dplyr::mutate(time = time - 10)
pal <- jasmines::palette_adjust(
name = "rainbow",
prefix = NULL,
red.f = .5,
blue.f = .8,
green.f = .8
)
dat %>%
jasmines::style_ribbon(
colour = "sigh",
palette = pal,
background = "wheat",
alpha = c(.1, .1)
) %>%
jasmines::export_image(
filename = file
)
cat("image written to:", file, "\n")
return(invisible(NULL))
}
|
# RStudio addin: preview the TeX snippet currently highlighted in the editor.
# Reads the active selection via rstudioapi, normalises it (either by
# evaluating an xtable(...) call or by collapsing doubled backslashes),
# then renders it with texPreview() as a PNG.
texAddin <- function() {
  # NOTE(review): require() inside a function fails silently when the
  # package is missing; library() would error loudly instead.
  require(texViewer)
  # Get the document context.
  context <- rstudioapi::getActiveDocumentContext()
  # Set the default data to use based on the selection.
  obj <- context$selection[[1]]$text
  if (nchar(obj) == 0) {
    stop('Please highlight a tex before selecting this addin.')
  }
  if(grepl('xtable',obj)){
    # Selection looks like an xtable(...) call: evaluate it to obtain the object.
    # NOTE(review): eval(parse(text = ...)) executes arbitrary selected code.
    eval(parse(text=paste0('obj=',obj)))
  }else{
    # Plain TeX selection: collapse escaped backslashes ("\\\\" -> "\\").
    obj=gsub('\\\\\\\\','\\\\',obj)
  }
  # Render the preview image into a temporary file named 'addinTemp'.
  texPreview(obj = obj,fileNM = 'addinTemp',imgFormat = 'png')
} | /R/texAddin.R | no_license | talgalili/texPreview | R | false | false | 508 | r | texAddin <- function() {
require(texViewer)
# Get the document context.
context <- rstudioapi::getActiveDocumentContext()
# Set the default data to use based on the selection.
obj <- context$selection[[1]]$text
if (nchar(obj) == 0) {
stop('Please highlight a tex before selecting this addin.')
}
if(grepl('xtable',obj)){
eval(parse(text=paste0('obj=',obj)))
}else{
obj=gsub('\\\\\\\\','\\\\',obj)
}
texPreview(obj = obj,fileNM = 'addinTemp',imgFormat = 'png')
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot_age_adjusted_ypll}
\alias{plot_age_adjusted_ypll}
\title{bar charts}
\usage{
plot_age_adjusted_ypll(
dt,
byvar,
year_rle = NA,
usa_ypll_ls = NULL,
panel_letter = NULL,
usa_text_angle = 90,
axis.text.x.size = 8,
axis.text.x.angle = 30
)
}
\description{
bar charts
}
| /man/plot_age_adjusted_ypll.Rd | no_license | Owain-S/COVIDYPLL | R | false | true | 377 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot_age_adjusted_ypll}
\alias{plot_age_adjusted_ypll}
\title{bar charts}
\usage{
plot_age_adjusted_ypll(
dt,
byvar,
year_rle = NA,
usa_ypll_ls = NULL,
panel_letter = NULL,
usa_text_angle = 90,
axis.text.x.size = 8,
axis.text.x.angle = 30
)
}
\description{
bar charts
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/franchises.R
\name{getPlayoffRecordVsFranchise}
\alias{getPlayoffRecordVsFranchise}
\title{Get Franchise Head to Head playoff record}
\usage{
getPlayoffRecordVsFranchise(franchiseID = NULL, franchiseName = NULL,
opponentName = NULL, opponentID = NULL)
}
\arguments{
\item{franchiseID}{Optional franchise ID to filter results.}
\item{franchiseName}{Optional franchise name to filter results.}
\item{opponentName}{Optional opponent name to filter results.}
\item{opponentID}{Optional opponent franchise ID to filter results.}
}
\value{
Head-to-head records for every franchise's playoff games.
}
\description{
Get Franchise Head to Head playoff record
}
\examples{
#See the head to head playoff results between the two New York teams
h2h_playoffs <- getPlayoffRecordVsFranchise(franchiseName = "New York Islanders",
opponentName = "New York Rangers")
}
| /man/getPlayoffRecordVsFranchise.Rd | permissive | pbulsink/nhlRapi | R | false | true | 977 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/franchises.R
\name{getPlayoffRecordVsFranchise}
\alias{getPlayoffRecordVsFranchise}
\title{Get Franchise Head to Head playoff record}
\usage{
getPlayoffRecordVsFranchise(franchiseID = NULL, franchiseName = NULL,
opponentName = NULL, opponentID = NULL)
}
\arguments{
\item{franchiseID}{Optional franchise ID to filter results.}
\item{franchiseName}{Optional franchise name to filter results.}
\item{opponentName}{Optional opponent name to filter results.}
\item{opponentID}{Optional opponent franchise ID to filter results.}
}
\value{
Head-to-head records for every franchise's playoff games.
}
\description{
Get Franchise Head to Head playoff record
}
\examples{
#See the head to head playoff results between the two New York teams
h2h_playoffs <- getPlayoffRecordVsFranchise(franchiseName = "New York Islanders",
opponentName = "New York Rangers")
}
|
### UNEMPLOYMENT RATES BY GENDER FORECAST ###
# Load libraries
library(shiny)
suppressMessages(library(data.table)) ## data.table syntax for fast data manipulation
suppressMessages(library(openxlsx)) ## reads Excel files
suppressMessages(library(forecast)) ## time series forecasting (auto.arima, forecast)
suppressMessages(library(stats)) ## time series utilities (ts)
# Define server logic
shinyServer(function(input, output) {

  # Project directory (created if missing; working directory is not changed)
  dirname <- '~/data_science_JHU/4-Developing_Data_Products/project/ShinyAppProject'
  if (!dir.exists(dirname)) dir.create(dirname, recursive = TRUE)

  # read data
  un_data <- as.data.table(read.xlsx("unemployment_gender.xlsx"))
  # Excel stores dates as day counts from 1899-12-30; convert to Date
  un_data[, date := as.Date(date, origin = "1899-12-30")]
  # create dummy indicating months where COVID is active in the economy
  un_data[, covid := ifelse(date >= '2020-03-01', 1, 0)]

  # convert women / men unemployment rates to monthly time series (from Jan 2005)
  women_ts <- stats::ts(data = un_data$un_women, start = c(2005, 1), frequency = 12)
  men_ts <- stats::ts(data = un_data$un_men, start = c(2005, 1), frequency = 12)

  # exogenous variable (COVID dummy) used to fit both ARIMA models
  xreg <- as.matrix(data.frame(covid = un_data$covid))
  # women model
  w_model <- forecast::auto.arima(women_ts, xreg = xreg)
  # men model
  m_model <- forecast::auto.arima(men_ts, xreg = xreg)

  # Build the forecast table for one fitted model.
  # th_input is the horizon in days (slider months * 31, as in the original
  # code); returns a data.table with one row per forecast month:
  # columns `date` and `base` (the point forecast).
  # This replaces the identical code previously triplicated across the
  # three reactives below.
  make_forecast_dt <- function(model, th_input) {
    dt_forecast <- data.table(date = seq(from = max(un_data$date),
                                         to = max(un_data$date) + th_input,
                                         by = "month"))
    # COVID dummy switched on between May and August 2020 inside the window
    xreg <- as.matrix(data.frame(covid = ifelse(dt_forecast[2:nrow(dt_forecast),]$date >= '2020-05-01' &
                                                  dt_forecast[2:nrow(dt_forecast),]$date <= '2020-08-01', 1, 0)))
    pron <- forecast(model, nrow(dt_forecast) - 1, level = 68, xreg = xreg)
    data.table(date = dt_forecast[-1,]$date,
               base = as.numeric(pron$mean))
  }

  # Point forecast for the selected horizon (women):
  # th_input / 31 recovers the slider value, i.e. the row index in months.
  dt_wforecast <- reactive({
    th_input <- input$sliderHorizon * 31
    as.numeric(make_forecast_dt(w_model, th_input)[th_input / 31, 2])
  })

  # Point forecast for the selected horizon (men)
  dt_mforecast <- reactive({
    th_input <- input$sliderHorizon * 31
    as.numeric(make_forecast_dt(m_model, th_input)[th_input / 31, 2])
  })

  # Render the historic + forecast line plot for whichever genders are ticked
  updateplot <- reactive({
    th_input <- input$sliderHorizon * 31

    # women forecast, prefixed with the last observed value so the forecast
    # line connects visually with the historic line
    dt_pronostico1 <- rbind(data.frame(date = max(un_data$date),
                                       base = last(un_data$un_women)),
                            make_forecast_dt(w_model, th_input))
    # men forecast, same treatment
    dt_pronostico2 <- rbind(data.frame(date = max(un_data$date),
                                       base = last(un_data$un_men)),
                            make_forecast_dt(m_model, th_input))

    unplot <- ggplot2::ggplot() + ggplot2::theme_minimal()
    if (input$WomenModel) {
      unplot <- unplot +
        ggplot2::geom_line(data = un_data[157:nrow(un_data),],
                           ggplot2::aes(x = date, y = un_women, col = "Women historic"), size = 1.2) +
        ggplot2::geom_line(data = dt_pronostico1,
                           ggplot2::aes(x = date, y = base, col = "Women forecast"), size = 1.2)
    }
    if (input$MenModel) {
      unplot <- unplot +
        ggplot2::geom_line(data = un_data[157:nrow(un_data),],
                           ggplot2::aes(x = date, y = un_men, col = "Men historic"), size = 1.2) +
        ggplot2::geom_line(data = dt_pronostico2,
                           ggplot2::aes(x = date, y = base, col = "Men forecast"), size = 1.2)
    }
    unplot +
      ggplot2::labs(x = "Date", y = "Unemployment rate", colour = NULL) +
      ggplot2::scale_colour_manual(name = '', values = c('blue2', 'darkgrey', 'salmon', 'grey25')) +
      ggplot2::theme(legend.position = "bottom",
                     plot.title = ggplot2::element_text(face = "bold", hjust = 0.5, size = 20),
                     axis.title = ggplot2::element_text(size = 15),
                     axis.text = ggplot2::element_text(size = 15),
                     legend.text = ggplot2::element_text(size = 15))
  })

  output$plot1 <- renderPlot({
    updateplot()
  })
  output$pred1 <- renderText({
    dt_wforecast()
  })
  output$pred2 <- renderText({
    dt_mforecast()
  })
})
| /server.R | permissive | carolinart/Shiny-App-Project-JHU- | R | false | false | 7,051 | r | ### UNEMPLOYMENT RATES BY GENDER FORECAST ###
# Load libraries
library(shiny)
suppressMessages(library(data.table)) ## data.table syntax for fast data manipulation
suppressMessages(library(openxlsx)) ## reads Excel files
suppressMessages(library(forecast)) ## time series forecasting (auto.arima, forecast)
suppressMessages(library(stats)) ## time series utilities (ts)
# Define server logic
shinyServer(function(input, output) {

  # Project directory (created if missing; working directory is not changed)
  dirname <- '~/data_science_JHU/4-Developing_Data_Products/project/ShinyAppProject'
  if (!dir.exists(dirname)) dir.create(dirname, recursive = TRUE)

  # read data
  un_data <- as.data.table(read.xlsx("unemployment_gender.xlsx"))
  # Excel stores dates as day counts from 1899-12-30; convert to Date
  un_data[, date := as.Date(date, origin = "1899-12-30")]
  # create dummy indicating months where COVID is active in the economy
  un_data[, covid := ifelse(date >= '2020-03-01', 1, 0)]

  # convert women / men unemployment rates to monthly time series (from Jan 2005)
  women_ts <- stats::ts(data = un_data$un_women, start = c(2005, 1), frequency = 12)
  men_ts <- stats::ts(data = un_data$un_men, start = c(2005, 1), frequency = 12)

  # exogenous variable (COVID dummy) used to fit both ARIMA models
  xreg <- as.matrix(data.frame(covid = un_data$covid))
  # women model
  w_model <- forecast::auto.arima(women_ts, xreg = xreg)
  # men model
  m_model <- forecast::auto.arima(men_ts, xreg = xreg)

  # Build the forecast table for one fitted model.
  # th_input is the horizon in days (slider months * 31, as in the original
  # code); returns a data.table with one row per forecast month:
  # columns `date` and `base` (the point forecast).
  # This replaces the identical code previously triplicated across the
  # three reactives below.
  make_forecast_dt <- function(model, th_input) {
    dt_forecast <- data.table(date = seq(from = max(un_data$date),
                                         to = max(un_data$date) + th_input,
                                         by = "month"))
    # COVID dummy switched on between May and August 2020 inside the window
    xreg <- as.matrix(data.frame(covid = ifelse(dt_forecast[2:nrow(dt_forecast),]$date >= '2020-05-01' &
                                                  dt_forecast[2:nrow(dt_forecast),]$date <= '2020-08-01', 1, 0)))
    pron <- forecast(model, nrow(dt_forecast) - 1, level = 68, xreg = xreg)
    data.table(date = dt_forecast[-1,]$date,
               base = as.numeric(pron$mean))
  }

  # Point forecast for the selected horizon (women):
  # th_input / 31 recovers the slider value, i.e. the row index in months.
  dt_wforecast <- reactive({
    th_input <- input$sliderHorizon * 31
    as.numeric(make_forecast_dt(w_model, th_input)[th_input / 31, 2])
  })

  # Point forecast for the selected horizon (men)
  dt_mforecast <- reactive({
    th_input <- input$sliderHorizon * 31
    as.numeric(make_forecast_dt(m_model, th_input)[th_input / 31, 2])
  })

  # Render the historic + forecast line plot for whichever genders are ticked
  updateplot <- reactive({
    th_input <- input$sliderHorizon * 31

    # women forecast, prefixed with the last observed value so the forecast
    # line connects visually with the historic line
    dt_pronostico1 <- rbind(data.frame(date = max(un_data$date),
                                       base = last(un_data$un_women)),
                            make_forecast_dt(w_model, th_input))
    # men forecast, same treatment
    dt_pronostico2 <- rbind(data.frame(date = max(un_data$date),
                                       base = last(un_data$un_men)),
                            make_forecast_dt(m_model, th_input))

    unplot <- ggplot2::ggplot() + ggplot2::theme_minimal()
    if (input$WomenModel) {
      unplot <- unplot +
        ggplot2::geom_line(data = un_data[157:nrow(un_data),],
                           ggplot2::aes(x = date, y = un_women, col = "Women historic"), size = 1.2) +
        ggplot2::geom_line(data = dt_pronostico1,
                           ggplot2::aes(x = date, y = base, col = "Women forecast"), size = 1.2)
    }
    if (input$MenModel) {
      unplot <- unplot +
        ggplot2::geom_line(data = un_data[157:nrow(un_data),],
                           ggplot2::aes(x = date, y = un_men, col = "Men historic"), size = 1.2) +
        ggplot2::geom_line(data = dt_pronostico2,
                           ggplot2::aes(x = date, y = base, col = "Men forecast"), size = 1.2)
    }
    unplot +
      ggplot2::labs(x = "Date", y = "Unemployment rate", colour = NULL) +
      ggplot2::scale_colour_manual(name = '', values = c('blue2', 'darkgrey', 'salmon', 'grey25')) +
      ggplot2::theme(legend.position = "bottom",
                     plot.title = ggplot2::element_text(face = "bold", hjust = 0.5, size = 20),
                     axis.title = ggplot2::element_text(size = 15),
                     axis.text = ggplot2::element_text(size = 15),
                     legend.text = ggplot2::element_text(size = 15))
  })

  output$plot1 <- renderPlot({
    updateplot()
  })
  output$pred1 <- renderText({
    dt_wforecast()
  })
  output$pred2 <- renderText({
    dt_mforecast()
  })
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\name{within}
\alias{within}
\alias{within,GRanges-method}
\title{within on GRanges}
\usage{
within(data, expr, ...)
}
\value{
GRanges
}
\description{
\code{within} method applied to \code{GRanges} objects.
}
\author{
Kevin Hadi
}
| /man/gr_within.Rd | no_license | kevinmhadi/khtools | R | false | true | 264 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\name{within}
\alias{within}
\alias{within,GRanges-method}
\title{within on GRanges}
\usage{
within(data, expr, ...)
}
\value{
GRanges
}
\description{
\code{within} method applied to \code{GRanges} objects.
}
\author{
Kevin Hadi
}
|
###################################
# Macuata District Report Maps
###################################
# Map setup
###########################
## Defining map elements
# Colors
# administrative
land_col <- "#878787" # grey
water_col <- "#a6cee3" # light blue
# ecological
coral_col <- "#e31a1c" # red
seagrass_col <- "#86bb56" # light green
mangrove96_col <- "#408536" # dark green (1996 extent)
mangrove16_col <- "#33a02c" # green (2016 extent)
mangrove_gain <- "#a481b6" # purple
mangrove_loss <- "#fdbf6f" # orange
# geomorphic zone colors
# NOTE(review): the original color comments did not match the hex values;
# the descriptions below follow the actual colors.
plat_col <- "#befbff" # pale cyan (plateau)
rs_col <- "#92739d" # muted purple (reef slope)
srs_col <- "#ffba15" # amber (sheltered reef slope)
rc_col <- "#cd6812" # burnt orange (reef crest)
orf_col <- "#614272" # dark purple (outer reef flat)
irf_col <- "#288471" # teal (inner reef flat)
trf_col <- "#77d0fc" # light blue (terrestrial reef flat)
sl_col <- "#e69113" # orange (shallow lagoon)
unk_col <- "#B2B2B2" # grey (unknown)
# geophysical
bath_col <- bath_color <- (RColorBrewer::brewer.pal(9,"Blues")) # depth color ramp
sed_col <- (RColorBrewer::brewer.pal(9,"YlOrRd")) # turbidity color ramp
# survey sites
site_shape <- c(21,22,23,24)
site_color <- c("#ffffb3","#fb8072","#bebada","#8dd3c7")
# unique site colors
eia_col <- "#ffffb3" # light yellow
new_col <- "#fb8072" # light pink
rfc_col <- "#bebada" # light purple
wwf_col <- "#8dd3c7" # light green
# survey site shapes
eia_shape <- 21 # circle
new_shape <- 22 # square
rfc_shape <- 23 # diamond
wwf_shape <- 24 # triangle up
# scale bar
scalebar_macuata <- annotation_scale(width_hint = 0.2, # percent of the plot occupied (20%)
                                     pad_x = unit(0.35, "in"), # how much padded from the x=0 position
                                     pad_y = unit(3.5, "in")) # how much padded from the y=0 position
# north arrow
narrow_macuata <- annotation_north_arrow(height = unit(0.25, "in"),
                                         width = unit(0.2, "in"),
                                         pad_x = unit(0.1, "in"),
                                         pad_y = unit(3.5, "in"),
                                         style = north_arrow_orienteering(
                                           line_width = 1,
                                           line_col = "black",
                                           fill = c("white", "black"),
                                           text_col = "black",
                                           text_family = "",
                                           text_face = NULL,
                                           text_size = 5,
                                           text_angle = 0))
# map themes
# All five Macuata map themes share the same axis/panel settings and differ
# only in the legend title element, the legend position, and the legend text
# size. make_map_theme() builds that shared theme once; the five theme
# objects below keep their original names and values, so every downstream
# plot that references them is unaffected.
#
# legend_position  : numeric c(x, y) in panel coordinates (alternative: "bottom")
# legend_title     : element_blank() to hide, or element_text(size = ...)
# legend_text_size : point size of the legend descriptor text
make_map_theme <- function(legend_position, legend_title, legend_text_size) {
  theme(axis.text = element_text(size = 8),
        axis.title = element_text(size = 10),
        plot.title = element_text(size = 12),
        panel.grid.major = element_line(color = "transparent"),
        panel.grid.minor = element_line(color = "transparent"),
        panel.background = element_rect(fill = water_col), # water-colored panel
        axis.text.y = element_text(angle = 90, hjust = 0.5),
        legend.title = legend_title,
        legend.position = legend_position,
        legend.text = element_text(size = legend_text_size),
        legend.background = element_rect(fill = "transparent"),
        # white legend box with blue border
        legend.box.background = element_rect(fill = "white", color = "#4C84A2"),
        legend.box.margin = margin(1, 1, 1, 1), # space between box and text
        legend.spacing.y = unit(0.025, "in"),
        legend.key.size = unit(0.1, "in"), # size of the color box
        legend.key = element_rect(fill = "transparent"),
        legend.margin = margin(0, 0, 0, 0, "in"),
        axis.line = element_line(colour = "black"))
}
# general maps: no legend title, legend in the lower right
map_theme_macuata <- make_map_theme(c(0.82, 0.10), element_blank(), 6)
# geomorphic map: titled legend (size 6)
map_theme_macuata_geomorphic <- make_map_theme(c(0.87, 0.15), element_text(size = 6), 4)
# sediment map: titled legend (size 4)
map_theme_macuata_sediment <- make_map_theme(c(0.87, 0.15), element_text(size = 4), 4)
# bathymetry map: titled legend (size 4)
map_theme_macuata_bathymetry <- make_map_theme(c(0.87, 0.15), element_text(size = 4), 4)
# survey map: titled legend (size 6)
map_theme_macuata_survey <- make_map_theme(c(0.87, 0.15), element_text(size = 6), 4)
# get the Macuata district
# NOTE(review): i is hard-coded to 2 (Macuata); the loop form is kept so the
# script can be extended to other districts later.
i <- 2
for (i in 2){
  # get qoliqoli (customary fishing ground polygon) for this district
  qoliqoli_do <- qoliqoli[i,]
  # get the map limits from the polygon bounding box
  # NOTE(review): the +/-1000 x buffer assumes projected (metre) units while
  # xbreaks below are decimal degrees -- confirm the layer CRS.
  xlim_qoli <- c(xmin = st_bbox(qoliqoli_do)$xmin-1000, xmax = st_bbox(qoliqoli_do)$xmax+1000)
  ylim_qoli <- c(xmin = st_bbox(qoliqoli_do)$ymin, xmax = st_bbox(qoliqoli_do)$ymax)
  # extract qoliqoli name
  qoli_name <- qoliqoli_do$District
  # x-axis breaks
  if(i==2){xbreaks <- seq(178.5,179.5,0.20)} # Macuata
  # create coral map
  qoliqoli_coral <- ggplot() +
    # load Fiji land
    geom_sf(data = fiji, fill = land_col, color = NA) +
    # load Great Sea Reef
    geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
    # load coral data
    geom_tile(data = coral_map, aes(x=longitude, y=latitude, fill=coral)) +
    # load qoliqoli
    geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
    # focus on the area of interest
    coord_sf(xlim = xlim_qoli,
             ylim = ylim_qoli) +
    # legend
    scale_fill_manual(values=coral_col,
                      label = "Coral reefs") +
    scale_color_manual(values = "grey30") +
    scale_linetype_manual(values = "3313") +
    # x-axis breaks
    scale_x_longitude(breaks = xbreaks) +
    # labels + title
    labs(x="",y="",title="") +
    # map elements
    scalebar_macuata +
    narrow_macuata +
    # theme
    theme_bw() +
    map_theme_macuata
  # create seagrass map
  qoliqoli_seagrass <- ggplot() +
    # load Fiji land
    geom_sf(data = fiji, fill = land_col, color = NA) +
    # load Great Sea Reef
    geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
    # load seagrass data
    geom_tile(data = seagrass_map, aes(x=longitude, y=latitude, fill=seagrass)) +
    # load qoliqoli
    geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
    # focus on the area of interest
    coord_sf(xlim = xlim_qoli,
             ylim = ylim_qoli) +
    # legend
    scale_fill_manual(values=seagrass_col) +
    scale_color_manual(values = "grey30") +
    scale_linetype_manual(values = "3313") +
    # x-axis breaks
    scale_x_longitude(breaks = xbreaks) +
    # labels + title
    labs(x="",y="",title="") +
    # map elements
    scalebar_macuata +
    narrow_macuata +
    # theme
    theme_bw() +
    map_theme_macuata
  # create mangrove map (2016 extent)
  qoliqoli_mangrove <- ggplot() +
    # load Fiji land
    geom_sf(data = fiji, fill = land_col, color = NA) +
    # load Great Sea Reef
    geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
    # load mangrove data
    geom_tile(data = mangrove16_map, aes(x=longitude, y=latitude, fill=mangrove)) +
    # load qoliqoli
    geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
    # focus on the area of interest
    coord_sf(xlim = xlim_qoli,
             ylim = ylim_qoli) +
    # legend
    scale_fill_manual(values=mangrove16_col) +
    scale_color_manual(values="grey30") +
    scale_linetype_manual(values = "3313") +
    # x-axis breaks
    scale_x_longitude(breaks = xbreaks) +
    # labels + title
    labs(x="",y="",title="") +
    # map elements
    scalebar_macuata +
    narrow_macuata +
    # theme
    theme_bw() +
    map_theme_macuata
  # create mangrove change (gain/loss) map
  qoliqoli_mangrove_change <- ggplot() +
    # load Fiji land
    geom_sf(data = fiji, fill = land_col, color = NA) +
    # load Great Sea Reef
    geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
    # load mangrove change data
    geom_tile(data = mangrove_gain_map, aes(x=longitude, y=latitude, fill=gain)) +
    geom_tile(data = mangrove_loss_map, aes(x=longitude, y=latitude, fill=loss)) +
    # load qoliqoli
    geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
    # focus on the area of interest
    coord_sf(xlim = xlim_qoli,
             ylim = ylim_qoli) +
    # x-axis breaks
    scale_x_longitude(breaks = xbreaks) +
    # legend
    scale_fill_manual(labels = c("Mangrove gain",
                                 "Mangrove loss"),
                      values=c(mangrove_gain,
                               mangrove_loss)) +
    scale_color_manual(values="grey30") +
    scale_linetype_manual(values = "3313") +
    # labels + title
    labs(x="",y="",title="") +
    # map elements
    scalebar_macuata +
    narrow_macuata +
    # theme
    theme_bw() +
    map_theme_macuata
  # create geomorphic map
  # irf_map_sample <- sample_frac(irf_map, 0.01)
  # orf_map_sample <- sample_frac(orf_map, 0.01)
  # plat_map_sample <- sample_frac(plat_map, 0.01)
  # rc_map_sample <- sample_frac(rc_map, 0.01)
  # rs_map_sample <- sample_frac(rs_map, 0.01)
  # sl_map_sample <- sample_frac(sl_map, 0.01)
  # srs_map_sample <- sample_frac(srs_map, 0.01)
  # trf_map_sample <- sample_frac(trf_map, 0.01)
  # unk_map_sample <- sample_frac(unk_map, 0.01)
  qoliqoli_geomorphic <- ggplot() +
    # load Fiji land
    geom_sf(data = fiji, fill = land_col, color = NA) +
    # load Great Sea Reef
    geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
    # load geomorphic data (one tile layer per zone)
    geom_tile(data = irf_map, aes(x=longitude, y=latitude, fill=irf)) + # inner reef flat
    geom_tile(data = orf_map, aes(x=longitude, y=latitude, fill=orf)) + # outer reef flat
    geom_tile(data = plat_map, aes(x=longitude, y=latitude, fill=plat)) + # plateau
    geom_tile(data = rc_map, aes(x=longitude, y=latitude, fill=rc)) + # reef crest
    geom_tile(data = rs_map, aes(x=longitude, y=latitude, fill=rs)) + # reef slope
    geom_tile(data = sl_map, aes(x=longitude, y=latitude, fill=sl)) + # shallow lagoon
    geom_tile(data = srs_map, aes(x=longitude, y=latitude, fill=srs)) + # sheltered reef slope
    geom_tile(data = trf_map, aes(x=longitude, y=latitude, fill=trf)) + # terrestrial reef flat
    geom_tile(data = unk_map, aes(x=longitude, y=latitude, fill=unk)) + # unknown
    # load qoliqoli
    geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
    # focus on the area of interest
    coord_sf(xlim = xlim_qoli,
             ylim = ylim_qoli) +
    # x-axis breaks
    scale_x_longitude(breaks = xbreaks) +
    # legend (fill values listed in the same order the zones sort)
    scale_fill_manual(name = "Geomorphic zone",
                      guide = guide_legend(ncol = 2),
                      values=c(irf_col,
                               orf_col,
                               plat_col,
                               rc_col,
                               rs_col,
                               sl_col,
                               srs_col,
                               trf_col,
                               unk_col)) +
    scale_color_manual(name = "District",
                       values = "grey30") +
    scale_linetype_manual(name = "Great Sea Reef",
                          label = "",
                          values = "3313") +
    # labels + title
    labs(x="",y="",title="") +
    # map elements
    scalebar_macuata +
    narrow_macuata +
    # theme
    theme_bw() +
    map_theme_macuata_geomorphic
  # Create sedimentation map
  # sediment_map_sample <- sample_frac(sediment_map,0.01)
  qoliqoli_sediment <- ggplot() +
    # load sediment data
    geom_tile(data = sediment_map, aes(x=longitude, y=latitude, fill = relative)) +
    # load Fiji land
    geom_sf(data = fiji, fill = land_col, color = NA) +
    # load Great Sea Reef
    geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
    # load qoliqoli
    geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
    # focus on the area of interest
    coord_sf(xlim = xlim_qoli,
             ylim = ylim_qoli) +
    # color sedimentation
    scale_fill_gradientn(name = "Turbidity \n(relative)",
                         colors = sed_col,
                         breaks = seq(0,10,2.5),
                         na.value=NA) +
    guides(fill = guide_colorbar(ticks.colour = "black", frame.colour = "black")) +
    # district legend
    scale_color_manual(name = "District",
                       values = "grey30") +
    scale_linetype_manual(name = "Great Sea Reef",
                          label = "",
                          values = "3313") +
    # labels + title
    labs(x="",y="",title="") +
    # change x-axis breaks
    scale_x_longitude(breaks = xbreaks) +
    # map elements
    scalebar_macuata +
    narrow_macuata +
    # theme
    theme_bw() +
    map_theme_macuata_sediment
  # Create the bathymetry map
  # bath_map_sample <- sample_frac(bath_map,0.01)
  qoliqoli_bath <- ggplot() +
    # load bathymetry data
    geom_tile(data = bath_map, aes(x=longitude, y=latitude, fill = depth)) +
    # load Fiji land
    geom_sf(data = fiji, fill = land_col, color = NA) +
    # load Great Sea Reef
    geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
    # load qoliqoli
    geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
    # focus on the area of interest
    coord_sf(xlim = xlim_qoli,
             ylim = ylim_qoli) +
    # color bathymetry
    scale_fill_gradientn(name = "Depth (m)",
                         colors = bath_color,
                         na.value=NA) +
    guides(fill = guide_colorbar(ticks.colour = "black", frame.colour = "black")) +
    # qoliqoli legend
    scale_color_manual(name = "District",
                       values = "grey30") +
    scale_linetype_manual(name = "Great Sea Reef",
                          label = "",
                          values = "3313") +
    # x-axis breaks
    scale_x_longitude(breaks = xbreaks) +
    # labels + title
    labs(x="",y="",title="") +
    # map elements
    scalebar_macuata +
    narrow_macuata +
    # theme
    theme_bw() +
    map_theme_macuata_bathymetry
  # Create the survey site map
  # coral_map_sample <- sample_frac(coral_map,0.01)
  qoliqoli_survey <- ggplot() +
    # load Fiji land
    geom_sf(data = fiji, fill = land_col, color = NA) +
    # load Great Sea Reef
    geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), size = 0.5) +
    # load coral data
    geom_tile(data = coral_map, aes(x=longitude,y=latitude, color="Coral"), fill = coral_col) +
    # load qoliqoli
    geom_sf(data = qoliqoli_do, fill = NA, aes(linetype = District), size = 0.5) +
    # load survey site data
    geom_sf(data = surv_site, aes(fill=surveyor, shape=surveyor),show.legend = "point") +
    # focus on the area of interest
    coord_sf(xlim = xlim_qoli,
             ylim = ylim_qoli) +
    # x-axis breaks
    scale_x_longitude(breaks = xbreaks) +
    # survey shape
    scale_shape_manual(name = "Survey Site",
                       labels = c("Ba EIA",
                                  "New Site",
                                  "Reef Check",
                                  "WWF"),
                       values = c(eia_shape,
                                  new_shape,
                                  rfc_shape,
                                  wwf_shape),
                       guide = guide_legend(override.aes = list(fill = c(eia_col,
                                                                         new_col,
                                                                         rfc_col,
                                                                         wwf_col)))) +
    # surveyor fill
    scale_fill_manual(labels = c("Ba EIA",
                                 "New Site",
                                 "Reef Check",
                                 "WWF"),
                      values = c(eia_col,
                                 new_col,
                                 rfc_col,
                                 wwf_col)) +
    # Great Sea Reef legend
    scale_linetype_manual(name = "Borders",
                          values = c("3313","solid"),
                          guide = guide_legend(override.aes = list(color = c("grey50","grey30"),
                                                                   shape = c(NA,NA)))) +
    # coral legend
    scale_color_manual(name = "Benthic habitat",
                       values = coral_col,
                       label = "Coral reefs",
                       guide = guide_legend(override.aes = list(fill = coral_col,
                                                                shape = NA))) +
    # remove fill symbology
    # FIX: guides(fill = FALSE) was deprecated in ggplot2 3.3.4 and is
    # defunct (errors) since 3.5.0 -- use "none" instead.
    guides(fill = "none") +
    # repel text of sites in area of interest
    ggrepel::geom_text_repel(data = filter(surv_site, district == qoli_name),
                             mapping = aes(x = longitude,
                                           y = latitude,
                                           label = site,
                                           geometry = geometry),
                             stat = "sf_coordinates",
                             size = 2,
                             fontface = "bold",
                             nudge_x = 20,
                             nudge_y = 30,
                             max.iter = 1500) +
    # labels + title
    labs(x="",y="", title="") +
    # map elements
    scalebar_macuata +
    narrow_macuata +
    # theme
    theme_bw() +
    map_theme_macuata_survey
  # Export plots (6.5 x 4.5 in, 600 dpi, LZW-compressed TIFF)
  out_coral <- paste0(qoli_name,"_coral.tiff")
  ggsave(qoliqoli_coral, filename=file.path(qoliqoli_map_dir, out_coral), width=6.5,
         height=4.5, units="in", dpi=600, compression = "lzw")
  out_seagrass <- paste0(qoli_name,"_seagrass.tiff")
  ggsave(qoliqoli_seagrass, filename=file.path(qoliqoli_map_dir, out_seagrass), width=6.5,
         height=4.5, units="in", dpi=600, compression = "lzw")
  out_mangrove <- paste0(qoli_name,"_mangrove.tiff")
  ggsave(qoliqoli_mangrove, filename=file.path(qoliqoli_map_dir, out_mangrove), width=6.5,
         height=4.5, units="in", dpi=600, compression = "lzw")
  out_mangchange <- paste0(qoli_name,"_mangrove_change.tiff")
  ggsave(qoliqoli_mangrove_change, filename=file.path(qoliqoli_map_dir, out_mangchange), width=6.5,
         height=4.5, units="in", dpi=600, compression = "lzw")
  out_geomorphic <- paste0(qoli_name,"_geomorphic.tiff")
  ggsave(qoliqoli_geomorphic, filename=file.path(qoliqoli_map_dir, out_geomorphic), width=6.5,
         height=4.5, units="in", dpi=600, compression = "lzw")
  out_sed <- paste0(qoli_name,"_sedimentation.tiff")
  ggsave(qoliqoli_sediment, filename=file.path(qoliqoli_map_dir, out_sed), width=6.5,
         height=4.5, units="in", dpi=600, compression = "lzw")
  out_bath <- paste0(qoli_name,"_bathymetry.tiff")
  ggsave(qoliqoli_bath, filename=file.path(qoliqoli_map_dir, out_bath), width=6.5,
         height=4.5, units="in", dpi=600, compression = "lzw")
  out_survey <- paste0(qoli_name,"_survey.tiff")
  ggsave(qoliqoli_survey, filename=file.path(qoliqoli_map_dir, out_survey), width=6.5,
         height=4.5, units="in", dpi=600, compression = "lzw")
}
| /wwf_fiji_maps/qoliqoli_macuata_maps_code_r.R | no_license | bpfree/work_sample | R | false | false | 26,722 | r | ###################################
# Macuata District Report Maps
###################################
# Map setup
###########################
## Defining map elements
# Colors
# administrative
land_col <- "#878787"
water_col <- "#a6cee3"
# ecological
coral_col <- "#e31a1c" # red
seagrass_col <- "#86bb56" # light green
mangrove96_col <- "#408536"
mangrove16_col <- "#33a02c" # green
mangrove_gain <- "#a481b6" # purple
mangrove_loss <- "#fdbf6f" # orange
# geomorphical
plat_col <- "#befbff" # dark orange
rs_col <- "#92739d" # light yellow
srs_col <- "#ffba15" # tan
rc_col <- "#cd6812" # deep purple
orf_col <- "#614272" # dark purple
irf_col <- "#288471" # mild purple
trf_col <- "#77d0fc" # light purple
sl_col <- "#e69113" # tile blue
unk_col <- "#B2B2B2" # grey
# geophysical
bath_col <- bath_color <- (RColorBrewer::brewer.pal(9,"Blues"))
sed_col <- (RColorBrewer::brewer.pal(9,"YlOrRd"))
# survey sites
site_shape <- c(21,22,23,24)
site_color <- c("#ffffb3","#fb8072","#bebada","#8dd3c7")
# unique site colors
eia_col <- "#ffffb3" # light yellow
new_col <- "#fb8072" # light pink
rfc_col <- "#bebada" # light purple
wwf_col <- "#8dd3c7" # light green
# survey site shapes
eia_shape <- 21 # circle
new_shape <- 22 # square
rfc_shape <- 23 # diamond
wwf_shape <- 24 # triangle up
# scale bar
scalebar_macuata <- annotation_scale(width_hint = 0.2, # percent of the plot occupied (20%)
pad_x = unit(0.35, "in"), # how much padded from the x=0 position
pad_y = unit(3.5, "in")) # how much padded from the y=0 position
# north arrow
narrow_macuata <- annotation_north_arrow(height = unit(0.25, "in"),
width = unit(0.2, "in"),
pad_x = unit(0.1, "in"),
pad_y = unit(3.5, "in"),
style = north_arrow_orienteering(
line_width = 1,
line_col = "black",
fill = c("white", "black"),
text_col = "black",
text_family = "",
text_face = NULL,
text_size = 5,
text_angle = 0))
# map themes ----
# The five themes below were five near-identical theme() blobs differing
# only in the legend title element, legend position, and legend text
# size; they are now produced by one shared constructor.
#
# legend_title: element for legend.title (element_blank() hides it)
# legend_position: legend anchor in npc coordinates (alternative: "bottom")
# legend_text_size: point size of the legend entry text
make_macuata_theme <- function(legend_title = element_text(size = 6),
                               legend_position = c(0.87, 0.15),
                               legend_text_size = 4) {
  theme(axis.text = element_text(size = 8),
        axis.title = element_text(size = 10),
        plot.title = element_text(size = 12),
        panel.grid.major = element_line(color = "transparent"),
        panel.grid.minor = element_line(color = "transparent"),
        panel.background = element_rect(fill = water_col), # sea shows through
        axis.text.y = element_text(angle = 90, hjust = 0.5),
        legend.title = legend_title,
        legend.position = legend_position,
        legend.text = element_text(size = legend_text_size),
        legend.background = element_rect(fill = "transparent"),
        legend.box.background = element_rect(fill = "white", color = "#4C84A2"), # white box, blue border
        legend.box.margin = margin(1, 1, 1, 1), # space between box and text
        legend.spacing.y = unit(0.025, "in"),
        legend.key.size = unit(0.1, "in"), # size of the color box
        legend.key = element_rect(fill = "transparent"), # clear key background
        legend.margin = margin(0, 0, 0, 0, "in"), # tighten legend spacing
        axis.line = element_line(colour = "black"))
}
# base theme: no legend title, legend lower-right, larger legend text
map_theme_macuata <- make_macuata_theme(legend_title = element_blank(),
                                        legend_position = c(0.82, 0.10),
                                        legend_text_size = 6)
# titled-legend variants (positions/text sizes use the shared defaults)
map_theme_macuata_geomorphic <- make_macuata_theme(legend_title = element_text(size = 6))
map_theme_macuata_sediment <- make_macuata_theme(legend_title = element_text(size = 4))
map_theme_macuata_bathymetry <- make_macuata_theme(legend_title = element_text(size = 4))
map_theme_macuata_survey <- make_macuata_theme(legend_title = element_text(size = 6))
# Map the Macuata district (row 2 of the qoliqoli polygons). The loop
# form is kept so more districts can be added to the index set later.
# (A dead `i <- 2` that the for-header immediately rebound was removed.)
for (i in 2){
  # get the qoliqoli (customary fishing-ground) polygon for this district
  qoliqoli_do <- qoliqoli[i,]
  # map extent: district bounding box, padded 1 km east and west
  xlim_qoli <- c(xmin = st_bbox(qoliqoli_do)$xmin-1000, xmax = st_bbox(qoliqoli_do)$xmax+1000)
  ylim_qoli <- c(ymin = st_bbox(qoliqoli_do)$ymin, ymax = st_bbox(qoliqoli_do)$ymax)
  # extract qoliqoli name (used for output filenames and site filtering)
  qoli_name <- qoliqoli_do$District
  # x-axis tick positions
  if(i==2){xbreaks <- seq(178.5,179.5,0.20)} # Macuata
# create coral map
qoliqoli_coral <- ggplot() +
# load Fiji land
geom_sf(data = fiji, fill = land_col, color = NA) +
# load Great Sea Reef
geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
# load coral data
geom_tile(data = coral_map, aes(x=longitude, y=latitude, fill=coral)) +
# load qoliqoli
geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
# focus on the area of interest
coord_sf(xlim = xlim_qoli,
ylim = ylim_qoli) +
# legend
scale_fill_manual(values=coral_col,
label = "Coral reefs") +
scale_color_manual(values = "grey30") +
scale_linetype_manual(values = "3313") +
# x-axis breaks
scale_x_longitude(breaks = xbreaks) +
# labels + title
labs(x="",y="",title="") +
# map elements
scalebar_macuata +
narrow_macuata +
# theme
theme_bw() +
map_theme_macuata
# create seagrass map
qoliqoli_seagrass <- ggplot() +
# load Fiji land
geom_sf(data = fiji, fill = land_col, color = NA) +
# load Great Sea Reef
geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
# load seagrass data
geom_tile(data = seagrass_map, aes(x=longitude, y=latitude, fill=seagrass)) +
# load qoliqoli
geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
# focus on the area of interest
coord_sf(xlim = xlim_qoli,
ylim = ylim_qoli) +
# legend
scale_fill_manual(values=seagrass_col) +
scale_color_manual(values = "grey30") +
scale_linetype_manual(values = "3313") +
# x-axis breaks
scale_x_longitude(breaks = xbreaks) +
# labels + title
labs(x="",y="",title="") +
# map elements
scalebar_macuata +
narrow_macuata +
# theme
theme_bw() +
map_theme_macuata
# create mangrove map
qoliqoli_mangrove <- ggplot() +
# load Fiji land
geom_sf(data = fiji, fill = land_col, color = NA) +
# load Great Sea Reef
geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
# load mangrove data
geom_tile(data = mangrove16_map, aes(x=longitude, y=latitude, fill=mangrove)) +
# load qoliqoli
geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
# focus on the area of interest
coord_sf(xlim = xlim_qoli,
ylim = ylim_qoli) +
# legend
scale_fill_manual(values=mangrove16_col) +
scale_color_manual(values="grey30") +
scale_linetype_manual(values = "3313") +
# x-axis breaks
scale_x_longitude(breaks = xbreaks) +
# labels + title
labs(x="",y="",title="") +
# map elements
scalebar_macuata +
narrow_macuata +
# theme
theme_bw() +
map_theme_macuata
# create mangrove change map
qoliqoli_mangrove_change <- ggplot() +
# load Fiji land
geom_sf(data = fiji, fill = land_col, color = NA) +
# load Great Sea Reef
geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
# load mangrove change data
geom_tile(data = mangrove_gain_map, aes(x=longitude, y=latitude, fill=gain)) +
geom_tile(data = mangrove_loss_map, aes(x=longitude, y=latitude, fill=loss)) +
# load qoliqoli
geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
# focus on the area of interest
coord_sf(xlim = xlim_qoli,
ylim = ylim_qoli) +
# x-axis breaks
scale_x_longitude(breaks = xbreaks) +
# legend
scale_fill_manual(labels = c("Mangrove gain",
"Mangrove loss"),
values=c(mangrove_gain,
mangrove_loss)) +
scale_color_manual(values="grey30") +
scale_linetype_manual(values = "3313") +
# labels + title
labs(x="",y="",title="") +
# map elements
scalebar_macuata +
narrow_macuata +
# theme
theme_bw() +
map_theme_macuata
# create geomorphic map
# irf_map_sample <- sample_frac(irf_map, 0.01)
# orf_map_sample <- sample_frac(orf_map, 0.01)
# plat_map_sample <- sample_frac(plat_map, 0.01)
# rc_map_sample <- sample_frac(rc_map, 0.01)
# rs_map_sample <- sample_frac(rs_map, 0.01)
# sl_map_sample <- sample_frac(sl_map, 0.01)
# srs_map_sample <- sample_frac(srs_map, 0.01)
# trf_map_sample <- sample_frac(trf_map, 0.01)
# unk_map_sample <- sample_frac(unk_map, 0.01)
qoliqoli_geomorphic <- ggplot() +
# load Fiji land
geom_sf(data = fiji, fill = land_col, color = NA) +
# load Great Sea Reef
geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
# load geomorphic data
geom_tile(data = irf_map, aes(x=longitude, y=latitude, fill=irf)) + # inner reef flat
geom_tile(data = orf_map, aes(x=longitude, y=latitude, fill=orf)) + # outer reef flat
geom_tile(data = plat_map, aes(x=longitude, y=latitude, fill=plat)) + # plateau
geom_tile(data = rc_map, aes(x=longitude, y=latitude, fill=rc)) + # reef crest
geom_tile(data = rs_map, aes(x=longitude, y=latitude, fill=rs)) + # reef slope
geom_tile(data = sl_map, aes(x=longitude, y=latitude, fill=sl)) + # shallow lagoon
geom_tile(data = srs_map, aes(x=longitude, y=latitude, fill=srs)) + # sheltered reef slope
geom_tile(data = trf_map, aes(x=longitude, y=latitude, fill=trf)) + # terrestrial reef flat
geom_tile(data = unk_map, aes(x=longitude, y=latitude, fill=unk)) + # unknown
# load qoliqoli
geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
# focus on the area of interest
coord_sf(xlim = xlim_qoli,
ylim = ylim_qoli) +
# x-axis breaks
scale_x_longitude(breaks = xbreaks) +
# legend
scale_fill_manual(name = "Geomorphic zone",
guide = guide_legend(ncol = 2),
values=c(irf_col,
orf_col,
plat_col,
rc_col,
rs_col,
sl_col,
srs_col,
trf_col,
unk_col)) +
scale_color_manual(name = "District",
values = "grey30") +
scale_linetype_manual(name = "Great Sea Reef",
label = "",
values = "3313") +
# labels + title
labs(x="",y="",title="") +
# map elements
scalebar_macuata +
narrow_macuata +
# theme
theme_bw() +
map_theme_macuata_geomorphic
# Create sedimentation map
# sediment_map_sample <- sample_frac(sediment_map,0.01)
qoliqoli_sediment <- ggplot() +
# load sediment data
geom_tile(data = sediment_map, aes(x=longitude, y=latitude, fill = relative)) +
# load Fiji land
geom_sf(data = fiji, fill = land_col, color = NA) +
# load Great Sea Reef
geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
# load qoliqoli
geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
# focus on the area of interest
coord_sf(xlim = xlim_qoli,
ylim = ylim_qoli) +
# color sedimentation
scale_fill_gradientn(name = "Turbidity \n(relative)",
colors = sed_col,
breaks = seq(0,10,2.5),
na.value=NA) +
guides(fill = guide_colorbar(ticks.colour = "black", frame.colour = "black")) +
# district legend
scale_color_manual(name = "District",
values = "grey30") +
scale_linetype_manual(name = "Great Sea Reef",
label = "",
values = "3313") +
# labels + title
labs(x="",y="",title="") +
# change x-axis breaks
scale_x_longitude(breaks = xbreaks) +
# map elements
scalebar_macuata +
narrow_macuata +
# theme
theme_bw() +
map_theme_macuata_sediment
# Create the loop for the provincial bathymetry maps
# bath_map_sample <- sample_frac(bath_map,0.01)
# create plot
qoliqoli_bath <- ggplot() +
# load bathmetry data
geom_tile(data = bath_map, aes(x=longitude, y=latitude, fill = depth)) +
# load Fiji land
geom_sf(data = fiji, fill = land_col, color = NA) +
# load Great Sea Reef
geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), color = "grey50", size = 0.5) +
# load qoliqoli
geom_sf(data = qoliqoli_do, fill = NA, aes(color = District), size = 0.5) +
# focus on the area of interest
coord_sf(xlim = xlim_qoli,
ylim = ylim_qoli) +
# color bathymetry
scale_fill_gradientn(name = "Depth (m)",
colors = bath_color,
na.value=NA) +
guides(fill = guide_colorbar(ticks.colour = "black", frame.colour = "black")) +
# qoliqoli legend
scale_color_manual(name = "District",
values = "grey30") +
scale_linetype_manual(name = "Great Sea Reef",
label = "",
values = "3313") +
# x-axis breaks
scale_x_longitude(breaks = xbreaks) +
# labels + title
labs(x="",y="",title="") +
# map elements
scalebar_macuata +
narrow_macuata +
# theme
theme_bw() +
map_theme_macuata_bathymetry
# Create the loop for the provincial survery site maps
# coral_map_sample <- sample_frac(coral_map,0.01)
qoliqoli_survey <- ggplot() +
# load Fiji land
geom_sf(data = fiji, fill = land_col, color = NA) +
# load Great Sea Reef
geom_sf(data = gsr, fill = NA, aes(linetype = "Great Sea Reef"), size = 0.5) +
# load coral data
geom_tile(data = coral_map, aes(x=longitude,y=latitude, color="Coral"), fill = coral_col) +
# load qoliqoli
geom_sf(data = qoliqoli_do, fill = NA, aes(linetype = District), size = 0.5) +
# load suvery site data
geom_sf(data = surv_site, aes(fill=surveyor, shape=surveyor),show.legend = "point") +
# focus on the area of interest
coord_sf(xlim = xlim_qoli,
ylim = ylim_qoli) +
# x-axis breaks
scale_x_longitude(breaks = xbreaks) +
# survey shape
scale_shape_manual(name = "Survey Site",
labels = c("Ba EIA",
"New Site",
"Reef Check",
"WWF"),
values = c(eia_shape,
new_shape,
rfc_shape,
wwf_shape),
guide = guide_legend(override.aes = list(fill = c(eia_col,
new_col,
rfc_col,
wwf_col)))) +
# surveyor fill
scale_fill_manual(labels = c("Ba EIA",
"New Site",
"Reef Check",
"WWF"),
values = c(eia_col,
new_col,
rfc_col,
wwf_col)) +
# Great Sea Reef legend
scale_linetype_manual(name = "Borders",
values = c("3313","solid"),
guide = guide_legend(override.aes = list(color = c("grey50","grey30"),
shape = c(NA,NA)))) +
# coral legend
scale_color_manual(name = "Benthic habitat",
values = coral_col,
label = "Coral reefs",
guide = guide_legend(override.aes = list(fill = coral_col,
shape = NA))) +
# remove fill symbology
guides(fill = FALSE) +
# repel text of sites in area of interest
ggrepel::geom_text_repel(data = filter(surv_site, district == qoli_name),
mapping = aes(x = longitude,
y = latitude,
label = site,
geometry = geometry),
stat = "sf_coordinates",
size = 2,
fontface = "bold",
nudge_x = 20,
nudge_y = 30,
max.iter = 1500) +
# labels + title
labs(x="",y="", title="") +
# map elements
scalebar_macuata +
narrow_macuata +
# theme
theme_bw() +
map_theme_macuata_survey
# Export plots
out_coral <- paste0(qoli_name,"_coral.tiff")
ggsave(qoliqoli_coral, filename=file.path(qoliqoli_map_dir, out_coral), width=6.5,
height=4.5, units="in", dpi=600, compression = "lzw")
out_seagrass <- paste0(qoli_name,"_seagrass.tiff")
ggsave(qoliqoli_seagrass, filename=file.path(qoliqoli_map_dir, out_seagrass), width=6.5,
height=4.5, units="in", dpi=600, compression = "lzw")
out_mangrove <- paste0(qoli_name,"_mangrove.tiff")
ggsave(qoliqoli_mangrove, filename=file.path(qoliqoli_map_dir, out_mangrove), width=6.5,
height=4.5, units="in", dpi=600, compression = "lzw")
out_mangchange <- paste0(qoli_name,"_mangrove_change.tiff")
ggsave(qoliqoli_mangrove_change, filename=file.path(qoliqoli_map_dir, out_mangchange), width=6.5,
height=4.5, units="in", dpi=600, compression = "lzw")
out_geomorphic <- paste0(qoli_name,"_geomorphic.tiff")
ggsave(qoliqoli_geomorphic, filename=file.path(qoliqoli_map_dir, out_geomorphic), width=6.5,
height=4.5, units="in", dpi=600, compression = "lzw")
out_sed <- paste0(qoli_name,"_sedimentation.tiff")
ggsave(qoliqoli_sediment, filename=file.path(qoliqoli_map_dir, out_sed), width=6.5,
height=4.5, units="in", dpi=600, compression = "lzw")
out_bath <- paste0(qoli_name,"_bathymetry.tiff")
ggsave(qoliqoli_bath, filename=file.path(qoliqoli_map_dir, out_bath), width=6.5,
height=4.5, units="in", dpi=600, compression = "lzw")
out_survey <- paste0(qoli_name,"_survey.tiff")
ggsave(qoliqoli_survey, filename=file.path(qoliqoli_map_dir, out_survey), width=6.5,
height=4.5, units="in", dpi=600, compression = "lzw")
}
|
# read in Excel file with data and clean it
# Read a plate-count workbook and reshape its raw cells into a tidy table:
# one row per data cell, with the swab-number / plate-number / swab-type
# header cells "beheaded" into their own columns.
# NOTE(review): xlsx_cells()/behead() presumably come from the
# tidyxl/unpivotr packages, loaded elsewhere — confirm.
tidy_excel <- function(file_path){
  # every cell of the workbook with position and typed value columns
  data_tbl <- xlsx_cells(file_path)
  dat <- data_tbl %>% behead("up-left", "swab_number") %>% behead("up", "plate_number") %>%
  behead("left", "swab_type") %>% select(swab_type, swab_number, plate_number, numeric, character)
  dat
}
clean_data <- function(uncleaned_data){
# add unique id for each swab
uncleaned_data$id <- rep(1:(nrow(uncleaned_data)/2), each = 2)
# strip asterisk and greater than sign from values then convert to numeric
uncleaned_data$character <- as.numeric(gsub(pattern = "\\*|\\*>", replacement = "0",
uncleaned_data$character))
# merge character column values into numeric column when numeric is NA
# and add flag for censored
uncleaned_data$numeric <- ifelse(!is.na(uncleaned_data$numeric), uncleaned_data$numeric, 0) +
ifelse(!is.na(uncleaned_data$character), uncleaned_data$character, 0)
uncleaned_data$censored <- uncleaned_data$numeric == 100
# remove "Plate " prefix but set plate_number as factor
uncleaned_data$plate_number <- trimws(gsub("Plate ", "", uncleaned_data$plate_number))
uncleaned_data$plate_number <- factor(uncleaned_data$plate_number, levels = as.character(1:40))
uncleaned_data$plate_first20 <- ifelse(uncleaned_data$plate_number %in% paste(1:20), 1, 0)
# add plate_grouping so we can do small multiples plot
uncleaned_data$plate_group <- uncleaned_data$plate_number
levels(uncleaned_data$plate_group) <- rep(1:4, each = 10)
# add copan floq flag for setting color
uncleaned_data$ref_flag <- factor(ifelse(uncleaned_data$swab_type == "Copan Floq", 1, 0))
# set swab_type as factor and re-level so that Copan Floq is
# reference level
uncleaned_data$swab_type <- factor(uncleaned_data$swab_type)
uncleaned_data$swab_type <- relevel(uncleaned_data$swab_type, "Copan Floq")
uncleaned_data <- uncleaned_data %>% rename(count = numeric)
uncleaned_data
} | /r/functions.R | no_license | Scott-Coggeshall/repro_research_example | R | false | false | 2,063 | r | # read in Excel file with data and clean it
# Read a plate-count workbook and return a tidy cell table: the
# swab-number (upper-left), plate-number (top) and swab-type (left)
# headers are beheaded into columns alongside each cell's numeric and
# character values.
tidy_excel <- function(file_path){
  cells <- xlsx_cells(file_path)
  cells <- behead(cells, "up-left", "swab_number")
  cells <- behead(cells, "up", "plate_number")
  cells <- behead(cells, "left", "swab_type")
  select(cells, swab_type, swab_number, plate_number, numeric, character)
}
# Clean the beheaded cell table from tidy_excel(): pair up rows per swab,
# merge numeric/character counts, flag censored plates, and normalise the
# plate and swab-type columns. Steps are order-dependent (plate_number
# must be a factor before plate_group copies its levels).
clean_data <- function(uncleaned_data){
  # add unique id for each swab
  # NOTE(review): assumes rows arrive in consecutive pairs per swab and
  # that nrow() is even — confirm against tidy_excel output
  uncleaned_data$id <- rep(1:(nrow(uncleaned_data)/2), each = 2)
  # strip asterisk and greater than sign from values then convert to numeric
  # NOTE(review): the replacement is "0", not "" — a value like "100*"
  # becomes 1000, and ">" is never matched by this pattern. Presumably the
  # censored cells are bare "*" (so they become 0); confirm with raw data.
  uncleaned_data$character <- as.numeric(gsub(pattern = "\\*|\\*>", replacement = "0",
                                              uncleaned_data$character))
  # merge character column values into numeric column when numeric is NA
  # and add flag for censored
  uncleaned_data$numeric <- ifelse(!is.na(uncleaned_data$numeric), uncleaned_data$numeric, 0) +
    ifelse(!is.na(uncleaned_data$character), uncleaned_data$character, 0)
  # NOTE(review): 100 is a magic number — presumably the plate saturation
  # count at which a reading is censored; confirm.
  uncleaned_data$censored <- uncleaned_data$numeric == 100
  # remove "Plate " prefix but set plate_number as factor
  uncleaned_data$plate_number <- trimws(gsub("Plate ", "", uncleaned_data$plate_number))
  uncleaned_data$plate_number <- factor(uncleaned_data$plate_number, levels = as.character(1:40))
  # indicator: 1 for plates 1-20, 0 for plates 21-40
  uncleaned_data$plate_first20 <- ifelse(uncleaned_data$plate_number %in% paste(1:20), 1, 0)
  # add plate_grouping so we can do small multiples plot
  # (relabels the 40 plate levels into 4 groups of 10)
  uncleaned_data$plate_group <- uncleaned_data$plate_number
  levels(uncleaned_data$plate_group) <- rep(1:4, each = 10)
  # add copan floq flag for setting color
  uncleaned_data$ref_flag <- factor(ifelse(uncleaned_data$swab_type == "Copan Floq", 1, 0))
  # set swab_type as factor and re-level so that Copan Floq is
  # reference level
  uncleaned_data$swab_type <- factor(uncleaned_data$swab_type)
  uncleaned_data$swab_type <- relevel(uncleaned_data$swab_type, "Copan Floq")
  uncleaned_data <- uncleaned_data %>% rename(count = numeric)
  uncleaned_data
}
## Functions to return the inverse of a passed matrix
## Creates the matrix with its own set of functions
## Create a special "matrix" object that caches its inverse: returns a
## list of accessor closures over the matrix `x` and cached inverse `i`.
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL # cached inverse; NULL until setinverse() is called
  # replace the stored matrix and invalidate any cached inverse
  set <- function(y) {
    x <<- y
    i <<- NULL
  }
  # Function to retrieve the matrix.
  # Fix: the original line had no body (`function()` followed only by a
  # comment), so R parsed the next assignment as get's body; setinverse
  # was then undefined when list() ran and the constructor errored.
  get <- function() x
  setinverse <- function(inv) i <<- inv # store the inverse
  getinverse <- function() i # return the cached inverse (or NULL)
  list(set = set, get = get, # expose the four accessors
       setinverse = setinverse,
       getinverse = getinverse)
}
## Accepts a function created by makeCacheMatrix and either
## a) returns its inverse if this hasn't already be done or
## b) retrieves it from memory if it has.
## Return the inverse of a makeCacheMatrix() object: compute and cache
## it on the first call, serve the cached copy (with a message) after.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # cache miss: invert the stored matrix and remember the result
    inv <- solve(x$get())
    x$setinverse(inv)
    return(inv)
  }
  message("Retrieving cached data...")
  cached
}
| /cachematrix.R | no_license | strobbe/datasciencecoursera | R | false | false | 1,119 | r | ## Functions to return the inverse of a passed matrix
## Creates the matrix with its own set of functions
## Create a special "matrix" object that caches its inverse: returns a
## list of accessor closures over the matrix `x` and cached inverse `i`.
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL # cached inverse; NULL until setinverse() is called
  # replace the stored matrix and invalidate any cached inverse
  set <- function(y) {
    x <<- y
    i <<- NULL
  }
  # Function to retrieve the matrix.
  # Fix: the original line had no body (`function()` followed only by a
  # comment), so R parsed the next assignment as get's body; setinverse
  # was then undefined when list() ran and the constructor errored.
  get <- function() x
  setinverse <- function(inv) i <<- inv # store the inverse
  getinverse <- function() i # return the cached inverse (or NULL)
  list(set = set, get = get, # expose the four accessors
       setinverse = setinverse,
       getinverse = getinverse)
}
## Accepts a function created by makeCacheMatrix and either
## a) returns its inverse if this hasn't already be done or
## b) retrieves it from memory if it has.
## Return the inverse of a makeCacheMatrix() object: compute and cache
## it on the first call, serve the cached copy (with a message) after.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # cache miss: invert the stored matrix and remember the result
    inv <- solve(x$get())
    x$setinverse(inv)
    return(inv)
  }
  message("Retrieving cached data...")
  cached
}
|
% File src/library/utils/man/object.size.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2017 R Core Team
% Distributed under GPL 2 or later
\name{object.size}
\alias{object.size}
\alias{format.object_size}
\alias{print.object_size}
\title{Report the Space Allocated for an Object}
\description{
Provides an estimate of the memory that is being used to store an \R object.
}
\usage{
object.size(x)
\method{format}{object_size}(x, units = "b", standard = "auto", digits = 1L, \dots)
\method{print}{object_size}(x, quote = FALSE, units = "b", standard = "auto",
digits = 1L, \dots)
}
\arguments{
\item{x}{an \R object.}
\item{quote}{logical, indicating whether or not the result should be
printed with surrounding quotes.}
\item{units}{the units to be used in formatting and printing the size.
Allowed values for the different \code{standard}s are
\describe{
\item{\code{standard = "legacy"}:}{
\code{"b"}, \code{"Kb"}, \code{"Mb"}, \code{"Gb"}, \code{"Tb"}, \code{"Pb"},
\code{"B"}, \code{"KB"}, \code{"MB"}, \code{"GB"}, \code{"TB"} and \code{"PB"}.}
\item{\code{standard = "IEC"}:}{
\code{"B"}, \code{"KiB"}, \code{"MiB"}, \code{"GiB"},
\code{"TiB"}, \code{"PiB"}, \code{"EiB"}, \code{"ZiB"} and \code{"YiB"}.}
\item{\code{standard = "SI"}:}{
\code{"B"}, \code{"kB"}, \code{"MB"}, \code{"GB"}, \code{"TB"},
\code{"PB"}, \code{"EB"}, \code{"ZB"} and \code{"YB"}.}
}
For all standards, \code{unit = "auto"} is also allowed.
If \code{standard = "auto"}, any of the "legacy" and \acronym{IEC}
units are allowed.
See \sQuote{Formatting and printing object sizes} for details.}
\item{standard}{the byte-size unit standard to be used. A character
string, possibly abbreviated from \code{"legacy"}, \code{"IEC"},
\code{"SI"} and \code{"auto"}. See \sQuote{Formatting and printing
object sizes} for details.}
\item{digits}{the number of digits after the decimal point, passed to
\code{\link{round}}.}
\item{\dots}{arguments to be passed to or from other methods.}
}
\details{
Exactly which parts of the memory allocation should be attributed to
which object is not clear-cut. This function merely provides a rough
indication: it should be reasonably accurate for atomic vectors, but
does not detect if elements of a list are shared, for example.
(Sharing amongst elements of a character vector is taken into account,
but not that between character vectors in a single object.)
The calculation is of the size of the object, and excludes the space
needed to store its name in the symbol table.
Associated space (e.g., the environment of a function and what the
pointer in a \code{EXTPTRSXP} points to) is not included in the
calculation.
Object sizes are larger on 64-bit builds than 32-bit ones, but will
very likely be the same on different platforms with the same word
length and pointer size.
Sizes of objects using a compact internal representation may be
over-estimated.
}
\section{Formatting and printing object sizes}{
Object sizes can be formatted using byte-size units from \R's legacy
standard, the \acronym{IEC} standard, or the \acronym{SI} standard.
As illustrated by below tables, the legacy and \acronym{IEC} standards use
\emph{binary} units (multiples of 1024), whereas the SI standard uses
\emph{decimal} units (multiples of 1000).
For methods \code{format} and \code{print}, argument \code{standard}
specifies which standard to use and argument \code{units} specifies
which byte-size unit to use. \code{units = "auto"} chooses the largest
units in which the result is one or more (before rounding).
Byte sizes are rounded to \code{digits} decimal places.
\code{standard = "auto"} chooses the standard based on \code{units},
if possible, otherwise, the legacy standard is used.
Summary of \R's legacy and \acronym{IEC} units:
\tabular{lll}{
\bold{object size} \tab\bold{legacy} \tab\bold{IEC}\cr
1 \tab 1 bytes \tab 1 B \cr
1024 \tab 1 Kb \tab 1 KiB \cr
1024^2 \tab 1 Mb \tab 1 MiB \cr
1024^3 \tab 1 Gb \tab 1 GiB \cr
1024^4 \tab 1 Tb \tab 1 TiB \cr
1024^5 \tab 1 Pb \tab 1 PiB \cr
1024^6 \tab \tab 1 EiB \cr
1024^7 \tab \tab 1 ZiB \cr
1024^8 \tab \tab 1 YiB \cr
}
Summary of \acronym{SI} units:
\tabular{ll}{
\bold{object size} \tab \bold{SI} \cr
1 \tab 1 B \cr
1000 \tab 1 kB \cr
1000^2 \tab 1 MB \cr
1000^3 \tab 1 GB \cr
1000^4 \tab 1 TB \cr
1000^5 \tab 1 PB \cr
1000^6 \tab 1 EB \cr
1000^7 \tab 1 ZB \cr
1000^8 \tab 1 YB \cr
}
}
\value{
An object of class \code{"object_size"} with a length-one double value,
an estimate of the memory allocation attributable to the object in bytes.
}
\author{R Core; Henrik Bengtsson for the non-legacy \code{standard}s.}
\seealso{
\code{\link{Memory-limits}} for the design limitations on object size.
}
\references{
The wikipedia page, \url{https://en.wikipedia.org/wiki/Binary_prefix},
is extensive on the different standards, usages and their history.
}
\examples{
object.size(letters)
object.size(ls)
format(object.size(library), units = "auto")
sl <- object.size(rep(letters, 1000))
print(sl) ## 209288 bytes
print(sl, units = "auto") ## 204.4 Kb
print(sl, units = "auto", standard = "IEC") ## 204.4 KiB
print(sl, units = "auto", standard = "SI") ## 209.3 kB
(fsl <- sapply(c("Kb", "KB", "KiB"),
function(u) format(sl, units = u)))
stopifnot(identical( ## assert that all three are the same :
unique(substr(as.vector(fsl), 1,5)),
format(round(as.vector(sl)/1024, 1))))
## find the 10 largest objects in the base package
z <- sapply(ls("package:base"), function(x)
object.size(get(x, envir = baseenv())))
if(interactive()) {
as.matrix(rev(sort(z))[1:10])
} else # (more constant over time):
names(rev(sort(z))[1:10])
}
\keyword{utilities}
| /bin/R-3.5.1/src/library/utils/man/object.size.Rd | permissive | lifebit-ai/exomedepth | R | false | false | 6,250 | rd | % File src/library/utils/man/object.size.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2017 R Core Team
% Distributed under GPL 2 or later
\name{object.size}
\alias{object.size}
\alias{format.object_size}
\alias{print.object_size}
\title{Report the Space Allocated for an Object}
\description{
Provides an estimate of the memory that is being used to store an \R object.
}
\usage{
object.size(x)
\method{format}{object_size}(x, units = "b", standard = "auto", digits = 1L, \dots)
\method{print}{object_size}(x, quote = FALSE, units = "b", standard = "auto",
digits = 1L, \dots)
}
\arguments{
\item{x}{an \R object.}
\item{quote}{logical, indicating whether or not the result should be
printed with surrounding quotes.}
\item{units}{the units to be used in formatting and printing the size.
Allowed values for the different \code{standard}s are
\describe{
\item{\code{standard = "legacy"}:}{
\code{"b"}, \code{"Kb"}, \code{"Mb"}, \code{"Gb"}, \code{"Tb"}, \code{"Pb"},
\code{"B"}, \code{"KB"}, \code{"MB"}, \code{"GB"}, \code{"TB"} and \code{"PB"}.}
\item{\code{standard = "IEC"}:}{
\code{"B"}, \code{"KiB"}, \code{"MiB"}, \code{"GiB"},
\code{"TiB"}, \code{"PiB"}, \code{"EiB"}, \code{"ZiB"} and \code{"YiB"}.}
\item{\code{standard = "SI"}:}{
\code{"B"}, \code{"kB"}, \code{"MB"}, \code{"GB"}, \code{"TB"},
\code{"PB"}, \code{"EB"}, \code{"ZB"} and \code{"YB"}.}
}
For all standards, \code{unit = "auto"} is also allowed.
If \code{standard = "auto"}, any of the "legacy" and \acronym{IEC}
units are allowed.
See \sQuote{Formatting and printing object sizes} for details.}
\item{standard}{the byte-size unit standard to be used. A character
string, possibly abbreviated from \code{"legacy"}, \code{"IEC"},
\code{"SI"} and \code{"auto"}. See \sQuote{Formatting and printing
object sizes} for details.}
\item{digits}{the number of digits after the decimal point, passed to
\code{\link{round}}.}
\item{\dots}{arguments to be passed to or from other methods.}
}
\details{
Exactly which parts of the memory allocation should be attributed to
which object is not clear-cut. This function merely provides a rough
indication: it should be reasonably accurate for atomic vectors, but
does not detect if elements of a list are shared, for example.
(Sharing amongst elements of a character vector is taken into account,
but not that between character vectors in a single object.)
The calculation is of the size of the object, and excludes the space
needed to store its name in the symbol table.
Associated space (e.g., the environment of a function and what the
pointer in a \code{EXTPTRSXP} points to) is not included in the
calculation.
Object sizes are larger on 64-bit builds than 32-bit ones, but will
very likely be the same on different platforms with the same word
length and pointer size.
Sizes of objects using a compact internal representation may be
over-estimated.
}
\section{Formatting and printing object sizes}{
Object sizes can be formatted using byte-size units from \R's legacy
standard, the \acronym{IEC} standard, or the \acronym{SI} standard.
As illustrated by below tables, the legacy and \acronym{IEC} standards use
\emph{binary} units (multiples of 1024), whereas the SI standard uses
\emph{decimal} units (multiples of 1000).
For methods \code{format} and \code{print}, argument \code{standard}
specifies which standard to use and argument \code{units} specifies
which byte-size unit to use. \code{units = "auto"} chooses the largest
units in which the result is one or more (before rounding).
Byte sizes are rounded to \code{digits} decimal places.
\code{standard = "auto"} chooses the standard based on \code{units},
if possible, otherwise, the legacy standard is used.
Summary of \R's legacy and \acronym{IEC} units:
\tabular{lll}{
\bold{object size} \tab\bold{legacy} \tab\bold{IEC}\cr
1 \tab 1 bytes \tab 1 B \cr
1024 \tab 1 Kb \tab 1 KiB \cr
1024^2 \tab 1 Mb \tab 1 MiB \cr
1024^3 \tab 1 Gb \tab 1 GiB \cr
1024^4 \tab 1 Tb \tab 1 TiB \cr
1024^5 \tab 1 Pb \tab 1 PiB \cr
1024^6 \tab \tab 1 EiB \cr
1024^7 \tab \tab 1 ZiB \cr
1024^8 \tab \tab 1 YiB \cr
}
Summary of \acronym{SI} units:
\tabular{ll}{
\bold{object size} \tab \bold{SI} \cr
1 \tab 1 B \cr
1000 \tab 1 kB \cr
1000^2 \tab 1 MB \cr
1000^3 \tab 1 GB \cr
1000^4 \tab 1 TB \cr
1000^5 \tab 1 PB \cr
1000^6 \tab 1 EB \cr
1000^7 \tab 1 ZB \cr
1000^8 \tab 1 YB \cr
}
}
\value{
An object of class \code{"object_size"} with a length-one double value,
an estimate of the memory allocation attributable to the object in bytes.
}
\author{R Core; Henrik Bengtsson for the non-legacy \code{standard}s.}
\seealso{
\code{\link{Memory-limits}} for the design limitations on object size.
}
\references{
The wikipedia page, \url{https://en.wikipedia.org/wiki/Binary_prefix},
is extensive on the different standards, usages and their history.
}
\examples{
object.size(letters)
object.size(ls)
format(object.size(library), units = "auto")
sl <- object.size(rep(letters, 1000))
print(sl) ## 209288 bytes
print(sl, units = "auto") ## 204.4 Kb
print(sl, units = "auto", standard = "IEC") ## 204.4 KiB
print(sl, units = "auto", standard = "SI") ## 209.3 kB
(fsl <- sapply(c("Kb", "KB", "KiB"),
function(u) format(sl, units = u)))
stopifnot(identical( ## assert that all three are the same :
unique(substr(as.vector(fsl), 1,5)),
format(round(as.vector(sl)/1024, 1))))
## find the 10 largest objects in the base package
z <- sapply(ls("package:base"), function(x)
object.size(get(x, envir = baseenv())))
if(interactive()) {
as.matrix(rev(sort(z))[1:10])
} else # (more constant over time):
names(rev(sort(z))[1:10])
}
\keyword{utilities}
|
source("functions.R")
conf.networks = list()
conf.dags = list()
# BNLEARN networks
# "alarm", "andes", "asia", "barley", "diabetes", "hailfinder", "hepar2",
# "insurance", "link", "mildew", "munin", "munin1","munin2", "munin3", "munin4",
# "pigs", "water", "win95pts"
# ECML
# "child", "insurance", "alarm", "mildew", "hailfinder", "munin1", "pigs", "link"
# conf.trainingsizes = c(50, 100, 200, 500, 1500, 5000)
# conf.testsize = 5000
# KOJIMA
# "alarm", "alarm3", "alarm5", "alarm10",
# "insurance", "insurance3", "insurance5", "insurance10",
# "child", "child3", "child5", "child10"
# conf.alphas = c(0.01, 0.02, 0.05)
# conf.trainingsizes = c(1000, 10000)
# conf.testsize = 10000
# KOJIMA - his datasets
# "koj.alarm01", "koj.alarm03", "koj.alarm05", "koj.alarm10",
# "koj.insurance01", "koj.insurance03", "koj.insurance05", "koj.insurance10",
# "koj.child01", "koj.child03", "koj.child05", "koj.child10"
# conf.alphas = c(0.01, 0.02, 0.05)
# conf.trainingsizes = c(500, 1000, 10000)
# conf.testsize = 10000
# Tsamardinos - his datasets
# "tsam.alarm1", "tsam.alarm3", "tsam.alarm5", "tsam.alarm10"
# "tsam.barley", "tsam.child1", "tsam.child3", "tsam.child5",
# "tsam.child10", "tsam.gene", "tsam.hailfinder1", "tsam.hailfinder3",
# "tsam.hailfinder5", "tsam.hailfinder10", "tsam.insurance1",
# "tsam.insurance3", "tsam.insurance5", "tsam.insurance10",
# "tsam.link", "tsam.mildew", "tsam.munin1", "tsam.pigs"
# conf.trainingsizes = c(500, 1000, 5000)
# conf.testsize = 5000
# Load each benchmark Bayesian network from its .rda file under networks/
# and derive the corresponding DAG structure.
for (network in c(
  "child", "insurance", "mildew", "alarm", "hailfinder", "munin1", "pigs", "link"
)) {
  # load() returns the *name* of the restored object; get() fetches its value.
  conf.networks[[network]] = get(load(file=paste("networks/", network, ".rda", sep="")))
  # bn.net() (bnlearn) extracts the network structure from the fitted network.
  conf.dags[[network]] = bn.net(conf.networks[[network]])
}
conf.trainingsizes = c(50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000)
conf.pc.methods = c(
"hpc-fdr", "mmpc"
# "rpc", "rpc2-and", "iambfdr", "mmpc", "pc", "hpc", "hpc-fdr"
# "truedag", "hpc.cached"
# "none"
# "hpc", "hpc-or", "fast-hpc", "hpc-fdr", "hpc.cached", "hpc.cached-fdr"
# "mmpc", "mmpc-bt"
# "none", "truedag"
# "iamb", "inter-iamb", "fast-iamb"
)
conf.tests = "mi" # "mi-h
conf.alphas = c(0.05) # c(0.01, 0.02, 0.05) c(0)
conf.trainingreps = 1:10
conf.trainingpermuts = 1:1
conf.testsize = 50000
conf.seed = 0
conf.nbcores = 4
conf.ss.methods = c("tabu")# "tabu", "2p"
conf.score = "bde"#"bde"
conf.tabu = 100
conf.max.tabu = 15
conf.restart = 0
conf.perturb = 0
conf.progress.tracking = TRUE
conf.fig.format = "eps"
# Plot colors for the constraint-based methods: with exactly two methods use
# fixed high-contrast colors, otherwise spread methods over a rainbow palette.
if (length(conf.pc.methods) == 2) {
  conf.pc.colors = c("darkgreen", "red")
} else {
  conf.pc.colors = as.list(rainbow(length(conf.pc.methods)))
}
# Index the colors by method name so lookups like conf.pc.colors[["mmpc"]] work.
names(conf.pc.colors) = conf.pc.methods
conf.pc.labels = list(
"iamb" = "IAMB",
"inter-iamb" = "Inter-IAMB",
"fast-iamb" = "Fast-IAMB",
"mmpc" = "MMPC",
"mmpc-bt" = "MMPC-BT",
"pc" = "PC",
# "rpc" = "R-MMPC-OR",
# "rpc-and" = "R_MMPC-AND",
# "rpc2" = "R-IAPC-OR",
# "rpc2-and" = "R-IAPC-AND",
"iambfdr" = "IAMBFDR",
"iambfdr-and" = "IAMBFDR-AND",
"hpc" = "HPC",
"hpc-or" = "HPC-OR",
"hpc-fdr" = "HPC",
"hpc-fdr-or" = "HPC-FDR-OR",
"hpc-fdr-bt" = "HPC-FDR-BT",
"fast-hpc" = "HPC-fast",
# "hpc.cached" = "HPC-cached",
# "hpc.cached-fdr" = "HPC-cached-FDR",
"none" = "none",
"truedag" = "truedag")
conf.pc.base.method = "mmpc" # NULL "mmpc" | /conf.R | no_license | gasse/bayes-benchmark | R | false | false | 3,338 | r | source("functions.R")
conf.networks = list()
conf.dags = list()
# BNLEARN networks
# "alarm", "andes", "asia", "barley", "diabetes", "hailfinder", "hepar2",
# "insurance", "link", "mildew", "munin", "munin1","munin2", "munin3", "munin4",
# "pigs", "water", "win95pts"
# ECML
# "child", "insurance", "alarm", "mildew", "hailfinder", "munin1", "pigs", "link"
# conf.trainingsizes = c(50, 100, 200, 500, 1500, 5000)
# conf.testsize = 5000
# KOJIMA
# "alarm", "alarm3", "alarm5", "alarm10",
# "insurance", "insurance3", "insurance5", "insurance10",
# "child", "child3", "child5", "child10"
# conf.alphas = c(0.01, 0.02, 0.05)
# conf.trainingsizes = c(1000, 10000)
# conf.testsize = 10000
# KOJIMA - his datasets
# "koj.alarm01", "koj.alarm03", "koj.alarm05", "koj.alarm10",
# "koj.insurance01", "koj.insurance03", "koj.insurance05", "koj.insurance10",
# "koj.child01", "koj.child03", "koj.child05", "koj.child10"
# conf.alphas = c(0.01, 0.02, 0.05)
# conf.trainingsizes = c(500, 1000, 10000)
# conf.testsize = 10000
# Tsamardinos - his datasets
# "tsam.alarm1", "tsam.alarm3", "tsam.alarm5", "tsam.alarm10"
# "tsam.barley", "tsam.child1", "tsam.child3", "tsam.child5",
# "tsam.child10", "tsam.gene", "tsam.hailfinder1", "tsam.hailfinder3",
# "tsam.hailfinder5", "tsam.hailfinder10", "tsam.insurance1",
# "tsam.insurance3", "tsam.insurance5", "tsam.insurance10",
# "tsam.link", "tsam.mildew", "tsam.munin1", "tsam.pigs"
# conf.trainingsizes = c(500, 1000, 5000)
# conf.testsize = 5000
# Load each benchmark Bayesian network from its .rda file under networks/
# and derive the corresponding DAG structure.
for (network in c(
  "child", "insurance", "mildew", "alarm", "hailfinder", "munin1", "pigs", "link"
)) {
  # load() returns the *name* of the restored object; get() fetches its value.
  conf.networks[[network]] = get(load(file=paste("networks/", network, ".rda", sep="")))
  # bn.net() (bnlearn) extracts the network structure from the fitted network.
  conf.dags[[network]] = bn.net(conf.networks[[network]])
}
conf.trainingsizes = c(50, 100, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000)
conf.pc.methods = c(
"hpc-fdr", "mmpc"
# "rpc", "rpc2-and", "iambfdr", "mmpc", "pc", "hpc", "hpc-fdr"
# "truedag", "hpc.cached"
# "none"
# "hpc", "hpc-or", "fast-hpc", "hpc-fdr", "hpc.cached", "hpc.cached-fdr"
# "mmpc", "mmpc-bt"
# "none", "truedag"
# "iamb", "inter-iamb", "fast-iamb"
)
conf.tests = "mi" # "mi-h
conf.alphas = c(0.05) # c(0.01, 0.02, 0.05) c(0)
conf.trainingreps = 1:10
conf.trainingpermuts = 1:1
conf.testsize = 50000
conf.seed = 0
conf.nbcores = 4
conf.ss.methods = c("tabu")# "tabu", "2p"
conf.score = "bde"#"bde"
conf.tabu = 100
conf.max.tabu = 15
conf.restart = 0
conf.perturb = 0
conf.progress.tracking = TRUE
conf.fig.format = "eps"
if (length(conf.pc.methods) == 2) {
conf.pc.colors = c("darkgreen", "red")
} else {
conf.pc.colors = as.list(rainbow(length(conf.pc.methods)))
}
names(conf.pc.colors) = conf.pc.methods
conf.pc.labels = list(
"iamb" = "IAMB",
"inter-iamb" = "Inter-IAMB",
"fast-iamb" = "Fast-IAMB",
"mmpc" = "MMPC",
"mmpc-bt" = "MMPC-BT",
"pc" = "PC",
# "rpc" = "R-MMPC-OR",
# "rpc-and" = "R_MMPC-AND",
# "rpc2" = "R-IAPC-OR",
# "rpc2-and" = "R-IAPC-AND",
"iambfdr" = "IAMBFDR",
"iambfdr-and" = "IAMBFDR-AND",
"hpc" = "HPC",
"hpc-or" = "HPC-OR",
"hpc-fdr" = "HPC",
"hpc-fdr-or" = "HPC-FDR-OR",
"hpc-fdr-bt" = "HPC-FDR-BT",
"fast-hpc" = "HPC-fast",
# "hpc.cached" = "HPC-cached",
# "hpc.cached-fdr" = "HPC-cached-FDR",
"none" = "none",
"truedag" = "truedag")
conf.pc.base.method = "mmpc" # NULL "mmpc" |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Authentication.R
\name{login}
\alias{login}
\title{Login to a TMC server}
\usage{
login(clientID, secret, username, password, serverAddress)
}
\arguments{
\item{clientID}{Client ID used in \code{OAuth2}.}
\item{secret}{Secret used in \code{OAuth2}.}
\item{username}{Username of a TMC account.}
\item{password}{Password matching the inputed username of a TMC account.}
\item{serverAddress}{Address of the TMC server which the user wants to log in to.}
}
\value{
An \code{OAuth2} token if the authentication was successful, otherwise returns an error message.
}
\description{
Logs in to a TMC server using an username and a password along with a clientID and
secret used for \code{OAuth2} authentication.
}
\details{
If logging in was successful, saves the login credentials (username, access-token
and the server address) in the \code{.credentials} file.
}
\seealso{
\code{\link[httr]{POST}}, \code{\link[httr]{status_code}}, \code{\link[httr]{content}},
\code{\link{saveCredentials}}
}
| /tmcrstudioaddin/man/login.Rd | no_license | RTMC/tmc-rstudio | R | false | true | 1,067 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Authentication.R
\name{login}
\alias{login}
\title{Login to a TMC server}
\usage{
login(clientID, secret, username, password, serverAddress)
}
\arguments{
\item{clientID}{Client ID used in \code{OAuth2}.}
\item{secret}{Secret used in \code{OAuth2}.}
\item{username}{Username of a TMC account.}
\item{password}{Password matching the inputed username of a TMC account.}
\item{serverAddress}{Address of the TMC server which the user wants to log in to.}
}
\value{
An \code{OAuth2} token if the authentication was successful, otherwise returns an error message.
}
\description{
Logs in to a TMC server using an username and a password along with a clientID and
secret used for \code{OAuth2} authentication.
}
\details{
If logging in was successful, saves the login credentials (username, access-token
and the server address) in the \code{.credentials} file.
}
\seealso{
\code{\link[httr]{POST}}, \code{\link[httr]{status_code}}, \code{\link[httr]{content}},
\code{\link{saveCredentials}}
}
|
library(caret)
library(tidyverse)
library(dslabs)
data(iris)
iris <- iris[-which(iris$Species=='setosa'),]
# give true indices,pural for index,
# removing setosa
y <- iris$Species
# set.seed(2) # if using R 3.5 or earlier
set.seed(2, sample.kind="Rounding") # if using R 3.6 or later
test_index <- createDataPartition(y,times=1,p=0.5,list=FALSE)
#sepersting data
# line of code
test <- iris[test_index,]
train <- iris[-test_index,]
head(train)
#*
# find the best preditor amoumt many predictor to predict two cat. outcomes
# Accuracy of a one-feature threshold classifier over a grid of cutoffs.
#
# For every candidate cutoff between min(feature) and max(feature) (step 0.1),
# the rule predicts 'virginica' when the feature exceeds the cutoff and
# 'versicolor' otherwise; each prediction is scored against the training
# labels. Returns a numeric vector with one accuracy per candidate cutoff.
#
# NOTE(review): reads `train$Species` from the calling environment -- consider
# passing the labels in as an argument.
foo <- function(feature) {
  # Candidate cutoffs spanning the observed range of the feature.
  cutoffs <- seq(min(feature), max(feature), by = 0.1)
  vapply(cutoffs, function(cutoff) {
    predicted <- ifelse(feature > cutoff, "virginica", "versicolor")
    # Proportion of training labels predicted correctly at this cutoff.
    mean(predicted == train$Species)
  }, numeric(1))
}
predictions <- apply(train[,-5],2,foo)
#applying a function to margins of an array or matrix(
# matrix,
#1 indicates rows 2 indicates colume 1&2 indicates rows and columes,
# the function to be applyed )
head(predictions)
# returning a portion for each cutoff over all specified colum data, so outputs has leght(rangedValues) in each each colum eg.($Sepal.Length)
sapply(predictions,max)
# looking for a max on preditons
# using samrt cutoff value from training data to calvulate overall accuracy on test set
head(train)
predictions <- foo(train[,3])
# indexing traning data on 3rd colume
rangedValues <- seq(min(train[,3]) ,max(train[,3]),by=0.1)
# generating a seq of vacalue
cutoffs <-rangedValues[which(predictions==max(predictions))]
# lookiing for the best predition on test set
y_hat <- ifelse(test[,3]>cutoffs[1],'virginica','versicolor')
mean(y_hat==test$Species)
#explortory data analysis
plot(iris,pch=21,bg=iris$Species)
#Petal.Length and Petal.Width in combination could potentially be more information than either feature alone
predictions <- foo(train[,3] )
# indexing traning data on 3rd colume
rangedValues <- seq(min(train[,3] ) ,max(train[,3] ),by=0.1)
# generating a seq of vacalue
cutoffs <-rangedValues[which(predictions==max(predictions))]
# lookiing for the best predition on test set
predictions <- foo(train[,4] )
# indexing traning data on 3rd colume
rangedValues <- seq(min(train[,4] ) ,max(train[,4] ),by=0.1)
# generating a seq of vacalue
cutoffss <-rangedValues[which(predictions==max(predictions))]
# lookiing for the best predition on test set
cutoffss
y_hat <- ifelse(test[,3]>cutoffs[1]|test[,4]>cutoffss[1],'virginica','versicolor')
mean(y_hat==test$Species)
# up to here
# solution on hw
library(caret)
data(iris)
iris <- iris[-which(iris$Species=='setosa'),]
y <- iris$Species
plot(iris,pch=21,bg=iris$Species)
# set.seed(2) # if using R 3.5 or earlier
set.seed(2, sample.kind="Rounding") # if using R 3.6 or later
test_index <- createDataPartition(y,times=1,p=0.5,list=FALSE)
test <- iris[test_index,]
train <- iris[-test_index,]
petalLengthRange <- seq(range(train$Petal.Length)[1],range(train$Petal.Length)[2],by=0.1)
petalWidthRange <- seq(range(train$Petal.Width)[1],range(train$Petal.Width)[2],by=0.1)
length_predictions <- sapply(petalLengthRange,function(i){
y_hat <- ifelse(train$Petal.Length>i,'virginica','versicolor')
mean(y_hat==train$Species)
})
length_cutoff <- petalLengthRange[which.max(length_predictions)] # 4.7
width_predictions <- sapply(petalWidthRange,function(i){
y_hat <- ifelse(train$Petal.Width>i,'virginica','versicolor')
mean(y_hat==train$Species)
})
width_cutoff <- petalWidthRange[which.max(width_predictions)] # 1.5
y_hat <- ifelse(test$Petal.Length>length_cutoff | test$Petal.Width>width_cutoff,'virginica','versicolor')
mean(y_hat==test$Species)
| /multiPredictorsTwoCatergory.R | no_license | BaoLei1/Machine_Learning | R | false | false | 4,307 | r | library(caret)
library(tidyverse)
library(dslabs)
data(iris)
iris <- iris[-which(iris$Species=='setosa'),]
# give true indices,pural for index,
# removing setosa
y <- iris$Species
# set.seed(2) # if using R 3.5 or earlier
set.seed(2, sample.kind="Rounding") # if using R 3.6 or later
test_index <- createDataPartition(y,times=1,p=0.5,list=FALSE)
#sepersting data
# line of code
test <- iris[test_index,]
train <- iris[-test_index,]
head(train)
#*
# find the best preditor amoumt many predictor to predict two cat. outcomes
# Sweep a grid of cutoffs over `feature` and return the training accuracy of a
# one-feature threshold rule ('virginica' above the cutoff) at each cutoff.
# NOTE(review): reads `train$Species` from the calling environment.
foo <- function(feature){
  # candidate cutoffs spanning the observed range of the feature, step 0.1
  rangedValues <- seq(min(feature) ,max(feature) ,by=0.1)
  # one accuracy value per candidate cutoff
  sapply(rangedValues,function(rangedValues){
    # predict 'virginica' when the feature exceeds the cutoff
    y_hat <- ifelse(feature>rangedValues,'virginica','versicolor')
    # proportion of training labels predicted correctly at this cutoff
    mean(y_hat==train$Species)
  })
}
predictions <- apply(train[,-5],2,foo)
#applying a function to margins of an array or matrix(
# matrix,
#1 indicates rows 2 indicates colume 1&2 indicates rows and columes,
# the function to be applyed )
head(predictions)
# returning a portion for each cutoff over all specified colum data, so outputs has leght(rangedValues) in each each colum eg.($Sepal.Length)
sapply(predictions,max)
# looking for a max on preditons
# using samrt cutoff value from training data to calvulate overall accuracy on test set
head(train)
predictions <- foo(train[,3])
# indexing traning data on 3rd colume
rangedValues <- seq(min(train[,3]) ,max(train[,3]),by=0.1)
# generating a seq of vacalue
cutoffs <-rangedValues[which(predictions==max(predictions))]
# lookiing for the best predition on test set
y_hat <- ifelse(test[,3]>cutoffs[1],'virginica','versicolor')
mean(y_hat==test$Species)
#explortory data analysis
plot(iris,pch=21,bg=iris$Species)
#Petal.Length and Petal.Width in combination could potentially be more information than either feature alone
predictions <- foo(train[,3] )
# indexing traning data on 3rd colume
rangedValues <- seq(min(train[,3] ) ,max(train[,3] ),by=0.1)
# generating a seq of vacalue
cutoffs <-rangedValues[which(predictions==max(predictions))]
# lookiing for the best predition on test set
predictions <- foo(train[,4] )
# indexing traning data on 3rd colume
rangedValues <- seq(min(train[,4] ) ,max(train[,4] ),by=0.1)
# generating a seq of vacalue
cutoffss <-rangedValues[which(predictions==max(predictions))]
# lookiing for the best predition on test set
cutoffss
y_hat <- ifelse(test[,3]>cutoffs[1]|test[,4]>cutoffss[1],'virginica','versicolor')
mean(y_hat==test$Species)
# up to here
# solution on hw
library(caret)
data(iris)
iris <- iris[-which(iris$Species=='setosa'),]
y <- iris$Species
plot(iris,pch=21,bg=iris$Species)
# set.seed(2) # if using R 3.5 or earlier
set.seed(2, sample.kind="Rounding") # if using R 3.6 or later
test_index <- createDataPartition(y,times=1,p=0.5,list=FALSE)
test <- iris[test_index,]
train <- iris[-test_index,]
petalLengthRange <- seq(range(train$Petal.Length)[1],range(train$Petal.Length)[2],by=0.1)
petalWidthRange <- seq(range(train$Petal.Width)[1],range(train$Petal.Width)[2],by=0.1)
length_predictions <- sapply(petalLengthRange,function(i){
y_hat <- ifelse(train$Petal.Length>i,'virginica','versicolor')
mean(y_hat==train$Species)
})
length_cutoff <- petalLengthRange[which.max(length_predictions)] # 4.7
width_predictions <- sapply(petalWidthRange,function(i){
y_hat <- ifelse(train$Petal.Width>i,'virginica','versicolor')
mean(y_hat==train$Species)
})
width_cutoff <- petalWidthRange[which.max(width_predictions)] # 1.5
y_hat <- ifelse(test$Petal.Length>length_cutoff | test$Petal.Width>width_cutoff,'virginica','versicolor')
mean(y_hat==test$Species)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{suffolk}
\alias{suffolk}
\title{Weather in Suffolk, VA}
\format{A data frame with columns:
\describe{
\item{Date}{Date of weather observation}
\item{TemperatureF}{Daily mean temperature in Fahrenheit}
\item{Relative.Humidity}{Daily relative humidity in \%}
}}
\source{
\href{http://www.wunderground.com/}{Weather Underground}
}
\usage{
suffolk
}
\description{
Daily values of mean temperature (Fahrenheit) and mean relative humidity
(\%) for the week of July 12, 1998, in Suffolk, VA.
}
\keyword{datasets}
| /man/suffolk.Rd | no_license | cran/weathermetrics | R | false | true | 628 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{suffolk}
\alias{suffolk}
\title{Weather in Suffolk, VA}
\format{A data frame with columns:
\describe{
\item{Date}{Date of weather observation}
\item{TemperatureF}{Daily mean temperature in Fahrenheit}
\item{Relative.Humidity}{Daily relative humidity in \%}
}}
\source{
\href{http://www.wunderground.com/}{Weather Underground}
}
\usage{
suffolk
}
\description{
Daily values of mean temperature (Fahrenheit) and mean relative humidity
(\%) for the week of July 12, 1998, in Suffolk, VA.
}
\keyword{datasets}
|
#' Determine significance of wavelet coherence
#'
#' @author Tarik C. Gouhier (tarik.gouhier@@gmail.com)
#'
#' Code based on WTC MATLAB package written by Aslak Grinsted.
#'
#' @param nrands Number of Monte Carlo randomizations.
#' @param lag1 Vector containing the AR(1) coefficient of each time series.
#' @param dt Length of a time step.
#' @param ntimesteps Number of time steps in time series.
#' @param pad Pad the values will with zeros to increase the speed of the
#' transform.
#' @param dj Spacing between successive scales.
#' @param s0 Smallest scale of the wavelet.
#' @param J1 Number of scales - 1.
#' @param max.scale Maximum scale.
#' @param mother Type of mother wavelet function to use. Can be set to
#' \code{morlet}, \code{dog}, or \code{paul}.
#' Significance testing is only available for \code{morlet} wavelet.
#' @param sig.level Significance level to compute.
#' @param quiet Do not display progress bar.
#'
#' @return Returns significance matrix containing the \code{sig.level}
#' percentile of wavelet coherence at each time step and scale.
#'
#' @references
#' Cazelles, B., M. Chavez, D. Berteaux, F. Menard, J. O. Vik, S. Jenouvrier,
#' and N. C. Stenseth. 2008. Wavelet analysis of ecological time series.
#' \emph{Oecologia} 156:287-304.
#'
#' Grinsted, A., J. C. Moore, and S. Jevrejeva. 2004. Application of the cross
#' wavelet transform and wavelet coherence to geophysical time series.
#' \emph{Nonlinear Processes in Geophysics} 11:561-566.
#'
#' Torrence, C., and G. P. Compo. 1998. A Practical Guide to Wavelet Analysis.
#' \emph{Bulletin of the American Meteorological Society} 79:61-78.
#'
#' Torrence, C., and P. J. Webster. 1998. The annual cycle of persistence in the
#' El Nino/Southern Oscillation. \emph{Quarterly Journal of the Royal
#' Meteorological Society} 124:1985-2004.
#'
#' @note The Monte Carlo randomizations can be extremely slow for large
#' datasets. For instance, 1000 randomizations of a dataset consisting of 1000
#' samples will take ~30 minutes on a 2.66 GHz dual-core Xeon processor.
#'
#' @examples
#' # Not run: wtcsig <- wtc.sig(nrands, lag1 = c(d1.ar1, d2.ar1), dt,
#' # pad, dj, J1, s0, mother = "morlet")
#'
#' @export
wtc.sig <- function(nrands = 300, lag1, dt, ntimesteps, pad = TRUE,
                    dj = 1 / 12, s0, J1, max.scale = NULL,
                    mother = "morlet", sig.level = 0.95, quiet = FALSE) {
  # Monte Carlo significance: for each of `nrands` pairs of AR(1) surrogate
  # series, compute their wavelet coherence, then take the sig.level quantile
  # across randomizations at every (scale, time) cell.
  # No randomizations requested: significance cannot be estimated.
  if (nrands < 1) {
    return(NA)
  }
  # Smallest characteristic-root modulus of each AR(1) model; the surrogate
  # generator uses it to size its burn-in.
  mr1 <- get_minroots(lag1[1])
  mr2 <- get_minroots(lag1[2])
  ntseq <- seq_len(ntimesteps)
  # One preliminary transform, only to recover the scale vector and the
  # dimensions of the wavelet matrix for the allocations below.
  d1 <- cbind(ntseq, ar1_ma0_sim(mr1, lag1[1], ntimesteps))
  wt1 <- wt(d = d1, pad = pad, dj = dj, dt = dt, s0 = s0, J1 = J1,
            max.scale = max.scale, mother = mother, do.sig = FALSE)
  # 1/scale replicated across all time steps: normalizes each spectrum by
  # scale before smoothing.
  s.inv <- 1 / t(wt1$scale)
  s.inv <- matrix(rep(s.inv, ntimesteps), nrow = NROW(wt1$wave))
  # scales x timesteps x randomizations array of surrogate coherence values
  rand.rsq <- array(dim = c(NROW(wt1$wave), NCOL(wt1$wave), nrands), NA)
  if (!quiet) {
    prog.bar <- txtProgressBar(min = 0, max = nrands, style = 3)
  }
  for (r in seq_len(nrands)) {
    # Generate a fresh pair of AR(1) surrogate time series
    d1 <- cbind(ntseq, ar1_ma0_sim(mr1, lag1[1], ntimesteps))
    d2 <- cbind(ntseq, ar1_ma0_sim(mr2, lag1[2], ntimesteps))
    # Wavelet transforms of the surrogates
    wt1 <- wt(d = d1, pad = pad, dj = dj, dt = dt, s0 = s0, J1 = J1,
              max.scale = max.scale, mother = mother, do.sig = FALSE)
    wt2 <- wt(d = d2, pad = pad, dj = dj, dt = dt, s0 = s0, J1 = J1,
              max.scale = max.scale, mother = mother, do.sig = FALSE)
    # Smoothed cross wavelet transform and smoothed individual power spectra
    smooth.CW <- smooth.wavelet(s.inv * wt1$wave * Conj(wt2$wave),
                                dt, dj, wt1$scale)
    sw1 <- smooth.wavelet(s.inv * (abs(wt1$wave) ^ 2), dt, dj, wt1$scale)
    sw2 <- smooth.wavelet(s.inv * (abs(wt2$wave) ^ 2), dt, dj, wt2$scale)
    # Squared wavelet coherence of this surrogate pair
    rand.rsq[, , r] <- abs(smooth.CW) ^ 2 / (sw1 * sw2)
    if (!quiet) {
      setTxtProgressBar(prog.bar, r)
    }
  }
  if (!quiet) {
    close(prog.bar)
  }
  # The original slow implementation was using "apply" and "quantile" functions
  # apply(rand.rsq, MARGIN = c(1,2), quantile, sig.level, na.rm = TRUE)
  # This has been replaced with a C++ implementation taken from WGCNA package
  result <- matrix(nrow = nrow(rand.rsq), ncol = ncol(rand.rsq))
  for (i in seq_len(ncol(rand.rsq))) {
    # Row-wise sig.level quantile across randomizations for time step i.
    # TODO: can be faster if we remove as.matrix()
    result[,i] <- rcpp_row_quantile(as.matrix(rand.rsq[,i,]), sig.level)
  }
  return(result)
}
#' Helper function (not exported)
#' @param ar The 'ar' part of AR(1)
#' @return double
get_minroots <- function(ar) {
  # Roots of the AR(1) characteristic polynomial 1 - ar * z; the smallest
  # modulus determines stationarity (> 1 means stationary).
  roots <- polyroot(c(1, -ar))
  min(Mod(roots))
}
#' Slightly faster \code{\link{arima.sim}} implementation which assumes AR(1)
#' and \code{ma=0}.
#'
#' @param minroots Output from \code{\link{get_minroots}} function.
#' @param ar The 'ar' part of AR(1)
#' @param n Length of output series, before un-differencing. A strictly positive
#' integer.
#' @seealso \code{\link{arima.sim}}
ar1_ma0_sim <- function(minroots, ar, n) {
  # AR(1) is stationary only when all characteristic roots lie outside the
  # unit circle, i.e. minroots > 1.
  if (minroots <= 1) {
    stop("'ar' part of model is not stationary")
  }
  # Burn-in length: enough steps for start-up transients to decay
  # (same heuristic as stats::arima.sim).
  nstart <- 2 + ceiling(6 / log(minroots))
  # White-noise innovations, indexed so the returned series starts at time 1.
  x <- stats::ts(data = stats::rnorm(n + nstart), start = 1 - nstart)
  # Explicit stats:: qualification so the recursive filter cannot be masked
  # (e.g. by dplyr::filter) through the search path.
  x <- stats::filter(x, ar, method = "recursive")
  # Drop the burn-in; the result has length n.
  x[-seq_len(nstart)]
}
| /R/wtc.sig.R | no_license | tgouhier/biwavelet | R | false | false | 5,268 | r | #' Determine significance of wavelet coherence
#'
#' @author Tarik C. Gouhier (tarik.gouhier@@gmail.com)
#'
#' Code based on WTC MATLAB package written by Aslak Grinsted.
#'
#' @param nrands Number of Monte Carlo randomizations.
#' @param lag1 Vector containing the AR(1) coefficient of each time series.
#' @param dt Length of a time step.
#' @param ntimesteps Number of time steps in time series.
#' @param pad Pad the values will with zeros to increase the speed of the
#' transform.
#' @param dj Spacing between successive scales.
#' @param s0 Smallest scale of the wavelet.
#' @param J1 Number of scales - 1.
#' @param max.scale Maximum scale.
#' @param mother Type of mother wavelet function to use. Can be set to
#' \code{morlet}, \code{dog}, or \code{paul}.
#' Significance testing is only available for \code{morlet} wavelet.
#' @param sig.level Significance level to compute.
#' @param quiet Do not display progress bar.
#'
#' @return Returns significance matrix containing the \code{sig.level}
#' percentile of wavelet coherence at each time step and scale.
#'
#' @references
#' Cazelles, B., M. Chavez, D. Berteaux, F. Menard, J. O. Vik, S. Jenouvrier,
#' and N. C. Stenseth. 2008. Wavelet analysis of ecological time series.
#' \emph{Oecologia} 156:287-304.
#'
#' Grinsted, A., J. C. Moore, and S. Jevrejeva. 2004. Application of the cross
#' wavelet transform and wavelet coherence to geophysical time series.
#' \emph{Nonlinear Processes in Geophysics} 11:561-566.
#'
#' Torrence, C., and G. P. Compo. 1998. A Practical Guide to Wavelet Analysis.
#' \emph{Bulletin of the American Meteorological Society} 79:61-78.
#'
#' Torrence, C., and P. J. Webster. 1998. The annual cycle of persistence in the
#' El Nino/Southern Oscillation. \emph{Quarterly Journal of the Royal
#' Meteorological Society} 124:1985-2004.
#'
#' @note The Monte Carlo randomizations can be extremely slow for large
#' datasets. For instance, 1000 randomizations of a dataset consisting of 1000
#' samples will take ~30 minutes on a 2.66 GHz dual-core Xeon processor.
#'
#' @examples
#' # Not run: wtcsig <- wtc.sig(nrands, lag1 = c(d1.ar1, d2.ar1), dt,
#' # pad, dj, J1, s0, mother = "morlet")
#'
#' @export
wtc.sig <- function(nrands = 300, lag1, dt, ntimesteps, pad = TRUE,
                    dj = 1 / 12, s0, J1, max.scale = NULL,
                    mother = "morlet", sig.level = 0.95, quiet = FALSE) {
  # Monte Carlo significance: for each of `nrands` pairs of AR(1) surrogate
  # series, compute their wavelet coherence, then take the sig.level quantile
  # across randomizations at every (scale, time) cell.
  # No randomizations requested: significance cannot be estimated.
  if (nrands < 1) {
    return(NA)
  }
  # Smallest characteristic-root modulus of each AR(1) model; the surrogate
  # generator uses it to size its burn-in.
  mr1 <- get_minroots(lag1[1])
  mr2 <- get_minroots(lag1[2])
  ntseq <- seq_len(ntimesteps)
  # One preliminary transform, only to recover the scale vector and the
  # dimensions of the wavelet matrix for the allocations below.
  d1 <- cbind(ntseq, ar1_ma0_sim(mr1, lag1[1], ntimesteps))
  wt1 <- wt(d = d1, pad = pad, dj = dj, dt = dt, s0 = s0, J1 = J1,
            max.scale = max.scale, mother = mother, do.sig = FALSE)
  # 1/scale replicated across all time steps: normalizes each spectrum by
  # scale before smoothing.
  s.inv <- 1 / t(wt1$scale)
  s.inv <- matrix(rep(s.inv, ntimesteps), nrow = NROW(wt1$wave))
  # scales x timesteps x randomizations array of surrogate coherence values
  rand.rsq <- array(dim = c(NROW(wt1$wave), NCOL(wt1$wave), nrands), NA)
  if (!quiet) {
    prog.bar <- txtProgressBar(min = 0, max = nrands, style = 3)
  }
  for (r in seq_len(nrands)) {
    # Generate a fresh pair of AR(1) surrogate time series
    d1 <- cbind(ntseq, ar1_ma0_sim(mr1, lag1[1], ntimesteps))
    d2 <- cbind(ntseq, ar1_ma0_sim(mr2, lag1[2], ntimesteps))
    # Wavelet transforms of the surrogates
    wt1 <- wt(d = d1, pad = pad, dj = dj, dt = dt, s0 = s0, J1 = J1,
              max.scale = max.scale, mother = mother, do.sig = FALSE)
    wt2 <- wt(d = d2, pad = pad, dj = dj, dt = dt, s0 = s0, J1 = J1,
              max.scale = max.scale, mother = mother, do.sig = FALSE)
    # Smoothed cross wavelet transform and smoothed individual power spectra
    smooth.CW <- smooth.wavelet(s.inv * wt1$wave * Conj(wt2$wave),
                                dt, dj, wt1$scale)
    sw1 <- smooth.wavelet(s.inv * (abs(wt1$wave) ^ 2), dt, dj, wt1$scale)
    sw2 <- smooth.wavelet(s.inv * (abs(wt2$wave) ^ 2), dt, dj, wt2$scale)
    # Squared wavelet coherence of this surrogate pair
    rand.rsq[, , r] <- abs(smooth.CW) ^ 2 / (sw1 * sw2)
    if (!quiet) {
      setTxtProgressBar(prog.bar, r)
    }
  }
  if (!quiet) {
    close(prog.bar)
  }
  # The original slow implementation was using "apply" and "quantile" functions
  # apply(rand.rsq, MARGIN = c(1,2), quantile, sig.level, na.rm = TRUE)
  # This has been replaced with a C++ implementation taken from WGCNA package
  result <- matrix(nrow = nrow(rand.rsq), ncol = ncol(rand.rsq))
  for (i in seq_len(ncol(rand.rsq))) {
    # Row-wise sig.level quantile across randomizations for time step i.
    # TODO: can be faster if we remove as.matrix()
    result[,i] <- rcpp_row_quantile(as.matrix(rand.rsq[,i,]), sig.level)
  }
  return(result)
}
#' Helper function (not exported)
#' @param ar The 'ar' part of AR(1)
#' @return double
get_minroots <- function(ar) {
  # Smallest modulus among the roots of the AR(1) characteristic polynomial
  # 1 - ar * z; > 1 means the AR(1) process is stationary.
  min(Mod(polyroot(c(1, -ar))))
}
#' Slightly faster \code{\link{arima.sim}} implementation which assumes AR(1)
#' and \code{ma=0}.
#'
#' @param minroots Output from \code{\link{get_minroots}} function.
#' @param ar The 'ar' part of AR(1)
#' @param n Length of output series, before un-differencing. A strictly positive
#' integer.
#' @seealso \code{\link{arima.sim}}
ar1_ma0_sim <- function(minroots, ar, n) {
  # Stationarity requires all characteristic roots outside the unit circle.
  if (minroots <= 1) {
    stop("'ar' part of model is not stationary")
  }
  # Burn-in long enough for start-up transients to decay (arima.sim heuristic).
  nstart <- 2 + ceiling(6 / log(minroots))
  # White-noise innovations, indexed so the returned series starts at time 1.
  x <- ts(data = rnorm(n + nstart), start = 1 - nstart)
  # NOTE(review): bare `filter` is intended to be stats::filter -- verify it
  # cannot be masked (e.g. by dplyr::filter) through the search path.
  x <- filter(x, ar, method = "recursive")
  # Drop the burn-in; the result has length n.
  x[-seq_len(nstart)]
  # maybe also this: as.ts(x)
}
|
#' Calculate Median Temperature
#'
#' This function calculates the median module temperature throughout the data
#' to be used in corrections.
#'
#' @param df Dataframe containing timeseries irradiance (column name must be poa)
#' and module temperature (column name must be modt) in unit of Celsius.
#'
#' @return Returns an integer value of median reported module temperature of the data.
#'
#' @examples
#' T_corr <- median_temp(df_wbw)
#'
#' @importFrom rlang .data
#' @export
median_temp <- function(df) {
  # Rows recorded near the 1000 reference irradiance: 995 < poa < 1005.
  # which() drops NA comparisons, matching the NA handling of the previous
  # dplyr::filter() implementation, so results are unchanged.
  at_reference <- which(df$poa > 995 & df$poa < 1005)
  # Median module temperature over those rows (NA readings ignored),
  # rounded to the nearest whole degree.
  corr_T <- round(as.numeric(stats::median(df$modt[at_reference], na.rm = TRUE)))
  return(corr_T)
}
| /R/tpk-median_temp.R | no_license | cran/SunsVoc | R | false | false | 695 | r |
#' Calculate Median Temperature
#'
#' This function calculates the median module temperature throughout the data
#' to be used in corrections.
#'
#' @param df Dataframe containing timeseries irradiance (column name must be poa)
#' and module temperature (column name must be modt) in unit of Celsius.
#'
#' @return Returns an integer value of median reported module temperature of the data.
#'
#' @examples
#' T_corr <- median_temp(df_wbw)
#'
#' @importFrom rlang .data
#' @export
median_temp <- function(df) {
  corr_T <- df %>%
    # Keep only readings near standard test irradiance (~1000 W/m^2).
    dplyr::filter(.data$poa > 995 & .data$poa < 1005) %>%
    # FIX: na.rm = TRUE spelled out (`T` is an ordinary, reassignable binding)
    # and summarise namespace-qualified for consistency with dplyr::filter.
    dplyr::summarise(stats::median(.data$modt, na.rm = TRUE)) %>%
    as.numeric() %>%
    round()
  return(corr_T)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/em_functions.r
\name{postProcess}
\alias{postProcess}
\title{postProcess}
\usage{
postProcess(b, dev = 1e-05)
}
\arguments{
\item{b}{scaled interaction matrix (diagonal/self-interaction is -1)}
\item{dev}{threshold for removing small entries: interactions whose scale is smaller than \code{dev * 1} (i.e. \code{dev} times the self-interaction scale of \code{-1}) are set to 0}
}
\description{
post-process parameters to remove small entries
}
\author{
Chenhao Li, Niranjan Nagarajan
}
| /man/postProcess.Rd | permissive | pythseq/BEEM-static | R | false | true | 456 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/em_functions.r
\name{postProcess}
\alias{postProcess}
\title{postProcess}
\usage{
postProcess(b, dev = 1e-05)
}
\arguments{
\item{b}{scaled interaction matrix (diagonal/self-interaction is -1)}
\item{dev}{threshold for removing small entries: interactions whose scale is smaller than \code{dev * 1} (i.e. \code{dev} times the self-interaction scale of \code{-1}) are set to 0}
}
\description{
post-process parameters to remove small entries
}
\author{
Chenhao Li, Niranjan Nagarajan
}
|
# Code chunk (not a true subfunction) segmenting stream trib/name/type detection
# Detect tributary / fork / branch references in a free-text location column
# and split them into STREAM_TRIB_1 / STREAM_TRIB_2 via Feature.Detect().
#
# Args:
#   Data        data frame of records to scan.
#   Feature     currently unused here -- kept for signature parity with the
#               sibling Find.* detectors (TODO(review): confirm).
#   col.in      name of the text column to scan (matched text is removed from
#               it when DELETION is TRUE).
#   cols.out    names of the output columns returned alongside col.in.
#   n.dup.cols  currently not honored; trib detection always fills exactly two
#               columns (TODO(review): confirm whether to pass this through).
#   DELETION    if TRUE, matched text is deleted from col.in by Feature.Detect.
#   VERBOSE     if TRUE, print progress messages.
#
# Returns: Data restricted to c(col.in, cols.out).
Find.Streams <- function(Data, Feature, col.in, cols.out, n.dup.cols, DELETION = TRUE, VERBOSE = FALSE){
  # Rows with a usable (non-NA, non-empty) entry in the input column.
  # BUG FIX: this variable was created as `Rows` but consumed below as the
  # never-defined lowercase `rows`; unified on one spelling.
  rows <- rownames(Data)[!is.na(Data[, col.in]) & Data[, col.in] != ""]
  # Tributary checks -------
  Data[rows, col.in] <- gsub("[[:punct:]]", "", Data[rows, col.in])
  if (length(rows) > 0) {
    # Logical matrix: one row per record, one column per tributary keyword.
    matches <- sapply(unlist(ls.TribKeys),
                      function(s) grepl(s, Data[rows, col.in]))
    # sapply collapses to a plain vector when only one record remains
    # (same guard the legacy stream-name code used).
    if (length(rows) == 1) matches <- t(matches)
    rows <- rows[apply(matches, MARGIN = 1, any)]
  }
  if (length(rows) > 0) {
    if (VERBOSE) print("Checking for tributaries, forks, branches")
    tribs <- data.frame(PATTERN = ls.TribKeys, REGEX = TRUE, stringsAsFactors = FALSE)
    tribs$STREAM_TRIB <- tribs$PATTERN
    # Longest patterns first so generic keys cannot pre-empt specific ones.
    tribs <- tribs[order(nchar(tribs$PATTERN), decreasing = TRUE), ]
    Data[rows, c(col.in, "STREAM_TRIB_1", "STREAM_TRIB_2")] <-
      Feature.Detect(Data[rows, ],
                     tribs,
                     col.in,
                     c("STREAM_TRIB_1", "STREAM_TRIB_2"),
                     n.dup.cols = 2,
                     DELETE = DELETION)  # BUG FIX: was hard-coded TRUE, ignoring DELETION
  }
  # (Commented-out legacy stream-name inference block removed; see VCS history.)
  return(Data[, c(col.in, cols.out)])
} | /Scripts/Process Input/Find.Streams.R | no_license | fahmidah/bridge.collapses | R | false | false | 2,661 | r | # Code chunk (not a true subfunction) segmenting stream trib/name/type detection
# Detect tributary / fork / branch references in a free-text location column
# and split them into STREAM_TRIB_1 / STREAM_TRIB_2 via Feature.Detect().
#
# Args:
#   Data        data frame of records to scan.
#   Feature     currently unused here -- kept for signature parity with the
#               sibling Find.* detectors (TODO(review): confirm).
#   col.in      name of the text column to scan (matched text is removed from
#               it when DELETION is TRUE).
#   cols.out    names of the output columns returned alongside col.in.
#   n.dup.cols  currently not honored; trib detection always fills exactly two
#               columns (TODO(review): confirm whether to pass this through).
#   DELETION    if TRUE, matched text is deleted from col.in by Feature.Detect.
#   VERBOSE     if TRUE, print progress messages.
#
# Returns: Data restricted to c(col.in, cols.out).
Find.Streams <- function(Data, Feature, col.in, cols.out, n.dup.cols, DELETION = TRUE, VERBOSE = FALSE){
  # Rows with a usable (non-NA, non-empty) entry in the input column.
  # BUG FIX: this variable was created as `Rows` but consumed below as the
  # never-defined lowercase `rows`; unified on one spelling.
  rows <- rownames(Data)[!is.na(Data[, col.in]) & Data[, col.in] != ""]
  # Tributary checks -------
  Data[rows, col.in] <- gsub("[[:punct:]]", "", Data[rows, col.in])
  if (length(rows) > 0) {
    # Logical matrix: one row per record, one column per tributary keyword.
    matches <- sapply(unlist(ls.TribKeys),
                      function(s) grepl(s, Data[rows, col.in]))
    # sapply collapses to a plain vector when only one record remains
    # (same guard the legacy stream-name code used).
    if (length(rows) == 1) matches <- t(matches)
    rows <- rows[apply(matches, MARGIN = 1, any)]
  }
  if (length(rows) > 0) {
    if (VERBOSE) print("Checking for tributaries, forks, branches")
    tribs <- data.frame(PATTERN = ls.TribKeys, REGEX = TRUE, stringsAsFactors = FALSE)
    tribs$STREAM_TRIB <- tribs$PATTERN
    # Longest patterns first so generic keys cannot pre-empt specific ones.
    tribs <- tribs[order(nchar(tribs$PATTERN), decreasing = TRUE), ]
    Data[rows, c(col.in, "STREAM_TRIB_1", "STREAM_TRIB_2")] <-
      Feature.Detect(Data[rows, ],
                     tribs,
                     col.in,
                     c("STREAM_TRIB_1", "STREAM_TRIB_2"),
                     n.dup.cols = 2,
                     DELETE = DELETION)  # BUG FIX: was hard-coded TRUE, ignoring DELETION
  }
  # (Commented-out legacy stream-name inference block removed; see VCS history.)
  return(Data[, c(col.in, cols.out)])
} |
#Shiny tutorial http://shiny.rstudio.com/tutorial/lesson6/
#Navbar layout http://shiny.rstudio.com/gallery/navbar-example.html
#run once
library (randomForest)
library (caret)
load ("TrainedModel.RDA") #modFit, training
# see *_Internal project for model build
# Server logic for the digit-recognition demo: for the test case selected in
# the UI, draw the 28x28 image, show the true label, and report whether the
# pre-trained model's prediction matches it.
shinyServer(
  function(input, output) {
    # Echo the currently selected test-case index.
    output$caseNo <- renderText({
      idx <- input$caseNo
      paste0("Selected test case # ", idx)
    })
    # Render the digit: pixel values are every column but the first;
    # matrix(..., nrow = 28) rebuilds the image and [, 28:1] flips it
    # vertically for image().
    output$digit <- renderPlot({
      pixels <- as.integer(testing[input$caseNo, -1])
      img <- matrix(pixels, nrow = 28)
      image(img[, 28:1], col = gray(12:1 / 12))
    }, height = 200, width = 200)
    # Ground-truth digit (first column of `testing`).
    output$textReal <- renderText({
      truth <- testing[input$caseNo, 1]
      paste0("Real digit is ", truth)
    })
    # Model prediction for the selected row, plus a RIGHT/WRONG verdict.
    output$textPredicted <- renderText({
      selected_row <- testing[input$caseNo, ]
      predicted <- predict(modFit, newdata = selected_row)
      res <- ifelse(predicted == selected_row[, 1], "RIGHT.", "WRONG!")
      paste0("Predicted digit is ", predicted, ", the Model is ", res)
    })
  }
) | /server.R | no_license | Ash-Datalytica/R_09_DDP_CourseProject_ShinyApp | R | false | false | 1,127 | r | #Shiny tutorial http://shiny.rstudio.com/tutorial/lesson6/
#Navbar layout http://shiny.rstudio.com/gallery/navbar-example.html
#run once
library (randomForest)
library (caret)
load ("TrainedModel.RDA") #modFit, training
# see *_Internal project for model build
# Server logic for the digit-recognition demo: for the test case selected in
# the UI, draw the 28x28 image, show the true label, and report whether the
# pre-trained model's prediction matches it.
# NOTE(review): the load() comment above says "modFit, training", but these
# handlers index a `testing` data frame -- confirm TrainedModel.RDA contents.
shinyServer(
function(input, output) {
# Echo the currently selected test-case index.
output$caseNo <- renderText({
paste0("Selected test case # " , input$caseNo)
})
# Render the digit: pixel values are every column but the first;
# matrix(..., nrow=28) rebuilds the image and [,28:1] flips it vertically.
output$digit <- renderPlot({
#dev.new(width = 1, height=4)
image(matrix(as.integer(testing[input$caseNo,-1]), nrow=28)[,28:1], col=gray(12:1/12))
#dev.off()
}, height = 200, width=200)
# Ground-truth digit (first column of `testing`).
output$textReal <- renderText({
paste0("Real digit is ", testing[input$caseNo,1])
})
# Model prediction for the selected row, plus a RIGHT/WRONG verdict.
output$textPredicted <- renderText({
predicted <- predict (modFit, newdata = testing[input$caseNo,])
#span("groups of words", style = "color:blue"),
res <- ifelse(predicted == testing[input$caseNo,1], "RIGHT.", "WRONG!")
paste0("Predicted digit is " , predicted, ", the Model is ", res)
})
}
) |
library(dplyr)
library(ggplot2)
library(GGally)
# Load the scraped moving-company review data.
# FIX: spell out FALSE -- `F` is an ordinary binding that can be reassigned.
df <- read.csv('./reviews_cleaned.csv', stringsAsFactors = FALSE)
colnames(df)
# [1] "Company" "Username" "City" "State" "Date" "Rating"
# [7] "Review" "Distance" "Cost" "Cost.per.Mile" "Sqft.Moved"
dim(df) # [1] 5197 11
# Keep only the modelling columns, then drop rows with any missing value.
df <- df %>% select(., Company, Rating, Cost.per.Mile, Sqft.Moved, Distance, Cost)
df <- df[complete.cases(df), ]
dim(df) # [1] 3904 6
# Exclude Enterprise and Ryder (only 21 of 3904 rows between them), then
# bucket each move by distance: local (<= 50 mi), long (> 250 mi),
# medium otherwise.
df3 <- df %>% filter(., Company != 'Enterprise' & Company != 'Ryder') %>%
mutate(., distance_type = ifelse(Distance <= 50, 'local', ifelse(Distance > 250, 'long', 'medium')))
dim(df3) # [1] 3883 7
head(df3, 4)
# Company Rating Cost.per.Mile Sqft.Moved Distance Cost distance_type
# 1 Budget 3 1.36 2000 110 150 medium
# 2 Budget 5 2.00 2000 300 600 long
# 3 Budget 4 2.67 1300 450 1200 long
# 4 Budget 1 0.50 2500 600 300 long
# Summary statistics per company x distance bucket.
df3_summary <- df3 %>% group_by(., Company, distance_type) %>%
summarise(., count = n(), Avg.Rating = mean(Rating), Avg.Sqft = mean(Sqft.Moved),
Avg.Cost = mean(Cost), Avg.Distance = mean(Distance), Avg.CpM = mean(Cost.per.Mile))
df3_summary
# # A tibble: 9 x 8
# Groups: Company [?]
# Company distance_type count Avg.Rating Avg.Sqft Avg.Cost Avg.Distance Avg.CpM
# <chr> <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl>
# 1 Budget local 193 3.90 1392. 156. 18.2 21.1
# 2 Budget long 422 3.74 1385. 953. 945. 1.13
# 3 Budget medium 122 3.79 1259. 291. 163. 1.98
# 4 Penske local 182 4.5 1435. 172. 18.0 19.4
# 5 Penske long 797 4.43 1603. 1217. 1064. 1.30
# 6 Penske medium 122 4.58 1413. 374. 149. 2.78
# 7 U-Haul local 955 4.35 1415. 135. 17.4 16.6
# 8 U-Haul long 697 4.16 1512. 1065. 816. 1.39
# 9 U-Haul medium 393 4.17 1382. 291. 140. 2.18
# One subset per company x distance bucket, used by the per-segment
# regressions below.
uhaul_loc <- df3 %>% filter(., Company == 'U-Haul' & distance_type == 'local')
uhaul_med <- df3 %>% filter(., Company == 'U-Haul' & distance_type == 'medium')
uhaul_lon <- df3 %>% filter(., Company == 'U-Haul' & distance_type == 'long')
penske_loc <- df3 %>% filter(., Company == 'Penske' & distance_type == 'local')
penske_med <- df3 %>% filter(., Company == 'Penske' & distance_type == 'medium')
penske_lon <- df3 %>% filter(., Company == 'Penske' & distance_type == 'long')
budget_loc <- df3 %>% filter(., Company == 'Budget' & distance_type == 'local')
budget_med <- df3 %>% filter(., Company == 'Budget' & distance_type == 'medium')
budget_lon <- df3 %>% filter(., Company == 'Budget' & distance_type == 'long')
# Hand-rolled OLS fit of Cost ~ Distance for each company/segment subset.
# FIX: the nine copy-pasted slope/intercept blocks are factored into two
# helpers implementing the textbook closed-form least-squares estimates.
# The arithmetic is identical, so the values echoed below are unchanged.
ols_slope <- function(d) {
  # b1 = sum((y - ybar)(x - xbar)) / sum((x - xbar)^2)
  sum((d$Cost - mean(d$Cost)) * (d$Distance - mean(d$Distance))) /
    sum((d$Distance - mean(d$Distance))^2)
}
ols_intercept <- function(d, slope) {
  # b0 = ybar - b1 * xbar
  mean(d$Cost) - slope * mean(d$Distance)
}
u_loc_b1 <- ols_slope(uhaul_loc)
u_loc_b0 <- ols_intercept(uhaul_loc, u_loc_b1)
u_med_b1 <- ols_slope(uhaul_med)
u_med_b0 <- ols_intercept(uhaul_med, u_med_b1)
u_lon_b1 <- ols_slope(uhaul_lon)
u_lon_b0 <- ols_intercept(uhaul_lon, u_lon_b1)
p_loc_b1 <- ols_slope(penske_loc)
p_loc_b0 <- ols_intercept(penske_loc, p_loc_b1)
p_med_b1 <- ols_slope(penske_med)
p_med_b0 <- ols_intercept(penske_med, p_med_b1)
p_lon_b1 <- ols_slope(penske_lon)
p_lon_b0 <- ols_intercept(penske_lon, p_lon_b1)
b_loc_b1 <- ols_slope(budget_loc)
b_loc_b0 <- ols_intercept(budget_loc, b_loc_b1)
b_med_b1 <- ols_slope(budget_med)
b_med_b0 <- ols_intercept(budget_med, b_med_b1)
b_lon_b1 <- ols_slope(budget_lon)
b_lon_b0 <- ols_intercept(budget_lon, b_lon_b1)
# Point estimates from the original run:
u_loc_b1 # [1] 3.275865
p_loc_b1 # [1] 1.519525
b_loc_b1 # [1] 1.388352
u_med_b1 # [1] 1.568722
p_med_b1 # [1] 1.610588
b_med_b1 # [1] 0.6574091
u_lon_b1 # [1] 0.9762844
p_lon_b1 # [1] 0.6243064
b_lon_b1 # [1] 0.5773355
u_loc_b0 # [1] 77.91029
p_loc_b0 # [1] 144.2997
b_loc_b0 # [1] 130.5815
u_med_b0 # [1] 71.09208
p_med_b0 # [1] 133.7092
b_med_b0 # [1] 183.9075
u_lon_b0 # [1] 268.1307
p_lon_b0 # [1] 553.1557
b_lon_b0 # [1] 407.3246
# Per-segment simple linear regressions Cost ~ Distance; confint() gives the
# 95% confidence intervals for the intercept (b0) and slope (b1), which
# should bracket the hand-computed point estimates above.
model_u_loc = lm(Cost ~ Distance, data = uhaul_loc)
confint(model_u_loc)
# 2.5 % 97.5 %
# (Intercept) 43.275516 112.545062
# Distance 1.723214 4.828517
model_u_med = lm(Cost ~ Distance, data = uhaul_med)
confint(model_u_med)
# 2.5 % 97.5 %
# (Intercept) 6.006113 136.178045
# Distance 1.139608 1.997836
model_u_lon = lm(Cost ~ Distance, data = uhaul_lon)
confint(model_u_lon)
# 2.5 % 97.5 %
# (Intercept) 181.4377730 354.823577
# Distance 0.8876355 1.064933
model_p_loc = lm(Cost ~ Distance, data = penske_loc)
confint(model_p_loc)
# 2.5 % 97.5 %
# (Intercept) 95.0006540 193.598842
# Distance -0.6637927 3.702844
model_p_med = lm(Cost ~ Distance, data = penske_med)
confint(model_p_med)
# 2.5 % 97.5 %
# (Intercept) -11.7272126 279.145593
# Distance 0.7072642 2.513913
model_p_lon = lm(Cost ~ Distance, data = penske_lon)
confint(model_p_lon)
# 2.5 % 97.5 %
# (Intercept) 452.5669119 653.7445414
# Distance 0.5432162 0.7053967
model_b_loc = lm(Cost ~ Distance, data = budget_loc)
confint(model_b_loc)
# 2.5 % 97.5 %
# (Intercept) 87.0516697 174.111292
# Distance -0.5041655 3.280869
model_b_med = lm(Cost ~ Distance, data = budget_med)
confint(model_b_med)
# 2.5 % 97.5 %
# (Intercept) 93.5045171 274.31051
# Distance 0.1354583 1.17936
model_b_lon = lm(Cost ~ Distance, data = budget_lon)
confint(model_b_lon)
# 2.5 % 97.5 %
# (Intercept) 285.5021014 529.1470332
# Distance 0.4669667 0.6877043
# Recap: each line echoes the point estimate followed by its 95% CI bounds.
# NOTE(review): the trailing "exp"/"che" tags presumably mark the most
# expensive/cheapest company per segment -- confirm with the author.
u_loc_b1 # [1] 3.275865 1.723214 4.828517 exp
p_loc_b1 # [1] 1.519525 -0.6637927 3.702844
b_loc_b1 # [1] 1.388352 -0.5041655 3.280869
#---
u_loc_b0 # [1] 77.91029 43.275516 112.545062 che
p_loc_b0 # [1] 144.2997 95.0006540 193.598842
b_loc_b0 # [1] 130.5815 87.0516697 174.111292
u_med_b1 # [1] 1.568722 1.139608 1.997836
p_med_b1 # [1] 1.610588 0.7072642 2.513913
b_med_b1 # [1] 0.6574091 0.1354583 1.17936 che
#---
u_med_b0 # [1] 71.09208 6.006113 136.178045
p_med_b0 # [1] 133.7092 -11.7272126 279.145593
b_med_b0 # [1] 183.9075 93.5045171 274.31051
u_lon_b1 # [1] 0.9762844 0.8876355 1.064933 exp
p_lon_b1 # [1] 0.6243064 0.5432162 0.7053967
b_lon_b1 # [1] 0.5773355 0.4669667 0.6877043
#---
u_lon_b0 # [1] 268.1307 181.4377730 354.823577 che
p_lon_b0 # [1] 553.1557 452.5669119 653.7445414 exp
b_lon_b0 # [1] 407.3246 285.5021014 529.1470332
| /07_lr_stats.R | no_license | maryxhuang/Project_2_WebScraping | R | false | false | 8,154 | r | library(dplyr)
library(ggplot2)
library(GGally)
# Load the scraped moving-company review data.
# FIX: spell out FALSE -- `F` is an ordinary binding that can be reassigned.
df <- read.csv('./reviews_cleaned.csv', stringsAsFactors = FALSE)
colnames(df)
# [1] "Company" "Username" "City" "State" "Date" "Rating"
# [7] "Review" "Distance" "Cost" "Cost.per.Mile" "Sqft.Moved"
dim(df) # [1] 5197 11
# Keep only the modelling columns, then drop rows with any missing value.
df <- df %>% select(., Company, Rating, Cost.per.Mile, Sqft.Moved, Distance, Cost)
df <- df[complete.cases(df), ]
dim(df) # [1] 3904 6
# Exclude Enterprise and Ryder (only 21 of 3904 rows between them), then
# bucket each move by distance: local (<= 50 mi), long (> 250 mi),
# medium otherwise.
df3 <- df %>% filter(., Company != 'Enterprise' & Company != 'Ryder') %>%
mutate(., distance_type = ifelse(Distance <= 50, 'local', ifelse(Distance > 250, 'long', 'medium')))
dim(df3) # [1] 3883 7
head(df3, 4)
# Company Rating Cost.per.Mile Sqft.Moved Distance Cost distance_type
# 1 Budget 3 1.36 2000 110 150 medium
# 2 Budget 5 2.00 2000 300 600 long
# 3 Budget 4 2.67 1300 450 1200 long
# 4 Budget 1 0.50 2500 600 300 long
# Summary statistics per company x distance bucket.
df3_summary <- df3 %>% group_by(., Company, distance_type) %>%
summarise(., count = n(), Avg.Rating = mean(Rating), Avg.Sqft = mean(Sqft.Moved),
Avg.Cost = mean(Cost), Avg.Distance = mean(Distance), Avg.CpM = mean(Cost.per.Mile))
df3_summary
# # A tibble: 9 x 8
# Groups: Company [?]
# Company distance_type count Avg.Rating Avg.Sqft Avg.Cost Avg.Distance Avg.CpM
# <chr> <chr> <int> <dbl> <dbl> <dbl> <dbl> <dbl>
# 1 Budget local 193 3.90 1392. 156. 18.2 21.1
# 2 Budget long 422 3.74 1385. 953. 945. 1.13
# 3 Budget medium 122 3.79 1259. 291. 163. 1.98
# 4 Penske local 182 4.5 1435. 172. 18.0 19.4
# 5 Penske long 797 4.43 1603. 1217. 1064. 1.30
# 6 Penske medium 122 4.58 1413. 374. 149. 2.78
# 7 U-Haul local 955 4.35 1415. 135. 17.4 16.6
# 8 U-Haul long 697 4.16 1512. 1065. 816. 1.39
# 9 U-Haul medium 393 4.17 1382. 291. 140. 2.18
# One subset per company x distance bucket, used by the per-segment
# regressions below.
uhaul_loc <- df3 %>% filter(., Company == 'U-Haul' & distance_type == 'local')
uhaul_med <- df3 %>% filter(., Company == 'U-Haul' & distance_type == 'medium')
uhaul_lon <- df3 %>% filter(., Company == 'U-Haul' & distance_type == 'long')
penske_loc <- df3 %>% filter(., Company == 'Penske' & distance_type == 'local')
penske_med <- df3 %>% filter(., Company == 'Penske' & distance_type == 'medium')
penske_lon <- df3 %>% filter(., Company == 'Penske' & distance_type == 'long')
budget_loc <- df3 %>% filter(., Company == 'Budget' & distance_type == 'local')
budget_med <- df3 %>% filter(., Company == 'Budget' & distance_type == 'medium')
budget_lon <- df3 %>% filter(., Company == 'Budget' & distance_type == 'long')
# Hand-rolled OLS fit of Cost ~ Distance for each company/segment subset.
# FIX: the nine copy-pasted slope/intercept blocks are factored into two
# helpers implementing the textbook closed-form least-squares estimates.
# The arithmetic is identical, so the values echoed below are unchanged.
ols_slope <- function(d) {
  # b1 = sum((y - ybar)(x - xbar)) / sum((x - xbar)^2)
  sum((d$Cost - mean(d$Cost)) * (d$Distance - mean(d$Distance))) /
    sum((d$Distance - mean(d$Distance))^2)
}
ols_intercept <- function(d, slope) {
  # b0 = ybar - b1 * xbar
  mean(d$Cost) - slope * mean(d$Distance)
}
u_loc_b1 <- ols_slope(uhaul_loc)
u_loc_b0 <- ols_intercept(uhaul_loc, u_loc_b1)
u_med_b1 <- ols_slope(uhaul_med)
u_med_b0 <- ols_intercept(uhaul_med, u_med_b1)
u_lon_b1 <- ols_slope(uhaul_lon)
u_lon_b0 <- ols_intercept(uhaul_lon, u_lon_b1)
p_loc_b1 <- ols_slope(penske_loc)
p_loc_b0 <- ols_intercept(penske_loc, p_loc_b1)
p_med_b1 <- ols_slope(penske_med)
p_med_b0 <- ols_intercept(penske_med, p_med_b1)
p_lon_b1 <- ols_slope(penske_lon)
p_lon_b0 <- ols_intercept(penske_lon, p_lon_b1)
b_loc_b1 <- ols_slope(budget_loc)
b_loc_b0 <- ols_intercept(budget_loc, b_loc_b1)
b_med_b1 <- ols_slope(budget_med)
b_med_b0 <- ols_intercept(budget_med, b_med_b1)
b_lon_b1 <- ols_slope(budget_lon)
b_lon_b0 <- ols_intercept(budget_lon, b_lon_b1)
# Point estimates from the original run:
u_loc_b1 # [1] 3.275865
p_loc_b1 # [1] 1.519525
b_loc_b1 # [1] 1.388352
u_med_b1 # [1] 1.568722
p_med_b1 # [1] 1.610588
b_med_b1 # [1] 0.6574091
u_lon_b1 # [1] 0.9762844
p_lon_b1 # [1] 0.6243064
b_lon_b1 # [1] 0.5773355
u_loc_b0 # [1] 77.91029
p_loc_b0 # [1] 144.2997
b_loc_b0 # [1] 130.5815
u_med_b0 # [1] 71.09208
p_med_b0 # [1] 133.7092
b_med_b0 # [1] 183.9075
u_lon_b0 # [1] 268.1307
p_lon_b0 # [1] 553.1557
b_lon_b0 # [1] 407.3246
# Per-segment simple linear regressions Cost ~ Distance; confint() gives the
# 95% confidence intervals for the intercept (b0) and slope (b1), which
# should bracket the hand-computed point estimates above.
model_u_loc = lm(Cost ~ Distance, data = uhaul_loc)
confint(model_u_loc)
# 2.5 % 97.5 %
# (Intercept) 43.275516 112.545062
# Distance 1.723214 4.828517
model_u_med = lm(Cost ~ Distance, data = uhaul_med)
confint(model_u_med)
# 2.5 % 97.5 %
# (Intercept) 6.006113 136.178045
# Distance 1.139608 1.997836
model_u_lon = lm(Cost ~ Distance, data = uhaul_lon)
confint(model_u_lon)
# 2.5 % 97.5 %
# (Intercept) 181.4377730 354.823577
# Distance 0.8876355 1.064933
model_p_loc = lm(Cost ~ Distance, data = penske_loc)
confint(model_p_loc)
# 2.5 % 97.5 %
# (Intercept) 95.0006540 193.598842
# Distance -0.6637927 3.702844
model_p_med = lm(Cost ~ Distance, data = penske_med)
confint(model_p_med)
# 2.5 % 97.5 %
# (Intercept) -11.7272126 279.145593
# Distance 0.7072642 2.513913
model_p_lon = lm(Cost ~ Distance, data = penske_lon)
confint(model_p_lon)
# 2.5 % 97.5 %
# (Intercept) 452.5669119 653.7445414
# Distance 0.5432162 0.7053967
model_b_loc = lm(Cost ~ Distance, data = budget_loc)
confint(model_b_loc)
# 2.5 % 97.5 %
# (Intercept) 87.0516697 174.111292
# Distance -0.5041655 3.280869
model_b_med = lm(Cost ~ Distance, data = budget_med)
confint(model_b_med)
# 2.5 % 97.5 %
# (Intercept) 93.5045171 274.31051
# Distance 0.1354583 1.17936
model_b_lon = lm(Cost ~ Distance, data = budget_lon)
confint(model_b_lon)
# 2.5 % 97.5 %
# (Intercept) 285.5021014 529.1470332
# Distance 0.4669667 0.6877043
# Recap: each line echoes the point estimate followed by its 95% CI bounds.
# NOTE(review): the trailing "exp"/"che" tags presumably mark the most
# expensive/cheapest company per segment -- confirm with the author.
u_loc_b1 # [1] 3.275865 1.723214 4.828517 exp
p_loc_b1 # [1] 1.519525 -0.6637927 3.702844
b_loc_b1 # [1] 1.388352 -0.5041655 3.280869
#---
u_loc_b0 # [1] 77.91029 43.275516 112.545062 che
p_loc_b0 # [1] 144.2997 95.0006540 193.598842
b_loc_b0 # [1] 130.5815 87.0516697 174.111292
u_med_b1 # [1] 1.568722 1.139608 1.997836
p_med_b1 # [1] 1.610588 0.7072642 2.513913
b_med_b1 # [1] 0.6574091 0.1354583 1.17936 che
#---
u_med_b0 # [1] 71.09208 6.006113 136.178045
p_med_b0 # [1] 133.7092 -11.7272126 279.145593
b_med_b0 # [1] 183.9075 93.5045171 274.31051
u_lon_b1 # [1] 0.9762844 0.8876355 1.064933 exp
p_lon_b1 # [1] 0.6243064 0.5432162 0.7053967
b_lon_b1 # [1] 0.5773355 0.4669667 0.6877043
#---
u_lon_b0 # [1] 268.1307 181.4377730 354.823577 che
p_lon_b0 # [1] 553.1557 452.5669119 653.7445414 exp
b_lon_b0 # [1] 407.3246 285.5021014 529.1470332
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/gather.R
\name{gather}
\alias{gather}
\title{Gather columns into key-value pairs.}
\usage{
gather(data, key, value, ..., na.rm = FALSE, convert = FALSE)
}
\arguments{
\item{data}{A data frame.}
\item{key,value}{Names of key and value columns to create in output.}
\item{...}{Specification of columns to gather. Use bare variable names.
Select all variables between x and z with \code{x:z}, exclude y with
\code{-y}. For more options, see the \link[dplyr]{select} documentation.}
\item{na.rm}{If \code{TRUE}, will remove rows from output where the
value column is \code{NA}.}
\item{convert}{If \code{TRUE} will automatically run
\code{\link{type.convert}} on the key column. This is useful if the column
names are actually numeric, integer, or logical.}
}
\description{
Gather takes multiple columns and collapses into key-value pairs,
duplicating all other columns as needed. You use \code{gather()} when
you notice that you have columns that are not variables.
}
\examples{
library(dplyr)
# From http://stackoverflow.com/questions/1181060
stocks <- data.frame(
time = as.Date('2009-01-01') + 0:9,
X = rnorm(10, 0, 1),
Y = rnorm(10, 0, 2),
Z = rnorm(10, 0, 4)
)
gather(stocks, stock, price, -time)
stocks \%>\% gather(stock, price, -time)
# get first observation for each Species in iris data -- base R
mini_iris <- iris[c(1, 51, 101), ]
# gather Sepal.Length, Sepal.Width, Petal.Length, Petal.Width
gather(mini_iris, key = flower_att, value = measurement,
Sepal.Length, Sepal.Width, Petal.Length, Petal.Width)
# same result but less verbose
gather(mini_iris, key = flower_att, value = measurement, -Species)
# repeat iris example using dplyr and the pipe operator
library(dplyr)
mini_iris <-
iris \%>\%
group_by(Species) \%>\%
slice(1)
mini_iris \%>\% gather(key = flower_att, value = measurement, -Species)
}
\seealso{
\code{\link{gather_}} for a version that uses regular evaluation
and is suitable for programming with.
}
| /man/gather.Rd | no_license | juliacrapo/tidyr | R | false | false | 2,043 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/gather.R
\name{gather}
\alias{gather}
\title{Gather columns into key-value pairs.}
\usage{
gather(data, key, value, ..., na.rm = FALSE, convert = FALSE)
}
\arguments{
\item{data}{A data frame.}
\item{key,value}{Names of key and value columns to create in output.}
\item{...}{Specification of columns to gather. Use bare variable names.
Select all variables between x and z with \code{x:z}, exclude y with
\code{-y}. For more options, see the \link[dplyr]{select} documentation.}
\item{na.rm}{If \code{TRUE}, will remove rows from output where the
value column is \code{NA}.}
\item{convert}{If \code{TRUE} will automatically run
\code{\link{type.convert}} on the key column. This is useful if the column
names are actually numeric, integer, or logical.}
}
\description{
Gather takes multiple columns and collapses into key-value pairs,
duplicating all other columns as needed. You use \code{gather()} when
you notice that you have columns that are not variables.
}
\examples{
library(dplyr)
# From http://stackoverflow.com/questions/1181060
stocks <- data.frame(
time = as.Date('2009-01-01') + 0:9,
X = rnorm(10, 0, 1),
Y = rnorm(10, 0, 2),
Z = rnorm(10, 0, 4)
)
gather(stocks, stock, price, -time)
stocks \%>\% gather(stock, price, -time)
# get first observation for each Species in iris data -- base R
mini_iris <- iris[c(1, 51, 101), ]
# gather Sepal.Length, Sepal.Width, Petal.Length, Petal.Width
gather(mini_iris, key = flower_att, value = measurement,
Sepal.Length, Sepal.Width, Petal.Length, Petal.Width)
# same result but less verbose
gather(mini_iris, key = flower_att, value = measurement, -Species)
# repeat iris example using dplyr and the pipe operator
library(dplyr)
mini_iris <-
iris \%>\%
group_by(Species) \%>\%
slice(1)
mini_iris \%>\% gather(key = flower_att, value = measurement, -Species)
}
\seealso{
\code{\link{gather_}} for a version that uses regular evaluation
and is suitable for programming with.
}
|
/itnet/nettime/modif.r | no_license | unix-history/tropix-cmd | R | false | false | 340 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doubly_protected_estimators.R
\name{drdpn}
\alias{drdpn}
\title{Computes a robust doubly protected estimator of a quantile of a random variable Y with the method proposed in Sued, Valdora and Yohai (2019) and the SPPS model for the propensity score.}
\usage{
drdpn(x, y, ps = NULL, type = 2, qq = 0.5)
}
\arguments{
\item{x}{A matrix of covariates without an intercept}
\item{y}{A vector of outcomes (may contain NAs)}
\item{type}{1 or 2; if type=2 the estimator is normalized as explained in Sued, Valdora and Yohai (2019)}
\item{ps}{Optional vector of propensity scores; if \code{NULL} (the default) they are estimated using the SPPS model}
\item{qq}{The quantile of Y to estimate; defaults to 0.5 (the median)}
}
\value{
The estimated quantile of Y
}
\description{
Computes a robust doubly protected estimator of a quantile of a random variable Y with the method proposed in Sued, Valdora and Yohai (2019) and the SPPS model for the propensity score.
}
\examples{
vars <- rsamplemissing(n, alpha, beta)
x <- vars$x
y <- vars$y
drdpn(x, y)
}
| /man/drdpn.Rd | no_license | mvaldora/rdp | R | false | true | 943 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doubly_protected_estimators.R
\name{drdpn}
\alias{drdpn}
\title{Computes a robust doubly protected estimator of a quantile of a random variable Y with the method proposed in Sued, Valdora and Yohai (2019) and the SPPS model for the propensity score.}
\usage{
drdpn(x, y, ps = NULL, type = 2, qq = 0.5)
}
\arguments{
\item{x}{A matrix of covariates without an intercept}
\item{y}{A vector of outcomes (may contain NAs)}
\item{type}{1 or 2; if type=2 the estimator is normalized as explained in Sued, Valdora and Yohai (2019)}
}
\value{
The estimated quantile of Y
}
\description{
Computes a robust doubly protected estimator of a quantile of a random variable Y with the method proposed in Sued, Valdora and Yohai (2019) and the SPPS model for the propensity score.
}
\examples{
vars <- rsamplemissing(n, alpha, beta)
x <- vars$x
y <- vars$y
drdpn(x, y)
}
|
#' tidy_phylo_beta_pair
#'
#' Pairwise phylogenetic beta diversity returned in tidy (long) form: one row
#' per site pair with the turnover, nestedness and total components of the
#' chosen index family.
#'
#' @param x Site-by-species matrix, or a precomputed
#'   \code{betapart::phylo.betapart.core()} object.
#' @param tree dendrogram (phylogenetic tree for the species in \code{x}).
#' @param index.family "jaccard" or "sorensen".
#'
#' @return data_frame with columns \code{x}, \code{y} and the three
#'   family-specific beta components.
#' @export
#'
#' @examples
tidy_phylo_beta_pair <- function (x, tree, index.family = "sorensen")
{
  # NOTE(review): requireNamespace() only loads a namespace, it does not
  # attach it; all calls below therefore use explicit pkg:: qualification
  # (the magrittr pipe dependency has been removed for the same reason).
  requireNamespace("tidyverse")
  requireNamespace("corrr")
  requireNamespace("betapart")
  index.family <- match.arg(index.family, c("jaccard", "sorensen"))
  pbc <- x
  if (!inherits(x, "phylo.betapart")) {
    pbc <- betapart::phylo.betapart.core(x, tree)
  }
  # FIX: shared helper replacing six copy-pasted pipe chains.  Turns a
  # pairwise dissimilarity object into long (x, y, r) form; shave() blanks
  # the upper triangle/diagonal and the filter drops those blank rows
  # (same `r != "NA"` predicate as the original chains).
  stretch_lower <- function(d) {
    cordf <- corrr::shave(corrr::as_cordf(as.matrix(d)))
    dplyr::filter(corrr::stretch(cordf), r != "NA")
  }
  switch(index.family, sorensen = {
    # Turnover, nestedness and total Sorensen components (Baselga framework).
    phylo.beta.sim <- pbc$min.not.shared/(pbc$min.not.shared +
      pbc$shared)
    phylo.beta.sne <- ((pbc$max.not.shared - pbc$min.not.shared)/((2 *
      pbc$shared) + pbc$sum.not.shared)) * (pbc$shared/(pbc$min.not.shared +
      pbc$shared))
    phylo.beta.sor <- pbc$sum.not.shared/(2 * pbc$shared +
      pbc$sum.not.shared)
    phylo.pairwise <- dplyr::select(
      dplyr::bind_cols(
        phylo.beta.sim = stretch_lower(phylo.beta.sim),
        phylo.beta.sne = stretch_lower(phylo.beta.sne),
        phylo.beta.sor = stretch_lower(phylo.beta.sor)),
      x, y,
      phylo.beta.sim = r,
      phylo.beta.sne = r1,
      phylo.beta.sor = r2)
  }, jaccard = {
    # Turnover, nestedness and total Jaccard components.
    phylo.beta.jtu <- (2 * pbc$min.not.shared)/((2 * pbc$min.not.shared) +
      pbc$shared)
    phylo.beta.jne <- ((pbc$max.not.shared - pbc$min.not.shared)/(pbc$shared +
      pbc$sum.not.shared)) * (pbc$shared/((2 * pbc$min.not.shared) +
      pbc$shared))
    phylo.beta.jac <- pbc$sum.not.shared/(pbc$shared + pbc$sum.not.shared)
    phylo.pairwise <- dplyr::select(
      dplyr::bind_cols(
        phylo.beta.jtu = stretch_lower(phylo.beta.jtu),
        phylo.beta.jne = stretch_lower(phylo.beta.jne),
        phylo.beta.jac = stretch_lower(phylo.beta.jac)),
      x, y,
      phylo.beta.jtu = r,
      phylo.beta.jne = r1,
      phylo.beta.jac = r2)
  })
  return(phylo.pairwise)
}
| /R/tidy_phylo_beta_pair.R | no_license | PaulESantos/betapart.tidy | R | false | false | 3,185 | r | #' tidy_phylo_beta_pair
#'
#' Pairwise phylogenetic beta diversity returned in tidy (long) form: one row
#' per site pair with the turnover, nestedness and total components of the
#' chosen index family.
#'
#' @param x Site-by-species matrix, or a precomputed
#'   \code{betapart::phylo.betapart.core()} object.
#' @param tree dendrogram (phylogenetic tree for the species in \code{x}).
#' @param index.family "jaccard" or "sorensen".
#'
#' @return data_frame with columns \code{x}, \code{y} and the three
#'   family-specific beta components.
#' @export
#'
#' @examples
tidy_phylo_beta_pair <- function (x, tree, index.family = "sorensen")
{
  # NOTE(review): requireNamespace() only loads a namespace, it does not
  # attach it; all calls below therefore use explicit pkg:: qualification
  # (the magrittr pipe dependency has been removed for the same reason).
  requireNamespace("tidyverse")
  requireNamespace("corrr")
  requireNamespace("betapart")
  index.family <- match.arg(index.family, c("jaccard", "sorensen"))
  pbc <- x
  if (!inherits(x, "phylo.betapart")) {
    pbc <- betapart::phylo.betapart.core(x, tree)
  }
  # FIX: shared helper replacing six copy-pasted pipe chains.  Turns a
  # pairwise dissimilarity object into long (x, y, r) form; shave() blanks
  # the upper triangle/diagonal and the filter drops those blank rows
  # (same `r != "NA"` predicate as the original chains).
  stretch_lower <- function(d) {
    cordf <- corrr::shave(corrr::as_cordf(as.matrix(d)))
    dplyr::filter(corrr::stretch(cordf), r != "NA")
  }
  switch(index.family, sorensen = {
    # Turnover, nestedness and total Sorensen components (Baselga framework).
    phylo.beta.sim <- pbc$min.not.shared/(pbc$min.not.shared +
      pbc$shared)
    phylo.beta.sne <- ((pbc$max.not.shared - pbc$min.not.shared)/((2 *
      pbc$shared) + pbc$sum.not.shared)) * (pbc$shared/(pbc$min.not.shared +
      pbc$shared))
    phylo.beta.sor <- pbc$sum.not.shared/(2 * pbc$shared +
      pbc$sum.not.shared)
    phylo.pairwise <- dplyr::select(
      dplyr::bind_cols(
        phylo.beta.sim = stretch_lower(phylo.beta.sim),
        phylo.beta.sne = stretch_lower(phylo.beta.sne),
        phylo.beta.sor = stretch_lower(phylo.beta.sor)),
      x, y,
      phylo.beta.sim = r,
      phylo.beta.sne = r1,
      phylo.beta.sor = r2)
  }, jaccard = {
    # Turnover, nestedness and total Jaccard components.
    phylo.beta.jtu <- (2 * pbc$min.not.shared)/((2 * pbc$min.not.shared) +
      pbc$shared)
    phylo.beta.jne <- ((pbc$max.not.shared - pbc$min.not.shared)/(pbc$shared +
      pbc$sum.not.shared)) * (pbc$shared/((2 * pbc$min.not.shared) +
      pbc$shared))
    phylo.beta.jac <- pbc$sum.not.shared/(pbc$shared + pbc$sum.not.shared)
    phylo.pairwise <- dplyr::select(
      dplyr::bind_cols(
        phylo.beta.jtu = stretch_lower(phylo.beta.jtu),
        phylo.beta.jne = stretch_lower(phylo.beta.jne),
        phylo.beta.jac = stretch_lower(phylo.beta.jac)),
      x, y,
      phylo.beta.jtu = r,
      phylo.beta.jne = r1,
      phylo.beta.jac = r2)
  })
  return(phylo.pairwise)
}
|
# Dash documentation page for the dccSlider component: loads runnable example
# scripts, then assembles the page layout (headings, markdown prose, and
# inline component demos).
library(dashCoreComponents)
library(dashHtmlComponents)
library(dash)
# Helper functions are sourced into their own environment to avoid
# polluting the global namespace.
utils <- new.env()
source('dash_docs/utils.R', local=utils)
source('dash_docs/styles.R')
source('dash_docs/components.R')
# Each loaded example exposes $source (the code listing) and $layout
# (the live rendered component).
examples <- list(
simpleslider = utils$LoadExampleCode('dash_docs/chapters/dash_core_components/Slider/examples/slider1.R'),
proptable = utils$LoadExampleCode('dash_docs/chapters/dash_core_components/Slider/examples/sliderproptable.R'),
nonlinearex = utils$LoadExampleCode('dash_docs/chapters/dash_core_components/Slider/examples/sliderupdatemode.R')
)
# Page layout, rendered top to bottom.
layout <- htmlDiv(list(
htmlH1('Slider Examples and Reference'),
htmlHr(),
htmlH3('Simple Slider Example
'),
dccMarkdown('An example of a basic slider tied to a callback.
'),
examples$simpleslider$source,
examples$simpleslider$layout,
#--------------------------------
htmlH3('Marks and Steps'),
dccMarkdown("If slider `marks` are defined and `step` is set to `NULL` \
then the slider will only be able to select values that \
have been predefined by the `marks`. `marks` is a `list` \
where the keys represent the numerical values and the \
values represent their labels."),
# Demo: marks-only slider (step = NULL implied by omission).
htmlDiv(list(utils$LoadAndDisplayComponent2(
'
library(dashCoreComponents)
dccSlider(
min=0,
max=10,
marks = list(
"0" = "0 °F",
"3" = "3 °F",
"5" = "5 °F",
"7.65" = "7.65 °F",
"10" = "10 °F"
),
value=5
)
'
))),
#--------------------------------
# NOTE(review): heading text repeats the previous section's title; this
# section is about `included` and mark styling — confirm intended wording.
htmlH3('Marks and Steps'),
dccMarkdown("By default, `included=TRUE`, meaning the rail trailing the \
handle will be highlighted. To have the handle act as a \
discrete value set `included=FALSE`. To style `marks`, \
include a style css attribute alongside the list value."),
# Demo: styled marks with the default included=TRUE rail.
htmlDiv(list(utils$LoadAndDisplayComponent2(
'
library(dashCoreComponents)
dccSlider(
min=0,
max=100,
value = 65,
marks = list(
"0" = list("label" = "0 °C", "style" = list("color" = "#77b0b1")),
"26" = list("label" = "26 °C"),
"37" = list("label" = "37 °C"),
"100" = list("label" = "100 °C", "style" = list("color" = "#FF4500"))
)
)
'
))),
# Demo: included=FALSE so the handle acts as a discrete value.
htmlDiv(list(utils$LoadAndDisplayComponent2(
'
library(dashCoreComponents)
dccSlider(
min=0,
max=100,
marks = list(
"0" = list("label" = "0 °C", "style" = list("color" = "#77b0b1")),
"26" = list("label" = "26 °C"),
"37" = list("label" = "37 °C"),
"100" = list("label" = "100 °C", "style" = list("color" = "#FF4500"))
),
included=FALSE
)
'
))),
# NOTE(review): this block is byte-identical to the one above — the same
# demo is rendered twice; likely an accidental duplication.
htmlDiv(list(utils$LoadAndDisplayComponent2(
'
library(dashCoreComponents)
dccSlider(
min=0,
max=100,
marks = list(
"0" = list("label" = "0 °C", "style" = list("color" = "#77b0b1")),
"26" = list("label" = "26 °C"),
"37" = list("label" = "37 °C"),
"100" = list("label" = "100 °C", "style" = list("color" = "#FF4500"))
),
included=FALSE
)
'
))),
htmlH3('Non-Linear Slider and Updatemode'),
examples$nonlinearex$source,
examples$nonlinearex$layout,
htmlH3('Slider Properties'),
examples$proptable$layout,
htmlHr(),
dccMarkdown("
[Back to the Table of Contents](/)
")
))
| /dash_docs/chapters/dash_core_components/Slider/index.R | permissive | plotly/dash-docs | R | false | false | 3,314 | r | library(dashCoreComponents)
library(dashHtmlComponents)
library(dash)
utils <- new.env()
source('dash_docs/utils.R', local=utils)
source('dash_docs/styles.R')
source('dash_docs/components.R')
examples <- list(
simpleslider = utils$LoadExampleCode('dash_docs/chapters/dash_core_components/Slider/examples/slider1.R'),
proptable = utils$LoadExampleCode('dash_docs/chapters/dash_core_components/Slider/examples/sliderproptable.R'),
nonlinearex = utils$LoadExampleCode('dash_docs/chapters/dash_core_components/Slider/examples/sliderupdatemode.R')
)
layout <- htmlDiv(list(
htmlH1('Slider Examples and Reference'),
htmlHr(),
htmlH3('Simple Slider Example
'),
dccMarkdown('An example of a basic slider tied to a callback.
'),
examples$simpleslider$source,
examples$simpleslider$layout,
#--------------------------------
htmlH3('Marks and Steps'),
dccMarkdown("If slider `marks` are defined and `step` is set to `NULL` \
then the slider will only be able to select values that \
have been predefined by the `marks`. `marks` is a `list` \
where the keys represent the numerical values and the \
values represent their labels."),
htmlDiv(list(utils$LoadAndDisplayComponent2(
'
library(dashCoreComponents)
dccSlider(
min=0,
max=10,
marks = list(
"0" = "0 °F",
"3" = "3 °F",
"5" = "5 °F",
"7.65" = "7.65 °F",
"10" = "10 °F"
),
value=5
)
'
))),
#--------------------------------
htmlH3('Marks and Steps'),
dccMarkdown("By default, `included=TRUE`, meaning the rail trailing the \
handle will be highlighted. To have the handle act as a \
discrete value set `included=FALSE`. To style `marks`, \
include a style css attribute alongside the list value."),
htmlDiv(list(utils$LoadAndDisplayComponent2(
'
library(dashCoreComponents)
dccSlider(
min=0,
max=100,
value = 65,
marks = list(
"0" = list("label" = "0 °C", "style" = list("color" = "#77b0b1")),
"26" = list("label" = "26 °C"),
"37" = list("label" = "37 °C"),
"100" = list("label" = "100 °C", "style" = list("color" = "#FF4500"))
)
)
'
))),
htmlDiv(list(utils$LoadAndDisplayComponent2(
'
library(dashCoreComponents)
dccSlider(
min=0,
max=100,
marks = list(
"0" = list("label" = "0 °C", "style" = list("color" = "#77b0b1")),
"26" = list("label" = "26 °C"),
"37" = list("label" = "37 °C"),
"100" = list("label" = "100 °C", "style" = list("color" = "#FF4500"))
),
included=FALSE
)
'
))),
htmlDiv(list(utils$LoadAndDisplayComponent2(
'
library(dashCoreComponents)
dccSlider(
min=0,
max=100,
marks = list(
"0" = list("label" = "0 °C", "style" = list("color" = "#77b0b1")),
"26" = list("label" = "26 °C"),
"37" = list("label" = "37 °C"),
"100" = list("label" = "100 °C", "style" = list("color" = "#FF4500"))
),
included=FALSE
)
'
))),
htmlH3('Non-Linear Slider and Updatemode'),
examples$nonlinearex$source,
examples$nonlinearex$layout,
htmlH3('Slider Properties'),
examples$proptable$layout,
htmlHr(),
dccMarkdown("
[Back to the Table of Contents](/)
")
))
|
# setup -------------------------------------------------------------------
# Package loading and global ggplot theme for the choice-sets analysis.
# NOTE(review): require() returns FALSE instead of erroring on a missing
# package — library() would fail fast here.
require(dplyr)
require(ggplot2)
require(lme4)
require(lmerTest)
require(mlogit)
require(lattice)
require(stringdist)
require(ggstatsplot)
# Global theme: black-on-white panels with enlarged text for figures.
theme_update(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_rect(colour = "black"),
axis.text=element_text(size=20, colour = "black"), axis.title=element_text(size=18, face = "bold"), axis.title.x = element_text(vjust = 0),
legend.title = element_text(size = 24, face = "bold"), legend.text = element_text(size = 20), plot.title = element_text(size = 26, face = "bold", vjust = 1))
# Machine-specific working directory (alternate Windows path kept below).
setwd("~/Me/Psychology/Projects/choicesets/git")
#setwd("C:/Users/Jphil/Dropbox/choiceSets/choice-sets")
# Return, for each element of `x`, the position of its first match in
# `list`, or NA when absent. Vectorized via match(), which replaces the
# original per-element loop; unlike `which(list %in% x[j])`, match() also
# tolerates duplicated entries in `list` (the original would have errored
# on a multi-value assignment into a single slot).
getIndex = function(x, list) {
  # as.numeric() preserves the original's double return type
  # (match() alone returns integer).
  as.numeric(match(x, list))
}
# Parse a serialized string vector of the form '["A","B",...]' into a
# character vector: strip the surrounding brackets, split on commas, then
# strip the quote characters wrapping each element.
as.string.vector = function(x) {
  inner <- substr(x, 2, nchar(x) - 1)           # drop leading "[" and trailing "]"
  pieces <- strsplit(inner, split = ",")[[1]]   # one entry per element
  substr(pieces, 2, nchar(pieces) - 1)          # drop the wrapping quotes
}
# Parse a serialized numeric vector of the form "[1,2.5,3]" into a numeric
# vector: strip the surrounding brackets, split on commas, coerce to numeric.
as.numeric.vector = function(x) {
  inner <- substr(x, 2, nchar(x) - 1)
  as.numeric(strsplit(inner, split = ",")[[1]])
}
se = function(x) {return(sd(x, na.rm = T) / sqrt(length(x)))}
dodge <- position_dodge(width=0.9)
# import data -------------------------------------------------------------
versions = c('value1', 'value2', 'freq', 'confounded', 'stripped', 'value4')
version = versions[4]
if (version == 'value1') {
numWords = 14;
numTrials = 112;
minNAs = 4;
path = 'data/value/v1/real2/'
pointsPerCent = 10;
pointsPerWord = 10; # for memory condition
numRealQuestions = 9
type = 0; # 0 is value, 1 is freq, 2 is stripped
maxRepeats = 2;
} else if (version == 'value2') {
numWords = 14;
numTrials = 112;
minNAs = 2;
path = 'data/value/v2/real1/'
pointsPerCent = 10;
pointsPerWord = 10; # for memory condition
numRealQuestions = 5
type = 0;
maxRepeats = 2;
numQuestions = 6;
} else if (version == 'freq') {
numWords = 14;
numTrials = 112;
minNAs = 4;
path = 'data/frequency/v1/real1/'
numRealQuestions = 9
type = 1;
maxRepeats = 2;
numQuestions = 10;
} else if (version == 'confounded') {
numWords = 14;
numTrials = 91;
minNAs = 4;
path = 'data/confounded/v1/real1/'
numRealQuestions = 9
type = 0;
maxRepeats = 2;
} else if (version == 'stripped') {
numWords = 12;
numTrials = 0;
numQuestions = 3;
minNAs = 1;
path = 'data/value/v3/real4/'
type = 2;
numRealQuestions = 1;
pointsPerCent = 1;
pointsPerWord = 1;
maxRepeats = 0;
} else if (version == 'value4') {
numWords = 12;
numTrials = 120;
minNAs = 1;
path = 'data/value/v4/pilot1/'
pointsPerCent_s1 = 10;
pointsPerCent_s2 = 1;
pointsPerWord = 3; # for memory condition
allBonus = 25;
numRealQuestions = 1;
type = 0;
maxRepeats = 2;
numQuestions = 2;
}
# Load data
df.demo = read.csv(paste0(path, 'demo.csv'), stringsAsFactors = F) %>% arrange(subject) %>% mutate(total_time_real = total_time / 60000)
df.words.raw = read.csv(paste0(path, 'words.csv'), stringsAsFactors = F) %>% arrange(subject, word_ind)
if (type != 2) {
df.s1.raw = read.csv(paste0(path, 's1.csv'), stringsAsFactors = F) %>% arrange(subject);
} else {
df.s1.raw = data.frame(subject = numeric(), resp = numeric(), word = numeric(), resp2 = numeric(), value = numeric(), alt = numeric(),
choice = numeric());
}
df.s2.raw = read.csv(paste0(path, 's2.csv'), stringsAsFactors = F) %>% arrange(subject, question_order)
subjlist = df.demo$subject
## words
df.words = df.words.raw %>%
mutate(doubled = ifelse(is.na(lead(word)), FALSE, word == lead(word) & subject == lead(subject))) %>%
filter(doubled == FALSE & subject %in% subjlist)
for (i in 1:nrow(df.words)) {
df.words$high_value[i] = ifelse(type == 1, df.words$exposures[i] > 8, df.words$value[i] > 5)
if (type == 2) { # stripped-down version
valuelist = (df.words %>% filter(subject == df.words$subject[i]))$value
df.words$high_value[i] = ifelse(df.words$value[i] == median(valuelist), as.logical(runif(1) > .5), df.words$value[i] > median(valuelist))
}
}
## s1
df.s1 = df.s1.raw %>% filter(subject %in% subjlist) %>%
mutate(correct_word = ain(toupper(resp), word, maxDist = 2), correct_val = resp2 == value, word_chosen = ifelse(choice, alt, word))
df.s1.subj = df.s1 %>% group_by(subject) %>%
summarize(pctCorrect_words = mean(correct_word, na.rm = T), pctCorrect_val = ifelse(type == 1, 1, mean(correct_val, na.rm = T)),
numTrials = n())
## s2
df.s2 = df.s2.raw %>% filter(subject %in% subjlist)
df.s2$choice = toupper(df.s2$choice)
df.s2$scratch = gsub("[.]", ",", toupper(as.character(df.s2$scratch)))
df.s2$all_values = as.character(df.s2$all_values)
df.s2$rank_value = NULL
for (i in 1:nrow(df.s2)) {
subj.name = df.s2$subject[i]
wordlist = (df.words %>% filter(subject == subj.name))$word
c = df.s2$choice[i]
creal = wordlist[amatch(c, wordlist, maxDist = 2)]
cind = getIndex(creal, wordlist)
all_vals = as.numeric.vector(df.s2$all_values[i])
all_vals_rank = rank(all_vals, ties.method = 'max')
s2_val = ifelse(is.na(cind), NA, all_vals[cind])
word_rows = subj.name == df.words$subject & creal == df.words$word
df.s2$choice_real[i] = creal
df.s2$choice_real_ind[i] = cind
df.s2$s2_value[i] = s2_val
df.s2$rank_value[i] = ifelse(is.na(cind), NA, all_vals_rank[cind])
df.s2$s1_value[i] = ifelse(is.na(cind), NA, df.words$value[word_rows])
df.s2$s1_exposures[i] = ifelse(is.na(cind), NA, df.words$exposures[word_rows])
df.s2$high_value[i] = ifelse(is.na(cind), NA, df.words$high_value[word_rows])#ifelse(type == 1, df.s2$s1_exposures[i] > 8, df.s2$s1_value[i] > 5)
df.s2$high_rank[i] = ifelse(is.na(cind), NA, df.s2$rank_value[i] > 7)
}
df.s2 = df.s2 %>% mutate(s2_subj_ind = as.numeric(as.factor(subject)), # just for modeling
doubled = ifelse(is.na(choice_real_ind), NA, ifelse(is.na(lead(choice_real_ind)), F, choice_real_ind == lead(choice_real_ind)) |
ifelse(is.na(lag(choice_real_ind)), F, choice_real_ind == lag(choice_real_ind))),
bonus_value = ifelse(is.na(choice_real_ind), 0, ifelse(doubled, 0, s2_value)))
df.s2.subj = df.s2 %>% filter(subject %in% df.demo$subject) %>%
group_by(subject) %>%
summarize(s2_bonus = sum(bonus_value), rt = mean(rt) / 1000,
comp_check_pass = mean(comp_check_pass),
comp_check_rt = mean(comp_check_rt) / 1000,
numNAs = sum(is.na(choice_real)),
numRepeats = sum(choice_real == lag(choice_real), na.rm = T),
numTrials = n(),
s1_value = mean(s1_value, na.rm = T),
high_value = mean(high_value, na.rm = T),
rank_value = mean(rank_value, na.rm = T),
high_rank = mean(high_rank, na.rm = T))
df.s2.subj$mem_words = NULL
df.s2.subj$mem_vals = NULL
for (i in 1:nrow(df.s2.subj)) {
s2.filt = df.s2 %>% filter(subject == df.s2.subj$subject[i] & question == 'Memory')
df.s2.subj$mem_words[i] = ifelse(length(s2.filt$choice) == 0, NA, s2.filt$choice)
df.s2.subj$mem_vals[i] = ifelse(length(s2.filt$scratch) == 0, NA, s2.filt$scratch)
}
## Compute recalled
recalled = matrix(F, nrow = nrow(df.s2.subj), ncol = numWords)
recalled_ever = matrix(F, nrow = nrow(df.s2.subj), ncol = numWords)
recalled_val = matrix(F, nrow = nrow(df.s2.subj), ncol = numWords)
df.words$recall = NULL
df.words$recall.ever = NULL
df.words$order = NULL
for (i in 1:nrow(df.s2.subj)) {
subj.name = df.s2.subj$subject[i]
df.words.temp = df.words %>% filter(subject == subj.name)
df.s2.temp = df.s2 %>% filter(subject == subj.name)
words_temp = trimws(as.string.vector(df.s2.subj$mem_words[i]))
val_temp = as.numeric(trimws(as.string.vector(df.s2.subj$mem_vals[i])))
wordlist = df.words.temp$word
if (length(wordlist) == numWords) {
for (j in 1:numWords) {
which_word = amatch(wordlist[j], words_temp, maxDist = 2, nomatch = 0)
recalled[i,j] = which_word > 0
if (recalled[i,j]) {
true_val = df.words.temp$value[df.words.temp$word_ind == (j - 1)]
recalled_val[i,j] = abs(val_temp[which_word] - true_val) <= 2
}
df.words$recall[df.words$subject == subj.name & df.words$word == wordlist[j]] = recalled[i,j]
recalled_ever[i,j] = recalled[i,j] | any(na.omit(df.s2.temp$choice_real_ind) == j)
df.words$recall.ever[df.words$subject == subj.name & df.words$word == wordlist[j]] = recalled_ever[i,j]
df.words$order[df.words$subject == subj.name & df.words$word == wordlist[j]] = which_word
}
}
}
# exclusion ---------------------------------------------------------------
# Exclude if any of these: cor in s1 < .75, comp_check_pass < .5, pctCorrect_words < .75, pctCorrect_pts < .75, numNAs > 3, numRepeats > 2, numRecalled < 5
include_rows = NULL
include_names = NULL
for (subj in 1:length(subjlist)) {
subj.name = subjlist[subj]
df.s1.subj.temp = df.s1.subj %>% filter(subject == subj.name)
df.s2.subj.temp = df.s2.subj %>% filter(subject == subj.name)
df.demo.temp = df.demo %>% filter(subject == subj.name)
exclude = df.demo.temp$write_down == 'Yes' || df.s2.subj.temp$comp_check_pass < .5 || df.s2.subj.temp$numRepeats > maxRepeats ||
df.s2.subj.temp$numNAs > minNAs || sum(recalled[subj,]) < 5
if (type != 2) {
exclude = exclude || df.s1.subj.temp$numTrials != numTrials || df.s1.subj.temp$pctCorrect_words < .75 ||
df.s1.subj.temp$pctCorrect_val < .75
}
if (exclude) {
include_rows[subj] = FALSE
} else {
include_rows[subj] = TRUE
include_names = c(include_names, subj.name)
}
}
# check out data ----------------------------------------------------------
if (type == 2) {
df.s2 = df.s2 %>% filter(question_order == 1)
df.s2.subj = df.s2 %>%
group_by(subject) %>%
summarize(s1_value = mean(s1_value, na.rm = T),
high_value = mean(high_value, na.rm = T),
rank_value = mean(rank_value, na.rm = T),
high_rank = mean(high_rank, na.rm = T),
s2_bonus = mean(bonus_value, na.rm = T))
}
## stage 2 choices
df.s2.filt = df.s2 %>% filter(subject %in% include_names)
df.s2.subj.filt = df.s2.subj %>% filter(subject %in% include_names)
# s2 rank value
ggplot(df.s2.subj.filt, aes(x = rank_value)) + geom_histogram(col = 'black', fill = 'blue') + xlim(c(1,14))
t.test(df.s2.subj.filt$rank_value - 7)
ggplot(df.s2.subj.filt, aes(x = high_rank)) + geom_histogram(col = 'black', fill = 'blue') + ylim(c(0,40)) + xlim(c(0,1)) +
xlab('') + ylab('')
t.test(df.s2.subj.filt$high_rank - .5)
# s1 high value
ggplot(df.s2.subj.filt, aes(x = high_value)) + geom_histogram(col = 'black', fill = 'blue')
t.test(df.s2.subj.filt$high_value - .5)
ggplot(df.s2.subj.filt, aes(x = s1_value)) + geom_histogram(col = 'black', fill = 'blue')
t.test(df.s2.subj.filt$s1_value - 5)
# logit test
df.logit = data.frame(Subj = NULL, Trial = NULL, OptionID = NULL, Choice = NULL, MFval = NULL, MBval = NULL, nExposures = NULL, Recalled = NULL, Question = NULL)
for (subj in 1:nrow(df.demo)) {
subj.name = df.demo$subject[subj]
recalled.temp = recalled_ever[subj, ]
#recalled.temp = !logical(numWords)
num.recalled.temp = sum(recalled.temp)
df.words.temp = df.words %>% filter(subject == subj.name)
df.s2.temp = df.s2 %>% filter(subject == subj.name) %>% arrange(question_order)
nAnswered = sum(!is.na(df.s2.temp$choice_real_ind))
if (nAnswered > 0 & subj.name %in% include_names) {
Subj.col = rep(subj, num.recalled.temp * nAnswered)
MFval.col = rep(df.words.temp$value[recalled.temp], nAnswered)
MFhigh.col = rep(df.words.temp$high_value[recalled.temp] * 1, nAnswered)
nExposures.col = rep(df.words.temp$exposures[recalled.temp], nAnswered)
Recalled.col = rep(df.words.temp$recall.ever[recalled.temp] * 1, nAnswered)
numChosen.col = rep(df.words.temp$numChosen_high[recalled.temp], nAnswered)
OptionID_real.col = rep(which(recalled.temp), nAnswered)
OptionID.col = rep(1:num.recalled.temp, nAnswered)
Trial.col = rep(1:nAnswered, each = num.recalled.temp)
Question.col = rep(df.s2.temp$question_ind[!is.na(df.s2.temp$choice_real_ind)], each = num.recalled.temp)
temp.mbval = matrix(0, nrow = nAnswered, ncol = num.recalled.temp)
temp.mbhigh = matrix(0, nrow = nAnswered, ncol = num.recalled.temp)
temp.choice = matrix(0, nrow = nAnswered, ncol = num.recalled.temp)
temp.choice2 = matrix(0, nrow = nAnswered, ncol = num.recalled.temp)
ind = 1
for (q in 1:numRealQuestions) {
if (!is.na(df.s2.temp$choice_real_ind[q])) {
all_vals = as.numeric.vector(df.s2.temp$all_values[q])
mbvals = rank(all_vals, ties.method = 'max')
#mbvals = all_vals
temp.mbval[ind,] = mbvals[recalled.temp]
temp.mbhigh[ind,] = mbvals[recalled.temp] > 10
choice = logical(num.recalled.temp)
choice[which(df.s2.temp$choice_real_ind[q] == which(recalled.temp))] = TRUE
temp.choice[ind,] = choice
#choice2 = vector(mode = 'numeric', num.recalled.temp)
#choice2[1] = which(df.s2.temp$choice_real_ind[q] == which(recalled.temp))
#choice2[1] = OptionID.col[1:num.recalled.temp][choice]
#temp.choice2[ind,] = choice2
ind = ind + 1
}
}
MBval.col = as.vector(t(temp.mbval))
MBhigh.col = as.vector(t(temp.mbhigh))
Choice.col = as.vector(t(temp.choice))
#Choice2.col = as.vector(t(temp.choice2))
df.logit = rbind(df.logit,
data.frame(Subj = Subj.col, Trial = Trial.col, OptionID = OptionID.col, Choice = Choice.col,
MFval = MFval.col, MBval = MBval.col, MFhigh = MFhigh.col, MBhigh = MBhigh.col))
#Recall = Recalled.col, Question = Question.col, OptionID_real = OptionID_real.col))
}
}
df.logit = df.logit %>% mutate(MFcent = MFhigh - mean(MFhigh), MBcent = MBhigh - mean(MBhigh), Int = MFcent * MBcent,
Choice = as.logical(Choice), Trial_unique = paste(Subj, Trial, sep="_"))
df.logit2 = mlogit.data(df.logit, choice = "Choice", shape = "long", id.var = "Subj", alt.var = "OptionID", chid.var = "Trial_unique")
m = mlogit(Choice ~ MFcent + MBcent + Int | -1, df.logit2)#, panel = T,
#rpar = c(MFcent = "n", MBcent = "n", Int = "n"), correlation = F, halton = NA, R = 1000, tol = .001)
summary(m)
# interaction graph
df.graph = df.logit %>% mutate(MFhigh = factor(MFhigh), MBhigh = factor(MBhigh)) %>%
group_by(MFhigh,MBhigh,Subj) %>% summarize(Choice = any(Choice)) %>%
group_by(MFhigh,MBhigh) %>%
summarize(Choice.mean = mean(Choice), Choice.se = sqrt(Choice.mean * (1 - Choice.mean) / n()))
ggplot(data = df.graph, aes(x = MBhigh, y = Choice.mean, group = MFhigh, colour = MFhigh)) +
geom_point(aes(size = 2)) + geom_line() +
geom_errorbar(aes(ymin=Choice.mean - Choice.se, ymax = Choice.mean + Choice.se), width = .2) +
guides(size = FALSE)
df.graph.all = df.logit %>%
group_by(MFval,MBval,Subj) %>% summarize(Choice = any(Choice)) %>%
group_by(MFval,MBval) %>%
summarize(Choice.mean = mean(Choice), Choice.se = sqrt(Choice.mean * (1 - Choice.mean) / n())) %>%
mutate(Choice.mean = round(Choice.mean, 2))
## recall
nrecall = rowSums(recalled[include_rows,])
mean(nrecall)
df.words.filt = df.words %>% filter(subject %in% include_names)
# plot split by value
df.words.byvalue = df.words.filt %>% group_by(high_value, subject) %>% summarize(recall = mean(recall, na.rm = T)) %>%
group_by(high_value) %>% summarize(recall.mean = mean(recall, na.rm = T), recall.se = se(recall))
ggplot(df.words.byvalue, aes(x = high_value, y = recall.mean)) +
geom_bar(stat = "identity", position = dodge) +
geom_errorbar(aes(ymax = recall.mean + recall.se, ymin = recall.mean - recall.se), width = .5, position = dodge) +
xlab('') + ylab('') + guides(fill = F)
# did value influence recall?
#m.recall = glmer(recall ~ high_value + (0 + high_value | subject) + (1 | subject) + (1 | word),
# data = df.words %>% filter(subject %in% include_names), family = binomial)
m.recall = glmer(recall ~ high_value + (1 | word),
data = df.words.filt, family = binomial)
summary(m.recall)
## order effects
histogram(~ order | value, df.words[df.words$subject %in% include_names & df.words$recall == T, ])
m.order = lmer(order ~ high_value + (1 | subject) + (1 | word),
data = df.words.filt[df.words.filt$recall == T, ])
summary(m.order)
df.s2.subj.filt$order_weights = coef(m.order)$subject$high_valueTRUE
## weights
if (version == "value1") {
df.s2.subj.filt$weights = c(7.79e-02,3.15e-01,2.04e-01,1.17e-01,6.19e-08,1.94e-07,1.68e-01,1.27e-01,9.58e-01,2.50e-01,5.62e-07,1.60e-07,5.84e-01,3.52e-07,2.96e-01,2.94e-07,4.19e-01,2.74e-01,3.86e-08,2.79e-08,1.16e-01,4.21e-07,4.02e-01,2.92e-01,2.48e-02,1.51e-08,4.69e-07,2.69e-01,1.15e-08,6.13e-01,2.51e-01,2.06e-01,5.69e-02,1.86e-07,2.90e-07,4.00e-01,8.97e-08,1.42e-08,2.32e-01,4.33e-01,8.15e-03,1.97e-01,1.41e-07,6.35e-01,2.34e-01,2.36e-01,3.88e-08,3.74e-08,2.56e-08,2.60e-01,1.15e-06,2.34e-01,8.60e-01,5.42e-08,1.74e-01,1.76e-01,2.46e-01,3.32e-01,3.93e-07,4.69e-01,3.03e-02,7.43e-07,2.14e-01,1.11e-07,2.41e-01,5.26e-02,2.10e-01,4.74e-01,1.55e-01,1.00e+00,1.67e-07,4.37e-08,3.19e-07,9.62e-02,2.91e-08,2.62e-01,2.40e-01,2.03e-01,2.76e-01,1.98e-01,8.71e-08,1.04e-06,1.62e-01,1.54e-01,1.16e-01,2.42e-01,1.64e-07,7.93e-08,4.14e-01,9.77e-09,1.22e-01,3.96e-01,2.70e-08,3.83e-01,8.94e-08,2.67e-06,1.62e-01,7.43e-01,3.02e-02,1.72e-08,2.37e-01,2.33e-01,3.64e-01,1.99e-08,1.53e-07,3.34e-03,7.11e-08,1.19e-08,3.58e-01,9.93e-01,1.37e-01,3.07e-01,4.64e-07,8.93e-09,1.00e+00,2.75e-07,1.34e-01,2.76e-01,1.37e-02,5.08e-09,3.69e-01,1.00e+00,1.70e-01)
}
ggplot(df.s2.subj.filt, aes(high_value, weights)) + geom_point() + geom_smooth(method = 'lm')
ggplot(df.s2.subj.filt, aes(order_weights, weights)) + geom_point() + geom_smooth(method = 'lm')
# bonuses, modeling -----------------------------------------------------------------
## save for modeling
df.test = df.s2 %>% group_by(subject) %>% summarize(anyGood = any(!is.na(choice_real_ind)))
rewards_tr = matrix(0, nrow = sum(include_rows), ncol = numWords)
ind = 1
for (subj in 1:nrow(df.demo)) {
subj.name = df.demo$subject[subj]
if (subj.name %in% include_names & df.test$anyGood[df.test$subject == subj.name]) {
df.words.temp = df.words %>% filter(subject == subj.name)
for (word in 1:numWords) {
rewards_tr[ind, word] = ifelse(type == 1, df.words.temp$exposures[word], df.words.temp$value[word])
}
ind = ind + 1
}
}
write.csv(rewards_tr, paste0(path, 'rewards_s1.csv'), row.names = F)
write.csv(recalled_ever[include_rows & df.test$anyGood, ] * 1, paste0(path, 'recalled.csv'), row.names = F)
df.modeling = df.s2 %>% filter(subject %in% include_names & !is.na(choice_real_ind)) %>%
mutate(all_values_nocomma = gsub(",", " ", all_values)) %>%
dplyr::select(s2_subj_ind, choice_real_ind, all_values_nocomma)
write.table(df.modeling, paste0(path, 'choices.csv'), row.names = F, col.names = F, sep=",")
## bonuses
if (type == 2) {
recalled_total = recalled
} else {
recalled_total = recalled & recalled_val
}
nrecall_bonus = rowSums(recalled_total)
df.s2.subj = df.s2.subj %>% mutate(mem_bonus = nrecall_bonus * pointsPerWord)
df.demo = df.demo %>% mutate(s2_bonus = I(df.s2.subj$s2_bonus), mem_bonus = I(df.s2.subj$mem_bonus),
bonus = round((s1_bonus + s2_bonus + mem_bonus) / (pointsPerCent * 100), 2))
write.table(df.demo %>% select(WorkerID = subject, Bonus = bonus),
paste0(path, 'Bonuses.csv'), row.names = FALSE, col.names = FALSE, sep = ",")
## save
save.image(paste0(path, 'analysis.rdata'))
# jphil’s stuff -----------------------------------------------------------
## word order raster plot:
cor.test(df.words$order[df.words$recall==TRUE],df.words$value[df.words$recall==TRUE])
plot <- df.words %>%
filter(recall) %>%
mutate(high_val = factor(c("Low Past Value","High Past Value")[as.factor(high_value)])) %>%
group_by(order,value,high_val) %>%
summarise(count = table(value)[1]) %>%
ggplot(aes(x=order,y=value,fill=count)) +
geom_tile()+
scale_fill_continuous(low = 'white',high = 'red') +
facet_wrap(~high_val, scales="free_y",ncol=1) +
theme_bw() +
theme(
plot.background = element_blank()
,panel.grid.major = element_blank()
,panel.grid.minor = element_blank()
,legend.title=element_blank()
#,legend.position=c(.1,.9)
#,legend.text=element_text(size=rel(1.4))
,axis.text.y=element_text(size=rel(1.5))
,axis.text.x=element_text(size=rel(1.5))
,axis.title.y=element_text(vjust=.9)
,axis.ticks = element_blank()
,strip.text=element_text(size=rel(1.5))
,axis.title=element_text(size=rel(1.5))
)
plot
## word order mean position plot
orderD <- df.words %>%
filter(recall) %>%
mutate(high_val = factor(c("Low Past Value","High Past Value")[as.factor(high_value)])) %>%
group_by(value,high_val,subject) %>%
summarise(meanOrders = mean(order,na.rm=T)) %>%
group_by(value,high_val) %>%
summarise(meanOrder = mean(meanOrders,na.rm=T),
seOrder = se(meanOrders),
minOrder = meanOrder - seOrder,
maxOrder = meanOrder + seOrder) %>%
arrange(order(high_val))
plot2 <- ggplot(orderD,aes(x=meanOrder,y=value)) +
geom_errorbarh(xmin=orderD$minOrder, xmax=orderD$maxOrder, height=.2) +
geom_point(size=3,color="Red") +
coord_cartesian(xlim=c(0,15)) +
facet_wrap(~high_val, scales="free_y",ncol=1) +
theme_bw() +
theme(
plot.background = element_blank()
,panel.grid.major = element_blank()
,panel.grid.minor = element_blank()
,legend.title=element_blank()
#,legend.position=c(.1,.9)
#,legend.text=element_text(size=rel(1.4))
,axis.text.y=element_text(size=rel(1.5))
,axis.text.x=element_text(size=rel(1.5))
,axis.title.y=element_text(vjust=.9)
,axis.ticks = element_blank()
,strip.text=element_text(size=rel(1.5))
,axis.title=element_text(size=rel(1.5))
)
plot2
## Graph for simulations - here the data are set so that there are 500 words, and a choice set size of 10, meaning 2% of the
jphilPalette <- c("darkorange3","lightblue","darkgreen","azure4")
d.sims = read.csv("data/modelSim.csv") %>% gather(model,earnings,-R) %>%
mutate(model = recode(model, CS ="Choice Set", MB = "Full Planning", MF = "No Planning"),
model = factor(model, levels=c("No Planning","Choice Set","Full Planning")),
R = factor(R),
earnings = (earnings/max(earnings))*100) %>%
filter(R!=1) %>%
ggplot(aes(x=R,y=earnings,fill=model)) +
geom_bar(position="dodge",stat="identity") +
#geom_line(aes(color=model)) +
scale_fill_manual(values=grey.colors(3,start=.9,end=.3)) +
theme_bw() +
theme(
plot.background = element_blank()
,panel.grid.major = element_blank()
,panel.grid.minor = element_blank()
,legend.title=element_blank()
,legend.text=element_text(size=rel(1.4))
,axis.title=element_blank()
,axis.text.y=element_text(size=rel(1.5))
,axis.text.x=element_text(size=rel(1.5))
,axis.ticks = element_blank()
)
d.sims
s1 = c(20, 9, 4, 13, 15, 2, 24, 12, 1, 18, 25, 19)
s2 = c(14, 16, 4, 9, 5, 12, 15, 20, 19, 21, 1, 11)
ggplot(df.graph.all, aes(x = MBval, y = MFval, color = Choice.mean)) +
geom_point(size = 10) +
geom_text(aes(label = Choice.mean), hjust = .5, vjust = 3)
| /data/old/analysis.R | no_license | adammmorris/choice-sets | R | false | false | 24,300 | r | # setup -------------------------------------------------------------------
require(dplyr)
require(ggplot2)
require(lme4)
require(lmerTest)
require(mlogit)
require(lattice)
require(stringdist)
require(ggstatsplot)
theme_update(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_rect(colour = "black"),
axis.text=element_text(size=20, colour = "black"), axis.title=element_text(size=18, face = "bold"), axis.title.x = element_text(vjust = 0),
legend.title = element_text(size = 24, face = "bold"), legend.text = element_text(size = 20), plot.title = element_text(size = 26, face = "bold", vjust = 1))
setwd("~/Me/Psychology/Projects/choicesets/git")
#setwd("C:/Users/Jphil/Dropbox/choiceSets/choice-sets")
getIndex = function(x, list) {
y = numeric(length(x))
for (j in 1:length(x)) {
if (any(list %in% x[j])) {
y[j] = which(list %in% x[j])
} else {
y[j] = NA
}
}
return(y)
}
as.string.vector = function(x) {
temp = strsplit(substr(x,2,nchar(x)-1), split=",")[[1]]
return(substr(temp, 2, nchar(temp) - 1))
}
as.numeric.vector = function(x) {
return(as.numeric(strsplit(substr(x,2,nchar(x)-1), split=",")[[1]]))
}
se = function(x) {return(sd(x, na.rm = T) / sqrt(length(x)))}
dodge <- position_dodge(width=0.9)
# import data -------------------------------------------------------------
versions = c('value1', 'value2', 'freq', 'confounded', 'stripped', 'value4')
version = versions[4]
if (version == 'value1') {
numWords = 14;
numTrials = 112;
minNAs = 4;
path = 'data/value/v1/real2/'
pointsPerCent = 10;
pointsPerWord = 10; # for memory condition
numRealQuestions = 9
type = 0; # 0 is value, 1 is freq, 2 is stripped
maxRepeats = 2;
} else if (version == 'value2') {
numWords = 14;
numTrials = 112;
minNAs = 2;
path = 'data/value/v2/real1/'
pointsPerCent = 10;
pointsPerWord = 10; # for memory condition
numRealQuestions = 5
type = 0;
maxRepeats = 2;
numQuestions = 6;
} else if (version == 'freq') {
numWords = 14;
numTrials = 112;
minNAs = 4;
path = 'data/frequency/v1/real1/'
numRealQuestions = 9
type = 1;
maxRepeats = 2;
numQuestions = 10;
} else if (version == 'confounded') {
numWords = 14;
numTrials = 91;
minNAs = 4;
path = 'data/confounded/v1/real1/'
numRealQuestions = 9
type = 0;
maxRepeats = 2;
} else if (version == 'stripped') {
numWords = 12;
numTrials = 0;
numQuestions = 3;
minNAs = 1;
path = 'data/value/v3/real4/'
type = 2;
numRealQuestions = 1;
pointsPerCent = 1;
pointsPerWord = 1;
maxRepeats = 0;
} else if (version == 'value4') {
numWords = 12;
numTrials = 120;
minNAs = 1;
path = 'data/value/v4/pilot1/'
pointsPerCent_s1 = 10;
pointsPerCent_s2 = 1;
pointsPerWord = 3; # for memory condition
allBonus = 25;
numRealQuestions = 1;
type = 0;
maxRepeats = 2;
numQuestions = 2;
}
# Load data
df.demo = read.csv(paste0(path, 'demo.csv'), stringsAsFactors = F) %>% arrange(subject) %>% mutate(total_time_real = total_time / 60000)
df.words.raw = read.csv(paste0(path, 'words.csv'), stringsAsFactors = F) %>% arrange(subject, word_ind)
if (type != 2) {
df.s1.raw = read.csv(paste0(path, 's1.csv'), stringsAsFactors = F) %>% arrange(subject);
} else {
df.s1.raw = data.frame(subject = numeric(), resp = numeric(), word = numeric(), resp2 = numeric(), value = numeric(), alt = numeric(),
choice = numeric());
}
df.s2.raw = read.csv(paste0(path, 's2.csv'), stringsAsFactors = F) %>% arrange(subject, question_order)
subjlist = df.demo$subject
## words
df.words = df.words.raw %>%
mutate(doubled = ifelse(is.na(lead(word)), FALSE, word == lead(word) & subject == lead(subject))) %>%
filter(doubled == FALSE & subject %in% subjlist)
for (i in 1:nrow(df.words)) {
df.words$high_value[i] = ifelse(type == 1, df.words$exposures[i] > 8, df.words$value[i] > 5)
if (type == 2) { # stripped-down version
valuelist = (df.words %>% filter(subject == df.words$subject[i]))$value
df.words$high_value[i] = ifelse(df.words$value[i] == median(valuelist), as.logical(runif(1) > .5), df.words$value[i] > median(valuelist))
}
}
## s1
df.s1 = df.s1.raw %>% filter(subject %in% subjlist) %>%
mutate(correct_word = ain(toupper(resp), word, maxDist = 2), correct_val = resp2 == value, word_chosen = ifelse(choice, alt, word))
df.s1.subj = df.s1 %>% group_by(subject) %>%
summarize(pctCorrect_words = mean(correct_word, na.rm = T), pctCorrect_val = ifelse(type == 1, 1, mean(correct_val, na.rm = T)),
numTrials = n())
## s2
df.s2 = df.s2.raw %>% filter(subject %in% subjlist)
df.s2$choice = toupper(df.s2$choice)
df.s2$scratch = gsub("[.]", ",", toupper(as.character(df.s2$scratch)))
df.s2$all_values = as.character(df.s2$all_values)
df.s2$rank_value = NULL
for (i in 1:nrow(df.s2)) {
subj.name = df.s2$subject[i]
wordlist = (df.words %>% filter(subject == subj.name))$word
c = df.s2$choice[i]
creal = wordlist[amatch(c, wordlist, maxDist = 2)]
cind = getIndex(creal, wordlist)
all_vals = as.numeric.vector(df.s2$all_values[i])
all_vals_rank = rank(all_vals, ties.method = 'max')
s2_val = ifelse(is.na(cind), NA, all_vals[cind])
word_rows = subj.name == df.words$subject & creal == df.words$word
df.s2$choice_real[i] = creal
df.s2$choice_real_ind[i] = cind
df.s2$s2_value[i] = s2_val
df.s2$rank_value[i] = ifelse(is.na(cind), NA, all_vals_rank[cind])
df.s2$s1_value[i] = ifelse(is.na(cind), NA, df.words$value[word_rows])
df.s2$s1_exposures[i] = ifelse(is.na(cind), NA, df.words$exposures[word_rows])
df.s2$high_value[i] = ifelse(is.na(cind), NA, df.words$high_value[word_rows])#ifelse(type == 1, df.s2$s1_exposures[i] > 8, df.s2$s1_value[i] > 5)
df.s2$high_rank[i] = ifelse(is.na(cind), NA, df.s2$rank_value[i] > 7)
}
df.s2 = df.s2 %>% mutate(s2_subj_ind = as.numeric(as.factor(subject)), # just for modeling
doubled = ifelse(is.na(choice_real_ind), NA, ifelse(is.na(lead(choice_real_ind)), F, choice_real_ind == lead(choice_real_ind)) |
ifelse(is.na(lag(choice_real_ind)), F, choice_real_ind == lag(choice_real_ind))),
bonus_value = ifelse(is.na(choice_real_ind), 0, ifelse(doubled, 0, s2_value)))
df.s2.subj = df.s2 %>% filter(subject %in% df.demo$subject) %>%
group_by(subject) %>%
summarize(s2_bonus = sum(bonus_value), rt = mean(rt) / 1000,
comp_check_pass = mean(comp_check_pass),
comp_check_rt = mean(comp_check_rt) / 1000,
numNAs = sum(is.na(choice_real)),
numRepeats = sum(choice_real == lag(choice_real), na.rm = T),
numTrials = n(),
s1_value = mean(s1_value, na.rm = T),
high_value = mean(high_value, na.rm = T),
rank_value = mean(rank_value, na.rm = T),
high_rank = mean(high_rank, na.rm = T))
df.s2.subj$mem_words = NULL
df.s2.subj$mem_vals = NULL
for (i in 1:nrow(df.s2.subj)) {
s2.filt = df.s2 %>% filter(subject == df.s2.subj$subject[i] & question == 'Memory')
df.s2.subj$mem_words[i] = ifelse(length(s2.filt$choice) == 0, NA, s2.filt$choice)
df.s2.subj$mem_vals[i] = ifelse(length(s2.filt$scratch) == 0, NA, s2.filt$scratch)
}
## Compute recalled
recalled = matrix(F, nrow = nrow(df.s2.subj), ncol = numWords)
recalled_ever = matrix(F, nrow = nrow(df.s2.subj), ncol = numWords)
recalled_val = matrix(F, nrow = nrow(df.s2.subj), ncol = numWords)
df.words$recall = NULL
df.words$recall.ever = NULL
df.words$order = NULL
for (i in 1:nrow(df.s2.subj)) {
subj.name = df.s2.subj$subject[i]
df.words.temp = df.words %>% filter(subject == subj.name)
df.s2.temp = df.s2 %>% filter(subject == subj.name)
words_temp = trimws(as.string.vector(df.s2.subj$mem_words[i]))
val_temp = as.numeric(trimws(as.string.vector(df.s2.subj$mem_vals[i])))
wordlist = df.words.temp$word
if (length(wordlist) == numWords) {
for (j in 1:numWords) {
which_word = amatch(wordlist[j], words_temp, maxDist = 2, nomatch = 0)
recalled[i,j] = which_word > 0
if (recalled[i,j]) {
true_val = df.words.temp$value[df.words.temp$word_ind == (j - 1)]
recalled_val[i,j] = abs(val_temp[which_word] - true_val) <= 2
}
df.words$recall[df.words$subject == subj.name & df.words$word == wordlist[j]] = recalled[i,j]
recalled_ever[i,j] = recalled[i,j] | any(na.omit(df.s2.temp$choice_real_ind) == j)
df.words$recall.ever[df.words$subject == subj.name & df.words$word == wordlist[j]] = recalled_ever[i,j]
df.words$order[df.words$subject == subj.name & df.words$word == wordlist[j]] = which_word
}
}
}
# exclusion ---------------------------------------------------------------
# Exclude if any of these: cor in s1 < .75, comp_check_pass < .5, pctCorrect_words < .75, pctCorrect_pts < .75, numNAs > 3, numRepeats > 2, numRecalled < 5
include_rows = NULL
include_names = NULL
for (subj in 1:length(subjlist)) {
subj.name = subjlist[subj]
df.s1.subj.temp = df.s1.subj %>% filter(subject == subj.name)
df.s2.subj.temp = df.s2.subj %>% filter(subject == subj.name)
df.demo.temp = df.demo %>% filter(subject == subj.name)
exclude = df.demo.temp$write_down == 'Yes' || df.s2.subj.temp$comp_check_pass < .5 || df.s2.subj.temp$numRepeats > maxRepeats ||
df.s2.subj.temp$numNAs > minNAs || sum(recalled[subj,]) < 5
if (type != 2) {
exclude = exclude || df.s1.subj.temp$numTrials != numTrials || df.s1.subj.temp$pctCorrect_words < .75 ||
df.s1.subj.temp$pctCorrect_val < .75
}
if (exclude) {
include_rows[subj] = FALSE
} else {
include_rows[subj] = TRUE
include_names = c(include_names, subj.name)
}
}
# check out data ----------------------------------------------------------
if (type == 2) {
df.s2 = df.s2 %>% filter(question_order == 1)
df.s2.subj = df.s2 %>%
group_by(subject) %>%
summarize(s1_value = mean(s1_value, na.rm = T),
high_value = mean(high_value, na.rm = T),
rank_value = mean(rank_value, na.rm = T),
high_rank = mean(high_rank, na.rm = T),
s2_bonus = mean(bonus_value, na.rm = T))
}
## stage 2 choices
df.s2.filt = df.s2 %>% filter(subject %in% include_names)
df.s2.subj.filt = df.s2.subj %>% filter(subject %in% include_names)
# s2 rank value
ggplot(df.s2.subj.filt, aes(x = rank_value)) + geom_histogram(col = 'black', fill = 'blue') + xlim(c(1,14))
t.test(df.s2.subj.filt$rank_value - 7)
ggplot(df.s2.subj.filt, aes(x = high_rank)) + geom_histogram(col = 'black', fill = 'blue') + ylim(c(0,40)) + xlim(c(0,1)) +
xlab('') + ylab('')
t.test(df.s2.subj.filt$high_rank - .5)
# s1 high value
ggplot(df.s2.subj.filt, aes(x = high_value)) + geom_histogram(col = 'black', fill = 'blue')
t.test(df.s2.subj.filt$high_value - .5)
ggplot(df.s2.subj.filt, aes(x = s1_value)) + geom_histogram(col = 'black', fill = 'blue')
t.test(df.s2.subj.filt$s1_value - 5)
# logit test
df.logit = data.frame(Subj = NULL, Trial = NULL, OptionID = NULL, Choice = NULL, MFval = NULL, MBval = NULL, nExposures = NULL, Recalled = NULL, Question = NULL)
for (subj in 1:nrow(df.demo)) {
subj.name = df.demo$subject[subj]
recalled.temp = recalled_ever[subj, ]
#recalled.temp = !logical(numWords)
num.recalled.temp = sum(recalled.temp)
df.words.temp = df.words %>% filter(subject == subj.name)
df.s2.temp = df.s2 %>% filter(subject == subj.name) %>% arrange(question_order)
nAnswered = sum(!is.na(df.s2.temp$choice_real_ind))
if (nAnswered > 0 & subj.name %in% include_names) {
Subj.col = rep(subj, num.recalled.temp * nAnswered)
MFval.col = rep(df.words.temp$value[recalled.temp], nAnswered)
MFhigh.col = rep(df.words.temp$high_value[recalled.temp] * 1, nAnswered)
nExposures.col = rep(df.words.temp$exposures[recalled.temp], nAnswered)
Recalled.col = rep(df.words.temp$recall.ever[recalled.temp] * 1, nAnswered)
numChosen.col = rep(df.words.temp$numChosen_high[recalled.temp], nAnswered)
OptionID_real.col = rep(which(recalled.temp), nAnswered)
OptionID.col = rep(1:num.recalled.temp, nAnswered)
Trial.col = rep(1:nAnswered, each = num.recalled.temp)
Question.col = rep(df.s2.temp$question_ind[!is.na(df.s2.temp$choice_real_ind)], each = num.recalled.temp)
temp.mbval = matrix(0, nrow = nAnswered, ncol = num.recalled.temp)
temp.mbhigh = matrix(0, nrow = nAnswered, ncol = num.recalled.temp)
temp.choice = matrix(0, nrow = nAnswered, ncol = num.recalled.temp)
temp.choice2 = matrix(0, nrow = nAnswered, ncol = num.recalled.temp)
ind = 1
for (q in 1:numRealQuestions) {
if (!is.na(df.s2.temp$choice_real_ind[q])) {
all_vals = as.numeric.vector(df.s2.temp$all_values[q])
mbvals = rank(all_vals, ties.method = 'max')
#mbvals = all_vals
temp.mbval[ind,] = mbvals[recalled.temp]
temp.mbhigh[ind,] = mbvals[recalled.temp] > 10
choice = logical(num.recalled.temp)
choice[which(df.s2.temp$choice_real_ind[q] == which(recalled.temp))] = TRUE
temp.choice[ind,] = choice
#choice2 = vector(mode = 'numeric', num.recalled.temp)
#choice2[1] = which(df.s2.temp$choice_real_ind[q] == which(recalled.temp))
#choice2[1] = OptionID.col[1:num.recalled.temp][choice]
#temp.choice2[ind,] = choice2
ind = ind + 1
}
}
MBval.col = as.vector(t(temp.mbval))
MBhigh.col = as.vector(t(temp.mbhigh))
Choice.col = as.vector(t(temp.choice))
#Choice2.col = as.vector(t(temp.choice2))
df.logit = rbind(df.logit,
data.frame(Subj = Subj.col, Trial = Trial.col, OptionID = OptionID.col, Choice = Choice.col,
MFval = MFval.col, MBval = MBval.col, MFhigh = MFhigh.col, MBhigh = MBhigh.col))
#Recall = Recalled.col, Question = Question.col, OptionID_real = OptionID_real.col))
}
}
df.logit = df.logit %>% mutate(MFcent = MFhigh - mean(MFhigh), MBcent = MBhigh - mean(MBhigh), Int = MFcent * MBcent,
Choice = as.logical(Choice), Trial_unique = paste(Subj, Trial, sep="_"))
df.logit2 = mlogit.data(df.logit, choice = "Choice", shape = "long", id.var = "Subj", alt.var = "OptionID", chid.var = "Trial_unique")
m = mlogit(Choice ~ MFcent + MBcent + Int | -1, df.logit2)#, panel = T,
#rpar = c(MFcent = "n", MBcent = "n", Int = "n"), correlation = F, halton = NA, R = 1000, tol = .001)
summary(m)
# interaction graph
df.graph = df.logit %>% mutate(MFhigh = factor(MFhigh), MBhigh = factor(MBhigh)) %>%
group_by(MFhigh,MBhigh,Subj) %>% summarize(Choice = any(Choice)) %>%
group_by(MFhigh,MBhigh) %>%
summarize(Choice.mean = mean(Choice), Choice.se = sqrt(Choice.mean * (1 - Choice.mean) / n()))
ggplot(data = df.graph, aes(x = MBhigh, y = Choice.mean, group = MFhigh, colour = MFhigh)) +
geom_point(aes(size = 2)) + geom_line() +
geom_errorbar(aes(ymin=Choice.mean - Choice.se, ymax = Choice.mean + Choice.se), width = .2) +
guides(size = FALSE)
df.graph.all = df.logit %>%
group_by(MFval,MBval,Subj) %>% summarize(Choice = any(Choice)) %>%
group_by(MFval,MBval) %>%
summarize(Choice.mean = mean(Choice), Choice.se = sqrt(Choice.mean * (1 - Choice.mean) / n())) %>%
mutate(Choice.mean = round(Choice.mean, 2))
## recall
nrecall = rowSums(recalled[include_rows,])
mean(nrecall)
df.words.filt = df.words %>% filter(subject %in% include_names)
# plot split by value
df.words.byvalue = df.words.filt %>% group_by(high_value, subject) %>% summarize(recall = mean(recall, na.rm = T)) %>%
group_by(high_value) %>% summarize(recall.mean = mean(recall, na.rm = T), recall.se = se(recall))
ggplot(df.words.byvalue, aes(x = high_value, y = recall.mean)) +
geom_bar(stat = "identity", position = dodge) +
geom_errorbar(aes(ymax = recall.mean + recall.se, ymin = recall.mean - recall.se), width = .5, position = dodge) +
xlab('') + ylab('') + guides(fill = F)
# did value influence recall?
#m.recall = glmer(recall ~ high_value + (0 + high_value | subject) + (1 | subject) + (1 | word),
# data = df.words %>% filter(subject %in% include_names), family = binomial)
m.recall = glmer(recall ~ high_value + (1 | word),
data = df.words.filt, family = binomial)
summary(m.recall)
## order effects
histogram(~ order | value, df.words[df.words$subject %in% include_names & df.words$recall == T, ])
m.order = lmer(order ~ high_value + (1 | subject) + (1 | word),
data = df.words.filt[df.words.filt$recall == T, ])
summary(m.order)
df.s2.subj.filt$order_weights = coef(m.order)$subject$high_valueTRUE
## weights
if (version == "value1") {
df.s2.subj.filt$weights = c(7.79e-02,3.15e-01,2.04e-01,1.17e-01,6.19e-08,1.94e-07,1.68e-01,1.27e-01,9.58e-01,2.50e-01,5.62e-07,1.60e-07,5.84e-01,3.52e-07,2.96e-01,2.94e-07,4.19e-01,2.74e-01,3.86e-08,2.79e-08,1.16e-01,4.21e-07,4.02e-01,2.92e-01,2.48e-02,1.51e-08,4.69e-07,2.69e-01,1.15e-08,6.13e-01,2.51e-01,2.06e-01,5.69e-02,1.86e-07,2.90e-07,4.00e-01,8.97e-08,1.42e-08,2.32e-01,4.33e-01,8.15e-03,1.97e-01,1.41e-07,6.35e-01,2.34e-01,2.36e-01,3.88e-08,3.74e-08,2.56e-08,2.60e-01,1.15e-06,2.34e-01,8.60e-01,5.42e-08,1.74e-01,1.76e-01,2.46e-01,3.32e-01,3.93e-07,4.69e-01,3.03e-02,7.43e-07,2.14e-01,1.11e-07,2.41e-01,5.26e-02,2.10e-01,4.74e-01,1.55e-01,1.00e+00,1.67e-07,4.37e-08,3.19e-07,9.62e-02,2.91e-08,2.62e-01,2.40e-01,2.03e-01,2.76e-01,1.98e-01,8.71e-08,1.04e-06,1.62e-01,1.54e-01,1.16e-01,2.42e-01,1.64e-07,7.93e-08,4.14e-01,9.77e-09,1.22e-01,3.96e-01,2.70e-08,3.83e-01,8.94e-08,2.67e-06,1.62e-01,7.43e-01,3.02e-02,1.72e-08,2.37e-01,2.33e-01,3.64e-01,1.99e-08,1.53e-07,3.34e-03,7.11e-08,1.19e-08,3.58e-01,9.93e-01,1.37e-01,3.07e-01,4.64e-07,8.93e-09,1.00e+00,2.75e-07,1.34e-01,2.76e-01,1.37e-02,5.08e-09,3.69e-01,1.00e+00,1.70e-01)
}
ggplot(df.s2.subj.filt, aes(high_value, weights)) + geom_point() + geom_smooth(method = 'lm')
ggplot(df.s2.subj.filt, aes(order_weights, weights)) + geom_point() + geom_smooth(method = 'lm')
# bonuses, modeling -----------------------------------------------------------------
## save for modeling
df.test = df.s2 %>% group_by(subject) %>% summarize(anyGood = any(!is.na(choice_real_ind)))
rewards_tr = matrix(0, nrow = sum(include_rows), ncol = numWords)
ind = 1
for (subj in 1:nrow(df.demo)) {
subj.name = df.demo$subject[subj]
if (subj.name %in% include_names & df.test$anyGood[df.test$subject == subj.name]) {
df.words.temp = df.words %>% filter(subject == subj.name)
for (word in 1:numWords) {
rewards_tr[ind, word] = ifelse(type == 1, df.words.temp$exposures[word], df.words.temp$value[word])
}
ind = ind + 1
}
}
write.csv(rewards_tr, paste0(path, 'rewards_s1.csv'), row.names = F)
write.csv(recalled_ever[include_rows & df.test$anyGood, ] * 1, paste0(path, 'recalled.csv'), row.names = F)
df.modeling = df.s2 %>% filter(subject %in% include_names & !is.na(choice_real_ind)) %>%
mutate(all_values_nocomma = gsub(",", " ", all_values)) %>%
dplyr::select(s2_subj_ind, choice_real_ind, all_values_nocomma)
write.table(df.modeling, paste0(path, 'choices.csv'), row.names = F, col.names = F, sep=",")
## bonuses
if (type == 2) {
recalled_total = recalled
} else {
recalled_total = recalled & recalled_val
}
nrecall_bonus = rowSums(recalled_total)
df.s2.subj = df.s2.subj %>% mutate(mem_bonus = nrecall_bonus * pointsPerWord)
df.demo = df.demo %>% mutate(s2_bonus = I(df.s2.subj$s2_bonus), mem_bonus = I(df.s2.subj$mem_bonus),
bonus = round((s1_bonus + s2_bonus + mem_bonus) / (pointsPerCent * 100), 2))
write.table(df.demo %>% select(WorkerID = subject, Bonus = bonus),
paste0(path, 'Bonuses.csv'), row.names = FALSE, col.names = FALSE, sep = ",")
## save
save.image(paste0(path, 'analysis.rdata'))
# jphil’s stuff -----------------------------------------------------------
## word order raster plot:
cor.test(df.words$order[df.words$recall==TRUE],df.words$value[df.words$recall==TRUE])
plot <- df.words %>%
filter(recall) %>%
mutate(high_val = factor(c("Low Past Value","High Past Value")[as.factor(high_value)])) %>%
group_by(order,value,high_val) %>%
summarise(count = table(value)[1]) %>%
ggplot(aes(x=order,y=value,fill=count)) +
geom_tile()+
scale_fill_continuous(low = 'white',high = 'red') +
facet_wrap(~high_val, scales="free_y",ncol=1) +
theme_bw() +
theme(
plot.background = element_blank()
,panel.grid.major = element_blank()
,panel.grid.minor = element_blank()
,legend.title=element_blank()
#,legend.position=c(.1,.9)
#,legend.text=element_text(size=rel(1.4))
,axis.text.y=element_text(size=rel(1.5))
,axis.text.x=element_text(size=rel(1.5))
,axis.title.y=element_text(vjust=.9)
,axis.ticks = element_blank()
,strip.text=element_text(size=rel(1.5))
,axis.title=element_text(size=rel(1.5))
)
plot
## word order mean position plot
orderD <- df.words %>%
filter(recall) %>%
mutate(high_val = factor(c("Low Past Value","High Past Value")[as.factor(high_value)])) %>%
group_by(value,high_val,subject) %>%
summarise(meanOrders = mean(order,na.rm=T)) %>%
group_by(value,high_val) %>%
summarise(meanOrder = mean(meanOrders,na.rm=T),
seOrder = se(meanOrders),
minOrder = meanOrder - seOrder,
maxOrder = meanOrder + seOrder) %>%
arrange(order(high_val))
plot2 <- ggplot(orderD,aes(x=meanOrder,y=value)) +
geom_errorbarh(xmin=orderD$minOrder, xmax=orderD$maxOrder, height=.2) +
geom_point(size=3,color="Red") +
coord_cartesian(xlim=c(0,15)) +
facet_wrap(~high_val, scales="free_y",ncol=1) +
theme_bw() +
theme(
plot.background = element_blank()
,panel.grid.major = element_blank()
,panel.grid.minor = element_blank()
,legend.title=element_blank()
#,legend.position=c(.1,.9)
#,legend.text=element_text(size=rel(1.4))
,axis.text.y=element_text(size=rel(1.5))
,axis.text.x=element_text(size=rel(1.5))
,axis.title.y=element_text(vjust=.9)
,axis.ticks = element_blank()
,strip.text=element_text(size=rel(1.5))
,axis.title=element_text(size=rel(1.5))
)
plot2
## Graph for simulations - here the data are set so that there are 500 words, and a choice set size of 10, meaning 2% of the
jphilPalette <- c("darkorange3","lightblue","darkgreen","azure4")
d.sims = read.csv("data/modelSim.csv") %>% gather(model,earnings,-R) %>%
mutate(model = recode(model, CS ="Choice Set", MB = "Full Planning", MF = "No Planning"),
model = factor(model, levels=c("No Planning","Choice Set","Full Planning")),
R = factor(R),
earnings = (earnings/max(earnings))*100) %>%
filter(R!=1) %>%
ggplot(aes(x=R,y=earnings,fill=model)) +
geom_bar(position="dodge",stat="identity") +
#geom_line(aes(color=model)) +
scale_fill_manual(values=grey.colors(3,start=.9,end=.3)) +
theme_bw() +
theme(
plot.background = element_blank()
,panel.grid.major = element_blank()
,panel.grid.minor = element_blank()
,legend.title=element_blank()
,legend.text=element_text(size=rel(1.4))
,axis.title=element_blank()
,axis.text.y=element_text(size=rel(1.5))
,axis.text.x=element_text(size=rel(1.5))
,axis.ticks = element_blank()
)
d.sims
s1 = c(20, 9, 4, 13, 15, 2, 24, 12, 1, 18, 25, 19)
s2 = c(14, 16, 4, 9, 5, 12, 15, 20, 19, 21, 1, 11)
ggplot(df.graph.all, aes(x = MBval, y = MFval, color = Choice.mean)) +
geom_point(size = 10) +
geom_text(aes(label = Choice.mean), hjust = .5, vjust = 3)
|
## server.R
library(dplyr)
library(ggplot2)
library(recommenderlab)
library(DT)
library(data.table)
library(reshape2)
source('functions/cf_algorithm.R') # collaborative filtering
source('functions/similarity_measures.R') # similarity measures
# Parse the Shiny input list into the current user's ratings.
#
# `value_list` is reactiveValuesToList(input): names look like
# "select_<MovieID>" (built by ratingInput in the UI) and values are
# the stars given. Returns either a 1 x ncol(r_mat) sparseMatrix of
# ratings whose columns are aligned to the global rating matrix
# `r_mat`, or -1 when the user rated nothing.
#
# Side effect: sets the GLOBAL flag `rank_done` via `<<-`
# (1 = ratings present, 0 = nothing rated); the render code reads it
# later to choose the genre-based fallback path.
get_user_ratings = function(value_list) {
  dat = data.table(
    # everything after the first "_" is the MovieID; names with no
    # "_" (non-rating widgets) become NA and are dropped below
    MovieID = sapply(strsplit(names(value_list), "_"),
                     function(x)
                       ifelse(length(x) > 1, x[[2]], NA)),
    Rating = unlist(as.character(value_list))
  )
  # NOTE(review): !is.null(Rating) is always TRUE for a column
  # (is.null on a vector is FALSE) -- probably !is.na(Rating) was
  # intended; harmless as written since the NA filter below suffices.
  dat = dat[!is.null(Rating) & !is.na(MovieID)]
  dat[Rating == " ", Rating := 0]   # a blank star widget counts as unrated
  dat[, ':=' (MovieID = as.numeric(MovieID), Rating = as.numeric(Rating))]
  dat = dat[Rating > 0]             # keep only movies actually rated
  print(paste0("m", dat$MovieID))   # debug trace of the rated movie ids
  if (nrow(dat) == 0)
  {
    user_ratings <- -1
    rank_done <<- 0
  }
  else
  {
    rank_done <<- 1
    # one-row sparse vector; columns matched by r_mat's "m<id>" names
    user_ratings <- sparseMatrix(
      i = rep(1, nrow(dat)),
      j = match(paste0("m", dat$MovieID), colnames(r_mat)),
      x = dat$Rating,
      dims = c(1, ncol(r_mat))
    )
  }
  return(user_ratings)
}
get_movie_genre1 = function(InputGenre = "Action")
{
  # Genre-based fallback recommender: the (up to) 5 best-rated movies
  # of a genre among movies with more than 1000 ratings.
  #
  # @param InputGenre Genre label looked for inside the pipe-separated
  #   `Genres` column, matched as a literal substring (not a regex).
  # @return Data frame with MovieID, Title, ave_ratings,
  #   ratings_per_movie and Genres, best-rated first. Ties at the
  #   5th place are all kept, as before.
  #
  # Relies on the file-level globals `ratings` and `movies`.
  ratings %>%
    group_by(MovieID) %>%
    summarize(ratings_per_movie = n(),
              # spell `digits` out: the original `dig=` relied on
              # partial argument matching
              ave_ratings = round(mean(Rating), digits = 3)) %>%
    inner_join(movies, by = 'MovieID') %>%
    filter(ratings_per_movie > 1000) %>%
    # fixed = TRUE: treat the genre label as a literal string so
    # regex metacharacters in a label can never change the match
    filter(grepl(InputGenre, Genres, fixed = TRUE)) %>%
    # slice_max() supersedes top_n(); default with_ties = TRUE
    # reproduces top_n()'s tie behavior
    slice_max(ave_ratings, n = 5) %>%
    select('MovieID', 'Title', 'ave_ratings', 'ratings_per_movie', 'Genres') %>%
    arrange(desc(ave_ratings))
  # Returning the pipeline directly (instead of assigning to a local,
  # which made the function return its value invisibly) lets the
  # result print when called interactively.
}
# read in data ----
# MovieLens 1M files ("::"-separated) fetched over HTTPS at app start.
myurl = "https://liangfgithub.github.io/MovieData/"
movies = readLines(paste0(myurl, 'movies.dat?raw=true'))
movies = strsplit(movies,
                  split = "::",
                  fixed = TRUE,
                  useBytes = TRUE)
movies = matrix(unlist(movies), ncol = 3, byrow = TRUE)
movies = data.frame(movies, stringsAsFactors = FALSE)
colnames(movies) = c('MovieID', 'Title', 'Genres')
movies$MovieID = as.integer(movies$MovieID)
# titles are latin1-encoded in the raw file; re-encode for display
movies$Title = iconv(movies$Title, "latin1", "UTF-8")
# extract year -- titles end in "(YYYY)", so take the 4 chars before
# the closing parenthesis
movies$Year = as.numeric(unlist(
  lapply(movies$Title, function(x) substr(x, nchar(x)-4, nchar(x)-1))))
#unique(unlist(strsplit(movies$Genres, split = "|", fixed = TRUE, useBytes = TRUE)))
small_image_url = "https://liangfgithub.github.io/MovieImages/"
movies$image_url = sapply(movies$MovieID,
                          function(x)
                            paste0(small_image_url, x, '.jpg?raw=true'))
#rating data
# sep=':' splits "u::m::r::t" into 7 fields (empty between the double
# colons); the recycled c('integer','NULL') colClasses keeps fields
# 1,3,5,7 and drops the empties.
ratings = read.csv(paste0(myurl, 'ratings.dat?raw=true'),
                   sep = ':',
                   colClasses = c('integer', 'NULL'),
                   header = FALSE)
colnames(ratings) = c('UserID', 'MovieID', 'Rating', 'Timestamp')
# build the user x movie sparse rating matrix, with "u<id>"/"m<id>"
# dimnames so user vectors can be aligned by name later
i = paste0('u', ratings$UserID)
j = paste0('m', ratings$MovieID)
x = ratings$Rating
tmp = data.frame(i, j, x, stringsAsFactors = T)
r_mat = sparseMatrix(as.integer(tmp$i), as.integer(tmp$j), x = tmp$x)
rownames(r_mat) = levels(tmp$i)
colnames(r_mat) = levels(tmp$j)
# global flag toggled by get_user_ratings(): 1 = user rated something
rank_done = 1
# Shiny server: renders the rating grid, computes recommendations on
# button click (UBCF via recommenderlab, falling back to a genre list
# when nothing was rated), and renders the recommendation boxes.
shinyServer(function(input, output, session) {
  # show the movies to be rated
  output$ratings <- renderUI({
    num_rows <- 50
    num_movies <- 5 # movies per row
    # 50 x 5 grid of poster + title + star-rating widget; widget ids
    # are "select_<MovieID>", which get_user_ratings() parses back
    lapply(1:num_rows, function(i) {
      list(fluidRow(lapply(1:num_movies, function(j) {
        list(box(
          width = 2,
          div(style = "text-align:center", img(
            src = movies$image_url[(i - 1) * num_movies + j], height = 150
          )),
          #div(style = "text-align:center; color: #999999; font-size: 80%", books$authors[(i - 1) * num_books + j]),
          div(style = "text-align:center", strong(movies$Title[(i - 1) * num_movies + j])),
          div(style = "text-align:center; font-size: 150%; color: #f0ad4e;", ratingInput(
            paste0("select_", movies$MovieID[(i - 1) * num_movies + j]),
            label = "",
            dataStop = 5
          ))
        )) #00c0ef
      })))
    })
  })
  # Calculate recommendations when the submit button is clicked
  df <- eventReactive(input$btn, {
    withBusyIndicatorServer("btn", {
      # showing the busy indicator
      # hide the rating container by simulating a click on its
      # collapse handle
      useShinyjs()
      jsCode <-
        "document.querySelector('[data-widget=collapse]').click();"
      runjs(jsCode)
      # get the user's rating data (also sets the global rank_done)
      value_list <- reactiveValuesToList(input)
      user_ratings <- get_user_ratings(value_list)
      if(rank_done == 0)
      {
        # nothing rated: fall back to the top Action movies
        user_predicted_ids = get_movie_genre1("Action")$MovieID
      }
      else
      {
        # prepend the user's row to the rating matrix and run
        # user-based collaborative filtering for 5 recommendations
        r_mat_user = rbind(user_ratings, r_mat)
        r_mat_user_rrm = new('realRatingMatrix', data = r_mat_user)
        res1 = predict_cf1(r_mat_user_rrm, "ubcf", 5)
        user_predicted_ids = res1$MovieID
      }
      # add user's ratings as first column to rating matrix
      #print(user_predicted_ids)
      recom_results <- data.table(
        Rank = 1:5,
        MovieID = user_predicted_ids,#movies$MovieID[user_predicted_ids],
        Title = movies$Title[match(user_predicted_ids, movies$MovieID)]#,
        #Predicted_rating = user_results
      )
      # NOTE(review): the reactive's value is print()'s return value,
      # which for a data.table is the table itself -- presumably
      # intentional (debug trace + result in one); confirm.
      print(recom_results)
    }) # still busy
  }) # clicked on button
  # display the recommendations
  output$results <- renderUI({
    num_rows <- 1
    recom_result <- df()
    num_movies <- min(5, length(recom_result$MovieID))
    print(rank_done)
    if(rank_done == 0)
    {
      # add an extra "row" that renders the no-ratings banner below
      num_rows = num_rows + 1
    }
    #print(recom_result)
    lapply(1:num_rows, function(i) {
      image_height = 150
      if(rank_done == 0 && i == 2)
      {
        # i == 1000 is a sentinel: the branches below use it to turn
        # the second row into a single full-width, image-less banner
        num_movies = 1
        i = 1000
        image_height = 0
      }
      list(fluidRow(lapply(1:num_movies, function(j) {
        box(
          width = ifelse(rank_done == 0 && i == 1000, 12, 2),
          status = "success",
          solidHeader = TRUE,
          title = ifelse(rank_done == 0 && i == 1000, "You didn't Rate Anything, So Watch Action Movies", paste0("Rank ", (i - 1) * num_movies + j)),
          div(style = "text-align:center",
              a(
                img(src = movies$image_url[which(movies$MovieID == recom_result$MovieID[(i - 1) * num_movies + j])], height = image_height)
              )),
          div(style = "text-align:center; font-size: 100%",
              strong(movies$Title[which(movies$MovieID == recom_result$MovieID[(i - 1) * num_movies + j])]))
        )
      }))) # columns
    }) # rows
  }) # renderUI function
  # genre tab: top-5 boxes for the genre selected in the dropdown
  output$RecomMovieList <- renderUI({
    num_rows <- 1
    recom_result <- get_movie_genre1(input$selectedGenre)
    num_movies <- max(1, min(5, length(recom_result$MovieID)))
    #print(recom_result)
    lapply(1:num_rows, function(i) {
      list(fluidRow(lapply(1:num_movies, function(j) {
        box(
          # empty result set: one wide "No Recommendation" box
          width = ifelse(length(recom_result$MovieID) == 0, 5, 2),
          status = "success",
          solidHeader = TRUE,
          title = ifelse(length(recom_result$MovieID) == 0, "No Recommendation", paste0("Rank ", (i - 1) * num_movies + j)),
          div(style = "text-align:center",
              a(
                img(src = movies$image_url[which(movies$MovieID == recom_result$MovieID[(i - 1) * num_movies + j])], height = 150)
              )),
          div(style = "text-align:center; font-size: 100%",
              strong(movies$Title[which(movies$MovieID == recom_result$MovieID[(i - 1) * num_movies + j])]))
        )
      }))) # columns
    }) # rows
  }) # renderUI function
}) # server function
library(dplyr)
library(ggplot2)
library(recommenderlab)
library(DT)
library(data.table)
library(reshape2)
source('functions/cf_algorithm.R') # collaborative filtering
source('functions/similarity_measures.R') # similarity measures
get_user_ratings = function(value_list) {
dat = data.table(
MovieID = sapply(strsplit(names(value_list), "_"),
function(x)
ifelse(length(x) > 1, x[[2]], NA)),
Rating = unlist(as.character(value_list))
)
dat = dat[!is.null(Rating) & !is.na(MovieID)]
dat[Rating == " ", Rating := 0]
dat[, ':=' (MovieID = as.numeric(MovieID), Rating = as.numeric(Rating))]
dat = dat[Rating > 0]
print(paste0("m", dat$MovieID))
if (nrow(dat) == 0)
{
user_ratings <- -1
rank_done <<- 0
}
else
{
rank_done <<- 1
user_ratings <- sparseMatrix(
i = rep(1, nrow(dat)),
j = match(paste0("m", dat$MovieID), colnames(r_mat)),
x = dat$Rating,
dims = c(1, ncol(r_mat))
)
}
return(user_ratings)
}
get_movie_genre1 = function(InputGenre = "Action")
{
#InputGenre = "Action"
recom = ratings %>%
group_by(MovieID) %>%
summarize(ratings_per_movie = n(),
ave_ratings = round(mean(Rating), dig=3)) %>%
inner_join(movies, by = 'MovieID') %>%
filter(ratings_per_movie > 1000) %>%
filter(grepl(InputGenre, Genres)) %>%
top_n(5, ave_ratings) %>%
select('MovieID', 'Title', 'ave_ratings', 'ratings_per_movie', 'Genres') %>%
arrange(desc(ave_ratings)) #%>%
#datatable(class = "nowrap hover row-border",escape = FALSE, options = list(dom = 't',scrollX = TRUE, autoWidth = TRUE))
}
# read in data
myurl = "https://liangfgithub.github.io/MovieData/"
movies = readLines(paste0(myurl, 'movies.dat?raw=true'))
movies = strsplit(movies,
split = "::",
fixed = TRUE,
useBytes = TRUE)
movies = matrix(unlist(movies), ncol = 3, byrow = TRUE)
movies = data.frame(movies, stringsAsFactors = FALSE)
colnames(movies) = c('MovieID', 'Title', 'Genres')
movies$MovieID = as.integer(movies$MovieID)
movies$Title = iconv(movies$Title, "latin1", "UTF-8")
# extract year
movies$Year = as.numeric(unlist(
lapply(movies$Title, function(x) substr(x, nchar(x)-4, nchar(x)-1))))
#unique(unlist(strsplit(movies$Genres, split = "|", fixed = TRUE, useBytes = TRUE)))
small_image_url = "https://liangfgithub.github.io/MovieImages/"
movies$image_url = sapply(movies$MovieID,
function(x)
paste0(small_image_url, x, '.jpg?raw=true'))
#rating data
ratings = read.csv(paste0(myurl, 'ratings.dat?raw=true'),
sep = ':',
colClasses = c('integer', 'NULL'),
header = FALSE)
colnames(ratings) = c('UserID', 'MovieID', 'Rating', 'Timestamp')
i = paste0('u', ratings$UserID)
j = paste0('m', ratings$MovieID)
x = ratings$Rating
tmp = data.frame(i, j, x, stringsAsFactors = T)
r_mat = sparseMatrix(as.integer(tmp$i), as.integer(tmp$j), x = tmp$x)
rownames(r_mat) = levels(tmp$i)
colnames(r_mat) = levels(tmp$j)
rank_done = 1
shinyServer(function(input, output, session) {
# show the movies to be rated
output$ratings <- renderUI({
num_rows <- 50
num_movies <- 5 # movies per row
lapply(1:num_rows, function(i) {
list(fluidRow(lapply(1:num_movies, function(j) {
list(box(
width = 2,
div(style = "text-align:center", img(
src = movies$image_url[(i - 1) * num_movies + j], height = 150
)),
#div(style = "text-align:center; color: #999999; font-size: 80%", books$authors[(i - 1) * num_books + j]),
div(style = "text-align:center", strong(movies$Title[(i - 1) * num_movies + j])),
div(style = "text-align:center; font-size: 150%; color: #f0ad4e;", ratingInput(
paste0("select_", movies$MovieID[(i - 1) * num_movies + j]),
label = "",
dataStop = 5
))
)) #00c0ef
})))
})
})
# Calculate recommendations when the sbumbutton is clicked
df <- eventReactive(input$btn, {
withBusyIndicatorServer("btn", {
# showing the busy indicator
# hide the rating container
useShinyjs()
jsCode <-
"document.querySelector('[data-widget=collapse]').click();"
runjs(jsCode)
# get the user's rating data
value_list <- reactiveValuesToList(input)
user_ratings <- get_user_ratings(value_list)
if(rank_done == 0)
{
user_predicted_ids = get_movie_genre1("Action")$MovieID
}
else
{
r_mat_user = rbind(user_ratings, r_mat)
r_mat_user_rrm = new('realRatingMatrix', data = r_mat_user)
res1 = predict_cf1(r_mat_user_rrm, "ubcf", 5)
user_predicted_ids = res1$MovieID
}
# add user's ratings as first column to rating matrix
#print(user_predicted_ids)
recom_results <- data.table(
Rank = 1:5,
MovieID = user_predicted_ids,#movies$MovieID[user_predicted_ids],
Title = movies$Title[match(user_predicted_ids, movies$MovieID)]#,
#Predicted_rating = user_results
)
print(recom_results)
}) # still busy
}) # clicked on button
# display the recommendations
output$results <- renderUI({
num_rows <- 1
recom_result <- df()
num_movies <- min(5, length(recom_result$MovieID))
print(rank_done)
if(rank_done == 0)
{
num_rows = num_rows + 1
}
#print(recom_result)
lapply(1:num_rows, function(i) {
image_height = 150
if(rank_done == 0 && i == 2)
{
num_movies = 1
i = 1000
image_height = 0
}
list(fluidRow(lapply(1:num_movies, function(j) {
box(
width = ifelse(rank_done == 0 && i == 1000, 12, 2),
status = "success",
solidHeader = TRUE,
title = ifelse(rank_done == 0 && i == 1000, "You didn't Rate Anything, So Watch Action Movies", paste0("Rank ", (i - 1) * num_movies + j)),
div(style = "text-align:center",
a(
img(src = movies$image_url[which(movies$MovieID == recom_result$MovieID[(i - 1) * num_movies + j])], height = image_height)
)),
div(style = "text-align:center; font-size: 100%",
strong(movies$Title[which(movies$MovieID == recom_result$MovieID[(i - 1) * num_movies + j])]))
)
}))) # columns
}) # rows
}) # renderUI function
# Render a one-row grid of genre-based recommendations for the selected genre.
# When the genre lookup returns no movies, a single wider "No Recommendation"
# box is shown instead.
output$RecomMovieList <- renderUI({
  recom_result <- get_movie_genre1(input$selectedGenre)
  n_found <- length(recom_result$MovieID)
  num_rows <- 1
  num_movies <- max(1, min(5, n_found))  # always at least one box
  lapply(seq_len(num_rows), function(i) {
    boxes <- lapply(seq_len(num_movies), function(j) {
      rank_idx <- (i - 1) * num_movies + j
      movie_row <- which(movies$MovieID == recom_result$MovieID[rank_idx])
      if (n_found == 0) {
        box_width <- 5
        box_title <- "No Recommendation"
      } else {
        box_width <- 2
        box_title <- paste0("Rank ", rank_idx)
      }
      box(
        width = box_width,
        status = "success",
        solidHeader = TRUE,
        title = box_title,
        div(style = "text-align:center",
            a(
              img(src = movies$image_url[movie_row], height = 150)
            )),
        div(style = "text-align:center; font-size: 100%",
            strong(movies$Title[movie_row]))
      )
    })
    list(fluidRow(boxes))  # columns
  }) # rows
}) # renderUI function
}) # server function |
## this script will get data from IDXData
########## call packages
require(quantmod)  # loads zoo/xts, used by read.zoo/as.xts below
################## output to file
# sink("~/Dropbox/DATA/YAHOO/learnSPX.csv", split=TRUE)
############## get data from Dropbox/DATA/IDXData
# read the daily gold price CSV (dates formatted m/d/Y) into a zoo series
gold <- read.zoo("~/Dropbox/DATA/IDXDATA/$GOLD.CSV", sep=",", format ="%m/%d/%Y", header=TRUE)
# NOTE(review): `Gold` (xts) is created but never used below; `tail(gold)`
# prints the zoo object — confirm whether `tail(Gold)` was intended.
Gold <- as.xts(gold, order.by=index(gold), frequency=NULL)
tail(gold)  # print the last rows as a quick sanity check
#Gold <- as.xts(gold)
#tail (Gold)
#plot (gold)
## write.table(goldStudy, "~/Dropbox/DATA/learnSPX.csv")
| /task_reference/svn/trunk/getIDX.r | no_license | githubfun/R | R | false | false | 515 | r | ## this script will get data from IDXData
########## call packages
require(quantmod)
################## output to file
# sink("~/Dropbox/DATA/YAHOO/learnSPX.csv", split=TRUE)
############## get data from Dropbox/DATA/IDXData
gold <- read.zoo("~/Dropbox/DATA/IDXDATA/$GOLD.CSV", sep=",", format ="%m/%d/%Y", header=TRUE)
Gold <- as.xts(gold, order.by=index(gold), frequency=NULL)
tail(gold)
#Gold <- as.xts(gold)
#tail (Gold)
#plot (gold)
## write.table(goldStudy, "~/Dropbox/DATA/learnSPX.csv")
|
#-------------------------------------------------------------------------
#Wind Speed (Darro) X Precipitation (Darro) - Maxima diary precipitation
#-------------------------------------------------------------------------
#Eduardo Q Marques 17-05-2022
#eduardobio2009@gmail.com
#-------------------------------------------------------------------------
library(tidyverse)
library(reshape2)
library(ggplot2)
library(ggpubr)
library(viridis)
library(fmsb)
library(lubridate)
library(extRemes)
library(boot)
#Darro data ====================================================================
setwd("C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Banco de Dados Tanguro/Area1-plot/Dados das torres")
darro = read.csv("Master_Estacao_Darro_2020.csv", sep = ",")
#Filter data ------------------------------------------------------------------
# keep only the date, wind speed and precipitation columns
df = darro %>%
select(Date, windSpd, ppt)
colnames(df) = c("date", "ws", "ppt")
df$date = as.Date(df$date)
df$date2 = as.numeric(substr(df$date, 1, 4))    # year as numeric
df$month = as.character(substr(df$date, 6, 7))  # month as "01".."12"
df$Date = as.factor(df$date2)                   # year as factor (plot colouring)
#Filters
df = df %>% filter(date2 %in% c(2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020)) #Consistent data time series
df = df %>% filter(month %in% c("10","11","12","01","02","03","04")) #Rainy months to AW climate
df = df %>% filter(ppt <100) #Outlier maybe a error in registration
#Plot data
# one colour per year (11 years -> 11 colours, ColorBrewer Paired + brown)
eqm = c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#b15928')
# panel A: all raw observations, wind speed vs precipitation, with lm fit
rawppt = ggplot(df, aes(x=ppt, y=ws))+
geom_point(aes(col = Date), alpha = 0.7, size = 3)+
geom_smooth(method = "lm", col = "black")+
stat_cor(show.legend = F)+
labs( x = "Precipitation (mm)", y = "Wind Speed (m/s)", title = "A")+
scale_color_manual(values = eqm)+
theme_bw()+
theme(legend.position = c(30, 30))#; rawppt
# panel B: same data faceted by year
# NOTE(review): legend.position = c(30, 30) is outside the [0,1] panel area —
# presumably intended to hide the legend; confirm.
rawppt2 = ggplot(df, aes(x=ppt, y=ws))+
geom_point(aes(col = Date), alpha = 0.9, size = 2)+
geom_smooth(method = "lm", col = "black")+
labs( x = "Precipitation (mm)", y = NULL, title = "B")+
stat_cor(show.legend = F)+
facet_wrap(~date2)+
scale_color_manual(values = eqm)+
theme_bw()+
theme(legend.position = c(30, 30))#; rawppt2
# combine panels A and B side by side
rawppt3 = ggarrange(rawppt, rawppt2, ncol = 2)
#ggsave(filename = "WS-Prec_darro_RAW_all.png", plot = rawppt3,
#       path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 35, height = 15, units = "cm", dpi = 300)
#ggsave(filename = "WS-Prec_darro_RAW.png", plot = rawppt,
#       path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 13, height = 13, units = "cm", dpi = 300)
#Block Maxima diary precipitation -----------------------------------------------
# extRemes::blockmaxxer keeps, for each day (blocks = df$date), the row with
# the maximum precipitation; the wind speed of that same row is retained.
df2 <- blockmaxxer(df, blocks = df$date, which="ppt") #Function only blocking precipitation
#Plot data
# panel A: wind speed vs daily-maximum precipitation, with lm fit
maxppt = ggplot(df2, aes(x=ppt, y=ws))+
geom_point(alpha = 0.7, size = 3, col = '#ff7f00')+
geom_smooth(method = "lm", col = "black")+
stat_cor(show.legend = F)+
labs( x = "Maximum Precipitation (mm)", y = "Wind Speed (m/s)", title = "A")+
#scale_color_manual(values = eqm)+
theme_bw()+
theme(legend.position = c(30, 30)); maxppt
# panel B: same data faceted by year
maxppt2 = ggplot(df2, aes(x=ppt, y=ws))+
geom_point(alpha = 0.7, size = 2, col = '#ff7f00')+
geom_smooth(method = "lm", col = "black")+
stat_cor(show.legend = F)+
labs( x = "Maximum Precipitation (mm)", y = NULL, title = "B")+
facet_wrap(~Date)+
#scale_color_manual(values = eqm)+
theme_bw()+
theme(legend.position = c(30, 30)); maxppt2
# combine panels and export the figure
maxppt3 = ggarrange(maxppt, maxppt2, ncol = 2); maxppt3
ggsave(filename = "WS-Prec_darro_all.png", plot = maxppt3,
path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 30, height = 13, units = "cm", dpi = 300)
#ggsave(filename = "WS-Prec_darro.png", plot = maxppt,
#       path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 17, height = 15, units = "cm", dpi = 300)
#ggsave(filename = "WS-Prec_darro_facet.png", plot = maxppt2,
#       path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 23, height = 15, units = "cm", dpi = 300)
#Extract tail dependence values and Bootstrapping ------------------------------------
# quantile thresholds 0.05 .. 1.00 in steps of 0.01 (96 values)
tq = seq(.05, 1, .01); length(tq)
# extRemes::taildep returns the chi and chibar statistics at one quantile
t1 = taildep(df2$ppt, df2$ws, 0.05)
# point estimates at the first threshold (no confidence interval yet)
taild = data.frame(tq[[1]], t1[[1]], t1[[2]])
colnames(taild) = c("quant", "chi", "chibar")
#Chi ---------------------------------------------------------------------------------
#Function to extract Chi data from tail dependence
# `formula` is unused but kept because boot() forwards it via `formula=f1`.
# `indices` now defaults to all rows so the function can also be called
# directly; the original direct call below failed with
# "argument 'indices' is missing, with no default".
chifun = function(formula, data, indices = seq_len(nrow(data))) {
df2 <- data[indices,] # selecting sample with boot
fit <- taildep(df2$ppt, df2$ws, 0.05)
return(fit[[1]])  # chi statistic at quantile 0.05
}
f1 =c(df2$ppt, df2$ws, 0.05)
chifun(formula = f1, data = df2) #Just a test
#Performing 1000 replications with boot
output <- boot(data=df2, statistic=chifun,
R=1000, formula=f1)
#Obtaining a confidence interval of 95%
inter = boot.ci(output, type="perc")
# row for the first threshold: estimate plus percentile CI bounds
chi = data.frame(tq[[1]], inter$t0, inter$percent[1,4], inter$percent[1,5])
colnames(chi) = c("quant", "value", "low", "upp")
#Loop to do all Chi quantiles
# z = 2..96 covers the remaining thresholds in tq; chifun is redefined on each
# iteration so its body captures the current threshold tq[[z]] (boot supplies
# the `indices` argument on every replicate).
for (z in 2:96) {
print(tq[[z]])  # progress indicator
chifun = function(formula, data, indices) {
df2 <- data[indices,]
fit <- taildep(df2$ppt, df2$ws, (tq[[z]]))
return(fit[[1]])
}
f1 =c(df2$ppt, df2$ws, (tq[[z]]))
output <- boot(data=df2, statistic=chifun,
R=1000, formula=f1)
inter = boot.ci(output, type="perc")
# one row per threshold: estimate plus percentile 95% CI bounds
t2 = data.frame(tq[[z]], inter$t0, inter$percent[1,4], inter$percent[1,5])
colnames(t2) = c("quant", "value", "low", "upp")
chi = rbind(chi, t2)  # append this threshold's row
}
#Chibar ---------------------------------------------------------------------------------
# Same bootstrap as for Chi, but extracting the chibar statistic (fit[[2]]).
# `indices` defaults to all rows so the direct test call below works; the
# original call failed with "argument 'indices' is missing, with no default".
chibarfun = function(formula, data, indices = seq_len(nrow(data))) {
df2 <- data[indices,] # selecting sample with boot
fit <- taildep(df2$ppt, df2$ws, 0.05)
return(fit[[2]])  # chibar statistic at quantile 0.05
}
f1 =c(df2$ppt, df2$ws, 0.05)
chibarfun(formula = f1, data = df2) #Just a test
#Performing 1000 replications with boot
output <- boot(data=df2, statistic=chibarfun,
R=1000, formula=f1)
#Obtaining a confidence interval of 95%
inter = boot.ci(output, type="perc")
# row for the first threshold: estimate plus percentile CI bounds
chibar = data.frame(tq[[1]], inter$t0, inter$percent[1,4], inter$percent[1,5])
colnames(chibar) = c("quant", "value", "low", "upp")
#Loop to do all Chibar quantiles
# analogous to the Chi loop above, but extracting fit[[2]] (chibar)
for (z in 2:96) {
print(tq[[z]])  # progress indicator
chibarfun = function(formula, data, indices) {
df2 <- data[indices,]
fit <- taildep(df2$ppt, df2$ws, (tq[[z]]))
return(fit[[2]])
}
f1 =c(df2$ppt, df2$ws, (tq[[z]]))
output <- boot(data=df2, statistic=chibarfun,
R=1000, formula=f1)
inter = boot.ci(output, type="perc")
# one row per threshold: estimate plus percentile 95% CI bounds
t2 = data.frame(tq[[z]], inter$t0, inter$percent[1,4], inter$percent[1,5])
colnames(t2) = c("quant", "value", "low", "upp")
chibar = rbind(chibar, t2)  # append this threshold's row
}
#Plot results ------------------------------------------------------------------------------
# Chi across quantile thresholds with the bootstrap 95% CI as a ribbon
# (axis label typo fixed: "theshold" -> "threshold")
ggplot(chi, aes(quant, value))+
geom_line(size = 1)+
labs(x = "Quantile threshold q", y = "Chi")+
geom_ribbon(aes(ymin = low, ymax = upp), alpha = 0.3, fill = "green")+
ylim(0, 1)+
theme_bw()
# Chibar can be negative, hence the wider y range
ggplot(chibar, aes(quant, value))+
geom_line(size = 1)+
labs(x = "Quantile threshold q", y = "Chibar")+
geom_ribbon(aes(ymin = low, ymax = upp), alpha = 0.3, fill = "green")+
ylim(-1, 1)+
theme_bw()
#Unite Tail dependence results from Darro and ERA5 ------------------------------------
chi$data = c("Darro Station")    # tag the rows with their source dataset
chibar$data = c("Darro Station")
# previously computed results for the ERA5 reanalysis and the flux tower
chi_era = read.csv("C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Dados ERA5/CHI_ERA5.csv", sep = ",")
chibar_era = read.csv("C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Dados ERA5/CHIBAR_ERA5.csv", sep = ",")
chi_torre = read.csv("C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Dados Torre/CHI_Tower.csv", sep = ",")
chibar_torre = read.csv("C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Dados Torre/CHIBAR_Tower.csv", sep = ",")
# stack all three sources into long data frames for plotting
chi2 = rbind(chi, chi_era, chi_torre)
chibar2 = rbind(chibar, chibar_era, chibar_torre)
colnames(chi2)[5] = c("Dataset")    # rename the source column ("data")
colnames(chibar2)[5] = c("Dataset")
# Panel A: Chi per dataset (line + CI ribbon); legend placed off-panel,
# presumably to hide it since panel B carries the shared legend.
chi_plot = ggplot(chi2, aes(quant, value, fill = Dataset, linetype = Dataset))+
geom_line()+
labs(x = NULL, y = "Chi", title = "A")+
geom_ribbon(aes(ymin = low, ymax = upp), alpha = 0.25)+
scale_fill_manual(values = c("#33a02c", "red", "#1f78b4"))+
theme_bw()+
theme(legend.position = c(30,30)); chi_plot
# Panel B: Chibar per dataset with the legend inside the panel
# (axis label typo fixed: "theshold" -> "threshold")
chibar_plot = ggplot(chibar2, aes(quant, value, fill = Dataset, linetype = Dataset))+
geom_line()+
labs(x = "Quantile threshold q", y = "Chibar", title = "B")+
geom_ribbon(aes(ymin = low, ymax = upp), alpha = 0.25)+
scale_fill_manual(values = c("#33a02c", "red", "#1f78b4"))+
theme_bw()+
theme(legend.position = c(.8,.2)); chibar_plot
# stack panel A above panel B
chis = ggarrange(chi_plot, chibar_plot, ncol = 1); chis
#ggsave(filename = "WS-Prec_darro_era_chi_chibar.png", plot = chis,
# path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 15, height = 20, units = "cm", dpi = 300)
#ggsave(filename = "WS-Prec_darro_chi.png", plot = chi_plot,
# path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 20, height = 10, units = "cm", dpi = 300)
#ggsave(filename = "WS-Prec_darro_chibar.png", plot = chibar_plot,
# path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 20, height = 10, units = "cm", dpi = 300)
| /Doutorado/Chapter-2/Tower and Darro Station/Wind Speed (Darro) X Precipitation (Darro) - Maxima Precpitacao diaria.R | no_license | Eduardoqm/Science-Repository | R | false | false | 9,827 | r | #-------------------------------------------------------------------------
#Wind Speed (Darro) X Precipitation (Darro) - Maxima diary precipitation
#-------------------------------------------------------------------------
#Eduardo Q Marques 17-05-2022
#eduardobio2009@gmail.com
#-------------------------------------------------------------------------
library(tidyverse)
library(reshape2)
library(ggplot2)
library(ggpubr)
library(viridis)
library(fmsb)
library(lubridate)
library(extRemes)
library(boot)
#Darro data ====================================================================
setwd("C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Banco de Dados Tanguro/Area1-plot/Dados das torres")
darro = read.csv("Master_Estacao_Darro_2020.csv", sep = ",")
#Filter data ------------------------------------------------------------------
df = darro %>%
select(Date, windSpd, ppt)
colnames(df) = c("date", "ws", "ppt")
df$date = as.Date(df$date)
df$date2 = as.numeric(substr(df$date, 1, 4))
df$month = as.character(substr(df$date, 6, 7))
df$Date = as.factor(df$date2)
#Filters
df = df %>% filter(date2 %in% c(2010,2011,2012,2013,2014,2015,2016,2017,2018,2019,2020)) #Consistent data time series
df = df %>% filter(month %in% c("10","11","12","01","02","03","04")) #Rainy months to AW climate
df = df %>% filter(ppt <100) #Outlier maybe a error in registration
#Plot data
eqm = c('#a6cee3','#1f78b4','#b2df8a','#33a02c','#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a','#b15928')
rawppt = ggplot(df, aes(x=ppt, y=ws))+
geom_point(aes(col = Date), alpha = 0.7, size = 3)+
geom_smooth(method = "lm", col = "black")+
stat_cor(show.legend = F)+
labs( x = "Precipitation (mm)", y = "Wind Speed (m/s)", title = "A")+
scale_color_manual(values = eqm)+
theme_bw()+
theme(legend.position = c(30, 30))#; rawppt
rawppt2 = ggplot(df, aes(x=ppt, y=ws))+
geom_point(aes(col = Date), alpha = 0.9, size = 2)+
geom_smooth(method = "lm", col = "black")+
labs( x = "Precipitation (mm)", y = NULL, title = "B")+
stat_cor(show.legend = F)+
facet_wrap(~date2)+
scale_color_manual(values = eqm)+
theme_bw()+
theme(legend.position = c(30, 30))#; rawppt2
rawppt3 = ggarrange(rawppt, rawppt2, ncol = 2)
#ggsave(filename = "WS-Prec_darro_RAW_all.png", plot = rawppt3,
# path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 35, height = 15, units = "cm", dpi = 300)
#ggsave(filename = "WS-Prec_darro_RAW.png", plot = rawppt,
# path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 13, height = 13, units = "cm", dpi = 300)
#Block Maxima diary precipitation -----------------------------------------------
df2 <- blockmaxxer(df, blocks = df$date, which="ppt") #Function only blocking precipitation
#Plot data
maxppt = ggplot(df2, aes(x=ppt, y=ws))+
geom_point(alpha = 0.7, size = 3, col = '#ff7f00')+
geom_smooth(method = "lm", col = "black")+
stat_cor(show.legend = F)+
labs( x = "Maximum Precipitation (mm)", y = "Wind Speed (m/s)", title = "A")+
#scale_color_manual(values = eqm)+
theme_bw()+
theme(legend.position = c(30, 30)); maxppt
maxppt2 = ggplot(df2, aes(x=ppt, y=ws))+
geom_point(alpha = 0.7, size = 2, col = '#ff7f00')+
geom_smooth(method = "lm", col = "black")+
stat_cor(show.legend = F)+
labs( x = "Maximum Precipitation (mm)", y = NULL, title = "B")+
facet_wrap(~Date)+
#scale_color_manual(values = eqm)+
theme_bw()+
theme(legend.position = c(30, 30)); maxppt2
maxppt3 = ggarrange(maxppt, maxppt2, ncol = 2); maxppt3
ggsave(filename = "WS-Prec_darro_all.png", plot = maxppt3,
path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 30, height = 13, units = "cm", dpi = 300)
#ggsave(filename = "WS-Prec_darro.png", plot = maxppt,
# path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 17, height = 15, units = "cm", dpi = 300)
#ggsave(filename = "WS-Prec_darro_facet.png", plot = maxppt2,
# path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 23, height = 15, units = "cm", dpi = 300)
#Extract tail dependence values and Bootstrapping ------------------------------------
tq = seq(.05, 1, .01); length(tq)
t1 = taildep(df2$ppt, df2$ws, 0.05)
taild = data.frame(tq[[1]], t1[[1]], t1[[2]])
colnames(taild) = c("quant", "chi", "chibar")
#Chi ---------------------------------------------------------------------------------
#Function to extract Chi data from tail dependence
chifun = function(formula, data, indices) {
df2 <- data[indices,] # selecting sample with boot
fit <- taildep(df2$ppt, df2$ws, 0.05)
return(fit[[1]])
}
f1 =c(df2$ppt, df2$ws, 0.05)
chifun(formula = f1, data = df2) #Just a test
#Performing 1000 replications with boot
output <- boot(data=df2, statistic=chifun,
R=1000, formula=f1)
#Obtaining a confidence interval of 95%
inter = boot.ci(output, type="perc")
chi = data.frame(tq[[1]], inter$t0, inter$percent[1,4], inter$percent[1,5])
colnames(chi) = c("quant", "value", "low", "upp")
#Loop to do all Chi quantiles
for (z in 2:96) {
print(tq[[z]])
chifun = function(formula, data, indices) {
df2 <- data[indices,]
fit <- taildep(df2$ppt, df2$ws, (tq[[z]]))
return(fit[[1]])
}
f1 =c(df2$ppt, df2$ws, (tq[[z]]))
output <- boot(data=df2, statistic=chifun,
R=1000, formula=f1)
inter = boot.ci(output, type="perc")
t2 = data.frame(tq[[z]], inter$t0, inter$percent[1,4], inter$percent[1,5])
colnames(t2) = c("quant", "value", "low", "upp")
chi = rbind(chi, t2)
}
#Chibar ---------------------------------------------------------------------------------
chibarfun = function(formula, data, indices) {
df2 <- data[indices,] # selecting sample with boot
fit <- taildep(df2$ppt, df2$ws, 0.05)
return(fit[[2]])
}
f1 =c(df2$ppt, df2$ws, 0.05)
chibarfun(formula = f1, data = df2) #Just a test
#Performing 1000 replications with boot
output <- boot(data=df2, statistic=chibarfun,
R=1000, formula=f1)
#Obtaining a confidence interval of 95%
inter = boot.ci(output, type="perc")
chibar = data.frame(tq[[1]], inter$t0, inter$percent[1,4], inter$percent[1,5])
colnames(chibar) = c("quant", "value", "low", "upp")
#Loop to do all Chibar quantiles
for (z in 2:96) {
print(tq[[z]])
chibarfun = function(formula, data, indices) {
df2 <- data[indices,]
fit <- taildep(df2$ppt, df2$ws, (tq[[z]]))
return(fit[[2]])
}
f1 =c(df2$ppt, df2$ws, (tq[[z]]))
output <- boot(data=df2, statistic=chibarfun,
R=1000, formula=f1)
inter = boot.ci(output, type="perc")
t2 = data.frame(tq[[z]], inter$t0, inter$percent[1,4], inter$percent[1,5])
colnames(t2) = c("quant", "value", "low", "upp")
chibar = rbind(chibar, t2)
}
#Plot results ------------------------------------------------------------------------------
ggplot(chi, aes(quant, value))+
geom_line(size = 1)+
labs(x = "Quantile theshold q", y = "Chi")+
geom_ribbon(aes(ymin = low, ymax = upp), alpha = 0.3, fill = "green")+
ylim(0, 1)+
theme_bw()
ggplot(chibar, aes(quant, value))+
geom_line(size = 1)+
labs(x = "Quantile theshold q", y = "Chibar")+
geom_ribbon(aes(ymin = low, ymax = upp), alpha = 0.3, fill = "green")+
ylim(-1, 1)+
theme_bw()
#Unite Tail dependence results from Darro and ERA5 ------------------------------------
chi$data = c("Darro Station")
chibar$data = c("Darro Station")
chi_era = read.csv("C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Dados ERA5/CHI_ERA5.csv", sep = ",")
chibar_era = read.csv("C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Dados ERA5/CHIBAR_ERA5.csv", sep = ",")
chi_torre = read.csv("C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Dados Torre/CHI_Tower.csv", sep = ",")
chibar_torre = read.csv("C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Dados Torre/CHIBAR_Tower.csv", sep = ",")
chi2 = rbind(chi, chi_era, chi_torre)
chibar2 = rbind(chibar, chibar_era, chibar_torre)
colnames(chi2)[5] = c("Dataset")
colnames(chibar2)[5] = c("Dataset")
chi_plot = ggplot(chi2, aes(quant, value, fill = Dataset, linetype = Dataset))+
geom_line()+
labs(x = NULL, y = "Chi", title = "A")+
geom_ribbon(aes(ymin = low, ymax = upp), alpha = 0.25)+
scale_fill_manual(values = c("#33a02c", "red", "#1f78b4"))+
theme_bw()+
theme(legend.position = c(30,30)); chi_plot
chibar_plot = ggplot(chibar2, aes(quant, value, fill = Dataset, linetype = Dataset))+
geom_line()+
labs(x = "Quantile theshold q", y = "Chibar", title = "B")+
geom_ribbon(aes(ymin = low, ymax = upp), alpha = 0.25)+
scale_fill_manual(values = c("#33a02c", "red", "#1f78b4"))+
theme_bw()+
theme(legend.position = c(.8,.2)); chibar_plot
chis = ggarrange(chi_plot, chibar_plot, ncol = 1); chis
#ggsave(filename = "WS-Prec_darro_era_chi_chibar.png", plot = chis,
# path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 15, height = 20, units = "cm", dpi = 300)
#ggsave(filename = "WS-Prec_darro_chi.png", plot = chi_plot,
# path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 20, height = 10, units = "cm", dpi = 300)
#ggsave(filename = "WS-Prec_darro_chibar.png", plot = chibar_plot,
# path = "C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Capitulo2/Figuras/Wind Speed vs Precipitation", width = 20, height = 10, units = "cm", dpi = 300)
|
# Unroot a Newick tree (downstream tools such as codeml require unrooted trees).
library(ape)
testtree <- read.tree("7123_2.txt")  # read the rooted Newick tree
unrooted_tr <- unroot(testtree)      # collapse the root node
write.tree(unrooted_tr, file="7123_2_unrooted.txt")  # write the result
testtree <- read.tree("7123_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="7123_2_unrooted.txt") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NumCheck.R
\name{createNumCheck}
\alias{createNumCheck}
\title{Create data.frame for specification of numerical plausibility checks.}
\usage{
createNumCheck(GADSdat)
}
\arguments{
\item{GADSdat}{A \code{GADSdat} object.}
}
\value{
A data.frame with the following variables:
\item{variable}{All numerical variables in the \code{GADSdat}}
\item{varLabel}{Corresponding variable labels}
\item{min}{Minimum value for the specific variable.}
\item{max}{Maximum value for the specific variable.}
\item{value_new}{Which value should be inserted if values exceed the specified range?}
}
\description{
All numerical variables without value labels in a \code{GADSdat} are selected and a \code{data.frame} is created, which allows the specification
of minima and maxima.
}
\details{
This function is currently under development.
}
\examples{
# tbd
}
| /man/createNumCheck.Rd | no_license | beckerbenj/eatGADS | R | false | true | 918 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NumCheck.R
\name{createNumCheck}
\alias{createNumCheck}
\title{Create data.frame for specification of numerical plausibility checks.}
\usage{
createNumCheck(GADSdat)
}
\arguments{
\item{GADSdat}{A \code{GADSdat} object.}
}
\value{
A data.frame with the following variables:
\item{variable}{All numerical variables in the \code{GADSdat}}
\item{varLabel}{Corresponding variable labels}
\item{min}{Minimum value for the specific variable.}
\item{max}{Maximum value for the specific variable.}
\item{value_new}{Which value should be inserted if values exceed the specified range?}
}
\description{
All numerical variables without value labels in a \code{GADSdat} are selected and a \code{data.frame} is created, which allows the specification
of minima and maxima.
}
\details{
This function is currently under development.
}
\examples{
# tbd
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createModels.R
\name{updateCurrentValues}
\alias{updateCurrentValues}
\title{Updates current values}
\usage{
updateCurrentValues(templateTags, initCollection)
}
\arguments{
\item{templateTags}{The template tags}
\item{initCollection}{Initial collection}
}
\value{
Updated current value or the original if no match.
}
\description{
Body tags' currentValues are substituted at the bottom-most level
after the init collection is finalized (any nested tags are processed recursively).
}
\keyword{internal}
| /man/updateCurrentValues.Rd | no_license | wang-ze/MplusAutomation | R | false | true | 590 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createModels.R
\name{updateCurrentValues}
\alias{updateCurrentValues}
\title{Updates current values}
\usage{
updateCurrentValues(templateTags, initCollection)
}
\arguments{
\item{templateTags}{The template tags}
\item{initCollection}{Initial collection}
}
\value{
Updated current value or the original if no match.
}
\description{
Body tags currentValues are substituted at the bottom-most level
after init collection is finalized (recursively process any nested tags)
}
\keyword{internal}
|
# OpenAPI Petstore
#
# This is a sample server Petstore server. For this sample, you can use the api key `special-key` to test the authorization filters.
#
# OpenAPI spec version: 1.0.0
#
# Generated by: https://openapi-generator.tech
#' @title Pet operations
#' @description petstore.Pet
#'
#' @field path Stores url path of the request.
#' @field apiClient Handles the client-server communication.
#'
#' @importFrom R6 R6Class
#'
#' @section Methods:
#' \describe{
#'
#' AddPet Add a new pet to the store
#'
#'
#' DeletePet Deletes a pet
#'
#'
#' FindPetsByStatus Finds Pets by status
#'
#'
#' FindPetsByTags Finds Pets by tags
#'
#'
#' GetPetById Find pet by ID
#'
#'
#' UpdatePet Update an existing pet
#'
#'
#' UpdatePetWithForm Updates a pet in the store with form data
#'
#'
#' UploadFile uploads an image
#'
#' }
#'
#' @importFrom caTools base64encode
#' @export
# R6 client for the Petstore /pet endpoints. Each method assembles the query,
# header and body pieces, delegates the HTTP call to self$apiClient$CallApi,
# and maps the response: 2xx -> decoded model object (or nothing for void
# endpoints), 4xx -> ApiResponse("API client error"),
# 5xx -> ApiResponse("API server error").
#
# Bug fix: DeletePet, FindPetsByStatus, FindPetsByTags and GetPetById passed
# `body` to CallApi without ever assigning it, so the name resolved to the
# base::body function. They now set `body <- NULL` (no request body).
PetApi <- R6::R6Class(
  'PetApi',
  public = list(
    apiClient = NULL,
    # Use the supplied ApiClient, or create a default one.
    initialize = function(apiClient){
      if (!missing(apiClient)) {
        self$apiClient <- apiClient
      }
      else {
        self$apiClient <- ApiClient$new()
      }
    },
    # POST /pet — add a new pet; `pet` (required) is serialised as JSON body.
    AddPet = function(pet, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet`)) {
        stop("Missing required parameter `pet`.")
      }
      if (!missing(`pet`)) {
        body <- `pet`$toJSONString()
      } else {
        body <- NULL
      }
      urlPath <- "/pet"
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "POST",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        # void response, no need to return anything
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # DELETE /pet/{petId} — delete a pet; `api.key` is sent as an extra header.
    DeletePet = function(pet.id, api.key=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet.id`)) {
        stop("Missing required parameter `pet.id`.")
      }
      headerParams['api_key'] <- `api.key`
      body <- NULL  # DELETE sends no request body (was unset: base::body)
      urlPath <- "/pet/{petId}"
      if (!missing(`pet.id`)) {
        urlPath <- gsub(paste0("\\{", "petId", "\\}"), `pet.id`, urlPath)
      }
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "DELETE",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        # void response, no need to return anything
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # GET /pet/findByStatus — find pets matching `status` (required).
    FindPetsByStatus = function(status, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`status`)) {
        stop("Missing required parameter `status`.")
      }
      queryParams['status'] <- status
      body <- NULL  # GET sends no request body (was unset: base::body)
      urlPath <- "/pet/findByStatus"
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "GET",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        Pet$new()$fromJSONString(httr::content(resp, "text", encoding = "UTF-8"))
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # GET /pet/findByTags — find pets matching `tags` (required).
    FindPetsByTags = function(tags, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`tags`)) {
        stop("Missing required parameter `tags`.")
      }
      queryParams['tags'] <- tags
      body <- NULL  # GET sends no request body (was unset: base::body)
      urlPath <- "/pet/findByTags"
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "GET",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        Pet$new()$fromJSONString(httr::content(resp, "text", encoding = "UTF-8"))
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # GET /pet/{petId} — fetch one pet; authenticated with the api_key header.
    GetPetById = function(pet.id, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet.id`)) {
        stop("Missing required parameter `pet.id`.")
      }
      body <- NULL  # GET sends no request body (was unset: base::body)
      urlPath <- "/pet/{petId}"
      if (!missing(`pet.id`)) {
        urlPath <- gsub(paste0("\\{", "petId", "\\}"), `pet.id`, urlPath)
      }
      # API key authentication
      if ("api_key" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["api_key"]) > 0) {
        headerParams['api_key'] <- paste(unlist(self$apiClient$apiKeys["api_key"]), collapse='')
      }
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "GET",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        Pet$new()$fromJSONString(httr::content(resp, "text", encoding = "UTF-8"))
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # PUT /pet — update an existing pet; `pet` (required) is the JSON body.
    UpdatePet = function(pet, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet`)) {
        stop("Missing required parameter `pet`.")
      }
      if (!missing(`pet`)) {
        body <- `pet`$toJSONString()
      } else {
        body <- NULL
      }
      urlPath <- "/pet"
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "PUT",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        # void response, no need to return anything
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # POST /pet/{petId} — update a pet via form fields `name` and `status`.
    UpdatePetWithForm = function(pet.id, name=NULL, status=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet.id`)) {
        stop("Missing required parameter `pet.id`.")
      }
      body <- list(
        "name" = name,
        "status" = status
      )
      urlPath <- "/pet/{petId}"
      if (!missing(`pet.id`)) {
        urlPath <- gsub(paste0("\\{", "petId", "\\}"), `pet.id`, urlPath)
      }
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "POST",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        # void response, no need to return anything
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # POST /pet/{petId}/uploadImage — upload an image for a pet.
    # NOTE(review): `file` defaults to NULL but httr::upload_file(NULL) would
    # fail, so `file` is effectively required — confirm against callers.
    UploadFile = function(pet.id, additional.metadata=NULL, file=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet.id`)) {
        stop("Missing required parameter `pet.id`.")
      }
      body <- list(
        "additionalMetadata" = additional.metadata,
        "file" = httr::upload_file(file)
      )
      urlPath <- "/pet/{petId}/uploadImage"
      if (!missing(`pet.id`)) {
        urlPath <- gsub(paste0("\\{", "petId", "\\}"), `pet.id`, urlPath)
      }
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                     method = "POST",
                                     queryParams = queryParams,
                                     headerParams = headerParams,
                                     body = body,
                                     ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        ModelApiResponse$new()$fromJSONString(httr::content(resp, "text", encoding = "UTF-8"))
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    }
  )
)
| /samples/client/petstore/R/R/pet_api.R | permissive | mrvdot/openapi-generator | R | false | false | 11,384 | r | # OpenAPI Petstore
#
# This is a sample server Petstore server. For this sample, you can use the api key `special-key` to test the authorization filters.
#
# OpenAPI spec version: 1.0.0
#
# Generated by: https://openapi-generator.tech
#' @title Pet operations
#' @description petstore.Pet
#'
#' @field path Stores url path of the request.
#' @field apiClient Handles the client-server communication.
#'
#' @importFrom R6 R6Class
#'
#' @section Methods:
#' \describe{
#'
#' AddPet Add a new pet to the store
#'
#'
#' DeletePet Deletes a pet
#'
#'
#' FindPetsByStatus Finds Pets by status
#'
#'
#' FindPetsByTags Finds Pets by tags
#'
#'
#' GetPetById Find pet by ID
#'
#'
#' UpdatePet Update an existing pet
#'
#'
#' UpdatePetWithForm Updates a pet in the store with form data
#'
#'
#' UploadFile uploads an image
#'
#' }
#'
#' @importFrom caTools base64encode
#' @export
#' R6 client for the Petstore /pet endpoints.
#'
#' Each method builds a request (URL path, query params, header params,
#' body), dispatches it through self$apiClient$CallApi, and maps the HTTP
#' status onto either a deserialized model object (2xx), an ApiResponse
#' tagged "API client error" (4xx), or one tagged "API server error" (5xx).
#' Other status codes fall through and the method returns NULL (invisibly).
#'
#' FIX(review): DeletePet, FindPetsByStatus, FindPetsByTags and GetPetById
#' previously passed `body` to CallApi without ever assigning it; in R that
#' name silently resolved to the base function `base::body`, so a function
#' object was sent as the request body.  Each of those methods now sets
#' `body <- NULL` explicitly.
PetApi <- R6::R6Class(
  'PetApi',
  public = list(
    apiClient = NULL,
    # Use the supplied ApiClient, or fall back to a default-configured one.
    initialize = function(apiClient){
      if (!missing(apiClient)) {
        self$apiClient <- apiClient
      }
      else {
        self$apiClient <- ApiClient$new()
      }
    },
    # Add a new pet to the store (POST /pet); `pet` is serialized as JSON.
    AddPet = function(pet, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet`)) {
        stop("Missing required parameter `pet`.")
      }
      if (!missing(`pet`)) {
        body <- `pet`$toJSONString()
      } else {
        body <- NULL
      }
      urlPath <- "/pet"
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                  method = "POST",
                  queryParams = queryParams,
                  headerParams = headerParams,
                  body = body,
                  ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        # void response, no need to return anything
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # Delete a pet by id (DELETE /pet/{petId}); `api.key` is an optional header.
    DeletePet = function(pet.id, api.key=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet.id`)) {
        stop("Missing required parameter `pet.id`.")
      }
      headerParams['api_key'] <- `api.key`
      body <- NULL  # FIX: was previously undefined (resolved to base::body)
      urlPath <- "/pet/{petId}"
      if (!missing(`pet.id`)) {
        urlPath <- gsub(paste0("\\{", "petId", "\\}"), `pet.id`, urlPath)
      }
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                  method = "DELETE",
                  queryParams = queryParams,
                  headerParams = headerParams,
                  body = body,
                  ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        # void response, no need to return anything
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # Find pets by status (GET /pet/findByStatus?status=...).
    FindPetsByStatus = function(status, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`status`)) {
        stop("Missing required parameter `status`.")
      }
      queryParams['status'] <- status
      body <- NULL  # FIX: was previously undefined (resolved to base::body)
      urlPath <- "/pet/findByStatus"
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                  method = "GET",
                  queryParams = queryParams,
                  headerParams = headerParams,
                  body = body,
                  ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        Pet$new()$fromJSONString(httr::content(resp, "text", encoding = "UTF-8"))
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # Find pets by tags (GET /pet/findByTags?tags=...).
    FindPetsByTags = function(tags, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`tags`)) {
        stop("Missing required parameter `tags`.")
      }
      queryParams['tags'] <- tags
      body <- NULL  # FIX: was previously undefined (resolved to base::body)
      urlPath <- "/pet/findByTags"
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                  method = "GET",
                  queryParams = queryParams,
                  headerParams = headerParams,
                  body = body,
                  ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        Pet$new()$fromJSONString(httr::content(resp, "text", encoding = "UTF-8"))
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # Fetch a single pet by id (GET /pet/{petId}); uses api_key header auth.
    GetPetById = function(pet.id, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet.id`)) {
        stop("Missing required parameter `pet.id`.")
      }
      body <- NULL  # FIX: was previously undefined (resolved to base::body)
      urlPath <- "/pet/{petId}"
      if (!missing(`pet.id`)) {
        urlPath <- gsub(paste0("\\{", "petId", "\\}"), `pet.id`, urlPath)
      }
      # API key authentication
      if ("api_key" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["api_key"]) > 0) {
        headerParams['api_key'] <- paste(unlist(self$apiClient$apiKeys["api_key"]), collapse='')
      }
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                  method = "GET",
                  queryParams = queryParams,
                  headerParams = headerParams,
                  body = body,
                  ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        Pet$new()$fromJSONString(httr::content(resp, "text", encoding = "UTF-8"))
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # Replace an existing pet (PUT /pet); `pet` is serialized as JSON.
    UpdatePet = function(pet, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet`)) {
        stop("Missing required parameter `pet`.")
      }
      if (!missing(`pet`)) {
        body <- `pet`$toJSONString()
      } else {
        body <- NULL
      }
      urlPath <- "/pet"
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                  method = "PUT",
                  queryParams = queryParams,
                  headerParams = headerParams,
                  body = body,
                  ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        # void response, no need to return anything
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # Update a pet via form fields (POST /pet/{petId}) with optional name/status.
    UpdatePetWithForm = function(pet.id, name=NULL, status=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet.id`)) {
        stop("Missing required parameter `pet.id`.")
      }
      body <- list(
        "name" = name,
        "status" = status
      )
      urlPath <- "/pet/{petId}"
      if (!missing(`pet.id`)) {
        urlPath <- gsub(paste0("\\{", "petId", "\\}"), `pet.id`, urlPath)
      }
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                  method = "POST",
                  queryParams = queryParams,
                  headerParams = headerParams,
                  body = body,
                  ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        # void response, no need to return anything
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    },
    # Upload an image for a pet (POST /pet/{petId}/uploadImage).
    # NOTE(review): httr::upload_file(NULL) errors, so `file` is effectively
    # required despite the NULL default -- confirm against callers.
    UploadFile = function(pet.id, additional.metadata=NULL, file=NULL, ...){
      args <- list(...)
      queryParams <- list()
      headerParams <- c()
      if (missing(`pet.id`)) {
        stop("Missing required parameter `pet.id`.")
      }
      body <- list(
        "additionalMetadata" = additional.metadata,
        "file" = httr::upload_file(file)
      )
      urlPath <- "/pet/{petId}/uploadImage"
      if (!missing(`pet.id`)) {
        urlPath <- gsub(paste0("\\{", "petId", "\\}"), `pet.id`, urlPath)
      }
      # OAuth token
      headerParams['Authorization'] <- paste("Bearer", self$apiClient$accessToken, sep=" ")
      resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                  method = "POST",
                  queryParams = queryParams,
                  headerParams = headerParams,
                  body = body,
                  ...)
      if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
        ModelApiResponse$new()$fromJSONString(httr::content(resp, "text", encoding = "UTF-8"))
      } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
        ApiResponse$new("API client error", resp)
      } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
        ApiResponse$new("API server error", resp)
      }
    }
  )
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paper_functions.R
\name{plotFracDrift}
\alias{plotFracDrift}
\title{plotFracDrift}
\usage{
plotFracDrift(summ.frac)
}
\arguments{
\item{summ.frac}{summ frac list to plot, contains $baf and $cn}
}
\description{
plotFracDrift
}
| /man/plotFracDrift.Rd | permissive | quevedor2/CCLid | R | false | true | 304 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paper_functions.R
\name{plotFracDrift}
\alias{plotFracDrift}
\title{plotFracDrift}
\usage{
plotFracDrift(summ.frac)
}
\arguments{
\item{summ.frac}{summ frac list to plot, contains $baf and $cn}
}
\description{
plotFracDrift
}
|
### ---
### Title: "Untitled"
### Create: "Yuansh"
### Date: "5/01/2020"
### Email: yuansh3354@163.com
### output: html_document
### ---
### step0 准备
##### 1. 设置镜像
if(T){
options()$repos
options()$BioC_mirror
options(BioC_mirror="http://mirrors.ustc.edu.cn/bioc/")
options("repos" = c(CRAN="http://mirrors.tuna.tsinghua.edu.cn/CRAN/"))
#setwd('/Volumes/Lexar/ZG/')
#BiocManager::install('randomForestSRC')
#install.packages('包')
}
##### 2. 导入包
if(T){
library(limma)
library(clusterProfiler)
library(GEOquery)
library(org.Hs.eg.db)
library(RColorBrewer)
library(AnnotationDbi)
library(affy)
library(gcrma)
library(stringr)
library(hgu133plus2.db )
library(org.Hs.eg.db)
library(GenomicFeatures)
library(rtracklayer)
library(biomaRt)
library(glmnet)
library(survival)
library(Hmisc)
}
##### 3. 数据下载
file = 'GSE66229'
setwd('/Volumes/Lexar/ZG/胃癌')
df_expr = read.csv(paste(file,'.csv',sep = ''),header = T, row.names = 1)
pd = read.csv('survival.csv',header = T)
pd = na.omit(pd)
geoid = pd[,1]
sit = which(colnames(df_expr) %in% geoid)
GEO_ID = c(colnames(df_expr)[sit],colnames(df_expr)[-sit])
label = c(rep('tumor',300),rep('normal',100))
clinic = cbind(GEO_ID,label)
pd = merge(clinic,pd,all = T)
rownames(pd) = pd$GEO_ID
pd$GEO_ID == colnames(df_expr)
sit = which(pd$class!=3)
pd = pd[-sit,]
df_expr = df_expr[,-sit]
pd$Death = ifelse(pd$Death == 1 ,0,1)
if(T){
group_list = as.character(pd[,2])
design <- model.matrix(~0+factor(group_list))
colnames(design)=levels(factor(group_list))
rownames(design)=colnames(df_expr)
print(paste(colnames(design)[1],colnames(design)[2],sep = '-'))
contrast.matrix<-makeContrasts(paste(colnames(design)[1],colnames(design)[2],sep = '-'),
levels = design)
}
##### 2. 寻找差异基因
# Run the standard limma differential-expression pipeline: fit the linear
# model, apply the contrast, moderate with eBayes (default trend = FALSE),
# and return the full top table with NA rows removed.
deg <- function(exprSet, design, contrast.matrix) {
  linear_fit <- lmFit(exprSet, design)
  contrast_fit <- contrasts.fit(linear_fit, contrast.matrix)
  # Empirical-Bayes moderation of the contrast fit.
  contrast_fit <- eBayes(contrast_fit)
  full_table <- topTable(contrast_fit, coef = 1, n = Inf)
  na.omit(full_table)
}
deg = deg(df_expr,design,contrast.matrix)
deg = deg[order(deg$adj.P.Val),]
# 输出文件夹中的GSE13911-deg.csv就是差异基因
top = deg
top = top[which(top$adj.P.Val<0.05 & abs(top$logFC)>0.3),]
write.csv(top,paste(file,'-deg.csv',sep = ''))
##### 3. 提取满足条件的差异基因
if(T){
top = deg
top = top[which(top$adj.P.Val<0.05),]
top = top[order(top$adj.P.Val),]
top = top[1:100,]
int_gene = rownames(top[which(top$adj.P.Val<0.05& abs(top$logFC)>0.5),])
id = rownames(int_gene)
write.csv(top,paste(file,'-top-deg.csv',sep = ''))
}
##### 4.聚类
if(T){
library(pheatmap)
n = t(scale(t(df_expr[int_gene,])))
n[n>2] = 2
n[n< -2] = -2
ac = data.frame(g=group_list)
ac$names = colnames(n)
ac = ac[order(ac[,1]),]
rownames(ac) = ac$names
a = as.data.frame(ac[,1])
colnames(a) = 'Type'
rownames(a) = rownames(ac)
pheatmap(n,show_colnames =F,
show_rownames = F,
annotation_col=a,filename = '符合条件的基因聚类.png')
}
### 5.通路富集
##### 其中go是功能富集,kegg是通路富集
if(T){
top = deg
top = top[which(top$adj.P.Val<0.05& abs(top$logFC)>0.3),]
logFC_t=1
top$g= 0
top[which(top$logFC<= 0),]$g = 'DOWN'
top[which(top$logFC>= 0),]$g = 'UP'
top$ENTREZID=rownames(top)
gene_up= top[top$g == 'UP','ENTREZID']
gene_down=top[top$g == 'DOWN','ENTREZID']
gene_diff=c(gene_up,gene_down)
source('kegg_and_go_up_and_down.R')
#run_go(gene_up,gene_down,pro='NORMAL-TUMOR')
}
if(T){
if(T){
kk.up <- enrichKEGG(gene = gene_up,
organism = 'hsa',
universe = gene_diff,
pvalueCutoff = 0.9,
qvalueCutoff =0.9)
head(kk.up)[,1:6]
barplot(kk.up )
ggsave('kk.up.barplot.png')
write.csv(kk.up,paste(file,'-kk_up.csv',sep = ''),row.names = F)
kk.down <- enrichKEGG(gene = gene_down,
organism = 'hsa',
universe = gene_diff,
pvalueCutoff = 0.9,
qvalueCutoff =0.9)
head(kk.down)[,1:6]
barplot(kk.down )
ggsave('kk.down.barplot.png')
write.csv(kk.down,paste(file,'-kk_down.csv',sep = ''),row.names = F)
kk.diff <- enrichKEGG(gene = gene_diff,
organism = 'hsa',
pvalueCutoff = 0.05)
head(kk.diff)[,1:6]
barplot(kk.diff )
ggsave('kk.diff.barplot.png')
kegg_diff_dt <- as.data.frame(kk.diff)
kegg_down_dt <- as.data.frame(kk.down)
kegg_up_dt <- as.data.frame(kk.up)
down_kegg<-kegg_down_dt[kegg_down_dt$pvalue<0.05,];down_kegg$group=-1
up_kegg<-kegg_up_dt[kegg_up_dt$pvalue<0.05,];up_kegg$group=1
g_kegg=kegg_plot(up_kegg,down_kegg)
print(g_kegg)
ggsave(g_kegg,filename = 'kegg_up_down.png')
}#kegg
}
### step2 cox分析
##### 1.构建数据:其中sample是所需表达谱,survival是所需生存信息
if(T){
top = deg
top = top[which(top$adj.P.Val<0.05& abs(top$logFC)>0.3),]
top = top[order(top$adj.P.Val),]
IDs = rownames(top)
gene = paste0('ID_',IDs)
sample = df_expr[IDs,]
survival = pd[,c(5,4)]
survival = na.omit(survival)
sample = sample[,colnames(sample) %in% rownames(survival)]
gene = gsub(gene,pattern = '-', replacement = '_')
rownames(sample) = gene
colnames(survival) = c('time', 'status')
cox_dat = as.data.frame(cbind(survival,t(sample)))
cox_dat[,1] = as.numeric(cox_dat[,1])
cox_dat[,2] = as.numeric(cox_dat[,2])
}
##### 2.构建cox模型 识别和预后相关的基因
if(T){
library("survival")
library("survminer")
library(clusterProfiler)
library(stringr)
cox_analy = function(gene,survival_info){
uni_cox = function(single_gene){
formula = as.formula(paste0('Surv(time,status)~',single_gene))
surv_uni_cox = summary(coxph(formula, data = cox_dat))
ph_hypothesis_p = cox.zph(coxph(formula,data = cox_dat))$table[1:3]
if(surv_uni_cox$coefficients[,5]<0.05 & ph_hypothesis_p > 0.05){
single_cox_report = data.frame(
'ID'=single_gene,
'beta' = surv_uni_cox$coefficients[,1],
'Hazard_Ratio' = exp(surv_uni_cox$coefficients[,1]),
'z_pvalue'=surv_uni_cox$coefficients[,5],
'Wald_pvalue'= as.numeric(surv_uni_cox$waldtest[3]),
'Likelihood_pvalue'=as.numeric(surv_uni_cox$logtest[3]))
single_cox_report
}
}
uni_cox_list = lapply(gene,uni_cox)
do.call(rbind,uni_cox_list)
}
a = gene
uni_cox_df = cox_analy(a,cox_dat)
cox_IDs = str_split(uni_cox_df[,1],'_',simplify = T)
uni_cox_df[,1] = cox_IDs[,2]
cox_IDs = cox_IDs[,2]
}
uni_cox_df$z_pvalue_adjust = p.adjust(uni_cox_df$z_pvalue ,method = "BH")
write.csv(uni_cox_df,paste(file,'-cox-gene.csv',sep = ''),row.names = F)
# 生存分析
if(T){
ID = uni_cox_df[,1]
geneIDselect <-select(org.Hs.eg.db, #.db是这个芯片数据对应的注释包
keys=ID,
columns=c("SYMBOL"), #clolumns参数是你要转换的ID类型是什么,这里选择三个。
keytype="ENTREZID" )#函数里面的keytype与keys参数是对应的,keys是你输入的那些数据,keytype是指这些数据是属于什么类型的数据。
survival_exp = df_expr[ID,]
rownames(survival_exp) = geneIDselect[,2]
survival_table = survival
names = rownames(survival_table)
sit = which(colnames(survival_exp) %in% names)
survival_exp = survival_exp[,sit]
n= 2# 2
rownames(survival_exp[n,])
survival_table$ADHFE1 = ifelse(t(survival_exp[n,]) > mean(as.numeric(survival_exp[n,])),'hig','low')
meta = na.omit(survival_table)
meta$time = as.numeric(meta$time)
sfit1 <- survfit(Surv(time, status)~ADHFE1, data=meta) #primary_ER/CTC_ER
## more complicate figures.
ggsurvplot(sfit1,palette =c("#1EB2A6","#F67575"),
risk.table =TRUE,
pval =TRUE,
alpha=0.75,
conf.int = FALSE,
xlab ="Time in months",
ylab ="Duration of the treatment",
ncensor.plot = F)
}
| /胃癌/生存分析.R | no_license | yuansh3354/project_test | R | false | false | 8,485 | r | ### ---
### Title: "Untitled"
### Create: "Yuansh"
### Date: "5/01/2020"
### Email: yuansh3354@163.com
### output: html_document
### ---
### step0 准备
##### 1. 设置镜像
if(T){
options()$repos
options()$BioC_mirror
options(BioC_mirror="http://mirrors.ustc.edu.cn/bioc/")
options("repos" = c(CRAN="http://mirrors.tuna.tsinghua.edu.cn/CRAN/"))
#setwd('/Volumes/Lexar/ZG/')
#BiocManager::install('randomForestSRC')
#install.packages('包')
}
##### 2. 导入包
if(T){
library(limma)
library(clusterProfiler)
library(GEOquery)
library(org.Hs.eg.db)
library(RColorBrewer)
library(AnnotationDbi)
library(affy)
library(gcrma)
library(stringr)
library(hgu133plus2.db )
library(org.Hs.eg.db)
library(GenomicFeatures)
library(rtracklayer)
library(biomaRt)
library(glmnet)
library(survival)
library(Hmisc)
}
##### 3. 数据下载
file = 'GSE66229'
setwd('/Volumes/Lexar/ZG/胃癌')
df_expr = read.csv(paste(file,'.csv',sep = ''),header = T, row.names = 1)
pd = read.csv('survival.csv',header = T)
pd = na.omit(pd)
geoid = pd[,1]
sit = which(colnames(df_expr) %in% geoid)
GEO_ID = c(colnames(df_expr)[sit],colnames(df_expr)[-sit])
label = c(rep('tumor',300),rep('normal',100))
clinic = cbind(GEO_ID,label)
pd = merge(clinic,pd,all = T)
rownames(pd) = pd$GEO_ID
pd$GEO_ID == colnames(df_expr)
sit = which(pd$class!=3)
pd = pd[-sit,]
df_expr = df_expr[,-sit]
pd$Death = ifelse(pd$Death == 1 ,0,1)
if(T){
group_list = as.character(pd[,2])
design <- model.matrix(~0+factor(group_list))
colnames(design)=levels(factor(group_list))
rownames(design)=colnames(df_expr)
print(paste(colnames(design)[1],colnames(design)[2],sep = '-'))
contrast.matrix<-makeContrasts(paste(colnames(design)[1],colnames(design)[2],sep = '-'),
levels = design)
}
##### 2. 寻找差异基因
# limma differential-expression helper: linear model fit, contrast fit,
# eBayes moderation (default trend = FALSE), then the complete topTable
# with incomplete rows dropped.
deg <- function(exprSet, design, contrast.matrix) {
  fit_lm <- lmFit(exprSet, design)
  fit_contrast <- eBayes(contrasts.fit(fit_lm, contrast.matrix))
  result_table <- topTable(fit_contrast, coef = 1, n = Inf)
  na.omit(result_table)
}
deg = deg(df_expr,design,contrast.matrix)
deg = deg[order(deg$adj.P.Val),]
# 输出文件夹中的GSE13911-deg.csv就是差异基因
top = deg
top = top[which(top$adj.P.Val<0.05 & abs(top$logFC)>0.3),]
write.csv(top,paste(file,'-deg.csv',sep = ''))
##### 3. 提取满足条件的差异基因
if(T){
top = deg
top = top[which(top$adj.P.Val<0.05),]
top = top[order(top$adj.P.Val),]
top = top[1:100,]
int_gene = rownames(top[which(top$adj.P.Val<0.05& abs(top$logFC)>0.5),])
id = rownames(int_gene)
write.csv(top,paste(file,'-top-deg.csv',sep = ''))
}
##### 4.聚类
if(T){
library(pheatmap)
n = t(scale(t(df_expr[int_gene,])))
n[n>2] = 2
n[n< -2] = -2
ac = data.frame(g=group_list)
ac$names = colnames(n)
ac = ac[order(ac[,1]),]
rownames(ac) = ac$names
a = as.data.frame(ac[,1])
colnames(a) = 'Type'
rownames(a) = rownames(ac)
pheatmap(n,show_colnames =F,
show_rownames = F,
annotation_col=a,filename = '符合条件的基因聚类.png')
}
### 5.通路富集
##### 其中go是功能富集,kegg是通路富集
if(T){
top = deg
top = top[which(top$adj.P.Val<0.05& abs(top$logFC)>0.3),]
logFC_t=1
top$g= 0
top[which(top$logFC<= 0),]$g = 'DOWN'
top[which(top$logFC>= 0),]$g = 'UP'
top$ENTREZID=rownames(top)
gene_up= top[top$g == 'UP','ENTREZID']
gene_down=top[top$g == 'DOWN','ENTREZID']
gene_diff=c(gene_up,gene_down)
source('kegg_and_go_up_and_down.R')
#run_go(gene_up,gene_down,pro='NORMAL-TUMOR')
}
if(T){
if(T){
kk.up <- enrichKEGG(gene = gene_up,
organism = 'hsa',
universe = gene_diff,
pvalueCutoff = 0.9,
qvalueCutoff =0.9)
head(kk.up)[,1:6]
barplot(kk.up )
ggsave('kk.up.barplot.png')
write.csv(kk.up,paste(file,'-kk_up.csv',sep = ''),row.names = F)
kk.down <- enrichKEGG(gene = gene_down,
organism = 'hsa',
universe = gene_diff,
pvalueCutoff = 0.9,
qvalueCutoff =0.9)
head(kk.down)[,1:6]
barplot(kk.down )
ggsave('kk.down.barplot.png')
write.csv(kk.down,paste(file,'-kk_down.csv',sep = ''),row.names = F)
kk.diff <- enrichKEGG(gene = gene_diff,
organism = 'hsa',
pvalueCutoff = 0.05)
head(kk.diff)[,1:6]
barplot(kk.diff )
ggsave('kk.diff.barplot.png')
kegg_diff_dt <- as.data.frame(kk.diff)
kegg_down_dt <- as.data.frame(kk.down)
kegg_up_dt <- as.data.frame(kk.up)
down_kegg<-kegg_down_dt[kegg_down_dt$pvalue<0.05,];down_kegg$group=-1
up_kegg<-kegg_up_dt[kegg_up_dt$pvalue<0.05,];up_kegg$group=1
g_kegg=kegg_plot(up_kegg,down_kegg)
print(g_kegg)
ggsave(g_kegg,filename = 'kegg_up_down.png')
}#kegg
}
### step2 cox分析
##### 1.构建数据:其中sample是所需表达谱,survival是所需生存信息
if(T){
top = deg
top = top[which(top$adj.P.Val<0.05& abs(top$logFC)>0.3),]
top = top[order(top$adj.P.Val),]
IDs = rownames(top)
gene = paste0('ID_',IDs)
sample = df_expr[IDs,]
survival = pd[,c(5,4)]
survival = na.omit(survival)
sample = sample[,colnames(sample) %in% rownames(survival)]
gene = gsub(gene,pattern = '-', replacement = '_')
rownames(sample) = gene
colnames(survival) = c('time', 'status')
cox_dat = as.data.frame(cbind(survival,t(sample)))
cox_dat[,1] = as.numeric(cox_dat[,1])
cox_dat[,2] = as.numeric(cox_dat[,2])
}
##### 2.构建cox模型 识别和预后相关的基因
if(T){
library("survival")
library("survminer")
library(clusterProfiler)
library(stringr)
cox_analy = function(gene,survival_info){
uni_cox = function(single_gene){
formula = as.formula(paste0('Surv(time,status)~',single_gene))
surv_uni_cox = summary(coxph(formula, data = cox_dat))
ph_hypothesis_p = cox.zph(coxph(formula,data = cox_dat))$table[1:3]
if(surv_uni_cox$coefficients[,5]<0.05 & ph_hypothesis_p > 0.05){
single_cox_report = data.frame(
'ID'=single_gene,
'beta' = surv_uni_cox$coefficients[,1],
'Hazard_Ratio' = exp(surv_uni_cox$coefficients[,1]),
'z_pvalue'=surv_uni_cox$coefficients[,5],
'Wald_pvalue'= as.numeric(surv_uni_cox$waldtest[3]),
'Likelihood_pvalue'=as.numeric(surv_uni_cox$logtest[3]))
single_cox_report
}
}
uni_cox_list = lapply(gene,uni_cox)
do.call(rbind,uni_cox_list)
}
a = gene
uni_cox_df = cox_analy(a,cox_dat)
cox_IDs = str_split(uni_cox_df[,1],'_',simplify = T)
uni_cox_df[,1] = cox_IDs[,2]
cox_IDs = cox_IDs[,2]
}
uni_cox_df$z_pvalue_adjust = p.adjust(uni_cox_df$z_pvalue ,method = "BH")
write.csv(uni_cox_df,paste(file,'-cox-gene.csv',sep = ''),row.names = F)
# 生存分析
if(T){
ID = uni_cox_df[,1]
geneIDselect <-select(org.Hs.eg.db, #.db是这个芯片数据对应的注释包
keys=ID,
columns=c("SYMBOL"), #clolumns参数是你要转换的ID类型是什么,这里选择三个。
keytype="ENTREZID" )#函数里面的keytype与keys参数是对应的,keys是你输入的那些数据,keytype是指这些数据是属于什么类型的数据。
survival_exp = df_expr[ID,]
rownames(survival_exp) = geneIDselect[,2]
survival_table = survival
names = rownames(survival_table)
sit = which(colnames(survival_exp) %in% names)
survival_exp = survival_exp[,sit]
n= 2# 2
rownames(survival_exp[n,])
survival_table$ADHFE1 = ifelse(t(survival_exp[n,]) > mean(as.numeric(survival_exp[n,])),'hig','low')
meta = na.omit(survival_table)
meta$time = as.numeric(meta$time)
sfit1 <- survfit(Surv(time, status)~ADHFE1, data=meta) #primary_ER/CTC_ER
## more complicate figures.
ggsurvplot(sfit1,palette =c("#1EB2A6","#F67575"),
risk.table =TRUE,
pval =TRUE,
alpha=0.75,
conf.int = FALSE,
xlab ="Time in months",
ylab ="Duration of the treatment",
ncensor.plot = F)
}
|
\name{income}
\alias{income}
\docType{data}
\title{income mean in 1000 $ on subsamples of age, gender and marital status groups}
\description{
Mean income (in thousands of dollars) for subsamples defined by age group, gender, and marital status.
}
\usage{data(income)}
\format{
A data frame with 8 observations and 6 variables.
\describe{
\item{agegroup}{Age group}
\item{maritalstatus}{Marital Status}
\item{gender}{Gender}
\item{samplesize}{Sample size in age group}
\item{nresp}{Number of respondents in age group}
\item{income}{Average income in group}
}
}
\source{Statistical Analysis with Missing data, Little and Rubin, 2nd Ed. p.~57}
\examples{
data(income)
}
\keyword{datasets}
| /man/income.Rd | no_license | DanielBonnery/dataIncome | R | false | false | 623 | rd | \name{income}
\alias{income}
\docType{data}
\title{income mean in 1000 $ on subsamples of age, gender and marital status groups}
\description{
Mean income (in thousands of dollars) for subsamples defined by age group, gender, and marital status.
}
\usage{data(income)}
\format{
A data frame with 8 observations and 6 variables.
\describe{
\item{agegroup}{Age group}
\item{maritalstatus}{Marital Status}
\item{gender}{Gender}
\item{samplesize}{Sample size in age group}
\item{nresp}{Number of respondents in age group}
\item{income}{Average income in group}
}
}
\source{Statistical Analysis with Missing data, Little and Rubin, 2nd Ed. p.~57}
\examples{
data(income)
}
\keyword{datasets}
|
# Extracted package example: bootstrap dominance analysis on the classic
# longley employment data set (1000 bootstrap replications).
library(dominanceanalysis)
### Name: bootDominanceAnalysis
### Title: Bootstrap analysis for Dominance Analysis
### Aliases: bootDominanceAnalysis
### ** Examples
## No test:
lm.1<-lm(Employed~.,longley)
da.boot<-bootDominanceAnalysis(lm.1,R=1000)
summary(da.boot)
## End(No test)
| /data/genthat_extracted_code/dominanceanalysis/examples/bootDominanceAnalysis.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 289 | r | library(dominanceanalysis)
### Name: bootDominanceAnalysis
### Title: Bootstrap analysis for Dominance Analysis
### Aliases: bootDominanceAnalysis
### ** Examples
## No test:
lm.1<-lm(Employed~.,longley)
da.boot<-bootDominanceAnalysis(lm.1,R=1000)
summary(da.boot)
## End(No test)
|
library(jsonlite)
#library(data.table)
library(RCurl)
source('hasloA.R', encoding = 'UTF-8')
# Load the counter-id -> location-name lookup from a JSON file,
# simplified to an R vector/list (one entry per counter id).
read_counterids <- function(filename = "pliki/counterids.json") {
  read_json(filename, simplifyVector = TRUE)
}
# Download daily bicycle counts from the API between `od` and `do` (ISO
# dates) and return a wide table: one row per day, one column per counter
# location.
#
# `ids` maps counter ids (as character) to location names; `credentials`
# ("user:password") must be supplied by the sourced hasloA.R.
# NOTE(review): this uses data.table()/setnames()/dcast(), but
# library(data.table) is commented out at the top of the file -- confirm
# data.table is attached elsewhere before this is called.
zaladuj_dane_api <- function(ids = ids, od = "2018-01-01", do = Sys.Date()) {
  # paste0, not paste: the default sep = " " previously injected spaces
  # into the query string ("?start= 2018-01-01 &end= ..."), producing a
  # malformed URL.
  link <- paste0('http://greenelephant.pl/rowery/api/v1/?start=', od, '&end=', do)
  txt <- getURL(link, userpwd = credentials)
  tabela <- data.table(read.csv(text = txt, sep = ',', header = FALSE))
  setnames(tabela, c("Licznik", "Data", "Liczba_rowerow"))
  #tabela<-tabela[Licznik!=100042112 & Licznik!=100042111] #powsinska
  tabela[, Data := as.Date(Data)]
  # Translate counter ids to human-readable location names via `ids`.
  tabela[, Miejsce := as.character(Licznik)]
  tabela[, Miejsce := unlist(ids[Miejsce])]
  tabela_wide <- dcast(tabela, Data ~ Miejsce, value.var = "Liczba_rowerow")
  tabela_wide
}
#library(data.table)
library(RCurl)
source('hasloA.R', encoding = 'UTF-8')
# Read the mapping of counter ids to location names from JSON,
# returning it as a simplified R object.
read_counterids <- function(filename = "pliki/counterids.json") {
  lookup <- read_json(filename, simplifyVector = TRUE)
  lookup
}
# Fetch daily bicycle counts from the API for the date range [od, do]
# and reshape them into a wide table (Data x location).
#
# `ids` maps counter ids (as character) to location names; `credentials`
# ("user:password") comes from the sourced hasloA.R.
# NOTE(review): relies on data.table()/setnames()/dcast() although
# library(data.table) is commented out above -- verify it is loaded.
zaladuj_dane_api <- function(ids = ids, od = "2018-01-01", do = Sys.Date()) {
  # paste0, not paste: the default sep = " " previously inserted spaces
  # into the URL, breaking the request.
  link <- paste0('http://greenelephant.pl/rowery/api/v1/?start=', od, '&end=', do)
  txt <- getURL(link, userpwd = credentials)
  tabela <- data.table(read.csv(text = txt, sep = ',', header = FALSE))
  setnames(tabela, c("Licznik", "Data", "Liczba_rowerow"))
  #tabela<-tabela[Licznik!=100042112 & Licznik!=100042111] #powsinska
  tabela[, Data := as.Date(Data)]
  # Map counter ids onto readable location names.
  tabela[, Miejsce := as.character(Licznik)]
  tabela[, Miejsce := unlist(ids[Miejsce])]
  tabela_wide <- dcast(tabela, Data ~ Miejsce, value.var = "Liczba_rowerow")
  tabela_wide
}
#
# Copyright 2007-2020 by the individuals mentioned in the source code history
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Get, set, or reset a single OpenMx option, either on a model or globally.
#
# With `value` missing: returns the current value of `key` (a model-level
# setting wins over the global default from getOption('mxOptions')).
# With `value` supplied: stores it on `model`, or globally when model=NULL.
# reset=TRUE clears options (globally via mxSetDefaultOptions(), or the
# model's option list).  Key matching is case-insensitive; the canonical
# capitalization from the stored option list is used.
mxOption <- function(model=NULL, key=NULL, value, reset = FALSE) {
	# 'key' must be a single character string unless we are only resetting
	if (!reset && (length(key) != 1 || !is.character(key))) {
		stop("argument 'key' must be a character string")
	}
	if (!missing(model) && !is.null(model) && !is(model, "MxModel")) {
		stop(paste("The first argument to mxOption must",
			"be an MxModel, not", omxQuotes(class(model))))
	}
	# Global reset: restore factory defaults and return invisibly.
	if (is.null(model) && reset) {
		return(invisible(mxSetDefaultOptions()))
	}
	optionsNames <- names(getOption('mxOptions'))
	# Case-insensitive exact match against the known option names.
	match <- grep(paste("^", key, "$", sep = ""), optionsNames,
		ignore.case=TRUE)
	if(length(match) == 0) {
		stop(paste("argument 'key' is the character string",
			omxQuotes(key), "and cannot be found in",
			"getOption('mxOptions')"))
	}
	key <- optionsNames[[match]] # repair capitalization
	# Query mode: model-level value takes precedence over the global default.
	if (missing(value)) {
		if (length(model) && !is.null(model@options[[key]])) {
			return(model@options[[key]])
		}
		return(processDefaultOptionList(key, value))
	}
	# Only these two options accept vector-valued settings.
	if (length(value) > 1 && key!="No Sort Data" && key != "Status OK") {
		msg <- paste("argument 'value' must be either NULL or of length 1.",
			"You gave me an object of length", length(value))
		stop(msg)
	}
	if (length(reset) != 1 || !is.logical(reset)) {
		stop("argument 'reset' must be TRUE or FALSE")
	}
	# "Major iterations" may be a function of (nParams, nConstraints);
	# validate its arity and reject '...' arguments.
	if (key == "Major iterations" && typeof(value) == "closure") {
		args <- formals(value)
		if (length(args) != 2) {
			msg <- paste("The function provided to the option 'Major iterations'",
				"must have exactly 2 arguments but you have provided",
				"a function with", length(args), "arguments.")
			stop(msg)
		}
		if (!single.na(match("...", names(args)))) {
			msg <- paste("You have provided a function to the option 'Major iterations'",
				"that uses the '...' argument.")
			stop(msg)
		}
	}
	# No model given: update the global default option list.
	if (is.null(model)) {
		return(processDefaultOptionList(key, value))
	}
	if (length(model) > 1 || !is(model, "MxModel")) {
		stop("argument 'model' must be an MxModel object")
	}
	# Model-level reset: wipe all model options.
	if (reset) {
		model@options <- list()
		return(model)
	}
	# These options are global-only and may not be set per model.
	if (key == "Default optimizer" || key == "Gradient algorithm" ||
	    key == "Gradient iterations" || key == "Gradient step size") {
		stop(paste(omxQuotes(key), " is a global option and cannot be set on models.\n",
		"To change ", omxQuotes(key) ," globally, use, e.g.:\n",
		"mxOption(NULL, '", key, "', '", value,"')", sep = ""))
		# to use NLOPT, use: mxOption(NULL, 'Default optimizer', 'NLOPT')
	}
	# Normalize status-code values to the canonical statusCode form.
	if (key == "Status OK") value <- as.statusCode(value)
	model@options[[key]] <- value
	return(model)
}
# Look up (value missing) or update (value supplied) one entry of the
# global 'mxOptions' option list.  Key matching is case-insensitive and
# the canonical capitalization from the stored list is used.  On update,
# the revised option list is installed via options() and returned invisibly.
processDefaultOptionList <- function(key, value) {
	current <- getOption('mxOptions')
	canonical <- names(current)
	hit <- grep(paste0("^", key, "$"), canonical, ignore.case = TRUE)
	if (length(hit) == 0) {
		stop(paste("argument 'key' has a value",
			omxQuotes(key), "that cannot be found in",
			"getOption('mxOptions')"))
	}
	key <- canonical[[hit]]  # canonical capitalization
	if (missing(value)) {
		return(current[[key]])
	}
	current[[key]] <- value
	options('mxOptions' = current)
	invisible(current)
}
##' imxDetermineDefaultOptimizer
##'
##' This is an internal function exported for those people who know
##' what they are doing.
##'
##' @details Returns a character, the default optimizer.  The
##' IMX_OPT_ENGINE environment variable, when non-empty, overrides the
##' built-in choice.  NOTE(review): both branches of the NPSOL check
##' currently return "SLSQP", so imxHasNPSOL() does not affect the
##' result; the branch structure is presumably kept for future divergence.
imxDetermineDefaultOptimizer <- function() {
	engine <- Sys.getenv("IMX_OPT_ENGINE")
	# Empty string means the environment variable is unset.
	if (!nchar(engine)) {
		if (imxHasNPSOL()) {
			engine <- "SLSQP"
		} else {
			engine <- "SLSQP"
		}
	}
	engine
}
# Names and values must all be strings
npsolOptions <- list(
"Nolist" = "",
"Print level" = "0",
"Minor print level" = "0",
"Print file" = "0",
"Summary file" = "0",
"Function precision" = "Auto",#"1e-14"
"Optimality tolerance" = "6.3e-12",
"Infinite bound size" = "1.0e+15",
"Feasibility tolerance" = "5e-2",
"Major iterations" = function(nParams, nConstraints) { max(1000, 3 * nParams + 10 * nConstraints) },
"Verify level" = "-1",
"Line search tolerance" = "0.3",
"Derivative level" = "0",
"Step limit" = "2.0",
"Hessian" = "Yes",
# below are not npsol options
"Calculate Hessian" = "Yes",
"Standard Errors" = "Yes",
"Analytic Gradients" = "Yes"
)
# Default options controlling checkpoint output (periodic state dumps)
# and the socket-based progress reporting interface.
checkpointOptions <- list(
	"Checkpoint Directory" = ".",
	"Checkpoint Prefix" = "",
	"Checkpoint Units" = "iterations",
	"Checkpoint Count" = 1,
	"Checkpoint Fullpath" = "",
	"Socket Server" = "",
	"Socket Port" = 8080,
	"Socket Units" = "minutes",
	"Socket Count" = c("minutes" = 0.08, "iterations" = 1)
)
# Remaining default options: model processing flags, multivariate-normal
# integration tuning parameters (mvn*), gradient settings, and run limits.
otherOptions <- list(
	"Always Checkpoint" = "No",
	"Error Checking" = "Yes",
	"No Sort Data" = character(),
	"RAM Inverse Optimization" = "Yes",
	"RAM Max Depth" = NA,
	"UsePPML" = "No",
	"Allow Unlabeled" = FALSE,
	"loglikelihoodScale" = -2.0,
	"maxOrdinalPerBlock" = 20,
	"mvnMaxPointsA" = 0,
	"mvnMaxPointsB" = 0,
	"mvnMaxPointsC" = 0,
	"mvnMaxPointsD" = 3.606464,
	"mvnMaxPointsE" = -0.126859,
	"mvnAbsEps" = 0,
	"mvnRelEps" = .005,
	"maxStackDepth" = 25000L, # R_PPSSIZE/2
	"Gradient algorithm" = NULL,
	"Gradient iterations" = "Auto",#1L,
	"Gradient step size" = "Auto",#1.0e-7,
	"Parallel diagnostics" = "No",
	"Debug protect stack" = "No",
	"Nudge zero starts" = "Yes",
	"Status OK"= as.statusCode(c("OK", "OK/green")),
	"Max minutes"=0
)
# Resolve the "Major iterations" option to a character scalar.  The stored
# option may be either a literal value or a closure of
# (numParam, numConstraints); a closure is evaluated here before the
# result is stringified back into the option list.
limitMajorIterations <- function(options, numParam, numConstraints) {
	majorIters <- options[["Major iterations"]]
	if (typeof(majorIters) == "closure") {
		majorIters <- majorIters(numParam, numConstraints)
	}
	options[["Major iterations"]] <- as.character(majorIters)
	options
}
# Determine the number of threads to use.  Under snowfall (imxSfClient)
# always 1; otherwise the OMP_NUM_THREADS environment variable wins; when
# that is unset, default to 2 (capped per CRAN policy) or 1 when core
# detection fails.
imxGetNumThreads <- function() {
	if (imxSfClient()) {
		return(1L)
	} else {
		thrlimit <- as.integer(Sys.getenv("OMP_NUM_THREADS"))
		if (!is.na(thrlimit)) {
			return(thrlimit)
		} else {
			detect <- omxDetectCores()
			if(is.na(detect)) detect <- 1L
			# Due to demand by CRAN maintainers, we default to 2 cores
			# when OMP_NUM_THREADS is not set. This seems like a bad
			# policy to the OpenMx team, but we have no choice.
			else detect <- 2L
			return(detect)
		}
	}
}
# Build the final options list for a model run: start from the model's
# options (with PPML-specific overrides), merge them over the global
# defaults via combineDefaultOptions(), then set the useOptimizer flag.
# Warns when standard errors are requested without a Hessian, since the
# resulting SEs may be inaccurate.
generateOptionsList <- function(model, useOptimizer) {
	input <- list()
	if (!is.null(model)) {
		input <- model@options
		# PPML partial/split solutions do not need Hessian or SE computation.
		if( !is.null(input[["UsePPML"]])
		   && (input[["UsePPML"]] == "PartialSolved" || input[["UsePPML"]] == "Split") ) {
			input[["Calculate Hessian"]] <- "No"
			input[["Hessian"]] <- "No"
			input[["Standard Errors"]] <- "No"
		}
	}
	options <- combineDefaultOptions(input)
	if (useOptimizer) {
		options[["useOptimizer"]] <- "Yes"
		#PPML Analytical solution
		if (!is.null(model@options$UsePPML) && model@options$UsePPML == "Solved")
			options[["useOptimizer"]] <- "No"
	} else {
		options[["useOptimizer"]] <- "No"
	}
	if (identical(options[["Standard Errors"]], "Yes") &&
	    identical(options[["Calculate Hessian"]], "No")) {
		msg <- paste('The "Standard Errors" option is enabled and',
			'the "Calculate Hessian" option is disabled. This may',
			'result in poor accuracy standard errors.')
		warning(msg)
	}
	return(options)
}
# Convert the keys and values into strings
# Merge user-supplied options into the global defaults.
#
# Options named in npsolOptions are string-coerced and validated: entries
# whose default is 'Yes'/'No' must be given exactly 'Yes' or 'No'.
# "Major iterations" is handled separately because its value may be a
# function rather than a string.  All remaining user options are copied
# through untouched so non-default values are not clobbered by defaults.
combineDefaultOptions <- function(input) {
    options <- getOption('mxOptions')
    temp <- input[names(input) %in% names(npsolOptions)]
    temp[["Major iterations"]] <- NULL
    if (length(temp) > 0) {
        keys <- sapply(names(temp), as.character)
        values <- sapply(temp, as.character)
        # Logical mask over the touched options: which defaults are Yes/No flags?
        ynOptions <- options[keys]=='Yes' | options[keys]=='No'
        badYN <- values[ynOptions] != 'Yes' & values[ynOptions] != 'No'
        if (any(badYN)) {
            stop(paste("mxOption '", names(badYN),
                "' must be either 'Yes' or 'No'\n", sep=''))
        }
        options[keys] <- values
    }
    if (!is.null(input[["Major iterations"]])) {
        options[["Major iterations"]] <- input[["Major iterations"]]
    }
    #Need to make sure that non-default values for options not already handled in this function don't get
    #overwritten by the defaults:
    namesHandled <- c( names(temp), "Major iterations" )
    if(sum( !(names(input) %in% namesHandled) )>0){
        options[names(input)[!(names(input) %in% namesHandled)]] <-
            input[names(input)[!(names(input) %in% namesHandled)]]
    }
    return(options)
}
##' imxAutoOptionValue
##'
##' Convert "Auto" placeholders in global mxOptions to actual default values.
##'
##' This is an internal function exported for documentation purposes.
##' Its primary purpose is to convert the on-load value of "Auto" to
##' valid values for \link{mxOption}s \sQuote{Gradient step size},
##' \sQuote{Gradient iterations}, and
##' \sQuote{Function precision}--respectively, 1.0e-7, 1L, and 1e-14.
##'
##' @param optionName Character string naming the \link{mxOption} for which a numeric or integer value is wanted.
##' @param optionList List of options; defaults to list of global \link{mxOption}s.
##' imxAutoOptionValue
imxAutoOptionValue <- function(optionName, optionList=options()$mxOption){
    #First, check to see if the option already has a valid value (possibly in string form), and if so, return that:
    numcast <- try(suppressWarnings(as.numeric(optionList[[optionName]])),silent=TRUE)
    if(!length(numcast)){
        #NULL numcast is most likely to result from either (1) misspelled optionName,
        #or (2) user providing non-default value for optionList that lacks an element named optionName.
        #Throwing an error seems the best behavior in this case.
        #BUGFIX: sep="" was previously passed to stop() rather than paste();
        #use paste0() so the message is assembled as intended.
        stop(paste0("extracting element '",optionName,"' from argument 'optionList' resulted in NULL"))
    }
    #numcast will be try-error for e.g. NPSOL option "Major iterations" (on-load default is a function);
    #return such values unchanged.
    if(inherits(numcast, "try-error")){return(optionList[[optionName]])}
    if(length(numcast) && !is.na(numcast)){
        #Gradient iterations is an iteration count, so coerce to integer.
        if(optionName=="Gradient iterations"){numcast <- as.integer(numcast)}
        return(numcast)
    }
    #Otherwise, if the current value is a string and can be matched to "Auto",
    #convert to default numerical value for the three motivating cases:
    else{
        if(length(grep(pattern="Auto",x=optionList[[optionName]],ignore.case=TRUE))){
            if(optionName=="Gradient step size"){return(1.0e-7)}
            if(optionName=="Gradient iterations"){return(1L)}
            if(optionName=="Function precision"){return(1e-14)}
        }
        else{stop(paste("found unrecognized character string '",optionList[[optionName]],"' as value for mxOption '",optionName,"' in argument 'optionList'",sep=""))}
    }
}
| /R/MxOptions.R | no_license | mirkoruks/OpenMx | R | false | false | 10,906 | r | #
# Copyright 2007-2020 by the individuals mentioned in the source code history
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Get or set a single mxOption, either globally or on one MxModel.
#
# Usage patterns supported by this function:
#   mxOption(model, key)          -> read (model value falls back to global)
#   mxOption(model, key, value)   -> set on the model; returns the model
#   mxOption(NULL, key, value)    -> set the global default
#   mxOption(model, reset = TRUE) -> clear all model-level options
#   mxOption(NULL, reset = TRUE)  -> restore global defaults
mxOption <- function(model=NULL, key=NULL, value, reset = FALSE) {
    if (!reset && (length(key) != 1 || !is.character(key))) {
        stop("argument 'key' must be a character string")
    }
    if (!missing(model) && !is.null(model) && !is(model, "MxModel")) {
        stop(paste("The first argument to mxOption must",
            "be an MxModel, not", omxQuotes(class(model))))
    }
    if (is.null(model) && reset) {
        return(invisible(mxSetDefaultOptions()))
    }
    # Keys are matched case-insensitively against the known option names.
    optionsNames <- names(getOption('mxOptions'))
    match <- grep(paste("^", key, "$", sep = ""), optionsNames,
           ignore.case=TRUE)
    if(length(match) == 0) {
        stop(paste("argument 'key' is the character string",
               omxQuotes(key), "and cannot be found in",
               "getOption('mxOptions')"))
    }
    key <- optionsNames[[match]] # repair capitalization
    if (missing(value)) {
        # Read access: prefer the model-level value, then the global default.
        if (length(model) && !is.null(model@options[[key]])) {
            return(model@options[[key]])
        }
        return(processDefaultOptionList(key, value))
    }
    # Only "No Sort Data" and "Status OK" accept multi-element values.
    if (length(value) > 1 && key!="No Sort Data" && key != "Status OK") {
        msg <- paste("argument 'value' must be either NULL or of length 1.",
            "You gave me an object of length", length(value))
        stop(msg)
    }
    if (length(reset) != 1 || !is.logical(reset)) {
        stop("argument 'reset' must be TRUE or FALSE")
    }
    # A "Major iterations" closure must take exactly two arguments
    # (parameter count and constraint count) and may not use '...'.
    if (key == "Major iterations" && typeof(value) == "closure") {
        args <- formals(value)
        if (length(args) != 2) {
            msg <- paste("The function provided to the option 'Major iterations'",
                "must have exactly 2 arguments but you have provided",
                "a function with", length(args), "arguments.")
            stop(msg)
        }
        if (!single.na(match("...", names(args)))) {
            msg <- paste("You have provided a function to the option 'Major iterations'",
                "that uses the '...' argument.")
            stop(msg)
        }
    }
    if (is.null(model)) {
        # No model supplied: update the global default instead.
        return(processDefaultOptionList(key, value))
    }
    if (length(model) > 1 || !is(model, "MxModel")) {
        stop("argument 'model' must be an MxModel object")
    }
    if (reset) {
        # Drop every model-level override.
        model@options <- list()
        return(model)
    }
    # These options configure the backend globally and cannot vary per model.
    if (key == "Default optimizer" || key == "Gradient algorithm" ||
        key == "Gradient iterations" || key == "Gradient step size") {
        stop(paste(omxQuotes(key), " is a global option and cannot be set on models.\n",
            "To change ", omxQuotes(key) ," globally, use, e.g.:\n",
            "mxOption(NULL, '", key, "', '", value,"')", sep = ""))
        # to use NLOPT, use: mxOption(NULL, 'Default optimizer', 'NLOPT')
    }
    if (key == "Status OK") value <- as.statusCode(value)
    model@options[[key]] <- value
    return(model)
}
# Read or write one entry in the global 'mxOptions' option list.
#
# The key is matched case-insensitively against the known option names and
# repaired to canonical capitalization.  With `value` missing the current
# default is returned; otherwise the default is updated in place.
#
# @param key Character string naming the option.
# @param value Optional new value for the option.
# @return The option's value (read), or invisibly the updated list (write).
processDefaultOptionList <- function(key, value) {
    current <- getOption('mxOptions')
    knownNames <- names(current)
    hit <- grep(paste0("^", key, "$"), knownNames, ignore.case = TRUE)
    if (length(hit) == 0) {
        stop(paste("argument 'key' has a value",
                   omxQuotes(key), "that cannot be found in",
                   "getOption('mxOptions')"))
    }
    key <- knownNames[[hit]]  # repair capitalization
    if (missing(value)) {
        return(current[[key]])
    }
    current[[key]] <- value
    options('mxOptions' = current)
    invisible(current)
}
##' imxDetermineDefaultOptimizer
##'
##' This is an internal function exported for those people who know
##' what they are doing.
##'
##' @details Returns a character, the default optimizer
imxDetermineDefaultOptimizer <- function() {
    # The IMX_OPT_ENGINE environment variable, when non-empty, overrides
    # the built-in choice.
    engine <- Sys.getenv("IMX_OPT_ENGINE")
    if (!nchar(engine)) {
        # NOTE(review): both branches currently select "SLSQP", so the
        # imxHasNPSOL() check is a no-op -- presumably a leftover from when
        # NPSOL was preferred when available; confirm before simplifying.
        if (imxHasNPSOL()) {
            engine <- "SLSQP"
        } else {
            engine <- "SLSQP"
        }
    }
    engine
}
# Names and values must all be strings
# Default NPSOL-related options.  Two entries are special:
#  - "Function precision" starts as the "Auto" placeholder that
#    imxAutoOptionValue() later resolves.
#  - "Major iterations" deliberately holds a function of
#    (nParams, nConstraints); limitMajorIterations() stringifies it.
npsolOptions <- list(
    "Nolist" = "",
    "Print level" = "0",
    "Minor print level" = "0",
    "Print file" = "0",
    "Summary file" = "0",
    "Function precision" = "Auto",#"1e-14"
    "Optimality tolerance" = "6.3e-12",
    "Infinite bound size" = "1.0e+15",
    "Feasibility tolerance" = "5e-2",
    "Major iterations" = function(nParams, nConstraints) { max(1000, 3 * nParams + 10 * nConstraints) },
    "Verify level" = "-1",
    "Line search tolerance" = "0.3",
    "Derivative level" = "0",
    "Step limit" = "2.0",
    "Hessian" = "Yes",
    # below are not npsol options
    "Calculate Hessian" = "Yes",
    "Standard Errors" = "Yes",
    "Analytic Gradients" = "Yes"
)
# Default options controlling checkpoint output and the socket status
# reporting channel.
checkpointOptions <- list(
    "Checkpoint Directory" = ".",
    "Checkpoint Prefix" = "",
    "Checkpoint Units" = "iterations",
    "Checkpoint Count" = 1,
    "Checkpoint Fullpath" = "",
    "Socket Server" = "",
    "Socket Port" = 8080,
    "Socket Units" = "minutes",
    "Socket Count" = c("minutes" = 0.08, "iterations" = 1)
)
# Default values for mxOptions that are neither NPSOL optimizer options nor
# checkpoint options.  "Auto" placeholders ("Gradient iterations",
# "Gradient step size") are resolved later by imxAutoOptionValue(), and
# "Status OK" is normalized through as.statusCode().
otherOptions <- list(
    "Always Checkpoint" = "No",
    "Error Checking" = "Yes",
    "No Sort Data" = character(),
    "RAM Inverse Optimization" = "Yes",
    "RAM Max Depth" = NA,
    "UsePPML" = "No",
    "Allow Unlabeled" = FALSE,
    "loglikelihoodScale" = -2.0,
    "maxOrdinalPerBlock" = 20,
    "mvnMaxPointsA" = 0,
    "mvnMaxPointsB" = 0,
    "mvnMaxPointsC" = 0,
    "mvnMaxPointsD" = 3.606464,
    "mvnMaxPointsE" = -0.126859,
    "mvnAbsEps" = 0,
    "mvnRelEps" = .005,
    "maxStackDepth" = 25000L, # R_PPSSIZE/2
    "Gradient algorithm" = NULL,
    "Gradient iterations" = "Auto",#1L,
    "Gradient step size" = "Auto",#1.0e-7,
    "Parallel diagnostics" = "No",
    "Debug protect stack" = "No",
    "Nudge zero starts" = "Yes",
    "Status OK"= as.statusCode(c("OK", "OK/green")),
    "Max minutes"=0
)
# Resolve the "Major iterations" option to a character scalar.
# The on-load default is a closure of (numParam, numConstraints); a
# user-supplied value may already be a number or string.
limitMajorIterations <- function(options, numParam, numConstraints) {
    mIters <- options[["Major iterations"]]
    if (typeof(mIters) == "closure") {
        # Evaluate the default limit function with the problem dimensions.
        mIters <- do.call(mIters, list(numParam, numConstraints))
    }
    options[["Major iterations"]] <- as.character(mIters)
    options
}
# Decide how many threads the backend may use.
#
# Precedence: 1 thread when imxSfClient() is TRUE (presumably a cluster
# worker context -- confirm); otherwise an explicit OMP_NUM_THREADS value;
# otherwise a fixed fallback of 1 or 2 (see the CRAN note below).
imxGetNumThreads <- function() {
    if (imxSfClient()) {
        return(1L)
    } else {
        thrlimit <- as.integer(Sys.getenv("OMP_NUM_THREADS"))
        if (!is.na(thrlimit)) {
            # Respect the user's explicit OMP_NUM_THREADS limit.
            return(thrlimit)
        } else {
            detect <- omxDetectCores()
            if(is.na(detect)) detect <- 1L
            # Due to demand by CRAN maintainers, we default to 2 cores
            # when OMP_NUM_THREADS is not set. This seems like a bad
            # policy to the OpenMx team, but we have no choice.
            else detect <- 2L
            # Note: when detection succeeds, the detected count itself is
            # discarded -- the result here is always 1L or 2L.
            return(detect)
        }
    }
}
# Build the final options list for a backend run.
#
# Starts from the model's own options (PPML "PartialSolved"/"Split" modes
# force the Hessian and standard errors off), merges in the global defaults
# via combineDefaultOptions(), sets the useOptimizer flag, and warns when
# standard errors are requested without a Hessian.
#
# @param model An MxModel, or NULL to use defaults only.
# @param useOptimizer Logical: should the optimizer be run?
# @return A named list of options.
generateOptionsList <- function(model, useOptimizer) {
    input <- list()
    if (!is.null(model)) {
        input <- model@options
        if( !is.null(input[["UsePPML"]])
           && (input[["UsePPML"]] == "PartialSolved" || input[["UsePPML"]] == "Split") ) {
            input[["Calculate Hessian"]] <- "No"
            input[["Hessian"]] <- "No"
            input[["Standard Errors"]] <- "No"
        }
    }
    options <- combineDefaultOptions(input)
    if (useOptimizer) {
        options[["useOptimizer"]] <- "Yes"
        #PPML Analytical solution
        # BUGFIX: guard on is.null(model) -- previously model@options was
        # dereferenced here even when model was NULL, which raises an error.
        if (!is.null(model) && !is.null(model@options$UsePPML) && model@options$UsePPML == "Solved")
            options[["useOptimizer"]] <- "No"
    } else {
        options[["useOptimizer"]] <- "No"
    }
    # Standard errors without a Hessian are numerically unreliable.
    if (identical(options[["Standard Errors"]], "Yes") &&
        identical(options[["Calculate Hessian"]], "No")) {
        msg <- paste('The "Standard Errors" option is enabled and',
            'the "Calculate Hessian" option is disabled. This may',
            'result in poor accuracy standard errors.')
        warning(msg)
    }
    return(options)
}
# Convert the keys and values into strings
# Merge user-supplied options into the global defaults.
#
# Options named in npsolOptions are string-coerced and validated: entries
# whose default is 'Yes'/'No' must be given exactly 'Yes' or 'No'.
# "Major iterations" is handled separately because its value may be a
# function rather than a string.  All remaining user options are copied
# through untouched so non-default values are not clobbered by defaults.
combineDefaultOptions <- function(input) {
    options <- getOption('mxOptions')
    temp <- input[names(input) %in% names(npsolOptions)]
    temp[["Major iterations"]] <- NULL
    if (length(temp) > 0) {
        keys <- sapply(names(temp), as.character)
        values <- sapply(temp, as.character)
        # Logical mask over the touched options: which defaults are Yes/No flags?
        ynOptions <- options[keys]=='Yes' | options[keys]=='No'
        badYN <- values[ynOptions] != 'Yes' & values[ynOptions] != 'No'
        if (any(badYN)) {
            stop(paste("mxOption '", names(badYN),
                "' must be either 'Yes' or 'No'\n", sep=''))
        }
        options[keys] <- values
    }
    if (!is.null(input[["Major iterations"]])) {
        options[["Major iterations"]] <- input[["Major iterations"]]
    }
    #Need to make sure that non-default values for options not already handled in this function don't get
    #overwritten by the defaults:
    namesHandled <- c( names(temp), "Major iterations" )
    if(sum( !(names(input) %in% namesHandled) )>0){
        options[names(input)[!(names(input) %in% namesHandled)]] <-
            input[names(input)[!(names(input) %in% namesHandled)]]
    }
    return(options)
}
##' imxAutoOptionValue
##'
##' Convert "Auto" placeholders in global mxOptions to actual default values.
##'
##' This is an internal function exported for documentation purposes.
##' Its primary purpose is to convert the on-load value of "Auto" to
##' valid values for \link{mxOption}s \sQuote{Gradient step size},
##' \sQuote{Gradient iterations}, and
##' \sQuote{Function precision}--respectively, 1.0e-7, 1L, and 1e-14.
##'
##' @param optionName Character string naming the \link{mxOption} for which a numeric or integer value is wanted.
##' @param optionList List of options; defaults to list of global \link{mxOption}s.
##' imxAutoOptionValue
imxAutoOptionValue <- function(optionName, optionList=options()$mxOption){
    #First, check to see if the option already has a valid value (possibly in string form), and if so, return that:
    numcast <- try(suppressWarnings(as.numeric(optionList[[optionName]])),silent=TRUE)
    if(!length(numcast)){
        #NULL numcast is most likely to result from either (1) misspelled optionName,
        #or (2) user providing non-default value for optionList that lacks an element named optionName.
        #Throwing an error seems the best behavior in this case.
        #BUGFIX: sep="" was previously passed to stop() rather than paste();
        #use paste0() so the message is assembled as intended.
        stop(paste0("extracting element '",optionName,"' from argument 'optionList' resulted in NULL"))
    }
    #numcast will be try-error for e.g. NPSOL option "Major iterations" (on-load default is a function);
    #return such values unchanged.
    if(inherits(numcast, "try-error")){return(optionList[[optionName]])}
    if(length(numcast) && !is.na(numcast)){
        #Gradient iterations is an iteration count, so coerce to integer.
        if(optionName=="Gradient iterations"){numcast <- as.integer(numcast)}
        return(numcast)
    }
    #Otherwise, if the current value is a string and can be matched to "Auto",
    #convert to default numerical value for the three motivating cases:
    else{
        if(length(grep(pattern="Auto",x=optionList[[optionName]],ignore.case=TRUE))){
            if(optionName=="Gradient step size"){return(1.0e-7)}
            if(optionName=="Gradient iterations"){return(1L)}
            if(optionName=="Function precision"){return(1e-14)}
        }
        else{stop(paste("found unrecognized character string '",optionList[[optionName]],"' as value for mxOption '",optionName,"' in argument 'optionList'",sep=""))}
    }
}
|
# blast_result <- pcc7120_groel_nuccore[1,]
# xstring <- readDNAStringSet(filepath = "/home/mirkko/Documents/NCBI/ncbi_nuccore/fasta/17227497.fna", format = "fasta")
# path <- "/tmp/seqs.fna"
# nuc_up = 20000
# nuc_down = 300
# Toy blast-style fixtures for the interactive calls below.
# ex_1: three hits -- forward (4 -> 8), reverse (8 -> 4), degenerate (25 == 25).
ex_1 <- tibble(qseqid = c("a", "b", "c"),
               sstart = c(4, 8, 25),
               send = c(8, 4, 25))
# ex_2: a single reverse-strand hit (sstart > send).
ex_2 <- tibble(qseqid = c("a"),
               sstart = c(12),
               send = c(11))
# Apply define_strand() to every row of a blast result table.
#
# Multi-row inputs are processed row by row and the per-row results bound
# back into one table; a single-row input is passed straight through.
#
# @param blast_result Tibble of blast hits with sstart/send columns.
# @return Tibble with oriented coordinates and a strand column.
bliblablupp <- function(blast_result) {
  if (nrow(blast_result) > 1) {
    blast_result <- blast_result %>%
      mutate(row = seq_len(nrow(blast_result)))
    # BUGFIX: the original piped 1:3 into map_dfr(blast_result, define_strand),
    # which used the data frame as .f and never assigned hit_position, so the
    # final line errored with "object 'hit_position' not found".  Iterate over
    # the actual row count instead of the hard-coded 1:3.
    hit_position <- seq_len(nrow(blast_result)) %>%
      map_dfr(~ define_strand(blast_result[.x, ]))
  } else {
    hit_position <- define_strand(blast_result)
  }
  hit_position
}
bliblablupp(ex_1)
# Orient a single blast hit so that sstart <= send, recording the strand.
#
# @param blast_result One-row tibble with sstart/send columns.
# @return The row with sstart/send ordered and a strand column ('+'/'-').
# BUGFIX: a degenerate hit (sstart == send) previously *returned* the error
# message as a string, which callers could silently bind into their results;
# it now signals an error instead.
define_strand <- function(blast_result) {
  if (blast_result$sstart == blast_result$send) {
    stop("Invalid blast hit: sstart equals send", call. = FALSE)
  }
  if (blast_result$sstart > blast_result$send) {
    # Reverse-strand hit: swap the coordinates and tag the strand.
    hit_position <- blast_result %>%
      mutate(., sstart2 = send, send2 = sstart,
             sstart = sstart2, send = send2,
             strand = '-') %>%
      select(-c(sstart2, send2))
  } else {
    hit_position <- blast_result %>%
      mutate(strand = '+')
  }
  hit_position
}
define_strand(ex_2)
# Compute extraction limits (sstart_extr/send_extr) around a hit, clamped to
# the contig boundaries.
# NOTE(review): `sub_5`, `add_3` and `contig` are free variables -- they must
# exist in the calling environment or this function errors.  Presumably
# sub_5/add_3 are 5'/3' buffer widths; confirm and pass them as arguments.
# NOTE(review): each case_when leaves a gap (e.g. send <= width(contig) <
# send + add_3 matches neither condition, yielding NA) -- confirm intent.
set_subseq_limits <- function(hit_position) {
  # do we need to define add and sub differently for +/- strand situation ???
  extract <- hit_position %>% mutate(sstart_extr = case_when(sstart >= sub_5 ~ sstart - sub_5,
                                                             sstart < sub_5 ~ 1),
                                     send_extr = case_when(width(contig) >= send + add_3 ~ send + add_3,
                                                           width(contig) < send ~ width(contig) %>% as.double()) )
}
# Extract a subsequence from an XString/XStringSet, optionally
# reverse-complemented.
#
# @param xstring Sequence object accepted by subseq().
# @param start,end 1-based coordinates forwarded to subseq().
# @param rev_comp Reverse-complement the result? (logical)
# @return The extracted (possibly reverse-complemented) subsequence.
# Cleanup: `F`/`== T` literals replaced with proper logicals, and the
# duplicated return branches collapsed.
extract_subseq <- function(xstring, start, end, rev_comp = FALSE) {
  # do input checking
  subsequence <- xstring %>% subseq(., start = start, end = end)
  if (rev_comp) {
    subsequence <- subsequence %>% reverseComplement(.)
  }
  subsequence
}
# Extract the genomic region around a single blast hit and append it as
# FASTA to a file on disk.
#
# @param blast_result One-row blast table with sseqid/sstart/send columns.
# @param xstring DNAStringSet holding the candidate contigs.
# @param nuc_buffer c(downstream, upstream) widths added around the hit.
# @param path Output FASTA path (appended to).
fooo <- function(blast_result, xstring, nuc_buffer, path) {
  # Normalize contig names unless they already match the blast sseqid values
  # (the original had an empty TRUE branch; the condition is now inverted).
  if (!identical(blast_result$sseqid, names(xstring))) {
    contig_names <- names(xstring) %>% map_dfr(., extract_id_from_xstring)
    names(xstring) <- contig_names
  }
  # todo: useful error messages are required, e.g. when sseqid is not found
  # in names(xstring)
  hit_position <- blast_result %>% select(sseqid, sstart, send)
  contig <- xstring[xstring@ranges@NAMES == hit_position$sseqid]
  nuc_down <- nuc_buffer[1]
  nuc_up <- nuc_buffer[2]
  # todo: include option for space +/- in function params
  # test positioning of nuc_buffer, evtl nuc_down as negative value ?
  # test what to do if buffer not given?!
  # A hit with sstart > send lies on the minus strand: swap coordinates now,
  # reverse-complement at the end.  This collapses the two near-identical
  # branches of the original code.
  minus_strand <- hit_position$sstart > hit_position$send
  if (minus_strand) {
    hit_position <- hit_position %>% mutate(., sstart2 = send, send2 = sstart, sstart = sstart2, send = send2)
  }
  # NOTE(review): the case_when pairs leave a gap -- when
  # send <= width(contig) < send + nuc_up neither condition is TRUE and
  # send_extr becomes NA; confirm the intended clamping rule.
  extract <- hit_position %>% mutate(sstart_extr = case_when(sstart >= nuc_down ~ sstart - nuc_down,
                                                             sstart < nuc_down ~ 1),
                                     send_extr = case_when(width(contig) >= send + nuc_up ~ send + nuc_up,
                                                           width(contig) < send ~ width(contig) %>% as.double()) )
  fasta <- contig %>% subseq(., start = extract$sstart_extr, end = extract$send_extr)
  if (minus_strand) {
    fasta <- reverseComplement(fasta)
  }
  writeXStringSet(fasta, filepath = path, append = TRUE)
}
# Demo invocation: pull the second blast hit's region with a 345/200 nt
# buffer, append it to /tmp/seqs.fna, then read the file back.
fooo(blast_result = pcc7120_groel_nuccore[2,],
     xstring = readDNAStringSet(filepath = "/home/mirkko/Documents/NCBI/ncbi_nuccore/fasta/17227497.fna", format = "fasta"),
     path = "/tmp/seqs.fna",
     nuc_buffer = c(345, 200))
# NOTE(review): the file written above contains DNA, yet it is re-read with
# readAAStringSet (amino acids) -- presumably readDNAStringSet was intended.
fasta <- readAAStringSet(filepath = "/tmp/seqs.fna", format = "fasta")
| /man/not_working.R | no_license | mirkko-hub/rentrezaddon | R | false | false | 4,375 | r |
# blast_result <- pcc7120_groel_nuccore[1,]
# xstring <- readDNAStringSet(filepath = "/home/mirkko/Documents/NCBI/ncbi_nuccore/fasta/17227497.fna", format = "fasta")
# path <- "/tmp/seqs.fna"
# nuc_up = 20000
# nuc_down = 300
# Toy blast-style fixtures for the interactive calls below.
# ex_1: three hits -- forward (4 -> 8), reverse (8 -> 4), degenerate (25 == 25).
ex_1 <- tibble(qseqid = c("a", "b", "c"),
               sstart = c(4, 8, 25),
               send = c(8, 4, 25))
# ex_2: a single reverse-strand hit (sstart > send).
ex_2 <- tibble(qseqid = c("a"),
               sstart = c(12),
               send = c(11))
# Apply define_strand() to every row of a blast result table.
#
# Multi-row inputs are processed row by row and the per-row results bound
# back into one table; a single-row input is passed straight through.
#
# @param blast_result Tibble of blast hits with sstart/send columns.
# @return Tibble with oriented coordinates and a strand column.
bliblablupp <- function(blast_result) {
  if (nrow(blast_result) > 1) {
    blast_result <- blast_result %>%
      mutate(row = seq_len(nrow(blast_result)))
    # BUGFIX: the original piped 1:3 into map_dfr(blast_result, define_strand),
    # which used the data frame as .f and never assigned hit_position, so the
    # final line errored with "object 'hit_position' not found".  Iterate over
    # the actual row count instead of the hard-coded 1:3.
    hit_position <- seq_len(nrow(blast_result)) %>%
      map_dfr(~ define_strand(blast_result[.x, ]))
  } else {
    hit_position <- define_strand(blast_result)
  }
  hit_position
}
bliblablupp(ex_1)
# Orient a single blast hit so that sstart <= send, recording the strand.
#
# @param blast_result One-row tibble with sstart/send columns.
# @return The row with sstart/send ordered and a strand column ('+'/'-').
# BUGFIX: a degenerate hit (sstart == send) previously *returned* the error
# message as a string, which callers could silently bind into their results;
# it now signals an error instead.
define_strand <- function(blast_result) {
  if (blast_result$sstart == blast_result$send) {
    stop("Invalid blast hit: sstart equals send", call. = FALSE)
  }
  if (blast_result$sstart > blast_result$send) {
    # Reverse-strand hit: swap the coordinates and tag the strand.
    hit_position <- blast_result %>%
      mutate(., sstart2 = send, send2 = sstart,
             sstart = sstart2, send = send2,
             strand = '-') %>%
      select(-c(sstart2, send2))
  } else {
    hit_position <- blast_result %>%
      mutate(strand = '+')
  }
  hit_position
}
define_strand(ex_2)
# Compute extraction limits (sstart_extr/send_extr) around a hit, clamped to
# the contig boundaries.
# NOTE(review): `sub_5`, `add_3` and `contig` are free variables -- they must
# exist in the calling environment or this function errors.  Presumably
# sub_5/add_3 are 5'/3' buffer widths; confirm and pass them as arguments.
# NOTE(review): each case_when leaves a gap (e.g. send <= width(contig) <
# send + add_3 matches neither condition, yielding NA) -- confirm intent.
set_subseq_limits <- function(hit_position) {
  # do we need to define add and sub differently for +/- strand situation ???
  extract <- hit_position %>% mutate(sstart_extr = case_when(sstart >= sub_5 ~ sstart - sub_5,
                                                             sstart < sub_5 ~ 1),
                                     send_extr = case_when(width(contig) >= send + add_3 ~ send + add_3,
                                                           width(contig) < send ~ width(contig) %>% as.double()) )
}
# Extract a subsequence from an XString/XStringSet, optionally
# reverse-complemented.
#
# @param xstring Sequence object accepted by subseq().
# @param start,end 1-based coordinates forwarded to subseq().
# @param rev_comp Reverse-complement the result? (logical)
# @return The extracted (possibly reverse-complemented) subsequence.
# Cleanup: `F`/`== T` literals replaced with proper logicals, and the
# duplicated return branches collapsed.
extract_subseq <- function(xstring, start, end, rev_comp = FALSE) {
  # do input checking
  subsequence <- xstring %>% subseq(., start = start, end = end)
  if (rev_comp) {
    subsequence <- subsequence %>% reverseComplement(.)
  }
  subsequence
}
# Extract the genomic region around a single blast hit and append it as
# FASTA to a file on disk.
#
# @param blast_result One-row blast table with sseqid/sstart/send columns.
# @param xstring DNAStringSet holding the candidate contigs.
# @param nuc_buffer c(downstream, upstream) widths added around the hit.
# @param path Output FASTA path (appended to).
fooo <- function(blast_result, xstring, nuc_buffer, path) {
  # Normalize contig names unless they already match the blast sseqid values
  # (the original had an empty TRUE branch; the condition is now inverted).
  if (!identical(blast_result$sseqid, names(xstring))) {
    contig_names <- names(xstring) %>% map_dfr(., extract_id_from_xstring)
    names(xstring) <- contig_names
  }
  # todo: useful error messages are required, e.g. when sseqid is not found
  # in names(xstring)
  hit_position <- blast_result %>% select(sseqid, sstart, send)
  contig <- xstring[xstring@ranges@NAMES == hit_position$sseqid]
  nuc_down <- nuc_buffer[1]
  nuc_up <- nuc_buffer[2]
  # todo: include option for space +/- in function params
  # test positioning of nuc_buffer, evtl nuc_down as negative value ?
  # test what to do if buffer not given?!
  # A hit with sstart > send lies on the minus strand: swap coordinates now,
  # reverse-complement at the end.  This collapses the two near-identical
  # branches of the original code.
  minus_strand <- hit_position$sstart > hit_position$send
  if (minus_strand) {
    hit_position <- hit_position %>% mutate(., sstart2 = send, send2 = sstart, sstart = sstart2, send = send2)
  }
  # NOTE(review): the case_when pairs leave a gap -- when
  # send <= width(contig) < send + nuc_up neither condition is TRUE and
  # send_extr becomes NA; confirm the intended clamping rule.
  extract <- hit_position %>% mutate(sstart_extr = case_when(sstart >= nuc_down ~ sstart - nuc_down,
                                                             sstart < nuc_down ~ 1),
                                     send_extr = case_when(width(contig) >= send + nuc_up ~ send + nuc_up,
                                                           width(contig) < send ~ width(contig) %>% as.double()) )
  fasta <- contig %>% subseq(., start = extract$sstart_extr, end = extract$send_extr)
  if (minus_strand) {
    fasta <- reverseComplement(fasta)
  }
  writeXStringSet(fasta, filepath = path, append = TRUE)
}
# Demo invocation: pull the second blast hit's region with a 345/200 nt
# buffer, append it to /tmp/seqs.fna, then read the file back.
fooo(blast_result = pcc7120_groel_nuccore[2,],
     xstring = readDNAStringSet(filepath = "/home/mirkko/Documents/NCBI/ncbi_nuccore/fasta/17227497.fna", format = "fasta"),
     path = "/tmp/seqs.fna",
     nuc_buffer = c(345, 200))
# NOTE(review): the file written above contains DNA, yet it is re-read with
# readAAStringSet (amino acids) -- presumably readDNAStringSet was intended.
fasta <- readAAStringSet(filepath = "/tmp/seqs.fna", format = "fasta")
|
# Script parameter: runInd selects the parameter-grid cell.  It can be
# overridden from the command line with an argument like "runInd=17".
runInd <- 1
args <- commandArgs(TRUE)
# BUGFIX: seq_along() handles the zero-argument case; the original
# 1:length(args) iterated over c(1, 0) and crashed when no arguments were
# supplied.  WARNING: eval(parse(...)) executes arbitrary command-line text
# as R code; acceptable only for trusted batch-submission scripts.
for(i in seq_along(args)){
  eval(parse(text = args[[i]]))
}
print(runInd)
# Run one replication of the level simulation: draw a random DAG, then test
# the (true) ordering 1..p, timing the test.
#
# @param p Number of nodes.
# @param n Sample size.
# @param distro Error distribution name passed to cdcs::rDAG.
# @param bs Number of bootstrap draws for cdcs::testOrdering.
# @param K Moments used by the test.
# @param parent_prob Edge probability; NULL (default) uses 5/p, matching the
#   original hard-coded behavior.  BUGFIX: previously this argument was
#   accepted but silently ignored (rDAG was always called with 5/p).
# @return Named vector of aggregated p-values plus elapsed time.
run.onceLvl <- function(p, n, distro, bs = 200, K = c(2,3), parent_prob = NULL){
  if (is.null(parent_prob)) {
    parent_prob <- 5 / p
  }
  dat <- cdcs::rDAG(p, n, parent_prob = parent_prob, lowScale = .8,
                    highScale = 1, lowEdge = .2, highEdge = .8,
                    dist = distro, uniqueTop = TRUE)
  # (A stray `max(abs(dat$Y))` whose value was discarded has been removed.)
  outTime <- system.time(pval <- cdcs::testOrdering(dat$Y, seq_len(p), K = K, bs = bs, aggType = 3))
  ret <- c(fisherInfPval = pval["fisherInfPval"],
           fisherOnePval = pval["fisherOnePval"],
           fisherTwoPval = pval["fisherTwoPval"],
           tipettInfPval = pval["tipettInfPval"],
           tipettOnePval = pval["tipettOnePval"],
           tipettTwoPval = pval["tipettTwoPval"], time = outTime[3])
  return(ret)
}
##################
library(parallel)
library(cdcs)
# Simulation driver: run one (p, distribution) cell of the level study.
sample.size <- 1000
rep.runs <- 100
p.list <- c(10, 25, 50, 100, 150, 200)
d.list <- c("gauss", "unif", "lognorm", "gamma", "weibull", "laplace")
# Each (p, distro) pair appears sample.size/rep.runs times in the grid, so
# one runInd covers rep.runs replications of a single cell.
param.grid <- expand.grid(rep(p.list, sample.size / rep.runs), d.list)
## Param grid size 360
p <- param.grid[runInd, 1]
n <- round(p^(15/8))
distro <- param.grid[runInd, 2]
cl <- makeCluster(3)
clusterExport(cl, ls())
out <- t(parSapply(cl, 1:rep.runs, function(x){run.onceLvl(p, n, distro)}))
outTab <- data.frame(p, n, distro, out)
write.csv(outTab, paste("../levelRes/levelRes_", runInd, ".csv", sep = ""))
stopCluster(cl)
# NOTE(review): this extra serial call runs after the cluster is stopped and
# its result is discarded -- presumably leftover debugging; confirm intent.
run.onceLvl(p, n, distro)
| /test/levelValid_test.R | no_license | ysamwang/cdcs | R | false | false | 1,499 | r | runInd <- 1
args <- commandArgs(TRUE)
# BUGFIX: seq_along() handles the zero-argument case; the original
# 1:length(args) iterated over c(1, 0) and crashed when no arguments were
# supplied.  WARNING: eval(parse(...)) executes arbitrary command-line text
# as R code; acceptable only for trusted batch-submission scripts.
for(i in seq_along(args)){
  eval(parse(text = args[[i]]))
}
print(runInd)
# Run one replication of the level simulation: draw a random DAG, then test
# the (true) ordering 1..p, timing the test.
#
# @param p Number of nodes.
# @param n Sample size.
# @param distro Error distribution name passed to cdcs::rDAG.
# @param bs Number of bootstrap draws for cdcs::testOrdering.
# @param K Moments used by the test.
# @param parent_prob Edge probability; NULL (default) uses 5/p, matching the
#   original hard-coded behavior.  BUGFIX: previously this argument was
#   accepted but silently ignored (rDAG was always called with 5/p).
# @return Named vector of aggregated p-values plus elapsed time.
run.onceLvl <- function(p, n, distro, bs = 200, K = c(2,3), parent_prob = NULL){
  if (is.null(parent_prob)) {
    parent_prob <- 5 / p
  }
  dat <- cdcs::rDAG(p, n, parent_prob = parent_prob, lowScale = .8,
                    highScale = 1, lowEdge = .2, highEdge = .8,
                    dist = distro, uniqueTop = TRUE)
  # (A stray `max(abs(dat$Y))` whose value was discarded has been removed.)
  outTime <- system.time(pval <- cdcs::testOrdering(dat$Y, seq_len(p), K = K, bs = bs, aggType = 3))
  ret <- c(fisherInfPval = pval["fisherInfPval"],
           fisherOnePval = pval["fisherOnePval"],
           fisherTwoPval = pval["fisherTwoPval"],
           tipettInfPval = pval["tipettInfPval"],
           tipettOnePval = pval["tipettOnePval"],
           tipettTwoPval = pval["tipettTwoPval"], time = outTime[3])
  return(ret)
}
##################
library(parallel)
library(cdcs)
# Simulation driver: run one (p, distribution) cell of the level study.
sample.size <- 1000
rep.runs <- 100
p.list <- c(10, 25, 50, 100, 150, 200)
d.list <- c("gauss", "unif", "lognorm", "gamma", "weibull", "laplace")
# Each (p, distro) pair appears sample.size/rep.runs times in the grid, so
# one runInd covers rep.runs replications of a single cell.
param.grid <- expand.grid(rep(p.list, sample.size / rep.runs), d.list)
## Param grid size 360
p <- param.grid[runInd, 1]
n <- round(p^(15/8))
distro <- param.grid[runInd, 2]
cl <- makeCluster(3)
clusterExport(cl, ls())
out <- t(parSapply(cl, 1:rep.runs, function(x){run.onceLvl(p, n, distro)}))
outTab <- data.frame(p, n, distro, out)
write.csv(outTab, paste("../levelRes/levelRes_", runInd, ".csv", sep = ""))
stopCluster(cl)
# NOTE(review): this extra serial call runs after the cluster is stopped and
# its result is discarded -- presumably leftover debugging; confirm intent.
run.onceLvl(p, n, distro)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcc_show_bicluster_coverage.R
\name{funcc_show_bicluster_coverage}
\alias{funcc_show_bicluster_coverage}
\title{plotting coverage of each bi-cluster}
\usage{
funcc_show_bicluster_coverage(
fun_mat,
res_input,
not_assigned = TRUE,
max_coverage = 1
)
}
\arguments{
\item{fun_mat}{The data array (n x m x T) where each entry corresponds to the measure of one observation i, i=1,...,n, for a functional variable m, m=1,...,p, at point t, t=1,...,T}
\item{res_input}{An object produced by the funcc_biclust function}
\item{not_assigned}{logical: if TRUE, the cluster of not-assigned elements is also included}
\item{max_coverage}{scalar: percentage of maximum cumulative coverage to be shown}
}
\value{
a figure representing for each bi-cluster the coverage in terms of percentage of included functions
}
\description{
funcc_show_bicluster_coverage graphically shows the coverage of each bi-cluster in terms of percentage of included functions
}
\examples{
data("funCCdata")
res <- funcc_biclust(funCCdata,delta=10,theta=1,alpha=1,beta=0,const_alpha=TRUE)
funcc_show_bicluster_coverage(funCCdata,res)
}
| /man/funcc_show_bicluster_coverage.Rd | no_license | cran/FunCC | R | false | true | 1,190 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcc_show_bicluster_coverage.R
\name{funcc_show_bicluster_coverage}
\alias{funcc_show_bicluster_coverage}
\title{plotting coverage of each bi-cluster}
\usage{
funcc_show_bicluster_coverage(
fun_mat,
res_input,
not_assigned = TRUE,
max_coverage = 1
)
}
\arguments{
\item{fun_mat}{The data array (n x m x T) where each entry corresponds to the measure of one observation i, i=1,...,n, for a functional variable m, m=1,...,p, at point t, t=1,...,T}
\item{res_input}{An object produced by the funcc_biclust function}
\item{not_assigned}{logical: if TRUE, the cluster of not-assigned elements is also included}
\item{max_coverage}{scalar: percentage of maximum cumulative coverage to be shown}
}
\value{
a figure representing for each bi-cluster the coverage in terms of percentage of included functions
}
\description{
funcc_show_bicluster_coverage graphically shows the coverage of each bi-cluster in terms of percentage of included functions
}
\examples{
data("funCCdata")
res <- funcc_biclust(funCCdata,delta=10,theta=1,alpha=1,beta=0,const_alpha=TRUE)
funcc_show_bicluster_coverage(funCCdata,res)
}
|
suppressMessages({
library(tidyverse, quietly = TRUE, warn.conflicts = FALSE)
library(forecast)
library(prophet)
library(qgam)
library(lubridate)
})
#' Preprocess raw weekly sales data.
#'
#' Creates new columns for Primary Key, Day, Weekday,
#' Month, Year, and each holiday (i.e., Christmas,
#' Thanksgiving, Labor Day, Superbowl and Easter).
#' Primary Key is the factor interaction between Store
#' and Department.
#'
#' @param raw_df A data frame of raw weekly sales data.
#' Must contain columns named Date, Store, Dept, and
#' IsHoliday. The Date column must be formatted as "%m/%d/%Y".
#'
#' @return A data frame.
#'
#' @examples
#' test_df <- data.frame(Date = rep('12/01/2020', times = 10),
#' Store = rep(c('1', '2'), times = 5),
#' Dept = rep(c('3', '4'), times = 5),
#' IsHoliday = rep(0, times = 10))
#' processed_df <- preprocess(test_df)
#' head(processed_df)
preprocess <- function(raw_df){
#convert data types
raw_df$Date <- as.Date(raw_df$Date, format = "%m/%d/%Y")
raw_df$Store <- as.factor(raw_df$Store)
raw_df$Dept <- as.factor(raw_df$Dept)
#compute new variables
raw_df$Primary_Key <- interaction(raw_df$Store, raw_df$Dept)
raw_df$Weekly_Sales_boxcox <- BoxCox(raw_df$Weekly_Sales, 0.3)
raw_df$Day <- as.numeric(strftime(raw_df$Date, '%d'))
raw_df$Weekday <- as.factor(strftime(raw_df$Date, '%A'))
raw_df$Week <- as.factor(strftime(raw_df$Date, '%V'))
raw_df$Month <- as.factor(strftime(raw_df$Date, '%B'))
raw_df$Year <- as.factor(strftime(raw_df$Date, '%y'))
raw_df$DayMonthNormalized <- raw_df$Day/days_in_month(raw_df$Date)
raw_df$DayYearNormalized <- yday(raw_df$Date)/yday(as.Date(paste0("12/31/", raw_df$Year), format = "%m/%d/%Y"))
raw_df$DaysFromStart <- as.numeric(raw_df$Date - min(raw_df$Date))
raw_df$IsChristmas <- as.logical(raw_df$IsHoliday & raw_df$Month == 'December')
raw_df$IsThanksgiving <- as.logical(raw_df$IsHoliday & raw_df$Month == 'November')
raw_df$IsLaborDay <- as.logical(raw_df$IsHoliday & raw_df$Month == 'September')
raw_df$IsSuperBowl <- as.logical(raw_df$IsHoliday & raw_df$Month == 'February')
raw_df$IsEaster <- as.logical(raw_df$Date == '2010-04-09' | raw_df$Date == '2011-04-29' | raw_df$Date == '2012-04-13')
#compute holiday lags for qgam
n <- nrow(raw_df)
raw_df$OneWeekBeforeEaster <- as.factor(c(as.numeric(raw_df$IsEaster[2:n]), 0))
raw_df$TwoWeeksBeforeEaster <- as.factor(c(as.numeric(raw_df$IsEaster[3:n]), 0, 0))
raw_df$OneWeekAfterEaster <- as.factor(c(0, as.numeric(raw_df$IsEaster[1:(n-1)])))
raw_df$TwoWeeksAfterEaster <- as.factor(c(0, 0, as.numeric(raw_df$IsEaster[1:(n-2)])))
raw_df$WeekOfEaster <- as.factor(raw_df$IsEaster)
raw_df$OneWeekBeforeChristmas <- as.factor(c(as.numeric(raw_df$IsChristmas[2:n]), 0))
raw_df$TwoWeeksBeforeChristmas <- as.factor(c(as.numeric(raw_df$IsChristmas[3:n]), 0, 0))
raw_df$OneWeekAfterChristmas <- as.factor(c(0, as.numeric(raw_df$IsChristmas[1:(n-1)])))
raw_df$TwoWeeksAfterChristmas <- as.factor(c(0, 0, as.numeric(raw_df$IsChristmas[1:(n-2)])))
raw_df$WeekOfChristmas <- as.factor(raw_df$IsChristmas)
raw_df$OneWeekBeforeThanksgiving <- as.factor(c(as.numeric(raw_df$IsThanksgiving[2:n]), 0))
raw_df$TwoWeeksBeforeThanksgiving <- as.factor(c(as.numeric(raw_df$IsThanksgiving[3:n]), 0, 0))
raw_df$OneWeekAfterThanksgiving <- as.factor(c(0, as.numeric(raw_df$IsThanksgiving[1:(n-1)])))
raw_df$TwoWeeksAfterThanksgiving <- as.factor(c(0, 0, as.numeric(raw_df$IsThanksgiving[1:(n-2)])))
raw_df$WeekOfThanksgiving <- as.factor(raw_df$IsThanksgiving)
raw_df$OneWeekBeforeSuperBowl <- as.factor(c(as.numeric(raw_df$IsSuperBowl[2:n]), 0))
raw_df$TwoWeeksBeforeSuperBowl <- as.factor(c(as.numeric(raw_df$IsSuperBowl[3:n]), 0, 0))
raw_df$OneWeekAfterSuperBowl <- as.factor(c(0, as.numeric(raw_df$IsSuperBowl[1:(n-1)])))
raw_df$TwoWeeksAfterSuperBowl <- as.factor(c(0, 0, as.numeric(raw_df$IsSuperBowl[1:(n-2)])))
raw_df$WeekOfSuperBowl <- as.factor(raw_df$IsSuperBowl)
raw_df$OneWeekBeforeLaborDay <- as.factor(c(as.numeric(raw_df$IsLaborDay[2:n]), 0))
raw_df$TwoWeeksBeforeLaborDay <- as.factor(c(as.numeric(raw_df$IsLaborDay[3:n]), 0, 0))
raw_df$OneWeekAfterLaborDay <- as.factor(c(0, as.numeric(raw_df$IsLaborDay[1:(n-1)])))
raw_df$TwoWeeksAfterLaborDay <- as.factor(c(0, 0, as.numeric(raw_df$IsLaborDay[1:(n-2)])))
raw_df$WeekOfLaborDay <- as.factor(raw_df$IsLaborDay)
return(raw_df)
}
#' Split training and test data for modeling.
#'
#' Reserves the last `weeks` distinct dates of the input
#' for testing; all earlier rows become training data.
#'
#' @param weekly_df A data frame of weekly sales data.
#' Must contain a Date column.
#'
#' @param weeks Number of weeks to reserve for testing.
#' Test data will be taken from the last weeks in the
#' input data frame. Must be less than the total number of
#' weeks in the input data frame.
#'
#' @return A list of two data frames named train and test.
#'
#' @examples
#' test_df <- data.frame(Date = c('12/01/2020', '12/08/2020'),
#'                       Weekly_Sales = c(1000, 2000))
#' split_list <- train_test_split(test_df, weeks = 1)
#' head(split_list$train)
#' head(split_list$test)
train_test_split <- function(weekly_df, weeks = 3){
  #most recent `weeks` distinct dates form the holdout set
  holdout_dates <- sort(unique(weekly_df$Date), decreasing = TRUE)[1:weeks]
  in_holdout <- weekly_df$Date %in% holdout_dates
  return(list('train' = weekly_df[!in_holdout,],
              'test' = weekly_df[in_holdout,]))
}
#' Get the data frame corresponding to a given Primary Key.
#'
#' Subsets a multi-key data frame down to the rows whose
#' Primary_Key equals `key`. When `add_na` is TRUE, the result
#' has one row per week observed anywhere in the input, with
#' NA values for weeks this key is missing.
#'
#' @param multi_key_df A data frame of weekly sales data.
#' Must contain columns for Date and Primary_Key.
#'
#' @param key Level of the Primary_Key column to filter for.
#'
#' @param add_na Boolean specifying whether to fill missing
#' weeks with NA.
#'
#' @return A data frame.
#'
#' @examples
#' test_df <- data.frame(Date = c('12/01/2020', '12/07/2020', '12/01/2020'),
#'                       Primary_Key = c('1.1', '1.1', '1.2'),
#'                       Weekly_Sales = c(1000, 2000, 1500))
#' key_df <- get_key_df(test_df, key = '1.2', add_na = FALSE)
#' key_df_na <- get_key_df(test_df, key = '1.2', add_na = TRUE)
#' head(key_df)
#' head(key_df_na)
get_key_df <- function(multi_key_df, key, add_na = TRUE){
  #rows belonging to the requested key only
  key_df <- multi_key_df %>%
    filter(Primary_Key == key)
  if (add_na){
    all_dates <- sort(unique(multi_key_df$Date))
    #scaffold with every observed week; left_join pads absent weeks with NA
    scaffold <- data.frame('Date' = all_dates,
                           'Primary_Key' = rep(key, length(all_dates)))
    suppressMessages({
      key_df <- left_join(scaffold, key_df)
    })
  }
  return(key_df)
}
#' Produce a nested data frame for each key.
#'
#' Nests a multi-key weekly sales data frame by each level of
#' Primary_Key. When `add_na` is TRUE, every key's series is
#' first padded with NA rows for any weeks that are missing, so
#' all nested frames share the same set of dates.
#'
#' @param multi_key_df A data frame of weekly sales data.
#' Must contain columns for Date and Primary_Key.
#'
#' @param add_na Boolean specifying whether to fill missing
#' weeks with NA.
#'
#' @return A data frame with two columns: Primary_Key and data.
#' The data column is a list column holding one data frame per
#' level of Primary_Key.
#'
#' @examples
#' test_df <- data.frame(Date = c('12/01/2020', '12/07/2020', '12/01/2020'),
#'                       Primary_Key = c('1.1', '1.1', '1.2'),
#'                       Weekly_Sales = c(1000, 2000, 1500))
#' nested_df <- nest_by_key(test_df, add_na = FALSE)
#' nested_df_na <- nest_by_key(test_df, add_na = TRUE)
#' head(nested_df)
#' head(nested_df_na)
nest_by_key <- function(multi_key_df, add_na = TRUE){
  if (add_na){
    all_dates <- sort(unique(multi_key_df$Date))
    all_keys <- unique(multi_key_df$Primary_Key)
    #full date-by-key grid; left_join pads absent weeks with NA rows
    full_grid <- data.frame('Date' = rep(all_dates, times = length(all_keys)),
                            'Primary_Key' = rep(all_keys, each = length(all_dates)))
    suppressMessages({
      multi_key_df <- left_join(full_grid, multi_key_df)
    })
  }
  #one row per key; the data list-column holds that key's series
  nested <- multi_key_df %>%
    group_by(Primary_Key) %>%
    nest()
  return(nested)
}
#' Produce a holiday data frame formatted for Prophet.
#'
#' @param train A data frame of weekly sales training
#' data. Must contain a Date column and logical columns for
#' each of the holidays of interest. The names of the holiday
#' columns should be formatted in camel case as IsHoliday
#' where "Holiday" is the holiday of interest (e.g.,
#' IsChristmas, IsEaster, etc.).
#'
#' @param test A data frame of weekly sales testing
#' data. Must be formatted similarly to the train data frame.
#'
#' @param holidays A list of holidays to include. For each
#' holiday, a corresponding "IsHoliday" column must be present
#' in the train and test data frames.
#'
#' @param lower_window Int specifying a range of days prior to
#' the date to be included as holidays. For example,
#' lower_window = -14 will include the two weeks prior to the
#' date as holidays. Must be a negative number. See Prophet
#' docs for more information.
#'
#' @param upper_window Int specifying a range of days after
#' the date to be included as holidays. For example,
#' upper_window = 14 will include 2 weeks after the date as
#' holidays. Must be a positive number. See Prophet
#' docs for more information.
#'
#' @return A data frame with columns holiday (character) and
#' ds (date type) and columns lower_window and upper_window
#' which specify a range of days around the date to be
#' included as holidays.
#'
#' @examples
#' train_start_date <- as.Date('06/12/2019', format = '%d/%m/%Y')
#' test_start_date <- as.Date('04/12/2020', format = '%d/%m/%Y')
#' train_df <- data.frame(Date = seq(train_start_date, length.out = 10, by = 7),
#'                        IsChristmas = as.logical(c(0, 0, 0, 1, 0, 0, 0, 0, 0, 0)))
#' test_df <- data.frame(Date = seq(test_start_date, length.out = 10, by = 7),
#'                       IsChristmas = as.logical(c(0, 0, 0, 1, 0, 0, 0, 0, 0, 0)))
#' holiday_df <- get_holiday_df(train_df, test_df,
#'                              holidays = c('Christmas'),
#'                              lower_window = -14, upper_window = 7)
#' head(holiday_df)
get_holiday_df <- function(train, test, holidays, lower_window = 0, upper_window = 0){
  #capitalize the first letter of each word (e.g., 'christmas' -> 'Christmas')
  capwords <- function(s, strict = FALSE) {
    cap <- function(s) paste(toupper(substring(s, 1, 1)),
                             {s <- substring(s, 2); if(strict) tolower(s) else s},
                             sep = "", collapse = " " )
    sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s)))
  }
  holiday_col <- c()
  ds_col <- c()
  for (h in holidays){
    bool_name <- paste0('Is', capwords(h))
    #fail fast with a clear message instead of a confusing subscript error
    if (!(bool_name %in% colnames(train)) || !(bool_name %in% colnames(test))) {
      stop("Column ", bool_name, " must be present in both train and test.",
           call. = FALSE)
    }
    #[[ extraction returns a plain logical vector for both data.frames and
    #tibbles; the previous `train[, colnames(train) == bool_name]` returned
    #a one-column tibble for tibble input, breaking the subsetting below
    isHoliday_train <- train[[bool_name]]
    isHoliday_test <- test[[bool_name]]
    holiday_dates <- c(unique(train$Date[isHoliday_train]),
                       unique(test$Date[isHoliday_test]))
    holiday_col <- c(holiday_col, rep(h, length(holiday_dates)))
    #c() strips the Date class, so ds_col accumulates day counts since
    #1970-01-01; it is converted back to Date below
    ds_col <- c(ds_col, holiday_dates)
  }
  #rep(..., length(holiday_col)) keeps data.frame() valid when no holiday
  #dates were found (zero-row result instead of a length-recycling error)
  holiday_df <- data.frame(holiday = holiday_col,
                           ds = as.Date(ds_col, origin ="1970-01-01"),
                           lower_window = rep(lower_window, length(holiday_col)),
                           upper_window = rep(upper_window, length(holiday_col)))
  return(holiday_df)
}
#' Perform time series cross-validation on all models.
#'
#' Given weekly sales data for a single key, and a list of
#' cutoff dates, trains on data occurring before the cutoff
#' dates and tests on the 3 weeks occurring after. Trains a
#' separate model for each cutoff date. Candidate models are
#' Prophet, optionally qgam, plus naive, mean and median
#' forecast baselines.
#'
#' @param data A data frame of weekly sales data.
#' Must contain columns for Date, Primary_Key, and
#' Weekly_Sales. Weeks with missing data should be filled
#' with NA.
#'
#' @param cutoff_dates A list of cutoff dates for creating
#' train and test splits on each cross-validation loop.
#'
#' @param holidays A holiday data frame formatted for use
#' with Prophet.
#'
#' @param fit_qgam Logical; if TRUE, also fits a qgam median
#' (qu = 0.5) model with holiday-lag and smooth features and
#' includes its errors in the output. Defaults to FALSE.
#'
#' @return A data frame with the following columns: error,
#' obs, mean_diff, med_diff, horizon, and model. The error
#' column is yhat - y for each prediction. It is set to NA
#' for predictions with no corresponding observation. The
#' obs column is the corresponding observations. The
#' mean_diff column is the mean of the diff of the weekly
#' sales for each set of training data. The med_diff column
#' is the median instead of the mean. The horizon column
#' gives the number of weeks each prediction is from the
#' last date in the training data. The model column gives
#' the name of the model that made each prediction. Returns
#' NA for short series or if any error/warning is raised.
ts_cv <- function(data, cutoff_dates, holidays, fit_qgam = FALSE) {
  #skip short series: fewer than 100 observed weeks is too little to fit
  if (sum(!is.na(data$Weekly_Sales)) < 100){
    return(NA)
  }
  #NOTE(review): the warning handler below converts ANY warning into NA,
  #silently discarding the whole CV result — confirm this is intentional
  tryCatch(
    {
      #rename cols to the ds/y names Prophet expects
      data <- data %>%
        rename(ds = Date) %>%
        rename(y = Weekly_Sales)
      #init vars (error vectors accumulated across CV folds)
      prophet_error <- c()
      qgam_error <- c()
      naive_error <- c()
      meanf_error <- c()
      medf_error <- c()
      obs <- c()
      mean_diff <- c()
      med_diff <- c()
      for (i in seq(1, length(cutoff_dates))) {
        #get train and test sets
        #NOTE(review): assumes data is sorted ascending by date and that
        #each cutoff has at least 3 rows after it — confirm upstream
        date_idx <- which(data$ds == cutoff_dates[i])
        train <- data[1:date_idx,]
        test <- data[(date_idx+1):(date_idx+3),]
        #fit prophet (yearly.seasonality = 6 sets the Fourier order;
        #fit = FALSE defers fitting until after the monthly term is added)
        prophet_model <- prophet(growth = 'linear',
                                 yearly.seasonality = 6,
                                 weekly.seasonality = FALSE,
                                 daily.seasonality = FALSE,
                                 seasonality.mode = "additive",
                                 holidays = holidays,
                                 fit = FALSE)
        prophet_model <- add_seasonality(prophet_model, name='monthly', period=30.5, fourier.order=1)
        prophet_model <- fit.prophet(prophet_model, df = train)
        #get forecast: 3 weekly steps past the training window
        future = make_future_dataframe(prophet_model, periods = 3, freq = 'week')
        prophet_forecast = predict(prophet_model, future)
        if (fit_qgam){
          #fit qgam: median regression on holiday lags plus smooth trend
          #(gp), adaptive yearly (ad) and cyclic monthly (cp) terms
          qgam_model <- qgam(y ~ WeekOfEaster +
                               OneWeekBeforeEaster + TwoWeeksBeforeEaster +
                               TwoWeeksBeforeThanksgiving + OneWeekBeforeThanksgiving +
                               WeekOfThanksgiving +
                               s(DaysFromStart, bs = "gp", k=45) +
                               s(DayYearNormalized, bs = "ad", k=52) +
                               s(DayMonthNormalized, bs = "cp"),
                             control = list(progress = FALSE),
                             qu = 0.5,
                             data = train)
          #get forecast
          qgam_forecast <- predict(object = qgam_model,
                                   newdata = test,
                                   type = "response")
        }
        #get test errors (yhat - y for the 3 held-out weeks)
        #drop in-sample fitted values; keep only the 3 future predictions
        prophet_error <- c(prophet_error, prophet_forecast$yhat[-(1:nrow(train))] - test$y)
        if (fit_qgam){
          qgam_error <- c(qgam_error, qgam_forecast - test$y)
        }
        #baselines: last observed value, training mean, training median
        naive_error <- c(naive_error, train$y[nrow(train)] - test$y)
        meanf_error <- c(meanf_error, mean(train$y, na.rm = TRUE) - test$y)
        medf_error <- c(medf_error, median(train$y, na.rm = TRUE) - test$y)
        obs <- c(obs, test$y)
        #per-fold scaling terms for MASE/MAMSE, repeated once per horizon
        mean_diff <- c(mean_diff, rep(mean(abs(diff(train$y)), na.rm = TRUE), 3))
        med_diff <- c(med_diff, rep(median(abs(diff(train$y)), na.rm = TRUE), 3))
      }
      #stack per-model error vectors into long format; horizon cycles
      #1..3 within each fold, and model labels follow the stacking order
      if (fit_qgam) {
        error_df <- tibble('error' = c(prophet_error, qgam_error, naive_error, meanf_error, medf_error),
                           'obs' = rep(obs, times = 5),
                           'mean_diff' = rep(mean_diff, times = 5),
                           'med_diff' = rep(med_diff, times = 5),
                           'horizon' = rep(seq(1, 3), times = 5*length(cutoff_dates)),
                           'model' = rep(c('prophet', 'qgam', 'naive', 'meanf', 'medf'), each = 3*length(cutoff_dates)))
      } else {
        error_df <- tibble('error' = c(prophet_error, naive_error, meanf_error, medf_error),
                           'obs' = rep(obs, times = 4),
                           'mean_diff' = rep(mean_diff, times = 4),
                           'med_diff' = rep(med_diff, times = 4),
                           'horizon' = rep(seq(1, 3), times = 4*length(cutoff_dates)),
                           'model' = rep(c('prophet', 'naive', 'meanf', 'medf'), each = 3*length(cutoff_dates)))
      }
      return(error_df)
    },
    error=function(cond) {
      # print(cond)
      return(NA)
    },
    warning=function(cond) {
      # print(cond)
      return(NA)
    }
  )
}
#' Compute common forecasting performance metrics.
#'
#' Computes rmse, mae, mape, mase, and mamse from an
#' error data frame returned by ts_cv. Returns NA if the
#' input cannot be summarised (e.g., ts_cv returned NA).
#'
#' @param error_df The data frame returned by ts_cv (see
#' ?ts_cv)
#'
#' @return A data frame with the following columns: model,
#' horizon, metric, and value. The model column gives the
#' name of the forecasting model being measured. The
#' horizon column gives the number of weeks each prediction
#' is from the last date in the training data. The metric
#' column gives the name of the computed performance metric.
#' It can be any of: rmse, mae, mape, mase, or mamse. The
#' value column gives the estimated value of the performance
#' metric.
gather_metrics <- function(error_df) {
  tryCatch(
    {
      #one row per model/horizon, then pivot the five metrics to long format
      metric_df <- error_df %>%
        group_by(model, horizon) %>%
        summarise(rmse = sqrt(mean(error^2, na.rm = TRUE)),
                  mae = mean(abs(error), na.rm = TRUE),
                  mape = 100*mean(abs(error)/obs, na.rm = TRUE),
                  mase = mean(abs(error)/mean_diff, na.rm = TRUE),
                  mamse = mean(abs(error)/med_diff, na.rm = TRUE),
                  .groups = 'drop') %>%
        pivot_longer(cols = c('rmse', 'mae', 'mape', 'mase', 'mamse'),
                     names_to = 'metric',
                     values_to = 'value')
      #explicit return: previously the expression ended with an assignment,
      #so the result was returned invisibly and printed nothing at the console
      return(metric_df)
    },
    error=function(cond) {
      return(NA)
    },
    warning=function(cond) {
      return(NA)
    }
  )
}
#' Count the number of weeks with sales reported.
#'
#' Returns the number of rows with non-NA values for the
#' Weekly_Sales column.
#'
#' @param df A data frame of weekly sales data. Must
#' contain a Weekly_Sales column.
#'
#' @return A numeric giving the number of rows with non-NA
#' values for the Weekly_Sales column.
#'
#' @examples
#' test_df <- data.frame(Date = c('12/01/2020', '12/07/2020'),
#'                       Primary_Key = c('1.1', '1.1'),
#'                       Weekly_Sales = c(1000, NA))
#' n <- num_obs(test_df)
#' print(n)
num_obs <- function(df){
  observed <- !is.na(df$Weekly_Sales)
  as.numeric(sum(observed))
}
#' Get the smallest reported sales.
#'
#' Returns the min of the Weekly_Sales column, ignoring NAs.
#'
#' @param df A data frame of weekly sales data. Must
#' contain a Weekly_Sales column.
#'
#' @return A numeric giving the min of the Weekly_Sales
#' column.
#'
#' @examples
#' test_df <- data.frame(Date = c('12/01/2020', '12/07/2020'),
#'                       Primary_Key = c('1.1', '1.1'),
#'                       Weekly_Sales = c(1000, NA))
#' n <- min_sales(test_df)
#' print(n)
min_sales <- function(df){
  sales <- df$Weekly_Sales
  min(sales[!is.na(sales)])
}
| /R/helpers.R | no_license | bgereke/salesforecast | R | false | false | 19,427 | r | suppressMessages({
library(tidyverse, quietly = TRUE, warn.conflicts = FALSE)
library(forecast)
library(prophet)
library(qgam)
library(lubridate)
})
#' Preprocess raw weekly sales data.
#'
#' Creates new columns for Primary Key, Day, Weekday,
#' Week, Month, Year, normalized day-of-month/day-of-year,
#' and each holiday (i.e., Christmas, Thanksgiving,
#' Labor Day, Superbowl and Easter), plus one- and two-week
#' lead/lag holiday indicators used by the qgam model.
#' Primary Key is the factor interaction between Store
#' and Department.
#'
#' @param raw_df A data frame of raw weekly sales data.
#' Must contain columns named Date, Store, Dept, Weekly_Sales,
#' and IsHoliday. The Date column must be formatted as "%m/%d/%Y".
#'
#' @return A data frame.
#'
#' @examples
#' test_df <- data.frame(Date = rep('12/01/2020', times = 10),
#'                       Store = rep(c('1', '2'), times = 5),
#'                       Dept = rep(c('3', '4'), times = 5),
#'                       Weekly_Sales = rnorm(10, mean = 1000, sd = 10),
#'                       IsHoliday = rep(0, times = 10))
#' processed_df <- preprocess(test_df)
#' head(processed_df)
preprocess <- function(raw_df){
  #convert data types
  raw_df$Date <- as.Date(raw_df$Date, format = "%m/%d/%Y")
  raw_df$Store <- as.factor(raw_df$Store)
  raw_df$Dept <- as.factor(raw_df$Dept)
  #compute new variables
  raw_df$Primary_Key <- interaction(raw_df$Store, raw_df$Dept)
  #fixed-lambda Box-Cox transform (forecast::BoxCox) to stabilize variance
  raw_df$Weekly_Sales_boxcox <- BoxCox(raw_df$Weekly_Sales, 0.3)
  raw_df$Day <- as.numeric(strftime(raw_df$Date, '%d'))
  #NOTE(review): %A/%B are locale-dependent; the holiday flags below compare
  #against English month names — confirm an English locale at runtime
  raw_df$Weekday <- as.factor(strftime(raw_df$Date, '%A'))
  raw_df$Week <- as.factor(strftime(raw_df$Date, '%V'))
  raw_df$Month <- as.factor(strftime(raw_df$Date, '%B'))
  raw_df$Year <- as.factor(strftime(raw_df$Date, '%y'))
  raw_df$DayMonthNormalized <- raw_df$Day/days_in_month(raw_df$Date)
  #BUGFIX: build Dec 31 from the 4-digit year. The previous code parsed the
  #2-digit Year column ("10", "11", ...) with %Y, yielding years like 0010,
  #so leap years (366 days) were never detected and DayYearNormalized was
  #slightly wrong in leap years.
  raw_df$DayYearNormalized <- yday(raw_df$Date)/yday(as.Date(paste0(strftime(raw_df$Date, '%Y'), "-12-31")))
  raw_df$DaysFromStart <- as.numeric(raw_df$Date - min(raw_df$Date))
  raw_df$IsChristmas <- as.logical(raw_df$IsHoliday & raw_df$Month == 'December')
  raw_df$IsThanksgiving <- as.logical(raw_df$IsHoliday & raw_df$Month == 'November')
  raw_df$IsLaborDay <- as.logical(raw_df$IsHoliday & raw_df$Month == 'September')
  raw_df$IsSuperBowl <- as.logical(raw_df$IsHoliday & raw_df$Month == 'February')
  #Easter dates are hard-coded for the 2010-2012 Walmart sample
  raw_df$IsEaster <- as.logical(raw_df$Date == '2010-04-09' | raw_df$Date == '2011-04-29' | raw_df$Date == '2012-04-13')
  #compute holiday lags for qgam
  #NOTE(review): these row shifts assume the frame is sorted ascending by
  #Date within a single key; on a multi-key frame flags can bleed across
  #keys at the boundaries — confirm upstream ordering
  n <- nrow(raw_df)
  raw_df$OneWeekBeforeEaster <- as.factor(c(as.numeric(raw_df$IsEaster[2:n]), 0))
  raw_df$TwoWeeksBeforeEaster <- as.factor(c(as.numeric(raw_df$IsEaster[3:n]), 0, 0))
  raw_df$OneWeekAfterEaster <- as.factor(c(0, as.numeric(raw_df$IsEaster[1:(n-1)])))
  raw_df$TwoWeeksAfterEaster <- as.factor(c(0, 0, as.numeric(raw_df$IsEaster[1:(n-2)])))
  raw_df$WeekOfEaster <- as.factor(raw_df$IsEaster)
  raw_df$OneWeekBeforeChristmas <- as.factor(c(as.numeric(raw_df$IsChristmas[2:n]), 0))
  raw_df$TwoWeeksBeforeChristmas <- as.factor(c(as.numeric(raw_df$IsChristmas[3:n]), 0, 0))
  raw_df$OneWeekAfterChristmas <- as.factor(c(0, as.numeric(raw_df$IsChristmas[1:(n-1)])))
  raw_df$TwoWeeksAfterChristmas <- as.factor(c(0, 0, as.numeric(raw_df$IsChristmas[1:(n-2)])))
  raw_df$WeekOfChristmas <- as.factor(raw_df$IsChristmas)
  raw_df$OneWeekBeforeThanksgiving <- as.factor(c(as.numeric(raw_df$IsThanksgiving[2:n]), 0))
  raw_df$TwoWeeksBeforeThanksgiving <- as.factor(c(as.numeric(raw_df$IsThanksgiving[3:n]), 0, 0))
  raw_df$OneWeekAfterThanksgiving <- as.factor(c(0, as.numeric(raw_df$IsThanksgiving[1:(n-1)])))
  raw_df$TwoWeeksAfterThanksgiving <- as.factor(c(0, 0, as.numeric(raw_df$IsThanksgiving[1:(n-2)])))
  raw_df$WeekOfThanksgiving <- as.factor(raw_df$IsThanksgiving)
  raw_df$OneWeekBeforeSuperBowl <- as.factor(c(as.numeric(raw_df$IsSuperBowl[2:n]), 0))
  raw_df$TwoWeeksBeforeSuperBowl <- as.factor(c(as.numeric(raw_df$IsSuperBowl[3:n]), 0, 0))
  raw_df$OneWeekAfterSuperBowl <- as.factor(c(0, as.numeric(raw_df$IsSuperBowl[1:(n-1)])))
  raw_df$TwoWeeksAfterSuperBowl <- as.factor(c(0, 0, as.numeric(raw_df$IsSuperBowl[1:(n-2)])))
  raw_df$WeekOfSuperBowl <- as.factor(raw_df$IsSuperBowl)
  raw_df$OneWeekBeforeLaborDay <- as.factor(c(as.numeric(raw_df$IsLaborDay[2:n]), 0))
  raw_df$TwoWeeksBeforeLaborDay <- as.factor(c(as.numeric(raw_df$IsLaborDay[3:n]), 0, 0))
  raw_df$OneWeekAfterLaborDay <- as.factor(c(0, as.numeric(raw_df$IsLaborDay[1:(n-1)])))
  raw_df$TwoWeeksAfterLaborDay <- as.factor(c(0, 0, as.numeric(raw_df$IsLaborDay[1:(n-2)])))
  raw_df$WeekOfLaborDay <- as.factor(raw_df$IsLaborDay)
  return(raw_df)
}
#' Split training and test data for modeling.
#'
#' Reserves the last `weeks` distinct dates of the input
#' for testing; all earlier rows become training data.
#'
#' @param weekly_df A data frame of weekly sales data.
#' Must contain a Date column.
#'
#' @param weeks Number of weeks to reserve for testing.
#' Test data will be taken from the last weeks in the
#' input data frame. Must be less than the total number of
#' weeks in the input data frame.
#'
#' @return A list of two data frames named train and test.
#'
#' @examples
#' test_df <- data.frame(Date = c('12/01/2020', '12/08/2020'),
#'                       Weekly_Sales = c(1000, 2000))
#' split_list <- train_test_split(test_df, weeks = 1)
#' head(split_list$train)
#' head(split_list$test)
train_test_split <- function(weekly_df, weeks = 3){
  #most recent `weeks` distinct dates form the holdout set
  holdout_dates <- sort(unique(weekly_df$Date), decreasing = TRUE)[1:weeks]
  in_holdout <- weekly_df$Date %in% holdout_dates
  return(list('train' = weekly_df[!in_holdout,],
              'test' = weekly_df[in_holdout,]))
}
#' Get the data frame corresponding to a given Primary Key.
#'
#' Subsets a multi-key data frame down to the rows whose
#' Primary_Key equals `key`. When `add_na` is TRUE, the result
#' has one row per week observed anywhere in the input, with
#' NA values for weeks this key is missing.
#'
#' @param multi_key_df A data frame of weekly sales data.
#' Must contain columns for Date and Primary_Key.
#'
#' @param key Level of the Primary_Key column to filter for.
#'
#' @param add_na Boolean specifying whether to fill missing
#' weeks with NA.
#'
#' @return A data frame.
#'
#' @examples
#' test_df <- data.frame(Date = c('12/01/2020', '12/07/2020', '12/01/2020'),
#'                       Primary_Key = c('1.1', '1.1', '1.2'),
#'                       Weekly_Sales = c(1000, 2000, 1500))
#' key_df <- get_key_df(test_df, key = '1.2', add_na = FALSE)
#' key_df_na <- get_key_df(test_df, key = '1.2', add_na = TRUE)
#' head(key_df)
#' head(key_df_na)
get_key_df <- function(multi_key_df, key, add_na = TRUE){
  #rows belonging to the requested key only
  key_df <- multi_key_df %>%
    filter(Primary_Key == key)
  if (add_na){
    all_dates <- sort(unique(multi_key_df$Date))
    #scaffold with every observed week; left_join pads absent weeks with NA
    scaffold <- data.frame('Date' = all_dates,
                           'Primary_Key' = rep(key, length(all_dates)))
    suppressMessages({
      key_df <- left_join(scaffold, key_df)
    })
  }
  return(key_df)
}
#' Produce a nested data frame for each key.
#'
#' Nests a multi-key weekly sales data frame by each level of
#' Primary_Key. When `add_na` is TRUE, every key's series is
#' first padded with NA rows for any weeks that are missing, so
#' all nested frames share the same set of dates.
#'
#' @param multi_key_df A data frame of weekly sales data.
#' Must contain columns for Date and Primary_Key.
#'
#' @param add_na Boolean specifying whether to fill missing
#' weeks with NA.
#'
#' @return A data frame with two columns: Primary_Key and data.
#' The data column is a list column holding one data frame per
#' level of Primary_Key.
#'
#' @examples
#' test_df <- data.frame(Date = c('12/01/2020', '12/07/2020', '12/01/2020'),
#'                       Primary_Key = c('1.1', '1.1', '1.2'),
#'                       Weekly_Sales = c(1000, 2000, 1500))
#' nested_df <- nest_by_key(test_df, add_na = FALSE)
#' nested_df_na <- nest_by_key(test_df, add_na = TRUE)
#' head(nested_df)
#' head(nested_df_na)
nest_by_key <- function(multi_key_df, add_na = TRUE){
  if (add_na){
    all_dates <- sort(unique(multi_key_df$Date))
    all_keys <- unique(multi_key_df$Primary_Key)
    #full date-by-key grid; left_join pads absent weeks with NA rows
    full_grid <- data.frame('Date' = rep(all_dates, times = length(all_keys)),
                            'Primary_Key' = rep(all_keys, each = length(all_dates)))
    suppressMessages({
      multi_key_df <- left_join(full_grid, multi_key_df)
    })
  }
  #one row per key; the data list-column holds that key's series
  nested <- multi_key_df %>%
    group_by(Primary_Key) %>%
    nest()
  return(nested)
}
#' Produce a holiday data frame formatted for Prophet.
#'
#' @param train A data frame of weekly sales training
#' data. Must contain a Date column and logical columns for
#' each of the holidays of interest. The names of the holiday
#' columns should be formatted in camel case as IsHoliday
#' where "Holiday" is the holiday of interest (e.g.,
#' IsChristmas, IsEaster, etc.).
#'
#' @param test A data frame of weekly sales testing
#' data. Must be formatted similarly to the train data frame.
#'
#' @param holidays A list of holidays to include. For each
#' holiday, a corresponding "IsHoliday" column must be present
#' in the train and test data frames.
#'
#' @param lower_window Int specifying a range of days prior to
#' the date to be included as holidays. For example,
#' lower_window = -14 will include the two weeks prior to the
#' date as holidays. Must be a negative number. See Prophet
#' docs for more information.
#'
#' @param upper_window Int specifying a range of days after
#' the date to be included as holidays. For example,
#' upper_window = 14 will include 2 weeks after the date as
#' holidays. Must be a positive number. See Prophet
#' docs for more information.
#'
#' @return A data frame with columns holiday (character) and
#' ds (date type) and columns lower_window and upper_window
#' which specify a range of days around the date to be
#' included as holidays.
#'
#' @examples
#' train_start_date <- as.Date('06/12/2019', format = '%d/%m/%Y')
#' test_start_date <- as.Date('04/12/2020', format = '%d/%m/%Y')
#' train_df <- data.frame(Date = seq(train_start_date, length.out = 10, by = 7),
#'                        IsChristmas = as.logical(c(0, 0, 0, 1, 0, 0, 0, 0, 0, 0)))
#' test_df <- data.frame(Date = seq(test_start_date, length.out = 10, by = 7),
#'                       IsChristmas = as.logical(c(0, 0, 0, 1, 0, 0, 0, 0, 0, 0)))
#' holiday_df <- get_holiday_df(train_df, test_df,
#'                              holidays = c('Christmas'),
#'                              lower_window = -14, upper_window = 7)
#' head(holiday_df)
get_holiday_df <- function(train, test, holidays, lower_window = 0, upper_window = 0){
  #capitalize the first letter of each word (e.g., 'christmas' -> 'Christmas')
  capwords <- function(s, strict = FALSE) {
    cap <- function(s) paste(toupper(substring(s, 1, 1)),
                             {s <- substring(s, 2); if(strict) tolower(s) else s},
                             sep = "", collapse = " " )
    sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s)))
  }
  holiday_col <- c()
  ds_col <- c()
  for (h in holidays){
    bool_name <- paste0('Is', capwords(h))
    #fail fast with a clear message instead of a confusing subscript error
    if (!(bool_name %in% colnames(train)) || !(bool_name %in% colnames(test))) {
      stop("Column ", bool_name, " must be present in both train and test.",
           call. = FALSE)
    }
    #[[ extraction returns a plain logical vector for both data.frames and
    #tibbles; the previous `train[, colnames(train) == bool_name]` returned
    #a one-column tibble for tibble input, breaking the subsetting below
    isHoliday_train <- train[[bool_name]]
    isHoliday_test <- test[[bool_name]]
    holiday_dates <- c(unique(train$Date[isHoliday_train]),
                       unique(test$Date[isHoliday_test]))
    holiday_col <- c(holiday_col, rep(h, length(holiday_dates)))
    #c() strips the Date class, so ds_col accumulates day counts since
    #1970-01-01; it is converted back to Date below
    ds_col <- c(ds_col, holiday_dates)
  }
  #rep(..., length(holiday_col)) keeps data.frame() valid when no holiday
  #dates were found (zero-row result instead of a length-recycling error)
  holiday_df <- data.frame(holiday = holiday_col,
                           ds = as.Date(ds_col, origin ="1970-01-01"),
                           lower_window = rep(lower_window, length(holiday_col)),
                           upper_window = rep(upper_window, length(holiday_col)))
  return(holiday_df)
}
#' Perform time series cross-validation on all models.
#'
#' Given weekly sales data for a single key, and a list of
#' cutoff dates, trains on data occurring before the cutoff
#' dates and tests on the 3 weeks occurring after. Trains a
#' separate model for each cutoff date. Candidate models are
#' Prophet, optionally qgam, plus naive, mean and median
#' forecast baselines.
#'
#' @param data A data frame of weekly sales data.
#' Must contain columns for Date, Primary_Key, and
#' Weekly_Sales. Weeks with missing data should be filled
#' with NA.
#'
#' @param cutoff_dates A list of cutoff dates for creating
#' train and test splits on each cross-validation loop.
#'
#' @param holidays A holiday data frame formatted for use
#' with Prophet.
#'
#' @param fit_qgam Logical; if TRUE, also fits a qgam median
#' (qu = 0.5) model with holiday-lag and smooth features and
#' includes its errors in the output. Defaults to FALSE.
#'
#' @return A data frame with the following columns: error,
#' obs, mean_diff, med_diff, horizon, and model. The error
#' column is yhat - y for each prediction. It is set to NA
#' for predictions with no corresponding observation. The
#' obs column is the corresponding observations. The
#' mean_diff column is the mean of the diff of the weekly
#' sales for each set of training data. The med_diff column
#' is the median instead of the mean. The horizon column
#' gives the number of weeks each prediction is from the
#' last date in the training data. The model column gives
#' the name of the model that made each prediction. Returns
#' NA for short series or if any error/warning is raised.
ts_cv <- function(data, cutoff_dates, holidays, fit_qgam = FALSE) {
  #skip short series: fewer than 100 observed weeks is too little to fit
  if (sum(!is.na(data$Weekly_Sales)) < 100){
    return(NA)
  }
  #NOTE(review): the warning handler below converts ANY warning into NA,
  #silently discarding the whole CV result — confirm this is intentional
  tryCatch(
    {
      #rename cols to the ds/y names Prophet expects
      data <- data %>%
        rename(ds = Date) %>%
        rename(y = Weekly_Sales)
      #init vars (error vectors accumulated across CV folds)
      prophet_error <- c()
      qgam_error <- c()
      naive_error <- c()
      meanf_error <- c()
      medf_error <- c()
      obs <- c()
      mean_diff <- c()
      med_diff <- c()
      for (i in seq(1, length(cutoff_dates))) {
        #get train and test sets
        #NOTE(review): assumes data is sorted ascending by date and that
        #each cutoff has at least 3 rows after it — confirm upstream
        date_idx <- which(data$ds == cutoff_dates[i])
        train <- data[1:date_idx,]
        test <- data[(date_idx+1):(date_idx+3),]
        #fit prophet (yearly.seasonality = 6 sets the Fourier order;
        #fit = FALSE defers fitting until after the monthly term is added)
        prophet_model <- prophet(growth = 'linear',
                                 yearly.seasonality = 6,
                                 weekly.seasonality = FALSE,
                                 daily.seasonality = FALSE,
                                 seasonality.mode = "additive",
                                 holidays = holidays,
                                 fit = FALSE)
        prophet_model <- add_seasonality(prophet_model, name='monthly', period=30.5, fourier.order=1)
        prophet_model <- fit.prophet(prophet_model, df = train)
        #get forecast: 3 weekly steps past the training window
        future = make_future_dataframe(prophet_model, periods = 3, freq = 'week')
        prophet_forecast = predict(prophet_model, future)
        if (fit_qgam){
          #fit qgam: median regression on holiday lags plus smooth trend
          #(gp), adaptive yearly (ad) and cyclic monthly (cp) terms
          qgam_model <- qgam(y ~ WeekOfEaster +
                               OneWeekBeforeEaster + TwoWeeksBeforeEaster +
                               TwoWeeksBeforeThanksgiving + OneWeekBeforeThanksgiving +
                               WeekOfThanksgiving +
                               s(DaysFromStart, bs = "gp", k=45) +
                               s(DayYearNormalized, bs = "ad", k=52) +
                               s(DayMonthNormalized, bs = "cp"),
                             control = list(progress = FALSE),
                             qu = 0.5,
                             data = train)
          #get forecast
          qgam_forecast <- predict(object = qgam_model,
                                   newdata = test,
                                   type = "response")
        }
        #get test errors (yhat - y for the 3 held-out weeks)
        #drop in-sample fitted values; keep only the 3 future predictions
        prophet_error <- c(prophet_error, prophet_forecast$yhat[-(1:nrow(train))] - test$y)
        if (fit_qgam){
          qgam_error <- c(qgam_error, qgam_forecast - test$y)
        }
        #baselines: last observed value, training mean, training median
        naive_error <- c(naive_error, train$y[nrow(train)] - test$y)
        meanf_error <- c(meanf_error, mean(train$y, na.rm = TRUE) - test$y)
        medf_error <- c(medf_error, median(train$y, na.rm = TRUE) - test$y)
        obs <- c(obs, test$y)
        #per-fold scaling terms for MASE/MAMSE, repeated once per horizon
        mean_diff <- c(mean_diff, rep(mean(abs(diff(train$y)), na.rm = TRUE), 3))
        med_diff <- c(med_diff, rep(median(abs(diff(train$y)), na.rm = TRUE), 3))
      }
      #stack per-model error vectors into long format; horizon cycles
      #1..3 within each fold, and model labels follow the stacking order
      if (fit_qgam) {
        error_df <- tibble('error' = c(prophet_error, qgam_error, naive_error, meanf_error, medf_error),
                           'obs' = rep(obs, times = 5),
                           'mean_diff' = rep(mean_diff, times = 5),
                           'med_diff' = rep(med_diff, times = 5),
                           'horizon' = rep(seq(1, 3), times = 5*length(cutoff_dates)),
                           'model' = rep(c('prophet', 'qgam', 'naive', 'meanf', 'medf'), each = 3*length(cutoff_dates)))
      } else {
        error_df <- tibble('error' = c(prophet_error, naive_error, meanf_error, medf_error),
                           'obs' = rep(obs, times = 4),
                           'mean_diff' = rep(mean_diff, times = 4),
                           'med_diff' = rep(med_diff, times = 4),
                           'horizon' = rep(seq(1, 3), times = 4*length(cutoff_dates)),
                           'model' = rep(c('prophet', 'naive', 'meanf', 'medf'), each = 3*length(cutoff_dates)))
      }
      return(error_df)
    },
    error=function(cond) {
      # print(cond)
      return(NA)
    },
    warning=function(cond) {
      # print(cond)
      return(NA)
    }
  )
}
#' Compute common forecasting performance metrics.
#'
#' Computes rmse, mae, mape, mase, and mamse from an
#' error data frame returned by ts_cv.
#'
#' @param error_df The data frame returned by ts_cv (see
#' ?ts_cv)
#'
#' @return A data frame with the following columns: model,
#' horizon, metric, and value. The model column gives the
#' name of the forecasting model being measured. The
#' horizon column gives the number of weeks each prediction
#' is from the last date in the training data. The metric
#' column gives the name of the computed performance metric.
#' It can be any of: rmse, mae, mape, mase, or mamse. The
#' value column gives the estimated value of the performance
#' metric.
gather_metrics <- function(error_df) {
  # Summarise each (model, horizon) group into the five performance metrics,
  # then reshape wide -> long so every metric becomes one row. Any error or
  # warning during summarisation yields NA (mirrors ts_cv's behaviour).
  tryCatch(
    {
      long_metrics <- error_df %>%
        group_by(model, horizon) %>%
        summarise(
          rmse  = sqrt(mean(error^2, na.rm = TRUE)),
          mae   = mean(abs(error), na.rm = TRUE),
          mape  = 100 * mean(abs(error) / obs, na.rm = TRUE),
          mase  = mean(abs(error) / mean_diff, na.rm = TRUE),
          mamse = mean(abs(error) / med_diff, na.rm = TRUE),
          .groups = 'drop'
        ) %>%
        pivot_longer(
          cols = c('rmse', 'mae', 'mape', 'mase', 'mamse'),
          names_to = 'metric',
          values_to = 'value'
        )
      long_metrics
    },
    error = function(cond) {
      return(NA)
    },
    warning = function(cond) {
      return(NA)
    }
  )
}
#' Count the number of weeks with sales reported.
#'
#' Returns the number of rows with non-NA values for the
#' Weekly_Sales column.
#'
#' @param df A data frame of weekly sales data. Must
#' contain a Weekly_Sales column.
#'
#' @return An int giving the number of rows with non-NA
#' values for the Weekly_Sales column.
#'
#' @examples
#' test_df <- data.frame(Date = c('12/01/2020', '12/07/2020'),
#' Primary_Key = c('1.1', '1.1'),
#' Weekly_Sales = c(1000, NA))
#' n <- num_obs(test_df)
#' print(n)
num_obs <- function(df){
  # Count rows with a reported (non-NA) Weekly_Sales value.
  # sum() coerces logicals to 0/1 directly, so the original as.numeric()
  # wrapper was redundant; this also returns an integer, matching the
  # "@return An int" contract in the roxygen block above.
  sum(!is.na(df$Weekly_Sales))
}
#' Get the smallest reported sales.
#'
#' Returns the min of the Weekly_Sales column.
#'
#' @param df A data frame of weekly sales data. Must
#' contain a Weekly_Sales column.
#'
#' @return An int giving the min of the Weekly_Sales
#' column.
#'
#' @examples
#' test_df <- data.frame(Date = c('12/01/2020', '12/07/2020'),
#' Primary_Key = c('1.1', '1.1'),
#' Weekly_Sales = c(1000, NA))
#' n <- min_sales(test_df)
#' print(n)
min_sales <- function(df){
  # Extract the sales column once, then take its minimum, ignoring NAs.
  # Note: if every value is NA, min(..., na.rm = TRUE) returns Inf with a
  # warning (documented base R behaviour for an empty set).
  sales <- df$Weekly_Sales
  min(sales, na.rm = TRUE)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/xgboost.R
\name{xgboost}
\alias{xgboost}
\title{eXtreme Gradient Boosting (Tree) library}
\usage{
xgboost(data = NULL, label = NULL, missing = NULL, params = list(),
nrounds, verbose = 1, print.every.n = 1L, early.stop.round = NULL,
maximize = NULL, ...)
}
\arguments{
\item{data}{takes \code{matrix}, \code{dgCMatrix}, local data file or
\code{xgb.DMatrix}.}
\item{label}{the response variable. User should not set this field,
if data is local data file or \code{xgb.DMatrix}.}
\item{missing}{Missing is only used when the input is a dense matrix; pick a float
value that represents missing values. Sometimes a dataset uses 0 or another extreme value to represent missing values.}
\item{params}{the list of parameters.
Commonly used ones are:
\itemize{
\item \code{objective} objective function, common ones are
\itemize{
\item \code{reg:linear} linear regression
\item \code{binary:logistic} logistic regression for classification
}
\item \code{eta} step size of each boosting step
\item \code{max.depth} maximum depth of the tree
\item \code{nthread} number of thread used in training, if not set, all threads are used
}
Look at \code{\link{xgb.train}} for a more complete list of parameters or \url{https://github.com/dmlc/xgboost/wiki/Parameters} for the full list.
See also \code{demo/} for walkthrough example in R.}
\item{nrounds}{the max number of iterations}
\item{verbose}{If 0, xgboost will stay silent. If 1, xgboost will print
information of performance. If 2, xgboost will print information of both
performance and construction progress information}
\item{print.every.n}{Print every N progress messages when \code{verbose>0}. Default is 1 which means all messages are printed.}
\item{early.stop.round}{If \code{NULL}, the early stopping function is not triggered.
If set to an integer \code{k}, training with a validation set will stop if the performance
keeps getting worse consecutively for \code{k} rounds.}
\item{maximize}{If \code{feval} and \code{early.stop.round} are set, then \code{maximize} must be set as well.
\code{maximize=TRUE} means the larger the evaluation score the better.}
\item{...}{other parameters to pass to \code{params}.}
}
\description{
A simple interface for training xgboost model. Look at \code{\link{xgb.train}} function for a more advanced interface.
}
\details{
This is the modeling function for Xgboost.
Parallelization is automatically enabled if \code{OpenMP} is present.
Number of threads can also be manually specified via \code{nthread} parameter.
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
eta = 1, nthread = 2, nround = 2, objective = "binary:logistic")
pred <- predict(bst, test$data)
}
| /R-package/man/xgboost.Rd | permissive | saurav111/xgboost | R | false | false | 2,943 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/xgboost.R
\name{xgboost}
\alias{xgboost}
\title{eXtreme Gradient Boosting (Tree) library}
\usage{
xgboost(data = NULL, label = NULL, missing = NULL, params = list(),
nrounds, verbose = 1, print.every.n = 1L, early.stop.round = NULL,
maximize = NULL, ...)
}
\arguments{
\item{data}{takes \code{matrix}, \code{dgCMatrix}, local data file or
\code{xgb.DMatrix}.}
\item{label}{the response variable. User should not set this field,
if data is local data file or \code{xgb.DMatrix}.}
\item{missing}{Missing is only used when the input is a dense matrix; pick a float
value that represents missing values. Sometimes a dataset uses 0 or another extreme value to represent missing values.}
\item{params}{the list of parameters.
Commonly used ones are:
\itemize{
\item \code{objective} objective function, common ones are
\itemize{
\item \code{reg:linear} linear regression
\item \code{binary:logistic} logistic regression for classification
}
\item \code{eta} step size of each boosting step
\item \code{max.depth} maximum depth of the tree
\item \code{nthread} number of thread used in training, if not set, all threads are used
}
Look at \code{\link{xgb.train}} for a more complete list of parameters or \url{https://github.com/dmlc/xgboost/wiki/Parameters} for the full list.
See also \code{demo/} for walkthrough example in R.}
\item{nrounds}{the max number of iterations}
\item{verbose}{If 0, xgboost will stay silent. If 1, xgboost will print
information of performance. If 2, xgboost will print information of both
performance and construction progress information}
\item{print.every.n}{Print every N progress messages when \code{verbose>0}. Default is 1 which means all messages are printed.}
\item{early.stop.round}{If \code{NULL}, the early stopping function is not triggered.
If set to an integer \code{k}, training with a validation set will stop if the performance
keeps getting worse consecutively for \code{k} rounds.}
\item{maximize}{If \code{feval} and \code{early.stop.round} are set, then \code{maximize} must be set as well.
\code{maximize=TRUE} means the larger the evaluation score the better.}
\item{...}{other parameters to pass to \code{params}.}
}
\description{
A simple interface for training xgboost model. Look at \code{\link{xgb.train}} function for a more advanced interface.
}
\details{
This is the modeling function for Xgboost.
Parallelization is automatically enabled if \code{OpenMP} is present.
Number of threads can also be manually specified via \code{nthread} parameter.
}
\examples{
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bst <- xgboost(data = train$data, label = train$label, max.depth = 2,
eta = 1, nthread = 2, nround = 2, objective = "binary:logistic")
pred <- predict(bst, test$data)
}
|
### Site setup
# One-off, interactive setup script for a blogdown/Hugo site. Every call
# below has side effects (creates files/directories, starts a local server),
# so run lines individually rather than sourcing the whole file.
library(blogdown)
# Create new site in our recently cloned blogdown repository
# This does not need to be run again
new_site(dir = 'blogdown_source',
         theme = 'gcushen/hugo-academic',
         format = 'toml')
# WORKING DIRECTORY NEEDS TO BE blogdown_source
# Fixed to control this - created project with this as wd
new_post(title = 'book-review-gnomon', ext = '.Rmd')  # creates a new .Rmd post stub
new_content(path = 'project/')  # creates a new content page
serve_site()  # serve the site locally for preview
build_site()  # full rebuild of the site
## hugo_build() worked! Edit config file in local blogdown_source, then use hugo_build(),
# then push changes in tsteggall.github.io to github repository and BOOM
hugo_build()  # runs hugo only, without re-knitting .Rmd files
| /site_setup.R | no_license | tsteggall/blogdown_source | R | false | false | 639 | r | ### Site setup
# One-off, interactive setup script for a blogdown/Hugo site. Every call
# below has side effects (creates files/directories, starts a local server),
# so run lines individually rather than sourcing the whole file.
library(blogdown)
# Create new site in our recently cloned blogdown repository
# This does not need to be run again
new_site(dir = 'blogdown_source',
         theme = 'gcushen/hugo-academic',
         format = 'toml')
# WORKING DIRECTORY NEEDS TO BE blogdown_source
# Fixed to control this - created project with this as wd
new_post(title = 'book-review-gnomon', ext = '.Rmd')  # creates a new .Rmd post stub
new_content(path = 'project/')  # creates a new content page
serve_site()  # serve the site locally for preview
build_site()  # full rebuild of the site
## hugo_build() worked! Edit config file in local blogdown_source, then use hugo_build(),
# then push changes in tsteggall.github.io to github repository and BOOM
hugo_build()  # runs hugo only, without re-knitting .Rmd files
|
# Minimal Shiny demo: a slider controls the sample size of a histogram.
# library() is used instead of require() for mandatory dependencies:
# require() returns FALSE on failure instead of erroring, which would let
# the script continue broken.
library(shiny)
library(igraph)     # NOTE(review): not used in this demo -- candidate for removal
library(visNetwork) # NOTE(review): not used in this demo -- candidate for removal

# UI: a sidebar slider for the number of observations, histogram in the main panel.
ui <- fluidPage(
  sidebarLayout(
    sidebarPanel(
      sliderInput("obs", "Number of observations:",
                  min = 10, max = 500, value = 100)
    ),
    mainPanel(plotOutput("plot1"))
  )
)

# Server: redraw the histogram of a fresh normal sample whenever the slider changes.
server <- function(input, output) {
  output$plot1 <- renderPlot({
    hist(rnorm(input$obs), col = 'darkgray', border = 'white')
  })
}

shinyApp(ui = ui, server = server)
| /demo.r | no_license | daniellenguyen/basketball-bracket | R | false | false | 403 | r | require(shiny)
# library() is used instead of require() for mandatory dependencies:
# require() returns FALSE on failure instead of erroring.
library(igraph)     # NOTE(review): not used in this demo -- candidate for removal
library(visNetwork) # NOTE(review): not used in this demo -- candidate for removal

# UI: a sidebar slider for the number of observations, histogram in the main panel.
ui <- fluidPage(
  sidebarLayout(
    sidebarPanel(
      sliderInput("obs", "Number of observations:",
                  min = 10, max = 500, value = 100)
    ),
    mainPanel(plotOutput("plot1"))
  )
)

# Server: redraw the histogram of a fresh normal sample whenever the slider changes.
server <- function(input, output) {
  output$plot1 <- renderPlot({
    hist(rnorm(input$obs), col = 'darkgray', border = 'white')
  })
}

shinyApp(ui = ui, server = server)
|
#' Predict the next word for a phrase using a (pseudo) Kneser-Ney score.
#'
#' @param string The input phrase whose continuation is predicted.
#' @param ncandidates Number of top candidate words to score.
#' @param min_cand_freq Minimum corpus count for a word to be a candidate.
#' @param dictlist N-gram dictionary object consumed by candidateList() and
#'   pkn.calc() (defined in the sourced sibling scripts).
#' @return A data.table with columns Candidate and pkn.score, sorted by
#'   descending score, or NULL when no continuation is found.
pkn.findNextWord <- function(string, ncandidates, min_cand_freq = 3, dictlist){
  # Load the candidate-generation helper from a sibling script.
  source("candidateList.R")
  print(c("[EC1] Original string:", string))
  # qdap supplies replace_contraction(): lower-case, expand contractions,
  # then strip apostrophes before candidate lookup.
  library(qdap)
  string <- gsub(pattern = "'", replacement = "", tolower(replace_contraction(tolower(string))))
  # Ad-hoc fix for a known tokenization artifact ("takea" -> "take a").
  string <- gsub(pattern = "takea", replacement = "take a", string)
  print(paste("String:", string))
  cand.list <- candidateList(string, dictlist, min = min_cand_freq)
  if(is.na(cand.list[1])){
    print(paste("Error: No continuation of [", tail(string,1), "] found."))
    return(NULL)
  }
  print(paste("[EC2] cand.list[1:5]:", paste(cand.list[1:5], collapse = " ")))
  # Load the scoring helper from a sibling script.
  source("calc.R")
  # Score the top candidates. seq_len() is safe when ncandidates == 0
  # (unlike 1:ncandidates), and vapply() avoids growing a vector inside a
  # loop with c().
  # NOTE(review): assumes pkn.calc() returns a length-1 numeric -- consistent
  # with its use as the pkn.score column below, but confirm against calc.R.
  top_cands <- cand.list[seq_len(ncandidates)]
  scores <- vapply(
    top_cands,
    function(cand) pkn.calc(string, candidate = cand, dictlist),
    numeric(1),
    USE.NAMES = FALSE
  )
  dt <- data.table("Candidate" = top_cands, "pkn.score" = scores)
  return(dt[order(-pkn.score)])
}
| /findNextWord.R | no_license | andimasanam/Capstone-TextPredictProject | R | false | false | 1,245 | r | pkn.findNextWord <- function(string, ncandidates, min_cand_freq = 3, dictlist){
#Determine candidates that have a min. count of min_cand_freq
source("candidateList.R")
print(c("[EC1] Original string:", string))
library(qdap)
string <- gsub(pattern = "'", replacement = "", tolower(replace_contraction(tolower(string))))
string <- gsub(pattern = "takea", replacement = "take a", string)
print(paste("String:", string))
cand.list <- candidateList(string, dictlist, min = min_cand_freq)
if(is.na(cand.list[1])){
print(paste("Error: No continuation of [", tail(string,1), "] found."))
return(NULL)
}
print(paste("[EC2] cand.list[1:5]:", paste(cand.list[1:5], collapse = " ")))
#Create dt for top n candidates
source("calc.R")
results <- c()
results.names <- c()
for(q in (1:ncandidates)){
cand <- cand.list[q]
# print(c("[EC3] Checking Prediction for the candidate:", cand))
temp <- pkn.calc(string, candidate = cand, dictlist)
results <- c(results, temp)
results.names <- c(results.names, cand.list[q])
}
dt <- data.table("Candidate" = results.names, "pkn.score" = results)
return(dt[order(-pkn.score)])
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flow-functions.R
\name{forget}
\alias{forget}
\title{Forgets the computation for the current state}
\usage{
forget(flow, state = "current")
}
\arguments{
\item{flow}{A flow object, e.g. as returned by \code{\link{flow_fn}}.}
\item{state}{A flow state. It can be either a valid state
index (an integer) or a valid state name: \code{"current"}, \code{"all"},
\code{"in_hash"}, or \code{"out_hash"} (a string).}
}
\value{
A logical value, whether the deletion was successful.
}
\description{
Forgets the computation for the current state
}
\examples{
fn <- function(x, y) { x + y + 10 }
flowed_fn <- flow_fn(2, 3, fn = fn)
forget(flowed_fn)
}
| /man/forget.Rd | permissive | numeract/rflow | R | false | true | 714 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flow-functions.R
\name{forget}
\alias{forget}
\title{Forgets the computation for the current state}
\usage{
forget(flow, state = "current")
}
\arguments{
\item{flow}{A flow object, e.g. as returned by \code{\link{flow_fn}}.}
\item{state}{A flow state. It can be either a valid state
index (an integer) or a valid state name: \code{"current"}, \code{"all"},
\code{"in_hash"}, or \code{"out_hash"} (a string).}
}
\value{
A logical value, whether the deletion was successful.
}
\description{
Forgets the computation for the current state
}
\examples{
fn <- function(x, y) { x + y + 10 }
flowed_fn <- flow_fn(2, 3, fn = fn)
forget(flowed_fn)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.