content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
browseURL("https://drive.google.com/open?id=1t54qpJ1GmeY_NAoMAXqIYQ9xmjPxZr1u") # Install and import essential packages ----------------------------------- # install.packages("tidyverse") # library(tidyverse) will import ggplot2, dplyr, tidyr, readr, purrr, and tibble # Need to library # readxl for .xls and .xlsx sheets. # haven for SPSS, Stata, and SAS data. # jsonlite for JSON. # xml2 for XML. # httr for web APIs. # rvest for web scraping. # stringr for strings. # lubridate for dates and date-times. # forcats for categorical variables (factors). # hms for time-of-day values. # blob for storing blob (binary) data. # packages for lecture hand-in-hand R pkgs <- c("tidyverse", "ggmap", "twitteR", "tidytext", "tm", "wordcloud", "igraph", "tidyr", "readr", "RColorBrewer", "jsonlite", "ggmap", "ggplot2") # excluded packages not in the computer pkgs <- pkgs[!(pkgs %in% installed.packages()[,"Package"])] if(length(pkgs)) install.packages(pkgs) # 0. Inconsisdent encoding of RStudio ------------------------------------- # Sys.setlocale(category = "LC_ALL", locale = "UTF-8") # Sys.setlocale(category = "LC_ALL", locale = "C") # Sys.setlocale(category = "LC_ALL", locale = "cht") # for win # http://psmethods.postach.io/post/ru-he-geng-gai-rde-yu-she-yu-xi # The locale describes aspects of the internationalization of a program. Initially most aspects of the locale of R are set to "C" (which is the default for the C language and reflects North-American usage) # https://stat.ethz.ch/R-manual/R-devel/library/base/html/locales.html
/0_install_pkgs.R
no_license
anhnguyendepocen/NCCU2018
R
false
false
1,581
r
browseURL("https://drive.google.com/open?id=1t54qpJ1GmeY_NAoMAXqIYQ9xmjPxZr1u") # Install and import essential packages ----------------------------------- # install.packages("tidyverse") # library(tidyverse) will import ggplot2, dplyr, tidyr, readr, purrr, and tibble # Need to library # readxl for .xls and .xlsx sheets. # haven for SPSS, Stata, and SAS data. # jsonlite for JSON. # xml2 for XML. # httr for web APIs. # rvest for web scraping. # stringr for strings. # lubridate for dates and date-times. # forcats for categorical variables (factors). # hms for time-of-day values. # blob for storing blob (binary) data. # packages for lecture hand-in-hand R pkgs <- c("tidyverse", "ggmap", "twitteR", "tidytext", "tm", "wordcloud", "igraph", "tidyr", "readr", "RColorBrewer", "jsonlite", "ggmap", "ggplot2") # excluded packages not in the computer pkgs <- pkgs[!(pkgs %in% installed.packages()[,"Package"])] if(length(pkgs)) install.packages(pkgs) # 0. Inconsisdent encoding of RStudio ------------------------------------- # Sys.setlocale(category = "LC_ALL", locale = "UTF-8") # Sys.setlocale(category = "LC_ALL", locale = "C") # Sys.setlocale(category = "LC_ALL", locale = "cht") # for win # http://psmethods.postach.io/post/ru-he-geng-gai-rde-yu-she-yu-xi # The locale describes aspects of the internationalization of a program. Initially most aspects of the locale of R are set to "C" (which is the default for the C language and reflects North-American usage) # https://stat.ethz.ch/R-manual/R-devel/library/base/html/locales.html
# Este codigo foi criado para executar tanto no Azure, quanto no RStudio. # Para executar no Azure, altere o valor da variavel Azure para TRUE. Se o valor for FALSE, o codigo sera executado no RStudio # ***** Esta é a versão 2.0 deste script, atualizado em 23/05/2017 ***** # ***** Esse script pode ser executado nas versões 3.3.1, 3.3.2, 3.3.3 e 3.4.0 da linguagem R ***** # ***** Recomendamos a utilização da versão 3.4.0 da linguagem R ***** # Configurando o diretório de trabalho # Coloque entre aspas o diretório de trabalho que você está usando no seu computador # setwd("~/Dropbox/DSA/BigDataAnalytics-R-Azure/Cap10") # getwd() # Variavel que controla a execucao do script Azure <- FALSE if(Azure){ source("src/Tools.R") bikes <- maml.mapInputPort(1) bikes$dteday <- set.asPOSIXct(bikes) }else{ bikes <- bikes } dim(bikes) any(is.na(bikes)) # Criando um modelo para identificar os atributos com maior importancia para o modelo preditivo require(randomForest) # Avalidando a importância de todas as variaveis modelo <- randomForest(cnt ~ . , data = bikes, ntree = 100, nodesize = 10, importance = TRUE) # Removendo variaveis colineares modelo <- randomForest(cnt ~ . - count - mnth - hr - workingday - isWorking - dayWeek - xformHr - workTime - holiday - windspeed - monthCount - weathersit, data = bikes, ntree = 100, nodesize = 10, importance = TRUE) # Plotando as variaveis por grau de importancia # ?varImpPlot varImpPlot(modelo) # Gravando o resultado df_saida <- bikes[, c("cnt", rownames(modelo$importance))] if(Azure) maml.mapOutputPort("df_saida ")
/PrevendoDemandaBike/06-Feature Selection.R
no_license
LeoGitBR/RProjects
R
false
false
1,992
r
# Este codigo foi criado para executar tanto no Azure, quanto no RStudio. # Para executar no Azure, altere o valor da variavel Azure para TRUE. Se o valor for FALSE, o codigo sera executado no RStudio # ***** Esta é a versão 2.0 deste script, atualizado em 23/05/2017 ***** # ***** Esse script pode ser executado nas versões 3.3.1, 3.3.2, 3.3.3 e 3.4.0 da linguagem R ***** # ***** Recomendamos a utilização da versão 3.4.0 da linguagem R ***** # Configurando o diretório de trabalho # Coloque entre aspas o diretório de trabalho que você está usando no seu computador # setwd("~/Dropbox/DSA/BigDataAnalytics-R-Azure/Cap10") # getwd() # Variavel que controla a execucao do script Azure <- FALSE if(Azure){ source("src/Tools.R") bikes <- maml.mapInputPort(1) bikes$dteday <- set.asPOSIXct(bikes) }else{ bikes <- bikes } dim(bikes) any(is.na(bikes)) # Criando um modelo para identificar os atributos com maior importancia para o modelo preditivo require(randomForest) # Avalidando a importância de todas as variaveis modelo <- randomForest(cnt ~ . , data = bikes, ntree = 100, nodesize = 10, importance = TRUE) # Removendo variaveis colineares modelo <- randomForest(cnt ~ . - count - mnth - hr - workingday - isWorking - dayWeek - xformHr - workTime - holiday - windspeed - monthCount - weathersit, data = bikes, ntree = 100, nodesize = 10, importance = TRUE) # Plotando as variaveis por grau de importancia # ?varImpPlot varImpPlot(modelo) # Gravando o resultado df_saida <- bikes[, c("cnt", rownames(modelo$importance))] if(Azure) maml.mapOutputPort("df_saida ")
#' summarySE #' #' summarySE provides summaries (mean, sd, ci, se, N) for a variable/variables either overall or stratified by one or more grouping variables. This is an extension of the summarySE function provided by Winston Chang in the Cookbook for R. This extension allows for getting summaries across multple outcome variables. #' #' @param data A data.frame #' @param measurevars The variable name or vector of variable names that will be summarized #' @param groupvars A vector containing names of columns that contain grouping variables #' @param na.rm If TRUE, NA values will be ignored #' @param conf.interval The percent range of the confidence interval (default is 95%) #' @param .drop Default is TRUE #' @param digits Number of decimals to round output to (default is 3) #' #' @return Summary data.frame #' @export #' #' @examples #' #example_df <- data.frame(a = sample(c(0, 1), 100, replace = T), #' # b = sample(letters[24:26], 100, replace = T), #' # c = rnorm(100), #' # d = rnorm(100, 15, 3)) #' #summarySE(example_df, measurevars="c") #' #summarySE(example_df, measurevars="c", groupvars = "a") #' #summarySE(example_df, measurevars=c("c", "d")) #' #summarySE(example_df, measurevars=c("c", "d"), groupvars = "a") summarySE <- function(data = NULL, measurevars, groupvars = NULL, na.rm = TRUE, conf.interval = .95, .drop = TRUE, digits = 3) { detach_package <- function(pkg, character.only = FALSE) { if (!character.only) { pkg <- deparse(substitute(pkg)) } search_item <- paste("package", pkg, sep = ":") while (search_item %in% search()) { detach(search_item, unload = TRUE, character.only = TRUE) } } detach_package(dplyr) print( "Warning: dplyr will be detached in order to run this function. You must re-attach manually" ) do_one <- function(measurevar) { # New version of length which can handle NA's: if na.rm==T, don't count them length2 <- function (x, na.rm = FALSE) { if (na.rm) sum(!is.na(x)) else length(x) } # This does the summary. 
For each group's data frame, return a vector with # N, mean, and sd datac <- ddply( data, groupvars, .drop = .drop, .fun = function(xx, col) { c( N = length2(as.numeric(as.character(xx[[col]])), na.rm = na.rm), mean = mean (as.numeric(as.character(xx[[col]])), na.rm = na.rm), sd = sd (as.numeric(as.character(xx[[col]])), na.rm = na.rm) ) }, measurevar ) datac <- data.frame(Var = measurevar,datac) # Rename the "mean" column datac$mean <- round(datac$mean, digits = digits) datac$sd <- round(datac$sd, digits = digits) # datac <- rename(datac, c("mean" = measurevar)) datac$se <- round(datac$sd / sqrt(datac$N),digits = digits) # Calculate standard error of the mean # Confidence interval multiplier for standard error # Calculate t-statistic for confidence interval: # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1 ciMult <- round(qt(conf.interval / 2 + .5, datac$N - 1), digits = 3) datac$ci <- round(datac$se * ciMult,digits = digits) return(datac) } detac_full <- do.call(rbind, lapply(measurevars, do_one)) detac_full$.id <- NULL if(length(measurevars)==1) detac_full$Var <- NULL return(detac_full) }
/R/summarySE.R
no_license
TaylorAndrew/atPrepAnalyze
R
false
false
3,556
r
#' summarySE #' #' summarySE provides summaries (mean, sd, ci, se, N) for a variable/variables either overall or stratified by one or more grouping variables. This is an extension of the summarySE function provided by Winston Chang in the Cookbook for R. This extension allows for getting summaries across multple outcome variables. #' #' @param data A data.frame #' @param measurevars The variable name or vector of variable names that will be summarized #' @param groupvars A vector containing names of columns that contain grouping variables #' @param na.rm If TRUE, NA values will be ignored #' @param conf.interval The percent range of the confidence interval (default is 95%) #' @param .drop Default is TRUE #' @param digits Number of decimals to round output to (default is 3) #' #' @return Summary data.frame #' @export #' #' @examples #' #example_df <- data.frame(a = sample(c(0, 1), 100, replace = T), #' # b = sample(letters[24:26], 100, replace = T), #' # c = rnorm(100), #' # d = rnorm(100, 15, 3)) #' #summarySE(example_df, measurevars="c") #' #summarySE(example_df, measurevars="c", groupvars = "a") #' #summarySE(example_df, measurevars=c("c", "d")) #' #summarySE(example_df, measurevars=c("c", "d"), groupvars = "a") summarySE <- function(data = NULL, measurevars, groupvars = NULL, na.rm = TRUE, conf.interval = .95, .drop = TRUE, digits = 3) { detach_package <- function(pkg, character.only = FALSE) { if (!character.only) { pkg <- deparse(substitute(pkg)) } search_item <- paste("package", pkg, sep = ":") while (search_item %in% search()) { detach(search_item, unload = TRUE, character.only = TRUE) } } detach_package(dplyr) print( "Warning: dplyr will be detached in order to run this function. You must re-attach manually" ) do_one <- function(measurevar) { # New version of length which can handle NA's: if na.rm==T, don't count them length2 <- function (x, na.rm = FALSE) { if (na.rm) sum(!is.na(x)) else length(x) } # This does the summary. 
For each group's data frame, return a vector with # N, mean, and sd datac <- ddply( data, groupvars, .drop = .drop, .fun = function(xx, col) { c( N = length2(as.numeric(as.character(xx[[col]])), na.rm = na.rm), mean = mean (as.numeric(as.character(xx[[col]])), na.rm = na.rm), sd = sd (as.numeric(as.character(xx[[col]])), na.rm = na.rm) ) }, measurevar ) datac <- data.frame(Var = measurevar,datac) # Rename the "mean" column datac$mean <- round(datac$mean, digits = digits) datac$sd <- round(datac$sd, digits = digits) # datac <- rename(datac, c("mean" = measurevar)) datac$se <- round(datac$sd / sqrt(datac$N),digits = digits) # Calculate standard error of the mean # Confidence interval multiplier for standard error # Calculate t-statistic for confidence interval: # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1 ciMult <- round(qt(conf.interval / 2 + .5, datac$N - 1), digits = 3) datac$ci <- round(datac$se * ciMult,digits = digits) return(datac) } detac_full <- do.call(rbind, lapply(measurevars, do_one)) detac_full$.id <- NULL if(length(measurevars)==1) detac_full$Var <- NULL return(detac_full) }
\name{slMatrix} \alias{slMatrix} \title{ Function to create objects from class slMatrix } \description{ Provides a flexible way to create objects from class \code{slMatrix}. The entries may be specified in several ways. } \usage{ slMatrix(init = NA, period, maxlag, seasonnames = seq(length = period), lagnames = as.character(0:maxlag), periodunit = "season", lagunit = "lag", f = NA, type = "sl") } \arguments{ \item{init}{ values for the the autocovariances, see also argument \code{f}. } \item{period}{ the number of seasons in an epoch } \item{maxlag}{ maximum lag to be stored } \item{seasonnames}{ names of seasons (?) } \item{lagnames}{ names of lags } \item{periodunit}{ name of the period unit } \item{lagunit}{ name of the unit for lags } \item{f}{ function to evaluate or matrix to get the values of the autocovariances.} \item{type}{ format or the arguments of \code{f}, see details.} } \details{ The internal representation of \code{slMatrix} is a matrix slot, \code{m}, of size \code{period x (maxlag+1)}. It is created by a call to \code{matrix()} with \code{init} supplying the values (may be \code{NA}s). If \code{init} is a matrix values for \code{period} and \code{maxlag} are deduced (if not supplied) from its size. Change on 21/06/2006: Now, if the length of \code{init} is smaller than that of \code{m}, the remaining values are filled with NA's (in the past the normal recycling rules of \code{matrix()} applied). The previous behaviour used to hide puzzling and difficult to track errors. I cannot be sure but this change should not affect old code. If \code{f} is given it is used to populate the slot \code{m} by a call to \code{fill.slMatrix}. Normally in this case \code{init=NA} but this is not required. Currently \code{fill.slMatrix} has methods for \code{f} of class "matrix" and "function". The arguments (or the indices) can be controlled by the argument \code{type}. 
\code{type="sl"} - standard season-lag pair \code{type="tt"} - time-time pair \code{type="tl"} - standard season-lag pair } \value{ An object of class \code{slMatrix} } %\references{ ~put references to the literature/web site here ~ } \author{Georgi N. Boshnakov} \note{ To do: additional work is needed on the case when the dimensions of init and the result are not the same (see the details section) } \seealso{ \code{\link{slMatrix-class}}, \code{\link{fill.slMatrix}} } %\examples{ %} \keyword{seasonLag}% at least one, from doc/KEYWORDS
/man/slMatrix.Rd
no_license
GeoBosh/lagged
R
false
false
2,546
rd
\name{slMatrix} \alias{slMatrix} \title{ Function to create objects from class slMatrix } \description{ Provides a flexible way to create objects from class \code{slMatrix}. The entries may be specified in several ways. } \usage{ slMatrix(init = NA, period, maxlag, seasonnames = seq(length = period), lagnames = as.character(0:maxlag), periodunit = "season", lagunit = "lag", f = NA, type = "sl") } \arguments{ \item{init}{ values for the the autocovariances, see also argument \code{f}. } \item{period}{ the number of seasons in an epoch } \item{maxlag}{ maximum lag to be stored } \item{seasonnames}{ names of seasons (?) } \item{lagnames}{ names of lags } \item{periodunit}{ name of the period unit } \item{lagunit}{ name of the unit for lags } \item{f}{ function to evaluate or matrix to get the values of the autocovariances.} \item{type}{ format or the arguments of \code{f}, see details.} } \details{ The internal representation of \code{slMatrix} is a matrix slot, \code{m}, of size \code{period x (maxlag+1)}. It is created by a call to \code{matrix()} with \code{init} supplying the values (may be \code{NA}s). If \code{init} is a matrix values for \code{period} and \code{maxlag} are deduced (if not supplied) from its size. Change on 21/06/2006: Now, if the length of \code{init} is smaller than that of \code{m}, the remaining values are filled with NA's (in the past the normal recycling rules of \code{matrix()} applied). The previous behaviour used to hide puzzling and difficult to track errors. I cannot be sure but this change should not affect old code. If \code{f} is given it is used to populate the slot \code{m} by a call to \code{fill.slMatrix}. Normally in this case \code{init=NA} but this is not required. Currently \code{fill.slMatrix} has methods for \code{f} of class "matrix" and "function". The arguments (or the indices) can be controlled by the argument \code{type}. 
\code{type="sl"} - standard season-lag pair \code{type="tt"} - time-time pair \code{type="tl"} - standard season-lag pair } \value{ An object of class \code{slMatrix} } %\references{ ~put references to the literature/web site here ~ } \author{Georgi N. Boshnakov} \note{ To do: additional work is needed on the case when the dimensions of init and the result are not the same (see the details section) } \seealso{ \code{\link{slMatrix-class}}, \code{\link{fill.slMatrix}} } %\examples{ %} \keyword{seasonLag}% at least one, from doc/KEYWORDS
#sample augmentation with censoring cases #rationale: the all-death registry data is missing recent patients who are still surviving #simulation parameter table is built based on national statistics on k-year survival rates: # 1-3 year: 50% # 3-5 year: 30% # 5-10 year: 20% # install.packages("tidyverse") # install.packages("survminer") library(tidyverse) library(readxl) library(magrittr) library(survival) library(survminer) #load data from one-drive onedrive_root<-file.path("C:","Users", "xsm7f" #user1 # "sxing" #user2 ) alsa_dat<-read_xlsx(file.path(onedrive_root, "OneDrive - University of Missouri", "#Grants", "Analyses", "ALS_registry_data", # "ALSA Mid America dataset 112520.xlsx" "ALSA_calender year included 071521.xlsx" )) %>% select(`Death - symptom onset (m)`,`Onset year`) %>% mutate(row_id=1:n(), status=1) %>% rename(time=`Death - symptom onset (m)`,calyr=`Onset year`) %>% filter(!is.na(time)) survfit(Surv(time, status) ~ 1, data = alsa_dat) survfit(Surv(time, status) ~ calyr, data = alsa_dat) #specify simulation parameters yr<-c(2,3,5,10) surv<-c(0.5,0.3,0.2,0.1) time_rg<-list(c(1,3),c(3,5),c(5,10),c(10,20)) N<-nrow(alsa_dat) alsa_sim<-c() n0<-N for(i in seq_along(yr)){ n<-floor(yr[i]/10*N*surv[i]/(1-surv[i])) alsa_sim %<>% bind_rows(data.frame(row_id=(n0+1):(n0+n), time=sample(time_rg[[i]][1]:time_rg[[i]][2],n,replace=T)*12)) n0<-max(alsa_sim$row_id) } alsa_aug<-alsa_dat %>% bind_rows(alsa_sim %>% mutate(status=0)) %>% select(row_id,status,time) #sanity check ggsurvplot( fit = survfit(Surv(time, status) ~ 1, data = alsa_aug), xlab = "Months", ylab = "Overall survival probability") survfit(Surv(time, status) ~ 1, data = alsa_aug) #save augmented sample write.csv(alsa_aug,file="./alsa_augmented.csv",row.names = F)
/simulate_survival.R
no_license
sxinger/Simulations
R
false
false
2,121
r
#sample augmentation with censoring cases #rationale: the all-death registry data is missing recent patients who are still surviving #simulation parameter table is built based on national statistics on k-year survival rates: # 1-3 year: 50% # 3-5 year: 30% # 5-10 year: 20% # install.packages("tidyverse") # install.packages("survminer") library(tidyverse) library(readxl) library(magrittr) library(survival) library(survminer) #load data from one-drive onedrive_root<-file.path("C:","Users", "xsm7f" #user1 # "sxing" #user2 ) alsa_dat<-read_xlsx(file.path(onedrive_root, "OneDrive - University of Missouri", "#Grants", "Analyses", "ALS_registry_data", # "ALSA Mid America dataset 112520.xlsx" "ALSA_calender year included 071521.xlsx" )) %>% select(`Death - symptom onset (m)`,`Onset year`) %>% mutate(row_id=1:n(), status=1) %>% rename(time=`Death - symptom onset (m)`,calyr=`Onset year`) %>% filter(!is.na(time)) survfit(Surv(time, status) ~ 1, data = alsa_dat) survfit(Surv(time, status) ~ calyr, data = alsa_dat) #specify simulation parameters yr<-c(2,3,5,10) surv<-c(0.5,0.3,0.2,0.1) time_rg<-list(c(1,3),c(3,5),c(5,10),c(10,20)) N<-nrow(alsa_dat) alsa_sim<-c() n0<-N for(i in seq_along(yr)){ n<-floor(yr[i]/10*N*surv[i]/(1-surv[i])) alsa_sim %<>% bind_rows(data.frame(row_id=(n0+1):(n0+n), time=sample(time_rg[[i]][1]:time_rg[[i]][2],n,replace=T)*12)) n0<-max(alsa_sim$row_id) } alsa_aug<-alsa_dat %>% bind_rows(alsa_sim %>% mutate(status=0)) %>% select(row_id,status,time) #sanity check ggsurvplot( fit = survfit(Surv(time, status) ~ 1, data = alsa_aug), xlab = "Months", ylab = "Overall survival probability") survfit(Surv(time, status) ~ 1, data = alsa_aug) #save augmented sample write.csv(alsa_aug,file="./alsa_augmented.csv",row.names = F)
testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, 2.3834705896422e+195, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161)) result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist) str(result)
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615827825-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
733
r
testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, 2.3834705896422e+195, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161)) result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist) str(result)
\name{RADami-package} \alias{RADami-package} \alias{RADami} \docType{package} \title{R Package for Phylogenetic Analysis of RADseq Data} \description{This package implements import, export, manipulation, visualization, and downstream (post-clustering) analysis of RADseq data, integrating with the pyRAD package by Deren Eaton. It begins with output from the pyRAD package by Deren Eataon (http://dereneaton.com/software; Eaton and Ree, 2013) and implements many analyses conducted in a recent phylogenetic analysis of oaks (Hipp et al., accepted).} \details{ \tabular{ll}{ Package: \tab RADami\cr Type: \tab Package\cr Version: \tab 1.0\cr Date: \tab 2014-03-03\cr License: \tab GPL-3\cr } } \author{Andrew Hipp Maintainer: Andrew Hipp <ahipp@mortonarb.org> } \references{ Eaton D.A.R. & Ree R.H. (2013). Inferring Phylogeny and Introgression using RADseq Data: An Example from Flowering Plants (Pedicularis: Orobanchaceae). Systematic Biolology 62: 689-706. Hipp A.L., Eaton D.A.R., Cavender-Bares J., Fitzek E., Nipper R. & Manos P.S. (Accepted pending revision). A framework phylogeny of the American oak clade based on sequenced RAD data. PLoS ONE. http://www.dereneaton.com/software/ } \keyword{ package }
/man/RADami-package.Rd
no_license
KlausVigo/RADami
R
false
false
1,266
rd
\name{RADami-package} \alias{RADami-package} \alias{RADami} \docType{package} \title{R Package for Phylogenetic Analysis of RADseq Data} \description{This package implements import, export, manipulation, visualization, and downstream (post-clustering) analysis of RADseq data, integrating with the pyRAD package by Deren Eaton. It begins with output from the pyRAD package by Deren Eataon (http://dereneaton.com/software; Eaton and Ree, 2013) and implements many analyses conducted in a recent phylogenetic analysis of oaks (Hipp et al., accepted).} \details{ \tabular{ll}{ Package: \tab RADami\cr Type: \tab Package\cr Version: \tab 1.0\cr Date: \tab 2014-03-03\cr License: \tab GPL-3\cr } } \author{Andrew Hipp Maintainer: Andrew Hipp <ahipp@mortonarb.org> } \references{ Eaton D.A.R. & Ree R.H. (2013). Inferring Phylogeny and Introgression using RADseq Data: An Example from Flowering Plants (Pedicularis: Orobanchaceae). Systematic Biolology 62: 689-706. Hipp A.L., Eaton D.A.R., Cavender-Bares J., Fitzek E., Nipper R. & Manos P.S. (Accepted pending revision). A framework phylogeny of the American oak clade based on sequenced RAD data. PLoS ONE. http://www.dereneaton.com/software/ } \keyword{ package }
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common new_handlers new_service set_config NULL #' AWS Ground Station #' #' @description #' Welcome to the AWS Ground Station API Reference. AWS Ground Station is a #' fully managed service that enables you to control satellite #' communications, downlink and process satellite data, and scale your #' satellite operations efficiently and cost-effectively without having to #' build or manage your own ground station infrastructure. #' #' @param #' config #' Optional configuration of credentials, endpoint, and/or region. #' #' @section Service syntax: #' ``` #' svc <- groundstation( #' config = list( #' credentials = list( #' creds = list( #' access_key_id = "string", #' secret_access_key = "string", #' session_token = "string" #' ), #' profile = "string" #' ), #' endpoint = "string", #' region = "string" #' ) #' ) #' ``` #' #' @examples #' \donttest{svc <- groundstation() #' svc$cancel_contact( #' Foo = 123 #' )} #' #' @section Operations: #' \tabular{ll}{ #' \link[=groundstation_cancel_contact]{cancel_contact} \tab Cancels a contact with a specified contact ID \cr #' \link[=groundstation_create_config]{create_config} \tab Creates a Config with the specified configData parameters \cr #' \link[=groundstation_create_dataflow_endpoint_group]{create_dataflow_endpoint_group} \tab Creates a DataflowEndpoint group containing the specified list of DataflowEndpoint objects\cr #' \link[=groundstation_create_mission_profile]{create_mission_profile} \tab Creates a mission profile \cr #' \link[=groundstation_delete_config]{delete_config} \tab Deletes a Config \cr #' \link[=groundstation_delete_dataflow_endpoint_group]{delete_dataflow_endpoint_group} \tab Deletes a dataflow endpoint group \cr #' \link[=groundstation_delete_mission_profile]{delete_mission_profile} \tab Deletes a mission profile \cr #' \link[=groundstation_describe_contact]{describe_contact} \tab Describes an existing contact \cr #' 
\link[=groundstation_get_config]{get_config} \tab Returns Config information \cr #' \link[=groundstation_get_dataflow_endpoint_group]{get_dataflow_endpoint_group} \tab Returns the dataflow endpoint group \cr #' \link[=groundstation_get_minute_usage]{get_minute_usage} \tab Returns the number of minutes used by account \cr #' \link[=groundstation_get_mission_profile]{get_mission_profile} \tab Returns a mission profile \cr #' \link[=groundstation_get_satellite]{get_satellite} \tab Returns a satellite \cr #' \link[=groundstation_list_configs]{list_configs} \tab Returns a list of Config objects \cr #' \link[=groundstation_list_contacts]{list_contacts} \tab Returns a list of contacts \cr #' \link[=groundstation_list_dataflow_endpoint_groups]{list_dataflow_endpoint_groups} \tab Returns a list of DataflowEndpoint groups \cr #' \link[=groundstation_list_ground_stations]{list_ground_stations} \tab Returns a list of ground stations \cr #' \link[=groundstation_list_mission_profiles]{list_mission_profiles} \tab Returns a list of mission profiles \cr #' \link[=groundstation_list_satellites]{list_satellites} \tab Returns a list of satellites \cr #' \link[=groundstation_list_tags_for_resource]{list_tags_for_resource} \tab Returns a list of tags or a specified resource \cr #' \link[=groundstation_reserve_contact]{reserve_contact} \tab Reserves a contact using specified parameters \cr #' \link[=groundstation_tag_resource]{tag_resource} \tab Assigns a tag to a resource \cr #' \link[=groundstation_untag_resource]{untag_resource} \tab Deassigns a resource tag \cr #' \link[=groundstation_update_config]{update_config} \tab Updates the Config used when scheduling contacts \cr #' \link[=groundstation_update_mission_profile]{update_mission_profile} \tab Updates a mission profile #' } #' #' @rdname groundstation #' @export groundstation <- function(config = list()) { svc <- .groundstation$operations svc <- set_config(svc, config) return(svc) } # Private API objects: metadata, handlers, 
interfaces, etc. .groundstation <- list() .groundstation$operations <- list() .groundstation$metadata <- list( service_name = "groundstation", endpoints = list("*" = list(endpoint = "groundstation.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "groundstation.{region}.amazonaws.com.cn", global = FALSE)), service_id = "GroundStation", api_version = "2019-05-23", signing_name = "groundstation", json_version = "1.1", target_prefix = "" ) .groundstation$handlers <- new_handlers("restjson", "v4") .groundstation$service <- function(config = list()) { new_service(.groundstation$metadata, .groundstation$handlers, config) }
/paws/R/groundstation_service.R
permissive
ryanb8/paws
R
false
false
4,752
r
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common new_handlers new_service set_config NULL #' AWS Ground Station #' #' @description #' Welcome to the AWS Ground Station API Reference. AWS Ground Station is a #' fully managed service that enables you to control satellite #' communications, downlink and process satellite data, and scale your #' satellite operations efficiently and cost-effectively without having to #' build or manage your own ground station infrastructure. #' #' @param #' config #' Optional configuration of credentials, endpoint, and/or region. #' #' @section Service syntax: #' ``` #' svc <- groundstation( #' config = list( #' credentials = list( #' creds = list( #' access_key_id = "string", #' secret_access_key = "string", #' session_token = "string" #' ), #' profile = "string" #' ), #' endpoint = "string", #' region = "string" #' ) #' ) #' ``` #' #' @examples #' \donttest{svc <- groundstation() #' svc$cancel_contact( #' Foo = 123 #' )} #' #' @section Operations: #' \tabular{ll}{ #' \link[=groundstation_cancel_contact]{cancel_contact} \tab Cancels a contact with a specified contact ID \cr #' \link[=groundstation_create_config]{create_config} \tab Creates a Config with the specified configData parameters \cr #' \link[=groundstation_create_dataflow_endpoint_group]{create_dataflow_endpoint_group} \tab Creates a DataflowEndpoint group containing the specified list of DataflowEndpoint objects\cr #' \link[=groundstation_create_mission_profile]{create_mission_profile} \tab Creates a mission profile \cr #' \link[=groundstation_delete_config]{delete_config} \tab Deletes a Config \cr #' \link[=groundstation_delete_dataflow_endpoint_group]{delete_dataflow_endpoint_group} \tab Deletes a dataflow endpoint group \cr #' \link[=groundstation_delete_mission_profile]{delete_mission_profile} \tab Deletes a mission profile \cr #' \link[=groundstation_describe_contact]{describe_contact} \tab Describes an existing contact \cr #' 
\link[=groundstation_get_config]{get_config} \tab Returns Config information \cr #' \link[=groundstation_get_dataflow_endpoint_group]{get_dataflow_endpoint_group} \tab Returns the dataflow endpoint group \cr #' \link[=groundstation_get_minute_usage]{get_minute_usage} \tab Returns the number of minutes used by account \cr #' \link[=groundstation_get_mission_profile]{get_mission_profile} \tab Returns a mission profile \cr #' \link[=groundstation_get_satellite]{get_satellite} \tab Returns a satellite \cr #' \link[=groundstation_list_configs]{list_configs} \tab Returns a list of Config objects \cr #' \link[=groundstation_list_contacts]{list_contacts} \tab Returns a list of contacts \cr #' \link[=groundstation_list_dataflow_endpoint_groups]{list_dataflow_endpoint_groups} \tab Returns a list of DataflowEndpoint groups \cr #' \link[=groundstation_list_ground_stations]{list_ground_stations} \tab Returns a list of ground stations \cr #' \link[=groundstation_list_mission_profiles]{list_mission_profiles} \tab Returns a list of mission profiles \cr #' \link[=groundstation_list_satellites]{list_satellites} \tab Returns a list of satellites \cr #' \link[=groundstation_list_tags_for_resource]{list_tags_for_resource} \tab Returns a list of tags or a specified resource \cr #' \link[=groundstation_reserve_contact]{reserve_contact} \tab Reserves a contact using specified parameters \cr #' \link[=groundstation_tag_resource]{tag_resource} \tab Assigns a tag to a resource \cr #' \link[=groundstation_untag_resource]{untag_resource} \tab Deassigns a resource tag \cr #' \link[=groundstation_update_config]{update_config} \tab Updates the Config used when scheduling contacts \cr #' \link[=groundstation_update_mission_profile]{update_mission_profile} \tab Updates a mission profile #' } #' #' @rdname groundstation #' @export groundstation <- function(config = list()) { svc <- .groundstation$operations svc <- set_config(svc, config) return(svc) } # Private API objects: metadata, handlers, 
interfaces, etc. .groundstation <- list() .groundstation$operations <- list() .groundstation$metadata <- list( service_name = "groundstation", endpoints = list("*" = list(endpoint = "groundstation.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "groundstation.{region}.amazonaws.com.cn", global = FALSE)), service_id = "GroundStation", api_version = "2019-05-23", signing_name = "groundstation", json_version = "1.1", target_prefix = "" ) .groundstation$handlers <- new_handlers("restjson", "v4") .groundstation$service <- function(config = list()) { new_service(.groundstation$metadata, .groundstation$handlers, config) }
library(RSQLite) library(caret) library(caretEnsemble) library(ggplot2) library(pROC) library(randomForest) library(C50) library(nnet) library(gbm) library(Boruta) library(dplyr) # by rinda fb.db <- "~/Documents/kaggle/facebook_IV_Human_or_Robot/fb.db" ffile <- "~/Documents/kaggle/facebook_IV_Human_or_Robot/extractedFeatures.Rda"
/init.R
no_license
rindajones/kaggleFR4
R
false
false
334
r
library(RSQLite) library(caret) library(caretEnsemble) library(ggplot2) library(pROC) library(randomForest) library(C50) library(nnet) library(gbm) library(Boruta) library(dplyr) # by rinda fb.db <- "~/Documents/kaggle/facebook_IV_Human_or_Robot/fb.db" ffile <- "~/Documents/kaggle/facebook_IV_Human_or_Robot/extractedFeatures.Rda"
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stationary_ploidevol.R \name{stationary_ploidevol} \alias{stationary_ploidevol} \title{Calculates stationary distribution for root under Ploidevol model} \usage{ stationary_ploidevol(log.parameters) } \arguments{ \item{log.parameters}{a vector of size 6 containing ln values for (alpha, beta, delta, epsilon, rho, omega)} } \value{ A probability vector of size 11 indicating probabilities of the root being ploidy 2x, 3x,..., 12x } \description{ Calculates stationary distribution for root under Ploidevol model } \seealso{ \code{\link{negloglikelihood}}, \code{\link{Q_ploidevol}} } \author{ Rosana Zenil-Ferguson }
/man/stationary_ploidevol.Rd
no_license
roszenil/chromploid
R
false
true
695
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stationary_ploidevol.R \name{stationary_ploidevol} \alias{stationary_ploidevol} \title{Calculates stationary distribution for root under Ploidevol model} \usage{ stationary_ploidevol(log.parameters) } \arguments{ \item{log.parameters}{a vector of size 6 containing ln values for (alpha, beta, delta, epsilon, rho, omega)} } \value{ A probability vector of size 11 indicating probabilities of the root being ploidy 2x, 3x,..., 12x } \description{ Calculates stationary distribution for root under Ploidevol model } \seealso{ \code{\link{negloglikelihood}}, \code{\link{Q_ploidevol}} } \author{ Rosana Zenil-Ferguson }
library(ggplot2) library(stringr) library(scales) library(gridExtra) library(lubridate) library(caret) library(caTools) library(e1071) library(party) dataset <- read.csv("E:/R Workspace/Data Mining Project/No-show-Issue-Comma-300k.csv", stringsAsFactors = TRUE, header = TRUE, na.strings = c("", " ", "NA")) summary(dataset) #dataset <- cbind(data) ############################################################# #################Data Understanding########################## ############################################################# # plotting histogram of relation between Age and Status ggplot(dataset,aes(x=Age,group=Status,fill=Status))+geom_histogram(position="identity",alpha=.4,bins=40)+scale_fill_brewer(palette="Set1") #plotting boxplot of relation between Age and Status ggplot(dataset,aes(x=Status,y=Age,colour=Status))+geom_boxplot(size=1)+scale_colour_brewer(palette="Pastel1") #taking absolute of AwaitingTime to make it positive dataset$AwaitingTime <- abs(dataset$AwaitingTime) #plotting density of effect of AwaitingTime on status ggplot(dataset[dataset$AwaitingTime<=100,],aes(x=AwaitingTime,group=Status))+geom_density(aes(fill=Status),alpha=.3,colour=NA) #plotting the bar chart of relation between Gender and Status p1 <- ggplot(dataset, aes(x=Gender, fill=Gender)) + geom_bar(position="dodge") p2 <- ggplot(dataset, aes(x=Gender, fill=Status)) + geom_bar(position="dodge") grid.arrange(p1, p2,ncol=2, top='Gender distribution') gender_table <- table(dataset$Gender, dataset$Status) addmargins(gender_table) #plotting relationship of days of the week and status on bar chart ggplot(dataset, aes(x=DayOfTheWeek, fill=Status)) + geom_bar(position="dodge") days_table <- table(dataset$DayOfTheWeek, dataset$Status) addmargins(days_table) #plotting relation of appointment month and status dataset$ApointmentData <- ymd_hms(dataset$ApointmentData) ggplot(dataset, aes(x=month(ApointmentData,label=TRUE), fill=Status)) + geom_bar(position="dodge") month_table <- 
table(month(dataset$ApointmentData), dataset$Status) addmargins(month_table) #plotting the effect of SMS reminders on Status ggplot(dataset, aes(x=Sms_Reminder, fill=Status)) + geom_bar(position="dodge") ####################################################### #############Preparing the Data######################## #normalization the data dataset$Age <- (dataset$Age - min(dataset$Age))/(max(dataset$Age) - min(dataset$Age)) dataset$AwaitingTime <- (dataset$AwaitingTime - min(dataset$AwaitingTime))/(max(dataset$AwaitingTime) - min(dataset$AwaitingTime)) #removing variables dataset <- dataset[,-c(3,4)] #splitting the dataset into training and testing sets split_index <-createDataPartition(dataset$Status,p=0.7,list=FALSE) Train.data <-dataset[split_index,] Test.data <-dataset[-split_index,] # seperating the class labels Train.Status <- Train.data$Status Test.Status<-Test.data$Status ########################################################## ###################Modelling############################## #implementing Logistic Regression to classify Status Model.2 <- glm(Status~., data = Train.data, family = binomial("logit")) summary(Model.2) pred_m2 <- predict(Model.2, Test.data, type="response") colAUC(pred_m2, Test.data$Status, plotROC=TRUE) #implementing Decision Tree to classify Status # Give the chart file a name. png(file = "decision_tree.png") # Create the classification tree. output.tree <- ctree(Status~., data = Train.data) # Plot the tree. plot(output.tree) # Predictions on test data pred_tree <- predict(output.tree, Test.data) #Printing confusion matrix confusionMatrix(pred_tree, Test.data$Status) # Save the file. dev.off() #implementing Support Vector Machine for classification Model.1 <- svm(Status~., data = Train.data) summary(Model.1) pred_m1 <- predict(Model.1, Test.data, type="response") colAUC(pred_m1, Test.data$Status, plotROC=TRUE)
/Project.R
no_license
ZubairHussain/Appointment-No-Show-
R
false
false
4,057
r
library(ggplot2) library(stringr) library(scales) library(gridExtra) library(lubridate) library(caret) library(caTools) library(e1071) library(party) dataset <- read.csv("E:/R Workspace/Data Mining Project/No-show-Issue-Comma-300k.csv", stringsAsFactors = TRUE, header = TRUE, na.strings = c("", " ", "NA")) summary(dataset) #dataset <- cbind(data) ############################################################# #################Data Understanding########################## ############################################################# # plotting histogram of relation between Age and Status ggplot(dataset,aes(x=Age,group=Status,fill=Status))+geom_histogram(position="identity",alpha=.4,bins=40)+scale_fill_brewer(palette="Set1") #plotting boxplot of relation between Age and Status ggplot(dataset,aes(x=Status,y=Age,colour=Status))+geom_boxplot(size=1)+scale_colour_brewer(palette="Pastel1") #taking absolute of AwaitingTime to make it positive dataset$AwaitingTime <- abs(dataset$AwaitingTime) #plotting density of effect of AwaitingTime on status ggplot(dataset[dataset$AwaitingTime<=100,],aes(x=AwaitingTime,group=Status))+geom_density(aes(fill=Status),alpha=.3,colour=NA) #plotting the bar chart of relation between Gender and Status p1 <- ggplot(dataset, aes(x=Gender, fill=Gender)) + geom_bar(position="dodge") p2 <- ggplot(dataset, aes(x=Gender, fill=Status)) + geom_bar(position="dodge") grid.arrange(p1, p2,ncol=2, top='Gender distribution') gender_table <- table(dataset$Gender, dataset$Status) addmargins(gender_table) #plotting relationship of days of the week and status on bar chart ggplot(dataset, aes(x=DayOfTheWeek, fill=Status)) + geom_bar(position="dodge") days_table <- table(dataset$DayOfTheWeek, dataset$Status) addmargins(days_table) #plotting relation of appointment month and status dataset$ApointmentData <- ymd_hms(dataset$ApointmentData) ggplot(dataset, aes(x=month(ApointmentData,label=TRUE), fill=Status)) + geom_bar(position="dodge") month_table <- 
table(month(dataset$ApointmentData), dataset$Status) addmargins(month_table) #plotting the effect of SMS reminders on Status ggplot(dataset, aes(x=Sms_Reminder, fill=Status)) + geom_bar(position="dodge") ####################################################### #############Preparing the Data######################## #normalization the data dataset$Age <- (dataset$Age - min(dataset$Age))/(max(dataset$Age) - min(dataset$Age)) dataset$AwaitingTime <- (dataset$AwaitingTime - min(dataset$AwaitingTime))/(max(dataset$AwaitingTime) - min(dataset$AwaitingTime)) #removing variables dataset <- dataset[,-c(3,4)] #splitting the dataset into training and testing sets split_index <-createDataPartition(dataset$Status,p=0.7,list=FALSE) Train.data <-dataset[split_index,] Test.data <-dataset[-split_index,] # seperating the class labels Train.Status <- Train.data$Status Test.Status<-Test.data$Status ########################################################## ###################Modelling############################## #implementing Logistic Regression to classify Status Model.2 <- glm(Status~., data = Train.data, family = binomial("logit")) summary(Model.2) pred_m2 <- predict(Model.2, Test.data, type="response") colAUC(pred_m2, Test.data$Status, plotROC=TRUE) #implementing Decision Tree to classify Status # Give the chart file a name. png(file = "decision_tree.png") # Create the classification tree. output.tree <- ctree(Status~., data = Train.data) # Plot the tree. plot(output.tree) # Predictions on test data pred_tree <- predict(output.tree, Test.data) #Printing confusion matrix confusionMatrix(pred_tree, Test.data$Status) # Save the file. dev.off() #implementing Support Vector Machine for classification Model.1 <- svm(Status~., data = Train.data) summary(Model.1) pred_m1 <- predict(Model.1, Test.data, type="response") colAUC(pred_m1, Test.data$Status, plotROC=TRUE)
sgRSEA <- function(dat, multiplier=50, r.seed=NULL){ v1 =as.character(dat[,1]) v2 = as.character(dat[,2]) v3 = as.numeric(dat[,3]) v4 = as.numeric(dat[,4]) cname=colnames(dat) dat = data.frame( v1,v2,v3, v4 ) colnames(dat)=cname if (anyNA(dat)) stop("There is NA in dat.") if (min(dat[,3:4]) <1) stop("Minimum counts should be greater than or equal to 1.") dat = dat[ order(dat[,2]), ] set.seed(r.seed) Null.dat = dat[,3:4] p0 = pMME(Null.dat) Z0 = apply(Null.dat, MARGIN=1, Zstat, p.null=p0) #### (1) Fitting Zvec <- Zfit(dat, p.null=p0) geneZ = data.frame(dat[,2], Zvec) Txx = T.maxmean( geneZ ) datZ = outdatZ(dat, Zvec, Txx) datZ.s = datZ[ order(datZ$Z, decreasing=T),] #### (2) Generating Null T values (permutation T values) Null.Tss = Tmaxmean.Null.Tss2(gnames=dat[,2], null.Z=Z0, multiplier=multiplier) meanvec = tapply(Null.Tss[,1], Null.Tss[,2], mean) sdvec = tapply(Null.Tss[,1], Null.Tss[,2], sd) meanarr = cbind(meanvec, as.numeric( names(meanvec) ) ) sdarr = cbind(sdvec, as.numeric( names(sdvec) ) ) stdTmat = t( apply(Txx, MARGIN=1, stdofT, meanarr, sdarr) ) stdNullTmat = t( apply(Null.Tss, MARGIN=1, stdofT, meanarr, sdarr) ) #### (3) Significance calculation stdNullTmat = stdNullTmat[ order(stdNullTmat[,1], decreasing=T),] stdTmat = stdTmat[ order(stdTmat[,1], decreasing=T),] null.stdT = stdNullTmat[ ,1] obs.stdT = stdTmat[,1] Txx.pmat = PermPval(obs.stdT, null.stdT) Txx.pmat = cbind( m=stdTmat[,2], Txx.pmat) NES = Txx.pmat[,2] Txx.p.pos = Txx.pmat[,3] FDR.pos = p.adjust(Txx.p.pos, 'BH') rank.pos = rank( -Txx.pmat[,2]) Txx.p.neg = Txx.pmat[,4] FDR.neg = p.adjust(Txx.p.neg, 'BH') rank.neg = rank( Txx.pmat[,2]) Txx.pqrmat.pos = cbind(Txx.pmat[,1:2], p.value.pos=Txx.p.pos, FDR.pos, rank.pos)[order(NES, decreasing=T),] Txx.pqrmat.neg = cbind(Txx.pmat[,1:2], p.value.neg=Txx.p.neg, FDR.neg, rank.neg)[order(NES),] result = list(gene.pos=Txx.pqrmat.pos, gene.neg=Txx.pqrmat.neg, stdTmat=stdTmat, stdNullTmat=stdNullTmat, Tmat=Txx, NullTmat=Null.Tss, sgRNA.stat=datZ.s) 
return(result) }
/sgRSEA/R/sgRSEA.R
no_license
ingted/R-Examples
R
false
false
2,262
r
sgRSEA <- function(dat, multiplier=50, r.seed=NULL){ v1 =as.character(dat[,1]) v2 = as.character(dat[,2]) v3 = as.numeric(dat[,3]) v4 = as.numeric(dat[,4]) cname=colnames(dat) dat = data.frame( v1,v2,v3, v4 ) colnames(dat)=cname if (anyNA(dat)) stop("There is NA in dat.") if (min(dat[,3:4]) <1) stop("Minimum counts should be greater than or equal to 1.") dat = dat[ order(dat[,2]), ] set.seed(r.seed) Null.dat = dat[,3:4] p0 = pMME(Null.dat) Z0 = apply(Null.dat, MARGIN=1, Zstat, p.null=p0) #### (1) Fitting Zvec <- Zfit(dat, p.null=p0) geneZ = data.frame(dat[,2], Zvec) Txx = T.maxmean( geneZ ) datZ = outdatZ(dat, Zvec, Txx) datZ.s = datZ[ order(datZ$Z, decreasing=T),] #### (2) Generating Null T values (permutation T values) Null.Tss = Tmaxmean.Null.Tss2(gnames=dat[,2], null.Z=Z0, multiplier=multiplier) meanvec = tapply(Null.Tss[,1], Null.Tss[,2], mean) sdvec = tapply(Null.Tss[,1], Null.Tss[,2], sd) meanarr = cbind(meanvec, as.numeric( names(meanvec) ) ) sdarr = cbind(sdvec, as.numeric( names(sdvec) ) ) stdTmat = t( apply(Txx, MARGIN=1, stdofT, meanarr, sdarr) ) stdNullTmat = t( apply(Null.Tss, MARGIN=1, stdofT, meanarr, sdarr) ) #### (3) Significance calculation stdNullTmat = stdNullTmat[ order(stdNullTmat[,1], decreasing=T),] stdTmat = stdTmat[ order(stdTmat[,1], decreasing=T),] null.stdT = stdNullTmat[ ,1] obs.stdT = stdTmat[,1] Txx.pmat = PermPval(obs.stdT, null.stdT) Txx.pmat = cbind( m=stdTmat[,2], Txx.pmat) NES = Txx.pmat[,2] Txx.p.pos = Txx.pmat[,3] FDR.pos = p.adjust(Txx.p.pos, 'BH') rank.pos = rank( -Txx.pmat[,2]) Txx.p.neg = Txx.pmat[,4] FDR.neg = p.adjust(Txx.p.neg, 'BH') rank.neg = rank( Txx.pmat[,2]) Txx.pqrmat.pos = cbind(Txx.pmat[,1:2], p.value.pos=Txx.p.pos, FDR.pos, rank.pos)[order(NES, decreasing=T),] Txx.pqrmat.neg = cbind(Txx.pmat[,1:2], p.value.neg=Txx.p.neg, FDR.neg, rank.neg)[order(NES),] result = list(gene.pos=Txx.pqrmat.pos, gene.neg=Txx.pqrmat.neg, stdTmat=stdTmat, stdNullTmat=stdNullTmat, Tmat=Txx, NullTmat=Null.Tss, sgRNA.stat=datZ.s) 
return(result) }
library(measures) ### Name: TNR ### Title: True negative rate ### Aliases: TNR ### ** Examples n = 20 set.seed(125) truth = as.factor(sample(c(1,0), n, replace = TRUE)) probabilities = runif(n) response = as.factor(as.numeric(probabilities > 0.5)) negative = 0 TNR(truth, response, negative)
/data/genthat_extracted_code/measures/examples/TNR.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
299
r
library(measures) ### Name: TNR ### Title: True negative rate ### Aliases: TNR ### ** Examples n = 20 set.seed(125) truth = as.factor(sample(c(1,0), n, replace = TRUE)) probabilities = runif(n) response = as.factor(as.numeric(probabilities > 0.5)) negative = 0 TNR(truth, response, negative)
rm(list=ls()); gc() library(data.table) library(lubridate) tr <- fread("../input/train.csv") #Select Train Sizes #set.seed(777) #tr <- tr[sample(.N, 50e6), ] #test dataset change te <- fread("../input/test.csv") # te <- fread("../input/test_supplement.csv") tr <- setorder(tr, click_time, is_attributed) tr <- tr[, -"attributed_time"] tri <- 1:nrow(tr) # te <- setorder(te, click_time, click_id) # te <- te[634:nrow(te)] #tr-te gap # teid <- te$click_id te <- te[, -"click_id"] adt <- rbind(tr, te, fill = T) rm(tr, te); gc() #### 1st Saving Point ##### saveRDS(adt, "adt_1st.RDS") saveRDS(tri, "tri_1st.RDS") #saveRDS(teid, "teid.RDS") adt[, click_hour := hour(adt$click_time)] adt[, click_weekd := wday(adt$click_time)] adt$click_time <- as.numeric(ymd_hms(adt$click_time)) head(adt) adt[, ip_hw := .N, by = list(ip, click_hour, click_weekd)] adt[, ip_app := .N, by = list(ip, app)] adt[, ip_dev := .N, by = list(ip, device)] adt[, ip_os := .N, by = list(ip, os)] adt[, ip_ch := .N, by = list(ip, channel)] adt[, ip_cnt := .N, by = ip] adt[, app_cnt := .N, by = app] adt[, dev_cnt := .N, by = device] adt[, os_cnt := .N, by = os] adt[, ch_cnt := .N, by = channel] adt[, clicker := .N, by = list(ip, device, os)] adt[, clicker_app := .N, by = list(ip, device, os, app)] adt[, clicker_N := seq(.N), by = list(ip, device, os)] adt[, clicker_app_N := seq(.N), by = list(ip, device, os, app)] adt[, app_dev := .N, by = list(app, device)] adt[, app_os := .N, by = list(app, os)] adt[, app_ch := .N, by = list(app, channel)] dim(adt) colnames(adt) #te_hourG1 <- c(4, 14, 13, 10, 9, 5) #te_hourG2 <- c(15, 11, 6) #adt$h_div <- ifelse(adt$click_hour %in% te_hourG1, 1, # ifelse(adt$click_hour %in% te_hourG2, 3, 2)) colnames(adt) adt[, clicker_Next := c(click_time[-1], NA), by = .(ip, device, os)] adt[, clicker_Next := clicker_Next - click_time, by = .(ip, device, os)] adt[is.na(clicker_Next), clicker_Next := 0] adt[, clicker_app_Next := c(click_time[-1], NA), by = .(ip, device, os, app)] adt[, 
clicker_app_Next := clicker_app_Next - click_time, by = .(ip, device, os, app)] adt[is.na(clicker_app_Next), clicker_app_Next := 0] adt[, clicker_ch_Next := c(click_time[-1], NA), by = .(ip, device, os, app, channel)] adt[, clicker_ch_Next := clicker_ch_Next - click_time, by = .(ip, device, os,app,channel)] adt[is.na(clicker_ch_Next), clicker_ch_Next := 0] adt[, clicker_prev := click_time - shift(click_time), by = .(ip, device, os)] adt[is.na(clicker_prev), clicker_prev := 0] adt[, clicker_app_prev := click_time - shift(click_time), by = .(ip, device, os, app)] adt[is.na(clicker_app_prev), clicker_app_prev := 0] adt[, clicker_ch_prev := click_time - shift(click_time), by = .(ip, device, os, app, channel)] adt[is.na(clicker_ch_prev), clicker_ch_prev := 0] adt[, clicker_Next2 := shift(click_time, 2, type = "lead", fill = 0) - click_time, by = .(ip, device, os)] adt[clicker_Next2 < 0 , clicker_Next2 := 0] adt[, clicker_app_Next2 := shift(click_time, 2, type = "lead", fill = 0) - click_time, by = .(ip, device, os, app)] adt[clicker_app_Next2 < 0 , clicker_app_Next2 := 0] adt[, clicker_ch_Next2 := shift(click_time, 2, type = "lead", fill = 0) - click_time, by = .(ip, device, os, app, channel)] adt[clicker_ch_Next2 < 0 , clicker_ch_Next2 := 0] adt[, clicker_prev2 := click_time - shift(click_time, 2), by = .(ip, device, os)] adt[is.na(clicker_prev2), clicker_prev2 := 0] adt[, clicker_app_prev2 := click_time - shift(click_time, 2), by = .(ip, device, os, app)] adt[is.na(clicker_app_prev2), clicker_app_prev2 := 0] adt[, clicker_ch_prev2 := click_time - shift(click_time, 2), by = .(ip, device, os, app, channel)] adt[is.na(clicker_ch_prev2), clicker_ch_prev2 := 0] adt[, clicker_Last := max(click_time), by = .(ip, device, os)] adt[, clicker_app_Last := max(click_time), by = .(ip, device, os, app)] adt[, clicker_ch_Last := max(click_time), by = .(ip, device, os, app, channel)] ##### 2nd Saving Point ##### saveRDS(adt, "adt_2nd.RDS") #adt[, clicker_Next := shift(click_time, 1, 
type = "lead", fill = 0) - click_time, # by = .(ip, device, os)] #adt[, clicker_app_Next := shift(click_time, 1, type = "lead", fill = 0) - click_time, # by = .(ip, device, os, app)] #adt[, clicker_ch_Next := shift(click_time, 1, type = "lead", fill = 0) - click_time, # by = .(ip, device, os, app, channel)] #adt$clicker_Next <- ifelse(adt$clicker_Next < 0 , 0 , adt$clicker_Next) #adt$clicker_app_Next <- ifelse(adt$clicker_app_Next <0 , 0 , adt$clicker_app_Next) #adt$clicker_ch_Next <- ifelse(adt$clicker_ch_Next <0 , 0 , adt$clicker_ch_Next) # adt[, clicker_Nmean := as.integer(mean(clicker_Next)), by = .(ip, device, os)] # adt[, clicker_app_Nmean := as.integer(mean(clicker_app_Next)), # by = .(ip, device, os,app)] # adt[, clicker_ch_Nmean := as.integer(mean(clicker_ch_Next)), # by = .(ip, device, os, app, channel)] # adt[, clicker_Pmean := as.integer(mean(clicker_prev)), by = .(ip, device, os)] # adt[, clicker_app_Pmean := as.integer(mean(clicker_app_prev)), # by = .(ip, device, os,app)] # adt[, clicker_ch_Pmean := as.integer(mean(clicker_ch_prev)), # by = .(ip, device, os, app, channel)] # # adt[, clicker_Nmed := as.integer(median(clicker_Next)), by = .(ip, device, os)] # adt[, clicker_app_Nmed := as.integer(median(clicker_app_Next)), by = .(ip, device, os,app)] # adt[, clicker_ch_Nmed := as.integer(median(clicker_ch_Next)), by = .(ip, device, os, app, channel)] # adt[, clicker_Pmed := as.integer(median(clicker_prev)), by = .(ip, device, os)] # adt[, clicker_app_Pmed := as.integer(median(clicker_app_prev)), by = .(ip, device, os,app)] # adt[, clicker_ch_Pmed := as.integer(median(clicker_ch_prev)), by = .(ip, device, os, app, channel)] colnames(adt) ##### 3rd Saving Point ##### #saveRDS(adt, "adt_3rd.RDS") library(caret) set.seed(777) y <- adt[tri]$is_attributed idx <- createDataPartition(y, p= 0.9, list = F) #adt_index <- createDataPartition(y, p = 0.7, list = F) #tri <- createDataPartition(y[adt_index], p = 0.9, list = F) cat_f <- c("app", "device", "os", 
"channel", "click_hour") adtr <- adt[, -c("ip", "click_time", "is_attributed")] library(pryr) mem_used() rm(adt); gc() mem_used() library(lightgbm) #dtrain <- lgb.Dataset(data = as.matrix(adtr[adt_index,][tri,]), # label = y[adt_index][tri], # categorical_feature = cat_f) #dval <- lgb.Dataset(data = as.matrix(adtr[adt_index,][-tri,]), # label = y[adt_index][-tri], # categorical_feature = cat_f) #dtest <- as.matrix(adtr[-adt_index,]) adte <- as.matrix(adtr[-tri]) saveRDS(adte, "adte.RDS") rm(adte); gc() dtrain <- lgb.Dataset(data = as.matrix(adtr[tri][idx,]), label = y[idx], categorical_feature = cat_f) dval <- lgb.Dataset(data = as.matrix(adtr[tri][-idx,]), label = y[-idx], categorical_feature = cat_f) rm(adtr); gc() mem_used() params = list(objective = "binary", metric = "auc", boosting = "gbdt", learning_rate= 0.1, num_leaves= 7, max_depth= 3, #change:4 to 3 min_child_samples= 100, max_bin= 100, subsample= 0.7, subsample_freq= 1, colsample_bytree= 0.7, #change : 0.7 to 0.9 min_child_weight= 0, min_split_gain= 0, scale_pos_weight=99.7 #change : 99.7 to 200 ) model_lgbm <- lgb.train(params, dtrain, valids = list(validation = dval), nthread = 8, nrounds = 1500, verbose = 1, early_stopping_rounds = 120, eval_freq = 10) model_lgbm$best_score model_lgbm$best_iter #pred_lgbm <- predict(model_lgbm, dtest, n = model_lgbm$best_iter) #pred_lgbm2 <- ifelse(pred_lgbm>0.8, 1, 0) #confusionMatrix(as.factor(pred_lgbm2), as.factor(y[-adt_index])) #library(ROCR) #pr <- prediction(pred_lgbm, y[-adt_index]) #prf <- performance(pr, "tpr", "fpr") #plot(prf) #auc <- performance(pr, "auc") #(auc <- auc@y.values[[1]]) library(knitr) kable(lgb.importance(model_lgbm)) lgb.plot.importance(lgb.importance(model_lgbm), top_n = 15) library(pryr) mem_used() #rm(adt_index, dtest, dval, dtrain, adtr, tri, y); gc() rm(dval, dtrain, idx, y); gc() mem_used() adte <- readRDS("adte.RDS") realpred <- predict(model_lgbm, adte, n = model_lgbm$best_iter) #saveRDS(realpred, "realpred.RDS") sub <- 
fread("../input/sample_submission.csv") sub$is_attributed <- round(realpred, 6) fwrite(sub, paste0("AdT_NP2Lst_NPC_", round(model_lgbm$best_score, 6), ".csv")) #realpred <- readRDS("realpred.RDS") # length(realpred) # # #tes <- fread("../input/test_supplement.csv", select = "click_id") # #tes$pred <- realpred # #range(tes$click_id) # tes <- data.table(click_id = teid, realpred = realpred) # cir <- fread("../input/test_click_id_relation.csv") # head(cir) # setkey(tes, click_id) # setkey(cir, click_id.testsup) # result <- tes[cir] # head(result) # #result <- setorder(result, click_id) # #head(result) # #tes[click_id %in% c(21290592, 21290658, 21290705, 21290785, 21290822, 21290876)] # #tail(result) # #tes[click_id %in% c(54584253:54584258)] # result <- setorder(result, click_id.test) # #cir <- setorder(cir, click_id.testsup) # #cir[, pred := tes$realpred[tes$click_id %in% cir$click_id.testsup]] # #cir[click_id.testsup %in% c(21290878, 21290876, 21290880, 21290882)] # #cir <- setorder(cir, click_id.test) # # sub <- fread("../input/sample_submission.csv") # sub$is_attributed <- round(result$realpred, 6) # head(sub) # fwrite(sub, paste0("AdT_T_TS_", round(model_lgbm$best_score, 6), ".csv"))
/AdT/Code/Fail/todo.R
permissive
LegenDad/KaggleUXLog
R
false
false
9,703
r
rm(list=ls()); gc() library(data.table) library(lubridate) tr <- fread("../input/train.csv") #Select Train Sizes #set.seed(777) #tr <- tr[sample(.N, 50e6), ] #test dataset change te <- fread("../input/test.csv") # te <- fread("../input/test_supplement.csv") tr <- setorder(tr, click_time, is_attributed) tr <- tr[, -"attributed_time"] tri <- 1:nrow(tr) # te <- setorder(te, click_time, click_id) # te <- te[634:nrow(te)] #tr-te gap # teid <- te$click_id te <- te[, -"click_id"] adt <- rbind(tr, te, fill = T) rm(tr, te); gc() #### 1st Saving Point ##### saveRDS(adt, "adt_1st.RDS") saveRDS(tri, "tri_1st.RDS") #saveRDS(teid, "teid.RDS") adt[, click_hour := hour(adt$click_time)] adt[, click_weekd := wday(adt$click_time)] adt$click_time <- as.numeric(ymd_hms(adt$click_time)) head(adt) adt[, ip_hw := .N, by = list(ip, click_hour, click_weekd)] adt[, ip_app := .N, by = list(ip, app)] adt[, ip_dev := .N, by = list(ip, device)] adt[, ip_os := .N, by = list(ip, os)] adt[, ip_ch := .N, by = list(ip, channel)] adt[, ip_cnt := .N, by = ip] adt[, app_cnt := .N, by = app] adt[, dev_cnt := .N, by = device] adt[, os_cnt := .N, by = os] adt[, ch_cnt := .N, by = channel] adt[, clicker := .N, by = list(ip, device, os)] adt[, clicker_app := .N, by = list(ip, device, os, app)] adt[, clicker_N := seq(.N), by = list(ip, device, os)] adt[, clicker_app_N := seq(.N), by = list(ip, device, os, app)] adt[, app_dev := .N, by = list(app, device)] adt[, app_os := .N, by = list(app, os)] adt[, app_ch := .N, by = list(app, channel)] dim(adt) colnames(adt) #te_hourG1 <- c(4, 14, 13, 10, 9, 5) #te_hourG2 <- c(15, 11, 6) #adt$h_div <- ifelse(adt$click_hour %in% te_hourG1, 1, # ifelse(adt$click_hour %in% te_hourG2, 3, 2)) colnames(adt) adt[, clicker_Next := c(click_time[-1], NA), by = .(ip, device, os)] adt[, clicker_Next := clicker_Next - click_time, by = .(ip, device, os)] adt[is.na(clicker_Next), clicker_Next := 0] adt[, clicker_app_Next := c(click_time[-1], NA), by = .(ip, device, os, app)] adt[, 
clicker_app_Next := clicker_app_Next - click_time, by = .(ip, device, os, app)] adt[is.na(clicker_app_Next), clicker_app_Next := 0] adt[, clicker_ch_Next := c(click_time[-1], NA), by = .(ip, device, os, app, channel)] adt[, clicker_ch_Next := clicker_ch_Next - click_time, by = .(ip, device, os,app,channel)] adt[is.na(clicker_ch_Next), clicker_ch_Next := 0] adt[, clicker_prev := click_time - shift(click_time), by = .(ip, device, os)] adt[is.na(clicker_prev), clicker_prev := 0] adt[, clicker_app_prev := click_time - shift(click_time), by = .(ip, device, os, app)] adt[is.na(clicker_app_prev), clicker_app_prev := 0] adt[, clicker_ch_prev := click_time - shift(click_time), by = .(ip, device, os, app, channel)] adt[is.na(clicker_ch_prev), clicker_ch_prev := 0] adt[, clicker_Next2 := shift(click_time, 2, type = "lead", fill = 0) - click_time, by = .(ip, device, os)] adt[clicker_Next2 < 0 , clicker_Next2 := 0] adt[, clicker_app_Next2 := shift(click_time, 2, type = "lead", fill = 0) - click_time, by = .(ip, device, os, app)] adt[clicker_app_Next2 < 0 , clicker_app_Next2 := 0] adt[, clicker_ch_Next2 := shift(click_time, 2, type = "lead", fill = 0) - click_time, by = .(ip, device, os, app, channel)] adt[clicker_ch_Next2 < 0 , clicker_ch_Next2 := 0] adt[, clicker_prev2 := click_time - shift(click_time, 2), by = .(ip, device, os)] adt[is.na(clicker_prev2), clicker_prev2 := 0] adt[, clicker_app_prev2 := click_time - shift(click_time, 2), by = .(ip, device, os, app)] adt[is.na(clicker_app_prev2), clicker_app_prev2 := 0] adt[, clicker_ch_prev2 := click_time - shift(click_time, 2), by = .(ip, device, os, app, channel)] adt[is.na(clicker_ch_prev2), clicker_ch_prev2 := 0] adt[, clicker_Last := max(click_time), by = .(ip, device, os)] adt[, clicker_app_Last := max(click_time), by = .(ip, device, os, app)] adt[, clicker_ch_Last := max(click_time), by = .(ip, device, os, app, channel)] ##### 2nd Saving Point ##### saveRDS(adt, "adt_2nd.RDS") #adt[, clicker_Next := shift(click_time, 1, 
type = "lead", fill = 0) - click_time, # by = .(ip, device, os)] #adt[, clicker_app_Next := shift(click_time, 1, type = "lead", fill = 0) - click_time, # by = .(ip, device, os, app)] #adt[, clicker_ch_Next := shift(click_time, 1, type = "lead", fill = 0) - click_time, # by = .(ip, device, os, app, channel)] #adt$clicker_Next <- ifelse(adt$clicker_Next < 0 , 0 , adt$clicker_Next) #adt$clicker_app_Next <- ifelse(adt$clicker_app_Next <0 , 0 , adt$clicker_app_Next) #adt$clicker_ch_Next <- ifelse(adt$clicker_ch_Next <0 , 0 , adt$clicker_ch_Next) # adt[, clicker_Nmean := as.integer(mean(clicker_Next)), by = .(ip, device, os)] # adt[, clicker_app_Nmean := as.integer(mean(clicker_app_Next)), # by = .(ip, device, os,app)] # adt[, clicker_ch_Nmean := as.integer(mean(clicker_ch_Next)), # by = .(ip, device, os, app, channel)] # adt[, clicker_Pmean := as.integer(mean(clicker_prev)), by = .(ip, device, os)] # adt[, clicker_app_Pmean := as.integer(mean(clicker_app_prev)), # by = .(ip, device, os,app)] # adt[, clicker_ch_Pmean := as.integer(mean(clicker_ch_prev)), # by = .(ip, device, os, app, channel)] # # adt[, clicker_Nmed := as.integer(median(clicker_Next)), by = .(ip, device, os)] # adt[, clicker_app_Nmed := as.integer(median(clicker_app_Next)), by = .(ip, device, os,app)] # adt[, clicker_ch_Nmed := as.integer(median(clicker_ch_Next)), by = .(ip, device, os, app, channel)] # adt[, clicker_Pmed := as.integer(median(clicker_prev)), by = .(ip, device, os)] # adt[, clicker_app_Pmed := as.integer(median(clicker_app_prev)), by = .(ip, device, os,app)] # adt[, clicker_ch_Pmed := as.integer(median(clicker_ch_prev)), by = .(ip, device, os, app, channel)] colnames(adt) ##### 3rd Saving Point ##### #saveRDS(adt, "adt_3rd.RDS") library(caret) set.seed(777) y <- adt[tri]$is_attributed idx <- createDataPartition(y, p= 0.9, list = F) #adt_index <- createDataPartition(y, p = 0.7, list = F) #tri <- createDataPartition(y[adt_index], p = 0.9, list = F) cat_f <- c("app", "device", "os", 
"channel", "click_hour") adtr <- adt[, -c("ip", "click_time", "is_attributed")] library(pryr) mem_used() rm(adt); gc() mem_used() library(lightgbm) #dtrain <- lgb.Dataset(data = as.matrix(adtr[adt_index,][tri,]), # label = y[adt_index][tri], # categorical_feature = cat_f) #dval <- lgb.Dataset(data = as.matrix(adtr[adt_index,][-tri,]), # label = y[adt_index][-tri], # categorical_feature = cat_f) #dtest <- as.matrix(adtr[-adt_index,]) adte <- as.matrix(adtr[-tri]) saveRDS(adte, "adte.RDS") rm(adte); gc() dtrain <- lgb.Dataset(data = as.matrix(adtr[tri][idx,]), label = y[idx], categorical_feature = cat_f) dval <- lgb.Dataset(data = as.matrix(adtr[tri][-idx,]), label = y[-idx], categorical_feature = cat_f) rm(adtr); gc() mem_used() params = list(objective = "binary", metric = "auc", boosting = "gbdt", learning_rate= 0.1, num_leaves= 7, max_depth= 3, #change:4 to 3 min_child_samples= 100, max_bin= 100, subsample= 0.7, subsample_freq= 1, colsample_bytree= 0.7, #change : 0.7 to 0.9 min_child_weight= 0, min_split_gain= 0, scale_pos_weight=99.7 #change : 99.7 to 200 ) model_lgbm <- lgb.train(params, dtrain, valids = list(validation = dval), nthread = 8, nrounds = 1500, verbose = 1, early_stopping_rounds = 120, eval_freq = 10) model_lgbm$best_score model_lgbm$best_iter #pred_lgbm <- predict(model_lgbm, dtest, n = model_lgbm$best_iter) #pred_lgbm2 <- ifelse(pred_lgbm>0.8, 1, 0) #confusionMatrix(as.factor(pred_lgbm2), as.factor(y[-adt_index])) #library(ROCR) #pr <- prediction(pred_lgbm, y[-adt_index]) #prf <- performance(pr, "tpr", "fpr") #plot(prf) #auc <- performance(pr, "auc") #(auc <- auc@y.values[[1]]) library(knitr) kable(lgb.importance(model_lgbm)) lgb.plot.importance(lgb.importance(model_lgbm), top_n = 15) library(pryr) mem_used() #rm(adt_index, dtest, dval, dtrain, adtr, tri, y); gc() rm(dval, dtrain, idx, y); gc() mem_used() adte <- readRDS("adte.RDS") realpred <- predict(model_lgbm, adte, n = model_lgbm$best_iter) #saveRDS(realpred, "realpred.RDS") sub <- 
fread("../input/sample_submission.csv") sub$is_attributed <- round(realpred, 6) fwrite(sub, paste0("AdT_NP2Lst_NPC_", round(model_lgbm$best_score, 6), ".csv")) #realpred <- readRDS("realpred.RDS") # length(realpred) # # #tes <- fread("../input/test_supplement.csv", select = "click_id") # #tes$pred <- realpred # #range(tes$click_id) # tes <- data.table(click_id = teid, realpred = realpred) # cir <- fread("../input/test_click_id_relation.csv") # head(cir) # setkey(tes, click_id) # setkey(cir, click_id.testsup) # result <- tes[cir] # head(result) # #result <- setorder(result, click_id) # #head(result) # #tes[click_id %in% c(21290592, 21290658, 21290705, 21290785, 21290822, 21290876)] # #tail(result) # #tes[click_id %in% c(54584253:54584258)] # result <- setorder(result, click_id.test) # #cir <- setorder(cir, click_id.testsup) # #cir[, pred := tes$realpred[tes$click_id %in% cir$click_id.testsup]] # #cir[click_id.testsup %in% c(21290878, 21290876, 21290880, 21290882)] # #cir <- setorder(cir, click_id.test) # # sub <- fread("../input/sample_submission.csv") # sub$is_attributed <- round(result$realpred, 6) # head(sub) # fwrite(sub, paste0("AdT_T_TS_", round(model_lgbm$best_score, 6), ".csv"))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calculate_vp.R \name{calculate_vp} \alias{calculate_vp} \title{Calculate a vertical profile (\code{vp}) from a polar volume (\code{pvol})} \usage{ calculate_vp(file, vpfile = "", pvolfile_out = "", autoconf = FALSE, verbose = FALSE, mount = dirname(file[1]), sd_vvp_threshold = 2, rcs = 11, dual_pol = FALSE, rho_hv = 0.95, elev_min = 0, elev_max = 90, azim_min = 0, azim_max = 360, range_min = 5000, range_max = 35000, n_layer = 20L, h_layer = 200, dealias = TRUE, nyquist_min = if (dealias) 5 else 25, dbz_quantity = "DBZH", local_install, pvolfile) } \arguments{ \item{file}{string or a vector of strings with radar file(s) for a radar polar volume. Provide either a single file containing a polar volume, or multiple files with single scans/sweeps. Data format should be either \href{https://github.com/adokter/vol2bird/blob/master/doc/OPERA2014_O4_ODIM_H5-v2.2.pdf}{ODIM} format, which is the implementation of the OPERA data information model in \href{https://support.hdfgroup.org/HDF5/}{HDF5} format, or a format supported by the \href{http://trmm-fc.gsfc.nasa.gov/trmm_gv/software/rsl/}{RSL library}, or Vaisala IRIS (IRIS RAW) format.} \item{vpfile}{character. Filename for the vertical profile to be generated in ODIM HDF5 format (optional).} \item{pvolfile_out}{character. Filename for the polar volume to be generated in ODIM HDF5 format (optional, e.g. for converting RSL formats to ODIM).} \item{autoconf}{logical. When TRUE, default optimal configuration settings are selected automatically, and other user settings are ignored.} \item{verbose}{logical. When TRUE, pipe Docker stdout to R console. On Windows always TRUE.} \item{mount}{character. String with the mount point (a directory path) for the Docker container.} \item{sd_vvp_threshold}{numeric. Lower threshold in radial velocity standard deviation (profile quantity \code{sd_vvp}) in m/s. 
Biological signals with \code{sd_vvp} < \code{sd_vvp_threshold} are set to zero.} \item{rcs}{numeric. Radar cross section per bird in cm^2.} \item{dual_pol}{logical. When \code{TRUE} use dual-pol mode, in which meteorological echoes are filtered using the correlation coefficient \code{rho_hv}. When \code{FALSE} use single polarization mode based only on reflectivity and radial velocity quantities.} \item{rho_hv}{numeric. Lower threshold in correlation coefficient used to filter meteorological scattering.} \item{elev_min}{numeric. Minimum scan elevation in degrees.} \item{elev_max}{numeric. Maximum scan elevation in degrees.} \item{azim_min}{numeric. Minimum azimuth in degrees clockwise from north.} \item{azim_max}{numeric. Maximum azimuth in degrees clockwise from north.} \item{range_min}{numeric. Minimum range in m.} \item{range_max}{numeric. Maximum range in m.} \item{n_layer}{numeric. Number of altitude layers in the profile.} \item{h_layer}{numeric. Width of altitude layers in meter.} \item{dealias}{logical. Whether to dealias radial velocities; this should typically be done when the scans in the polar volume have low Nyquist velocities (below 25 m/s).} \item{nyquist_min}{numeric. Minimum Nyquist velocity of scans in m/s for scans to be included in the analysis.} \item{dbz_quantity}{character. One of the available reflectivity factor quantities in the ODIM radar data format, e.g. DBZH, DBZV, TH, TV.} \item{local_install}{(optional) String with path to local vol2bird installation, see details.} \item{pvolfile}{deprecated argument renamed to \code{file}.} } \value{ A vertical profile object of class \link[=summary.vp]{vp}. When defined, output files \code{vpfile} and \code{pvolfile_out} are saved to disk. } \description{ Calculates a vertical profile of biological scatterers (vp) from a polar volume (pvol) using the algorithm \href{https://github.com/adokter/vol2bird/}{vol2bird} (Dokter et al. 2011). 
} \details{ Requires a running \href{https://www.docker.com/}{Docker} daemon (unless a local installation of vol2bird is specified with \code{local_install}). Common arguments set by users are \code{file}, \code{vpfile}, \code{autoconf} and \code{mount}. Turn on \code{autoconf} to automatically select the optimal parameters for a given radar file. The default for C-band data is to apply rain-filtering in single polarization mode, as well as dual polarization mode when available. The default for S-band data is to apply precipitation filtering in dual-polarization mode. Arguments that sometimes require non-default values are: \code{rcs}, \code{sd_vvp_threshold}, \code{range_max}, \code{dual_pol}, \code{dealias}. Other arguments are typically left at their defaults. \code{azim_min} and \code{azim_max} only affects reflectivity-derived estimates in the profile (DBZH,eta,dens), not radial-velocity derived estimates (u, v, w, ff, dd, sd_vvp), which are estimated on all azimuths at all times. \code{azim_min}, \code{azim_max} may be set to exclude an angular sector with high ground clutter. \code{range_max} may be extended up to 40,000 m for volumes with low elevations only, in order to extend coverage to higher altitudes. For altitude layers with a VVP-retrieved radial velocity standard deviation value below the threshold \code{sd_vvp_threshold}, the bird density \code{dens} is set to zero (see vertical profile \link[=summary.vp]{vp} class). This threshold might be dependent on radar processing settings. Results from validation campaigns so far indicate that 2 m/s is the best choice for this parameter for most weather radars. The algorithm has been tested and developed for altitude layers with \code{h_layer} = 200 m. 
Smaller widths are not recommended as they may cause instabilities of the volume velocity profiling (VVP) and dealiasing routines, and effectively lead to pseudo-replicated altitude data, since altitudinal patterns smaller than the beam width cannot be resolved. The default radar cross section (11 cm^2) corresponds to the average value found by Dokter et al. in a calibration campaign of a full migration autumn season in western Europe at C-band. It's value may depend on radar wavelength. \code{rcs} will scale approximately \eqn{M^{2/3}} with \code{M} the bird's mass. Using default values of \code{range_min} and \code{range_max} is recommended. Ranges closer than 5 km tend to be contaminated by ground clutter, while range gates beyond 35 km become too wide to resolve the default altitude layer width of 200 meter (see \link{beam_width}). For dealiasing, the torus mapping method by Haase et al. is used. At S-band (radar wavelength ~ 10 cm), currently only \code{dual_pol=TRUE} mode is recommended. On repeated calls of \code{calculate_vp}, the Docker container mount can be recycled from one call to the next if subsequent calls share the same \code{mount} argument. Re-mounting a Docker container takes time, therefore it is advised to choose a mountpoint that is a parent directory of all volume files to be processed, such that \code{calculate_vp} calls are as fast as possible. If you have installed the vol2bird algorithm locally (not possible on Windows) you can call vol2bird through this local installation (bypassing the Docker container), which will be faster. Simply point \code{local_install} to the path of your local vol2bird executable. Your local vol2bird executable will be called through a bash login shell. LD_LIBRARY_PATH (Linux) or DYLD_LIBRARY_PATH (Mac) should be correctly specified in your .bashrc or .bash_profile file and contain all the required shared libraries by vol2bird. See vol2bird installation pages on Github for details. 
} \examples{ # locate example polar volume file: pvolfile <- system.file("extdata", "volume.h5", package = "bioRad") # copy to a home directory with read/write permissions: file.copy(pvolfile, "~/volume.h5") # calculate the profile: \dontrun{ profile <- calculate_vp("~/volume.h5") } # clean up: file.remove("~/volume.h5") } \references{ \itemize{ \item Haase, G. and Landelius, T., 2004. Dealiasing of Doppler radar velocities using a torus mapping. Journal of Atmospheric and Oceanic Technology, 21(10), pp.1566-1573. \item Bird migration flight altitudes studied by a network of operational weather radars, Dokter et al., J. R. Soc. Interface 8 (54), pp. 30--43, 2011. \url{https://doi.org/10.1098/rsif.2010.0116} } }
/man/calculate_vp.Rd
permissive
bart1/bioRad
R
false
true
8,372
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calculate_vp.R \name{calculate_vp} \alias{calculate_vp} \title{Calculate a vertical profile (\code{vp}) from a polar volume (\code{pvol})} \usage{ calculate_vp(file, vpfile = "", pvolfile_out = "", autoconf = FALSE, verbose = FALSE, mount = dirname(file[1]), sd_vvp_threshold = 2, rcs = 11, dual_pol = FALSE, rho_hv = 0.95, elev_min = 0, elev_max = 90, azim_min = 0, azim_max = 360, range_min = 5000, range_max = 35000, n_layer = 20L, h_layer = 200, dealias = TRUE, nyquist_min = if (dealias) 5 else 25, dbz_quantity = "DBZH", local_install, pvolfile) } \arguments{ \item{file}{string or a vector of strings with radar file(s) for a radar polar volume. Provide either a single file containing a polar volume, or multiple files with single scans/sweeps. Data format should be either \href{https://github.com/adokter/vol2bird/blob/master/doc/OPERA2014_O4_ODIM_H5-v2.2.pdf}{ODIM} format, which is the implementation of the OPERA data information model in \href{https://support.hdfgroup.org/HDF5/}{HDF5} format, or a format supported by the \href{http://trmm-fc.gsfc.nasa.gov/trmm_gv/software/rsl/}{RSL library}, or Vaisala IRIS (IRIS RAW) format.} \item{vpfile}{character. Filename for the vertical profile to be generated in ODIM HDF5 format (optional).} \item{pvolfile_out}{character. Filename for the polar volume to be generated in ODIM HDF5 format (optional, e.g. for converting RSL formats to ODIM).} \item{autoconf}{logical. When TRUE, default optimal configuration settings are selected automatically, and other user settings are ignored.} \item{verbose}{logical. When TRUE, pipe Docker stdout to R console. On Windows always TRUE.} \item{mount}{character. String with the mount point (a directory path) for the Docker container.} \item{sd_vvp_threshold}{numeric. Lower threshold in radial velocity standard deviation (profile quantity \code{sd_vvp}) in m/s. 
Biological signals with \code{sd_vvp} < \code{sd_vvp_threshold} are set to zero.} \item{rcs}{numeric. Radar cross section per bird in cm^2.} \item{dual_pol}{logical. When \code{TRUE} use dual-pol mode, in which meteorological echoes are filtered using the correlation coefficient \code{rho_hv}. When \code{FALSE} use single polarization mode based only on reflectivity and radial velocity quantities.} \item{rho_hv}{numeric. Lower threshold in correlation coefficient used to filter meteorological scattering.} \item{elev_min}{numeric. Minimum scan elevation in degrees.} \item{elev_max}{numeric. Maximum scan elevation in degrees.} \item{azim_min}{numeric. Minimum azimuth in degrees clockwise from north.} \item{azim_max}{numeric. Maximum azimuth in degrees clockwise from north.} \item{range_min}{numeric. Minimum range in m.} \item{range_max}{numeric. Maximum range in m.} \item{n_layer}{numeric. Number of altitude layers in the profile.} \item{h_layer}{numeric. Width of altitude layers in meter.} \item{dealias}{logical. Whether to dealias radial velocities; this should typically be done when the scans in the polar volume have low Nyquist velocities (below 25 m/s).} \item{nyquist_min}{numeric. Minimum Nyquist velocity of scans in m/s for scans to be included in the analysis.} \item{dbz_quantity}{character. One of the available reflectivity factor quantities in the ODIM radar data format, e.g. DBZH, DBZV, TH, TV.} \item{local_install}{(optional) String with path to local vol2bird installation, see details.} \item{pvolfile}{deprecated argument renamed to \code{file}.} } \value{ A vertical profile object of class \link[=summary.vp]{vp}. When defined, output files \code{vpfile} and \code{pvolfile_out} are saved to disk. } \description{ Calculates a vertical profile of biological scatterers (vp) from a polar volume (pvol) using the algorithm \href{https://github.com/adokter/vol2bird/}{vol2bird} (Dokter et al. 2011). 
} \details{ Requires a running \href{https://www.docker.com/}{Docker} daemon (unless a local installation of vol2bird is specified with \code{local_install}). Common arguments set by users are \code{file}, \code{vpfile}, \code{autoconf} and \code{mount}. Turn on \code{autoconf} to automatically select the optimal parameters for a given radar file. The default for C-band data is to apply rain-filtering in single polarization mode, as well as dual polarization mode when available. The default for S-band data is to apply precipitation filtering in dual-polarization mode. Arguments that sometimes require non-default values are: \code{rcs}, \code{sd_vvp_threshold}, \code{range_max}, \code{dual_pol}, \code{dealias}. Other arguments are typically left at their defaults. \code{azim_min} and \code{azim_max} only affects reflectivity-derived estimates in the profile (DBZH,eta,dens), not radial-velocity derived estimates (u, v, w, ff, dd, sd_vvp), which are estimated on all azimuths at all times. \code{azim_min}, \code{azim_max} may be set to exclude an angular sector with high ground clutter. \code{range_max} may be extended up to 40,000 m for volumes with low elevations only, in order to extend coverage to higher altitudes. For altitude layers with a VVP-retrieved radial velocity standard deviation value below the threshold \code{sd_vvp_threshold}, the bird density \code{dens} is set to zero (see vertical profile \link[=summary.vp]{vp} class). This threshold might be dependent on radar processing settings. Results from validation campaigns so far indicate that 2 m/s is the best choice for this parameter for most weather radars. The algorithm has been tested and developed for altitude layers with \code{h_layer} = 200 m. 
Smaller widths are not recommended as they may cause instabilities of the volume velocity profiling (VVP) and dealiasing routines, and effectively lead to pseudo-replicated altitude data, since altitudinal patterns smaller than the beam width cannot be resolved. The default radar cross section (11 cm^2) corresponds to the average value found by Dokter et al. in a calibration campaign of a full migration autumn season in western Europe at C-band. It's value may depend on radar wavelength. \code{rcs} will scale approximately \eqn{M^{2/3}} with \code{M} the bird's mass. Using default values of \code{range_min} and \code{range_max} is recommended. Ranges closer than 5 km tend to be contaminated by ground clutter, while range gates beyond 35 km become too wide to resolve the default altitude layer width of 200 meter (see \link{beam_width}). For dealiasing, the torus mapping method by Haase et al. is used. At S-band (radar wavelength ~ 10 cm), currently only \code{dual_pol=TRUE} mode is recommended. On repeated calls of \code{calculate_vp}, the Docker container mount can be recycled from one call to the next if subsequent calls share the same \code{mount} argument. Re-mounting a Docker container takes time, therefore it is advised to choose a mountpoint that is a parent directory of all volume files to be processed, such that \code{calculate_vp} calls are as fast as possible. If you have installed the vol2bird algorithm locally (not possible on Windows) you can call vol2bird through this local installation (bypassing the Docker container), which will be faster. Simply point \code{local_install} to the path of your local vol2bird executable. Your local vol2bird executable will be called through a bash login shell. LD_LIBRARY_PATH (Linux) or DYLD_LIBRARY_PATH (Mac) should be correctly specified in your .bashrc or .bash_profile file and contain all the required shared libraries by vol2bird. See vol2bird installation pages on Github for details. 
} \examples{ # locate example polar volume file: pvolfile <- system.file("extdata", "volume.h5", package = "bioRad") # copy to a home directory with read/write permissions: file.copy(pvolfile, "~/volume.h5") # calculate the profile: \dontrun{ profile <- calculate_vp("~/volume.h5") } # clean up: file.remove("~/volume.h5") } \references{ \itemize{ \item Haase, G. and Landelius, T., 2004. Dealiasing of Doppler radar velocities using a torus mapping. Journal of Atmospheric and Oceanic Technology, 21(10), pp.1566-1573. \item Bird migration flight altitudes studied by a network of operational weather radars, Dokter et al., J. R. Soc. Interface 8 (54), pp. 30--43, 2011. \url{https://doi.org/10.1098/rsif.2010.0116} } }
library(googlesheets) library(tidyverse) library(stringr) results <- gs_title("R ladies remote") %>% gs_read table(results$Country) results = results %>% mutate(country = case_when((tolower(Country) %in% c('us','usa','u.s.a.','south bend'))|grepl('united states',tolower(Country))|(Country=='Georgia'&City=='Atlanta')~'USA', grepl('united kingdom|england|scotland|wales|reino unido',tolower(Country))|(tolower(Country)=='uk')~'UK', grepl('new zealand|nz',tolower(Country))~'New Zealand', grepl('germ|deutschland',tolower(Country))~'Germany', grepl('canada|canda',tolower(Country))~'Canada', grepl('argenti',tolower(Country))~'Argentina', grepl('netherland|holland',tolower(Country))~'Netherlands', grepl('colombi',tolower(Country))~'Colombia', tolower(Country) %in% c('spain','españa')~'Spain', tolower(Country) %in% c('italy','italia')~'Italy', tolower(Country) %in% c('norge','norway')~'Norway', tolower(Country) %in% c('brazil','brasil')~'Brazil', TRUE~str_to_title(Country))) sort(table(results$country)) prop.table(table(results$`What is your current level of R knowledge?`)) #interests = c('Learning R (for those who are new to the language)', # 'Learning new R skills, in a webinar', # 'Learning new R skills, paired with one other person or a small group', # 'Online Office hours about different topics, where you can drop in, as a question, about R, about consultant work, about a certain package, and get some help', # 'Mentoring for folks at different career stages/experience levels', # 'Talks about working remotely', # 'Talks about doing R consulting part or fulltime', # 'Talks about fighting prejudices (e.g. being a woman, being a mother, wanting to work remotely)', # 'Resume or portfolio reviews') results$`Which one(s) below would you be interested in? Select all that apply` = gsub(', ([[:upper:]])','| \\U\\1',results$`Which one(s) below would you be interested in? Select all that apply`,perl=TRUE)
/surveyresults.R
no_license
rladies/rladies_remote
R
false
false
2,659
r
library(googlesheets) library(tidyverse) library(stringr) results <- gs_title("R ladies remote") %>% gs_read table(results$Country) results = results %>% mutate(country = case_when((tolower(Country) %in% c('us','usa','u.s.a.','south bend'))|grepl('united states',tolower(Country))|(Country=='Georgia'&City=='Atlanta')~'USA', grepl('united kingdom|england|scotland|wales|reino unido',tolower(Country))|(tolower(Country)=='uk')~'UK', grepl('new zealand|nz',tolower(Country))~'New Zealand', grepl('germ|deutschland',tolower(Country))~'Germany', grepl('canada|canda',tolower(Country))~'Canada', grepl('argenti',tolower(Country))~'Argentina', grepl('netherland|holland',tolower(Country))~'Netherlands', grepl('colombi',tolower(Country))~'Colombia', tolower(Country) %in% c('spain','españa')~'Spain', tolower(Country) %in% c('italy','italia')~'Italy', tolower(Country) %in% c('norge','norway')~'Norway', tolower(Country) %in% c('brazil','brasil')~'Brazil', TRUE~str_to_title(Country))) sort(table(results$country)) prop.table(table(results$`What is your current level of R knowledge?`)) #interests = c('Learning R (for those who are new to the language)', # 'Learning new R skills, in a webinar', # 'Learning new R skills, paired with one other person or a small group', # 'Online Office hours about different topics, where you can drop in, as a question, about R, about consultant work, about a certain package, and get some help', # 'Mentoring for folks at different career stages/experience levels', # 'Talks about working remotely', # 'Talks about doing R consulting part or fulltime', # 'Talks about fighting prejudices (e.g. being a woman, being a mother, wanting to work remotely)', # 'Resume or portfolio reviews') results$`Which one(s) below would you be interested in? Select all that apply` = gsub(', ([[:upper:]])','| \\U\\1',results$`Which one(s) below would you be interested in? Select all that apply`,perl=TRUE)
library(ggplot2) # Datasets data(movies) data(EuStockMarkets) # Cleanup the movie dataset # Filter out rows with 0, negative or no budget information and remove idx <- which(movies$budget <=0 | is.na(movies$budget)) movies <- movies[-idx,] # Data genre <- rep(NA, nrow(movies)) count <- rowSums(movies[, 18:24]) genre[which(count > 1)] = "Mixed" genre[which(count < 1)] = "None" genre[which(count == 1 & movies$Action == 1)] = "Action" genre[which(count == 1 & movies$Animation == 1)] = "Animation" genre[which(count == 1 & movies$Comedy == 1)] = "Comedy" genre[which(count == 1 & movies$Drama == 1)] = "Drama" genre[which(count == 1 & movies$Documentary == 1)] = "Documentary" genre[which(count == 1 & movies$Romance == 1)] = "Romance" genre[which(count == 1 & movies$Short == 1)] = "Short" # EU dataset for Plot4 eu <- transform(data.frame(EuStockMarkets), time = time(EuStockMarkets)) # Add the Genre column to the movies2 dataframe movies$Genre <- movies$Genre <- as.factor(genre) # Plot1 plot1 <- ggplot() + geom_point(data=movies, aes(x=budget/1000000, y=rating, col=Genre), alpha= 0.5, shape=1) + xlab("Budget in Millions (USD)") + xlim(0,200) + ylab("Movie Rating") + scale_y_continuous (expand=c(0,0.1), breaks=c(seq(0,10,2))) + ggtitle("Plot1: Rating Vs Budget") + theme(text=element_text(family="Trebuchet MS"), legend.text=element_text(size=4.5), legend.title=element_text(size=6), axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), axis.text = element_text(size=6, colour="black"), plot.title = element_text(size=8), axis.title=element_text(size=6)) plot1 ggsave(file="hw1-scatter.png", height=4, width=6) # Plot2 plot2 <- ggplot(movies, aes(x=reorder(Genre, Genre, function(x) - length(x)), fill=Genre)) + geom_bar() + xlab("Genre") + ylab("Number of Movies") + scale_y_continuous(breaks=c(seq(0,1800,200)), limits=c(0,1800), expand=c(0,1)) + ggtitle("Plot2: Number of Movies by Genre") + theme(text=element_text(family="Trebuchet MS"), legend.position="none", 
#legend.title=element_text(size=7), #legend.text=element_text(size=6), axis.title.x=element_blank(), panel.grid.major.x = element_blank(), axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), plot.title = element_text(size=8, colour="black"), axis.text = element_text(size=6, colour="black"), axis.title=element_text(size=6, face="bold")) plot2 ggsave(file="hw1-bar.png", height=4, width=6) # Plot3 plot3 <- ggplot(movies, aes(x=budget/1000000, y=rating))+ geom_point(alpha=0.3, aes(fill=Genre, col=Genre), shape=1, size=1.5) + facet_wrap(~ Genre) + xlab("Budget in Millions (USD)") + xlim(0,200) + ylab("Movie Rating") + scale_y_continuous (expand=c(0,0.1), breaks=c(seq(0,10,2))) + ggtitle("Plot3: Movie Ratings by Genre") + theme(text=element_text(family="Trebuchet MS"), legend.position="none", axis.title.y=element_blank(), axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), axis.text = element_text(size=5, colour="black"), plot.title = element_text(size=8), strip.text.x = element_text(size = 6), axis.title=element_text(size=5, face="bold")) plot3 ggsave(file="hw1-multiples.png", height=4, width=6) # Plot4 plot4 <- ggplot(data=eu) + geom_line(aes(x=as.numeric(time), y=DAX, col='DAX'), size=0.25) + geom_line(aes(x=as.numeric(time), y=SMI, col='SMI'), size=0.25) + geom_line(aes(x=as.numeric(time), y=CAC, col='CAC'), size=0.25) + geom_line(aes(x=as.numeric(time), y=FTSE, col='FTSE'), size=0.25) + xlab("YEAR") + scale_x_continuous(breaks=c(seq(1992,1998,1)), limits=c(1991.48,1998.65), expand=c(0,0)) + ylab("INDEX LEVEL") + scale_y_continuous(breaks=c(seq(0,9000,2000)), limits=c(1000,8500), expand=c(0,0)) + ggtitle("Plot4: EU Financial Indices between 1991 and 1999") + theme(text=element_text(family="Trebuchet MS"), legend.title=element_blank(), legend.text=element_text(size=5), legend.position=c(0.05, 0.80), axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), axis.text = element_text(size=5, colour="black"), plot.title = element_text(size=8), 
axis.title=element_text(size=5, face="bold")) plot4 ggsave(file="hw1-multiline.png", height=4, width=8)
/homework1/code.R
no_license
manoj-v/msan622
R
false
false
4,425
r
library(ggplot2) # Datasets data(movies) data(EuStockMarkets) # Cleanup the movie dataset # Filter out rows with 0, negative or no budget information and remove idx <- which(movies$budget <=0 | is.na(movies$budget)) movies <- movies[-idx,] # Data genre <- rep(NA, nrow(movies)) count <- rowSums(movies[, 18:24]) genre[which(count > 1)] = "Mixed" genre[which(count < 1)] = "None" genre[which(count == 1 & movies$Action == 1)] = "Action" genre[which(count == 1 & movies$Animation == 1)] = "Animation" genre[which(count == 1 & movies$Comedy == 1)] = "Comedy" genre[which(count == 1 & movies$Drama == 1)] = "Drama" genre[which(count == 1 & movies$Documentary == 1)] = "Documentary" genre[which(count == 1 & movies$Romance == 1)] = "Romance" genre[which(count == 1 & movies$Short == 1)] = "Short" # EU dataset for Plot4 eu <- transform(data.frame(EuStockMarkets), time = time(EuStockMarkets)) # Add the Genre column to the movies2 dataframe movies$Genre <- movies$Genre <- as.factor(genre) # Plot1 plot1 <- ggplot() + geom_point(data=movies, aes(x=budget/1000000, y=rating, col=Genre), alpha= 0.5, shape=1) + xlab("Budget in Millions (USD)") + xlim(0,200) + ylab("Movie Rating") + scale_y_continuous (expand=c(0,0.1), breaks=c(seq(0,10,2))) + ggtitle("Plot1: Rating Vs Budget") + theme(text=element_text(family="Trebuchet MS"), legend.text=element_text(size=4.5), legend.title=element_text(size=6), axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), axis.text = element_text(size=6, colour="black"), plot.title = element_text(size=8), axis.title=element_text(size=6)) plot1 ggsave(file="hw1-scatter.png", height=4, width=6) # Plot2 plot2 <- ggplot(movies, aes(x=reorder(Genre, Genre, function(x) - length(x)), fill=Genre)) + geom_bar() + xlab("Genre") + ylab("Number of Movies") + scale_y_continuous(breaks=c(seq(0,1800,200)), limits=c(0,1800), expand=c(0,1)) + ggtitle("Plot2: Number of Movies by Genre") + theme(text=element_text(family="Trebuchet MS"), legend.position="none", 
#legend.title=element_text(size=7), #legend.text=element_text(size=6), axis.title.x=element_blank(), panel.grid.major.x = element_blank(), axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), plot.title = element_text(size=8, colour="black"), axis.text = element_text(size=6, colour="black"), axis.title=element_text(size=6, face="bold")) plot2 ggsave(file="hw1-bar.png", height=4, width=6) # Plot3 plot3 <- ggplot(movies, aes(x=budget/1000000, y=rating))+ geom_point(alpha=0.3, aes(fill=Genre, col=Genre), shape=1, size=1.5) + facet_wrap(~ Genre) + xlab("Budget in Millions (USD)") + xlim(0,200) + ylab("Movie Rating") + scale_y_continuous (expand=c(0,0.1), breaks=c(seq(0,10,2))) + ggtitle("Plot3: Movie Ratings by Genre") + theme(text=element_text(family="Trebuchet MS"), legend.position="none", axis.title.y=element_blank(), axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), axis.text = element_text(size=5, colour="black"), plot.title = element_text(size=8), strip.text.x = element_text(size = 6), axis.title=element_text(size=5, face="bold")) plot3 ggsave(file="hw1-multiples.png", height=4, width=6) # Plot4 plot4 <- ggplot(data=eu) + geom_line(aes(x=as.numeric(time), y=DAX, col='DAX'), size=0.25) + geom_line(aes(x=as.numeric(time), y=SMI, col='SMI'), size=0.25) + geom_line(aes(x=as.numeric(time), y=CAC, col='CAC'), size=0.25) + geom_line(aes(x=as.numeric(time), y=FTSE, col='FTSE'), size=0.25) + xlab("YEAR") + scale_x_continuous(breaks=c(seq(1992,1998,1)), limits=c(1991.48,1998.65), expand=c(0,0)) + ylab("INDEX LEVEL") + scale_y_continuous(breaks=c(seq(0,9000,2000)), limits=c(1000,8500), expand=c(0,0)) + ggtitle("Plot4: EU Financial Indices between 1991 and 1999") + theme(text=element_text(family="Trebuchet MS"), legend.title=element_blank(), legend.text=element_text(size=5), legend.position=c(0.05, 0.80), axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), axis.text = element_text(size=5, colour="black"), plot.title = element_text(size=8), 
axis.title=element_text(size=5, face="bold")) plot4 ggsave(file="hw1-multiline.png", height=4, width=8)
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/pca2d.R, R/pca3d-package.R, R/pca3d.R \name{pca2d} \alias{pca2d} \alias{pca3d} \alias{pca3d-package} \title{Show a three- or two-dimensional plot of a prcomp object} \usage{ pca2d(pca, components = 1:2, col = NULL, title = NULL, new = FALSE, axes.color = "black", bg = "white", radius = 1, group = NULL, shape = NULL, palette = NULL, fancy = FALSE, biplot = FALSE, biplot.vars = 5, legend = NULL, show.scale = FALSE, show.labels = FALSE, labels.col = "black", show.axes = TRUE, show.axe.titles = TRUE, axe.titles = NULL, show.plane = TRUE, show.shadows = FALSE, show.centroids = FALSE, show.group.labels = FALSE, show.ellipses = FALSE, ellipse.ci = 0.95, ...) pca3d(pca, components = 1:3, col = NULL, title = NULL, new = FALSE, axes.color = "grey", bg = "white", radius = 1, group = NULL, shape = NULL, palette = NULL, fancy = FALSE, biplot = FALSE, biplot.vars = 5, legend = NULL, show.scale = FALSE, show.labels = FALSE, labels.col = "black", show.axes = TRUE, show.axe.titles = TRUE, axe.titles = NULL, show.plane = TRUE, show.shadows = FALSE, show.centroids = FALSE, show.group.labels = FALSE, show.shapes = TRUE, show.ellipses = FALSE, ellipse.ci = 0.95) } \arguments{ \item{pca}{Either a prcomp object or a matrix with at least three columns} \item{components}{Vector of length 3 (\code{pca3d}) or 2 (\code{pca2d}) containing the components to be shown} \item{col}{Either a single value or a vector of length equal to number of rows, containing color definitions for the plot points to be shown} \item{title}{Window title} \item{new}{Use TRUE to open a new window} \item{axes.color}{Axis color This option has no effect in pca2d.} \item{bg}{Background color} \item{radius}{Scaling item for the size of points to be shown. In pca2d, this corresponds to the cex parameter.} \item{group}{either NULL or a factor of length equal to number of rows. 
Factor levels can be used to automatically generate symbols and colors for the points shown} \item{shape}{Either a single value or a character vector describing the shapes to be used when drawing data points. Allowed shapes are: sphere, tetrahaedron and cube, and may be abbreviated. In pca2d, the parameter is passed directly on to the pch option of the points() function.} \item{palette}{Specifies the color palette when colors are automatically assigned to the groups. See Details.} \item{fancy}{set \option{show.labels}, \option{show.shadows}, \option{show.centroids} and \option{show.group.labels} to TRUE.} \item{biplot}{Specify whether to show a biplot (see section \sQuote{biplots} below)} \item{biplot.vars}{Specify which loading to show on the biplot (see section \sQuote{biplots} below)} \item{legend}{If NULL, no legend will be drawn. Otherwise the value specifies the legend position in a form accepted by \code{\link{legend}} and \code{\link{legend3d}}.} \item{show.scale}{TRUE for showing a numeric scale at the edges of the plot. This option has no effect in pca2d.} \item{show.labels}{TRUE for showing labels (taken from the coordinate matrix or the prcomp object). Alternatively, a vector with labels to be shown next to the data points.} \item{labels.col}{Single value or vector describing the colors of the labels.} \item{show.axes}{TRUE to show the axes. This option has no effect in pca2d.} \item{show.axe.titles}{If TRUE, show axe titles (PC 1, PC 2 etc.) This option has no effect in pca2d.} \item{axe.titles}{A vector with two (pca2d) or three (pca3d) values containing the axe titles (corresponds to xlab and ylab in regular plot). If missing, but show.axe.titles is TRUE, axe titles will be generated automatically.} \item{show.plane}{If TRUE, show a grey horizontal plane at y = 0. 
This option has no effect in pca2d.} \item{show.shadows}{If TRUE, show a "lollipop" representation of the points on the y = 0 plane: a vertical line joining the data point with the plane and a shadow. In pca2d, for each sample at (x,y), a grey line is drawn from (x,y) to (x,0).} \item{show.centroids}{If TRUE and the group variable is defined, show cluster centroids (using apropriate group symbols) and lines from each data point to the corresponding centroid.} \item{show.group.labels}{Either TRUTH/FALSE or a vector equal to the number of unique values in the \code{group} parameter. If set, labels for each of the defined group will be shown at the group's centroid. If the value of the parameter is TRUE, then the group names will be taken from the \code{group} parameter. Otherwise, the values from this parameter will be used.} \item{show.ellipses}{A TRUTH/FALSE value indicating whether to show confidence interval ellipses or ellipsoids around each defined group} \item{ellipse.ci}{The confidence level of a pairwise confidence region for the CI. The default is 0.95, for a 95% region. This is used to control the size of the ellipse being plotted.} \item{...}{For pca2d, any further argument will be passed on to the points() function.} \item{show.shapes}{A TRUTH/FALSE value indicating whether the different symbols (shapes) for the shown data points should be plotted (default TRUE).} } \value{ Both pca2d and pca3d return invisibly a data frame which can be used to generate a legend for the figure. The data frame has as many rows as there are groups, and column with the group name, assigned color and assigned shape. } \description{ Show a three- two-dimensional plot of a prcomp object or a matrix, using different symbols and colors for groups of data } \details{ The pca3d function shows a three dimensional representation of a PCA object or any other matrix. It uses the rgl package for rendering. pca2d is the 2D counterpart. 
It creates a regular, two-dimensional plot on the standard graphic device. However, it takes exactly the same options as pca3d, such that it is easy to create 2D variants of the 3D graph. Often, PCA visualisation requires using different symbols and colors for different groups of data. pca3d() and pca2d() aim at creating reasonable defaults, such that a simple call with two parameters -- the pca object and the vector with group assignments of the samples -- is sufficient for a basic diagnosis. } \section{Biplots}{ If option \option{biplot} is TRUE, a biplot showing both the PCA results (samples) and variables is shown. This corresponds to the \code{\link{biplot}} function which works for the \code{\link{prcomp}} class objects. However, a biplot showing all variable loadings will be unreadable if the data is highly dimensional (for example, gene expression data). Therefore, the option \option{biplot.vars} specifies which variables are shown on the biplot. If \option{biplot.vars} is a vector of length larger than one, it will be interpreted as a direct selection of the variables to be shown; for example, for a \code{\link{prcomp}} object \var{pca}, the variable selection will happen through \code{pca$rotation[biplot.vars,]}. If \option{biplot.vars} is a single number, then for each of the components shown, a number of variables equal to \option{biplot.vars} with the highest absolute loadings will be shown on the biplot. } \examples{ data( metabo ) pca <- prcomp( metabo[,-1], scale.= TRUE ) pca3d( pca, group= metabo[,1] ) pca2d( pca, group= metabo[,1] ) ## a bit more fancy: ## black background, white axes, ## centroids pca3d( pca, group= metabo[,1], fancy= TRUE, bg= "black", axes.color= "white", new= TRUE ) } \keyword{PCA} \keyword{biplot} \keyword{prcomp} \keyword{princomp}
/man/pca3d-package.Rd
no_license
thomas-omahoney/pca3d
R
false
false
7,616
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/pca2d.R, R/pca3d-package.R, R/pca3d.R \name{pca2d} \alias{pca2d} \alias{pca3d} \alias{pca3d-package} \title{Show a three- or two-dimensional plot of a prcomp object} \usage{ pca2d(pca, components = 1:2, col = NULL, title = NULL, new = FALSE, axes.color = "black", bg = "white", radius = 1, group = NULL, shape = NULL, palette = NULL, fancy = FALSE, biplot = FALSE, biplot.vars = 5, legend = NULL, show.scale = FALSE, show.labels = FALSE, labels.col = "black", show.axes = TRUE, show.axe.titles = TRUE, axe.titles = NULL, show.plane = TRUE, show.shadows = FALSE, show.centroids = FALSE, show.group.labels = FALSE, show.ellipses = FALSE, ellipse.ci = 0.95, ...) pca3d(pca, components = 1:3, col = NULL, title = NULL, new = FALSE, axes.color = "grey", bg = "white", radius = 1, group = NULL, shape = NULL, palette = NULL, fancy = FALSE, biplot = FALSE, biplot.vars = 5, legend = NULL, show.scale = FALSE, show.labels = FALSE, labels.col = "black", show.axes = TRUE, show.axe.titles = TRUE, axe.titles = NULL, show.plane = TRUE, show.shadows = FALSE, show.centroids = FALSE, show.group.labels = FALSE, show.shapes = TRUE, show.ellipses = FALSE, ellipse.ci = 0.95) } \arguments{ \item{pca}{Either a prcomp object or a matrix with at least three columns} \item{components}{Vector of length 3 (\code{pca3d}) or 2 (\code{pca2d}) containing the components to be shown} \item{col}{Either a single value or a vector of length equal to number of rows, containing color definitions for the plot points to be shown} \item{title}{Window title} \item{new}{Use TRUE to open a new window} \item{axes.color}{Axis color This option has no effect in pca2d.} \item{bg}{Background color} \item{radius}{Scaling item for the size of points to be shown. In pca2d, this corresponds to the cex parameter.} \item{group}{either NULL or a factor of length equal to number of rows. 
Factor levels can be used to automatically generate symbols and colors for the points shown} \item{shape}{Either a single value or a character vector describing the shapes to be used when drawing data points. Allowed shapes are: sphere, tetrahaedron and cube, and may be abbreviated. In pca2d, the parameter is passed directly on to the pch option of the points() function.} \item{palette}{Specifies the color palette when colors are automatically assigned to the groups. See Details.} \item{fancy}{set \option{show.labels}, \option{show.shadows}, \option{show.centroids} and \option{show.group.labels} to TRUE.} \item{biplot}{Specify whether to show a biplot (see section \sQuote{biplots} below)} \item{biplot.vars}{Specify which loading to show on the biplot (see section \sQuote{biplots} below)} \item{legend}{If NULL, no legend will be drawn. Otherwise the value specifies the legend position in a form accepted by \code{\link{legend}} and \code{\link{legend3d}}.} \item{show.scale}{TRUE for showing a numeric scale at the edges of the plot. This option has no effect in pca2d.} \item{show.labels}{TRUE for showing labels (taken from the coordinate matrix or the prcomp object). Alternatively, a vector with labels to be shown next to the data points.} \item{labels.col}{Single value or vector describing the colors of the labels.} \item{show.axes}{TRUE to show the axes. This option has no effect in pca2d.} \item{show.axe.titles}{If TRUE, show axe titles (PC 1, PC 2 etc.) This option has no effect in pca2d.} \item{axe.titles}{A vector with two (pca2d) or three (pca3d) values containing the axe titles (corresponds to xlab and ylab in regular plot). If missing, but show.axe.titles is TRUE, axe titles will be generated automatically.} \item{show.plane}{If TRUE, show a grey horizontal plane at y = 0. 
This option has no effect in pca2d.} \item{show.shadows}{If TRUE, show a "lollipop" representation of the points on the y = 0 plane: a vertical line joining the data point with the plane and a shadow. In pca2d, for each sample at (x,y), a grey line is drawn from (x,y) to (x,0).} \item{show.centroids}{If TRUE and the group variable is defined, show cluster centroids (using apropriate group symbols) and lines from each data point to the corresponding centroid.} \item{show.group.labels}{Either TRUTH/FALSE or a vector equal to the number of unique values in the \code{group} parameter. If set, labels for each of the defined group will be shown at the group's centroid. If the value of the parameter is TRUE, then the group names will be taken from the \code{group} parameter. Otherwise, the values from this parameter will be used.} \item{show.ellipses}{A TRUTH/FALSE value indicating whether to show confidence interval ellipses or ellipsoids around each defined group} \item{ellipse.ci}{The confidence level of a pairwise confidence region for the CI. The default is 0.95, for a 95% region. This is used to control the size of the ellipse being plotted.} \item{...}{For pca2d, any further argument will be passed on to the points() function.} \item{show.shapes}{A TRUTH/FALSE value indicating whether the different symbols (shapes) for the shown data points should be plotted (default TRUE).} } \value{ Both pca2d and pca3d return invisibly a data frame which can be used to generate a legend for the figure. The data frame has as many rows as there are groups, and column with the group name, assigned color and assigned shape. } \description{ Show a three- two-dimensional plot of a prcomp object or a matrix, using different symbols and colors for groups of data } \details{ The pca3d function shows a three dimensional representation of a PCA object or any other matrix. It uses the rgl package for rendering. pca2d is the 2D counterpart. 
It creates a regular, two-dimensional plot on the standard graphic device. However, it takes exactly the same options as pca3d, such that it is easy to create 2D variants of the 3D graph. Often, PCA visualisation requires using different symbols and colors for different groups of data. pca3d() and pca2d() aim at creating reasonable defaults, such that a simple call with two parameters -- the pca object and the vector with group assignments of the samples -- is sufficient for a basic diagnosis. } \section{Biplots}{ If option \option{biplot} is TRUE, a biplot showing both the PCA results (samples) and variables is shown. This corresponds to the \code{\link{biplot}} function which works for the \code{\link{prcomp}} class objects. However, a biplot showing all variable loadings will be unreadable if the data is highly dimensional (for example, gene expression data). Therefore, the option \option{biplot.vars} specifies which variables are shown on the biplot. If \option{biplot.vars} is a vector of length larger than one, it will be interpreted as a direct selection of the variables to be shown; for example, for a \code{\link{prcomp}} object \var{pca}, the variable selection will happen through \code{pca$rotation[biplot.vars,]}. If \option{biplot.vars} is a single number, then for each of the components shown, a number of variables equal to \option{biplot.vars} with the highest absolute loadings will be shown on the biplot. } \examples{ data( metabo ) pca <- prcomp( metabo[,-1], scale.= TRUE ) pca3d( pca, group= metabo[,1] ) pca2d( pca, group= metabo[,1] ) ## a bit more fancy: ## black background, white axes, ## centroids pca3d( pca, group= metabo[,1], fancy= TRUE, bg= "black", axes.color= "white", new= TRUE ) } \keyword{PCA} \keyword{biplot} \keyword{prcomp} \keyword{princomp}
BITRATE = 8000 # per second BUFFER = 1/12 # seconds options(digits.secs = 9) #' @export loop <- function(next.sample, prev.time = NULL) { if (is.null(prev.time)) next.time <- Sys.time() else next.time <- prev.time + length(next.sample) / BITRATE if (next.time - (BITRATE * BUFFER) < Sys.time()) { cat('XXX Write to /dev/dsp\n') function(x) loop(x, prev.time = next.time) } }
/loop.r
no_license
tlevine/mdoow
R
false
false
401
r
BITRATE = 8000 # per second BUFFER = 1/12 # seconds options(digits.secs = 9) #' @export loop <- function(next.sample, prev.time = NULL) { if (is.null(prev.time)) next.time <- Sys.time() else next.time <- prev.time + length(next.sample) / BITRATE if (next.time - (BITRATE * BUFFER) < Sys.time()) { cat('XXX Write to /dev/dsp\n') function(x) loop(x, prev.time = next.time) } }
\name{is.chordal} \alias{peo} \alias{is.chordal} \alias{is.cycle} \alias{is.complete} \alias{simplicial.vertex} \title{ Tests for certain types of graphs. } \description{ Tests that a graph is chordal or complete. } \usage{ is.chordal(g) peo(g) simplicial.vertex(g,n) is.complete(g) is.cycle(g) } \arguments{ \item{g}{ a graph. } \item{n}{ order of the graph. } } \details{ A simplicial vertex is a vertex whose neighborhood is the complete graph. A PEO is a partial elimination ordering, a list of vertices such that each is simplicial in the remaining graph. A graph is chordal if and only if it has a peo. If the graph is not chordal, \code{peo} will return a partial list of simplicial vertices. } \value{ \code{peo} returns a list of vertices. The others return a logical indicating whether the graph is chordal/complete etc. } \references{ Douglas B. West, Introduction to Graph Theory, Prentice-Hall, 2001. } \author{ David Marchette, \email{dmarchette@gmail.com} } \note{ If the graph is not chordal, \code{peo} still returns a list of vertices. This list will be less than the order of the graph, and is not unique (an isomorphic graph may well return a different list). All that can be assumed is that if the graph is not chordal, the list will not have length the order of the graph. } \keyword{ graphs }
/man/is.chordal.Rd
no_license
cran/mfr
R
false
false
1,322
rd
\name{is.chordal} \alias{peo} \alias{is.chordal} \alias{is.cycle} \alias{is.complete} \alias{simplicial.vertex} \title{ Tests for certain types of graphs. } \description{ Tests that a graph is chordal or complete. } \usage{ is.chordal(g) peo(g) simplicial.vertex(g,n) is.complete(g) is.cycle(g) } \arguments{ \item{g}{ a graph. } \item{n}{ order of the graph. } } \details{ A simplicial vertex is a vertex whose neighborhood is the complete graph. A PEO is a partial elimination ordering, a list of vertices such that each is simplicial in the remaining graph. A graph is chordal if and only if it has a peo. If the graph is not chordal, \code{peo} will return a partial list of simplicial vertices. } \value{ \code{peo} returns a list of vertices. The others return a logical indicating whether the graph is chordal/complete etc. } \references{ Douglas B. West, Introduction to Graph Theory, Prentice-Hall, 2001. } \author{ David Marchette, \email{dmarchette@gmail.com} } \note{ If the graph is not chordal, \code{peo} still returns a list of vertices. This list will be less than the order of the graph, and is not unique (an isomorphic graph may well return a different list). All that can be assumed is that if the graph is not chordal, the list will not have length the order of the graph. } \keyword{ graphs }
/02-Segundo Semestre - Estadística Multivariada/Proyecto/Codigos/Analisis salarial/Inciso b/Parte 2/Clasificador Salarios Campo SS.R
no_license
nicoletron770/Maestria-Computo-Estadistico
R
false
false
18,415
r
# Analysis of DNA composition for a subset of genes or genomes # Report the nucleotide fraction, GC content and asymmetries # # Aleix Lafita - October 2019 suppressPackageStartupMessages(library(dplyr)) suppressPackageStartupMessages(library(seqinr)) suppressPackageStartupMessages(library(argparse)) suppressPackageStartupMessages(library(tidyr)) suppressPackageStartupMessages(library(stringr)) suppressPackageStartupMessages(library(ggplot2)) suppressPackageStartupMessages(library(ggfortify)) suppressPackageStartupMessages(library(ggdendro)) suppressPackageStartupMessages(library(gridExtra)) theme_set(theme_bw() + theme(panel.grid = element_blank())) ###################### Argparse ############################# input = "examples/periscopes_dna.fa" prefix = "examples/periscopes_dna" # create parser object parser = ArgumentParser(description = 'Analysis of DNA composition for a subset of genes or genomes') # specify our desired options parser$add_argument("-d", "--dna", default=input, help="DNA sequences in FASTA format [default \"%(default)s\"]") parser$add_argument("-p", "--prefix", default=prefix, help="Prefix for the output files, results, plots and tables [default \"%(default)s\"]") # get command line options, if help option encountered print help and exit, # otherwise if options not found on command line then set defaults, args = parser$parse_args() dna = args$dna prefix = args$prefix ###################### Parsing ############################# # Parse the gene sequences and convert to DF genes.fa = read.fasta(dna) genes.gc = data.frame( acc=names(genes.fa), dna=unlist(getSequence(genes.fa, as.string=T)), stringsAsFactors = F ) %>% rowwise() %>%mutate( dna = toupper(dna), codon1 = paste0(unlist(strsplit(dna, ""))[seq(1,nchar(dna),3)], collapse = ""), codon2 = paste0(unlist(strsplit(dna, ""))[seq(2,nchar(dna),3)], collapse = ""), codon3 = paste0(unlist(strsplit(dna, ""))[seq(3,nchar(dna),3)], collapse = ""), ) genes.fa = NULL ###################### Calculate 
############################# nts = "ACTG" nt = strsplit(nts, "")[[1]] genes.gc = genes.gc %>% mutate(bias = 0) # Routine to calculate the amino acid frequencies for each sequence for (n in nt) { for (f in 1:3) { nf = paste0("n", n, f, "fr") nc = paste0("n", n, f) c = paste0("codon", f) genes.gc = genes.gc %>% mutate( !!nc := str_count(!!sym(c), n), !!nf := !!sym(nc) / str_count(!!sym(c)) #bias = bias - ifelse(eval(as.symbol(nf)) > 0, eval(as.symbol(nf)) * log2(eval(as.symbol(nf))), 0) ) } } # Calculate the total nucleotide frequencies genes.gc = genes.gc %>% mutate( nA = nA1 + nA2 + nA3, nT = nT1 + nT2 + nT3, nG = nG1 + nG2 + nG3, nC = nC1 + nC2 + nC3, nAfr = nA / nchar(dna), nTfr = nT / nchar(dna), nGfr = nG / nchar(dna), nCfr = nC / nchar(dna) ) # Calculate the global nucleotide frequencies (null model) nt_freq = colSums(genes.gc %>% select(paste0("n", nt)) / sum(genes.gc %>% select(paste0("n", nt)))) # Calculate the nucleotide bias for (n in nt) { nf = paste(c("n", n, "fr"), collapse = "") nb = paste(c("bias", n), collapse = "") genes.gc = genes.gc %>% rowwise() %>% mutate( !!nb := eval(as.symbol(nf)) * log2(eval(as.symbol(nf)) / nt_freq[paste0("n", n)]), bias = bias + ifelse(eval(as.symbol(nf)) > 0, eval(as.symbol(nb)), 0) ) } # Calculate some other properties derived from nt fractions genes.gc = genes.gc %>% rowwise() %>% mutate( nTotal = nA + nT + nC + nG, nGCfr = nGfr + nCfr, aAT = nAfr - nTfr, sAT = aAT / max((nAfr + nTfr), 0.00000001), aGC = nGfr - nCfr, sGC = aGC / max((nGfr + nCfr), 0.00000001), aPurPyr = (nAfr + nGfr) - (nCfr + nTfr) ) ###################### Save results ############################# write.table( genes.gc %>% select(-dna), sprintf("%s_composition.tsv", prefix), quote = F, sep = "\t", row.names = F ) ###################### Plots ############################# # Plot the distribution of GC content p = ggplot(genes.gc, aes(x = nGCfr)) + geom_step(stat = "bin", boundary = 0, binwidth = 0.01, color = "black") + xlab("Fraction of GC") + 
xlim(0,1) pdf(sprintf("%s_gc-fraction.pdf", prefix), 4, 3) plot(p) log = dev.off() # Plot the distribution of sequence bias p = ggplot(genes.gc, aes(x = bias)) + geom_step(stat = "bin", boundary = 0, binwidth = 0.01, color = "black") + #xlim(0,max(genes.gc$bias)) + xlab("Relative entropy") pdf(sprintf("%s_nt-bias.pdf", prefix), 4, 3) plot(p) log = dev.off() # Gather the amino acid compositions into a column genes.ntcomp = gather( genes.gc %>% select(nAfr, nTfr, nCfr, nGfr, acc), key, fraction, -acc ) %>% mutate(nt = substring(key, 2, 2)) # Plot the distribution of each nucleotide fraction p = ggplot(genes.ntcomp, aes(x = nt, y = fraction)) + geom_jitter(alpha = 0.1) + geom_violin(alpha = 0, scale = "width") + geom_hline(yintercept = 0.25, alpha = 0.3) + ylab("Nucleotide fraction") + ylim(0, max(genes.ntcomp$fraction) + 0.05) + xlab("") pdf(sprintf("%s_nt-fraction.pdf", prefix), 6, 6) plot(p) log = dev.off() # Gather compositon for each codon genes.codons = genes.gc %>% select(acc, nA1fr, nA2fr, nA3fr, nT1fr, nT2fr, nT3fr, nG1fr, nG2fr, nG3fr, nC1fr, nC2fr, nC3fr) %>% gather(key, fraction, -acc) %>% mutate( nt = substring(key, 2, 2), frame = substring(key, 3, 3), ) # Plot the distribution of each nucleotide fraction p = ggplot(genes.codons, aes(x = frame, y = fraction)) + geom_jitter(alpha = 0.1) + geom_violin(alpha = 0, scale = "width") + facet_grid(~nt) + geom_hline(yintercept = 0.25, alpha = 0.3) + ylab("Nucleotide fraction") + ylim(0, max(genes.codons$fraction) + 0.05) + xlab("Codon position") pdf(sprintf("%s_nt-codons.pdf", prefix), 10, 5) plot(p) log = dev.off() # Gather the other properties into a column genes.ntasymm = gather( genes.gc %>% select(aAT, sAT, aGC, sGC, aPurPyr, acc), nt, fraction, -acc ) # Plot the distribution of nucleotide asymmetry and skew p = ggplot(genes.ntasymm, aes(x = nt, y = fraction)) + geom_jitter(alpha = 0.1) + geom_violin(alpha = 0, scale = "width") + geom_hline(yintercept = 0, alpha = 0.5) + xlab("") + ylab("Nucleotide asymmetry 
(X-Y) and skew (X-Y)/(X+Y)") + ylim(-1, 1) pdf(sprintf("%s_nt-asymmetry.pdf", prefix), 6, 6) plot(p) log = dev.off() ###################### PCA ############################# genes.pca = genes.gc %>% select(nAfr, nTfr, nGfr, nCfr) names(genes.pca) = c("A", "T", "G", "C") genes.pca.calc = prcomp(genes.pca, center = TRUE, scale. = TRUE) #pdf(sprintf("%s_pca_var.pdf", prefix), 5, 4) #plot(genes.pca.calc, type = 'l') #dev.off() p = autoplot( genes.pca.calc, data = genes.pca.calc, #colour = 'type', loadings = TRUE, loadings.colour = 'black', loadings.label = TRUE, loadings.label.size = 4, loadings.label.colour = "black", loadings.label.vjust = 2, size = 1, alpha = 0.3 ) pdf(sprintf("%s_nt-pca.pdf", prefix), 5, 5) print(p) log = dev.off() ###################### Dendrogram ############################# genes.matrix = genes.pca rownames(genes.matrix) = genes.gc$acc genes.dendro = as.dendrogram(hclust(d = dist(x = genes.matrix))) gene.order = order.dendrogram(genes.dendro) p1 = ggplot(data = genes.ntcomp, aes(x = gsub("n", "", gsub("fr", "", nt)), y = factor(acc, levels = genes.gc$acc[gene.order]))) + geom_tile(aes(fill = fraction)) + scale_fill_gradient(name = "Fraction", low = "white", high = "black") + theme( panel.border = element_blank(), #axis.text.y = element_blank(), axis.title.y = element_blank(), axis.title.x = element_blank(), #axis.ticks.y = element_blank(), legend.position = "left" ) + scale_x_discrete(expand = c(0, 0)) p2 = ggdendrogram(genes.dendro, rotate = FALSE, size = 1) + theme( panel.border = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank() ) + coord_flip() + scale_y_continuous(expand = c(0, 0)) + scale_x_continuous(expand = c(0.005, 0.005)) # Get the ggplot grobs gp1 = ggplotGrob(p1) gp2 = ggplotGrob(p2) # Disable for now #pdf(sprintf("%s_nt-dendro.pdf", prefix), 10, 10 + nrow(genes.gc) / 50) # Combine the two plots #grid.arrange(gp1, gp2, ncol = 2) #log = dev.off() ###################### Expected aa composition 
############################# # Create all possible codons nt.double = merge(nt, nt) nt.triple = merge(nt.double, nt, by =c()) names(nt.triple) = c("x", "y", "z") codons = nt.triple %>% mutate(codon = paste0(x, y, z)) %>% gather(pos, nt, -codon) # Create the matrix of all codons per gene and calculate theoretical fraction genes.codons = genes.matrix %>% ungroup() %>% mutate(acc = rownames(genes.matrix)) %>% gather(nt, frac, -acc) %>% merge(codons) %>% group_by(codon, acc) %>% summarise( prob = prod(frac) ) %>% mutate(aa = translate(unlist(strsplit(codon, "")))) %>% group_by(acc, aa) %>% summarise(prob = sum(prob)) genes.codons.table = genes.codons %>% filter(aa != "*") %>% spread(aa, prob) write.table( genes.codons.table, sprintf("%s_expected-aa.tsv", prefix), quote = F, sep = "\t", row.names = F ) # Plot the distribution of each nucleotide fraction p = ggplot(genes.codons %>% filter(aa != "*"), aes(x = aa, y = prob)) + geom_jitter(alpha = 0.1) + geom_violin(alpha = 0, scale = "width") + ylab("Expected AA fraction") + xlab("") + ylim(-0.01, max(genes.codons$prob) + 0.01) pdf(sprintf("%s_expected-aa.pdf", prefix), 10, 6) plot(p) log = dev.off()
/composition_dna.R
permissive
bateman-research/sequence-bias
R
false
false
9,696
r
# Analysis of DNA composition for a subset of genes or genomes # Report the nucleotide fraction, GC content and asymmetries # # Aleix Lafita - October 2019 suppressPackageStartupMessages(library(dplyr)) suppressPackageStartupMessages(library(seqinr)) suppressPackageStartupMessages(library(argparse)) suppressPackageStartupMessages(library(tidyr)) suppressPackageStartupMessages(library(stringr)) suppressPackageStartupMessages(library(ggplot2)) suppressPackageStartupMessages(library(ggfortify)) suppressPackageStartupMessages(library(ggdendro)) suppressPackageStartupMessages(library(gridExtra)) theme_set(theme_bw() + theme(panel.grid = element_blank())) ###################### Argparse ############################# input = "examples/periscopes_dna.fa" prefix = "examples/periscopes_dna" # create parser object parser = ArgumentParser(description = 'Analysis of DNA composition for a subset of genes or genomes') # specify our desired options parser$add_argument("-d", "--dna", default=input, help="DNA sequences in FASTA format [default \"%(default)s\"]") parser$add_argument("-p", "--prefix", default=prefix, help="Prefix for the output files, results, plots and tables [default \"%(default)s\"]") # get command line options, if help option encountered print help and exit, # otherwise if options not found on command line then set defaults, args = parser$parse_args() dna = args$dna prefix = args$prefix ###################### Parsing ############################# # Parse the gene sequences and convert to DF genes.fa = read.fasta(dna) genes.gc = data.frame( acc=names(genes.fa), dna=unlist(getSequence(genes.fa, as.string=T)), stringsAsFactors = F ) %>% rowwise() %>%mutate( dna = toupper(dna), codon1 = paste0(unlist(strsplit(dna, ""))[seq(1,nchar(dna),3)], collapse = ""), codon2 = paste0(unlist(strsplit(dna, ""))[seq(2,nchar(dna),3)], collapse = ""), codon3 = paste0(unlist(strsplit(dna, ""))[seq(3,nchar(dna),3)], collapse = ""), ) genes.fa = NULL ###################### Calculate 
############################# nts = "ACTG" nt = strsplit(nts, "")[[1]] genes.gc = genes.gc %>% mutate(bias = 0) # Routine to calculate the amino acid frequencies for each sequence for (n in nt) { for (f in 1:3) { nf = paste0("n", n, f, "fr") nc = paste0("n", n, f) c = paste0("codon", f) genes.gc = genes.gc %>% mutate( !!nc := str_count(!!sym(c), n), !!nf := !!sym(nc) / str_count(!!sym(c)) #bias = bias - ifelse(eval(as.symbol(nf)) > 0, eval(as.symbol(nf)) * log2(eval(as.symbol(nf))), 0) ) } } # Calculate the total nucleotide frequencies genes.gc = genes.gc %>% mutate( nA = nA1 + nA2 + nA3, nT = nT1 + nT2 + nT3, nG = nG1 + nG2 + nG3, nC = nC1 + nC2 + nC3, nAfr = nA / nchar(dna), nTfr = nT / nchar(dna), nGfr = nG / nchar(dna), nCfr = nC / nchar(dna) ) # Calculate the global nucleotide frequencies (null model) nt_freq = colSums(genes.gc %>% select(paste0("n", nt)) / sum(genes.gc %>% select(paste0("n", nt)))) # Calculate the nucleotide bias for (n in nt) { nf = paste(c("n", n, "fr"), collapse = "") nb = paste(c("bias", n), collapse = "") genes.gc = genes.gc %>% rowwise() %>% mutate( !!nb := eval(as.symbol(nf)) * log2(eval(as.symbol(nf)) / nt_freq[paste0("n", n)]), bias = bias + ifelse(eval(as.symbol(nf)) > 0, eval(as.symbol(nb)), 0) ) } # Calculate some other properties derived from nt fractions genes.gc = genes.gc %>% rowwise() %>% mutate( nTotal = nA + nT + nC + nG, nGCfr = nGfr + nCfr, aAT = nAfr - nTfr, sAT = aAT / max((nAfr + nTfr), 0.00000001), aGC = nGfr - nCfr, sGC = aGC / max((nGfr + nCfr), 0.00000001), aPurPyr = (nAfr + nGfr) - (nCfr + nTfr) ) ###################### Save results ############################# write.table( genes.gc %>% select(-dna), sprintf("%s_composition.tsv", prefix), quote = F, sep = "\t", row.names = F ) ###################### Plots ############################# # Plot the distribution of GC content p = ggplot(genes.gc, aes(x = nGCfr)) + geom_step(stat = "bin", boundary = 0, binwidth = 0.01, color = "black") + xlab("Fraction of GC") + 
xlim(0,1) pdf(sprintf("%s_gc-fraction.pdf", prefix), 4, 3) plot(p) log = dev.off() # Plot the distribution of sequence bias p = ggplot(genes.gc, aes(x = bias)) + geom_step(stat = "bin", boundary = 0, binwidth = 0.01, color = "black") + #xlim(0,max(genes.gc$bias)) + xlab("Relative entropy") pdf(sprintf("%s_nt-bias.pdf", prefix), 4, 3) plot(p) log = dev.off() # Gather the amino acid compositions into a column genes.ntcomp = gather( genes.gc %>% select(nAfr, nTfr, nCfr, nGfr, acc), key, fraction, -acc ) %>% mutate(nt = substring(key, 2, 2)) # Plot the distribution of each nucleotide fraction p = ggplot(genes.ntcomp, aes(x = nt, y = fraction)) + geom_jitter(alpha = 0.1) + geom_violin(alpha = 0, scale = "width") + geom_hline(yintercept = 0.25, alpha = 0.3) + ylab("Nucleotide fraction") + ylim(0, max(genes.ntcomp$fraction) + 0.05) + xlab("") pdf(sprintf("%s_nt-fraction.pdf", prefix), 6, 6) plot(p) log = dev.off() # Gather compositon for each codon genes.codons = genes.gc %>% select(acc, nA1fr, nA2fr, nA3fr, nT1fr, nT2fr, nT3fr, nG1fr, nG2fr, nG3fr, nC1fr, nC2fr, nC3fr) %>% gather(key, fraction, -acc) %>% mutate( nt = substring(key, 2, 2), frame = substring(key, 3, 3), ) # Plot the distribution of each nucleotide fraction p = ggplot(genes.codons, aes(x = frame, y = fraction)) + geom_jitter(alpha = 0.1) + geom_violin(alpha = 0, scale = "width") + facet_grid(~nt) + geom_hline(yintercept = 0.25, alpha = 0.3) + ylab("Nucleotide fraction") + ylim(0, max(genes.codons$fraction) + 0.05) + xlab("Codon position") pdf(sprintf("%s_nt-codons.pdf", prefix), 10, 5) plot(p) log = dev.off() # Gather the other properties into a column genes.ntasymm = gather( genes.gc %>% select(aAT, sAT, aGC, sGC, aPurPyr, acc), nt, fraction, -acc ) # Plot the distribution of nucleotide asymmetry and skew p = ggplot(genes.ntasymm, aes(x = nt, y = fraction)) + geom_jitter(alpha = 0.1) + geom_violin(alpha = 0, scale = "width") + geom_hline(yintercept = 0, alpha = 0.5) + xlab("") + ylab("Nucleotide asymmetry 
(X-Y) and skew (X-Y)/(X+Y)") + ylim(-1, 1) pdf(sprintf("%s_nt-asymmetry.pdf", prefix), 6, 6) plot(p) log = dev.off() ###################### PCA ############################# genes.pca = genes.gc %>% select(nAfr, nTfr, nGfr, nCfr) names(genes.pca) = c("A", "T", "G", "C") genes.pca.calc = prcomp(genes.pca, center = TRUE, scale. = TRUE) #pdf(sprintf("%s_pca_var.pdf", prefix), 5, 4) #plot(genes.pca.calc, type = 'l') #dev.off() p = autoplot( genes.pca.calc, data = genes.pca.calc, #colour = 'type', loadings = TRUE, loadings.colour = 'black', loadings.label = TRUE, loadings.label.size = 4, loadings.label.colour = "black", loadings.label.vjust = 2, size = 1, alpha = 0.3 ) pdf(sprintf("%s_nt-pca.pdf", prefix), 5, 5) print(p) log = dev.off() ###################### Dendrogram ############################# genes.matrix = genes.pca rownames(genes.matrix) = genes.gc$acc genes.dendro = as.dendrogram(hclust(d = dist(x = genes.matrix))) gene.order = order.dendrogram(genes.dendro) p1 = ggplot(data = genes.ntcomp, aes(x = gsub("n", "", gsub("fr", "", nt)), y = factor(acc, levels = genes.gc$acc[gene.order]))) + geom_tile(aes(fill = fraction)) + scale_fill_gradient(name = "Fraction", low = "white", high = "black") + theme( panel.border = element_blank(), #axis.text.y = element_blank(), axis.title.y = element_blank(), axis.title.x = element_blank(), #axis.ticks.y = element_blank(), legend.position = "left" ) + scale_x_discrete(expand = c(0, 0)) p2 = ggdendrogram(genes.dendro, rotate = FALSE, size = 1) + theme( panel.border = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank() ) + coord_flip() + scale_y_continuous(expand = c(0, 0)) + scale_x_continuous(expand = c(0.005, 0.005)) # Get the ggplot grobs gp1 = ggplotGrob(p1) gp2 = ggplotGrob(p2) # Disable for now #pdf(sprintf("%s_nt-dendro.pdf", prefix), 10, 10 + nrow(genes.gc) / 50) # Combine the two plots #grid.arrange(gp1, gp2, ncol = 2) #log = dev.off() ###################### Expected aa composition 
############################# # Create all possible codons nt.double = merge(nt, nt) nt.triple = merge(nt.double, nt, by =c()) names(nt.triple) = c("x", "y", "z") codons = nt.triple %>% mutate(codon = paste0(x, y, z)) %>% gather(pos, nt, -codon) # Create the matrix of all codons per gene and calculate theoretical fraction genes.codons = genes.matrix %>% ungroup() %>% mutate(acc = rownames(genes.matrix)) %>% gather(nt, frac, -acc) %>% merge(codons) %>% group_by(codon, acc) %>% summarise( prob = prod(frac) ) %>% mutate(aa = translate(unlist(strsplit(codon, "")))) %>% group_by(acc, aa) %>% summarise(prob = sum(prob)) genes.codons.table = genes.codons %>% filter(aa != "*") %>% spread(aa, prob) write.table( genes.codons.table, sprintf("%s_expected-aa.tsv", prefix), quote = F, sep = "\t", row.names = F ) # Plot the distribution of each nucleotide fraction p = ggplot(genes.codons %>% filter(aa != "*"), aes(x = aa, y = prob)) + geom_jitter(alpha = 0.1) + geom_violin(alpha = 0, scale = "width") + ylab("Expected AA fraction") + xlab("") + ylim(-0.01, max(genes.codons$prob) + 0.01) pdf(sprintf("%s_expected-aa.pdf", prefix), 10, 6) plot(p) log = dev.off()
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969274184e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 )) result <- do.call(DLMtool::LBSPRgen,testlist) str(result)
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615837649-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
2,048
r
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969274184e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 )) result <- do.call(DLMtool::LBSPRgen,testlist) str(result)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/out.R \name{drop_incomplete_segments} \alias{drop_incomplete_segments} \title{Drop segments with NAs from the eeg_lst} \usage{ drop_incomplete_segments(x) } \arguments{ \item{x}{eeg_lst} } \value{ An eeg_lst object } \description{ Drop segments with NAs from the eeg_lst. } \seealso{ Other tidyverse-like functions: \code{\link{dplyr_verbs}}, \code{\link{eeg_bind}()}, \code{\link{eeg_slice_signal}()} } \concept{tidyverse-like functions}
/man/drop_incomplete_segments.Rd
permissive
bnicenboim/eeguana
R
false
true
518
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/out.R \name{drop_incomplete_segments} \alias{drop_incomplete_segments} \title{Drop segments with NAs from the eeg_lst} \usage{ drop_incomplete_segments(x) } \arguments{ \item{x}{eeg_lst} } \value{ An eeg_lst object } \description{ Drop segments with NAs from the eeg_lst. } \seealso{ Other tidyverse-like functions: \code{\link{dplyr_verbs}}, \code{\link{eeg_bind}()}, \code{\link{eeg_slice_signal}()} } \concept{tidyverse-like functions}
\name{rfamFamilySummary} \alias{rfamFamilySummary} \title{Get a summary describing an Rfam family } \description{ Gets a summary describing an Rfam family. The summary includes information regarding the number of sequences and species contained in the family, a brief description about the function of the family and the corresponding type of RNA. } \usage{ rfamFamilySummary(rfamFamily) } \arguments{ \item{rfamFamily}{string with an Rfam family accession or ID for which a descriptive summary should be retrieved. } } \value{ A list containing the following elements that describe the Rfam family: \item{rfamReleaseNumber }{Version of Rfam used in the query} \item{numberSequencesSeedAlignment }{Number of sequences used in the seed alignment of the Rfam family} \item{sourceSeedAlignment }{Published reference with the seed alignment of the Rfam family} \item{numberSpecies }{Number of species containing in their genomes sequences that belong to the Rfam family} \item{RNAType }{Keywords describing the type of RNA corresponding to the Rfam family} \item{numberSequencesAll }{Total number of sequences included in the Rfam family} \item{structureSource }{Description of the source for the secondary structure of the family (published, predicted) and PMID of the corresponding publication or prediction method if applicable} \item{description }{Descriptive name of the RNA included in the Rfam family } \item{rfamAccession }{Accession of the Rfam family } \item{rfamID }{ID of the Rfam family } \item{comment }{Short paragraph describing the characteristics and biological role of the Rfam family } } \references{ Ioanna Kalvari, Joanna Argasinska, Natalia Quinones-Olvera, Eric P Nawrocki, Elena Rivas, Sean R Eddy, Alex Bateman, Robert D Finn, Anton I Petrov, Rfam 13.0: shifting to a genome-centric resource for non-coding RNA families, Nucleic Acids Research, Volume 46, Issue D1, 4 January 2018, Pages D335–D342, https://doi.org/10.1093/nar/gkx1038 https://docs.rfam.org/en/latest/api.html } 
\examples{ # Get a summary for the Rfam family with ID "FMN"" rfamFamilySummary("FMN") # Get a summary for the Rfam family with accession "RF00174"" rfamFamilySummary("RF00174") # Get a brief paragraph describing the Rfam family with accession "RF00174"" rfamFamilySummary("RF00174")$comment }
/man/rfamFamilySummary.Rd
no_license
Rafael-Ayala/rfaRm
R
false
false
2,345
rd
\name{rfamFamilySummary} \alias{rfamFamilySummary} \title{Get a summary describing an Rfam family } \description{ Gets a summary describing an Rfam family. The summary includes information regarding the number of sequences and species contained in the family, a brief description about the function of the family and the corresponding type of RNA. } \usage{ rfamFamilySummary(rfamFamily) } \arguments{ \item{rfamFamily}{string with an Rfam family accession or ID for which a descriptive summary should be retrieved. } } \value{ A list containing the following elements that describe the Rfam family: \item{rfamReleaseNumber }{Version of Rfam used in the query} \item{numberSequencesSeedAlignment }{Number of sequences used in the seed alignment of the Rfam family} \item{sourceSeedAlignment }{Published reference with the seed alignment of the Rfam family} \item{numberSpecies }{Number of species containing in their genomes sequences that belong to the Rfam family} \item{RNAType }{Keywords describing the type of RNA corresponding to the Rfam family} \item{numberSequencesAll }{Total number of sequences included in the Rfam family} \item{structureSource }{Description of the source for the secondary structure of the family (published, predicted) and PMID of the corresponding publication or prediction method if applicable} \item{description }{Descriptive name of the RNA included in the Rfam family } \item{rfamAccession }{Accession of the Rfam family } \item{rfamID }{ID of the Rfam family } \item{comment }{Short paragraph describing the characteristics and biological role of the Rfam family } } \references{ Ioanna Kalvari, Joanna Argasinska, Natalia Quinones-Olvera, Eric P Nawrocki, Elena Rivas, Sean R Eddy, Alex Bateman, Robert D Finn, Anton I Petrov, Rfam 13.0: shifting to a genome-centric resource for non-coding RNA families, Nucleic Acids Research, Volume 46, Issue D1, 4 January 2018, Pages D335–D342, https://doi.org/10.1093/nar/gkx1038 https://docs.rfam.org/en/latest/api.html } 
\examples{ # Get a summary for the Rfam family with ID "FMN"" rfamFamilySummary("FMN") # Get a summary for the Rfam family with accession "RF00174"" rfamFamilySummary("RF00174") # Get a brief paragraph describing the Rfam family with accession "RF00174"" rfamFamilySummary("RF00174")$comment }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.BchronologyRun.R \name{predict.BchronologyRun} \alias{predict.BchronologyRun} \title{Predict ages of other positions for a BchronologyRun object} \usage{ \method{predict}{BchronologyRun}( object, newPositions, newPositionThicknesses = NULL, maxExtrap = 500, ... ) } \arguments{ \item{object}{Output from a run of \code{\link{Bchronology}}} \item{newPositions}{A vector of new positions at which to find ages} \item{newPositionThicknesses}{A vector of thicknesses for the above positions. Must be the same length as \code{newPositions}} \item{maxExtrap}{The maximum new of extrapolation attempts. It might be worth increasing this if you are extrapolating a long way from the other dated positions} \item{...}{Other arguments to predict (not currently supported)} } \value{ A matrix of dimension num_samples by num_positions so that each row represents a set of monotonic sample predicted ages } \description{ This function will predict the ages of new positions (usually depths) based on a previous run of the function \code{\link{Bchronology}}. It will also allow for thickness uncertainties to be included in the resulting ages, for example when the age of a particular event is desired } \seealso{ \code{\link{BchronCalibrate}}, \code{\link{Bchronology}} \code{\link{BchronRSL}}, \code{\link{BchronDensity}}, \code{\link{BchronDensityFast}} }
/man/predict.BchronologyRun.Rd
no_license
cran/Bchron
R
false
true
1,446
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.BchronologyRun.R \name{predict.BchronologyRun} \alias{predict.BchronologyRun} \title{Predict ages of other positions for a BchronologyRun object} \usage{ \method{predict}{BchronologyRun}( object, newPositions, newPositionThicknesses = NULL, maxExtrap = 500, ... ) } \arguments{ \item{object}{Output from a run of \code{\link{Bchronology}}} \item{newPositions}{A vector of new positions at which to find ages} \item{newPositionThicknesses}{A vector of thicknesses for the above positions. Must be the same length as \code{newPositions}} \item{maxExtrap}{The maximum new of extrapolation attempts. It might be worth increasing this if you are extrapolating a long way from the other dated positions} \item{...}{Other arguments to predict (not currently supported)} } \value{ A matrix of dimension num_samples by num_positions so that each row represents a set of monotonic sample predicted ages } \description{ This function will predict the ages of new positions (usually depths) based on a previous run of the function \code{\link{Bchronology}}. It will also allow for thickness uncertainties to be included in the resulting ages, for example when the age of a particular event is desired } \seealso{ \code{\link{BchronCalibrate}}, \code{\link{Bchronology}} \code{\link{BchronRSL}}, \code{\link{BchronDensity}}, \code{\link{BchronDensityFast}} }
# QMEC 2020 HW2 #Haley Carter # Save your homework as an R script file named HW2_Smith.R in your dropbox # folder (if your surname is not Smith, then replace Smith with your surname). # Note: Your entire assignment should run as a script without errors. # The following line will enable R to read a dataset of 25 Cakile edulenta # plants sampled at random from a population that grew on the beach of Lake # Michigan on the NU campus. cc <- read.csv("http://echinaceaproject.org/dataQmec1999/Cakile_data.csv") # The measurements are height (cm), number of branches, internode distance (cm), # distance to the nearest Cakile (cm), distance to the lake (m), distance to the # nearest plant (cm), and bogosity (mm Hg/cm2/sec). In addition, we noted the # presence of the distal and basal fruits, the color of each plant, and the # species of its nearest neighbor. In the Cakile_data.csv file, these data can # be found in the columns labeled Height, StemCount, InternodeDist, Dist_nr_cak, # Dist_lake_m, Dist_nr_pla, bogosity, DistalFrPresent, BasalFrPresent, # StemColor, and Nr_pla_id respectively. UserName specifies the person who # measured the plant. # 1. For the first six columns in the Cakile dataset categorize the type of data # as discrete or continuous. If the column is discrete, specify the values # present. If the column is continuous, show the range of values. Show all of # the code you used and give your answers as comments. Use complete sentences. str(cc) #All but the third and fifth columns (Height and InternodeDist) contain discrete data, #while those two columns contain continuous numeric values. levels(cc$UserName) #Column one contains six values: Alona, Becky, Christine, Countney, #Diane, and Kenny. cc$Plant_id #The second column contains integers from 1 to 196200, though most are in the teens. range(cc$Height) #The range of heights is from 8.0 to 49.7. table(cc$StemCount) #The fourth column contains integers from 1 to 18 with modes of 3, 8, and 18. 
range(cc$InternodeDist) #The range of InternodeDist is from 0 to 19. levels(cc$DistalFrPresent) #The sixth column contains presence / absence data in the form of "Y" #"N" or empty. # 2. Quantify the central tendency of these four columns of measurement data in # the Cakile dataset Height, InternodeDist, Dist_nr_cak, StemCount. Show your # code. Justify your choice of measurement for central tendency for each column # in commented full sentences. summary(cc) #Because the mean and median values are similar for each of these columns, I've noted #the mean value for each column. #The mean value for Height is 25.5. #The mean value for InternodeDist is 7.624. #The mean value for Dist_nr_cak is 13.9. #The mean value for StemCount is 7.64. # 3. Write code to determine the maximum, minimum, and mean mean height of A) # all plants that Becky measured, and B) all plants measured by someone other # than Becky. #A) The max height of plants measured by Becky was 47.4, the min was 10.5, and the mean was 24.82. max(cc[cc$UserName == "Becky", 'Height']) min(cc[cc$UserName == "Becky", 'Height']) mean(cc[cc$UserName == "Becky", 'Height']) #B) The max height of plants measured by someone other than Becky was 49.7, the min was 8, and the mean was 25.675. max(cc[cc$UserName != "Becky", 'Height']) min(cc[cc$UserName != "Becky", 'Height']) mean(cc[cc$UserName != "Becky", 'Height']) # 4. Randomization is very important when taking samples and designing # experiments. Often it is not done properly. Write a brief description of a # scientific sampling scheme that sounds pretty good but isn't truly random. # Draw from your experience or find an example in the scientific literature. # Then write a brief protocol that describes how to correct the problem and make # the sampling truly random. Two short paragraphs should suffice. #Put all the spiders in the closet in your hat. Reach in and draw a spider, measure it's weight. #Measure 10 of the 30 spiders that live in your closet. 
This is not truly random because the #most active spiders may climb to the top of your hat and be more likely to be drawn. #Capture all the 30 spiders and put each in its own tupperware container. Line up the tupperwares #Generate numbers from 1 to 30 randomly (maybe using R!) and measure those spiders. # 5. Explain why the correct randomization scheme you described in problem 4 is # valuable. #With the correct randomization scheme (randomization of both sample and assignment) you can #make inferences about cause and effect and about the population as a whole. # 6. Write R code to fulfill the following requests... # a Calculate the square root of 81. sqrt(81) #9 # b Make a vector of the following numbers: 1, 5, 6, 7, 8, 18, 21 and assign it # the name coolNums. coolNums <- c(1, 5:8, 18, 21) # c Calculate the square root of all numbers in coolNums. sqrt(coolNums) #1.000000 2.236068 2.449490 2.645751 2.828427 4.242641 4.582576 # d Randomly order the numbers in coolNums. sample(coolNums) # e Write a sequence of numbers from 1 to 2 that increments by 0.05. seq(1, 2, 0.05) # f Sample 10 numbers at random from all integers from 1 to 100. sample(1:100, 10) # g Make a subset of the Cakile dataset that includes only plants taller than 12 # cm. The subset should include two columns: the height of the plant in mm and # the person who measured it. library(dplyr) subCakile <- cc %>% subset(Height > 12) %>% select(c(UserName, Height)) # 7. Write R code to read the datafile you generated from your own # experiment that you conducted for HW1. Then write code to report the # following information for each treatment: # the number of trials, # the number of successes, # the number of failures, and # the proportion of successes. 
HWData <- read.csv("https://raw.githubusercontent.com/hscarter/QMECgitrepo/master/hw1_data.csv") numtrials <- length(HWData$trial) numtrials #50 result_db <- HWData %>% group_by(assignment) %>% summarise(results = sum(results)) result_db$proportion <- result_db$results/numtrials result_db #32% success rate with my left hand (denoted by assignment = 0) and 18% success with my right hand. # 8. State the hypothesis that you tested in #7, using both the null & # alternate versions of the statement. #There is a difference in my shuffling ability based on the hand with which I initiate the shuffling. #There is no difference in my shuffling ability based on the hand with which I initiate the shuffling.
/HW2_Carter.R
no_license
hscarter/QMECgitrepo
R
false
false
6,467
r
# QMEC 2020 HW2 #Haley Carter # Save your homework as an R script file named HW2_Smith.R in your dropbox # folder (if your surname is not Smith, then replace Smith with your surname). # Note: Your entire assignment should run as a script without errors. # The following line will enable R to read a dataset of 25 Cakile edulenta # plants sampled at random from a population that grew on the beach of Lake # Michigan on the NU campus. cc <- read.csv("http://echinaceaproject.org/dataQmec1999/Cakile_data.csv") # The measurements are height (cm), number of branches, internode distance (cm), # distance to the nearest Cakile (cm), distance to the lake (m), distance to the # nearest plant (cm), and bogosity (mm Hg/cm2/sec). In addition, we noted the # presence of the distal and basal fruits, the color of each plant, and the # species of its nearest neighbor. In the Cakile_data.csv file, these data can # be found in the columns labeled Height, StemCount, InternodeDist, Dist_nr_cak, # Dist_lake_m, Dist_nr_pla, bogosity, DistalFrPresent, BasalFrPresent, # StemColor, and Nr_pla_id respectively. UserName specifies the person who # measured the plant. # 1. For the first six columns in the Cakile dataset categorize the type of data # as discrete or continuous. If the column is discrete, specify the values # present. If the column is continuous, show the range of values. Show all of # the code you used and give your answers as comments. Use complete sentences. str(cc) #All but the third and fifth columns (Height and InternodeDist) contain discrete data, #while those two columns contain continuous numeric values. levels(cc$UserName) #Column one contains six values: Alona, Becky, Christine, Countney, #Diane, and Kenny. cc$Plant_id #The second column contains integers from 1 to 196200, though most are in the teens. range(cc$Height) #The range of heights is from 8.0 to 49.7. table(cc$StemCount) #The fourth column contains integers from 1 to 18 with modes of 3, 8, and 18. 
range(cc$InternodeDist) #The range of InternodeDist is from 0 to 19. levels(cc$DistalFrPresent) #The sixth column contains presence / absence data in the form of "Y" #"N" or empty. # 2. Quantify the central tendency of these four columns of measurement data in # the Cakile dataset Height, InternodeDist, Dist_nr_cak, StemCount. Show your # code. Justify your choice of measurement for central tendency for each column # in commented full sentences. summary(cc) #Because the mean and median values are similar for each of these columns, I've noted #the mean value for each column. #The mean value for Height is 25.5. #The mean value for InternodeDist is 7.624. #The mean value for Dist_nr_cak is 13.9. #The mean value for StemCount is 7.64. # 3. Write code to determine the maximum, minimum, and mean mean height of A) # all plants that Becky measured, and B) all plants measured by someone other # than Becky. #A) The max height of plants measured by Becky was 47.4, the min was 10.5, and the mean was 24.82. max(cc[cc$UserName == "Becky", 'Height']) min(cc[cc$UserName == "Becky", 'Height']) mean(cc[cc$UserName == "Becky", 'Height']) #B) The max height of plants measured by someone other than Becky was 49.7, the min was 8, and the mean was 25.675. max(cc[cc$UserName != "Becky", 'Height']) min(cc[cc$UserName != "Becky", 'Height']) mean(cc[cc$UserName != "Becky", 'Height']) # 4. Randomization is very important when taking samples and designing # experiments. Often it is not done properly. Write a brief description of a # scientific sampling scheme that sounds pretty good but isn't truly random. # Draw from your experience or find an example in the scientific literature. # Then write a brief protocol that describes how to correct the problem and make # the sampling truly random. Two short paragraphs should suffice. #Put all the spiders in the closet in your hat. Reach in and draw a spider, measure it's weight. #Measure 10 of the 30 spiders that live in your closet. 
This is not truly random because the #most active spiders may climb to the top of your hat and be more likely to be drawn. #Capture all the 30 spiders and put each in its own tupperware container. Line up the tupperwares #Generate numbers from 1 to 30 randomly (maybe using R!) and measure those spiders. # 5. Explain why the correct randomization scheme you described in problem 4 is # valuable. #With the correct randomization scheme (randomization of both sample and assignment) you can #make inferences about cause and effect and about the population as a whole. # 6. Write R code to fulfill the following requests... # a Calculate the square root of 81. sqrt(81) #9 # b Make a vector of the following numbers: 1, 5, 6, 7, 8, 18, 21 and assign it # the name coolNums. coolNums <- c(1, 5:8, 18, 21) # c Calculate the square root of all numbers in coolNums. sqrt(coolNums) #1.000000 2.236068 2.449490 2.645751 2.828427 4.242641 4.582576 # d Randomly order the numbers in coolNums. sample(coolNums) # e Write a sequence of numbers from 1 to 2 that increments by 0.05. seq(1, 2, 0.05) # f Sample 10 numbers at random from all integers from 1 to 100. sample(1:100, 10) # g Make a subset of the Cakile dataset that includes only plants taller than 12 # cm. The subset should include two columns: the height of the plant in mm and # the person who measured it. library(dplyr) subCakile <- cc %>% subset(Height > 12) %>% select(c(UserName, Height)) # 7. Write R code to read the datafile you generated from your own # experiment that you conducted for HW1. Then write code to report the # following information for each treatment: # the number of trials, # the number of successes, # the number of failures, and # the proportion of successes. 
HWData <- read.csv("https://raw.githubusercontent.com/hscarter/QMECgitrepo/master/hw1_data.csv") numtrials <- length(HWData$trial) numtrials #50 result_db <- HWData %>% group_by(assignment) %>% summarise(results = sum(results)) result_db$proportion <- result_db$results/numtrials result_db #32% success rate with my left hand (denoted by assignment = 0) and 18% success with my right hand. # 8. State the hypothesis that you tested in #7, using both the null & # alternate versions of the statement. #There is a difference in my shuffling ability based on the hand with which I initiate the shuffling. #There is no difference in my shuffling ability based on the hand with which I initiate the shuffling.
require(BayesTree) setwd("~/Documents/Research/Genomics") setwd("~/Research_Genomics/bart_gene/Code_Objects") source("bart_fns.R") source("bart_fns2.R") ##Set-up set.seed(20) ##TF settings tf.size1=c(10,15,20,25) tf.size2=c(10,20,30,40) ##script is set to work with this right now ##Observation settings n50=50 n100=100 n300=300 mean.tf=0 #1.4862e-05 ##generate the X matrix which will be fixed- sl sd.tf=1 #.4778817-not using just to keep larger numbers at play ##Generate Design Matrices tf.exp.300=sapply(rep(n300,max(tf.size2)),rnorm,mean=mean.tf,sd=sd.tf) #gives full matrix tf.exp.50=tf.exp.300[1:n50,] tf.exp.100=tf.exp.300[1:n100,] ##Beta settings num.tf=40 tf.beta.1=c(1,rep(0,times=max(tf.size)-1)) tf.beta.3=c(1,.5,.5,rep(0,times=num.tf-3)) #tf.beta.2=c(2,rep(0,times=max(tf.size2-1))) not really using here ##Function params n.tree.vec=c(5,10,15,20) factor.vec1=c(0,.25,.5,1,1.5,2,3.5,5,8) factor.vec2=c(.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10,11,12) ##work with this right now burn=2500 post=5000 ##Prior weight vectors rep2=c(rep(1,times=2),2:max(tf.size2)) rep5=c(rep(1,times=5),2:max(tf.size2)) rep10=c(rep(1,times=10),2:max(tf.size2)) rep50=c(rep(1,times=50),2:max(tf.size2)) ##for tf 3 rep121=c(1,2,2,3:num.tf) prior.vec=rep121 tf.size=num.tf ##Set up number of repititions and repeats in data frame rep.nums=sapply(1:tf.size,function(x) length(which(prior.vec==x))) #print(rep.nums) reps=rep.nums #print(reps) dim(train.exp) tf.exp=tf.exp.300 tf.beta=tf.beta.3 n=nrow(tf.exp) sigma=2*sum(abs(tf.exp%*%tf.beta))/n train.exp=tf.exp[,rep(1:ncol(tf.exp),reps)] gene=as.numeric(tf.exp%*%tf.beta+rnorm(n,mean=0,sd=sigma)) tf.null=tf.exp bart.true=bart(x.train=train.exp,y.train=gene,ntree=10,nskip=2000,ndpost=5000,keepevery=25) var_prop=prop_calc_prior(bart.true,rep121) boot=100 boot_mat=matrix(0,nrow=boot,ncol=ncol(tf.null)) sums=numeric(boot) for(i in 1:boot){ perm=gene[sample(1:n,n,replace=F)] 
bart.false=bart(x.train=tf.null,y.train=perm,ntree=10,nskip=1000,ndpost=5000,keepevery=25,verbose=F) boot_mat[i,]=prop_calc(bart.false) sums[i]=sum(bart.false$varcount) print(i) } boot_mat[1,] maxid=apply(boot_mat,1,which.max) maxs=apply(boot_mat,1,max) maxcut=quantile(maxs,.95) table(maxid) hist(maxs) summary(sums) mean_boot=apply(boot_mat,2,mean); boot.se=apply(boot_mat,2,sd) coverConst=bisectK(tol=.1,coverage=.95,boot_mat=boot_mat,x_left=1,x_right=20,countLimit=100) coverConst mean(sapply(1:nrow(boot_mat), function(s) all(boot_mat[s,]-mean_boot<=coverConst*boot.se))) simul_trueTFs=which(var_prop>=mean_boot+coverConst*boot.se) simul_trueTFs plot(1:40,var_prop,main="Simulated TFs at Noise of 2x\n Simultaneous",xlab="TF") points(1:3,var_prop[1:3],pch=16) abline(h=maxcut,col="red") sapply(1:ncol(tf.exp),function(s) segments(s,0,x1=s,mean_boot[s]+coverConst*boot.se[s],col="blue")) sapply(1:ncol(tf.exp),function(s) segments(s,0,x1=s,quantile(boot_mat[,s],.95),col="red"))
/Code_Objects/simult.R
no_license
jbleich89/bart_gene
R
false
false
2,930
r
require(BayesTree) setwd("~/Documents/Research/Genomics") setwd("~/Research_Genomics/bart_gene/Code_Objects") source("bart_fns.R") source("bart_fns2.R") ##Set-up set.seed(20) ##TF settings tf.size1=c(10,15,20,25) tf.size2=c(10,20,30,40) ##script is set to work with this right now ##Observation settings n50=50 n100=100 n300=300 mean.tf=0 #1.4862e-05 ##generate the X matrix which will be fixed- sl sd.tf=1 #.4778817-not using just to keep larger numbers at play ##Generate Design Matrices tf.exp.300=sapply(rep(n300,max(tf.size2)),rnorm,mean=mean.tf,sd=sd.tf) #gives full matrix tf.exp.50=tf.exp.300[1:n50,] tf.exp.100=tf.exp.300[1:n100,] ##Beta settings num.tf=40 tf.beta.1=c(1,rep(0,times=max(tf.size)-1)) tf.beta.3=c(1,.5,.5,rep(0,times=num.tf-3)) #tf.beta.2=c(2,rep(0,times=max(tf.size2-1))) not really using here ##Function params n.tree.vec=c(5,10,15,20) factor.vec1=c(0,.25,.5,1,1.5,2,3.5,5,8) factor.vec2=c(.5,1,1.5,2,2.5,3,3.5,4,4.5,5,6,7,8,9,10,11,12) ##work with this right now burn=2500 post=5000 ##Prior weight vectors rep2=c(rep(1,times=2),2:max(tf.size2)) rep5=c(rep(1,times=5),2:max(tf.size2)) rep10=c(rep(1,times=10),2:max(tf.size2)) rep50=c(rep(1,times=50),2:max(tf.size2)) ##for tf 3 rep121=c(1,2,2,3:num.tf) prior.vec=rep121 tf.size=num.tf ##Set up number of repititions and repeats in data frame rep.nums=sapply(1:tf.size,function(x) length(which(prior.vec==x))) #print(rep.nums) reps=rep.nums #print(reps) dim(train.exp) tf.exp=tf.exp.300 tf.beta=tf.beta.3 n=nrow(tf.exp) sigma=2*sum(abs(tf.exp%*%tf.beta))/n train.exp=tf.exp[,rep(1:ncol(tf.exp),reps)] gene=as.numeric(tf.exp%*%tf.beta+rnorm(n,mean=0,sd=sigma)) tf.null=tf.exp bart.true=bart(x.train=train.exp,y.train=gene,ntree=10,nskip=2000,ndpost=5000,keepevery=25) var_prop=prop_calc_prior(bart.true,rep121) boot=100 boot_mat=matrix(0,nrow=boot,ncol=ncol(tf.null)) sums=numeric(boot) for(i in 1:boot){ perm=gene[sample(1:n,n,replace=F)] 
bart.false=bart(x.train=tf.null,y.train=perm,ntree=10,nskip=1000,ndpost=5000,keepevery=25,verbose=F) boot_mat[i,]=prop_calc(bart.false) sums[i]=sum(bart.false$varcount) print(i) } boot_mat[1,] maxid=apply(boot_mat,1,which.max) maxs=apply(boot_mat,1,max) maxcut=quantile(maxs,.95) table(maxid) hist(maxs) summary(sums) mean_boot=apply(boot_mat,2,mean); boot.se=apply(boot_mat,2,sd) coverConst=bisectK(tol=.1,coverage=.95,boot_mat=boot_mat,x_left=1,x_right=20,countLimit=100) coverConst mean(sapply(1:nrow(boot_mat), function(s) all(boot_mat[s,]-mean_boot<=coverConst*boot.se))) simul_trueTFs=which(var_prop>=mean_boot+coverConst*boot.se) simul_trueTFs plot(1:40,var_prop,main="Simulated TFs at Noise of 2x\n Simultaneous",xlab="TF") points(1:3,var_prop[1:3],pch=16) abline(h=maxcut,col="red") sapply(1:ncol(tf.exp),function(s) segments(s,0,x1=s,mean_boot[s]+coverConst*boot.se[s],col="blue")) sapply(1:ncol(tf.exp),function(s) segments(s,0,x1=s,quantile(boot_mat[,s],.95),col="red"))
fluidPage(#theme=shinytheme("united"), headerPanel( HTML('Distribuciones de variables aleatorias (test para UAA) <a href="http://snap.uaf.edu" target="_blank"><img align="right" alt="SNAP Logo" src="./img/SNAP_acronym_100px.png" /></a>' ), "Distributions of Random Variables" ), fluidRow( column(4, wellPanel( radioButtons("disttype","Tipo de distribución:",list("Discreta","Continua"),selected="Discreta") ), wellPanel( uiOutput("distName") ), wellPanel( numericInput("n","Tamaño de la muestra:",1000), uiOutput("dist1"), uiOutput("dist2"), uiOutput("dist3") ), wellPanel( uiOutput("sampDens"), uiOutput("BW"), fluidRow( column(6, downloadButton("dlCurPlot", "Descargar Gráfico", class="btn-block btn-primary")), column(6, downloadButton("dldat", "Descargar Muestra", class="btn-block btn-warning")) ) ) ), column(8, tabsetPanel( tabPanel("Gráfico",plotOutput("plot", width="100%", height="auto"),verbatimTextOutput("summary")), #tabPanel("Summary",verbatimTextOutput("summary")), tabPanel("Muestra",tableOutput("table")), tabPanelAbout(), id="tsp" ) ) ) )
/Sofi/inst/Estadist/Estas/General/IU_Distribuciones_Var_Aleat.R
no_license
ingted/R-Examples
R
false
false
1,393
r
fluidPage(#theme=shinytheme("united"), headerPanel( HTML('Distribuciones de variables aleatorias (test para UAA) <a href="http://snap.uaf.edu" target="_blank"><img align="right" alt="SNAP Logo" src="./img/SNAP_acronym_100px.png" /></a>' ), "Distributions of Random Variables" ), fluidRow( column(4, wellPanel( radioButtons("disttype","Tipo de distribución:",list("Discreta","Continua"),selected="Discreta") ), wellPanel( uiOutput("distName") ), wellPanel( numericInput("n","Tamaño de la muestra:",1000), uiOutput("dist1"), uiOutput("dist2"), uiOutput("dist3") ), wellPanel( uiOutput("sampDens"), uiOutput("BW"), fluidRow( column(6, downloadButton("dlCurPlot", "Descargar Gráfico", class="btn-block btn-primary")), column(6, downloadButton("dldat", "Descargar Muestra", class="btn-block btn-warning")) ) ) ), column(8, tabsetPanel( tabPanel("Gráfico",plotOutput("plot", width="100%", height="auto"),verbatimTextOutput("summary")), #tabPanel("Summary",verbatimTextOutput("summary")), tabPanel("Muestra",tableOutput("table")), tabPanelAbout(), id="tsp" ) ) ) )
############################## #R script to generate a modelbuilder model object with code. #This file was generated on 2023-05-08 15:16:21.930296 ############################## mbmodel = list() #create empty list #Model meta-information mbmodel$title = 'Complex ID Control Scenarios' mbmodel$description = 'The basic SIR model is expanded to include vectors.' mbmodel$author = ' Alexis Vittengl' mbmodel$date = Sys.Date() mbmodel$details = 'The model includes susceptible, infected, asymptomatic, presymptomactic, pathogens, susceptible vectors, infected vectors, recovered and death compartments. The processes that are modeled are infection, recovery, birth, death, and immunity.' #Information for all variables var = vector('list',9) id = 0 id = id + 1 var[[id]]$varname = 'S' var[[id]]$vartext = 'Susceptible Host' var[[id]]$varval = 1000 var[[id]]$flows = c('+eh', '-bP*P*S', '-bA*A*S', '-bI*I*S', '-bE*E*S', '+w*R', '-nh*S') var[[id]]$flownames = c('Susceptible host enters system', 'Presymptomatic infection', 'Asymptomatic infection', 'Symptomatic infection', 'Pathogen infection', 'Loss of immunity', 'Host death') id = id + 1 var[[id]]$varname = 'P' var[[id]]$vartext = 'Infected, Presymptomatic' var[[id]]$varval = 1 var[[id]]$flows = c('+bP*P*S', '+bA*A*S', '+bI*I*S', '+bE*E*S', '+bV*IV*S', '-gP*P', '-nh*P') var[[id]]$flownames = c('Presymptomatic infection', 'Asymptomatic infection', 'Symptomatic infection', 'Pathogen infection', 'Vector infection', 'Presymptomatic recovery', 'Host death') id = id + 1 var[[id]]$varname = 'A' var[[id]]$vartext = 'Infected, Asymptomatic' var[[id]]$varval = 1 var[[id]]$flows = c('+f*gP*P', '-gA*A', '-nh*A') var[[id]]$flownames = c('Presymptomatic hosts move into the asymptomatic', 'Asymptomatic recovery', 'Host natural death') id = id + 1 var[[id]]$varname = 'I' var[[id]]$vartext = 'Infected, Symptomatic' var[[id]]$varval = 1 var[[id]]$flows = c('+gP*P', '-f*gP*P', '-gI*I', '-nh*I') var[[id]]$flownames = c('Presymptomatic recovery', 
'Presymptomatic hosts move into the asymptomatic', 'Symptomatic recovery', 'Host infection') id = id + 1 var[[id]]$varname = 'R' var[[id]]$vartext = 'Recovered' var[[id]]$varval = 0 var[[id]]$flows = c('+gA*A', '+gI*I', '-d*gI*I', '-w*R', '-nh*R') var[[id]]$flownames = c('Asymptomatic recovery', 'Symptomatic recovery', 'Host death due to infection', 'Loss of immunity', 'Host natural death') id = id + 1 var[[id]]$varname = 'D' var[[id]]$vartext = 'Deaths' var[[id]]$varval = 0 var[[id]]$flows = c('+d*gI*I') var[[id]]$flownames = c('Host death due to infection') id = id + 1 var[[id]]$varname = 'E' var[[id]]$vartext = 'Pathogen in the environment' var[[id]]$varval = 0 var[[id]]$flows = c('+pI*I', '+pA*A', '-c*E') var[[id]]$flownames = c('Symptomatic host pathogen shed', 'Asymptomatic host pathogen shed', 'Pathogen decay') id = id + 1 var[[id]]$varname = 'SV' var[[id]]$vartext = 'Susceptible Vectors' var[[id]]$varval = 100 var[[id]]$flows = c('+eV', '-bh*I*SV', '-nV*SV') var[[id]]$flownames = c('Susceptible vector enter system', 'Host infection', 'Vector natural death') id = id + 1 var[[id]]$varname = 'IV' var[[id]]$vartext = 'Infectious Vectors' var[[id]]$varval = 1 var[[id]]$flows = c('+bh*I*SV', '-nV*IV') var[[id]]$flownames = c('Host infection', 'Vector natural Death ') mbmodel$var = var #Information for all parameters par = vector('list',19) id = 0 id = id + 1 par[[id]]$parname = 'bP' par[[id]]$partext = 'Presymptomatic infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'bA' par[[id]]$partext = 'Asymptomatic infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'bI' par[[id]]$partext = 'Symptomatic infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'bE' par[[id]]$partext = 'Pathogen infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'bV' par[[id]]$partext = 'Vector infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'bh' par[[id]]$partext = 'Host natural 
infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'nV' par[[id]]$partext = 'Vector natural death rate' par[[id]]$parval = 0.02 id = id + 1 par[[id]]$parname = 'nh' par[[id]]$partext = 'Host death rate' par[[id]]$parval = 0.02 id = id + 1 par[[id]]$parname = 'gP' par[[id]]$partext = 'Presymptomatic recovery rate' par[[id]]$parval = 0.05 id = id + 1 par[[id]]$parname = 'gA' par[[id]]$partext = 'Asymptomatic recovery rate' par[[id]]$parval = 0.05 id = id + 1 par[[id]]$parname = 'gI' par[[id]]$partext = 'Symptomatic recovery rate' par[[id]]$parval = 0.05 id = id + 1 par[[id]]$parname = 'pI' par[[id]]$partext = 'Symptomatic host pathogen shed rate' par[[id]]$parval = 0.01 id = id + 1 par[[id]]$parname = 'pA' par[[id]]$partext = 'Asymptomatic host pathogen shed rate' par[[id]]$parval = 0.01 id = id + 1 par[[id]]$parname = 'c' par[[id]]$partext = 'pathogen decay rate ' par[[id]]$parval = 0.03 id = id + 1 par[[id]]$parname = 'eV' par[[id]]$partext = 'Susceptible vector enter system rate' par[[id]]$parval = 0.2 id = id + 1 par[[id]]$parname = 'eh' par[[id]]$partext = 'Susceptible host enter system rate' par[[id]]$parval = 0.2 id = id + 1 par[[id]]$parname = 'f' par[[id]]$partext = 'presymptomatic hosts move into the asymptomatic rate' par[[id]]$parval = 0.4 id = id + 1 par[[id]]$parname = 'd' par[[id]]$partext = 'host death rate due to infection' par[[id]]$parval = 0.08 id = id + 1 par[[id]]$parname = 'w' par[[id]]$partext = 'Loss of immunity rate' par[[id]]$parval = 0.005 mbmodel$par = par #Information for time parameters time = vector('list',3) id = 0 id = id + 1 time[[id]]$timename = 'tstart' time[[id]]$timetext = 'Start time of simulation' time[[id]]$timeval = 0 id = id + 1 time[[id]]$timename = 'tfinal' time[[id]]$timetext = 'Final time of simulation' time[[id]]$timeval = 100 id = id + 1 time[[id]]$timename = 'dt' time[[id]]$timetext = 'Time step' time[[id]]$timeval = 0.1 mbmodel$time = time
/auxiliary/modelfiles/other/Complex_ID_Control_Scenarios_file.R
no_license
ahgroup/modelbuilder
R
false
false
6,366
r
############################## #R script to generate a modelbuilder model object with code. #This file was generated on 2023-05-08 15:16:21.930296 ############################## mbmodel = list() #create empty list #Model meta-information mbmodel$title = 'Complex ID Control Scenarios' mbmodel$description = 'The basic SIR model is expanded to include vectors.' mbmodel$author = ' Alexis Vittengl' mbmodel$date = Sys.Date() mbmodel$details = 'The model includes susceptible, infected, asymptomatic, presymptomactic, pathogens, susceptible vectors, infected vectors, recovered and death compartments. The processes that are modeled are infection, recovery, birth, death, and immunity.' #Information for all variables var = vector('list',9) id = 0 id = id + 1 var[[id]]$varname = 'S' var[[id]]$vartext = 'Susceptible Host' var[[id]]$varval = 1000 var[[id]]$flows = c('+eh', '-bP*P*S', '-bA*A*S', '-bI*I*S', '-bE*E*S', '+w*R', '-nh*S') var[[id]]$flownames = c('Susceptible host enters system', 'Presymptomatic infection', 'Asymptomatic infection', 'Symptomatic infection', 'Pathogen infection', 'Loss of immunity', 'Host death') id = id + 1 var[[id]]$varname = 'P' var[[id]]$vartext = 'Infected, Presymptomatic' var[[id]]$varval = 1 var[[id]]$flows = c('+bP*P*S', '+bA*A*S', '+bI*I*S', '+bE*E*S', '+bV*IV*S', '-gP*P', '-nh*P') var[[id]]$flownames = c('Presymptomatic infection', 'Asymptomatic infection', 'Symptomatic infection', 'Pathogen infection', 'Vector infection', 'Presymptomatic recovery', 'Host death') id = id + 1 var[[id]]$varname = 'A' var[[id]]$vartext = 'Infected, Asymptomatic' var[[id]]$varval = 1 var[[id]]$flows = c('+f*gP*P', '-gA*A', '-nh*A') var[[id]]$flownames = c('Presymptomatic hosts move into the asymptomatic', 'Asymptomatic recovery', 'Host natural death') id = id + 1 var[[id]]$varname = 'I' var[[id]]$vartext = 'Infected, Symptomatic' var[[id]]$varval = 1 var[[id]]$flows = c('+gP*P', '-f*gP*P', '-gI*I', '-nh*I') var[[id]]$flownames = c('Presymptomatic recovery', 
'Presymptomatic hosts move into the asymptomatic', 'Symptomatic recovery', 'Host infection') id = id + 1 var[[id]]$varname = 'R' var[[id]]$vartext = 'Recovered' var[[id]]$varval = 0 var[[id]]$flows = c('+gA*A', '+gI*I', '-d*gI*I', '-w*R', '-nh*R') var[[id]]$flownames = c('Asymptomatic recovery', 'Symptomatic recovery', 'Host death due to infection', 'Loss of immunity', 'Host natural death') id = id + 1 var[[id]]$varname = 'D' var[[id]]$vartext = 'Deaths' var[[id]]$varval = 0 var[[id]]$flows = c('+d*gI*I') var[[id]]$flownames = c('Host death due to infection') id = id + 1 var[[id]]$varname = 'E' var[[id]]$vartext = 'Pathogen in the environment' var[[id]]$varval = 0 var[[id]]$flows = c('+pI*I', '+pA*A', '-c*E') var[[id]]$flownames = c('Symptomatic host pathogen shed', 'Asymptomatic host pathogen shed', 'Pathogen decay') id = id + 1 var[[id]]$varname = 'SV' var[[id]]$vartext = 'Susceptible Vectors' var[[id]]$varval = 100 var[[id]]$flows = c('+eV', '-bh*I*SV', '-nV*SV') var[[id]]$flownames = c('Susceptible vector enter system', 'Host infection', 'Vector natural death') id = id + 1 var[[id]]$varname = 'IV' var[[id]]$vartext = 'Infectious Vectors' var[[id]]$varval = 1 var[[id]]$flows = c('+bh*I*SV', '-nV*IV') var[[id]]$flownames = c('Host infection', 'Vector natural Death ') mbmodel$var = var #Information for all parameters par = vector('list',19) id = 0 id = id + 1 par[[id]]$parname = 'bP' par[[id]]$partext = 'Presymptomatic infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'bA' par[[id]]$partext = 'Asymptomatic infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'bI' par[[id]]$partext = 'Symptomatic infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'bE' par[[id]]$partext = 'Pathogen infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'bV' par[[id]]$partext = 'Vector infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'bh' par[[id]]$partext = 'Host natural 
infection rate' par[[id]]$parval = 0.002 id = id + 1 par[[id]]$parname = 'nV' par[[id]]$partext = 'Vector natural death rate' par[[id]]$parval = 0.02 id = id + 1 par[[id]]$parname = 'nh' par[[id]]$partext = 'Host death rate' par[[id]]$parval = 0.02 id = id + 1 par[[id]]$parname = 'gP' par[[id]]$partext = 'Presymptomatic recovery rate' par[[id]]$parval = 0.05 id = id + 1 par[[id]]$parname = 'gA' par[[id]]$partext = 'Asymptomatic recovery rate' par[[id]]$parval = 0.05 id = id + 1 par[[id]]$parname = 'gI' par[[id]]$partext = 'Symptomatic recovery rate' par[[id]]$parval = 0.05 id = id + 1 par[[id]]$parname = 'pI' par[[id]]$partext = 'Symptomatic host pathogen shed rate' par[[id]]$parval = 0.01 id = id + 1 par[[id]]$parname = 'pA' par[[id]]$partext = 'Asymptomatic host pathogen shed rate' par[[id]]$parval = 0.01 id = id + 1 par[[id]]$parname = 'c' par[[id]]$partext = 'pathogen decay rate ' par[[id]]$parval = 0.03 id = id + 1 par[[id]]$parname = 'eV' par[[id]]$partext = 'Susceptible vector enter system rate' par[[id]]$parval = 0.2 id = id + 1 par[[id]]$parname = 'eh' par[[id]]$partext = 'Susceptible host enter system rate' par[[id]]$parval = 0.2 id = id + 1 par[[id]]$parname = 'f' par[[id]]$partext = 'presymptomatic hosts move into the asymptomatic rate' par[[id]]$parval = 0.4 id = id + 1 par[[id]]$parname = 'd' par[[id]]$partext = 'host death rate due to infection' par[[id]]$parval = 0.08 id = id + 1 par[[id]]$parname = 'w' par[[id]]$partext = 'Loss of immunity rate' par[[id]]$parval = 0.005 mbmodel$par = par #Information for time parameters time = vector('list',3) id = 0 id = id + 1 time[[id]]$timename = 'tstart' time[[id]]$timetext = 'Start time of simulation' time[[id]]$timeval = 0 id = id + 1 time[[id]]$timename = 'tfinal' time[[id]]$timetext = 'Final time of simulation' time[[id]]$timeval = 100 id = id + 1 time[[id]]$timename = 'dt' time[[id]]$timetext = 'Time step' time[[id]]$timeval = 0.1 mbmodel$time = time
context("test-check_augment_newdata_precedence") library(tibble) augment_always_data <- function(model, data = NULL, newdata = NULL) { as_tibble(data) } augment_correct <- function(model, data = NULL, newdata = NULL) { ret <- if (!is.null(newdata)) newdata else data as_tibble(ret) } test_that("strict = TRUE", { expect_error( check_augment_newdata_precedence( aug = augment_always_data, model = NULL, data = iris ), "Must specify either `data` or `newdata` argument." ) expect_silent( check_augment_newdata_precedence( aug = augment_correct, model = NULL, data = iris ) ) })
/tests/testthat/test-check_augment_newdata_precedence.R
permissive
alexpghayes/modeltests
R
false
false
652
r
context("test-check_augment_newdata_precedence") library(tibble) augment_always_data <- function(model, data = NULL, newdata = NULL) { as_tibble(data) } augment_correct <- function(model, data = NULL, newdata = NULL) { ret <- if (!is.null(newdata)) newdata else data as_tibble(ret) } test_that("strict = TRUE", { expect_error( check_augment_newdata_precedence( aug = augment_always_data, model = NULL, data = iris ), "Must specify either `data` or `newdata` argument." ) expect_silent( check_augment_newdata_precedence( aug = augment_correct, model = NULL, data = iris ) ) })
#Plot4 #get data #get data powerConsumptionData <- read.table("household_power_consumption.txt", header=TRUE, sep=";",dec=".",na.strings = "?") #combine date and time column powerConsumptionData$DateTime <- paste(powerConsumptionData$Date, powerConsumptionData$Time,sep=" ") #convert date column to type date powerConsumptionData$Date <- as.Date(powerConsumptionData$Date , "%d/%m/%Y") #convert datetime column to type datetime library(lubridate) powerConsumptionData$DateTime <-dmy_hms(powerConsumptionData$DateTime) #filter out unwanteddata powerConsumptionData <- subset(powerConsumptionData , Date == "2007-02-01" | Date == "2007-02-02",select=c(Date, DateTime,Global_active_power,Global_reactive_power, Voltage, Global_intensity, Sub_metering_1,Sub_metering_2,Sub_metering_3)) #created plotted png(filename = "plot4.png",width = 480, height = 480, units = "px") #create graph par(mfrow=c(2,2)) with(powerConsumptionData,{ plot(DateTime,Global_active_power, type="n", xlab="",ylab="Global Active Power (kilowatts)") lines(DateTime,Global_active_power, type="l", col="black") plot(DateTime,Voltage, type="n", xlab="",ylab="Voltage") lines(DateTime,Voltage, type="l", col="black") plot(powerConsumptionData$DateTime,powerConsumptionData$Sub_metering_1, type="n", xlab="",ylab="Energy Sub Metering") lines(powerConsumptionData$DateTime,powerConsumptionData$Sub_metering_1, type="l", col="black") lines(powerConsumptionData$DateTime,powerConsumptionData$Sub_metering_2, type="l", col="red") lines(powerConsumptionData$DateTime,powerConsumptionData$Sub_metering_3, type="l", col="blue") legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","red","blue"),cex=0.75) plot(DateTime,Global_reactive_power, type="n", xlab="",ylab="Global Reactive Power (kilowatts)") lines(DateTime,Global_reactive_power, type="l", col="black") }) #remove connection to png dev.off()
/Plot4.R
no_license
szumkhawala/ExData_Plotting1
R
false
false
1,927
r
#Plot4 #get data #get data powerConsumptionData <- read.table("household_power_consumption.txt", header=TRUE, sep=";",dec=".",na.strings = "?") #combine date and time column powerConsumptionData$DateTime <- paste(powerConsumptionData$Date, powerConsumptionData$Time,sep=" ") #convert date column to type date powerConsumptionData$Date <- as.Date(powerConsumptionData$Date , "%d/%m/%Y") #convert datetime column to type datetime library(lubridate) powerConsumptionData$DateTime <-dmy_hms(powerConsumptionData$DateTime) #filter out unwanteddata powerConsumptionData <- subset(powerConsumptionData , Date == "2007-02-01" | Date == "2007-02-02",select=c(Date, DateTime,Global_active_power,Global_reactive_power, Voltage, Global_intensity, Sub_metering_1,Sub_metering_2,Sub_metering_3)) #created plotted png(filename = "plot4.png",width = 480, height = 480, units = "px") #create graph par(mfrow=c(2,2)) with(powerConsumptionData,{ plot(DateTime,Global_active_power, type="n", xlab="",ylab="Global Active Power (kilowatts)") lines(DateTime,Global_active_power, type="l", col="black") plot(DateTime,Voltage, type="n", xlab="",ylab="Voltage") lines(DateTime,Voltage, type="l", col="black") plot(powerConsumptionData$DateTime,powerConsumptionData$Sub_metering_1, type="n", xlab="",ylab="Energy Sub Metering") lines(powerConsumptionData$DateTime,powerConsumptionData$Sub_metering_1, type="l", col="black") lines(powerConsumptionData$DateTime,powerConsumptionData$Sub_metering_2, type="l", col="red") lines(powerConsumptionData$DateTime,powerConsumptionData$Sub_metering_3, type="l", col="blue") legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","red","blue"),cex=0.75) plot(DateTime,Global_reactive_power, type="n", xlab="",ylab="Global Reactive Power (kilowatts)") lines(DateTime,Global_reactive_power, type="l", col="black") }) #remove connection to png dev.off()
#set and get working directory setwd("/Users/madelondenboeft/Desktop") #create plot4 fulldataset <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"') data_needed <- subset(fulldataset, Date %in% c("1/2/2007","2/2/2007")) data_needed$Date <- as.Date(data_needed$Date, format="%d/%m/%Y") datetime <- paste(as.Date(data_needed$Date), data_needed$Time) data_needed$Datetime <- as.POSIXct(datetime) png("plot4.png", width=480, height=480) par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0)) with(data_needed, { plot(Global_active_power~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="") plot(Voltage~Datetime, type="l", ylab="Voltage (volt)", xlab="datetime") plot(Sub_metering_1~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="") lines(Sub_metering_2~Datetime,col='Red') lines(Sub_metering_3~Datetime,col='Blue') legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) plot(Global_reactive_power~Datetime, type="l", ylab="Global Rective Power (kilowatts)",xlab="datetime") }) dev.off()
/plot4.R
no_license
mdenboeft/ExData_Plotting1
R
false
false
1,290
r
#set and get working directory setwd("/Users/madelondenboeft/Desktop") #create plot4 fulldataset <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"') data_needed <- subset(fulldataset, Date %in% c("1/2/2007","2/2/2007")) data_needed$Date <- as.Date(data_needed$Date, format="%d/%m/%Y") datetime <- paste(as.Date(data_needed$Date), data_needed$Time) data_needed$Datetime <- as.POSIXct(datetime) png("plot4.png", width=480, height=480) par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0)) with(data_needed, { plot(Global_active_power~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="") plot(Voltage~Datetime, type="l", ylab="Voltage (volt)", xlab="datetime") plot(Sub_metering_1~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="") lines(Sub_metering_2~Datetime,col='Red') lines(Sub_metering_3~Datetime,col='Blue') legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) plot(Global_reactive_power~Datetime, type="l", ylab="Global Rective Power (kilowatts)",xlab="datetime") }) dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print_methods.R \name{print.psychmeta.ad_int} \alias{print.psychmeta.ad_int} \title{print method for interactive artifact distributions} \usage{ \method{print}{psychmeta.ad_int}(x, ..., digits = 3) } \arguments{ \item{x}{Object to be printed.} \item{...}{Further arguments passed to or from other methods.} \item{digits}{Number of digits to which results should be printed.} } \value{ Printed results from objects of the 'psychmeta' class. } \description{ print method for interactive artifact distributions } \keyword{internal}
/man/print.psychmeta.ad_int.Rd
no_license
EnterStudios/psychmeta
R
false
true
609
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print_methods.R \name{print.psychmeta.ad_int} \alias{print.psychmeta.ad_int} \title{print method for interactive artifact distributions} \usage{ \method{print}{psychmeta.ad_int}(x, ..., digits = 3) } \arguments{ \item{x}{Object to be printed.} \item{...}{Further arguments passed to or from other methods.} \item{digits}{Number of digits to which results should be printed.} } \value{ Printed results from objects of the 'psychmeta' class. } \description{ print method for interactive artifact distributions } \keyword{internal}
rm(list = ls()) tribedf <- read_rds("01_data/cache/tribe_county_shapefiles.rds") tribecounties <- tribedf %>% dplyr::select(GEOID) %>% unique() empty <- tribecounties %>% mutate(area = as.numeric(st_area(geometry))) %>% st_set_geometry(NULL) %>% filter(area == 0) tribeuse <- tribecounties %>% filter(!GEOID %in% empty$GEOID) heat <- read_rds("01_data/clean/a_heat_gridmet_county.rds") drought <- read_rds("01_data/clean/b_drought_county.rds") %>% rename(drought_mean = `1980-2020 Mean`) precip <- read_rds("01_data/clean/c_precip_county.rds") whp <- read_rds("01_data/clean/d_whp_county.rds") elrug <- read_rds("01_data/clean/e_ElevationAndRuggedness_County.rds") wells_oil <- read_rds("01_data/clean/f_Oil_wells_county.rds") %>% dplyr::select(GEOID,AllArea_OilPortion) %>% mutate(AllArea_OilPortion = as.numeric(AllArea_OilPortion)) %>% unique() %>% left_join(tribeuse,.,by="GEOID") %>% st_set_geometry(NULL) %>% replace(is.na(.), 0) wells_gas <- read_rds("01_data/clean/f_Gas_wells_county.rds") %>% dplyr::select(GEOID,AllArea_GasPortion) %>% mutate(AllArea_GasPortion = as.numeric(AllArea_GasPortion)) %>% unique() %>% left_join(tribeuse,.,by="GEOID") %>% st_set_geometry(NULL) %>% replace(is.na(.), 0) OGbasins <- read_rds("01_data/clean/g_OilGas_basins_county.rds") %>% st_set_geometry(NULL) PAD <- read_rds("01_data/clean/h_federalland_county.rds") soc <- map_dfr(list.files("01_data/cache/soc_county2", full.names = T), function(fl){ t <- read_rds(fl) }) %>% group_by(GEOID) %>% summarise(SOC_mean = mean(Interpolated_15)) %>% ungroup() all <- tribedf %>% st_set_geometry(NULL) %>% left_join(.,heat,by="GEOID") %>% left_join(.,drought,by="GEOID") %>% left_join(.,precip,by="GEOID") %>% left_join(.,whp,by="GEOID") %>% left_join(.,elrug,by="GEOID") %>% left_join(.,wells_oil,by="GEOID") %>% left_join(.,wells_gas,by="GEOID") %>% left_join(.,OGbasins,by="GEOID") %>% left_join(.,PAD,by="GEOID") %>% left_join(.,soc,by="GEOID") # Rename fields, drop units, replace NAs when appropriate final_ds 
<- all %>% select(tribe, GEOID, heatdays=heatdays_mean, drought=drought_mean, precip, whp=whp_mean, oil_portion=AllArea_OilPortion, gas_portion=AllArea_GasPortion, og_basin_portion=BasinPortion, federal_lands_portion=PADPortion, soc=SOC_mean, elevation=elevation_mean, tri=tri_mean) %>% inner_join(tigris::fips_codes %>% mutate(GEOID=str_c(state_code,county_code), county=str_remove(county,"County")) %>% select(GEOID,state,county),., by="GEOID") write_csv(final_ds,"01_data/clean/tribal_dispossession_county.csv") write_csv(final_ds,"/RSTOR/tribal_climate/data_products/tribal_dispossession_county.csv") us_co <- USAboundaries::us_counties(resolution = "low") # Append geography and export as geopackage final_ds_geo <- inner_join(select(us_co,GEOID=geoid),final_ds,by="GEOID") write_sf(final_ds_geo,"01_data/clean/tribal_dispossession_county.gpkg") # sums <- all %>% # dplyr::select(-contains(c("q25","q75","sd","min","median","max","GEOID","tribe"))) %>% # summarise_all(list( # N = ~sum(!is.na(.)), # Min = ~min(., na.rm = T), # Mean = ~mean(., na.rm = T), # Max = ~max(., na.rm = T))) %>% # pivot_longer(everything()) %>% # mutate(Stat = str_remove(str_extract(name,"_N|_Min|_Mean|_Max"),"_"), # Variable = str_remove(str_remove(name,"_N|_Min|_Mean|_Max"),"_"), # value = round(value,3)) %>% # pivot_wider(-name, # names_from = Stat)
/02_build/04_combine_county.R
no_license
galsk223/tribalclimate
R
false
false
3,876
r
rm(list = ls()) tribedf <- read_rds("01_data/cache/tribe_county_shapefiles.rds") tribecounties <- tribedf %>% dplyr::select(GEOID) %>% unique() empty <- tribecounties %>% mutate(area = as.numeric(st_area(geometry))) %>% st_set_geometry(NULL) %>% filter(area == 0) tribeuse <- tribecounties %>% filter(!GEOID %in% empty$GEOID) heat <- read_rds("01_data/clean/a_heat_gridmet_county.rds") drought <- read_rds("01_data/clean/b_drought_county.rds") %>% rename(drought_mean = `1980-2020 Mean`) precip <- read_rds("01_data/clean/c_precip_county.rds") whp <- read_rds("01_data/clean/d_whp_county.rds") elrug <- read_rds("01_data/clean/e_ElevationAndRuggedness_County.rds") wells_oil <- read_rds("01_data/clean/f_Oil_wells_county.rds") %>% dplyr::select(GEOID,AllArea_OilPortion) %>% mutate(AllArea_OilPortion = as.numeric(AllArea_OilPortion)) %>% unique() %>% left_join(tribeuse,.,by="GEOID") %>% st_set_geometry(NULL) %>% replace(is.na(.), 0) wells_gas <- read_rds("01_data/clean/f_Gas_wells_county.rds") %>% dplyr::select(GEOID,AllArea_GasPortion) %>% mutate(AllArea_GasPortion = as.numeric(AllArea_GasPortion)) %>% unique() %>% left_join(tribeuse,.,by="GEOID") %>% st_set_geometry(NULL) %>% replace(is.na(.), 0) OGbasins <- read_rds("01_data/clean/g_OilGas_basins_county.rds") %>% st_set_geometry(NULL) PAD <- read_rds("01_data/clean/h_federalland_county.rds") soc <- map_dfr(list.files("01_data/cache/soc_county2", full.names = T), function(fl){ t <- read_rds(fl) }) %>% group_by(GEOID) %>% summarise(SOC_mean = mean(Interpolated_15)) %>% ungroup() all <- tribedf %>% st_set_geometry(NULL) %>% left_join(.,heat,by="GEOID") %>% left_join(.,drought,by="GEOID") %>% left_join(.,precip,by="GEOID") %>% left_join(.,whp,by="GEOID") %>% left_join(.,elrug,by="GEOID") %>% left_join(.,wells_oil,by="GEOID") %>% left_join(.,wells_gas,by="GEOID") %>% left_join(.,OGbasins,by="GEOID") %>% left_join(.,PAD,by="GEOID") %>% left_join(.,soc,by="GEOID") # Rename fields, drop units, replace NAs when appropriate final_ds 
<- all %>% select(tribe, GEOID, heatdays=heatdays_mean, drought=drought_mean, precip, whp=whp_mean, oil_portion=AllArea_OilPortion, gas_portion=AllArea_GasPortion, og_basin_portion=BasinPortion, federal_lands_portion=PADPortion, soc=SOC_mean, elevation=elevation_mean, tri=tri_mean) %>% inner_join(tigris::fips_codes %>% mutate(GEOID=str_c(state_code,county_code), county=str_remove(county,"County")) %>% select(GEOID,state,county),., by="GEOID") write_csv(final_ds,"01_data/clean/tribal_dispossession_county.csv") write_csv(final_ds,"/RSTOR/tribal_climate/data_products/tribal_dispossession_county.csv") us_co <- USAboundaries::us_counties(resolution = "low") # Append geography and export as geopackage final_ds_geo <- inner_join(select(us_co,GEOID=geoid),final_ds,by="GEOID") write_sf(final_ds_geo,"01_data/clean/tribal_dispossession_county.gpkg") # sums <- all %>% # dplyr::select(-contains(c("q25","q75","sd","min","median","max","GEOID","tribe"))) %>% # summarise_all(list( # N = ~sum(!is.na(.)), # Min = ~min(., na.rm = T), # Mean = ~mean(., na.rm = T), # Max = ~max(., na.rm = T))) %>% # pivot_longer(everything()) %>% # mutate(Stat = str_remove(str_extract(name,"_N|_Min|_Mean|_Max"),"_"), # Variable = str_remove(str_remove(name,"_N|_Min|_Mean|_Max"),"_"), # value = round(value,3)) %>% # pivot_wider(-name, # names_from = Stat)
se <- function(actual, predicted){ #min_length = min(length(actual), length(predicted)) #return((actual[1:min_length] - predicted[1:min_length]) ^ 2) return((actual - predicted) ^ 2) } mse <- function(actual, predicted){ return(mean(x = se(actual, predicted), na.rm = TRUE)) } rmse <- function(actual, predicted){ return(sqrt(mse(actual, predicted))) } myrmse <- function(model.features, target.features){ return(rmse(target.features, model.features)) } #' Calculates model RMSE #' #' @param modelOutput results from the model #' @param start_date date The date which the first model time-point is being compared to. #' @param target_data Data frame The observed data that the model is targeted to match. #' @param weights Vector Vector of length 4 containing weights indicating the relative weighting to give cases, deaths, allvaccinations and fully vaccinated #' For example c(1.0, 0.5, 0.25, 0.25) #' @param under_report_factor fraction Estimated fraction of deaths that get reported as covid-19 deaths. This is deprecated and no longer used. #' The estimated_deaths in the variable "cum_est_deaths" allows for adjusting the death reporting rates. #' #' @export #' modelrmse <- function(modelOutput, start_date, target_data, weights = c(1.0, 0.5, 0.25, 0.25), under_report_factor = 1) { #TODO this function needs a rewrite badly # keep the passed in start_date as reference to time = 0 equivalent date_zero <- start_date # Calculate the date-range where the predicted and target data intersects. 
# Hacky but, "date" used for target data in date format # "time" also used to indicate date but in "number of days" # from the start of the simulation format used in the model output end_date <- pmin( start_date + max(modelOutput$time), max(target_data$date) ) end_time <- as.integer(end_date - date_zero) start_date <- pmax( start_date, min(target_data$date) ) start_time <- as.integer(start_date - date_zero) if (start_time > end_time) {stop("Cannot calculate rmse, the predicted and actual values to not intersect in time.")} # Filter the observed data so only looking at the part that overlaps with with model prediction target_data <- target_data %>% dplyr::filter(date >= start_date & date <= end_date) modelOutput <- dplyr::filter(modelOutput, time >= start_time & time <= end_time) # target features # target_features <- c(target_data$smoothed_new_cases / tail(target_data$cum_cases, 1), # target_data$smoothed_daily_est_deaths / tail(target_data$cum_est_deaths, 1), # target_data$smoothed_daily_vaccinations / tail(target_data$cum_vaccinations, 1), # target_data$smoothed_daily_fully_vaccinated / tail(target_data$fully_vaccinated, 1)) # # target_features[is.na(target_features)] <- 0 target_cases <- c(target_data$smoothed_new_cases / tail(target_data$cum_cases, 1)) %>% tidyr::replace_na(0) target_deaths <- c(target_data$smoothed_daily_est_deaths / tail(target_data$cum_est_deaths, 1)) %>% tidyr::replace_na(0) target_all_vaccinations <- c(target_data$smoothed_daily_vaccinations / tail(target_data$cum_vaccinations, 1)) %>% tidyr::replace_na(0) target_full_vaccinations <- c(target_data$smoothed_daily_fully_vaccinated / tail(target_data$fully_vaccinated, 1)) %>% tidyr::replace_na(0) model.df_current <- modelOutput %>% dplyr::mutate(AllDeaths = D_s1 + D_h1 + D_c1 + D_s2 + D_h2 + D_c2 + D_s3 + D_h3 + D_c3, ConfirmedCases = ConfirmedCases1 + ConfirmedCases2 + ConfirmedCases3, Dose1Vaccinated = Vaccination_dose1_flow1 + Vaccination_dose1_flow2 + Vaccination_dose1_flow3, 
FullyVaccinated = Vaccination_fully_flow1 + Vaccination_fully_flow2 + Vaccination_fully_flow3, AllVaccinations = Dose1Vaccinated + FullyVaccinated, NewDeaths = AllDeaths - dplyr::lag(AllDeaths), NewCases = ConfirmedCases - dplyr::lag(ConfirmedCases), NewVaccinations = AllVaccinations - dplyr::lag(AllVaccinations), NewDose1Vaccinated = Dose1Vaccinated - dplyr::lag(Dose1Vaccinated), NewFullyVaccinated = FullyVaccinated - dplyr::lag(FullyVaccinated), date = date_zero + time) %>% dplyr::filter(date >= start_date & date <= max(target_data$date)) %>% dplyr::mutate(ConfirmedCasesRescaled = NewCases / tail(target_data$cum_cases, 1), AllDeathsRescaled = NewDeaths / tail(target_data$cum_est_deaths, 1), AllVaccinationsRescaled = NewVaccinations / tail(target_data$cum_vaccinations, 1), FullyVaccinatedRescaled = NewFullyVaccinated / tail(target_data$fully_vaccinated, 1)) if(under_report_factor != 1) { model.df_current <- model.df_current %>% mutate(AllDeathsRescaled = AllDeathsRescaled * under_report_factor) } if("experiment" %in% colnames(model.df_current)) { model.df_current <- model.df_current %>% dplyr::arrange(experiment, time) %>% dplyr::group_by(experiment) %>% dplyr::select(experiment, time, ConfirmedCasesRescaled, AllDeathsRescaled, AllVaccinationsRescaled, FullyVaccinatedRescaled) %>% dplyr::ungroup() } else { model.df_current <- model.df_current %>% dplyr::arrange(time) %>% dplyr::select(time, ConfirmedCasesRescaled, AllDeathsRescaled, AllVaccinationsRescaled, FullyVaccinatedRescaled) } # model_current_wider.df <- model.df_current %>% # tidyr::pivot_wider(names_from = "time", # values_from = c("ConfirmedCasesRescaled", "AllDeathsRescaled", "AllVaccinationsRescaled", "FullyVaccinatedRescaled")) if("experiment" %in% colnames(model.df_current)) { model_cases_current_wider.df <- model.df_current %>% dplyr::select(experiment, time, ConfirmedCasesRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("ConfirmedCasesRescaled")) model_deaths_current_wider.df 
<- model.df_current %>% dplyr::select(experiment, time, AllDeathsRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("AllDeathsRescaled")) model_all_vaccinations_current_wider.df <- model.df_current %>% dplyr::select(experiment, time, AllVaccinationsRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("AllVaccinationsRescaled")) model_fully_vaccinated_current_wider.df <- model.df_current %>% dplyr::select(experiment, time, FullyVaccinatedRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("FullyVaccinatedRescaled")) } else { model_cases_current_wider.df <- model.df_current %>% dplyr::select(time, ConfirmedCasesRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("ConfirmedCasesRescaled")) model_deaths_current_wider.df <- model.df_current %>% dplyr::select(time, AllDeathsRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("AllDeathsRescaled")) model_all_vaccinations_current_wider.df <- model.df_current %>% dplyr::select(time, AllVaccinationsRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("AllVaccinationsRescaled")) model_fully_vaccinated_current_wider.df <- model.df_current %>% dplyr::select(time, FullyVaccinatedRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("FullyVaccinatedRescaled")) } # weight vectors # weight_vector <- c(rep(weights[1], (length(target_features) / 4)), # rep(weights[2], (length(target_features) / 4)), # rep(weights[3], (length(target_features) / 4)), # rep(weights[4], (length(target_features) / 4))) cases_weights <- weights[1] deaths_weights <- weights[2] all_vaccinations_weights <- weights[3] full_vaccinations_weights <- weights[4] if("experiment" %in% colnames(model.df_current)) { case_rmse <- apply(dplyr::select(model_cases_current_wider.df, -experiment), MARGIN = 1, FUN = myrmse, target_cases) deaths_rmse <- apply(dplyr::select(model_deaths_current_wider.df, -experiment), MARGIN = 1, FUN = myrmse, target_deaths) 
all_vaccinations_rmse <- apply(dplyr::select(model_all_vaccinations_current_wider.df, -experiment), MARGIN = 1, FUN = myrmse, target_all_vaccinations) full_vaccinations_rmse <- apply(dplyr::select(model_fully_vaccinated_current_wider.df, -experiment), MARGIN = 1, FUN = myrmse, target_full_vaccinations) total_rmse <- sum(c(case_rmse*cases_weights, deaths_rmse*deaths_weights, all_vaccinations_rmse*all_vaccinations_weights, full_vaccinations_rmse*full_vaccinations_weights), na.rm = T) model_rmse <- total_rmse / sum(weights != 0) } else { case_rmse <- apply(model_cases_current_wider.df, MARGIN = 1, FUN = myrmse, target_cases) deaths_rmse <- apply(model_deaths_current_wider.df, MARGIN = 1, FUN = myrmse, target_deaths) all_vaccinations_rmse <- apply(model_all_vaccinations_current_wider.df, MARGIN = 1, FUN = myrmse, target_all_vaccinations) full_vaccinations_rmse <- apply(model_fully_vaccinated_current_wider.df, MARGIN = 1, FUN = myrmse, target_full_vaccinations) total_rmse <- sum(c(case_rmse*cases_weights, deaths_rmse*deaths_weights, all_vaccinations_rmse*all_vaccinations_weights, full_vaccinations_rmse*full_vaccinations_weights), na.rm = T) model_rmse <- total_rmse / sum(weights != 0) } return(model_rmse) }
/R/rmse_new.R
no_license
wimmyteam/conisi
R
false
false
10,220
r
se <- function(actual, predicted){ #min_length = min(length(actual), length(predicted)) #return((actual[1:min_length] - predicted[1:min_length]) ^ 2) return((actual - predicted) ^ 2) } mse <- function(actual, predicted){ return(mean(x = se(actual, predicted), na.rm = TRUE)) } rmse <- function(actual, predicted){ return(sqrt(mse(actual, predicted))) } myrmse <- function(model.features, target.features){ return(rmse(target.features, model.features)) } #' Calculates model RMSE #' #' @param modelOutput results from the model #' @param start_date date The date which the first model time-point is being compared to. #' @param target_data Data frame The observed data that the model is targeted to match. #' @param weights Vector Vector of length 4 containing weights indicating the relative weighting to give cases, deaths, allvaccinations and fully vaccinated #' For example c(1.0, 0.5, 0.25, 0.25) #' @param under_report_factor fraction Estimated fraction of deaths that get reported as covid-19 deaths. This is deprecated and no longer used. #' The estimated_deaths in the variable "cum_est_deaths" allows for adjusting the death reporting rates. #' #' @export #' modelrmse <- function(modelOutput, start_date, target_data, weights = c(1.0, 0.5, 0.25, 0.25), under_report_factor = 1) { #TODO this function needs a rewrite badly # keep the passed in start_date as reference to time = 0 equivalent date_zero <- start_date # Calculate the date-range where the predicted and target data intersects. 
# Hacky but, "date" used for target data in date format # "time" also used to indicate date but in "number of days" # from the start of the simulation format used in the model output end_date <- pmin( start_date + max(modelOutput$time), max(target_data$date) ) end_time <- as.integer(end_date - date_zero) start_date <- pmax( start_date, min(target_data$date) ) start_time <- as.integer(start_date - date_zero) if (start_time > end_time) {stop("Cannot calculate rmse, the predicted and actual values to not intersect in time.")} # Filter the observed data so only looking at the part that overlaps with with model prediction target_data <- target_data %>% dplyr::filter(date >= start_date & date <= end_date) modelOutput <- dplyr::filter(modelOutput, time >= start_time & time <= end_time) # target features # target_features <- c(target_data$smoothed_new_cases / tail(target_data$cum_cases, 1), # target_data$smoothed_daily_est_deaths / tail(target_data$cum_est_deaths, 1), # target_data$smoothed_daily_vaccinations / tail(target_data$cum_vaccinations, 1), # target_data$smoothed_daily_fully_vaccinated / tail(target_data$fully_vaccinated, 1)) # # target_features[is.na(target_features)] <- 0 target_cases <- c(target_data$smoothed_new_cases / tail(target_data$cum_cases, 1)) %>% tidyr::replace_na(0) target_deaths <- c(target_data$smoothed_daily_est_deaths / tail(target_data$cum_est_deaths, 1)) %>% tidyr::replace_na(0) target_all_vaccinations <- c(target_data$smoothed_daily_vaccinations / tail(target_data$cum_vaccinations, 1)) %>% tidyr::replace_na(0) target_full_vaccinations <- c(target_data$smoothed_daily_fully_vaccinated / tail(target_data$fully_vaccinated, 1)) %>% tidyr::replace_na(0) model.df_current <- modelOutput %>% dplyr::mutate(AllDeaths = D_s1 + D_h1 + D_c1 + D_s2 + D_h2 + D_c2 + D_s3 + D_h3 + D_c3, ConfirmedCases = ConfirmedCases1 + ConfirmedCases2 + ConfirmedCases3, Dose1Vaccinated = Vaccination_dose1_flow1 + Vaccination_dose1_flow2 + Vaccination_dose1_flow3, 
FullyVaccinated = Vaccination_fully_flow1 + Vaccination_fully_flow2 + Vaccination_fully_flow3, AllVaccinations = Dose1Vaccinated + FullyVaccinated, NewDeaths = AllDeaths - dplyr::lag(AllDeaths), NewCases = ConfirmedCases - dplyr::lag(ConfirmedCases), NewVaccinations = AllVaccinations - dplyr::lag(AllVaccinations), NewDose1Vaccinated = Dose1Vaccinated - dplyr::lag(Dose1Vaccinated), NewFullyVaccinated = FullyVaccinated - dplyr::lag(FullyVaccinated), date = date_zero + time) %>% dplyr::filter(date >= start_date & date <= max(target_data$date)) %>% dplyr::mutate(ConfirmedCasesRescaled = NewCases / tail(target_data$cum_cases, 1), AllDeathsRescaled = NewDeaths / tail(target_data$cum_est_deaths, 1), AllVaccinationsRescaled = NewVaccinations / tail(target_data$cum_vaccinations, 1), FullyVaccinatedRescaled = NewFullyVaccinated / tail(target_data$fully_vaccinated, 1)) if(under_report_factor != 1) { model.df_current <- model.df_current %>% mutate(AllDeathsRescaled = AllDeathsRescaled * under_report_factor) } if("experiment" %in% colnames(model.df_current)) { model.df_current <- model.df_current %>% dplyr::arrange(experiment, time) %>% dplyr::group_by(experiment) %>% dplyr::select(experiment, time, ConfirmedCasesRescaled, AllDeathsRescaled, AllVaccinationsRescaled, FullyVaccinatedRescaled) %>% dplyr::ungroup() } else { model.df_current <- model.df_current %>% dplyr::arrange(time) %>% dplyr::select(time, ConfirmedCasesRescaled, AllDeathsRescaled, AllVaccinationsRescaled, FullyVaccinatedRescaled) } # model_current_wider.df <- model.df_current %>% # tidyr::pivot_wider(names_from = "time", # values_from = c("ConfirmedCasesRescaled", "AllDeathsRescaled", "AllVaccinationsRescaled", "FullyVaccinatedRescaled")) if("experiment" %in% colnames(model.df_current)) { model_cases_current_wider.df <- model.df_current %>% dplyr::select(experiment, time, ConfirmedCasesRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("ConfirmedCasesRescaled")) model_deaths_current_wider.df 
<- model.df_current %>% dplyr::select(experiment, time, AllDeathsRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("AllDeathsRescaled")) model_all_vaccinations_current_wider.df <- model.df_current %>% dplyr::select(experiment, time, AllVaccinationsRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("AllVaccinationsRescaled")) model_fully_vaccinated_current_wider.df <- model.df_current %>% dplyr::select(experiment, time, FullyVaccinatedRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("FullyVaccinatedRescaled")) } else { model_cases_current_wider.df <- model.df_current %>% dplyr::select(time, ConfirmedCasesRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("ConfirmedCasesRescaled")) model_deaths_current_wider.df <- model.df_current %>% dplyr::select(time, AllDeathsRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("AllDeathsRescaled")) model_all_vaccinations_current_wider.df <- model.df_current %>% dplyr::select(time, AllVaccinationsRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("AllVaccinationsRescaled")) model_fully_vaccinated_current_wider.df <- model.df_current %>% dplyr::select(time, FullyVaccinatedRescaled) %>% tidyr::pivot_wider(names_from = "time", values_from = c("FullyVaccinatedRescaled")) } # weight vectors # weight_vector <- c(rep(weights[1], (length(target_features) / 4)), # rep(weights[2], (length(target_features) / 4)), # rep(weights[3], (length(target_features) / 4)), # rep(weights[4], (length(target_features) / 4))) cases_weights <- weights[1] deaths_weights <- weights[2] all_vaccinations_weights <- weights[3] full_vaccinations_weights <- weights[4] if("experiment" %in% colnames(model.df_current)) { case_rmse <- apply(dplyr::select(model_cases_current_wider.df, -experiment), MARGIN = 1, FUN = myrmse, target_cases) deaths_rmse <- apply(dplyr::select(model_deaths_current_wider.df, -experiment), MARGIN = 1, FUN = myrmse, target_deaths) 
all_vaccinations_rmse <- apply(dplyr::select(model_all_vaccinations_current_wider.df, -experiment), MARGIN = 1, FUN = myrmse, target_all_vaccinations) full_vaccinations_rmse <- apply(dplyr::select(model_fully_vaccinated_current_wider.df, -experiment), MARGIN = 1, FUN = myrmse, target_full_vaccinations) total_rmse <- sum(c(case_rmse*cases_weights, deaths_rmse*deaths_weights, all_vaccinations_rmse*all_vaccinations_weights, full_vaccinations_rmse*full_vaccinations_weights), na.rm = T) model_rmse <- total_rmse / sum(weights != 0) } else { case_rmse <- apply(model_cases_current_wider.df, MARGIN = 1, FUN = myrmse, target_cases) deaths_rmse <- apply(model_deaths_current_wider.df, MARGIN = 1, FUN = myrmse, target_deaths) all_vaccinations_rmse <- apply(model_all_vaccinations_current_wider.df, MARGIN = 1, FUN = myrmse, target_all_vaccinations) full_vaccinations_rmse <- apply(model_fully_vaccinated_current_wider.df, MARGIN = 1, FUN = myrmse, target_full_vaccinations) total_rmse <- sum(c(case_rmse*cases_weights, deaths_rmse*deaths_weights, all_vaccinations_rmse*all_vaccinations_weights, full_vaccinations_rmse*full_vaccinations_weights), na.rm = T) model_rmse <- total_rmse / sum(weights != 0) } return(model_rmse) }
% Generated by roxygen2 (4.0.1): do not edit by hand \name{MartinRatio} \alias{MartinRatio} \title{Martin ratio of the return distribution} \usage{ MartinRatio(R, Rf = 0, ...) } \arguments{ \item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns} \item{Rf}{risk free rate, in same period as your returns} \item{\dots}{any other passthru parameters} } \description{ To calculate Martin ratio we divide the difference of the portfolio return and the risk free rate by the Ulcer index } \details{ \deqn{Martin ratio = \frac{r_P - r_F}{\sqrt{\sum^{n}_{i=1} \frac{{D'_i}^2}{n}}}}{Martin ratio = (rp - rf) / Ulcer index} where \eqn{r_P} is the annualized portfolio return, \eqn{r_F} is the risk free rate, \eqn{n} is the number of observations of the entire series, \eqn{D'_i} is the drawdown since previous peak in period i } \examples{ data(portfolio_bacon) print(MartinRatio(portfolio_bacon[,1])) #expected 1.70 data(managers) print(MartinRatio(managers['1996'])) print(MartinRatio(managers['1996',1])) } \author{ Matthieu Lestel } \references{ Carl Bacon, \emph{Practical portfolio performance measurement and attribution}, second edition 2008 p.91 } \keyword{distribution} \keyword{models} \keyword{multivariate} \keyword{ts}
/man/MartinRatio.Rd
no_license
ecjbosu/PerformanceAnalytics
R
false
false
1,307
rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{MartinRatio} \alias{MartinRatio} \title{Martin ratio of the return distribution} \usage{ MartinRatio(R, Rf = 0, ...) } \arguments{ \item{R}{an xts, vector, matrix, data frame, timeSeries or zoo object of asset returns} \item{Rf}{risk free rate, in same period as your returns} \item{\dots}{any other passthru parameters} } \description{ To calculate Martin ratio we divide the difference of the portfolio return and the risk free rate by the Ulcer index } \details{ \deqn{Martin ratio = \frac{r_P - r_F}{\sqrt{\sum^{n}_{i=1} \frac{{D'_i}^2}{n}}}}{Martin ratio = (rp - rf) / Ulcer index} where \eqn{r_P} is the annualized portfolio return, \eqn{r_F} is the risk free rate, \eqn{n} is the number of observations of the entire series, \eqn{D'_i} is the drawdown since previous peak in period i } \examples{ data(portfolio_bacon) print(MartinRatio(portfolio_bacon[,1])) #expected 1.70 data(managers) print(MartinRatio(managers['1996'])) print(MartinRatio(managers['1996',1])) } \author{ Matthieu Lestel } \references{ Carl Bacon, \emph{Practical portfolio performance measurement and attribution}, second edition 2008 p.91 } \keyword{distribution} \keyword{models} \keyword{multivariate} \keyword{ts}
"plotAutoC" <-
function(node, plot = TRUE, colour = c("red", "blue", "green", "yellow", "black"),
         lwd = 5, main = NULL, ...)
# Plot the autocorrelation function for a single scalar component of an
# OpenBUGS node name.  Returns the acf object (invisibly when plotted).
{
    monitored <- samplesMonitors(node)
    if (length(monitored) > 1 || monitored != node)
        stop("node must be a scalar variable from the model, for arrays use samplesAutoC")
    chainValues <- samplesSample(node)
    firstChain <- samplesGetFirstChain()
    # A (numerically) constant chain has no meaningful autocorrelation.
    if (sd(chainValues) <= 1.0E-10)
        stop("ACF cannot be computed/plotted: standard deviation <= 1.0E-10")
    plotTitle <- if (is.null(main)) sQuote(node) else main
    acfresult <- acf(chainValues, col = colour[firstChain], main = plotTitle,
                     lwd = lwd, demean = TRUE, plot = plot, ...)
    # Record which node this series came from on the returned object.
    acfresult$series <- node
    if (plot) invisible(acfresult) else return(acfresult)
}
/R/plot.autoC.R
no_license
cran/BRugs
R
false
false
840
r
"plotAutoC" <- function(node, plot = TRUE, colour = c("red", "blue", "green", "yellow", "black"), lwd = 5, main = NULL, ...) # Plot auto correlation function for single component of OpenBUGS name { sM <- samplesMonitors(node) if(length(sM) > 1 || sM != node) stop("node must be a scalar variable from the model, for arrays use samplesAutoC") nodeName <- sQuote(node) sample <- samplesSample(node) chain <- samplesGetFirstChain() if (sd(sample) > 1.0E-10) acfresult <- acf(sample, col = colour[chain], main = if(is.null(main)) nodeName else main, lwd = lwd, demean = TRUE, plot = plot, ...) else stop("ACF cannot be computed/plotted: standard deviation <= 1.0E-10") acfresult$series <- node if(plot) invisible(acfresult) else return(acfresult) }
# Advent of Code 2017, day 2: checksums over a tab-separated spreadsheet.
# Parse each input line into a numeric row up front.
rows <- lapply(strsplit(readLines("Day2/Input2_1.txt"), "\t"), as.numeric)

###################### PART 1 ######################
# Checksum: sum of (max - min) over all rows.
sum(vapply(rows, function(r) max(r) - min(r), numeric(1)))

###################### PART 2 ######################
# For each row, find the one pair where one value evenly divides the
# other, and sum the quotients.
sum(vapply(rows, function(r) {
  pairs <- expand.grid(r, r)
  hits <- pairs[pairs[, 1] %% pairs[, 2] == 0 & pairs[, 1] != pairs[, 2], ]
  hits[1, 1] / hits[1, 2]
}, numeric(1)))
/Day2/Day2.R
no_license
RossiLorenzo/advent_code_2017
R
false
false
444
r
input = readLines("Day2/Input2_1.txt") input_by_row = strsplit(input, "\t") ###################### PART 1 ###################### sum(sapply(input_by_row, function(x) { max(as.numeric(x)) - min(as.numeric(x)) })) ###################### PART 2 ###################### sum(sapply(input_by_row, function(x) { tmp = expand.grid(as.numeric(x), as.numeric(x)) tmp = tmp[tmp[,1] %% tmp[,2] == 0 & tmp[,1] != tmp[,2], ] tmp[1,1] / tmp[1,2] }))
#-------------------------------------------------------------------------
# LAB 2
# 1.1 Conditional statements
# 1.1.1 sheldon_game(player1, player2)
#-------------------------------------------------------------------------
name <- "Farhana Chowdhury Tondra"
liuid <- "farch587"

#------------------code starts from here----------------------------------
# Play one round of rock-paper-scissors-lizard-spock.
#
# The five moves form a cycle in which every move beats the moves 1 step
# and 3 steps ahead of it (modulo 5):
#   rock -> lizard -> spock -> scissors -> paper -> rock -> ...
#
# Args:
#   player1, player2: each one of "rock", "lizard", "spock", "scissors",
#                     "paper" (anything else is rejected by stopifnot).
# Returns: "Player 1 wins!", "Player 2 wins!" or "Draw!".
sheldon_game <- function(player1, player2) {
  alt <- c("rock", "lizard", "spock", "scissors", "paper")
  stopifnot(player1 %in% alt, player2 %in% alt)

  alt1 <- which(alt %in% player1)
  alt2 <- which(alt %in% player2)

  if (alt1 == alt2) {
    return("Draw!")
  }
  # Positions beaten by player1, wrapped back into 1..5.  The original
  # (alt1 + c(1, 3)) %% 5 produced 0 instead of 5 whenever the beaten
  # move was "paper" (index 5), so e.g. sheldon_game("lizard", "paper")
  # and sheldon_game("scissors", "paper") were decided wrongly.
  beats <- ((alt1 + c(1, 3) - 1) %% 5) + 1
  if (alt2 %in% beats) {
    "Player 1 wins!"
  } else {
    # Space added for consistency with "Player 1 wins!".
    "Player 2 wins!"
  }
}

sheldon_game("lizard", "spock")
sheldon_game("rock", "rock")
/RCourseLab2/R/sheldon_game.R
no_license
tondralok/RCourseLab2
R
false
false
833
r
#------------------------------------------------------------------------- #LAB 2 #1.1 Conditional statements #1.1.1 sheldon_game(player1, player2) #------------------------------------------------------------------------- name <-"Farhana Chowdhury Tondra" liuid <- "farch587" #------------------code starts from here---------------------------------- sheldon_game <- function( player1, player2){ alt <- c("rock", "lizard", "spock", "scissors", "paper") stopifnot ( player1 %in% alt , player2 %in% alt) alt1 <- which ( alt %in% player1 ) alt2 <- which ( alt %in% player2 ) if(alt1== alt2) { return ("Draw!")} else { if( any (( alt1 + c (1 ,3)) %% 5 == alt2 )) { return ("Player 1 wins!") } else { return ("Player2 wins!") }} } sheldon_game("lizard", "spock") sheldon_game("rock", "rock")
#' Read Images into a Matrix #' #' Read images into rows of a matrix, given a mask - much faster for large #' datasets as it is based on C++ implementations. #' #' #' @param imageList A character vector containing a list of image files to #' read, in order. #' @param mask An \code{antsImage} containing a binary mask, voxels in the mask #' are placed in the matrix. #' @return A matrix containing the masked data, the result of calling #' \code{as.numeric(image, mask)} on each input image. #' @author Cook PA, Avants BB (C++ version) #' @seealso \code{\link{matrixToImages}, \link{getMask}} #' @examples #' #' #' # make some simulated images and convert them to a matrix #' #' n <- 2 #' tdir<-tempdir() #' for ( i in 1:n ) { #' simimg<-as.antsImage( replicate(64, rnorm(64) ) ) #' antsImageWrite( simimg, tempfile(fileext='.mha')) #' } #' imageList = list.files(tdir, pattern = ".mha", full.names = TRUE) #' mask = getMask( antsImageRead( imageList[1] , 2 ) ) #' mat = imagesToMatrix(imageList, mask) #' print(dim(mat)) #' #' #' @export imagesToMatrix imagesToMatrix <- function(imageList, mask) { n <- length(imageList) if (n < 1) { print(" length of input list must be >= 1 ") return(NA) } if (class(imageList) != "character") { print("Must pass a list of filenames") return(NA) } return(.Call("imagesToMatrix", imageList, mask, n)) }
/R/imagesToMatrix.R
no_license
bkandel/ANTsRCheck
R
false
false
1,391
r
#' Read Images into a Matrix #' #' Read images into rows of a matrix, given a mask - much faster for large #' datasets as it is based on C++ implementations. #' #' #' @param imageList A character vector containing a list of image files to #' read, in order. #' @param mask An \code{antsImage} containing a binary mask, voxels in the mask #' are placed in the matrix. #' @return A matrix containing the masked data, the result of calling #' \code{as.numeric(image, mask)} on each input image. #' @author Cook PA, Avants BB (C++ version) #' @seealso \code{\link{matrixToImages}, \link{getMask}} #' @examples #' #' #' # make some simulated images and convert them to a matrix #' #' n <- 2 #' tdir<-tempdir() #' for ( i in 1:n ) { #' simimg<-as.antsImage( replicate(64, rnorm(64) ) ) #' antsImageWrite( simimg, tempfile(fileext='.mha')) #' } #' imageList = list.files(tdir, pattern = ".mha", full.names = TRUE) #' mask = getMask( antsImageRead( imageList[1] , 2 ) ) #' mat = imagesToMatrix(imageList, mask) #' print(dim(mat)) #' #' #' @export imagesToMatrix imagesToMatrix <- function(imageList, mask) { n <- length(imageList) if (n < 1) { print(" length of input list must be >= 1 ") return(NA) } if (class(imageList) != "character") { print("Must pass a list of filenames") return(NA) } return(.Call("imagesToMatrix", imageList, mask, n)) }
#' Get Spotify audio analysis tidily #' #' Fetches the Spotify audio analysis for a track using list columns rather than #' lists of lists. #' #' See the #' \href{https://developer.spotify.com/documentation/web-api/reference/tracks/get-audio-analysis/}{Spotify #' developer documentation} for details about the information included in an #' audio analysis. #' #' @param track_uri A string with a Spotify track URI. #' @param ... Additional parameters passed to #' \code{\link[spotifyr]{get_track_audio_analysis}}. #' #' @importFrom magrittr %>% #' @export #' #' @examples #' get_tidy_audio_analysis("6IQILcYkN2S2eSu5IHoPEH") get_tidy_audio_analysis <- function(track_uri, ...) { spotifyr::get_track_audio_analysis(track_uri, ...) %>% tibble::enframe() %>% tidyr::pivot_wider() %>% dplyr::mutate( dplyr::across( c(meta, track, bars, beats, tatums, sections), purrr::map, tibble::as_tibble ), ) %>% tidyr::unnest(cols = c(meta, track)) %>% dplyr::select( analyzer_version, duration, dplyr::contains("fade"), dplyr::ends_with("confidence"), bars, beats, tatums, sections, segments ) %>% dplyr::mutate( segments = purrr::map( segments, . %>% tibble::as_tibble() %>% dplyr::mutate( pitches = purrr::map( pitches, purrr::set_names, c( "C", "C#|Db", "D", "D#|Eb", "E", "F", "F#|Gb", "G", "G#|Ab", "A", "A#|Bb", "B" ) ), timbre = purrr::map( timbre, purrr::set_names, c( "c01", "c02", "c03", "c04", "c05", "c06", "c07", "c08", "c09", "c10", "c11", "c12" ) ) ) ) ) } #' Add Spotify audio analysis to a data frame #' #' Fetches and joins the Spotify audio analysis for every Spotify URI in a data #' frame. #' #' @param df A data frame with a \code{track_uri} column. #' @param ... Parameters passed on to \code{\link{get_tidy_audio_analysis}}. 
#' #' @seealso \code{\link{get_tidy_audio_analysis}} #' #' @importFrom magrittr %>% #' @export #' #' @examples #' library(tidyverse) #' spotifyr::get_playlist_audio_features("", "37i9dQZF1DX21bRPJuEN7r") %>% #' slice(1:5) %>% #' add_audio_analysis() add_audio_analysis <- function(df, ...) { df %>% dplyr::mutate( track.uri = stringr::str_remove(track.uri, "spotify:track:"), analysis = purrr::map(track.uri, get_tidy_audio_analysis, ...) ) %>% tidyr::unnest(analysis) }
/R/audio_analysis.R
no_license
jaburgoyne/compmus
R
false
false
2,777
r
#' Get Spotify audio analysis tidily #' #' Fetches the Spotify audio analysis for a track using list columns rather than #' lists of lists. #' #' See the #' \href{https://developer.spotify.com/documentation/web-api/reference/tracks/get-audio-analysis/}{Spotify #' developer documentation} for details about the information included in an #' audio analysis. #' #' @param track_uri A string with a Spotify track URI. #' @param ... Additional parameters passed to #' \code{\link[spotifyr]{get_track_audio_analysis}}. #' #' @importFrom magrittr %>% #' @export #' #' @examples #' get_tidy_audio_analysis("6IQILcYkN2S2eSu5IHoPEH") get_tidy_audio_analysis <- function(track_uri, ...) { spotifyr::get_track_audio_analysis(track_uri, ...) %>% tibble::enframe() %>% tidyr::pivot_wider() %>% dplyr::mutate( dplyr::across( c(meta, track, bars, beats, tatums, sections), purrr::map, tibble::as_tibble ), ) %>% tidyr::unnest(cols = c(meta, track)) %>% dplyr::select( analyzer_version, duration, dplyr::contains("fade"), dplyr::ends_with("confidence"), bars, beats, tatums, sections, segments ) %>% dplyr::mutate( segments = purrr::map( segments, . %>% tibble::as_tibble() %>% dplyr::mutate( pitches = purrr::map( pitches, purrr::set_names, c( "C", "C#|Db", "D", "D#|Eb", "E", "F", "F#|Gb", "G", "G#|Ab", "A", "A#|Bb", "B" ) ), timbre = purrr::map( timbre, purrr::set_names, c( "c01", "c02", "c03", "c04", "c05", "c06", "c07", "c08", "c09", "c10", "c11", "c12" ) ) ) ) ) } #' Add Spotify audio analysis to a data frame #' #' Fetches and joins the Spotify audio analysis for every Spotify URI in a data #' frame. #' #' @param df A data frame with a \code{track_uri} column. #' @param ... Parameters passed on to \code{\link{get_tidy_audio_analysis}}. 
#' #' @seealso \code{\link{get_tidy_audio_analysis}} #' #' @importFrom magrittr %>% #' @export #' #' @examples #' library(tidyverse) #' spotifyr::get_playlist_audio_features("", "37i9dQZF1DX21bRPJuEN7r") %>% #' slice(1:5) %>% #' add_audio_analysis() add_audio_analysis <- function(df, ...) { df %>% dplyr::mutate( track.uri = stringr::str_remove(track.uri, "spotify:track:"), analysis = purrr::map(track.uri, get_tidy_audio_analysis, ...) ) %>% tidyr::unnest(analysis) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_sales.R \docType{data} \name{sales} \alias{sales} \title{Cross section sales analysis per individual} \format{a \code{data frame} with 20000 rows and 17 variables} \usage{ data(sales) } \description{ A dataset contain various sales metrics explaining customer behaviour } \keyword{datasets}
/man/sales.Rd
permissive
HanjoStudy/quotidieR
R
false
true
374
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data_sales.R \docType{data} \name{sales} \alias{sales} \title{Cross section sales analysis per individual} \format{a \code{data frame} with 20000 rows and 17 variables} \usage{ data(sales) } \description{ A dataset contain various sales metrics explaining customer behaviour } \keyword{datasets}
#' Create an appDir for shiny::runApp and rsconnect::deployApp #' #' Create a directory in tempdir() where the folder containing ui.r, server.r #' etc. from an installed package is copied and the data is saved. #' This directory, with its content, will be deployd to or ran by shiny. #' #' @param app_data The data to be saved in the directory, to be used by the app #' @param webpage_title The title of the app #' @param git_hash Current git sha1 hash #' @param github_repo Current github repository #' @param package Package name containing a shiny app in <app_folder> #' @param app_folder The folder name where the shiny app is located. #' #' @return The created directory #' @export #' create_appdir <- function(app_data = NULL, webpage_title = NULL, package = NULL, app_folder = "app", git_hash = NULL, github_repo = NULL) { # Name the directory tmpshinydir <- paste0(tempdir(), "/", "shiny") # Delete old content in directory unlink(tmpshinydir, recursive = TRUE, force = TRUE) # Create main directory dir.create(tmpshinydir) # Copy the installed version of the portal package to the directory if (is.null(package)) { dir.create(paste0(tmpshinydir, "/", app_folder)) } else { file.copy(system.file(app_folder, package = package), tmpshinydir, recursive = TRUE) } # Create data folder dir.create(paste0(tmpshinydir, "/", app_folder, "/", "data")) # Save the data to a .RData file save(app_data, webpage_title, git_hash, github_repo, file = paste0(tmpshinydir, "/", app_folder, "/", "data/data.RData")) # Return the name of the main directory return(paste0(tmpshinydir, "/", app_folder)) }
/R/create_appdir.r
permissive
SKDE-Felles/SKDEr
R
false
false
1,796
r
#' Create an appDir for shiny::runApp and rsconnect::deployApp #' #' Create a directory in tempdir() where the folder containing ui.r, server.r #' etc. from an installed package is copied and the data is saved. #' This directory, with its content, will be deployd to or ran by shiny. #' #' @param app_data The data to be saved in the directory, to be used by the app #' @param webpage_title The title of the app #' @param git_hash Current git sha1 hash #' @param github_repo Current github repository #' @param package Package name containing a shiny app in <app_folder> #' @param app_folder The folder name where the shiny app is located. #' #' @return The created directory #' @export #' create_appdir <- function(app_data = NULL, webpage_title = NULL, package = NULL, app_folder = "app", git_hash = NULL, github_repo = NULL) { # Name the directory tmpshinydir <- paste0(tempdir(), "/", "shiny") # Delete old content in directory unlink(tmpshinydir, recursive = TRUE, force = TRUE) # Create main directory dir.create(tmpshinydir) # Copy the installed version of the portal package to the directory if (is.null(package)) { dir.create(paste0(tmpshinydir, "/", app_folder)) } else { file.copy(system.file(app_folder, package = package), tmpshinydir, recursive = TRUE) } # Create data folder dir.create(paste0(tmpshinydir, "/", app_folder, "/", "data")) # Save the data to a .RData file save(app_data, webpage_title, git_hash, github_repo, file = paste0(tmpshinydir, "/", app_folder, "/", "data/data.RData")) # Return the name of the main directory return(paste0(tmpshinydir, "/", app_folder)) }
# This script contains the R code for GWA plots used in the manuscript: # # Authors: Aguillon SM, Walsh J, Lovette IJ # Year: 2021 # Title: Extensive hybridization reveals multiple coloration genes # underlying a complex plumage phenotype # Journal Info: Proceedings of the Royal Society B, 288, 20201805 # DOI: 10.1098/rspb.2020.1805 # # Please cite the paper if you use these scripts # ## load packages library(qqman) library(tidyverse) ## set directory where GEMMA and VCF output files are stored setwd("~/Desktop/reseq2020/GWA/") ## load file to rename long chromosome names chr_rename <- read.table("./chr_rename_gemma.txt", header=TRUE) ########### GWA OF TRAITS IN HYBRIDS ########### ################################################ ## the code in this section is an example of processing a GEMMA output file ## repeat the process for all traits and for the randomized analyses #### WING AND TAIL COLOR ##### ## load GEMMA output file pseudo_shaft <- read.table("./results_files/GWAS_HZ_lmm_shaft.assoc.txt", header=TRUE) ## clean dataset for plotting, calculate logP from output to_split <- as.character(pseudo_shaft$rs) split <- as.data.frame(do.call(rbind, strsplit(to_split, split=":", fixed=TRUE))) pseudo_shaft <- bind_cols(pseudo_shaft, split) %>% rename(CHR = V1) pseudo_shaft2 <- left_join(pseudo_shaft, chr_rename, by="CHR") pseudo_shaft2 <- select(pseudo_shaft2, CHR_NUM,chr,ps,allele1,allele0,af,beta,se,p_wald,p_lrt,p_score) %>% arrange(CHR_NUM,ps) %>% mutate(logP=-log10(p_wald)) ## output for later plotting if needed (processing takes a bit of time) #write.table(pseudo_shaft2, "./results_files_forfig/GWAS_lmm_shaft_forfig.txt", row.names=FALSE, quote=FALSE, sep="\t") ## plot figure colors <- c("#000000", "#7a7a7a", "#adadad") shaft_plot_clean <- filter(pseudo_shaft2, CHR_NUM<=32) manhattan(filter(shaft_plot_clean,logP>2.5), chr="CHR_NUM", bp="ps", p="logP", logp=FALSE, genomewideline=FALSE, suggestiveline=-log10(0.0000001), ylab="-log(P)", xlab="Chromosome", 
col=colors) ## output SNPs with logP>7 SNPs_shaft_P7 <- filter(pseudo_shaft2, logP>-log10(0.0000001)) write.table(SNPs_shaft_P7, "./SNP_list/pseudo_shaft_P7.txt", row.names=FALSE, quote=FALSE, sep="\t") ########### FST OF GWA SNPS ########### ####################################### ## load file to rename long chromosome names chr <- read.table("./chr_rename.txt", header=TRUE) colnames(chr) <- c("CHROM", "CHR_NUM") ## load file with per-SNP FST fst <- read.table("./RSFL-YSFL_allSNPs.weir.fst", header=TRUE) colnames(fst) <- c("CHROM", "ps", "FST") ## load file with list of significant SNPs snp <- read.table("./HZ_combined_SNPlist.txt", header=TRUE) ## merge files SNPs_merge <- merge(fst, chr, by="CHROM") GWA_FST_merge <- merge(snp, SNPs_merge, by=c("CHR_NUM", "ps")) ## write output table #write.table(GWA_FST_merge, "./FST_GWA_SNPs.txt", quote=FALSE, sep="\t", row.names=FALSE) ## plot figure hist_plot <- ggplot() + geom_histogram(data=GWA_FST_merge, aes(x=WEIR_AND_COCKERHAM_FST), binwidth=0.05) + labs(x="FST", y="Count") + theme_classic() + theme(legend.position="none", axis.line=element_line(color="black"), axis.title=element_text(face="bold",size=12), axis.text=element_text(size=10,color="black")) hist_plot
/Rcode/GWA-plots.R
no_license
stepfanie-aguillon/flicker-WGS-ProcB2021
R
false
false
3,271
r
# This script contains the R code for GWA plots used in the manuscript: # # Authors: Aguillon SM, Walsh J, Lovette IJ # Year: 2021 # Title: Extensive hybridization reveals multiple coloration genes # underlying a complex plumage phenotype # Journal Info: Proceedings of the Royal Society B, 288, 20201805 # DOI: 10.1098/rspb.2020.1805 # # Please cite the paper if you use these scripts # ## load packages library(qqman) library(tidyverse) ## set directory where GEMMA and VCF output files are stored setwd("~/Desktop/reseq2020/GWA/") ## load file to rename long chromosome names chr_rename <- read.table("./chr_rename_gemma.txt", header=TRUE) ########### GWA OF TRAITS IN HYBRIDS ########### ################################################ ## the code in this section is an example of processing a GEMMA output file ## repeat the process for all traits and for the randomized analyses #### WING AND TAIL COLOR ##### ## load GEMMA output file pseudo_shaft <- read.table("./results_files/GWAS_HZ_lmm_shaft.assoc.txt", header=TRUE) ## clean dataset for plotting, calculate logP from output to_split <- as.character(pseudo_shaft$rs) split <- as.data.frame(do.call(rbind, strsplit(to_split, split=":", fixed=TRUE))) pseudo_shaft <- bind_cols(pseudo_shaft, split) %>% rename(CHR = V1) pseudo_shaft2 <- left_join(pseudo_shaft, chr_rename, by="CHR") pseudo_shaft2 <- select(pseudo_shaft2, CHR_NUM,chr,ps,allele1,allele0,af,beta,se,p_wald,p_lrt,p_score) %>% arrange(CHR_NUM,ps) %>% mutate(logP=-log10(p_wald)) ## output for later plotting if needed (processing takes a bit of time) #write.table(pseudo_shaft2, "./results_files_forfig/GWAS_lmm_shaft_forfig.txt", row.names=FALSE, quote=FALSE, sep="\t") ## plot figure colors <- c("#000000", "#7a7a7a", "#adadad") shaft_plot_clean <- filter(pseudo_shaft2, CHR_NUM<=32) manhattan(filter(shaft_plot_clean,logP>2.5), chr="CHR_NUM", bp="ps", p="logP", logp=FALSE, genomewideline=FALSE, suggestiveline=-log10(0.0000001), ylab="-log(P)", xlab="Chromosome", 
col=colors) ## output SNPs with logP>7 SNPs_shaft_P7 <- filter(pseudo_shaft2, logP>-log10(0.0000001)) write.table(SNPs_shaft_P7, "./SNP_list/pseudo_shaft_P7.txt", row.names=FALSE, quote=FALSE, sep="\t") ########### FST OF GWA SNPS ########### ####################################### ## load file to rename long chromosome names chr <- read.table("./chr_rename.txt", header=TRUE) colnames(chr) <- c("CHROM", "CHR_NUM") ## load file with per-SNP FST fst <- read.table("./RSFL-YSFL_allSNPs.weir.fst", header=TRUE) colnames(fst) <- c("CHROM", "ps", "FST") ## load file with list of significant SNPs snp <- read.table("./HZ_combined_SNPlist.txt", header=TRUE) ## merge files SNPs_merge <- merge(fst, chr, by="CHROM") GWA_FST_merge <- merge(snp, SNPs_merge, by=c("CHR_NUM", "ps")) ## write output table #write.table(GWA_FST_merge, "./FST_GWA_SNPs.txt", quote=FALSE, sep="\t", row.names=FALSE) ## plot figure hist_plot <- ggplot() + geom_histogram(data=GWA_FST_merge, aes(x=WEIR_AND_COCKERHAM_FST), binwidth=0.05) + labs(x="FST", y="Count") + theme_classic() + theme(legend.position="none", axis.line=element_line(color="black"), axis.title=element_text(face="bold",size=12), axis.text=element_text(size=10,color="black")) hist_plot
library(maps) library(mapdata) library(maptools) library(spatstat) library(spatstat.utils) library(rgeos) library(sp) library(leaflet) # Load data df <- read.csv("MetroBarcelona.csv") mapAreas <- c("Spain") counties <- map("worldHires", "Spain", exact = TRUE, fill=TRUE, plot=FALSE) countries <- gUnaryUnion(map2SpatialPolygons(counties, IDs = counties$names, proj4string = CRS("+proj=longlat +datum=WGS84"))) # Transform the map to UTM coordinates countries <- spTransform(countries, CRS("+proj=utm +zone=31 ellps=WGS84")) W <- as(countries, "owin") # Transform coordinates from long and lat to UTM coordinates lonLat <- data.frame(ID = df$Id, X = df$Longitude, Y = df$Latitude) coordinates(lonLat) <- c("X", "Y") proj4string(lonLat) <- CRS("+proj=longlat +datum=WGS84") utmCoord <- spTransform(lonLat, CRS("+proj=utm +zone=31 ellps=WGS84")) X <- ppp(x = utmCoord$X, y = utmCoord$Y, window = W) # Dirichlet tesselation y <- dirichlet(X) names(y$tiles) <- df$Name # Convert large tess to spatial polygons owin2Polygons <- function(x, id="1") { stopifnot(is.owin(x)) x <- as.polygonal(x) closering <- function(df) { df[c(seq(nrow(df)), 1),] } pieces <- lapply(x$bdry, function(p) { Polygon(coords = closering(cbind(p$x, p$y)), hole = is.hole.xypolygon(p)) }) z <- Polygons(pieces, id) return(z) } tess2SP <- function(x) { stopifnot(is.tess(x)) y <- tiles(x) nom <- names(y) z <- list() for(i in seq(y)) z[[i]] <- owin2Polygons(y[[i]], nom[i]) return(SpatialPolygons(z)) } tessSP <- tess2SP(y) # Transform polygons from UTM back to long and lat proj4string(tessSP) <- CRS("+proj=utm +zone=31 ellps=WGS84") tessSP <- spTransform(tessSP, CRS("+proj=longlat +datum=WGS84")) # Get metro lines and their stops l1 <- subset(df, !is.na(df$L1)) l1 <- l1[order(l1$L1), ] l2 <- subset(df, !is.na(df$L2)) l2 <- l2[order(l2$L2), ] l3 <- subset(df, !is.na(df$L3)) l3 <- l3[order(l3$L3), ] l4 <- subset(df, !is.na(df$L4)) l4 <- l4[order(l4$L4), ] l5 <- subset(df, !is.na(df$L5)) l5 <- l5[order(l5$L5), ] l6 <- 
subset(df, !is.na(df$L6)) l6 <- l6[order(l6$L6), ] l7 <- subset(df, !is.na(df$L7)) l7 <- l7[order(l7$L7), ] l8 <- subset(df, !is.na(df$L8)) l8 <- l8[order(l8$L8), ] l9S <- subset(df, !is.na(df$L9S)) l9S <- l9S[order(l9S$L9S), ] l9N <- subset(df, !is.na(df$L9N)) l9N <- l9N[order(l9N$L9N), ] l10 <- subset(df, !is.na(df$L10)) l10 <- l10[order(l10$L10), ] l11 <- subset(df, !is.na(df$L11)) l11 <- l11[order(l11$L11), ] l12 <- subset(df, !is.na(df$L12)) l12 <- l12[order(l12$L12), ] # Generate leaflet map leaflet() %>% # Base map addProviderTiles("Hydda.Full") %>% # Voronoi layer addPolygons(data = tessSP, stroke = TRUE, color = "black", weight = 0.5, fill=TRUE, fillOpacity = 0, label = df$Name, popup = df$Name) %>% # L1 addPolylines(data = l1, lng = ~Longitude, lat = ~Latitude, color = "#e1393e") %>% # L2 addPolylines(data = l2, lng = ~Longitude, lat = ~Latitude, color = "#9c459a") %>% # L3 addPolylines(data = l3, lng = ~Longitude, lat = ~Latitude, color = "#53b955") %>% # L4 addPolylines(data = l4, lng = ~Longitude, lat = ~Latitude, color = "#febd10") %>% # L5 addPolylines(data = l5, lng = ~Longitude, lat = ~Latitude, color = "#317bc8") %>% # L6 addPolylines(data = l6, lng = ~Longitude, lat = ~Latitude, color = "#847dc6") %>% # L7 addPolylines(data = l7, lng = ~Longitude, lat = ~Latitude, color = "#ae6118") %>% # L8 addPolylines(data = l8, lng = ~Longitude, lat = ~Latitude, color = "#e659b4") %>% # L9S addPolylines(data = l9S, lng = ~Longitude, lat = ~Latitude, color = "#f68429") %>% # L9N addPolylines(data = l9N, lng = ~Longitude, lat = ~Latitude, color = "#f68429") %>% # L10 addPolylines(data = l10, lng = ~Longitude, lat = ~Latitude, color = "#00adef") %>% # L11 addPolylines(data = l11, lng = ~Longitude, lat = ~Latitude, color = "#a8d164") %>% # L12 addPolylines(data = l12, lng = ~Longitude, lat = ~Latitude, color = "#b6b3e1") %>% # Metro stops addCircles(data = df, lng = ~Longitude, lat = ~Latitude, radius = 10, popup = df$Name, label = df$Name, color = "black", 
weight = 1, opacity = 1, fillColor = "white", fillOpacity = 1)
/VoronoiMetroBarcelona.R
no_license
hectorgm/VoronoiMetroBarcelona
R
false
false
5,411
r
library(maps) library(mapdata) library(maptools) library(spatstat) library(spatstat.utils) library(rgeos) library(sp) library(leaflet) # Load data df <- read.csv("MetroBarcelona.csv") mapAreas <- c("Spain") counties <- map("worldHires", "Spain", exact = TRUE, fill=TRUE, plot=FALSE) countries <- gUnaryUnion(map2SpatialPolygons(counties, IDs = counties$names, proj4string = CRS("+proj=longlat +datum=WGS84"))) # Transform the map to UTM coordinates countries <- spTransform(countries, CRS("+proj=utm +zone=31 ellps=WGS84")) W <- as(countries, "owin") # Transform coordinates from long and lat to UTM coordinates lonLat <- data.frame(ID = df$Id, X = df$Longitude, Y = df$Latitude) coordinates(lonLat) <- c("X", "Y") proj4string(lonLat) <- CRS("+proj=longlat +datum=WGS84") utmCoord <- spTransform(lonLat, CRS("+proj=utm +zone=31 ellps=WGS84")) X <- ppp(x = utmCoord$X, y = utmCoord$Y, window = W) # Dirichlet tesselation y <- dirichlet(X) names(y$tiles) <- df$Name # Convert large tess to spatial polygons owin2Polygons <- function(x, id="1") { stopifnot(is.owin(x)) x <- as.polygonal(x) closering <- function(df) { df[c(seq(nrow(df)), 1),] } pieces <- lapply(x$bdry, function(p) { Polygon(coords = closering(cbind(p$x, p$y)), hole = is.hole.xypolygon(p)) }) z <- Polygons(pieces, id) return(z) } tess2SP <- function(x) { stopifnot(is.tess(x)) y <- tiles(x) nom <- names(y) z <- list() for(i in seq(y)) z[[i]] <- owin2Polygons(y[[i]], nom[i]) return(SpatialPolygons(z)) } tessSP <- tess2SP(y) # Transform polygons from UTM back to long and lat proj4string(tessSP) <- CRS("+proj=utm +zone=31 ellps=WGS84") tessSP <- spTransform(tessSP, CRS("+proj=longlat +datum=WGS84")) # Get metro lines and their stops l1 <- subset(df, !is.na(df$L1)) l1 <- l1[order(l1$L1), ] l2 <- subset(df, !is.na(df$L2)) l2 <- l2[order(l2$L2), ] l3 <- subset(df, !is.na(df$L3)) l3 <- l3[order(l3$L3), ] l4 <- subset(df, !is.na(df$L4)) l4 <- l4[order(l4$L4), ] l5 <- subset(df, !is.na(df$L5)) l5 <- l5[order(l5$L5), ] l6 <- 
subset(df, !is.na(df$L6)) l6 <- l6[order(l6$L6), ] l7 <- subset(df, !is.na(df$L7)) l7 <- l7[order(l7$L7), ] l8 <- subset(df, !is.na(df$L8)) l8 <- l8[order(l8$L8), ] l9S <- subset(df, !is.na(df$L9S)) l9S <- l9S[order(l9S$L9S), ] l9N <- subset(df, !is.na(df$L9N)) l9N <- l9N[order(l9N$L9N), ] l10 <- subset(df, !is.na(df$L10)) l10 <- l10[order(l10$L10), ] l11 <- subset(df, !is.na(df$L11)) l11 <- l11[order(l11$L11), ] l12 <- subset(df, !is.na(df$L12)) l12 <- l12[order(l12$L12), ] # Generate leaflet map leaflet() %>% # Base map addProviderTiles("Hydda.Full") %>% # Voronoi layer addPolygons(data = tessSP, stroke = TRUE, color = "black", weight = 0.5, fill=TRUE, fillOpacity = 0, label = df$Name, popup = df$Name) %>% # L1 addPolylines(data = l1, lng = ~Longitude, lat = ~Latitude, color = "#e1393e") %>% # L2 addPolylines(data = l2, lng = ~Longitude, lat = ~Latitude, color = "#9c459a") %>% # L3 addPolylines(data = l3, lng = ~Longitude, lat = ~Latitude, color = "#53b955") %>% # L4 addPolylines(data = l4, lng = ~Longitude, lat = ~Latitude, color = "#febd10") %>% # L5 addPolylines(data = l5, lng = ~Longitude, lat = ~Latitude, color = "#317bc8") %>% # L6 addPolylines(data = l6, lng = ~Longitude, lat = ~Latitude, color = "#847dc6") %>% # L7 addPolylines(data = l7, lng = ~Longitude, lat = ~Latitude, color = "#ae6118") %>% # L8 addPolylines(data = l8, lng = ~Longitude, lat = ~Latitude, color = "#e659b4") %>% # L9S addPolylines(data = l9S, lng = ~Longitude, lat = ~Latitude, color = "#f68429") %>% # L9N addPolylines(data = l9N, lng = ~Longitude, lat = ~Latitude, color = "#f68429") %>% # L10 addPolylines(data = l10, lng = ~Longitude, lat = ~Latitude, color = "#00adef") %>% # L11 addPolylines(data = l11, lng = ~Longitude, lat = ~Latitude, color = "#a8d164") %>% # L12 addPolylines(data = l12, lng = ~Longitude, lat = ~Latitude, color = "#b6b3e1") %>% # Metro stops addCircles(data = df, lng = ~Longitude, lat = ~Latitude, radius = 10, popup = df$Name, label = df$Name, color = "black", 
weight = 1, opacity = 1, fillColor = "white", fillOpacity = 1)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rf_forecast.R \name{rf_forecast} \alias{rf_forecast} \title{function to calculate point forecast, 95\% confidence intervals, forecast-accuracy for new series} \usage{ rf_forecast(predictions, tslist, database, function_name, h, accuracy, holdout = TRUE) } \arguments{ \item{predictions}{prediction results obtained from random forest classifier} \item{tslist}{list of new time series} \item{database}{whethe the time series is from mcom or other} \item{function_name}{specify the name of the accuracy function (for eg., cal_MASE, etc.) to calculate accuracy measure, ( if a user written function the arguments for the accuracy function should be training period, test period and forecast).} \item{h}{length of the forecast horizon} \item{accuracy}{if true a accuaracy measure will be calculated} \item{holdout}{if holdout=TRUE take a holdout sample from your data to caldulate forecast accuracy measure, if FALSE all of the data will be used for forecasting. Default is TRUE} } \value{ a list containing, point forecast, confidence interval, accuracy measure } \description{ Given the prediction results of random forest calculate point forecast, 95\% confidence intervals, forecast-accuracy for the test set } \author{ Thiyanga Talagala }
/man/rf_forecast.Rd
no_license
martins0n/seer
R
false
true
1,327
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rf_forecast.R \name{rf_forecast} \alias{rf_forecast} \title{function to calculate point forecast, 95\% confidence intervals, forecast-accuracy for new series} \usage{ rf_forecast(predictions, tslist, database, function_name, h, accuracy, holdout = TRUE) } \arguments{ \item{predictions}{prediction results obtained from random forest classifier} \item{tslist}{list of new time series} \item{database}{whethe the time series is from mcom or other} \item{function_name}{specify the name of the accuracy function (for eg., cal_MASE, etc.) to calculate accuracy measure, ( if a user written function the arguments for the accuracy function should be training period, test period and forecast).} \item{h}{length of the forecast horizon} \item{accuracy}{if true a accuaracy measure will be calculated} \item{holdout}{if holdout=TRUE take a holdout sample from your data to caldulate forecast accuracy measure, if FALSE all of the data will be used for forecasting. Default is TRUE} } \value{ a list containing, point forecast, confidence interval, accuracy measure } \description{ Given the prediction results of random forest calculate point forecast, 95\% confidence intervals, forecast-accuracy for the test set } \author{ Thiyanga Talagala }
library(pecanapi) import::from(magrittr, "%>%") source(file.path("analysis", "scratch", "helpers.R")) ## model_id <- 99000000006 # ED develop model_id <- 99000000001 # ED develop site_id <- 1000000033 # UMBS disturbance machine_id <- 99000000001 ## DBI::dbSendStatement(con, paste0( ## "INSERT INTO models (model_name, revision, modeltype_id) ", ## "VALUES ('ED2-experimental', 'experimental', 1)" ## )) ## DBI::dbSendStatement(con, glue::glue( ## "INSERT INTO dbfiles (container_type, container_id, file_name, file_path, machine_id) ", ## "VALUES ('Model', {model_id}, 'ed2.develop', '/usr/local/bin', {machine_id})" ## )) ## dplyr::tbl(con, "dbfiles") %>% dplyr::glimpse() ## dplyr::tbl(con, "machines") %>% ## dplyr::filter(hostname %like% "%docker%") %>% ## dplyr::pull(id) workflow <- insert_new_workflow(con, site_id, model_id, start_date = "1901-06-01", end_date = "1901-08-31") workflow_id <- workflow[["id"]] pft_list <- list( list(name = "temperate.Early_Hardwood", ed2_pft_number = 9), list(name = "temperate.North_Mid_Hardwood", ed2_pft_number = 10), list(name = "temperate.Late_Hardwood", ed2_pft_number = 11) ) settings <- list() %>% add_workflow(workflow) %>% add_database() %>% add_pft_list(pft_list) %>% add_rabbitmq(model_queue = "ED2_develop") %>% modifyList(list( meta.analysis = list(iter = 3000, random.effects = FALSE), run = list(inputs = list(met = list(source = "CRUNCEP", output = "ED2", method = "ncss"))), ensemble = list(size = 1, variable = "NPP") )) %>% modifyList(list( run = list(inputs = list( lu = list(id = 294), soil = list(id = 297), thsum = list(id = 295), veg = list(id = 296) )), model = list( exact.dates = "true", phenol.scheme = 0, edin = "ED2IN.rgit", prerun = "ulimit -s unlimited", barebones_ed2in = "true", ed2in_tags = list( IOOUTPUT = 0, PLANT_HYDRO_SCHEME = 0, ISTOMATA_SCHEME = 0, ISTRUCT_GROWTH_SCHEME = 0, TRAIT_PLASTICITY_SCHEME = 0, ICANRAD = 2, CROWN_MOD = 0, N_PLANT_LIM = 0, N_DECOMP_LIM = 1, INCLUDE_THESE_PFT = "9,10,11" ) ) )) 
submit_workflow(settings) follow_workflow(workflow_id, start_at = Inf) if (FALSE) { rout <- output_url(workflow_id, "workflow.Rout") %>% readLines() ## writeLines(rout[]) ## writeLines(tail(rout, 200)) ## tail(rout, 40) %>% writeLines() # Follow model log file logfile <- run_url(workflow_id, "logfile.txt") tail_file(logfile) read_output_var <- function(workflow_id, variable) { ncfile <- run_dap(workflow_id, "1980.nc") nc <- ncdf4::nc_open(ncfile) on.exit(ncdf4::nc_close(nc), add = TRUE) ncdf4::ncvar_get(nc, variable) } albedo_2s <- read_output_var(99000000035, "Albedo") albedo_ms <- read_output_var(99000000034, "Albedo") albedo_2s_nc <- read_output_var(99000000036, "Albedo") matplot(cbind(albedo_2s, albedo_ms, albedo_2s_nc), type = "l") plot(albedo_2s - albedo_2s_nc, type = "l") outfile <- run_dap(workflow_id, "1980.nc") nc <- ncdf4::nc_open(outfile) gpp <- ncdf4::ncvar_get(nc, "GPP") npp <- ncdf4::ncvar_get(nc, "NPP") albedo <- ncdf4::ncvar_get(nc, "Albedo") ## names(nc$var) plot(albedo, type = 'l') } ## while (TRUE) { if (FALSE) { ## runid <- 99000000117 ## message("As of: ", Sys.time()) readLines(run_url(workflow_id, "logfile.txt", runid)) %>% tail(2) %>% writeLines() writeLines("---------------") ## Sys.sleep(2) } if (FALSE) { output <- workflow_output(workflow_id) writeLines(output) readLines(run_url(workflow_id, "ED")) %>% writeLines() readLines(output_url(workflow_id, "pecan.xml")) %>% writeLines() run_id <- list_runs(con, workflow_id)[["id"]] ed2in <- output_url(workflow_id, file.path("run", run_id, "ED2IN")) %>% readLines() writeLines(ed2in) result_nc <- ncdf4::nc_open(run_dap(workflow_id, "2004.nc")) gpp <- ncdf4::ncvar_get(result_nc, "GPP") time <- ncdf4::ncvar_get(result_nc, "time") ncdf4::nc_close(result_nc) plot(time, gpp, type = "l") }
/analysis/scratch/test_ed_cases.R
permissive
ashiklom/fortebaseline
R
false
false
4,209
r
library(pecanapi) import::from(magrittr, "%>%") source(file.path("analysis", "scratch", "helpers.R")) ## model_id <- 99000000006 # ED develop model_id <- 99000000001 # ED develop site_id <- 1000000033 # UMBS disturbance machine_id <- 99000000001 ## DBI::dbSendStatement(con, paste0( ## "INSERT INTO models (model_name, revision, modeltype_id) ", ## "VALUES ('ED2-experimental', 'experimental', 1)" ## )) ## DBI::dbSendStatement(con, glue::glue( ## "INSERT INTO dbfiles (container_type, container_id, file_name, file_path, machine_id) ", ## "VALUES ('Model', {model_id}, 'ed2.develop', '/usr/local/bin', {machine_id})" ## )) ## dplyr::tbl(con, "dbfiles") %>% dplyr::glimpse() ## dplyr::tbl(con, "machines") %>% ## dplyr::filter(hostname %like% "%docker%") %>% ## dplyr::pull(id) workflow <- insert_new_workflow(con, site_id, model_id, start_date = "1901-06-01", end_date = "1901-08-31") workflow_id <- workflow[["id"]] pft_list <- list( list(name = "temperate.Early_Hardwood", ed2_pft_number = 9), list(name = "temperate.North_Mid_Hardwood", ed2_pft_number = 10), list(name = "temperate.Late_Hardwood", ed2_pft_number = 11) ) settings <- list() %>% add_workflow(workflow) %>% add_database() %>% add_pft_list(pft_list) %>% add_rabbitmq(model_queue = "ED2_develop") %>% modifyList(list( meta.analysis = list(iter = 3000, random.effects = FALSE), run = list(inputs = list(met = list(source = "CRUNCEP", output = "ED2", method = "ncss"))), ensemble = list(size = 1, variable = "NPP") )) %>% modifyList(list( run = list(inputs = list( lu = list(id = 294), soil = list(id = 297), thsum = list(id = 295), veg = list(id = 296) )), model = list( exact.dates = "true", phenol.scheme = 0, edin = "ED2IN.rgit", prerun = "ulimit -s unlimited", barebones_ed2in = "true", ed2in_tags = list( IOOUTPUT = 0, PLANT_HYDRO_SCHEME = 0, ISTOMATA_SCHEME = 0, ISTRUCT_GROWTH_SCHEME = 0, TRAIT_PLASTICITY_SCHEME = 0, ICANRAD = 2, CROWN_MOD = 0, N_PLANT_LIM = 0, N_DECOMP_LIM = 1, INCLUDE_THESE_PFT = "9,10,11" ) ) )) 
submit_workflow(settings) follow_workflow(workflow_id, start_at = Inf) if (FALSE) { rout <- output_url(workflow_id, "workflow.Rout") %>% readLines() ## writeLines(rout[]) ## writeLines(tail(rout, 200)) ## tail(rout, 40) %>% writeLines() # Follow model log file logfile <- run_url(workflow_id, "logfile.txt") tail_file(logfile) read_output_var <- function(workflow_id, variable) { ncfile <- run_dap(workflow_id, "1980.nc") nc <- ncdf4::nc_open(ncfile) on.exit(ncdf4::nc_close(nc), add = TRUE) ncdf4::ncvar_get(nc, variable) } albedo_2s <- read_output_var(99000000035, "Albedo") albedo_ms <- read_output_var(99000000034, "Albedo") albedo_2s_nc <- read_output_var(99000000036, "Albedo") matplot(cbind(albedo_2s, albedo_ms, albedo_2s_nc), type = "l") plot(albedo_2s - albedo_2s_nc, type = "l") outfile <- run_dap(workflow_id, "1980.nc") nc <- ncdf4::nc_open(outfile) gpp <- ncdf4::ncvar_get(nc, "GPP") npp <- ncdf4::ncvar_get(nc, "NPP") albedo <- ncdf4::ncvar_get(nc, "Albedo") ## names(nc$var) plot(albedo, type = 'l') } ## while (TRUE) { if (FALSE) { ## runid <- 99000000117 ## message("As of: ", Sys.time()) readLines(run_url(workflow_id, "logfile.txt", runid)) %>% tail(2) %>% writeLines() writeLines("---------------") ## Sys.sleep(2) } if (FALSE) { output <- workflow_output(workflow_id) writeLines(output) readLines(run_url(workflow_id, "ED")) %>% writeLines() readLines(output_url(workflow_id, "pecan.xml")) %>% writeLines() run_id <- list_runs(con, workflow_id)[["id"]] ed2in <- output_url(workflow_id, file.path("run", run_id, "ED2IN")) %>% readLines() writeLines(ed2in) result_nc <- ncdf4::nc_open(run_dap(workflow_id, "2004.nc")) gpp <- ncdf4::ncvar_get(result_nc, "GPP") time <- ncdf4::ncvar_get(result_nc, "time") ncdf4::nc_close(result_nc) plot(time, gpp, type = "l") }
# load packages#### library(tableone) library(caret) library(reshape2) library(tidyr) library(dplyr) library(binom) library(survival) library(gridExtra) library(grid) library(survMisc) library(scales) library(ggplot2) library(tidyr) library(metafor) library(xlsx) library(xlsxjars) library(rJava) library(readxl) library(stringr) library(reshape) library(data.table) library(plyr) library(RColorBrewer) library(rworldmap) library(classInt) library(gdata) library(forestplot) library(Hmisc) library(data.table) final <- read.csv("data_derived.v2/final_age_std.csv") ##============================================== ## PLOTS ##============================================== #2017 data and joining to a map dat.seventeen <- final %>% group_by(year, iso3) %>% dplyr::summarise(burdence=sum(burdence), burdenll=sum(burdenll), burdenul=sum(burdenul)) %>% filter(year==2017) %>% ungroup() %>% mutate_at(vars(burdence, burdenll, burdenul), funs(.*1000)) dat.seventeen$burdencat <- cut2(dat.seventeen$burdence, g=10) pdf(file="results.v2/Age-standardised Burden Cartogram 2017.pdf", height=5, width=10) data(dat.seventeen) sPDF <- joinCountryData2Map(dat.seventeen ,joinCode = "ISO3" ,nameJoinColumn = "iso3") sPDF <- subset(sPDF, continent != "Antarctica") par(mai=c(0,0,0.2,0),xaxs="i",yaxs="i") #getting colours colourPalette <- brewer.pal(10,'OrRd') #plot map #create world map shaped window mapParams <- mapCountryData(sPDF ,nameColumnToPlot="burdencat" ,addLegend=FALSE ,missingCountryCol = 'dark grey' ,colourPalette=colourPalette, mapTitle='' ,catMethod="categorical") #adding legend mapParams$legendText <- c("0", "7-10", "10-29", "29-67", "67-260", "260-516", "516-1510", "1510-1944", "1944-2566", "2566-5152") # do.call(addMapLegendBoxes # ,c(mapParams # ,ncol=2 # ,x='bottomleft' # ,title="Age-standardised DALYs (thousands)")) dev.off() iso <- readRDS("data_derived/iso.rds") table.dat.seventeen <- left_join(dat.seventeen, iso) %>% mutate(burden=paste0(round(burdence, digits=0)," [", 
round(burdenll, digits=0), " - ", round(burdenul, digits=0), "]")) %>% select(country, burden) write.csv(table.dat.seventeen, "results.v2/country_burden_agestd.table.csv")
/part11_plot_burden_age_std.R
no_license
xjong/HAP
R
false
false
2,456
r
# load packages#### library(tableone) library(caret) library(reshape2) library(tidyr) library(dplyr) library(binom) library(survival) library(gridExtra) library(grid) library(survMisc) library(scales) library(ggplot2) library(tidyr) library(metafor) library(xlsx) library(xlsxjars) library(rJava) library(readxl) library(stringr) library(reshape) library(data.table) library(plyr) library(RColorBrewer) library(rworldmap) library(classInt) library(gdata) library(forestplot) library(Hmisc) library(data.table) final <- read.csv("data_derived.v2/final_age_std.csv") ##============================================== ## PLOTS ##============================================== #2017 data and joining to a map dat.seventeen <- final %>% group_by(year, iso3) %>% dplyr::summarise(burdence=sum(burdence), burdenll=sum(burdenll), burdenul=sum(burdenul)) %>% filter(year==2017) %>% ungroup() %>% mutate_at(vars(burdence, burdenll, burdenul), funs(.*1000)) dat.seventeen$burdencat <- cut2(dat.seventeen$burdence, g=10) pdf(file="results.v2/Age-standardised Burden Cartogram 2017.pdf", height=5, width=10) data(dat.seventeen) sPDF <- joinCountryData2Map(dat.seventeen ,joinCode = "ISO3" ,nameJoinColumn = "iso3") sPDF <- subset(sPDF, continent != "Antarctica") par(mai=c(0,0,0.2,0),xaxs="i",yaxs="i") #getting colours colourPalette <- brewer.pal(10,'OrRd') #plot map #create world map shaped window mapParams <- mapCountryData(sPDF ,nameColumnToPlot="burdencat" ,addLegend=FALSE ,missingCountryCol = 'dark grey' ,colourPalette=colourPalette, mapTitle='' ,catMethod="categorical") #adding legend mapParams$legendText <- c("0", "7-10", "10-29", "29-67", "67-260", "260-516", "516-1510", "1510-1944", "1944-2566", "2566-5152") # do.call(addMapLegendBoxes # ,c(mapParams # ,ncol=2 # ,x='bottomleft' # ,title="Age-standardised DALYs (thousands)")) dev.off() iso <- readRDS("data_derived/iso.rds") table.dat.seventeen <- left_join(dat.seventeen, iso) %>% mutate(burden=paste0(round(burdence, digits=0)," [", 
round(burdenll, digits=0), " - ", round(burdenul, digits=0), "]")) %>% select(country, burden) write.csv(table.dat.seventeen, "results.v2/country_burden_agestd.table.csv")
\name{NEWS} \title{ChemmineR News} \section{CHANGES IN VERSION 2.14.0}{ \subsection{NEW FEATURES}{ \itemize{ \item Integration of OpenBabel functionalities via new ChemmineOB add-on package \item Improved SMILES support via new SMIset object class and SMILES import/export functions \item Many other compound formats are now supported via ChemmineOB } } } \section{CHANGES IN VERSION 2.12.0}{ \subsection{NEW FEATURES}{ \itemize{ \item Accelerated similarity searching of large small molecule data sets via new eiR add-on package \item Jarvis-Patrick clustering of large small molecule data sets \item SQLite support for small molecule management } } } \section{CHANGES IN VERSION 2.10.0}{ \subsection{NEW FEATURES}{ \itemize{ \item Streaming functionality for SDFs enables processing of millions of molecules on a laptop \item Fast and memory efficient fingerprint searches with atom pair fingerprints or PubChem fingerprints \item Flexible maximum common substructure (MCS) search support provided by new fmcs.R add-on package } } }
/inst/NEWS.Rd
no_license
huaner123/ChemmineR
R
false
false
1,209
rd
\name{NEWS} \title{ChemmineR News} \section{CHANGES IN VERSION 2.14.0}{ \subsection{NEW FEATURES}{ \itemize{ \item Integration of OpenBabel functionalities via new ChemmineOB add-on package \item Improved SMILES support via new SMIset object class and SMILES import/export functions \item Many other compound formats are now supported via ChemmineOB } } } \section{CHANGES IN VERSION 2.12.0}{ \subsection{NEW FEATURES}{ \itemize{ \item Accelerated similarity searching of large small molecule data sets via new eiR add-on package \item Jarvis-Patrick clustering of large small molecule data sets \item SQLite support for small molecule management } } } \section{CHANGES IN VERSION 2.10.0}{ \subsection{NEW FEATURES}{ \itemize{ \item Streaming functionality for SDFs enables processing of millions of molecules on a laptop \item Fast and memory efficient fingerprint searches with atom pair fingerprints or PubChem fingerprints \item Flexible maximum common substructure (MCS) search support provided by new fmcs.R add-on package } } }
library(ggplot2) library(ggthemes) df <-mpg #pl <- ggplot(df,aes(x=hwy)) #pl2 <- pl + geom_histogram(bins=20, fill='red', alpha=0.5) #print(pl2) #pl <- ggplot(df,aes(x=manufacturer)) #pl2 <- pl + geom_bar(aes(fill=factor(cyl)), alpha=0.5) #print(pl2) df <- txhousing #print(df) pl <- ggplot(df,aes(x=sales, y=volume)) pl2<- pl + geom_point(color='blue', alpha = 0.3) pl3 <- pl2 + geom_smooth(color='red') print(pl3)
/rvisulas08.R
no_license
venkatram64/rwork
R
false
false
429
r
library(ggplot2) library(ggthemes) df <-mpg #pl <- ggplot(df,aes(x=hwy)) #pl2 <- pl + geom_histogram(bins=20, fill='red', alpha=0.5) #print(pl2) #pl <- ggplot(df,aes(x=manufacturer)) #pl2 <- pl + geom_bar(aes(fill=factor(cyl)), alpha=0.5) #print(pl2) df <- txhousing #print(df) pl <- ggplot(df,aes(x=sales, y=volume)) pl2<- pl + geom_point(color='blue', alpha = 0.3) pl3 <- pl2 + geom_smooth(color='red') print(pl3)
#' Download and aggregate data from public bicycle hire systems #' #' Download data from all public bicycle hire systems which provide open data, #' currently including #' \itemize{ #' \item Santander Cycles London, U.K. #' \item citibike New York City NY, U.S.A. #' \item Divvy Chicago IL, U.S.A. #' \item Capital BikeShare Washingon DC, U.S.A. #' \item Hubway Boston MA, U.S.A. #' \item Metro Los Angeles CA, U.S.A. #' } #' #' @section Download and store data: #' \itemize{ #' \item \code{dl_bikedata} Download data for particular cities and dates #' \item \code{store_bikedata} Store data in \code{SQLite3} database #' } #' #' @section Sample data for testing package: #' \itemize{ #' \item \code{bike_test_data} Description of test data included with package #' \item \code{bike_write_test_data} Write test data to disk in form precisely #' reflecting data provided by all systems #' \item \code{bike_rm_test_data} Remove data written to disk with #' \code{bike_write_test_data} #' } #' #' @section Functions to aggregate trip data: #' \itemize{ #' \item \code{bike_daily_trips} Aggregate daily time series of total trips #' \item \code{bike_stations} Extract table detailing locations and names of #' bicycle docking stations #' \item \code{bike_tripmat} Extract aggregate counts of trips between all pairs #' of stations within a given city #' } #' #' @section Summary Statistics: #' \itemize{ #' \item \code{bike_summary_stats} Overall quantitative summary of database #' contents. All of the following functions provide individual aspects of this #' summary. #' \item \code{bike_db_totals} Count total numbers of trips or stations, either #' for entire database or a specified city. #' \item \code{bike_datelimits} Return dates of first and last trips, either for #' entire database or a specified city. 
#' \item \code{bike_demographic_data} Simple table indicating which cities #' include demographic parameters with their data #' \item \code{bike_latest_files} Check whether files contained in database are #' latest published versions #' } #' #' @name bikedata #' @docType package #' @author Mark Padgham #' @importFrom DBI dbBind dbClearResult dbConnect dbDisconnect #' @importFrom DBI dbGetQuery dbSendQuery #' @importFrom dodgr dodgr_dists #' @importFrom httr content GET #' @importFrom lubridate ddays interval ymd #' @importFrom magrittr %>% #' @importFrom methods as #' @importFrom Rcpp evalCpp #' @importFrom RSQLite SQLite #' @importFrom reshape2 dcast melt #' @importFrom stats sd #' @importFrom tibble as_tibble tibble #' @importFrom utils data menu read.csv tail type.convert unzip write.csv zip #' @importFrom xml2 xml_children xml_find_all #' @useDynLib bikedata, .registration = TRUE NULL
/R/bikedata-package.R
no_license
graceli8/bikedata
R
false
false
2,716
r
#' Download and aggregate data from public bicycle hire systems #' #' Download data from all public bicycle hire systems which provide open data, #' currently including #' \itemize{ #' \item Santander Cycles London, U.K. #' \item citibike New York City NY, U.S.A. #' \item Divvy Chicago IL, U.S.A. #' \item Capital BikeShare Washingon DC, U.S.A. #' \item Hubway Boston MA, U.S.A. #' \item Metro Los Angeles CA, U.S.A. #' } #' #' @section Download and store data: #' \itemize{ #' \item \code{dl_bikedata} Download data for particular cities and dates #' \item \code{store_bikedata} Store data in \code{SQLite3} database #' } #' #' @section Sample data for testing package: #' \itemize{ #' \item \code{bike_test_data} Description of test data included with package #' \item \code{bike_write_test_data} Write test data to disk in form precisely #' reflecting data provided by all systems #' \item \code{bike_rm_test_data} Remove data written to disk with #' \code{bike_write_test_data} #' } #' #' @section Functions to aggregate trip data: #' \itemize{ #' \item \code{bike_daily_trips} Aggregate daily time series of total trips #' \item \code{bike_stations} Extract table detailing locations and names of #' bicycle docking stations #' \item \code{bike_tripmat} Extract aggregate counts of trips between all pairs #' of stations within a given city #' } #' #' @section Summary Statistics: #' \itemize{ #' \item \code{bike_summary_stats} Overall quantitative summary of database #' contents. All of the following functions provide individual aspects of this #' summary. #' \item \code{bike_db_totals} Count total numbers of trips or stations, either #' for entire database or a specified city. #' \item \code{bike_datelimits} Return dates of first and last trips, either for #' entire database or a specified city. 
#' \item \code{bike_demographic_data} Simple table indicating which cities #' include demographic parameters with their data #' \item \code{bike_latest_files} Check whether files contained in database are #' latest published versions #' } #' #' @name bikedata #' @docType package #' @author Mark Padgham #' @importFrom DBI dbBind dbClearResult dbConnect dbDisconnect #' @importFrom DBI dbGetQuery dbSendQuery #' @importFrom dodgr dodgr_dists #' @importFrom httr content GET #' @importFrom lubridate ddays interval ymd #' @importFrom magrittr %>% #' @importFrom methods as #' @importFrom Rcpp evalCpp #' @importFrom RSQLite SQLite #' @importFrom reshape2 dcast melt #' @importFrom stats sd #' @importFrom tibble as_tibble tibble #' @importFrom utils data menu read.csv tail type.convert unzip write.csv zip #' @importFrom xml2 xml_children xml_find_all #' @useDynLib bikedata, .registration = TRUE NULL
source( "masternegloglikeeps1.R" ) source("eudicottree.R" ) library( "expm" ) source( "Qmatrixwoodherb2.R" ) source("Pruning2.R") bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE) last.state=50 uniform.samples<-read.csv("sample195.csv",header=FALSE) a<- as.numeric(t(uniform.samples)) p.0<-rep(1,2*(last.state+1))/(2*(last.state+1)) results<-rep(0,9) mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE) print(mle) if(class(mle)=="try-error"){results<-rep(NA,9)}else{ results[1:10]<-exp(mle$par) results[11]<-mle$value} write.table(results,file="results195.csv",sep=",")
/Full model optimizations/explorelikeuni195.R
no_license
roszenil/Bichromdryad
R
false
false
745
r
source( "masternegloglikeeps1.R" ) source("eudicottree.R" ) library( "expm" ) source( "Qmatrixwoodherb2.R" ) source("Pruning2.R") bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE) last.state=50 uniform.samples<-read.csv("sample195.csv",header=FALSE) a<- as.numeric(t(uniform.samples)) p.0<-rep(1,2*(last.state+1))/(2*(last.state+1)) results<-rep(0,9) mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE) print(mle) if(class(mle)=="try-error"){results<-rep(NA,9)}else{ results[1:10]<-exp(mle$par) results[11]<-mle$value} write.table(results,file="results195.csv",sep=",")
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 checkBits <- function() { .Call('_largeVis_checkBits', PACKAGE = 'largeVis') } checkOpenMP <- function() { .Call('_largeVis_checkOpenMP', PACKAGE = 'largeVis') } dbscan_cpp <- function(edges, neighbors, eps, minPts, verbose) { .Call('_largeVis_dbscan_cpp', PACKAGE = 'largeVis', edges, neighbors, eps, minPts, verbose) } #' Instantiate an AnnoySearch for the given distMethod #' #' This instantiates a DenseAnnoySearch based on distMethod input. #' distMethod is either a Rcpp::XPtr<DenseDistanceProvider> or a character #' If this is a DenseDistanceProvider, use the provided getAnnoySearch to get an AnnoySearch instance, #' if this is one of the predefined types (Cosine or Euclidean), instantiate it directly without going #' past a DistanceProvider. NULL searchTrees <- function(threshold, n_trees, K, maxIter, data, distMethod, seed, threads, verbose) { .Call('_largeVis_searchTrees', PACKAGE = 'largeVis', threshold, n_trees, K, maxIter, data, distMethod, seed, threads, verbose) } fastDistance <- function(is, js, data, distMethod, threads, verbose) { .Call('_largeVis_fastDistance', PACKAGE = 'largeVis', is, js, data, distMethod, threads, verbose) } fastCDistance <- function(is, js, i_locations, p_locations, x, distMethod, threads, verbose) { .Call('_largeVis_fastCDistance', PACKAGE = 'largeVis', is, js, i_locations, p_locations, x, distMethod, threads, verbose) } fastSDistance <- function(is, js, i_locations, j_locations, x, distMethod, threads, verbose) { .Call('_largeVis_fastSDistance', PACKAGE = 'largeVis', is, js, i_locations, j_locations, x, distMethod, threads, verbose) } referenceWij <- function(i, j, d, threads, perplexity) { .Call('_largeVis_referenceWij', PACKAGE = 'largeVis', i, j, d, threads, perplexity) } hdbscanc <- function(edges, neighbors, K, minPts, threads, verbose) { .Call('_largeVis_hdbscanc', PACKAGE = 'largeVis', edges, 
neighbors, K, minPts, threads, verbose) } sgd <- function(coords, targets_i, sources_j, ps, weights, gamma, rho, n_samples, M, alpha, momentum, useDegree, seed, threads, verbose) { .Call('_largeVis_sgd', PACKAGE = 'largeVis', coords, targets_i, sources_j, ps, weights, gamma, rho, n_samples, M, alpha, momentum, useDegree, seed, threads, verbose) } optics_cpp <- function(edges, neighbors, eps, minPts, useQueue, verbose) { .Call('_largeVis_optics_cpp', PACKAGE = 'largeVis', edges, neighbors, eps, minPts, useQueue, verbose) } #' @export ramclustDist <- function(i, j, sr, st, maxt, maxdist) { .Call('_largeVis_ramclustDist', PACKAGE = 'largeVis', i, j, sr, st, maxt, maxdist) } #' @export ramclustDistance <- function(sr, st, maxt, maxdist) { .Call('_largeVis_ramclustDistance', PACKAGE = 'largeVis', sr, st, maxt, maxdist) } searchTreesCSparse <- function(threshold, n_trees, K, maxIter, i, p, x, distMethod, seed, threads, verbose) { .Call('_largeVis_searchTreesCSparse', PACKAGE = 'largeVis', threshold, n_trees, K, maxIter, i, p, x, distMethod, seed, threads, verbose) } searchTreesTSparse <- function(threshold, n_trees, K, maxIter, i, j, x, distMethod, seed, threads, verbose) { .Call('_largeVis_searchTreesTSparse', PACKAGE = 'largeVis', threshold, n_trees, K, maxIter, i, j, x, distMethod, seed, threads, verbose) } # Register entry points for exported C++ functions methods::setLoadAction(function(ns) { .Call('_largeVis_RcppExport_registerCCallable', PACKAGE = 'largeVis') })
/R/RcppExports.R
no_license
meowcat/largeVis
R
false
false
3,564
r
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 checkBits <- function() { .Call('_largeVis_checkBits', PACKAGE = 'largeVis') } checkOpenMP <- function() { .Call('_largeVis_checkOpenMP', PACKAGE = 'largeVis') } dbscan_cpp <- function(edges, neighbors, eps, minPts, verbose) { .Call('_largeVis_dbscan_cpp', PACKAGE = 'largeVis', edges, neighbors, eps, minPts, verbose) } #' Instantiate an AnnoySearch for the given distMethod #' #' This instantiates a DenseAnnoySearch based on distMethod input. #' distMethod is either a Rcpp::XPtr<DenseDistanceProvider> or a character #' If this is a DenseDistanceProvider, use the provided getAnnoySearch to get an AnnoySearch instance, #' if this is one of the predefined types (Cosine or Euclidean), instantiate it directly without going #' past a DistanceProvider. NULL searchTrees <- function(threshold, n_trees, K, maxIter, data, distMethod, seed, threads, verbose) { .Call('_largeVis_searchTrees', PACKAGE = 'largeVis', threshold, n_trees, K, maxIter, data, distMethod, seed, threads, verbose) } fastDistance <- function(is, js, data, distMethod, threads, verbose) { .Call('_largeVis_fastDistance', PACKAGE = 'largeVis', is, js, data, distMethod, threads, verbose) } fastCDistance <- function(is, js, i_locations, p_locations, x, distMethod, threads, verbose) { .Call('_largeVis_fastCDistance', PACKAGE = 'largeVis', is, js, i_locations, p_locations, x, distMethod, threads, verbose) } fastSDistance <- function(is, js, i_locations, j_locations, x, distMethod, threads, verbose) { .Call('_largeVis_fastSDistance', PACKAGE = 'largeVis', is, js, i_locations, j_locations, x, distMethod, threads, verbose) } referenceWij <- function(i, j, d, threads, perplexity) { .Call('_largeVis_referenceWij', PACKAGE = 'largeVis', i, j, d, threads, perplexity) } hdbscanc <- function(edges, neighbors, K, minPts, threads, verbose) { .Call('_largeVis_hdbscanc', PACKAGE = 'largeVis', edges, 
neighbors, K, minPts, threads, verbose) } sgd <- function(coords, targets_i, sources_j, ps, weights, gamma, rho, n_samples, M, alpha, momentum, useDegree, seed, threads, verbose) { .Call('_largeVis_sgd', PACKAGE = 'largeVis', coords, targets_i, sources_j, ps, weights, gamma, rho, n_samples, M, alpha, momentum, useDegree, seed, threads, verbose) } optics_cpp <- function(edges, neighbors, eps, minPts, useQueue, verbose) { .Call('_largeVis_optics_cpp', PACKAGE = 'largeVis', edges, neighbors, eps, minPts, useQueue, verbose) } #' @export ramclustDist <- function(i, j, sr, st, maxt, maxdist) { .Call('_largeVis_ramclustDist', PACKAGE = 'largeVis', i, j, sr, st, maxt, maxdist) } #' @export ramclustDistance <- function(sr, st, maxt, maxdist) { .Call('_largeVis_ramclustDistance', PACKAGE = 'largeVis', sr, st, maxt, maxdist) } searchTreesCSparse <- function(threshold, n_trees, K, maxIter, i, p, x, distMethod, seed, threads, verbose) { .Call('_largeVis_searchTreesCSparse', PACKAGE = 'largeVis', threshold, n_trees, K, maxIter, i, p, x, distMethod, seed, threads, verbose) } searchTreesTSparse <- function(threshold, n_trees, K, maxIter, i, j, x, distMethod, seed, threads, verbose) { .Call('_largeVis_searchTreesTSparse', PACKAGE = 'largeVis', threshold, n_trees, K, maxIter, i, j, x, distMethod, seed, threads, verbose) } # Register entry points for exported C++ functions methods::setLoadAction(function(ns) { .Call('_largeVis_RcppExport_registerCCallable', PACKAGE = 'largeVis') })
## Checks if the file exist on the working directory, if it doesn't exist, it downloads from "http://d396qusza40orc.cloudfront.net/exdata%2Fdata ## %2Fhousehold_power_consumption.zip" and unzips the file if(!file.exists("exdata-data-household_power_consumption.zip")) { temp <- tempfile() download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp) file <- unzip(temp) unlink(temp) } ## Loads the file into memory and reads only the data from the dates 2007-02-01 and 2007-02-02 ## Also converts the Date and Time variables to Date/Time classes in R household_power <- read.table(file, header=T, sep=";") household_power$Date <- as.Date(household_power$Date, format="%d/%m/%Y") data <- household_power[(household_power$Date=="2007-02-01") | (household_power$Date=="2007-02-02"),] data$Global_active_power <- as.numeric(as.character(data$Global_active_power)) data$Global_reactive_power <- as.numeric(as.character(data$Global_reactive_power)) data$Voltage <- as.numeric(as.character(data$Voltage)) data <- transform(data, timestamp=as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S") data$Sub_metering_1 <- as.numeric(as.character(data$Sub_metering_1)) data$Sub_metering_2 <- as.numeric(as.character(data$Sub_metering_2)) data$Sub_metering_3 <- as.numeric(as.character(data$Sub_metering_3)) ## This Constructs plot2 and saves it to a PNG file with a width of 480 pixels and a height of 480 pixels in your current working directory plot2 <- function() { plot(data$timestamp,df$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)") dev.copy(png, file="plot2.png", width=480, height=480) dev.off() } plot2()
/plot2.R
no_license
AisaacO/ExData_Plotting1
R
false
false
1,739
r
## Checks if the file exist on the working directory, if it doesn't exist, it downloads from "http://d396qusza40orc.cloudfront.net/exdata%2Fdata ## %2Fhousehold_power_consumption.zip" and unzips the file if(!file.exists("exdata-data-household_power_consumption.zip")) { temp <- tempfile() download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp) file <- unzip(temp) unlink(temp) } ## Loads the file into memory and reads only the data from the dates 2007-02-01 and 2007-02-02 ## Also converts the Date and Time variables to Date/Time classes in R household_power <- read.table(file, header=T, sep=";") household_power$Date <- as.Date(household_power$Date, format="%d/%m/%Y") data <- household_power[(household_power$Date=="2007-02-01") | (household_power$Date=="2007-02-02"),] data$Global_active_power <- as.numeric(as.character(data$Global_active_power)) data$Global_reactive_power <- as.numeric(as.character(data$Global_reactive_power)) data$Voltage <- as.numeric(as.character(data$Voltage)) data <- transform(data, timestamp=as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S") data$Sub_metering_1 <- as.numeric(as.character(data$Sub_metering_1)) data$Sub_metering_2 <- as.numeric(as.character(data$Sub_metering_2)) data$Sub_metering_3 <- as.numeric(as.character(data$Sub_metering_3)) ## This Constructs plot2 and saves it to a PNG file with a width of 480 pixels and a height of 480 pixels in your current working directory plot2 <- function() { plot(data$timestamp,df$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)") dev.copy(png, file="plot2.png", width=480, height=480) dev.off() } plot2()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/normalization.R \name{normalize} \alias{normalize} \title{Normalization} \usage{ normalize(microarray) } \arguments{ \item{mocroarray}{A table containing at least M, A and name columns.} } \value{ Same table normalized and with NA removed. } \description{ Normalize mean and average. }
/man/normalize.Rd
no_license
GilaZeus/DAMN
R
false
true
364
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/normalization.R \name{normalize} \alias{normalize} \title{Normalization} \usage{ normalize(microarray) } \arguments{ \item{mocroarray}{A table containing at least M, A and name columns.} } \value{ Same table normalized and with NA removed. } \description{ Normalize mean and average. }
#### Generating figures for preliminary results: simulated vs. estimated
#### transition rates (q01, q10) and pSpec, under symmetrical and
#### asymmetrical transition-rate models.

library(viridis)
library(cowplot)

## Shared plot components. Wrapped as zero-argument functions so that each
## plot receives a freshly constructed layer/theme object.
rate_points   <- function() geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5)
identity_line <- function() geom_abline(slope = 1, intercept = 0, colour = "red")
zero_line     <- function() geom_hline(yintercept = 0, colour = "darkgrey", linetype = "dashed")
bottom_legend <- function() theme(legend.position = "bottom")
lambda_lab    <- "Simulated Speciation Rate"
std_diff_lab  <- "Standardized \nFit - Sim"

### Symmetrical rates --------------------------------------------------------

results.symtrans <- read.table("results_symtrans.txt", sep = "\t", header = TRUE)
results.symtrans <- results.symtrans[, -ncol(results.symtrans)]  # drop trailing column
results.symtrans <- results.symtrans[order(results.symtrans$tree.number), ]

## q01: estimated against simulated, coloured by simulated speciation rate.
q01.sym <-
  ggplot(results.symtrans, aes(x = q01.sim, y = q01.fit)) +
  rate_points() +
  ylim(0, 2.5) +
  identity_line() +
  scale_color_viridis() +
  bottom_legend() +
  labs(x = "Simulated q01", y = "Estimated q01", colour = lambda_lab)

## q01: standardized estimation error as a function of tree size.
q01.sym.bytreesize <-
  ggplot(results.symtrans, aes(x = ntaxa.sim, y = (q01.fit - q01.sim) / q01.sim)) +
  rate_points() +
  zero_line() +
  scale_color_viridis() +
  ylim(-5, 10) +
  bottom_legend() +
  labs(x = "Number of Taxa", y = std_diff_lab, colour = lambda_lab)

## q10: q01.sim is used as the simulated value on the x axis -- rates are
## symmetrical in this model, so presumably q01.sim doubles as the simulated
## q10 (TODO confirm the results table has no separate q10.sim column here).
q10.sym <-
  ggplot(results.symtrans, aes(x = q01.sim, y = q10.fit)) +
  rate_points() +
  ylim(0, 2.5) +
  identity_line() +
  scale_color_viridis() +
  bottom_legend() +
  labs(x = "Simulated q10", y = "Estimated q10", colour = lambda_lab)

q10.sym.bytreesize <-
  ggplot(results.symtrans, aes(x = ntaxa.sim, y = (q10.fit - q01.sim) / q01.sim)) +
  rate_points() +
  zero_line() +
  scale_color_viridis() +
  ylim(-5, 10) +
  bottom_legend() +
  labs(x = "Number of Taxa", y = std_diff_lab, colour = lambda_lab)

## Estimated q10 against estimated q01 (points should sit near the 1:1 line
## when the fitted rates recover the symmetry).
q01.q10.sym <-
  ggplot(results.symtrans, aes(x = q01.fit, y = q10.fit)) +
  rate_points() +
  identity_line() +
  scale_color_viridis() +
  ylim(0, 2.5) +
  xlim(0, 2.5) +
  bottom_legend() +
  labs(x = "Estimated q01", y = "Estimated q10", colour = lambda_lab)

q01.q10.diff.bytreesize <-
  ggplot(results.symtrans, aes(x = ntaxa.sim, y = q10.fit - q01.fit)) +
  rate_points() +
  scale_color_viridis() +
  ylim(-10, 10) +
  bottom_legend() +
  labs(x = "Number of Taxa", y = "q10 - q01", colour = lambda_lab)

## pSpec: estimated against simulated.
pSpec.sym <-
  ggplot(results.symtrans, aes(x = pSpec.sim, y = pSpec.fit)) +
  rate_points() +
  identity_line() +
  scale_color_viridis() +
  ylim(1e-1, 10) +
  bottom_legend() +
  labs(x = "Simulated pSpec", y = "Estimated pSpec", colour = lambda_lab)

pSpec.sym.bytreesize <-
  ggplot(results.symtrans, aes(x = ntaxa.sim, y = (pSpec.fit - pSpec.sim) / pSpec.sim)) +
  rate_points() +
  zero_line() +
  scale_color_viridis() +
  ylim(-5, 10) +
  bottom_legend() +
  labs(x = "Number of Taxa", y = std_diff_lab, colour = lambda_lab)

### Asymmetrical rates -------------------------------------------------------

results.asymtrans <- read.table("results_asymtrans.txt", sep = "\t", header = TRUE)
results.asymtrans <- results.asymtrans[, -ncol(results.asymtrans)]  # drop trailing column
results.asymtrans <- results.asymtrans[order(results.asymtrans$tree.number), ]

## q01: estimated against simulated.
q01.asym <-
  ggplot(results.asymtrans, aes(x = q01.sim, y = q01.fit)) +
  rate_points() +
  ylim(0, 2.5) +
  identity_line() +
  scale_color_viridis() +
  bottom_legend() +
  labs(x = "Simulated q01", y = "Estimated q01", colour = lambda_lab)

q01.asym.bytreesize <-
  ggplot(results.asymtrans, aes(x = ntaxa.sim, y = (q01.fit - q01.sim) / q01.sim)) +
  rate_points() +
  zero_line() +
  scale_color_viridis() +
  ylim(-5, 10) +
  bottom_legend() +
  labs(x = "Number of Taxa", y = std_diff_lab, colour = lambda_lab)

## q10: a separate simulated q10 exists for the asymmetrical model.
q10.asym <-
  ggplot(results.asymtrans, aes(x = q10.sim, y = q10.fit)) +
  rate_points() +
  ylim(0, 2.5) +
  identity_line() +
  scale_color_viridis() +
  bottom_legend() +
  labs(x = "Simulated q10", y = "Estimated q10", colour = lambda_lab)

q10.asym.bytreesize <-
  ggplot(results.asymtrans, aes(x = ntaxa.sim, y = (q10.fit - q10.sim) / q10.sim)) +
  rate_points() +
  zero_line() +
  scale_color_viridis() +
  ylim(-5, 10) +
  bottom_legend() +
  labs(x = "Number of Taxa", y = std_diff_lab, colour = lambda_lab)

## NOTE: the estimated-q01 vs. estimated-q10 comparison panels (q01.q10 and
## q01.q10.diff.bytreesize for the asymmetrical results) are intentionally
## disabled; only the symmetrical versions above are produced.

## pSpec: estimated against simulated.
pSpec.asym <-
  ggplot(results.asymtrans, aes(x = pSpec.sim, y = pSpec.fit)) +
  rate_points() +
  identity_line() +
  scale_color_viridis() +
  ylim(1e-1, 10) +
  bottom_legend() +
  labs(x = "Simulated pSpec", y = "Estimated pSpec", colour = lambda_lab)

pSpec.asym.bytreesize <-
  ggplot(results.asymtrans, aes(x = ntaxa.sim, y = (pSpec.fit - pSpec.sim) / pSpec.sim)) +
  rate_points() +
  zero_line() +
  scale_color_viridis() +
  ylim(-5, 10) +
  bottom_legend() +
  labs(x = "Number of Taxa", y = std_diff_lab, colour = lambda_lab)

### Legends ------------------------------------------------------------------

## Pull the colour-legend grob ("guide-box") out of one representative plot
## per panel set, for later use in composite figures.
grobs <- ggplotGrob(q01.sym)$grobs
legend.sym <- grobs[[which(sapply(grobs, function(g) g$name) == "guide-box")]]
grobs <- ggplotGrob(q01.asym)$grobs
legend.asym <- grobs[[which(sapply(grobs, function(g) g$name) == "guide-box")]]
/figs.R
no_license
lukejharmon/netphy
R
false
false
7,570
r
#### Generating figures for preliminary results.
#### Two result sets are plotted with matching panel layouts: symmetrical
#### transition rates and asymmetrical transition rates, followed by
#### extraction of the shared colour legends.

library(viridis)
library(cowplot)

### Symmetrical rates

## Load the results table, drop its last column, and sort by tree number.
results.symtrans <- read.table("results_symtrans.txt", sep = "\t", header = TRUE)
results.symtrans <- results.symtrans[, -dim(results.symtrans)[2]]
results.symtrans <- results.symtrans[order(results.symtrans$tree.number), ]

## q01: estimated vs. simulated, with a red 1:1 reference line.
q01.sym <-
  ggplot(data = results.symtrans, aes(x = q01.sim, y = q01.fit)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  ylim(0, 2.5) +
  geom_abline(slope = 1, intercept = 0, colour = "red") +
  scale_color_viridis() +
  theme(legend.position = "bottom") +
  labs(x = "Simulated q01", y = "Estimated q01",
       colour = "Simulated Speciation Rate")

## q01: standardized (fit - sim) / sim error against tree size.
q01.sym.bytreesize <-
  ggplot(data = results.symtrans,
         aes(x = ntaxa.sim, y = (q01.fit - q01.sim)/q01.sim)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  geom_hline(yintercept = 0, colour = "darkgrey", linetype = "dashed") +
  scale_color_viridis() +
  ylim(-5, 10) +
  theme(legend.position = "bottom") +
  labs(x = "Number of Taxa", y = "Standardized \nFit - Sim",
       colour = "Simulated Speciation Rate")

## q10: plotted against q01.sim on the x axis (rates are symmetrical in
## this model -- presumably the simulated q10 equals q01.sim; verify
## against the results table).
q10.sym <-
  ggplot(data = results.symtrans, aes(x = q01.sim, y = q10.fit)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  ylim(0, 2.5) +
  geom_abline(slope = 1, intercept = 0, colour = "red") +
  scale_color_viridis() +
  theme(legend.position = "bottom") +
  labs(x = "Simulated q10", y = "Estimated q10",
       colour = "Simulated Speciation Rate")

q10.sym.bytreesize <-
  ggplot(data = results.symtrans,
         aes(x = ntaxa.sim, y = (q10.fit - q01.sim)/q01.sim)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  geom_hline(yintercept = 0, colour = "darkgrey", linetype = "dashed") +
  scale_color_viridis() +
  ylim(-5, 10) +
  theme(legend.position = "bottom") +
  labs(x = "Number of Taxa", y = "Standardized \nFit - Sim",
       colour = "Simulated Speciation Rate")

## q10 ~ q01 (estimated vs. estimated)
q01.q10.sym <-
  ggplot(data = results.symtrans, aes(x = q01.fit, y = q10.fit)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  geom_abline(slope = 1, intercept = 0, colour = "red") +
  scale_color_viridis() +
  ## scale_y_log10() +
  ## scale_x_log10() +
  ylim(0, 2.5) +
  xlim(0, 2.5) +
  theme(legend.position = "bottom") +
  labs(x = "Estimated q01", y = "Estimated q10",
       colour = "Simulated Speciation Rate")

q01.q10.diff.bytreesize <-
  ggplot(data = results.symtrans, aes(x = ntaxa.sim, y = q10.fit - q01.fit)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  scale_color_viridis() +
  ylim(-10, 10) +
  theme(legend.position = "bottom") +
  labs(x = "Number of Taxa", y = "q10 - q01",
       colour = "Simulated Speciation Rate")

## pSpec: estimated vs. simulated.
pSpec.sym <-
  ggplot(data = results.symtrans, aes(x = pSpec.sim, y = pSpec.fit)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  geom_abline(slope = 1, intercept = 0, colour = "red") +
  scale_color_viridis() +
  ylim(1e-1, 10) +
  theme(legend.position = "bottom") +
  labs(x = "Simulated pSpec", y = "Estimated pSpec",
       colour = "Simulated Speciation Rate")

pSpec.sym.bytreesize <-
  ggplot(data = results.symtrans,
         aes(x = ntaxa.sim, y = (pSpec.fit - pSpec.sim)/pSpec.sim)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  geom_hline(yintercept = 0, colour = "darkgrey", linetype = "dashed") +
  scale_color_viridis() +
  ylim(-5, 10) +
  theme(legend.position = "bottom") +
  labs(x = "Number of Taxa", y = "Standardized \nFit - Sim",
       colour = "Simulated Speciation Rate")

### Asymmetrical rates

results.asymtrans <- read.table("results_asymtrans.txt", sep = "\t", header = TRUE)
results.asymtrans <- results.asymtrans[, -dim(results.asymtrans)[2]]
results.asymtrans <- results.asymtrans[order(results.asymtrans$tree.number), ]

## q01
q01.asym <-
  ggplot(data = results.asymtrans, aes(x = q01.sim, y = q01.fit)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  ylim(0, 2.5) +
  geom_abline(slope = 1, intercept = 0, colour = "red") +
  scale_color_viridis() +
  theme(legend.position = "bottom") +
  labs(x = "Simulated q01", y = "Estimated q01",
       colour = "Simulated Speciation Rate")

q01.asym.bytreesize <-
  ggplot(data = results.asymtrans,
         aes(x = ntaxa.sim, y = (q01.fit - q01.sim)/q01.sim)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  geom_hline(yintercept = 0, colour = "darkgrey", linetype = "dashed") +
  scale_color_viridis() +
  ylim(-5, 10) +
  theme(legend.position = "bottom") +
  labs(x = "Number of Taxa", y = "Standardized \nFit - Sim",
       colour = "Simulated Speciation Rate")

## q10 (the asymmetrical results carry their own simulated q10 column)
q10.asym <-
  ggplot(data = results.asymtrans, aes(x = q10.sim, y = q10.fit)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  ylim(0, 2.5) +
  geom_abline(slope = 1, intercept = 0, colour = "red") +
  scale_color_viridis() +
  theme(legend.position = "bottom") +
  labs(x = "Simulated q10", y = "Estimated q10",
       colour = "Simulated Speciation Rate")

q10.asym.bytreesize <-
  ggplot(data = results.asymtrans,
         aes(x = ntaxa.sim, y = (q10.fit - q10.sim)/q10.sim)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  geom_hline(yintercept = 0, colour = "darkgrey", linetype = "dashed") +
  scale_color_viridis() +
  ylim(-5, 10) +
  theme(legend.position = "bottom") +
  labs(x = "Number of Taxa", y = "Standardized \nFit - Sim",
       colour = "Simulated Speciation Rate")

## q10 ~ q01 panels for the asymmetrical case -- disabled:
## q01.q10 <-
##   ggplot(data = results.asymtrans, aes(x = q01.fit, y = q10.fit)) +
##   geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
##   geom_abline(slope = 1, intercept = 0, colour = "red") +
##   scale_color_viridis() +
##   ylim(0, 2.5) + xlim(0, 2.5) +
##   theme(legend.position = "bottom") +
##   labs(x = "Estimated q01", y = "Estimated q10",
##        colour = "Simulated Speciation Rate")
## q01.q10.diff.bytreesize <-
##   ggplot(data = results.asymtrans, aes(x = ntaxa.sim, y = q10.fit - q01.fit)) +
##   geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
##   scale_color_viridis() +
##   ylim(-10, 10) +
##   theme(legend.position = "bottom") +
##   labs(x = "Number of Taxa", y = "q10 - q01",
##        colour = "Simulated Speciation Rate")

## pSpec
pSpec.asym <-
  ggplot(data = results.asymtrans, aes(x = pSpec.sim, y = pSpec.fit)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  geom_abline(slope = 1, intercept = 0, colour = "red") +
  scale_color_viridis() +
  ylim(1e-1, 10) +
  theme(legend.position = "bottom") +
  labs(x = "Simulated pSpec", y = "Estimated pSpec",
       colour = "Simulated Speciation Rate")

pSpec.asym.bytreesize <-
  ggplot(data = results.asymtrans,
         aes(x = ntaxa.sim, y = (pSpec.fit - pSpec.sim)/pSpec.sim)) +
  geom_point(aes(colour = lambda.sim), size = 2, alpha = 0.5) +
  geom_hline(yintercept = 0, colour = "darkgrey", linetype = "dashed") +
  scale_color_viridis() +
  ylim(-5, 10) +
  theme(legend.position = "bottom") +
  labs(x = "Number of Taxa", y = "Standardized \nFit - Sim",
       colour = "Simulated Speciation Rate")

## Legends: extract the "guide-box" grob from one plot per panel set.
grobs <- ggplotGrob(q01.sym)$grobs
legend.sym <- grobs[[which(sapply(grobs, function(x) x$name) == "guide-box")]]
grobs <- ggplotGrob(q01.asym)$grobs
legend.asym <- grobs[[which(sapply(grobs, function(x) x$name) == "guide-box")]]
#importing data and packages load("matched_dataset.RData") #for those downloading the code, you should use load("data_to_be_shared.RData") instead pacman::p_load(tidyverse, sjPlot, cowplot, did, lmerTest, ggpubr, interplot,mediation, effectsize,esc, vioplot) #https://cran.r-project.org/web/packages/interplot/vignettes/interplot-vignette.html detach("package:dplyr", unload = TRUE) library(dplyr) set.seed(5030) ################################################################## ######## ANALYSIS STEP 1 - DIFFERENCE IN DIFFERENCE ############## ################################################################## ##FULL PUBLICATIONS## did_model_pfull <- att_gt(yname = "p_full_yearsum", gname = "moving_year_plus1", idname = "cluster_id", tname = "career_year_plus_1", xformla = ~1, #this package doesn't provide functionality to assess impact of covariates on outcomes. data = matched_dataset, est_method = "dr", control_group = "nevertreated", anticipation = 1, allow_unbalanced_panel = T) did_model_pfull_dynamic_short <- aggte(did_model_pfull, type = "dynamic", min_e = -5, max_e = 2) summary(did_model_pfull_dynamic_short) p_full_did_plot <- ggdid(did_model_pfull_dynamic_short, xlab = "Years from move", ylab = "Treatment effect", title = "Publications (year sum)") esc_B(b = did_model_pfull_dynamic_short$overall.att, sdy = sd(matched_dataset$p_full_yearsum),grp1n = did_model_pfull_dynamic_short$DIDparams$n/2, grp2n = did_model_pfull_dynamic_short$DIDparams$n/2,es.type = c("d")) ## NORMALISED CITATION SCORE ## did_model_ncs_full_yearmean <- att_gt(yname = "ncs_full_mean", gname = "moving_year_plus1", idname = "cluster_id", tname = "career_year_plus_1", xformla = ~1, data = matched_dataset, est_method = "dr", control_group = "nevertreated", anticipation = 1, allow_unbalanced_panel = T) #will give an error about missing data (because people don't always publish in a given year...) 
did_model_ncs_full_dynamic_short <- aggte(did_model_ncs_full_yearmean, type = "dynamic", min_e = -5, max_e = 2) summary(did_model_ncs_full_dynamic_short) ncs_full_did_plot <- ggdid(did_model_ncs_full_dynamic_short, xlab = "Years from move", ylab = "Treatment effect", title = "Citation score (year mean)") esc_B(b = did_model_ncs_full_dynamic_short$overall.att, sdy = sd(matched_dataset$ncs_full_mean, na.rm=T),grp1n = did_model_ncs_full_dynamic_short$DIDparams$n/2, grp2n = did_model_ncs_full_dynamic_short$DIDparams$n/2,es.type = c("d")) ## NORMALISED JOURNAL SCORE ## did_model_njs_full<- att_gt(yname = "njs_full_mean", gname = "moving_year_plus1", idname = "cluster_id", tname = "career_year_plus_1", xformla = ~1, data = matched_dataset, est_method = "dr", control_group = "nevertreated", anticipation = 1, allow_unbalanced_panel = T) #will give an error about missing data (because people don't always publish in a given year...) did_model_njs_full_dynamic_short <- aggte(did_model_njs_full, type = "dynamic", min_e = -5, max_e = 2) summary(did_model_njs_full_dynamic_short) njs_full_did_plot <- ggdid(did_model_njs_full_dynamic_short, xlab = "Years from move", ylab = "Treatment effect", title = " Journal score (year mean)") esc_B(b = did_model_njs_full_dynamic_short$overall.att, sdy = sd(matched_dataset$njs_full_mean, na.rm=T),grp1n = did_model_njs_full_dynamic_short$DIDparams$n/2, grp2n = did_model_njs_full_dynamic_short$DIDparams$n/2,es.type = c("d")) ## NUMBER OF TOP JOURNALS (njs >2 ) ## did_model_njs_topjournals <- att_gt(yname = "njs_full_over2_yearsum", gname = "moving_year_plus1", idname = "cluster_id", tname = "career_year_plus_1", xformla = ~1, data = matched_dataset, est_method = "dr", control_group = "nevertreated", anticipation = 1, allow_unbalanced_panel = T) #will give an error about missing data (because people don't always publish in a given year...) 
# Event-study aggregation, plot and Cohen's d for the top-journal DID model.
did_model_njs_topjournals_dynamic_short <- aggte(did_model_njs_topjournals, type = "dynamic", min_e = -5, max_e = 2)
summary(did_model_njs_topjournals_dynamic_short)
njs_topjournals_did_plot <- ggdid(did_model_njs_topjournals_dynamic_short, xlab = "Years from move", ylab = "Treatment effect", title = "Top journal publications (year sum)")
esc_B(b = did_model_njs_topjournals_dynamic_short$overall.att, sdy = sd(matched_dataset$njs_full_over2_yearsum, na.rm=T),grp1n = did_model_njs_topjournals_dynamic_short$DIDparams$n/2, grp2n = did_model_njs_topjournals_dynamic_short$DIDparams$n/2,es.type = c("d"))

## NUMBER OF TOP CITED PAPERS ##
# NOTE(review): this model uses control_group = "notyettreated" while the four models
# above use "nevertreated" — per the author's comment below this is deliberate.
did_model_p_top_prop10_full<- att_gt(yname = "p_top_prop10_full_yearsum", gname = "moving_year_plus1", idname = "cluster_id", tname = "career_year_plus_1", xformla = ~1, data = matched_dataset, est_method = "dr", control_group = "notyettreated", anticipation = 1, allow_unbalanced_panel = T) #i think this should account for the fact that some people have quit science (and we therefore don't have full data for them)
did_model_p_top_prop10_full_dynamic_short <- aggte(did_model_p_top_prop10_full, type = "dynamic", min_e = -5, max_e = 2)
summary(did_model_p_top_prop10_full_dynamic_short)
pp10_full_did_plot <- ggdid(did_model_p_top_prop10_full_dynamic_short, xlab = "Years from move", ylab = "Treatment effect", title = "Top cited publications (year sum)")
esc_B(b = did_model_p_top_prop10_full_dynamic_short$overall.att, sdy = sd(matched_dataset$p_top_prop10_full_yearsum, na.rm=T),grp1n = did_model_p_top_prop10_full_dynamic_short$DIDparams$n/2, grp2n = did_model_p_top_prop10_full_dynamic_short$DIDparams$n/2,es.type = c("d"))

## MAKING A FIGURE OF THE DIFFERENCE-IN-DIFFERENCE RESULTS ##
did_plot_grid <- ggarrange(p_full_did_plot, ncs_full_did_plot, njs_full_did_plot, njs_topjournals_did_plot, pp10_full_did_plot, common.legend = T, legend = "bottom", ncol=2,nrow=3, labels = "AUTO",hjust=-2)
ggexport(did_plot_grid, filename = "plots/Fig2. DID.pdf")

## MAKING TABLES OF THE DIFFERENCE-IN-DIFFERENCE RESULTS ##
# First a table of the relative increases in performance
# Overall ATTs in the same order as the outcome means computed below.
ATTs <- c(did_model_pfull_dynamic_short$overall.att, did_model_ncs_full_dynamic_short$overall.att, did_model_njs_full_dynamic_short$overall.att, did_model_njs_topjournals_dynamic_short$overall.att, did_model_p_top_prop10_full_dynamic_short$overall.att)
table2 <- matched_dataset %>% #calculating relative increase
  # Baseline = stayers (condition_numeric == 0) in the 0-2 years-after-move window.
  filter(years_from_obtaining_usa_affilation >= 0 & years_from_obtaining_usa_affilation <= 2, condition_numeric == 0) %>%
  summarise(mean_p_full = mean(p_full_yearsum), mean_ncs_full = mean(ncs_full_mean, na.rm=T), mean_njs_full = mean(njs_full_mean, na.rm=T), mean_njs_topjournals = mean(njs_full_over2_yearsum, na.rm=T), mean_p_top_prop10_full = mean(p_top_prop10_full_yearsum)) %>%
  pivot_longer(everything(),names_to = "bibliometric_measure", values_to = "mean") %>%
  mutate(ATT = ATTs, percentage_increase = (ATT/mean)*100) %>%
  mutate(mean = round(mean,2), ATT = round(ATT, 2), percentage_increase = round(percentage_increase, 2))
write.csv(table2, "tables/table1. att.csv")

#Then tables of the individual difference-in-difference models
tidy(did_model_pfull_dynamic_short) %>% mutate(across(estimate:point.conf.high, round,2)) %>% write.csv("tables/S2. DID pfull.csv")
tidy(did_model_ncs_full_dynamic_short) %>% mutate(across(estimate:point.conf.high, round,2)) %>% write.csv("tables/S3. DID ncs.csv")
tidy(did_model_njs_full_dynamic_short) %>% mutate(across(estimate:point.conf.high, round,2)) %>% write.csv("tables/S4. DID njs.csv")
tidy(did_model_njs_topjournals_dynamic_short) %>% mutate(across(estimate:point.conf.high, round,2)) %>% write.csv("tables/S5. DID topjournals.csv")
tidy(did_model_p_top_prop10_full_dynamic_short) %>% mutate(across(estimate:point.conf.high, round,2)) %>% write.csv("tables/S6. 
DID pptop10.csv")

############################################################################################################
### ANALYSIS STEP 2. MODERATION ############################################################################
# here we take only movers and interact 2 variables: "moving" with "difference in rank from origin to usa" #
############################################################################################################

# step 2a. first i make two new datasets, one that contains complete data those who we know the origin and usa QS ranking, and one where we know the origin and usa Leiden ranking.
# Both keep movers only (condition_numeric == 1), -2..+2 years around the move, and
# create (a) a mediator-style ranking difference set to 0 pre-move and (b) an origin
# ranking rescaled by 2 SD (Gelman standardisation).
diffindiff_data_only_movers_qs_diff <- matched_dataset %>% #the QS ranking moderation dataset
  filter(years_from_obtaining_usa_affilation >= -2, years_from_obtaining_usa_affilation <= 2, condition_numeric == 1, !is.na(gelman_difference_in_qs_overall_score)) %>%
  mutate(gelman_difference_in_qs_overall_score_zeropremove = if_else(post_move == 0, 0, gelman_difference_in_qs_overall_score), gelman_origin_qs_overall_score_mean = origin_qs_overall_score_mean/(2*sd(origin_qs_overall_score_mean, na.rm = T)))

diffindiff_data_only_movers_leiden_diff <- matched_dataset %>% #the Leiden ranking moderation dataset
  filter(years_from_obtaining_usa_affilation >= -2, years_from_obtaining_usa_affilation <= 2, condition_numeric == 1, !is.na(gelman_difference_in_pptop10)) %>%
  mutate(gelman_difference_in_pptop10_zeropremove = if_else(post_move == 0, 0, gelman_difference_in_pptop10), gelman_origin_pp_top10_mean = origin_pp_top10_mean/(2*sd(origin_pp_top10_mean, na.rm = T)))

# step 2b. Running the moderation analysis + making individual plots
# Mixed models with a post_move x ranking-difference interaction and a random
# intercept per researcher; interplot draws the conditional moving effect.

# Publications
pfull_qs <- lmer(p_full_yearsum ~ gelman_difference_in_qs_overall_score:post_move + post_move + career_year + gelman_origin_qs_overall_score_mean + (1|cluster_id), data = diffindiff_data_only_movers_qs_diff); summary(pfull_qs); plot_model(pfull_qs, type = "int")
pfull_pptop10 <- lmer(p_full_yearsum ~ gelman_difference_in_pptop10:post_move + post_move + career_year + gelman_origin_pp_top10_mean + (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff); summary(pfull_pptop10); plot_model(pfull_pptop10, type = "int", show.values = T)
pfull_qs_moderation_plot <- interplot(m = pfull_qs, var1 = "post_move", var2 = "gelman_difference_in_qs_overall_score") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left")
  scale_y_continuous(limits = c(0.25,2), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2")) + geom_hline(yintercept=0, linetype="dotted")
pfull_pptop10_moderation_plot <- interplot(m = pfull_pptop10, var1 = "post_move", var2 = "gelman_difference_in_pptop10") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left")
  scale_y_continuous(limits = c(0.25,2), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4")) + geom_hline(yintercept=0, linetype="dotted")

#Normalised citation score
ncs_full_qs <- lmer(ncs_full_mean ~gelman_difference_in_qs_overall_score:post_move + post_move +career_year + gelman_origin_qs_overall_score_mean + (1|cluster_id), data = diffindiff_data_only_movers_qs_diff); summary(ncs_full_qs); plot_model(ncs_full_qs, type = "int")
ncs_full_pptop10 <- lmer(ncs_full_mean ~gelman_difference_in_pptop10:post_move + post_move + career_year + gelman_origin_pp_top10_mean + (1|cluster_id), data = 
diffindiff_data_only_movers_leiden_diff); summary(ncs_full_pptop10); plot_model(ncs_full_pptop10, type = "int", show.values = T)
# Conditional moving-effect plots for the citation-score models (QS and Leiden moderators).
ncs_full_qs_moderation_plot <- interplot(m = ncs_full_qs, var1 = "post_move", var2 = "gelman_difference_in_qs_overall_score") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left")
  scale_y_continuous(limits = c(-1.3, 2.7), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2")) + geom_hline(yintercept=0, linetype="dotted")
ncs_full_pptop10_moderation_plot <- interplot(m = ncs_full_pptop10, var1 = "post_move", var2 = "gelman_difference_in_pptop10") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left")
  scale_y_continuous(limits = c(-1.3, 2.7), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4")) + geom_hline(yintercept=0, linetype="dotted")

#normalised Journal score
njs_full_qs <- lmer(njs_full_mean ~gelman_difference_in_qs_overall_score:post_move + post_move +career_year + gelman_origin_qs_overall_score_mean + (1|cluster_id), data = diffindiff_data_only_movers_qs_diff); summary(njs_full_qs); plot_model(njs_full_qs, type = "int")
njs_full_pptop10 <- lmer(njs_full_mean ~gelman_difference_in_pptop10:post_move + post_move + career_year + gelman_origin_pp_top10_mean + (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff); summary(njs_full_pptop10); plot_model(njs_full_pptop10, type = "int")
njs_full_qs_moderation_plot <- interplot(m = njs_full_qs, var1 = "post_move", var2 = "gelman_difference_in_qs_overall_score") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left")
  scale_y_continuous(limits =c(-0.8, 1.7),expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2")) + geom_hline(yintercept=0, linetype="dotted")
njs_full_pptop10_moderation_plot <- interplot(m = njs_full_pptop10, var1 = "post_move", var2 = "gelman_difference_in_pptop10") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left")
  scale_y_continuous(limits =c(-0.8, 1.7),expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4")) + geom_hline(yintercept=0, linetype="dotted")

#Top journals
# omega_squared() reports effect sizes alongside the interaction models here.
njs_topjournals_qs <- lmer(njs_full_over2_yearsum ~gelman_difference_in_qs_overall_score:post_move + post_move +career_year + gelman_origin_qs_overall_score_mean + (1|cluster_id), data = diffindiff_data_only_movers_qs_diff); summary(njs_topjournals_qs); plot_model(njs_topjournals_qs, type = "int"); omega_squared(njs_topjournals_qs)
njs_topjournals_pptop10 <- lmer(njs_full_over2_yearsum ~gelman_difference_in_pptop10:post_move + post_move + career_year + gelman_origin_pp_top10_mean + (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff); summary(njs_topjournals_pptop10); plot_model(njs_topjournals_pptop10, type = "int"); omega_squared(njs_topjournals_pptop10)
njs_topjournals_qs_moderation_plot <- interplot(m = njs_topjournals_qs, var1 = "post_move", var2 = "gelman_difference_in_qs_overall_score") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left")
  scale_y_continuous(limits = c(-0.4,1.1), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2")) + geom_hline(yintercept=0, linetype="dotted")
njs_topjournals_pptop10_moderation_plot <- interplot(m = njs_topjournals_pptop10, var1 = "post_move", var2 = "gelman_difference_in_pptop10") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + 
# ("top", "right", "bottom", "left")
  scale_y_continuous(limits = c(-0.4,1.1), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4")) + geom_hline(yintercept=0, linetype="dotted")

# Top cited papers
p_top_prop10_full_qs <- lmer(p_top_prop10_full_yearsum ~gelman_difference_in_qs_overall_score:post_move + post_move +career_year + gelman_origin_qs_overall_score_mean + (1|cluster_id), data = diffindiff_data_only_movers_qs_diff); summary(p_top_prop10_full_qs); plot_model(p_top_prop10_full_qs, type = "int"); omega_squared(p_top_prop10_full_qs)
p_top_prop10_full_pptop10 <-lmer(p_top_prop10_full_yearsum ~gelman_difference_in_pptop10:post_move + post_move + career_year + gelman_origin_pp_top10_mean + (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff); summary(p_top_prop10_full_pptop10); plot_model(p_top_prop10_full_pptop10, type = "int"); omega_squared(p_top_prop10_full_pptop10)
p_top_prop10_full_qs_moderation_plot <- interplot(m = p_top_prop10_full_qs, var1 = "post_move", var2 = "gelman_difference_in_qs_overall_score") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left")
  scale_y_continuous(limits = c(-0.25,0.85), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2")) + geom_hline(yintercept=0, linetype="dotted")
p_top_prop10_full_pptop10_moderation_plot <- interplot(m = p_top_prop10_full_pptop10, var1 = "post_move", var2 = "gelman_difference_in_pptop10") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left")
  scale_y_continuous(limits = c(-0.25,0.85),expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4")) + geom_hline(yintercept=0, linetype="dotted")

#step 2c. Making a panel plot that contains all of the moderation analyses + histograms of the raw ranking_difference data
# One row per researcher (distinct cluster_id) so the histograms count people, not person-years.
qs_difference_plot <- ggplot(diffindiff_data_only_movers_qs_diff %>% distinct(cluster_id, .keep_all = T), aes(x=gelman_difference_in_qs_overall_score)) + #Histogram of difference in QS rankings
  geom_histogram()+ theme_bw() + theme(plot.title = element_text(size = 10), axis.title.x=element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left")
  scale_y_continuous(limit = c(0,125), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2"))
leiden_difference_plot <- ggplot(diffindiff_data_only_movers_leiden_diff %>% distinct(cluster_id, .keep_all = T), aes(x = gelman_difference_in_pptop10)) + #histogram of difference in Leiden rankings
  geom_histogram() + theme_bw() + theme(plot.title = element_text(size = 10), axis.title.x=element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left")
  scale_y_continuous(limit = c(0,125), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4"))

interaction_plots_left <- ggarrange(pfull_qs_moderation_plot, #making the left half of the panel plot
                                    ncs_full_qs_moderation_plot, njs_full_qs_moderation_plot, njs_topjournals_qs_moderation_plot, p_top_prop10_full_qs_moderation_plot, qs_difference_plot, common.legend = T, legend = "bottom", ncol=1,nrow=6, labels = c("a. Publications", "b. Citation Score", "c. Journal Score", "d. Top journal publications", "e. Top cited publications", "f. Ranking change"), hjust = -0.1, vjust = -0.1, align = "hv", font.label = list(size = 10, color = "black", face = "bold.italic", family = NULL)) %>% annotate_figure(top = text_grob("QS", face = "bold"))
interaction_plots_right <- ggarrange(pfull_pptop10_moderation_plot, #making the right half of the panel plot
                                     ncs_full_pptop10_moderation_plot, njs_full_pptop10_moderation_plot, njs_topjournals_pptop10_moderation_plot, p_top_prop10_full_pptop10_moderation_plot, leiden_difference_plot, common.legend = T, legend = "bottom", ncol=1,nrow=6, align = "v") %>% annotate_figure(top = text_grob("Leiden", face="bold"))
moderation_plot_grid <- # this makes the final panel plot of the moderation analyses
  ggarrange(interaction_plots_left, interaction_plots_right) %>% annotate_figure(left = text_grob("Count Estimated Coefficient for effect of moving to USA",rot = 90, size = 10, hjust = .56), bottom = text_grob("Standard deviation change in ranking score (Positive = USA higher ranked)", size = 10))
ggexport(moderation_plot_grid, filename = "plots/Fig3. Moderation.pdf") #saving the plot

# step 2d. Creating tables of the moderation results
tab_model(pfull_qs, #table of QS ranking moderation
          ncs_full_qs, njs_full_qs, njs_topjournals_qs, p_top_prop10_full_qs, show.p = F, show.re.var =F, file = "tables/table2 moderation qs.doc")
tab_model(pfull_pptop10, #table of leiden ranking moderation
          ncs_full_pptop10, njs_full_pptop10, njs_topjournals_pptop10, p_top_prop10_full_pptop10, show.p = F, show.re.var =F, file = "tables/table3 moderation leiden.doc")

###################################################################
############ ANALYSIS STEP 3. MEDIATION ##########################
##################################################################
#Step 3a. 
running the mediation analysis#

#### QS RANKING ######
# Causal mediation (mediation::mediate): does the post-move change in QS ranking
# (mediator, set to 0 pre-move) transmit the effect of moving (treat = "post_move")
# to each bibliometric outcome? For every outcome we fit a mediator model and an
# outcome model (both lmer, same covariates) and pass them as mediate(model.m, model.y).

# p full
set.seed(5030)
# mediate() requires plain lme4 fits; lmerTest's class would interfere, so detach it.
detach("package:lmerTest", unload = T)
pfull_qs_med.fit <- lmer(gelman_difference_in_qs_overall_score_zeropremove ~ post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = diffindiff_data_only_movers_qs_diff)
pfull_qs_out.fit <- lmer(p_full_yearsum ~ gelman_difference_in_qs_overall_score_zeropremove + post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = diffindiff_data_only_movers_qs_diff)
pfull_qs_med.out <- mediate(pfull_qs_med.fit, pfull_qs_out.fit, treat = "post_move", mediator = "gelman_difference_in_qs_overall_score_zeropremove", robustSE = F, sims = 1000)
med_qs_pubs <- summary(pfull_qs_med.out)

# citation score
ncs_qs_mediation_data <- diffindiff_data_only_movers_qs_diff %>% filter(!is.na(ncs_full_mean))
ncs_qs_med.fit <- lmer(gelman_difference_in_qs_overall_score_zeropremove ~ post_move + career_year + origin_qs_overall_score_mean + (1|cluster_id), data = ncs_qs_mediation_data)
ncs_qs_out.fit <- lmer(ncs_full_mean ~ gelman_difference_in_qs_overall_score_zeropremove + post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = ncs_qs_mediation_data)
# FIX: the outcome model was previously passed twice — mediate(ncs_qs_out.fit, ncs_qs_out.fit, ...) —
# so ncs_qs_med.fit was never used and the ACME/ADE for the citation-score outcome were
# computed from the wrong model pair. mediate() takes (model.m, model.y) in that order.
ncs_qs_med.out <- mediate(ncs_qs_med.fit, ncs_qs_out.fit, treat = "post_move", mediator = "gelman_difference_in_qs_overall_score_zeropremove", robustSE = F, sims = 1000)
med_qs_ncs <- summary(ncs_qs_med.out)

# journal score
njs_qs_mediation_data <- diffindiff_data_only_movers_qs_diff %>% filter(!is.na(njs_full_mean))
njs_qs_med.fit <- lmer(gelman_difference_in_qs_overall_score_zeropremove ~ post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = njs_qs_mediation_data)
njs_qs_out.fit <- lmer(njs_full_mean ~ gelman_difference_in_qs_overall_score_zeropremove + post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = njs_qs_mediation_data)
njs_qs_med.out <- mediate(njs_qs_med.fit, njs_qs_out.fit, treat = "post_move", mediator = "gelman_difference_in_qs_overall_score_zeropremove", robustSE = F, sims = 1000)
med_qs_njs <- summary(njs_qs_med.out)

# top journals
topjoural_qs_med.fit <- lmer(gelman_difference_in_qs_overall_score_zeropremove ~ post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = diffindiff_data_only_movers_qs_diff)
topjournal_qs_out.fit <- lmer(njs_full_over2_yearsum ~ gelman_difference_in_qs_overall_score_zeropremove + post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = diffindiff_data_only_movers_qs_diff)
topjournal_qs_med.out <- mediate(topjoural_qs_med.fit, topjournal_qs_out.fit, treat = "post_move", mediator = "gelman_difference_in_qs_overall_score_zeropremove", robustSE = F, sims = 1000)
med_qs_topjournals <- summary(topjournal_qs_med.out)

# top 10%
top10_qs_med.fit <- lmer(gelman_difference_in_qs_overall_score_zeropremove ~ post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = diffindiff_data_only_movers_qs_diff)
top10_qs_out.fit <- lmer(p_top_prop10_full_yearsum ~ gelman_difference_in_qs_overall_score_zeropremove + post_move + career_year + origin_qs_overall_score_mean +(1|cluster_id), data = diffindiff_data_only_movers_qs_diff)
top10_qs_med.out <- mediate(top10_qs_med.fit, top10_qs_out.fit, treat = "post_move", mediator = "gelman_difference_in_qs_overall_score_zeropremove", robustSE = F, sims = 1000)
med_qs_pptop10 <- summary(top10_qs_med.out)

##### LEIDEN RANKING ######
# p full
pfull_med.fit <- lmer(gelman_difference_in_pptop10_zeropremove ~ post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff)
pfull_out.fit <- lmer(p_full_yearsum ~ gelman_difference_in_pptop10_zeropremove + post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff)
# FIX: this call alone used robustSE = T while every other mediate() call in the script
# uses robustSE = F; standardised to F so the Leiden and QS results are computed the same way.
pfull_med.out <- mediate(pfull_med.fit, pfull_out.fit, treat = "post_move", mediator = "gelman_difference_in_pptop10_zeropremove", robustSE = F, sims = 
1000)
med_leiden_pubs <- summary(pfull_med.out)

# citation score
ncs_mediation_data <- diffindiff_data_only_movers_leiden_diff %>% filter(!is.na(ncs_full_mean))
ncs_med.fit <- lmer(gelman_difference_in_pptop10_zeropremove ~ post_move + career_year + origin_pp_top10_mean + (1|cluster_id), data = ncs_mediation_data)
ncs_out.fit <- lmer(ncs_full_mean ~ gelman_difference_in_pptop10_zeropremove + post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = ncs_mediation_data)
ncs_med.out <- mediate(ncs_med.fit, ncs_out.fit, treat = "post_move", mediator = "gelman_difference_in_pptop10_zeropremove", robustSE = F, sims = 1000)
med_leiden_ncs <- summary(ncs_med.out)

# journal score
njs_mediation_data <- diffindiff_data_only_movers_leiden_diff %>% filter(!is.na(njs_full_mean))
njs_med.fit <- lmer(gelman_difference_in_pptop10_zeropremove ~ post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = njs_mediation_data)
njs_out.fit <- lmer(njs_full_mean ~ gelman_difference_in_pptop10_zeropremove + post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = njs_mediation_data)
njs_med.out <- mediate(njs_med.fit, njs_out.fit, treat = "post_move", mediator = "gelman_difference_in_pptop10_zeropremove", robustSE = F, sims = 1000)
med_leiden_njs <- summary(njs_med.out)

# top journals
topjoural_med.fit <- lmer(gelman_difference_in_pptop10_zeropremove ~ post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff)
topjournal_out.fit <- lmer(njs_full_over2_yearsum ~ gelman_difference_in_pptop10_zeropremove + post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff)
topjournal_med.out <- mediate(topjoural_med.fit, topjournal_out.fit, treat = "post_move", mediator = "gelman_difference_in_pptop10_zeropremove", robustSE = F, sims = 1000)
med_leiden_topjournals <- summary(topjournal_med.out)

# top 10%
top10_med.fit <- lmer(gelman_difference_in_pptop10_zeropremove ~ post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff)
top10_out.fit <- lmer(p_top_prop10_full_yearsum ~ gelman_difference_in_pptop10_zeropremove + post_move + career_year + origin_pp_top10_mean +(1|cluster_id), data = diffindiff_data_only_movers_leiden_diff)
top10_med.out <- mediate(top10_med.fit, top10_out.fit, treat = "post_move", mediator = "gelman_difference_in_pptop10_zeropremove", robustSE = F, sims = 1000)
med_leiden_pptop10 <- summary(top10_med.out)

#step 3b creating a table with the mediation analysis results#
# Table layout: 17 columns = ranking + mediation quantity + 5 outcomes x (estimate, CI_l, CI_U).
# paste(round(x$..ci, 2)) expands each 2-element CI into two cells.
# Mediation summary fields: d0 = ACME, z0 = ADE, tau.coef = total effect, n0 = proportion mediated.
mediation_table <- c("", "", "publications","", "", "ncs","", "", "njs","", "", "top journals", "","", "top cited", "","") %>%
  rbind(c("ranking", "mediation", "estimate", "CI_l","CI_U", "estimate", "CI_l","CI_U", "estimate", "CI_l","CI_U", "estimate", "CI_l","CI_U", "estimate", "CI_l","CI_U"))%>%
  rbind(c("QS", "ACME", round(med_qs_pubs$d0, 2), paste(round(med_qs_pubs$d0.ci,2)), round(med_qs_ncs$d0,2), paste(round(med_qs_ncs$d0.ci,2)),round(med_qs_njs$d0,2), paste(round(med_qs_njs$d0.ci,2)),round(med_qs_topjournals$d0,2), paste(round(med_qs_topjournals$d0.ci,2)),round(med_qs_pptop10$d0,2), paste(round(med_qs_pptop10$d0.ci,2)))) %>%
  rbind(c("QS", "ADE", round(med_qs_pubs$z0, 2), paste(round(med_qs_pubs$z0.ci,2)), round(med_qs_ncs$z0,2), paste(round(med_qs_ncs$z0.ci,2)),round(med_qs_njs$z0,2), paste(round(med_qs_njs$z0.ci,2)),round(med_qs_topjournals$z0,2), paste(round(med_qs_topjournals$z0.ci,2)),round(med_qs_pptop10$z0,2), paste(round(med_qs_pptop10$z0.ci,2)))) %>%
  rbind(c("QS", "Total Effect", round(med_qs_pubs$tau.coef, 2), paste(round(med_qs_pubs$tau.ci,2)), round(med_qs_ncs$tau.coef,2), paste(round(med_qs_ncs$tau.ci,2)),round(med_qs_njs$tau.coef,2), paste(round(med_qs_njs$tau.ci,2)),round(med_qs_topjournals$tau.coef,2), paste(round(med_qs_topjournals$tau.ci,2)),round(med_qs_pptop10$tau.coef,2), paste(round(med_qs_pptop10$tau.ci,2)))) %>% 
rbind(c("QS", "Prop. Mediated", round(med_qs_pubs$n0, 2), paste(round(med_qs_pubs$n0.ci,2)), round(med_qs_ncs$n0,2), paste(round(med_qs_ncs$n0.ci,2)),round(med_qs_njs$n0,2), paste(round(med_qs_njs$n0.ci,2)),round(med_qs_topjournals$n0,2), paste(round(med_qs_topjournals$n0.ci,2)),round(med_qs_pptop10$n0,2), paste(round(med_qs_pptop10$n0.ci,2)))) %>% rbind(c("", "", "","", "", "","", "", "","", "", "", "","", "", "","")) %>% rbind(c("Leiden", "ACME", round(med_qs_pubs$d0, 2), paste(round(med_leiden_pubs$d0.ci,2)), round(med_leiden_ncs$d0,2), paste(round(med_leiden_ncs$d0.ci,2)),round(med_leiden_njs$d0,2), paste(round(med_leiden_njs$d0.ci,2)),round(med_leiden_topjournals$d0,2), paste(round(med_leiden_topjournals$d0.ci,2)),round(med_leiden_pptop10$d0,2), paste(round(med_leiden_pptop10$d0.ci,2)))) %>% rbind(c("Leiden", "ADE", round(med_leiden_pubs$z0, 2), paste(round(med_leiden_pubs$z0.ci,2)), round(med_leiden_ncs$z0,2), paste(round(med_leiden_ncs$z0.ci,2)),round(med_leiden_njs$z0,2), paste(round(med_leiden_njs$z0.ci,2)),round(med_leiden_topjournals$z0,2), paste(round(med_leiden_topjournals$z0.ci,2)),round(med_leiden_pptop10$z0,2), paste(round(med_leiden_pptop10$z0.ci,2)))) %>% rbind(c("Leiden", "Total Effect", round(med_leiden_pubs$tau.coef, 2), paste(round(med_leiden_pubs$tau.ci,2)), round(med_leiden_ncs$tau.coef,2), paste(round(med_leiden_ncs$tau.ci,2)),round(med_leiden_njs$tau.coef,2), paste(round(med_leiden_njs$tau.ci,2)),round(med_leiden_topjournals$tau.coef,2), paste(round(med_leiden_topjournals$tau.ci,2)),round(med_leiden_pptop10$tau.coef,2), paste(round(med_leiden_pptop10$tau.ci,2)))) %>% rbind(c("Leiden", "Prop. 
Mediated", round(med_leiden_pubs$n0, 2), paste(round(med_leiden_pubs$n0.ci,2)), round(med_leiden_ncs$n0,2), paste(round(med_leiden_ncs$n0.ci,2)),round(med_leiden_njs$n0,2), paste(round(med_leiden_njs$n0.ci,2)),round(med_leiden_topjournals$n0,2), paste(round(med_leiden_topjournals$n0.ci,2)),round(med_leiden_pptop10$n0,2), paste(round(med_leiden_pptop10$n0.ci,2)))) %>%
  as_tibble() %>%
  # Collapse the paired CI columns (V4:V5, ...) into single "lower, upper" cells.
  unite(col = "pubs_CI",V4:V5, sep = ", ") %>%
  unite(col = "ncs_CI",V7:V8, sep = ", ") %>%
  unite(col = "njs_CI",V10:V11, sep = ", ") %>%
  unite(col = "topjournals_CI",V13:V14, sep = ", ") %>%
  unite(col = "pptop10_CI",V16:V17, sep = ", ")
write.csv(mediation_table, "tables/table4. mediation table.csv")

################## ANALYSIS STEP 4. DIFFERENCES IN H-INDEX 5 YEARS AFTER THE MOVE ##################
# Keep only matched pairs where BOTH members are still active (career_over == 0) at
# exactly 5 years post-move, so pre/post h-index comparisons use complete pairs.
researchers_with_five_years_post_move <- matched_dataset %>% filter(years_from_obtaining_usa_affilation == 5, career_over == 0) %>% group_by(pair_id) %>% mutate(number_in_pair_with5years = n()) %>% filter(number_in_pair_with5years == 2) %>% distinct(cluster_id)

# h-index approximated as 0.54*sqrt(cumulative citation mass) — Hirsch-style
# approximation; presumably calibrated elsewhere, TODO confirm against the paper.
hindex_at_5years_post <- researchers_with_five_years_post_move %>% left_join(matched_dataset) %>% filter(years_from_obtaining_usa_affilation <= 5) %>% group_by(cluster_id) %>% summarise(condition = first(condition), sum_ncs = sum(ncs_frac_yearsum), post_move_h_index = 0.54*sqrt(sum_ncs)) %>% select(cluster_id, condition, post_move_h_index) #some people have zero, this may be due to missing data in our dataset
h_index_on_year_prior <- researchers_with_five_years_post_move %>% left_join(matched_dataset) %>% filter(years_from_obtaining_usa_affilation < 0) %>% group_by(cluster_id) %>% summarise(condition = first(condition), sum_ncs = sum(ncs_frac_yearsum), pre_move_h_index = 0.54*sqrt(sum_ncs)) %>% select(cluster_id, condition, pre_move_h_index)
h_index_data <- h_index_on_year_prior %>% left_join(hindex_at_5years_post) %>% mutate(change_in_hindex = post_move_h_index-pre_move_h_index)
# Long format for plotting: one row per researcher per (pre/post) period.
h_index_data_new <- h_index_data %>%
  pivot_longer(!c(cluster_id,condition, change_in_hindex), names_to = "pre_or_post", values_to = "hindex") %>%
  mutate(pre_or_post = factor(gsub("_h_index", "",pre_or_post), levels = c("pre_move", "post_move"), labels = c("Pre-move", "Post-move")), condition = factor(condition, levels = c("stayers", "movers"), labels = c("Stayers", "Movers")))

#descriptive plots for 5-years plot
# Per condition x event-year means, medians, SDs and standard errors of all five outcomes.
descriptive_5yearspost_data <- researchers_with_five_years_post_move %>% left_join(matched_dataset) %>% filter(years_from_obtaining_usa_affilation <= 5 & years_from_obtaining_usa_affilation >= -2) %>% group_by(condition, years_from_obtaining_usa_affilation) %>% summarise(mean_p_full = mean(p_full_yearsum), mean_ncs_full_mean = mean(ncs_full_mean, na.rm=T), mean_njs_full_mean = mean(njs_full_mean, na.rm=T), mean_njs_full_over2_yearsum = mean(njs_full_over2_yearsum), mean_p_top_prop10_full_yearsum =mean(p_top_prop10_full_yearsum), median_p_full = median(p_full_yearsum), median_ncs_full_mean = median(ncs_full_mean, na.rm=T), median_njs_full_mean = median(njs_full_mean, na.rm=T), median_njs_full_over2_yearsum = median(njs_full_over2_yearsum), median_p_top_prop10_full_yearsum =median(p_top_prop10_full_yearsum), sd_p_full = sd(p_full_yearsum), sd_ncs_full_mean = sd(ncs_full_mean, na.rm=T), sd_mean_njs_full_mean = sd(njs_full_mean, na.rm = T), sd_mean_njs_full_over2_yearsum = sd(njs_full_over2_yearsum), sd_mean_p_top_prop10_full_yearsum = sd(p_top_prop10_full_yearsum), se_p_full = sd_p_full/sqrt(n()), se_ncs_full_mean = sd_ncs_full_mean/sqrt(n()), se_mean_njs_full_mean = sd_mean_njs_full_mean/sqrt(n()), se_njs_full_over2_yearsum = sd_mean_njs_full_over2_yearsum/sqrt(n()), se_mean_p_top_prop10_full_yearsum = sd_mean_p_top_prop10_full_yearsum/sqrt(n()) )

# Solid line = mean (+/- SE error bars), dotted jittered line = median, per condition.
descriptive_5yearsplot_pubs.plot <- ggplot(descriptive_5yearspost_data, aes( years_from_obtaining_usa_affilation,mean_p_full, group = condition, colour = condition, shape = condition)) + geom_point(size = 3) + geom_line(size = 1) + geom_line(data = descriptive_5yearspost_data, aes(years_from_obtaining_usa_affilation,median_p_full), linetype = "dotted", size = 1, position = position_jitter(w=0.05, h=0),alpha = 0.6) + geom_errorbar(aes(ymin = mean_p_full-se_p_full, ymax = mean_p_full+se_p_full), width = 0.2) + theme_classic()+ theme(legend.position="bottom", legend.title = element_blank()) + scale_color_manual(values=c("palevioletred","lightblue")) + ylab("Average (SE) number of publications") + xlab("Years from move") + scale_x_continuous(breaks = seq(-2,5,1))
descriptive_5yearsplot_ncs.plot <- ggplot(descriptive_5yearspost_data, aes( years_from_obtaining_usa_affilation,mean_ncs_full_mean, group = condition, colour = condition, shape = condition)) + geom_point(size = 3) + geom_line(size = 1) + geom_line(data = descriptive_5yearspost_data, aes(years_from_obtaining_usa_affilation,median_ncs_full_mean), linetype = "dotted", size = 1, position = position_jitter(w=0.05, h=0),alpha = 0.6) + geom_errorbar(aes(ymin = mean_ncs_full_mean-se_ncs_full_mean, ymax = mean_ncs_full_mean+se_ncs_full_mean), width = 0.2) + theme_classic()+ theme(legend.position="bottom", legend.title = element_blank()) + scale_color_manual(values=c("palevioletred","lightblue")) + ylab("Average (SE) citation score") + xlab("Years from move") + scale_x_continuous(breaks = seq(-2,5,1))
descriptive_5yearsplot_njs.plot <- ggplot(descriptive_5yearspost_data, aes( years_from_obtaining_usa_affilation,mean_njs_full_mean, group = condition, colour = condition, shape = condition)) + geom_point(size = 3) + geom_line(size = 1) + geom_line(data = descriptive_5yearspost_data, aes(years_from_obtaining_usa_affilation,median_njs_full_mean), linetype = "dotted", size = 1, position = position_jitter(w=0.05, h=0),alpha = 0.6) + geom_errorbar(aes(ymin = mean_njs_full_mean-se_mean_njs_full_mean, ymax = mean_njs_full_mean+se_mean_njs_full_mean),width = 0.2) + theme_classic()+ theme(legend.position="bottom", legend.title = element_blank()) + 
scale_color_manual(values=c("palevioletred","lightblue")) + ylab("Average (SE) journal score") + xlab("Years from move") + scale_x_continuous(breaks = seq(-2,5,1)) descriptive_5yearsplot_topjournals.plot <- ggplot(descriptive_5yearspost_data, aes( years_from_obtaining_usa_affilation,mean_njs_full_over2_yearsum, group = condition, colour = condition, shape = condition)) + geom_point(size = 3) + geom_line(size = 1) + geom_line(data = descriptive_5yearspost_data, aes(years_from_obtaining_usa_affilation,median_njs_full_over2_yearsum), linetype = "dotted", size = 1, position = position_jitter(w=0.05, h=0),alpha = 0.6) + geom_errorbar(aes(ymin = mean_njs_full_over2_yearsum-se_njs_full_over2_yearsum, ymax = mean_njs_full_over2_yearsum+se_njs_full_over2_yearsum),width = 0.2) + theme_classic()+ theme(legend.position="bottom", legend.title = element_blank()) + scale_color_manual(values=c("palevioletred","lightblue")) + ylab("Average (SE) sum of top journal publications") + xlab("Years from move") + scale_x_continuous(breaks = seq(-2,5,1)) descriptive_5yearsplot_topcited.plot <- ggplot(descriptive_5yearspost_data, aes( years_from_obtaining_usa_affilation,mean_p_top_prop10_full_yearsum, group = condition, colour = condition, shape = condition)) + geom_point(size = 3) + geom_line(size = 1) + geom_line(data = descriptive_5yearspost_data, aes(years_from_obtaining_usa_affilation,median_p_top_prop10_full_yearsum), linetype = "dotted", size = 1, position = position_jitter(w=0.05, h=0),alpha = 0.6) + geom_errorbar(aes(ymin = mean_p_top_prop10_full_yearsum-se_mean_p_top_prop10_full_yearsum, ymax = mean_p_top_prop10_full_yearsum+se_mean_p_top_prop10_full_yearsum),width = 0.2) + theme_classic()+ theme(legend.position="bottom", legend.title = element_blank()) + scale_color_manual(values=c("palevioletred","lightblue")) + ylab("Average (SE) sum of top cited papers") + xlab("Years from move") + scale_x_continuous(breaks = seq(-2,5,1)) ## GeomSplitViolin <- ggproto( "GeomSplitViolin", 
GeomViolin, draw_group = function(self, data, ..., draw_quantiles = NULL) { data <- transform(data, xminv = x - violinwidth * (x - xmin), xmaxv = x + violinwidth * (xmax - x) ) grp <- data[1, "group"] newdata <- plyr::arrange( transform(data, x = if (grp %% 2 == 1) xminv else xmaxv), if (grp %% 2 == 1) y else -y ) newdata <- rbind(newdata[1, ], newdata, newdata[nrow(newdata), ], newdata[1, ]) newdata[c(1, nrow(newdata) - 1, nrow(newdata)), "x"] <- round(newdata[1, "x"]) if (length(draw_quantiles) > 0 & !scales::zero_range(range(data$y))) { stopifnot(all(draw_quantiles >= 0), all(draw_quantiles <= 1)) quantiles <- ggplot2:::create_quantile_segment_frame(data, draw_quantiles) aesthetics <- data[rep(1, nrow(quantiles)), setdiff(names(data), c("x", "y")), drop = FALSE] aesthetics$alpha <- rep(1, nrow(quantiles)) both <- cbind(quantiles, aesthetics) quantile_grob <- GeomPath$draw_panel(both, ...) ggplot2:::ggname( "geom_split_violin", grid::grobTree(GeomPolygon$draw_panel(newdata, ...), quantile_grob) ) } else { ggplot2:::ggname("geom_split_violin", GeomPolygon$draw_panel(newdata, ...)) } } ) geom_split_violin <- function(mapping = NULL, data = NULL, stat = "ydensity", position = "identity", ..., draw_quantiles = NULL, trim = TRUE, scale = "area", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE) { layer( data = data, mapping = mapping, stat = stat, geom = GeomSplitViolin, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = list( trim = trim, scale = scale, draw_quantiles = draw_quantiles, na.rm = na.rm, ... 
) ) } violinplot_hindex <- ggplot(h_index_data_new, aes(pre_or_post,hindex, fill = condition)) + geom_split_violin() + theme_classic()+ stat_summary(fun = median, geom = "crossbar", width = 0.25, position = position_dodge(width = .25), ) + scale_fill_manual(values = c("lightblue", "palevioletred")) + ylab("'fractionalised h-index'") + xlab("Pre vs post (5 yrs) move") + ylim(0, 2.8) descriptive_5yearsplot.panelplot <- ggarrange(descriptive_5yearsplot_pubs.plot,descriptive_5yearsplot_ncs.plot,descriptive_5yearsplot_njs.plot,descriptive_5yearsplot_topjournals.plot,descriptive_5yearsplot_topcited.plot,violinplot_hindex, common.legend = T, labels = "AUTO",vjust =-0.0, hjust =-2) ggsave(descriptive_5yearsplot.panelplot, filename = "plots/Fig4. 5yearplot_a.pdf")
/5. analysis.R
no_license
benholding/wos_mobility
R
false
false
44,152
r
# 5. analysis.R -- Step 1: difference-in-differences (did::att_gt / did::aggte)
# for each bibliometric outcome on the matched mover/stayer panel.

# importing data and packages
load("matched_dataset.RData") # for those downloading the code, you should use load("data_to_be_shared.RData") instead
pacman::p_load(tidyverse, sjPlot, cowplot, did, lmerTest, ggpubr, interplot,
               mediation, effectsize, esc, vioplot)
# https://cran.r-project.org/web/packages/interplot/vignettes/interplot-vignette.html
# Re-attach dplyr so its verbs sit first on the search path
# (NOTE(review): presumably to undo masking by packages loaded above -- confirm).
detach("package:dplyr", unload = TRUE)
library(dplyr)
set.seed(5030)

##################################################################
######## ANALYSIS STEP 1 - DIFFERENCE IN DIFFERENCE ##############
##################################################################

## FULL PUBLICATIONS ##
did_model_pfull <- att_gt(
  yname = "p_full_yearsum",
  gname = "moving_year_plus1",
  idname = "cluster_id",
  tname = "career_year_plus_1",
  xformla = ~1, # this package doesn't provide functionality to assess impact of covariates on outcomes.
  data = matched_dataset,
  est_method = "dr",
  control_group = "nevertreated",
  anticipation = 1,
  allow_unbalanced_panel = TRUE # was `T`; TRUE is the non-reassignable form
)

# Event-study aggregation from 5 years pre-move to 2 years post-move.
did_model_pfull_dynamic_short <- aggte(did_model_pfull, type = "dynamic", min_e = -5, max_e = 2)
summary(did_model_pfull_dynamic_short)
p_full_did_plot <- ggdid(did_model_pfull_dynamic_short, xlab = "Years from move", ylab = "Treatment effect", title = "Publications (year sum)")
# Convert the overall ATT into a standardised effect size (Cohen's d),
# using the outcome SD and an even split of the sample across groups.
esc_B(b = did_model_pfull_dynamic_short$overall.att,
      sdy = sd(matched_dataset$p_full_yearsum),
      grp1n = did_model_pfull_dynamic_short$DIDparams$n / 2,
      grp2n = did_model_pfull_dynamic_short$DIDparams$n / 2,
      es.type = c("d"))

## NORMALISED CITATION SCORE ##
did_model_ncs_full_yearmean <- att_gt(
  yname = "ncs_full_mean",
  gname = "moving_year_plus1",
  idname = "cluster_id",
  tname = "career_year_plus_1",
  xformla = ~1,
  data = matched_dataset,
  est_method = "dr",
  control_group = "nevertreated",
  anticipation = 1,
  allow_unbalanced_panel = TRUE
) # will give an error about missing data (because people don't always publish in a given year...)
# Event-study aggregation, plot, and effect size for the citation-score model.
did_model_ncs_full_dynamic_short <- aggte(did_model_ncs_full_yearmean, type = "dynamic", min_e = -5, max_e = 2)
summary(did_model_ncs_full_dynamic_short)
ncs_full_did_plot <- ggdid(did_model_ncs_full_dynamic_short, xlab = "Years from move", ylab = "Treatment effect", title = "Citation score (year mean)")
# Cohen's d from the overall ATT (na.rm needed: not everyone publishes yearly).
esc_B(b = did_model_ncs_full_dynamic_short$overall.att,
      sdy = sd(matched_dataset$ncs_full_mean, na.rm = TRUE), # was `T`
      grp1n = did_model_ncs_full_dynamic_short$DIDparams$n / 2,
      grp2n = did_model_ncs_full_dynamic_short$DIDparams$n / 2,
      es.type = c("d"))

## NORMALISED JOURNAL SCORE ##
did_model_njs_full <- att_gt(
  yname = "njs_full_mean",
  gname = "moving_year_plus1",
  idname = "cluster_id",
  tname = "career_year_plus_1",
  xformla = ~1,
  data = matched_dataset,
  est_method = "dr",
  control_group = "nevertreated",
  anticipation = 1,
  allow_unbalanced_panel = TRUE # was `T`
) # will give an error about missing data (because people don't always publish in a given year...)

did_model_njs_full_dynamic_short <- aggte(did_model_njs_full, type = "dynamic", min_e = -5, max_e = 2)
summary(did_model_njs_full_dynamic_short)
njs_full_did_plot <- ggdid(did_model_njs_full_dynamic_short, xlab = "Years from move", ylab = "Treatment effect", title = " Journal score (year mean)")
esc_B(b = did_model_njs_full_dynamic_short$overall.att,
      sdy = sd(matched_dataset$njs_full_mean, na.rm = TRUE), # was `T`
      grp1n = did_model_njs_full_dynamic_short$DIDparams$n / 2,
      grp2n = did_model_njs_full_dynamic_short$DIDparams$n / 2,
      es.type = c("d"))

## NUMBER OF TOP JOURNALS (njs >2 ) ##
did_model_njs_topjournals <- att_gt(
  yname = "njs_full_over2_yearsum",
  gname = "moving_year_plus1",
  idname = "cluster_id",
  tname = "career_year_plus_1",
  xformla = ~1,
  data = matched_dataset,
  est_method = "dr",
  control_group = "nevertreated",
  anticipation = 1,
  allow_unbalanced_panel = TRUE # was `T`
) # will give an error about missing data (because people don't always publish in a given year...)
did_model_njs_topjournals_dynamic_short <- aggte(did_model_njs_topjournals, type = "dynamic", min_e = -5, max_e = 2) summary(did_model_njs_topjournals_dynamic_short) njs_topjournals_did_plot <- ggdid(did_model_njs_topjournals_dynamic_short, xlab = "Years from move", ylab = "Treatment effect", title = "Top journal publications (year sum)") esc_B(b = did_model_njs_topjournals_dynamic_short$overall.att, sdy = sd(matched_dataset$njs_full_over2_yearsum, na.rm=T),grp1n = did_model_njs_topjournals_dynamic_short$DIDparams$n/2, grp2n = did_model_njs_topjournals_dynamic_short$DIDparams$n/2,es.type = c("d")) ## NUMBER OF TOP CITED PAPERS ## did_model_p_top_prop10_full<- att_gt(yname = "p_top_prop10_full_yearsum", gname = "moving_year_plus1", idname = "cluster_id", tname = "career_year_plus_1", xformla = ~1, data = matched_dataset, est_method = "dr", control_group = "notyettreated", anticipation = 1, allow_unbalanced_panel = T) #i think this should account for the fact that some people have quit science (and we therefore don't have full data for them) did_model_p_top_prop10_full_dynamic_short <- aggte(did_model_p_top_prop10_full, type = "dynamic", min_e = -5, max_e = 2) summary(did_model_p_top_prop10_full_dynamic_short) pp10_full_did_plot <- ggdid(did_model_p_top_prop10_full_dynamic_short, xlab = "Years from move", ylab = "Treatment effect", title = "Top cited publications (year sum)") esc_B(b = did_model_p_top_prop10_full_dynamic_short$overall.att, sdy = sd(matched_dataset$p_top_prop10_full_yearsum, na.rm=T),grp1n = did_model_p_top_prop10_full_dynamic_short$DIDparams$n/2, grp2n = did_model_p_top_prop10_full_dynamic_short$DIDparams$n/2,es.type = c("d")) ## MAKING A FIGURE OF THE DIFFERENCE-IN-DIFFERENCE RESULTS ## did_plot_grid <- ggarrange(p_full_did_plot, ncs_full_did_plot, njs_full_did_plot, njs_topjournals_did_plot, pp10_full_did_plot, common.legend = T, legend = "bottom", ncol=2,nrow=3, labels = "AUTO",hjust=-2) ggexport(did_plot_grid, filename = "plots/Fig2. 
DID.pdf") ## MAKING TABLES OF THE DIFFERENCE-IN-DIFFERENCE RESULTS ## # First a table of the relative increases in performance ATTs <- c(did_model_pfull_dynamic_short$overall.att, did_model_ncs_full_dynamic_short$overall.att, did_model_njs_full_dynamic_short$overall.att, did_model_njs_topjournals_dynamic_short$overall.att, did_model_p_top_prop10_full_dynamic_short$overall.att) table2 <- matched_dataset %>% #calculating relative increase filter(years_from_obtaining_usa_affilation >= 0 & years_from_obtaining_usa_affilation <= 2, condition_numeric == 0) %>% summarise(mean_p_full = mean(p_full_yearsum), mean_ncs_full = mean(ncs_full_mean, na.rm=T), mean_njs_full = mean(njs_full_mean, na.rm=T), mean_njs_topjournals = mean(njs_full_over2_yearsum, na.rm=T), mean_p_top_prop10_full = mean(p_top_prop10_full_yearsum)) %>% pivot_longer(everything(),names_to = "bibliometric_measure", values_to = "mean") %>% mutate(ATT = ATTs, percentage_increase = (ATT/mean)*100) %>% mutate(mean = round(mean,2), ATT = round(ATT, 2), percentage_increase = round(percentage_increase, 2)) write.csv(table2, "tables/table1. att.csv") #Then tables of the individual difference-in-difference models tidy(did_model_pfull_dynamic_short) %>% mutate(across(estimate:point.conf.high, round,2)) %>% write.csv("tables/S2. DID pfull.csv") tidy(did_model_ncs_full_dynamic_short) %>% mutate(across(estimate:point.conf.high, round,2)) %>% write.csv("tables/S3. DID ncs.csv") tidy(did_model_njs_full_dynamic_short) %>% mutate(across(estimate:point.conf.high, round,2)) %>% write.csv("tables/S4. DID njs.csv") tidy(did_model_njs_topjournals_dynamic_short) %>% mutate(across(estimate:point.conf.high, round,2)) %>% write.csv("tables/S5. DID topjournals.csv") tidy(did_model_p_top_prop10_full_dynamic_short) %>% mutate(across(estimate:point.conf.high, round,2)) %>% write.csv("tables/S6. 
DID pptop10.csv") ############################################################################################################ ### ANALYSIS STEP 2. MODERATION ############################################################################ # here we take only movers and interact 2 variables: "moving" with "difference in rank from origin to usa" # ############################################################################################################ # step 2a. first i make two new datasets, one that contains complete data those who we know the origin and usa QS ranking, and one where we know the origin and usa Leiden ranking. diffindiff_data_only_movers_qs_diff <- matched_dataset %>% #the QS ranking moderation dataset filter(years_from_obtaining_usa_affilation >= -2, years_from_obtaining_usa_affilation <= 2, condition_numeric == 1, !is.na(gelman_difference_in_qs_overall_score)) %>% mutate(gelman_difference_in_qs_overall_score_zeropremove = if_else(post_move == 0, 0, gelman_difference_in_qs_overall_score), gelman_origin_qs_overall_score_mean = origin_qs_overall_score_mean/(2*sd(origin_qs_overall_score_mean, na.rm = T))) diffindiff_data_only_movers_leiden_diff <- matched_dataset %>% #the Leiden ranking moderation dataset filter(years_from_obtaining_usa_affilation >= -2, years_from_obtaining_usa_affilation <= 2, condition_numeric == 1, !is.na(gelman_difference_in_pptop10)) %>% mutate(gelman_difference_in_pptop10_zeropremove = if_else(post_move == 0, 0, gelman_difference_in_pptop10), gelman_origin_pp_top10_mean = origin_pp_top10_mean/(2*sd(origin_pp_top10_mean, na.rm = T))) # step 2b. 
Running the moderation analysis + making individual plots # Publications pfull_qs <- lmer(p_full_yearsum ~ gelman_difference_in_qs_overall_score:post_move + post_move + career_year + gelman_origin_qs_overall_score_mean + (1|cluster_id), data = diffindiff_data_only_movers_qs_diff); summary(pfull_qs); plot_model(pfull_qs, type = "int") pfull_pptop10 <- lmer(p_full_yearsum ~ gelman_difference_in_pptop10:post_move + post_move + career_year + gelman_origin_pp_top10_mean + (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff); summary(pfull_pptop10); plot_model(pfull_pptop10, type = "int", show.values = T) pfull_qs_moderation_plot <- interplot(m = pfull_qs, var1 = "post_move", var2 = "gelman_difference_in_qs_overall_score") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left") scale_y_continuous(limits = c(0.25,2), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2")) + geom_hline(yintercept=0, linetype="dotted") pfull_pptop10_moderation_plot <- interplot(m = pfull_pptop10, var1 = "post_move", var2 = "gelman_difference_in_pptop10") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left") scale_y_continuous(limits = c(0.25,2), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4")) + geom_hline(yintercept=0, linetype="dotted") #Normalised citation score ncs_full_qs <- lmer(ncs_full_mean ~gelman_difference_in_qs_overall_score:post_move + post_move +career_year + gelman_origin_qs_overall_score_mean + (1|cluster_id), data = diffindiff_data_only_movers_qs_diff); summary(ncs_full_qs); plot_model(ncs_full_qs, type = "int") ncs_full_pptop10 <- lmer(ncs_full_mean ~gelman_difference_in_pptop10:post_move + post_move + career_year + gelman_origin_pp_top10_mean + (1|cluster_id), data = 
diffindiff_data_only_movers_leiden_diff); summary(ncs_full_pptop10); plot_model(ncs_full_pptop10, type = "int", show.values = T) ncs_full_qs_moderation_plot <- interplot(m = ncs_full_qs, var1 = "post_move", var2 = "gelman_difference_in_qs_overall_score") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left") scale_y_continuous(limits = c(-1.3, 2.7), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2")) + geom_hline(yintercept=0, linetype="dotted") ncs_full_pptop10_moderation_plot <- interplot(m = ncs_full_pptop10, var1 = "post_move", var2 = "gelman_difference_in_pptop10") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left") scale_y_continuous(limits = c(-1.3, 2.7), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4")) + geom_hline(yintercept=0, linetype="dotted") #normalised Journal score njs_full_qs <- lmer(njs_full_mean ~gelman_difference_in_qs_overall_score:post_move + post_move +career_year + gelman_origin_qs_overall_score_mean + (1|cluster_id), data = diffindiff_data_only_movers_qs_diff); summary(njs_full_qs); plot_model(njs_full_qs, type = "int") njs_full_pptop10 <- lmer(njs_full_mean ~gelman_difference_in_pptop10:post_move + post_move + career_year + gelman_origin_pp_top10_mean + (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff); summary(njs_full_pptop10); plot_model(njs_full_pptop10, type = "int") njs_full_qs_moderation_plot <- interplot(m = njs_full_qs, var1 = "post_move", var2 = "gelman_difference_in_qs_overall_score") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left") scale_y_continuous(limits =c(-0.8, 1.7),expand = c(0,0)) + scale_x_continuous(expand = c(0, 
0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2")) + geom_hline(yintercept=0, linetype="dotted") njs_full_pptop10_moderation_plot <- interplot(m = njs_full_pptop10, var1 = "post_move", var2 = "gelman_difference_in_pptop10") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left") scale_y_continuous(limits =c(-0.8, 1.7),expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4")) + geom_hline(yintercept=0, linetype="dotted") #Top journals njs_topjournals_qs <- lmer(njs_full_over2_yearsum ~gelman_difference_in_qs_overall_score:post_move + post_move +career_year + gelman_origin_qs_overall_score_mean + (1|cluster_id), data = diffindiff_data_only_movers_qs_diff); summary(njs_topjournals_qs); plot_model(njs_topjournals_qs, type = "int"); omega_squared(njs_topjournals_qs) njs_topjournals_pptop10 <- lmer(njs_full_over2_yearsum ~gelman_difference_in_pptop10:post_move + post_move + career_year + gelman_origin_pp_top10_mean + (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff); summary(njs_topjournals_pptop10); plot_model(njs_topjournals_pptop10, type = "int"); omega_squared(njs_topjournals_pptop10) njs_topjournals_qs_moderation_plot <- interplot(m = njs_topjournals_qs, var1 = "post_move", var2 = "gelman_difference_in_qs_overall_score") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left") scale_y_continuous(limits = c(-0.4,1.1), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2")) + geom_hline(yintercept=0, linetype="dotted") njs_topjournals_pptop10_moderation_plot <- interplot(m = njs_topjournals_pptop10, var1 = "post_move", var2 = "gelman_difference_in_pptop10") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + 
# ("top", "right", "bottom", "left") scale_y_continuous(limits = c(-0.4,1.1), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4")) + geom_hline(yintercept=0, linetype="dotted") # Top cited papers p_top_prop10_full_qs <- lmer(p_top_prop10_full_yearsum ~gelman_difference_in_qs_overall_score:post_move + post_move +career_year + gelman_origin_qs_overall_score_mean + (1|cluster_id), data = diffindiff_data_only_movers_qs_diff); summary(p_top_prop10_full_qs); plot_model(p_top_prop10_full_qs, type = "int"); omega_squared(p_top_prop10_full_qs) p_top_prop10_full_pptop10 <-lmer(p_top_prop10_full_yearsum ~gelman_difference_in_pptop10:post_move + post_move + career_year + gelman_origin_pp_top10_mean + (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff); summary(p_top_prop10_full_pptop10); plot_model(p_top_prop10_full_pptop10, type = "int"); omega_squared(p_top_prop10_full_pptop10) p_top_prop10_full_qs_moderation_plot <- interplot(m = p_top_prop10_full_qs, var1 = "post_move", var2 = "gelman_difference_in_qs_overall_score") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left") scale_y_continuous(limits = c(-0.25,0.85), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2")) + geom_hline(yintercept=0, linetype="dotted") p_top_prop10_full_pptop10_moderation_plot <- interplot(m = p_top_prop10_full_pptop10, var1 = "post_move", var2 = "gelman_difference_in_pptop10") + theme_bw() + theme(plot.title = element_text(size = 10), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left") scale_y_continuous(limits = c(-0.25,0.85),expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4")) + geom_hline(yintercept=0, linetype="dotted") #step 2c. 
Making a panel plot that contains all of the moderation analyses + histograms of the raw ranking_difference data qs_difference_plot <- ggplot(diffindiff_data_only_movers_qs_diff %>% distinct(cluster_id, .keep_all = T), aes(x=gelman_difference_in_qs_overall_score)) + #Histogram of difference in QS rankings geom_histogram()+ theme_bw() + theme(plot.title = element_text(size = 10), axis.title.x=element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left") scale_y_continuous(limit = c(0,125), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0.5,0,0.5, 1),labels=c("-2", "-1", "0", "1", "2")) leiden_difference_plot <- ggplot(diffindiff_data_only_movers_leiden_diff %>% distinct(cluster_id, .keep_all = T), aes(x = gelman_difference_in_pptop10)) + #histogram of difference in Leiden rankings geom_histogram() + theme_bw() + theme(plot.title = element_text(size = 10), axis.title.x=element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0.2,0.2,0,0.1), "cm")) + # ("top", "right", "bottom", "left") scale_y_continuous(limit = c(0,125), expand = c(0,0)) + scale_x_continuous(expand = c(0, 0),breaks=c(-1,-0,1, 2),labels=c("-2", "0", "2", "4")) interaction_plots_left <- ggarrange(pfull_qs_moderation_plot, #making the left half of the panel plot ncs_full_qs_moderation_plot, njs_full_qs_moderation_plot, njs_topjournals_qs_moderation_plot, p_top_prop10_full_qs_moderation_plot, qs_difference_plot, common.legend = T, legend = "bottom", ncol=1,nrow=6, labels = c("a. Publications", "b. Citation Score", "c. Journal Score", "d. Top journal publications", "e. Top cited publications", "f. 
Ranking change"), hjust = -0.1, vjust = -0.1, align = "hv", font.label = list(size = 10, color = "black", face = "bold.italic", family = NULL)) %>% annotate_figure(top = text_grob("QS", face = "bold")) interaction_plots_right <- ggarrange(pfull_pptop10_moderation_plot, #making the right half of the panel plot ncs_full_pptop10_moderation_plot, njs_full_pptop10_moderation_plot, njs_topjournals_pptop10_moderation_plot, p_top_prop10_full_pptop10_moderation_plot, leiden_difference_plot, common.legend = T, legend = "bottom", ncol=1,nrow=6, align = "v") %>% annotate_figure(top = text_grob("Leiden", face="bold")) moderation_plot_grid <- # this makes the final panel plot of the moderation analyses ggarrange(interaction_plots_left, interaction_plots_right) %>% annotate_figure(left = text_grob("Count Estimated Coefficient for effect of moving to USA",rot = 90, size = 10, hjust = .56), bottom = text_grob("Standard deviation change in ranking score (Positive = USA higher ranked)", size = 10)) ggexport(moderation_plot_grid, filename = "plots/Fig3. Moderation.pdf") #saving the plot # step 2d. Creating tables of the modeation results tab_model(pfull_qs, #table of QS ranking moderation ncs_full_qs, njs_full_qs, njs_topjournals_qs, p_top_prop10_full_qs, show.p = F, show.re.var =F, file = "tables/table2 moderation qs.doc") tab_model(pfull_pptop10, #table of leiden ranking moderation ncs_full_pptop10, njs_full_pptop10, njs_topjournals_pptop10, p_top_prop10_full_pptop10, show.p = F, show.re.var =F, file = "tables/table3 moderation leiden.doc") ################################################################### ############ ANALYSIS STEP 3. MEDIATION ########################## ################################################################## #Step 3a. 
running the mediation analysis# #### QS RANKING ###### # p full set.seed(5030) detach("package:lmerTest", unload = T) pfull_qs_med.fit <- lmer(gelman_difference_in_qs_overall_score_zeropremove ~ post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = diffindiff_data_only_movers_qs_diff) pfull_qs_out.fit <- lmer(p_full_yearsum ~ gelman_difference_in_qs_overall_score_zeropremove + post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = diffindiff_data_only_movers_qs_diff) pfull_qs_med.out <- mediate(pfull_qs_med.fit, pfull_qs_out.fit, treat = "post_move", mediator = "gelman_difference_in_qs_overall_score_zeropremove", robustSE = F, sims = 1000) med_qs_pubs <- summary(pfull_qs_med.out) # citation score ncs_qs_mediation_data <- diffindiff_data_only_movers_qs_diff %>% filter(!is.na(ncs_full_mean)) ncs_qs_med.fit <- lmer(gelman_difference_in_qs_overall_score_zeropremove ~ post_move + career_year + origin_qs_overall_score_mean + (1|cluster_id), data = ncs_qs_mediation_data) ncs_qs_out.fit <- lmer(ncs_full_mean ~ gelman_difference_in_qs_overall_score_zeropremove + post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = ncs_qs_mediation_data) ncs_qs_med.out <- mediate(ncs_qs_out.fit, ncs_qs_out.fit, treat = "post_move", mediator = "gelman_difference_in_qs_overall_score_zeropremove", robustSE = F, sims = 1000) med_qs_ncs <- summary(ncs_qs_med.out) # journal score njs_qs_mediation_data <- diffindiff_data_only_movers_qs_diff %>% filter(!is.na(njs_full_mean)) njs_qs_med.fit <- lmer(gelman_difference_in_qs_overall_score_zeropremove ~ post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = njs_qs_mediation_data) njs_qs_out.fit <- lmer(njs_full_mean ~ gelman_difference_in_qs_overall_score_zeropremove + post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = njs_qs_mediation_data) njs_qs_med.out <- mediate(njs_qs_med.fit, njs_qs_out.fit, treat = "post_move", mediator = 
"gelman_difference_in_qs_overall_score_zeropremove", robustSE = F, sims = 1000) med_qs_njs <- summary(njs_qs_med.out) # top journals topjoural_qs_med.fit <- lmer(gelman_difference_in_qs_overall_score_zeropremove ~ post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = diffindiff_data_only_movers_qs_diff) topjournal_qs_out.fit <- lmer(njs_full_over2_yearsum ~ gelman_difference_in_qs_overall_score_zeropremove + post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = diffindiff_data_only_movers_qs_diff) topjournal_qs_med.out <- mediate(topjoural_qs_med.fit, topjournal_qs_out.fit, treat = "post_move", mediator = "gelman_difference_in_qs_overall_score_zeropremove", robustSE = F, sims = 1000) med_qs_topjournals <- summary(topjournal_qs_med.out) # top 10% top10_qs_med.fit <- lmer(gelman_difference_in_qs_overall_score_zeropremove ~ post_move + career_year + origin_qs_overall_score_mean+ (1|cluster_id), data = diffindiff_data_only_movers_qs_diff) top10_qs_out.fit <- lmer(p_top_prop10_full_yearsum ~ gelman_difference_in_qs_overall_score_zeropremove + post_move + career_year + origin_qs_overall_score_mean +(1|cluster_id), data = diffindiff_data_only_movers_qs_diff) top10_qs_med.out <- mediate(top10_qs_med.fit, top10_qs_out.fit, treat = "post_move", mediator = "gelman_difference_in_qs_overall_score_zeropremove", robustSE = F, sims = 1000) med_qs_pptop10 <- summary(top10_qs_med.out) ##### LEIDEN RANKING ###### # p full pfull_med.fit <- lmer(gelman_difference_in_pptop10_zeropremove ~ post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff) pfull_out.fit <- lmer(p_full_yearsum ~ gelman_difference_in_pptop10_zeropremove + post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff) pfull_med.out <- mediate(pfull_med.fit, pfull_out.fit, treat = "post_move", mediator = "gelman_difference_in_pptop10_zeropremove", robustSE = T, sims = 
1000) med_leiden_pubs <- summary(pfull_med.out) # citation score ncs_mediation_data <- diffindiff_data_only_movers_leiden_diff %>% filter(!is.na(ncs_full_mean)) ncs_med.fit <- lmer(gelman_difference_in_pptop10_zeropremove ~ post_move + career_year + origin_pp_top10_mean + (1|cluster_id), data = ncs_mediation_data) ncs_out.fit <- lmer(ncs_full_mean ~ gelman_difference_in_pptop10_zeropremove + post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = ncs_mediation_data) ncs_med.out <- mediate(ncs_med.fit, ncs_out.fit, treat = "post_move", mediator = "gelman_difference_in_pptop10_zeropremove", robustSE = F, sims = 1000) med_leiden_ncs <- summary(ncs_med.out) # journal score njs_mediation_data <- diffindiff_data_only_movers_leiden_diff %>% filter(!is.na(njs_full_mean)) njs_med.fit <- lmer(gelman_difference_in_pptop10_zeropremove ~ post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = njs_mediation_data) njs_out.fit <- lmer(njs_full_mean ~ gelman_difference_in_pptop10_zeropremove + post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = njs_mediation_data) njs_med.out <- mediate(njs_med.fit, njs_out.fit, treat = "post_move", mediator = "gelman_difference_in_pptop10_zeropremove", robustSE = F, sims = 1000) med_leiden_njs <- summary(njs_med.out) # top journals topjoural_med.fit <- lmer(gelman_difference_in_pptop10_zeropremove ~ post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff) topjournal_out.fit <- lmer(njs_full_over2_yearsum ~ gelman_difference_in_pptop10_zeropremove + post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff) topjournal_med.out <- mediate(topjoural_med.fit, topjournal_out.fit, treat = "post_move", mediator = "gelman_difference_in_pptop10_zeropremove", robustSE = F, sims = 1000) med_leiden_topjournals <- summary(topjournal_med.out) # top 10% top10_med.fit <- 
lmer(gelman_difference_in_pptop10_zeropremove ~ post_move + career_year + origin_pp_top10_mean+ (1|cluster_id), data = diffindiff_data_only_movers_leiden_diff) top10_out.fit <- lmer(p_top_prop10_full_yearsum ~ gelman_difference_in_pptop10_zeropremove + post_move + career_year + origin_pp_top10_mean +(1|cluster_id), data = diffindiff_data_only_movers_leiden_diff) top10_med.out <- mediate(top10_med.fit, top10_out.fit, treat = "post_move", mediator = "gelman_difference_in_pptop10_zeropremove", robustSE = F, sims = 1000) med_leiden_pptop10 <- summary(top10_med.out) #step 3b creating a table with the mediation analysis results# mediation_table <- c("", "", "publications","", "", "ncs","", "", "njs","", "", "top journals", "","", "top cited", "","") %>% rbind(c("ranking", "mediation", "estimate", "CI_l","CI_U", "estimate", "CI_l","CI_U", "estimate", "CI_l","CI_U", "estimate", "CI_l","CI_U", "estimate", "CI_l","CI_U"))%>% rbind(c("QS", "ACME", round(med_qs_pubs$d0, 2), paste(round(med_qs_pubs$d0.ci,2)), round(med_qs_ncs$d0,2), paste(round(med_qs_ncs$d0.ci,2)),round(med_qs_njs$d0,2), paste(round(med_qs_njs$d0.ci,2)),round(med_qs_topjournals$d0,2), paste(round(med_qs_topjournals$d0.ci,2)),round(med_qs_pptop10$d0,2), paste(round(med_qs_pptop10$d0.ci,2)))) %>% rbind(c("QS", "ADE", round(med_qs_pubs$z0, 2), paste(round(med_qs_pubs$z0.ci,2)), round(med_qs_ncs$z0,2), paste(round(med_qs_ncs$z0.ci,2)),round(med_qs_njs$z0,2), paste(round(med_qs_njs$z0.ci,2)),round(med_qs_topjournals$z0,2), paste(round(med_qs_topjournals$z0.ci,2)),round(med_qs_pptop10$z0,2), paste(round(med_qs_pptop10$z0.ci,2)))) %>% rbind(c("QS", "Total Effect", round(med_qs_pubs$tau.coef, 2), paste(round(med_qs_pubs$tau.ci,2)), round(med_qs_ncs$tau.coef,2), paste(round(med_qs_ncs$tau.ci,2)),round(med_qs_njs$tau.coef,2), paste(round(med_qs_njs$tau.ci,2)),round(med_qs_topjournals$tau.coef,2), paste(round(med_qs_topjournals$tau.ci,2)),round(med_qs_pptop10$tau.coef,2), paste(round(med_qs_pptop10$tau.ci,2)))) %>% 
rbind(c("QS", "Prop. Mediated", round(med_qs_pubs$n0, 2), paste(round(med_qs_pubs$n0.ci,2)), round(med_qs_ncs$n0,2), paste(round(med_qs_ncs$n0.ci,2)),round(med_qs_njs$n0,2), paste(round(med_qs_njs$n0.ci,2)),round(med_qs_topjournals$n0,2), paste(round(med_qs_topjournals$n0.ci,2)),round(med_qs_pptop10$n0,2), paste(round(med_qs_pptop10$n0.ci,2)))) %>% rbind(c("", "", "","", "", "","", "", "","", "", "", "","", "", "","")) %>% rbind(c("Leiden", "ACME", round(med_qs_pubs$d0, 2), paste(round(med_leiden_pubs$d0.ci,2)), round(med_leiden_ncs$d0,2), paste(round(med_leiden_ncs$d0.ci,2)),round(med_leiden_njs$d0,2), paste(round(med_leiden_njs$d0.ci,2)),round(med_leiden_topjournals$d0,2), paste(round(med_leiden_topjournals$d0.ci,2)),round(med_leiden_pptop10$d0,2), paste(round(med_leiden_pptop10$d0.ci,2)))) %>% rbind(c("Leiden", "ADE", round(med_leiden_pubs$z0, 2), paste(round(med_leiden_pubs$z0.ci,2)), round(med_leiden_ncs$z0,2), paste(round(med_leiden_ncs$z0.ci,2)),round(med_leiden_njs$z0,2), paste(round(med_leiden_njs$z0.ci,2)),round(med_leiden_topjournals$z0,2), paste(round(med_leiden_topjournals$z0.ci,2)),round(med_leiden_pptop10$z0,2), paste(round(med_leiden_pptop10$z0.ci,2)))) %>% rbind(c("Leiden", "Total Effect", round(med_leiden_pubs$tau.coef, 2), paste(round(med_leiden_pubs$tau.ci,2)), round(med_leiden_ncs$tau.coef,2), paste(round(med_leiden_ncs$tau.ci,2)),round(med_leiden_njs$tau.coef,2), paste(round(med_leiden_njs$tau.ci,2)),round(med_leiden_topjournals$tau.coef,2), paste(round(med_leiden_topjournals$tau.ci,2)),round(med_leiden_pptop10$tau.coef,2), paste(round(med_leiden_pptop10$tau.ci,2)))) %>% rbind(c("Leiden", "Prop. 
Mediated", round(med_leiden_pubs$n0, 2), paste(round(med_leiden_pubs$n0.ci,2)), round(med_leiden_ncs$n0,2), paste(round(med_leiden_ncs$n0.ci,2)),round(med_leiden_njs$n0,2), paste(round(med_leiden_njs$n0.ci,2)),round(med_leiden_topjournals$n0,2), paste(round(med_leiden_topjournals$n0.ci,2)),round(med_leiden_pptop10$n0,2), paste(round(med_leiden_pptop10$n0.ci,2)))) %>% as_tibble() %>% unite(col = "pubs_CI",V4:V5, sep = ", ") %>% unite(col = "ncs_CI",V7:V8, sep = ", ") %>% unite(col = "njs_CI",V10:V11, sep = ", ") %>% unite(col = "topjournals_CI",V13:V14, sep = ", ") %>% unite(col = "pptop10_CI",V16:V17, sep = ", ") write.csv(mediation_table, "tables/table4. mediation table.csv") ################## ANALYSIS sSTEP 4. DIFFERENCES IN H-INDEX 5 YEARS AFTER THE MOVE ################## researchers_with_five_years_post_move <- matched_dataset %>% filter(years_from_obtaining_usa_affilation == 5, career_over == 0) %>% group_by(pair_id) %>% mutate(number_in_pair_with5years = n()) %>% filter(number_in_pair_with5years == 2) %>% distinct(cluster_id) hindex_at_5years_post <- researchers_with_five_years_post_move %>% left_join(matched_dataset) %>% filter(years_from_obtaining_usa_affilation <= 5) %>% group_by(cluster_id) %>% summarise(condition = first(condition), sum_ncs = sum(ncs_frac_yearsum), post_move_h_index = 0.54*sqrt(sum_ncs)) %>% select(cluster_id, condition, post_move_h_index) #some people have zero, this may be due to missing data in our dataset h_index_on_year_prior <- researchers_with_five_years_post_move %>% left_join(matched_dataset) %>% filter(years_from_obtaining_usa_affilation < 0) %>% group_by(cluster_id) %>% summarise(condition = first(condition), sum_ncs = sum(ncs_frac_yearsum), pre_move_h_index = 0.54*sqrt(sum_ncs)) %>% select(cluster_id, condition, pre_move_h_index) h_index_data <- h_index_on_year_prior %>% left_join(hindex_at_5years_post) %>% mutate(change_in_hindex = post_move_h_index-pre_move_h_index) h_index_data_new <- h_index_data %>% 
pivot_longer(!c(cluster_id,condition, change_in_hindex), names_to = "pre_or_post", values_to = "hindex") %>% mutate(pre_or_post = factor(gsub("_h_index", "",pre_or_post), levels = c("pre_move", "post_move"), labels = c("Pre-move", "Post-move")), condition = factor(condition, levels = c("stayers", "movers"), labels = c("Stayers", "Movers"))) #descriptive plots for 5-years plot descriptive_5yearspost_data <- researchers_with_five_years_post_move %>% left_join(matched_dataset) %>% filter(years_from_obtaining_usa_affilation <= 5 & years_from_obtaining_usa_affilation >= -2) %>% group_by(condition, years_from_obtaining_usa_affilation) %>% summarise(mean_p_full = mean(p_full_yearsum), mean_ncs_full_mean = mean(ncs_full_mean, na.rm=T), mean_njs_full_mean = mean(njs_full_mean, na.rm=T), mean_njs_full_over2_yearsum = mean(njs_full_over2_yearsum), mean_p_top_prop10_full_yearsum =mean(p_top_prop10_full_yearsum), median_p_full = median(p_full_yearsum), median_ncs_full_mean = median(ncs_full_mean, na.rm=T), median_njs_full_mean = median(njs_full_mean, na.rm=T), median_njs_full_over2_yearsum = median(njs_full_over2_yearsum), median_p_top_prop10_full_yearsum =median(p_top_prop10_full_yearsum), sd_p_full = sd(p_full_yearsum), sd_ncs_full_mean = sd(ncs_full_mean, na.rm=T), sd_mean_njs_full_mean = sd(njs_full_mean, na.rm = T), sd_mean_njs_full_over2_yearsum = sd(njs_full_over2_yearsum), sd_mean_p_top_prop10_full_yearsum = sd(p_top_prop10_full_yearsum), se_p_full = sd_p_full/sqrt(n()), se_ncs_full_mean = sd_ncs_full_mean/sqrt(n()), se_mean_njs_full_mean = sd_mean_njs_full_mean/sqrt(n()), se_njs_full_over2_yearsum = sd_mean_njs_full_over2_yearsum/sqrt(n()), se_mean_p_top_prop10_full_yearsum = sd_mean_p_top_prop10_full_yearsum/sqrt(n()) ) descriptive_5yearsplot_pubs.plot <- ggplot(descriptive_5yearspost_data, aes( years_from_obtaining_usa_affilation,mean_p_full, group = condition, colour = condition, shape = condition)) + geom_point(size = 3) + geom_line(size = 1) + geom_line(data = 
descriptive_5yearspost_data, aes(years_from_obtaining_usa_affilation,median_p_full), linetype = "dotted", size = 1, position = position_jitter(w=0.05, h=0),alpha = 0.6) + geom_errorbar(aes(ymin = mean_p_full-se_p_full, ymax = mean_p_full+se_p_full), width = 0.2) + theme_classic()+ theme(legend.position="bottom", legend.title = element_blank()) + scale_color_manual(values=c("palevioletred","lightblue")) + ylab("Average (SE) number of publications") + xlab("Years from move") + scale_x_continuous(breaks = seq(-2,5,1)) descriptive_5yearsplot_ncs.plot <- ggplot(descriptive_5yearspost_data, aes( years_from_obtaining_usa_affilation,mean_ncs_full_mean, group = condition, colour = condition, shape = condition)) + geom_point(size = 3) + geom_line(size = 1) + geom_line(data = descriptive_5yearspost_data, aes(years_from_obtaining_usa_affilation,median_ncs_full_mean), linetype = "dotted", size = 1, position = position_jitter(w=0.05, h=0),alpha = 0.6) + geom_errorbar(aes(ymin = mean_ncs_full_mean-se_ncs_full_mean, ymax = mean_ncs_full_mean+se_ncs_full_mean), width = 0.2) + theme_classic()+ theme(legend.position="bottom", legend.title = element_blank()) + scale_color_manual(values=c("palevioletred","lightblue")) + ylab("Average (SE) citation score") + xlab("Years from move") + scale_x_continuous(breaks = seq(-2,5,1)) descriptive_5yearsplot_njs.plot <- ggplot(descriptive_5yearspost_data, aes( years_from_obtaining_usa_affilation,mean_njs_full_mean, group = condition, colour = condition, shape = condition)) + geom_point(size = 3) + geom_line(size = 1) + geom_line(data = descriptive_5yearspost_data, aes(years_from_obtaining_usa_affilation,median_njs_full_mean), linetype = "dotted", size = 1, position = position_jitter(w=0.05, h=0),alpha = 0.6) + geom_errorbar(aes(ymin = mean_njs_full_mean-se_mean_njs_full_mean, ymax = mean_njs_full_mean+se_mean_njs_full_mean),width = 0.2) + theme_classic()+ theme(legend.position="bottom", legend.title = element_blank()) + 
scale_color_manual(values=c("palevioletred","lightblue")) + ylab("Average (SE) journal score") + xlab("Years from move") + scale_x_continuous(breaks = seq(-2,5,1)) descriptive_5yearsplot_topjournals.plot <- ggplot(descriptive_5yearspost_data, aes( years_from_obtaining_usa_affilation,mean_njs_full_over2_yearsum, group = condition, colour = condition, shape = condition)) + geom_point(size = 3) + geom_line(size = 1) + geom_line(data = descriptive_5yearspost_data, aes(years_from_obtaining_usa_affilation,median_njs_full_over2_yearsum), linetype = "dotted", size = 1, position = position_jitter(w=0.05, h=0),alpha = 0.6) + geom_errorbar(aes(ymin = mean_njs_full_over2_yearsum-se_njs_full_over2_yearsum, ymax = mean_njs_full_over2_yearsum+se_njs_full_over2_yearsum),width = 0.2) + theme_classic()+ theme(legend.position="bottom", legend.title = element_blank()) + scale_color_manual(values=c("palevioletred","lightblue")) + ylab("Average (SE) sum of top journal publications") + xlab("Years from move") + scale_x_continuous(breaks = seq(-2,5,1)) descriptive_5yearsplot_topcited.plot <- ggplot(descriptive_5yearspost_data, aes( years_from_obtaining_usa_affilation,mean_p_top_prop10_full_yearsum, group = condition, colour = condition, shape = condition)) + geom_point(size = 3) + geom_line(size = 1) + geom_line(data = descriptive_5yearspost_data, aes(years_from_obtaining_usa_affilation,median_p_top_prop10_full_yearsum), linetype = "dotted", size = 1, position = position_jitter(w=0.05, h=0),alpha = 0.6) + geom_errorbar(aes(ymin = mean_p_top_prop10_full_yearsum-se_mean_p_top_prop10_full_yearsum, ymax = mean_p_top_prop10_full_yearsum+se_mean_p_top_prop10_full_yearsum),width = 0.2) + theme_classic()+ theme(legend.position="bottom", legend.title = element_blank()) + scale_color_manual(values=c("palevioletred","lightblue")) + ylab("Average (SE) sum of top cited papers") + xlab("Years from move") + scale_x_continuous(breaks = seq(-2,5,1)) ## GeomSplitViolin <- ggproto( "GeomSplitViolin", 
GeomViolin, draw_group = function(self, data, ..., draw_quantiles = NULL) { data <- transform(data, xminv = x - violinwidth * (x - xmin), xmaxv = x + violinwidth * (xmax - x) ) grp <- data[1, "group"] newdata <- plyr::arrange( transform(data, x = if (grp %% 2 == 1) xminv else xmaxv), if (grp %% 2 == 1) y else -y ) newdata <- rbind(newdata[1, ], newdata, newdata[nrow(newdata), ], newdata[1, ]) newdata[c(1, nrow(newdata) - 1, nrow(newdata)), "x"] <- round(newdata[1, "x"]) if (length(draw_quantiles) > 0 & !scales::zero_range(range(data$y))) { stopifnot(all(draw_quantiles >= 0), all(draw_quantiles <= 1)) quantiles <- ggplot2:::create_quantile_segment_frame(data, draw_quantiles) aesthetics <- data[rep(1, nrow(quantiles)), setdiff(names(data), c("x", "y")), drop = FALSE] aesthetics$alpha <- rep(1, nrow(quantiles)) both <- cbind(quantiles, aesthetics) quantile_grob <- GeomPath$draw_panel(both, ...) ggplot2:::ggname( "geom_split_violin", grid::grobTree(GeomPolygon$draw_panel(newdata, ...), quantile_grob) ) } else { ggplot2:::ggname("geom_split_violin", GeomPolygon$draw_panel(newdata, ...)) } } ) geom_split_violin <- function(mapping = NULL, data = NULL, stat = "ydensity", position = "identity", ..., draw_quantiles = NULL, trim = TRUE, scale = "area", na.rm = FALSE, show.legend = NA, inherit.aes = TRUE) { layer( data = data, mapping = mapping, stat = stat, geom = GeomSplitViolin, position = position, show.legend = show.legend, inherit.aes = inherit.aes, params = list( trim = trim, scale = scale, draw_quantiles = draw_quantiles, na.rm = na.rm, ... 
) ) } violinplot_hindex <- ggplot(h_index_data_new, aes(pre_or_post,hindex, fill = condition)) + geom_split_violin() + theme_classic()+ stat_summary(fun = median, geom = "crossbar", width = 0.25, position = position_dodge(width = .25), ) + scale_fill_manual(values = c("lightblue", "palevioletred")) + ylab("'fractionalised h-index'") + xlab("Pre vs post (5 yrs) move") + ylim(0, 2.8) descriptive_5yearsplot.panelplot <- ggarrange(descriptive_5yearsplot_pubs.plot,descriptive_5yearsplot_ncs.plot,descriptive_5yearsplot_njs.plot,descriptive_5yearsplot_topjournals.plot,descriptive_5yearsplot_topcited.plot,violinplot_hindex, common.legend = T, labels = "AUTO",vjust =-0.0, hjust =-2) ggsave(descriptive_5yearsplot.panelplot, filename = "plots/Fig4. 5yearplot_a.pdf")
classifieur_astronomie <- function(dataset) {
  # Classify astronomy observations with the pre-trained random forest.
  #
  # dataset: data.frame of raw astronomy columns (including objid/rerun).
  # Returns the random forest's predicted classes, one per row.
  library(randomForest)
  library(fastDummies)
  load("env.Rdata")  # provides the fitted model `rf.astro`

  # Drop identifier columns that carry no predictive signal.
  dataset <- dataset[, -which(names(dataset) == "objid")]
  dataset <- dataset[, -which(names(dataset) == "rerun")]
  # `camcol` is a camera-column id, not a quantity: treat it as categorical.
  dataset$camcol <- as.factor(dataset$camcol)

  predict(rf.astro, newdata = dataset, type = "class")
}

regresseur_mais <- function(dataset) {
  # Predict maize yield with the pre-trained support-vector regression.
  #
  # dataset: data.frame whose first column ("X") is a row index to discard.
  # Returns a numeric vector of predictions.
  library(kernlab)
  load("env.Rdata")  # provides the fitted model `svr`

  # Delete the index column "X".
  dataset <- dataset[, -1]
  predict(svr, newdata = dataset)
}

classifieur_images <- function(dataset) {
  # Classify image files as "car", "cat" or "flower" with a saved CNN.
  #
  # dataset: character vector of image file paths.
  # Returns a character vector of class labels, one per image.
  library(keras)
  library(EBImage)
  model <- load_model_hdf5("model.h5")

  # Read and resize every image to the 32x32 input expected by the CNN
  # (the original used two separate index loops; one pass suffices).
  images <- lapply(dataset, function(path) resize(readImage(path), 32, 32))

  # Stack into a (n, 32, 32, 3) array: a single image needs an explicit
  # leading batch dimension, while combine() on several images puts the
  # batch axis last, so it must be permuted to the front.
  if (length(dataset) == 1) {
    images <- array(combine(images), dim = c(1, 32, 32, 3))
  } else {
    images <- aperm(combine(images), c(4, 1, 2, 3))
  }

  pred <- model %>% predict_classes(images)

  # Map integer classes 0/1/2 to their labels by direct lookup.
  # (Replaces `output <- rep(1:length(pred), 0)`, which built an EMPTY
  # vector and then grew it element by element with type coercion.)
  c("car", "cat", "flower")[pred + 1]
}
/projet/classifieurs.R
no_license
EnzoSeason/Machine-Learning-SY19
R
false
false
1,368
r
# Prediction entry points for three course sub-projects; each function
# loads its pre-fitted model from "env.Rdata" (or "model.h5") and returns
# predictions for `dataset`.

# Classify astronomy observations with a pre-trained random forest.
# `dataset` is a data.frame of raw columns; returns predicted classes.
classifieur_astronomie <- function(dataset){
  library('randomForest')
  library("fastDummies")
  load("env.Rdata")  # provides the fitted model `rf.astro`
  # Drop identifier columns with no predictive value.
  dataset<-dataset[,-which(names(dataset)=='objid')];
  dataset<-dataset[,-which(names(dataset)=='rerun')]
  # Camera column is an id, not a quantity: treat it as categorical.
  dataset$camcol<-as.factor(dataset$camcol)
  pred<-predict(rf.astro,newdata=dataset,type='class')
  return(pred)
}

# Predict maize yield with a pre-trained support-vector regression.
# `dataset` is a data.frame whose first column "X" is a row index.
regresseur_mais <- function(dataset){
  library('kernlab')
  load("env.Rdata")  # provides the fitted model `svr`
  # delete the column "X"
  dataset<-dataset[,-1]
  pred<-predict(svr,newdata=dataset)
  return(pred)
}

# Classify image files as "car"/"cat"/"flower" with a saved Keras CNN.
# `dataset` is a character vector of image file paths; returns one label
# per image.
classifieur_images <- function(dataset) {
  library(keras)
  library(EBImage)
  model<-load_model_hdf5("model.h5")
  images<-list()
  # read images
  for(i in 1:length(dataset)){
    images[[i]]<-readImage(dataset[i]);
  }
  # resize images to the 32x32 input expected by the CNN
  for(i in 1:length(dataset)){
    images[[i]]<-resize(images[[i]], 32,32)
  }
  # transform images for the input of CNN: a single image needs an
  # explicit leading batch dimension; combine() on several images puts
  # the batch axis last, so it is permuted to the front.
  if(length(dataset) == 1){
    images<-combine(images)
    images<-array(images, dim = c(1, 32, 32,3))
  } else {
    images<-combine(images)
    images<-aperm(images, c(4,1,2,3))
  }
  # predict integer classes 0/1/2
  pred<-model %>% predict_classes(images)
  # NOTE(review): rep(1:length(pred), 0) yields an EMPTY vector that is
  # then grown element-wise below (with coercion to character);
  # character(length(pred)) would be the cleaner preallocation.
  output<-rep(1:length(pred),0)
  for (i in 1:length(pred)){
    if (pred[i] == 0){
      output[i] = "car"
    }
    if (pred[i] == 1){
      output[i] = "cat"
    }
    if (pred[i] == 2){
      output[i] = "flower"
    }
  }
  return(output)
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods.R \docType{methods} \name{summary,MLZ_data-method} \alias{summary,MLZ_data-method} \title{\code{summary} method for S4 class \code{MLZ_data}} \usage{ \S4method{summary}{MLZ_data}(object) } \arguments{ \item{object}{An object of class \code{MLZ_data}.} } \description{ \code{summary} method for S4 class \code{MLZ_data} } \examples{ data(MuttonSnapper) summary(MuttonSnapper) }
/man/summary-MLZ_data-method.Rd
no_license
quang-huynh/MLZ
R
false
true
484
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods.R \docType{methods} \name{summary,MLZ_data-method} \alias{summary,MLZ_data-method} \title{\code{summary} method for S4 class \code{MLZ_data}} \usage{ \S4method{summary}{MLZ_data}(object) } \arguments{ \item{object}{An object of class \code{MLZ_data}.} } \description{ \code{summary} method for S4 class \code{MLZ_data} } \examples{ data(MuttonSnapper) summary(MuttonSnapper) }
# Word-cloud / frequency analysis of negative consumer reviews.
# Reads a tab-delimited file of reviews (interactively chosen), cleans the
# text with the tm pipeline, and renders a frequency barplot plus two
# word clouds.

library(DT)
library(tidytext)
library(dplyr)
library(stringr)
library(sentimentr)
library(ggplot2)
library(RColorBrewer)
library(readr)
library(SnowballC)
library(tm)
library(wordcloud)
library(reticulate)
library(crfsuite)

# With the help of R code, negative, positive and neutral reviews were
# differentiated; the split files were saved with write.table().
negative_Reviews <- read.delim(file.choose())
View(negative_Reviews)
str(negative_Reviews)
# attach() exposes the review text column `x` directly — assumes the
# file's review column is named `x`.
attach(negative_Reviews)

negative_Reviews <- as.character(x)
negative_courpus <- Corpus(VectorSource(x))
print(negative_Reviews)

# Standard tm cleaning pipeline: lower-case, strip punctuation, numbers
# and extra whitespace, then drop English stop words.
negative_courpus <- tm_map(negative_courpus, tolower)
inspect(negative_courpus[1:6])
negative_courpus <- tm_map(negative_courpus, removePunctuation)
inspect(negative_courpus[1:6])
negative_courpus <- tm_map(negative_courpus, removeNumbers)
inspect(negative_courpus[1:6])
negative_courpus <- tm_map(negative_courpus, stripWhitespace)
inspect(negative_courpus[1:6])
cleanset_negative <- tm_map(negative_courpus, removeWords, stopwords('english'))
inspect(cleanset_negative[1:6])

tdm_negative <- TermDocumentMatrix(cleanset_negative)
tdm_negative
tdm_negative <- as.matrix(tdm_negative)
# Fixed: the original inspected `tdm`, an undefined object, which errors;
# the matrix built above is `tdm_negative`.
tdm_negative[1:10, 1:20]

w <- rowSums(tdm_negative)  # number of times each word has been used
w <- subset(w, w >= 10)     # keep words used at least 10 times
barplot(w, las = 2, col = rainbow(50))

# Word cloud: most frequent words first; fixed seed for a reproducible
# layout.
w <- sort(rowSums(tdm_negative), decreasing = TRUE)
set.seed(123)
wordcloud(words = names(w), freq = w, max.words = 250, random.order = FALSE,
          min.freq = 3, colors = brewer.pal(8, 'Dark2'),
          scale = c(5, 0.3), rot.per = 0.6)

library(wordcloud2)
w <- data.frame(names(w), w)
colnames(w) <- c('word', 'freq')
wordcloud2(w, size = 0.5, shape = 'triangle', rotateRatio = 0.5, minSize = 2)
/Negative_Cloud + Reviews.R
no_license
sarabusaiprashanth/Sentiment-Analysis-On-Consumer-Reviews
R
false
false
2,088
r
# Word-cloud / frequency analysis of negative consumer reviews using the
# tm text-mining pipeline.
library(DT)
library(tidytext)
library(dplyr)
library(stringr)
library(sentimentr)
library(ggplot2)
library(RColorBrewer)
library(readr)
library(SnowballC)
library(tm)
library(wordcloud)
library(reticulate)
library(crfsuite)

# With the help of R code, negative, positive and neutral reviews were
# differentiated; the resulting files were saved with write.table().
negative_Reviews <- read.delim(file.choose())
View(negative_Reviews)
str(negative_Reviews)
# attach() exposes the review text column `x` directly — assumes the
# file's review column is named `x`; TODO confirm against the input file.
attach(negative_Reviews)

negative_Reviews <- as.character(x)
negative_courpus <- Corpus(VectorSource(x))
print(negative_Reviews)

# Cleaning pipeline: lower-case, strip punctuation, numbers and extra
# whitespace, then drop English stop words.
negative_courpus <- tm_map(negative_courpus, tolower)
inspect(negative_courpus[1:6])
negative_courpus <- tm_map(negative_courpus,removePunctuation)
inspect(negative_courpus[1:6])
negative_courpus <- tm_map(negative_courpus,removeNumbers)
inspect(negative_courpus[1:6])
negative_courpus <-tm_map(negative_courpus,stripWhitespace)
inspect(negative_courpus[1:6])
cleanset_negative <- tm_map(negative_courpus,removeWords, stopwords('english'))
inspect(cleanset_negative[1:6])

tdm_negative <- TermDocumentMatrix(cleanset_negative)
tdm_negative
tdm_negative <- as.matrix(tdm_negative)
# NOTE(review): `tdm` is never defined in this script (only
# `tdm_negative`), so this line errors — presumably `tdm_negative` was
# intended; confirm.
tdm[1:10,1:20]

w <- rowSums(tdm_negative) # number of times each word has been used
w <- subset(w, w>=10) # keep words used at least 10 times
barplot(w, las = 2, col = rainbow(50))

# Word Cloud:
w <- sort(rowSums(tdm_negative), decreasing = TRUE) # sort by frequency
set.seed(123)  # reproducible cloud layout
wordcloud(words = names(w), freq = w, max.words = 250,random.order = F, min.freq = 3, colors = brewer.pal(8, 'Dark2'), scale = c(5,0.3), rot.per = 0.6)

library(wordcloud2)
w <- data.frame(names(w),w)
colnames(w) <- c('word','freq')
wordcloud2(w,size = 0.5, shape = 'triangle', rotateRatio = 0.5, minSize = 2)
# ui.R — Shiny UI for the Motor Trend (mtcars) analysis app.
# Tab 1 ("Analysis"): plot MPG against a user-chosen mtcars variable.
# Tab 2 ("Prediction"): predict MPG from transmission type, horsepower
#   and weight (the linear model itself lives in the server component).
# Tab 3 ("SourceCode"): link to the project repository.
library(shiny)

shinyUI(
  navbarPage("Motor Trend Analysis",
    tabPanel("Analysis",
      fluidPage(
        titlePanel("Comparison of Automatic vs. Manual Transmission on Auto Miles Per Gallon"),
        sidebarLayout(
          sidebarPanel(
            # Explanatory variable to plot against MPG; values are mtcars
            # column names, labels are human-readable descriptions.
            selectInput("variable", "Variable:",
                        c("Number of cylinders" = "cyl",
                          "Displacement (cu.in.)" = "disp",
                          "Gross Horsepower" = "hp",
                          "Rear Axle Ratio" = "drat",
                          "Weight (lb/1000)" = "wt",
                          "1/4 Mile Time" = "qsec",
                          "V/S" = "vs",
                          "# of Forward Gears" = "gear",
                          "# of Carburetors" = "carb"
                        )),
            br(),
            p("INSTRUCTION: The application plots the relationship between variables and miles per gallon (MPG).")
          ),
          mainPanel(
            # Output slots "plot" and "summary" are filled by the server.
            tabPanel("Plot", plotOutput("plot")),
            br(),
            h3("Summary"),
            tabPanel("Summary", verbatimTextOutput("summary"))
          )
        )
      )
    ),
    tabPanel("Prediction",
      fluidPage(
        titlePanel("MPG Prediction Based on MTCARS Data"),
        sidebarLayout(
          sidebarPanel(
            # Model inputs: transmission type, horsepower, weight.
            radioButtons("trans", "Transmission Type:",
                         list("Automatic" = "Automatic",
                              "Manual" = "Manual")),
            #sliderInput("hp", "Vehicle Horsepower:", 50, 200, mean(mtcars$hp), step=10),
            numericInput("hp", "Vehicle Horsepower:", mean(mtcars$hp), min=50, max=300),
            numericInput("wt", "Vehicle Weight (in 1000 lbs):", mean(mtcars$wt), min=1, max=100),
            br(),
            p("INSTRUCTION: The application uses linear regression shown in the formula to predict the MPG."),
            p("3 parameters are used: transmission type, car horsepower and weight in x1000lb."),
            p("Predicted result is shown in main panel under 'Predicted MPG'.")
          ),
          mainPanel(
            h3('Regression Model Formula:'),
            # MathJax-rendered regression equation shown to the user.
            h2(withMathJax('$$MPG = \\beta_0 + \\beta_{1}AM + \\beta_{2}HP + \\beta_{3}WT + \\epsilon$$')),
            # Output slots mpg/h/t/w/coef are filled by the server.
            h4("Predicted MPG:"),
            verbatimTextOutput("mpg"),
            h4("Horsepower:"),
            verbatimTextOutput("h"),
            h4("Transmission:"),
            verbatimTextOutput("t"),
            h4("Weight (lbs):"),
            verbatimTextOutput("w"),
            h4("Regression Model Coefficient:"),
            verbatimTextOutput("coef")
          )
        )
      )
    ),
    tabPanel("SourceCode",
      p("Developing_Data_Products: Shiny Assignment"),
      a("https://github.com/jcbeck/Developing_Data_Products")
    )
  )
)
/ui.R
no_license
jcbeck/Developing_Data_Products
R
false
false
3,786
r
# ui.R — Shiny user interface for the Motor Trend (mtcars) app.
# Three tabs: an exploratory MPG plot, a linear-model MPG predictor, and
# a link to the source repository. All reactive outputs referenced here
# (plot, summary, mpg, h, t, w, coef) are rendered by the server
# component, which is outside this file.
library(shiny)

shinyUI(
  navbarPage("Motor Trend Analysis",
    tabPanel("Analysis",
      fluidPage(
        titlePanel("Comparison of Automatic vs. Manual Transmission on Auto Miles Per Gallon"),
        sidebarLayout(
          sidebarPanel(
            # Choice values are mtcars column names; labels describe them.
            selectInput("variable", "Variable:",
                        c("Number of cylinders" = "cyl",
                          "Displacement (cu.in.)" = "disp",
                          "Gross Horsepower" = "hp",
                          "Rear Axle Ratio" = "drat",
                          "Weight (lb/1000)" = "wt",
                          "1/4 Mile Time" = "qsec",
                          "V/S" = "vs",
                          "# of Forward Gears" = "gear",
                          "# of Carburetors" = "carb"
                        )),
            br(),
            p("INSTRUCTION: The application plots the relationship between variables and miles per gallon (MPG).")
          ),
          mainPanel(
            tabPanel("Plot", plotOutput("plot")),
            br(),
            h3("Summary"),
            tabPanel("Summary", verbatimTextOutput("summary"))
          )
        )
      )
    ),
    tabPanel("Prediction",
      fluidPage(
        titlePanel("MPG Prediction Based on MTCARS Data"),
        sidebarLayout(
          sidebarPanel(
            # Predictor inputs for the regression model.
            radioButtons("trans", "Transmission Type:",
                         list("Automatic" = "Automatic",
                              "Manual" = "Manual")),
            #sliderInput("hp", "Vehicle Horsepower:", 50, 200, mean(mtcars$hp), step=10),
            numericInput("hp", "Vehicle Horsepower:", mean(mtcars$hp), min=50, max=300),
            numericInput("wt", "Vehicle Weight (in 1000 lbs):", mean(mtcars$wt), min=1, max=100),
            br(),
            p("INSTRUCTION: The application uses linear regression shown in the formula to predict the MPG."),
            p("3 parameters are used: transmission type, car horsepower and weight in x1000lb."),
            p("Predicted result is shown in main panel under 'Predicted MPG'.")
          ),
          mainPanel(
            h3('Regression Model Formula:'),
            # Regression equation displayed via MathJax.
            h2(withMathJax('$$MPG = \\beta_0 + \\beta_{1}AM + \\beta_{2}HP + \\beta_{3}WT + \\epsilon$$')),
            h4("Predicted MPG:"),
            verbatimTextOutput("mpg"),
            h4("Horsepower:"),
            verbatimTextOutput("h"),
            h4("Transmission:"),
            verbatimTextOutput("t"),
            h4("Weight (lbs):"),
            verbatimTextOutput("w"),
            h4("Regression Model Coefficient:"),
            verbatimTextOutput("coef")
          )
        )
      )
    ),
    tabPanel("SourceCode",
      p("Developing_Data_Products: Shiny Assignment"),
      a("https://github.com/jcbeck/Developing_Data_Products")
    )
  )
)
# 5.5: Re-check fine-scale map-chunk differences by testing chunk
# deletions and inversions, then rebuild the linkage map (run 4).
# Relies on objects loaded from the .RData file (map.chunk.50, mapdata,
# abeldata, famped, pseudoautoSNPs, lg.sex, lg.vec) and on the
# AnalysisSuffix / AnalysisSuffix2 variables defined upstream.

library(ggplot2)
library(reshape)
library(beepr)
library(plyr)
library(dplyr)
library(GenABEL)
library(crimaptools)

load("finescale/5_Recheck_Data_Fine_Scale_Chunk_Diffs.RData")

# For each problem chunk, record the map lengths obtained after
# inverting and after deleting the chunk, alongside the initial length.
chunk.res <- NULL

for (i in seq_len(nrow(map.chunk.50))) {
  print(paste("Running problem", i, "of", nrow(map.chunk.50)))

  test <- subset(mapdata, CEL.LG == map.chunk.50$CEL.LG[i])

  invmap <- read.table(paste0("finescale/chr", map.chunk.50$CEL.LG[i], AnalysisSuffix,
                              "_cel_ckP_Re_", map.chunk.50$chunk[i], "_inv.parsemap"),
                       header = TRUE, stringsAsFactors = FALSE)
  delmap <- read.table(paste0("finescale/chr", map.chunk.50$CEL.LG[i], AnalysisSuffix,
                              "_cel_ckP_Re_", map.chunk.50$chunk[i], "_del.parsemap"),
                       header = TRUE, stringsAsFactors = FALSE)

  map.chunk.50$Inversion.Len[i] <- invmap$cMPosition[nrow(invmap)]
  map.chunk.50$Initial.Len[i]   <- test$cMPosition.run4[nrow(test)]
  map.chunk.50$Deletion.Len[i]  <- delmap$cMPosition[nrow(delmap)]

  chunk.res <- rbind(chunk.res, invmap, delmap)
}

# Fixed: these four intermediate-file paths used paste() (default
# sep = " "), which embedded spaces in the file names; paste0() matches
# every other path built in this script.
write.table(map.chunk.50,
            paste0("results/5_Recheck_MapChunk_DelInvCheck_Summary_", AnalysisSuffix2, ".txt"),
            row.names = FALSE, quote = FALSE)
write.table(chunk.res,
            paste0("results/5_Recheck_MapChunk_DelInvCheck_Full_", AnalysisSuffix2, ".txt"),
            row.names = FALSE, quote = FALSE)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# 3. For small chunks, try exclusion/inversion #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

map.chunk.50 <- read.table(paste0("results/5_Recheck_MapChunk_DelInvCheck_Summary_", AnalysisSuffix2, ".txt"),
                           header = TRUE, stringsAsFactors = FALSE)

# Change in map length caused by inverting / deleting each chunk.
map.chunk.50$Inversion.Diff <- map.chunk.50$Initial.Len - map.chunk.50$Inversion.Len
map.chunk.50$Deletion.Diff  <- map.chunk.50$Initial.Len - map.chunk.50$Deletion.Len
map.chunk.50$Order <- seq_len(nrow(map.chunk.50))

chunk.res <- read.table(paste0("results/5_Recheck_MapChunk_DelInvCheck_Full_", AnalysisSuffix2, ".txt"),
                        header = TRUE, stringsAsFactors = FALSE)

# The edited chunk id is the third "_"-separated field of analysisID,
# with its "ck" prefix removed.
chunk.res$chunk <- unlist(lapply(chunk.res$analysisID,
                                 function(x) gsub("ck", "", strsplit(x, split = "_")[[1]][3])))
names(chunk.res)[which(names(chunk.res) == "chunk")] <- "chunk.edit"
chunk.res <- subset(chunk.res, select = -Order)
chunk.res <- join(chunk.res, mapdata[, c("SNP.Name", "chunk", "cMPosition.run4", "Order")])
# Flag SNPs lying in the chunk that was actually edited ("focal").
chunk.res$chunk.focal <- ifelse(chunk.res$chunk.edit == chunk.res$chunk, "focal", "nf")

# Diagnostic plots: edited vs initial map lengths.
ggplot(map.chunk.50, aes(Initial.Len, Inversion.Len)) +
  geom_text(aes(label = Freq)) +
  geom_abline(slope = 1, intercept = 0)
ggplot(map.chunk.50, aes(Initial.Len, Deletion.Len)) +
  geom_text(aes(label = Freq)) +
  geom_abline(slope = 1, intercept = 0)
ggplot(map.chunk.50, aes(Deletion.Diff, Freq)) + geom_point()

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# 4. Remove small chunks with large deletion difference  #
#    and inversions that result in > 0.5cM shorter map   #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

# (Two leftover edit.tab assignments that were immediately overwritten
# by this rbind() have been removed.)
edit.tab <- rbind(cbind(map.chunk.50[which(map.chunk.50$Freq < 10 & map.chunk.50$Deletion.Diff > 1), ], edit = "del"),
                  cbind(map.chunk.50[which(map.chunk.50$Inversion.Diff > 0.5), ], edit = "inv"))

arrange(edit.tab, edit, Freq)
table(edit.tab$chunk)

#~~ create mapdata with the edits applied
newmap <- mapdata

for (i in seq_len(nrow(edit.tab))) {
  if (edit.tab$edit[i] == "del") {
    # Deletion: drop the chunk from the map order.
    newmap$CEL.order[which(newmap$chunk == edit.tab$chunk[i])] <- NA
  }
  if (edit.tab$edit[i] == "inv") {
    # Inversion: reverse the SNP order within the chunk.
    newmap$CEL.order[which(newmap$chunk == edit.tab$chunk[i])] <-
      rev(newmap$CEL.order[which(newmap$chunk == edit.tab$chunk[i])])
  }
}

newmap <- arrange(newmap, CEL.LG, CEL.order)
newmap <- subset(newmap, !is.na(CEL.order))

#~~ rerun crimap on the affected linkage groups
counter <- 1
for (i in sort(unique(edit.tab$CEL.LG))) {
  print(paste0("Running chromosome ", counter, " of ", length(unique(edit.tab$CEL.LG))))
  create_crimap_input(gwaa.data = abeldata,
                      familyPedigree = famped,
                      analysisID = paste0(i, AnalysisSuffix2),
                      snplist = subset(newmap, CEL.LG == i)$SNP.Name,
                      pseudoautoSNPs = pseudoautoSNPs,
                      is.X = (i == lg.sex),
                      outdir = paste0("crimap/crimap_", AnalysisSuffix2),
                      use.specific.mnd = paste0("crimap/crimap_", AnalysisSuffix, "/chr", AnalysisSuffix, "full.mnd"),
                      clear.existing.analysisID = TRUE)
  run_crimap_prepare(genfile = paste0("crimap/crimap_", AnalysisSuffix2, "/chr", i, AnalysisSuffix2, ".gen"))
  run_crimap_chrompic(genfile = paste0("crimap/crimap_", AnalysisSuffix2, "/chr", i, AnalysisSuffix2, ".gen"))
  counter <- counter + 1
}

#~~ Parse chrompic output into a single map table
fullmap <- NULL
for (lg in c(lg.vec)) {
  recombmap <- parse_map_chrompic(paste0("crimap/crimap_", AnalysisSuffix2, "/chr", lg, AnalysisSuffix2, ".cmp"))
  recombmap$Chr <- lg
  fullmap <- rbind(fullmap, recombmap)
  rm(recombmap)
}

names(fullmap)[which(names(fullmap) == "cMPosition")] <- "cMPosition.run4"
head(fullmap)

fullmap <- join(fullmap, subset(newmap, select = c(SNP.Name, BTA.Chr, BTA.Position, BTA.Order, CEL.order, CEL.LG, chunk)))
table(is.na(fullmap$CEL.LG))

# Compare run-3 vs run-4 map lengths and SNP counts per linkage group.
tapply(mapdata$cMPosition.run3, mapdata$CEL.LG, max) - tapply(fullmap$cMPosition.run4, fullmap$CEL.LG, max)
tapply(mapdata$cMPosition.run3, mapdata$CEL.LG, length) - tapply(fullmap$cMPosition.run4, fullmap$CEL.LG, length)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# 5. write everything to file and update unmapped snps   #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

ggplot(fullmap, aes(CEL.order, cMPosition.run4)) +
  geom_point() +
  facet_wrap(~Chr, scales = "free")

table(is.na(fullmap$cMPosition.run4))
table(is.na(fullmap$CEL.order))

# ggsave() with no plot argument writes the most recently displayed plot.
ggsave(filename = paste0("figs/Linkage_Map_run4_", AnalysisSuffix, ".png"),
       device = "png", width = 15, height = 10, units = "in")

ggplot(subset(fullmap, CEL.LG == 34), aes(CEL.order, cMPosition.run4)) +
  geom_point() +
  facet_wrap(~Chr, scales = "free")

write.table(fullmap,
            paste0("results/5_Linkage_Map_Positions_CEL_run4_", AnalysisSuffix, ".txt"),
            row.names = FALSE, quote = FALSE, sep = "\t")
/5.5_Recheck_Parse_Fine_Scale_Differences.R
no_license
susjoh/DeerMapv4
R
false
false
6,694
r
# 5.5: Re-check fine-scale map-chunk differences by comparing chunk
# deletion and inversion maps, then rebuild the linkage map (run 4).
# Relies on objects loaded from the .RData file (map.chunk.50, mapdata,
# abeldata, famped, pseudoautoSNPs, lg.sex, lg.vec) and on the
# AnalysisSuffix / AnalysisSuffix2 variables defined upstream.
library(ggplot2)
library(reshape)
library(beepr)
library(plyr)
library(dplyr)
library(GenABEL)
library(crimaptools)

load("finescale/5_Recheck_Data_Fine_Scale_Chunk_Diffs.RData")

# Collect, for each problem chunk, the map lengths after inversion and
# after deletion, alongside the initial length.
chunk.res <- NULL

for(i in 1:nrow(map.chunk.50)){
  print(paste("Running problem", i, "of", nrow(map.chunk.50)))
  test <- subset(mapdata, CEL.LG == map.chunk.50$CEL.LG[i])
  invmap <- read.table(paste0("finescale/chr", map.chunk.50$CEL.LG[i], AnalysisSuffix, "_cel_ckP_Re_", map.chunk.50$chunk[i], "_inv.parsemap"),
                       header = T, stringsAsFactors = F)
  delmap <- read.table(paste0("finescale/chr", map.chunk.50$CEL.LG[i], AnalysisSuffix, "_cel_ckP_Re_", map.chunk.50$chunk[i], "_del.parsemap"),
                       header = T, stringsAsFactors = F)
  map.chunk.50$Inversion.Len[i] <- invmap$cMPosition[nrow(invmap)]
  map.chunk.50$Initial.Len[i] <- test$cMPosition.run4[nrow(test)]
  map.chunk.50$Deletion.Len[i] <- delmap$cMPosition[nrow(delmap)]
  chunk.res <- rbind(chunk.res, invmap, delmap)
}

# NOTE(review): paste() (default sep = " ") puts spaces inside these file
# names; the matching read.table() calls below use the same paste(), so
# the script is internally consistent, but paste0() is used everywhere
# else — confirm intended.
write.table(map.chunk.50, paste("results/5_Recheck_MapChunk_DelInvCheck_Summary_", AnalysisSuffix2, ".txt"), row.names = F, quote = F)
write.table(chunk.res, paste("results/5_Recheck_MapChunk_DelInvCheck_Full_", AnalysisSuffix2, ".txt"), row.names = F, quote = F)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# 3.  For small chunks, try exclusion/inversion #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

map.chunk.50 <- read.table(paste("results/5_Recheck_MapChunk_DelInvCheck_Summary_", AnalysisSuffix2, ".txt"), header = T, stringsAsFactors = F)

# Change in map length caused by inverting / deleting each chunk.
map.chunk.50$Inversion.Diff <- map.chunk.50$Initial.Len - map.chunk.50$Inversion.Len
map.chunk.50$Deletion.Diff <- map.chunk.50$Initial.Len - map.chunk.50$Deletion.Len
map.chunk.50$Order <- 1:nrow(map.chunk.50)

chunk.res <- read.table(paste("results/5_Recheck_MapChunk_DelInvCheck_Full_", AnalysisSuffix2, ".txt"), header = T, stringsAsFactors = F)

# The edited chunk id is the third "_"-separated field of analysisID,
# stripped of its "ck" prefix.
chunk.res$chunk <- unlist(lapply(chunk.res$analysisID, function(x) gsub("ck", "", strsplit(x, split = "_")[[1]][3])))
names(chunk.res)[which(names(chunk.res) == "chunk")] <- "chunk.edit"
chunk.res <- subset(chunk.res, select = -Order)
chunk.res <- join(chunk.res, mapdata[,c("SNP.Name", "chunk", "cMPosition.run4", "Order")])
# Flag SNPs in the chunk that was actually edited ("focal") vs not ("nf").
chunk.res$chunk.focal <- ifelse(chunk.res$chunk.edit == chunk.res$chunk, "focal", "nf")

# Diagnostic plots: edited vs initial map lengths.
ggplot(map.chunk.50, aes(Initial.Len, Inversion.Len)) + geom_text(aes(label = Freq)) + geom_abline(slope = 1, intercept = 0)
ggplot(map.chunk.50, aes(Initial.Len, Deletion.Len)) + geom_text(aes(label = Freq)) + geom_abline(slope = 1, intercept = 0)
ggplot(map.chunk.50, aes(Deletion.Diff, Freq)) + geom_point()

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# 4.  Remove small chunks with large deletion difference #
#     and inversions that result in > 0.5cM shorter map  #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

# NOTE(review): the next two assignments are immediately overwritten by
# the rbind() that follows — apparently leftover from development.
edit.tab <- rbind(cbind(map.chunk.50[which(map.chunk.50$Freq < 10 & map.chunk.50$Deletion.Diff > 1),], edit = "del"))
edit.tab <- subset(edit.tab, CEL.LG == 34)

edit.tab <- rbind(cbind(map.chunk.50[which(map.chunk.50$Freq < 10 & map.chunk.50$Deletion.Diff > 1),], edit = "del"),
                  cbind(map.chunk.50[which(map.chunk.50$Inversion.Diff > 0.5),], edit = "inv"))

arrange(edit.tab, edit, Freq)
table(edit.tab$chunk)

#~~ create mapdata for LGs that were fine
newmap <- mapdata

for(i in 1:nrow(edit.tab)){
  if(edit.tab$edit[i] == "del"){
    # Deletion: drop the chunk from the map order.
    newmap$CEL.order[which(newmap$chunk == edit.tab$chunk[i])] <- NA
  }
  if(edit.tab$edit[i] == "inv"){
    # Inversion: reverse SNP order within the chunk.
    newmap$CEL.order[which(newmap$chunk == edit.tab$chunk[i])] <- rev(newmap$CEL.order[which(newmap$chunk == edit.tab$chunk[i])])
  }
}

newmap <- arrange(newmap, CEL.LG, CEL.order)
newmap <- subset(newmap, !is.na(CEL.order))

#~~ rerun crimap
counter <- 1
for(i in sort(unique(edit.tab$CEL.LG))){
  print(paste0("Running chromosome ", counter, " of ", length(unique(edit.tab$CEL.LG))))
  create_crimap_input(gwaa.data = abeldata,
                      familyPedigree = famped,
                      analysisID = paste0(i, AnalysisSuffix2),
                      snplist = subset(newmap, CEL.LG == i)$SNP.Name,
                      pseudoautoSNPs = pseudoautoSNPs,
                      is.X = ifelse(i == lg.sex, T, F),
                      outdir = paste0("crimap/crimap_", AnalysisSuffix2),
                      use.specific.mnd = paste0("crimap/crimap_", AnalysisSuffix, "/chr", AnalysisSuffix, "full.mnd"),
                      clear.existing.analysisID = TRUE)
  run_crimap_prepare(genfile = paste0("crimap/crimap_", AnalysisSuffix2, "/chr", i, AnalysisSuffix2, ".gen"))
  run_crimap_chrompic(genfile = paste0("crimap/crimap_", AnalysisSuffix2, "/chr", i, AnalysisSuffix2, ".gen"))
  counter <- counter + 1
}

#~~ Parse Chrompic Files
fullmap <- NULL
for(lg in c(lg.vec)){
  recombmap <- parse_map_chrompic(paste0("crimap/crimap_", AnalysisSuffix2, "/chr", lg, AnalysisSuffix2, ".cmp"))
  recombmap$Chr <- lg
  fullmap <- rbind(fullmap, recombmap)
  rm(recombmap)
}

names(fullmap)[which(names(fullmap) == "cMPosition")] <- "cMPosition.run4"
head(fullmap)

fullmap <- join(fullmap, subset(newmap, select = c(SNP.Name, BTA.Chr, BTA.Position, BTA.Order, CEL.order, CEL.LG, chunk)))
table(is.na(fullmap$CEL.LG))

# Compare run-3 vs run-4 map lengths and SNP counts per linkage group.
tapply(mapdata$cMPosition.run3, mapdata$CEL.LG, max) - tapply(fullmap$cMPosition.run4, fullmap$CEL.LG, max)
tapply(mapdata$cMPosition.run3, mapdata$CEL.LG, length) - tapply(fullmap$cMPosition.run4, fullmap$CEL.LG, length)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# 5.  write everything to file and update unmapped snps  #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

ggplot(fullmap, aes(CEL.order, cMPosition.run4)) + geom_point() + facet_wrap(~Chr, scales= "free")
table(is.na(fullmap$cMPosition.run4))
table(is.na(fullmap$CEL.order))

# ggsave() with no plot argument writes the most recently displayed plot.
ggsave(filename = paste0("figs/Linkage_Map_run4_", AnalysisSuffix, ".png"), device = "png", width = 15, height = 10, units = "in")

ggplot(subset(fullmap, CEL.LG == 34), aes(CEL.order, cMPosition.run4)) + geom_point() + facet_wrap(~Chr, scales= "free")

# x <- read.table("results/5_Linkage_Map_Positions_CEL_run4_a.txt", header = T)
# head(x)
# x <- subset(x, CEL.LG != 34)
# x <- rbind(x, fullmap)
#
# "cela1_red_x_91610613" %in% x$SNP.Name
#
#
# write.table(x, paste0("results/5_Linkage_Map_Positions_CEL_run4_", AnalysisSuffix, ".txt"), row.names = F, quote = F, sep = "\t")
write.table(fullmap, paste0("results/5_Linkage_Map_Positions_CEL_run4_", AnalysisSuffix, ".txt"), row.names = F, quote = F, sep = "\t")
library(tidyverse) res <- read_tsv("res_runs.tsv", col_names=c("dataset","assembler","ref_tot","ref_cov","ref_cov_frac","qry_tot","qry_cov","qry_cov_frac","contig_num")) res %<>% mutate(rep_res = exp(-abs(log(qry_cov/ref_cov)))) %>% mutate(score = (1 - ((abs(1-ref_cov_frac)/4) + (abs(1-qry_cov_frac)/4) + (abs(1-rep_res)/4) + (abs(1-(1/contig_num))/4)))*100) res <- mutate(res, true=(score>=99)) res <- res %>% replace_na(list(true=FALSE)) novel <- read_tsv("res_novel.tsv", col_names=c("dataset","assembler","ir_len","ctg_len","num_ctg")) my_res <- left_join(select(res, dataset, assembler, score, true), novel) eval_params <- function(ir, total, data=my_res){ my_tab <- data %>% mutate(pred=(ir_len>=ir & ctg_len>=total)) %>% select(true,pred) %>% table sens <- my_tab["TRUE","TRUE"]/(my_tab["TRUE","TRUE"]+my_tab["TRUE","FALSE"]) prec <- my_tab["TRUE","TRUE"]/(my_tab["TRUE","TRUE"]+my_tab["FALSE","TRUE"]) f1 <- 2*(sens*prec)/(sens+prec) return(tibble(ir,total,prec,sens,f1)) } crossing(ir=seq(0,25000,1000),total=seq(10000,150000,10000)) %>% rowwise %>% do(eval_params(.$ir,.$total)) %>% gather(measure,value,3:5) %>% filter(measure == "f1") %>% arrange(-value) %>% head(1) %>% print pdf("novel_cutoffs.pdf") crossing(ir=seq(0,25000,1000),total=seq(10000,150000,10000)) %>% rowwise %>% do(eval_params(.$ir,.$total)) %>% gather(measure,value,3:5) %>% ggplot(aes(total,ir,fill=value)) + geom_tile() + facet_grid(. ~ measure) + ggtitle("F1, sensitivity, and precision by total and ir length cutoffs") crossing(ir=seq(0,25000,1000),total=seq(10000,150000,10000)) %>% rowwise %>% do(eval_params(.$ir,.$total)) %>% gather(measure,value,3:5) %>% ggplot(aes(total,color=as.factor(ir),y=value)) + geom_line() + facet_grid(. 
~ measure) + ggtitle("F1, sensitivity, and precision by ir length cutoffs (and total)") crossing(ir=seq(0,25000,1000),total=seq(10000,150000,10000)) %>% rowwise %>% do(eval_params(.$ir,.$total)) %>% gather(measure,value,3:5) %>% ggplot(aes(ir,color=as.factor(total),y=value)) + geom_line() + facet_grid(. ~ measure) + ggtitle("F1, sensitivity, and precision by total length cutoffs (and ir)") dev.off()
/code/find_novel_cutoffs.r
permissive
altingia/benchmark
R
false
false
2,135
r
library(tidyverse) res <- read_tsv("res_runs.tsv", col_names=c("dataset","assembler","ref_tot","ref_cov","ref_cov_frac","qry_tot","qry_cov","qry_cov_frac","contig_num")) res %<>% mutate(rep_res = exp(-abs(log(qry_cov/ref_cov)))) %>% mutate(score = (1 - ((abs(1-ref_cov_frac)/4) + (abs(1-qry_cov_frac)/4) + (abs(1-rep_res)/4) + (abs(1-(1/contig_num))/4)))*100) res <- mutate(res, true=(score>=99)) res <- res %>% replace_na(list(true=FALSE)) novel <- read_tsv("res_novel.tsv", col_names=c("dataset","assembler","ir_len","ctg_len","num_ctg")) my_res <- left_join(select(res, dataset, assembler, score, true), novel) eval_params <- function(ir, total, data=my_res){ my_tab <- data %>% mutate(pred=(ir_len>=ir & ctg_len>=total)) %>% select(true,pred) %>% table sens <- my_tab["TRUE","TRUE"]/(my_tab["TRUE","TRUE"]+my_tab["TRUE","FALSE"]) prec <- my_tab["TRUE","TRUE"]/(my_tab["TRUE","TRUE"]+my_tab["FALSE","TRUE"]) f1 <- 2*(sens*prec)/(sens+prec) return(tibble(ir,total,prec,sens,f1)) } crossing(ir=seq(0,25000,1000),total=seq(10000,150000,10000)) %>% rowwise %>% do(eval_params(.$ir,.$total)) %>% gather(measure,value,3:5) %>% filter(measure == "f1") %>% arrange(-value) %>% head(1) %>% print pdf("novel_cutoffs.pdf") crossing(ir=seq(0,25000,1000),total=seq(10000,150000,10000)) %>% rowwise %>% do(eval_params(.$ir,.$total)) %>% gather(measure,value,3:5) %>% ggplot(aes(total,ir,fill=value)) + geom_tile() + facet_grid(. ~ measure) + ggtitle("F1, sensitivity, and precision by total and ir length cutoffs") crossing(ir=seq(0,25000,1000),total=seq(10000,150000,10000)) %>% rowwise %>% do(eval_params(.$ir,.$total)) %>% gather(measure,value,3:5) %>% ggplot(aes(total,color=as.factor(ir),y=value)) + geom_line() + facet_grid(. 
~ measure) + ggtitle("F1, sensitivity, and precision by ir length cutoffs (and total)") crossing(ir=seq(0,25000,1000),total=seq(10000,150000,10000)) %>% rowwise %>% do(eval_params(.$ir,.$total)) %>% gather(measure,value,3:5) %>% ggplot(aes(ir,color=as.factor(total),y=value)) + geom_line() + facet_grid(. ~ measure) + ggtitle("F1, sensitivity, and precision by total length cutoffs (and ir)") dev.off()
#' A function to fit a linear and non-linear GAM model to one's data and return some useful summary statistics #' #' This function fits a GAM glm in a linear and non-linear framework and tests if the glm GAM with a smooth independent is a better fit to the data. #' @param wdata a data frame of data with appropriate column names #' @param dependent a string that matches a column name in wdata that you would like to define as the dependent or response variable in your analysis #' @param independent a string that matches a column name in wdata that you would like to define as the independent or primary explanatory variable of interest in your analysis #' @param covariables a string or character vector that matches column names in wdata that you would like to define as additional covariate in your model. Set as NA, if you have no covariates and thus would like to run a univariate analysis. #' @param rnt_dependent TRUE or FALSE, would you like to rank normal transform your dependent variable prior to fitting the data? Default value is TRUE. Uses the rntransform() function in this package to rank normal transform. #' @param bam if you would like to run the GAM with bam(), for very large data sets, then set bam = TRUE. Default is bam = FALSE and gam() is used. #' @param nthread_count number of compute threads to use in your bam(). #' @keywords GAM, non-linear model, linear model, mgcv #' @export #' @examples #' linear_nonlinear_fits() linear_nonlinear_fits = function(wdata, dependent, independent, covariables = NA, rnt_dependent = TRUE, bam = FALSE, nthread_count = 1){ ############################## ### 1. Define Model Data Frame ############################## if(is.na(covariables[1])){ model_variables = c( dependent, independent ) } else { model_variables = c( dependent, covariables, independent) } mod_data = wdata[, c(model_variables)] ############################## ### 2. 
Rank Transform dependent ### if desired ############################## if(rnt_dependent == TRUE){ mod_data[, dependent] = rntransform(mod_data[, dependent]) } ############################## ### 3. Define Linear Model formula ############################## if( is.na( covariables[1] ) ){ form = formula(paste0(dependent ," ~ ", independent )) } else { form = formula(paste0(dependent ," ~ ", paste0(covariables, collapse = " + ") ," + ", independent )) } ############################## ### 4. Perform Linear Model ############################## if(bam == TRUE){ lm_mod = mgcv::bam(form, data = mod_data, method = "REML", nthreads = nthread_count) } else { lm_mod = mgcv::gam(form, data = mod_data, method = "REML") } # glm_mod = glm(form, data = mod_data, family = "gaussian") ################# ## 4a. summary stats ################# s = summary(lm_mod) ## sample size n = s$n; names(n) = "n_lm" ## Adjusted R-squared, to compare between models with a different number of predictors rsq = s$r.sq; names(rsq) = "rsq_adj_lm" ## Deviance explained dexp = s$dev.expl; names(dexp) = "dev_exp_lm" ## Dependent Variable effect estimates beta = s$p.coeff[independent]; names(beta) = "beta_lm" se = s$se[independent]; names(se) = "se_lm" tval = s$p.t[independent]; names(tval) = "tval_lm" pval = s$p.pv[independent]; names(pval) = "P_lm" loglik_lm = logLik.gam(lm_mod); names(loglik_lm) = "loglik_lm" aic_lm = AIC(lm_mod); names(aic_lm) = "aic_lm" lm_results = c(n, loglik_lm, aic_lm, rsq, dexp, beta, se, tval, pval) ############################## ### 5. Define GAM Model formula ############################## if( is.na( covariables[1] ) ){ form = formula(paste0(dependent ," ~ s(", independent, ")" )) } else { form = formula(paste0(dependent ," ~ ", paste0(covariables, collapse = " + ") ," + s(", independent, ")" )) } ############################## ### 6. 
Perform GAM (non-linear) Model ############################## if(bam == TRUE){ gam_mod = mgcv::bam( form, data = mod_data, method = "REML", nthreads = nthread_count) } else { gam_mod = mgcv::gam( form, data = mod_data, method = "REML") } # gam.check(gam_mod) ################# ## 6a. summary stats ################# s = summary(gam_mod) ## sample size n_gam = s$n; names(n_gam) = "n_gam" ## smooth data; removing the reference degrees of freedom smooth_data = s$s.table; names(smooth_data) = c("edf_gam", "Ref_df_gam", "F_gam", "P_gam") ## R-squared rsq_gam = s$r.sq; names(rsq_gam) = "rsq_adj_gam" ## Deviance Explained dexp_gam = s$dev.expl; names(dexp_gam) = "dev_exp_gam" ## GAM coefficients for the independent # coef = gam_mod$coefficients # w = grep( independent, names(coef)) # coef = coef[w] # coef = round(coef, d = 4) # coef = paste(coef, collapse = "|"); names(coef) = "coefs_gam" ## REML LogLiklihood and AIC reml_gam = logLik.gam(gam_mod)[1]; names(reml_gam) = "reml_gam" loglik_gam = logLik.gam(gam_mod); names(loglik_gam) = "loglik_gam" aic_gam = AIC(gam_mod); names(aic_gam) = "aic_gam" ## GAM Results out gam_results = c(n_gam, loglik_gam, aic_gam, rsq_gam, dexp_gam, smooth_data ) #################################### ## 7. ANOVA test of lm and GAM #################################### a = anova(lm_mod, gam_mod, test = "F") Ftest = c(a$`Resid. Dev`, a$Df[2], a$Deviance[2], a$F[2], a$`Pr(>F)`[2] ); names(Ftest) = c("resid_dev_lm_Ftest","resid_dev_gam_Ftest","df_Ftest","deviance_Ftest", "F_Ftest", "P_Ftest") #################################### ## 8. Return estimates #################################### # out = c( n, rsq, dexp, dep_est, n_gam, rsq_gam, dev_exp, smooth_data, coef, Ftest_P ) out = c( lm_results, gam_results, Ftest ) return(out) }
/R/linear_nonlinear_fits.R
permissive
hughesevoanth/moosefun
R
false
false
5,929
r
#' A function to fit a linear and non-linear GAM model to one's data and return some useful summary statistics #' #' This function fits a GAM glm in a linear and non-linear framework and tests if the glm GAM with a smooth independent is a better fit to the data. #' @param wdata a data frame of data with appropriate column names #' @param dependent a string that matches a column name in wdata that you would like to define as the dependent or response variable in your analysis #' @param independent a string that matches a column name in wdata that you would like to define as the independent or primary explanatory variable of interest in your analysis #' @param covariables a string or character vector that matches column names in wdata that you would like to define as additional covariate in your model. Set as NA, if you have no covariates and thus would like to run a univariate analysis. #' @param rnt_dependent TRUE or FALSE, would you like to rank normal transform your dependent variable prior to fitting the data? Default value is TRUE. Uses the rntransform() function in this package to rank normal transform. #' @param bam if you would like to run the GAM with bam(), for very large data sets, then set bam = TRUE. Default is bam = FALSE and gam() is used. #' @param nthread_count number of compute threads to use in your bam(). #' @keywords GAM, non-linear model, linear model, mgcv #' @export #' @examples #' linear_nonlinear_fits() linear_nonlinear_fits = function(wdata, dependent, independent, covariables = NA, rnt_dependent = TRUE, bam = FALSE, nthread_count = 1){ ############################## ### 1. Define Model Data Frame ############################## if(is.na(covariables[1])){ model_variables = c( dependent, independent ) } else { model_variables = c( dependent, covariables, independent) } mod_data = wdata[, c(model_variables)] ############################## ### 2. 
Rank Transform dependent ### if desired ############################## if(rnt_dependent == TRUE){ mod_data[, dependent] = rntransform(mod_data[, dependent]) } ############################## ### 3. Define Linear Model formula ############################## if( is.na( covariables[1] ) ){ form = formula(paste0(dependent ," ~ ", independent )) } else { form = formula(paste0(dependent ," ~ ", paste0(covariables, collapse = " + ") ," + ", independent )) } ############################## ### 4. Perform Linear Model ############################## if(bam == TRUE){ lm_mod = mgcv::bam(form, data = mod_data, method = "REML", nthreads = nthread_count) } else { lm_mod = mgcv::gam(form, data = mod_data, method = "REML") } # glm_mod = glm(form, data = mod_data, family = "gaussian") ################# ## 4a. summary stats ################# s = summary(lm_mod) ## sample size n = s$n; names(n) = "n_lm" ## Adjusted R-squared, to compare between models with a different number of predictors rsq = s$r.sq; names(rsq) = "rsq_adj_lm" ## Deviance explained dexp = s$dev.expl; names(dexp) = "dev_exp_lm" ## Dependent Variable effect estimates beta = s$p.coeff[independent]; names(beta) = "beta_lm" se = s$se[independent]; names(se) = "se_lm" tval = s$p.t[independent]; names(tval) = "tval_lm" pval = s$p.pv[independent]; names(pval) = "P_lm" loglik_lm = logLik.gam(lm_mod); names(loglik_lm) = "loglik_lm" aic_lm = AIC(lm_mod); names(aic_lm) = "aic_lm" lm_results = c(n, loglik_lm, aic_lm, rsq, dexp, beta, se, tval, pval) ############################## ### 5. Define GAM Model formula ############################## if( is.na( covariables[1] ) ){ form = formula(paste0(dependent ," ~ s(", independent, ")" )) } else { form = formula(paste0(dependent ," ~ ", paste0(covariables, collapse = " + ") ," + s(", independent, ")" )) } ############################## ### 6. 
Perform GAM (non-linear) Model ############################## if(bam == TRUE){ gam_mod = mgcv::bam( form, data = mod_data, method = "REML", nthreads = nthread_count) } else { gam_mod = mgcv::gam( form, data = mod_data, method = "REML") } # gam.check(gam_mod) ################# ## 6a. summary stats ################# s = summary(gam_mod) ## sample size n_gam = s$n; names(n_gam) = "n_gam" ## smooth data; removing the reference degrees of freedom smooth_data = s$s.table; names(smooth_data) = c("edf_gam", "Ref_df_gam", "F_gam", "P_gam") ## R-squared rsq_gam = s$r.sq; names(rsq_gam) = "rsq_adj_gam" ## Deviance Explained dexp_gam = s$dev.expl; names(dexp_gam) = "dev_exp_gam" ## GAM coefficients for the independent # coef = gam_mod$coefficients # w = grep( independent, names(coef)) # coef = coef[w] # coef = round(coef, d = 4) # coef = paste(coef, collapse = "|"); names(coef) = "coefs_gam" ## REML LogLiklihood and AIC reml_gam = logLik.gam(gam_mod)[1]; names(reml_gam) = "reml_gam" loglik_gam = logLik.gam(gam_mod); names(loglik_gam) = "loglik_gam" aic_gam = AIC(gam_mod); names(aic_gam) = "aic_gam" ## GAM Results out gam_results = c(n_gam, loglik_gam, aic_gam, rsq_gam, dexp_gam, smooth_data ) #################################### ## 7. ANOVA test of lm and GAM #################################### a = anova(lm_mod, gam_mod, test = "F") Ftest = c(a$`Resid. Dev`, a$Df[2], a$Deviance[2], a$F[2], a$`Pr(>F)`[2] ); names(Ftest) = c("resid_dev_lm_Ftest","resid_dev_gam_Ftest","df_Ftest","deviance_Ftest", "F_Ftest", "P_Ftest") #################################### ## 8. Return estimates #################################### # out = c( n, rsq, dexp, dep_est, n_gam, rsq_gam, dev_exp, smooth_data, coef, Ftest_P ) out = c( lm_results, gam_results, Ftest ) return(out) }
\name{methods.locppm} \alias{methods.locppm} \alias{as.interact.locppm} \alias{as.ppm.locppm} \alias{coef.locppm} \alias{confint.locppm} \alias{is.poisson.locppm} \alias{print.locppm} \title{Methods for Local Gibbs Models} \description{ Methods for various generic functions, for the class \code{"locppm"} of locally fitted Gibbs point process models. } \usage{ \method{as.interact}{locppm}(object) \method{as.ppm}{locppm}(object) \method{coef}{locppm}(object, \dots, which = c("local", "homogeneous")) \method{confint}{locppm}(object, parm, level = 0.95, \dots, which = c("local", "homogeneous")) \method{is.poisson}{locppm}(x) \method{print}{locppm}(x, \dots) } \arguments{ \item{object,x}{ A locally-fitted Gibbs point process model (object of class \code{"locppm"}). } \item{\dots}{ Additional arguments passed to the default method (for \code{confint.locppm}) or ignored (by \code{coef.locppm}). } \item{which}{ Character string determining whether to perform calculations for the local Gibbs model (\code{which="local"}, the default) or the corresponding homogeneous Gibbs model (\code{which="homogeneous"}). } \item{parm}{ The parameter or parameters for which a confidence interval is desired. A character string or character vector matching the names of \code{coef(object)}, or an index or index vector that can be applied to \code{coef(object)}. } \item{level}{ Confidence level: a number between 0 and 1. } } \value{ \code{as.interact} returns an interaction structure (object of class \code{"interact"}). \code{as.ppm} returns a fitted Gibbs model (object of class \code{"ppm"}). \code{coef} and \code{confint} return a numeric vector if \code{which="homogeneous"} and an object of class \code{"ssf"} if \code{which="local"}. \code{is.poisson} returns a logical value. \code{print} returns \code{NULL}. } \details{ Objects of class \code{"locppm"} represent locally fitted Gibbs point process models. 
The functions documented here provided methods for this class, for the generic functions \code{\link[spatstat.model]{as.interact}}, \code{\link[spatstat.model]{as.ppm}}, \code{\link[stats]{coef}}, \code{\link[stats]{confint}}, \code{\link[spatstat.random:is.stationary]{is.poisson}} and \code{\link{print}}. For the \code{coef} and \code{confint} methods, the calculations can be performed either on the locally fitted model or on its homogeneous equivalent, by changing the argument \code{which}. } \references{ \localpaper \baddrubaturnbook } \seealso{ \code{\link{locppm}} } \examples{ fit <- locppm(swedishpines, ~1, sigma=9, nd=20, vcalc="full", locations="coarse") fit is.poisson(fit) coef(fit) coef(fit, which="homogeneous") confint(fit) confint(fit, which="homogeneous") as.ppm(fit) as.interact(fit) } \author{Adrian Baddeley} \keyword{spatial} \keyword{methods}
/man/methods.locppm.Rd
no_license
baddstats/spatstat.local
R
false
false
2,983
rd
\name{methods.locppm} \alias{methods.locppm} \alias{as.interact.locppm} \alias{as.ppm.locppm} \alias{coef.locppm} \alias{confint.locppm} \alias{is.poisson.locppm} \alias{print.locppm} \title{Methods for Local Gibbs Models} \description{ Methods for various generic functions, for the class \code{"locppm"} of locally fitted Gibbs point process models. } \usage{ \method{as.interact}{locppm}(object) \method{as.ppm}{locppm}(object) \method{coef}{locppm}(object, \dots, which = c("local", "homogeneous")) \method{confint}{locppm}(object, parm, level = 0.95, \dots, which = c("local", "homogeneous")) \method{is.poisson}{locppm}(x) \method{print}{locppm}(x, \dots) } \arguments{ \item{object,x}{ A locally-fitted Gibbs point process model (object of class \code{"locppm"}). } \item{\dots}{ Additional arguments passed to the default method (for \code{confint.locppm}) or ignored (by \code{coef.locppm}). } \item{which}{ Character string determining whether to perform calculations for the local Gibbs model (\code{which="local"}, the default) or the corresponding homogeneous Gibbs model (\code{which="homogeneous"}). } \item{parm}{ The parameter or parameters for which a confidence interval is desired. A character string or character vector matching the names of \code{coef(object)}, or an index or index vector that can be applied to \code{coef(object)}. } \item{level}{ Confidence level: a number between 0 and 1. } } \value{ \code{as.interact} returns an interaction structure (object of class \code{"interact"}). \code{as.ppm} returns a fitted Gibbs model (object of class \code{"ppm"}). \code{coef} and \code{confint} return a numeric vector if \code{which="homogeneous"} and an object of class \code{"ssf"} if \code{which="local"}. \code{is.poisson} returns a logical value. \code{print} returns \code{NULL}. } \details{ Objects of class \code{"locppm"} represent locally fitted Gibbs point process models. 
The functions documented here provided methods for this class, for the generic functions \code{\link[spatstat.model]{as.interact}}, \code{\link[spatstat.model]{as.ppm}}, \code{\link[stats]{coef}}, \code{\link[stats]{confint}}, \code{\link[spatstat.random:is.stationary]{is.poisson}} and \code{\link{print}}. For the \code{coef} and \code{confint} methods, the calculations can be performed either on the locally fitted model or on its homogeneous equivalent, by changing the argument \code{which}. } \references{ \localpaper \baddrubaturnbook } \seealso{ \code{\link{locppm}} } \examples{ fit <- locppm(swedishpines, ~1, sigma=9, nd=20, vcalc="full", locations="coarse") fit is.poisson(fit) coef(fit) coef(fit, which="homogeneous") confint(fit) confint(fit, which="homogeneous") as.ppm(fit) as.interact(fit) } \author{Adrian Baddeley} \keyword{spatial} \keyword{methods}
fit.cv.BTLLasso <- function(response, design, penalty, q, m, folds = 10, lambda, control = ctrl.BTLLasso(), cores = folds, trace = TRUE, trace.cv = TRUE, cv.crit) { k <- q + 1 n.design <- nrow(design)/q if (trace.cv) { cat("Full model", "\n") } m.all <- fit.BTLLasso(response = response, design = design, penalty = penalty, lambda = lambda, k = k, m = m, control = control, trace = trace) ### cross validation n.cv <- rep(floor(n.design/folds), folds) rest <- n.design%%folds if (rest > 0) { n.cv[1:rest] <- n.cv[1:rest] + 1 } which.fold <- rep(1:folds, n.cv) id.fold <- rep(sample(which.fold, n.design, replace = FALSE), each = q) cv.fun <- function(ff) { if (trace.cv) { cat("CV-fold:", ff, "out of", folds, "\n") } design.train <- design[which(id.fold != ff), , drop = FALSE] design.test <- design[which(id.fold == ff), , drop = FALSE] if(any(apply(design.train,2,var)==0)){ stop("In cross-validation one of the parameters is not estimable, probably because all correponding observations were eliminated from the training data. 
Please change your seed and/or increase the number of folds!") } response.train <- response[which(id.fold != ff)] response.test <- response[which(id.fold == ff)] fit.fold <- fit.BTLLasso(response.train, design.train, penalty = penalty, lambda = lambda, k = k, m = m, control = control, trace = trace) coef.fold <- fit.fold$coefs if (cv.crit == "Deviance") { y.test <- t(cbind(matrix(response.test, ncol = q, byrow = TRUE), 1)) * (1:k) y.test[y.test == 0] <- k + 1 y.test <- apply(y.test, 2, min) yhelp <- rep(y.test, each = k) yhelp <- as.numeric(yhelp == rep(1:k, length(y.test))) preds <- c() for (u in 1:length(lambda)) { preds <- cbind(preds, predict.BTLLasso(coef.fold[u, ], q, design.test)) } criterion <- -2 * colSums(yhelp * log(preds)) } else { pi.test <- c() for (u in 1:length(lambda)) { eta.test <- design.test %*% coef.fold[u, ] pi.test <- cbind(pi.test, exp(eta.test)/(1 + exp(eta.test))) } criterion <- colSums((pi.test - response.test)^2) } criterion } cat("Cross-Validation...", "\n") if (cores > 1) { cl <- makeCluster(cores, outfile = "") clusterExport(cl, varlist = c("response", "design", "id.fold", "lambda", "control", "trace.cv", "trace", "k", "m", "cv.crit"), envir = sys.frame(sys.nframe())) criterion <- rowSums(parSapply(cl, seq(folds), cv.fun)) stopCluster(cl) } else { criterion <- rowSums(sapply(seq(folds), cv.fun)) } ret.list <- list(coefs = m.all$coefs, criterion = criterion) return(ret.list) }
/fuzzedpackages/BTLLasso/R/fit.cv.BTLLasso.R
no_license
akhikolla/testpackages
R
false
false
3,009
r
fit.cv.BTLLasso <- function(response, design, penalty, q, m, folds = 10, lambda, control = ctrl.BTLLasso(), cores = folds, trace = TRUE, trace.cv = TRUE, cv.crit) { k <- q + 1 n.design <- nrow(design)/q if (trace.cv) { cat("Full model", "\n") } m.all <- fit.BTLLasso(response = response, design = design, penalty = penalty, lambda = lambda, k = k, m = m, control = control, trace = trace) ### cross validation n.cv <- rep(floor(n.design/folds), folds) rest <- n.design%%folds if (rest > 0) { n.cv[1:rest] <- n.cv[1:rest] + 1 } which.fold <- rep(1:folds, n.cv) id.fold <- rep(sample(which.fold, n.design, replace = FALSE), each = q) cv.fun <- function(ff) { if (trace.cv) { cat("CV-fold:", ff, "out of", folds, "\n") } design.train <- design[which(id.fold != ff), , drop = FALSE] design.test <- design[which(id.fold == ff), , drop = FALSE] if(any(apply(design.train,2,var)==0)){ stop("In cross-validation one of the parameters is not estimable, probably because all correponding observations were eliminated from the training data. 
Please change your seed and/or increase the number of folds!") } response.train <- response[which(id.fold != ff)] response.test <- response[which(id.fold == ff)] fit.fold <- fit.BTLLasso(response.train, design.train, penalty = penalty, lambda = lambda, k = k, m = m, control = control, trace = trace) coef.fold <- fit.fold$coefs if (cv.crit == "Deviance") { y.test <- t(cbind(matrix(response.test, ncol = q, byrow = TRUE), 1)) * (1:k) y.test[y.test == 0] <- k + 1 y.test <- apply(y.test, 2, min) yhelp <- rep(y.test, each = k) yhelp <- as.numeric(yhelp == rep(1:k, length(y.test))) preds <- c() for (u in 1:length(lambda)) { preds <- cbind(preds, predict.BTLLasso(coef.fold[u, ], q, design.test)) } criterion <- -2 * colSums(yhelp * log(preds)) } else { pi.test <- c() for (u in 1:length(lambda)) { eta.test <- design.test %*% coef.fold[u, ] pi.test <- cbind(pi.test, exp(eta.test)/(1 + exp(eta.test))) } criterion <- colSums((pi.test - response.test)^2) } criterion } cat("Cross-Validation...", "\n") if (cores > 1) { cl <- makeCluster(cores, outfile = "") clusterExport(cl, varlist = c("response", "design", "id.fold", "lambda", "control", "trace.cv", "trace", "k", "m", "cv.crit"), envir = sys.frame(sys.nframe())) criterion <- rowSums(parSapply(cl, seq(folds), cv.fun)) stopCluster(cl) } else { criterion <- rowSums(sapply(seq(folds), cv.fun)) } ret.list <- list(coefs = m.all$coefs, criterion = criterion) return(ret.list) }
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/SpatialNetwork.r \docType{class} \name{SpatialNetwork-class} \alias{SpatialNetwork-class} \title{Class \code{"SpatialNetwork"}} \description{ Allow to store spatial networks, especially for rendering them } \section{Slots}{ \describe{ \item{\code{.Data}}{object of class \code{"list"}} \item{\code{map}}{object of class \code{"SpatialPolygons"}} \item{\code{networks}}{object of class \code{"list"}} \item{\code{plot.title}}{object of class \code{"list"}} \item{\code{plot.label}}{object of class \code{"list"}} \item{\code{plot.color}}{object of class \code{"list"}} \item{\code{plot.blackwhite}}{object of class \code{"list"}} \item{\code{plot.symbol}}{object of class \code{"list"}} \item{\code{plot.arrow}}{object of class \code{"list"}} \item{\code{plot.barplot}}{object of class \code{"list"}} \item{\code{plot.legend}}{object of class \code{"list"}} \item{\code{plot.layout}}{object of class \code{"list"}} \item{\code{plot.par}}{object of class \code{"list"}} \item{\code{infos}}{object of class \code{"list"}} \item{\code{meta}}{object of class \code{"list"}} \item{\code{warnings}}{object of class \code{"list"}} \item{\code{names}}{object of class \code{"character"}} \item{\code{row.names}}{object of class \code{"data.frameRowLabels"}} \item{\code{.S3Class}}{object of class \code{"character"}} }} \section{Objects from the Class}{ Objects can be created with the \code{\link{spnet}} function (official class builder). } \examples{ people <- c("John", "Elsa", "Brian", "Kate") position <- c(2,4,6,8) net1.df <- data.frame( 'NODE' = people, 'POSITION' = position ) net1 <- spnet.create( x = net1.df ) net1 net2 <- spnet.create( x = people ) net2 } \seealso{ Other res: \code{\link{graph.map.plot.position}}, \code{\link{graph.map.plot.position,SpatialNetwork-method}}, \code{\link{graph.map.plot.position,SpatialPolygons-method}} } \keyword{classes} \keyword{network} \keyword{sp} \keyword{spatial}
/man/SpatialNetwork.Rd
no_license
cran/spnet
R
false
false
2,033
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/SpatialNetwork.r \docType{class} \name{SpatialNetwork-class} \alias{SpatialNetwork-class} \title{Class \code{"SpatialNetwork"}} \description{ Allow to store spatial networks, especially for rendering them } \section{Slots}{ \describe{ \item{\code{.Data}}{object of class \code{"list"}} \item{\code{map}}{object of class \code{"SpatialPolygons"}} \item{\code{networks}}{object of class \code{"list"}} \item{\code{plot.title}}{object of class \code{"list"}} \item{\code{plot.label}}{object of class \code{"list"}} \item{\code{plot.color}}{object of class \code{"list"}} \item{\code{plot.blackwhite}}{object of class \code{"list"}} \item{\code{plot.symbol}}{object of class \code{"list"}} \item{\code{plot.arrow}}{object of class \code{"list"}} \item{\code{plot.barplot}}{object of class \code{"list"}} \item{\code{plot.legend}}{object of class \code{"list"}} \item{\code{plot.layout}}{object of class \code{"list"}} \item{\code{plot.par}}{object of class \code{"list"}} \item{\code{infos}}{object of class \code{"list"}} \item{\code{meta}}{object of class \code{"list"}} \item{\code{warnings}}{object of class \code{"list"}} \item{\code{names}}{object of class \code{"character"}} \item{\code{row.names}}{object of class \code{"data.frameRowLabels"}} \item{\code{.S3Class}}{object of class \code{"character"}} }} \section{Objects from the Class}{ Objects can be created with the \code{\link{spnet}} function (official class builder). } \examples{ people <- c("John", "Elsa", "Brian", "Kate") position <- c(2,4,6,8) net1.df <- data.frame( 'NODE' = people, 'POSITION' = position ) net1 <- spnet.create( x = net1.df ) net1 net2 <- spnet.create( x = people ) net2 } \seealso{ Other res: \code{\link{graph.map.plot.position}}, \code{\link{graph.map.plot.position,SpatialNetwork-method}}, \code{\link{graph.map.plot.position,SpatialPolygons-method}} } \keyword{classes} \keyword{network} \keyword{sp} \keyword{spatial}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tissot.R \name{ti_ellipse} \alias{ti_ellipse} \title{Ellipse} \usage{ ti_ellipse(center, axes, scale = 1, n = 36, from = 0, to = 2 * pi) } \arguments{ \item{center}{center} \item{axes}{axes} \item{scale}{scale} \item{n}{n} \item{from}{from} \item{to}{to} } \value{ matrix } \description{ Ellipse }
/man/ti_ellipse.Rd
no_license
hypertidy/tissot
R
false
true
381
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tissot.R \name{ti_ellipse} \alias{ti_ellipse} \title{Ellipse} \usage{ ti_ellipse(center, axes, scale = 1, n = 36, from = 0, to = 2 * pi) } \arguments{ \item{center}{center} \item{axes}{axes} \item{scale}{scale} \item{n}{n} \item{from}{from} \item{to}{to} } \value{ matrix } \description{ Ellipse }
testlist <- list(x = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(myTAI:::cpp_geom_mean,testlist) str(result)
/myTAI/inst/testfiles/cpp_geom_mean/AFL_cpp_geom_mean/cpp_geom_mean_valgrind_files/1615840611-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
179
r
testlist <- list(x = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(myTAI:::cpp_geom_mean,testlist) str(result)
calculate_qdProfile <- function(a ,c, g, t){ slider_values <- c(a, c, g, t) # calculate entropy values entropy_0 <- calculate_entropy_0(slider_values) entropy_1 <- calculate_entropy_1(slider_values) entropy_2 <- calculate_entropy_2(slider_values) # convert to q-profile values qd_0 <- entropy_0 + 1 qd_1 <- exp(entropy_1) qd_2 <- 1/(1-entropy_2) return(c(qd_0, qd_1, qd_2)) } # calculate entropy for q = 0 calculate_entropy_0 <- function(slider_values) { entrop_0 <- 0 for (base in slider_values) { if (base != 0) { entrop_0 <- sum(entrop_0, 1) } } entrop_0 <- entrop_0 - 1 return(entrop_0) } # calculate entropy for q = 1 calculate_entropy_1 <- function(slider_values) { entrop_1 <- 0 for (base in slider_values) { proportion <- base/sum(slider_values) if (proportion != 0) { entrop_1 <- entrop_1 + (proportion * log(proportion, base = exp(1))) } } entrop_1 <- -1 * entrop_1 return(entrop_1) } # calculate entropy for q = 2 calculate_entropy_2 <- function(slider_values) { entrop_2 <- 0 for (base in slider_values) { proportion <- base/sum(slider_values) if (proportion != 0) { entrop_2 <- entrop_2 + proportion^2 } } entrop_2 <- 1 - entrop_2 return(entrop_2) }
/R/calculate_qdProfile.R
no_license
z5168021/InformationTheory
R
false
false
1,286
r
#' q-profile (Hill numbers) of a four-base nucleotide composition
#'
#' Converts counts (or weights) for the bases A, C, G, T into the Hill
#' numbers of order q = 0, 1 and 2 -- the "effective number of equally
#' common bases" at each order.
#'
#' Note: the parameter named `c` shadows base `c()` as a *value* only; the
#' call `c(a, c, g, t)` still dispatches to the base function because R
#' looks up function names separately.
#'
#' @param a,c,g,t Non-negative counts for A, C, G and T.
#' @return Numeric vector `c(qd_0, qd_1, qd_2)`.
calculate_qdProfile <- function(a ,c, g, t){
  slider_values <- c(a, c, g, t)

  # Entropy-like quantities for q = 0, 1, 2 (see helpers below).
  entropy_0 <- calculate_entropy_0(slider_values)
  entropy_1 <- calculate_entropy_1(slider_values)
  entropy_2 <- calculate_entropy_2(slider_values)

  # Convert each entropy to its Hill number (effective diversity).
  qd_0 <- entropy_0 + 1      # richness: number of bases present
  qd_1 <- exp(entropy_1)     # exponential of Shannon entropy
  qd_2 <- 1/(1-entropy_2)    # inverse Simpson concentration
  return(c(qd_0, qd_1, qd_2))
}

#' "Entropy" for q = 0: number of bases present, minus one.
#' (Vectorized replacement for the original counting loop; identical result.)
calculate_entropy_0 <- function(slider_values) {
  sum(slider_values != 0) - 1
}

#' Entropy for q = 1: Shannon entropy -sum(p * log(p)) over non-zero
#' proportions (natural log; zero proportions are excluded, matching the
#' original loop's `if (proportion != 0)` guard).
calculate_entropy_1 <- function(slider_values) {
  p <- slider_values / sum(slider_values)
  p <- p[p != 0]
  -sum(p * log(p))
}

#' Entropy for q = 2: Gini-Simpson index 1 - sum(p^2) over proportions
#' (zero proportions contribute nothing, so excluding them is equivalent).
calculate_entropy_2 <- function(slider_values) {
  p <- slider_values / sum(slider_values)
  1 - sum(p[p != 0]^2)
}
# Report whether year `a` is a leap year under the Gregorian rules:
# divisible by 400 -> leap; otherwise divisible by 100 -> common year;
# otherwise divisible by 4 -> leap.  Returns a short English sentence.
leapyear <- function(a) {
  divides <- function(d) a %% d == 0
  is_leap <-
    if (divides(400)) {
      TRUE
    } else if (divides(100)) {
      FALSE
    } else {
      divides(4)
    }
  if (is_leap) "It is a leap year!" else "It is NOT a leap year!"
}
/leapyear.r
no_license
OSiddiqi/R
R
false
false
172
r
# Report whether year `a` is a leap year under the Gregorian rules:
# divisible by 400 -> leap; otherwise divisible by 100 -> common year;
# otherwise divisible by 4 -> leap.  Returns a short English sentence.
leapyear <- function(a) {
  divides <- function(d) a %% d == 0
  is_leap <-
    if (divides(400)) {
      TRUE
    } else if (divides(100)) {
      FALSE
    } else {
      divides(4)
    }
  if (is_leap) "It is a leap year!" else "It is NOT a leap year!"
}
/refm/api/src/net/pop.rd
no_license
mrkn/rubydoc
R
false
false
13,785
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GoTheDist.R
\name{GoTheDist}
\alias{GoTheDist}
\title{Calculate the distance matrix between genes based on GO annotation}
\usage{
GoTheDist(M, Min = 4, minparents = 8, MinRow = 0, maxthreads = 3,
  method = "cosine")
}
\arguments{
\item{M}{matrix of GO terms for each gene}

\item{Min}{Cutoff to exclude low frequency GO terms. Minimum number of genes that contain the GO term. default = 4.}

\item{minparents}{Cutoff to exclude non-informative GO terms (ex: nucleus vs. Pol-II transcription factor). default = 8.}

\item{MinRow}{}
}
\value{
matrix in a cytoscape-friendly format
}
\description{
Calculate the distance matrix between genes based on GO annotation.
}
/man/GoTheDist.Rd
no_license
saralinker/GONetwork
R
false
true
678
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GoTheDist.R
\name{GoTheDist}
\alias{GoTheDist}
\title{Calculate the distance matrix between genes based on GO annotation}
\usage{
GoTheDist(M, Min = 4, minparents = 8, MinRow = 0, maxthreads = 3,
  method = "cosine")
}
\arguments{
\item{M}{matrix of GO terms for each gene}

\item{Min}{Cutoff to exclude low frequency GO terms. Minimum number of genes that contain the GO term. default = 4.}

\item{minparents}{Cutoff to exclude non-informative GO terms (ex: nucleus vs. Pol-II transcription factor). default = 8.}

\item{MinRow}{}
}
\value{
matrix in a cytoscape-friendly format
}
\description{
Calculate the distance matrix between genes based on GO annotation.
}
/*
     File:       AppleEvents.r

     Contains:   AppleEvent Package Interfaces.

     Version:    Technology: System 7.5
                 Release:    Universal Interfaces 3.1

     Copyright:  (c) 1989-1998 by Apple Computer, Inc., all rights reserved

     Bugs?:      Please include the file and version information (from above)
                 with the problem description.  Developers belonging to one of
                 the Apple developer programs can submit bug reports to:

                     devsupport@apple.com

*/
#ifndef __APPLEEVENTS_R__
#define __APPLEEVENTS_R__

#ifndef __CONDITIONALMACROS_R__
#include "ConditionalMacros.r"
#endif

#ifndef __AEDATAMODEL_R__
#include "AEDataModel.r"                /* data types have moved to AEDataModel.r */
#endif

/* Keywords for Apple event parameters */
#define keyDirectObject                 '----'
#define keyErrorNumber                  'errn'
#define keyErrorString                  'errs'
#define keyProcessSerialNumber          'psn '

/* Keywords for special handlers */
#define keyPreDispatch                  'phac'          /* preHandler accessor call */
#define keySelectProc                   'selh'          /* more selector call */

/* Keyword for recording */
#define keyAERecorderCount              'recr'          /* available only in vers 1.0.1 and greater */

/* Keyword for version information */
#define keyAEVersion                    'vers'          /* available only in vers 1.0.1 and greater */

/* Event Class */
#define kCoreEventClass                 'aevt'

/* Event IDs */
#define kAEOpenApplication              'oapp'
#define kAEOpenDocuments                'odoc'
#define kAEPrintDocuments               'pdoc'
#define kAEQuitApplication              'quit'
#define kAEAnswer                       'ansr'
#define kAEApplicationDied              'obit'

/* Constants for recording */
#define kAEStartRecording               'reca'          /* available only in vers 1.0.1 and greater */
#define kAEStopRecording                'recc'          /* available only in vers 1.0.1 and greater */
#define kAENotifyStartRecording         'rec1'          /* available only in vers 1.0.1 and greater */
#define kAENotifyStopRecording          'rec0'          /* available only in vers 1.0.1 and greater */
#define kAENotifyRecording              'recr'          /* available only in vers 1.0.1 and greater */

/* sendMode bit flags: parameter to AESend */
#define kAENeverInteract                0x00000010      /* server should not interact with user */
#define kAECanInteract                  0x00000020      /* server may try to interact with user */
#define kAEAlwaysInteract               0x00000030      /* server should always interact with user where appropriate */
#define kAECanSwitchLayer               0x00000040      /* interaction may switch layer */
#define kAEDontRecord                   0x00001000      /* don't record this event - available only in vers 1.0.1 and greater */
#define kAEDontExecute                  0x00002000      /* don't send the event for recording - available only in vers 1.0.1 and greater */
#define kAEProcessNonReplyEvents        0x00008000      /* allow processing of non-reply events while awaiting synchronous AppleEvent reply */
#define kAENoReply                      0x00000001      /* sender doesn't want a reply to event */
#define kAEQueueReply                   0x00000002      /* sender wants a reply but won't wait */
#define kAEWaitReply                    0x00000003      /* sender wants a reply and will wait */
#define kAEDontReconnect                0x00000080      /* don't reconnect if there is a sessClosedErr from PPCToolbox */
#define kAEWantReceipt                  0x00000200      /* (nReturnReceipt) sender wants a receipt of message */

/* Constants for timeout durations */

/* priority param of AESend */
#define kAENormalPriority               0x00000000      /* post message at the end of the event queue */
#define kAEHighPriority                 0x00000001      /* post message at the front of the event queue (same as nAttnMsg) */


/*--------------------------aedt - Apple Events Template---------------------------------*/
/* Resource definition used for associating a value with an apple event */
/* This really only useful for general dispatching */
type 'aedt' {
    wide array {
        unsigned longint;                               /* Event Class */
        unsigned longint;                               /* Event ID    */
        unsigned longint;                               /* Value       */
    };
};

#endif /* __APPLEEVENTS_R__ */
/3.1/Universal/Interfaces/RIncludes/AppleEvents.r
no_license
elliotnunn/UniversalInterfaces
R
false
false
4,025
r
/*
     File:       AppleEvents.r

     Contains:   AppleEvent Package Interfaces.

     Version:    Technology: System 7.5
                 Release:    Universal Interfaces 3.1

     Copyright:  (c) 1989-1998 by Apple Computer, Inc., all rights reserved

     Bugs?:      Please include the file and version information (from above)
                 with the problem description.  Developers belonging to one of
                 the Apple developer programs can submit bug reports to:

                     devsupport@apple.com

*/
#ifndef __APPLEEVENTS_R__
#define __APPLEEVENTS_R__

#ifndef __CONDITIONALMACROS_R__
#include "ConditionalMacros.r"
#endif

#ifndef __AEDATAMODEL_R__
#include "AEDataModel.r"                /* data types have moved to AEDataModel.r */
#endif

/* Keywords for Apple event parameters */
#define keyDirectObject                 '----'
#define keyErrorNumber                  'errn'
#define keyErrorString                  'errs'
#define keyProcessSerialNumber          'psn '

/* Keywords for special handlers */
#define keyPreDispatch                  'phac'          /* preHandler accessor call */
#define keySelectProc                   'selh'          /* more selector call */

/* Keyword for recording */
#define keyAERecorderCount              'recr'          /* available only in vers 1.0.1 and greater */

/* Keyword for version information */
#define keyAEVersion                    'vers'          /* available only in vers 1.0.1 and greater */

/* Event Class */
#define kCoreEventClass                 'aevt'

/* Event IDs */
#define kAEOpenApplication              'oapp'
#define kAEOpenDocuments                'odoc'
#define kAEPrintDocuments               'pdoc'
#define kAEQuitApplication              'quit'
#define kAEAnswer                       'ansr'
#define kAEApplicationDied              'obit'

/* Constants for recording */
#define kAEStartRecording               'reca'          /* available only in vers 1.0.1 and greater */
#define kAEStopRecording                'recc'          /* available only in vers 1.0.1 and greater */
#define kAENotifyStartRecording         'rec1'          /* available only in vers 1.0.1 and greater */
#define kAENotifyStopRecording          'rec0'          /* available only in vers 1.0.1 and greater */
#define kAENotifyRecording              'recr'          /* available only in vers 1.0.1 and greater */

/* sendMode bit flags: parameter to AESend */
#define kAENeverInteract                0x00000010      /* server should not interact with user */
#define kAECanInteract                  0x00000020      /* server may try to interact with user */
#define kAEAlwaysInteract               0x00000030      /* server should always interact with user where appropriate */
#define kAECanSwitchLayer               0x00000040      /* interaction may switch layer */
#define kAEDontRecord                   0x00001000      /* don't record this event - available only in vers 1.0.1 and greater */
#define kAEDontExecute                  0x00002000      /* don't send the event for recording - available only in vers 1.0.1 and greater */
#define kAEProcessNonReplyEvents        0x00008000      /* allow processing of non-reply events while awaiting synchronous AppleEvent reply */
#define kAENoReply                      0x00000001      /* sender doesn't want a reply to event */
#define kAEQueueReply                   0x00000002      /* sender wants a reply but won't wait */
#define kAEWaitReply                    0x00000003      /* sender wants a reply and will wait */
#define kAEDontReconnect                0x00000080      /* don't reconnect if there is a sessClosedErr from PPCToolbox */
#define kAEWantReceipt                  0x00000200      /* (nReturnReceipt) sender wants a receipt of message */

/* Constants for timeout durations */

/* priority param of AESend */
#define kAENormalPriority               0x00000000      /* post message at the end of the event queue */
#define kAEHighPriority                 0x00000001      /* post message at the front of the event queue (same as nAttnMsg) */


/*--------------------------aedt - Apple Events Template---------------------------------*/
/* Resource definition used for associating a value with an apple event */
/* This really only useful for general dispatching */
type 'aedt' {
    wide array {
        unsigned longint;                               /* Event Class */
        unsigned longint;                               /* Event ID    */
        unsigned longint;                               /* Value       */
    };
};

#endif /* __APPLEEVENTS_R__ */
#' Strategy poster ("business card")
#'
#' \code{wizytowka_ADPD} renders a one-page PDF summary for a SuperFarmer
#' strategy, comparing it against two reference strategies (maxrabbit and
#' yolo). The displayed strategy name is derived automatically from the
#' argument, assuming the \code{strategia_<name>} naming pattern.
#'
#' @param strategia A SuperFarmer game strategy (as accepted by
#'   \code{SuperFarmerDA::badaj_gre}).
#' @param N Number of simulated games per strategy.
#'
#' @return A PDF file with the strategy's results, written to the working
#'   directory as \code{ADPD_wizytowka_<name>.pdf}.
#'
#' @examples
#' wizytowka_ADPD(strategia_DKA,10)
#'
#' @export
wizytowka_ADPD <- function( strategia, N = 10000){
  # Simulate N games per strategy; badaj_gre() returns a list whose first
  # element is the vector of game lengths (number of rounds).
  wynik_strategii_max <- SuperFarmerDA::badaj_gre(SuperFarmerDA::strategia_maxrabbit , N)[[1]]
  wynik_strategii_badanej <- SuperFarmerDA::badaj_gre(strategia, N)[[1]]
  wynik_strategii_yolo <- SuperFarmerDA::badaj_gre(SuperFarmerRCNK::strategia_yolo, N)[[1]]

  strategia_max <- "maxrabbit"
  strategia_min <- "yolo"

  # Recover the strategy's display name from the unevaluated argument name,
  # stripping everything up to and including the "strategia_" prefix.
  strategia_porownawcza <- deparse(substitute(strategia))
  strategia_porownawcza <-
    substr(
      strategia_porownawcza,
      nchar("strategia_") + regexpr("strategia_", strategia_porownawcza)[[1]],
      nchar(strategia_porownawcza)
    )

  # One column of game lengths per strategy; melt to long format for ggplot.
  czasy_gier <- data.frame(wynik_strategii_yolo, wynik_strategii_max, wynik_strategii_badanej)
  colnames(czasy_gier) <- c(strategia_min, strategia_max, strategia_porownawcza)
  dane_do_wykresow <- reshape2::melt(czasy_gier)

  # ---- ECDF plot of game length, one curve per strategy ----
  wykres_dystrybuanty <- ggplot2::ggplot(dane_do_wykresow, ggplot2::aes(x = value)) +
    ggplot2::stat_ecdf(ggplot2::aes(colour = variable)) +
    ggplot2::theme_bw() +
    ggplot2::labs(
      list(
        colour = "Nazwa strategii",
        x = "Czas trwania gry (liczba rund)",
        y = "P(X<t)",
        title = "Dystrybuanty czasu gry"
      )
    ) +
    ggplot2::scale_y_continuous(labels = scales::percent, expand = c(0, 0)) +
    ggplot2::scale_x_continuous(expand = c(0, 0), limits = c(0, 252)) +
    ggplot2::theme(legend.position = "none")

  # ---- violin + box plot of game length per strategy ----
  wykres_pudelkowy <- ggplot2::ggplot(dane_do_wykresow, ggplot2::aes(variable, value)) +
    ggplot2::geom_violin(ggplot2::aes(fill = variable)) +
    ggplot2::geom_boxplot(width = 0.15) +
    ggplot2::theme_bw() +
    ggplot2::scale_y_continuous(expand = c(0, 0), limits = c(0, 252)) +
    ggplot2::labs(
      list(
        fill = "",
        x = "",
        y = "Czas trwania gry (liczba rund)",
        title = "Wykres skrzypcowo-pudełkowy"
      )
    ) +
    ggplot2::coord_flip() +
    ggplot2::theme(legend.position = "none")

  # ---- summary-statistics table ----
  # NOTE(review): library() inside a function attaches dplyr globally (a side
  # effect); it is needed here for %>% and is left unchanged.
  library(dplyr)
  statystyki_strategii <- dane_do_wykresow %>%
    dplyr::group_by(variable) %>%
    dplyr::summarise(
      min = min(value),
      '10proc' = round(quantile(value, prob = 0.1)),
      mediana = median(value),
      srednia = round(mean(value), 2),
      '90proc' = round(quantile(value, prob = 0.9)),
      '95proc' = round(quantile(value, prob = 0.95)),
      max = max(value)
    )
  colnames(statystyki_strategii)[1] <- "strategia"

  # ---- bucket game lengths into fixed intervals ----
  dyskretne_czasy_gier <- apply(czasy_gier, 1:2, function(x)
    cut(
      x,
      breaks = c(0, 15, 30, 45, 60, 75, 90, 105, 120, Inf),
      labels = c(
        " <0,15>",
        " <16,30>",
        " <31,45>",
        " <46-60>",
        " <61-75>",
        " <76-90>",
        " <91-105>",
        "<106-120>",
        "ponad120"
      )
    ))
  # Per-strategy share of games falling in each interval (row proportions).
  d1 <- reshape2::melt(dyskretne_czasy_gier)[-1]
  d2 <- data.frame(prop.table(table(d1$Var2, d1$value), 1))
  d2 <- as.data.frame(d2)

  # ---- grouped bar chart of interval shares ----
  wykres_przedzialy <- ggplot2::ggplot(d2, ggplot2::aes(Var2, Freq, fill = Var1)) +
    ggplot2::geom_bar(position = "dodge", stat = "identity") +
    ggplot2::scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
    ggplot2::theme(legend.position = "none") +
    ggplot2::labs(
      list(x = "Czas trwania gry (liczba rund)",
           y = "Procent w przedziale",
           title = "Porownanie wynikow strategii dla przedziałow czasu gry")
    ) +
    ggplot2::theme_bw() +
    ggplot2::theme(legend.position = "none")

  # ---- page layout ----
  # Table theme: one fill colour per strategy row, alternating alpha.
  t1 <- gridExtra::ttheme_default(core = list(bg_params = list(
    fill = c(rep(c(
      "#F8766D", "#5CF058", "#619CFF"
    ))),
    alpha = rep(c(1, 0.5), each = 3)
  )))
  wykres_tabela <- gridExtra::tableGrob(statystyki_strategii, rows = NULL, theme = t1)

  # Author box (top-left corner of the poster).
  autorzy <- paste("Autorzy:", "Anna Dymowska" , "Przemyslaw Dycha", sep = "\n")
  autorzy <- grid::grid.text(
    autorzy,
    gp = grid::gpar(fontsize = 10),
    x = grid::unit(0.05, "npc"),
    just = "left"
  )

  # Poster title, built from the detected strategy name.
  tytul_plakatu <- paste0("Strategia_", strategia_porownawcza)
  tytul <- grid::grid.text(
    tytul_plakatu,
    gp = grid::gpar(
      fontsize = 25,
      col = 'cornflowerblue',
      fontface = 'bold'
    ),
    vjust = 0.5,
    hjust = 0.6
  )

  # Introductory paragraph (Polish text, rendered on the poster itself).
  opis_slowny <- paste0(
    "Przedstawiamy statystyki strategii ",
    strategia_porownawcza ,
    " - strategii gry SuperFarmer dla jednego gracza z liczbą powtorzeń ",
    N,
    ". Dla porownania tej strategii na wykresach zamieściliśmy strategie: max_rabbit oraz yolo - najszybszą i najwolniejszą strategię znalezioną przez nas wśrod strategii, ktore przygotowali studenci na zajęcia z Programowania w R i wizualizacji danych w roku 2016/17."
  )
  opis_slowny <- RGraphics::splitTextGrob(opis_slowny, gp = grid::gpar(fontsize = 12))

  # Assemble the poster on a 24-column grid; the numbers in layout_matrix
  # refer to the grobs in the order they are passed to grid.arrange().
  gg2 <- gridExtra::grid.arrange(
    autorzy,
    tytul,
    wykres_pudelkowy,
    opis_slowny,
    wykres_tabela,
    wykres_dystrybuanty,
    wykres_przedzialy,
    ncol = 2,
    layout_matrix = rbind(
      c(1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(NA, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(NA, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(NA, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7)
    )
  )

  # ---- save the assembled poster as an A4-landscape PDF ----
  nazwa_pliku_pdf <- paste0("ADPD_wizytowka_", strategia_porownawcza, ".pdf")
  ggplot2::ggsave(
    nazwa_pliku_pdf,
    plot = gg2,
    width = 29.7,
    height = 21,
    units = "cm"
  )
}
/SuperFarmerDA/R/wizytowka_ADPD.R
no_license
ambroziakd/KursRprojekt2
R
false
false
7,191
r
#' Strategy poster ("business card")
#'
#' \code{wizytowka_ADPD} renders a one-page PDF summary for a SuperFarmer
#' strategy, comparing it against two reference strategies (maxrabbit and
#' yolo). The displayed strategy name is derived automatically from the
#' argument, assuming the \code{strategia_<name>} naming pattern.
#'
#' @param strategia A SuperFarmer game strategy (as accepted by
#'   \code{SuperFarmerDA::badaj_gre}).
#' @param N Number of simulated games per strategy.
#'
#' @return A PDF file with the strategy's results, written to the working
#'   directory as \code{ADPD_wizytowka_<name>.pdf}.
#'
#' @examples
#' wizytowka_ADPD(strategia_DKA,10)
#'
#' @export
wizytowka_ADPD <- function( strategia, N = 10000){
  # Simulate N games per strategy; badaj_gre() returns a list whose first
  # element is the vector of game lengths (number of rounds).
  wynik_strategii_max <- SuperFarmerDA::badaj_gre(SuperFarmerDA::strategia_maxrabbit , N)[[1]]
  wynik_strategii_badanej <- SuperFarmerDA::badaj_gre(strategia, N)[[1]]
  wynik_strategii_yolo <- SuperFarmerDA::badaj_gre(SuperFarmerRCNK::strategia_yolo, N)[[1]]

  strategia_max <- "maxrabbit"
  strategia_min <- "yolo"

  # Recover the strategy's display name from the unevaluated argument name,
  # stripping everything up to and including the "strategia_" prefix.
  strategia_porownawcza <- deparse(substitute(strategia))
  strategia_porownawcza <-
    substr(
      strategia_porownawcza,
      nchar("strategia_") + regexpr("strategia_", strategia_porownawcza)[[1]],
      nchar(strategia_porownawcza)
    )

  # One column of game lengths per strategy; melt to long format for ggplot.
  czasy_gier <- data.frame(wynik_strategii_yolo, wynik_strategii_max, wynik_strategii_badanej)
  colnames(czasy_gier) <- c(strategia_min, strategia_max, strategia_porownawcza)
  dane_do_wykresow <- reshape2::melt(czasy_gier)

  # ---- ECDF plot of game length, one curve per strategy ----
  wykres_dystrybuanty <- ggplot2::ggplot(dane_do_wykresow, ggplot2::aes(x = value)) +
    ggplot2::stat_ecdf(ggplot2::aes(colour = variable)) +
    ggplot2::theme_bw() +
    ggplot2::labs(
      list(
        colour = "Nazwa strategii",
        x = "Czas trwania gry (liczba rund)",
        y = "P(X<t)",
        title = "Dystrybuanty czasu gry"
      )
    ) +
    ggplot2::scale_y_continuous(labels = scales::percent, expand = c(0, 0)) +
    ggplot2::scale_x_continuous(expand = c(0, 0), limits = c(0, 252)) +
    ggplot2::theme(legend.position = "none")

  # ---- violin + box plot of game length per strategy ----
  wykres_pudelkowy <- ggplot2::ggplot(dane_do_wykresow, ggplot2::aes(variable, value)) +
    ggplot2::geom_violin(ggplot2::aes(fill = variable)) +
    ggplot2::geom_boxplot(width = 0.15) +
    ggplot2::theme_bw() +
    ggplot2::scale_y_continuous(expand = c(0, 0), limits = c(0, 252)) +
    ggplot2::labs(
      list(
        fill = "",
        x = "",
        y = "Czas trwania gry (liczba rund)",
        title = "Wykres skrzypcowo-pudełkowy"
      )
    ) +
    ggplot2::coord_flip() +
    ggplot2::theme(legend.position = "none")

  # ---- summary-statistics table ----
  # NOTE(review): library() inside a function attaches dplyr globally (a side
  # effect); it is needed here for %>% and is left unchanged.
  library(dplyr)
  statystyki_strategii <- dane_do_wykresow %>%
    dplyr::group_by(variable) %>%
    dplyr::summarise(
      min = min(value),
      '10proc' = round(quantile(value, prob = 0.1)),
      mediana = median(value),
      srednia = round(mean(value), 2),
      '90proc' = round(quantile(value, prob = 0.9)),
      '95proc' = round(quantile(value, prob = 0.95)),
      max = max(value)
    )
  colnames(statystyki_strategii)[1] <- "strategia"

  # ---- bucket game lengths into fixed intervals ----
  dyskretne_czasy_gier <- apply(czasy_gier, 1:2, function(x)
    cut(
      x,
      breaks = c(0, 15, 30, 45, 60, 75, 90, 105, 120, Inf),
      labels = c(
        " <0,15>",
        " <16,30>",
        " <31,45>",
        " <46-60>",
        " <61-75>",
        " <76-90>",
        " <91-105>",
        "<106-120>",
        "ponad120"
      )
    ))
  # Per-strategy share of games falling in each interval (row proportions).
  d1 <- reshape2::melt(dyskretne_czasy_gier)[-1]
  d2 <- data.frame(prop.table(table(d1$Var2, d1$value), 1))
  d2 <- as.data.frame(d2)

  # ---- grouped bar chart of interval shares ----
  wykres_przedzialy <- ggplot2::ggplot(d2, ggplot2::aes(Var2, Freq, fill = Var1)) +
    ggplot2::geom_bar(position = "dodge", stat = "identity") +
    ggplot2::scale_y_continuous(expand = c(0, 0), labels = scales::percent) +
    ggplot2::theme(legend.position = "none") +
    ggplot2::labs(
      list(x = "Czas trwania gry (liczba rund)",
           y = "Procent w przedziale",
           title = "Porownanie wynikow strategii dla przedziałow czasu gry")
    ) +
    ggplot2::theme_bw() +
    ggplot2::theme(legend.position = "none")

  # ---- page layout ----
  # Table theme: one fill colour per strategy row, alternating alpha.
  t1 <- gridExtra::ttheme_default(core = list(bg_params = list(
    fill = c(rep(c(
      "#F8766D", "#5CF058", "#619CFF"
    ))),
    alpha = rep(c(1, 0.5), each = 3)
  )))
  wykres_tabela <- gridExtra::tableGrob(statystyki_strategii, rows = NULL, theme = t1)

  # Author box (top-left corner of the poster).
  autorzy <- paste("Autorzy:", "Anna Dymowska" , "Przemyslaw Dycha", sep = "\n")
  autorzy <- grid::grid.text(
    autorzy,
    gp = grid::gpar(fontsize = 10),
    x = grid::unit(0.05, "npc"),
    just = "left"
  )

  # Poster title, built from the detected strategy name.
  tytul_plakatu <- paste0("Strategia_", strategia_porownawcza)
  tytul <- grid::grid.text(
    tytul_plakatu,
    gp = grid::gpar(
      fontsize = 25,
      col = 'cornflowerblue',
      fontface = 'bold'
    ),
    vjust = 0.5,
    hjust = 0.6
  )

  # Introductory paragraph (Polish text, rendered on the poster itself).
  opis_slowny <- paste0(
    "Przedstawiamy statystyki strategii ",
    strategia_porownawcza ,
    " - strategii gry SuperFarmer dla jednego gracza z liczbą powtorzeń ",
    N,
    ". Dla porownania tej strategii na wykresach zamieściliśmy strategie: max_rabbit oraz yolo - najszybszą i najwolniejszą strategię znalezioną przez nas wśrod strategii, ktore przygotowali studenci na zajęcia z Programowania w R i wizualizacji danych w roku 2016/17."
  )
  opis_slowny <- RGraphics::splitTextGrob(opis_slowny, gp = grid::gpar(fontsize = 12))

  # Assemble the poster on a 24-column grid; the numbers in layout_matrix
  # refer to the grobs in the order they are passed to grid.arrange().
  gg2 <- gridExtra::grid.arrange(
    autorzy,
    tytul,
    wykres_pudelkowy,
    opis_slowny,
    wykres_tabela,
    wykres_dystrybuanty,
    wykres_przedzialy,
    ncol = 2,
    layout_matrix = rbind(
      c(1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(NA, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(NA, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(NA, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7),
      c(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7)
    )
  )

  # ---- save the assembled poster as an A4-landscape PDF ----
  nazwa_pliku_pdf <- paste0("ADPD_wizytowka_", strategia_porownawcza, ".pdf")
  ggplot2::ggsave(
    nazwa_pliku_pdf,
    plot = gg2,
    width = 29.7,
    height = 21,
    units = "cm"
  )
}
# reseedTest.R -- build a stratified (class-balanced) holdout set.
# To be run after reseedBoth.R.  Relies on globals created upstream:
#   pairedname     - suffix identifying the variable pair under study
#   holdoutSetSize - fraction of each class to reserve for the holdout
# and on data frames named combined.<pairedname>, combined.ones.<pairedname>
# and combined.zeros.<pairedname>.
# Side effects: assigns combined.holdoutSet.ones.<pairedname> and
# combined.holdoutSet.zeros.<pairedname>.

holdoutSet <- c()
set <- c()
set.ones <- c()
set.zeros <- c()

# Look up the pre-built frames by constructed name.  get() replaces the
# original eval(parse(text = paste(...))) -- same behaviour, no evaluation
# of code assembled from strings.
set <- get(paste0("combined.", pairedname))
set.ones <- get(paste0("combined.ones.", pairedname))
set.zeros <- get(paste0("combined.zeros.", pairedname))

holdoutSet.ones <- c()
holdoutSet.zeros <- c()

# Half of the *average* class size.
# BUG FIX: the original mean(nrow(set.ones), nrow(set.zeros)) passed the
# second count to mean()'s `trim` argument and returned nrow(set.ones)
# unchanged; the two counts must be combined with c() first.
avgCountHalved <- mean(c(nrow(set.ones), nrow(set.zeros))) / 2

# Per-class holdout quota: the smaller of the two class quotas, so both
# classes contribute equally many rows per pass (class balancing).
minFactor <- min(round(holdoutSetSize * nrow(set.ones)),
                 round(holdoutSetSize * nrow(set.zeros)))

# How many whole quotas fit into the "average" quota; the fractional part
# is drawn in a final partial pass below.
reloopFactor <- minFactor / round(holdoutSetSize * avgCountHalved)
remainder <- reloopFactor - floor(reloopFactor)

# Full passes: each pass samples minFactor rows (by numeric row name) from
# each class and appends them.  Sampling and binding in one step keeps
# repeated draws distinct (rbind stores duplicates as .1/.2 row names).
if (floor(reloopFactor) > 0) {
  for (loops in 1:floor(reloopFactor)) {
    holdoutSet.ones <- rbind(holdoutSet.ones,
      set[sample(c(as.numeric(rownames((set.ones)))), minFactor), ])   # 1's for training
    holdoutSet.zeros <- rbind(holdoutSet.zeros,
      set[sample(c(as.numeric(rownames((set.zeros)))), minFactor), ])  # as many 0's as 1's
  }
}

# Partial pass for the fractional remainder (sample() truncates a
# non-integer size to an integer).
holdoutSet.ones <- rbind(holdoutSet.ones,
  set[sample(c(as.numeric(rownames((set.ones)))), minFactor * remainder), ])
holdoutSet.zeros <- rbind(holdoutSet.zeros,
  set[sample(c(as.numeric(rownames((set.zeros)))), minFactor * remainder), ])

# Publish the per-class holdout frames under pair-specific names.
# (Don't call resampleMC.R from within this script -- the name list does
# not exist yet; this file only builds the index.)
assign(paste0("combined.holdoutSet.ones.", pairedname), holdoutSet.ones)
assign(paste0("combined.holdoutSet.zeros.", pairedname), holdoutSet.zeros)
/reseedTest.R
no_license
thistleknot/Capstone-577
R
false
false
4,265
r
# reseedTest.R -- build a stratified (class-balanced) holdout set.
# To be run after reseedBoth.R.  Relies on globals created upstream:
#   pairedname     - suffix identifying the variable pair under study
#   holdoutSetSize - fraction of each class to reserve for the holdout
# and on data frames named combined.<pairedname>, combined.ones.<pairedname>
# and combined.zeros.<pairedname>.
# Side effects: assigns combined.holdoutSet.ones.<pairedname> and
# combined.holdoutSet.zeros.<pairedname>.

holdoutSet <- c()
set <- c()
set.ones <- c()
set.zeros <- c()

# Look up the pre-built frames by constructed name.  get() replaces the
# original eval(parse(text = paste(...))) -- same behaviour, no evaluation
# of code assembled from strings.
set <- get(paste0("combined.", pairedname))
set.ones <- get(paste0("combined.ones.", pairedname))
set.zeros <- get(paste0("combined.zeros.", pairedname))

holdoutSet.ones <- c()
holdoutSet.zeros <- c()

# Half of the *average* class size.
# BUG FIX: the original mean(nrow(set.ones), nrow(set.zeros)) passed the
# second count to mean()'s `trim` argument and returned nrow(set.ones)
# unchanged; the two counts must be combined with c() first.
avgCountHalved <- mean(c(nrow(set.ones), nrow(set.zeros))) / 2

# Per-class holdout quota: the smaller of the two class quotas, so both
# classes contribute equally many rows per pass (class balancing).
minFactor <- min(round(holdoutSetSize * nrow(set.ones)),
                 round(holdoutSetSize * nrow(set.zeros)))

# How many whole quotas fit into the "average" quota; the fractional part
# is drawn in a final partial pass below.
reloopFactor <- minFactor / round(holdoutSetSize * avgCountHalved)
remainder <- reloopFactor - floor(reloopFactor)

# Full passes: each pass samples minFactor rows (by numeric row name) from
# each class and appends them.  Sampling and binding in one step keeps
# repeated draws distinct (rbind stores duplicates as .1/.2 row names).
if (floor(reloopFactor) > 0) {
  for (loops in 1:floor(reloopFactor)) {
    holdoutSet.ones <- rbind(holdoutSet.ones,
      set[sample(c(as.numeric(rownames((set.ones)))), minFactor), ])   # 1's for training
    holdoutSet.zeros <- rbind(holdoutSet.zeros,
      set[sample(c(as.numeric(rownames((set.zeros)))), minFactor), ])  # as many 0's as 1's
  }
}

# Partial pass for the fractional remainder (sample() truncates a
# non-integer size to an integer).
holdoutSet.ones <- rbind(holdoutSet.ones,
  set[sample(c(as.numeric(rownames((set.ones)))), minFactor * remainder), ])
holdoutSet.zeros <- rbind(holdoutSet.zeros,
  set[sample(c(as.numeric(rownames((set.zeros)))), minFactor * remainder), ])

# Publish the per-class holdout frames under pair-specific names.
# (Don't call resampleMC.R from within this script -- the name list does
# not exist yet; this file only builds the index.)
assign(paste0("combined.holdoutSet.ones.", pairedname), holdoutSet.ones)
assign(paste0("combined.holdoutSet.zeros.", pairedname), holdoutSet.zeros)
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. #' @include R6.R #' @title class arrow::ChunkedArray #' #' @usage NULL #' @format NULL #' @docType class #' #' @section Methods: #' #' TODO #' #' @rdname arrow__ChunkedArray #' @name arrow__ChunkedArray `arrow::ChunkedArray` <- R6Class("arrow::ChunkedArray", inherit = `arrow::Object`, public = list( length = function() ChunkedArray__length(self), chunk = function(i) shared_ptr(`arrow::Array`, ChunkedArray__chunk(self, i)), as_vector = function() ChunkedArray__as_vector(self), Slice = function(offset, length = NULL){ if (is.null(length)) { shared_ptr(`arrow::ChunkedArray`, ChunkArray__Slice1(self, offset)) } else { shared_ptr(`arrow::ChunkedArray`, ChunkArray__Slice2(self, offset, length)) } }, cast = function(target_type, safe = TRUE, options = cast_options(safe)) { assert_that(inherits(target_type, "arrow::DataType")) assert_that(inherits(options, "arrow::compute::CastOptions")) shared_ptr(`arrow::ChunkedArray`, ChunkedArray__cast(self, target_type, options)) } ), active = list( null_count = function() ChunkedArray__null_count(self), num_chunks = function() ChunkedArray__num_chunks(self), chunks = function() map(ChunkedArray__chunks(self), shared_ptr, class = `arrow::Array`), type = function() 
`arrow::DataType`$dispatch(ChunkedArray__type(self)) ) ) #' create an [arrow::ChunkedArray][arrow__ChunkedArray] from various R vectors #' #' @param \dots Vectors to coerce #' @param type currently ignored #' #' @importFrom rlang list2 %||% #' @export chunked_array <- function(..., type = NULL){ shared_ptr(`arrow::ChunkedArray`, ChunkedArray__from_list(list2(...), type)) }
/r/R/ChunkedArray.R
permissive
SeppPenner/arrow
R
false
false
2,484
r
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. #' @include R6.R #' @title class arrow::ChunkedArray #' #' @usage NULL #' @format NULL #' @docType class #' #' @section Methods: #' #' TODO #' #' @rdname arrow__ChunkedArray #' @name arrow__ChunkedArray `arrow::ChunkedArray` <- R6Class("arrow::ChunkedArray", inherit = `arrow::Object`, public = list( length = function() ChunkedArray__length(self), chunk = function(i) shared_ptr(`arrow::Array`, ChunkedArray__chunk(self, i)), as_vector = function() ChunkedArray__as_vector(self), Slice = function(offset, length = NULL){ if (is.null(length)) { shared_ptr(`arrow::ChunkedArray`, ChunkArray__Slice1(self, offset)) } else { shared_ptr(`arrow::ChunkedArray`, ChunkArray__Slice2(self, offset, length)) } }, cast = function(target_type, safe = TRUE, options = cast_options(safe)) { assert_that(inherits(target_type, "arrow::DataType")) assert_that(inherits(options, "arrow::compute::CastOptions")) shared_ptr(`arrow::ChunkedArray`, ChunkedArray__cast(self, target_type, options)) } ), active = list( null_count = function() ChunkedArray__null_count(self), num_chunks = function() ChunkedArray__num_chunks(self), chunks = function() map(ChunkedArray__chunks(self), shared_ptr, class = `arrow::Array`), type = function() 
`arrow::DataType`$dispatch(ChunkedArray__type(self)) ) ) #' create an [arrow::ChunkedArray][arrow__ChunkedArray] from various R vectors #' #' @param \dots Vectors to coerce #' @param type currently ignored #' #' @importFrom rlang list2 %||% #' @export chunked_array <- function(..., type = NULL){ shared_ptr(`arrow::ChunkedArray`, ChunkedArray__from_list(list2(...), type)) }
library(msde) ### Name: sde.drift ### Title: SDE drift function. ### Aliases: sde.drift ### ** Examples # load Heston's model hmod <- sde.examples("hest") # single input x0 <- c(X = log(1000), Z = 0.1) theta <- c(alpha = 0.1, gamma = 1, beta = 0.8, sigma = 0.6, rho = -0.8) sde.drift(model = hmod, x = x0, theta = theta) # multiple inputs nreps <- 10 Theta <- apply(t(replicate(nreps,theta)),2,jitter) X0 <- apply(t(replicate(nreps,x0)),2,jitter) sde.drift(model = hmod, x = X0, theta = Theta)
/data/genthat_extracted_code/msde/examples/sde.drift.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
503
r
library(msde) ### Name: sde.drift ### Title: SDE drift function. ### Aliases: sde.drift ### ** Examples # load Heston's model hmod <- sde.examples("hest") # single input x0 <- c(X = log(1000), Z = 0.1) theta <- c(alpha = 0.1, gamma = 1, beta = 0.8, sigma = 0.6, rho = -0.8) sde.drift(model = hmod, x = x0, theta = theta) # multiple inputs nreps <- 10 Theta <- apply(t(replicate(nreps,theta)),2,jitter) X0 <- apply(t(replicate(nreps,x0)),2,jitter) sde.drift(model = hmod, x = X0, theta = Theta)
# clear the decks rm(list = ls()) source("~/smr_report/globals.r") pth <- path(str_path_repo, "svg", "reconciliation_outlines_b.svg") pth_fixed <- path(str_path_repo, "svg", "reconciliation_outlines_b_fixed.svg") txt <- readLines(pth) vct_0 <- gsub("st0", "sz0", txt) vct_1 <- gsub("st1", "sz1", vct_0) vct_2 <- gsub("st2", "sz2", vct_1) vct_3 <- gsub("st3", "sz3", vct_2) vct_4 <- gsub("st4", "sz4", vct_3) vct_5 <- gsub("st5", "sz5", vct_4) vct_6 <- gsub("st6", "sz6", vct_5) writeLines(vct_6, pth_fixed) pth <- path(str_path_repo, "svg", "header_mockup_v006.svg") pth_fixed <- path(str_path_repo, "svg", "header_mockup_v006_fixed.svg") setwd("/Users/mark/Documents/TEMP_not_on_cloud/australia_postcode_map/codekit_builder") txt <- readLines("header_mockup_v006.svg") vct_0 <- gsub("st0", "sx0", txt) vct_1 <- gsub("st1", "sx1", vct_0) vct_2 <- gsub("st2", "sx2", vct_1) vct_3 <- gsub("st3", "sx3", vct_2) vct_4 <- gsub("st4", "sx4", vct_3) vct_5 <- gsub("st5", "sx5", vct_4) vct_6 <- gsub("st6", "sx6", vct_5) vct_7 <- gsub("st7", "sx7", vct_6) vct_8 <- gsub("st8", "sx8", vct_7) vct_9 <- gsub("st9", "sx9", vct_8) writeLines(vct_9, "header_mockup_v006_fixed.svg") setwd("/Users/mark/Documents/TEMP_not_on_cloud/australia_postcode_map/codekit_workings") pth <- path(str_path_repo, "svg", "footer_ul_fin.svg") pth_fixed <- path(str_path_repo, "svg", "footer_ul_fin_fixed.svg") txt <- readLines("footer_ul_fin.svg") vct_0 <- gsub("st0", "zx0", txt) vct_1 <- gsub("st1", "zx1", vct_0) vct_2 <- gsub("st2", "zx2", vct_1) vct_3 <- gsub("st3", "zx3", vct_2) vct_4 <- gsub("st4", "zx4", vct_3) vct_5 <- gsub("st5", "zx5", vct_4) vct_6 <- gsub("st6", "zx6", vct_5) vct_7 <- gsub("st7", "zx7", vct_6) vct_8 <- gsub("st8", "zx8", vct_7) vct_9 <- gsub("st9", "zx9", vct_8) vct_10 <- gsub("st10", "zx10", vct_8) writeLines(vct_9, "footer_ul_fin_fixed.svg")
/codekit_workings/process_svg.r
no_license
thefactmachine/australia_postcodes
R
false
false
1,872
r
# clear the decks rm(list = ls()) source("~/smr_report/globals.r") pth <- path(str_path_repo, "svg", "reconciliation_outlines_b.svg") pth_fixed <- path(str_path_repo, "svg", "reconciliation_outlines_b_fixed.svg") txt <- readLines(pth) vct_0 <- gsub("st0", "sz0", txt) vct_1 <- gsub("st1", "sz1", vct_0) vct_2 <- gsub("st2", "sz2", vct_1) vct_3 <- gsub("st3", "sz3", vct_2) vct_4 <- gsub("st4", "sz4", vct_3) vct_5 <- gsub("st5", "sz5", vct_4) vct_6 <- gsub("st6", "sz6", vct_5) writeLines(vct_6, pth_fixed) pth <- path(str_path_repo, "svg", "header_mockup_v006.svg") pth_fixed <- path(str_path_repo, "svg", "header_mockup_v006_fixed.svg") setwd("/Users/mark/Documents/TEMP_not_on_cloud/australia_postcode_map/codekit_builder") txt <- readLines("header_mockup_v006.svg") vct_0 <- gsub("st0", "sx0", txt) vct_1 <- gsub("st1", "sx1", vct_0) vct_2 <- gsub("st2", "sx2", vct_1) vct_3 <- gsub("st3", "sx3", vct_2) vct_4 <- gsub("st4", "sx4", vct_3) vct_5 <- gsub("st5", "sx5", vct_4) vct_6 <- gsub("st6", "sx6", vct_5) vct_7 <- gsub("st7", "sx7", vct_6) vct_8 <- gsub("st8", "sx8", vct_7) vct_9 <- gsub("st9", "sx9", vct_8) writeLines(vct_9, "header_mockup_v006_fixed.svg") setwd("/Users/mark/Documents/TEMP_not_on_cloud/australia_postcode_map/codekit_workings") pth <- path(str_path_repo, "svg", "footer_ul_fin.svg") pth_fixed <- path(str_path_repo, "svg", "footer_ul_fin_fixed.svg") txt <- readLines("footer_ul_fin.svg") vct_0 <- gsub("st0", "zx0", txt) vct_1 <- gsub("st1", "zx1", vct_0) vct_2 <- gsub("st2", "zx2", vct_1) vct_3 <- gsub("st3", "zx3", vct_2) vct_4 <- gsub("st4", "zx4", vct_3) vct_5 <- gsub("st5", "zx5", vct_4) vct_6 <- gsub("st6", "zx6", vct_5) vct_7 <- gsub("st7", "zx7", vct_6) vct_8 <- gsub("st8", "zx8", vct_7) vct_9 <- gsub("st9", "zx9", vct_8) vct_10 <- gsub("st10", "zx10", vct_8) writeLines(vct_9, "footer_ul_fin_fixed.svg")
#' Show residuals from groupwise means #' #' Makes a specific format of plot that shows groupwise means and the residuals from those means. #' #' @param data A data frame containing the data to be used #' @param formula A formula `y ~ gp`` specifying the grouping variable (`gp`) and the response variable (`y`) Formulas with a second grouping variable (e.g. `y ~ gp1 + gp2`) are also accepted. #' @param alpha Numeric 0 to 1. Level of opaqueness. #' @param seed Optional integer seed for the random numbers used in jittering. Use if you want reproducibility. #' @export draw_groupwise_mod <- function(data = NULL, formula, alpha = 0.2, seed = NULL, ...) { mod <- lm(formula, data = data) Dat <- df_from_formula(formula, data = data) response_var <- (Dat %@% "left_vars")[1] explanatory_var <- (Dat %@% "right_vars")[1] if (is.na(explanatory_var)) { # there was a formula like y ~ 1 explanatory_var <- "all_in_same_group" Dat$all_in_same_group <- "" } if (! is.numeric(Dat[[response_var]])) stop("Response variable must be numeric.") # if (is.numeric(Dat[[explanatory_var]])) stop("Explanatory variable must be categorical.") if ( ! is.factor(Dat[[explanatory_var]])) Dat[[explanatory_var]] <- as.factor(Dat[[explanatory_var]]) if (!is.null(seed) && is_integer(seed)) set.seed(seed) for_jitter <- runif(nrow(Dat), max = 0.3, min = -0.3) Dat[["horiz_position"]] <- as.numeric(Dat[[explanatory_var]]) + for_jitter # draw the model line extending a bit beyond the jitter. Dat[["horiz_position_2"]] <- as.numeric(Dat[[explanatory_var]]) + 1.2 * 0.3 * sign(for_jitter) Dat$model_output <- mosaicModel::mod_eval(mod, data = data, append = FALSE)$model_output Dat$residuals <- Dat[[response_var]] - Dat$model_output P <- gf_blank(as.formula(fill_template("{{y}} ~ {{x}}", x = explanatory_var, y = response_var)), data = Dat, show.legend = FALSE) # If there's a color dots <- list(...) 
if (length(Dat %@% "right_vars") > 1) { color_var <- (Dat %@% "right_vars")[2] Dat[["color_variable"]] <- paste(Dat[[color_var]], Dat[[explanatory_var]]) for_color <- as.formula(fill_template("~ {{x}}", x = color_var)) show.legend = TRUE } else { for_color <- as.formula(fill_template("~ {{x}}", x = explanatory_var)) Dat[["color_variable"]] <- as.factor(Dat[[explanatory_var]]) show.legend = FALSE } # Can we break up the model by the 2nd variable. Use just the formula. P2 <- P %>% gf_point(as.formula(fill_template("{{y}} ~ {{x}}", x = "horiz_position", y = response_var)), data = Dat, color = for_color, alpha = alpha, show.legend = show.legend) %>% gf_line(as.formula(fill_template("{{y}} ~ {{x}}", x = "horiz_position_2", y = "model_output")), data = Dat, color = for_color, group = ~ color_variable, alpha = 1, show.legend = show.legend) P2 <- P2 %>% gf_segment(as.formula(fill_template("{{y1}} + {{y2}} ~ {{x}} + {{x}}", y1 = response_var, y2 = "model_output", x = "horiz_position")), data = Dat, color = for_color, alpha = alpha, show.legend = FALSE) list(plot = P2, matrix = Dat, model = mod) # return the data frame with all the information. For brushing add ons }
/R/draw_groupwise_mod.R
no_license
dtkaplan/mdsint
R
false
false
3,281
r
#' Show residuals from groupwise means #' #' Makes a specific format of plot that shows groupwise means and the residuals from those means. #' #' @param data A data frame containing the data to be used #' @param formula A formula `y ~ gp`` specifying the grouping variable (`gp`) and the response variable (`y`) Formulas with a second grouping variable (e.g. `y ~ gp1 + gp2`) are also accepted. #' @param alpha Numeric 0 to 1. Level of opaqueness. #' @param seed Optional integer seed for the random numbers used in jittering. Use if you want reproducibility. #' @export draw_groupwise_mod <- function(data = NULL, formula, alpha = 0.2, seed = NULL, ...) { mod <- lm(formula, data = data) Dat <- df_from_formula(formula, data = data) response_var <- (Dat %@% "left_vars")[1] explanatory_var <- (Dat %@% "right_vars")[1] if (is.na(explanatory_var)) { # there was a formula like y ~ 1 explanatory_var <- "all_in_same_group" Dat$all_in_same_group <- "" } if (! is.numeric(Dat[[response_var]])) stop("Response variable must be numeric.") # if (is.numeric(Dat[[explanatory_var]])) stop("Explanatory variable must be categorical.") if ( ! is.factor(Dat[[explanatory_var]])) Dat[[explanatory_var]] <- as.factor(Dat[[explanatory_var]]) if (!is.null(seed) && is_integer(seed)) set.seed(seed) for_jitter <- runif(nrow(Dat), max = 0.3, min = -0.3) Dat[["horiz_position"]] <- as.numeric(Dat[[explanatory_var]]) + for_jitter # draw the model line extending a bit beyond the jitter. Dat[["horiz_position_2"]] <- as.numeric(Dat[[explanatory_var]]) + 1.2 * 0.3 * sign(for_jitter) Dat$model_output <- mosaicModel::mod_eval(mod, data = data, append = FALSE)$model_output Dat$residuals <- Dat[[response_var]] - Dat$model_output P <- gf_blank(as.formula(fill_template("{{y}} ~ {{x}}", x = explanatory_var, y = response_var)), data = Dat, show.legend = FALSE) # If there's a color dots <- list(...) 
if (length(Dat %@% "right_vars") > 1) { color_var <- (Dat %@% "right_vars")[2] Dat[["color_variable"]] <- paste(Dat[[color_var]], Dat[[explanatory_var]]) for_color <- as.formula(fill_template("~ {{x}}", x = color_var)) show.legend = TRUE } else { for_color <- as.formula(fill_template("~ {{x}}", x = explanatory_var)) Dat[["color_variable"]] <- as.factor(Dat[[explanatory_var]]) show.legend = FALSE } # Can we break up the model by the 2nd variable. Use just the formula. P2 <- P %>% gf_point(as.formula(fill_template("{{y}} ~ {{x}}", x = "horiz_position", y = response_var)), data = Dat, color = for_color, alpha = alpha, show.legend = show.legend) %>% gf_line(as.formula(fill_template("{{y}} ~ {{x}}", x = "horiz_position_2", y = "model_output")), data = Dat, color = for_color, group = ~ color_variable, alpha = 1, show.legend = show.legend) P2 <- P2 %>% gf_segment(as.formula(fill_template("{{y1}} + {{y2}} ~ {{x}} + {{x}}", y1 = response_var, y2 = "model_output", x = "horiz_position")), data = Dat, color = for_color, alpha = alpha, show.legend = FALSE) list(plot = P2, matrix = Dat, model = mod) # return the data frame with all the information. For brushing add ons }
#' Download NLA data from USEPA #' #' @description Retrieves NLA flat files. #' #' @import rappdirs #' @importFrom utils download.file #' @export #' #' @param use_rappdirs logical write files to operating system data directories at the location returned by \code{\link[rappdirs]{user_data_dir}}. #' @param year numeric choice of 2007 or 2012. #' @param local_path folder path to raw downloads from `nla_get` #' @inheritParams base::saveRDS #' #' @examples \donttest{ #' nla_get(2012) #' } nla_get <- function(year, use_rappdirs = FALSE, local_path = tempdir(), compress = "xz"){ valid_year(year) if(use_rappdirs & local_path != tempdir()){ stop("Set either use_rappdirs or local_path but not both.") } if(use_rappdirs){ local_path <- nla_path() dir.create(local_path, showWarnings = FALSE) } dir.create(local_path, showWarnings = FALSE) if(year == 2007){ baseurl <- "https://www.epa.gov/sites/production/files/2013-09/" files <- "nla2007_alldata.zip" invisible(lapply(files, function(x) get_if_not_exists(paste0(baseurl, x), file.path(local_path, x)))) unzip(file.path(local_path, files), exdir = local_path) } if(year == 2012){ baseurl <- "https://www.epa.gov/sites/production/files/2016-11/" files <- c( "nla2012_algaltoxins_08192016.csv", "nla2012_atrazine_08192016.csv", "nla2012_bentcond_08232016.csv", "nla2012_wide_benthic_08232016.csv", "nla2012_bentmet.csv", "nla2012_benttaxa_wide_10272015.csv", "nla2012_chla_wide.csv") invisible(lapply(files, function(x) get_if_not_exists(paste0(baseurl, x), file.path(local_path, x)))) baseurl <- "https://www.epa.gov/sites/production/files/2016-12/" files <- c( "nla2012_wide_phab_08232016_0.csv", "nla2012_wide_phabmet_10202016.csv", "nla2012_phytocnt_02122014.csv", "nla2012_phytotaxa_wide_10272015.csv", "nla2012_wide_profile_08232016.csv", "nla2012_secchi_08232016.csv", "nla2012_topsedhg_08192016.csv", "nla2012_wide_siteinfo_08232016.csv", "nla2012_waterchem_wide.csv", "nla2012_zoopcond_08192016.csv", "nla2012_zoopmets_08192016.csv", 
"nla2012_zooptaxa_wide_10272015.csv") invisible(lapply(files, function(x) get_if_not_exists(paste0(baseurl, x), file.path(local_path, x)))) } invisible(nla_compile(year, use_rappdirs, local_path, compress = compress)) }
/R/get.R
no_license
waternk/nlaR
R
false
false
2,534
r
#' Download NLA data from USEPA #' #' @description Retrieves NLA flat files. #' #' @import rappdirs #' @importFrom utils download.file #' @export #' #' @param use_rappdirs logical write files to operating system data directories at the location returned by \code{\link[rappdirs]{user_data_dir}}. #' @param year numeric choice of 2007 or 2012. #' @param local_path folder path to raw downloads from `nla_get` #' @inheritParams base::saveRDS #' #' @examples \donttest{ #' nla_get(2012) #' } nla_get <- function(year, use_rappdirs = FALSE, local_path = tempdir(), compress = "xz"){ valid_year(year) if(use_rappdirs & local_path != tempdir()){ stop("Set either use_rappdirs or local_path but not both.") } if(use_rappdirs){ local_path <- nla_path() dir.create(local_path, showWarnings = FALSE) } dir.create(local_path, showWarnings = FALSE) if(year == 2007){ baseurl <- "https://www.epa.gov/sites/production/files/2013-09/" files <- "nla2007_alldata.zip" invisible(lapply(files, function(x) get_if_not_exists(paste0(baseurl, x), file.path(local_path, x)))) unzip(file.path(local_path, files), exdir = local_path) } if(year == 2012){ baseurl <- "https://www.epa.gov/sites/production/files/2016-11/" files <- c( "nla2012_algaltoxins_08192016.csv", "nla2012_atrazine_08192016.csv", "nla2012_bentcond_08232016.csv", "nla2012_wide_benthic_08232016.csv", "nla2012_bentmet.csv", "nla2012_benttaxa_wide_10272015.csv", "nla2012_chla_wide.csv") invisible(lapply(files, function(x) get_if_not_exists(paste0(baseurl, x), file.path(local_path, x)))) baseurl <- "https://www.epa.gov/sites/production/files/2016-12/" files <- c( "nla2012_wide_phab_08232016_0.csv", "nla2012_wide_phabmet_10202016.csv", "nla2012_phytocnt_02122014.csv", "nla2012_phytotaxa_wide_10272015.csv", "nla2012_wide_profile_08232016.csv", "nla2012_secchi_08232016.csv", "nla2012_topsedhg_08192016.csv", "nla2012_wide_siteinfo_08232016.csv", "nla2012_waterchem_wide.csv", "nla2012_zoopcond_08192016.csv", "nla2012_zoopmets_08192016.csv", 
"nla2012_zooptaxa_wide_10272015.csv") invisible(lapply(files, function(x) get_if_not_exists(paste0(baseurl, x), file.path(local_path, x)))) } invisible(nla_compile(year, use_rappdirs, local_path, compress = compress)) }
# 3 Pre-Processing -------------------------------------------------------- # https://topepo.github.io/caret/pre-processing.html # 3.1 Creating Dummy Variables library(earth) data(etitanic) head(model.matrix(survived ~ ., data = etitanic)) # Using dummyVars dummies <- dummyVars(survived ~ ., data = etitanic) head(predict(dummies, newdata = etitanic)) # 3.2 Zero- and Near Zero-Variance Predictors data(mdrr) data.frame(table(mdrrDescr$nR11)) nzv <- nearZeroVar(mdrrDescr, saveMetrics= TRUE) nzv[nzv$nzv,][1:10,] dim(mdrrDescr) nzv <- nearZeroVar(mdrrDescr) filteredDescr <- mdrrDescr[, -nzv] dim(filteredDescr) # 3.3 Identifying Correlated Predictors descrCor <- cor(filteredDescr) highCorr <- sum(abs(descrCor[upper.tri(descrCor)]) > .999) descrCor <- cor(filteredDescr) summary(descrCor[upper.tri(descrCor)]) highlyCorDescr <- findCorrelation(descrCor, cutoff = .75) filteredDescr <- filteredDescr[,-highlyCorDescr] descrCor2 <- cor(filteredDescr) summary(descrCor2[upper.tri(descrCor2)]) # 3.4 Linear Dependencies ltfrDesign <- matrix(0, nrow=6, ncol=6) ltfrDesign[,1] <- c(1, 1, 1, 1, 1, 1) ltfrDesign[,2] <- c(1, 1, 1, 0, 0, 0) ltfrDesign[,3] <- c(0, 0, 0, 1, 1, 1) ltfrDesign[,4] <- c(1, 0, 0, 1, 0, 0) ltfrDesign[,5] <- c(0, 1, 0, 0, 1, 0) ltfrDesign[,6] <- c(0, 0, 1, 0, 0, 1) comboInfo <- findLinearCombos(ltfrDesign) comboInfo ltfrDesign[, -comboInfo$remove] # 3.5 The preProcess Function # 3.6 Centering and Scaling set.seed(96) inTrain <- sample(seq(along = mdrrClass), length(mdrrClass)/2) training <- filteredDescr[inTrain,] test <- filteredDescr[-inTrain,] trainMDRR <- mdrrClass[inTrain] testMDRR <- mdrrClass[-inTrain] preProcValues <- preProcess(training, method = c("center", "scale")) trainTransformed <- predict(preProcValues, training) testTransformed <- predict(preProcValues, test) # 3.7 Imputation # 3.8 Transforming Predictors plotSubset <- data.frame(scale(mdrrDescr[, c("nC", "X4v")])) xyplot(nC ~ X4v, data = plotSubset, groups = mdrrClass, auto.key = list(columns = 
2)) # After the spatial sign: transformed <- spatialSign(plotSubset) transformed <- as.data.frame(transformed) xyplot(nC ~ X4v, data = transformed, groups = mdrrClass, auto.key = list(columns = 2)) # Box-Cox transformation preProcValues2 <- preProcess(training, method = "BoxCox") trainBC <- predict(preProcValues2, training) testBC <- predict(preProcValues2, test) preProcValues2 # 3.9 Putting It All Together library(AppliedPredictiveModeling) data(schedulingData) str(schedulingData) pp_hpc <- preProcess(schedulingData[, -8], method = c("center", "scale", "YeoJohnson")) pp_hpc transformed <- predict(pp_hpc, newdata = schedulingData[, -8]) head(transformed) mean(schedulingData$NumPending == 0) pp_no_nzv <- preProcess(schedulingData[, -8], method = c("center", "scale", "YeoJohnson", "nzv")) pp_no_nzv predict(pp_no_nzv, newdata = schedulingData[1:6, -8]) # 3.10 Class Distance Calculations centroids <- classDist(trainBC, trainMDRR) distances <- predict(centroids, testBC) distances <- as.data.frame(distances) head(distances) xyplot(dist.Active ~ dist.Inactive, data = distances, groups = testMDRR, auto.key = list(columns = 2))
/caretBook/Chapter 03 Pre-Processing.R
no_license
PyRPy/ISLR_R
R
false
false
3,392
r
# 3 Pre-Processing -------------------------------------------------------- # https://topepo.github.io/caret/pre-processing.html # 3.1 Creating Dummy Variables library(earth) data(etitanic) head(model.matrix(survived ~ ., data = etitanic)) # Using dummyVars dummies <- dummyVars(survived ~ ., data = etitanic) head(predict(dummies, newdata = etitanic)) # 3.2 Zero- and Near Zero-Variance Predictors data(mdrr) data.frame(table(mdrrDescr$nR11)) nzv <- nearZeroVar(mdrrDescr, saveMetrics= TRUE) nzv[nzv$nzv,][1:10,] dim(mdrrDescr) nzv <- nearZeroVar(mdrrDescr) filteredDescr <- mdrrDescr[, -nzv] dim(filteredDescr) # 3.3 Identifying Correlated Predictors descrCor <- cor(filteredDescr) highCorr <- sum(abs(descrCor[upper.tri(descrCor)]) > .999) descrCor <- cor(filteredDescr) summary(descrCor[upper.tri(descrCor)]) highlyCorDescr <- findCorrelation(descrCor, cutoff = .75) filteredDescr <- filteredDescr[,-highlyCorDescr] descrCor2 <- cor(filteredDescr) summary(descrCor2[upper.tri(descrCor2)]) # 3.4 Linear Dependencies ltfrDesign <- matrix(0, nrow=6, ncol=6) ltfrDesign[,1] <- c(1, 1, 1, 1, 1, 1) ltfrDesign[,2] <- c(1, 1, 1, 0, 0, 0) ltfrDesign[,3] <- c(0, 0, 0, 1, 1, 1) ltfrDesign[,4] <- c(1, 0, 0, 1, 0, 0) ltfrDesign[,5] <- c(0, 1, 0, 0, 1, 0) ltfrDesign[,6] <- c(0, 0, 1, 0, 0, 1) comboInfo <- findLinearCombos(ltfrDesign) comboInfo ltfrDesign[, -comboInfo$remove] # 3.5 The preProcess Function # 3.6 Centering and Scaling set.seed(96) inTrain <- sample(seq(along = mdrrClass), length(mdrrClass)/2) training <- filteredDescr[inTrain,] test <- filteredDescr[-inTrain,] trainMDRR <- mdrrClass[inTrain] testMDRR <- mdrrClass[-inTrain] preProcValues <- preProcess(training, method = c("center", "scale")) trainTransformed <- predict(preProcValues, training) testTransformed <- predict(preProcValues, test) # 3.7 Imputation # 3.8 Transforming Predictors plotSubset <- data.frame(scale(mdrrDescr[, c("nC", "X4v")])) xyplot(nC ~ X4v, data = plotSubset, groups = mdrrClass, auto.key = list(columns = 
2)) # After the spatial sign: transformed <- spatialSign(plotSubset) transformed <- as.data.frame(transformed) xyplot(nC ~ X4v, data = transformed, groups = mdrrClass, auto.key = list(columns = 2)) # Box-Cox transformation preProcValues2 <- preProcess(training, method = "BoxCox") trainBC <- predict(preProcValues2, training) testBC <- predict(preProcValues2, test) preProcValues2 # 3.9 Putting It All Together library(AppliedPredictiveModeling) data(schedulingData) str(schedulingData) pp_hpc <- preProcess(schedulingData[, -8], method = c("center", "scale", "YeoJohnson")) pp_hpc transformed <- predict(pp_hpc, newdata = schedulingData[, -8]) head(transformed) mean(schedulingData$NumPending == 0) pp_no_nzv <- preProcess(schedulingData[, -8], method = c("center", "scale", "YeoJohnson", "nzv")) pp_no_nzv predict(pp_no_nzv, newdata = schedulingData[1:6, -8]) # 3.10 Class Distance Calculations centroids <- classDist(trainBC, trainMDRR) distances <- predict(centroids, testBC) distances <- as.data.frame(distances) head(distances) xyplot(dist.Active ~ dist.Inactive, data = distances, groups = testMDRR, auto.key = list(columns = 2))
set.seed(151) n <- 95 X5 <- runif(n, -10, 10) X2 <- sample(c('blue pill', 'red pill'), n, replace=TRUE) X9 <- sample(c(0, 1), n, replace=TRUE) X3 <- sample(c('one fish', 'two fish', 'red fish', 'blue fish'), n, replace=TRUE) X6 <- sample(c('steak', 'chicken', 'mystery meat'), n, replace=TRUE) X8 <- runif(n, -10, 10) red <- ifelse(X2 == 'red pill', 1, 0) X9 <- runif(n, -10, 10) low <- ifelse(X9 < 0, 1, 0) beta0 <- -5 beta1 <- .6 beta2 <- .03 beta3 <- -.01 beta4 <- 3 beta5 <- -1.2 beta6 <- -.06 beta7 <- .02 beta8 <- 1 beta9 <- .1 beta10 <- 0 beta11 <- 0 beta12 <- 0 beta13 <- -.3 beta14 <- 0 beta15 <- 0 beta16 <- -10 beta17 <- -.1 beta18 <- .1 beta19 <- 0 sigma <- 5 sigma <- 2.9 Y <- beta0 + beta1*X5 + beta2*X5^2 + beta3*X5^3 + beta4*red + beta5*X5*red + beta6*X5^2*red + beta7*X5^3*red + rnorm(n, 0, sigma) myData <- data.frame( Y = Y , X1 = rbeta(n,5,2) , X2 = X2 , X3 = X3 , X4 = rf(n, 2, 5) , X5 = X5 , X6 = X6 , X7 = sample(c('right','left'), n, replace=TRUE) , X8 = runif(n, -20, 5) , X9 = X9 , X10 = rbinom(n, 30, .5) ) plot(Y ~ X5, data = myData) # pairs(myData) mylm <- lm(Y ~ X5 + X2 + X5:X2 + I(X5^2) + I(X5^3) + I(X5^2):X2 + I(X5^3):X2, data = myData) summary(mylm) b <- coef(mylm) curve(b[1] +b[2]*x + b[4]*x^2 + b[5]*x^3, add=TRUE, col=palette()[1], lwd=4) curve((b[1] + b[3]) + (b[2] + b[6])*x + (b[4] + b[7])*x^2 + (b[5] + b[8])*x^3, add=TRUE, col=palette()[1], lwd=4) # abline(beta8, beta9) # abline((beta8 + beta12), (beta9 + beta13)) # lm1 <- lm(Y ~ X4, data = myData) # # lm(formula = Y ~ X1, data = myData) # # palette(c("skyblue","orange")) # pairs(cbind(R=lm1$res, fit=lm1$fit, myData), pch=16, cex=1, panel=panel.smooth, col.smooth="skyblue4", col=factor(myData$X1)) write_csv(myData, './../Data/DallinsData.csv')
/SkillsQuizzes/modelSandbox.R
no_license
dallincoons/statistics_notebook
R
false
false
2,045
r
set.seed(151) n <- 95 X5 <- runif(n, -10, 10) X2 <- sample(c('blue pill', 'red pill'), n, replace=TRUE) X9 <- sample(c(0, 1), n, replace=TRUE) X3 <- sample(c('one fish', 'two fish', 'red fish', 'blue fish'), n, replace=TRUE) X6 <- sample(c('steak', 'chicken', 'mystery meat'), n, replace=TRUE) X8 <- runif(n, -10, 10) red <- ifelse(X2 == 'red pill', 1, 0) X9 <- runif(n, -10, 10) low <- ifelse(X9 < 0, 1, 0) beta0 <- -5 beta1 <- .6 beta2 <- .03 beta3 <- -.01 beta4 <- 3 beta5 <- -1.2 beta6 <- -.06 beta7 <- .02 beta8 <- 1 beta9 <- .1 beta10 <- 0 beta11 <- 0 beta12 <- 0 beta13 <- -.3 beta14 <- 0 beta15 <- 0 beta16 <- -10 beta17 <- -.1 beta18 <- .1 beta19 <- 0 sigma <- 5 sigma <- 2.9 Y <- beta0 + beta1*X5 + beta2*X5^2 + beta3*X5^3 + beta4*red + beta5*X5*red + beta6*X5^2*red + beta7*X5^3*red + rnorm(n, 0, sigma) myData <- data.frame( Y = Y , X1 = rbeta(n,5,2) , X2 = X2 , X3 = X3 , X4 = rf(n, 2, 5) , X5 = X5 , X6 = X6 , X7 = sample(c('right','left'), n, replace=TRUE) , X8 = runif(n, -20, 5) , X9 = X9 , X10 = rbinom(n, 30, .5) ) plot(Y ~ X5, data = myData) # pairs(myData) mylm <- lm(Y ~ X5 + X2 + X5:X2 + I(X5^2) + I(X5^3) + I(X5^2):X2 + I(X5^3):X2, data = myData) summary(mylm) b <- coef(mylm) curve(b[1] +b[2]*x + b[4]*x^2 + b[5]*x^3, add=TRUE, col=palette()[1], lwd=4) curve((b[1] + b[3]) + (b[2] + b[6])*x + (b[4] + b[7])*x^2 + (b[5] + b[8])*x^3, add=TRUE, col=palette()[1], lwd=4) # abline(beta8, beta9) # abline((beta8 + beta12), (beta9 + beta13)) # lm1 <- lm(Y ~ X4, data = myData) # # lm(formula = Y ~ X1, data = myData) # # palette(c("skyblue","orange")) # pairs(cbind(R=lm1$res, fit=lm1$fit, myData), pch=16, cex=1, panel=panel.smooth, col.smooth="skyblue4", col=factor(myData$X1)) write_csv(myData, './../Data/DallinsData.csv')
# run_analysis.R -- Getting & Cleaning Data course project.
# Downloads the UCI HAR data set, keeps only the mean()/std() measurements,
# merges the train/test splits, attaches readable activity labels, and
# writes a tidy data set of per-subject / per-activity averages.

packages <- c("data.table", "reshape2")
# library() (unlike require()) stops immediately if a package is missing,
# instead of silently returning FALSE and failing later.
for (pkg in packages) {
  library(pkg, character.only = TRUE)
}

path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, file.path(path, "dataFiles.zip"))
unzip(zipfile = "dataFiles.zip")

# Activity id -> name lookup table and the full feature list.
activityLabels <- fread(file.path(path, "UCI HAR Dataset/activity_labels.txt"),
                        col.names = c("classLabels", "activityName"))
features <- fread(file.path(path, "UCI HAR Dataset/features.txt"),
                  col.names = c("index", "featureNames"))

# Keep only the mean() and std() measurements; strip the parentheses so
# the names are valid, tidy column names.
featuresWanted <- grep("(mean|std)\\(\\)", features[, featureNames])
measurements <- features[featuresWanted, featureNames]
measurements <- gsub('[()]', '', measurements)

# Train split.
# BUG FIX: the original used `with = TRUE`, which evaluates `featuresWanted`
# as a j-expression and returns the index vector itself instead of selecting
# columns; `with = FALSE` (as already used for the test split) performs the
# intended column selection.
train <- fread(file.path(path, "UCI HAR Dataset/train/X_train.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(train, colnames(train), measurements)
trainActivities <- fread(file.path(path, "UCI HAR Dataset/train/Y_train.txt"),
                         col.names = c("Activity"))
trainSubjects <- fread(file.path(path, "UCI HAR Dataset/train/subject_train.txt"),
                       col.names = c("SubjectNum"))
train <- cbind(trainSubjects, trainActivities, train)

# Test split (same processing as the train split).
test <- fread(file.path(path, "UCI HAR Dataset/test/X_test.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(test, colnames(test), measurements)
testActivities <- fread(file.path(path, "UCI HAR Dataset/test/Y_test.txt"),
                        col.names = c("Activity"))
testSubjects <- fread(file.path(path, "UCI HAR Dataset/test/subject_test.txt"),
                      col.names = c("SubjectNum"))
test <- cbind(testSubjects, testActivities, test)

# Merge the splits, attach readable labels, and compute the mean of every
# measurement per subject and activity via melt/dcast.
combined <- rbind(train, test)
combined[["Activity"]] <- factor(combined[, Activity],
                                 levels = activityLabels[["classLabels"]],
                                 labels = activityLabels[["activityName"]])
combined[["SubjectNum"]] <- as.factor(combined[, SubjectNum])
combined <- reshape2::melt(data = combined, id = c("SubjectNum", "Activity"))
combined <- reshape2::dcast(data = combined, SubjectNum + Activity ~ variable,
                            fun.aggregate = mean)
data.table::fwrite(x = combined, file = "tidyData.txt", quote = FALSE)
/run_analysis.R
no_license
swapnil110399/getting-and-cleaning-data
R
false
false
2,313
r
packages <- c("data.table", "reshape2") sapply(packages, require, character.only=TRUE, quietly=TRUE) path <- getwd() url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip" download.file(url, file.path(path, "dataFiles.zip")) unzip(zipfile = "dataFiles.zip") activityLabels <- fread(file.path(path, "UCI HAR Dataset/activity_labels.txt") , col.names = c("classLabels", "activityName")) features <- fread(file.path(path, "UCI HAR Dataset/features.txt") , col.names = c("index", "featureNames")) featuresWanted <- grep("(mean|std)\\(\\)", features[, featureNames]) measurements <- features[featuresWanted, featureNames] measurements <- gsub('[()]', '', measurements) train <- fread(file.path(path, "UCI HAR Dataset/train/X_train.txt"))[, featuresWanted, with = TRUE] data.table::setnames(train, colnames(train), measurements) trainActivities <- fread(file.path(path, "UCI HAR Dataset/train/Y_train.txt") , col.names = c("Activity")) trainSubjects <- fread(file.path(path, "UCI HAR Dataset/train/subject_train.txt") , col.names = c("SubjectNum")) train <- cbind(trainSubjects, trainActivities, train) test <- fread(file.path(path, "UCI HAR Dataset/test/X_test.txt"))[, featuresWanted, with = FALSE] data.table::setnames(test, colnames(test), measurements) testActivities <- fread(file.path(path, "UCI HAR Dataset/test/Y_test.txt") , col.names = c("Activity")) testSubjects <- fread(file.path(path, "UCI HAR Dataset/test/subject_test.txt") , col.names = c("SubjectNum")) test <- cbind(testSubjects, testActivities, test) combined <- rbind(train, test) combined[["Activity"]] <- factor(combined[, Activity] , levels = activityLabels[["classLabels"]] , labels = activityLabels[["activityName"]]) combined[["SubjectNum"]] <- as.factor(combined[, SubjectNum]) combined <- reshape2::melt(data = combined, id = c("SubjectNum", "Activity")) combined <- reshape2::dcast(data = combined, SubjectNum + Activity ~ variable, fun.aggregate = mean) data.table::fwrite(x = 
combined, file = "tidyData.txt", quote = FALSE)
#
# Explore Binaca Geetmala songs.
# The data was extracted from the website:
# https://www.keepaliveusa.com/binacageetmala/year/1953 where they have nicely
# compiled songs from 1953 to 1993
#

library(dplyr)
library(tidyr)
library(purrr)
library(ggplot2)

# load songs data (one row per song; musicdir/singers/actors are list-columns)
dfr_songs = readRDS(file = "data/songs_all.rds")

df_songs = dfr_songs
df_songs = df_songs %>% mutate(decade = floor(year / 10) * 10)

# check number of songs by year
df_songs %>% count(year) %>%
  ggplot() + geom_line(aes(x = year, y = n)) + theme_bw()

# keep the top n_top songs from each year.
# (Generalized from the hard-coded `rank <= 1` so deeper chart positions
# can be analyzed by changing a single constant; default reproduces the
# original top-1 behavior.)
n_top = 1
df_songs = df_songs %>% filter(rank <= n_top)

# top music directors (collapse the list-column to one string per song)
df_musicdirs = df_songs %>%
  mutate(musicdir = map(musicdir, ~paste(.x, collapse = "; ")))
df_musicdirs_cnt = df_musicdirs %>% count(musicdir, sort = TRUE)
df_musicdirs_dec_cnt = df_musicdirs %>% count(decade, musicdir) %>%
  arrange(decade, desc(n))

# top singers (one row per singer per song after unnest)
df_singers = df_songs %>%
  select(decade, year, rank, song_name, singers) %>%
  unnest(singers)
df_singers_cnt = df_singers %>% count(singers, sort = TRUE)
df_singers_dec_cnt = df_singers %>% count(decade, singers) %>%
  arrange(decade, desc(n))

# spot-check: songs whose singer field mentions "Ila"
tmpqc = df_songs %>% filter(grepl("Ila", singers))

# top actors
df_actors = df_songs %>%
  select(year, rank, song_name, actors) %>%
  unnest(actors)
df_actors_cnt = df_actors %>% count(actors, sort = TRUE)
df_actors %>% filter(grepl("Jaffrey", actors))
/binaca_geetmala/explore_songs.R
permissive
notesofdabbler/misc_projects
R
false
false
1,400
r
# # Explore Binaca Geetmala songs. # The data was extracted from the website: # https://www.keepaliveusa.com/binacageetmala/year/1953 where they have nicely # compiled songs from 1953 to 1993 # library(dplyr) library(tidyr) library(purrr) library(ggplot2) # load songs data dfr_songs = readRDS(file = "data/songs_all.rds") df_songs = dfr_songs df_songs = df_songs %>% mutate(decade = floor(year / 10) * 10) # check number of songs by year df_songs %>% count(year) %>% ggplot() + geom_line(aes(x = year, y = n)) + theme_bw() # keep top n songs from each year df_songs = df_songs %>% filter(rank <= 1) # top music directors df_musicdirs = df_songs %>% mutate(musicdir = map(musicdir, ~paste(.x, collapse = "; "))) df_musicdirs_cnt = df_musicdirs %>% count(musicdir, sort = TRUE) df_musicdirs_dec_cnt = df_musicdirs %>% count(decade, musicdir) %>% arrange(decade, desc(n)) # top singers df_singers = df_songs %>% select(decade, year, rank, song_name, singers) %>% unnest(singers) df_singers_cnt = df_singers %>% count(singers, sort = TRUE) df_singers_dec_cnt = df_singers %>% count(decade, singers) %>% arrange(decade, desc(n)) tmpqc = df_songs %>% filter(grepl("Ila", singers)) # top actors df_actors = df_songs %>% select(year, rank, song_name, actors) %>% unnest(actors) df_actors_cnt = df_actors %>% count(actors, sort = TRUE) df_actors %>% filter(grepl("Jaffrey", actors))
testthat::context("Missing arguments")

# Every entropart entropy/diversity method must fail with an informative
# message when called without its abundance/probability argument, and the
# beta-entropy methods must additionally fail when the "expected"
# distribution argument is missing.  The expected message depends on the
# S3 class the method dispatches on.
testthat::test_that("Expected error messages are returned", {
  testthat::skip_on_cran()

  # Generics with methods for AbdVector / integer / ProbaVector / numeric.
  # NOTE(review): PhyloEntropy.AbdVector was missing from the original
  # AbdVector list although it is tested for every other class; it is
  # included here for consistency.
  generics <- c("Diversity", "Dqz", "Hqz", "HqzBeta", "GenSimpson",
                "GenSimpsonD", "Hurlbert", "HurlbertD", "PhyloBetaEntropy",
                "PhyloDiversity", "PhyloEntropy", "Rao", "Richness",
                "Shannon", "ShannonBeta", "Simpson", "SimpsonBeta",
                "Tsallis", "TsallisBeta")
  # Beta-entropy generics additionally require an "expected" distribution.
  beta_generics <- c("HqzBeta", "PhyloBetaEntropy", "ShannonBeta",
                     "SimpsonBeta", "TsallisBeta")

  # Helper: the method <generic>.<suffix> must raise an error matching
  # `msg` when called with the argument list `args` (an empty list checks
  # the missing-NorP error; list(0) supplies NorP so the missing-NorPexp
  # error is triggered instead).
  expect_missing_arg_error <- function(gens, suffix, msg, args = list()) {
    for (gen in gens) {
      f <- utils::getFromNamespace(paste0(gen, ".", suffix), "entropart")
      testthat::expect_error(do.call(f, args), msg, ignore.case = TRUE)
    }
  }

  AbdVectorError <- "An argument NorP or Ns must be provided."
  expect_missing_arg_error(generics, "AbdVector", AbdVectorError)
  expect_missing_arg_error(generics, "integer", AbdVectorError)

  AbdVectorExpError <- "An argument NorPexp or Nexp must be provided."
  expect_missing_arg_error(beta_generics, "AbdVector", AbdVectorExpError, list(0))
  expect_missing_arg_error(beta_generics, "integer", AbdVectorExpError, list(0))

  ProbaVectorError <- "An argument NorP or Ps must be provided."
  expect_missing_arg_error(generics, "ProbaVector", ProbaVectorError)

  ProbaVectorExpError <- "An argument NorPexp or Pexp must be provided."
  expect_missing_arg_error(beta_generics, "ProbaVector", ProbaVectorExpError, list(0))

  numericError <- "An argument NorP or Ps or Ns must be provided."
  expect_missing_arg_error(generics, "numeric", numericError)

  numericExpError <- "An argument NorPexp or Pexp or Nexp must be provided."
  expect_missing_arg_error(beta_generics, "numeric", numericExpError, list(0))
})
/tests/testthat/testMissingNorP.R
no_license
yangxhcaf/entropart
R
false
false
9,777
r
testthat::context("Missing arguments") # Check Missing NorP testthat::test_that("Expected error messages are returned", { testthat::skip_on_cran() AbdVectorError <- "An argument NorP or Ns must be provided." testthat::expect_error(entropart:::Diversity.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Dqz.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Hqz.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::HqzBeta.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::GenSimpson.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::GenSimpsonD.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Hurlbert.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::HurlbertD.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloBetaEntropy.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloDiversity.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Rao.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Richness.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Shannon.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::ShannonBeta.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Simpson.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::SimpsonBeta.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Tsallis.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::TsallisBeta.AbdVector(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Diversity.integer(), AbdVectorError, ignore.case = TRUE) 
testthat::expect_error(entropart:::Dqz.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Hqz.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::HqzBeta.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::GenSimpson.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::GenSimpsonD.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Hurlbert.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::HurlbertD.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloBetaEntropy.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloDiversity.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloEntropy.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Rao.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Richness.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Shannon.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::ShannonBeta.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Simpson.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::SimpsonBeta.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Tsallis.integer(), AbdVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::TsallisBeta.integer(), AbdVectorError, ignore.case = TRUE) AbdVectorExpError <- "An argument NorPexp or Nexp must be provided." 
testthat::expect_error(entropart:::HqzBeta.AbdVector(0), AbdVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloBetaEntropy.AbdVector(0), AbdVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::ShannonBeta.AbdVector(0), AbdVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::SimpsonBeta.AbdVector(0), AbdVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::TsallisBeta.AbdVector(0), AbdVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::HqzBeta.integer(0), AbdVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloBetaEntropy.integer(0), AbdVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::ShannonBeta.integer(0), AbdVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::SimpsonBeta.integer(0), AbdVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::TsallisBeta.integer(0), AbdVectorExpError, ignore.case = TRUE) ProbaVectorError <- "An argument NorP or Ps must be provided." 
testthat::expect_error(entropart:::Diversity.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Dqz.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Hqz.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::HqzBeta.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::GenSimpson.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::GenSimpsonD.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Hurlbert.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::HurlbertD.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloBetaEntropy.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloDiversity.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloEntropy.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Rao.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Richness.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Shannon.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::ShannonBeta.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Simpson.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::SimpsonBeta.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::Tsallis.ProbaVector(), ProbaVectorError, ignore.case = TRUE) testthat::expect_error(entropart:::TsallisBeta.ProbaVector(), ProbaVectorError, ignore.case = TRUE) ProbaVectorExpError <- "An argument NorPexp or Pexp must be provided." 
testthat::expect_error(entropart:::HqzBeta.ProbaVector(0), ProbaVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloBetaEntropy.ProbaVector(0), ProbaVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::ShannonBeta.ProbaVector(0), ProbaVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::SimpsonBeta.ProbaVector(0), ProbaVectorExpError, ignore.case = TRUE) testthat::expect_error(entropart:::TsallisBeta.ProbaVector(0), ProbaVectorExpError, ignore.case = TRUE) numericError <- "An argument NorP or Ps or Ns must be provided." testthat::expect_error(entropart:::Diversity.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::Dqz.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::Hqz.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::HqzBeta.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::GenSimpson.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::GenSimpsonD.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::Hurlbert.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::HurlbertD.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloBetaEntropy.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloDiversity.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloEntropy.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::Rao.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::Richness.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::Shannon.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::ShannonBeta.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::Simpson.numeric(), numericError, 
ignore.case = TRUE) testthat::expect_error(entropart:::SimpsonBeta.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::Tsallis.numeric(), numericError, ignore.case = TRUE) testthat::expect_error(entropart:::TsallisBeta.numeric(), numericError, ignore.case = TRUE) numericExpError <- "An argument NorPexp or Pexp or Nexp must be provided." testthat::expect_error(entropart:::HqzBeta.numeric(0), numericExpError, ignore.case = TRUE) testthat::expect_error(entropart:::PhyloBetaEntropy.numeric(0), numericExpError, ignore.case = TRUE) testthat::expect_error(entropart:::ShannonBeta.numeric(0), numericExpError, ignore.case = TRUE) testthat::expect_error(entropart:::SimpsonBeta.numeric(0), numericExpError, ignore.case = TRUE) testthat::expect_error(entropart:::TsallisBeta.numeric(0), numericExpError, ignore.case = TRUE) })
# Contingency-table analysis model (CATDAP: categorical data analysis
# by AIC).  The first section works one 2x2 example by hand; the second
# wraps the computation in a reusable function and applies it to the
# `tips` data set.

# --- Build test data: a 2x2 gender-by-choice table ---
test1 <- data.frame(gender=rep(1,749), which=rep(1,749))
test2 <- data.frame(gender=rep(1,83), which=rep(2,83))
test3 <- data.frame(gender=rep(2,445), which=rep(1,445))
test4 <- data.frame(gender=rep(2,636), which=rep(2,636))
test <- rbind(test1, test2, test3, test4)
table(test)
# which
# gender 1 2
# 1 749 83
# 2 445 636

# --- AIC computation by hand ---
target <- "which"
var <- "gender"
df <- test[c(target, var)]
n <- nrow(df)
c1 <- nrow(unique(df[target]))  # number of target categories
c2 <- nrow(unique(df[var]))     # number of explanatory categories
# Joint frequency table with n_ij * log(n_ij) per cell.
df.freq <- as.data.frame(table(df))
LogFreq <- log(df.freq$Freq)
df.freq <- cbind(df.freq, LogFreq)
df.freq <- cbind(df.freq, FreqLogFerq=df.freq$Freq*LogFreq)
# Marginal frequency tables, with the same transformation applied.
df.target <- as.data.frame(table(df[target]))
df.var <- as.data.frame(table(df[var]))
df.sum.freq <- rbind(df.target, df.var)
LogFreq <- log(df.sum.freq$Freq)
df.sum.freq <- cbind(df.sum.freq, LogFreq)
df.sum.freq <- cbind(df.sum.freq, FreqLogFerq=df.sum.freq$Freq*LogFreq)
# AIC of the independence model (aic.0) vs. the dependence model (aic.1);
# a negative difference aic.1 - aic.0 means `var` helps explain `target`.
aic.0 <- (-2) * (sum(df.sum.freq$FreqLogFerq) - 2 * n * log(n)) + 2 * (c1 + c2 - 2)
# [1] 5156.274
aic.1 <- (-2) * (sum(df.freq$FreqLogFerq) - n * log(n)) + 2 * (c1 * c2 - 1)
# [1] 4630.196
aic <- aic.1 - aic.0
# [1] -526.0777

###########################################################
# R implementation of catdap
###########################################################
# Apply to the tips data set (shipped with reshape2)
library("reshape2")
library("arules")

### Build the tips data for analysis
# Create the target variable: 1 = male
tips.test <- transform(tips, flg=ifelse(tips$sex=="Male",1,0))
# Continuous variables must be binned into categories.
# NOTE(review): categorize() is not a base function and arules exports
# discretize(), not categorize() -- confirm which package supplies it.
tips.test <- transform(tips.test, total_bill=categorize(tips.test$total_bill, quantile = TRUE))
tips.test <- transform(tips.test, tip=categorize(tips.test$tip, quantile = TRUE))

# Specify the target variable
target <- "flg"
# Specify the explanatory variables
varList <- c("total_bill", "tip", "smoker", "day", "time", "size")

# catdap: for each candidate explanatory variable, compute the AIC
# difference between the dependence and independence contingency-table
# models with respect to `target`.
#
# Args:
#   data    data.frame containing `target` and every name in `varList`
#           as categorical columns.
#   target  name (string) of the target column.
#   varList character vector of candidate explanatory column names.
#
# Returns: list with one element, aic.rank -- a data.frame with columns
#   var and aic (more negative = more explanatory power).
#
# Side effects: prints each variable name and its per-variable AIC row,
# as the original implementation did.
catdap <- function(data, target, varList) {
  per_var <- lapply(varList, function(var) {
    print(var)
    df <- data[c(target, var)]
    n <- nrow(df)
    c1 <- nrow(unique(df[target]))
    c2 <- nrow(unique(df[var]))
    # Joint cell frequencies and n_ij * log(n_ij).
    df.freq <- as.data.frame(table(df))
    LogFreq <- log(df.freq$Freq)
    df.freq <- cbind(df.freq, LogFreq)
    df.freq <- cbind(df.freq, FreqLogFerq=df.freq$Freq*LogFreq)
    # Marginal frequencies, same transformation.
    df.target <- as.data.frame(table(df[target]))
    df.var <- as.data.frame(table(df[var]))
    df.sum.freq <- rbind(df.target, df.var)
    LogFreq <- log(df.sum.freq$Freq)
    df.sum.freq <- cbind(df.sum.freq, LogFreq)
    df.sum.freq <- cbind(df.sum.freq, FreqLogFerq=df.sum.freq$Freq*LogFreq)
    aic.0 <- (-2) * (sum(df.sum.freq$FreqLogFerq) - 2 * n * log(n)) + 2 * (c1 + c2 - 2)
    aic.1 <- (-2) * (sum(df.freq$FreqLogFerq) - n * log(n)) + 2 * (c1 * c2 - 1)
    aic.df <- data.frame(var = var, aic = aic.1 - aic.0)
    print(aic.df)
    aic.df
  })
  # Replaces the original assign() + eval(parse(text = ...)) construction,
  # which was fragile and polluted the enclosing environment; the combined
  # data.frame is identical.
  aic.rank <- do.call(rbind, per_var)
  return(list(aic.rank = aic.rank))
}

aic <- catdap(tips.test, "flg", varList)
aic$aic.rank[order(aic$aic.rank[,"aic"], decreasing=FALSE),]
# var aic
# 5 time -8.028523
# 4 day -7.194401
# 1 total_bill -2.443896
# 3 smoker 1.998065
# 2 tip 3.901094
# 6 size 4.119833
/catdap.R
no_license
stockedge/CATDAP
R
false
false
3,482
r
# 分割表解析モデル # テストデータ作成 test1 <- data.frame(gender=rep(1,749), which=rep(1,749)) test2 <- data.frame(gender=rep(1,83), which=rep(2,83)) test3 <- data.frame(gender=rep(2,445), which=rep(1,445)) test4 <- data.frame(gender=rep(2,636), which=rep(2,636)) test <- rbind(test1,test2) test <- rbind(test,test3) test <- rbind(test,test4) table(test) # which # gender 1 2 # 1 749 83 # 2 445 636 # AIC計算 target <- "which" var <- "gender" df <- test[c(target,var)] n <- nrow(df) c1 <- nrow(unique(df[target])) c2 <- nrow(unique(df[var])) df.freq <- as.data.frame(table(df)) LogFreq <- log(df.freq$Freq) df.freq <- cbind(df.freq, LogFreq) df.freq <- cbind(df.freq, FreqLogFerq=df.freq$Freq*LogFreq) df.target <- as.data.frame(table(df[target])) df.var <- as.data.frame(table(df[var])) df.sum.freq <- rbind(df.target, df.var) LogFreq <- log(df.sum.freq$Freq) df.sum.freq <- cbind(df.sum.freq, LogFreq) df.sum.freq <- cbind(df.sum.freq, FreqLogFerq=df.sum.freq$Freq*LogFreq) aic.0 <- (-2) * (sum(df.sum.freq$FreqLogFerq) - 2 * n * log(n)) + 2 * (c1 + c2 - 2) # [1] 5156.274 aic.1 <- (-2) * (sum(df.freq$FreqLogFerq) - n * log(n)) + 2 * (c1 * c2 - 1) # [1] 4630.196 aic <- aic.1 - aic.0 # [1] -526.0777 ########################################################### # catdapのR実装 ########################################################### # tipsデータに適用 library("reshape2") library("arules") ### 分析用tipsデータ作成 # ターゲット変数の作成 tips.test <- transform(tips, flg=ifelse(tips$sex=="Male",1,0)) # 連続値はカテゴリ化が必要 tips.test <- transform(tips.test, total_bill=categorize(tips.test$total_bill, quantile = TRUE)) tips.test <- transform(tips.test, tip=categorize(tips.test$tip, quantile = TRUE)) # ターゲット変数指定 target <- "flg" # 説明変数の指定 varList <- c("total_bill", "tip", "smoker", "day", "time", "size") # catdap関数 catdap <- function(data,target,varList) { for(var in varList) { df <- data print(var) df <- df[c(target,var)] n <- nrow(df) c1 <- nrow(unique(df[target])) c2 <- nrow(unique(df[var])) df.freq <- as.data.frame(table(df)) LogFreq 
<- log(df.freq$Freq) df.freq <- cbind(df.freq, LogFreq) df.freq <- cbind(df.freq, FreqLogFerq=df.freq$Freq*LogFreq) df.target <- as.data.frame(table(df[target])) df.var <- as.data.frame(table(df[var])) df.sum.freq <- rbind(df.target, df.var) LogFreq <- log(df.sum.freq$Freq) df.sum.freq <- cbind(df.sum.freq, LogFreq) df.sum.freq <- cbind(df.sum.freq, FreqLogFerq=df.sum.freq$Freq*LogFreq) aic.0 <- (-2) * (sum(df.sum.freq$FreqLogFerq) - 2 * n * log(n)) + 2 * (c1 + c2 - 2) aic.1 <- (-2) * (sum(df.freq$FreqLogFerq) - n * log(n)) + 2 * (c1 * c2 - 1) aic <- aic.1 - aic.0 aic.df <- data.frame(var=var, aic=aic) print(aic.df) assign(paste("aic.df.", var, sep=""), aic.df) } eval(parse(text = paste("aic.rank <- list(", paste("aic.df.", varList, sep="", collapse = ","), ")", sep=""))) aic.rank <- Reduce(function(a,b)rbind(a,b), aic.rank) return(list(aic.rank=aic.rank)) } aic <- catdap(tips.test,"flg",varList) aic$aic.rank[order(aic$aic.rank[,"aic"], decreasing=FALSE),] # var aic # 5 time -8.028523 # 4 day -7.194401 # 1 total_bill -2.443896 # 3 smoker 1.998065 # 2 tip 3.901094 # 6 size 4.119833
# This script describes the steps required for implementing the ADAS-CogIRT # scoring methodology proposed in paper: # N. Verma, S. N. Beretvas, B. Pascual, J. C. Masdeu, M. K. Markey, "New scoring # improves the sensitivity of the Alzheimer's Disease Assessment Scale-Cognitive # subscale (ADAS-Cog) in clinical trials", Alzheimer's Research & Therapy, # 7(64), 2015. # https://alzres.biomedcentral.com/articles/10.1186/s13195-015-0151-0 # Install required packages - mirt and lme4 if ("mirt" %in% rownames(installed.packages()) == 0){install.packages('mirt')} if ("lme4" %in% rownames(installed.packages()) == 0){install.packages('lme4')} # load requried packages library(mirt) library(lme4) ############################################################################## # SECTION 1: TRAINING THE IRT MODEL USED FOR CALCULATING PATIENT LATENT TRAITS ############################################################################## # load example cross-sectional data to build IRT model (included on Git) load(file = 'Cross_section_ADAS_Data.RData') # Training IRT model using mirt package # define factor loadings (F1-memory, F2-language, F3-praxis) # All 3 traits allowed to be correlated with each other # (defined by COV = F1*F2, F2*F3, F1*F3) MIRT_F3 <- mirt.model('F1 = 1, 23-29, 35, 30 F2 = 2-8, 10-13, 31-34 F3 = 14-22 COV = F1*F2, F2*F3, F1*F3') # Define type of individual ADAS-Cog items Item_type <- c('graded', # Q1 '2PL','2PL','2PL','2PL','2PL','2PL','3PL', # Q2 Objects '2PL','2PL','2PL','2PL','2PL', # Q2 fingers '2PL','2PL','2PL', # Q3 '2PL','2PL','3PL', # Q4 '2PL','2PL','2PL', # Q5 '2PL','2PL','2PL','2PL','3PL','2PL','2PL', # Q6 'graded','graded','graded', 'graded','graded','graded') # Define interaction terms between patient variables and ADAS-Cog items # Defined based on measurement invariance analysis (refer to published paper) Q25_Gender_Recog <- mat.or.vec(35,1); Q25_Gender_Recog[3]<-1 Q210_Gender_Recog <- mat.or.vec(35,1); Q210_Gender_Recog[6]<-1 Q46_Gender_Recog <- 
mat.or.vec(35,1); Q46_Gender_Recog[19]<-1 Q11_Gender_Recog <- mat.or.vec(35,1); Q11_Gender_Recog[34]<-1 Itemdesign_F3 <- data.frame(Q25_Gender_Recog, Q210_Gender_Recog, Q46_Gender_Recog, Q11_Gender_Recog) # encode gender as 0,1 Cross_Covariates$Gender <- Cross_Covariates$Gender-1 # Train Cross-sectional IRT model # ADAS_Cross is cross-sectional ADAS-Cog data # Cross_Covariates are patient covariate terms Cross_Model_F3 <- mixedmirt(data = ADAS_Cross, covdata = Cross_Covariates, model = MIRT_F3, fixed =~ Gender:Q25_Gender_Recog + Gender:Q210_Gender_Recog + Gender:Q46_Gender_Recog + Gender:Q11_Gender_Recog, itemtype = Item_type, itemdesign = Itemdesign_F3, SE = TRUE, GenRandomPars = TRUE, rotate = "oblimin", technical = list(NCYCLES = 4000, MAXQUAD= 20000)) ######################################################################## # SECTION 2: CALCULATING PATIENT LATENT TRAITS USING ESTIMATED IRT MODEL ######################################################################## # GLMER data preparation # load the cognitive dysfunction scale parameters load(file = 'Cross_IRT_Model_Results.RData') # contains IRT model parameters load(file = 'Longitudinal_ADAS_Data.RData') # longitudinal ADAS responses dat <- HU_ADAS_Long # compile patient covariates dat_Covariates <- HU_Covariates_Long dat_Covariates$ARM <- as.character(dat_Covariates$ARM) dat_Covariates$VISCODE <- as.character(dat_Covariates$VISCODE) dat_Covariates$Gender <- as.character(dat_Covariates$Gender) dat_Covariates$RID <- as.character(dat_Covariates$RID) # unique patient ID # restructure data Theta1_Slopes <- c(); Theta2_Slopes <- c(); Theta3_Slopes <- c(); Theta_Intercept <- c(); Item_Responses <- c(); Patient_EDU <- c(); Patient_Gender <- c(); Patient_VISCODE <- c(); Patient_RID <- c(); Patient_Visits <- c(); Patient_Arm <- c(); Patient_Age <- c(); Item_Model <- c(1:35) for(it in 1:length(Item_Model)){ item <- Item_Model[it] uniq_item <- sort(unique(dat[,item])) for(unq in 2:(length(uniq_item))){ # item 
responses as separate rows Item_Responses <- c(Item_Responses, (dat[,item]>=uniq_item[unq])*1) # slopes associated with memory (theta1), language (theta2), and # praxis (theta3) latent traits Theta1_Slopes <- c(Theta1_Slopes, (F3_Slopes_Not_CM[item, 1]*(1-dat_Covariates$CM_Med) + F3_Slopes_CM[item, 1]*dat_Covariates$CM_Med)) Theta2_Slopes <- c(Theta2_Slopes, (F3_Slopes_Not_CM[item, 2]*(1-dat_Covariates$CM_Med) + F3_Slopes_CM[item, 2]*dat_Covariates$CM_Med)) Theta3_Slopes <- c(Theta3_Slopes, (F3_Slopes_Not_CM[item, 3]*(1-dat_Covariates$CM_Med) + F3_Slopes_CM[item, 3]*dat_Covariates$CM_Med)) Patient_RID <- c(Patient_RID, dat_Covariates$RID) Patient_Arm <- c(Patient_Arm, dat_Covariates$ARM) Patient_Visits <- c(Patient_Visits, dat_Covariates$VIS) Patient_Age <- c(Patient_Age, dat_Covariates$AGE) Patient_VISCODE <- c(Patient_VISCODE, dat_Covariates$VISCODE) Patient_Gender <- c(Patient_Gender, dat_Covariates$Gender) Patient_EDU <- c(Patient_EDU, dat_Covariates$EDU) # Check the pattern of encoding of gender (COMMON MISTAKE) M_e3 <- which(dat_Covariates$Gender==0) F_e3 <- which(dat_Covariates$Gender==1) temp_intercept <- mat.or.vec(length(dat[,item]),1) + NA temp_intercept[M_e3] <- F3_Intercepts_Male[item,(unq-1)] temp_intercept[F_e3] <- F3_Intercepts_Female[item,(unq-1)] # item intercepts Theta_Intercept <- c(Theta_Intercept, temp_intercept) } } # define progression rate terms Theta1_Rate <- Theta1_Slopes*Patient_Visits Theta2_Rate <- Theta2_Slopes*Patient_Visits Theta3_Rate <- Theta3_Slopes*Patient_Visits Patient_Arm[Patient_Arm=='Placebo'] <- 'APlacebo' # Create a data frame for usage with glmer GLMER_F3_Data <- data.frame(Theta1_Slopes, Theta2_Slopes, Theta3_Slopes, Theta1_Rate, Theta2_Rate, Theta3_Rate, Patient_Arm, Patient_RID, Patient_VISCODE, Patient_EDU, Patient_Age, Patient_Gender, Item_Responses, Theta_Intercept, Patient_Visits, HU200 = (Patient_Arm=='HU200ug')*1, HU400 = (Patient_Arm=='HU400ug')*1) # standardize patient education and age (zero mean, unit st. 
dev) GLMER_F3_Data$Patient_EDU <- (GLMER_F3_Data$Patient_EDU- mean(GLMER_F3_Data$Patient_EDU))/ sd(GLMER_F3_Data$Patient_EDU) GLMER_F3_Data$Patient_Age <- (GLMER_F3_Data$Patient_Age- mean(GLMER_F3_Data$Patient_Age))/ sd(GLMER_F3_Data$Patient_Age) # estimate latent traits of patients using glmer HU_GLMER_F3 <- glmer( Item_Responses ~ 0 + Theta1_Slopes + Theta2_Slopes + Theta3_Slopes + Theta1_Rate + Theta2_Rate + Theta3_Rate + I(Theta1_Slopes*HU200) + I(Theta2_Slopes*HU200) + I(Theta3_Slopes*HU200) + I(Theta1_Rate*HU200) + I(Theta2_Rate*HU200) + I(Theta3_Rate*HU200) + I(Theta1_Slopes*HU400) + I(Theta2_Slopes*HU400) + I(Theta3_Slopes*HU400) + I(Theta1_Rate*HU400) + I(Theta2_Rate*HU400) + I(Theta3_Rate*HU400) + I(Theta1_Rate*Patient_Age) + I(Theta2_Rate*Patient_Age) + I(Theta3_Rate*Patient_Age) + I(Theta1_Slopes*Patient_Age) + I(Theta2_Slopes*Patient_Age) + I(Theta3_Slopes*Patient_Age) + (0 + Theta1_Slopes + Theta2_Slopes + Theta3_Slopes + Theta1_Rate + Theta2_Rate + Theta3_Rate|Patient_RID), data = GLMER_F3_Data, offset = Theta_Intercept, family = binomial, verbose = 2, control = glmerControl(optimizer="bobyqa"))
/Codes/ADASCogIRT_code.R
no_license
nishant3115/ADAS-CogIRT-Scoring-Methodology
R
false
false
8,970
r
# This script describes the steps required for implementing the ADAS-CogIRT # scoring methodology proposed in paper: # N. Verma, S. N. Beretvas, B. Pascual, J. C. Masdeu, M. K. Markey, "New scoring # improves the sensitivity of the Alzheimer's Disease Assessment Scale-Cognitive # subscale (ADAS-Cog) in clinical trials", Alzheimer's Research & Therapy, # 7(64), 2015. # https://alzres.biomedcentral.com/articles/10.1186/s13195-015-0151-0 # Install required packages - mirt and lme4 if ("mirt" %in% rownames(installed.packages()) == 0){install.packages('mirt')} if ("lme4" %in% rownames(installed.packages()) == 0){install.packages('lme4')} # load requried packages library(mirt) library(lme4) ############################################################################## # SECTION 1: TRAINING THE IRT MODEL USED FOR CALCULATING PATIENT LATENT TRAITS ############################################################################## # load example cross-sectional data to build IRT model (included on Git) load(file = 'Cross_section_ADAS_Data.RData') # Training IRT model using mirt package # define factor loadings (F1-memory, F2-language, F3-praxis) # All 3 traits allowed to be correlated with each other # (defined by COV = F1*F2, F2*F3, F1*F3) MIRT_F3 <- mirt.model('F1 = 1, 23-29, 35, 30 F2 = 2-8, 10-13, 31-34 F3 = 14-22 COV = F1*F2, F2*F3, F1*F3') # Define type of individual ADAS-Cog items Item_type <- c('graded', # Q1 '2PL','2PL','2PL','2PL','2PL','2PL','3PL', # Q2 Objects '2PL','2PL','2PL','2PL','2PL', # Q2 fingers '2PL','2PL','2PL', # Q3 '2PL','2PL','3PL', # Q4 '2PL','2PL','2PL', # Q5 '2PL','2PL','2PL','2PL','3PL','2PL','2PL', # Q6 'graded','graded','graded', 'graded','graded','graded') # Define interaction terms between patient variables and ADAS-Cog items # Defined based on measurement invariance analysis (refer to published paper) Q25_Gender_Recog <- mat.or.vec(35,1); Q25_Gender_Recog[3]<-1 Q210_Gender_Recog <- mat.or.vec(35,1); Q210_Gender_Recog[6]<-1 Q46_Gender_Recog <- 
mat.or.vec(35,1); Q46_Gender_Recog[19]<-1 Q11_Gender_Recog <- mat.or.vec(35,1); Q11_Gender_Recog[34]<-1 Itemdesign_F3 <- data.frame(Q25_Gender_Recog, Q210_Gender_Recog, Q46_Gender_Recog, Q11_Gender_Recog) # encode gender as 0,1 Cross_Covariates$Gender <- Cross_Covariates$Gender-1 # Train Cross-sectional IRT model # ADAS_Cross is cross-sectional ADAS-Cog data # Cross_Covariates are patient covariate terms Cross_Model_F3 <- mixedmirt(data = ADAS_Cross, covdata = Cross_Covariates, model = MIRT_F3, fixed =~ Gender:Q25_Gender_Recog + Gender:Q210_Gender_Recog + Gender:Q46_Gender_Recog + Gender:Q11_Gender_Recog, itemtype = Item_type, itemdesign = Itemdesign_F3, SE = TRUE, GenRandomPars = TRUE, rotate = "oblimin", technical = list(NCYCLES = 4000, MAXQUAD= 20000)) ######################################################################## # SECTION 2: CALCULATING PATIENT LATENT TRAITS USING ESTIMATED IRT MODEL ######################################################################## # GLMER data preparation # load the cognitive dysfunction scale parameters load(file = 'Cross_IRT_Model_Results.RData') # contains IRT model parameters load(file = 'Longitudinal_ADAS_Data.RData') # longitudinal ADAS responses dat <- HU_ADAS_Long # compile patient covariates dat_Covariates <- HU_Covariates_Long dat_Covariates$ARM <- as.character(dat_Covariates$ARM) dat_Covariates$VISCODE <- as.character(dat_Covariates$VISCODE) dat_Covariates$Gender <- as.character(dat_Covariates$Gender) dat_Covariates$RID <- as.character(dat_Covariates$RID) # unique patient ID # restructure data Theta1_Slopes <- c(); Theta2_Slopes <- c(); Theta3_Slopes <- c(); Theta_Intercept <- c(); Item_Responses <- c(); Patient_EDU <- c(); Patient_Gender <- c(); Patient_VISCODE <- c(); Patient_RID <- c(); Patient_Visits <- c(); Patient_Arm <- c(); Patient_Age <- c(); Item_Model <- c(1:35) for(it in 1:length(Item_Model)){ item <- Item_Model[it] uniq_item <- sort(unique(dat[,item])) for(unq in 2:(length(uniq_item))){ # item 
responses as separate rows Item_Responses <- c(Item_Responses, (dat[,item]>=uniq_item[unq])*1) # slopes associated with memory (theta1), language (theta2), and # praxis (theta3) latent traits Theta1_Slopes <- c(Theta1_Slopes, (F3_Slopes_Not_CM[item, 1]*(1-dat_Covariates$CM_Med) + F3_Slopes_CM[item, 1]*dat_Covariates$CM_Med)) Theta2_Slopes <- c(Theta2_Slopes, (F3_Slopes_Not_CM[item, 2]*(1-dat_Covariates$CM_Med) + F3_Slopes_CM[item, 2]*dat_Covariates$CM_Med)) Theta3_Slopes <- c(Theta3_Slopes, (F3_Slopes_Not_CM[item, 3]*(1-dat_Covariates$CM_Med) + F3_Slopes_CM[item, 3]*dat_Covariates$CM_Med)) Patient_RID <- c(Patient_RID, dat_Covariates$RID) Patient_Arm <- c(Patient_Arm, dat_Covariates$ARM) Patient_Visits <- c(Patient_Visits, dat_Covariates$VIS) Patient_Age <- c(Patient_Age, dat_Covariates$AGE) Patient_VISCODE <- c(Patient_VISCODE, dat_Covariates$VISCODE) Patient_Gender <- c(Patient_Gender, dat_Covariates$Gender) Patient_EDU <- c(Patient_EDU, dat_Covariates$EDU) # Check the pattern of encoding of gender (COMMON MISTAKE) M_e3 <- which(dat_Covariates$Gender==0) F_e3 <- which(dat_Covariates$Gender==1) temp_intercept <- mat.or.vec(length(dat[,item]),1) + NA temp_intercept[M_e3] <- F3_Intercepts_Male[item,(unq-1)] temp_intercept[F_e3] <- F3_Intercepts_Female[item,(unq-1)] # item intercepts Theta_Intercept <- c(Theta_Intercept, temp_intercept) } } # define progression rate terms Theta1_Rate <- Theta1_Slopes*Patient_Visits Theta2_Rate <- Theta2_Slopes*Patient_Visits Theta3_Rate <- Theta3_Slopes*Patient_Visits Patient_Arm[Patient_Arm=='Placebo'] <- 'APlacebo' # Create a data frame for usage with glmer GLMER_F3_Data <- data.frame(Theta1_Slopes, Theta2_Slopes, Theta3_Slopes, Theta1_Rate, Theta2_Rate, Theta3_Rate, Patient_Arm, Patient_RID, Patient_VISCODE, Patient_EDU, Patient_Age, Patient_Gender, Item_Responses, Theta_Intercept, Patient_Visits, HU200 = (Patient_Arm=='HU200ug')*1, HU400 = (Patient_Arm=='HU400ug')*1) # standardize patient education and age (zero mean, unit st. 
dev) GLMER_F3_Data$Patient_EDU <- (GLMER_F3_Data$Patient_EDU- mean(GLMER_F3_Data$Patient_EDU))/ sd(GLMER_F3_Data$Patient_EDU) GLMER_F3_Data$Patient_Age <- (GLMER_F3_Data$Patient_Age- mean(GLMER_F3_Data$Patient_Age))/ sd(GLMER_F3_Data$Patient_Age) # estimate latent traits of patients using glmer HU_GLMER_F3 <- glmer( Item_Responses ~ 0 + Theta1_Slopes + Theta2_Slopes + Theta3_Slopes + Theta1_Rate + Theta2_Rate + Theta3_Rate + I(Theta1_Slopes*HU200) + I(Theta2_Slopes*HU200) + I(Theta3_Slopes*HU200) + I(Theta1_Rate*HU200) + I(Theta2_Rate*HU200) + I(Theta3_Rate*HU200) + I(Theta1_Slopes*HU400) + I(Theta2_Slopes*HU400) + I(Theta3_Slopes*HU400) + I(Theta1_Rate*HU400) + I(Theta2_Rate*HU400) + I(Theta3_Rate*HU400) + I(Theta1_Rate*Patient_Age) + I(Theta2_Rate*Patient_Age) + I(Theta3_Rate*Patient_Age) + I(Theta1_Slopes*Patient_Age) + I(Theta2_Slopes*Patient_Age) + I(Theta3_Slopes*Patient_Age) + (0 + Theta1_Slopes + Theta2_Slopes + Theta3_Slopes + Theta1_Rate + Theta2_Rate + Theta3_Rate|Patient_RID), data = GLMER_F3_Data, offset = Theta_Intercept, family = binomial, verbose = 2, control = glmerControl(optimizer="bobyqa"))
summary.translogRayEst <- function( object, ... ) { object$coefTable <- coefTable( coef( object ), diag( vcov( object ) )^0.5, df.residual( object$est ) ) class( object ) <- "summary.translogRayEst" return( object ) }
/pkg/micEconDist/R/summary.translogRayEst.R
no_license
scfmolina/micecon
R
false
false
238
r
summary.translogRayEst <- function( object, ... ) { object$coefTable <- coefTable( coef( object ), diag( vcov( object ) )^0.5, df.residual( object$est ) ) class( object ) <- "summary.translogRayEst" return( object ) }
################# plot2.R ################### ## Exploratory Data Analysis: Course Project 2 ################ Question 2 ################# ## Have total emissions from PM2.5 decreased in the Baltimore City, Maryland ## (fips == "24510") from 1999 to 2008? Use the base plotting system to make ## a plot answering this question. ## Set working directory if nessesary setwd("/home/user/Dropbox/Coursera/Exploratory data analysis/Course Project 2") ## Load data from working directory NEI <- readRDS("summarySCC_PM25.rds") ## Extract Baltimore fips, group by year and calculate total emissions for all groups plot2 <- NEI[NEI$fips == "24510",] %>% group_by(year) %>% summarise(total = sum(Emissions)) ## Open png device and specify name and dimensions png("plot2.png", width=480, height=480) ## plot using base plotting system in R p2 <- barplot(plot2$total, xlab="Year", names.arg=c("1999", "2002", "2005", "2008"), ylab="Total PM2.5 emissions (tons)", main="Total PM2.5 emissions for Baltimore City, Maryland")) ## Print plot print(p2) ## Close device dev.off()
/plot2.R
no_license
massovercharge/Course-Project-2
R
false
false
1,069
r
################# plot2.R ################### ## Exploratory Data Analysis: Course Project 2 ################ Question 2 ################# ## Have total emissions from PM2.5 decreased in the Baltimore City, Maryland ## (fips == "24510") from 1999 to 2008? Use the base plotting system to make ## a plot answering this question. ## Set working directory if nessesary setwd("/home/user/Dropbox/Coursera/Exploratory data analysis/Course Project 2") ## Load data from working directory NEI <- readRDS("summarySCC_PM25.rds") ## Extract Baltimore fips, group by year and calculate total emissions for all groups plot2 <- NEI[NEI$fips == "24510",] %>% group_by(year) %>% summarise(total = sum(Emissions)) ## Open png device and specify name and dimensions png("plot2.png", width=480, height=480) ## plot using base plotting system in R p2 <- barplot(plot2$total, xlab="Year", names.arg=c("1999", "2002", "2005", "2008"), ylab="Total PM2.5 emissions (tons)", main="Total PM2.5 emissions for Baltimore City, Maryland")) ## Print plot print(p2) ## Close device dev.off()
### neljas koolitus
/neljas.R
no_license
lumeste/Rkoolitus
R
false
false
21
r
### neljas koolitus
\name{DDT} \docType{methods} \alias{DDT} \alias{DDT,Trace,logical,logical,numeric-method} \alias{DDT,Trace,missing,missing,missing-method} \title{Apply demean, detrend, cosine taper} \description{ The \code{DDT} method of \code{Trace} objects returns a new \code{Trace} where data in the \code{@data} slot have been modified. This is typically required before peforming any kind of spectral analysis on the seismic trace. } \usage{ DDT(x, demean, detrend, taper) } \arguments{ \item{x}{a \code{Trace} object} \item{demean}{logical specifying whether to deman (default=\code{TRUE})} \item{detrend}{logical specifying whether to detrend (default=\code{TRUE})} \item{taper}{proportion of the signal to be tapered at each end (default=0.1)} } \details{ %%New \code{Trace} or \code{Stream} objects are created where all data values have been multiplied by \code{y}. Use \code{taper=0} for no tapering. } \value{ A new \code{Trace} object is returned. } %%\references{ } \author{ Jonathan Callahan \email{jonathan@mazamascience.com} } %% \note{ } %% \seealso{ } \examples{ # Open a connection to IRIS DMC webservices iris <- new("IrisClient") # P-wave onset for a big quake starttime <- as.POSIXct("2010-02-27 06:30:00", tz="GMT") endtime <- as.POSIXct("2010-02-27 07:00:00", tz="GMT") result <- try(st <- getDataselect(iris,"IU","ANMO","00","BHZ",starttime,endtime)) if (inherits(result,"try-error")) { message(geterrmessage()) } else { tr <- st@traces[[1]] trClean <- DDT(tr,TRUE,TRUE,0.1) layout(matrix(seq(2))) plot(tr) abline(h=0,col='gray60') mtext("Raw",side=3,line=-2,adj=0.05,col='red') plot(trClean) abline(h=0,col='gray60') mtext("Demean - Detrend - Cosine Taper",line=-2,side=3,adj=0.05,col='red') } # Restore default layout layout(1) } \keyword{methods}
/man/DDT.Rd
no_license
cran/IRISSeismic
R
false
false
1,805
rd
\name{DDT} \docType{methods} \alias{DDT} \alias{DDT,Trace,logical,logical,numeric-method} \alias{DDT,Trace,missing,missing,missing-method} \title{Apply demean, detrend, cosine taper} \description{ The \code{DDT} method of \code{Trace} objects returns a new \code{Trace} where data in the \code{@data} slot have been modified. This is typically required before peforming any kind of spectral analysis on the seismic trace. } \usage{ DDT(x, demean, detrend, taper) } \arguments{ \item{x}{a \code{Trace} object} \item{demean}{logical specifying whether to deman (default=\code{TRUE})} \item{detrend}{logical specifying whether to detrend (default=\code{TRUE})} \item{taper}{proportion of the signal to be tapered at each end (default=0.1)} } \details{ %%New \code{Trace} or \code{Stream} objects are created where all data values have been multiplied by \code{y}. Use \code{taper=0} for no tapering. } \value{ A new \code{Trace} object is returned. } %%\references{ } \author{ Jonathan Callahan \email{jonathan@mazamascience.com} } %% \note{ } %% \seealso{ } \examples{ # Open a connection to IRIS DMC webservices iris <- new("IrisClient") # P-wave onset for a big quake starttime <- as.POSIXct("2010-02-27 06:30:00", tz="GMT") endtime <- as.POSIXct("2010-02-27 07:00:00", tz="GMT") result <- try(st <- getDataselect(iris,"IU","ANMO","00","BHZ",starttime,endtime)) if (inherits(result,"try-error")) { message(geterrmessage()) } else { tr <- st@traces[[1]] trClean <- DDT(tr,TRUE,TRUE,0.1) layout(matrix(seq(2))) plot(tr) abline(h=0,col='gray60') mtext("Raw",side=3,line=-2,adj=0.05,col='red') plot(trClean) abline(h=0,col='gray60') mtext("Demean - Detrend - Cosine Taper",line=-2,side=3,adj=0.05,col='red') } # Restore default layout layout(1) } \keyword{methods}
df <- data.frame('A' = c(1,2,3,4,5), 'B' = c('A','B','C','D','E')) df[1,1] # matrislerdeki gibi satır-sütun olarak seçebiliriz df[1,] df[,1] df[,c(1,2)] # tüm satırlar ile 1 ve 2. sütunları seç df[1:2,] # 1'den başla 2. satıra kadar ve tüm sütunları seç df['A'] # dataframe'in A sütunu (data frame olarak döner) df[['A']] # data frame'in A sütunu (vektör olarak döner) df$A # vektör olarak A sütununu seçer df[c('A','B')] # A ve B sütunlarını seçer
/R/İstatistikVeVeriBilimi/2-)VeriYapılarındaElemanSeçimi/4-)DataFrameler/1-)DataFramelerdeElemanSeçimi.R
no_license
DincerDogan/VeriBilimi
R
false
false
503
r
df <- data.frame('A' = c(1,2,3,4,5), 'B' = c('A','B','C','D','E')) df[1,1] # matrislerdeki gibi satır-sütun olarak seçebiliriz df[1,] df[,1] df[,c(1,2)] # tüm satırlar ile 1 ve 2. sütunları seç df[1:2,] # 1'den başla 2. satıra kadar ve tüm sütunları seç df['A'] # dataframe'in A sütunu (data frame olarak döner) df[['A']] # data frame'in A sütunu (vektör olarak döner) df$A # vektör olarak A sütununu seçer df[c('A','B')] # A ve B sütunlarını seçer
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 54582 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 54040 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 54040 c c Input Parameter (command line, file): c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF04-c05.blif-biu.inv.prop.bb-bmc.conf01.01X-QBF.BB1-Zi.BB2-01X.BB3-01X.with-IOC.unfold-008.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 22882 c no.of clauses 54582 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 54040 c c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF04-c05.blif-biu.inv.prop.bb-bmc.conf01.01X-QBF.BB1-Zi.BB2-01X.BB3-01X.with-IOC.unfold-008.qdimacs 22882 54582 E1 [1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1092 1093 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1640 1641 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2188 2189 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 
2736 2737 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3284 3285 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3832 3833 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4380 4381 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4867 4871 4873 4875 4877 4879 4881 4883 4885 4887 4889 4891 4893 4895 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4928 4929 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971] 0 113 19258 54040 RED
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF04-c05.blif-biu.inv.prop.bb-bmc.conf01.01X-QBF.BB1-Zi.BB2-01X.BB3-01X.with-IOC.unfold-008/biu.mv.xl_ao.bb-b003-p020-MIF04-c05.blif-biu.inv.prop.bb-bmc.conf01.01X-QBF.BB1-Zi.BB2-01X.BB3-01X.with-IOC.unfold-008.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
3,584
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 54582 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 54040 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 54040 c c Input Parameter (command line, file): c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF04-c05.blif-biu.inv.prop.bb-bmc.conf01.01X-QBF.BB1-Zi.BB2-01X.BB3-01X.with-IOC.unfold-008.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 22882 c no.of clauses 54582 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 54040 c c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-MIF04-c05.blif-biu.inv.prop.bb-bmc.conf01.01X-QBF.BB1-Zi.BB2-01X.BB3-01X.with-IOC.unfold-008.qdimacs 22882 54582 E1 [1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1092 1093 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1640 1641 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2188 2189 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 
2736 2737 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3284 3285 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3832 3833 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4380 4381 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4867 4871 4873 4875 4877 4879 4881 4883 4885 4887 4889 4891 4893 4895 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4928 4929 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971] 0 113 19258 54040 RED
library(infutil) ### Name: iota ### Title: Lindley Information (i.e., Information Utility) of Item Response ### Patterns ### Aliases: iota ### Keywords: models ### ** Examples ltm.lsat <- ltm(LSAT~z1, IRT=FALSE) Nu.lsat = nrow(unique(LSAT)) iota(ltm.lsat, data=LSAT, prior=Jeffreys(ltm.lsat)) iota(ltm.lsat, data=LSAT, prior=Jeffreys(ltm.lsat), theta0=c("max.prior")) iota(ltm.lsat, data=LSAT, prior=Jeffreys(ltm.lsat), theta0=0) iota(ltm.lsat, data=LSAT, prior=Jeffreys(ltm.lsat), theta0=rnorm(Nu.lsat))
/data/genthat_extracted_code/infutil/examples/iota.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
518
r
library(infutil) ### Name: iota ### Title: Lindley Information (i.e., Information Utility) of Item Response ### Patterns ### Aliases: iota ### Keywords: models ### ** Examples ltm.lsat <- ltm(LSAT~z1, IRT=FALSE) Nu.lsat = nrow(unique(LSAT)) iota(ltm.lsat, data=LSAT, prior=Jeffreys(ltm.lsat)) iota(ltm.lsat, data=LSAT, prior=Jeffreys(ltm.lsat), theta0=c("max.prior")) iota(ltm.lsat, data=LSAT, prior=Jeffreys(ltm.lsat), theta0=0) iota(ltm.lsat, data=LSAT, prior=Jeffreys(ltm.lsat), theta0=rnorm(Nu.lsat))
# Enrich the South Dakota landfill / recycling-center sheet
# (~/Downloads/Landfills/SD_details.csv) using the Google Places API:
#  1. expand the comma-separated waste-type list into Yes/No flag columns,
#  2. look up ZIP code and county for each facility's city,
#  3. resolve each facility's Google place_id via its phone number,
#  4. fetch website, phone number and opening hours per place_id.
# NOTE(review): every `key = ""` below needs a valid Places API key filled in
# before the GET calls will return data.
library(httr)

original <- read.csv("~/Downloads/Landfills/SD_details.csv",
                     stringsAsFactors = FALSE)
raw <- original[1:245, ]
size <- nrow(raw)

# Return "Yes" if any element of `line` (a strsplit() result) matches
# `pattern`, case-insensitively; "No" otherwise.
# Moved above its first use: the original defined it *after* the loop that
# calls it, which fails when the script is sourced into a fresh session.
accepts <- function(pattern, line) {
  matches <- grepl(pattern, line[[1]], ignore.case = TRUE)
  if (sum(matches) != 0) {
    return("Yes")
  } else {
    return("No")
  }
}

# FORMATTING RECYCLING WASTE TYPES -------------------------------------------
# Rows 66-245 are recycling centers; column 18 holds the comma-separated list
# of accepted materials, expanded here into flag columns 19-36.
recycling <- raw[66:245, ]
types <- recycling[, 18]
for (i in seq_along(types)) {
  line <- strsplit(types[i], split = ",")
  #recycling[i,18] <- accepts("msw", line)
  recycling[i, 19] <- accepts("(electronics|phones|computer|TV)", line)
  recycling[i, 20] <- accepts("(white good|whitegood|appliances|mattress)", line)
  recycling[i, 21] <- accepts("(yard|trees)", line)
  recycling[i, 22] <- accepts("msw", line)
  recycling[i, 23] <- accepts("(construction|demolition)", line)
  recycling[i, 24] <- accepts("(metal|car parts)", line)
  recycling[i, 25] <- accepts("(non-ferrous|tin|steel)", line)
  recycling[i, 26] <- accepts("(aluminum|ferrous|foil)", line)
  recycling[i, 27] <- accepts("(plastic)", line)
  recycling[i, 28] <- accepts("(glass)", line)
  recycling[i, 29] <- accepts("(paper|news)", line)
  recycling[i, 30] <- accepts("(cardboard|box board)", line)
  # NOTE(review): columns 31 and 32 use the same "oil" pattern -- confirm
  # whether one of them was meant to match a different material.
  recycling[i, 31] <- accepts("oil", line)
  recycling[i, 32] <- accepts("oil", line)
  recycling[i, 33] <- accepts("(Automotive batteries|lead.*acid)", line)
  recycling[i, 34] <- accepts("(paint|solvent)", line)
  recycling[i, 35] <- accepts("(antifreeze)", line)
  recycling[i, 36] <- accepts("(tires)", line)
}

# Blank out (set to NA) recycling centers whose 18 flags are all "No".
is_useful <- vapply(seq_along(types), function(i) {
  sum(recycling[i, 19:36] == "No") != 18
}, logical(1))
recycling[!is_useful, ] <- NA
raw[66:245, ] <- recycling
write.csv(raw, "~/Downloads/Landfills/SD_details.csv")

# GET ZIP CODES & COUNTIES ----------------------------------------------------
for (i in seq_len(size)) {
  # City name -> place_id for "<City>, SD".
  response <- GET(
    url = "https://maps.googleapis.com/maps/api/place/findplacefromtext/json",
    query = list(
      key = "",
      input = paste(raw[i, "City"], "SD", sep = ", "),
      inputtype = "textquery",
      fields = "place_id"
    )
  )
  content <- content(response)
  # place_id -> address components (county / ZIP).
  response <- GET(
    url = "https://maps.googleapis.com/maps/api/place/details/json",
    query = list(
      key = "",
      place_id = content$candidates[[1]]$place_id,
      fields = "address_component"
    )
  )
  content <- content(response)
  for (j in seq_along(content$result$address_components)) {
    level <- content$result$address_components[[j]]$types[[1]]
    if (level == "administrative_area_level_2") {
      raw[i, "County"] <- content$result$address_components[[j]]$short_name
    }
    if (level == "postal_code") {
      raw[i, "Zip"] <- content$result$address_components[[j]]$short_name
    }
  }
}
write.csv(raw, "~/Downloads/Landfills/SD_details.csv")

# GET PLACE IDS ---------------------------------------------------------------
# Resolve each facility's Google place_id by its phone number; also take the
# formatted address Google returns.
place_ids <- character(size)  # preallocated instead of growing with c()
for (i in seq_len(size)) {
  response <- GET(
    url = "https://maps.googleapis.com/maps/api/place/findplacefromtext/json",
    query = list(
      key = "",
      input = paste("+1", raw[i, "Phone.Number"]),
      inputtype = "phonenumber",
      fields = "place_id,formatted_address"
    )
  )
  content <- content(response)
  if (length(content$candidates) == 0) {
    place_ids[i] <- ""
  } else {
    place_ids[i] <- content$candidates[[1]]$place_id
    raw[i, "Street.Address"] <- content$candidates[[1]]$formatted_address
  }
}
length(unique(place_ids))
details <- cbind(raw, place_ids)
write.csv(details, "~/Downloads/Landfills/SD_details.csv")

# GET DETAILS (hours, website) ------------------------------------------------
details <- read.csv("~/Downloads/Landfills/SD_details.csv",
                    stringsAsFactors = FALSE)[, 2:51]
place_ids <- details[, 50]

# Google returns weekday_text in Monday..Sunday order; the sheet stores hours
# in columns 11..17 as Sunday..Saturday, hence this index mapping.
day_prefixes <- c("Monday: ", "Tuesday: ", "Wednesday: ", "Thursday: ",
                  "Friday: ", "Saturday: ", "Sunday: ")
day_columns <- c(12, 13, 14, 15, 16, 17, 11)

for (i in seq_len(size)) {
  if (!is.na(place_ids[i])) {
    response <- GET(
      url = "https://maps.googleapis.com/maps/api/place/details/json",
      query = list(
        key = "",
        place_id = place_ids[i],
        fields = "website,opening_hours,formatted_phone_number"
      )
    )
    content <- content(response)
    if (!is.null(content$result$website)) {
      details[i, "Website"] <- content$result$website
    }
    if (!is.null(content$result$formatted_phone_number)) {
      details[i, "Phone.Number"] <- content$result$formatted_phone_number
    }
    # One loop replaces seven copy-pasted per-weekday blocks.  This also
    # fixes a bug: the original Thursday block assigned the Closed->No
    # substitution to `vstr` but then stored the uncorrected `str`.
    for (d in seq_along(day_prefixes)) {
      if (!is.null(content$result$opening_hours$weekday_text[[d]])) {
        str <- content$result$opening_hours$weekday_text[[d]]
        str <- gsub(day_prefixes[d], "", str)
        str <- gsub(" ", "", str)  # strip the narrow space Google inserts
        str <- gsub("Closed", "No", str)
        details[i, day_columns[d]] <- str
      }
    }
  }
}
write.csv(details, "~/Downloads/Landfills/SD_details.csv")
/states/SD.R
no_license
amhuang/waste-mgmt-research
R
false
false
6,526
r
# Enrich the South Dakota landfill / recycling-center sheet
# (~/Downloads/Landfills/SD_details.csv) using the Google Places API:
#  1. expand the comma-separated waste-type list into Yes/No flag columns,
#  2. look up ZIP code and county for each facility's city,
#  3. resolve each facility's Google place_id via its phone number,
#  4. fetch website, phone number and opening hours per place_id.
# NOTE(review): every `key = ""` below needs a valid Places API key filled in
# before the GET calls will return data.
library(httr)

original <- read.csv("~/Downloads/Landfills/SD_details.csv",
                     stringsAsFactors = FALSE)
raw <- original[1:245, ]
size <- nrow(raw)

# Return "Yes" if any element of `line` (a strsplit() result) matches
# `pattern`, case-insensitively; "No" otherwise.
# Moved above its first use: the original defined it *after* the loop that
# calls it, which fails when the script is sourced into a fresh session.
accepts <- function(pattern, line) {
  matches <- grepl(pattern, line[[1]], ignore.case = TRUE)
  if (sum(matches) != 0) {
    return("Yes")
  } else {
    return("No")
  }
}

# FORMATTING RECYCLING WASTE TYPES -------------------------------------------
# Rows 66-245 are recycling centers; column 18 holds the comma-separated list
# of accepted materials, expanded here into flag columns 19-36.
recycling <- raw[66:245, ]
types <- recycling[, 18]
for (i in seq_along(types)) {
  line <- strsplit(types[i], split = ",")
  #recycling[i,18] <- accepts("msw", line)
  recycling[i, 19] <- accepts("(electronics|phones|computer|TV)", line)
  recycling[i, 20] <- accepts("(white good|whitegood|appliances|mattress)", line)
  recycling[i, 21] <- accepts("(yard|trees)", line)
  recycling[i, 22] <- accepts("msw", line)
  recycling[i, 23] <- accepts("(construction|demolition)", line)
  recycling[i, 24] <- accepts("(metal|car parts)", line)
  recycling[i, 25] <- accepts("(non-ferrous|tin|steel)", line)
  recycling[i, 26] <- accepts("(aluminum|ferrous|foil)", line)
  recycling[i, 27] <- accepts("(plastic)", line)
  recycling[i, 28] <- accepts("(glass)", line)
  recycling[i, 29] <- accepts("(paper|news)", line)
  recycling[i, 30] <- accepts("(cardboard|box board)", line)
  # NOTE(review): columns 31 and 32 use the same "oil" pattern -- confirm
  # whether one of them was meant to match a different material.
  recycling[i, 31] <- accepts("oil", line)
  recycling[i, 32] <- accepts("oil", line)
  recycling[i, 33] <- accepts("(Automotive batteries|lead.*acid)", line)
  recycling[i, 34] <- accepts("(paint|solvent)", line)
  recycling[i, 35] <- accepts("(antifreeze)", line)
  recycling[i, 36] <- accepts("(tires)", line)
}

# Blank out (set to NA) recycling centers whose 18 flags are all "No".
is_useful <- vapply(seq_along(types), function(i) {
  sum(recycling[i, 19:36] == "No") != 18
}, logical(1))
recycling[!is_useful, ] <- NA
raw[66:245, ] <- recycling
write.csv(raw, "~/Downloads/Landfills/SD_details.csv")

# GET ZIP CODES & COUNTIES ----------------------------------------------------
for (i in seq_len(size)) {
  # City name -> place_id for "<City>, SD".
  response <- GET(
    url = "https://maps.googleapis.com/maps/api/place/findplacefromtext/json",
    query = list(
      key = "",
      input = paste(raw[i, "City"], "SD", sep = ", "),
      inputtype = "textquery",
      fields = "place_id"
    )
  )
  content <- content(response)
  # place_id -> address components (county / ZIP).
  response <- GET(
    url = "https://maps.googleapis.com/maps/api/place/details/json",
    query = list(
      key = "",
      place_id = content$candidates[[1]]$place_id,
      fields = "address_component"
    )
  )
  content <- content(response)
  for (j in seq_along(content$result$address_components)) {
    level <- content$result$address_components[[j]]$types[[1]]
    if (level == "administrative_area_level_2") {
      raw[i, "County"] <- content$result$address_components[[j]]$short_name
    }
    if (level == "postal_code") {
      raw[i, "Zip"] <- content$result$address_components[[j]]$short_name
    }
  }
}
write.csv(raw, "~/Downloads/Landfills/SD_details.csv")

# GET PLACE IDS ---------------------------------------------------------------
# Resolve each facility's Google place_id by its phone number; also take the
# formatted address Google returns.
place_ids <- character(size)  # preallocated instead of growing with c()
for (i in seq_len(size)) {
  response <- GET(
    url = "https://maps.googleapis.com/maps/api/place/findplacefromtext/json",
    query = list(
      key = "",
      input = paste("+1", raw[i, "Phone.Number"]),
      inputtype = "phonenumber",
      fields = "place_id,formatted_address"
    )
  )
  content <- content(response)
  if (length(content$candidates) == 0) {
    place_ids[i] <- ""
  } else {
    place_ids[i] <- content$candidates[[1]]$place_id
    raw[i, "Street.Address"] <- content$candidates[[1]]$formatted_address
  }
}
length(unique(place_ids))
details <- cbind(raw, place_ids)
write.csv(details, "~/Downloads/Landfills/SD_details.csv")

# GET DETAILS (hours, website) ------------------------------------------------
details <- read.csv("~/Downloads/Landfills/SD_details.csv",
                    stringsAsFactors = FALSE)[, 2:51]
place_ids <- details[, 50]

# Google returns weekday_text in Monday..Sunday order; the sheet stores hours
# in columns 11..17 as Sunday..Saturday, hence this index mapping.
day_prefixes <- c("Monday: ", "Tuesday: ", "Wednesday: ", "Thursday: ",
                  "Friday: ", "Saturday: ", "Sunday: ")
day_columns <- c(12, 13, 14, 15, 16, 17, 11)

for (i in seq_len(size)) {
  if (!is.na(place_ids[i])) {
    response <- GET(
      url = "https://maps.googleapis.com/maps/api/place/details/json",
      query = list(
        key = "",
        place_id = place_ids[i],
        fields = "website,opening_hours,formatted_phone_number"
      )
    )
    content <- content(response)
    if (!is.null(content$result$website)) {
      details[i, "Website"] <- content$result$website
    }
    if (!is.null(content$result$formatted_phone_number)) {
      details[i, "Phone.Number"] <- content$result$formatted_phone_number
    }
    # One loop replaces seven copy-pasted per-weekday blocks.  This also
    # fixes a bug: the original Thursday block assigned the Closed->No
    # substitution to `vstr` but then stored the uncorrected `str`.
    for (d in seq_along(day_prefixes)) {
      if (!is.null(content$result$opening_hours$weekday_text[[d]])) {
        str <- content$result$opening_hours$weekday_text[[d]]
        str <- gsub(day_prefixes[d], "", str)
        str <- gsub(" ", "", str)  # strip the narrow space Google inserts
        str <- gsub("Closed", "No", str)
        details[i, day_columns[d]] <- str
      }
    }
  }
}
write.csv(details, "~/Downloads/Landfills/SD_details.csv")
#' datamodelr: data model diagrams
#'
#' Provides a simple structure to describe data models,
#' functions to read a data model from a YAML file,
#' and a function to create DiagrammeR graph objects:
#'
#'
#' \itemize{
#'   \item \pkg{datamodelr}'s data model object is a simple list of data frames which
#'     represent the model entities and include elements and their relations.
#'     See \code{\link{as.data_model}}.
#'   \item Function \code{\link{dm_read_yaml}} reads YAML format and creates a
#'     data model object.
#'   \item Function \code{\link{dm_create_graph}} creates a DiagrammeR graph object from
#'     a data model object.
#' }
#'
#' @docType package
#' @name datamodelr-package
#' @aliases datamodelr
NULL

#' Coerce to a data model
#'
#' Functions to coerce an object to a data model if possible.
#'
#' @details Function accepts a data frame with columns info.
#'   Data frame must have 'table' and 'column' elements.
#'   Optional element 'key' (boolean) marks a column as a primary key.
#'   Optional element 'ref' (character string) defines referenced table name.
#'   Optional element 'ref_col' (character string) defines a column in a
#'   referenced table name primary key (only necessary when the referenced
#'   table has a compound primary key).
#' @param x Object (list or data frame) to be coerced to data model object
#' @aliases as.data_model
#' @return If possible it returns a data model object.
#'   It is a list of data frames with at least the following columns:
#'   \item{ column }{A name of the column in a table}
#'   \item{ key }{A boolean value indicating this column is a primary key or NA.}
#'   \item{ ref }{A character string with a referenced table name.
#'     Not being NA means the column is a foreign key.}
#' @export
as.data_model <- function(x) {
  UseMethod("as.data_model")
}

#' @keywords internal
#' @export
as.data_model.list <- function(x) {
  if (mode(x) != "list") {
    stop("Not a list")
  }
  if (!all(c("columns", "references") %in% (names(x)))) {
    stop("Input must have columns and references")
  }
  class(x) <- c("data_model", class(x))
  x
}

#' Check if object is a data model
#'
#' @param x Object to check if it is a data model
#' @keywords internal
#' @export
is.data_model <- function(x) {
  inherits(x, "data_model")
}

#' Coerce a data frame to a data model
#'
#' @keywords internal
#' @export
as.data_model.data.frame <- function(x) {
  if (!inherits(x, "data.frame")) {
    stop("Not a data.frame")
  }
  if (!all(c("column", "table") %in% names(x))) {
    stop("Data frame must have elements named 'table' and 'column'.")
  }

  # Normalise the optional key / ref columns.
  if (!is.null(x[["key"]])) {
    x[is.na(x[, "key"]), "key"] <- FALSE
  } else {
    x[, "key"] <- FALSE
  }
  if (is.null(x[["ref"]])) {
    x[["ref"]] <- NA
  }

  # Create references (foreign keys) from ref and keys.
  ref_table <- dm_create_references(x)

  # Per-table attributes may already be attached (e.g. by dm_list2coltable);
  # otherwise create a minimal table list.
  table_attrs <- attr(x, "tables")
  if (is.null(table_attrs)) {
    table_attrs <- data.frame(
      table = unique(x[["table"]]),
      segment = NA,
      display = NA,
      row.names = NULL,
      stringsAsFactors = FALSE
    )
  }
  attr(x, "tables") <- NULL

  ret <- list(
    tables = table_attrs,
    columns = x,
    references = ref_table
  )
  as.data_model(ret)
}

#' Read YAML
#'
#' Reads a file in YAML format and returns a data model object.
#'
#' @details YAML description should include table names (first level),
#'   columns (second level) and column attributes (third level).
#'   Expected (but not required) column attributes are
#'   \code{key} (Yes|No),
#'   \code{ref} (Name of referenced table),
#'   \code{comment} (column description).
#'
#' @param file A file in YAML format
#' @param text A YAML formatted character string
#' @examples
#' dm <-
#'   dm_read_yaml(text = "
#'
#'     Person:
#'       Person ID: {key: yes}
#'       Name:
#'       E-mail:
#'       Street:
#'       Street number:
#'       City:
#'       ZIP:
#'
#'     Order:
#'       Order ID: {key: yes}
#'       Customer: {ref: Person}
#'       Sales person: {ref: Person}
#'       Order date:
#'       Requested ship date:
#'       Status:
#'
#'     Order Line:
#'       Order ID: {key: yes, ref: Order}
#'       Line number: {key: yes}
#'       Order item: {ref: Item}
#'       Quantity:
#'       Price:
#'
#'     Item:
#'       Item ID: {key: yes}
#'       Item Name:
#'       Description:
#'   ")
#' @export
dm_read_yaml <- function(file = NULL, text = NULL) {
  # yaml is an optional dependency; fail with a clear message if missing.
  if (!requireNamespace("yaml", quietly = TRUE)) {
    stop("yaml package needed for this function to work. Please install it.",
         call. = FALSE)
  }
  if (missing(text)) {
    if (!missing(file)) {
      if (!file.exists(file)) {
        stop("File does not exist.")
      }
      dm <- yaml::yaml.load_file(file)
    } else {
      stop("A file or text needed.")
    }
  } else {
    dm <- yaml::yaml.load(text)
  }
  if (is.null(dm)) {
    return(NULL)
  }
  col_table <- dm_list2coltable(dm)
  as.data_model(col_table)
}

#' List to column table
#'
#' Convert a 3 level named list to a data frame with column info
#'
#' @details The function is used when creating a data model object
#'   from the list provided by the yaml parser.
#' @param x a named list
#' @export
#' @keywords internal
dm_list2coltable <- function(x) {
  if (!is.list(x)) {
    stop("Input must be a list.")
  }

  if (is.null(names(x))) {
    # Parsed yaml with sequences: each element is a map holding a "table"
    # name and a "columns" map.
    x_tables <- x[sapply(x, function(x) !is.null(x[["table"]]))]
    table_names <- sapply(x_tables, function(tab) tab[["table"]])
    columns <- lapply(x_tables, function(tab) {
      tab_name <- tab[["table"]]
      if (!is.null(tab_name)) {
        cols <- tab[["columns"]]
      }
    })
    names(columns) <- table_names
    column_names <- lapply(columns, names)
    column_attributes <- unique(unlist(lapply(columns, sapply, names)))
  } else {
    # Named list (parsed yaml with maps): top-level names are table names.
    columns <- x
    table_names <- names(columns)
    column_names <- lapply(columns, names)
    column_attributes <- unique(unlist(lapply(columns, sapply, names)))
  }

  # Build one data frame per table: a row per column plus one extra column
  # per column attribute (key, ref, comment, ...).
  table_list <- lapply(table_names, function(tab_name) {
    if (is.null(column_names[[tab_name]])) {
      column_names[[tab_name]] <- NA
    }
    tab <- data.frame(
      table = tab_name,
      column = column_names[tab_name],
      stringsAsFactors = FALSE
    )
    names(tab) <- c("table", "column")
    for (a in column_attributes) {
      attr_value <- unlist(
        sapply(column_names[[tab_name]], function(cname) {
          if (is.list(columns[[tab_name]][[cname]])) {
            value <- columns[[tab_name]][[cname]][[a]]
          } else {
            value <- NA
          }
          ifelse(is.null(value), NA, value)
        })
      )
      tab[[a]] <- attr_value
    }
    tab
  })
  ret <- do.call(rbind, table_list)

  table_attrs <- dm_get_table_attrs(x)
  if (!is.null(table_attrs) && is.null(table_attrs$segment)) {
    table_attrs$segment <- NA
  }
  attr(ret, "tables") <- table_attrs
  ret
}

# Extract per-table attributes (everything except "table" and "columns")
# from a parsed yaml sequence; returns a data frame with one row per table.
dm_get_table_attrs <- function(x) {
  x_tables <- x[sapply(x, function(x) !is.null(x[["table"]]))]
  table_names <- sapply(x_tables, function(tab) tab[["table"]])
  # renamed from `table_attrs`, which the original confusingly reused for
  # both the attribute-name vector and the result
  attr_names <- unique(unlist(lapply(x_tables, names)))
  attr_names <- attr_names[!attr_names %in% c("columns", "table")]
  names(x_tables) <- table_names
  table_attrs <- lapply(table_names, function(tab) {
    ret <- data.frame(
      table = tab,
      stringsAsFactors = FALSE
    )
    for (aname in attr_names) {
      tab_attr <- x_tables[[tab]][[aname]]
      if (is.null(tab_attr)) {
        tab_attr <- NA
      }
      ret[[aname]] <- tab_attr
    }
    ret
  })
  do.call(rbind, table_attrs)
}

#' Create reference info
#'
#' Creates references (foreign keys) based on reference table names in
#' column info table.
#'
#' @param col_table A data frame with table columns
#' @details The function is used when creating a data model object.
#'   \code{col_table} must have at least
#'   \code{table},
#'   \code{column} and
#'   \code{ref} elements.
#'   When referencing tables with compound primary keys an
#'   additional \code{ref_col} with primary key columns must be provided.
#' @export
#' @keywords internal
dm_create_references <- function(col_table) {
  if (!inherits(col_table, "data.frame")) {
    stop("Input must be a data frame.")
  }
  if (!all(c("table", "column") %in% names(col_table))) {
    stop("Column info table must have table, column and ref variables.")
  }
  if (!"ref" %in% names(col_table)) {
    return(NULL)
  }
  if (all(is.na(col_table[, "ref"]))) {
    return(NULL)
  }
  if (is.null(col_table[["ref_col"]])) {
    col_table[["ref_col"]] <- NA
  }

  # Keep only the rows that actually reference another table.
  ref_table <- col_table[
    !is.na(col_table[["ref"]]),
    c("table", "column", "ref", "ref_col")]
  col_table[is.na(col_table$key), "key"] <- FALSE

  # Where ref_col was not given explicitly, default to the first primary-key
  # column of the referenced table.
  ref_col <- with(ref_table,
    ifelse(is.na(ref_col),
           sapply(ref_table$ref, function(x)
             col_table[col_table$table == x & col_table$key, "column"][1]),
           ref_col))
  ref_table[["ref_col"]] <- ref_col

  # Number of primary-key columns of each referenced table (compound keys).
  num_col <- sapply(ref_table$ref, function(x)  # fixed: `<-`, not `=`
    length(col_table[col_table$table == x & col_table$key, "column"]))
  num_col[num_col == 0L] <- 1L

  # Index of each row inside its (possibly compound) key:
  # for a k-column key the rows get 1..k.
  key_col_num <- {  # fixed: `<-`, not `=`
    rle1 <- rle(num_col)
    if (lengths(rle1)[1] > 0) {
      unlist(sapply(1:lengths(rle1)[1], function(i) {
        rep(1:rle1$values[i], rle1$lengths[i] / rle1$values[i])
      }))
    } else {
      NA
    }
  }
  # A new reference starts whenever the key-column index restarts at 1.
  ref_table$ref_id <- cumsum(key_col_num == 1)
  ref_table$ref_col_num <- key_col_num
  ref_table
}

#' Create data model object from R data frames
#'
#' Uses data frame column names to create a data model diagram
#'
#' @param ... Data frames or one list of data frames
#' @export
dm_from_data_frames <- function(...) {
  df_list <- list(...)
  if (length(df_list) == 1 && inherits(df_list[[1]], "list")) {
    # A single list of data frames was passed.
    df_list <- df_list[[1]]
  } else {
    if (length(names(df_list)) < length(df_list)) {
      # Unnamed arguments: use the call expressions as table names.
      names(df_list) <- as.list(match.call(expand.dots = TRUE)[-1])
    }
  }
  tables <- df_list
  names(tables) <- make.names(names(tables))
  dfdm <- do.call(rbind, lapply(names(tables), function(table_name) {
    t1 <- tables[[table_name]]
    columns <- data.frame(
      column = names(t1),
      type = sapply(t1[0, ], class),  # column classes; no data rows needed
      stringsAsFactors = FALSE
    )
    columns$table <- table_name
    columns
  }))
  as.data_model(dfdm)
}

#' Add reference
#'
#' Adds reference to existing data model object
#'
#' @param dm A data model object
#' @param table Table name
#' @param column Column(s) name
#' @param ref Referenced table name
#' @param ref_col Referenced column(s) name
#' @return New data model object
#' @export
dm_add_reference_ <- function(dm, table, column, ref = NULL, ref_col = NULL) {
  ref_df <- data.frame(
    table = table,
    column = column,
    ref = ref,
    ref_col = ref_col,
    ref_id = ifelse(is.null(dm$references), 1, max(dm$references$ref_id) + 1),
    # fixed: 1:length(ref_col) yields c(1, 0) for empty input
    ref_col_num = seq_along(ref_col),
    stringsAsFactors = FALSE
  )
  dm$references <- rbind(dm$references, ref_df)
  dm$columns$ref[dm$columns$table == table & dm$columns$column == column] <- ref
  dm
}

#' Add references
#'
#' Adds references defined with logical expressions from data frames
#' in format table1$column1 == table2$column2
#'
#' @param dm Data model object
#' @param ... Logical expressions in format table1$column1 == table2$column2
#' @export
#' @examples
#' dm_add_references(
#'   flights$carrier == airlines$carrier,
#'   weather$origin == airports$faa
#' )
dm_add_references <- function(dm, ...) {
  ref_list <- substitute(list(...))
  if (is.null(dm$columns$ref)) dm$columns$ref <- NA
  if (is.null(dm$columns$ref_col)) dm$columns$ref_col <- NA
  if (is.null(dm$columns$key)) dm$columns$key <- FALSE

  for (ref in as.list(ref_list[-1])) {
    ref <- as.list(ref)
    # Each argument must be an unevaluated call of the shape
    # df1$col1 == df2$col2 (deparse of the `==` symbol includes backticks).
    if (as.character(ref[1]) != "`==`" ||
        length(ref) != 3 ||
        length(ref[[2]]) != 3 ||
        length(ref[[3]]) != 3) {
      stop("Define references with logical expressions: dataframe1$column1 == dataframe2$column2, ...",
           call. = FALSE)
    }
    # (unused helper `toChar` removed)
    table_name <- as.character(ref[[2]][[2]])
    column_name <- as.character(ref[[2]][[3]])
    ref_table <- as.character(ref[[3]][[2]])
    ref_col <- as.character(ref[[3]][[3]])

    dm_row <- with(dm$columns, table == table_name & column == column_name)
    dm$columns[dm_row, "ref"] <- ref_table
    dm$columns[dm_row, "ref_col"] <- ref_col

    # Mark the referenced column as a primary key.
    dm_key_row <- dm$columns$table == ref_table & dm$columns$column == ref_col
    dm$columns[dm_key_row, "key"] <- TRUE
  }
  ref_table <- dm_create_references(dm$columns)
  dm$references <- ref_table
  dm
}

#' Set key
#'
#' Set column as a primary key
#'
#' @param dm A data model object
#' @param table Table name
#' @param column Column(s) name
#' @export
dm_set_key <- function(dm, table, column) {
  update_cols <- dm$columns$table == table & dm$columns$column == column
  if (!any(update_cols)) {
    stop("Column not found.")
  }
  dm$columns$key[update_cols] <- TRUE
  dm
}

#' Set column attribute
#'
#' Set column attribute value
#'
#' @param dm A data model object
#' @param table Table name
#' @param column Column(s) name
#' @param attr Column attribute name
#' @param value New value
#' @export
#' @keywords internal
dm_set_col_attr <- function(dm, table, column, attr, value) {
  update_cols <- dm$columns$table == table & dm$columns$column == column
  if (!any(update_cols)) {
    stop("Column not found.")
  }
  dm$columns[update_cols, attr] <- value
  dm
}

#' Reverse engineer query
#'
#' Returns a string with SQL query to reverse engineer a database
#'
#' @param rdbms Which database ("postgres" or "sqlserver")
#' @return A character string with sql query
#' @export
#' @examples
#' \dontrun{
#' library(RPostgreSQL)
#' # dvdrental sample database: http://www.postgresqltutorial.com/postgresql-sample-database
#' con <- dbConnect(dbDriver("PostgreSQL"), dbname="dvdrental", user ="postgres")
#' sQuery <- dm_re_query("postgres")
#' dm_dvdrental <- dbGetQuery(con, sQuery)
#' dbDisconnect(con)
#' }
dm_re_query <- function(rdbms) {
  sql_script <- sprintf("sql/%s.sql", rdbms)
  file_name <- system.file(sql_script, package = "datamodelr")
  if (!file.exists(file_name)) {
    stop("This rdbms is not supported")  # fixed message typo ("rdbs")
  }
  sQuery <- paste(readLines(file_name), collapse = "\n")
  sQuery
}

#' Set table segment
#'
#' Change tables' segment name in a data model
#'
#' @param dm A data model object
#' @param table_segments A named list of vectors with segments as element names
#'   and tables as values in vectors
#' @export
dm_set_segment <- function(dm, table_segments) {
  if (!is.data_model(dm)) {
    stop("Not a data model object.")
  }
  for (s in names(table_segments)) {
    table_names <- table_segments[[s]]
    dm$tables$segment[dm$tables$table %in% table_names] <- s
  }
  dm
}

#' Set table display
#'
#' Change tables' display in a data model
#'
#' @param dm A data model object
#' @param display A named list of vectors with display as element names
#'   and tables as values in vectors
#' @export
dm_set_display <- function(dm, display) {
  if (!is.data_model(dm)) {
    stop("Not a data model object.")
  }
  for (s in names(display)) {
    table_names <- display[[s]]
    dm$tables$display[dm$tables$table %in% table_names] <- s
  }
  dm
}
/R/dm.R
no_license
demel/datamodelr
R
false
false
15,680
r
#' datamodelr: data model diagrams
#'
#' Provides a simple structure to describe data models,
#' functions to read a data model from a YAML file,
#' and a function to create DiagrammeR graph objects:
#'
#'
#' \itemize{
#'   \item \pkg{datamodelr}'s data model object is a simple list of data frames which
#'     represent the model entities and include elements and their relations.
#'     See \code{\link{as.data_model}}.
#'   \item Function \code{\link{dm_read_yaml}} reads YAML format and creates a
#'     data model object.
#'   \item Function \code{\link{dm_create_graph}} creates a DiagrammeR graph object from
#'     a data model object.
#' }
#'
#' @docType package
#' @name datamodelr-package
#' @aliases datamodelr
NULL

#' Coerce to a data model
#'
#' Functions to coerce an object to a data model if possible.
#'
#' @details Function accepts a data frame with columns info.
#'   Data frame must have 'table' and 'column' elements.
#'   Optional element 'key' (boolean) marks a column as a primary key.
#'   Optional element 'ref' (character string) defines referenced table name.
#'   Optional element 'ref_col' (character string) defines a column in a
#'   referenced table name primary key (only necessary when the referenced
#'   table has a compound primary key).
#' @param x Object (list or data frame) to be coerced to data model object
#' @aliases as.data_model
#' @return If possible it returns a data model object.
#'   It is a list of data frames with at least the following columns:
#'   \item{ column }{A name of the column in a table}
#'   \item{ key }{A boolean value indicating this column is a primary key or NA.}
#'   \item{ ref }{A character string with a referenced table name.
#'     Not being NA means the column is a foreign key.}
#' @export
as.data_model <- function(x) {
  UseMethod("as.data_model")
}

#' @keywords internal
#' @export
as.data_model.list <- function(x) {
  if (mode(x) != "list") {
    stop("Not a list")
  }
  if (!all(c("columns", "references") %in% (names(x)))) {
    stop("Input must have columns and references")
  }
  class(x) <- c("data_model", class(x))
  x
}

#' Check if object is a data model
#'
#' @param x Object to check if it is a data model
#' @keywords internal
#' @export
is.data_model <- function(x) {
  inherits(x, "data_model")
}

#' Coerce a data frame to a data model
#'
#' @keywords internal
#' @export
as.data_model.data.frame <- function(x) {
  if (!inherits(x, "data.frame")) {
    stop("Not a data.frame")
  }
  if (!all(c("column", "table") %in% names(x))) {
    stop("Data frame must have elements named 'table' and 'column'.")
  }

  # Normalise the optional key / ref columns.
  if (!is.null(x[["key"]])) {
    x[is.na(x[, "key"]), "key"] <- FALSE
  } else {
    x[, "key"] <- FALSE
  }
  if (is.null(x[["ref"]])) {
    x[["ref"]] <- NA
  }

  # Create references (foreign keys) from ref and keys.
  ref_table <- dm_create_references(x)

  # Per-table attributes may already be attached (e.g. by dm_list2coltable);
  # otherwise create a minimal table list.
  table_attrs <- attr(x, "tables")
  if (is.null(table_attrs)) {
    table_attrs <- data.frame(
      table = unique(x[["table"]]),
      segment = NA,
      display = NA,
      row.names = NULL,
      stringsAsFactors = FALSE
    )
  }
  attr(x, "tables") <- NULL

  ret <- list(
    tables = table_attrs,
    columns = x,
    references = ref_table
  )
  as.data_model(ret)
}

#' Read YAML
#'
#' Reads a file in YAML format and returns a data model object.
#'
#' @details YAML description should include table names (first level),
#'   columns (second level) and column attributes (third level).
#'   Expected (but not required) column attributes are
#'   \code{key} (Yes|No),
#'   \code{ref} (Name of referenced table),
#'   \code{comment} (column description).
#'
#' @param file A file in YAML format
#' @param text A YAML formatted character string
#' @examples
#' dm <-
#'   dm_read_yaml(text = "
#'
#'     Person:
#'       Person ID: {key: yes}
#'       Name:
#'       E-mail:
#'       Street:
#'       Street number:
#'       City:
#'       ZIP:
#'
#'     Order:
#'       Order ID: {key: yes}
#'       Customer: {ref: Person}
#'       Sales person: {ref: Person}
#'       Order date:
#'       Requested ship date:
#'       Status:
#'
#'     Order Line:
#'       Order ID: {key: yes, ref: Order}
#'       Line number: {key: yes}
#'       Order item: {ref: Item}
#'       Quantity:
#'       Price:
#'
#'     Item:
#'       Item ID: {key: yes}
#'       Item Name:
#'       Description:
#'   ")
#' @export
dm_read_yaml <- function(file = NULL, text = NULL) {
  # yaml is an optional dependency; fail with a clear message if missing.
  if (!requireNamespace("yaml", quietly = TRUE)) {
    stop("yaml package needed for this function to work. Please install it.",
         call. = FALSE)
  }
  if (missing(text)) {
    if (!missing(file)) {
      if (!file.exists(file)) {
        stop("File does not exist.")
      }
      dm <- yaml::yaml.load_file(file)
    } else {
      stop("A file or text needed.")
    }
  } else {
    dm <- yaml::yaml.load(text)
  }
  if (is.null(dm)) {
    return(NULL)
  }
  col_table <- dm_list2coltable(dm)
  as.data_model(col_table)
}

#' List to column table
#'
#' Convert a 3 level named list to a data frame with column info
#'
#' @details The function is used when creating a data model object
#'   from the list provided by the yaml parser.
#' @param x a named list
#' @export
#' @keywords internal
dm_list2coltable <- function(x) {
  if (!is.list(x)) {
    stop("Input must be a list.")
  }

  if (is.null(names(x))) {
    # Parsed yaml with sequences: each element is a map holding a "table"
    # name and a "columns" map.
    x_tables <- x[sapply(x, function(x) !is.null(x[["table"]]))]
    table_names <- sapply(x_tables, function(tab) tab[["table"]])
    columns <- lapply(x_tables, function(tab) {
      tab_name <- tab[["table"]]
      if (!is.null(tab_name)) {
        cols <- tab[["columns"]]
      }
    })
    names(columns) <- table_names
    column_names <- lapply(columns, names)
    column_attributes <- unique(unlist(lapply(columns, sapply, names)))
  } else {
    # Named list (parsed yaml with maps): top-level names are table names.
    columns <- x
    table_names <- names(columns)
    column_names <- lapply(columns, names)
    column_attributes <- unique(unlist(lapply(columns, sapply, names)))
  }

  # Build one data frame per table: a row per column plus one extra column
  # per column attribute (key, ref, comment, ...).
  table_list <- lapply(table_names, function(tab_name) {
    if (is.null(column_names[[tab_name]])) {
      column_names[[tab_name]] <- NA
    }
    tab <- data.frame(
      table = tab_name,
      column = column_names[tab_name],
      stringsAsFactors = FALSE
    )
    names(tab) <- c("table", "column")
    for (a in column_attributes) {
      attr_value <- unlist(
        sapply(column_names[[tab_name]], function(cname) {
          if (is.list(columns[[tab_name]][[cname]])) {
            value <- columns[[tab_name]][[cname]][[a]]
          } else {
            value <- NA
          }
          ifelse(is.null(value), NA, value)
        })
      )
      tab[[a]] <- attr_value
    }
    tab
  })
  ret <- do.call(rbind, table_list)

  table_attrs <- dm_get_table_attrs(x)
  if (!is.null(table_attrs) && is.null(table_attrs$segment)) {
    table_attrs$segment <- NA
  }
  attr(ret, "tables") <- table_attrs
  ret
}

# Extract per-table attributes (everything except "table" and "columns")
# from a parsed yaml sequence; returns a data frame with one row per table.
dm_get_table_attrs <- function(x) {
  x_tables <- x[sapply(x, function(x) !is.null(x[["table"]]))]
  table_names <- sapply(x_tables, function(tab) tab[["table"]])
  # renamed from `table_attrs`, which the original confusingly reused for
  # both the attribute-name vector and the result
  attr_names <- unique(unlist(lapply(x_tables, names)))
  attr_names <- attr_names[!attr_names %in% c("columns", "table")]
  names(x_tables) <- table_names
  table_attrs <- lapply(table_names, function(tab) {
    ret <- data.frame(
      table = tab,
      stringsAsFactors = FALSE
    )
    for (aname in attr_names) {
      tab_attr <- x_tables[[tab]][[aname]]
      if (is.null(tab_attr)) {
        tab_attr <- NA
      }
      ret[[aname]] <- tab_attr
    }
    ret
  })
  do.call(rbind, table_attrs)
}

#' Create reference info
#'
#' Creates references (foreign keys) based on reference table names in
#' column info table.
#'
#' @param col_table A data frame with table columns
#' @details The function is used when creating a data model object.
#'   \code{col_table} must have at least
#'   \code{table},
#'   \code{column} and
#'   \code{ref} elements.
#'   When referencing tables with compound primary keys an
#'   additional \code{ref_col} with primary key columns must be provided.
#' @export
#' @keywords internal
dm_create_references <- function(col_table) {
  if (!inherits(col_table, "data.frame")) {
    stop("Input must be a data frame.")
  }
  if (!all(c("table", "column") %in% names(col_table))) {
    stop("Column info table must have table, column and ref variables.")
  }
  if (!"ref" %in% names(col_table)) {
    return(NULL)
  }
  if (all(is.na(col_table[, "ref"]))) {
    return(NULL)
  }
  if (is.null(col_table[["ref_col"]])) {
    col_table[["ref_col"]] <- NA
  }

  # Keep only the rows that actually reference another table.
  ref_table <- col_table[
    !is.na(col_table[["ref"]]),
    c("table", "column", "ref", "ref_col")]
  col_table[is.na(col_table$key), "key"] <- FALSE

  # Where ref_col was not given explicitly, default to the first primary-key
  # column of the referenced table.
  ref_col <- with(ref_table,
    ifelse(is.na(ref_col),
           sapply(ref_table$ref, function(x)
             col_table[col_table$table == x & col_table$key, "column"][1]),
           ref_col))
  ref_table[["ref_col"]] <- ref_col

  # Number of primary-key columns of each referenced table (compound keys).
  num_col <- sapply(ref_table$ref, function(x)  # fixed: `<-`, not `=`
    length(col_table[col_table$table == x & col_table$key, "column"]))
  num_col[num_col == 0L] <- 1L

  # Index of each row inside its (possibly compound) key:
  # for a k-column key the rows get 1..k.
  key_col_num <- {  # fixed: `<-`, not `=`
    rle1 <- rle(num_col)
    if (lengths(rle1)[1] > 0) {
      unlist(sapply(1:lengths(rle1)[1], function(i) {
        rep(1:rle1$values[i], rle1$lengths[i] / rle1$values[i])
      }))
    } else {
      NA
    }
  }
  # A new reference starts whenever the key-column index restarts at 1.
  ref_table$ref_id <- cumsum(key_col_num == 1)
  ref_table$ref_col_num <- key_col_num
  ref_table
}

#' Create data model object from R data frames
#'
#' Uses data frame column names to create a data model diagram
#'
#' @param ... Data frames or one list of data frames
#' @export
dm_from_data_frames <- function(...) {
  df_list <- list(...)
  if (length(df_list) == 1 && inherits(df_list[[1]], "list")) {
    # A single list of data frames was passed.
    df_list <- df_list[[1]]
  } else {
    if (length(names(df_list)) < length(df_list)) {
      # Unnamed arguments: use the call expressions as table names.
      names(df_list) <- as.list(match.call(expand.dots = TRUE)[-1])
    }
  }
  tables <- df_list
  names(tables) <- make.names(names(tables))
  dfdm <- do.call(rbind, lapply(names(tables), function(table_name) {
    t1 <- tables[[table_name]]
    columns <- data.frame(
      column = names(t1),
      type = sapply(t1[0, ], class),  # column classes; no data rows needed
      stringsAsFactors = FALSE
    )
    columns$table <- table_name
    columns
  }))
  as.data_model(dfdm)
}

#' Add reference
#'
#' Adds reference to existing data model object
#'
#' @param dm A data model object
#' @param table Table name
#' @param column Column(s) name
#' @param ref Referenced table name
#' @param ref_col Referenced column(s) name
#' @return New data model object
#' @export
dm_add_reference_ <- function(dm, table, column, ref = NULL, ref_col = NULL) {
  ref_df <- data.frame(
    table = table,
    column = column,
    ref = ref,
    ref_col = ref_col,
    ref_id = ifelse(is.null(dm$references), 1, max(dm$references$ref_id) + 1),
    # fixed: 1:length(ref_col) yields c(1, 0) for empty input
    ref_col_num = seq_along(ref_col),
    stringsAsFactors = FALSE
  )
  dm$references <- rbind(dm$references, ref_df)
  dm$columns$ref[dm$columns$table == table & dm$columns$column == column] <- ref
  dm
}

#' Add references
#'
#' Adds references defined with logical expressions from data frames
#' in format table1$column1 == table2$column2
#'
#' @param dm Data model object
#' @param ... Logical expressions in format table1$column1 == table2$column2
#' @export
#' @examples
#' dm_add_references(
#'   flights$carrier == airlines$carrier,
#'   weather$origin == airports$faa
#' )
dm_add_references <- function(dm, ...) {
  ref_list <- substitute(list(...))
  if (is.null(dm$columns$ref)) dm$columns$ref <- NA
  if (is.null(dm$columns$ref_col)) dm$columns$ref_col <- NA
  if (is.null(dm$columns$key)) dm$columns$key <- FALSE

  for (ref in as.list(ref_list[-1])) {
    ref <- as.list(ref)
    # Each argument must be an unevaluated call of the shape
    # df1$col1 == df2$col2 (deparse of the `==` symbol includes backticks).
    if (as.character(ref[1]) != "`==`" ||
        length(ref) != 3 ||
        length(ref[[2]]) != 3 ||
        length(ref[[3]]) != 3) {
      stop("Define references with logical expressions: dataframe1$column1 == dataframe2$column2, ...",
           call. = FALSE)
    }
    # (unused helper `toChar` removed)
    table_name <- as.character(ref[[2]][[2]])
    column_name <- as.character(ref[[2]][[3]])
    ref_table <- as.character(ref[[3]][[2]])
    ref_col <- as.character(ref[[3]][[3]])

    dm_row <- with(dm$columns, table == table_name & column == column_name)
    dm$columns[dm_row, "ref"] <- ref_table
    dm$columns[dm_row, "ref_col"] <- ref_col

    # Mark the referenced column as a primary key.
    dm_key_row <- dm$columns$table == ref_table & dm$columns$column == ref_col
    dm$columns[dm_key_row, "key"] <- TRUE
  }
  ref_table <- dm_create_references(dm$columns)
  dm$references <- ref_table
  dm
}

#' Set key
#'
#' Set column as a primary key
#'
#' @param dm A data model object
#' @param table Table name
#' @param column Column(s) name
#' @export
dm_set_key <- function(dm, table, column) {
  update_cols <- dm$columns$table == table & dm$columns$column == column
  if (!any(update_cols)) {
    stop("Column not found.")
  }
  dm$columns$key[update_cols] <- TRUE
  dm
}

#' Set column attribute
#'
#' Set column attribute value
#'
#' @param dm A data model object
#' @param table Table name
#' @param column Column(s) name
#' @param attr Column attribute name
#' @param value New value
#' @export
#' @keywords internal
dm_set_col_attr <- function(dm, table, column, attr, value) {
  update_cols <- dm$columns$table == table & dm$columns$column == column
  if (!any(update_cols)) {
    stop("Column not found.")
  }
  dm$columns[update_cols, attr] <- value
  dm
}

#' Reverse engineer query
#'
#' Returns a string with SQL query to reverse engineer a database
#'
#' @param rdbms Which
database ("postgres" or "sqlserver") #' @return A character string with sql query #' @export #' @examples #' \dontrun{ #' library(RPostgreSQL) #' # dvdrental sample database: http://www.postgresqltutorial.com/postgresql-sample-database #' con <- dbConnect(dbDriver("PostgreSQL"), dbname="dvdrental", user ="postgres") #' sQuery <- dm_re_query("postgres") #' dm_dvdrental <- dbGetQuery(con, sQuery) #' dbDisconnect(con) #' } dm_re_query <- function(rdbms) { sql_script <- sprintf("sql/%s.sql", rdbms) file_name <- system.file(sql_script, package ="datamodelr") if( !file.exists(file_name) ) { stop("This rdbs not supported") } sQuery <- paste(readLines(file_name), collapse = "\n") sQuery } #' Set table segment #' #' Change tables' segment name in a data model #' #' @param dm A data model object #' @param table_segments A named list of vectors with segments as element names #' and tables as values in vectors #' @export dm_set_segment <- function(dm, table_segments) { if(!is.data_model(dm)) stop("Not a data model object.") for(s in names(table_segments)) { table_names <- table_segments[[s]] dm$tables$segment[dm$tables$table %in% table_names ] <- s } dm } #' Set table display #' #' Change tables' display in a data model #' #' @param dm A data model object #' @param display A named list of vectors with display as element names #' and tables as values in vectors #' @export dm_set_display <- function(dm, display) { if(!is.data_model(dm)) stop("Not a data model object.") for(s in names(display)) { table_names <- display[[s]] dm$tables$display[dm$tables$table %in% table_names ] <- s } dm }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SDE_glm.R \name{SDE_glm} \alias{SDE_glm} \title{SDE_tmle_glm} \usage{ SDE_glm(data, forms, RCT = 0.5, transport, pooled, gstar_S = 1, truth, B = NULL, max_iter = 100) } \arguments{ \item{data, }{data.frame where confounders have any names but the following, which must be reserved as follows: A = treatment, Z = intermediate confounder, M = mediator and Y is the outcome.} \item{forms, }{list of formulas. Include for each necessary model for outcome, called Yform for outcome Y, QZform for outcome Qstar_Mg, Mform, Zform, Aform (can be NULL if RCT) is selected as TRUE.} \item{RCT}{either NULL or a value, if null, then the Aform is used to fit the propensity score, otherwise propensity scores are set to RCT.} \item{transport}{if true you are transporting to site S=0} \item{pooled}{set to TRUE if you wish to define the stochastic intervention by the mechanism for the mediator defined by pooling the regression across both sites. Otherwise the stochastic intervention will only be defined by the subset for S = gstar_S for both M and Z.} \item{gstar_S}{set to 0 or 1 depending on which site you want to use to define the stochastic intervention} } \value{ a list with CIs for SDE and SIE for the means under (a*,a) combos (0,0), (0,1), (1,1) and the epsilons for both sequential regressions for those three parameters } \description{ computes the sequential regression, targeted maximum likelihood estimate for the stochastic direct effect or stochastic indirect effect using lasso. Note, this is a non-transport parameter. 
} \examples{ # uncomment these if testing the lasso functions without loading the package # library("parallel") # library("doParallel") # library("glmnet") #Set up data generating process: data(example_dgp) data = data_example # forms for non-transporting contains no S variable formsNT=list(Aform = formula("A ~ W2 + W1"), Zstarform = formula("Z ~ A + W1 + W2"), QZform = formula("Qstar_Mg ~ Z + W2 + W1"), Mstarform = formula("M ~ Z + W1 + W2"), Yform = formula("Y ~ M + Z + W1 + W2")) # forms for transporting without a pooled (over S) M or Z fit formsNP=list(Sform = formula("S ~ W1 + W2"), Aform = formula("A ~ W2 + W1 + S"), Zstarform = formula("Z ~ A + W1 + W2"), QZform = formula("Qstar_Mg ~ Z + W2 + W1+S"), Mstarform = formula("M ~ Z + W1 + W2"), Yform = formula("Y ~ M + Z + W1 + W2")) # forms for transporting with a pooled (over S) M and Z fit forms=list(Sform = formula("S ~ W1 + W2"), Aform = formula("A ~ W2 + W1 + S"), Zstarform = formula("Z ~ A + W1 + W2 + S"), QZform = formula("Qstar_Mg ~ Z + W2 + W1 + S"), Mstarform = formula("M ~ Z + W1 + W2 + S"), Yform = formula("Y ~ M + Z + W1 + W2")) W = data[,grep("W", colnames(data))] head(data) data = cbind(W, A=data$A, S=data$S, Z=data$Z, M=data$M, Y=data$Y) # define the forms for all the regressions n=1000 Wnames = c("W1", "W2") Wnamesalways = c("W1") # choose no weights (all weights are 1) data$weights = rep(1,nrow(data)) # choose randomly generated weights data$weights = runif(nrow(data)) i=1 res = data.frame(matrix(rep(NA,36),nrow = 6)) colnames(res) = c("SDE", "left", "right","SIE", "left", "right") rownames(res) = c("Trans, pooled, gstar_S=0", "Trans, pooled, gstar_S=1", "Trans, not pooled, gstar_S=0", "Trans, not pooled, gstar_S=1", "Not trans, gstar_S=0", "Not trans, gstar_S=1") for (pooled in c(FALSE, TRUE)) { for (gstar_S in c(0,1)) { if (pooled) forms = forms else forms = formsNP ans = SDE_tmle_lasso(data, forms, RCT = 0.5, Wnames = Wnames, Wnamesalways = Wnamesalways, B = NULL, transport = TRUE, pooled = 
pooled, gstar_S = gstar_S) res[i,]=c(ans$CI_SDE,ans$CI_SIE) i = i + 1 } } for (gstar_S in c(0,1)) { ans = SDE_tmle_lasso(data, formsNT, RCT = 0.5, Wnames = Wnames, Wnamesalways = Wnamesalways, B = NULL, transport = FALSE, pooled = pooled, gstar_S = gstar_S) res[i,]=c(ans$CI_SDE,ans$CI_SIE) i = i + 1 } res # undebug(SDE_tmle_lasso) # undebug(get_gstarM_lasso) # undebug(get.mediation.initdata_lasso) # undebug(mediation.step1_lasso) # undebug(mediation.step2_lasso) }
/man/SDE_glm.Rd
no_license
jlstiles/SDEtransportsim
R
false
true
4,439
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SDE_glm.R \name{SDE_glm} \alias{SDE_glm} \title{SDE_tmle_glm} \usage{ SDE_glm(data, forms, RCT = 0.5, transport, pooled, gstar_S = 1, truth, B = NULL, max_iter = 100) } \arguments{ \item{data, }{data.frame where confounders have any names but the following, which must be reserved as follows: A = treatment, Z = intermediate confounder, M = mediator and Y is the outcome.} \item{forms, }{list of formulas. Include for each necessary model for outcome, called Yform for outcome Y, QZform for outcome Qstar_Mg, Mform, Zform, Aform (can be NULL if RCT) is selected as TRUE.} \item{RCT}{either NULL or a value, if null, then the Aform is used to fit the propensity score, otherwise propensity scores are set to RCT.} \item{transport}{if true you are transporting to site S=0} \item{pooled}{set to TRUE if you wish to define the stochastic intervention by the mechanism for the mediator defined by pooling the regression across both sites. Otherwise the stochastic intervention will only be defined by the subset for S = gstar_S for both M and Z.} \item{gstar_S}{set to 0 or 1 depending on which site you want to use to define the stochastic intervention} } \value{ a list with CIs for SDE and SIE for the means under (a*,a) combos (0,0), (0,1), (1,1) and the epsilons for both sequential regressions for those three parameters } \description{ computes the sequential regression, targeted maximum likelihood estimate for the stochastic direct effect or stochastic indirect effect using lasso. Note, this is a non-transport parameter. 
} \examples{ # uncomment these if testing the lasso functions without loading the package # library("parallel") # library("doParallel") # library("glmnet") #Set up data generating process: data(example_dgp) data = data_example # forms for non-transporting contains no S variable formsNT=list(Aform = formula("A ~ W2 + W1"), Zstarform = formula("Z ~ A + W1 + W2"), QZform = formula("Qstar_Mg ~ Z + W2 + W1"), Mstarform = formula("M ~ Z + W1 + W2"), Yform = formula("Y ~ M + Z + W1 + W2")) # forms for transporting without a pooled (over S) M or Z fit formsNP=list(Sform = formula("S ~ W1 + W2"), Aform = formula("A ~ W2 + W1 + S"), Zstarform = formula("Z ~ A + W1 + W2"), QZform = formula("Qstar_Mg ~ Z + W2 + W1+S"), Mstarform = formula("M ~ Z + W1 + W2"), Yform = formula("Y ~ M + Z + W1 + W2")) # forms for transporting with a pooled (over S) M and Z fit forms=list(Sform = formula("S ~ W1 + W2"), Aform = formula("A ~ W2 + W1 + S"), Zstarform = formula("Z ~ A + W1 + W2 + S"), QZform = formula("Qstar_Mg ~ Z + W2 + W1 + S"), Mstarform = formula("M ~ Z + W1 + W2 + S"), Yform = formula("Y ~ M + Z + W1 + W2")) W = data[,grep("W", colnames(data))] head(data) data = cbind(W, A=data$A, S=data$S, Z=data$Z, M=data$M, Y=data$Y) # define the forms for all the regressions n=1000 Wnames = c("W1", "W2") Wnamesalways = c("W1") # choose no weights (all weights are 1) data$weights = rep(1,nrow(data)) # choose randomly generated weights data$weights = runif(nrow(data)) i=1 res = data.frame(matrix(rep(NA,36),nrow = 6)) colnames(res) = c("SDE", "left", "right","SIE", "left", "right") rownames(res) = c("Trans, pooled, gstar_S=0", "Trans, pooled, gstar_S=1", "Trans, not pooled, gstar_S=0", "Trans, not pooled, gstar_S=1", "Not trans, gstar_S=0", "Not trans, gstar_S=1") for (pooled in c(FALSE, TRUE)) { for (gstar_S in c(0,1)) { if (pooled) forms = forms else forms = formsNP ans = SDE_tmle_lasso(data, forms, RCT = 0.5, Wnames = Wnames, Wnamesalways = Wnamesalways, B = NULL, transport = TRUE, pooled = 
pooled, gstar_S = gstar_S) res[i,]=c(ans$CI_SDE,ans$CI_SIE) i = i + 1 } } for (gstar_S in c(0,1)) { ans = SDE_tmle_lasso(data, formsNT, RCT = 0.5, Wnames = Wnames, Wnamesalways = Wnamesalways, B = NULL, transport = FALSE, pooled = pooled, gstar_S = gstar_S) res[i,]=c(ans$CI_SDE,ans$CI_SIE) i = i + 1 } res # undebug(SDE_tmle_lasso) # undebug(get_gstarM_lasso) # undebug(get.mediation.initdata_lasso) # undebug(mediation.step1_lasso) # undebug(mediation.step2_lasso) }
# Read the raw CSV; the file has no header row, so columns get the
# default names V1, V2, ...
raw_data <- read.csv(file = "data/inflammation.csv", header = FALSE)
# Keep only the fourth column. The name suggests these are the
# not-yet-centered values — presumably centering happens elsewhere;
# TODO(review): confirm against downstream code.
not_centered_part <- raw_data[, 4]
/inflammation.R
no_license
bjorn-ludwigs-fair-git-r-2019/2019-04-11-FAIR-R-demo
R
false
false
126
r
# Read the raw CSV; the file has no header row, so columns get the
# default names V1, V2, ...
raw_data <- read.csv(file = "data/inflammation.csv", header = FALSE)
# Keep only the fourth column. The name suggests these are the
# not-yet-centered values — presumably centering happens elsewhere;
# TODO(review): confirm against downstream code.
not_centered_part <- raw_data[, 4]