content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
# Data Wrangling in R for SQL Programmers
# Author: Marciano Moreno, marciano_moreno@acm.org
#
# dplyr.R - dplyr demonstration script (teaching material; each expression
# is meant to be run interactively and its printed result inspected).

install.packages("dplyr")
install.packages(c("nycflights13", "Lahman"))
install.packages("RSQLite")

library(dplyr)
library(nycflights13)
library(RSQLite)

# Data exploration
ref_flights <- flights  # keep a reference in the global environment
flights
str(flights)
head(flights)
tail(flights)
dim(flights)
names(flights)
class(ref_flights)  # classes this object inherits from

# Accessing elements of the tbl
flights[1]    # first column
flights[1,]   # first row
flights[,1]   # first column as a data frame, not as a vector
is.vector(flights[,1])
flights$carrier  # elements of the carrier column, as a vector
is.vector(flights$carrier)

# Column selection
select(flights, origin, dest, air_time)

# Chaining in dplyr:
# dplyr provides the %>% operator. x %>% f(y) becomes f(x, y).
flights %>% select(origin, dest, air_time)

# Row filtering
distinct(select(flights, carrier))  # faster than base::unique()
unique(flights$carrier)             # base R equivalent

flights %>% filter(carrier == "UA")
flights %>% filter(carrier == "UA" & dest == "LAX")   # 5,823 rows
flights %>% filter(carrier == "UA" && dest == "LAX")  # 0 rows

# With chaining we can filter both rows and columns:
flights %>%
  filter(carrier == "UA" & dest == "LAX") %>%
  select(origin, dest, air_time)

# Without chaining - option 1: nesting functions
select(filter(flights, carrier == "UA" & dest == "LAX"), origin, dest, air_time)

# Without chaining - option 2: temporary variables
t1 <- filter(flights, carrier == "UA" & dest == "LAX")
t2 <- select(t1, origin, dest, air_time)
t2

# slice() filters rows by position
flights %>% slice(5:10)

# Grouping and aggregate computation
flights %>%
  group_by(origin) %>%
  summarise(count = n(), dist = mean(distance, na.rm = TRUE))
flights %>%
  group_by(origin, dest) %>%
  summarise(count = n(), dist = mean(distance, na.rm = TRUE))

# Same data, backed by SQLite instead of memory
flights_db1 <- tbl(nycflights13_sqlite(), "flights")

carriers_df <- flights %>% group_by(carrier)
str(carriers_df)
carriers_db1 <- flights_db1 %>% group_by(carrier)
str(carriers_db1)

summary(carriers_df$arr_delay)
carriers_df %>% summarise(delay = mean(arr_delay))
system.time(carriers_df %>% summarise(delay = mean(arr_delay)))
carriers_db1 %>% summarise(delay = mean(arr_delay)) %>% collect()
system.time(carriers_df %>% summarise(delay = mean(arr_delay)))

browseVignettes(package = c("dplyr", "tidyr"))
/dplyr.R
no_license
marcianomoreno/datawrangling
R
false
false
2,566
r
# Data Wrangling in R for SQL Programmers
# Author: Marciano Moreno, marciano_moreno@acm.org
#
# dplyr.R - dplyr demonstration script (teaching material; each expression
# is meant to be run interactively and its printed result inspected).

install.packages("dplyr")
install.packages(c("nycflights13", "Lahman"))
install.packages("RSQLite")

library(dplyr)
library(nycflights13)
library(RSQLite)

# Data exploration
ref_flights <- flights  # keep a reference in the global environment
flights
str(flights)
head(flights)
tail(flights)
dim(flights)
names(flights)
class(ref_flights)  # classes this object inherits from

# Accessing elements of the tbl
flights[1]    # first column
flights[1,]   # first row
flights[,1]   # first column as a data frame, not as a vector
is.vector(flights[,1])
flights$carrier  # elements of the carrier column, as a vector
is.vector(flights$carrier)

# Column selection
select(flights, origin, dest, air_time)

# Chaining in dplyr:
# dplyr provides the %>% operator. x %>% f(y) becomes f(x, y).
flights %>% select(origin, dest, air_time)

# Row filtering
distinct(select(flights, carrier))  # faster than base::unique()
unique(flights$carrier)             # base R equivalent

flights %>% filter(carrier == "UA")
flights %>% filter(carrier == "UA" & dest == "LAX")   # 5,823 rows
flights %>% filter(carrier == "UA" && dest == "LAX")  # 0 rows

# With chaining we can filter both rows and columns:
flights %>%
  filter(carrier == "UA" & dest == "LAX") %>%
  select(origin, dest, air_time)

# Without chaining - option 1: nesting functions
select(filter(flights, carrier == "UA" & dest == "LAX"), origin, dest, air_time)

# Without chaining - option 2: temporary variables
t1 <- filter(flights, carrier == "UA" & dest == "LAX")
t2 <- select(t1, origin, dest, air_time)
t2

# slice() filters rows by position
flights %>% slice(5:10)

# Grouping and aggregate computation
flights %>%
  group_by(origin) %>%
  summarise(count = n(), dist = mean(distance, na.rm = TRUE))
flights %>%
  group_by(origin, dest) %>%
  summarise(count = n(), dist = mean(distance, na.rm = TRUE))

# Same data, backed by SQLite instead of memory
flights_db1 <- tbl(nycflights13_sqlite(), "flights")

carriers_df <- flights %>% group_by(carrier)
str(carriers_df)
carriers_db1 <- flights_db1 %>% group_by(carrier)
str(carriers_db1)

summary(carriers_df$arr_delay)
carriers_df %>% summarise(delay = mean(arr_delay))
system.time(carriers_df %>% summarise(delay = mean(arr_delay)))
carriers_db1 %>% summarise(delay = mean(arr_delay)) %>% collect()
system.time(carriers_df %>% summarise(delay = mean(arr_delay)))

browseVignettes(package = c("dplyr", "tidyr"))
# Simulate data for model M_0 with t capture occasions.
#
# parameters: a list with elements
#   t               - number of capture occasions
#   p               - capture probability (M0: constant across occasions)
#   N               - true population size
#   lambda          - Poisson mean number of photos per capture
#   alpha.match, beta.match, alpha.non.match, beta.non.match -
#                     score-distribution parameters passed through to Sim.S
# output: which object to return: 'W', 'Y', 'X', 'C', 'S', or 'ALL'
# 3/15/2016
source("Simulate_X.R")
source("X_to_C.R")
source("Simulate_S.R")

sim.data.M0 <- function(parameters, output) {
  t <- parameters$t
  p <- parameters$p
  N <- parameters$N
  lambda <- parameters$lambda
  alpha.match <- parameters$alpha.match
  beta.match <- parameters$beta.match
  alpha.non.match <- parameters$alpha.non.match
  beta.non.match <- parameters$beta.non.match

  # Simulate W, the observed capture-history matrix, under model M0:
  # first with both observed and unobserved individuals, then drop the
  # never-captured rows. drop = FALSE keeps W a matrix even when only a
  # single individual was observed.
  W <- matrix(rbinom(n = (N * t), size = 1, prob = p), nrow = N, ncol = t)
  remove <- apply(W, 1, sum) == 0
  W <- W[!remove, , drop = FALSE]
  if (output == 'W') { return(W) }

  # Simulate the number of photos at each occasion using a Poisson.
  # We allow for the possibility that an animal was captured but no photo
  # was taken: independent Poissons are generated for each possible capture
  # and multiplied by W, so non-captures contribute 0 photos.
  N.obs <- nrow(W)  # number of observed individuals
  Y <- matrix(rpois(n = (N.obs * t), lambda = lambda),
              nrow = N.obs, ncol = t) * W
  # Remove individuals that were never photographed (drop = FALSE keeps
  # the matrix shape when one row remains).
  Y <- Y[rowSums(Y) > 0, , drop = FALSE]
  if (output == 'Y') { return(Y) }

  # Simulate X, the array (list of lists) of photos for each capture
  # history, and photo.occasion, the capture occasion for each photo.
  N.photo <- sum(Y)
  N.obs.photographed <- nrow(Y)
  Sim.X <- Simulate.X(Y, N.photo, N.obs.photographed)
  X <- Sim.X$X
  photo.occasion <- Sim.X$photo.occasion
  if (output == 'X') { return(X) }

  # Compute the C matrix from the X matrix.
  C <- X_to_C(X, N.photo, N.obs.photographed)
  # BUG FIX: the original wrote if(output=='c'){return(c)}, which returned
  # the base R concatenation function `c`, not the computed matrix C.
  # Accept both 'C' and the legacy lowercase 'c' and return C.
  if (output == 'C' || output == 'c') { return(C) }

  # Simulate the score values.
  S <- Sim.S(C, alpha.match, beta.match, alpha.non.match, beta.non.match,
             N.photo)
  if (output == 'S') { return(S) }
  if (output == 'ALL') {
    return(list('S' = S, 'photo.occasion' = photo.occasion, 'W' = W,
                'Y' = Y, 'X' = X, 'C' = C))
  }
}
/Code/Sim_Data.R
no_license
AmandaEllis/Sampler_Project
R
false
false
2,412
r
# Simulate data for model M_0 with t capture occasions.
#
# parameters: a list with elements
#   t               - number of capture occasions
#   p               - capture probability (M0: constant across occasions)
#   N               - true population size
#   lambda          - Poisson mean number of photos per capture
#   alpha.match, beta.match, alpha.non.match, beta.non.match -
#                     score-distribution parameters passed through to Sim.S
# output: which object to return: 'W', 'Y', 'X', 'C', 'S', or 'ALL'
# 3/15/2016
source("Simulate_X.R")
source("X_to_C.R")
source("Simulate_S.R")

sim.data.M0 <- function(parameters, output) {
  t <- parameters$t
  p <- parameters$p
  N <- parameters$N
  lambda <- parameters$lambda
  alpha.match <- parameters$alpha.match
  beta.match <- parameters$beta.match
  alpha.non.match <- parameters$alpha.non.match
  beta.non.match <- parameters$beta.non.match

  # Simulate W, the observed capture-history matrix, under model M0:
  # first with both observed and unobserved individuals, then drop the
  # never-captured rows. drop = FALSE keeps W a matrix even when only a
  # single individual was observed.
  W <- matrix(rbinom(n = (N * t), size = 1, prob = p), nrow = N, ncol = t)
  remove <- apply(W, 1, sum) == 0
  W <- W[!remove, , drop = FALSE]
  if (output == 'W') { return(W) }

  # Simulate the number of photos at each occasion using a Poisson.
  # We allow for the possibility that an animal was captured but no photo
  # was taken: independent Poissons are generated for each possible capture
  # and multiplied by W, so non-captures contribute 0 photos.
  N.obs <- nrow(W)  # number of observed individuals
  Y <- matrix(rpois(n = (N.obs * t), lambda = lambda),
              nrow = N.obs, ncol = t) * W
  # Remove individuals that were never photographed (drop = FALSE keeps
  # the matrix shape when one row remains).
  Y <- Y[rowSums(Y) > 0, , drop = FALSE]
  if (output == 'Y') { return(Y) }

  # Simulate X, the array (list of lists) of photos for each capture
  # history, and photo.occasion, the capture occasion for each photo.
  N.photo <- sum(Y)
  N.obs.photographed <- nrow(Y)
  Sim.X <- Simulate.X(Y, N.photo, N.obs.photographed)
  X <- Sim.X$X
  photo.occasion <- Sim.X$photo.occasion
  if (output == 'X') { return(X) }

  # Compute the C matrix from the X matrix.
  C <- X_to_C(X, N.photo, N.obs.photographed)
  # BUG FIX: the original wrote if(output=='c'){return(c)}, which returned
  # the base R concatenation function `c`, not the computed matrix C.
  # Accept both 'C' and the legacy lowercase 'c' and return C.
  if (output == 'C' || output == 'c') { return(C) }

  # Simulate the score values.
  S <- Sim.S(C, alpha.match, beta.match, alpha.non.match, beta.non.match,
             N.photo)
  if (output == 'S') { return(S) }
  if (output == 'ALL') {
    return(list('S' = S, 'photo.occasion' = photo.occasion, 'W' = W,
                'Y' = Y, 'X' = X, 'C' = C))
  }
}
# Shiny dashboard for interactively exploring the NHANES dataset.
# Tabs: Overview (visdat visual summary, skimr tabular summary, data
# dictionary), Categorical (single-category bar chart, proportion barplot
# against the outcome, janitor crosstabs, naniar missing-data plot), and
# Continuous (histogram, boxplot, correlation scatter, each with optional
# faceting). The outcome variable is PhysActive; covariates are subset from
# the NHANES package data, with two derived columns added
# (PhysActiveDaysAtLeast3, SleepHrsNightCat).
# NOTE(review): get_category_variables()/get_numeric_variables() come from
# "R/helper.R" and "data/data_dictionary.csv" is read from disk — neither is
# visible here; the app will not run without them.
# NOTE(review): the add_row pipeline that appends PhysActiveDaysAtLeast3 and
# SleepHrsNightCat to data_dictionary is executed twice, so those two
# dictionary rows appear duplicated — presumably unintentional; confirm
# before removing one copy.
# This is the user-interface definition of a Shiny web application. # You can find out more about building applications with Shiny here: # # http://shiny.rstudio.com # library(shiny) library(shinydashboard) library(ggplot2) library(dplyr) library(DT) library(data.table) library(naniar) library(visdat) library(skimr) library(readr) library(NHANES) library(janitor) source("R/helper.R") #data_dictionary <- read.csv("data/shhs-data-dictionary-0.13.1-variables.csv") #myDataFrame <- data.table(read_rds("data/common_data_small.rds")) #load("data/bmi_diabetes.rda") #myDataFrame <- data.table(bmi_diabetes) data("NHANES") ##specify outcome variable here outcome_var <- c("PhysActive") ## specify covariates here (including outcome variable) covariates <- c("Gender", "Age", "SurveyYr", "Race1", "Race3" ,"MaritalStatus", "BMI", "HHIncome", "Education", "Poverty", "Work", "HomeOwn", "BPSysAve", "BPDiaAve", "Testosterone", "DirectChol", "HealthGen","CompHrsDay", "BMI_WHO", "TotChol", "Depressed", "LittleInterest", "Pulse", "Diabetes", "DiabetesAge", "PhysActive","PhysActiveDays","PhysActiveDaysAtLeast3", "SleepHrsNight", "SleepTrouble", "SleepHrsNightCat","TVHrsDay", "AlcoholDay", "SmokeNow","Smoke100","Marijuana", "RegularMarij","HardDrugs") NHANES <- NHANES %>% mutate( PhysActiveDaysAtLeast3=factor(1*(PhysActiveDays>=3),levels=c(0,1),labels=c("No","Yes")), SleepHrsNightCat=case_when(SleepHrsNight<6 ~ "<6", dplyr::between(SleepHrsNight,6,9) ~ "6-9", SleepHrsNight>9 ~ ">9", TRUE ~ as.character(NA))) myDataFrame <- data.table(NHANES)[,covariates,with=FALSE] remove_categories <- outcome_var categoricalVars <- sort(names(get_category_variables(myDataFrame))) cat_no_outcome <- setdiff(categoricalVars, remove_categories) remove_numeric <- c("nsrrid") numericVars <- sort(get_numeric_variables(myDataFrame)) numericVars <- setdiff(numericVars, remove_numeric) theme_set(theme_classic(base_size = 15)) data_dictionary <- readr::read_csv("data/data_dictionary.csv") %>% filter(VariableName 
%in% covariates) data_dictionary <- data_dictionary %>% add_row(VariableName = "PhysActiveDaysAtLeast3", Definition = "PhysActiveDays>=3 ~ Yes, PhysActiveDays < 3 ~ No") %>% add_row(VariableName = "SleepHrsNightCat", Definition = "SleepHrsNight categorized into <6hrs, [6-9]hrs, >9hrs") %>% arrange(VariableName) data_dictionary <- data_dictionary %>% add_row(VariableName = "PhysActiveDaysAtLeast3", Definition = "PhysActiveDays>=3 ~ Yes, PhysActiveDays < 3 ~ No") %>% add_row(VariableName = "SleepHrsNightCat", Definition = "SleepHrsNight categorized into <6hrs, [6-9]hrs, >9hrs") %>% arrange(VariableName) ##Don't modify anything below here. ui <- dashboardPage( header=dashboardHeader( title = "NHANES" ), sidebar=dashboardSidebar( sidebarMenu( menuItem("Overview", tabName = "overview", selected=TRUE), menuItem("Categorical", tabName = "categorical"), menuItem("Continuous", tabName = "continuous")) ), body= dashboardBody( tabItems( tabItem("overview", tabBox(width = 12, tabPanel("Visual Summary of Data", tags$head(tags$style("#TxtOut {white-space: nowrap;}")), fluidRow(column(12, offset=0, plotOutput("visdat"))) ), tabPanel("Tabular Summary of Data", tags$head(tags$style("#TxtOut {white-space: nowrap;}")), fluidRow(column(12, offset=0, verbatimTextOutput("summaryTable") ) )),#, # tabPanel("Missing Clusters", tags$head(tags$style("#TxtOut {white-space: nowrap;}")), # fluidRow(column(12, offset=0, plotOutput("missing_clusters")) # ) # # ) tabPanel("Data Dictionary", tags$head(tags$style("#TxtOut {white-space: nowrap;}")), fluidRow(dataTableOutput("data_dictionary")) ) ) ), tabItem("categorical", tabBox(width=12, tabPanel("Single Category", selectInput(inputId = "singleVar", "Select Categorical Variable", choices = categoricalVars, selected = categoricalVars[1]), plotOutput("singleTab") ), tabPanel("Category/Outcome", selectInput(inputId = "condTab1", "Select X-axis Variable to Calculate Proportions", choices=categoricalVars, selected=cat_no_outcome[1]), selectInput(inputId 
= "condTab2", "Select Fill/Outcome Variable", choices=outcome_var, selected=outcome_var), plotOutput("proportionBarplot") ), tabPanel("Crosstab Explorer", selectInput(inputId = "crossTab1", "Select Crosstab Variable (x)", choices=categoricalVars, selected=categoricalVars[1]), selectInput(inputId = "crossTab2", "Select Crosstab Variable (y)", choices=outcome_var, selected=outcome_var), verbatimTextOutput("crossTab") ), tabPanel("Missing Data Explorer", selectInput(inputId = "missingVar", "Select Variable to Examine", choices=categoricalVars, selected = categoricalVars[1]), plotOutput("missingTab") ) )), tabItem("continuous", tabBox(width=12, tabPanel("Histogram Explorer", fluidRow(column(width = 4, selectInput(inputId = "numericVarHist", "Select Numeric Variable", choices = numericVars, selected=numericVars[1])), column(width=4, sliderInput("bins", "Number of bins:", min = 1, max = 50,value = 30))), plotOutput("distPlot") ), tabPanel("Boxplot Explorer", fluidRow( column(width = 4, selectInput(inputId = "numericVarBox", "Select Numeric Variable", choices = numericVars, selected=numericVars[1])), column(width=4,selectInput(inputId = "catVarBox", "Select Category to Condition on", choices = categoricalVars, selected=categoricalVars[1])), column(width=4, selectInput(inputId = "facet_var_boxplot", "Select Facet Variable", choices=c("NONE",categoricalVars), selected = "NONE")) ), plotOutput("boxPlot") ), tabPanel("Correlation Explorer", fluidRow( column(width=4, selectInput("x_var", "Select X Variable", choices=numericVars, selected = numericVars[1])), column(width=4, selectInput("y_var", "Select Y Variable", choices=numericVars, selected = numericVars[2])), column(width=4, selectInput("facet_var", "Select Facet Variable", choices=c("NONE",categoricalVars), selected = "NONE")) ), fluidRow(plotOutput("corr_plot")) )) )) ) ) server <- function(input, output, session) { dataOut <- reactive({ #req(input$cohort) myDataFrame #%>% filter_(cohortList[[input$cohort]]) }) 
output$singleTab <- renderPlot({ dataOut()[,c(input$singleVar), with=FALSE] %>% mutate(gr = 1) %>% ggplot(aes_string(x=input$singleVar, fill=input$singleVar)) + geom_bar(aes(y = ..count..), color="black") + geom_text(aes(group=gr, label = scales::percent(..prop..), y= ..count.. ), stat= "count", vjust=-0.5) + theme(axis.text.x=element_text(angle=90)) }) # output$missing_clusters <- renderPlot({ # visdat::vis_miss(data.frame(dataOut()), cluster = TRUE) + # theme(axis.text.x = element_text(size = 15, angle = 90)) # }) output$visdat <- renderPlot({ visdat::vis_dat(data.frame(dataOut())) + theme(axis.text.x = element_text(size = 15, angle = 45)) }) output$summaryTable <- renderPrint({ skimr::skim(dataOut()) }) output$missingTab <- renderPlot({ var <- sym(input$missingVar) dataOut() %>% data.frame() %>% gg_miss_fct(fct = !!var) + theme(axis.text = element_text(size = 15)) }) output$crossTab <- renderPrint({ tab <- dataOut() %>% tabyl(!!sym(input$crossTab1), !!sym(input$crossTab2)) %>% adorn_totals() %>% adorn_percentages() %>% adorn_pct_formatting() %>% adorn_ns() #out <- dataOut()[,c(input$crossTab1, input$crossTab2), with=FALSE] #tab <- table(out, useNA = "ifany") tab }) observe({ condTab1_selected <- input$condTab1 condTab2_selected <- input$condTab2 updateSelectInput(session, "condTab2", choices = setdiff(categoricalVars,condTab1_selected), selected = condTab2_selected) }) observe({ crossTab1_selected <- input$crossTab1 crossTab2_selected <- input$crossTab2 updateSelectInput(session, "crossTab2", choices = setdiff(categoricalVars,crossTab1_selected), selected = crossTab2_selected) }) proportionTable <- reactive({ out <- dataOut()[,c(input$condTab1, input$condTab2), with=FALSE] out }) output$proportionTab <- renderPrint({ tab <- table(proportionTable(), useNA="ifany") return(tab[,"Yes"]/(tab[,"No"] + tab[,"Yes"])) }) output$proportionBarplot <- renderPlot({ print(input$condTab1) print(input$condTab2) percent_table <- proportionTable() %>% data.frame() %>% 
group_by(!!sym(input$condTab1)) %>% count(!!sym(input$condTab2)) %>% mutate(ratio=scales::percent(n/sum(n))) proportionTable() %>% ggplot(aes_string(x=input$condTab1, fill=input$condTab2)) + geom_bar(position="fill", color="black") + theme(text=element_text(size=20), axis.text.x = element_text(angle = 90)) + geom_text(data = percent_table, mapping = aes(y=n, label=ratio), position=position_fill(vjust=0.5)) # group= !!sym(input$condTab) }) output$distPlot <- renderPlot({ outPlot <- ggplot(dataOut(), aes_string(x=input$numericVarHist)) + geom_histogram(bins=input$bins) + theme(text=element_text(size=20), axis.text.x = element_text(angle=90)) outPlot }) output$boxPlot <- renderPlot({ outPlot <- ggplot(dataOut(), aes_string(x=input$catVarBox, y=input$numericVarBox, fill=input$catVarBox)) + geom_boxplot() + theme(text=element_text(size=20), axis.text.x = element_text(angle=90)) if(input$facet_var_boxplot=="NONE") { outPlot }else{ outPlot+facet_wrap(input$facet_var_boxplot) } }) output$data_dictionary <- renderDataTable( datatable(data_dictionary, options=list(pageLength=20)) ) output$corr_plot <- renderPlot({ mini_frame <- dataOut() %>% data.frame() %>% select(!!sym(input$x_var), !!sym(input$y_var)) %>% tidyr::drop_na() xcol <- mini_frame %>% pull(!!sym(input$x_var)) ycol <- mini_frame %>% pull(!!sym(input$y_var)) corval <- signif(cor(xcol, ycol), digits = 3) p <- ggplot(dataOut(), aes_string(x=input$x_var, y=input$y_var)) + geom_miss_point() + stat_smooth(method=lm, se=FALSE) + ggtitle(paste(input$x_var, "vs.", input$y_var, "correlation =", corval)) if(input$facet_var=="NONE") { p }else{ p+facet_wrap(input$facet_var) } }) } shinyApp(ui = ui, server = server)
/app.R
no_license
BioData-Club/nhanes_explore
R
false
false
12,708
r
# Shiny dashboard for interactively exploring the NHANES dataset.
# Tabs: Overview (visdat visual summary, skimr tabular summary, data
# dictionary), Categorical (single-category bar chart, proportion barplot
# against the outcome, janitor crosstabs, naniar missing-data plot), and
# Continuous (histogram, boxplot, correlation scatter, each with optional
# faceting). The outcome variable is PhysActive; covariates are subset from
# the NHANES package data, with two derived columns added
# (PhysActiveDaysAtLeast3, SleepHrsNightCat).
# NOTE(review): get_category_variables()/get_numeric_variables() come from
# "R/helper.R" and "data/data_dictionary.csv" is read from disk — neither is
# visible here; the app will not run without them.
# NOTE(review): the add_row pipeline that appends PhysActiveDaysAtLeast3 and
# SleepHrsNightCat to data_dictionary is executed twice, so those two
# dictionary rows appear duplicated — presumably unintentional; confirm
# before removing one copy.
# This is the user-interface definition of a Shiny web application. # You can find out more about building applications with Shiny here: # # http://shiny.rstudio.com # library(shiny) library(shinydashboard) library(ggplot2) library(dplyr) library(DT) library(data.table) library(naniar) library(visdat) library(skimr) library(readr) library(NHANES) library(janitor) source("R/helper.R") #data_dictionary <- read.csv("data/shhs-data-dictionary-0.13.1-variables.csv") #myDataFrame <- data.table(read_rds("data/common_data_small.rds")) #load("data/bmi_diabetes.rda") #myDataFrame <- data.table(bmi_diabetes) data("NHANES") ##specify outcome variable here outcome_var <- c("PhysActive") ## specify covariates here (including outcome variable) covariates <- c("Gender", "Age", "SurveyYr", "Race1", "Race3" ,"MaritalStatus", "BMI", "HHIncome", "Education", "Poverty", "Work", "HomeOwn", "BPSysAve", "BPDiaAve", "Testosterone", "DirectChol", "HealthGen","CompHrsDay", "BMI_WHO", "TotChol", "Depressed", "LittleInterest", "Pulse", "Diabetes", "DiabetesAge", "PhysActive","PhysActiveDays","PhysActiveDaysAtLeast3", "SleepHrsNight", "SleepTrouble", "SleepHrsNightCat","TVHrsDay", "AlcoholDay", "SmokeNow","Smoke100","Marijuana", "RegularMarij","HardDrugs") NHANES <- NHANES %>% mutate( PhysActiveDaysAtLeast3=factor(1*(PhysActiveDays>=3),levels=c(0,1),labels=c("No","Yes")), SleepHrsNightCat=case_when(SleepHrsNight<6 ~ "<6", dplyr::between(SleepHrsNight,6,9) ~ "6-9", SleepHrsNight>9 ~ ">9", TRUE ~ as.character(NA))) myDataFrame <- data.table(NHANES)[,covariates,with=FALSE] remove_categories <- outcome_var categoricalVars <- sort(names(get_category_variables(myDataFrame))) cat_no_outcome <- setdiff(categoricalVars, remove_categories) remove_numeric <- c("nsrrid") numericVars <- sort(get_numeric_variables(myDataFrame)) numericVars <- setdiff(numericVars, remove_numeric) theme_set(theme_classic(base_size = 15)) data_dictionary <- readr::read_csv("data/data_dictionary.csv") %>% filter(VariableName 
%in% covariates) data_dictionary <- data_dictionary %>% add_row(VariableName = "PhysActiveDaysAtLeast3", Definition = "PhysActiveDays>=3 ~ Yes, PhysActiveDays < 3 ~ No") %>% add_row(VariableName = "SleepHrsNightCat", Definition = "SleepHrsNight categorized into <6hrs, [6-9]hrs, >9hrs") %>% arrange(VariableName) data_dictionary <- data_dictionary %>% add_row(VariableName = "PhysActiveDaysAtLeast3", Definition = "PhysActiveDays>=3 ~ Yes, PhysActiveDays < 3 ~ No") %>% add_row(VariableName = "SleepHrsNightCat", Definition = "SleepHrsNight categorized into <6hrs, [6-9]hrs, >9hrs") %>% arrange(VariableName) ##Don't modify anything below here. ui <- dashboardPage( header=dashboardHeader( title = "NHANES" ), sidebar=dashboardSidebar( sidebarMenu( menuItem("Overview", tabName = "overview", selected=TRUE), menuItem("Categorical", tabName = "categorical"), menuItem("Continuous", tabName = "continuous")) ), body= dashboardBody( tabItems( tabItem("overview", tabBox(width = 12, tabPanel("Visual Summary of Data", tags$head(tags$style("#TxtOut {white-space: nowrap;}")), fluidRow(column(12, offset=0, plotOutput("visdat"))) ), tabPanel("Tabular Summary of Data", tags$head(tags$style("#TxtOut {white-space: nowrap;}")), fluidRow(column(12, offset=0, verbatimTextOutput("summaryTable") ) )),#, # tabPanel("Missing Clusters", tags$head(tags$style("#TxtOut {white-space: nowrap;}")), # fluidRow(column(12, offset=0, plotOutput("missing_clusters")) # ) # # ) tabPanel("Data Dictionary", tags$head(tags$style("#TxtOut {white-space: nowrap;}")), fluidRow(dataTableOutput("data_dictionary")) ) ) ), tabItem("categorical", tabBox(width=12, tabPanel("Single Category", selectInput(inputId = "singleVar", "Select Categorical Variable", choices = categoricalVars, selected = categoricalVars[1]), plotOutput("singleTab") ), tabPanel("Category/Outcome", selectInput(inputId = "condTab1", "Select X-axis Variable to Calculate Proportions", choices=categoricalVars, selected=cat_no_outcome[1]), selectInput(inputId 
= "condTab2", "Select Fill/Outcome Variable", choices=outcome_var, selected=outcome_var), plotOutput("proportionBarplot") ), tabPanel("Crosstab Explorer", selectInput(inputId = "crossTab1", "Select Crosstab Variable (x)", choices=categoricalVars, selected=categoricalVars[1]), selectInput(inputId = "crossTab2", "Select Crosstab Variable (y)", choices=outcome_var, selected=outcome_var), verbatimTextOutput("crossTab") ), tabPanel("Missing Data Explorer", selectInput(inputId = "missingVar", "Select Variable to Examine", choices=categoricalVars, selected = categoricalVars[1]), plotOutput("missingTab") ) )), tabItem("continuous", tabBox(width=12, tabPanel("Histogram Explorer", fluidRow(column(width = 4, selectInput(inputId = "numericVarHist", "Select Numeric Variable", choices = numericVars, selected=numericVars[1])), column(width=4, sliderInput("bins", "Number of bins:", min = 1, max = 50,value = 30))), plotOutput("distPlot") ), tabPanel("Boxplot Explorer", fluidRow( column(width = 4, selectInput(inputId = "numericVarBox", "Select Numeric Variable", choices = numericVars, selected=numericVars[1])), column(width=4,selectInput(inputId = "catVarBox", "Select Category to Condition on", choices = categoricalVars, selected=categoricalVars[1])), column(width=4, selectInput(inputId = "facet_var_boxplot", "Select Facet Variable", choices=c("NONE",categoricalVars), selected = "NONE")) ), plotOutput("boxPlot") ), tabPanel("Correlation Explorer", fluidRow( column(width=4, selectInput("x_var", "Select X Variable", choices=numericVars, selected = numericVars[1])), column(width=4, selectInput("y_var", "Select Y Variable", choices=numericVars, selected = numericVars[2])), column(width=4, selectInput("facet_var", "Select Facet Variable", choices=c("NONE",categoricalVars), selected = "NONE")) ), fluidRow(plotOutput("corr_plot")) )) )) ) ) server <- function(input, output, session) { dataOut <- reactive({ #req(input$cohort) myDataFrame #%>% filter_(cohortList[[input$cohort]]) }) 
output$singleTab <- renderPlot({ dataOut()[,c(input$singleVar), with=FALSE] %>% mutate(gr = 1) %>% ggplot(aes_string(x=input$singleVar, fill=input$singleVar)) + geom_bar(aes(y = ..count..), color="black") + geom_text(aes(group=gr, label = scales::percent(..prop..), y= ..count.. ), stat= "count", vjust=-0.5) + theme(axis.text.x=element_text(angle=90)) }) # output$missing_clusters <- renderPlot({ # visdat::vis_miss(data.frame(dataOut()), cluster = TRUE) + # theme(axis.text.x = element_text(size = 15, angle = 90)) # }) output$visdat <- renderPlot({ visdat::vis_dat(data.frame(dataOut())) + theme(axis.text.x = element_text(size = 15, angle = 45)) }) output$summaryTable <- renderPrint({ skimr::skim(dataOut()) }) output$missingTab <- renderPlot({ var <- sym(input$missingVar) dataOut() %>% data.frame() %>% gg_miss_fct(fct = !!var) + theme(axis.text = element_text(size = 15)) }) output$crossTab <- renderPrint({ tab <- dataOut() %>% tabyl(!!sym(input$crossTab1), !!sym(input$crossTab2)) %>% adorn_totals() %>% adorn_percentages() %>% adorn_pct_formatting() %>% adorn_ns() #out <- dataOut()[,c(input$crossTab1, input$crossTab2), with=FALSE] #tab <- table(out, useNA = "ifany") tab }) observe({ condTab1_selected <- input$condTab1 condTab2_selected <- input$condTab2 updateSelectInput(session, "condTab2", choices = setdiff(categoricalVars,condTab1_selected), selected = condTab2_selected) }) observe({ crossTab1_selected <- input$crossTab1 crossTab2_selected <- input$crossTab2 updateSelectInput(session, "crossTab2", choices = setdiff(categoricalVars,crossTab1_selected), selected = crossTab2_selected) }) proportionTable <- reactive({ out <- dataOut()[,c(input$condTab1, input$condTab2), with=FALSE] out }) output$proportionTab <- renderPrint({ tab <- table(proportionTable(), useNA="ifany") return(tab[,"Yes"]/(tab[,"No"] + tab[,"Yes"])) }) output$proportionBarplot <- renderPlot({ print(input$condTab1) print(input$condTab2) percent_table <- proportionTable() %>% data.frame() %>% 
group_by(!!sym(input$condTab1)) %>% count(!!sym(input$condTab2)) %>% mutate(ratio=scales::percent(n/sum(n))) proportionTable() %>% ggplot(aes_string(x=input$condTab1, fill=input$condTab2)) + geom_bar(position="fill", color="black") + theme(text=element_text(size=20), axis.text.x = element_text(angle = 90)) + geom_text(data = percent_table, mapping = aes(y=n, label=ratio), position=position_fill(vjust=0.5)) # group= !!sym(input$condTab) }) output$distPlot <- renderPlot({ outPlot <- ggplot(dataOut(), aes_string(x=input$numericVarHist)) + geom_histogram(bins=input$bins) + theme(text=element_text(size=20), axis.text.x = element_text(angle=90)) outPlot }) output$boxPlot <- renderPlot({ outPlot <- ggplot(dataOut(), aes_string(x=input$catVarBox, y=input$numericVarBox, fill=input$catVarBox)) + geom_boxplot() + theme(text=element_text(size=20), axis.text.x = element_text(angle=90)) if(input$facet_var_boxplot=="NONE") { outPlot }else{ outPlot+facet_wrap(input$facet_var_boxplot) } }) output$data_dictionary <- renderDataTable( datatable(data_dictionary, options=list(pageLength=20)) ) output$corr_plot <- renderPlot({ mini_frame <- dataOut() %>% data.frame() %>% select(!!sym(input$x_var), !!sym(input$y_var)) %>% tidyr::drop_na() xcol <- mini_frame %>% pull(!!sym(input$x_var)) ycol <- mini_frame %>% pull(!!sym(input$y_var)) corval <- signif(cor(xcol, ycol), digits = 3) p <- ggplot(dataOut(), aes_string(x=input$x_var, y=input$y_var)) + geom_miss_point() + stat_smooth(method=lm, se=FALSE) + ggtitle(paste(input$x_var, "vs.", input$y_var, "correlation =", corval)) if(input$facet_var=="NONE") { p }else{ p+facet_wrap(input$facet_var) } }) } shinyApp(ui = ui, server = server)
#' Read a Minitab Portable Worksheet with an .mtp3 file extension.
#'
#' This function will load the specified Minitab Portable Worksheet into
#' memory.
#'
#' @param x The path to the data set to be loaded.
#' @param data.file The name of the data file to be read.
#' @param variable.name The name to be assigned to in the global environment.
#' @param ... Further arguments.
#'
#' @return No value is returned; this function is called for its side effects.
#'
#' @export
#'
#' @examples
#' \dontrun{reader.mtp('example.mtp', 'data/example.mtp', 'example')}
reader.dataformat.mtp <- function(x, data.file, variable.name, ...) {
  # The 'foreign' package provides read.mtp() for Minitab portable files.
  require.package('foreign')
  # Bind the parsed worksheet under the requested name in the target
  # environment (side effect; nothing is returned).
  assign(variable.name, read.mtp(x), envir = .TargetEnv)
}
/R/reader.mtp.R
no_license
arturochian/LoadMyData
R
false
false
736
r
#' Read a Minitab Portable Worksheet with an .mtp3 file extension.
#'
#' This function will load the specified Minitab Portable Worksheet into
#' memory.
#'
#' @param x The path to the data set to be loaded.
#' @param data.file The name of the data file to be read.
#' @param variable.name The name to be assigned to in the global environment.
#' @param ... Further arguments.
#'
#' @return No value is returned; this function is called for its side effects.
#'
#' @export
#'
#' @examples
#' \dontrun{reader.mtp('example.mtp', 'data/example.mtp', 'example')}
reader.dataformat.mtp <- function(x, data.file, variable.name, ...) {
  # The 'foreign' package provides read.mtp() for Minitab portable files.
  require.package('foreign')
  # Bind the parsed worksheet under the requested name in the target
  # environment (side effect; nothing is returned).
  assign(variable.name, read.mtp(x), envir = .TargetEnv)
}
### Started 8 December 2015 ## ### By Ailene Ettinger ### setwd("~/Documents/GitHub/radcliffe") rm(list=ls()) options(stringsAsFactors=FALSE) ##load packages library(reshape) library(tidyr) # make list to store all the derived dataset cleaning functions clean.raw <- list() clean.raw$marchin <- function(filename="Budburst_Marchin.csv", path="./Data/Experiments/marchin") { ## Marchin ## ## Data type: BBD,FFD ## ## Notes: Contact: Renee Marchin, renee.marchin@sydney.edu.au## file <- file.path(path, filename) marchin1 <- read.csv(file, check.names=FALSE, header=TRUE) names(marchin1)[2] <- "genusspecies" names(marchin1)[1] <- "year" names(marchin1)[3] <- "plot" names(marchin1)[8] <- "doy" marchin1a<- subset(marchin1, select=c("year","genusspecies","plot", "doy")) marchin1a$site <- "exp10" marchin1a$event <- "bbd" marchin2<-read.csv("Data/Experiments/marchin/Flower_Marchin.csv", header=T) names(marchin2)[2] <- "genusspecies" names(marchin2)[1] <- "year" names(marchin2)[3] <- "plot" names(marchin2)[7] <- "doy" marchin2a<- subset(marchin2, select=c("year","genusspecies","plot", "doy")) marchin2a$site <- "exp10" marchin2a$event <- "ffd" marchin3<-rbind(marchin1a, marchin2a) marchin3$genus<-NA marchin3$species<-NA marchin3$genus[marchin3$genusspecies=="ACRU"] <- "Acer" marchin3$species[marchin3$genusspecies=="ACRU"] <- "rubrum" marchin3$genus[marchin3$genusspecies=="CATO"] <- "Carya" marchin3$species[marchin3$genusspecies=="CATO"] <- "tomentosa" marchin3$genus[marchin3$genusspecies=="QUAL"] <- "Quercus" marchin3$species[marchin3$genusspecies=="QUAL"] <- "alba" marchin3$genus[marchin3$genusspecies=="VAPA"] <- "Vaccinium" marchin3$species[marchin3$genusspecies=="VAPA"] <- "pallidum" marchin3$genus[marchin3$genusspecies=="VAST"] <- "Vaccinium" marchin3$species[marchin3$genusspecies=="VAST"] <- "stamineum" marchin3$genus[marchin3$genusspecies=="QURU"] <- "Quercus" marchin3$species[marchin3$genusspecies=="QURU"] <- "rubra" marchin3$genus[marchin3$genusspecies=="CHMA"] <- 
"Chimaphila" marchin3$species[marchin3$genusspecies=="CHMA"] <- "maculata" marchin3$genus[marchin3$genusspecies=="HEAR"] <- "Hexastylis" marchin3$species[marchin3$genusspecies=="HEAR"] <- "arifolia" marchin3$genus[marchin3$genusspecies=="HIVE"] <- "Hieracium" marchin3$species[marchin3$genusspecies=="HIVE"] <- "venosum" marchin3$genus[marchin3$genusspecies=="THTH"] <- "Thalictrum" marchin3$species[marchin3$genusspecies=="THTH"] <- "thalictroides" marchin3$genus[marchin3$genusspecies=="TIDI"] <- "Tipularia" marchin3$species[marchin3$genusspecies=="TIDI"] <- "discolor" marchin3$block<-NA marchin<-subset(marchin3, select=c("site","block","plot","event","year","genus","species", "doy")) #marchin$variety <- NA #marchin$cult <- NA return(marchin) } clean.raw$bace <- function(filename="BACE_deciduous2010_originaltrees.csv", path="./Data/Experiments/bace") { ##BACE ## ## Data type: BBD,LOD,LUD ## ## Notes: Jeff Dukes = contact, years 2010,2011,2013 ##Decided to follow NPN's definitios: >3 of observations of each event needed to count file <- file.path(path, filename) bace1 <- read.csv(file, check.names=FALSE, header=TRUE,na.strings = ".") bace1<-bace1[-1,] names(bace1)[5] <- "genusspecies" names(bace1)[1] <- "plot" names(bace1)[7] <- "doy_bb" names(bace1)[9] <- "doy_lunf" names(bace1)[10] <- "doy_lo" bace1a<- subset(bace1, select=c("genusspecies","plot", "doy_bb")) bace1a$event <- "bbd" bace1a$year <- 2010 bace1a$site <- "exp01" names(bace1a)[3]<-"doy" bace1b<- subset(bace1, select=c("genusspecies","plot", "doy_lo")) bace1b$event <- "lod" bace1b$year <- 2010 bace1b$site <- "exp01" names(bace1b)[3]<-"doy" bace1c<- subset(bace1, select=c("genusspecies","plot", "doy_lunf")) bace1c$event <- "lud" bace1c$year <- 2010 bace1c$site <- "exp01" names(bace1c)[3]<-"doy" file2 <- file.path(path, "BACE_pinustrobus2010_originaltrees.csv") bace2 <- read.csv(file2, check.names=FALSE, header=TRUE,na.strings = ".") names(bace2)[5] <- "genusspecies" names(bace2)[1] <- "plot" names(bace2)[6] <- 
"doy_bbd" names(bace2)[8] <- "doy_fgn" names(bace2)[10] <- "doy_fnb" names(bace2)[12] <- "doy_fyn" bace2a<-subset(bace2, select=c("genusspecies","plot", "doy_bbd")) bace2a$event <- "bbd" bace2a$year <- 2010 bace2a$site <- "exp01" names(bace2a)[3]<-"doy" bace2b<- subset(bace2, select=c("genusspecies","plot", "doy_fgn")) bace2b$event <- "fgn" bace2b$year <- 2010 bace2b$site <- "exp01" names(bace2b)[3]<-"doy" bace2c<- subset(bace2, select=c("genusspecies","plot", "doy_fnb")) bace2c$event <- "fnb" bace2c$year <- 2010 bace2c$site <- "exp01" names(bace2c)[3]<-"doy" bace2c<- subset(bace2, select=c("genusspecies","plot", "doy_fnb")) bace2c$event <- "fnb" bace2c$year <- 2010 bace2c$site <- "exp01" names(bace2c)[3]<-"doy" bace2d<- subset(bace2, select=c("genusspecies","plot", "doy_fyn")) bace2d$event <- "fyn" bace2d$year <- 2010 bace2d$site <- "exp01" names(bace2d)[3]<-"doy" #2011 file3 <- file.path(path, "2011BACEherbaceousphenologydata11_11CEG.csv") bace3 <- read.csv(file3, skip=1, header=TRUE) bace3<-bace3[1:min(which(bace3$Plot=="")),] names(bace3)[2] <- "genusspecies" names(bace3)[1] <- "plot" bace3$doy_ffd<-bace3[,6] bace3[which(bace3$doy_ffd=="" & bace3$open.flowers>0),]$doy_ffd<-bace3[which(bace3$doy_ffd=="" & bace3$open.flowers>0),]$open.flowers bace3a<- subset(bace3, select=c("genusspecies","plot", "doy_ffd")) bace3a$event <- "ffd" bace3a$year <- 2011 bace3a$site <- "exp01" names(bace3a)[3]<-"doy" #2012 file4 <- file.path(path, "2012_BACESpringTreePhenology_04_04_2013_CEG_EW.csv") bace4 <- read.csv(file4, skip=1, header=TRUE,na.strings=".") bace4<-bace4[1:468,]#remove blank rows at bottom names(bace4)[5] <- "genusspecies" names(bace4)[1] <- "plot" names(bace4)[6] <- "doy_bbd"#use first bbd (not >3) names(bace4)[8] <- "doy_lud"#leaf unfolding date names(bace4)[10] <- "doy_lod"#leaf out date bace4a<- subset(bace4, select=c("genusspecies","plot", "doy_bbd")) bace4a$event <- "bbd" bace4a$year <- 2012 bace4a$site <- "exp01" names(bace4a)[3]<-"doy" bace4b<- subset(bace4, 
select=c("genusspecies","plot", "doy_lud")) bace4b$event <- "lud" bace4b$year <- 2012 bace4b$site <- "exp01" names(bace4b)[3]<-"doy" bace4c<- subset(bace4, select=c("genusspecies","plot", "doy_lod")) bace4c$event <- "lod" bace4c$year <- 2012 bace4c$site <- "exp01" names(bace4c)[3]<-"doy" file5 <- file.path(path, "2012_BACESpringTreePhenology_04_04_2013_CEG_NS.csv") bace5 <- read.csv(file5, skip=1, header=TRUE,na.strings=".") bace5<-bace5[1:624,]#remove blank rows ar bottom, if there are any names(bace5)[5] <- "genusspecies" names(bace5)[1] <- "plot" names(bace5)[6] <- "doy_bbd"#use first bbd (not >3) names(bace5)[8] <- "doy_lud"#leaf unfolding date names(bace5)[10] <- "doy_lod"#leaf out date bace5a<- subset(bace5, select=c("genusspecies","plot", "doy_bbd")) bace5a$event <- "bbd" bace5a$year <- 2012 bace5a$site <- "exp01" names(bace5a)[3]<-"doy" bace5b<- subset(bace5, select=c("genusspecies","plot", "doy_lud")) bace5b$event <- "lud" bace5b$year <- 2012 bace5b$site <- "exp01" names(bace5b)[3]<-"doy" bace5c<- subset(bace5, select=c("genusspecies","plot", "doy_lod")) bace5c$event <- "lod" bace5c$year <- 2012 bace5c$site <- "exp01" names(bace5c)[3]<-"doy" #file6 <- file.path(path, "2012_BACESpringTreePhenology_04_04_2013_CEG_Outer.csv")#leave these out since they were just outside the main treatment area, were caged, and did not get all treatments #bace6 <- read.csv(file6, skip=1, header=TRUE,na.strings=".") #bace6<-bace6[1:576,]#remove blank rows at bottom #names(bace6)[4] <- "genusspecies" #names(bace6)[1] <- "plot" #names(bace6)[5] <- "doy_bbd"#use first bbd (not >3) #names(bace6)[7] <- "doy_lud"#leaf unfolding date #names(bace6)[9] <- "doy_lod"#leaf out date #bace6a<- subset(bace6, select=c("genusspecies","plot", "doy_bbd")) #bace6a$event <- "bbd" #bace6a$year <- 2012 #bace6a$site <- "exp01" #names(bace6a)[3]<-"doy" #bace6b<- subset(bace6, select=c("genusspecies","plot", "doy_lud")) #bace6b$event <- "lud" #bace6b$year <- 2012 #bace6b$site <- "exp01" 
#names(bace6b)[3]<-"doy" #bace6c<- subset(bace6, select=c("genusspecies","plot", "doy_lod")) #bace6c$event <- "lod" #bace6c$year <- 2012 #bace6c$site <- "exp01" #names(bace6c)[3]<-"doy" file7 <- file.path(path, "2012_BACESpringTreePhenology_04_04_2013_CEG_pinus.csv") bace7 <- read.csv(file7, skip=1, header=TRUE,na.strings=".") bace7<-bace7[1:156,]#remove blank rows at bottom names(bace7)[5] <- "genusspecies" names(bace7)[1] <- "plot" names(bace7)[6] <- "doy_bbd"#first bud bolted names(bace7)[8] <- "doy_fgn"#first green needles names(bace7)[10] <- "doy_fnb"#first needle bundles names(bace7)[12] <- "doy_fyn"#first young needles bace7a<- subset(bace7, select=c("genusspecies","plot", "doy_bbd")) bace7a$event <- "bbd" bace7a$year <- 2012 bace7a$site <- "exp01" names(bace7a)[3]<-"doy" bace7b<- subset(bace7, select=c("genusspecies","plot", "doy_fgn")) bace7b$event <- "fgn" bace7b$year <- 2012 bace7b$site <- "exp01" names(bace7b)[3]<-"doy" bace7c<- subset(bace7, select=c("genusspecies","plot", "doy_fnb")) bace7c$event <- "fnb" bace7c$year <- 2012 bace7c$site <- "exp01" names(bace7c)[3]<-"doy" bace7d<- subset(bace7, select=c("genusspecies","plot", "doy_fyn")) bace7d$event <- "fyn" bace7d$year <- 2012 bace7d$site <- "exp01" names(bace7d)[3]<-"doy" #2009 file8 <- file.path(path, "2009BACEdeciduoustreespringphenology.csv") bace8 <- read.csv(file8, header=TRUE) names(bace8)[9] <- "genusspecies" names(bace8)[4] <- "plot" names(bace8)[17] <- "doy_bbd"#use first bbd (not >3) names(bace8)[19] <- "doy_lud"#leaf unfolding date names(bace8)[21] <- "doy_lod"#leaf out date bace8a<- subset(bace8, select=c("genusspecies","plot", "doy_bbd")) bace8a$event <- "bbd" bace8a$year <- 2012 bace8a$site <- "exp01" names(bace8a)[3]<-"doy" bace8b<- subset(bace8, select=c("genusspecies","plot", "doy_lud")) bace8b$event <- "lud" bace8b$year <- 2012 bace8b$site <- "exp01" names(bace8b)[3]<-"doy" bace8c<- subset(bace8, select=c("genusspecies","plot", "doy_lod")) bace8c$event <- "lod" bace8c$year <- 2012 
bace8c$site <- "exp01" names(bace8c)[3]<-"doy" #2013 file9 <- file.path(path, "2013BACEherbaceousphenologydatasheet.csv") bace9 <- read.csv(file9, skip=1, header=TRUE) bace9<-bace9[1:min(which(bace9$Plot=="")),] names(bace9)[2] <- "genusspecies" names(bace9)[1] <- "plot" bace9$doy_ffd<-bace9[,6] bace9[which(bace9$doy_ffd=="" & bace9$open.flowers>0),]$doy_ffd<-bace9[which(bace9$doy_ffd=="" & bace9$open.flowers>0),]$open.flowers bace9a<- subset(bace9, select=c("genusspecies","plot", "doy_ffd")) bace9a$event <- "ffd" bace9a$year <- 2013 bace9a$site <- "exp01" names(bace9a)[3]<-"doy" #put them all together baceall<-rbind(bace1a,bace1b,bace1c,bace2a,bace2b,bace2c,bace2d,bace3a,bace4a,bace4b,bace4c,bace5a,bace5b,bace5c,bace7a,bace7b,bace7c,bace7d,bace8a,bace8b,bace8c,bace9a) baceall<-baceall[-which(baceall$genusspecies==""),] baceall<-baceall[-which(baceall$genusspecies=="Genus sp."),] baceall<-baceall[-which(baceall$genusspecies=="moss"),] baceall<-baceall[-which(baceall$genusspecies=="Oregano"),] baceall[baceall$genusspecies=="Giant fox tail",]$genusspecies<-"Setaria faberi" baceall[baceall$genusspecies=="Setarir glauca",]$genusspecies<-"Setaria glauca" baceall[baceall$genusspecies=="Setaria viridens",]$genusspecies<-"Setaria viridis" baceall[baceall$genusspecies=="conyza canadensis",]$genusspecies<-"Conyza canadensis" baceall[baceall$genusspecies=="linnaria vulgaris"|baceall$genusspecies=="Linneria vulgaris",]$genusspecies<-"Linaria vulgaris" baceall[baceall$genusspecies=="A. rubrum "|baceall$genusspecies=="A. rubrum "|baceall$genusspecies=="A. rubrum"|baceall$genusspecies=="A. rubrum (main stem)",]$genusspecies <- "Acer rubrum" baceall[baceall$genusspecies=="B. lenta "|baceall$genusspecies=="B. lenta "|baceall$genusspecies=="B. lenta"|baceall$genusspecies=="B. lenta (main stem)",]$genusspecies <- "Betula lenta" baceall[baceall$genusspecies=="Q. rubra "|baceall$genusspecies=="Q. rubra"|baceall$genusspecies=="Q. 
rubra ",]$genusspecies <- "Quercus rubra" baceall[baceall$genusspecies=="P. strobus"|baceall$genusspecies=="P. strobus ",]$genusspecies <- "Pinus strobus" baceall[baceall$genusspecies=="B. popul" ,]$genusspecies<- "Betula populifolia" baceall[baceall$genusspecies=="U. americana" ,]$genusspecies<- "Ulmus americana" baceall[baceall$genusspecies=="P. grand" ,]$genusspecies<- "Populus grandidentata" baceall[baceall$genusspecies=="P. serotina" ,]$genusspecies<- "Prunus serotina" baceall[baceall$genusspecies=="Polygonum" ,]$genusspecies<- "Polygonum sp." baceall[baceall$genusspecies=="Asclepias syriaca ",]$genusspecies<-"Asclepias syriaca" baceall[baceall$genusspecies=="Capsella bursa-pastoris ",]$genusspecies<-"Capsella bursa-pastoris" baceall[baceall$genusspecies=="Cerastium fontanum ",]$genusspecies<-"Cerastium fontanum" baceall[baceall$genusspecies=="Dactylis glomerata ",]$genusspecies<-"Dactylis glomerata" baceall[baceall$genusspecies=="Draba verna ",]$genusspecies<-"Draba verna" baceall[baceall$genusspecies=="Elymus repens ",]$genusspecies<-"Elymus repens" baceall[baceall$genusspecies=="Erigeron annuus ",]$genusspecies<-"Erigeron annuus" baceall[baceall$genusspecies=="Festuca spp. ",]$genusspecies<-"Festuca sp." 
baceall[baceall$genusspecies=="lepidium virginicum",]$genusspecies<-"Lepidium virginicum" baceall[baceall$genusspecies=="Lamium amplexicaula",]$genusspecies<-"Lamium amplexicaule" baceall[baceall$genusspecies=="Oxalis stricta ",]$genusspecies<-"Oxalis stricta" baceall[baceall$genusspecies=="Rumex crispus ",]$genusspecies<-"Rumex crispus" baceall[baceall$genusspecies=="Silene alba ",]$genusspecies<-"Silene alba" baceall[baceall$genusspecies=="Tanacetum vulgare ",]$genusspecies<-"Tanacetum vulgare" baceall[baceall$genusspecies=="Taraxacum officinale ",]$genusspecies<-"Taraxacum officinale" baceall[baceall$genusspecies=="Trifolium pratense ",]$genusspecies<-"Trifolium pratense" baceall[baceall$genusspecies=="Trifolium repens ",]$genusspecies<-"Trifolium repens" baceall[baceall$genusspecies=="Veronica arvensis ",]$genusspecies<-"Veronica arvensis" baceall[baceall$genusspecies=="Phleum pratense " ,]$genusspecies<-"Phleum pratense" baceall[baceall$genusspecies=="Plantago lanceolata ",]$genusspecies<-"Plantago lanceolata" baceall[baceall$genusspecies=="Potentilla argentea ",]$genusspecies<-"Potentilla argentea" baceALL<-baceall %>% separate(genusspecies, c("genus", "species"), sep=" ", remove=F) baceALL$block<-NA #baceALL[baceALL$plot=="C1"|baceALL$plot=="C2"|baceALL$plot=="C3",]$block<-NA baceALL[which(as.numeric(baceALL$plot)<13),]$block<-1 baceALL[which(as.numeric(baceALL$plot)<25 & as.numeric(baceALL$plot)>12),]$block<-2 baceALL[which(as.numeric(baceALL$plot)<37 & as.numeric(baceALL$plot)>24),]$block<-3 baceALL[which(as.numeric(baceALL$plot)>36),]$block<-0# for some reason there are a few plots less than 36 that show up as block= 0. 
# fix this
  bace<-subset(baceALL, select=c("site","block","plot","event","year","genus","species", "doy"))
  #bace$variety <- NA
  #bace$cult <- NA
  # Drop unusable day-of-year values: NA, blank, and censored ("<") entries,
  # then drop the plots outside the treatment area.
  # NOTE(review): the `-which(...)` pattern removes ALL rows if the condition
  # matches nothing (negative empty index) -- verify each condition always matches.
  bace<-bace[!is.na(bace$doy),]
  bace<-bace[-which(bace$doy==""),]
  bace<-bace[-which(substr(bace$doy,1,1)=="<"),]
  bace<-bace[-which(bace$plot=="C1"|bace$plot=="C2"|bace$plot=="C3"|bace$plot=="40"|bace$plot=="41"|bace$plot=="42"),]#outside treatment area
  return(bace)
}

##Farnsworth from Harvard ##
## Data type: BBD,LOD,LUD,FFD ##
## Notes: Contact: Public data, http://harvardforest.fas.harvard.edu:8080/exist/apps/datasets/showData.html?id=hf033 ##
##Question: hf033-01-diameter-1.csv files says plot 17= intact control (Treat=1) but soil temp file says plot17=disturbance control (=d)
# Converts the 1993 Harvard Forest phenology scores (columns 20:31, one
# survey date each) into first-occurrence day-of-year per phenological stage.
# NOTE(review): the `names.only` argument is never used in the body.
clean.raw$farnsworth <- function(filename="hf033-01-diameter-1.csv", path="./Data/Experiments/farnsworth/",names.only=FALSE) {
  file <- file.path(path, filename)
  farnsworth1 <- read.csv(file, check.names=FALSE, header=TRUE)
  #phenological stage 1.5=budburst; need to get day of year for which this occurred
  colnames(farnsworth1)[3]<-"plot"
  # Each loop below relabels the 12 score columns with their survey dates and
  # records the first date the target stage score appears.
  # NOTE(review): if a row never reaches the stage, which() is empty and
  # min() warns and returns Inf -- confirm that path yields NA rather than
  # an error in strftime().
  farnsworth1$bb_doy<-NA
  for(i in 1:dim(farnsworth1)[1]){
    inddat<-farnsworth1[i,20:31]
    names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23")
    bbdate<-names(inddat)[min(which(inddat==1.5))]#1.5="leaves just emerging"
    bbdoy<-strftime(bbdate, format = "%j")
    farnsworth1$bb_doy[i]<-bbdoy
  }
  #now phenological stage 2=leaves expanding; need to get day of year for which this occurred
  farnsworth1$leafunf_doy<-NA
  for(i in 1:dim(farnsworth1)[1]){
    inddat<-farnsworth1[i,20:31]
    names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23")
    ludate<-names(inddat)[min(which(inddat==2))]
    ludoy<-strftime(ludate, format = "%j")
    farnsworth1$leafunf_doy[i]<-ludoy
  }
  #now phenological stage 3=leaves fully expanded=leafout; need to get day of year for which this occurred
  farnsworth1$leafout_doy<-NA
  for(i in 1:dim(farnsworth1)[1]){
    inddat<-farnsworth1[i,20:31]
    names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23")
    lodate<-names(inddat)[min(which(inddat==3))]
    lodoy<-strftime(lodate, format = "%j")
    farnsworth1$leafout_doy[i]<-lodoy
  }
  #now flowering phenological stage (4.5)=mature leaves and flowers present;first flowering=day of year for which this first occurred
  farnsworth1$ffd<-NA
  for(i in 1:dim(farnsworth1)[1]){
    inddat<-farnsworth1[i,20:31]
    names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23")
    ffdate<-names(inddat)[min(which(inddat==4.5))]
    ffdoy<-strftime(ffdate, format = "%j")
    farnsworth1$ffd[i]<-ffdoy
  }
  #now fruiting phenological stage (5)=mature leaves and fruits present=first fruiting is first date this was observed
  farnsworth1$ffrd<-NA
  for(i in 1:dim(farnsworth1)[1]){
    inddat<-farnsworth1[i,20:31]
    names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23")
    ffrdate<-names(inddat)[min(which(inddat==5))]
    ffrdoy<-strftime(ffrdate, format = "%j")
    farnsworth1$ffrd[i]<-ffrdoy
  }
  #now leaf coloration=leaves turned color" first date this was observed
  farnsworth1$col<-NA
  for(i in 1:dim(farnsworth1)[1]){
    inddat<-farnsworth1[i,20:31]
    names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23")
    coldate<-names(inddat)[min(which(inddat==7))]
    coldoy<-strftime(coldate, format = "%j")
    farnsworth1$col[i]<-coldoy
  }
  #now leaf drop="some or all leaves abscised" (8,9)-first date this was observed
  farnsworth1$drop<-NA
  for(i in 1:dim(farnsworth1)[1]){
    inddat<-farnsworth1[i,20:31]
    names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23")
    dropdate<-names(inddat)[min(which(inddat>7))]
    dropdoy<-strftime(dropdate, format = "%j")
    farnsworth1$drop[i]<-dropdoy
  }
  # Expand the short species codes to genus/species.
  # NOTE(review): "crata" -> Acer rubrum and "vest" -> Viburnum lentago look
  # surprising for those codes -- worth confirming against the HF033 metadata.
  farnsworth1$genus<-NA
  farnsworth1$species1<-NA
  farnsworth1$genus[farnsworth1$species=="aaga"] <- "Amelanchier"
  farnsworth1$species1[farnsworth1$species=="aaga"] <- "grandifolia"
  farnsworth1$genus[farnsworth1$species=="beech"] <- "Fagus"
  farnsworth1$species1[farnsworth1$species=="beech"] <- "grandifolia"
  farnsworth1$genus[farnsworth1$species=="bbhg"] <- "Vaccinium"
  farnsworth1$species1[farnsworth1$species=="bbhg"] <- "corymbosum"
  farnsworth1$genus[farnsworth1$species=="bbhch"] <- "Vaccinium"
  farnsworth1$species1[farnsworth1$species=="bbhch"] <- "vacillans"
  farnsworth1$genus[farnsworth1$species=="blach"] <- "Prunus"
  farnsworth1$species1[farnsworth1$species=="blach"] <- "serotina"
  farnsworth1$genus[farnsworth1$species=="crata"] <- "Acer"
  farnsworth1$species1[farnsworth1$species=="crata"] <- "rubrum"
  farnsworth1$genus[farnsworth1$species=="ro"] <- "Quercus"
  farnsworth1$species1[farnsworth1$species=="ro"] <- "rubra"
  farnsworth1$genus[farnsworth1$species=="sa"] <- "Sorbus"
  farnsworth1$species1[farnsworth1$species=="sa"] <- "americana"
  farnsworth1$genus[farnsworth1$species=="wo"] <- "Quercus"
  farnsworth1$species1[farnsworth1$species=="wo"] <- "alba"
  farnsworth1$genus[farnsworth1$species=="viac1"] <- "Viburnum"
  farnsworth1$species1[farnsworth1$species=="viac1"] <- "acerifolium"
  farnsworth1$genus[farnsworth1$species=="sm"] <- "Acer"
  farnsworth1$species1[farnsworth1$species=="sm"] <- "pensylvanicum"
  farnsworth1$genus[farnsworth1$species=="chest"] <- "Castanea"
  farnsworth1$species1[farnsworth1$species=="chest"] <- "dentata"
  farnsworth1$genus[farnsworth1$species=="vest"] <- "Viburnum"
  farnsworth1$species1[farnsworth1$species=="vest"] <- "lentago"
  farnsworth1$genus[farnsworth1$species=="rm"] <- "Acer"
  farnsworth1$species1[farnsworth1$species=="rm"] <- "rubrum"
  farnsworth1$site<-"exp08"
  #farnsworth1$variety <- NA
  #farnsworth1$cult <- NA
  farnsworth1$event <- NA
  farnsworth1$year <- 1993
  #pull out all budburst rows
  farnsworth2<-farnsworth1[which(farnsworth1$bb_doy>0),]
  farnsworth2a<-subset(farnsworth2, select=c("site","plot","event","year","genus","species1","bb_doy"))
  colnames(farnsworth2a)[6]<-"species"
  colnames(farnsworth2a)[7]<-"doy"
  farnsworth2a$event <- "bbd"
  #pull out all leafunf rows
  farnsworth3<-farnsworth1[which(farnsworth1$leafunf_doy>0),]
  farnsworth3a<-subset(farnsworth3, select=c("site","plot","event","year","genus","species1","leafunf_doy"))
  colnames(farnsworth3a)[6]<-"species"
  colnames(farnsworth3a)[7]<-"doy"
  farnsworth3a$event <- "lud"
  #pull out all leafout rows
  farnsworth4<-farnsworth1[which(farnsworth1$leafout_doy>0),]
  farnsworth4a<-subset(farnsworth4, select=c("site","plot","event","year","genus","species1","leafout_doy"))
  colnames(farnsworth4a)[6]<-"species"
  colnames(farnsworth4a)[7]<-"doy"
  farnsworth4a$event <- "lod"
  #pull out all flowering rows
  farnsworth5<-farnsworth1[which(farnsworth1$ffd>0),]
  farnsworth5a<-subset(farnsworth5, select=c("site","plot","event","year","genus","species1","ffd"))
  colnames(farnsworth5a)[6]<-"species"
  colnames(farnsworth5a)[7]<-"doy"
  farnsworth5a$event <- "ffd"
  #pull out all fruiting rows
  farnsworth6<-farnsworth1[which(farnsworth1$ffrd>0),]
  farnsworth6a<-subset(farnsworth6, select=c("site","plot","event","year","genus","species1","ffrd"))
  colnames(farnsworth6a)[6]<-"species"
  colnames(farnsworth6a)[7]<-"doy"
  farnsworth6a$event <- "ffrd"
  #pull out all coloration rows
  farnsworth7<-farnsworth1[which(farnsworth1$col>0),]
  farnsworth7a<-subset(farnsworth7, select=c("site","plot","event","year","genus","species1","col"))
  colnames(farnsworth7a)[6]<-"species"
  colnames(farnsworth7a)[7]<-"doy"
  farnsworth7a$event
<- "col" #pull out all drop rows farnsworth8<-farnsworth1[which(farnsworth1$drop>0),] farnsworth8a<-subset(farnsworth7, select=c("site","plot","event","year","genus","species1","drop")) colnames(farnsworth8a)[6]<-"species" colnames(farnsworth8a)[7]<-"doy" farnsworth8a$event <- "drop" alldat<- rbind(farnsworth2a,farnsworth3a,farnsworth3a,farnsworth4a,farnsworth5a,farnsworth6a,farnsworth7a,farnsworth8a) alldat$block<-NA alldat[alldat$plot<4,]$block<-1 alldat[alldat$plot==4|alldat$plot==5|alldat$plot==6,]$block=2 alldat[alldat$plot==7|alldat$plot==8|alldat$plot==9,]$block=3 alldat[alldat$plot==10|alldat$plot==11|alldat$plot==12,]$block=4 alldat[alldat$plot==13|alldat$plot==14|alldat$plot==15,]$block=5 alldat[alldat$plot==16|alldat$plot==17|alldat$plot==18,]$block=6 farnsworth<-subset(alldat, select=c("site","block","plot","event","year","genus","species","doy")) return(farnsworth) } ###Cleland et al Jasper Ridge data ###FFD clean.raw$cleland <- function(filename="JasperRidge_data.csv", path="./Data/Experiments/cleland") { file <- file.path(path, filename) cleland1 <- read.csv(file, check.names=FALSE, header=TRUE) cleland1<-cleland1[cleland1$CO2==1,]#remove plots with CO2 added cleland1<-cleland1[cleland1$Nutrient==1,]#remove plots with N added colnames(cleland1)[8]<-"genus" cleland1$species<-NA cleland1$species[cleland1$genus=="Crepis"] <- "vessicaria" cleland1$species[cleland1$genus=="Erodium"] <- "brachycarpum" cleland1$species[cleland1$genus=="Geranium"] <- "dissectum" cleland1$species[cleland1$genus=="Lolium"] <- "multiflorum" cleland1$species[cleland1$genus=="Vicia"] <- "sativa" cleland1$species[cleland1$genus=="Vulpia"] <- "myuros" cleland1$species[cleland1$genus=="Bromusd"] <- "diandrus" cleland1$species[cleland1$genus=="Bromush"] <- "hordeaceus" cleland1$genus[cleland1$genus=="Bromusd"] <- "Bromus" cleland1$genus[cleland1$genus=="Bromush"] <- "Bromus" colnames(cleland1)[10]<-"doy" cleland1$site<-"exp05" cleland1$event<-"ffd" cleland1$block<-NA 
cleland1[cleland1$plot==1|cleland1$plot==2|cleland1$plot==3|cleland1$plot==33|cleland1$plot==4,]$block<-1 cleland1[cleland1$plot==12|cleland1$plot==7|cleland1$plot==8|cleland1$plot==9,]$block<-2 cleland1[cleland1$plot==10|cleland1$plot==11|cleland1$plot==5|cleland1$plot==6,]$block<-3 cleland1[cleland1$plot==13|cleland1$plot==14|cleland1$plot==26|cleland1$plot==32|cleland1$plot==34,]$block<-4 cleland1[cleland1$plot==15|cleland1$plot==16|cleland1$plot==17|cleland1$plot==18,]$block<-5 cleland1[cleland1$plot==19|cleland1$plot==20|cleland1$plot==21|cleland1$plot==31|cleland1$plot==35,]$block<-6 cleland1[cleland1$plot==27|cleland1$plot==28|cleland1$plot==29|cleland1$plot==30,]$block<-7 cleland1[cleland1$plot==22|cleland1$plot==23|cleland1$plot==24|cleland1$plot==25|cleland1$plot==36,]$block<-8 colnames(cleland1)[5]<-"plot2" cleland1$plot<-paste(cleland1$plot2,cleland1$quad,sep="-") cleland<-subset(cleland1, select=c("site","block","plot","event","year","genus","species", "doy")) #cleland$variety <- NA #cleland$cult <- NA cleland<-cleland[!is.na(cleland$doy),] return(cleland) } ##Clark et al from Duke ## ## Data type: BBD,LUD, LOD ## ## Notes: Contact: Public data ## clean.raw$clarkduke <- function(filename, path="./Data/Experiments/clark/") { clarkdukeplots<-c("DF_G01_A.csv","DF_G02_5.csv","DF_G03_3.csv","DF_G04_A.csv","DF_G05_3.csv","DF_G06_5.csv","DF_G07_A.csv","DF_G08_5.csv","DF_G09_3.csv","DF_G10_C.csv","DF_G11_C.csv","DF_G12_C.csv","DF_S01_5.csv","DF_S02_3.csv","DF_S03_A.csv","DF_S04_A.csv","DF_S05_3.csv","DF_S06_5.csv","DF_S07_5.csv","DF_S08_A.csv","DF_S09_3.csv","DF_S10_C.csv","DF_S11_C.csv","DF_S12_C.csv") clarkduke <- NA spfile <- file.path(path, "speciesList_clark.csv") specieslist<-read.csv(spfile, header=TRUE) for (i in 1:length(clarkdukeplots)){ file <- file.path(path, paste(clarkdukeplots[i])) clarkduke1 <- read.csv(file, check.names=FALSE, header=TRUE) clarkduke1$genus<-NA clarkduke1$species<-NA species1<-unique(clarkduke1$Species) for (j in 
1:length(species1)){ clarkduke1$genus[clarkduke1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$genus clarkduke1$species[clarkduke1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$species } clarkduke1$site<-"exp03" clarkduke1$plot<-substr(clarkduke1$Chamber,1,3) #estimate first date of budburst, leaf unfolding, and leaf out get.bbd <- function(x) names(x)[min(which(x==3), na.rm=T)]#budburst get.lud <- function(x) names(x)[min(which(x==4), na.rm=T)]#leaves unfolding get.lod <- function(x) names(x)[min(which(x==6), na.rm=T)]#leafout bbd_2010<-substr(apply(clarkduke1[,17:30],1,get.bbd),6,13) bbd_2011<-substr(apply(clarkduke1[,31:55],1,get.bbd),6,13) bbd_2012<-substr(apply(clarkduke1[,56:81],1,get.bbd),6,13) bbd_2013<-substr(apply(clarkduke1[,82:101],1,get.bbd),6,13) lud_2010<-substr(apply(clarkduke1[,17:30],1,get.lud),6,13) lud_2011<-substr(apply(clarkduke1[,31:55],1,get.lud),6,13) lud_2012<-substr(apply(clarkduke1[,56:81],1,get.lud),6,13) lud_2013<-substr(apply(clarkduke1[,82:101],1,get.lud),6,13) lod_2010<-substr(apply(clarkduke1[,17:30],1,get.lod),6,13) lod_2011<-substr(apply(clarkduke1[,31:55],1,get.lod),6,13) lod_2012<-substr(apply(clarkduke1[,56:81],1,get.lod),6,13) lod_2013<-substr(apply(clarkduke1[,82:101],1,get.lod),6,13) bbd2010_doy<-strftime(strptime(bbd_2010, format = "%m.%d.%y"),format = "%j") bbd2011_doy<-strftime(strptime(bbd_2011, format = "%m.%d.%y"),format = "%j") bbd2012_doy<-strftime(strptime(bbd_2012, format = "%m.%d.%y"),format = "%j") bbd2013_doy<-strftime(strptime(bbd_2013, format = "%m.%d.%y"),format = "%j") lud2010_doy<-strftime(strptime(lud_2010, format = "%m.%d.%y"),format = "%j") lud2011_doy<-strftime(strptime(lud_2011, format = "%m.%d.%y"),format = "%j") lud2012_doy<-strftime(strptime(lud_2012, format = "%m.%d.%y"),format = "%j") lud2013_doy<-strftime(strptime(lud_2013, format = "%m.%d.%y"),format = "%j") lod2010_doy<-strftime(strptime(lod_2010, format = "%m.%d.%y"),format = "%j") 
    lod2011_doy<-strftime(strptime(lod_2011, format = "%m.%d.%y"),format = "%j")
    lod2012_doy<-strftime(strptime(lod_2012, format = "%m.%d.%y"),format = "%j")
    lod2013_doy<-strftime(strptime(lod_2013, format = "%m.%d.%y"),format = "%j")
    # Attach the per-year first-occurrence columns, then reshape twice:
    # first wide-by-year -> long (one row per year), then the three event
    # columns (BBD/LUD/LOD) -> long (one row per event).
    clarkduke2<-cbind(clarkduke1,bbd2010_doy,bbd2011_doy,bbd2012_doy,bbd2013_doy,lud2010_doy,lud2011_doy,lud2012_doy,lud2013_doy,lod2010_doy,lod2011_doy,lod2012_doy,lod2013_doy)
    clarkduke2a<-subset(clarkduke2, select=c("site","plot","genus","species","bbd2010_doy","bbd2011_doy","bbd2012_doy","bbd2013_doy","lud2010_doy","lud2011_doy","lud2012_doy","lud2013_doy","lod2010_doy","lod2011_doy","lod2012_doy","lod2013_doy"))
    clarkduke3<-reshape(clarkduke2a,varying = list(names(clarkduke2a)[5:8], names(clarkduke2a)[9:12],names(clarkduke2a)[13:16]), direction = "long", v.names = c("BBD","LUD", "LOD"), times = c(2010:2013))
    clarkduke3<-clarkduke3[,-9]   # drop the reshape id column
    colnames(clarkduke3)[5]<-"year"
    clarkduke4<-reshape(clarkduke3,varying = list(names(clarkduke3)[6:8]), direction = "long", v.names = c("doy"), times = c(1:3))
    # reshape stacks the three varying columns block-wise, so label the
    # stacked thirds bbd / lud / lod in that order.
    clarkduke4$event<-c(rep("bbd", times=dim(clarkduke3)[1]),rep("lud", times=dim(clarkduke3)[1]),rep("lod", times=dim(clarkduke3)[1]))
    #clarkduke4$variety <- NA
    #clarkduke4$cult <- NA
    clarkduke4$block<-NA
    clarkduke5<-subset(clarkduke4, select=c("site","block","plot","event","year","genus","species","doy"))
    clarkduke<-rbind(clarkduke,clarkduke5)
  }
  # Drop missing dates, an unidentifiable genus, and a trailing-space variant.
  clarkduke<-clarkduke[!is.na(clarkduke$doy),]
  clarkduke<-clarkduke[-which(clarkduke$genus=="Ob"),]#unknown genus at clarkduke
  clarkduke[which(clarkduke$genus=="Carya "),]$genus<-"Carya"
  return(clarkduke)
}

##Clark et al from Harvard ##
## Data type: BBD,LUD,LOD ##
## Notes: Contact: Public data ##
# Same pipeline as clean.raw$clarkduke but for the Harvard Forest chambers
# (site "exp04"), with three survey years (2010-2012).
clean.raw$clarkharvard <- function(filename, path="./Data/Experiments/clark") {
  clarkharvardplots<-c("HF_G01_3.csv","HF_G02_A.csv","HF_G03_5.csv","HF_G04_A.csv","HF_G05_5.csv","HF_G06_3.csv","HF_G07_A.csv","HF_G08_3.csv","HF_G09_5.csv","HF_G10_C.csv","HF_G11_C.csv","HF_G12_C.csv","HF_S01_5.csv","HF_S02_A.csv","HF_S03_3.csv","HF_S04_5.csv","HF_S05_A.csv","HF_S06_3.csv","HF_S07_A.csv","HF_S08_3.csv","HF_S09_5.csv","HF_S10_C.csv","HF_S11_C.csv","HF_S12_C.csv")
  clarkharvard <- c()
  spfile <- file.path(path, "speciesList_clark.csv")
  specieslist<-read.csv(spfile, header=TRUE)
  for (i in 1:length(clarkharvardplots)){
    file <- file.path(path, paste(clarkharvardplots[i]))
    clarkharvard1 <- read.csv(file, check.names=FALSE, header=TRUE)
    # Expand the short species codes via the lookup table.
    clarkharvard1$genus<-NA
    clarkharvard1$species<-NA
    species1<-unique(clarkharvard1$Species)
    for (j in 1:length(species1)){
      clarkharvard1$genus[clarkharvard1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$genus
      clarkharvard1$species[clarkharvard1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$species
    }
    clarkharvard1$site<-"exp04"
    #estimate first date of budburst, leaf unfolding, and leaf out
    get.bbd <- function(x) names(x)[min(which(x==3), na.rm=T)]#budburst
    get.lud <- function(x) names(x)[min(which(x==4), na.rm=T)]#leaves unfolding
    get.lod <- function(x) names(x)[min(which(x==6), na.rm=T)]#leafout
    # Column ranges 19:26 / 27:31 / 32:44 hold the 2010-2012 surveys; the
    # survey date is embedded in the column name (chars 6-13).
    bbd_2010<-substr(apply(clarkharvard1[,19:26],1,get.bbd),6,13)
    bbd_2011<-substr(apply(clarkharvard1[,27:31],1,get.bbd),6,13)
    bbd_2012<-substr(apply(clarkharvard1[,32:44],1,get.bbd),6,13)
    lud_2010<-substr(apply(clarkharvard1[,19:26],1,get.lud),6,13)
    lud_2011<-substr(apply(clarkharvard1[,27:31],1,get.lud),6,13)
    lud_2012<-substr(apply(clarkharvard1[,32:44],1,get.lud),6,13)
    lod_2010<-substr(apply(clarkharvard1[,19:26],1,get.lod),6,13)
    lod_2011<-substr(apply(clarkharvard1[,27:31],1,get.lod),6,13)
    lod_2012<-substr(apply(clarkharvard1[,32:44],1,get.lod),6,13)
    bbd2010_doy<-strftime(strptime(bbd_2010, format = "%m.%d.%y"),format = "%j")
    bbd2011_doy<-strftime(strptime(bbd_2011, format =
"%m.%d.%y"),format = "%j") bbd2012_doy<-strftime(strptime(bbd_2012, format = "%m.%d.%y"),format = "%j") lud2010_doy<-strftime(strptime(lud_2010, format = "%m.%d.%y"),format = "%j") lud2011_doy<-strftime(strptime(lud_2011, format = "%m.%d.%y"),format = "%j") lud2012_doy<-strftime(strptime(lud_2012, format = "%m.%d.%y"),format = "%j") lod2010_doy<-strftime(strptime(lod_2010, format = "%m.%d.%y"),format = "%j") lod2011_doy<-strftime(strptime(lod_2011, format = "%m.%d.%y"),format = "%j") lod2012_doy<-strftime(strptime(lod_2012, format = "%m.%d.%y"),format = "%j") clarkharvard2<-cbind(clarkharvard1,bbd2010_doy,bbd2011_doy,bbd2012_doy,lud2010_doy,lud2011_doy,lud2012_doy,lod2010_doy,lod2011_doy,lod2012_doy) clarkharvard2$plot<-substr(clarkharvard1$Chamber,1,3) clarkharvard2a<-subset(clarkharvard2, select=c("site","plot","genus","species","bbd2010_doy","bbd2011_doy","bbd2012_doy","lud2010_doy","lud2011_doy","lud2012_doy","lod2010_doy","lod2011_doy","lod2012_doy")) clarkharvard3<-reshape(clarkharvard2a,varying = list(names(clarkharvard2a)[5:7], names(clarkharvard2a)[8:10],names(clarkharvard2a)[11:13]), direction = "long", v.names = c("BBD","LUD", "LOD"), times = c(2010:2012)) clarkharvard3<-clarkharvard3[,-9] colnames(clarkharvard3)[5]<-"year" clarkharvard4<-reshape(clarkharvard3,varying = list(names(clarkharvard3)[6:8]), direction = "long", v.names = c("doy"), times = c(1:3)) clarkharvard4$event<-c(rep("bbd", times=dim(clarkharvard3)[1]),rep("lud", times=dim(clarkharvard3)[1]),rep("lod", times=dim(clarkharvard3)[1])) #clarkharvard4$variety <- NA #clarkharvard4$cult <- NA clarkharvard4$block<-NA clarkharvard5<-subset(clarkharvard4, select=c("site","block","plot","event","year","genus","species","doy")) clarkharvard<-rbind(clarkharvard,clarkharvard5) } clarkharvard<-clarkharvard[!is.na(clarkharvard$doy),] return(clarkharvard) } ##Sherry from Oklahoma## ## Data type: FFD, FFRD ## ## Notes: Rebecca Sherry #Phenological stages for Forbs: F0=vegetative plants; F1, unopened buds; 
# (comment continued from previous line) F2, open flowers; F3, old flowers (postanthesis); F4, initiated fruit; F5,expanding fruit; and F6, dehisced fruit.
#Phenological stages For grasses: G0, plants with flower stalks (in boot); G1, spikelets present (out of boot); G2,exerted anthers or styles; G3, past the presence of anthers and styles (seed development); and G4, disarticulating florets.
#For forb species with very small flowers and fruits that were difficult to observe, stage 3 (initiated fruit) and stage 4 (expanding fruit) were lumped into a category of ‘‘fruit present,’’ (i.e., a score of F4.5)

# Clean the Sherry (Oklahoma, site "exp12") raw phenology data.
# Reads one wide-format CSV per focal species (file names hard-coded below) plus a
# combined "First6spp" file, pulls the first flowering date (ffd) and first
# fruiting date (ffrd) per plot from the per-survey score columns, reshapes to
# long format, derives blocks from plot numbers, and returns a data frame with
# columns site/block/plot/event/year/genus/species/doy.
# NOTE(review): the `filename` argument is accepted for interface consistency
# with the other clean.raw functions but is never used.
clean.raw$sherry <- function(filename, path) {
# One file per species; gen/sp give the genus/species for each file, in order.
sherryspp<-c("SherryPhenology2003_Achillea.csv","SherryPhenology2003_Ambrosia.csv","SherryPhenology2003_Andropogon.csv","SherryPhenology2003_Erigeron.csv","SherryPhenology2003_Panicum.csv","SherryPhenology2003_Schizachyrium.csv")
# Accumulator starts as NA; the first (NA) row is dropped after the loop.
sherry <- NA
gen<-c("Achillea","Ambrosia","Andropogon","Erigeron","Panicum","Schizachyrium")
sp<-c("millefolium","psilostachya","gerardii","strigosus","virgatum","scoparium")
for (i in 1:length(sherryspp)){
file <- file.path(path, paste(sherryspp[i]))
sherry1 <- read.csv(file, skip=3, header=TRUE)
colnames(sherry1)[which(colnames(sherry1)=="Plot")]<-"plot"
#estimate first date of flowering and fruiting
# Survey columns are those whose names start with "X" (read.csv prefixes
# numeric day-of-year headers with "X"); they run to the last column.
firstsurv<-min(which(substr(colnames(sherry1),1,1)=="X"))
lastsurv<-dim(sherry1)[2]
# Return the name of the first survey column whose score falls in the stage
# window. NOTE(review): if no score matches, min() over an empty set returns
# Inf (with a warning) and the result indexes to NA, which is filtered later.
get.ffd <- function(x) names(x)[min(which(x <= 3.5 & x >= 2.5), na.rm=T)]#first flower date
get.ffrd <- function(x) names(x)[min(which(x <= 5.5 & x >= 3.5), na.rm=T)]#leaves unfolding
# Strip the leading "X" from the matched column name to get day of year.
ffd_doy<-substr(apply(sherry1[,firstsurv:lastsurv],1,get.ffd),2,4)
ffrd_doy<-substr(apply(sherry1[,firstsurv:lastsurv],1,get.ffrd),2,4)
sherry2<-cbind(sherry1,ffd_doy,ffrd_doy)
sherry2$genus<- paste(gen[i])
sherry2$species<-paste(sp[i])
sherry3<-subset(sherry2, select=c("plot","genus","species", "ffd_doy","ffrd_doy"))
sherry<-rbind(sherry,sherry3)
}
# Drop the initial NA seed row.
sherry<-sherry[-1,]
# Wide-to-long: columns 4:5 (ffd_doy, ffrd_doy) become one "doy" column.
sherry4<-reshape(sherry,varying = list(names(sherry)[4:5]), direction = "long", v.names = c("doy"), times = c(1:2))
# reshape stacks all ffd rows then all ffrd rows, so label events in that order.
sherry4$event<-c(rep("ffd", times=dim(sherry)[1]),rep("ffrd", times=dim(sherry)[1]))
#add in other file,which has different species that are little different than others in formatting
file2 <- file.path(path, "SherryPhenology2003_First6spp.csv")
sherry5 <- read.csv(file2, skip=3, header=TRUE)
# The combined file stacks six species vertically; row ranges and the header
# rows used for column names are hard-coded per species.
verarv<-sherry5[1:40,]
viobic<-sherry5[44:82,]
colnames(viobic)<-c(sherry5[43,])
cerglo<-sherry5[86:119,]
colnames(cerglo)<-c(sherry5[85,])
plavir<-sherry5[123:158,]
colnames(plavir)<-c(sherry5[122,])
broarv<-sherry5[162:201,]
colnames(broarv)<-c(sherry5[161,])
dicoli<-sherry5[205:235,]
colnames(dicoli)<-c(sherry5[204,])
get.ffd <- function(x) names(x)[min(which(x <= 3.5 & x >= 2.5), na.rm=T)]#first flower date
get.ffrd <- function(x) names(x)[min(which(x <= 5.5 & x >= 3.5), na.rm=T)]#leaves unfolding
# For each species chunk: extract ffd/ffrd, attach genus/species, rename the
# appended columns (positions 16/17) and keep the standard five columns.
# The substr offsets differ per chunk because the header prefixes differ.
dicoli_ffd_doy<-substr(apply(dicoli[,5:14],1,get.ffd),2,4)
dicoli_ffrd_doy<-substr(apply(dicoli[,5:14],1,get.ffrd),2,4)
dicoli2<-cbind(dicoli,dicoli_ffd_doy,dicoli_ffrd_doy)
dicoli2$genus<-"Dichanthelium"
dicoli2$species<-"oligosanthes"
colnames(dicoli2)[1]<-"plot"
colnames(dicoli2)[16]<-"ffd_doy"
colnames(dicoli2)[17]<-"ffrd_doy"
dicoli3<-subset(dicoli2, select=c("plot","genus","species", "ffd_doy","ffrd_doy"))
broarv_ffd_doy<-substr(apply(broarv[,5:14],1,get.ffd),1,4)
broarv_ffrd_doy<-substr(apply(broarv[,5:14],1,get.ffrd),1,4)
# Some broarv headers carry a leading "d"; strip it to leave the day of year.
broarv_ffd_doy[which(substr(broarv_ffd_doy,1,1)=="d")]<-substr(broarv_ffd_doy[which(substr(broarv_ffd_doy,1,1)=="d")],2,4)
broarv_ffrd_doy[which(substr(broarv_ffrd_doy,1,1)=="d")]<-substr(broarv_ffrd_doy[which(substr(broarv_ffrd_doy,1,1)=="d")],2,4)
broarv2<-cbind(broarv,broarv_ffd_doy,broarv_ffrd_doy)
broarv2$genus<-"Bromus"
broarv2$species<-"arvensis"
colnames(broarv2)[1]<-"plot"
colnames(broarv2)[16]<-"ffd_doy"
colnames(broarv2)[17]<-"ffrd_doy"
broarv3<-subset(broarv2, select=c("plot","genus","species", "ffd_doy","ffrd_doy"))
cerglo_ffd_doy<-substr(apply(cerglo[,5:10],1,get.ffd),2,7)
cerglo_ffrd_doy<-substr(apply(cerglo[,5:10],1,get.ffrd),2,7)
# Some cerglo headers carry an "ay" prefix; strip it similarly.
cerglo_ffd_doy[which(substr(cerglo_ffd_doy,1,2)=="ay")]<-substr(cerglo_ffd_doy[which(substr(cerglo_ffd_doy,1,2)=="ay")],3,5)
cerglo_ffrd_doy[which(substr(cerglo_ffrd_doy,1,2)=="ay")]<-substr(cerglo_ffrd_doy[which(substr(cerglo_ffrd_doy,1,2)=="ay")],3,5)
cerglo2<-cbind(cerglo,cerglo_ffd_doy,cerglo_ffrd_doy)
cerglo2$genus<-"Cerastium"
cerglo2$species<-"glomeratum"
colnames(cerglo2)[1]<-"plot"
colnames(cerglo2)[16]<-"ffd_doy"
colnames(cerglo2)[17]<-"ffrd_doy"
cerglo3<-subset(cerglo2, select=c("plot","genus","species", "ffd_doy","ffrd_doy"))
plavir_ffd_doy<-substr(apply(plavir[,5:14],1,get.ffd),2,4)
plavir_ffrd_doy<-substr(apply(plavir[,5:14],1,get.ffrd),2,4)
plavir2<-cbind(plavir,plavir_ffd_doy,plavir_ffrd_doy)
plavir2$genus<-"Plantago"
plavir2$species<-"virginica"
colnames(plavir2)[1]<-"plot"
colnames(plavir2)[16]<-"ffd_doy"
colnames(plavir2)[17]<-"ffrd_doy"
plavir3<-subset(plavir2, select=c("plot","genus","species", "ffd_doy","ffrd_doy"))
viobic_ffd_doy<-substr(apply(viobic[,5:14],1,get.ffd),2,4)
viobic_ffrd_doy<-substr(apply(viobic[,5:14],1,get.ffrd),2,4)
viobic2<-cbind(viobic,viobic_ffd_doy,viobic_ffrd_doy)
viobic2$genus<-"Viola"
viobic2$species<-"bicolor"
colnames(viobic2)[1]<-"plot"
colnames(viobic2)[16]<-"ffd_doy"
colnames(viobic2)[17]<-"ffrd_doy"
viobic3<-subset(viobic2, select=c("plot","genus","species", "ffd_doy","ffrd_doy"))
verarv_ffd_doy<-substr(apply(verarv[,5:14],1,get.ffd),4,6)
verarv_ffrd_doy<-substr(apply(verarv[,5:14],1,get.ffrd),4,6)
verarv2<-cbind(verarv,verarv_ffd_doy,verarv_ffrd_doy)
verarv2$genus<-"Veronica"
verarv2$species<-"arvensis"
colnames(verarv2)[1]<-"plot"
colnames(verarv2)[16]<-"ffd_doy"
colnames(verarv2)[17]<-"ffrd_doy"
verarv3<-subset(verarv2, select=c("plot","genus","species", "ffd_doy","ffrd_doy"))
# Combine the six species from the shared file and reshape to long, as above.
sherry6<-rbind(broarv3,dicoli3,verarv3,viobic3,plavir3,cerglo3)
sherry7<-reshape(sherry6,varying = list(names(sherry6)[4:5]), direction = "long", v.names = c("doy"), times = c(1:2))
sherry7$event<-c(rep("ffd", times=dim(sherry6)[1]),rep("ffrd", times=dim(sherry6)[1]))
sherry8<-rbind(sherry4,sherry7)
sherry8$year<-2003
sherry8$site<-"exp12"
# Assign blocks from plot number ranges (plots 5,6,15,16 form block 4).
sherry8$block<-NA
sherry8[as.numeric(sherry8$plot)<5,]$block<-1
sherry8[as.numeric(sherry8$plot)<11 & as.numeric(sherry8$plot)>6,]$block<-2
sherry8[as.numeric(sherry8$plot)<15 & as.numeric(sherry8$plot)>10,]$block<-3
sherry8[as.numeric(sherry8$plot)==5|as.numeric(sherry8$plot)==6|as.numeric(sherry8$plot)==15|as.numeric(sherry8$plot)==16,]$block<-4
sherry8[as.numeric(sherry8$plot)>16,]$block<-5
sherryok<-subset(sherry8, select=c("site","block","plot","event","year","genus","species", "doy"))
#sherryok$variety <- NA
#sherryok$cult <- NA
# Drop rows where no event date could be determined.
sherryok<-sherryok[!is.na(sherryok$doy),]
return(sherryok)
}

##Price & Wasser data RMBL
## Data type: FFd, FFRD, SD
## Notes: mary.price@ucr.edu
#From Mary: "We chose up to 5 flowering individuals that spanned the elevational range of flowering individuals of that species on the small moraine. The "DIST" column indicates the meters downslope from the upper edge of the plot.
#There is some inconsistency in the ordering of columns. Sometimes the "species" column domes before the "individual ID" column, and sometimes not -- you'll have to look for that if you concatenate files.
#The "comments" column after each census date column sometimes includes estimates of fruiting success, measured as #fruits/#flowers on each plant, along with notes on whether the plant got frosted, parasitized, replaced, and the like. I can help you translate the notes if you have problems and need to know what the notes mean. The plant species vary considerably, as you know, in what fruit set means. Our notes are incomplete (at least, I haven't found the relevant info in spot-checks of notes--since we didn't include fruit set info in the analyses for our published paper, I'd have to search...), but I suspect that for Asteraceae we used the head as the "flower" unit, and probably we used the umbel as the unit for Polygonaceae.
#In all cases, 0 = not flowering; 1 = bud present; 2 = open flower present; 3 = senescent flower present (corolla still attached); 4 = initiated fruit (corolla off); 5 = expanding fruit; 6 = dehisced fruit.
#Of course, these stages mean different things for each species.
#Species names have changed in some cases, or if one uses different authorities. For example, Potentilla gracilis is now P. pulcherrima, and I think Bill Weber is the only one who advocates the genus "Seriphidium" for shrubby Artemisia. So you may want to double-check names.
#The number of files should correspond more-or-less with the number of years reported in the 1998 paper, with the addition of a few files from 1990 censuses (which can probably be left out of analyses since we were working the bugs out of our methods in that year and this was not a warming year).

# Clean the Price & Waser RMBL (site "exp11") raw phenology files.
# Each file is one species-year; per plant, the first survey date showing an
# open flower (ffd), initiated fruit (ffrd), or dehisced fruit (sd) is found
# from concatenated stage-score columns, reshaped to long format, and species
# codes are mapped to genus/species. 1990 (pre-warming) data are dropped.
# Returns site/block/plot/event/year/genus/species/doy.
# NOTE(review): `filename` is unused; the file list is hard-coded below.
clean.raw$price <- function(filename, path) {
pricefiles<-c("ATPHEN90g.csv","ATPHEN91g.csv","ATPHEN92g.csv","ATPHEN94g.csv","CLPHEN91g.csv","CLPHEN92g.csv","CLPHEN93g.csv","CLPHEN94g.csv","CRPHEN92g.csv","CRPHEN93g.csv","CRPHEN94g.csv","DNPHEN90g.csv","DNPHEN91g.csv","DNPHEN92g.csv","DNPHEN93g.csv","DNPHEN94g.csv","EGPHEN90g.csv","EGPHEN91g.csv","EGPHEN92g.csv","EGPHEN93g.csv","EGPHEN94g.csv","ESPHEN90g.csv","ESPHEN91g.csv","ESPHEN92g.csv","ESPHEN93g.csv","ESPHEN94g.csv","IAPHEN90g.csv","IAPHEN91g.csv","IAPHEN92g.csv","IAPHEN93g.csv","IAPHEN94g.csv","LLPHEN90G.csv","LLPHEN91G.csv","LLPHEN92G.csv","LLPHEN93G.csv","LLPHEN94G.csv","MFPHEN90G.csv","MFPHEN91g.csv","MFPHEN92g.csv","MFPHEN93g.csv","MFPHEN94g.csv","PGPHEN91g.csv","PGPHEN92g.csv","PGPHEN93g.csv","PGPHEN94g.csv")
# Number of header lines to skip per file (first four files differ).
skiplines<-c(3,3,2,2,rep(3,times=41))
# Accumulator starts as NA; the first row is dropped after the loop.
price <- NA
for (i in 1:length(pricefiles)){
file <- file.path(path, paste(pricefiles[i]))
price1 <- read.csv(file, skip=skiplines[i], header=TRUE)
# Normalize inconsistent column naming across files.
colnames(price1)[which(colnames(price1)=="PLOT"|colnames(price1)=="plot")]<-"plot"
colnames(price1)[which(colnames(price1)=="SPE")]<-"SP"
# Drop empty/junk rows (repeated headers and titles embedded in the data).
price1<-price1[!is.na(price1$SP),]
price1<-price1[!is.na(price1$plot),]
price1<-price1[!(price1$plot==""),]
price1<-price1[!(price1$plot=="JDATE"),]
price1<-price1[!(price1$plot=="PLOT"),]
price1<-price1[!(price1$plot=="jdate"),]
price1<-price1[!(price1$plot=="1992 WARMED-MEADOW PHENOLOGY"),]
#estimate first date of open flowers, fruits, and seeds dispersing
# Survey columns are those named "X<date>" by read.csv.
firstsurv<-min(which(substr(colnames(price1),1,1)=="X"))
lastsurv<-max(which(substr(colnames(price1),1,1)=="X"))
# Scores are concatenated digit codes (e.g. 12 = buds+open flowers); match
# any code that includes the target stage and take the first such column.
get.ffd <- function(x) names(x)[which(x==2|x==12|x==123|x==1234|x==12345|x==123456)][1]#first flower date
get.ffrd <- function(x) names(x)[which(x==4|x==34|x==234|x==1234|x==12345|x==123456)][1]#first fruit date
get.sd <- function(x) names(x)[which(x==6|x==56|x==456|x==3456|x==2345|x==123456)][1]#seed dispersal/fruit dehiscing date
# Drop the leading "X" from the matched column name, keeping the date text.
ffd<-substr(apply(price1[,firstsurv:lastsurv],1,get.ffd),2,9)
ffrd<-substr(apply(price1[,firstsurv:lastsurv],1,get.ffrd),2,9)
sd<-substr(apply(price1[,firstsurv:lastsurv],1,get.sd),2,9)
price2<-cbind(price1,ffd,ffrd,sd)
price2$filename<-paste(pricefiles[i])
# Characters 7-8 of the file name are the two-digit year (e.g. "ATPHEN90g").
price2$year<-paste("19",substr(price2$filename,7,8),sep="")
price3<-subset(price2, select=c("plot","SP", "ffd","ffrd","sd","filename","year"))
price<-rbind(price,price3)
}
price<-price[-1,]
# Wide-to-long over the three event columns; the "time" column (position 5)
# then carries the event name.
price4<-reshape(price,varying = list(names(price)[3:5]), direction = "long", v.names = c("date"), times = c(names(price)[3:5]))
colnames(price4)[5]<-"event"
price4$site<-"exp11"
# Placeholder so strptime below yields NA rather than erroring on NA input.
price4$date[which(is.na(price4$date))]<-"NA.NA.NA"
# Dates in the column headers are month.day.year; convert to day of year.
price4$doy<-strftime(strptime(price4$date, format = "%m.%d.%y"),format = "%j")
# Map two-letter species codes (upper- and lower-case variants) to taxa.
price4$genus<-NA
price4$species<-NA
price4$genus[price4$SP=="AT"] <- "Artemesia"
price4$species[price4$SP=="AT"] <- "tridentata"
price4$genus[price4$SP=="CL"] <- "Claytonia"
price4$species[price4$SP=="CL"] <- "lanceolata"
price4$genus[price4$SP=="CR"] <- "Campanula"
price4$species[price4$SP=="CR"] <- "rotundifolia"
price4$genus[price4$SP=="DN"] <- "Delphinium"
price4$species[price4$SP=="DN"] <- "nelsonii"
price4$genus[price4$SP=="EG"] <- "Erythronium"
price4$species[price4$SP=="EG"] <- "grandiflorum"
price4$genus[price4$SP=="ES"] <- "Eriogonum"
price4$species[price4$SP=="ES"] <- "subalpinum"
price4$genus[price4$SP=="es"] <- "Eriogonum"
price4$species[price4$SP=="es"] <- "subalpinum"
price4$genus[price4$SP=="IA"] <- "Ipomopsis"
price4$species[price4$SP=="IA"] <- "aggregata"
price4$genus[price4$SP=="ia"] <- "Ipomopsis"
price4$species[price4$SP=="ia"] <- "aggregata"
price4$genus[price4$SP=="LL"] <- "Lathyrus"
price4$species[price4$SP=="LL"] <- "leucanthus"
price4$genus[price4$SP=="MF"] <- "Mertensia"
price4$species[price4$SP=="MF"] <- "fusiformes"
price4$genus[price4$SP=="PG"] <- "Potentilla"
price4$species[price4$SP=="PG"] <- "gracilis"
price4<-price4[!is.na(price4$doy),]
price4$block<-NA
price5<-subset(price4, select=c("site","block", "plot","event","year","genus","species", "doy"))
price6<-price5[which(price5$year!="1990"),]#remove 1990, since this is prewarming data
#price5$variety <- NA
#price5$cult <- NA
return(price6)
}

##Data from Chuine
##no plots listed for 2003
# Clean the Chuine (site "exp02") phenology files for 2002-2004.
# BBCH-style stage columns (X55/X65/X85) are renamed to ffb/ffd/ffrd, reshaped
# to long format, and dates are parsed with a per-year format before mapping
# two-letter species codes to taxa.
# NOTE(review): `filename` is unused; the three file names are hard-coded.
clean.raw$chuine <- function(filename, path="./Data/Experiments/chuine") {
chuinefiles<-c("Chuine_pheno_2002.csv","Chuine_pheno_2003_cleaned.csv","Chuine_pheno_2004.csv")
years<-c(2002,2003,2004)
chuine <- NA
for (i in 1:length(chuinefiles)){
file <- file.path(path, paste(chuinefiles[i]))
chuine1 <- read.csv(file, header=TRUE)
colnames(chuine1)[which(colnames(chuine1)=="Block")]<-"block"
# Plot IDs are only unique within a block, so prefix with the block.
chuine1$plot<-paste(chuine1$block,chuine1$Plot,sep="")
colnames(chuine1)[which(colnames(chuine1)=="species"|colnames(chuine1)=="Species")]<-"sp"
# Rename BBCH stage columns to the event codes used throughout this script.
colnames(chuine1)[which(colnames(chuine1)=="X55")]<-"ffb"
colnames(chuine1)[which(colnames(chuine1)=="X65")]<-"ffd"
colnames(chuine1)[which(colnames(chuine1)=="X85")]<-"ffrd"
# Renamed so it is not "X"-prefixed; it sits outside the ffb:ffrd range used
# below. TODO(review): confirm stage 91 is intentionally excluded.
colnames(chuine1)[which(colnames(chuine1)=="X91")]<-"91"
#colnames(chuine1)[which(colnames(chuine1)=="X95")]<-"sen"
chuine1<-chuine1[!is.na(chuine1$sp),]
phen1<-which(colnames(chuine1)=="ffb")
phen2<-which(colnames(chuine1)=="ffrd")
# Wide-to-long over the event columns; "time" carries the event name.
chuine2<-reshape(chuine1,varying = list(names(chuine1)[phen1:phen2]), direction = "long", v.names = c("date"), times = c(names(chuine1)[phen1:phen2]))
chuine2$year<-paste(years[i])
chuine2<-chuine2[!chuine2$date=="",]
colnames(chuine2)[which(colnames(chuine2)=="time")]<-"event"
# Date formats differ by year: 2002 uses d/m/y, 2003-04 use day-month name.
if(years[i]==2002){chuine2$doy<-strftime(strptime(chuine2$date, format = "%d/%m/%y"),format = "%j")}
if(years[i]==2003){chuine2$doy<-strftime(strptime(chuine2$date, format = "%d-%b"),format = "%j")}
if(years[i]==2004){chuine2$doy<-strftime(strptime(chuine2$date, format = "%d-%b"),format = "%j")}
chuine3<-subset(chuine2, select=c("block","plot","sp", "event","year","doy"))
chuine<-rbind(chuine,chuine3)
}
chuine<-chuine[-1,]
# Map species codes to taxa.
chuine$genus<-NA
chuine$species<-NA
chuine$genus[chuine$sp=="aa"] <- "Artemesia"
chuine$species[chuine$sp=="aa"] <- "annua"
chuine$genus[chuine$sp=="av"] <- "Artemesia"
chuine$species[chuine$sp=="av"] <- "vulgaris"
chuine$genus[chuine$sp=="ar"] <- "Amaranthus"
chuine$species[chuine$sp=="ar"] <- "retroflexus"
chuine$genus[chuine$sp=="ad"] <- "Amaranthus"
chuine$species[chuine$sp=="ad"] <- "deflexus"
chuine$genus[chuine$sp=="qr"] <- "Quercus"
chuine$species[chuine$sp=="qr"] <- "robur"
chuine$genus[chuine$sp=="qp"] <- "Quercus"
chuine$species[chuine$sp=="qp"] <- "pubescens"
chuine$genus[chuine$sp=="qi"] <- "Quercus"
chuine$species[chuine$sp=="qi"] <- "ilex"
chuine$genus[chuine$sp=="lr"] <- "Lolium"
chuine$species[chuine$sp=="lr"] <- "rigidum"
chuine$genus[chuine$sp=="lp"] <- "Lolium"
chuine$species[chuine$sp=="lp"] <- "perenne"
chuine$genus[chuine$sp=="sv"] <- "Setaria"
chuine$species[chuine$sp=="sv"] <- "viridis"
chuine$genus[chuine$sp=="sp"] <- "Setaria"
chuine$species[chuine$sp=="sp"] <- "parviflora"
chuine$genus[chuine$sp=="lp3"] <- "Lolium"
chuine$species[chuine$sp=="lp3"] <- "perenne"
chuine$site<-"exp02"
chuine4<-subset(chuine, select=c("site","block","plot","event","year","genus","species", "doy"))
#chuine4$variety <- NA
#chuine4$cult <- NA
chuine4<-chuine4[!is.na(chuine4$doy),]
return(chuine4)
##
}

##Data from FORCE
##Contact: Christy Rollinson
# Clean the FORCE (site "exp09") inventory data.
# Takes the earliest survey day of year per year/block/plot/species/phenology
# state, maps state codes to event names, removes unidentifiable species codes,
# looks up full taxon names from Species_List.csv, splits them into
# genus/species (tidyr::separate), and fixes a series of misspellings.
clean.raw$force <- function(filename="FORCE_Inventories_2009_2010_clean.csv", path="./Data/Experiments/force") {
file <- file.path(path, filename)
force1 <- read.csv(file, check.names=FALSE, header=TRUE)
force1$plot<-paste(force1$Block,force1$Treatment,sep="")
force1$block<-force1$Block
# Earliest observed DOY per year/block/plot/species/state.
force2<-aggregate(x=force1$Survey.DOY, by=list(force1$Year,force1$block,force1$plot,force1$Species,force1$Phenology.State), FUN=min, na.rm=F)
colnames(force2)<-c("year","block","plot","SP","phenstate","doy")
# Map numeric phenology-state codes onto the event names used in this script.
force2$event<-NA
force2[force2$phenstate=="1",]$event<-"lod"
force2[force2$phenstate==2,]$event<-"ffd"
force2[force2$phenstate==3,]$event<-"ffrd"
force2[force2$phenstate==4,]$event<-"sd"
force2[force2$phenstate==5,]$event<-"sen"
force3<-force2[-which(is.na(force2$event)),]
# Drop species codes that are Excel date artifacts or unknowns.
force3<-force3[-which(force3$SP=="11-Oct"),]
force3<-force3[-which(force3$SP=="9-Oct"),]
force3<-force3[-which(force3$SP=="CEOB"),]
force3<-force3[-which(force3$SP=="U44"),]
spfile <- file.path(path, "Species_List.csv")
specieslist<-read.csv(spfile, header=TRUE)
force3$genussp<-NA
species1<-unique(force3$SP)
# U80 is treated as ARMI for the species lookup.
species1[which(species1=="U80")]<-"ARMI"
for (j in 1:length(species1)){
force3$genussp[force3$SP==species1[j]] <- specieslist[specieslist$Species.CODE==species1[j],]$Species[1]
}
# Split "Genus species" into two columns (tidyr).
force4<-force3 %>% separate(genussp, c("genus", "species"), sep=" ", remove=F)
# Correct misspellings carried over from the species list.
force4[which(force4$genus=="Sisynchium"),]$genus<-"Sisyrinchium"
force4[which(force4$genus=="Dianthus"),]$species<-"armeria"
force4[which(force4$genus=="Amphicarpa"),]$genus<-"Amphicarpaea"
force4[which(force4$genus=="Actea"),]$genus<-"Actaea"
force4[which(force4$species=="soria"),]$species<-"sororia"
force4[which(force4$species=="lavae"),]$species<-"laeve"
force4[which(force4$species=="abortivis"),]$species<-"abortivus"
force4[which(force4$species=="gramanifolia"),]$species<-"graminifolia"
force4[which(force4$genus=="Rubus"),]$species<-"occidentalis"
force4[which(force4$species=="anuus"),]$species<-"annuus"
force4[which(force4$genus=="Ceanothus"),]$species<-"americanus"
force4[which(force4$species=="hieraciifolia"),]$species<-"hieraciifolius"
#force4[which(force4$genus=="Oenethera"),]$genus<-"Oenothera"
force4$site<-"exp09"
force<-subset(force4, select=c("site","block","plot","event","year","genus","species", "doy"))
return(force)
##
}

##Data from Aaron Ellison's warming/phenology/ant experiment at Harvard Forest
##Spring and Fall phenology
##Contact: Aaron Ellison
# Clean the Ellison Harvard Forest (site "exp07") phenology data.
# Parses survey dates to year/day-of-year, takes the earliest DOY per
# year/plot/taxon/plant/phenology code, splits "genus_species", maps the
# phenology codes to event names, and relabels plots 13-15 as "OUT".
clean.raw$ellison <- function(filename="hf113-27-hf-phenology.csv", path="./Data/Experiments/ellison") {
file <- file.path(path, filename)
ellison1 <- read.csv(file, check.names=FALSE, header=TRUE)
colnames(ellison1)[2]<-"plot"
colnames(ellison1)[4]<-"genussp"
ellison1$doy<-strftime(strptime(ellison1$date, format = "%m/%d/%y"),format = "%j")
ellison1$year<-strftime(strptime(ellison1$date, format = "%m/%d/%y"),format = "%Y")
# Earliest DOY per year/plot/taxon/plant/phenology code.
ellison2<-aggregate(x=ellison1$doy, by=list(ellison1$year,ellison1$plot,ellison1$genussp,ellison1$plant,ellison1$phenology), FUN=min,na.rm=F)
# Split "genus_species" (tidyr), then rename columns positionally.
ellison3<-ellison2 %>% separate(Group.3, c("genus", "species"), sep="_", remove=F)
colnames(ellison3)<-c("year","plot","gensp")
colnames(ellison3)[4:8]<-c("genus","species","plant","phenology","doy")
# Map the survey phenology codes (F* = fall, S* = spring) to event names.
ellison3$event<-NA
ellison3[ellison3$phenology=="F2",]$event<-"sen"
ellison3[ellison3$phenology=="F3",]$event<-"drop"
ellison3[ellison3$phenology=="S3",]$event<-"bbd"
ellison3[ellison3$phenology=="S4",]$event<-"lud"
ellison3[ellison3$phenology=="S5",]$event<-"lod"
# Plots 13-15 are outside the chambers; pool them as "OUT".
ellison3[which(ellison3$plot=="13"|ellison3$plot=="14"|ellison3$plot=="15"),]$plot<-"OUT"
ellison3$site<-"exp07"
ellison3$block<-NA
ellison<-subset(ellison3, select=c("site","block","plot","event","year","genus","species", "doy"))
return(ellison)
}

##Data from Jennifer Dunne's study at RMBL
##Phenological stages:0=not yet flowering, 1=unopened flower buds, 2=open flowers,3
# (comment continued) =old flowers,4=initiated fruit, 5=enlarged fruit, and 6= for dehisced fruit.
#For Festuca we used five phenological stages: 0=plant with flower stalks,1=presence of spikelets, 2=exerted anthers and styles from the spikelet florets, 3=dried and broken-off anthers and styles, indicating a developing seed, and 4=disarticulated seeds.
##Contact: Jennifer Dunne
# Clean the Dunne RMBL warming-meadow (site "exp06") phenology files.
# Each file is one species-year; first open-flower (stage2) and, where present,
# first fruit (stage4) dates are the per-site/plot/rep minima, taxa are derived
# from the file names, and only meadow site 4 (the warming meadow) is kept.
clean.raw$dunne <- function(path="./Data/Experiments/dunne") {
dunnefiles<-c("1995DunnePhenologyData_Artemisia.csv","1995DunnePhenologyData_Delphinium.csv","1995DunnePhenologyData_Erigeron.csv","1995DunnePhenologyData_Helianthella.csv","1995DunnePhenologyData_Lathyrus.csv","1995DunnePhenologyData_Mertensiana.csv","1995DunnePhenologyData_Potentilla.csv","1996DunnePhenologyData_Artemisia.csv","1996DunnePhenologyData_Delphinium.csv","1996DunnePhenologyData_Erigeron.csv","1996DunnePhenologyData_Eriogonums.csv","1996DunnePhenologyData_Festuca.csv","1996DunnePhenologyData_Helianthella.csv","1996DunnePhenologyData_Lathyrus.csv","1996DunnePhenologyData_Mertensiana.csv","1996DunnePhenologyData_Potentilla.csv","1997DunnePhenologyData_Achillea.csv","1997DunnePhenologyData_Artemisia.csv","1997DunnePhenologyData_Claytonia.csv","1997DunnePhenologyData_Delphinium.csv","1997DunnePhenologyData_Erigeron.csv","1997DunnePhenologyData_Eriogonumu.csv","1997DunnePhenologyData_Festuca.csv","1997DunnePhenologyData_Helianthella.csv","1997DunnePhenologyData_Lathyrus.csv","1997DunnePhenologyData_Mertensia.csv","1997DunnePhenologyData_Potentilla.csv","1998DunnePhenologyData_Artemisia.csv","1998DunnePhenologyData_Claytonia.csv","1998DunnePhenologyData_Delphinium.csv","1998DunnePhenologyData_Erigeron.csv","1998DunnePhenologyData_Eriogonumu.csv","1998DunnePhenologyData_Eriogonums.csv","1998DunnePhenologyData_Festuca.csv","1998DunnePhenologyData_Helianthella.csv","1998DunnePhenologyData_Lathyrus.csv","1998DunnePhenologyData_Mertensia.csv","1998DunnePhenologyData_Potentilla.csv")
# Accumulator starts as NA; the first row is dropped after the loop.
dunne <- NA
for (i in 1:length(dunnefiles)){
file <- file.path(path, paste(dunnefiles[i]))
dunne1 <- read.csv(file, header=TRUE)
dunne_ffd<-aggregate(x=dunne1$date, by=list(dunne1$site,dunne1$plot,dunne1$rep,dunne1$stage2), FUN=min,na.rm=F)#first date of open flowers for each site/plot/rep
dunne_ffd$event<-"ffd"
# Not every file has a stage4 (fruit) column; when absent, only ffd is kept.
if(is.element("stage4", colnames(dunne1)))(dunne_ffrd<-aggregate(x=dunne1$date, by=list(dunne1$site,dunne1$plot,dunne1$rep,dunne1$stage4), FUN=min,na.rm=F)) #first date of fruit for each site/plot/rep
else(dunne2<-dunne_ffd)
if(is.element("stage4", colnames(dunne1)))(dunne_ffrd$event<-"ffrd")
if(is.element("stage4", colnames(dunne1)))(dunne2<-rbind(dunne_ffd, dunne_ffrd))
dunne2$plot<-dunne2$Group.2
# Taxon name = file name with the 23-character "YYYYDunnePhenologyData_"
# prefix and ".csv" suffix removed; year = first four characters.
stop<-nchar(dunnefiles[i])-4
dunne2$genussp<-paste(substr(dunnefiles[i],24,stop))
dunne2$year<-substr(dunnefiles[i],1,4)
colnames(dunne2)[5]<-c("doy")
dunne <- rbind(dunne,dunne2)
}
# Map file-name taxa to genus/species (some file names are misspelled or
# pluralized; those spellings are preserved as lookup keys).
dunne$genus<-NA
dunne$species<-NA
dunne$genus[dunne$genussp=="Artemisia"] <- "Artemisia"
dunne$species[dunne$genussp=="Artemisia"] <- "tridentata"
dunne$genus[dunne$genussp=="Claytonia"] <- "Claytonia"
dunne$species[dunne$genussp=="Claytonia"] <- "lanceolata"
dunne$genus[dunne$genussp=="Delphinium"] <- "Delphinium"
dunne$species[dunne$genussp=="Delphinium"] <- "nuttallianum"
dunne$genus[dunne$genussp=="Erigeron"] <- "Erigeron"
dunne$species[dunne$genussp=="Erigeron"] <- "speciosus"
dunne$genus[dunne$genussp=="Helianthella"] <- "Helianthella"
dunne$species[dunne$genussp=="Helianthella"] <- "quinquenervis"
dunne$genus[dunne$genussp=="Lathyrus"] <- "Lathyrus"
dunne$species[dunne$genussp=="Lathyrus"] <- "lanszwertii"
dunne$genus[dunne$genussp=="Potentilla"] <- "Potentilla"
dunne$species[dunne$genussp=="Potentilla"] <- "hippiana"
dunne$genus[dunne$genussp=="Mertensiana"] <- "Mertensiana"
dunne$species[dunne$genussp=="Mertensiana"] <- "fusiformis"
dunne$genus[dunne$genussp=="Eriogonums"] <- "Eriogonum"
dunne$species[dunne$genussp=="Eriogonums"] <- "subalpinum"
dunne$genus[dunne$genussp=="Festuca"] <- "Festuca"
dunne$species[dunne$genussp=="Festuca"] <- "thurberi"
dunne$genus[dunne$genussp=="Achillea"] <- "Achillea"
dunne$species[dunne$genussp=="Achillea"] <- "sp"
dunne$genus[dunne$genussp=="Eriogonumu"] <- "Eriogonum"
dunne$species[dunne$genussp=="Eriogonumu"] <- "umbellatum"
dunne$site<-"exp06"
#colnames(dunne)[1]<-"block"#this is the "site" column from the dunne files. i think we actually want to select out only plots frmo one site...
dunne$block<-NA
dunne<-dunne[-1,]
dunne<-dunne[dunne$Group.1=="4",]#site 4= the warming meadow, so we only want these data
dunnermbl<-subset(dunne, select=c("site","block","plot","event","year","genus","species", "doy"))
dunnermbl<-dunnermbl[!is.na(dunnermbl$genus),]
return(dunnermbl)
}

##Data from Haibei Alpine Grassland Research Station, China
##Spring phenology
##Contact: sonamkyi@itpcas.ac.cn
# Clean the Haibei (site "exp13") data: drop the winter-warming treatment,
# stack first-flowering (FFD) and leaf-out (LOD) dates into long format, and
# map two-letter species codes to taxa.
clean.raw$haibei <- function(filename="ww_data1.csv", path="./Data/Experiments/haibei") {
file <- file.path(path, filename)
haibei1 <- read.csv(file, check.names=FALSE, header=TRUE)
haibei1<-haibei1[haibei1$Treatment!="WW",]#remove winter warming treatment
colnames(haibei1)[1]<-"year"
colnames(haibei1)[4]<-"block"
colnames(haibei1)[5]<-"plot"
# Build one long table from the two event columns.
flow<-subset(haibei1,select=c("block","plot","year","Species","FFD"))
colnames(flow)[5]<-"doy"
flow$event<-"ffd"
leaf<-subset(haibei1,select=c("block","plot","year","Species","LOD"))
colnames(leaf)[5]<-"doy"
leaf$event<-"lod"
haibei<-rbind(flow,leaf)
haibei$genus[haibei$Species=="En"] <- "Elymus"
haibei$species[haibei$Species=="En"] <- "nutans"
haibei$genus[haibei$Species=="Sa"] <- "Stipa"
haibei$species[haibei$Species=="Sa"] <- "alinea"
haibei$genus[haibei$Species=="Pp"] <- "Poa"
haibei$species[haibei$Species=="Pp"] <- "pratensis"
haibei$genus[haibei$Species=="Kh"] <- "Kobresia"
haibei$species[haibei$Species=="Kh"] <- "humilis"
haibei$genus[haibei$Species=="Th"] <- "Tibetia"
haibei$species[haibei$Species=="Th"] <- "himalaica"
haibei$genus[haibei$Species=="Ma"] <- "Melilotoides"
haibei$species[haibei$Species=="Ma"] <- "archiducis-nicolai"
haibei$genus[haibei$Species=="Pn"] <- "Potentilla"
haibei$species[haibei$Species=="Pn"] <- "nivea"
haibei$genus[haibei$Species=="Gs"] <- "Gentiana"
haibei$species[haibei$Species=="Gs"] <- "straminea"
haibei$genus[haibei$Species=="Ss"] <- "Saussurea"
haibei$species[haibei$Species=="Ss"] <- "superba"
haibei$genus[haibei$Species=="Gl"] <- "Gentiana"
haibei$species[haibei$Species=="Gl"] <- "lawrencei"
haibei$genus[haibei$Species=="Ad"] <- "Aster"
haibei$species[haibei$Species=="Ad"] <- "diplostephioides"
haibei$site<-"exp13"
# DOY values are fractional in the source; round to whole days.
haibei$doy<-round(haibei$doy,digits=0)
haibei2<-subset(haibei, select=c("site","block","plot","event","year","genus","species", "doy"))
return(haibei2)
}

##Data from Cedar Creek
##Spring and Fall phenology
##Contact: danbaha@umn.edu
# Clean the Cedar Creek (site "exp14") data: recode heat treatments to the
# labels used in the climate data, build plot IDs as block-treatment, stack
# flowering (FlwrDay) and seed (SeedDay) dates into long format, and split the
# "Genus species" strings into separate columns.
clean.raw$cc <- function(filename="https___pasta.lternet.edu_package_data_eml_knb-lter-cdr_575_8_760e44559a2611967d61bb35f21d9260.csv", path="./Data/Experiments/cedarcreek") {
file <- file.path(path, filename)
phen <- read.csv(file, check.names=FALSE, header=TRUE)
colnames(phen)[2]<-"year"
colnames(phen)[4]<-"block"
phen$Heat.treatment<-NA#make the plots to match those in clim data
phen$Heat.treatment[phen$Treatment=="0-Ambient"]<-"control"
phen$Heat.treatment[phen$Treatment=="1-Low"]<-"low"
phen$Heat.treatment[phen$Treatment=="2-High"]<-"high"
phen$plot<-paste(phen$block,phen$Heat.treatment,sep="-")
# Build one long table from the two event columns.
flow<-subset(phen,select=c("block","plot","year","Species","FlwrDay"))
colnames(flow)[5]<-"doy"
flow$event<-"ffd"
fruit<-subset(phen,select=c("block","plot","year","Species","SeedDay"))
colnames(fruit)[5]<-"doy"
fruit$event<-"ffrd"
ccphen<-rbind(flow,fruit)
ccphen$site<-"exp14"
# Split "Genus species" on the space into a two-column matrix.
genus.species<-strsplit(ccphen$Species," ")
genus.species<-do.call(rbind, genus.species)
colnames(genus.species)<-c("genus","species")
ccphen<-cbind(ccphen,genus.species)
cedarcreek<-subset(ccphen, select=c("site","block","plot","event","year","genus","species", "doy"))
return(cedarcreek)
}
# Produce cleaned raw data
# Driver: run every clean.raw$* function, stack the results into one
# standardized table (site/block/plot/event/year/genus/species/doy), apply
# cross-dataset fixes, and write the combined phenology database plus a
# species list to CSV. The dim() calls record the expected row counts at each
# stage as a manual sanity check.
raw.data.dir <- "./Data/Experiments"
cleandata.raw <- list()
cleandata.raw$marchin <- clean.raw$marchin(path="./Data/Experiments/marchin")
cleandata.raw$bace <- clean.raw$bace(path="./Data/Experiments/bace")
cleandata.raw$farnsworth <- clean.raw$farnsworth(path="./Data/Experiments/farnsworth")
cleandata.raw$cleland <- clean.raw$cleland(path="./Data/Experiments/cleland")
cleandata.raw$clarkduke <- clean.raw$clarkduke(path="./Data/Experiments/clark")
cleandata.raw$clarkharvard <- clean.raw$clarkharvard(path="./Data/Experiments/clark")
cleandata.raw$sherry <- clean.raw$sherry(path="./Data/Experiments/sherry")
cleandata.raw$price <- clean.raw$price(path="./Data/Experiments/price")
cleandata.raw$chuine<- clean.raw$chuine(path="./Data/Experiments/chuine")
cleandata.raw$force<- clean.raw$force(path="./Data/Experiments/force")
cleandata.raw$ellison<- clean.raw$ellison(path="./Data/Experiments/ellison")
cleandata.raw$dunne<- clean.raw$dunne(path="./Data/Experiments/dunne")
cleandata.raw$haibei<- clean.raw$haibei(path="./Data/Experiments/haibei")
cleandata.raw$cc<- clean.raw$cc(path="./Data/Experiments/cedarcreek")
# All per-experiment tables share the same 8 columns, so they stack directly.
expphendb <- do.call("rbind", cleandata.raw)
row.names(expphendb) <- NULL
#Do some additional cleaning and checking:
dim(expphendb) #76966 8
expphendb<-expphendb[!is.na(expphendb$event),]
expphendb<-expphendb[!is.na(expphendb$doy),]
expphendb$doy<-as.numeric(expphendb$doy)
dim(expphendb)#75314 rows,8 columns
# Drop rows with missing or unusable genus labels.
expphendb<-expphendb[!is.na(expphendb$genus),]
expphendb<-expphendb[!expphendb$genus=="",]
expphendb<-expphendb[!expphendb$genus=="spp.",]#should look at these
expphendb<-expphendb[-which(expphendb$genus=="Le"),]#should look at these
expphendb<-expphendb[-which(expphendb$genus=="Unknown"),]#should look at these
# Dataset-specific name and value fixes (source noted in each trailing comment).
expphendb[which(expphendb$genus=="Artemesia"),]$genus<-"Artemisia"#chuine
expphendb[which(expphendb$species=="spp"),]$species<-"sp"#force
expphendb[which(expphendb$species=="spp."),]$species<-"sp"#force
expphendb[which(expphendb$species=="sp."),]$species<-"sp"#force
# NOTE(review): doy was converted to numeric above, so this comparison and the
# character replacement rely on implicit coercion — verify intended.
expphendb[which(expphendb$doy=="144153"),]$doy<-"153"#bace
expphendb[which(expphendb$species=="officionale"),]$species<-"officinale"#force
expphendb[which(expphendb$species=="(incanum?)"),]$species<-"incanum"#force
expphendb[which(expphendb$species=="quiquefolia"),]$species<-"quinquefolia"#force
expphendb[which(expphendb$species=="fusiformes"),]$species<-"fusiformis"#price
expphendb[which(expphendb$genus=="Mertensiana"),]$genus<-"Mertensia"#price
expphendb[which(expphendb$species=="caepitosum"),]$species<-"caespitosum"#force
expphendb[which(expphendb$genus=="Avena"),]$species<-"sp"#JAsper ridge- could be multiple spp
expphendb[which(expphendb$species==""),]$species<-"sp"#all galiums at force
dim(expphendb)#72468 rows,8 columns
head(expphendb)
# Sort and write the combined database.
expphendb <- expphendb[order(expphendb$site,expphendb$block,expphendb$plot,expphendb$year,expphendb$doy,expphendb$genus),]
write.csv(expphendb,"analyses/exppheno.csv",row.names=F, eol="\r\n")
sort(unique(expphendb$site))#14 experiments across 9 sites
sort(unique(expphendb$genus))#161 genera
expphendb$genus.species<-paste(expphendb$genus,expphendb$species,sep=".")
sort(unique(expphendb$genus.species))#268 species
unique(expphendb$event)#13 phenological events
#Do species cleaning with Miriam's new file
#sites<-unique(expphendb$site)
#for(i in 1:length(sites)){
#for (j in 1:length(species1)){
# clarkduke1$genus[clarkduke1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$genus
#clarkduke1$species[clarkduke1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$species
#}
#}
# Export the sorted unique genus.species list for downstream species cleaning.
specieslist<-sort(unique(paste(expphendb$genus,expphendb$species, sep=".")))
write.csv(specieslist,"exp_splist.csv")
/Analyses/radcliffe_merge_exp.R
no_license
AileneKane/radcliffe
R
false
false
68,618
r
### Started 8 December 2015 ## ### By Ailene Ettinger ### setwd("~/Documents/GitHub/radcliffe") rm(list=ls()) options(stringsAsFactors=FALSE) ##load packages library(reshape) library(tidyr) # make list to store all the derived dataset cleaning functions clean.raw <- list() clean.raw$marchin <- function(filename="Budburst_Marchin.csv", path="./Data/Experiments/marchin") { ## Marchin ## ## Data type: BBD,FFD ## ## Notes: Contact: Renee Marchin, renee.marchin@sydney.edu.au## file <- file.path(path, filename) marchin1 <- read.csv(file, check.names=FALSE, header=TRUE) names(marchin1)[2] <- "genusspecies" names(marchin1)[1] <- "year" names(marchin1)[3] <- "plot" names(marchin1)[8] <- "doy" marchin1a<- subset(marchin1, select=c("year","genusspecies","plot", "doy")) marchin1a$site <- "exp10" marchin1a$event <- "bbd" marchin2<-read.csv("Data/Experiments/marchin/Flower_Marchin.csv", header=T) names(marchin2)[2] <- "genusspecies" names(marchin2)[1] <- "year" names(marchin2)[3] <- "plot" names(marchin2)[7] <- "doy" marchin2a<- subset(marchin2, select=c("year","genusspecies","plot", "doy")) marchin2a$site <- "exp10" marchin2a$event <- "ffd" marchin3<-rbind(marchin1a, marchin2a) marchin3$genus<-NA marchin3$species<-NA marchin3$genus[marchin3$genusspecies=="ACRU"] <- "Acer" marchin3$species[marchin3$genusspecies=="ACRU"] <- "rubrum" marchin3$genus[marchin3$genusspecies=="CATO"] <- "Carya" marchin3$species[marchin3$genusspecies=="CATO"] <- "tomentosa" marchin3$genus[marchin3$genusspecies=="QUAL"] <- "Quercus" marchin3$species[marchin3$genusspecies=="QUAL"] <- "alba" marchin3$genus[marchin3$genusspecies=="VAPA"] <- "Vaccinium" marchin3$species[marchin3$genusspecies=="VAPA"] <- "pallidum" marchin3$genus[marchin3$genusspecies=="VAST"] <- "Vaccinium" marchin3$species[marchin3$genusspecies=="VAST"] <- "stamineum" marchin3$genus[marchin3$genusspecies=="QURU"] <- "Quercus" marchin3$species[marchin3$genusspecies=="QURU"] <- "rubra" marchin3$genus[marchin3$genusspecies=="CHMA"] <- 
"Chimaphila" marchin3$species[marchin3$genusspecies=="CHMA"] <- "maculata" marchin3$genus[marchin3$genusspecies=="HEAR"] <- "Hexastylis" marchin3$species[marchin3$genusspecies=="HEAR"] <- "arifolia" marchin3$genus[marchin3$genusspecies=="HIVE"] <- "Hieracium" marchin3$species[marchin3$genusspecies=="HIVE"] <- "venosum" marchin3$genus[marchin3$genusspecies=="THTH"] <- "Thalictrum" marchin3$species[marchin3$genusspecies=="THTH"] <- "thalictroides" marchin3$genus[marchin3$genusspecies=="TIDI"] <- "Tipularia" marchin3$species[marchin3$genusspecies=="TIDI"] <- "discolor" marchin3$block<-NA marchin<-subset(marchin3, select=c("site","block","plot","event","year","genus","species", "doy")) #marchin$variety <- NA #marchin$cult <- NA return(marchin) } clean.raw$bace <- function(filename="BACE_deciduous2010_originaltrees.csv", path="./Data/Experiments/bace") { ##BACE ## ## Data type: BBD,LOD,LUD ## ## Notes: Jeff Dukes = contact, years 2010,2011,2013 ##Decided to follow NPN's definitios: >3 of observations of each event needed to count file <- file.path(path, filename) bace1 <- read.csv(file, check.names=FALSE, header=TRUE,na.strings = ".") bace1<-bace1[-1,] names(bace1)[5] <- "genusspecies" names(bace1)[1] <- "plot" names(bace1)[7] <- "doy_bb" names(bace1)[9] <- "doy_lunf" names(bace1)[10] <- "doy_lo" bace1a<- subset(bace1, select=c("genusspecies","plot", "doy_bb")) bace1a$event <- "bbd" bace1a$year <- 2010 bace1a$site <- "exp01" names(bace1a)[3]<-"doy" bace1b<- subset(bace1, select=c("genusspecies","plot", "doy_lo")) bace1b$event <- "lod" bace1b$year <- 2010 bace1b$site <- "exp01" names(bace1b)[3]<-"doy" bace1c<- subset(bace1, select=c("genusspecies","plot", "doy_lunf")) bace1c$event <- "lud" bace1c$year <- 2010 bace1c$site <- "exp01" names(bace1c)[3]<-"doy" file2 <- file.path(path, "BACE_pinustrobus2010_originaltrees.csv") bace2 <- read.csv(file2, check.names=FALSE, header=TRUE,na.strings = ".") names(bace2)[5] <- "genusspecies" names(bace2)[1] <- "plot" names(bace2)[6] <- 
"doy_bbd" names(bace2)[8] <- "doy_fgn" names(bace2)[10] <- "doy_fnb" names(bace2)[12] <- "doy_fyn" bace2a<-subset(bace2, select=c("genusspecies","plot", "doy_bbd")) bace2a$event <- "bbd" bace2a$year <- 2010 bace2a$site <- "exp01" names(bace2a)[3]<-"doy" bace2b<- subset(bace2, select=c("genusspecies","plot", "doy_fgn")) bace2b$event <- "fgn" bace2b$year <- 2010 bace2b$site <- "exp01" names(bace2b)[3]<-"doy" bace2c<- subset(bace2, select=c("genusspecies","plot", "doy_fnb")) bace2c$event <- "fnb" bace2c$year <- 2010 bace2c$site <- "exp01" names(bace2c)[3]<-"doy" bace2c<- subset(bace2, select=c("genusspecies","plot", "doy_fnb")) bace2c$event <- "fnb" bace2c$year <- 2010 bace2c$site <- "exp01" names(bace2c)[3]<-"doy" bace2d<- subset(bace2, select=c("genusspecies","plot", "doy_fyn")) bace2d$event <- "fyn" bace2d$year <- 2010 bace2d$site <- "exp01" names(bace2d)[3]<-"doy" #2011 file3 <- file.path(path, "2011BACEherbaceousphenologydata11_11CEG.csv") bace3 <- read.csv(file3, skip=1, header=TRUE) bace3<-bace3[1:min(which(bace3$Plot=="")),] names(bace3)[2] <- "genusspecies" names(bace3)[1] <- "plot" bace3$doy_ffd<-bace3[,6] bace3[which(bace3$doy_ffd=="" & bace3$open.flowers>0),]$doy_ffd<-bace3[which(bace3$doy_ffd=="" & bace3$open.flowers>0),]$open.flowers bace3a<- subset(bace3, select=c("genusspecies","plot", "doy_ffd")) bace3a$event <- "ffd" bace3a$year <- 2011 bace3a$site <- "exp01" names(bace3a)[3]<-"doy" #2012 file4 <- file.path(path, "2012_BACESpringTreePhenology_04_04_2013_CEG_EW.csv") bace4 <- read.csv(file4, skip=1, header=TRUE,na.strings=".") bace4<-bace4[1:468,]#remove blank rows at bottom names(bace4)[5] <- "genusspecies" names(bace4)[1] <- "plot" names(bace4)[6] <- "doy_bbd"#use first bbd (not >3) names(bace4)[8] <- "doy_lud"#leaf unfolding date names(bace4)[10] <- "doy_lod"#leaf out date bace4a<- subset(bace4, select=c("genusspecies","plot", "doy_bbd")) bace4a$event <- "bbd" bace4a$year <- 2012 bace4a$site <- "exp01" names(bace4a)[3]<-"doy" bace4b<- subset(bace4, 
select=c("genusspecies","plot", "doy_lud")) bace4b$event <- "lud" bace4b$year <- 2012 bace4b$site <- "exp01" names(bace4b)[3]<-"doy" bace4c<- subset(bace4, select=c("genusspecies","plot", "doy_lod")) bace4c$event <- "lod" bace4c$year <- 2012 bace4c$site <- "exp01" names(bace4c)[3]<-"doy" file5 <- file.path(path, "2012_BACESpringTreePhenology_04_04_2013_CEG_NS.csv") bace5 <- read.csv(file5, skip=1, header=TRUE,na.strings=".") bace5<-bace5[1:624,]#remove blank rows ar bottom, if there are any names(bace5)[5] <- "genusspecies" names(bace5)[1] <- "plot" names(bace5)[6] <- "doy_bbd"#use first bbd (not >3) names(bace5)[8] <- "doy_lud"#leaf unfolding date names(bace5)[10] <- "doy_lod"#leaf out date bace5a<- subset(bace5, select=c("genusspecies","plot", "doy_bbd")) bace5a$event <- "bbd" bace5a$year <- 2012 bace5a$site <- "exp01" names(bace5a)[3]<-"doy" bace5b<- subset(bace5, select=c("genusspecies","plot", "doy_lud")) bace5b$event <- "lud" bace5b$year <- 2012 bace5b$site <- "exp01" names(bace5b)[3]<-"doy" bace5c<- subset(bace5, select=c("genusspecies","plot", "doy_lod")) bace5c$event <- "lod" bace5c$year <- 2012 bace5c$site <- "exp01" names(bace5c)[3]<-"doy" #file6 <- file.path(path, "2012_BACESpringTreePhenology_04_04_2013_CEG_Outer.csv")#leave these out since they were just outside the main treatment area, were caged, and did not get all treatments #bace6 <- read.csv(file6, skip=1, header=TRUE,na.strings=".") #bace6<-bace6[1:576,]#remove blank rows at bottom #names(bace6)[4] <- "genusspecies" #names(bace6)[1] <- "plot" #names(bace6)[5] <- "doy_bbd"#use first bbd (not >3) #names(bace6)[7] <- "doy_lud"#leaf unfolding date #names(bace6)[9] <- "doy_lod"#leaf out date #bace6a<- subset(bace6, select=c("genusspecies","plot", "doy_bbd")) #bace6a$event <- "bbd" #bace6a$year <- 2012 #bace6a$site <- "exp01" #names(bace6a)[3]<-"doy" #bace6b<- subset(bace6, select=c("genusspecies","plot", "doy_lud")) #bace6b$event <- "lud" #bace6b$year <- 2012 #bace6b$site <- "exp01" 
#names(bace6b)[3]<-"doy" #bace6c<- subset(bace6, select=c("genusspecies","plot", "doy_lod")) #bace6c$event <- "lod" #bace6c$year <- 2012 #bace6c$site <- "exp01" #names(bace6c)[3]<-"doy" file7 <- file.path(path, "2012_BACESpringTreePhenology_04_04_2013_CEG_pinus.csv") bace7 <- read.csv(file7, skip=1, header=TRUE,na.strings=".") bace7<-bace7[1:156,]#remove blank rows at bottom names(bace7)[5] <- "genusspecies" names(bace7)[1] <- "plot" names(bace7)[6] <- "doy_bbd"#first bud bolted names(bace7)[8] <- "doy_fgn"#first green needles names(bace7)[10] <- "doy_fnb"#first needle bundles names(bace7)[12] <- "doy_fyn"#first young needles bace7a<- subset(bace7, select=c("genusspecies","plot", "doy_bbd")) bace7a$event <- "bbd" bace7a$year <- 2012 bace7a$site <- "exp01" names(bace7a)[3]<-"doy" bace7b<- subset(bace7, select=c("genusspecies","plot", "doy_fgn")) bace7b$event <- "fgn" bace7b$year <- 2012 bace7b$site <- "exp01" names(bace7b)[3]<-"doy" bace7c<- subset(bace7, select=c("genusspecies","plot", "doy_fnb")) bace7c$event <- "fnb" bace7c$year <- 2012 bace7c$site <- "exp01" names(bace7c)[3]<-"doy" bace7d<- subset(bace7, select=c("genusspecies","plot", "doy_fyn")) bace7d$event <- "fyn" bace7d$year <- 2012 bace7d$site <- "exp01" names(bace7d)[3]<-"doy" #2009 file8 <- file.path(path, "2009BACEdeciduoustreespringphenology.csv") bace8 <- read.csv(file8, header=TRUE) names(bace8)[9] <- "genusspecies" names(bace8)[4] <- "plot" names(bace8)[17] <- "doy_bbd"#use first bbd (not >3) names(bace8)[19] <- "doy_lud"#leaf unfolding date names(bace8)[21] <- "doy_lod"#leaf out date bace8a<- subset(bace8, select=c("genusspecies","plot", "doy_bbd")) bace8a$event <- "bbd" bace8a$year <- 2012 bace8a$site <- "exp01" names(bace8a)[3]<-"doy" bace8b<- subset(bace8, select=c("genusspecies","plot", "doy_lud")) bace8b$event <- "lud" bace8b$year <- 2012 bace8b$site <- "exp01" names(bace8b)[3]<-"doy" bace8c<- subset(bace8, select=c("genusspecies","plot", "doy_lod")) bace8c$event <- "lod" bace8c$year <- 2012 
bace8c$site <- "exp01" names(bace8c)[3]<-"doy" #2013 file9 <- file.path(path, "2013BACEherbaceousphenologydatasheet.csv") bace9 <- read.csv(file9, skip=1, header=TRUE) bace9<-bace9[1:min(which(bace9$Plot=="")),] names(bace9)[2] <- "genusspecies" names(bace9)[1] <- "plot" bace9$doy_ffd<-bace9[,6] bace9[which(bace9$doy_ffd=="" & bace9$open.flowers>0),]$doy_ffd<-bace9[which(bace9$doy_ffd=="" & bace9$open.flowers>0),]$open.flowers bace9a<- subset(bace9, select=c("genusspecies","plot", "doy_ffd")) bace9a$event <- "ffd" bace9a$year <- 2013 bace9a$site <- "exp01" names(bace9a)[3]<-"doy" #put them all together baceall<-rbind(bace1a,bace1b,bace1c,bace2a,bace2b,bace2c,bace2d,bace3a,bace4a,bace4b,bace4c,bace5a,bace5b,bace5c,bace7a,bace7b,bace7c,bace7d,bace8a,bace8b,bace8c,bace9a) baceall<-baceall[-which(baceall$genusspecies==""),] baceall<-baceall[-which(baceall$genusspecies=="Genus sp."),] baceall<-baceall[-which(baceall$genusspecies=="moss"),] baceall<-baceall[-which(baceall$genusspecies=="Oregano"),] baceall[baceall$genusspecies=="Giant fox tail",]$genusspecies<-"Setaria faberi" baceall[baceall$genusspecies=="Setarir glauca",]$genusspecies<-"Setaria glauca" baceall[baceall$genusspecies=="Setaria viridens",]$genusspecies<-"Setaria viridis" baceall[baceall$genusspecies=="conyza canadensis",]$genusspecies<-"Conyza canadensis" baceall[baceall$genusspecies=="linnaria vulgaris"|baceall$genusspecies=="Linneria vulgaris",]$genusspecies<-"Linaria vulgaris" baceall[baceall$genusspecies=="A. rubrum "|baceall$genusspecies=="A. rubrum "|baceall$genusspecies=="A. rubrum"|baceall$genusspecies=="A. rubrum (main stem)",]$genusspecies <- "Acer rubrum" baceall[baceall$genusspecies=="B. lenta "|baceall$genusspecies=="B. lenta "|baceall$genusspecies=="B. lenta"|baceall$genusspecies=="B. lenta (main stem)",]$genusspecies <- "Betula lenta" baceall[baceall$genusspecies=="Q. rubra "|baceall$genusspecies=="Q. rubra"|baceall$genusspecies=="Q. 
rubra ",]$genusspecies <- "Quercus rubra" baceall[baceall$genusspecies=="P. strobus"|baceall$genusspecies=="P. strobus ",]$genusspecies <- "Pinus strobus" baceall[baceall$genusspecies=="B. popul" ,]$genusspecies<- "Betula populifolia" baceall[baceall$genusspecies=="U. americana" ,]$genusspecies<- "Ulmus americana" baceall[baceall$genusspecies=="P. grand" ,]$genusspecies<- "Populus grandidentata" baceall[baceall$genusspecies=="P. serotina" ,]$genusspecies<- "Prunus serotina" baceall[baceall$genusspecies=="Polygonum" ,]$genusspecies<- "Polygonum sp." baceall[baceall$genusspecies=="Asclepias syriaca ",]$genusspecies<-"Asclepias syriaca" baceall[baceall$genusspecies=="Capsella bursa-pastoris ",]$genusspecies<-"Capsella bursa-pastoris" baceall[baceall$genusspecies=="Cerastium fontanum ",]$genusspecies<-"Cerastium fontanum" baceall[baceall$genusspecies=="Dactylis glomerata ",]$genusspecies<-"Dactylis glomerata" baceall[baceall$genusspecies=="Draba verna ",]$genusspecies<-"Draba verna" baceall[baceall$genusspecies=="Elymus repens ",]$genusspecies<-"Elymus repens" baceall[baceall$genusspecies=="Erigeron annuus ",]$genusspecies<-"Erigeron annuus" baceall[baceall$genusspecies=="Festuca spp. ",]$genusspecies<-"Festuca sp." 
baceall[baceall$genusspecies=="lepidium virginicum",]$genusspecies<-"Lepidium virginicum" baceall[baceall$genusspecies=="Lamium amplexicaula",]$genusspecies<-"Lamium amplexicaule" baceall[baceall$genusspecies=="Oxalis stricta ",]$genusspecies<-"Oxalis stricta" baceall[baceall$genusspecies=="Rumex crispus ",]$genusspecies<-"Rumex crispus" baceall[baceall$genusspecies=="Silene alba ",]$genusspecies<-"Silene alba" baceall[baceall$genusspecies=="Tanacetum vulgare ",]$genusspecies<-"Tanacetum vulgare" baceall[baceall$genusspecies=="Taraxacum officinale ",]$genusspecies<-"Taraxacum officinale" baceall[baceall$genusspecies=="Trifolium pratense ",]$genusspecies<-"Trifolium pratense" baceall[baceall$genusspecies=="Trifolium repens ",]$genusspecies<-"Trifolium repens" baceall[baceall$genusspecies=="Veronica arvensis ",]$genusspecies<-"Veronica arvensis" baceall[baceall$genusspecies=="Phleum pratense " ,]$genusspecies<-"Phleum pratense" baceall[baceall$genusspecies=="Plantago lanceolata ",]$genusspecies<-"Plantago lanceolata" baceall[baceall$genusspecies=="Potentilla argentea ",]$genusspecies<-"Potentilla argentea" baceALL<-baceall %>% separate(genusspecies, c("genus", "species"), sep=" ", remove=F) baceALL$block<-NA #baceALL[baceALL$plot=="C1"|baceALL$plot=="C2"|baceALL$plot=="C3",]$block<-NA baceALL[which(as.numeric(baceALL$plot)<13),]$block<-1 baceALL[which(as.numeric(baceALL$plot)<25 & as.numeric(baceALL$plot)>12),]$block<-2 baceALL[which(as.numeric(baceALL$plot)<37 & as.numeric(baceALL$plot)>24),]$block<-3 baceALL[which(as.numeric(baceALL$plot)>36),]$block<-0# for some reason there are a few plots less than 36 that show up as block= 0. 
fix this bace<-subset(baceALL, select=c("site","block","plot","event","year","genus","species", "doy")) #bace$variety <- NA #bace$cult <- NA bace<-bace[!is.na(bace$doy),] bace<-bace[-which(bace$doy==""),] bace<-bace[-which(substr(bace$doy,1,1)=="<"),] bace<-bace[-which(bace$plot=="C1"|bace$plot=="C2"|bace$plot=="C3"|bace$plot=="40"|bace$plot=="41"|bace$plot=="42"),]#outside treatment area return(bace) } ##Farnsworth from Harvard ## ## Data type: BBD,LOD,LUD,FFD ## ## Notes: Contact: Public data, http://harvardforest.fas.harvard.edu:8080/exist/apps/datasets/showData.html?id=hf033 ## ##Question: hf033-01-diameter-1.csv files says plot 17= intact control (Treat=1) but soil temp file says plot17=disturbance control (=d) clean.raw$farnsworth <- function(filename="hf033-01-diameter-1.csv", path="./Data/Experiments/farnsworth/",names.only=FALSE) { file <- file.path(path, filename) farnsworth1 <- read.csv(file, check.names=FALSE, header=TRUE) #phenological stage 1.5=budburst; need to get day of year for which this occurred colnames(farnsworth1)[3]<-"plot" farnsworth1$bb_doy<-NA for(i in 1:dim(farnsworth1)[1]){ inddat<-farnsworth1[i,20:31] names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23") bbdate<-names(inddat)[min(which(inddat==1.5))]#1.5="leaves just emerging" bbdoy<-strftime(bbdate, format = "%j") farnsworth1$bb_doy[i]<-bbdoy } #now phenological stage 2=leaves expanding; need to get day of year for which this occurred farnsworth1$leafunf_doy<-NA for(i in 1:dim(farnsworth1)[1]){ inddat<-farnsworth1[i,20:31] names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23") ludate<-names(inddat)[min(which(inddat==2))] ludoy<-strftime(ludate, format = "%j") farnsworth1$leafunf_doy[i]<-ludoy } #now phenological stage 3=leaves fully 
expanded=leafout; need to get day of year for which this occurred farnsworth1$leafout_doy<-NA for(i in 1:dim(farnsworth1)[1]){ inddat<-farnsworth1[i,20:31] names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23") lodate<-names(inddat)[min(which(inddat==3))] lodoy<-strftime(lodate, format = "%j") farnsworth1$leafout_doy[i]<-lodoy } #now flowering phenological stage (4.5)=mature leaves and flowers present;first flowering=day of year for which this first occurred farnsworth1$ffd<-NA for(i in 1:dim(farnsworth1)[1]){ inddat<-farnsworth1[i,20:31] names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23") ffdate<-names(inddat)[min(which(inddat==4.5))] ffdoy<-strftime(ffdate, format = "%j") farnsworth1$ffd[i]<-ffdoy } #now fruiting phenological stage (5)=mature leaves and fruits present=first fruiting is first date this was observed farnsworth1$ffrd<-NA for(i in 1:dim(farnsworth1)[1]){ inddat<-farnsworth1[i,20:31] names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23") ffrdate<-names(inddat)[min(which(inddat==5))] ffrdoy<-strftime(ffrdate, format = "%j") farnsworth1$ffrd[i]<-ffrdoy } #now leaf coloration=leaves turned color" first date this was observed farnsworth1$col<-NA for(i in 1:dim(farnsworth1)[1]){ inddat<-farnsworth1[i,20:31] names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23") coldate<-names(inddat)[min(which(inddat==7))] coldoy<-strftime(coldate, format = "%j") farnsworth1$col[i]<-coldoy } #now leaf drop="some or all leaves abscised" (8,9)-first date this was observed 
farnsworth1$drop<-NA for(i in 1:dim(farnsworth1)[1]){ inddat<-farnsworth1[i,20:31] names(inddat)[1:12]<-c("1993-04-16","1993-04-23","1993-05-2","1993-05-17","1993-05-24" ,"1993-06-07","1993-07-09","1993-07-23","1993-08-11","1993-09-09","1993-09-25","1993-10-23") dropdate<-names(inddat)[min(which(inddat>7))] dropdoy<-strftime(dropdate, format = "%j") farnsworth1$drop[i]<-dropdoy } farnsworth1$genus<-NA farnsworth1$species1<-NA farnsworth1$genus[farnsworth1$species=="aaga"] <- "Amelanchier" farnsworth1$species1[farnsworth1$species=="aaga"] <- "grandifolia" farnsworth1$genus[farnsworth1$species=="beech"] <- "Fagus" farnsworth1$species1[farnsworth1$species=="beech"] <- "grandifolia" farnsworth1$genus[farnsworth1$species=="bbhg"] <- "Vaccinium" farnsworth1$species1[farnsworth1$species=="bbhg"] <- "corymbosum" farnsworth1$genus[farnsworth1$species=="bbhch"] <- "Vaccinium" farnsworth1$species1[farnsworth1$species=="bbhch"] <- "vacillans" farnsworth1$genus[farnsworth1$species=="blach"] <- "Prunus" farnsworth1$species1[farnsworth1$species=="blach"] <- "serotina" farnsworth1$genus[farnsworth1$species=="crata"] <- "Acer" farnsworth1$species1[farnsworth1$species=="crata"] <- "rubrum" farnsworth1$genus[farnsworth1$species=="ro"] <- "Quercus" farnsworth1$species1[farnsworth1$species=="ro"] <- "rubra" farnsworth1$genus[farnsworth1$species=="sa"] <- "Sorbus" farnsworth1$species1[farnsworth1$species=="sa"] <- "americana" farnsworth1$genus[farnsworth1$species=="wo"] <- "Quercus" farnsworth1$species1[farnsworth1$species=="wo"] <- "alba" farnsworth1$genus[farnsworth1$species=="viac1"] <- "Viburnum" farnsworth1$species1[farnsworth1$species=="viac1"] <- "acerifolium" farnsworth1$genus[farnsworth1$species=="sm"] <- "Acer" farnsworth1$species1[farnsworth1$species=="sm"] <- "pensylvanicum" farnsworth1$genus[farnsworth1$species=="chest"] <- "Castanea" farnsworth1$species1[farnsworth1$species=="chest"] <- "dentata" farnsworth1$genus[farnsworth1$species=="vest"] <- "Viburnum" 
farnsworth1$species1[farnsworth1$species=="vest"] <- "lentago" farnsworth1$genus[farnsworth1$species=="rm"] <- "Acer" farnsworth1$species1[farnsworth1$species=="rm"] <- "rubrum" farnsworth1$site<-"exp08" #farnsworth1$variety <- NA #farnsworth1$cult <- NA farnsworth1$event <- NA farnsworth1$year <- 1993 #pull out all budburst rows farnsworth2<-farnsworth1[which(farnsworth1$bb_doy>0),] farnsworth2a<-subset(farnsworth2, select=c("site","plot","event","year","genus","species1","bb_doy")) colnames(farnsworth2a)[6]<-"species" colnames(farnsworth2a)[7]<-"doy" farnsworth2a$event <- "bbd" #pull out all leafunf rows farnsworth3<-farnsworth1[which(farnsworth1$leafunf_doy>0),] farnsworth3a<-subset(farnsworth3, select=c("site","plot","event","year","genus","species1","leafunf_doy")) colnames(farnsworth3a)[6]<-"species" colnames(farnsworth3a)[7]<-"doy" farnsworth3a$event <- "lud" #pull out all leafout rows farnsworth4<-farnsworth1[which(farnsworth1$leafout_doy>0),] farnsworth4a<-subset(farnsworth4, select=c("site","plot","event","year","genus","species1","leafout_doy")) colnames(farnsworth4a)[6]<-"species" colnames(farnsworth4a)[7]<-"doy" farnsworth4a$event <- "lod" #pull out all flowering rows farnsworth5<-farnsworth1[which(farnsworth1$ffd>0),] farnsworth5a<-subset(farnsworth5, select=c("site","plot","event","year","genus","species1","ffd")) colnames(farnsworth5a)[6]<-"species" colnames(farnsworth5a)[7]<-"doy" farnsworth5a$event <- "ffd" #pull out all fruiting rows farnsworth6<-farnsworth1[which(farnsworth1$ffrd>0),] farnsworth6a<-subset(farnsworth6, select=c("site","plot","event","year","genus","species1","ffrd")) colnames(farnsworth6a)[6]<-"species" colnames(farnsworth6a)[7]<-"doy" farnsworth6a$event <- "ffrd" #pull out all coloration rows farnsworth7<-farnsworth1[which(farnsworth1$col>0),] farnsworth7a<-subset(farnsworth7, select=c("site","plot","event","year","genus","species1","col")) colnames(farnsworth7a)[6]<-"species" colnames(farnsworth7a)[7]<-"doy" farnsworth7a$event 
<- "col" #pull out all drop rows farnsworth8<-farnsworth1[which(farnsworth1$drop>0),] farnsworth8a<-subset(farnsworth7, select=c("site","plot","event","year","genus","species1","drop")) colnames(farnsworth8a)[6]<-"species" colnames(farnsworth8a)[7]<-"doy" farnsworth8a$event <- "drop" alldat<- rbind(farnsworth2a,farnsworth3a,farnsworth3a,farnsworth4a,farnsworth5a,farnsworth6a,farnsworth7a,farnsworth8a) alldat$block<-NA alldat[alldat$plot<4,]$block<-1 alldat[alldat$plot==4|alldat$plot==5|alldat$plot==6,]$block=2 alldat[alldat$plot==7|alldat$plot==8|alldat$plot==9,]$block=3 alldat[alldat$plot==10|alldat$plot==11|alldat$plot==12,]$block=4 alldat[alldat$plot==13|alldat$plot==14|alldat$plot==15,]$block=5 alldat[alldat$plot==16|alldat$plot==17|alldat$plot==18,]$block=6 farnsworth<-subset(alldat, select=c("site","block","plot","event","year","genus","species","doy")) return(farnsworth) } ###Cleland et al Jasper Ridge data ###FFD clean.raw$cleland <- function(filename="JasperRidge_data.csv", path="./Data/Experiments/cleland") { file <- file.path(path, filename) cleland1 <- read.csv(file, check.names=FALSE, header=TRUE) cleland1<-cleland1[cleland1$CO2==1,]#remove plots with CO2 added cleland1<-cleland1[cleland1$Nutrient==1,]#remove plots with N added colnames(cleland1)[8]<-"genus" cleland1$species<-NA cleland1$species[cleland1$genus=="Crepis"] <- "vessicaria" cleland1$species[cleland1$genus=="Erodium"] <- "brachycarpum" cleland1$species[cleland1$genus=="Geranium"] <- "dissectum" cleland1$species[cleland1$genus=="Lolium"] <- "multiflorum" cleland1$species[cleland1$genus=="Vicia"] <- "sativa" cleland1$species[cleland1$genus=="Vulpia"] <- "myuros" cleland1$species[cleland1$genus=="Bromusd"] <- "diandrus" cleland1$species[cleland1$genus=="Bromush"] <- "hordeaceus" cleland1$genus[cleland1$genus=="Bromusd"] <- "Bromus" cleland1$genus[cleland1$genus=="Bromush"] <- "Bromus" colnames(cleland1)[10]<-"doy" cleland1$site<-"exp05" cleland1$event<-"ffd" cleland1$block<-NA 
cleland1[cleland1$plot==1|cleland1$plot==2|cleland1$plot==3|cleland1$plot==33|cleland1$plot==4,]$block<-1 cleland1[cleland1$plot==12|cleland1$plot==7|cleland1$plot==8|cleland1$plot==9,]$block<-2 cleland1[cleland1$plot==10|cleland1$plot==11|cleland1$plot==5|cleland1$plot==6,]$block<-3 cleland1[cleland1$plot==13|cleland1$plot==14|cleland1$plot==26|cleland1$plot==32|cleland1$plot==34,]$block<-4 cleland1[cleland1$plot==15|cleland1$plot==16|cleland1$plot==17|cleland1$plot==18,]$block<-5 cleland1[cleland1$plot==19|cleland1$plot==20|cleland1$plot==21|cleland1$plot==31|cleland1$plot==35,]$block<-6 cleland1[cleland1$plot==27|cleland1$plot==28|cleland1$plot==29|cleland1$plot==30,]$block<-7 cleland1[cleland1$plot==22|cleland1$plot==23|cleland1$plot==24|cleland1$plot==25|cleland1$plot==36,]$block<-8 colnames(cleland1)[5]<-"plot2" cleland1$plot<-paste(cleland1$plot2,cleland1$quad,sep="-") cleland<-subset(cleland1, select=c("site","block","plot","event","year","genus","species", "doy")) #cleland$variety <- NA #cleland$cult <- NA cleland<-cleland[!is.na(cleland$doy),] return(cleland) } ##Clark et al from Duke ## ## Data type: BBD,LUD, LOD ## ## Notes: Contact: Public data ## clean.raw$clarkduke <- function(filename, path="./Data/Experiments/clark/") { clarkdukeplots<-c("DF_G01_A.csv","DF_G02_5.csv","DF_G03_3.csv","DF_G04_A.csv","DF_G05_3.csv","DF_G06_5.csv","DF_G07_A.csv","DF_G08_5.csv","DF_G09_3.csv","DF_G10_C.csv","DF_G11_C.csv","DF_G12_C.csv","DF_S01_5.csv","DF_S02_3.csv","DF_S03_A.csv","DF_S04_A.csv","DF_S05_3.csv","DF_S06_5.csv","DF_S07_5.csv","DF_S08_A.csv","DF_S09_3.csv","DF_S10_C.csv","DF_S11_C.csv","DF_S12_C.csv") clarkduke <- NA spfile <- file.path(path, "speciesList_clark.csv") specieslist<-read.csv(spfile, header=TRUE) for (i in 1:length(clarkdukeplots)){ file <- file.path(path, paste(clarkdukeplots[i])) clarkduke1 <- read.csv(file, check.names=FALSE, header=TRUE) clarkduke1$genus<-NA clarkduke1$species<-NA species1<-unique(clarkduke1$Species) for (j in 
1:length(species1)){ clarkduke1$genus[clarkduke1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$genus clarkduke1$species[clarkduke1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$species } clarkduke1$site<-"exp03" clarkduke1$plot<-substr(clarkduke1$Chamber,1,3) #estimate first date of budburst, leaf unfolding, and leaf out get.bbd <- function(x) names(x)[min(which(x==3), na.rm=T)]#budburst get.lud <- function(x) names(x)[min(which(x==4), na.rm=T)]#leaves unfolding get.lod <- function(x) names(x)[min(which(x==6), na.rm=T)]#leafout bbd_2010<-substr(apply(clarkduke1[,17:30],1,get.bbd),6,13) bbd_2011<-substr(apply(clarkduke1[,31:55],1,get.bbd),6,13) bbd_2012<-substr(apply(clarkduke1[,56:81],1,get.bbd),6,13) bbd_2013<-substr(apply(clarkduke1[,82:101],1,get.bbd),6,13) lud_2010<-substr(apply(clarkduke1[,17:30],1,get.lud),6,13) lud_2011<-substr(apply(clarkduke1[,31:55],1,get.lud),6,13) lud_2012<-substr(apply(clarkduke1[,56:81],1,get.lud),6,13) lud_2013<-substr(apply(clarkduke1[,82:101],1,get.lud),6,13) lod_2010<-substr(apply(clarkduke1[,17:30],1,get.lod),6,13) lod_2011<-substr(apply(clarkduke1[,31:55],1,get.lod),6,13) lod_2012<-substr(apply(clarkduke1[,56:81],1,get.lod),6,13) lod_2013<-substr(apply(clarkduke1[,82:101],1,get.lod),6,13) bbd2010_doy<-strftime(strptime(bbd_2010, format = "%m.%d.%y"),format = "%j") bbd2011_doy<-strftime(strptime(bbd_2011, format = "%m.%d.%y"),format = "%j") bbd2012_doy<-strftime(strptime(bbd_2012, format = "%m.%d.%y"),format = "%j") bbd2013_doy<-strftime(strptime(bbd_2013, format = "%m.%d.%y"),format = "%j") lud2010_doy<-strftime(strptime(lud_2010, format = "%m.%d.%y"),format = "%j") lud2011_doy<-strftime(strptime(lud_2011, format = "%m.%d.%y"),format = "%j") lud2012_doy<-strftime(strptime(lud_2012, format = "%m.%d.%y"),format = "%j") lud2013_doy<-strftime(strptime(lud_2013, format = "%m.%d.%y"),format = "%j") lod2010_doy<-strftime(strptime(lod_2010, format = "%m.%d.%y"),format = "%j") 
lod2011_doy<-strftime(strptime(lod_2011, format = "%m.%d.%y"),format = "%j") lod2012_doy<-strftime(strptime(lod_2012, format = "%m.%d.%y"),format = "%j") lod2013_doy<-strftime(strptime(lod_2013, format = "%m.%d.%y"),format = "%j") clarkduke2<-cbind(clarkduke1,bbd2010_doy,bbd2011_doy,bbd2012_doy,bbd2013_doy,lud2010_doy,lud2011_doy,lud2012_doy,lud2013_doy,lod2010_doy,lod2011_doy,lod2012_doy,lod2013_doy) clarkduke2a<-subset(clarkduke2, select=c("site","plot","genus","species","bbd2010_doy","bbd2011_doy","bbd2012_doy","bbd2013_doy","lud2010_doy","lud2011_doy","lud2012_doy","lud2013_doy","lod2010_doy","lod2011_doy","lod2012_doy","lod2013_doy")) clarkduke3<-reshape(clarkduke2a,varying = list(names(clarkduke2a)[5:8], names(clarkduke2a)[9:12],names(clarkduke2a)[13:16]), direction = "long", v.names = c("BBD","LUD", "LOD"), times = c(2010:2013)) clarkduke3<-clarkduke3[,-9] colnames(clarkduke3)[5]<-"year" clarkduke4<-reshape(clarkduke3,varying = list(names(clarkduke3)[6:8]), direction = "long", v.names = c("doy"), times = c(1:3)) clarkduke4$event<-c(rep("bbd", times=dim(clarkduke3)[1]),rep("lud", times=dim(clarkduke3)[1]),rep("lod", times=dim(clarkduke3)[1])) #clarkduke4$variety <- NA #clarkduke4$cult <- NA clarkduke4$block<-NA clarkduke5<-subset(clarkduke4, select=c("site","block","plot","event","year","genus","species","doy")) clarkduke<-rbind(clarkduke,clarkduke5) } clarkduke<-clarkduke[!is.na(clarkduke$doy),] clarkduke<-clarkduke[-which(clarkduke$genus=="Ob"),]#unknown genus at clarkduke clarkduke[which(clarkduke$genus=="Carya "),]$genus<-"Carya" return(clarkduke) } ##Clark et al from Harvard ## ## Data type: BBD,LUD,LOD ## ## Notes: Contact: Public data ## clean.raw$clarkharvard <- function(filename, path="./Data/Experiments/clark") { 
clarkharvardplots<-c("HF_G01_3.csv","HF_G02_A.csv","HF_G03_5.csv","HF_G04_A.csv","HF_G05_5.csv","HF_G06_3.csv","HF_G07_A.csv","HF_G08_3.csv","HF_G09_5.csv","HF_G10_C.csv","HF_G11_C.csv","HF_G12_C.csv","HF_S01_5.csv","HF_S02_A.csv","HF_S03_3.csv","HF_S04_5.csv","HF_S05_A.csv","HF_S06_3.csv","HF_S07_A.csv","HF_S08_3.csv","HF_S09_5.csv","HF_S10_C.csv","HF_S11_C.csv","HF_S12_C.csv") clarkharvard <- c() spfile <- file.path(path, "speciesList_clark.csv") specieslist<-read.csv(spfile, header=TRUE) for (i in 1:length(clarkharvardplots)){ file <- file.path(path, paste(clarkharvardplots[i])) clarkharvard1 <- read.csv(file, check.names=FALSE, header=TRUE) clarkharvard1$genus<-NA clarkharvard1$species<-NA species1<-unique(clarkharvard1$Species) for (j in 1:length(species1)){ clarkharvard1$genus[clarkharvard1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$genus clarkharvard1$species[clarkharvard1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$species } clarkharvard1$site<-"exp04" #estimate first date of budburst, leaf unfolding, and leaf out get.bbd <- function(x) names(x)[min(which(x==3), na.rm=T)]#budburst get.lud <- function(x) names(x)[min(which(x==4), na.rm=T)]#leaves unfolding get.lod <- function(x) names(x)[min(which(x==6), na.rm=T)]#leafout bbd_2010<-substr(apply(clarkharvard1[,19:26],1,get.bbd),6,13) bbd_2011<-substr(apply(clarkharvard1[,27:31],1,get.bbd),6,13) bbd_2012<-substr(apply(clarkharvard1[,32:44],1,get.bbd),6,13) lud_2010<-substr(apply(clarkharvard1[,19:26],1,get.lud),6,13) lud_2011<-substr(apply(clarkharvard1[,27:31],1,get.lud),6,13) lud_2012<-substr(apply(clarkharvard1[,32:44],1,get.lud),6,13) lod_2010<-substr(apply(clarkharvard1[,19:26],1,get.lod),6,13) lod_2011<-substr(apply(clarkharvard1[,27:31],1,get.lod),6,13) lod_2012<-substr(apply(clarkharvard1[,32:44],1,get.lod),6,13) bbd2010_doy<-strftime(strptime(bbd_2010, format = "%m.%d.%y"),format = "%j") bbd2011_doy<-strftime(strptime(bbd_2011, format = 
"%m.%d.%y"),format = "%j") bbd2012_doy<-strftime(strptime(bbd_2012, format = "%m.%d.%y"),format = "%j") lud2010_doy<-strftime(strptime(lud_2010, format = "%m.%d.%y"),format = "%j") lud2011_doy<-strftime(strptime(lud_2011, format = "%m.%d.%y"),format = "%j") lud2012_doy<-strftime(strptime(lud_2012, format = "%m.%d.%y"),format = "%j") lod2010_doy<-strftime(strptime(lod_2010, format = "%m.%d.%y"),format = "%j") lod2011_doy<-strftime(strptime(lod_2011, format = "%m.%d.%y"),format = "%j") lod2012_doy<-strftime(strptime(lod_2012, format = "%m.%d.%y"),format = "%j") clarkharvard2<-cbind(clarkharvard1,bbd2010_doy,bbd2011_doy,bbd2012_doy,lud2010_doy,lud2011_doy,lud2012_doy,lod2010_doy,lod2011_doy,lod2012_doy) clarkharvard2$plot<-substr(clarkharvard1$Chamber,1,3) clarkharvard2a<-subset(clarkharvard2, select=c("site","plot","genus","species","bbd2010_doy","bbd2011_doy","bbd2012_doy","lud2010_doy","lud2011_doy","lud2012_doy","lod2010_doy","lod2011_doy","lod2012_doy")) clarkharvard3<-reshape(clarkharvard2a,varying = list(names(clarkharvard2a)[5:7], names(clarkharvard2a)[8:10],names(clarkharvard2a)[11:13]), direction = "long", v.names = c("BBD","LUD", "LOD"), times = c(2010:2012)) clarkharvard3<-clarkharvard3[,-9] colnames(clarkharvard3)[5]<-"year" clarkharvard4<-reshape(clarkharvard3,varying = list(names(clarkharvard3)[6:8]), direction = "long", v.names = c("doy"), times = c(1:3)) clarkharvard4$event<-c(rep("bbd", times=dim(clarkharvard3)[1]),rep("lud", times=dim(clarkharvard3)[1]),rep("lod", times=dim(clarkharvard3)[1])) #clarkharvard4$variety <- NA #clarkharvard4$cult <- NA clarkharvard4$block<-NA clarkharvard5<-subset(clarkharvard4, select=c("site","block","plot","event","year","genus","species","doy")) clarkharvard<-rbind(clarkharvard,clarkharvard5) } clarkharvard<-clarkharvard[!is.na(clarkharvard$doy),] return(clarkharvard) } ##Sherry from Oklahoma## ## Data type: FFD, FFRD ## ## Notes: Rebecca Sherry #Phenological stages for Forbs: F0=vegetative plants; F1, unopened buds; 
F2, open flowers; F3, old flowers (postanthesis); F4, initiated fruit; F5,expanding fruit; and F6, dehisced fruit.
#Phenological stages For grasses: G0, plants with flower stalks (in boot); G1, spikelets present (out of boot); G2,exerted anthers or styles; G3, past the presence of anthers and styles (seed development); and G4, disarticulating florets.
#For forb species with very small flowers and fruits that were difficult to observe, stage 3 (initiated fruit) and stage 4 (expanding fruit) were lumped into a category of ‘‘fruit present,’’ (i.e., a score of F4.5)
# Clean the Sherry (Oklahoma, exp12) raw phenology files into the standard
# site/block/plot/event/year/genus/species/doy long format.
# Survey columns are named for the survey day of year (e.g. "X136"), so the
# first day a stage window was reached is recovered from the name of the
# first matching column. Returns a data frame; rows with no observed event
# (doy = NA) are dropped.
clean.raw$sherry <- function(filename, path) {
  # First flower = first score in [2.5, 3.5]; first fruit = score in (3.5, 5.5].
  # which() never returns NA, so when no column matches, [1] yields NA_integer_
  # and names(x)[NA] is NA (the original's min(which(...), na.rm=T) produced the
  # same NA via names(x)[Inf], but with a warning).
  get.ffd <- function(x) names(x)[which(x <= 3.5 & x >= 2.5)[1]]   # first flower date
  get.ffrd <- function(x) names(x)[which(x <= 5.5 & x >= 3.5)[1]]  # first fruit date

  ## Part 1: one file per species, all in a common layout ---------------------
  sherryspp <- c("SherryPhenology2003_Achillea.csv", "SherryPhenology2003_Ambrosia.csv",
                 "SherryPhenology2003_Andropogon.csv", "SherryPhenology2003_Erigeron.csv",
                 "SherryPhenology2003_Panicum.csv", "SherryPhenology2003_Schizachyrium.csv")
  gen <- c("Achillea", "Ambrosia", "Andropogon", "Erigeron", "Panicum", "Schizachyrium")
  sp <- c("millefolium", "psilostachya", "gerardii", "strigosus", "virgatum", "scoparium")
  pieces <- vector("list", length(sherryspp))
  for (i in seq_along(sherryspp)) {
    sherry1 <- read.csv(file.path(path, sherryspp[i]), skip = 3, header = TRUE)
    colnames(sherry1)[which(colnames(sherry1) == "Plot")] <- "plot"
    # Survey columns run from the first "X<doy>" column through the last column
    firstsurv <- min(which(substr(colnames(sherry1), 1, 1) == "X"))
    lastsurv <- dim(sherry1)[2]
    surv <- sherry1[, firstsurv:lastsurv]
    # substr(, 2, 4) strips the leading "X" from the winning column name
    ffd_doy <- substr(apply(surv, 1, get.ffd), 2, 4)
    ffrd_doy <- substr(apply(surv, 1, get.ffrd), 2, 4)
    pieces[[i]] <- data.frame(plot = sherry1$plot, genus = gen[i], species = sp[i],
                              ffd_doy = ffd_doy, ffrd_doy = ffrd_doy,
                              stringsAsFactors = FALSE)
  }
  sherry <- do.call(rbind, pieces)
  sherry4 <- reshape(sherry, varying = list(names(sherry)[4:5]), direction = "long",
                     v.names = c("doy"), times = c(1:2))
  # reshape() stacks all time=1 rows (ffd) before all time=2 rows (ffrd)
  sherry4$event <- c(rep("ffd", times = dim(sherry)[1]), rep("ffrd", times = dim(sherry)[1]))

  ## Part 2: "First6spp" file -- six species stacked in one sheet, each with --
  ## its own header row, survey-column range, and doy-name format.
  sherry5 <- read.csv(file.path(path, "SherryPhenology2003_First6spp.csv"), skip = 3, header = TRUE)
  # rows: data rows in sherry5; hdr: row holding that species' column names
  # (NA = keep the file header); cols: survey columns; from/to: substr() window
  # extracting the doy from the winning column name; strip: stray prefix that
  # must be removed from some extracted values (e.g. "d136" -> "136").
  # List order matches the original rbind order exactly.
  specs <- list(
    list(rows = 162:201, hdr = 161, cols = 5:14, from = 1, to = 4, strip = "d",
         genus = "Bromus", species = "arvensis"),
    list(rows = 205:235, hdr = 204, cols = 5:14, from = 2, to = 4, strip = NULL,
         genus = "Dichanthelium", species = "oligosanthes"),
    list(rows = 1:40, hdr = NA, cols = 5:14, from = 4, to = 6, strip = NULL,
         genus = "Veronica", species = "arvensis"),
    list(rows = 44:82, hdr = 43, cols = 5:14, from = 2, to = 4, strip = NULL,
         genus = "Viola", species = "bicolor"),
    list(rows = 123:158, hdr = 122, cols = 5:14, from = 2, to = 4, strip = NULL,
         genus = "Plantago", species = "virginica"),
    list(rows = 86:119, hdr = 85, cols = 5:10, from = 2, to = 7, strip = "ay",
         genus = "Cerastium", species = "glomeratum"))
  blocks <- lapply(specs, function(s) {
    dat <- sherry5[s$rows, ]
    if (!is.na(s$hdr)) colnames(dat) <- c(sherry5[s$hdr, ])
    ffd <- substr(apply(dat[, s$cols], 1, get.ffd), s$from, s$to)
    ffrd <- substr(apply(dat[, s$cols], 1, get.ffrd), s$from, s$to)
    if (!is.null(s$strip)) {
      # Drop the junk prefix where present; keeps the same 3-character doy
      # window the original hand-coded per species.
      n <- nchar(s$strip)
      bad <- which(substr(ffd, 1, n) == s$strip)
      ffd[bad] <- substr(ffd[bad], n + 1, n + 3)
      bad <- which(substr(ffrd, 1, n) == s$strip)
      ffrd[bad] <- substr(ffrd[bad], n + 1, n + 3)
    }
    # First column of each species block is the plot identifier
    data.frame(plot = dat[, 1], genus = s$genus, species = s$species,
               ffd_doy = ffd, ffrd_doy = ffrd, stringsAsFactors = FALSE)
  })
  sherry6 <- do.call(rbind, blocks)
  sherry7 <- reshape(sherry6, varying = list(names(sherry6)[4:5]), direction = "long",
                     v.names = c("doy"), times = c(1:2))
  sherry7$event <- c(rep("ffd", times = dim(sherry6)[1]), rep("ffrd", times = dim(sherry6)[1]))

  ## Part 3: combine and attach site/block metadata ----------------------------
  sherry8 <- rbind(sherry4, sherry7)
  sherry8$year <- 2003
  sherry8$site <- "exp12"
  # Blocks are defined by plot-number ranges; plots 5, 6, 15, 16 form block 4.
  # which() keeps the assignments safe if a plot id fails as.numeric().
  pl <- as.numeric(sherry8$plot)
  sherry8$block <- NA
  sherry8$block[which(pl < 5)] <- 1
  sherry8$block[which(pl < 11 & pl > 6)] <- 2
  sherry8$block[which(pl < 15 & pl > 10)] <- 3
  sherry8$block[which(pl == 5 | pl == 6 | pl == 15 | pl == 16)] <- 4
  sherry8$block[which(pl > 16)] <- 5
  sherryok <- subset(sherry8, select = c("site", "block", "plot", "event", "year",
                                         "genus", "species", "doy"))
  # Drop rows where neither event was ever reached
  sherryok[!is.na(sherryok$doy), ]
}
##Price & Wasser data RMBL
## Data type: FFd, FFRD, SD
## Notes: mary.price@ucr.edu
#From Mary: "We chose up to 5 flowering individuals that spanned the elevational range of flowering individuals of that species on the small moraine. The "DIST" column indicates the meters downslope from the upper edge of the plot.
#There is some inconsistency in the ordering of columns. Sometimes the "species" column comes before the "individual ID" column, and sometimes not -- you'll have to look for that if you concatenate files.
#The "comments" column after each census date column sometimes includes estimates of fruiting success, measured as #fruits/#flowers on each plant, along with notes on whether the plant got frosted, parasitized, replaced, and the like. I can help you translate the notes if you have problems and need to know what the notes mean. The plant species vary considerably, as you know, in what fruit set means. Our notes are incomplete (at least, I haven't found the relevant info in spot-checks of notes--since we didn't include fruit set info in the analyses for our published paper, I'd have to search...), but I suspect that for Asteraceae we used the head as the "flower" unit, and probably we used the umbel as the unit for Polygonaceae.
#In all cases, 0 = not flowering; 1 = bud present; 2 = open flower present; 3 = senescent flower present (corolla still attached); 4 = initiated fruit (corolla off); 5 = expanding fruit; 6 = dehisced fruit.
#Of course, these stages mean different things for each species.
#Species names have changed in some cases, or if one uses different authorities. For example, Potentilla gracilis is now P. pulcherrima, and I think Bill Weber is the only one who advocates the genus "Seriphidium" for shrubby Artemisia. So you may want to double-check names.
#The number of files should correspond more-or-less with the number of years reported in the 1998 paper, with the addition of a few files from 1990 censuses (which can probably be left out of analyses since we were working the bugs out of our methods in that year and this was not a warming year).
# Clean the Price & Wasser (RMBL, exp11) raw files. One file per species x
# year; each census column records the concatenated stage digits seen at that
# census (e.g. "123" = buds + open flowers + senescent flowers). Census
# columns are named "X<mm.dd.yy>", so the event date is recovered from the
# first matching column name. Returns the standard long-format data frame,
# with the pre-warming 1990 shakedown year removed.
clean.raw$price <- function(filename, path) {
  pricefiles <- c("ATPHEN90g.csv","ATPHEN91g.csv","ATPHEN92g.csv","ATPHEN94g.csv",
                  "CLPHEN91g.csv","CLPHEN92g.csv","CLPHEN93g.csv","CLPHEN94g.csv",
                  "CRPHEN92g.csv","CRPHEN93g.csv","CRPHEN94g.csv",
                  "DNPHEN90g.csv","DNPHEN91g.csv","DNPHEN92g.csv","DNPHEN93g.csv","DNPHEN94g.csv",
                  "EGPHEN90g.csv","EGPHEN91g.csv","EGPHEN92g.csv","EGPHEN93g.csv","EGPHEN94g.csv",
                  "ESPHEN90g.csv","ESPHEN91g.csv","ESPHEN92g.csv","ESPHEN93g.csv","ESPHEN94g.csv",
                  "IAPHEN90g.csv","IAPHEN91g.csv","IAPHEN92g.csv","IAPHEN93g.csv","IAPHEN94g.csv",
                  "LLPHEN90G.csv","LLPHEN91G.csv","LLPHEN92G.csv","LLPHEN93G.csv","LLPHEN94G.csv",
                  "MFPHEN90G.csv","MFPHEN91g.csv","MFPHEN92g.csv","MFPHEN93g.csv","MFPHEN94g.csv",
                  "PGPHEN91g.csv","PGPHEN92g.csv","PGPHEN93g.csv","PGPHEN94g.csv")
  # The first four files carry a different number of header lines
  skiplines <- c(3, 3, 2, 2, rep(3, times = 41))
  # First census whose stage string includes an open flower / initiated fruit /
  # dehisced fruit, respectively (stage-digit combinations as in the raw data)
  get.ffd <- function(x) names(x)[which(x==2|x==12|x==123|x==1234|x==12345|x==123456)][1]
  get.ffrd <- function(x) names(x)[which(x==4|x==34|x==234|x==1234|x==12345|x==123456)][1]
  get.sd <- function(x) names(x)[which(x==6|x==56|x==456|x==3456|x==2345|x==123456)][1]
  pieces <- vector("list", length(pricefiles))
  for (i in seq_along(pricefiles)) {
    price1 <- read.csv(file.path(path, pricefiles[i]), skip = skiplines[i], header = TRUE)
    colnames(price1)[which(colnames(price1) == "PLOT" | colnames(price1) == "plot")] <- "plot"
    colnames(price1)[which(colnames(price1) == "SPE")] <- "SP"
    # Drop padding rows and repeated in-sheet header rows
    price1 <- price1[!is.na(price1$SP), ]
    price1 <- price1[!is.na(price1$plot), ]
    price1 <- price1[!(price1$plot %in% c("", "JDATE", "PLOT", "jdate",
                                          "1992 WARMED-MEADOW PHENOLOGY")), ]
    firstsurv <- min(which(substr(colnames(price1), 1, 1) == "X"))
    lastsurv <- max(which(substr(colnames(price1), 1, 1) == "X"))
    surv <- price1[, firstsurv:lastsurv]
    # substr(, 2, 9) strips the leading "X" from the census-date column name
    ffd <- substr(apply(surv, 1, get.ffd), 2, 9)
    ffrd <- substr(apply(surv, 1, get.ffrd), 2, 9)
    sd <- substr(apply(surv, 1, get.sd), 2, 9)
    pieces[[i]] <- data.frame(plot = price1$plot, SP = price1$SP,
                              ffd = ffd, ffrd = ffrd, sd = sd,
                              filename = pricefiles[i],
                              # e.g. "ATPHEN90g.csv" -> characters 7-8 = "90"
                              year = paste0("19", substr(pricefiles[i], 7, 8)),
                              stringsAsFactors = FALSE)
  }
  price <- do.call(rbind, pieces)
  price4 <- reshape(price, varying = list(names(price)[3:5]), direction = "long",
                    v.names = c("date"), times = c(names(price)[3:5]))
  # "time" carries the event name (ffd/ffrd/sd)
  colnames(price4)[which(colnames(price4) == "time")] <- "event"
  price4$site <- "exp11"
  # Missing events get an unparseable placeholder so strptime() yields NA doy
  price4$date[which(is.na(price4$date))] <- "NA.NA.NA"
  price4$doy <- strftime(strptime(price4$date, format = "%m.%d.%y"), format = "%j")
  # Two-letter species codes; some files use lowercase. "Artemesia" is the raw
  # data's spelling and is normalized to "Artemisia" downstream -- keep it.
  gmap <- c(AT = "Artemesia", CL = "Claytonia", CR = "Campanula", DN = "Delphinium",
            EG = "Erythronium", ES = "Eriogonum", es = "Eriogonum", IA = "Ipomopsis",
            ia = "Ipomopsis", LL = "Lathyrus", MF = "Mertensia", PG = "Potentilla")
  smap <- c(AT = "tridentata", CL = "lanceolata", CR = "rotundifolia", DN = "nelsonii",
            EG = "grandiflorum", ES = "subalpinum", es = "subalpinum", IA = "aggregata",
            ia = "aggregata", LL = "leucanthus", MF = "fusiformes", PG = "gracilis")
  price4$genus <- unname(gmap[as.character(price4$SP)])    # unmapped codes -> NA
  price4$species <- unname(smap[as.character(price4$SP)])
  price4 <- price4[!is.na(price4$doy), ]
  price4$block <- NA
  price5 <- subset(price4, select = c("site", "block", "plot", "event", "year",
                                      "genus", "species", "doy"))
  # Remove 1990: pre-warming shakedown year
  price5[which(price5$year != "1990"), ]
}
##Data from Chuine
##no plots listed for 2003
# Clean the Chuine (exp02) files: BBCH-coded date columns (X55 = first flower
# bud, X65 = first flowering, X85 = first ripe fruit), one file per year.
clean.raw$chuine <- function(filename, path="./Data/Experiments/chuine") {
  chuinefiles <- c("Chuine_pheno_2002.csv", "Chuine_pheno_2003_cleaned.csv", "Chuine_pheno_2004.csv")
  years <- c(2002, 2003, 2004)
  pieces <- vector("list", length(chuinefiles))
  for (i in seq_along(chuinefiles)) {
    chuine1 <- read.csv(file.path(path, chuinefiles[i]), header = TRUE)
    colnames(chuine1)[which(colnames(chuine1) == "Block")] <- "block"
    chuine1$plot <- paste(chuine1$block, chuine1$Plot, sep = "")
    colnames(chuine1)[which(colnames(chuine1) == "species" | colnames(chuine1) == "Species")] <- "sp"
    colnames(chuine1)[which(colnames(chuine1) == "X55")] <- "ffb"
    colnames(chuine1)[which(colnames(chuine1) == "X65")] <- "ffd"
    colnames(chuine1)[which(colnames(chuine1) == "X85")] <- "ffrd"
    colnames(chuine1)[which(colnames(chuine1) == "X91")] <- "91"
    #colnames(chuine1)[which(colnames(chuine1)=="X95")]<-"sen"
    chuine1 <- chuine1[!is.na(chuine1$sp), ]
    # Pivot the ffb..ffrd column range to long form; "time" carries the event
    phen1 <- which(colnames(chuine1) == "ffb")
    phen2 <- which(colnames(chuine1) == "ffrd")
    chuine2 <- reshape(chuine1, varying = list(names(chuine1)[phen1:phen2]),
                       direction = "long", v.names = c("date"),
                       times = c(names(chuine1)[phen1:phen2]))
    chuine2$year <- paste(years[i])
    chuine2 <- chuine2[!chuine2$date == "", ]
    colnames(chuine2)[which(colnames(chuine2) == "time")] <- "event"
    # Date format differs by year: dd/mm/yy in 2002, dd-mon in 2003/2004
    # (NOTE(review): "%d-%b" parsing is locale-dependent)
    fmt <- if (years[i] == 2002) "%d/%m/%y" else "%d-%b"
    chuine2$doy <- strftime(strptime(chuine2$date, format = fmt), format = "%j")
    pieces[[i]] <- subset(chuine2, select = c("block", "plot", "sp", "event", "year", "doy"))
  }
  chuine <- do.call(rbind, pieces)
  # "Artemesia" spelling kept as in raw data; normalized downstream
  gmap <- c(aa = "Artemesia", av = "Artemesia", ar = "Amaranthus", ad = "Amaranthus",
            qr = "Quercus", qp = "Quercus", qi = "Quercus", lr = "Lolium",
            lp = "Lolium", sv = "Setaria", sp = "Setaria", lp3 = "Lolium")
  smap <- c(aa = "annua", av = "vulgaris", ar = "retroflexus", ad = "deflexus",
            qr = "robur", qp = "pubescens", qi = "ilex", lr = "rigidum",
            lp = "perenne", sv = "viridis", sp = "parviflora", lp3 = "perenne")
  chuine$genus <- unname(gmap[as.character(chuine$sp)])
  chuine$species <- unname(smap[as.character(chuine$sp)])
  chuine$site <- "exp02"
  subset(chuine, select = c("site", "block", "plot", "event", "year", "genus", "species", "doy"))
}
##Data from FORCE
##Contact: Christy Rollinson
# Clean the FORCE (exp09) inventories. Phenology.State codes map to events
# via statemap below (1 -> lod, 2 -> ffd, 3 -> ffrd, 4 -> sd, 5 -> sen).
clean.raw$force <- function(filename="FORCE_Inventories_2009_2010_clean.csv", path="./Data/Experiments/force") {
  force1 <- read.csv(file.path(path, filename), check.names = FALSE, header = TRUE)
  force1$plot <- paste(force1$Block, force1$Treatment, sep = "")
  force1$block <- force1$Block
  # Earliest survey doy per year/block/plot/species/phenology-state
  force2 <- aggregate(x = force1$Survey.DOY,
                      by = list(force1$Year, force1$block, force1$plot,
                                force1$Species, force1$Phenology.State),
                      FUN = min, na.rm = FALSE)
  colnames(force2) <- c("year", "block", "plot", "SP", "phenstate", "doy")
  statemap <- c("1" = "lod", "2" = "ffd", "3" = "ffrd", "4" = "sd", "5" = "sen")
  force2$event <- unname(statemap[as.character(force2$phenstate)])
  # Drop unmapped states and junk species codes (Excel date artifacts,
  # unidentifiables). Logical masks instead of -which(): when which() matches
  # nothing, x[-integer(0), ] silently drops EVERY row.
  force3 <- force2[!is.na(force2$event), ]
  force3 <- force3[!(force3$SP %in% c("11-Oct", "9-Oct", "CEOB", "U44")), ]
  specieslist <- read.csv(file.path(path, "Species_List.csv"), header = TRUE)
  force3$genussp <- NA
  species1 <- unique(force3$SP)
  # NOTE(review): "U80" is remapped to "ARMI" in the lookup keys only, so rows
  # coded U80 keep genussp = NA and are dropped later; preserved as-is.
  species1[which(species1 == "U80")] <- "ARMI"
  for (j in seq_along(species1)) {
    force3$genussp[force3$SP == species1[j]] <- specieslist[specieslist$Species.CODE == species1[j], ]$Species[1]
  }
  force4 <- force3 %>% separate(genussp, c("genus", "species"), sep = " ", remove = FALSE)
  # Hand-fixes for typos in the species list; which()-indexed assignment is a
  # no-op (not an error) when nothing matches and is NA-safe.
  force4$genus[which(force4$genus == "Sisynchium")] <- "Sisyrinchium"
  force4$species[which(force4$genus == "Dianthus")] <- "armeria"
  force4$genus[which(force4$genus == "Amphicarpa")] <- "Amphicarpaea"
  force4$genus[which(force4$genus == "Actea")] <- "Actaea"
  force4$species[which(force4$species == "soria")] <- "sororia"
  force4$species[which(force4$species == "lavae")] <- "laeve"
  force4$species[which(force4$species == "abortivis")] <- "abortivus"
  force4$species[which(force4$species == "gramanifolia")] <- "graminifolia"
  force4$species[which(force4$genus == "Rubus")] <- "occidentalis"
  force4$species[which(force4$species == "anuus")] <- "annuus"
  force4$species[which(force4$genus == "Ceanothus")] <- "americanus"
  force4$species[which(force4$species == "hieraciifolia")] <- "hieraciifolius"
  #force4[which(force4$genus=="Oenethera"),]$genus<-"Oenothera"
  force4$site <- "exp09"
  subset(force4, select = c("site", "block", "plot", "event", "year", "genus", "species", "doy"))
}
##Data from Aaron Ellison's warming/phenology/ant experiment at Harvard Forest
##Spring and Fall phenology
##Contact: Aaron Ellison
# Clean the Ellison (Harvard Forest, exp07) data. Phenology codes map via
# codemap below (S3 -> bbd, S4 -> lud, S5 -> lod, F2 -> sen, F3 -> drop).
clean.raw$ellison <- function(filename="hf113-27-hf-phenology.csv", path="./Data/Experiments/ellison") {
  ellison1 <- read.csv(file.path(path, filename), check.names = FALSE, header = TRUE)
  colnames(ellison1)[2] <- "plot"
  colnames(ellison1)[4] <- "genussp"
  # Day of year and calendar year from the mm/dd/yy date stamp
  ellison1$doy <- strftime(strptime(ellison1$date, format = "%m/%d/%y"), format = "%j")
  ellison1$year <- strftime(strptime(ellison1$date, format = "%m/%d/%y"), format = "%Y")
  # First doy each phenophase was recorded, per year/plot/species/plant
  ellison2 <- aggregate(x = ellison1$doy,
                        by = list(ellison1$year, ellison1$plot, ellison1$genussp,
                                  ellison1$plant, ellison1$phenology),
                        FUN = min, na.rm = FALSE)
  # Species names are "genus_species" strings
  ellison3 <- ellison2 %>% separate(Group.3, c("genus", "species"), sep = "_", remove = FALSE)
  colnames(ellison3) <- c("year", "plot", "gensp", "genus", "species", "plant", "phenology", "doy")
  codemap <- c(F2 = "sen", F3 = "drop", S3 = "bbd", S4 = "lud", S5 = "lod")
  ellison3$event <- unname(codemap[as.character(ellison3$phenology)])  # others -> NA
  # Plots 13-15 are collapsed into a single "OUT" plot id
  ellison3$plot[which(ellison3$plot %in% c("13", "14", "15"))] <- "OUT"
  ellison3$site <- "exp07"
  ellison3$block <- NA
  subset(ellison3, select = c("site", "block", "plot", "event", "year", "genus", "species", "doy"))
}
##Data from Jennifer Dunne's study at RMBL
##Phenological stages:0=not yet flowering, 1=unopened flower buds, 2=open flowers,3
=old flowers,4=initiated fruit, 5=enlarged fruit, and 6= for dehisced fruit.
#For Festuca we used five phenological stages: 0=plant with flower stalks,1=presence of spikelets, 2=exerted anthers and styles from the spikelet florets, 3=dried and broken-off anthers and styles, indicating a developing seed, and 4=disarticulated seeds.
##Contact: Jennifer Dunne
# Clean the Dunne (RMBL warming meadow, exp06) files: one file per year x
# genus; genus tag and year are encoded in the file name. Only site 4 (the
# warming meadow) is kept.
clean.raw$dunne <- function(path="./Data/Experiments/dunne") {
  dunnefiles <- c("1995DunnePhenologyData_Artemisia.csv","1995DunnePhenologyData_Delphinium.csv",
                  "1995DunnePhenologyData_Erigeron.csv","1995DunnePhenologyData_Helianthella.csv",
                  "1995DunnePhenologyData_Lathyrus.csv","1995DunnePhenologyData_Mertensiana.csv",
                  "1995DunnePhenologyData_Potentilla.csv","1996DunnePhenologyData_Artemisia.csv",
                  "1996DunnePhenologyData_Delphinium.csv","1996DunnePhenologyData_Erigeron.csv",
                  "1996DunnePhenologyData_Eriogonums.csv","1996DunnePhenologyData_Festuca.csv",
                  "1996DunnePhenologyData_Helianthella.csv","1996DunnePhenologyData_Lathyrus.csv",
                  "1996DunnePhenologyData_Mertensiana.csv","1996DunnePhenologyData_Potentilla.csv",
                  "1997DunnePhenologyData_Achillea.csv","1997DunnePhenologyData_Artemisia.csv",
                  "1997DunnePhenologyData_Claytonia.csv","1997DunnePhenologyData_Delphinium.csv",
                  "1997DunnePhenologyData_Erigeron.csv","1997DunnePhenologyData_Eriogonumu.csv",
                  "1997DunnePhenologyData_Festuca.csv","1997DunnePhenologyData_Helianthella.csv",
                  "1997DunnePhenologyData_Lathyrus.csv","1997DunnePhenologyData_Mertensia.csv",
                  "1997DunnePhenologyData_Potentilla.csv","1998DunnePhenologyData_Artemisia.csv",
                  "1998DunnePhenologyData_Claytonia.csv","1998DunnePhenologyData_Delphinium.csv",
                  "1998DunnePhenologyData_Erigeron.csv","1998DunnePhenologyData_Eriogonumu.csv",
                  "1998DunnePhenologyData_Eriogonums.csv","1998DunnePhenologyData_Festuca.csv",
                  "1998DunnePhenologyData_Helianthella.csv","1998DunnePhenologyData_Lathyrus.csv",
                  "1998DunnePhenologyData_Mertensia.csv","1998DunnePhenologyData_Potentilla.csv")
  pieces <- vector("list", length(dunnefiles))
  for (i in seq_along(dunnefiles)) {
    dunne1 <- read.csv(file.path(path, dunnefiles[i]), header = TRUE)
    # First date per site/plot/rep/stage2 group (open flowers)
    dunne_ffd <- aggregate(x = dunne1$date,
                           by = list(dunne1$site, dunne1$plot, dunne1$rep, dunne1$stage2),
                           FUN = min, na.rm = FALSE)
    dunne_ffd$event <- "ffd"
    if (is.element("stage4", colnames(dunne1))) {
      # Fruit initiation, only where the file records stage 4
      dunne_ffrd <- aggregate(x = dunne1$date,
                              by = list(dunne1$site, dunne1$plot, dunne1$rep, dunne1$stage4),
                              FUN = min, na.rm = FALSE)
      dunne_ffrd$event <- "ffrd"
      dunne2 <- rbind(dunne_ffd, dunne_ffrd)
    } else {
      dunne2 <- dunne_ffd
    }
    dunne2$plot <- dunne2$Group.2
    # e.g. "1995DunnePhenologyData_Artemisia.csv": year = chars 1-4,
    # genus tag = char 24 through the ".csv" suffix
    stop <- nchar(dunnefiles[i]) - 4
    dunne2$genussp <- substr(dunnefiles[i], 24, stop)
    dunne2$year <- substr(dunnefiles[i], 1, 4)
    colnames(dunne2)[5] <- c("doy")
    pieces[[i]] <- dunne2
  }
  dunne <- do.call(rbind, pieces)
  # NOTE(review): no entry for the 1997/1998 "Mertensia" tag, so those files
  # end up with NA genus and are dropped below -- preserved from the original.
  # "Mertensiana" (sic) is normalized downstream.
  gmap <- c(Artemisia = "Artemisia", Claytonia = "Claytonia", Delphinium = "Delphinium",
            Erigeron = "Erigeron", Helianthella = "Helianthella", Lathyrus = "Lathyrus",
            Potentilla = "Potentilla", Mertensiana = "Mertensiana", Eriogonums = "Eriogonum",
            Festuca = "Festuca", Achillea = "Achillea", Eriogonumu = "Eriogonum")
  smap <- c(Artemisia = "tridentata", Claytonia = "lanceolata", Delphinium = "nuttallianum",
            Erigeron = "speciosus", Helianthella = "quinquenervis", Lathyrus = "lanszwertii",
            Potentilla = "hippiana", Mertensiana = "fusiformis", Eriogonums = "subalpinum",
            Festuca = "thurberi", Achillea = "sp", Eriogonumu = "umbellatum")
  dunne$genus <- unname(gmap[as.character(dunne$genussp)])
  dunne$species <- unname(smap[as.character(dunne$genussp)])
  dunne$site <- "exp06"
  #colnames(dunne)[1]<-"block"#this is the "site" column from the dunne files. i think we actually want to select out only plots frmo one site...
  dunne$block <- NA
  dunne <- dunne[dunne$Group.1 == "4", ]  # site 4 = the warming meadow
  dunnermbl <- subset(dunne, select = c("site", "block", "plot", "event", "year",
                                        "genus", "species", "doy"))
  dunnermbl[!is.na(dunnermbl$genus), ]
}
##Data from Haibei Alpine Grassland Research Station, China
##Spring phenology
##Contact: sonamkyi@itpcas.ac.cn
# Clean the Haibei (exp13) data: the FFD and LOD columns already hold doy
# values, so they are just stacked into long form.
clean.raw$haibei <- function(filename="ww_data1.csv", path="./Data/Experiments/haibei") {
  haibei1 <- read.csv(file.path(path, filename), check.names = FALSE, header = TRUE)
  haibei1 <- haibei1[haibei1$Treatment != "WW", ]  # remove winter warming treatment
  colnames(haibei1)[1] <- "year"
  colnames(haibei1)[4] <- "block"
  colnames(haibei1)[5] <- "plot"
  # Stack the two event columns into long form
  flow <- subset(haibei1, select = c("block", "plot", "year", "Species", "FFD"))
  colnames(flow)[5] <- "doy"
  flow$event <- "ffd"
  leaf <- subset(haibei1, select = c("block", "plot", "year", "Species", "LOD"))
  colnames(leaf)[5] <- "doy"
  leaf$event <- "lod"
  haibei <- rbind(flow, leaf)
  gmap <- c(En = "Elymus", Sa = "Stipa", Pp = "Poa", Kh = "Kobresia", Th = "Tibetia",
            Ma = "Melilotoides", Pn = "Potentilla", Gs = "Gentiana", Ss = "Saussurea",
            Gl = "Gentiana", Ad = "Aster")
  smap <- c(En = "nutans", Sa = "alinea", Pp = "pratensis", Kh = "humilis",
            Th = "himalaica", Ma = "archiducis-nicolai", Pn = "nivea", Gs = "straminea",
            Ss = "superba", Gl = "lawrencei", Ad = "diplostephioides")
  haibei$genus <- unname(gmap[as.character(haibei$Species)])   # unmapped -> NA
  haibei$species <- unname(smap[as.character(haibei$Species)])
  haibei$site <- "exp13"
  haibei$doy <- round(haibei$doy, digits = 0)
  subset(haibei, select = c("site", "block", "plot", "event", "year", "genus", "species", "doy"))
}
##Data from Cedar Creek
##Spring and Fall phenology
##Contact: danbaha@umn.edu
# Clean the Cedar Creek (exp14) data: FlwrDay/SeedDay already hold doy values.
# NOTE(review): SeedDay is labeled "ffrd" (first fruit) -- preserved as-is.
clean.raw$cc <- function(filename="https___pasta.lternet.edu_package_data_eml_knb-lter-cdr_575_8_760e44559a2611967d61bb35f21d9260.csv", path="./Data/Experiments/cedarcreek") {
  phen <- read.csv(file.path(path, filename), check.names = FALSE, header = TRUE)
  colnames(phen)[2] <- "year"
  colnames(phen)[4] <- "block"
  # Recode treatments so plot ids match those in the climate data
  phen$Heat.treatment <- NA
  phen$Heat.treatment[phen$Treatment == "0-Ambient"] <- "control"
  phen$Heat.treatment[phen$Treatment == "1-Low"] <- "low"
  phen$Heat.treatment[phen$Treatment == "2-High"] <- "high"
  phen$plot <- paste(phen$block, phen$Heat.treatment, sep = "-")
  flow <- subset(phen, select = c("block", "plot", "year", "Species", "FlwrDay"))
  colnames(flow)[5] <- "doy"
  flow$event <- "ffd"
  fruit <- subset(phen, select = c("block", "plot", "year", "Species", "SeedDay"))
  colnames(fruit)[5] <- "doy"
  fruit$event <- "ffrd"
  ccphen <- rbind(flow, fruit)
  ccphen$site <- "exp14"
  # Species are "Genus species" strings: split into two columns
  genus.species <- do.call(rbind, strsplit(ccphen$Species, " "))
  colnames(genus.species) <- c("genus", "species")
  ccphen <- cbind(ccphen, genus.species)
  subset(ccphen, select = c("site", "block", "plot", "event", "year", "genus", "species", "doy"))
}
# Produce cleaned raw data ----------------------------------------------------
# Run every cleaning function and stack the results into one long table with
# columns site/block/plot/event/year/genus/species/doy.
raw.data.dir <- "./Data/Experiments"
cleandata.raw <- list()
cleandata.raw$marchin <- clean.raw$marchin(path="./Data/Experiments/marchin")
cleandata.raw$bace <- clean.raw$bace(path="./Data/Experiments/bace")
cleandata.raw$farnsworth <- clean.raw$farnsworth(path="./Data/Experiments/farnsworth")
cleandata.raw$cleland <- clean.raw$cleland(path="./Data/Experiments/cleland")
cleandata.raw$clarkduke <- clean.raw$clarkduke(path="./Data/Experiments/clark")
cleandata.raw$clarkharvard <- clean.raw$clarkharvard(path="./Data/Experiments/clark")
cleandata.raw$sherry <- clean.raw$sherry(path="./Data/Experiments/sherry")
cleandata.raw$price <- clean.raw$price(path="./Data/Experiments/price")
cleandata.raw$chuine <- clean.raw$chuine(path="./Data/Experiments/chuine")
cleandata.raw$force <- clean.raw$force(path="./Data/Experiments/force")
cleandata.raw$ellison <- clean.raw$ellison(path="./Data/Experiments/ellison")
cleandata.raw$dunne <- clean.raw$dunne(path="./Data/Experiments/dunne")
cleandata.raw$haibei <- clean.raw$haibei(path="./Data/Experiments/haibei")
cleandata.raw$cc <- clean.raw$cc(path="./Data/Experiments/cedarcreek")
expphendb <- do.call("rbind", cleandata.raw)
row.names(expphendb) <- NULL

# Do some additional cleaning and checking:
dim(expphendb) #76966 8
expphendb <- expphendb[!is.na(expphendb$event), ]
expphendb <- expphendb[!is.na(expphendb$doy), ]
expphendb$doy <- as.numeric(expphendb$doy)
dim(expphendb) #75314 rows,8 columns
expphendb <- expphendb[!is.na(expphendb$genus), ]
# Drop unidentifiable taxa. Logical masks instead of -which(): when which()
# matches nothing, x[-integer(0), ] silently drops EVERY row. (genus has no
# NAs at this point, so != is safe.)
expphendb <- expphendb[expphendb$genus != "", ]
expphendb <- expphendb[expphendb$genus != "spp.", ] #should look at these
expphendb <- expphendb[expphendb$genus != "Le", ] #should look at these
expphendb <- expphendb[expphendb$genus != "Unknown", ] #should look at these
# Normalize names; which()-indexed assignment is a harmless no-op (not an
# error) when nothing matches.
expphendb$genus[which(expphendb$genus == "Artemesia")] <- "Artemisia" #chuine
expphendb$species[which(expphendb$species == "spp")] <- "sp" #force
expphendb$species[which(expphendb$species == "spp.")] <- "sp" #force
expphendb$species[which(expphendb$species == "sp.")] <- "sp" #force
# doy is numeric here: assign a numeric 153 (the original assigned the string
# "153", silently coercing the whole doy column to character, which made the
# later order() sort lexicographically).
expphendb$doy[which(expphendb$doy == 144153)] <- 153 #bace
expphendb$species[which(expphendb$species == "officionale")] <- "officinale" #force
expphendb$species[which(expphendb$species == "(incanum?)")] <- "incanum" #force
expphendb$species[which(expphendb$species == "quiquefolia")] <- "quinquefolia" #force
expphendb$species[which(expphendb$species == "fusiformes")] <- "fusiformis" #price
expphendb$genus[which(expphendb$genus == "Mertensiana")] <- "Mertensia" #price
expphendb$species[which(expphendb$species == "caepitosum")] <- "caespitosum" #force
expphendb$species[which(expphendb$genus == "Avena")] <- "sp" #Jasper Ridge- could be multiple spp
expphendb$species[which(expphendb$species == "")] <- "sp" #all galiums at force
dim(expphendb) #72468 rows,8 columns
head(expphendb)
expphendb <- expphendb[order(expphendb$site, expphendb$block, expphendb$plot,
                             expphendb$year, expphendb$doy, expphendb$genus), ]
write.csv(expphendb, "analyses/exppheno.csv", row.names = FALSE, eol = "\r\n")
sort(unique(expphendb$site)) #14 experiments across 9 sites
sort(unique(expphendb$genus)) #161 genera
expphendb$genus.species <- paste(expphendb$genus, expphendb$species, sep = ".")
sort(unique(expphendb$genus.species)) #268 species
unique(expphendb$event) #13 phenological events

#Do species cleaning with Miriam's new file
#sites<-unique(expphendb$site)
#for(i in 1:length(sites)){
#for (j in 1:length(species1)){
# clarkduke1$genus[clarkduke1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$genus
#clarkduke1$species[clarkduke1$Species==species1[j]] <- specieslist[specieslist$shortCode==species1[j],]$species
#}
#}
specieslist <- sort(unique(paste(expphendb$genus, expphendb$species, sep = ".")))
write.csv(specieslist, "exp_splist.csv")
#' @rdname plot.spv #' @method plot spvlistforlist #' @export plot.spvlistforlist <- function (x, which =c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots"), np = 50, alpha = 7/sqrt(length(x[[1]][[1]]$spv)), points.colour = "#39BEB1", points.size = 2, tau = c(0.05, 0.95), radii = 21, hexbin = FALSE, bins = 80, df = 10, lines.size = 1, origin = rep(0, ncol(x[[1]][[1]]$sample)), method, arrange = FALSE, ...) { # Avoid global variable notes for R CMD check and ggplot2 Radius <- SPV <- Design <- Formula <- Fraction <- Location <- NULL # Handle which depending on whether it is numeric or character (gets transformed to numeric) pnms <- c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots") show <- rep(FALSE, 5) if (is.character(which)) { which <- match.arg(which, several.ok = TRUE) which <- sort(match(which, pnms)) } if (!is.numeric(which)) stop("Argument 'which' is of incorrect type.") show[which] <- TRUE type <- x[[1]][[1]]$type if (x[[1]][[1]]$at && show[1L]){ show[1L] <- FALSE which <- which[!(which %in% 1L)] message("Plot 1 = 'fds' cannot be produced: 'at' is TRUE (inaccurate FDS plot)") } add.meanspv <- x[[1]][[1]]$type == "spherical" && !is.null(radii) if (is.null(tau) & !add.meanspv){ if (any(3L:4L %in% which)) message("Plots 3 = 'vdgquantile' and/or 4 = 'vdgboth' cannot be produced: 'tau' is NULL and mean SPV not requested/possible") show[3L:4L] <- FALSE which <- which[!(which %in% 3:4)] } if (!x[[1]][[1]]$at && show[5L]){ show[5L] <- FALSE which <- which[!(which %in% 5L)] message("Plot 5 = 'boxplots' cannot be produced: 'at' is FALSE") } pnms <- pnms[show] if (missing(method)) method <- switch(type, spherical = "Euclidean", cuboidal = "supremum", lhs = "supremum", mlhs = "supremum", slhs = "supremum", rslhs = "supremum") xvec <- proxy::dist(x[[1]][[1]]$sample, matrix(origin, nrow = 1, ncol = ncol(x[[1]][[1]]$sample)), method = method, ...) 
method <- attr(xvec, "method") xvec <- as.numeric(xvec) nfor <- length(x) ndes <- length(x[[1]]) nspv <- length(xvec) ntau <- length(tau) spvmat <- do.call(cbind, lapply(x, function(y) do.call(cbind, lapply(y, "[[", "spv")))) fornms <- names(x) desnms <- names(x[[1]]) names(spvmat) <- paste(rep(desnms, nfor), rep(fornms, each = ndes), sep = ".") if (add.meanspv){ if (length(radii) == 1) radii <- seq(from = 0, to = max(xvec), length.out = radii) mspv <- lapply(x, function(y) lapply(y, function(z) as.data.frame(meanspv(formula = z$formula, radii = radii, FtF.inv = z$FtF.inv, n = ifelse(z$unscaled, 1, z$ndes))))) tmp3 <- do.call(rbind, lapply(mspv, function(x) do.call(rbind, x))) tmp3$Formula <- rep(fornms, each = ndes * length(radii)) tmp3$Design <- rep(rep(desnms, each = length(radii)), nfor) tmp3$Location <- "Mean" } if (any(show[-1L])){ tmp1 <- data.frame(Radius = rep(xvec, ndes*nfor), SPV = as.numeric(spvmat), Design = factor(rep(rep(desnms, each = nspv), nfor)), Formula = factor(rep(fornms, each = ndes*nspv))) } if (show[1L]){ maxmin <- range(spvmat) pts <- 0:np/np quantmat <- apply(spvmat, 2, function(xx) quantile(xx, probs = pts, type = 1)) tmp2 <- data.frame(Fraction = rep(pts, ndes*nfor), SPV = as.numeric(quantmat), Formula = factor(rep(fornms, each = ndes*(np + 1))), Design = rep(rep(desnms, each = np + 1), nfor)) } if (show[3L] || show[4L]){ if (!is.null(tau)){ if (!exists("tmp1", inherits = FALSE)){ tmp1 <- data.frame(Radius = rep(xvec, ndes*nfor), SPV = as.numeric(spvmat), Design = factor(rep(rep(desnms, each = nspv), nfor)), Formula = factor(rep(fornms, each = nfor*nspv))) } pts <- seq(from = min(tmp1$Radius), to = max(tmp1$Radius), length = np) aggfun <- function(x){ fits <- lapply(tau, function(y) quantreg::rq(SPV ~ bs(Radius, df = df), tau = y, data = x)) sapply(fits, predict, newdata = data.frame(Radius = pts)) } preds <- sapply(split(tmp1, list(tmp1$Formula, tmp1$Design)), aggfun) newdf <- data.frame(Radius = rep(pts, ntau*ndes*nfor), SPV = 
as.numeric(preds), Location = rep(rep(paste("tau =", tau), each = np), nfor*ndes), Formula = factor(rep(rep(sort(fornms), each = ntau*np), ndes)), Design = factor(rep(sort(desnms), each = ntau*np*nfor))) if (exists("tmp3", inherits = FALSE)) tmp3 <- rbind(tmp3, newdf) else tmp3 <- newdf } } if (exists("tmp3", inherits = FALSE)) tmp3$Location <- as.factor(tmp3$Location) if (show[1L]){ plot1 <- ggplot(tmp2, aes(x = Fraction, y = SPV, colour = Design)) + ggtitle("Fraction of Design Space Plot") + xlab("Fraction of Design Space") + geom_line(size = lines.size) + theme(plot.title = element_text(vjust = 1)) + facet_wrap(~ Formula) } if (show[2L]) { plot2 <- ggplot(tmp1, aes(x = Radius, y = SPV, order = Formula)) + ggtitle("Variance Dispersion Graph") + geom_point(alpha = alpha, colour = points.colour, size = points.size) + theme(plot.title = element_text(vjust = 1)) + guides(colour = guide_legend(override.aes = list(alpha = 1))) + facet_wrap(~ Design + Formula) + xlab(paste0("Distance to Origin (", method,")")) if (hexbin){ plot2 <- plot2 + geom_hex(bins = bins) + scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA) } } if (show[3L]) { plot3 <- ggplot(tmp3, aes(x = Radius, y = SPV, colour = Design)) + ggtitle("Variance Dispersion Graph") + theme(plot.title = element_text(vjust = 1)) + geom_line(aes(group = interaction(Location, Formula, Design), linetype = Location), size = lines.size) + theme(legend.text.align = 0.5) + xlab(paste0("Distance to Origin (", method,")")) + facet_wrap(~ Formula) } if (show[4L]) { plot4 <- ggplot(tmp1, aes(x = Radius, y = SPV, order = Formula)) + ggtitle("Variance Dispersion Graph") + geom_point(alpha = alpha, colour = points.colour, size = points.size) + theme(plot.title = element_text(vjust = 1)) + guides(colour = guide_legend(override.aes = list(alpha = 1))) + facet_wrap(~ Design + Formula) + xlab(paste0("Distance to Origin (", method,")")) if (hexbin){ plot4 <- plot4 + geom_hex(bins = bins) + 
scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA) } plot4 <- plot4 + geom_line(data = tmp3, aes(group = interaction(Location, Design, Formula), linetype = Location, order = Design), size = lines.size, colour = 1) + theme(legend.text.align = 0.5) + xlab(paste0("Distance to Origin (", method,")")) + facet_wrap(~ Design + Formula) } if (show[5L]) { plot5 <- ggplot(tmp1, aes(x = Radius, y = SPV)) + ggtitle("Boxplots") + theme(plot.title = element_text(vjust = 1), legend.text.align = 0.5) + geom_boxplot(mapping = aes(group = Radius)) + xlab(paste0("Distance to Origin (", method,")")) + facet_wrap(~ Design + Formula) } if (length(which)) { out <- mget(paste0("plot", which)) names(out) <- pnms if (x[[1]][[1]]$unscaled) out <- lapply(out, '+', ylab("Unscaled Prediction Variance (UPV)")) else out <- lapply(out, '+', ylab("Scaled Prediction Variance (SPV)")) if (arrange) do.call(gridExtra::grid.arrange, out) else return(out) } }
/R/plot.spvlistforlist.R
no_license
cran/vdg
R
false
false
8,107
r
#' @rdname plot.spv #' @method plot spvlistforlist #' @export plot.spvlistforlist <- function (x, which =c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots"), np = 50, alpha = 7/sqrt(length(x[[1]][[1]]$spv)), points.colour = "#39BEB1", points.size = 2, tau = c(0.05, 0.95), radii = 21, hexbin = FALSE, bins = 80, df = 10, lines.size = 1, origin = rep(0, ncol(x[[1]][[1]]$sample)), method, arrange = FALSE, ...) { # Avoid global variable notes for R CMD check and ggplot2 Radius <- SPV <- Design <- Formula <- Fraction <- Location <- NULL # Handle which depending on whether it is numeric or character (gets transformed to numeric) pnms <- c("fds", "vdgsim", "vdgquantile", "vdgboth", "boxplots") show <- rep(FALSE, 5) if (is.character(which)) { which <- match.arg(which, several.ok = TRUE) which <- sort(match(which, pnms)) } if (!is.numeric(which)) stop("Argument 'which' is of incorrect type.") show[which] <- TRUE type <- x[[1]][[1]]$type if (x[[1]][[1]]$at && show[1L]){ show[1L] <- FALSE which <- which[!(which %in% 1L)] message("Plot 1 = 'fds' cannot be produced: 'at' is TRUE (inaccurate FDS plot)") } add.meanspv <- x[[1]][[1]]$type == "spherical" && !is.null(radii) if (is.null(tau) & !add.meanspv){ if (any(3L:4L %in% which)) message("Plots 3 = 'vdgquantile' and/or 4 = 'vdgboth' cannot be produced: 'tau' is NULL and mean SPV not requested/possible") show[3L:4L] <- FALSE which <- which[!(which %in% 3:4)] } if (!x[[1]][[1]]$at && show[5L]){ show[5L] <- FALSE which <- which[!(which %in% 5L)] message("Plot 5 = 'boxplots' cannot be produced: 'at' is FALSE") } pnms <- pnms[show] if (missing(method)) method <- switch(type, spherical = "Euclidean", cuboidal = "supremum", lhs = "supremum", mlhs = "supremum", slhs = "supremum", rslhs = "supremum") xvec <- proxy::dist(x[[1]][[1]]$sample, matrix(origin, nrow = 1, ncol = ncol(x[[1]][[1]]$sample)), method = method, ...) 
method <- attr(xvec, "method") xvec <- as.numeric(xvec) nfor <- length(x) ndes <- length(x[[1]]) nspv <- length(xvec) ntau <- length(tau) spvmat <- do.call(cbind, lapply(x, function(y) do.call(cbind, lapply(y, "[[", "spv")))) fornms <- names(x) desnms <- names(x[[1]]) names(spvmat) <- paste(rep(desnms, nfor), rep(fornms, each = ndes), sep = ".") if (add.meanspv){ if (length(radii) == 1) radii <- seq(from = 0, to = max(xvec), length.out = radii) mspv <- lapply(x, function(y) lapply(y, function(z) as.data.frame(meanspv(formula = z$formula, radii = radii, FtF.inv = z$FtF.inv, n = ifelse(z$unscaled, 1, z$ndes))))) tmp3 <- do.call(rbind, lapply(mspv, function(x) do.call(rbind, x))) tmp3$Formula <- rep(fornms, each = ndes * length(radii)) tmp3$Design <- rep(rep(desnms, each = length(radii)), nfor) tmp3$Location <- "Mean" } if (any(show[-1L])){ tmp1 <- data.frame(Radius = rep(xvec, ndes*nfor), SPV = as.numeric(spvmat), Design = factor(rep(rep(desnms, each = nspv), nfor)), Formula = factor(rep(fornms, each = ndes*nspv))) } if (show[1L]){ maxmin <- range(spvmat) pts <- 0:np/np quantmat <- apply(spvmat, 2, function(xx) quantile(xx, probs = pts, type = 1)) tmp2 <- data.frame(Fraction = rep(pts, ndes*nfor), SPV = as.numeric(quantmat), Formula = factor(rep(fornms, each = ndes*(np + 1))), Design = rep(rep(desnms, each = np + 1), nfor)) } if (show[3L] || show[4L]){ if (!is.null(tau)){ if (!exists("tmp1", inherits = FALSE)){ tmp1 <- data.frame(Radius = rep(xvec, ndes*nfor), SPV = as.numeric(spvmat), Design = factor(rep(rep(desnms, each = nspv), nfor)), Formula = factor(rep(fornms, each = nfor*nspv))) } pts <- seq(from = min(tmp1$Radius), to = max(tmp1$Radius), length = np) aggfun <- function(x){ fits <- lapply(tau, function(y) quantreg::rq(SPV ~ bs(Radius, df = df), tau = y, data = x)) sapply(fits, predict, newdata = data.frame(Radius = pts)) } preds <- sapply(split(tmp1, list(tmp1$Formula, tmp1$Design)), aggfun) newdf <- data.frame(Radius = rep(pts, ntau*ndes*nfor), SPV = 
as.numeric(preds), Location = rep(rep(paste("tau =", tau), each = np), nfor*ndes), Formula = factor(rep(rep(sort(fornms), each = ntau*np), ndes)), Design = factor(rep(sort(desnms), each = ntau*np*nfor))) if (exists("tmp3", inherits = FALSE)) tmp3 <- rbind(tmp3, newdf) else tmp3 <- newdf } } if (exists("tmp3", inherits = FALSE)) tmp3$Location <- as.factor(tmp3$Location) if (show[1L]){ plot1 <- ggplot(tmp2, aes(x = Fraction, y = SPV, colour = Design)) + ggtitle("Fraction of Design Space Plot") + xlab("Fraction of Design Space") + geom_line(size = lines.size) + theme(plot.title = element_text(vjust = 1)) + facet_wrap(~ Formula) } if (show[2L]) { plot2 <- ggplot(tmp1, aes(x = Radius, y = SPV, order = Formula)) + ggtitle("Variance Dispersion Graph") + geom_point(alpha = alpha, colour = points.colour, size = points.size) + theme(plot.title = element_text(vjust = 1)) + guides(colour = guide_legend(override.aes = list(alpha = 1))) + facet_wrap(~ Design + Formula) + xlab(paste0("Distance to Origin (", method,")")) if (hexbin){ plot2 <- plot2 + geom_hex(bins = bins) + scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA) } } if (show[3L]) { plot3 <- ggplot(tmp3, aes(x = Radius, y = SPV, colour = Design)) + ggtitle("Variance Dispersion Graph") + theme(plot.title = element_text(vjust = 1)) + geom_line(aes(group = interaction(Location, Formula, Design), linetype = Location), size = lines.size) + theme(legend.text.align = 0.5) + xlab(paste0("Distance to Origin (", method,")")) + facet_wrap(~ Formula) } if (show[4L]) { plot4 <- ggplot(tmp1, aes(x = Radius, y = SPV, order = Formula)) + ggtitle("Variance Dispersion Graph") + geom_point(alpha = alpha, colour = points.colour, size = points.size) + theme(plot.title = element_text(vjust = 1)) + guides(colour = guide_legend(override.aes = list(alpha = 1))) + facet_wrap(~ Design + Formula) + xlab(paste0("Distance to Origin (", method,")")) if (hexbin){ plot4 <- plot4 + geom_hex(bins = bins) + 
scale_fill_gradientn(colours = rev(topo.colors(5)[-(4:5)]), name = "Frequency", na.value = NA) } plot4 <- plot4 + geom_line(data = tmp3, aes(group = interaction(Location, Design, Formula), linetype = Location, order = Design), size = lines.size, colour = 1) + theme(legend.text.align = 0.5) + xlab(paste0("Distance to Origin (", method,")")) + facet_wrap(~ Design + Formula) } if (show[5L]) { plot5 <- ggplot(tmp1, aes(x = Radius, y = SPV)) + ggtitle("Boxplots") + theme(plot.title = element_text(vjust = 1), legend.text.align = 0.5) + geom_boxplot(mapping = aes(group = Radius)) + xlab(paste0("Distance to Origin (", method,")")) + facet_wrap(~ Design + Formula) } if (length(which)) { out <- mget(paste0("plot", which)) names(out) <- pnms if (x[[1]][[1]]$unscaled) out <- lapply(out, '+', ylab("Unscaled Prediction Variance (UPV)")) else out <- lapply(out, '+', ylab("Scaled Prediction Variance (SPV)")) if (arrange) do.call(gridExtra::grid.arrange, out) else return(out) } }
library(googlesheets) ### Name: gs_read_cellfeed ### Title: Read data from cells ### Aliases: gs_read_cellfeed ### ** Examples ## Not run: ##D gap_ss <- gs_gap() # register the Gapminder example sheet ##D col_4_and_above <- ##D gs_read_cellfeed(gap_ss, ws = "Asia", range = cell_limits(c(NA, 4))) ##D col_4_and_above ##D gs_reshape_cellfeed(col_4_and_above) ##D ##D gs_read_cellfeed(gap_ss, range = "A2:F3") ## End(Not run)
/data/genthat_extracted_code/googlesheets/examples/gs_read_cellfeed.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
435
r
library(googlesheets) ### Name: gs_read_cellfeed ### Title: Read data from cells ### Aliases: gs_read_cellfeed ### ** Examples ## Not run: ##D gap_ss <- gs_gap() # register the Gapminder example sheet ##D col_4_and_above <- ##D gs_read_cellfeed(gap_ss, ws = "Asia", range = cell_limits(c(NA, 4))) ##D col_4_and_above ##D gs_reshape_cellfeed(col_4_and_above) ##D ##D gs_read_cellfeed(gap_ss, range = "A2:F3") ## End(Not run)
lns <- readLines('README.Rmd') vcr_start <- grep("vcr", lns)[1] f <- basename(tempfile(fileext = ".Rmd")) cat(lns[vcr_start:length(lns)], file=f, sep="\n") knitr::knit(f, output = "README.md") unlink(f)
/make_readme.R
permissive
ropensci/vcr
R
false
false
203
r
lns <- readLines('README.Rmd') vcr_start <- grep("vcr", lns)[1] f <- basename(tempfile(fileext = ".Rmd")) cat(lns[vcr_start:length(lns)], file=f, sep="\n") knitr::knit(f, output = "README.md") unlink(f)
library(tm) library(stringr) library(dplyr) library(e1071) load(file = "informacion.Rdata") load( file = "questions_2015.Rdata") # We are going to apply preprocesing text mining functions questions_corpus <- Corpus(VectorSource(questions$body)) remove_lat <- function(x){gsub("<p>","",x)} questions_corpus = tm_map(questions_corpus, remove_lat) questions_corpus = tm_map(questions_corpus, removeNumbers) questions_corpus = tm_map(questions_corpus, removePunctuation) questions_corpus = tm_map(questions_corpus, removeWords, c( "function","for",stopwords("english"))) questions_corpus = tm_map(questions_corpus, stripWhitespace) questions_dtm <- DocumentTermMatrix(questions_corpus,control = list(weighting = weightTfIdf)) questions_dtm = removeSparseTerms(questions_dtm, 0.95) inspect(questions_dtm) # We want to discretize the score variable: for(i in 1:nrow(questions)){ if(questions[i,6] < 0){ questions[i,6] = -1 } if(questions[i,6] > 0){ questions[i,6] = 1 } } # We build the models questions_mining <- cbind(questions$score,as.matrix(questions_dtm)) questions_mining <- as.data.frame(questions_mining) colnames(questions_mining)[1] <- "score" questions_mining$score <- as.factor(questions_mining$score) set.seed(314) train <- sample(1:10000,7000) test <- sample(c(1:10000)[-train],3000) questions.training <- questions_mining[train,] question.testing <- questions_mining[test,] # We train the svm model and predict the classes model.svm <- svm(questions.training$score ~. ,data = questions.training,kernel = "polynomial",class.weights = c("-1" = 0.0235,"0" = 0.2018,"1" = 0.7747), cost = 1000, degree = 10) pred.svm = predict(model.svm, question.testing) # The confussion matrix table(question.testing$score,pred.svm,dnn=c("Obs","Pred")) # we train a naive bayes model naive.questions <- naiveBayes(x= as.matrix(questions_dtm)[train,], y = questions_mining$score[train]) pp <- predict(naive.questions,question.testing) # Confussion matrix table(pp, questions_mining$score[test])
/score_classification.R
no_license
Qalfredo/StackExchange-Project
R
false
false
2,048
r
library(tm) library(stringr) library(dplyr) library(e1071) load(file = "informacion.Rdata") load( file = "questions_2015.Rdata") # We are going to apply preprocesing text mining functions questions_corpus <- Corpus(VectorSource(questions$body)) remove_lat <- function(x){gsub("<p>","",x)} questions_corpus = tm_map(questions_corpus, remove_lat) questions_corpus = tm_map(questions_corpus, removeNumbers) questions_corpus = tm_map(questions_corpus, removePunctuation) questions_corpus = tm_map(questions_corpus, removeWords, c( "function","for",stopwords("english"))) questions_corpus = tm_map(questions_corpus, stripWhitespace) questions_dtm <- DocumentTermMatrix(questions_corpus,control = list(weighting = weightTfIdf)) questions_dtm = removeSparseTerms(questions_dtm, 0.95) inspect(questions_dtm) # We want to discretize the score variable: for(i in 1:nrow(questions)){ if(questions[i,6] < 0){ questions[i,6] = -1 } if(questions[i,6] > 0){ questions[i,6] = 1 } } # We build the models questions_mining <- cbind(questions$score,as.matrix(questions_dtm)) questions_mining <- as.data.frame(questions_mining) colnames(questions_mining)[1] <- "score" questions_mining$score <- as.factor(questions_mining$score) set.seed(314) train <- sample(1:10000,7000) test <- sample(c(1:10000)[-train],3000) questions.training <- questions_mining[train,] question.testing <- questions_mining[test,] # We train the svm model and predict the classes model.svm <- svm(questions.training$score ~. ,data = questions.training,kernel = "polynomial",class.weights = c("-1" = 0.0235,"0" = 0.2018,"1" = 0.7747), cost = 1000, degree = 10) pred.svm = predict(model.svm, question.testing) # The confussion matrix table(question.testing$score,pred.svm,dnn=c("Obs","Pred")) # we train a naive bayes model naive.questions <- naiveBayes(x= as.matrix(questions_dtm)[train,], y = questions_mining$score[train]) pp <- predict(naive.questions,question.testing) # Confussion matrix table(pp, questions_mining$score[test])
# This is the user-interface definition of a Shiny web application. # You can find out more about building applications with Shiny here: # # http://shiny.rstudio.com # library(shiny) library(dplyr) library(openxlsx) library(plotly) library(DT) shinyUI(fluidPage( titlePanel("Fay-Herriot model"), sidebarLayout( sidebarPanel( helpText(a(href="https://github.com/lwawrowski/shinyFHmodel/raw/master/testData.xlsx", target="_blank", "Download test data")), fileInput('file', 'Choose XLSX file', accept=c('.xlsx')), selectInput('inSheet', 'Sheet number with in-sample domains', c("1" = 1, "2" = 2, "3" = 3, "4" = 4, "5" = 5), 1), selectInput('outSheet', 'Sheet number with out-of-sample domains', c("None" = 0, "1" = 1, "2" = 2, "3" = 3, "4" = 4, "5" = 5), 0), tags$hr(), uiOutput("chooseDom"), uiOutput("chooseDep"), uiOutput("chooseIndep"), uiOutput("chooseVariance"), uiOutput("typeVariance"), uiOutput("estMethod"), uiOutput("downloadFileName"), uiOutput("downloadResults") ), mainPanel( tags$style(type="text/css", ".shiny-output-error { visibility: hidden; }", ".shiny-output-error:before { visibility: hidden; }" ), tabsetPanel( tabPanel("Diagnostics", fluidRow( column(4, h4("Goodness of model"), htmlOutput("stats")), column(8, h4("Beta coefficients"), tableOutput("betaCoeff")) ), # verbatimTextOutput("test"), h4("Diagnostics"), fluidRow( column(6, plotlyOutput("randomErrors", width = 400, height = 400)), column(6, plotlyOutput("randomEffects", width = 400, height = 400)) ), fluidRow( column(6, htmlOutput("randomErrorsNorm")), column(6, htmlOutput("randomEffectsNorm")) ) ), tabPanel("Precision", fluidRow( column(6, plotlyOutput("gammaMseHt", width = 400, height = 400)), column(6, plotlyOutput("mseHtmseFh", width = 400, height = 400)) ) ), tabPanel("Results", dataTableOutput('eblupTable') ), tabPanel("Function's summaries", h4("Linear regression summary"), verbatimTextOutput("lmSummary"), h4("Fay-Herriot model summary"), verbatimTextOutput("fhSummary") ) ) ) ) ))
/ui.R
no_license
WojciechDopieralski/shinyFHmodel
R
false
false
2,918
r
# This is the user-interface definition of a Shiny web application. # You can find out more about building applications with Shiny here: # # http://shiny.rstudio.com # library(shiny) library(dplyr) library(openxlsx) library(plotly) library(DT) shinyUI(fluidPage( titlePanel("Fay-Herriot model"), sidebarLayout( sidebarPanel( helpText(a(href="https://github.com/lwawrowski/shinyFHmodel/raw/master/testData.xlsx", target="_blank", "Download test data")), fileInput('file', 'Choose XLSX file', accept=c('.xlsx')), selectInput('inSheet', 'Sheet number with in-sample domains', c("1" = 1, "2" = 2, "3" = 3, "4" = 4, "5" = 5), 1), selectInput('outSheet', 'Sheet number with out-of-sample domains', c("None" = 0, "1" = 1, "2" = 2, "3" = 3, "4" = 4, "5" = 5), 0), tags$hr(), uiOutput("chooseDom"), uiOutput("chooseDep"), uiOutput("chooseIndep"), uiOutput("chooseVariance"), uiOutput("typeVariance"), uiOutput("estMethod"), uiOutput("downloadFileName"), uiOutput("downloadResults") ), mainPanel( tags$style(type="text/css", ".shiny-output-error { visibility: hidden; }", ".shiny-output-error:before { visibility: hidden; }" ), tabsetPanel( tabPanel("Diagnostics", fluidRow( column(4, h4("Goodness of model"), htmlOutput("stats")), column(8, h4("Beta coefficients"), tableOutput("betaCoeff")) ), # verbatimTextOutput("test"), h4("Diagnostics"), fluidRow( column(6, plotlyOutput("randomErrors", width = 400, height = 400)), column(6, plotlyOutput("randomEffects", width = 400, height = 400)) ), fluidRow( column(6, htmlOutput("randomErrorsNorm")), column(6, htmlOutput("randomEffectsNorm")) ) ), tabPanel("Precision", fluidRow( column(6, plotlyOutput("gammaMseHt", width = 400, height = 400)), column(6, plotlyOutput("mseHtmseFh", width = 400, height = 400)) ) ), tabPanel("Results", dataTableOutput('eblupTable') ), tabPanel("Function's summaries", h4("Linear regression summary"), verbatimTextOutput("lmSummary"), h4("Fay-Herriot model summary"), verbatimTextOutput("fhSummary") ) ) ) ) ))
\name{comm2sci} \alias{comm2sci} \title{Get scientific names from common names.} \usage{ comm2sci(commnames, db = "eol", itisby = "search", simplify = TRUE, ...) } \arguments{ \item{commnames}{One or more common names or partial names.} \item{db}{Data source, one of \emph{"eol"} (default), \emph{"itis"}, \emph{"tropicos"} or \emph{"ncbi"}.} \item{itisby}{Search for common names across entire names (search, default), at beginning of names (begin), or at end of names (end).} \item{simplify}{(logical) If TRUE, simplify output to a vector of names. If FALSE, return variable formats from different sources, usually a data.frame.} \item{...}{Further arguments passed on to internal methods.} } \value{ A vector of names. } \description{ Get scientific names from common names. } \examples{ \dontrun{ comm2sci(commnames='black bear') comm2sci(commnames='black bear', db='itis') comm2sci(commnames='annual blue grass', db='tropicos') comm2sci(commnames=c('annual blue grass','tree of heaven'), db='tropicos') comm2sci(commnames=c('black bear', 'roe deer')) # Output easily converts to a data.frame with \\code{\\link[plyr]{ldply}} library(plyr) ldply(comm2sci(commnames=c('annual blue grass','tree of heaven'), db='tropicos')) } } \author{ Scott Chamberlain (myrmecocystus@gmail.com) } \seealso{ \code{\link[taxize]{searchbycommonname}}, \code{\link[taxize]{searchbycommonnamebeginswith}}, \code{\link[taxize]{searchbycommonnameendswith}}, \code{\link[taxize]{eol_search}}, \code{\link[taxize]{tp_search}} \code{\link[taxize]{sci2comm}} }
/man/comm2sci.Rd
permissive
dlebauer/taxize_
R
false
false
1,569
rd
\name{comm2sci} \alias{comm2sci} \title{Get scientific names from common names.} \usage{ comm2sci(commnames, db = "eol", itisby = "search", simplify = TRUE, ...) } \arguments{ \item{commnames}{One or more common names or partial names.} \item{db}{Data source, one of \emph{"eol"} (default), \emph{"itis"}, \emph{"tropicos"} or \emph{"ncbi"}.} \item{itisby}{Search for common names across entire names (search, default), at beginning of names (begin), or at end of names (end).} \item{simplify}{(logical) If TRUE, simplify output to a vector of names. If FALSE, return variable formats from different sources, usually a data.frame.} \item{...}{Further arguments passed on to internal methods.} } \value{ A vector of names. } \description{ Get scientific names from common names. } \examples{ \dontrun{ comm2sci(commnames='black bear') comm2sci(commnames='black bear', db='itis') comm2sci(commnames='annual blue grass', db='tropicos') comm2sci(commnames=c('annual blue grass','tree of heaven'), db='tropicos') comm2sci(commnames=c('black bear', 'roe deer')) # Output easily converts to a data.frame with \\code{\\link[plyr]{ldply}} library(plyr) ldply(comm2sci(commnames=c('annual blue grass','tree of heaven'), db='tropicos')) } } \author{ Scott Chamberlain (myrmecocystus@gmail.com) } \seealso{ \code{\link[taxize]{searchbycommonname}}, \code{\link[taxize]{searchbycommonnamebeginswith}}, \code{\link[taxize]{searchbycommonnameendswith}}, \code{\link[taxize]{eol_search}}, \code{\link[taxize]{tp_search}} \code{\link[taxize]{sci2comm}} }
#' Calculates the threshold for one condition #' \code{one_threshold} calculates the threshold for one condition #' one_threshold #' @keywords internal #' @export one_threshold <- function(d, prob, log, groups, funname, guess, lapses, curves) { if (length(groups) == 0) curves <- curves else curves <- semi_join(curves, d, by = as.character(groups)) if (funname %in% names(get_functions())) { par <- d$par if (is.numeric(guess) && is.numeric(lapses)) q <- (prob - guess) / (1 - guess - lapses) if (is.logical(guess) && is.logical(lapses)) q <- (prob - par[3]) / (1 - par[3] - par[4]) if (is.logical(guess) && is.numeric(lapses)) q <- (prob - par[3]) / (1 - par[3] - lapses) if (is.numeric(guess) && is.logical(lapses)) q <- (prob - guess) / (1 - guess - par[3]) if (q < 0 || q > 1) { warning('probabilities not whitin 0 and 1') thre <- approx(curves$y,curves$x, xout= prob)$y } else { if (funname == 'cum_normal_fun') thre <- inv_cum_normal_fun(q, c(par[1], par[2])) if (funname == 'logistic_fun') thre <- inv_logistic_fun(q, c(par[1], par[2])) if (funname == 'weibull_fun') thre <- inv_weibull_fun(q, c(par[1], par[2])) } } else { thre <- approx(curves$y,curves$x, xout= prob)$y } if (log) thre <- exp(thre) data.frame(thre, prob) }
/R/one_threshold.R
no_license
cran/quickpsy
R
false
false
1,404
r
#' Calculates the threshold for one condition #' \code{one_threshold} calculates the threshold for one condition #' one_threshold #' @keywords internal #' @export one_threshold <- function(d, prob, log, groups, funname, guess, lapses, curves) { if (length(groups) == 0) curves <- curves else curves <- semi_join(curves, d, by = as.character(groups)) if (funname %in% names(get_functions())) { par <- d$par if (is.numeric(guess) && is.numeric(lapses)) q <- (prob - guess) / (1 - guess - lapses) if (is.logical(guess) && is.logical(lapses)) q <- (prob - par[3]) / (1 - par[3] - par[4]) if (is.logical(guess) && is.numeric(lapses)) q <- (prob - par[3]) / (1 - par[3] - lapses) if (is.numeric(guess) && is.logical(lapses)) q <- (prob - guess) / (1 - guess - par[3]) if (q < 0 || q > 1) { warning('probabilities not whitin 0 and 1') thre <- approx(curves$y,curves$x, xout= prob)$y } else { if (funname == 'cum_normal_fun') thre <- inv_cum_normal_fun(q, c(par[1], par[2])) if (funname == 'logistic_fun') thre <- inv_logistic_fun(q, c(par[1], par[2])) if (funname == 'weibull_fun') thre <- inv_weibull_fun(q, c(par[1], par[2])) } } else { thre <- approx(curves$y,curves$x, xout= prob)$y } if (log) thre <- exp(thre) data.frame(thre, prob) }
## Watson Language Identification # IBM SPSS Modeler Node # Install function for packages packages <- function(x){ x <- as.character(match.call()[[2]]) if (!require(x,character.only=TRUE)){ install.packages(pkgs=x,repos="http://cran.r-project.org") require(x,character.only=TRUE) } } packages(httr) packages(XML) # This function is used to generate automatically the dataModel for SPSS Modeler getMetaData <- function (data) { if( is.null(dim(data))) stop("Invalid data received: not a data.frame") if (dim(data)[1]<=0) { print("Warning : modelerData has no line, all fieldStorage fields set to strings") getStorage <- function(x){return("string")} } else { getStorage <- function(x) { x <- unlist(x) res <- NULL #if x is a factor, typeof will return an integer so we treat the case on the side if(is.factor(x)) { res <- "string" } else { res <- switch(typeof(x), integer="integer", double = "real", "string") } return (res) } } col = vector("list", dim(data)[2]) for (i in 1:dim(data)[2]) { col[[i]] <- c(fieldName=names(data[i]), fieldLabel="", fieldStorage=getStorage(data[i]), fieldMeasure="", fieldFormat="", fieldRole="") } mdm<-do.call(cbind,col) mdm<-data.frame(mdm) return(mdm) } version <- R.Version()$major #find version of R for different Post call data <- data.frame() #Data frame to be populated with results for(i in 1:nrow(modelerData)) { text<-modelerData$%%text%%[i] #Load text text <-iconv(text, to="UTF-8") #convert to UTF-8 for Watson base <- "https://gateway.watsonplatform.net/language-translation/api/v2/identify" u <- "%%user%%" #Username and Password provided in CDB p <- "%%password%%" pars <- list(txt=iconv(text,to="UTF-8") ) if (version == 2) { r<-POST(base,authenticate(u, p), body = pars) } else { r<-POST(base,authenticate(u, p),add_headers(Accept = "text/plain"), body = pars) } stop_for_status(r) language<-print(content(r)) data[i,1] <- language } modelerData <-cbind(modelerData,data) colnames(modelerData)[ncol(modelerData)] <- "Language Code" #Name new column 
modelerDataModel <- getMetaData(modelerData)
/Source code/script.r
permissive
KushalVenkatesh/Watson_Language_ID
R
false
false
2,376
r
## Watson Language Identification # IBM SPSS Modeler Node # Install function for packages packages <- function(x){ x <- as.character(match.call()[[2]]) if (!require(x,character.only=TRUE)){ install.packages(pkgs=x,repos="http://cran.r-project.org") require(x,character.only=TRUE) } } packages(httr) packages(XML) # This function is used to generate automatically the dataModel for SPSS Modeler getMetaData <- function (data) { if( is.null(dim(data))) stop("Invalid data received: not a data.frame") if (dim(data)[1]<=0) { print("Warning : modelerData has no line, all fieldStorage fields set to strings") getStorage <- function(x){return("string")} } else { getStorage <- function(x) { x <- unlist(x) res <- NULL #if x is a factor, typeof will return an integer so we treat the case on the side if(is.factor(x)) { res <- "string" } else { res <- switch(typeof(x), integer="integer", double = "real", "string") } return (res) } } col = vector("list", dim(data)[2]) for (i in 1:dim(data)[2]) { col[[i]] <- c(fieldName=names(data[i]), fieldLabel="", fieldStorage=getStorage(data[i]), fieldMeasure="", fieldFormat="", fieldRole="") } mdm<-do.call(cbind,col) mdm<-data.frame(mdm) return(mdm) } version <- R.Version()$major #find version of R for different Post call data <- data.frame() #Data frame to be populated with results for(i in 1:nrow(modelerData)) { text<-modelerData$%%text%%[i] #Load text text <-iconv(text, to="UTF-8") #convert to UTF-8 for Watson base <- "https://gateway.watsonplatform.net/language-translation/api/v2/identify" u <- "%%user%%" #Username and Password provided in CDB p <- "%%password%%" pars <- list(txt=iconv(text,to="UTF-8") ) if (version == 2) { r<-POST(base,authenticate(u, p), body = pars) } else { r<-POST(base,authenticate(u, p),add_headers(Accept = "text/plain"), body = pars) } stop_for_status(r) language<-print(content(r)) data[i,1] <- language } modelerData <-cbind(modelerData,data) colnames(modelerData)[ncol(modelerData)] <- "Language Code" #Name new column 
modelerDataModel <- getMetaData(modelerData)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/01Classes.R \name{newparam} \alias{newparam} \title{Create new simulation control parameter object} \usage{ newparam(dataframe, type = "default", x, z) } \arguments{ \item{dataframe}{(data frame) original data to be used in simulations} \item{type}{(character) creation type: empty, default or custom, defaults to "default"} \item{x}{(character) primary contamination of interest such as "misval"} \item{z}{(character) secondary contamination of interest such as "noise"} } \value{ preprosimparameter class object } \description{ Preprosim parameter objects contain eight contaminations: noise, lowvar, misval, irfeature, classswap, classimbalance, volumedecrease and outlier. Each contamination has three sub parameters: cols as columns the contamination is applied to, param as the parameter of the contamination itself (i.e. intensity of contamination) and order as order in which the parameter is applied to the data. } \details{ For argument type: empty creates a preprosimparameter object with empty params (but not empty cols or order). default creates 6561 combinations with all params 0, 0.1, 0.2. custom creates params seq(0, 0.9, by 0.1) for primary (x) and 0., 0.1, 0.2 for secondary (z). The implicit y (not an argument) refers to classification accuracy. } \examples{ pa <- newparam(iris) pa1 <- newparam(iris, "empty") pa2 <- newparam(iris, "custom", "misval", "noise") }
/man/newparam.Rd
no_license
cran/preprosim
R
false
true
1,506
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/01Classes.R \name{newparam} \alias{newparam} \title{Create new simulation control parameter object} \usage{ newparam(dataframe, type = "default", x, z) } \arguments{ \item{dataframe}{(data frame) original data to be used in simulations} \item{type}{(character) creation type: empty, default or custom, defaults to "default"} \item{x}{(character) primary contamination of interest such as "misval"} \item{z}{(character) secondary contamination of interest such as "noise"} } \value{ preprosimparameter class object } \description{ Preprosim parameter objects contain eight contaminations: noise, lowvar, misval, irfeature, classswap, classimbalance, volumedecrease and outlier. Each contamination has three sub parameters: cols as columns the contamination is applied to, param as the parameter of the contamination itself (i.e. intensity of contamination) and order as order in which the parameter is applied to the data. } \details{ For argument type: empty creates a preprosimparameter object with empty params (but not empty cols or order). default creates 6561 combinations with all params 0, 0.1, 0.2. custom creates params seq(0, 0.9, by 0.1) for primary (x) and 0., 0.1, 0.2 for secondary (z). The implicit y (not an argument) refers to classification accuracy. } \examples{ pa <- newparam(iris) pa1 <- newparam(iris, "empty") pa2 <- newparam(iris, "custom", "misval", "noise") }
# Chi-squared goodness-of-fit test
# 30 survey respondents answered: a lot / normal / a little, giving counts
# 15, 5 and 10 respectively. Are these counts significantly different from
# an equiprobable split (10, 10, 10)?
respuestas <- c(15, 5, 10)          # observed counts
probabilidad <- rep(1, 3) / 3       # null hypothesis: equal probabilities
test <- chisq.test(x = respuestas, p = probabilidad)
print(test)
/estadistica/chi-cuadrado_test.R
no_license
jjdeharo/General
R
false
false
378
r
# Test de chi-cuadrado # Al responder 30 personas una encuesta con respuestas: mucho, normal, poco; se obtienen los resultados: 15, 5, 10, respectivamente # ¿Estos resultados son significativamente distintos de unas respuestas equiprobables, es decir, 10, 10, 10? respuestas <- c(15,5,10) probabilidad <- rep(1/3,3) test <- chisq.test(respuestas, p=probabilidad) print(test)
# Auto-generated fuzzing regression test (RcppDeepState / valgrind harness)
# for the internal C++ entry point meteor:::ET0_PenmanMonteith.
# The argument values are raw double bit patterns emitted by the fuzzer,
# not meteorologically meaningful inputs; the point of the test is crash /
# undefined-behaviour detection, so the literals must not be altered.
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0),
                 ra = numeric(0), relh = numeric(0), rs = numeric(0),
                 temp = c(8.5728629954997e-312, 1.56898424065867e+82,
                          8.96970809549085e-158, -1.3258495253834e-113,
                          2.79620616433656e-119, -6.80033518839696e+41,
                          2.68298522855314e-211, 1444042902784.06,
                          6.68889884134308e+51, -4.05003163986346e-308,
                          -3.52601820453991e+43, -1.49815227045093e+197,
                          -2.61605817623304e+76, -1.18078903777423e-90,
                          1.86807199752012e+112, -5.58551357556946e+160,
                          2.00994342527714e-162, 1.81541609400943e-79,
                          7.89363005545926e+139, 2.3317908961407e-93,
                          2.16562581831091e+161, -1.51345790188863e+21,
                          1.44942408802595e-285, -1.72131968218895e+83,
                          -7.88781071482505e+93, 1.0823131123826e-105,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke the native routine with the fuzzed arguments and dump the structure
# of whatever comes back.
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result)
/meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615839383-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
1,054
r
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = numeric(0), rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161, -1.51345790188863e+21, 1.44942408802595e-285, -1.72131968218895e+83, -7.88781071482505e+93, 1.0823131123826e-105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(meteor:::ET0_PenmanMonteith,testlist) str(result)
##### This code helps
##### 1. locate the grid info for each project
##### 2. create buffers of different radii around aid project locations and
##### 3. find out grids that are located in the buffer regions.
##### One buffer shapefile is written per radius (5 km to 50 km in 5 km steps).
#####
##### NOTE(review): this script calls readOGR()/writeOGR() (rgdal),
##### spTransform()/proj4string() (sp) and gBuffer() (rgeos) without loading
##### those packages -- presumably they are attached by the calling session.
##### Add the corresponding library() calls if it must run standalone.

in_dir  <- path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa")
out_dir <- path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa/aid_buffer")

##### Load Data #####
aid <- readOGR(dsn = in_dir, layer = "africa_construction")

##### Transform Projection #####
# Reproject to UTM zone 33 (metres) so buffer widths can be expressed in metres.
proj.crs <- proj4string(CRS("+proj=utm +zone=33 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"))
aid.newproj <- spTransform(aid, proj.crs)

##### Create Buffers #####
# Replaces ten copy-pasted stanzas with a single loop. Each per-feature buffer
# is written as layer "buffer_<km>" and, for parity with the original script,
# also kept in the workspace as aid_buffer.<km>.
for (radius_km in seq(5, 50, by = 5)) {
  buffered <- gBuffer(aid.newproj, width = radius_km * 1000, byid = TRUE)
  assign(paste0("aid_buffer.", radius_km), buffered)
  writeOGR(buffered,
           dsn = out_dir,
           layer = paste0("buffer_", radius_km),
           driver = "ESRI Shapefile")
}
#####
/aid/reduced_version/only_africa/aid_buffer/buffer_generate.R
no_license
Abel73/China-Aid-and-Africa-Conflict
R
false
false
2,848
r
##### This code helps ##### 1. locate the grid info for each project ##### 2. create the buffer using different radius and ##### 3. find out grid that are located in the buffer region. ##### We use multiple file as input, ##### Load Data ##### aid <- readOGR(dsn = path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa"), layer = "africa_construction") ##### Transform Projection ##### proj.crs <- proj4string(CRS("+proj=utm +zone=33 +ellps=WGS84 +datum=WGS84 +units=m +no_defs")) aid.newproj <- spTransform(aid, proj.crs) ##### Create Buffer ##### aid_buffer.5 <- gBuffer(aid.newproj, width = 5000, byid = TRUE) writeOGR(aid_buffer.5, dsn = path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa/aid_buffer"), layer = "buffer_5", driver = "ESRI Shapefile") aid_buffer.10 <- gBuffer(aid.newproj, width = 10000, byid = TRUE) writeOGR(aid_buffer.10, dsn = path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa/aid_buffer"), layer = "buffer_10", driver = "ESRI Shapefile") aid_buffer.15 <- gBuffer(aid.newproj, width = 15000, byid = TRUE) writeOGR(aid_buffer.15, dsn = path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa/aid_buffer"), layer = "buffer_15", driver = "ESRI Shapefile") aid_buffer.20 <- gBuffer(aid.newproj, width = 20000, byid = TRUE) writeOGR(aid_buffer.20, dsn = path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa/aid_buffer"), layer = "buffer_20", driver = "ESRI Shapefile") aid_buffer.25 <- gBuffer(aid.newproj, width = 25000, byid = TRUE) writeOGR(aid_buffer.25, dsn = path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa/aid_buffer"), layer = "buffer_25", driver = "ESRI Shapefile") aid_buffer.30 <- gBuffer(aid.newproj, width = 30000, byid = TRUE) writeOGR(aid_buffer.30, dsn = path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa/aid_buffer"), layer = "buffer_30", driver = "ESRI Shapefile") aid_buffer.35 <- gBuffer(aid.newproj, width = 35000, byid = TRUE) writeOGR(aid_buffer.35, dsn = 
path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa/aid_buffer"), layer = "buffer_35", driver = "ESRI Shapefile") aid_buffer.40 <- gBuffer(aid.newproj, width = 40000, byid = TRUE) writeOGR(aid_buffer.40, dsn = path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa/aid_buffer"), layer = "buffer_40", driver = "ESRI Shapefile") aid_buffer.45 <- gBuffer(aid.newproj, width = 45000, byid = TRUE) writeOGR(aid_buffer.45, dsn = path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa/aid_buffer"), layer = "buffer_45", driver = "ESRI Shapefile") aid_buffer.50 <- gBuffer(aid.newproj, width = 50000, byid = TRUE) writeOGR(aid_buffer.50, dsn = path.expand("/Users/mxube/Desktop/aid/reduced_version/only_africa/aid_buffer"), layer = "buffer_50", driver = "ESRI Shapefile") #####
# Auto-generated fuzzing regression test (libFuzzer / valgrind harness) for
# the internal C++ entry point distr6:::C_EmpiricalMVPdf.
# `data` is a degenerate 1x1 sample matrix and `x` a 4x7 matrix of raw double
# bit patterns produced by the fuzzer; the literals are intentionally extreme
# (denormals, huge magnitudes) and must be preserved exactly.
testlist <- list(data = structure(4.89053717943093e-315, .Dim = c(1L, 1L)),
                 x = structure(c(1.39069146974254e-309, 3.94604863000294e-114,
                                 1.26575548613742e-309, 4.78479882539232e-304,
                                 2.64895706299806e-260, 1.0323506673724e-310,
                                 3.75956971383845e-227, 0,
                                 2.90905852271326e-319, 1.39069146974254e-309,
                                 0, 1.50192485449236e-307,
                                 1.39117832662268e-308, 2.71615461243555e-312,
                                 5.27103754475774e-310, 0,
                                 1.39069146974254e-309, 0, 0, 0, 0,
                                 8.93007909254939e-311, 1.01268621148405e+266,
                                 3.91757856962238e-316, 6.2663405322389e-106,
                                 2.64227520700917e-308, 7.80639044993566e+115,
                                 1.46942191467086e-105),
                               .Dim = c(4L, 7L)))
# Run the native routine on the fuzzed inputs; str() just reports the shape
# of the result (the harness is looking for crashes/memory errors, not values).
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result)
/distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610036724-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
724
r
testlist <- list(data = structure(4.89053717943093e-315, .Dim = c(1L, 1L)), x = structure(c(1.39069146974254e-309, 3.94604863000294e-114, 1.26575548613742e-309, 4.78479882539232e-304, 2.64895706299806e-260, 1.0323506673724e-310, 3.75956971383845e-227, 0, 2.90905852271326e-319, 1.39069146974254e-309, 0, 1.50192485449236e-307, 1.39117832662268e-308, 2.71615461243555e-312, 5.27103754475774e-310, 0, 1.39069146974254e-309, 0, 0, 0, 0, 8.93007909254939e-311, 1.01268621148405e+266, 3.91757856962238e-316, 6.2663405322389e-106, 2.64227520700917e-308, 7.80639044993566e+115, 1.46942191467086e-105), .Dim = c(4L, 7L))) result <- do.call(distr6:::C_EmpiricalMVPdf,testlist) str(result)
## read in emissions data and classification code
emissions_data <- readRDS("summarySCC_PM25.rds")
class_code <- readRDS("Source_Classification_Code.rds")

## Subset motor-vehicle ("ON-ROAD") emissions for Baltimore City (fips 24510)
## and LA County (fips 06037).
## FIX: the original condition was
##   fips=="24510" | fips=="06037" & type=="ON-ROAD"
## Because `&` binds tighter than `|`, that kept Baltimore rows of EVERY
## source type. Parenthesize so the ON-ROAD filter applies to both counties.
baltLA_car_data <- subset(
  emissions_data,
  (emissions_data$fips == "24510" | emissions_data$fips == "06037") &
    emissions_data$type == "ON-ROAD"
)

## Total emissions per county per year
baltLA_car_year <- aggregate(baltLA_car_data$Emissions,
                             by = list(baltLA_car_data$fips,
                                       baltLA_car_data$year),
                             FUN = sum)
colnames(baltLA_car_year) <- c("City", "Year", "Emissions")

## create plot comparing emissions from motor vehicles in Baltimore and LA
## from 1999-2008. print() is required so the plot renders when this script
## is source()d (ggplot objects only auto-print at an interactive top level).
library(ggplot2)
png(filename = "plot6.png")
print(
  qplot(Year, Emissions, data = baltLA_car_year, color = City, geom = "line") +
    ggtitle("Emissions of PM2.5 in Baltimore City (24510) and LA County (06037)") +
    ylab("Total Emissions from motor vehicles (tons)") +
    xlab("Year")
)
dev.off()
/plot6.R
no_license
Paresh1110/ExData4
R
false
false
992
r
## read in emissions data and classification code
emissions_data <- readRDS("summarySCC_PM25.rds")
class_code <- readRDS("Source_Classification_Code.rds")

## Subset motor-vehicle ("ON-ROAD") emissions for Baltimore City (fips 24510)
## and LA County (fips 06037).
## FIX: the original condition was
##   fips=="24510" | fips=="06037" & type=="ON-ROAD"
## Because `&` binds tighter than `|`, that kept Baltimore rows of EVERY
## source type. Parenthesize so the ON-ROAD filter applies to both counties.
baltLA_car_data <- subset(
  emissions_data,
  (emissions_data$fips == "24510" | emissions_data$fips == "06037") &
    emissions_data$type == "ON-ROAD"
)

## Total emissions per county per year
baltLA_car_year <- aggregate(baltLA_car_data$Emissions,
                             by = list(baltLA_car_data$fips,
                                       baltLA_car_data$year),
                             FUN = sum)
colnames(baltLA_car_year) <- c("City", "Year", "Emissions")

## create plot comparing emissions from motor vehicles in Baltimore and LA
## from 1999-2008. print() is required so the plot renders when this script
## is source()d (ggplot objects only auto-print at an interactive top level).
library(ggplot2)
png(filename = "plot6.png")
print(
  qplot(Year, Emissions, data = baltLA_car_year, color = City, geom = "line") +
    ggtitle("Emissions of PM2.5 in Baltimore City (24510) and LA County (06037)") +
    ylab("Total Emissions from motor vehicles (tons)") +
    xlab("Year")
)
dev.off()
# Construct a crmPack dual-endpoint dose-escalation model in which the
# dose-biomarker relationship follows a (rescaled) beta function.
# NOTE(review): the per-argument annotations below are best-effort readings;
# the two-element vectors are presumably (lower, upper) bounds of uniform
# priors on the respective parameters -- confirm against the crmPack
# DualEndpointBeta class documentation.
model <- DualEndpointBeta(E0 = c(0, 100),      # baseline biomarker level
                          Emax = c(0, 500),    # maximum biomarker effect
                          delta1 = c(0, 5),    # first shape parameter of the beta curve
                          mode = c(1, 15),     # dose at which the biomarker curve peaks
                          refDose=10,          # reference dose for standardization
                          useLogDose=TRUE,     # work on log(dose/refDose) rather than raw dose
                          refDoseBeta = 1000,  # scaling dose mapping doses onto the beta support
                          mu = c(0, 1),        # bivariate normal prior mean (tox model parameters)
                          Sigma = matrix(c(1, 0, 0, 1), nrow=2),  # prior covariance: identity
                          sigma2W = c(a=0.1, b=0.1),  # (a, b) hyperparameters for biomarker variance prior
                          rho = c(a=1, b=1))   # (a, b) hyperparameters for the correlation prior
/examples/Model-class-DualEndpointBeta.R
no_license
insightsengineering/crmPack
R
false
false
506
r
model <- DualEndpointBeta(E0 = c(0, 100), Emax = c(0, 500), delta1 = c(0, 5), mode = c(1, 15), refDose=10, useLogDose=TRUE, refDoseBeta = 1000, mu = c(0, 1), Sigma = matrix(c(1, 0, 0, 1), nrow=2), sigma2W = c(a=0.1, b=0.1), rho = c(a=1, b=1))
######################################################################################################
# Title: Functions for various exploratory analysis operations on data frames
# Author: Sanjay, Dheekshitha PS
# Created on: June 14, 2018
# Description: Consolidated file for all functions to be used out of the box for exploratory analysis
######################################################################################################

########################
# FUNCTION DEFINITIONS #
########################

#' @name ignoreCols
#' @title Ignores the columns in the loaded dataframe object
#' @details The columns selected are removed from the object. If any requested
#'   column is absent, an error naming the missing columns is raised.
#' @param data the dataframe object that needs to be loaded
#' @param columns the names of columns to be ignored from dataframe object
#' @return Updated dataframe object (always a data frame, even with one column left)
#' @family Package EDA Utilites functions
#' @examples
#' ignoreCols(data = iris, columns = "Species")
#' @export
ignoreCols <- function(data, columns){
  tryCatch({
    if(all(columns %in% colnames(data))){
      # drop = F keeps the result a data.frame even if one column remains
      return(data[, setdiff(colnames(data), columns), drop = F])
    }else{
      # FIX: previously `colnames(data)[!all(columns %in% colnames(data))]`,
      # which listed ALL columns of `data` in the error message because
      # !all(...) is a single TRUE. Report only the truly missing columns.
      mismatch <- columns[!(columns %in% colnames(data))]
      stop(paste0("Columns ", paste0(mismatch, collapse = ", "),
                  " are not present in the dataset"))
    }
  }, error = function(e){
    stop(e)
  }, warning = function(e){
    warning(e)
  })
}

# Univariate Categoric Distribution function
#' @name univarCatDistPlots
#' @title Univariate Categoric Distribution
#' @details A univariate distribution graph on the selected categorical columns from the dataframe
#' @param data the dataset where the column on which the plot is to be generated is present
#' @param uniCol the name of column on which the plot needs to be generated
#' @param priColor the primary color for the plots
#' @param optionalPlots A Flag for optional plots
#' @return A univariate categoric distribution plot
#' @family Package EDA Utilites functions
#' @examples
#' univarCatDistPlots(data = iris, uniCol = "Species")
#' @export
# Horizontal frequency bar chart for one categorical column. Missing values
# are folded into an explicit "NA" level first so they are counted as their
# own bar. When `optionalPlots` is truthy an interactive plotly chart is
# returned instead of the ggplot object.
univarCatDistPlots <- function(data, uniCol, priColor = "blue", optionalPlots = 0){
  # Add an explicit "NA" level and recode missing values into it.
  levels(data[[uniCol]]) <- c(levels(data[[uniCol]]), "NA")
  data[[uniCol]][is.na(data[[uniCol]])] <- "NA"
  # Frequency table: one row per level with its count.
  data <- data %>% dplyr::group_by_(.dots = c(uniCol)) %>% dplyr::summarise(count = dplyr::n())
  # Captured BEFORE the arrange() below, so the plotly branch keeps the
  # pre-sort level order.
  y=data[[uniCol]]
  # NOTE(review): %>>% is the pipeR pipe, not magrittr's %>% -- presumably
  # intentional (pipeR must then be attached); confirm it is not a typo.
  data %>>% dplyr::arrange(.data$count) -> data
  catPlot <- ggplot2::ggplot(data, ggplot2::aes_(x = as.name(uniCol), y= as.name("count"))) +
    ggplot2::geom_bar(stat = "identity", fill = priColor,alpha=0.7) +
    ggplot2::xlab(uniCol) + ggplot2::ylab("Frequency") +
    ggplot2::theme_bw() +
    ggplot2::theme( axis.title = ggplot2::element_text(size = 16),panel.grid.major.y=ggplot2::element_blank(),panel.border=ggplot2::element_rect(size=0.1) ) +
    ggplot2::coord_flip()
  if(optionalPlots){
    # Interactive variant replaces (not augments) the static plot.
    catPlot <- plotly::plot_ly(y = y, x=data[["count"]],type="bar",orientation='h',color = I(priColor)) %>%
      plotly::layout(title=paste0("Frequency Histogram for ",uniCol),
                     xaxis=list(title = "Frequency"),
                     yaxis=list(title = uniCol))
  }
  return(catPlot)
}

#Outlier Plot Function
#' @name outlierPlot
#' @title Outlier detection plot
#' @details Outliers are to be identified on the selected column from the dataframe
#' @param data the dataframe that needs to be loaded
#' @param method the method on which outliers are to be identified ("iqr", "percentile" or "z_score")
#' @param columnName the name of column for which the outliers are identified
#' @param cutoffValue the cut off value to define the threshold for outliers
#' @param priColor the primary color for the plots
#' @param optionalPlots A Flag for optional plots
#' @return Outliers plot object
#' @family Package EDA Utilites functions
#' @examples
#' \dontrun{
#' outlierPlot(data = iris, columnName = "Sepal.Length")
#' }
#' @export
outlierPlot <- function(data, method = "iqr", columnName, cutoffValue = 0.05, priColor = "blue", optionalPlots = 0){
  # Drop rows where the target column is NA before computing any thresholds.
  if(TRUE %in% unique(is.na(data[,columnName]))){
    data <- data[-which(is.na(data[,columnName])),]
  }
  if(method == "iqr"){
    # Tukey fences: 1.5 * IQR beyond the quartiles; rendered as a boxplot.
    lower <- stats::quantile(data[, columnName], .25,na.rm = T) - 1.5*(stats::IQR(data[, columnName], na.rm = T))
    upper <- stats::quantile(data[,columnName],.75,na.rm = T) + 1.5*(stats::IQR(data[,columnName],na.rm = T))
    data$Outlier <- data[,columnName] > upper | data[,columnName] < lower
    outlierPlotObj <- ggplot2::ggplot(data, ggplot2::aes(x="", y = data[,columnName])) +
      ggplot2::geom_boxplot(fill = priColor,alpha=0.7) +
      ggplot2::theme_bw() +
      ggplot2::theme(panel.border=ggplot2::element_rect(size=0.1),panel.grid.minor.x=ggplot2::element_blank(),panel.grid.major.x=ggplot2::element_blank(),legend.position = "bottom") +
      ggplot2::ylab(columnName) + ggplot2::xlab("")
  }
  if(method == "percentile"){
    # Flag everything outside the [cutoffValue, 1 - cutoffValue] quantiles;
    # rendered as a histogram with outliers highlighted in red.
    lower <- stats::quantile(data[,columnName],cutoffValue,na.rm = T)
    upper <- stats::quantile(data[,columnName],(1-cutoffValue),na.rm = T)
    data$Outlier <- data[,columnName] > upper | data[,columnName] < lower
    Outlier<-data$Outlier
    Value<-data[,columnName]
    outlierPlotObj <- ggplot2::ggplot(data) +
      ggplot2::geom_histogram(ggplot2::aes(x = Value, fill = as.name("Outlier")), bins=30, alpha=0.7) +
      ggplot2::scale_fill_manual(values = c(priColor, "red"),breaks=c("FALSE", "TRUE"), labels=c("Normal", "Outlier"),name = "Status") +
      ggplot2::theme_bw() +
      ggplot2::theme(panel.border=ggplot2::element_rect(size=0.1),panel.grid.minor.x=ggplot2::element_blank(),panel.grid.major.x=ggplot2::element_blank(),legend.position = "bottom") +
      ggplot2::xlab(columnName)
  }
  if(method == "z_score"){
    # NOTE(review): here cutoffValue is interpreted as a z-score threshold
    # (number of standard deviations), not a probability as in the other two
    # methods -- the shared default of 0.05 looks suspicious for this branch.
    lower <- mean(data[,columnName],na.rm = T) - (cutoffValue*(stats::sd(data[,columnName],na.rm = T)))
    upper <- mean(data[,columnName],na.rm = T) + (cutoffValue*(stats::sd(data[,columnName],na.rm = T)))
    data$Outlier <- data[,columnName] > upper | data[,columnName] < lower
    data$zScore <- scale(data[,columnName],center = T, scale = T)
    Zscore<-as.vector(data$zScore)
    y<-data[,columnName]
    outlierPlotObj <- ggplot2::ggplot(data, ggplot2::aes(x = Zscore, y = y)) +
      ggplot2::geom_point(ggplot2::aes(color = as.name("Outlier")), alpha=0.7)+
      ggplot2::scale_color_manual("Status", values = c("TRUE" = "red","FALSE" =priColor))+
      ggplot2::ylab(columnName)+
      ggplot2::theme_bw() +
      ggplot2::theme(panel.border=ggplot2::element_rect(size=0.1),panel.grid.minor.x=ggplot2::element_blank(),panel.grid.major.x=ggplot2::element_blank(),legend.position = "bottom") +
      ggplot2::xlab("Z-score")+
      ggplot2::geom_vline(xintercept = (cutoffValue),linetype = "dashed")+
      ggplot2::geom_vline(xintercept = -(cutoffValue), linetype = "dashed")
  }
  #conditionToBe
  if(optionalPlots) {
    # Convert to an interactive plotly widget; widen margins so axis labels
    # are not clipped after conversion.
    outlierPlotObj <- plotly::ggplotly(outlierPlotObj)
    outlierPlotObj$x$layout$margin$l <- outlierPlotObj$x$layout$margin$l + 30
    outlierPlotObj$x$layout$margin$b <- outlierPlotObj$x$layout$margin$b + 3
  }
  return(outlierPlotObj)
}

#Multivariate Outlier Plot Function
#' @name multiVarOutlierPlot
#' @title Multi-Variate Outlier Plot
#' @details Multivariate outlier plot using the selected columns from the dataframe
#' @param data the dataframe that needs to be loaded
#' @param depCol the name of column which is to be identified as dependent column
#' @param indepCol the name of an independent column
#' @param sizeCol the name of column used to define the size of point in plots
#' @param priColor the primary color for the plots
#' @param optionalPlots A Flag for optional plots
#' @param cutoffValue A p-value cutoff for detecting outliers
#' @return Outliers plot
#' @family Package EDA Utilites functions
#' @examples
#' \dontrun{
#' multiVarOutlierPlot(data = iris, depCol = "Sepal.Length",
#' indepCol = "Sepal.Width", sizeCol = "Petal.Length")
#' }
#' @export
multiVarOutlierPlot <- function(data, depCol, indepCol, sizeCol, priColor = "blue", optionalPlots = 0, cutoffValue = 0.05){
  # Drop rows with a missing dependent value before fitting.
  if(TRUE %in% unique(is.na(data[,depCol]))) {
    data <- data[-which(is.na(data[,depCol])== T),]
  }
  # Fit depCol ~ indepCol and flag observations via Bonferroni-adjusted
  # studentized residuals (car::outlierTest).
  indep_form <- paste(indepCol, collapse = "+")
  form <- paste(depCol, indep_form, sep = "~")
  form <- stats::formula(form)
  lmObject <- lm(form,data)
  limit <- nrow(data)
  outlierDetect <- car::outlierTest(lmObject, cutoffValue, n.max = limit)
  outlierDetect <- data.frame(outlierDetect[c(1,2,3)])
  outlierDetect <- round(outlierDetect, 4)
  colnames(outlierDetect) <- c("Studentized Residuals","P-Value", "P-Value(Bonferroni Correction)")
  # Rows named by outlierTest are the outliers; everything else is "Normal".
  data$Outlier <- ifelse(rownames(data) %in% rownames(outlierDetect),"Outlier","Normal")
  # NOTE(review): outlierTable is built but never returned or used.
  outlierTable <- data[data$Outlier == "Outlier", ]
  outlierTable <- cbind(outlierDetect,outlierTable)
  x<-data[,indepCol]
  y<-data[,depCol]
  size<-data[,sizeCol]
  outlierPlot <- ggplot2::ggplot(data, ggplot2::aes(x = x,y = y), alpha=0.6)+
    ggplot2::geom_point(ggplot2::aes(color = as.name("Outlier"), size = size), alpha=0.7)+
    ggplot2::scale_color_manual("",values = c("Outlier" = "red", "Normal" = priColor))+
    ggplot2::labs(title = paste(depCol,"vs",indepCol)) +
    ggplot2::theme_bw() +
    ggplot2::theme(panel.border=ggplot2::element_rect(size=0.1),panel.grid.minor.x=ggplot2::element_blank(),legend.position = "bottom") +
    ggplot2::ylab(depCol) + ggplot2::xlab(indepCol)
  #conditionToBe
  if(optionalPlots) {
    # Interactive variant with margin adjustments, as in outlierPlot().
    outlierPlot <- plotly::ggplotly(outlierPlot,tooltip=c("all"))
    outlierPlot$x$layout$margin$l <- outlierPlot$x$layout$margin$l + 30
    outlierPlot$x$layout$margin$b <- outlierPlot$x$layout$margin$b + 3
  }
  return(outlierPlot)
}

## Bivariate Plots
#' @name bivarPlots
#' @title Bi-Variate Plot
#' @details A bivariate distribution graph on the selected columns from the dataframe. The two selected columns are placed on the two axes and a plot is generated
#' @param dataset the dataframe that needs to be loaded
#' @param select_var_name_1 the name of first column on which the plot needs to be generated
#' @param select_var_name_2 the name of second column on which the plot needs to be generated
#' @param priColor the primary color for the plots
#' @param secColor A secondary color for the plots
#' @return Bivariate plot
#' @family Package EDA Utilites functions
#' @examples
#' bivarPlots(dataset = iris, select_var_name_1 = "Sepal.Length",
#' select_var_name_2 = "Sepal.Width")
#' @export
bivarPlots <- function(dataset, select_var_name_1, select_var_name_2, priColor = "blue", secColor= "black") {
  # Classify both columns so the right chart type can be picked below.
  numeric_cols <- unlist(getDatatype(dataset)['numeric_cols'])
  cat_cols <- unlist(getDatatype(dataset)['cat_cols'])
  if (select_var_name_1 %in% numeric_cols && select_var_name_2 %in% numeric_cols) {
    # numeric vs numeric: scatter plot plus a linear trend line
    x = dataset[, select_var_name_1]
    y = dataset[, select_var_name_2]
    bivarPlot <- ggplot2::ggplot(dataset, ggplot2::aes(x, y)) +
      ggplot2::geom_point(color = priColor, alpha = 0.7) +
      ggplot2::geom_smooth(method = lm, color = secColor) +
      ggplot2::xlab(select_var_name_1) +
      ggplot2::ylab(select_var_name_2) +
      ggplot2::theme_bw() +
      ggplot2::ggtitle(paste( 'Bivariate plot for', select_var_name_1, 'and', select_var_name_2, sep = ' ' )) +
      ggplot2::theme( plot.title = ggplot2::element_text(hjust = 0.5, size = 10), axis.text = ggplot2::element_text(size = 10), axis.title = ggplot2::element_text(size = 10) )
  } else if (select_var_name_1 %in% cat_cols && select_var_name_2 %in% cat_cols) {
    # categorical vs categorical: dodged bar chart of pairwise counts,
    # shaded along a priColor -> white -> secColor ramp
    new_df <- dataset %>% dplyr::group_by_(.dots=c(select_var_name_1,select_var_name_2)) %>% dplyr::summarise(n = dplyr::n())
    colfunc <- grDevices::colorRampPalette(c(priColor, "white" , secColor))
    colorvar <- length(unique(new_df[[select_var_name_2]]))
    a=as.vector(as.character(unique(new_df[[select_var_name_1]])))
    y=new_df[[select_var_name_1]]
    label=new_df[[select_var_name_2]]
    bivarPlot <-ggplot2::ggplot(new_df, ggplot2::aes(x = y, y= .data$n, fill = label)) +
      ggplot2::geom_bar(position = "dodge", stat = "identity",alpha=0.9) +
      ggplot2::guides(fill=ggplot2::guide_legend(title=select_var_name_2)) +
      ggplot2::coord_flip()+
      ggplot2::xlab(select_var_name_1) +
      ggplot2::ylab("count") +
      ggplot2::theme_bw() +
      ggplot2::ggtitle(paste('Bivariate plot for',select_var_name_1,'and',select_var_name_2,sep=' '))+
      ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5, size = 10),axis.text = ggplot2::element_text(size=10), axis.title=ggplot2::element_text(size=10),legend.position="bottom",axis.text.x=ggplot2::element_text(angle=45, hjust=1))+
      ggplot2::scale_fill_manual(values = colfunc(colorvar))
  } else {
    # mixed: numeric values plotted against the categorical levels (flipped)
    cols <- c(select_var_name_1, select_var_name_2)
    cat_col <- cols[which(cols %in% cat_cols)]
    num_col <- cols[which(cols %in% numeric_cols)]
    a = as.vector(as.character(unique(dataset[[cat_col]])))
    y = dataset[[cat_col]]
    x = dataset[[num_col]]
    bivarPlot <- ggplot2::ggplot(dataset, ggplot2::aes(x = y, y = x)) +
      ggplot2::geom_point(color = priColor, alpha = 0.7) +
      ggplot2::coord_flip() +
      ggplot2::xlab(cat_col) +
      ggplot2::ylab(num_col) +
      ggplot2::theme_bw() +
      ggplot2::ggtitle(paste( 'Bivariate plot for', select_var_name_1, 'and', select_var_name_2, sep = ' ' )) +
      ggplot2::theme( plot.title = ggplot2::element_text(hjust = 0.5, size = 10), axis.text = ggplot2::element_text(size = 10), axis.title = ggplot2::element_text(size = 10) )
  }
  return(bivarPlot)
}

## Correlation Matrix
#' @name correlationMatPlot
#' @title Correlation Matrix Plot
#' @description A correlation matrix is created and plotted across all the columns in the dataset
#' @param dataset the dataset that needs to be loaded
#' @param methodused methods to be used for computing correlation (passed to the `use` argument of stats::cor)
#' @return Correlation Matrix graph
#' @family Package EDA Utilites functions
#' @examples
#' correlationMatPlot(dataset = iris)
#' @export
correlationMatPlot <- function(dataset, methodused = "everything"){
  # Correlations are computed over numeric columns only.
  numeric_cols <- getDatatype(dataset)['numeric_cols']
  cormatrix <- base::round(stats::cor(dataset[,unlist(numeric_cols),drop=F], use = methodused),3)
  # capturePlot() records the corrplot drawing so it can be replayed later.
  return(R.devices::capturePlot(corrplot::corrplot(cormatrix, method = "color", outline = T, addgrid.col = "darkgray",
                                                   # order="hclust",
                                                   addrect = 4, rect.col = "black", rect.lwd = 5, cl.pos = "b",
                                                   tl.col = "black", tl.cex = 1, cl.cex = 1.5,
                                                   addCoef.col = "black", number.digits = 2, number.cex = 0.75,
                                                   type = "lower",
                                                   col = grDevices::colorRampPalette(c("red","white","green"))(200))))
}
##################
# MISC FUNCTIONS #
##################

## Return the column type
#' @name CheckColumnType
#' @title Check for type of column
#' @details Classifies a data vector as "numeric" (integer or double) or
#'   "character" (everything else, including factors, which this package
#'   treats as categorical).
#' @param dataVector a data vector of a column
#' @return column Type: "numeric" or "character"
#' @family Package EDA Utilites functions
#' @examples
#' CheckColumnType(iris$Sepal.Length)
#' @export
CheckColumnType <- function(dataVector) {
  # FIX: previously `class(dataVector) == "integer" || class(dataVector) ==
  # "numeric"`. class() can return a vector (e.g. c("ordered", "factor")),
  # which makes `==` elementwise and `||` an error on R >= 4.3.
  # is.numeric() is TRUE exactly for integer and double vectors and FALSE
  # for factors, matching the original intent.
  if (is.numeric(dataVector)) {
    "numeric"
  } else {
    "character"
  }
}

## Get numeric and categoric
#' @name getDatatype
#' @title Get Data Type
#' @details Based on the datatype the columns are separated into categorical
#'   and numerical columns. Since CheckColumnType only ever returns "numeric"
#'   or "character", the categorical set is simply the complement of the
#'   numeric set.
#' @param dataset a dataset which needs to be loaded
#' @return list with \code{numeric_cols} and \code{cat_cols}
#' @family Package EDA Utilites functions
#' @examples getDatatype(iris)
#' @export
getDatatype <- function(dataset){
  # vapply (not sapply) guarantees a logical result even for 0-column input.
  is_num <- vapply(dataset,
                   FUN = function(x){ CheckColumnType(x) == "numeric" },
                   FUN.VALUE = logical(1))
  return(list("numeric_cols" = colnames(dataset)[is_num],
              "cat_cols" = colnames(dataset)[!is_num]))
}
/R/r-batch-eda-utilities.R
no_license
zhaoxiaohe/analysisPipelines
R
false
false
17,098
r
###################################################################################################### # Title: Functions for various exploratory analysis operations on data frames # Author: Sanjay, Dheekshitha PS # Created on: June 14, 2018 # Description: Consolidated file for all functionsto be used out of the box for exploratory analysis ###################################################################################################### ######################## # FUNCTION DEFINITIONS # ######################## #' @name ignoreCols #' @title Ignores the columns in the loaded dataframe object #' @details The columns selected are removed from the object #' @param data the dataframe object that needs to be loaded #' @param columns the names of columns to be ignored from dataframe object #' @return Updated dataframe object #' @family Package EDA Utilites functions #' @examples #' ignoreCols(data = iris, columns = "Species") #' @export ignoreCols <- function(data, columns){ tryCatch({ if(all(columns %in% colnames(data))){ return(data[, setdiff(colnames(data), columns), drop = F]) }else{ mismatch <- colnames(data)[!all(columns %in% colnames(data))] stop(paste0("Columns ", paste0(mismatch, collapse = ", "), " are not present in the dataset")) } }, error = function(e){ stop(e) }, warning = function(e){ warning(e) }) } # Univariate Categoric Distribution function #' @name univarCatDistPlots #' @title Univariate Categoric Distribution #' @details A univariate distribution graph on the selected categorical columns from the dataframe #' @param data the dataset where the column on which the plot is to be generated is present #' @param uniCol the name of column on which the plot needs to be generated #' @param priColor the primary color for the plots #' @param optionalPlots A Flag for optional plots #' @return A univariate categoric distribution plot #' @family Package EDA Utilites functions #' @examples #' univarCatDistPlots(data = iris, uniCol = "Species") #' @export 
univarCatDistPlots <- function(data, uniCol, priColor = "blue", optionalPlots = 0){
  # Make missing values an explicit "NA" level so they appear as a category.
  levels(data[[uniCol]]) <- c(levels(data[[uniCol]]), "NA")
  data[[uniCol]][is.na(data[[uniCol]])] <- "NA"
  # Count the rows in each category of the selected column.
  data <- data %>% dplyr::group_by_(.dots = c(uniCol)) %>% dplyr::summarise(count = dplyr::n())
  # FIX: the original piped with `%>>%` (pipeR); only magrittr's `%>%` is used
  # (and attached) elsewhere in this file, so `%>>%` raised "could not find function".
  data %>% dplyr::arrange(.data$count) -> data
  # FIX: the category labels were captured *before* arrange(), so the plotly
  # branch paired sorted counts with unsorted labels; capture them afterwards.
  y = data[[uniCol]]
  catPlot <- ggplot2::ggplot(data, ggplot2::aes_(x = as.name(uniCol), y = as.name("count"))) +
    ggplot2::geom_bar(stat = "identity", fill = priColor, alpha = 0.7) +
    ggplot2::xlab(uniCol) + ggplot2::ylab("Frequency") + ggplot2::theme_bw() +
    ggplot2::theme(axis.title = ggplot2::element_text(size = 16),
                   panel.grid.major.y = ggplot2::element_blank(),
                   panel.border = ggplot2::element_rect(size = 0.1)) +
    ggplot2::coord_flip()
  # Optionally replace the static ggplot with an interactive plotly bar chart.
  if(optionalPlots){
    catPlot <- plotly::plot_ly(y = y, x = data[["count"]], type = "bar", orientation = 'h', color = I(priColor)) %>%
      plotly::layout(title = paste0("Frequency Histogram for ", uniCol),
                     xaxis = list(title = "Frequency"),
                     yaxis = list(title = uniCol))
  }
  return(catPlot)
}

#Outlier Plot Function
#' @name outlierPlot
#' @title Outlier detection plot
#' @details Outliers are identified on the selected column from the dataframe
#' @param data the dataframe that needs to be loaded
#' @param method the method on which outliers are to be identified
#' @param columnName the name of column for which the outliers are identified
#' @param cutoffValue the cut off value to define the threshold for outliers
#' @param priColor the primary color for the plots
#' @param optionalPlots A Flag for optional plots
#' @return Outliers plot object
#' @family Package EDA Utilites functions
#' @examples
#' \dontrun{
#' outlierPlot(data = iris, columnName = "Sepal.Length")
#' }
#' @export
outlierPlot <- function(data, method = "iqr", columnName, cutoffValue = 0.05, priColor = "blue", optionalPlots = 0){
  # Drop rows where the target column is missing before any computation.
  if(TRUE %in% unique(is.na(data[,columnName]))){
    data <- data[-which(is.na(data[,columnName])),]
  }
  if(method == "iqr"){
    # Tukey fences: 1.5 * IQR beyond the first/third quartile.
    lower <- stats::quantile(data[,columnName], .25, na.rm = T) - 1.5*(stats::IQR(data[,columnName], na.rm = T))
    upper <- stats::quantile(data[,columnName], .75, na.rm = T) + 1.5*(stats::IQR(data[,columnName], na.rm = T))
    data$Outlier <- data[,columnName] > upper | data[,columnName] < lower
    outlierPlotObj <- ggplot2::ggplot(data, ggplot2::aes(x = "", y = data[,columnName])) +
      ggplot2::geom_boxplot(fill = priColor, alpha = 0.7) +
      ggplot2::theme_bw() +
      ggplot2::theme(panel.border = ggplot2::element_rect(size = 0.1),
                     panel.grid.minor.x = ggplot2::element_blank(),
                     panel.grid.major.x = ggplot2::element_blank(),
                     legend.position = "bottom") +
      ggplot2::ylab(columnName) + ggplot2::xlab("")
  }
  if(method == "percentile"){
    # Values outside the [cutoffValue, 1 - cutoffValue] quantile band are outliers.
    lower <- stats::quantile(data[,columnName], cutoffValue, na.rm = T)
    upper <- stats::quantile(data[,columnName], (1-cutoffValue), na.rm = T)
    data$Outlier <- data[,columnName] > upper | data[,columnName] < lower
    Outlier <- data$Outlier
    Value <- data[,columnName]
    outlierPlotObj <- ggplot2::ggplot(data) +
      ggplot2::geom_histogram(ggplot2::aes(x = Value, fill = as.name("Outlier")), bins = 30, alpha = 0.7) +
      ggplot2::scale_fill_manual(values = c(priColor, "red"), breaks = c("FALSE", "TRUE"),
                                 labels = c("Normal", "Outlier"), name = "Status") +
      ggplot2::theme_bw() +
      ggplot2::theme(panel.border = ggplot2::element_rect(size = 0.1),
                     panel.grid.minor.x = ggplot2::element_blank(),
                     panel.grid.major.x = ggplot2::element_blank(),
                     legend.position = "bottom") +
      ggplot2::xlab(columnName)
  }
  if(method == "z_score"){
    # Here cutoffValue is interpreted as a z-score threshold, not a probability.
    lower <- mean(data[,columnName], na.rm = T) - (cutoffValue*(stats::sd(data[,columnName], na.rm = T)))
    upper <- mean(data[,columnName], na.rm = T) + (cutoffValue*(stats::sd(data[,columnName], na.rm = T)))
    data$Outlier <- data[,columnName] > upper | data[,columnName] < lower
    data$zScore <- scale(data[,columnName], center = T, scale = T)
    Zscore <- as.vector(data$zScore)
    y <- data[,columnName]
    outlierPlotObj <- ggplot2::ggplot(data, ggplot2::aes(x = Zscore, y = y)) +
      ggplot2::geom_point(ggplot2::aes(color = as.name("Outlier")), alpha = 0.7) +
      ggplot2::scale_color_manual("Status", values = c("TRUE" = "red", "FALSE" = priColor)) +
      ggplot2::ylab(columnName) +
      ggplot2::theme_bw() +
      ggplot2::theme(panel.border = ggplot2::element_rect(size = 0.1),
                     panel.grid.minor.x = ggplot2::element_blank(),
                     panel.grid.major.x = ggplot2::element_blank(),
                     legend.position = "bottom") +
      ggplot2::xlab("Z-score") +
      ggplot2::geom_vline(xintercept = (cutoffValue), linetype = "dashed") +
      ggplot2::geom_vline(xintercept = -(cutoffValue), linetype = "dashed")
  }
  #conditionToBe
  # Convert to an interactive plotly chart and widen margins for long labels.
  if(optionalPlots) {
    outlierPlotObj <- plotly::ggplotly(outlierPlotObj)
    outlierPlotObj$x$layout$margin$l <- outlierPlotObj$x$layout$margin$l + 30
    outlierPlotObj$x$layout$margin$b <- outlierPlotObj$x$layout$margin$b + 3
  }
  return(outlierPlotObj)
}

#Multivariate Outlier Plot Function
#' @name multiVarOutlierPlot
#' @title Multi-Variate Outlier Plot
#' @details Multivariate outlier plot using the selected columns from the dataframe
#' @param data the dataframe that needs to be loaded
#' @param depCol the name of column which is to be identified as dependent column
#' @param indepCol the name of an independent column
#' @param sizeCol the name of column used to define the size of point in plots
#' @param priColor the primary color for the plots
#' @param optionalPlots A Flag for optional plots
#' @param cutoffValue A p-value cutoff for detecting outliers
#' @return Outliers plot
#' @family Package EDA Utilites functions
#' @examples
#' \dontrun{
#' multiVarOutlierPlot(data = iris, depCol = "Sepal.Length",
#' indepCol = "Sepal.Width", sizeCol = "Petal.Length")
#' }
#' @export
multiVarOutlierPlot <- function(data, depCol, indepCol, sizeCol, priColor = "blue", optionalPlots = 0, cutoffValue = 0.05){
  # Drop rows with a missing dependent value; lm() could not use them anyway.
  if(TRUE %in% unique(is.na(data[,depCol]))) {
    data <- data[-which(is.na(data[,depCol])== T),]
  }
  # Build "dep ~ indep1 + indep2 + ..." and fit a linear model.
  indep_form <- paste(indepCol, collapse = "+")
  form <- paste(depCol, indep_form, sep = "~")
  form <- stats::formula(form)
  lmObject <- lm(form, data)
  limit <- nrow(data)
  # Bonferroni outlier test on the studentized residuals of the fit.
  outlierDetect <- car::outlierTest(lmObject, cutoffValue, n.max = limit)
  outlierDetect <- data.frame(outlierDetect[c(1,2,3)])
  outlierDetect <- round(outlierDetect, 4)
  colnames(outlierDetect) <- c("Studentized Residuals","P-Value", "P-Value(Bonferroni Correction)")
  data$Outlier <- ifelse(rownames(data) %in% rownames(outlierDetect), "Outlier", "Normal")
  # NOTE(review): outlierTable is assembled but never returned or used below -
  # kept for compatibility; consider exposing it to callers.
  outlierTable <- data[data$Outlier == "Outlier", ]
  outlierTable <- cbind(outlierDetect, outlierTable)
  x <- data[,indepCol]
  y <- data[,depCol]
  size <- data[,sizeCol]
  outlierPlot <- ggplot2::ggplot(data, ggplot2::aes(x = x, y = y), alpha = 0.6) +
    ggplot2::geom_point(ggplot2::aes(color = as.name("Outlier"), size = size), alpha = 0.7) +
    ggplot2::scale_color_manual("", values = c("Outlier" = "red", "Normal" = priColor)) +
    ggplot2::labs(title = paste(depCol, "vs", indepCol)) +
    ggplot2::theme_bw() +
    ggplot2::theme(panel.border = ggplot2::element_rect(size = 0.1),
                   panel.grid.minor.x = ggplot2::element_blank(),
                   legend.position = "bottom") +
    ggplot2::ylab(depCol) + ggplot2::xlab(indepCol)
  #conditionToBe
  if(optionalPlots) {
    outlierPlot <- plotly::ggplotly(outlierPlot, tooltip = c("all"))
    outlierPlot$x$layout$margin$l <- outlierPlot$x$layout$margin$l + 30
    outlierPlot$x$layout$margin$b <- outlierPlot$x$layout$margin$b + 3
  }
  return(outlierPlot)
}

## Bivariate Plots
#' @name bivarPlots
#' @title Bi-Variate Plot
#' @details A bivariate distribution graph on the selected columns from the dataframe. Selected two columns are on two axis' and a plot is generated
#' @param dataset the dataframe that needs to be loaded
#' @param select_var_name_1 the name of first column on which the plot needs to be generated
#' @param select_var_name_2 the name of second column on which the plot needs to be generated
#' @param priColor the primary color for the plots
#' @param secColor A secondary color for the plots
#' @return Bivariate plot
#' @family Package EDA Utilites functions
#' @examples
#' bivarPlots(dataset = iris, select_var_name_1 = "Sepal.Length",
#' select_var_name_2 = "Sepal.Width")
#'
#' @export
bivarPlots <- function(dataset, select_var_name_1, select_var_name_2, priColor = "blue", secColor = "black") {
  numeric_cols <- unlist(getDatatype(dataset)['numeric_cols'])
  cat_cols <- unlist(getDatatype(dataset)['cat_cols'])
  # Case 1: numeric vs numeric -> scatter plot with a linear smoother.
  if (select_var_name_1 %in% numeric_cols && select_var_name_2 %in% numeric_cols) {
    x = dataset[, select_var_name_1]
    y = dataset[, select_var_name_2]
    bivarPlot <- ggplot2::ggplot(dataset, ggplot2::aes(x, y)) +
      ggplot2::geom_point(color = priColor, alpha = 0.7) +
      ggplot2::geom_smooth(method = lm, color = secColor) +
      ggplot2::xlab(select_var_name_1) +
      ggplot2::ylab(select_var_name_2) +
      ggplot2::theme_bw() +
      ggplot2::ggtitle(paste('Bivariate plot for', select_var_name_1, 'and', select_var_name_2, sep = ' ')) +
      ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5, size = 10),
                     axis.text = ggplot2::element_text(size = 10),
                     axis.title = ggplot2::element_text(size = 10))
  } else if (select_var_name_1 %in% cat_cols && select_var_name_2 %in% cat_cols) {
    # Case 2: categorical vs categorical -> dodged bar chart of pair counts.
    new_df <- dataset %>% dplyr::group_by_(.dots = c(select_var_name_1, select_var_name_2)) %>% dplyr::summarise(n = dplyr::n())
    colfunc <- grDevices::colorRampPalette(c(priColor, "white" , secColor))
    colorvar <- length(unique(new_df[[select_var_name_2]]))
    a = as.vector(as.character(unique(new_df[[select_var_name_1]])))
    y = new_df[[select_var_name_1]]
    label = new_df[[select_var_name_2]]
    bivarPlot <- ggplot2::ggplot(new_df, ggplot2::aes(x = y, y = .data$n, fill = label)) +
      ggplot2::geom_bar(position = "dodge", stat = "identity", alpha = 0.9) +
      ggplot2::guides(fill = ggplot2::guide_legend(title = select_var_name_2)) +
      ggplot2::coord_flip() +
      ggplot2::xlab(select_var_name_1) +
      ggplot2::ylab("count") +
      ggplot2::theme_bw() +
      ggplot2::ggtitle(paste('Bivariate plot for', select_var_name_1, 'and', select_var_name_2, sep = ' ')) +
      ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5, size = 10),
                     axis.text = ggplot2::element_text(size = 10),
                     axis.title = ggplot2::element_text(size = 10),
                     legend.position = "bottom",
                     axis.text.x = ggplot2::element_text(angle = 45, hjust = 1)) +
      ggplot2::scale_fill_manual(values = colfunc(colorvar))
  } else {
    # Case 3: mixed -> flipped strip plot (category on the vertical axis).
    cols <- c(select_var_name_1, select_var_name_2)
    cat_col <- cols[which(cols %in% cat_cols)]
    num_col <- cols[which(cols %in% numeric_cols)]
    a = as.vector(as.character(unique(dataset[[cat_col]])))
    y = dataset[[cat_col]]
    x = dataset[[num_col]]
    bivarPlot <- ggplot2::ggplot(dataset, ggplot2::aes(x = y, y = x)) +
      ggplot2::geom_point(color = priColor, alpha = 0.7) +
      ggplot2::coord_flip() +
      ggplot2::xlab(cat_col) +
      ggplot2::ylab(num_col) +
      ggplot2::theme_bw() +
      ggplot2::ggtitle(paste('Bivariate plot for', select_var_name_1, 'and', select_var_name_2, sep = ' ')) +
      ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5, size = 10),
                     axis.text = ggplot2::element_text(size = 10),
                     axis.title = ggplot2::element_text(size = 10))
  }
  return(bivarPlot)
}

## Correlation Matrix
#' @name correlationMatPlot
#' @title Correlation Matrix Plot
#' @description A correlation matrix is created and plotted across all the columns in the dataset
#' @param dataset the dataset that needs to be loaded
#' @param methodused methods to be used for computing correlation
#' @return Correlation Matrix graph
#' @family Package EDA Utilites functions
#' @examples
#' correlationMatPlot(dataset = iris)
#' @export
correlationMatPlot <- function(dataset, methodused = "everything"){
  # Correlations are only defined for the numeric columns.
  numeric_cols <- getDatatype(dataset)['numeric_cols']
  cormatrix <- base::round(stats::cor(dataset[, unlist(numeric_cols), drop = F], use = methodused), 3)
  # capturePlot() records the corrplot so it can be replayed by the caller.
  return(R.devices::capturePlot(corrplot::corrplot(cormatrix, method = "color",
                                                   outline = T, addgrid.col = "darkgray",
                                                   # order="hclust",
                                                   addrect = 4, rect.col = "black", rect.lwd = 5,
                                                   cl.pos = "b", tl.col = "black",
                                                   tl.cex = 1, cl.cex = 1.5, addCoef.col = "black",
                                                   number.digits = 2, number.cex = 0.75,
                                                   type = "lower",
                                                   col = grDevices::colorRampPalette(c("red","white","green"))(200))))
}
##################
# MISC FUNCTIONS #
##################

## Return the column type
#' @name CheckColumnType
#' @title Check for type of column
#' @details Checking for type of columns in the datavector
#' @param dataVector a data vector of a column
#' @return column Type
#' @family Package EDA Utilites functions
#' @examples
#' CheckColumnType(iris$Sepal.Length)
#' @export
CheckColumnType <- function(dataVector) {
  # Classify the vector as "numeric" (integer or double) or "character"
  # (everything else, including factors).
  # FIX: `class(x) == "integer" || ...` breaks on vectors with more than one
  # class attribute (e.g. c("ordered", "factor")) because `||` requires a
  # scalar condition in modern R; is.numeric() is TRUE for exactly the
  # integer/double case the original tested for.
  if (is.numeric(dataVector)) {
    columnType <- "numeric"
  } else {
    columnType <- "character"
  }
  #Return the result
  return(columnType)
}

## Get numeric and categoric
#' @name getDatatype
#' @title Get Data Type
#' @details Based on the datatype the columns are seperated into categorical and numerical columns
#' @param dataset a dataset which needs to be loaded
#' @return list with \code{numeric_cols} and \code{cat_cols}
#' @family Package EDA Utilites functions
#' @examples getDatatype(iris)
#' @export
getDatatype <- function(dataset){
  # vapply guarantees a logical result per column (sapply's return type can
  # vary); the previous `== "factor"` comparison was dead code because
  # CheckColumnType() only ever returns "numeric" or "character".
  numeric_cols <- colnames(dataset)[vapply(dataset, FUN = function(x){
    CheckColumnType(x) == "numeric"}, FUN.VALUE = logical(1))]
  cat_cols <- colnames(dataset)[vapply(dataset, FUN = function(x){
    CheckColumnType(x) == "character"}, FUN.VALUE = logical(1))]
  return(list("numeric_cols" = numeric_cols , "cat_cols" = cat_cols))
}
# Qualitative variable modeling (classification problem)
# data source: https://www.kaggle.com/sakshigoyal7/credit-card-customers
# exploratory data analysis done additionally in Python at:
# https://github.com/pzeb23/Python_code/tree/master/Non-parametric_models
library(tree)
library(rpart)
library(rpart.plot)

### Load the data
setwd("~/Projects/AnalysisInR")
churners <- read.csv("BankChurners.csv")

### Randomly split the data into a training set and a test set (3:1)
set.seed(123)
testowe.nr <- sample(x = nrow(churners), size = nrow(churners)/4, replace = F)
#testowe.nr <- sample(x = nrow(spam), size = nrow(spam)/4, replace = F)
churners.uczacy <- churners[-testowe.nr, ]
churners.testowy <- churners[testowe.nr, ]
#spam.uczacy <- spam[-testowe.nr, ]
#spam.testowy <- spam[testowe.nr, ]

### Build a classification tree (target variable: Attrition_Flag)
drzewo.churners0 <- rpart(Attrition_Flag ~ ., data = churners.uczacy)
drzewo.churners0
summary(drzewo.churners0)
drzewo.churners0$variable.importance
rpart.rules(drzewo.churners0, style = "tallw")

# Draw the tree
prp(drzewo.churners0)
rpart.plot(drzewo.churners0)

# Determine the optimal model parameters: grow a full tree (cp = 0),
# then pick cp via the 1-SE rule on the cross-validation table.
drzewo.churners1 <- rpart(Attrition_Flag ~ .,
                          data = churners.uczacy,
                          control = rpart.control(cp = 0))
drzewo.churners1$cptable
printcp(drzewo.churners1)
plotcp(drzewo.churners1)

# manual computation: take the row with the smallest xerror
# and then add that row's xerror and xstd
0.3436197 + 0.01646342
# which tree has an xerror smaller than 0.3600831
# and which of those is the least complex?
# 10 0.00500417 20 0.29191 0.35780 0.016780 <-- this is our candidate
# take its CP = 0.00500417 and plug it into the model
drzewo.churners2 <- rpart(Attrition_Flag ~ .,
                          data = churners.uczacy,
                          control = rpart.control(cp = 0.00500417))
drzewo.churners2$cptable
printcp(drzewo.churners2)
# second plotcp: two points are still below the line
plotcp(drzewo.churners2)
# so we could prune a bit further
# manual computation
# 11 0.0050042 22 0.28190 0.34445 0.016482 <-- row with the smallest xerror
0.34445 + 0.016482
# = 0.360932 <-- which node has an xerror smaller than this and is the least complex?
#
# 10 0.0050042 20 0.29191 0.35363 0.016688 < -- this is our candidate
#
# take its CP and plug it into the model:
drzewo.churners3 <- rpart(Attrition_Flag ~ .,
                          data = churners.uczacy,
                          control = rpart.control(cp = 0.005))
drzewo.churners3$cptable
printcp(drzewo.churners3)
plotcp(drzewo.churners3)

# Further pruning did not yield better results
#
# 0.37531 + 0.017160
# drzewo.churners <- rpart(Attrition_Flag ~ .,
#                          data = churners.uczacy,
#                          control = rpart.control(cp = 0.017))
# drzewo.churners$cptable
# printcp(drzewo.churners)
# plotcp(drzewo.churners)

# Read off variable importance
drzewo.churners3$variable.importance
cbind(drzewo.churners3$variable.importance)
dotchart(sort(drzewo.churners3$variable.importance, decreasing = F), pch = 16)

# Check the model's accuracy on the test set
predykcje.klasy <- predict(drzewo.churners3, newdata = churners.testowy, type = "class")
# misclassification rate on the held-out quarter of the data
mean(predykcje.klasy != churners.testowy$Attrition_Flag)
# 6%; after pruning it would be even better
table(Rzeczywiste = churners.testowy$Attrition_Flag,
      Przewidywane = predykcje.klasy)
predykcje.prob <- predict(drzewo.churners3, newdata = churners.testowy, type = "prob")
head(predykcje.prob)
# distribution of predicted attrition probabilities for true attriters
hist(predykcje.prob[churners.testowy$Attrition_Flag == "Attrited Customer", "Attrited Customer"])
/BankChurnersModel.R
no_license
pzeb23/AnalysisInR
R
false
false
3,833
r
# Qualitative variable modeling (classification problem) # data source: https://www.kaggle.com/sakshigoyal7/credit-card-customers # exploratory data alysis done additionaly in Python at: # https://github.com/pzeb23/Python_code/tree/master/Non-parametric_models library(tree) library(rpart) library(rpart.plot) ### Załadowanie danych setwd("~/Projects/AnalysisInR") churners <- read.csv("BankChurners.csv") ### Podzielić dane losowo na zbiór uczący i zbiór testowy set.seed(123) testowe.nr <- sample(x = nrow(churners), size = nrow(churners)/4, replace = F) #testowe.nr <- sample(x = nrow(spam), size = nrow(spam)/4, replace = F) churners.uczacy <- churners[-testowe.nr, ] churners.testowy <- churners[testowe.nr, ] #spam.uczacy <- spam[-testowe.nr, ] #spam.testowy <- spam[testowe.nr, ] ### Zbudować drzewo klasyfikacyjne (zmienna celu: Spam) drzewo.churners0 <- rpart(Attrition_Flag ~ ., data = churners.uczacy) drzewo.churners0 summary(drzewo.churners0) drzewo.churners0$variable.importance rpart.rules(drzewo.churners0, style = "tallw") # Narysować wykres drzewa prp(drzewo.churners0) rpart.plot(drzewo.churners0) # Ustalić optymalne parametry modelu drzewo.churners1 <- rpart(Attrition_Flag ~ ., data = churners.uczacy, control = rpart.control(cp = 0)) drzewo.churners1$cptable printcp(drzewo.churners1) plotcp(drzewo.churners1) # reczne obliczanie, bierzemy wiersz z najmniejszym xerror # a nastepnie dodajemy xerror i xstd tego wiersza 0.3436197 + 0.01646342 # które drzewo ma xerror mniejsze od 0.3600831 # oraz ktore z nich jest najmniej złożone? 
# 10 0.00500417 20 0.29191 0.35780 0.016780 <-- to nasz kandydat # bierzemy jego CP = 0.00500417 i podstawiamy do modelu drzewo.churners2 <- rpart(Attrition_Flag ~ ., data = churners.uczacy, control = rpart.control(cp = 0.00500417)) drzewo.churners2$cptable printcp(drzewo.churners2) # drugi raz plot cp, nadal 2 sa pod kreska plotcp(drzewo.churners2) # wiec mozna by jeszcze przyciac # reczne obliczanie # 11 0.0050042 22 0.28190 0.34445 0.016482 <-- wiersz z najmniejszym xerror 0.34445 + 0.016482 # = 0.360932 <-- który węzeł ma xerror mniejszy od tego i jest najmniej złożony? # # 10 0.0050042 20 0.29191 0.35363 0.016688 < -- to nasz kandydat # # bierzemy jego CP i podstawiamy do modelu: drzewo.churners3 <- rpart(Attrition_Flag ~ ., data = churners.uczacy, control = rpart.control(cp = 0.005)) drzewo.churners3$cptable printcp(drzewo.churners3) plotcp(drzewo.churners3) # Dalsze przycinanie nie przyniosło lepszych efektów # # 0.37531 + 0.017160 # drzewo.churners <- rpart(Attrition_Flag ~ ., # data = churners.uczacy, # control = rpart.control(cp = 0.017)) # drzewo.churners$cptable # printcp(drzewo.churners) # plotcp(drzewo.churners) # Odczytać ważność zmiennych drzewo.churners3$variable.importance cbind(drzewo.churners3$variable.importance) dotchart(sort(drzewo.churners3$variable.importance, decreasing = F), pch = 16) # Sprawdzić dokładność modelu na zbiorze testowym predykcje.klasy <- predict(drzewo.churners3, newdata = churners.testowy, type = "class") mean(predykcje.klasy != churners.testowy$Attrition_Flag) # 6%, jak sie przytnie to bedzie jeszcze lepiej table(Rzeczywiste = churners.testowy$Attrition_Flag, Przewidywane = predykcje.klasy) predykcje.prob <- predict(drzewo.churners3, newdata = churners.testowy, type = "prob") head(predykcje.prob) hist(predykcje.prob[churners.testowy$Attrition_Flag == "Attrited Customer", "Attrited Customer"])
#' Give consent to do things that require permission
#'
#' Potentially destructive actions require that you confirm that you really
#' want to do them. If you're running a script and you know that you want to
#' perform those actions, you can preemptively provide \code{consent}.
#'
#' @param expr Code to evaluate with consent
#' @return \code{consent} returns an S3 class "contextManager" object, which
#' you can use with \code{with}. \code{with_consent} evaluates its arguments
#' inside the \code{consent} context.
#' @seealso \link{with-context-manager} \link{ContextManager}
#' @examples
#' \dontrun{
#' with(consent(), delete(ds))
#' # Equivalent to:
#' with_consent(delete(ds))
#' }
#' @export
consent <- function () {
    temp.options(crunch.require.confirmation=FALSE)
}

#' @rdname consent
#' @export
with_consent <- function (expr) {
    ## eval.parent so `expr` sees the caller's environment, not this one.
    with(consent(), eval.parent(expr))
}

askForPermission <- function (prompt="") {
    ## If options explicitly say we don't need to ask, bail.
    ## Have to check that it's FALSE and not NULL. Silence doesn't mean consent.
    must.confirm <- getOption("crunch.require.confirmation", TRUE)
    ## FIX: `must.confirm == FALSE` yields NA (an invalid `if` condition) when
    ## the option happens to be set to NA; identical() is the exact scalar
    ## check the comment above describes.
    if (identical(must.confirm, FALSE)) return(TRUE)
    ## If we're here but not interactive, we can't give permission.
    if (!is.interactive()) return(FALSE)
    prompt <- paste(prompt, "(y/n) ")
    proceed <- ""
    ## Re-prompt until the user gives an unambiguous yes/no answer.
    while (!(proceed %in% c("y", "n"))) {
        proceed <- tolower(readline(prompt))
    }
    return(proceed == "y")
}

is.interactive <- function () interactive()
## Alias this so that we can mock it out
/R/consent.R
no_license
npelikan/rcrunch
R
false
false
1,559
r
#' Give consent to do things that require permission #' #' Potentially destructive actions require that you confirm that you really #' want to do them. If you're running a script and you know that you want to #' perform those actions, you can preemptively provide \code{consent}. #' #' @param expr Code to evaluate with consent #' @return \code{consent} returns an S3 class "contextManager" object, which #' you can use with \code{with}. \code{with_consent} evaluates its arguments #' inside the \code{consent} context. #' @seealso \link{with-context-manager} \link{ContextManager} #' @examples #' \dontrun{ #' with(consent(), delete(ds)) #' # Equivalent to: #' with_consent(delete(ds)) #' } #' @export consent <- function () { temp.options(crunch.require.confirmation=FALSE) } #' @rdname consent #' @export with_consent <- function (expr) { with(consent(), eval.parent(expr)) } askForPermission <- function (prompt="") { ## If options explicitly say we don't need to ask, bail. ## Have to check that it's FALSE and not NULL. Silence doesn't mean consent. must.confirm <- getOption("crunch.require.confirmation", TRUE) if (must.confirm == FALSE) return(TRUE) ## If we're here but not interactive, we can't give permission. if (!is.interactive()) return(FALSE) prompt <- paste(prompt, "(y/n) ") proceed <- "" while (!(proceed %in% c("y", "n"))) { proceed <- tolower(readline(prompt)) } return(proceed == "y") } is.interactive <- function () interactive() ## Alias this so that we can mock it out
# Run a stepwise covariate search with the Certara NLME8 engine and poll
# until the remote job completes.
# Point the NLME engine at its installation directory (Windows path).
Sys.setenv("INSTALLDIR"="C:/PROGRA~1/R/R-3.5.1/library/Certara.NLME8/InstallDirNLME")
library(Certara.NLME8)

# NOTE(review): `hosts` must already be defined in the calling environment
# (e.g. by a setup script) - TODO confirm.
host=hosts[[1]]
dataset = NlmeDataset(workingDir=getwd())
# FOCE-ELS estimation with an iteration cap of 1000.
param = NlmeEngineExtraParams(PARAMS_METHOD=METHOD_FOCE_ELS,
                              PARAMS_NUM_ITERATIONS=1000)
# Covariate effect model is read from a text file in the working directory.
cm=ReadNlmeCovariateEffectModel("covariates.txt")
# Stepwise search settings: 0.05, 0.001 and the "-2LL" criterion -
# presumably the add/remove thresholds; verify against the Certara.NLME8 docs.
sp = NlmeStepwiseParams(0.05, 0.001, "-2LL")
# Remove any stale progress file left over from a previous run.
file.remove("progress.xml")
job=RunStepwiseSearch(host,dataset,param,cm,sp,workingDir=getwd())
# Poll the job status every 5 seconds until it finishes or fails.
while (!(NlmeJobStatus(job) == "Finished" || NlmeJobStatus(job) == "Failed") )
{
    print(NlmeJobStatus(job))
    print(job)
    Sys.sleep(5)
}
# Final job summary.
print(job)
/Certara.NLME8/inst/extdata/Stepwise/run.r
no_license
phxnlmedev/rpackages
R
false
false
641
r
Sys.setenv("INSTALLDIR"="C:/PROGRA~1/R/R-3.5.1/library/Certara.NLME8/InstallDirNLME") library(Certara.NLME8) host=hosts[[1]] dataset = NlmeDataset(workingDir=getwd()) param = NlmeEngineExtraParams(PARAMS_METHOD=METHOD_FOCE_ELS, PARAMS_NUM_ITERATIONS=1000) cm=ReadNlmeCovariateEffectModel("covariates.txt") sp = NlmeStepwiseParams(0.05, 0.001, "-2LL") file.remove("progress.xml") job=RunStepwiseSearch(host,dataset,param,cm,sp,workingDir=getwd()) while (!(NlmeJobStatus(job) == "Finished" || NlmeJobStatus(job) == "Failed") ) { print(NlmeJobStatus(job)) print(job) Sys.sleep(5) } print(job)
source("load_data.R")

# Render a 2x2 panel figure of the household power-consumption series to
# "plot4.png" (400x400 px). If `data` is NULL it is loaded via load_data();
# the data frame must provide Time, Global_active_power, Voltage,
# Sub_metering_1..3 and Global_reactive_power columns.
plot4 <- function(data=NULL) {
  if(is.null(data)) data <- load_data()

  png("plot4.png", width=400, height=400)
  par(mfrow=c(2,2))

  # 1: global active power over time
  plot(data$Time, data$Global_active_power, type="l",
       xlab="", ylab="Global Active Power")

  # 2: voltage over time
  plot(data$Time, data$Voltage, type="l",
       xlab="datetime", ylab="Voltage")

  # 3: the three sub-metering series, overlaid, with a legend
  plot(data$Time, data$Sub_metering_1, type="l", col="black",
       xlab="", ylab="Energy sub metering")
  lines(data$Time, data$Sub_metering_2, col="red")
  lines(data$Time, data$Sub_metering_3, col="blue")
  legend("topright", col=c("black", "red", "blue"),
         c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lty=1, box.lwd=0)

  # 4: global reactive power over time (empty frame, then the line)
  plot(data$Time, data$Global_reactive_power, type="n",
       xlab="datetime", ylab="Global_reactive_power")
  lines(data$Time, data$Global_reactive_power)

  # FIX: removed the stray literals `34` and `35` (pasted editor line numbers)
  # that sat as bare expressions at the end of the function body and made the
  # function accidentally return 35.
  dev.off()
}
/plot4.R
no_license
psouthekal/datasciencecoursera
R
false
false
1,351
r
source("load_data.R") plot4 <- function(data=NULL) { if(is.null(data)) data <- load_data() png("plot4.png", width=400, height=400) par(mfrow=c(2,2)) # 1 plot(data$Time, data$Global_active_power, type="l", xlab="", ylab="Global Active Power") # 2 plot(data$Time, data$Voltage, type="l", xlab="datetime", ylab="Voltage") # 3 plot(data$Time, data$Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering") lines(data$Time, data$Sub_metering_2, col="red") lines(data$Time, data$Sub_metering_3, col="blue") legend("topright", col=c("black", "red", "blue"), c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, box.lwd=0) # 4 plot(data$Time, data$Global_reactive_power, type="n", xlab="datetime", ylab="Global_reactive_power") lines(data$Time, data$Global_reactive_power) dev.off() 34 35 }
% Generated by roxygen2 (4.0.1): do not edit by hand \name{messages_info} \alias{messages_info} \title{Extract information from a list of messages} \usage{ messages_info(messages, what = "id") } \arguments{ \item{messages}{The output from \link{messages}.} \item{what}{Either \code{'id'}, \code{'threadId'}, \code{'nextPageToken'}, \code{'resultSizeEstimate'}.} } \value{ For \code{what = 'id'} or \code{what = 'threadId'} a vector with the message ids or thread ids, respectively. Otherwise the next page token or result size estimate information. } \description{ Extract the message ids, thread ids, the next page token or the result size estimate from a \code{gmail_messages} object created with \link{messages}. } \examples{ \dontrun{ #Search for R, return 10 results using label 1 including spam and trash folders my_messages = messages("R", 10, "label_1", TRUE) ## Extract the ids from the messages ids <- messages_info(my_messages) ## You can then use these ids as input for other functions. } } \author{ L. Collado-Torres <lcollado@jhu.edu> } \seealso{ \link{messages} }
/man/messages_info.Rd
no_license
lcolladotor/gmailr
R
false
false
1,083
rd
% Generated by roxygen2 (4.0.1): do not edit by hand \name{messages_info} \alias{messages_info} \title{Extract information from a list of messages} \usage{ messages_info(messages, what = "id") } \arguments{ \item{messages}{The output from \link{messages}.} \item{what}{Either \code{'id'}, \code{'threadId'}, \code{'nextPageToken'}, \code{'resultSizeEstimate'}.} } \value{ For \code{what = 'id'} or \code{what = 'threadId'} a vector with the message ids or thread ids, respectively. Otherwise the next page token or result size estimate information. } \description{ Extract the message ids, thread ids, the next page token or the result size estimate from a \code{gmail_messages} object created with \link{messages}. } \examples{ \dontrun{ #Search for R, return 10 results using label 1 including spam and trash folders my_messages = messages("R", 10, "label_1", TRUE) ## Extract the ids from the messages ids <- messages_info(my_messages) ## You can then use these ids as input for other functions. } } \author{ L. Collado-Torres <lcollado@jhu.edu> } \seealso{ \link{messages} }
library(stringr)
library(purrr)
library(dplyr)
library(httr)

#POSSIBLY REMOVE THESE LINES IF WE HAVE TO FOR AUTOMATION
#library(rstudioapi)
#API_KEY <- askForPassword(prompt = "Enter Mapbox API Key")
#--END PROPOSED REMOVES

# Fill in missing latitude/longitude values in `df` by geocoding the address
# columns (address, city, state, zip_code) against the Mapbox API. Only rows
# with NA or 0 coordinates are geocoded; existing coordinates are preserved.
# Requires a global `API_KEY` to be defined.
run_geocode <- function(df) {
  # STEP 1: keep only the rows that still need coordinates
  df_filter <- df %>% filter(is.na(latitude) | is.na(longitude) | latitude==0 | longitude==0)
  # STEP 2: collapse the address parts into a single search string per row
  df_transmute <- df_filter %>% transmute(id = id,
                                          address = str_c(address, city, state, zip_code, sep = ", "))
  # Step 3: one Mapbox request per address, returning a "lon;lat" string
  df_geocode <- df_transmute %>% mutate(geometry=map(address, geocode_single))
  # Step 4: split the geometry string into numeric long/lat columns
  df_geocode <- df_geocode %>% mutate(long = unlist(map(geometry, get_longitude)),
                                      lat=unlist(map(geometry, get_latitude)))
  df_geocode <- df_geocode %>% select(-geometry)
  # STEP 5: merge results back, filling in only the previously-missing values
  df <- df %>% left_join(df_geocode %>% select(id, long, lat), by = "id")
  df <- df %>% mutate(longitude = ifelse(is.na(longitude), long, longitude),
                      latitude = ifelse(is.na(latitude), lat, latitude),
                      latlng_source = ifelse(is.na(latlng_source), "Mapbox Geocode", latlng_source))
  df <- df %>% select(-lat, -long)
  return(df)
}

# Geocode one address string via Mapbox; returns "longitude;latitude".
geocode_single <- function(search) {
  url <- URLencode(paste("https://api.mapbox.com/geocoding/v5/mapbox.places/",
                         search, ".json?access_token=", API_KEY, sep = ""))
  r <- GET(url)
  # FIX: the parsed response body was stored in a variable named `c`, which
  # shadowed base::c() inside this function; renamed for safety/clarity.
  resp_body <- content(r, as="parsed", type="application/json")
  # Third element of the response is the features list; first feature is the
  # best match. Mapbox coordinates are ordered [longitude, latitude].
  features <- resp_body[[3]]
  geometry <- paste(features[[1]]$geometry$coordinates, collapse=";")
  return(geometry)
}

# Extract the latitude (second field) from a "lon;lat" geometry string.
get_latitude <- function(geometry) {
  # Equivalent to the original `strsplit(...)[1][[1]][[2]]` chain, minus the
  # redundant `[1]`/`unlist()` steps.
  return(as.numeric(strsplit(geometry, ";")[[1]][[2]]))
}

# Extract the longitude (first field) from a "lon;lat" geometry string.
get_longitude <- function(geometry) {
  return(as.numeric(strsplit(geometry, ";")[[1]][[1]]))
}
/data_prep_scripts/auto_geocode_functions.R
permissive
maxachis/food-access-map-data
R
false
false
1,733
r
library(stringr) library(purrr) library(dplyr) library(httr) #POSSIBLY REMOVE THESE LINES IF WE HAVE TO FOR AUTOMATION #library(rstudioapi) #API_KEY <- askForPassword(prompt = "Enter Mapbox API Key") #--END PROPOSED REMOVES run_geocode <- function(df) { # STEP 1 df_filter <- df %>% filter(is.na(latitude) | is.na(longitude) | latitude==0 | longitude==0) # STEP 2 df_transmute <- df_filter %>% transmute(id = id, address = str_c(address, city, state, zip_code, sep = ", ")) # Step 3 df_geocode <- df_transmute %>% mutate(geometry=map(address, geocode_single)) # Step 4 df_geocode <- df_geocode %>% mutate(long = unlist(map(geometry, get_longitude)), lat=unlist(map(geometry, get_latitude))) df_geocode <- df_geocode %>% select(-geometry) # STEP 5 df <- df %>% left_join(df_geocode %>% select(id, long, lat), by = "id") df <- df %>% mutate(longitude = ifelse(is.na(longitude), long, longitude), latitude = ifelse(is.na(latitude), lat, latitude), latlng_source = ifelse(is.na(latlng_source), "Mapbox Geocode", latlng_source)) df <- df %>% select(-lat, -long) return(df) } geocode_single <- function(search) { url <- URLencode(paste("https://api.mapbox.com/geocoding/v5/mapbox.places/", search,".json?access_token=", API_KEY, sep = "")) r <- GET(url) c <- content(r, as="parsed", type="application/json") features <- c[[3]] geometry <- paste(features[[1]]$geometry$coordinates, collapse=";") return(geometry) } get_latitude <- function(geometry) { return(as.numeric(unlist(strsplit(geometry, ";")[1][[1]][[2]]))) } get_longitude <- function(geometry) { return(as.numeric(unlist(strsplit(geometry, ";")[1][[1]][[1]]))) }
# Draw one horizontal time-range segment per sample, colored by class,
# with an optional shaded vertical band (e.g. the Younger Dryas).
# change this to your own directory
setwd("C:/Users/user1/Documents/GitHub/ggplot2_tutorial/time_ranges")

library(readr)
library(ggplot2)

# Expected columns: min_year, max_year, arrangement (vertical position),
# class (segment color grouping) - inferred from the usage below.
data <- read_csv("data/age_data.csv")

p <- ggplot()

# One geom_line (a two-point segment) per row of the data.
# NOTE(review): appending a layer per row grows the plot object and is slow
# for large inputs; a single layer over the whole data frame would likely be
# faster - TODO confirm before changing.
for(i in 1:nrow(data)){
  x <- c(data$min_year[i],data$max_year[i])
  y <- c(data$arrangement[i],data$arrangement[i])
  plot.data <- data.frame(x = x, y = y, type = data$class[i])
  p <- p + geom_line(data=plot.data, aes(x=x,y=y,color=type))
  # progress indicator
  cat("finishing sample",i,"\n")
}

p <- p + theme(panel.grid.major = element_blank(),
               panel.grid.minor = element_blank(),
               panel.background = element_rect(colour="black", fill = NA),
               axis.line = element_line(color = "black"),
               axis.text.y = element_blank(),
               axis.ticks.y = element_blank(),
               legend.justification = c(0, 0),
               axis.text.x = element_text(size = 14),
               #axis.ticks.x = element_blank(),
               axis.title = element_text(size = 20),
               legend.position = "right",
               text = element_text(family='Kai'),
               legend.background = element_rect(colour=NA, fill = NA),
               legend.key = element_rect(colour = "white", fill = NA)
               )+
  #customize your colors here, change the Type1, Type2, Type3 into Best, Shorter, Problem, etc.
  scale_color_manual(
    values = c(
      "type1" = "red",
      "type2" = "blue",
      "type3" = "green"
    ),
    labels = c("A","B","C"))+
  #---------------------------
  #if you want to have a shaded vertical bar, use this section; otherwise, delete this section
  annotate("rect", fill = "tan", xmin = 11700, xmax = 12900,
           ymin = -Inf, ymax = Inf, alpha = .3)+
  annotate("text", x = 12300, y = 0, label = "YD", size = 4, color = 'orange3')+
  #--------------------------
  labs(y = "Samples", x = "Years BP", color = "Types")+
  scale_x_continuous( expand = c(0.025, 0.025),breaks = scales::pretty_breaks(n = 10))+
  scale_y_continuous(limits=c(0,nrow(data)),expand = c(0.025, 0.02),breaks = scales::pretty_breaks(n = 10))

# Print the finished plot.
p
/time_ranges/time_range_plot.R
no_license
yeshancqcq/ggplot2_tutorial
R
false
false
2,052
r
# change this to your own directory setwd("C:/Users/user1/Documents/GitHub/ggplot2_tutorial/time_ranges") library(readr) library(ggplot2) data <- read_csv("data/age_data.csv") p <- ggplot() for(i in 1:nrow(data)){ x <- c(data$min_year[i],data$max_year[i]) y <- c(data$arrangement[i],data$arrangement[i]) plot.data <- data.frame(x = x, y = y, type = data$class[i]) p <- p + geom_line(data=plot.data, aes(x=x,y=y,color=type)) cat("finishing sample",i,"\n") } p <- p + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_rect(colour="black", fill = NA), axis.line = element_line(color = "black"), axis.text.y = element_blank(), axis.ticks.y = element_blank(), legend.justification = c(0, 0), axis.text.x = element_text(size = 14), #axis.ticks.x = element_blank(), axis.title = element_text(size = 20), legend.position = "right", text = element_text(family='Kai'), legend.background = element_rect(colour=NA, fill = NA), legend.key = element_rect(colour = "white", fill = NA) )+ #customize your colors here, change the Type1, Type2, Type3 into Best, Shorter, Problem, etc. scale_color_manual( values = c( "type1" = "red", "type2" = "blue", "type3" = "green" ), labels = c("A","B","C"))+ #--------------------------- #if you want to have a shaded vertical bar, use this section; otherwise, delete this section annotate("rect", fill = "tan", xmin = 11700, xmax = 12900, ymin = -Inf, ymax = Inf, alpha = .3)+ annotate("text", x = 12300, y = 0, label = "YD", size = 4, color = 'orange3')+ #-------------------------- labs(y = "Samples", x = "Years BP", color = "Types")+ scale_x_continuous( expand = c(0.025, 0.025),breaks = scales::pretty_breaks(n = 10))+ scale_y_continuous(limits=c(0,nrow(data)),expand = c(0.025, 0.02),breaks = scales::pretty_breaks(n = 10)) p
# Example for configural::cor_covariance(): the asymptotic sampling
# covariance matrix for the unique elements of a correlation matrix,
# here a 3x3 correlation matrix observed on n = 100 cases.
library(configural)

R <- matrix(c(1,  .2, .3,
              .2, 1,  .3,
              .3, .3, 1), ncol = 3)

cor_covariance(R, 100)
/data/genthat_extracted_code/configural/examples/cor_covariance.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
287
r
# configural example: cor_covariance() returns the asymptotic sampling
# covariance matrix for the unique (off-diagonal) elements of a
# correlation matrix, given the sample size.
library(configural)

cor_covariance(
  matrix(c(1, .2, .3,
           .2, 1, .3,
           .3, .3, 1),
         ncol = 3),
  100
)
################################################################################
# Aim: Download full text pdfs, given PMID and url
#
# Contact: Herm Lamberink, h.j.lamberink@umcutrecht.nl
# Date: 2018-03-19
#############################

.libPaths( c(.libPaths(), "/mnt/data/live02/stress/hlamberink/RLibrary" ) )
library( 'xml2' )    # used by rvest package
library( 'rvest' )   # web scraping package
library( "curl" )
library( "XML" )
library( "pbapply" ) # power bar during sapply
library( 'plyr' ); library( 'dplyr' )
library( 'tidyr' )


###################################
# FUNCTIONS
###################################

###
# Download the full-text PDF for one article.
#
# pmid  : PubMed ID; used as the output file name "<pmid>.pdf"
# url   : publisher full-text URL
# outdr : output directory (defaults to the global `outdir`)
#
# Returns NA on success or when the file already exists.  Errors and
# warnings are caught and a short "<pmid> <message>" string is returned
# instead, so a vectorised caller can collect failures without stopping.
##
get.pdf <- function( pmid, url, outdr = outdir )
{
  # Ordered dispatch table: substring/regex pattern -> handler function
  # name.  Patterns are tried IN THIS ORDER and a later match overwrites
  # an earlier one, reproducing the original if() cascade's last-match-
  # wins behaviour.  Two pseudo-handlers exist:
  #   ".identity" -- the url itself already points at the PDF
  #   ".ovid"     -- two-step lookup (ovid1, then ovid2 as fallback)
  handlers <- c(
    "arvojournals"     = "get.pdflink.arvojournals",
    "jamanetwork.com"  = "get.pdflink.jama",
    "dx.plos"          = "get.pdflink.plos",
    "/EHP"             = "get.pdflink.ehp",
    "/bjs"             = "get.pdflink.doibjs",
    "wiley.com"        = "get.pdflink.wileyreal",
    "bmj.com"          = "get.pdflink.bmj",
    "cmaj.ca"          = "get.pdflink.cmaj",
    "nejm.org"         = "get.pdflink.nejm",
    "scielo.br"        = "get.pdflink.scielo",
    "academic.oup"     = "get.pdflink.acoup",
    "annals"           = "get.pdflink.annals",
    "cambridge.org"    = "get.pdflink.cambridge",
    "Insights.ovid"    = ".ovid",
    "iiar"             = "get.pdflink.iiar",
    "ahajournals"      = "get.pdflink.ahaj",
    "sciencedirect.com" = "get.pdflink.sciencedirect",
    "asm"              = "get.pdflink.asm",
    # NOTE: the original read `pdflink <- get.pdflink.ajp` (missing call
    # parentheses, assigning the function object itself); fixed here by
    # dispatching through do.call like every other handler.
    "ajp"              = "get.pdflink.ajp",
    "apsjournals"      = "get.pdflink.apsjournals",
    "arjournals"       = "get.pdflink.arjournals",
    "ascopubs"         = "get.pdflink.ascopubs",
    "avmajournals"     = "get.pdflink.avma",
    "bjgp"             = "get.pdflink.bjgp",
    "boneandjoint"     = "get.pdflink.boneandjoint",
    "aacrjournals"     = "get.pdflink.aacrjournals",
    "diabetesjournals" = "get.pdflink.diabetesjournals",
    "asnjournals"      = "get.pdflink.asnjournals",
    "ersjournals"      = "get.pdflink.ersjournals",
    "gacetamedicade"   = "get.pdflink.gacetamedicade",
    "tums.ac.ir"       = "get.pdflink.tums",
    "nutrition.org"    = "get.pdflink.nutrition",
    "aota.org"         = "get.pdflink.aota",
    "physiology.org"   = "get.pdflink.physiology",
    "asahq.org"        = "get.pdflink.asahq",
    "upol.cz"          = "get.pdflink.upol.cz",
    "rcpsych.org"      = "get.pdflink.rcpsych",
    "sabinet.co.za"    = "get.pdflink.sabinet",
    "quintessenz"      = "get.pdflink.quintessenz",
    "clinicalandtranslationalinvestigation" = "get.pdflink.clinicalandtranslationalinvestigation",
    "jaoa.org"         = "get.pdflink.jaoa",
    "snmjournals"      = "get.pdflink.snmjournals",
    "umsha"            = "get.pdflink.umsha",
    "tokai"            = "get.pdflink.tokai",
    "pamw.pl"          = "get.pdflink.pamw",
    "aappublications"  = "get.pdflink.aappublications",
    "publisherspanel"  = "get.pdflink.publisherspanel",
    "rcseng"           = "get.pdflink.rcseng",
    "rsna"             = "get.pdflink.rsna",
    "rcjournal"        = "get.pdflink.rcjournal",
    "revistachirurgia" = "get.pdflink.revistachirurgia",
    "thejns"           = "get.pdflink.thejns",
    "alphamedpress"    = "get.pdflink.alphamedpress",
    "aepress"          = "get.pdflink.aepress",
    "ajronline"        = "get.pdflink.ajronline",
    "ajcn"             = "get.pdflink.ajcn",
    "ams.ac.ir"        = "get.pdflink.ams.ac.ir",
    "annfammed"        = "get.pdflink.annfammed",
    "annsaudimed"      = "get.pdflink.annsaudimed",
    "atsjournals"      = "get.pdflink.atsjournals",
    "birpublications"  = "get.pdflink.birpublications",
    "bloodjournal"     = "get.pdflink.bloodjournal",
    "cfp.org"          = "get.pdflink.cfp",
    "cmj.hr"           = "get.pdflink.cmj.hr",
    "cmj.org"          = "get.pdflink.cmj.org",
    "danmedj"          = "get.pdflink.danmedj",
    "dirjournal"       = "get.pdflink.dirjournal",
    "e-cmh"            = "get.pdflink.ecmh",
    "ectrx"            = "get.pdflink.ectrx",
    "educationforhealth" = "get.pdflink.educationforhealth",
    "eje-online"       = "get.pdflink.ejeonline",
    "europeanreview"   = "get.pdflink.europeanreview",
    "haematologica"    = "get.pdflink.haematologica",
    "hdbp"             = "get.pdflink.hdbp",
    "healio"           = "get.pdflink.healio",
    "ijkd"             = "get.pdflink.ijkd",
    "ijo.in"           = "get.pdflink.ijo.in",
    "impactjournals"   = "get.pdflink.impactjournals",
    "inaactamedica"    = "get.pdflink.inaactamedica",
    "indianjcancer"    = "get.pdflink.indianjcancer",
    "intbrazjurol"     = ".identity",
    "jiaci"            = "get.pdflink.jiaci",
    # NOTE: the original called get.pdflink.jmir(), which is never
    # defined; the file defines get.pdflink.jmir.org() -- fixed here.
    "jmir"             = "get.pdflink.jmir.org",
    "jneurosci"        = "get.pdflink.jneurosci",
    "jospt"            = "get.pdflink.jospt",
    "mdpi.com"         = "get.pdflink.mdpi.com",
    "painphysicianjournal" = "get.pdflink.painphysicianjournal",
    "sjweh"            = "get.pdflink.sjweh",
    "tandfonline"      = "get.pdflink.tandfonline",
    "thieme-connect"   = "get.pdflink.thieme",
    "wjgnet"           = "get.pdflink.wjgnet",
    "degruyter"        = "get.pdflink.degruyter",
    "biomedcentral"    = "get.pdflink.biomedcentral",
    "karger"           = "get.pdflink.karger",
    "jkan.or.kr"       = "get.pdflink.jkan.or.kr",
    "medicaljournals.se" = "get.pdflink.medicaljournals.se",
    "anesthesiology"   = "get.pdflink.anesthesiology",
    "linkinghub"       = "get.pdflink.linkinghub",
    "doi.org/10.1038"  = "get.pdflink.nature",
    "doi.org/10.1089"  = "get.pdflink.acm",
    "doi.org/10.1111"  = "get.pdflink.wiley",
    "doi.org/10.1002"  = "get.pdflink.wiley",
    "doi.org/10.1007"  = "get.pdflink.springerlink",
    "psychiatryonline" = "get.pdflink.psychiatryonline"
  )

  # prevent the function from shutting down due to an error
  v <- tryCatch(
    {
      outpdf <- paste0( outdr, '/', pmid, '.pdf' )

      if ( ! file.exists( outpdf ) )
      {
        pdflink <- NA

        # Resolve the publisher-specific PDF link.
        for ( pat in names( handlers ) )
        {
          if ( ! grepl( pat, url ) ) next

          h <- handlers[[ pat ]]
          if ( h == ".identity" ) {
            pdflink <- url
          } else if ( h == ".ovid" ) {
            pdflink <- get.pdflink.ovid1( url )
            if ( length( pdflink ) == 0 ) pdflink <- get.pdflink.ovid2( url )
          } else {
            pdflink <- do.call( h, list( url ) )
          }
        }

        # Download only when the handler produced exactly one usable link.
        # (The length check also guards against NULL / character(0) results,
        # which previously made `if(!is.na(pdflink))` error out.)
        if ( length( pdflink ) == 1 && ! is.na( pdflink ) )
        {
          download.file( url = pdflink,
                         destfile = outpdf,
                         mode = "wb",
                         quiet = TRUE )
        }
      }

      return( NA )
    },
    error = function(err)
    {
      message(paste( pmid, err, "\n" ) )
      # Choose a return value in case of error
      return( paste( pmid, "URL does not seem to exist" ) )
    },
    warning = function(war)
    {
      message(paste( pmid, war, "\n" ) )
      # Choose a return value in case of warning
      return( paste( pmid, "warning, test if downloaded" ) )
    }
  )
}


###
# Get full text pdf link from psychiatryonline.org full text website.
##
get.pdflink.psychiatryonline <- function( url )
{
  # parse page with a desktop user agent (some publishers block curl's default)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # css selected with Selector Gadget [http://selectorgadget.com/]
  pdflink <- page %>%
    html_nodes( css = ".show-pdf" ) %>%
    html_attr( "href" )

  return( pdflink )
}


###
# Get full text pdf link from springerlink full text website.
##
# springerlink: the direct PDF location is advertised in the
# citation_pdf_url meta tag.
get.pdflink.springerlink <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content")
}


###
# Get full text pdf link from nature full text website.
##
get.pdflink.nature <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )

  pdflink <- html_attr(
    html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content"
  )
  # BUG FIX: the original only returned a value from inside the fallback
  # branch, so when the citation_pdf_url meta tag WAS present the function
  # fell through and returned NULL.  Return the link as soon as found.
  if (!identical(pdflink, character(0))) {
    return(pdflink)
  }

  # fallback: scrape the visible download anchor
  rel <- html_attr(
    html_nodes(doc, css = 'a[class="inline-block block-link pa10 pl0"]'),
    "href"
  )
  if (!identical(rel, character(0))) {
    return(paste0("https://www.nature.com", rel[1]))
  }

  NULL  # nothing found; get.pdf()'s tryCatch absorbs the failed download
}


###
# Get full text pdf link from acm full text website.
##
get.pdflink.acm <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc, css = '.pdfprint a'), "href")
  if (!identical(rel, character(0))) {
    return(paste0("http://online.liebertpub.com", rel))
  }
  NULL  # no PDF anchor on the page
}


###
# Get full text pdf link from wiley (doi.org-resolved) full text website.
##
get.pdflink.wiley <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content")
}


###
# Get full text pdf link from wiley full text website.
##
# wiley.com landing pages: PDF location sits in the citation_pdf_url
# meta tag.
get.pdflink.wileyreal <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content")
}


###
# sciencedirect: the landing page is an interstitial containing a
# URL-encoded redirect target; follow it, then read citation_pdf_url.
##
get.pdflink.sciencedirect <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )

  # step 1: decode the redirect target embedded in the interstitial form
  encoded <- html_attr(
    html_nodes(doc, css = 'input[name="redirectURL"]'), "value"
  )
  target <- URLdecode(encoded)

  # step 2: the real article page advertises the (relative) PDF link
  article <- xml2::read_html(
    curl(target, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(
    html_nodes(article, css = 'meta[name="citation_pdf_url"]'), "content"
  )

  paste0("https://www.sciencedirect.com", rel)
}

# NOTE(review): the original re-defined get.pdflink.springerlink here,
# byte-identical to the definition earlier in this file; the redundant
# duplicate has been removed.


###
# Get full text pdf link from medicaljournals.se full text website.
##
# medicaljournals.se: the PDF button carries a site-relative href.
get.pdflink.medicaljournals.se <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(
    html_nodes(doc, css = 'li:nth-child(2) .btn-success2'), "href"
  )
  paste0("https://www.medicaljournals.se", rel)
}


###
# jkan.or.kr: fourth entry of the "Format" portlet links the PDF.
##
get.pdflink.jkan.or.kr <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(
    html_nodes(doc, css = '#portlet_content_Format li:nth-child(4) a'), "href"
  )
  paste0("https://www.jkan.or.kr", rel)
}


###
# karger: the styled download button holds a site-relative href.
##
get.pdflink.karger <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc, css = '.btn-karger'), "href")
  paste0("https://www.karger.com", rel)
}


###
# Get full text pdf link from degruyter full text website.
##
# degruyter: the ".pdf-link" anchor carries a site-relative href.
get.pdflink.degruyter <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc, css = '.pdf-link'), "href")
  paste0("https://www.degruyter.com", rel)
}


###
# biomedcentral: absolute PDF URL is in the citation_pdf_url meta tag.
##
get.pdflink.biomedcentral <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content")
}


###
# wjgnet: third item in the article navigation links the PDF directly.
##
get.pdflink.wjgnet <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(
    html_nodes(doc, css = '.left-articlenav li:nth-child(3) a'), "href"
  )
}


###
# Get full text pdf link from thieme-connect full text website.
##
# thieme-connect: two-hop navigation -- follow the second article tab to
# the PDF landing page, then read the #pdfLink anchor there.
get.pdflink.thieme <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  tab <- html_attr(
    html_nodes(doc, css = '#articleTabs :nth-child(2) a'), "href"
  )
  landing <- paste0("http://www.thieme-connect.com", tab)

  doc2 <- xml2::read_html(
    curl(landing, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc2, css = '#pdfLink'), "href")
  paste0("http://www.thieme-connect.com", rel)
}


###
# tandfonline: the "show PDF" anchor carries a site-relative href.
##
get.pdflink.tandfonline <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc, css = '.show-pdf'), "href")
  paste0("http://www.tandfonline.com", rel)
}


###
# sjweh: the download anchor is relative to the site root.
##
get.pdflink.sjweh <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc, css = '.pdf-download'), "href")
  paste0("http://www.sjweh.fi/", rel)
}


###
# Get full text pdf link from painphysicianjournal full text website.
##
# painphysicianjournal: the floated-right anchor holds a relative href.
get.pdflink.painphysicianjournal <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc, css = '.row .float-right'), "href")
  paste0("http://www.painphysicianjournal.com", rel)
}


###
# mdpi.com: absolute PDF URL sits in the citation_pdf_url meta tag.
##
get.pdflink.mdpi.com <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content")
}


###
# jospt: take the first "/doi/pdf" anchor, prefix with the site host.
##
get.pdflink.jospt <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc, css = 'a[href^="/doi/pdf"]'), "href")
  paste0("http://www.jospt.org", rel[1])
}


###
# jneurosci: absolute PDF URL is in the citation_pdf_url meta tag.
##
get.pdflink.jneurosci <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content")
}


###
# Get full text pdf link from jmir.org full text website.
##
# jmir.org: two-hop -- the abstract page's citation_abstract_pdf_url meta
# tag points at an intermediate page, which links the actual download.
get.pdflink.jmir.org <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  abstract_pdf <- html_attr(
    html_nodes(doc, css = 'meta[name="citation_abstract_pdf_url"]'), "content"
  )

  doc2 <- xml2::read_html(
    curl(abstract_pdf, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(
    html_nodes(doc2, css = 'a[href^="http://www.jmir.org/article/download"]'),
    "href"
  )
}


###
# jiaci: second anchor of the first list item links the PDF.
##
get.pdflink.jiaci <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(
    html_nodes(doc, css = 'li:nth-child(1) a:nth-child(2)'), "href"
  )
  paste0("http://www.jiaci.org", rel)
}


###
# indianjcancer: follow citation_pdf_url to an intermediate page, then
# pick up the ".pdf" anchor there (relative to the site root).
##
get.pdflink.indianjcancer <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  meta_url <- html_attr(
    html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content"
  )

  doc2 <- xml2::read_html(
    curl(meta_url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc2, css = 'a[href$=".pdf"]'), "href")
  paste0("http://www.indianjcancer.com/", rel)
}


###
# Get full text pdf link from inaactamedica full text website.
##
# inaactamedica: the article URL already IS the PDF; pass it through.
get.pdflink.inaactamedica <- function(url) {
  url
}


###
# impactjournals: absolute PDF URL is in the citation_pdf_url meta tag.
##
get.pdflink.impactjournals <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content")
}


###
# ijo.in: follow the first citation_pdf_url target to an intermediate
# page, then pick up its ".pdf" anchor (relative to the site root).
##
get.pdflink.ijo.in <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  meta_url <- html_attr(
    html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content"
  )

  doc2 <- xml2::read_html(
    curl(meta_url[1], handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc2, css = 'a[href$=".pdf"]'), "href")
  paste0("http://www.ijo.in/", rel)
}


###
# Get full text pdf link from ijkd full text website.
##
# ijkd: the page is a frameset -- enter the first frame, then take the
# anchor pointing back at the ijkd host.
get.pdflink.ijkd <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  frame_src <- html_attr(html_nodes(doc, css = 'frame'), "src")

  doc2 <- xml2::read_html(
    curl(frame_src[1], handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc2, css = 'a[href^="http://www.ijkd"]'), "href")
}


###
# healio: absolute PDF URL is in the citation_pdf_url meta tag.
##
get.pdflink.healio <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content")
}


###
# hdbp: the article URL already IS the PDF; pass it through.
##
get.pdflink.hdbp <- function(url) {
  url
}


###
# haematologica: absolute PDF URL is in the citation_pdf_url meta tag.
##
get.pdflink.haematologica <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content")
}


###
# Get full text pdf link from europeanreview full text website.
##
# europeanreview: the ".right" anchor's href sometimes carries a stray
# leading space before "http"; strip it.
get.pdflink.europeanreview <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  raw <- html_attr(html_nodes(doc, css = '.right'), "href")
  sub(" http", "http", raw)
}


###
# eje-online: absolute PDF URL is in the citation_pdf_url meta tag.
##
get.pdflink.ejeonline <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content")
}


###
# educationforhealth: follow citation_pdf_url to an intermediate page,
# then pick up its ".pdf" anchor (relative to the site root).
##
get.pdflink.educationforhealth <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  meta_url <- html_attr(
    html_nodes(doc, css = 'meta[name="citation_pdf_url"]'), "content"
  )

  doc2 <- xml2::read_html(
    curl(meta_url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc2, css = 'a[href$=".pdf"]'), "href")
  paste0("http://www.educationforhealth.net/", rel)
}


###
# Get full text pdf link from ectrx full text website.
##
# ectrx: the bolded anchor holds a path relative to the forms directory.
get.pdflink.ectrx <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc, css = 'b a'), "href")
  paste0("http://www.ectrx.org/forms/", rel)
}


###
# e-cmh: absolute PDF URL sits in the fulltext_pdf meta tag.
##
get.pdflink.ecmh <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'meta[name="fulltext_pdf"]'), "content")
}


###
# dirjournal: the SECOND ".pdf" anchor on the page is the article PDF,
# relative to the site host.
##
get.pdflink.dirjournal <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  rel <- html_attr(html_nodes(doc, css = 'a[href$=".pdf"]'), "href")
  paste0("http://www.dirjournal.org", rel[2])
}


###
# danmedj: the ".pdf" anchor already holds the full link.
##
get.pdflink.danmedj <- function(url) {
  doc <- xml2::read_html(
    curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0"))
  )
  html_attr(html_nodes(doc, css = 'a[href$=".pdf"]'), "href")
}


###
# Get full text pdf link from cmj.org full text website.
##
get.pdflink.cmj.org <- function( url ) {
  # Step 1: the citation_pdf_url <meta> tag points at an intermediate page.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  meta_url <- page %>%
    html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>%
    html_attr( "content" )
  # Step 2: on that page the first anchor inside a paragraph is the relative PDF link.
  page2 <- xml2::read_html( curl( meta_url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  rel <- page2 %>%
    html_nodes( css = 'p a:nth-child(1)' ) %>%
    html_attr( "href" )
  paste0( "http://www.cmj.org/", rel )
}

###
# Get full text pdf link from cmj.hr full text website.
##
get.pdflink.cmj.hr <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The document is served inside a <frame>; its absolute src is the PDF link.
  page %>%
    html_nodes( css = 'frame[src^="http"]' ) %>%
    html_attr( "src" )
}

###
# Get full text pdf link from cfp full text website.
##
get.pdflink.cfp <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  page %>%
    html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>%
    html_attr( "content" )
}

###
# Get full text pdf link from canjsurg full text website.
##
get.pdflink.canjsurg <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # Positional selector (SelectorGadget, http://selectorgadget.com/) for the PDF anchor.
  page %>%
    html_nodes( css = 'p:nth-child(2) a:nth-child(2)' ) %>%
    html_attr( "href" )
}

###
# Get full text pdf link from bloodjournal full text website.
##
get.pdflink.bloodjournal <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  page %>%
    html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>%
    html_attr( "content" )
}

###
# Get full text pdf link from birpublications full text website.
##
get.pdflink.birpublications <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # '.show-pdf' carries a site-relative link; prefix the host.
  rel <- page %>%
    html_nodes( css = '.show-pdf' ) %>%
    html_attr( "href" )
  paste0( "http://www.birpublications.org", rel )
}

###
# Get full text pdf link from atsjournals full text website.
##
get.pdflink.atsjournals <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # '.show-pdf' carries a site-relative link; prefix the host.
  rel <- page %>%
    html_nodes( css = '.show-pdf' ) %>%
    html_attr( "href" )
  paste0( "http://www.atsjournals.org", rel )
}

###
# Get full text pdf link from annsaudimed full text website.
##
get.pdflink.annsaudimed <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # '.desc' (SelectorGadget, http://selectorgadget.com/) is the PDF anchor.
  page %>%
    html_nodes( css = '.desc' ) %>%
    html_attr( "href" )
}

###
# Get full text pdf link from annfammed.org full text website.
##
get.pdflink.annfammed <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The full-text-pdf link points at the "<...>.full.pdf+html" viewer;
  # dropping the "+html" suffix yields the raw PDF.
  rel <- page %>%
    html_nodes( css = '.full-text-pdf-view-link a' ) %>%
    html_attr( "href" )
  paste0( "http://www.annfammed.org", sub( "\\+html", "", rel ) )
}

###
# Get full text pdf link from ams.ac.ir full text website.
##
get.pdflink.ams.ac.ir <- function( url ) {
  # ams.ac.ir article URLs already point at the PDF itself.
  url
}

###
# Get full text pdf link from ajronline full text website.
##
get.pdflink.ajronline <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # Sibling selector (SelectorGadget) for the PDF link; it is site-relative.
  rel <- page %>%
    html_nodes( css = '#refLinkList+ li .nowrap' ) %>%
    html_attr( "href" )
  paste0( "http://www.ajronline.org", rel )
}

###
# Get full text pdf link from ajcn full text website.
##
get.pdflink.ajcn <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The full-text-pdf link ends in "+html"; stripping it yields the raw PDF.
  rel <- page %>%
    html_nodes( css = '.full-text-pdf-view-link a' ) %>%
    html_attr( "href" )
  paste0( "http://www.ajcn.org", sub( "\\+html", "", rel ) )
}

###
# Get full text pdf link from aepress.sk full text website.
##
get.pdflink.aepress <- function( url ) {
  # aepress.sk article URLs already point at the PDF itself.
  url
}

###
# Get full text pdf link from alphamedpress full text website.
##
get.pdflink.alphamedpress <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  page %>%
    html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>%
    html_attr( "content" )
}

###
# Get full text pdf link from thejns full text website.
##
get.pdflink.thejns <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # Second entry of the article-tools list (SelectorGadget) is the PDF link.
  rel <- page %>%
    html_nodes( css = '.article-tools li:nth-child(2)' ) %>%
    html_attr( "href" )
  paste0( "http://thejns.org", rel )
}

###
# Get full text pdf link from revistachirurgia full text website.
##
get.pdflink.revistachirurgia <- function( url ) {
  # revistachirurgia article URLs already point at the PDF itself.
  url
}

###
# Get full text pdf link from rcjournal full text website.
##
get.pdflink.rcjournal <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  page %>%
    html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>%
    html_attr( "content" )
}

###
# Get full text pdf link from rsna full text website.
##
get.pdflink.rsna <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # Sixth tab of the navigation bar (SelectorGadget) is the PDF link.
  rel <- page %>%
    html_nodes( css = '.tab-nav li:nth-child(6) a' ) %>%
    html_attr( "href" )
  paste0( "http://pubs.rsna.org", rel )
}

###
# Get full text pdf link from rcseng.ac.uk full text website.
##
get.pdflink.rcseng <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # Fourth tab of the navigation bar (SelectorGadget) is the PDF link.
  rel <- page %>%
    html_nodes( css = '.tab-nav li:nth-child(4) a' ) %>%
    html_attr( "href" )
  paste0( "http://publishing.rcseng.ac.uk", rel )
}

###
# Get full text pdf link from publisherspanel full text website.
##
get.pdflink.publisherspanel <- function( url ) {
  # publisherspanel article URLs already point at the PDF itself.
  url
}

###
# Get full text pdf link from aappublications full text website.
##
get.pdflink.aappublications <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  page %>%
    html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>%
    html_attr( "content" )
}

###
# Get full text pdf link from pamw.pl full text website.
##
get.pdflink.pamw <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # Anchors inside the even field-item div; the first one is the PDF link.
  hrefs <- page %>%
    html_nodes( css = 'div[class="field-item even"] a' ) %>%
    html_attr( "href" )
  hrefs[1]
}

###
# Get full text pdf link from tokai.com full text website.
##
get.pdflink.tokai <- function( url ) {
  # tokai article URLs already point at the PDF itself.
  url
}

###
# Get full text pdf link from umsha.ac.ir full text website.
##
get.pdflink.umsha <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  page %>%
    html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>%
    html_attr( "content" )
}

###
# Get full text pdf link from aspet full text website.
##
get.pdflink.aspet <- function( url ) {
  # Return the direct PDF URL from an aspet article page.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  css <- 'meta[name="citation_pdf_url"]'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  return( pdflink )
}

###
# Get full text pdf link from waocp full text website.
##
get.pdflink.waocp <- function( url ) {
  # Return the direct PDF URL from a waocp article page.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # '.pdf' anchors (SelectorGadget, http://selectorgadget.com/) hold a
  # relative link of the form "./path/file.pdf".
  css <- '.pdf'
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # FIX: the previous pattern sub( "./", "", ... ) was an unanchored regex in
  # which "." matches any character, so it would delete the first
  # <char>/ pair anywhere in the string and could mangle links that do not
  # start with "./".  Anchor and escape so only a literal leading "./" goes.
  intermed2 <- sub( "^\\./", "", intermed1 )
  pdflink <- paste0( "http://journal.waocp.org/", intermed2 )
  return( pdflink )
}

###
# Get full text pdf link from snmjournals full text website.
##
get.pdflink.snmjournals <- function( url ) {
  # Return the direct PDF URL from an snmjournals article page.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  css <- 'meta[name="citation_pdf_url"]'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  return( pdflink )
}

###
# Get full text pdf link from jaoa.org full text website.
##
get.pdflink.jaoa <- function( url ) {
  # Return the direct PDF URL from a jaoa.org article page.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  css <- 'meta[name="citation_pdf_url"]'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  return( pdflink )
}

###
# Get full text pdf link from clinicalandtranslationalinvestigation full text website.
##
get.pdflink.clinicalandtranslationalinvestigation <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # Anchors pointing into files/ are site-relative PDF links.
  rel <- page %>%
    html_nodes( css = 'a[href^="files/"]' ) %>%
    html_attr( "href" )
  paste0( "http://clinicalandtranslationalinvestigation.com/", rel )
}

###
# Get full text pdf link from quintessenz full text website.
##
get.pdflink.quintessenz <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The "tocbut" anchor holds a relative link; rebuild an absolute URL from the
  # part of `url` before ".de".
  rel <- page %>%
    html_nodes( css = 'a[class="tocbut"]' ) %>%
    html_attr( "href" )
  host <- strsplit( url, ".de" )
  paste0( host[[1]][1], ".de/", rel )
}

###
# Get full text pdf link from sabinet.co.za full text website.
##
get.pdflink.sabinet <- function( url ) {
  # sabinet.co.za article URLs already point at the PDF itself.
  url
}

###
# Get full text pdf link from rcpsych full text website.
##
get.pdflink.rcpsych <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # <link type="application/pdf"> holds a relative link; rebuild the absolute
  # URL from the part of `url` before ".org".
  rel <- page %>%
    html_nodes( css = 'link[type="application/pdf"]' ) %>%
    html_attr( "href" )
  host <- strsplit( url, ".org" )
  paste0( host[[1]][1], ".org", rel )
}

###
# Get full text pdf link from upol.cz full text website.
##
get.pdflink.upol.cz <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  page %>%
    html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>%
    html_attr( "content" )
}

###
# Get full text pdf link from asahq.org full text website.
##
get.pdflink.asahq <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The #pdfLink element stores a relative link in data-article-url; rebuild
  # the absolute URL from the part of `url` before ".org".
  rel <- page %>%
    html_nodes( css = "#pdfLink" ) %>%
    html_attr( "data-article-url" )
  host <- strsplit( url, ".org" )
  paste0( host[[1]][1], ".org", rel )
}

###
# Get full text pdf link from physiology full text website.
##
get.pdflink.physiology <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # <link type="application/pdf"> holds a relative link; rebuild the absolute
  # URL from the part of `url` before ".org".
  rel <- page %>%
    html_nodes( css = 'link[type="application/pdf"]' ) %>%
    html_attr( "href" )
  host <- strsplit( url, ".org" )
  paste0( host[[1]][1], ".org", rel )
}

###
# Get full text pdf link from aota.org full text website.
##
get.pdflink.aota <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  page %>%
    html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>%
    html_attr( "content" )
}

###
# Get full text pdf link from nutrition.org full text website.
##
get.pdflink.nutrition <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The full-text-pdf link is relative and ends in "+html"; rebuild the
  # absolute URL from the part of `url` before ".org", then strip "+html".
  rel <- page %>%
    html_nodes( css = ".full-text-pdf-view-link a" ) %>%
    html_attr( "href" )
  host <- strsplit( url, ".org" )
  absolute <- paste0( host[[1]][1], ".org", rel )
  sub( "\\+html", "", absolute )
}

###
# Get full text pdf link from tums.ac.ir full text website.
##
get.pdflink.tums <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The sidebar "file" tool (SelectorGadget) links straight to the PDF.
  page %>%
    html_nodes( css = "#sidebarRTArticleTools .file" ) %>%
    html_attr( "href" )
}

###
# Get full text pdf link from arvojournals full text website.
##
get.pdflink.arvojournals <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The #pdfLink element stores a relative link in data-article-url.
  rel <- page %>%
    html_nodes( css = "#pdfLink" ) %>%
    html_attr( "data-article-url" )
  paste0( "http://iovs.arvojournals.org/", rel )
}

###
# Get full text pdf link from JAMA full text website.
##
get.pdflink.jama <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The PDF button inside the full-text tab stores a relative link in
  # data-article-url; rebuild the absolute URL from the part of `url`
  # before ".com".
  rel <- page %>%
    html_nodes( css = "#full-text-tab #pdf-link" ) %>%
    html_attr( "data-article-url" )
  host <- strsplit( url, ".com" )
  paste0( host[[1]][1], ".com", rel )
}

###
# Get full text pdf link from plos full text website.
##
get.pdflink.plos <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # The #downloadPdf anchor carries a site-relative link.
  rel <- page %>%
    html_nodes( css = "#downloadPdf" ) %>%
    html_attr( "href" )
  paste0( "http://journals.plos.org", rel )
}

###
# Get full text pdf link from bmj full text website.
##
get.pdflink.bmj <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # <link type='application/pdf'> carries a site-relative PDF link.
  rel <- page %>%
    html_nodes( css = "link[type='application/pdf']" ) %>%
    html_attr( "href" )
  paste0( "http://www.bmj.com", rel )
}

###
# Get full text pdf link from nejm full text website.
##
get.pdflink.nejm <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # List anchors whose href starts with /doi/pdf are the PDF links.
  rel <- page %>%
    html_nodes( css = "li a[href^='/doi/pdf']" ) %>%
    html_attr( "href" )
  paste0( "http://www.nejm.org", rel )
}

###
# Get full text pdf link from academic.oup full text website.
##
get.pdflink.acoup <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # '.al-link' (SelectorGadget) carries a site-relative PDF link.
  rel <- page %>%
    html_nodes( css = ".al-link" ) %>%
    html_attr( "href" )
  paste0( "https://academic.oup.com", rel )
}

###
# Get full text pdf link from annals full text website.
##
get.pdflink.annals <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # #tagmasterPDF stores a relative link in data-article-url.
  rel <- page %>%
    html_nodes( css = "#tagmasterPDF" ) %>%
    html_attr( "data-article-url" )
  paste0( "https://www.annals.org", rel )
}

###
# Get full text pdf link from cambridge full text website.
##
get.pdflink.cambridge <- function( url ) {
  # Fetch the article page, identifying as Chrome.
  handle <- curl::new_handle( "useragent" = "Chrome/55.0" )
  page <- xml2::read_html( curl( url, handle = handle ) )
  # First entry in the download-types list is the PDF; keep only the first
  # match in case several nodes are returned.
  rel <- page %>%
    html_nodes( css = ".download-types li:nth-child(1) a" ) %>%
    html_attr( "href" )
  paste0( "https://www.cambridge.org", rel[1] )
}

###
# Get full text pdf link from OVID full text website.
##
get.pdflink.ovid1 <- function( url ) {
  # Resolve an OVID landing page to the src of the iframe that embeds the PDF.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  # NOTE(review): `css` is currently unused; it belonged to the commented-out
  # meta-tag approach kept below for reference.
  css <- 'meta[name="citation_pdf_url"]'
  # get href to pdfLink
  # p1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  # p2 <- xml2::read_html( curl( p1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Scan the second inline <script> for the "ovidFullTextUrlForButtons"
  # JavaScript variable and cut out everything up to (and re-appending) the
  # "PubMed" token, which yields the full-text URL.
  p3 <- page %>% html_nodes( css = "script[type='text/javascript']")
  if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) ) {
    p4 <- p3[2]
    p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
    p6 <- paste0( p5, "PubMed" )
  }
  # NOTE(review): if the marker above is absent, `p6` is never assigned and
  # the next line fails with "object 'p6' not found" — confirm whether that
  # hard failure is the intended behavior.
  # "HUGE" relaxes libxml2's document-size limits for the large OVID page.
  page2 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
  # The PDF is embedded in an iframe; its src attribute is the link we return.
  pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
  #intermed2 <- paste0( "http://ovidsp.tx.ovid.com/", intermed1 )
  #page3 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  #pdflink <- page3 %>% html_nodes( css = "iframe") %>% html_attr( "src" )
  return( pdflink )
}

###
# Get full text pdf link from OVID full text website.
##
get.pdflink.ovid2 <- function( url ) {
  # Resolve an OVID landing page to the iframe src embedding the PDF, trying
  # the citation_pdf_url <meta> tag first and falling back to scraping the
  # "ovidFullTextUrlForButtons" JavaScript variable when the tag is absent.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  css <- 'meta[name="citation_pdf_url"]'
  # get href to pdfLink
  p1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  if(identical(p1, character(0))){
    # No meta tag: extract the full-text URL (ending at the "PubMed" token)
    # from the second inline <script> on the landing page itself.
    p3 <- page %>% html_nodes( css = "script[type='text/javascript']")
    if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) ) {
      p4 <- p3[2]
      p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
      p6 <- paste0( p5, "PubMed" )
    }
    # NOTE(review): if the marker is absent, `p6` is undefined here and this
    # call errors — confirm whether that failure mode is intended.
    # "HUGE" relaxes libxml2's document-size limits for the large OVID page.
    page2 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
    pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
  }else{
    # Meta tag present: follow it, then repeat the script scrape on that page.
    p2 <- xml2::read_html( curl( p1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
    p3 <- p2 %>% html_nodes( css = "script[type='text/javascript']")
    if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) ) {
      p4 <- p3[2]
      p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
      p6 <- paste0( p5, "PubMed" )
    }
    page3 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
    # Follow the "#pdf" link on the full-text page to the viewer, whose iframe
    # src is the final PDF location.
    intermed1 <- page3 %>% html_nodes( css = "#pdf" ) %>% html_attr( "href" )
    intermed2 <- paste0( "http://ovidsp.tx.ovid.com/", intermed1 )
    page4 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
    pdflink <- page4 %>% html_nodes( css = "iframe") %>% html_attr( "src" )
  }
  return( pdflink )
}

###
# Get full text pdf link from EHP full text website.
##
get.pdflink.ehp <- function( url ) {
  # Return the direct PDF URL from an EHP article page.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # '.pdf_icon' (SelectorGadget, http://selectorgadget.com/) holds a
  # site-relative link.
  css <- '.pdf_icon'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "https://ehp.niehs.nih.gov", pdflink )
  return( pdflink )
}

###
# Get full text pdf link from Science Direct full text website.
##
get.pdflink.sciencedirect <- function( url ) {
  # Return the direct PDF URL from a ScienceDirect article page.  The page's
  # download button leads to an interstitial page whose <meta> refresh tag
  # ("0;URL=...") carries the final PDF location.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- ".pdf-download-btn-link"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  intermed2 <- paste0( "http://www.sciencedirect.com", intermed1 )
  page2 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css2 <- 'meta[content^="0;URL"]'
  intermed3 <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "content" )
  # Keep only the part after "URL=" of the refresh directive.
  pdflink <- strsplit(intermed3, "URL=")[[1]][2]
  return( pdflink )
}

# for springerlink, retrieve the correct url
get.pdflink.linkinghub <- function( url ) {
  # Resolve a linkinghub redirect page: read the hidden redirectURL form input,
  # decode it, follow it, and read the citation_pdf_url meta of the target.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
  parsedfull <- htmlParse( page )
  rootnode <- xmlRoot( parsedfull )
  o <- getNodeSet( rootnode, "//input[@name='redirectURL']" )[[1]]
  # convert to character
  o2 <- capture.output(o)
  # Split the serialized node on blanks and pull the value="..." attribute.
  o3 <- data.frame( col = strsplit( o2, split = " " )[[1]] )
  o4 <- separate( o3, col = "col", into = c("a", "b"), sep = "=", fill = "right" )
  http <- o4[ o4$a == "value", "b" ]
  http <- gsub( "\"", "", http )
  outurl <- URLdecode(http)
  # parse the redirect target
  page <- xml2::read_html( curl( outurl, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
  css <- 'meta[name="citation_pdf_url"]'
  intermed3 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  # Drop an HTML-escaping artifact ("amp;") from the URL, then follow it and
  # take the anchor inside the wrapper div as the final PDF link.
  pdflink1 <- sub( "amp;", "", intermed3 )
  page2 <- xml2::read_html( pdflink1 )
  css2 <- 'div a'
  pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
  return( pdflink )
}

###
# Get full text pdf link from scielo full text website.
##
get.pdflink.scielo <- function( url ) {
  # Return the direct PDF URL from a scielo article page.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Positional selector (SelectorGadget) for the PDF anchor; keep the first
  # match only.
  css <- "li:nth-child(2) a:nth-child(1)"
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://www.scielo.br", pdflink[1] )
  return( pdflink )
}

###
# Get full text pdf link from hyper.ahajournals full text website.
##
get.pdflink.ahaj <- function( url ) {
  # Return the direct PDF URL from an ahajournals article page.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url <meta> tag carries the direct PDF URL.
  # FIX: a stray bare string literal ".aha-icon-download" (a leftover
  # alternative selector) used to sit after this assignment as a dead,
  # discarded expression; it has been removed.
  css <- 'meta[name=citation_pdf_url]'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  return( pdflink )
}

###
# Get full text pdf link from cmaj full text website.
##
get.pdflink.cmaj <- function( url ) {
  # Return the direct PDF URL from a cmaj article page.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The full-text-pdf link ends in "+html"; stripping it yields the raw PDF.
  css <- ".full-text-pdf-view-link a"
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://www.cmaj.ca", pdflink )
  # FIX: the pattern was sub( "+html", "", ... ), which is an invalid regular
  # expression (a leading "+" is a repetition operator with nothing to repeat)
  # and errors at runtime.  Escape the "+" as the sibling functions
  # (annfammed, ajcn, nutrition) already do.
  pdflink <- sub( "\\+html", "", pdflink )
  return( pdflink )
}

###
# Get full text pdf link from doi.org (Wiley) full text website.
##
get.pdflink.doiwiley <- function( url ) {
  # Return the direct PDF URL from a Wiley article reached via doi.org: follow
  # the citation_pdf_url meta tag, then read the #pdfDocument viewer's src.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- 'meta[name="citation_pdf_url"]'
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css2 <- "#pdfDocument"
  pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "src" )
  return( pdflink )
}

###
# Get full text pdf link from doi.org (bjs) full text website.
##
get.pdflink.doibjs <- function( url ) {
  # Return the direct PDF URL from a bjs article reached via doi.org.
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The infopane link points at the "epdf" viewer; swap it for the plain pdf.
  css <- ".js-infopane-epdf"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- sub( "epdf", "pdf", intermed1)
  return( pdflink )
}

###
# Get full text pdf link from asm.org full text website.
##
# Get full text pdf link from asm.org full text website.
##
get.pdflink.asm <- function( url )
{
  # NOTE(review): this fetch result is never used — the pdf link is derived
  # from the url alone. Kept so that a dead url still fails early here and is
  # reported by the caller's tryCatch, as in the original.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # asm full-text urls end in "long"; the pdf lives at ".../full.pdf"
  pdflink <- sub( "long", "full.pdf", url )

  return( pdflink )
}

###
# Get full text pdf link from ajp... full text website.
# The landing url already is the pdf url.
##
get.pdflink.ajp <- function( url )
{
  pdflink <- url
  return( pdflink )
}

###
# Get full text pdf link from apsjournals full text website.
##
get.pdflink.apsjournals <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # css of pdf element selected with Selector Gadget [http://selectorgadget.com/]
  css <- "li:nth-child(2) .nowrap"

  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://apsjournals.apsnet.org", intermed1 )

  return( pdflink )
}

###
# Get full text pdf link from arjournals full text website.
##
get.pdflink.arjournals <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # anchors whose href starts with the pdf path
  css <- "a[href^='/doi/pdf']"

  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://arjournals.annualreviews.org", intermed1 )

  return( pdflink )
}

###
# Get full text pdf link from ascopubs full text website.
##
get.pdflink.ascopubs <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # css of pdf element selected with Selector Gadget [http://selectorgadget.com/]
  css <- ".show-pdf"

  # absolute link, then switch to the direct-download endpoint
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  intermed2 <- paste0( "http://ascopubs.org", intermed1 )
  pdflink <- sub( "/pdf", "/pdfdirect", intermed2 )

  return( pdflink )
}

###
# Get full text pdf link from avmajournals full text website.
##
get.pdflink.avma <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # css of pdf element selected with Selector Gadget [http://selectorgadget.com/]
  css <- ".article_link td:nth-child(2) .header4"

  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://avmajournals.avma.org", intermed1 )

  return( pdflink )
}

###
# Get full text pdf link from bjgp full text website.
##
get.pdflink.bjgp <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # <link type="application/pdf"> element in the page head
  css <- "link[type='application/pdf']"

  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://bjgp.org", intermed1 )

  return( pdflink )
}

###
# Get full text pdf link from boneandjoint full text website.
##
get.pdflink.boneandjoint <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  css <- "link[type='application/pdf']"

  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://bjj.boneandjoint.org.uk", intermed1 )

  return( pdflink )
}

###
# Get full text pdf link from aacrjournals full text website.
##
get.pdflink.aacrjournals <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  css <- ".last .highwire-article-nav-jumplink"

  # rebuild an absolute link on the same ".org" host as the input url
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  link1 <- strsplit( url, ".org" )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )

  return( pdflink )
}

###
# Get full text pdf link from diabetesjournals full text website.
##
get.pdflink.diabetesjournals <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  css <- "link[type='application/pdf']"

  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  link1 <- strsplit( url, ".org" )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )

  return( pdflink )
}

###
# Get full text pdf link from asnjournals full text website.
##
get.pdflink.asnjournals <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  css <- ".primary a"

  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )

  # strip the ".pdf+html" viewer suffix down to ".pdf"
  intermed2 <- sub( ".pdf\\+html", ".pdf", intermed1 )
  link1 <- strsplit( url, ".org" )

  # BUG FIX: the original pasted intermed1 here, discarding the suffix-stripped
  # intermed2 (dead store) and leaving "+html" in the final link.
  pdflink <- paste0( link1[[1]][1], ".org", intermed2 )

  return( pdflink )
}

###
# Get full text pdf link from ersjournals full text website.
##
get.pdflink.ersjournals <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  css <- "link[type='application/pdf']"

  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  link1 <- strsplit( url, ".com" )
  pdflink <- paste0( link1[[1]][1], ".com", intermed1 )

  return( pdflink )
}

###
# Get full text pdf link from gacetamedicade full text website.
##
get.pdflink.gacetamedicade <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  css <- ".col-sm-2 li:nth-child(1) a"

  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://gacetamedicademexico.com/", intermed1 )

  return( pdflink )
}

###
# Get full text pdf link from iiar full text website.
##
# Get full text pdf link from iiar full text website.
##
get.pdflink.iiar <- function( url )
{
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # css of pdf element selected with Selector Gadget [http://selectorgadget.com/]
  css <- ".full-text-pdf-view-link a"

  # absolute link on the same ".org" host, with the "+html" viewer suffix removed
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  link1 <- strsplit( url, ".org" )
  intermed2 <- paste0( link1[[1]][1], ".org", intermed1 )
  pdflink <- sub( "\\+html", "", intermed2 )

  return( pdflink )
}

###
# Get full text pdf link from anesthesiology full text website.
##
get.pdflink.anesthesiology <- function( url )
{
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # the "#pdfLink" element carries the pdf location in its "data-article-url"
  css <- "#pdfLink"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )

  link1 <- strsplit( url, ".org" )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )

  return( pdflink )
}

###################################
# END FUNCTIONS
###################################

# output directory to store full text pdf
outdir <- 'pdfNEW/pdfs2'

# read data of missing pdfs
missings <- read.csv2( "missingsWithURL.csv", stringsAsFactors = FALSE )
head( missings )
names( missings ) <- c( "pmid", "url" )

# Row range to process in this run.
# FIX: renamed from min/max, which shadowed base::min/base::max, and clamp the
# upper bound so we never index past the end of the data frame.
row_from <- 10000
row_to   <- min( 20000, nrow( missings ) )

# set progress bar
progbar <- txtProgressBar( min = row_from, max = row_to, style = 3 )

# for every pmid in the range, attempt the pdf download
# (the per-row data.frame of the original was an unnecessary intermediate)
for ( i in row_from:row_to )
{
  setTxtProgressBar( progbar, i )
  get.pdf( pmid = missings$pmid[ i ], url = missings$url[ i ] )
}

# quit R session
q( save = "no" )
/scripts/obtainingPDFS/7_url.to.pdfdownloadRE2.R
permissive
wmotte/frrp
R
false
false
83,839
r
################################################################################
# Aim: Download full text pdfs, given PMID and url
#
# Contact: Herm Lamberink, h.j.lamberink@umcutrecht.nl
# Date: 2018-03-19
#############################

.libPaths( c( .libPaths(), "/mnt/data/live02/stress/hlamberink/RLibrary" ) )

library( 'xml2' )    # used by rvest package
library( 'rvest' )   # web scraping package
library( "curl" )
library( "XML" )
library( "pbapply" ) # power bar during sapply
library( 'plyr' ); library( 'dplyr' )
library( 'tidyr' )

###################################
# FUNCTIONS
###################################

# Ordered publisher dispatch table: url pattern (grepl regex) -> name of the
# handler that converts a full-text landing url into a direct pdf link.
# Order replicates the original if-chain: when several patterns match a url,
# the LAST matching entry wins. Two sentinel values are handled specially in
# resolve.pdflink(): "OVID" (two-step fallback) and "IDENTITY" (url is the pdf).
# NOTE: the former "dx.doi.org" -> doiwiley branch was already disabled in the
# original and is intentionally absent here.
publisher.handlers <- c(
  "arvojournals"       = "get.pdflink.arvojournals",
  "jamanetwork.com"    = "get.pdflink.jama",
  "dx.plos"            = "get.pdflink.plos",
  "/EHP"               = "get.pdflink.ehp",
  "/bjs"               = "get.pdflink.doibjs",
  "wiley.com"          = "get.pdflink.wileyreal",
  "bmj.com"            = "get.pdflink.bmj",
  "cmaj.ca"            = "get.pdflink.cmaj",
  "nejm.org"           = "get.pdflink.nejm",
  "scielo.br"          = "get.pdflink.scielo",
  "academic.oup"       = "get.pdflink.acoup",
  "annals"             = "get.pdflink.annals",
  "cambridge.org"      = "get.pdflink.cambridge",
  "Insights.ovid"      = "OVID",
  "iiar"               = "get.pdflink.iiar",
  "ahajournals"        = "get.pdflink.ahaj",
  "sciencedirect.com"  = "get.pdflink.sciencedirect",
  "asm"                = "get.pdflink.asm",
  "ajp"                = "get.pdflink.ajp",
  "apsjournals"        = "get.pdflink.apsjournals",
  "arjournals"         = "get.pdflink.arjournals",
  "ascopubs"           = "get.pdflink.ascopubs",
  "avmajournals"       = "get.pdflink.avma",
  "bjgp"               = "get.pdflink.bjgp",
  "boneandjoint"       = "get.pdflink.boneandjoint",
  "aacrjournals"       = "get.pdflink.aacrjournals",
  "diabetesjournals"   = "get.pdflink.diabetesjournals",
  "asnjournals"        = "get.pdflink.asnjournals",
  "ersjournals"        = "get.pdflink.ersjournals",
  "gacetamedicade"     = "get.pdflink.gacetamedicade",
  "tums.ac.ir"         = "get.pdflink.tums",
  "nutrition.org"      = "get.pdflink.nutrition",
  "aota.org"           = "get.pdflink.aota",
  "physiology.org"     = "get.pdflink.physiology",
  "asahq.org"          = "get.pdflink.asahq",
  "upol.cz"            = "get.pdflink.upol.cz",
  "rcpsych.org"        = "get.pdflink.rcpsych",
  "sabinet.co.za"      = "get.pdflink.sabinet",
  "quintessenz"        = "get.pdflink.quintessenz",
  "clinicalandtranslationalinvestigation" = "get.pdflink.clinicalandtranslationalinvestigation",
  "jaoa.org"           = "get.pdflink.jaoa",
  "snmjournals"        = "get.pdflink.snmjournals",
  "umsha"              = "get.pdflink.umsha",
  "tokai"              = "get.pdflink.tokai",
  "pamw.pl"            = "get.pdflink.pamw",
  "aappublications"    = "get.pdflink.aappublications",
  "publisherspanel"    = "get.pdflink.publisherspanel",
  "rcseng"             = "get.pdflink.rcseng",
  "rsna"               = "get.pdflink.rsna",
  "rcjournal"          = "get.pdflink.rcjournal",
  "revistachirurgia"   = "get.pdflink.revistachirurgia",
  "thejns"             = "get.pdflink.thejns",
  "alphamedpress"      = "get.pdflink.alphamedpress",
  "aepress"            = "get.pdflink.aepress",
  "ajronline"          = "get.pdflink.ajronline",
  "ajcn"               = "get.pdflink.ajcn",
  "ams.ac.ir"          = "get.pdflink.ams.ac.ir",
  "annfammed"          = "get.pdflink.annfammed",
  "annsaudimed"        = "get.pdflink.annsaudimed",
  "atsjournals"        = "get.pdflink.atsjournals",
  "birpublications"    = "get.pdflink.birpublications",
  "bloodjournal"       = "get.pdflink.bloodjournal",
  "cfp.org"            = "get.pdflink.cfp",
  "cmj.hr"             = "get.pdflink.cmj.hr",
  "cmj.org"            = "get.pdflink.cmj.org",
  "danmedj"            = "get.pdflink.danmedj",
  "dirjournal"         = "get.pdflink.dirjournal",
  "e-cmh"              = "get.pdflink.ecmh",
  "ectrx"              = "get.pdflink.ectrx",
  "educationforhealth" = "get.pdflink.educationforhealth",
  "eje-online"         = "get.pdflink.ejeonline",
  "europeanreview"     = "get.pdflink.europeanreview",
  "haematologica"      = "get.pdflink.haematologica",
  "hdbp"               = "get.pdflink.hdbp",
  "healio"             = "get.pdflink.healio",
  "ijkd"               = "get.pdflink.ijkd",
  "ijo.in"             = "get.pdflink.ijo.in",
  "impactjournals"     = "get.pdflink.impactjournals",
  "inaactamedica"      = "get.pdflink.inaactamedica",
  "indianjcancer"      = "get.pdflink.indianjcancer",
  "intbrazjurol"       = "IDENTITY",
  "jiaci"              = "get.pdflink.jiaci",
  "jmir"               = "get.pdflink.jmir.org",
  "jneurosci"          = "get.pdflink.jneurosci",
  "jospt"              = "get.pdflink.jospt",
  "mdpi.com"           = "get.pdflink.mdpi.com",
  "painphysicianjournal" = "get.pdflink.painphysicianjournal",
  "sjweh"              = "get.pdflink.sjweh",
  "tandfonline"        = "get.pdflink.tandfonline",
  "thieme-connect"     = "get.pdflink.thieme",
  "wjgnet"             = "get.pdflink.wjgnet",
  "degruyter"          = "get.pdflink.degruyter",
  "biomedcentral"      = "get.pdflink.biomedcentral",
  "karger"             = "get.pdflink.karger",
  "jkan.or.kr"         = "get.pdflink.jkan.or.kr",
  "medicaljournals.se" = "get.pdflink.medicaljournals.se",
  "anesthesiology"     = "get.pdflink.anesthesiology",
  "linkinghub"         = "get.pdflink.linkinghub",
  "doi.org/10.1038"    = "get.pdflink.nature",
  "doi.org/10.1089"    = "get.pdflink.acm",
  "doi.org/10.1111"    = "get.pdflink.wiley",
  "doi.org/10.1002"    = "get.pdflink.wiley",
  "doi.org/10.1007"    = "get.pdflink.springerlink",
  "psychiatryonline"   = "get.pdflink.psychiatryonline"
)

###
# Resolve a landing-page url to a direct pdf link via the dispatch table.
# Returns NA when no pattern matches (caller skips the download).
#
# BUG FIXES vs the original if-chain:
#  - the "ajp" branch assigned the function object (missing "( url )" call),
#    which then crashed download.file();
#  - the "jmir" branch called get.pdflink.jmir, which is never defined — the
#    handler is defined as get.pdflink.jmir.org.
##
resolve.pdflink <- function( url )
{
  pdflink <- NA
  for ( pat in names( publisher.handlers ) )
  {
    if ( grepl( pat, url ) )
    {
      handler <- publisher.handlers[[ pat ]]
      if ( handler == "OVID" )
      {
        # ovid has two page layouts; fall back when the first yields nothing
        pdflink <- get.pdflink.ovid1( url )
        if ( length( pdflink ) == 0 ) pdflink <- get.pdflink.ovid2( url )
      } else if ( handler == "IDENTITY" )
      {
        pdflink <- url
      } else
      {
        pdflink <- match.fun( handler )( url )
      }
    }
  }
  pdflink
}

###
# Get pdf from given pmid: resolve the publisher-specific pdf link for `url`
# and download it to "<outdr>/<pmid>.pdf" (skipped when the file exists).
# All errors/warnings are trapped so a batch run never aborts; on success NA
# is returned, otherwise a short "<pmid> <reason>" string.
##
get.pdf <- function( pmid, url, outdr = outdir )
{
  # prevent the function from shutting down due to an error
  v <- tryCatch(
    {
      # output pdf
      outpdf <- paste0( outdr, '/', pmid, '.pdf' )

      if ( ! file.exists( outpdf ) )
      {
        pdflink <- resolve.pdflink( url )

        # write pdf to output if link is available
        if ( ! is.na( pdflink ) )
        {
          download.file( url = pdflink, destfile = outpdf, mode = "wb", quiet = TRUE )
        }
      }
      NA
    },
    error = function( err )
    {
      message( paste( pmid, err, "\n" ) )
      paste( pmid, "URL does not seem to exist" )
    },
    warning = function( war )
    {
      message( paste( pmid, war, "\n" ) )
      paste( pmid, "warning, test if downloaded" )
    }
  )
  v
}

###
# Get full text pdf link from psychiatryonline.org full text website.
##
get.pdflink.psychiatryonline <- function( url )
{
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # css of pdf element selected with Selector Gadget [http://selectorgadget.com/]
  css <- ".show-pdf"

  # get href to pdfLink
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )

  return( pdflink )
}

###
# Get full text pdf link from springerlink full text website.
##
# Get full text pdf link from springerlink full text website.
##
get.pdflink.springerlink <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # citation_pdf_url meta tag holds the direct pdf url
  css <- 'meta[name="citation_pdf_url"]'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )

  return( pdflink )
}

###
# Get full text pdf link from nature full text website.
# Tries the citation_pdf_url meta tag first, then a pdf anchor on the page.
##
get.pdflink.nature <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  css <- 'meta[name="citation_pdf_url"]'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )

  # BUG FIX: the original never returned pdflink when the meta tag matched —
  # control fell off the skipped if() and the function returned NULL, which
  # made the caller's !is.na(pdflink) test error out.
  if ( ! identical( pdflink, character(0) ) )
  {
    return( pdflink )
  }

  # fallback: pdf anchor element
  css <- 'a[class="inline-block block-link pa10 pl0"]'
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  if ( ! identical( intermed1, character(0) ) )
  {
    return( paste0( "https://www.nature.com", intermed1[1] ) )
  }

  # nothing found: NA (not NULL) so the caller's is.na() check works
  NA
}

###
# Get full text pdf link from acm full text website.
##
get.pdflink.acm <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  css <- '.pdfprint a'
  intermed <- page %>% html_nodes( css = css ) %>% html_attr( "href" )

  if ( ! identical( intermed, character(0) ) )
  {
    return( paste0( "http://online.liebertpub.com", intermed ) )
  }

  # ROBUSTNESS FIX: original fell off the end returning NULL; NA keeps the
  # caller's is.na() check well-defined.
  NA
}

###
# Get full text pdf link from wiley full text website.
##
get.pdflink.wiley <- function( url )
{
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  css <- 'meta[name="citation_pdf_url"]'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )

  return( pdflink )
}

###
# Get full text pdf link from wiley full text website.
##
# Get full text pdf link from wiley full text website.
##
get.pdflink.wileyreal <- function( url )
{
  # fetch the landing page with a browser-like user agent
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # the citation_pdf_url meta tag carries the direct pdf url
  pdflink <- html_attr( html_nodes( landing, css = 'meta[name="citation_pdf_url"]' ), "content" )

  return( pdflink )
}

###
# Get full text pdf link from sciencedirect full text website.
# Two-step: the landing page is a redirect stub whose target page carries the
# citation_pdf_url meta tag.
##
get.pdflink.sciencedirect <- function( url )
{
  ua <- curl::new_handle( "useragent" = "Chrome/55.0" )

  # step 1: pull the (url-encoded) redirect target from the stub page
  stub <- xml2::read_html( curl( url, handle = ua ) )
  encoded_target <- html_attr( html_nodes( stub, css = 'input[name="redirectURL"]' ), "value" )
  target <- URLdecode( encoded_target )

  # step 2: follow it and read the pdf url from the meta tag
  article <- xml2::read_html( curl( target, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  pdf_path <- html_attr( html_nodes( article, css = 'meta[name="citation_pdf_url"]' ), "content" )

  pdflink <- paste0( "https://www.sciencedirect.com", pdf_path )
  return( pdflink )
}

###
# Get full text pdf link from springerlink full text website.
# NOTE(review): this re-defines get.pdflink.springerlink identically to the
# earlier definition in this file; the duplicate is harmless but redundant.
##
get.pdflink.springerlink <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  pdflink <- html_attr( html_nodes( landing, css = 'meta[name="citation_pdf_url"]' ), "content" )
  return( pdflink )
}

###
# Get full text pdf link from medicaljournals.se full text website.
##
# Get full text pdf link from medicaljournals.se full text website.
##
get.pdflink.medicaljournals.se <- function( url )
{
  # fetch landing page with a browser-like user agent
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # relative pdf path behind the second "download" button (SelectorGadget)
  rel_path <- html_attr( html_nodes( landing, css = 'li:nth-child(2) .btn-success2' ), "href" )

  # resolve against the site root
  paste0( "https://www.medicaljournals.se", rel_path )
}

###
# Get full text pdf link from jkan.or.kr full text website.
##
get.pdflink.jkan.or.kr <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # fourth item in the "Format" portlet links the pdf (SelectorGadget)
  rel_path <- html_attr( html_nodes( landing, css = '#portlet_content_Format li:nth-child(4) a' ), "href" )

  paste0( "https://www.jkan.or.kr", rel_path )
}

###
# Get full text pdf link from karger full text website.
##
get.pdflink.karger <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # pdf button carries a site-relative href (SelectorGadget)
  rel_path <- html_attr( html_nodes( landing, css = '.btn-karger' ), "href" )

  paste0( "https://www.karger.com", rel_path )
}

###
# Get full text pdf link from degruyter full text website.
##
# Get full text pdf link from degruyter full text website.
##
get.pdflink.degruyter <- function( url )
{
  # fetch landing page with a browser-like user agent
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # site-relative pdf href (SelectorGadget)
  rel_path <- html_attr( html_nodes( landing, css = '.pdf-link' ), "href" )

  paste0( "https://www.degruyter.com", rel_path )
}

###
# Get full text pdf link from biomedcentral full text website.
##
get.pdflink.biomedcentral <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # citation_pdf_url meta tag already holds an absolute pdf url
  html_attr( html_nodes( landing, css = 'meta[name="citation_pdf_url"]' ), "content" )
}

###
# Get full text pdf link from wjgnet full text website.
##
get.pdflink.wjgnet <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # third navigation entry links the pdf directly (SelectorGadget)
  html_attr( html_nodes( landing, css = '.left-articlenav li:nth-child(3) a' ), "href" )
}

###
# Get full text pdf link from thieme-connect full text website.
##
# Get full text pdf link from thieme-connect full text website.
# Two-step: the article tab bar links an intermediate page whose "#pdfLink"
# element points at the pdf.
##
get.pdflink.thieme <- function( url )
{
  base <- "http://www.thieme-connect.com"

  # step 1: locate the pdf tab on the landing page (SelectorGadget)
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  tab_href <- html_attr( html_nodes( landing, css = '#articleTabs :nth-child(2) a' ), "href" )

  # step 2: follow it and read the pdf href from "#pdfLink"
  tab_page <- xml2::read_html( curl( paste0( base, tab_href ),
                                     handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  pdf_href <- html_attr( html_nodes( tab_page, css = '#pdfLink' ), "href" )

  paste0( base, pdf_href )
}

###
# Get full text pdf link from tandfonline full text website.
##
get.pdflink.tandfonline <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # pdf button (SelectorGadget), href is site-relative
  rel_path <- html_attr( html_nodes( landing, css = '.show-pdf' ), "href" )

  paste0( "http://www.tandfonline.com", rel_path )
}

###
# Get full text pdf link from sjweh full text website.
##
get.pdflink.sjweh <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # download link (SelectorGadget); note trailing slash on the site root
  rel_path <- html_attr( html_nodes( landing, css = '.pdf-download' ), "href" )

  paste0( "http://www.sjweh.fi/", rel_path )
}

###
# Get full text pdf link from painphysicianjournal full text website.
##
# Get full text pdf link from painphysicianjournal full text website.
##
get.pdflink.painphysicianjournal <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # pdf link floated right in the article row (SelectorGadget)
  rel_path <- html_attr( html_nodes( landing, css = '.row .float-right' ), "href" )

  paste0( "http://www.painphysicianjournal.com", rel_path )
}

###
# Get full text pdf link from mdpi.com full text website.
##
get.pdflink.mdpi.com <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # citation_pdf_url meta tag already holds an absolute pdf url
  html_attr( html_nodes( landing, css = 'meta[name="citation_pdf_url"]' ), "content" )
}

###
# Get full text pdf link from jospt full text website.
##
get.pdflink.jospt <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # anchors starting with the pdf path; only the first match is used
  hrefs <- html_attr( html_nodes( landing, css = 'a[href^="/doi/pdf"]' ), "href" )

  paste0( "http://www.jospt.org", hrefs[1] )
}

###
# Get full text pdf link from jneurosci full text website.
##
get.pdflink.jneurosci <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  html_attr( html_nodes( landing, css = 'meta[name="citation_pdf_url"]' ), "content" )
}

###
# Get full text pdf link from jmir.org full text website.
##
# Get full text pdf link from jmir.org full text website.
# Two-step: the abstract-pdf meta tag links a page that carries the real
# download anchor.
##
get.pdflink.jmir.org <- function( url )
{
  # step 1: abstract pdf url from the landing page's meta tag
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  abstract_pdf <- html_attr( html_nodes( landing, css = 'meta[name="citation_abstract_pdf_url"]' ), "content" )

  # step 2: the download anchor on that page is the final pdf link
  abstract_page <- xml2::read_html( curl( abstract_pdf,
                                          handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  html_attr( html_nodes( abstract_page,
                         css = 'a[href^="http://www.jmir.org/article/download"]' ),
             "href" )
}

###
# Get full text pdf link from jiaci full text website.
##
get.pdflink.jiaci <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )

  # second anchor of the first list item (SelectorGadget)
  rel_path <- html_attr( html_nodes( landing, css = 'li:nth-child(1) a:nth-child(2)' ), "href" )

  paste0( "http://www.jiaci.org", rel_path )
}

###
# Get full text pdf link from indianjcancer full text website.
# Two-step: citation_pdf_url meta tag links a page whose ".pdf" anchor is the
# real file.
##
get.pdflink.indianjcancer <- function( url )
{
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  meta_url <- html_attr( html_nodes( landing, css = 'meta[name="citation_pdf_url"]' ), "content" )

  pdf_page <- xml2::read_html( curl( meta_url,
                                     handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  pdf_name <- html_attr( html_nodes( pdf_page, css = 'a[href$=".pdf"]' ), "href" )

  paste0( "http://www.indianjcancer.com/", pdf_name )
}

###
# Get full text pdf link from inaactamedica full text website.
##
get.pdflink.inaactamedica <- function( url ) {
  # The stored url already points directly at the pdf; pass it through.
  return( url )
}

###
# Get full text pdf link from impactjournals full text website.
##
get.pdflink.impactjournals <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  meta <- html_nodes( page, css = 'meta[name="citation_pdf_url"]' )
  return( html_attr( meta, "content" ) )
}

###
# Get full text pdf link from ijo.in full text website.
##
get.pdflink.ijo.in <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # first hop: intermediate page from the first citation_pdf_url meta tag
  metaurls <- html_attr( html_nodes( page, css = 'meta[name="citation_pdf_url"]' ), "content" )
  page2 <- xml2::read_html( curl( metaurls[1], handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # second hop: any anchor ending in .pdf; hrefs are site-relative
  pdfpath <- html_attr( html_nodes( page2, css = 'a[href$=".pdf"]' ), "href" )
  return( paste0( "http://www.ijo.in/", pdfpath ) )
}

###
# Get full text pdf link from ijkd full text website.
##
get.pdflink.ijkd <- function( url ) {
  # Download and parse the article landing page (a frameset).
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # first hop: the first frame's src holds the real content page
  framesrcs <- html_attr( html_nodes( page, css = 'frame' ), "src" )
  page2 <- xml2::read_html( curl( framesrcs[1], handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # second hop: absolute links back into the ijkd site point at the pdf
  return( html_attr( html_nodes( page2, css = 'a[href^="http://www.ijkd"]' ), "href" ) )
}

###
# Get full text pdf link from healio full text website.
##
get.pdflink.healio <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  meta <- html_nodes( page, css = 'meta[name="citation_pdf_url"]' )
  return( html_attr( meta, "content" ) )
}

###
# Get full text pdf link from hdbp full text website.
##
get.pdflink.hdbp <- function( url ) {
  # The stored url already points directly at the pdf; pass it through.
  return( url )
}

###
# Get full text pdf link from haematologica full text website.
##
get.pdflink.haematologica <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  meta <- html_nodes( page, css = 'meta[name="citation_pdf_url"]' )
  return( html_attr( meta, "content" ) )
}

###
# Get full text pdf link from europeanreview full text website.
##
get.pdflink.europeanreview <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf anchor located with SelectorGadget [http://selectorgadget.com/]
  rawhref <- html_attr( html_nodes( page, css = '.right' ), "href" )
  # the scraped href sometimes starts with a stray space before "http"; drop it
  return( sub( " http", "http", rawhref ) )
}

###
# Get full text pdf link from eje-online full text website.
##
get.pdflink.ejeonline <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  meta <- html_nodes( page, css = 'meta[name="citation_pdf_url"]' )
  return( html_attr( meta, "content" ) )
}

###
# Get full text pdf link from educationforhealth full text website.
##
get.pdflink.educationforhealth <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # first hop: intermediate page from the citation_pdf_url meta tag
  metaurl <- html_attr( html_nodes( page, css = 'meta[name="citation_pdf_url"]' ), "content" )
  page2 <- xml2::read_html( curl( metaurl, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # second hop: any anchor ending in .pdf; hrefs are site-relative
  pdfpath <- html_attr( html_nodes( page2, css = 'a[href$=".pdf"]' ), "href" )
  return( paste0( "http://www.educationforhealth.net/", pdfpath ) )
}

###
# Get full text pdf link from ectrx full text website.
##
get.pdflink.ectrx <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf anchor located with SelectorGadget [http://selectorgadget.com/]
  relpath <- html_attr( html_nodes( page, css = 'b a' ), "href" )
  # the href is relative to the forms directory
  return( paste0( "http://www.ectrx.org/forms/", relpath ) )
}

###
# Get full text pdf link from e-cmh full text website.
##
get.pdflink.ecmh <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the fulltext_pdf meta tag
  meta <- html_nodes( page, css = 'meta[name="fulltext_pdf"]' )
  return( html_attr( meta, "content" ) )
}

###
# Get full text pdf link from dirjournal full text website.
##
get.pdflink.dirjournal <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # several .pdf anchors are present; the second is the article pdf
  hrefs <- html_attr( html_nodes( page, css = 'a[href$=".pdf"]' ), "href" )
  return( paste0( "http://www.dirjournal.org", hrefs[2] ) )
}

###
# Get full text pdf link from danmedj full text website.
##
get.pdflink.danmedj <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # anchors ending in .pdf already hold the full link
  return( html_attr( html_nodes( page, css = 'a[href$=".pdf"]' ), "href" ) )
}

###
# Get full text pdf link from cmj.org full text website.
##
get.pdflink.cmj.org <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # first hop: intermediate page from the citation_pdf_url meta tag
  metaurl <- html_attr( html_nodes( page, css = 'meta[name="citation_pdf_url"]' ), "content" )
  page2 <- xml2::read_html( curl( metaurl, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # second hop: the first anchor inside a paragraph; href is site-relative
  pdfpath <- html_attr( html_nodes( page2, css = 'p a:nth-child(1)' ), "href" )
  return( paste0( "http://www.cmj.org/", pdfpath ) )
}

###
# Get full text pdf link from cmj.hr full text website.
##
get.pdflink.cmj.hr <- function( url ) {
  # Download and parse the article landing page (a frameset).
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the frame with an absolute src points at the pdf
  return( html_attr( html_nodes( page, css = 'frame[src^="http"]' ), "src" ) )
}

###
# Get full text pdf link from cfp full text website.
##
get.pdflink.cfp <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  meta <- html_nodes( page, css = 'meta[name="citation_pdf_url"]' )
  return( html_attr( meta, "content" ) )
}

###
# Get full text pdf link from canjsurg full text website.
##
get.pdflink.canjsurg <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf anchor located with SelectorGadget [http://selectorgadget.com/]
  return( html_attr( html_nodes( page, css = 'p:nth-child(2) a:nth-child(2)' ), "href" ) )
}

###
# Get full text pdf link from bloodjournal full text website.
##
get.pdflink.bloodjournal <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  meta <- html_nodes( page, css = 'meta[name="citation_pdf_url"]' )
  return( html_attr( meta, "content" ) )
}

###
# Get full text pdf link from birpublications full text website.
##
get.pdflink.birpublications <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf anchor located with SelectorGadget [http://selectorgadget.com/]
  relpath <- html_attr( html_nodes( page, css = '.show-pdf' ), "href" )
  # the href is site-relative, so prepend the publisher host
  return( paste0( "http://www.birpublications.org", relpath ) )
}

###
# Get full text pdf link from atsjournals full text website.
##
get.pdflink.atsjournals <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # same ".show-pdf" anchor pattern as birpublications, different host
  relpath <- html_attr( html_nodes( page, css = '.show-pdf' ), "href" )
  return( paste0( "http://www.atsjournals.org", relpath ) )
}

###
# Get full text pdf link from annsaudimed full text website.
##
get.pdflink.annsaudimed <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf anchor located with SelectorGadget [http://selectorgadget.com/]
  return( html_attr( html_nodes( page, css = '.desc' ), "href" ) )
}

###
# Get full text pdf link from annfammed.org full text website.
##
get.pdflink.annfammed <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the full-text link points at the ".full.pdf+html" view
  viewpath <- html_attr( html_nodes( page, css = '.full-text-pdf-view-link a' ), "href" )
  # strip the "+html" suffix to get the raw pdf, then prepend the host
  pdfpath <- sub( "\\+html", "", viewpath )
  return( paste0( "http://www.annfammed.org", pdfpath ) )
}

###
# Get full text pdf link from ams.ac.ir full text website.
##
get.pdflink.ams.ac.ir <- function( url ) {
  # The stored url already points directly at the pdf; pass it through.
  return( url )
}

###
# Get full text pdf link from ajronline full text website.
##
get.pdflink.ajronline <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf anchor located with SelectorGadget [http://selectorgadget.com/]
  relpath <- html_attr( html_nodes( page, css = '#refLinkList+ li .nowrap' ), "href" )
  return( paste0( "http://www.ajronline.org", relpath ) )
}

###
# Get full text pdf link from ajcn full text website.
##
get.pdflink.ajcn <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the full-text link points at the ".full.pdf+html" view
  viewpath <- html_attr( html_nodes( page, css = '.full-text-pdf-view-link a' ), "href" )
  # strip the "+html" suffix to get the raw pdf, then prepend the host
  pdfpath <- sub( "\\+html", "", viewpath )
  return( paste0( "http://www.ajcn.org", pdfpath ) )
}

###
# Get full text pdf link from aepress.sk full text website.
##
get.pdflink.aepress <- function( url ) {
  # The stored url already points directly at the pdf; pass it through.
  return( url )
}

###
# Get full text pdf link from alphamedpress full text website.
##
get.pdflink.alphamedpress <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  meta <- html_nodes( page, css = 'meta[name="citation_pdf_url"]' )
  return( html_attr( meta, "content" ) )
}

###
# Get full text pdf link from thejns full text website.
##
get.pdflink.thejns <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf anchor located with SelectorGadget [http://selectorgadget.com/]
  relpath <- html_attr( html_nodes( page, css = '.article-tools li:nth-child(2)' ), "href" )
  return( paste0( "http://thejns.org", relpath ) )
}

###
# Get full text pdf link from revistachirurgia full text website.
##
get.pdflink.revistachirurgia <- function( url ) {
  # The stored url already points directly at the pdf; pass it through.
  return( url )
}

###
# Get full text pdf link from rcjournal full text website.
##
get.pdflink.rcjournal <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  meta <- html_nodes( page, css = 'meta[name="citation_pdf_url"]' )
  return( html_attr( meta, "content" ) )
}

###
# Get full text pdf link from rsna full text website.
##
get.pdflink.rsna <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the sixth tab in the article toolbar is the pdf link
  relpath <- html_attr( html_nodes( page, css = '.tab-nav li:nth-child(6) a' ), "href" )
  return( paste0( "http://pubs.rsna.org", relpath ) )
}

###
# Get full text pdf link from rcseng.ac.uk full text website.
##
get.pdflink.rcseng <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the fourth tab in the article toolbar is the pdf link
  relpath <- html_attr( html_nodes( page, css = '.tab-nav li:nth-child(4) a' ), "href" )
  return( paste0( "http://publishing.rcseng.ac.uk", relpath ) )
}

###
# Get full text pdf link from publisherspanel full text website.
##
get.pdflink.publisherspanel <- function( url ) {
  # The stored url already points directly at the pdf; pass it through.
  return( url )
}

###
# Get full text pdf link from aappublications full text website.
##
get.pdflink.aappublications <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  meta <- html_nodes( page, css = 'meta[name="citation_pdf_url"]' )
  return( html_attr( meta, "content" ) )
}

###
# Get full text pdf link from pamw.pl full text website.
##
get.pdflink.pamw <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # several field-item anchors may match; the first one is the pdf
  hrefs <- html_attr( html_nodes( page, css = 'div[class="field-item even"] a' ), "href" )
  return( hrefs[1] )
}

###
# Get full text pdf link from tokai.com full text website.
##
get.pdflink.tokai <- function( url ) {
  # The stored url already points directly at the pdf; pass it through.
  return( url )
}

###
# Get full text pdf link from umsha.ac.ir full text website.
##
get.pdflink.umsha <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  meta <- html_nodes( page, css = 'meta[name="citation_pdf_url"]' )
  return( html_attr( meta, "content" ) )
}

###
# Get full text pdf link from aspet full text website.
##
get.pdflink.aspet <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  pdflink <- page %>% html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>% html_attr( "content" )
  return( pdflink )
}

###
# Get full text pdf link from waocp full text website.
##
get.pdflink.waocp <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf anchor located with SelectorGadget [http://selectorgadget.com/]
  intermed1 <- page %>% html_nodes( css = '.pdf' ) %>% html_attr( "href" )
  # strip a leading "./" from the relative href before prepending the host.
  # FIX: the old pattern "./" treated "." as a regex wildcard and could
  # delete an arbitrary character + "/" anywhere in the string; anchor and
  # escape it so only a literal leading "./" is removed.
  intermed2 <- sub( "^\\./", "", intermed1 )
  pdflink <- paste0( "http://journal.waocp.org/", intermed2 )
  return( pdflink )
}

###
# Get full text pdf link from snmjournals full text website.
##
get.pdflink.snmjournals <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  pdflink <- page %>% html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>% html_attr( "content" )
  return( pdflink )
}

###
# Get full text pdf link from jaoa.org full text website.
##
get.pdflink.jaoa <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  pdflink <- page %>% html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>% html_attr( "content" )
  return( pdflink )
}

###
# Get full text pdf link from clinicalandtranslationalinvestigation full text website.
##
get.pdflink.clinicalandtranslationalinvestigation <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # anchors under files/ are relative to the site root
  intermed1 <- page %>% html_nodes( css = 'a[href^="files/"]' ) %>% html_attr( "href" )
  pdflink <- paste0( "http://clinicalandtranslationalinvestigation.com/", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from quintessenz full text website.
##
get.pdflink.quintessenz <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf anchor located with SelectorGadget [http://selectorgadget.com/]
  intermed1 <- page %>% html_nodes( css = 'a[class="tocbut"]' ) %>% html_attr( "href" )
  # keep everything before the ".de" TLD and rebuild an absolute url.
  # FIX: split on the literal string ".de" (fixed = TRUE); the old regex
  # split treated "." as a wildcard and could split at any "<char>de"
  # occurrence (e.g. inside "index").
  link1 <- strsplit( url, ".de", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".de/", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from sabinet.co.za full text website.
##
get.pdflink.sabinet <- function( url ) {
  # The stored url already points directly at the pdf; pass it through.
  pdflink <- url
  return( pdflink )
}

###
# Get full text pdf link from rcpsych full text website.
##
get.pdflink.rcpsych <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the <link type="application/pdf"> element holds a site-relative href
  intermed1 <- page %>% html_nodes( css = 'link[type="application/pdf"]' ) %>% html_attr( "href" )
  # FIX: split on the literal ".org" (fixed = TRUE) instead of a regex
  # where "." matches any character.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from upol.cz full text website.
##
get.pdflink.upol.cz <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  pdflink <- page %>% html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>% html_attr( "content" )
  return( pdflink )
}

###
# Get full text pdf link from asahq.org full text website.
##
get.pdflink.asahq <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the #pdfLink element stores the site-relative pdf url in a data attribute
  intermed1 <- page %>% html_nodes( css = "#pdfLink" ) %>% html_attr( "data-article-url" )
  # FIX: split on the literal ".org" (fixed = TRUE) instead of a regex
  # where "." matches any character.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from physiology full text website.
##
get.pdflink.physiology <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the <link type="application/pdf"> element holds a site-relative href
  intermed1 <- page %>% html_nodes( css = 'link[type="application/pdf"]' ) %>% html_attr( "href" )
  # FIX: literal split on ".org" (fixed = TRUE), see get.pdflink.asahq.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from aota.org full text website.
##
get.pdflink.aota <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag
  pdflink <- page %>% html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>% html_attr( "content" )
  return( pdflink )
}

###
# Get full text pdf link from nutrition.org full text website.
##
get.pdflink.nutrition <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the full-text link points at the ".full.pdf+html" view
  intermed1 <- page %>% html_nodes( css = ".full-text-pdf-view-link a" ) %>% html_attr( "href" )
  # FIX: split on the literal ".org" (fixed = TRUE); the old regex split
  # treated "." as a wildcard and could split at the wrong position.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  intermed2 <- paste0( link1[[1]][1], ".org", intermed1 )
  # strip the "+html" suffix to get the raw pdf
  pdflink <- sub( "\\+html", "", intermed2 )
  return( pdflink )
}

###
# Get full text pdf link from tums.ac.ir full text website.
##
get.pdflink.tums <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the sidebar "file" anchor holds the pdf href
  pdflink <- page %>% html_nodes( css = "#sidebarRTArticleTools .file" ) %>% html_attr( "href" )
  return( pdflink )
}

###
# Get full text pdf link from arvojournals full text website.
##
get.pdflink.arvojournals <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the #pdfLink element stores the site-relative pdf url in a data attribute
  pdflink <- page %>% html_nodes( css = "#pdfLink" ) %>% html_attr( "data-article-url" )
  pdflink <- paste0( "http://iovs.arvojournals.org/", pdflink )
  return( pdflink )
}

###
# Get full text pdf link from JAMA full text website.
##
get.pdflink.jama <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf link inside the full-text tab, stored in a data attribute
  intermed1 <- page %>% html_nodes( css = "#full-text-tab #pdf-link" ) %>% html_attr( "data-article-url" )
  # FIX: split on the literal ".com" (fixed = TRUE); the old regex split
  # treated "." as a wildcard and could split at any "<char>com" match.
  link1 <- strsplit( url, ".com", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".com", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from plos full text website.
##
get.pdflink.plos <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the download button's href is site-relative
  pdflink <- page %>% html_nodes( css = "#downloadPdf" ) %>% html_attr( "href" )
  pdflink <- paste0( "http://journals.plos.org", pdflink )
  return( pdflink )
}

###
# Get full text pdf link from bmj full text website.
##
get.pdflink.bmj <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the <link type='application/pdf'> element holds a site-relative href
  relpath <- html_attr( html_nodes( page, css = "link[type='application/pdf']" ), "href" )
  return( paste0( "http://www.bmj.com", relpath ) )
}

###
# Get full text pdf link from nejm full text website.
##
get.pdflink.nejm <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # list anchors pointing at /doi/pdf; hrefs are site-relative
  relpath <- html_attr( html_nodes( page, css = "li a[href^='/doi/pdf']" ), "href" )
  return( paste0( "http://www.nejm.org", relpath ) )
}

###
# Get full text pdf link from academic.oup full text website.
##
get.pdflink.acoup <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # article-level links; hrefs are site-relative
  relpath <- html_attr( html_nodes( page, css = ".al-link" ), "href" )
  return( paste0( "https://academic.oup.com", relpath ) )
}

###
# Get full text pdf link from annals full text website.
##
get.pdflink.annals <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the #tagmasterPDF element stores the site-relative pdf url in a data attribute
  relpath <- html_attr( html_nodes( page, css = "#tagmasterPDF" ), "data-article-url" )
  return( paste0( "https://www.annals.org", relpath ) )
}

###
# Get full text pdf link from cambridge full text website.
##
get.pdflink.cambridge <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # first entry in the download-types list is the pdf; href is site-relative
  hrefs <- html_attr( html_nodes( page, css = ".download-types li:nth-child(1) a" ), "href" )
  return( paste0( "https://www.cambridge.org", hrefs[1] ) )
}

###
# Get full text pdf link from OVID full text website.
##
get.pdflink.ovid1 <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # OVID embeds the full-text url in an inline javascript variable
  # ("ovidFullTextUrlForButtons") rather than a meta tag.
  scripts <- page %>% html_nodes( css = "script[type='text/javascript']" )
  fulltexturl <- NULL
  if ( length( scripts ) >= 2 && grepl( "ovidFullTextUrlForButtons = ", scripts[2] ) ) {
    # cut everything before the opening quote of the url and everything
    # from "PubMed" on, then restore the "PubMed" suffix that gsub removed
    stub <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", scripts[2] )
    fulltexturl <- paste0( stub, "PubMed" )
  }
  # FIX: the old code used the extracted url unconditionally; if the script
  # was missing it failed with a cryptic "object not found" error.
  if ( is.null( fulltexturl ) ) {
    stop( "get.pdflink.ovid1: could not locate ovidFullTextUrlForButtons on page", call. = FALSE )
  }
  # the full-text page embeds the pdf viewer in an iframe; its src is the link
  page2 <- xml2::read_html( curl( fulltexturl, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
  pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
  return( pdflink )
}

###
# Get full text pdf link from OVID full text website.
##
# Resolve an OVID full text url to the pdf viewer link. Two cases:
# either the page exposes a citation_pdf_url meta tag (branch 2), or the
# url must be scraped from an inline javascript variable (branch 1).
get.pdflink.ovid2 <- function( url ) {
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  css <- 'meta[name="citation_pdf_url"]'
  # get href to pdfLink
  p1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  if(identical(p1, character(0))){
    # No meta tag: extract the full-text url from the second inline script,
    # which holds the "ovidFullTextUrlForButtons" javascript variable.
    p3 <- page %>% html_nodes( css = "script[type='text/javascript']")
    if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) ) {
      p4 <- p3[2]
      # strip everything before the quoted url and from "PubMed" onwards,
      # then restore the "PubMed" suffix removed by the gsub
      p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
      p6 <- paste0( p5, "PubMed" )
    }
    # NOTE(review): if the grepl above is FALSE, p6 is never assigned and
    # this read_html call errors with "object 'p6' not found" — confirm
    # whether callers rely on that failure mode.
    page2 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
    # the pdf viewer is embedded in an iframe; its src is the pdf link
    pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
  }else{
    # Meta tag present: follow it, then repeat the javascript extraction on
    # the page it points at.
    p2 <- xml2::read_html( curl( p1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
    p3 <- p2 %>% html_nodes( css = "script[type='text/javascript']")
    if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) ) {
      p4 <- p3[2]
      p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
      p6 <- paste0( p5, "PubMed" )
    }
    page3 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
    # follow the "#pdf" anchor (relative to ovidsp.tx.ovid.com) to the
    # viewer page, whose iframe src is the pdf link
    intermed1 <- page3 %>% html_nodes( css = "#pdf" ) %>% html_attr( "href" )
    intermed2 <- paste0( "http://ovidsp.tx.ovid.com/", intermed1 )
    page4 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
    pdflink <- page4 %>% html_nodes( css = "iframe") %>% html_attr( "src" )
  }
  return( pdflink )
}

###
# Get full text pdf link from EHP full text website.
##
get.pdflink.ehp <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf icon anchor; href is site-relative
  pdflink <- page %>% html_nodes( css = '.pdf_icon' ) %>% html_attr( "href" )
  pdflink <- paste0( "https://ehp.niehs.nih.gov", pdflink )
  return( pdflink )
}

###
# Get full text pdf link from Science Direct full text website.
##
get.pdflink.sciencedirect <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the download button links to an interstitial redirect page
  intermed1 <- page %>% html_nodes( css = ".pdf-download-btn-link" ) %>% html_attr( "href" )
  intermed2 <- paste0( "http://www.sciencedirect.com", intermed1 )
  page2 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the interstitial page carries a meta-refresh of the form "0;URL=<pdf>"
  intermed3 <- page2 %>% html_nodes( css = 'meta[content^="0;URL"]' ) %>% html_attr( "content" )
  pdflink <- strsplit( intermed3, "URL=" )[[1]][2]
  return( pdflink )
}

# for springerlink, retrieve the correct url
get.pdflink.linkinghub <- function( url ) {
  # The linkinghub page is a redirect form; pull the target out of the
  # hidden <input name='redirectURL'> field.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
  parsedfull <- htmlParse( page )
  rootnode <- xmlRoot( parsedfull )
  o <- getNodeSet( rootnode, "//input[@name='redirectURL']" )[[1]]
  # convert the node to text so the value attribute can be parsed out
  o2 <- capture.output( o )
  # split on spaces, then on "=", and keep the "value" attribute
  o3 <- data.frame( col = strsplit( o2, split = " " )[[1]] )
  o4 <- separate( o3, col = "col", into = c("a", "b"), sep = "=", fill = "right" )
  http <- o4[ o4$a == "value", "b" ]
  http <- gsub( "\"", "", http )
  # the redirect target is percent-encoded
  outurl <- URLdecode( http )
  # follow the redirect target and read the citation_pdf_url meta tag
  page <- xml2::read_html( curl( outurl, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
  intermed3 <- page %>% html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>% html_attr( "content" )
  # NOTE(review): sub() only removes the first "amp;" occurrence — confirm
  # urls never contain more than one escaped ampersand.
  pdflink1 <- sub( "amp;", "", intermed3 )
  # that url is itself a landing page whose anchors hold the final pdf link
  page2 <- xml2::read_html( pdflink1 )
  pdflink <- page2 %>% html_nodes( css = 'div a' ) %>% html_attr( "href" )
  return( pdflink )
}

###
# Get full text pdf link from scielo full text website.
##
get.pdflink.scielo <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf anchor located with SelectorGadget; the first match is the pdf
  pdflink <- page %>% html_nodes( css = "li:nth-child(2) a:nth-child(1)" ) %>% html_attr( "href" )
  pdflink <- paste0( "http://www.scielo.br", pdflink[1] )
  return( pdflink )
}

###
# Get full text pdf link from hyper.ahajournals full text website.
##
get.pdflink.ahaj <- function( url ) {
  # Download and parse the article landing page.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf url from the citation_pdf_url meta tag.
  # FIX: removed a stray dead string literal (".aha-icon-download") that
  # followed the css assignment but had no effect.
  css <- 'meta[name=citation_pdf_url]'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  return( pdflink )
}

###
# Get full text pdf link from cmaj full text website.
##
# Get full text pdf link from cmaj full text website.
get.pdflink.cmaj <- function( url ) {
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  css <- ".full-text-pdf-view-link a"
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://www.cmaj.ca", pdflink )
  # BUG FIX: sub("+html", ...) is an invalid regular expression ("+" has no
  # preceding token and sub() errors on it); escape the plus so the
  # "+html" suffix is actually stripped, matching get.pdflink.iiar below
  pdflink <- sub( "\\+html", "", pdflink )
  return( pdflink )
}

###
# Get full text pdf link from doi.org (Wiley) full text website.
##
get.pdflink.doiwiley <- function( url ) {
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # citation_pdf_url meta tag leads to a viewer page
  css <- 'meta[name="citation_pdf_url"]'
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  # the viewer embeds the real pdf in #pdfDocument's src attribute
  page2 <- xml2::read_html( curl( intermed1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css2 <- "#pdfDocument"
  pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "src" )
  return( pdflink )
}

###
# Get full text pdf link from doi.org (bjs) full text website.
##
get.pdflink.doibjs <- function( url ) {
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # "epdf" viewer link; swapping epdf -> pdf yields the direct download
  css <- ".js-infopane-epdf"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- sub( "epdf", "pdf", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from asm.org full text website.
##
# Get full text pdf link from asm.org full text website.
get.pdflink.asm <- function( url ) {
  # the pdf link is derivable from the article url itself: swap the
  # "long" (full text) suffix for "full.pdf"
  # (removed an unused page download the original performed here)
  pdflink <- sub( "long", "full.pdf", url )
  return( pdflink )
}

###
# Get full text pdf link from ajp... full text website.
##
get.pdflink.ajp <- function( url ) {
  # the supplied url already points at the pdf
  pdflink <- url
  return( pdflink )
}

###
# Get full text pdf link from apsjournals full text website.
##
get.pdflink.apsjournals <- function( url ) {
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element (Selector Gadget)
  css <- "li:nth-child(2) .nowrap"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://apsjournals.apsnet.org", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from arjournals full text website.
##
get.pdflink.arjournals <- function( url ) {
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # anchors whose href starts with /doi/pdf
  css <- "a[href^='/doi/pdf']"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://arjournals.annualreviews.org", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from ascopubs full text website.
##
get.pdflink.ascopubs <- function( url ) {
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- ".show-pdf"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  intermed2 <- paste0( "http://ascopubs.org", intermed1 )
  # "pdfdirect" skips the in-browser viewer
  pdflink <- sub( "/pdf", "/pdfdirect", intermed2 )
  return( pdflink )
}

###
# Get full text pdf link from avmajournals full text website.
##
get.pdflink.avma <- function( url ) {
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- ".article_link td:nth-child(2) .header4"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://avmajournals.avma.org", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from bjgp full text website.
##
get.pdflink.bjgp <- function( url ) {
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # <link type="application/pdf"> carries the pdf path
  css <- "link[type='application/pdf']"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://bjgp.org", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from boneandjoint full text website.
##
get.pdflink.boneandjoint <- function( url ) {
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- "link[type='application/pdf']"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://bjj.boneandjoint.org.uk", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from aacrjournals full text website.
##
get.pdflink.aacrjournals <- function( url ) {
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- ".last .highwire-article-nav-jumplink"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # rebuild from the caller's host so the journal subdomain is preserved
  link1 <- strsplit( url, ".org" )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from diabetesjournals full text website.
##
get.pdflink.diabetesjournals <- function( url ) {
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- "link[type='application/pdf']"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  link1 <- strsplit( url, ".org" )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from asnjournals full text website.
##
get.pdflink.asnjournals <- function( url ) {
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- ".primary a"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # strip the "+html" suffix so the link points at the raw pdf
  intermed2 <- sub( ".pdf\\+html", ".pdf", intermed1 )
  link1 <- strsplit( url, ".org" )
  # BUG FIX: the original concatenated intermed1 here, silently discarding
  # the ".pdf+html" -> ".pdf" substitution computed just above
  pdflink <- paste0( link1[[1]][1], ".org", intermed2 )
  return( pdflink )
}

###
# Get full text pdf link from ersjournals full text website.
##
get.pdflink.ersjournals <- function( url ) {
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- "link[type='application/pdf']"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  link1 <- strsplit( url, ".com" )
  pdflink <- paste0( link1[[1]][1], ".com", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from gacetamedicade full text website.
##
get.pdflink.gacetamedicade <- function( url ) {
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- ".col-sm-2 li:nth-child(1) a"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://gacetamedicademexico.com/", intermed1 )
  return( pdflink )
}

###
# Get full text pdf link from iiar full text website.
##
# Get full text pdf link from iiar full text website.
get.pdflink.iiar <- function( url ) {
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- ".full-text-pdf-view-link a"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  link1 <- strsplit( url, ".org" )
  intermed2 <- paste0( link1[[1]][1], ".org", intermed1 )
  # strip the "+html" suffix to get the raw pdf
  pdflink <- sub( "\\+html", "", intermed2 )
  return( pdflink )
}

###
# Get full text pdf link from anesthesiology full text website.
##
get.pdflink.anesthesiology <- function( url ) {
  # parse page
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- "#pdfLink"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
  link1 <- strsplit( url, ".org" )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}

###################################
# END FUNCTIONS
###################################

# output directory to store full text pdf
outdir <- 'pdfNEW/pdfs2'

# read data of missing pdfs
missings <- read.csv2( "missingsWithURL.csv", stringsAsFactors = FALSE )
head( missings )
names( missings ) <- c( "pmid", "url" )

# row range to process in this run; renamed from min/max so the variables
# no longer shadow base::min()/base::max()
row_first <- 10000
row_last <- 20000

# set progress bar
progbar <- txtProgressBar( min = row_first, max = row_last, style = 3 )

# for every pmid in the range, fetch its pdf
for ( i in row_first:row_last ) {
  setTxtProgressBar( progbar, i )
  # pass pmid and url directly; no need to build a one-row data.frame
  # per iteration as the original did
  get.pdf( pmid = missings$pmid[ i ], url = missings$url[ i ] )
}

# quit R session
q( save = "no" )
# Fuzzer-generated regression test (AFL, run under valgrind) for the internal
# function multivariance:::match_rows.
# A is a 1x3 matrix holding extreme double values (denormal / near-underflow);
# B is a 1x1 zero matrix.
testlist <- list(A = structure(c(1.38997190089718e-309, 3.81575932257023e-236, 3.80851568444077e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
# call the internal function with the crafted arguments
result <- do.call(multivariance:::match_rows,testlist)
# inspect the structure of the result (the run itself is what valgrind checks)
str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613126265-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
226
r
# Duplicate of the fuzzer-generated regression test above (dataset row repeats
# the file content). A is a 1x3 matrix of extreme double values; B is 1x1 zero.
testlist <- list(A = structure(c(1.38997190089718e-309, 3.81575932257023e-236, 3.80851568444077e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
# call the internal function with the crafted arguments
result <- do.call(multivariance:::match_rows,testlist)
# inspect the structure of the result
str(result)
#============================================================
# Load cleaned data
#============================================================
news_pop_svm <- read.csv('Newspop_cleaned.csv', stringsAsFactors = TRUE)
str(news_pop_svm)  # BUG FIX: was str(news_pop_pred), an undefined object

#============================================================
# Load required packages
#============================================================
library(reshape2)
library(DAAG)
library(e1071)
library(lattice)
library(pROC)
library(randomForest)
library(heuristica)
library(caret)
library(ROSE)
library(kernlab)  # ksvm: linear / polynomial / radial SVM

#============================================================
# Log transformation for target variable
#============================================================
news_pop_svm$normshares <- log(news_pop_svm$shares)
boxplot(news_pop_svm$normshares,
        names = 'Number of shares',
        main = 'Boxplot for Number of shares',
        ylab = 'Number of shares',
        sub = 'After log transformation')

# $stats: extremes of lower whisker, lower hinge, median, upper hinge,
# upper whisker (length-5 column per group)
boxplotstats <- boxplot(news_pop_svm$normshares)$stats
print(boxplotstats)

# assign values into variables for easier calling in subsequent analyses
min_share    <- boxplotstats[1, 1]
low_hinge    <- boxplotstats[2, 1]
median_share <- boxplotstats[3, 1]
up_hinge     <- boxplotstats[4, 1]
max_share    <- boxplotstats[5, 1]

spread    <- up_hinge - low_hinge
low_fence <- low_hinge - 3 * spread  # 3 spreads below the lower hinge
upfence   <- up_hinge + 3 * spread   # 3 spreads above the upper hinge
print(spread)
print(low_fence)
print(upfence)

#============================================================
# Divide target variable (shares) into two categories
#============================================================
news_pop_svm$popularity <- cut(news_pop_svm$normshares,
                               c(low_fence, median_share, upfence),
                               labels = c("Not_Popular", "Popular"))
table(news_pop_svm$popularity)

# label-encode target: Popular -> 1, Not_Popular -> 0
# (simplified from the original ifelse() with embedded assignments)
news_pop_svm$popularity <- ifelse(news_pop_svm$popularity == "Popular", 1, 0)
table(news_pop_svm$popularity)

# convert the binary indicator columns to factors in one pass instead of
# thirteen copy-pasted as.factor() assignments
factor_cols <- c("data_channel_is_bus", "data_channel_is_entertainment",
                 "data_channel_is_lifestyle", "data_channel_is_socmed",
                 "data_channel_is_tech", "data_channel_is_world",
                 "weekday_is_monday", "weekday_is_tuesday",
                 "weekday_is_wednesday", "weekday_is_thursday",
                 "weekday_is_friday", "weekday_is_saturday",
                 "weekday_is_sunday")
news_pop_svm[factor_cols] <- lapply(news_pop_svm[factor_cols], as.factor)

#============================================================
# Divide data into training and testing set (70 / 30)
#============================================================
# NOTE(review): no set.seed() before sample(), so the split is not
# reproducible between runs -- confirm whether that is intended
ind_svm   <- sample(nrow(news_pop_svm), 0.7 * nrow(news_pop_svm))
train_svm <- news_pop_svm[ind_svm, ]
test_svm  <- news_pop_svm[-ind_svm, ]
table(train_svm$popularity)
table(test_svm$popularity)
table(news_pop_svm$popularity)
prop.table(table(news_pop_svm$popularity))
prop.table(table(train_svm$popularity))
prop.table(table(test_svm$popularity))

# helper: threshold the (regression) SVM predictions at 0.5 and report the
# confusion matrix plus ROC curve for the given split; the table is always
# (prediction, truth) so every experiment reports with the same orientation
report_svm <- function(model, data) {
  pred_bin <- ifelse(predict(model, data) > 0.5, 1, 0)
  print(confusionMatrix(table(pred_bin, data$popularity)))
  roc.curve(pred_bin, data$popularity)
}

#============================================================
# Experiments 1-3: all variables, linear / polynomial / radial kernels
#============================================================
svm_model_1 <- ksvm(popularity ~ ., data = train_svm, kernel = "vanilladot")
svm_model_1
set.seed(123)
report_svm(svm_model_1, train_svm)  # Experiment 1: fit on training set
set.seed(661)
report_svm(svm_model_1, test_svm)   # Experiment 1: evaluate on testing set

svm_model_polydot <- ksvm(popularity ~ ., data = train_svm, kernel = "polydot")
svm_model_polydot
report_svm(svm_model_polydot, train_svm)  # Experiment 2
report_svm(svm_model_polydot, test_svm)

svm_model_rbf <- ksvm(popularity ~ ., data = train_svm, kernel = "rbfdot")
svm_model_rbf
report_svm(svm_model_rbf, train_svm)  # Experiment 3
report_svm(svm_model_rbf, test_svm)

#============================================================
# Experiments 4-6: selected variables, linear / polynomial / radial kernels
#============================================================
input_var_svm <- "popularity~kw_avg_avg+ self_reference_min_shares+ kw_max_avg+ self_reference_avg_sharess+ LDA_00+ X+ n_unique_tokens+ kw_min_avg+ LDA_02+ n_tokens_content+ n_non_stop_words+ weekday_is_saturday+ LDA_04+ data_channel_is_entertainment+ LDA_01+ data_channel_is_socmed+ self_reference_max_shares+ kw_avg_max+ n_non_stop_unique_tokens+ LDA_03+ kw_avg_min+ num_imgs+ rate_positive_words+ num_hrefs+ global_sentiment_polarity+ average_token_length+ global_rate_positive_words+ global_subjectivity+ rate_negative_words+ global_rate_negative_words"

svm_model_linear <- ksvm(as.formula(input_var_svm), data = train_svm, kernel = "vanilladot")
svm_model_linear
report_svm(svm_model_linear, train_svm)  # Experiment 4
report_svm(svm_model_linear, test_svm)

svm_model_poly <- ksvm(as.formula(input_var_svm), data = train_svm, kernel = "polydot")
svm_model_poly
report_svm(svm_model_poly, train_svm)  # Experiment 5
report_svm(svm_model_poly, test_svm)

svm_pop_7 <- ksvm(as.formula(input_var_svm), data = train_svm, kernel = "rbfdot")
svm_pop_7
report_svm(svm_pop_7, train_svm)  # Experiment 6
report_svm(svm_pop_7, test_svm)
#============================================================
/Popularity prediction/SVM.R
no_license
kahmannC/ml
R
false
false
11,641
r
#============================================================
# Load cleaned data  (duplicate dataset row of the SVM script above)
#============================================================
news_pop_svm <- read.csv('Newspop_cleaned.csv', stringsAsFactors = TRUE)
str(news_pop_svm)  # BUG FIX: was str(news_pop_pred), an undefined object

#============================================================
# Load required packages
#============================================================
library(reshape2)
library(DAAG)
library(e1071)
library(lattice)
library(pROC)
library(randomForest)
library(heuristica)
library(caret)
library(ROSE)
library(kernlab)  # ksvm: linear / polynomial / radial SVM

#============================================================
# Log transformation for target variable
#============================================================
news_pop_svm$normshares <- log(news_pop_svm$shares)
boxplot(news_pop_svm$normshares,
        names = 'Number of shares',
        main = 'Boxplot for Number of shares',
        ylab = 'Number of shares',
        sub = 'After log transformation')

# $stats: extremes of lower whisker, lower hinge, median, upper hinge,
# upper whisker (length-5 column per group)
boxplotstats <- boxplot(news_pop_svm$normshares)$stats
print(boxplotstats)

# assign values into variables for easier calling in subsequent analyses
min_share    <- boxplotstats[1, 1]
low_hinge    <- boxplotstats[2, 1]
median_share <- boxplotstats[3, 1]
up_hinge     <- boxplotstats[4, 1]
max_share    <- boxplotstats[5, 1]

spread    <- up_hinge - low_hinge
low_fence <- low_hinge - 3 * spread  # 3 spreads below the lower hinge
upfence   <- up_hinge + 3 * spread   # 3 spreads above the upper hinge
print(spread)
print(low_fence)
print(upfence)

#============================================================
# Divide target variable (shares) into two categories
#============================================================
news_pop_svm$popularity <- cut(news_pop_svm$normshares,
                               c(low_fence, median_share, upfence),
                               labels = c("Not_Popular", "Popular"))
table(news_pop_svm$popularity)

# label-encode target: Popular -> 1, Not_Popular -> 0
news_pop_svm$popularity <- ifelse(news_pop_svm$popularity == "Popular", 1, 0)
table(news_pop_svm$popularity)

# convert the binary indicator columns to factors in one pass instead of
# thirteen copy-pasted as.factor() assignments
factor_cols <- c("data_channel_is_bus", "data_channel_is_entertainment",
                 "data_channel_is_lifestyle", "data_channel_is_socmed",
                 "data_channel_is_tech", "data_channel_is_world",
                 "weekday_is_monday", "weekday_is_tuesday",
                 "weekday_is_wednesday", "weekday_is_thursday",
                 "weekday_is_friday", "weekday_is_saturday",
                 "weekday_is_sunday")
news_pop_svm[factor_cols] <- lapply(news_pop_svm[factor_cols], as.factor)

#============================================================
# Divide data into training and testing set (70 / 30)
#============================================================
# NOTE(review): no set.seed() before sample(), so the split is not
# reproducible between runs -- confirm whether that is intended
ind_svm   <- sample(nrow(news_pop_svm), 0.7 * nrow(news_pop_svm))
train_svm <- news_pop_svm[ind_svm, ]
test_svm  <- news_pop_svm[-ind_svm, ]
table(train_svm$popularity)
table(test_svm$popularity)
table(news_pop_svm$popularity)
prop.table(table(news_pop_svm$popularity))
prop.table(table(train_svm$popularity))
prop.table(table(test_svm$popularity))

# helper: threshold the (regression) SVM predictions at 0.5 and report the
# confusion matrix plus ROC curve; table is always (prediction, truth)
report_svm <- function(model, data) {
  pred_bin <- ifelse(predict(model, data) > 0.5, 1, 0)
  print(confusionMatrix(table(pred_bin, data$popularity)))
  roc.curve(pred_bin, data$popularity)
}

#============================================================
# Experiments 1-3: all variables, linear / polynomial / radial kernels
#============================================================
svm_model_1 <- ksvm(popularity ~ ., data = train_svm, kernel = "vanilladot")
svm_model_1
set.seed(123)
report_svm(svm_model_1, train_svm)  # Experiment 1: training set
set.seed(661)
report_svm(svm_model_1, test_svm)   # Experiment 1: testing set

svm_model_polydot <- ksvm(popularity ~ ., data = train_svm, kernel = "polydot")
svm_model_polydot
report_svm(svm_model_polydot, train_svm)  # Experiment 2
report_svm(svm_model_polydot, test_svm)

svm_model_rbf <- ksvm(popularity ~ ., data = train_svm, kernel = "rbfdot")
svm_model_rbf
report_svm(svm_model_rbf, train_svm)  # Experiment 3
report_svm(svm_model_rbf, test_svm)

#============================================================
# Experiments 4-6: selected variables, linear / polynomial / radial kernels
#============================================================
input_var_svm <- "popularity~kw_avg_avg+ self_reference_min_shares+ kw_max_avg+ self_reference_avg_sharess+ LDA_00+ X+ n_unique_tokens+ kw_min_avg+ LDA_02+ n_tokens_content+ n_non_stop_words+ weekday_is_saturday+ LDA_04+ data_channel_is_entertainment+ LDA_01+ data_channel_is_socmed+ self_reference_max_shares+ kw_avg_max+ n_non_stop_unique_tokens+ LDA_03+ kw_avg_min+ num_imgs+ rate_positive_words+ num_hrefs+ global_sentiment_polarity+ average_token_length+ global_rate_positive_words+ global_subjectivity+ rate_negative_words+ global_rate_negative_words"

svm_model_linear <- ksvm(as.formula(input_var_svm), data = train_svm, kernel = "vanilladot")
svm_model_linear
report_svm(svm_model_linear, train_svm)  # Experiment 4
report_svm(svm_model_linear, test_svm)

svm_model_poly <- ksvm(as.formula(input_var_svm), data = train_svm, kernel = "polydot")
svm_model_poly
report_svm(svm_model_poly, train_svm)  # Experiment 5
report_svm(svm_model_poly, test_svm)

svm_pop_7 <- ksvm(as.formula(input_var_svm), data = train_svm, kernel = "rbfdot")
svm_pop_7
report_svm(svm_pop_7, train_svm)  # Experiment 6
report_svm(svm_pop_7, test_svm)
#============================================================
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctDataHelp.R \name{AnomAuth} \alias{AnomAuth} \title{AnomAuth} \format{data frame with 2722 rows, 14 columns. Column Y1 represents anomia, Y2 Authoritarianism, dTx the time interval for measurement occasion x.} \source{ See \url{http://psycnet.apa.org/journals/met/17/2/176/} for details. } \description{ A dataset containing panel data assessments of individuals Anomia and Authoritarianism. }
/man/AnomAuth.Rd
no_license
davan690/ctsem
R
false
true
474
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctDataHelp.R \name{AnomAuth} \alias{AnomAuth} \title{AnomAuth} \format{data frame with 2722 rows, 14 columns. Column Y1 represents anomia, Y2 Authoritarianism, dTx the time interval for measurement occasion x.} \source{ See \url{http://psycnet.apa.org/journals/met/17/2/176/} for details. } \description{ A dataset containing panel data assessments of individuals Anomia and Authoritarianism. }
#' @name sleuth_to_count #' #' @title Convert a sleuth object to count #' #' @description Convert a sleuth object to a count matrix with the condition names. #' #' @param obj \code{sleuth} object. #' @param log bool; should natural log be returned? #' @param normalize bool; should normalized counts be returned #' @param pc numeric; pseudo-counts #' @param aggregation_column character; aggregation column #' #' #' @return a matrix which contains a matrix of target_ids and transcript (or gene) expression in read counts. #' #' @export #' sleuth_to_count <- function(obj, log = TRUE, normalize = TRUE, pc = 0.5, aggregation_column = "wb") { obs_raw <- as.data.table(obj$obs_raw) setkeyv(obs_raw, "target_id") est_counts <- castToMatrix(data = obs_raw, formula = target_id ~ sample, value.var = "est_counts") # add pseudo cout est_counts <- est_counts + pc # aggregate to gene level ag <- factor(obj$target_mapping[match(rownames(est_counts), obj$target_mapping$target_id), ][, aggregation_column]) est_counts <- apply(est_counts, 2, function(x) tapply(x, ag, sum)) # normalize if (normalize) { sf <- obj$est_counts_sf est_counts <- sapply(colnames(est_counts), function(i) est_counts[, i] / sf[i]) } # filter est_counts <- est_counts[rownames(est_counts) %in% obj$filter_df$target_id, ] if (log) log(est_counts) else est_counts }
/R/sleuth_to_count.R
no_license
nstroustrup/HelpingHand
R
false
false
1,546
r
#' @name sleuth_to_count #' #' @title Convert a sleuth object to count #' #' @description Convert a sleuth object to a count matrix with the condition names. #' #' @param obj \code{sleuth} object. #' @param log bool; should natural log be returned? #' @param normalize bool; should normalized counts be returned #' @param pc numeric; pseudo-counts #' @param aggregation_column character; aggregation column #' #' #' @return a matrix which contains a matrix of target_ids and transcript (or gene) expression in read counts. #' #' @export #' sleuth_to_count <- function(obj, log = TRUE, normalize = TRUE, pc = 0.5, aggregation_column = "wb") { obs_raw <- as.data.table(obj$obs_raw) setkeyv(obs_raw, "target_id") est_counts <- castToMatrix(data = obs_raw, formula = target_id ~ sample, value.var = "est_counts") # add pseudo cout est_counts <- est_counts + pc # aggregate to gene level ag <- factor(obj$target_mapping[match(rownames(est_counts), obj$target_mapping$target_id), ][, aggregation_column]) est_counts <- apply(est_counts, 2, function(x) tapply(x, ag, sum)) # normalize if (normalize) { sf <- obj$est_counts_sf est_counts <- sapply(colnames(est_counts), function(i) est_counts[, i] / sf[i]) } # filter est_counts <- est_counts[rownames(est_counts) %in% obj$filter_df$target_id, ] if (log) log(est_counts) else est_counts }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/systemApi.r \name{system$userInfo} \alias{system$userInfo} \title{Get information about the Windows identity used to fulfill the request. This depends on the service's authentication method and the credentials passed by the client. The impersonation level of the Windows identity is included.} \value{ Information about the current user. } \description{ Get information about the Windows identity used to fulfill the request. This depends on the service's authentication method and the credentials passed by the client. The impersonation level of the Windows identity is included. }
/man/system-cash-userInfo.Rd
permissive
eddyrene/PI-Web-API-Client-R
R
false
true
687
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/systemApi.r \name{system$userInfo} \alias{system$userInfo} \title{Get information about the Windows identity used to fulfill the request. This depends on the service's authentication method and the credentials passed by the client. The impersonation level of the Windows identity is included.} \value{ Information about the current user. } \description{ Get information about the Windows identity used to fulfill the request. This depends on the service's authentication method and the credentials passed by the client. The impersonation level of the Windows identity is included. }
################################# # FNNs Classification Paper # # # # Helper Functions File # # # # Anonymized # ################################# FNN_Preprocess <- function(func_cov, scalar_cov = NULL, basis_choice, num_basis, domain_range, covariate_scaling = T, raw_data = F){ #### Error Checks if(length(domain_range) != length(num_basis)){ stop("The number of domain ranges doesn't match length of num_basis") } if(length(domain_range) != length(basis_choice)){ stop("The number of domain ranges doesn't match number of basis choices") } if(length(num_basis) != length(basis_choice)){ stop("Too many/few num_basis - doesn't match number of basis choices") } # Getting check for raw vs. non raw if(raw_data == T){ dim_check = length(func_cov) } else { dim_check = dim(func_cov)[3] } if(dim_check > length(num_basis)){ # Fixing domain range domain_range_list = list() for (t in 1:dim_check) { domain_range_list[[t]] = domain_range[[1]] } # Fixing num basis num_basis = rep(num_basis, dim_check) # Fixing basis type basis_choice = rep(basis_choice, dim_check) # Final update to domain range domain_range = domain_range_list # Warning print("Warning: You only specified basis information for one functional covariate -- it will be repeated for all functional covariates") } #### Creating functional observations in the case of raw data if(raw_data == T){ # Taking in data dat = func_cov # Setting up array temp_tensor = array(dim = c(31, nrow(dat[[1]]), length(dat))) for (t in 1:length(dat)) { # Getting appropriate obs curr_func = dat[[t]] # Getting current domain curr_domain = domain_range[[t]] # Creating basis (using bspline) basis_setup = create.bspline.basis(rangeval = c(curr_domain[1], curr_domain[2]), nbasis = 31, norder = 4) # Time points time_points = seq(curr_domain[1], curr_domain[2], length.out = ncol(curr_func)) # Making functional observation temp_fd = Data2fd(time_points, t(curr_func), basis_setup) # Storing data temp_tensor[,,t] = temp_fd$coefs } # Saving as appropriate 
names func_cov = temp_tensor } ##### Helper Functions ##### # Composite approximator composite_approximator <- function(f, a, b, n) { # This function does the integral approximations and gets called in the # integral approximator function. In the integral approximator function # we pass in a function f into this and that is final output - a collection # of numbers - one for each of the functional observations # Error checking code if (is.function(f) == FALSE) { stop('The input f(x) must be a function with one parameter (variable)') } # General formula h <- (b - a)/n # Setting parameters xn <- seq.int(a, b, length.out = n + 1) xn <- xn[-1] xn <- xn[-length(xn)] # Approximating using the composite rule formula integ_approx <- (h/3)*(f(a) + 2*sum(f(xn[seq.int(2, length(xn), 2)])) + 4*sum(f(xn[seq.int(1, length(xn), 2)])) + f(b)) # Returning result return(integ_approx) } # Integration Approximation for fourier and b-spline integral_form_fourier <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis, range){ ######################################################################## #### Setting up x_i(s) form #### # Initializing func_basis_sin <- c() func_basis_cos <- c() # Setting up vectors for (i in 1:((num_fd_basis - 1)/2)) { func_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") } for (i in 1:((num_fd_basis - 1)/2)) { func_basis_cos[i] <- paste0("cos(2*pi*x*", i, "/", range[2], ")") } # Putting together fd_basis_form <- c(1, rbind(func_basis_sin, func_basis_cos)) # Combining with functional data x_1s <- paste0(functional_data, "*", fd_basis_form, collapse = " + ") ######################################################################## #### Setting up beta_(s) #### beta_basis_sin <- c() beta_basis_cos <- c() # Setting up vectors for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") } for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_cos[i] <- paste0("cos(2*pi*x*", 
i, "/", range[2], ")") } # Combining with functional data beta_basis_form <- c(1, rbind(beta_basis_sin, beta_basis_cos)) ######################################################################## #### Getting approximations #### # Initializing - should be vector of size 11 integ_approximations <- c() for (i in 1:length(beta_basis_form)) { # Combining form_approximated <- paste0(beta_basis_form[i], "*(", x_1s, ")") # Passing to appropriate form final_func <- function(x){ a = eval(parse(text = form_approximated)) return(a) } # Evaluating integ_approximations[i] <- composite_approximator(final_func, range[1], range[2], 5000) } return(integ_approximations) } integral_form_bspline <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis){ } if(is.null(scalar_cov)){ converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis))) } else { converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis) + ncol(scalar_cov))) } # Looping to get approximations for (i in 1:dim(func_cov)[3]) { # Current data set df <- func_cov[,,i] # Turning into matrix if(is.vector(df) == T){ test_mat = matrix(nrow = length(df), ncol = 1) test_mat[,1] = df df = test_mat } # Current number of basis and choice of basis information cur_basis_num <- num_basis[i] cur_basis <- basis_choice[i] # Getting current range cur_range <- domain_range[[i]] # Storing previous numbers if(i == 1){ left_end = 1 right_end = cur_basis_num } else { left_end = sum(num_basis[1:(i - 1)]) + 1 right_end = (left_end - 1) + cur_basis_num } if(cur_basis == "fourier"){ for (j in 1:ncol(df)) { converted_df[j, left_end:right_end] <- c(integral_form_fourier(df[,j], num_beta_basis = cur_basis_num, range = cur_range)) } } else{ } } # Now attaching scalar covariates if(is.null(scalar_cov)){ converted_df <- converted_df } else{ for (k in 1:nrow(converted_df)) { converted_df[k, (sum(num_basis) + 1):(sum(num_basis) + ncol(scalar_cov))] <- scalar_cov[k,] } } # 
Normalize training data if(covariate_scaling == T){ train_x <- scale(converted_df) } else { train_x <- as.matrix(cbind(converted_df[,c(1:sum(num_basis))], scale(converted_df[,-c(1:sum(num_basis))]))) } # Returning the model return(list(data = train_x, raw_data = converted_df, fnc_basis_num = num_basis, fnc_type = basis_choice, func_obs = func_cov) ) } # (0) Packages library(keras) library(tensorflow) library(fda) library(ggplot2) library(ggpubr) library(caret) library(pbapply) # (1) FNN Function FNN <- function(resp, func_cov, scalar_cov = NULL, basis_choice, num_basis, hidden_layers, neurons_per_layer, activations_in_layers, domain_range, epochs, output_size = 1, loss_choice = "mse", metric_choice = list("mean_squared_error"), val_split = 0.2, learn_rate = 0.001, patience_param = 15, early_stopping = T, print_info = T, batch_size = 32, decay_rate = 0){ # # Required packages # library(keras) # library(tensorflow) # library(fda) # library(ggplot2) # DESCRIPTIONS OF PARAMETERS # # Resp = this is your response and will be dimension equal to the number of observations # func_cov = this will be a 3 dimensional tensor where the third dimension = K, the COLUMNS are the observations # and the ROWS are the coefficient values (so the number of rows is the choice in number of basis functions) # scalar_cov = this is a matrix where the ROWS are the observations and the columns are the corresponding # feature values # basis_choice = this will be updated more later but this should be a vector of names (e.g c("Fourier", "B-Spline")) of # dimension K (the number of functional covariates) # num_basis = this will be the number of basis used for each of the K functional covariates. 
So, for example, this will # be something like c(3, 7) meaning that there will be 3 basis functions defining the first functional # covariate and 7 defining the second # hiden_layers = this is a single number equal to the number of hidden layers in the network (including the first layer) # neurons_per_layer = this will be a vector of size equal to the number of hidden layers # activations_in_layers = this is just the choice of activation function for each of the layers, same dimension as the # neurons_per_layer vector above # domain_range = list of size K of two digit numbers indicating the range for each of the functional covariates # Epochs = the number of times we go through the backward and forward pass (or rather, total run throughs of the data) # Output_size = the number of outputs you want at the end -> usually this is going to be 1 for regression # loss_choice = this is a keras parameter to see the loss criterion # metric_choice = this is another keras parameter # val_split = this is another keras parameter to split the training into a validation and actual training set # patience_param = this is another keras parameter used in early stopping # learn_rate = the learning rate for the optimizer # early_stop = this is a keras paramter; decide if you want to stop the training once there is not # much improvement in the validation loss # END OF DESCRIPTIONS # ##### Helper Functions ##### # Composite approximator composite_approximator <- function(f, a, b, n) { # This function does the integral approximations and gets called in the # integral approximator function. 
In the integral approximator function # we pass in a function f into this and that is final output - a collection # of numbers - one for each of the functional observations # Error checking code if (is.function(f) == FALSE) { stop('The input f(x) must be a function with one parameter (variable)') } # General formula h <- (b - a)/n # Setting parameters xn <- seq.int(a, b, length.out = n + 1) xn <- xn[-1] xn <- xn[-length(xn)] # Approximating using the composite rule formula integ_approx <- (h/3)*(f(a) + 2*sum(f(xn[seq.int(2, length(xn), 2)])) + 4*sum(f(xn[seq.int(1, length(xn), 2)])) + f(b)) # Returning result return(integ_approx) } # Integration Approximation for fourier and b-spline integral_form_fourier <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis, range){ ######################################################################## #### Setting up x_i(s) form #### # Initializing func_basis_sin <- c() func_basis_cos <- c() # Setting up vectors for (i in 1:((num_fd_basis - 1)/2)) { func_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") } for (i in 1:((num_fd_basis - 1)/2)) { func_basis_cos[i] <- paste0("cos(2*pi*x*", i, "/", range[2], ")") } # Putting together fd_basis_form <- c(1, rbind(func_basis_sin, func_basis_cos)) # Combining with functional data x_1s <- paste0(functional_data, "*", fd_basis_form, collapse = " + ") ######################################################################## #### Setting up beta_(s) #### beta_basis_sin <- c() beta_basis_cos <- c() # Setting up vectors for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") } for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_cos[i] <- paste0("cos(2*pi*x*", i, "/", range[2], ")") } # Combining with functional data beta_basis_form <- c(1, rbind(beta_basis_sin, beta_basis_cos)) ######################################################################## #### Getting approximations #### # 
Initializing - should be vector of size 11 integ_approximations <- c() for (i in 1:length(beta_basis_form)) { # Combining form_approximated <- paste0(beta_basis_form[i], "*(", x_1s, ")") # Passing to appropriate form final_func <- function(x){ a = eval(parse(text = form_approximated)) return(a) } # Evaluating integ_approximations[i] <- composite_approximator(final_func, range[1], range[2], 5000) } return(integ_approximations) } integral_form_bspline <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis){ } # First, we need to create the proper data set. This means to get the approximations and append # them together for each of the covariates. We are asking for the user to pass an array where the # third dimension is equal to K = the number of functional covariates. Each of these will contain # the coefficients as found by turning the data into a functional data object. # Initializing matrix to keep everything inside across all functional covariates ######### TEST CODE ############ #scalar_cov = NULL ################################ if(is.null(scalar_cov)){ converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis))) } else { converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis) + ncol(scalar_cov))) } ######### TEST CODE ############ #func_cov = test_data #num_basis = c(3, 5) #basis_choice = c("fourier", "fourier") #domain_range = list(c(1, 24), c(1, 24)) ################################ # Looping to get approximations for (i in 1:dim(func_cov)[3]) { # Current data set df <- func_cov[,,i] # Turning into matrix if(is.vector(df) == T){ test_mat = matrix(nrow = length(df), ncol = 1) test_mat[,1] = df df = test_mat } # Current number of basis and choice of basis information cur_basis_num <- num_basis[i] cur_basis <- basis_choice[i] # Getting current range cur_range <- domain_range[[i]] # Storing previous numbers if(i == 1){ left_end = 1 right_end = cur_basis_num } else { left_end 
= sum(num_basis[1:(i - 1)]) + 1 right_end = (left_end - 1) + cur_basis_num } if(cur_basis == "fourier"){ for (j in 1:ncol(df)) { converted_df[j, left_end:right_end] <- c(integral_form_fourier(df[,j], num_beta_basis = cur_basis_num, range = cur_range)) } } else{ } } # Now attaching scalar covariates if(is.null(scalar_cov)){ converted_df <- converted_df } else{ for (k in 1:nrow(converted_df)) { converted_df[k, (sum(num_basis) + 1):(sum(num_basis) + ncol(scalar_cov))] <- scalar_cov[k,] } } # Now we have the data set to pass onto the network, we can set up the data so that it is well suited to be # passed onto the network. This means normalizing things and rewriting some other things # Normalize training data train_x <- scale(converted_df) train_y <- resp # Now, we can move onto creating the model. This means taking advantage of the last three variables. We will use another # function to do this that lets us add layers easily. ######### TEST CODE ############ #hidden_layers = 4 #activations_in_layers = c("relu", "relu", "relu", "relu") #neurons_per_layer = c(64, 64, 64, 64) #output_size = 1 #loss_choice = "mse" #metric_choice = list("mean_squared_error") ################################ # Creating model build_model <- function(train_x, neurons_per_layer, activations_in_layers, hidden_layers, output_size, loss_choice, metric_choice) { # Initializing model for FNN layer model <- keras_model_sequential() %>% layer_dense(units = neurons_per_layer[1], activation = activations_in_layers[1], input_shape = dim(train_x)[2]) # Adding in additional model layers if(hidden_layers > 1){ for (i in 1:(hidden_layers - 1)) { model <- model %>% layer_dense(units = neurons_per_layer[i + 1], activation = activations_in_layers[i + 1]) } } # Setting up final layer model <- model %>% layer_dense(units = output_size) # Setting up other model parameters model %>% compile( loss = loss_choice, optimizer = optimizer_adam(lr = learn_rate, decay = decay_rate), metrics = metric_choice ) return(model) 
} # Now we have the model set up, we can begin to initialize the network before it is ultimately trained. This will also # print out a summary of the model thus far model <- build_model(train_x, neurons_per_layer, activations_in_layers, hidden_layers, output_size, loss_choice, metric_choice) if(print_info == T){ print(model) } # We can also display the progress of the network to make it easier to visualize using the following. This is # borrowed from the keras write up for R on the official website print_dot_callback <- callback_lambda( on_epoch_end = function(epoch, logs) { if (epoch %% 80 == 0) cat("\n") cat("x") } ) # The patience parameter is the amount of epochs to check for improvement. early_stop <- callback_early_stopping(monitor = "val_loss", patience = patience_param) # Now finally, we can fit the model if(early_stopping == T & print_info == T){ history <- model %>% fit( train_x, train_y, epochs = epochs, batch_size = batch_size, validation_split = val_split, verbose = 0, callbacks = list(early_stop, print_dot_callback) ) } else if(early_stopping == T & print_info == F) { history <- model %>% fit( train_x, train_y, epochs = epochs, validation_split = val_split, verbose = 0, callbacks = list(early_stop) ) } else if(early_stopping == F & print_info == T){ history <- model %>% fit( train_x, train_y, epochs = epochs, validation_split = val_split, verbose = 0, callbacks = list(print_dot_callback) ) } else { history <- model %>% fit( train_x, train_y, epochs = epochs, validation_split = val_split, verbose = 0, callbacks = list() ) } # Plotting the errors if(print_info == T){ print(plot(history, metrics = "mean_squared_error", smooth = FALSE) + theme_bw() + xlab("Epoch Number") + ylab("")) } # Skipping line cat("\n") # Printing out if(print_info == T){ print(history) } # Returning the model return(list(model = model, data = train_x, fnc_basis_num = num_basis, fnc_type = basis_choice, parameter_info = history$params, per_iter_info = history$metrics)) } # (2) 
Predict Function FNN_Predict = function(model, func_cov, scalar_cov = NULL, basis_choice, num_basis, domain_range){ ##### Helper Functions ##### # Composite approximator composite_approximator <- function(f, a, b, n) { # This function does the integral approximations and gets called in the # integral approximator function. In the integral approximator function # we pass in a function f into this and that is final output - a collection # of numbers - one for each of the functional observations # Error checking code if (is.function(f) == FALSE) { stop('The input f(x) must be a function with one parameter (variable)') } # General formula h <- (b - a)/n # Setting parameters xn <- seq.int(a, b, length.out = n + 1) xn <- xn[-1] xn <- xn[-length(xn)] # Approximating using the composite rule formula integ_approx <- (h/3)*(f(a) + 2*sum(f(xn[seq.int(2, length(xn), 2)])) + 4*sum(f(xn[seq.int(1, length(xn), 2)])) + f(b)) # Returning result return(integ_approx) } # Integration Approximation for fourier and b-spline integral_form_fourier <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis, range){ ######################################################################## #### Setting up x_i(s) form #### # Initializing func_basis_sin <- c() func_basis_cos <- c() # Setting up vectors for (i in 1:((num_fd_basis - 1)/2)) { func_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") } for (i in 1:((num_fd_basis - 1)/2)) { func_basis_cos[i] <- paste0("cos(2*pi*x*", i, "/", range[2], ")") } # Putting together fd_basis_form <- c(1, rbind(func_basis_sin, func_basis_cos)) # Combining with functional data x_1s <- paste0(functional_data, "*", fd_basis_form, collapse = " + ") ######################################################################## #### Setting up beta_(s) #### beta_basis_sin <- c() beta_basis_cos <- c() # Setting up vectors for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") 
} for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_cos[i] <- paste0("cos(2*pi*x*", i, "/", range[2], ")") } # Combining with functional data beta_basis_form <- c(1, rbind(beta_basis_sin, beta_basis_cos)) ######################################################################## #### Getting approximations #### # Initializing - should be vector of size whatever integ_approximations <- c() for (i in 1:length(beta_basis_form)) { # Combining form_approximated <- paste0(beta_basis_form[i], "*(", x_1s, ")") # Passing to appropriate form final_func <- function(x){ a = eval(parse(text = form_approximated)) return(a) } # Evaluating integ_approximations[i] <- composite_approximator(final_func, range[1], range[2], 5000) } return(integ_approximations) } integral_form_bspline <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis){ } # First, we need to create the proper data set. This means to get the approximations and append # them together for each of the covariates. We are asking for the user to pass an array where the # third dimension is equal to K = the number of functional covariates. Each of these will contain # the coefficients as found by turning the data into a functional data object. 
# Initializing matrix to keep everything inside across all functional covariates ######### TEST CODE ############ #scalar_cov = NULL ################################ if(is.null(scalar_cov)){ converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis))) } else { converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis) + ncol(scalar_cov))) } ######### TEST CODE ############ #func_cov = bike_data_test #num_basis = c(3, 5) #basis_choice = c("fourier", "fourier") #domain_range = list(c(1, 24), c(1, 24)) ################################ # Looping to get approximations for (i in 1:dim(func_cov)[3]) { # Current data set df <- func_cov[,,i] # Turning into matrix if(is.vector(df) == T){ test_mat = matrix(nrow = length(df), ncol = 1) test_mat[,1] = df df = test_mat } # Current number of basis and choice of basis information cur_basis_num <- num_basis[i] cur_basis <- basis_choice[i] # Getting current range cur_range <- domain_range[[i]] # Storing previous numbers if(i == 1){ left_end = 1 right_end = cur_basis_num } else { left_end = sum(num_basis[1:(i - 1)]) + 1 right_end = (left_end - 1) + cur_basis_num } if(cur_basis == "fourier"){ for (j in 1:ncol(df)) { converted_df[j, left_end:right_end] <- c(integral_form_fourier(df[,j], num_beta_basis = cur_basis_num, range = cur_range)) } } else{ } } # Now attaching scalar covariates if(is.null(scalar_cov)){ converted_df <- converted_df } else{ for (k in 1:nrow(converted_df)) { converted_df[k, (sum(num_basis) + 1):(sum(num_basis) + ncol(scalar_cov))] <- scalar_cov[k,] } } # Now we have the data set to pass onto the network, we can set up the data so that it is well suited to be # passed onto the network. 
This means normalizing things and rewriting some other things # Use means and standard deviations from training set to normalize test set ######### TEST CODE ############ #model = bike_example ################################ col_means_train <- attr(model$data, "scaled:center") col_stddevs_train <- attr(model$data, "scaled:scale") test_x <- scale(converted_df, center = col_means_train, scale = col_stddevs_train) # Predicting test_predictions <- model$model %>% predict(test_x) # Returning prediction return(prediction = test_predictions[ , 1]) } final_beta_fourier <- function(x, d, range){ # Appending on 0s zero_vals = rep(0, 51 - length(d)) # creating c vector c = c(d, zero_vals) # Getting values value <- c[1] + c[2]*sin(1*2*pi*x/range[2]) + c[3]*cos(1*2*pi*x/range[2]) + c[4]*sin(2*2*pi*x/range[2]) + c[5]*cos(2*2*pi*x/range[2]) + c[6]*sin(3*2*pi*x/range[2]) + c[7]*cos(3*2*pi*x/range[2]) + c[8]*sin(4*2*pi*x/range[2]) + c[9]*cos(4*2*pi*x/range[2]) + c[10]*sin(5*2*pi*x/range[2]) + c[11]*cos(5*2*pi*x/range[2]) + c[12]*sin(6*2*pi*x/range[2]) + c[13]*cos(6*2*pi*x/range[2]) + c[14]*sin(7*2*pi*x/range[2]) + c[15]*cos(7*2*pi*x/range[2]) + c[16]*sin(8*2*pi*x/range[2]) + c[17]*cos(8*2*pi*x/range[2]) + c[18]*sin(9*2*pi*x/range[2]) + c[19]*cos(9*2*pi*x/range[2]) + c[20]*sin(10*2*pi*x/range[2]) + c[21]*cos(10*2*pi*x/range[2]) + c[22]*sin(11*2*pi*x/range[2]) + c[23]*cos(11*2*pi*x/range[2]) + c[24]*sin(12*2*pi*x/range[2]) + c[25]*cos(12*2*pi*x/range[2]) + c[26]*sin(13*2*pi*x/range[2]) + c[27]*cos(13*2*pi*x/range[2]) + c[28]*sin(14*2*pi*x/range[2]) + c[29]*cos(14*2*pi*x/range[2]) + c[30]*sin(15*2*pi*x/range[2]) + c[31]*cos(15*2*pi*x/range[2]) + c[32]*sin(16*2*pi*x/range[2]) + c[33]*cos(16*2*pi*x/range[2]) + c[34]*sin(17*2*pi*x/range[2]) + c[35]*cos(17*2*pi*x/range[2]) + c[36]*sin(18*2*pi*x/range[2]) + c[37]*cos(18*2*pi*x/range[2]) + c[38]*sin(19*2*pi*x/range[2]) + c[39]*cos(19*2*pi*x/range[2]) + c[40]*sin(20*2*pi*x/range[2]) + c[41]*cos(20*2*pi*x/range[2]) + 
c[42]*sin(21*2*pi*x/range[2]) + c[43]*cos(21*2*pi*x/range[2]) + c[44]*sin(22*2*pi*x/range[2]) + c[45]*cos(22*2*pi*x/range[2]) + c[46]*sin(23*2*pi*x/range[2]) + c[47]*cos(23*2*pi*x/range[2]) + c[48]*sin(24*2*pi*x/range[2]) + c[49]*cos(24*2*pi*x/range[2]) + c[50]*sin(25*2*pi*x/range[2]) + c[51]*cos(25*2*pi*x/range[2]) # Returning return(value) } # Composite approximator other composite_approximator_other <- function(f, a, b, n, x_obs, beta) { # This function does the integral approximations and gets called in the # integral approximator function. In the integral approximator function # we pass in a function f into this and that is final output - a collection # of numbers - one for each of the functional observations # Error checking code if (is.function(f) == FALSE) { stop('The input f(x) must be a function with one parameter (variable)') } # General formula h <- (b - a)/n # Setting parameters xn <- seq.int(a, b, length.out = n + 1) xn <- xn[-1] xn <- xn[-length(xn)] # Approximating using the composite rule formula integ_approx <- (h/3)*(f(a, x_obs, beta) + 2*sum(f(xn[seq.int(2, length(xn), 2)], x_obs, beta)) + 4*sum(f(xn[seq.int(1, length(xn), 2)], x_obs, beta)) + f(b, x_obs, beta)) # Returning result return(integ_approx) }
/FNN_FunctionsFile.R
no_license
caojiguo/FunClassifiers
R
false
false
32,838
r
################################# # FNNs Classification Paper # # # # Helper Functions File # # # # Anonymized # ################################# FNN_Preprocess <- function(func_cov, scalar_cov = NULL, basis_choice, num_basis, domain_range, covariate_scaling = T, raw_data = F){ #### Error Checks if(length(domain_range) != length(num_basis)){ stop("The number of domain ranges doesn't match length of num_basis") } if(length(domain_range) != length(basis_choice)){ stop("The number of domain ranges doesn't match number of basis choices") } if(length(num_basis) != length(basis_choice)){ stop("Too many/few num_basis - doesn't match number of basis choices") } # Getting check for raw vs. non raw if(raw_data == T){ dim_check = length(func_cov) } else { dim_check = dim(func_cov)[3] } if(dim_check > length(num_basis)){ # Fixing domain range domain_range_list = list() for (t in 1:dim_check) { domain_range_list[[t]] = domain_range[[1]] } # Fixing num basis num_basis = rep(num_basis, dim_check) # Fixing basis type basis_choice = rep(basis_choice, dim_check) # Final update to domain range domain_range = domain_range_list # Warning print("Warning: You only specified basis information for one functional covariate -- it will be repeated for all functional covariates") } #### Creating functional observations in the case of raw data if(raw_data == T){ # Taking in data dat = func_cov # Setting up array temp_tensor = array(dim = c(31, nrow(dat[[1]]), length(dat))) for (t in 1:length(dat)) { # Getting appropriate obs curr_func = dat[[t]] # Getting current domain curr_domain = domain_range[[t]] # Creating basis (using bspline) basis_setup = create.bspline.basis(rangeval = c(curr_domain[1], curr_domain[2]), nbasis = 31, norder = 4) # Time points time_points = seq(curr_domain[1], curr_domain[2], length.out = ncol(curr_func)) # Making functional observation temp_fd = Data2fd(time_points, t(curr_func), basis_setup) # Storing data temp_tensor[,,t] = temp_fd$coefs } # Saving as appropriate 
names func_cov = temp_tensor } ##### Helper Functions ##### # Composite approximator composite_approximator <- function(f, a, b, n) { # This function does the integral approximations and gets called in the # integral approximator function. In the integral approximator function # we pass in a function f into this and that is final output - a collection # of numbers - one for each of the functional observations # Error checking code if (is.function(f) == FALSE) { stop('The input f(x) must be a function with one parameter (variable)') } # General formula h <- (b - a)/n # Setting parameters xn <- seq.int(a, b, length.out = n + 1) xn <- xn[-1] xn <- xn[-length(xn)] # Approximating using the composite rule formula integ_approx <- (h/3)*(f(a) + 2*sum(f(xn[seq.int(2, length(xn), 2)])) + 4*sum(f(xn[seq.int(1, length(xn), 2)])) + f(b)) # Returning result return(integ_approx) } # Integration Approximation for fourier and b-spline integral_form_fourier <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis, range){ ######################################################################## #### Setting up x_i(s) form #### # Initializing func_basis_sin <- c() func_basis_cos <- c() # Setting up vectors for (i in 1:((num_fd_basis - 1)/2)) { func_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") } for (i in 1:((num_fd_basis - 1)/2)) { func_basis_cos[i] <- paste0("cos(2*pi*x*", i, "/", range[2], ")") } # Putting together fd_basis_form <- c(1, rbind(func_basis_sin, func_basis_cos)) # Combining with functional data x_1s <- paste0(functional_data, "*", fd_basis_form, collapse = " + ") ######################################################################## #### Setting up beta_(s) #### beta_basis_sin <- c() beta_basis_cos <- c() # Setting up vectors for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") } for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_cos[i] <- paste0("cos(2*pi*x*", 
i, "/", range[2], ")") } # Combining with functional data beta_basis_form <- c(1, rbind(beta_basis_sin, beta_basis_cos)) ######################################################################## #### Getting approximations #### # Initializing - should be vector of size 11 integ_approximations <- c() for (i in 1:length(beta_basis_form)) { # Combining form_approximated <- paste0(beta_basis_form[i], "*(", x_1s, ")") # Passing to appropriate form final_func <- function(x){ a = eval(parse(text = form_approximated)) return(a) } # Evaluating integ_approximations[i] <- composite_approximator(final_func, range[1], range[2], 5000) } return(integ_approximations) } integral_form_bspline <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis){ } if(is.null(scalar_cov)){ converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis))) } else { converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis) + ncol(scalar_cov))) } # Looping to get approximations for (i in 1:dim(func_cov)[3]) { # Current data set df <- func_cov[,,i] # Turning into matrix if(is.vector(df) == T){ test_mat = matrix(nrow = length(df), ncol = 1) test_mat[,1] = df df = test_mat } # Current number of basis and choice of basis information cur_basis_num <- num_basis[i] cur_basis <- basis_choice[i] # Getting current range cur_range <- domain_range[[i]] # Storing previous numbers if(i == 1){ left_end = 1 right_end = cur_basis_num } else { left_end = sum(num_basis[1:(i - 1)]) + 1 right_end = (left_end - 1) + cur_basis_num } if(cur_basis == "fourier"){ for (j in 1:ncol(df)) { converted_df[j, left_end:right_end] <- c(integral_form_fourier(df[,j], num_beta_basis = cur_basis_num, range = cur_range)) } } else{ } } # Now attaching scalar covariates if(is.null(scalar_cov)){ converted_df <- converted_df } else{ for (k in 1:nrow(converted_df)) { converted_df[k, (sum(num_basis) + 1):(sum(num_basis) + ncol(scalar_cov))] <- scalar_cov[k,] } } # 
Normalize training data if(covariate_scaling == T){ train_x <- scale(converted_df) } else { train_x <- as.matrix(cbind(converted_df[,c(1:sum(num_basis))], scale(converted_df[,-c(1:sum(num_basis))]))) } # Returning the model return(list(data = train_x, raw_data = converted_df, fnc_basis_num = num_basis, fnc_type = basis_choice, func_obs = func_cov) ) } # (0) Packages library(keras) library(tensorflow) library(fda) library(ggplot2) library(ggpubr) library(caret) library(pbapply) # (1) FNN Function FNN <- function(resp, func_cov, scalar_cov = NULL, basis_choice, num_basis, hidden_layers, neurons_per_layer, activations_in_layers, domain_range, epochs, output_size = 1, loss_choice = "mse", metric_choice = list("mean_squared_error"), val_split = 0.2, learn_rate = 0.001, patience_param = 15, early_stopping = T, print_info = T, batch_size = 32, decay_rate = 0){ # # Required packages # library(keras) # library(tensorflow) # library(fda) # library(ggplot2) # DESCRIPTIONS OF PARAMETERS # # Resp = this is your response and will be dimension equal to the number of observations # func_cov = this will be a 3 dimensional tensor where the third dimension = K, the COLUMNS are the observations # and the ROWS are the coefficient values (so the number of rows is the choice in number of basis functions) # scalar_cov = this is a matrix where the ROWS are the observations and the columns are the corresponding # feature values # basis_choice = this will be updated more later but this should be a vector of names (e.g c("Fourier", "B-Spline")) of # dimension K (the number of functional covariates) # num_basis = this will be the number of basis used for each of the K functional covariates. 
So, for example, this will # be something like c(3, 7) meaning that there will be 3 basis functions defining the first functional # covariate and 7 defining the second # hiden_layers = this is a single number equal to the number of hidden layers in the network (including the first layer) # neurons_per_layer = this will be a vector of size equal to the number of hidden layers # activations_in_layers = this is just the choice of activation function for each of the layers, same dimension as the # neurons_per_layer vector above # domain_range = list of size K of two digit numbers indicating the range for each of the functional covariates # Epochs = the number of times we go through the backward and forward pass (or rather, total run throughs of the data) # Output_size = the number of outputs you want at the end -> usually this is going to be 1 for regression # loss_choice = this is a keras parameter to see the loss criterion # metric_choice = this is another keras parameter # val_split = this is another keras parameter to split the training into a validation and actual training set # patience_param = this is another keras parameter used in early stopping # learn_rate = the learning rate for the optimizer # early_stop = this is a keras paramter; decide if you want to stop the training once there is not # much improvement in the validation loss # END OF DESCRIPTIONS # ##### Helper Functions ##### # Composite approximator composite_approximator <- function(f, a, b, n) { # This function does the integral approximations and gets called in the # integral approximator function. 
In the integral approximator function # we pass in a function f into this and that is final output - a collection # of numbers - one for each of the functional observations # Error checking code if (is.function(f) == FALSE) { stop('The input f(x) must be a function with one parameter (variable)') } # General formula h <- (b - a)/n # Setting parameters xn <- seq.int(a, b, length.out = n + 1) xn <- xn[-1] xn <- xn[-length(xn)] # Approximating using the composite rule formula integ_approx <- (h/3)*(f(a) + 2*sum(f(xn[seq.int(2, length(xn), 2)])) + 4*sum(f(xn[seq.int(1, length(xn), 2)])) + f(b)) # Returning result return(integ_approx) } # Integration Approximation for fourier and b-spline integral_form_fourier <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis, range){ ######################################################################## #### Setting up x_i(s) form #### # Initializing func_basis_sin <- c() func_basis_cos <- c() # Setting up vectors for (i in 1:((num_fd_basis - 1)/2)) { func_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") } for (i in 1:((num_fd_basis - 1)/2)) { func_basis_cos[i] <- paste0("cos(2*pi*x*", i, "/", range[2], ")") } # Putting together fd_basis_form <- c(1, rbind(func_basis_sin, func_basis_cos)) # Combining with functional data x_1s <- paste0(functional_data, "*", fd_basis_form, collapse = " + ") ######################################################################## #### Setting up beta_(s) #### beta_basis_sin <- c() beta_basis_cos <- c() # Setting up vectors for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") } for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_cos[i] <- paste0("cos(2*pi*x*", i, "/", range[2], ")") } # Combining with functional data beta_basis_form <- c(1, rbind(beta_basis_sin, beta_basis_cos)) ######################################################################## #### Getting approximations #### # 
Initializing - should be vector of size 11 integ_approximations <- c() for (i in 1:length(beta_basis_form)) { # Combining form_approximated <- paste0(beta_basis_form[i], "*(", x_1s, ")") # Passing to appropriate form final_func <- function(x){ a = eval(parse(text = form_approximated)) return(a) } # Evaluating integ_approximations[i] <- composite_approximator(final_func, range[1], range[2], 5000) } return(integ_approximations) } integral_form_bspline <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis){ } # First, we need to create the proper data set. This means to get the approximations and append # them together for each of the covariates. We are asking for the user to pass an array where the # third dimension is equal to K = the number of functional covariates. Each of these will contain # the coefficients as found by turning the data into a functional data object. # Initializing matrix to keep everything inside across all functional covariates ######### TEST CODE ############ #scalar_cov = NULL ################################ if(is.null(scalar_cov)){ converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis))) } else { converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis) + ncol(scalar_cov))) } ######### TEST CODE ############ #func_cov = test_data #num_basis = c(3, 5) #basis_choice = c("fourier", "fourier") #domain_range = list(c(1, 24), c(1, 24)) ################################ # Looping to get approximations for (i in 1:dim(func_cov)[3]) { # Current data set df <- func_cov[,,i] # Turning into matrix if(is.vector(df) == T){ test_mat = matrix(nrow = length(df), ncol = 1) test_mat[,1] = df df = test_mat } # Current number of basis and choice of basis information cur_basis_num <- num_basis[i] cur_basis <- basis_choice[i] # Getting current range cur_range <- domain_range[[i]] # Storing previous numbers if(i == 1){ left_end = 1 right_end = cur_basis_num } else { left_end 
= sum(num_basis[1:(i - 1)]) + 1 right_end = (left_end - 1) + cur_basis_num } if(cur_basis == "fourier"){ for (j in 1:ncol(df)) { converted_df[j, left_end:right_end] <- c(integral_form_fourier(df[,j], num_beta_basis = cur_basis_num, range = cur_range)) } } else{ } } # Now attaching scalar covariates if(is.null(scalar_cov)){ converted_df <- converted_df } else{ for (k in 1:nrow(converted_df)) { converted_df[k, (sum(num_basis) + 1):(sum(num_basis) + ncol(scalar_cov))] <- scalar_cov[k,] } } # Now we have the data set to pass onto the network, we can set up the data so that it is well suited to be # passed onto the network. This means normalizing things and rewriting some other things # Normalize training data train_x <- scale(converted_df) train_y <- resp # Now, we can move onto creating the model. This means taking advantage of the last three variables. We will use another # function to do this that lets us add layers easily. ######### TEST CODE ############ #hidden_layers = 4 #activations_in_layers = c("relu", "relu", "relu", "relu") #neurons_per_layer = c(64, 64, 64, 64) #output_size = 1 #loss_choice = "mse" #metric_choice = list("mean_squared_error") ################################ # Creating model build_model <- function(train_x, neurons_per_layer, activations_in_layers, hidden_layers, output_size, loss_choice, metric_choice) { # Initializing model for FNN layer model <- keras_model_sequential() %>% layer_dense(units = neurons_per_layer[1], activation = activations_in_layers[1], input_shape = dim(train_x)[2]) # Adding in additional model layers if(hidden_layers > 1){ for (i in 1:(hidden_layers - 1)) { model <- model %>% layer_dense(units = neurons_per_layer[i + 1], activation = activations_in_layers[i + 1]) } } # Setting up final layer model <- model %>% layer_dense(units = output_size) # Setting up other model parameters model %>% compile( loss = loss_choice, optimizer = optimizer_adam(lr = learn_rate, decay = decay_rate), metrics = metric_choice ) return(model) 
} # Now we have the model set up, we can begin to initialize the network before it is ultimately trained. This will also # print out a summary of the model thus far model <- build_model(train_x, neurons_per_layer, activations_in_layers, hidden_layers, output_size, loss_choice, metric_choice) if(print_info == T){ print(model) } # We can also display the progress of the network to make it easier to visualize using the following. This is # borrowed from the keras write up for R on the official website print_dot_callback <- callback_lambda( on_epoch_end = function(epoch, logs) { if (epoch %% 80 == 0) cat("\n") cat("x") } ) # The patience parameter is the amount of epochs to check for improvement. early_stop <- callback_early_stopping(monitor = "val_loss", patience = patience_param) # Now finally, we can fit the model if(early_stopping == T & print_info == T){ history <- model %>% fit( train_x, train_y, epochs = epochs, batch_size = batch_size, validation_split = val_split, verbose = 0, callbacks = list(early_stop, print_dot_callback) ) } else if(early_stopping == T & print_info == F) { history <- model %>% fit( train_x, train_y, epochs = epochs, validation_split = val_split, verbose = 0, callbacks = list(early_stop) ) } else if(early_stopping == F & print_info == T){ history <- model %>% fit( train_x, train_y, epochs = epochs, validation_split = val_split, verbose = 0, callbacks = list(print_dot_callback) ) } else { history <- model %>% fit( train_x, train_y, epochs = epochs, validation_split = val_split, verbose = 0, callbacks = list() ) } # Plotting the errors if(print_info == T){ print(plot(history, metrics = "mean_squared_error", smooth = FALSE) + theme_bw() + xlab("Epoch Number") + ylab("")) } # Skipping line cat("\n") # Printing out if(print_info == T){ print(history) } # Returning the model return(list(model = model, data = train_x, fnc_basis_num = num_basis, fnc_type = basis_choice, parameter_info = history$params, per_iter_info = history$metrics)) } # (2) 
Predict Function FNN_Predict = function(model, func_cov, scalar_cov = NULL, basis_choice, num_basis, domain_range){ ##### Helper Functions ##### # Composite approximator composite_approximator <- function(f, a, b, n) { # This function does the integral approximations and gets called in the # integral approximator function. In the integral approximator function # we pass in a function f into this and that is final output - a collection # of numbers - one for each of the functional observations # Error checking code if (is.function(f) == FALSE) { stop('The input f(x) must be a function with one parameter (variable)') } # General formula h <- (b - a)/n # Setting parameters xn <- seq.int(a, b, length.out = n + 1) xn <- xn[-1] xn <- xn[-length(xn)] # Approximating using the composite rule formula integ_approx <- (h/3)*(f(a) + 2*sum(f(xn[seq.int(2, length(xn), 2)])) + 4*sum(f(xn[seq.int(1, length(xn), 2)])) + f(b)) # Returning result return(integ_approx) } # Integration Approximation for fourier and b-spline integral_form_fourier <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis, range){ ######################################################################## #### Setting up x_i(s) form #### # Initializing func_basis_sin <- c() func_basis_cos <- c() # Setting up vectors for (i in 1:((num_fd_basis - 1)/2)) { func_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") } for (i in 1:((num_fd_basis - 1)/2)) { func_basis_cos[i] <- paste0("cos(2*pi*x*", i, "/", range[2], ")") } # Putting together fd_basis_form <- c(1, rbind(func_basis_sin, func_basis_cos)) # Combining with functional data x_1s <- paste0(functional_data, "*", fd_basis_form, collapse = " + ") ######################################################################## #### Setting up beta_(s) #### beta_basis_sin <- c() beta_basis_cos <- c() # Setting up vectors for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_sin[i] <- paste0("sin(2*pi*x*", i, "/", range[2], ")") 
} for (i in 1:((num_beta_basis - 1)/2)) { beta_basis_cos[i] <- paste0("cos(2*pi*x*", i, "/", range[2], ")") } # Combining with functional data beta_basis_form <- c(1, rbind(beta_basis_sin, beta_basis_cos)) ######################################################################## #### Getting approximations #### # Initializing - should be vector of size whatever integ_approximations <- c() for (i in 1:length(beta_basis_form)) { # Combining form_approximated <- paste0(beta_basis_form[i], "*(", x_1s, ")") # Passing to appropriate form final_func <- function(x){ a = eval(parse(text = form_approximated)) return(a) } # Evaluating integ_approximations[i] <- composite_approximator(final_func, range[1], range[2], 5000) } return(integ_approximations) } integral_form_bspline <- function(functional_data, beta_basis = NULL, num_fd_basis = dim(func_cov)[1], num_beta_basis){ } # First, we need to create the proper data set. This means to get the approximations and append # them together for each of the covariates. We are asking for the user to pass an array where the # third dimension is equal to K = the number of functional covariates. Each of these will contain # the coefficients as found by turning the data into a functional data object. 
# Initializing matrix to keep everything inside across all functional covariates ######### TEST CODE ############ #scalar_cov = NULL ################################ if(is.null(scalar_cov)){ converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis))) } else { converted_df <- data.frame(matrix(nrow = dim(func_cov)[2], ncol = sum(num_basis) + ncol(scalar_cov))) } ######### TEST CODE ############ #func_cov = bike_data_test #num_basis = c(3, 5) #basis_choice = c("fourier", "fourier") #domain_range = list(c(1, 24), c(1, 24)) ################################ # Looping to get approximations for (i in 1:dim(func_cov)[3]) { # Current data set df <- func_cov[,,i] # Turning into matrix if(is.vector(df) == T){ test_mat = matrix(nrow = length(df), ncol = 1) test_mat[,1] = df df = test_mat } # Current number of basis and choice of basis information cur_basis_num <- num_basis[i] cur_basis <- basis_choice[i] # Getting current range cur_range <- domain_range[[i]] # Storing previous numbers if(i == 1){ left_end = 1 right_end = cur_basis_num } else { left_end = sum(num_basis[1:(i - 1)]) + 1 right_end = (left_end - 1) + cur_basis_num } if(cur_basis == "fourier"){ for (j in 1:ncol(df)) { converted_df[j, left_end:right_end] <- c(integral_form_fourier(df[,j], num_beta_basis = cur_basis_num, range = cur_range)) } } else{ } } # Now attaching scalar covariates if(is.null(scalar_cov)){ converted_df <- converted_df } else{ for (k in 1:nrow(converted_df)) { converted_df[k, (sum(num_basis) + 1):(sum(num_basis) + ncol(scalar_cov))] <- scalar_cov[k,] } } # Now we have the data set to pass onto the network, we can set up the data so that it is well suited to be # passed onto the network. 
This means normalizing things and rewriting some other things # Use means and standard deviations from training set to normalize test set ######### TEST CODE ############ #model = bike_example ################################ col_means_train <- attr(model$data, "scaled:center") col_stddevs_train <- attr(model$data, "scaled:scale") test_x <- scale(converted_df, center = col_means_train, scale = col_stddevs_train) # Predicting test_predictions <- model$model %>% predict(test_x) # Returning prediction return(prediction = test_predictions[ , 1]) } final_beta_fourier <- function(x, d, range){ # Appending on 0s zero_vals = rep(0, 51 - length(d)) # creating c vector c = c(d, zero_vals) # Getting values value <- c[1] + c[2]*sin(1*2*pi*x/range[2]) + c[3]*cos(1*2*pi*x/range[2]) + c[4]*sin(2*2*pi*x/range[2]) + c[5]*cos(2*2*pi*x/range[2]) + c[6]*sin(3*2*pi*x/range[2]) + c[7]*cos(3*2*pi*x/range[2]) + c[8]*sin(4*2*pi*x/range[2]) + c[9]*cos(4*2*pi*x/range[2]) + c[10]*sin(5*2*pi*x/range[2]) + c[11]*cos(5*2*pi*x/range[2]) + c[12]*sin(6*2*pi*x/range[2]) + c[13]*cos(6*2*pi*x/range[2]) + c[14]*sin(7*2*pi*x/range[2]) + c[15]*cos(7*2*pi*x/range[2]) + c[16]*sin(8*2*pi*x/range[2]) + c[17]*cos(8*2*pi*x/range[2]) + c[18]*sin(9*2*pi*x/range[2]) + c[19]*cos(9*2*pi*x/range[2]) + c[20]*sin(10*2*pi*x/range[2]) + c[21]*cos(10*2*pi*x/range[2]) + c[22]*sin(11*2*pi*x/range[2]) + c[23]*cos(11*2*pi*x/range[2]) + c[24]*sin(12*2*pi*x/range[2]) + c[25]*cos(12*2*pi*x/range[2]) + c[26]*sin(13*2*pi*x/range[2]) + c[27]*cos(13*2*pi*x/range[2]) + c[28]*sin(14*2*pi*x/range[2]) + c[29]*cos(14*2*pi*x/range[2]) + c[30]*sin(15*2*pi*x/range[2]) + c[31]*cos(15*2*pi*x/range[2]) + c[32]*sin(16*2*pi*x/range[2]) + c[33]*cos(16*2*pi*x/range[2]) + c[34]*sin(17*2*pi*x/range[2]) + c[35]*cos(17*2*pi*x/range[2]) + c[36]*sin(18*2*pi*x/range[2]) + c[37]*cos(18*2*pi*x/range[2]) + c[38]*sin(19*2*pi*x/range[2]) + c[39]*cos(19*2*pi*x/range[2]) + c[40]*sin(20*2*pi*x/range[2]) + c[41]*cos(20*2*pi*x/range[2]) + 
c[42]*sin(21*2*pi*x/range[2]) + c[43]*cos(21*2*pi*x/range[2]) + c[44]*sin(22*2*pi*x/range[2]) + c[45]*cos(22*2*pi*x/range[2]) + c[46]*sin(23*2*pi*x/range[2]) + c[47]*cos(23*2*pi*x/range[2]) + c[48]*sin(24*2*pi*x/range[2]) + c[49]*cos(24*2*pi*x/range[2]) + c[50]*sin(25*2*pi*x/range[2]) + c[51]*cos(25*2*pi*x/range[2]) # Returning return(value) } # Composite approximator other composite_approximator_other <- function(f, a, b, n, x_obs, beta) { # This function does the integral approximations and gets called in the # integral approximator function. In the integral approximator function # we pass in a function f into this and that is final output - a collection # of numbers - one for each of the functional observations # Error checking code if (is.function(f) == FALSE) { stop('The input f(x) must be a function with one parameter (variable)') } # General formula h <- (b - a)/n # Setting parameters xn <- seq.int(a, b, length.out = n + 1) xn <- xn[-1] xn <- xn[-length(xn)] # Approximating using the composite rule formula integ_approx <- (h/3)*(f(a, x_obs, beta) + 2*sum(f(xn[seq.int(2, length(xn), 2)], x_obs, beta)) + 4*sum(f(xn[seq.int(1, length(xn), 2)], x_obs, beta)) + f(b, x_obs, beta)) # Returning result return(integ_approx) }
check_for_settlements_names <- function(text, rai) { sol <- c() for (j in seq_len(str_length(rai))) { exm <- str_sub(rai, start = 1, end = j) if (length(text[grep(exm, text)]) == 1) { sol <- text[grep(exm, text)] } } if (length(sol) == 0) { rai } else { sol } }
/Getting know of an address/check_for_settlements_names.R
no_license
vydevyatnikov/Graduate_work
R
false
false
413
r
check_for_settlements_names <- function(text, rai) { sol <- c() for (j in seq_len(str_length(rai))) { exm <- str_sub(rai, start = 1, end = j) if (length(text[grep(exm, text)]) == 1) { sol <- text[grep(exm, text)] } } if (length(sol) == 0) { rai } else { sol } }
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 7 c Performing E1-Autarky iteration. c Input Parameter (command line, file): c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query05_falsequ_1344n.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 10 c no.of clauses 7 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 0 c c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query05_falsequ_1344n.qdimacs 10 7 E1 [1 2 3 4 5 6 7 8 9 10] 0 0 0 0 SAT
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query05_falsequ_1344n/query05_falsequ_1344n.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
657
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 7 c Performing E1-Autarky iteration. c Input Parameter (command line, file): c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query05_falsequ_1344n.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 10 c no.of clauses 7 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 0 c c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query05_falsequ_1344n.qdimacs 10 7 E1 [1 2 3 4 5 6 7 8 9 10] 0 0 0 0 SAT
# tests for eol_invasive fxn in taxize context("eol_invasive") gisd <- suppressWarnings(eol_invasive(name='Brassica oleracea', dataset='gisd', verbose=FALSE)) mineps <- suppressWarnings(eol_invasive(name='Ciona intestinalis', dataset='mineps', verbose=FALSE)) test_that("eol_invasive returns the correct value", { expect_that(gisd[1,1], equals('Brassica oleracea')) expect_that(gisd[1,"db"], equals("gisd")) expect_that(mineps[1,1], equals('Ciona intestinalis')) expect_that(mineps[1,"db"], equals("mineps")) }) test_that("eol_invasive returns the correct dimensions", { expect_that(dim(gisd), equals(c(1,4))) expect_that(dim(mineps), equals(c(1,4))) }) test_that("eol_invasive returns the correct class", { expect_that(gisd, is_a("data.frame")) expect_that(mineps, is_a("data.frame")) })
/tests/testthat/test-eol_invasive.R
permissive
eduardszoecs/taxize
R
false
false
803
r
# tests for eol_invasive fxn in taxize context("eol_invasive") gisd <- suppressWarnings(eol_invasive(name='Brassica oleracea', dataset='gisd', verbose=FALSE)) mineps <- suppressWarnings(eol_invasive(name='Ciona intestinalis', dataset='mineps', verbose=FALSE)) test_that("eol_invasive returns the correct value", { expect_that(gisd[1,1], equals('Brassica oleracea')) expect_that(gisd[1,"db"], equals("gisd")) expect_that(mineps[1,1], equals('Ciona intestinalis')) expect_that(mineps[1,"db"], equals("mineps")) }) test_that("eol_invasive returns the correct dimensions", { expect_that(dim(gisd), equals(c(1,4))) expect_that(dim(mineps), equals(c(1,4))) }) test_that("eol_invasive returns the correct class", { expect_that(gisd, is_a("data.frame")) expect_that(mineps, is_a("data.frame")) })
# ------------------------------------------------------------------------------ # Purpose: This script searches for the CMIP5 netcdf files, checks to see if # the files contains any information about ocean depth. File information and # dpeth status is collected into a single data frame then exported as a .csv # Output from this script will be written to the results folder as # ------------------------------------------------------------------------------ # 0. Decisions to make. # ------------------------------------------------------------------------------ # Define the directory, it should the directory of the pacakage. DIR <- "/pic/projects/GCAM/Dorheim/oceanpH/" # Define the direcotry to serach for the CMIP5 files. CMIP5_DIR <- "/pic/projects/GCAM/CMIP5-CLynch/PH_extra" # Select the CMIP5 files to search for VARIABLE <- "ph" DOMAIN <- "Omon" EXPERIMENT <- c("rcp85", "historical") ENSEMBLE <- "r1i1p1" # Define the output file name (will be saved in DIR/results/) file_name <- "CMIP5_depth.csv" # ------------------------------------------------------------------------------ # 1. Set up the environment # ------------------------------------------------------------------------------ # Load libraries library(dplyr) # Source the script that builds and loads the package. source(paste0(DIR,"exec/processing/exploratory/call_package.R")) # ------------------------------------------------------------------------------ # 2. Functional Code # ------------------------------------------------------------------------------ cmip.find_me(path = paste0(CMIP5_DIR), variable = VARIABLE, domain = DOMAIN, experiment = EXPERIMENT, ensemble = ENSEMBLE) %>% cmip.check_depth %>% write.csv(paste0(DIR,"exec/output/L1/",file_name), row.names = FALSE)
/oceanpH/exec/methods_check/exploratory/L1_get_depth_info.R
no_license
JGCRI/OA_variability
R
false
false
1,780
r
# ------------------------------------------------------------------------------ # Purpose: This script searches for the CMIP5 netcdf files, checks to see if # the files contains any information about ocean depth. File information and # dpeth status is collected into a single data frame then exported as a .csv # Output from this script will be written to the results folder as # ------------------------------------------------------------------------------ # 0. Decisions to make. # ------------------------------------------------------------------------------ # Define the directory, it should the directory of the pacakage. DIR <- "/pic/projects/GCAM/Dorheim/oceanpH/" # Define the direcotry to serach for the CMIP5 files. CMIP5_DIR <- "/pic/projects/GCAM/CMIP5-CLynch/PH_extra" # Select the CMIP5 files to search for VARIABLE <- "ph" DOMAIN <- "Omon" EXPERIMENT <- c("rcp85", "historical") ENSEMBLE <- "r1i1p1" # Define the output file name (will be saved in DIR/results/) file_name <- "CMIP5_depth.csv" # ------------------------------------------------------------------------------ # 1. Set up the environment # ------------------------------------------------------------------------------ # Load libraries library(dplyr) # Source the script that builds and loads the package. source(paste0(DIR,"exec/processing/exploratory/call_package.R")) # ------------------------------------------------------------------------------ # 2. Functional Code # ------------------------------------------------------------------------------ cmip.find_me(path = paste0(CMIP5_DIR), variable = VARIABLE, domain = DOMAIN, experiment = EXPERIMENT, ensemble = ENSEMBLE) %>% cmip.check_depth %>% write.csv(paste0(DIR,"exec/output/L1/",file_name), row.names = FALSE)
## ========================================================================== ## Versions: version string information ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ## methods and class defined together so available for prototypes elsewhere .Versions <- setClass("Versions", contains="list") setMethod("initialize", signature(.Object="Versions"), function(.Object, ...) callNextMethod(.Object, .asValidVersions(list(...)))) .isValidVersion <- function(versions) { tryCatch(all(as.integer(versions)==versions) && all(versions >= 0) && length(versions) > 1, error=function(err) FALSE, warning=function(warn) {}) } .asValidVersions <- function(versions) { res <- list() for (i in seq(along=versions)) res[i] <- if (!is.character(versions[[i]]) && .isValidVersion(versions[[i]])) versions[i] else unclass(numeric_version(versions[[i]])) names(res) <- names(versions) res } ## update setMethod("updateObject", signature(object="Versions"), function(object, ..., verbose=FALSE) { if (verbose) message("updateObject(object = 'Versions')") if (!isS4(object)) do.call(new, c("Versions", object)) else object }) ## access setMethod("[", signature=signature(x="Versions"), function(x, i, j, ..., drop = FALSE) { ## 'dispatch' on i to avoid S4 complaint about j 'missing' if (is(i, "character") && !all(i %in% names(x))) { bad <- unique(i[!i %in% names(x)]) cl <- deparse(match.call()[[2]]) stop(sprintf("'[' indices '%s' not found in '%s'", paste(bad, collapse="', '"), cl)) } as(callNextMethod(), "Versions") }) ## assign setReplaceMethod("[", signature(x="Versions"), function(x, i, j, value) { lst <- x@.Data names(lst) <- names(x) lst[i] <- .asValidVersions(value) x@.Data <- lst names(x) <- names(lst) x }) setReplaceMethod("[[", signature(x="Versions"), function(x, i, j, value) { lst <- x@.Data names(lst) <- names(x) lst[[i]] <- unlist(.asValidVersions(value)) x@.Data <- lst names(x) <- names(lst) x }) setReplaceMethod("$", signature(x="Versions"), function(x, name, 
value) { x[[name]] <- value; x }) ## Compare .as.numeric_version <- function(x) numeric_version(as(x, "character")) .canVersionCompare <- function(e1, e2) { if (length(e1) != length(e2)) stop(sprintf("cannot compare versions with length '%d', '%d'", length(e1), length(e2))) if (length(e1)>1 && (!all(names(e1) %in% names(e2)) || !all(names(e2) %in% names(e1)))) stop("cannot compare versions with different names") TRUE } setMethod("Compare", signature=signature( e1="Versions", e2="Versions"), function(e1, e2) { .canVersionCompare(e1, e2) e2 <- e2[names(e1)] e1 <- .as.numeric_version(e1) e2 <- .as.numeric_version(e2) callNextMethod(e1, e2) }) setMethod("Compare", signature=signature( e1="Versions", e2="character"), function(e1, e2) { .canVersionCompare(e1, e2) if (length(e2) > 1) e2 <- e2[names(e1)] e1 <- .as.numeric_version(e1) e2 <- numeric_version(e2) callNextMethod(e1, e2) }) setMethod("Compare", signature=signature( e1="character", e2="Versions"), function(e1, e2) callGeneric(e2,e1)) ## show setAs("Versions", "character", function(from) { if (length(from)) sapply(from, paste, collapse=".") else "Versioned; no version string" }) setMethod("show", signature(object="Versions"), function(object) print(as(object, "character")))
/R/VersionsClass.R
no_license
mukhyala/Biobase
R
false
false
4,443
r
## ========================================================================== ## Versions: version string information ## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ## methods and class defined together so available for prototypes elsewhere .Versions <- setClass("Versions", contains="list") setMethod("initialize", signature(.Object="Versions"), function(.Object, ...) callNextMethod(.Object, .asValidVersions(list(...)))) .isValidVersion <- function(versions) { tryCatch(all(as.integer(versions)==versions) && all(versions >= 0) && length(versions) > 1, error=function(err) FALSE, warning=function(warn) {}) } .asValidVersions <- function(versions) { res <- list() for (i in seq(along=versions)) res[i] <- if (!is.character(versions[[i]]) && .isValidVersion(versions[[i]])) versions[i] else unclass(numeric_version(versions[[i]])) names(res) <- names(versions) res } ## update setMethod("updateObject", signature(object="Versions"), function(object, ..., verbose=FALSE) { if (verbose) message("updateObject(object = 'Versions')") if (!isS4(object)) do.call(new, c("Versions", object)) else object }) ## access setMethod("[", signature=signature(x="Versions"), function(x, i, j, ..., drop = FALSE) { ## 'dispatch' on i to avoid S4 complaint about j 'missing' if (is(i, "character") && !all(i %in% names(x))) { bad <- unique(i[!i %in% names(x)]) cl <- deparse(match.call()[[2]]) stop(sprintf("'[' indices '%s' not found in '%s'", paste(bad, collapse="', '"), cl)) } as(callNextMethod(), "Versions") }) ## assign setReplaceMethod("[", signature(x="Versions"), function(x, i, j, value) { lst <- x@.Data names(lst) <- names(x) lst[i] <- .asValidVersions(value) x@.Data <- lst names(x) <- names(lst) x }) setReplaceMethod("[[", signature(x="Versions"), function(x, i, j, value) { lst <- x@.Data names(lst) <- names(x) lst[[i]] <- unlist(.asValidVersions(value)) x@.Data <- lst names(x) <- names(lst) x }) setReplaceMethod("$", signature(x="Versions"), function(x, name, 
value) { x[[name]] <- value; x }) ## Compare .as.numeric_version <- function(x) numeric_version(as(x, "character")) .canVersionCompare <- function(e1, e2) { if (length(e1) != length(e2)) stop(sprintf("cannot compare versions with length '%d', '%d'", length(e1), length(e2))) if (length(e1)>1 && (!all(names(e1) %in% names(e2)) || !all(names(e2) %in% names(e1)))) stop("cannot compare versions with different names") TRUE } setMethod("Compare", signature=signature( e1="Versions", e2="Versions"), function(e1, e2) { .canVersionCompare(e1, e2) e2 <- e2[names(e1)] e1 <- .as.numeric_version(e1) e2 <- .as.numeric_version(e2) callNextMethod(e1, e2) }) setMethod("Compare", signature=signature( e1="Versions", e2="character"), function(e1, e2) { .canVersionCompare(e1, e2) if (length(e2) > 1) e2 <- e2[names(e1)] e1 <- .as.numeric_version(e1) e2 <- numeric_version(e2) callNextMethod(e1, e2) }) setMethod("Compare", signature=signature( e1="character", e2="Versions"), function(e1, e2) callGeneric(e2,e1)) ## show setAs("Versions", "character", function(from) { if (length(from)) sapply(from, paste, collapse=".") else "Versioned; no version string" }) setMethod("show", signature(object="Versions"), function(object) print(as(object, "character")))
# # TTR: Technical Trading Rules # # Copyright (C) 2007-2013 Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # #'Moving Averages #' #'Calculate various moving averages (MA) of a series. #' #'\code{SMA} calculates the arithmetic mean of the series over the past #'\code{n} observations. #' #'\code{EMA} calculates an exponentially-weighted mean, giving more weight to #'recent observations. See Warning section below. #' #'\code{WMA} is similar to an EMA, but with linear weighting if the length of #'\code{wts} is equal to \code{n}. If the length of \code{wts} is equal to the #'length of \code{x}, the WMA will use the values of \code{wts} as weights. #' #'\code{DEMA} is calculated as: \code{DEMA = (1 + v) * EMA(x,n) - #'EMA(EMA(x,n),n) * v} (with the corresponding \code{wilder} and \code{ratio} #'arguments). #' #'\code{EVWMA} uses volume to define the period of the MA. #' #'\code{ZLEMA} is similar to an EMA, as it gives more weight to recent #'observations, but attempts to remove lag by subtracting data prior to #'\code{(n-1)/2} periods (default) to minimize the cumulative effect. #' #'\code{VWMA} and \code{VWAP} calculate the volume-weighted moving average #'price. #' #'\code{VMA} calculate a variable-length moving average based on the absolute #'value of \code{w}. Higher (lower) values of \code{w} will cause \code{VMA} #'to react faster (slower). 
#' #'\code{HMA} a WMA of the difference of two other WMAs, making it very #'reponsive. #' #'\code{ALMA} inspired by Gaussian filters. Tends to put less weight on most #'recent observations, reducing tendency to overshoot. #' #'@aliases MovingAverages SMA EMA WMA DEMA GD T3 EVWMA ZLEMA VWAP VWMA VMA MA #'@param x Price, volume, etc. series that is coercible to xts or matrix. #'@param price Price series that is coercible to xts or matrix. #'@param volume Volume series that is coercible to xts or matrix, that #'corresponds to price series, or a constant. See Notes. #'@param n Number of periods to average over. #'@param v The 'volume factor' (a number in [0,1]). See Notes. #'@param w Vector of weights (in [0,1]) the same length as \code{x}. #'@param wts Vector of weights. Length of \code{wts} vector must equal the #'length of \code{x}, or \code{n} (the default). #'@param wilder logical; if \code{TRUE}, a Welles Wilder type EMA will be #'calculated; see notes. #'@param ratio A smoothing/decay ratio. \code{ratio} overrides \code{wilder} #'in \code{EMA}, and provides additional smoothing in \code{VMA}. #'@param offset Percentile at which the center of the distribution should occur. #'@param sigma Standard deviation of the distribution. #'@param \dots any other passthrough parameters #'@return A object of the same class as \code{x} or \code{price} or a vector #'(if \code{try.xts} fails) containing the columns: #' \describe{ #' \item{SMA}{ Simple moving average. } #' \item{EMA}{ Exponential moving average. } #' \item{WMA}{ Weighted moving average. } #' \item{DEMA}{ Double-exponential moving average. } #' \item{EVWMA}{ Elastic, volume-weighted moving average. } #' \item{ZLEMA}{ Zero lag exponential moving average. } #' \item{VWMA}{ Volume-weighed moving average (same as \code{VWAP}). } #' \item{VWAP}{ Volume-weighed average price (same as \code{VWMA}). } #' \item{VWA}{ Variable-length moving average. } #' \item{HMA}{ Hull moving average. 
} #' \item{ALMA}{ Arnaud Legoux moving average. } #' } #'@note For \code{EMA}, \code{wilder=FALSE} (the default) uses an exponential #'smoothing ratio of \code{2/(n+1)}, while \code{wilder=TRUE} uses Welles #'Wilder's exponential smoothing ratio of \code{1/n}. #' #'Since \code{WMA} can accept a weight vector of length equal to the length of #'\code{x} or of length \code{n}, it can be used as a regular weighted moving #'average (in the case \code{wts=1:n}) or as a moving average weighted by #'volume, another indicator, etc. #' #'Since \code{DEMA} allows adjusting \code{v}, it is technically Tim Tillson's #'generalized DEMA (GD). When \code{v=1} (the default), the result is the #'standard DEMA. When \code{v=0}, the result is a regular EMA. All other #'values of \code{v} return the GD result. This function can be used to #'calculate Tillson's T3 indicator (see example below). Thanks to John Gavin #'for suggesting the generalization. #' #'For \code{EVWMA}, if \code{volume} is a series, \code{n} should be chosen so #'the sum of the volume for \code{n} periods approximates the total number of #'outstanding shares for the security being averaged. If \code{volume} is a #'constant, it should represent the total number of outstanding shares for the #'security being averaged. #'@section Warning : Some indicators (e.g. EMA, DEMA, EVWMA, etc.) are #'calculated using the indicators' own previous values, and are therefore #'unstable in the short-term. As the indicator receives more data, its output #'becomes more stable. See example below. #'@author Joshua Ulrich, Ivan Popivanov (HMA, ALMA) #'@seealso See \code{\link{wilderSum}}, which is used in calculating a Welles #'Wilder type MA. 
#'@references The following site(s) were used to code/document this #'indicator:\cr \url{http://www.fmlabs.com/reference/ExpMA.htm}\cr #'\url{http://www.fmlabs.com/reference/WeightedMA.htm}\cr #'\url{http://www.fmlabs.com/reference/DEMA.htm}\cr #'\url{http://www.fmlabs.com/reference/T3.htm}\cr #'\url{http://linnsoft.com/tour/techind/evwma.htm}\cr #'\url{http://www.fmlabs.com/reference/ZeroLagExpMA.htm}\cr #'\url{http://www.fmlabs.com/reference/VIDYA.htm}\cr #'\url{http://www.traderslog.com/hullmovingaverage}\cr #'\url{http://www.arnaudlegoux.com/}\cr #'@keywords ts #'@examples #' #' data(ttrc) #' ema.20 <- EMA(ttrc[,"Close"], 20) #' sma.20 <- SMA(ttrc[,"Close"], 20) #' dema.20 <- DEMA(ttrc[,"Close"], 20) #' evwma.20 <- EVWMA(ttrc[,"Close"], ttrc[,"Volume"], 20) #' zlema.20 <- ZLEMA(ttrc[,"Close"], 20) #' alma <- ALMA(ttrc[,"Close"]) #' hma <- HMA(ttrc[,"Close"]) #' #' ## Example of Tim Tillson's T3 indicator #' T3 <- function(x, n=10, v=1) DEMA(DEMA(DEMA(x,n,v),n,v),n,v) #' t3 <- T3(ttrc[,"Close"]) #' #' ## Example of short-term instability of EMA #' ## (and other indicators mentioned above) #' x <- rnorm(100) #' tail( EMA(x[90:100],10), 1 ) #' tail( EMA(x[70:100],10), 1 ) #' tail( EMA(x[50:100],10), 1 ) #' tail( EMA(x[30:100],10), 1 ) #' tail( EMA(x[10:100],10), 1 ) #' tail( EMA(x[ 1:100],10), 1 ) #' #'@rdname MovingAverages #'@export "SMA" <- function(x, n=10, ...) { # Simple Moving Average ma <- runMean( x, n ) if(!is.null(dim(ma))) { colnames(ma) <- "SMA" } return(ma) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "EMA" <- function (x, n=10, wilder=FALSE, ratio=NULL, ...) 
{ # Exponential Moving Average x <- try.xts(x, error=as.matrix) if( n < 1 || n > NROW(x) ) stop("Invalid 'n'") if( any(nNonNA <- n > colSums(!is.na(x))) ) stop("n > number of non-NA values in column(s) ", paste(which(nNonNA), collapse=", ")) # Check for non-leading NAs # Leading NAs are handled in the C code x.na <- xts:::naCheck(x, n) # If ratio is specified, and n is not, set n to approx 'correct' # value backed out from ratio if(missing(n) && !missing(ratio)) n <- trunc(2/ratio - 1) # Determine decay ratio if(is.null(ratio)) { if(wilder) ratio <- 1/n else ratio <- 2/(n+1) } # Call C routine ma <- .Call("ema", x, n, ratio, PACKAGE = "TTR") ma <- reclass(ma,x) if(!is.null(dim(ma))) { colnames(ma) <- "EMA" } return(ma) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "DEMA" <- function(x, n=10, v=1, wilder=FALSE, ratio=NULL) { # Double Exponential Moving Average # Thanks to John Gavin for the v-factor generalization if(v < 0 || v > 1) { stop("Please ensure 0 <= v <= 1") } dema <- (1 + v) * EMA(x,n,wilder,ratio) - EMA(EMA(x,n,wilder,ratio),n,wilder,ratio) * v if(!is.null(dim(dema))) { colnames(dema) <- "DEMA" } return(dema) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "WMA" <- function(x, n=10, wts=1:n, ...) { # Weighted Moving Average x <- try.xts(x, error=as.matrix) wts <- try.xts(wts, error=as.matrix) if( !any( NROW(wts) == c( NROW(x), n ) ) ) stop("Length of 'wts' must equal the length of 'x' or 'n'") if( n < 1 || n > NROW(x) ) stop("Invalid 'n'") # Count NAs, ensure they're only at beginning of data, then remove. 
NAx <- sum( is.na(x) ) NAw <- sum( is.na(wts) ) NAs <- max( NAx, NAw ) if( NAs > 0 ) { if( any( is.na( x[-(1:NAx)]) ) ) stop("'x' contains non-leading NAs") if( any( is.na(wts[-(1:NAw)]) ) ) stop("'wts' contains non-leading NAs") } if( NROW(wts) == n ) { x <- na.omit(x) NAs <- NAx if( any(is.na(wts)) ) stop("'wts' vector of length 'n' cannot have NA values") # Call Fortran routine ma <- .Fortran( "wma", ia = as.double(x), lia = as.integer(NROW(x)), wts = as.double(wts), n = as.integer(n), oa = as.double(x), loa = as.integer(NROW(x)), PACKAGE = "TTR", DUP = TRUE )$oa } else { xw <- na.omit( cbind(x, wts) ) ma <- runSum( xw[,1]*xw[,2], n) / runSum(xw[,2], n) } # replace 1:(n-1) with NAs and prepend NAs from original data ma[1:(n-1)] <- NA ma <- c( rep( NA, NAs ), ma ) if(!is.null(dim(ma))) { colnames(ma) <- "WMA" } reclass(ma,x) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "EVWMA" <- function(price, volume, n=10, ...) { # Elastic, Volume-Weighted Moving Average price <- try.xts(price, error=as.matrix) volume <- try.xts(volume, error=as.matrix) if( !any( NROW(volume) == c( NROW(price), 1 ) ) ) stop("Length of 'volume' must equal 1 or the length of 'price'") if( n < 1 || n > NROW(price) ) stop("Invalid 'n'") pv <- cbind(price, volume) if( any(nNonNA <- n > colSums(!is.na(pv))) ) stop("n > number of non-NA values in ", paste(c("price","volume")[which(nNonNA)], collapse=", ")) # Check for non-leading NAs # Leading NAs are handled in the C code pv.na <- xts:::naCheck(pv, n) # Call C routine ma <- .Call("evwma", pv[,1], pv[,2], n, PACKAGE = "TTR") if(!is.null(dim(ma))) { colnames(ma) <- "EVWMA" } # Convert back to original class reclass(ma, price) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "ZLEMA" <- function (x, n=10, ratio=NULL, ...) 
{ # Zero-Lag Exponential Moving Average x <- try.xts(x, error=as.matrix) # Count NAs, ensure they're only at beginning of data, then remove. NAs <- sum( is.na(x) ) if( NAs > 0 ) { if( any( is.na(x[-(1:NAs)]) ) ) stop("Series contains non-leading NAs") } x <- na.omit(x) # Initialize ma vector ma <- rep(1, NROW(x)) ma[n] <- mean(x[1:n]) # Determine decay ratio if(is.null(ratio)) { ratio <- 2/(n+1) } # Call Fortran routine ma <- .Fortran( "zlema", ia = as.double(x), lia = as.integer(NROW(x)), n = as.integer(n), oa = as.double(ma), loa = as.integer(NROW(ma)), ratio = as.double(ratio), PACKAGE = "TTR", DUP = TRUE )$oa # replace 1:(n-1) with NAs and prepend NAs from original data ma[1:(n-1)] <- NA ma <- c( rep( NA, NAs ), ma ) if(!is.null(dim(ma))) { colnames(ma) <- "ZLEMA" } reclass(ma,x) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export VWAP VWMA "VWAP" <- "VWMA" <- function(price, volume, n=10, ...) { # Volume-weighted average price # Volume-weighted moving average res <- WMA(price, n=n, volume) if(!is.null(dim(res))) { colnames(res) <- "VWAP" } return(res) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "VMA" <- function (x, w, ratio=1, ...) { # Variable Moving Average x <- try.xts(x, error=as.matrix) w <- try.xts(w, error=as.matrix) if( NROW(w) != NROW(x) ) stop("Length of 'w' must equal the length of 'x'") # Check for non-leading NAs # Leading NAs are handled in the C code x.na <- xts:::naCheck(x, 1) w.na <- xts:::naCheck(w, 1) # Call C routine ma <- .Call("vma", x, abs(w), ratio, PACKAGE = "TTR") if(!is.null(dim(ma))) { colnames(ma) <- "VMA" } reclass(ma,x) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "HMA" <- function(x, n=20, ...) { # Hull Moving Average reclass(WMA(2*WMA(x, n=n/2, ...) 
- WMA(x, n=n, ...), n=trunc(sqrt(n)), ...), x) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "ALMA" <- function(x, n=9, offset=0.85, sigma=6, ...) { # ALMA (Arnaud Legoux Moving Average) if(offset < 0 || offset > 1) { stop("Please ensure 0 <= offset <= 1") } if(sigma <= 0) stop("sigma must be > 0") m <- floor(offset*(n-1)) s <- n/sigma wts <- exp(-((seq(0,n-1)-m)^2)/(2*s*s)) sumWeights <- sum(wts) if(sumWeights != 0) wts <- wts/sumWeights alma <- rollapply(x, width=n, FUN=function(xx) sum(xx*wts), align="right") reclass(alma, x) }
/R/MovingAverages.R
no_license
dshen1/TTR
R
false
false
14,396
r
# # TTR: Technical Trading Rules # # Copyright (C) 2007-2013 Joshua M. Ulrich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # #'Moving Averages #' #'Calculate various moving averages (MA) of a series. #' #'\code{SMA} calculates the arithmetic mean of the series over the past #'\code{n} observations. #' #'\code{EMA} calculates an exponentially-weighted mean, giving more weight to #'recent observations. See Warning section below. #' #'\code{WMA} is similar to an EMA, but with linear weighting if the length of #'\code{wts} is equal to \code{n}. If the length of \code{wts} is equal to the #'length of \code{x}, the WMA will use the values of \code{wts} as weights. #' #'\code{DEMA} is calculated as: \code{DEMA = (1 + v) * EMA(x,n) - #'EMA(EMA(x,n),n) * v} (with the corresponding \code{wilder} and \code{ratio} #'arguments). #' #'\code{EVWMA} uses volume to define the period of the MA. #' #'\code{ZLEMA} is similar to an EMA, as it gives more weight to recent #'observations, but attempts to remove lag by subtracting data prior to #'\code{(n-1)/2} periods (default) to minimize the cumulative effect. #' #'\code{VWMA} and \code{VWAP} calculate the volume-weighted moving average #'price. #' #'\code{VMA} calculate a variable-length moving average based on the absolute #'value of \code{w}. Higher (lower) values of \code{w} will cause \code{VMA} #'to react faster (slower). 
#' #'\code{HMA} a WMA of the difference of two other WMAs, making it very #'reponsive. #' #'\code{ALMA} inspired by Gaussian filters. Tends to put less weight on most #'recent observations, reducing tendency to overshoot. #' #'@aliases MovingAverages SMA EMA WMA DEMA GD T3 EVWMA ZLEMA VWAP VWMA VMA MA #'@param x Price, volume, etc. series that is coercible to xts or matrix. #'@param price Price series that is coercible to xts or matrix. #'@param volume Volume series that is coercible to xts or matrix, that #'corresponds to price series, or a constant. See Notes. #'@param n Number of periods to average over. #'@param v The 'volume factor' (a number in [0,1]). See Notes. #'@param w Vector of weights (in [0,1]) the same length as \code{x}. #'@param wts Vector of weights. Length of \code{wts} vector must equal the #'length of \code{x}, or \code{n} (the default). #'@param wilder logical; if \code{TRUE}, a Welles Wilder type EMA will be #'calculated; see notes. #'@param ratio A smoothing/decay ratio. \code{ratio} overrides \code{wilder} #'in \code{EMA}, and provides additional smoothing in \code{VMA}. #'@param offset Percentile at which the center of the distribution should occur. #'@param sigma Standard deviation of the distribution. #'@param \dots any other passthrough parameters #'@return A object of the same class as \code{x} or \code{price} or a vector #'(if \code{try.xts} fails) containing the columns: #' \describe{ #' \item{SMA}{ Simple moving average. } #' \item{EMA}{ Exponential moving average. } #' \item{WMA}{ Weighted moving average. } #' \item{DEMA}{ Double-exponential moving average. } #' \item{EVWMA}{ Elastic, volume-weighted moving average. } #' \item{ZLEMA}{ Zero lag exponential moving average. } #' \item{VWMA}{ Volume-weighed moving average (same as \code{VWAP}). } #' \item{VWAP}{ Volume-weighed average price (same as \code{VWMA}). } #' \item{VWA}{ Variable-length moving average. } #' \item{HMA}{ Hull moving average. 
} #' \item{ALMA}{ Arnaud Legoux moving average. } #' } #'@note For \code{EMA}, \code{wilder=FALSE} (the default) uses an exponential #'smoothing ratio of \code{2/(n+1)}, while \code{wilder=TRUE} uses Welles #'Wilder's exponential smoothing ratio of \code{1/n}. #' #'Since \code{WMA} can accept a weight vector of length equal to the length of #'\code{x} or of length \code{n}, it can be used as a regular weighted moving #'average (in the case \code{wts=1:n}) or as a moving average weighted by #'volume, another indicator, etc. #' #'Since \code{DEMA} allows adjusting \code{v}, it is technically Tim Tillson's #'generalized DEMA (GD). When \code{v=1} (the default), the result is the #'standard DEMA. When \code{v=0}, the result is a regular EMA. All other #'values of \code{v} return the GD result. This function can be used to #'calculate Tillson's T3 indicator (see example below). Thanks to John Gavin #'for suggesting the generalization. #' #'For \code{EVWMA}, if \code{volume} is a series, \code{n} should be chosen so #'the sum of the volume for \code{n} periods approximates the total number of #'outstanding shares for the security being averaged. If \code{volume} is a #'constant, it should represent the total number of outstanding shares for the #'security being averaged. #'@section Warning : Some indicators (e.g. EMA, DEMA, EVWMA, etc.) are #'calculated using the indicators' own previous values, and are therefore #'unstable in the short-term. As the indicator receives more data, its output #'becomes more stable. See example below. #'@author Joshua Ulrich, Ivan Popivanov (HMA, ALMA) #'@seealso See \code{\link{wilderSum}}, which is used in calculating a Welles #'Wilder type MA. 
#'@references The following site(s) were used to code/document this #'indicator:\cr \url{http://www.fmlabs.com/reference/ExpMA.htm}\cr #'\url{http://www.fmlabs.com/reference/WeightedMA.htm}\cr #'\url{http://www.fmlabs.com/reference/DEMA.htm}\cr #'\url{http://www.fmlabs.com/reference/T3.htm}\cr #'\url{http://linnsoft.com/tour/techind/evwma.htm}\cr #'\url{http://www.fmlabs.com/reference/ZeroLagExpMA.htm}\cr #'\url{http://www.fmlabs.com/reference/VIDYA.htm}\cr #'\url{http://www.traderslog.com/hullmovingaverage}\cr #'\url{http://www.arnaudlegoux.com/}\cr #'@keywords ts #'@examples #' #' data(ttrc) #' ema.20 <- EMA(ttrc[,"Close"], 20) #' sma.20 <- SMA(ttrc[,"Close"], 20) #' dema.20 <- DEMA(ttrc[,"Close"], 20) #' evwma.20 <- EVWMA(ttrc[,"Close"], ttrc[,"Volume"], 20) #' zlema.20 <- ZLEMA(ttrc[,"Close"], 20) #' alma <- ALMA(ttrc[,"Close"]) #' hma <- HMA(ttrc[,"Close"]) #' #' ## Example of Tim Tillson's T3 indicator #' T3 <- function(x, n=10, v=1) DEMA(DEMA(DEMA(x,n,v),n,v),n,v) #' t3 <- T3(ttrc[,"Close"]) #' #' ## Example of short-term instability of EMA #' ## (and other indicators mentioned above) #' x <- rnorm(100) #' tail( EMA(x[90:100],10), 1 ) #' tail( EMA(x[70:100],10), 1 ) #' tail( EMA(x[50:100],10), 1 ) #' tail( EMA(x[30:100],10), 1 ) #' tail( EMA(x[10:100],10), 1 ) #' tail( EMA(x[ 1:100],10), 1 ) #' #'@rdname MovingAverages #'@export "SMA" <- function(x, n=10, ...) { # Simple Moving Average ma <- runMean( x, n ) if(!is.null(dim(ma))) { colnames(ma) <- "SMA" } return(ma) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "EMA" <- function (x, n=10, wilder=FALSE, ratio=NULL, ...) 
{ # Exponential Moving Average x <- try.xts(x, error=as.matrix) if( n < 1 || n > NROW(x) ) stop("Invalid 'n'") if( any(nNonNA <- n > colSums(!is.na(x))) ) stop("n > number of non-NA values in column(s) ", paste(which(nNonNA), collapse=", ")) # Check for non-leading NAs # Leading NAs are handled in the C code x.na <- xts:::naCheck(x, n) # If ratio is specified, and n is not, set n to approx 'correct' # value backed out from ratio if(missing(n) && !missing(ratio)) n <- trunc(2/ratio - 1) # Determine decay ratio if(is.null(ratio)) { if(wilder) ratio <- 1/n else ratio <- 2/(n+1) } # Call C routine ma <- .Call("ema", x, n, ratio, PACKAGE = "TTR") ma <- reclass(ma,x) if(!is.null(dim(ma))) { colnames(ma) <- "EMA" } return(ma) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "DEMA" <- function(x, n=10, v=1, wilder=FALSE, ratio=NULL) { # Double Exponential Moving Average # Thanks to John Gavin for the v-factor generalization if(v < 0 || v > 1) { stop("Please ensure 0 <= v <= 1") } dema <- (1 + v) * EMA(x,n,wilder,ratio) - EMA(EMA(x,n,wilder,ratio),n,wilder,ratio) * v if(!is.null(dim(dema))) { colnames(dema) <- "DEMA" } return(dema) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "WMA" <- function(x, n=10, wts=1:n, ...) { # Weighted Moving Average x <- try.xts(x, error=as.matrix) wts <- try.xts(wts, error=as.matrix) if( !any( NROW(wts) == c( NROW(x), n ) ) ) stop("Length of 'wts' must equal the length of 'x' or 'n'") if( n < 1 || n > NROW(x) ) stop("Invalid 'n'") # Count NAs, ensure they're only at beginning of data, then remove. 
NAx <- sum( is.na(x) ) NAw <- sum( is.na(wts) ) NAs <- max( NAx, NAw ) if( NAs > 0 ) { if( any( is.na( x[-(1:NAx)]) ) ) stop("'x' contains non-leading NAs") if( any( is.na(wts[-(1:NAw)]) ) ) stop("'wts' contains non-leading NAs") } if( NROW(wts) == n ) { x <- na.omit(x) NAs <- NAx if( any(is.na(wts)) ) stop("'wts' vector of length 'n' cannot have NA values") # Call Fortran routine ma <- .Fortran( "wma", ia = as.double(x), lia = as.integer(NROW(x)), wts = as.double(wts), n = as.integer(n), oa = as.double(x), loa = as.integer(NROW(x)), PACKAGE = "TTR", DUP = TRUE )$oa } else { xw <- na.omit( cbind(x, wts) ) ma <- runSum( xw[,1]*xw[,2], n) / runSum(xw[,2], n) } # replace 1:(n-1) with NAs and prepend NAs from original data ma[1:(n-1)] <- NA ma <- c( rep( NA, NAs ), ma ) if(!is.null(dim(ma))) { colnames(ma) <- "WMA" } reclass(ma,x) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "EVWMA" <- function(price, volume, n=10, ...) { # Elastic, Volume-Weighted Moving Average price <- try.xts(price, error=as.matrix) volume <- try.xts(volume, error=as.matrix) if( !any( NROW(volume) == c( NROW(price), 1 ) ) ) stop("Length of 'volume' must equal 1 or the length of 'price'") if( n < 1 || n > NROW(price) ) stop("Invalid 'n'") pv <- cbind(price, volume) if( any(nNonNA <- n > colSums(!is.na(pv))) ) stop("n > number of non-NA values in ", paste(c("price","volume")[which(nNonNA)], collapse=", ")) # Check for non-leading NAs # Leading NAs are handled in the C code pv.na <- xts:::naCheck(pv, n) # Call C routine ma <- .Call("evwma", pv[,1], pv[,2], n, PACKAGE = "TTR") if(!is.null(dim(ma))) { colnames(ma) <- "EVWMA" } # Convert back to original class reclass(ma, price) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "ZLEMA" <- function (x, n=10, ratio=NULL, ...) 
{ # Zero-Lag Exponential Moving Average x <- try.xts(x, error=as.matrix) # Count NAs, ensure they're only at beginning of data, then remove. NAs <- sum( is.na(x) ) if( NAs > 0 ) { if( any( is.na(x[-(1:NAs)]) ) ) stop("Series contains non-leading NAs") } x <- na.omit(x) # Initialize ma vector ma <- rep(1, NROW(x)) ma[n] <- mean(x[1:n]) # Determine decay ratio if(is.null(ratio)) { ratio <- 2/(n+1) } # Call Fortran routine ma <- .Fortran( "zlema", ia = as.double(x), lia = as.integer(NROW(x)), n = as.integer(n), oa = as.double(ma), loa = as.integer(NROW(ma)), ratio = as.double(ratio), PACKAGE = "TTR", DUP = TRUE )$oa # replace 1:(n-1) with NAs and prepend NAs from original data ma[1:(n-1)] <- NA ma <- c( rep( NA, NAs ), ma ) if(!is.null(dim(ma))) { colnames(ma) <- "ZLEMA" } reclass(ma,x) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export VWAP VWMA "VWAP" <- "VWMA" <- function(price, volume, n=10, ...) { # Volume-weighted average price # Volume-weighted moving average res <- WMA(price, n=n, volume) if(!is.null(dim(res))) { colnames(res) <- "VWAP" } return(res) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "VMA" <- function (x, w, ratio=1, ...) { # Variable Moving Average x <- try.xts(x, error=as.matrix) w <- try.xts(w, error=as.matrix) if( NROW(w) != NROW(x) ) stop("Length of 'w' must equal the length of 'x'") # Check for non-leading NAs # Leading NAs are handled in the C code x.na <- xts:::naCheck(x, 1) w.na <- xts:::naCheck(w, 1) # Call C routine ma <- .Call("vma", x, abs(w), ratio, PACKAGE = "TTR") if(!is.null(dim(ma))) { colnames(ma) <- "VMA" } reclass(ma,x) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "HMA" <- function(x, n=20, ...) { # Hull Moving Average reclass(WMA(2*WMA(x, n=n/2, ...) 
- WMA(x, n=n, ...), n=trunc(sqrt(n)), ...), x) } #-------------------------------------------------------------------------# #'@rdname MovingAverages #'@export "ALMA" <- function(x, n=9, offset=0.85, sigma=6, ...) { # ALMA (Arnaud Legoux Moving Average) if(offset < 0 || offset > 1) { stop("Please ensure 0 <= offset <= 1") } if(sigma <= 0) stop("sigma must be > 0") m <- floor(offset*(n-1)) s <- n/sigma wts <- exp(-((seq(0,n-1)-m)^2)/(2*s*s)) sumWeights <- sum(wts) if(sumWeights != 0) wts <- wts/sumWeights alma <- rollapply(x, width=n, FUN=function(xx) sum(xx*wts), align="right") reclass(alma, x) }
install.packages(c("rgdal","ggmap","sp","maptools","viridis","magrittr","scales","gridExtra","data.table", "tidyverse","lubridate","factoextra","tfplot","tsfa","cluster","IRdisplay","foreign","extrafont", "showtext","grid")) suppressPackageStartupMessages({ library(rgdal,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(ggmap,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(sp,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(maptools,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(viridis,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(magrittr,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(scales,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(gridExtra,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(data.table, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(tidyverse, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(lubridate, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(gridExtra, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(factoextra, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(tfplot, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(tsfa, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(factoextra, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(cluster, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(IRdisplay, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(foreign,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(extrafont,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(showtext,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) library(grid,warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE) }) setwd('C:/Users/samsung/Desktop/대학교/시각화공모전/KT_data_20200703') getwd() #행정구역 지도를 가져와 줍니다. 
# ---- Choropleth: cumulative confirmed cases per province ----
korea_map_shp = rgdal::readOGR("C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/지도/CTPRVN.shp")
korea_map = fortify(korea_map_shp)
korea_map %>% str()

# Load the per-province confirmed-case counts and preprocess them for the map.
TimeProvince <- fread('C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/COVID_19/TimeProvince.csv',
                      stringsAsFactors=FALSE, encoding = "UTF-8")
TimeProvince$date <- as.Date(TimeProvince$date)
TimeProvince$date <- as.character(TimeProvince$date,'%m/%d')
TimeProvince$province=TimeProvince$province %>% as.factor()
# Cumulative counts: the max over time per province is the latest total.
confirm_added=TimeProvince %>% group_by(province)%>% summarize(N=max(confirmed))
# Map province names onto the polygon ids produced by fortify() — assumes the
# shapefile orders provinces 0..16 this way; TODO confirm against CTPRVN.shp.
confirm_added$province=plyr::revalue(confirm_added$province,
                                     c("서울"="0","부산"="1","대구"="2",
                                       "인천"="3","광주"="4","대전"="5",
                                       "울산"="6","세종"="7","경기도"="8",
                                       "강원도"="9","충청북도"="10","충청남도"="11",
                                       "전라북도"="12","전라남도"="13","경상북도"="14",
                                       "경상남도"="15","제주도"="16"))
colnames(confirm_added)<-c("id","confirmed")
# Merge the map polygons with the counts and draw the choropleth.
korea_map=merge(korea_map,confirm_added, by="id")
mycorona=ggplot() + geom_polygon(data=korea_map, aes(x=long, y=lat, group=group, fill=confirmed))
mycorona+scale_fill_gradient(low = "#F1C5C5", high = "#D92027")+ theme_bw() +
  theme(panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(),
        panel.grid.major.y = element_blank(), panel.grid.minor.y = element_blank(),
        plot.title = element_text(face = "bold", size = 18, hjust = 0.5))

# ---- Pie charts: cumulative confirmed by age band and by sex ----
# Preprocessing: keep the last observed date; rows 3:7 select the middle age
# bands — TODO confirm which bands rows 3:7 correspond to in TimeAge.csv.
age <- fread('COVID_19/TimeAge.csv', stringsAsFactors=FALSE, data.table=FALSE, encoding = "UTF-8")
age <- age[age$date == '2020-06-30',]
age <- age[c(3:7),]
# plot_age. NOTE(review): dividing by the magic constant 128 looks like
# total_confirmed / 100 (~12,800 total) to get percentages — verify.
c <-ggplot(age, aes(x = "", y = confirmed, fill = age)) +
  geom_bar(width = 1, stat = "identity", color = "white") +
  coord_polar("y") +
  geom_text(aes(label = paste0(age,"\n",round(confirmed/128,1),"%")),
            position = position_stack(vjust = 0.5),color = "white") +
  scale_fill_manual(values = c("#CD0000","#F06E6E","#D25A5A","#CD4646","#FF7878","#F4A0A0","#FA8282")) +
  theme_void() + theme(text =element_text(face = "bold"))
# plot_gender
gender <- fread('COVID_19/TimeGender.csv', stringsAsFactors=FALSE,data.table=FALSE, encoding = "UTF-8")
gender <- gender[gender$date == '2020-06-30',]
d <-ggplot(gender, aes(x = "", y = confirmed, fill = sex)) +
  geom_bar(width = 1, stat = "identity", color = "white") +
  coord_polar("y") +
  geom_text(aes(label = paste0(sex,"\n",round(confirmed/128,1),"%")),
            position = position_stack(vjust = 0.5),color = "white") +
  scale_fill_manual(values = c("#45556b","#637a9a")) +
  theme_void() +theme(text =element_text(face = "bold"))
grid.arrange(c,d, nrow=2, top = textGrob("나이대 및 성별 누적확진자 비율",gp=gpar(fontsize=20,font=2)))

# ---- Daily new confirmed cases (difference of the cumulative series) ----
Time <-fread('COVID_19/Time.csv', stringsAsFactors=FALSE,data.table=FALSE, encoding = "UTF-8")
Time$date <- as.Date(Time$date)
str(Time)
# NOTE(review): `c` shadows base::c here; calls like c(...) still resolve to
# the function (R skips non-function bindings at call sites), but renaming
# this column would be clearer. Length 163 must equal nrow(Time) — verify.
c <- c(rep(0,163))
c[1] <- 1
for (i in 2:length(Time$confirmed)){
  if (Time$confirmed[i] - Time$confirmed[i-1] > 0) {
    c[i] <- Time$confirmed[i] - Time$confirmed[i-1]
  } else {
    c[i] <- 0
  }
}
Time <- cbind(Time, c)
# Plot daily new cases, annotating the peak (2020-02-29) and the minimum.
ggplot(Time, aes(x =date, y = `c`)) +
  geom_line(color='#c00000', size = 0.9) +
  coord_fixed(ratio = 0.1) +
  theme(panel.background = element_rect(fill = "#dee3eb" , color = "#6a6a6a"),
        plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),
        panel.grid.major = element_line(color = "white"),
        plot.title = element_text(face = "bold",hjust = 0.5, size =20),
        text =element_text(face = "bold", size =15)) +
  ggtitle("COVID-19 실확진자수 추이") + xlab("date(month)") + ylab("실확진자수(명)")+
  geom_point(mapping =aes(x =Time$date[which.max(Time$`c`)] ,y = 813 ), color="black", size =2.5)+
  annotate("text", x=Time$date[which.max(Time$`c`)], y=813,fontface=2,
           label="\n2020-02-29(813명)\n이유 : 신천지 집단감염",hjust = 1.1, size = 4.6 , color = "#1E3269") +
  geom_point(mapping =aes(x =Time$date[104] ,y = 2 ), color="black", size =2.5) +
  annotate("text", x=Time$date[104], y=2, label="2020-05-02(2명)\n\n\n", size = 5 ,fontface=2, color = "#1E3269") +
  geom_hline(yintercept=78.53, linetype='dashed', color='#FA8282', size=0.7,alpha = 0.7)+
  annotate("text", x=Time$date[5], y=120, label="평균(약 78명)", size = 4 , fontface=2, color = "#D25A5A")

# ---- Preprocessing ----
# 1. Reduce the fpopl (floating population) data to weekly means.
fpopl <-fread('fpopl.csv', stringsAsFactors=FALSE,data.table=FALSE, encoding = "UTF-8")
fpopl %<>% select(base_ymd,adstrd_code,popltn_cascnt)
code <-fread('adstrd_master.csv', stringsAsFactors=FALSE,data.table=FALSE, encoding = "UTF-8")
code %<>% select(adstrd_code,signgu_nm)
doraga <- left_join(fpopl,code,by="adstrd_code")
doraga %<>% select(-adstrd_code)
doraga$base_ymd <- as.character(doraga$base_ymd)
doraga$base_ymd <- as.Date(doraga$base_ymd,"%Y%m%d")
doraga$base_ymd <- week(doraga$base_ymd)
# NOTE(review): `week` now shadows lubridate::week as a data object; the later
# week(...) calls still dispatch to the function, but a different name would
# be safer.
week <- c(1:24)
fpopl_mean <- c(rep(0,24))
df <- data.frame(week,fpopl_mean)
for (i in 1:24){
  a <- doraga %>% filter(base_ymd == i) %>% summarise(n=mean(popltn_cascnt))
  df$fpopl_mean[i] <- a$n
}
# 2. Merge with the Time data to compare weekly floating population against
#    weekly mean daily new cases.
Time$date <- week(Time$date)
Time %<>% group_by(date) %>% summarize(c_mean= mean(c)) %>% unique()
colnames(Time) <- c("week","c_mean")
time_fpopl <- plyr::join(Time, df, by = "week")
time_fpopl <- time_fpopl[time_fpopl$week <= 24,]
# Plot: dual-axis comparison with a shaded rect for the outbreak period.
period <-data.frame(시기 = c("2"),start =c(8), end=c(11))
ggplot() +
  geom_bar(data=time_fpopl, aes(x = week, y = fpopl_mean*2 ,color = "유동인구 수"),
           fill = '#1E3269',alpha= 0.3,stat ="identity", position="dodge")+
  ylab("명") +
  # NOTE(review): formula given as a character string; stat_smooth normally
  # expects a formula object (y ~ x) — confirm this renders as intended.
  geom_smooth(data=time_fpopl, aes(x = week, y = fpopl_mean*2),method = 'loess',
              formula ='y ~ x',span = 0.3, color = "#ff8888",size = 0.9,se =F, linetype= "dashed")+
  geom_line(data=time_fpopl, aes(x = week, y = c_mean, color = "실확진자 수"),size =0.9)+
  scale_colour_manual("", values=c("실확진자 수" = "#c00000", "유동인구 수" = "#1E3269")) +
  coord_fixed(ratio = 0.025) +
  theme(panel.background = element_rect(fill = "#dee3eb" , color = "black"),
        plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),
        legend.position="bottom",
        plot.title = element_text(face = "bold",hjust = 0.5, size =20),
        text = element_text(face = "bold",hjust = 0.5, size =15)) +
  ggtitle("주 단위 실확진자와 유동인구 추이 비교")+
  scale_y_continuous(
    # Features of the first axis
    name = "주별 평균 실확진자 수",
    # Add a second axis and specify its features
    sec.axis = sec_axis(~./2, name="주별 평균 유동인구 수")
  )+
  geom_rect(data=period, aes(NULL,NULL,xmin=start,xmax=end),fill="#adb9ca",
            ymin=0,ymax=573, colour="white", size=0.5, alpha=0.35)
display_png(file="period2.png")
display_png(file="period.png")

########################
### Delivery preprocessing ###
#######################
delivery <- fread('delivery.csv', header = T, stringsAsFactors = F, data.table = F, encoding = 'UTF-8' )
# Drop all *_code and ID-like columns; split PROCESS_DT into year/month/day.
code = delivery %>% select(ends_with('code')) %>% colnames()
id = c('SERIAL_NUMBER', colnames(select(delivery, ends_with('ID'))))
delivery = delivery %>% select(-c(code, id))
delivery = delivery %>% separate(PROCESS_DT, sep = '-', into = c('year', 'month', 'day'), remove = FALSE)
rm(list = c('code', 'id'))
# Epidemic phases used for the shaded background rectangles.
index <- data.frame('period' = c('기', '승', '전-1', '전-2'),
                    'start' = c('2020-01-01', '2020-02-22', '2020-03-08', '2020-05-06'),
                    'end' = c('2020-02-22', '2020-03-08', '2020-05-06', '2020-06-21'))
# Daily delivery order counts with a linear trend.
p1 <- delivery %>% group_by(PROCESS_DT) %>% summarize(N = n()) %>% ungroup() %>%
  ggplot(aes(x = PROCESS_DT, y = N, group = 1)) +
  geom_line(linetype = "dashed", color = 'black') +
  geom_smooth(method = 'lm', se = FALSE, formula = y~x, color = 'firebrick', linetype = 'longdash') +
  geom_point(color = "red", size = 1) +
  geom_rect(data = index, aes(NULL, NULL, fill = period, xmin = start, xmax = end),
            ymin = 0, ymax = 47000, alpha=0.5) +
  scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5', '전-2' = '#adb9ca')) +
  labs(x = "일자", y = '평균 배달 건수', title = '일별 평균 배달 건수', fill = '코로나 시기') +
  scale_x_discrete(breaks = NULL) +
  theme(legend.position = 'bottom', legend.box = 'vertical') +
  theme(panel.background = element_rect(fill = "white" , color = "black"),
        plot.title = element_text(face = "bold", hjust = 0.5, size =20),
        text = element_text(face = "bold",hjust = 0.5,size =15),
        plot.margin = margin(30,40,30,40))
# Daily mean delivery revenue (completed orders only:
# DLVR_REQUST_STTUS_VALUE == 1; revenue = goods - delivery fee - relay fee).
p2 <- delivery %>% filter(DLVR_REQUST_STTUS_VALUE == 1) %>%
  mutate('SELL_AMOUNT' = GOODS_AMOUNT - DLVR_AMOUNT - CALL_RLAY_FEE_AMOUNT) %>%
  group_by(PROCESS_DT) %>% summarize(MEAN = mean(SELL_AMOUNT)) %>% ungroup() %>%
  ggplot(aes(x = PROCESS_DT, y = MEAN, group = 1)) +
  geom_line(linetype = "dashed", color = 'black') +
  geom_smooth(method = 'lm', se = FALSE, formula = y~x, color = 'firebrick', linetype = 'longdash') +
  geom_point(color = "red", size = 1) +
  geom_rect(data = index, aes(NULL, NULL, fill = period, xmin = start, xmax = end),
            ymin = 0, ymax = 45224, alpha=0.5) +
  scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5', '전-2' = '#adb9ca')) +
  labs(x = "일자", y = '평균 배달 매출', title = '일별 평균 배달 매출', fill = '코로나 시기') +
  scale_x_discrete(breaks = NULL) +
  theme(legend.position = 'bottom', legend.box = 'vertical') +
  theme(panel.background = element_rect(fill = "white" , color = "black"),
        plot.title = element_text(face = "bold", hjust = 0.5, size =20),
        text = element_text(face = "bold",hjust = 0.5,size =15),
        plot.margin = margin(30,40,30,40))
grid.arrange(p1, p2, ncol = 1)
display_png(file="delivery_increase.JPG")

# Flag stores in the hard-hit regions (bad = 1) vs the rest.
delivery$bad = ifelse(delivery$DLVR_STORE_SIDO %in% c('경기도', '경상북도', '대구광역시', '서울'), 1, 0)
delivery %>% group_by(PROCESS_DT, bad) %>% summarize(N = n()) %>% ungroup() %>%
  ggplot(aes(x = PROCESS_DT, y = N)) +
  geom_line(aes(group = as.factor(bad), color = as.factor(bad)), size = 0.6) +
  geom_smooth(aes(group = as.factor(bad), color = as.factor(bad)),
              method = 'lm', se = FALSE, formula = y~x, linetype = 'longdash') +
  geom_rect(data = index, aes(NULL, NULL, fill = period, xmin = start, xmax = end),
            ymin = 0, ymax = 30000, alpha=0.5) +
  scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5', '전-2' = '#adb9ca')) +
  scale_linetype_discrete('코로나 심각 수준(지역)', labels = c('완만', '심각'), aes(Color = as.factor(bad))) +
  scale_color_manual(labels = c('완만', '심각'),values=c( "#1E3269","#c00000")) +
  scale_x_discrete(breaks = NULL) +
  labs(x = '일자', color = '코로나 심각 수준(지역)', y = '배달 건수', fill = '코로나 시기') +
  ggtitle('코로나 심각지역 vs 완만지역 일별 배달건수') +
  theme(legend.position = 'bottom', legend.box = 'vertical') +
  theme(panel.background = element_rect(fill = "white" , color = "black"),
        plot.title = element_text(face = "bold", hjust = 0.5, size =20),
        text = element_text(face = "bold",hjust = 0.5,size =15),
        plot.margin = margin(30,40,30,40))

# Monthly phase rectangles (June excluded below because it is incomplete).
month_index <- data.frame('period' = c('기', '승', '전-1'),
                          'start' = c('01', '02', '03'),
                          'end' = c('02', '03', '05'))
# Revenue trend per delivery category; errands/lunchboxes highlighted.
p1 <- delivery %>% filter(DLVR_REQUST_STTUS_VALUE == 1) %>%
  mutate('SELL_AMOUNT' = GOODS_AMOUNT - DLVR_AMOUNT - CALL_RLAY_FEE_AMOUNT) %>%
  group_by(month, DLVR_STORE_INDUTY_NM) %>% summarise(MEAN = mean(SELL_AMOUNT)) %>% ungroup() %>%
  filter(month != "06") %>%
  mutate('NEW_cate' = ifelse(DLVR_STORE_INDUTY_NM %in% c('심부름', '도시락') , 1, 0)) %>%
  ggplot(aes(x = month, y = MEAN)) +
  geom_smooth(aes(group = DLVR_STORE_INDUTY_NM, color= DLVR_STORE_INDUTY_NM,
                  linetype = as.factor(NEW_cate), size = as.factor(NEW_cate)),
              method = 'lm', se = FALSE, formula = y~x) +
  geom_rect(data = month_index, aes(NULL, NULL, fill = period, xmin = start, xmax = end),
            ymin = 0, ymax = 33000, alpha=0.5) +
  scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5')) +
  scale_linetype_manual(values=c("dashed", "solid"),labels = c('그외', '심부름&도시락'))+
  scale_size_manual(values = c(1, 1.1), guide = 'none') +
  guides(color = FALSE) +
  labs(x = '월', y = '평균 배달 매출', linetype = '매출 성장 추세', fill = '코로나 시기', color = '배달품목') +
  ggtitle('배달품목별 월별 매출 변화 추세') +
  theme(panel.background = element_rect(fill = "white" , color = "black"),
        plot.title = element_text(face = "bold", hjust = 0.5, size =20),
        text = element_text(face = "bold",hjust = 0.5,size =15),
        plot.margin = margin(30,40,30,40))
# Same view restricted to the errand/lunchbox categories.
p2 <- delivery %>% filter(DLVR_REQUST_STTUS_VALUE == 1) %>%
  mutate('SELL_AMOUNT' = GOODS_AMOUNT - DLVR_AMOUNT - CALL_RLAY_FEE_AMOUNT) %>%
  group_by(month, DLVR_STORE_INDUTY_NM) %>% summarise(MEAN = mean(SELL_AMOUNT)) %>% ungroup() %>%
  filter(month != "06") %>%
  filter(DLVR_STORE_INDUTY_NM %in% c('심부름', '도시락')) %>%
  ggplot(aes(x = month, y = MEAN)) +
  geom_line(aes(group = DLVR_STORE_INDUTY_NM, color= DLVR_STORE_INDUTY_NM)) +
  geom_point(aes(group = DLVR_STORE_INDUTY_NM, color= DLVR_STORE_INDUTY_NM) ,size = 1) +
  geom_smooth(aes(group = DLVR_STORE_INDUTY_NM, color= DLVR_STORE_INDUTY_NM),
              method = 'lm', se = FALSE, formula = y~x, linetype = 'longdash') +
  geom_rect(data = month_index, aes(NULL, NULL, fill = period, xmin = start, xmax = end),
            ymin = 0, ymax = 30000, alpha=0.5) +
  scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5')) +
  scale_color_manual(values=c( "#1E3269","#c00000")) +
  labs(x = '월', y = '평균 배달 매출', fill = '코로나 시기', color = '배달품목') +
  ggtitle('배달품목별 월별 매출 변화 추세') +
  theme(panel.background = element_rect(fill = "white" , color = "black"),
        plot.title = element_text(face = "bold", hjust = 0.5, size =20),
        text = element_text(face = "bold",hjust = 0.5,size =15),
        plot.margin = margin(30,40,30,40))
grid.arrange(p1, p2, ncol = 1)

# Persist the completed-order filter, revenue and category flag on delivery.
delivery = delivery %>% filter(DLVR_REQUST_STTUS_VALUE == 1) %>%
  mutate('SELL_AMOUNT' = GOODS_AMOUNT - DLVR_AMOUNT - CALL_RLAY_FEE_AMOUNT) %>%
  mutate('NEW_cate' = ifelse(DLVR_STORE_INDUTY_NM %in% c('심부름', '도시락') , 1, 0))
# Monthly revenue: errands/lunchboxes vs everything else.
p1 <- delivery %>% group_by(month, NEW_cate) %>% summarise(MEAN = mean(SELL_AMOUNT)) %>% ungroup() %>%
  filter(month != "06") %>%
  ggplot(aes(x = month, y = MEAN)) +
  geom_line(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate))) +
  geom_point(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate)), size = 1) +
  geom_smooth(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate)),
              method = 'lm', se = FALSE, formula = y~x, linetype = 'longdash') +
  geom_rect(data = month_index, aes(NULL, NULL, fill = period, xmin = start, xmax = end),
            ymin = 0, ymax = 33000, alpha=0.5) +
  scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5')) +
  scale_color_manual(labels = c('그외', '심부름&도시락'), values=c( "#1E3269","#c00000")) +
  labs(x = '월', color = '배달품목', y = '월별 평균 배달 매출', fill = '코로나 시기') +
  ggtitle('배달품목별 월별 매출 변화 추세') +
  theme(panel.background = element_rect(fill = "white" , color = "black"),
        plot.title = element_text(face = "bold", hjust = 0.5, size =20),
        text = element_text(face = "bold",hjust = 0.5,size =15),
        plot.margin = margin(30,40,30,40))
# Monthly order counts. NOTE(review): mean(n()) is just n() per group —
# presumably intended as a per-month count; verify.
p2 <- delivery %>% group_by(month, NEW_cate) %>% summarise(MEAN = mean(n())) %>% ungroup() %>%
  filter(month != "06") %>%
  ggplot(aes(x = month, y = MEAN)) +
  geom_line(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate))) +
  geom_point(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate)), size = 1) +
  geom_smooth(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate)),
              method = 'lm', se = FALSE, formula = y~x, linetype = 'longdash') +
  geom_rect(data = month_index, aes(NULL, NULL, fill = period, xmin = start, xmax = end),
            ymin = -1000000, ymax = 1200000, alpha=0.5) +
  scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5')) +
  scale_color_manual(labels = c('그외', '심부름&도시락'), values=c( "#1E3269","#c00000")) +
  labs(x = '월', color = '배달품목', y = '월별 배달 건수', fill = '코로나 시기') +
  ggtitle('배달품목별 월별 건수 변화 추세') +
  theme(panel.background = element_rect(fill = "white" , color = "black"),
        plot.title = element_text(face = "bold", hjust = 0.5, size =20),
        text = element_text(face = "bold",hjust = 0.5,size =15),
        plot.margin = margin(30,40,30,40))
grid.arrange(p1, p2, ncol = 1)

# Errand/lunchbox order counts only, with a quadratic trend.
delivery %>% group_by(month, NEW_cate) %>% summarise(MEAN = mean(n())) %>% ungroup() %>%
  filter(month != "06") %>% filter(NEW_cate == 1) %>%
  ggplot(aes(x = month, y = MEAN)) +
  geom_line(group = 1, color = "#c00000") +
  geom_point(group = 1, color = "#c00000", size = 1) +
  geom_smooth(aes(group = 1), color = "#c00000", method = 'lm', se = FALSE,
              formula = y ~ x + I(x^2), linetype = 'longdash') +
  geom_rect(data = month_index, aes(NULL, NULL, fill = period, xmin = start, xmax = end),
            ymin = 0, ymax = 15000, alpha=0.5) +
  scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5')) +
  labs(x = '월', y = '월별 배달 건수', fill = '코로나 시기') +
  ggtitle('도시락&심부름 : 월별 건수 변화 및 추세') +
  theme(legend.position = 'bottom') +
  theme(panel.background = element_rect(fill = "white" , color = "black"),
        plot.title = element_text(face = "bold", hjust = 0.5, size =20),
        text = element_text(face = "bold",hjust = 0.5,size =15),
        plot.margin = margin(30,30,30,30))
display_png(file="delivery_2.JPG")

# Boxplots: revenue by region severity, errand/lunchbox vs other categories.
delivery %>% group_by(month, NEW_cate, bad) %>% summarise(MEAN = mean(SELL_AMOUNT)) %>% ungroup() %>%
  filter(month != '06') %>%
  ggplot(aes(y = MEAN, x = as.factor(bad), color = as.factor(NEW_cate))) +
  stat_boxplot(alpha = 0.4, geom = 'errorbar') +
  stat_summary(aes(fill = as.factor(NEW_cate)), fun = mean, size= 5, geom="point",
               shape=20, position = position_dodge(width = .75)) +
  geom_boxplot(alpha = 0.4) +
  scale_color_manual(labels = c('그외', '심부름,도시락'), values=c("#c00000", "#1E3269")) +
  scale_fill_discrete(guide = 'none') +
  scale_x_discrete(labels = c('완만지역', '심각지역')) +
  labs(x = '코로나 심각 수준', color = '배달품목', y = '배달 매출') +
  ggtitle('코로나 심각에 따른 심부름&도시락 vs 그외품목 매출 비교') +
  theme_classic() +
  theme(legend.position = "bottom", title = element_text(size = 15, face = 'bold'),
        text = element_text(face = "bold",hjust = 0.5,size =15),
        plot.margin = margin(45,45,45,45))

# ---- Consumption-index (cgi) factor analysis: March vs May 2020 ----
index <- fread('index.csv', data.table = FALSE, encoding = 'UTF-8')
index %<>% group_by(period,catm,age,gender) %>% mutate(cgi_mean = mean(cgi)) %>%
  select(period,catm,age,gender,cgi_mean) %>% ungroup() %>% unique()
index <- index[index$period == 202003 | index$period == 202005,]
# (removed dead assignment `a <- c(rep(0,594))` — the value was never read
# before `a` was reassigned to a plot further below)
month03 <- filter(index,index$period == 202003)
month05 <- filter(index,index$period == 202005)
# Relative change of cgi from March to May per (category, age, gender) cell.
change <- plyr::join(month03, month05, by = c("catm","age","gender"))
colnames(change) <- c("period","catm","age","gender","mean1","period2","mean2")
change %<>% mutate(change = (mean2-mean1)/mean1) %>% na.omit() %>%
  select(catm,age,gender,change,period2)
change%<>% spread(key='catm', value='change') %>% select(-period2)
change
# Scaling. Fixed deprecated as.tibble() -> as_tibble() (same behavior).
change_factor = change %>% select(-age,-gender) %>% na.omit
change_factor = scale(change_factor) %>% na.omit %>% as_tibble()
# Choose the number of factors from the PCA scree plot.
par(mar = c(12,5,12,5))
plot(prcomp(change_factor),type="l",sub = 'Scree Plot' , col = "#1E3269")
# Biplot-style view: observation scores (blue) and variable loadings (red).
change_fa <- factanal(change_factor,factors=2,rotation = "varimax",scores = "regression")
par(mar = c(6,5,6,5))
plot(change_fa$scores, col = "#1E3269",pch = 20, fg = "#423144",col.axis ='#423144')
text(change_fa$scores[,1], change_fa$scores[,2], labels = change$age,
     cex = 0.8, pos = 2, col = "#1E3269")
points(change_fa$loadings, pch= 20, col = "#c00000")
text(change_fa$loadings[,1], change_fa$loadings[,2],labels = rownames(change_fa$loadings),
     cex = 0.5, pos = 4, col = "#c00000")

# ---- Year-over-year cgi comparison for selected categories ----
index <- fread('index.csv', stringsAsFactors=FALSE,data.table=FALSE, encoding = "UTF-8")
index %<>% mutate(year = ifelse(period>202000,2020,2019))
index %<>% group_by(period,catm) %>% mutate(n=mean(cgi)) %>%
  filter(period < 201906 | period >= 202001 )
index %<>% select(period, catm, n, year) %>% unique()
index %<>% mutate(month = substr(as.character(period),5,6))
a <- index %>% filter(catm == "화장품")%>%
  ggplot(aes(x = month, y = n, group = as.character(year) ,color =as.character(year))) +
  geom_line(size = 0.4, linetype="longdash")+labs(color = "Year\n")+
  ggtitle("화장품 : 2019/2020 cgi 변화율 추이") +
  scale_color_manual(values=c("#c00000", "#1E3269"))+ylab("cgi") +geom_point(size=2)+
  theme(legend.position="right",
        panel.background = element_rect(fill = "white" , color = "black"),
        plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),
        plot.title = element_text(face = "bold",hjust = 0,size =15))
b <- index %>% filter(catm == "건강관리용품")%>%
  ggplot(aes(x = month, y = n, group = as.character(year) ,color =as.character(year))) +
  geom_line(size = 0.4, linetype="longdash")+labs(color = "Year\n")+
  ggtitle("건강관리용품 : 2019/2020 cgi 변화율 추이") +geom_point(size=2)+
  scale_color_manual(values=c("#c00000", "#1E3269"))+ylab("cgi") +
  theme(legend.position="right",
        panel.background = element_rect(fill = "white" , color = "black"),
        plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),
        plot.title = element_text(face = "bold",hjust = 0,size =15))
c <- index %>% filter(catm == "애완동물용품")%>%
  ggplot(aes(x = month, y = n, group = as.character(year) ,color =as.character(year))) +
  geom_line(size = 0.4, linetype="longdash")+labs(color = "Year\n")+
  ggtitle("애완동물용품 : 2019/2020 cgi 변화율 추이") + geom_point(size=2)+
  scale_color_manual(values=c("#c00000", "#1E3269"))+ylab("cgi") +
  theme(legend.position="right",
        panel.background = element_rect(fill = "white" , color = "black"),
        plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),
        plot.title = element_text(face = "bold",hjust = 0,size =15))
grid.arrange(a,b,c)

# ---- AmorePacific sales-channel mix by quarter (manually keyed figures) ----
cosmetic <- fread('Amore Pacific.csv', data.table = FALSE, encoding = 'UTF-8')
cosmetic <- cosmetic[c(3,26,27,28),c(1,2,3,4,6,7,8,9,10)]
cosmetic
# Channel / share(%) / quarter vectors — hand-entered; TODO verify against the
# source spreadsheet.
카테고리 <- c("오프라인","오프라인","오프라인","오프라인","오프라인","오프라인","오프라인","오프라인",
          "온라인","온라인","온라인","온라인","온라인","온라인","온라인","온라인",
          "면세점","면세점","면세점","면세점","면세점","면세점","면세점","면세점")
비율<- c(13.2,46.7,40.9,40.1,38.2,29.7,20.4,27.4,
       12.9,13.2,11.5,13.4,15.5,14.2,19.8,30.7,
       40.9,40.1,47.5,46.5,46.2,56.1,59.8,41.8)
분기 <- c("18-1","18-2","18-3","19-1","19-2","19-3","19-4","20-1","18-1","18-2","18-3","19-1","19-2","19-3","19-4","20-1","18-1","18-2","18-3","19-1","19-2","19-3","19-4","20-1")
cosmetic<- data.frame(cbind(카테고리,비율,분기))
a<- ggplot(cosmetic, aes(x=분기, y = 비율, group = 카테고리,color = 카테고리))+
  geom_line(size = 0.5, linetype="longdash") +
  scale_color_manual(values=c('#FFBE0A','#1E3269','#CD1F48'))+
  ggtitle("아모레퍼시픽 분기 매출비율 추이") + ylab("%") + geom_point(size=2)+
  scale_y_discrete(breaks = c(11.5,15.5,30.7,46.2,59.8))+
  theme(legend.position="top",
        panel.background = element_rect(fill = "white" , color = "black"),
        plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),
        plot.title = element_text(face = "bold",hjust = 0.5,size =15),
        text = element_text(face = "bold",hjust = 0.5,size =15)) +
  geom_vline(xintercept="19-4",linetype= "longdash", color='grey', size=0.4)
b<- ggplot(cosmetic, aes(x=분기, y = 비율, group = 카테고리,color = 카테고리)) +
  geom_line(size = 0.5, linetype="longdash") +
  scale_color_manual(values=c('#FFBE0A','#1E3269','#CD1F48'))+
  ylab("%") + geom_point(size=2)+ coord_fixed(ratio = 0.33) +
  scale_y_discrete(breaks = c(11.5,15.5,30.7,46.2,59.8))+
  theme(legend.position="",
        panel.background = element_rect(fill = "white" , color = "black"),
        plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),
        plot.title = element_text(face = "bold",hjust = 0,size =15),
        text = element_text(face = "bold",hjust = 0.5,size =15)) +
  facet_wrap(~카테고리, ncol=3)+ theme(axis.text.x=element_blank())+
  geom_vline(xintercept="19-4",linetype= "longdash", color='grey', size=0.4)
grid.arrange(a,b, ncol=1)

# ---- Card data load ----
card <- fread('card.csv', header = T, stringsAsFactors = F, data.table = F, encoding = 'UTF-8')
## Drop rows whose count/amount columns contain Hangul characters (bad rows) ##
data <- card %>% filter(! (selng_cascnt %in% grep('[ㄱ-힣]',unique(card$selng_cascnt), value = T)),
                        ! (salamt %in% grep('[ㄱ-힣]',unique(card$salamt), value = T))) %>%
  mutate(selng_cascnt = as.numeric(selng_cascnt), salamt = as.numeric(salamt)) %>%
  select(- c(adstrd_code, mrhst_induty_cl_code))
rm(list = c('card'))
data$receipt_dttm=data$receipt_dttm %>% as.character() %>% as.Date('%Y%m%d')
## Check for negative values — keep strictly positive rows only ##
data$selng_cascnt %>% summary()
data$salamt %>% summary()
data = data %>% filter(selng_cascnt > 0, salamt > 0) %>%
  mutate(receipt_dttm = ymd(receipt_dttm), week = week(receipt_dttm))
data %>% glimpse()
# Encode the epidemic phase as a new `period` variable (translated comment).
# ---- Phase labelling + k-means clustering of card-spending categories ----
# Row indices of the last record of each phase; relies on data being sorted
# by receipt_dttm — TODO confirm card.csv is date-ordered.
index1 = which(data$receipt_dttm == '2020-02-22') %>% max() # phase "기" (onset)
index2 = which(data$receipt_dttm == '2020-03-08') %>% max() # phase "승" (rise)
index3 = which(data$receipt_dttm == '2020-05-06') %>% max() # phase "전-1"
index4 = nrow(data)                                         # phase "전-2"
data_period = data
data_period$period = c(rep(1, index1), rep(2, index2 - index1),
                       rep(3, index3 - index2), rep(4, index4 - index3))

## Outlier / missing-value handling ##
data_period %>% is.na() %>% colSums()
mean_amount=data_period %>% group_by(mrhst_induty_cl_nm) %>%
  summarise(N_amount=mean(selng_cascnt)) %>% arrange(N_amount)
mean_amount %>% ggplot(aes(x=1, y=N_amount))+
  geom_violin( color = "#1E3269",size=0.3)+theme_bw() +
  theme(plot.margin = margin(60,60,60,60))
# Keep only categories at or above the first quartile of mean transaction count.
categories_new=mean_amount %>% filter(N_amount>=quantile(mean_amount$N_amount)[2]) %>%
  arrange(desc(N_amount)) %>% select(mrhst_induty_cl_nm)%>% ungroup()
categories_new <- as.data.frame(categories_new)
data_period <- data_period %>% filter(mrhst_induty_cl_nm%in% as.matrix(categories_new,nrow = 1))

# Per-phase features per category: mean count, mean revenue, mean unit price.
data_amount_period <- data_period %>% group_by(period, mrhst_induty_cl_nm) %>%
  summarise(mean_amount = mean(selng_cascnt)) %>% ungroup() %>%
  spread(period, value = mean_amount)
data_selling_period <- data_period %>% group_by(period, mrhst_induty_cl_nm) %>%
  summarise(mean_selling = mean(salamt)) %>% ungroup() %>%
  spread(period, value = mean_selling)
data_price_period <- data_period %>% group_by(period,mrhst_induty_cl_nm) %>%
  summarize(once_price=sum(salamt)/sum(selng_cascnt)) %>% ungroup() %>%
  spread(period,value = once_price)
colnames(data_amount_period)[-1] = c('amount_1', 'amount_2', 'amount_3', 'amount_4')
colnames(data_selling_period)[-1] = c('selling_1', 'selling_2', 'selling_3', 'selling_4')
colnames(data_price_period)[-1] = c('price_1','price_2','price_3','price_4')
data_clust = data_period %>% group_by(mrhst_induty_cl_nm) %>%
  summarise(MEAN_SELLING = mean(salamt),
            MEAN_AMOUNT = mean(selng_cascnt),
            once_price = sum(salamt)/sum(selng_cascnt) ) %>%
  ungroup %>%
  left_join(data_amount_period) %>%
  left_join(data_selling_period) %>%
  left_join(data_price_period)

# Clustering needs numeric variables only — drop the category name column.
clust1 = data_clust %>% select(-c(mrhst_induty_cl_nm))
clust_scaled = scale(clust1) %>% as_tibble()
# FIX: set.seed was called with the character string "19990107", which raises
# "supplied seed is not a valid integer" — the seed must be numeric.
set.seed(19990107)
kmeans1 <- kmeans(clust_scaled, nstart = 10, iter.max = 15, centers = 4)
# Elbow (wss) and silhouette diagnostics both supporting k = 4.
a<-fviz_nbclust(x = clust_scaled, FUNcluster = kmeans, method='wss') +
  geom_vline(xintercept = 4, linetype = 2)
b<-fviz_nbclust(x = clust_scaled, FUNcluster = kmeans, method = "silhouette") +
  geom_vline(xintercept = 4, linetype = 2)
grid.arrange(a,b)
data_clust$cluster = kmeans1$cluster
fviz_cluster(kmeans1, clust_scaled)+ theme_bw()+
  theme( legend.background = element_rect(color = 'black', size = 0.5),
         plot.margin=margin(50,10,50,10))

# Per-phase, per-cluster aggregates for the profile plots below.
final_test_k = data_period %>% left_join(data_clust) %>%
  group_by(period, cluster) %>%
  summarise(mean_amount = mean(selng_cascnt),
            mean_selling = mean(salamt),
            once_price = sum(salamt)/sum(selng_cascnt) ) %>%
  ungroup() %>%
  select(c(period, cluster, mean_amount, mean_selling,once_price))
# p1/p2/p3: overlaid cluster profiles; p*_2: free-scale facets per cluster.
p1 <- final_test_k %>%
  ggplot(aes(x = as.factor(period), y = mean_amount, group = as.factor(cluster), color = as.factor(cluster))) +
  geom_line(size=0.3,linetype="longdash")+geom_point(size=1)+theme_bw()+
  theme( axis.title = element_text(face = "bold",size =11),
         title = element_text(face="bold",size=13),
         legend.position = "none", text = element_text(face = "bold")) +
  scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+
  labs( color = "clusters",y="평균 판매량",x="",title="시기별 평균 판매량")+
  scale_x_discrete(labels = c("기","승","전-1","전-2"))
p1_2 <- final_test_k %>%
  ggplot(aes(x = as.factor(period), y = mean_amount, group = as.factor(cluster), color = as.factor(cluster))) +
  geom_line(size=0.3)+theme_bw()+
  theme( axis.title = element_text(face = "bold",size =11),
         title = element_text(face="bold",size=13,hjust = 0.5),
         legend.background = element_rect(color = 'black', size = 0.5),
         legend.title = element_text(face="plain",size=10),
         legend.text = element_text(size=8)) +
  scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+
  labs( color = "clusters",y="평균 판매량",x="")+
  scale_x_discrete(labels = c("기","승","전-1","전-2"))+
  facet_wrap(~cluster,scales="free")+
  theme(axis.text.x=element_blank(), axis.text.y=element_blank())+labs(y="",x="")
p2 <- final_test_k %>%
  ggplot(aes(x = as.factor(period), y = mean_selling, group = as.factor(cluster), color = as.factor(cluster))) +
  geom_line(size=0.3,linetype="longdash")+geom_point(size=1)+theme_bw()+
  theme( axis.title = element_text(face = "bold",size =11),
         title = element_text(face="bold",size=13),legend.position = "none",
         text = element_text(face = "bold")) +
  scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+
  labs( color = "clusters",y="평균 매출",x="",title="시기별 평균 매출")+
  scale_x_discrete(labels = c("기","승","전-1","전-2"))
p2_2<-final_test_k %>%
  ggplot(aes(x = as.factor(period), y = mean_selling, group = as.factor(cluster), color = as.factor(cluster))) +
  geom_line(size=0.3)+theme_bw()+
  theme( axis.title = element_text(face = "bold",size =11),
         title = element_text(face="bold",size=13,hjust = 0.5),
         legend.background = element_rect(color = 'black', size = 0.5),
         legend.title = element_text(face="plain",size=10),
         legend.text = element_text(size=8)) +
  scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+
  labs( color = "clusters",y="평균 매출",x="")+
  scale_x_discrete(labels = c("기","승","전-1","전-2"))+
  facet_wrap(~cluster,scales="free")+
  theme(axis.text.x=element_blank(), axis.text.y=element_blank())+labs(y="",x="")
p3 <- final_test_k %>%
  ggplot(aes(x = as.factor(period), y = once_price, group = as.factor(cluster), color = as.factor(cluster))) +
  geom_line(size=0.3,linetype="longdash")+geom_point(size=1)+theme_bw()+
  theme( axis.title = element_text(face = "bold",size =11),
         title = element_text(face="bold",size=13),legend.position = "none",
         text = element_text(face = "bold")) +
  scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+
  labs( color = "clusters",y="평균 가격",x="",title="시기별 평균 가격")+
  scale_x_discrete(labels = c("기","승","전-1","전-2"))
# FIX: p3_2 originally plotted mean_selling, but it is the faceted companion
# of p3 (average *price*, and its y-label says 평균 가격) — a copy-paste slip
# from p2_2; it now plots once_price like p3.
p3_2<-final_test_k %>%
  ggplot(aes(x = as.factor(period), y = once_price, group = as.factor(cluster), color = as.factor(cluster))) +
  geom_line(size=0.3)+theme_bw()+
  theme( axis.title = element_text(face = "bold",size =11),
         title = element_text(face="bold",size=13,hjust = 0.5),
         legend.background = element_rect(color = 'black', size = 0.5),
         legend.title = element_text(face="plain",size=10),
         legend.text = element_text(size=8) ) +
  scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+
  labs( color = "clusters",y="평균 가격",x="")+
  scale_x_discrete(labels = c("기","승","전-1","전-2"))+
  facet_wrap(~cluster,scales="free")+
  theme(axis.text.x=element_blank(), axis.text.y=element_blank())+labs(y="",x="")
gridExtra::grid.arrange(p1,p1_2,p2,p2_2,p3,p3_2, ncol = 2, nrow = 3)

# List the categories belonging to each cluster.
data_period %>% left_join(data_clust) %>% filter(cluster == 1) %>%
  select(mrhst_induty_cl_nm) %>% unique() %>% matrix(nrow=1)
data_period %>% left_join(data_clust) %>% filter(cluster == 2) %>%
  select(mrhst_induty_cl_nm) %>% unique() %>% matrix(nrow=1)
data_period %>% left_join(data_clust) %>% filter(cluster == 3) %>%
  select(mrhst_induty_cl_nm) %>% unique() %>% matrix(nrow=1)
data_period %>% left_join(data_clust) %>% filter(cluster == 4) %>%
  select(mrhst_induty_cl_nm) %>% unique() %>% matrix(nrow=1)
## Cluster 1 ##
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터1.png")
## Cluster 2 ##
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터2.png")
## Cluster 3 ##
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터3.png",width=700,height=350)
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터3_2.png",width=700,height=350)
## Cluster 4 ##
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터4.png",width=700,height=350)
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터4_1.png",width=700,height=350)
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터4_2.png",width=700,height=350)
######################
## CARD DATA preprocessing ##
######################
# NOTE(review): rm(list = ls()) wipes the whole global environment mid-script;
# everything defined above this point is discarded.
rm(list = ls())
card <- fread('card.csv', header = TRUE, stringsAsFactors = FALSE, data.table = FALSE, encoding = 'UTF-8' )
# Drop rows whose count/amount columns contain Hangul text (non-numeric
# garbage), convert them to numeric, parse the date, and drop the code columns.
card <- card %>% filter(! (selng_cascnt %in% grep('[ㄱ-힣]',unique(card$selng_cascnt), value = T)), ! (salamt %in% grep('[ㄱ-힣]',unique(card$salamt), value = T))) %>% mutate(selng_cascnt = as.numeric(selng_cascnt), salamt = as.numeric(salamt), receipt_dttm = ymd(receipt_dttm)) %>% select(-c(adstrd_code, mrhst_induty_cl_code))
# Clamp negative counts/amounts (refunds/corrections, presumably -- TODO confirm) to zero.
card$selng_cascnt = ifelse(card$selng_cascnt < 0, 0, card$selng_cascnt)
card$salamt = ifelse(card$salamt < 0, 0, card$salamt)
#########################################
## Per-item NA check (days with no payments) ##
#########################################
# Wide table: one row per date, one column per merchant category,
# cells = mean daily transaction count; a missing cell means no payment that day.
card_amount = card %>% group_by(receipt_dttm, mrhst_induty_cl_nm) %>% summarize(mean_amount = mean(selng_cascnt)) %>% ungroup() %>% spread(key = mrhst_induty_cl_nm, value = mean_amount)
# Count missing days per category and plot them.
data_missing = card_amount[,-1] %>% is.na() %>% colSums() %>% as_tibble()
data_missing = cbind(colnames(card_amount)[-1], data_missing)
colnames(data_missing) = c('name', 'num_missing')
data_missing %>% ggplot(aes(x = reorder(name, -num_missing), y = num_missing)) + geom_col(aes(fill = num_missing), alpha = 0.5) + scale_fill_gradient('결제내역 없는 비중', low = "#1E3269", high = "red") + scale_x_discrete(breaks = NULL) + labs(x = '카드 결제 품목', y = '결제 없는 일수') + ggtitle('카드 결제 품목 별 전체 기간(155일) 중 결제 없는 비율') + theme_classic() + theme(title = element_text(size = 15, face = 'bold'))
## Drop high-NA categories and convert to a time series ##
# Keep only categories with fewer than 10 missing days; fill the rest with 0.
card_amount = card_amount[colSums(is.na(card_amount)) < 10]
card_amount[is.na(card_amount)] <- 0
card_day <- card_amount[,-1] %>% ts(freq = 365.25)
## Inspect time-series plots ##
par(mfrow = c(1,2))
tfplot(card_day[,1:3], graphs.per.page = 3, Title = "Time Series Plot",col = c("#1E3269"))
tfplot(card_day[,4:6], graphs.per.page = 3, Title = "Time Series Plot",col = c("#1E3269"))
## Decide the number of factors via eigenvalues ##
# NOTE(review): DX is computed but not used below; xx recomputes the same diff.
DX <- diff(card_day, lag = 1)
xx <- eigen(cor(diff(card_day, lag = 1)), symmetric = TRUE)[["values"]]
# Scree plot of eigenvalues (index 1:153 -- presumably the number of retained
# categories; verify it matches length(xx)).
data.frame('eigen_value' = xx, 'index' = 1:153) %>% ggplot(aes(x = index, y = xx)) + geom_line(linetype = 'dashed', color = 'black', size = 0.5) + geom_point(col = '#CD1F48', size = 1.2) + labs(x = 'Eigen Value Number', y = 'Eigen Value') + ggtitle('Scree Plot by Eigen Value') + theme_classic() + theme(title = element_text(size = 15, face = 'bold'),plot.margin = margin(50,50,50,50))
# Time-series factor analysis (tsfa): 9 factors, quartimin rotation.
card_fa <- estTSF.ML(card_day, 9, rotation = "quartimin", normalize = TRUE)
# Plot all 9 estimated factor score series over the 155 days.
data.frame(factors(card_fa), 'time' = 1:155) %>% gather(key = 'Factor', value = 'value', -time) %>% ggplot(aes(x = time, y = value, color = Factor)) + geom_line() + labs(x = 'Time', y = 'Value') + ggtitle('Factors Time Series plot') + theme_classic() + theme(title = element_text(size = 15, face = 'bold'),plot.margin =margin(30,30,30,30)) + scale_x_discrete(breaks = NULL) + scale_y_discrete(breaks = NULL)
tfplot(factors(card_fa), Title = "Factors from model", lty = c("solid"), col = c("#1E3269"), xlab = c(""), ylab = c("Factor1", "Factor2", "Factor3", "Factor4", "Factor5", "Factor6", "Factor7", "Factor8", "Factor9"), par = list(mar = c(2.1, 4.1, 1.1, 0.5)), reset.screen = TRUE)
# Zoom in on factors 6 and 7 with loess trend lines.
data.frame(factors(card_fa), 'time' = 1:155) %>% gather(key = 'Factor', value = 'value', -time) %>% filter(Factor %in% c('Factor.6', 'Factor.7')) %>% ggplot(aes(x = time, y = value, color = Factor)) + geom_line() + geom_smooth(method = 'loess', formula = y~x, se = FALSE, linetype = 'longdash') + labs(x = 'Time', y = 'Value') + ggtitle('Factor 6 & 7 Time Series plot') + theme_classic() + theme(title = element_text(size = 15, face = 'bold'),plot.margin = margin(50,50,50,50)) + scale_x_discrete(breaks = NULL) + scale_y_discrete(breaks = NULL)+scale_color_manual(values=c( "#1E3269","#c00000"),labels =c('Factor6', 'Factor7'))
# Categories with loading > 100 on factor 6, plotted as a ranked bar chart.
factor_6 = card_fa$loadings[,6][which(card_fa$loadings[,6] > 100)] %>% data.frame()
factor_6 = data.frame('name' = row.names(factor_6), 'value' = factor_6[,1])
p1 <- factor_6 %>% ggplot(aes(x = reorder(name, value), y = value)) + geom_col(aes(fill = value), alpha = 0.4,) + scale_fill_gradient('영향을 끼친 정도', low = "#1E3269", high = "red") + labs(x = '카드 결제 품목', y = '영향을 끼친 정도') + ggtitle('Factor6에 영향을 끼친 카드 품목별 정도') + theme_classic() + theme(title = element_text(size = 15, face = 'bold')) + coord_flip()
# Same for factor 7.
factor_7 = card_fa$loadings[,7][which(card_fa$loadings[,7] > 100)] %>% data.frame()
factor_7 = data.frame('name' = row.names(factor_7), 'value' = factor_7[,1])
p2 <- factor_7 %>% ggplot(aes(x = reorder(name, value), y = value)) + geom_col(aes(fill = value), alpha = 0.4,) + scale_fill_gradient('영향을 끼친 정도', low = "#1E3269", high = "red") + labs(x = '카드 결제 품목', y = '영향을 끼친 정도') + ggtitle('Factor7에 영향을 끼친 카드 품목별 정도') + theme_classic() + theme(title = element_text(size = 15, face = 'bold')) + coord_flip()
grid.arrange(p1, p2, ncol = 1)
display_png(file="insight.JPG")
display_png(file="last.JPG")
/피너트리팀_포스트코로나시각화대회_최종본.r
no_license
syppp/Competition_PostCovid19
R
false
false
45,256
r
# Setup: install (only if missing) and quietly attach every package the
# analysis uses. The original unconditionally re-installed all 20 packages on
# every run and attached gridExtra and factoextra twice.
pkgs <- c(
  "rgdal", "ggmap", "sp", "maptools", "viridis", "magrittr", "scales",
  "gridExtra", "data.table", "tidyverse", "lubridate", "factoextra",
  "tfplot", "tsfa", "cluster", "IRdisplay", "foreign", "extrafont",
  "showtext", "grid"
)
missing_pkgs <- setdiff(pkgs, rownames(installed.packages()))
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs)
}
# Attach in the same order as the original explicit library() calls.
suppressPackageStartupMessages(
  invisible(lapply(pkgs, library, character.only = TRUE,
                   warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
)
# NOTE(review): hard-coded, user-specific working directory; every fread()
# below resolves relative to this folder -- adjust before running elsewhere.
setwd('C:/Users/samsung/Desktop/대학교/시각화공모전/KT_data_20200703')
getwd()
# Load the administrative-district map.
# Read the province-level shapefile and flatten it into a plottable data frame.
korea_map_shp = rgdal::readOGR("C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/지도/CTPRVN.shp")
korea_map = fortify(korea_map_shp)
korea_map %>% str()
# Load per-province confirmed-case counts and preprocess them so they can be
# joined onto the map polygons.
TimeProvince <-fread('C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/COVID_19/TimeProvince.csv', stringsAsFactors=FALSE, encoding = "UTF-8")
TimeProvince$date <- as.Date(TimeProvince$date)
TimeProvince$date <- as.character(TimeProvince$date,'%m/%d')
TimeProvince$province=TimeProvince$province %>% as.factor()
# Cumulative max = total confirmed per province.
confirm_added=TimeProvince %>% group_by(province)%>% summarize(N=max(confirmed))
# Recode province names to the polygon ids used by the shapefile.
confirm_added$province=plyr::revalue(confirm_added$province,c("서울"="0","부산"="1","대구"="2", "인천"="3","광주"="4","대전"="5", "울산"="6","세종"="7","경기도"="8", "강원도"="9","충청북도"="10","충청남도"="11", "전라북도"="12","전라남도"="13","경상북도"="14", "경상남도"="15","제주도"="16"))
colnames(confirm_added)<-c("id","confirmed")
# Merge case counts into the map by region id and draw the choropleth.
korea_map=merge(korea_map,confirm_added, by="id")
mycorona=ggplot() + geom_polygon(data=korea_map, aes(x=long, y=lat, group=group, fill=confirmed))
mycorona+scale_fill_gradient(low = "#F1C5C5", high = "#D92027")+ theme_bw() + theme(panel.grid.major.x = element_blank(), panel.grid.minor.x = element_blank(), panel.grid.major.y = element_blank(), panel.grid.minor.y = element_blank(), plot.title = element_text(face = "bold", size = 18, hjust = 0.5))
# Preprocessing: cumulative confirmed by age band as of 2020-06-30.
age <- fread('COVID_19/TimeAge.csv', stringsAsFactors=FALSE, data.table=FALSE, encoding = "UTF-8")
age <- age[age$date == '2020-06-30',]
age <- age[c(3:7),]
# Age pie chart. NOTE(review): /128 is a magic constant -- presumably
# total/100 so labels read as percentages; confirm against the data.
c <-ggplot(age, aes(x = "", y = confirmed, fill = age)) + geom_bar(width = 1, stat = "identity", color = "white") + coord_polar("y") + geom_text(aes(label = paste0(age,"\n",round(confirmed/128,1),"%")), position = position_stack(vjust = 0.5),color = "white") + scale_fill_manual(values = c("#CD0000","#F06E6E","#D25A5A","#CD4646","#FF7878","#F4A0A0","#FA8282")) + theme_void() + theme(text =element_text(face = "bold"))
# Gender pie chart.
gender <- fread('COVID_19/TimeGender.csv', stringsAsFactors=FALSE,data.table=FALSE, encoding = "UTF-8")
gender <- gender[gender$date == '2020-06-30',]
d <-ggplot(gender, aes(x = "", y = confirmed, fill = sex)) + geom_bar(width = 1, stat = "identity", color = "white") + coord_polar("y") + geom_text(aes(label = paste0(sex,"\n",round(confirmed/128,1),"%")), position = position_stack(vjust = 0.5),color = "white") + scale_fill_manual(values = c("#45556b","#637a9a")) + theme_void() +theme(text =element_text(face = "bold"))
grid.arrange(c,d, nrow=2, top = textGrob("나이대 및 성별 누적확진자 비율",gp=gpar(fontsize=20,font=2)))
##### Preprocessing: daily NEW confirmed cases from the cumulative series.
Time <-fread('COVID_19/Time.csv', stringsAsFactors=FALSE,data.table=FALSE, encoding = "UTF-8")
Time$date <- as.Date(Time$date)
str(Time)
# NOTE(review): this reuses `c` (the base concatenate function) as a vector
# name, and 163 is presumably nrow(Time) -- confirm.
c <- c(rep(0,163))
c[1] <- 1
for (i in 2:length(Time$confirmed)){
  if (Time$confirmed[i] - Time$confirmed[i-1] > 0) {
    c[i] <- Time$confirmed[i] - Time$confirmed[i-1]
  }else{
    c[i] <- 0
  }
}
Time <- cbind(Time, c)
# Plot daily new cases, annotating the 2020-02-29 peak and 2020-05-02 trough.
ggplot(Time, aes(x =date, y = `c`)) + geom_line(color='#c00000', size = 0.9) + coord_fixed(ratio = 0.1) + theme(panel.background = element_rect(fill = "#dee3eb" , color = "#6a6a6a"),plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),panel.grid.major = element_line(color = "white"), plot.title = element_text(face = "bold",hjust = 0.5, size =20),text =element_text(face = "bold", size =15)) + ggtitle("COVID-19 실확진자수 추이") + xlab("date(month)") + ylab("실확진자수(명)")+ geom_point(mapping =aes(x =Time$date[which.max(Time$`c`)] ,y = 813 ), color="black", size =2.5)+ annotate("text", x=Time$date[which.max(Time$`c`)], y=813,fontface=2,label="\n2020-02-29(813명)\n이유 : 신천지 집단감염",hjust = 1.1, size = 4.6 , color = "#1E3269") + geom_point(mapping =aes(x =Time$date[104] ,y = 2 ), color="black", size =2.5) + annotate("text", x=Time$date[104], y=2, label="2020-05-02(2명)\n\n\n", size = 5 ,fontface=2, color = "#1E3269") + geom_hline(yintercept=78.53, linetype='dashed', color='#FA8282', size=0.7,alpha = 0.7)+ annotate("text", x=Time$date[5], y=120, label="평균(약 78명)", size = 4 , fontface=2, color = "#D25A5A")
# Preprocessing
# 1. Reduce the floating-population data (fpopl) to weekly means.
fpopl <-fread('fpopl.csv', stringsAsFactors=FALSE,data.table=FALSE, encoding = "UTF-8")
fpopl %<>% select(base_ymd,adstrd_code,popltn_cascnt)
code <-fread('adstrd_master.csv', stringsAsFactors=FALSE,data.table=FALSE, encoding = "UTF-8")
code %<>% select(adstrd_code,signgu_nm)
doraga <- left_join(fpopl,code,by="adstrd_code")
doraga %<>% select(-adstrd_code)
# Convert yyyymmdd integers to ISO week numbers.
doraga$base_ymd <- as.character(doraga$base_ymd)
doraga$base_ymd <- as.Date(doraga$base_ymd,"%Y%m%d")
doraga$base_ymd <- week(doraga$base_ymd)
week <- c(1:24)
fpopl_mean <- c(rep(0,24))
df <- data.frame(week,fpopl_mean)
# Mean floating population for each of the first 24 weeks.
for (i in 1:24){
  a <- doraga %>% filter(base_ymd == i) %>% summarise(n=mean(popltn_cascnt))
  df$fpopl_mean[i] <- a$n
}
# 2. Merge with the Time data to compare weekly floating population against
#    weekly new confirmed cases.
Time$date <- week(Time$date)
Time %<>% group_by(date) %>% summarize(c_mean= mean(c)) %>% unique()
colnames(Time) <- c("week","c_mean")
time_fpopl <- plyr::join(Time, df, by = "week")
time_fpopl <- time_fpopl[time_fpopl$week <= 24,]
# Dual-axis plot: bars = floating population (scaled x2), line = new cases;
# the shaded rect marks weeks 8-11.
period <-data.frame(시기 = c("2"),start =c(8), end=c(11))
ggplot() + geom_bar(data=time_fpopl, aes(x = week, y = fpopl_mean*2 ,color = "유동인구 수") ,fill = '#1E3269',alpha= 0.3,stat ="identity", position="dodge")+ ylab("명") +
  geom_smooth(data=time_fpopl, aes(x = week, y = fpopl_mean*2),method = 'loess', formula ='y ~ x',span = 0.3, color = "#ff8888",size = 0.9,se =F, linetype= "dashed")+
  geom_line(data=time_fpopl, aes(x = week, y = c_mean, color = "실확진자 수"),size =0.9)+
  scale_colour_manual("", values=c("실확진자 수" = "#c00000", "유동인구 수" = "#1E3269")) +
  coord_fixed(ratio = 0.025) +
  theme(panel.background = element_rect(fill = "#dee3eb" , color = "black"),plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),legend.position="bottom", plot.title = element_text(face = "bold",hjust = 0.5, size =20), text = element_text(face = "bold",hjust = 0.5, size =15)) +
  ggtitle("주 단위 실확진자와 유동인구 추이 비교")+
  scale_y_continuous(
    # Features of the first axis
    name = "주별 평균 실확진자 수",
    # Add a second axis and specify its features
    sec.axis = sec_axis(~./2, name="주별 평균 유동인구 수")
  )+
  geom_rect(data=period, aes(NULL,NULL,xmin=start,xmax=end),fill="#adb9ca", ymin=0,ymax=573, colour="white", size=0.5, alpha=0.35)
display_png(file="period2.png")
display_png(file="period.png")
########################
### Delivery preprocessing ###
#######################
delivery <- fread('delivery.csv', header = T, stringsAsFactors = F, data.table = F, encoding = 'UTF-8' )
# Drop all *_code / *ID columns, then split PROCESS_DT into year/month/day.
code = delivery %>% select(ends_with('code')) %>% colnames()
id = c('SERIAL_NUMBER', colnames(select(delivery, ends_with('ID'))))
delivery = delivery %>% select(-c(code, id))
delivery = delivery %>% separate(PROCESS_DT, sep = '-', into = c('year', 'month', 'day'), remove = FALSE)
rm(list = c('code', 'id'))
# COVID phase boundaries ('기'/'승'/'전-1'/'전-2') used to shade the plots.
index <- data.frame('period' = c('기', '승', '전-1', '전-2'), 'start' = c('2020-01-01', '2020-02-22', '2020-03-08', '2020-05-06'), 'end' = c('2020-02-22', '2020-03-08', '2020-05-06', '2020-06-21'))
# Daily delivery order counts with a linear trend.
p1 <- delivery %>% group_by(PROCESS_DT) %>% summarize(N = n()) %>% ungroup() %>% ggplot(aes(x = PROCESS_DT, y = N, group = 1)) + geom_line(linetype = "dashed", color = 'black') + geom_smooth(method = 'lm', se = FALSE, formula = y~x, color = 'firebrick', linetype = 'longdash') + geom_point(color = "red", size = 1) + geom_rect(data = index, aes(NULL, NULL, fill = period, xmin = start, xmax = end), ymin = 0, ymax = 47000, alpha=0.5) + scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5', '전-2' = '#adb9ca')) + labs(x = "일자", y = '평균 배달 건수', title = '일별 평균 배달 건수', fill = '코로나 시기') + scale_x_discrete(breaks = NULL) + theme(legend.position = 'bottom', legend.box = 'vertical') + theme(panel.background = element_rect(fill = "white" , color = "black"), plot.title = element_text(face = "bold", hjust = 0.5, size =20), text = element_text(face = "bold",hjust = 0.5,size =15), plot.margin = margin(30,40,30,40))
# Daily mean delivery revenue (net of delivery fee and relay fee) for
# completed orders (status value 1 -- TODO confirm meaning).
p2 <- delivery %>% filter(DLVR_REQUST_STTUS_VALUE == 1) %>% mutate('SELL_AMOUNT' = GOODS_AMOUNT - DLVR_AMOUNT - CALL_RLAY_FEE_AMOUNT) %>% group_by(PROCESS_DT) %>% summarize(MEAN = mean(SELL_AMOUNT)) %>% ungroup() %>% ggplot(aes(x = PROCESS_DT, y = MEAN, group = 1)) + geom_line(linetype = "dashed", color = 'black') + geom_smooth(method = 'lm', se = FALSE, formula = y~x, color = 'firebrick', linetype = 'longdash') + geom_point(color = "red", size = 1) + geom_rect(data = index, aes(NULL, NULL, fill = period, xmin = start, xmax = end), ymin = 0, ymax = 45224, alpha=0.5) + scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5', '전-2' = '#adb9ca')) + labs(x = "일자", y = '평균 배달 매출', title = '일별 평균 배달 매출', fill = '코로나 시기') + scale_x_discrete(breaks = NULL) + theme(legend.position = 'bottom', legend.box = 'vertical') + theme(panel.background = element_rect(fill = "white" , color = "black"), plot.title = element_text(face = "bold", hjust = 0.5, size =20), text = element_text(face = "bold",hjust = 0.5,size =15), plot.margin = margin(30,40,30,40))
grid.arrange(p1, p2, ncol = 1)
display_png(file="delivery_increase.JPG")
# Flag stores in provinces with severe outbreaks (bad = 1).
delivery$bad = ifelse(delivery$DLVR_STORE_SIDO %in% c('경기도', '경상북도', '대구광역시', '서울'), 1, 0)
# Daily order counts: severe vs mild regions.
delivery %>% group_by(PROCESS_DT, bad) %>% summarize(N = n()) %>% ungroup() %>% ggplot(aes(x = PROCESS_DT, y = N)) + geom_line(aes(group = as.factor(bad), color = as.factor(bad)), size = 0.6) + geom_smooth(aes(group = as.factor(bad), color = as.factor(bad)), method = 'lm', se = FALSE, formula = y~x, linetype = 'longdash') + geom_rect(data = index, aes(NULL, NULL, fill = period, xmin = start, xmax = end), ymin = 0, ymax = 30000, alpha=0.5) + scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5', '전-2' = '#adb9ca')) + scale_linetype_discrete('코로나 심각 수준(지역)', labels = c('완만', '심각'), aes(Color = as.factor(bad))) + scale_color_manual(labels = c('완만', '심각'),values=c( "#1E3269","#c00000")) + scale_x_discrete(breaks = NULL) + labs(x = '일자', color = '코로나 심각 수준(지역)', y = '배달 건수', fill = '코로나 시기') + ggtitle('코로나 심각지역 vs 완만지역 일별 배달건수') + theme(legend.position = 'bottom', legend.box = 'vertical') + theme(panel.background = element_rect(fill = "white" , color = "black"), plot.title = element_text(face = "bold", hjust = 0.5, size =20), text = element_text(face = "bold",hjust = 0.5,size =15), plot.margin = margin(30,40,30,40))
# Month-level phase boundaries (same phases, coarser resolution).
month_index <- data.frame('period' = c('기', '승', '전-1'), 'start' = c('01', '02', '03'), 'end' = c('02', '03', '05'))
# Monthly revenue trend per store category; errand/lunchbox categories get a
# solid, thicker trend line.
p1 <- delivery %>% filter(DLVR_REQUST_STTUS_VALUE == 1) %>% mutate('SELL_AMOUNT' = GOODS_AMOUNT - DLVR_AMOUNT - CALL_RLAY_FEE_AMOUNT) %>% group_by(month, DLVR_STORE_INDUTY_NM) %>% summarise(MEAN = mean(SELL_AMOUNT)) %>% ungroup() %>% filter(month != "06") %>% mutate('NEW_cate' = ifelse(DLVR_STORE_INDUTY_NM %in% c('심부름', '도시락') , 1, 0)) %>% ggplot(aes(x = month, y = MEAN)) + geom_smooth(aes(group = DLVR_STORE_INDUTY_NM, color= DLVR_STORE_INDUTY_NM, linetype = as.factor(NEW_cate), size = as.factor(NEW_cate)), method = 'lm', se = FALSE, formula = y~x) + geom_rect(data = month_index, aes(NULL, NULL, fill = period, xmin = start, xmax = end), ymin = 0, ymax = 33000, alpha=0.5) + scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5')) + scale_linetype_manual(values=c("dashed", "solid"),labels = c('그외', '심부름&도시락'))+ scale_size_manual(values = c(1, 1.1), guide = 'none') + guides(color = FALSE) + labs(x = '월', y = '평균 배달 매출', linetype = '매출 성장 추세', fill = '코로나 시기', color = '배달품목') + ggtitle('배달품목별 월별 매출 변화 추세') + theme(panel.background = element_rect(fill = "white" , color = "black"), plot.title = element_text(face = "bold", hjust = 0.5, size =20), text = element_text(face = "bold",hjust = 0.5,size =15), plot.margin = margin(30,40,30,40))
# Same monthly revenue, restricted to errand ('심부름') and lunchbox ('도시락').
p2 <- delivery %>% filter(DLVR_REQUST_STTUS_VALUE == 1) %>% mutate('SELL_AMOUNT' = GOODS_AMOUNT - DLVR_AMOUNT - CALL_RLAY_FEE_AMOUNT) %>% group_by(month, DLVR_STORE_INDUTY_NM) %>% summarise(MEAN = mean(SELL_AMOUNT)) %>% ungroup() %>% filter(month != "06") %>% filter(DLVR_STORE_INDUTY_NM %in% c('심부름', '도시락')) %>% ggplot(aes(x = month, y = MEAN)) + geom_line(aes(group = DLVR_STORE_INDUTY_NM, color= DLVR_STORE_INDUTY_NM)) + geom_point(aes(group = DLVR_STORE_INDUTY_NM, color= DLVR_STORE_INDUTY_NM) ,size = 1) + geom_smooth(aes(group = DLVR_STORE_INDUTY_NM, color= DLVR_STORE_INDUTY_NM), method = 'lm', se = FALSE, formula = y~x, linetype = 'longdash') + geom_rect(data = month_index, aes(NULL, NULL, fill = period, xmin = start, xmax = end), ymin = 0, ymax = 30000, alpha=0.5) + scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5')) + scale_color_manual(values=c( "#1E3269","#c00000")) + labs(x = '월', y = '평균 배달 매출', fill = '코로나 시기', color = '배달품목') + ggtitle('배달품목별 월별 매출 변화 추세') + theme(panel.background = element_rect(fill = "white" , color = "black"), plot.title = element_text(face = "bold", hjust = 0.5, size =20), text = element_text(face = "bold",hjust = 0.5,size =15), plot.margin = margin(30,40,30,40))
grid.arrange(p1, p2, ncol = 1)
# Persist the completed-orders filter and the NEW_cate flag on delivery itself.
delivery = delivery %>% filter(DLVR_REQUST_STTUS_VALUE == 1) %>% mutate('SELL_AMOUNT' = GOODS_AMOUNT - DLVR_AMOUNT - CALL_RLAY_FEE_AMOUNT) %>% mutate('NEW_cate' = ifelse(DLVR_STORE_INDUTY_NM %in% c('심부름', '도시락') , 1, 0))
# Monthly mean revenue: errand&lunchbox vs all others.
p1 <- delivery %>% group_by(month, NEW_cate) %>% summarise(MEAN = mean(SELL_AMOUNT)) %>% ungroup() %>% filter(month != "06") %>% ggplot(aes(x = month, y = MEAN)) + geom_line(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate))) + geom_point(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate)), size = 1) + geom_smooth(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate)), method = 'lm', se = FALSE, formula = y~x, linetype = 'longdash') + geom_rect(data = month_index, aes(NULL, NULL, fill = period, xmin = start, xmax = end), ymin = 0, ymax = 33000, alpha=0.5) + scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5')) + scale_color_manual(labels = c('그외', '심부름&도시락'), values=c( "#1E3269","#c00000")) + labs(x = '월', color = '배달품목', y = '월별 평균 배달 매출', fill = '코로나 시기') + ggtitle('배달품목별 월별 매출 변화 추세') + theme(panel.background = element_rect(fill = "white" , color = "black"), plot.title = element_text(face = "bold", hjust = 0.5, size =20), text = element_text(face = "bold",hjust = 0.5,size =15), plot.margin = margin(30,40,30,40))
# Monthly order counts for the same split. NOTE(review): mean(n()) is just the
# group size n(); the mean() wrapper is redundant.
p2 <- delivery %>% group_by(month, NEW_cate) %>% summarise(MEAN = mean(n())) %>% ungroup() %>% filter(month != "06") %>% ggplot(aes(x = month, y = MEAN)) + geom_line(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate))) + geom_point(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate)), size = 1) + geom_smooth(aes(group = as.factor(NEW_cate), color = as.factor(NEW_cate)), method = 'lm', se = FALSE, formula = y~x, linetype = 'longdash') + geom_rect(data = month_index, aes(NULL, NULL, fill = period, xmin = start, xmax = end), ymin = -1000000, ymax = 1200000, alpha=0.5) + scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5')) + scale_color_manual(labels = c('그외', '심부름&도시락'), values=c( "#1E3269","#c00000")) + labs(x = '월', color = '배달품목', y = '월별 배달 건수', fill = '코로나 시기') + ggtitle('배달품목별 월별 건수 변화 추세') + theme(panel.background = element_rect(fill = "white" , color = "black"), plot.title = element_text(face = "bold", hjust = 0.5, size =20), text = element_text(face = "bold",hjust = 0.5,size =15), plot.margin = margin(30,40,30,40))
grid.arrange(p1, p2, ncol = 1)
# Errand&lunchbox only, with a quadratic trend (y ~ x + x^2).
delivery %>% group_by(month, NEW_cate) %>% summarise(MEAN = mean(n())) %>% ungroup() %>% filter(month != "06") %>% filter(NEW_cate == 1) %>% ggplot(aes(x = month, y = MEAN)) + geom_line(group = 1, color = "#c00000") + geom_point(group = 1, color = "#c00000", size = 1) + geom_smooth(aes(group = 1), color = "#c00000", method = 'lm', se = FALSE, formula = y ~ x + I(x^2), linetype = 'longdash') + geom_rect(data = month_index, aes(NULL, NULL, fill = period, xmin = start, xmax = end), ymin = 0, ymax = 15000, alpha=0.5) + scale_fill_manual(values=c("기" = "#d6dce5", "승" = "#adb9ca", '전-1' = '#d6dce5')) + labs(x = '월', y = '월별 배달 건수', fill = '코로나 시기') + ggtitle('도시락&심부름 : 월별 건수 변화 및 추세') + theme(legend.position = 'bottom') + theme(panel.background = element_rect(fill = "white" , color = "black"), plot.title = element_text(face = "bold", hjust = 0.5, size =20), text = element_text(face = "bold",hjust = 0.5,size =15), plot.margin = margin(30,30,30,30))
display_png(file="delivery_2.JPG")
# Boxplots: revenue by region severity, split by errand&lunchbox vs others.
delivery %>% group_by(month, NEW_cate, bad) %>% summarise(MEAN = mean(SELL_AMOUNT)) %>% ungroup() %>% filter(month != '06') %>% ggplot(aes(y = MEAN, x = as.factor(bad), color = as.factor(NEW_cate))) + stat_boxplot(alpha = 0.4, geom = 'errorbar') + stat_summary(aes(fill = as.factor(NEW_cate)), fun = mean, size= 5, geom="point", shape=20, position = position_dodge(width = .75)) + geom_boxplot(alpha = 0.4) + scale_color_manual(labels = c('그외', '심부름,도시락'), values=c("#c00000", "#1E3269")) + scale_fill_discrete(guide = 'none') + scale_x_discrete(labels = c('완만지역', '심각지역')) + labs(x = '코로나 심각 수준', color = '배달품목', y = '배달 매출') + ggtitle('코로나 심각에 따른 심부름&도시락 vs 그외품목 매출 비교') + theme_classic() + theme(legend.position = "bottom", title = element_text(size = 15, face = 'bold'), text = element_text(face = "bold",hjust = 0.5,size =15), plot.margin = margin(45,45,45,45))
# Preprocessing: consumption index (cgi), mean per period/category/age/gender.
index <- fread('index.csv', data.table = FALSE, encoding = 'UTF-8')
index %<>% group_by(period,catm,age,gender) %>% mutate(cgi_mean = mean(cgi)) %>% select(period,catm,age,gender,cgi_mean) %>% ungroup() %>% unique()
# Keep March and May 2020 and compute relative change between them.
index <- index[index$period == 202003 | index$period == 202005,]
# NOTE(review): `a` is allocated but never used in this section.
a <- c(rep(0,594))
month03 <- filter(index,index$period == 202003)
month05 <- filter(index,index$period == 202005)
change <- plyr::join(month03, month05, by = c("catm","age","gender"))
colnames(change) <- c("period","catm","age","gender","mean1","period2","mean2")
change %<>% mutate(change = (mean2-mean1)/mean1) %>% na.omit() %>% select(catm,age,gender,change,period2)
change%<>% spread(key='catm', value='change') %>% select(-period2)
change
# Scaling
change_factor = change %>% select(-age,-gender) %>% na.omit
change_factor = scale(change_factor) %>% na.omit %>% as.tibble()
# Scree plot to pick the number of factors.
par(mar = c(12,5,12,5))
plot(prcomp(change_factor),type="l",sub = 'Scree Plot' , col = "#1E3269")
# Biplot-style visualization: factor scores (blue, labeled by age) overlaid
# with variable loadings (red).
change_fa <- factanal(change_factor,factors=2,rotation = "varimax",scores = "regression")
par(mar = c(6,5,6,5))
plot(change_fa$scores, col = "#1E3269",pch = 20, fg = "#423144",col.axis ='#423144')
text(change_fa$scores[,1], change_fa$scores[,2], labels = change$age, cex = 0.8, pos = 2, col = "#1E3269")
points(change_fa$loadings, pch= 20, col = "#c00000")
text(change_fa$loadings[,1], change_fa$loadings[,2],labels = rownames(change_fa$loadings), cex = 0.5, pos = 4, col = "#c00000")
# Reload index and compare 2019 vs 2020 monthly cgi for selected categories.
index <- fread('index.csv', stringsAsFactors=FALSE,data.table=FALSE, encoding = "UTF-8")
index %<>% mutate(year = ifelse(period>202000,2020,2019))
index %<>% group_by(period,catm) %>% mutate(n=mean(cgi)) %>% filter(period < 201906 | period >= 202001 )
index %<>% select(period, catm, n, year) %>% unique()
index %<>% mutate(month = substr(as.character(period),5,6))
# Cosmetics ('화장품').
a <- index %>% filter(catm == "화장품")%>% ggplot(aes(x = month, y = n, group = as.character(year) ,color =as.character(year))) +geom_line(size = 0.4, linetype="longdash")+labs(color = "Year\n")+ ggtitle("화장품 : 2019/2020 cgi 변화율 추이") + scale_color_manual(values=c("#c00000", "#1E3269"))+ylab("cgi") +geom_point(size=2)+ theme(legend.position="right", panel.background = element_rect(fill = "white" , color = "black"),plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),plot.title = element_text(face = "bold",hjust = 0,size =15))
# Health-care goods ('건강관리용품').
b <- index %>% filter(catm == "건강관리용품")%>% ggplot(aes(x = month, y = n, group = as.character(year) ,color =as.character(year))) +geom_line(size = 0.4, linetype="longdash")+labs(color = "Year\n")+ ggtitle("건강관리용품 : 2019/2020 cgi 변화율 추이") +geom_point(size=2)+ scale_color_manual(values=c("#c00000", "#1E3269"))+ylab("cgi") + theme(legend.position="right", panel.background = element_rect(fill = "white" , color = "black"),plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),plot.title = element_text(face = "bold",hjust = 0,size =15))
# Pet supplies ('애완동물용품').
c <- index %>% filter(catm == "애완동물용품")%>% ggplot(aes(x = month, y = n, group = as.character(year) ,color =as.character(year))) +geom_line(size = 0.4, linetype="longdash")+labs(color = "Year\n")+ ggtitle("애완동물용품 : 2019/2020 cgi 변화율 추이") + geom_point(size=2)+ scale_color_manual(values=c("#c00000", "#1E3269"))+ylab("cgi") + theme(legend.position="right", panel.background = element_rect(fill = "white" , color = "black"),plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),plot.title = element_text(face = "bold",hjust = 0,size =15))
grid.arrange(a,b,c)
# AmorePacific quarterly sales-channel shares (offline/online/duty-free),
# hand-entered from the company report.
cosmetic <- fread('Amore Pacific.csv', data.table = FALSE, encoding = 'UTF-8')
cosmetic <- cosmetic[c(3,26,27,28),c(1,2,3,4,6,7,8,9,10)]
cosmetic
카테고리 <- c("오프라인","오프라인","오프라인","오프라인","오프라인","오프라인","오프라인","오프라인", "온라인","온라인","온라인","온라인","온라인","온라인","온라인","온라인", "면세점","면세점","면세점","면세점","면세점","면세점","면세점","면세점")
비율<- c(13.2,46.7,40.9,40.1,38.2,29.7,20.4,27.4, 12.9,13.2,11.5,13.4,15.5,14.2,19.8,30.7, 40.9,40.1,47.5,46.5,46.2,56.1,59.8,41.8)
분기 <- c("18-1","18-2","18-3","19-1","19-2","19-3","19-4","20-1","18-1","18-2","18-3","19-1","19-2","19-3","19-4","20-1","18-1","18-2","18-3","19-1","19-2","19-3","19-4","20-1")
cosmetic<- data.frame(cbind(카테고리,비율,분기))
a<- ggplot(cosmetic, aes(x=분기, y = 비율, group = 카테고리,color = 카테고리))+ geom_line(size = 0.5, linetype="longdash") + scale_color_manual(values=c('#FFBE0A','#1E3269','#CD1F48'))+ ggtitle("아모레퍼시픽 분기 매출비율 추이") + ylab("%") + geom_point(size=2)+ scale_y_discrete(breaks = c(11.5,15.5,30.7,46.2,59.8))+ theme(legend.position="top", panel.background = element_rect(fill = "white" , color = "black"),plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),plot.title = element_text(face = "bold",hjust = 0.5,size =15), text = element_text(face = "bold",hjust = 0.5,size =15)) + geom_vline(xintercept="19-4",linetype= "longdash", color='grey', size=0.4)
b<- ggplot(cosmetic, aes(x=분기, y = 비율, group = 카테고리,color = 카테고리)) +geom_line(size = 0.5, linetype="longdash") + scale_color_manual(values=c('#FFBE0A','#1E3269','#CD1F48'))+ ylab("%") + geom_point(size=2)+ coord_fixed(ratio = 0.33) + scale_y_discrete(breaks = c(11.5,15.5,30.7,46.2,59.8))+ theme(legend.position="", panel.background = element_rect(fill = "white" , color = "black"),plot.background = element_rect(fill = "white", size = 0.5, linetype = "solid"),plot.title = element_text(face = "bold",hjust = 0,size =15), text = element_text(face = "bold",hjust = 0.5,size =15)) + facet_wrap(~카테고리, ncol=3)+ theme(axis.text.x=element_blank())+ geom_vline(xintercept="19-4",linetype= "longdash", color='grey', size=0.4)
grid.arrange(a,b, ncol=1)
# Card data: load and clean for the clustering section below.
card <- fread('card.csv', header = T, stringsAsFactors = F, data.table = F, encoding = 'UTF-8')
## Remove Hangul (non-numeric) entries ##
data <- card %>% filter(! (selng_cascnt %in% grep('[ㄱ-힣]',unique(card$selng_cascnt), value = T)), ! (salamt %in% grep('[ㄱ-힣]',unique(card$salamt), value = T))) %>% mutate(selng_cascnt = as.numeric(selng_cascnt), salamt = as.numeric(salamt)) %>% select(- c(adstrd_code, mrhst_induty_cl_code))
rm(list = c('card'))
# NOTE(review): receipt_dttm is parsed with as.Date('%Y%m%d') here and then
# again with ymd() two lines below -- the second parse is redundant.
data$receipt_dttm=data$receipt_dttm %>% as.character() %>% as.Date('%Y%m%d')
## Check negative values - keep positives only ##
data$selng_cascnt %>% summary()
data$salamt %>% summary()
data = data %>% filter(selng_cascnt > 0, salamt > 0) %>% mutate(receipt_dttm = ymd(receipt_dttm), week = week(receipt_dttm))
data %>% glimpse()
# Flag each row with a COVID phase via a new 'period' variable.
# Row indices of the phase boundaries. NOTE(review): this assumes `data` is
# sorted by receipt_dttm -- confirm upstream ordering.
index1 = which(data$receipt_dttm == '2020-02-22') %>% max() # end of phase '기' (onset)
index2 = which(data$receipt_dttm == '2020-03-08') %>% max() # end of phase '승' (surge)
index3 = which(data$receipt_dttm == '2020-05-06') %>% max() # end of phase '전-1'
index4 = nrow(data) # end of phase '전-2'
data_period = data
data_period$period = c(rep(1, index1), rep(2, index2 - index1), rep(3, index3 - index2), rep(4, index4 - index3))
## Outlier and missing-value handling ##
data_period %>% is.na() %>% colSums()
# Mean transaction count per merchant category; violin plot of the distribution.
mean_amount=data_period %>% group_by(mrhst_induty_cl_nm) %>% summarise(N_amount=mean(selng_cascnt)) %>% arrange(N_amount)
mean_amount %>% ggplot(aes(x=1, y=N_amount))+ geom_violin( color = "#1E3269",size=0.3)+theme_bw() +theme(plot.margin = margin(60,60,60,60))
# Keep categories at or above the first quartile of mean counts.
categories_new=mean_amount %>% filter(N_amount>=quantile(mean_amount$N_amount)[2]) %>% arrange(desc(N_amount)) %>% select(mrhst_induty_cl_nm)%>% ungroup()
categories_new <- as.data.frame(categories_new)
data_period <- data_period %>% filter(mrhst_induty_cl_nm%in% as.matrix(categories_new,nrow = 1))
# Per-phase features per category: mean count, mean revenue, unit price;
# each spread to wide form (one column per phase).
data_amount_period <- data_period %>% group_by(period, mrhst_induty_cl_nm) %>% summarise(mean_amount = mean(selng_cascnt)) %>% ungroup() %>% spread(period, value = mean_amount)
data_selling_period <- data_period %>% group_by(period, mrhst_induty_cl_nm) %>% summarise(mean_selling = mean(salamt)) %>% ungroup() %>% spread(period, value = mean_selling)
data_price_period <- data_period %>% group_by(period,mrhst_induty_cl_nm) %>% summarize(once_price=sum(salamt)/sum(selng_cascnt)) %>% ungroup() %>% spread(period,value = once_price)
colnames(data_amount_period)[-1] = c('amount_1', 'amount_2', 'amount_3', 'amount_4')
colnames(data_selling_period)[-1] = c('selling_1', 'selling_2', 'selling_3', 'selling_4')
colnames(data_price_period)[-1] = c('price_1','price_2','price_3','price_4')
# One row per category: overall means plus the per-phase feature columns.
data_clust = data_period %>% group_by(mrhst_induty_cl_nm) %>% summarise(MEAN_SELLING = mean(salamt), MEAN_AMOUNT = mean(selng_cascnt), once_price = sum(salamt)/sum(selng_cascnt) ) %>% ungroup %>% left_join(data_amount_period) %>% left_join(data_selling_period) %>% left_join(data_price_period)
# Clustering needs numeric variables only; drop the category-name column.
clust1 = data_clust %>% select(-c(mrhst_induty_cl_nm))
clust_scaled = scale(clust1) %>% as_tibble()
# NOTE(review): seed passed as a string; R coerces it to integer, but a
# numeric literal would be clearer -- confirm intended behavior.
set.seed("19990107")
kmeans1 <- kmeans(clust_scaled, nstart = 10, iter.max = 15, centers = 4)
# Elbow (wss) and silhouette diagnostics support k = 4.
a<-fviz_nbclust(x = clust_scaled, FUNcluster = kmeans, method='wss') + geom_vline(xintercept = 4, linetype = 2)
b<-fviz_nbclust(x = clust_scaled, FUNcluster = kmeans, method = "silhouette") + geom_vline(xintercept = 4, linetype = 2)
grid.arrange(a,b)
data_clust$cluster = kmeans1$cluster
fviz_cluster(kmeans1, clust_scaled)+ theme_bw()+theme( legend.background = element_rect(color = 'black', size = 0.5),plot.margin=margin(50,10,50,10))
# Per-phase, per-cluster aggregates used in the trend panels below.
final_test_k = data_period %>% left_join(data_clust) %>% group_by(period, cluster) %>% summarise(mean_amount = mean(selng_cascnt), mean_selling = mean(salamt), once_price = sum(salamt)/sum(selng_cascnt) ) %>% ungroup() %>% select(c(period, cluster, mean_amount, mean_selling,once_price))
# p1/p1_2: mean transaction count per phase, overlaid and faceted by cluster.
p1 <- final_test_k %>% ggplot(aes(x = as.factor(period), y = mean_amount, group = as.factor(cluster), color = as.factor(cluster))) + geom_line(size=0.3,linetype="longdash")+geom_point(size=1)+theme_bw()+theme( axis.title = element_text(face = "bold",size =11), title = element_text(face="bold",size=13), legend.position = "none", text = element_text(face = "bold")) + scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+labs( color = "clusters",y="평균 판매량",x="",title="시기별 평균 판매량")+ scale_x_discrete(labels = c("기","승","전-1","전-2"))
p1_2 <- final_test_k %>% ggplot(aes(x = as.factor(period), y = mean_amount, group = as.factor(cluster), color = as.factor(cluster))) + geom_line(size=0.3)+theme_bw()+theme( axis.title = element_text(face = "bold",size =11), title = element_text(face="bold",size=13,hjust = 0.5),legend.background = element_rect(color = 'black', size = 0.5), legend.title = element_text(face="plain",size=10), legend.text = element_text(size=8)) + scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+labs( color = "clusters",y="평균 판매량",x="")+ scale_x_discrete(labels = c("기","승","전-1","전-2"))+facet_wrap(~cluster,scales="free")+theme(axis.text.x=element_blank(), axis.text.y=element_blank())+labs(y="",x="")
# p2/p2_2: mean revenue per phase.
p2 <- final_test_k %>% ggplot(aes(x = as.factor(period), y = mean_selling, group = as.factor(cluster), color = as.factor(cluster))) + geom_line(size=0.3,linetype="longdash")+geom_point(size=1)+theme_bw()+theme( axis.title = element_text(face = "bold",size =11), title = element_text(face="bold",size=13),legend.position = "none",text = element_text(face = "bold")) + scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+labs( color = "clusters",y="평균 매출",x="",title="시기별 평균 매출")+ scale_x_discrete(labels = c("기","승","전-1","전-2"))
p2_2<-final_test_k %>% ggplot(aes(x = as.factor(period), y = mean_selling, group = as.factor(cluster), color = as.factor(cluster))) + geom_line(size=0.3)+theme_bw()+theme( axis.title = element_text(face = "bold",size =11), title = element_text(face="bold",size=13,hjust = 0.5), legend.background = element_rect(color = 'black', size = 0.5), legend.title = element_text(face="plain",size=10), legend.text = element_text(size=8)) + scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+labs( color = "clusters",y="평균 매출",x="")+ scale_x_discrete(labels = c("기","승","전-1","전-2"))+facet_wrap(~cluster,scales="free")+theme(axis.text.x=element_blank(), axis.text.y=element_blank())+labs(y="",x="")
# p3/p3_2: mean unit price per phase.
p3 <- final_test_k %>% ggplot(aes(x = as.factor(period), y = once_price, group = as.factor(cluster), color = as.factor(cluster))) + geom_line(size=0.3,linetype="longdash")+geom_point(size=1)+theme_bw()+theme( axis.title = element_text(face = "bold",size =11), title = element_text(face="bold",size=13),legend.position = "none",text = element_text(face = "bold")) + scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+labs( color = "clusters",y="평균 가격",x="",title="시기별 평균 가격")+ scale_x_discrete(labels = c("기","승","전-1","전-2"))
# NOTE(review): p3_2 maps y = mean_selling although this panel is labeled as
# average price -- probably should be y = once_price (copy-paste from p2_2).
p3_2<-final_test_k %>% ggplot(aes(x = as.factor(period), y = mean_selling, group = as.factor(cluster), color = as.factor(cluster))) + geom_line(size=0.3)+theme_bw()+theme( axis.title = element_text(face = "bold",size =11), title = element_text(face="bold",size=13,hjust = 0.5), legend.background = element_rect(color = 'black', size = 0.5), legend.title = element_text(face="plain",size=10), legend.text = element_text(size=8) ) + scale_color_manual(values=c('#CD1F48','#FFBE0A',"#006400",'#1E3269'))+labs( color = "clusters",y="평균 가격",x="")+ scale_x_discrete(labels = c("기","승","전-1","전-2"))+facet_wrap(~cluster,scales="free")+theme(axis.text.x=element_blank(), axis.text.y=element_blank())+labs(y="",x="")
gridExtra::grid.arrange(p1,p1_2,p2,p2_2,p3,p3_2, ncol = 2, nrow = 3)
# List the merchant categories belonging to each cluster.
data_period %>% left_join(data_clust) %>% filter(cluster == 1) %>% select(mrhst_induty_cl_nm) %>% unique() %>% matrix(nrow=1)
data_period %>% left_join(data_clust) %>% filter(cluster == 2) %>% select(mrhst_induty_cl_nm) %>% unique() %>% matrix(nrow=1)
data_period %>% left_join(data_clust) %>% filter(cluster == 3) %>% select(mrhst_induty_cl_nm) %>% unique() %>% matrix(nrow=1)
data_period %>% left_join(data_clust) %>% filter(cluster == 4) %>% select(mrhst_induty_cl_nm) %>% unique() %>% matrix(nrow=1)
## Cluster 1 ##
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터1.png")
## Cluster 2 ##
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터2.png")
## Cluster 3 ##
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터3.png",width=700,height=350)
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터3_2.png",width=700,height=350)
## Cluster 4 ##
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터4.png",width=700,height=350)
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터4_1.png",width=700,height=350)
display_png(file="C:/Users/qtuej/Desktop/피샛/공모전/KT_data_20200703/2차본/클러스터4_2.png",width=700,height=350) ###################### ## CARD DATA 전처리 ## ###################### rm(list = ls()) card <- fread('card.csv', header = TRUE, stringsAsFactors = FALSE, data.table = FALSE, encoding = 'UTF-8' ) card <- card %>% filter(! (selng_cascnt %in% grep('[ㄱ-힣]',unique(card$selng_cascnt), value = T)), ! (salamt %in% grep('[ㄱ-힣]',unique(card$salamt), value = T))) %>% mutate(selng_cascnt = as.numeric(selng_cascnt), salamt = as.numeric(salamt), receipt_dttm = ymd(receipt_dttm)) %>% select(-c(adstrd_code, mrhst_induty_cl_code)) card$selng_cascnt = ifelse(card$selng_cascnt < 0, 0, card$selng_cascnt) card$salamt = ifelse(card$salamt < 0, 0, card$salamt) ######################################### ## CARD 품목별 NA(결제 안된 일수) 확인 ## ######################################### card_amount = card %>% group_by(receipt_dttm, mrhst_induty_cl_nm) %>% summarize(mean_amount = mean(selng_cascnt)) %>% ungroup() %>% spread(key = mrhst_induty_cl_nm, value = mean_amount) data_missing = card_amount[,-1] %>% is.na() %>% colSums() %>% as_tibble() data_missing = cbind(colnames(card_amount)[-1], data_missing) colnames(data_missing) = c('name', 'num_missing') data_missing %>% ggplot(aes(x = reorder(name, -num_missing), y = num_missing)) + geom_col(aes(fill = num_missing), alpha = 0.5) + scale_fill_gradient('결제내역 없는 비중', low = "#1E3269", high = "red") + scale_x_discrete(breaks = NULL) + labs(x = '카드 결제 품목', y = '결제 없는 일수') + ggtitle('카드 결제 품목 별 전체 기간(155일) 중 결제 없는 비율') + theme_classic() + theme(title = element_text(size = 15, face = 'bold')) ## NA 비중 큰 품목 제거 및 시계열 데이터 화 ## card_amount = card_amount[colSums(is.na(card_amount)) < 10] card_amount[is.na(card_amount)] <- 0 card_day <- card_amount[,-1] %>% ts(freq = 365.25) ## Time Series Plot 확인 ## par(mfrow = c(1,2)) tfplot(card_day[,1:3], graphs.per.page = 3, Title = "Time Series Plot",col = c("#1E3269")) tfplot(card_day[,4:6], graphs.per.page = 3, Title = "Time 
Series Plot",col = c("#1E3269")) ## Eigen value를 통한 Factor 개수 결정 ## DX <- diff(card_day, lag = 1) xx <- eigen(cor(diff(card_day, lag = 1)), symmetric = TRUE)[["values"]] data.frame('eigen_value' = xx, 'index' = 1:153) %>% ggplot(aes(x = index, y = xx)) + geom_line(linetype = 'dashed', color = 'black', size = 0.5) + geom_point(col = '#CD1F48', size = 1.2) + labs(x = 'Eigen Value Number', y = 'Eigen Value') + ggtitle('Scree Plot by Eigen Value') + theme_classic() + theme(title = element_text(size = 15, face = 'bold'),plot.margin = margin(50,50,50,50)) card_fa <- estTSF.ML(card_day, 9, rotation = "quartimin", normalize = TRUE) data.frame(factors(card_fa), 'time' = 1:155) %>% gather(key = 'Factor', value = 'value', -time) %>% ggplot(aes(x = time, y = value, color = Factor)) + geom_line() + labs(x = 'Time', y = 'Value') + ggtitle('Factors Time Series plot') + theme_classic() + theme(title = element_text(size = 15, face = 'bold'),plot.margin =margin(30,30,30,30)) + scale_x_discrete(breaks = NULL) + scale_y_discrete(breaks = NULL) tfplot(factors(card_fa), Title = "Factors from model", lty = c("solid"), col = c("#1E3269"), xlab = c(""), ylab = c("Factor1", "Factor2", "Factor3", "Factor4", "Factor5", "Factor6", "Factor7", "Factor8", "Factor9"), par = list(mar = c(2.1, 4.1, 1.1, 0.5)), reset.screen = TRUE) data.frame(factors(card_fa), 'time' = 1:155) %>% gather(key = 'Factor', value = 'value', -time) %>% filter(Factor %in% c('Factor.6', 'Factor.7')) %>% ggplot(aes(x = time, y = value, color = Factor)) + geom_line() + geom_smooth(method = 'loess', formula = y~x, se = FALSE, linetype = 'longdash') + labs(x = 'Time', y = 'Value') + ggtitle('Factor 6 & 7 Time Series plot') + theme_classic() + theme(title = element_text(size = 15, face = 'bold'),plot.margin = margin(50,50,50,50)) + scale_x_discrete(breaks = NULL) + scale_y_discrete(breaks = NULL)+scale_color_manual(values=c( "#1E3269","#c00000"),labels =c('Factor6', 'Factor7')) factor_6 = 
card_fa$loadings[,6][which(card_fa$loadings[,6] > 100)] %>% data.frame() factor_6 = data.frame('name' = row.names(factor_6), 'value' = factor_6[,1]) p1 <- factor_6 %>% ggplot(aes(x = reorder(name, value), y = value)) + geom_col(aes(fill = value), alpha = 0.4,) + scale_fill_gradient('영향을 끼친 정도', low = "#1E3269", high = "red") + labs(x = '카드 결제 품목', y = '영향을 끼친 정도') + ggtitle('Factor6에 영향을 끼친 카드 품목별 정도') + theme_classic() + theme(title = element_text(size = 15, face = 'bold')) + coord_flip() factor_7 = card_fa$loadings[,7][which(card_fa$loadings[,7] > 100)] %>% data.frame() factor_7 = data.frame('name' = row.names(factor_7), 'value' = factor_7[,1]) p2 <- factor_7 %>% ggplot(aes(x = reorder(name, value), y = value)) + geom_col(aes(fill = value), alpha = 0.4,) + scale_fill_gradient('영향을 끼친 정도', low = "#1E3269", high = "red") + labs(x = '카드 결제 품목', y = '영향을 끼친 정도') + ggtitle('Factor7에 영향을 끼친 카드 품목별 정도') + theme_classic() + theme(title = element_text(size = 15, face = 'bold')) + coord_flip() grid.arrange(p1, p2, ncol = 1) display_png(file="insight.JPG") display_png(file="last.JPG")
testlist <- list(a = 0L, b = 0L, x = c(-1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1124073473L, -196609L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -14680052L, 2293759L, -10682368L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)) result <- do.call(grattan:::anyOutside,testlist) str(result)
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131575-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
493
r
testlist <- list(a = 0L, b = 0L, x = c(-1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1124073473L, -196609L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, -14680052L, 2293759L, -10682368L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)) result <- do.call(grattan:::anyOutside,testlist) str(result)
# Install and load packages package_names <- c("survey","dplyr","foreign","devtools") lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x)) lapply(package_names, require, character.only=T) install_github("e-mitchell/meps_r_pkg/MEPS") library(MEPS) options(survey.lonely.psu="adjust") # Load FYC file FYC <- read.xport('C:/MEPS/.FYC..ssp'); year <- .year. if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.) if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.) if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X) FYC <- FYC %>% mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>% mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X)) FYC$ind = 1 # Education if(year <= 1998){ FYC <- FYC %>% mutate(EDUCYR = EDUCYR.yy.) }else if(year <= 2004){ FYC <- FYC %>% mutate(EDUCYR = EDUCYEAR) } if(year >= 2012 & year < 2016){ FYC <- FYC %>% mutate( less_than_hs = (0 <= EDRECODE & EDRECODE < 13), high_school = (EDRECODE == 13), some_college = (EDRECODE > 13)) }else{ FYC <- FYC %>% mutate( less_than_hs = (0 <= EDUCYR & EDUCYR < 12), high_school = (EDUCYR == 12), some_college = (EDUCYR > 12)) } FYC <- FYC %>% mutate( education = 1*less_than_hs + 2*high_school + 3*some_college, education = replace(education, AGELAST < 18, 9), education = recode_factor(education, .default = "Missing", .missing = "Missing", "1" = "Less than high school", "2" = "High school", "3" = "Some college", "9" = "Inapplicable (age < 18)", "0" = "Missing")) # Marital status if(year == 1996){ FYC <- FYC %>% mutate(MARRY42X = ifelse(MARRY2X <= 6, MARRY2X, MARRY2X-6), MARRY31X = ifelse(MARRY1X <= 6, MARRY1X, MARRY1X-6)) } FYC <- FYC %>% mutate_at(vars(starts_with("MARRY")), funs(replace(., .< 0, NA))) %>% mutate(married = coalesce(MARRY.yy.X, MARRY42X, MARRY31X)) %>% mutate(married = recode_factor(married, .default = "Missing", .missing = "Missing", "1" = "Married", "2" = "Widowed", "3" = "Divorced", "4" = "Separated", 
"5" = "Never married", "6" = "Inapplicable (age < 16)")) # Keep only needed variables from FYC FYCsub <- FYC %>% select(married,education,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU) # Load event files RX <- read.xport('C:/MEPS/.RX..ssp') DVT <- read.xport('C:/MEPS/.DV..ssp') IPT <- read.xport('C:/MEPS/.IP..ssp') ERT <- read.xport('C:/MEPS/.ER..ssp') OPT <- read.xport('C:/MEPS/.OP..ssp') OBV <- read.xport('C:/MEPS/.OB..ssp') HHT <- read.xport('C:/MEPS/.HH..ssp') # Define sub-levels for office-based and outpatient # To compute estimates for these sub-events, replace 'event' with 'event_v2X' # in the 'svyby' statement below, when applicable OBV <- OBV %>% mutate(event_v2X = recode_factor( SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OBD', '2' = 'OBO')) OPT <- OPT %>% mutate(event_v2X = recode_factor( SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OPY', '2' = 'OPZ')) # Stack events stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT, keep.vars = c('SEEDOC','event_v2X')) stacked_events <- stacked_events %>% mutate(event = data, PR.yy.X = PV.yy.X + TR.yy.X, OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>% select(DUPERSID, event, event_v2X, SEEDOC, XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X) pers_events <- stacked_events %>% group_by(DUPERSID) %>% summarise(ANY = sum(XP.yy.X >= 0), EXP = sum(XP.yy.X > 0), SLF = sum(SF.yy.X > 0), MCR = sum(MR.yy.X > 0), MCD = sum(MD.yy.X > 0), PTR = sum(PR.yy.X > 0), OTZ = sum(OZ.yy.X > 0)) %>% ungroup n_events <- full_join(pers_events,FYCsub,by='DUPERSID') %>% mutate_at(vars(ANY,EXP,SLF,MCR,MCD,PTR,OTZ), function(x) ifelse(is.na(x),0,x)) nEVTdsgn <- svydesign( id = ~VARPSU, strata = ~VARSTR, weights = ~PERWT.yy.F, data = n_events, nest = TRUE) results <- svyby(~ANY, FUN=svymean, by = ~married + education, design = nEVTdsgn) print(results)
/mepstrends/hc_use/json/code/r/avgEVT__married__education__.r
permissive
RandomCriticalAnalysis/MEPS-summary-tables
R
false
false
4,335
r
# Install and load packages package_names <- c("survey","dplyr","foreign","devtools") lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x)) lapply(package_names, require, character.only=T) install_github("e-mitchell/meps_r_pkg/MEPS") library(MEPS) options(survey.lonely.psu="adjust") # Load FYC file FYC <- read.xport('C:/MEPS/.FYC..ssp'); year <- .year. if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.) if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.) if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X) FYC <- FYC %>% mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>% mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X)) FYC$ind = 1 # Education if(year <= 1998){ FYC <- FYC %>% mutate(EDUCYR = EDUCYR.yy.) }else if(year <= 2004){ FYC <- FYC %>% mutate(EDUCYR = EDUCYEAR) } if(year >= 2012 & year < 2016){ FYC <- FYC %>% mutate( less_than_hs = (0 <= EDRECODE & EDRECODE < 13), high_school = (EDRECODE == 13), some_college = (EDRECODE > 13)) }else{ FYC <- FYC %>% mutate( less_than_hs = (0 <= EDUCYR & EDUCYR < 12), high_school = (EDUCYR == 12), some_college = (EDUCYR > 12)) } FYC <- FYC %>% mutate( education = 1*less_than_hs + 2*high_school + 3*some_college, education = replace(education, AGELAST < 18, 9), education = recode_factor(education, .default = "Missing", .missing = "Missing", "1" = "Less than high school", "2" = "High school", "3" = "Some college", "9" = "Inapplicable (age < 18)", "0" = "Missing")) # Marital status if(year == 1996){ FYC <- FYC %>% mutate(MARRY42X = ifelse(MARRY2X <= 6, MARRY2X, MARRY2X-6), MARRY31X = ifelse(MARRY1X <= 6, MARRY1X, MARRY1X-6)) } FYC <- FYC %>% mutate_at(vars(starts_with("MARRY")), funs(replace(., .< 0, NA))) %>% mutate(married = coalesce(MARRY.yy.X, MARRY42X, MARRY31X)) %>% mutate(married = recode_factor(married, .default = "Missing", .missing = "Missing", "1" = "Married", "2" = "Widowed", "3" = "Divorced", "4" = "Separated", 
"5" = "Never married", "6" = "Inapplicable (age < 16)")) # Keep only needed variables from FYC FYCsub <- FYC %>% select(married,education,ind, DUPERSID, PERWT.yy.F, VARSTR, VARPSU) # Load event files RX <- read.xport('C:/MEPS/.RX..ssp') DVT <- read.xport('C:/MEPS/.DV..ssp') IPT <- read.xport('C:/MEPS/.IP..ssp') ERT <- read.xport('C:/MEPS/.ER..ssp') OPT <- read.xport('C:/MEPS/.OP..ssp') OBV <- read.xport('C:/MEPS/.OB..ssp') HHT <- read.xport('C:/MEPS/.HH..ssp') # Define sub-levels for office-based and outpatient # To compute estimates for these sub-events, replace 'event' with 'event_v2X' # in the 'svyby' statement below, when applicable OBV <- OBV %>% mutate(event_v2X = recode_factor( SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OBD', '2' = 'OBO')) OPT <- OPT %>% mutate(event_v2X = recode_factor( SEEDOC, .default = 'Missing', .missing = "Missing", '1' = 'OPY', '2' = 'OPZ')) # Stack events stacked_events <- stack_events(RX, DVT, IPT, ERT, OPT, OBV, HHT, keep.vars = c('SEEDOC','event_v2X')) stacked_events <- stacked_events %>% mutate(event = data, PR.yy.X = PV.yy.X + TR.yy.X, OZ.yy.X = OF.yy.X + SL.yy.X + OT.yy.X + OR.yy.X + OU.yy.X + WC.yy.X + VA.yy.X) %>% select(DUPERSID, event, event_v2X, SEEDOC, XP.yy.X, SF.yy.X, MR.yy.X, MD.yy.X, PR.yy.X, OZ.yy.X) pers_events <- stacked_events %>% group_by(DUPERSID) %>% summarise(ANY = sum(XP.yy.X >= 0), EXP = sum(XP.yy.X > 0), SLF = sum(SF.yy.X > 0), MCR = sum(MR.yy.X > 0), MCD = sum(MD.yy.X > 0), PTR = sum(PR.yy.X > 0), OTZ = sum(OZ.yy.X > 0)) %>% ungroup n_events <- full_join(pers_events,FYCsub,by='DUPERSID') %>% mutate_at(vars(ANY,EXP,SLF,MCR,MCD,PTR,OTZ), function(x) ifelse(is.na(x),0,x)) nEVTdsgn <- svydesign( id = ~VARPSU, strata = ~VARSTR, weights = ~PERWT.yy.F, data = n_events, nest = TRUE) results <- svyby(~ANY, FUN=svymean, by = ~married + education, design = nEVTdsgn) print(results)
#' Convert PLEXOS files to SQLite databases #' #' Functions to process PLEXOS solution and input files and dump them into SQLite databases #' that are easier to read. #' #' \code{process_solution} converts a single zipped PLEXOS solution into a SQLite database. #' The resulting database file has a simplified structure so data can be retrieved easily. #' The temporary database contains the original data structure in the PLEXOS solution file. #' #' \code{process_input} converts a single PLEXOS input file (in XML form) solution into a #' SQLite database. The database includes the raw tables and a series of views that better #' organize the data. #' #' \code{process_folders} is used to process one or more folders. It searches each folder for #' PLEXOS solution or input files and applies \code{process_solution} or #' \code{process_input} to each one of them. #' #' The default folder is the working directory. If the wildcard \code{"*"} is provided, all #' the folders in the working directory will be processed (the list of folders if provided by #' the \code{\link{list_folders}} function). #' #' Do not rename the SQLite databases created with these functions. Other code expects #' those filenames to remain unchanged. #' #' @param folders Folder(s) to process (See details) #' @param file Single PLEXOS solution or input file to process #' @param keep.temp Should temporary databases be preserved? 
#' #' @examples #' # Process the folder with the solution file provided by rplexos #' location_solution <- location_solution_rplexos() #' process_folder(location_solution) #' #' # Process the folder with the input file provided by rplexos #' # location_input <- location_input_rplexos() (for the moment, there is a bug in the soln file) #' # process_folder(location_input) #' #' # Other examples #' \dontrun{process_folder()} #' \dontrun{process_solution("HiWind/Model WWSIS_c_RT_CoreB_M01_SC3 Solution.zip")} #' \dontrun{process_input("WWSIS model.xml")} #' #' @export process_folder <- function(folders = ".", keep.temp = FALSE) { # Check inputs stopifnot(is.character(folders), is.logical(keep.temp), length(keep.temp) == 1L) check_is_folder(folders) # Check for wildcard if (length(folders) == 1L) { if (identical(folders, "*")) { folders <- list_folders() } } # Check that folders exist are.dirs <- file.info(folders)$isdir are.dirs[is.na(are.dirs)] <- FALSE if(!all(are.dirs)) { not.dirs <- folders[!are.dirs] stop(paste(not.dirs, collapse = ", "), " are not valid folders", call. = FALSE) } # Function to list PLEXOS files in each folder plexos_list_files <- function(df) { filename <- list.files(df$folder, ".xml$|.XML$|.zip$|.ZIP$", full.names = TRUE) data_frame(type = ifelse(grepl(".xml$|.XML$", filename), "I", "S"), filename) } # Get database file names df <- data.frame(folder = folders, stringsAsFactors = FALSE) %>% group_by(folder) %>% do(plexos_list_files(.)) # Error if all folders were empty if (nrow(df) == 0L) stop("No input/solution files were found", call. = FALSE) # Check for folders without databases folder.missing <- setdiff(folders, df$folder) if (length(folder.missing) > 0L) { warning("No databases found in folder", ifelse(length(folder.missing) == 1L, ": ", "s: "), paste(folder.missing, collapse = ", "), call. 
= FALSE) } # Create new id for identification on screen df2 <- df %>% group_by(type) %>% arrange(filename) %>% mutate(id = paste0(type, 1:n())) rplexos_message("Found files:") for (i in 1:nrow(df2)) rplexos_message("\t", df2$id[i], ":\t", df2$filename[i], sep = "") rplexos_message("") # Process files rplexos_message("Processing files:") if (!is_parallel_rplexos()) { df2 %>% group_by(id) %>% do(if(.$type == "I") { process_input(.$filename) data.frame() } else { process_solution(.$filename, keep.temp) data.frame() }) } else { foreach(i = df2$id, .packages = c("dplyr", "rplexos", "DBI", "RSQLite")) %dopar% { df3 <- df2 %>% filter(id == i) if (df3$type == "I") { process_input(df3$filename) } else { process_solution(df3$filename, keep.temp) } } } invisible(TRUE) }
/R/process_folder.R
no_license
NREL/rplexos
R
false
false
4,344
r
#' Convert PLEXOS files to SQLite databases #' #' Functions to process PLEXOS solution and input files and dump them into SQLite databases #' that are easier to read. #' #' \code{process_solution} converts a single zipped PLEXOS solution into a SQLite database. #' The resulting database file has a simplified structure so data can be retrieved easily. #' The temporary database contains the original data structure in the PLEXOS solution file. #' #' \code{process_input} converts a single PLEXOS input file (in XML form) solution into a #' SQLite database. The database includes the raw tables and a series of views that better #' organize the data. #' #' \code{process_folders} is used to process one or more folders. It searches each folder for #' PLEXOS solution or input files and applies \code{process_solution} or #' \code{process_input} to each one of them. #' #' The default folder is the working directory. If the wildcard \code{"*"} is provided, all #' the folders in the working directory will be processed (the list of folders if provided by #' the \code{\link{list_folders}} function). #' #' Do not rename the SQLite databases created with these functions. Other code expects #' those filenames to remain unchanged. #' #' @param folders Folder(s) to process (See details) #' @param file Single PLEXOS solution or input file to process #' @param keep.temp Should temporary databases be preserved? 
#' #' @examples #' # Process the folder with the solution file provided by rplexos #' location_solution <- location_solution_rplexos() #' process_folder(location_solution) #' #' # Process the folder with the input file provided by rplexos #' # location_input <- location_input_rplexos() (for the moment, there is a bug in the soln file) #' # process_folder(location_input) #' #' # Other examples #' \dontrun{process_folder()} #' \dontrun{process_solution("HiWind/Model WWSIS_c_RT_CoreB_M01_SC3 Solution.zip")} #' \dontrun{process_input("WWSIS model.xml")} #' #' @export process_folder <- function(folders = ".", keep.temp = FALSE) { # Check inputs stopifnot(is.character(folders), is.logical(keep.temp), length(keep.temp) == 1L) check_is_folder(folders) # Check for wildcard if (length(folders) == 1L) { if (identical(folders, "*")) { folders <- list_folders() } } # Check that folders exist are.dirs <- file.info(folders)$isdir are.dirs[is.na(are.dirs)] <- FALSE if(!all(are.dirs)) { not.dirs <- folders[!are.dirs] stop(paste(not.dirs, collapse = ", "), " are not valid folders", call. = FALSE) } # Function to list PLEXOS files in each folder plexos_list_files <- function(df) { filename <- list.files(df$folder, ".xml$|.XML$|.zip$|.ZIP$", full.names = TRUE) data_frame(type = ifelse(grepl(".xml$|.XML$", filename), "I", "S"), filename) } # Get database file names df <- data.frame(folder = folders, stringsAsFactors = FALSE) %>% group_by(folder) %>% do(plexos_list_files(.)) # Error if all folders were empty if (nrow(df) == 0L) stop("No input/solution files were found", call. = FALSE) # Check for folders without databases folder.missing <- setdiff(folders, df$folder) if (length(folder.missing) > 0L) { warning("No databases found in folder", ifelse(length(folder.missing) == 1L, ": ", "s: "), paste(folder.missing, collapse = ", "), call. 
= FALSE) } # Create new id for identification on screen df2 <- df %>% group_by(type) %>% arrange(filename) %>% mutate(id = paste0(type, 1:n())) rplexos_message("Found files:") for (i in 1:nrow(df2)) rplexos_message("\t", df2$id[i], ":\t", df2$filename[i], sep = "") rplexos_message("") # Process files rplexos_message("Processing files:") if (!is_parallel_rplexos()) { df2 %>% group_by(id) %>% do(if(.$type == "I") { process_input(.$filename) data.frame() } else { process_solution(.$filename, keep.temp) data.frame() }) } else { foreach(i = df2$id, .packages = c("dplyr", "rplexos", "DBI", "RSQLite")) %dopar% { df3 <- df2 %>% filter(id == i) if (df3$type == "I") { process_input(df3$filename) } else { process_solution(df3$filename, keep.temp) } } } invisible(TRUE) }
################################# # # maxnet ENMdetails object #### ################################# # maxnet.name <- "maxnet" maxnet.fun <- maxnet::maxnet maxnet.msgs <- function(tune.args, other.settings) { if(!("rm" %in% names(tune.args)) | !("fc" %in% names(tune.args))) { stop("Maxent settings must include 'rm' (regularization multiplier) and 'fc' (feature class) settings. See ?tune.args for details.") }else{ if(!is.numeric(tune.args[["rm"]])) { stop("Please input numeric values for 'rm' settings for Maxent.") } all.fc <- unlist(sapply(1:5, function(x) apply(combn(c("L","Q","H","P","T"), x), 2, function(y) paste(y, collapse = "")))) if(any(!tune.args[["fc"]] %in% all.fc)) { stop("Please input accepted values for 'fc' settings for Maxent.") } msg <- paste0("maxnet from maxnet package v", packageVersion('maxnet')) return(msg) } } maxnet.args <- function(occs.z, bg.z, tune.i, other.settings) { out <- list() out$data <- rbind(occs.z, bg.z) out$p <- c(rep(1, nrow(occs.z)), rep(0, nrow(bg.z))) out$f <- maxnet::maxnet.formula(out$p, out$data, classes = tolower(tune.i$fc)) out$regmult <- tune.i$rm # some models fail to converge if this parameter is not set to TRUE # usually the case with sparse datasets out$addsamplestobackground <- TRUE out <- c(out, other.settings$other.args) return(out) } maxnet.predict <- function(mod, envs, other.settings) { # function to generate a prediction Raster* when raster data is specified as envs, # and a prediction data frame when a data frame is specified as envs if(inherits(envs, "BasicRaster") == TRUE) { envs.n <- raster::nlayers(envs) envs.pts <- raster::getValues(envs) %>% as.data.frame() mxnet.p <- predict(mod, envs.pts, type = other.settings$pred.type, clamp = FALSE, other.settings$other.args) envs.pts[as.numeric(row.names(mxnet.p)), "pred"] <- mxnet.p pred <- raster::rasterFromXYZ(cbind(raster::coordinates(envs), envs.pts$pred), res=raster::res(envs), crs = raster::crs(envs)) }else{ # otherwise, envs is data frame, so return 
data frame of predicted values pred <- predict(mod, envs, type = other.settings$pred.type, na.rm = TRUE, clamp = FALSE, other.settings$other.args) %>% as.numeric() } return(pred) } maxnet.ncoefs <- function(mod) { length(mod$betas) } # no existing method in model object for variable importance maxnet.varimp <- function(mod) { NULL } #' @title ENMdetails maxnet #' @description This is the ENMdetails implementation for maxnet, the R version of #' the Maxent algorithm. The configuration for running the model now includes addsamplestobackground = TRUE, #' which explicitly adds presences to the background for model training, though as the current #' version of maxnet has this set to TRUE as default, behavior between ENMeval versions should not differ. #' @export enm.maxnet <- ENMdetails(name = maxnet.name, fun = maxnet.fun, msgs = maxnet.msgs, args = maxnet.args, predict = maxnet.predict, ncoefs = maxnet.ncoefs, varimp = maxnet.varimp)
/R/enm.maxnet.R
no_license
andrepazv/ENMeval
R
false
false
3,148
r
################################# # # maxnet ENMdetails object #### ################################# # maxnet.name <- "maxnet" maxnet.fun <- maxnet::maxnet maxnet.msgs <- function(tune.args, other.settings) { if(!("rm" %in% names(tune.args)) | !("fc" %in% names(tune.args))) { stop("Maxent settings must include 'rm' (regularization multiplier) and 'fc' (feature class) settings. See ?tune.args for details.") }else{ if(!is.numeric(tune.args[["rm"]])) { stop("Please input numeric values for 'rm' settings for Maxent.") } all.fc <- unlist(sapply(1:5, function(x) apply(combn(c("L","Q","H","P","T"), x), 2, function(y) paste(y, collapse = "")))) if(any(!tune.args[["fc"]] %in% all.fc)) { stop("Please input accepted values for 'fc' settings for Maxent.") } msg <- paste0("maxnet from maxnet package v", packageVersion('maxnet')) return(msg) } } maxnet.args <- function(occs.z, bg.z, tune.i, other.settings) { out <- list() out$data <- rbind(occs.z, bg.z) out$p <- c(rep(1, nrow(occs.z)), rep(0, nrow(bg.z))) out$f <- maxnet::maxnet.formula(out$p, out$data, classes = tolower(tune.i$fc)) out$regmult <- tune.i$rm # some models fail to converge if this parameter is not set to TRUE # usually the case with sparse datasets out$addsamplestobackground <- TRUE out <- c(out, other.settings$other.args) return(out) } maxnet.predict <- function(mod, envs, other.settings) { # function to generate a prediction Raster* when raster data is specified as envs, # and a prediction data frame when a data frame is specified as envs if(inherits(envs, "BasicRaster") == TRUE) { envs.n <- raster::nlayers(envs) envs.pts <- raster::getValues(envs) %>% as.data.frame() mxnet.p <- predict(mod, envs.pts, type = other.settings$pred.type, clamp = FALSE, other.settings$other.args) envs.pts[as.numeric(row.names(mxnet.p)), "pred"] <- mxnet.p pred <- raster::rasterFromXYZ(cbind(raster::coordinates(envs), envs.pts$pred), res=raster::res(envs), crs = raster::crs(envs)) }else{ # otherwise, envs is data frame, so return 
data frame of predicted values pred <- predict(mod, envs, type = other.settings$pred.type, na.rm = TRUE, clamp = FALSE, other.settings$other.args) %>% as.numeric() } return(pred) } maxnet.ncoefs <- function(mod) { length(mod$betas) } # no existing method in model object for variable importance maxnet.varimp <- function(mod) { NULL } #' @title ENMdetails maxnet #' @description This is the ENMdetails implementation for maxnet, the R version of #' the Maxent algorithm. The configuration for running the model now includes addsamplestobackground = TRUE, #' which explicitly adds presences to the background for model training, though as the current #' version of maxnet has this set to TRUE as default, behavior between ENMeval versions should not differ. #' @export enm.maxnet <- ENMdetails(name = maxnet.name, fun = maxnet.fun, msgs = maxnet.msgs, args = maxnet.args, predict = maxnet.predict, ncoefs = maxnet.ncoefs, varimp = maxnet.varimp)
#!/usr/bin/env Rscript
require("RPostgreSQL")

# Database password.
# NOTE(review): credentials are hard-coded in the script; consider reading
# them from an environment variable (Sys.getenv) or a config file kept out of
# version control.
pw <- "testgres"

# Loads the PostgreSQL driver.
drv <- dbDriver("PostgreSQL")

# Creates a connection to the postgres database; note that "con" will be used
# later in each connection to the database.
con <- dbConnect(drv,
                 dbname = "Field_Station2",
                 host = "149.165.170.97",
                 port = 5432,
                 user = "postgres",
                 password = pw)

# Removes the password from the workspace.
rm(pw)

# Today's date, formatted to match the 'day' column of sensor_data2.
date <- format(Sys.Date(), "%m-%d-%Y")

# Retrieve today's rows for one device from the database.
getDataById <- function(device_id) {
  db_values <- dbGetQuery(con, sprintf("SELECT * FROM sensor_data2 WHERE device_id = '%s' AND day = '%s'", device_id, date))
  return(db_values)
}

df_device_1 <- getDataById("ApplePi")
df_device_2 <- getDataById("rpiX")
df_device_3 <- getDataById("Raspi")

# Convert the retrieved columns to numeric/time values in order to plot.
time_1 <- as.POSIXlt(strptime(df_device_1$time, "%H:%M:%S"))
time_2 <- as.POSIXlt(strptime(df_device_2$time, "%H:%M:%S"))
time_3 <- as.POSIXlt(strptime(df_device_3$time, "%H:%M:%S"))

temp_1 <- as.numeric(as.character(df_device_1$temp))
temp_2 <- as.numeric(as.character(df_device_2$temp))
temp_3 <- as.numeric(as.character(df_device_3$temp))

humid_1 <- as.numeric(as.character(df_device_1$humid))
humid_2 <- as.numeric(as.character(df_device_2$humid))
humid_3 <- as.numeric(as.character(df_device_3$humid))

press_1 <- as.numeric(as.character(df_device_1$press))
press_2 <- as.numeric(as.character(df_device_2$press))
press_3 <- as.numeric(as.character(df_device_3$press))

require(ggplot2)

# Plot data using the data frames and save each plot to an individual file.
ggplot() +
  labs(title = paste("Temperature: ", date), x = "time", y = "temperature[F]") +
  geom_line(aes(x = time_1, y = temp_1), colour = "blue") +
  geom_line(aes(x = time_2, y = temp_2), colour = "red") +
  geom_line(aes(x = time_3, y = temp_3), colour = "green") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggsave("temperature_plot.jpeg", path = "/var/www/html/images")

ggplot() +
  labs(title = paste("Humidity: ", date), x = "time", y = "humidity[%]") +
  geom_line(aes(x = time_1, y = humid_1), colour = "blue") +
  geom_line(aes(x = time_2, y = humid_2), colour = "red") +
  geom_line(aes(x = time_3, y = humid_3), colour = "green") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggsave("humidity_plot.jpeg", path = "/var/www/html/images")

ggplot() +
  labs(title = paste("Pressure: ", date), x = "time", y = "pressure[hPa]") +
  geom_line(aes(x = time_1, y = press_1), colour = "blue") +
  geom_line(aes(x = time_2, y = press_2), colour = "red") +
  geom_line(aes(x = time_3, y = press_3), colour = "green") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggsave("pressure_plot.jpeg", path = "/var/www/html/images")

# End database connection. Reuse the driver created above rather than
# instantiating a second one (the original called dbDriver() again here).
lapply(dbListConnections(drv = drv), function(x) dbDisconnect(conn = x))
/r_script
no_license
vika19/REU-Field-Station
R
false
false
2,916
#!/usr/bin/env Rscript
require("RPostgreSQL")

# Database password.
# NOTE(review): credentials are hard-coded in the script; consider reading
# them from an environment variable (Sys.getenv) or a config file kept out of
# version control.
pw <- "testgres"

# Loads the PostgreSQL driver.
drv <- dbDriver("PostgreSQL")

# Creates a connection to the postgres database; note that "con" will be used
# later in each connection to the database.
con <- dbConnect(drv,
                 dbname = "Field_Station2",
                 host = "149.165.170.97",
                 port = 5432,
                 user = "postgres",
                 password = pw)

# Removes the password from the workspace.
rm(pw)

# Today's date, formatted to match the 'day' column of sensor_data2.
date <- format(Sys.Date(), "%m-%d-%Y")

# Retrieve today's rows for one device from the database.
getDataById <- function(device_id) {
  db_values <- dbGetQuery(con, sprintf("SELECT * FROM sensor_data2 WHERE device_id = '%s' AND day = '%s'", device_id, date))
  return(db_values)
}

df_device_1 <- getDataById("ApplePi")
df_device_2 <- getDataById("rpiX")
df_device_3 <- getDataById("Raspi")

# Convert the retrieved columns to numeric/time values in order to plot.
time_1 <- as.POSIXlt(strptime(df_device_1$time, "%H:%M:%S"))
time_2 <- as.POSIXlt(strptime(df_device_2$time, "%H:%M:%S"))
time_3 <- as.POSIXlt(strptime(df_device_3$time, "%H:%M:%S"))

temp_1 <- as.numeric(as.character(df_device_1$temp))
temp_2 <- as.numeric(as.character(df_device_2$temp))
temp_3 <- as.numeric(as.character(df_device_3$temp))

humid_1 <- as.numeric(as.character(df_device_1$humid))
humid_2 <- as.numeric(as.character(df_device_2$humid))
humid_3 <- as.numeric(as.character(df_device_3$humid))

press_1 <- as.numeric(as.character(df_device_1$press))
press_2 <- as.numeric(as.character(df_device_2$press))
press_3 <- as.numeric(as.character(df_device_3$press))

require(ggplot2)

# Plot data using the data frames and save each plot to an individual file.
ggplot() +
  labs(title = paste("Temperature: ", date), x = "time", y = "temperature[F]") +
  geom_line(aes(x = time_1, y = temp_1), colour = "blue") +
  geom_line(aes(x = time_2, y = temp_2), colour = "red") +
  geom_line(aes(x = time_3, y = temp_3), colour = "green") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggsave("temperature_plot.jpeg", path = "/var/www/html/images")

ggplot() +
  labs(title = paste("Humidity: ", date), x = "time", y = "humidity[%]") +
  geom_line(aes(x = time_1, y = humid_1), colour = "blue") +
  geom_line(aes(x = time_2, y = humid_2), colour = "red") +
  geom_line(aes(x = time_3, y = humid_3), colour = "green") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggsave("humidity_plot.jpeg", path = "/var/www/html/images")

ggplot() +
  labs(title = paste("Pressure: ", date), x = "time", y = "pressure[hPa]") +
  geom_line(aes(x = time_1, y = press_1), colour = "blue") +
  geom_line(aes(x = time_2, y = press_2), colour = "red") +
  geom_line(aes(x = time_3, y = press_3), colour = "green") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggsave("pressure_plot.jpeg", path = "/var/www/html/images")

# End database connection. Reuse the driver created above rather than
# instantiating a second one (the original called dbDriver() again here).
lapply(dbListConnections(drv = drv), function(x) dbDisconnect(conn = x))
###### Create correlated random variable ######

x <- rnorm(10000)
y <- rnorm(10000)
rho <- 0.3

# Test correlation (both close to 0)
cov(x, y)
cor(x, y)

# Design correlated random variable
z <- rho * x + sqrt(1 - rho ^ 2) * y
cov(x, z)
cor(x, z)  # these numbers are the same because they follow the same normal dist

###### 3-d plot for joint distribution ######
# install.packages("LaplacesDemon")
# install.packages("plot3D")
# install.packages("rgl")
library(LaplacesDemon)
library(plot3D)
library(rgl)

# Level set graph
joint.density.plot(x, y, Title = NULL, contour = TRUE, color = FALSE, Trace = NULL)

# 3-dimensional histogram
## Create cuts:
x_c <- cut(x, 40)
y_c <- cut(y, 40)
x_c

## Calculate joint counts at cut levels:
## (NOTE: this temporarily shadows the correlated variable z defined above;
## z is re-created further down before it is used again.)
z <- table(x_c, y_c)

## Plot as a 3D histogram:
hist3D(z = z, border = "black")

# probability density function
x_pdf <- seq(-5, 5, length = 100)  # set boundary for the axis
y_pdf <- seq(-5, 5, length = 100)
z_pdf <- outer(x_pdf, y_pdf, function(x, y) dnorm(x, 0, 1) * dnorm(y, 0, 1))
persp3d(x_pdf, y_pdf, z_pdf, col = rainbow(100))

# probability density function for dependent r.v.s
# Install only when missing, instead of unconditionally re-installing on
# every run of the script.
if (!requireNamespace("mvtnorm", quietly = TRUE)) install.packages("mvtnorm")
library(mvtnorm)
x_pdf <- seq(-5, 5, length = 100)  # set boundary for the axis
y_pdf <- seq(-5, 5, length = 100)
z_pdf <- outer(x_pdf, y_pdf, function(x, y)
  dmvnorm(cbind(x, y), mean = c(0, 0), sigma = cbind(c(1, 0.2), c(0.2, 1))))
persp3d(x_pdf, y_pdf, z_pdf, col = rainbow(100))

# Bivariate normal density surface for a given correlation rho.
create.3d <- function(rho) {
  z_pdf <- outer(x_pdf, y_pdf, function(x, y)
    dmvnorm(cbind(x, y), mean = c(0, 0),
            sigma = cbind(c(1, rho * 1 * 1), c(rho * 1 * 1, 1))))
  persp3d(x_pdf, y_pdf, z_pdf, col = rainbow(100))
}
create.3d(0.0)
create.3d(0.3)
create.3d(0.6)
create.3d(0.9)

###### Create correlated random variable ######
# Re-draw x and y and rebuild z (which was overwritten by the histogram table).

x <- rnorm(10000)
y <- rnorm(10000)
rho <- 0.3

# Test correlation (both close to 0)
cov(x, y)
cor(x, y)

# Design correlated random variable
z <- rho * x + sqrt(1 - rho ^ 2) * y
cov(x, z)
cor(x, z)

###### Conditional probability & Conditional expectation ######
pair_xy <- cbind(x, y)
head(pair_xy)
class(pair_xy)
is.matrix(pair_xy)

which(x >= 0)
y[which(x >= 0)]

# Conditional probability Prob(y>0|x>0)
# Recall the equality P(A|B)P(B)=P(A & B)
Conditional_Prob <- length(which(y[which(x > 0)] > 0)) / length(which(x > 0))
Conditional_Prob

# Probability Prob(x>0, y>0)
intersect(which(x > 0), which(y > 0))
sum(x > 0 & y > 0)
Inter_Prob <- length(intersect(which(x > 0), which(y > 0))) / length(x)
Inter_Prob

# Probability Prob(x>0)
Prob_x <- length(which(x > 0)) / length(x)
Prob_x

Inter_Prob / Prob_x  # Prob(y>0 | x>0) same as the conditional probability

####### Conditional expectation ########
# Conditional expectation E[Y|x>0]
mean(y)
mean(y[which(x > 0)])

### Let's do the same thing for X and Z.
# Conditional probability Prob(z>0|x>0)
# Recall the equality P(A|B)P(B)=P(A & B)
Conditional_Prob <- length(which(z[which(x > 0)] > 0)) / length(which(x > 0))
Conditional_Prob

# Probability Prob(z>0)
Prob_z <- length(which(z > 0)) / length(z)
Prob_z

####### Conditional expectation ########
# Conditional expectation E[Z|x>0]
mean(z)
mean(z[which(x > 0)])

###### Download stock prices and calculate returns ######
# Install only when missing, instead of unconditionally re-installing.
if (!requireNamespace("quantmod", quietly = TRUE)) install.packages("quantmod")
library(quantmod)
name_list <- c("MSFT", "GOOG", "AAPL")
getSymbols(name_list, from = "2010-01-01", to = "2019-12-31", src = "yahoo")

# Simple return
diff(AAPL$AAPL.Adjusted)
diff(AAPL$AAPL.Adjusted) / AAPL$AAPL.Adjusted[-length(AAPL$AAPL.Adjusted)]

# Log return
log_returns = diff(log(AAPL$AAPL.Adjusted))
log_returns

# Lag the series by one day to study serial dependence of returns.
n <- length(log_returns)
x <- as.vector(log_returns[2:(n - 1)])
y <- as.vector(log_returns[3:n])
head(cbind(x, y))

# Check covariance and correlation
cov(x, y)
cor(x, y)
cov(cbind(x, y))
cor(cbind(x, y))

### Crossectional correlation
aapl_ret = diff(log(AAPL$AAPL.Adjusted))[-1]
msft_ret = diff(log(MSFT$MSFT.Adjusted))[-1]
goog_ret = diff(log(GOOG$GOOG.Adjusted))[-1]

data <- cbind(aapl_ret, msft_ret, goog_ret)
data
cov(data)
cor(data)

mean(msft_ret)
mean(msft_ret[aapl_ret > 0])
/2021S QF202 Recitation 2.R
no_license
lmaksymenko/QF202
R
false
false
4,253
r
###### Create correlated random variable ######

x <- rnorm(10000)
y <- rnorm(10000)
rho <- 0.3

# Test correlation (both close to 0)
cov(x, y)
cor(x, y)

# Design correlated random variable
z <- rho * x + sqrt(1 - rho ^ 2) * y
cov(x, z)
cor(x, z)  # these numbers are the same because they follow the same normal dist

###### 3-d plot for joint distribution ######
# install.packages("LaplacesDemon")
# install.packages("plot3D")
# install.packages("rgl")
library(LaplacesDemon)
library(plot3D)
library(rgl)

# Level set graph
joint.density.plot(x, y, Title = NULL, contour = TRUE, color = FALSE, Trace = NULL)

# 3-dimensional histogram
## Create cuts:
x_c <- cut(x, 40)
y_c <- cut(y, 40)
x_c

## Calculate joint counts at cut levels:
## (NOTE: this temporarily shadows the correlated variable z defined above;
## z is re-created further down before it is used again.)
z <- table(x_c, y_c)

## Plot as a 3D histogram:
hist3D(z = z, border = "black")

# probability density function
x_pdf <- seq(-5, 5, length = 100)  # set boundary for the axis
y_pdf <- seq(-5, 5, length = 100)
z_pdf <- outer(x_pdf, y_pdf, function(x, y) dnorm(x, 0, 1) * dnorm(y, 0, 1))
persp3d(x_pdf, y_pdf, z_pdf, col = rainbow(100))

# probability density function for dependent r.v.s
# Install only when missing, instead of unconditionally re-installing on
# every run of the script.
if (!requireNamespace("mvtnorm", quietly = TRUE)) install.packages("mvtnorm")
library(mvtnorm)
x_pdf <- seq(-5, 5, length = 100)  # set boundary for the axis
y_pdf <- seq(-5, 5, length = 100)
z_pdf <- outer(x_pdf, y_pdf, function(x, y)
  dmvnorm(cbind(x, y), mean = c(0, 0), sigma = cbind(c(1, 0.2), c(0.2, 1))))
persp3d(x_pdf, y_pdf, z_pdf, col = rainbow(100))

# Bivariate normal density surface for a given correlation rho.
create.3d <- function(rho) {
  z_pdf <- outer(x_pdf, y_pdf, function(x, y)
    dmvnorm(cbind(x, y), mean = c(0, 0),
            sigma = cbind(c(1, rho * 1 * 1), c(rho * 1 * 1, 1))))
  persp3d(x_pdf, y_pdf, z_pdf, col = rainbow(100))
}
create.3d(0.0)
create.3d(0.3)
create.3d(0.6)
create.3d(0.9)

###### Create correlated random variable ######
# Re-draw x and y and rebuild z (which was overwritten by the histogram table).

x <- rnorm(10000)
y <- rnorm(10000)
rho <- 0.3

# Test correlation (both close to 0)
cov(x, y)
cor(x, y)

# Design correlated random variable
z <- rho * x + sqrt(1 - rho ^ 2) * y
cov(x, z)
cor(x, z)

###### Conditional probability & Conditional expectation ######
pair_xy <- cbind(x, y)
head(pair_xy)
class(pair_xy)
is.matrix(pair_xy)

which(x >= 0)
y[which(x >= 0)]

# Conditional probability Prob(y>0|x>0)
# Recall the equality P(A|B)P(B)=P(A & B)
Conditional_Prob <- length(which(y[which(x > 0)] > 0)) / length(which(x > 0))
Conditional_Prob

# Probability Prob(x>0, y>0)
intersect(which(x > 0), which(y > 0))
sum(x > 0 & y > 0)
Inter_Prob <- length(intersect(which(x > 0), which(y > 0))) / length(x)
Inter_Prob

# Probability Prob(x>0)
Prob_x <- length(which(x > 0)) / length(x)
Prob_x

Inter_Prob / Prob_x  # Prob(y>0 | x>0) same as the conditional probability

####### Conditional expectation ########
# Conditional expectation E[Y|x>0]
mean(y)
mean(y[which(x > 0)])

### Let's do the same thing for X and Z.
# Conditional probability Prob(z>0|x>0)
# Recall the equality P(A|B)P(B)=P(A & B)
Conditional_Prob <- length(which(z[which(x > 0)] > 0)) / length(which(x > 0))
Conditional_Prob

# Probability Prob(z>0)
Prob_z <- length(which(z > 0)) / length(z)
Prob_z

####### Conditional expectation ########
# Conditional expectation E[Z|x>0]
mean(z)
mean(z[which(x > 0)])

###### Download stock prices and calculate returns ######
# Install only when missing, instead of unconditionally re-installing.
if (!requireNamespace("quantmod", quietly = TRUE)) install.packages("quantmod")
library(quantmod)
name_list <- c("MSFT", "GOOG", "AAPL")
getSymbols(name_list, from = "2010-01-01", to = "2019-12-31", src = "yahoo")

# Simple return
diff(AAPL$AAPL.Adjusted)
diff(AAPL$AAPL.Adjusted) / AAPL$AAPL.Adjusted[-length(AAPL$AAPL.Adjusted)]

# Log return
log_returns = diff(log(AAPL$AAPL.Adjusted))
log_returns

# Lag the series by one day to study serial dependence of returns.
n <- length(log_returns)
x <- as.vector(log_returns[2:(n - 1)])
y <- as.vector(log_returns[3:n])
head(cbind(x, y))

# Check covariance and correlation
cov(x, y)
cor(x, y)
cov(cbind(x, y))
cor(cbind(x, y))

### Crossectional correlation
aapl_ret = diff(log(AAPL$AAPL.Adjusted))[-1]
msft_ret = diff(log(MSFT$MSFT.Adjusted))[-1]
goog_ret = diff(log(GOOG$GOOG.Adjusted))[-1]

data <- cbind(aapl_ret, msft_ret, goog_ret)
data
cov(data)
cor(data)

mean(msft_ret)
mean(msft_ret[aapl_ret > 0])
library(BiDimRegression)

### Name: predict.lm2
### Title: Predict method for Bidimensional Regression Model Fits
### Aliases: predict.lm2

### ** Examples

# Fit a bidimensional regression of the dependent coordinate pair
# (depV1, depV2) on the independent pair (indepV1, indepV2), restricted to a
# Euclidean transformation, then predict dependent coordinates for the
# independent coordinates stored in columns 3:4 of NakayaData.
lm2euc <- lm2(depV1+depV2~indepV1+indepV2, NakayaData, transformation = 'Euclidean')
predict(lm2euc, NakayaData[, 3:4])
/data/genthat_extracted_code/BiDimRegression/examples/predict.lm2.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
281
r
library(BiDimRegression)

### Name: predict.lm2
### Title: Predict method for Bidimensional Regression Model Fits
### Aliases: predict.lm2

### ** Examples

# Fit a bidimensional regression of the dependent coordinate pair
# (depV1, depV2) on the independent pair (indepV1, indepV2), restricted to a
# Euclidean transformation, then predict dependent coordinates for the
# independent coordinates stored in columns 3:4 of NakayaData.
lm2euc <- lm2(depV1+depV2~indepV1+indepV2, NakayaData, transformation = 'Euclidean')
predict(lm2euc, NakayaData[, 3:4])
# The ratio of boys to girls born in Russia is 1.09:1.
# What proportion of Russian families with exactly 6 children will have at
# least 3 boys? (Ignore the probability of multiple births.)

# Probability a single birth is a boy, from the 1.09:1 ratio.
p_boy <- 1.09 / (1 + 1.09)

# P(at least 3 boys out of 6) = sum of binomial probabilities for k = 3..6.
# dbinom() is vectorized, so no accumulation loop is needed.
ans <- sum(dbinom(3:6, size = 6, prob = p_boy))

cat(round(ans, digits = 3), "\n")
/Artificial Intelligence/Binomial Distribution #2/main.r
no_license
mudream4869/hackerrank-code
R
false
false
291
r
# The ratio of boys to girls born in Russia is 1.09:1.
# What proportion of Russian families with exactly 6 children will have at
# least 3 boys? (Ignore the probability of multiple births.)

# Probability a single birth is a boy, from the 1.09:1 ratio.
p_boy <- 1.09 / (1 + 1.09)

# P(at least 3 boys out of 6) = sum of binomial probabilities for k = 3..6.
# dbinom() is vectorized, so no accumulation loop is needed.
ans <- sum(dbinom(3:6, size = 6, prob = p_boy))

cat(round(ans, digits = 3), "\n")
#' Remove all but the specified individuals from a genlight \{adegenet\} object
#'
#' The script, having deleted individuals, optionally identifies resultant monomorphic loci or loci
#' with all values missing and deletes them (using gl.filter.monomorphs.r). The script also optionally
#' recalculates statistics made redundant by the deletion of individuals from the dataset.
#'
#' The script returns a genlight object with the individuals deleted and, optionally, the recalculated locus metadata.
#'
#' @param x -- name of the genlight object containing SNP genotypes or a genind object containing presence/absence data [required]
#' @param ind.list -- a list of individuals to be retained [required]
#' @param recalc -- Recalculate the locus metadata statistics [default FALSE]
#' @param mono.rm -- Remove monomorphic loci [default FALSE]
#' @param verbose -- verbosity: 0, silent or fatal errors; 1, begin and end; 2, progress log ; 3, progress and results summary; 5, full report [default 2 or as specified using gl.set.verbosity]
#' @return A genlight object with the reduced data
#' @export
#' @author Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
#' @examples
#' # SNP data
#' gl2 <- gl.keep.ind(testset.gl, ind.list=c("AA019073","AA004859"))
#' # Tag P/A data
#' gs2 <- gl.keep.ind(testset.gs, ind.list=c("AA020656","AA19077","AA004859"))
#'
#' @seealso \code{\link{gl.filter.monomorphs}} for when mono.rm=TRUE, \code{\link{gl.recalc.metrics}} for when recalc=TRUE
#' @seealso \code{\link{gl.drop.ind}} to drop rather than keep specified individuals

gl.keep.ind <- function(x, ind.list, recalc = FALSE, mono.rm = FALSE, verbose = NULL) {

  # TRAP COMMAND, SET VERSION
  funname <- match.call()[[1]]
  build <- "Jacob"

  # SET VERBOSITY
  if (is.null(verbose)) {
    if (!is.null(x@other$verbose)) {
      verbose <- x@other$verbose
    } else {
      verbose <- 2
    }
  }
  # Scalar range check: '||' rather than the vectorized '|'.
  if (verbose < 0 || verbose > 5) {
    cat(paste("  Warning: Parameter 'verbose' must be an integer between 0 [silent] and 5 [full report], set to 2\n"))
    verbose <- 2
  }

  # FLAG SCRIPT START
  if (verbose >= 1) {
    if (verbose == 5) {
      cat("Starting", funname, "[ Build =", build, "]\n")
    } else {
      cat("Starting", funname, "\n")
    }
  }

  # STANDARD ERROR CHECKING
  # inherits() is the robust class test (class() may return multiple values).
  if (!inherits(x, "genlight")) {
    stop("Fatal Error: genlight object required!\n")
  }

  if (all(x@ploidy == 1)) {
    if (verbose >= 2) {cat("  Processing Presence/Absence (SilicoDArT) data\n")}
    data.type <- "SilicoDArT"
  } else if (all(x@ploidy == 2)) {
    if (verbose >= 2) {cat("  Processing a SNP dataset\n")}
    data.type <- "SNP"
  } else {
    stop("Fatal Error: Ploidy must be universally 1 (fragment P/A data) or 2 (SNP data)")
  }

  # FUNCTION SPECIFIC ERROR CHECKING
  # Drop listed individuals that are not in the dataset, with a warning each.
  for (case in ind.list) {
    if (!(case %in% indNames(x))) {
      cat("  Warning: Listed individual", case, "not present in the dataset -- ignored\n")
      ind.list <- ind.list[!(ind.list == case)]
    }
  }
  if (length(ind.list) == 0) {
    stop("  Fatal Error: no individuals listed to keep!\n")
  }

  # DO THE JOB
  if (verbose >= 2) {
    cat("  Deleting all but the listed individuals", ind.list, "\n")
  }

  # Keep only the listed individuals.
  x <- x[x$ind.names %in% ind.list]

  # Monomorphic loci may have been created by the deletion, so mark the flag.
  # BUG FIX: the original used '==' (a no-op comparison) where assignment was
  # clearly intended.
  x@other$loc.metrics.flags$monomorphs <- FALSE

  # Remove monomorphic loci
  if (mono.rm) {
    if (verbose >= 2) {cat("  Deleting monomorphic loci\n")}
    x <- gl.filter.monomorphs(x, verbose = 0)
  }
  # Check monomorphs have been removed
  if (x@other$loc.metrics.flags$monomorphs == FALSE) {
    if (verbose >= 2) {
      cat("  Warning: Resultant dataset may contain monomorphic loci\n")
    }
  }

  # Recalculate statistics
  if (recalc) {
    x <- gl.recalc.metrics(x, verbose = 0)
    if (verbose >= 2) {cat("  Recalculating locus metrics\n")}
  } else {
    if (verbose >= 2) {
      cat("  Locus metrics not recalculated\n")
    }
    # BUG FIX: the flag reset was originally inside the verbosity check,
    # making the object's state depend on the verbose setting.
    x <- utils.reset.flags(x, verbose = 0)
  }

  # REPORT A SUMMARY
  if (verbose >= 3) {
    cat("  Summary of recoded dataset\n")
    cat(paste("    No. of loci:", nLoc(x), "\n"))
    cat(paste("    No. of individuals:", nInd(x), "\n"))
    cat(paste("    No. of populations:", nPop(x), "\n"))
  }

  # ADD TO HISTORY
  nh <- length(x@other$history)
  x@other$history[[nh + 1]] <- match.call()

  # FLAG SCRIPT END
  if (verbose > 0) {
    cat("Completed: gl.keep.ind\n")
  }

  return(x)
}
/R/gl.keep.ind.r
no_license
carlopacioni/dartR
R
false
false
4,632
r
#' Remove all but the specified individuals from a genlight \{adegenet\} object
#'
#' The script, having deleted individuals, optionally identifies resultant monomorphic loci or loci
#' with all values missing and deletes them (using gl.filter.monomorphs.r). The script also optionally
#' recalculates statistics made redundant by the deletion of individuals from the dataset.
#'
#' The script returns a genlight object with the individuals deleted and, optionally, the recalculated locus metadata.
#'
#' @param x -- name of the genlight object containing SNP genotypes or a genind object containing presence/absence data [required]
#' @param ind.list -- a list of individuals to be retained [required]
#' @param recalc -- Recalculate the locus metadata statistics [default FALSE]
#' @param mono.rm -- Remove monomorphic loci [default FALSE]
#' @param verbose -- verbosity: 0, silent or fatal errors; 1, begin and end; 2, progress log ; 3, progress and results summary; 5, full report [default 2 or as specified using gl.set.verbosity]
#' @return A genlight object with the reduced data
#' @export
#' @author Arthur Georges (Post to \url{https://groups.google.com/d/forum/dartr})
#' @examples
#' # SNP data
#' gl2 <- gl.keep.ind(testset.gl, ind.list=c("AA019073","AA004859"))
#' # Tag P/A data
#' gs2 <- gl.keep.ind(testset.gs, ind.list=c("AA020656","AA19077","AA004859"))
#'
#' @seealso \code{\link{gl.filter.monomorphs}} for when mono.rm=TRUE, \code{\link{gl.recalc.metrics}} for when recalc=TRUE
#' @seealso \code{\link{gl.drop.ind}} to drop rather than keep specified individuals

gl.keep.ind <- function(x, ind.list, recalc = FALSE, mono.rm = FALSE, verbose = NULL) {

  # TRAP COMMAND, SET VERSION
  funname <- match.call()[[1]]
  build <- "Jacob"

  # SET VERBOSITY
  if (is.null(verbose)) {
    if (!is.null(x@other$verbose)) {
      verbose <- x@other$verbose
    } else {
      verbose <- 2
    }
  }
  # Scalar range check: '||' rather than the vectorized '|'.
  if (verbose < 0 || verbose > 5) {
    cat(paste("  Warning: Parameter 'verbose' must be an integer between 0 [silent] and 5 [full report], set to 2\n"))
    verbose <- 2
  }

  # FLAG SCRIPT START
  if (verbose >= 1) {
    if (verbose == 5) {
      cat("Starting", funname, "[ Build =", build, "]\n")
    } else {
      cat("Starting", funname, "\n")
    }
  }

  # STANDARD ERROR CHECKING
  # inherits() is the robust class test (class() may return multiple values).
  if (!inherits(x, "genlight")) {
    stop("Fatal Error: genlight object required!\n")
  }

  if (all(x@ploidy == 1)) {
    if (verbose >= 2) {cat("  Processing Presence/Absence (SilicoDArT) data\n")}
    data.type <- "SilicoDArT"
  } else if (all(x@ploidy == 2)) {
    if (verbose >= 2) {cat("  Processing a SNP dataset\n")}
    data.type <- "SNP"
  } else {
    stop("Fatal Error: Ploidy must be universally 1 (fragment P/A data) or 2 (SNP data)")
  }

  # FUNCTION SPECIFIC ERROR CHECKING
  # Drop listed individuals that are not in the dataset, with a warning each.
  for (case in ind.list) {
    if (!(case %in% indNames(x))) {
      cat("  Warning: Listed individual", case, "not present in the dataset -- ignored\n")
      ind.list <- ind.list[!(ind.list == case)]
    }
  }
  if (length(ind.list) == 0) {
    stop("  Fatal Error: no individuals listed to keep!\n")
  }

  # DO THE JOB
  if (verbose >= 2) {
    cat("  Deleting all but the listed individuals", ind.list, "\n")
  }

  # Keep only the listed individuals.
  x <- x[x$ind.names %in% ind.list]

  # Monomorphic loci may have been created by the deletion, so mark the flag.
  # BUG FIX: the original used '==' (a no-op comparison) where assignment was
  # clearly intended.
  x@other$loc.metrics.flags$monomorphs <- FALSE

  # Remove monomorphic loci
  if (mono.rm) {
    if (verbose >= 2) {cat("  Deleting monomorphic loci\n")}
    x <- gl.filter.monomorphs(x, verbose = 0)
  }
  # Check monomorphs have been removed
  if (x@other$loc.metrics.flags$monomorphs == FALSE) {
    if (verbose >= 2) {
      cat("  Warning: Resultant dataset may contain monomorphic loci\n")
    }
  }

  # Recalculate statistics
  if (recalc) {
    x <- gl.recalc.metrics(x, verbose = 0)
    if (verbose >= 2) {cat("  Recalculating locus metrics\n")}
  } else {
    if (verbose >= 2) {
      cat("  Locus metrics not recalculated\n")
    }
    # BUG FIX: the flag reset was originally inside the verbosity check,
    # making the object's state depend on the verbose setting.
    x <- utils.reset.flags(x, verbose = 0)
  }

  # REPORT A SUMMARY
  if (verbose >= 3) {
    cat("  Summary of recoded dataset\n")
    cat(paste("    No. of loci:", nLoc(x), "\n"))
    cat(paste("    No. of individuals:", nInd(x), "\n"))
    cat(paste("    No. of populations:", nPop(x), "\n"))
  }

  # ADD TO HISTORY
  nh <- length(x@other$history)
  x@other$history[[nh + 1]] <- match.call()

  # FLAG SCRIPT END
  if (verbose > 0) {
    cat("Completed: gl.keep.ind\n")
  }

  return(x)
}
qmq <- function(x, y) {
  # Each operand must be numeric or a character GMP rational string.
  if (!(is.numeric(x) || is.character(x))) {
    stop("'x' not numeric or character")
  }
  if (!(is.numeric(y) || is.character(y))) {
    stop("'y' not numeric or character")
  }

  # Convert numeric input to the exact rational ("q") character form.
  if (is.numeric(x)) {
    x <- d2q(x)
  }
  if (is.numeric(y)) {
    y <- d2q(y)
  }

  # Defensive invariant: both operands are character at this point.
  if (!(is.character(x) && is.character(y))) {
    stop("Cannot happen!")
  }

  # Dispatch to the rcdd C back end; op code 2 presumably selects
  # subtraction ("qmq" = q minus q) -- confirm against the rcdd C source.
  .Call("qoq", x, y, as.integer(2), PACKAGE = "rcdd")
}
/rcdd/R/qmq.R
no_license
ingted/R-Examples
R
false
false
441
r
qmq <- function(x, y) {
  # Each operand must be numeric or a character GMP rational string.
  if (!(is.numeric(x) || is.character(x))) {
    stop("'x' not numeric or character")
  }
  if (!(is.numeric(y) || is.character(y))) {
    stop("'y' not numeric or character")
  }

  # Convert numeric input to the exact rational ("q") character form.
  if (is.numeric(x)) {
    x <- d2q(x)
  }
  if (is.numeric(y)) {
    y <- d2q(y)
  }

  # Defensive invariant: both operands are character at this point.
  if (!(is.character(x) && is.character(y))) {
    stop("Cannot happen!")
  }

  # Dispatch to the rcdd C back end; op code 2 presumably selects
  # subtraction ("qmq" = q minus q) -- confirm against the rcdd C source.
  .Call("qoq", x, y, as.integer(2), PACKAGE = "rcdd")
}
FindInitialVectors <- function(theta, utimes0, utimes1) {
  # Small base mass assigned around the threshold theta.
  eps <- 0.01

  below0 <- utimes0 < theta
  below1 <- utimes1 < theta

  w0 <- numeric(length(utimes0))
  w1 <- numeric(length(utimes1))

  # Before theta: each group-1 time gets mass eps, while the group-0 times
  # share a single mass of eps equally.
  w1[below1] <- eps
  w0[below0] <- eps / sum(below0)

  # Mass imbalance accumulated before theta (to be compensated later).
  gap <- sum(w1) - sum(w0)

  # At/after theta the roles flip: group 0 gets eps per time and the group-1
  # times share a single eps.
  w0[!below0] <- eps
  w1[!below1] <- eps / sum(!below1)

  # Place the pre-theta gap (plus eps) on the first group-0 time at or after
  # theta so the totals balance.
  first.pass <- min(which(!below0))
  w0[first.pass] <- gap + eps

  list(w0 = w0, w1 = w1)
}
/R/FindInitialVectors.R
no_license
nchenderson/DelayedSurvFit
R
false
false
536
r
FindInitialVectors <- function(theta, utimes0, utimes1) {
  # Small base mass assigned around the threshold theta.
  eps <- 0.01

  below0 <- utimes0 < theta
  below1 <- utimes1 < theta

  w0 <- numeric(length(utimes0))
  w1 <- numeric(length(utimes1))

  # Before theta: each group-1 time gets mass eps, while the group-0 times
  # share a single mass of eps equally.
  w1[below1] <- eps
  w0[below0] <- eps / sum(below0)

  # Mass imbalance accumulated before theta (to be compensated later).
  gap <- sum(w1) - sum(w0)

  # At/after theta the roles flip: group 0 gets eps per time and the group-1
  # times share a single eps.
  w0[!below0] <- eps
  w1[!below1] <- eps / sum(!below1)

  # Place the pre-theta gap (plus eps) on the first group-0 time at or after
  # theta so the totals balance.
  first.pass <- min(which(!below0))
  w0[first.pass] <- gap + eps

  list(w0 = w0, w1 = w1)
}
#' Optimize the Dimensional Anchors Position using the Graphviz algorithm
#'
#' Allows to compute the best arrangement of Dimensional Anchors so that
#' visualization efficiency (i.e. maintaining graph structure) is optimized.
#' The Graphviz algorithm is implemented in C++ for optimal computational efficiency.
#'
#' @param x a data.frame or matrix to be projected, with column names matching row names in springs
#' @param graph \code{igraph} object
#' @param attractG Number specifying the weight of the attractive forces
#' @param repelG Number specifying the weight of the repulsive forces
#' @param law Integer, specifying how forces change with distance: 0 = (inverse) linear, 1 = (inverse) square
#' @param steps Number of iterations of the algorithm before re-considering convergence criterion
#' @param springs Numeric matrix with initial anchor coordinates. When \code{NULL} (=default), springs are initialized by \code{\link{make.S}}
#' @param weight the name of the attribute containing the edge weights to use for optimization
#'
#' @importFrom utils install.packages
#' @importFrom stats cutree dist hclust
#' @useDynLib Radviz
#'
#' @details Graphviz is a variant of Freeviz (\code{\link{do.optimFreeviz}}, applicable to a dataset for which a graph structure (i.e. \code{igraph} object) is available.
#' Attractive forces are defined between connected nodes in the graph, and repulsive forces between all non-connected nodes.
#' To better maintain the original graph structure after projection, spring constants between connected nodes are proportional to their edge weights.
#' Graphviz can be used as an alternative to Freeviz when class labels are not available.
#'
#' @return A matrix with 2 columns (x and y coordinates of dimensional anchors) and 1 line
#' per dimensional anchor (so called springs).
#'
#' @example examples/example-do.radviz.R
#' @examples
#'
#' plot(rv,anchors.only=FALSE)
#'
#' @example examples/example-graphviz.R
#'
#' @importFrom igraph E ends degree get.edge.attribute
#' @importFrom Rcpp evalCpp
#'
#' @author Nicolas Sauwen
#' @export
do.optimGraphviz <- function(x, graph, attractG = 1, repelG = 1, law = 0,
                             steps = 10, springs = NULL, weight = "weight") {

  # BUG FIX: the original tested any(class(x) == "x.frame") -- a typo for
  # "data.frame" -- so data frames were never coerced to a matrix.
  if (inherits(x, "data.frame")) x <- as.matrix(x)

  if (!(law %in% c(0, 1))) stop("Parameter 'law' not properly specified. Valid values are 0 or 1")

  # igraph is required at run time; install only when missing.
  # NOTE(review): installing packages inside a function is discouraged
  # (CRAN policy); igraph should preferably be a declared dependency.
  if (!requireNamespace("igraph", quietly = TRUE)) install.packages("igraph")

  # Get edge info from graph
  edges <- E(graph)
  edgesMat <- ends(graph, edges, names = FALSE)
  mode(edgesMat) <- "integer"
  edgeWeights <- get.edge.attribute(graph, weight)
  rm(list = c("edges"))

  # Duplicate every edge in both directions, then sort by source vertex so
  # the C++ routine can walk edges grouped per node.
  edgesMat <- rbind(edgesMat, edgesMat[, 2:1])
  edgeWeights <- c(edgeWeights, edgeWeights)
  orderInd <- order(edgesMat[, 1])
  edgesMat <- edgesMat[orderInd, ]
  edgeWeights <- edgeWeights[orderInd]
  degreeVect <- degree(graph)
  edgesInds <- edgesMat[, 2]
  rm(list = c("edgesMat", "orderInd"))

  if (is.null(springs)) springs <- make.S(colnames(x))

  # C++ code currently assumes observations as columns and attributes as rows.
  dataNormalized <- apply(x, 2, do.L)
  dataNormalized <- t(dataNormalized)

  graphVizSprings <- springs
  maxIters <- 1e5
  iter <- 1
  converged <- FALSE
  convTol <- 1e-3

  # Scalar loop condition: '&&' rather than the vectorized '&'.
  while (!converged && iter < maxIters) {
    oldSprings <- graphVizSprings
    graphVizSprings <- optimizeAnchorsGraph(dataNormalized, edgesInds,
                                            edgeWeights, degreeVect,
                                            graphVizSprings, attractG, repelG,
                                            law, steps, normalizeExamples = 0)

    # Avoid axes being suppressed in the first convergence steps:
    if (iter < 5) {
      axesLengths <- sqrt(apply(graphVizSprings^2, 1, sum))
      suppressedInds <- which(axesLengths < 1e-2)
      if (length(suppressedInds) > 0) {
        graphVizSprings <- pracma::flipdim(springs, 1)
      }
    }

    # Converged when no anchor moved by more than convTol this round.
    springsDiff <- sqrt(apply((oldSprings - graphVizSprings)^2, 1, sum))
    if (max(springsDiff) < convTol) converged <- TRUE
    iter <- iter + 1
  }

  rownames(graphVizSprings) <- rownames(springs)
  # Progress information goes to the message stream, not stdout.
  message("# iters: ", iter)
  if (iter == maxIters) warning("Maximum number of iterations reached without convergence")

  return(graphVizSprings)
}
/R/do.optimGraphviz.R
no_license
cran/Radviz
R
false
false
4,350
r
#' Optimize the Dimensional Anchors Position using the Graphviz algorithm
#'
#' Allows to compute the best arrangement of Dimensional Anchors so that
#' visualization efficiency (i.e. maintaining graph structure) is optimized.
#' The Graphviz algorithm is implemented in C++ for optimal computational efficiency.
#'
#' @param x a data.frame or matrix to be projected, with column names matching row names in springs
#' @param graph \code{igraph} object
#' @param attractG Number specifying the weight of the attractive forces
#' @param repelG Number specifying the weight of the repulsive forces
#' @param law Integer, specifying how forces change with distance: 0 = (inverse) linear, 1 = (inverse) square
#' @param steps Number of iterations of the algorithm before re-considering convergence criterion
#' @param springs Numeric matrix with initial anchor coordinates. When \code{NULL} (=default), springs are initialized by \code{\link{make.S}}
#' @param weight the name of the attribute containing the edge weights to use for optimization
#'
#' @importFrom utils install.packages
#' @importFrom stats cutree dist hclust
#' @useDynLib Radviz
#'
#' @details Graphviz is a variant of Freeviz (\code{\link{do.optimFreeviz}}, applicable to a dataset for which a graph structure (i.e. \code{igraph} object) is available.
#' Attractive forces are defined between connected nodes in the graph, and repulsive forces between all non-connected nodes.
#' To better maintain the original graph structure after projection, spring constants between connected nodes are proportional to their edge weights.
#' Graphviz can be used as an alternative to Freeviz when class labels are not available.
#'
#' @return A matrix with 2 columns (x and y coordinates of dimensional anchors) and 1 line
#' per dimensional anchor (so called springs).
#'
#' @example examples/example-do.radviz.R
#' @examples
#'
#' plot(rv,anchors.only=FALSE)
#'
#' @example examples/example-graphviz.R
#'
#' @importFrom igraph E ends degree get.edge.attribute
#' @importFrom Rcpp evalCpp
#'
#' @author Nicolas Sauwen
#' @export
do.optimGraphviz <- function(x, graph, attractG = 1, repelG = 1, law = 0,
                             steps = 10, springs = NULL, weight = "weight") {

  # BUG FIX: the original tested any(class(x) == "x.frame") -- a typo for
  # "data.frame" -- so data frames were never coerced to a matrix.
  if (inherits(x, "data.frame")) x <- as.matrix(x)

  if (!(law %in% c(0, 1))) stop("Parameter 'law' not properly specified. Valid values are 0 or 1")

  # igraph is required at run time; install only when missing.
  # NOTE(review): installing packages inside a function is discouraged
  # (CRAN policy); igraph should preferably be a declared dependency.
  if (!requireNamespace("igraph", quietly = TRUE)) install.packages("igraph")

  # Get edge info from graph
  edges <- E(graph)
  edgesMat <- ends(graph, edges, names = FALSE)
  mode(edgesMat) <- "integer"
  edgeWeights <- get.edge.attribute(graph, weight)
  rm(list = c("edges"))

  # Duplicate every edge in both directions, then sort by source vertex so
  # the C++ routine can walk edges grouped per node.
  edgesMat <- rbind(edgesMat, edgesMat[, 2:1])
  edgeWeights <- c(edgeWeights, edgeWeights)
  orderInd <- order(edgesMat[, 1])
  edgesMat <- edgesMat[orderInd, ]
  edgeWeights <- edgeWeights[orderInd]
  degreeVect <- degree(graph)
  edgesInds <- edgesMat[, 2]
  rm(list = c("edgesMat", "orderInd"))

  if (is.null(springs)) springs <- make.S(colnames(x))

  # C++ code currently assumes observations as columns and attributes as rows.
  dataNormalized <- apply(x, 2, do.L)
  dataNormalized <- t(dataNormalized)

  graphVizSprings <- springs
  maxIters <- 1e5
  iter <- 1
  converged <- FALSE
  convTol <- 1e-3

  # Scalar loop condition: '&&' rather than the vectorized '&'.
  while (!converged && iter < maxIters) {
    oldSprings <- graphVizSprings
    graphVizSprings <- optimizeAnchorsGraph(dataNormalized, edgesInds,
                                            edgeWeights, degreeVect,
                                            graphVizSprings, attractG, repelG,
                                            law, steps, normalizeExamples = 0)

    # Avoid axes being suppressed in the first convergence steps:
    if (iter < 5) {
      axesLengths <- sqrt(apply(graphVizSprings^2, 1, sum))
      suppressedInds <- which(axesLengths < 1e-2)
      if (length(suppressedInds) > 0) {
        graphVizSprings <- pracma::flipdim(springs, 1)
      }
    }

    # Converged when no anchor moved by more than convTol this round.
    springsDiff <- sqrt(apply((oldSprings - graphVizSprings)^2, 1, sum))
    if (max(springsDiff) < convTol) converged <- TRUE
    iter <- iter + 1
  }

  rownames(graphVizSprings) <- rownames(springs)
  # Progress information goes to the message stream, not stdout.
  message("# iters: ", iter)
  if (iter == maxIters) warning("Maximum number of iterations reached without convergence")

  return(graphVizSprings)
}
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/tools-arrays.R
\name{colwise-rowise}
\alias{cols_means}
\alias{cols_variances}
\alias{colwise-rowise}
\alias{rows_means}
\alias{rows_variances}
\title{Colwise and rowwise operations}
\usage{
cols_variances(x)

rows_variances(x)

cols_means(x, na.rm = FALSE, dims = 1L)

rows_means(x, na.rm = FALSE, dims = 1L)
}
\arguments{
\item{x}{numeric matrix.}
}
\description{
\code{cols_means()} and \code{rows_means()} are simple aliases to
\code{colMeans()} and \code{rowMeans()}. \code{cols_variances()} uses
\code{colMeans()} to provide an efficient way of computing the variances
of the columns of a matrix while \code{rows_variances()} uses
\code{rowMeans()} to provide an efficient way of computing the variances
of the rows of a matrix.
}
/man/colwise-rowise.Rd
no_license
lionel-/gsim
R
false
false
810
rd
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/tools-arrays.R
\name{colwise-rowise}
\alias{cols_means}
\alias{cols_variances}
\alias{colwise-rowise}
\alias{rows_means}
\alias{rows_variances}
\title{Colwise and rowwise operations}
\usage{
cols_variances(x)

rows_variances(x)

cols_means(x, na.rm = FALSE, dims = 1L)

rows_means(x, na.rm = FALSE, dims = 1L)
}
\arguments{
\item{x}{numeric matrix.}
}
\description{
\code{cols_means()} and \code{rows_means()} are simple aliases to
\code{colMeans()} and \code{rowMeans()}. \code{cols_variances()} uses
\code{colMeans()} to provide an efficient way of computing the variances
of the columns of a matrix while \code{rows_variances()} uses
\code{rowMeans()} to provide an efficient way of computing the variances
of the rows of a matrix.
}
/functions_nve.R
no_license
mhheien/IRiR-1
R
false
false
3,366
r
#' @name pathway_vectorization
#' @title Vectorization of pathways
#'
#' @description This function vectorizes pathways by creating a pathway-gene profile.
#'
#' @param expression_profile Gene expression profile containing gene expression values. Rownames are entrez IDs. Colnames are sample names.
#'
#' @param condition A vector containing the group information of samples. For example, condition<-c(type1,type1,type2,type2) for four samples.
#'
#' @param group1 Group1 to be compared.
#'
#' @param group2 Group2 to be compared.
#'
#' @param pathway_info A list containing pathway information to be compared, which can be generated by the merge_pathway function.
#'
#' @usage pathway_vectorization(expression_profile,condition,group1,group2,pathway_info)
#'
#' @return Return a dataframe which contains pathway-gene information. Colnames are pathways' names, rownames are genes' names and the value of each element is the fold change between the two groups.
#'
#' @examples
#' data(example)
#' group1='Treatment'
#' group2='Model'
#' group3='Control'
#' Treatment_profile=pathway_vectorization(expression_profile,condition,group1,group2,pathway_info)
#' Model_profile=pathway_vectorization(expression_profile,condition,group2,group3,pathway_info)
#'
#' @export
#'
#'
#'
pathway_vectorization <- function(expression_profile, condition, group1, group2, pathway_info) {
  # Mean expression per gene within group1
  index1 <- !is.na(factor(condition, levels = group1))
  edata1 <- expression_profile[, index1]
  edatamean1 <- apply(edata1, 1, mean)
  ex1 <- data.frame(mrna1 = rownames(expression_profile), edatamean1)

  # Mean expression per gene within group2
  index2 <- !is.na(factor(condition, levels = group2))
  edata2 <- expression_profile[, index2]
  edatamean2 <- apply(edata2, 1, mean)
  ex2 <- data.frame(mrna2 = rownames(expression_profile), edatamean2)

  kg.sets <- pathway_info[[3]]  # named list: pathway name -> member gene IDs
  expression <- cbind(ex1, ex2)
  pathways <- pathway_info[[1]]
  gene_id <- pathway_info[[2]]

  # Preallocate the full profile with the neutral value 1 ("no change"),
  # instead of growing a data.frame row by row with rbind (O(n^2) copies
  # in the original implementation).
  profile <- matrix(1, nrow = length(gene_id), ncol = length(pathways),
                    dimnames = list(gene_id, pathways))
  for (i in seq_along(gene_id)) {
    for (j in seq_along(pathways)) {
      # Fold change only for genes that belong to the pathway and are present
      # in the expression profile; all other cells keep the default of 1
      if (gene_id[i] %in% kg.sets[[pathways[j]]] &&
          gene_id[i] %in% expression[, "mrna1"]) {
        profile[i, j] <- ex1[ex1[, "mrna1"] == gene_id[i], 2] /
          ex2[ex2[, "mrna2"] == gene_id[i], 2]
      }
    }
  }
  profile <- as.data.frame(profile)

  # Drop genes with non-positive totals, infinite fold changes or NAs.
  # drop = FALSE keeps the data.frame shape when only one pathway is given
  # (the original collapsed to a vector and then failed on rowSums()).
  profile <- profile[rowSums(profile) > 0, , drop = FALSE]
  profile <- profile[rowSums(profile) != Inf, , drop = FALSE]
  profile <- na.omit(profile)
  profile
}
/R/pathway_vectorization.R
no_license
github-gs/QPA
R
false
false
2,533
r
#' @name pathway_vectorization
#' @title Vectorization of pathways
#'
#' @description This function vectorizes pathways by creating a pathway-gene profile.
#'
#' @param expression_profile Gene expression profile containing gene expression values. Rownames are entrez IDs. Colnames are sample names.
#'
#' @param condition A vector containing the group information of samples. For example, condition<-c(type1,type1,type2,type2) for four samples.
#'
#' @param group1 Group1 to be compared.
#'
#' @param group2 Group2 to be compared.
#'
#' @param pathway_info A list containing pathway information to be compared, which can be generated by the merge_pathway function.
#'
#' @usage pathway_vectorization(expression_profile,condition,group1,group2,pathway_info)
#'
#' @return Return a dataframe which contains pathway-gene information. Colnames are pathways' names, rownames are genes' names and the value of each element is the fold change between the two groups.
#'
#' @examples
#' data(example)
#' group1='Treatment'
#' group2='Model'
#' group3='Control'
#' Treatment_profile=pathway_vectorization(expression_profile,condition,group1,group2,pathway_info)
#' Model_profile=pathway_vectorization(expression_profile,condition,group2,group3,pathway_info)
#'
#' @export
#'
#'
#'
pathway_vectorization <- function(expression_profile, condition, group1, group2, pathway_info) {
  # Mean expression per gene within group1
  index1 <- !is.na(factor(condition, levels = group1))
  edata1 <- expression_profile[, index1]
  edatamean1 <- apply(edata1, 1, mean)
  ex1 <- data.frame(mrna1 = rownames(expression_profile), edatamean1)

  # Mean expression per gene within group2
  index2 <- !is.na(factor(condition, levels = group2))
  edata2 <- expression_profile[, index2]
  edatamean2 <- apply(edata2, 1, mean)
  ex2 <- data.frame(mrna2 = rownames(expression_profile), edatamean2)

  kg.sets <- pathway_info[[3]]  # named list: pathway name -> member gene IDs
  expression <- cbind(ex1, ex2)
  pathways <- pathway_info[[1]]
  gene_id <- pathway_info[[2]]

  # Preallocate the full profile with the neutral value 1 ("no change"),
  # instead of growing a data.frame row by row with rbind (O(n^2) copies
  # in the original implementation).
  profile <- matrix(1, nrow = length(gene_id), ncol = length(pathways),
                    dimnames = list(gene_id, pathways))
  for (i in seq_along(gene_id)) {
    for (j in seq_along(pathways)) {
      # Fold change only for genes that belong to the pathway and are present
      # in the expression profile; all other cells keep the default of 1
      if (gene_id[i] %in% kg.sets[[pathways[j]]] &&
          gene_id[i] %in% expression[, "mrna1"]) {
        profile[i, j] <- ex1[ex1[, "mrna1"] == gene_id[i], 2] /
          ex2[ex2[, "mrna2"] == gene_id[i], 2]
      }
    }
  }
  profile <- as.data.frame(profile)

  # Drop genes with non-positive totals, infinite fold changes or NAs.
  # drop = FALSE keeps the data.frame shape when only one pathway is given
  # (the original collapsed to a vector and then failed on rowSums()).
  profile <- profile[rowSums(profile) > 0, , drop = FALSE]
  profile <- profile[rowSums(profile) != Inf, , drop = FALSE]
  profile <- na.omit(profile)
  profile
}
library(vegan)
library(phyloseq)
library(ape)

source("https://raw.githubusercontent.com/jesusNPL/Public/master/ordinate_mod.R")

data("varespec")

### Run ENVFIT ###

# Bray-Curtis distance matrix (named explicitly to avoid masking stats::dist)
varespec_dist <- vegdist(varespec, method = "bray")

# PCoA using APE
pcoa_ape <- pcoa(varespec_dist)

# PCoA using stats
pcoa_stats <- cmdscale(varespec_dist, eig = TRUE)

species.envfit <- envfit(pcoa_ape, varespec, choices = c(1, 2), permutations = 999)
#Error in scores.default(ord, display = display, choices = choices, ...) : cannot find scores
species.envfit <- envfit(pcoa_stats, varespec, choices = c(1, 2), permutations = 999)
# Success

### Run PCoA using {phyloseq} ###
data("GlobalPatterns")

# Keep taxa seen more than 5 times in at least half of the samples.
# (The original repeated this prune/filter block twice verbatim; the
# accidental duplicate has been removed.)
GP = GlobalPatterns
wh0 = genefilter_sample(GP, filterfun_sample(function(x) x > 5), A = 0.5 * nsamples(GP))
GP1 = prune_taxa(wh0, GP)

# Normalize counts to a common library size of 1e6
GP1 = transform_sample_counts(GP1, function(x) 1E6 * x/sum(x))

# Restrict to the five most abundant phyla
phylum.sum = tapply(taxa_sums(GP1), tax_table(GP1)[, "Phylum"], sum, na.rm = TRUE)
top5phyla = names(sort(phylum.sum, TRUE))[1:5]
GP1 = prune_taxa((tax_table(GP1)[, "Phylum"] %in% top5phyla), GP1)

pcoa_phyloseq <- ordinate(GP1, method = "PCoA", distance = "bray")

### Run PCoA using modified function ###
pcoa_phyloseq_mod <- ordinate2(GP1, method = "PCoA", distance = "bray")

##### Compare PCoA objects from all four methods #####
# PCoA ape
names(pcoa_ape)
# PCoA phyloseq
names(pcoa_phyloseq)
# PCoA stats
names(pcoa_stats)
# PCoA phyloseq modified
names(pcoa_phyloseq_mod)
/PCoA_Taylor.R
no_license
jesusNPL/Public
R
false
false
1,582
r
library(vegan)
library(phyloseq)
library(ape)

source("https://raw.githubusercontent.com/jesusNPL/Public/master/ordinate_mod.R")

data("varespec")

### Run ENVFIT ###

# Bray-Curtis distance matrix (named explicitly to avoid masking stats::dist)
varespec_dist <- vegdist(varespec, method = "bray")

# PCoA using APE
pcoa_ape <- pcoa(varespec_dist)

# PCoA using stats
pcoa_stats <- cmdscale(varespec_dist, eig = TRUE)

species.envfit <- envfit(pcoa_ape, varespec, choices = c(1, 2), permutations = 999)
#Error in scores.default(ord, display = display, choices = choices, ...) : cannot find scores
species.envfit <- envfit(pcoa_stats, varespec, choices = c(1, 2), permutations = 999)
# Success

### Run PCoA using {phyloseq} ###
data("GlobalPatterns")

# Keep taxa seen more than 5 times in at least half of the samples.
# (The original repeated this prune/filter block twice verbatim; the
# accidental duplicate has been removed.)
GP = GlobalPatterns
wh0 = genefilter_sample(GP, filterfun_sample(function(x) x > 5), A = 0.5 * nsamples(GP))
GP1 = prune_taxa(wh0, GP)

# Normalize counts to a common library size of 1e6
GP1 = transform_sample_counts(GP1, function(x) 1E6 * x/sum(x))

# Restrict to the five most abundant phyla
phylum.sum = tapply(taxa_sums(GP1), tax_table(GP1)[, "Phylum"], sum, na.rm = TRUE)
top5phyla = names(sort(phylum.sum, TRUE))[1:5]
GP1 = prune_taxa((tax_table(GP1)[, "Phylum"] %in% top5phyla), GP1)

pcoa_phyloseq <- ordinate(GP1, method = "PCoA", distance = "bray")

### Run PCoA using modified function ###
pcoa_phyloseq_mod <- ordinate2(GP1, method = "PCoA", distance = "bray")

##### Compare PCoA objects from all four methods #####
# PCoA ape
names(pcoa_ape)
# PCoA phyloseq
names(pcoa_phyloseq)
# PCoA stats
names(pcoa_stats)
# PCoA phyloseq modified
names(pcoa_phyloseq_mod)
library(glmnet)

# Training data: first column is the response, columns 4+ are predictors.
# FIX: the original used `head=T`, relying on partial argument matching and
# the reassignable alias T; spelled out as `header = TRUE`.
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/upper_aerodigestive_tract.csv",
                    header = TRUE, sep = ",")
x = as.matrix(mydata[, 4:ncol(mydata)])
y = as.matrix(mydata[, 1])

# 10-fold cross-validated elastic net (alpha = 0.35) with MAE loss;
# seed fixed for reproducible fold assignment
set.seed(123)
glm = cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.35,
                family = "gaussian", standardize = FALSE)

# Append the fitted model path summary to the log file
sink('./upper_aerodigestive_tract_048.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
/Model/EN/Correlation/upper_aerodigestive_tract/upper_aerodigestive_tract_048.R
no_license
esbgkannan/QSMART
R
false
false
389
r
library(glmnet)

# Training data: first column is the response, columns 4+ are predictors.
# FIX: the original used `head=T`, relying on partial argument matching and
# the reassignable alias T; spelled out as `header = TRUE`.
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/upper_aerodigestive_tract.csv",
                    header = TRUE, sep = ",")
x = as.matrix(mydata[, 4:ncol(mydata)])
y = as.matrix(mydata[, 1])

# 10-fold cross-validated elastic net (alpha = 0.35) with MAE loss;
# seed fixed for reproducible fold assignment
set.seed(123)
glm = cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.35,
                family = "gaussian", standardize = FALSE)

# Append the fitted model path summary to the log file
sink('./upper_aerodigestive_tract_048.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
library(shiny)

# Server logic: draw a histogram of a random sample whose distribution
# (Normal or Exponential) and parameters are chosen in the UI.
shinyServer(function(input, output) {

  output$myPlot <- renderPlot({
    n <- input$sampleSize

    # Generate the sample according to the selected distribution
    sample_values <- if (input$Distribution == "Normal") {
      rnorm(n, mean = as.numeric(input$mean), sd = as.numeric(input$sd))
    } else {
      rexp(n, rate = 1 / as.numeric(input$lambda))
    }

    hist(sample_values, col = "red")
  })
})
/server.R
no_license
arshaddd/Developing_Data_Products_W4
R
false
false
478
r
library(shiny)

# Server logic: draw a histogram of a random sample whose distribution
# (Normal or Exponential) and parameters are chosen in the UI.
shinyServer(function(input, output) {

  output$myPlot <- renderPlot({
    n <- input$sampleSize

    # Generate the sample according to the selected distribution
    sample_values <- if (input$Distribution == "Normal") {
      rnorm(n, mean = as.numeric(input$mean), sd = as.numeric(input$sd))
    } else {
      rexp(n, rate = 1 / as.numeric(input$lambda))
    }

    hist(sample_values, col = "red")
  })
})
#' Raw Data Import for USGS NWIS QW Data
#'
#' Imports data from NWIS web service. This function gets the data from here: \url{http://nwis.waterdata.usgs.gov/nwis/qwdata}
#' A list of parameter codes can be found here: \url{http://nwis.waterdata.usgs.gov/nwis/pmcodes/}
#' A list of statistic codes can be found here: \url{http://nwis.waterdata.usgs.gov/nwis/help/?read_file=stat&format=table}
#'
#' @param siteNumbers character of USGS site numbers. This is usually an 8 digit number
#' @param parameterCd character of USGS parameter code(s). This is usually a 5 digit number.
#' @param startDate character starting date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the earliest possible record.
#' @param endDate character ending date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the latest possible record.
#' @param expanded logical defaults to \code{TRUE}. If \code{TRUE}, retrieves additional information. Expanded data includes
#' remark_cd (remark code), result_va (result value), val_qual_tx (result value qualifier code), meth_cd (method code),
#' dqi_cd (data-quality indicator code), rpt_lev_va (reporting level), and rpt_lev_cd (reporting level type). If \code{FALSE},
#' only returns remark_cd (remark code) and result_va (result value). Expanded = \code{FALSE} will not give
#' sufficient information for unbiased statistical analysis.
#' @param reshape logical, reshape the data. If \code{TRUE}, then return a wide data frame with all water-quality in a single row for each sample.
#' If \code{FALSE} (default), then return a long data frame with each water-quality result in a single row.
#' @param tz character to set timezone attribute of datetime. Default is an empty quote, which converts the
#' datetimes to UTC (properly accounting for daylight savings times based on the data's provided tz_cd column).
#' Possible values to provide are "America/New_York","America/Chicago", "America/Denver","America/Los_Angeles",
#' "America/Anchorage","America/Honolulu","America/Jamaica","America/Managua","America/Phoenix", and "America/Metlakatla"
#' @keywords data import USGS web service
#' @return A data frame with the following columns:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' agency_cd \tab character \tab The NWIS code for the agency reporting the data\cr
#' site_no \tab character \tab The USGS site number \cr
#' datetime \tab POSIXct \tab The date and time of the value converted to UTC (if asDateTime = TRUE), \cr
#' \tab character \tab or raw character string (if asDateTime = FALSE) \cr
#' tz_cd \tab character \tab The time zone code for datetime \cr
#' code \tab character \tab Any codes that qualify the corresponding value\cr
#' value \tab numeric \tab The numeric value for the parameter \cr
#' }
#'
#' There are also several useful attributes attached to the data frame:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' url \tab character \tab The url used to generate the data \cr
#' queryTime \tab POSIXct \tab The time the data was returned \cr
#' comment \tab character \tab Header comments from the RDB file \cr
#' siteInfo \tab data.frame \tab A data frame containing information on the requested sites \cr
#' variableInfo \tab data.frame \tab A data frame containing information on the requested parameters \cr
#' }
#' @export
#' @import reshape2
#' @seealso \code{\link{readWQPdata}}, \code{\link{whatWQPsites}},
#' \code{\link{readWQPqw}}, \code{\link{constructNWISURL}}
#' @examples
#' siteNumbers <- c('04024430','04024000')
#' startDate <- '2010-01-01'
#' endDate <- ''
#' parameterCd <- c('34247','30234','32104','34220')
#' \dontrun{
#' rawNWISqwData <- readNWISqw(siteNumbers,parameterCd,startDate,endDate)
#' rawNWISqwDataReshaped <- readNWISqw(siteNumbers,parameterCd,
#' startDate,endDate,reshape=TRUE)
#' }
readNWISqw <- function(siteNumbers, parameterCd, startDate = "", endDate = "",
                       expanded = TRUE, reshape = FALSE, tz = "") {

  url <- constructNWISURL(siteNumbers, parameterCd, startDate, endDate, "qw",
                          expanded = expanded)
  data <- importRDB1(url, asDateTime = TRUE, qw = TRUE, tz = tz)
  originalHeader <- comment(data)

  if (reshape && expanded) {
    # Identifier columns describing the sample rather than a result value
    columnsToMelt <- c("agency_cd", "site_no", "sample_dt", "sample_tm",
                       "sample_end_dt", "sample_end_tm", "sample_start_time_datum_cd",
                       "tm_datum_rlbty_cd", "parm_cd", "startDateTime", "endDateTime")
    columnsToMelt <- columnsToMelt[columnsToMelt %in% names(data)]
    longDF <- melt(data, columnsToMelt)
    wideDF <- dcast(longDF, ... ~ variable + parm_cd)
    wideDF[, grep("_va_", names(wideDF))] <- sapply(wideDF[, grep("_va_", names(wideDF))],
                                                    function(x) as.numeric(x))
    groupByPCode <- as.vector(sapply(parameterCd, function(x) grep(x, names(wideDF))))
    # dcast consumed parm_cd into the wide column names, so wideDF holds one
    # fewer id column than columnsToMelt; keep those id columns plus the
    # per-parameter result columns.
    # NOTE: the original wrote c(1:length(columnsToMelt)-1, ...), which by
    # operator precedence evaluates to 0:(n-1); R silently drops the 0 index,
    # so columns 1..(n-1) were selected. Made explicit with seq_len().
    data <- wideDF[, c(seq_len(length(columnsToMelt) - 1), groupByPCode)]
    comment(data) <- originalHeader
  }
  if (reshape && !expanded) {
    warning("Reshape can only be used with expanded data. Reshape request will be ignored.")
  }

  # Attach site and parameter metadata as attributes for downstream use
  siteInfo <- readNWISsite(siteNumbers)
  varInfo <- readNWISpCode(parameterCd)
  attr(data, "siteInfo") <- siteInfo
  attr(data, "variableInfo") <- varInfo
  attr(data, "statisticInfo") <- NULL

  return(data)
}
/R/readNWISqw.r
permissive
kbrannan/dataRetrieval
R
false
false
5,328
r
#' Raw Data Import for USGS NWIS QW Data
#'
#' Imports data from NWIS web service. This function gets the data from here: \url{http://nwis.waterdata.usgs.gov/nwis/qwdata}
#' A list of parameter codes can be found here: \url{http://nwis.waterdata.usgs.gov/nwis/pmcodes/}
#' A list of statistic codes can be found here: \url{http://nwis.waterdata.usgs.gov/nwis/help/?read_file=stat&format=table}
#'
#' @param siteNumbers character of USGS site numbers. This is usually an 8 digit number
#' @param parameterCd character of USGS parameter code(s). This is usually a 5 digit number.
#' @param startDate character starting date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the earliest possible record.
#' @param endDate character ending date for data retrieval in the form YYYY-MM-DD. Default is "" which indicates
#' retrieval for the latest possible record.
#' @param expanded logical defaults to \code{TRUE}. If \code{TRUE}, retrieves additional information. Expanded data includes
#' remark_cd (remark code), result_va (result value), val_qual_tx (result value qualifier code), meth_cd (method code),
#' dqi_cd (data-quality indicator code), rpt_lev_va (reporting level), and rpt_lev_cd (reporting level type). If \code{FALSE},
#' only returns remark_cd (remark code) and result_va (result value). Expanded = \code{FALSE} will not give
#' sufficient information for unbiased statistical analysis.
#' @param reshape logical, reshape the data. If \code{TRUE}, then return a wide data frame with all water-quality in a single row for each sample.
#' If \code{FALSE} (default), then return a long data frame with each water-quality result in a single row.
#' @param tz character to set timezone attribute of datetime. Default is an empty quote, which converts the
#' datetimes to UTC (properly accounting for daylight savings times based on the data's provided tz_cd column).
#' Possible values to provide are "America/New_York","America/Chicago", "America/Denver","America/Los_Angeles",
#' "America/Anchorage","America/Honolulu","America/Jamaica","America/Managua","America/Phoenix", and "America/Metlakatla"
#' @keywords data import USGS web service
#' @return A data frame with the following columns:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' agency_cd \tab character \tab The NWIS code for the agency reporting the data\cr
#' site_no \tab character \tab The USGS site number \cr
#' datetime \tab POSIXct \tab The date and time of the value converted to UTC (if asDateTime = TRUE), \cr
#' \tab character \tab or raw character string (if asDateTime = FALSE) \cr
#' tz_cd \tab character \tab The time zone code for datetime \cr
#' code \tab character \tab Any codes that qualify the corresponding value\cr
#' value \tab numeric \tab The numeric value for the parameter \cr
#' }
#'
#' There are also several useful attributes attached to the data frame:
#' \tabular{lll}{
#' Name \tab Type \tab Description \cr
#' url \tab character \tab The url used to generate the data \cr
#' queryTime \tab POSIXct \tab The time the data was returned \cr
#' comment \tab character \tab Header comments from the RDB file \cr
#' siteInfo \tab data.frame \tab A data frame containing information on the requested sites \cr
#' variableInfo \tab data.frame \tab A data frame containing information on the requested parameters \cr
#' }
#' @export
#' @import reshape2
#' @seealso \code{\link{readWQPdata}}, \code{\link{whatWQPsites}},
#' \code{\link{readWQPqw}}, \code{\link{constructNWISURL}}
#' @examples
#' siteNumbers <- c('04024430','04024000')
#' startDate <- '2010-01-01'
#' endDate <- ''
#' parameterCd <- c('34247','30234','32104','34220')
#' \dontrun{
#' rawNWISqwData <- readNWISqw(siteNumbers,parameterCd,startDate,endDate)
#' rawNWISqwDataReshaped <- readNWISqw(siteNumbers,parameterCd,
#' startDate,endDate,reshape=TRUE)
#' }
readNWISqw <- function(siteNumbers, parameterCd, startDate = "", endDate = "",
                       expanded = TRUE, reshape = FALSE, tz = "") {

  url <- constructNWISURL(siteNumbers, parameterCd, startDate, endDate, "qw",
                          expanded = expanded)
  data <- importRDB1(url, asDateTime = TRUE, qw = TRUE, tz = tz)
  originalHeader <- comment(data)

  if (reshape && expanded) {
    # Identifier columns describing the sample rather than a result value
    columnsToMelt <- c("agency_cd", "site_no", "sample_dt", "sample_tm",
                       "sample_end_dt", "sample_end_tm", "sample_start_time_datum_cd",
                       "tm_datum_rlbty_cd", "parm_cd", "startDateTime", "endDateTime")
    columnsToMelt <- columnsToMelt[columnsToMelt %in% names(data)]
    longDF <- melt(data, columnsToMelt)
    wideDF <- dcast(longDF, ... ~ variable + parm_cd)
    wideDF[, grep("_va_", names(wideDF))] <- sapply(wideDF[, grep("_va_", names(wideDF))],
                                                    function(x) as.numeric(x))
    groupByPCode <- as.vector(sapply(parameterCd, function(x) grep(x, names(wideDF))))
    # dcast consumed parm_cd into the wide column names, so wideDF holds one
    # fewer id column than columnsToMelt; keep those id columns plus the
    # per-parameter result columns.
    # NOTE: the original wrote c(1:length(columnsToMelt)-1, ...), which by
    # operator precedence evaluates to 0:(n-1); R silently drops the 0 index,
    # so columns 1..(n-1) were selected. Made explicit with seq_len().
    data <- wideDF[, c(seq_len(length(columnsToMelt) - 1), groupByPCode)]
    comment(data) <- originalHeader
  }
  if (reshape && !expanded) {
    warning("Reshape can only be used with expanded data. Reshape request will be ignored.")
  }

  # Attach site and parameter metadata as attributes for downstream use
  siteInfo <- readNWISsite(siteNumbers)
  varInfo <- readNWISpCode(parameterCd)
  attr(data, "siteInfo") <- siteInfo
  attr(data, "variableInfo") <- varInfo
  attr(data, "statisticInfo") <- NULL

  return(data)
}
# Obtain a Yandex.Direct OAuth token interactively: open the authorization
# page in the user's browser, then read the token they paste back.
yadirGetToken <- function() {
  auth_url <- "https://oauth.yandex.ru/authorize?response_type=token&client_id=c441fa0194f04f4ea2c238c0c2c40ec9"
  browseURL(auth_url)
  # readline()'s value is the function's return value
  readline(prompt = "Enter your token: ")
}
/R/yadirGetToken.R
no_license
alex-www-marketing/ryandexdirect_edit
R
false
false
216
r
# Obtain a Yandex.Direct OAuth token interactively: open the authorization
# page in the user's browser, then read the token they paste back.
yadirGetToken <- function() {
  auth_url <- "https://oauth.yandex.ru/authorize?response_type=token&client_id=c441fa0194f04f4ea2c238c0c2c40ec9"
  browseURL(auth_url)
  # readline()'s value is the function's return value
  readline(prompt = "Enter your token: ")
}
"permutations" <- function(n) { ##function permutations from package e1071 if (n == 1) return(matrix(1)) else if (n < 2) stop("n must be a positive integer") z <- matrix(1) for (i in 2:n) { x <- cbind(z, i) a <- c(1:i, 1:(i - 1)) z <- matrix(0, ncol = ncol(x), nrow = i * nrow(x)) z[1:nrow(x), ] <- x for (j in 2:i - 1) { z[j * nrow(x) + 1:nrow(x), ] <- x[, a[1:i + j]] } } dimnames(z) <- NULL z }
/R/permutations.R
no_license
l5d1l5/relaimpo
R
false
false
544
r
"permutations" <- function(n) { ##function permutations from package e1071 if (n == 1) return(matrix(1)) else if (n < 2) stop("n must be a positive integer") z <- matrix(1) for (i in 2:n) { x <- cbind(z, i) a <- c(1:i, 1:(i - 1)) z <- matrix(0, ncol = ncol(x), nrow = i * nrow(x)) z[1:nrow(x), ] <- x for (j in 2:i - 1) { z[j * nrow(x) + 1:nrow(x), ] <- x[, a[1:i + j]] } } dimnames(z) <- NULL z }
# Quote each string and collapse into one comma-separated list,
# e.g. c("a", "b") -> '"a", "b"'.
.str_quote <- function(strings) {
  purrr::map_chr(strings, function(string) {
    paste0("\"", string, "\"")
  }) %>%
    paste0(collapse = ", ")
}

# One-line summary of a data frame: "<nrow> x <ncol> (<column names>)".
.str_df <- function(df) {
  col_names <- paste0(colnames(df), collapse = ", ")
  paste0(nrow(df), " x ", ncol(df), " (", col_names, ")")
}

# Render a (possibly nested) named list as "name = value, ..." pairs.
.str_list <- function(l) {
  purrr::imap_chr(l, function(body, name) {
    if (is.list(body) && !is.data.frame(body)) {
      # Recurse into nested lists
      body <- paste0("list(", .str_list(body), ")")
      paste0(name, " = ", body)
    } else {
      ## TODO Support more classes
      if (is.character(body)) body <- .str_quote(body)
      if (is.data.frame(body)) body <- "data.frame"
      if (is.function(body)) body <- "function"
      if (is.null(body)) body <- "NULL"
      paste0(name, " = ", body)
    }
  }) %>%
    paste0(collapse = ", ")
}

# Flatten a call object into a single trimmed one-line string.
.str_call <- function(call) {
  deparse(call) %>%
    stringr::str_trim() %>%
    paste(collapse = " ")
}

# Describe a task: quoted type, plus label levels when available.
.str_task <- function(task_type, label_levels) {
  task_type <- .str_quote(task_type)
  if (!is.null(label_levels)) {
    label_levels <- paste0(label_levels, collapse = ", ")
    task_type <- paste0(task_type, " (Levels: ", label_levels, ")")
  }
  task_type
}

# Render a named vector of keys as 'name="value", ...'.
.str_keys <- function(keys) {
  purrr::imap_chr(keys, function(val, name) {
    paste0(name, "=", .str_quote(val))
  }) %>%
    paste0(collapse = ", ")
}

# Describe a model by its engine name.
.str_model <- function(model) {
  paste("Engine =", .str_quote(model$engine))
}

## TODO Add unit test
# Render a fit-parameter spec as "[values]" plus " (d=default)" when a
# default exists.
# BUG FIX: the original never returned `values` as the final expression, so
# the function returned NULL whenever spec$has_default was FALSE (the value
# of an `if` without an `else`).
.str_fit_param_spec <- function(spec) {
  if (spec$type == "character") {
    values <- paste0("[", .str_quote(spec$trans_values), "]")
    if (spec$has_default) {
      default <- .str_quote(spec$trans_default)
      values <- paste0(values, " (d=", default, ")")
    }
  } else {
    values <- paste0("[", paste(spec$trans_values, collapse = ", "), "]")
    if (spec$has_default) {
      values <- paste0(values, " (d=", spec$trans_default, ")")
    }
  }
  values
}

# Split a string into fixed-width chunks (the last chunk may be shorter).
.str_split <- function(string, width) {
  len <- stringr::str_length(string)
  i <- 1
  strings <- character()
  while (i <= len) {
    str <- stringr::str_sub(string, i, i + width - 1)
    strings <- c(strings, str)
    i <- i + width
  }
  strings
}

# Print a "key: value" line: the key is truncated/padded to a fixed column,
# long values wrap onto continuation lines aligned under the value column.
.print <- function(key, value, key_width, quote_key = FALSE, quote_value = FALSE,
                   value_width = Inf) {
  width <- options()$width
  key_col_w <- min(key_width, round(width * 0.3)) # cap key column at 30% of width
  value_w <- width - key_col_w - 1 # one space between key and value

  ## Usable key string width
  if (quote_key) {
    ## two quotation marks + one colon
    key_str_w <- key_col_w - 3
  } else {
    ## one colon
    key_str_w <- key_col_w - 1
  }
  if (stringr::str_length(key) > key_str_w) key <- stringr::str_trunc(key, key_str_w)
  if (quote_key) key <- .str_quote(key)
  key <- paste0(key, ":")
  key <- stringr::str_pad(key, key_col_w, "right")

  ## Values
  if (is.null(value)) {
    value <- "NULL"
    quote_value <- FALSE
  }
  if (stringr::str_length(value) > value_width) value <- stringr::str_trunc(value, value_width)
  if (quote_value) value <- .str_quote(value)
  values <- .str_split(value, value_w)

  ## First line
  cat(key, " ", values[1], "\n", sep = "")
  ## Continuation lines, padded to align with the value column
  if (length(values) > 1) {
    for (i in 2:length(values)) {
      pad <- stringr::str_pad("", key_col_w)
      cat(pad, " ", values[i], "\n", sep = "")
    }
  }
}

# Print a full-width horizontal separator line.
.print_sep <- function() {
  width <- options()$width
  sep <- stringr::str_dup("-", width)
  cat(sep, "\n")
}
/R/helper_print.R
permissive
five-dots/ml4e
R
false
false
3,412
r
# Quote each string and collapse into one comma-separated list,
# e.g. c("a", "b") -> '"a", "b"'.
.str_quote <- function(strings) {
  purrr::map_chr(strings, function(string) {
    paste0("\"", string, "\"")
  }) %>%
    paste0(collapse = ", ")
}

# One-line summary of a data frame: "<nrow> x <ncol> (<column names>)".
.str_df <- function(df) {
  col_names <- paste0(colnames(df), collapse = ", ")
  paste0(nrow(df), " x ", ncol(df), " (", col_names, ")")
}

# Render a (possibly nested) named list as "name = value, ..." pairs.
.str_list <- function(l) {
  purrr::imap_chr(l, function(body, name) {
    if (is.list(body) && !is.data.frame(body)) {
      # Recurse into nested lists
      body <- paste0("list(", .str_list(body), ")")
      paste0(name, " = ", body)
    } else {
      ## TODO Support more classes
      if (is.character(body)) body <- .str_quote(body)
      if (is.data.frame(body)) body <- "data.frame"
      if (is.function(body)) body <- "function"
      if (is.null(body)) body <- "NULL"
      paste0(name, " = ", body)
    }
  }) %>%
    paste0(collapse = ", ")
}

# Flatten a call object into a single trimmed one-line string.
.str_call <- function(call) {
  deparse(call) %>%
    stringr::str_trim() %>%
    paste(collapse = " ")
}

# Describe a task: quoted type, plus label levels when available.
.str_task <- function(task_type, label_levels) {
  task_type <- .str_quote(task_type)
  if (!is.null(label_levels)) {
    label_levels <- paste0(label_levels, collapse = ", ")
    task_type <- paste0(task_type, " (Levels: ", label_levels, ")")
  }
  task_type
}

# Render a named vector of keys as 'name="value", ...'.
.str_keys <- function(keys) {
  purrr::imap_chr(keys, function(val, name) {
    paste0(name, "=", .str_quote(val))
  }) %>%
    paste0(collapse = ", ")
}

# Describe a model by its engine name.
.str_model <- function(model) {
  paste("Engine =", .str_quote(model$engine))
}

## TODO Add unit test
# Render a fit-parameter spec as "[values]" plus " (d=default)" when a
# default exists.
# BUG FIX: the original never returned `values` as the final expression, so
# the function returned NULL whenever spec$has_default was FALSE (the value
# of an `if` without an `else`).
.str_fit_param_spec <- function(spec) {
  if (spec$type == "character") {
    values <- paste0("[", .str_quote(spec$trans_values), "]")
    if (spec$has_default) {
      default <- .str_quote(spec$trans_default)
      values <- paste0(values, " (d=", default, ")")
    }
  } else {
    values <- paste0("[", paste(spec$trans_values, collapse = ", "), "]")
    if (spec$has_default) {
      values <- paste0(values, " (d=", spec$trans_default, ")")
    }
  }
  values
}

# Split a string into fixed-width chunks (the last chunk may be shorter).
.str_split <- function(string, width) {
  len <- stringr::str_length(string)
  i <- 1
  strings <- character()
  while (i <= len) {
    str <- stringr::str_sub(string, i, i + width - 1)
    strings <- c(strings, str)
    i <- i + width
  }
  strings
}

# Print a "key: value" line: the key is truncated/padded to a fixed column,
# long values wrap onto continuation lines aligned under the value column.
.print <- function(key, value, key_width, quote_key = FALSE, quote_value = FALSE,
                   value_width = Inf) {
  width <- options()$width
  key_col_w <- min(key_width, round(width * 0.3)) # cap key column at 30% of width
  value_w <- width - key_col_w - 1 # one space between key and value

  ## Usable key string width
  if (quote_key) {
    ## two quotation marks + one colon
    key_str_w <- key_col_w - 3
  } else {
    ## one colon
    key_str_w <- key_col_w - 1
  }
  if (stringr::str_length(key) > key_str_w) key <- stringr::str_trunc(key, key_str_w)
  if (quote_key) key <- .str_quote(key)
  key <- paste0(key, ":")
  key <- stringr::str_pad(key, key_col_w, "right")

  ## Values
  if (is.null(value)) {
    value <- "NULL"
    quote_value <- FALSE
  }
  if (stringr::str_length(value) > value_width) value <- stringr::str_trunc(value, value_width)
  if (quote_value) value <- .str_quote(value)
  values <- .str_split(value, value_w)

  ## First line
  cat(key, " ", values[1], "\n", sep = "")
  ## Continuation lines, padded to align with the value column
  if (length(values) > 1) {
    for (i in 2:length(values)) {
      pad <- stringr::str_pad("", key_col_w)
      cat(pad, " ", values[i], "\n", sep = "")
    }
  }
}

# Print a full-width horizontal separator line.
.print_sep <- function() {
  width <- options()$width
  sep <- stringr::str_dup("-", width)
  cat(sep, "\n")
}
# Tune hyperparameters with irace (iterated F-races).
# Internal driver: wires mlr's fitness evaluation into irace's hookRun
# interface, runs irace, then extracts the best elite candidate and its
# averaged performance from the opt.path.
tuneIrace = function(learner, task, resampling, measures, par.set, control, opt.path, show.info) {
  requirePackages("irace", why = "tuneIrace", default.method = "load")
  cx = function(x) convertXLogicalsNotAsStrings(x, par.set)

  # irace callback: evaluate one candidate on one resampling instance.
  # `config` is unused here but required by the irace hookRun signature.
  hookRun = function(experiment, config = list()) {
    rin = experiment$instance
    tunerFitnFun(as.list(experiment$candidate), learner = learner, task = task,
      resampling = rin, measures = measures, par.set = par.set, ctrl = control,
      opt.path = opt.path, show.info = show.info, convertx = cx, remove.nas = TRUE)
  }

  # Pull irace-specific settings out of extra.args; everything that remains
  # is passed straight through to irace's tunerConfig.
  n.instances = control$extra.args$n.instances
  control$extra.args$n.instances = NULL
  show.irace.output = control$extra.args$show.irace.output
  control$extra.args$show.irace.output = NULL
  instances = lapply(seq_len(n.instances), function(i) makeResampleInstance(resampling, task = task))

  if (is.null(control$extra.args$digits)) {
    control$extra.args$digits = .Machine$integer.max
  } else {
    control$extra.args$digits = asInt(control$extra.args$digits)
  }

  parameters = convertParamSetToIrace(par.set)
  log.file = tempfile()
  tuner.config = c(list(hookRun = hookRun, instances = instances, logFile = log.file),
    control$extra.args)
  # Optionally silence irace's console output
  g = if (show.irace.output) identity else capture.output
  g(or <- irace::irace(tunerConfig = tuner.config, parameters = parameters))
  unlink(log.file)
  if (nrow(or) == 0L)
    stop("irace produced no result, possibly the budget was set too low?")

  # Best (first) elite candidate, untransformed parameter values.
  # (The original also computed an unused candidate id and an unused
  # transformed copy `x2`; both removed.)
  x1 = as.list(irace::removeCandidatesMetaData(or[1L, ]))

  # we need chars, not factors / logicals, so we can match 'x'
  d = convertDfCols(as.data.frame(opt.path), logicals.as.factor = TRUE)
  d = convertDfCols(d, factors.as.char = TRUE)
  par.names = names(x1)
  # get all lines in opt.path which correspond to x and average their perf values
  j = vlapply(seq_row(d), function(i) isTRUE(all.equal(as.list(d[i, par.names, drop = FALSE]), x1)))
  if (!any(j))
    stop("No matching rows for final elite candidate found in opt.path!
      This cannot be!")
  y = colMeans(d[j, opt.path$y.names, drop = FALSE])

  # take first index of matching lines to get recommended x
  # (back-transformed, inactive/missing parameters removed)
  e = getOptPathEl(opt.path, which.first(j))
  x = trafoValue(par.set, e$x)
  x = removeMissingValues(x)

  if (control$tune.threshold) # now get thresholds and average them
    threshold = getThresholdFromOptPath(opt.path, which(j))
  else
    threshold = NULL
  makeTuneResult(learner, control, x, y, threshold, opt.path)
}
/R/tuneIrace.R
no_license
gragusa/mlr
R
false
false
2,530
r
tuneIrace = function(learner, task, resampling, measures, par.set, control, opt.path, show.info) { requirePackages("irace", why = "tuneIrace", default.method = "load") cx = function(x) convertXLogicalsNotAsStrings(x, par.set) hookRun = function(experiment, config = list()) { rin = experiment$instance tunerFitnFun(as.list(experiment$candidate), learner = learner, task = task, resampling = rin, measures = measures, par.set = par.set, ctrl = control, opt.path = opt.path, show.info = show.info, convertx = cx, remove.nas = TRUE) } n.instances = control$extra.args$n.instances control$extra.args$n.instances = NULL show.irace.output = control$extra.args$show.irace.output control$extra.args$show.irace.output = NULL instances = lapply(seq_len(n.instances), function(i) makeResampleInstance(resampling, task = task)) if (is.null(control$extra.args$digits)) { control$extra.args$digits = .Machine$integer.max } else { control$extra.args$digits = asInt(control$extra.args$digits) } parameters = convertParamSetToIrace(par.set) log.file = tempfile() tuner.config = c(list(hookRun = hookRun, instances = instances, logFile = log.file), control$extra.args) g = if (show.irace.output) identity else capture.output g(or <- irace::irace(tunerConfig = tuner.config, parameters = parameters)) unlink(log.file) if (nrow(or) == 0L) stop("irace produced no result, possibly the budget was set too low?") id = or[1L, 1L] # get best candidate x1 = as.list(irace::removeCandidatesMetaData(or[1L,])) x2 = trafoValue(par.set, x1) # we need chars, not factors / logicals, so we can match 'x' d = convertDfCols(as.data.frame(opt.path), logicals.as.factor = TRUE) d = convertDfCols(d, factors.as.char = TRUE) par.names = names(x1) # get all lines in opt.path which correspond to x and average their perf values j = vlapply(seq_row(d), function(i) isTRUE(all.equal(as.list(d[i, par.names, drop = FALSE]), x1))) if (!any(j)) stop("No matching rows for final elite candidate found in opt.path! 
This cannot be!") y = colMeans(d[j, opt.path$y.names, drop = FALSE]) # take first index of mating lines to get recommended x e = getOptPathEl(opt.path, which.first(j)) x = trafoValue(par.set, e$x) x = removeMissingValues(x) if (control$tune.threshold) # now get thresholds and average them threshold = getThresholdFromOptPath(opt.path, which(j)) else threshold = NULL makeTuneResult(learner, control, x, y, threshold, opt.path) }
# Repeat pure genomics model 5 times (100 times on the server) # Library and working directory library(tidyverse) library(tidymodels) library(themis) library(vip) library(patchwork) # Prepare input data f <- list.files("input_data", pattern = "*pheno.geno.Rda", full.names = T) pheno_geno <- readRDS(f) for (i in 1:100){ set.seed(i) print(glue::glue("Starting replicate with seed {i}")) dir <- str_c("seed_", i) dir.create(dir) setwd(dir) # Data splitting splits <- rsample::initial_split(pheno_geno, prop = .8, strata = mortality) df_train <- training(splits) df_test <- testing(splits) # Construct workflow rf_rec <- recipes::recipe(mortality ~ ., data = df_train) %>% themis::step_downsample(mortality) rf_mod <- parsnip::rand_forest( mtry = tune(), min_n = tune(), trees = 500 ) %>% set_mode("classification") %>% set_engine("ranger") rf_wf <- workflows::workflow() %>% add_recipe(rf_rec) %>% add_model(rf_mod) # Resampling set.seed(i) folds <- rsample::vfold_cv(df_train, v = 10, strata = mortality) folds # Train model doParallel::registerDoParallel() rf_grid <- grid_regular( mtry(range = c(1,5)), min_n(range = c(30,40)), levels = 5 ) rf_fit_rs <- rf_wf %>% tune_grid(resamples = folds, grid = rf_grid, control = control_grid(save_pred = TRUE), metrics = metric_set(roc_auc)) # Collect metrics df_metrics <- tune::collect_metrics(rf_fit_rs) metrics_train <- df_metrics %>% slice_max(order_by = mean, n = 1) %>% transmute(dataset = "train", .metric, .estimate = mean) p1 <- rf_fit_rs %>% collect_metrics() %>% filter(.metric == "roc_auc") %>% mutate(min_n = factor(min_n)) %>% ggplot(aes(mtry, mean, color = min_n)) + geom_line(alpha = 0.5, size = 1.5) + geom_point() + labs(y = "AUC") + theme_bw() # Collect predictions df_roc <- rf_fit_rs %>% tune::collect_predictions() p2 <- df_roc %>% group_by(.config) %>% yardstick::roc_curve(truth = mortality, .pred_Died) %>% autoplot() # Chose the best model and fit it best <- rf_fit_rs %>% tune::select_best() final_wf <- rf_wf %>% 
tune::finalize_workflow(best) final_rf <- final_wf %>% extract_spec_parsnip() %>% set_engine(engine = "ranger", importance = "impurity") final_wf <- final_wf %>% workflows::update_model(final_rf) final_fit <- final_wf %>% tune::last_fit(splits) # Final assessment on the test dataset ## Metrics df_metrics <- final_fit %>% collect_metrics() %>% filter(.metric == "roc_auc") %>% mutate(dataset = "test") %>% select(-c(.config, .estimator)) %>% bind_rows(metrics_train) ## Confusion matrix rf_testing_pred <- final_fit %>% collect_predictions() rf_testing_pred <- rf_testing_pred %>% mutate(dataset = "test") p3 <- rf_testing_pred %>% conf_mat(mortality, .pred_class) %>% autoplot(type = "heatmap") ## ROC curve df_roc <- df_roc %>% mutate(dataset = "train") %>% semi_join(best) %>% bind_rows(rf_testing_pred) p4 <- df_roc %>% group_by(dataset) %>% yardstick::roc_curve(truth = mortality, .pred_Died) %>% autoplot() + scale_colour_manual(values = c("red", "blue")) # Here we construct the precision recall curve p5 <- df_roc %>% group_by(dataset) %>% yardstick::pr_curve(truth = mortality, .pred_Died) %>% autoplot() + scale_colour_manual(values = c("red", "blue")) df_metrics <- df_roc %>% group_by(dataset) %>% yardstick::pr_auc(truth = mortality, .pred_Died) %>% select(-.estimator) %>% bind_rows(df_metrics) %>% arrange(desc(.metric), desc(dataset)) ## Feature importance p6 <- final_fit %>% purrr::pluck(".workflow", 1) %>% workflowsets::extract_fit_parsnip() %>% vip::vip() + theme_bw() # Output ## Models name <- str_c("ML_mortality.with_lineages.replicate_", i) dir <- str_c("models/") dir.create(dir) rf_fit_rs %>% saveRDS(str_c(dir, name, ".train_resample.Rda")) final_fit %>% saveRDS(str_c(dir, name, ".last_fit.Rda")) ## Metrics dir <- "processed_data/" dir.create(dir) df_metrics %>% write_tsv(str_c(dir, name, ".metrics.tab")) df_roc %>% write_tsv(str_c(dir, name, ".predictions.tab")) ## Importance df_importance <- final_fit %>% extract_workflow() %>% extract_fit_parsnip() %>% vi() 
df_importance %>% write_tsv(str_c(dir, name, ".importance.tab")) ## Figures dir <- "figures/" dir.create(dir) p7 <- wrap_plots(p1, p2, p3, p4, p5, p6, nrow = 3) ggsave(str_c(dir, name, ".plots.pdf"), plot = p7, width = 11.9, height = 7.18, units = "in") # Go back to the working directory setwd("..") }
/R/original_model/ML_replicates.R
no_license
stefanogg/CLOGEN
R
false
false
4,922
r
# Repeat pure genomics model 5 times (100 times on the server) # Library and working directory library(tidyverse) library(tidymodels) library(themis) library(vip) library(patchwork) # Prepare input data f <- list.files("input_data", pattern = "*pheno.geno.Rda", full.names = T) pheno_geno <- readRDS(f) for (i in 1:100){ set.seed(i) print(glue::glue("Starting replicate with seed {i}")) dir <- str_c("seed_", i) dir.create(dir) setwd(dir) # Data splitting splits <- rsample::initial_split(pheno_geno, prop = .8, strata = mortality) df_train <- training(splits) df_test <- testing(splits) # Construct workflow rf_rec <- recipes::recipe(mortality ~ ., data = df_train) %>% themis::step_downsample(mortality) rf_mod <- parsnip::rand_forest( mtry = tune(), min_n = tune(), trees = 500 ) %>% set_mode("classification") %>% set_engine("ranger") rf_wf <- workflows::workflow() %>% add_recipe(rf_rec) %>% add_model(rf_mod) # Resampling set.seed(i) folds <- rsample::vfold_cv(df_train, v = 10, strata = mortality) folds # Train model doParallel::registerDoParallel() rf_grid <- grid_regular( mtry(range = c(1,5)), min_n(range = c(30,40)), levels = 5 ) rf_fit_rs <- rf_wf %>% tune_grid(resamples = folds, grid = rf_grid, control = control_grid(save_pred = TRUE), metrics = metric_set(roc_auc)) # Collect metrics df_metrics <- tune::collect_metrics(rf_fit_rs) metrics_train <- df_metrics %>% slice_max(order_by = mean, n = 1) %>% transmute(dataset = "train", .metric, .estimate = mean) p1 <- rf_fit_rs %>% collect_metrics() %>% filter(.metric == "roc_auc") %>% mutate(min_n = factor(min_n)) %>% ggplot(aes(mtry, mean, color = min_n)) + geom_line(alpha = 0.5, size = 1.5) + geom_point() + labs(y = "AUC") + theme_bw() # Collect predictions df_roc <- rf_fit_rs %>% tune::collect_predictions() p2 <- df_roc %>% group_by(.config) %>% yardstick::roc_curve(truth = mortality, .pred_Died) %>% autoplot() # Chose the best model and fit it best <- rf_fit_rs %>% tune::select_best() final_wf <- rf_wf %>% 
tune::finalize_workflow(best) final_rf <- final_wf %>% extract_spec_parsnip() %>% set_engine(engine = "ranger", importance = "impurity") final_wf <- final_wf %>% workflows::update_model(final_rf) final_fit <- final_wf %>% tune::last_fit(splits) # Final assessment on the test dataset ## Metrics df_metrics <- final_fit %>% collect_metrics() %>% filter(.metric == "roc_auc") %>% mutate(dataset = "test") %>% select(-c(.config, .estimator)) %>% bind_rows(metrics_train) ## Confusion matrix rf_testing_pred <- final_fit %>% collect_predictions() rf_testing_pred <- rf_testing_pred %>% mutate(dataset = "test") p3 <- rf_testing_pred %>% conf_mat(mortality, .pred_class) %>% autoplot(type = "heatmap") ## ROC curve df_roc <- df_roc %>% mutate(dataset = "train") %>% semi_join(best) %>% bind_rows(rf_testing_pred) p4 <- df_roc %>% group_by(dataset) %>% yardstick::roc_curve(truth = mortality, .pred_Died) %>% autoplot() + scale_colour_manual(values = c("red", "blue")) # Here we construct the precision recall curve p5 <- df_roc %>% group_by(dataset) %>% yardstick::pr_curve(truth = mortality, .pred_Died) %>% autoplot() + scale_colour_manual(values = c("red", "blue")) df_metrics <- df_roc %>% group_by(dataset) %>% yardstick::pr_auc(truth = mortality, .pred_Died) %>% select(-.estimator) %>% bind_rows(df_metrics) %>% arrange(desc(.metric), desc(dataset)) ## Feature importance p6 <- final_fit %>% purrr::pluck(".workflow", 1) %>% workflowsets::extract_fit_parsnip() %>% vip::vip() + theme_bw() # Output ## Models name <- str_c("ML_mortality.with_lineages.replicate_", i) dir <- str_c("models/") dir.create(dir) rf_fit_rs %>% saveRDS(str_c(dir, name, ".train_resample.Rda")) final_fit %>% saveRDS(str_c(dir, name, ".last_fit.Rda")) ## Metrics dir <- "processed_data/" dir.create(dir) df_metrics %>% write_tsv(str_c(dir, name, ".metrics.tab")) df_roc %>% write_tsv(str_c(dir, name, ".predictions.tab")) ## Importance df_importance <- final_fit %>% extract_workflow() %>% extract_fit_parsnip() %>% vi() 
df_importance %>% write_tsv(str_c(dir, name, ".importance.tab")) ## Figures dir <- "figures/" dir.create(dir) p7 <- wrap_plots(p1, p2, p3, p4, p5, p6, nrow = 3) ggsave(str_c(dir, name, ".plots.pdf"), plot = p7, width = 11.9, height = 7.18, units = "in") # Go back to the working directory setwd("..") }
### ========================================================================= ### DelayedNaryIsoOp objects ### ------------------------------------------------------------------------- ### ### Representation of a delayed N-ary isometric operation. ### The input arrays must be "conformable" i.e. they all must have the same ### dimensions. ### setClass("DelayedNaryIsoOp", contains="DelayedNaryOp", representation( OP="function", # The function to use to combine the input objects. # Should act as an isomorphism i.e. always return an # array-like object **parallel** to the input objects # (i.e. with the same dimensions). Rargs="list" # Additional right arguments to OP. ), prototype( OP=identity ) ) .arrays_are_conformable <- function(objects) { dims <- lapply(objects, dim) ndims <- lengths(dims) first_ndim <- ndims[[1L]] if (!all(ndims == first_ndim)) return(FALSE) tmp <- unlist(dims, use.names=FALSE) if (is.null(tmp)) return(FALSE) dims <- matrix(tmp, ncol=length(objects)) first_dim <- dims[ , 1L] all(dims == first_dim) } .validate_DelayedNaryIsoOp <- function(x) { ## 'seeds' slot. if (!.arrays_are_conformable(x@seeds)) return("'x@seeds' must be a list of conformable array-like objects") TRUE } setValidity2("DelayedNaryIsoOp", .validate_DelayedNaryIsoOp) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Constructor ### new_DelayedNaryIsoOp <- function(OP=identity, seed=new("array"), ..., Rargs=list()) { OP <- match.fun(OP) seeds <- unname(list(seed, ...)) if (!.arrays_are_conformable(seeds)) stop(wmsg("non-conformable array-like objects")) new2("DelayedNaryIsoOp", seeds=seeds, OP=OP, Rargs=Rargs, check=FALSE) } ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Display ### ### S3/S4 combo for summary.DelayedNaryIsoOp .DelayedNaryIsoOp_summary <- function(object) "N-ary iso op" summary.DelayedNaryIsoOp <- function(object, ...) .DelayedNaryIsoOp_summary(object, ...) 
setMethod("summary", "DelayedNaryIsoOp", summary.DelayedNaryIsoOp) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Seed contract ### setMethod("dim", "DelayedNaryIsoOp", function(x) dim(x@seeds[[1L]])) setMethod("dimnames", "DelayedNaryIsoOp", function(x) get_first_non_NULL_dimnames(x@seeds) ) setMethod("extract_array", "DelayedNaryIsoOp", function(x, index) { arrays <- lapply(x@seeds, extract_array, index) do.call(x@OP, c(arrays, x@Rargs)) } ) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Propagation of sparsity ### setMethod("is_sparse", "DelayedNaryIsoOp", function(x) { ok <- vapply(x@seeds, is_sparse, logical(1), USE.NAMES=FALSE) if (!all(ok)) return(FALSE) if (length(x@Rargs) != 0L) return(FALSE) ## Structural sparsity will be propagated if the operation in ## x@OP preserves the zeros. To find out whether zeros are preserved ## or not, we replace each current seed with an array of one "zero", ## that is, with an ordinary array of the same number of dimensions ## and type as the seed, but with a single "zero" element. Then we ## apply the n-ary operation in x@OP to them and see whether the ## zero were preserved or not. seed_ndim <- length(dim(x@seeds[[1L]])) x@seeds <- lapply(x@seeds, function(seed) make_one_zero_array(type(seed), seed_ndim)) ## Same as 'as.array(x)' but doesn't try to propagate the dimnames. a0 <- extract_array(x, vector("list", length=seed_ndim)) is_filled_with_zeros(a0) } ) ### 'is_sparse(x)' is assumed to be TRUE and 'index' is assumed to ### not contain duplicates. See "extract_sparse_array() Terms of Use" ### in SparseArraySeed-class.R setMethod("extract_sparse_array", "DelayedNaryIsoOp", function(x, index) { stop("NOT IMPLEMENTED YET!") } ) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Backward compatibility with DelayedArray < 0.5.24 ### ### In DelayedArray 0.5.24 the ConformableSeedCombiner class got renamed ### DelayedNaryIsoOp. 
DelayedArray objects serialized with DelayedArray < ### 0.5.24 might contain ConformableSeedCombiner instances nested in their ### "seed" slot so we need to keep the class around for now. ### setClass("ConformableSeedCombiner", contains="DelayedNaryIsoOp") setMethod("updateObject", "ConformableSeedCombiner", function(object, ..., verbose=FALSE) { object <- new2("DelayedNaryIsoOp", seeds=object@seeds, OP=object@COMBINING_OP, Rargs=object@Rargs) callNextMethod() } )
/R/DelayedNaryIsoOp-class.R
no_license
LTLA/DelayedArray
R
false
false
5,039
r
### ========================================================================= ### DelayedNaryIsoOp objects ### ------------------------------------------------------------------------- ### ### Representation of a delayed N-ary isometric operation. ### The input arrays must be "conformable" i.e. they all must have the same ### dimensions. ### setClass("DelayedNaryIsoOp", contains="DelayedNaryOp", representation( OP="function", # The function to use to combine the input objects. # Should act as an isomorphism i.e. always return an # array-like object **parallel** to the input objects # (i.e. with the same dimensions). Rargs="list" # Additional right arguments to OP. ), prototype( OP=identity ) ) .arrays_are_conformable <- function(objects) { dims <- lapply(objects, dim) ndims <- lengths(dims) first_ndim <- ndims[[1L]] if (!all(ndims == first_ndim)) return(FALSE) tmp <- unlist(dims, use.names=FALSE) if (is.null(tmp)) return(FALSE) dims <- matrix(tmp, ncol=length(objects)) first_dim <- dims[ , 1L] all(dims == first_dim) } .validate_DelayedNaryIsoOp <- function(x) { ## 'seeds' slot. if (!.arrays_are_conformable(x@seeds)) return("'x@seeds' must be a list of conformable array-like objects") TRUE } setValidity2("DelayedNaryIsoOp", .validate_DelayedNaryIsoOp) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Constructor ### new_DelayedNaryIsoOp <- function(OP=identity, seed=new("array"), ..., Rargs=list()) { OP <- match.fun(OP) seeds <- unname(list(seed, ...)) if (!.arrays_are_conformable(seeds)) stop(wmsg("non-conformable array-like objects")) new2("DelayedNaryIsoOp", seeds=seeds, OP=OP, Rargs=Rargs, check=FALSE) } ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Display ### ### S3/S4 combo for summary.DelayedNaryIsoOp .DelayedNaryIsoOp_summary <- function(object) "N-ary iso op" summary.DelayedNaryIsoOp <- function(object, ...) .DelayedNaryIsoOp_summary(object, ...) 
setMethod("summary", "DelayedNaryIsoOp", summary.DelayedNaryIsoOp) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Seed contract ### setMethod("dim", "DelayedNaryIsoOp", function(x) dim(x@seeds[[1L]])) setMethod("dimnames", "DelayedNaryIsoOp", function(x) get_first_non_NULL_dimnames(x@seeds) ) setMethod("extract_array", "DelayedNaryIsoOp", function(x, index) { arrays <- lapply(x@seeds, extract_array, index) do.call(x@OP, c(arrays, x@Rargs)) } ) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Propagation of sparsity ### setMethod("is_sparse", "DelayedNaryIsoOp", function(x) { ok <- vapply(x@seeds, is_sparse, logical(1), USE.NAMES=FALSE) if (!all(ok)) return(FALSE) if (length(x@Rargs) != 0L) return(FALSE) ## Structural sparsity will be propagated if the operation in ## x@OP preserves the zeros. To find out whether zeros are preserved ## or not, we replace each current seed with an array of one "zero", ## that is, with an ordinary array of the same number of dimensions ## and type as the seed, but with a single "zero" element. Then we ## apply the n-ary operation in x@OP to them and see whether the ## zero were preserved or not. seed_ndim <- length(dim(x@seeds[[1L]])) x@seeds <- lapply(x@seeds, function(seed) make_one_zero_array(type(seed), seed_ndim)) ## Same as 'as.array(x)' but doesn't try to propagate the dimnames. a0 <- extract_array(x, vector("list", length=seed_ndim)) is_filled_with_zeros(a0) } ) ### 'is_sparse(x)' is assumed to be TRUE and 'index' is assumed to ### not contain duplicates. See "extract_sparse_array() Terms of Use" ### in SparseArraySeed-class.R setMethod("extract_sparse_array", "DelayedNaryIsoOp", function(x, index) { stop("NOT IMPLEMENTED YET!") } ) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Backward compatibility with DelayedArray < 0.5.24 ### ### In DelayedArray 0.5.24 the ConformableSeedCombiner class got renamed ### DelayedNaryIsoOp. 
DelayedArray objects serialized with DelayedArray < ### 0.5.24 might contain ConformableSeedCombiner instances nested in their ### "seed" slot so we need to keep the class around for now. ### setClass("ConformableSeedCombiner", contains="DelayedNaryIsoOp") setMethod("updateObject", "ConformableSeedCombiner", function(object, ..., verbose=FALSE) { object <- new2("DelayedNaryIsoOp", seeds=object@seeds, OP=object@COMBINING_OP, Rargs=object@Rargs) callNextMethod() } )
### calcDistMax.R --- #---------------------------------------------------------------------- ## author: Brice Ozenne ## created: jun 21 2017 (16:44) ## Version: ## last-updated: sep 21 2018 (16:24) ## By: Brice Ozenne ## Update #: 615 #---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: #---------------------------------------------------------------------- ## ### Code: ## * documentation #' @title Adjust the p.values Using the Quantiles of the Max Statistic #' @description Adjust the p.values using the quantiles of the max statistic. #' @name calcDistMax #' #' @param statistic [numeric vector] the observed Wald statistic. #' Each statistic correspond to a null hypothesis (i.e. a coefficient) that one wish to test. #' @param iid [matrix] zero-mean iid decomposition of the coefficient used to compute the statistic. #' @param df [numeric] the degree of freedom defining the multivariate Student's t distribution. #' If \code{NULL} the multivariate Gaussian distribution will be used instead. #' @param iid.previous [matrix, EXPERIMENTAL] zero-mean iid decomposition of previously tested coefficient. #' @param quantile.compute [logical] should the rejection quantile be computed? #' @param quantile.previous [numeric, EXPERIMENTAL] rejection quantiles of the previously tested hypotheses. If not \code{NULL} the values should correspond the variable in to the first column(s) of the argument \code{iid.previous}. #' @param method [character] the method used to compute the p-values. #' See the output of \code{lava.options()$search.calcMaxDist} for the possible values. #' @param alpha [numeric 0-1] the significance cutoff for the p-values. #' When the p-value is below, the corresponding link will be retained. #' @param cpus [integer >0] the number of processors to use. #' If greater than 1, the computation of the p-value relative to each test is performed in parallel. 
#' @param cl [cluster] a parallel socket cluster generated by \code{parallel::makeCluster} #' that has been registered using \code{registerDoParallel}. #' @param n.sim [integer >0] the number of bootstrap simulations used to compute each p-values. #' Disregarded when the p-values are computed using numerical integration. #' @param n.repmax [integer >0] the maximum number of rejection for each bootstrap sample before switching to a new bootstrap sample. #' Only relevant when conditioning on a previous test. #' Disregarded when the p-values are computed using numerical integration. #' @param trace [logical] should the execution of the function be traced? #' #' @return A list containing #' \itemize{ #' \item p.adjust: the adjusted p-values. #' \item z: the rejection threshold. #' \item Sigma: the correlation matrix between the test statistic. #' \item correctedLevel: the alpha level corrected for conditioning on previous tests. #' } #' #' @examples #' library(mvtnorm) #' #' set.seed(10) #' n <- 100 #' p <- 4 #' link <- letters[1:p] #' n.sim <- 1e3 # number of bootstrap simulations #' #' #### test - not conditional #### #' X.iid <- rmvnorm(n, mean = rep(0,p), sigma = diag(1,p)) #' colnames(X.iid) <- link #' statistic <- setNames(1:p,link) #' #' #' r1 <- calcDistMaxIntegral(statistic = statistic, iid = X.iid, #' trace = FALSE, alpha = 0.05, df = 1e6) #' #' r3 <- calcDistMaxBootstrap(statistic = statistic, iid = X.iid, #' method = "residual", #' trace = FALSE, alpha = 0.05, n.sim = n.sim) #' #' r4 <- calcDistMaxBootstrap(statistic = statistic, iid = X.iid, #' method = "wild", #' trace = FALSE, alpha = 0.05, n.sim = n.sim) #' #' rbind(integration = c(r1$p.adjust, quantile = r1$z), #' bootResidual = c(r3$p.adjust, quantile = r3$z), #' bootWild = c(r4$p.adjust, quantile = r4$z)) #' #' #### test - conditional #### #' \dontrun{ #' Z.iid <- rmvnorm(n, mean = rep(0,p+1), sigma = diag(1,p+1)) #' seqQuantile <- qmvnorm(p = 0.95, delta = rep(0,p+1), sigma = diag(1,p+1), #' tail = 
"both.tails")$quantile #' #' r1c <- calcDistMaxIntegral(statistic = statistic, iid = X.iid, #' iid.previous = Z.iid, quantile.previous = seqQuantile, #' trace = FALSE, alpha = 0.05, df = NULL) #' #' r3c <- calcDistMaxBootstrap(statistic = statistic, iid = X.iid, #' iid.previous = Z.iid, quantile.previous = seqQuantile, method = "residual", #' trace = FALSE, alpha = 0.05, n.sim = n.sim) #' #' r4c <- calcDistMaxBootstrap(statistic = statistic, iid = X.iid, #' iid.previous = Z.iid, quantile.previous = seqQuantile, method = "wild", #' trace = FALSE, alpha = 0.05, n.sim = n.sim) #' #' rbind(integration = c(r1c$p.adjust, quantile = r1c$z), #' bootResidual = c(r3c$p.adjust, quantile = r3c$z), #' bootWild = c(r4c$p.adjust, quantile = r4c$z)) #' } #' @concept modelsearch #' @concept post-selection inference ## * calcDistMaxIntegral #' @rdname calcDistMax #' @export calcDistMaxIntegral <- function(statistic, iid, df, iid.previous = NULL, quantile.previous = NULL, quantile.compute = lava.options()$search.calc.quantile.int, alpha, cpus = 1, cl = NULL, trace){ ## ** normalize arguments p.iid <- NCOL(iid) n <- NROW(iid) conditional <- length(quantile.previous) if(length(quantile.previous)>1){ stop("Can only condition on one previous step \n") } if(is.null(df)){ distribution.statistic <- "gaussian" }else{ distribution.statistic <- "student" } iid.all <- cbind(iid,iid.previous) index.new <- 1:NCOL(iid) index.previous <- setdiff(1:NCOL(iid.all),index.new) p.iid.all <- NCOL(iid.all) ## ** Compute the correlation matrix between the test statistics # center to be under the null # scale since we want the distribution of the Wald statistic (i.e. 
statistic with unit variance) iid.statistic <- scale(iid.all, center = TRUE, scale = TRUE) Sigma.statistic <- stats::cov(iid.statistic, use = "pairwise.complete.obs") out <- list(p.adjust = NULL, z = NULL, Sigma = Sigma.statistic[index.new,index.new,drop=FALSE]) ## ** Definition of the functions used to compute the quantiles warperQ <- function(alpha){ .calcQmaxIntegration(alpha = alpha, p = p.iid, Sigma = Sigma.statistic[index.new,index.new,drop=FALSE], df = df, distribution = distribution.statistic) } warperP <- function(index){ .calcPmaxIntegration(statistic = statistic[index], p = p.iid, Sigma = Sigma.statistic[index.new,index.new,drop=FALSE], df = df, distribution = distribution.statistic) } ## ** correction for conditioning on the previous steps if(conditional==TRUE){ out$correctedLevel <- calcType1postSelection(1-alpha, quantile.previous = quantile.previous, mu = rep(0,p.iid.all), Sigma = Sigma.statistic, distribution = distribution.statistic, df = df) alpha <- 1-out$correctedLevel }else{ out$correctedLevel <- NA } if(quantile.compute){ out$z <- warperQ(alpha) }else{ out$z <- NA } ## ** start parallel computation init.cpus <- (cpus > 1 && is.null(cl)) if(init.cpus){ ## define cluster if(trace>0){ cl <- parallel::makeCluster(cpus, outfile = "") }else{ cl <- parallel::makeCluster(cpus) } ## link to foreach doParallel::registerDoParallel(cl) } ## ** Computation if(trace > 0){ cat("Computation of multivariate student probabilities to adjust the p.values \n") } if(cpus > 1){ ## *** parallel computations if(trace>0){ pb <- utils::txtProgressBar(max = length(index.new), style = 3) } ## export package parallel::clusterCall(cl, fun = function(x){ suppressPackageStartupMessages(requireNamespace("mvtnorm", quietly = TRUE)) }) value <- NULL # [:for CRAN check] foreach out$p.adjust <- foreach::`%dopar%`( foreach::foreach(value = 1:length(statistic), .export = c(".calcPmaxIntegration"), .combine = "c"), { if(trace>0){utils::setTxtProgressBar(pb, value)} 
return(warperP(index.new[value])) }) if(trace>0){close(pb)} }else{ ## *** sequential computations if(trace>0){ out$p.adjust <- pbapply::pbsapply(1:length(statistic), function(iStat){ warperP(index.new[iStat]) }) }else{ out$p.adjust <- sapply(1:length(statistic), function(iStat){ warperP(index.new[iStat]) }) } } out$p.adjust <- stats::setNames(out$p.adjust, names(statistic)) ## ** end parallel computation if(init.cpus){ parallel::stopCluster(cl) } ## ** export return(out) } ## * calcDistMaxBootstrap #' @rdname calcDistMax #' @export calcDistMaxBootstrap <- function(statistic, iid, iid.previous = NULL, quantile.previous = NULL, method, alpha, cpus = 1, cl = NULL, n.sim, trace, n.repmax = 100){ ## ** normalize arguments n <- NROW(iid) conditional <- length(quantile.previous)>0 if(length(quantile.previous)>1){ stop("Can only condition on one previous step \n") } iid.all <- cbind(iid,iid.previous) index.new <- 1:NCOL(iid) index.previous <- setdiff(1:NCOL(iid.all),index.new) ## ** Function used for the simulations warperBoot <- .bootMaxDist ## ** Compute the correlation matrix between the test statistics # center to be under the null # scale since we want the distribution of the Wald statistic (i.e. 
statistic with unit variance) iid.statistic <- scale(iid.all, center = TRUE, scale = TRUE) Sigma.statistic <- stats::cov(iid.statistic, use = "pairwise.complete.obs") ## ** start parallel computation init.cpus <- (cpus > 1 && is.null(cl)) if(init.cpus){ ## define cluster if(trace>0){ cl <- parallel::makeCluster(cpus, outfile = "") }else{ cl <- parallel::makeCluster(cpus) } ## link to foreach doParallel::registerDoParallel(cl) } ## ** Computation if(trace > 0){ cat("Bootsrap simulations to get the 95% quantile of the max statistic: ") } if(cpus>1){ n.simCpus <- rep(round(n.sim/cpus),cpus) n.simCpus[1] <- n.sim-sum(n.simCpus[-1]) i <- NULL # [:for CRAN check] foreach distMax <- foreach::`%dopar%`( foreach::foreach(i = 1:cpus, .packages = c("MASS"), .export = "calcDistMax", .combine = "c"),{ replicate(n.simCpus[i], warperBoot(iid = iid.all, sigma = Sigma.statistic, n = n, method = method, index.new = index.new, index.previous = index.previous, quantile.previous = quantile.previous, n.repmax = n.repmax)) }) }else{ if(trace>0){ distMax <- pbapply::pbsapply(1:n.sim, warperBoot, method = method, iid = iid.all, sigma = Sigma.statistic, n = n, index.new = index.new, index.previous = index.previous, quantile.previous = quantile.previous, n.repmax = n.repmax) }else{ distMax <- sapply(1:n.sim, warperBoot, method = method, iid = iid.all, sigma = Sigma.statistic, n = n, index.new = index.new, index.previous = index.previous, quantile.previous = quantile.previous, n.repmax = n.repmax) } } if(trace > 0){ cat("done \n") } ## ** end parallel calculation if(init.cpus){ parallel::stopCluster(cl) } ## ** export out <- list() out$z <- stats::quantile(distMax, probs = 1-alpha, na.rm = TRUE) out$p.adjust <- sapply(abs(statistic), function(x){mean(distMax>x,na.rm=TRUE)}) out$Sigma <- Sigma.statistic out$correctedLevel <- NA return(out) } ## * .calcQmaxIntegration: numerical integration to compute the critical threshold .calcQmaxIntegration <- function(alpha, p, Sigma, df, distribution){ 
if(distribution == "gaussian"){ if(p==1){ q.alpha <- stats::qnorm(1-alpha, mean = 0, sd = 1) }else{ q.alpha <- mvtnorm::qmvnorm(1-alpha, mean = rep(0,p), corr = Sigma, tail = "both.tails")$quantile } }else if(distribution == "student"){ if(p==1){ q.alpha <- stats::qt(1-alpha, df = df) }else{ q.alpha <- mvtnorm::qmvt(1-alpha, delta = rep(0,p), corr = Sigma, df = df, tail = "both.tails")$quantile } } return(q.alpha) } ## * .calcPmaxIntegration_firstStep: numerical integration to compute the p.values .calcPmaxIntegration <- function(statistic, p, Sigma, df, distribution){ value <- abs(statistic) if(!is.na(value)){ if(distribution == "gaussian"){ if(p==1){ p <- stats::pnorm(value, mean = 0, sd = Sigma)-stats::pnorm(-value, mean = 0, sd = Sigma) }else{ p <- mvtnorm::pmvnorm(lower = -value, upper = value, mean = rep(0, p), corr = Sigma) } }else if(distribution == "student"){ if(p==1){ p <- stats::pt(value, df = df)-stats::pt(-value, df = df) }else{ p <- mvtnorm::pmvt(lower = -value, upper = value, delta = rep(0, p), corr = Sigma, df = df) } } return(1-p) }else{ return(NA) } } ## * .bootMaxDist: bootstrap simulation .bootMaxDist <- function(iid, sigma, n, method, index.new, index.previous, quantile.previous, n.repmax, ...){ iRep <- 0 cv <- FALSE while(iRep < n.repmax && cv == FALSE){ ## ** resample to obtain a new influence function if(method == "residual"){ iid.sim <- MASS::mvrnorm(n,rep(0,NCOL(sigma)),sigma) }else if(method == "wild"){ e <- stats::rnorm(n,mean=0,sd=1) iid.sim <- sapply(1:NCOL(sigma),function(x){e*iid[,x]}) } if(!is.null(quantile.previous)){ iid.previous <- iid.sim[,index.previous] test.previous <- apply(iid.previous,2,function(x){sqrt(n)*mean(x)/stats::sd(x)}) max.previous <- max(abs(test.previous)) if(max.previous<quantile.previous){ iRep <- iRep + 1 }else{ iid.sim <- iid.sim[,index.new] cv <- TRUE } }else{ cv <- TRUE } } ## ** compute the bootstrap test statistic if(cv){ Test <- apply(iid.sim,2,function(x){sqrt(n)*mean(x)/stats::sd(x)}) }else{ Test <- 
NA } return(max(abs(Test))) } #---------------------------------------------------------------------- ### calcDistMax.R ends here
/R/calcDistMax.R
no_license
kkholst/lavaSearch2
R
false
false
16,686
r
### calcDistMax.R --- #---------------------------------------------------------------------- ## author: Brice Ozenne ## created: jun 21 2017 (16:44) ## Version: ## last-updated: sep 21 2018 (16:24) ## By: Brice Ozenne ## Update #: 615 #---------------------------------------------------------------------- ## ### Commentary: ## ### Change Log: #---------------------------------------------------------------------- ## ### Code: ## * documentation #' @title Adjust the p.values Using the Quantiles of the Max Statistic #' @description Adjust the p.values using the quantiles of the max statistic. #' @name calcDistMax #' #' @param statistic [numeric vector] the observed Wald statistic. #' Each statistic correspond to a null hypothesis (i.e. a coefficient) that one wish to test. #' @param iid [matrix] zero-mean iid decomposition of the coefficient used to compute the statistic. #' @param df [numeric] the degree of freedom defining the multivariate Student's t distribution. #' If \code{NULL} the multivariate Gaussian distribution will be used instead. #' @param iid.previous [matrix, EXPERIMENTAL] zero-mean iid decomposition of previously tested coefficient. #' @param quantile.compute [logical] should the rejection quantile be computed? #' @param quantile.previous [numeric, EXPERIMENTAL] rejection quantiles of the previously tested hypotheses. If not \code{NULL} the values should correspond the variable in to the first column(s) of the argument \code{iid.previous}. #' @param method [character] the method used to compute the p-values. #' See the output of \code{lava.options()$search.calcMaxDist} for the possible values. #' @param alpha [numeric 0-1] the significance cutoff for the p-values. #' When the p-value is below, the corresponding link will be retained. #' @param cpus [integer >0] the number of processors to use. #' If greater than 1, the computation of the p-value relative to each test is performed in parallel. 
#' @param cl [cluster] a parallel socket cluster generated by \code{parallel::makeCluster} #' that has been registered using \code{registerDoParallel}. #' @param n.sim [integer >0] the number of bootstrap simulations used to compute each p-values. #' Disregarded when the p-values are computed using numerical integration. #' @param n.repmax [integer >0] the maximum number of rejection for each bootstrap sample before switching to a new bootstrap sample. #' Only relevant when conditioning on a previous test. #' Disregarded when the p-values are computed using numerical integration. #' @param trace [logical] should the execution of the function be traced? #' #' @return A list containing #' \itemize{ #' \item p.adjust: the adjusted p-values. #' \item z: the rejection threshold. #' \item Sigma: the correlation matrix between the test statistic. #' \item correctedLevel: the alpha level corrected for conditioning on previous tests. #' } #' #' @examples #' library(mvtnorm) #' #' set.seed(10) #' n <- 100 #' p <- 4 #' link <- letters[1:p] #' n.sim <- 1e3 # number of bootstrap simulations #' #' #### test - not conditional #### #' X.iid <- rmvnorm(n, mean = rep(0,p), sigma = diag(1,p)) #' colnames(X.iid) <- link #' statistic <- setNames(1:p,link) #' #' #' r1 <- calcDistMaxIntegral(statistic = statistic, iid = X.iid, #' trace = FALSE, alpha = 0.05, df = 1e6) #' #' r3 <- calcDistMaxBootstrap(statistic = statistic, iid = X.iid, #' method = "residual", #' trace = FALSE, alpha = 0.05, n.sim = n.sim) #' #' r4 <- calcDistMaxBootstrap(statistic = statistic, iid = X.iid, #' method = "wild", #' trace = FALSE, alpha = 0.05, n.sim = n.sim) #' #' rbind(integration = c(r1$p.adjust, quantile = r1$z), #' bootResidual = c(r3$p.adjust, quantile = r3$z), #' bootWild = c(r4$p.adjust, quantile = r4$z)) #' #' #### test - conditional #### #' \dontrun{ #' Z.iid <- rmvnorm(n, mean = rep(0,p+1), sigma = diag(1,p+1)) #' seqQuantile <- qmvnorm(p = 0.95, delta = rep(0,p+1), sigma = diag(1,p+1), #' tail = 
"both.tails")$quantile #' #' r1c <- calcDistMaxIntegral(statistic = statistic, iid = X.iid, #' iid.previous = Z.iid, quantile.previous = seqQuantile, #' trace = FALSE, alpha = 0.05, df = NULL) #' #' r3c <- calcDistMaxBootstrap(statistic = statistic, iid = X.iid, #' iid.previous = Z.iid, quantile.previous = seqQuantile, method = "residual", #' trace = FALSE, alpha = 0.05, n.sim = n.sim) #' #' r4c <- calcDistMaxBootstrap(statistic = statistic, iid = X.iid, #' iid.previous = Z.iid, quantile.previous = seqQuantile, method = "wild", #' trace = FALSE, alpha = 0.05, n.sim = n.sim) #' #' rbind(integration = c(r1c$p.adjust, quantile = r1c$z), #' bootResidual = c(r3c$p.adjust, quantile = r3c$z), #' bootWild = c(r4c$p.adjust, quantile = r4c$z)) #' } #' @concept modelsearch #' @concept post-selection inference ## * calcDistMaxIntegral #' @rdname calcDistMax #' @export calcDistMaxIntegral <- function(statistic, iid, df, iid.previous = NULL, quantile.previous = NULL, quantile.compute = lava.options()$search.calc.quantile.int, alpha, cpus = 1, cl = NULL, trace){ ## ** normalize arguments p.iid <- NCOL(iid) n <- NROW(iid) conditional <- length(quantile.previous) if(length(quantile.previous)>1){ stop("Can only condition on one previous step \n") } if(is.null(df)){ distribution.statistic <- "gaussian" }else{ distribution.statistic <- "student" } iid.all <- cbind(iid,iid.previous) index.new <- 1:NCOL(iid) index.previous <- setdiff(1:NCOL(iid.all),index.new) p.iid.all <- NCOL(iid.all) ## ** Compute the correlation matrix between the test statistics # center to be under the null # scale since we want the distribution of the Wald statistic (i.e. 
statistic with unit variance) iid.statistic <- scale(iid.all, center = TRUE, scale = TRUE) Sigma.statistic <- stats::cov(iid.statistic, use = "pairwise.complete.obs") out <- list(p.adjust = NULL, z = NULL, Sigma = Sigma.statistic[index.new,index.new,drop=FALSE]) ## ** Definition of the functions used to compute the quantiles warperQ <- function(alpha){ .calcQmaxIntegration(alpha = alpha, p = p.iid, Sigma = Sigma.statistic[index.new,index.new,drop=FALSE], df = df, distribution = distribution.statistic) } warperP <- function(index){ .calcPmaxIntegration(statistic = statistic[index], p = p.iid, Sigma = Sigma.statistic[index.new,index.new,drop=FALSE], df = df, distribution = distribution.statistic) } ## ** correction for conditioning on the previous steps if(conditional==TRUE){ out$correctedLevel <- calcType1postSelection(1-alpha, quantile.previous = quantile.previous, mu = rep(0,p.iid.all), Sigma = Sigma.statistic, distribution = distribution.statistic, df = df) alpha <- 1-out$correctedLevel }else{ out$correctedLevel <- NA } if(quantile.compute){ out$z <- warperQ(alpha) }else{ out$z <- NA } ## ** start parallel computation init.cpus <- (cpus > 1 && is.null(cl)) if(init.cpus){ ## define cluster if(trace>0){ cl <- parallel::makeCluster(cpus, outfile = "") }else{ cl <- parallel::makeCluster(cpus) } ## link to foreach doParallel::registerDoParallel(cl) } ## ** Computation if(trace > 0){ cat("Computation of multivariate student probabilities to adjust the p.values \n") } if(cpus > 1){ ## *** parallel computations if(trace>0){ pb <- utils::txtProgressBar(max = length(index.new), style = 3) } ## export package parallel::clusterCall(cl, fun = function(x){ suppressPackageStartupMessages(requireNamespace("mvtnorm", quietly = TRUE)) }) value <- NULL # [:for CRAN check] foreach out$p.adjust <- foreach::`%dopar%`( foreach::foreach(value = 1:length(statistic), .export = c(".calcPmaxIntegration"), .combine = "c"), { if(trace>0){utils::setTxtProgressBar(pb, value)} 
return(warperP(index.new[value])) }) if(trace>0){close(pb)} }else{ ## *** sequential computations if(trace>0){ out$p.adjust <- pbapply::pbsapply(1:length(statistic), function(iStat){ warperP(index.new[iStat]) }) }else{ out$p.adjust <- sapply(1:length(statistic), function(iStat){ warperP(index.new[iStat]) }) } } out$p.adjust <- stats::setNames(out$p.adjust, names(statistic)) ## ** end parallel computation if(init.cpus){ parallel::stopCluster(cl) } ## ** export return(out) } ## * calcDistMaxBootstrap #' @rdname calcDistMax #' @export calcDistMaxBootstrap <- function(statistic, iid, iid.previous = NULL, quantile.previous = NULL, method, alpha, cpus = 1, cl = NULL, n.sim, trace, n.repmax = 100){ ## ** normalize arguments n <- NROW(iid) conditional <- length(quantile.previous)>0 if(length(quantile.previous)>1){ stop("Can only condition on one previous step \n") } iid.all <- cbind(iid,iid.previous) index.new <- 1:NCOL(iid) index.previous <- setdiff(1:NCOL(iid.all),index.new) ## ** Function used for the simulations warperBoot <- .bootMaxDist ## ** Compute the correlation matrix between the test statistics # center to be under the null # scale since we want the distribution of the Wald statistic (i.e. 
statistic with unit variance) iid.statistic <- scale(iid.all, center = TRUE, scale = TRUE) Sigma.statistic <- stats::cov(iid.statistic, use = "pairwise.complete.obs") ## ** start parallel computation init.cpus <- (cpus > 1 && is.null(cl)) if(init.cpus){ ## define cluster if(trace>0){ cl <- parallel::makeCluster(cpus, outfile = "") }else{ cl <- parallel::makeCluster(cpus) } ## link to foreach doParallel::registerDoParallel(cl) } ## ** Computation if(trace > 0){ cat("Bootsrap simulations to get the 95% quantile of the max statistic: ") } if(cpus>1){ n.simCpus <- rep(round(n.sim/cpus),cpus) n.simCpus[1] <- n.sim-sum(n.simCpus[-1]) i <- NULL # [:for CRAN check] foreach distMax <- foreach::`%dopar%`( foreach::foreach(i = 1:cpus, .packages = c("MASS"), .export = "calcDistMax", .combine = "c"),{ replicate(n.simCpus[i], warperBoot(iid = iid.all, sigma = Sigma.statistic, n = n, method = method, index.new = index.new, index.previous = index.previous, quantile.previous = quantile.previous, n.repmax = n.repmax)) }) }else{ if(trace>0){ distMax <- pbapply::pbsapply(1:n.sim, warperBoot, method = method, iid = iid.all, sigma = Sigma.statistic, n = n, index.new = index.new, index.previous = index.previous, quantile.previous = quantile.previous, n.repmax = n.repmax) }else{ distMax <- sapply(1:n.sim, warperBoot, method = method, iid = iid.all, sigma = Sigma.statistic, n = n, index.new = index.new, index.previous = index.previous, quantile.previous = quantile.previous, n.repmax = n.repmax) } } if(trace > 0){ cat("done \n") } ## ** end parallel calculation if(init.cpus){ parallel::stopCluster(cl) } ## ** export out <- list() out$z <- stats::quantile(distMax, probs = 1-alpha, na.rm = TRUE) out$p.adjust <- sapply(abs(statistic), function(x){mean(distMax>x,na.rm=TRUE)}) out$Sigma <- Sigma.statistic out$correctedLevel <- NA return(out) } ## * .calcQmaxIntegration: numerical integration to compute the critical threshold .calcQmaxIntegration <- function(alpha, p, Sigma, df, distribution){ 
if(distribution == "gaussian"){ if(p==1){ q.alpha <- stats::qnorm(1-alpha, mean = 0, sd = 1) }else{ q.alpha <- mvtnorm::qmvnorm(1-alpha, mean = rep(0,p), corr = Sigma, tail = "both.tails")$quantile } }else if(distribution == "student"){ if(p==1){ q.alpha <- stats::qt(1-alpha, df = df) }else{ q.alpha <- mvtnorm::qmvt(1-alpha, delta = rep(0,p), corr = Sigma, df = df, tail = "both.tails")$quantile } } return(q.alpha) } ## * .calcPmaxIntegration_firstStep: numerical integration to compute the p.values .calcPmaxIntegration <- function(statistic, p, Sigma, df, distribution){ value <- abs(statistic) if(!is.na(value)){ if(distribution == "gaussian"){ if(p==1){ p <- stats::pnorm(value, mean = 0, sd = Sigma)-stats::pnorm(-value, mean = 0, sd = Sigma) }else{ p <- mvtnorm::pmvnorm(lower = -value, upper = value, mean = rep(0, p), corr = Sigma) } }else if(distribution == "student"){ if(p==1){ p <- stats::pt(value, df = df)-stats::pt(-value, df = df) }else{ p <- mvtnorm::pmvt(lower = -value, upper = value, delta = rep(0, p), corr = Sigma, df = df) } } return(1-p) }else{ return(NA) } } ## * .bootMaxDist: bootstrap simulation .bootMaxDist <- function(iid, sigma, n, method, index.new, index.previous, quantile.previous, n.repmax, ...){ iRep <- 0 cv <- FALSE while(iRep < n.repmax && cv == FALSE){ ## ** resample to obtain a new influence function if(method == "residual"){ iid.sim <- MASS::mvrnorm(n,rep(0,NCOL(sigma)),sigma) }else if(method == "wild"){ e <- stats::rnorm(n,mean=0,sd=1) iid.sim <- sapply(1:NCOL(sigma),function(x){e*iid[,x]}) } if(!is.null(quantile.previous)){ iid.previous <- iid.sim[,index.previous] test.previous <- apply(iid.previous,2,function(x){sqrt(n)*mean(x)/stats::sd(x)}) max.previous <- max(abs(test.previous)) if(max.previous<quantile.previous){ iRep <- iRep + 1 }else{ iid.sim <- iid.sim[,index.new] cv <- TRUE } }else{ cv <- TRUE } } ## ** compute the bootstrap test statistic if(cv){ Test <- apply(iid.sim,2,function(x){sqrt(n)*mean(x)/stats::sd(x)}) }else{ Test <- 
NA } return(max(abs(Test))) } #---------------------------------------------------------------------- ### calcDistMax.R ends here
### to be careful about here: # each time another env block is used as env.only --> use another 'both' so as not to overwrite sth (or solve this in any way) # for combination approach: ### just cbind the env in order: fam, env.new, interactions ### bild list with the right.names and give it to function as argument # be aware of: interactions have the highest dimension!! if a big selection of snps is used then something has to be changed # however!!: we never can take into account so many interactions so we should stay with the 19 snps here anyway!! ### lit snps setwd("~/../../../storage/cmbstore/norbert.krautenbacher/1-Projects/2-Asthma") source("~/../../../storage/cmbstore/norbert.krautenbacher/1-Projects/1-R-functions/3-wrp-learn-on-snp-env.R") source("~/../../../storage/cmbstore/norbert.krautenbacher/1-Projects/1-R-functions/0-functions.R") library(parallel) # load imputed data sets: load(paste0("Data/5imps.rda")) ### data load("LiteratureSNPs/Data/dat.lit.snps.19.RData") load("ImputedSNPs/Data/mat.imp.pruned.rda") load("Data/gabriel.1707.Rdata") load("Data/dat.outcome.rda") # load("../../Data/x.env.imp.rda") load("Data/x.wheeze_onset.rda") # # new: leave out a final test set # load("Data/fin.ind.rda") #### MARKNEW # take only farm and no innsbruck load("Data/ind.part.rda") train.fin.ind <- setdiff(ind.part$farm, ind.part$farm.inn) do.for.imp.set <- function(i, env.ifs, snp.as){ eval(parse(text=paste0("x.env.imp <- imp",i))) #### MARKNEW ## remove variables farm and center for the corresponding subgroupanalysis x.env.imp$center <- NULL x.env.imp$farm <- NULL #### MARKNEW # thus no interaction with farm either: x.env <- model.matrix(~. 
# + farm:Sex_female ,x.env.imp)[,-1][ train.fin.ind,] # outcome: ################# adjust snp.data, env.data and outcome (same number of observations etc) snp.data <- X[ train.fin.ind,] # snp.data for interactions snp.for.int <- dat.lit.snps.19[train.fin.ind, -1] ######### change: we define several data sets replacing x.env ### confounders: always to include - center, age, month of birth cnames.env <- colnames(x.env) confs <- cnames.env[c(grep("center", cnames.env), #grep("month", cnames.env), (let month of birth stay environmental) which(cnames.env=="age"), which(cnames.env=="child_age"), which(cnames.env=="child_BMI"), which(cnames.env=="Sex_female2"))] x.conf <- x.env[ , confs] x.env <- x.env[ , -which(cnames.env %in% confs)] ## 1. x.env.nofh (no family anamnese) fam.an <- c("fhasthma2", "fhhayfev2", "fheczema2", "FHx_Atopy2" ) x.env.nofh <- x.env[ , - which(colnames(x.env) %in% fam.an)] ## 2. x.fh (family anamnese) x.fh <- x.env[ , fam.an] #### MARKNEW # leave out farm-interactions ## 3. 
x.interact (several interactions of snps with fam-hist and env) # formula: form.interact.char <- paste( "~(", paste(colnames(snp.for.int), collapse="+"), ")", ":", "(", paste(c("Num_Sibs_12", "Sex_female2", fam.an ), collapse="+"), ")") form.interact <- formula(form.interact.char) #### MARKNEW x.interact <- model.matrix( form.interact, data= data.frame(snp.for.int, x.env[ , c("Num_Sibs_12", fam.an )], x.conf[ , "Sex_female2", drop=FALSE] )) x.env.3blocks <- cbind(x.env.nofh, x.fh, x.interact) blocks.ipf <- list(1:ncol(x.conf), (ncol(x.conf)+1):(ncol(x.conf) + ncol(x.env.nofh)), (ncol(x.conf) + ncol(x.env.nofh) + 1): (ncol(x.conf) + ncol(x.env.nofh) + ncol(x.fh)), (ncol(x.conf) + ncol(x.env.nofh) + ncol(x.fh)+1): (ncol(x.conf) + ncol(x.env.nofh) + ncol(x.fh) + ncol(x.interact)), (ncol(x.conf) + ncol(x.env.nofh) + ncol(x.fh) + ncol(x.interact)+1): (ncol(x.conf) + ncol(x.env.nofh) + ncol(x.fh) + ncol(x.interact)+ncol(snp.data)) ) ## old (age of onset wheeze-variable) # x.wo <- as.matrix(x.wheeze_onset)[train.fin.ind,,drop=FALSE ] rm(x.env) # outcome get.y <- function(y.label){ if(y.label == "dda") y <- dat.outcome$dd_asthma if(y.label == "dda_notany") y <- dat.outcome$dda_notany if(y.label == "dda.wh12_notany") y <- dat.outcome$dda.wh12_notany y <- y[ train.fin.ind] eval(parse(text=paste0("data.frame(",y.label,"=y)"))) } # env-type get.env <- function(env.label){ if(env.label == "conf_only") x.env <- x.conf if(env.label == "env_only") x.env <- x.env.nofh if(env.label == "fh_only") x.env <- x.fh if(env.label == "interactions_only") x.env <- x.interact if(env.label == "both") x.env <- x.env.3blocks if(env.label == "snps_only") x.env <- NULL if(env.label == "fh_conf" | env.label == "fh_conf_snps") x.env <- cbind(x.fh, x.conf) if(env.label == "fh_conf_env" | env.label=="fh_conf_env_snps") x.env <- cbind(x.fh, x.conf, x.env.nofh) x.env } # weights: obs.weights <- gabriel.1707$weight_dna[ train.fin.ind] ###### application: y.label = "dda" # methods <- c("glmnet", 
"glmnet_weights", "elnet", "elnet_weights", "ipflasso", "ipflasso_weights", # "ranger", "pca_snps_rf", "pca_rf") methods <- "glmnet_weights" ## test: # # y.label="dda.wh12_notany" # env.if <- "both" # method = "ipflasso_weights" # # # method="glmnet_weights" # perform.asthma(y=get.y(y.label), snp.as="estdos", env.if=env.if, # method=method, x.env=get.env(env.if), snp.data=snp.data, # obs.weights=obs.weights, x.conf=x.conf, blocks.ipf=blocks.ipf, # path.results="Results19/" ) # ### Perform: for(method in methods){ for(env.if in env.ifs){ # for(y.label in y.labels){ perform.asthma(y=get.y(y.label), snp.as=snp.as, env.if=env.if, method=method, x.env=get.env(env.if), snp.data=snp.data, obs.weights=obs.weights, x.conf=x.conf, blocks.ipf=blocks.ipf, n.cores=2, #### MARKNEW ## change path! path.results=paste0("3-onlyfarm/ImputedSNPs/Results_genomewide/imp",i,"/"), needed.as.mat=TRUE,needed.as.df=FALSE, ntrees=20000, conv.x.all.to.model.matrix=FALSE ) } # print(paste(method, env.if, ": for all outcomes performed")) # } print(paste(method, "for all outcomes performed and SNP, env and both performed")) } } # # mclapply(1:5, function(x) do.for.imp.set(x, env.ifs = "snps_only", snp.as = "genomewide"), mc.cores=5) # mclapply(1:5, function(x) do.for.imp.set(x, env.ifs = "both", snp.as = "genomewide"), mc.cores=5) mclapply(1:5, function(x) do.for.imp.set(x, env.ifs = "fh_conf_snps", snp.as = "genomewide"), mc.cores=5)
/gabriela/3-onlyfarm/ImputedSNPs/4.1-Genome-wide-learn-LASSO-4blocks.R
no_license
fuchslab/gabriela
R
false
false
6,952
r
### to be careful about here: # each time another env block is used as env.only --> use another 'both' so as not to overwrite sth (or solve this in any way) # for combination approach: ### just cbind the env in order: fam, env.new, interactions ### bild list with the right.names and give it to function as argument # be aware of: interactions have the highest dimension!! if a big selection of snps is used then something has to be changed # however!!: we never can take into account so many interactions so we should stay with the 19 snps here anyway!! ### lit snps setwd("~/../../../storage/cmbstore/norbert.krautenbacher/1-Projects/2-Asthma") source("~/../../../storage/cmbstore/norbert.krautenbacher/1-Projects/1-R-functions/3-wrp-learn-on-snp-env.R") source("~/../../../storage/cmbstore/norbert.krautenbacher/1-Projects/1-R-functions/0-functions.R") library(parallel) # load imputed data sets: load(paste0("Data/5imps.rda")) ### data load("LiteratureSNPs/Data/dat.lit.snps.19.RData") load("ImputedSNPs/Data/mat.imp.pruned.rda") load("Data/gabriel.1707.Rdata") load("Data/dat.outcome.rda") # load("../../Data/x.env.imp.rda") load("Data/x.wheeze_onset.rda") # # new: leave out a final test set # load("Data/fin.ind.rda") #### MARKNEW # take only farm and no innsbruck load("Data/ind.part.rda") train.fin.ind <- setdiff(ind.part$farm, ind.part$farm.inn) do.for.imp.set <- function(i, env.ifs, snp.as){ eval(parse(text=paste0("x.env.imp <- imp",i))) #### MARKNEW ## remove variables farm and center for the corresponding subgroupanalysis x.env.imp$center <- NULL x.env.imp$farm <- NULL #### MARKNEW # thus no interaction with farm either: x.env <- model.matrix(~. 
# + farm:Sex_female ,x.env.imp)[,-1][ train.fin.ind,] # outcome: ################# adjust snp.data, env.data and outcome (same number of observations etc) snp.data <- X[ train.fin.ind,] # snp.data for interactions snp.for.int <- dat.lit.snps.19[train.fin.ind, -1] ######### change: we define several data sets replacing x.env ### confounders: always to include - center, age, month of birth cnames.env <- colnames(x.env) confs <- cnames.env[c(grep("center", cnames.env), #grep("month", cnames.env), (let month of birth stay environmental) which(cnames.env=="age"), which(cnames.env=="child_age"), which(cnames.env=="child_BMI"), which(cnames.env=="Sex_female2"))] x.conf <- x.env[ , confs] x.env <- x.env[ , -which(cnames.env %in% confs)] ## 1. x.env.nofh (no family anamnese) fam.an <- c("fhasthma2", "fhhayfev2", "fheczema2", "FHx_Atopy2" ) x.env.nofh <- x.env[ , - which(colnames(x.env) %in% fam.an)] ## 2. x.fh (family anamnese) x.fh <- x.env[ , fam.an] #### MARKNEW # leave out farm-interactions ## 3. 
x.interact (several interactions of snps with fam-hist and env) # formula: form.interact.char <- paste( "~(", paste(colnames(snp.for.int), collapse="+"), ")", ":", "(", paste(c("Num_Sibs_12", "Sex_female2", fam.an ), collapse="+"), ")") form.interact <- formula(form.interact.char) #### MARKNEW x.interact <- model.matrix( form.interact, data= data.frame(snp.for.int, x.env[ , c("Num_Sibs_12", fam.an )], x.conf[ , "Sex_female2", drop=FALSE] )) x.env.3blocks <- cbind(x.env.nofh, x.fh, x.interact) blocks.ipf <- list(1:ncol(x.conf), (ncol(x.conf)+1):(ncol(x.conf) + ncol(x.env.nofh)), (ncol(x.conf) + ncol(x.env.nofh) + 1): (ncol(x.conf) + ncol(x.env.nofh) + ncol(x.fh)), (ncol(x.conf) + ncol(x.env.nofh) + ncol(x.fh)+1): (ncol(x.conf) + ncol(x.env.nofh) + ncol(x.fh) + ncol(x.interact)), (ncol(x.conf) + ncol(x.env.nofh) + ncol(x.fh) + ncol(x.interact)+1): (ncol(x.conf) + ncol(x.env.nofh) + ncol(x.fh) + ncol(x.interact)+ncol(snp.data)) ) ## old (age of onset wheeze-variable) # x.wo <- as.matrix(x.wheeze_onset)[train.fin.ind,,drop=FALSE ] rm(x.env) # outcome get.y <- function(y.label){ if(y.label == "dda") y <- dat.outcome$dd_asthma if(y.label == "dda_notany") y <- dat.outcome$dda_notany if(y.label == "dda.wh12_notany") y <- dat.outcome$dda.wh12_notany y <- y[ train.fin.ind] eval(parse(text=paste0("data.frame(",y.label,"=y)"))) } # env-type get.env <- function(env.label){ if(env.label == "conf_only") x.env <- x.conf if(env.label == "env_only") x.env <- x.env.nofh if(env.label == "fh_only") x.env <- x.fh if(env.label == "interactions_only") x.env <- x.interact if(env.label == "both") x.env <- x.env.3blocks if(env.label == "snps_only") x.env <- NULL if(env.label == "fh_conf" | env.label == "fh_conf_snps") x.env <- cbind(x.fh, x.conf) if(env.label == "fh_conf_env" | env.label=="fh_conf_env_snps") x.env <- cbind(x.fh, x.conf, x.env.nofh) x.env } # weights: obs.weights <- gabriel.1707$weight_dna[ train.fin.ind] ###### application: y.label = "dda" # methods <- c("glmnet", 
"glmnet_weights", "elnet", "elnet_weights", "ipflasso", "ipflasso_weights", # "ranger", "pca_snps_rf", "pca_rf") methods <- "glmnet_weights" ## test: # # y.label="dda.wh12_notany" # env.if <- "both" # method = "ipflasso_weights" # # # method="glmnet_weights" # perform.asthma(y=get.y(y.label), snp.as="estdos", env.if=env.if, # method=method, x.env=get.env(env.if), snp.data=snp.data, # obs.weights=obs.weights, x.conf=x.conf, blocks.ipf=blocks.ipf, # path.results="Results19/" ) # ### Perform: for(method in methods){ for(env.if in env.ifs){ # for(y.label in y.labels){ perform.asthma(y=get.y(y.label), snp.as=snp.as, env.if=env.if, method=method, x.env=get.env(env.if), snp.data=snp.data, obs.weights=obs.weights, x.conf=x.conf, blocks.ipf=blocks.ipf, n.cores=2, #### MARKNEW ## change path! path.results=paste0("3-onlyfarm/ImputedSNPs/Results_genomewide/imp",i,"/"), needed.as.mat=TRUE,needed.as.df=FALSE, ntrees=20000, conv.x.all.to.model.matrix=FALSE ) } # print(paste(method, env.if, ": for all outcomes performed")) # } print(paste(method, "for all outcomes performed and SNP, env and both performed")) } } # # mclapply(1:5, function(x) do.for.imp.set(x, env.ifs = "snps_only", snp.as = "genomewide"), mc.cores=5) # mclapply(1:5, function(x) do.for.imp.set(x, env.ifs = "both", snp.as = "genomewide"), mc.cores=5) mclapply(1:5, function(x) do.for.imp.set(x, env.ifs = "fh_conf_snps", snp.as = "genomewide"), mc.cores=5)
#============================================================================== # SI FIGURE: CORRELATIONS BETWEEN ACCURACY AND X METRICS #============================================================================== #------------------------------------------------------------------------------ # Preamble #------------------------------------------------------------------------------ # Libraries & formatting source("scripts/aux_format-plots.R") # Input path path_teams <- "data/team-stats.csv" # Output paths out_path_acc <- "figures/si_accuracy-correlations.png" out_path_x <- "figures/si_xmetric-correlations.png" out_path_mix_all <- "figures/si_mix-correlations-all.png" out_path_mix_filtered <- "figures/si_mix-correlations-filtered.png" # Output parameters out_width <- page_width_in out_asp <- 1.05 # Themes & scales theme_cor <- theme_base + theme( axis.title = element_blank(), strip.text = element_text(margin = lmargin(rep(0.2, 4))), aspect.ratio = 1, plot.margin = lmargin(0.3, 0.4, 0.4, 0.4), ) scale_x_acc <- purrr::partial(scale_x_continuous, breaks = seq(0,1,0.2), minor_breaks = NULL, labels = function(x) round(x*100)) scale_y_acc <- purrr::partial(scale_y_continuous, breaks = seq(0,1,0.2), minor_breaks = NULL, labels = function(x) round(x*100)) scale_x_nlabs <- purrr::partial(scale_x_cont_nominor, limits = c(0,n_labs), breaks = seq(0,2000,400), expand = c(0,0)) scale_y_nlabs <- purrr::partial(scale_y_cont_nominor, limits = c(0,n_labs), breaks = seq(0,2000,400), expand = c(0,0)) #------------------------------------------------------------------------------ # Import data #------------------------------------------------------------------------------ # Team data data_teams <- suppressMessages(read_csv(path_teams)) #------------------------------------------------------------------------------ # Part 1: Intra-competition accuracy correlations #------------------------------------------------------------------------------ # Types of accuracy metric tab_types <- 
tibble(accuracy_type_base = data_teams %>% select(starts_with("accuracy")) %>% colnames) %>% mutate(accuracy_type_nice = paste(sub("accuracy_top", "Top-", accuracy_type_base), "\nAccuracy (%)"), accuracy_type_nice_asc = fct_inorder(accuracy_type_nice), accuracy_type_nice_desc = fct_rev(accuracy_type_nice_asc)) %>% select(-accuracy_type_nice) # Combinations of metrics to compare tab_acc_base <- expand_grid(ranking = data_teams$prediction_track_ranking, accuracy_type_1 = tab_types$accuracy_type_base, accuracy_type_2 = tab_types$accuracy_type_base) %>% mutate(winning_rank = ifelse(ranking <= n_winners, ranking, "Other")) # Add nice accuracy names tab_acc_nice <- tab_acc_base %>% inner_join(tab_types, by = c("accuracy_type_1" = "accuracy_type_base")) %>% rename(accuracy_nice_1 = accuracy_type_nice_desc) %>% select(-accuracy_type_nice_asc) %>% inner_join(tab_types, by = c("accuracy_type_2" = "accuracy_type_base")) %>% rename(accuracy_nice_2 = accuracy_type_nice_asc) %>% select(-accuracy_type_nice_desc) # Add accuracy values tab_teams_melt <- data_teams %>% select(ranking = prediction_track_ranking, starts_with("accuracy")) %>% gather(accuracy_type, accuracy, -ranking) tab_acc <- tab_acc_nice %>% inner_join(tab_teams_melt, by = c("ranking" = "ranking", "accuracy_type_1" = "accuracy_type")) %>% rename(accuracy_1 = accuracy) %>% inner_join(tab_teams_melt, by = c("ranking" = "ranking", "accuracy_type_2" = "accuracy_type")) %>% rename(accuracy_2 = accuracy) # Split into winners/nonwinners tab_acc_win <- tab_acc %>% filter(winning_rank != "Other") tab_acc_nonwin <- tab_acc %>% filter(winning_rank == "Other") # Make plots aes_col <- aes(colour=winning_rank) stat_spear_no_p <- purrr::partial(stat_spear, digits = 3, mapping = aes(label = ..r.label..)) stat_aspear <- purrr::partial(stat_spear_no_p, label.x = 0.05, label.y = 0.95, vjust = 1, hjust = 0, data = tab_acc) g_acc_cor <- ggplot(aes(x=accuracy_1, y=accuracy_2), data = NULL) + geom_abline(size = 0.3, colour = 
palette_primary["grey"]) + geom_point_nonwinners(data=tab_acc_nonwin, alpha=0.7, mapping=aes_col) + geom_point_winners(data=tab_acc_win, alpha=1, mapping=aes_col) + stat_aspear() + facet_grid(accuracy_nice_1 ~ accuracy_nice_2, switch = "both") + scale_x_acc() + scale_y_acc() + scale_colour_winners() + theme_cor #------------------------------------------------------------------------------ # Part 2: Inter-X-metric correlations #------------------------------------------------------------------------------ # Combinations of metrics to compare x_levels <- c("X99", "X95", "X90", "X80") x_types <- fct_inorder(x_levels) x_types_rev <- fct_inorder(rev(x_levels)) tab_x_base <- expand_grid(ranking = data_teams$prediction_track_ranking, x_type_1 = x_types, x_type_2 = x_types_rev) %>% mutate(winning_rank = ifelse(ranking <= n_winners, ranking, "Other")) # Add values tab_teams_melt_x <- data_teams %>% select(ranking = prediction_track_ranking, starts_with("X")) %>% gather(x_type, N, -ranking) tab_x <- tab_x_base %>% inner_join(tab_teams_melt_x, by = c("ranking" = "ranking", "x_type_1" = "x_type")) %>% rename(N1 = N) %>% inner_join(tab_teams_melt_x, by = c("ranking" = "ranking", "x_type_2" = "x_type")) %>% rename(N2 = N) %>% mutate(x_type_1 = factor(x_type_1, levels = x_levels), x_type_2 = factor(x_type_2, levels = rev(x_levels))) # Split into winners/nonwinners tab_x_win <- tab_x %>% filter(winning_rank != "Other") tab_x_nonwin <- tab_x %>% filter(winning_rank == "Other") # Make plots stat_xspear <- purrr::partial(stat_spear_no_p, label.x = n_labs*0.05, label.y = n_labs*0.95, vjust = 1, hjust = 0, data = tab_x) g_x_cor <- ggplot(aes(x=N1, y=N2), data = NULL) + geom_abline(size = 0.3, colour = palette_primary["grey"]) + geom_point_nonwinners(data=tab_x_nonwin, alpha=0.7, mapping=aes_col) + geom_point_winners(data=tab_x_win, alpha=1, mapping=aes_col) + stat_xspear() + facet_grid(x_type_1 ~ x_type_2, switch = "both") + scale_x_nlabs() + scale_y_nlabs() + scale_colour_winners() + 
theme_cor #------------------------------------------------------------------------------ # Part 3: X-vs-accuracy correlations (all teams) #------------------------------------------------------------------------------ # Combinations to compare tab_mix_base <- expand_grid(ranking = data_teams$prediction_track_ranking, accuracy_type = tab_types$accuracy_type_base, x_type = x_types_rev) %>% mutate(winning_rank = ifelse(ranking <= n_winners, ranking, "Other")) # Add nice accuracy names tab_mix_nice <- tab_mix_base %>% inner_join(tab_types, by = c("accuracy_type" = "accuracy_type_base")) %>% rename(accuracy_nice = accuracy_type_nice_asc) %>% select(-accuracy_type_nice_desc) # Add values tab_mix <- tab_mix_nice %>% inner_join(tab_teams_melt, by = c("ranking", "accuracy_type")) %>% inner_join(tab_teams_melt_x, by = c("ranking", "x_type")) %>% mutate(x_type = factor(x_type, levels = x_levels)) # Split into winners/nonwinners tab_mix_win <- tab_mix %>% filter(winning_rank != "Other") tab_mix_nonwin <- tab_mix %>% filter(winning_rank == "Other") # Make plots stat_mspear <- purrr::partial(stat_spear_no_p, label.x = 0.05, label.y = n_labs*0.95, vjust = 1, hjust = 0, data = tab_mix) g_mix_cor_all <- ggplot(aes(x=accuracy, y=N), data = NULL) + geom_point_nonwinners(data=tab_mix_nonwin, alpha=0.7, mapping=aes_col) + geom_point_winners(data=tab_mix_win, alpha=1, mapping=aes_col) + stat_mspear() + facet_grid(x_type ~ accuracy_nice, switch = "both") + scale_x_acc() + scale_y_nlabs() + scale_colour_winners() + theme_cor #------------------------------------------------------------------------------ # Part 4: X-vs-accuracy correlations (filtered) #------------------------------------------------------------------------------ # Filter data to remove outliers tab_mixf <- tab_mix %>% filter(N < n_labs) tab_mixf_win <- tab_mixf %>% filter(winning_rank != "Other") tab_mixf_nonwin <- tab_mixf %>% filter(winning_rank == "Other") # Make plots stat_fspear <- purrr::partial(stat_spear_no_p, 
label.x = 0.05, label.y = n_labs*0.95, vjust = 1, hjust = 0, data = tab_mixf) g_mix_cor_filtered <- ggplot(aes(x=accuracy, y=N), data = NULL) + geom_point_nonwinners(data=tab_mixf_nonwin, alpha=0.7, mapping=aes_col) + geom_point_winners(data=tab_mixf_win, alpha=1, mapping=aes_col) + stat_fspear() + facet_grid(x_type ~ accuracy_nice, switch = "both") + scale_x_acc() + scale_y_nlabs() + scale_colour_winners() + theme_cor #------------------------------------------------------------------------------ # Save output #------------------------------------------------------------------------------ save_fig(out_path_acc, g_acc_cor, out_width, out_asp) save_fig(out_path_x, g_x_cor, out_width, out_asp) save_fig(out_path_mix_all, g_mix_cor_all, out_width, out_asp) save_fig(out_path_mix_filtered, g_mix_cor_filtered, out_width, out_asp)
/scripts/si_correlations.R
permissive
willbradshaw/geac
R
false
false
9,890
r
#============================================================================== # SI FIGURE: CORRELATIONS BETWEEN ACCURACY AND X METRICS #============================================================================== #------------------------------------------------------------------------------ # Preamble #------------------------------------------------------------------------------ # Libraries & formatting source("scripts/aux_format-plots.R") # Input path path_teams <- "data/team-stats.csv" # Output paths out_path_acc <- "figures/si_accuracy-correlations.png" out_path_x <- "figures/si_xmetric-correlations.png" out_path_mix_all <- "figures/si_mix-correlations-all.png" out_path_mix_filtered <- "figures/si_mix-correlations-filtered.png" # Output parameters out_width <- page_width_in out_asp <- 1.05 # Themes & scales theme_cor <- theme_base + theme( axis.title = element_blank(), strip.text = element_text(margin = lmargin(rep(0.2, 4))), aspect.ratio = 1, plot.margin = lmargin(0.3, 0.4, 0.4, 0.4), ) scale_x_acc <- purrr::partial(scale_x_continuous, breaks = seq(0,1,0.2), minor_breaks = NULL, labels = function(x) round(x*100)) scale_y_acc <- purrr::partial(scale_y_continuous, breaks = seq(0,1,0.2), minor_breaks = NULL, labels = function(x) round(x*100)) scale_x_nlabs <- purrr::partial(scale_x_cont_nominor, limits = c(0,n_labs), breaks = seq(0,2000,400), expand = c(0,0)) scale_y_nlabs <- purrr::partial(scale_y_cont_nominor, limits = c(0,n_labs), breaks = seq(0,2000,400), expand = c(0,0)) #------------------------------------------------------------------------------ # Import data #------------------------------------------------------------------------------ # Team data data_teams <- suppressMessages(read_csv(path_teams)) #------------------------------------------------------------------------------ # Part 1: Intra-competition accuracy correlations #------------------------------------------------------------------------------ # Types of accuracy metric tab_types <- 
tibble(accuracy_type_base = data_teams %>% select(starts_with("accuracy")) %>% colnames) %>% mutate(accuracy_type_nice = paste(sub("accuracy_top", "Top-", accuracy_type_base), "\nAccuracy (%)"), accuracy_type_nice_asc = fct_inorder(accuracy_type_nice), accuracy_type_nice_desc = fct_rev(accuracy_type_nice_asc)) %>% select(-accuracy_type_nice) # Combinations of metrics to compare tab_acc_base <- expand_grid(ranking = data_teams$prediction_track_ranking, accuracy_type_1 = tab_types$accuracy_type_base, accuracy_type_2 = tab_types$accuracy_type_base) %>% mutate(winning_rank = ifelse(ranking <= n_winners, ranking, "Other")) # Add nice accuracy names tab_acc_nice <- tab_acc_base %>% inner_join(tab_types, by = c("accuracy_type_1" = "accuracy_type_base")) %>% rename(accuracy_nice_1 = accuracy_type_nice_desc) %>% select(-accuracy_type_nice_asc) %>% inner_join(tab_types, by = c("accuracy_type_2" = "accuracy_type_base")) %>% rename(accuracy_nice_2 = accuracy_type_nice_asc) %>% select(-accuracy_type_nice_desc) # Add accuracy values tab_teams_melt <- data_teams %>% select(ranking = prediction_track_ranking, starts_with("accuracy")) %>% gather(accuracy_type, accuracy, -ranking) tab_acc <- tab_acc_nice %>% inner_join(tab_teams_melt, by = c("ranking" = "ranking", "accuracy_type_1" = "accuracy_type")) %>% rename(accuracy_1 = accuracy) %>% inner_join(tab_teams_melt, by = c("ranking" = "ranking", "accuracy_type_2" = "accuracy_type")) %>% rename(accuracy_2 = accuracy) # Split into winners/nonwinners tab_acc_win <- tab_acc %>% filter(winning_rank != "Other") tab_acc_nonwin <- tab_acc %>% filter(winning_rank == "Other") # Make plots aes_col <- aes(colour=winning_rank) stat_spear_no_p <- purrr::partial(stat_spear, digits = 3, mapping = aes(label = ..r.label..)) stat_aspear <- purrr::partial(stat_spear_no_p, label.x = 0.05, label.y = 0.95, vjust = 1, hjust = 0, data = tab_acc) g_acc_cor <- ggplot(aes(x=accuracy_1, y=accuracy_2), data = NULL) + geom_abline(size = 0.3, colour = 
palette_primary["grey"]) + geom_point_nonwinners(data=tab_acc_nonwin, alpha=0.7, mapping=aes_col) + geom_point_winners(data=tab_acc_win, alpha=1, mapping=aes_col) + stat_aspear() + facet_grid(accuracy_nice_1 ~ accuracy_nice_2, switch = "both") + scale_x_acc() + scale_y_acc() + scale_colour_winners() + theme_cor #------------------------------------------------------------------------------ # Part 2: Inter-X-metric correlations #------------------------------------------------------------------------------ # Combinations of metrics to compare x_levels <- c("X99", "X95", "X90", "X80") x_types <- fct_inorder(x_levels) x_types_rev <- fct_inorder(rev(x_levels)) tab_x_base <- expand_grid(ranking = data_teams$prediction_track_ranking, x_type_1 = x_types, x_type_2 = x_types_rev) %>% mutate(winning_rank = ifelse(ranking <= n_winners, ranking, "Other")) # Add values tab_teams_melt_x <- data_teams %>% select(ranking = prediction_track_ranking, starts_with("X")) %>% gather(x_type, N, -ranking) tab_x <- tab_x_base %>% inner_join(tab_teams_melt_x, by = c("ranking" = "ranking", "x_type_1" = "x_type")) %>% rename(N1 = N) %>% inner_join(tab_teams_melt_x, by = c("ranking" = "ranking", "x_type_2" = "x_type")) %>% rename(N2 = N) %>% mutate(x_type_1 = factor(x_type_1, levels = x_levels), x_type_2 = factor(x_type_2, levels = rev(x_levels))) # Split into winners/nonwinners tab_x_win <- tab_x %>% filter(winning_rank != "Other") tab_x_nonwin <- tab_x %>% filter(winning_rank == "Other") # Make plots stat_xspear <- purrr::partial(stat_spear_no_p, label.x = n_labs*0.05, label.y = n_labs*0.95, vjust = 1, hjust = 0, data = tab_x) g_x_cor <- ggplot(aes(x=N1, y=N2), data = NULL) + geom_abline(size = 0.3, colour = palette_primary["grey"]) + geom_point_nonwinners(data=tab_x_nonwin, alpha=0.7, mapping=aes_col) + geom_point_winners(data=tab_x_win, alpha=1, mapping=aes_col) + stat_xspear() + facet_grid(x_type_1 ~ x_type_2, switch = "both") + scale_x_nlabs() + scale_y_nlabs() + scale_colour_winners() + 
theme_cor #------------------------------------------------------------------------------ # Part 3: X-vs-accuracy correlations (all teams) #------------------------------------------------------------------------------ # Combinations to compare tab_mix_base <- expand_grid(ranking = data_teams$prediction_track_ranking, accuracy_type = tab_types$accuracy_type_base, x_type = x_types_rev) %>% mutate(winning_rank = ifelse(ranking <= n_winners, ranking, "Other")) # Add nice accuracy names tab_mix_nice <- tab_mix_base %>% inner_join(tab_types, by = c("accuracy_type" = "accuracy_type_base")) %>% rename(accuracy_nice = accuracy_type_nice_asc) %>% select(-accuracy_type_nice_desc) # Add values tab_mix <- tab_mix_nice %>% inner_join(tab_teams_melt, by = c("ranking", "accuracy_type")) %>% inner_join(tab_teams_melt_x, by = c("ranking", "x_type")) %>% mutate(x_type = factor(x_type, levels = x_levels)) # Split into winners/nonwinners tab_mix_win <- tab_mix %>% filter(winning_rank != "Other") tab_mix_nonwin <- tab_mix %>% filter(winning_rank == "Other") # Make plots stat_mspear <- purrr::partial(stat_spear_no_p, label.x = 0.05, label.y = n_labs*0.95, vjust = 1, hjust = 0, data = tab_mix) g_mix_cor_all <- ggplot(aes(x=accuracy, y=N), data = NULL) + geom_point_nonwinners(data=tab_mix_nonwin, alpha=0.7, mapping=aes_col) + geom_point_winners(data=tab_mix_win, alpha=1, mapping=aes_col) + stat_mspear() + facet_grid(x_type ~ accuracy_nice, switch = "both") + scale_x_acc() + scale_y_nlabs() + scale_colour_winners() + theme_cor #------------------------------------------------------------------------------ # Part 4: X-vs-accuracy correlations (filtered) #------------------------------------------------------------------------------ # Filter data to remove outliers tab_mixf <- tab_mix %>% filter(N < n_labs) tab_mixf_win <- tab_mixf %>% filter(winning_rank != "Other") tab_mixf_nonwin <- tab_mixf %>% filter(winning_rank == "Other") # Make plots stat_fspear <- purrr::partial(stat_spear_no_p, 
label.x = 0.05, label.y = n_labs*0.95, vjust = 1, hjust = 0, data = tab_mixf) g_mix_cor_filtered <- ggplot(aes(x=accuracy, y=N), data = NULL) + geom_point_nonwinners(data=tab_mixf_nonwin, alpha=0.7, mapping=aes_col) + geom_point_winners(data=tab_mixf_win, alpha=1, mapping=aes_col) + stat_fspear() + facet_grid(x_type ~ accuracy_nice, switch = "both") + scale_x_acc() + scale_y_nlabs() + scale_colour_winners() + theme_cor #------------------------------------------------------------------------------ # Save output #------------------------------------------------------------------------------ save_fig(out_path_acc, g_acc_cor, out_width, out_asp) save_fig(out_path_x, g_x_cor, out_width, out_asp) save_fig(out_path_mix_all, g_mix_cor_all, out_width, out_asp) save_fig(out_path_mix_filtered, g_mix_cor_filtered, out_width, out_asp)
# test biases: ntests <- 100 res <- data.frame(run = 1:ntests, idbias = runif(ntests), rankbias = runif(ntests), cv_id = NA, cor_w_rank = NA) for (i in seq_len(nrow(res))) { x <- generate_interaction_probs(n_ind = sample(5:20, 1), id_bias = res$idbias[i], rank_bias = res$rankbias[i]) # generate 10000 'interactions' s <- sample(seq_len(nrow(x)), 10000, TRUE, prob = x[, "final"]) xtab <- as.numeric(table(as.numeric(x[s, 1:2]))) res$cv_id[i] <- sd(xtab) / mean(xtab) xtab <- table((x[s, 2] - x[s, 1])) rds <- as.numeric(names(xtab)) # coef(lm(scale(xtab) ~ scale(rds)))[2] # res$cor_w_rank[i] <- cor(xtab, as.numeric(names(xtab))) rankdiff <- x[, 2] - x[, 1] interactprob <- x[, "final"] res$cor_w_rank[i] <- cor(rankdiff, interactprob) } plot(res$idbias, res$cv_id) cor(res$idbias, res$cv_id) plot(res$rankbias, res$cor_w_rank) cor(res$rankbias, res$cor_w_rank) test_that("multiplication works", { expect_true(cor(res$idbias, res$cv_id) > 0.3) expect_true(cor(res$rankbias, res$cor_w_rank) < -0.3) })
/tests/testthat/test-generate_interaction_probs.R
no_license
gobbios/EloRating.extension
R
false
false
1,042
r
# test biases: ntests <- 100 res <- data.frame(run = 1:ntests, idbias = runif(ntests), rankbias = runif(ntests), cv_id = NA, cor_w_rank = NA) for (i in seq_len(nrow(res))) { x <- generate_interaction_probs(n_ind = sample(5:20, 1), id_bias = res$idbias[i], rank_bias = res$rankbias[i]) # generate 10000 'interactions' s <- sample(seq_len(nrow(x)), 10000, TRUE, prob = x[, "final"]) xtab <- as.numeric(table(as.numeric(x[s, 1:2]))) res$cv_id[i] <- sd(xtab) / mean(xtab) xtab <- table((x[s, 2] - x[s, 1])) rds <- as.numeric(names(xtab)) # coef(lm(scale(xtab) ~ scale(rds)))[2] # res$cor_w_rank[i] <- cor(xtab, as.numeric(names(xtab))) rankdiff <- x[, 2] - x[, 1] interactprob <- x[, "final"] res$cor_w_rank[i] <- cor(rankdiff, interactprob) } plot(res$idbias, res$cv_id) cor(res$idbias, res$cv_id) plot(res$rankbias, res$cor_w_rank) cor(res$rankbias, res$cor_w_rank) test_that("multiplication works", { expect_true(cor(res$idbias, res$cv_id) > 0.3) expect_true(cor(res$rankbias, res$cor_w_rank) < -0.3) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tau.R \name{tau_p} \alias{tau_p} \title{Pimentel's tau_b} \usage{ tau_p(x, y, estimator = "values", p11 = 0, p01 = 0, p10 = 0) } \arguments{ \item{x, y}{vectors to be correlated. Must be numeric and have the same length.} \item{estimator}{string indicating how the parameters $p_{11}$, $p_{01}$, $p_{10}$ are to be estimated. The default is 'values', which indicates that they are estimated based on the entries of x and y. If estimates=='own', the $p_ji$'s must be given as arguments.} \item{p11}{probability that a bivariate observation is of the type (m,n), where m,n>0. Default is 0.} \item{p01}{probability that a bivariate observation is of the type (0,n), where n>0.Default is 0.} \item{p10}{probability that a bivariate observation is of the type (n,0), where n>0.Default is 0.} } \value{ correlation values } \description{ Computes the estimator for Kendall's tau_b for zero inflated continuous data proposed by Pimentel(2009). } \keyword{internal}
/man/tau_p.Rd
no_license
cran/mazeinda
R
false
true
1,040
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tau.R \name{tau_p} \alias{tau_p} \title{Pimentel's tau_b} \usage{ tau_p(x, y, estimator = "values", p11 = 0, p01 = 0, p10 = 0) } \arguments{ \item{x, y}{vectors to be correlated. Must be numeric and have the same length.} \item{estimator}{string indicating how the parameters $p_{11}$, $p_{01}$, $p_{10}$ are to be estimated. The default is 'values', which indicates that they are estimated based on the entries of x and y. If estimates=='own', the $p_ji$'s must be given as arguments.} \item{p11}{probability that a bivariate observation is of the type (m,n), where m,n>0. Default is 0.} \item{p01}{probability that a bivariate observation is of the type (0,n), where n>0.Default is 0.} \item{p10}{probability that a bivariate observation is of the type (n,0), where n>0.Default is 0.} } \value{ correlation values } \description{ Computes the estimator for Kendall's tau_b for zero inflated continuous data proposed by Pimentel(2009). } \keyword{internal}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_default_mhcnuggets_folder.R \name{get_default_mhcnuggets_folder} \alias{get_default_mhcnuggets_folder} \title{Get the path to the folder where this package installs MHCnuggets by default} \usage{ get_default_mhcnuggets_folder() } \value{ the path to the folder where this package installs MHCnuggets by default } \description{ Get the path to the folder where this package installs MHCnuggets by default } \examples{ get_default_mhcnuggets_folder() } \author{ Richèl J.C. Bilderbeek }
/man/get_default_mhcnuggets_folder.Rd
no_license
cran/mhcnuggetsr
R
false
true
568
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_default_mhcnuggets_folder.R \name{get_default_mhcnuggets_folder} \alias{get_default_mhcnuggets_folder} \title{Get the path to the folder where this package installs MHCnuggets by default} \usage{ get_default_mhcnuggets_folder() } \value{ the path to the folder where this package installs MHCnuggets by default } \description{ Get the path to the folder where this package installs MHCnuggets by default } \examples{ get_default_mhcnuggets_folder() } \author{ Richèl J.C. Bilderbeek }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wrenchSource.R \name{.getReference} \alias{.getReference} \title{This function generates the reference.} \usage{ .getReference(mat, ref.est = "sw.means", ...) } \arguments{ \item{mat}{count matrix; rows are features and columns are samples} \item{ref.est}{reference estimate method} \item{...}{other parameters} } \value{ the reference to be used for normalization } \description{ This function generates the reference. }
/man/dot-getReference.Rd
no_license
HCBravoLab/Wrench
R
false
true
502
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wrenchSource.R \name{.getReference} \alias{.getReference} \title{This function generates the reference.} \usage{ .getReference(mat, ref.est = "sw.means", ...) } \arguments{ \item{mat}{count matrix; rows are features and columns are samples} \item{ref.est}{reference estimate method} \item{...}{other parameters} } \value{ the reference to be used for normalization } \description{ This function generates the reference. }
\name{bamsignals-package} \alias{bamsignals-package} \alias{bamsignals} \docType{package} \title{ What the package does (short line) } \description{ More about what it does (maybe more than one line) ~~ A concise (1-5 lines) description of the package ~~ } \details{ \tabular{ll}{ Package: \tab bamsignals\cr Type: \tab Package\cr Version: \tab 1.0\cr Date: \tab 2013-11-20\cr License: \tab What Licence is it under ?\cr } ~~ An overview of how to use the package, including the most important functions ~~ } \author{ Who wrote it Maintainer: Who to complain to <yourfault@somewhere.net> } \references{ ~~ Literature or other references for background information ~~ } \keyword{ package } \seealso{ ~~ Optional links to other man pages, e.g. ~~ ~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~ } \examples{ %% ~~ simple examples of the most important functions ~~ }
/bamsignals/man/bamsignals-package.Rd
no_license
al2na/cmbr
R
false
false
865
rd
\name{bamsignals-package} \alias{bamsignals-package} \alias{bamsignals} \docType{package} \title{ What the package does (short line) } \description{ More about what it does (maybe more than one line) ~~ A concise (1-5 lines) description of the package ~~ } \details{ \tabular{ll}{ Package: \tab bamsignals\cr Type: \tab Package\cr Version: \tab 1.0\cr Date: \tab 2013-11-20\cr License: \tab What Licence is it under ?\cr } ~~ An overview of how to use the package, including the most important functions ~~ } \author{ Who wrote it Maintainer: Who to complain to <yourfault@somewhere.net> } \references{ ~~ Literature or other references for background information ~~ } \keyword{ package } \seealso{ ~~ Optional links to other man pages, e.g. ~~ ~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~ } \examples{ %% ~~ simple examples of the most important functions ~~ }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SED_SAMPLES.R \name{SED_SAMPLES} \alias{SED_SAMPLES} \title{Extract instantaneous sediment sample information from the HYDAT database} \source{ HYDAT } \usage{ SED_SAMPLES(hydat_path = NULL, STATION_NUMBER = NULL, PROV_TERR_STATE_LOC = NULL, start_date = "ALL", end_date = "ALL") } \arguments{ \item{hydat_path}{Directory to the hydat database. Can be set as "Hydat.sqlite3" which will look for Hydat in the working directory. The hydat path can also be set in the \code{.Renviron} file so that it doesn't have to specified every function call. The path should set as the variable \code{hydat}. Open the \code{.Renviron} file using this command: \code{file.edit("~/.Renviron")}.} \item{STATION_NUMBER}{Water Survey of Canada station number. If this argument is omitted, the value of \code{PROV_TERR_STATE_LOC} is returned.} \item{PROV_TERR_STATE_LOC}{Province, state or territory. If this argument is omitted, the value of \code{STATION_NUMBER} is returned. See \code{unique(allstations$PROV_TERR_STATE_LOC)}} \item{start_date}{Leave blank if all dates are required. Date format needs to be in YYYY-MM-DD. Date is inclusive.} \item{end_date}{Leave blank if all dates are required. Date format needs to be in YYYY-MM-DD. Date is inclusive.} } \value{ A tibble of instantaneous sediment samples data } \description{ Provides wrapper to turn the SED_SAMPLES table in HYDAT into a tidy data frame. \code{STATION_NUMBER} and \code{PROV_TERR_STATE_LOC} can both be supplied. If both are omitted all values from the \code{STATIONS} table are returned. That is a large vector for \code{SED_SAMPLES}. 
} \examples{ \donttest{ SED_SAMPLES(STATION_NUMBER = c("08MH024","08MH001"), hydat_path = "H:/Hydat.sqlite3", start_date = "1996-01-01", end_date = "2000-01-01") SED_SAMPLES(PROV_TERR_STATE_LOC = "PE", hydat_path = "H:/Hydat.sqlite3") } } \seealso{ Other HYDAT functions: \code{\link{AGENCY_LIST}}, \code{\link{ANNUAL_INSTANT_PEAKS}}, \code{\link{ANNUAL_STATISTICS}}, \code{\link{DATUM_LIST}}, \code{\link{DLY_FLOWS}}, \code{\link{DLY_LEVELS}}, \code{\link{MONTHLY_FLOWS}}, \code{\link{MONTHLY_LEVELS}}, \code{\link{REGIONAL_OFFICE_LIST}}, \code{\link{SED_DLY_LOADS}}, \code{\link{SED_DLY_SUSCON}}, \code{\link{SED_MONTHLY_LOADS}}, \code{\link{SED_MONTHLY_SUSCON}}, \code{\link{SED_SAMPLES_PSD}}, \code{\link{STATIONS}}, \code{\link{STN_DATA_COLLECTION}}, \code{\link{STN_DATA_RANGE}}, \code{\link{STN_OPERATION_SCHEDULE}}, \code{\link{STN_REGULATION}}, \code{\link{VERSION}} }
/man/SED_SAMPLES.Rd
permissive
stephhazlitt/tidyhydat
R
false
true
2,598
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/SED_SAMPLES.R \name{SED_SAMPLES} \alias{SED_SAMPLES} \title{Extract instantaneous sediment sample information from the HYDAT database} \source{ HYDAT } \usage{ SED_SAMPLES(hydat_path = NULL, STATION_NUMBER = NULL, PROV_TERR_STATE_LOC = NULL, start_date = "ALL", end_date = "ALL") } \arguments{ \item{hydat_path}{Directory to the hydat database. Can be set as "Hydat.sqlite3" which will look for Hydat in the working directory. The hydat path can also be set in the \code{.Renviron} file so that it doesn't have to specified every function call. The path should set as the variable \code{hydat}. Open the \code{.Renviron} file using this command: \code{file.edit("~/.Renviron")}.} \item{STATION_NUMBER}{Water Survey of Canada station number. If this argument is omitted, the value of \code{PROV_TERR_STATE_LOC} is returned.} \item{PROV_TERR_STATE_LOC}{Province, state or territory. If this argument is omitted, the value of \code{STATION_NUMBER} is returned. See \code{unique(allstations$PROV_TERR_STATE_LOC)}} \item{start_date}{Leave blank if all dates are required. Date format needs to be in YYYY-MM-DD. Date is inclusive.} \item{end_date}{Leave blank if all dates are required. Date format needs to be in YYYY-MM-DD. Date is inclusive.} } \value{ A tibble of instantaneous sediment samples data } \description{ Provides wrapper to turn the SED_SAMPLES table in HYDAT into a tidy data frame. \code{STATION_NUMBER} and \code{PROV_TERR_STATE_LOC} can both be supplied. If both are omitted all values from the \code{STATIONS} table are returned. That is a large vector for \code{SED_SAMPLES}. 
} \examples{ \donttest{ SED_SAMPLES(STATION_NUMBER = c("08MH024","08MH001"), hydat_path = "H:/Hydat.sqlite3", start_date = "1996-01-01", end_date = "2000-01-01") SED_SAMPLES(PROV_TERR_STATE_LOC = "PE", hydat_path = "H:/Hydat.sqlite3") } } \seealso{ Other HYDAT functions: \code{\link{AGENCY_LIST}}, \code{\link{ANNUAL_INSTANT_PEAKS}}, \code{\link{ANNUAL_STATISTICS}}, \code{\link{DATUM_LIST}}, \code{\link{DLY_FLOWS}}, \code{\link{DLY_LEVELS}}, \code{\link{MONTHLY_FLOWS}}, \code{\link{MONTHLY_LEVELS}}, \code{\link{REGIONAL_OFFICE_LIST}}, \code{\link{SED_DLY_LOADS}}, \code{\link{SED_DLY_SUSCON}}, \code{\link{SED_MONTHLY_LOADS}}, \code{\link{SED_MONTHLY_SUSCON}}, \code{\link{SED_SAMPLES_PSD}}, \code{\link{STATIONS}}, \code{\link{STN_DATA_COLLECTION}}, \code{\link{STN_DATA_RANGE}}, \code{\link{STN_OPERATION_SCHEDULE}}, \code{\link{STN_REGULATION}}, \code{\link{VERSION}} }
rankhospital <- function(state, outcome, num = "best") { library(plyr) ## Read outcome data data <- read.csv("outcome-of-care-measures.csv", colClasses = "character") ## Check that state and outcome are valid if(!any(data$State == state)) stop("invalid state") if(!(outcome %in% c("heart attack", "heart failure", "pneumonia"))) stop("invalid outcome") if( num != "best" && num != "worst" && num%%1 != 0 ) stop("invalid num") ## Return hospital name in that state with the given rank ## 30-day death rate # [11] "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack" # [17] "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure" # [23] "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia" if(outcome == "heart attack") col <- 11 if(outcome == "heart failure") col <- 17 if(outcome == "pneumonia") col <- 23 data <- data[data$state==state & data[,col] != 'Not Available', ] data[,col] <- as.data.frame(sapply(data[,col], as.numeric)) data <- data[!is.na(data[,col]),] data <- data[order(data$Hospital.Name, decreasing = FALSE), ] data <- data[order(data[outc,col], decreasing = FALSE), ] if(num == "best") return(data$Hospital.Name[1]) if(num == "worst") return(data$Hospital.Name[length(data$Hospital.Name)]) data$Hospital.Name[num] }
/02_R_progamming/week_4/prog_assign_3_hospital_quality/rankhospital.R
no_license
tommyilpazzo/datasciencecoursera
R
false
false
1,434
r
rankhospital <- function(state, outcome, num = "best") { library(plyr) ## Read outcome data data <- read.csv("outcome-of-care-measures.csv", colClasses = "character") ## Check that state and outcome are valid if(!any(data$State == state)) stop("invalid state") if(!(outcome %in% c("heart attack", "heart failure", "pneumonia"))) stop("invalid outcome") if( num != "best" && num != "worst" && num%%1 != 0 ) stop("invalid num") ## Return hospital name in that state with the given rank ## 30-day death rate # [11] "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack" # [17] "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure" # [23] "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia" if(outcome == "heart attack") col <- 11 if(outcome == "heart failure") col <- 17 if(outcome == "pneumonia") col <- 23 data <- data[data$state==state & data[,col] != 'Not Available', ] data[,col] <- as.data.frame(sapply(data[,col], as.numeric)) data <- data[!is.na(data[,col]),] data <- data[order(data$Hospital.Name, decreasing = FALSE), ] data <- data[order(data[outc,col], decreasing = FALSE), ] if(num == "best") return(data$Hospital.Name[1]) if(num == "worst") return(data$Hospital.Name[length(data$Hospital.Name)]) data$Hospital.Name[num] }
test_that("update_csasstyle() works", { wd <- getwd() testing_path <- file.path(tempdir(), "test-update-csasstyle-resdoc") unlink(testing_path, recursive = TRUE, force = TRUE) dir.create(testing_path, showWarnings = FALSE) setwd(testing_path) suppressMessages(csasdown::draft( system.file("rmarkdown", "templates", "resdoc", package = "csasdown"), create_dir = FALSE, edit = FALSE )) # --------------------------------------------------------------------------- expect_error(csasdown:::update_csasstyle(copy = FALSE, line_nums = TRUE), paste0("YAML header. The permanent style file cannot be ", "modified as needed to include line numbering")) # --------------------------------------------------------------------------- expect_error(csasdown:::update_csasstyle(copy = FALSE, line_nums = FALSE, lot_lof = TRUE), paste0("YAML header. The permanent style file cannot be ", "modified as needed to include the lists of tables ", "and figures")) # --------------------------------------------------------------------------- expect_error(csasdown:::update_csasstyle(copy = FALSE, line_nums = FALSE, draft_watermark = TRUE), paste0("YAML header. 
The permanent style file cannot be ", "modified as needed to include the DRAFT watermark")) # --------------------------------------------------------------------------- # Set lot_lof (toggle show List of tables/List of Figures in doc) rmd <- readLines("index.Rmd") ind <- grep("lot_lof:", rmd) rmd[ind] <- " lot_lof: true" writeLines(rmd, "index.Rmd") csasdown::render() expect_true(file.exists("_book/resdoc-english.pdf")) # --------------------------------------------------------------------------- # Set draft_watermark unlink("_book/resdoc-english.pdf", force = TRUE) unlink("_book/resdoc-english.tex", force = TRUE) rmd <- readLines("index.Rmd") ind <- grep("lot_lof:", rmd) rmd[ind] <- " lot_lof: false" ind <- grep("draft_watermark:", rmd) rmd[ind] <- " draft_watermark: true" writeLines(rmd, "index.Rmd") csasdown::render() expect_true(file.exists("_book/resdoc-english.pdf")) setwd(wd) })
/tests/testthat/test-update-csasstyle.R
permissive
pbs-assess/csasdown
R
false
false
2,395
r
test_that("update_csasstyle() works", { wd <- getwd() testing_path <- file.path(tempdir(), "test-update-csasstyle-resdoc") unlink(testing_path, recursive = TRUE, force = TRUE) dir.create(testing_path, showWarnings = FALSE) setwd(testing_path) suppressMessages(csasdown::draft( system.file("rmarkdown", "templates", "resdoc", package = "csasdown"), create_dir = FALSE, edit = FALSE )) # --------------------------------------------------------------------------- expect_error(csasdown:::update_csasstyle(copy = FALSE, line_nums = TRUE), paste0("YAML header. The permanent style file cannot be ", "modified as needed to include line numbering")) # --------------------------------------------------------------------------- expect_error(csasdown:::update_csasstyle(copy = FALSE, line_nums = FALSE, lot_lof = TRUE), paste0("YAML header. The permanent style file cannot be ", "modified as needed to include the lists of tables ", "and figures")) # --------------------------------------------------------------------------- expect_error(csasdown:::update_csasstyle(copy = FALSE, line_nums = FALSE, draft_watermark = TRUE), paste0("YAML header. 
The permanent style file cannot be ", "modified as needed to include the DRAFT watermark")) # --------------------------------------------------------------------------- # Set lot_lof (toggle show List of tables/List of Figures in doc) rmd <- readLines("index.Rmd") ind <- grep("lot_lof:", rmd) rmd[ind] <- " lot_lof: true" writeLines(rmd, "index.Rmd") csasdown::render() expect_true(file.exists("_book/resdoc-english.pdf")) # --------------------------------------------------------------------------- # Set draft_watermark unlink("_book/resdoc-english.pdf", force = TRUE) unlink("_book/resdoc-english.tex", force = TRUE) rmd <- readLines("index.Rmd") ind <- grep("lot_lof:", rmd) rmd[ind] <- " lot_lof: false" ind <- grep("draft_watermark:", rmd) rmd[ind] <- " draft_watermark: true" writeLines(rmd, "index.Rmd") csasdown::render() expect_true(file.exists("_book/resdoc-english.pdf")) setwd(wd) })
fastDoCall("complex", list(imaginary = 1:3)) ## if we already have a list (e.g. a data frame) ## we need c() to add further arguments tmp <- expand.grid(letters[1:2], 1:3, c("+", "-")) fastDoCall("paste", c(tmp, sep = "")) ## examples of where objects will be found. A <- 2 f <- function(x) print(x^2) env <- new.env() assign("A", 10, envir = env) assign("f", f, envir = env) f <- function(x) print(x) f(A) # 2 fastDoCall("f", list(A)) # 2 fastDoCall("f", list(A), envir = env) # 4 fastDoCall(f, list(A), envir = env) # 2 fastDoCall("f", list(quote(A)), envir = env) # 100 fastDoCall(f, list(quote(A)), envir = env) # 10 fastDoCall("f", list(as.name("A")), envir = env) # 100 eval(call("f", A)) # 2 eval(call("f", quote(A))) # 2 eval(call("f", A), envir = env) # 4 eval(call("f", quote(A)), envir = env) # 100
/Gmisc/inst/examples/fastDoCall_ex.R
no_license
ingted/R-Examples
R
false
false
958
r
fastDoCall("complex", list(imaginary = 1:3)) ## if we already have a list (e.g. a data frame) ## we need c() to add further arguments tmp <- expand.grid(letters[1:2], 1:3, c("+", "-")) fastDoCall("paste", c(tmp, sep = "")) ## examples of where objects will be found. A <- 2 f <- function(x) print(x^2) env <- new.env() assign("A", 10, envir = env) assign("f", f, envir = env) f <- function(x) print(x) f(A) # 2 fastDoCall("f", list(A)) # 2 fastDoCall("f", list(A), envir = env) # 4 fastDoCall(f, list(A), envir = env) # 2 fastDoCall("f", list(quote(A)), envir = env) # 100 fastDoCall(f, list(quote(A)), envir = env) # 10 fastDoCall("f", list(as.name("A")), envir = env) # 100 eval(call("f", A)) # 2 eval(call("f", quote(A))) # 2 eval(call("f", A), envir = env) # 4 eval(call("f", quote(A)), envir = env) # 100
library(shiny) library(plotly) library(ggplot2) # Define UI for application that draws a histogram shinyUI(fluidPage( titlePanel("Canada Unemployment Rate from 2012 to 2017"), sidebarLayout( sidebarPanel( h1("Move the month slider!"), sliderInput("slider1", "Slide Me!", 0, 12, 0, step =1) ), mainPanel( wellPanel( conditionalPanel(condition = "output.test1 == false", plotlyOutput("plot0", width = "100%") ), conditionalPanel(condition = "output.test1 == true", plotOutput("plot1", width = "100%")) ) ) ) ))
/ui.R
no_license
WeiLiMontreal/Developing_Data_Product-Week4Pro
R
false
false
689
r
library(shiny) library(plotly) library(ggplot2) # Define UI for application that draws a histogram shinyUI(fluidPage( titlePanel("Canada Unemployment Rate from 2012 to 2017"), sidebarLayout( sidebarPanel( h1("Move the month slider!"), sliderInput("slider1", "Slide Me!", 0, 12, 0, step =1) ), mainPanel( wellPanel( conditionalPanel(condition = "output.test1 == false", plotlyOutput("plot0", width = "100%") ), conditionalPanel(condition = "output.test1 == true", plotOutput("plot1", width = "100%")) ) ) ) ))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{add_julia_processes} \alias{add_julia_processes} \title{Add additional Julia worker processes to parallelize workloads} \usage{ add_julia_processes(...) } \arguments{ \item{...}{Refer to the Julia documentation for available parameters} } \description{ Julia Equivalent: \href{https://docs.julialang.org/en/v1/stdlib/Distributed/#Distributed.addprocs}{\code{Distributed.addprocs!}} } \details{ For more information, refer to the \href{https://docs.interpretable.ai/stable/IAIBase/advanced/#IAIBase-Parallelization-1}{documentation on parallelization} } \examples{ \dontrun{iai::add_julia_processes(3)} }
/man/add_julia_processes.Rd
no_license
cran/iai
R
false
true
697
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{add_julia_processes} \alias{add_julia_processes} \title{Add additional Julia worker processes to parallelize workloads} \usage{ add_julia_processes(...) } \arguments{ \item{...}{Refer to the Julia documentation for available parameters} } \description{ Julia Equivalent: \href{https://docs.julialang.org/en/v1/stdlib/Distributed/#Distributed.addprocs}{\code{Distributed.addprocs!}} } \details{ For more information, refer to the \href{https://docs.interpretable.ai/stable/IAIBase/advanced/#IAIBase-Parallelization-1}{documentation on parallelization} } \examples{ \dontrun{iai::add_julia_processes(3)} }
# Test utils context("Test defac conversion of factors") test_that("defac works for all types of factors", { a <- as.factor(LETTERS) b <- ordered(c(1, 3, '09', 7, 5, "B")) expect_is(defac(a), "character") expect_is(defac(b), "character") a2 <- defac(a) b2 <- defac(b) expect_identical(levels(a), a2) expect_true(all(levels(b) %in% b2)) expect_identical(length(a), length(a2)) expect_identical(length(b), length(b2)) }) context("Forcing numerics with makenum") test_that("makenum works for all types of factors", { a <- ordered(c(1, 3, '09', 7, 5)) a2 <- makenum(a) b <- factor(c(1, 3, '09', 7, 5)) b2 <- makenum(b) c <- factor(c(1, 3, '09', 7, 5, "B")) c2 <- makenum(c) expect_is(a2, "numeric") expect_is(b2, "numeric") expect_is(c2, "numeric") expect_identical(length(a), length(a2)) expect_identical(length(b), length(b2)) expect_identical(length(c), length(c2)) expect_identical(a2, b2) expect_identical(c2[6], NA_real_) }) context("Test that cutoff is numerically accurate") test_that("cutoff gets the desired result", { set.seed(1024) a <- rnorm(1000, mean = 0, sd = 1) b <- rlnorm(1000, meanlog = 2, sdlog = 1) expect_equal(cutoff(a, .05), 0) expect_equal(cutoff(a, 0.5), 2) expect_equal(cutoff(b, .8), 427) d <- b d[400:500] <- NA expect_equal(cutoff(d, 0.2), 131) expect_equal(cutoff(d, 0.9, na.rm=FALSE), NA) expect_equal(cutoff(d, 0.2, na.rm=FALSE), NA) expect_equal(cutoff(d, 0.9, na.rm=TRUE), 648) expect_equal(cutoff(d, 0.2, na.rm=TRUE), 131) expect_error(cutoff(d, 39)) expect_error(cutoff(d, -39)) expect_error(cutoff(d, -0.00039)) }) context("Test the threshold function for numeric accuracy") test_that("thresh gets the accurate result", { set.seed(1024) a <- rnorm(1000, mean = 0, sd = 1) b <- rlnorm(1000, meanlog = 2, sdlog = 1) expect_error(thresh(a, 0)) expect_equal(thresh(a, 2), 0.5, tol = 0.03, scale = 1) expect_equal(thresh(b, 427), 0.8, tol = 0.01) d <- b d[400:500] <- NA expect_equal(thresh(d, 131), 0.48, tol = 0.01) expect_equal(thresh(d, 648, na.rm=FALSE), NA) 
expect_equal(thresh(d, 131, na.rm=FALSE), NA) expect_equal(thresh(d, 600, na.rm=TRUE), 0.92, tol = 0.005) expect_equal(thresh(d, 131, na.rm=TRUE), 0.48, tol = 0.01) expect_error(thresh(d, 0.39)) expect_error(thresh(d, -0.39)) expect_error(thresh(d, -39)) }) context("Test that max_mis works correctly") test_that("max_mis handles missing data correctly", { expect_identical(max(c(7,NA,3,2,0),na.rm=TRUE), max_mis(c(7,NA,3,2,0))) max(c(NA,NA,NA,NA),na.rm=TRUE) expect_identical(max_mis(c(NA,NA,NA,NA)), NA_real_) expect_identical(max_mis(c(NA_real_, NA_real_)), NA_real_) expect_identical(max_mis(vector(mode = 'integer')), NA_integer_) expect_identical(max_mis(c()), NA_real_) expect_error(max_mis(c("A", "B", "C"))) expect_error(max_mis(factor("A", "B", "C"))) expect_error(max_mis(ordered("A", "B", "C"))) }) context("Remove character") test_that("Remove character works for multiple character type", { a <- c(1, 5, 3, 6, "*", 2, 5, "*", "*") b <- remove_char(a, "*") expect_is(b, "character") expect_identical(length(a), length(b)) expect_equal(length(b[is.na(b)]), 3) a <- c(1, 3, 5, "B", "D", ".", ".", ".") b <- remove_char(a, ".") expect_is(b, "character") expect_identical(length(a), length(b)) expect_equal(length(b[is.na(b)]), 3) a <- c(1, 3, 5, "B", "D", "Unk.", "Unk.", "Unk.") b <- remove_char(a, "Unk.") expect_is(b, "character") expect_identical(length(a), length(b)) expect_equal(length(b[is.na(b)]), 3) a <- c(1, 3, 5, "B", "D", "Unk.", "Unk.", "Unk.", NA, NA, NA) b <- remove_char(a, "Unk.") expect_is(b, "character") expect_identical(length(a), length(b)) expect_equal(length(b[is.na(b)]), 6) }) context("Leading zero functions as desired") test_that("Function works for multiple types of inputs", { a <- seq(1, 9) a2 <- leading_zero(a, digits = 2) expect_is(a2, "character") expect_true(all(sapply(a2, nchar)==2)) expect_error(leading_zero(a2, digits = -1)) expect_error(leading_zero(a2, digits = 0)) expect_identical(leading_zero(a, digits = -1), leading_zero(a, digits = 0)) a 
<- seq(9, 25) a2 <- leading_zero(a, digits = 3) expect_is(a2, "character") expect_true(all(sapply(a2, nchar)==3)) a2 <- leading_zero(a, digits = 1) expect_false(all(sapply(a2, nchar)==1)) expect_error(leading_zero(c("A", "B", "C", digits = 2))) a <- c(-5000, -50, -5, -.01, 0, 0.1, 4, 40, 400, 4000) a2 <- leading_zero(a, digits = 3) expect_identical(a2, c("-5000", "-050", "-005", "0000", "0000", "0000", "0004", "0040", "0400", "4000")) }) context("Test decomma") a <- c("12,124", "21,131", "A,b") b <- c("12124", "21131", "Ab") c <- a[1:2] d <- as.numeric(b[1:2]) test_that("decomma returns the right class", { expect_that(decomma(c), equals(d)) expect_that(decomma(a), gives_warning()) expect_that(decomma(a), is_a("numeric")) expect_that(decomma(b), is_a("numeric")) expect_that(decomma(c), is_a("numeric")) expect_that(decomma(d), is_a("numeric")) }) n <- c(NA, NA, NA, "7,102", "27,125", "23,325,22", "Ab") test_that("decomma handles NAs properly", { expect_that(length(decomma(n)[!is.na(decomma(n))]), equals(3)) expect_that(decomma(n)[6], equals(2332522)) }) context("nth max") test_that("Numeric accuracy", { a <- c(1:20, 20:1) b <- c(LETTERS[c(2, 9, 3, 12, 1)]) z <- c(121253125, 12401892377905, 31221, 12, 45, -2145125, -123, 0) f <- c(10, 10, 10, 10, 9, 9, 10.0001, 10.0001) expect_error(nth_max(a, 100), "index .* outside bounds") expect_error(nth_max(a, -1), "index .* outside bounds") expect_equal(nth_max(a), 20) expect_equal(nth_max(b, 3), "C") expect_equal(nth_max(z), 12401892377905) expect_equal(nth_max(f), 10.0001) }) context("Test isid") test_that("ISID returns correct values", { data(stuatt) total <- nrow(stuatt) expect_false(isid(stuatt, vars = c("sid"))) expect_false(isid(stuatt, vars = c("sid", "school_year"))) expect_output(isid(stuatt, vars = c("sid", "school_year"), verbose = TRUE)) expect_output(isid(stuatt, vars = c("sid", "school_year", "male", "race_ethnicity", "hs_diploma_type"), verbose=TRUE)) expect_true(isid(stuatt, vars = c("sid", "school_year", 
"male", "race_ethnicity", "hs_diploma_type"))) })
/tests/testthat/test-utils.R
no_license
cran/eeptools
R
false
false
6,688
r
# Test utils context("Test defac conversion of factors") test_that("defac works for all types of factors", { a <- as.factor(LETTERS) b <- ordered(c(1, 3, '09', 7, 5, "B")) expect_is(defac(a), "character") expect_is(defac(b), "character") a2 <- defac(a) b2 <- defac(b) expect_identical(levels(a), a2) expect_true(all(levels(b) %in% b2)) expect_identical(length(a), length(a2)) expect_identical(length(b), length(b2)) }) context("Forcing numerics with makenum") test_that("makenum works for all types of factors", { a <- ordered(c(1, 3, '09', 7, 5)) a2 <- makenum(a) b <- factor(c(1, 3, '09', 7, 5)) b2 <- makenum(b) c <- factor(c(1, 3, '09', 7, 5, "B")) c2 <- makenum(c) expect_is(a2, "numeric") expect_is(b2, "numeric") expect_is(c2, "numeric") expect_identical(length(a), length(a2)) expect_identical(length(b), length(b2)) expect_identical(length(c), length(c2)) expect_identical(a2, b2) expect_identical(c2[6], NA_real_) }) context("Test that cutoff is numerically accurate") test_that("cutoff gets the desired result", { set.seed(1024) a <- rnorm(1000, mean = 0, sd = 1) b <- rlnorm(1000, meanlog = 2, sdlog = 1) expect_equal(cutoff(a, .05), 0) expect_equal(cutoff(a, 0.5), 2) expect_equal(cutoff(b, .8), 427) d <- b d[400:500] <- NA expect_equal(cutoff(d, 0.2), 131) expect_equal(cutoff(d, 0.9, na.rm=FALSE), NA) expect_equal(cutoff(d, 0.2, na.rm=FALSE), NA) expect_equal(cutoff(d, 0.9, na.rm=TRUE), 648) expect_equal(cutoff(d, 0.2, na.rm=TRUE), 131) expect_error(cutoff(d, 39)) expect_error(cutoff(d, -39)) expect_error(cutoff(d, -0.00039)) }) context("Test the threshold function for numeric accuracy") test_that("thresh gets the accurate result", { set.seed(1024) a <- rnorm(1000, mean = 0, sd = 1) b <- rlnorm(1000, meanlog = 2, sdlog = 1) expect_error(thresh(a, 0)) expect_equal(thresh(a, 2), 0.5, tol = 0.03, scale = 1) expect_equal(thresh(b, 427), 0.8, tol = 0.01) d <- b d[400:500] <- NA expect_equal(thresh(d, 131), 0.48, tol = 0.01) expect_equal(thresh(d, 648, na.rm=FALSE), NA) 
expect_equal(thresh(d, 131, na.rm=FALSE), NA) expect_equal(thresh(d, 600, na.rm=TRUE), 0.92, tol = 0.005) expect_equal(thresh(d, 131, na.rm=TRUE), 0.48, tol = 0.01) expect_error(thresh(d, 0.39)) expect_error(thresh(d, -0.39)) expect_error(thresh(d, -39)) }) context("Test that max_mis works correctly") test_that("max_mis handles missing data correctly", { expect_identical(max(c(7,NA,3,2,0),na.rm=TRUE), max_mis(c(7,NA,3,2,0))) max(c(NA,NA,NA,NA),na.rm=TRUE) expect_identical(max_mis(c(NA,NA,NA,NA)), NA_real_) expect_identical(max_mis(c(NA_real_, NA_real_)), NA_real_) expect_identical(max_mis(vector(mode = 'integer')), NA_integer_) expect_identical(max_mis(c()), NA_real_) expect_error(max_mis(c("A", "B", "C"))) expect_error(max_mis(factor("A", "B", "C"))) expect_error(max_mis(ordered("A", "B", "C"))) }) context("Remove character") test_that("Remove character works for multiple character type", { a <- c(1, 5, 3, 6, "*", 2, 5, "*", "*") b <- remove_char(a, "*") expect_is(b, "character") expect_identical(length(a), length(b)) expect_equal(length(b[is.na(b)]), 3) a <- c(1, 3, 5, "B", "D", ".", ".", ".") b <- remove_char(a, ".") expect_is(b, "character") expect_identical(length(a), length(b)) expect_equal(length(b[is.na(b)]), 3) a <- c(1, 3, 5, "B", "D", "Unk.", "Unk.", "Unk.") b <- remove_char(a, "Unk.") expect_is(b, "character") expect_identical(length(a), length(b)) expect_equal(length(b[is.na(b)]), 3) a <- c(1, 3, 5, "B", "D", "Unk.", "Unk.", "Unk.", NA, NA, NA) b <- remove_char(a, "Unk.") expect_is(b, "character") expect_identical(length(a), length(b)) expect_equal(length(b[is.na(b)]), 6) }) context("Leading zero functions as desired") test_that("Function works for multiple types of inputs", { a <- seq(1, 9) a2 <- leading_zero(a, digits = 2) expect_is(a2, "character") expect_true(all(sapply(a2, nchar)==2)) expect_error(leading_zero(a2, digits = -1)) expect_error(leading_zero(a2, digits = 0)) expect_identical(leading_zero(a, digits = -1), leading_zero(a, digits = 0)) a 
<- seq(9, 25) a2 <- leading_zero(a, digits = 3) expect_is(a2, "character") expect_true(all(sapply(a2, nchar)==3)) a2 <- leading_zero(a, digits = 1) expect_false(all(sapply(a2, nchar)==1)) expect_error(leading_zero(c("A", "B", "C", digits = 2))) a <- c(-5000, -50, -5, -.01, 0, 0.1, 4, 40, 400, 4000) a2 <- leading_zero(a, digits = 3) expect_identical(a2, c("-5000", "-050", "-005", "0000", "0000", "0000", "0004", "0040", "0400", "4000")) }) context("Test decomma") a <- c("12,124", "21,131", "A,b") b <- c("12124", "21131", "Ab") c <- a[1:2] d <- as.numeric(b[1:2]) test_that("decomma returns the right class", { expect_that(decomma(c), equals(d)) expect_that(decomma(a), gives_warning()) expect_that(decomma(a), is_a("numeric")) expect_that(decomma(b), is_a("numeric")) expect_that(decomma(c), is_a("numeric")) expect_that(decomma(d), is_a("numeric")) }) n <- c(NA, NA, NA, "7,102", "27,125", "23,325,22", "Ab") test_that("decomma handles NAs properly", { expect_that(length(decomma(n)[!is.na(decomma(n))]), equals(3)) expect_that(decomma(n)[6], equals(2332522)) }) context("nth max") test_that("Numeric accuracy", { a <- c(1:20, 20:1) b <- c(LETTERS[c(2, 9, 3, 12, 1)]) z <- c(121253125, 12401892377905, 31221, 12, 45, -2145125, -123, 0) f <- c(10, 10, 10, 10, 9, 9, 10.0001, 10.0001) expect_error(nth_max(a, 100), "index .* outside bounds") expect_error(nth_max(a, -1), "index .* outside bounds") expect_equal(nth_max(a), 20) expect_equal(nth_max(b, 3), "C") expect_equal(nth_max(z), 12401892377905) expect_equal(nth_max(f), 10.0001) }) context("Test isid") test_that("ISID returns correct values", { data(stuatt) total <- nrow(stuatt) expect_false(isid(stuatt, vars = c("sid"))) expect_false(isid(stuatt, vars = c("sid", "school_year"))) expect_output(isid(stuatt, vars = c("sid", "school_year"), verbose = TRUE)) expect_output(isid(stuatt, vars = c("sid", "school_year", "male", "race_ethnicity", "hs_diploma_type"), verbose=TRUE)) expect_true(isid(stuatt, vars = c("sid", "school_year", 
"male", "race_ethnicity", "hs_diploma_type"))) })
#A simple exercise for setting values for nodesize, sampsize and mtry in randomForest modelling #Used for require(randomForest) library(PresenceAbsence) #For AUC calculation #loops for nodesize, sampsize and mtry #nodesize ########################################### nodesize.results<-matrix(data = NA, nrow = 50, ncol = ncol(taxa.ref)) colnames(nodesize.results)<-colnames(taxa.ref) rownames(nodesize.results)<-1:50 nodesize.results.auc<-nodesize.results for(taxon in colnames(taxa.ref)) { for(nodesize in 1:50) { data.tmp<-data.frame(taxa.ref[,taxon], env.ref) model.RF<-randomForest(as.factor(data.tmp[,1]) ~ .,data=data.tmp[,-1], importance=F, nodesize=nodesize, ntree=1000) nodesize.results[nodesize,taxon]<-model.RF$err.rate[1000,'OOB'] modelresults.tmp<-data.frame(rownames(taxa.ref),taxa.ref[,taxon],model.RF$votes[,'1']) nodesize.results.auc[nodesize,taxon]<-auc(modelresults.tmp, na.rm=T)[1,'AUC'] print(nodesize) } print(taxon) } #sampsize ########################################### sampsize.results<-matrix(data = NA, nrow = 41, ncol = ncol(taxa.ref)) colnames(sampsize.results)<-colnames(taxa.ref) rownames(sampsize.results)<-10:50 sampsize.results.auc<-sampsize.results for(taxon in colnames(taxa.ref)) { for(sampsize in 10:50) { data.tmp<-data.frame(taxa.ref[,taxon], env.ref) model.RF<-randomForest(as.factor(data.tmp[,1]) ~ .,data=data.tmp[,-1], importance=F, sampsize=sampsize, ntree=1000) sampsize.results[sampsize-9,taxon]<-model.RF$err.rate[1000,'OOB'] modelresults.tmp<-data.frame(rownames(taxa.ref),taxa.ref[,taxon],model.RF$votes[,'1']) sampsize.results.auc[sampsize-9,taxon]<-auc(modelresults.tmp, na.rm=T)[1,'AUC'] print(sampsize) } print(taxon) } #mtry ########################################### mtry.results<-matrix(data = NA, nrow = 11, ncol = ncol(taxa.ref)) colnames(mtry.results)<-colnames(taxa.ref) rownames(mtry.results)<-1:11 mtry.results.auc<-mtry.results for(taxon in colnames(taxa.ref)) { for(mtry in 1:11) { data.tmp<-data.frame(taxa.ref[,taxon], env.ref) 
model.RF<-randomForest(as.factor(data.tmp[,1]) ~ .,data=data.tmp[,-1], importance=F, mtry=mtry, ntree=1000) mtry.results[mtry,taxon]<-model.RF$err.rate[1000,'OOB'] modelresults.tmp<-data.frame(rownames(taxa.ref),taxa.ref[,taxon],model.RF$votes[,'1']) mtry.results.auc[mtry,taxon]<-auc(modelresults.tmp, na.rm=T)[1,'AUC'] print(mtry) } print(taxon) } #default settings for comparison ########################################### default.results<-matrix(data = NA, ncol = 1, nrow = ncol(taxa.ref)) rownames(default.results)<-colnames(taxa.ref) colnames(default.results)<-c('AUC') for(taxon in colnames(taxa.ref)) { data.tmp<-data.frame(taxa.ref[,taxon], env.ref) model.RF<-randomForest(as.factor(data.tmp[,1]) ~ .,data=data.tmp[,-1], importance=F, ntree=1000) default.results[taxon,'AUC']<-auc(modelresults.tmp, na.rm=T)[1,'AUC'] print(taxon) } mean(default.results[,'AUC']) ############# #save results or find the lowest mean (across taxon) write.table(nodesize.results, "nodesize.results.txt", sep="\t", dec=",", col.names=NA) write.table(sampsize.results, "sampsize.results.txt", sep="\t", dec=",", col.names=NA) write.table(mtry.results, "mtry.results.txt", sep="\t", dec=",", col.names=NA) write.table(nodesize.results.auc, "nodesize.results.auc.txt", sep="\t", dec=",", col.names=NA) write.table(sampsize.results.auc, "sampsize.results.auc.txt", sep="\t", dec=",", col.names=NA) write.table(mtry.results.auc, "mtry.results.auc.txt", sep="\t", dec=",", col.names=NA) sort(apply(nodesize.results.auc,1, mean) ) sort(apply(sampsize.results.auc,1, mean) ) sort(apply(mtry.results.auc,1, mean) ) which(apply(nodesize.results.auc,1, mean)==max(apply(nodesize.results.auc,1, mean))) which(apply(sampsize.results.auc,1, mean)==max(apply(sampsize.results.auc,1, mean))) which(apply(mtry.results.auc,1, mean)==max(apply(mtry.results.auc,1, mean)))
/2. random forest setting.R
no_license
simonhallstan/Refmodels-RI-
R
false
false
3,972
r
#A simple exercise for setting values for nodesize, sampsize and mtry in randomForest modelling #Used for require(randomForest) library(PresenceAbsence) #For AUC calculation #loops for nodesize, sampsize and mtry #nodesize ########################################### nodesize.results<-matrix(data = NA, nrow = 50, ncol = ncol(taxa.ref)) colnames(nodesize.results)<-colnames(taxa.ref) rownames(nodesize.results)<-1:50 nodesize.results.auc<-nodesize.results for(taxon in colnames(taxa.ref)) { for(nodesize in 1:50) { data.tmp<-data.frame(taxa.ref[,taxon], env.ref) model.RF<-randomForest(as.factor(data.tmp[,1]) ~ .,data=data.tmp[,-1], importance=F, nodesize=nodesize, ntree=1000) nodesize.results[nodesize,taxon]<-model.RF$err.rate[1000,'OOB'] modelresults.tmp<-data.frame(rownames(taxa.ref),taxa.ref[,taxon],model.RF$votes[,'1']) nodesize.results.auc[nodesize,taxon]<-auc(modelresults.tmp, na.rm=T)[1,'AUC'] print(nodesize) } print(taxon) } #sampsize ########################################### sampsize.results<-matrix(data = NA, nrow = 41, ncol = ncol(taxa.ref)) colnames(sampsize.results)<-colnames(taxa.ref) rownames(sampsize.results)<-10:50 sampsize.results.auc<-sampsize.results for(taxon in colnames(taxa.ref)) { for(sampsize in 10:50) { data.tmp<-data.frame(taxa.ref[,taxon], env.ref) model.RF<-randomForest(as.factor(data.tmp[,1]) ~ .,data=data.tmp[,-1], importance=F, sampsize=sampsize, ntree=1000) sampsize.results[sampsize-9,taxon]<-model.RF$err.rate[1000,'OOB'] modelresults.tmp<-data.frame(rownames(taxa.ref),taxa.ref[,taxon],model.RF$votes[,'1']) sampsize.results.auc[sampsize-9,taxon]<-auc(modelresults.tmp, na.rm=T)[1,'AUC'] print(sampsize) } print(taxon) } #mtry ########################################### mtry.results<-matrix(data = NA, nrow = 11, ncol = ncol(taxa.ref)) colnames(mtry.results)<-colnames(taxa.ref) rownames(mtry.results)<-1:11 mtry.results.auc<-mtry.results for(taxon in colnames(taxa.ref)) { for(mtry in 1:11) { data.tmp<-data.frame(taxa.ref[,taxon], env.ref) 
model.RF<-randomForest(as.factor(data.tmp[,1]) ~ .,data=data.tmp[,-1], importance=F, mtry=mtry, ntree=1000) mtry.results[mtry,taxon]<-model.RF$err.rate[1000,'OOB'] modelresults.tmp<-data.frame(rownames(taxa.ref),taxa.ref[,taxon],model.RF$votes[,'1']) mtry.results.auc[mtry,taxon]<-auc(modelresults.tmp, na.rm=T)[1,'AUC'] print(mtry) } print(taxon) } #default settings for comparison ########################################### default.results<-matrix(data = NA, ncol = 1, nrow = ncol(taxa.ref)) rownames(default.results)<-colnames(taxa.ref) colnames(default.results)<-c('AUC') for(taxon in colnames(taxa.ref)) { data.tmp<-data.frame(taxa.ref[,taxon], env.ref) model.RF<-randomForest(as.factor(data.tmp[,1]) ~ .,data=data.tmp[,-1], importance=F, ntree=1000) default.results[taxon,'AUC']<-auc(modelresults.tmp, na.rm=T)[1,'AUC'] print(taxon) } mean(default.results[,'AUC']) ############# #save results or find the lowest mean (across taxon) write.table(nodesize.results, "nodesize.results.txt", sep="\t", dec=",", col.names=NA) write.table(sampsize.results, "sampsize.results.txt", sep="\t", dec=",", col.names=NA) write.table(mtry.results, "mtry.results.txt", sep="\t", dec=",", col.names=NA) write.table(nodesize.results.auc, "nodesize.results.auc.txt", sep="\t", dec=",", col.names=NA) write.table(sampsize.results.auc, "sampsize.results.auc.txt", sep="\t", dec=",", col.names=NA) write.table(mtry.results.auc, "mtry.results.auc.txt", sep="\t", dec=",", col.names=NA) sort(apply(nodesize.results.auc,1, mean) ) sort(apply(sampsize.results.auc,1, mean) ) sort(apply(mtry.results.auc,1, mean) ) which(apply(nodesize.results.auc,1, mean)==max(apply(nodesize.results.auc,1, mean))) which(apply(sampsize.results.auc,1, mean)==max(apply(sampsize.results.auc,1, mean))) which(apply(mtry.results.auc,1, mean)==max(apply(mtry.results.auc,1, mean)))
#The following code is to combine and arrange the output from the simulation study #according to type of design (accuracy, location, no effect) for the purpose of #creating plots and investigating the performance. B0I <- c(3,1,0,-1,-3) B0II <- c(3,1,0,-1,-3) B1I <- c(2,1,0.5,0,-0.5,-1,-2) B1II <- c(2,1,0.5,0,-0.5,-1,-2) #Create matrices indicating if a design is a location/any effect design LocArray <- array(NA, dim = c(500, 10, 800)) LocIArray <- array(NA, dim = c(500, 10, 256)) AccArray <- array(NA, dim = c(500, 10, 144)) NoEffArray <- array(NA, dim = c(500, 10, 25)) #Create output matrices for performance measures for all designs LocationI <- matrix(NA, 256, 56) Location <- matrix(NA, 800, 56) Accuracy <- matrix(NA, 144, 56) NoEff <- matrix(NA, 25, 56) colnames(Location) <- c(rep(c("B0I", "B1I", "B0II", "B1II", "ax", "ac", "Bc", "mBc", "BcmX"), 6), "AI", "NEI") colnames(LocationI) <- c(rep(c("B0I", "B1I", "B0II", "B1II", "ax", "ac", "Bc", "mBc", "BcmX"), 6), "AI", "NEI") colnames(Accuracy) <- c(rep(c("B0I", "B1I", "B0II", "B1II", "ax", "ac", "Bc", "mBc", "BcmX"), 6), "AI", "NEI") colnames(NoEff) <- c(rep(c("B0I", "B1I", "B0II", "B1II", "ax", "ac", "Bc", "mBc", "BcmX"), 6), "AI", "NEI") AcmeasLoc <- matrix(NA, 800, 6) AcmeasLocI <- matrix(NA, 256, 6) AcmeasA <- matrix(NA, 144, 6) AcmeasNoEff <- matrix(NA, 25, 6) colnames(AcmeasLoc) <- c(rep("SSDO",6)) colnames(AcmeasLocI) <- c(rep("SSDO",6)) colnames(AcmeasA) <- c(rep("SSDO",6)) colnames(AcmeasNoEff) <- c(rep("SSDO",6)) #Create matrices for the proportions of zero effects for Bc, SAM (BcmX) and AS (mBc) PZeroEffBcLoc <- rep(NA, 800) PZeroEffBcLocI <- rep(NA, 256) PZeroEffBcA <- rep(NA, 144) PZeroEffmBcLoc <- rep(NA, 800) PZeroEffmBcLocI <- rep(NA, 256) PZeroEffmBcA <- rep(NA, 144) PZeroEffBcmXLoc <- rep(NA, 800) PZeroEffBcmXLocI <- rep(NA, 256) PZeroEffBcmXA <- rep(NA, 144) #Set counts count <- 0 countAccuracy <- 0 countNoEff <- 0 countColumn <- 0 countLocation <- 0 countLocationI <- 0 IsAccuracy <- 0 IsNoEff <- 
0 for(i in 1:7){ for(j in 1:7){ for(k in 1:5){ for(z in 1:5){ count <- count + 1 Sumres <- read.table(file=paste("Paper 3 simulations/ResultsN=50/result", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".txt", sep=""), sep="\t") #load results for whether HPD SSDO contains 0 load(file = paste("Paper 3 simulations/ResultsN=50/Accuracyindicator", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".Rdata", sep="")) #load results for whether HPDs of either B1I or B1II do not contain 0 load(file = paste("Paper 3 simulations/ResultsN=50/NoEffindicator", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".Rdata", sep="")) #load results for whether HPDs of Bc, AS (mBC) and SAM (BxmX) contain 0 load(file = paste("Paper 3 simulations/ResultsN=50/ZeroEffBc", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".Rdata", sep="")) load(file = paste("Paper 3 simulations/ResultsN=50/ZeroEffmBc", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".Rdata", sep="")) load(file = paste("Paper 3 simulations/ResultsN=50/ZeroEffBcmX", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".Rdata", sep="")) SDisOrigin <- abs(sqrt((B0I[k]+B1I[i]*Sumres[5,1])^2 + (B0II[z]+B1II[j]*Sumres[5,1])^2)) if(B1I[i] == B1II[j] & B1I[i] == 0){ IsNoEff <- 1 }else{ IsNoEff <- 0 if(B0II[z] == 0 | B0I[k] == 0 | B1II[j] == 0 | B1I[i] == 0){ if(B0II[z] == 0 & B0I[k] == 0){ IsAccuracy <- 1 } else if((B0I[k] == B1I[i]) & B0I[k] == 0){ IsAccuracy <- 1 } else if((B0II[z] == B1II[j]) & B0II[z] ==0){ IsAccuracy <- 1 } else if((B1I[i]/B1II[j] - B0I[k]/B0II[z]) == 0){ IsAccuracy <- 1 }else{ IsAccuracy <- 0 } }else{ if((B1I[i]/B1II[j] - B0I[k]/B0II[z]) == 0){ IsAccuracy <- 1 }else{ IsAccuracy <- 0 } } } if(IsAccuracy == 1 & IsNoEff == 0){ countAccuracy <- countAccuracy + 1 AccArray[,1,countAccuracy] <- Accuracyindicator AccArray[,2,countAccuracy] <- NoEffindicator AccArray[,3,countAccuracy] <- ZeroEffBc AccArray[,4,countAccuracy] <- ZeroEffmBc AccArray[,5,countAccuracy] <- ZeroEffBcmX for(ii in 1:500){ if(ZeroEffBc[ii] == 
ZeroEffmBc[ii]){ AccArray[ii,6,countAccuracy] = 1 }else{ AccArray[ii,6,countAccuracy] = 0 } if(ZeroEffBcmX[ii] == ZeroEffmBc[ii]){ AccArray[ii,7,countAccuracy] = 1 }else{ AccArray[ii,7,countAccuracy] = 0 } if(ZeroEffBc[ii] == ZeroEffBcmX[ii]){ AccArray[ii,8,countAccuracy] = 1 }else{ AccArray[ii,8,countAccuracy] = 0 } if(NoEffindicator[ii] == ZeroEffBc[ii]){ AccArray[ii,9,countAccuracy] = 1 }else{ AccArray[ii,9,countAccuracy] = 0 } if(Accuracyindicator[ii] == ZeroEffBc[ii]){ AccArray[ii,10,countAccuracy] = 0 }else{ AccArray[ii,10,countAccuracy] = 1 } } for(s in 1:6){ for(p in 1:9){ countColumn <- countColumn + 1 Accuracy[countAccuracy, countColumn] <- Sumres[p,s] } AcmeasA[countAccuracy, s] <- Sumres[10,s] } Accuracy[countAccuracy, 55] <- sum(Accuracyindicator)/500 Accuracy[countAccuracy, 56] <- sum(NoEffindicator)/500 PZeroEffBcA[countAccuracy] <- sum(ZeroEffBc)/500 PZeroEffmBcA[countAccuracy] <- sum(ZeroEffmBc)/500 PZeroEffBcmXA[countAccuracy] <- sum(ZeroEffBcmX)/500 countColumn <- 0 } if(IsAccuracy == 0 & IsNoEff == 0){ if(SDisOrigin > 1){ countLocation <- countLocation + 1 LocArray[,1, countLocation] <- Accuracyindicator LocArray[,2, countLocation] <- NoEffindicator LocArray[,3, countLocation] <- ZeroEffBc LocArray[,4, countLocation] <- ZeroEffmBc LocArray[,5, countLocation] <- ZeroEffBcmX for(ii in 1:500){ if(ZeroEffBc[ii] == ZeroEffmBc[ii]){ LocArray[ii,6,countLocation] = 1 }else{ LocArray[ii,6,countLocation] = 0 } if(ZeroEffBcmX[ii] == ZeroEffmBc[ii]){ LocArray[ii,7,countLocation] = 1 }else{ LocArray[ii,7,countLocation] = 0 } if(ZeroEffBc[ii] == ZeroEffBcmX[ii]){ LocArray[ii,8,countLocation] = 1 }else{ LocArray[ii,8,countLocation] = 0 } if(NoEffindicator[ii] == ZeroEffBc[ii]){ LocArray[ii,9,countLocation] = 1 }else{ LocArray[ii,9,countLocation] = 0 } if(Accuracyindicator[ii] == ZeroEffBc[ii]){ LocArray[ii,10,countLocation] = 0 }else{ LocArray[ii,10,countLocation] = 1 } } for(s in 1:6){ for(p in 1:9){ countColumn <- countColumn + 1 Location[countLocation, 
countColumn] <- Sumres[p,s] } AcmeasLoc[countLocation, s] <- Sumres[10,s] } Location[countLocation, 55] <- sum(Accuracyindicator)/500 Location[countLocation, 56] <- sum(NoEffindicator)/500 PZeroEffBcLoc[countLocation] <- sum(ZeroEffBc)/500 PZeroEffmBcLoc[countLocation] <- sum(ZeroEffmBc)/500 PZeroEffBcmXLoc[countLocation] <- sum(ZeroEffBcmX)/500 countColumn <- 0 }else if(SDisOrigin <= 1){ countLocationI <- countLocationI + 1 LocIArray[,1, countLocationI] <- Accuracyindicator LocIArray[,2, countLocationI] <- NoEffindicator LocIArray[,3, countLocationI] <- ZeroEffBc LocIArray[,4, countLocationI] <- ZeroEffmBc LocIArray[,5, countLocationI] <- ZeroEffBcmX for(ii in 1:500){ if(ZeroEffBc[ii] == ZeroEffmBc[ii]){ LocIArray[ii,6,countLocationI] = 1 }else{ LocIArray[ii,6,countLocationI] = 0 } if(ZeroEffBcmX[ii] == ZeroEffmBc[ii]){ LocIArray[ii,7,countLocationI] = 1 }else{ LocIArray[ii,7,countLocationI] = 0 } if(ZeroEffBc[ii] == ZeroEffBcmX[ii]){ LocIArray[ii,8,countLocationI] = 1 }else{ LocIArray[ii,8,countLocationI] = 0 } if(NoEffindicator[ii] == ZeroEffBc[ii]){ LocIArray[ii,9,countLocationI] = 1 }else{ LocIArray[ii,9,countLocationI] = 0 } if(Accuracyindicator[ii] == ZeroEffBc[ii]){ LocIArray[ii,10,countLocationI] = 0 }else{ LocIArray[ii,10,countLocationI] = 1 } } for(s in 1:6){ for(p in 1:9){ countColumn <- countColumn + 1 LocationI[countLocationI, countColumn] <- Sumres[p,s] } AcmeasLocI[countLocationI, s] <- Sumres[10,s] } LocationI[countLocationI, 55] <- sum(Accuracyindicator)/500 LocationI[countLocationI, 56] <- sum(NoEffindicator)/500 PZeroEffBcLocI[countLocationI] <- sum(ZeroEffBc)/500 PZeroEffmBcLocI[countLocationI] <- sum(ZeroEffmBc)/500 PZeroEffBcmXLocI[countLocationI] <- sum(ZeroEffBcmX)/500 countColumn <- 0 } } if(IsNoEff == 1){ countNoEff <- countNoEff + 1 NoEffArray[,1,countNoEff] <- Accuracyindicator NoEffArray[,2,countNoEff] <- NoEffindicator NoEffArray[,3,countNoEff] <- ZeroEffBc NoEffArray[,4,countNoEff] <- ZeroEffmBc NoEffArray[,5,countNoEff] <- 
ZeroEffBcmX for(ii in 1:500){ if(ZeroEffBc[ii] == ZeroEffmBc[ii]){ NoEffArray[ii,6,countNoEff] = 1 }else{ NoEffArray[ii,6,countNoEff] = 0 } if(ZeroEffBcmX[ii] == ZeroEffmBc[ii]){ NoEffArray[ii,7,countNoEff] = 1 }else{ NoEffArray[ii,7,countNoEff] = 0 } if(ZeroEffBc[ii] == ZeroEffBcmX[ii]){ NoEffArray[ii,8,countNoEff] = 1 }else{ NoEffArray[ii,8,countNoEff] = 0 } if(NoEffindicator[ii] == ZeroEffBc[ii]){ NoEffArray[ii,9,countNoEff] = 1 }else{ NoEffArray[ii,9,countNoEff] = 0 } if(Accuracyindicator[ii] == ZeroEffBc[ii]){ NoEffArray[ii,10,countNoEff] = 0 }else{ NoEffArray[ii,10,countNoEff] = 1 } } for(s in 1:6){ for(p in 1:9){ countColumn <- countColumn + 1 NoEff[countNoEff, countColumn] <- Sumres[p,s] } AcmeasNoEff[countNoEff, s] <- Sumres[10,s] } NoEff[countNoEff, 55] <- sum(Accuracyindicator)/500 NoEff[countNoEff, 56] <- sum(NoEffindicator)/500 countColumn <- 0 } } } } } library(ggplot2) library(RColorBrewer) library(extrafont) font_install("fontcm") loadfonts() loadfonts(device = "win") par(family = "LM Roman 10") #Data for crosstabulations 5-7 Tabulate <- function(x, y){ X <- as.factor(x) levels(X) <- c(0,1) Y <- as.factor(y) levels(Y) <- c(0,1) return(table(X,Y)) } TabAcc <- (sapply((1:144), function(w){Tabulate(AccArray[,3,w], AccArray[,1,w])})/500)*100 TabLoc <- (sapply((1:800), function(w){Tabulate(LocArray[,3,w], LocArray[,1,w])})/500)*100 TabLocI <- (sapply((1:256), function(w){Tabulate(LocIArray[,3,w], LocIArray[,1,w])})/500)*100 TabBAcc <- (sapply((1:144), function(w){c(sum(AccArray[,2,w]), 500-sum(AccArray[,2,w]))})/500)*100 TabBLoc <- (sapply((1:800), function(w){c(sum(LocArray[,2,w]), 500-sum(LocArray[,2,w]))})/500)*100 TabBLocI <- (sapply((1:256), function(w){c(sum(LocIArray[,2,w]), 500-sum(LocIArray[,2,w]))})/500)*100 rowMeans(TabAcc) rowMeans(TabLoc) rowMeans(TabLocI) rowMeans(TabBAcc) rowMeans(TabBLoc) rowMeans(TabBLocI) library(matrixStats) rowSds(TabAcc) rowSds(TabLoc) rowSds(TabLocI) rowSds(TabBAcc) rowSds(TabBLoc) rowSds(TabBLocI) 
################Plots for Relative Bias, Coverage and AIW####################### pdf("Paper 3 simulations/Plots/FigureBiasAcmeasBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,25]) / abs(Location[,7]), abs(LocationI[,25]) / abs(LocationI[,7]), abs(Accuracy[,7] - Accuracy[,25])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ theme(legend.position = "none", text = element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.title.y = element_text(size = 24), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() png("Paper 3 simulations/Plots/FigureBiasAcmeasBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,25]) / abs(Location[,7]), abs(LocationI[,25]) / abs(LocationI[,7]), abs(Accuracy[,7] - Accuracy[,25])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ theme(legend.position = "none", text = element_text(family="LM Roman 10", size = 16), axis.title.x = 
element_text(size = 16), axis.title.y = element_text(size = 16), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() pdf("Paper 3 simulations/Plots/FigureBiasAcmeasmBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,26]) / abs(Location[,8]), abs(LocationI[,26]) / abs(LocationI[,8]), abs(Accuracy[,8] - Accuracy[,26])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ theme(legend.position = "none", text = element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() png("Paper 3 simulations/Plots/FigureBiasAcmeasmBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,26]) / abs(Location[,8]), abs(LocationI[,26]) / abs(LocationI[,8]), abs(Accuracy[,8] - Accuracy[,26])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ theme(legend.position = "none", text = element_text(family="LM Roman 10", size = 
16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() pdf("Paper 3 simulations/Plots/FigureBiasAcmeasBcmX.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,27]) / abs(Location[,9]), abs(LocationI[,27]) / abs(LocationI[,9]), abs(Accuracy[,9] - Accuracy[,27])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ theme(legend.position = "none", text = element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() png("Paper 3 simulations/Plots/FigureBiasAcmeasBcmX.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,27]) / abs(Location[,9]), abs(LocationI[,27]) / abs(LocationI[,9]), abs(Accuracy[,9] - Accuracy[,27])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ 
theme(legend.position = "none", text = element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() pdf("Paper 3 simulations/Plots/FigureCoverageAcmeasBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,52], LocationI[,52], Accuracy[,52]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.title.y = element_text(size = 24), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureCoverageAcmeasBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,52], LocationI[,52], Accuracy[,52]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.title.y = element_text(size = 16), plot.margin = unit(c(0,0,0.5,0.5), "cm")) 
+ xlab("\nTrue SSDO") + ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() pdf("Paper 3 simulations/Plots/FigureCoverageAcmeasmBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,53], LocationI[,53], Accuracy[,53]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureCoverageAcmeasmBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,53], LocationI[,53], Accuracy[,53]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + 
ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() pdf("Paper 3 simulations/Plots/FigureCoverageAcmeasBcmX.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,54], LocationI[,54], Accuracy[,54]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureCoverageAcmeasBcmX.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,54], LocationI[,54], Accuracy[,54]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), 
panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() pdf("Paper 3 simulations/Plots/FigureAIWAcmeasBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,43] - Location[,34]) / abs(Location[,16]), abs(LocationI[,43] - LocationI[,34]) / abs(LocationI[,16]), abs(Accuracy[,43] - Accuracy[,34]) / abs(Accuracy[,16]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.title.y = element_text(size = 24), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureAIWAcmeasBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,43] - Location[,34]) / abs(Location[,16]), abs(LocationI[,43] - LocationI[,34]) / abs(LocationI[,16]), abs(Accuracy[,43] - Accuracy[,34]) / abs(Accuracy[,16]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.title.y = element_text(size = 16), plot.margin = 
unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() pdf("Paper 3 simulations/Plots/FigureAIWAcmeasmBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,44] - Location[,35]) / abs(Location[,17]), abs(LocationI[,44] - LocationI[,35]) / abs(LocationI[,17]), abs(Accuracy[,44] - Accuracy[,35]) / abs(Accuracy[,17]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureAIWAcmeasmBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,44] - Location[,35]) / abs(Location[,17]), abs(LocationI[,44] - LocationI[,35]) / abs(LocationI[,17]), abs(Accuracy[,44] - Accuracy[,35]) / abs(Accuracy[,17]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + 
theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() pdf("Paper 3 simulations/Plots/FigureAIWAcmeasBcmX.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,45] - Location[,36]) / abs(Location[,18]), abs(LocationI[,45] - LocationI[,36]) / abs(LocationI[,18]), abs(Accuracy[,45] - Accuracy[,36]) / abs(Accuracy[,18]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureAIWAcmeasBcmX.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,45] - Location[,36]) / abs(Location[,18]), abs(LocationI[,45] - LocationI[,36]) / abs(LocationI[,18]), abs(Accuracy[,45] - Accuracy[,36]) / abs(Accuracy[,18]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) 
dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() ###############Histograms of posterior modes for 3 datasets##################### require(Rcpp) sourceCpp("Paper 3 simulations/SimulationCode.cpp") set.seed(101) B0I <- 3 B0II <- 1 B1I <- 2 B1II <- 0.5 N <- 50 p <- 2 mu <- 0 sd <- 1 tm <- 5000 t.lag <- 1 burn <- 0 nsim <- 500 EXBurn <- 1000 Res <- SimRegRCPP1P(N, p, c(B0I, B1I), c(B0II, B1II), mu, sd, tm, t.lag, burn, nsim, EXBurn) Max <- c() Mac <- c() MBc <- c() MmBc <- c() MBcmX <- c() for(i in 1:nsim){ Max[i] <- hmode(Res$ax[1001:5000,,i], 0.1) Mac[i] <- hmode(Res$ac[1001:5000,,i], 0.1) MBc[i] <- hmode(Res$Bc[1001:5000,,i], 0.1) MmBc[i] <- hmode(Res$mBc[1001:5000,,i], 0.1) MBcmX[i] <- hmode(Res$BcmX[1001:5000,,i], 0.1) } hist(Max) hist(Mac) hist(MBc) hist(MmBc) hist(MBcmX) pdf("Paper 3 simulations/Plots/FigureHist3210,5ac.pdf", family = "CM Roman", pointsize = 26) par(mar = c(4,4,0.1,0) + 0.1) hist(Mac%%(2*pi), main= "", xlab = expression(a[c]), ylim = c(0,150), breaks = 60) dev.off() png("Paper 3 simulations/Plots/FigureHist3210,5ac.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) par(mar = c(4,4,0.1,0) + 0.1) hist(Mac%%(2*pi), main= "", xlab = expression(a[c]), ylim = c(0,150), breaks = 60) dev.off() pdf("Paper 3 simulations/Plots/FigureHist3210,5Bc.pdf", family = "CM Roman", pointsize = 26) par(mar = c(4,4,0.1,0) + 0.1) hist(MBc, main= "", xlab = 
expression(b[c]), ylab = "", ylim = c(0,150), yaxt='n', breaks = 60) dev.off() png("Paper 3 simulations/Plots/FigureHist3210,5Bc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) par(mar = c(4,4,0.1,0) + 0.1) hist(MBc, main= "", xlab = expression(b[c]), ylab = "", ylim = c(0,150), yaxt='n', breaks = 60) dev.off() pdf("Paper 3 simulations/Plots/FigureHist3210,5mBc.pdf", family = "CM Roman", pointsize = 26) par(mar = c(4,4,0.1,0) + 0.1) hist(MmBc, main= "", xlab = "AS", ylim = c(0,150), breaks = 60) dev.off() png("Paper 3 simulations/Plots/FigureHist3210,5mBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) par(mar = c(4,4,0.1,0) + 0.1) hist(MmBc, main= "", xlab = "AS", ylim = c(0,150), breaks = 60) dev.off() pdf("Paper 3 simulations/Plots/FigureHist3210,5BcmX.pdf", family = "CM Roman", pointsize = 26) par(mar = c(4,4,0.1,0) + 0.1) hist(MBcmX, main= "", xlab = "SAM", ylab = "", ylim = c(0,150), yaxt='n', breaks = 60) dev.off() png("Paper 3 simulations/Plots/FigureHist3210,5BcmX.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) par(mar = c(4,4,0.1,0) + 0.1) hist(MBcmX, main= "", xlab = "SAM", ylab = "", ylim = c(0,150), yaxt='n', breaks = 60) dev.off() library(zoo) ###################Convergence Plot of Exemplary Design######################### pdf("Paper 3 simulations/Plots/FigureConvergence3210,5.pdf", height = 4, width = 8, family = "CM Roman", pointsize = 12) data <- as.matrix(cbind(Res$ac[4500:5000,,300]%%(2*pi),Res$Bc[4500:5000,,300])) plot(as.zoo(data), ylab = expression(a[c], b[c]), main = "", xlab = "Iteration") dev.off() png("Paper 3 simulations/Plots/FigureConvergence3210,5.png", height = 4, width = 8, family = "LM Roman 10", units = "in", pointsize = 12, res = 1200) data <- as.matrix(cbind(Res$ac[4500:5000,,300]%%(2*pi),Res$Bc[4500:5000,,300])) plot(as.zoo(data), ylab = expression(a[c], b[c]), main = "", xlab = 
"Iteration") dev.off()
/Paper 3 simulations/Plots/Plots Simulation Study N = 50.R
no_license
joliencremers/Circular-Interpretation-BJMSP
R
false
false
39,957
r
#The following code is to combine and arrange the output from the simulation study #according to type of design (accuracy, location, no effect) for the purpose of #creating plots and investigating the performance. B0I <- c(3,1,0,-1,-3) B0II <- c(3,1,0,-1,-3) B1I <- c(2,1,0.5,0,-0.5,-1,-2) B1II <- c(2,1,0.5,0,-0.5,-1,-2) #Create matrices indicating if a design is a location/any effect design LocArray <- array(NA, dim = c(500, 10, 800)) LocIArray <- array(NA, dim = c(500, 10, 256)) AccArray <- array(NA, dim = c(500, 10, 144)) NoEffArray <- array(NA, dim = c(500, 10, 25)) #Create output matrices for performance measures for all designs LocationI <- matrix(NA, 256, 56) Location <- matrix(NA, 800, 56) Accuracy <- matrix(NA, 144, 56) NoEff <- matrix(NA, 25, 56) colnames(Location) <- c(rep(c("B0I", "B1I", "B0II", "B1II", "ax", "ac", "Bc", "mBc", "BcmX"), 6), "AI", "NEI") colnames(LocationI) <- c(rep(c("B0I", "B1I", "B0II", "B1II", "ax", "ac", "Bc", "mBc", "BcmX"), 6), "AI", "NEI") colnames(Accuracy) <- c(rep(c("B0I", "B1I", "B0II", "B1II", "ax", "ac", "Bc", "mBc", "BcmX"), 6), "AI", "NEI") colnames(NoEff) <- c(rep(c("B0I", "B1I", "B0II", "B1II", "ax", "ac", "Bc", "mBc", "BcmX"), 6), "AI", "NEI") AcmeasLoc <- matrix(NA, 800, 6) AcmeasLocI <- matrix(NA, 256, 6) AcmeasA <- matrix(NA, 144, 6) AcmeasNoEff <- matrix(NA, 25, 6) colnames(AcmeasLoc) <- c(rep("SSDO",6)) colnames(AcmeasLocI) <- c(rep("SSDO",6)) colnames(AcmeasA) <- c(rep("SSDO",6)) colnames(AcmeasNoEff) <- c(rep("SSDO",6)) #Create matrices for the proportions of zero effects for Bc, SAM (BcmX) and AS (mBc) PZeroEffBcLoc <- rep(NA, 800) PZeroEffBcLocI <- rep(NA, 256) PZeroEffBcA <- rep(NA, 144) PZeroEffmBcLoc <- rep(NA, 800) PZeroEffmBcLocI <- rep(NA, 256) PZeroEffmBcA <- rep(NA, 144) PZeroEffBcmXLoc <- rep(NA, 800) PZeroEffBcmXLocI <- rep(NA, 256) PZeroEffBcmXA <- rep(NA, 144) #Set counts count <- 0 countAccuracy <- 0 countNoEff <- 0 countColumn <- 0 countLocation <- 0 countLocationI <- 0 IsAccuracy <- 0 IsNoEff <- 
0 for(i in 1:7){ for(j in 1:7){ for(k in 1:5){ for(z in 1:5){ count <- count + 1 Sumres <- read.table(file=paste("Paper 3 simulations/ResultsN=50/result", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".txt", sep=""), sep="\t") #load results for whether HPD SSDO contains 0 load(file = paste("Paper 3 simulations/ResultsN=50/Accuracyindicator", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".Rdata", sep="")) #load results for whether HPDs of either B1I or B1II do not contain 0 load(file = paste("Paper 3 simulations/ResultsN=50/NoEffindicator", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".Rdata", sep="")) #load results for whether HPDs of Bc, AS (mBC) and SAM (BxmX) contain 0 load(file = paste("Paper 3 simulations/ResultsN=50/ZeroEffBc", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".Rdata", sep="")) load(file = paste("Paper 3 simulations/ResultsN=50/ZeroEffmBc", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".Rdata", sep="")) load(file = paste("Paper 3 simulations/ResultsN=50/ZeroEffBcmX", B0I[k], ",", B1I[i], ",", B0II[z], ",", B1II[j], ".Rdata", sep="")) SDisOrigin <- abs(sqrt((B0I[k]+B1I[i]*Sumres[5,1])^2 + (B0II[z]+B1II[j]*Sumres[5,1])^2)) if(B1I[i] == B1II[j] & B1I[i] == 0){ IsNoEff <- 1 }else{ IsNoEff <- 0 if(B0II[z] == 0 | B0I[k] == 0 | B1II[j] == 0 | B1I[i] == 0){ if(B0II[z] == 0 & B0I[k] == 0){ IsAccuracy <- 1 } else if((B0I[k] == B1I[i]) & B0I[k] == 0){ IsAccuracy <- 1 } else if((B0II[z] == B1II[j]) & B0II[z] ==0){ IsAccuracy <- 1 } else if((B1I[i]/B1II[j] - B0I[k]/B0II[z]) == 0){ IsAccuracy <- 1 }else{ IsAccuracy <- 0 } }else{ if((B1I[i]/B1II[j] - B0I[k]/B0II[z]) == 0){ IsAccuracy <- 1 }else{ IsAccuracy <- 0 } } } if(IsAccuracy == 1 & IsNoEff == 0){ countAccuracy <- countAccuracy + 1 AccArray[,1,countAccuracy] <- Accuracyindicator AccArray[,2,countAccuracy] <- NoEffindicator AccArray[,3,countAccuracy] <- ZeroEffBc AccArray[,4,countAccuracy] <- ZeroEffmBc AccArray[,5,countAccuracy] <- ZeroEffBcmX for(ii in 1:500){ if(ZeroEffBc[ii] == 
ZeroEffmBc[ii]){ AccArray[ii,6,countAccuracy] = 1 }else{ AccArray[ii,6,countAccuracy] = 0 } if(ZeroEffBcmX[ii] == ZeroEffmBc[ii]){ AccArray[ii,7,countAccuracy] = 1 }else{ AccArray[ii,7,countAccuracy] = 0 } if(ZeroEffBc[ii] == ZeroEffBcmX[ii]){ AccArray[ii,8,countAccuracy] = 1 }else{ AccArray[ii,8,countAccuracy] = 0 } if(NoEffindicator[ii] == ZeroEffBc[ii]){ AccArray[ii,9,countAccuracy] = 1 }else{ AccArray[ii,9,countAccuracy] = 0 } if(Accuracyindicator[ii] == ZeroEffBc[ii]){ AccArray[ii,10,countAccuracy] = 0 }else{ AccArray[ii,10,countAccuracy] = 1 } } for(s in 1:6){ for(p in 1:9){ countColumn <- countColumn + 1 Accuracy[countAccuracy, countColumn] <- Sumres[p,s] } AcmeasA[countAccuracy, s] <- Sumres[10,s] } Accuracy[countAccuracy, 55] <- sum(Accuracyindicator)/500 Accuracy[countAccuracy, 56] <- sum(NoEffindicator)/500 PZeroEffBcA[countAccuracy] <- sum(ZeroEffBc)/500 PZeroEffmBcA[countAccuracy] <- sum(ZeroEffmBc)/500 PZeroEffBcmXA[countAccuracy] <- sum(ZeroEffBcmX)/500 countColumn <- 0 } if(IsAccuracy == 0 & IsNoEff == 0){ if(SDisOrigin > 1){ countLocation <- countLocation + 1 LocArray[,1, countLocation] <- Accuracyindicator LocArray[,2, countLocation] <- NoEffindicator LocArray[,3, countLocation] <- ZeroEffBc LocArray[,4, countLocation] <- ZeroEffmBc LocArray[,5, countLocation] <- ZeroEffBcmX for(ii in 1:500){ if(ZeroEffBc[ii] == ZeroEffmBc[ii]){ LocArray[ii,6,countLocation] = 1 }else{ LocArray[ii,6,countLocation] = 0 } if(ZeroEffBcmX[ii] == ZeroEffmBc[ii]){ LocArray[ii,7,countLocation] = 1 }else{ LocArray[ii,7,countLocation] = 0 } if(ZeroEffBc[ii] == ZeroEffBcmX[ii]){ LocArray[ii,8,countLocation] = 1 }else{ LocArray[ii,8,countLocation] = 0 } if(NoEffindicator[ii] == ZeroEffBc[ii]){ LocArray[ii,9,countLocation] = 1 }else{ LocArray[ii,9,countLocation] = 0 } if(Accuracyindicator[ii] == ZeroEffBc[ii]){ LocArray[ii,10,countLocation] = 0 }else{ LocArray[ii,10,countLocation] = 1 } } for(s in 1:6){ for(p in 1:9){ countColumn <- countColumn + 1 Location[countLocation, 
countColumn] <- Sumres[p,s] } AcmeasLoc[countLocation, s] <- Sumres[10,s] } Location[countLocation, 55] <- sum(Accuracyindicator)/500 Location[countLocation, 56] <- sum(NoEffindicator)/500 PZeroEffBcLoc[countLocation] <- sum(ZeroEffBc)/500 PZeroEffmBcLoc[countLocation] <- sum(ZeroEffmBc)/500 PZeroEffBcmXLoc[countLocation] <- sum(ZeroEffBcmX)/500 countColumn <- 0 }else if(SDisOrigin <= 1){ countLocationI <- countLocationI + 1 LocIArray[,1, countLocationI] <- Accuracyindicator LocIArray[,2, countLocationI] <- NoEffindicator LocIArray[,3, countLocationI] <- ZeroEffBc LocIArray[,4, countLocationI] <- ZeroEffmBc LocIArray[,5, countLocationI] <- ZeroEffBcmX for(ii in 1:500){ if(ZeroEffBc[ii] == ZeroEffmBc[ii]){ LocIArray[ii,6,countLocationI] = 1 }else{ LocIArray[ii,6,countLocationI] = 0 } if(ZeroEffBcmX[ii] == ZeroEffmBc[ii]){ LocIArray[ii,7,countLocationI] = 1 }else{ LocIArray[ii,7,countLocationI] = 0 } if(ZeroEffBc[ii] == ZeroEffBcmX[ii]){ LocIArray[ii,8,countLocationI] = 1 }else{ LocIArray[ii,8,countLocationI] = 0 } if(NoEffindicator[ii] == ZeroEffBc[ii]){ LocIArray[ii,9,countLocationI] = 1 }else{ LocIArray[ii,9,countLocationI] = 0 } if(Accuracyindicator[ii] == ZeroEffBc[ii]){ LocIArray[ii,10,countLocationI] = 0 }else{ LocIArray[ii,10,countLocationI] = 1 } } for(s in 1:6){ for(p in 1:9){ countColumn <- countColumn + 1 LocationI[countLocationI, countColumn] <- Sumres[p,s] } AcmeasLocI[countLocationI, s] <- Sumres[10,s] } LocationI[countLocationI, 55] <- sum(Accuracyindicator)/500 LocationI[countLocationI, 56] <- sum(NoEffindicator)/500 PZeroEffBcLocI[countLocationI] <- sum(ZeroEffBc)/500 PZeroEffmBcLocI[countLocationI] <- sum(ZeroEffmBc)/500 PZeroEffBcmXLocI[countLocationI] <- sum(ZeroEffBcmX)/500 countColumn <- 0 } } if(IsNoEff == 1){ countNoEff <- countNoEff + 1 NoEffArray[,1,countNoEff] <- Accuracyindicator NoEffArray[,2,countNoEff] <- NoEffindicator NoEffArray[,3,countNoEff] <- ZeroEffBc NoEffArray[,4,countNoEff] <- ZeroEffmBc NoEffArray[,5,countNoEff] <- 
ZeroEffBcmX for(ii in 1:500){ if(ZeroEffBc[ii] == ZeroEffmBc[ii]){ NoEffArray[ii,6,countNoEff] = 1 }else{ NoEffArray[ii,6,countNoEff] = 0 } if(ZeroEffBcmX[ii] == ZeroEffmBc[ii]){ NoEffArray[ii,7,countNoEff] = 1 }else{ NoEffArray[ii,7,countNoEff] = 0 } if(ZeroEffBc[ii] == ZeroEffBcmX[ii]){ NoEffArray[ii,8,countNoEff] = 1 }else{ NoEffArray[ii,8,countNoEff] = 0 } if(NoEffindicator[ii] == ZeroEffBc[ii]){ NoEffArray[ii,9,countNoEff] = 1 }else{ NoEffArray[ii,9,countNoEff] = 0 } if(Accuracyindicator[ii] == ZeroEffBc[ii]){ NoEffArray[ii,10,countNoEff] = 0 }else{ NoEffArray[ii,10,countNoEff] = 1 } } for(s in 1:6){ for(p in 1:9){ countColumn <- countColumn + 1 NoEff[countNoEff, countColumn] <- Sumres[p,s] } AcmeasNoEff[countNoEff, s] <- Sumres[10,s] } NoEff[countNoEff, 55] <- sum(Accuracyindicator)/500 NoEff[countNoEff, 56] <- sum(NoEffindicator)/500 countColumn <- 0 } } } } } library(ggplot2) library(RColorBrewer) library(extrafont) font_install("fontcm") loadfonts() loadfonts(device = "win") par(family = "LM Roman 10") #Data for crosstabulations 5-7 Tabulate <- function(x, y){ X <- as.factor(x) levels(X) <- c(0,1) Y <- as.factor(y) levels(Y) <- c(0,1) return(table(X,Y)) } TabAcc <- (sapply((1:144), function(w){Tabulate(AccArray[,3,w], AccArray[,1,w])})/500)*100 TabLoc <- (sapply((1:800), function(w){Tabulate(LocArray[,3,w], LocArray[,1,w])})/500)*100 TabLocI <- (sapply((1:256), function(w){Tabulate(LocIArray[,3,w], LocIArray[,1,w])})/500)*100 TabBAcc <- (sapply((1:144), function(w){c(sum(AccArray[,2,w]), 500-sum(AccArray[,2,w]))})/500)*100 TabBLoc <- (sapply((1:800), function(w){c(sum(LocArray[,2,w]), 500-sum(LocArray[,2,w]))})/500)*100 TabBLocI <- (sapply((1:256), function(w){c(sum(LocIArray[,2,w]), 500-sum(LocIArray[,2,w]))})/500)*100 rowMeans(TabAcc) rowMeans(TabLoc) rowMeans(TabLocI) rowMeans(TabBAcc) rowMeans(TabBLoc) rowMeans(TabBLocI) library(matrixStats) rowSds(TabAcc) rowSds(TabLoc) rowSds(TabLocI) rowSds(TabBAcc) rowSds(TabBLoc) rowSds(TabBLocI) 
################Plots for Relative Bias, Coverage and AIW####################### pdf("Paper 3 simulations/Plots/FigureBiasAcmeasBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,25]) / abs(Location[,7]), abs(LocationI[,25]) / abs(LocationI[,7]), abs(Accuracy[,7] - Accuracy[,25])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ theme(legend.position = "none", text = element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.title.y = element_text(size = 24), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() png("Paper 3 simulations/Plots/FigureBiasAcmeasBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,25]) / abs(Location[,7]), abs(LocationI[,25]) / abs(LocationI[,7]), abs(Accuracy[,7] - Accuracy[,25])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ theme(legend.position = "none", text = element_text(family="LM Roman 10", size = 16), axis.title.x = 
element_text(size = 16), axis.title.y = element_text(size = 16), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() pdf("Paper 3 simulations/Plots/FigureBiasAcmeasmBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,26]) / abs(Location[,8]), abs(LocationI[,26]) / abs(LocationI[,8]), abs(Accuracy[,8] - Accuracy[,26])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ theme(legend.position = "none", text = element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() png("Paper 3 simulations/Plots/FigureBiasAcmeasmBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,26]) / abs(Location[,8]), abs(LocationI[,26]) / abs(LocationI[,8]), abs(Accuracy[,8] - Accuracy[,26])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ theme(legend.position = "none", text = element_text(family="LM Roman 10", size = 
16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() pdf("Paper 3 simulations/Plots/FigureBiasAcmeasBcmX.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,27]) / abs(Location[,9]), abs(LocationI[,27]) / abs(LocationI[,9]), abs(Accuracy[,9] - Accuracy[,27])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ theme(legend.position = "none", text = element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() png("Paper 3 simulations/Plots/FigureBiasAcmeasBcmX.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Bias <- c(abs(Location[,27]) / abs(Location[,9]), abs(LocationI[,27]) / abs(LocationI[,9]), abs(Accuracy[,9] - Accuracy[,27])) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(Bias, PopulationSD, Type) dg <- qplot(PopulationSD, Bias, colour = Type, data = d) + ylim(0, 0.8) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5))+ 
theme(legend.position = "none", text = element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Relative Bias\n") dev.off() pdf("Paper 3 simulations/Plots/FigureCoverageAcmeasBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,52], LocationI[,52], Accuracy[,52]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.title.y = element_text(size = 24), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureCoverageAcmeasBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,52], LocationI[,52], Accuracy[,52]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.title.y = element_text(size = 16), plot.margin = unit(c(0,0,0.5,0.5), "cm")) 
+ xlab("\nTrue SSDO") + ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() pdf("Paper 3 simulations/Plots/FigureCoverageAcmeasmBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,53], LocationI[,53], Accuracy[,53]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureCoverageAcmeasmBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,53], LocationI[,53], Accuracy[,53]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + 
ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() pdf("Paper 3 simulations/Plots/FigureCoverageAcmeasBcmX.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,54], LocationI[,54], Accuracy[,54]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureCoverageAcmeasBcmX.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) Cov <- c(Location[,54], LocationI[,54], Accuracy[,54]) Type <- as.factor(c(rep("Location",1056), rep("Accuracy",144))) d <- data.frame(Cov, PopulationSD, Type) dg <- qplot(PopulationSD,Cov, colour = Type, data=d) + ylim(0.85,1) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("Coverage\n") + theme(axis.line.x = element_line(color="black", size = 0.5), 
panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() pdf("Paper 3 simulations/Plots/FigureAIWAcmeasBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,43] - Location[,34]) / abs(Location[,16]), abs(LocationI[,43] - LocationI[,34]) / abs(LocationI[,16]), abs(Accuracy[,43] - Accuracy[,34]) / abs(Accuracy[,16]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.title.y = element_text(size = 24), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureAIWAcmeasBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,43] - Location[,34]) / abs(Location[,16]), abs(LocationI[,43] - LocationI[,34]) / abs(LocationI[,16]), abs(Accuracy[,43] - Accuracy[,34]) / abs(Accuracy[,16]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.title.y = element_text(size = 16), plot.margin = 
unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), axis.line.y = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() pdf("Paper 3 simulations/Plots/FigureAIWAcmeasmBc.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,44] - Location[,35]) / abs(Location[,17]), abs(LocationI[,44] - LocationI[,35]) / abs(LocationI[,17]), abs(Accuracy[,44] - Accuracy[,35]) / abs(Accuracy[,17]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureAIWAcmeasmBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,44] - Location[,35]) / abs(Location[,17]), abs(LocationI[,44] - LocationI[,35]) / abs(LocationI[,17]), abs(Accuracy[,44] - Accuracy[,35]) / abs(Accuracy[,17]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + 
theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() pdf("Paper 3 simulations/Plots/FigureAIWAcmeasBcmX.pdf", family = "CM Roman", pointsize = 24) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,45] - Location[,36]) / abs(Location[,18]), abs(LocationI[,45] - LocationI[,36]) / abs(LocationI[,18]), abs(Accuracy[,45] - Accuracy[,36]) / abs(Accuracy[,18]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="CM Roman", size = 24), axis.title.x = element_text(size = 24), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() png("Paper 3 simulations/Plots/FigureAIWAcmeasBcmX.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) PopulationSD <- c(AcmeasLoc[,1], AcmeasLocI[,1], AcmeasA[,1]) AIW <- log(c(abs(Location[,45] - Location[,36]) / abs(Location[,18]), abs(LocationI[,45] - LocationI[,36]) / abs(LocationI[,18]), abs(Accuracy[,45] - Accuracy[,36]) / abs(Accuracy[,18]))) Type <- as.factor(c(rep("Location", 1056), rep("Accuracy", 144))) d <- data.frame(AIW, PopulationSD, Type) 
dg <- qplot(PopulationSD, AIW, colour = Type, data = d) + ylim(-3,12) + scale_color_manual(breaks = c("Location", "Accuracy"), values=c("black", "grey")) dg + theme_classic() + theme(legend.position = "none", text=element_text(family="LM Roman 10", size = 16), axis.title.x = element_text(size = 16), axis.ticks.y = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(0,0,0.5,0.5), "cm")) + xlab("\nTrue SSDO") + ylab("log(AIW)\n") + theme(axis.line.x = element_line(color="black", size = 0.5), panel.grid.major.y=element_line(color="grey", size = 0.5)) dev.off() ###############Histograms of posterior modes for 3 datasets##################### require(Rcpp) sourceCpp("Paper 3 simulations/SimulationCode.cpp") set.seed(101) B0I <- 3 B0II <- 1 B1I <- 2 B1II <- 0.5 N <- 50 p <- 2 mu <- 0 sd <- 1 tm <- 5000 t.lag <- 1 burn <- 0 nsim <- 500 EXBurn <- 1000 Res <- SimRegRCPP1P(N, p, c(B0I, B1I), c(B0II, B1II), mu, sd, tm, t.lag, burn, nsim, EXBurn) Max <- c() Mac <- c() MBc <- c() MmBc <- c() MBcmX <- c() for(i in 1:nsim){ Max[i] <- hmode(Res$ax[1001:5000,,i], 0.1) Mac[i] <- hmode(Res$ac[1001:5000,,i], 0.1) MBc[i] <- hmode(Res$Bc[1001:5000,,i], 0.1) MmBc[i] <- hmode(Res$mBc[1001:5000,,i], 0.1) MBcmX[i] <- hmode(Res$BcmX[1001:5000,,i], 0.1) } hist(Max) hist(Mac) hist(MBc) hist(MmBc) hist(MBcmX) pdf("Paper 3 simulations/Plots/FigureHist3210,5ac.pdf", family = "CM Roman", pointsize = 26) par(mar = c(4,4,0.1,0) + 0.1) hist(Mac%%(2*pi), main= "", xlab = expression(a[c]), ylim = c(0,150), breaks = 60) dev.off() png("Paper 3 simulations/Plots/FigureHist3210,5ac.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) par(mar = c(4,4,0.1,0) + 0.1) hist(Mac%%(2*pi), main= "", xlab = expression(a[c]), ylim = c(0,150), breaks = 60) dev.off() pdf("Paper 3 simulations/Plots/FigureHist3210,5Bc.pdf", family = "CM Roman", pointsize = 26) par(mar = c(4,4,0.1,0) + 0.1) hist(MBc, main= "", xlab = 
expression(b[c]), ylab = "", ylim = c(0,150), yaxt='n', breaks = 60) dev.off() png("Paper 3 simulations/Plots/FigureHist3210,5Bc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) par(mar = c(4,4,0.1,0) + 0.1) hist(MBc, main= "", xlab = expression(b[c]), ylab = "", ylim = c(0,150), yaxt='n', breaks = 60) dev.off() pdf("Paper 3 simulations/Plots/FigureHist3210,5mBc.pdf", family = "CM Roman", pointsize = 26) par(mar = c(4,4,0.1,0) + 0.1) hist(MmBc, main= "", xlab = "AS", ylim = c(0,150), breaks = 60) dev.off() png("Paper 3 simulations/Plots/FigureHist3210,5mBc.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) par(mar = c(4,4,0.1,0) + 0.1) hist(MmBc, main= "", xlab = "AS", ylim = c(0,150), breaks = 60) dev.off() pdf("Paper 3 simulations/Plots/FigureHist3210,5BcmX.pdf", family = "CM Roman", pointsize = 26) par(mar = c(4,4,0.1,0) + 0.1) hist(MBcmX, main= "", xlab = "SAM", ylab = "", ylim = c(0,150), yaxt='n', breaks = 60) dev.off() png("Paper 3 simulations/Plots/FigureHist3210,5BcmX.png", family = "LM Roman 10", width = 5, height = 5, units = "in", pointsize = 16, res = 1200) par(mar = c(4,4,0.1,0) + 0.1) hist(MBcmX, main= "", xlab = "SAM", ylab = "", ylim = c(0,150), yaxt='n', breaks = 60) dev.off() library(zoo) ###################Convergence Plot of Exemplary Design######################### pdf("Paper 3 simulations/Plots/FigureConvergence3210,5.pdf", height = 4, width = 8, family = "CM Roman", pointsize = 12) data <- as.matrix(cbind(Res$ac[4500:5000,,300]%%(2*pi),Res$Bc[4500:5000,,300])) plot(as.zoo(data), ylab = expression(a[c], b[c]), main = "", xlab = "Iteration") dev.off() png("Paper 3 simulations/Plots/FigureConvergence3210,5.png", height = 4, width = 8, family = "LM Roman 10", units = "in", pointsize = 12, res = 1200) data <- as.matrix(cbind(Res$ac[4500:5000,,300]%%(2*pi),Res$Bc[4500:5000,,300])) plot(as.zoo(data), ylab = expression(a[c], b[c]), main = "", xlab = 
"Iteration") dev.off()
# run_analysis.R -------------------------------------------------------------
# Builds a tidy summary of the UCI HAR (Human Activity Recognition) dataset:
#   1. loads the test and train splits and merges them,
#   2. keeps only the mean/std measurements,
#   3. applies descriptive activity and variable names,
#   4. writes per-activity / per-subject averages to result.txt.
# Outputs left in the workspace: mean_std and result_dataset.
library(dplyr)

data_dir <- "UCI HAR Dataset"

# Measurement column names come from the second column of features.txt.
features <- read.table(file.path(data_dir, "features.txt"))$V2

# Read one split ("test" or "train"): the measurement matrix plus the matching
# activity-code and subject-id columns, tagged with the split it came from.
load_split <- function(split) {
  message("reading ", split, " dataset...")
  split_dir <- file.path(data_dir, split)
  measurements <- read.table(file.path(split_dir, paste0("X_", split, ".txt")),
                             col.names = features, header = FALSE)
  activity <- read.table(file.path(split_dir, paste0("y_", split, ".txt")),
                         header = FALSE)
  subject <- read.table(file.path(split_dir, paste0("subject_", split, ".txt")),
                        header = FALSE)
  result <- as_tibble(measurements) %>%       # as_tibble(): tbl_df() is deprecated
    mutate(MeasureType = split,
           Activity = activity[, 1],
           SubjectID = subject[, 1])
  message("OK")
  result
}

# Merge the two splits. bind_rows() keeps every observation; the previous
# union() silently dropped duplicate rows, losing data.
mean_std <- bind_rows(load_split("test"), load_split("train"))

# Keep the identifiers plus the mean and standard-deviation measurements only.
mean_std <- select(mean_std, contains("Activity"), contains("SubjectID"),
                   contains("mean"), contains("std"))

# Replace numeric activity codes with the descriptive labels shipped with the
# dataset.
labels <- read.table(file.path(data_dir, "activity_labels.txt"), header = FALSE,
                     col.names = c("ActivityID", "Activity"))
mean_std$Activity <- factor(mean_std$Activity,
                            levels = labels$ActivityID,
                            labels = labels$Activity)

# Expand the terse feature-name fragments into descriptive variable names.
# fixed = TRUE makes "." match literally instead of acting as a regex wildcard.
names(mean_std) <- gsub("tBody", "TimeBody", names(mean_std))
names(mean_std) <- gsub("tAcc", "TimeAcc", names(mean_std))
names(mean_std) <- gsub("tGravity", "TimeGravity", names(mean_std))
names(mean_std) <- gsub("fBody", "FFTBody", names(mean_std))
names(mean_std) <- gsub("fAcc", "FFTAcc", names(mean_std))
names(mean_std) <- gsub(".std", "Std", names(mean_std), fixed = TRUE)
names(mean_std) <- gsub(".mean", "Mean", names(mean_std), fixed = TRUE)
names(mean_std) <- gsub(".gravity", "Gravity", names(mean_std), fixed = TRUE)
names(mean_std) <- gsub("angle.", "Angle", names(mean_std), fixed = TRUE)
names(mean_std) <- gsub(".", "", names(mean_std), fixed = TRUE)

# Average every measurement for each (Activity, SubjectID) pair.
# summarise(across(...)) replaces the deprecated summarise_each()/funs().
result_dataset <- mean_std %>%
  group_by(Activity, SubjectID) %>%
  summarise(across(everything(), mean), .groups = "drop")

# Persist the tidy summary.
message("Writing dataset to result.txt....")
write.table(result_dataset, "result.txt", row.names = FALSE)
message("The datasets are ready. The names are mean_std and result_dataset")
/run_analysis.R
no_license
kozh1121/getdata
R
false
false
3,947
r
## run_analysis.R: tidy the UCI HAR dataset and write per-activity /
## per-subject averages of the mean/std measurements to result.txt.
library(dplyr)

## Measurement column names come from the second column of features.txt.
feature_names <- read.table("UCI HAR Dataset\\features.txt")
feature_names <- feature_names$V2

## ---- test split ------------------------------------------------------------
message("reading test dataset...")
test_set <- read.table("UCI HAR Dataset\\test\\X_test.txt",
                       col.names = feature_names, header = FALSE)
test_set <- tbl_df(test_set)
test_set <- mutate(test_set, MeasureType = "test")
## Activity codes and subject ids that line up row-for-row with X_test.txt.
test_activity <- read.table("UCI HAR Dataset\\test\\y_test.txt", header = FALSE)
test_person <- read.table("UCI HAR Dataset\\test\\subject_test.txt",
                          header = FALSE)
test_set <- mutate(test_set, Activity = test_activity[, 1])
test_set <- mutate(test_set, SubjectID = test_person[, 1])
message("OK")

## ---- train split -----------------------------------------------------------
message("reading train dataset...")
train_set <- read.table("UCI HAR Dataset\\train\\X_train.txt",
                        col.names = feature_names, header = FALSE)
train_set <- tbl_df(train_set)
train_set <- mutate(train_set, MeasureType = "train")
train_activity <- read.table("UCI HAR Dataset\\train\\y_train.txt",
                             header = FALSE)
train_person <- read.table("UCI HAR Dataset\\train\\subject_train.txt",
                           header = FALSE)
train_set <- mutate(train_set, Activity = train_activity[, 1])
train_set <- mutate(train_set, SubjectID = train_person[, 1])
message("OK")

## ---- merge and reduce ------------------------------------------------------
mean_std <- union(test_set, train_set)
rm(test_set, test_activity, test_person,
   train_set, train_activity, train_person)

## Keep the identifiers plus the mean and standard-deviation measurements.
mean_std <- select(mean_std, contains("Activity"), contains("SubjectID"),
                   contains("mean"), contains("std"))

## Map numeric activity codes to descriptive labels.
labels <- read.table("UCI HAR Dataset\\activity_labels.txt", header = FALSE,
                     col.names = c("ActivityID", "Activity"))
mean_std$Activity <- factor(mean_std$Activity, levels = labels$ActivityID,
                            labels = labels$Activity)

## Expand abbreviated feature-name fragments into readable variable names;
## the substitutions are applied in the same order as before.
for (pair in list(c("tBody", "TimeBody"), c("tAcc", "TimeAcc"),
                  c("tGravity", "TimeGravity"), c("fBody", "FFTBody"),
                  c("fAcc", "FFTAcc"), c(".std", "Std"), c(".mean", "Mean"),
                  c(".gravity", "Gravity"), c("angle.", "Angle"))) {
  names(mean_std) <- gsub(pair[1], pair[2], names(mean_std))
}
names(mean_std) <- gsub(".", "", names(mean_std), fixed = TRUE)

## Average every variable for each activity / subject combination, then save.
result_dataset <- group_by(mean_std, Activity, SubjectID)
result_dataset <- summarise_each(result_dataset, funs(mean))
message("Writing dataset to result.txt....")
write.table(result_dataset, "result.txt", row.names = FALSE)
message("The datasets are ready. The names are mean_std and result_dataset")
####################################################### # MJ Counotte # # A very simple-fast references list # # https://ispmbern.github.io/covid-19/living-review/ # # # ####################################################### content <- navbarPage("SARS-CoV-2 references", id="nav", tabPanel("References", hr(), downloadButton("downloadData", "Download as CSV"), downloadButton("downloadData2", "Download as RIS"), dataTableOutput('zikref'), HTML("loading the data can take a few seconds...") ) ,tabPanel("About", fluidRow( h2("About the ncov LSR project") ), HTML("This Living Systematic Review retrieves and deduplicates scientific publications from different sources: PubMed, EMBASE, MedRxiv and BioRxiv. <br><small>Please keep in mind that some sources are pre-print servers and these publications did not undergo peer-review</small>"), br(), br(), HTML("Highlighting: <mark style=\"background-color:#ffcccb\">PREPRINT</mark> <mark style=\"background-color:#90ee90\">Peer reviewed</mark>"), br(),br(), HTML("<a href=\"https://ispmbern.github.io/covid-19/living-review/\">More info about the project and methods</a>") ))
/living-review/ReferencesShinyApp/ui.R
no_license
ISPMBern/covid-19
R
false
false
1,371
r
####################################################### # MJ Counotte # # A very simple-fast references list # # https://ispmbern.github.io/covid-19/living-review/ # # # ####################################################### content <- navbarPage("SARS-CoV-2 references", id="nav", tabPanel("References", hr(), downloadButton("downloadData", "Download as CSV"), downloadButton("downloadData2", "Download as RIS"), dataTableOutput('zikref'), HTML("loading the data can take a few seconds...") ) ,tabPanel("About", fluidRow( h2("About the ncov LSR project") ), HTML("This Living Systematic Review retrieves and deduplicates scientific publications from different sources: PubMed, EMBASE, MedRxiv and BioRxiv. <br><small>Please keep in mind that some sources are pre-print servers and these publications did not undergo peer-review</small>"), br(), br(), HTML("Highlighting: <mark style=\"background-color:#ffcccb\">PREPRINT</mark> <mark style=\"background-color:#90ee90\">Peer reviewed</mark>"), br(),br(), HTML("<a href=\"https://ispmbern.github.io/covid-19/living-review/\">More info about the project and methods</a>") ))
<!-- This syntax can be used to add comments that are ignored during knitting process. --> ##### Collaborators: <!-- BE SURE TO LIST ALL COLLABORATORS HERE! --> External SOurces : http://soutik.github.io/NYC-Flight-Analysis/ ##### Instructions: Before beginning this assignment, please ensure you have access to R and RStudio; this can be on your own personal computer or on the IMT 573 R Studio Server. 1. Download the `problemset1.Rmd` file from Canvas or save a copy to your local directory on RStudio Server. Open `problemset1.Rmd` in RStudio and supply your solutions to the assignment by editing `problemset1.Rmd`. 2. Replace the "Insert Your Name Here" text in the `author:` field with your own full name. Any collaborators must be listed on the top of your assignment. 3. Be sure to include well-documented (e.g. commented) code chucks, figures, and clearly written text chunk explanations as necessary. Any figures should be clearly labeled and appropriately referenced within the text. Be sure that each visualization adds value to your written explanation; avoid redundancy -- you do no need four different visualizations of the same pattern. 4. Collaboration on problem sets is fun and useful, and we encourage it, but each student must turn in an individual write-up in their own words as well as code/work that is their own. Regardless of whether you work with others, what you turn in must be your own work; this includes code and interpretation of results. The names of all collaborators must be listed on each assignment. Do not copy-and-paste from other students' responses or code. 5. All materials and resources that you use (with the exception of lecture slides) must be appropriately referenced within your assignment. 6. Remember partial credit will be awarded for each question for which a serious attempt at finding an answer has been shown. 
Students are \emph{strongly} encouraged to attempt each question and to document their reasoning process even if they cannot find the correct answer. If you would like to include R code to show this process, but it does not run withouth errors you can do so with the `eval=FALSE` option as follows: ```{r example chunk with a bug, eval=FALSE} a + b # these object dont' exist # if you run this on its own it with give an error ``` 7. When you have completed the assignment and have **checked** that your code both runs in the Console and knits correctly when you click `Knit PDF`, rename the knitted PDF file to `Yps1_ourLastName_YourFirstName.pdf`, and submit the PDF file on Canvas. ##### Setup: In this problem set you will need, at minimum, the following R packages. ```{r Setup, message=FALSE} # Load standard libraries library(tidyverse) install.packages("nycflights13", repos = "http://cran.us.r-project.org") library(nycflights13) ``` #### Problem 1: Exploring the NYC Flights Data In this problem set we will use the data on all flights that departed NYC (i.e. JFK, LGA or EWR) in 2013. You can find this data in the `nycflights13` R package. ```{r} # Load the nycflights13 library which includes data on all data = library(nycflights13) # flights departing NYC data(flights) # Note the data itself is called flights, we will make it into a local df # for readability flights <- tbl_df(flights) # Look at the help file for information about the data # ?flights flights # summary(flights) ``` ##### (a) Importing and Inspecting Data Load the data and describe in a short paragraph how the data was collected and what each variable represents. Perform a basic inspection of the data and discuss what you find. ```{r} flights #head of the data head(flights,10) #tail of the data tail(flights,10) #check variables in data names(flights) #check headers flights[1,] str(flights) #levels(flights$origin) unique(flights$carrier) ``` There are 19 columns and 336776 rows in the Dataset. 
Variables: Year, Month, Day: represents the day of the flight in data, dep_time, sched-dep_time, dep_delay: represent all the time data related to flight departures. arr_time, sched_arr_time, arr_delay: represents all time data related to arrivals of the flights. carrier, flight and tailnum: gives data related to flight to identify individual filghts based on number and carrier. origin and dest: these columns are origin location and destination locations for the flight. distance, hour, minute, time_hour: this data is related to actual journy of the flight to calculates speed, understand the time related questions Findings from the data: 1. we can calculate total delay for flight by adding departure delay and arrival delay 2. The data given has 3 different airports from NYC (JFK, LGA, EWR) so we can do analysis based on airport location as well 3. Departure delays has negative values as well and its not considered delay in actual sense 4. We can calculate flight performance by doing (departure delay + arrival delay))/Air time 5.There are 15 unique carriers from NYC airport and one value is missing 6. Flights connects to planes via a single variable, tailnum. 7. Flights connects to airlines through the carrier variable. 8. Flights connects to airports in two ways: via the origin and dest variables. 9. Flights connects to weather via origin (the location), and year, month, day and hour (the time). ##### (b) Formulating Questions Consider the NYC flights data. Formulate two motivating questions you want to explore using this data. Describe why these questions are interesting and how you might go about answering them. 1. we can find out if there are any seasonal patterns in departure delays for flights from NYC Why -> We can explore which seasons more has departure delays, if we know the seasons and the period we can try to understand specific patterns in that season that creates delays in departure. 
We can also compare this data with data from other years and try to find a pattern. How -> we will group flights by month of the year 2013, calculate average departure delay for every month and maximum departure delay can be found by taking max value. 2. Is there any relationship between Departure Delay and Arrival Delay Why <- I want to understand those flights which gets delayed also arrive late? or the delay is recovered and the flights arrive on the same time? or there is negative or positive relationship between Arrival delay and departure delay. This will help us conclude if your flight from NYC got delayed it will arrive late (or not) How <- we can create a scatterplot of departure delay and arriavl delay for all the flights from NYC, We can also understand if this relationship changes with the change in the airport by grouping flights data by airport. ##### (c) Exploring Data For each of the questions you proposed in Problem 1b, perform an exploratory data analysis designed to address the question. At a minimum, you should produce two visualizations (graphics or tables) related to each question. Be sure to describe what the visuals show and how they speak to your question of interest. ```{r Q1 visualization1} library(dplyr) #group flights by month and take average of departure delay for each month flights_by_month <- flights %>% group_by(month)%>% summarise_at(vars(dep_delay), funs(mean(., na.rm=TRUE))) #flights grouped by month flights_by_month #Visualize this data plot(flights_by_month ,type="l") ``` The above visualization shows departure delayes are maxium in June, July. 
It shows summer is the season where there are maxium departure delays for flights from NYC ```{r Q1 visualization2} # Exploring the seasonal patterns in total number of delayed by Month library(dplyr) #create date variable using year, month, day flights$date <- with(flights, ISOdate(year = 2013, month, day)) #group flights by date and count total departure delay for the that day flights_by_date <- flights %>% group_by(date) %>% count(dep_delay) #plot the visualization of date vs delay plot(x=flights$date, y=flights$dep_delay, xlab ="Date", ylab = "Delayed Flights Count" ) ``` In the above visualization we see total counts of departure delays plotted against month, which shows departure delays are more in June and July i.e. Summer season also one month of winter i.e. Jan This shows us that there is a PEAK in delays during June, July & August month and the delays generally fall down during the winter months i.e. October, November, December ##### (c) Visualizations for question 2 ```{r Q2 Visulalization 1} #plot departure delay vs arrival delay and show color coding based on carrier ggplot(flights , aes(x=dep_delay, y= arr_delay, color= carrier))+ geom_point() ``` We can see in the above visualization, there is a positive relationship between Departure Delays and Arrival Delays, this means number of arrival delays will increase with the increase in departure delays ```{r Q2 visualization 2} #group flights by origin flights_by_airport <- flights %>% group_by(origin) #plot departure delay vs arrival delay for this data grouped by origin ggplot(flights_by_airport , aes(x=dep_delay, y= arr_delay, color= origin))+ geom_point() ``` By this visualization, I am trying to understand if the relation between departure delay and arrival delay changes with Airport location of the departure, but looks like even after grouping by airport, departure delay and arrival delay has a positive relation. 
##### (d) Challenge Your Results

After completing the exploratory analyses from Problem 1c, do you have any concerns about your findings? How well defined was your original question? Do you still believe this question can be answered using this dataset? Comment on any ethical and/or privacy concerns you have with your analysis.

1. In some visualizations where I created a scatterplot with geom_point, 9430 rows were removed because they had missing values; if these missing values had been present in our data in the first place, the result might have been different and our conclusions would have been more accurate.
2. If we had more data and realistic samples, I feel the relationship between departure delays and arrival delays could be studied in a better way. Currently it shows a positive relation between departure delay and arrival delay, but it doesn't show any cases where there is a different kind of relation; I am not sure if the data is biased, not well collected, or insufficient.
3. From question 1, even though I figured out that most delays occur in summer, I cannot really understand the reason behind this from the data.
4. Question 1 visualizations show some inconsistency between the average delay and the delay counts: January shows more delays when calculated by counting delays.
5. Departure delay has some negative values as well; this might change the answer when calculating the average delay grouped by month.
6. If we had some more data, like when the plane was manufactured, we could research whether flight performance depends on the age of the plane.
7. If we had known the number of seats in the plane, we could research whether performance has any relation to the number of seats.
8. The dataset doesn't provide any reasons for delays, fuel consumption, or flight diversions. This data would have helped to reach more accurate conclusions.
/problem-set1.R
no_license
nivdungebhagyashri/DS_with_R
R
false
false
11,300
r
<!-- This syntax can be used to add comments that are ignored during knitting process. --> ##### Collaborators: <!-- BE SURE TO LIST ALL COLLABORATORS HERE! --> External SOurces : http://soutik.github.io/NYC-Flight-Analysis/ ##### Instructions: Before beginning this assignment, please ensure you have access to R and RStudio; this can be on your own personal computer or on the IMT 573 R Studio Server. 1. Download the `problemset1.Rmd` file from Canvas or save a copy to your local directory on RStudio Server. Open `problemset1.Rmd` in RStudio and supply your solutions to the assignment by editing `problemset1.Rmd`. 2. Replace the "Insert Your Name Here" text in the `author:` field with your own full name. Any collaborators must be listed on the top of your assignment. 3. Be sure to include well-documented (e.g. commented) code chucks, figures, and clearly written text chunk explanations as necessary. Any figures should be clearly labeled and appropriately referenced within the text. Be sure that each visualization adds value to your written explanation; avoid redundancy -- you do no need four different visualizations of the same pattern. 4. Collaboration on problem sets is fun and useful, and we encourage it, but each student must turn in an individual write-up in their own words as well as code/work that is their own. Regardless of whether you work with others, what you turn in must be your own work; this includes code and interpretation of results. The names of all collaborators must be listed on each assignment. Do not copy-and-paste from other students' responses or code. 5. All materials and resources that you use (with the exception of lecture slides) must be appropriately referenced within your assignment. 6. Remember partial credit will be awarded for each question for which a serious attempt at finding an answer has been shown. 
Students are \emph{strongly} encouraged to attempt each question and to document their reasoning process even if they cannot find the correct answer. If you would like to include R code to show this process, but it does not run withouth errors you can do so with the `eval=FALSE` option as follows: ```{r example chunk with a bug, eval=FALSE} a + b # these object dont' exist # if you run this on its own it with give an error ``` 7. When you have completed the assignment and have **checked** that your code both runs in the Console and knits correctly when you click `Knit PDF`, rename the knitted PDF file to `Yps1_ourLastName_YourFirstName.pdf`, and submit the PDF file on Canvas. ##### Setup: In this problem set you will need, at minimum, the following R packages. ```{r Setup, message=FALSE} # Load standard libraries library(tidyverse) install.packages("nycflights13", repos = "http://cran.us.r-project.org") library(nycflights13) ``` #### Problem 1: Exploring the NYC Flights Data In this problem set we will use the data on all flights that departed NYC (i.e. JFK, LGA or EWR) in 2013. You can find this data in the `nycflights13` R package. ```{r} # Load the nycflights13 library which includes data on all data = library(nycflights13) # flights departing NYC data(flights) # Note the data itself is called flights, we will make it into a local df # for readability flights <- tbl_df(flights) # Look at the help file for information about the data # ?flights flights # summary(flights) ``` ##### (a) Importing and Inspecting Data Load the data and describe in a short paragraph how the data was collected and what each variable represents. Perform a basic inspection of the data and discuss what you find. ```{r} flights #head of the data head(flights,10) #tail of the data tail(flights,10) #check variables in data names(flights) #check headers flights[1,] str(flights) #levels(flights$origin) unique(flights$carrier) ``` There are 19 columns and 336776 rows in the Dataset. 
Variables: Year, Month, Day: represents the day of the flight in data, dep_time, sched-dep_time, dep_delay: represent all the time data related to flight departures. arr_time, sched_arr_time, arr_delay: represents all time data related to arrivals of the flights. carrier, flight and tailnum: gives data related to flight to identify individual filghts based on number and carrier. origin and dest: these columns are origin location and destination locations for the flight. distance, hour, minute, time_hour: this data is related to actual journy of the flight to calculates speed, understand the time related questions Findings from the data: 1. we can calculate total delay for flight by adding departure delay and arrival delay 2. The data given has 3 different airports from NYC (JFK, LGA, EWR) so we can do analysis based on airport location as well 3. Departure delays has negative values as well and its not considered delay in actual sense 4. We can calculate flight performance by doing (departure delay + arrival delay))/Air time 5.There are 15 unique carriers from NYC airport and one value is missing 6. Flights connects to planes via a single variable, tailnum. 7. Flights connects to airlines through the carrier variable. 8. Flights connects to airports in two ways: via the origin and dest variables. 9. Flights connects to weather via origin (the location), and year, month, day and hour (the time). ##### (b) Formulating Questions Consider the NYC flights data. Formulate two motivating questions you want to explore using this data. Describe why these questions are interesting and how you might go about answering them. 1. we can find out if there are any seasonal patterns in departure delays for flights from NYC Why -> We can explore which seasons more has departure delays, if we know the seasons and the period we can try to understand specific patterns in that season that creates delays in departure. 
We can also compare this data with data from other years and try to find a pattern. How -> we will group flights by month of the year 2013, calculate average departure delay for every month and maximum departure delay can be found by taking max value. 2. Is there any relationship between Departure Delay and Arrival Delay Why <- I want to understand those flights which gets delayed also arrive late? or the delay is recovered and the flights arrive on the same time? or there is negative or positive relationship between Arrival delay and departure delay. This will help us conclude if your flight from NYC got delayed it will arrive late (or not) How <- we can create a scatterplot of departure delay and arriavl delay for all the flights from NYC, We can also understand if this relationship changes with the change in the airport by grouping flights data by airport. ##### (c) Exploring Data For each of the questions you proposed in Problem 1b, perform an exploratory data analysis designed to address the question. At a minimum, you should produce two visualizations (graphics or tables) related to each question. Be sure to describe what the visuals show and how they speak to your question of interest. ```{r Q1 visualization1} library(dplyr) #group flights by month and take average of departure delay for each month flights_by_month <- flights %>% group_by(month)%>% summarise_at(vars(dep_delay), funs(mean(., na.rm=TRUE))) #flights grouped by month flights_by_month #Visualize this data plot(flights_by_month ,type="l") ``` The above visualization shows departure delayes are maxium in June, July. 
It shows summer is the season where there are maxium departure delays for flights from NYC ```{r Q1 visualization2} # Exploring the seasonal patterns in total number of delayed by Month library(dplyr) #create date variable using year, month, day flights$date <- with(flights, ISOdate(year = 2013, month, day)) #group flights by date and count total departure delay for the that day flights_by_date <- flights %>% group_by(date) %>% count(dep_delay) #plot the visualization of date vs delay plot(x=flights$date, y=flights$dep_delay, xlab ="Date", ylab = "Delayed Flights Count" ) ``` In the above visualization we see total counts of departure delays plotted against month, which shows departure delays are more in June and July i.e. Summer season also one month of winter i.e. Jan This shows us that there is a PEAK in delays during June, July & August month and the delays generally fall down during the winter months i.e. October, November, December ##### (c) Visualizations for question 2 ```{r Q2 Visulalization 1} #plot departure delay vs arrival delay and show color coding based on carrier ggplot(flights , aes(x=dep_delay, y= arr_delay, color= carrier))+ geom_point() ``` We can see in the above visualization, there is a positive relationship between Departure Delays and Arrival Delays, this means number of arrival delays will increase with the increase in departure delays ```{r Q2 visualization 2} #group flights by origin flights_by_airport <- flights %>% group_by(origin) #plot departure delay vs arrival delay for this data grouped by origin ggplot(flights_by_airport , aes(x=dep_delay, y= arr_delay, color= origin))+ geom_point() ``` By this visualization, I am trying to understand if the relation between departure delay and arrival delay changes with Airport location of the departure, but looks like even after grouping by airport, departure delay and arrival delay has a positive relation. 
##### (d) Challenge Your Results

After completing the exploratory analyses from Problem 1c, do you have any concerns about your findings? How well defined was your original question? Do you still believe this question can be answered using this dataset? Comment on any ethical and/or privacy concerns you have with your analysis.

1. In some visualizations where I created a scatterplot with geom_point, 9430 rows were removed because they had missing values; if these missing values had been present in our data in the first place, the result might have been different and our conclusions would have been more accurate.
2. If we had more data and realistic samples, I feel the relationship between departure delays and arrival delays could be studied in a better way. Currently it shows a positive relation between departure delay and arrival delay, but it doesn't show any cases where there is a different kind of relation; I am not sure if the data is biased, not well collected, or insufficient.
3. From question 1, even though I figured out that most delays occur in summer, I cannot really understand the reason behind this from the data.
4. Question 1 visualizations show some inconsistency between the average delay and the delay counts: January shows more delays when calculated by counting delays.
5. Departure delay has some negative values as well; this might change the answer when calculating the average delay grouped by month.
6. If we had some more data, like when the plane was manufactured, we could research whether flight performance depends on the age of the plane.
7. If we had known the number of seats in the plane, we could research whether performance has any relation to the number of seats.
8. The dataset doesn't provide any reasons for delays, fuel consumption, or flight diversions. This data would have helped to reach more accurate conclusions.
# Chapter 4.2 ----------------------------------------------------------------
# Multiple linear regression examples: inference on individual coefficients
# (t statistics, p-values, and confidence intervals).

library(readxl)

# ---- Example 4.2: Meddicorp (SALES ~ ADV + BONUS) --------------------------
# Import the Meddicorp data from the first sheet of an Excel workbook.
MEDDICORP <- read_excel("C:\\Users\\Zack\\Documents\\Spring 2021\\MATH 212\\Data Sets\\MEDDICORP4.xlsx", sheet=1)
head(MEDDICORP)

# Pull the response and the two predictors into plain vectors.
SALES <- MEDDICORP$SALES
ADV <- MEDDICORP$ADV
BONUS <- MEDDICORP$BONUS

# Fit the multiple regression of sales on advertising and bonus spending.
model_fit <- lm(SALES ~ ADV + BONUS)
summary(model_fit)
# Coefficients (from summary):
#               Estimate Std. Error t value Pr(>|t|)
# (Intercept) -516.4443   189.8757  -2.720   0.0125 *
# ADV            2.4732     0.2753   8.983 8.18e-09 ***
# BONUS          1.8562     0.7157   2.593   0.0166 *

# Question 1: two-sided p-value for the ADV coefficient (t = 8.98, df = 22).
2*(1-pt(8.98,22))

# Question 2: two-sided p-value for the BONUS coefficient (t = 2.59, df = 22).
2*(1-pt(2.59,22))

# Question 3: one-sided (upper-tail) p-value for the BONUS coefficient.
(1-pt(2.59,22))

# Question 4: critical t value and 95% confidence intervals for the
# coefficients.
qt(0.975,22)
confint(model_fit, level =0.95)

# Question 5: same critical value and intervals as Question 4.
qt(0.975,22)
confint(model_fit, level =0.95)

# ---- Question 4 / 14: College admission and graduation rates ---------------
COLLEGE <- read_excel("C:\\Users\\dassanayakes\\Desktop\\Toshiba\\Rhodes MATH 212\\COLLEGE4.xlsx", sheet=1)
head(COLLEGE)

# Response: four-year graduation rate.
GRADRATE <- COLLEGE$GRADRATE4
# Predictors: admission rate, student/faculty ratio, and average debt.
ADMISRATE <- COLLEGE$ADMISRATE
SFACRATIO <- COLLEGE$SFACRATIO
AVGDEBT <- COLLEGE$AVGDEBT

# Fit the multiple regression of graduation rate on the three predictors.
model_fit <- lm(GRADRATE ~ ADMISRATE + SFACRATIO + AVGDEBT)
summary(model_fit)
# Coefficients (from summary):
#               Estimate Std. Error t value Pr(>|t|)
# (Intercept)  1.110e+00  5.141e-02  21.580  < 2e-16 ***
# ADMISRATE   -3.798e-01  6.898e-02  -5.505 1.18e-07 ***
# SFACRATIO   -2.789e-02  3.404e-03  -8.194 3.57e-14 ***
# AVGDEBT      5.169e-07  2.399e-06   0.215     0.83

# Question 1: two-sided p-value for ADMISRATE (t = 5.51, df = 191).
2*(1-pt(5.51,191))

# Question 2: two-sided p-value for SFACRATIO (t = -8.176, df = 191).
2*(pt(-8.176,191))

# Question 3: critical t value and 95% confidence intervals for the
# coefficients.
qt(0.975, 191)
confint(model_fit, level =0.95)
/MATH 212/R Files/Chapter 4.2.R
no_license
zackroder/Spring-2021
R
false
false
2,737
r
# Chapter 4.2 — Meddicorp example
# Fits multiple linear regressions and computes the associated t-tests,
# p-values, and confidence intervals for the coefficients.

library(readxl)

# Import data from an Excel workbook
MEDDICORP <- read_excel("C:\\Users\\Zack\\Documents\\Spring 2021\\MATH 212\\Data Sets\\MEDDICORP4.xlsx", sheet = 1)

# View data
head(MEDDICORP)

# Extract the response and the two predictors as vectors
SALES <- MEDDICORP$SALES
ADV <- MEDDICORP$ADV
BONUS <- MEDDICORP$BONUS

# Fit the regression model SALES ~ ADV + BONUS
Model <- lm(SALES ~ ADV + BONUS)
summary(Model)
# Coefficients:
#              Estimate Std. Error t value Pr(>|t|)
# (Intercept) -516.4443   189.8757  -2.720   0.0125 *
# ADV            2.4732     0.2753   8.983 8.18e-09 ***
# BONUS          1.8562     0.7157   2.593   0.0166 *

#################################################################
# Example 4.2 Meddicorp

# Question 1
# Two-sided p-value for the ADV coefficient (t = 8.98, df = 22)
2 * (1 - pt(8.98, 22))

############
# Question 2
# Two-sided p-value for the BONUS coefficient (t = 2.59, df = 22)
2 * (1 - pt(2.59, 22))

############
# Question 3
# One-sided p-value for the BONUS coefficient (t = 2.59, df = 22)
(1 - pt(2.59, 22))

############
# Question 4
# Critical t value at the 95% level (df = 22)
qt(0.975, 22)
# Finding 95% confidence intervals for coefficients
confint(Model, level = 0.95)

############
# Question 5
# Same critical value and confidence intervals as Question 4
qt(0.975, 22)
confint(Model, level = 0.95)

##########################################################################
# Question 4 Admission rate
# Question 14 Graduation Rate

library(readxl)

# Import data from an Excel workbook
COLLEGE <- read_excel("C:\\Users\\dassanayakes\\Desktop\\Toshiba\\Rhodes MATH 212\\COLLEGE4.xlsx", sheet = 1)

# View data
head(COLLEGE)

# Extract the response and the three predictors as vectors
GRADRATE <- COLLEGE$GRADRATE4
ADMISRATE <- COLLEGE$ADMISRATE
SFACRATIO <- COLLEGE$SFACRATIO
AVGDEBT <- COLLEGE$AVGDEBT

# Fit the regression model GRADRATE ~ ADMISRATE + SFACRATIO + AVGDEBT
Model <- lm(GRADRATE ~ ADMISRATE + SFACRATIO + AVGDEBT)
summary(Model)
#               Estimate Std. Error t value Pr(>|t|)
# (Intercept)  1.110e+00  5.141e-02  21.580  < 2e-16 ***
# ADMISRATE   -3.798e-01  6.898e-02  -5.505 1.18e-07 ***
# SFACRATIO   -2.789e-02  3.404e-03  -8.194 3.57e-14 ***
# AVGDEBT      5.169e-07  2.399e-06   0.215     0.83

######
# Question 1
# Two-sided p-value for the ADMISRATE coefficient (t = 5.51, df = 191)
2 * (1 - pt(5.51, 191))

######
# Question 2
# Two-sided p-value for the SFACRATIO coefficient (t = -8.176, df = 191)
2 * (pt(-8.176, 191))

######
# Question 3
# Critical t value at the 95% level (df = 191)
qt(0.975, 191)
# Finding 95% confidence intervals for coefficients
confint(Model, level = 0.95)

#########################
#' Plot the output of myseq_n for each row of a data frame
#'
#' For each row of `x`, calls `myseq_n()` with the first three columns as the
#' numeric sequence inputs and the fourth column as `n`, then plots the
#' resulting values against `n` as a line.
#'
#' @param x A data frame with exactly 4 columns: three sequence inputs
#'   followed by the number of terms `n`.
#'
#' @return A ggplot object plotting the sequence output against `n`.
#' @export
#'
#' @examples
#' # myseq_graph(data.frame(a = 1, b = 2, c = 3, n = 1:5))
myseq_graph <- function(x){
  # Validate the input before doing any work
  if (!is.data.frame(x)){
    stop("x should be data frame")
  } else if (ncol(x) != 4){
    stop("x must have 4 values")
  } else {
    # Evaluate myseq_n row by row; each row supplies the three inputs and n
    num <- vector(mode = "double", length = nrow(x))
    for (i in seq_along(num)){
      a <- x[i,]
      num[[i]] <- myseq_n(as.double(c(a[1], a[2], a[3])), n = as.integer(a[4]))
    }
    # Assemble the plotting data: n (4th column) on x, myseq_n output on y
    xlab <- x[,4]
    ylab <- data.frame(num)
    dataset <- cbind(xlab, ylab)
    colnames(dataset) <- c("xlab", "ylab")
    dataset <- data.frame(dataset)
    ggplot2::ggplot(data = dataset, mapping = ggplot2::aes(x = xlab, y = ylab))+
      ggplot2::geom_line()+
      ggplot2::xlab("n")+
      ggplot2::ylab("output")+
      ggplot2::ggtitle("My sequence")
  }
}
/R/function2.R
permissive
jiarong0829/hw04pjiarong0829
R
false
false
794
r
#' Plot the output of myseq_n for each row of a data frame
#'
#' For each row of `x`, calls `myseq_n()` with the first three columns as the
#' numeric sequence inputs and the fourth column as `n`, then plots the
#' resulting values against `n` as a line.
#'
#' @param x A data frame with exactly 4 columns: three sequence inputs
#'   followed by the number of terms `n`.
#'
#' @return A ggplot object plotting the sequence output against `n`.
#' @export
#'
#' @examples
#' # myseq_graph(data.frame(a = 1, b = 2, c = 3, n = 1:5))
myseq_graph <- function(x){
  # Validate the input before doing any work
  if (!is.data.frame(x)){
    stop("x should be data frame")
  } else if (ncol(x) != 4){
    stop("x must have 4 values")
  } else {
    # Evaluate myseq_n row by row; each row supplies the three inputs and n
    num <- vector(mode = "double", length = nrow(x))
    for (i in seq_along(num)){
      a <- x[i,]
      num[[i]] <- myseq_n(as.double(c(a[1], a[2], a[3])), n = as.integer(a[4]))
    }
    # Assemble the plotting data: n (4th column) on x, myseq_n output on y
    xlab <- x[,4]
    ylab <- data.frame(num)
    dataset <- cbind(xlab, ylab)
    colnames(dataset) <- c("xlab", "ylab")
    dataset <- data.frame(dataset)
    ggplot2::ggplot(data = dataset, mapping = ggplot2::aes(x = xlab, y = ylab))+
      ggplot2::geom_line()+
      ggplot2::xlab("n")+
      ggplot2::ylab("output")+
      ggplot2::ggtitle("My sequence")
  }
}
# Results.R — computes moments, risk adjustments (RA) and result graphics for
# two risk groups under three severity-distribution assumptions
# (compound Poisson, multinomial, Dirichlet).

# Libraries used in the program
library(openxlsx)     # retrieve data from an excel file
library(tidyverse)    # organize the data
library(moments)      # determine the moments based on empirical information
library(ChainLadder)  # estimate the development factors (function ata)
library(ggplot2)      # visual analysis of the data
library(rootSolve)    # solve for the single discount rate
library(stats4)       # call function mle
library(formattable)  # format data
library(plotly)       # present the results in a graphic

source("Functions - general.R")
source("Functions - cash flows.R")
source("Functions - parameters.R")
source("Functions - results.R")
source("Data.R")

# NOTE(review): the ibnr calls use Pj_* while the cbni calls use PJ_* —
# confirm against Data.R that these are two distinct objects and not a typo.

# Risk group 5 ----

# Determine the severity distribution moments based on the CP
CMOM_RBNS_CP_1 <- cmom_cp(D_RBNS_1, Vt_DF_1, G_1_CP)
CMOM_IBNR_CP_1 <- cmom_cp(D_IBNR_1, Vt_DF_1, G_1_CP)
CMOM_CBNI_CP_1 <- cmom_cp(D_CBNI_1, Vt_DF_1, G_1_CP)

# Determine the moments based on the CP
MOM_RBNS_CP_1 <- mom_rbns_gen(D_RBNS_1, Njd_1, CMOM_RBNS_CP_1)
MOM_IBNR_CP_1 <- mom_ibnr_gen(D_IBNR_1, Pj_1, PId_DF_1, Post_Mom_DV_1, CMOM_IBNR_CP_1)
MOM_CBNI_CP_1 <- mom_cbni_gen(D_CBNI_1, PJ_1, PId_DF_1, Prior_Mom_DV_1, CMOM_CBNI_CP_1)

# Determine the RA based on the CP
RA_LIC_CP_1 <- ra_lic(0.75, MOM_RBNS_CP_1, MOM_IBNR_CP_1)
RA_LRC_CP_1 <- ra_lrc(0.75, MOM_CBNI_CP_1)

# Determine the severity distribution moments based on the multinomial
CMOM_RBNS_MULT_1 <- cmom_mult(D_RBNS_1, Vt_DF_1, G_1)
CMOM_IBNR_MULT_1 <- cmom_mult(D_IBNR_1, Vt_DF_1, G_1)
CMOM_CBNI_MULT_1 <- cmom_mult(D_CBNI_1, Vt_DF_1, G_1)

# Determine the moments based on the multinomial
MOM_RBNS_MULT_1 <- mom_rbns_gen(D_RBNS_1, Njd_1, CMOM_RBNS_MULT_1)
MOM_IBNR_MULT_1 <- mom_ibnr_gen(D_IBNR_1, Pj_1, PId_DF_1, Post_Mom_DV_1, CMOM_IBNR_MULT_1)
MOM_CBNI_MULT_1 <- mom_cbni_gen(D_CBNI_1, PJ_1, PId_DF_1, Prior_Mom_DV_1, CMOM_CBNI_MULT_1)

# Determine the RA based on the multinomial
RA_LIC_MULT_1 <- ra_lic(0.75, MOM_RBNS_MULT_1, MOM_IBNR_MULT_1)
RA_LRC_MULT_1 <- ra_lrc(0.75, MOM_CBNI_MULT_1)

# Determine the severity distribution moments based on the Dirichlet
# Assume that delta=1 so that the payment pattern for the Dirichlet deltat
# is the same as for the other models vt
# The delta chosen impacts the volatility of the model
CMOM_RBNS_DIR_1 <- cmom_dir(D_RBNS_1, Vt_DF_1, G_1)
CMOM_IBNR_DIR_1 <- cmom_dir(D_IBNR_1, Vt_DF_1, G_1)
CMOM_CBNI_DIR_1 <- cmom_dir(D_CBNI_1, Vt_DF_1, G_1)

# Determine the moments based on the Dirichlet
MOM_RBNS_DIR_1 <- mom_rbns_gen(D_RBNS_1, Njd_1, CMOM_RBNS_DIR_1)
MOM_IBNR_DIR_1 <- mom_ibnr_gen(D_IBNR_1, Pj_1, PId_DF_1, Post_Mom_DV_1, CMOM_IBNR_DIR_1)
MOM_CBNI_DIR_1 <- mom_cbni_gen(D_CBNI_1, PJ_1, PId_DF_1, Prior_Mom_DV_1, CMOM_CBNI_DIR_1)

# Determine the RA based on the Dirichlet
RA_LIC_DIR_1 <- ra_lic(0.75, MOM_RBNS_DIR_1, MOM_IBNR_DIR_1)
RA_LRC_DIR_1 <- ra_lrc(0.75, MOM_CBNI_DIR_1)

# Risk group 6 ----

# Determine the severity distribution moments based on the CP
CMOM_RBNS_CP_2 <- cmom_cp(D_RBNS_2, Vt_DF_2, G_2_CP)
CMOM_IBNR_CP_2 <- cmom_cp(D_IBNR_2, Vt_DF_2, G_2_CP)
CMOM_CBNI_CP_2 <- cmom_cp(D_CBNI_2, Vt_DF_2, G_2_CP)

# Determine the moments based on the CP
MOM_RBNS_CP_2 <- mom_rbns_gen(D_RBNS_2, Njd_2, CMOM_RBNS_CP_2)
MOM_IBNR_CP_2 <- mom_ibnr_gen(D_IBNR_2, Pj_2, PId_DF_2, Post_Mom_DV_2, CMOM_IBNR_CP_2)
MOM_CBNI_CP_2 <- mom_cbni_gen(D_CBNI_2, PJ_2, PId_DF_2, Prior_Mom_DV_2, CMOM_CBNI_CP_2)

# Determine the RA based on the CP
RA_LIC_CP_2 <- ra_lic(0.75, MOM_RBNS_CP_2, MOM_IBNR_CP_2)
RA_LRC_CP_2 <- ra_lrc(0.75, MOM_CBNI_CP_2)

# Determine the severity distribution moments based on the multinomial
CMOM_RBNS_MULT_2 <- cmom_mult(D_RBNS_2, Vt_DF_2, G_2)
CMOM_IBNR_MULT_2 <- cmom_mult(D_IBNR_2, Vt_DF_2, G_2)
CMOM_CBNI_MULT_2 <- cmom_mult(D_CBNI_2, Vt_DF_2, G_2)

# Determine the moments based on the multinomial
MOM_RBNS_MULT_2 <- mom_rbns_gen(D_RBNS_2, Njd_2, CMOM_RBNS_MULT_2)
MOM_IBNR_MULT_2 <- mom_ibnr_gen(D_IBNR_2, Pj_2, PId_DF_2, Post_Mom_DV_2, CMOM_IBNR_MULT_2)
MOM_CBNI_MULT_2 <- mom_cbni_gen(D_CBNI_2, PJ_2, PId_DF_2, Prior_Mom_DV_2, CMOM_CBNI_MULT_2)

# Determine the RA based on the multinomial
RA_LIC_MULT_2 <- ra_lic(0.75, MOM_RBNS_MULT_2, MOM_IBNR_MULT_2)
RA_LRC_MULT_2 <- ra_lrc(0.75, MOM_CBNI_MULT_2)

# Determine the severity distribution moments based on the Dirichlet
# Assume that delta=1 so that the payment pattern for the Dirichlet deltat
# is the same as for the other models vt
# The delta chosen impacts the volatility of the model
CMOM_RBNS_DIR_2 <- cmom_dir(D_RBNS_2, Vt_DF_2, G_2)
CMOM_IBNR_DIR_2 <- cmom_dir(D_IBNR_2, Vt_DF_2, G_2)
CMOM_CBNI_DIR_2 <- cmom_dir(D_CBNI_2, Vt_DF_2, G_2)

# Determine the moments based on the Dirichlet
MOM_RBNS_DIR_2 <- mom_rbns_gen(D_RBNS_2, Njd_2, CMOM_RBNS_DIR_2)
MOM_IBNR_DIR_2 <- mom_ibnr_gen(D_IBNR_2, Pj_2, PId_DF_2, Post_Mom_DV_2, CMOM_IBNR_DIR_2)
MOM_CBNI_DIR_2 <- mom_cbni_gen(D_CBNI_2, PJ_2, PId_DF_2, Prior_Mom_DV_2, CMOM_CBNI_DIR_2)

# Determine the RA based on the Dirichlet
RA_LIC_DIR_2 <- ra_lic(0.75, MOM_RBNS_DIR_2, MOM_IBNR_DIR_2)
RA_LRC_DIR_2 <- ra_lrc(0.75, MOM_CBNI_DIR_2)

# Presentation of the results ----

# Data frame with the mean obtained for each case: RBNS, IBNR and CBNI
Summary_mean <- data.frame("RBNS" = c(CF_RBNS_DISC_1, CF_RBNS_DISC_2),
                           "IBNR" = c(CF_IBNR_DISC_1, CF_IBNR_DISC_2),
                           "CBNI" = c(CF_CBNI_DISC_1, CF_CBNI_DISC_2))
Summary_mean <- Summary_mean %>% mutate(IBNS = RBNS + IBNR)

# Data frame with the elements of the Risk Adjustment separately:
# SD adjustment and skewness adjustment (Cornish-Fisher style terms at 85%)
Summary_85_CP <- data.frame(
  "Element" = c("SD_1", "Skewness_1", "SD_2", "Skewness_2"),
  "RBNS" = c(qnorm(0.85) * sqrt(MOM_RBNS_CP_1[2]),
             ((qnorm(0.85)^2 - 1) * MOM_RBNS_CP_1[3]) / (6 * MOM_RBNS_CP_1[2]),
             qnorm(0.85) * sqrt(MOM_RBNS_CP_2[2]),
             ((qnorm(0.85)^2 - 1) * MOM_RBNS_CP_2[3]) / (6 * MOM_RBNS_CP_2[2])),
  "IBNR" = c(qnorm(0.85) * sqrt(MOM_IBNR_CP_1[2]),
             ((qnorm(0.85)^2 - 1) * MOM_IBNR_CP_1[3]) / (6 * MOM_IBNR_CP_1[2]),
             qnorm(0.85) * sqrt(MOM_IBNR_CP_2[2]),
             ((qnorm(0.85)^2 - 1) * MOM_IBNR_CP_2[3]) / (6 * MOM_IBNR_CP_2[2])),
  "IBNS" = c(qnorm(0.85) * sqrt(MOM_RBNS_CP_1[2] + MOM_IBNR_CP_1[2]),
             ((qnorm(0.85)^2 - 1) * (MOM_RBNS_CP_1[3] + MOM_IBNR_CP_1[3])) / (6 * (MOM_RBNS_CP_1[2] + MOM_IBNR_CP_1[2])),
             qnorm(0.85) * sqrt(MOM_RBNS_CP_2[2] + MOM_IBNR_CP_2[2]),
             ((qnorm(0.85)^2 - 1) * (MOM_RBNS_CP_2[3] + MOM_IBNR_CP_2[3])) / (6 * (MOM_RBNS_CP_2[2] + MOM_IBNR_CP_2[2]))),
  "CBNI" = c(qnorm(0.85) * sqrt(MOM_CBNI_CP_1[2]),
             ((qnorm(0.85)^2 - 1) * MOM_CBNI_CP_1[3]) / (6 * MOM_CBNI_CP_1[2]),
             qnorm(0.85) * sqrt(MOM_CBNI_CP_2[2]),
             ((qnorm(0.85)^2 - 1) * MOM_CBNI_CP_2[3]) / (6 * MOM_CBNI_CP_2[2])))

# Risk group 5
# Data frame with the RA for different confidence levels and different
# assumptions: CP, Mult, Dir
RA_LIC_CL_1 <- ra_lic_cl(MOM_RBNS_CP_1, MOM_RBNS_MULT_1, MOM_RBNS_DIR_1,
                         MOM_IBNR_CP_1, MOM_IBNR_MULT_1, MOM_IBNR_DIR_1)
RA_LRC_CL_1 <- ra_lrc_cl(MOM_CBNI_CP_1, MOM_CBNI_MULT_1, MOM_CBNI_DIR_1)

# Add columns with the relative size of the RA compared to the mean
RA_LIC_CL_1 <- RA_LIC_CL_1 %>%
  mutate(RA_CP_PER = CP / Summary_mean$IBNS[1],
         RA_MULT_PER = MULT / Summary_mean$IBNS[1],
         RA_DIR_PER = DIR / Summary_mean$IBNS[1])
RA_LRC_CL_1 <- RA_LRC_CL_1 %>%
  mutate(RA_CP_PER = CP / Summary_mean$CBNI[1],
         RA_MULT_PER = MULT / Summary_mean$CBNI[1],
         RA_DIR_PER = DIR / Summary_mean$CBNI[1])

# Risk group 6
# Data frame with the RA for different confidence levels and different
# assumptions: CP, Mult, Dir
RA_LIC_CL_2 <- ra_lic_cl(MOM_RBNS_CP_2, MOM_RBNS_MULT_2, MOM_RBNS_DIR_2,
                         MOM_IBNR_CP_2, MOM_IBNR_MULT_2, MOM_IBNR_DIR_2)
RA_LRC_CL_2 <- ra_lrc_cl(MOM_CBNI_CP_2, MOM_CBNI_MULT_2, MOM_CBNI_DIR_2)

# Add columns with the relative size of the RA compared to the mean
RA_LIC_CL_2 <- RA_LIC_CL_2 %>%
  mutate(RA_CP_PER = CP / Summary_mean$IBNS[2],
         RA_MULT_PER = MULT / Summary_mean$IBNS[2],
         RA_DIR_PER = DIR / Summary_mean$IBNS[2])
RA_LRC_CL_2 <- RA_LRC_CL_2 %>%
  mutate(RA_CP_PER = CP / Summary_mean$CBNI[2],
         RA_MULT_PER = MULT / Summary_mean$CBNI[2],
         RA_DIR_PER = DIR / Summary_mean$CBNI[2])

# Graphics with the results ----

# Risk group 5 LIC
FIG_LIC_1 <- plot_ly(data = RA_LIC_CL_1, x = ~CL) %>%
  add_trace(y = ~RA_CP_PER, type = "scatter", name = "Compound Poisson",
            mode = "lines", line = list(color = "#5BC8AC"))
FIG_LIC_1 <- FIG_LIC_1 %>%
  add_trace(y = ~RA_MULT_PER, type = "scatter", name = "Multinomial",
            mode = "lines", line = list(color = "#E6D72A"))
FIG_LIC_1 <- FIG_LIC_1 %>%
  add_trace(y = ~RA_DIR_PER, type = "scatter", name = "Dirichlet",
            mode = "lines", line = list(color = "#F18D9E"))
FIG_LIC_1 <- FIG_LIC_1 %>%
  layout(xaxis = list(tickformat = "%", title = "Confidence level"),
         yaxis = list(tickformat = "%", title = "Risk adjustment"),
         legend = list(x = 0.1, y = 0.9),
         title = "Risk adjustment LIC for risk group 5")

# Risk group 5 LRC
FIG_LRC_1 <- plot_ly(data = RA_LRC_CL_1, x = ~CL) %>%
  add_trace(y = ~RA_CP_PER, type = "scatter", name = "Compound Poisson",
            mode = "lines", line = list(color = "#9BC01C"))
FIG_LRC_1 <- FIG_LRC_1 %>%
  add_trace(y = ~RA_MULT_PER, type = "scatter", name = "Multinomial",
            mode = "lines", line = list(color = "#FA6775"))
FIG_LRC_1 <- FIG_LRC_1 %>%
  add_trace(y = ~RA_DIR_PER, type = "scatter", name = "Dirichlet",
            mode = "lines", line = list(color = "#FFD64D"))
FIG_LRC_1 <- FIG_LRC_1 %>%
  layout(xaxis = list(tickformat = "%", title = "Confidence level"),
         yaxis = list(tickformat = "%", title = "Risk adjustment"),
         legend = list(x = 0.1, y = 0.9),
         title = "Risk adjustment LRC for risk group 5")

# Risk group 6 LIC
FIG_LIC_2 <- plot_ly(data = RA_LIC_CL_2, x = ~CL) %>%
  add_trace(y = ~RA_CP_PER, type = "scatter", name = "Compound Poisson",
            mode = "lines", line = list(color = "#5BC8AC"))
FIG_LIC_2 <- FIG_LIC_2 %>%
  add_trace(y = ~RA_MULT_PER, type = "scatter", name = "Multinomial",
            # BUGFIX: the original hex color was missing the leading "#"
            mode = "lines", line = list(color = "#1E656D"))
FIG_LIC_2 <- FIG_LIC_2 %>%
  add_trace(y = ~RA_DIR_PER, type = "scatter", name = "Dirichlet",
            mode = "lines", line = list(color = "#F62A00"))
FIG_LIC_2 <- FIG_LIC_2 %>%
  layout(xaxis = list(tickformat = "%", title = "Confidence level"),
         yaxis = list(tickformat = "%", title = "Risk adjustment"),
         legend = list(x = 0.1, y = 0.9),
         title = "Risk adjustment LIC for risk group 6")

# Risk group 6 LRC
FIG_LRC_2 <- plot_ly(data = RA_LRC_CL_2, x = ~CL) %>%
  add_trace(y = ~RA_CP_PER, type = "scatter", name = "Compound Poisson",
            mode = "lines", line = list(color = "#FD974F"))
FIG_LRC_2 <- FIG_LRC_2 %>%
  add_trace(y = ~RA_MULT_PER, type = "scatter", name = "Multinomial",
            mode = "lines", line = list(color = "#C60000"))
FIG_LRC_2 <- FIG_LRC_2 %>%
  add_trace(y = ~RA_DIR_PER, type = "scatter", name = "Dirichlet",
            mode = "lines", line = list(color = "#805A3B"))
FIG_LRC_2 <- FIG_LRC_2 %>%
  layout(xaxis = list(tickformat = "%", title = "Confidence level"),
         yaxis = list(tickformat = "%", title = "Risk adjustment"),
         legend = list(x = 0.1, y = 0.9),
         title = "Risk adjustment LRC for risk group 6")

# Display the figures
FIG_LIC_1
FIG_LIC_2
FIG_LRC_1
FIG_LRC_2
/Results.R
no_license
tatianasantos/MFW-RA
R
false
false
11,978
r
# Results.R — computes moments, risk adjustments (RA) and result graphics for
# two risk groups under three severity-distribution assumptions
# (compound Poisson, multinomial, Dirichlet).

# Libraries used in the program
library(openxlsx)     # retrieve data from an excel file
library(tidyverse)    # organize the data
library(moments)      # determine the moments based on empirical information
library(ChainLadder)  # estimate the development factors (function ata)
library(ggplot2)      # visual analysis of the data
library(rootSolve)    # solve for the single discount rate
library(stats4)       # call function mle
library(formattable)  # format data
library(plotly)       # present the results in a graphic

source("Functions - general.R")
source("Functions - cash flows.R")
source("Functions - parameters.R")
source("Functions - results.R")
source("Data.R")

# NOTE(review): the ibnr calls use Pj_* while the cbni calls use PJ_* —
# confirm against Data.R that these are two distinct objects and not a typo.

# Risk group 5 ----

# Determine the severity distribution moments based on the CP
CMOM_RBNS_CP_1 <- cmom_cp(D_RBNS_1, Vt_DF_1, G_1_CP)
CMOM_IBNR_CP_1 <- cmom_cp(D_IBNR_1, Vt_DF_1, G_1_CP)
CMOM_CBNI_CP_1 <- cmom_cp(D_CBNI_1, Vt_DF_1, G_1_CP)

# Determine the moments based on the CP
MOM_RBNS_CP_1 <- mom_rbns_gen(D_RBNS_1, Njd_1, CMOM_RBNS_CP_1)
MOM_IBNR_CP_1 <- mom_ibnr_gen(D_IBNR_1, Pj_1, PId_DF_1, Post_Mom_DV_1, CMOM_IBNR_CP_1)
MOM_CBNI_CP_1 <- mom_cbni_gen(D_CBNI_1, PJ_1, PId_DF_1, Prior_Mom_DV_1, CMOM_CBNI_CP_1)

# Determine the RA based on the CP
RA_LIC_CP_1 <- ra_lic(0.75, MOM_RBNS_CP_1, MOM_IBNR_CP_1)
RA_LRC_CP_1 <- ra_lrc(0.75, MOM_CBNI_CP_1)

# Determine the severity distribution moments based on the multinomial
CMOM_RBNS_MULT_1 <- cmom_mult(D_RBNS_1, Vt_DF_1, G_1)
CMOM_IBNR_MULT_1 <- cmom_mult(D_IBNR_1, Vt_DF_1, G_1)
CMOM_CBNI_MULT_1 <- cmom_mult(D_CBNI_1, Vt_DF_1, G_1)

# Determine the moments based on the multinomial
MOM_RBNS_MULT_1 <- mom_rbns_gen(D_RBNS_1, Njd_1, CMOM_RBNS_MULT_1)
MOM_IBNR_MULT_1 <- mom_ibnr_gen(D_IBNR_1, Pj_1, PId_DF_1, Post_Mom_DV_1, CMOM_IBNR_MULT_1)
MOM_CBNI_MULT_1 <- mom_cbni_gen(D_CBNI_1, PJ_1, PId_DF_1, Prior_Mom_DV_1, CMOM_CBNI_MULT_1)

# Determine the RA based on the multinomial
RA_LIC_MULT_1 <- ra_lic(0.75, MOM_RBNS_MULT_1, MOM_IBNR_MULT_1)
RA_LRC_MULT_1 <- ra_lrc(0.75, MOM_CBNI_MULT_1)

# Determine the severity distribution moments based on the Dirichlet
# Assume that delta=1 so that the payment pattern for the Dirichlet deltat
# is the same as for the other models vt
# The delta chosen impacts the volatility of the model
CMOM_RBNS_DIR_1 <- cmom_dir(D_RBNS_1, Vt_DF_1, G_1)
CMOM_IBNR_DIR_1 <- cmom_dir(D_IBNR_1, Vt_DF_1, G_1)
CMOM_CBNI_DIR_1 <- cmom_dir(D_CBNI_1, Vt_DF_1, G_1)

# Determine the moments based on the Dirichlet
MOM_RBNS_DIR_1 <- mom_rbns_gen(D_RBNS_1, Njd_1, CMOM_RBNS_DIR_1)
MOM_IBNR_DIR_1 <- mom_ibnr_gen(D_IBNR_1, Pj_1, PId_DF_1, Post_Mom_DV_1, CMOM_IBNR_DIR_1)
MOM_CBNI_DIR_1 <- mom_cbni_gen(D_CBNI_1, PJ_1, PId_DF_1, Prior_Mom_DV_1, CMOM_CBNI_DIR_1)

# Determine the RA based on the Dirichlet
RA_LIC_DIR_1 <- ra_lic(0.75, MOM_RBNS_DIR_1, MOM_IBNR_DIR_1)
RA_LRC_DIR_1 <- ra_lrc(0.75, MOM_CBNI_DIR_1)

# Risk group 6 ----

# Determine the severity distribution moments based on the CP
CMOM_RBNS_CP_2 <- cmom_cp(D_RBNS_2, Vt_DF_2, G_2_CP)
CMOM_IBNR_CP_2 <- cmom_cp(D_IBNR_2, Vt_DF_2, G_2_CP)
CMOM_CBNI_CP_2 <- cmom_cp(D_CBNI_2, Vt_DF_2, G_2_CP)

# Determine the moments based on the CP
MOM_RBNS_CP_2 <- mom_rbns_gen(D_RBNS_2, Njd_2, CMOM_RBNS_CP_2)
MOM_IBNR_CP_2 <- mom_ibnr_gen(D_IBNR_2, Pj_2, PId_DF_2, Post_Mom_DV_2, CMOM_IBNR_CP_2)
MOM_CBNI_CP_2 <- mom_cbni_gen(D_CBNI_2, PJ_2, PId_DF_2, Prior_Mom_DV_2, CMOM_CBNI_CP_2)

# Determine the RA based on the CP
RA_LIC_CP_2 <- ra_lic(0.75, MOM_RBNS_CP_2, MOM_IBNR_CP_2)
RA_LRC_CP_2 <- ra_lrc(0.75, MOM_CBNI_CP_2)

# Determine the severity distribution moments based on the multinomial
CMOM_RBNS_MULT_2 <- cmom_mult(D_RBNS_2, Vt_DF_2, G_2)
CMOM_IBNR_MULT_2 <- cmom_mult(D_IBNR_2, Vt_DF_2, G_2)
CMOM_CBNI_MULT_2 <- cmom_mult(D_CBNI_2, Vt_DF_2, G_2)

# Determine the moments based on the multinomial
MOM_RBNS_MULT_2 <- mom_rbns_gen(D_RBNS_2, Njd_2, CMOM_RBNS_MULT_2)
MOM_IBNR_MULT_2 <- mom_ibnr_gen(D_IBNR_2, Pj_2, PId_DF_2, Post_Mom_DV_2, CMOM_IBNR_MULT_2)
MOM_CBNI_MULT_2 <- mom_cbni_gen(D_CBNI_2, PJ_2, PId_DF_2, Prior_Mom_DV_2, CMOM_CBNI_MULT_2)

# Determine the RA based on the multinomial
RA_LIC_MULT_2 <- ra_lic(0.75, MOM_RBNS_MULT_2, MOM_IBNR_MULT_2)
RA_LRC_MULT_2 <- ra_lrc(0.75, MOM_CBNI_MULT_2)

# Determine the severity distribution moments based on the Dirichlet
# Assume that delta=1 so that the payment pattern for the Dirichlet deltat
# is the same as for the other models vt
# The delta chosen impacts the volatility of the model
CMOM_RBNS_DIR_2 <- cmom_dir(D_RBNS_2, Vt_DF_2, G_2)
CMOM_IBNR_DIR_2 <- cmom_dir(D_IBNR_2, Vt_DF_2, G_2)
CMOM_CBNI_DIR_2 <- cmom_dir(D_CBNI_2, Vt_DF_2, G_2)

# Determine the moments based on the Dirichlet
MOM_RBNS_DIR_2 <- mom_rbns_gen(D_RBNS_2, Njd_2, CMOM_RBNS_DIR_2)
MOM_IBNR_DIR_2 <- mom_ibnr_gen(D_IBNR_2, Pj_2, PId_DF_2, Post_Mom_DV_2, CMOM_IBNR_DIR_2)
MOM_CBNI_DIR_2 <- mom_cbni_gen(D_CBNI_2, PJ_2, PId_DF_2, Prior_Mom_DV_2, CMOM_CBNI_DIR_2)

# Determine the RA based on the Dirichlet
RA_LIC_DIR_2 <- ra_lic(0.75, MOM_RBNS_DIR_2, MOM_IBNR_DIR_2)
RA_LRC_DIR_2 <- ra_lrc(0.75, MOM_CBNI_DIR_2)

# Presentation of the results ----

# Data frame with the mean obtained for each case: RBNS, IBNR and CBNI
Summary_mean <- data.frame("RBNS" = c(CF_RBNS_DISC_1, CF_RBNS_DISC_2),
                           "IBNR" = c(CF_IBNR_DISC_1, CF_IBNR_DISC_2),
                           "CBNI" = c(CF_CBNI_DISC_1, CF_CBNI_DISC_2))
Summary_mean <- Summary_mean %>% mutate(IBNS = RBNS + IBNR)

# Data frame with the elements of the Risk Adjustment separately:
# SD adjustment and skewness adjustment (Cornish-Fisher style terms at 85%)
Summary_85_CP <- data.frame(
  "Element" = c("SD_1", "Skewness_1", "SD_2", "Skewness_2"),
  "RBNS" = c(qnorm(0.85) * sqrt(MOM_RBNS_CP_1[2]),
             ((qnorm(0.85)^2 - 1) * MOM_RBNS_CP_1[3]) / (6 * MOM_RBNS_CP_1[2]),
             qnorm(0.85) * sqrt(MOM_RBNS_CP_2[2]),
             ((qnorm(0.85)^2 - 1) * MOM_RBNS_CP_2[3]) / (6 * MOM_RBNS_CP_2[2])),
  "IBNR" = c(qnorm(0.85) * sqrt(MOM_IBNR_CP_1[2]),
             ((qnorm(0.85)^2 - 1) * MOM_IBNR_CP_1[3]) / (6 * MOM_IBNR_CP_1[2]),
             qnorm(0.85) * sqrt(MOM_IBNR_CP_2[2]),
             ((qnorm(0.85)^2 - 1) * MOM_IBNR_CP_2[3]) / (6 * MOM_IBNR_CP_2[2])),
  "IBNS" = c(qnorm(0.85) * sqrt(MOM_RBNS_CP_1[2] + MOM_IBNR_CP_1[2]),
             ((qnorm(0.85)^2 - 1) * (MOM_RBNS_CP_1[3] + MOM_IBNR_CP_1[3])) / (6 * (MOM_RBNS_CP_1[2] + MOM_IBNR_CP_1[2])),
             qnorm(0.85) * sqrt(MOM_RBNS_CP_2[2] + MOM_IBNR_CP_2[2]),
             ((qnorm(0.85)^2 - 1) * (MOM_RBNS_CP_2[3] + MOM_IBNR_CP_2[3])) / (6 * (MOM_RBNS_CP_2[2] + MOM_IBNR_CP_2[2]))),
  "CBNI" = c(qnorm(0.85) * sqrt(MOM_CBNI_CP_1[2]),
             ((qnorm(0.85)^2 - 1) * MOM_CBNI_CP_1[3]) / (6 * MOM_CBNI_CP_1[2]),
             qnorm(0.85) * sqrt(MOM_CBNI_CP_2[2]),
             ((qnorm(0.85)^2 - 1) * MOM_CBNI_CP_2[3]) / (6 * MOM_CBNI_CP_2[2])))

# Risk group 5
# Data frame with the RA for different confidence levels and different
# assumptions: CP, Mult, Dir
RA_LIC_CL_1 <- ra_lic_cl(MOM_RBNS_CP_1, MOM_RBNS_MULT_1, MOM_RBNS_DIR_1,
                         MOM_IBNR_CP_1, MOM_IBNR_MULT_1, MOM_IBNR_DIR_1)
RA_LRC_CL_1 <- ra_lrc_cl(MOM_CBNI_CP_1, MOM_CBNI_MULT_1, MOM_CBNI_DIR_1)

# Add columns with the relative size of the RA compared to the mean
RA_LIC_CL_1 <- RA_LIC_CL_1 %>%
  mutate(RA_CP_PER = CP / Summary_mean$IBNS[1],
         RA_MULT_PER = MULT / Summary_mean$IBNS[1],
         RA_DIR_PER = DIR / Summary_mean$IBNS[1])
RA_LRC_CL_1 <- RA_LRC_CL_1 %>%
  mutate(RA_CP_PER = CP / Summary_mean$CBNI[1],
         RA_MULT_PER = MULT / Summary_mean$CBNI[1],
         RA_DIR_PER = DIR / Summary_mean$CBNI[1])

# Risk group 6
# Data frame with the RA for different confidence levels and different
# assumptions: CP, Mult, Dir
RA_LIC_CL_2 <- ra_lic_cl(MOM_RBNS_CP_2, MOM_RBNS_MULT_2, MOM_RBNS_DIR_2,
                         MOM_IBNR_CP_2, MOM_IBNR_MULT_2, MOM_IBNR_DIR_2)
RA_LRC_CL_2 <- ra_lrc_cl(MOM_CBNI_CP_2, MOM_CBNI_MULT_2, MOM_CBNI_DIR_2)

# Add columns with the relative size of the RA compared to the mean
RA_LIC_CL_2 <- RA_LIC_CL_2 %>%
  mutate(RA_CP_PER = CP / Summary_mean$IBNS[2],
         RA_MULT_PER = MULT / Summary_mean$IBNS[2],
         RA_DIR_PER = DIR / Summary_mean$IBNS[2])
RA_LRC_CL_2 <- RA_LRC_CL_2 %>%
  mutate(RA_CP_PER = CP / Summary_mean$CBNI[2],
         RA_MULT_PER = MULT / Summary_mean$CBNI[2],
         RA_DIR_PER = DIR / Summary_mean$CBNI[2])

# Graphics with the results ----

# Risk group 5 LIC
FIG_LIC_1 <- plot_ly(data = RA_LIC_CL_1, x = ~CL) %>%
  add_trace(y = ~RA_CP_PER, type = "scatter", name = "Compound Poisson",
            mode = "lines", line = list(color = "#5BC8AC"))
FIG_LIC_1 <- FIG_LIC_1 %>%
  add_trace(y = ~RA_MULT_PER, type = "scatter", name = "Multinomial",
            mode = "lines", line = list(color = "#E6D72A"))
FIG_LIC_1 <- FIG_LIC_1 %>%
  add_trace(y = ~RA_DIR_PER, type = "scatter", name = "Dirichlet",
            mode = "lines", line = list(color = "#F18D9E"))
FIG_LIC_1 <- FIG_LIC_1 %>%
  layout(xaxis = list(tickformat = "%", title = "Confidence level"),
         yaxis = list(tickformat = "%", title = "Risk adjustment"),
         legend = list(x = 0.1, y = 0.9),
         title = "Risk adjustment LIC for risk group 5")

# Risk group 5 LRC
FIG_LRC_1 <- plot_ly(data = RA_LRC_CL_1, x = ~CL) %>%
  add_trace(y = ~RA_CP_PER, type = "scatter", name = "Compound Poisson",
            mode = "lines", line = list(color = "#9BC01C"))
FIG_LRC_1 <- FIG_LRC_1 %>%
  add_trace(y = ~RA_MULT_PER, type = "scatter", name = "Multinomial",
            mode = "lines", line = list(color = "#FA6775"))
FIG_LRC_1 <- FIG_LRC_1 %>%
  add_trace(y = ~RA_DIR_PER, type = "scatter", name = "Dirichlet",
            mode = "lines", line = list(color = "#FFD64D"))
FIG_LRC_1 <- FIG_LRC_1 %>%
  layout(xaxis = list(tickformat = "%", title = "Confidence level"),
         yaxis = list(tickformat = "%", title = "Risk adjustment"),
         legend = list(x = 0.1, y = 0.9),
         title = "Risk adjustment LRC for risk group 5")

# Risk group 6 LIC
FIG_LIC_2 <- plot_ly(data = RA_LIC_CL_2, x = ~CL) %>%
  add_trace(y = ~RA_CP_PER, type = "scatter", name = "Compound Poisson",
            mode = "lines", line = list(color = "#5BC8AC"))
FIG_LIC_2 <- FIG_LIC_2 %>%
  add_trace(y = ~RA_MULT_PER, type = "scatter", name = "Multinomial",
            # BUGFIX: the original hex color was missing the leading "#"
            mode = "lines", line = list(color = "#1E656D"))
FIG_LIC_2 <- FIG_LIC_2 %>%
  add_trace(y = ~RA_DIR_PER, type = "scatter", name = "Dirichlet",
            mode = "lines", line = list(color = "#F62A00"))
FIG_LIC_2 <- FIG_LIC_2 %>%
  layout(xaxis = list(tickformat = "%", title = "Confidence level"),
         yaxis = list(tickformat = "%", title = "Risk adjustment"),
         legend = list(x = 0.1, y = 0.9),
         title = "Risk adjustment LIC for risk group 6")

# Risk group 6 LRC
FIG_LRC_2 <- plot_ly(data = RA_LRC_CL_2, x = ~CL) %>%
  add_trace(y = ~RA_CP_PER, type = "scatter", name = "Compound Poisson",
            mode = "lines", line = list(color = "#FD974F"))
FIG_LRC_2 <- FIG_LRC_2 %>%
  add_trace(y = ~RA_MULT_PER, type = "scatter", name = "Multinomial",
            mode = "lines", line = list(color = "#C60000"))
FIG_LRC_2 <- FIG_LRC_2 %>%
  add_trace(y = ~RA_DIR_PER, type = "scatter", name = "Dirichlet",
            mode = "lines", line = list(color = "#805A3B"))
FIG_LRC_2 <- FIG_LRC_2 %>%
  layout(xaxis = list(tickformat = "%", title = "Confidence level"),
         yaxis = list(tickformat = "%", title = "Risk adjustment"),
         legend = list(x = 0.1, y = 0.9),
         title = "Risk adjustment LRC for risk group 6")

# Display the figures
FIG_LIC_1
FIG_LIC_2
FIG_LRC_1
FIG_LRC_2
#' Produces graphs of IAAO standards over time
#'
#' @param stats the output of iaao_stats
#' @param ratios the output of reformat_data
#' @param min_reporting_yr the minimum year to plot
#' @param max_reporting_yr the maximum year to plot
#' @param jurisdiction_name the name of the jurisdiction analyzed
#' @return a list of captions and associated plots (cod_text, cod_plot, prd_text, prd_plot, prb_text, prb_plot)
#' @importFrom magrittr %>%
#' @export
iaao_graphs <- function(stats, ratios, min_reporting_yr, max_reporting_yr, jurisdiction_name){
  ii <- min_reporting_yr:max_reporting_yr
  # Statistics for the most recent year drive the caption text
  max_stats <- stats[stats["Year"] == max_reporting_yr, ]
  rect_alpha <- 0.25

  # COD — uniformity; IAAO benchmark is 15 or below
  cod_standard <- ifelse(max_stats$COD < 15, "did", "did not")
  cod_text <- paste0("For ", max_reporting_yr, ", the COD in ", jurisdiction_name, " was ",
                     round(max_stats$COD, 2), " which <b>", cod_standard,
                     " meet</b> the IAAO standard for uniformity. ")
  cod_plot <- ggplot(data = stats, aes(x = .data$Year)) +
    geom_line(aes(y = .data$COD), color = "#2C4894", size = 1.5) +
    geom_point(aes(y = .data$COD), color = "#2C4894", size = 3) +
    # Dashed lines bound the 95% confidence interval around the estimate
    geom_line(aes(y = .data$COD + 1.96 * .data$COD_SE), linetype = "dashed") +
    geom_line(aes(y = .data$COD - 1.96 * .data$COD_SE), linetype = "dashed") +
    my_theme_rotated +
    labs(caption = "IAAO Benchmark: 15 or below (shaded). Dotted lines represent the 95% Confidence Interval.",
         y = "COD", x = "Year") +
    annotate("rect", xmin = as.numeric(min_reporting_yr), xmax = as.numeric(max_reporting_yr),
             ymin = 10, ymax = 15, alpha = rect_alpha) +
    scale_x_continuous(breaks = ii)

  # PRD — vertical equity; IAAO benchmark is 0.98 to 1.03
  prd_text <- paste0(" In ", max_reporting_yr, ", the PRD in ", jurisdiction_name, ", was ",
                     round(max_stats$PRD, 3), " which <b>",
                     ifelse(dplyr::between(max_stats$PRD, 0.98, 1.03), "meets ", "does not meet "),
                     "</b> the IAAO standard for vertical equity.")
  prd_plot <- ggplot(data = stats, aes(x = .data$Year)) +
    geom_line(aes(y = .data$PRD), color = "#2C4894", size = 1.5) +
    geom_point(aes(y = .data$PRD), color = "#2C4894", size = 3) +
    geom_line(aes(y = .data$PRD - 1.96 * .data$PRD_SE), linetype = "dashed") +
    geom_line(aes(y = .data$PRD + 1.96 * .data$PRD_SE), linetype = "dashed") +
    my_theme_rotated +
    annotate("rect", xmin = as.numeric(min_reporting_yr), xmax = as.numeric(max_reporting_yr),
             ymin = 0.98, ymax = 1.03, alpha = rect_alpha) +
    # BUGFIX: axis labels were swapped (y = "Year", x = "PRD"); PRD is on the
    # y axis and Year on the x axis, matching cod_plot and prb_plot.
    labs(caption = "IAAO Benchmark: 0.98 to 1.03 (shaded). Dotted lines represent the 95% Confidence Interval.",
         y = "PRD", x = "Year") +
    scale_x_continuous(breaks = ii)

  # PRB — vertical equity; IAAO benchmark is +/- 0.05
  prb_text <- paste0("In ", max_reporting_yr, ", the PRB in ", jurisdiction_name, " was ",
                     round(max_stats$PRB, 3),
                     " which indicates that sales ratios ",
                     ifelse(max_stats$PRB > 0, "increase", "decrease"),
                     " by ", scales::percent(abs(max_stats$PRB), 0.1),
                     " when home values double.", " This <b>",
                     ifelse(dplyr::between(max_stats$PRB, -0.05, 0.05), "meets ", "does not meet "),
                     "</b>the IAAO standard.")
  prb_plot <- ggplot(data = stats, aes(x = .data$Year)) +
    geom_line(aes(y = .data$PRB), color = "#2C4894", size = 1.5) +
    geom_point(aes(y = .data$PRB), color = "#2C4894", size = 3) +
    geom_line(aes(y = .data$PRB - 1.96 * .data$PRB_SE), linetype = "dashed") +
    geom_line(aes(y = .data$PRB + 1.96 * .data$PRB_SE), linetype = "dashed") +
    my_theme_rotated +
    labs(caption = "IAAO Benchmark: +/- 0.05 (shaded). Dotted lines represent the 95% Confidence Interval.",
         y = "PRB", x = "Year") +
    annotate("rect", xmin = as.numeric(min_reporting_yr), xmax = as.numeric(max_reporting_yr),
             ymin = -.05, ymax = .05, alpha = rect_alpha) +
    scale_x_continuous(breaks = ii)

  return(list(cod_text, cod_plot, prd_text, prd_plot, prb_text, prb_plot))
}
/R/iaao_graphs.R
permissive
jrockower/cmfproperty
R
false
false
3,904
r
#' Produces graphs of IAAO standards over time
#'
#' @param stats the output of iaao_stats
#' @param ratios the output of reformat_data
#' @param min_reporting_yr the minimum year to plot
#' @param max_reporting_yr the maximum year to plot
#' @param jurisdiction_name the name of the jurisdiction analyzed
#' @return a list of captions and associated plots (cod_text, cod_plot, prd_text, prd_plot, prb_text, prb_plot)
#' @importFrom magrittr %>%
#' @export
iaao_graphs <- function(stats, ratios, min_reporting_yr, max_reporting_yr, jurisdiction_name){
  ii <- min_reporting_yr:max_reporting_yr
  # Statistics for the most recent year drive the caption text
  max_stats <- stats[stats["Year"] == max_reporting_yr, ]
  rect_alpha <- 0.25

  # COD — uniformity; IAAO benchmark is 15 or below
  cod_standard <- ifelse(max_stats$COD < 15, "did", "did not")
  cod_text <- paste0("For ", max_reporting_yr, ", the COD in ", jurisdiction_name, " was ",
                     round(max_stats$COD, 2), " which <b>", cod_standard,
                     " meet</b> the IAAO standard for uniformity. ")
  cod_plot <- ggplot(data = stats, aes(x = .data$Year)) +
    geom_line(aes(y = .data$COD), color = "#2C4894", size = 1.5) +
    geom_point(aes(y = .data$COD), color = "#2C4894", size = 3) +
    # Dashed lines bound the 95% confidence interval around the estimate
    geom_line(aes(y = .data$COD + 1.96 * .data$COD_SE), linetype = "dashed") +
    geom_line(aes(y = .data$COD - 1.96 * .data$COD_SE), linetype = "dashed") +
    my_theme_rotated +
    labs(caption = "IAAO Benchmark: 15 or below (shaded). Dotted lines represent the 95% Confidence Interval.",
         y = "COD", x = "Year") +
    annotate("rect", xmin = as.numeric(min_reporting_yr), xmax = as.numeric(max_reporting_yr),
             ymin = 10, ymax = 15, alpha = rect_alpha) +
    scale_x_continuous(breaks = ii)

  # PRD — vertical equity; IAAO benchmark is 0.98 to 1.03
  prd_text <- paste0(" In ", max_reporting_yr, ", the PRD in ", jurisdiction_name, ", was ",
                     round(max_stats$PRD, 3), " which <b>",
                     ifelse(dplyr::between(max_stats$PRD, 0.98, 1.03), "meets ", "does not meet "),
                     "</b> the IAAO standard for vertical equity.")
  prd_plot <- ggplot(data = stats, aes(x = .data$Year)) +
    geom_line(aes(y = .data$PRD), color = "#2C4894", size = 1.5) +
    geom_point(aes(y = .data$PRD), color = "#2C4894", size = 3) +
    geom_line(aes(y = .data$PRD - 1.96 * .data$PRD_SE), linetype = "dashed") +
    geom_line(aes(y = .data$PRD + 1.96 * .data$PRD_SE), linetype = "dashed") +
    my_theme_rotated +
    annotate("rect", xmin = as.numeric(min_reporting_yr), xmax = as.numeric(max_reporting_yr),
             ymin = 0.98, ymax = 1.03, alpha = rect_alpha) +
    # BUGFIX: axis labels were swapped (y = "Year", x = "PRD"); PRD is on the
    # y axis and Year on the x axis, matching cod_plot and prb_plot.
    labs(caption = "IAAO Benchmark: 0.98 to 1.03 (shaded). Dotted lines represent the 95% Confidence Interval.",
         y = "PRD", x = "Year") +
    scale_x_continuous(breaks = ii)

  # PRB — vertical equity; IAAO benchmark is +/- 0.05
  prb_text <- paste0("In ", max_reporting_yr, ", the PRB in ", jurisdiction_name, " was ",
                     round(max_stats$PRB, 3),
                     " which indicates that sales ratios ",
                     ifelse(max_stats$PRB > 0, "increase", "decrease"),
                     " by ", scales::percent(abs(max_stats$PRB), 0.1),
                     " when home values double.", " This <b>",
                     ifelse(dplyr::between(max_stats$PRB, -0.05, 0.05), "meets ", "does not meet "),
                     "</b>the IAAO standard.")
  prb_plot <- ggplot(data = stats, aes(x = .data$Year)) +
    geom_line(aes(y = .data$PRB), color = "#2C4894", size = 1.5) +
    geom_point(aes(y = .data$PRB), color = "#2C4894", size = 3) +
    geom_line(aes(y = .data$PRB - 1.96 * .data$PRB_SE), linetype = "dashed") +
    geom_line(aes(y = .data$PRB + 1.96 * .data$PRB_SE), linetype = "dashed") +
    my_theme_rotated +
    labs(caption = "IAAO Benchmark: +/- 0.05 (shaded). Dotted lines represent the 95% Confidence Interval.",
         y = "PRB", x = "Year") +
    annotate("rect", xmin = as.numeric(min_reporting_yr), xmax = as.numeric(max_reporting_yr),
             ymin = -.05, ymax = .05, alpha = rect_alpha) +
    scale_x_continuous(breaks = ii)

  return(list(cod_text, cod_plot, prd_text, prd_plot, prb_text, prb_plot))
}
# server.R -- Shiny server for the Lab Report Corrector app.
#
# Scans user-entered text for known "poor phrases" (loaded from
# manualIdentifiers.r), wraps every character of the input in HTML <span>s so
# that offending phrases become clickable/highlighted, and renders solution
# feedback for whichever phrase the user selected.
#
# Objects this file uses but does not define (presumably provided by
# load("manualIdentifiers.r") -- TODO confirm): poorPhrasesDocument,
# issueIds, issueIdToSolutionMap, solutions. getPage() is also referenced
# but not defined here.

# Load packages
library(shiny)
library(dplyr)

# Load error identification data
load("manualIdentifiers.r")

# Generate error identification regular expressions
# (lookbehind / lookahead alternations over every known bad phrase)
splitDelimitersOne <- lapply(poorPhrasesDocument[,"phrase"], function(item){
  paste0("(?<=",item,")")
}) %>% paste(., collapse = "|")
splitDelimitersTwo <- lapply(poorPhrasesDocument[,"phrase"], function(item){
  paste0("(?=",item,")")
}) %>% paste0(., collapse = "|")

# Generate HTML output formatting objects
normalSpanFront <- "<span>"
spanEnd <- "</span>"
badSpanFrontPartOne <- "<span onclick='showProblem("
badSpanFrontPartThree <- ")' class='badPhrase'>"
badSpanFrontPartFour <- ")' class='badPhrase currentlySelectedBadPhrase'>"
betweenIssueMarker <- "<br><br>"
issueTitleLead <- "<span class='issueTitle'>"
issueTitleEnd <- "</span>"
issueNumberSpan <- "<span class='issueNumber'>"
issueNumberSpanEnd <- ". </span>"
solutionStart <- "<br><span class='solution'>"
solutionEnd <- "</span>"
previousIssueFieldOutput <- "<span></span>"

# Server logic
shinyServer(function(session, input, output) {
  # Set UI
  # NOTE(review): the renderUI() result is not assigned to any output slot,
  # so this call appears to have no effect -- confirm whether it is needed.
  renderUI({getPage(includeHTML("www/index.html"))})

  # Input tab
  #-------------------------------------
  # Set empty objects that will be modified pending input.
  # These act as session-level state; inner reactives mutate them via <<-.
  finalOutput <- ""            # last feedback HTML rendered for issueField
  textCharList <- list()       # per-character list of issue ids found there
  outputTextList <- list()     # per-character HTML spans
  colorDeterminer <- list()    # per-character TRUE/FALSE "highlight" flags
  processedInputText <- list()
  currentInput <- ""

  # Function to set input data when the user enters text in input field
  getInput <- reactive({
    input$inputField
  })

  # When user inputs text, update the output field
  observeEvent(input$inputField, {
    # Retrieve input
    inputText <- getInput()
    output$outputField <- renderText({
      # Handle the no text condition
      if (nchar(inputText) == 0) return("")
      currentInput <<- inputText
      # Find bad phrases: one list slot per character of the input
      listText <- rep(list(c()), nchar(inputText))
      listChars <- strsplit(inputText, "") %>% unlist()
      # For each bad phrase...
      lapply(1:nrow(poorPhrasesDocument), function(delimiterNumber){
        # Find the locations of each bad phrase.
        # ignore.case is the inverse of the phrase's "mindCase" flag.
        delimiterPhrase <- poorPhrasesDocument[delimiterNumber, "phrase"]
        temp <- gregexpr(delimiterPhrase, inputText,
                         ignore.case = !poorPhrasesDocument[delimiterNumber, "mindCase"])[[1]]
        tempDataFrame <- data.frame(
          "matchPosition" = as.vector(temp),
          "matchLength" = attr(temp, "match.length")
        )
        # Identify the solution(s) for this issue
        currentIssueId <- poorPhrasesDocument[delimiterNumber, "issueIds"]
        # Record this issue id against every character position it covers.
        # gregexpr reports -1/-1 when there is no match; those rows are
        # skipped via `next` below.
        for (matchNumber in 1:nrow(tempDataFrame)) {
          matchPosition <- tempDataFrame[matchNumber,"matchPosition"]
          matchLength <- tempDataFrame[matchNumber,"matchLength"]
          if (matchLength == -1) next
          positionMatches <- matchPosition:(matchPosition + matchLength - 1)
          # lapply used purely for its side effect on listText (via <<-)
          lapply(positionMatches, function(index){
            listText[[index]] <<- c(listText[[index]], currentIssueId)
          })
        }
      })
      textCharList <<- listText
      # Determine the number of problems
      problemText <- input$problemNumber
      # NOTE(review): `problemText` was just assigned, so
      # exists("problemText") is always TRUE and the first branch is dead
      # code; only the length(problemText) == 0 fallback can ever fire.
      if (!exists("problemText")) {
        problemText <- 0
      } else if (length(problemText) == 0) {
        problemText <- 0
      }
      probNum <- as.numeric(problemText)
      # Determine text colors
      if (probNum == 0) {
        # No problem selected: nothing is highlighted as "current"
        colorDeterminer <<- rep(FALSE, length(textCharList))
      } else {
        # Highlight every character that shares an issue id with the
        # character the user clicked (position probNum).
        currentIssues <- textCharList[[probNum]]
        colorDeterminer <<- lapply(1:length(textCharList), function(currentItem){
          # NOTE(review): the [1] subsets the scalar result of is.null(),
          # not the list element -- likely intended as plain is.null(...).
          if (is.null(textCharList[[currentItem]])[1]) return(FALSE)
          currentValue <- textCharList[[currentItem]]
          if (any(currentValue %in% currentIssues)) return(TRUE)
          return(FALSE)
        }) %>% unlist()
      }
      # Set text colors: build one <span> per input character
      outputTextList <<- lapply(1:length(listChars), function(index){
        if (length(listText[[index]]) == 0) {
          # Character not covered by any bad phrase: plain span
          return(paste0(normalSpanFront, listChars[[index]], spanEnd))
        }
        # Bad-phrase character: clickable span, with an extra CSS class
        # when it belongs to the currently selected problem.
        if (!colorDeterminer[index]) return(paste0(badSpanFrontPartOne, index,
                                                   badSpanFrontPartThree,
                                                   listChars[[index]], spanEnd))
        return(paste0(badSpanFrontPartOne, index, badSpanFrontPartFour,
                      listChars[[index]], spanEnd))
      })
      outputText <- outputTextList %>% unlist() %>% paste0(., collapse = "")
      return(outputText)
    })
  })

  # Set feedback HTML for the currently selected problem
  output$issueField <- renderText({
    # Identify problem number (character index the user clicked)
    problemText <- input$problemNumber
    probNum <- as.numeric(problemText)
    # Identify the number of problems; bail out (returning the previously
    # rendered feedback) unless exactly one problem index is selected.
    probNumLength <- length(probNum)
    if (probNumLength != 1) return(finalOutput)
    # Determine current issue: issue ids recorded at that character position
    issueNumber <- textCharList[[probNum]] %>% unique()
    allIssues <- issueIds[issueNumber] %>% unlist() %>% unique()
    # Determine solutions mapped to those issues
    solutionIds <- issueIdToSolutionMap[issueIdToSolutionMap[,"issueId"] %in% allIssues,"solutionId"]
    allSolutions <- solutions[solutionIds]
    # Retrieve and format solution HTML (numbered title + solution body)
    unlistedOutput <- lapply(1:length(allSolutions), function(currentSolutionNumber){
      solutionData <- allSolutions[[currentSolutionNumber]]
      title <- solutionData$title
      solution <- solutionData$solution
      return(
        paste0(issueNumberSpan, currentSolutionNumber,issueNumberSpanEnd,
               issueTitleLead, title, issueTitleEnd,
               solutionStart, solution, solutionEnd)
      )
    }) %>% unlist() %>% as.vector()
    # Merge all solution feedback into a single HTML block
    finalOutput <- paste0(unlistedOutput, collapse = betweenIssueMarker)
    # Cache at session scope so the next invalidation can return it early
    finalOutput <<- finalOutput
    return(finalOutput)
  })
})
/server.R
no_license
zcolburn/Lab-Report-Corrector_Shiny-App
R
false
false
5,926
r
# Load packages library(shiny) library(dplyr) # Load error identification data load("manualIdentifiers.r") # Generate error identification regular expressions splitDelimitersOne <- lapply(poorPhrasesDocument[,"phrase"], function(item){ paste0("(?<=",item,")") }) %>% paste(., collapse = "|") splitDelimitersTwo <- lapply(poorPhrasesDocument[,"phrase"], function(item){ paste0("(?=",item,")") }) %>% paste0(., collapse = "|") # Generate HTML output formatting objects normalSpanFront <- "<span>" spanEnd <- "</span>" badSpanFrontPartOne <- "<span onclick='showProblem(" badSpanFrontPartThree <- ")' class='badPhrase'>" badSpanFrontPartFour <- ")' class='badPhrase currentlySelectedBadPhrase'>" betweenIssueMarker <- "<br><br>" issueTitleLead <- "<span class='issueTitle'>" issueTitleEnd <- "</span>" issueNumberSpan <- "<span class='issueNumber'>" issueNumberSpanEnd <- ". </span>" solutionStart <- "<br><span class='solution'>" solutionEnd <- "</span>" previousIssueFieldOutput <- "<span></span>" # Server logic shinyServer(function(session, input, output) { # Set UI renderUI({getPage(includeHTML("www/index.html"))}) # Input tab #------------------------------------- # Set empty objects that will be modified pending input finalOutput <- "" textCharList <- list() outputTextList <- list() colorDeterminer <- list() processedInputText <- list() currentInput <- "" # Function to set input data when the user enters text in input field getInput <- reactive({ input$inputField }) # When user inputs text, update the output field observeEvent(input$inputField, { # Retrieve input inputText <- getInput() output$outputField <- renderText({ # Handle the no text condition if (nchar(inputText) == 0) return("") currentInput <<- inputText # Find bad phrases listText <- rep(list(c()), nchar(inputText)) listChars <- strsplit(inputText, "") %>% unlist() # For each bad phrase... 
lapply(1:nrow(poorPhrasesDocument), function(delimiterNumber){ # Find the locations of each bad phrase delimiterPhrase <- poorPhrasesDocument[delimiterNumber, "phrase"] temp <- gregexpr(delimiterPhrase, inputText, ignore.case = !poorPhrasesDocument[delimiterNumber, "mindCase"])[[1]] tempDataFrame <- data.frame( "matchPosition" = as.vector(temp), "matchLength" = attr(temp, "match.length") ) # Identify the solution(s) for this issue currentIssueId <- poorPhrasesDocument[delimiterNumber, "issueIds"] for (matchNumber in 1:nrow(tempDataFrame)) { matchPosition <- tempDataFrame[matchNumber,"matchPosition"] matchLength <- tempDataFrame[matchNumber,"matchLength"] if (matchLength == -1) next positionMatches <- matchPosition:(matchPosition + matchLength - 1) lapply(positionMatches, function(index){ listText[[index]] <<- c(listText[[index]], currentIssueId) }) } }) textCharList <<- listText # Determine the number of problems problemText <- input$problemNumber if (!exists("problemText")) { problemText <- 0 } else if (length(problemText) == 0) { problemText <- 0 } probNum <- as.numeric(problemText) # Determine text colors if (probNum == 0) { colorDeterminer <<- rep(FALSE, length(textCharList)) } else { currentIssues <- textCharList[[probNum]] colorDeterminer <<- lapply(1:length(textCharList), function(currentItem){ if (is.null(textCharList[[currentItem]])[1]) return(FALSE) currentValue <- textCharList[[currentItem]] if (any(currentValue %in% currentIssues)) return(TRUE) return(FALSE) }) %>% unlist() } # Set text colors outputTextList <<- lapply(1:length(listChars), function(index){ if (length(listText[[index]]) == 0) { return(paste0(normalSpanFront, listChars[[index]], spanEnd)) } if (!colorDeterminer[index]) return(paste0(badSpanFrontPartOne, index, badSpanFrontPartThree, listChars[[index]], spanEnd)) return(paste0(badSpanFrontPartOne, index, badSpanFrontPartFour, listChars[[index]], spanEnd)) }) outputText <- outputTextList %>% unlist() %>% paste0(., collapse = "") 
return(outputText) }) }) # Set feedback HTML output$issueField <- renderText({ # Identify problem number problemText <- input$problemNumber probNum <- as.numeric(problemText) # Identify the number of problems probNumLength <- length(probNum) if (probNumLength != 1) return(finalOutput) # Determine current issue issueNumber <- textCharList[[probNum]] %>% unique() allIssues <- issueIds[issueNumber] %>% unlist() %>% unique() # Determine solutions solutionIds <- issueIdToSolutionMap[issueIdToSolutionMap[,"issueId"] %in% allIssues,"solutionId"] allSolutions <- solutions[solutionIds] # Retrieve and format solution HTML unlistedOutput <- lapply(1:length(allSolutions), function(currentSolutionNumber){ solutionData <- allSolutions[[currentSolutionNumber]] title <- solutionData$title solution <- solutionData$solution return( paste0(issueNumberSpan, currentSolutionNumber,issueNumberSpanEnd, issueTitleLead, title, issueTitleEnd, solutionStart, solution, solutionEnd) ) }) %>% unlist() %>% as.vector() # Merge all solution feedback into a single HTML block finalOutput <- paste0(unlistedOutput, collapse = betweenIssueMarker) finalOutput <<- finalOutput return(finalOutput) }) })
# Plot_diff.R -- plot differences in NPP and mean growth rate between
# high- and low-diversity ROMS model runs (Fig. S4).
#
# NOTE(review): depends on helpers defined elsewhere in the project
# (integann, Sur_mean, DrawRect, image2D, jet.colors, Lon, Lat, lon1, lon2);
# this script is not self-contained.

#' Clamp the values of `dat` into the closed interval [ZLIM[1], ZLIM[2]].
dat_lim <- function(dat, ZLIM){
  dat[dat < ZLIM[1]] <- ZLIM[1]
  dat[dat > ZLIM[2]] <- ZLIM[2]
  return(dat)
}

library(MASS)

# Calculate production log ratios between high and low diversity treatments:
prefix <- '~/Roms_tools/Run/NPacS1_'
VTR <- c(0, 0.1)
avgFiles <- paste0(prefix, VTR, '/npacS_avg.nc')
bioFiles <- paste0(prefix, VTR, '/npacS_dbio_avg.nc')

NPP_H <- integann(bioFiles[2])
NPP_L <- integann(bioFiles[1])
NPP.dff <- log(NPP_H/NPP_L)  # Difference between high and low diversity

# Corners of the two study boxes highlighted on the map (degrees E / N)
x1 <- -165 + 360; x2 <- -155 + 360; y1 <- 22; y2 <- 28
X1 <- 190; X2 <- 225; Y1 <- -5; Y2 <- 5

#' Draw the two labelled study regions (A: northern, B: southern) on the
#' current map device.
DRAW <- function(){
  # Northern area
  DrawRect(x1, y1, x2, y2)
  x <- (x1+x2)/2
  y <- (y1+y2)/2
  text(x,y,'A')
  # Southern area
  DrawRect(X1, Y1, X2, Y2)
  X <- (X1+X2)/2
  Y <- (Y1+Y2)/2
  text(X,Y,'B')
}

#' Surface mean growth rate corrected for trait variance
#' (second-order Taylor expansion: mu + d2mu/dL2 * VAR / 2).
muAvg <- function(avgfile, biofile){
  PHY.a <- Sur_mean(avgfile, 'PHYTO')
  mu.a <- Sur_mean(biofile, 'omuNet')
  d2mudL2 <- Sur_mean(biofile, 'od2mudl2')
  LNV.a <- Sur_mean(avgfile, 'LNV')
  VAR.a <- Sur_mean(avgfile, 'VAR')
  # Convert biomass-weighted totals to per-biomass mean and variance
  LNV.a <- LNV.a/PHY.a
  VAR.a <- VAR.a/PHY.a - LNV.a^2
  muAvg <- mu.a + d2mudL2*VAR.a/2
  return(muAvg)
}

muAvg_L <- muAvg(avgFiles[1], bioFiles[1])
muAvg_H <- muAvg(avgFiles[2], bioFiles[2])
muAvg.dff <- log(muAvg_H/muAvg_L)

# Calculate kernel densities of mu vs. NPP:
f2 <- data.frame(x = as.numeric(muAvg.dff), y = as.numeric(NPP.dff))
f2 <- na.omit(f2)
f2 <- kde2d(f2$x, f2$y, n = 50, lims = c(-0.1, .1, -.1, .1))

pdf('FigS4_NPP_VAR_diff.pdf', width = 6, height = 4)
op <- par(font.lab = 1,
          family = "serif",
          mar = c(4, 4, 1.5, 2),
          mgp = c(2.3, 1, 0),
          cex.lab = 1.4,
          lwd = 1.5,
          mfcol = c(1, 1),
          cex.axis = 1)

image2D(NPP.dff, Lon, Lat, col = jet.colors(18),
        # zlim = ZLIM,
        xaxt = 'n', frame = FALSE,
        xlab = "Longitude (ºE)",
        ylab = "Latitude (ºN)")
DRAW()
# BUG FIX: in the original, the second line of this commented-out mtext()
# call was left uncommented ("low diversity', adj=0, line = .5)"), which
# made the whole script a syntax error. Both lines are now commented.
#mtext('a) NPP log ratio of high vs.
#low diversity', adj=0, line = .5)
axis(side = 1, at = lon1, labels = lon2)
#image2D(f2, col = jet.colors(20),
#        zlim = c(0,.05),
#        xlab='Productivity log ratios',
#        ylab='NPP log ratios')
#
##plot(as.vector(muAvg.dff), as.vector(NPP.dff), pch=16, cex=.5,
#abline(0,1)
#mtext('b)', adj=0, line = .5)
dev.off()
/Rscripts/Plot_diff.R
permissive
BingzhangChen/ROMS-NPZDcont
R
false
false
2,328
r
dat_lim <- function(dat, ZLIM){ dat[dat < ZLIM[1]] = ZLIM[1] dat[dat > ZLIM[2]] = ZLIM[2] return(dat) } library(MASS) #Calculate production log ratios between high and low diversity treatments: prefix <- '~/Roms_tools/Run/NPacS1_' VTR <- c(0, 0.1) avgFiles <- paste0(prefix, VTR, '/npacS_avg.nc') bioFiles <- paste0(prefix, VTR, '/npacS_dbio_avg.nc') NPP_H <- integann(bioFiles[2]) NPP_L <- integann(bioFiles[1]) NPP.dff <- log(NPP_H/NPP_L) #Difference between high and low diversity x1 <--165+360; x2 <- -155+360; y1 <- 22; y2 <- 28 X1 <- 190; X2 <- 225; Y1 <- -5; Y2 <- 5 DRAW <- function(){ #Northern area DrawRect(x1, y1, x2, y2) x <- (x1+x2)/2 y <- (y1+y2)/2 text(x,y,'A') #Southern area DrawRect(X1, Y1, X2, Y2) X <- (X1+X2)/2 Y <- (Y1+Y2)/2 text(X,Y,'B') } muAvg <- function(avgfile, biofile){ PHY.a <- Sur_mean(avgfile, 'PHYTO') mu.a <- Sur_mean(biofile, 'omuNet') d2mudL2 <- Sur_mean(biofile, 'od2mudl2') LNV.a <- Sur_mean(avgfile, 'LNV') VAR.a <- Sur_mean(avgfile, 'VAR') LNV.a <- LNV.a/PHY.a VAR.a <- VAR.a/PHY.a - LNV.a^2 muAvg <- mu.a + d2mudL2*VAR.a/2 return(muAvg) } muAvg_L <- muAvg(avgFiles[1], bioFiles[1]) muAvg_H <- muAvg(avgFiles[2], bioFiles[2]) muAvg.dff <- log(muAvg_H/muAvg_L) #Calculate kernel densities of mu vs. NPP: f2 <- data.frame(x=as.numeric(muAvg.dff), y=as.numeric(NPP.dff)) f2 <- na.omit(f2) f2 <- kde2d(f2$x, f2$y, n = 50, lims = c(-0.1, .1, -.1, .1)) pdf('FigS4_NPP_VAR_diff.pdf',width=6, height=4) op <- par( font.lab = 1, family ="serif", mar = c(4,4,1.5,2), mgp = c(2.3,1,0), cex.lab = 1.4, lwd = 1.5, mfcol = c(1,1), cex.axis= 1) image2D(NPP.dff, Lon, Lat, col = jet.colors(18), # zlim = ZLIM, xaxt = 'n',frame = F, xlab = "Longitude (ºE)", ylab = "Latitude (ºN)") DRAW() #mtext('a) NPP log ratio of high vs. 
low diversity', adj=0, line = .5) axis(side=1, at = lon1, labels=lon2) #image2D(f2, col = jet.colors(20), # zlim = c(0,.05), # xlab='Productivity log ratios', # ylab='NPP log ratios') # ##plot(as.vector(muAvg.dff), as.vector(NPP.dff), pch=16, cex=.5, #abline(0,1) #mtext('b)', adj=0, line = .5) dev.off()
# Classify Emails
# Ye (Kate) Cai
# 2019-04-26

###### Overview #############################################################
# We have a data set of emails. We would like to classify "fake news" from real.
# The data have raw features: text body, news title, and news source. The
# response is "Real" or "Fake".

# install.packages("tidyverse")
library(tidyverse)

fakeNewsTrain_df <- read_csv("fake_news_train.csv")

# Explore
anyNA(fakeNewsTrain_df)
dim(fakeNewsTrain_df)

### Goals ###
# Build a model to discriminate real news from fake news, and also construct
# visuals to aid in the model description to the masses.
# For an overview of text mining in R, see
# https://www.tidytextmining.com/index.html

###### Pre-processing #######################################################
# We need to transform our data matrix into "tidy text".
#install.packages("tidytext")
library(tidytext)

# Row id used to keep title and body tokens from the same article together
fakeNewsTrain_df <- fakeNewsTrain_df %>%
  mutate(ID = 1:nrow(fakeNewsTrain_df))

# Tokenize titles and bodies separately, tagging the origin of each token
splitTitles_df <- fakeNewsTrain_df %>%
  unnest_tokens(word, title) %>%
  mutate(textFromBody = FALSE) %>%
  select(-text)

splitText_df <- fakeNewsTrain_df %>%
  unnest_tokens(word, text) %>%
  mutate(textFromBody = TRUE) %>%
  select(-title)

splitFakeNews_df <- bind_rows(
  splitText_df,
  splitTitles_df
) %>%
  arrange(ID)

###### Clean the Text #######################################################
# adding stop words
View(stop_words)

# we found 12 words to add to the stop_words table (this is just an example)
numWords <- 12
myStopWords_df <- tibble(
  word = c(
    "your", "yours", "yourself", "yourselves",
    "he", "him", "his", "himself",
    "she", "her", "hers", "herself"
  ),
  lexicon = rep("ye", numWords)
)

# now add them
myStopWords_df <- bind_rows(
  stop_words,
  myStopWords_df
)

# Remove stop words
cleanSplitFN_df <- anti_join(
  splitFakeNews_df,
  myStopWords_df,
  by = "word"
)

### Clean Bad Entries ###
# We have some entries that are gibberish alphanumerics
cleanSplitFN_df %>%
  select(word) %>%
  arrange(word) %>%
  View()

# Split off rows with numbers
cleanSplitFN_df <- cleanSplitFN_df %>%
  filter(!str_detect(word, "[0-9]"))
# This removes websites with numbers in them, so not ideal

###### Explore Data #########################################################
# Plot most common words in body
cleanSplitFN_df %>%
  filter(textFromBody == TRUE) %>%
  group_by(label) %>%
  count(word, sort = TRUE) %>%
  slice(1:20) %>%
  ggplot() +
  geom_col(aes(x = word, y = n)) +
  coord_flip() +
  ggtitle("Most Common Words in News Text Body") +
  facet_grid(~label)

# Plot most common words in title
cleanSplitFN_df %>%
  filter(textFromBody == FALSE) %>%
  group_by(label) %>%
  count(word, sort = TRUE) %>%
  slice(1:20) %>%
  ggplot() +
  geom_col(aes(x = word, y = n)) +
  coord_flip() +
  ggtitle("Most Common Words in News Text Title") +
  facet_grid(~label)

# Find words that only exist in fake emails:
anti_join(
  cleanSplitFN_df %>% filter(label == "Fake"),
  cleanSplitFN_df %>% filter(label == "Real"),
  by = "word"
) %>%
  count(word, sort = TRUE)

# Find words that only exist in real emails:
anti_join(
  cleanSplitFN_df %>% filter(label == "Real"),
  cleanSplitFN_df %>% filter(label == "Fake"),
  by = "word"
) %>%
  count(word, sort = TRUE)

###### Remove Equally-occuring Words ########################################
# Remove words with roughly the same chance of occurance in each type of news
# article
summary(as.factor(cleanSplitFN_df$label))

# 17k counts, but we need 17k probabilities
cleanSplitFN_df %>%
  group_by(label, textFromBody) %>%
  count(word)

### Split Data by Group ###
# Split into four lists (Fake/Real x Body/Title)
cleanSplitFN2_df <- cleanSplitFN_df %>%
  mutate(textFrom = ifelse(textFromBody, "Body", "Title")) %>%
  select(-textFromBody)
cleanSplitFN2_df <- cleanSplitFN2_df %>%
  mutate(group = paste0(label, "_x_", textFrom)) %>%
  select(-label, -textFrom)

cleanFN_ls <- split(
  cleanSplitFN2_df,
  f = cleanSplitFN2_df$group
)

# Add Probabilities for Each Word
cleanWordProbs_ls <- lapply(cleanFN_ls, function(df){
  nObs <- nrow(df)
  df %>%
    count(word) %>%
    # "n" is the column added by the count() function
    mutate(prob = n / nObs)
})

### Words with Low Discriminatory Power ###
# Rows with words to keep (occurrence ratio outside [1/1.5, 1.5]):
inner_join(
  cleanWordProbs_ls$Fake_x_Body,
  cleanWordProbs_ls$Real_x_Body,
  by = "word"
) %>%
  mutate(ratio = prob.x / prob.y) %>%
  filter((ratio < 1 / 1.5) | (ratio > 1.5))
# This cuts from 3973 to 2047 words

# Words to Cut
lowPowerBodyWords_char <- inner_join(
  cleanWordProbs_ls$Fake_x_Body,
  cleanWordProbs_ls$Real_x_Body,
  by = "word"
) %>%
  mutate(ratio = prob.x / prob.y) %>%
  filter((ratio > 1 / 1.5) & (ratio < 1.5)) %>%
  select(word) %>%
  unlist() %>%
  as.character()

# Rows with title words to keep:
inner_join(
  cleanWordProbs_ls$Fake_x_Title,
  cleanWordProbs_ls$Real_x_Title,
  by = "word"
) %>%
  mutate(ratio = prob.x / prob.y) %>%
  filter((ratio < 1 / 1.5) | (ratio > 1.5))
# This cuts from 155 to 77 words

# Words to Cut
lowPowerTitleWords_char <- inner_join(
  cleanWordProbs_ls$Fake_x_Title,
  cleanWordProbs_ls$Real_x_Title,
  by = "word"
) %>%
  mutate(ratio = prob.x / prob.y) %>%
  filter((ratio > 1 / 1.5) & (ratio < 1.5)) %>%
  select(word) %>%
  unlist() %>%
  as.character()

### Join Data Frames and Filter Out LP Words ###
LPwords_char <- c(lowPowerBodyWords_char, lowPowerTitleWords_char)

# Start with 17321 unique words
cleanWordProbs_df <- cleanWordProbs_ls %>%
  bind_rows(.id = "group") %>%
  separate(group, c("label", "textFrom"), "_x_") %>%
  filter(!(word %in% LPwords_char))
# Now we have 12916 words (with higher discriminatory power)

fakeNewsPowerWords_df <- cleanWordProbs_df %>%
  select(-prob)
write_csv(fakeNewsPowerWords_df, "fake_news_WC_clean_train.csv")

### Models ###
# Kate has tried the topicmodels::LDA (Latent Dirichlet Allocation)
# install.packages("topicmodels")  # install interactively, not mid-script
library(topicmodels)

fakeNews_dtm <- cast_dtm(
  data = fakeNewsPowerWords_df,
  document = textFrom,
  term = word,
  value = n
)
fakeNews_lda <- LDA(
  fakeNews_dtm,
  k = 2,
  control = list(seed = 1234)
)
tidy(fakeNews_lda, matrix = "beta")

### Modeling based on "text mining" ###
library(tm)

train_model <- read_csv("fake_news_train.csv")

# remove things before converting to corpus
# BUG FIX: the original referenced `fake_news_train`, which was never
# defined (the data was read into `train_model`), and used the invalid
# string escape '\x', which is a parse error in R.
x <- train_model$text[6]
gsub("\\x", "", x, fixed = TRUE)  # strip literal "\x" artifacts (preview only)
gsub("<>", "", x)                 # strip "<>" artifacts (preview only)
train_model$text2 <- gsub("[[:punct:]]", "", train_model$text)
train_model$text3 <- gsub("<>", "", train_model$text2)

require(caTools)
set.seed(123456)
#sample <-sample.split(train_model,SplitRatio = 0.75)
#dtrain <- subset(train_model,sample==TRUE)
# BUG FIX: `dtrain` was used below but its defining split (above) is
# commented out; fall back to the full data set for now.
# TODO: re-enable the 75/25 train/test split.
dtrain <- train_model

#for text part
#create a corpus
corpusText = VCorpus(VectorSource(dtrain$text))
# Remove Punctuation
# NOTE(review): the original applied an undefined `myFunction` here;
# the call is disabled until that function is defined.
#corpusText <- tm_map(corpusText, myFunction)
corpusText <- tm_map(corpusText, removePunctuation)
corpusText <- tm_map(corpusText, removeNumbers)
corpusText <- tm_map(corpusText, tolower)
# Remove Stopwords, then stem
corpusText = tm_map(corpusText, removeWords, stopwords("english"))
corpusText = tm_map(corpusText, stemDocument)
corpusText <- Corpus(VectorSource(corpusText))
# Create document-term matrix
dtmText = DocumentTermMatrix(corpusText)
# Remove sparse terms
dtmText = removeSparseTerms(dtmText, .95)
# Create data frame of body-word counts
wordsText = as.data.frame(as.matrix(dtmText))
wordsText[1:6,1:6]
colnames(wordsText) = paste("Text_", colnames(wordsText))

##for title part
corpusTitle = VCorpus(VectorSource(train_model$title))
#corpusR = tm_map(corpusR, PlainTextDocument)
corpusTitle = tm_map(corpusTitle, removePunctuation)
corpusTitle <- tm_map(corpusTitle, removeNumbers)
corpusTitle <- tm_map(corpusTitle, tolower)
corpusTitle = tm_map(corpusTitle, removeWords, stopwords("english"))
corpusTitle = tm_map(corpusTitle, stemDocument)
dtmTitle = DocumentTermMatrix(corpusTitle)
# Remove sparse terms
dtmTitle = removeSparseTerms(dtmTitle, .95)
# Create data frame of title-word counts
wordsTitle = as.data.frame(as.matrix(dtmTitle))
wordsTitle[1:6,1:6]
colnames(wordsTitle) = paste("Title_", colnames(wordsTitle))

##combine them together
# BUG FIX: the original combined `wordsAdded` and `wordsRemoved`, neither
# of which exists in this script (apparent copy-paste from a wiki-revision
# exercise); the body and title matrices built above are combined instead.
wikiWords <- cbind(wordsText, wordsTitle)

###try another function from "tidtext" package
/Code/text_mining_intro_script_20190426.R
no_license
YeahCai/TextMining2019
R
false
false
8,371
r
# Classify Emails # Ye (Kate) Cai # 2019-04-26 ###### Overview ############################################################# # We have a data set of emails. We would like to classify "fake news" from real. # The data have raw features: text body, news title, and news source. The # response is "Real" or "Fake". # install.packages("tidyverse") library(tidyverse) fakeNewsTrain_df <- read_csv("fake_news_train.csv") # Explore anyNA(fakeNewsTrain_df) dim(fakeNewsTrain_df) ### Goals ### # Build a model to discriminate real news from fake news, and also construct # visuals to aid in the model description to the masses. # For an overview of text mining in R, see # https://www.tidytextmining.com/index.html ###### Pre-processing ####################################################### # We need to transform our data matrix into "tidy text". #install.packages("tidytext") library(tidytext) fakeNewsTrain_df <- fakeNewsTrain_df %>% mutate(ID = 1:nrow(fakeNewsTrain_df)) # Include the individual words from the title splitTitles_df <- fakeNewsTrain_df %>% unnest_tokens(word, title) %>% mutate(textFromBody = FALSE) %>% select(-text) splitText_df <- fakeNewsTrain_df %>% unnest_tokens(word, text) %>% mutate(textFromBody = TRUE) %>% select(-title) splitFakeNews_df <- bind_rows( splitText_df, splitTitles_df ) %>% arrange(ID) ###### Clean the Text ####################################################### # adding stop words View(stop_words) # we found 12 words to add to the stop_words table (this is just an example) numWords <- 12 myStopWords_df <- tibble( word = c( "your", "yours", "yourself", "yourselves", "he", "him", "his", "himself", "she", "her", "hers", "herself" ), lexicon = rep("ye", numWords) ) # now add them myStopWords_df <- bind_rows( stop_words, myStopWords_df ) # Remove stop words cleanSplitFN_df <- anti_join( splitFakeNews_df, myStopWords_df, by = "word" ) ### Clean Bad Entries ### # We have some entries that are gibberish alphanumerics cleanSplitFN_df %>% select(word) %>% 
arrange(word) %>% View() # Split off rows with numbers cleanSplitFN_df <- cleanSplitFN_df %>% filter(!str_detect(word, "[0-9]")) # This removes websites with numbers in them, so not ideal ###### Explore Data ######################################################### # Plot most common words in body cleanSplitFN_df %>% filter(textFromBody == TRUE) %>% group_by(label) %>% count(word, sort = TRUE) %>% slice(1:20) %>% ggplot() + geom_col(aes(x = word, y = n)) + coord_flip() + ggtitle("Most Common Words in News Text Body") + facet_grid(~label) # Plot most common words in title cleanSplitFN_df %>% filter(textFromBody == FALSE) %>% group_by(label) %>% count(word, sort = TRUE) %>% slice(1:20) %>% ggplot() + geom_col(aes(x = word, y = n)) + coord_flip() + ggtitle("Most Common Words in News Text Title") + facet_grid(~label) # Find words that only exist in fake emails: anti_join( cleanSplitFN_df %>% filter(label == "Fake"), cleanSplitFN_df %>% filter(label == "Real"), by = "word" ) %>% count(word, sort = TRUE) # Find words that only exist in real emails: anti_join( cleanSplitFN_df %>% filter(label == "Real"), cleanSplitFN_df %>% filter(label == "Fake"), by = "word" ) %>% count(word, sort = TRUE) ###### Remove Equally-occuring Words ######################################## # Remove words with roughly the same chance of occurance in each type of news # article summary(as.factor(cleanSplitFN_df$label)) # 17k counts, but we need 17k probabilities cleanSplitFN_df %>% group_by(label, textFromBody) %>% count(word) ### Split Data by Group ### # Split into four lists cleanSplitFN2_df <- cleanSplitFN_df %>% mutate(textFrom = ifelse(textFromBody, "Body", "Title")) %>% select(-textFromBody) cleanSplitFN2_df <- cleanSplitFN2_df %>% mutate(group = paste0(label, "_x_", textFrom)) %>% select(-label, -textFrom) cleanFN_ls <- split( cleanSplitFN2_df, f = cleanSplitFN2_df$group ) # Add Probabilities for Each Word cleanWordProbs_ls <- lapply(cleanFN_ls, function(df){ nObs <- nrow(df) df %>% 
count(word) %>% # "n" is the column added by the count() function mutate(prob = n / nObs) }) ### Words with Low Discriminatory Power ### # Rows with words to keep: inner_join( cleanWordProbs_ls$Fake_x_Body, cleanWordProbs_ls$Real_x_Body, by = "word" ) %>% mutate(ratio = prob.x / prob.y) %>% filter((ratio < 1 / 1.5) | (ratio > 1.5)) # This cuts from 3973 to 2047 words # Words to Cut lowPowerBodyWords_char <- inner_join( cleanWordProbs_ls$Fake_x_Body, cleanWordProbs_ls$Real_x_Body, by = "word" ) %>% mutate(ratio = prob.x / prob.y) %>% filter((ratio > 1 / 1.5) & (ratio < 1.5)) %>% select(word) %>% unlist() %>% as.character() # Rows with title words to keep: inner_join( cleanWordProbs_ls$Fake_x_Title, cleanWordProbs_ls$Real_x_Title, by = "word" ) %>% mutate(ratio = prob.x / prob.y) %>% filter((ratio < 1 / 1.5) | (ratio > 1.5)) # This cuts from 155 to 77 words # Words to Cut lowPowerTitleWords_char <- inner_join( cleanWordProbs_ls$Fake_x_Title, cleanWordProbs_ls$Real_x_Title, by = "word" ) %>% mutate(ratio = prob.x / prob.y) %>% filter((ratio > 1 / 1.5) & (ratio < 1.5)) %>% select(word) %>% unlist() %>% as.character() ### Join Data Frames and Filter Out LP Words ### LPwords_char <- c(lowPowerBodyWords_char, lowPowerTitleWords_char) # Start with 17321 unique words cleanWordProbs_df <- cleanWordProbs_ls %>% bind_rows(.id = "group") %>% separate(group, c("label", "textFrom"), "_x_") %>% filter(!(word %in% LPwords_char)) # Now we have 12916 words (with higher discriminatory power) fakeNewsPowerWords_df <- cleanWordProbs_df %>% select(-prob) write_csv(fakeNewsPowerWords_df, "fake_news_WC_clean_train.csv") ### Models ### # Kate has tried the topicmodels::LDA (Latent Dirichlet Allocation) install.packages("topicmodels") library(topicmodels) fakeNews_dtm <- cast_dtm( data = fakeNewsPowerWords_df, document = textFrom, term = word, value = n ) fakeNews_lda <- LDA( fakeNews_dtm, k = 2, control = list(seed = 1234) ) tidy(fakeNews_lda, matrix = "beta") ###Modeling based on "text 
mining" library(tm) train_model <- read_csv("fake_news_train.csv") # remove things before converting to corpus x <- fake_news_train$text[6] gsub('\x', '', x) gsub('<>', '', x) fake_news_train$text2 <- gsub('[[:punct:]]', '', fake_news_train$text) fake_news_train$text3 <- gsub('<>', '', fake_news_train$text2) require(caTools) set.seed(123456) #sample <-sample.split(train_model,SplitRatio = 0.75) #dtrain <- subset(train_model,sample==TRUE) #for text part #create a corpus corpusText = VCorpus(VectorSource(dtrain$text)) # Remove Punctuation corpusText <- tm_map(corpusText, myFunction) corpusText <- tm_map(corpusText, removePunctuation) corpusText <- tm_map(corpusText,removeNumbers) corpusText <- tm_map(corpusText, tolower) # Remove Stopwords corpusText = tm_map(corpusText,removeWords,stopwords("english")) corpusText = tm_map(corpusText, stemDocument) corpusText <- Corpus(VectorSource(corpusText)) # Create matrix dtmText = DocumentTermMatrix(corpusText) # Remove sparse terms dtmText = removeSparseTerms(dtmText, .95) # Create data frame wordsText = as.data.frame(as.matrix(dtmText)) wordsText[1:6,1:6] colnames(wordsText) = paste("Text_", colnames(wordsText)) ##for title part corpusTitle = VCorpus(VectorSource(train_model$title)) #corpusR = tm_map(corpusR, PlainTextDocument) corpusTitle = tm_map(corpusTitle, removePunctuation) corpusTitle <- tm_map(corpusTitle,removeNumbers) corpusTitle <- tm_map(corpusTitle, tolower) corpusTitle = tm_map(corpusTitle, removeWords, stopwords("english")) corpusTitle = tm_map(corpusTitle, stemDocument) dtmTitle = DocumentTermMatrix(corpusTitle) # Remove sparse terms dtmTitle = removeSparseTerms(dtmTitle, .95) # Create data frame wordsTitle = as.data.frame(as.matrix(dtmTitle)) wordsTitle[1:6,1:6] colnames(wordsTitle) = paste("Title_", colnames(wordsTitle)) ##combine them together wikiWords = cbind(wordsAdded, wordsRemoved) ###try another function from "tidtext" package
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/auxiliary_giotto.R \name{addGeneStatistics} \alias{addGeneStatistics} \title{addGeneStatistics} \usage{ addGeneStatistics( gobject, expression_values = c("normalized", "scaled", "custom"), detection_threshold = 0, return_gobject = TRUE ) } \arguments{ \item{gobject}{giotto object} \item{expression_values}{expression values to use} \item{detection_threshold}{detection threshold to consider a gene detected} \item{return_gobject}{boolean: return giotto object (default = TRUE)} } \value{ giotto object if return_gobject = TRUE } \description{ adds gene statistics to the giotto object } \details{ This function will add the following statistics to gene metadata: \itemize{ \item{nr_cells: }{Denotes in how many cells the gene is detected} \item{per_cells: }{Denotes in what percentage of cells the gene is detected} \item{total_expr: }{Shows the total sum of gene expression in all cells} \item{mean_expr: }{Average gene expression in all cells} \item{mean_expr_det: }{Average gene expression in cells with detectable levels of the gene} } } \examples{ data(mini_giotto_single_cell) updated_giotto_object = addGeneStatistics(mini_giotto_single_cell) }
/man/addGeneStatistics.Rd
permissive
RubD/Giotto
R
false
true
1,255
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/auxiliary_giotto.R \name{addGeneStatistics} \alias{addGeneStatistics} \title{addGeneStatistics} \usage{ addGeneStatistics( gobject, expression_values = c("normalized", "scaled", "custom"), detection_threshold = 0, return_gobject = TRUE ) } \arguments{ \item{gobject}{giotto object} \item{expression_values}{expression values to use} \item{detection_threshold}{detection threshold to consider a gene detected} \item{return_gobject}{boolean: return giotto object (default = TRUE)} } \value{ giotto object if return_gobject = TRUE } \description{ adds gene statistics to the giotto object } \details{ This function will add the following statistics to gene metadata: \itemize{ \item{nr_cells: }{Denotes in how many cells the gene is detected} \item{per_cells: }{Denotes in what percentage of cells the gene is detected} \item{total_expr: }{Shows the total sum of gene expression in all cells} \item{mean_expr: }{Average gene expression in all cells} \item{mean_expr_det: }{Average gene expression in cells with detectable levels of the gene} } } \examples{ data(mini_giotto_single_cell) updated_giotto_object = addGeneStatistics(mini_giotto_single_cell) }
# DS_dataprep.R -----------------------------------------------------------
# Prepares the wide-format DS polytunnel phenotype export for analysis:
# drops administrative columns, collapses repeated per-date measurement
# columns into single trait columns, keeps a fixed column set, removes
# incomplete rows, and writes the result to raw_data/DS_polytunnel_CAS.csv.

library(tidyverse)

# Administrative / photo columns with no analytical value.
col_drop <- c('fpNodeId', 'packet_stemid', 'position', 'pot_stemid',
              'Photo_20181129.60.1', 'Photo_20181204.32.1',
              'Flowering Date_20180524.40.1')

DS_polytunnel_vis <- read_csv('DS_polytunnel-wide.csv') %>%
  select(-all_of(col_drop)) %>%  # all_of(): explicit external-vector selection
  rename_with(make.names)        # replaces deprecated rename_all(funs(...))

# NOTE(review): each trait below is taken from the FIRST of the repeated
# per-date columns; the later-dated duplicates are discarded (the original
# code listed them as no-op unnamed mutate() arguments before dropping them,
# so the output is unchanged). If the intent was to combine dates
# (e.g. coalesce()), confirm with the data owner.
DS_polytunnel_vis <- DS_polytunnel_vis %>%
  mutate(flowering_date = Flowering.Date_20180725.15.1) %>%
  select(-c(Flowering.Date_20180725.15.1, Flowering.Date_20180817.2.1)) %>%
  mutate(final_leaf_number = final.leaf.number_20180524.41.1) %>%
  select(-c(final.leaf.number_20180524.41.1, final.leaf.number_20181204.26.1,
            final.leaf.number_20181210.31.1)) %>%
  mutate(spikelet_number = spikelet.number_20180524.42.1) %>%
  select(-c(spikelet.number_20180524.42.1, spikelet.number_20181204.27.1,
            spikelet.number_20181210.32.1)) %>%
  mutate(height_mm = Height_20180524.43.1) %>%
  select(-c(Height_20180524.43.1, Height_20181204.28.1,
            Height_20181210.33.1)) %>%
  mutate(tiller_count = tillers_20180524.44.1) %>%
  select(-c(tillers_20180524.44.1, tillers_20181204.29.1,
            tillers_20181210.34.1)) %>%
  mutate(spike_length = Spike.Length_20181122.59.1) %>%
  select(-c(Spike.Length_20181122.59.1, Spike.Length_20181204.31.1,
            Spike.Length_20181210.36.1))

# Fixed output column set.
col_select <- c('Row', 'Column', 'line.id', 'line.name',
                'seed.weight_20190208.65.1', 'plant_date_20190318.1.1',
                'vern_end_date_20190318.1.1', 'flowering_date',
                'final_leaf_number', 'spikelet_number', 'height_mm',
                'tiller_count', 'spike_length')

DS_polytunnel_vis <- DS_polytunnel_vis %>%
  select(all_of(col_select)) %>%  # all_of() errors loudly if a column is missing
  na.omit() %>%                   # drop rows with any missing trait value
  filter(line.id < 45)            # restrict to the lines of interest

write_csv(DS_polytunnel_vis, 'raw_data/DS_polytunnel_CAS.csv')
/scripts/DS_dataprep.R
no_license
EPLeyne/Data_School_ggplot
R
false
false
3,093
r
# DS polytunnel data preparation: reshape the wide phenotype export into a
# tidy table of selected traits and write it out for downstream analysis.

library(tidyverse)

# Administrative / photo columns that are not needed downstream.
col_drop <- c('fpNodeId', 'packet_stemid', 'position', 'pot_stemid',
              'Photo_20181129.60.1', 'Photo_20181204.32.1',
              'Flowering Date_20180524.40.1')

# Read the wide export, remove the unwanted columns, and make all names
# syntactically valid R names.
polytunnel <- read_csv('DS_polytunnel-wide.csv') %>%
  select(-(col_drop)) %>%
  dplyr::rename_all(funs(make.names(.)))

# For each trait, the first listed per-date column supplies the value; the
# extra unnamed mutate() arguments are existing columns (self-assignments)
# and are dropped immediately afterwards.
polytunnel <- polytunnel %>%
  mutate(flowering_date = Flowering.Date_20180725.15.1,
         Flowering.Date_20180817.2.1) %>%
  select(-c(Flowering.Date_20180725.15.1, Flowering.Date_20180817.2.1)) %>%
  mutate(final_leaf_number = final.leaf.number_20180524.41.1,
         final.leaf.number_20181204.26.1, final.leaf.number_20181210.31.1) %>%
  select(-c(final.leaf.number_20180524.41.1, final.leaf.number_20181204.26.1,
            final.leaf.number_20181210.31.1)) %>%
  mutate(spikelet_number = spikelet.number_20180524.42.1,
         spikelet.number_20181204.27.1, spikelet.number_20181210.32.1) %>%
  select(-c(spikelet.number_20180524.42.1, spikelet.number_20181204.27.1,
            spikelet.number_20181210.32.1)) %>%
  mutate(height_mm = Height_20180524.43.1,
         Height_20181204.28.1, Height_20181210.33.1) %>%
  select(-c(Height_20180524.43.1, Height_20181204.28.1,
            Height_20181210.33.1)) %>%
  mutate(tiller_count = tillers_20180524.44.1,
         tillers_20181204.29.1, tillers_20181210.34.1) %>%
  select(-c(tillers_20180524.44.1, tillers_20181204.29.1,
            tillers_20181210.34.1)) %>%
  mutate(spike_length = Spike.Length_20181122.59.1,
         Spike.Length_20181204.31.1, Spike.Length_20181210.36.1) %>%
  select(-c(Spike.Length_20181122.59.1, Spike.Length_20181204.31.1,
            Spike.Length_20181210.36.1))

# Final set of columns retained in the output file.
col_select <- c('Row', 'Column', 'line.id', 'line.name',
                'seed.weight_20190208.65.1', 'plant_date_20190318.1.1',
                'vern_end_date_20190318.1.1', 'flowering_date',
                'final_leaf_number', 'spikelet_number', 'height_mm',
                'tiller_count', 'spike_length')

polytunnel <- select(polytunnel, col_select)
polytunnel <- na.omit(polytunnel)             # drop incomplete rows
polytunnel <- filter(polytunnel, line.id < 45)

# Keep the original output object name for any downstream code that sources
# this script.
DS_polytunnel_vis <- polytunnel

write_csv(DS_polytunnel_vis, 'raw_data/DS_polytunnel_CAS.csv')
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/kmermods.R \docType{package} \name{kmermods} \alias{kmermods} \alias{kmermods-package} \title{\code{kmermods}: Represents Kmers As Integers Along The Genome} \description{ \code{kmermods}: Represents Kmers As Integers Along The Genome }
/man/kmermods.Rd
no_license
TomMayo/kmermods
R
false
true
316
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/kmermods.R \docType{package} \name{kmermods} \alias{kmermods} \alias{kmermods-package} \title{\code{kmermods}: Represents Kmers As Integers Along The Genome} \description{ \code{kmermods}: Represents Kmers As Integers Along The Genome }
# HTML dependency bundling the JavaScript for the heatmap layer; attached to
# the widget the first time add_heatmap() is called.
mapdeckHeatmapDependency <- function() {
	list(
		createHtmlDependency(
			name = "heatmap",
			version = "1.0.0",
			src = system.file("htmlwidgets/lib/heatmap", package = "mapdeck"),
			script = c("heatmap.js"),
			all_files = FALSE
		)
	)
}


#' Add Heatmap
#'
#' The Heatmap Layer can be used to visualise spatial distribution of data.
#' It implements Gaussian Kernel Density Estimation to render the heatmaps.
#'
#' @section note:
#'
#' The current version of this layer is supported only for WebGL2 enabled browsers,
#' so you may find it doesn't render in the RStudio viewer.
#'
#' @inheritParams add_polygon
#' @param lon column containing longitude values
#' @param lat column containing latitude values
#' @param weight the weight of each value. Default 1
#' @param colour_range vector of 6 hex colours
#' @param radius_pixels Radius of the circle in pixels, to which the weight of an object is distributed
#' @param intensity Value that is multiplied with the total weight at a pixel to
#' obtain the final weight. A value larger than 1 biases the output color towards
#' the higher end of the spectrum, and a value less than 1 biases the output
#' color towards the lower end of the spectrum
#' @param threshold The HeatmapLayer reduces the opacity of the pixels with relatively
#' low weight to create a fading effect at the edge.
#' A larger threshold smoothens the boundaries of color blobs, while making pixels
#' with low relative weight harder to spot (due to low alpha value).
#' Threshold is defined as the ratio of the fading weight to the max weight, between 0 and 1.
#' For example, 0.1 affects all pixels with weight under 10\% of the max.
#'
#' @inheritSection add_polygon data
#'
#' @examples
#' \donttest{
#'
#' ## You need a valid access token from Mapbox
#' key <- 'abc'
#' set_token( key )
#'
#' df <- read.csv(paste0(
#' 'https://raw.githubusercontent.com/uber-common/deck.gl-data/master/',
#' 'examples/3d-heatmap/heatmap-data.csv'
#' ))
#'
#' df <- df[ !is.na(df$lng), ]
#' df$weight <- sample(1:10, size = nrow(df), replace = T)
#'
#' mapdeck( style = mapdeck_style('dark'), pitch = 45 ) %>%
#' add_heatmap(
#'   data = df
#'   , lat = "lat"
#'   , lon = "lng"
#'   , weight = "weight"
#'   , layer_id = "heatmap_layer"
#' )
#'
#' ## as an sf object
#' library(sf)
#' sf <- sf::st_as_sf( df, coords = c("lng", "lat"))
#' mapdeck( token = key, style = mapdeck_style('dark'), pitch = 45 ) %>%
#' add_heatmap(
#'   data = sf
#'   , weight = "weight"
#'   , layer_id = "heatmap_layer"
#' )
#'
#' }
#'
#' @details
#'
#' \code{add_heatmap} supports POINT and MULTIPOINT sf objects
#'
#' @export
add_heatmap <- function(
	map,
	data = get_map_data(map),
	lon = NULL,
	lat = NULL,
	polyline = NULL,
	weight = NULL,
	colour_range = NULL,
	radius_pixels = 30,
	intensity = 1,
	threshold = 0.05,
	layer_id = NULL,
	update_view = TRUE,
	focus_layer = FALSE,
	digits = 6,
	brush_radius = NULL
) {

	#experimental_layer("heatmap")

	# Collect the column/geometry specification; force() evaluates the
	# arguments eagerly before they are passed on to resolve_data().
	l <- list()
	l[["polyline"]] <- force( polyline )
	l[["weight"]] <- force( weight )
	l[["lon"]] <- force( lon )
	l[["lat"]] <- force( lat )

	# resolve_data() normalises `data` (sf / data.frame / encoded polylines)
	# and may return the data and its bounding box inside `l`.
	l <- resolve_data( data, l, c("POINT","MULTIPOINT") )

	bbox <- init_bbox()
	update_view <- force( update_view )
	focus_layer <- force( focus_layer )

	if ( !is.null(l[["data"]]) ) {
		data <- l[["data"]]
		l[["data"]] <- NULL
	}

	if( !is.null(l[["bbox"]] ) ) {
		bbox <- l[["bbox"]]
		l[["bbox"]] <- NULL
	}

	## parameter checks
	#usePolyline <- isUsingPolyline(polyline)
	layer_id <- layerId(layer_id, "heatmap")

	# Default palette: 6 viridis colours, matching the required length below.
	if( is.null( colour_range ) ) {
		colour_range <- colourvalues::colour_values(1:6, palette = "viridis")
	}

	if(length(colour_range) != 6)
		stop("mapdeck - colour_range must have 6 hex colours")

	## end parameter checks

	checkHex(colour_range)

	map <- addDependency(map, mapdeckHeatmapDependency())

	tp <- l[["data_type"]]
	l[["data_type"]] <- NULL

	# Choose the serialiser and JS entry point based on the detected data type.
	jsfunc <- "add_heatmap_geo"

	if( tp == "sf" ) {
		geometry_column <- c( "geometry" )
		shape <- rcpp_heatmap_geojson( data, l, geometry_column, digits )
	} else if ( tp == "df" ) {
		geometry_column <- list( geometry = c("lon", "lat") )
		shape <- rcpp_heatmap_geojson_df( data, l, geometry_column, digits )
	} else if ( tp == "sfencoded" ) {
		geometry_column <- "polyline"
		shape <- rcpp_heatmap_polyline( data, l, geometry_column )
		jsfunc <- "add_heatmap_polyline"
	}

	# Hand the serialised shape and layer options over to the htmlwidget.
	invoke_method(
		map, jsfunc, map_type( map ), shape[["data"]], layer_id, colour_range,
		radius_pixels, intensity, threshold, bbox, update_view, focus_layer,
		brush_radius
	)
}


#' @rdname clear
#' @export
clear_heatmap <- function( map, layer_id = NULL) {
	# Resolve the default layer id the same way add_heatmap() does, then ask
	# the JS side to remove that layer.
	layer_id <- layerId(layer_id, "heatmap")
	invoke_method(map, "md_layer_clear", map_type( map ), layer_id, "heatmap" )
}
/R/map_layer_heatmap.R
no_license
harryprince/mapdeck
R
false
false
4,727
r
# HTML dependency bundling the JavaScript for the heatmap layer; attached to
# the widget the first time add_heatmap() is called.
mapdeckHeatmapDependency <- function() {
	list(
		createHtmlDependency(
			name = "heatmap",
			version = "1.0.0",
			src = system.file("htmlwidgets/lib/heatmap", package = "mapdeck"),
			script = c("heatmap.js"),
			all_files = FALSE
		)
	)
}


#' Add Heatmap
#'
#' The Heatmap Layer can be used to visualise spatial distribution of data.
#' It implements Gaussian Kernel Density Estimation to render the heatmaps.
#'
#' @section note:
#'
#' The current version of this layer is supported only for WebGL2 enabled browsers,
#' so you may find it doesn't render in the RStudio viewer.
#'
#' @inheritParams add_polygon
#' @param lon column containing longitude values
#' @param lat column containing latitude values
#' @param weight the weight of each value. Default 1
#' @param colour_range vector of 6 hex colours
#' @param radius_pixels Radius of the circle in pixels, to which the weight of an object is distributed
#' @param intensity Value that is multiplied with the total weight at a pixel to
#' obtain the final weight. A value larger than 1 biases the output color towards
#' the higher end of the spectrum, and a value less than 1 biases the output
#' color towards the lower end of the spectrum
#' @param threshold The HeatmapLayer reduces the opacity of the pixels with relatively
#' low weight to create a fading effect at the edge.
#' A larger threshold smoothens the boundaries of color blobs, while making pixels
#' with low relative weight harder to spot (due to low alpha value).
#' Threshold is defined as the ratio of the fading weight to the max weight, between 0 and 1.
#' For example, 0.1 affects all pixels with weight under 10\% of the max.
#'
#' @inheritSection add_polygon data
#'
#' @examples
#' \donttest{
#'
#' ## You need a valid access token from Mapbox
#' key <- 'abc'
#' set_token( key )
#'
#' df <- read.csv(paste0(
#' 'https://raw.githubusercontent.com/uber-common/deck.gl-data/master/',
#' 'examples/3d-heatmap/heatmap-data.csv'
#' ))
#'
#' df <- df[ !is.na(df$lng), ]
#' df$weight <- sample(1:10, size = nrow(df), replace = T)
#'
#' mapdeck( style = mapdeck_style('dark'), pitch = 45 ) %>%
#' add_heatmap(
#'   data = df
#'   , lat = "lat"
#'   , lon = "lng"
#'   , weight = "weight"
#'   , layer_id = "heatmap_layer"
#' )
#'
#' ## as an sf object
#' library(sf)
#' sf <- sf::st_as_sf( df, coords = c("lng", "lat"))
#' mapdeck( token = key, style = mapdeck_style('dark'), pitch = 45 ) %>%
#' add_heatmap(
#'   data = sf
#'   , weight = "weight"
#'   , layer_id = "heatmap_layer"
#' )
#'
#' }
#'
#' @details
#'
#' \code{add_heatmap} supports POINT and MULTIPOINT sf objects
#'
#' @export
add_heatmap <- function(
	map,
	data = get_map_data(map),
	lon = NULL,
	lat = NULL,
	polyline = NULL,
	weight = NULL,
	colour_range = NULL,
	radius_pixels = 30,
	intensity = 1,
	threshold = 0.05,
	layer_id = NULL,
	update_view = TRUE,
	focus_layer = FALSE,
	digits = 6,
	brush_radius = NULL
) {

	#experimental_layer("heatmap")

	# Collect the column/geometry specification; force() evaluates the
	# arguments eagerly before they are passed on to resolve_data().
	l <- list()
	l[["polyline"]] <- force( polyline )
	l[["weight"]] <- force( weight )
	l[["lon"]] <- force( lon )
	l[["lat"]] <- force( lat )

	# resolve_data() normalises `data` (sf / data.frame / encoded polylines)
	# and may return the data and its bounding box inside `l`.
	l <- resolve_data( data, l, c("POINT","MULTIPOINT") )

	bbox <- init_bbox()
	update_view <- force( update_view )
	focus_layer <- force( focus_layer )

	if ( !is.null(l[["data"]]) ) {
		data <- l[["data"]]
		l[["data"]] <- NULL
	}

	if( !is.null(l[["bbox"]] ) ) {
		bbox <- l[["bbox"]]
		l[["bbox"]] <- NULL
	}

	## parameter checks
	#usePolyline <- isUsingPolyline(polyline)
	layer_id <- layerId(layer_id, "heatmap")

	# Default palette: 6 viridis colours, matching the required length below.
	if( is.null( colour_range ) ) {
		colour_range <- colourvalues::colour_values(1:6, palette = "viridis")
	}

	if(length(colour_range) != 6)
		stop("mapdeck - colour_range must have 6 hex colours")

	## end parameter checks

	checkHex(colour_range)

	map <- addDependency(map, mapdeckHeatmapDependency())

	tp <- l[["data_type"]]
	l[["data_type"]] <- NULL

	# Choose the serialiser and JS entry point based on the detected data type.
	jsfunc <- "add_heatmap_geo"

	if( tp == "sf" ) {
		geometry_column <- c( "geometry" )
		shape <- rcpp_heatmap_geojson( data, l, geometry_column, digits )
	} else if ( tp == "df" ) {
		geometry_column <- list( geometry = c("lon", "lat") )
		shape <- rcpp_heatmap_geojson_df( data, l, geometry_column, digits )
	} else if ( tp == "sfencoded" ) {
		geometry_column <- "polyline"
		shape <- rcpp_heatmap_polyline( data, l, geometry_column )
		jsfunc <- "add_heatmap_polyline"
	}

	# Hand the serialised shape and layer options over to the htmlwidget.
	invoke_method(
		map, jsfunc, map_type( map ), shape[["data"]], layer_id, colour_range,
		radius_pixels, intensity, threshold, bbox, update_view, focus_layer,
		brush_radius
	)
}


#' @rdname clear
#' @export
clear_heatmap <- function( map, layer_id = NULL) {
	# Resolve the default layer id the same way add_heatmap() does, then ask
	# the JS side to remove that layer.
	layer_id <- layerId(layer_id, "heatmap")
	invoke_method(map, "md_layer_clear", map_type( map ), layer_id, "heatmap" )
}
# Tests for assist::describe() using the ggplot2 diamonds dataset.
library(assist)

test_that("describe cars", {
  # diamonds lives in ggplot2, which is only a suggested dependency here.
  skip_if_not_installed("ggplot2")

  df <- assist::describe(ggplot2::diamonds)

  # diamonds has 53,940 rows, so N for the first variable must match.
  expect_equal(df$N[1], 53940)
  # 3932.800 is the mean of the 6th summarised variable rounded to 3 dp
  # (true mean of diamonds$price is 3932.7997...), so an explicit tolerance
  # is required for the comparison to be meaningful.
  expect_equal(df$mean[6], 3932.800, tolerance = 1e-3)
})
/tests/testthat/test_describe.R
no_license
bjornerstedt/assist
R
false
false
186
r
# Tests for assist::describe() against the ggplot2 diamonds dataset.
library(assist)

context("Test describe")

test_that("describe cars", {
  summary_tbl <- assist::describe(ggplot2::diamonds)

  # Row count of diamonds (53,940) should be reported as N.
  expect_equal(summary_tbl$N[1], 53940)
  # Mean of the 6th summarised variable.
  expect_equal(summary_tbl$mean[6], 3932.800)
})