content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
# server.R -- Shiny server for the UberEats California menu-pricing explorer.
#
# NOTE(review): this file reads several objects it never defines
# (city_rest_cnt, filtered_set, sub_menu_data_all, top_cities, sub_menu_ls,
# customer_data, pricing_CL, control_c, and price_data_control inside
# pVal_matrix); presumably they are created in global.R -- confirm before
# refactoring. `infoBox`/`renderInfoBox` also imply shinydashboard is
# attached elsewhere (likely ui.R/global.R) -- TODO confirm.
library(DT)
library(shiny)
library(ggplot2)
library(dplyr)
library(tibble)
library(wordcloud2)

shinyServer(function(input, output){

  # show bar chart of top cities by number of listed restaurants
  # (city_rest_cnt is precomputed elsewhere; bars sorted by count, flipped horizontal)
  output$bar_rest <- renderPlot(ggplot(data=city_rest_cnt, aes(x=reorder(local_city, -total_cnt),y = total_cnt, fill=local_city, label=total_cnt)) +
    geom_bar(stat="identity", show.legend = FALSE) +coord_flip() +
    labs(title ="Count of listed restaurants by city in California", x ="Cities", y = "count of unique restaurants") +
    geom_label(aes(fill = local_city),colour = "white", fontface = "bold", show.legend = FALSE)
  )

  # show bar graph of top sub-menu items across the state
  output$bar_submenu = renderPlot(ggplot(data=sub_menu_data_all, aes(x=reorder(sub_menu_title, -total_count),y = total_count)) +
    geom_bar(stat="identity", fill="steelblue") +coord_flip() +
    labs(title ="Popular sub-menus(All California)", x ="sub-menu sections", y = "count across menus in CA"))

  # show bar graph of top 10 sub-menu sections for the city picked in input$selectedCity
  # (distinct restaurant/sub-menu pairs are counted so multi-dish menus aren't double-counted)
  output$bar_city_submenu = renderPlot(ggplot(data=filtered_set %>% filter(local_city == input$selectedCity) %>%
      select(., name,sub_menu_title) %>% distinct() %>% group_by(.,sub_menu_title)%>%summarise(.,total_count = n()) %>%
      arrange(.,desc(total_count)) %>%head(10),
    aes(x=reorder(sub_menu_title, -total_count),y = total_count)) +
    geom_bar(stat="identity", fill="steelblue") +coord_flip() +
    labs(title = paste("Popular sub-menus in",input$selectedCity) , x ="sub-menus", y = "city-wide count"))

  # Winsorized price observations for the selected city: one row per
  # (sub_menu_title, price), keeping prices in (0.01, 95th percentile].
  winsor_selected = reactive({
    #winsorize price data for selected input
    price_data = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == input$selectedCity) %>%select(sub_menu_title, price)
    #clean outliers: cap each sub-menu at its own 95th price percentile
    price_cut_off = price_data %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T))
    price_data = inner_join(price_data,price_cut_off, by="sub_menu_title")
    price_data = price_data %>% filter((price >0.01) &
      price <= upperB)
    price_data
  })

  # Two-sample t-tests of mean sub-menu prices, each test city vs. the control
  # city, at the confidence level chosen in input$selectedConfi. Returns one row
  # per (sub_menu_title, city) with p-value, mean ratio, and a Yes/No flag.
  # NOTE(review): price_data_control is used in the inner loop but never built
  # in this reactive -- presumably a control-city (Los Angeles?) frame from
  # global.R; confirm it exists, else this reactive errors at runtime.
  pVal_matrix = reactive({
    #perform 2 sample t-tests on the means of like sub-menu prices across different cities
    cities_to_regress = top_cities
    if(input$selectedCity_reg != "ALL") {
      cities_to_regress = input$selectedCity_reg
    }
    ##create the master dataframe once
    master_relative_prices = setNames(data.frame(matrix(ncol = 5, nrow = 0), stringsAsFactors = FALSE),
                                      c("sub_menu_title", "p_value", "mean_ratio", "is_diff_price","local_city"))
    CL = (1 - as.numeric(input$selectedConfi))/2 ## alpha/2 for a 2 tailed test
    # debug traces to the console
    cat("alpha/2 for 2 tail-test:",CL,"\n")
    cat("user selection:",input$selectedCity_reg,"\n" )
    cat(cities_to_regress,"\n")
    cat(top_cities,"\n")
    for(test_c in cities_to_regress) {
      cat("\ndoing t-tests for city:",test_c,"\n")
      ##prepare test city's winsorized prices (same cleaning as winsor_selected)
      price_data_test = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == test_c) %>%select(sub_menu_title, price)
      #clean outliers
      price_cut_off_test = price_data_test %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T))
      price_data_test = inner_join(price_data_test,price_cut_off_test, by="sub_menu_title")
      price_data_test = price_data_test %>% filter((price >0.01) & price <= upperB)
      pVal_df = c()
      ratios_df = c()
      for(sm in sub_menu_ls) {
        control_prices = price_data_control%>% filter(sub_menu_title == sm) %>% select(price)
        test_prices = price_data_test%>% filter(sub_menu_title == sm) %>% select(price)
        pVal_df[sm] = as.numeric((t.test(control_prices, test_prices, alternative = "two.sided")$p.value)[[1]]) #Conducting the t-test for same sub-menu across two cities
        ratios_df[sm] = mean(test_prices$price, na.rm= TRUE)/ mean(control_prices$price, na.rm = TRUE)
      }
      # named vectors -> two-column data frames, then join on sub_menu_title
      pVal_df = data.frame(enframe(pVal_df, name="sub_menu_title", value="p_value"), stringsAsFactors = FALSE)
      ratios_df = data.frame(enframe(ratios_df, name="sub_menu_title", value="mean_ratio"), stringsAsFactors = FALSE)
      pVal_df = inner_join(pVal_df, ratios_df, by="sub_menu_title")
      pVal_df$is_diff_price = ifelse(pVal_df$p_value < CL, "Yes", "No")
      # if the difference is not significant, treat the cities' means as equal (ratio 1)
      pVal_df$mean_ratio = ifelse(pVal_df$is_diff_price == "Yes", pVal_df$mean_ratio, 1)
      pVal_df$local_city = test_c
      master_relative_prices = rbind(master_relative_prices, pVal_df)
    }
    master_relative_prices
  })

  # Average winsorized price per (city, rating) level, for the rating-vs-price
  # regression. City scope comes from input$selectedCity_ratings ("ALL" = top_cities).
  ratings_regression = reactive({
    #run linear regression model lm(avg_price ~ ratings -1)
    #r-squared is much larger without the intercept.
    #thus regression is done without the intercept
    # NOTE(review): the comment above says "without the intercept", but
    # ratings_modelOutputs() actually fits lm(avg_price ~ ratings) WITH an
    # intercept -- reconcile the comment and the model.
    cities_to_regress = top_cities
    if(input$selectedCity_ratings != "ALL") {
      cities_to_regress = input$selectedCity_ratings
    }
    price_data_control = inner_join(filtered_set %>% filter(!is.na(ratings)),sub_menu_data_all, by="sub_menu_title") %>%
      filter(local_city %in% cities_to_regress) %>% select(local_city, sub_menu_title, ratings, price)
    #clean outliers (winsorize at the per-sub-menu 95th percentile)
    price_cut_off_control = price_data_control %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T))
    price_data_control = inner_join(price_data_control,price_cut_off_control, by="sub_menu_title")
    price_data_control = price_data_control %>% filter((price >0.01) & (price <= upperB))
    ratings_data = price_data_control %>% group_by(local_city, ratings) %>% summarise(avg_price = mean(price,na.rm= TRUE))
    ratings_data
  })

  # One-row data frame with the ratings slope, its p-value and adjusted R^2,
  # for tabular display in output$betaTable.
  ratings_modelOutputs = reactive({
    ##prepare regression coefficients and r-square for tabular display
    linearMod <- lm(avg_price ~ ratings, data=ratings_regression())
    modelCoeffs <- summary(linearMod)$coefficients # model coefficients
    beta.estimate <- modelCoeffs["ratings", "Estimate"]
    pval.beta = modelCoeffs["ratings", "Pr(>|t|)"]
    adj_rsq = summary(linearMod)$adj.r.squared
    data.frame("regression_beta1" = round(beta.estimate,5), "p_value" = pval.beta, "adjusted R-sqrd" = round(adj_rsq,5))
  })

  # Word-cloud input: (word, freq) pairs where word is a lower-cased dish name
  # and freq is its mean price. Only dishes at/above their sub-menu's 90th
  # price percentile, in sub-menus whose 90th percentile exceeds $20.
  dishname_wCloud = reactive({
    #generate wordcloud of words in dish names. freq set to mean price of the dish.
    #uses wordcloud2
    cities_to_cloud = top_cities
    if(input$selectedCity_wcloud != "ALL") {
      cities_to_cloud = input$selectedCity_wcloud
    }
    cat("cities to cloud:",cities_to_cloud,"\n")
    dishes_top_cities = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title")
    dishes_top_cities = dishes_top_cities%>%filter((local_city %in% cities_to_cloud) & (!is.na(dish_name) & (dish_name !="NA"))) %>%
      select(sub_menu_title, dish_name, price, local_city)
    # keep only "expensive" sub-menus (90th percentile > $20) and the dishes at/above that cutoff
    clean_up = dishes_top_cities %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.90, na.rm=T)) %>% filter(upperB > 20)
    dishes_top_cities = inner_join(dishes_top_cities, clean_up, by="sub_menu_title")
    dishes_top_cities = dishes_top_cities %>% filter(price >= upperB) %>% group_by(dish_name) %>%
      summarise(cnt=n(), avg_price = mean(price,na.rm =TRUE))%>% select(word=dish_name,freq=avg_price)
    dishes_top_cities$word = tolower(dishes_top_cities$word)
    dishes_top_cities
  })

  # The pricing model itself: combine the city-vs-control mean-price ratio with
  # a ratings-based adjustment, then scale the control city's observed price
  # range to produce a recommended [lower, upper] per customer sub-menu.
  # NOTE(review): as in pVal_matrix, price_data_control is read in the t-test
  # loop before this reactive assigns it -- presumably the global control-city
  # frame is intended there; the later local assignment shadows it.
  recommendation_engine = reactive({
    ##all comes down to this.
    #use regression coefficients, adjust mean prices per menu across cities normalized to prices in Los Angeles
    #present table of recommended prices based on customer initial input of location, restaurant rating, menu prices
    master_relative_prices = setNames(data.frame(matrix(ncol = 5, nrow = 0), stringsAsFactors = FALSE),
                                      c("sub_menu_title", "p_value", "mean_ratio", "is_diff_price","local_city"))
    # the customer's city is the "test" city; assumes customer_data$city has a single value
    test_c = tolower(unique(customer_data$city))
    ##prepare test city's winsorized prices
    price_data_test = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == test_c) %>%select(sub_menu_title, price)
    #clean outliers
    price_cut_off_test = price_data_test %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T))
    price_data_test = inner_join(price_data_test,price_cut_off_test, by="sub_menu_title")
    price_data_test = price_data_test %>% filter((price >0.01) & price <= upperB)
    pVal_df = c()
    ratios_df = c()
    control_mean = c()
    for(sm in sub_menu_ls) {
      control_prices = price_data_control%>% filter(sub_menu_title == sm) %>% select(price)
      test_prices = price_data_test%>% filter(sub_menu_title == sm) %>% select(price)
      pVal_df[sm] = as.numeric((t.test(control_prices, test_prices, alternative = "two.sided")$p.value)[[1]]) #Conducting the t-test for same sub-menu across two cities
      ratios_df[sm] = mean(test_prices$price, na.rm= TRUE)/ mean(control_prices$price, na.rm = TRUE)
      control_mean[sm] = mean(control_prices$price, na.rm = TRUE)
    }
    pVal_df = data.frame(enframe(pVal_df, name="sub_menu_title", value="p_value"), stringsAsFactors = FALSE)
    ratios_df = data.frame(enframe(ratios_df, name="sub_menu_title", value="mean_ratio"), stringsAsFactors = FALSE)
    control_mean_df = data.frame(enframe(control_mean, name="sub_menu_title", value="control_mean"), stringsAsFactors = FALSE)
    pVal_df = inner_join(pVal_df, ratios_df, by="sub_menu_title")
    pVal_df = inner_join(pVal_df, control_mean_df, by="sub_menu_title")
    pVal_df$is_diff_price = ifelse(pVal_df$p_value < pricing_CL, "Yes", "No")
    # insignificant difference => no city adjustment (ratio 1)
    pVal_df$mean_ratio = ifelse(pVal_df$is_diff_price == "Yes", pVal_df$mean_ratio, 1)
    pVal_df$local_city = test_c
    master_relative_prices = rbind(master_relative_prices, pVal_df)
    # attach the customer's own sub-menu rows (ratings, Price.Lower/Price.Upper)
    master_relative_prices = inner_join(master_relative_prices, customer_data, by="sub_menu_title")
    ###now get the regression beta for the city
    price_data_control = inner_join(filtered_set %>% filter(!is.na(ratings)),sub_menu_data_all, by="sub_menu_title") %>%
      filter(local_city == test_c) %>%select(local_city, sub_menu_title, ratings, price)
    #clean outliers
    price_cut_off_control = price_data_control %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T))
    price_data_control = inner_join(price_data_control,price_cut_off_control, by="sub_menu_title")
    price_data_control = price_data_control %>% filter((price >0.01) & price <= upperB)
    ratings_data = price_data_control %>% group_by(local_city, ratings) %>% summarise(avg_price = mean(price,na.rm= TRUE))
    linearMod <- lm(avg_price ~ ratings, data=ratings_data)
    modelCoeffs <- summary(linearMod)$coefficients # model coefficients
    beta.estimate <- modelCoeffs["ratings", "Estimate"]
    pval.beta = modelCoeffs["ratings", "Pr(>|t|)"]
    # ratings adjustment only counts when the slope is significant; normalized by the control mean
    master_relative_prices$beta.ratio = (master_relative_prices$ratings * ifelse(pval.beta < pricing_CL, beta.estimate,0))/master_relative_prices$control_mean
    # equal-weight blend of city ratio and ratings ratio
    master_relative_prices$weighted_adj = 0.5*(master_relative_prices$mean_ratio + master_relative_prices$beta.ratio)
    #winsorize price data for selected input and get range for control city
    price_data = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == control_c) %>%select(sub_menu_title, price)
    #clean outliers
    price_cut_off = price_data %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T))
    price_data = inner_join(price_data,price_cut_off, by="sub_menu_title")
    price_data = price_data %>% filter((price >0.01) & price <= upperB) %>% group_by(sub_menu_title) %>%
      summarise(control.lower = min(price, na.rm = T), control.upper = max(price, na.rm =T))
    master_relative_prices = inner_join(master_relative_prices, price_data, by="sub_menu_title")
    # recommended range = blended adjustment applied to the control city's observed range
    master_relative_prices$recommended.lower = round(master_relative_prices$weighted_adj* master_relative_prices$control.lower,2)
    master_relative_prices$recommended.upper = round(master_relative_prices$weighted_adj* master_relative_prices$control.upper,2)
    master_relative_prices = master_relative_prices %>% select(sub_menu_title, Cust.Lower=Price.Lower, Cust.Upper = Price.Upper,
                                                              Recommended.Lower= recommended.lower, Recommended.Upper = recommended.upper)
    master_relative_prices
  } )

  # Long-format price ranges for plotting: customer prices, model-recommended
  # prices and the test city's actual min/max, one row per endpoint, tagged
  # by "party". Duplicates recommendation_engine's pipeline (see NOTE there
  # about price_data_control) and then reshapes for ggplot.
  plotRecommendation = reactive({
    #same function logic as recommendationEngine above except plot recommended price range against customer's initial price points
    master_relative_prices = setNames(data.frame(matrix(ncol = 5, nrow = 0), stringsAsFactors = FALSE),
                                      c("sub_menu_title", "p_value", "mean_ratio",
                                        "is_diff_price","local_city"))
    test_c = tolower(unique(customer_data$city))
    ##prepare test
    price_data_test = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == test_c) %>%select(sub_menu_title, price)
    #clean outliers
    price_cut_off_test = price_data_test %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T))
    price_data_test = inner_join(price_data_test,price_cut_off_test, by="sub_menu_title")
    price_data_test = price_data_test %>% filter((price >0.01) & price <= upperB)
    pVal_df = c()
    ratios_df = c()
    control_mean = c()
    for(sm in sub_menu_ls) {
      control_prices = price_data_control%>% filter(sub_menu_title == sm) %>% select(price)
      test_prices = price_data_test%>% filter(sub_menu_title == sm) %>% select(price)
      pVal_df[sm] = as.numeric((t.test(control_prices, test_prices, alternative = "two.sided")$p.value)[[1]]) #Conducting the t-test for same sub-menu across two cities
      ratios_df[sm] = mean(test_prices$price, na.rm= TRUE)/ mean(control_prices$price, na.rm = TRUE)
      control_mean[sm] = mean(control_prices$price, na.rm = TRUE)
    }
    pVal_df = data.frame(enframe(pVal_df, name="sub_menu_title", value="p_value"), stringsAsFactors = FALSE)
    ratios_df = data.frame(enframe(ratios_df, name="sub_menu_title", value="mean_ratio"), stringsAsFactors = FALSE)
    control_mean_df = data.frame(enframe(control_mean, name="sub_menu_title", value="control_mean"), stringsAsFactors = FALSE)
    pVal_df = inner_join(pVal_df, ratios_df, by="sub_menu_title")
    pVal_df = inner_join(pVal_df, control_mean_df, by="sub_menu_title")
    pVal_df$is_diff_price = ifelse(pVal_df$p_value < pricing_CL, "Yes", "No")
    pVal_df$mean_ratio = ifelse(pVal_df$is_diff_price == "Yes", pVal_df$mean_ratio, 1)
    pVal_df$local_city = test_c
    master_relative_prices = rbind(master_relative_prices, pVal_df)
    master_relative_prices = inner_join(master_relative_prices, customer_data, by="sub_menu_title")
    ###now get the regression beta for the city
    price_data_control = inner_join(filtered_set %>% filter(!is.na(ratings)),sub_menu_data_all, by="sub_menu_title") %>%
      filter(local_city == test_c) %>%select(local_city, sub_menu_title, ratings, price)
    #clean outliers
    price_cut_off_control = price_data_control %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T))
    price_data_control = inner_join(price_data_control,price_cut_off_control, by="sub_menu_title")
    price_data_control = price_data_control %>% filter((price >0.01) & price <= upperB)
    ##get min max range for city, then stack lower/upper into one long "price" column
    city_min_max = price_data_control %>% select(sub_menu_title,price)%>%group_by(sub_menu_title)%>%summarise(actual.lower = min(price), actual.upper= max(price))
    city_min_max = rbind(city_min_max%>%select(sub_menu_title, price =actual.lower), city_min_max%>%select(sub_menu_title, price=actual.upper))
    ratings_data = price_data_control %>% group_by(local_city, ratings) %>% summarise(avg_price = mean(price,na.rm= TRUE))
    linearMod <- lm(avg_price ~ ratings, data=ratings_data)
    modelCoeffs <- summary(linearMod)$coefficients # model coefficients
    beta.estimate <- modelCoeffs["ratings", "Estimate"]
    pval.beta = modelCoeffs["ratings", "Pr(>|t|)"]
    master_relative_prices$beta.ratio = (master_relative_prices$ratings * ifelse(pval.beta < pricing_CL, beta.estimate,0))/master_relative_prices$control_mean
    master_relative_prices$weighted_adj = 0.5*(master_relative_prices$mean_ratio + master_relative_prices$beta.ratio)
    #winsorize price data for selected input and get range for control city
    price_data = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == control_c) %>%select(sub_menu_title, price)
    #clean outliers
    price_cut_off = price_data %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T))
    price_data = inner_join(price_data,price_cut_off, by="sub_menu_title")
    price_data = price_data %>% filter((price >0.01) & price <= upperB) %>% group_by(sub_menu_title) %>% summarise(control.lower
      = min(price, na.rm = T), control.upper = max(price, na.rm =T))
    master_relative_prices = inner_join(master_relative_prices, price_data, by="sub_menu_title")
    master_relative_prices$recommended.lower = round(master_relative_prices$weighted_adj* master_relative_prices$control.lower,2)
    master_relative_prices$recommended.upper = round(master_relative_prices$weighted_adj* master_relative_prices$control.upper,2)
    master_relative_prices = master_relative_prices %>% select(sub_menu_title, Cust.Lower=Price.Lower, Cust.Upper = Price.Upper,
                                                              Recommended.Lower= recommended.lower, Recommended.Upper = recommended.upper)
    # reshape wide [lower, upper] pairs into long rows for ggplot, one frame per party
    custPrices_df = data.frame(matrix(ncol=2, nrow=0))
    colnames(custPrices_df) = c("sub_menu_title","price")
    recommendPrices_df = data.frame(matrix(ncol=2, nrow=0))
    colnames(recommendPrices_df) = c("sub_menu_title","price")
    for(idx in 1:nrow(master_relative_prices)) {
      sub_menu_title = master_relative_prices[idx,"sub_menu_title"]
      price = c(master_relative_prices[idx,"Cust.Lower"],master_relative_prices[idx,"Cust.Upper"])
      temp_df = data.frame(sub_menu_title, price, stringsAsFactors = FALSE)
      custPrices_df= rbind(custPrices_df, temp_df)
      price = c(master_relative_prices[idx,"Recommended.Lower"], master_relative_prices[idx,"Recommended.Upper"])
      temp_df = data.frame(sub_menu_title, price, stringsAsFactors = FALSE)
      recommendPrices_df = rbind(recommendPrices_df,temp_df)
    }
    custPrices_df$party = "Customer Price"
    recommendPrices_df$party = "Recommended(Model) Price"
    city_min_max$party = "Actual Price"
    allPrices = rbind(custPrices_df, recommendPrices_df, city_min_max)
    allPrices
  })

  output$violin_city_prices = renderPlot(#violin plot of winsorized prices per sub-menu for the selected city
    winsor_selected() %>% ggplot(aes(x = sub_menu_title, y = price)) +
      geom_violin(aes(fill = sub_menu_title)) +coord_flip() +
      labs(title = paste("Price distribution sub-menus:",input$selectedCity), x ="sub-menus", y = "meal price($)")
  )

  output$regressOut = renderPlot(#regression dot plot of yes or no, faceted by city
    pVal_matrix() %>% ggplot(aes( x = sub_menu_title, y = is_diff_price )) +
      geom_point(aes(color=is_diff_price), show.legend = F) + coord_flip() + facet_wrap(~local_city) +
      labs(title = "Dot plot showing whether mean of submenu prices in a particular city differ significantly from mean of like submenus in Los Angeles")
  )

  output$ratingsOut = renderPlot(#plot regression line with SE for avg_price ~ ratings regression
    ratings_regression() %>% ggplot( aes(x = ratings, y = avg_price)) +
      geom_point(aes(color = local_city)) + geom_smooth(method = lm, se=T)
  )

  output$recommendedPlot = renderPlot(##plot recommended price against customer suggested price
    plotRecommendation() %>% ggplot(aes(x=sub_menu_title,y=price, color = party)) +
      geom_line(size =2 , position=position_dodge(width=0.4)) +coord_flip()
  )

  output$wcloudOut = renderWordcloud2({#wordcloud output requires its own special render call
    dishname_wCloud() %>% wordcloud2()
  })

  # show regression data using DataTable
  output$betaTable <- DT::renderDataTable({
    datatable(ratings_modelOutputs(), rownames=FALSE)
  })

  # show customer input data using DataTable
  output$customerInput <- DT::renderDataTable({
    datatable(customer_data, rownames=FALSE)
  })

  # show recommended data using DataTable
  output$recommendedTable <- DT::renderDataTable({
    datatable(recommendation_engine(), rownames=FALSE)
  })

  # static HTML "About" copy rendered on the landing tab
  output$aboutProject <- renderText({" <br><br><h3><b><u>Project aim:</u></h3></b> <font size=4>The research question is centered around pricing sub-menu sections of restaurant menus. <br>In particular, I sought to find out if I could generate useful features from the data scraped from the UberEats website to create a simple model to price restaurant menus. <br>End product will be a tool that allows a restaurant operator to submit a spreadsheet of their restaurant’s location, the restaurant’s rating and specific sub-menu items and have the model suggest an indicative price range for each sub-menu item that's accurate and representative of the actual observed data. </font> <br><br><h3><b><u>Data source:</u></b></h3> <font size=4>Data scraped from UberEats.com. The dataset comprises of restaurants, their menus and menu prices located in California. </font> <br><br><h3><b><u>Further work:</u></b></h3> <font size=4>Extend coverage to the entire US. <br>Add more features and improve the accuracy of prediction algorithm. </font> <br><br><h3><b><u>Contact:</u></b></h3> <font size=4>Robert Atuahene: <a href=\"https://www.linkedin.com/in/robert-atuahene-cfa-8aa19b8/\">LinkedIn</a></font> "})

  # show statistics using infoBox (infoBox/renderInfoBox come from shinydashboard,
  # presumably attached in ui.R/global.R -- confirm)
  output$rest_total <- renderInfoBox({
    total_rest <- filtered_set %>% select(url) %>% distinct()%>% summarise(n=n())
    infoBox("Restaurants", total_rest, icon = icon("hand-o-up"))
  })

  output$city_cnt <- renderInfoBox({
    city_total <- filtered_set %>% select(uber_city) %>% filter(endsWith(uber_city, "california")) %>% distinct() %>% summarise(n=n())
    infoBox("Cities analyzed", city_total, icon = icon("hand-o-up"))
  })

  output$total_submenus <- renderInfoBox({
    submenu_cnt = filtered_set %>% select(sub_menu_title) %>% distinct()%>% summarise(n=n())
    infoBox("Unique submenus", submenu_cnt, icon = icon("hand-o-up"))
  })

  output$total_dishes <- renderInfoBox({
    dishes_cnt = filtered_set %>% select(dish_name) %>% distinct()%>% summarise(n=n())
    infoBox("Unique dishes", dishes_cnt, icon = icon("hand-o-up"))
  })

  output$state_cnt <- renderInfoBox({
    state_cnt = filtered_set %>% select(state) %>% distinct()%>% summarise(n=n())
    infoBox("States analyzed", state_cnt, icon = icon("hand-o-up"))
  })

  output$top_city <- renderInfoBox({
    # top_n(1) keeps the city (or ties) with the highest distinct-restaurant count
    top_city = (filtered_set %>% select(name, local_city) %>% distinct() %>% group_by(local_city) %>% summarise(n=n()) %>% top_n(1))$local_city
    infoBox("Top city by restaurant count", top_city, icon = icon("hand-o-up"))
  })
})
/server.R
no_license
rknatuahene/uberEats_shinyApp
R
false
false
24,927
r
library(DT) library(shiny) library(ggplot2) library(dplyr) library(tibble) library(wordcloud2) shinyServer(function(input, output){ # show bar chart of top cities by number of listed restaurants output$bar_rest <- renderPlot(ggplot(data=city_rest_cnt, aes(x=reorder(local_city, -total_cnt),y = total_cnt, fill=local_city, label=total_cnt)) + geom_bar(stat="identity", show.legend = FALSE) +coord_flip() + labs(title ="Count of listed restaurants by city in California", x ="Cities", y = "count of unique restaurants") + geom_label(aes(fill = local_city),colour = "white", fontface = "bold", show.legend = FALSE) ) # show bar graph of top sub-menu items across the state output$bar_submenu = renderPlot(ggplot(data=sub_menu_data_all, aes(x=reorder(sub_menu_title, -total_count),y = total_count)) + geom_bar(stat="identity", fill="steelblue") +coord_flip() + labs(title ="Popular sub-menus(All California)", x ="sub-menu sections", y = "count across menus in CA")) # show bar graph of top sub-menu items for selected city output$bar_city_submenu = renderPlot(ggplot(data=filtered_set %>% filter(local_city == input$selectedCity) %>% select(., name,sub_menu_title) %>% distinct() %>% group_by(.,sub_menu_title)%>%summarise(.,total_count = n()) %>% arrange(.,desc(total_count)) %>%head(10), aes(x=reorder(sub_menu_title, -total_count),y = total_count)) + geom_bar(stat="identity", fill="steelblue") +coord_flip() + labs(title = paste("Popular sub-menus in",input$selectedCity) , x ="sub-menus", y = "city-wide count")) winsor_selected = reactive({ #winsorize price data for selected input price_data = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == input$selectedCity) %>%select(sub_menu_title, price) #clean outliers price_cut_off = price_data %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T)) price_data = inner_join(price_data,price_cut_off, by="sub_menu_title") price_data = price_data %>% filter((price >0.01) & 
price <= upperB) price_data }) pVal_matrix = reactive({ #perform 2 sample t-tests on the means of like sub-menu prices across different cities cities_to_regress = top_cities if(input$selectedCity_reg != "ALL") { cities_to_regress = input$selectedCity_reg } ##create the master dataframe once master_relative_prices = setNames(data.frame(matrix(ncol = 5, nrow = 0), stringsAsFactors = FALSE), c("sub_menu_title", "p_value", "mean_ratio", "is_diff_price","local_city")) CL = (1 - as.numeric(input$selectedConfi))/2 ## 2 tailed test cat("alpha/2 for 2 tail-test:",CL,"\n") cat("user selection:",input$selectedCity_reg,"\n" ) cat(cities_to_regress,"\n") cat(top_cities,"\n") for(test_c in cities_to_regress) { cat("\ndoing t-tests for city:",test_c,"\n") ##prepare test price_data_test = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == test_c) %>%select(sub_menu_title, price) #clean outliers price_cut_off_test = price_data_test %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T)) price_data_test = inner_join(price_data_test,price_cut_off_test, by="sub_menu_title") price_data_test = price_data_test %>% filter((price >0.01) & price <= upperB) pVal_df = c() ratios_df = c() for(sm in sub_menu_ls) { control_prices = price_data_control%>% filter(sub_menu_title == sm) %>% select(price) test_prices = price_data_test%>% filter(sub_menu_title == sm) %>% select(price) pVal_df[sm] = as.numeric((t.test(control_prices, test_prices, alternative = "two.sided")$p.value)[[1]]) #Conducting the t-test for same sub-menu across two cities ratios_df[sm] = mean(test_prices$price, na.rm= TRUE)/ mean(control_prices$price, na.rm = TRUE) } pVal_df = data.frame(enframe(pVal_df, name="sub_menu_title", value="p_value"), stringsAsFactors = FALSE) ratios_df = data.frame(enframe(ratios_df, name="sub_menu_title", value="mean_ratio"), stringsAsFactors = FALSE) pVal_df = inner_join(pVal_df, ratios_df, by="sub_menu_title") 
pVal_df$is_diff_price = ifelse(pVal_df$p_value < CL, "Yes", "No") pVal_df$mean_ratio = ifelse(pVal_df$is_diff_price == "Yes", pVal_df$mean_ratio, 1) pVal_df$local_city = test_c master_relative_prices = rbind(master_relative_prices, pVal_df) } master_relative_prices }) ratings_regression = reactive({ #run linear regression model lm(avg_price ~ ratings -1) #r-squared is much larger without the intercept. #thus regressions down without the intercept cities_to_regress = top_cities if(input$selectedCity_ratings != "ALL") { cities_to_regress = input$selectedCity_ratings } price_data_control = inner_join(filtered_set %>% filter(!is.na(ratings)),sub_menu_data_all, by="sub_menu_title") %>% filter(local_city %in% cities_to_regress) %>% select(local_city, sub_menu_title, ratings, price) #clean outliers price_cut_off_control = price_data_control %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T)) price_data_control = inner_join(price_data_control,price_cut_off_control, by="sub_menu_title") price_data_control = price_data_control %>% filter((price >0.01) & (price <= upperB)) ratings_data = price_data_control %>% group_by(local_city, ratings) %>% summarise(avg_price = mean(price,na.rm= TRUE)) ratings_data }) ratings_modelOutputs = reactive({ ##prepare regression coefficients and r-square for tabular display linearMod <- lm(avg_price ~ ratings, data=ratings_regression()) modelCoeffs <- summary(linearMod)$coefficients # model coefficients beta.estimate <- modelCoeffs["ratings", "Estimate"] pval.beta = modelCoeffs["ratings", "Pr(>|t|)"] adj_rsq = summary(linearMod)$adj.r.squared data.frame("regression_beta1" = round(beta.estimate,5), "p_value" = pval.beta, "adjusted R-sqrd" = round(adj_rsq,5)) }) dishname_wCloud = reactive({ #generate wordclould of words in dish names. freq set to mean price of the dish. 
#uses wordcloud2 cities_to_cloud = top_cities if(input$selectedCity_wcloud != "ALL") { cities_to_cloud = input$selectedCity_wcloud } cat("cities to cloud:",cities_to_cloud,"\n") dishes_top_cities = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") dishes_top_cities = dishes_top_cities%>%filter((local_city %in% cities_to_cloud) & (!is.na(dish_name) & (dish_name !="NA"))) %>% select(sub_menu_title, dish_name, price, local_city) clean_up = dishes_top_cities %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.90, na.rm=T)) %>% filter(upperB > 20) dishes_top_cities = inner_join(dishes_top_cities, clean_up, by="sub_menu_title") dishes_top_cities = dishes_top_cities %>% filter(price >= upperB) %>% group_by(dish_name) %>% summarise(cnt=n(), avg_price = mean(price,na.rm =TRUE))%>% select(word=dish_name,freq=avg_price) dishes_top_cities$word = tolower(dishes_top_cities$word) dishes_top_cities }) recommendation_engine = reactive({ ##all comes down to this. #use regression coefficients, adjust mean prices per menu across cities normalized to prices in Los Angeles #present table of recommended prices based on customer initial input of location, restaurant rating, menu prices master_relative_prices = setNames(data.frame(matrix(ncol = 5, nrow = 0), stringsAsFactors = FALSE), c("sub_menu_title", "p_value", "mean_ratio", "is_diff_price","local_city")) test_c = tolower(unique(customer_data$city)) ##prepare test price_data_test = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == test_c) %>%select(sub_menu_title, price) #clean outliers price_cut_off_test = price_data_test %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T)) price_data_test = inner_join(price_data_test,price_cut_off_test, by="sub_menu_title") price_data_test = price_data_test %>% filter((price >0.01) & price <= upperB) pVal_df = c() ratios_df = c() control_mean = c() for(sm in sub_menu_ls) { 
control_prices = price_data_control%>% filter(sub_menu_title == sm) %>% select(price) test_prices = price_data_test%>% filter(sub_menu_title == sm) %>% select(price) pVal_df[sm] = as.numeric((t.test(control_prices, test_prices, alternative = "two.sided")$p.value)[[1]]) #Conducting the t-test for same sub-menu across two cities ratios_df[sm] = mean(test_prices$price, na.rm= TRUE)/ mean(control_prices$price, na.rm = TRUE) control_mean[sm] = mean(control_prices$price, na.rm = TRUE) } pVal_df = data.frame(enframe(pVal_df, name="sub_menu_title", value="p_value"), stringsAsFactors = FALSE) ratios_df = data.frame(enframe(ratios_df, name="sub_menu_title", value="mean_ratio"), stringsAsFactors = FALSE) control_mean_df = data.frame(enframe(control_mean, name="sub_menu_title", value="control_mean"), stringsAsFactors = FALSE) pVal_df = inner_join(pVal_df, ratios_df, by="sub_menu_title") pVal_df = inner_join(pVal_df, control_mean_df, by="sub_menu_title") pVal_df$is_diff_price = ifelse(pVal_df$p_value < pricing_CL, "Yes", "No") pVal_df$mean_ratio = ifelse(pVal_df$is_diff_price == "Yes", pVal_df$mean_ratio, 1) pVal_df$local_city = test_c master_relative_prices = rbind(master_relative_prices, pVal_df) master_relative_prices = inner_join(master_relative_prices, customer_data, by="sub_menu_title") ###now get the regression beta for the city price_data_control = inner_join(filtered_set %>% filter(!is.na(ratings)),sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == test_c) %>%select(local_city, sub_menu_title, ratings, price) #clean outliers price_cut_off_control = price_data_control %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T)) price_data_control = inner_join(price_data_control,price_cut_off_control, by="sub_menu_title") price_data_control = price_data_control %>% filter((price >0.01) & price <= upperB) ratings_data = price_data_control %>% group_by(local_city, ratings) %>% summarise(avg_price = mean(price,na.rm= TRUE)) 
linearMod <- lm(avg_price ~ ratings, data=ratings_data) modelCoeffs <- summary(linearMod)$coefficients # model coefficients beta.estimate <- modelCoeffs["ratings", "Estimate"] pval.beta = modelCoeffs["ratings", "Pr(>|t|)"] master_relative_prices$beta.ratio = (master_relative_prices$ratings * ifelse(pval.beta < pricing_CL, beta.estimate,0))/master_relative_prices$control_mean master_relative_prices$weighted_adj = 0.5*(master_relative_prices$mean_ratio + master_relative_prices$beta.ratio) #winsorize price data for selected input and get range for control city price_data = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == control_c) %>%select(sub_menu_title, price) #clean outliers price_cut_off = price_data %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T)) price_data = inner_join(price_data,price_cut_off, by="sub_menu_title") price_data = price_data %>% filter((price >0.01) & price <= upperB) %>% group_by(sub_menu_title) %>% summarise(control.lower = min(price, na.rm = T), control.upper = max(price, na.rm =T)) master_relative_prices = inner_join(master_relative_prices, price_data, by="sub_menu_title") master_relative_prices$recommended.lower = round(master_relative_prices$weighted_adj* master_relative_prices$control.lower,2) master_relative_prices$recommended.upper = round(master_relative_prices$weighted_adj* master_relative_prices$control.upper,2) master_relative_prices = master_relative_prices %>% select(sub_menu_title, Cust.Lower=Price.Lower, Cust.Upper = Price.Upper, Recommended.Lower= recommended.lower, Recommended.Upper = recommended.upper) master_relative_prices } ) plotRecommendation = reactive({ #same function logic as recommendationEngine above except plot recommended price range against customer's initial price points master_relative_prices = setNames(data.frame(matrix(ncol = 5, nrow = 0), stringsAsFactors = FALSE), c("sub_menu_title", "p_value", "mean_ratio", 
"is_diff_price","local_city")) test_c = tolower(unique(customer_data$city)) ##prepare test price_data_test = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == test_c) %>%select(sub_menu_title, price) #clean outliers price_cut_off_test = price_data_test %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T)) price_data_test = inner_join(price_data_test,price_cut_off_test, by="sub_menu_title") price_data_test = price_data_test %>% filter((price >0.01) & price <= upperB) pVal_df = c() ratios_df = c() control_mean = c() for(sm in sub_menu_ls) { control_prices = price_data_control%>% filter(sub_menu_title == sm) %>% select(price) test_prices = price_data_test%>% filter(sub_menu_title == sm) %>% select(price) pVal_df[sm] = as.numeric((t.test(control_prices, test_prices, alternative = "two.sided")$p.value)[[1]]) #Conducting the t-test for same sub-menu across two cities ratios_df[sm] = mean(test_prices$price, na.rm= TRUE)/ mean(control_prices$price, na.rm = TRUE) control_mean[sm] = mean(control_prices$price, na.rm = TRUE) } pVal_df = data.frame(enframe(pVal_df, name="sub_menu_title", value="p_value"), stringsAsFactors = FALSE) ratios_df = data.frame(enframe(ratios_df, name="sub_menu_title", value="mean_ratio"), stringsAsFactors = FALSE) control_mean_df = data.frame(enframe(control_mean, name="sub_menu_title", value="control_mean"), stringsAsFactors = FALSE) pVal_df = inner_join(pVal_df, ratios_df, by="sub_menu_title") pVal_df = inner_join(pVal_df, control_mean_df, by="sub_menu_title") pVal_df$is_diff_price = ifelse(pVal_df$p_value < pricing_CL, "Yes", "No") pVal_df$mean_ratio = ifelse(pVal_df$is_diff_price == "Yes", pVal_df$mean_ratio, 1) pVal_df$local_city = test_c master_relative_prices = rbind(master_relative_prices, pVal_df) master_relative_prices = inner_join(master_relative_prices, customer_data, by="sub_menu_title") ###now get the regression beta for the city price_data_control = 
inner_join(filtered_set %>% filter(!is.na(ratings)),sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == test_c) %>%select(local_city, sub_menu_title, ratings, price) #clean outliers price_cut_off_control = price_data_control %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T)) price_data_control = inner_join(price_data_control,price_cut_off_control, by="sub_menu_title") price_data_control = price_data_control %>% filter((price >0.01) & price <= upperB) ##get min max range for city city_min_max = price_data_control %>% select(sub_menu_title,price)%>%group_by(sub_menu_title)%>%summarise(actual.lower = min(price), actual.upper= max(price)) city_min_max = rbind(city_min_max%>%select(sub_menu_title, price =actual.lower), city_min_max%>%select(sub_menu_title, price=actual.upper)) ratings_data = price_data_control %>% group_by(local_city, ratings) %>% summarise(avg_price = mean(price,na.rm= TRUE)) linearMod <- lm(avg_price ~ ratings, data=ratings_data) modelCoeffs <- summary(linearMod)$coefficients # model coefficients beta.estimate <- modelCoeffs["ratings", "Estimate"] pval.beta = modelCoeffs["ratings", "Pr(>|t|)"] master_relative_prices$beta.ratio = (master_relative_prices$ratings * ifelse(pval.beta < pricing_CL, beta.estimate,0))/master_relative_prices$control_mean master_relative_prices$weighted_adj = 0.5*(master_relative_prices$mean_ratio + master_relative_prices$beta.ratio) #winsorize price data for selected input and get range for control city price_data = inner_join(filtered_set,sub_menu_data_all, by="sub_menu_title") %>% filter(local_city == control_c) %>%select(sub_menu_title, price) #clean outliers price_cut_off = price_data %>% group_by(sub_menu_title) %>% summarise(., upperB = quantile(price, probs=0.95, na.rm=T)) price_data = inner_join(price_data,price_cut_off, by="sub_menu_title") price_data = price_data %>% filter((price >0.01) & price <= upperB) %>% group_by(sub_menu_title) %>% summarise(control.lower 
= min(price, na.rm = T), control.upper = max(price, na.rm =T)) master_relative_prices = inner_join(master_relative_prices, price_data, by="sub_menu_title") master_relative_prices$recommended.lower = round(master_relative_prices$weighted_adj* master_relative_prices$control.lower,2) master_relative_prices$recommended.upper = round(master_relative_prices$weighted_adj* master_relative_prices$control.upper,2) master_relative_prices = master_relative_prices %>% select(sub_menu_title, Cust.Lower=Price.Lower, Cust.Upper = Price.Upper, Recommended.Lower= recommended.lower, Recommended.Upper = recommended.upper) custPrices_df = data.frame(matrix(ncol=2, nrow=0)) colnames(custPrices_df) = c("sub_menu_title","price") recommendPrices_df = data.frame(matrix(ncol=2, nrow=0)) colnames(recommendPrices_df) = c("sub_menu_title","price") for(idx in 1:nrow(master_relative_prices)) { sub_menu_title = master_relative_prices[idx,"sub_menu_title"] price = c(master_relative_prices[idx,"Cust.Lower"],master_relative_prices[idx,"Cust.Upper"]) temp_df = data.frame(sub_menu_title, price, stringsAsFactors = FALSE) custPrices_df= rbind(custPrices_df, temp_df) price = c(master_relative_prices[idx,"Recommended.Lower"], master_relative_prices[idx,"Recommended.Upper"]) temp_df = data.frame(sub_menu_title, price, stringsAsFactors = FALSE) recommendPrices_df = rbind(recommendPrices_df,temp_df) } custPrices_df$party = "Customer Price" recommendPrices_df$party = "Recommended(Model) Price" city_min_max$party = "Actual Price" allPrices = rbind(custPrices_df, recommendPrices_df, city_min_max) allPrices }) output$violin_city_prices = renderPlot(#violin plot of mean prices across cities winsor_selected() %>% ggplot(aes(x = sub_menu_title, y = price)) + geom_violin(aes(fill = sub_menu_title)) +coord_flip() + labs(title = paste("Price distribution sub-menus:",input$selectedCity), x ="sub-menus", y = "meal price($)") ) output$regressOut = renderPlot(#regression dot plot of yes or no pVal_matrix() %>% ggplot(aes( 
x = sub_menu_title, y = is_diff_price )) + geom_point(aes(color=is_diff_price), show.legend = F) + coord_flip() + facet_wrap(~local_city) + labs(title = "Dot plot showing whether mean of submenu prices in a particular city differ significantly from mean of like submenus in Los Angeles") ) output$ratingsOut = renderPlot(#plot regression line with SE for avg_price ~ ratings regression ratings_regression() %>% ggplot( aes(x = ratings, y = avg_price)) + geom_point(aes(color = local_city)) + geom_smooth(method = lm, se=T) ) output$recommendedPlot = renderPlot(##plot recommended price against customer suggested price plotRecommendation() %>% ggplot(aes(x=sub_menu_title,y=price, color = party)) + geom_line(size =2 , position=position_dodge(width=0.4)) +coord_flip() ) output$wcloudOut = renderWordcloud2({#wordcloud output requires it's on special call dishname_wCloud() %>% wordcloud2() }) # show regression data using DataTable output$betaTable <- DT::renderDataTable({ datatable(ratings_modelOutputs(), rownames=FALSE) }) # show customer input data using DataTable output$customerInput <- DT::renderDataTable({ datatable(customer_data, rownames=FALSE) }) # show recommended data using DataTable output$recommendedTable <- DT::renderDataTable({ datatable(recommendation_engine(), rownames=FALSE) }) output$aboutProject <- renderText({" <br><br><h3><b><u>Project aim:</u></h3></b> <font size=4>The research question is centered around pricing sub-menu sections of restaurant menus. <br>In particular, I sought to find out if I could generate useful features from the data scraped from the UberEats website to create a simple model to price restaurant menus. <br>End product will be a tool that allows a restaurant operator to submit a spreadsheet of their restaurant’s location, the restaurant’s rating and specific sub-menu items and have the model suggest an indicative price range for each sub-menu item that's accurate and representative of the actual observed data. 
</font> <br><br><h3><b><u>Data source:</u></b></h3> <font size=4>Data scraped from UberEats.com. The dataset comprises of restaurants, their menus and menu prices located in California. </font> <br><br><h3><b><u>Further work:</u></b></h3> <font size=4>Extend coverage to the entire US. <br>Add more features and improve the accuracy of prediction algorithm. </font> <br><br><h3><b><u>Contact:</u></b></h3> <font size=4>Robert Atuahene: <a href=\"https://www.linkedin.com/in/robert-atuahene-cfa-8aa19b8/\">LinkedIn</a></font> "}) # show statistics using infoBox output$rest_total <- renderInfoBox({ total_rest <- filtered_set %>% select(url) %>% distinct()%>% summarise(n=n()) infoBox("Restaurants", total_rest, icon = icon("hand-o-up")) }) output$city_cnt <- renderInfoBox({ city_total <- filtered_set %>% select(uber_city) %>% filter(endsWith(uber_city, "california")) %>% distinct() %>% summarise(n=n()) infoBox("Cities analyzed", city_total, icon = icon("hand-o-up")) }) output$total_submenus <- renderInfoBox({ submenu_cnt = filtered_set %>% select(sub_menu_title) %>% distinct()%>% summarise(n=n()) infoBox("Unique submenus", submenu_cnt, icon = icon("hand-o-up")) }) output$total_dishes <- renderInfoBox({ dishes_cnt = filtered_set %>% select(dish_name) %>% distinct()%>% summarise(n=n()) infoBox("Unique dishes", dishes_cnt, icon = icon("hand-o-up")) }) output$state_cnt <- renderInfoBox({ state_cnt = filtered_set %>% select(state) %>% distinct()%>% summarise(n=n()) infoBox("States analyzed", state_cnt, icon = icon("hand-o-up")) }) output$top_city <- renderInfoBox({ top_city = (filtered_set %>% select(name, local_city) %>% distinct() %>% group_by(local_city) %>% summarise(n=n()) %>% top_n(1))$local_city infoBox("Top city by restaurant count", top_city, icon = icon("hand-o-up")) }) })
# Visualization packages
library(shiny)
library(shinyWidgets)
library(wordcloud)
library(tm)
library(memoise)
library(leaflet)
library(lubridate)

# Static assets injected into the page <head>.
page_head <- tags$head(
  # Include our custom CSS
  includeCSS("styles.css"),
  includeScript("gomap.js")
)

# Full-screen Leaflet map that the floating panels sit on top of.
map_widget <- leafletOutput("map", height = "1000px", width = "100%")

# Hour-of-day bounds (0-23) rendered as two side-by-side dropdowns.
# (An equivalent slider was considered:
#  sliderInput("range_hour", label = "Heure", min = 0, max = 23, value = c(0, 23), step = 1))
hour_filter_row <- fluidRow(
  column(6, selectInput("range_hour_min", label = "heure min", choices = seq(0, 23), selected = 0)),
  column(6, selectInput("range_hour_max", label = "heure max", choices = seq(0, 23), selected = 23))
)

# Right-hand draggable panel holding the query and filter controls.
control_panel <- absolutePanel(
  id = "controls", class = "panel panel-default", fixed = TRUE, draggable = TRUE,
  top = 150, left = "auto", right = 20, bottom = "auto", width = 330, height = "auto",
  h2("Data Exploration"),
  dateRangeInput("dates", label = "Période", start = today() - 365, end = today()),
  searchInput(
    inputId = "keyword", label = "Entrer mots clés",
    placeholder = "A city, theme, etc...",
    btnSearch = icon("search"), btnReset = icon("remove"), width = "450px"
  ),
  h3("Filtres"),
  hour_filter_row,
  selectInput(
    "theme", label = "Sélectionne thème",
    choices = list(
      "black & white" = "black & white", "night" = "night", "street" = "street",
      "sunset" = "sunset", "sunrise" = "sunrise", "--selection--" = "select_theme"
    ),
    selected = "select_theme"
  ),
  # Tag choices are populated server-side (choices = NULL + selectize).
  selectizeInput('theme_tag', 'Sélectionne tag', choices = NULL, multiple = TRUE, selected = NULL),
  radioButtons(
    "select_map", label = h3("Map"),
    choices = c("Trajets" = "trajets", "Les plus vues" = "vues"), selected = "vues"
  )
) #end main panel

# Bottom-left panel with the word cloud and the hour/month histograms.
viz_panel <- absolutePanel(
  class = "panel panel-default", fixed = TRUE, draggable = TRUE,
  bottom = 20, left = 20, width = "260px", height = "auto", style = "opacity:0.70",
  h2("Visualisation"),
  plotOutput("cloud", height = "240px", width = "260px"),
  plotOutput("hist_hour", height = "200px", width = "260px"),
  plotOutput("hist_month", height = "200px", width = "260px")
) #end second panel

# Assemble the page; the child order matches the original inline layout, and
# this final expression is the value of ui.R.
navbarPage("Flickr API", id = "nav", page_head, map_widget, control_panel, viz_panel)
#end navbarPage
#end UI
#shinyApp(ui = ui, server = server)
/ui.R
no_license
AntoineG92/Flickr-photo-search-API
R
false
false
2,618
r
# Visualization packages ("packages de viz")
library(shiny)
library(shinyWidgets)
library(wordcloud)
library(tm)
library(memoise)
library(leaflet)
library(lubridate)

# Top-level UI for the Flickr API explorer: a navbar page with a full-screen
# Leaflet map and two draggable overlay panels (controls and summary plots).
navbarPage("Flickr API", id="nav",
           tags$head(
             # Include our custom CSS
             includeCSS("styles.css"),
             includeScript("gomap.js")
           ),
           # Base map fills the page; the absolute panels float on top of it.
           leafletOutput("map",height="1000px",width="100%"),
           # Right-hand control panel: date range, keyword search and filters.
           absolutePanel(id = "controls", class = "panel panel-default", fixed = TRUE,
                         draggable = TRUE, top = 150, left = "auto", right = 20, bottom = "auto",
                         width = 330, height = "auto",h2("Data Exploration"),
                         # "Période": date window for the query (defaults to the last 365 days).
                         dateRangeInput("dates", label = "Période",start=today()-365, end=today() ),
                         # Free-text keyword search ("Entrer mots clés" = enter keywords).
                         searchInput( inputId = "keyword", label = "Entrer mots clés",
                                      placeholder = "A city, theme, etc...",
                                      btnSearch = icon("search"), btnReset = icon("remove"),
                                      width = "450px"),
                         h3("Filtres"),
                         #sliderInput( "range_hour",label="Heure",min=0,max=23,value=c(0,23),step=1 ),
                         # Hour-of-day bounds (0-23) as two dropdowns instead of the slider above.
                         fluidRow(column(6,selectInput("range_hour_min",label="heure min",choices = seq(0,23),selected=0) ),
                                  column(6,selectInput("range_hour_max",label="heure max",choices = seq(0,23),selected = 23) )),
                         # Predefined photo theme; "select_theme" is the no-selection sentinel.
                         selectInput("theme",label="Sélectionne thème",
                                     choices = list("black & white"="black & white","night"="night","street"="street","sunset"="sunset","sunrise"="sunrise","--selection--"="select_theme"),
                                     selected = "select_theme"),
                         # Tag choices are populated server-side (choices = NULL + selectize).
                         selectizeInput('theme_tag','Sélectionne tag',choices=NULL,multiple=TRUE,selected=NULL),
                         # Map mode: photographer trajectories vs. most-viewed photos.
                         radioButtons("select_map",label=h3("Map"),choices = c("Trajets"="trajets","Les plus vues"="vues"), selected="vues")
           ),#end main panel
           # Bottom-left visualisation panel: word cloud plus hour/month histograms.
           absolutePanel(class = "panel panel-default", fixed = TRUE, draggable = TRUE,
                         bottom=20,left=20,width="260px",height="auto",style="opacity:0.70",h2("Visualisation"),
                         plotOutput("cloud",height="240px",width="260px"),
                         plotOutput("hist_hour",height="200px",width="260px"),
                         plotOutput("hist_month",height="200px",width="260px")
           )#end second panel
)#end navbarPage
#end UI
#shinyApp(ui = ui, server = server)
1 # 20.BirthPrep.r 2 # Birth forms were revised in 2004. Some variables were added some deleted or renames 3 # source("20.BirthPrep.r",echo=TRUE) 4 # 5 rm(list=ls()) 6 7 library(knitr) 8 library(gdata) 9 library(rms) 10 library(gdata) 11 library(RMySQL) 12 library(Hmisc) 13 load("../Data/mihow.RData") 14 15 #> names(mihow) 16 # [1] "clientid" "momnamelast" 17 # [3] "momnamegiven" "momage" 18 # [5] "dropreason" "pregnant" 19 # [7] "jobstatus" "language" 20 # [9] "mihow_participant" "primaryadultpovertylevel" 21 #[11] "momwhite" "mommailingzip" 22 #[13] "programyear" "childdob_date" 23 #[15] "due_date" "momdob_date" 24 #[17] "drop_date" "mommarried" 25 #[19] "datayr" "momeducationcode" 26 #[20] "mid" 27 28 29 con<-dbConnect(MySQL(),user="root",password='emma!',dbname='mihow') 30 sql = "select momnamefirst, momnamelast,datayr, childdob, mommailingzip, " 31 sql = paste( sql, " birthwtgrams, gestationweeksestimated ,gestationweeksgenerated",sep=" ") 32 sql = paste( sql," ,APGAR5minute ,deathind ,NoCongenitalAnomaliesInd ,NoAbnormalconditionsInd",sep=" ") 33 sql = paste( sql," ,NoComplicationsInd ,hospital_id ,NoMedicalRiskFactorsInd ",sep=" ") 34 sql = paste( sql," ,Deliveryvaginal,livebirthstotal ,momhispanicorigin ,NoObstetricProcedureInd",sep=" ") 35 sql = paste( sql," ,ChildIDNum ,momssn ,prenatalcarebeganpregnancy ,previouspretermbirth",sep=" ") 36 sql = paste( sql," ,antibioticsmom ,ICUAdmission ,NICU ,ChildTransferred",sep=" ") 37 sql = paste( sql," ,DeathCertNum,paternitySigned,NoInfectionsInd,momwhite,momage,momeducationcode",sep=" ") 38 sql = paste( sql," ,momdob,mommarried,id,MomNameMaidenLast",sep=" ") 39 sql = paste( sql, " from birth.birth_all",sep=" ") 40 sql = paste( sql, " where datayr in (2007,2008,2009,2010) and momrescounty = 'shelby'",sep=' ') 41 # 42 # create hospital_id from BirthPlaceCode 43 # 44 sql2 = "union select momnamefirst, momnamelast,datayr, childdob, mommailingzip, " 45 sql2 = paste( sql2, " birthwtgrams, gestationweeksestimated 
,gestationweeksgenerated",sep=" ") 46 sql2 = paste( sql2," ,APGAR5minute ,deathind ,NoCongenitalAnomaliesInd ,NoAbnormalconditionsInd",sep=" ") 47 sql2 = paste( sql2," ,NoComplicationsInd ,BirthPlaceCode hospital_id,NoMedicalRiskFactorsInd ",sep=" ") 48 sql2 = paste( sql2," ,DeliveryVaginal,livebirthstotal ,momhispanicorigin ,NoObstetricProcedureInd",sep=" ") 49 sql2 = paste( sql2," ,ChildIDNum ,momssn ,prenatalcarebeganpregnancy ,previouspretermbirth",sep=" ") 50 sql2 = paste( sql2," ,antibioticsmom ,ICUAdmission ,NICU ,ChildTransferred",sep=" ") 51 sql2 = paste( sql2," ,DeathCertNum,paternitySigned,NoInfectionsInd,momwhite,momage,momeducationcode",sep=" ") 52 sql2 = paste( sql2," ,momdob,mommarried,id,MomNameMaidenLast",sep=" ") 53 sql2 = paste( sql2, " from birth_tdh.birth_2010",sep=" ") 54 sql2 = paste( sql2, " where datayr in (2007,2008,2009,2010) and momrescounty = 'shelby'",sep=' ') 55 56 sql <- paste( sql, sql2,sep=" ") 57 res<-dbSendQuery(con,sql) 58 birth<-fetch(res,n= -1) 59 names(birth)<-tolower(names(birth)) 60 birth$momnamelast<-tolower(birth$momnamelast) 61 birth$momnamefirst<-tolower(birth$momnamefirst) 62 birth$momnamemaidenlast<-tolower(birth$momnamemaidenlast) 63 birth$momssn<-ifelse(birth$momssn %in% c('000000000','11111111','222222222','333333333', 64 '444444444','555555555','666666666','777777777','888888888','999999999') ,NA,birth$momssn) 65 birth$momssn<-ifelse(birth$momssn > " ",birth$momssn,NA) 66 67 gg<-birth[(substr(birth$momssn,1,1)==substr(birth$momssn,2,2)) && (substr(birth$momssn,1,1)==substr(birth$momssn,3,3)),'momssn'] 68 # 69 # 70 # 71 # 72 # 73 # 74 birth$momnamelast<-sub("-","",birth$momnamelast) 75 birth$momnamelast<-gsub("'","",birth$momnamelast) 76 birth$momnamemaidenlast<-sub("-"," ",birth$momnamemaidenlast) 77 birth$momnamemaidenlast<-gsub("'","",birth$momnamelast) 78 birth$momnamefirst<-gsub("'","",birth$momnamefirst) 79 birth$momnamefirst<-gsub("-"," ",birth$momnamefirst) 80 81 mihow$momnamelast<-sub("-"," 
",mihow$momnamelast) 82 mihow$momnamelast<-tolower(mihow$momnamelast) 83 mihow$momnamefirst<-tolower(mihow$momnamefirst) 84 85 birth$mommailingzip<-substr(birth$mommailingzip,1,5) 86 birth$birthwtgrams<-ifelse(birth$birthwtgrams==9999,NA,birth$birthwtgrams) 87 birth$gestationweeksestimated<-as.numeric(birth$gestationweeksestimated) 88 birth$gestationweeksestimated<-ifelse( birth$gestationweeksestimated==99,NA,birth$gestationweeksestimated) 89 90 birth$gestationweeksgenerated<-as.numeric(birth$gestationweeksgenerated) 91 birth$gestationweeksgenerated<-ifelse(birth$gestationweeksgenerated > 50 || birth$gestationweeksgenerated == 0 ,NA,birth$gestationweeksgenerated) 92 93 birth$apgar5minute<-as.numeric(birth$apgar5minute) 94 birth$apgar5minute<-ifelse( birth$apgar5minute==99,NA,birth$apgar5minute) 95 96 birth$hospital_id<-as.numeric(birth$hospital_id) 97 birth$hospital_id<-ifelse( birth$hospital_id==0,NA,birth$hospital_id) 98 99 birth$livebirthstotal<-as.numeric(birth$livebirthstotal) 100 birth$livebirthstotal<-ifelse( birth$livebirthstotal==99,NA,birth$livebirthstotal) 101 102 birth$momhispanicorigin<-as.numeric(birth$momhispanicorigin) 103 birth$momhispanicorigin<-ifelse( birth$momhispanicorigin==9,NA,birth$momhispanicorigin) 104 105 birth$prenatalcarebeganpregnancy<-as.numeric(birth$prenatalcarebeganpregnancy) 106 birth$prenatalcarebeganpregnancy<-ifelse( birth$prenatalcarebeganpregnancy==99,NA,birth$prenatalcarebeganpregnancy) 107 108 birth$uid<-row.names(birth) 109 birth$childdobyear<-substr(birth$childdob,1,4) 110 birth$childdobmonth<-substr(birth$childdob,6,7) 111 birth$childdobday<-substr(birth$childdob,9,10) 112 birth$momdobyear<-substr(birth$momdob,1,4) 113 birth$momdobmonth<-substr(birth$momdob,6,7) 114 birth$momdobday<-substr(birth$momdob,9,10) 115 116 save(birth,file='../Data/birth.RData') 117 # 118 #save update birth records in mihow db 119 # 120 gg<-dbWriteTable(con,"birth",birth,row.names=FALSE,overwrite=TRUE) 121 gg<-dbDisconnect(con) 122 rm(gg)
/20.BirthPrepLN.r
no_license
IRRDDv45/Scripts
R
false
false
6,882
r
1 # 20.BirthPrep.r 2 # Birth forms were revised in 2004. Some variables were added some deleted or renames 3 # source("20.BirthPrep.r",echo=TRUE) 4 # 5 rm(list=ls()) 6 7 library(knitr) 8 library(gdata) 9 library(rms) 10 library(gdata) 11 library(RMySQL) 12 library(Hmisc) 13 load("../Data/mihow.RData") 14 15 #> names(mihow) 16 # [1] "clientid" "momnamelast" 17 # [3] "momnamegiven" "momage" 18 # [5] "dropreason" "pregnant" 19 # [7] "jobstatus" "language" 20 # [9] "mihow_participant" "primaryadultpovertylevel" 21 #[11] "momwhite" "mommailingzip" 22 #[13] "programyear" "childdob_date" 23 #[15] "due_date" "momdob_date" 24 #[17] "drop_date" "mommarried" 25 #[19] "datayr" "momeducationcode" 26 #[20] "mid" 27 28 29 con<-dbConnect(MySQL(),user="root",password='emma!',dbname='mihow') 30 sql = "select momnamefirst, momnamelast,datayr, childdob, mommailingzip, " 31 sql = paste( sql, " birthwtgrams, gestationweeksestimated ,gestationweeksgenerated",sep=" ") 32 sql = paste( sql," ,APGAR5minute ,deathind ,NoCongenitalAnomaliesInd ,NoAbnormalconditionsInd",sep=" ") 33 sql = paste( sql," ,NoComplicationsInd ,hospital_id ,NoMedicalRiskFactorsInd ",sep=" ") 34 sql = paste( sql," ,Deliveryvaginal,livebirthstotal ,momhispanicorigin ,NoObstetricProcedureInd",sep=" ") 35 sql = paste( sql," ,ChildIDNum ,momssn ,prenatalcarebeganpregnancy ,previouspretermbirth",sep=" ") 36 sql = paste( sql," ,antibioticsmom ,ICUAdmission ,NICU ,ChildTransferred",sep=" ") 37 sql = paste( sql," ,DeathCertNum,paternitySigned,NoInfectionsInd,momwhite,momage,momeducationcode",sep=" ") 38 sql = paste( sql," ,momdob,mommarried,id,MomNameMaidenLast",sep=" ") 39 sql = paste( sql, " from birth.birth_all",sep=" ") 40 sql = paste( sql, " where datayr in (2007,2008,2009,2010) and momrescounty = 'shelby'",sep=' ') 41 # 42 # create hospital_id from BirthPlaceCode 43 # 44 sql2 = "union select momnamefirst, momnamelast,datayr, childdob, mommailingzip, " 45 sql2 = paste( sql2, " birthwtgrams, gestationweeksestimated 
,gestationweeksgenerated",sep=" ") 46 sql2 = paste( sql2," ,APGAR5minute ,deathind ,NoCongenitalAnomaliesInd ,NoAbnormalconditionsInd",sep=" ") 47 sql2 = paste( sql2," ,NoComplicationsInd ,BirthPlaceCode hospital_id,NoMedicalRiskFactorsInd ",sep=" ") 48 sql2 = paste( sql2," ,DeliveryVaginal,livebirthstotal ,momhispanicorigin ,NoObstetricProcedureInd",sep=" ") 49 sql2 = paste( sql2," ,ChildIDNum ,momssn ,prenatalcarebeganpregnancy ,previouspretermbirth",sep=" ") 50 sql2 = paste( sql2," ,antibioticsmom ,ICUAdmission ,NICU ,ChildTransferred",sep=" ") 51 sql2 = paste( sql2," ,DeathCertNum,paternitySigned,NoInfectionsInd,momwhite,momage,momeducationcode",sep=" ") 52 sql2 = paste( sql2," ,momdob,mommarried,id,MomNameMaidenLast",sep=" ") 53 sql2 = paste( sql2, " from birth_tdh.birth_2010",sep=" ") 54 sql2 = paste( sql2, " where datayr in (2007,2008,2009,2010) and momrescounty = 'shelby'",sep=' ') 55 56 sql <- paste( sql, sql2,sep=" ") 57 res<-dbSendQuery(con,sql) 58 birth<-fetch(res,n= -1) 59 names(birth)<-tolower(names(birth)) 60 birth$momnamelast<-tolower(birth$momnamelast) 61 birth$momnamefirst<-tolower(birth$momnamefirst) 62 birth$momnamemaidenlast<-tolower(birth$momnamemaidenlast) 63 birth$momssn<-ifelse(birth$momssn %in% c('000000000','11111111','222222222','333333333', 64 '444444444','555555555','666666666','777777777','888888888','999999999') ,NA,birth$momssn) 65 birth$momssn<-ifelse(birth$momssn > " ",birth$momssn,NA) 66 67 gg<-birth[(substr(birth$momssn,1,1)==substr(birth$momssn,2,2)) && (substr(birth$momssn,1,1)==substr(birth$momssn,3,3)),'momssn'] 68 # 69 # 70 # 71 # 72 # 73 # 74 birth$momnamelast<-sub("-","",birth$momnamelast) 75 birth$momnamelast<-gsub("'","",birth$momnamelast) 76 birth$momnamemaidenlast<-sub("-"," ",birth$momnamemaidenlast) 77 birth$momnamemaidenlast<-gsub("'","",birth$momnamelast) 78 birth$momnamefirst<-gsub("'","",birth$momnamefirst) 79 birth$momnamefirst<-gsub("-"," ",birth$momnamefirst) 80 81 mihow$momnamelast<-sub("-"," 
",mihow$momnamelast) 82 mihow$momnamelast<-tolower(mihow$momnamelast) 83 mihow$momnamefirst<-tolower(mihow$momnamefirst) 84 85 birth$mommailingzip<-substr(birth$mommailingzip,1,5) 86 birth$birthwtgrams<-ifelse(birth$birthwtgrams==9999,NA,birth$birthwtgrams) 87 birth$gestationweeksestimated<-as.numeric(birth$gestationweeksestimated) 88 birth$gestationweeksestimated<-ifelse( birth$gestationweeksestimated==99,NA,birth$gestationweeksestimated) 89 90 birth$gestationweeksgenerated<-as.numeric(birth$gestationweeksgenerated) 91 birth$gestationweeksgenerated<-ifelse(birth$gestationweeksgenerated > 50 || birth$gestationweeksgenerated == 0 ,NA,birth$gestationweeksgenerated) 92 93 birth$apgar5minute<-as.numeric(birth$apgar5minute) 94 birth$apgar5minute<-ifelse( birth$apgar5minute==99,NA,birth$apgar5minute) 95 96 birth$hospital_id<-as.numeric(birth$hospital_id) 97 birth$hospital_id<-ifelse( birth$hospital_id==0,NA,birth$hospital_id) 98 99 birth$livebirthstotal<-as.numeric(birth$livebirthstotal) 100 birth$livebirthstotal<-ifelse( birth$livebirthstotal==99,NA,birth$livebirthstotal) 101 102 birth$momhispanicorigin<-as.numeric(birth$momhispanicorigin) 103 birth$momhispanicorigin<-ifelse( birth$momhispanicorigin==9,NA,birth$momhispanicorigin) 104 105 birth$prenatalcarebeganpregnancy<-as.numeric(birth$prenatalcarebeganpregnancy) 106 birth$prenatalcarebeganpregnancy<-ifelse( birth$prenatalcarebeganpregnancy==99,NA,birth$prenatalcarebeganpregnancy) 107 108 birth$uid<-row.names(birth) 109 birth$childdobyear<-substr(birth$childdob,1,4) 110 birth$childdobmonth<-subtr(birth$childdob,6,7) 111 birth$childdobday<-substr(birth$childdob,9,10) 112 birth$momdobyear<-substr(birth$momdob,1,4) 113 birth$momdobmonth<-substr(birth$momdob,6,7) 114 birth$momdobday<-substr(birth$momdob,9,10) 115 116 save(birth,file='../Data/birth.RData') 117 # 118 #save update birth records in mihow db 119 # 120 gg<-dbWriteTable(con,"birth",birth,row.names=FALSE,overwrite=TRUE) 121 gg<-dbDisconnect(coSn) 122 rm(gg)
#' Return period function of Generalized Logistic distribution
#'
#' Converts quantile(s) into return period(s) via T = 1 / (1 - F(x)), where F
#' is the GLO cumulative distribution function.
#'
#' @param x quantile/s
#' @param para parameters as c(location, scale, shape)
#'
#' @return Return Period/s corresponding to quantile/s.
#' @author Mohanad Zaghloul [aut, cre], Simon Michael Papalexiou [aut, ths], Amin Elshorbagy [aut, ths]
#' @export
#' @importFrom lmom cdfglo
#'
#' @examples
#'
#' RP <- tglo(x = 0.1, para = c(10, 0.1, 0.2))
#'
tglo <- function(x, para = c(10, 1.5, 1)) {
  # Non-exceedance probability under the fitted GLO distribution.
  non_exceedance <- cdfglo(x, para)
  # Return period is the reciprocal of the exceedance probability
  # (Inf where the CDF evaluates to exactly 1).
  1 / (1 - non_exceedance)
}
/R/tglo.R
no_license
cran/LMoFit
R
false
false
536
r
#' Return period function of Generalized Logistic distribution
#'
#' @param x quantile/s
#' @param para parameters as c(location, scale, shape)
#'
#' @return Return Period/s corresponding to quantile/s.
#' @author Mohanad Zaghloul [aut, cre], Simon Michael Papalexiou [aut, ths], Amin Elshorbagy [aut, ths]
#' @export
#' @importFrom lmom cdfglo
#'
#' @examples
#'
#' RP <- tglo(x = 0.1, para = c(10, 0.1, 0.2))
#'
tglo <- function(x, para = c(10, 1.5, 1)) {
  # Non-exceedance probability of the quantile(s) under the fitted GLO
  p_nonexceed <- cdfglo(x, para)
  # The return period is the reciprocal of the exceedance probability
  1 / (1 - p_nonexceed)
}
# Histogram of mean hourly wage (H_MEAN) for every job title / year pair.
# Depends on globals: df_final_matches, job_v, year_v, Plot_gg.
for (job in job_v) {
  for (val in year_v) {
    # Rows matching this job title in this year
    Cloud_engineer <- df_final_matches %>%
      filter(str_detect(Allwyn, job)) %>%
      arrange(Year) %>%
      filter(str_detect(Year, val))

    Plot_gg[[val]][[job]] <- ggplot(Cloud_engineer %>% select(H_MEAN),
                                    aes(x = H_MEAN, fill = ..count..)) +
      geom_histogram() +
      scale_fill_gradient("Legend", low = "green", high = "blue") +
      labs(title = glue('{job} {val}')) +
      ylab("Counts") +
      xlab("States")

    print(Plot_gg[[val]][[job]])
  }
}

# Bar plot of record counts by state for one job title (`y`) and year (`x`).
# NOTE(review): the assignment to Plot_gg inside the function modifies a local
# copy only; callers relying on the side effect should capture the return.
my_bar_plot <- function(y, x) {
  # BUG FIX: the body previously filtered on the loop globals `job`/`val`
  # instead of the function's own arguments, so every call plotted whatever
  # the last loop iteration left behind.
  Cloud_engineer <- df_final_matches %>%
    filter(str_detect(Allwyn, y)) %>%
    arrange(Year) %>%
    filter(str_detect(Year, x))

  Plot_gg[[y]][[x]] <- ggplot(Cloud_engineer %>% select(PRIM_STATE),
                              aes(x = PRIM_STATE, color = PRIM_STATE)) +
    stat_count(geom = "bar") +
    coord_flip() +
    labs(title = glue('{y} {x}')) +
    ylab("Counts") +
    xlab("States")

  print(Plot_gg[[y]][[x]])
}

test <- my_bar_plot("IT Project Manager I", "2010")

# BUG FIX: removed `map(job_v, year_v, my_bar_plot)` — purrr::map() takes a
# single input; pairing two vectors requires map2(), which follows.
map2(job_v, year_v, my_bar_plot)

# Same state-count bar plot, drawn for every job/year pair.
for (job in job_v) {
  for (val in year_v) {
    Cloud_engineer <- df_final_matches %>%
      filter(str_detect(Allwyn, job)) %>%
      arrange(Year) %>%
      filter(str_detect(Year, val))

    Plot_gg[[val]][[job]] <- ggplot(Cloud_engineer %>% select(PRIM_STATE),
                                    aes(x = PRIM_STATE, color = PRIM_STATE)) +
      stat_count(geom = "bar") +
      coord_flip() +
      labs(title = glue('{job} {val}')) +
      ylab("Counts") +
      xlab("States")

    print(Plot_gg[[val]][[job]])
  }
}
/Scripts/Data_Exploration/Loop_Through_All_Job_Year_Histogram_Plot.R
no_license
laborlab/Labor_Lab_Project
R
false
false
1,595
r
# Histogram of mean hourly wage (H_MEAN) for every job title / year pair.
# Depends on globals: df_final_matches, job_v, year_v, Plot_gg.
for (job in job_v) {
  for (val in year_v) {
    # Rows matching this job title in this year
    Cloud_engineer <- df_final_matches %>%
      filter(str_detect(Allwyn, job)) %>%
      arrange(Year) %>%
      filter(str_detect(Year, val))

    Plot_gg[[val]][[job]] <- ggplot(Cloud_engineer %>% select(H_MEAN),
                                    aes(x = H_MEAN, fill = ..count..)) +
      geom_histogram() +
      scale_fill_gradient("Legend", low = "green", high = "blue") +
      labs(title = glue('{job} {val}')) +
      ylab("Counts") +
      xlab("States")

    print(Plot_gg[[val]][[job]])
  }
}

# Bar plot of record counts by state for one job title (`y`) and year (`x`).
# NOTE(review): the assignment to Plot_gg inside the function modifies a local
# copy only; callers relying on the side effect should capture the return.
my_bar_plot <- function(y, x) {
  # BUG FIX: the body previously filtered on the loop globals `job`/`val`
  # instead of the function's own arguments, so every call plotted whatever
  # the last loop iteration left behind.
  Cloud_engineer <- df_final_matches %>%
    filter(str_detect(Allwyn, y)) %>%
    arrange(Year) %>%
    filter(str_detect(Year, x))

  Plot_gg[[y]][[x]] <- ggplot(Cloud_engineer %>% select(PRIM_STATE),
                              aes(x = PRIM_STATE, color = PRIM_STATE)) +
    stat_count(geom = "bar") +
    coord_flip() +
    labs(title = glue('{y} {x}')) +
    ylab("Counts") +
    xlab("States")

  print(Plot_gg[[y]][[x]])
}

test <- my_bar_plot("IT Project Manager I", "2010")

# BUG FIX: removed `map(job_v, year_v, my_bar_plot)` — purrr::map() takes a
# single input; pairing two vectors requires map2(), which follows.
map2(job_v, year_v, my_bar_plot)

# Same state-count bar plot, drawn for every job/year pair.
for (job in job_v) {
  for (val in year_v) {
    Cloud_engineer <- df_final_matches %>%
      filter(str_detect(Allwyn, job)) %>%
      arrange(Year) %>%
      filter(str_detect(Year, val))

    Plot_gg[[val]][[job]] <- ggplot(Cloud_engineer %>% select(PRIM_STATE),
                                    aes(x = PRIM_STATE, color = PRIM_STATE)) +
      stat_count(geom = "bar") +
      coord_flip() +
      labs(title = glue('{job} {val}')) +
      ylab("Counts") +
      xlab("States")

    print(Plot_gg[[val]][[job]])
  }
}
#*[-------------------------------------------------------------------]*#
#*[ This function has been edited. See edit below (line 42)            ]*#
#*[ Source: https://github.com/stefvanbuuren/mice                      ]*#
#*[-------------------------------------------------------------------]*#

# Two-level normal imputation with heterogeneous within-class variances,
# drawn from a Gibbs sampler (a patched copy of mice's 2l.norm method).
#   y         : incomplete numeric outcome vector
#   ry        : logical vector, TRUE where y is observed
#   x         : design matrix; the column with type -2 is the class id,
#               columns with type 2 are random-effect predictors
#   type      : per-column codes of x (-2 = class, 2 = predictor)
#   intercept : when TRUE, prepend a random-intercept column to x
# Returns a numeric vector of imputed values for the missing entries of y.
mice.impute.2l.norm2 <- function(y, ry, x, type, intercept = TRUE, ...) {
  rwishart <- function(df, p = nrow(SqrtSigma), SqrtSigma = diag(p)) {
    ## rwishart, written by Bill Venables
    Z <- matrix(0, p, p)
    diag(Z) <- sqrt(rchisq(p, df:(df - p + 1)))
    if (p > 1) {
      pseq <- 1:(p - 1)
      Z[rep(p * pseq, pseq) + unlist(lapply(pseq, seq))] <- rnorm(p * (p - 1)/2)
    }
    crossprod(Z %*% SqrtSigma)
  }

  # Keep adding a growing ridge to the diagonal until chol() succeeds.
  force.chol <- function(x, warn = TRUE) {
    z <- 0
    repeat {
      lambda <- 0.1 * z
      XT <- x + diag(x = lambda, nrow = nrow(x))
      XT <- (XT + t(XT))/2
      s <- try(expr = chol(XT), silent = TRUE)
      # BUG FIX: use inherits() instead of `class(s) != "try-error"`;
      # class() may return a vector of length > 1 (e.g. matrices inherit
      # from "array" in R >= 4.0), which breaks the scalar comparison.
      if (!inherits(s, "try-error"))
        break
      z <- z + 1
    }
    attr(s, "forced") <- (z > 0)
    if (warn && z > 0)
      warning("Cholesky decomposition had to be forced", call. = FALSE)
    return(s)
  }

  ## added SvB 21jul2016
  symridge <- function(x, ridge = 0.0001, ...) {
    x <- as.matrix((x + t(x))/2)
    x + diag(as.matrix(diag(x) * ridge))  # <----- THIS IS THE EDIT
  }

  ## written by Roel de Jong
  ## append intercept
  if (intercept) {
    x <- cbind(1, as.matrix(x))
    type <- c(2, type)
  }

  ## Initialize
  n.iter <- 100
  nry <- !ry
  n.class <- length(unique(x[, type == (-2)]))
  if (n.class == 0) stop("No class variable")  ## SvB 27apr2013
  gf.full <- factor(x[, type == (-2)], labels = 1:n.class)
  gf <- gf.full[ry]
  XG <- split.data.frame(as.matrix(x[ry, type == 2]), gf)
  X.SS <- lapply(XG, crossprod)
  yg <- split(as.vector(y[ry]), gf)
  n.g <- tabulate(gf)
  n.rc <- ncol(XG[[1]])

  bees <- matrix(0, nrow = n.class, ncol = n.rc)
  ss <- vector(mode = "numeric", length = n.class)
  mu <- rep(0, n.rc)
  inv.psi <- diag(1, n.rc, n.rc)
  inv.sigma2 <- rep(1, n.class)
  sigma2.0 <- 1
  theta <- 1

  ## Execute Gibbs sampler
  for (iter in 1:n.iter) {
    ## Draw bees (per-class regression coefficients)
    for (class in 1:n.class) {
      vv <- symridge(inv.sigma2[class] * X.SS[[class]] + inv.psi, ...)
      bees.var <- chol2inv(chol(vv))
      bees[class, ] <- drop(bees.var %*% (crossprod(inv.sigma2[class] * XG[[class]], yg[[class]]) + inv.psi %*% mu)) +
        drop(rnorm(n = n.rc) %*% chol(symridge(bees.var, ...)))
      ss[class] <- crossprod(yg[[class]] - XG[[class]] %*% bees[class, ])
    }
    ## Draw mu
    mu <- colMeans(bees) +
      drop(rnorm(n = n.rc) %*% chol(chol2inv(chol(symridge(inv.psi, ...)))/n.class))
    ## Draw psi
    inv.psi <- rwishart(df = n.class - n.rc - 1,
                        SqrtSigma = chol(chol2inv(chol(symridge(crossprod(t(t(bees) - mu)), ...)))))
    ## Draw sigma2
    inv.sigma2 <- rgamma(n.class, n.g/2 + 1/(2 * theta),
                         scale = 2 * theta/(ss * theta + sigma2.0))
    ## Draw sigma2.0
    H <- 1/mean(inv.sigma2)  # Harmonic mean
    sigma2.0 <- rgamma(1, n.class/(2 * theta) + 1, scale = 2 * theta * H/n.class)
    ## Draw theta
    G <- exp(mean(log(1/inv.sigma2)))  # Geometric mean
    theta <- 1/rgamma(1, n.class/2 - 1,
                      scale = 2/(n.class * (sigma2.0/H - log(sigma2.0) + log(G) - 1)))
  }

  ## Generate imputations: per-class residual noise plus the linear predictor
  imps <- rnorm(n = sum(nry), sd = sqrt(1/inv.sigma2[gf.full[nry]])) +
    rowSums(as.matrix(x[nry, type == 2, drop = FALSE]) * bees[gf.full[nry], ])
  return(imps)
}

# Give the patched copy access to mice's internal namespace.
environment(mice.impute.2l.norm2) <- asNamespace('mice')
/Results_Using_MICE/mice.impute.2l.norm2.R
no_license
mariapaquin/persiann-cdr-eval
R
false
false
3,779
r
#*[-------------------------------------------------------------------]*#
#*[ This function has been edited. See edit below (line 42)            ]*#
#*[ Source: https://github.com/stefvanbuuren/mice                      ]*#
#*[-------------------------------------------------------------------]*#

# Two-level normal imputation with heterogeneous within-class variances,
# drawn from a Gibbs sampler (a patched copy of mice's 2l.norm method).
#   y         : incomplete numeric outcome vector
#   ry        : logical vector, TRUE where y is observed
#   x         : design matrix; the column with type -2 is the class id,
#               columns with type 2 are random-effect predictors
#   type      : per-column codes of x (-2 = class, 2 = predictor)
#   intercept : when TRUE, prepend a random-intercept column to x
# Returns a numeric vector of imputed values for the missing entries of y.
mice.impute.2l.norm2 <- function(y, ry, x, type, intercept = TRUE, ...) {
  rwishart <- function(df, p = nrow(SqrtSigma), SqrtSigma = diag(p)) {
    ## rwishart, written by Bill Venables
    Z <- matrix(0, p, p)
    diag(Z) <- sqrt(rchisq(p, df:(df - p + 1)))
    if (p > 1) {
      pseq <- 1:(p - 1)
      Z[rep(p * pseq, pseq) + unlist(lapply(pseq, seq))] <- rnorm(p * (p - 1)/2)
    }
    crossprod(Z %*% SqrtSigma)
  }

  # Keep adding a growing ridge to the diagonal until chol() succeeds.
  force.chol <- function(x, warn = TRUE) {
    z <- 0
    repeat {
      lambda <- 0.1 * z
      XT <- x + diag(x = lambda, nrow = nrow(x))
      XT <- (XT + t(XT))/2
      s <- try(expr = chol(XT), silent = TRUE)
      # BUG FIX: use inherits() instead of `class(s) != "try-error"`;
      # class() may return a vector of length > 1 (e.g. matrices inherit
      # from "array" in R >= 4.0), which breaks the scalar comparison.
      if (!inherits(s, "try-error"))
        break
      z <- z + 1
    }
    attr(s, "forced") <- (z > 0)
    if (warn && z > 0)
      warning("Cholesky decomposition had to be forced", call. = FALSE)
    return(s)
  }

  ## added SvB 21jul2016
  symridge <- function(x, ridge = 0.0001, ...) {
    x <- as.matrix((x + t(x))/2)
    x + diag(as.matrix(diag(x) * ridge))  # <----- THIS IS THE EDIT
  }

  ## written by Roel de Jong
  ## append intercept
  if (intercept) {
    x <- cbind(1, as.matrix(x))
    type <- c(2, type)
  }

  ## Initialize
  n.iter <- 100
  nry <- !ry
  n.class <- length(unique(x[, type == (-2)]))
  if (n.class == 0) stop("No class variable")  ## SvB 27apr2013
  gf.full <- factor(x[, type == (-2)], labels = 1:n.class)
  gf <- gf.full[ry]
  XG <- split.data.frame(as.matrix(x[ry, type == 2]), gf)
  X.SS <- lapply(XG, crossprod)
  yg <- split(as.vector(y[ry]), gf)
  n.g <- tabulate(gf)
  n.rc <- ncol(XG[[1]])

  bees <- matrix(0, nrow = n.class, ncol = n.rc)
  ss <- vector(mode = "numeric", length = n.class)
  mu <- rep(0, n.rc)
  inv.psi <- diag(1, n.rc, n.rc)
  inv.sigma2 <- rep(1, n.class)
  sigma2.0 <- 1
  theta <- 1

  ## Execute Gibbs sampler
  for (iter in 1:n.iter) {
    ## Draw bees (per-class regression coefficients)
    for (class in 1:n.class) {
      vv <- symridge(inv.sigma2[class] * X.SS[[class]] + inv.psi, ...)
      bees.var <- chol2inv(chol(vv))
      bees[class, ] <- drop(bees.var %*% (crossprod(inv.sigma2[class] * XG[[class]], yg[[class]]) + inv.psi %*% mu)) +
        drop(rnorm(n = n.rc) %*% chol(symridge(bees.var, ...)))
      ss[class] <- crossprod(yg[[class]] - XG[[class]] %*% bees[class, ])
    }
    ## Draw mu
    mu <- colMeans(bees) +
      drop(rnorm(n = n.rc) %*% chol(chol2inv(chol(symridge(inv.psi, ...)))/n.class))
    ## Draw psi
    inv.psi <- rwishart(df = n.class - n.rc - 1,
                        SqrtSigma = chol(chol2inv(chol(symridge(crossprod(t(t(bees) - mu)), ...)))))
    ## Draw sigma2
    inv.sigma2 <- rgamma(n.class, n.g/2 + 1/(2 * theta),
                         scale = 2 * theta/(ss * theta + sigma2.0))
    ## Draw sigma2.0
    H <- 1/mean(inv.sigma2)  # Harmonic mean
    sigma2.0 <- rgamma(1, n.class/(2 * theta) + 1, scale = 2 * theta * H/n.class)
    ## Draw theta
    G <- exp(mean(log(1/inv.sigma2)))  # Geometric mean
    theta <- 1/rgamma(1, n.class/2 - 1,
                      scale = 2/(n.class * (sigma2.0/H - log(sigma2.0) + log(G) - 1)))
  }

  ## Generate imputations: per-class residual noise plus the linear predictor
  imps <- rnorm(n = sum(nry), sd = sqrt(1/inv.sigma2[gf.full[nry]])) +
    rowSums(as.matrix(x[nry, type == 2, drop = FALSE]) * bees[gf.full[nry], ])
  return(imps)
}

# Give the patched copy access to mice's internal namespace.
environment(mice.impute.2l.norm2) <- asNamespace('mice')
# knb 20180302
# https://github.com/traffordDataLab/assets/tree/master/theme/ggplot2

# Lab ggplot2 theme: transparent backgrounds, dotted grey grid, Roboto for the
# base text and Open Sans for axes/strip text. Builds on theme_grey() and
# overrides selected elements via %+replace%.
theme_lab <- function () {
  # Palette and typography reused across the theme
  grey_text <- "#757575"
  dark_text <- "#212121"
  body_font <- "Open Sans"

  theme_grey(base_size = 11.5, base_family = "Roboto") %+replace%
    theme(
      # add padding to the plot
      plot.margin = unit(rep(0.5, 4), "cm"),

      # remove the plot background and border
      plot.background = element_blank(),
      panel.background = element_blank(),
      panel.border = element_blank(),

      # make the legend and strip background transparent
      legend.background = element_rect(fill = "transparent", colour = NA),
      legend.key = element_rect(fill = "transparent", colour = NA),
      strip.background = element_rect(fill = "transparent", colour = NA),

      # add light, dotted major grid lines only
      panel.grid.major = element_line(linetype = "dotted", colour = grey_text, size = 0.3),
      panel.grid.minor = element_blank(),

      # remove the axis tick marks and hide axis lines
      axis.ticks = element_blank(),
      axis.line = element_line(color = "#FFFFFF", size = 0.3),

      # modify the bottom margins of the title and subtitle
      plot.title = element_text(size = 18, colour = grey_text, hjust = 0, margin = margin(b = 4)),
      plot.subtitle = element_text(size = 12, colour = grey_text, hjust = 0, margin = margin(b = 10)),

      # add padding to the caption
      plot.caption = element_text(size = 10, colour = dark_text, hjust = 1, margin = margin(t = 15)),

      # Open Sans for axes titles, tick labels, legend title/key, strip text
      axis.title = element_text(family = body_font, size = 11, colour = grey_text, face = "plain", hjust = 1),
      axis.text = element_text(family = body_font, size = 10, colour = grey_text, face = "plain"),
      legend.title = element_text(size = 12, colour = grey_text),
      legend.text = element_text(size = 10, colour = grey_text),
      strip.text = element_text(family = body_font, size = 12, colour = grey_text, face = "plain")
    )
}
/ggplot2/theme_lab.R
no_license
knbknb/R_utils
R
false
false
2,000
r
# knb 20180302
# https://github.com/traffordDataLab/assets/tree/master/theme/ggplot2

# Lab ggplot2 theme: transparent backgrounds, dotted grey grid, Roboto for the
# base text and Open Sans for axes/strip text. Builds on theme_grey() and
# overrides selected elements via %+replace%.
theme_lab <- function () {
  # Palette and typography reused across the theme
  grey_text <- "#757575"
  dark_text <- "#212121"
  body_font <- "Open Sans"

  theme_grey(base_size = 11.5, base_family = "Roboto") %+replace%
    theme(
      # add padding to the plot
      plot.margin = unit(rep(0.5, 4), "cm"),

      # remove the plot background and border
      plot.background = element_blank(),
      panel.background = element_blank(),
      panel.border = element_blank(),

      # make the legend and strip background transparent
      legend.background = element_rect(fill = "transparent", colour = NA),
      legend.key = element_rect(fill = "transparent", colour = NA),
      strip.background = element_rect(fill = "transparent", colour = NA),

      # add light, dotted major grid lines only
      panel.grid.major = element_line(linetype = "dotted", colour = grey_text, size = 0.3),
      panel.grid.minor = element_blank(),

      # remove the axis tick marks and hide axis lines
      axis.ticks = element_blank(),
      axis.line = element_line(color = "#FFFFFF", size = 0.3),

      # modify the bottom margins of the title and subtitle
      plot.title = element_text(size = 18, colour = grey_text, hjust = 0, margin = margin(b = 4)),
      plot.subtitle = element_text(size = 12, colour = grey_text, hjust = 0, margin = margin(b = 10)),

      # add padding to the caption
      plot.caption = element_text(size = 10, colour = dark_text, hjust = 1, margin = margin(t = 15)),

      # Open Sans for axes titles, tick labels, legend title/key, strip text
      axis.title = element_text(family = body_font, size = 11, colour = grey_text, face = "plain", hjust = 1),
      axis.text = element_text(family = body_font, size = 10, colour = grey_text, face = "plain"),
      legend.title = element_text(size = 12, colour = grey_text),
      legend.text = element_text(size = 10, colour = grey_text),
      strip.text = element_text(family = body_font, size = 12, colour = grey_text, face = "plain")
    )
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllGenerics.R, R/Deletion-class.R \name{indexProper} \alias{indexProper} \alias{indexProper<-} \alias{indexImproper<-} \alias{indexImproper} \alias{indexProper<-,StructuralVariant,list-method} \alias{indexProper,StructuralVariant,list-method} \alias{indexImproper<-,StructuralVariant,list-method} \alias{indexImproper,StructuralVariant,list-method} \alias{indexProper,StructuralVariant-method} \alias{indexImproper,StructuralVariant-method} \title{Accessor of an index for the proper read pairs} \usage{ indexProper(object) indexProper(object) <- value indexImproper(object) <- value indexImproper(object) \S4method{indexProper}{StructuralVariant,list}(object) <- value \S4method{indexImproper}{StructuralVariant,list}(object) <- value \S4method{indexProper}{StructuralVariant}(object) \S4method{indexProper}{StructuralVariant,list}(object) <- value \S4method{indexImproper}{StructuralVariant}(object) \S4method{indexImproper}{StructuralVariant,list}(object) <- value } \arguments{ \item{object}{a \code{StructuralVariant}} \item{value}{a list} } \value{ a numeric vector } \description{ Accessor of an index for the proper read pairs } \keyword{internal}
/man/indexing-methods.Rd
no_license
cancer-genomics/trellis
R
false
true
1,244
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllGenerics.R, R/Deletion-class.R \name{indexProper} \alias{indexProper} \alias{indexProper<-} \alias{indexImproper<-} \alias{indexImproper} \alias{indexProper<-,StructuralVariant,list-method} \alias{indexProper,StructuralVariant,list-method} \alias{indexImproper<-,StructuralVariant,list-method} \alias{indexImproper,StructuralVariant,list-method} \alias{indexProper,StructuralVariant-method} \alias{indexImproper,StructuralVariant-method} \title{Accessor of an index for the proper read pairs} \usage{ indexProper(object) indexProper(object) <- value indexImproper(object) <- value indexImproper(object) \S4method{indexProper}{StructuralVariant,list}(object) <- value \S4method{indexImproper}{StructuralVariant,list}(object) <- value \S4method{indexProper}{StructuralVariant}(object) \S4method{indexProper}{StructuralVariant,list}(object) <- value \S4method{indexImproper}{StructuralVariant}(object) \S4method{indexImproper}{StructuralVariant,list}(object) <- value } \arguments{ \item{object}{a \code{StructuralVariant}} \item{value}{a list} } \value{ a numeric vector } \description{ Accessor of an index for the proper read pairs } \keyword{internal}
#' Takes a numeric vector and maps it into a finite length sequence
#' @param x A numeric or integer vector.
#' @param nlevels Integer scalar. Length of the sequence to be map onto.
#' @param as_factor Logical scalar. When \code{TRUE} the resulting vector is factor.
#' @return A vector of length \code{length(x)} with values mapped to a sequence
#' with \code{nlevels} unique values
#' @export
#' @examples
#'
#' x <- rnorm(100)
#' w <- data.frame(as.integer(round_to_seq(x, as_factor = TRUE)),x)
#' plot(w,x)
#'
#' @seealso Used in \code{\link{diffmap}} and \code{\link{plot_diffnet2}}
round_to_seq <- function(x, nlevels=20, as_factor=FALSE) {
  # Evenly spaced grid spanning the finite range of x
  y <- range(x, na.rm = TRUE, finite=TRUE)
  y <- seq(y[1], y[2], length.out = nlevels)
  # Snap each value to its nearest grid point; NA stays NA.
  # IMPROVEMENT: vapply (not sapply) guarantees a numeric return regardless
  # of input shape, avoiding sapply's type instability on edge cases.
  y <- vapply(x, function(z) {
    if (is.na(z)) return(NA_real_)
    y[which.min(abs(y-z))]
  }, numeric(1))
  # factor(c(1,3), levels = 1:3, labels = letters[1:3])
  if (as_factor) as.factor(y)
  else y
}
#' Another way of visualizing diffusion
#' @templateVar toa TRUE
#' @templateVar slice TRUE
#' @template graph_template
#' @template plotting_template
#' @param pers Integer vector of length \eqn{T} indicating the time periods of the data.
#' @param color.ramp A function as returned by \code{\link[grDevices:colorRamp]{colorRamp}}.
#' @param layout Passed to \code{\link[igraph:plot.igraph]{plot.igraph}}.
#' @param key.width Numeric scalar. Sets the proportion of the plot (x-axis) that the key uses.
#' @param key.args List. Further arguments to be passed to \code{\link{drawColorKey}}.
#' @param main Character scalar. Title of the graph.
#' @param add.map Character scalar. When \code{"first"} plots a \code{\link{diffusionMap}} before the
#' graph itself. If \code{"last"} then it adds it at the end. When \code{NULL} adds nothing.
#' @param diffmap.args List. If \code{add.map=TRUE}, arguments passed to \code{diffusionMap}.
#' @param diffmap.alpha Numeric scalar between [0,1]. Alpha level for the map.
#' @param include.white Character scalar.
Includes white in the color palette used in the map. #' When \code{include.white=NULL} then it won't include it. #' @param ... Further arguments passed to \code{\link[igraph:plot.igraph]{plot.igraph}}. #' @param no.graph Logical scala. When \code{TRUE} the graph is not drawn. This only makes #' sense when the option \code{add.map} is active. #' @details If \code{key.width<=0} then no key is created. #' #' By defult, the function passes the following values to \code{plot.igraph}: #' #' \itemize{ #' \item{\code{vertex.label} equals to \code{""}} #' \item{\code{vertex.frame.color} equals to \code{"white"}} #' \item{\code{add} equals to \code{TRUE}} #' \item{\code{rescale} equals to \code{FALSE}} #' \item{\code{vertex.size} equals to \code{rescale.fun(vertex.size)}} #' } #' #' @return A list with the following elements #' \item{layout}{A numeric matrix with vertex coordinates.} #' \item{vertex.color}{A character vector with computed colors for each vertex.} #' \item{vertex.label}{The value passed to \code{plot_diffnet2}.} #' \item{vertex.shape}{A character vector with assigned shapes.} #' \item{vertex.size}{A numeric vector with vertices sizes} #' \item{diffmap}{If \code{add.map=TRUE}, the returned values from \code{\link{diffmap}}} #' @export #' @family visualizations #' @author George G. Vega Yon plot_diffnet2 <- function(graph, ...) UseMethod("plot_diffnet2") #' @rdname plot_diffnet2 #' @export #' @include diffnet-methods.r data.r plot_diffnet2.diffnet <- function( graph, toa, slice = nslices(graph), ... ) { if (missing(toa)) toa <- graph$toa plot_diffnet2.default( graph = graph$graph[[slice]], toa = toa, pers = graph$meta$pers, ...) 
} #' @rdname plot_diffnet2 #' @export plot_diffnet2.default <- function( graph, toa, pers = min(toa, na.rm = TRUE):max(toa, na.rm = TRUE), color.ramp = grDevices::colorRamp(c("steelblue","gray", "tomato")), layout = NULL, key.width = 0.1, key.args = list(), main = "Diffusion dynamics", add.map = NULL, diffmap.args = list(kde2d.args=list(n=100)), diffmap.alpha = .5, include.white = "first", vertex.size = "degree", minmax.relative.size = getOption("diffnet.minmax.relative.size", c(0.01, 0.04)), no.graph = FALSE, ...) { # Modifying some arguments oldpar <- graphics::par(no.readonly = TRUE) on.exit(graphics::par(oldpar)) par(xpd = NA) # Collecting arguments igraph.args <- list(...) # Some constants nper <- length(pers) if (length(add.map) && !(add.map %in% c("first", "last"))) stop("When -add.map- is specified it should be either \'first\' or \'last\'.") if (!length(add.map) & no.graph) stop("If -no.graph=TRUE- then you should specify some value for -add.map-.") # Taggin types --------------------------------------------------------------- # 1st adopters type_1st <- toa == pers[nper] # Non Adopters type_non <- is.na(toa) # Adopters type_adopt <- which(!type_1st & !type_non) type_1st <- which(type_1st) type_non <- which(type_non) # Colors t01 <- pers t01 <- c(t01[1], t01[nper]) col <- color.ramp( (toa - t01[1])/(t01[2] - t01[1]) ) # Adding alpha if (ncol(col) < 4) col <- cbind(col, 255) col[type_non,] <- 255 col <- rgb(col[,1], col[,2], col[,3], col[,4], maxColorValue = 255) # Shapes if (!no.graph && !length(igraph.args$vertex.shape)) { igraph.args$vertex.shape <- rep("circle", nnodes(graph)) igraph.args$vertex.shape[type_non] <- "square" } # Adjmat must have dimnames to make sure sorting in igraph is fine add_dimnames.mat(graph) # Computing positions g <- igraph::graph_from_adjacency_matrix(graph, weighted = TRUE) igraph.args$layout <- if (!length(layout)) igraph::layout_nicely(g) else if (inherits(layout, "function")) layout(g) else layout # Keywidth key.width <- 
max(0, key.width) graphics::plot.new() graphics::plot.window(xlim=c(-1,1 + 5*key.width), ylim=c(-1,1)) graphics::title(main=main) # If adding map! ------------------------------------------------------------- if (length(add.map)) { dm <- do.call(diffusionMap.default, c(diffmap.args, list(graph=graph, x=toa, layout = igraph.args$layout))) # Levels dmlvls <- pretty(range(dm$map$z), diffmap.args$kde2d.args$n) # Colors, in this case we need to extrapolate nper and add white. dmcol <- grDevices::rgb(color.ramp(seq(0,1, length.out = nper*2)), maxColorValue = 255) # Do we need to include white in the map? if (length(include.white)) if (include.white=="first") dmcol <- c("white", dmcol) else if (include.white=="last") dmcol <- c(dmcol, "white") else stop('-include.white- should be either NULL, "first" or "last".') # Palette dmcol <- grDevices::adjustcolor(grDevices::colorRampPalette(dmcol)(length(dmlvls)), alpha.f=diffmap.alpha) # Plot if (add.map=="first") graphics::.filled.contour(dm$map$x, dm$map$y, dm$map$z, levels = dmlvls, col=dmcol) } else dm <- NULL # Plotting graph ------------------------------------------------------------- # Setting up parameters set_igraph_plotting_defaults("igraph.args") igraph.args$vertex.size <- rescale_vertex_igraph( compute_vertex_size(g, vertex.size), minmax.relative.size = minmax.relative.size ) igraph.args$vertex.color <- col # Calling igraph if (!no.graph) do.call( what = igraph::plot.igraph, args = c(list(g),igraph.args) ) if (length(add.map) && (add.map=="last")) graphics::.filled.contour(dm$map$x, dm$map$y, dm$map$z, levels = dmlvls, col=dmcol) # # Plotting boxes ------------------------------------------------------------- if (key.width > 0) { # Adjusting the color color.palette <- color.ramp(c(0,.5,1)) if (ncol(color.palette) < 4) color.palette <- cbind(color.palette, 255) color.palette <- grDevices::rgb( color.palette[,1], color.palette[,2], color.palette[,3], color.palette[,4], maxColorValue = 255) color.palette <- 
grDevices::colorRampPalette(color.palette, TRUE) # Filling missings if (!length(key.args$main)) key.args$main <- "Time of Adoption" if (!length(key.args$na.col)) key.args$na.col <- "transparent" if (!length(key.args$na.lab)) key.args$na.lab <- "Non-adopters" if (!length(key.args$border)) key.args$border <- "transparent" if (!length(key.args$tick.marks)) { toaran <- range(toa, na.rm=TRUE) key.args$tick.marks <- unique(floor(seq(toaran[1], toaran[2], length.out = 5))) } do.call( what = drawColorKey, args = c( list(toa, key.pos = c(1-key.width, 0.975, 0.05, 0.95), nlevels = 100, color.palette = color.palette(100)), key.args ) ) } invisible(list( layout = igraph.args$layout, vertex.color = col, vertex.size = igraph.args$vertex.size, vertex.shape = igraph.args$vertex.shape, diffmap = dm) ) } #' Creates a heatmap based on a graph layout and a vertex attribute #' #' Using bi-dimensional kernel smoothers, creates a heatmap based on a graph layout #' and colored accordingly to \code{x}. This visualization technique is intended #' to be used with large graphs. #' #' @param graph A square matrix of size \eqn{n\times n}{n * n}. #' @param slice Integer scalar. Slice of the network to be used as baseline for drawing the graph. #' @param x An vector of length \eqn{n}. Usually a \code{toa} vector. #' @param layout Either a \eqn{n\times 2}{n *2} matrix of coordinates or a layout #' function applied to \code{graph} (must return coordinates). #' @param jitter.args A list including arguments to be passed to \code{\link{jitter}}. #' @param kde2d.args A list including arguments to be passed to \code{\link[MASS:kde2d]{kde2d}}. #' @param sharp.criter A function choose whether to apply a weighted mean for each cell, #' or randomize over the values present in that cell (see details). #' @param ... Arguments passed to method. #' @details #' The image is created using the function \code{kde2d} from #' the \pkg{MASS} package. 
The complete algorithm follows: #' \enumerate{ #' \item \code{x} is coerced into integer and the range is adjusted to start from 1. #' \code{NA} are replaced by zero. #' \item If no \code{layout} is passed, layout is computed using #' \code{\link[igraph:layout_nicely]{layout_nicely}} from \pkg{igraph} #' \item Then, a \code{kde2d} map is computed for each level of \code{x}. The #' resulting matrices are added up as a weighted sum. This only holds if #' at the cell level the function \code{sharp.criter} returns \code{FALSE}. #' \item The jitter function is applied to the repeated coordinates. #' \item 2D kernel is computed using \code{kde2d} over the coordinates. #' } #' #' The function \code{sharp.criter} must take two values, a vector of levels and a #' vector of weights. It must return a logical scalar with value equal to \code{TRUE} #' when a randomization at the cell level must be done, in which case the final #' value of the cell is chosen using \code{sample(x, 1, prob=w)}. #' #' The resulting matrix can be passed to \code{\link{image}} or similar. #' #' The argument \code{x.adj} uses by default the function \code{\link{round_to_seq}} #' which basically maps \code{x} to a fix length sequence of numbers such that #' \code{x.adj(x)} resembles an integer sequence. #' #' @return A list of class \code{diffnet_diffmap} #' \item{coords}{A matrix of size \eqn{n\times 2}{n*2} of vertices coordinates.} #' \item{map}{Output from \code{kde2d}. This is a list with 3 elements, vectors #' \code{x}, \code{y} and matrix \code{z} of size \eqn{n\times n}{n*n} (passed #' via \code{kde2d.args}).} #' \item{h}{Bandwidth passed to \code{kde2d}.} #' @export #' @family visualizations #' @references Vega Yon, George G., and Valente, Thomas W., Visualizing Large Annotated #' Networks as Heatmaps using Weighted Averages based on Kernel Smoothers (Working paper). #' @author George G. 
Vega Yon #' @examples #' #' # Example with a random graph -------------------------------------------------- #' #' \dontrun{ #' set.seed(1231) #' #' # Random scale-free diffusion network #' x <- rdiffnet(1000, 4, seed.graph="scale-free", seed.p.adopt = .025, #' rewire = FALSE, seed.nodes = "central", #' rgraph.arg=list(self=FALSE, m=4), #' threshold.dist = function(id) runif(1,.2,.4)) #' #' # Diffusion map (no random toa) #' dm0 <- diffusionMap(x, kde2d.args=list(n=150, h=.5), layout=igraph::layout_with_fr) #' #' # Random #' diffnet.toa(x) <- sample(x$toa, size = nnodes(x)) #' #' # Diffusion map (random toa) #' dm1 <- diffusionMap(x, layout = dm0$coords, kde2d.args=list(n=150, h=.5)) #' #' oldpar <- par(no.readonly = TRUE) #' col <- colorRampPalette(blues9)(100) #' par(mfrow=c(1,2), oma=c(1,0,0,0)) #' image(dm0, col=col, main="Non-random Times of Adoption\nAdoption from the core.") #' image(dm1, col=col, main="Random Times of Adoption") #' par(mfrow=c(1,1)) #' mtext("Both networks have the same distribution on times of adoption", 1, #' outer = TRUE) #' par(oldpar) #' } #' #' # Example with Brazilian Farmers -------------------------------------------- #' \dontrun{ #' dn <- brfarmersDiffNet #' #' # Setting last TOA as NA #' diffnet.toa(dn)[dn$toa == max(dn$toa)] <- #' NA #' #' # Coordinates #' coords <- sna::gplot.layout.fruchtermanreingold( #' as.matrix(dn$graph[[1]]), layout.par=NULL #' ) #' #' # Plotting diffusion #' plot_diffnet2(dn, layout=coords, vertex.size = 300) #' #' # Adding diffusion map #' out <- diffusionMap(dn, layout=coords, kde2d.args=list(n=100, h=50)) #' col <- adjustcolor(colorRampPalette(c("white","lightblue", "yellow", "red"))(100),.5) #' with(out$map, .filled.contour(x,y,z,pretty(range(z), 100),col)) #' } #' diffusionMap <- function(graph, ...) UseMethod("diffusionMap") #' @export #' @rdname diffusionMap diffmap <- diffusionMap #' Computes weighted variance #' @param x A numeric vector of length \eqn{n}. 
#' @param w A numeric vector of length \eqn{n}.
#' @details \code{weighted_variance} implements weighted variance computation
#' in the following form:
#' \deqn{%
#' \frac{\sum_i w_i'(x_i - \bar x)^2}{(n-1)}
#' }{%
#' sum[w(i)'(x(i) - w.mean(x))^2/(n-1)]
#' }
#'
#' where \eqn{w_i'=w_i/\sum_i w_i}{w(i)' = w(i)/sum(w)}, and
#' \eqn{\bar x = \sum_i w_i'x_i}{w.mean(x)=sum[w(i)'*x(i)]}.
#' @return Numeric scalar with the weighted variance.
#' @export
#' @seealso This function is used in \code{\link{diffmap}}.
weighted_var <- function(x,w) {
  # Rescale weights so they sum to n; keeps the (n-1) denominator comparable
  # to the unweighted sample variance.
  n <- length(x)
  w <- w/sum(w, na.rm=TRUE)*n
  # Weighted mean: w/sum(w) re-normalizes the weights to sum to one.
  m <- sum(x*w/sum(w, na.rm=TRUE), na.rm=TRUE)
  # Weighted sum of squared deviations; the 1e-15 guards against n == 1.
  sum((x - m)^2*w/(n-1+1e-15), na.rm=TRUE)
}

#' @export
#' @rdname weighted_var
wvar <- weighted_var

#' @export
#' @param x.adj Function to adjust \code{x}. If not \code{NULL} then it is applied
#' to \code{x} at the beginning (see details).
#' @rdname diffusionMap
diffusionMap.default <- function(
  graph, x, x.adj=round_to_seq, layout=NULL,
  jitter.args = list(),
  kde2d.args = list(n=100),
  # Default sharpness criterion: randomize a cell when the weighted variance of
  # the levels exceeds the variance of a uniform over the observed range.
  sharp.criter=function(x, w) {
    wvar(x,w) > (max(x, na.rm=TRUE) - min(x, na.rm=TRUE))^2/12
  },...)
{
  # NOTE(review): -jitter.args- is accepted but never used in this body — confirm.

  # Step 0) Preparing the data
  if (length(x.adj)) {
    if (!is.function(x.adj))
      stop('-x.adj- must be a function')
    x <- x.adj(x)
  }

  # Adjmat must have dimnames to make sure sorting in igraph is fine
  if (!length(unlist(dimnames(graph), recursive = TRUE)))
    dimnames(graph) <- list(1:nnodes(graph), 1:nnodes(graph))

  # Computing positions
  # NOTE(review): if -layout- is none of function/NULL/matrix, -coords- is NULL.
  g <- igraph::graph_from_adjacency_matrix(graph, weighted = TRUE)
  coords <- if (is.function(layout)) layout(g)
  else if (!length(layout)) igraph::layout_nicely(g)
  else if (is.matrix(layout)) layout

  # Step 1) Compute densities per level
  # Use the normal reference bandwidth per axis when none was supplied.
  if (!length(kde2d.args$h))
    kde2d.args$h <- c(MASS::bandwidth.nrd(coords[,1]), MASS::bandwidth.nrd(coords[,2]))

  # Mapping limits
  lims <- c(range(coords[,1]), range(coords[,2]))

  lvls <- unique(x)
  nlvls <- length(unique(x))
  # Maps$z holds level-weighted densities, Maps$W the raw densities (weights).
  Maps <- with(kde2d.args, list(z=array(0, dim=c(n,n,nlvls) )))
  Maps$W <- Maps$z
  for (i in 1:nlvls) {
    # Skip if NA
    if (is.na(lvls[i])) next

    # Subset and map
    dat <- coords[which(x==lvls[i]),,drop=FALSE]
    map <- do.call(MASS::kde2d, c(kde2d.args, list(
      x = dat[,1], y=dat[,2], lims=lims)))

    # Adding up (for weighted average)
    Maps$W[,,i] <- map$z
    Maps$z[,,i] <- map$z*lvls[i]
  }

  # Processing each level
  Map <- with(kde2d.args, list(z=matrix(0, ncol=n, nrow=n)))
  Map$W <- Map$z
  for (i in 1:kde2d.args$n)
    for (j in 1:kde2d.args$n) {
      # Computing variance at that level: either the weighted average of the
      # levels, or a random draw proportional to the cell's densities.
      if (sharp.criter(lvls,Maps$W[i,j,]) || sum(Maps$W[i,j,]) < 1e-30 ) {
        Map$z[i,j] <- sum(Maps$z[i,j,])/(sum(Maps$W[i,j,]) + 1e-15)
      } else {
        Map$z[i,j] <- sample(lvls, 1, prob=Maps$W[i,j,])
      }
    }

  # Normalizing
  # Map$z <- Map$z/(Map$W + 1e-15)
  Map$x <- seq(lims[1], lims[2], length.out = kde2d.args$n)
  Map$y <- seq(lims[3], lims[4], length.out = kde2d.args$n)

  structure(list(
    coords = coords,
    map = with(Map, list(x=x,y=y,z=z)),
    h = kde2d.args$h,
    used_x = x
  ), class="diffnet_diffmap")
}

#' @rdname diffusionMap
#' @export
diffusionMap.diffnet <- function(graph, slice=nslices(graph), ...)
{
  # Dispatch on one slice of the dynamic graph, using times of adoption as x.
  with(graph, diffusionMap.default(graph[[slice]], toa, ...))
}

#' @rdname diffusionMap
#' @export
image.diffnet_diffmap <- function(x, ...) {
  graphics::image(x$map,...)
}

#' @rdname diffusionMap
#' @export
print.diffnet_diffmap <- function(x, ...) {
  cat("An object of class -diffnet_map-\n")
  # NOTE(review): utils::str() prints as a side effect and returns NULL, so
  # cat(utils::str(x)) relies on str's own printing.
  cat(utils::str(x))
  cat("Use methods -plot- and -image-.")
}

#' @rdname diffusionMap
#' @param y Ignored.
#' @export
plot.diffnet_diffmap <- function(x, y=NULL, ...) {
  image.diffnet_diffmap(x, ...)
}
/R/plot_diffnet2.r
no_license
manlius/netdiffuseR
R
false
false
18,035
r
#' Takes a numeric vector and maps it into a finite length sequence
#' @param x A numeric or integer vector.
#' @param nlevels Integer scalar. Length of the sequence to be map onto.
#' @param as_factor Logical scalar. When \code{TRUE} the resulting vector is factor.
#' @return A vector of length \code{length(x)} with values mapped to a sequence
#' with \code{nlevels} unique values
#' @export
#' @examples
#'
#' x <- rnorm(100)
#' w <- data.frame(as.integer(round_to_seq(x, as_factor = TRUE)),x)
#' plot(w,x)
#'
#' @seealso Used in \code{\link{diffmap}} and \code{\link{plot_diffnet2}}
round_to_seq <- function(x, nlevels=20, as_factor=FALSE) {
  # Build an evenly spaced grid of -nlevels- points over the finite range of x.
  y <- range(x, na.rm = TRUE, finite=TRUE)
  y <- seq(y[1], y[2], length.out = nlevels)
  # Snap each element of x to its nearest grid point; NAs stay NA.
  y <- sapply(x, function(z) {
    if (is.na(z)) return(NA)
    y[which.min(abs(y-z))]
  })
  # factor(c(1,3), levels = 1:3, labels = letters[1:3])
  if (as_factor) as.factor(y)
  else y
}

#' Another way of visualizing diffusion
#' @templateVar toa TRUE
#' @templateVar slice TRUE
#' @template graph_template
#' @template plotting_template
#' @param pers Integer vector of length \eqn{T} indicating the time periods of the data.
#' @param color.ramp A function as returned by \code{\link[grDevices:colorRamp]{colorRamp}}.
#' @param layout Passed to \code{\link[igraph:plot.igraph]{plot.igraph}}.
#' @param key.width Numeric scalar. Sets the proportion of the plot (x-axis) that the key uses.
#' @param key.args List. Further arguments to be passed to \code{\link{drawColorKey}}.
#' @param main Character scalar. Title of the graph.
#' @param add.map Character scalar. When \code{"first"} plots a \code{\link{diffusionMap}} before the
#' graph itself. If \code{"last"} then it adds it at the end. When \code{NULL} adds nothing.
#' @param diffmap.args List. If \code{add.map=TRUE}, arguments passed to \code{diffusionMap}.
#' @param diffmap.alpha Numeric scalar between [0,1]. Alpha level for the map.
#' @param include.white Character scalar.
#' Includes white in the color palette used in the map.
#' When \code{include.white=NULL} then it won't include it.
#' @param ... Further arguments passed to \code{\link[igraph:plot.igraph]{plot.igraph}}.
#' @param no.graph Logical scalar. When \code{TRUE} the graph is not drawn. This only makes
#' sense when the option \code{add.map} is active.
#' @details If \code{key.width<=0} then no key is created.
#'
#' By default, the function passes the following values to \code{plot.igraph}:
#'
#' \itemize{
#' \item{\code{vertex.label} equals to \code{""}}
#' \item{\code{vertex.frame.color} equals to \code{"white"}}
#' \item{\code{add} equals to \code{TRUE}}
#' \item{\code{rescale} equals to \code{FALSE}}
#' \item{\code{vertex.size} equals to \code{rescale.fun(vertex.size)}}
#' }
#'
#' @return A list with the following elements
#' \item{layout}{A numeric matrix with vertex coordinates.}
#' \item{vertex.color}{A character vector with computed colors for each vertex.}
#' \item{vertex.label}{The value passed to \code{plot_diffnet2}.}
#' \item{vertex.shape}{A character vector with assigned shapes.}
#' \item{vertex.size}{A numeric vector with vertices sizes}
#' \item{diffmap}{If \code{add.map=TRUE}, the returned values from \code{\link{diffmap}}}
#' @export
#' @family visualizations
#' @author George G. Vega Yon
plot_diffnet2 <- function(graph, ...) UseMethod("plot_diffnet2")

#' @rdname plot_diffnet2
#' @export
#' @include diffnet-methods.r data.r
plot_diffnet2.diffnet <- function(
  graph,
  toa,
  slice = nslices(graph),
  ...
) {
  # Default to the diffnet object's own times of adoption.
  if (missing(toa))
    toa <- graph$toa

  plot_diffnet2.default(
    graph = graph$graph[[slice]],
    toa = toa,
    pers = graph$meta$pers,
    ...)
}

#' @rdname plot_diffnet2
#' @export
plot_diffnet2.default <- function(
  graph,
  toa,
  pers = min(toa, na.rm = TRUE):max(toa, na.rm = TRUE),
  color.ramp = grDevices::colorRamp(c("steelblue","gray", "tomato")),
  layout = NULL,
  key.width = 0.1,
  key.args = list(),
  main = "Diffusion dynamics",
  add.map = NULL,
  diffmap.args = list(kde2d.args=list(n=100)),
  diffmap.alpha = .5,
  include.white = "first",
  vertex.size = "degree",
  minmax.relative.size = getOption("diffnet.minmax.relative.size", c(0.01, 0.04)),
  no.graph = FALSE,
  ...) {

  # Modifying some arguments: restore graphical parameters on exit.
  oldpar <- graphics::par(no.readonly = TRUE)
  on.exit(graphics::par(oldpar))
  par(xpd = NA)

  # Collecting arguments
  igraph.args <- list(...)

  # Some constants
  nper <- length(pers)

  if (length(add.map) && !(add.map %in% c("first", "last")))
    stop("When -add.map- is specified it should be either \'first\' or \'last\'.")

  if (!length(add.map) & no.graph)
    stop("If -no.graph=TRUE- then you should specify some value for -add.map-.")

  # Tagging types --------------------------------------------------------------

  # 1st adopters
  # NOTE(review): despite the comment, this compares against pers[nper] (the
  # LAST period) — confirm whether pers[1] was intended.
  type_1st <- toa == pers[nper]

  # Non Adopters
  type_non <- is.na(toa)

  # Adopters
  type_adopt <- which(!type_1st & !type_non)
  type_1st <- which(type_1st)
  type_non <- which(type_non)

  # Colors: map each toa linearly onto [0,1] over the observed period range.
  t01 <- pers
  t01 <- c(t01[1], t01[nper])
  col <- color.ramp( (toa - t01[1])/(t01[2] - t01[1]) )

  # Adding alpha; non-adopters are set fully to 255 (white, opaque).
  if (ncol(col) < 4) col <- cbind(col, 255)
  col[type_non,] <- 255
  col <- rgb(col[,1], col[,2], col[,3], col[,4], maxColorValue = 255)

  # Shapes: circles for adopters, squares for non-adopters.
  if (!no.graph && !length(igraph.args$vertex.shape)) {
    igraph.args$vertex.shape <- rep("circle", nnodes(graph))
    igraph.args$vertex.shape[type_non] <- "square"
  }

  # Adjmat must have dimnames to make sure sorting in igraph is fine
  add_dimnames.mat(graph)

  # Computing positions
  g <- igraph::graph_from_adjacency_matrix(graph, weighted = TRUE)
  igraph.args$layout <- if (!length(layout)) igraph::layout_nicely(g)
  else if (inherits(layout, "function")) layout(g)
  else layout

  # Keywidth
  key.width <- max(0, key.width)

  graphics::plot.new()
  graphics::plot.window(xlim=c(-1,1 + 5*key.width), ylim=c(-1,1))
  graphics::title(main=main)

  # If adding map! -------------------------------------------------------------
  if (length(add.map)) {
    dm <- do.call(diffusionMap.default, c(diffmap.args, list(graph=graph, x=toa,
                                                             layout = igraph.args$layout)))
    # Levels
    dmlvls <- pretty(range(dm$map$z), diffmap.args$kde2d.args$n)

    # Colors, in this case we need to extrapolate nper and add white.
    dmcol <- grDevices::rgb(color.ramp(seq(0,1, length.out = nper*2)), maxColorValue = 255)

    # Do we need to include white in the map?
    if (length(include.white))
      if (include.white=="first") dmcol <- c("white", dmcol)
      else if (include.white=="last") dmcol <- c(dmcol, "white")
      else stop('-include.white- should be either NULL, "first" or "last".')

    # Palette
    dmcol <- grDevices::adjustcolor(grDevices::colorRampPalette(dmcol)(length(dmlvls)),
                                    alpha.f=diffmap.alpha)

    # Plot
    if (add.map=="first")
      graphics::.filled.contour(dm$map$x, dm$map$y, dm$map$z,
                                levels = dmlvls, col=dmcol)
  } else dm <- NULL

  # Plotting graph -------------------------------------------------------------

  # Setting up parameters
  set_igraph_plotting_defaults("igraph.args")

  igraph.args$vertex.size <- rescale_vertex_igraph(
    compute_vertex_size(g, vertex.size),
    minmax.relative.size = minmax.relative.size
  )

  igraph.args$vertex.color <- col

  # Calling igraph
  if (!no.graph)
    do.call(
      what = igraph::plot.igraph,
      args = c(list(g),igraph.args)
    )

  # The "last" branch can only run when the map was computed above, so -dmlvls-
  # and -dmcol- are defined here.
  if (length(add.map) && (add.map=="last"))
    graphics::.filled.contour(dm$map$x, dm$map$y, dm$map$z,
                              levels = dmlvls, col=dmcol)

  #
  # Plotting boxes -------------------------------------------------------------
  if (key.width > 0) {
    # Adjusting the color
    color.palette <- color.ramp(c(0,.5,1))
    if (ncol(color.palette) < 4) color.palette <- cbind(color.palette, 255)
    color.palette <- grDevices::rgb(
      color.palette[,1], color.palette[,2], color.palette[,3], color.palette[,4],
      maxColorValue = 255)
    color.palette <- grDevices::colorRampPalette(color.palette, TRUE)

    # Filling missings
    if (!length(key.args$main)) key.args$main <- "Time of Adoption"
    if (!length(key.args$na.col)) key.args$na.col <- "transparent"
    if (!length(key.args$na.lab)) key.args$na.lab <- "Non-adopters"
    if (!length(key.args$border)) key.args$border <- "transparent"
    if (!length(key.args$tick.marks)) {
      toaran <- range(toa, na.rm=TRUE)
      key.args$tick.marks <- unique(floor(seq(toaran[1], toaran[2], length.out = 5)))
    }

    do.call(
      what = drawColorKey,
      args = c(
        list(toa, key.pos = c(1-key.width, 0.975, 0.05, 0.95), nlevels = 100,
             color.palette = color.palette(100)),
        key.args
      )
    )
  }

  invisible(list(
    layout = igraph.args$layout,
    vertex.color = col,
    vertex.size = igraph.args$vertex.size,
    vertex.shape = igraph.args$vertex.shape,
    diffmap = dm)
  )
}

#' Creates a heatmap based on a graph layout and a vertex attribute
#'
#' Using bi-dimensional kernel smoothers, creates a heatmap based on a graph layout
#' and colored according to \code{x}. This visualization technique is intended
#' to be used with large graphs.
#'
#' @param graph A square matrix of size \eqn{n\times n}{n * n}.
#' @param slice Integer scalar. Slice of the network to be used as baseline for drawing the graph.
#' @param x A vector of length \eqn{n}. Usually a \code{toa} vector.
#' @param layout Either a \eqn{n\times 2}{n *2} matrix of coordinates or a layout
#' function applied to \code{graph} (must return coordinates).
#' @param jitter.args A list including arguments to be passed to \code{\link{jitter}}.
#' @param kde2d.args A list including arguments to be passed to \code{\link[MASS:kde2d]{kde2d}}.
#' @param sharp.criter A function that chooses whether to apply a weighted mean for each cell,
#' or randomize over the values present in that cell (see details).
#' @param ... Arguments passed to method.
#' @details
#' The image is created using the function \code{kde2d} from
#' the \pkg{MASS} package.
#' The complete algorithm follows:
#' \enumerate{
#' \item \code{x} is coerced into integer and the range is adjusted to start from 1.
#' \code{NA} are replaced by zero.
#' \item If no \code{layout} is passed, layout is computed using
#' \code{\link[igraph:layout_nicely]{layout_nicely}} from \pkg{igraph}
#' \item Then, a \code{kde2d} map is computed for each level of \code{x}. The
#' resulting matrices are added up as a weighted sum. This only holds if
#' at the cell level the function \code{sharp.criter} returns \code{FALSE}.
#' \item The jitter function is applied to the repeated coordinates.
#' \item 2D kernel is computed using \code{kde2d} over the coordinates.
#' }
#'
#' The function \code{sharp.criter} must take two values, a vector of levels and a
#' vector of weights. It must return a logical scalar with value equal to \code{TRUE}
#' when a randomization at the cell level must be done, in which case the final
#' value of the cell is chosen using \code{sample(x, 1, prob=w)}.
#'
#' The resulting matrix can be passed to \code{\link{image}} or similar.
#'
#' The argument \code{x.adj} uses by default the function \code{\link{round_to_seq}}
#' which basically maps \code{x} to a fix length sequence of numbers such that
#' \code{x.adj(x)} resembles an integer sequence.
#'
#' @return A list of class \code{diffnet_diffmap}
#' \item{coords}{A matrix of size \eqn{n\times 2}{n*2} of vertices coordinates.}
#' \item{map}{Output from \code{kde2d}. This is a list with 3 elements, vectors
#' \code{x}, \code{y} and matrix \code{z} of size \eqn{n\times n}{n*n} (passed
#' via \code{kde2d.args}).}
#' \item{h}{Bandwidth passed to \code{kde2d}.}
#' @export
#' @family visualizations
#' @references Vega Yon, George G., and Valente, Thomas W., Visualizing Large Annotated
#' Networks as Heatmaps using Weighted Averages based on Kernel Smoothers (Working paper).
#' @author George G. Vega Yon
#' @examples
#'
#' # Example with a random graph --------------------------------------------------
#'
#' \dontrun{
#' set.seed(1231)
#'
#' # Random scale-free diffusion network
#' x <- rdiffnet(1000, 4, seed.graph="scale-free", seed.p.adopt = .025,
#'               rewire = FALSE, seed.nodes = "central",
#'               rgraph.arg=list(self=FALSE, m=4),
#'               threshold.dist = function(id) runif(1,.2,.4))
#'
#' # Diffusion map (no random toa)
#' dm0 <- diffusionMap(x, kde2d.args=list(n=150, h=.5), layout=igraph::layout_with_fr)
#'
#' # Random
#' diffnet.toa(x) <- sample(x$toa, size = nnodes(x))
#'
#' # Diffusion map (random toa)
#' dm1 <- diffusionMap(x, layout = dm0$coords, kde2d.args=list(n=150, h=.5))
#'
#' oldpar <- par(no.readonly = TRUE)
#' col <- colorRampPalette(blues9)(100)
#' par(mfrow=c(1,2), oma=c(1,0,0,0))
#' image(dm0, col=col, main="Non-random Times of Adoption\nAdoption from the core.")
#' image(dm1, col=col, main="Random Times of Adoption")
#' par(mfrow=c(1,1))
#' mtext("Both networks have the same distribution on times of adoption", 1,
#'       outer = TRUE)
#' par(oldpar)
#' }
#'
#' # Example with Brazilian Farmers --------------------------------------------
#' \dontrun{
#' dn <- brfarmersDiffNet
#'
#' # Setting last TOA as NA
#' diffnet.toa(dn)[dn$toa == max(dn$toa)] <-
#'   NA
#'
#' # Coordinates
#' coords <- sna::gplot.layout.fruchtermanreingold(
#'   as.matrix(dn$graph[[1]]), layout.par=NULL
#' )
#'
#' # Plotting diffusion
#' plot_diffnet2(dn, layout=coords, vertex.size = 300)
#'
#' # Adding diffusion map
#' out <- diffusionMap(dn, layout=coords, kde2d.args=list(n=100, h=50))
#' col <- adjustcolor(colorRampPalette(c("white","lightblue", "yellow", "red"))(100),.5)
#' with(out$map, .filled.contour(x,y,z,pretty(range(z), 100),col))
#' }
#'
diffusionMap <- function(graph, ...) UseMethod("diffusionMap")

#' @export
#' @rdname diffusionMap
diffmap <- diffusionMap

#' Computes weighted variance
#' @param x A numeric vector of length \eqn{n}.
#' @param w A numeric vector of length \eqn{n}.
#' @details \code{weighted_variance} implements weighted variance computation
#' in the following form:
#' \deqn{%
#' \frac{\sum_i w_i'(x_i - \bar x)^2}{(n-1)}
#' }{%
#' sum[w(i)'(x(i) - w.mean(x))^2/(n-1)]
#' }
#'
#' where \eqn{w_i'=w_i/\sum_i w_i}{w(i)' = w(i)/sum(w)}, and
#' \eqn{\bar x = \sum_i w_i'x_i}{w.mean(x)=sum[w(i)'*x(i)]}.
#' @return Numeric scalar with the weighted variance.
#' @export
#' @seealso This function is used in \code{\link{diffmap}}.
weighted_var <- function(x,w) {
  # Rescale weights so they sum to n; keeps the (n-1) denominator comparable
  # to the unweighted sample variance.
  n <- length(x)
  w <- w/sum(w, na.rm=TRUE)*n
  # Weighted mean: w/sum(w) re-normalizes the weights to sum to one.
  m <- sum(x*w/sum(w, na.rm=TRUE), na.rm=TRUE)
  # Weighted sum of squared deviations; the 1e-15 guards against n == 1.
  sum((x - m)^2*w/(n-1+1e-15), na.rm=TRUE)
}

#' @export
#' @rdname weighted_var
wvar <- weighted_var

#' @export
#' @param x.adj Function to adjust \code{x}. If not \code{NULL} then it is applied
#' to \code{x} at the beginning (see details).
#' @rdname diffusionMap
diffusionMap.default <- function(
  graph, x, x.adj=round_to_seq, layout=NULL,
  jitter.args = list(),
  kde2d.args = list(n=100),
  # Default sharpness criterion: randomize a cell when the weighted variance of
  # the levels exceeds the variance of a uniform over the observed range.
  sharp.criter=function(x, w) {
    wvar(x,w) > (max(x, na.rm=TRUE) - min(x, na.rm=TRUE))^2/12
  },...)
{
  # NOTE(review): -jitter.args- is accepted but never used in this body — confirm.

  # Step 0) Preparing the data
  if (length(x.adj)) {
    if (!is.function(x.adj))
      stop('-x.adj- must be a function')
    x <- x.adj(x)
  }

  # Adjmat must have dimnames to make sure sorting in igraph is fine
  if (!length(unlist(dimnames(graph), recursive = TRUE)))
    dimnames(graph) <- list(1:nnodes(graph), 1:nnodes(graph))

  # Computing positions
  # NOTE(review): if -layout- is none of function/NULL/matrix, -coords- is NULL.
  g <- igraph::graph_from_adjacency_matrix(graph, weighted = TRUE)
  coords <- if (is.function(layout)) layout(g)
  else if (!length(layout)) igraph::layout_nicely(g)
  else if (is.matrix(layout)) layout

  # Step 1) Compute densities per level
  # Use the normal reference bandwidth per axis when none was supplied.
  if (!length(kde2d.args$h))
    kde2d.args$h <- c(MASS::bandwidth.nrd(coords[,1]), MASS::bandwidth.nrd(coords[,2]))

  # Mapping limits
  lims <- c(range(coords[,1]), range(coords[,2]))

  lvls <- unique(x)
  nlvls <- length(unique(x))
  # Maps$z holds level-weighted densities, Maps$W the raw densities (weights).
  Maps <- with(kde2d.args, list(z=array(0, dim=c(n,n,nlvls) )))
  Maps$W <- Maps$z
  for (i in 1:nlvls) {
    # Skip if NA
    if (is.na(lvls[i])) next

    # Subset and map
    dat <- coords[which(x==lvls[i]),,drop=FALSE]
    map <- do.call(MASS::kde2d, c(kde2d.args, list(
      x = dat[,1], y=dat[,2], lims=lims)))

    # Adding up (for weighted average)
    Maps$W[,,i] <- map$z
    Maps$z[,,i] <- map$z*lvls[i]
  }

  # Processing each level
  Map <- with(kde2d.args, list(z=matrix(0, ncol=n, nrow=n)))
  Map$W <- Map$z
  for (i in 1:kde2d.args$n)
    for (j in 1:kde2d.args$n) {
      # Computing variance at that level: either the weighted average of the
      # levels, or a random draw proportional to the cell's densities.
      if (sharp.criter(lvls,Maps$W[i,j,]) || sum(Maps$W[i,j,]) < 1e-30 ) {
        Map$z[i,j] <- sum(Maps$z[i,j,])/(sum(Maps$W[i,j,]) + 1e-15)
      } else {
        Map$z[i,j] <- sample(lvls, 1, prob=Maps$W[i,j,])
      }
    }

  # Normalizing
  # Map$z <- Map$z/(Map$W + 1e-15)
  Map$x <- seq(lims[1], lims[2], length.out = kde2d.args$n)
  Map$y <- seq(lims[3], lims[4], length.out = kde2d.args$n)

  structure(list(
    coords = coords,
    map = with(Map, list(x=x,y=y,z=z)),
    h = kde2d.args$h,
    used_x = x
  ), class="diffnet_diffmap")
}

#' @rdname diffusionMap
#' @export
diffusionMap.diffnet <- function(graph, slice=nslices(graph), ...)
{
  # Dispatch on one slice of the dynamic graph, using times of adoption as x.
  with(graph, diffusionMap.default(graph[[slice]], toa, ...))
}

#' @rdname diffusionMap
#' @export
image.diffnet_diffmap <- function(x, ...) {
  graphics::image(x$map,...)
}

#' @rdname diffusionMap
#' @export
print.diffnet_diffmap <- function(x, ...) {
  cat("An object of class -diffnet_map-\n")
  # NOTE(review): utils::str() prints as a side effect and returns NULL, so
  # cat(utils::str(x)) relies on str's own printing.
  cat(utils::str(x))
  cat("Use methods -plot- and -image-.")
}

#' @rdname diffusionMap
#' @param y Ignored.
#' @export
plot.diffnet_diffmap <- function(x, y=NULL, ...) {
  image.diffnet_diffmap(x, ...)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{Selection}
\alias{Selection}
\title{Selection of genes.}
\description{
20 (at most) genes with differential expression at t1, 20 (at most) genes
with differential expression at t2, 20 (at most) genes with differential
expression at t3, 20 (at most) genes with differential expression at t4 and
20 (at most) genes with global differential expression were selected.
}
\examples{
data(Selection)
head(Selection)
summary(Selection,3)
}
\keyword{datasets}
/man/Selection.Rd
no_license
cran/Cascade
R
false
true
562
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{Selection}
\alias{Selection}
\title{Selection of genes.}
\description{
20 (at most) genes with differential expression at t1, 20 (at most) genes
with differential expression at t2, 20 (at most) genes with differential
expression at t3, 20 (at most) genes with differential expression at t4 and
20 (at most) genes with global differential expression were selected.
}
\examples{
data(Selection)
head(Selection)
summary(Selection,3)
}
\keyword{datasets}
#/*==========================================================================#*/
#' ## Figures 2A - genomic locations of tag clusters/promoters
#'
#/*==========================================================================#*/

# Annotates CAGE tag clusters with genomic features (ChIPseeker) and draws
# horizontal stacked barplots of the per-sample feature distribution.
# Expects -tc.grl- (a named list of tag-cluster GRanges objects) in the
# workspace.
library(ChIPseeker)
library(biomaRt)
library(GenomicFeatures)
library(RColorBrewer)  # loaded by the original script; kept for compatibility
library(ggplot2)

txdb <- makeTxDbFromBiomart(biomart = "ensembl",
                            dataset = "scerevisiae_gene_ensembl")

# - rename chromosome names in txdb to match NCBI naming scheme - #
seqlevels(txdb) <- c("NC_001133.9", "NC_001134.8", "NC_001135.5",
                     "NC_001136.10", "NC_001137.3", "NC_001138.5",
                     "NC_001139.9", "NC_001140.6", "NC_001141.2",
                     "NC_001142.9", "NC_001143.9", "NC_001144.5",
                     "NC_001145.3", "NC_001146.8", "NC_001147.6",
                     "NC_001148.4", "NC_001224.1")
seqlevels(txdb)
txdb

# Annotate every tag-cluster set and return one data frame of feature
# frequencies with a -sample- column; factor levels are set for plotting.
collect_feature_stats <- function(tc_list, sample_names, txdb) {
  peak_anno <- lapply(tc_list, function(x)
    annotatePeak(x, TxDb = txdb, tssRegion = c(-500, 500),
                 annoDb = "org.Sc.eg.db", sameStrand = TRUE, verbose = FALSE))
  names(peak_anno) <- sample_names

  # plotting is limited within the package, so extract the feature stats
  feats.l <- lapply(peak_anno, function(x) x@annoStat)
  names(feats.l) <- sample_names

  # tag each sample's rows with its display name
  # (seq_along instead of 1:length() — safe for zero-length input)
  for (i in seq_along(sample_names)) {
    feats.l[[i]]$sample <- rep(sample_names[[i]], nrow(feats.l[[i]]))
  }
  feats.df <- do.call("rbind", feats.l)

  # reversed sample order so the first sample is drawn at the top (coord_flip)
  feats.df$sample <- factor(feats.df$sample, levels = rev(sample_names))
  features <- c("Promoter", "1st Exon", "Other Exon", "1st Intron",
                "Downstream (<=3kb)", "Distal Intergenic")
  feats.df$Feature <- factor(feats.df$Feature, levels = features)
  feats.df
}

# Colour scheme keyed by all possible ChIPseeker feature labels; only the
# colours for features present in -feats.df- are returned.
# (The original brewer.pal Greys/Blues palette was dead code — immediately
# overwritten by this explicit scheme — and has been removed.)
feature_colors <- function(feats.df) {
  col <- c("#F8F2AB", "#B4D6A4", "#67BC9A", "#13B0A5", "#0071A7", "#3E606F",
           "#88F9D4", "#18C29C", "#0B877D", "#126872", "#031727")
  names(col) <- c("Promoter", "Promoter (<=1kb)", "Promoter (1-3kb)",
                  "5' UTR", "1st Exon", "Other Exon", "1st Intron",
                  "Other Intron", "3' UTR", "Downstream (<=3kb)",
                  "Distal Intergenic")
  col[names(col) %in% unique(feats.df$Feature)]
}

# Draw the stacked barplot of genomic-feature percentages and write it to pdf.
plot_genomic_features <- function(feats.df, col_sel, outfile) {
  p <- ggplot(feats.df, aes(x = sample, y = Frequency, fill = Feature),
              alpha = 0.7) +
    geom_bar(stat = "identity", width = 0.75, colour = "black", lwd = 0.125) +
    coord_flip() +
    scale_fill_manual("Features", values = col_sel) +
    theme_bw() +
    theme(text = element_text(size = 14, colour = "black"),
          legend.title = element_blank(),
          axis.title.x = element_text(colour = "black"),
          axis.title.y = element_text(colour = "black"),
          axis.text.x = element_text(colour = "black"),
          axis.text.y = element_text(colour = "black"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank()) +
    labs(y = "Percentage", x = NULL)
  pdf(file = outfile, height = 3, width = 5)
  print(p)
  dev.off()
}

#----- plot for selected samples
samples <- c("Y1ng_carrier", "Y5ng_carrier", "Y10ng_carrier1", "BY4741")
tc_selected <- tc.grl[samples]
sel_names <- c("SLIC 1 ng", "SLIC 5 ng", "SLIC 10 ng", "nAnTi 5 ug")
names(tc_selected) <- sel_names

feats_sel <- collect_feature_stats(tc_selected, sel_names, txdb)
plot_genomic_features(feats_sel, feature_colors(feats_sel),
                      "final_figures/genomicFeatures_sc_sel.pdf")

#----- plot for all samples
all_names <- c("SLIC 1 ng", "SLIC 2 ng", "SLIC 5 ng", "SLIC 10 ng r1",
               "SLIC 10 ng r2", "SLIC 25 ng", "SLIC 50 ng", "SLIC 100 ng",
               "nAnTi PCR", "nAnTi 5 ug")
feats_all <- collect_feature_stats(tc.grl, all_names, txdb)
plot_genomic_features(feats_all, feature_colors(feats_all),
                      "final_figures/genomicFeatures_sc_all.pdf")
/analysis/03_genomic_location_tc.R
no_license
ncvetesic/SLIC-CAGE
R
false
false
6,081
r
#/*==========================================================================#*/
#' ## Figures 2A - genomic locations of tag clusters/promoters
#'
#/*==========================================================================#*/

# Annotates CAGE tag clusters with genomic features (ChIPseeker) and draws
# horizontal stacked barplots of the per-sample feature distribution.
# Expects -tc.grl- (a named list of tag-cluster GRanges objects) in the
# workspace.
library(ChIPseeker)
library(biomaRt)
library(GenomicFeatures)
library(RColorBrewer)  # loaded by the original script; kept for compatibility
library(ggplot2)

txdb <- makeTxDbFromBiomart(biomart = "ensembl",
                            dataset = "scerevisiae_gene_ensembl")

# - rename chromosome names in txdb to match NCBI naming scheme - #
seqlevels(txdb) <- c("NC_001133.9", "NC_001134.8", "NC_001135.5",
                     "NC_001136.10", "NC_001137.3", "NC_001138.5",
                     "NC_001139.9", "NC_001140.6", "NC_001141.2",
                     "NC_001142.9", "NC_001143.9", "NC_001144.5",
                     "NC_001145.3", "NC_001146.8", "NC_001147.6",
                     "NC_001148.4", "NC_001224.1")
seqlevels(txdb)
txdb

# Annotate every tag-cluster set and return one data frame of feature
# frequencies with a -sample- column; factor levels are set for plotting.
collect_feature_stats <- function(tc_list, sample_names, txdb) {
  peak_anno <- lapply(tc_list, function(x)
    annotatePeak(x, TxDb = txdb, tssRegion = c(-500, 500),
                 annoDb = "org.Sc.eg.db", sameStrand = TRUE, verbose = FALSE))
  names(peak_anno) <- sample_names

  # plotting is limited within the package, so extract the feature stats
  feats.l <- lapply(peak_anno, function(x) x@annoStat)
  names(feats.l) <- sample_names

  # tag each sample's rows with its display name
  # (seq_along instead of 1:length() — safe for zero-length input)
  for (i in seq_along(sample_names)) {
    feats.l[[i]]$sample <- rep(sample_names[[i]], nrow(feats.l[[i]]))
  }
  feats.df <- do.call("rbind", feats.l)

  # reversed sample order so the first sample is drawn at the top (coord_flip)
  feats.df$sample <- factor(feats.df$sample, levels = rev(sample_names))
  features <- c("Promoter", "1st Exon", "Other Exon", "1st Intron",
                "Downstream (<=3kb)", "Distal Intergenic")
  feats.df$Feature <- factor(feats.df$Feature, levels = features)
  feats.df
}

# Colour scheme keyed by all possible ChIPseeker feature labels; only the
# colours for features present in -feats.df- are returned.
# (The original brewer.pal Greys/Blues palette was dead code — immediately
# overwritten by this explicit scheme — and has been removed.)
feature_colors <- function(feats.df) {
  col <- c("#F8F2AB", "#B4D6A4", "#67BC9A", "#13B0A5", "#0071A7", "#3E606F",
           "#88F9D4", "#18C29C", "#0B877D", "#126872", "#031727")
  names(col) <- c("Promoter", "Promoter (<=1kb)", "Promoter (1-3kb)",
                  "5' UTR", "1st Exon", "Other Exon", "1st Intron",
                  "Other Intron", "3' UTR", "Downstream (<=3kb)",
                  "Distal Intergenic")
  col[names(col) %in% unique(feats.df$Feature)]
}

# Draw the stacked barplot of genomic-feature percentages and write it to pdf.
plot_genomic_features <- function(feats.df, col_sel, outfile) {
  p <- ggplot(feats.df, aes(x = sample, y = Frequency, fill = Feature),
              alpha = 0.7) +
    geom_bar(stat = "identity", width = 0.75, colour = "black", lwd = 0.125) +
    coord_flip() +
    scale_fill_manual("Features", values = col_sel) +
    theme_bw() +
    theme(text = element_text(size = 14, colour = "black"),
          legend.title = element_blank(),
          axis.title.x = element_text(colour = "black"),
          axis.title.y = element_text(colour = "black"),
          axis.text.x = element_text(colour = "black"),
          axis.text.y = element_text(colour = "black"),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank()) +
    labs(y = "Percentage", x = NULL)
  pdf(file = outfile, height = 3, width = 5)
  print(p)
  dev.off()
}

#----- plot for selected samples
samples <- c("Y1ng_carrier", "Y5ng_carrier", "Y10ng_carrier1", "BY4741")
tc_selected <- tc.grl[samples]
sel_names <- c("SLIC 1 ng", "SLIC 5 ng", "SLIC 10 ng", "nAnTi 5 ug")
names(tc_selected) <- sel_names

feats_sel <- collect_feature_stats(tc_selected, sel_names, txdb)
plot_genomic_features(feats_sel, feature_colors(feats_sel),
                      "final_figures/genomicFeatures_sc_sel.pdf")

#----- plot for all samples
all_names <- c("SLIC 1 ng", "SLIC 2 ng", "SLIC 5 ng", "SLIC 10 ng r1",
               "SLIC 10 ng r2", "SLIC 25 ng", "SLIC 50 ng", "SLIC 100 ng",
               "nAnTi PCR", "nAnTi 5 ug")
feats_all <- collect_feature_stats(tc.grl, all_names, txdb)
plot_genomic_features(feats_all, feature_colors(feats_all),
                      "final_figures/genomicFeatures_sc_all.pdf")
# Invert a0a1a2.a.l in the symmetric case a1 == a2 (local boundary).
#
# Given an overall level -a- and first-stage level -a0-, finds the common
# value a1 = a2 in [0, a0] such that a0a1a2.a.l(a0, a1, a1) equals -a-.
# le()/eq() are the package's tolerance-aware comparison helpers.
#
# Args:
#   a:   numeric scalar, overall significance level (0 <= a <= a0 <= 1).
#   a0:  numeric scalar, first-stage level.
#   tol: numeric scalar, tolerance forwarded to uniroot().
#
# Returns: the root in [0, a0]; -a- itself at the boundary cases
#   (a == 0, 1 or a0); NA when the levels are not ordered 0 <= a <= a0 <= 1
#   (or when the ordering check is NA).
`aa0.a1a2.l` <- function (a, a0, tol=.Machine$double.eps^.5) {
  # The condition is scalar (&&/|| short-circuit), so plain if/else replaces
  # the original scalar misuse of ifelse(); an NA check keeps the original
  # NA-propagating behaviour of ifelse(NA, ...).
  valid <- le(0, a) && le(a, a0) && le(a0, 1)
  if (is.na(valid) || !valid)
    return(NA)
  # At the boundaries the mapping is the identity; no root search needed.
  if (eq(a, 0) || eq(a, 1) || eq(a, a0))
    return(a)
  uniroot(function(x) a0a1a2.a.l(a0, x, x) - a,
          lower=0, upper=a0, tol=tol)$root
}
/R/aa0.a1a2.l.R
no_license
cran/adaptTest
R
false
false
258
r
# Invert a0a1a2.a.l in the symmetric case a1 == a2 (local boundary).
#
# Given an overall level -a- and first-stage level -a0-, finds the common
# value a1 = a2 in [0, a0] such that a0a1a2.a.l(a0, a1, a1) equals -a-.
# le()/eq() are the package's tolerance-aware comparison helpers.
#
# Args:
#   a:   numeric scalar, overall significance level (0 <= a <= a0 <= 1).
#   a0:  numeric scalar, first-stage level.
#   tol: numeric scalar, tolerance forwarded to uniroot().
#
# Returns: the root in [0, a0]; -a- itself at the boundary cases
#   (a == 0, 1 or a0); NA when the levels are not ordered 0 <= a <= a0 <= 1
#   (or when the ordering check is NA).
`aa0.a1a2.l` <- function (a, a0, tol=.Machine$double.eps^.5) {
  # The condition is scalar (&&/|| short-circuit), so plain if/else replaces
  # the original scalar misuse of ifelse(); an NA check keeps the original
  # NA-propagating behaviour of ifelse(NA, ...).
  valid <- le(0, a) && le(a, a0) && le(a0, 1)
  if (is.na(valid) || !valid)
    return(NA)
  # At the boundaries the mapping is the identity; no root search needed.
  if (eq(a, 0) || eq(a, 1) || eq(a, a0))
    return(a)
  uniroot(function(x) a0a1a2.a.l(a0, x, x) - a,
          lower=0, upper=a0, tol=tol)$root
}
#' pstest: Tests for the Propensity Score
#'
#' \emph{pstest} computes Kolmogorov-Smirnov and Cramer-von Mises type tests
#' for the null hypothesis that a parametric model for the propensity score
#' is correctly specified. For details of the testing procedure, see
#' Sant'Anna and Song (2019), 'Specification Tests for the Propensity Score'
#' <doi:10.1016/j.jeconom.2019.02.002>.
#'
#'@param d a vector containing the binary treatment indicator.
#'@param pscore a vector containing the estimated propensity scores.
#'@param xpscore a matrix (or data frame) containing the covariates (and their
#'       transformations) included in the propensity score estimation.
#'       It should also include the constant term.
#'@param model a description of the functional form (link function) used
#'       to estimate the propensity score. The alternatives are:
#'       'logit' (default), 'probit', and 'het.probit'.
#'@param pscore.model in case you set model = "het.probit", pscore.model is
#'       the entire hetglm object. Default for pscore.model is NULL.
#'@param w a description of which weight function the projection is based on.
#'       The alternatives are 'ind' (default), which sets \eqn{w(q,u)=1(q<=u)},
#'       'exp', which sets \eqn{w(q,u)=exp(qu)}, 'logistic', which sets
#'       \eqn{w(q,u)=1/[1+exp(1-qu)]}, 'sin', which sets \eqn{w(q,u)=sin(qu)},
#'       and 'sincos', which sets \eqn{w(q,u)=sin(qu)+cos(qu)}.
#'@param dist a description of which distribution to use during the bootstrap.
#'       The alternatives are 'Mammen' (default), and 'Rademacher'.
#'@param nboot number of bootstrap replicates to perform. Default is 1,000.
#'@param cores number of cores to use during the bootstrap. Default is 1.
#'       If cores is greater than 1, the bootstrap is conducted using
#'       parLapply, instead of a lapply type call.
#'@param chunk a value that determines the size of each 'tile'. This argument
#'       is used to split the original data into chunks, saving memory.
#'       Default value is 1,000. If the \emph{pstest} function throws a
#'       memory error, you should choose a smaller value for \emph{chunk}.
#'
#'@return a list containing the Kolmogorov-Smirnov and Cramer-von Mises test
#'       statistics for the null hypothesis of a correctly specified propensity
#'       score model (kstest and cvmtest, respectively), and their associated
#'       bootstrapped p-values, pvks and pvcvm, respectively. All inputs are
#'       also returned.
#'
#'@references
#' Sant'Anna, Pedro H. C, and Song, Xiaojun (2019), \emph{Specification Tests
#' for the Propensity Score}, Journal of Econometrics, vol. 210 (2),
#' p. 379-404, <doi:10.1016/j.jeconom.2019.02.002>.
#'
#'@examples
#' # Example based on simulation data
#' # Simulate vector of covariates
#' set.seed(1234)
#' x1 <- runif(100)
#' x2 <- rt(100, 5)
#' x3 <- rpois(100, 3)
#' # generate treatment status based on a Probit specification
#' treat <- (x1 + x2 + x3 >= rnorm(100, 4, 5))
#' # estimate correctly specified propensity score based on Probit
#' pscore <- stats::glm(treat ~ x1 + x2 + x3, family = binomial(link = "probit"),
#'                      x = TRUE)
#' # Test the correct specification of the estimated propensity score, using
#' # the weight function 'ind', and bootstrap based on 'Mammen'.
#' pstest(d = pscore$y, pscore = pscore$fit, xpscore = pscore$x,
#'        model = "probit", w = "ind", dist = "Mammen")
#' # Alternatively, one can use the 'sin' weight function
#' pstest(d = pscore$y, pscore = pscore$fit, xpscore = pscore$x,
#'        model = "probit", w = "sin", dist = "Mammen")
#'
#'@export
#'
#'@importFrom stats binomial rbinom runif glm
#'@importFrom parallel makeCluster parLapply stopCluster nextRNGStream
#'@importFrom glmx hetglm.fit
#'@importFrom MASS ginv
#-------------------------------------------------------------------------------
pstest <- function(d, pscore, xpscore, pscore.model = NULL, model = "logit",
                   w = "ind", dist = "Mammen", nboot = 1000, cores = 1,
                   chunk = 1000) {
  #---------------------------------------------------------------------------
  # Define some underlying variables
  n <- length(d)
  xx <- as.matrix(xpscore)
  pscore.fit <- pscore
  # Residuals of the treatment indicator on the fitted propensity score
  uhat <- d - pscore.fit
  #---------------------------------------------------------------------------
  # Validate the string-valued arguments early
  if (!is.element(model, c("logit", "probit", "het.probit"))) {
    stop("model must be either 'logit', 'probit' or 'het.probit' ")
  }
  if (!is.element(dist, c("Mammen", "Rademacher"))) {
    stop("dist must be either 'Mammen', or 'Rademacher' ")
  }
  if (!is.element(w, c("ind", "exp", "logistic", "sin", "sincos"))) {
    stop("w must be either 'ind', 'exp', 'logistic', 'sin', or 'sincos' ")
  }
  #---------------------------------------------------------------------------
  # Define the score variables `g` for the projection (model-specific)
  if (model == "logit") {
    g <- pscore.fit * (1 - pscore.fit) * xx
  }
  if (model == "probit") {
    # dnorm(qnorm(p)) is the probit density evaluated at the fitted index
    g <- stats::dnorm(stats::qnorm(pscore.fit)) * xx
  }
  if (model == "het.probit") {
    if (is.null(pscore.model)) {
      stop(" You must provide the entire hetglm model if you are using het.probit")
    }
    # inherits() is the robust way to test class membership (class() may have
    # length > 1, which breaks `==` comparisons in `if`)
    if (!inherits(pscore.model, "hetglm")) {
      stop(" pscore.model must be estimated using the hetglm function. See glmx package")
    }
    if (is.null(pscore.model$x$scale)) {
      stop(" You must include the option x=T in your glmx model")
    }
    pp <- pscore.model
    index.mean <- as.numeric(pp$x$mean %*% pp$coefficients$mean)
    index.scale <- as.numeric(pp$x$scale %*% (pp$coefficients$scale))
    # Heteroskedastic probit index: mean index scaled by exp(-scale index)
    index <- index.mean * exp(-index.scale)
    g <- cbind(stats::dnorm(index) * exp(-index.scale) * pp$x$mean,
               -stats::dnorm(index) * index.mean * exp(-index.scale) * pp$x$scale)
    xx <- as.matrix(cbind(pp$x$mean, pp$x$scale))
  }
  gg <- crossprod(g)
  #---------------------------------------------------------------------------
  # Variables used in the chunked loop
  k.dim <- dim(xx)[2]
  # Evaluation points of the empirical process: all fitted pscores
  # (duplicates deliberately kept, matching the original implementation)
  un.pscores <- (pscore.fit)
  n.unique <- length(un.pscores)
  # `beta`: K projection coefficients for each of the n.unique grid points
  beta <- matrix(0, k.dim, n.unique)
  # `Rw`: the (projected) empirical process evaluated at each grid point
  Rw <- matrix(0, 1, n.unique)
  # Split the n.unique columns into l tiles of at most `chunk` columns each.
  # ceiling() avoids the spurious extra (empty) iteration that
  # floor(n/chunk)+1 produces when chunk divides n.unique.
  l <- ceiling(n.unique / chunk)
  # Bootstrap test-statistic accumulators
  ksb1 <- matrix(0, nboot, 1)
  cvmb1 <- matrix(0, nboot, 1)
  #---------------------------------------------------------------------------
  # Bootstrap multiplier distribution parameters (defined once, outside loops)
  if (dist == "Mammen") {
    # Mammen (1993) two-point distribution
    k1 <- 0.5 * (1 - 5^0.5)
    k2 <- 0.5 * (1 + 5^0.5)
    pkappa <- 0.5 * (1 + 5^0.5) / (5^0.5)
  }
  if (dist == "Rademacher") {
    k1 <- 1
    k2 <- -1
    pkappa <- 0.5
  }
  # One bootstrap draw: multiplier residuals -> KS and CvM statistics
  bootapply <- function(nn, n, pkappa, k1, k2, uhat, w1.temp, Seed) {
    # Set the per-replicate seed so each run is fully reproducible
    seed.run <- Seed[nn, ]
    set.seed(seed.run, "L'Ecuyer-CMRG")
    v <- stats::rbinom(n, 1, pkappa)
    v <- ifelse(v == 1, k1, k2)
    # Bootstrapped empirical process
    Rwb <- colSums(uhat * v * w1.temp) / n
    ksb <- sqrt(n) * max(abs(Rwb))
    cvmb <- sum(Rwb^2)
    return(cbind(ksb, cvmb))
  }
  #---------------------------------------------------------------------------
  # Seeds: guarantee reproducibility across replicates (gather.ps is a
  # package-internal seed-stream generator)
  ss <- floor(stats::runif(1) * 10000)
  seed.temp <- gather.ps(nboot, seed = ss)
  Seed <- matrix(nrow = nboot, ncol = 6)
  for (i in seq_len(nboot)) {
    Seed[i, ] <- seed.temp[[i]][2:7]
  }
  #---------------------------------------------------------------------------
  # Initialize the cluster if parallel execution was requested
  if (cores > 1) {
    cl <- parallel::makeCluster(cores)
  }
  #---------------------------------------------------------------------------
  # Build the weight matrix w(pscore.fit, u) for a block of grid points `u`.
  # Defined once; dispatching on `w` here removes the five nearly identical
  # copies of the chunk loop in the original code.
  weight.chunk <- function(u) {
    if (w == "ind") {
      return(outer(pscore.fit, u, "<="))
    }
    q <- tcrossprod(pscore.fit, u)
    switch(w,
           exp      = exp(q),
           logistic = 1 / (1 + exp(1 - q)),
           sin      = sin(q),
           sincos   = sin(q) + cos(q))
  }
  #---------------------------------------------------------------------------
  # Chunked computation of the test statistics (memory efficient).
  # BUG FIX vs. original: the accumulation guard hard-coded `1000` instead of
  # `chunk` (`if (1000 * (i - 1) + 1 <= n.unique)`), silently dropping
  # bootstrap contributions from later chunks whenever chunk < 1000. With
  # l = ceiling(n.unique / chunk) every iteration is a valid chunk, so no
  # guard is needed at all.
  for (i in seq_len(l)) {
    start <- chunk * (i - 1) + 1
    end <- min(chunk * i, n.unique)
    w.temp <- weight.chunk(un.pscores[start:end])
    Gw <- crossprod(g, w.temp)
    if (w == "ind") {
      # Indicator weights can make gg (near-)singular, hence the
      # Moore-Penrose generalized inverse (as in the original code)
      beta[, start:end] <- MASS::ginv(gg) %*% Gw
    } else {
      beta[, start:end] <- solve(gg, Gw)
    }
    # Projected weights, orthogonal to the score directions
    w1.temp <- (w.temp - g %*% beta[, start:end])
    Rw[start:end] <- colSums(uhat * w1.temp) / n
    # Bootstrap within the chunk
    if (cores == 1) {
      boot.chunk <- lapply(seq_len(nboot), bootapply, n, pkappa, k1, k2,
                           uhat, w1.temp, Seed)
    } else {
      boot.chunk <- parallel::parLapply(cl, seq_len(nboot), bootapply, n,
                                        pkappa, k1, k2, uhat, w1.temp, Seed)
    }
    # Put the bootstrap results in an nboot x 2 matrix (ks, cvm)
    boot.chunk <- t(matrix(unlist(boot.chunk), 2, nboot))
    # KS: running max over chunks; CvM: running sum over chunks
    ksb1 <- pmax(ksb1, boot.chunk[, 1])
    cvmb1 <- cvmb1 + boot.chunk[, 2]
  }
  #---------------------------------------------------------------------------
  # Close the cluster, if parallel execution was used
  if (cores > 1) {
    parallel::stopCluster(cl)
  }
  #---------------------------------------------------------------------------
  # Test statistics based on the full empirical process
  cvmtest1 <- sum(Rw^2)
  kstest1 <- sqrt(n) * max(abs(Rw))
  #---------------------------------------------------------------------------
  # Collect the bootstrap statistics and compute bootstrap p-values
  boottest <- matrix(0, nboot, 2)
  boottest[, 1] <- ksb1
  boottest[, 2] <- cvmb1
  colnames(boottest) <- c("ksb", "cvmb")
  pvksb <- sum((boottest[, 1] > kstest1)) / nboot
  pvcvmb <- sum((boottest[, 2] > cvmtest1)) / nboot
  #---------------------------------------------------------------------------
  # Record the call and the arguments used
  call.param <- match.call()
  argu <- mget(names(formals()), sys.frame(sys.nframe()))
  argu <- list(model = argu$model, w = argu$w, dist = argu$dist,
               nboot = argu$nboot)
  ret <- list(kstest = kstest1, cvmtest = cvmtest1, pvks = pvksb,
              pvcvm = pvcvmb, call.param = call.param, argu = argu)
  # Define a new class for the print method
  class(ret) <- "pstest"
  return(ret)
}
/R/pstest.R
no_license
pedrohcgs/pstest
R
false
false
16,559
r
#' pstest: Tests for the Propensity Score
#'
#' \emph{pstest} computes Kolmogorov-Smirnov and Cramer-von Mises type tests
#' for the null hypothesis that a parametric model for the propensity score
#' is correctly specified. For details of the testing procedure, see
#' Sant'Anna and Song (2019), 'Specification Tests for the Propensity Score'
#' <doi:10.1016/j.jeconom.2019.02.002>.
#'
#'@param d a vector containing the binary treatment indicator.
#'@param pscore a vector containing the estimated propensity scores.
#'@param xpscore a matrix (or data frame) containing the covariates (and their
#'       transformations) included in the propensity score estimation.
#'       It should also include the constant term.
#'@param model a description of the functional form (link function) used
#'       to estimate the propensity score. The alternatives are:
#'       'logit' (default), 'probit', and 'het.probit'.
#'@param pscore.model in case you set model = "het.probit", pscore.model is
#'       the entire hetglm object. Default for pscore.model is NULL.
#'@param w a description of which weight function the projection is based on.
#'       The alternatives are 'ind' (default), which sets \eqn{w(q,u)=1(q<=u)},
#'       'exp', which sets \eqn{w(q,u)=exp(qu)}, 'logistic', which sets
#'       \eqn{w(q,u)=1/[1+exp(1-qu)]}, 'sin', which sets \eqn{w(q,u)=sin(qu)},
#'       and 'sincos', which sets \eqn{w(q,u)=sin(qu)+cos(qu)}.
#'@param dist a description of which distribution to use during the bootstrap.
#'       The alternatives are 'Mammen' (default), and 'Rademacher'.
#'@param nboot number of bootstrap replicates to perform. Default is 1,000.
#'@param cores number of cores to use during the bootstrap. Default is 1.
#'       If cores is greater than 1, the bootstrap is conducted using
#'       parLapply, instead of a lapply type call.
#'@param chunk a value that determines the size of each 'tile'. This argument
#'       is used to split the original data into chunks, saving memory.
#'       Default value is 1,000. If the \emph{pstest} function throws a
#'       memory error, you should choose a smaller value for \emph{chunk}.
#'
#'@return a list containing the Kolmogorov-Smirnov and Cramer-von Mises test
#'       statistics for the null hypothesis of a correctly specified propensity
#'       score model (kstest and cvmtest, respectively), and their associated
#'       bootstrapped p-values, pvks and pvcvm, respectively. All inputs are
#'       also returned.
#'
#'@references
#' Sant'Anna, Pedro H. C, and Song, Xiaojun (2019), \emph{Specification Tests
#' for the Propensity Score}, Journal of Econometrics, vol. 210 (2),
#' p. 379-404, <doi:10.1016/j.jeconom.2019.02.002>.
#'
#'@examples
#' # Example based on simulation data
#' # Simulate vector of covariates
#' set.seed(1234)
#' x1 <- runif(100)
#' x2 <- rt(100, 5)
#' x3 <- rpois(100, 3)
#' # generate treatment status based on a Probit specification
#' treat <- (x1 + x2 + x3 >= rnorm(100, 4, 5))
#' # estimate correctly specified propensity score based on Probit
#' pscore <- stats::glm(treat ~ x1 + x2 + x3, family = binomial(link = "probit"),
#'                      x = TRUE)
#' # Test the correct specification of the estimated propensity score, using
#' # the weight function 'ind', and bootstrap based on 'Mammen'.
#' pstest(d = pscore$y, pscore = pscore$fit, xpscore = pscore$x,
#'        model = "probit", w = "ind", dist = "Mammen")
#' # Alternatively, one can use the 'sin' weight function
#' pstest(d = pscore$y, pscore = pscore$fit, xpscore = pscore$x,
#'        model = "probit", w = "sin", dist = "Mammen")
#'
#'@export
#'
#'@importFrom stats binomial rbinom runif glm
#'@importFrom parallel makeCluster parLapply stopCluster nextRNGStream
#'@importFrom glmx hetglm.fit
#'@importFrom MASS ginv
#-------------------------------------------------------------------------------
pstest <- function(d, pscore, xpscore, pscore.model = NULL, model = "logit",
                   w = "ind", dist = "Mammen", nboot = 1000, cores = 1,
                   chunk = 1000) {
  #---------------------------------------------------------------------------
  # Define some underlying variables
  n <- length(d)
  xx <- as.matrix(xpscore)
  pscore.fit <- pscore
  # Residuals of the treatment indicator on the fitted propensity score
  uhat <- d - pscore.fit
  #---------------------------------------------------------------------------
  # Validate the string-valued arguments early
  if (!is.element(model, c("logit", "probit", "het.probit"))) {
    stop("model must be either 'logit', 'probit' or 'het.probit' ")
  }
  if (!is.element(dist, c("Mammen", "Rademacher"))) {
    stop("dist must be either 'Mammen', or 'Rademacher' ")
  }
  if (!is.element(w, c("ind", "exp", "logistic", "sin", "sincos"))) {
    stop("w must be either 'ind', 'exp', 'logistic', 'sin', or 'sincos' ")
  }
  #---------------------------------------------------------------------------
  # Define the score variables `g` for the projection (model-specific)
  if (model == "logit") {
    g <- pscore.fit * (1 - pscore.fit) * xx
  }
  if (model == "probit") {
    # dnorm(qnorm(p)) is the probit density evaluated at the fitted index
    g <- stats::dnorm(stats::qnorm(pscore.fit)) * xx
  }
  if (model == "het.probit") {
    if (is.null(pscore.model)) {
      stop(" You must provide the entire hetglm model if you are using het.probit")
    }
    # inherits() is the robust way to test class membership (class() may have
    # length > 1, which breaks `==` comparisons in `if`)
    if (!inherits(pscore.model, "hetglm")) {
      stop(" pscore.model must be estimated using the hetglm function. See glmx package")
    }
    if (is.null(pscore.model$x$scale)) {
      stop(" You must include the option x=T in your glmx model")
    }
    pp <- pscore.model
    index.mean <- as.numeric(pp$x$mean %*% pp$coefficients$mean)
    index.scale <- as.numeric(pp$x$scale %*% (pp$coefficients$scale))
    # Heteroskedastic probit index: mean index scaled by exp(-scale index)
    index <- index.mean * exp(-index.scale)
    g <- cbind(stats::dnorm(index) * exp(-index.scale) * pp$x$mean,
               -stats::dnorm(index) * index.mean * exp(-index.scale) * pp$x$scale)
    xx <- as.matrix(cbind(pp$x$mean, pp$x$scale))
  }
  gg <- crossprod(g)
  #---------------------------------------------------------------------------
  # Variables used in the chunked loop
  k.dim <- dim(xx)[2]
  # Evaluation points of the empirical process: all fitted pscores
  # (duplicates deliberately kept, matching the original implementation)
  un.pscores <- (pscore.fit)
  n.unique <- length(un.pscores)
  # `beta`: K projection coefficients for each of the n.unique grid points
  beta <- matrix(0, k.dim, n.unique)
  # `Rw`: the (projected) empirical process evaluated at each grid point
  Rw <- matrix(0, 1, n.unique)
  # Split the n.unique columns into l tiles of at most `chunk` columns each.
  # ceiling() avoids the spurious extra (empty) iteration that
  # floor(n/chunk)+1 produces when chunk divides n.unique.
  l <- ceiling(n.unique / chunk)
  # Bootstrap test-statistic accumulators
  ksb1 <- matrix(0, nboot, 1)
  cvmb1 <- matrix(0, nboot, 1)
  #---------------------------------------------------------------------------
  # Bootstrap multiplier distribution parameters (defined once, outside loops)
  if (dist == "Mammen") {
    # Mammen (1993) two-point distribution
    k1 <- 0.5 * (1 - 5^0.5)
    k2 <- 0.5 * (1 + 5^0.5)
    pkappa <- 0.5 * (1 + 5^0.5) / (5^0.5)
  }
  if (dist == "Rademacher") {
    k1 <- 1
    k2 <- -1
    pkappa <- 0.5
  }
  # One bootstrap draw: multiplier residuals -> KS and CvM statistics
  bootapply <- function(nn, n, pkappa, k1, k2, uhat, w1.temp, Seed) {
    # Set the per-replicate seed so each run is fully reproducible
    seed.run <- Seed[nn, ]
    set.seed(seed.run, "L'Ecuyer-CMRG")
    v <- stats::rbinom(n, 1, pkappa)
    v <- ifelse(v == 1, k1, k2)
    # Bootstrapped empirical process
    Rwb <- colSums(uhat * v * w1.temp) / n
    ksb <- sqrt(n) * max(abs(Rwb))
    cvmb <- sum(Rwb^2)
    return(cbind(ksb, cvmb))
  }
  #---------------------------------------------------------------------------
  # Seeds: guarantee reproducibility across replicates (gather.ps is a
  # package-internal seed-stream generator)
  ss <- floor(stats::runif(1) * 10000)
  seed.temp <- gather.ps(nboot, seed = ss)
  Seed <- matrix(nrow = nboot, ncol = 6)
  for (i in seq_len(nboot)) {
    Seed[i, ] <- seed.temp[[i]][2:7]
  }
  #---------------------------------------------------------------------------
  # Initialize the cluster if parallel execution was requested
  if (cores > 1) {
    cl <- parallel::makeCluster(cores)
  }
  #---------------------------------------------------------------------------
  # Build the weight matrix w(pscore.fit, u) for a block of grid points `u`.
  # Defined once; dispatching on `w` here removes the five nearly identical
  # copies of the chunk loop in the original code.
  weight.chunk <- function(u) {
    if (w == "ind") {
      return(outer(pscore.fit, u, "<="))
    }
    q <- tcrossprod(pscore.fit, u)
    switch(w,
           exp      = exp(q),
           logistic = 1 / (1 + exp(1 - q)),
           sin      = sin(q),
           sincos   = sin(q) + cos(q))
  }
  #---------------------------------------------------------------------------
  # Chunked computation of the test statistics (memory efficient).
  # BUG FIX vs. original: the accumulation guard hard-coded `1000` instead of
  # `chunk` (`if (1000 * (i - 1) + 1 <= n.unique)`), silently dropping
  # bootstrap contributions from later chunks whenever chunk < 1000. With
  # l = ceiling(n.unique / chunk) every iteration is a valid chunk, so no
  # guard is needed at all.
  for (i in seq_len(l)) {
    start <- chunk * (i - 1) + 1
    end <- min(chunk * i, n.unique)
    w.temp <- weight.chunk(un.pscores[start:end])
    Gw <- crossprod(g, w.temp)
    if (w == "ind") {
      # Indicator weights can make gg (near-)singular, hence the
      # Moore-Penrose generalized inverse (as in the original code)
      beta[, start:end] <- MASS::ginv(gg) %*% Gw
    } else {
      beta[, start:end] <- solve(gg, Gw)
    }
    # Projected weights, orthogonal to the score directions
    w1.temp <- (w.temp - g %*% beta[, start:end])
    Rw[start:end] <- colSums(uhat * w1.temp) / n
    # Bootstrap within the chunk
    if (cores == 1) {
      boot.chunk <- lapply(seq_len(nboot), bootapply, n, pkappa, k1, k2,
                           uhat, w1.temp, Seed)
    } else {
      boot.chunk <- parallel::parLapply(cl, seq_len(nboot), bootapply, n,
                                        pkappa, k1, k2, uhat, w1.temp, Seed)
    }
    # Put the bootstrap results in an nboot x 2 matrix (ks, cvm)
    boot.chunk <- t(matrix(unlist(boot.chunk), 2, nboot))
    # KS: running max over chunks; CvM: running sum over chunks
    ksb1 <- pmax(ksb1, boot.chunk[, 1])
    cvmb1 <- cvmb1 + boot.chunk[, 2]
  }
  #---------------------------------------------------------------------------
  # Close the cluster, if parallel execution was used
  if (cores > 1) {
    parallel::stopCluster(cl)
  }
  #---------------------------------------------------------------------------
  # Test statistics based on the full empirical process
  cvmtest1 <- sum(Rw^2)
  kstest1 <- sqrt(n) * max(abs(Rw))
  #---------------------------------------------------------------------------
  # Collect the bootstrap statistics and compute bootstrap p-values
  boottest <- matrix(0, nboot, 2)
  boottest[, 1] <- ksb1
  boottest[, 2] <- cvmb1
  colnames(boottest) <- c("ksb", "cvmb")
  pvksb <- sum((boottest[, 1] > kstest1)) / nboot
  pvcvmb <- sum((boottest[, 2] > cvmtest1)) / nboot
  #---------------------------------------------------------------------------
  # Record the call and the arguments used
  call.param <- match.call()
  argu <- mget(names(formals()), sys.frame(sys.nframe()))
  argu <- list(model = argu$model, w = argu$w, dist = argu$dist,
               nboot = argu$nboot)
  ret <- list(kstest = kstest1, cvmtest = cvmtest1, pvks = pvksb,
              pvcvm = pvcvmb, call.param = call.param, argu = argu)
  # Define a new class for the print method
  class(ret) <- "pstest"
  return(ret)
}
# Simulate one set of case samples from the binormal model.
#
# Args:
#   K     : length-2 vector; K[1] non-diseased and K[2] diseased case counts.
#   mu    : mean of the diseased-case normal distribution.
#   sigma : standard deviation of the diseased-case normal distribution.
#   zetas : cutpoint vector used to bin the continuous samples.
#   bin   : if TRUE, return ordinal bin indices instead of continuous values.
#
# Returns: list(z1, z2) — samples for the two truth states, binned when
# requested. RNG draw order matches the original (z1 first, then z2), so
# results are identical for a given seed.
GenerateCaseSamples <- function( K, mu, sigma, zetas, bin=TRUE) {
  z1 <- rnorm(K[1])
  z2 <- rnorm(K[2], mean = mu, sd = sigma)
  if (bin) {
    # Left-closed intervals bounded by -Inf/+Inf, so every draw gets a bin;
    # as.numeric() keeps the original's numeric (not integer) return type.
    edges <- c(-Inf, zetas, Inf)
    z1 <- as.numeric(cut(z1, edges, labels = FALSE, right = FALSE))
    z2 <- as.numeric(cut(z2, edges, labels = FALSE, right = FALSE))
  }
  return( list( z1 = z1, z2 = z2) )
}
/R/CH07-Variability/GenerateCaseSamples.R
permissive
dpc10ster/RJafrocBook
R
false
false
594
r
# Simulate one set of case samples from the binormal model.
#
# Args:
#   K     : length-2 vector; K[1] non-diseased and K[2] diseased case counts.
#   mu    : mean of the diseased-case normal distribution.
#   sigma : standard deviation of the diseased-case normal distribution.
#   zetas : cutpoint vector used to bin the continuous samples.
#   bin   : if TRUE, return ordinal bin indices instead of continuous values.
#
# Returns: list(z1, z2) — samples for the two truth states, binned when
# requested. RNG draw order matches the original (z1 first, then z2), so
# results are identical for a given seed.
GenerateCaseSamples <- function( K, mu, sigma, zetas, bin=TRUE) {
  z1 <- rnorm(K[1])
  z2 <- rnorm(K[2], mean = mu, sd = sigma)
  if (bin) {
    # Left-closed intervals bounded by -Inf/+Inf, so every draw gets a bin;
    # as.numeric() keeps the original's numeric (not integer) return type.
    edges <- c(-Inf, zetas, Inf)
    z1 <- as.numeric(cut(z1, edges, labels = FALSE, right = FALSE))
    z2 <- as.numeric(cut(z2, edges, labels = FALSE, right = FALSE))
  }
  return( list( z1 = z1, z2 = z2) )
}
# # Updated Package Version 120828 # Updated Package Version 130426 # Updated Package Version 130506 - V0.94 # Updated Package Version 130510 - V0.95 - fixes. # Updated Package Version 130511 - V0.96 - attempt to complete - # Updated Package Version 130511 - V0.97 (8:00pm) - fixes # Updated Package Version 130513 - V0.98 (8:00am) - fixes and testing # Updated Package Version 130517 - V0.99 - fixes and work with BW. # - correct ref line color and minor updates. # - corrected micromapSTDefaults and Arrows errors. # - label adjustment and fix parameter checking for boxplots # Updated Package Version 130604 - V1.0.0 - Final Edit and fixes for release. # - Dynamically defined variables must be globalVariables add. # - Formal Release of package. # Updated Package Version 131127 - V1.0.1 - Correct segmented and centered bars to handle only two data columns # Updated Package Version 140104 - V1.0.2 - Add diagonal line in scatter plot with equal x and y values. # - Update NormSeg, Seg, Centered Seg to use variable width bars. # - Changed method of providing colors and details parameters. # - Correct median dot in scatter plots # - Add logic to allow numeric (integer) or column names in col1, col2, col3 # - Correct logic to handle multiple columns in sortVar. # Updated Package Version 140307 - V1.0.3 - Add Rank Glyph # - Remove limit on number of time series elements. # - Plot the median time series data in the panels above and below # the median row. # - Adjusted defaults on stacked bar graphs # Updated Package Version 140712 - V1.0.4 - Correct usage of single and double quote marks in examples. # Updated Package Version 141107 - V1.0.5 - Parameter checking of the panelDesc arguments is incorrect. # Logic is rewritten and migrated into this package. # # Updated Pagkage micromapSEER - 141023 - V0.90 - Modified package to meet NCI Seer's requirements. # Rewrote map... 
logic to handle different number of rows # per panel, scaled glyphs to be the same size in # panels of 1, 2, 3, 4 or 5 areas. # Modified SegBar, NormBar, Bar, and BoxPlot glyphs # to handle different number of areas per panel and # present the same sized glyph # Modified logic to accept a SEER border and area dataset or # the full US States area dataset. # Fixed logic in mapcum, mapmedian and maptail to correctly # draw a square rectangle in the title, independent on the # number of columns or rows of panels. # Fixed ID glyph to dynamic determine width of column # based on the abbreviated or fullname text in the SEER # or US state datasets. Corrected code to properly draw # the same sized square box and align with text for all # ID glyph lines. # Added logic to force min. and max column widths. # Added logic to force min. and max panel row height. # Correct distance from axis labels to tics to be the same # on the top and bottom axis labels. # Initially setup tables to provide uniform distribution # of areas across panels. This caused to many 3 and 4 # area panels. Re-did the setup tables to minimize the # number of panels and use 4 and 5 areas per panel when # ever possible. # Correct datasets to contain all UPPER case abbreviations # and properly capitalized Full Names. # Internal to program all matching is done using UPPER # case strings. # Added logic to include a "like" string for each SEER area # to allow matching with SEERStat generated datasets. # Since data.frames are mostly constructed with factors, # the user may pass us a statsDFrame containing factors # instead of numeric values. Code added to check for # numerics in the statistical data provided, convert # from character is required, and convert from factors # if required. # User data may have extra line at the end of the data, added # option to delete last line. if not a match. # Fixed validating the data in the statsDFrame columns in # each glyhpic. 
# Fixed logic handling odd number of groups with the # middle group having > 1 areas. # Added logic to detect empty data columns in the statsDFrame. # character contain had to be checked if it can be converted to # numeric. # Corrected logic to handle multiple border groups. Default for # Seer is "USSeerBG". However, not providing the argument # set no values or the wrong value in BordGrpName. # The aspect of the US maps was off. Corrected the maximum # height value from 0.6 to 0.8 inches. # Changed the name of the main module to micromapPLUS. Add two front-end # functions - micromapST and micromapSEER to provide a dual interface for # existing users. # Separated micromapGSetDefaults and micromapGSetPanelDef functions into # a separate ".r" to share with micromapSEER. # # - 150112 Updates: # Corrected problem with printing US "DC", "HI", "AK" labels on non-US # maps. Used the areaParms$areaUSData to control this feature. This # bordGrp parameter should only be set to "TRUE" when the full US map # and states are used in the bordGrp. # Changed the deleteLast option to ignoreNoMatch options and # redid the code to do this function and generate the information # and error messages. # Changed module name back to micromapST. # Changed version number to 1.1.0 to note major revision. # # - 150312 Updates: # Change USStatesBG user data.frame check from: must be 51 rows to # must be 51 or less rows. Allow data with subsets of states. # - 150713 Updates: # Update structure of areaParms table in border groups # Add several more border groups to the package: UK-Ireland, Seoul, Utah # Add staggered x-Axis labels to keep ends from overlapping. # Add feature to allow user to specify x-Axis for glyph # Update glyphs to formally handle NA values in data. # Update X-Axis to include labels if grid is drawn (???) # Update map code to enforce minimum width to make sure # space is wide enough for all titles and labels. 
# Add "Alt_Abbr" option for rowNames # Update code to use "LINK" or make sure "Abbr" works. # Changed Border Group .rda file name from ????DF.rda to ????BG.rda. # Added MapLabel field to areaNamesAbbrsIDs tables - to be used to generalize # the over printing of sub area names on first map - AK, HI, DC like. # - 150715 Updates: # Changing name table structure to have "full", "ab", "alt_ab", "id" # pointing to "key" rather than abbr. This is to handle any cases # down the road that don't have abbr, full or ID. If the # column is not present, the option will not be available. # - 160807 Updates: # Fix position of first title above the columns. Too close to # axis by about 1/2 a line. # - 160812 Updates: # add individual DOT symbol control to dotconf and dotSE. # updated detailsVariables to reflect the new details options # and future conversions. Check code and added all missing # variables. # - 160816 Updates: # modified labels code to use odd number of labels and a minimum of 3. # - 161120 Updates: # added regional IDs and Names to the name table. # Added better overlay print control for L2, Reg, and L3. # Corrected X Axis label logic - removed duplications and # parameter resetting. # - 161206 Updates: # Changed NAMESPACE, DESCRIPTION files to meet new # CRAN requirements. # Modified code to not directly use assigns for # variables in the .GlobalEnv space. # Modified all data() function calls to load # the data into the current environment. # # # discussion points: not all border groups have abbreviations or IDs. Names yes, but need to # handle the value inputed by the user and link data to boundaries. May be needed # to build "internal" link and have all else point to it? (if present.) # # Update Log and change details by Jim Pearson # May 31, 2009 - corrected dates on three column micromap # 1990-2000 to 2001-5 --> 1996-2000 to 2001-5 # June 7, 2009 - Added VerStr as a parameter to be able to determine # which output files are from this version. 
# - Updated book Micromap-Dot-Arrow-Box plot to use new # data files: # WFAgeAdjLungMort2000-4CountyAgeAdj2000.csv # WFLungMort19951999AgeAdj2000State.csv # WFLungMort20002004AgeAdj2000State.csv # and change the titles for the columns in the output to match. # - Updated sections to use labels instead of column numbers. # - Updated Book micromap to merge two files instead of using # one file. This also changed the column number by +1. # Note: future update should look at using column names instead of # numbers. # - Updated ARROW chart to plot DOT when difference is zero. # - Reduce white space between columns (just a little, cannot be eliminate to # maintain readibility. # July 22, 2010 - Correct reference value (refVals) code. # - add variable for reference value label text (refTexts) per column. # panelDesc$refTexts -> vector, one per column. # - add variable to color the reference value label test # details$Ref.Text.col # - No reference label (legend) is printed if no refTexts for the # column is provided. # January 30, 2011 - Determine running directory and load # panelFunctions.r, panelLayout.Rdata, and micromapST.Rdata # from directory. # August 28, 2012 - Cleaned up code and re-packaged it with .onLoad # - duplicate variable cleaned up, and unused code removed. # - integrated the test/demo code correctly. # - made adjustments to handle the micromapST namespace. # - changed refVals and refTexts to local variables (lRefVals and lRefTexts) to add clarity. # - changed parameter for BoxPlots colMedian to BoxP.Median.col to kill duplication with the colMedian # used on the general graphic # - Modified "Details" and "Colors" variable to be unique and # re-ordered by subroutine usage. # October 5, 2012 - update documentation for review. # - deleted second version of panelGroupOutline- in panelFunctions.r # - Changed rlAreaRefText function to build a legend with a line followed by # the reference text. 
Problem was line was on both sides of the label and # in some cases overlaid the text. This way the line is on the left of the text. # - changed default value for reference text from black to mid green to match the line # color. # April 26, 2013 - add new panel graphic function - TS and TSConf # - added Time Series where each state has a strip within the panel for the line graph. # - changed boxPlot argument to panelData to represent more types of auxilary data for the program. # May 1-2, 2013 - add new panel graphic functions - ScatDot, StackedBar, and Normalized Bar # - add graduated colors to stacked bars and normalized stacked bars. # - changed normalized axis labels to percentages. # - add Time Series with all plots in one panels (one x-y graph) # - change TS confidence band to lighter shade = 10% transparency. # - attempted to fix order issues. On TS series of panels, assume order of the panelData is the # same as the original col1, col2, col3, stateId orders. When they are re-ordered, Save the # index change to remap back to the old order. Use this to re-order panelData. # - On scatdot and segbar panels, the panelData contains a stateId. Reordering is # done by using the sorted stateId column in the statsDFrame to re-order the panelData frames. # - added programing feature to permit adjustments to colsize, left and right margins of a # panel based on the type of panel to be created. Needed to allow space for the # left axis labels for the time series panels (4). # May 4, 2013 - remove prototype strip time series - did not work, code deleted. # - Added centered stacked bars. # - changed circle size on Scatdot of non-colored dots to 75 smaller. # - Changed source of data for "scatdot", "segbar", "normbar", and "ctrbar" from # an extra panelData structure to using columns in the statsDFrame call parameters data.frame. 
# Now the col1 and col2 parameters in the panelDesc data.frame indicate which columns or # range of columns in the startFrame data.frame to use for the X,Y coordinates or the # set of bar segment values per state. # May 6, 2013 - change package name from stateMicromap to micromapST. # - updated documentation and added new examples to micromapST.Rd # May 8, 2013 - Fixes - change colData to panelData to avoid confusion. # - Add parameter value checks to Arrow, Bar, dot, dotSE, dotconf, TS, ScatDot, segbar, normbar, and ctrbar functions. # - fix examples # May 9, 2013 - switch the TS Array to be 1=x, 2=y, 3=low-y, 4=high-y. # May 10, 2013 - add support for rownames on the time series arrays. # - added validation of state ids in boxplots and time series. # - added new time series dataset to package. # - added panelInBound to generating x and y axis labels. # May 11, 2013 - reduced Y axis labels size to get more detail # - replaced wflung00cnty data file. # - created segbar data file. # - fixed problem with saving new time series file - needed names on all dimensions. # - fixed problem with at and labels argments on mtext calls. # - saved original tests in init/tests directory and replace them # in the micromapST.Rd with the master 6 examples. # - cleaned up examples. # - added code to try and ensure the min and max values on the y axis # are always printed for the median area (middle). # - add code to do Dan's color mixing to get opaque colors in bars. # May 17, 2013 - make adjustment for publishing package # - adjust grey colors to allow a grey scale color pattern to be used. (based on # ColorBrewer "Greys" for 5 colors. # - fixed grey/gray colors issues with dots, etc. using outline colors. # - added circles around dots to make the grey standout more. # May 20, 2013 - Added "grays" as an equivalent palette name. # May 21, 2013 - Fix ref line color to mid-green, change reftext to black. # - check fill color for scat dot, fixed. 
# - changed scat dot median symbol from triangle to dot and filled with blakc. # - adjusted box positions on maptail, mapcum, and mapmedian titles. # - fixed grays to work with ref lines. # May 24, 2013 - finish clean up - fix micromapSTDefaults error during loading. # - Final Testing. # May 25, 2013 - fixed micromapSTDefaults error on initial load # - fixed arror warning by using > .005 as zero. # - moved up titles printing to let INTERRUPTED pdf build have titles. # May 28, 2013 - fix parameter checking for boxplot list. # - Added names check for box plot, # - Added "list" type check for box plot. # - Reorganized test to not cause a secondary error. # - Added Id.Text.adj parameter to details and rlAreaID to adjust text alignment. # June 2, 2013 - fix DotSE missing X1 variable - should be x. # - Added code to do proper capitalization of state abbreviations and full state names. # - Added code to intercept common names for Washington, D. C. and convert to "D.C." # June 3, 2013 - Released to CRAN. # June 4, 2013 - cran check does not handle automatic variable assignments (around line 3100.) # register them with R via globalVariable function to add them to the list for rcmd check. # During testing, the variables do not show up as globals and are protected within the # micromapST namespace. - re-released. # Nov. 27, 2013 - Correct the parameter check for segmented and centered bars to permit a # minimum of 2 data columns. # Jan 4-9, 2014 - The diagonal line added to the scatter plots must reflect equal x and y values. # Current line is diagonal to the box not the data. # - Add option to vary the segment bar width from small to larger from left to right for # the NormSeg, SegBar, and Centered SegBar glyphs. # - Changed method of setting up details variables within the micromapST namespace. # Originally, user had to provide a complete list of all of the details variables. If # one was missing or misspelled, no detection or correction. 
New method, starts by # assigning all of the variables from the default values. Then takes the provided details # list from the user and merges it into the already declared variables. If a variable # does not exist or is misspelled, it is caught by checking against the default list of names # and not processed. In the future, a similar structure will be used to check the # ranges or types of information to validate the user provided details variable values. # - Correct median dot in scatter dot plots to only appear in the 4 and 6 rows (just either side # of the median row. # - Update logic in sortVar option to correctly handle multiple sort columns. # - Add ability to reference data.frame columns by name in the col1, col2, col3 and sortVar # parameters. # - Enhanced parameter verification and error checking to help user understand the specific # problem and correct it fast. Don't allow R to abort if possible. # March 7, 2014 - Removed limit on the number of points in Time Series # - Add code for Rank glyph # - The time series line and confidence band are squeezed in the median row space and do not # properly show the data. The median time series data is plotted in the panel above and below # to median row to properly present the data using the same aspect ratio as the other data. # - Adjusted the defaults for the segbar, ctrbar, and normbar graphics to have no center dot # and fixed bar height. # July 12, 2014 - Corrected single and double quote marks usage in examples. # November 7, 2014 - Rewrote panelDesc argument checking. # November 1-7, 2014 - Updated logic to handle the number of areas dynamically and support # US States or US Seer areas and data. # - Added logic to handle the specification of the link row names as a column of the # area Data.frame columns instead of requiring the link to be the Abbr of the area # as the row.names of the statsDFrame data.frame. # April, 2015 - generalize package for NCI and CRAN release. Add additional border groups. 
# Work on the scaling issues for the larger maps and number of rows and columns. # July, 2015 - Updated code to handle new border group structures # - Add "***" to the beginning of each error message and restructuring the message with a new # message id, and to include name of the glyphs and the panel column number. # - Found error in multiple column sort feature. Rewrote code to handle. # - Found rank functions and code can not handle multiple columns. Implemented # rank code to only handle 1 column. But on new feature list. # - Updated code to work with Abbr, Alt_Abbr, Full Names, ID or Alias and # map them to the border Vis Files key value. This was done to handle # cases where the user border group may not have an abbreviation to use as # the link. If at least one exist, then it can be linked to the key. # - Updated code to correctly calculate the width of the mapxxx and id glyphs # columns using the basic font and text.cex sizes. Must update when scaling is # implemented. # - Modified the colors table to include a color for a 6th row in each group/rows # and two more shading colors for mapmedian and maptail to compensate for issues # when there is a median group/row with more than 1 row. # - Modified all glyphs to handle situations when an NA is present in the user data. # The general rule is not, all of the data or no plot. Ploting anything would # possibly lead to an incorrect reading by the user. # August 2, 2015 - Rewrote the mapping routine to properly handle holes when filling # the polygons in the right order and to draw the borders in the order of # Not Used polygons, background polygons, highlighted polygons, and active polygons. # This code also supported NotUsed sub-areas (color very light grey) and two color # highlights of sub-areas above and below the median when a map is used for the median # group row. # - Fixed problem with title parameter checking to handle 1 or 2 values in the vector. 
# - Tested 8.5x14 and 11x17 page sizes for Kansas, New York and UKIreland. UKIreland is # still very small but works. Noticed line weight need to be adjusted when images are # small. # - added two colors for the median map to show above and below clear. This is important # then the area has median group row with more than one row. The above and below # are shown on the same map, so must be distinquished. # - corrected the calculations and implementation of the number of Groups, number of rows # per group, number of median group, number of median rows to handle no median group (even # number of groups), a median group with 1 row, and a median group with > 1 row. Adjusted # the code in all glyphs to handle the new implementation. # - implemented MapPolySetup function to do common setup and calculations for all Map functions. # - added check to warn user if there are more data rows, then sub-areas in the border group. # There are move checks later to identify the extra data.frame rows to the user. # - remove any check, stop or warning if the number of data rows are less than then number # of sub-areas in the border group. # - Changed the selecting the number of rows per group pattern from a very large table to a # a calculation with a table override when needed. User is also allowed to specify a # pattern to override micromapST's calculation. # - changed titles on Mapmedian map from "Featured above/below" to "above median" and # "below median". Mapcum map from "Featured above/below" to "above/below panel rows" # Still thinking about the maptail titles. # - Implemented function to center box and text for column headers. # August 4, 2015 - Updated logic for x-Axis labeling. # August 8, 2015 - Fixed/Add ability to specify symbol for the ID glyphs (half implemented, now working.) # - Added details option "Map.Median.text" to allow the Median for Sorted Panels text to be changed. # - Added below column label ("lab3") to the map and id columns. 
# - Added the ability to change the areaParms variables via the details=list() feature. # - Corrected and re-implemented Id.Dot.pch feature for the ID glyph # August 16, 2015 - Corrected the reference text and line drawing logic - rewrote. Line can now be what's left # up to 1/2 inch in length. Text and line centered in column. # - Added options to specify type of scaling for the axis. Original = pretty function limited by # range of data. Scaled range = subtitle used to identify units and values scaled by the units. # Scaled number = each number in axis scaled, adjusted, and label with suffix to scaling (e.g., M, B, etc.) # Scaling below 1 is also done and properly identified using the International Standards of Units (SI) # symbols. # - Added option to stagger the axis labels on the X axis to prevent overlaying. # August 20, 2015 - changed default labeling algorithm from rpretty to wilkinson. ("o" to "w") # - Implement test X axis labeling and column titling function (DrawXAxisAndTitles) in # all glyphs. # - Reduced size of ID symbols by 20% - looks better. # - Added ConvLinesToUnits function to help convert line coordinates to Unit coordinates and # handle the offset if the bottom left corner is not 0,0. # - Fixed the refText and line problem to place the line in the middle of the text. # September 14, 2015 # - Add additional panelDesc column "adv" to support "new" parameters on a glyph column # basis. Column is a list of lists. The lists in the column is contains # new and old options/parameters. panelDesc column name is "adv". # Any of the old panelDesc columns can have values in the adv list. # - (FUTURE) add ability to detect if panelDesc is the original data.frame, # or the new list of list format. # - Cleaned up warning messages by adding "call.=FALSE" option to remove # calling routine information from warning. # - Started adding validation code for user provided details and colors. This # will later be applied to the glyph parameters set by the user. 
# September 19, 2015 # - constructed table of details Variables and properties to be used in verifying # the details variables (from system or user). The table also contains information # to permit translation of existing details variables into glyphs based variables. # January 20, 2016 # - Added ability to save list of called variable names for warnings and error messages. # saved the values in list in details. # - Added and tested "regions" call argument to allow only regions (l2) to be mapped # if no data in other regions. # - Added code to capture call variable names (not values) for use in warning messages. # - Added check for rowNames = NULL # February 20, 2016 # - Updated warning message identifiers and documentation to match. # - Corrected statsDFrame column checking routines to handle character numbers and # provided the correct warning messages. # - Add CheckParmColx function to properly handle checking statsDFrame column names # and numbers for rowNamesCol and sortVar call arguments. # February 29, 2016 # - Changed wilkinson labeling algorithm to extended. The option is also changed from "w" to "e". # The wilkinson algorithm generated to many grid lines and labels vs. what was # requested. # May 5, 2016 # - changed alias code to edit user strings to delete special characters, blanks (multiple, trailing and # leading), control characters, punctuation characters, and convert the string to all upper case. # Seer Stat has changed the default registry names to include "_"s instead of " " character between # the words. The extra editing neutralizes the impact. Function CleanString was added to handle lists # of registry names. # August 7, 2016 # - first line of column titles too close to plot area by about 1/2 a line. Found calculation off. # Re-implemented using table of line heights and intra line spacing requirements. # August 8, 2016 # - Started reimplementation of colSize call parameter in the panelDesc data.frame. 
Document feature, # Added code to validate parameter. Implemented code in panelLayout function. # August 10, 2016 # - Changed the min and max column sizes to 0.25 to 2.5 inches. # - Changed the calculation for the user coordinates width of a panel to include 1/2 the "usr" width of a # a character instead of a fixed amount to ensure the symbol for a dot or arrow head fits within the panel. # - Glyphs that don't use dots or symbols that occupy space around the low and high data # points were offset/padded resulting in the graphics incorrectly floating inside the graph. # Example: bar graphs that not start at left or right edge of graph. time series graphs # don't tough sides of the graph. All of these issues have been corrected to only pad # (expand graph X range) when required - dot, dotconf, dotsignif, dotse, arrow, scatdot. # Any graph that is anchored to the left or right edge is not padded - bar, segbar, normbar. # Changes made in DrawXAxisandTitles function using generalize call perameters. # August 12, 2016 # - Fixed reversed glyph header titles Lab1 and Lab2 problem. # August 13-16, 2016 # - Cleaned up the colSize implementation and added validate checks and warning messages. # Values of NA, "", and " " are acceptable in 'mapxxx', and 'id' columns. Cannot set colSize # for these columns. Other columns must have a numerical value from 0.01 to 200 to use # as the width proportion. Algorithm is each column gets "N1"/sum(all "Ns") percentage of the # available space is allocated to each column. If a column is below the minimum width, # it is set to the minimum. The calculation is then repeated minus the minimum width columns. # The column widths are then compared to the maximum width allows. Any columns over the # maximum are reduced to the maximum width. The algorithm is run one more time minus the # columns set to the minimum or maximum values. 
# - During the testing of the colSize feature when setting column to small sizes, it was # found the "extended" label algorithm does not behave well when the number of labels is set # less than 2. Also, zero labels were being lost. The general goal of the labeling algorithm # was changed to at a minimum request three labels, even on small columns. The number of labels # per inch was increased from 4 to 5. The algorthim was also modified to handle staggering of # labels when only one label is present. The routine now also gets an odd number of labels # when less or equal 7 labels are wanted. If the column is near the minimum width, any labels # outside the range of the data are stripped, except zero. If the column is over 1", # and > 7 labels, the range is increased to include the label values. # These are signicant changes and will be tested and monitored over the next couple of weeks and # tuned as needed. # - To help stablize the axis labeling, the extended and wilkinson algorithms will be compared. # - Update VisBorders structures and name table to add regional Vis Border support. Also updated all # border groups to new variable names and to support regions features. # - Renamed "regions" feature to "dataRegionsOnly" feature. # - Added "regionB" options to control overlaying region boundaries when "dataRegionsOnly" not active. # - Fixed mapping with region boundaries to do overlays in the correct order. # - Fixed correction of washington DC and all of its forms to a pure character # string with no punctuation. "DC" instead of "D.C." or "DISTRICT OF COLUMBIA". # - Added code to do the washington dc comparisons in all upper case only. # December 7, 2016 (releasing to CRAN) # - Added envir=environment() to all load and data functions # - Hide assign to .GlobalEnv in eval-parse # - Save and restore Sys.getlocale() to compensate for other # country settings that can interfer with the operation of the # package. 
# # # Used packages: RColorBrewer, stringr, R.rsp, labeling, # # Used internal packages: utils, graphics, R.utils, # # ######## ######## # # Copyrighted 2013, 2014, 2015, 2016 - by: Dan Carr, GMU and Linda Pickle and Jim Pearson of StatNet Consulting, LLC. # ######## ######## # # functions used from RColorBrewer: brewer.pal # # functions used from graphics: plot, lines, arrows, polygon, axis, text, mtext, boxplot, # points, legend, plot.new, plot.default, plot.design, plot.function, # plot.xy, plot.windows, abline, axTicks, barplot, matplot, # matpoints, title # # functions used from stats: qnorm # # functions used from grDevices: rgb, col2rgb # # functions used from stringr: str_trim, str_split, str_replace_all, str_sub # # functions used from labeling: extended, wilkinson, # ######## # # With the generalization of micromapST to cover other geographic area beyond the US, micromapST will # still be called micromapST. A separate function call has been added to help migrate # uses of the test/prototype SEER version "micromapSEER". The default border group will be # "USStatesBG" to support existing users of micromapST. # # Initial Variables that require setting before running this file: # # The current r directory <- location of the three micromapST source files # micromapST.r # panelFunctions.r # micromapDefSets.r # # The current data directory <- location of the supporting border Group datasets and test datasets # USStatesBG.rda # USSeerBG.rda # KansasBG.rda # NewYorkBG,rda # MarylandBG.rda # ChinaBG.rda # UtahBG.rda # UKIrelandBG.rda # SeoulSKoreaBG.rda # AfricaBG.rda # # Future plans are to do the county map for all U. S. States containing Seer Registries, # include a function to validate a user provided Border Group, and to provide functions or # guideance on how to charaterize a collection of boundaries. # # The following datasets must be included in the package to provide the boundaries: # # Each border group contains five R objects. 
These objects provide the unique # data for the border group's geographic areas, names, abbreviations, and numerical ID. # # The areaParms object provides defaults for several run parameters that tune micromapST # execution. The list of variables are: # bordGrp = a character vector - name of the border group. Must be the same # as the dataset filename minus the ".rda" extension. # Map.Hdr1 = a character vector - title header for the Map and ID glyphs. # This is set to the general name of the geographic area, e.g., "U. S." # or "Kansas". # Map.Hdr2 = a character vector - title header for the Map and ID glyphs. # This identifies the areas used in the linked micromap, e.g., "States" # or "Counties" # Map.L2Borders = a logical variable - if the L2VisBorders need to be overlaid on the # maps, this variable must be set to TRUE. So far, only the U. S. 18 # Seer Areas have required this feature. Most other border groups # will have this set to FALSE. (Old variable name = mapL2Borders) # Map.Aspect = a numerical value. The micromapST package does not know what the # correct aspect ratio is for the map boundaries. Rather than guess, # Map.Aspect is set to the map's aspect ratio when the boundary data # is converted into the micromapST boundary data format. The value # is used to control the width of the map glyph column to ensure # the map is properly presented. Only values between 0.5 and 2.0 are # allowed. This aspect is y/x (height / width) # # Map.MinH = Minimum height for the row if maps are included - units = inches. # Default is 0.5 inches. # Map.MaxH = Maximum height for the row if maps included - units - inches. # Default value is 1 inch. # # Id,Hdr1 = First line of ID glyph column title # Id.Hdr2 = Second line of ID glyph column title. 
#
#   areaUSData = a logical variable - if set to TRUE, the package assumes the geographic
#          areas and boundaries are the USStatesBG or USSeerBG datasets and will
#          overlay the first map in the column with labels for "AK", "HI", and "DC".
#          This variable should be set to FALSE for all other border groups.
#
#   enableAlias = Some data may not contain the names or abbreviations contained in
#          the border group dataset.  In the case of the U. S. Seer data, the
#          Seer Stat output has the area names hidden in the "Registry" label.
#          The alias feature provides a means of doing a partial match or
#          "contains" to link the data labels to the geographic objects.
#          This variable should be TRUE only for the USSeerBG border group.
#          In all other cases, it should be FALSE.
#
#   aP_Proj = proj4 string describing the projection used on the boundary data.
#   aP_Units = x and y coordinate units of the boundary data (lat-long, meters, kilometers)
#   aP_Regions = a logical variable - disables or enables the regional area mapping feature.
#          If TRUE, the areaNamesAbbrsIDs data.frame must contain the information
#          to group sub-areas by regions.  Indicates dataRegionsOnly can be used.
#   Map.RegBorders = Mostly an internal variable - indicates the RegVisBorders boundaries
#          should be drawn (TRUE).  Works with the "regionsB" call option to
#          control regional area boundary overlay.
#   Map.L3Border = a logical variable - mostly for internal use - to indicate if
#          the L3 borders should be drawn.
#
# All variable names in the areaParms data.frame must be unique within the micromapST package.
#
# The areaNamesAbbrsIDs R object is a table of the full names, abbreviations, alias strings, and
# numeric ID for each geographical area in the boundary dataset.  The abbreviation is used as
# the internal link between the data and the boundary of the area.
The table provides a means # of allowing the user to use the area's full name, abbreviation, or the IDs as the area's label # in the data provided micromapST in the statsDFrame parameter. The full names, abbreviations, # and IDs must match entries in this table or the user is notified and data ignored. # See the documentation on the areaNamesAbbrsIDs for the data structure of this object and # the documentation on each border group for the values for that specific border group. # # The areaVisBorders R object contain sets of boundary data points for each area listed in the # areaNamesAbbrsIDs table. Since the space for the map is limited, these boundaries should be # very simplified or characterized to permit fast drawing and keep the size of the data to a # minimum. See the documentation on the areaVisBorders R object for more details on the structure # of this object. # # The L2VisBorders R object contains a set of boundary data points to outline a set of area # like U. S. states when the areaVisBorders represents subareas. This layer is overlayed # optionally and is only used in the USSeerBG border group, at the present time. # # The L3VisBorders R object contains the outline of the geographic area that contains the # the areaVisBorders' areas. This would be the outline of a country (U.S. or China) or a # state (Kansas, New York, Maryland). This provides a accent to the region's borders. # # Regional mapping feature allows a subset of an area (a collections of sub-areas) to # be mapped based on the data provided by the caller. Sub-areas in regions not # referenced in the statsDFrame are not mapped. When a subset is mapped, the L3VisBorders # and related L2VisBorders outlines are NOT drawn. The regional groupping is based # on the region field in the areaNamesAbbrsIDs table (regID). There are no boundaries # for regions. # # See the documentation on each object for its particular structure and usage. # # See the documentation on each border group for details. 
# ###### ######
#
# Basic data structures to convey information and controls between the main
# function and sub-functions.
#
#   mmSys$sDFName - name of the statsDFrame data frame provided by the caller.
#          Not the data itself, the name of the variable.
#
#   mmSys$pDName  - name of the panelDesc data frame provided by the caller.
#          Not the data itself, the name of the variable.
#
# ######
#
# gC contains the fun information for each glyph column (gC).  The index is
# 1 to "n" - general items for all glyphs.
#
#   gC[j]$cIdx      - integer index of the current glyph column (1 to "n")
#   gC[j]$cTxt      - text version of the integer index of the current glyph
#                     column (1 to "n")
#   gC[j]$type      - glyph type
#   gC[j]$lab1      - character
#   gC[j]$lab2      - character
#   gC[j]$lab3      - character
#   gC[j]$lab4      - character
#   gC[j]$refText
#   gC[j]$refVal
#   gC[j]$col1Name  - statsDFrame column name
#   gC[j]$col1Num   - statsDFrame column number
#   gC[j]$col2Name
#   gC[j]$col2Num
#   gC[j]$col3Name
#   gC[j]$col3Num
#   gC[j]$panelData - data structure name for column in panelData.
#   gC[j]$...       - glyph specific parameters and variables (panelDesc expanded.)
#
# ##### ######
#
# Intent:
#   This function suppresses the following notes generated by "R CMD check":
#     - "Note: no visible binding for global variable '.->ConfigString'"
#     - "Note: no visible binding for '<<-' assignment to 'ConfigString'"
# Usage:
#   Add the following right in the beginning of the .r file (before the
#   Reference class is defined in the sourced .r file):
#     suppressBindingNotes(c(".->ConfigString","ConfigString"))
#
# Creates a NULL binding in the global environment for each supplied name so
# that "R CMD check" no longer reports a "no visible binding" NOTE for it.
# NOTE(review): the assign() call is deliberately built as a string and run
# through eval(parse(...)) so the direct .GlobalEnv assignment does not show
# up in the static CRAN checks (see the December 2016 change-log entry above).
#
suppressBindingNotes <- function(variablesMentionedInNotes) {
   for (vName in variablesMentionedInNotes) {
      # builds and executes:  assign(vName, NULL, envir = .GlobalEnv)
      cmdStr <- paste0("assign(vName, NULL, envir = ", ".GlobalEnv)")
      eval(parse(text = cmdStr))
   }
}
#
# ###### ######
#
# Counter function definition in the Global Environment, to be accessible
# from all functions.  Each call to NewCounter() returns an independent
# closure; every invocation of that closure increments its private count by
# one and returns the new value.
#
NewCounter <- function() {
   count <- 0
   function() {
      count <<- count + 1
   }
}
#
# ###### #####
#
# asc and chr
# chr(x) returns character value for "x".
#     if x is a character, x is returned.
#     if x is numeric, it is converted to the character(s) whose code point(s) it holds.
#     any other type returns the NAK control character "\025" as a sentinel.
#
chr <- function(x) {
   if (is.character(x)) {
      return(x)
   } else if (is.numeric(x)) {
      # numeric code(s) -> raw byte(s) -> character string
      as.character(rawToChar(as.raw(x)))
   } else {
      return("\025")    # unsupported type
   }
}
#
#  asc(x) returns the numerical value for the character "x"
#     Only the first character of the input is examined; numeric input is first
#     converted to its character form.  Returns NA for non-character/non-numeric input.
#
asc <- function(x) {
   wX <- x
   if (is.numeric(wX)) {                          # numeric - turn into character
      wX <- as.character(wX)
   }
   if (is.character(wX)) {
      if (nchar(wX) > 1)  { wX <- substr(wX,1,1) }   # get only one character
      # BUG FIX: the original called strtoi(charToRaw(x),16L) on the untruncated,
      # unconverted argument "x" - multi-character strings returned one code per
      # character instead of just the first, and numeric input raised an error.
      # Using the prepared "wX" honors the truncation/conversion above.
      strtoi(charToRaw(wX), 16L)                  # convert character to numeric
   } else {
      NA
   }
}
#
#
####  Global functions  ######
#
#  Update --- If a variable is used but does not seem to be set, RCMD
#     generates an error.  This compensates for the dynamic reference.
#
gVarList <- c("lastLab2Space","lastLab3Space", "staggered")

suppressBindingNotes(gVarList)
#
#  Create key global variable before referenced - These variables are referencable
#  by all subroutines and functions in this package.
#
# Register names that are assigned or read dynamically (via get/assign or the
# details/colors override lists) so "R CMD check" does not report
# "no visible binding" notes for them.  Registered for package "micromapST".
utils::globalVariables(c(
       #  Call Parameters
       "sDFName", "pDName", "wSFName", "callVarList",
       #  panel variables and parameters
       "numRows", "numGrps", "rowSep", "rowSepGap", "rowSize", "rowSizeMaj", "rowSizeMin",
       "rowSizeMx", "rowSizeMn", "colSepGap", "colSizeMax", "colSizeMin",
       "rcRatioMin", "rcRatioMax", "groupedRowSize", "groupedRowSep",
       "medGrp", "medGrpSize", "medRow", "medRowAbv", "medRowBlw",
       "ib", "ie", "sc", "pad", "padex", "padMinus",
       "topMar", "botMar", "botMarLegend", "botMardif", "borderSize",
       #  System
       "detailsVariables", "varName", "mstColorNames",
       #  Axis adjustments
       "mgpTop", "mgpBottom", "padjBottom", "mgpLeft", "leftMarAxis", "leftMar", "rightMar",
       #  Axis Lab variables
       "staggered", "lastLab2Space", "lastLab3Space",
       #  Call Parameters
       "ignoreNoMatch", "bordGrp", "bordDir", "grpPattern",
       #  Counter functions
       "warnCnt", "stopCnt",
       #  glyphs variables
       #  General
       "Title.Line.1.pos", "Title.Line.2.pos", "Title.Line.2x.pos", "Title.Line.3.pos",
       "Title.Line.4.pos", "Title.Line.5.pos", "Title.cex",
       "Grid.Line.col", "Grid.Line.lwd", "Panel.Fill.col", "Panel.Outline.col", "Text.cex",
       "XAxis.L.mcex", "XAxis.M.mcex", "XAxis.S.mcex", "XAxis.Sp.mcex",
       "XAxis.offset", "XAxis.indent", "XAxis.nGridpIn", "XAxis.staggered", "XAxis.gapPC",
       "YAxis.cex", "YAxis.offset", "YAxis.nGridpIn", "YAxis.width",
       #  Arrow
       "Arrow.Head.length", "Arrow.lwd", "Arrow.cex", "Arrow.Shadow.col", "Arrow.Shadow.lwd",
       "Arrow.Dot.pch", "Arrow.Dot.pch.size", "Arrow.Dot.pch.lwd",
       "Arrow.Dot.Outline", "Arrow.Dot.Outline.col","Arrow.Dot.Outline.lwd",
       #  Bar
       "Bar.barht", "Bar.Outline.col", "Bar.Outline.lwd", "Bar.Outline.lty",
       #  Boxplot
       "BoxP.thin", "BoxP.thick", "BoxP.Use.Black", "BoxP.Median.Line", "BoxP.Median.col",
       "BoxP.Median.Dot.col","BoxP.Median.Dot.pch","BoxP.Median.Dot.cex","BoxP.Median.Dot.lwd",
       "BoxP.Outline.col", "BoxP.Outlier.BW.col","BoxP.Outlier.lwd", "BoxP.Outlier.cex",
       #  Center Stacked Bars
       "CBar.varht", "CBar.two.ended", "CBar.Zero.Line.col", "CBar.Zero.Line.lwd", "CBar.Zero.Line.lty",
       #  Center, Segmented, and Normalized Stacked Bars
       "CSNBar.barht", "CSNBar.Outline.col", "CSNBar.Outline.lwd", "CSNBar.Outline.lty",
       "CSNBar.First.barht", "CSNBar.Last.barht",
       #  Dot, Dotsignif, Dotconf, Dotse
       "Dot.pch", "Dot.pch.size", "Dot.pch.lwd",
       "Dot.Outline", "Dot.Outline.col", "Dot.Outline.lwd",
       "Dot.Conf.pch", "Dot.Conf.pch.size", "Dot.Conf.pch.lwd", "Dot.Conf.lwd",
       "Dot.Conf.Outline", "Dot.Conf.Outline.lwd","Dot.Conf.Outline.col",
       "Dot.SE", "Dot.SE.pch", "Dot.SE.pch.size", "Dot.SE.pch.lwd", "Dot.SE.lwd",
       "Dot.SE.Outline", "Dot.SE.Outline.lwd", "Dot.SE.Outline.col",
       #  Dotsignif
       "Dot.Signif.pch", "Dot.Signif.pch.size","Dot.Signif.pch.col","Dot.Signif.pch.lwd",
       "Dot.Signif.Outline", "Dot.Signif.Outline.col","Dot.Signif.Outline.lwd",
       "Dot.Signif.pvalue", "Dot.Signif.range",
       #  Dotconf, Dotse
       "Dot.conf.pch", "Dot.conf.pch.size", "Dot.conf", "Dot.conf.lwd", "Dot.conf.size",
       #  Id
       "Id.Hdr1", "Id.Hdr2", "Id.Title.1.pos", "Id.Title.2.pos", "Id.Start", "Id.Space",
       "Id.Cex.mod", "Id.Text.cex", "Id.Text.adj",
       "Id.Dot.pch", "Id.Dot.lwd", "Id.Dot.cexm", "Id.Dot.width",
       "Id.Dot.Outline.col", "Id.Dot.Outline.lwd",
       #  map, mapcum, mapmedian, maptail
       "Map.Min.width",      # will become dynamic
       "Map.Max.width",      #
       "Map.Aspect",         # from areaParms
       "Map.L2Borders", "Map.RegBorders", "Map.L3Borders",
       "Map.MinH", "Map.MaxH", "Map.Lab.Box.Width", "Map.Median.text",
       "Map.Bg.col", "Map.Bg.Line.col", "Map.Bg.Line.lwd",
       "Map.Fg.Line.col", "Map.Fg.Line.lwd",
       "Map.L2.Fill.col", "Map.L2.Line.col", "Map.L2.Line.lwd",
       "Map.L3.Fill.col", "Map.L3.Line.col", "Map.L3.Line.lwd",
       "Map.Area.Spec.cex",
       #  rank
       "Rank.width",
       #  Support - refVal, refText
       "Ref.Val.col", "Ref.Val.BW.col", "Ref.Val.lwd", "Ref.Val.lty",
       "Ref.Text.col", "Ref.Text.BW.col", "Ref.Text.cex",
       #  ScatDot
       "SCD.Bg.pch", "SCD.Bg.pch.size", "SCD.Bg.pch.fill", "SCD.Bg.pch.col", "SCD.Bg.pch.lwd",
       "SCD.Fg.pch", "SCD.Fg.pch.size", "SCD.Fg.pch.col", "SCD.Fg.pch.lwd",
       "SCD.Median.pch", "SCD.Median.pch.size","SCD.Median.pch.fill", "SCD.Median.pch.col",
       "SCD.Median.pch.lwd", "SCD.Axis.cex",
       "SCD.xsc", "SCD.ysc", "SCD.hGrid",
       "SCD.DiagLine", "SCD.DiagLine.col", "SCD.DiagLine.lwd", "SCD.DiagLine.lty",
       #  Normalized and Segmented stacked bar
       "SNBar.varht", "SNBar.two.ended", "SNBar.Middle.Dot",
       "SNBar.MDot.pch", "SNBar.MDot.pch.fill","SNBar.MDot.pch.lwd", "SNBar.MDot.pch.size",
       "SNBar.MDot.pch.border.col", "SNBar.MDot.pch.border.lwd",
       #  TS and TSConf
       "TS.lwd", "TS.Axis.cex", "TS.hGrid",
       #  debug
       "MST.Debug"),
       "micromapST", add=TRUE)
#
#  Would rather have these variable in the local "micromapST" environment.
#
######   ######
#
#  GlobalEnv Level Functions / micromapST Namespace Functions
#     accessible by everyone, but can't access variables within caller's space.
#
#  groupPanelOutline
#
#  Draws an outline around every panel (row) in glyph column "j" of the given
#  panel group.  The row count is taken from panelGroup$dim[1].
#  panelSelect/panelScale/panelOutline are defined in panelFunctions.r.
#
groupPanelOutline = function (panelGroup, j )    ## used in micromapST function - assumes 3 rows in the panels..
   {
      iE <- panelGroup$dim[1]
      for (i in 1:iE){
         panelSelect(panelGroup,i,j)    # select a space
         x <- panelScale()              # scale it
         panelOutline()                 # outline it.
      }
   }
####
#
#  Clean up strings - remove
#    1) special single and double quotes (open and closed)
#    2) tick mark
#    3) general punctuation (periods, etc.)
#  Designed to allow strings that may have different types of quotes, apos. to be compared.
#
ClnStr <- function(x) {
      # strip punctuation plus typographic quote characters U+2018..U+201F,
      # then trim surrounding whitespace (str_trim is from stringr).
      z <- gsub("[[:punct:]\u2018-\u201F]", "", x, perl=TRUE)
      z <- str_trim(z)
      return(z)
   }
#
####
####
#
#  Find shortest format for Axis labels
#
#  Test the following formats on the Axis Labels and determine
#  the narrowest format.
#  The formats checked are:
#      fixed format (up to 1 decimal place)
#      general format (including scientific notation)
#      fixed with KMB modification
#      fixed with "in thousands" type label
#
#  NOTE(review): this function is unfinished (see comment at its end) - it builds
#  candidate label sequences but does not yet select or return one.
#  wilkinson() and extended() are presumably from the 'labeling' package - confirm.
#
FindShorest <- function(x, w) {
      # x is a vector of numbers
      # w is the width of the target column (inches)
      #
      n  <- as.integer(w / 4)       # number of labels required
      xr <- range(x)                # get range of the values
      if (!odd(n)) n <- n + 1       # force an odd label count
      xW <- wilkinson(xr[1], xr[2], n, mrange=c(n/2, n))
      xE <- extended( xr[1], xr[2], n, w = c(0.25, 0.2, 0.5, 0.05))  # simp, cover, densi, legible
      # Function is incomplete...
   }
#
####
####
#
#  is.Color takes a hex string, the name of a color (from grDevices::colors()), or palette number
#  and validates it as a color variable.  TRUE - is a valid color, FALSE - not a color.
#
#  Inputs:  values can by any color names that matches the grDevices::colors() name list,
#           a 6 or 8 character hex string starting with a "#" character, or
#           the palette color number (1 to 8) as integer or character.
#
#  Examples:  "white", "red", "lightgreen", "#232323", "#234Ad3", or "#FFDDCC80"
#             1, or "1"
#
#  On hex strings, the alpha value is optional (last 2 hex digits)
#
is.Color <- function(x) {
      # handle a vector of colors - one logical per element
      vapply(x, is.Color2, logical(1))
   }
#
####
####
#
#  Color string to hex string conversion (handles vectors of values).
#  On failure the "try-error" object is returned unchanged so callers can test it.
#
col2hex <- function(cname) {
      res <- try(colMat <- col2rgb(cname), silent=TRUE)
      # FIX: class(res) != "try-error" compares the full class vector and is the
      # documented anti-pattern; inherits() is the supported test.
      if (!inherits(res, "try-error")) {
         rgb(red=colMat[1,]/255, green=colMat[2,]/255, blue=colMat[3,]/255)
      } else {
         res
      }
   }
#
####
####
#
#  single value test function for colors.
#
#  The test is done against the standard color list and the micromapST color list
#  (mstColorNames - a package global).  The value can be a color name, a hex string,
#  or a palette color number (1 to 8).
#
is.Color2 <- function(x) {
      # numeric color value - a relative color number within the palette.
      if (is.numeric(x)) {
         if (x < 0) {
            # can not be a negative value..
            warnCnt()
            xmsg <- paste0("***0910 is.color2 The color value must be a positive number. Value seen:",x,"\n")
            stop(xmsg,call.=FALSE)
         }
         x <- as.character(x)           # convert number to character string
      }
      if (is.factor(x))  x <- as.character(x)   # convert factor to character
      if (!is.character(x)) {
         # not an integer, factor, or character - can not be a color
         return(FALSE)
      }
      # character string - check for palette number or known color name.
      if (!is.na(match(x, c(as.character(c(1:8)), grDevices::colors(), mstColorNames)))) {
         return(TRUE)                   # good color value
      }
      # No direct match - try conversion to rgb; success means a valid color
      # (e.g. a "#RRGGBB[AA]" hex string), failure means not a color.
      res <- try(col2rgb(x), silent=TRUE)
      return(!inherits(res, "try-error"))
   }
#
####
####
#
#  function to test if "x" is between or equal to a and b.
#
is.between <- function(x,a,b) {
      # checks x to make sure it's between a and b (bounds in either order).
      # This version supports vectors.
      if (a > b) {
         (x >= b & x <= a)
      } else {
         (x >= a & x <= b)
      }
   }
#
####
####
#
#  function to test if "x" is within or equal to the range of "r".
#  "r" must be a vector of length 2 to be evaluated.
#
is.between.r <- function(x,r) {
      # the x must be within or equal to the range specified in r
      if (length(r) != 2) {
         warnCnt()
         xmsg <- "***0491 INB is.between.r The r range value is not a vector with length of 2. FALSE returned."
         warning(xmsg, call.=FALSE)
         return(rep(FALSE,length(x)))   # not valid range
      } else {
         return(is.between(x,r[1],r[2]))
      }
   }
#
####
####
#
#  Testing function - print out key par() plot parameters
#
printPar <- function() {
      cFin <- par("fin")     # device figure region size (inches)
      cat("cFin:",cFin," (w,h)\n")
      cFig <- par("fig")     # figure region as fraction of device
      cat("cFig:",cFig," (x,x,y,y)\n")
      cPin <- par("pin")
      cat("cPin:",cPin," (w,h)\n")
      cPlt <- par("plt")
      cat("cPlt:",cPlt," (x,x,y,y)\n")
      cMai <- par("mai")
      cat("cMai:",cMai," (b,l,t,r)\n")
      cMar <- par("mar")
      cat("cMar:",cMar," (b,l,t,r)\n")
      cUsr <- par("usr")
      cat("cUsr:",cUsr," (x,x,y,y)\n")
      cPs  <- par("ps")
      cat("cPs :",cPs," pt.\n")
   }
#
####
####
#
#  odd - check if number is odd (TRUE) or even (FALSE)
#
odd <- function(x)  { x%%2 == 1 }
#
####
####
#
#  CleanString - clean up character string - remove extra spaces, all punctuation,
#  control characters and make all caps.  (str_* functions are from stringr.)
#
CleanString <- function(wstr) {
      nstr <- toupper(str_trim(str_replace_all(wstr,"[[:space:][:cntrl:][:punct:]]+"," ")))
      return(nstr)
   }
#
####
####
#
#  Scaler1 - find scale for range and appropriate axis sub-title
#
#  Find the size of the maximum value.
#  Select scaling label, and division factor to use on data.
#
#  var : a single numeric value (the magnitude used to pick the scale).
#  Returns c(divisor, subtitle) - a character vector of length 2
#  (c() coerces the numeric divisor to character).
#
Scaler1 <- function(var) {
      var1 <- as.numeric(var)
      if (var1 < 0) { var1 <- abs(var1) }     # scale on magnitude only
      vc <- c(1,"")
      if (var1 > 1) {
         # value > 1 --- OK to do log10 to get index.
         varLog <- as.integer(log10(var1))
         # BUG FIX: the original used switch(varLog, ...), so varLog == 0
         # (values 1 < v < 10) returned NULL and every case fired one slot
         # below its documented range (e.g. 5,000 got no scaling instead of
         # "in hundreds").  switch(varLog + 1, ...) restores the mapping the
         # case comments describe; varLog is clamped so very large values use
         # the final "no scaling" case instead of returning NULL.
         if (varLog > 14L) varLog <- 14L
         vc <- switch(varLog + 1,
                  c(1,""),                               #  0 - < 10
                  c(1,""),                               #  1 - < 100
                  c(1,""),                               #  2 - < 1000
                  c(100,"in hundreds"),                  #  3 - < 10,000
                  c(1000,"in thousands"),                #  4 - < 100,000
                  c(10000,"in ten thousands"),           #  5 - < 1,000,000
                  c(100000,"in hundred thousands"),      #  6 - < 10,000,000
                  c(1000000,"in millions"),              #  7 - < 100,000,000
                  c(10000000,"in ten millions"),         #  8 - < 1,000,000,000
                  c(100000000,"in hundred millions"),    #  9 - < 10,000,000,000
                  c(1000000000,"in billions"),           # 10 - < 100,000,000,000
                  c(10000000000,"in ten billions"),      # 11 - < 1,000,000,000,000
                  c(100000000000,"in hundred billions"), # 12 - < 10,000,000,000,000
                  c(1000000000000,"in trillions"),       # 13 - < 100,000,000,000,000
                  c(1,"")                                # 14+ - no scaling
               )
      } else {
         # value <= 1 - pick the first threshold the value reaches.
         # (Table replaces the original repeat{} ladder; same thresholds/labels.)
         smallDiv <- c(0.1,     0.01,    0.001,   0.0001,  0.00001, 0.000001,
                       0.0000001, 0.00000001, 0.000000001, 0.0000000001,
                       0.00000000001, 0.000000000001, 0.0000000000001,
                       0.00000000000001, 0.000000000000001,
                       0.0000000000000001, 0.00000000000000001)
         smallTxt <- c("in the tenth",              "in the hundredth",
                       "in the thousandth",         "in the ten thousandth",
                       "in the hundred thousandth", "in the millionth",
                       "in the ten millionth",      "in the hundred millionth",
                       "in the billionth",          "in the ten billionth",
                       "in the hundred billionth",  "in the trillionth",
                       "in the ten trillionth",     "in the hundred trillionth",
                       "in the quadrillionth",      "in the ten quadrillionth",
                       "in the hundred quadrillionth")
         idx <- which(var1 >= smallDiv)[1]
         if (is.na(idx)) {
            # ROBUSTNESS FIX: the original repeat{} had no terminating branch,
            # so values below the last threshold (including 0) looped forever.
            vc <- c(1,"")
         } else {
            vc <- c(smallDiv[idx], smallTxt[idx])
         }
      }
      #  vc <- c(divisor, <axis sub-title string>)
      return(vc)     # return divisor [1] and subtitle string [2]
      # need to add code to handle width range of number, getting duplicates at low end.
   }
#
####
####
#
#  Alt_Scaler
#
#  Find the scale of the number (not list of numbers)
#  Find divisor and apply
#  Changes number to string.
#  Apply scale character to end of string
#
#  Need to add logic to convert labels back to numbers and return both.
#
#  var is a vector of numeric values for the Axis labels.
#  lower is a logical flag.  If FALSE, the resulting strings are returned as is.
#        If TRUE, the resulting strings are converted to lower case.
# Scaler2 <- function(var,lower=FALSE) { var1 <- as.numeric(var) minusFlag <- "" if (var1 < 0) { # save fact the number was minus minusFlag = "-" var1 <- abs(var1) } vc <- c(1,"") var2 <- var1 if (var1 != 0) { # number zero, quick exit varLog <- as.integer(log10(var1)) #cat("varLog:",varLog,"\n") if (varLog != 0) { if (varLog > 0) { vc <- switch(varLog, # 0 - < 10 => [0.10000000001 to 10) c(1,""), # 1 - < 100 => [10 to 100) # hecto (hunderds) c(1,""), # 2 - < 1,000 => [100 to 1000) # kilo (thousands) c(1000,"K"), # 3 - < 10,000 => [1,000 to 10,000) c(1000,"K"), # 4 - < 100,000 => [10,000 to 100,000) c(1000,"K"), # 5 - < 1,000,000 => [100,000 to 1,000K) # mega (million) c(1000000,"M"), # 6 - < 10,000,000 => [1,000K to 10,000K) c(1000000,"M"), # 7 - < 100,000,000 => [10,000K to 100,000K) c(1000000,"M"), # 8 - < 1,000,000,000 => [100,000K to 1,000M) # giga (billion) c(1000000000,"B"), # 9 - < 10,000,000,000 => [1,000M to 10,000M) c(1000000000,"B"), # 10 - < 100,000,000,000 => [10,000M to 100,000M) c(1000000000,"B"), # 11 - < 1,000,000,000,000 => [100,000M to 1,000B) # tera (trillion) c(1000000000000,"T"), # 12 - < 10,000,000,000,000 => [1,000B to 10,000B) c(1000000000000,"T"), # 13 - < 100,000.000,000,000 => [10,000B to 100,000B) c(1000000000000,"T"), # 14 - < 1,000,000,000,000,000 => [100,000B to 1,000T) c(1,"") ) var2 <- var1/as.numeric(vc[1]) } else { # negative log values are small numbers, so invert to 1 to N varLog <- (-varLog) # (-1 => 1) repeat { vc <- c(1,"") if (var1 >= 0.1) { # 0.999999 => to >= 0.1 -> 9.99999 -> 1.0 vc <- c(10,"d") # deci break } if (var1 >= 0.01) { # 0.0999999 => to >= 0.01 -> 9.99999 -> 1.0 vc <- c(100,"c") # centi break } if (var1 >= 0.001) { # 0.00999999 => to >= 0.001 -> 9.99999 -> 1.0 vc <- c(1000,"m") # milli break } if (var1 >= 0.000001) { # 0.000999999 => to >= 0.000001 -> 999.999 -> 1.0 vc <- c(1000000,"u") # micro break } if (var1 >= 0.000000001) { # 0.000000999999 => to >= 0.000000001 -> 999.999 -> 1.0 vc <- c(1000000000,"n") # 
nano break } if (var1 >= 0.000000000001) { # 0.000000000999999 => to >= 0.000000000001 -> 999.999 -> 1.0 vc <- c(1000000000000,"p") # pico break } if (var1 >= 0.000000000000001) { # 0.000000000000999999 => to >= 0.000000000000001 -> 999.999 -> 1.0 vc <- c(1000000000000000,"f") # femto break } } var2 <- var1*as.numeric(vc[1]) } } } #cat("minus:",minusFlag," vc:",vc,"\n") cvx <- paste0(minusFlag, str_trim(formatC(var2,format="fg",width=5,digits=4,drop0trailing=TRUE)),vc[2]) if (lower) { cvx <- tolower(cvx) } return(cvx) # Need to check to see what happens if we have lowe end numbers that may be duplicated. } # #### #### # # simpleCap - capitalize each word in a phrase and removes "."s, "_"s, and extra blanks. # Not good on vectors - must apply # simpleCap <- function (x) { s <- strsplit(x,"[ ._]")[[1]] # split on boundaries " ", "." or "_". s1 <- s[s != ""] # skip empty strings paste0(toupper(substring(s1,1,1)),tolower(substring(s1,2)),collapse=" ") } # # Alternative: # gsub("(^|[[:space:]])([[:alpha:]])", "\\1\\U\\2", name, perl=TRUE) # #### ##### # # plotPoint - takes a give x,y any type of point (0:18, 19:25, > 32 or character) # and correctly plots it at x,y. Other parameters are required incase of outlines. # # plotPoint <- function(ppX, ppY, ppPch, ppColor, ppSize, ppLwd, ppOutline, ppOutline.col, ppOutline.lwd) { # # Call parameters: pchValue, x, y, pch.size, outline.lwd, outline.col, mstColor, # pchValue <- ppPch suppressWarnings(chrValue <- as.numeric(pchValue)) if (is.na(chrValue)) { # the pch value is not a numeric - check for character if (is.character(pchValue)) { # character type value. Get first character. assume > 31 pchValue <- str_sub(str_trim(pchValue),1,1) points(ppX, ppY, pch=pchValue, cex=ppSize, col=ppColor ) #cat("points of character:",pchValue) } else { # set to default since we can't decode it. Set to numeric value. 
chrValue <- 21 #cat("not a character-typeof:",typeof(pchValue)," setting chrValue to 21.","\n") } } if (!is.na(chrValue)) { #cat("numeric - typeof:",typeof(pchValue), " ", typeof(chrValue)," ",typeof(chrValue)," ",pchValue," ",chrValue,"\n") # have a numeric value (still), got conversion - 0:255 range. # if it's NA, it's character and has been plotted. if (chrValue > 31) { #cat("chrValue > 31 - normal points\n") # normal symbols (numeric) (no border) # > 31 characters points(ppX,ppY,pch=chrValue, cex=ppSize, col=ppColor ) } else { # <= 31 if (chrValue > 25) { # 26:31 -> not used character use default chrValue <- 21 #cat("char 26:31 not used -> use default 21\n") } if (chrValue > 18) { # 19:25 value characters. # Dot.Conf.Outline set by user or by BW/Greya/Grays color scheme if (ppOutline) { # 19:25 with outline around symbol #cat("19:25 -> filled with borders symbols - outline ON \n") points(ppX, ppY, pch=chrValue, cex=ppSize, lwd=ppOutline.lwd, col=ppOutline.col, bg=ppColor ) } else { # 19:25 with no outline (border) #cat("19:25 -> filled with borders symbols - outline OFF \n") points(ppX, ppY, pch=chrValue, cex=ppSize, col=NA, bg=ppColor ) } } else { # 0:18 symbols - line drawings #cat("0:18 symbols - standard print.\n") points(ppX, ppY, pch=chrValue, cex = ppSize, lwd = ppLwd, col = ppColor ) } } } } # # end of point ploter. # ##### #### # # micromapSEER - to support previous users of micromapSEER NCI package. # micromapSEER <- function(statsFrame,panelDesc,...) { micromapST(statsFrame,panelDesc,..., bordGrp="USSeerBG", bordDir=NULL) } # #### #### # # Get micromapST Version # micromapST.Version <- function() { return ("micromapST V1.1.1 built 2016-12-07 11:02am") } # #### #### # # micromapST # # Using the technique of setting parameters to NULL. Later during verification, if # NULL, set to the default. If not NULL, then verify the parameters value. # # micromapST = function( statsDFrame, panelDesc, rowNamesCol = NULL, # Name of name link column. 
rowNames = NULL, # default = "ab" ### modify to SEER IDs sortVar = NULL, # default = sort on plotNames values ascend = TRUE, # default = ascending sorting order title = c("",""), # default = empty plotNames = NULL, # default = "full" ### modify to SEER Abv and Names axisScale = NULL, # axis Scale Method, default = "e" -> extended staggerLab = NULL, # stagger Axis Labels, default = FALSE bordGrp = NULL, # border and names group to use with micromapST, Def = "USStatesBG" bordDir = NULL, # data directory containing the bordGrp .RDa file to use. # If null or NA, a DATA statement is used to load the # bordGrp from the included package datasets. dataRegionsOnly = NULL, # when regions are defined, permit package to map only regions containing data. Default=FALSE, regionsB = NULL, # when regional boundaries are present, map regional overlays. Default = FALSE. grpPattern = NULL, # Override areas per panel/group pattern ignoreNoMatches = FALSE, # How to handle statsDFrames that don't match. colors = NULL, # Override colors structure details = NULL ) # Override details parameters. { # # Routine: micromapST (and micromapSEER) # # Created by: Dr. Dan Carr # Updated and Extended by: Jim Pearson, April 20, 2009 # Updated and Extended by: Jim Pearson, August 28, 2012 # Updated and Extended by: Jim Pearson, May and June, 2013 # Updated and Extended by: Jim Pearson, Nov, 2013 # Updated and Extended by Jim Pearson, Jan, 2014 # Updated and Extended by: Jim Pearson, March, 2014 # Updated and Extended by: Jim Pearson, October-November, 2014 # Updated impacted every function and feature of the package to generalize the panel layouts. # Updated and Extended by: Jim Pearson, December 2014 and January 2015 # Updated and Extended by: Jim Pearson, March 2015, generalized the package for other geospatial areas. # and refined the scaling and sizing of the rows and columns. 
# Updated and Extended by: Jim Pearson, September, 2015 and February, 2016 # Updated and Extended by: Jim Pearson, November, 2016 # # Packaged by: Jim Pearson # # Dependencies: micromapGSetDefaults # $colors # $details # micromapGSetPanelDef # panelFunctions.r # # Included bordGrp DataSets: # USStatesBG - equivalent to original micromapST setup # USSeerBG - new setup for borders and behavior for US Seer Areas. # KansasBG - new setup for borders and behavior for Kansas County Areas. # NewYorkBG - new setup for borders and behavior for New York County Areas. # MarylandBG - new setup for borders and behavior for Maryland County Areas. # ChinaBG - new setup for borders and behavior for China. # UKIrelandBG - new setup for borders and behavior for UK-Ireland area # UtahBG - new setup for borders and behavior for Utah County Areas # SeoulSKoreaBG - net setup for borders and behavior for the districts in the city of Seoul South Korea. # AfricaBG - net setup for borders and behavior for the countries of Africa. # # Each contain the following DataFrames, Lists and Vectors:: # Run Parameters: areaParms # Data Level Names, Abbrs. IDs, and Labels: areaNamesAbbrIDs (Old stateNamesFips) # Data Level Boundaries: areaVisBorders (Old stateVisBorders) # L3 (national) Level Boundaries L3VisBorders (Old stateNationVisBorders) # # L2 (state) Level Boundaries (Optional) L2VisBorders (Old stateNationVisBorders) # # Currently the L2 Boundaries are only used with the "USSeerBG" border group at this time. # # If L2 Boundaries are not included in the bordGrp, the L3 Boundaries are copied into # the L2 boundaries as a place holder. # # Source Files: panelFunctions.r, micromapDefSets.r # ##### ##### # # # Call Parameters: # # Defaults List for call simulation # statsDFrame <- data # panelDesc <- panel description data.frame or panel description list of lists. 
# rowNames <- "ab" # global # sortVar <- NULL # global # ascend <- TRUE # global # title <- c("titles") # global # plotNames <- "full" # global and glyph # axisScale <- "e" # new extended method - global and glyph # staggerLab <- FALSE # global and glyph # colors <- NULL # global # details <- NULL # global and glyph # bordGrp <- "USStatesBG" # global # bordDir <- NULL # global # ignoreNoMatches <- FALSE # global # grpPattern <- NULL # global - default = calculated row / panel pattern # regionsB <- NULL # global - default = FALSE # dataRegionsOnly <- NULL # global - default = FALSE # # colors and details are used to override/modify the basic default structure for the colors # and the operational details information. # # ##### # # statsDFrame data.frame of area ID and data for micromaps. # # rownames must be area abbreviations, names, or fips codes # # Provides the data for the dot, dotConf, dotSE, dotSignif, arrows, bars, # segbar, ctrbar, and normbar glyph panels. # # Not used for boxplots or time series column panels. Pointers to their # data is provided in the panelDesc data.frame. # # The statsDFrame must have the area's abbr, name or ID code (like fips code) as # the rownames of the data.frame. As an alternate a column can contain the # area's identifier and the "rowNameCol" parameter can be used to point to # the column. Once the column is verified, it is assigned to the rownames # of the statsDFrame. # # The data.frame must be at least 2 columns for some of the functions # in R. To compensate for possible 1 column data.frames, a column of zero # is appended to the right side of the data.frame to ensure there is always # 2 columns. (Work-a-round) # # An example of the problem: # When the structure is ordered xxx[ord,] and then assigned to the working # variable "dat", the dimensions are preserved. # If the data.frame has only one column, the ordering and assigned, # strips the rownames and leaves the dim(dat) = NULL. 
# # The numerical data in the statsDFrame data frame may be in a numerical vector # or a character vector. If the data is found to be a factor, it is converted to # a character vector. If the data is a character vector, then the format of the # numbers is validated. The acceptable numerical value formats are: # # 1, 1.1, 0.1, .1, +1, +1.1, -0.1, -.1, -13434.3 -1234, # 1.1e+01, 1e+01, 0.1e+01, 1e-1, 1.12355e-01, +1.23e+99, # 1,000; -1,343; +1,234; 23,235.00; -23,234.00001 # # Errors will be flagged if there is more than 3 digits between commas and commas or # decimal point, the exponent value is greater than 2 digits, a space is found # between any parts of the number, etc. # # The name of the user provided statsDFrame data frame is stored in # callVarList$statsDFrame variable for later reference. # ###### # # panelDesc data.frame # data frame for panel descriptions/definitions # Example # panelDesc = data.frame( # type=c('mapcum','id','dotconf','dotconf'), # manditory column # lab1=c('','','White Males','White Females'), # recommended # lab2=c('','','Rate and 95% CI','Rate and 95% CI'), # optional # lab3=c('','','Deaths per 100,000','Deaths per 100,000'), # optional # lab4=c('','','',''), # col1=c(NA,NA,2,9), # dependent on "type" # col2=c(NA,NA,4,11), # dependent on "type" # col3=c(NA,NA,5,12), # dependent on "type" # colSize=c(NA,NA,1,1), # rmin=c(NA,NA,NA,1), # rmax=c(NA,NA,NA,5), # refVals=c(NA,NA,NA,wflungbUS[,1]), # optional # refTexts=c(NA,NA,NA,'US Rate'), # optional # panelData=c('','','',''), # required if boxplot or time series used. # adv=list('',list(a=v),'','') # advanced parameters # ) # # The first description row describes the first column of panels # an so on. This is a candidate for change since each column # describing a column avoids a mental transposition. # # The name of the user provided panelDesc data frame (or list) is stored in # callVarList$panelDesc variable for later reference. 
# # The alternate form of the panelDesc variable is a list of list. # panelDesc is a list. Each glyph column in the linked micromap is represented # by a list in this list. The glyph column list contains all of the # panelDesc variable related and valid for the glyph indicated in the type= variable # in this list. A example is provide at the end of the discussion on the panelDesc # variabls below. # # The type parameter must be present for each panel column. The other parameters are optionals. # However, if a parameter is required for any column, it is present for all columns. # If not used by a column, the parameter's value for that column should be set to "NA". # # type refers the graphic panel type to be used. The valid types are # "map", "mapcum","maptail","mapmedian", for maps # "id", for area ids # "dot", "dotse","dotconf", "dotsignif" for dot plots # "arrow", for arrow plots # "bar", for simple bar plots # "ts", "tsconf", for time series plots # "scatdot", for scatter dot plots # "normbar","segbar","ctrbar", for stacked bar plots # "boxplot", for box plot # "rank" for ranking (not fully implemented) # # For non-highlighted contours: # map accumulates areas top to bottom # maptail accumulates areas outside in # mapMedian feature above median area above the median and vis versa # # bar will accept negative values and plot from 0 in that direction. # # col1, col2, col3 # These values idenfity the column numbers oe names in statsDFrame to be # used as data for most of the panel glyph types. They are used by: # "dot", "bar", "dotse", "dotsignif", "dotconf", "scatdot", # "segbar", "ctrbar", "normbar" # # Panel types using only one column parameter (one data item) are: # # dot: col1 = dot value (estimate) # bar: col1 = bar height from zero (0) # # Panel types using two column parameters (two data items) are: # # dotse, dotsignif, arrow, and scatdot glyphs. 
# # dotse: col1 = dot value (estimate), col2 = standard error value # dotsignif: col1 = dot value (estimate), col2 = P Value for dot value # arrow: col1 = starting value, col2 = ending value for arrow. The arrow head # is on the ending value. # scatdot: col1 = "x" value of dot, col2 = "y" value of dot. # # Panel types using two column parameters to specify a range of data columns are: # # segbar, ctrbar, normbar: col1 = first data column in statsDFrame, # col2 = last data column in statsDFrame. The data from # columns col1 to col2 are used as the length (values) for each # stacked bar segment. The number of data columns must be between # 3 to 9. # # Panel type dotconf using three column parameters: (col1, col2, col3): # # dotconf: col1 = dot value (estimate), col2 = lower bound and col3 = upper bound # # Panel following types do not requiring any column parameters: # # boxplots uses the "panelData" vector in panelDesc to provide the name of a saved # boxplot structure. The boxplot structure is created by saving the # results of aboxplot(...,plot=F) call. # # ts and tsconf use the "panelData" vector in the panelDesc to obtain the name of # a matrix the data for the time series. The name represents a array(51,"x",4). # The first dimension represents the states (51) for the US # or the number of areas in the border data. The number of entries must # match the number of entries in the statsDFrame. The second dimension # represents the number of samples in the time series. The third dimension # are the "x", "low.y", "y", and "high.y" values for each sample. # For ts glyphs, the "low.y" and "high.y" values are ignored, but required. # colSize # Specifies the proportional size of a glyph column in relation to the other glyph columns. # This is a numeric vector with one element for each glyph column. The sum of the vector # is used as the denominator to calculate the percentage of available width is to be allocated # to the column. 
For example: colSize = c(NA, NA, 10, 10, 5, 15).  The first two columns are
#   map and id glyphs and are not involved in this feature.  The remaining 4 columns have a total
#   value of 40.  The percentage for each column is 25%, 25%, 12.5% and 37.5% = 100%.  If 4" of
#   space is available, then the width of each column will be 1", 1", 0.5", and 1.5".
#
#
# rmin, rmax
#   Specify the min and/or max values for the X axis range for any of the graphic
#   glyphs.  If no value is specified, the package will use the range of the
#   data provided.  NA must be used when a value is not being specified.
#   The user-provided range is checked against the range of the data to make sure
#   all of the data is contained in the range.  rmin must be less than rmax.
#   (in planning stages)
#
# lab1, lab2
#   Two label lines at the top of columns.  Use "" for blank, not NA or NULL.
#
# lab3
#   One label line at the bottom of each column, typically measurement units.
#   Supported under the "map" and "id" columns for use as a sub-title.
#
# lab4
#   One label line used with the Y axis on each panel.  Only used with time series and ScatDot panels.
#
# refVals
#   JP-2010/07/23 changed variable from refvals to refVals
#   to be consistent.
#   name of objects providing the reference values shown
#   as a line down the column
#
# refTexts
#   JP-2010/07/23 - New
#   texts to be used as the legend for the reference values.
#   If refTexts for a column is NA or "", then no legend is added.
#
# colSize
#   8/8/16 - implemented to provide proportional column size control.
#   A vector of numeric values used to set a proportional column size within the
#   space provided by the user.  The sum of all of the colSize values is used as the
#   denominator to determine the percentage of the available space to allocate to the
#   column.  The default value for each column is "1".  If a column's value is NA, NULL, or <=0.1,
#   then the column is allocated 1/"n" of the available space, where "n" is the number
#   of columns.
The map and id columns are fixed width columns and are not effected by the # colSize calculations. # # example: micromapST has 6 columns: map, id, dot, bar, arrow, dotconf. # The available width provided is 6.5" in a PDF. # colSize = c(0,0,5,5,10,3) # Once the map and id column widths are subtracted, the available width for the # four columns is 4". The total value of all columns is 23 (sum(5,5,10,3). # The width of the dot and bar columns will be set at 5/23 * 4 = 0.87 ", # arrow is allocated 1.74" and dotconf is allocated 0.52 ". # # The values in this vector must be positive numerical values. They can range from 0.1 to 100. # The sum of the values is used as the demoninator to calculate the percentage for each column. # # # panelData # (old boxplot column) # names a list object with a boxplot data or time series data (x/y or x/yl/ym/yh # data for each area. # # The boxplot list the xxxx$names list must be the abbreviated area id # for the entry and the related data in the structure. #. # Used to link graphic to additional data beyond the 3 data elements # provided in col1, col2, col3 indexes into the statsDFrame.. # # For boxplot graphics, a list of "boxplot" function values for each area and DC # with the names (2 characters) used as the row.names. # # For time series graphics, the object must be an array(51,"x",4), # where the 1st index is the areas (1 to n), the second index is the number # of time periods ("x") with a minimum of 2 and maximum of 30, and # the third index is the type of variable. The rownames of array must # be the associate area id (a 2 character abbreviation if states). This # is required so the time series array can be properly associated # with the data in the statsDFrame when it's sorted. # For time series with no confidence band, column 1 is the x value and # column 2 is the y value. 
# For time series with a confidence band, column 1 is the x value, # column 2 is the y-low value, column 3 is the y-median value, # and column 4 is the y-high value. # The number of entries must be equal to the number of areas in the statsDFrame. # # Note: Some descriptors may be omitted if none of the panel plots need them. # often refValues and boxplots can be omitted # # adv = list of parameter lists of for each glyph column. Each item in the list # represents a list of named parameters for that glyphs column. # Example: # # An example of the list form of panelDesc is: # # GC1 <- list(type="map",lab3="bottom lab") # GC2 <- list(type="id") # GC3 <- list(type="dot",lab1="Population",lab2="2010",col1="RATE.10",refVal=100,refTxt="Pop. Ref") # GC4 <- list(type="boxplot",lab1="boxplot",lab2="risk",panelData="BoxData") # # panelDesc <- list(GC1, GC2, GC3, GC4) # # > str(panelDesc) # List of 4 # $ :List of 2 # ..$ type : chr "map" # ..$ lab3 : chr "bottom lab" # $ :List of 1 # ..$ type : chr "id" # $ :List of 6 # ..$ type : chr "dot" # ..$ lab1 : chr "Population" # ..$ lab2 : chr "2010" # ..$ col1 : chr "RATE.10" # ..$ refVal : num 100 # ..$ refTxt : chr "Pop. Ref" # $ :List of 4 # ..$ type : chr "boxplot" # ..$ lab1 : chr "boxplot" # ..$ lab2 : chr "risk" # ..$ panelData: chr "BoxData" # > # # Each list in panelDesc represents a single glyph column in the output generated. # This makes it easier to create the glyph description, you only have to provide # the information needed for the glyph, and allows you to quickly change the # order of the glyphs in the results. As new glyph variables are defined, the # only have to be included in the list for the specific glyph and column. The # same glyph may be used several times with different glyph variables settings. # Currently the glyph (details) variable names must contain the glyph name and # a variable name. 
With this approach, the variable names are simplified and # have the same meaning across all of the glyphs but are specific to the glyph # and column. For more details see the panelDesc section of the documentation. # #### # # Individual Call Arguments/Parameters: # # rowNamesCol: Optionally the name of the column in the area data.frame that # contains the link names associated with the rows. If not specified, # the row.names of of the statsDFrame are used as the area names. # Using the row.names is the default method of linking the data to the # border data. # # rowNames: Type of area id used as row.names in statsDFrame data.frame. # Acceptable values are: "ab", "alt_ab", "full", "id", Two additional options # have been added to accomodate the SEER data requirements: "seer" or "alias". # This rowNames option requests the packet to do partial matches of an alias for # area against the "registry" column/list outputted by SeerStat. If the partial # match succeeds, the associated area abbreviation is used. # By default the row.names of the statsDFrame are used. Based on # this option, the value is treated as an abbreviation, full area name, # or the numeric ID of the area.. # The default is "ab" for abbreviation, # # ignoreNoMatches is a logical parameter. The default is FALSE. If FALSE, all of the # data rows in the statsDFrame MUST match the area list in the boundaries datasets. # The there is not a match, an error is generated and the call is terminated. # If set to TRUE, any data row that does not match the areas in the boundaries dataset # are ignored and the user is notified of the situation. This may be helpful, if you # know the full names or abbreviations are correct, but the data has a row with "US" or "ALL" # as the link value or the source of the data generated comment lines that should be ignored. # # plotNames: When the ID glyphs is selected, this options informs the # package which form of labels to use. 
The options are "full" area name # or the abbreviated area name. The default is the "full" for full name. # Acceptable values are: "ab", "full" # The values of the "ab" and "full" labels are provided in the areaNamesAbbrsIDs # data.frame associated with the border structures provided to the package. # # sortVar The column name or number in the statsDFrame to be used as the variable # in sorting. Can be a vector of column subscripts to break ties. # Warning: The sortVar parameter cannot be used to sort a boxplot # or time series, since data is not contained in the statsDFrame. # # ascend TRUE default sorts in ascending order. FALSE indicated descending order. # # title A vector with one or two character strings to use the title.for the page. # # BORDER GROUPS # # bordDir (optional) The path name to a directory containing the border group specified in # bordGrp. The file must be an ".rda" type file that contains the four border group # R objects: areaParms, areaVisBorders, L2VisBorders, L3VisBorders. This parameter # can be used when the user has their own border group dataset or during developement # of a new border group or testing a modified border group before a package is created. # When this field is specified, the internal border groups are ignored. # # bordGrp The package contains two border Groups: USStatesBG and USSeerBG. # When using the "USStatesBG" border group, allows the package to function identically # to the original micromapST package. When the "USSeerBG" border group is # used, the Seer Areas and structures are available to the micromapST user. # The USSeerBG border group contains the names, abbreviations, aliases, and border # structures to support the micromap generation for US Seer Area data. # # NOTE: For border groups to work, lazyloading and lazydata must be DISABLED. # If enabled, the package is unable to load the correct border group dataset based # on the bordGrp parameter value. 
# # PANEL LAYOUT: # # grpPattern A user provided area to panel group/row mapping. The sum of the vector must # be equal to the number areas provided in the statsDFrame data structure. # The values are the number of areas in each panel created by micromapST. # The values must be in the range of 2 through 5. The value of 1 is allowed, # but only if the number of areas is odd, and in the median position of the # vector. Examples: # For 9 areas grpPattern = c(3,3,3) for 3 areas per panel row. # For 9 areas grpPattern = c(4,1,4) for a pattern of 4 areas, 1 area, # and 4 areas per panel. # For 17 areas grpPattern = c(5,3,1,3,5) or c(4,4,1,4,4) or c(4,3,3,3,4) # For 18 areas grpPattern = c(5,4,4,5) # The grouping pattern must also be symetric about the median point and have # the number of rows per panel desend toward the median point. This is required # make the micromap graphics presentable. A grpPattern = c(3,4,4,5) or c(3,4,4,3) # are not allows. The maximum value for the rows per group is 5. # # MAPPING: # # dataRegionsOnly is a logical parameter. The default is FALSE. If FALSE, the data is # not inspected to determine if a subset of regions could be drawn saving # mapping space in the map glyphs. If set to TRUE, the data sub-areas # are inspected to determine if a sub-set of regions can be drawn to # save graphic space. This feature is only active if the border group's # name table contain region identifiers for each sub-area. This information # is used to determine how many sub-areas are required to be drawn and # how to organize the map for presentation. As before any sub-areas # in the mapped regions without data are only flagged with warning messages # and colored white, but still drawn. If regional boundaries are present, # the boundaries are overlayed for regional with data. # # regionsB is a logical parameter. The default is FALSE. If FALSE, no regional # boundaries are drawn. 
If set to TRUE, if regional boundaries are # present, they are drawn on the micromap. # # # Glyph Global parameters: # # axisScale A character string indicating the type of axis labels to be used # on the Y and X axis for glyphs with axis labels. The acceptable # values are: # "o" -> original (pretty function) # "e" -> extended algorithm - no scaling. (new default) # "s" -> numbers scaled to millions, billions, etc. with # extra header line # example: # 0 10 20 30 40 # in millions # # "sn" -> numbers scaled individually and marked with # the scaling factor. # example: # 0 500M 1B 1.5B 2B # # "s" and "sn" are based on the "e" algorithm (extended.) # # This call arugment can be overriden for a specific glyph column by # including "axisScale=" in the panelDesc list for the column. # # staggerLab A true/false flag to specify if the axis labels are staggered # alternating low and high labels. The default = FALSE. If FALSE # the axis labels are NOT staggered. If TRUE, two axis label # lines are drawn, with the axis labels alternated low and high lines. # # This call arugment can be overriden for a specific glyph column by # including "staggeredLab=" in the panelDesc list for the column. # # ##### ##### # # List/Control Parameters: (package default data.frames are used if the colors and # details parameters do not specify an alternate data.frame. # It is strongly recommended to use the default data.frame) # # colors a color palette as a vectors of strings (character-vectors) # 6 colors for areas in a group of 6 # 1 color for the median area # 3 foreground color for non-highlighted areas in the map # 2 background colors for not referenced and non-active sub-areas, # and 12 matching colors with 20% transparency for time series. # # If a color vector is provided, it's length must = 24. # # If the value of colors is "bw" or "greys", a grey scale is used instead # of the default or user provided colors vector. 
# The default is NULL, which indicates the package default colors should be used.
#
# see rlmicromapGDefaults$colors for more details
#
#
# details defines the spacing, line widths, colors and many many other details
# controlling the style and appearance of the generated glyphs.
#
# see the micromapGDefaults$details section for more details.
#
# The function automatically loads the default values into the code when the
# function is started. The user can use the details parameter to override
# any of the items and values in the micromapST package. To override a value,
# create a list as follows:
#
# details = list(<variable name> = <value>,,, )
#
# See the micromapGSetDefaults function below for a definition of each
# micromapST variable and its default.
#
#####
#####
#
# Load working tables for verifications
#
# details variable list
#
# get validation and translation table for details variables to panelDesc variables.
data(detailsVariables,envir=environment())
#
#####
#####
#
# Counter Initialization (Global) - research code = to be removed.
#
# Variable at the micromapST level.
#
#Saved_Locale <- Sys.getlocale(category='LC_CTYPE')   # save existing locale
#x <- Sys.setlocale('LC_ALL','C')                     # set to 'C'
mstColorNames <- "black"
mmSTEnvir <- environment()    # environment of this micromapST invocation
xmsg <- capture.output(mmSTEnvir)
#cat("micromapST envir:",xmsg,"\n")
#
# Set up global variables values.
#
# create warning and stop counters - must be in .GlobalEnv so the panelXXXX
# functions can use them.
# NOTE(review): the original built these assignments with
# eval(parse(text="assign(...)")); replaced with direct assign() calls -
# identical side effects, no string evaluation.
assign("warnCnt", NewCounter(), envir = .GlobalEnv)
assign("stopCnt", NewCounter(), envir = .GlobalEnv)
#
# this should get the global variables set up so they can be referenced within all functions.
#
# Cross column variables
#
# Local NULL bindings keep these names defined in this environment; the
# working copies live in .GlobalEnv so the panel functions can update them.
lastLab2Space <- NULL
lastLab3Space <- NULL
staggered     <- NULL
staggering    <- NULL
assign("lastLab2Space", 0,     envir = .GlobalEnv)
assign("lastLab3Space", 0,     envir = .GlobalEnv)
assign("staggered",     FALSE, envir = .GlobalEnv)
assign("staggering",    FALSE, envir = .GlobalEnv)
#
# glyph variables - at this time this is required to allow us to validate this variable.
#
Id.Dot.pch <- NULL
assign("Id.Dot.pch", 22, envir = .GlobalEnv)  # assign default of 22 (filled-square plotting symbol).
#cat("envir=warnCnt:", find("warnCnt"),"\n")
#
#####
#####
#
# Save call parameter values for warning and error messages, not content, name of variables.
#
# Can't do this in a function because the environment and frames will change.
#
frml         <- formals()               # get list of call parameters - the formals - for the function and default values. (as defined).
frmlNames    <- names(formals())        # get the name of the parameters (as we validate the parameter, we will back fill the defaults.)
callVar      <- as.list(match.call())[-1]  # get the names and values used on the current call.
callVarNames <- names(callVar)          # get the names of the used call parameters
# merge the formals parameter list with the parameter list used at the time of the micromapST call with user set values.
callVL <- frml                          # Seed the call variable list with the formals and default values
callVL[callVarNames] <- callVar[callVarNames]   # copy the values used in the call.
# save call parameter list and values to .GlobalEnv so message formatting and
# the panel/glyph functions can reference the caller's variable names later.
# NOTE(review): replaced eval(parse(text="assign(...)")) with direct assign().
assign("callValList", callVL, envir = .GlobalEnv)
# Extract the statsDFrame variable name (as supplied by the caller)
assign("sDFName", callVL$statsDFrame, envir = .GlobalEnv)
# Extract the panelDesc variable name
assign("pDName", callVL$panelDesc, envir = .GlobalEnv)

#print(paste0("statsDFrame=",sDFName))
#print(paste0("panelDesc  =",pDName ))
#
# callVarList is now a named list with the names of the parameter variables and the list content the
# values at the time of the call.  Any variables show up with a typeof "symbol" and class "name".
# The value of the variable is not captured.
#
# Later must copy this information up to the .GlobalEnv so it can be referenced by everyone.
#
#print(callVL)
#
#####
#print("callVarList Saved in .GlobalEnv")
#####
#
# Verify Run Parameters:
#
# Order of importance:
#  a) bordDir and bordGrp - needed to get the border group loaded and its particular parameters defaults
#  b) Validate statsDFrame (but not contents)
#  c) Validate panelDesc (but not contents, yet)
#
# bordDir and bordGrp - 1st parameter to check - sets up the information for all of the other parameters.
#
# Package contained border groups:
#
PkgBGs <- c("USStatesBG", "USSeerBG", "KansasBG", "MarylandBG", "NewYorkBG",
            "UtahBG", "AfricaBG", "ChinaBG", "UKIrelandBG", "SeoulSKoreaBG")
UserBordGrpLoad <- FALSE   # FALSE, load from package with data(), TRUE load from directory with load()
# Package Variables
#
# bordDir - if directory then private border group.
#
# The bordDir is used to direct the border group load to a user directory or during testing
# of a new or modified border group.
#
if (is.null(bordDir) || is.na(bordDir)) {  # make sure its NULL
  bordDir <- NULL      # no directory provided.
} else {
  # validate the directory exists and is referencable.
  bordDir <- str_trim(bordDir)   # trim spaces.
  if (!dir.exists(bordDir)) {    # bordDir path does not exist.
    xmsg <- paste0("***0153 BGBD The directory specified in the bordDir call parameter does not exist. Value=",bordDir)
    stopCnt()
    stop(xmsg, call. = FALSE)    # was call=FALSE (partial argument match) - spelled out.
  } else {
    UserBordGrpLoad <- TRUE      # load() from directory don't data()
    xc <- str_sub(bordDir,-1,-1) # get last character
    if (xc != "/" && xc != "\\") {
      bordDir <- paste0(bordDir,"/")  # add slash if not present. (must check for \ and / slashes.)
    }
  }
}
callVL$bordDir <- bordDir

BordGrpName <- bordGrp
bgFile      <- NA

if (!UserBordGrpLoad) {
  # no valid bordDir directory -> the bordGrp must be a .rda in this package.
  # If no bordGrp parameter, set default to USStatesBG.
  if (is.null(bordGrp) || is.na(bordGrp)) {
    BordGrpName <- "USStatesBG"   # indicates which structure .rda file in package to load.
  } else {
    BordGrpName <- bordGrp
    bGM <- match(BordGrpName,PkgBGs)   # must be one of the packaged bordGrps
    if (is.na(bGM)) {   # no match to the bordGrps supported within the package.
      # Use variable to make message dynamic as more bordGrps are added.
      ymsg <- paste0(shQuote(PkgBGs),collapse=", ")
      xmsg <- paste0("***0150 BGBD The bordDir call parameter was set to NULL, the bordGrp must be one contain in the package:\n", ymsg, "\n")
      stopCnt()
      stop(xmsg, call.=FALSE)
      # alternative is to check for file in working directory and then verify its structure.
      # NOTE(review): removed an unreachable rm(ymsg) that followed the stop() above.
    }
    rm(bGM)
    # DATA bordGrp
  }
} else {
  if (is.null(bordGrp) || is.na(bordGrp)) {
    # bordDir provided, but no bordGrp - ouch!  error
    stopCnt()
    xmsg <- paste0("***0156 BGBD The bordGrp call parameter has not been specified. It is required when the bordDir is provided.")
    stop(xmsg, call.=FALSE)
  } else {
    # if not check to see if the .rda file exists.
    fnSplit <- str_split(bordGrp,"[.]")[[1]]   # split up user provided name.
    BordGrpName <- fnSplit[1]
    if (is.na(fnSplit[2])) {
      # if no extension - then add .rda
      bordGrp <- paste0(bordGrp,".rda")
    } else {
      # if extension is present - must be .rda or .RData
      # BUG FIX(review): str_split removes the "." delimiter, so fnSplit[2] is
      # "rda"/"RData" WITHOUT a leading dot.  The original compared against
      # ".rda"/".RData", which never matched and rejected every valid file name.
      if (fnSplit[2] != "rda" && fnSplit[2] != "RData") {
        # error - extension must be .rda or .RData.
        xmsg <- paste0("***0154 BGBD The bordGrp filename must have an '.rda' or '.RData' file extension.")
        stopCnt()
        stop(xmsg, call. = FALSE)
      }
    }
  }
  # test to see if directory and file exist, before trying to load.
  bgFile <- paste0(bordDir,bordGrp)
  if (!file.exists(bgFile)) {
    xmsg <- "***0155 BGBD The bordGrp file in the bordDir directory does not exist."
    stopCnt()
    stop(xmsg, call. = FALSE)
  }
}
# got this far, variables to load/data the border group appear to be good.
callVL$bordGrp     <- bordGrp
callVL$bgFile      <- bgFile
callVL$BordGrpName <- BordGrpName
assign("callVarList", callVL, envir = .GlobalEnv)

#cat("bordDir     = ",bordDir,"\n","bordGrp = ",bordGrp,"\n")
#cat("BordGrpName = ",BordGrpName,"\n")
#cat("bgFile      = ",bgFile,"\n")
#
######
#
#  load micromap border and names tables based on type of run
#  Currently supported:  USStatesBG and USSeerBG
#
#
## add code to pick up on "bordGrp" provided by user.
## If one of ours use data, otherwise use load or "copy" from structure of that name.
## bordGrp must be data.frame containing "areaNamesAbbrsIDs, areaVisBorders, L2VisBorders, RegVisBorders,
## L3VisBorders, and areaParms.
#
# Thoughts on border group verification:
#   1) Do it once and get a md5 check sum on the files that pass.
#   2) Place name of border group file and directory and MD5 in
#      file in micromapST/data folder under the "library".
#   3) Prior to using private border group check library information to see if verification must be done.
#
## for testing - use load instead of data.
# initialize border group variables to determine if they are correctly loaded.
# ---- Load the border group and verify its required objects ----
# Initialize the border group objects to NULL so we can detect which ones the
# data()/load() call actually supplied.
areaParms         <- NULL
areaNamesAbbrsIDs <- NULL
areaVisBorders    <- NULL
L2VisBorders      <- NULL
RegVisBorders     <- NULL
L3VisBorders      <- NULL

if (!UserBordGrpLoad) {
  # System border group
  #print (paste0("reading border group ",BordGrpName, " via a data statement."))
  data(list=BordGrpName,envir=environment())  # Group Border tables and parameters distributed with package.
} else {
  # user border group
  #print (paste0("reading border group ",BordGrpName, " via LOAD since bordDir = ",bordDir))
  # need to put a try around this in case there is a problem with the user data file.
  res <- try(load(bgFile))   # only error should be a lock or error in reading file.
  # BUG FIX(review): was class(res)=="try-error"; inherits() is the robust test
  # and does not break if the result carries more than one class.
  if (inherits(res, "try-error")) {
    # error occurred during user border group file loading.
    stopCnt()
    xmsg <- paste0("***0157 BGBD System error encountered when loading the border group. See error message:")
    ymsg <- paste0("***0157 >>",res[1])   # get message from error
    warning(xmsg, call.=FALSE)
    stop(ymsg, call.=FALSE)   # stopped.
  }
}
#
# Basic Verify that all of the bordGrp data.frames have been loaded.
#
MissInBG <- NULL
ErrFnd   <- FALSE
if (is.null(areaParms)) {
  ErrFnd   <- TRUE
  MissInBG <- paste0(MissInBG,", areaParms")
}
if (is.null(areaNamesAbbrsIDs)) {
  ErrFnd   <- TRUE
  MissInBG <- paste0(MissInBG,", areaNamesAbbrsIDs")
}
if (is.null(areaVisBorders)) {
  ErrFnd   <- TRUE
  MissInBG <- paste0(MissInBG,", areaVisBorders")
}
if (is.null(L3VisBorders)) {
  ErrFnd   <- TRUE
  MissInBG <- paste0(MissInBG,", L3VisBorders")
}
#if (is.null(L2VisBorders)) {
   # No action at this time.  Check later when processing areaParms.
   # L2VisBorders is only needed if Map.L2Borders is TRUE.
   # If there is no L2VisBorders data.frame, set L2VisBorders to L3VisBorders,
   # or is equal to NA, then the Map.L2Borders are set to FALSE
   # and a warning message generated.
#}
#if (is.null(RegVisBorders)) {
   # No action at this time.  Check later when processing areaParms.
   # RegVisBorders is only needed if aP_Regions or Map.RegBorders are set to TRUE.
   # If there is no RegVisBorders data.frame, it is set to L3VisBorders,
   # then Map.RegRegions is set to FALSE.  It is possible to do regional mapping without
   # regional boundaries.
#}
#str(areaNamesAbbrsIDs)

if (ErrFnd) {
  # if ErrFnd, the MissInBG must contain at least one entry.
  MissInBG <- substr(MissInBG,3,nchar(MissInBG))   # Kill leading ", "
  stopCnt()
  xmsg <- paste0("***0151 BGBD After loading ",BordGrpName," border group data set, the following objects are missing: ",MissInBG)
  stop(xmsg, call.=FALSE)
}
rm(MissInBG,ErrFnd)

# Clean up and move data into old structures
#
# Later add code to validate possibly USER provided border groups.
#
if (UserBordGrpLoad) {
  # verify border group objects.  (columns, same number of rows, etc.)
  # objective is to only check data once - mark the data for future reference.
  # Want to keep run times VERY VERY low and not keep re-checking user data.
  # Lot of work to be done.
  # OR set flag in BG.rda indicating it has been verified.  Do Once on request.
  # Check Validation by BGValidate function.
  # md5sum file is in .Library directory under the name BGmd5.rda
  # Contents is BG name and md5 check sum.
  # run md5sum over the BG file and compare values with this file.
  # if it matches, then BG file does not have to validated and waste time and CPU.
  #
}
##########
#
#  Merge the "areaParms" variables into the global variables.
#
#  They may still be overridden by the details=list(...) parameter in the call.
#
#  Set the type of everything to protect against factors on data.frames.
# ---- Merge areaParms values into working variables ----
# Every value is coerced with as.character/as.numeric/as.logical to protect
# against factor columns in the areaParms data.frame.
bordGrp    <- as.character(areaParms$bordGrp)
Map.Hdr1   <- as.character(areaParms$Map.Hdr1)
Map.Hdr2   <- as.character(areaParms$Map.Hdr2)
Map.MinH   <- as.numeric(areaParms$Map.MinH)
Map.MaxH   <- as.numeric(areaParms$Map.MaxH)
Map.Aspect <- as.numeric(areaParms$Map.Aspect)

# Border groups built before the Id.* rename still carry ID.Hdr1/ID.Hdr2;
# the new Id.* names are used only when the old ones are absent.
if (is.null(areaParms$ID.Hdr1)) {
  # New variable names
  Id.Hdr1 <- as.character(areaParms$Id.Hdr1)
  Id.Hdr2 <- as.character(areaParms$Id.Hdr2)
} else {
  # Old variable names
  Id.Hdr1 <- as.character(areaParms$ID.Hdr1)
  Id.Hdr2 <- as.character(areaParms$ID.Hdr2)
}

# Map.L2Borders - draw L2 borders (defaults to FALSE when absent from areaParms)
if (is.null(areaParms$Map.L2Borders)) {
  Map.L2Borders <- FALSE
} else {
  Map.L2Borders <- as.logical(areaParms$Map.L2Borders)
}
# Map.L3Borders - draw L3 borders (option - also turned off if limited regional drawing is done.)
if (is.null(areaParms$Map.L3Borders)) {
  Map.L3Borders <- TRUE
} else {
  Map.L3Borders <- as.logical(areaParms$Map.L3Borders)
}

areaUSData  <- as.logical(areaParms$areaUSData)
enableAlias <- as.logical(areaParms$enableAlias)

#print("areaParms:")
#print(str(areaParms))

# fix up areaParms to unique names
# check for old field names.  If present - copy to new names.
if (!is.null(areaParms$Regions)) {
  areaParms$aP_Regions <- as.logical(areaParms$Regions)
  areaParms$Regions    <- NULL
}
if (!is.null(areaParms$Units)) {
  areaParms$aP_Units <- as.character(areaParms$Units)
  areaParms$Units    <- NULL
}
if (!is.null(areaParms$Proj)) {
  areaParms$aP_Proj <- as.character(areaParms$Proj)
  areaParms$Proj    <- NULL
}

# Check regions, and boundary overlay flags.
#
# aP_Regions - feature enabler - if RegVisBorder is present and information in name table.
#    Refers to name table information, not regional boundaries
#
if (is.null(areaParms$aP_Regions)) {
  areaParms$aP_Regions <- FALSE   # no regional mapping feature
}
# region borders can be drawn or not.  If regions feature enabled, default = TRUE.  If not, FALSE
#
if (is.null(areaParms$Map.RegBorders)) {
  areaParms$Map.RegBorders <- FALSE   # no regional boundaries available
}

# copy from data frame into work variables.
aP_Regions     <- as.logical(areaParms$aP_Regions)
aP_Units       <- areaParms$aP_Units
aP_Proj        <- areaParms$aP_Proj
Map.RegBorders <- as.logical(areaParms$Map.RegBorders)

#cat("Initial areaParms - Map. L2:",Map.L2Borders,"  Reg:",Map.RegBorders,"  L3:",Map.L3Borders,"  aP_Regions:",aP_Regions,"\n")
#print(str(areaParms))
#
# after this point we do not reference areaParms again.
#
####
####
#
# The following variables may be included in details, but are not configured here
# with defaults.  They are variables initialized in the border group areaParms table.
#
detailExtra <- colnames(areaParms)   # get list of parameters from areaParms
x <- match("bordGrp",detailExtra)    # if list contains "bordGrp"
if (!is.na(x)) {
  detailExtra <- detailExtra[-x]     # remove it from the list.
}
# When "bordGrp" is excluded, this would leave:
#    "Id.Hdr1","Id.Hdr2", "Map.Hdr1", "Map.Hdr2", "Map.MinH", "Map.MaxH", "Map.L2Borders",
#    "areaUSData", "enableAlias", "aP_Regions" , "aP_Proj" , "aP_Units"
#
# This list is appended to the colname list derived from the default details data.frame to
# create a list of valid variables.
#
##########

##########
#
# areaNamesAbbrsIDs and xxxVisBorders tables
#
#cat("dim of areaNamesAbbrsIDs:",dim(areaNamesAbbrsIDs),"\n")
#cat("names of areaNamesAbbrsIDs:",names(areaNamesAbbrsIDs),"\n")
row.names(areaNamesAbbrsIDs) <- areaNamesAbbrsIDs$Key   # ensure row.names match the keys
if (is.null(areaNamesAbbrsIDs$regID)) {
  # current name table does not have regional information.
  # Fill with placeholder values so later region logic has columns to read.
  areaNamesAbbrsIDs$regID   <- "<NA>"
  areaNamesAbbrsIDs$regName <- "<NONE>"
}
rlAreaNamesAbbrsIDs <- areaNamesAbbrsIDs   # save copy of name table
#cat("dim of rlAreaNamesAbbrsIDs:",dim(rlAreaNamesAbbrsIDs),"\n")
#print(str(areaNamesAbbrsIDs))

# sub-area boundaries
rlAreaVisBorders <- areaVisBorders   # save copy of sub-area boundaries
#cat("dim of rlAreaVisBorders :",dim(rlAreaVisBorders),"\n")
#print(str(areaVisBorders))

# total area boundaries
rlL3VisBorders <- L3VisBorders       # save copy of area boundary
#cat("dim of rlL3VisBorders :",dim(rlL3VisBorders),"\n")
#print(str(L3VisBorders))

# Check on L2VisBorder and complete set up.
# If L2 drawing is requested but no usable L2 boundaries exist, warn and disable.
if (Map.L2Borders) {
  if (is.null(L2VisBorders) || identical(L2VisBorders,L3VisBorders)) {
    # no L2VisBorders or L2VisBorders is the same as L3VisBorders
    # Map.L2Borders set on - but no boundaries to draw.
    xmsg <- paste0("***0158 BGBD In the areaParms data.frame the Map.L2Borders is TRUE, but no level 2 boundaries are provided, Level 2 overlay is disabled.\n")
    warnCnt()
    warning(xmsg,call.=FALSE)
    L2VisBorders  <- L3VisBorders   # copy L3 to L2 data.frame (Place holder)
    Map.L2Borders <- FALSE
  }
}
rlL2VisBorders <- L2VisBorders
#print(str(rlL2VisBorders))
#
# Check on RegVisBorder and complete set up.
# Same pattern as L2 above: warn and disable when no regional boundaries exist.
if (Map.RegBorders) {
  if (is.null(RegVisBorders) || identical(RegVisBorders,L3VisBorders)) {
    # no RegVisBorders or RegVisBorders == L3VisBorders
    xmsg <- paste0("***0159 BGBD The areaParms variable aP_Regions is TRUE and/or Map.RegBorders is TRUE, but no regional boundaries exist in border group. Regions overlay is disabled.")
    warnCnt()
    warning(xmsg,call.=FALSE)
    Map.RegBorders <- FALSE
    RegVisBorders  <- L3VisBorders   # copy L3 to Reg data.frame (placeholder)
  }
}
#
rlRegVisBorders <- RegVisBorders
#print(str(rlRegVisBorders))
#
# Implementation change note:  The regions feature will be implemented using the
# regID field in the areaNamesAbbrsIDs table and a RegVisBorders boundary data.frame.
# The regID field associates the sub-areas to regions.
# If a RegVisBorder file is present, the boundaries # are grouped by the regID codes as it's keys. This permits # sub mapping of its boundaries - hopefully they will # match up with the area boundaries. # # Map.L2Borders > controls if L2VisBorders is drawn. # Map.RegBorders > controls if RegVisBorders is drawn. # Map.L3Borders > controls if L3VisBorders is drawn. # # Map.L3Borders is TRUE by default, but reset to FALSE when a sub-set of regions are drawn. # Map.RegBorders is only TRUE when there is a valid RegVisBorders data.frame. This is not # independent of the aP_Regions feature control flag. # Map.L2Borders is TRUE when a valid L2VisBorders data.frame is present. # # When a subset of the regions in a border group are to be drawn, # a) The areaNamesAbbrsIDs name table is not modified. # b) L2VisBorders, RegVisBorders data.frames are edited to the limited group of areas. # It is assumed L2 is a subset of Reg. # c) Map.L3Borders is set to FALSE to not draw the outline of the total space. # # # If both L2 and Reg are persent, The name table is used to know witch L2 boundaries to draw. # In regions mode: # # listUsedArea <- areaNamesAbbrsIDs[IndexDFtoNT,"Key"] # listUsedL2 <- unique(areaNamesAbbrsIDs[IndexDFtoNT,"L2_ID"]) # listUsedRegions <- unique(areaNamesAbbrsIDs[IndexDFtoNT,"regID"]) # # a) if dataRegionsOnly=TRUE - enable regions feature. # # regMatch <- !is.na(match(areaNamesAbbrsIDs$regID,listUsedRegions)) # list of sub-areas in regions # listAllAreas <- areaNamesAbbrsIDs[regMatch,"Key"] # listAllL2 <- unique(areaNamesAbbrsIDs[regMatch,"L2_ID"]) # listAllRegions <- unique(areaNamesAbbrsIDs[regMatch,"regID"]) # # b) if dataRegionsOnly=FALSE or not enabled. # # listAllAreas <- areaNamesAbbrsIDs[,"Key"] # listAllL2 <- unique(areaNamesAbbrsIDs[,"L2_ID"]) # listAllRegions <- unique(areaNamesAbbrsIDs[,"regID"]) # # ensure Abbreviations are all CAPS. 
# # The package carries two sets of names for each area # (in the areaNamesAbbrsIDs table and areaVisBorders matrix.) # # abbreviated - always in CAPS. # fullname - always with proper capitalization. # but is CAP'd for all comparisons. # fix up Name Table for sub-area matching. #cat("dim of areaNamesAbbrsIDs:",dim(areaNamesAbbrsIDs),"\n") # Matching strings rlAreaNamesAbbrsIDs$Abbr <- toupper(rlAreaNamesAbbrsIDs$Abbr) # Abbr Must be uppercase. areaNTAbbr <- (rlAreaNamesAbbrsIDs$Abbr) # Get list of abbrevations. (All CAPS) areaNTName <- (toupper(rlAreaNamesAbbrsIDs$Name)) # get list of full area names in uppercase (All CAPS) areaNTAAbbr <- (toupper(rlAreaNamesAbbrsIDs$Alt_Abbr)) # get list of alternate abbreviations. areaNTKey <- (toupper(rlAreaNamesAbbrsIDs$Key)) # get key as uppercase. (links into VisBorder files.) # Presentation strings ID.Abbr <- areaNTAbbr # (All CAPS) # for ID.Name force proper capitalization on the name ID.Name <- as.vector(sapply(areaNTName,function(x) simpleCap(x))) # proper cap. areaNTName <- ClnStr(areaNTName) areaNTAbbr <- ClnStr(areaNTAbbr) areaNTAAbbr <- ClnStr(areaNTAAbbr) areaNTID <- ClnStr(toupper(rlAreaNamesAbbrsIDs$ID)) # fix up areaVisBorders data frame rlAreaVisBorders$Key <- toupper(rlAreaVisBorders$Key) rlRegVisBorders$Key <- toupper(rlRegVisBorders$Key) rlL2VisBorders$Key <- toupper(rlL2VisBorders$Key) # Working vectors for PRINT out. # #### xps <- par("ps") xpin <- par("pin") xdin <- par("din") #cat("check point on par - ps:",xps," pin:",xpin," din:",xdin,"\n") #print("Border Group Read and Setup") #### # #________________ Type of micromap Variable (for now)_______________ # # extend hdr strings to include the other types of maps # # #### ##################### # # Border Group now loaded and name table initial setup completed. # ##################### # # Finish check after the glyph function definitions. 
#####################
#
# Border Group now loaded and name table initial setup completed.
#
#####################
#
# Finish check after the glyph function definitions.
#
#####################

#####################
#
# Define panel glyph functions =====================================
#
# All of these glyph-drawing functions are internal to the enclosing
# micromapST function.  They deliberately read shared state from that
# environment via lexical scoping: the user data (dat), panelDesc column
# pointers (col1, col2, litcol1, litcol2), layout state (panels, panelGroup,
# numGrps, ib, ie, medGrp, medGrpSize, pad, padMinus, numRows), reference
# line data (lRefVals, lRefTexts), colors (mstColors, Panel.*, Grid.*,
# Arrow.*, Bar.*, BoxP.*, CSNBar.*, CBar.*), and the panel/validation
# helpers (panelSelect, panelScale, panelFill, panelOutline,
# groupPanelOutline, DrawXAxisAndTitles, AddRefLine, SetKsc, plotPoint,
# CheckPDCol, CheckPDColnCN, CheckNum, warnCnt, BuildSegColors).
#
#####

#####
#
# type = 'arrow' =========================================================
#
# rlAreaArrow
#
# Draw one arrow-glyph column: for each sub-area, an arrow runs from the
# col1[j] value to the col2[j] value.  Arrows whose length is effectively
# zero (below a resolution threshold) are drawn as dots instead, to avoid
# the zero-length warning from arrows().
#
# j = current panel column number.
# col1[j] = statsDFrame column holding the first arrow end point value.
# col2[j] = statsDFrame column holding the second arrow end point value.
# (col1/col2 were already converted to column numbers and verified.)
#
rlAreaArrow <- function(j) {

   ### glyph column setup section

   wstname  <- names(dat)            # names of columns in statsDFrame
   wstdim   <- dim(dat)
   wstMax   <- wstdim[2]             # number of columns in statsDFrame
   ErrFnd   <- FALSE
   pdColNum <- formatC(j, format = "f", digits = 0, width = 2, flag = "0")

   # "col1" - beginning value of the arrow.
   stColName1 <- wstname[col1[j]]
   if (is.null(stColName1)) { stColName1 <- as.character(col1[j]) }
   pdUmsg <- "(Beginning value of arrow)"
   xr     <- CheckPDCol('col1', 'ARROW', col1[j], stColName1, j, 1, wstMax, dat, pdUmsg)
   if (xr$Err) { ErrFnd <- TRUE } else { xdat1 <- xr$Dat }

   # "col2" - end value of the arrow.
   stColName2 <- wstname[col2[j]]
   if (is.null(stColName2)) { stColName2 <- as.character(col2[j]) }
   pdUmsg <- "(End value of arrow)"   # fixed typo: was "{End value of arrow)"
   xr     <- CheckPDCol('col2', 'ARROW', col2[j], stColName2, j, 2, wstMax, dat, pdUmsg)
   if (xr$Err) { ErrFnd <- TRUE } else { xdat2 <- xr$Dat }

   if (ErrFnd) return()              # error already warned - skip this column.

   refval <- lRefVals[j]             # reference value for column - JP-2010/07/23
   reftxt <- lRefTexts[j]            # reference text for column  - JP-2010/07/23

   good1   <- !is.na(xdat1)          # per-row presence of each end point
   good2   <- !is.na(xdat2)
   goodrow <- !is.na(xdat1 + xdat2)  # rows with BOTH end points present - used to skip bad entries.

   # x range over all end points for all areas; padding is handled by
   # DrawXAxisAndTitles, so no manual scale extension is done here.
   rx   <- range(xdat1, xdat2, na.rm = TRUE)
   lPad <- TRUE
   rPad <- TRUE
   ry   <- c(0, 1)                   # y axis range = 0 to 1.

   # ____________labeling and axes_______________
   # Draw top/bottom titles and the x axis for this column (header/trailer).
   Res  <- DrawXAxisAndTitles(j, panels, rx, ry, reftxt, refval,
                              leftPad = lPad, rightPad = rPad)
   atRx <- Res$atRx
   rx   <- Res$rx
   ry   <- Res$ry

   # _________________drawing loop__________________
   # One panel per group/row; each panel holds 1 to 5/6 sub-area arrows.
   for (i in 1:numGrps) {

      gsubs <- ib[i]:ie[i]           # area indexes for this panel
      ke    <- length(gsubs)         # number of rows in this panel (1 to 5/6)
      laby  <- ke:1                  # y positions, top to bottom

      # median single-row group gets pen 7 (black); otherwise 1:ke.
      pen   <- if (i == medGrp & medGrpSize == 1) 7 else 1:ke

      panelSelect(panels, i, j)      # select current panel
      x <- panelScale(rx, c(1 - pad, ke + pad))  # scale so rows sit at 1..ke with half-row margins
      panelFill(col = Panel.Fill.col)

      # Arrows shorter than this (in data units, derived from plot inches)
      # are drawn as dots - avoids zero-length arrow warnings from rounding.
      arrLim <- max(diff(rx) / par("pin") / 1000) * 1.05

      # vertical grid lines
      axis(side = 1, tck = 1, labels = FALSE, at = atRx,
           col = Grid.Line.col, lwd = Grid.Line.lwd)

      # if a refval is provided and in the rx range, add the reference line.
      AddRefLine(refval, ke, rx)

      panelOutline(col = Panel.Outline.col)

      oldpar <- par(lend = "butt")   # butt line ends; restored below

      for (k in 1:ke) {              # one arrow (or dot) per sub-area row
         m <- gsubs[k]               # index into the data vectors
         if (goodrow[m]) {
            if (abs(xdat1[m] - xdat2[m]) > arrLim) {
               # normal arrow
               arrows(xdat1[m], laby[k], xdat2[m], laby[k],
                      col = mstColors[pen[k]],
                      length = Arrow.Head.length, lwd = Arrow.lwd)
            } else {
               # effectively zero length - plot a dot instead.
               plotPoint(xdat1[m], laby[k], Arrow.Dot.pch, mstColors[pen[k]],
                         Arrow.Dot.pch.size, Arrow.Dot.pch.lwd,
                         Arrow.Dot.Outline, Arrow.Dot.Outline.col,
                         Arrow.Dot.Outline.lwd)
            }
         }
      }
      par(oldpar)
      ### end of one Arrow glyph panel (row/group)
   }

   # ____________________________PanelOutline____________________
   groupPanelOutline(panelGroup, j)  # outline full group (column)
}
#
# End of Arrow Glyph
#
#####

#####
#
# type = 'bar' =========================================================
#
# rlAreaBar
#
# Draw one bar-glyph column: a single horizontal bar per sub-area from
# zero to the col1[j] value.  The x scale is forced to include zero.
#
# j = current panel column number.
# col1[j] points to the statsDFrame column holding the bar height from zero.
#
rlAreaBar <- function(j) {

   wstname  <- names(dat)            # names of columns in statsDFrame
   wstdim   <- dim(dat)
   wstMax   <- wstdim[2]             # number of columns in statsDFrame
   ErrFnd   <- FALSE
   pdColNum <- formatC(j, format = "f", digits = 0, width = 2, flag = "0")

   # "col1" - bar length.
   stColName1 <- litcol1[j]
   pdUmsg     <- "(Bar length)"
   xr         <- CheckPDCol('col1', 'BAR', col1[j], stColName1, j, 1, wstMax, dat, pdUmsg)
   if (xr$Err) { ErrFnd <- TRUE } else { xdat1 <- xr$Dat }

   if (ErrFnd) return()              # error warning issued - skip column.

   py <- Bar.barht * c(-.5, -.5, .5, .5, NA)  # basic bar polygon pattern (Bar.barht = 2/3)
   ry <- c(0, 1)

   refval <- lRefVals[j]             # JP-2010/07/23
   reftxt <- lRefTexts[j]            # JP-2010/07/23

   # ________scale x axis________________________
   good <- !is.na(xdat1)
   rx   <- range(xdat1, na.rm = TRUE)
   lPad <- TRUE
   rPad <- TRUE

   # Force the range to include zero; no padding on the zero side.
   if (rx[2] <= 0) {                 # all values <= 0
      rx[2] <- 0
      rPad  <- FALSE
   } else if (rx[1] >= 0) {          # all values >= 0
      rx[1] <- 0
      lPad  <- FALSE
   }
   # (values straddling zero keep padding on both sides)

   # ____________label axis_______________
   Res  <- DrawXAxisAndTitles(j, panels, rx, ry, reftxt, refval,
                              leftPad = lPad, rightPad = rPad)
   atRx <- Res$atRx
   rx   <- Res$rx
   ry   <- Res$ry

   # _______________drawing loop___________________
   for (i in 1:numGrps) {

      gsubs <- ib[i]:ie[i]           # index of elements in panel
      ke    <- length(gsubs)
      pen   <- if (i == medGrp & medGrpSize == 1) 7 else 1:ke
      laby  <- ke:1

      panelSelect(panels, i, j)
      x <- panelScale(rx, c(1 - pad, ke + pad))  # same physical height for 1 to 5/6 rows
      panelFill(col = Panel.Fill.col)

      # grid lines for bar axis
      axis(side = 1, tck = 1, labels = FALSE, at = atRx,
           col = Grid.Line.col, lwd = Grid.Line.lwd)

      # if a refval is provided and in the rx range, add the reference line.
      AddRefLine(refval, ke, rx)

      panelOutline(col = Panel.Outline.col)

      ksc <- SetKsc(ke)              # bar-height scaler so all panels look uniform
      wpy <- py * ksc

      for (k in 1:ke) {
         m <- gsubs[k]
         if (good[m]) {
            val <- xdat1[m]          # bar end value
            # fill, then outline (density=0 draws border only).
            polygon(c(0, val, val, 0, NA), rep(laby[k], 5) + wpy,
                    col = mstColors[pen[k]])
            polygon(c(0, val, val, 0, NA), rep(laby[k], 5) + wpy,
                    col = Bar.Outline.col, lwd = Bar.Outline.lwd, density = 0)
         }
      }

      # re-draw base line of bars once per panel (hoisted out of the k loop;
      # the original redrew the identical line for every row - same image).
      lines(c(0, 0), c(1 - .5 * Bar.barht, ke + .5 * Bar.barht), col = 1)
   }

   # ____________________________PanelOutline____________________
   groupPanelOutline(panelGroup, j)  # outline column of glyphs
}
#
# End of Bar Glyph
#
#####

#####
#
# type = 'boxplot' ======================================================
#
# rlAreaBoxplot
#
# Draw one boxplot-glyph column from a saved boxplot() result list.
#
# j      = current panel column number.
# boxnam = name of the panelData value holding the boxplot structure:
#          a list with $stats (5 x n matrix), $n, $conf, $out, $group,
#          and $names (sub-area keys matching the border group).
#
rlAreaBoxplot <- function(j, boxnam) {

   wstname  <- names(dat)            # names of columns in statsDFrame
   wstdim   <- dim(dat)
   wstMax   <- wstdim[2]
   ErrFnd   <- FALSE
   pdColNum <- formatC(j, format = "f", digits = 0, width = 2, flag = "0")

   # Fetch the boxplot list named by boxnam from the caller's environment.
   boxlist <- tryCatch(get(boxnam, pos = 1), error = function(e) e)

   if (inherits(boxlist, "error")) {
      # could not find the object named in boxnam.
      ErrFnd <- TRUE
      warnCnt()
      xmsg <- paste0("***02B1 BOXPLOT ", pdColNum, " The panelData value of ", boxnam,
                     " in the ", pDName, " data frame does not exist or is not accessible.")
      warning(xmsg, call. = FALSE)
   } else {
      if (!is.list(boxlist)) {
         ErrFnd <- TRUE
         warnCnt()
         xmsg <- paste0("***02B3 BOXPLOT ", pdColNum, " The ", boxnam,
                        " data for the boxplot is not a list.")
         warning(xmsg, call. = FALSE)
      } else {
         lnam <- names(boxlist)      # names of lists in boxlist data
         # fixed: any(is.na(lnam)) - lnam is a vector; `||` on a length>1
         # is.na() result errors in R >= 4.3.
         if (is.null(lnam) || any(is.na(lnam))) {
            ErrFnd <- TRUE
            warnCnt()
            xmsg <- paste0("***02B4 BOXPLOT ", pdColNum, " The ", boxnam,
                           " structure does not have any name attributes for the boxplot data.")
            warning(xmsg, call. = FALSE)
         } else {
            # The correct structure has exactly 6 named elements.
            if (length(lnam) != 6) {
               ErrFnd <- TRUE
               warnCnt()
               xmsg <- paste0("***02B5 BOXPLOT ", pdColNum, " The ", boxnam,
                              " boxplot data is not a valid structure. Must contain 6 boxplot sub lists.")
               warning(xmsg, call. = FALSE)
            } else {
               nbox <- c("stats", "n", "conf", "out", "group", "names")  # boxplot() output names
               if (any(is.na(match(lnam, nbox)))) {
                  ErrFnd <- TRUE
                  warnCnt()
                  xmsg <- paste0("***02B6 BOXPLOT ", pdColNum, " The ", boxnam,
                                 " boxplot data does not contain all of the lists of boxplot function output. ",
                                 "Invalid structure.")
                  warning(xmsg, call. = FALSE)
               } else {
                  # force names to upper case to match the name table keys.
                  boxlist$names <- toupper(boxlist$names)
                  goodn <- !is.na(boxlist$names)
                  nNams <- length(boxlist$names)

                  if (any(!goodn)) {
                     warnCnt()
                     xmsg <- paste0("***02B7 BOXPLOT ", pdColNum, " In the ", boxnam,
                                    " boxplot data, the $name named list contains one or more missing values.")
                     warning(xmsg, call. = FALSE)
                  }
                  # NOTE(review): the original executed `boxlist2 <- boxlist[good,]`
                  # here; `good` is undefined in this scope and a list cannot be
                  # subset with two indices, so the statement is disabled.
                  # boxlist2 <- boxlist[good,]

                  listUNames <- unique(boxlist$names)  # unique area ids used
                  nn  <- length(listUNames)
                  nn2 <- length(boxlist$names)
                  if (nn != nn2) {
                     warnCnt()
                     xmsg <- paste0("***02B8 BOXPLOT ", pdColNum,
                                    "There are duplicate sets of boxplot data for the same sub-area. ",
                                    "Only the first one will be used.")
                     warning(xmsg, call. = FALSE)
                  }
                  bpNumRows <- nn    # number of unique rows of data.

                  # $stats must be 5 values per area x one column per area.
                  # (spelled out $stats - original relied on `$stat` partial matching)
                  nr <- dim(boxlist$stats)[1]
                  if (nr != 5) {
                     ErrFnd <- TRUE
                     warnCnt()
                     xmsg <- paste0("***02BA BOXPLOT ", pdColNum, " The $stats matrix in the ",
                                    boxnam, " boxplot data does not have 5 values per area.")
                     warning(xmsg, call. = FALSE)
                  }
                  nc <- dim(boxlist$stats)[2]
                  if (nc != nNams) {
                     ErrFnd <- TRUE
                     warnCnt()
                     xmsg <- paste0("***02BB BOXPLOT ", pdColNum, " The $stats matrix in the ",
                                    boxnam, " boxplot data must have ", nNams, " elements.")
                     warning(xmsg, call. = FALSE)
                  }

                  goods <- !is.na(colSums(boxlist$stats))  # columns free of missing values
                  if (any(!goods)) {
                     # not a stopping warning - bad sub-areas are skipped when drawing.
                     warnCnt()
                     xmsg <- paste0("***02BC BOXPLOT ", pdColNum, " The $stat matrix in the ",
                                    boxnam, " boxplot data has missing values. ",
                                    "Sub-areas with missing values will not be drawn.")
                     warning(xmsg, call. = FALSE)
                  }

                  # every boxplot name must match a border-group key.
                  tnn <- is.na(match(listUNames, areaDatKey))
                  if (any(tnn)) {
                     ErrFnd <- TRUE
                     warnCnt()
                     # fixed: list the unmatched names (was nn[tnn]; nn is a scalar count).
                     lnn  <- paste0(listUNames[tnn], collapse = " ")
                     xmsg <- paste0("***02BD BOXPLOT ", pdColNum,
                                    " The sub-area names/abbreviations found in the ", boxnam,
                                    " boxplot data $names values do not match the border group names: ", lnn)
                     warning(xmsg, call. = FALSE)
                  }
               }
            }
         }
      }
   }
   if (ErrFnd) return()              # end of basic validation for the BoxPlot glyph.

   refval <- lRefVals[j]
   reftxt <- lRefTexts[j]

   # _______________Good Rows__________
   # goodn and goods should be parallel; report if not (diagnostic only).
   if (length(goodn) != length(goods)) {
      print("good vectors for boxplot - Problem")
      print(goodn)
      print(goods)
   }
   goodAll <- goodn | goods          # name present OR stats complete.

   # _______________Scaling____________
   # (y) box dimensions - horizontal boxes, centered on the row line.
   py     <- c(-.5, -.5, .5, .5)
   thiny  <- BoxP.thin  * py                 # whisker box
   thicky <- BoxP.thick * py                 # hinge box (25%-75%)
   medy   <- BoxP.Median.Line * c(-.5, .5)   # median tick
   ry     <- c(0, 1)

   # _______________Gather stats______________
   # boxplot() output: $stats columns are (low whisker, 25%, median, 75%,
   # high whisker) per area; $out/$group are the outliers; $names links
   # each column to a sub-area key.
   stats <- boxlist$stats
   thin  <- stats[c(1, 5, 5, 1), ]   # whisker extent polygon x values per area
   thick <- stats[c(2, 4, 4, 2), ]   # hinge extent polygon x values per area
   med   <- stats[3, ]               # median per area
   nam   <- boxlist$names

   # fixed: initialize out/group so is.null(out) below cannot fail with
   # "object not found" when there are no outliers.
   out     <- NULL
   group   <- NULL
   outlier <- rep(FALSE, length(med))
   if (length(boxlist$out) > 0) {    # length check - is.null does not work on $out
      out   <- boxlist$out
      group <- boxlist$group         # group and out are the same length
      outlier[unique(group)] <- TRUE # mark areas that have outliers to plot
   }

   # Put boxplot columns in statsDFrame order: zBPord[r] is the boxplot
   # column for statsDFrame row r, NA if that row has no boxplot data.
   zBPord       <- match(dat$RN, nam)
   IndexDattoBP <- zBPord

   if (any(is.na(zBPord))) {
      ErrFnd <- TRUE
      warnCnt()
      xmsg <- paste0("***02BE BOXPLOT ", pdColNum, " There are one or more of rows in the ",
                     sDFName, " that does not have matching boxplot data, (", boxnam, ") entries.")
      warning(xmsg, call. = FALSE)
      wx   <- is.na(zBPord)
      xmsg <- paste0("***02BF BOXPLOT ", pdColNum, " The missing sub-areas are: ",
                     paste0(areaDatAbbr[wx], collapse = ", "))
      warning(xmsg, call. = FALSE)
   }

   # ___________ scale x axis_______________
   lPad <- TRUE
   rPad <- TRUE
   if (is.null(out)) {
      rx <- range(stats, na.rm = TRUE)        # no outliers - range on stats only
   } else {
      rx <- range(stats, out, na.rm = TRUE)   # include outliers in the range
   }
   dx <- diff(rx) / 200
   px <- c(-dx, -dx, dx, dx)

   # ____________titles and labeling axes_______________
   Res  <- DrawXAxisAndTitles(j, panels, rx, ry, reftxt, refval,
                              leftPad = lPad, rightPad = rPad)
   atRx <- Res$atRx
   rx   <- Res$rx
   ry   <- Res$ry

   # _______________drawing loop___________________
   oldpar <- par(lend = "butt")
   for (i in 1:numGrps) {

      gsubs <- ib[i]:ie[i]
      ke    <- length(gsubs)
      pen   <- if (i == medGrp & medGrpSize == 1) 7 else 1:ke
      laby  <- ke:1
      ksc   <- SetKsc(ke)

      panelSelect(panels, i, j)
      panelScale(rx, c(1 - pad, ke + pad))
      panelFill(col = Panel.Fill.col)

      axis(side = 1, tck = 1, labels = FALSE, at = atRx,
           col = Grid.Line.col, lwd = Grid.Line.lwd)
      AddRefLine(refval, ke, rx)
      panelOutline(col = Panel.Outline.col)

      for (k in 1:ke) {
         m <- zBPord[gsubs[k]]       # boxplot column for this sub-area
         if (is.na(m)) next          # no boxplot data - skip drawing
         if (goodAll[m]) {
            kp <- pen[k]             # color number
            ht <- laby[k]            # row y position

            # outlier rings first, so the box draws over them.
            if (outlier[m]) {
               vals <- out[group == m]
               if (colFull) {
                  points(vals, rep(ht, length(vals)), pch = 1,
                         col = ifelse(BoxP.Use.Black, "black", mstColors[kp]),
                         cex = BoxP.Outlier.cex, lwd = BoxP.Outlier.lwd)
               } else {
                  # greyscale output
                  points(vals, rep(ht, length(vals)), pch = 1,
                         col = BoxP.Outlier.BW.col,
                         cex = BoxP.Outlier.cex, lwd = BoxP.Outlier.lwd)
               }
            }

            # thin whisker box, thick hinge box, then the median tick.
            wthiny <- thiny * ksc
            polygon(thin[, m], rep(ht, 4) + wthiny, col = mstColors[kp], border = NA)
            wthicky <- thicky * ksc
            polygon(thick[, m], rep(ht, 4) + wthicky, col = mstColors[kp], border = NA)
            segments(med[m], ht + medy[1], med[m], ht + medy[2],
                     col = BoxP.Median.col, lwd = BoxP.Median.Dot.lwd)
         }
      }
   }
   par(oldpar)

   # ____________________________PanelOutline____________________
   groupPanelOutline(panelGroup, j)
}
#
# End of Box Plot Glyph
#
#####

#####
#
# type = 'ctrbar' ====================================
#
# rlAreaCtrBar (Centered Bar chart)
#
# A stacked bar chart whose middle segment is centered on zero, extending
# half its value in each direction; the other segments stack left/right.
# Each area must supply the same number of segment values (2 to 9),
# taken from contiguous statsDFrame columns col1[j]..col2[j].
#
# j = current panel column number.
#
rlAreaCtrBar <- function(j) {

   wstname  <- names(dat)            # names of columns in statsDFrame
   wstdim   <- dim(dat)
   wstMax   <- wstdim[2]
   ErrFnd   <- FALSE
   pdColNum <- formatC(j, format = "f", digits = 0, width = 2, flag = "0")

   # "col1" - first data column.
   stColName1 <- wstname[col1[j]]
   # fixed: was assigned to pdUMsg (capital M) while pdUmsg was passed below.
   pdUmsg <- "(First data column)"
   xr     <- CheckPDColnCN('col1', 'CTRBAR', col1[j], stColName1, j, 1, wstMax, dat, pdUmsg)
   if (xr$Err) { ErrFnd <- TRUE }    # CheckPDColnCN returns no $Dat.

   # "col2" - last data column.
   stColName2 <- wstname[col2[j]]
   pdUmsg <- "(Last data column)"    # fixed: same pdUMsg/pdUmsg case mismatch.
   xr     <- CheckPDColnCN('col2', 'CTRBAR', col2[j], stColName2, j, 2, wstMax, dat, pdUmsg)
   if (xr$Err) { ErrFnd <- TRUE }

   if (!ErrFnd) {
      if (col1[j] >= col2[j]) {
         ErrFnd <- TRUE
         warnCnt()
         # fixed wording: "must precede" (was "must proceed").
         xmsg <- paste0("***020A CTRBAR ", pdColNum, " The first column name/number (",
                        stColName1, ") ", "must precede the last column name/number (",
                        stColName2, ") in the ", sDFName, " data frame.")
         warning(xmsg, call. = FALSE)
      } else {
         wD <- (col2[j] - col1[j] + 1)   # number of data columns (segments)
         if (wD < 2 || wD > 9) {
            ErrFnd <- TRUE
            warnCnt()
            xmsg <- paste0("***020B CTRBAR ", pdColNum, " The number of segments is ", wD,
                           ". It must be between 2 and 9. If over 9, only the first 9 will be used.")
            warning(xmsg, call. = FALSE)
         }
      }
   }
   if (ErrFnd) return()              # column range missing or invalid - skip.

   # Validate/convert the segment data columns.
   stColNums <- c(col1[j]:col2[j])
   workCB    <- dat[, stColNums]     # bar segment data from the statsDFrame
   colNums   <- c(1:dim(workCB)[2])
   for (ind in colNums) {
      iC       <- stColNums[ind]     # statsDFrame column number
      stColNam <- wstname[iC]
      F_ind    <- formatC(ind, format = "f", digits = 0, width = 1)
      segNam   <- paste0("seg", F_ind)
      pdUmsg   <- paste0("(Bar segment ", F_ind, " length)")
      x <- CheckNum(workCB[, ind], 'CTRBAR', ind, pdColNum, segNam, stColNam, pdUmsg)
      if (x$Err) { ErrFnd <- TRUE } else { workCB[, ind] <- x$Dat }
   }
   # NOTE(review): matches the original - drawing proceeds even if a segment
   # column failed conversion; rows with NAs are skipped via `good` below.
   good <- !is.na(rowSums(workCB))   # rows with a complete set of segments

   refval <- lRefVals[j]
   reftxt <- lRefTexts[j]

   # _______________Gather stats and put in area order______________
   # Sorting was already applied to the statsDFrame; segment rows follow it.
   workMatCB <- as.matrix(workCB)
   CBLen     <- apply(workMatCB, 1, length)
   CBLRange  <- range(CBLen, na.rm = TRUE)
   NumSegs   <- CBLRange[2]          # number of segments per bar
   CBBarPt   <- cbind(rep(0, numRows), workMatCB)
   CBBarPt   <- t(apply(CBBarPt, 1, cumsum))   # cumulative segment boundaries

   # _____________ Color Patterns _______________
   # shaded versions of the base colors, one shade per segment.
   baseColRgb <- BuildSegColors(NumSegs)

   # _____________Centering_____________
   CtrSeg <- as.integer(NumSegs / 2) + 1       # center segment
   if ((NumSegs %% 2) != 0) {
      # odd number of segments - center the middle segment on zero.
      CtrPt <- workMatCB[, CtrSeg] / 2 + CBBarPt[, CtrSeg]
   } else {
      # even number of segments - the boundary between halves sits at zero.
      CtrPt <- CBBarPt[, CtrSeg]
   }
   CBPlotPts <- CBBarPt - CtrPt

   # _______________Scaling____________
   lPad <- TRUE
   rPad <- TRUE
   rx   <- range(CBPlotPts, na.rm = TRUE)
   ry   <- c(0, 1)

   pyPat <- c(-.5, -.5, .5, .5, NA)
   py    <- CSNBar.barht * pyPat     # fixed-height bar pattern
   # variable bar height: step from First.barht to Last.barht across segments.
   wYPdelta <- (CSNBar.Last.barht - CSNBar.First.barht) / (NumSegs - 1)
   wYP1     <- CSNBar.First.barht - wYPdelta

   # ____________titles and labeling axes_______________
   Res  <- DrawXAxisAndTitles(j, panels, rx, ry, reftxt, refval,
                              leftPad = lPad, rightPad = rPad)
   atRx <- Res$atRx
   rx   <- Res$rx
   ry   <- Res$ry

   # ___________________drawing loop_____________________
   oldpar <- par(lend = "butt")
   for (i in 1:numGrps) {

      gsubs <- ib[i]:ie[i]
      ke    <- length(gsubs)
      pen   <- if (i == medGrp & medGrpSize == 1) 7 else 1:ke
      laby  <- ke:1
      ksc   <- SetKsc(ke)

      panelSelect(panels, i, j)
      x <- panelScale(rx, c(1 - pad, ke + pad))
      panelFill(col = Panel.Fill.col)
      axis(side = 1, tck = 1, labels = FALSE, at = atRx,
           col = Grid.Line.col, lwd = Grid.Line.lwd)
      AddRefLine(refval, ke, rx)

      # one stacked bar per sub-area row.
      for (k in 1:ke) {
         m <- gsubs[k]
         if (good[m]) {
            wX    <- CBPlotPts[m, ]  # segment boundary x values for this row
            wYP   <- rep(laby[k], 5)
            wYPht <- wYP1
            for (ik in 1:NumSegs) {
               # y extent of this segment's box.
               if (CBar.varht) {
                  wYPht <- wYPht + wYPdelta       # variable height segments
                  wYP2  <- wYP + ((pyPat * wYPht) * ksc)
               } else {
                  wYP2  <- wYP + (py * ksc)       # fixed height segments
               }
               # x extent of this segment's box.
               val0 <- wX[ik]
               val1 <- wX[ik + 1]
               wXP  <- c(val0, val1, val1, val0, NA)
               polygon(wXP, wYP2, col = baseColRgb[pen[k], ik],
                       lwd = CSNBar.Outline.lwd,
                       border = CSNBar.Outline.col,
                       lty = CSNBar.Outline.lty)
            }
         }
      }

      # vertical zero line and panel outline to finish the panel.
      lines(rep(0, 2), c(1 - padMinus, ke + padMinus),
            lty = CBar.Zero.Line.lty, lwd = CBar.Zero.Line.lwd,
            col = CBar.Zero.Line.col)
      panelOutline(Panel.Outline.col)
   }
   par(oldpar)

   # ____________________________PanelOutline____________________
   groupPanelOutline(panelGroup, j)
}
#
# End of ctrbar Glyph
#
#####
# NA check: if any NA exists in the series from col1 to col2, then the
# stacked bar for that sub-area will not be drawn.
#
#####
#
#  type = 'dot' and 'dotsignif' =====================================================
#
#  rlAreaDot
#
#  Draws one dot per area row.  When dSignif=TRUE (the 'dotsignif' variant)
#  a second statsDFrame column of P-values is read and any dot whose P-value
#  exceeds Dot.Signif.pvalue is over-printed with the significance symbol.
#

rlAreaDot = function(j,dSignif=FALSE){
   #  j       = current panel column number
   #  dSignif = FALSE -> plain 'dot' glyph;
   #            TRUE  -> 'dotsignif' glyph (col2[j] supplies P-values).
   #
   #  Single Dot, no extra line or interval:
   #    col1[j] points to the statsDFrame column holding the dot value.
   #  OR
   #  Single Dot with significance over-print, no extra line or interval:
   #    col1[j] points to the statsDFrame column holding the dot value.
   #    col2[j] points to the P-value column - if P-value > Dot.Signif.pvalue
   #            the significance symbol is over-printed on the dot.
   #

   wstname   <- names(dat)          # names of columns in statsDFrame
   wstdim    <- dim(dat)
   wstMax    <- wstdim[2]           # number of columns in statsDFrame

   ErrFnd    <- FALSE
   pdColNum  <- formatC(j,format="f",digits=0,width=2,flag="0")

   dotMsgHdr <- "DOT"
   if (dSignif) dotMsgHdr <- "DOTSIG"

   good1     <- TRUE                # vector length of xdat1. TRUE = not NA, FALSE = NA.
   good2     <- TRUE
   goodrow   <- TRUE

   # "col1" - the dot value column.
   stColNum1  <- col1[j]
   stColName1 <- wstname[stColNum1]
   pdVarName1 <- "col1"
   pdUmsg     <- "(Dot value)"

   xr <- CheckPDCol(pdVarName1, dotMsgHdr, stColNum1, stColName1, j, 1, wstMax, dat, pdUmsg)

   if (xr$Err) {
      ErrFnd <- TRUE
   } else {
      xdat1  <- xr$Dat              # get column of data (xr$Dat returned by CheckPDCol)
      good1  <- !is.na(xdat1)
   }

   if (dSignif) {
      # "col2" - the P-value column (dotsignif only).
      stColNum2  <- col2[j]
      # FIX: was 'wSFName[stColNum2]'; every sibling lookup in these glyphs
      # uses 'wstname' (names(dat)) built above - 'wSFName' is not defined here.
      stColName2 <- wstname[stColNum2]
      pdVarName2 <- 'col2'
      pdUmsg     <- "(Confidence P-Values)"

      xr <- CheckPDCol(pdVarName2, dotMsgHdr, stColNum2, stColName2, j, 2, wstMax, dat, pdUmsg)

      if (xr$Err) {
         ErrFnd <- TRUE
      } else {
         xdat2  <- xr$Dat           # get column of data.
         good2  <- !is.na(xdat2)    # some may be missing, but we have some, check range.
         if (any(xdat2[good2] > Dot.Signif.range[2] | xdat2[good2] < Dot.Signif.range[1] )) {
            # Some values are out of range.  Allow missing values in the data
            # column - send a warning but do not stop plotting the glyph.
            warnCnt()
            xmsg <- paste0("***022Q", dotMsgHdr, " ", pdColNum,
                           " One or more P_value data entries in the ", stColName2,
                           " for the panelDesc ", pdVarName2 ," variable are out of range." )
            warning(xmsg, call.=FALSE)
         }
      }
   }

   # Change 7/24/15 - allow missing values in a column for a row.
   # Change 7/24/15 - if not signif, copy good1 to good2.
   # Change 7/24/15 - plot row only if both data columns are not NA.
   if (!dSignif) {
      goodrow <- good1              # dot function
   } else {
      goodrow <- good1 & good2      # dotsignif function
   }

   if (ErrFnd) return ()            # error/warning found and can't plot glyph - return

   #____________ref values_____________
   refval <- lRefVals[j]            # get reference value for this column
   reftxt <- lRefTexts[j]           # new - JP-2010/07/23

   # FIX: was 'xps <= par("ps")' - a discarded comparison, not an assignment;
   # the (commented) trace below prints xps, so assignment was intended.
   xps <- par("ps")
   #cat("dot-par(ps):",xps,"\n")

   #_____________y axis________________
   ry <- c(0,1)

   #____________scale x axis______________________
   lPad <- TRUE
   rPad <- TRUE
   rx   <- range(xdat1,na.rm=TRUE)

   # ____________labeling axis_______________
   #####
   #
   # Setup and draw top and bottom titles and axis for dot and dotsignif
   # glyph column.  Padding for the dot, regardless if zero is left or right.
   #
   Res  <- DrawXAxisAndTitles(j, panels, rx, ry, reftxt, refval, leftPad=lPad, rightPad=rPad)
   atRx <- Res$atRx
   rx   <- Res$rx
   ry   <- Res$ry

   xps <- par("ps")                 # FIX: was 'xps <= par("ps")' (see above)
   #cat("dot-par(ps)2:",xps,"\n")

   #
   # Basic validation and setup done for dot and dotsignif glyph
   #
   #####

   # _______________drawing loop___________________
   for (i in 1:numGrps){
      gsubs <- ib[i]:ie[i]
      ke    <- length(gsubs)
      # median group of a single row uses pen 7 (black); otherwise 1..ke.
      pen   <- if (i==medGrp && medGrpSize == 1) 7 else 1:ke
      laby  <- ke:1

      panelSelect(panels,i,j)
      x <- panelScale(rx,c(1-pad,ke+pad))
      panelFill(col=Panel.Fill.col)

      # grid lines (at= added 7/24/15 so the grid matches the tick positions)
      axis(side=1, tck=1, labels=FALSE, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd)

      # if a refval is provided and in the rx range, then add line.
      AddRefLine(refval, ke, rx)

      panelOutline(Panel.Outline.col)

      for (k in 1:ke) {                 # step through values for this panel
         m <- gsubs[k]
         if (goodrow[m]) {
            # goodrow reflects both columns of data (7/24/15);
            # can't plot dot if the signif data is missing.
            plotPoint(xdat1[m], laby[k], Dot.pch, mstColors[pen[k]], Dot.pch.size,
                      Dot.pch.lwd, Dot.Outline, Dot.Outline.col, Dot.Outline.lwd)

            if (dSignif) {
               if (xdat2[m] > Dot.Signif.pvalue) {
                  dsCol <- Dot.Signif.pch.col
                  # if color is NA, then follow color for the row.
                  if (is.na(dsCol)) {
                     dsCol <- mstColors[pen[k]]
                  }
                  plotPoint(xdat1[m], laby[k], Dot.Signif.pch, dsCol,
                            Dot.Signif.pch.size, Dot.Signif.pch.lwd,
                            Dot.Signif.Outline, Dot.Signif.Outline.col,
                            Dot.Signif.Outline.lwd)
               }
            }
         }
      }    # end of k loop
   }       # end of i loop

   # ____________________________PanelOutline____________________
   groupPanelOutline(panelGroup,j)
}
#
# End of dot and dotsignif glyphs
#
#####

#####
#
#  type = 'dotconf' ====================================================
#
#  rlAreaDotConf
#
rlAreaDotConf <- function(j){
   #  j is the current panel column index
   #
   # col1 indicates the column number for the dot value in the statsDFrame.
# col2 indicates the column number for the lower confidence value in the statsDFrame. # col3 indicates the column number for the upper confidence value in the statsDFrame. #cat("\nDotConf:","\n") wstname <- names(dat) # names of columns in statsDFrame wstdim <- dim(dat) wstMax <- wstdim[2] # number of columns in statsDFrame ErrFnd <- FALSE pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0") # "col1" stColNum1 <- col1[j] stColName1 <- wstname[stColNum1] pdVarName1 <- 'col1' pdUmsg <- "(Dot value)" xr <- CheckPDCol(pdVarName1, 'DOTCONF', stColNum1, stColName1, j, 1, wstMax, dat, pdUmsg) if (xr$Err) { ErrFnd <- TRUE } else { xmn <- xr$Dat # get column of data (x$Dat returned by CheckPDCol) good1 <- !is.na(xmn) } # "col2" stColNum2 <- col2[j] stColName2 <- wstname[stColNum2] pdVarName2 <- 'col2' pdUmsg <- "(Lower Confidence Value)" xr <- CheckPDCol(pdVarName2, 'DOTCONF', stColNum2, stColName2, j, 2, wstMax, dat, pdUmsg) if (xr$Err) { ErrFnd <- TRUE } else { lower <- xr$Dat # get column of data (x$Dat returned by CheckPDCol) good2l <- !is.na(lower) } # "col3" stColNum3 <- col3[j] stColName3 <- wstname[stColNum3] pdVarName3 <- 'col3' pdUmsg <- "(Upper Confidence Value)" xr <- CheckPDCol(pdVarName3, 'DOTCONF', stColNum3, stColName3, j, 3, wstMax, dat, pdUmsg) if (xr$Err) { ErrFnd <- TRUE } else { upper <- xr$Dat # get column of data (x$Dat returned by CheckPDCol) good2u <- !is.na(upper) } if (ErrFnd) return () # error warning found - return #cat("dotconf: data OK - plot","\n") # setup column good arrays # xmn <- dat[,col1[j]] # Col 1 = DOT - median/mean # lower <- dat[,col2[j]] # Col 2 = lower # upper <- dat[,col3[j]] # Col 3 = upper good2 <- !is.na(upper+lower) goodrow <- good1 & good2l & good2u # sum of all checks. 
refval <- lRefVals[j] # changed to lRefVals, JP-2010/07/23 reftxt <- lRefTexts[j] # new - JP-2010/07/23 # Select the first panel in column to allow code to reference its characteristics panelSelect(panels, 1, j) #x <- panelScale(rx, ry) #par(xpd=T) #_____________ y axis ____________________ ry <- c(0,1) #_____________scale x axis________________ lPad <- TRUE rPad <- TRUE rx <- range(upper,lower,xmn,na.rm=TRUE) #cat("dotConf-rx:",rx,"\n") # # NOW DONE in DrawXAxisAndTitle # # dealing with a dot, so padding should be 1/2 width of dot in rx units. #wP <- par("pin")[1] # width of panel #wD <- strwidth(" ",cex=Dot.Conf.pch.size)/2 # get 1/2 of character width #rwD <- (wD/wP) * diff(rx) # dot width as percentage of panel width "times" number of x units to graph #rx <- rx + c(-rwD,rwD) # make room for dot and no more. #cat("dotconf - dot adjust - widthPanel:",wP," widthSp:",wD," diff(rx):",diff(rx)," rwD:",rwD,"\n") # The above is not done in DrawXAxis... # x may not be needed??? #rx_old <- sc*diff(rx)*c(-.5,.5)+mean(rx) #cat("dotConf-rx after padding:",rx," old way:",rx_old,"\n") # ____________labeling axes_______________ ##### # # Setup and draw top and bottom titles and axis for column # # padding on left and right for confidence and dot. 
# #cat("DotConf-calling DrawXAxisAndTitles","\n") Res <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad) atRx <- Res$atRx rx <- Res$rx ry <- Res$ry #cat("DotConf-back from DrawXAxisAndTitles","\n") #cat("dotconf-Result staggered:",staggered,"\n") # # Basic setup and validation done for dotconf glyph # ##### #cat("Dot.Conf.pch:",Dot.Conf.pch," Dot.Conf.pch.size:",Dot.Conf.pch.size, # " Dot.Conf.Outline:",Dot.Conf.Outline,"\n") doDotOutline <- Dot.Conf.Outline #cat("doDotOutline:",doDotOutline,"\n") #cat("Dot.Conf.Outline.lwd:",Dot.Conf.Outline.lwd," .col:",Dot.Conf.Outline.col,"\n") #cat("dotconf - drawing loop col:",j,"\n") #_____________drawing loop___________________ for (i in 1:numGrps){ gsubs <- ib[i]:ie[i] ke <- length(gsubs) pen <- if(i==medGrp & medGrpSize == 1 ) 7 else 1:ke laby <- ke:1 panelSelect(panels,i,j) panelScale(rx,c(1-pad,ke+pad)) # Adjusted scale for interior panelFill(col=Panel.Fill.col) axis(side=1, tck=1, labels=F, at=atRx, # change 7/24/15 - add at= to get grids at the same points as the ticks col=Grid.Line.col, lwd=Grid.Line.lwd) # vertical grid lines # if a refval is provided and in the rx range, then add line. AddRefLine(refval, ke, rx) panelOutline(col=Panel.Outline.col) # outline scaled image. for (k in 1:ke){ m <- gsubs[k] if (goodrow[m]) { # if valid upper value. # 7/25/15 changed to goodrow and covered all plotting. # draw confidence line. lines(c(lower[m],upper[m]), rep(laby[k],2), col=mstColors[pen[k]], lwd=Dot.Conf.lwd) # plot dot. #cat("m:",m," lower:",lower[m]," upper[m]:",upper[m], # " k:",k," laby[k]:",laby[k]," pen[k]:",pen[k],"\n") #cat("Dot.Conf.lwd:",Dot.Conf.lwd,"\n") # #cat("xmn[m]:",xmn[m],"\n") # # doDotOutline - mostly related to black and white printing. # However, users can also request it. # # 0:25 pch's are at 75% of cex. # 0:18 S compatible, vector symbols - uses lwd(lines), col(borders & fill) # 1, 10, 13, 16 are circles. # 15:18 filled characters have no borders. 
# 0:14 line drawings # 15:18 fills, but no lines (lwd not used, but col is the fill, not bg) # 19:25 R vector symbols - uses lwd(lines-borders), col(border), bg(fill) # 26:31 not used # 32:127 Ascii Char # 128:255 local characters. # # The issue not is these points are written for 19:25 not the other. # if 19:25 then bg = fill color, col = border color, lwd = weight of border, # pchValue <- Dot.Conf.pch pchOutline <- Dot.Conf.Outline # enable outline of 19:25 characters. plotPoint(xmn[m], laby[k], Dot.Conf.pch, mstColors[pen[k]], Dot.Conf.pch.size, Dot.Conf.pch.lwd, Dot.Conf.Outline, Dot.Conf.Outline.col, Dot.Outline.lwd ) } } # end of k loop } # end of i loop # ____________________________PanelOutline____________________ groupPanelOutline(panelGroup,j) #cat("DotConf: END.\n") } # # End of dotconf glyph # ##### ##### # # type = 'dotse' ======================================================= # # rlAreaDotSe # rlAreaDotSe = function(j){ # j = current panel column # # col1 indicates the column number for the dot value in the stamicroteFrame. # col2 indicates the column number for the SE value in the statsDFrame. wstname <- names(dat) # names of columns in statsDFrame wstdim <- dim(dat) wstMax <- wstdim[2] # number of columns in statsDFrame ErrFnd <- FALSE pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0") # "col1" stColName1 <- wstname[col1[j]] pdUmsg <- "(Dot Value)" xr <- CheckPDCol('col1', 'DOTSE', col1[j], stColName1, j, 1, wstMax, dat, pdUmsg) if (xr$Err) { ErrFnd = TRUE } else { xdat1 <- xr$Dat } # "col2" stColName2 <- wstname[col2[j]] pdUmsg <- "{Standard Error Value)" xr <- CheckPDCol('col2', 'DOTSE', col2[j], stColName2, j, 2, wstMax, dat, pdUmsg) if (xr$Err) { ErrFnd = TRUE } else { xdat2 <- xr$Dat } if (ErrFnd) return () # error warning found - return good1 <- !is.na(xdat1) good2 <- !is.na(xdat2) goodrow <- good1 & good2 # get sum of the checks - both must be their to plot dot and Se. 
zval <- qnorm(.5+Dot.SE/200) inc <- zval * xdat2 upper <- xdat1 + inc lower <- xdat1 - inc if (ErrFnd) return () # error warning found - return #______________Ref data______________ refval <- lRefVals[j] # changed to lRefVals, JP-2010/07/23 reftxt <- lRefTexts[j] # new - JP-2010/07/23 #______________y range_______________ ry <- c(0,1) #_______________scale x axis__________________ lPad <- TRUE rPad <- TRUE rx <- range(upper,lower,xdat1,na.rm=TRUE) # use upper, lower and xdat1 to find "range" of x # x may not be needed at all. But best to leave. #cat("dotSE-rx:",rx,"\n") #rx <- sc * diff(rx) * c(-.5,.5) + mean(rx) #cat("dotSE-rx after padding:",rx,"\n") # ____________labeling axes_______________ ##### # # Setup and draw top and bottom titles and axis for column # # Padding on left and right for dot and confidence # Res <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad) atRx <- Res$atRx rx <- Res$rx ry <- Res$ry #cat("dotSE-Result staggering:",staggering," staggered:",staggered,"\n") # # Setup and validation for dotse glyph completed. # ##### #__________________drawing loop________________ for (i in 1:numGrps) { gsubs <- ib[i]:ie[i] ke <- length(gsubs) pen <- if(i==medGrp & medGrpSize == 1 ) 7 else 1:ke laby <- ke:1 panelSelect(panels,i,j) x <- panelScale(rx,c(1-pad,ke+pad)) panelFill(col=Panel.Fill.col) axis(side=1, tck=1, labels=F, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd) # grid lines - 7/24/15 add at=atRx to force Grid line to match ticks. # if a refval is provided and in the rx range, then add line. AddRefLine(refval, ke, rx) panelOutline(Panel.Outline.col) for (k in 1:ke){ m <- gsubs[k] # change 7/24/15 - only plot glyph if both data column are not NA. if (goodrow[m]) { # if all values are good # confidence interval based on SE - line . 
lines(c(lower[m],upper[m]), rep(laby[k], 2), col=mstColors[pen[k]],lwd=Dot.SE.lwd) plotPoint(xdat1[m], laby[k], Dot.SE.pch, mstColors[pen[k]], Dot.SE.pch.size, Dot.SE.pch.lwd, Dot.SE.Outline, Dot.SE.Outline.col, Dot.SE.Outline.lwd ) } } } # ____________________________PanelOutline____________________ groupPanelOutline(panelGroup,j) } # # End of dotse glyph # ##### ##### # # type = 'id' ======================================================= # # rlAreaID # rlAreaID = function(j){ # j = panel column number #_____________ Scaling ______________________ # get corners for the boxes. rx <- c(0,diff(panels$coltabs[j+1,])) # column width in inches - index to coltabs is +1 the column number ry <- c(0,1) # not inches, but 0-1 #______________________panel labels_____________ panelSelect(panels,1,j) # start at I = 1, but j= is the current column. x <- panelScale(rx,ry) # # ID text set based on Text.cex.. for 12 point text in a 3/4 to over 1" height boxes. # xusr <- par("usr") # base decision on first panel - they should all be the same. xpin <- par("pin") IDcex.mod <- Id.Cex.mod # get multiplier based on 12 pt. pchSize <- Id.Text.cex * IDcex.mod * Id.Dot.cexm if (xpin[2] < 0.75) { # panel height is getting smaller. reduce text and symbol size. IDcex.mod <- (1 - (( 1 - xpin[2]/0.75 ) ^ 2 )) # get ratio. #cat(" IDcex.mod change from 1 to :",IDcex.mod,"\n") } ### request to lower title into axis label space. xLab1 <- banner["id","H2"] xLab2 <- banner["id","H3"] if (xLab2 == "") { xLab2 <- xLab1 xLab1 <- "" } # column titles if (xLab1 != "") { mtext(xLab1,side=3,line=Id.Title.1.pos,cex=Text.cex) } mtext(xLab2,side=3,line=Id.Title.2.pos,cex=Text.cex) widthPanel <- xpin[1] # inches widthxLab2 <- strwidth(xLab2,units="inch",cex=Text.cex) # one label for ID column. It's centered, so use 1/2 of the width. lastLab2Space <<- ( widthPanel + colSepGap - widthxLab2 ) / 2 # pos - space (have), neg - overhang (need). 
#cat("ID - widthPanel:",widthPanel," width xLab2:",widthxLab2," lastLab2Space:",lastLab2Space," staggered:",staggered,"\n") # ______Bottom Label/Title - Lab3 ______ lastLab3Space <<- ( widthPanel + colSepGap ) / 2 if (lab3[j] != "") { panelSelect(panels,numGrps,j) x <- panelScale(rx,ry) # bottom of column footnote (title) mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex) # bottom labels. widthxLab3 <- strwidth(lab3[j],units="inch", cex=Text.cex) lastLab3Space <<- ( widthPanel + colSepGap - widthxLab3 ) / 2 } #_____________________Square Sizing and Symbol Placement # square width # xstart = Id.Start # inches from left margins #### idstart = 0.137 # inches from base line (not relative) (appears to be replaced below..) TextH2 <- max(strheight(areaDatIDNames,units="inch",cex=(Id.Text.cex * IDcex.mod) )) / 2 # maximum length value /2 par(pch = Id.Dot.pch) # set up the character. #______________________main loop________________ # Cycle thought the GROUPS (numGrps) for (i in 1:numGrps){ npad <- ifelse((i==medGrp & medGrpSize == 1),0.57,pad) # single row = 0.57, or pad list for multiple rows. gsubs <- ib[i]:ie[i] # first element of group to last element of group. ke <- length(gsubs) # number of elements. (rows per group) # since each panel may have different number of rows, this now must be done for each group. ryusr <- c(1-npad,ke+npad) # set scale for the number of rows in group, plus padding. # y axis value = 1 to nRows.. laby <- ke:1 # y index vector - like 5:1 for 5 areas per panel/row. # ke is the number of area per panel/row. pen <- if(i==medGrp & medGrpSize == 1 ) 7 else 1:ke panelSelect(panels,i,j) # select and setup panel for this group of rows. 
x <- panelScale(rx,ryusr) gnams <- areaDatIDNames[gsubs] xusr <- par("usr") xpin <- par("pin") xUnitsPerInch <- diff(xusr[1:2]) / xpin[1] # x units per inch yUnitsPerInch <- diff(xusr[3:4]) / xpin[2] # y units per inch #cat("xUPI:",xUnitsPerInch," usr:",xusr," xpin:",xpin," TextH2:",TextH2,"\n") xHalfSym <- ((Id.Dot.width * Id.Cex.mod) + Id.Space)/2 * xUnitsPerInch xStartu <- xHalfSym # ID offset in units. (a little more than 1/2 width of symbole xSymWu <- xHalfSym - 0.25*Id.Space # ID symbol now in units. #cat("xStartu:",xStartu," xHalfSym:",xHalfSym,"\n") xPosu <- rep(xStartu,ke) xPos2u <- xPosu + xSymWu yPosu <- laby yPos2u <- laby - TextH2 * 0.3 * yUnitsPerInch # offset down by half the height #cat("xPosu:",xPosu," xPos2u:",xPos2u,"\n") #cat("yPosu:",yPosu," yPos2u:",yPos2u,"\n") #cat("Id.Text.cex:",Id.Text.cex," IDcex.mod:",IDcex.mod," prod:",(Id.Text.cex * IDcex.mod),"\n") text(xPos2u, yPos2u, gnams, cex=(Id.Text.cex * IDcex.mod ), xpd=T, pos=4) # Note: the xPosu and yPosu coordinates is the center of the point not the starting edge of a character. plotPoint(xPosu, yPosu, Id.Dot.pch, mstColors[pen], Id.Dot.cexm, "black", TRUE, "black", Id.Dot.lwd ) } # No reference values for this type of column # as we exit loop, we are in the last panel.. xpin <- par("pin") lastLab3Space <<- xpin[1]/2 if (lab3[j] != "") { #panelSelect(panels,numGrps,j) #x <- panelScale(rx,ry) # ______Bottom Label/Title - Lab3 ______ mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex) # bottom column titles lastLab3Space <<- ( xpin[1] - strwidth(lab3[j], units="inch", cex=Text.cex) ) / 2 } } # # End of id glyph # ##### ###### how to get right abbr or full ######## ##### MAP glyphs ##### # # General Notes: # # Data is always represented in the areaDatKey[] vector order. The sDFdat data.frame has been pre-sorted per "sortVar". # # VisKey contains the list of keys per polygons in areaVisBorders # VisCol should contain the mstColors for each polygon to allow a single "polygon" print. 
#
# General Map Notes (shared by the map, mapcum, mapmedian and maptail glyphs):
#
#   NotUsed = T/F list of sub-areas not referenced in the data.
#   back    = T/F list of not-active (background) sub-areas.
#   high    = T/F list of secondary sub-areas (not active, background, or Not Used)
#             (Color = pale Yellow):
#               map       -> not used
#               mapcum    -> accumulative list, colored pale yellow
#               mapmedian -> areas below or above the median value (two colors,
#                            crossed in the median group)
#               maptail   -> accumulative list to the median, then subtractive to the end
#   highU   = T/F list of above-median sub-areas (not active or Not Used) (color = pale red)
#   highL   = T/F list of below-median sub-areas (not active or Not Used) (color = pale blue)
#   gnams   = list of active sub-areas, colored to match the links.
#   VisCol  = list of polygon keys (color index, one entry per polygon).
#
#   Map.Hdr1 and Map.Hdr2:
#     Map.Hdr2 -> type of sub-areas (Counties, Provinces, States, etc.)
#     Map.Hdr1 -> top title in "map" (reserved)
#
#   Column titles by glyph type:
#               map           mapcum                  mapmedian               maptail
#     1)                      Cumulative Maps         Median Based Contours
#     2)      Highlighted     b zzzzz Featured Above  b zzzzz Featured Above  Two Ended Cumulative Maps
#     3)      States          b zzzzz Featured Below  b zzzzz Featured Below  zzzzz Highlighted
#
#             Map.Hdr2        Map.Hdr2, X "Featured Above"   Map.Hdr2 X "Featured Above"   Map.Hdr2 "Highlighted"
#                             Map.Hdr2, X "Featured Below"   Map.Hdr2 X "Featured Below"
#
#   "Median For Sorted Panel" - calculate the width of each phrase.
#
#####
#
# type = 'map' =========================================================
#
# rlAreaMap - draws one glyph column (j) of small maps, one map per group/row panel.
# Works using area abbreviations (keys):
#   bnd.ord    gives the abbreviations in the order the boundary polygons are stored;
#   areaDatKey gives the abbreviations in the (sorted) order the data rows are plotted.
# Sub-areas are colored only when associated with the active rows of a panel.
# j = column number, i = group/row number.
# NOTE(review): relies on many package-level globals (panels, ib/ie, medGrp,
# mstColors, rl*VisBorders, banner, lab3, ...) set up by the caller - confirm
# against the panel-setup code.
#
rlAreaMap = function(j) {

   # bnd.ord is a list of Keys (one per polygon) in the border file.
   # A polygon ends at each NA x-coordinate in the VisBorder point list.
   bnd.ord = rlAreaVisBorders$Key[is.na(rlAreaVisBorders$x)]   # area abbrev based on "NA" in point list
   #cat("bnd.ord:",bnd.ord,"\n")
   #cat("Map-Overlays L2:",Map.L2Borders," Reg:",Map.RegBorders," L3:",Map.L3Borders,"\n")

   # The x,y limits must be based on the biggest area plotted; if the data level
   # does not cover the entire area, check the L2 and L3 borders.
   rPoly   <- MapPolySetup("map",panels,rlAreaVisBorders,rlL2VisBorders,rlRegVisBorders,rlL3VisBorders,Map.L3Borders)
   rxpoly2 <- rPoly$rxpoly2
   rypoly2 <- rPoly$rypoly2
   # Must be done before the panel is scaled.
   # Issue: the single-row median group does not print a map, so the aspect-ratio
   # normalizations could cause problems with the median text.  Keep an eye on it.

   # ____________ labeling and axes _______________

   panelSelect(panels,1,j)
   x    <- panelScale()
   par(xpd=T)            # allow the titles to draw outside the plot region
   xpin <- par("pin")
   #printPar()

   # Column titles - centered, no boxes.
   # Use title lines 2 and 3 (tick) for a two-row title; no centering logic needed.
   xLab1 <- banner["map","H2"]
   xLab2 <- banner["map","H3"]
   if (xLab2 == "") {
      # Only one title line supplied - show it on the lower title line.
      xLab2 <- xLab1
      xLab1 <- ""
   }
   if (xLab1 != "") mtext(xLab1,side=3,line=Title.Line.2.pos,cex=Text.cex)
   mtext(xLab2,side=3,line=Title.Line.2x.pos,cex=Text.cex)
   lastLab2Space <<- - ( xpin[1] - strwidth(xLab2,units="inch",cex=Text.cex) ) / 2

   # Put the initial colors for all sub-areas into per-polygon vectors.
   VisNodes    <- is.na(rlAreaVisBorders$x)                        # end-of-points marker for each polygon
   VisKeys     <- rlAreaVisBorders$Key[VisNodes]                   # key for that polygon
   VisHoles    <- rlAreaVisBorders$hole[VisNodes]                  # is it a hole?
   NotUsed     <- !is.na(match(rlAreaVisBorders$Key,NotUsedKeys))  # points of not-used sub-areas
   NotUsedFlag <- any(NotUsed)                                     # flag to indicate not-used exists
   VisNU       <- !is.na(match(VisKeys,NotUsedKeys))               # T/F list of not-used polygons

   # Panel setup already calculated the following variables:
   #   numGrps    - number of group/rows
   #   medGrp     - index of the median group/row (odd number of groups, otherwise 0)
   #   medGrpSize - number of rows in the median group (0 when no median group)
   #   medRow     - index of the median row (odd number of rows, otherwise 0)
   #   medRowBlw  - the row just below the median
   #   medRowAbv  - the row just above the median
   #cat("map - areaDatKey:",areaDatKey,"\n")

   # Drawing Loop - one map per group/row panel.
   for (i in 1:numGrps) {

      if ( i == medGrp & medGrpSize == 1 ){
         # Line break in maps: single-row median group - text banner instead of a map.
         panelSelect(panels,i,j)
         x <- panelScale()
         panelFill (col=Panel.Fill.col)
         panelOutline()
         xmsg <- banner["map","M1"]
         # Insert median text, centered on the middle of the rectangle (0.5, 0.5).
         text (.5,.5,xmsg,cex= Text.cex*0.8)
         next      # skip to next FOR item
      }

      # Handle groups with 2 or more rows.
      panelSelect(panels,i,j)               # do map in panels by group
      x     <- panelScale(rxpoly2,rypoly2)  # apply the required scaling
      gsubs <- ib[i]:ie[i]                  # index range of the rows in this panel

      blkAreaCol <- 0
      if (medGrp > 0 & medGrpSize == 1) {
         # This setup has a median group with 1 row - watch for the panels above
         # and below it and add the median sub-area coloring to both.
         if (i == (medGrp-1)) {
            gsubs      <- c(gsubs,medRow)   # slot med-1: add the median row to this group
            blkAreaCol <- length(gsubs)     # save the "color" number for the final remap
         }
         if (i == (medGrp+1)) {
            gsubs      <- c(gsubs,medRow)   # slot med+1: add the median row to this group
            blkAreaCol <- length(gsubs)     # the length of the gsubs vector
         }
         # blkAreaCol uses length(gsubs) as the key (2..6) to match up later.
      }
      #print(paste0("gsubs:",paste0(gsubs,collapse=" ")))

      # medRow (the median row, when the number of rows is odd) is always in medGrp.
      gnams <- areaDatKey[gsubs]            # index to sub-area keys (translation)
      #print(paste0("gnams:",paste0(gnams,collapse=" ")))

      # Even though a hole in a sub-area may later be filled with a color or grey,
      # it is always filled with the background map color.  The order of the
      # polygons in the VisBorder files always has the holes following the basic
      # sub-area, and sub-areas filling other sub-areas' holes after that area's
      # polygons.
      #
      # Color indexes (into mstColors):
      #   1-6  -> active sub-area colors
      #   7    -> median sub-area color in the panels above and below the median
      #   8-10 -> highlight colors used in mapcum, mapmedian and maptail
      #   11   -> unreferenced sub-area
      #   12   -> background sub-area (non-active)
      #
      # Run sequence:
      #   set all colors in VisCol (based on polygons);
      #   draw fill colors for all (VisCol);
      #   draw background / not-referenced lines;
      #   draw highlighted lines;
      #   draw foreground lines.
      #cat("length(NotUsedKeys):",length(NotUsedKeys)," with keys:",paste0(NotUsedKeys,collapse=", "),"\n")
      #cat("gnams:",paste0(gnams,collapse=", "),"\n")

      VisCol <- rep(11,length(VisKeys))     # default: unreferenced color, one per polygon

      # Isolate the foreground (active) sub-areas.
      foreKeys   <- gnams                                       # list of active keys
      fore       <- !is.na(match(rlAreaVisBorders$Key,foreKeys))  # points of foreground sub-areas
      foreFlag   <- any(fore)                                   # set flag if any found
      VisForeCol <- match(VisKeys,foreKeys)  # color index (1-6) per polygon; NA = not foreground
      VisFore    <- !is.na(VisForeCol)       # T/F vector of foreground polygons
      VisCol[VisFore] <- VisForeCol[VisFore] # transfer the color index for each foreground polygon
      if (blkAreaCol>0) {
         VisCol[VisCol == blkAreaCol] <- 7   # remap the carried median row to black (index 7)
      }

      # Highlight set - always empty for 'map'; kept only to standardize the code.
      highKeys <- NA
      high     <- !is.na(match(rlAreaVisBorders$Key,highKeys))
      highFlag <- any(high)
      VisHigh  <- !is.na(match(VisKeys,highKeys))
      VisCol[VisHigh] <- 8

      # What is left are the background sub-areas (not active and not used).
      back     <- !(fore | high | NotUsed)
      backFlag <- any(back)
      backKeys <- unique(rlAreaVisBorders$Key[back])
      VisBack  <- !is.na(match(VisKeys,backKeys))
      VisCol[VisBack] <- 12

      VisCol2           <- mstColors[VisCol]   # translate indexes to real colors
      VisCol2[VisHoles] <- Map.Bg.col          # set all holes to the panel background color
      # Colors are ready for plotting the polygons.

      # Map background - Layer 2 borders (regional areas; US -> states).
      if (Map.L2Borders) {
         # Fill the L2 sub-areas, then draw their border lines.
         polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=-1, col=Map.L2.Fill.col, border=FALSE)
         polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=0, col=Map.L2.Line.col, lwd=Map.L2.Line.lwd)   # white
         #cat("Drew L2 Borders\n")
      }

      # Map sub-areas: draw the fill colors for all sub-areas in one call.
      polygon(rlAreaVisBorders$x,rlAreaVisBorders$y, density=-1, col=VisCol2, border=FALSE)
      #cat("Drew active sub-area colors.\n")

      # Draw the border lines per sub-area group, back to front.

      # Not referenced sub-areas.
      if (NotUsedFlag) {
         wVisBorders <- NULL
         wVisBorders <- rlAreaVisBorders[NotUsed,]   # sub-areas without data (not used)
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd)   # white
         #cat("Drew not used sub-areas borders.\n")
      }

      # Background (not-active) sub-areas.
      if (backFlag) {
         wVisBorders <- NULL
         wVisBorders <- rlAreaVisBorders[back,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd)   # white
         #cat("Drew not-active sub-areas borders.\n")
      }

      # Highlighted sub-areas.
      if (highFlag) {
         wVisBorders <- NULL
         wVisBorders <- rlAreaVisBorders[high,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)   # black
         #cat("Drew highlighted sub-areas borders.\n")
      }

      # Foreground (active) sub-areas.
      if (foreFlag) {
         wVisBorders <- NULL
         wVisBorders <- rlAreaVisBorders[fore,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)   # black
         #cat("Drew Active sub-areas borders.\n")
      }

      # Map boundaries for regions.
      if (Map.RegBorders && regionsB) {     # regions boundaries overlay
         polygon(rlRegVisBorders$x, rlRegVisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd)   # black
         #cat("Drew Region Borders\n")
      }

      # Outline the L3 (total) area - the complete area boundary.
      if (Map.L3Borders) {
         polygon(rlL3VisBorders$x, rlL3VisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd)   # black - outside US boundary
         # If a U.S. map, add extra labels for the sub-areas that were moved.
         if (areaUSData) {   ##### replace with feature-based code
            if (i==1) {      # first map in the column only
               text(135,31,'DC',cex=Map.Area.Spec.cex, adj=.5, col=1)
               text(22, 17,'AK',cex=Map.Area.Spec.cex, adj=.5, col=1)
               text(47, 8, 'HI',cex=Map.Area.Spec.cex, adj=.5, col=1)
            }
         }
      }
      # No reference values for this type of column.  If present - ignore.
   }  # i loop

   # As we finish the i loop, we end up in the last panel.
   xpin          <- par("pin")
   lastLab3Space <<- xpin[1]/2
   if (lab3[j] != "") {
      #panelSelect(panels,numGrps,j)
      #x <- panelScale(rxpoly2,rypoly2)
      # ______ Bottom Label/Title - Lab3 ______
      mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex)   # bottom column titles
      lastLab3Space <<- (xpin[1] - strwidth(lab3[j], cex=Text.cex, units="inch")) / 2
   }
}

#####
#
# type = 'mapcum' ========================================================
#
# rlAreaMapCum - like rlAreaMap, but sub-areas that were active in previous
# group/rows stay highlighted (cream color) in the later panels.
#   bnd.ord    gives the abbreviations in the order the boundary polygons are stored;
#   areaDatKey gives the abbreviations in the (sorted) order the data rows are plotted.
#
rlAreaMapCum = function(j) {

   # bnd.ord is a list of Keys (one per polygon) in the border file.
   bnd.ord = rlAreaVisBorders$Key[is.na(rlAreaVisBorders$x)]   # area abbrev for areas with boundaries

   # The x,y limits must be based on the biggest area plotted; if the data level
   # does not cover the entire area, check the L2 and L3 borders.
   rPoly   <- MapPolySetup("mapcum",panels,rlAreaVisBorders,rlL2VisBorders,rlRegVisBorders,rlL3VisBorders,Map.L3Borders)
   rxpoly2 <- rPoly$rxpoly2
   rypoly2 <- rPoly$rypoly2
   # Must be done before the panel is scaled.
   # Issue: the single-row median group does not print a map, so the aspect-ratio
   # normalizations could cause problems with the median text.  Keep an eye on it.
   # ____________ labeling and axes _______________

   panelSelect(panels,1,j)
   x    <- panelScale()   # default scale 0:1, 0:1 - not very useful
   par(xpd=T)             # allow the titles to draw outside the plot region
   xpin <- par("pin")
   # Adjustments to handle the variable scaling of the first panel (at this point 0,1 by 0,1):
   #   par("fin") has the figure width and height in inches (e.g. 2.4 x 3.6);
   #   par("pin") has the plot width and height in inches (e.g. 1.4 x 1.111);
   #   so at 0,1 by 0,1 the aspect is really about 1.111/1.4 = 0.79.

   # Draw box(es) for the title labels (convert inches into points for the panel).
   # Line 1 - title, no boxes (use line position).
   mtext(banner["mapcum","H1"],side=3,line=Title.Line.1.pos,cex=Text.cex)
   # Line 2 - box and title.
   DrawBoxAndText(banner["mapcum","H2"], Text.cex, Map.Lab.Box.Width, mstColors[8], "black", Title.Line.2.pos)
   DrawBoxAndText(banner["mapcum","H3"], Text.cex, Map.Lab.Box.Width, Map.Bg.col, "black", Title.Line.2x.pos)
   lastLab2Space <<- - ( xpin[1] - ( strwidth(banner["mapcum","H3"],units="inch",cex=Text.cex) + 0.15 ) ) / 2

   # Per-polygon vectors (same construction as in rlAreaMap).
   VisNodes    <- is.na(rlAreaVisBorders$x)
   VisKeys     <- rlAreaVisBorders$Key[VisNodes]
   VisHoles    <- rlAreaVisBorders$hole[VisNodes]
   NotUsed     <- !is.na(match(rlAreaVisBorders$Key,NotUsedKeys))
   NotUsedFlag <- any(NotUsed)
   VisNU       <- !is.na(match(VisKeys,NotUsedKeys))

   #####
   # An alternative would be to plot the text at x,y points.
   #####

   # Drawing Loop
   #cat("mapcum - areaDatKey:",areaDatKey,"\n")
   for (i in 1:numGrps) {

      if (i==medGrp & medGrpSize == 1) {
         # Single-row median group - text banner instead of a map.
         panelSelect(panels,i,j)
         x <- panelScale()
         panelFill (col=Panel.Fill.col)
         panelOutline()
         text (.5,.5,banner["mapcum","M1"],cex=Text.cex*0.8)   # centered around 0.5, 0.5
         next
      }

      panelSelect(panels,i,j)
      x     <- panelScale(rxpoly2,rypoly2)
      gsubs <- ib[i]:ie[i]
      blkAreaCol <- 0
      ke    = length(gsubs)   # number of rows in this group
      ## If a single row is not the median then the middle group is the median.
      if ( medGrp > 0 & medGrpSize == 1) {
         # Single-row median group: carry the median row into the panels just
         # above and below it (accented black via blkAreaCol).
         if (i == (medGrp-1)) {
            gsubs      <- c(gsubs,medRow)
            blkAreaCol <- length(gsubs)
         }
         if (i == (medGrp+1)) {
            gsubs      <- c(gsubs,medRow)
            blkAreaCol <- length(gsubs)
         }
      }
      # Translate from sequence numbers to the sorted order of areas (abbrev):
      # the list of areas in this row (group) panel.
      gnams = areaDatKey[gsubs]

      # Map background - Layer 2 borders.
      if (Map.L2Borders) {   # area overlay
         polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=-1, col=Map.L2.Fill.col, border=FALSE)
         polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=0, col=Map.L2.Line.col, lwd=Map.L2.Line.lwd)   # white
      }

      # Map boundaries for regions.
      # NOTE(review): this overlay uses Map.L2.Line.col/lwd while the other map
      # glyphs use Map.L3.Line.col/lwd for the same overlay - confirm which is intended.
      if (Map.RegBorders && regionsB) {   # regions boundary overlay
         polygon(rlRegVisBorders$x, rlRegVisBorders$y, density=0, col=Map.L2.Line.col, lwd=Map.L2.Line.lwd)
      }

      VisCol <- rep(11,length(VisKeys))   # default: unreferenced color, one per polygon

      # Foreground (active) sub-areas - assign colors based on order in gnams.
      foreKeys   <- gnams
      fore       <- !is.na(match(rlAreaVisBorders$Key,foreKeys))
      foreFlag   <- any(fore)
      VisForeCol <- match(VisKeys,foreKeys)
      VisFore    <- !is.na(VisForeCol)
      VisCol[VisFore] <- VisForeCol[VisFore]
      if (blkAreaCol>0) {
         VisCol[VisCol == blkAreaCol] <- 7   # remap the carried median row to black
      }

      # Highlight: the areas already used in the panels above this one.
      # Note: 1:ib[i]-1 parses as (1:ib[i])-1, i.e. 0:(ib[i]-1); the 0 index is
      # dropped by R, so this selects rows 1 .. ib[i]-1 (empty for the first panel).
      highKeys <- areaDatKey[1:ib[i]-1]
      high     <- !is.na(match(rlAreaVisBorders$Key,highKeys))
      highFlag <- any(high)
      VisHigh  <- !is.na(match(VisKeys,highKeys))
      VisCol[VisHigh] <- 8

      # What is left are the background sub-areas (not active and not used).
      back     <- !(fore | high | NotUsed)
      backFlag <- any(back)
      backKeys <- unique(rlAreaVisBorders$Key[back])
      VisBack  <- !is.na(match(VisKeys,backKeys))
      VisCol[VisBack] <- 12

      VisCol           <- mstColors[VisCol]   # translate indexes to real colors
      VisCol[VisHoles] <- Map.Bg.col          # holes get the background color

      # Fill all sub-area polygons in one call.
      polygon(rlAreaVisBorders$x,rlAreaVisBorders$y, density=-1, col=VisCol, border=FALSE)

      # Set up each group of sub-areas and draw the border polygons, back to front.

      # Not referenced sub-areas.
      if (NotUsedFlag) {
         wVisBorders <- rlAreaVisBorders[NotUsed,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd)   # white
      }
      # Background (not-active) sub-areas.
      if (backFlag) {
         wVisBorders <- rlAreaVisBorders[back,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd)   # white
      }
      # Highlighted sub-areas.
      if (highFlag) {
         wVisBorders <- rlAreaVisBorders[high,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)   # black
      }
      # Foreground (active) sub-areas.
      if (foreFlag) {
         wVisBorders <- rlAreaVisBorders[fore,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)   # black
      }

      # Map boundaries for regions.
      if (Map.RegBorders && regionsB) {   # regions boundaries overlay
         polygon(rlRegVisBorders$x, rlRegVisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd)   # black
      }

      # Outline the country (total) area.
      if (Map.L3Borders) {
         polygon(rlL3VisBorders$x, rlL3VisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd)   # black - outside US boundary
         if (areaUSData) {
            if (i==1) {   # first map in the column only
               text(135,31,'DC',cex=Map.Area.Spec.cex,adj=.5, col=1)
               text(22, 17,'AK',cex=Map.Area.Spec.cex,adj=.5, col=1)
               text(47, 8, 'HI',cex=Map.Area.Spec.cex,adj=.5, col=1)
            }
         }
      }
   }  # i loop
   # No reference values for this type of column.  If present - ignore.

   # As we leave the i loop we are in the last group panel.
   xpin          <- par("pin")
   lastLab3Space <<- xpin[1]/2
   if (lab3[j] != "") {
      #panelSelect(panels,numGrps,j)
      #x <- panelScale(rxpoly2,rypoly2)
      # ______ Bottom Label/Title - Lab3 ______
      mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex)   # bottom column titles
      lastLab3Space <<- (xpin[1] - strwidth(lab3[j], cex=Text.cex, units="inch")) / 2
   }
}

#####
#
# type = 'mapmedian' =================================================
#
# rlAreaMapMedian - cream-colors all areas above and below the median area:
#   areas < median are colored very light red in the upper half of the groups,
#   areas > median are colored very light blue in the lower half of the groups.
# In a median group with more than one row, both shadings are done as a cross over.
#   bnd.ord    gives the abbreviations in the order the boundary polygons are stored;
#   areaDatKey gives the abbreviations in the (sorted) order the data rows are plotted.
#
rlAreaMapMedian = function(j){

   bnd.ord = rlAreaVisBorders$Key[is.na(rlAreaVisBorders$x)]   # area abbrev per polygon

   # The x,y limits must be based on the biggest area plotted; if the data level
   # does not cover the entire area, check the L2 and L3 borders.
   rPoly   <- MapPolySetup("mapmedian",panels,rlAreaVisBorders,rlL2VisBorders,rlRegVisBorders,rlL3VisBorders,Map.L3Borders)
   rxpoly2 <- rPoly$rxpoly2
   rypoly2 <- rPoly$rypoly2

   # ____________ labeling and axes _______________

   panelSelect(panels,1,j)
   x    <- panelScale()
   par(xpd=T)
   xpin <- par("pin")

   # Draw box(es) for the title labels (convert inches into points for the panel).
   # Line 1 - title, no boxes (use line position).
   mtext(banner["mapmed","H1"],side=3,line=Title.Line.1.pos,cex=Text.cex)
   # Line 2 - box and title.
   DrawBoxAndText(banner["mapmed","H2"], Text.cex, Map.Lab.Box.Width, mstColors[9], "black", Title.Line.2.pos)
   DrawBoxAndText(banner["mapmed","H3"], Text.cex, Map.Lab.Box.Width, mstColors[10], "black", Title.Line.2x.pos)
   lastLab2Space <<- - ( xpin[1] - ( strwidth(banner["mapmed","H3"],units="inch",cex=Text.cex) + 0.15 ) ) / 2
   #cat("mapmed - areaDatKey:",areaDatKey,"\n")

   # Per-polygon vectors (same construction as in rlAreaMap).
   VisNodes    <- is.na(rlAreaVisBorders$x)
   VisKeys     <- rlAreaVisBorders$Key[VisNodes]
   VisHoles    <- rlAreaVisBorders$hole[VisNodes]
   NotUsed     <- !is.na(match(rlAreaVisBorders$Key,NotUsedKeys))
   NotUsedFlag <- any(NotUsed)
   VisNU       <- !is.na(match(VisKeys,NotUsedKeys))

   # Upper half of the sort: rows 1 .. medRowAbv.
   highUKeys <- areaDatKey[1:medRowAbv]
   highU     <- !is.na(match(rlAreaVisBorders$Key,highUKeys))
   highUFlag <- any(highU)
   VisHighU  <- !is.na(match(VisKeys,highUKeys))

   # Lower half of the sort: rows medRowBlw .. numRows.
   highLKeys <- areaDatKey[medRowBlw:numRows]
   highL     <- !is.na(match(rlAreaVisBorders$Key,highLKeys))
   highLFlag <- any(highL)
   VisHighL  <- !is.na(match(VisKeys,highLKeys))

   # Drawing Loop - in the median group both halves get shaded.
   for (i in 1:numGrps) {

      # Median group/row with 1 row.
      if (i==medGrp & medGrpSize == 1) {
         # Median group/row with 1 row - do text instead of a map.
         panelSelect(panels,i,j)
         x <- panelScale()
         panelFill (col=Panel.Fill.col)
         panelOutline()
         text (.5,.5,banner["mapmed","M1"],cex=Text.cex*0.8)
         next    # exit for loop to the next group/row
      }

      # All remaining panels have 2 or more rows.
      panelSelect(panels,i,j)
      x     <- panelScale(rxpoly2,rypoly2)
      gsubs <- ib[i]:ie[i]
      blkAreaCol <- 0

      # Median Group/Row panel handling.
      if (medGrp > 0 & medGrpSize == 1) {
         # A single-row median group exists: accent the median row in the panels
         # above and below it.
         if (i == medGrp-1) {
            gsubs      <- c(gsubs,medRow)   # add the median row to the list
            blkAreaCol <- length(gsubs)     # accent in the above panel
         }
         if (i == medGrp+1) {
            gsubs      <- c(gsubs,medRow)   # add the median row to the list
            blkAreaCol <- length(gsubs)     # accent in the below panel
         }
      }
      # gsubs is now the current area list.
      gnams <- areaDatKey[gsubs]   # set of areas for normal coloring (keys from index #s)

      # Sub-divide into four groups:
      #   1) background (whatever is left), 2) above-median with data (1:medRowAbv),
      #   3) below-median with data (medRowBlw:numRows), 4) active (gsubs).
      # Note: medRowBlw:numRows will also catch NA data items (sorted to the bottom).
      # Note: a non-data sub-area has no row in the data, but does have a row in
      # areaNamesAbbrsIDs; if we don't reference it, its boundary may not be
      # completely drawn.

      # Map background - Layer 2 borders (regional areas; US -> states).
      if (Map.L2Borders) {   # area overlay
         polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=-1, col=Map.L2.Fill.col, border=FALSE)
         polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=0, col=Map.L2.Line.col, lwd=Map.L2.Line.lwd)   # white
      }

      VisCol   <- rep(11,length(VisKeys))
      highUbdr <- FALSE
      highLbdr <- FALSE
      if (i < medGrp ) {
         # Panels above the median group: shade the above-median half.
         high     <- highU
         highUbdr <- TRUE
         VisCol[VisHighU] <- 9
      }
      if (i > medGrp) {
         # Panels below the median group: shade the below-median half.
         high     <- highL
         highLbdr <- TRUE
         VisCol[VisHighL] <- 10
      }
      if (i == medGrp) {
         # Multi-row median group: both halves get shaded (cross over).
         high     <- highU | highL
         highUbdr <- TRUE
         highLbdr <- TRUE
         VisCol[VisHighU] <- 9
         VisCol[VisHighL] <- 10
      }

      # Foreground (active) sub-areas - assign colors based on order in gnams.
      foreKeys   <- gnams
      fore       <- !is.na(match(rlAreaVisBorders$Key,foreKeys))
      foreFlag   <- any(fore)
      VisForeCol <- match(VisKeys,foreKeys)
      VisFore    <- !is.na(VisForeCol)
      VisCol[VisFore] <- VisForeCol[VisFore]
      if (blkAreaCol>0) {
         VisCol[VisCol == blkAreaCol] <- 7   # remap the carried median row to black
      }

      # What is left are the background sub-areas (not active and not used). T/F list.
      back     <- !(fore | high | NotUsed)
      backFlag <- any(back)
      if (backFlag) {
         backKeys <- unique(rlAreaVisBorders$Key[back])
         VisBack  <- !is.na(match(VisKeys,backKeys))
         VisCol[VisBack] <- 12
      }

      VisCol           <- mstColors[VisCol]   # translate indexes to real colors
      VisCol[VisHoles] <- Map.Bg.col

      # Fill all sub-area polygons in one call.
      polygon(rlAreaVisBorders$x,rlAreaVisBorders$y, density=-1, col=VisCol, border=FALSE)

      # Set up each group of sub-areas and draw the border polygons, back to front.

      # Not referenced sub-areas.
      if (NotUsedFlag) {
         wVisBorders <- rlAreaVisBorders[NotUsed,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd)   # white
      }
      # Background (not-active) sub-areas.
      if (backFlag) {
         wVisBorders <- rlAreaVisBorders[back,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd)   # white
      }
      # Highlighted sub-areas (two sets: above and below the median).
      if (highUbdr) {
         wVisBorders <- rlAreaVisBorders[highU,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)   # black
      }
      if (highLbdr) {
         wVisBorders <- rlAreaVisBorders[highL,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)   # black
      }
      # Foreground (active) sub-areas.
      if (foreFlag) {
         wVisBorders <- rlAreaVisBorders[fore,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)   # black
      }

      # Map boundaries for regions.
      if (Map.RegBorders && regionsB) {   # regions boundaries overlay
         polygon(rlRegVisBorders$x, rlRegVisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd)   # black
      }

      # Outline the country (total) area.
      if (Map.L3Borders) {
         polygon(rlL3VisBorders$x, rlL3VisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd)   # black - outside US boundary
         if (areaUSData) {
            if (i==1) {   # first map in the column only
               text(135,31,'DC',cex=Map.Area.Spec.cex,adj=.5, col=1)
               text(22, 17,'AK',cex=Map.Area.Spec.cex,adj=.5, col=1)
               text(47, 8, 'HI',cex=Map.Area.Spec.cex,adj=.5, col=1)
            }
         }
      }
   }  # i loop
   # No reference values for this type of column.  If present - ignore.

   # As we finish the i loop we are in the last group panel.
   xpin          <- par("pin")
   lastLab3Space <<- xpin[1]/2
   if (lab3[j] != "") {
      #panelSelect(panels,numGrps,j)
      #x <- panelScale(rxpoly2,rypoly2)
      # ______ Bottom Label/Title - Lab3 ______
      mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex)   # bottom column titles
      lastLab3Space <<- (xpin[1] - strwidth(lab3[j], cex=Text.cex, units="inch")) / 2
   }
}

#####
#
# type = 'maptail' ====================================================
#
# rlAreaMapTail - shows the current group's areas colored, with a tail of areas
# (in cream color) accumulating from the outside (extremes) inward to the median.
#   bnd.ord    gives the abbreviations in the order the boundary polygons are stored;
#   areaDatKey gives the abbreviations in the (sorted) order the data rows are plotted.
#
rlAreaMapTail = function(j){

   #browser()
   bnd.ord <- rlAreaVisBorders$Key[is.na(rlAreaVisBorders$x)]   # area Key per polygon

   rPoly   <- MapPolySetup("maptail",panels,rlAreaVisBorders,rlL2VisBorders,rlRegVisBorders,rlL3VisBorders,Map.L3Borders)
   rxpoly2 <- rPoly$rxpoly2
   rypoly2 <- rPoly$rypoly2
   #cat("maptail - areaDatKey:",areaDatKey,"\n")

   # ____________ labeling and axes _______________
   # Panel # 1 - header: column header titles and "box".
   panelSelect(panels,1,j)   # line 1 and line 2 - panel 1
   x    <- panelScale()
   par(xpd=T)
   xpin <- par("pin")

   # Draw box for the title label (convert inches into points for the panel).
   # Line 1 - not used.
   # Line 2 - title, no boxes (use line position).
   mtext(banner["maptail","H2"],side=3,line=Title.Line.2.pos,cex=Text.cex)
   # Line 3 - box and title.
   DrawBoxAndText(banner["maptail","H3"], Text.cex, Map.Lab.Box.Width, mstColors[8], "black", Title.Line.2x.pos)
   lastLab2Space <<- - ( xpin[1] - ( strwidth(banner["maptail","H3"],units="inch",cex=Text.cex) + 0.15 ) ) / 2
   # If needed, this would be the place for Panel # N trailer code.
   # JP - removed - temp
   # mtext('Further From Median',side=3,line=Title.Line.2x.pos,at=.15,cex=Text.cex,adj=0)

   # Need a median group point for the calculations on the two-tailed maps.
   if (medGrp > 0 ) {
      # Odd number of groups - the middle group is the median point.
      medGrpPt <- medGrp
   } else {
      medGrpPt <- (numGrps/2)   # + one lower
   }

   # Per-polygon vectors (same construction as in rlAreaMap).
   VisNodes    <- is.na(rlAreaVisBorders$x)
   VisKeys     <- rlAreaVisBorders$Key[VisNodes]
   VisHoles    <- rlAreaVisBorders$hole[VisNodes]
   NotUsed     <- !is.na(match(rlAreaVisBorders$Key,NotUsedKeys))
   NotUsedFlag <- any(NotUsed)
   VisNU       <- !is.na(match(VisKeys,NotUsedKeys))

   # Drawing Loop
   for (i in 1:numGrps) {

      if(i==medGrp & medGrpSize == 1 ) {
         # Single-row median group - text banner instead of a map.
         panelSelect(panels,i,j)
         panelScale()
         panelFill (col=Panel.Fill.col)
         panelOutline()
         text (.5,.5,banner["maptail","M1"],cex=Text.cex*0.8)
         next
      }

      panelSelect(panels,i,j)
      x     <- panelScale(rxpoly2,rypoly2)
      # Get the list of areas in this group.
      gsubs <- ib[i]:ie[i]
      ke    <- length(gsubs)
      blkAreaCol <- 0
      if (medGrp > 0 & medGrpSize == 1) {
         # Single-row median group: carry the median row into the adjacent panels.
         if (i==(medGrp-1)) {
            gsubs      <- c(gsubs,medRow)
            blkAreaCol <- length(gsubs)
         }
         if (i==(medGrp+1)) {
            gsubs      <- c(gsubs,medRow)
            blkAreaCol <- length(gsubs)
         }
      }
      # Get the list of group area names (keys).
      gnams = areaDatKey[gsubs]

      # Map background - Layer 2 borders (regional areas; US -> states).
      if (Map.L2Borders) {   # area overlay
         polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=-1, col=Map.L2.Fill.col, border=FALSE)
         polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=0, col=Map.L2.Line.col, lwd=Map.L2.Line.lwd)   # white
      }

      VisCol   <- rep(11,length(VisKeys))
      highKeys <- NA
      highFlag <- FALSE
      # The tail accumulates from the extremes toward the median:
      # panels above the median point highlight rows 1 .. ib[i];
      # panels below it highlight rows ie[i] .. numRows.
      if (i < medGrpPt) highKeys <- areaDatKey[1:ib[i]]
      if (i > medGrpPt) highKeys <- areaDatKey[ie[i]:numRows]
      if (length(highKeys) > 0) {
         # Note: length(NA) is 1, so this branch always runs; an NA key simply
         # matches nothing and leaves high all-FALSE.
         high     <- !is.na(match(rlAreaVisBorders$Key,highKeys))
         highFlag <- any(high)
         VisHigh  <- !is.na(match(VisKeys,highKeys))
         VisCol[VisHigh] <- 8
      }

      # Foreground (active) sub-areas - assign colors based on order in gnams.
      foreKeys   <- gnams
      fore       <- !is.na(match(rlAreaVisBorders$Key,foreKeys))
      foreFlag   <- any(fore)
      VisForeCol <- match(VisKeys,foreKeys)
      VisFore    <- !is.na(VisForeCol)
      VisCol[VisFore] <- VisForeCol[VisFore]
      if (blkAreaCol>0) {
         VisCol[VisCol == blkAreaCol] <- 7   # remap the carried median row to black
      }

      # What is left are the background sub-areas (not active and not used).
      back     <- !(fore | high | NotUsed)
      backFlag <- any(back)
      if (backFlag) {
         backKeys <- unique(rlAreaVisBorders$Key[back])
         VisBack  <- !is.na(match(VisKeys,backKeys))
         VisCol[VisBack] <- 12
      }

      VisCol           <- mstColors[VisCol]   # translate indexes to real colors
      VisCol[VisHoles] <- Map.Bg.col

      # Draw the combined fill colors in VisBorder file order:
      # fill in all areas (1 to 6, 7, hole) with one polygon call.
      polygon(rlAreaVisBorders$x,rlAreaVisBorders$y,
              density=-1, col = VisCol, border = FALSE)

      # Set up each group of sub-areas and draw the border polygons, back to front.

      # Not referenced sub-areas.
      if (NotUsedFlag) {
         wVisBorders <- rlAreaVisBorders[NotUsed,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd)   # white
      }
      # Background (not-active) sub-areas.
      if (backFlag) {
         wVisBorders <- rlAreaVisBorders[back,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd)   # white
      }
      # Highlighted sub-areas.
      if (highFlag) {
         wVisBorders <- rlAreaVisBorders[high,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)   # black
      }
      # Foreground (active) sub-areas.
      if (foreFlag) {
         wVisBorders <- rlAreaVisBorders[fore,]
         polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)   # black
      }

      # Map boundaries for regions.
      if (Map.RegBorders && regionsB) {   # regions boundaries overlay
         polygon(rlRegVisBorders$x, rlRegVisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd)   # black
      }

      # Outline the country (total) area.
      if (Map.L3Borders) {
         polygon(rlL3VisBorders$x, rlL3VisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd)   # black - outside US boundary
         if (areaUSData) {
            if (i==1) {   # first map in the column only
               text(135,31,'DC',cex=Map.Area.Spec.cex, adj=.5, col=1)
               text(22, 17,'AK',cex=Map.Area.Spec.cex, adj=.5, col=1)
               text(47, 8, 'HI',cex=Map.Area.Spec.cex, adj=.5, col=1)
            }
         }
      }
   }  # i loop
   # No reference values for this type of column.  If present - ignore.

   # As we finish the i loop we are in the last group panel.
   xpin          <- par("pin")
   lastLab3Space <<- xpin[1]/2
   if (lab3[j] != "") {
      #panelSelect(panels,numGrps,j)
      #x <- panelScale(rxpoly2,rypoly2)
      # ______ Bottom Label/Title - Lab3 ______
      mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex)   # bottom column titles
      lastLab3Space <<- (xpin[1] - strwidth(lab3[j], cex=Text.cex, units="inch")) / 2
   }
}

#
#####

#####
#
# Area Rank Number ================================================================
#
# rlAreaRank - based on the ID dot glyph; displays the sorted rank.
# Needs updating to reflect RANKing based on the sorted value - could have ties.
#
#####
#
# Re-think and rewrite before documenting.
#
#####
# rlAreaRank - one glyph column (j) showing each area's sorted rank number,
# right of the ID column.  No reference values for this type of column.
rlAreaRank = function(j){
   # j = panel column number

   #________________ Scaling _______________
   rx <- c(0,1)
   ry <- c(0,1)
   rankstart <- 0.137   # x position (panel units) where the rank text starts

   #______________________ panel labels _____________
   panelSelect(panels,1,j)
   panelScale(rx,ry)
   mtext('Area Rank',side=3,line=Title.Line.1.pos,cex=Text.cex)
   # mtext('areas',side=3,line=Title.Line.2.pos,cex=Text.cex)

   for (i in 1:numGrps){
      gsubs <- ib[i]:ie[i]           # row index range for this group
      ke    <- length(gsubs)         # number of rows in the group
      laby  <- ke:1                  # y slots, top to bottom
      rsubs <- xDFrame$Rank[gsubs]   # rank values for these rows
      # NOTE(review): pen is computed but not used in this loop - confirm if the
      # rank text was meant to be colored like the other glyphs.
      pen   <- if(i==medGrp & medGrpSize == 1) 7 else 1:ke
      panelSelect(panels, i, j)
      x <- panelScale(rx, c(1-pad, ke+pad))
      # Format the rank as a fixed-width (3) integer string and left-align it.
      Fgsubs <- formatC(rsubs, format="f", width=3, digits=0)
      text(rep(rankstart, ke), laby+.1, Fgsubs, adj=0, cex=Text.cex)
   }
   # No reference values for this type of column.
}

#####
#
# type = 'ScatDot' =====================================================
#
# rlAreaScatDot (scatter-plot dots)
#
rlAreaScatDot = function(j){
   # j = panel column number
   # col1 and col2 point to the X and Y data values in the statsDFrame
   # data.frame (known here as "dat").

   wstname <- names(dat)    # names of the columns in statsDFrame
   wstdim  <- dim(dat)
   wstMax  <- wstdim[2]     # number of columns in statsDFrame

   ErrFnd   <- FALSE
   pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0")

   # "col1" - validate the X coordinates column.
   stColName1 <- wstname[col1[j]]
   pdUmsg     <- "(X coordinates)"
   xr         <- CheckPDCol('col1', 'SCATDOT', col1[j], stColName1, j, 1, wstMax, dat, pdUmsg)
   if (xr$Err) { ErrFnd <- TRUE } else { xdat1 <- xr$Dat }

   # "col2" - validate the Y coordinates column.
   stColName2 <- wstname[col2[j]]
   pdUmsg     <- "(Y coordinates)"
   xr         <- CheckPDCol('col2', 'SCATDOT', col2[j], stColName2, j, 2, wstMax, dat, pdUmsg)
   if (xr$Err) { ErrFnd <- TRUE } else { xdat2 <- xr$Dat }

   if (ErrFnd) return ()   # stop if either column failed validation

   good1   <- !is.na(xdat1)          # test to see if the X value is present
   good2   <- !is.na(xdat2)          # test to see if the Y value is present
   goodrow <- !is.na(xdat1 + xdat2)  # rows with both values; used to skip bad entries

   # x and y data loaded into the workSCD data.frame.
   workSCD <- data.frame(x=xdat1,y=xdat2)   # get x and y data from the statsDFrame
# x and y are the coordinates of each dot. # # other fields added later # $pch - symbol code (only 19:25 are supported) # $cex - symbol size # $bg - background color - symbol fill color # $col - color of line # $lwd - line weight of outline of symbol # rownames(workSCD) <- rownames(dat) # transfer row.names refval <- lRefVals[j] # get referrence to object, changed reftxt <- lRefTexts[j] # new - JP-2010/07/23 #_______________Gather stats and put in area Order______________ # Sorting has already been done of the statsDFrame (dat) by areaDatKey or value # in the function startup. #_______________Scaling____________ # x scaling lPad <- TRUE rPad <- TRUE rx <- range(workSCD$x,na.rm=TRUE) # range of X values #cat("scatdot-rx:",rx,"\n") #rx <- SCD.xsc*diff(rx)*c(-.5,.5)+mean(rx) # min to max range with expansion factors. #cat("scatdot-rx after padding:",rx,"\n") # y scaling ry <- range(workSCD$y,na.rm=TRUE) # range of Y values ry <- SCD.ysc*diff(ry)*c(-.5,.5)+mean(ry) # diagonal end points dx <- max(rx[1],ry[1]) diagr <- c(max(rx[1],ry[1]), min(rx[2],ry[2])) # ____________titles and labeling axes_______________ ##### # # Setup and draw top and bottom titles and axis for column # # Padding on left and right for dots. # Res <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad, YAxisPad=TRUE) atRx <- Res$atRx rx <- Res$rx ry <- Res$ry #cat("ScatDot-Result staggering:",staggering," staggered:",staggered,"\n") # ##### # ___________________drawing loop_____________________ # in the ordered list, the median should be 26 of 51 items. changed because of generalization. 
oldpar <- par(lend="butt") # build each panel for scatter plot dots # Y axis & text - can do once for all YAxis_cex <- TS.Axis.cex * 0.75 xPs <- par("ps") xHPsLU <- strheight("00000",cex=1,units="user") xHDesPsLU <- strheight("00000",cex=YAxis_cex,units="user") xDifHLU <- xHPsLU - xHDesPsLU YAxis_adj <- xDifHLU / xHPsLU #cat("YAxis adjustment - YAxis_adj:",YAxis_adj," YAxis_cex:",YAxis_cex,"\n") for (i in 1:numGrps) { # groups from 1 to 5, 6, 7 to 11 ## 6 is the median group. # Cycle through the Row/Groups in the micromap column # This glyph is special in that it draws the data in every panel for all of the scatdot data points. # Only the ones related to the group/row are modified and colored. # Set defaults values for all dots for this panel workSCD$pch <- SCD.Bg.pch # default pch code. workSCD$cex <- SCD.Bg.pch.size # default size, except median workSCD$bg <- SCD.Bg.pch.fill # default symbol color file - was SCD.Bg.pch.fill workSCD$col <- SCD.Bg.pch.col # default line color of outline ("black") workSCD$lwd <- SCD.Bg.pch.lwd # default line weight of outline if (medGrp > 0 & medGrpSize == 1) { # if there is a median Group/Row and it contains one row, then if (i >= medGrp-1 && i <= medGrp + 1) { # force median dot to be highlighted in median and near groups. # modify characteristics of the point in previous and following group/rows to the median group/row workSCD$pch[medRow] <- SCD.Median.pch workSCD$cex[medRow] <- SCD.Median.pch.size workSCD$bg[medRow] <- SCD.Median.pch.fill workSCD$col[medRow] <- SCD.Median.pch.col workSCD$lwd[medRow] <- SCD.Median.pch.lwd } } # plot points. # get list of active rows in this group/row gsubs <- ib[i]:ie[i] # get beginning to end index row number in this group ke <- length(gsubs) # get number of rows in group (5 or 1) # Get color indexes. 
# adjust if median group pen <- if(i==medGrp & medGrpSize == 1 ) 7 else 1:ke # if median group (6)(black), then pen=6, otherwise pen = c(1...x) panelSelect(panels,i,j) # select panel for group i in column j) x <- panelScale(rx,ry) # set scale for panel (should this be ry * 5 or 1?) panelFill(col=Panel.Fill.col) # set fill for panel # vertical grid lines. axis(side=1, tck=1, labels=F, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd) # grid lines # y axis labels if (i==medGrp & medGrpSize == 1) { # median panel # special for median group/row with one row atRy <- c(saveAtRy[1],saveAtRy[length(saveAtRy)]) # for margin panel, print the lowest and highest. } else { atRy <- panelInbounds(ry) # prettyprint a range. } # optional horizontal grid. if (SCD.hGrid) { axis(side=2,tck=1,labels=F,col=Grid.Line.col,lwd=Grid.Line.lwd, at=atRy) # Grid lines } # parameters and variable setup outside of loop. axis(side=2, tick=F, cex.axis=YAxis_cex, mgp=mgpLeft, line= -YAxis_adj*0.3, at=atRy, labels=as.character(atRy)) mtext(lab4[j],side=2, line=Title.Line.5.pos, cex=TS.Axis.cex) panelOutline(col=Panel.Outline.col) # outline panel # dv <- c(gsubs[1:ke],medRow) # was 26. # # draw diagonal line of symetry from c(min (x, y),min(x,y)) to # c(max(x,y), max(x,y)), all point have x=y. # if ((diagr[1] < diagr[2]) && SCD.DiagLine) { # draw symetric line if within box range. dx <- c(diagr[1],diagr[2]) dy <- c(diagr[1],diagr[2]) lines(dx,dy, col=SCD.DiagLine.col, lwd=SCD.DiagLine.lwd, lty=SCD.DiagLine.lty) # place a diagonal line on plot. # print out the statistics for the line if (MST.Debug == 1) { print(paste0("line:",paste0(c(dx,dy),collapse=" "))) print(paste0("usr:",paste0(par("usr"),collapse=" "))) print(paste0("pin:",paste0(par("pin"),collapse=" "))) MST.Debug = 0 # turn off. } } # plot points if (i == medGrp & medGrpSize == 1) { wS <- workSCD[gsubs[1],] # get one entry - the median (Median group/row with 1 row). } else { # standard group/row or median without single row. 
for (k in 1:ke) { # Process each slot of panel - step 1 to 5/6 or 1 to 1 # cycle through row-groups and assign colors to associated area's dots. m <- gsubs[k] workSCD$pch[m] <- SCD.Fg.pch # only 19:25 are supported. workSCD$cex[m] <- SCD.Fg.pch.size workSCD$bg[m] <- mstColors[pen[k]] # set approvate color to circle fill. workSCD$col[m] <- SCD.Fg.pch.col # color of outline of symbol workSCD$lwd[m] <- SCD.Fg.pch.lwd # weight of outline of symbol } wS <- workSCD[order(workSCD$cex,decreasing=FALSE),] # sort by text size to get active point on top. # plot all points by size, others first, colored and median last. } # Have lists of points to plot in wS # Since the points we plot must have outlines and have fill colors, # only the graphic points 19:25 are supported. # points(wS$x, wS$y, pch=wS$pch, col=wS$col, bg=wS$bg, cex=wS$cex, lwd=wS$lwd) # removed # col = border of symbol, bg = background color of symbol. # related to NA processing, points will just not draw a symbol if one of the x,y coordinates is NA. saveAtRy <- atRy # save for possible use on median panel. } # end of i loop par(oldpar) # ____________________________PanelOutline____________________ groupPanelOutline(panelGroup,j) } ############################################ #### # # type = 'segbar' and 'normbar' ==================================== # # rlAreaSegBar (Segmented Bar chart) # # Segmented bars is actually a stacked bar chart. Each segment is the length of one value. # The total length is the sum of the lengths of all segments. # The x scale of the column panels will be set to the "max" length of any bar. # # In the normalized mode, the total for the segments is divided into value of each # segment to get a percentage (0 to 100%). The segments are then plotted as stacked # bars using the percentage. The complete bar will be drawn from the left to right edge of # the panel. # # The data structure can have between 2 to 9 values per area. # Each area must have the same number of values. 
# This limitation may be removed in the future.
#
# Feature added to make each segment a different thickness.  1/4/2014
#
# panelData => data.frame where each row is a area with the areaIUKey as the row.name.
#    The columns are the bar segment values.
#

# rlAreaSegBar - draw one stacked (SBnorm=FALSE) or normalized (SBnorm=TRUE)
# bar per area.  Segment values come from statsDFrame columns col1[j]..col2[j].
# Reads file-level globals: dat, col1, col2, panels, ib, ie, medGrp,
# medGrpSize, pad, numRows, mstColors, CSNBar.*/SNBar.* options.

rlAreaSegBar = function(j, SBnorm=FALSE) {
   #  j = the panel column number
   #  SBnorm  (FALSE = stacked,  TRUE = normalized)
   #
   #  col1 indicates the starting or first column in the statsDFrame data for bar segment values.
   #  col2 indicates the ending or last column in the statsDFrame data.
   #
   # The bar segment values are in the statsDFrame for each area in columns "col1" to "col2".
   #

   wstname <- names(dat)   # names of columns in statsDFrame
   wstdim <- dim(dat)
   wstMax <- wstdim[2]     # number of columns in statsDFrame

   ErrFnd <- FALSE
   pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0")

   gName <- "SEGBAR"
   if (SBnorm) gName <- "NORMBAR"

   # "col1" - first segment column.
   stColName1 <- wstname[col1[j]]
   #print("col1")
   pdUmsg <- "(First Segment Data Column)"
   xr <- CheckPDColnCN('col1', gName, col1[j], stColName1, j, 1, wstMax, dat, pdUmsg)
   #print(xr)

   if (xr$Err) { ErrFnd <- TRUE
   #} else {
   #   xdat1 <- xr$Dat
   }

   # "col2" - last segment column.
   stColName2 <- wstname[col2[j]]
   #print("col2")
   # fixed: message opened with "{" instead of "(".
   pdUmsg <- "(Last Segment Data Column)"
   xr <- CheckPDColnCN('col2', gName, col2[j], stColName2, j, 2, wstMax, dat, pdUmsg)
   #print(xr)

   if (xr$Err) { ErrFnd <- TRUE
   #} else {
   #   xdat1 <- xr$Dat
   }

   if (!ErrFnd) {
      if (col1[j] >= col2[j]) {
         # first column must come before last column.
         ErrFnd <- TRUE
         warnCnt()
         xmsg <- paste0("***020A ", gName, " ", pdColNum, " The first column name/number (", stColName1,") must precede the last column name/number (", stColName2,") in the ", sDFName," data frame.")
         warning(xmsg, call.=FALSE)
      } else {
         wD <- ( col2[j] - col1[j] + 1 )   # number of data columns (segments).
         if (wD < 2 || wD > 9) {
            ErrFnd <- TRUE
            warnCnt()
            xmsg <- paste0("***020B ", gName, " ", pdColNum, " The number of segments is ", wD, ".  It must be between 2 and 9.  If over 9, only the first 9 will be used.")
            warning(xmsg, call.=FALSE)
         }
      }
   }

   if (ErrFnd) return ()   # error warning found - return

   stColNums <- c(col1[j]:col2[j])

   workSB <- dat[,stColNums]   # get bar segment data from the statsDFrame.

   colNums <- c(1:dim(workSB)[2])

   for (ind in colNums)  {    # check and convert each column
      iC <- stColNums[ind]    # get stDF column number
      stColNam <- wSFName[iC] # get stDF column name
      F_ind <- formatC(ind,format="f",digits=0,width=1)
      segNam <- paste0("seg",F_ind)
      pdUmsg <- paste0("(Bar segment ",F_ind," length)")
      x <- CheckNum(workSB[,ind], gName, ind, pdColNum, segNam, stColNam, pdUmsg)
      if (x$Err) { ErrFnd <- TRUE } else { workSB[,ind] <- x$Dat }
   }

   good <- !is.na(rowSums(workSB))   # TRUE for rows with no NA in any segment.
   #
   refval <- lRefVals[j]    # get reference to object, changed
   reftxt <- lRefTexts[j]   # new - JP-2010/07/23

   #
   #
   # Colors - added transparency from x in steps of number of Segments up to 100%
   #    so 2 step = 50, 100
   #       3 step = 33.3, 66.6, 100
   #       4 step = 25, 50, 75, 100
   #       5 step = 20, 40, 60, 80, 100
   #       6 step = 16.6, 33.3, 50, 66.6, 83.3, 100
   #       etc.
   #    1/(NumSegs)*step = transparency
   #
   # Dan's addition ==> as the colors are generated from the base color
   #
   #    pInc = 1 / NumSegs
   #
   #    cSteps = cumsum(rep(pInc,NumSegs))^1.35
   #
   #    thickness = constant vs. vary based on 2 to 9th segment
   #

   #_______________Gather stats and put in area Order______________

   # Sorting has already been done - by areaDatKey or value.
   # The areaID list has therefore been re-ordered accordingly.
   # Reorder the DataList to match.  The assumption was that the input data order for the panelData
   # matched the order of the original data in the statsDFrame.
   #
   #cat("SBBar - areaDatKey:",areaDatKey,"\n")

   workMatSB <- as.matrix(workSB)

   SBLen <- apply(workMatSB,1,length)   # get length of each row.
   SBLRange <- range(SBLen,na.rm=TRUE)
   NumSegs <- SBLRange[2]   # number of segments (Max Length)

   # prepend a zero start point, then cumulative-sum each row to get the
   # segment boundary positions for every bar.
   SBBarPt <- cbind(rep(0,numRows),workMatSB)
   SBBarPt <- t(apply(SBBarPt,1,cumsum))

   #_______________Scaling____________

   # x scaling
   lPad <- TRUE
   rPad <- TRUE

   rMax <- max(SBBarPt)

   if (SBnorm) {
      rx <- c(0,100)    # normalized bars always span 0..100%.
      lPad <- FALSE
      rPad <- FALSE
   } else {
      rx <- c(0,rMax*1.02)
      lPad <- FALSE
   }
   #cat("seg/normbar-rx:",rx,"\n")

   ry <- c(0,1)

   pyPat <- c(-0.5,-0.5,0.5,0.5,NA)
   py <- CSNBar.barht * pyPat   # SNBar.barht = 2/3 (0.6667) (fixed)
   # py <- c( -1/3, -1/3, +1/3, +1/3, NA)

   # variable bar height calculations
   wYPdelta <- (CSNBar.Last.barht - CSNBar.First.barht)/(NumSegs-1)   # increment
   wYP1 <- CSNBar.First.barht - wYPdelta

   # _____________ Color Patterns _______________

   baseColRgb <- BuildSegColors(NumSegs)

   # ___________titles and labeling axes_______________

   #####
   #
   # Setup and draw top and bottom titles and axis for column
   #
   # if segmented stacked - no padding on side with zero.
   # if normalized stacked - no padding on either side.
   #
   Res <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad)

   atRx <- Res$atRx
   rx <- Res$rx
   ry <- Res$ry
   #cat("SN-staggering:",staggering,"  Result staggered:",staggered,"\n")
   #
   #####

   # ___________________drawing loop_____________________

   oldpar <- par(lend="butt")

   # build each panel for each stacked bar set.
   #printPar()
   #print(paste0("rx:",paste0(rx,collapse=" ")," ry:",paste0(c(1-pad,ke+pad),collapse=" ")))

   for (i in 1:numGrps) {
      gsubs <- ib[i]:ie[i]   # get beginning to end index row number in this group
      ke <- length(gsubs)    # get number of rows in group (5 or 1)

      # adjust if median group
      pen <- if(i==medGrp && medGrpSize == 1) 7 else 1:ke   # if median group (6)(black), then pen=6, otherwise pen = c(1...x)

      laby <- ke:1

      ksc <- SetKsc(ke)

      panelSelect(panels,i,j)
      x <- panelScale(rx,c(1-pad,ke+pad))   # 1 to 5 are the y values for each bar.
      panelFill(col=Panel.Fill.col)

      axis(side=1, tck=1, labels=FALSE, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd)   # grid

      # if a refval is provided and in the rx range, then add line.
      AddRefLine(refval, ke, rx)

      #
      # Process each area's line; rows with NA segments ("good" FALSE) are skipped.
      #
      for (k in 1:ke) {
         # cycle through row-groups and assign colors to associated areas dots.
         m <- gsubs[k]

         if (good[m]) {
            wX <- SBBarPt[m,]   # Get Row of data (cumulative boundaries).

            if (SBnorm) {
               wX <- wX / wX[NumSegs+1] * 100   # last segment value is in NumSegs + 1 to get last column (end point)
            }

            #wYP <- rep(laby[k],5)+py   # height of segment (laby[k] => center line of segbar)
            wYP <- rep(laby[k],5)       # height of segment (laby[k] => center line of segbar)

            # calculate box for each segment
            wYPht <- wYP1

            for (ik in 1:NumSegs) {
               if (SNBar.varht) {
                  # variable height bar segments
                  wYPht <- wYPht + wYPdelta
                  wYP2 <- wYP + ((pyPat * wYPht) * ksc )
                  #print(paste0("Seg:",ik,"  wYP2:",wYP2))
               } else {
                  # fixed height bar segments
                  wYP2 <- wYP + (py * ksc)
               }

               val0 <- wX[ik]     # start
               val1 <- wX[ik+1]   # end position
               wXP <- c(val0,val1,val1,val0,NA)

               # good value - draw bars as polygons.
               polygon(wXP,wYP2,col=baseColRgb[pen[k],ik],lwd=CSNBar.Outline.lwd,border=CSNBar.Outline.col,lty=CSNBar.Outline.lty)
               #polygon(wXP,wYP2,col=CSNBar.Outline.col,density=0)
            }  # end of ik loop (plotting Segments)

            #
            if (SNBar.Middle.Dot) {
               # do we graph a middle dot on the row?
               mY <- laby[k]   # get Y position

               # put dot on boundary if even number of segments or in middle of middle segment if odd.
               if ((NumSegs %% 2)==1) {
                  # put dot in middle of middle segment.
                  mSeg <- NumSegs %/% 2 + 1
                  mX <- (wX[mSeg] + wX[mSeg+1])/2   # middle of segment
               } else {
                  # put dot on border between two middle segments.
                  mSeg <- NumSegs %/% 2
                  mX <- wX[mSeg+1]
               }

               if (SNBar.MDot.pch >= 21 && SNBar.MDot.pch <= 25) {
                  # treat filled and non-filled symbols differently - get close to same results.
                  # with filled, fill is bg, col and lwd deal with border
                  # with non-filled, fill is col, lwd deals with border using col.

                  # filled symbol
                  points(mX,mY,pch=SNBar.MDot.pch,
                         cex=SNBar.MDot.pch.size,
                         bg=SNBar.MDot.pch.fill,            # fill color
                         col = SNBar.MDot.pch.border.col,   # border color
                         lwd = SNBar.MDot.pch.border.lwd)
               } else {
                  # non filled symbol
                  points(mX,mY,pch=SNBar.MDot.pch,
                         cex=SNBar.MDot.pch.size,
                         col = SNBar.MDot.pch.fill,         # fill and border color
                         lwd = SNBar.MDot.pch.border.lwd)
               }
            }  # end of Middle Dot drawing.
         }  # end of "good" check for row.
      }  # end of k loop (group/row)

      # finish up panel
      panelOutline(Panel.Outline.col)
   }  # end of i loop

   par(oldpar)

   # ____________________________PanelOutline____________________

   groupPanelOutline(panelGroup,j)

}

###################################################

#
# For TS, and TSConf I could not find a way to use to have areaDatKeys as the names of
# each area matrix, in list or data.frame.  So, the out at this time is
# to assume the original panelData array is in the order of the original statsDFrame data.frame.
# When statsDFrame is re-ordered, I have captured the re-ordering.  Using the "order" index
# the raw panelData is used via the order index to associate the line on the micromap to the data.
#
# Boxplot uses $names to look up to find out the record and link the Boxplot list to the
# statsDFrame data.
#
#

#####
#####
#
# type = TS and TSConf =====================================================
#
# rlAreaTSConf (Time Series with and without confidence interval in panel groups)
#
# Plot all data for panel's areas as one graph in panel.
#

rlAreaTSConf = function(j,dataNam,conf=TRUE){
   #
   # j = panel column number
   #
   # dataNam = Name of large data array containing the x, y (or y low, med and high) values
   #     for each time period and area.  Data element is three dimensions (area, sample, value)
   #     The area index is limited to 1:51.  The value index is limited to 1:4.
   #     The sample index is not limited, but a practical limit is around 200-250 samples.
   #
   # conf = logical.
   #     If TRUE, do the confidence band using y-low, y-med, and y-high values (columns 2, 3, 4)
   #     If FALSE, only plot the Y value (column 2)
   #
   #cat("TS - areaDatKey:",areaDatKey,"\n")

   ErrFnd <- FALSE
   TSMsgLabel <- "TS"
   if (conf) TSMsgLabel <- "TSCONF"

   pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0")

   # Check data - look up the named array in the caller's environment.
   DataList <- tryCatch(get(dataNam,pos=1),error=function(e) e)

   if (inherits(DataList,"error")) {
      ErrFnd <- TRUE
      warnCnt()
      xmsg <-paste0("***02T1", TSMsgLabel, " ", pdColNum, " column in data.frame ", dataNam, " does not exist or is not valid.")
      warning(xmsg, call.=FALSE)
   } else {
      # data.frame (r object) exists - can do other checks
      workDArr <- DataList
      wDArrNames <- rownames(workDArr)   # get rownames

      if (!is.array(workDArr)) {
         ErrFnd <- TRUE
         warnCnt()
         xmsg <- paste0("***02T2", TSMsgLabel, " ", pdColNum, " The ", dataNam, " data structured in the panelData field is not an array.")
         warning(xmsg, call.=FALSE)
      }

      dimDArr <- dim(workDArr)

      #if (dimDArr[1] != numRows) {    # RETIRE..
      #   ErrFnd <- TRUE
      #   warnCnt()
      #   xmsg <- paste0("***02T3", TSMsgLabel, " ", pdColNum, " The " , dataNam, " array\'s 1st dimension is not ", numRows, " areas.  It is ", dimDArr[1], ".")
      #   warning(xmsg, call.=FALSE)
      #}

      #if (dimDArr[2] < 2 || dimDArr[2] > 31)   # removed upper limit of the number of time points.
      if (dimDArr[2] < 2 ) {
         ErrFnd <- TRUE
         warnCnt()
         xmsg <- paste0("***02T4", TSMsgLabel, " ", pdColNum," The ", dataNam, " array\'s 2nd dimension (time periods) must have at least 2 points.  It is ", dimDArr[2], ".")
         warning(xmsg, call.=FALSE)
      }

      if (conf) {
         # TSCONF option. # Time Series with Confidence Bands
         if (dimDArr[3] !=4) {
            ErrFnd <- TRUE
            warnCnt()
            xmsg <- paste0("***02T5", TSMsgLabel, " ", pdColNum, " The ", dataNam, " array\'s 3rd dimension is not 4.  It is ", dimDArr[3], ",")
            warning(xmsg, call.=FALSE)
         }
      } else {
         # Time Series without Confidence Bands
         if (dimDArr[3] < 2) {
            ErrFnd <- TRUE
            warnCnt()
            # fixed: "3nd" -> "3rd" in message text.
            xmsg <- paste0("***02TA", TSMsgLabel, " ", pdColNum, " The time series array\'s 3rd dimension must be at least 2.  It is ", dimDArr[3], ".")
            warning(xmsg, call.=FALSE)
         }
         if (dimDArr[3] != 2 && dimDArr[3] != 4) {
            # accept confidence data - don't stop run.
            ErrFnd <- TRUE
            warnCnt()
            xmsg <- paste0("***02T6", TSMsgLabel, " ", pdColNum, " The time series array\'s 3rd dimension must be 2 or 4.  It is ", dimDArr[3], ".")
            warning(xmsg,call.=FALSE)
         }
      }

      if (is.null(wDArrNames)) {
         # names are not present
         ErrFnd <- TRUE
         warnCnt()
         xmsg <- paste0("***02TB", TSMsgLabel, " ", pdColNum, " The time series array does not have rownames assigned to the 1st dimension.  Data cannot be paired up with area.")
         warning(xmsg, call.=FALSE)
      } else {
         tnn <- is.na(match(wDArrNames,areaDatKey))
         if (any(tnn)) {
            # non-match found.
            ErrFnd <- TRUE
            warnCnt()
            lnn <- paste0(wDArrNames[tnn],collapse=" ")
            xmsg <- paste0("***02T7", TSMsgLabel, " ", pdColNum," Rownames on array do not match subarea ID list. The bad area IDs are:", lnn)
            warning(xmsg, call.=FALSE)
         }
      }
   }

   if (ErrFnd) return ()   # if any errors found - don't draw column.

   refval <- lRefVals[j]    # get reference to object, changed
   reftxt <- lRefTexts[j]   # new - JP-2010/07/23

   # structure of dataArr
   #   dataList is a 3 dim array :
   #       a * b * c, where:
   #        a is the area index number (1 to "n") (sub-area)
   #        b is the time period index (2 to "n" range) (Limited only by R and memory)
   #        c is the type of value (1=x, 2=low, 3=mid, 4=high)  or (1=x, 2=y)
   #

   #_______________Scaling of TS Axis____________

   # x scaling
   lPad <- FALSE
   rPad <- FALSE

   rx <- range(workDArr[,,1],na.rm=TRUE)   # x range from all values in vector
   #cat("ts-rx:",rx,"\n")

   #rx <- sc*diff(rx)*c(-.5,.5)+mean(rx)    # min to max range with expansion factors.
   #cat("ts-rx after padding:",rx,"\n")

   # y scaling
   if (conf) {
      # range of line, high and low.
      ry <- range(workDArr[,,c(-1)],na.rm=TRUE)   # range of all Y values
   } else {
      # range of line.
      ry <- range(workDArr[,,2],na.rm=TRUE)   # range for the one Y value
   }
   #cat("ts-ry:",ry,"\n")

   ry <- sc*diff(ry)*c(-.5,.5)+mean(ry)   # min to max range with expansion factors.
   #cat("ts-ry after padding:",ry,"\n")

   #_______________Find range/min/max of median row line/high/low.____________

   #_______________Gather stats and put in area Order______________
   #
   # JP-no data in col1, col2, or col3 to sort like the other columns...
   # All of the data is in these structures.
   #
   # at present no re-ordering of the time series like the other plots.
   # JP-if other column is sorted, time series will follow that order via the indexes.
   #

   ####
   # ____________column titles and axis_______________

   #####
   #
   # Setup and draw top and bottom titles and axis for column
   #
   # TS, TS-Conf no padding on either side - graph starts at first data point to last data point.
   # Check out this effects labeling.
   #
   Res <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad, YAxisPad=TRUE)

   atRx <- Res$atRx
   rx <- Res$rx
   ry <- Res$ry
   #cat("Ts-Result staggering:",staggering,"  staggered:",staggered,"\n")
   #
   #####

   oldpar <- par(lend="butt")

   ##### Can be done once for all iterations of loop.
   YAxis_cex <- TS.Axis.cex * 0.75
   xPs <- par("ps")
   xHPsLU <- strheight("00000",cex=1,units="user")
   xHDesPsLU <- strheight("00000",cex=YAxis_cex,units="user")
   xDifHLU <- xHPsLU - xHDesPsLU
   YAxis_adj <- xDifHLU / xHPsLU
   #cat("YAxis adjustment - YAxis_adj:",YAxis_adj,"  YAxis_cex:",YAxis_cex,"\n")

   # _______________drawing loop (panels 1->11)___________________

   for (i in 1:numGrps) {
      # 1,2,3,4,5, 6, 7,8,9,10,11  ng=11 (for US)
      # Cycle through the Row/Groups in the micromap column

      gsubs <- ib[i]:ie[i]   # get beginning to end index row number in group
      ke <- length(gsubs)    # get number of rows in group (5/6 or 1)

      pen <- if(i==medGrp && medGrpSize == 1) 7 else 1:ke   # if middle group (7), then pen=7 (Black), otherwise pen = c(1...5) or c(1...6)

      kcol <- c(mstColors[c(1:ke,7)])   # get major colors

      addBlack <- 0
      if (medGrp > 0 && medGrpSize == 1) {
         if (i == medGrp-1) {
            # panel before the median row
            gsubs <- c(gsubs,ib[i+1]:ie[i+1])   # extend one more to get median row
            addBlack <- 7
         }
         if (i == medGrp+1) {
            # panel after the median row
            gsubs <- c(gsubs,ib[i-1]:ie[i-1])   # extend to include at end of the list
            addBlack <- 7
         }
      }

      gnams <- areaDatKey[gsubs]   # get list of area ids for data group of data.

      # adjust if middle group
      if ( addBlack > 0 ) pen <- c( pen, 7 )

      # do panel -
      panelSelect(panels,i,j)   # select panel for group i in column j)
      panelScale(rx,ry)         # set scale for panel (should this be ry * 5 or 1?)
      # scale x and y to the shape of the panel (6 - median is squeezed.)
      panelFill(col=Panel.Fill.col)   # set fill for panel

      # draw grid lines in panel - vertical (x axis)
      axis(side=1, tck=1, labels=FALSE, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd)   # grid lines (x axis)

      if (i==medGrp && medGrpSize == 1 ) {
         # median panel
         atRy <- c(saveAtRy[1],saveAtRy[length(saveAtRy)])   # median panel range (Get copy of first and last number)
      } else {
         # all other panels
         atRy <- panelInbounds(ry)   # get labels for y-axis
      }

      if (TS.hGrid) {
         # horizontal grids on Y axis
         axis(side=2,tck=1,labels=FALSE,col=Grid.Line.col,lwd=Grid.Line.lwd, at=atRy)   # Grid lines
      }

      ## Y axis values and labels
      #axis(side=2, tick=F, mgp=mgpLeft, cex.axis= TS.Axis.cex*.75 ,
      #     at=atRy, labels=as.character(atRy))   # Y axis labels
      #mtext(lab4[j],side=2,line=Title.Line.5.pos,cex=TS.Axis.cex)   # Y axis title
      #
      axis(side=2, tick=FALSE, cex.axis=YAxis_cex, mgp=mgpLeft, line= -YAxis_adj*0.3, at=atRy, labels=as.character(atRy))

      mtext(lab4[j],side=2, line=Title.Line.5.pos, cex=TS.Axis.cex)

      panelOutline(col=Panel.Outline.col)   # outline panel

      #####
      # Issue with median row - line drawing.  The y axis is squeezed
      # to about 1/5 of the scale used in the other rows.  This distorts
      # the line graph and any confidence band.
      #####

      #####
      #
      # Current take each row and:
      #    draw confidence (if required)
      #    draw line
      #    next row.
      # This leads to confidence overlaying the lines of rows. - need to do confidence blocks, then all lines.
      #  Change Sept 1, 2015
      #
      #####

      # handle confidence bands - all bands first so no band overlays a line.
      if (conf) {
         for (k in 1:ke) {
            # Process each slot of panel - step 1 to 5 or 1 to 1
            # cycle through row-groups and build each time series
            kp <- pen[k]   # color number

            wDArr <- workDArr[gnams[k],,]
            wX <- wDArr[,1]       # get X values for line and polygon plots
            wLine <- wDArr[,2]    # Get Y values for mid line

            # build polygon of confidence band to fill (y-low to y-high) and draw first.
            # new logic to handle NA in X or Y data.  Have to break up the polygons into separate plots.
            cX <- c(wX,NA)
            cY1 <- c(wDArr[,3],NA)   # lower Y data points
            cY2 <- c(wDArr[,4],NA)   # upper Y data points
            #cat("cY1:",paste0(cY1,collapse=", "),"\n")
            #cat("cY2:",paste0(cY2,collapse=", "),"\n")
            #cat("cX :",paste0(wX ,collapse=", "),"\n")

            Breaks <- is.na(c(cX+cY1+cY2))   # TRUE where any of x/low/high is NA.
            #cat("Breaks:",paste0(Breaks,collapse=", "),"\n")

            # we found at least one NA in the data.
            wXz <- MMVSplit(wX, Breaks)
            wY1z <- MMVSplit(cY1,Breaks)
            wY2z <- MMVSplit(cY2,Breaks)
            #cat("wY1z:",paste0(wY1z,collapse=", "),"\n")
            #cat("wY2z:",paste0(wY2z,collapse=", "),"\n")
            #cat("wXz :",paste0(wXz ,collapse=", "),"\n")

            vL <- length(wXz)   # if only one list - then length = 15 instead of one. *****************
            #cat("vL:",vL,"\n")

            # draw confidence shades
            for (ind in c(1:vL)) {
               if (length(wXz[[ind]])>0) {
                  xL <- c(wXz[[ind]], rev(wXz[[ind]] ), NA)
                  yL <- c(wY1z[[ind]], rev(wY2z[[ind]]), NA)
                  wPoly <- data.frame(x=xL, y=yL)
                  #print(wPoly)
                  #cat("colors:", mstColors[kp+12],"  kp+12:", kp+12,"\n")
                  polygon(wPoly, col=mstColors[kp+12], border=NA)
               }
            }
            # shaded polygons of confidence band have been plotted.
         }  # end of k loop rows.
      }  # end of confidence test.

      # draw lines
      for (k in 1:ke) {
         # Process each slot of panel - step 1 to 5 or 1 to 1
         # cycle through row-groups and build each time series
         kp <- pen[k]   # color number

         wDArr <- workDArr[gnams[k],,]
         wX <- wDArr[,1]       # get X values for line and polygon plots
         wLine <- wDArr[,2]    # Get Y values for mid line

         # Plot mid Line
         lines(wX,wLine,col=mstColors[kp],lwd=TS.lwd)
         # NA processing, in the lines call, the missing point (x,y) is just not drawn or other points connected to it.
         # a gap is generated.
      }  # end of k loop rows.

      saveAtRy <- atRy
   }

   par(oldpar)

   # ____________________________PanelOutline____________________

   groupPanelOutline(panelGroup,j)

}

#####
#
#
#####  end of glyph functions  #####
#
#
#############################
#############################

#print("Glyph functions loaded")

#############################
#############################
#
#
# General Functions for micromapST and glyphs
#
#

# AddRefLine - adds the reference (vertical) line to the current panel.
#   wRefVal - reference value (NA = no line); wKe - number of rows in the
#   panel (sets the line's vertical extent); wRx - x range of the panel.
#

AddRefLine <- function (wRefVal, wKe, wRx) {
   if (!is.na(wRefVal)) {
      if(is.between.r(wRefVal,wRx)) {
         # reference line - only drawn when the value falls inside the x range.
         lines(rep(wRefVal,2),c(1-padMinus,wKe+padMinus),lty=Ref.Val.lty,lwd=Ref.Val.lwd,col=iRef.Val.col)
      }
   }
}

#
#_________ function to pattern match alias names
#

AliasToIndex <- function(xR,aNAIAlias) {
   #
   # xR is the string list, aNAIAlias is the Name Table $ Alias column
   # return index into the NAI table
   # The user string must be cleaned up to make sure it can match one of the wildcard alias strings.
   # The user strings are edited to convert any punctuation marks, control characters, spaces, tabs, cr, etc.
   # into blanks, multiple blanks, leading and trailing blanks are eliminated and the string is converted to
   # all uppercase.
   #
   # xR --> a vector of the registry names from SeerStat output

   wReg <- CleanString(xR)

   wIndex <- rep(NA,length(wReg))   # match results - NA default - no match

   # wild card match of input character vector to alias in name table.
   xouta <- t( sapply(seq_along(aNAIAlias), function(x) {
         y=grep(aNAIAlias[x],wReg,ignore.case=TRUE)   # user string list against each entry.
         ifelse(length(y)==0,return(c(NA,NA)),return(c(x,y)))   # if result length = 0 -> no match. otherwise return the string and index.
      } ))

   # result - matrix is column 1 = aNAI index that matched, column 2 = index into char vector .

   # drop=FALSE keeps the matrix shape when exactly one alias matches;
   # without it the single-row result collapsed to a vector and the
   # wIndex assignment below failed.
   xoutb <- xouta[!is.na(xouta[,1]), , drop=FALSE]   # keep only matches.
   wIndex[xoutb[,2]] <- xoutb[,1]

   wMissing <- is.na(wIndex)
   wMissingList <- paste0(xR[wMissing],collapse=", ")

   #if (any(wMissing)) {
   #
   #    xmsg <- paste0("***0195 ALIAS Alias Name(s) in the data does not match the name table for the area.  The unmatched data rows are:",wMissingList)
   #    stopCnt()
   #    stop(xmsg, call.=FALSE)
   #
   # }
   # let duplicate and missing through.  Handled by caller.

   return(wIndex)   # return index to name table
}

#
###

###
#
#_________ function to pattern match alias names
#

AliasToKey <- function(xR,aNAI) {
   # xR is the string list, aNAI is the Name Table
   # return index into the NAI table
   # x --> a vector of the registry names from SeerStat output

   ErrFnd <- FALSE
   wReg <- toupper(xR)

   wIndex <- rep(NA,length(wReg))   # NA results of keys
   wKey <- rep(NA,length(wReg))     # NA results of keys

   xout1 <- sapply(seq_along(aNAI$Alias), function (x) grep(aNAI$Alias[x], wReg, ignore.case=TRUE))
   # one entry per aNAI row, NA or # of wReg Row of match.

   xout1a <- unlist(xout1)   # list of matched locations for each item.  NA's and lists removed, just a list of matches.

   # Get list of those items that did not find a match. - find list of wReg item that did not match.
   xout2 <- !is.na( lapply( xout1, function(x) ifelse(length(x)==0,NA,x) ) )
   # xout2 converts results from "" into NA.
   # NOTE(review): xout2 is a logical vector used below to subscript
   # aNAI$Key - this keeps only rows that matched; verify intent.

   xout3 <- unlist( lapply( xout1, function(x) { if(length(x[])>1) { x } else { NA } } ) )
   # xout3 is string or NA - string if no match.

   if (any(!is.na(xout3))) {
      # an alias matched more than one input row - treated as a duplicate and fatal.
      ErrFnd <- TRUE
      StopFnd <- TRUE
      xout4 <- paste0(xout3[!is.na(xout3)], collapse=" ")
      xmsg <- paste0("***0196 ALIAS Sub-area names in the data have duplicate name in rows:",xout4, "  Only one row per sub-area is permitted.\n")
      stopCnt()
      stop(xmsg, call.=FALSE)
   }

   wIndex[xout1a] <- aNAI$Key[xout2]
   wKey[xout1a] <- aNAI$Key[xout2]

   return(wKey)   # return list of abbreviations or NA if no match.
}

#
###

###
#
# Function to generate the segment blended colors for the stacked bar glyphs.
# It takes the base 5 or 6 colors used in the maps and other glyphs
# and generates a progression of light to full color for use in the
# segments of a stacked bar glyph.
#

# BuildSegColors - build the per-segment blended color palette for the
# segbar/normbar glyphs.
#
# NumSegs - number of segments per bar (2 to 9).
#
# Returns a character matrix baseColRgb[color, segment]: rows are the 7
# base colors (from the file-global mstColors; rows 1-6 are normally
# used, 7 is the median black), columns are segments 1:NumSegs.  Each
# entry is a hex color string blending the base color toward white; the
# blend fraction ramps from light (first segment) to full color (last),
# following (cumulative step ^ 1.9) * 0.6 + 0.4.

BuildSegColors <- function(NumSegs) {

   # Base colors as 7 x 3 RGB rows scaled to 0..1; white background as a
   # length-3 vector on the same scale.
   fgRgb <- t(col2rgb(mstColors[1:7])) / 255
   bgRgb <- as.vector(col2rgb("white")) / 255

   # Blend fraction per segment: equal cumulative steps to 1, curved by
   # the 1.9 exponent, then rescaled to run from ~0.4 up to 1.0 so the
   # lightest segment is still clearly tinted.
   segFrac <- (cumsum(rep(1/NumSegs, NumSegs)) ^ 1.9) * 0.6 + 0.4

   # Fill the palette one segment at a time: mix each base color with
   # white using segFrac (color weight) and 1 - segFrac (white weight).
   baseColRgb <- matrix("", nrow = 7, ncol = NumSegs)

   for (isg in seq_len(NumSegs)) {
      whitePart <- bgRgb * (1 - segFrac[isg])
      for (icl in 1:7) {
         mixed <- fgRgb[icl, ] * segFrac[isg] + whitePart
         baseColRgb[icl, isg] <- rgb(mixed[1], mixed[2], mixed[3])
      }
   }

   # result => baseColRgb [color (1:7), segmentNum (1:n)]
   return(baseColRgb)
}

#
###

###
#
# Subroutine to take values in the col<x> vectors (panelDesc variable),
# convert numerics to integer, convert character (column names)
# by matching with statsDFrame column names to statsDFrame column numbers.
# NA's (no name match) and out of range numbers are set to "0" - NOT VALID.
#
# Used to check column specifications for sortVar, rowNamesCol and colx variables during
# initial setup.  By the time the glyphs runs, the col1,...,col3 variables are translated
# into column numbers and no longer need to be checked.  Except to validate they exist when needed.
#
# This routine takes any number/name of columns provided by user and validates it and translates to
# column number.  Will not translate "NA", missing, "" or "0" values.  glyph will test if
# data is missing.
#
# This routine does a general check of a named list of statsDFrame column names or numbers.
# At the end of the verification, the names are translated into statsDFrame column numbers.
#
# The caller should save the original named list vectors for diagnostic messages.
#
# Used mostly by sortVar, rowColName, and other arguments.
#

#####
#
# CheckColx appears to not be used any more.  Verify.
# #####
#
# CheckColx: validate/translate a panelDesc col<x> vector of statsDFrame
# column names/numbers into integer column numbers.  Invalid entries come
# back as 0; the glyphs do the final "is this column usable" check.
# NOTE(review): believed retired (see comment above) - verify before removal.
#
CheckColx <- function(wcol, colname, wnam2, len_wnam) {
   #  wcol     = col vector of names/number in statsDFrame from panelDesc
   #  colname  = literal character name of col vector for error message. (panelDesc variable name (col1, col2, col3))
   #  wnam2    = character list of column names and row numbers (in character format)
   #  len_wnam = number of original set of columns. (length(wcol)
   #
   #  Results Rules:  "0" means invalid number, out of range number or invalid name.
   #        NAs are converted to "0" values.
   #        glyphs check for valid values based on need.
   #
   xwcol  <- wcol
   l_wcol <- length(wcol)
   ErrFnd <- FALSE

   if (is.factor(xwcol)) {
      # factors must be unwound to character before any type tests.
      xwcol <- as.character(xwcol)
   }

   if (is.numeric(xwcol) || is.logical(xwcol)) {
      # have number (double, single, integer, or logical)
      # we are dealing with numeric or logical
      rcol <- as.integer(xwcol)     # convert numeric to integer.
      rcol[is.na(rcol)] <- 0        # get rid of NA.  Turn to zeros - doesn't get rid of negatives.

      if (any(rcol < 0)) {
         ErrFnd <- TRUE
         xmsg   <- paste0("***0201 PDCOL In the ",colname," named list in the ", pDName,
                     " panelDesc structure there are one or more negative values: ",
                     paste0(rcol,collapse=", ")," Literal:",wcol)
         warnCnt()
         warning(xmsg,call.=FALSE)
      } else {
         if (any(rcol > len_wnam)) {
            ErrFnd <- TRUE
            # NOTE(review): this message still carries \var{...} documentation
            # markup instead of the actual variable names - confirm intended.
            xmsg   <- paste0("***0202 PDCOL One or more of the values in the \var{<pdVarName>} named list in the \var{<panelDesc>} structure is greater than the columns in the \var{<statsDFrame>} data.frame: ",
                        paste0(rcol,collapse=", "))
            warnCnt()
            warning(xmsg,call.=FALSE)
         }
      }
      # if ErrFnd = FALSE, then all numbers in the vector are within range.
      # check valid range in glyph (NA become zeros.)  Leave the final check to the glyphs.

   } else {
      if (is.character(xwcol)) {
         # have character - may be name or number - check each
         # get number for other code, if column name.
         xcol <- match(xwcol,wnam2,nomatch=0)                      # match against column names and numbers (as characters)
         rcol <- ifelse(xcol > len_wnam, xcol-len_wnam, xcol)      # adjust matches to row numbers to real row numbers.
         # name and character number converted to integer
         # bad and NA values are "0" and will be caught in the glyph
      } else {
         # invalid variable type
         ErrFnd <- TRUE
         xmsg   <- paste0("***CCOL-03 The type of ",colname," panelDesc variable is invalid.  ",typeof(xwcol),".  Must be integer or character.")
         warnCnt()
         warning(xmsg,call.=FALSE)
      }
   }

   if (ErrFnd) {
      return (rep.int(0,l_wcol))
   } else {
      # clean up any NAs in list, set to 0
      rcol[is.na(rcol)] <- 0        # set NA to 0 (invalid)
      return (rcol)
      #print(rcol)
   }
}

#
### ###
#
# CheckColx2: newer version of CheckColx.  Validates a panelDesc variable
# (col1/col2/col3, sortVar, rowColName...) of statsDFrame column
# names/numbers and translates it to integer column numbers.  "" and NA
# entries are skipped (returned as NA); bad entries come back as 0 after a
# numbered warning.
#
CheckColx2 <- function(colValues, varName, varNum, gNameList, stColNames, len_sCN) {

   # xx <- gsub(",","",<value>,fixed=TRUE)
   #
   gc4real <- "^[-+]?[ ]?[0-9]{1,3}(,[0-9]{3})*(\\.[0-9]*)?$|^[-+]?[ ]?[0-9]*(\\.[0-9]*)?$"   # is real number with commas
   # NOTE(review): gc4real is assigned but never used in this function.
   gc4int  <- "^[-+]?[ ]?[0-9]{1,3}(,[0-9]{3})*$|^[-+]?[ ]?[0-9]*$"                           # is integer number with commas

   #cat("colValues:",paste0(colValues,collapse=", "),"\n")
   #cat("varName  :",varName,"\n")
   #cat("varNum   :",varNum,"\n")
   #cat("gNameList:",paste0(gNameList,collapse=", "),"\n")
   #cat("stColNames:",stColNames,"\n")
   #cat("len_sCN  :",len_sCN,"\n")
   #
   # Routine is used to check out the information provided by the user.  If the
   # vector contains a number or a character string, it will validate the number against
   # the column number range of statsDFrame.  If a character string vector is provided,
   # each item is checked for being a number or non-number string.  If numeric,
   # the value is converted to integer and validated for <= 0 and range.  If a non-numeric
   # string, the string is matched against the column names on statsDFrame and translated to
   # the column number.  "" and NA values are ignored and not translated or matched.
   # If a string does not match, then it resolves to NA.  "" are converted to NA.
   #
   # The glyphs are left to determine if all of the needed data columns are provided.
   # This only validates the information present.  If the data column is not used, we don't care.
   #
   # pdVarData - colValues = character or numeric vector of column names/numbers in statsDFrame.
   #             Can be a list from sortVar, rowColName, or panelDesc col1, col2, or col3.
   # pdVarName = name of variable - vector being checked. (used in messages.) (example: col1, col2, sortVar, rowColName, etc.)
   # pdVarNum  = 3rd character in message identifiers: "0" for sortVar and rowColNames and 1 to 3 for panelDesc columns
   # gNameList = associated "type" list of glyphs per entry in vector. Used in messages.  Must be the same length as
   #             colValues.  For sortVar and rowColName, this parameter is set to "".
   # stColNames = character list of column names and numbers (in character format) (statsDFrame column names)
   # len_sCN   = number of original number of columns.  dim(statsDFrame)[2]  The stColNames list is 2 x this value.
   #
   # Rules:  Not provided = "" and NA.
   #         "0" means invalid number or name.  Error message already generated.
   #         glyphs check for valid values based on need.  We just make sure the column has
   #         a valid reference and can be accessed.  Not valid content.
   #
   # Working variables:
   #   FvarNum = 1 character version of varNum (if positive) otherwise set to "0" (single value)
   #
   #cat("len_sCN:",len_sCN," varNum:",varNum,"\n")

   ErrFnd      <- FALSE              # no errors indicator
   xwcol       <- colValues          # get working copy of panelDesc contents of a variable list.
   l_xwcol     <- length(colValues)  # length of variable contents vector
   l_gNameList <- length(gNameList)  # length of type list (number of glyphs)
   rcol        <- rep(NA,l_xwcol)    # results column number list.

   if (varNum >= 0) {
      FvarNum <- formatC(varNum,format="f",digits=0,width=1)
   } else {
      FvarNum <- "0"
   }

   if (l_xwcol != l_gNameList) {
      # panelDesc variable not same length as number of types
      if (l_gNameList == 0) {
         # gNameList is absent - possible sortVar or rowColNames arguments.
         l_gNameList <- l_xwcol
      } else {
         # error - they should be the same length, possible type-o in variable list.
         ErrFnd <- TRUE
         warnCnt()
         # NOTE(review): xmsg is built here but warning() is never called on it,
         # and the whole glyph-type list is pasted into the id - verify intent.
         xmsg   <- paste0("***0205 ",gNameList," The length of the glyph type list is different the length of the variables list.")
      }
   }

   #cat("l_xwcol:",l_xwcol," len_sCN:",len_sCN," varNum:",varNum," FvarNum:",FvarNum,"\n")
   #print(stColNames)

   FndNames <- rep(TRUE,l_xwcol)
   skipList <- (is.na(xwcol) | xwcol == "")   # no values provided in entry ("", "" from ,, or NA)

   if (is.factor(xwcol)) {
      # if a factor convert to character - should not be a factor if it's a numeric,
      xwcol <- as.character(xwcol)    # remove factor index.
   }

   if (is.numeric(xwcol)) {
      # have number (double, single, integer)
      rcol <- as.integer(xwcol)       # convert numeric to integer (column indexes are integers)
      # NA values still show up as NA.  Still need to validate.
      #print("numeric")
   } else {
      if (is.character(xwcol)) {
         xTcol <- grepl(gc4int,xwcol)       # T/F vector of type of string T=Numeric
         xTcol[skipList] <- FALSE
         if (any(xTcol)) {
            rcol[xTcol] <- as.integer(xwcol[xTcol])     # convert chars to numbers
         }
         xTcol <- !xTcol                    # reverse flags T = character
         xTcol[skipList] <- FALSE           # don't check any NA fields.
         if (any(xTcol)) {
            # check all, but we will only update the character ones.
            xcol  <- match(xwcol,stColNames)                  # match against column names and numbers (as characters)
                                                              # and translate column names and numbers to integers.
            xcol2 <- ifelse(xcol>len_sCN,xcol-len_sCN,xcol)   # adjust matches to verified/matched column numbers to real column numbers.
            rcol[xTcol]     <- xcol2[xTcol]
            FndNames[xTcol] <- !is.na(xcol)[xTcol]
         }
      } else {
         # invalid type of vector, numeric, integer, or character
         # is this possible because prior checks???
         stopCnt()
         StopFnd <- TRUE
         xmsg    <- paste0("***02",FvarNum,"0 PDCOL The ",varName," named list is type ",typeof(xwcol)," is invalid. Must be a numeric, integer or character vector.")
         stop(xmsg, call.=FALSE)
      }
   }

   # validate
   #print("xwcol")
   #print(xwcol)
   #print("rcol")
   #print(rcol)
   #cat("FndNames:",FndNames,"\n")
   #cat("len_sCN:",len_sCN,"\n")

   for (ind in c(1:l_xwcol)) {
      # validate each one - rcol.
      pdColNum <- formatC(ind, format="f",digits=0, width=2, flag="0")
      xRcol    <- rcol[ind]
      if (!is.na(xRcol)) {
         #cat("ind:",ind," xRcol:",xRcol,"\n")
         if (xRcol <= 0) {
            warnCnt()
            ErrFnd <- TRUE
            xmsg   <- paste0("***02",FvarNum,"2 PDCOL ",gNameList[ind]," ",pdColNum," The column number of ",xwcol[ind]," in '",varName,"' is negative or zero. Must be a positive integer or the name of a column in statsDFrame.")
            warning(xmsg,call.=FALSE)
            rcol[ind] <- 0
         } else {
            if(xRcol > len_sCN) {
               warnCnt()
               ErrFnd <- TRUE
               xmsg   <- paste0("***02",FvarNum,"3 PDCOL ",gNameList[ind]," ",pdColNum," The column number of ",xwcol[ind]," in '",varName,"' is greater than the number of columns ",len_sCN," in the statsDFrame data.frame.")
               warning(xmsg,call.=FALSE)
               rcol[ind] <- 0
            }
         }
      }
      if (!FndNames[ind]) {
         # did not find the name in statsDFrame
         warnCnt()
         ErrFnd <- TRUE
         xmsg   <- paste0("***02",FvarNum,"1 PDCOL ",gNameList[ind]," ",pdColNum," The column name of ",xwcol[ind]," in '",varName,"' does not exist in the statsDFrame data.frame.")
         warning(xmsg,call.=FALSE)
         rcol[ind] <- 0
      }
   }

   return(rcol)
}

#
###

# CheckParmColx: validate statsDFrame column name/number references supplied
# in CALL ARGUMENTS (sortVar, rowNamesCol), as opposed to panelDesc lists.
CheckParmColx <- function(colNames, parmCode, wSDFNames, len_wSDFNames) {

   # This function validates the statsDFrame column name/numbers for call arguments/parameters.
   # It is essentially the same function as CheckColx, but does not generate error messages
   # related to panelDesc variables or lists.  If the list of names/numbers is limited to "N",
   # then this check is done prior to calling this function.
   #
   # Used by sortVar and rowNamesCol argument checks
   #
   # colNames   = col Name vector of names/number in statsDFrame from panelDesc
   # parmCode   = is a vector containing the error message identifier and string and the parameter name.
   #              parmCode[1] = second part of the "CARG-" tag.
   #              parmCode[2] = name of the calling argument/parameter
   #                  c("RNC","rowNamesCol")
   #              Any invalid names/numbers are passed back as 0.
   # wSDFNames  = character list of column names and numbers (in character format)
   # len_wSDFNames = number of original set of columns. (length(wcol)
   #
   # Results Rules:  "0" means invalid number, out of range number or invalid name.
   #        NAs are converted to "0" values.
   #        glyphs check for valid values based on need.
   #
   # The check for zero length value is done before the call to this routine.
   #
   xColNames <- colNames
   l_wcol    <- length(colNames)   # number of values
   ErrFnd    <- FALSE

   if (l_wcol <= 0) {
      ErrFnd <- TRUE
      xmsg   <- paste0("***0124 CARG-",parmCode[1]," The ",parmCode[2]," call argument is empty. Argument ignored.")
      warnCnt()
      warning(xmsg, call.=FALSE)
      res <- NA
   } else {
      # number of values are 1 or more
      res <- rep(0,l_wcol)    # default results are none found.

      if (is.factor(xColNames)) {
         xColNames <- as.character(xColNames)
      }

      if (is.character(xColNames) || is.numeric(xColNames)) {
         # what are each element in the vector - a number or name?
         xColType <- unlist(gregexpr("^[ \t]*[+-]?[0-9]*[ \t]*$",xColNames))   # find out number or name
         #   NA - NA (missing)
         #   -1 - Character
         #    1 - Number
         #print("parameter value to check:")
         #print(xColNames)
         #print(xColType)

         # Loop through list and check each one based on its type.
         for (ind in c(1:l_wcol)) {
            # get value type
            wCT    <- xColType[ind]    # get type
            wCName <- xColNames[ind]   # get value

            if (is.na(wCT)) {
               # NA value - pass it back to caller as 0 - not found.
               res[ind] <- 0
            } else {
               if (wCT < 0) {
                  # have character type value (not just numbers) - should be column name
                  wColN <- match(wCName,wSDFNames,nomatch=0)                        # match against column names and numbers (as characters)
                  wColN <- ifelse(wColN > len_wSDFNames, wColN-len_wSDFNames, wColN) # adjust matches to row numbers to real row numbers.
                  res[ind] <- wColN     # save resulting column index number
                  # check if column name found.
                  if (wColN <= 0) {
                     # if it was a no match ...
                     ErrFnd <- TRUE
                     xmsg   <- paste0("***0123 CARG-",parmCode[1]," A column names in the ",parmCode[2],
                                 " call argument does not exist in the ",sDFName," data.frame:", wCName)
                     warnCnt()
                     warning(xmsg,call.=FALSE)
                  }  # end of name valid check.
               } else {
                  # numeric value - (integer, numeric, or character format) - convert and check
                  wColN <- as.integer(wCName)    # convert to number
                  if (is.na(wColN)) {
                     # string did not convert to integer - Error. (unexpected since we validated character string first.
                     ErrFnd <- TRUE
                     xmsg   <- paste0("***0125 CARG-",parmCode[1]," A column index number in the ",parmCode[2]," call argument did not convert from character to integer: ",wColN)
                     warnCnt()
                     warning(xmsg, call.=FALSE)
                  } else {
                     if (wColN < 0) {
                        ErrFnd <- TRUE
                        xmsg   <- paste0("***0120 CARG-",parmCode[1]," A column index number in the ",parmCode[2],
                                    " call argument is a negative or zero: ",wColN)
                        warnCnt()
                        warning(xmsg, call.=FALSE)
                     } else {
                        if (wColN > len_wSDFNames) {
                           ErrFnd <- TRUE
                           xmsg   <- paste0("***0121 CARG-",parmCode[1]," A column index number in the ",parmCode[2]," call argument is a greater than the number of columns in ",sDFName," data.frame: ", wColN)
                           warnCnt()
                           warning(xmsg, call.=FALSE)
                        } else {
                           res[ind] = wColN    # save the column number
                        }  # end of range check.
                     }  # end of neg check.
                  }  # end of integer convert error check
               }  # end of char vs numeric check
            }  # end of NA vs other type check
         }  # end of for loop
      } else {
         # invalid variable type
         ErrFnd <- TRUE
         xmsg   <- paste0("***0122 CARG-",parmCode[1]," The call argument/parameter, ",parmCode[2]," is not a valid variable type.  It must be a numeric or character type value.")
         warnCnt()
         warning(xmsg,call.=FALSE)
      }  # end of type check.
   }  # end of zero length check

   #cat("CheckParmColx Results:",paste0(res,collapse=", "),"\n")
   return(res)
}

#
### ###
#
#  function CheckNum takes a vector or data.frame of numbers provided in the statsDFrame by the
#  user.  It checks to make sure they are numeric via "is.numeric" and a grep string comparison.
#  In the process, it checks for factors and converts them to character vectors.
#  Character vectors are scanned to eliminate commas in numbers and verify the string is only
#  digits and decimal points.   A list of Err and Dat is returned.
#  If an error is found, Err is set to TRUE.  The cleaned up numeric vector is returned as Dat.
#
#  Input:  xd        <- data column (from statsDFrame data.frame)
#          gName     <- glyph Name (character)
#          pdVarNum  <- pd variable (col1, col2, col3) number (integer)
#          pdColNum  <- glyph Column Number  (2 character)
#          pdVarName <- pd variable name (col1, col2, col3)
#          stColName <- stDF column reference
#          pdUsage   <- brief usage description for error messages.
#
CheckNum <- function(xd, gName, pdVarNum, pdColNum, pdVarName, stColName, pdUsage) {

   #  for error messages, the last digit of 7 and 8 is reserved for this check.
   ErrFnd <- FALSE
   xn     <- formatC(pdVarNum,format="f",width=1,digits=0)

   #cat("CheckNum - gName:",gName,"  pdVarNum:",pdVarNum,"  pdColNum:", pdColNum,"  pdVarName:",pdVarName,"\n")
   #cat("   stColName:",stColName,"  pdUsage:",pdUsage,"  xn:",xn,"  length(xd):",length(xd),"\n")
   #cat("   xd:",paste0(xd,collapse=", "),"\n")

   if (length(xd) == 0) {
      # invalid vector - length = 0 -> no data
      ErrFnd <- TRUE
      warnCnt()
      xmsg   <- paste0("***02", xn, "D ", gName, " ", pdColNum, " The ", stColName, " data column in the ", sDFName,
                   " data frame does not contain any data. Data vector length has length of zero. ", pdUsage)
      warning(xmsg, call.=FALSE)
      # can't process or check - return the (empty) vector as-is.
      xdr <- xd
      #print("zero length vector")
   } else {
      xdr <- rep(NA,length(xd))    # default results - vector of NAs.

      # Convert factors to characters - this applies even if vector is numeric or character.
      # Normally only strings are saved as factors in data.frames, but a numeric vector can also
      # be converted to a factor.  It then becomes a character value.
      if (is.factor(xd)) {
         xd <- as.character(xd)    # convert factors to characters
         #print("converted from factor to character")
      }

      # Check # 1 - all missing
      if (all(is.na(xd))) {
         warnCnt()
         ErrFnd <- TRUE    # no data can be converted.  ALL NA.  could be all blanks.
         xmsg   <- paste0("***02", xn, "A ", gName, " ", pdColNum, " The data provided in the ", stColName,
                      " column of the ", sDFName,
                      " data frame does not contain any numerical data. No rows will be drawn. ", pdUsage)
         warning(xmsg,call.=FALSE)
         # return all NA vector
         #print("all are NA")
      } else {
         # Check # 2 - one or more missing.  This check should be done before
         # manipulating the vectors: it reports entries the USER left missing,
         # not entries that the numeric translation leaves NA.
         if (any(is.na(xd))) {
            lenxd     <- length(xd)
            seqxd     <- seq(1,lenxd)
            BadSeqNum <- seqxd[is.na(xd)]
            warnCnt()
            xmsg      <- paste0("***02", xn, "B ", gName, " ", pdColNum, " The ", stColName, " data column in the ", sDFName,
                            " data frame contains one or more missing values. "," Rows with missing values will not be drawn. ", pdUsage)
            warning(xmsg, call.=FALSE)
            xmsg      <- paste0("***02", xn, "C ",gName," ",pdColNum," The rows with missing data are:",paste0(BadSeqNum,collapse=", "))
            warning(xmsg, call.=FALSE)
            #print("one or more are NA")
         }

         # we may have missing values, but we can still check the vector.
         if (!is.numeric(xd)) {
            #print("not numeric")
            if (is.character(xd)) {
               #print("character")
               # check character string for valid numerical format and allow for commas.
               # Any NA values are passed through as NA in the results.
               x <- gregexpr("^[ \t]*[+-]?((([0-9]{1,3}[,])?([0-9]{3}[,])*[0-9]{3})|([0-9]*))?(([.][0-9]*)|)([eE][-+]?[0-9]+)?[ \t]*$",xd)
               # regexpr notes:
               #   ^ / $            - begin / end of string
               #   [ \t]*           - any number of leading/trailing spaces or tabs
               #   [-+]?            - optional sign, at most one
               #   leading digits   - either comma-grouped (1,000 and up; 999 falls
               #                      through to the plain-digit alternative) or plain digits
               #   ([.][0-9]*)|     - optional decimal point and digits
               #   ([eE][-+]?[0-9]+)? - optional scientific-notation appendage
               #
               # February 15-16, 2016 - improved reg-exp to handle:
               #   a) leading + or -
               #   b) commas in number - correct format, checked before commas removed
               #   c) redid decimal point and following digits
               #   d) added logic for scientific notation (e+10)

               xtf <- unlist(x) > 0       # TRUE = good number / FALSE = bad format
               # FIX: gregexpr yields NA for NA input strings; an NA in a logical
               # subscript would abort the subassignment below.  NA entries simply
               # pass through as NA in the results, per the contract above.
               xtf[is.na(xtf)] <- FALSE

               xd  <- gsub(",","",xd)     # eliminate commas in number
               xdr <- rep(NA,length(xd))  # default return value.
               # all checking for missing numbers has already been done.
               xdr[xtf] <- as.numeric(xd[xtf])   # only convert good formats.

               # FIX: inverted test.  The internal diagnostic must fire when a
               # string that PASSED the format check still converted to NA --
               # the original tested !is.na() and fired on every success.
               xtf2 <- is.na(xdr[xtf])
               if (any(xtf2)) {
                  # something happened and a number we thought was good did not get converted to numeric.
                  print("Internal Note - good numeric format did not get converted")
                  print(paste0("Input :",paste0(xd[xtf], collapse=", ")))
                  print(paste0("Output:",paste0(xdr[xtf],collapse=", ")))
               }
            } else {
               # not a numeric or character type vector
               ErrFnd <- TRUE
               warnCnt()
               xmsg   <- paste0("***02", xn, "9 ", gName, " ", pdColNum, " The ", stColName, " data column in the ", sDFName,
                            " data frame is not a character or numeric vector. ", pdUsage)
               warning(xmsg, call.=FALSE)
            }  # end of character/invalid
         } else {
            # numeric - pass through unchanged.
            xdr <- xd
         }  # end of not numeric
      }  # end of all missing or process.
   }  # end of vector length check

   return(list(Error=ErrFnd,Dat=xdr))
}

#
### ###
#
#  Input:  xd        <- data column (from statsDFrame data.frame)
#          gName     <- glyph Name (character)
#          pdColNum  <- glyph Column Number  (2 character)
#          pdVarName <- pd variable name (col1, col2, col3)
#          pdVarNum  <- pd variable (col1, col2, col3) number (integer)
#          stColName <- stDF column reference
#
#     x <- CheckNum2(xd, gName, pdColNum, pdVarName, pdVarNum, stColName)  - retired.
#
####
#
#  Why was CheckNum2 created???  now CheckNum is much better. - This routine appears to be RETIRED.
#
####
CheckNum2 <- function(xd, gName, pdColNum, PDVarName, pdVarNum, stColName) {

   #  for error messages, the last digit of 3 and 4 is reserved for this check.
   ErrFnd <- FALSE
   xn     <- formatC(pdVarNum,format="f",digits=0,width=1)

   if (is.factor(xd)) {
      # numeric or character can be factored - must go to character to get to numeric.
      xd <- as.character(xd)
   }

   # check for missing values.
   # FIX: is.na(xd) is a vector; a length > 1 condition in if() is an error in
   # R >= 4.2.  any() matches the message's "one or more" intent.
   if (any(is.na(xd))) {
      warnCnt()
      xmsg <- paste0("***02",xn,"B ",gName," ",pdColNum," The ",stColName," data column in the ",sDFName," data frame contains one or more missing values. Rows with missing values will not be drawn.")
      warning(xmsg,call.=FALSE)
   }

   # Check for factors (redundant with the conversion above; kept for parity).
   if (is.factor(xd)) {
      xd <- as.character(xd)
   }

   # if numeric vector - just return the vector.
   if (!is.numeric(xd)) {
      if (is.character(xd)) {
         # can it be translated without error to numeric?
         # determine if string is a valid number format.
         x  <- gregexpr("^[ \t]*[+-]?((([0-9]{1,3}[,])?([0-9]{3}[,])*[0-9]{3})|([0-9]*))?(([.][0-9]*)|)([eE][-+]?[0-9]+)?[ \t]*$",xd)
         # eliminate commas from number text.
         xd <- gsub(",","",xd)
         # gregexpr returns the match position (-1 = no match) per element.
         # FIX: x is a list - it must be unlisted before the numeric compare.
         if (any(unlist(x) < 0)) {
            # one of the values failed the numeric test.
            ErrFnd <- TRUE
            warnCnt()
            xmsg   <- paste0("***02",xn,"7 ",gName," ",pdColNum," The data provided in the ",stColName," column in the ",sDFName," data frame contains one or more non-numeric characters.")
            warning(xmsg,call.=FALSE)
         } else {
            # convert to numeric for the return.
            xd    <- as.numeric(xd)
            areNA <- is.na(xd)
            if (all(areNA)) {
               ErrFnd <- TRUE   # no data can be converted.  ALL NA.  could be all blanks.
               warnCnt()
               xmsg   <- paste0("***02", xn, "A ", gName, " ", pdColNum, " The ", stColName, " data column in the ", sDFName,
                            " data frame contains one or more missing values. "," Rows with missing values will not be drawn.")
               warning(xmsg, call.=FALSE)
            } else {
               if (any(areNA)) {
                  warnCnt()
                  xmsg <- paste0("***02", xn, "8 ", gName, " ", pdColNum, " The data provided in the ", stColName,
                             " column in the ", sDFName, " data frame contains one or more entries have non-numeric characters.")
                  warning(xmsg,call.=FALSE)
               }
            }
         }
      } else {
         # not a valid type of vector
         ErrFnd <- TRUE
         warnCnt()
         xmsg   <- paste0("***02",xn,"9 ",gName," ",pdColNum," The ",stColName," data column in the ",sDFName," data frame is not a character or numeric vector.")
         warning(xmsg,call.=FALSE)
      }
   }
   return(list(Error=ErrFnd,Dat=xd))
}

#
### ###
#
#  function to verify the presence and type of data in a statsDFrame column.
#
CheckPDCol <- function(pdVarName, gName, stColNum, stColName, gColNum, pdVarNum, stMaxColNum, stDat, pdUsage) {
   #
   xr       <- list(Err = FALSE, Dat = c(0))
   xn       <- formatC(pdVarNum,format="f",width=1,digits=0)             # get last character (number of col1, 2, 3)
   pdColNum <- formatC(gColNum,format="f",width=2,digits=0,flag="0")
   wstname  <- names(stDat)
   wstMax   <- dim(stDat)[2]

   #cat("CheckPDCol-pdVarName:",pdVarName,"  gName:",gName,"  stColNum:",stColNum,"  stColName:",stColName,"  gColNum:",gColNum,"\n")
   #cat("   pdVarNum:",pdVarNum,"  stMaxColNum:", stMaxColNum,"  pdUsage:",pdUsage,"  xn:",xn,"  pdColNum:",pdColNum,"\n")
   #cat("   stDat:",paste0(stDat,collapse=", "),"\n")
   #cat("   wstname:",paste0(wstname,collapse=", "),"\n")
   #cat("   wstMax :",wstMax,"\n")

   if (is.na(match(pdVarName,PDUsed))) {
      # pdVarName is not present in the panelDesc data.frame variable lists.
      xr$Err <- TRUE
      warnCnt()
      # FIX: message id had a stray comma ("***02n,5") - every other id in this
      # family is "***02<n><d>"; made consistent with CheckPDCol2's "***02n5".
      xmsg   <- paste0("***02",xn,"5 ", gName," ",pdColNum," The required panelDesc variable ", pdVarName,
                   " is missing from the ", pDName, " data.frame. ", pdUsage)
      warning(xmsg, call.=FALSE)
   }

   # FIX: guard against NA before the == 0 compare; an NA condition in if()
   # is an error, and the NA (column not specified) case is handled below.
   if (!is.na(stColNum) && stColNum == 0) {
      xr$Err <- TRUE    # if stColNum is zero, then error message already generated.  So signal error and stop.
   }

   if (!xr$Err) {
      # no error found yet....
      if (is.na(stColNum)) {
         # missing stColName
         xr$Err <- TRUE
         warnCnt()
         xmsg   <- paste0("***02", xn, "4 ", gName, " ", pdColNum, " There is no ",sDFName,
                      " column was specified in ", pdVarName, " variable in the ", pDName, " panelDesc data.frame.",
                      " A data column name/number is required. ", pdUsage)
         warning(xmsg, call.=FALSE)
      } else {
         # check and get the data in col"x"
         xr <- CheckNum(stDat[,stColNum], gName, pdVarNum, pdColNum, pdVarName, stColName, pdUsage)
      }
   }
   #print("CheckPDCol - Output")
   #print(xr)
   return(xr)
}

#
### ###
#
#  function to verify the presence and type of data in a statsDFrame column.
#  Same as the CheckPDCol function, but without any CheckNum call to verify the data.
#  Used by ctrbar, segbar, normbar glyphs.  They do a CheckNum on each column as they
#  pull the data.
#
CheckPDColnCN <- function(pdVarName, gName, stColNum, stColName, gColNum, pdVarNum, stMaxColNum, stDat, pdUsage) {

   xr       <- list(Err = FALSE, Dat = c(0))
   xn       <- formatC(pdVarNum,format="f",width=1,digits=0)             # get last character (number of col1, 2, 3)
   pdColNum <- formatC(gColNum,format="f",width=2,digits=0,flag="0")
   wstname  <- names(stDat)
   wstMax   <- dim(stDat)[2]

   # Can't create stColName - if not valid, stColNum was set to 0 if bad or NA if pdVarName variable vector was missing.

   # Check if the pdVarName exists in the panelDesc data.frame
   if (is.na(match(pdVarName, PDUsed))) {
      xr$Err <- TRUE
      warnCnt()
      # FIX: same stray-comma message id as CheckPDCol - made consistent.
      xmsg   <- paste0("***02",xn,"5 ", gName, " ", pdColNum, " The required panelDesc variable ", pdVarName,
                   " is missing from the ", pDName, " data.frame. ", pdUsage)
      warning(xmsg, call.=FALSE)
   }

   if (!xr$Err) {
      # no error found yet....
      # Check to see if statsDFrame column in the panelDesc variable was found to be valid by CheckColx function earlier.
      if (is.na(stColNum) || stColNum == 0) {
         # invalid name or column number in statsDFrame
         xr$Err <- TRUE
         warnCnt()
         xmsg   <- paste0("***02",xn,"6 ", gName, " ", pdColNum, " The specified column name or number in ", pdVarName,
                      " panelDesc variable (", stColName, ") does not exist in the for ", sDFName,
                      " data frame or is out of range. ", pdUsage)
         warning(xmsg, call.=FALSE)
      }
   }
   return(xr)
}

#
### ###
#
#  function to verify the presence and type of data in a statsDFrame column.  Used with "col1", "col2", and "col3" panelDesc lists.
#     remember - conversion to numeric column number - done
#                verification of column names - done
#                NA value = means was not provided
#                0 value  = means invalid value or name provided and error message already generated.
#     col1, col2, col3 values not needed by a glyph are not checked or processed.
#
#     xr <- CheckPDCol2(pdVarName, pdVarNum, pdColNum, gName, stColNum, stDat)  ---- Retired
#
CheckPDCol2 <- function(pdVarName, pdVarNum, pdColNum, gName, stColNum, stDat, pdUsage) {
   #
   #  pdVarName = "col1", "col2", or "col3" - character string name of the panelDesc list (field):
   #  pdVarNum  = numeric 1, 2, 3 for messages.
   #  pdColNum  = panelDesc glyph column number
   #  gName     = character string name of the calling glyph
   #  stColNum  = column number in statsDFrame data.frame
   #  stDat     = statsDFrame data.frame
   #
   #  xr = result - xr$Err - error indicator
   #                xr$Dat - statsDFrame data column
   #
   xr        <- data.frame(Err = FALSE, Dat = c(0))
   xn        <- formatC(pdVarNum,format="f",digits=0,width=1)
   stColName <- wSFName[stColNum]

   # it's not a given that the colx was in the panelDesc data.frame
   # is the panelDesc variable list (col1, col2, etc.) in the panelDesc?
   if (is.na(match(pdVarName,PDUsed))) {
      # no - not present
      xr$Err <- TRUE
      xmsg   <- paste0("***02", xn, "5 ", gName, " ", pdColNum, " The required panelDesc variable ", pdVarName,
                   " is missing from the ", pDName, " data frame. ", pdUsage)
      warnCnt()
      warning(xmsg, call.=FALSE)
   }

   # FIX: NA guard before the == 0 compare (same defect as CheckPDCol).
   if (!is.na(stColNum) && stColNum == 0) {
      xr$Err <- TRUE    # if stColNum is zero, then error message already generated.  So signal error and stop.
   }

   if (!xr$Err) {
      # no error found yet....
      if (is.na(stColNum)) {
         # Required col value not provided.
         xr$Err <- TRUE
         xmsg   <- paste0("***02", xn, "4 ", gName, " ", pdColNum, " There is no ", sDFName,
                      " column was specified in ", pdVarName, " variable in the ", pDName, " panelDesc data.frame.",
                      " A data column name/number is required. ", pdUsage)
         warnCnt()
         warning(xmsg, call.=FALSE)
      } else {
         # stColNum is in range - already checked before and error message generated.
         xr <- CheckNum(stDat[,stColNum], gName, pdVarNum, pdColNum, pdVarName, stColName, pdUsage)
      }
   }
   # FIX: the function previously fell off the end without an explicit return;
   # the error paths therefore returned the (invisible) value of warning() or
   # an assignment instead of the result list.
   return(xr)
}

#
### ###
#
#  ConvertDV - Converts original details variable list into the new by glyph variable list.
#
ConvertDV <- function(DV) {
   #  This routine converts an old details variables structure into a new structure.
   #  Each named list in panelDesc is the same length, but may or may not be used
   #  by the glyph.
   #
   #  Generate a list containing a list for each glyph column.  The glyph list
   #  contains all of the variable (named lists) for it operation.
   #  This is organized vertically, instead of horizontally.
   #  The glyph list need only contain the variables required/used for a glyph.
   #
   #  Variables and table for Convertion of PD from old format to new format.
   #
   #  DV is the details variable structure. a list of named lists.
   #
   #  Return value is the "NewDV" with new variable names grouped by glyph name.
   #
   #
   #data(detailsVariables)  # already loaded.
# # For testing - load
#DVFile <- "c:/projects/statnet/r code/micromapST/data/detailsVariables.rda"
#load(DVFile)   # loads detailsVariables structure

# Master list of glyph (column) types known to the package.  Each entry
# becomes a named sub-list in the structure built by initDVList().
glyphNames <- c("arrow", "bar","boxplot", "ctrbar", "dot", "dotsignif", "dotconf", "dotse",
       "id", "map", "mapcum", "mapmedian", "maptail", "normbar", "panel", "rank",
       "scatdot", "segbar", "system", "ts", "tsconf" )

# initDVList - build an empty details-variable structure: one empty named
#   sub-list per glyph name supplied.
#
#   glyphNames - character vector of glyph type names.
#   returns    - named list containing one empty list per glyph name.
initDVList <- function(glyphNames) {
   NewDV <- NULL
   NewDV <- list()
   for (iDx in seq_along(glyphNames)) {
      NewDV[[glyphNames[iDx]]] <- list()
   }
   return(NewDV)
}

# --- body of ConvertDV (the function's opening line is above this chunk) ---
# Validates each user-supplied "details" variable (in DV) against the master
# detailsVariables table, then replicates the value, under its new variable
# name, into the sub-list of every glyph type that uses it.

DVTable <- detailsVariables   # master table: varName, newVarName, method, v1, v2, usedBy
DVTable$varName <- str_trim(DVTable$varName)
DVTable$newVarName <- str_trim(DVTable$newVarName)
#
ErrFnd <- FALSE
if (!is.list(DV)) {
   # NOTE(review): ErrFnd is set but execution continues; length()/names()
   # below will still run on the non-list DV - confirm an early return is
   # not needed here.
   ErrFnd <- TRUE
   xmsg <- "convertDV - DV structure is not a list."
   warning(xmsg,call.=FALSE)
}
varsNum  <- length(DV)   # number of variables
varsName <- names(DV)    # names of variables
#cat("varsNum :",varsNum,"\n")
#cat("varsName:",paste0(varsName,"\n"),"\n")
#
NewDV <- initDVList(glyphNames)  # initializes each glyph list to a list.

for (ind in seq_along(varsName)) {   # step through each variable name
   # validate value
   vName  <- names(DV)[ind]   # get name
   vValue <- DV[[ind]]        # get value
   xIndex <- match(vName,DVTable$varName)   # row in master table; NA = unknown variable
   #cat("vName:",vName," vValue:",vValue," xIndex:",xIndex,"\n")

   if (is.na(xIndex)) {
      xmsg <- paste0("variable: ",vName," not found in master variable list. 
Name is not valid, skipped")
      warning(xmsg,call.=FALSE)
   } else {
      varData <- DVTable[xIndex,]   # get info to validate and translate
      #cat("validate-method:",varData$method," v1:",varData$v1," v2:",varData$v2,"\n")
      # NOTE(review): varName is not defined at this point in ConvertDV (only
      # vName is; varName is a local of ConvertPD below) - likely meant vName.
      tag <- paste0(varName," variable")
      # Dispatch on the validation method recorded for this variable.
      res <- switch(varData$method,
         # value must be a valid color specification
         "colors" = { is.Color(vValue) },
         # numeric value within [v1, v2]
         "numeric" = { if (is.numeric(vValue)) { (is.between(vValue,as.numeric(varData$v1),as.numeric(varData$v2))) } },
         # integer value within [v1, v2]
         "integer" = { if (is.numeric(vValue)) { (is.between(as.integer(vValue),varData$v1,varData$v2)) } },
         # valid R line type (lty) code or name
         "lines" = { wS <- c('1','2','3','4','5','6','blank','solid','dashed','dotted','dotdash','longdash','twodash')
               wV <- as.character(vValue)
               xIdx <- match(wV,wS)
               !is.na(xIdx) },
         "logical" = { is.logical(vValue) },
         # value must match one of the choices coded (as R source) in v1
         "match" = { wS <- eval(parse(text=varData$v1))  # must do this to build vector.
               wV <- as.character(vValue)
               xIdx <- match(wV,wS)
               !is.na(xIdx) },
         # character value with length within [v1, v2]
         "text" = { if (is.character(vValue)) { (is.between(nchar(vValue),as.integer(varData$v1),as.integer(varData$v2))) } },
         # atomic vector of exactly 3 values, each within [v1, v2]
         "vectOf3" = { if (is.atomic(vValue)) { if (length(vValue) == 3) { (all(is.between(vValue,varData$v1,varData$v2))) } } },
         { FALSE }   # default - unknown validation method fails
         )
      # res has the validation results
      # NOTE(review): the "numeric"/"integer"/"text"/"vectOf3" branches return
      # NULL when their type guard is FALSE (if with no else), and `if (!res)`
      # below then stops with "argument is of length zero" - confirm whether a
      # FALSE fallback is intended in those branches.
      #cat("res:",res," typeof(res):", typeof(res)," ",class(res),"\n")
      if (!res) {
         xmsg <- paste0("***01N0 DETS The ",tag," does not have a valid value: ",vValue," Check type ",varData$method," used.")
         warning(xmsg)
      } else {
         # translate
         # replicate variable for each glyph that uses it.
         newVarN <- varData$newVarName
         #cat("usedBy:",varData$usedBy,"\n")
         GNList <- eval(parse(text=varData$usedBy))  # list of glyph that use this variable.
         # build the new variable for each glyph.
         for (jnd in seq_along(GNList)) {
            GName <- GNList[jnd]
            #cat("Added GN:",GName," / ",newVarN," = ",vValue,"\n")
            NewDV[[GName]][[newVarN]] <- vValue   # add list with single element.
         }  # end of jnd loop
      }  # end of test results from validation.
   }  # end of check for match variable name.
   #cat("Check next variable in list.\n")
}  # end of ind loop
return(NewDV)
}  # end of ConvertDV function
#
###
###
#
#  How to convert old panelDesc structure to a new panelDesc structure
#
#  "advanced" named list used to add new variables to the panelDesc instead
#  of keep adding named lists across all of the glyph columns.
#
#  Old Structure:
#
#  panelDesc
#     type     = c( 1, 2, 3, 4, 5, 6, ...)
#     lab1     = c( 1, 2, 3, 4, 5, 6, ...)
#     lab2     = c( 1, 2, 3, 4, 5, 6, ...)
#     lab3     = c( 1, 2, 3, 4, 5, 6, ...)
#     col1     = c( 1, 2, 3, 4, 5, 6, ...)
#     col2     = c( 1, 2, 3, 4, 5, 6, ...)
#     col3     = c( 1, 2, 3, 4, 5, 6, ...)
#     colSize  = c( 1, 2, 3, 4, 5, 6, ...)
#     lab4     = c( 1, 2, 3, 4, 5, 6, ...)
#     refText  = c( 1, 2, 3, 4, 5, 6, ...)
#     refVal   = c( 1, 2, 3, 4, 5, 6, ...)
#     panelData= c( 1, 2, 3, 4, 5, 6, ...)
#
#  types:
#     "map"        lab1, lab3
#     "mapcum"     lab1, lab3
#     "mapmedian"  lab1, lab3
#     "maptail"    lab1, lab3
#     "id"         lab1, lab3
#
#     "arrow"      lab1, lab2, lab3, col1, col2, refText, refVal
#     "bar"        lab1, lab2, lab3, col1, refText, refVal
#     "dot"        lab1, lab2, lab3, col1, refText, refVal
#     "dotsignif"  lab1, lab2, lab3, col1, col2, refText, refVal
#     "dotse"      lab1, lab2, lab3, col1, col2, refText, refVal
#     "dotconf"    lab1, lab2, lab3, col1, col2, col3, refText, refVal
#     "scatdot"    lab1, lab2, lab3, col1, col2, refText, refVal
#     "rank"       lab1, lab2, lab3,
#     "normbar"    lab1, lab2, lab3, col1, col2, refText, refVal
#     "segbar"     lab1, lab2, lab3, col1, col2, refText, refVal
#     "ctrbar"     lab1, lab2, lab3, col1, col2, refText, refVal
#     "ts"         lab1, lab2, lab3, panelData
#     "tsconf"     lab1, lab2, lab3, panelData
#     "boxplot"    lab1, lab2, lab3, panelData, refText, refVal
#
#
ConvertPD <- function(PD) {
   # This routine converts an old panelDesc structure into a new structure.
   # Each named list in panelDesc is the same length, but may or may not be used
   # by the glyph.
   #
   # Generate a list containing a list for each glyph column.  The glyph list
   # contains all of the variable (named lists) for it operation.
   # This is organized vertically, instead of horizontally.
   # The glyph list need only contain the variables required/used for a glyph.
   #
   # Variables and table for Convertion of PD from old format to new format.
   #
   # Column (variable) names for the per-glyph requirements table below.
   PDFldDef <- c("type", "lab1", "lab2", "lab3", "col1", "col2", "col3", "colSize",
       "panelData","refTexts", "refVals", "rmin", "rmax", "adv" )
   # Requirements table: one row per glyph type, TRUE = variable used by glyph.
   # NOTE(review): each row below holds 14 values (glyph name + 13 flags) and
   # PDFldDef has 14 names, yet ncol=11 is specified - 19x14 = 266 values do
   # not fill an 11-column matrix evenly, and assigning 14 colnames to an
   # 11-column data.frame will fail.  ncol should almost certainly be 14, and
   # the c(2:11) / [2:11] index ranges below should then be 2:14 - confirm.
   PDGlyphReq <- matrix(c(
      # glyph      lab1,  2,    3,    col1,  2,     3,     colSize, panelData, refT, refV, rmin, rmax, adv
      c("map",       TRUE, FALSE, TRUE,  FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE),
      c("mapcum",    TRUE, FALSE, TRUE,  FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE),
      c("mapmedian", TRUE, FALSE, TRUE,  FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE),
      c("maptail",   TRUE, FALSE, TRUE,  FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE),
      c("id",        TRUE, FALSE, TRUE,  FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE),
      c("arrow",     TRUE, TRUE,  TRUE,  TRUE,  TRUE,  FALSE, TRUE,  FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("bar",       TRUE, TRUE,  TRUE,  TRUE,  FALSE, FALSE, TRUE,  FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("dot",       TRUE, TRUE,  TRUE,  TRUE,  FALSE, FALSE, TRUE,  FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("dotsignif", TRUE, TRUE,  TRUE,  TRUE,  TRUE,  FALSE, TRUE,  FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("dotse",     TRUE, TRUE,  TRUE,  TRUE,  TRUE,  FALSE, TRUE,  FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("dotconf",   TRUE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE,  TRUE,  FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("scatdot",   TRUE, TRUE,  TRUE,  TRUE,  TRUE,  FALSE, TRUE,  FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("rank",      TRUE, TRUE,  TRUE,  FALSE, FALSE, FALSE, TRUE,  FALSE, FALSE, FALSE, FALSE, FALSE, TRUE),
      c("segbar",    TRUE, TRUE,  TRUE,  TRUE,  TRUE,  FALSE, TRUE,  FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("normbar",   TRUE, TRUE,  TRUE,  TRUE,  TRUE,  FALSE, TRUE,  FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("ctrbar",    TRUE, TRUE,  TRUE,  TRUE,  TRUE,  FALSE, TRUE,  FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("ts",        TRUE, TRUE,  TRUE,  FALSE, FALSE, FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("tsconf",    TRUE, TRUE,  TRUE,  FALSE, FALSE, FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE,  TRUE,  TRUE),
      c("boxplot",   TRUE, TRUE,  TRUE,  TRUE,  TRUE,  FALSE, TRUE,  TRUE,  TRUE,  TRUE,  TRUE,  TRUE,  TRUE)
      ), ncol=11, byrow=TRUE)
   PDGlyphDF <- as.data.frame(PDGlyphReq,stringsAsFactors=FALSE)
   colnames(PDGlyphDF) <- PDFldDef
   #
   numVars   <- dim(PD)[2]   # number of panelDesc variables (columns)
   numMMCols <- dim(PD)[1]   # number of glyph columns in the micromap
   #
   wNames   <- colnames(PDGlyphDF)[2:11]   # variable names less "type"
   wPDNames <- colnames(PD)                # variables actually present in PD
   #
   #print(wNames)
   #print(wPDNames)
   #
   NewPD <- list()
   for (ind in c(1:numMMCols)) {   # step through each column and convert vertically
      # step through by glyph column
      wType   <- as.character(PD$type[ind])   # get glyph type for column
      wSel    <- (PDGlyphDF$type == wType)
      wList   <- as.logical(unlist(PDGlyphDF[wSel,c(2:11)]))  # get associated usage row
      wNames2 <- wNames[wList]      # get associated variable names
      gList   <- list(type=wType)   # initialize output list for column
      xVar    <- " "
      for (jnd in c(1:length(wNames2))) {   # step through possible variables
         varName <- wNames2[jnd]   # get next variable name
         if (!is.na(match(varName,wPDNames))) {
            # PD variable is present in the panelDesc
            # build string for command and execute to get value of variable
            cmdStr1 <- paste0("xVar <- as.character(PD$",varName,"[",ind,"])")
            #print(cmdStr1)
            eval(parse(text=cmdStr1))   # get value of variable into xVar
            if ((!is.na(xVar)) && (xVar != "") && (xVar != "NA")) {
               # check to see if the value is "not present"
               # Only process if the variable contains something.
               if (varName == "adv") {
                  # if the variable is "adv", then process a little differently
                  # adv is a list of variables and values, must add it to gList.
                  gList <- c(gList, xVar)   # add adv list to the output list
               } else {
                  # other variables, built string and set up variable in output
                  # xVar is the value of varName, so create the variable and set the value
                  cmdStr2 <- paste0("gList$",varName," <- xVar")
                  #print(cmdStr2)
                  eval(parse(text=cmdStr2))
               }
            }  # end of check for good data to convert (not NA, "", "NA")
         }  # end of check if variable present
      }  # end of jnd loop
      #str(gList)
      NewPD[[ind]] <- gList
   }  # end if ind loop
   #
   return(NewPD)
}
#
###
####
#
#  The panels work on scales user units.  In some cases this is 0,1 on both axis.
#  In many cases, this is the rx and ry ranges for the graphs.
#  The axis and titles lines are outside of the plotting area and
#  most of the text is written with 'mtext' function that place the text using
#  the "line" offset from the plot area.  In the case a line must be draw and
#  text must be written, this must be done with the line and text functions
#  under par(xpd=TRUE) using user units to position the text and line.
#
#  The drawing of the refTexts and dotted line must use this approach.
#
# ConvLineToUser - convert an mtext-style margin "line" offset on a given
#   side of the current plot into a position in user coordinates.
ConvLineToUser <- function(iSide, iLine) {
   # iSide is the side of the plot to draw the text and line.
   #    1 = bottom, 2 = left, 3 = top, 4 = right margins
   # iLine is the line offset from the plotting area.
   #    0 = next to the plot box, 4 = 5th line away from the box.
   #
   # Returns the iLine position in user scales in units.
   #
   # Snapshot current device geometry (all in the current panel's scale).
   xpin <- par("pin")   # plot region size (inches)
   xusr <- par("usr")   # user coordinate extremes (x1,x2,y1,y2)
   xmar <- par("mar")   # margins in lines
   xmai <- par("mai")   # margins in inches
   #printPar()
   if (iSide == 1 || iSide == 3) {
      # for top and bottom sides, get Y units per inch
      UnitsPerInch <- diff(xusr[3:4])/xpin[2]
   } else {
      # for left and right sides, get X units per inch
      UnitsPerInch <- diff(xusr[1:2])/xpin[1]
   }
   InchesPerUnit <- 1/UnitsPerInch
   #cat("iSide:",iSide," UnitsPerInch:",UnitsPerInch," InchesPerInch:",InchesPerUnit,"\n")
   # side = below, left, above, right
   # Per-side conversion constants (see item legend below).
   distZerou <- NULL
   distZerou <- switch(iSide,
         #    1      2  3  4    5     6
         #  usrZoffs d  s  s   adjA  adjL
         c(-xusr[3],-1,1,1, 0.25, 0),   # bottom 1   # 5 was -0.25 changed to 0.25
         c(-xusr[1],-1,2,2, -0.05, 0),  # left 2
         c( xusr[4], 1,1,3, -0.30, 0),  # top 3
         c( xusr[2], 1,2,4, 0.10, 0),   # right 4
         c( 0, 0,0,0, 0, 0)             # null
      )
   # item 1 => base value at edge line.
   # item 2 => sign + or - (add or subtract distance calculated)
   # item 3 => not used.
   # item 4 => mar and mai reference indexes.
   # item 5 => basic adjustment amount. (offset)
   #cat("distZerou:",distZerou,"\n")
   LinesPerInch <- xmar[distZerou[4]]/xmai[distZerou[4]]   # below, left, above, right mar/mai -> "5"
   InchesPerLine <- 1/LinesPerInch   # 1/5 = 0.2 inches per line.
   UnitsPerLine <- InchesPerLine * UnitsPerInch + distZerou[6]   # adjust line height
   #cat("distZerou:",distZerou,"\n")
   #cat("LinesPerInch:",LinesPerInch," InchesPerLine:",InchesPerLine," UnitsPerLine:",UnitsPerLine,"\n")
   # if convert line to user scale Position in user scale
   # Line to user scale Pos conversion
   Posu <- distZerou[2] * ( ( ( iLine + distZerou[5] ) * UnitsPerLine ) + distZerou[1] )
   #      direction     * ( ( ( line  + offset        ) * Units per Line ) + unit offset (base axis line value.) )
   #cat("iLine:",iLine," Posu:",Posu,"\n")
   return(Posu)
}
#
###
###
#
#  Function used by mapping functions to draw the column titles for MapCum,
#  MapMedian, and MapTail.  These titles have colored boxes preceeding
#  the titles.  This function adds four blanks lead of the title as placeholders,
#  draws the text center, then overlays the boxes as required.
#
DrawBoxAndText <- function(wTxt, wTxt.cex, sq.width, sq.col, sq.border.col, yposl) {
   #
   # function to draw and center the glyphs column titles with a preceeding
   # colored box.  Used by the MapMedian, MapTail, and MapCum mapping
   # functions.
   #
   #  wTxt          - title text to draw (centered via mtext on side 3)
   #  wTxt.cex      - character expansion for the title text
   #  sq.width      - nominal width of the colored square (scaled by font size)
   #  sq.col        - fill color for the square
   #  sq.border.col - border color for the square
   #  yposl         - mtext line position - 0 on top edge to 3 lines???
   #
   xps  <- par("ps")    # current point size
   xpin <- par("pin")   # plot region size (inches)
   xusr <- par("usr")   # user coordinate extremes
   xmar <- par("mar")   # margins in lines
   xmai <- par("mai")   # margins in inches
   #cat("xmai:",xmai," xmar:",xmar," xusr:",xusr," xpin:",xpin," xps:",xps,"\n")
   itouX <- diff(xusr[c(1,2)])/xpin[1]   # inches-to-units factor, X
   itouY <- diff(xusr[c(3,4)])/xpin[2]   # inches-to-units factor, Y
   # NOTE(review): index 1 of mar/mai is the BOTTOM margin, but the comment
   # says "top" - the ratio is the same for all sides, so result is unchanged.
   inchPerLine <- xmai[1]/xmar[1]   # top lines -> inches per line. (line position to inches).
   sqSize <- sq.width * ( xps / 9 ) * wTxt.cex   # scale size of square based on the point size of the font
   # may need to add logic to change number of leading blanks based on point size.
   wLeni <- strwidth(paste0(" ",wTxt),units="in", cex=wTxt.cex)
   #wLenu <- strwidth(paste0(" ",wTxt),units="us", cex=wTxt.cex)
   #cat("len i:", wLeni, " len u:",wLenu," ratio:",wLenu/wLeni,"\n")
   nStr1i <- (xpin[1]/2) - (wLeni/2)   # left edge of centered string, inches
   nStr1u <- nStr1i * itouX
   #wUseru <- diff(xusr[c(1,2)])
   #nStr2u <- (wUseru/2) - (wLenu/2)
   #cat("nStr1u:",nStr1u," nStr2u:", nStr2u,"\n")
   yadji <- 0.045   # inches (subtracted)
   #xadji <- 0.10
   # Empirically-derived horizontal nudge that grows with effective point size.
   xadji <- (1.25 ^ ( xps * wTxt.cex )) / 2100   # + ( 0.1 * 1/ScR) # inches
   if (xadji > 0.05) xadji = 0.05   # cap the nudge at 0.05 inch
   # 0.08 at 28pt
   # 0.04 at 24pt
   # value of 0.04 at 20pt.
   # 0.025 at 16pt.
   # 0.01 at 14pt.
   # 0.005 at 12pt.
   # 0.005 at 10pt.
   # 0.001 at 9pt.
   # 0.001 at 8pt.
   # 0.001 at 6pt.
   #
   # Going to try --> ( 1.25 ^ ( Points * wTxt.cex ) ) / 2100 = xadji
   #
   # Square corner coordinates (inches, relative); NA closes the polygon path.
   box.xi <- c(0, 0, sqSize, sqSize, NA) + xadji
   box.yi <- c(0, sqSize, sqSize, 0, NA) + yadji
   # y baseline = line positiion * inchToLIne + height of plot area.
   yposi <- yposl * inchPerLine + xpin[2]   # add base position and convert to units.
   box.yu <- ( ( box.yi + yposi ) * itouY )    # then convert to units
   box.xu <- ( ( box.xi + nStr1i ) * itouX )
   #cat("yposl:",yposl," yposi",yposi, "\n")
   #cat("box.xu:", box.xu, "\n box.yu:", box.yu,"\n")
   # use text to print the string centered.
   # line one. (four blanks for box padding. May have to vary as font size changes.
   # write text (centered)
   mtext(paste0(" ",wTxt),line=yposl,side=3, cex=wTxt.cex)   # pos = below centered.
   # draw square over the blanks in the title on the left.
   polygon(box.xu, box.yu, col=sq.col, border=sq.border.col)
   #polygon(bpx/xu, box.yu, col="black",density = 0)   # draw borders if needed.
}
#
###
######
##
##  CleanXLabels - clean up set of labels
##
##
#
#CleanXLabels <- function(rx,atRx,nTicks) {
#
#   ## expand range of x if needed.
#   rAtRx <- range(atRx)
#   #if(rAtRx[1] < rx[1]) rx[1] <- rAtRx[1]
#   #if(rAtRx[2] > rx[2]) rx[2] <- rAtRx[2]
#
#   # get number of labels
#   lAtRx <- length(atRx)
#   #cat("CXL-lAtRx:",lAtRx," trim.\n")
#   # trim labels outside of data range.
#
#   nT <- 7
#   if (lAtRx <= nT) {
#      # Delete labels below actual data point
#      if((atRx[1] < rx[1]) & (atRx[1] != 0) ) atRx <- atRx[-1]   # delete grid line below rx minimum
#   }
#   # Delete labels above actual data points
#   lAtRx <- length(atRx)
#   if (lAtRx <= nT) {
#      if((atRx[lAtRx] > rx[2]) & (atRx[lAtRx] != 0) ) atRx <- atRx[-lAtRx]   # delete grid line above rx maximum
#   }
#   lAtRx <- length(atRx)
#   #cat("DrawXAxis s adjusted atRx:",paste0(atRx,collapes=", "),"\n")
#
#   return(atRx)
#  }
#
##
######
#####
#
#  CleanXLabels2 -
#    If greater than 3 labels - trims off any label point outside of the range of the data and not zero.
#    expands data range(rx) to cover remaining edge labels.
#
CleanXLabels2 <- function(rx, atRx) {
   lAtRx <- length(atRx)   # length of atRx and number of labels.
   #cat("CXL2-lAtRx:",lAtRx," trim.\n")
   if (lAtRx > 3) {   # if greater than 3 labels - large number of labels - trim labels that are out of range.
      # Check low end label
      if (atRx[1] < rx[1] & atRx[1] !=0 ) {
         atRx <- atRx[-1]   # trim first value
         lAtRx <- length(atRx)
      }
      # Check high end label
      if (atRx[lAtRx] > rx[2] & atRx[lAtRx] != 0 ) {
         atRx <- atRx[-lAtRx]
         lAtRx <- length(atRx)
      }
   }
   # Extend data range based on labels and grid lines
   # Check low end data range vs. label
   if (atRx[1] < rx[1]) {   # first label outside of data range.
      rx[1] <- atRx[1]      # expand low end.
   }
   # Check high end data range vs. label
   if (atRx[lAtRx] > rx[2]) {   # last label outside of data range.
      rx[2] <- atRx[lAtRx]      # expand high end
   }
   #cat("After Extended - rx:",rx," atRx:",atRx,"\n")
   # Return both the (possibly widened) range and the (possibly trimmed) labels.
   return(list(rx=rx,atRx=atRx))
}
#
#####
#####
#
# TestOverlap - determine whether any pair of adjacent axis labels would
#   overlap, given their widths (via strwidth on the current device) and
#   their grid positions.
#
#   Acex  - cex used to measure the label text
#   atLab - label strings
#   atRx  - label center positions (user units)
#   nSp   - minimum separation, expressed in multiples of the width of "0"
#   returns TRUE if any adjacent pair is closer than the required gap.
TestOverlap <- function(Acex, atLab, atRx, nSp) {
   lAtLab <- length(atLab)
   widthSp <- strwidth("0",cex=Acex,units="user")   # width of one digit as the spacing unit
   widthSpN <- widthSp * nSp                        # required minimum gap
   #cat("TestOverlap-cex:",Acex," nSp:",nSp," widthSpN:",widthSpN," len(atLab):",lAtLab,"\n")
   widthAtLabH <- strwidth(atLab,cex=Acex,units="user")/2
   SrtLab <- atRx - widthAtLabH   # left edge of each label
   EndLab <- atRx + widthAtLabH   # right edge of each label
   #cat("SrtLab:",SrtLab,"\n")
   #cat("EndLab:",EndLab,"\n")
   # number of labels 1 to n, so check space between 1-2, 2-3, ... , nm1-n
   OverLapFnd <- FALSE
   # Check to see if any labels would overlap each other based on width and grid point location.
   for (ind in c(1:(lAtLab-1)) ) {
      wX <- SrtLab[ind+1] - EndLab[ind]   # gap between this label's end and next label's start
      #cat("ind:",ind," wX:",wX,"\n")
      if (wX < widthSpN) { OverLapFnd <- TRUE }
   }
   #cat("OverLapFnd:",OverLapFnd,"\n")
   return(OverLapFnd)
}
#
#####
#####
#
# Test to see if labels overlap text from neighboring columns.
# NOTE(review): relies on free variables from the enclosing environment -
#   colSepGap, YAxis.width, XAxis.indent - which are not parameters here;
#   they must be defined before this is called.
TestLabAtEdge <- function(atLab,atRx,YAxisPad,rx,lineAxisSizes) {
   # function to test edges for possible shift.
   # returns atRx adjusted (plus left/right overhang, in inches).
   xusr <- par("usr")
   xpin <- par("pin")
   xupi <- diff(xusr[1:2])/xpin[1]   # user units per inch (X)
   #cat(" TestLabAtEdge - xusr:",xusr," xpin:",xpin," xupi:",xupi,"\n")
   # width of each label.
   WidthOfLabs <- strwidth(atLab,cex=lineAxisSizes["Ax1"],units="user")
   # half of the width of each label
   HalfWidthOfLabs <- WidthOfLabs/2
   # starting "x" position of each label
   SrtOfLabs <- atRx - HalfWidthOfLabs
   # ending "x" position of each label
   EndOfLabs <- atRx + HalfWidthOfLabs
   # number of labels.
   lAtLab <- length(atLab)
   #
   #cat("Label Specifcations: (width, half, srt, end)\n")
   #print(WidthOfLabs)
   #print(HalfWidthOfLabs)
   #print(SrtOfLabs)
   #print(EndOfLabs)
   #
   # get 1/2 of the column sep gap (in units)
   wColSepGapHU <- (colSepGap/2)*xupi
   #cat("half of colSepGap in units:",wColSepGapHU,"\n")
   # Viable left edge of column (rx[1] - col sep gap)
   leftEdge <- rx[1] - wColSepGapHU   # 1/2 col sep converted to units.
   # adjust left edge is Y Axis is present - have more room.
   if (YAxisPad) {
      # y Axis present - add standard 0.2 inches of padding.
      wYAGapHU <- (YAxis.width * xupi)
      leftEdge <- leftEdge - wYAGapHU
      #cat("wYAGapU:", wYAGapHU," added to leftEdge.\n")
   }
   # Viable right edge of column (rx[2] + col sep gap)
   rightEdge <- rx[2] + wColSepGapHU
   #cat("leftEdge:",leftEdge," rightEdge:",rightEdge," units.\n")
   #cat("atRx:",atRx," rx:",rx,"\n")
   #
   # Adjust first and last label point inward for apperance.
   #
   # Check overhangs of last column and this column.
   #    pos values - have space (inches)
   #    neg values - need space
   #    sum < 0  - needed more space then available - problem - go do stagger
   #    sum >= 0 - had enough space - no problem.
   #
   wAtRx <- atRx
   lAtRx <- length(atRx)
   rAtRx <- range(atRx)
   WidthRx <- diff(rAtRx)
   # inward nudge: 1/1000 of the label span, scaled by XAxis.indent
   edgeRxAdj <- (WidthRx / 1000) * XAxis.indent
   #cat("edgeRxAdj:",edgeRxAdj,"\n")
   #
   # Is not getting applied if staggered.  Problem.
   #
   #
   # Adjustments label atRx to bring the first and last "atRx" points in a little.
   #
   if (SrtOfLabs[1] < leftEdge) {
      #cat("overlap left edge:", leftEdge - SrtOfLabs[1], " units\n")
      # Adjust both edge at points inwared by 1/1000 the range of labels * XAxis.indent(5)
      wAtRx[1] <- wAtRx[1] + edgeRxAdj   # key adjustment move inward.
      SrtOfLabs[1] <- SrtOfLabs[1] + edgeRxAdj
      EndOfLabs[1] <- EndOfLabs[1] + edgeRxAdj
      #cat("adj - SrtOfLabs[1]:",SrtOfLabs[1]," EndOfLabs[1]:",EndOfLabs[1],"\n")
   }
   if (EndOfLabs[lAtRx] > rightEdge) {
      # NOTE(review): the next two lines index with lAtRx on the left and
      # lAtLab on the right - harmless only if atLab and atRx always have the
      # same length; confirm and unify.
      wAtRx[lAtRx] <- wAtRx[lAtRx] - edgeRxAdj   # key adjustment
      EndOfLabs[lAtRx] <- EndOfLabs[lAtLab] - edgeRxAdj
      SrtOfLabs[lAtRx] <- SrtOfLabs[lAtLab] - edgeRxAdj
      #cat("adj - SrtOfLabs[lAtRx]:",SrtOfLabs[lAtRx]," EndOfLabs[lAtRx]:",EndOfLabs[lAtRx],"\n")
   }
   # add check to see if shift causses overlap with neighbor label.
   #cat("after 1st and last shift-rx:",rx," atRx:",wAtRx,"\n")
   #cat(" atLab:",atLab," axisSubTitle:",axisSubTitle,"\n")
   atRx <- wAtRx   # update label points. Shift completed, if needed.
   #
   # Deal with overlap to over columns. ( see is overlap is happening )
   #
   # Check for overlap with previous column.
   w1stLabOverU <- SrtOfLabs[1] - leftEdge   # have number of units over the edge of the plot.
   w1stLabOverI <- (w1stLabOverU / xupi)     # Convert units to inches of overhang.
   # if negative, then label is extended into next column
   # if positive or zero, then label is within column
   # add the values: if negative - OVERLAP.
   #                 if positive - space was available.
   #
   # TEST for overlap done outside of this routine, we just calculate the variable.
   #
   # Calculate the right edge overlap being used.  Will use as lastLab2Space handoff to next column.
   #
   wLastLabOverU <- rightEdge - EndOfLabs[lAtRx]
   wLastLabOverI <- (wLastLabOverU / xupi)
   # if pos value - we have room. neg - we need room.
   return(list(atRx=atRx,w1stLabOverI=w1stLabOverI,wLastLabOverI=wLastLabOverI))
}
#####
#
# DrawXAxisAndTitles - This functions takes the rx range of the data and calculates the X axis labels and
#    grid line positions.  Four methods are supported:
#       original ("o")  - the original method of labeling best on panelInbound and pretty functions
#       extended ("e")  - use of the extended algorithm and no panelInbound limiting.
#       wilkinson("w")  - use the wilkinson algorithm.
#       scale ("s")       - use of the extended algorithm and then scaling based on the largest value
#                           and subtitling the scale used.  (e.g., 100000 -> 10 in the ten thousands.)
#       scale number ("sn") - use of the extended algorithm and then scaling each number and adding a suffix
#                           to the number to indicate the scale used.  (e.g., 10000 -> 10M)
#
#  New Feature - lastLab2Space and lastLab3Space.  This allows us to determine if the lab2 or lab3 lines on
#  maps and ids overlap the axis on glyphs.
#  Process:
#    1) if staggered, exit
#    2) get width of axis first label.
#    3) discount offset (indent)
#    4) get amount of room for handover of label - space between plot and mid point.
#    5) see if room for remainder in lastLab2Space (and lab3).  If no room,
#       instigate staggerLab.
#
#  Take into account the user's request for scaling and staggering first.
#  If they don't fit, warn user, do scaling first ("sn") and try again.  If
#  unsuccessful, then force staggering of labels.
#
#  Other options to consider:
#     Force Zero to be gridded.
#     Optional edge grid line labels.
#     Enlarge edge buffer space to handle labels.
#     Modification of "preferred" number created by the extended algorithm.
#
#
#  titles may run into each other.
#
#  The function also handles the staggering of labels if requested.
#
#  Since the type of axis labeling impacts the lab1, lab2, lab3, and reftxt titles, this function also
#  handles the placement and drawing of the column titles and footnotes.
#
#  Subdivide into X-Axis and Title processing.  Let X-Axis find out how much space it needs, fill it
#  and pass to Titles, where to pick up the labeling.  If no X-Axis, then a known spacing will be passed
#  to the titles.
#
#  Basically start at 0 off the axis (panel) line.
#  Simple X-Axis is font "9" and takes up 1 line of space.
#  Staggered X-Axis is font "9" * 0.65(?) and takes up 1.5 lines of space.
#  Scaled with subtitle X-Axis is font "9" * 0.75 font and takes up 0.8 lines of space.
# # Combinations are (on top of title labels (1 or 2): # # Simple --------------- 1 line (font = 9) = axis label(0.75) + space(0.25) = 1 # Staggered, Simple ---- 1.5 lines (font = 9) = axis small label(2*0.625) + space(0.125) + space(0.125) = 1.5 # Scaled with subtitle - 1.5 lines (font = 9) = axis small label(2*0.625) + space(0.125) + space(0.125) = 1.5 # Stag. Scaled --------- 2.0 lines (font = 9) = axis small label(3*0.625) + space(2*0.125) + space(0.125)= 2.25 # one or two labels -- 2.0 lines # # So header can range from 1 line (no X-Axis) to 2 lines X-Axis with 1 label or 3 lines X-Axis and 2 labels # to a complex X-Axis > 1 to 2.05 lines plue the 1 or 2 lines of title. # # Need space for 1 to 4.05 lines with gaps. # # The same applies to the bottom labels. Lab3 is a title, and refText is the other title. # # Other discussion: Indenting edge labels. # 1) get length of labels # 2) determine how much room is available from edge to next inner label (length of that label and position.) # 3) How much to move to position inside box (or at least no further then 0.05" over the edge?) # 4) Is staggering label requested or required. # If labels fit, staggered may need to be turned off. # 5) If size of labels (all) do not fit, will staggering help? # 6) How to keep key values like "0" always labeled? What does the Axis algorithm use to omit labels. # # DrawXAxisAndTitles <- function(j, panels, rx, ry, reftxt, refval, leftPad=TRUE, rightPad=TRUE, YAxisPad=FALSE ) { ##### Start of Scaling and alternate labeling algorithms # # parameters needed: rx, ry, j, panels, reftxt, refval, XAxis=TRUE # # globals: Title.Line.X.pos set of variables. # axisMethod # Text.cex # staggerLab # staggered # lab1 # lab2 # lab3 # refTexts # refVals # # functions: Scaler1, Scaler2, extended, panelSelect, panelScale, warning # # must initially select panel to start getting widths and heights for scaling. 
#cat("\n\nEntered DrawXAxisAndTitle function","\n") #cat("DX01-panels and j:\n") #print(panels) #cat("i:",1," j:",j," rx:",rx," ry:",ry,"\n") panelSelect(panels, 1, j) # select panel x <- panelScale(rx, ry) # set scale for panel based on rx, ry xpin <- par("pin") xusr <- par("usr") xupi <- diff(xusr[1:2])/xpin[1] xps <- par("ps") staggering <- staggerLab # default is the user request. May be changed if needed. #cat("DXAT-start staggered:",staggered," staggerLab:",staggerLab," staggering:",staggering,"\n") #cat("Initial rx :",rx,"\n") # range adjustment xcxy <- par("cxy") # must be in the scale of the panel to get the right character width. #cat("xcxy:", xcxy," usr :",xusr," pin :",xpin," upi :",xupi," ps:",xps,"\n") xcxyMod <- xcxy[1]/4 # assume dot is least then the width of a average character. Use 1/4 for spacing. #cat("xcxyMod:",xcxyMod,"\n") if (leftPad) rx[1] <- rx[1] - xcxyMod if (rightPad) rx[2] <- rx[2] + xcxyMod #cat("Adjustment made for dot size - new rx:",rx,"\n") # reset scaling based on new rx range. x <- panelScale(rx, ry) # re-scale panel # get new values for user and units per inch xusr <- par("usr") xupi <- diff(xusr[1:2])/xpin[1] #cat("After dot re-scaling - usr :",xusr," pin :",xpin," upi :",xupi,"\n") par(xpd=T) # turn off clipping for panel # based on the axis method - prepare the atRx tick points and the atLab vector of labels # # Setup axis variables and get the axis "at" points and labels. # ##### ##### # # Scan possible labels and save heights. Lab1, Lab2, Lab3, refTexts # Check axis scaling and staggered and setup Axis1, Axis2 and subTitles # adjust Labx heights and spacings if required.l # Spaces and Heights are the constants, not the positions. We set them here. # # This also makes it simplier to have generic code further on. # # This will be coded to automatically: # labels at 3 points lower (25%) reduction of given point size. # Axis Large at 1 point lower than labels. 
(about 11.11%) # ability to reduce Axis Large by 1 pt for sizing of labels. (another 11.11%) # Axis Small (stagger) at 2 point lower than large labels. (22.22% below labels.) # # axisSubTitle <- "" # starts empty. # everything is based on a starting pointsize of 12. atLabcex <- Text.cex # Text.cex # 0.75 of 12 pt, -> 0.75 % (9 pt.) #cat("atLabcex:",atLabcex,"\n") # # Build elements to construct header and footer title and axis label positions. # xps <- par("ps") # current point size. ippt <- 1/72 # inches per point lppt <- 1/xps # line per point at par("ps") value (default = 12 pt. for 12 pt per line) ipl <- xps * ippt # points * inches per point at par("ps") -> inches per line. #cat("Step size 1 pt:",ippt," in. ",ippt*xupi," usr - lppt:",lppt," pt/line. \n") # 12pt * 0.75 -> 9pt, 18pt * 0.75 -> 13.5pt, 24pt * 0.75 -> 18 point lineNLabSize <- Text.cex # par("ps") - 3 points # 0.75 -> 0.75 % 1 line (0.75% of point size) (9 pt) lineNSpLabSize <- lineNLabSize * XAxis.Sp.mcex # PS * 15% # 0.75 * 0.2 -> 0.15 -> 20% of title line (1.8 pt) axisNLabSize <- lineNLabSize - (lppt) # - 1 pt delta / alternate -> XAxis.S.mcex = 0.666667 # 0.75 (9pt) - 1 pt -> 0.6667 % 89% line (8 pt) axisMLabSize <- lineNLabSize - (2 * lppt ) # - 2 pt delta # 0.75 (9pt) - 2 pt -> 0.5833 % 78% line (7 pt) axisSLabSize <- lineNLabSize - (3 * lppt ) # - 3 pt delta # 0.75 (9pt) - 3 pt -> 0.5 % 66.7% line (6 pt) axisLowestSize <- lineNLabSize - (4 * lppt ) # - 4 pt delta (lowest limit.) (5 pt) # 0.75 (9pt) - 4 pt -> 0.4167 % 55.5% line (5 pt) axisSubTSize <- axisSLabSize lineSSpLabSize <- lineNSpLabSize * 0.5 # 0.15 * 0.5 -> 0.075 % 10% line # calculations are dynamic - using ratios and percentages. # # Two labels and Axis = 0.66667 + 0.15 + 0.75 + 0.75 => 2.316667 + line height. # Axis Stag, Title, two labels = 0.5 + 0.5 + 0.075 + 0.75 + 0.75 -> 2.575 + line height. # must have at least 3.25 lines available. 
# #cat("lineNLabSize :",lineNLabSize,"\n") #cat("axisNLabSize :",axisNLabSize,"\n") #cat("axisMLabSize :",axisMLabSize,"\n") #cat("axisSLabSize :",axisSLabSize,"\n") #cat("axisSubTSize :",axisSubTSize,"\n") #cat("\n") #cat("lineNSpLabSize:",lineNSpLabSize,"\n") #cat("lineSSpLabSize:",lineSSpLabSize,"\n") #cat("\n") #cat("lineNLabSize-ps:",lineNLabSize*xps,"\n") #cat("axisNLabSize-ps:",axisNLabSize*xps,"\n") #cat("axisMLabSize-ps:",axisMLabSize*xps,"\n") #cat("axisSLabSize-ps:",axisSLabSize*xps,"\n") #cat("axisSubTSize-ps:",axisSubTSize*xps,"\n") #cat("\n") xusr <- par("usr") xupi <- diff(xusr[1:2])/xpin[1] #cat("A-usr:",xusr," xupi:",xupi,"\n") # # Work pattern, list for which to draw and where # lineTopSizes <- c(0, 0) # Lab2 Lab1 lineBotSizes <- c(0, 0) # Lab3 refText lineAxisSizes <- c(0, 0, 0, 0, 0) # Ax2 Ax1 SP AST`SP names(lineAxisSizes) <- c("Ax2","Ax1","SPT","AST","SP") # Axis spacing names(lineTopSizes) <- c("L2","L1") names(lineBotSizes) <- c("L3","L4") lineDo <- c( F, F, F, F, F, F, F, F, F) names(lineDo) <- c("Ax2","Ax1","SPT","AST","SP","L2","L1","L3","L4") xAxisDo <- FALSE xAxisDoOverlap <- TRUE lineMultiT <- c(1, 0.9, 0.9, 0.9, 1, 1, 1, 1) # size multiplier for proper spacing. names(lineMultiT) <- c("srt","Ax2","Ax1","SPT","AST","SP","L2", "L1") lineMultiB <- c(1, 0.9, 0.9, 0.9, 1, 1, 1, 1) # size multiplier for proper spacing. names(lineMultiB) <- c("srt","Ax2","Ax1","SPT","AST","SP","L3", "L4") # as of 8/17/16, we always print double axis labels to get them all printed. # atLab1 and atLab2 with atRx1 and atRx2 are created as the two halfs of the labels. # Set indicators if title/labels are present. 
if (lab1[j] != "") { lineDo["L1"] <- TRUE lineTopSizes["L1"] <- lineNLabSize } if (lab2[j] != "") { lineDo["L2"] <- TRUE lineTopSizes["L2"] <- lineNLabSize } if (lab3[j] != "") { lineDo["L3"] <- TRUE lineBotSizes["L3"] <- lineNLabSize } if (!is.na(reftxt)) { if (reftxt != "" || reftxt != " ") { lineDo["L4"] <- TRUE lineBotSizes["L4"] <- lineNLabSize } } # test to see if we have an axis to label. rx is not null. if (!is.null(rx)) { # X axis range present # initialize - we will have at least 1 X Axis line. - minimum setup. xAxisDo <- TRUE lineDo["Ax1"] <- TRUE # X Axis labels # 1 lineAxisSizes["Ax1"] <- axisNLabSize lineDo["Ax2"] <- TRUE # X Axis labels # 2 lineAxisSizes["Ax2"] <- 0 # zero to allow the overlap. lineDo["SP"] <- TRUE # Add spacing between title and X Axis. lineAxisSizes["SP"] <- lineNSpLabSize } # Use lineAxisSizes["Ax2"] to allow overlaying of Ax1 and Ax2 and use lineAxisSizes["Ax1"] # as the cex/font size for both Ax1 and Ax2 lines. # # if scales to TextCex = 0.7 then all times cex. = 4 * 0.7 => 2.8 lines of margin. # therefore, must have space for 3 mcex=1 height lines. # ######### # # Processing XAxis and rx. # Generate axis labels, scale and subtitle as required. # Results may be - single XAxis labels or XAxis labels with subtitle #cat("axisMethod:",axisMethod, " rx:",rx,"\n") if (axisMethod < 1 || axisMethod > 5) { #cat("***01X1 CARG-AX The Value for axisMethod internal variable is out of range 1-5 : ",axisMethod,"\n") axisMethod <- 4 } ### # # methods: # 1 = "o" use pretty to generate labels (original method), no scaling of labels. # 2 = "s" scale full range of numbers, # 3 = "sn" scale each number in label list. # 4 = "e" use extended labeling method. # 5 = "w" use wilkinson method # 6 = "a" automatics - evalute number, range, possible results of labeling calls, # edge number requiredments, range containing zero - and pick best set of tools. # (Future not coded - using "4" code. 
# # Future - add automatic - look at spacing and do scaling if required # auto scale to be done. # look at edges and do edge labels if required # # make sure zero is seen # wilkinson an extended handle # do staggered if edges overlap. # implemented # check for overlap with map or id column. # ID done # # Rules: # a) 3.4 labels per inch # b) number of labels must be at least 3. # c) request odd number of labels 3, 5, 7, 9 (expect no more than 9 labels on 2.5") # d) if number of labels > 3, trim labels not within rx data range, except zero value. # e) if panel width < 0.5, trim first and/or last labels if not within data range and zero # f) Never trim Zero. # ErrFnd <- FALSE # note errors #DoNarCol <- FALSE # indicate we are in the "narrow" column situtation. #cat("start of label fitting\n") #cat("lastLab2Space:",lastLab2Space,"\n") #cat("par('pin') :",par('pin'),"\n") #cat("par('usr') :",par('usr'),"\n") #cat("xupi :",xupi,"\n") ### # # estimated number of labels for glyph and make it an odd number. # reqNumLabels <- ((( xpin[1] * XAxis.nGridpIn ) %/% 2) * 2) + 1 # average 3.4 ticks/grid per inch made an odd number # average of 3.4 per inch * width in inches of panel. # force a minimum of 3 labels. if (reqNumLabels < 3) reqNumLabels <- 3 #cat("Start-reqNumLabels:", reqNumLabels," width in:",xpin[1]," XAxis.nGridlIn:",XAxis.nGridpIn,"\n") #cat("rx :",rx," axisMethod:",axisMethod,"\n") #cat("setup - colSepGap:",colSepGap," staggered:",staggered,"\n") # get sequence of possible number of labels listNumLabels <- seq(reqNumLabels, 3,by=-2) if (axisMethod==1) listNumLabels <- c(reqNumLabels) # method 1 does not use # of labels seed. # # main loop to find a set of X Axis labels that will file the space for the range. # # The major steps are repeated until a fit is found. 
# for (numLabels in listNumLabels) { #cat("Loop Start:",numLabels,"\n") #cat("lineAxisSizes:\n") #print(lineAxisSizes) #cat("lineDo :\n") #print(lineDo) ##### start of big loop to get solution between font size and number of labels. # Step 1 - generate list of labels for the requested number of labels. # do requested label generation and scaling. # Label Generation: o = panelInbounds, e = extended. # Scaling Methods : None, Scale range, Scale individual number. switch (axisMethod, { # method 1 - pretty - the "original" "o" #cat("Method 1-atRx:",atRx,"\n") # get reference points. atRx <- panelInbounds(rx) # list of ticks within range of x. (n=5 with triming) # pretty does n=5 by default. res <- CleanXLabels2(rx, atRx) atRx <- res$atRx rx <- res$rx # convert to character. atLab <- as.character(atRx) }, { # method 2 - scale range with subtitle "s" # scaling range - may have subtitle to include #cat("Method 2-atRx:",atRx,"\n") # get reference points atRx <- extended(rx[1],rx[2],numLabels) res <- CleanXLabels2(rx, atRx) atRx <- res$atRx rx <- res$rx # get Scaler1 results on max. atLabVc <- Scaler1(rx[2]) # get divisor and subtitle based on max value #cat("atLabVc:",atLabVc,"\n") axisSubTitle <- atLabVc[2] # get sub-title (if any)[2] [1] multiplier # scale the values into the character string. atLab <- formatC(atRx / as.numeric(atLabVc[1]), format="f", digits=2, drop0trailing=TRUE) if (axisSubTitle != "") { # add sub-title to header #cat("Add - axisSubTitle:",axisSubTitle,"\n") # Make adjustments # Scale each number (S) # Add subtitle and spacer at small axis size (Norm to Med - 1 pt) lineAxisSizes["AST"] <- axisMLabSize lineDo["AST"] <- TRUE lineAxisSizes["SPT"] <- lineSSpLabSize # use 1/2 of axis to titles spacing. lineDo["SPT"] <- TRUE # reduce size of axis labels lineAxisSizes["Ax1"] <- axisMLabSize lineDo["Ax1"] <- TRUE lineAxisSizes["Ax2"] <- 0 # no staggering yet - Ax1 and Ax2 on same line. lineDo["Ax2"] <- TRUE # include spacing between title and axis. 
lineAxisSizes["SP"] <- lineNSpLabSize # normal spacing because we have not staggered, yet. lineDo["SP"] <- TRUE lineMultiB["SP"] <- 2.25 # need a fudge on the Bottom. } }, { # method 3 - scale numbers with suffix "sn" # no subtitle will be added. #cat("Method 3-atRx:",atRx,"\n") atRx <- extended(rx[1],rx[2], numLabels) res <- CleanXLabels2(rx, atRx) atRx <- res$atRx rx <- res$rx atLab <- sapply(atRx, Scaler2) # scale the X axis labels. Scaler2 does label formating for each value. }, { # method 4 - extended algorithm (no scaling) "e" # no scaling - no subtitles # replaced wilkinson algorithm with extended - better behaved in number of labels generated vs. request. #cat("Method4 - extended rx:",rx," numLabels:",numLabels,"\n") atRx <- extended(rx[1],rx[2],numLabels) res <- CleanXLabels2(rx, atRx) atRx <- res$atRx rx <- res$rx atLab <- as.character(atRx) }, { # method 5 - wilkinson algorithm (no scaling) "w" # no scaling - no subtitles # replaced wilkinson algorithm with extended - better behaved in number of labels generated vs. request. #cat("Method5 - wilkinson rx:",rx," numLabels:",numLabels,"\n") atRx <- wilkinson(rx[1],rx[2],numLabels) res <- CleanXLabels2(rx, atRx) atRx <- res$atRx rx <- res$rx atLab <- as.character(atRx) }, { # method 6 - placeholder for automatic scaling, edge numbers, and staggering of labels. "e" # for now same as 4 # Future Coding - place holder. # # Do each scaling and see which creates the smallest set of labels. # Which way to do: 1) number of characters, 2) strwidth each summed, # 3) concat. labels with 1, 2 spaces? # #cat("Method6 - extended rx:",rx," numLabels:",numLabels,"\n") atRx0 <- panelInbounds(rx) # list of ticks within range of x. 
(n=5 with triming) res <- CleanXLabels2(rx, atRx0) atRx0 <- res$atRx rx0 <- res$rx atLab0 <- as.character(atRx0) atRx1 <- extended(rx[1],rx[2],numLabels) res <- CleanXLabels2(rx, atRx1) atRx1 <- res$atRx rx1 <- res$rx atLab1 <- as.character(atRx1) atLabVc <- Scaler1(rx1[2]) # get divisor and subtitle based on max value axisSubTitle <- atLabVc[2] # get sub-title (if any) #cat("atLabVc:",atLabVc,"\n") atLab2 <- formatC(atRx1 / as.numeric(atLabVc[1]), format="f", digits=2, drop0trailing=TRUE) atLab3 <- sapply(atRx1, Scaler2) stop }, { # default call #cat("axisMethod value unknown:",axisMethod,"\n") ErrFnd <- TRUE stopCnt() xmsg <- paste0("***0490 DMP Error in axisMethod set to ",axisMethod," in DrawXAxisAndTitles. ERROR. Default used.") stop(xmsg,call.=FALSE) atRx <- c(0,1) } ) # #cat("Method executed\n") #cat("atRx :",atRx ,"\n") #cat("atLab:",atLab,"\n") #cat("rx :",rx,"\n") #print(lineAxisSizes) #print(lineDo) #### Labels selected and Scaling done. # # Step 2 - Split the labels into two overlaping vectors. # and initialize for finding fit. # lAtRx <- length(atRx) rAtRx <- range(atRx) # get first and last label values lAtLab <- length(atLab) #cat("lAtRx:",lAtRx," rAtRx:",rAtRx," lAtLab:",lAtLab," rx:",rx,"\n") #cat(" par(usr):",par('usr')," par(pin):",par('pin')," xupi:",xupi,"\n") #cat("staggered :",staggered," staggerLab:",staggerLab," staggering:",staggering,"\n") FndFit <- FALSE MakeStag <- FALSE # # at this point we have: # title1 (opt) # title2 (opt) (but title 1 or title 2 must be present) # subtitle (optional) # axis # 1 & 2 (both used to overlay axis label plotting. # # Adjust the first and last atRx values to move number inward a little. 
# atLab1 <- atLab[seq(1,length(atLab),by=2)] atLab2 <- atLab[seq(2,length(atLab),by=2)] atRx1 <- atRx[seq(1,length(atRx),by=2)] atRx2 <- atRx[seq(2,length(atRx),by=2)] #cat("Split label list\n") #cat("atLab1:", atLab1 ,"\n") #cat("atRx1 :", atRx1 ,"\n") #cat("atLab2:", atLab2 ,"\n") #cat("atRx2 :", atRx2 ,"\n") # # test to see how axis may draw the labels. # if they will not fit our calculations, then must likely # will be dropped by R's axis function. We are trying to out # guess R. # # Test fitting of single line axis (if not staggerLab) at Normal, -1pt, and -2pt # font sizes. Then test stagger labels at Normal and -1 pt font size. # If these don't work = punt and let the main loop try few labels. # # # Step 3 - Test single line style, if staggerLab not requested by caller. # if (!staggering) { # labels will not be stagger - by us or caller - at least not yet - so check single line format. #cat("NOT STAGGERING - Single Line Style Test\n") # check the fit of the axis labels, adjust as needed up to a point. wX <- lineAxisSizes["Ax1"] # original font size res <- TestOverlap(wX, atLab, atRx, 1) # space between must be 1 space. #cat("test1 - ces=wX:",wX," res:",res,"\n") # check X Axis fit as full non-staggered labels. if (!res) { #cat("full axis no staggered at font - OK - wX:",wX,"\n") # leave parameters as set. FndFit <- TRUE } else { # did not fit single line normal point size. wX <- wX - lppt # back up 1 point # orig font - 1 pt res <- TestOverlap(wX,atLab, atRx, 1) #cat("test2 - ces=wX:",wX," res:",res,"\n") if (!res) { # Good solution - update axis parameters lineAxisSizes["Ax1"] <- wX FndFit <- TRUE } else { # did not fit single line normal-1pt size. wX <- wX - lppt # back up 2 points # orig font - 2 pt res <- TestOverlap(wX,atLab,atRx, 1) #cat("test3 - ces=wX:",wX," res:",res,"\n") if (!res) { # Good Solution - 2 point. - update parameters lineAxisSizes["Ax1"] <- wX FndFit <- TRUE } else { # will not fit as single line axis labels. 
FndFit <- FALSE } } } # Note: if single line fits, it's still drawn as two overlapping label sets. } # end of single line checks. # # Step 4 - if not fit as single or staggerLab requested, test a staggered label style # if (!FndFit) { # no fit found for single line (or it was bypassed), do staggerLab style. #cat("Testing staggering style\n") # find longest staggered label list. wX <- lineAxisSizes["Ax1"] # remember this is already small. lAtLab1 <- nchar(paste0(atLab1,collapse=" ")) # space added between each label lAtLab2 <- nchar(paste0(atLab2,collapse=" ")) # space added between each label # Find the longest label set to use for test based on characters. if (lAtLab1 > lAtLab2) { wAtLab <- atLab1 wAtRx <- atRx1 } else { wAtLab <- atLab2 wAtRx <- atRx2 } # wAtLab is the longest set of labels based on character count. lwAtLab <- length(wAtLab) #cat("Longest of labels - wAtLab:",wAtLab," lwAtLab:",lwAtLab," axisLowestSize:",axisLowestSize,"\n") FndFit <- FALSE res <- TestOverlap(wX, wAtLab, wAtRx, 2) #cat("testS1 - cex=wX:",wX," 2 space res:",res,"\n") if (!res) { # Good should fit using standard height and staggered #cat("Initial values are good - keep them:",wX," Fit found\n") MakeStag <- TRUE FndFit <- TRUE } else { # no fit - try one small font wX <- wX - lppt # reduce size 1 point. if (wX > axisLowestSize) { # if bigger then smallest permitted. continue. # test labels and cex res <- TestOverlap(wX, wAtLab, wAtRx, 2) #cat("test s2 - cex=wX:",wX," 2 space res:",res,"\n") if (!res) { # good fit at small font. lineAxisSizes["Ax1"] <- wX #cat("fit found at ",wX,"\n") MakeStag <- TRUE FndFit <- TRUE } else { wX <- wX - lppt # reduce size 1 point. if (wX > axisLowestSize) { # if bigger then smallest permitted. continue. # test labels and cex - 2 pts. 
res <- TestOverlap(wX, wAtLab, wAtRx, 2) #cat("test s3 - cex=wX:",wX," 2 space res:",res,"\n") if (!res) { # goo fit at smaller font lineAxisSizes["Ax1"] <- wX #cat("fit found at ",wX,"\n") MakeStag <- TRUE FndFit <- TRUE } } } } } } if (FndFit) break # if have solution - stop looping. # if not fit, try reducing number of labels. #cat("End of Single and Staggered - FndFit:",FndFit," numLabels:",numLabels," len(atRx):",length(atRx),"\n") } # end of for loop on number of labels. # # Checking is done. Have fit or not. # #cat("exit numLabels loop\n") ##### end of loop - have solution??? if (!FndFit) { # no solution found???? cat("no XAxis labels fit found!!!\n") MakeStag <- TRUE } #cat("end of numLabels loop - FndFit:",FndFit,"\n") #cat("atLab1:",atLab1,"\n") #cat("atRx1 :",atRx1, "\n") #cat("atLab2:",atLab2,"\n") #cat("atRx2 :",atRx2, "\n") #cat("lineDo:\n") #print(lineDo) #cat("lineAxisSizes:\n") #print(lineAxisSizes) #cat("lineTopSizes:\n") #print(lineTopSizes) #cat("lineBotSizes:\n") #print(lineBotSizes) #cat("staggering:", staggering," staggered:",staggered," MakeStag:",MakeStag,"\n") #cat("start of edge checking - lastLab2Space:",lastLab2Space,"\n") ##### # # issues with labels - if label/grid near the edge - label hangs over the edge to next column. # solutions: a) move edge labels inward. labels like 0 may not need to be moved. # b) enforce staggered, so next columns number is on a different level. # c) delete edge label (if > 3 labels) # ##### # # Step 5 - check edge labels to see if indenting them will help. # # # Have to sets of labels atLab1 and atLab2... # # Situations: # rx[1] = edge (always), no atRx is outside this value. # atRx[1] - rx[1], is units from edge to grid for label # colSepGap can also be used as working space. # #cat("par('usr'):",par("usr"),"\n") #cat("par('pin'):",par("pin"),"\n") #cat("atRx :",atRx,"\n") #cat("atRx1 :",atRx1,"\n") res <- TestLabAtEdge(atLab1,atRx1,YAxisPad,rx,lineAxisSizes) # get results. 
w1stLabOverI <- res$w1stLabOverI wLastLabOverI <- res$wLastLabOverI atRx1 <- res$atRx #cat("1-res$atRx:",res$atRx," $1st:",res$w1stLabOverI," $Last:",res$wLastLabOverI,"\n") #cat("atRx2 :",atRx2,"\n") res <- TestLabAtEdge(atLab2,atRx2,YAxisPad,rx,lineAxisSizes) # get results. #cat("2-res$atRx:",res$atRx," $1st:",res$w1stLabOverI," $Last:",res$wLastLabOverI,"\n") atRx2 <- res$atRx if (res$w1stLabOverI < w1stLabOverI) { w1stLabOverI <- res$w1stLabOverI } if (res$wLastLabOverI < wLastLabOverI) { wLastLabOverI <- res$wLastLabOverI } #cat("results -> w1st:",w1stLabOverI," in. wLast:",wLastLabOverI," in.\n") #cat("lastLab2Space :",lastLab2Space," in.\n") # check the column overlap: xW <- strwidth("0",cex=lineAxisSizes["Ax1"],units="inch") # get size of a digit in inches. xW <- xW * XAxis.gapPC # 75% of the width. #cat("sum column overlap:",(w1stLabOverI+lastLab2Space)," in. Size Digit:",xW," in.\n") if ((w1stLabOverI + lastLab2Space) <= xW ) { # overlap condition. Force staggered. #cat("Lab2 text overlapping between columns - MakeStag set to TRUE\n") MakeStag <- TRUE # set staggering active flag. (column request.) } # lastLab2Space is the number of inches the left column has intruded into our column. #cat("lastLab2Space:",lastLab2Space," last column: + need space, - has space. lab 2 row.\n") # lastLab2Space < 0, last column needs space from us. # If sum(lastLab2Space,w1stLabOverI) => 0 there is room. # < 0 not enough room - overlap issue. # # lastLab2Space =>0, last column has space for us. # if sum(lastLab2Space,w1stLabOverI) => 0 there is room. # < 0 not enough room - overlap issue. # lastLab2Space <<- wLastLabOverI #cat("Setting lastLab2Space:",lastLab2Space,"\n") #cat("lastLab3Space:",lastLab3Space,"\n") #cat("staggering:", staggering," staggered:",staggered," MakeStag:",MakeStag,"\n") # # Step 6 - if staggered was requested or found to be the solution, set up all parameters. 
# if (MakeStag) { # take the two label sets and make a staggered XAxis #cat("MakeStag = TRUE - Modifying vector to do staggered.\n") # Adjust the sizes of font and spaces between lines for staggered style. # put in right order for neighboring column # check status of last column - staggered = TRUE, ended HIGH, = FALSE, ended LOW. #cat("Last Column position - staggered:",staggered,"\n") if (!staggered) { # staggered = FALSE (no stagger or ended low.) start high. # last column had no stagger, no stagger done, or ends in low position. # move to start in high position. # get updated information. # # No change. # #s1 <- seq(1,lAtLab,by=2) #s2 <- seq(2,lAtLab,by=2) # start high (ax1) #cat("HIGH position, keep labels in same order - 1st value LOW - atLab1.\n") } else { # start low #s1 <- seq(2,lAtLab,by=2) #s2 <- seq(1,lAtLab,by=2) # switch them wAtLab <- atLab1 atLab1 <- atLab2 atLab2 <- wAtLab wAtRx <- atRx1 atRx1 <- atRx2 atRx2 <- wAtRx #cat("LOW position, swap labels - 1st value HIGH - atLab2.\n") } #cat("lineAxisSizes:\n") #print(lineAxisSizes) #if (lineAxisSizes["Ax1"] == axisNLabSize ) { # # # change is not modified previously. # lineAxisSizes["Ax1"] <- axisSLabSize # set new height for axis # 1 #} lineDo["Ax1"] <- TRUE # enable # Change line size same as Ax1 - it may have been reduced. lineAxisSizes["Ax2"] <- lineAxisSizes["Ax1"] lineDo["Ax2"] <- TRUE # enable # If subtitle, change it's size and spacing. if (lineDo["AST"]) { # if subtitle present from before. # Scale Subtitle is present with staggered. lineAxisSizes["AST"] <- lineAxisSizes["Ax1"] # reduce title to axis line spacing # set new subtitle height lineAxisSizes["SPT"] <- lineSSpLabSize # set new subtitle space height } if (length(atRx1) != length(atRx2)) { # if not the same length the pattern is AVAVA or VAVAV pattern. # in the AVAVA case, staggered must have been FALSE to start high. # in the VAVAV case, staggered must have been TRUE to start low. 
# in either case, reverse staggered staggered <<- xor(staggered, TRUE) } else { # same length pair - AVAV or VAVA pattern. Leave staggered set the same. } # reduce spacing between titles and axis labels/subtitles. lineAxisSizes["SP"] <- lineSSpLabSize # reduce space to labels/titles # set new title to axis space height. lineMultiB["SP"] <- 2.25 # fudge on bottom. #cat("Make Staggered - settings - lineAxisSizes:\n") #print(lineAxisSizes) #cat("lineDo:\n") #print(lineDo) } # ##### # # # # Process StaggerLab option. # # # split up the labels for overlapping. # # #cat("Done with methods - on to plotting.\n") # ## now this is already done. just need to change font sizes. # ## if label staggering requested - add space for line NOW! ## don't know by whom!!! ## ## staggerLab set by package call parameter (user) ## staggering set by internal code to force staggering for this column. ## # ##cat("staggerLab:",staggerLab," staggering:",staggering," staggered:",staggered,"\n") # #if (staggerLab) { staggering <- TRUE } # ## atLab1, atLab2, atRx1, atRx2 already setup for the overlaid axis, with sequence normal 1... and 2.... # #if (staggering) { # # Check to make sure we have 2 or more labels. # # if (length(atLab) > 1) { # # # Can only stagger labels if more than one. Code should not let this happen now. # # #cat("Process staggering request from user or axis label processing - staggerLab:", staggerLab," staggering:",staggering,"\n") # # we have enough labels ( > 1 ) to stagger. # # # We have already split the labels up into atLab1 and atLab2 for the overlap printing. # # The only think to do to stagger the drawing is to change the # # spacing of the lines (especially "Ax2"). # # # # we are doing staggered axis labels (2 lines) # # # Staggered Labels - setup # # #cat("adjusting label sizes for staggering with two axis rows. Axis/SubT Size set to small.\n") # # # Change line size. 
# if (lineAxisSizes["Ax1"] == axisNLabSize ) { # lineAxisSizes["Ax1"] <- axisSLabSize # set new height for axis # 1 # } # lineDo["Ax1"] <- TRUE # enable # # lineAxisSizes["Ax2"] <- lineAxisSizes["Ax1"] # add axis # 2 (will cause two rows.) # lineDo["Ax2"] <- TRUE # enable # # # If subtitle, change it's size and spacing. # if (lineDo["AST"]) { # if subtitle present from before. # # Scale Subtitle is present with staggered. # lineAxisSizes["AST"] <- lineAxisSizes["Ax1"] # reduce title to axis line spacing # set new subtitle height # lineAxisSizes["SPT"] <- lineSSpLabSize # set new subtitle space height # } # # # reduce spacing between titles and axis labels/subtitles. # # lineAxisSizes["SP"] <- lineSSpLabSize # reduce space to labels/titles # set new title to axis space height. # lineMultiB["SP"] <- 2.25 # fudge on bottom. # # #cat("Stagger settings - lineAxisSizes:\n") # #print(lineAxisSizes) # # # # # Done setting up labels and dual axis with sizes and spaces # # # # # Now handle the order (high and low.) # # # # Check what happened in the last column to see where to start the staggering. # # # # staggered = false (def) - last column was low (also no staggered may have happened.) # # if staggering = false, ignore leave along, not doing stagger (not in this code) # # = true, process (go high and process last label. # # # # true - last column was staggered and ended HIGH. # # if staggering = false, ignore set staggered <- FALSE (not in this code) # # = true, process (go low and process last label. # # # # # # # we are set to start low. # # if (!staggered) { # staggered = FALSE (no stagger or ended low.) start high. # # last column had no stagger, no stagger done, or ends in low position. # # move to start in high position. # # get updated information. 
# s1 <- seq(1,length(atLab),by=2) # s2 <- seq(2,length(atLab),by=2) # # start high (ax1) # } else { # # start low # s1 <- seq(2,length(atLab),by=2) # s2 <- seq(1,length(atLab),by=2) # } # atLab1 <- atLab[s1] # atLab2 <- atLab[s2] # atRx1 <- atRx[s1] # atRx2 <- atRx[s2] # lAtRx1 <- length(atRx1) # lAtRx2 <- length(atRx2) # # if (lAtRx1 != lAtRx2) { # # if not the same length the pattern is AVAVA or VAVAV pattern. # # in the AVAVA case, staggered must have been FALSE to start high. # # in the VAVAV case, staggered must have been TRUE to start low. # # in either case, reverse staggered # staggered <<- xor(staggered, TRUE) # # } else { # # same length pair - AVAV or VAVA pattern. Leave staggered set the same. # } # } #} # #cat("Staggered - atRx1:",atRx1," atRx2:",atRx2," len(1):",length(atRx1)," len(2):",length(atRx2),"\n") #cat("staggering:",staggering," staggered:",staggered," MakeStag:",MakeStag,"\n") # ##### #cat("atLab1:",atLab1,"\n") #cat("atLab2:",atLab2,"\n") #cat("atRx1 :",atRx1 ,"\n") #cat("atRx2 :",atRx2,"\n") #cat("\n") #cat("lineAxisSizes:\n") #print(lineAxisSizes) #cat("lineTopSizes:\n") #print(lineTopSizes) #cat("lineBotSizes:\n") #print(lineBotSizes) # # Step 7 - finish setting up the top and bottom labels. # # Top margin titles/axis lineSizesT <- c(0,lineAxisSizes,lineTopSizes) # combine axis and top titles spacings names(lineSizesT) <- c("N","Ax2","Ax1","SPT","AST","SP","L2","L1") lineSizesTM <- lineSizesT * lineMultiT #cat("lineSizesT&TM:\n") #print(lineSizesT) #print(lineSizesTM) # calculate the positions of each and add offset. linePosT <- cumsum(lineSizesTM) + 0.01 # get line position of each element names(linePosT) <- c("Ax2","Ax1", "SPT", "AST", "SP", "L2", "L1") #cat("linePosT:\n") #print(linePosT) # if overlaped but not staggered, linePosT "Ax1" and "Ax2" should be the same. # ##### ##### # # Bottom margin titles/axis # lineSizesB <- c(0,lineAxisSizes,lineBotSizes) # combine axis and bottom title spacings. 
names(lineSizesB) <- c("N","Ax2","Ax1","SPT","AST","SP","L3","L4") lineSizesBM <- lineSizesB * lineMultiB #cat("lineSizesB&BM:\n") #print(lineSizesB) #print(lineSizesBM) # calculate the positions of each and add offset. linePosB <- cumsum(lineSizesBM) + 0.01 # get line position of each elements names(linePosB) <- c("Ax2","Ax1", "SPT", "AST", "SP", "L3", "L4") #cat("linePosB:\n") #print(linePosB) titleLab3 <- linePosB["L3"] # make any adjustments in the trailer code. titleLab4 <- linePosB["L4"] #cat("lineDo:\n") #print(lineDo) # ##### ###### ## ## Calculate the lastLab2Space to hand off to next column. ## ## if not staggered Labs - then calculate the space left # #LabLastOverU <- rightEdge - EndOfLabs[lAtRx] #LabLastOverI <- LabLastOverU / xupi #lastLab2Space <<- LabLastOverI ##cat("Setting lastLab2Space & LabLastOverI",lastLab2Space,"\n") # ## if pos value - we have room. neg - we need room. # # End of Xaxis processing. # ######## ######## # # Column Headers - printing # # Note: mgp(a,b,c) - a => position for axis labels, b,c => position for axis values and line, # in mcex values. def = c(3,1,0) # # # Select panel and re-scale - 1st panel (top) to do title/labels and axis labels #cat("DX02-column headers printing - rx:",rx," ry:",ry," i:",1," j:",j,"\n") panelSelect(panels,1,j) x <- panelScale(rx,ry) par(xpd=T) # print in margin space above panel 1 of column. 
# # column titles # if (lineDo["L1"]) mtext(lab1[j],side=3, line=linePosT["L1"], cex=lineTopSizes["L1"]) if (lineDo["L2"]) mtext(lab2[j], side=3, line=linePosT["L2"], cex=lineTopSizes["L2"]) # # axis sub-title # if (lineDo["AST"]) { mtext(axisSubTitle, side=3, line=linePosT["AST"], cex=lineAxisSizes["AST"]) # line 2 or 3 } # # column top axis(es) # if (lineDo["Ax1"]) { # line 1 or 2 (above axis # 2) #cat("Top-axis calls - atLab1:",atLab1," atRx1:",atRx1,"\n") #cat(" mgp:linePosT['Ax1']:",linePosT["Ax1"],"\n", # " lineAxisSizes['Ax1']:",lineAxisSizes["Ax1"],"\n") axis(side=3, tick=F, at=atRx1, labels=atLab1, mgp=c(3.2,linePosT["Ax1"],0), cex.axis=lineSizesT["Ax1"] ) } if (lineDo["Ax2"]) { # line 1 #cat("Top-axis calls - atLab2:",atLab2," atRx2:",atRx2,"\n") #cat(" mgp:linePosT['Ax2']:",linePosT["Ax2"],"\n", # " lineAxisSizes['Ax1']:",lineAxisSizes["Ax1"],"\n") axis(side=3, tick=F, at=atRx2, labels=atLab2, mgp=c(3.2,linePosT["Ax2"],0), cex.axis=lineAxisSizes["Ax1"]) # this is not an error, Ax2 is always printed the same size as Ax1 } # # ######## end of column header ##### # # Column Trailers # # Select and Scale to bottom panel in column #cat("DX03-trailer column headers - numGrps-i:",numGrps," j:",j," numGrps:",numGrps," rx:",rx," ry:",ry,"\n") panelSelect(panels,numGrps,j) x <- panelScale(rx,ry) par(xpd=T) # print in margin space below bottom panel # padj in axis needed to make grid line label close ##### # # Adjustment values to make bottom labels have the same space as the top labels. # old method. 
# #botLAdj <- -0.05 # label height adjustment (title/reftext) #botAdj <- -lineSizesB["Ax1"] # ->> (-0.666667 or -0.5 ) (axis row height in lines) #botAxisAdj <- - 0.2 #-lineSizesB["Ax1"] * 0.3333 # - 0.05 # 1/2 * 0.8 of row height in lines #botAxisBase <- Title.Line.2x.pos - ( ( 1 - lineSizesB["Ax1"] ) * 0.6333333 ) #cat("bottom title/labels-botLAdj:",botLAdj,"\n") #cat(" botAdj:",botAdj,"\n") #cat(" botAxisAdj:",botAxisAdj,"\n") #cat(" botAxisBase:",botAxisBase,"\n") # # ##### ##### # # new bottom margin line adjustment algorithm # desiredCex <- lineAxisSizes["Ax1"] xPs <- par("ps") # get current system base point size being used. Everything is based on this value. xHPsLU <- strheight("00000",cex=1,units="user") xHDesPsLU <- strheight("00000",cex=desiredCex,units="user") xDifHU <- xHPsLU - xHDesPsLU # different between system line and our line xBotAdj <- xDifHU / xHPsLU # ratio of dif (not used) and full line. % percent of line. botAxisBase <- 0.15 - xBotAdj # in lines. botAxisBAdj <- botAxisBase # + 0.05 botLAdj <- 0.05 #cat("New Bottom - botAxisBase:",botAxisBase," botAxisBAdj:",botAxisBAdj," botLAdj:",botLAdj,"\n") # column bottom axis lines if (lineDo["Ax1"]) { #cat("Bot-axis #1 - linePosB['Ax1']:",linePosB["Ax1"],"\n", # " lineAxisSizes['Ax1']:",lineAxisSizes["Ax1"],"\n", # " botAxisBase:",botAxisBase,"\n", # " botAxisBAdj:",botAxisBAdj,"\n") #cat(" atRx1:",atRx1," atLab1:",atLab1,"\n") axis(side=1, tick=F, at=atRx1, labels=atLab1, line=botAxisBAdj, mgp=c(3.2, linePosB["Ax1"],0), cex.axis=lineAxisSizes["Ax1"]) } if (lineDo["Ax2"]) { #cat("Bot-axis #2 - linePosB['Ax2']:",linePosB["Ax2"],"\n", # " lineSizesB['Ax1']:",lineSizesB["Ax1"],"\n", # #" botAxisAdj:",botAxisAdj,"\n") # " botAxisBase:",botAxisBase,"\n") #cat(" atRx2:",atRx2," atLab2:",atLab2,"\n") axis(side=1, tick=F, at=atRx2, labels=atLab2, line=botAxisBAdj, mgp=c(3.2, linePosB["Ax2"],0), cex.axis=lineAxisSizes["Ax1"]) } # if axis sub-title if (lineDo["AST"]) { wAST <- linePosB["AST"] + botAxisBAdj 
#cat("BotAST - linePosB['AST']:",linePosB["AST"],"\n", # " lineAxisSizes['AST']:",lineAxisSizes["AST"],"\n", # #" botAxisAdj:",botAxisAdj,"\n") # " botAxisBase:",botAxisBase,"\n") #cat(" line=wAST:",wAST,"\n") mtext(axisSubTitle, side=1, line = wAST, cex=lineAxisSizes["AST"]) } # ______Bottom Label/Title - Lab3 ______ # bottom of column footnote (title) if (lineDo["L3"]) { titleLab3 <- linePosB["L3"] + botAxisBase - 0.05 #cat("BotAxis # 3 - linePosB['L3']:",linePosB["L3"],"\n", # " botAxisBAdj:", botAxisBAdj, "\n", # " botAxisBase:", botAxisBase, "\n", # #" botLAdj :",botLAdj,"\n", # " line=titleLab3:",titleLab3,"\n") mtext(side=1,lab3[j], line=titleLab3, cex=lineBotSizes["L3"]) # bottom labels. } # _______Reference Value Legend titleLab4 <- linePosB["L4"] + botAxisBase #cat("reftxt:",reftxt," refval:",refval," lineDo[L4]:",lineDo["L4"],"\n") #cat("BotAxis # 4 (reftext) - linePosB['L4']:",linePosB["L4"],"\n", # " botAxisBase:",botAxisBase,"\n", # #" botAxisAdj:",botAxisAdj,"\n", # #" botLAdj:",botLAdj,"\n", # " line=titleLab4:",titleLab4,"\n") # Handle special needs of the reftext and it's line. if (!is.na(refval)) { if (is.between.r(refval,rx)) { # refval must be in the range of the data. Otherwize - ignore. if (!is.na(reftxt) ) { # Get y pos in user units to draw the line and text. # select panel done before this call. xpin <- par("pin") # distances in inches xusr <- par("usr") # distances in user scale (points) xmar <- par("mar") xmai <- par("mai") #fpc <- 0.95 # fudge adjustment #cat("xpin:",xpin," xusr:",xusr," xmar:",xmar," xmai:",xmai,"\n") # # Calculate X positions for the line and text in the margin. 
(units=user) # xCenterU <- mean(xusr[1:2]) # center of the glyph column xWidthU <- diff(xusr[1:2]) # unit width of glyph column => diff(rx) - user units xUnitsPerInch <- xWidthU/xpin[1] # units / inch for x xHalfInchU <- xUnitsPerInch * 0.5 #* fpc # 1/2" of units #cat(" center of glyph-xCenterU:",xCenterU, "\n", # " width of glyph -xWidthU :",xWidthU, "\n", # " xUnitsPerInch :",xUnitsPerInch,"\n", # " xHalfInchU :",xHalfInchU,"\n") # # line length will be whats left after taking away room for text or 1/2 inch # xTxt <- str_trim(reftxt) # get refText and trim side blanks. # length of texts in units xTxtLenU <- strwidth(xTxt,units="user", cex=lineSizesB["L4"]) #* fpc # length text # xHalfFreeLenU <- ((xWidthU - xTxtLenU) / 2 ) #* fpc # half space left for line #cat("xTxtLenU:", xTxtLenU," half free avail-xHalfFreeLenU:",xHalfFreeLenU,"\n") xLineLenU <- xHalfFreeLenU # see if room for half inche line, if not use shorter value. if (xLineLenU > xHalfInchU) xLineLenU <- xHalfInchU # get length of line to 1/2 inch # calculate start of line. xLineStartu <- xCenterU - (xLineLenU + xTxtLenU) / 2 # center - half (text length + line length) xTxtStartu <- xLineStartu + xLineLenU #cat("xLineStartu:",xLineStartu," xTxtStartu:",xTxtStartu,"\n") # # Calculate the Y positions for the line and text in the margin for the refText. # line needs units=users, text needs "lines" # xTitleLab4 <- titleLab4 # + botLAdj # Text Line offset from Axis line. #cat("ConvLineToUser call-xTitleLab4:",xTitleLab4,"\n") yTextPosu <- ConvLineToUser(1, xTitleLab4) # position text position in user units. yTextHu <- strheight(xTxt, units="user", cex=lineSizesB["L4"]) # find height of text in user units. #cat("yTextPosu:",yTextPosu," yTextHu:",yTextHu,"\n") # position of line based on Text position(user) - 60% of the text height. yLinePosu <- yTextPosu - (yTextHu * 0.6) # lines y coord. is 1/2 text height toward plot. 
#cat("Y Position for L4 - line(u):",yLinePosu," text(u):",yTextPosu," text(l) xTitleLab4:",xTitleLab4," titleLab4:",titleLab4,"\n") #cat("xTitleLab4:",xTitleLab4," titleLab4:", titleLab4,"\n", # " linePosB['L4'] :",linePosB["L4"],"\n", # " lineSizesB['L4']:",lineSizesB["L4"],"\n", # " botAxisAdj :",botAxisAdj," botLAdj:",botLAdj,"\n") # way to find graphic length of string --> sw <- strwidth(reftxt,cex=Text.cex) # add text definition for legend. (5/21/13 - added color to line) # draw line. lines(c(xLineStartu, xTxtStartu), rep(yLinePosu, 2), lty=Ref.Val.lty, lwd=Ref.Val.lwd, col=iRef.Val.col) # draw length line up to 1/2 inch. # mtext does not let you set the X position of the text, so the old text function must be used with x, y coordinates. text(xTxtStartu, y=yLinePosu, labels=xTxt, cex=lineBotSizes["L4"], col=iRef.Text.col, offset=0, adj=c(0,NA)) # text starting at line end. #cat("Line%Start:", xLineStartu/xWidthu, " Txt%Start:",xTxtStartu/xWidthu," titleLab4:", titleLab4,"\n") } } } # ##### end of trailer #cat("Returned staggered:",staggered,"\n") return(list(atRx=atRx, rx=rx, ry=ry)) # return the set of tick points for grid lines. } # ### ### # # MapDrawer # MapDrawer <- function(wAreaVisBorders, wL2VisBorders, wRegVisBorders, wL3VisBorders, WorkList) { # # # wLen <- dim(WorkList)[1] # get number of entries for (ind in c(1:wLen)) { wEntry <- WorkList[ind,] if (wEntry$Borders==1) { # L2 borders # Map background - Layer 2 borders (regional areas (US -> states)) polygon(wL2VisBorders$x, wL2VisBorders$y, density=-1, col=wEntry$Fill.col, border=FALSE) polygon(wL2VisBorders$x, wL2VisBorders$y, density=0, col=wEntry$Line.col, lwd=wEntry$Line.lwd) } if (wEntry$Borders==2) { # L1 colors polygon(wAreaVisBorders$x,wAreaVisBorders$y, density=-1, col=wEntry$Fill.col, border=FALSE) } if (wEntry$Borders==3) { # L1 borders # setup each group of sub-areas and draw polygons. 
# Not Referenced sub-areas wVisBorders <- wAreaVisBorders[wEntry$Selected,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= wEntry$Line.col, lwd=wEntry$Line.lwd) } if (wEntry$Borders==4) { # L3 borders # Outline Country area (total area). polygon(wL3VisBorders$x, wL3VisBorders$y, density=0, col=wEntry$Line.col, lwd=wEntry$Line.lwd) # outside US boundary } } } # ### ### # # MapPolySetup function - used by all areaMap glyphs to process the panel dimensions # and adjust the x and y ranges and scales for the particular map used. # # MapPolySetup <- function(mapType,wPanels,wAreaVisBorders,wL2VisBorders,wRegVisBorders, wL3VisBorders, DL3) { # entire area.. (what if subset is used.) # all but L3VisBorders if (DL3) { # all sets of boundaries rxpoly <- range(wL3VisBorders$x,wRegVisBorders$x,wL2VisBorders$x,wAreaVisBorders$x,na.rm=TRUE) rypoly <- range(wL3VisBorders$y,wRegVisBorders$y,wL2VisBorders$y,wAreaVisBorders$y,na.rm=TRUE) } else { rxpoly <- range(wRegVisBorders$x,wL2VisBorders$x,wAreaVisBorders$x,na.rm=TRUE) rypoly <- range(wRegVisBorders$y,wL2VisBorders$y,wAreaVisBorders$y,na.rm=TRUE) } rxadj <- diff(rxpoly) * 0.02 # adjust x by + and - 2% of the size of the range rxVadj <- c(-rxadj,rxadj) rxpoly <- rxpoly + rxVadj ryadj <- diff(rypoly) * 0.05 # adjust y by + and - 5% of the size of the range. ryVadj <- c(-ryadj,ryadj) rypoly <- rypoly + ryVadj yxA <- diff(rypoly) / diff(rxpoly) # calculated aspect from MAP information. #cat("Map yxAspect:", yxA, "\n") #print(paste0("rxpoly:",paste0(rxpoly,collapse=" ")," rypoly:",paste0(rypoly,collapse=" "))) # aspect ratio is y / x... 
# size of space in panel =
# NOTE(review): 'j' (current glyph column index) and 'banner.max' are free
#   variables resolved from the enclosing environment - confirm 'j' holds the
#   current column number whenever MapPolySetup is called.
panelW  <- diff(wPanels$coltabs[j+1,])
panelH  <- diff(wPanels$rowtabs[2,])   # grap first row as model - All should be the same except median row
#cat("Panel W:",panelW," H:",panelH,"\n")
#cat("banner.max:",banner.max[mapType,"width"],"\n")

# current x/y spans of the map ranges (already padded earlier in this function)
rxDiff  <- diff(rxpoly)
ryDiff  <- diff(rypoly)
rxpoly2 <- rxpoly
rypoly2 <- rypoly
#
# Adjust rx and ry - rule: NEVER NEVER decrease rx or ry.
# if map Aspect (y/x) is lower then panel (h/w) then
#    example: 90/150 = 0.6 and 0.78/1.117 -> 0.698
#      150 <> 90 * 1.117 / 0.78
#      map in this space is about 104/150 map will be taller then it should be.
#    increase y range
# if map Aspect (y/x) is high than panel (h/w) then
#    example: 90/150 = 0.6 and 0.66/1.117 -> 0.59
#      map in this space is about 88/150 map will be wider then it should be
#    increase x range.
#
# One assumption is that the original panel width and height were laid out
# to accomodate the minimum/maximum height, aspect ratio, and title lengths.
#
# This is to adjust to fit the space.
# Objective:
#     ryDiff     panelH
#    --------- = --------   -->  rxDiff =? ryDiff * panelW / panelH
#     rxDiff     panelW
#
# Expand exactly one axis range (never shrink) so the map aspect ratio matches
# the panel aspect ratio; the expansion is split evenly on both sides.
wfx <- ryDiff * panelW / panelH
if (wfx > rxDiff) {
   # change rx (expand)
   wfxd    <- abs(wfx - rxDiff)
   vfx     <- c(-wfxd/2,wfxd/2)
   rxpoly2 <- rxpoly + vfx
} else {
   # change ry (expand)
   wfy     <- rxDiff * panelH / panelW
   wfyd    <- abs(wfy - ryDiff)   # change needed.
   vfy     <- c(-wfyd/2, wfyd/2)
   rypoly2 <- rypoly + vfy
}
#cat("rxpoly2:",rxpoly2," rypoly2:",rypoly2,"\n")

# Return the aspect-corrected x and y plotting ranges for the map glyph.
return(list(rxpoly2=rxpoly2, rypoly2=rypoly2))
}

#
#   ###   ###
#
#  Function to split numeric X,Y coordinate vectors based on NA.
#
#  Return is a list of parts of the original vector up to the NA.
#
#  wX   : numeric vector (e.g. polygon x or y coordinates).
#  Brks : logical vector, same length as wX, TRUE at the break positions.
#
#  NOTE(review): sapply() may simplify the result to a vector/matrix when all
#    pieces happen to have the same length - confirm callers tolerate that;
#    lapply() would always return a list.
#
MMVSplit <- function(wX,Brks) {
   #print(Brks)
   wXa       <- wX
   wXa[Brks] <- NA                        # blank out the break positions
   wXs       <- split(wXa, cumsum(Brks))  # split up vector into smaller vectors in list
   wXz       <- sapply(wXs, function(x) x[!is.na(x)])  # remove NAs
   #print(wXz)
   return(wXz)
}
#
#   ###   ###
#
#  printPanelsParms - prints the associated parameter in creating a panel.
#
# printPanelParms - debug helper: prints the panel-layout variables
#   (numGrps, numCol, margins, row/column sizes, etc.) to the console.
#   't' is a title/tag printed first.  All variables referenced by the
#   cat() calls are free variables - they must exist in the calling
#   environment when this helper is invoked.
#
printPanelParms <- function(t) {
   print(t)
   cat("numGrps:",numGrps,"\n")
   cat("numCol :",numCol,"\n")
   cat("topMar :",topMar,"\n")
   cat("botMar :",botMar,"\n")
   cat("rowSize:",paste0(rowSize,collapse=" "),"\n")
   cat("rowSep :",paste0(rowSep,collapse=" "),"\n")
   cat("colSize:",paste0(colSize,collapse=" "),"\n")
   cat("colWidths",paste0(colWidths,collapse=" "),"\n")
   cat("colSep :",paste0(colSep,collapse=" "),"\n")
   cat("rSizeMx:",rowMaxH,"\n")
   cat("rSizeMn:",rowMinH,"\n")
   cat("rSizeMaj:",rowSizeMaj,"\n")
   cat("rMapCol:",PDMapCol,"\n")
   cat("\n")
}
#
#   ###  ###
#
#_________ function to pattern match alias names
#
# SeerToAbbr - map SeerStat registry names onto name-table abbreviations.
#   xR   : character vector of registry names from the SeerStat output.
#   aNAI : name table providing $Alias (grep patterns) and $Abbr columns.
#   Returns a vector the length of xR holding the matched abbreviation, or
#   NA where no alias pattern matched.  Stops (***MST-30) when an alias
#   pattern matches more than one data row (duplicate registry names).
#
SeerToAbbr <- function(xR,aNAI) {
   # x --> a vector of the registry names from SeerStat output
   ErrFnd  <- FALSE
   wReg    <- toupper(xR)
   wAbbr   <- rep(NA,length(wReg))
   # for each alias pattern, the data-row number(s) it matches
   xout1   <- sapply(c(1:length(aNAI$Alias)), function (x) grep(aNAI$Alias[x], wReg, ignore.case=TRUE))
   xout1a  <- unlist(xout1)   # all matched row numbers, in alias order
   # logical per alias: TRUE when the alias matched at least one data row
   xout2   <- !is.na(lapply(xout1, function(x) ifelse(length(x)==0,NA,x)))
   # row numbers for aliases that matched MORE than one data row (duplicates)
   xout3   <- unlist( lapply( xout1, function(x) { if(length(x[])>1) { x } else { NA } } ) )
   if (any(!is.na(xout3))) {
      xout4 <- paste0(xout3[!is.na(xout3)], collapse=" ")
      xmsg  <- paste0("***MST-30 Registries in the data have duplicate name in rows:",xout4,
                " Only one row per area is permitted.\n")
      ErrFnd <- TRUE
      stopCnt()
      stop(xmsg, call.=FALSE)
   }
   if (!ErrFnd) {
      # continue
      # NOTE(review): wAbbr is indexed by data-row number (xout1a) while
      #   aNAI$Abbr is indexed by the per-alias match flag (xout2); this
      #   assumes each alias matches at most one row and the two orders
      #   line up - confirm for inputs where several aliases match.
      wAbbr[xout1a] <- aNAI$Abbr[xout2]
   }
   return(wAbbr)   # return list of abbreviates or NA if no match.
}
#
#   ###   ###
#
#  function to calculate and return scaling variable - ksc
#
#  based on the value of xke => 1 to 5.
#  UPDATE to pass real height, and handle 1 to 6 properly.  This code assumes height used for 5.
#
# SetKsc - compute the vertical scaling factor (ksc) for bar-type glyphs.
#   xke : number of bar segments (documented range 1 to 5; see the UPDATE
#         note above about extending to 6).
#   Returns 1 for a single segment, otherwise (xke + 1/3)/(5 + 1/3) so bar
#   heights stay uniform relative to the 5-segment layout.
#
SetKsc <- function(xke) {
    C13 <- 0.33333
    if (xke == 1) {
       wKsc <- 1
    } else {
       wKsc <- (xke + C13)/(5 + C13)   # scale value for the "y" element of the bar to keep uniformity
    }
    return(wKsc)
}
#
#  ###
#
#
####   end of micromap functions (glyphs and micromapST internal functions)
#
#
###########################  ###########################  ###########################

#print("micromapST functions loaded")

################################################################################
#
#
#  Continue startup - verification code.
#
#
################################################################################

################################################################################
#
#   Call Argument validation
#
#
#  Previously Checked:
#
#    bordDir
#    bordGrp
#
#  load border group
#
#  Start setting up .GlobalEnv variables.
#
#
#   1) statsDFrame -> present
#   2) panelDesc   -> present
#
#   3) statsDFrame -> get column names and number of columns;
#   4) statsDFrame & rowColName -> locate row names for later linking.
#   5) Compare row names and name table
#   6) Check for duplicate statsDFrame rows
#   7) Handle dataRegionsOnly call parameter - sub-map Setup
#   8) Set values for regional or full mapping.
#
#   9) rowName
#  10)
#
#
#  Basic checks to make sure statsDFrame and panelDesc arguments are present and usable.
#  More detailed checks done later.
#
StopFnd <- FALSE

#_________ 1) statsDFrame (basic check) argument
#
# check to see if the statsDFrame was provided and is the correct datatype
#
# BUG FIX: the former 'is.na(statsDFrame)' term was removed.  is.na() on a
#   data.frame returns a matrix, and '||' signals an error for conditions of
#   length != 1 (R >= 4.3), so the old test crashed on every valid data.frame.
#   A scalar NA argument is still rejected by !is.data.frame(NA).

if (missing(statsDFrame) || is.null(statsDFrame) || !is.data.frame(statsDFrame)) {
      stopCnt()
      StopFnd <- TRUE
      xmsg    <- paste0("***0101 CARG-DF First argument (statsDFrame) is missing or not a data.frame.")
      warning(xmsg, call. = FALSE)
   }
#
#_________ 2) panelDesc - Basic initial check - Process the arguments
#
# check to see if the panelDesc was provided and is the correct datatype.
# BUG FIX: two defects corrected in this condition:
#   - 'is.na(panelDesc)' on a data.frame/list returns a vector, and '||'
#     errors on conditions of length != 1 (R >= 4.3); a scalar NA is still
#     rejected by the type test below.
#   - the two type tests were joined with '||', which rejected every plain
#     list even though the message (and the code below) accepts a data.frame
#     or a list; they must be joined with '&&'.
if (missing(panelDesc) || is.null(panelDesc) ||
    (!is.data.frame(panelDesc) && !is.list(panelDesc))) {
      stopCnt()
      StopFnd <- TRUE
      xmsg    <- paste0("***0111 CARG-PD The second argument, the panelDesc structure, is missing or not a data.frame or list.")
      warning(xmsg, call. = FALSE)
   }
#
# If either basic argument check failed, stop now - nothing below can run
# without both structures.
if (StopFnd) {
    stopCnt()
    xmsg     <- paste0("***01Z0 CARG Key call arguments are missing, Execution stopped.")
    stop(xmsg, call. = FALSE)
}

### most of panelDesc is validated later.

#print("statsDFrame and panelDesc variables are present.")
#
# Now get the column names of the statsDFrame and verify the match up of the rownames with
# the border group names, abbreviations or IDs.
#

#_________ Get list of column name in statsDFrame for parameter verification

wSFName     <- names(statsDFrame)    # get the column names from data.frame
len_wSFName <- length(wSFName)       # record the number of "named" rows in list (at front.)
wSFNameList <- c(wSFName,seq(from=1, to=len_wSFName))   # add valid row numbers to the list.

numRows     <- nrow(statsDFrame)     # get number of rows in statsDFrame
#
# wSFNameList now contains a list of the column names and column numbers
#  as character strings.  This string will be used to verify any user
#  provided column names or character column numbers.
#

#
#  Start Accumulating the micromapST System variable list
#
mmSys <- list(SFVarName = sDFName, SFNameL = wSFNameList, SFColNum = len_wSFName, SFRowNum = numRows)
#print("mmSys")
#print(mmSys)

#
#  Check to make sure user provide data frame has at least 1 row and at least 1 column.
#
if (len_wSFName == 0 || numRows == 0) {
    xmsg <- paste0("***0103 CARG-DF The ",sDFName," statsDFrame data.frame has no columns or rows. ")
    StopFnd <- TRUE
    stopCnt()
    stop(xmsg, call. = FALSE)
}
#
#   statsDFrame - data rows
#
#  headers or total area rate rows should not be included in data.format structure..
# # #______________statsDFrame - data frame - verify row links/names______________ # numRowsBG <- nrow(areaNamesAbbrsIDs) # get number of rows in name table # # Must validate statsDFrame row names against the area list - process rowNamesCol to be able to proceed with # the link verification. # # # Step 1 - find out where the row names for the sub-area are in the statsDFrame data.frame. # ### ### If user provided a column with the sub-area "names", then we have to check to make ### sure there are no duplicates in the statsDFrame data.frame. If they were in the row.names, ### R already makes sure there are no duplicates. ### ### Dont care what type of link it is at this point. ### # #_____________Check and Process the rowNamesCol call argument/parameter option___________________ # StopFnd <- FALSE ErrFnd <- FALSE if (missing(rowNamesCol) || is.null(rowNamesCol) || is.na(rowNamesCol) ) { # rowNamesCol is missing or not provided - no action - use the row.names on statsDFrame for the sub-area names. statsDFrame$RN <- rownames(statsDFrame) # get a copy of the rownames. (row.names) # If no rowNamesCol provided, then we must assume the row names (sub-area names) are being # provided in the row.names of the data.frame. If the row.names were not assigned a sub-area # identifier (full name, abbr, alias, or ID) by the user, then the row.names will be # then we will just get "1", "2", ... as the row.names and they will not match anything. # could be dangerous - later it may be best to STOP. #print("No rowNamesCol provided - must be in row.names") } else { # Have the rowNameCol call argument/parameter and a statsDFrame column name/number retrievve the sub-area links. # if (length(rowNamesCol) > 1) { # rowNamesCol can only be a vector of length 1. warnCnt() ErrFnd <- TRUE xmsg <- paste0("***0173 CARG-RNC The rowNamesCol argument value must have a length = 1. Only first value used.") warning(xmsg, call. 
= FALSE) rowNamesCol <- as.vector(unlist(rowNamesCol))[1] # pick off the first entry. dont care what type of variable it is. # # Cheat and do unlist and as.vector first. This kills must structures. # "unlist" destroys any lists and most data.frames. # as.vector linearizes what is left so one element can be picked off and kills factors. } # Look up the name and covert it into a column number - or - verify column number.. if (!is.character(rowNamesCol) && !is.numeric(rowNamesCol) && !is.integer(rowNamesCol)) { # rowNamesCol is not the correct type of vector. stopCnt() StopFnd <- TRUE # stop because user did specify, but its wrong. xmsg <- paste0("***0172 CARG-RNC The rowNamesCol argument value must be a character or numeric value. ", "It was found to be: ",class(rowNamesCol),".") stop(xmsg, call. = FALSE) } litrowNamesCol = rowNamesCol # Save the original literal value from rowNamesCol (could be number or name) rowNamesCol <- CheckParmColx(litrowNamesCol, c('RNC','rowNamesCol'), wSFNameList, len_wSFName) # see if value is good. # if error, CheckParmColx issues the warning message and return 0. # got column number if good. if (!all(rowNamesCol>0)) { # check to see if the value is good (>0 -> a valid column number) # Bad column name or column number found. Error message was generated by CheckColx. stopCnt() StopFnd <- TRUE # again stop because user specified, but its wrong. xmsg <- paste0("***01Z1 CARG Errors found in call arguments. Execution stopped.") stop(xmsg, call.=FALSE) # # Cant continue. User provided rowNamesCol, so must have a valid column name/number and a valid list of links. # If not, then looking at the row.names of the data.frame does not make sense. Why would they specify a rowNameCol? # } # # if problems are identified prior to this line, the package has stopped. # # At this point, the rolColName exists and is a valid column name. 
# #cat("rowNamesCol is valid : ",rowNamesCol," - Now check for duplicates.","\n") ### # # get copy of column, convert column into row.names, but first # need to check for duplicates before we do this. Dont have to do this check if sub-area names are in row.names of the data.frame. # dupNames <- duplicated(statsDFrame[,rowNamesCol]) dupRows <- c(seq_along(dupNames))[dupNames] if (any(dupNames)) { StopFnd <- TRUE stopCnt() xmsg <- paste0("***0171 CARG-RNC The row names in column ",rowNamesCol, " of the ",sDFName," statsDFrame data frame contain duplicates. Only one row per sub-area is permitted. Duplicate rows are:", paste0(dupRows,collapse=","),".") stop(xmsg, call. = FALSE) # possible work a round - later - is to delete the second occurance. } # # No duplicate sub-area row names in the statsDFrame data provided by user and # column name or number is good - move column to $RN # #print("Moved to $RN") statsDFrame$RN <- statsDFrame[,rowNamesCol] } statsDFrame$rawRN <- statsDFrame$RN # save raw format of row name. statsDFrame$RN <- toupper(statsDFrame$RN) # upper case for comparisons. row.names(statsDFrame) <- statsDFrame$RN # save in statsDFrame$RN as the row.names # ### ### # # At this point the sub-area names from the row.names on statsDFrame or # the sub-area names in a column of the statsDFrame have been added to the # internal statsDFrame data.frame in the $RN column. If the values were # checked for duplicates if provided in a data.frame column. # # # Next step is to validate the names against the programmed name list. # # If provided in column (rowNamesCol), they are checked and moved to row.names. # We only know they are unique. Another check is needed to see if they match # the area name/abbr/ID list. # ### ##____________statsDFrame rows OK to count # # JP - Make sure the input data.frame is at least two columns - add one. A single column data.frame # acts differently then a two or more column data.frame under many operations. 
# # JP - Dot code (at least) has problems with single column statsDFrame structures. # # To protect code and any other areas that may have problems, # quick fix is to append "0" column to the right of the provided data.frame. # This forces the data.frame to be at least 2 columns. # numRows <- nrow(statsDFrame) Ex <- rep(0,numRows) ADFrame <- cbind(statsDFrame,Ex) # move to ADFrame and add Zero column. # a 1 column data.frame has a little different behavior the s 2 column data.frame #cat("Add 0 column to statsDFrame\n") # # statsDFrame number of rows - validated. # ##### # # Get general defaults -> colors and details # par(fin = par("din")) # safety value to get moving. plot.new() # # ________________Load Colors and Details defaults_______________________________ # #print("Calling micromapGSetDefaults") micromapGDefaults <- micromapGSetDefaults() # get master list of variables and defaults #print("Got data.frame from micromapGSetDefaults") ##### # #_________________colors _______________________________________ # # Must do after completing the details list processing # # Verify "colors=" argument # # Second purpose is to set the graphics colors not in the "colors" vector to grays or colors. # # Read defaults into memory # #print("Validate colors") colFull <- TRUE # control logical = TRUE doing Color, FALSE doing Greys NoErrs <- TRUE doDotOutline <- FALSE mstColors <- colors if (missing(colors) || is.null(mstColors) ) { mstColors <- micromapGDefaults$colors # use package defaults. } else { if (typeof(mstColors) == "character") { if (length(mstColors) != 24) { if (length(mstColors) == 12) { # check for the basic colors. # we have the basic 12 colors. Expand to the list of 24. 
# Have the basic 12 colors - expand to 24 by appending a translucent
# (alpha = 0.2) copy of each color for the segmented-bar glyphs.
colorlab    <- names(mstColors)
TransColors <- adjustcolor(mstColors,0.2)
# BUG FIX: was 'mtColors <- ...' (typo), which left mstColors at 12 entries
# and made the 24-element names() assignment below fail.
mstColors   <- c(mstColors, TransColors)
if (!is.null(colorlab)) {
   # translucent copies are named with an "l_" prefix
   names(mstColors) <- c(colorlab,paste0("l_",colorlab))
}
} else {
   if (length(mstColors) == 1) {
      wStr <- toupper(mstColors)
      if (wStr == "BW" || wStr == "GRAYS" || wStr == "GREYS" ) {
         # set up the colors for printing in BW or Gray tones
         # Get the main greys for the 6 colors (the middle 3-7/8 grays in the RColorBrewer scale.
         # and add the black for the median and a grey for the area highlight color.
         xbw         <- brewer.pal(name="Greys",9)
         greyColors  <- c(xbw[c(3:8)],"#000000","#E8E8E8")
         # Build the transparent colors for the segmented bar charts.
         TransColors <- adjustcolor(greyColors,0.2)
         # Set up the grey color vector as requested.
         mstColors   <- c(greyColors,TransColors)
         # Set up running parameters.
         colFull          <- FALSE
         Dot.Outline      <- TRUE
         Dot.Conf.Outline <- TRUE
         Dot.SE.Outline   <- TRUE
         doDotOutline     <- TRUE   # outline dots in dot glyphs.
      } else {
         mstColors <- micromapGDefaults$colors
         warnCnt()
         xmsg <- paste0("***01K0 COLORS A invalid single value is provided for the colors argument. It must be 'BW', 'greys', or 'grays'. The argument is ignored.")
         warning(xmsg,call.=FALSE)
      }
   } else {
      warnCnt()
      xmsg <- paste0("***01K1 COLORS The colors vector has the incorrect number of elements. It must have 1 or 24 entries. ",length(mstColors)," provided.")
      warning(xmsg,call.=FALSE)
   }
}   # end of length == 12 / else
# BUG FIX: the closing brace for the 'length(mstColors) != 24' block was
# missing here, leaving the if/else chain unbalanced.
} else {
   # have 24 values in vector
}
} else {
   mstColors <- micromapGDefaults$colors
   warnCnt()
   xmsg <- "***01K2 COLORS The colors vector type is invalid. It must be a character vector."
   warning(xmsg,call.=FALSE)
}
}
# publish the final color vector (names are dropped by as.character)
assign("mstColors",as.character(mstColors))
mstColorNames <- names(mstColors)
rm(colors)

#____ end of color check and adjustments.___
#
#
#______________________Process details Defaults_________________________
#
#print("Validate details")
# Process defaults into the local variables as before.
# Setting the defaults into the system. User provided overrides.
wDetails <- micromapGDefaults$details # dynamic assignment of defaults to individual variables in "micromapST" # namespace. #print(wDetails) oldDefNam = "none" defNam = names(wDetails) for (i in 1:length(wDetails)) { if (nchar(defNam[i]) <= 0) { warnCnt() xmsg <- paste0("***01N3 DETS Zero length variable name found in the details list after the ", oldDefNam, " variable.") warning(xmsg,call.=FALSE) } oldDefNam <- defNam[i] assign(defNam[i],wDetails[[i]]) # assign default values. } # All details names must be in the globalVariable call to be visible to CRAN checks. # The valid details variable name list is the "defNam" from above and the detailsExtra list # for the areaParms parameters. DetailNames <- c(defNam,detailExtra) #print(DetailNames) # # The defaults have been moved to the individual variables. # Keep the list of names around to be to verify user supplied names. # # #________________ Process user provided details - merge into memory. # # Now overlay with any values provided by user. # # dynamic assignment of detail data.frame to individual variables in the # "micromapST' namespace.. # # Should I add code to verify names provided? # #print("Merge user details with default details.") # # - validate user provided details before merging. # numOverlaid <- 0 if (!(missing(details) || is.null(details) || is.na(details))) { if (typeof(details) == "list") { nam <- names(details) # parse the details list into variable that can be nam_match <- match(nam,defNam) for (i in 1:length(details)) { # referenced using the list's name. if (is.na(nam_match[i])) { # invalid variable name in details warnCnt() xmsg <- paste0("***01N2 DETS Invalid details variable name: ",nam[i], " in the details list. Variable is IGNORED.") warning(xmsg,call.=FALSE) } else { # valid name numOverlaid <- numOverlaid + 1 assign(nam[i],details[[i]]) #print(paste0("details overlay of ",nam[i]," with ",details[i])) } } } else { stopCnt() xmsg <- "***01N1 DETS The details parameter is not a list." 
stop(xmsg, call.=FALSE) } } #cat("envir=Id.Dot.pch:",find("Id.Dot.pch"),"\n") #cat("envir=topMar:",find("topMar"),"\n") #if (numOverlaid>0) { # xmsg <- paste0("***0501 PANEL Number of parameters overlaid = ",numOverlaid) # message(xmsg) #} # # Verify and adjust details variables # #cat("In micromapST - processing parameters.\n") #cat("envir=warnCnt:",find("warnCnt"),"\n") #cat("envir=staggered:",find("staggered"),"\n") #cat("envir=lastLab2Space:", find("lastLab2Space"), "\n") #### # # Set in colors with BW or gray requested. This resets it - to Dot.Outline value? OUCH! #doDotOutline <- Dot.Outline # #### #### # # Id.Dot.pch # #print("Validate Id.Dot.pch") if (!is.between.r(Id.Dot.pch,c(1,25))) { # not an acceptable pch value #cat("envir=Id.Dot.pch:", find("Id.Dot.pch"),"\n") Id.Dot.pch <<- 22 # set to default warnCnt() xmsg <- paste0("***01NA DETS The Id.Dot.pch variable can only be set to a range from 1 to 25. Using the default of 22.") warning(xmsg,call.=FALSE) } # # This is the code the rcmd check could not detect the scope of the detail$ variables. # # ##### # Need to get ID width values before setting the panel defaults # #______________Function Call Argument Checks______________________ # #------- Working variables for map and id glyphs. #------- Start Getting widths of labels and titles to help setup column widths # # # This will have to be re-written to handle user provided labels and titles for the glyph columns. # medianBanner <- Map.Median.text #cat("Calculating banners and column fixed widths.","\n") #print(medianBanner) #print(Map.Hdr1) #print(Map.Hdr2) #print(Id.Hdr1) #print(Id.Hdr1) # # Map titles with symbols # sw = Map.Lab.Box.Width + 0.05 + 0.04 # square width and spaces on each side. (inches) #cat("Size of Box Symbols (guess) sw:",sw,"\n") # empty banner data.frame banner <- data.frame(H1=character(),H2=character(),H3=character(),M1=character(),stringsAsFactors=FALSE) # add "Highlighed" titles for default. 
banner <- rbind(banner,t(c("","Highlighted",Map.Hdr2,medianBanner))) # add headers for cumulative banner <- rbind(banner,t(c("Cumulative Maps", paste0(Map.Hdr2,' Above Featured Rows'), paste0(Map.Hdr2,' Below Featured Rows'), medianBanner) ) ) # add headers for median banner <- rbind(banner,t(c("Median Based Contours", paste0(Map.Hdr2,' Above the Median'), paste0(Map.Hdr2,' Below the Median'), medianBanner) ) ) # add headers for two ended (tail) banner <- rbind(banner,t(c("", "Two Ended Cumulative Maps", paste0(Map.Hdr2," Highlighted"), medianBanner) ) ) banner <- rbind(banner,t(c("",Id.Hdr1,Id.Hdr2,"") ) ) bcn <- c("H1","H2","H3","M1") # h1, h2, h3, median brn <- c("map","mapcum","mapmed","maptail","id") row.names(banner) <- brn colnames(banner) <- bcn banner$H1 <- as.character(banner$H1) banner$H2 <- as.character(banner$H2) banner$H3 <- as.character(banner$H3) banner$M1 <- as.character(banner$M1) #cat("banner header data.frame:\n") #print(banner) # .adj -> which lines in each header have symbols? banner.adj <- data.frame(H1=c(0,0,0,0,0),H2=c(0,sw,sw,0,0),H3=c(0,sw,sw,0,0),M1=c(0,0,0,0,0)) row.names(banner.adj) <- brn banner.m <- c(1,1,1,0.8) # text size multiplier for H1, H2, H3, Med1 banner.tc <- Text.cex * banner.m #cat("CEX for headers and median - banner.tc:",banner.tc,"\n") banner.w <- banner # replace strings with width values for current font and Text.cex values. for (iH in c(1:4)) { for (iT in c(1:5)) { banner.w[iT,iH] <- strwidth(banner[iT,iH],units="inches",cex=banner.tc[iH]) } } # banner.w <- as.data.frame(sapply(banner.w, function(x) as.numeric(x))) # convert numeric. row.names(banner.w) <- brn #cat("widths in banners - banner.w:\n") #print(banner.w) banner.max <- as.data.frame(sapply(c(1:5), function(x) max(banner.w[x,]+banner.adj[x,]))) colnames(banner.max) <- "width" row.names(banner.max) <- brn #cat("maximum widths for each type of header - banner.max:\n") #print(banner.max) # Make subroutine to be able to do again later. 
ID.Abbr.width <- max(strwidth(ID.Abbr,units="inches",cex=(Id.Text.cex * Id.Cex.mod))) ID.Name.width <- max(strwidth(ID.Name,units="inches",cex=(Id.Text.cex * Id.Cex.mod))) #cat("ID.Abbr.width:",ID.Abbr.width,"\n ") #cat("ID.Name.width:",ID.Name.width,"\n\n") Id.OverH <- Id.Dot.width*(Id.Dot.cexm * Id.Cex.mod) + Id.Space*2.5 # two spaces left and right of name. #cat("ID overhead (Id.Start, Dot.width, Space (box to letters), space (letter to edge):",Id.OverH,"\n") #cat("banner.max ID:",banner.max["id","width"]," IDName:",Id.OverH+ID.Name.width," IDAbbr:",Id.OverH+ID.Abbr.width,"\n") # width of ID glyph with border Group names/abbreviations Id.width <- c(1.5,1) # initialize Id.width[1] <- max((Id.OverH + ID.Name.width ),banner.max["id","width"]) # plus padding. FULLNAMES Id.width[2] <- max((Id.OverH + ID.Abbr.width ),banner.max["id","width"]) # ABBREVIATIONS #cat("Id.width:",Id.width,"\n") # # Build title lists for maps and get width for point size. # #cat("Map.Aspect:",Map.Aspect,"\n\n") # #print("Column Hdrs - Done") #_____________Set up for Area Names and Abbreviation links. # #_____________Borders to data Link ---- rowNames and rowNamesCol # #_____________Process rowNames option___________________ # # if (missing(rowNames) || is.null(rowNames) || is.na(rowNames) ) { # no rowNames provided up front. Set to default rowNames <- "ab" } #cat("Validate rowNames : ", rowNames,"\n") #__________________ # # Verify the rownames are valid and can be translated into abbrevation versions. # # # The user can enter abbr, full, alt_ab, alias, or ID with the data. # Which everone is picked, it must be the string in the data.frame and the panelData-data.frames to # allow matching to the boundaries VisBorderr data. # # Each value is translated to the Key that is used to link the data to the # key in the boundary data in areaVisBorders. # # AD.link is the user value in the order of the data table. 
# # areaIndex is in the order of the data table (AD.link) and points to the # matching entry in the name table, based on the proper match for the type of value. # #print("Clean up rownames in $RN") #cat("Border Group Name:",BordGrpName,"\n") statsDFrame$RN <- ClnStr(as.character(statsDFrame$RN)) AD.link <- (as.character(statsDFrame$RN)) # get rownames.information (link to borders) (all CAPS) ##### may be changed. #cat("Initial AD.link:",AD.link,"\n") if (BordGrpName == "USStatesBG") { ### If US States Patterns - look for the many ways Washington DC is possibly enter in the user data.. ### # Compare against common "DC" names and replace with "DC" if (rowNames == "full") { AD.Test <- toupper(AD.link) # get capitalized version for the DC conversion. # Build DC name table (all caps) DCnames = c("WASHINGTON, D. C.", "WASHINGTON D. C.", "WASHINGTON, D C", "WASHINGTON D C", "WASHINGTON, DC", "WASHINGTON DC", "DISTRICT COLUMBIA", "DISTRICT OF COLUMBIA", "DC", "D C", "D, C.","D.C","D C.","D.C.") # only clean up full names. AD.link[!is.na(match(AD.Test,DCnames))] <- "DC" ### match short form in border group } #cat("Updated AD.link:",AD.link,"\n") } # if (rowNames == "alias" && enableAlias == FALSE) { stopCnt() StopFnd <- TRUE xmsg <- paste0("***0191 CARG-RN rowNames='alias' is not supported for this bordGrp.") stop(xmsg, call.=FALSE) } if (rowNames == "seer" && BordGrpName != "USSeerBG") { stopCnt() StopFnd <- TRUE xmsg <- paste0("***0192 CARG-RN rowNames='seer' is only supported for the 'USSeerBG' bordGrp.") stop(xmsg, call.=FALSE) } # areaIndex pointer to Name Table is order of the user data.frame based on the rowNames parameter. IndexDFtoNT = switch(rowNames, # find the correct list to match user provide links. 
# if "ab", use current name - get index "ab" = {match(AD.link, areaNTAbbr)}, # if "id", convert to index "id" = {match(as.integer(AD.link), as.integer(rlAreaNamesAbbrsIDs$ID))}, # if "FIPS", convert to index (alias for "id") "FIPS" = (match(as.integer(AD.link), as.integer(rlAreaNamesAbbrsIDs$ID))), # if "full" sub-area name, convert index "full" = {match(AD.link, areaNTName)}, # if "seer" seer sub-area names from SeerStat (read and convert to index ) "seer" = {AliasToIndex(AD.link,rlAreaNamesAbbrsIDs$Alias)}, # if "alias" seer sub-area names from SeerStat (read and convert to index.) "alias" = {AliasToIndex(AD.link,rlAreaNamesAbbrsIDs$Alias)}, # if "alt_ab" alternate name abbreviation used in data, convert to index. "alt_ab" = {match(AD.link, rlAreaNamesAbbrsIDs$Alt_Abbr)}, # No match.. { stopCnt() StopFnd <- TRUE xmsg <- paste0("***0190 CARG-RN Invalid rowNames call parameter value. The value must be 'ab', 'alt_ab', 'id', 'alias', or 'full'.") stop(xmsg, call.=FALSE) } ) # # IndexDFtoNT is index from caller's data.frame rows into the name table # (areaNamesAbbrsIDs) data.frame # # By default, we will handle cases where statsDFrames does not contain # sub-areas in the border group. # callVL$rowNames <- rowNames var <- "callVarList" wstr <- paste0("assign(var,callVL,envir=.GlobalEnv)") eval(parse(text=wstr)) #cat("Initial IndexDFtoNT:",IndexDFtoNT,"\n") ###### # # Process ignoreNoMatches - the case where data is provided, but the is no row in the name table (and # therefore, no boundaries in the border group. # # This also deals with data rows that don't match boundary information. # a) all match - all data and all boundaries # b) all data match boundaries (all data is matched, but not all boundaries are used.) # b1) dataRegionsOnly option enabled - find regions and only do regions with data. # b2) no regions - draw all. # c) not all data matches boundaries (data without boundary,) # c1) ignoreNoMatches = false -> warning message and stop. 
# c2) ignoreNoMatches = true -> warning message, delete data rows, continue. # d) no data matches in boundaries. (total miss match). Warning and stop. # # # Check and Implement delete last row (blank) as no match ignore option # # set defaults for ignoreNoMatches call parameter if (is.null(ignoreNoMatches)) ignoreNoMatches = FALSE if (is.na(ignoreNoMatches)) ignoreNoMatches = FALSE # #cat("ignoreNoMatches : ",ignoreNoMatches,"\n") #cat("number of rows : ",numRows,"\n") DFtoNTMissed <- is.na(IndexDFtoNT) #cat("DFtoNTMissed:",DFtoNTMissed,"\n") #cat("any(DFtoNTMissed):",any(DFtoNTMissed),"\n") # # areaUKey is list of abbreviation for each area. If there is no match # between the data links and areaNamesAbbrsIDs table, then it shows up as an NA. # if (any(DFtoNTMissed)) { # one or more of the data rows didn't match the name table # # if ignoreNoMatches in data, strip rows that don't match name table. # BadList <- statsDFrame[DFtoNTMissed,"rawRN"] # get list of rows that did not match. xmsg <- paste0("***0106 CARG-DF The following rows in the ",sDFName," data.frame do not match any boundary name:") warning(xmsg,call.=FALSE) xmsg <- paste0("***0107 ",paste0(BadList,collapse=", ")) warning(xmsg,call.=FALSE) # if (ignoreNoMatches) { # ignore data rows that do not match match the name table. # remove row from data.frame xmsg <- paste0("***0108 CARG-DF The rows not matched to boundaries will be removed and not mapped.") warning(xmsg,call.=FALSE) KeepList <- !DFtoNTMissed # get list of areas that don't match (T/F)- good entires = TRUE #cat("Good data rows:",paste0(KeepList,collapse=" ")) # delete bad rows (those not matching) # Keep only rows that matched the name table. IndexDFtoNT <- IndexDFtoNT[KeepList] # clean up index statsDFrame <- statsDFrame[KeepList,] # clean up data frame AD.link <- AD.link[KeepList] # clean up AD.link # if ignoreNoMatches set - this has removed the rows from the user's data table. 
} else { # stop if a missing match # at least one NA in list xmsg <- paste0("***0109 CARG-DF Data row names in the ",sDFName," data.frame must the boundary names in the name table. Call stopped.") stop(xmsg,call.=FALSE) } } #cat("Adjusted data.frames - statsDFrame, AD.link, IndexDFtoNT:\n") #print(statsDFrame) #print(AD.link) #print(IndexDFtoNT) numRows <- length(IndexDFtoNT) # update number of rows in data frame #cat("numRows:",numRows,"\n") # #### #### # # grpPattern argument - default = NULL (use calculated pattern) # #print("Validate - grpPattern") if (!(is.null(grpPattern) || is.na(grpPattern))) { # we have a user specifed grpPattern if (!is.numeric(grpPattern)) { warnCnt() ErrFnd <- TRUE xmsg <- paste0("***01C0 CARG-GP The grpPattern call parameter must be an integer vector. grpPattern ignored.") warning(xmsg, call.=FALSE) grpPattern <- NULL } else { xg <- sum(grpPattern) if (xg != numRows) { # grpPattern number of rows does not match the statsDFrame data.frame warnCnt() ErrFnd <- TRUE xmsg <- paste0("***01C1 CARG-GP The total number of rows in the grpPattern call parameter must be equal to the number of rows in the ", sDFName," data.frame. grpPattern ignored.") warning(xmsg, call.=FALSE) grpPattern <- NULL } else { # check for correct group formats. # No element greater than 5 xg <- max(grpPattern) if (xg > 5) { # grpPattern number of rows does not match the statsDFrame data.frame warnCnt() ErrFnd <- TRUE xmsg <- paste0("***01C2 CARG-GP Each value in grpPattern call parameter vector must be <= 5 (rows per group). A value of ",xg," was found.") warning(xmsg, call.=FALSE) grpPattern <- NULL } else { # Rows descend in order to middle. xl <- length(grpPattern) # number of groups in grpPattern xlh <- ceiling(xl/2) # number of groups to median point. 
grpL <- grpPattern[1:xlh] # lower half groups grpU <- grpPattern[(xl-xlh+1):xl] # upper half groups if ( !all(grpL == sort(grpL,descreasing=TRUE)) || !all(grpU == sort(grpU)) ) { # if the sorted order of either half of the groups does not match the # pattern provided, warning and ignore the grpPattern. warnCnt() ErrFnd <- TRUE xmsg <- paste0("***01C3 CARG-GP The grpPattern call parameter is not properly ordered. ", "The number of rows per group must be in desending order toward the median sub-area.") warning(xmsg, call.=FALSE) grpPattern <- NULL } } } } } # ##### ##### # # regionsB argument - default = FALSE. # #print("regionsB parameter Check.") def_regionsB <- FALSE regionsBFlag <- def_regionsB if (! (is.null(RegVisBorders) || identical(RegVisBorders,L3VisBorders)) ) { # RegVisBorders boundary data.frame is present and different from L3. # validate parameter if ( is.null(regionsB) || any(is.na(regionsB)) ) { # argument is missing or not provided regionsB <- def_regionsB regionsBFlag <- def_regionsB # default #cat("regionsB support enabled - but no regionsB call parameter provided - regionsB set to FALSE.\n") } else { regionsB <- regionsB[[1]][1] if ( !is.logical(regionsB) ) { ErrFnd <- TRUE warnCnt() xmsg <- "***01G0 CARG-RB The regionsB call argument is not a logical variable. The default of FALSE will be used." warning(xmsg,call. = FALSE) regionsBFlag <- def_regionsB regionsB <- def_regionsB } else { regionsBFlag <- regionsB } } } #cat("regionsBFlag parameter:",regionsBFlag," regionsB:",regionsB,"\n") # ##### ##### # # dataRegionsOnly argument - default = FALSE. 
# #print("dataRegionsOnly parameter Check.") def_dataRegionsOnly <- FALSE dataRegionsOnlyFlag <- def_dataRegionsOnly if ( aP_Regions ) { # border group supports regions (feature enabled) # validate parameter if ( is.null(dataRegionsOnly) || any(is.na(dataRegionsOnly)) ) { # argument is missing or not provided dataRegionsOnly <- def_dataRegionsOnly dataRegionsOnlyFlag <- def_dataRegionsOnly # default #cat("regions support enabled - but no regions call parameter provided - regions set to TRUE.\n") } else { dataRegionsOnly <- dataRegionsOnly[[1]][1] if ( !is.logical(dataRegionsOnly) ) { ErrFnd <- TRUE warnCnt() xmsg <- "***01G5 CARG-DRO The dataRegionsOnly call argument is not a logical variable. The default of FALSE will be used." warning(xmsg,call. = FALSE) dataRegionsOnlyFlag <- def_dataRegionsOnly dataRegionsOnly <- def_dataRegionsOnly } else { dataRegionsOnlyFlag <- dataRegionsOnly } } } #cat("dataRegionsOnlyFlag parameter:",dataRegionsOnlyFlag," dataRegionsOnly:",dataRegionsOnly,"\n") # # If duplicated rows exist, Notify user and stop. # # Is this now a duplicate test to the previous test??? Yes it is. (retire) # #print("check for duplicate statsDF rows - duplicate?") dupL <- duplicated(IndexDFtoNT) # check for duplicate references to Name Table if (any(dupL)) { # some of the matches are duplicates - not allowed. One row per sub-area. DupList <- paste0(AD.link[dupL],collapse=", ") stopCnt() xmsg <- paste0("***0104 CARG-DF There are duplicate entries in the statsDFrame data.frame. Duplicate entries are ignored.\n", "***0105 CARG-DF The duplicate rows are: ",DupList,"\n") stop(xmsg, call.=FALSE) rm(DupList) } rm(dupL) # one of the names provided abrv, alt_abrv, ID or full names are not valid # and did not match the data in the Name Table. Can't link to any boundary data. # What link to use for boxplot and TS type data? 
#print("Get panelData Key.") panelDataKey <- switch(rowNames, "ab" = areaNamesAbbrsIDs$Abbr[IndexDFtoNT], "full" = areaNamesAbbrsIDs$Name[IndexDFtoNT], "id" = areaNamesAbbrsIDs$ID[IndexDFtoNT], "alias" = areaNamesAbbrsIDs$Abbr[IndexDFtoNT], "seer" = areaNamesAbbrsIDs$Abbr[IndexDFtoNT], "alt_ab"= areaNamesAbbrsIDs$Alt_Abbr[IndexDFtoNT] ) #cat("panelDataKey:",panelDataKey,"\n") # IndexDFtoNT is an index list to name/abbr/ID rows that match the # ADFrame rows -> position = ADFrame Row, value = abbr # Still need to check for duplicates. # # Setup for IndexDFtoNT checks # areaDatKey is the abbreviation in order of data.frame # # statsDFrame$RN (AD.link) is the cleaned up strings initially used for link # Should be able to re-use this field to link to any panelData structure. # # # sub-areas to regions to Area processing # # Get list of sub areas in regions referenced by the data.j # Set up used regions as the only spaces to map. # Get list of all sub areas in the regions referenced. # areaNamesAbbrsIDs$NotUsed <- FALSE # What does NT hold? # List of all regions and L2 keys #print("Build regions lists from NT regID") listAllL2 <- unique(areaNamesAbbrsIDs$L2_ID) #cat("listAllL2:",listAllL2,"\n") listAllRegions <- unique(areaNamesAbbrsIDs$regID) #cat("listAllRegions:",listAllRegions,"\n") listAllAreas <- areaNamesAbbrsIDs$Key #cat("listAllAreas:",listAllAreas,"\n") #cat("dataRegionsOnlyFlag:",dataRegionsOnlyFlag,"\n") if (dataRegionsOnlyFlag) { # save L2_ID for each data row. statsDFrame$L2_ID <- areaNamesAbbrsIDs$L2_ID[IndexDFtoNT] # put L2_ID into statsDFrame # Get list of used L2 areas. listUsedL2 <- unique(statsDFrame$L2_ID) #cat("listUsedL2:",listUsedL2,"\n") # What does the data show? # data - regions and L2 key lists # Pick up regID for each data row. statsDFrame$regID <- areaNamesAbbrsIDs$regID[IndexDFtoNT] # put regID into statsDFrame # Get list of used Regions listUsedRegions <- unique(statsDFrame$regID) # get list of regions used. 
#cat("listUsedRegions:",listUsedRegions,"\n") areaRegMatch <- match(areaNamesAbbrsIDs$regID,listUsedRegions) # find all sub-areas in regions to be mapped areaRegKeep <- !is.na(areaRegMatch) listUsedAreas <- areaNamesAbbrsIDs[areaRegKeep,"Key"] } else { listUsedRegions <- listAllRegions listUsedL2 <- listAllL2 listUsedAreas <- listAllAreas #cat("regionsFlag=FALSE -> reset listUsed to listAll\n") } # Have all sub-areas in areaNamesAbbrsIDs (name table), areaVisBorders, L2VisBorders, and RegVisBorders. # if used is less the total and regions set, reduce these tables to only the valid region. #cat("UsedRegions:",listUsedRegions,"\n") #cat("UsedL2 :",listUsedL2,"\n") #cat("UsedAreas :",listUsedAreas,"\n") #cat("Overlays - L2:",Map.L2Borders," Reg:",Map.RegBorders," L3:",Map.L3Borders,"\n") if (length(listUsedRegions) != length(listAllRegions)) { # number of used regions is less that number of all regions in border group. # This only happens if regions=TRUE and aP_Regions is TRUE. # The rlXXXX tables must be sub-divided # get index to sub-areas in used regions (all aub-areas, not just was matched data.) # mark not used name table entries SubAallR_Match <- match(areaNamesAbbrsIDs$regID,listUsedRegions) # find all subarea enters within regions used SubAallR_Good <- !is.na(SubAallR_Match) # list of valid name table entries. 
#cat("SubAallR_Good:",SubAallR_Good,"\n") # get keys to aub-areas in all subset of regions (no match against used regions list) areaNamesAbbrsIDs$NotUsed[!SubAallR_Good] <- TRUE # set not used flag in all sub-area not in regions referenced # get list of sub-area keys in region listAllAreas <- listNTKeysUsed <- areaNamesAbbrsIDs$Key[SubAallR_Good] # get which sub-area keys are in region #cat("listAllAreas(in Reg.):",listAllAreas,"\n") # get keys to sub-areas that match data rows listUsedAreas <- areaNamesAbbrsIDs$Key[IndexDFtoNT] # get list of sub-areas in data (used) #cat("listUsedAreas:",listUsedAreas,"\n") # sub-divide areaVisBorders rlAreaVM <- match(rlAreaVisBorders$Key,listAllAreas) rlAreaKeep <- !is.na(rlAreaVM) # good rows to keep #cat("rlAreaKeep:",rlAreaKeep,"\n") rlAreaVisBorders <- rlAreaVisBorders[rlAreaKeep,] #cat("rlAreaVisBorders:\n") #print(head(rlAreaVisBorders,50)) # sub-divide RegVisBorders rlRegVM <- match(rlRegVisBorders$Key,listUsedRegions) rlRegKeep <- !is.na(rlRegVM) # good rows to keep #cat("rlRegKeep:",rlRegKeep,"\n") rlRegVisBorders <- rlRegVisBorders[rlRegKeep,] #cat("rlRegVisBorders:\n") #print(head(rlRegVisBorders,50)) if (!identical(RegVisBorders,L3VisBorders)) { # RegVisBorders is not equal to L3, so it has real boundaries in it. Map.RegBorders <- TRUE # make sure Reg overlays are enabled. We need them. regionsBFlag <- TRUE # print boundaries. 
} # sub-divide L2VisBorders rlL2VM <- match(rlL2VisBorders$Key,listUsedL2) rlL2Keep <- !is.na(rlL2VM) #cat("rlL2Keep:",rlL2Keep,"\n") rlL2VisBorders <- rlL2VisBorders[rlL2Keep,] #cat("rlL2VisBorders:\n") #print(head(rlL2VisBorders,50)) # Handle L3VisBorders # Turn off overlaying L3 Map.L3Borders <- FALSE # Report status #cat("Adjusted listUsed - area, Regions and listUsedL2:\n") #cat("UsedRegions:",listUsedRegions,"\n") #cat("UsedL2 :",listUsedL2,"\n") #cat("UsedAreas :",listUsedAreas,"\n") #cat("AllAreas :",listAllAreas,"\n") #cat("Num data SAs:", length(listUsedAreas)," Num NT SAs:",length(listAllAreas),"\n") #print("-end-") } # End of regional VisBorder processing sub-dividing. #cat("Overlays - L2:",Map.L2Borders," Reg:",Map.RegBorders," L3:",Map.L3Borders,"\n") #print("Completed regions subsetting of boundary data.") # Can't do much more until after the sortVar is handled. # # #_______________plotNames option__________________ # # Get area names or abbreviations to plot_______________________ #print("Validate plotNames.") # Set the defaults if not present or NA if (is.null(plotNames) || is.na(plotNames) ) { plotNames = "ab" } # areaIDNames are used in the ID glyph as the literal for the area. # Get list of names to use in glyph and the width required. # Default - abbreviations areaUAbbr <- areaNamesAbbrsIDs$Abbr[IndexDFtoNT] areaUFull <- areaNamesAbbrsIDs$Name[IndexDFtoNT] areaIDNames <- areaUAbbr IdW <- Id.width[2] # temp initialization (abbreviation) # Get width of ID gryphics column areaIDNames = switch(plotNames, "ab" = {IdW <- Id.width[2]; areaUAbbr}, # set IdW to value and return vector of names. "full"= {IdW <- Id.width[1]; areaUFull}, { # no match plotNames = "ab" warnCnt() xmsg <- "***01B0 CARG-PN Invalid plotNames argument value. The value must be 'ab' or 'full'. The default of 'ab' will be used." 
warning(xmsg,call.=FALSE) } ) # areaIDNames are in statsDFrame order containing the ab or full name associated with the row in statsDFrame statsDFrame$IDNames <- as.character(areaIDNames) # set IDNames for sorting and ID into statsDFrame IdColWidth <- max(banner.max["id","width"],IdW) #cat("ID column width to use - IdColWidth:",IdColWidth,"\n") # now complete the default sort. # statsDFrame$IDNames is in the order of the user data. Not the Names/Abbr Table. # areaIDNames is a list of name/abbr literals based on the plotNames specified and # the areaUIndex values. The name or abbreviation values are pulled from the Name Table # incase an alias or alt_abbreivation was used to link the data to boundaries. # #cat("areaIDNames:",areaIDNames,"\n") #_______________title option (mstTitle)______________________ # #print("title validation.") mstTitle <- title rm(title) # checks missing,, is character, length = 1 or 2. if (is.null(mstTitle)) { # set to the default mstTitle <- c("") } if (length(mstTitle) < 1) { mstTitle <- c("") warnCnt() xmsg <- "***01A2 CARG-TL The title parameter is empty. Recommend providing a title for the linked micromap." warning(xmsg,call.=FALSE) } if (length(mstTitle) == 1) { if (is.na(mstTitle)) { # set to the default mstTitle <- c("") } } if (typeof(mstTitle) != "character" || class(mstTitle) != "character") { mstTitle <- as.character(unlist(mstTitle)) warnCnt() xmsg <- paste0("***01A1 CARG-TL The typeof/class of the title parameter is not character. ","Only character vectors are supported. The 'title' argument is ignored.") warning(xmsg,call.=FALSE) } if (length(mstTitle) > 2) { mstTitle <- mstTitle[1:2] warnCnt() xmsg <- paste0("***01A0 CARG-TL The title argument contains more than 2 items. ", "Only the first two will be used.") warning(xmsg,call.=FALSE) } #print("statsDFrame before sort") #print(str(statsDFrame)) #_______________ascend option_____________________ # # default value is ascending. 
(TRUE) #print("Validate ascend") ordDecr <- FALSE if (!(missing(ascend) || is.null(ascend) || is.na(ascend))) { if (is.logical(ascend)) { ordDecr <- !(unlist(ascend)[[1]]) } else { warnCnt() xmsg <- "***0186 CARG-AS The ascend parameter is not a logical variable. Must be TRUE or FALSE." warning(xmsg,call.=FALSE) } } #_______________sortVar option____________________ # #print("Validate sortVar") # sort and store statsDFrame, areaID, and areaNames____________ # rules for sortVar data columns. # a) list of columns collected from sortVar parameter # b) The numbers in the column are processed to trim blanks and eliminate "," from numbers # c) The numbers in the column are converted to numeric. # d) If the column does not have numbers, it is left as character and only blanks are trimed. # # Set Default sort orders results ord <- order(statsDFrame$IDNames, na.last=TRUE, decreasing=ordDecr) # default is to sort in the sub-area names/abbr rankOrd <- rank(sort(statsDFrame$IDNames),ties.method="min",na.last=TRUE) # ord and rankOrd are re-ordered (sorted) but point to the User data.frame. # sorted order -> data.frame (or areaUIndex) # # data data.frame must be edited by now or sort will not work. # # names table must stay the same from now on. # # process sortVar if (missing(sortVar) || is.null(sortVar) || is.na(sortVar) ) { # if field omitted (null) sort use default values sortVar <- NULL } else { # value/name provides - verify it. litsortVar <- sortVar sortVar <- CheckParmColx(litsortVar,c('SORT','sortVar'),wSFNameList,len_wSFName) # column names and numbers are verified and converted to column numbers. # column 0 represents a no match, can't find. 
#print("sortVar returned by CheckParmColx") #print(sortVar) wSortVar <- sortVar[sortVar > 0] # keep good column indexes if (length(wSortVar) > 0) { wSv <- lapply(wSortVar, function(x) str_trim(statsDFrame[,x])) # pull one or more rows and trim blanks wSv2 <- lapply(wSv, function(y) as.numeric(gsub(",","",y))) # kill "," and convert to numeric wSv9Test <- lapply(wSv2, function(z) all(is.na(z))) # did data get converted to numeric? # check on the conversion to numeric - if fails, keep as character. wSv3 <- lapply(c(1:length(wSv9Test)), function(a) if(wSv9Test[[a]]) { # TRUE - All entries are NA - most likely a character column wSv[[a]] # return original text version trimmed } else { wSv2[[a]] # return numeric version } ) wSv3$na.last <- TRUE # set na.last = TRUE option wSv3$decreasing <- ordDecr # set sort order ord <- do.call(order,wSv3) rankOrd <- rank(statsDFrame[,sortVar[1]],ties.method="min",na.last=TRUE) } else { # can't use sortVar - set to NULL sortVar <- NULL # no good values - NULL argument as if it was not present. } } #cat("sortVar - ord:",ord,"\n") #print(rankOrd) # #--------------Set up working vectors based on the sort # # ord has the sorted order by ADFrame row numbers for indexing. # # sortedOrd is the order of the statsDFrame data.frame # sortedOrd <- ord # sorted display names (abbr or full) #print("sort completed.") #cat("sortedOrd:",sortedOrd,"\n") # #_______________SORT the data array as requested____________ # ### are assigns needed in our mode? Data area for all calls below... assign("dat",statsDFrame[sortedOrd,]) # data fields "dat" has sorted data frame of the statsDFrame #cat("dim(dat):",dim(dat),"\n") # # From now on, the "dat" structure is the primary data.frame containing the user's data. 
# IndexDattoNT <- IndexDFtoNT[sortedOrd] # index list from "dat" to Name table #cat("IndexDFtoNT:",IndexDFtoNT,"\n") #cat("IndexDattoNT:",IndexDattoNT,"\n") areaDatIDNames <- areaIDNames[sortedOrd] # IndexDattoNT is in data.frame order pointing to the name table areaDatKey <- areaNTKey[IndexDattoNT] # keys in order of the user data. areaDatAbbr <- areaNTAbbr[IndexDattoNT] areaDatFull <- areaNamesAbbrsIDs$Name[IndexDattoNT] areaDatID <- areaNamesAbbrsIDs$ID[IndexDattoNT] areaDatAlt_Abbr <- areaNamesAbbrsIDs$Alt_Abbr[IndexDattoNT] #cat("dim(dat):",dim(dat),"\n") #cat("length of areaID (areaDatKey): ",length(areaDatKey),"\n") #cat("areaID (areaDatKey) : ",paste0(areaDatKey,collapse=" "),"\n") naADK <- is.na(areaDatKey) #cat("areaDatKey-NA:",naADK,"\n") #cat("length(naADK):",length(naADK)," any(naADK):",any(naADK)," all:",all(naADK)," sum:",sum(naADK),"\n") if (any(naADK)) { cat("bad areaDatKey:\n") print(dat[naADK,]) print("SHOULD not get here.") } #print(dat) #cat("areaDatKey:",areaDatKey,"\n") #cat("row.names(dat):",row.names(dat),"\n") row.names(dat) <- areaDatKey # reset the row.names to the Key xDFrame <- data.frame(Key=areaDatKey, Abbr=areaDatAbbr, Full=areaDatFull, ID=areaDatID, IDNames=areaIDNames, Rank=rankOrd, Index=IndexDattoNT) #cat("xDFrame:\n") #print(xDFrame) # build index from name table to statsDFrame IndexNTtoDat <- rep(NA,length(areaNamesAbbrsIDs$Key)) for (ind in c(1:length(IndexDattoNT))) { IndexNTtoDat[IndexDattoNT[ind]] <- ind } #cat("IndexNTtoDat:",paste0(IndexNTtoDat,collapse=", "),"\n") # IndexNTtoDat is in the name table order pointing to the data.frame. NotUsedList <- is.na(IndexNTtoDat) NotUsedKeys <- areaNTKey[NotUsedList] # get list of unreferred sub-areas. NotUsedNames <- areaNTName[NotUsedList] # get list of names not referenced. #cat("NotUsedKeys>",paste0(NotUsedKeys,collapse=", "),"<\n") #if (any(NotUsedList)) { # better message? 
# warnCnt() # xmsg <- paste0("***0102 CARG-DF The following sub-area(s) in the name table were not referenced in the user data.") # warning(xmsg,call.=FALSE) # xmsg <- paste0("***0102 CARG-DF >",paste0(NotUsedNames, collapse=", "),"<") # warning(xmsg,call.=FALSE) #} #cat("NotUsedKeys:",paste0(NotUsedKeys,collapse=", "),"\n") #cat("NotUsedList:\n") #print(NotUsedList) #cat("\n") assign("areaDatAbbr" ,areaDatAbbr) # area Abbr "area Abbr" in order of the dat assign("areaDatID" ,areaDatID) # area ID "area ID" in order of the dat assign("areaDatFull" ,areaDatFull) # area Full "area Full" in order of the dat assign("areaDatKey" ,areaDatKey) # area Key "area Key" in order of the dat assign("areaDatAlt_Abbr" ,areaDatAlt_Abbr) # area Alt_Abbr "area Alt_Abbr" in order of the dat assign("areaIDNames" ,areaIDNames[sortedOrd]) # area Display Names "areaNames in order of the dat. assign("NotUsedKeys" ,NotUsedKeys) # area keys that were not referenced in the data. assign("NotUsedList" ,NotUsedList) # T/F list of not used sub-areas. assign("datOrder",sortedOrd) # data order for use with panelData back to statsDFrame # Note: sDFdat is the statsDFrame in sorted order. All areaDatxxx are in the same sorted order. # #print("done with Not Used Key List.") # # Working references on VisBorders # # # axisScale # # Default Call = NULL, Default value = "e" new extended # #cat("axisScale>",axisScale,"<\n") #print("Validating axisScale:") axisMethod = 0 if (!(missing(axisScale) || is.null(axisScale) || is.na(axisScale))) { if (axisScale == "s") { # set up axis to use titled scaling axisMethod <- 2 } if (axisScale =="sn") { # set up axis to use number scaling with suffix. axisMethod <- 3 } if (axisScale == "e") { axisMethod <- 4 } if (axisScale == "o") { # set up axis to use titled scaling axisMethod <- 1 } if (axisMethod == 0) { # if still set, but bad value warnCnt() xmsg <- paste0("***01D0 CARG-SC The axisScale argument set to ",axisScale,", must be set to 'o', 'e', 's', or 'sn'. 
The default of 'e' will be used.") warning(xmsg,call.=FALSE) axisScale <- "e" # extended algorithm axisMethod <- 4 } } else { # parameter not present or set to NULL/NA axisScale <- "e" # extended algorithm axisMethod <- 4 } if (axisMethod == 0) { warnCnt() xmsg <- "***01D1 CARG-SC The axisScale argument is Missing, NULL or NA. It must be set to 'o', 'e', 's', or 'sn'. The default of 'e' will be used." warning(xmsg,call.=FALSE) axisScale <- "e" # extended algorithm axisMethod <- 4 } #cat("axisScale:",axisScale," axisMethod:",axisMethod,"\n") # # staggerLab # # Default Call = NULL, Default value = FALSE # #print("Validating staggered:") staggered <<- FALSE # start with a lower value. if (!(missing(staggerLab) || is.null(staggerLab) || is.na(staggerLab))) { if (!is.logical(staggerLab)) { staggerLab <- FALSE warnCnt() xmsg <- "***01E0 CARG-SL The staggerLab argument is not a logical value. Setting staggerLab to FALSE." warning(xmsg,call.=FALSE) } } else { # parameter not present or set to NULL/NA staggerLab <- FALSE # default = FALSE - don't stagger axis labels. } #cat("staggerLab:",staggerLab,"\n") #cat("staggered:",staggered,"\n") # ###### ###### # # Now that the row names and any deletions have been done, then # panels can finally be setup. # numRows <- nrow(dat) # ###### #print("done call parameters - on to panelDesc..") ###### #_________________________ Get Panel Default Values ______________________ # use details in memory - now that we have merged them with users. micromapGPanelDefaults <- micromapGSetPanelDef(numRows,rowSizeMaj,rowSizeMin,rowSepGap, 5, grpPattern) #__________________________ Save Panel Defaults to memory # get copy of panel defaults wPanelDet <- micromapGPanelDefaults # copy to micromapST memory space. 
defNam = names(wPanelDet) for (i in 1:length(wPanelDet)) { assign(defNam[i],wPanelDet[[i]]) } # cGrpRatios <- c(1.333/5.333, 2.333/5.333, 3.333/5.333, 4.333/5.333, 5.333/5.333) # ##### ######### # # Call arguments are checkes - on to panelDesc # # ######### ErrFnd <- FALSE StopFnd <- FALSE # #_________________ Check panel description content and formats _____________ # # # Since the panelDesc is a data.frame, it is a given the number of items in each # variable list is the same number. # # When we move to list of lists, this is no longer true, but we don't care. # # If the objective is the list of list, then we can't do a full scan of each # variable at this stage of the processing. # #______________Check for panelDesc$type validity______________ valid = c("map","mapcum","maptail","mapmedian", "rank","id","arrow","bar", "dot","dotse","dotconf","dotsignif", "ts","tsconf", "scatdot", "segbar","normbar","ctrbar", "boxplot") # idDot and rank are not currently implemented #____________________ List of expected and valid parameters in the panelDesc PDParms <- c('type', 'lab1','lab2','lab3','lab4', 'col1','col2','col3', 'colSize', 'rmin','rmax', 'refVals','refTexts', 'panelData', 'adv' ) # get list of names/options in panelDesc PDUsed <- names(panelDesc) # used by every glyph function to check for parameters PDPmatch <- match(PDUsed,PDParms) # is if all entries in panelDesc are valid if (any(is.na(PDPmatch))) { # one of more panelDesc parameters are bad stopCnt() StopFnd <- TRUE #PDErrorList <- paste0(PDUsed[is.na(PDPmatch)],collapse=" ") xmsg <- paste0("***0113 CARG-PD The following named lists in ",pDName," panelDesc data.frame are not valid: ",paste0(PDUsed[is.na(PDPmatch)],collapse=" ")) warning(xmsg,call.=FALSE) } #___________________the panelDesc parameters (column names) are good _____ # numTopHeaderRows <- 4.25 # start with 1-Titles, 2-lab & 1-X Axis two lines. (have to cover ID and Map headers) numBotHeaderRows <- 1 # bottom 1-X axis lines. 
# if (axisScale=="s") { # add 1/2 line for reduced size and sub-title on units. numTopHeaderRows <- numTopHeaderRows + 0.5 numBotHeaderRows <- numBotHeaderRows + 0.5 } if (staggerLab) { # if staggerLab is specified (forces) add 0.25. Will know until it too late if is dyn turned on. numTopHeaderRows <- numTopHeaderRows + 0.25 numBotHeaderRows <- numBotHeaderRows + 0.25 } if (length(mstTitle)>1) numTopHeaderRows <- numTopHeaderRows + 1.25 # # May be able to do a better job - later - future enhancement # # #________________type parameter # if (is.na(match('type',PDUsed))) { # Error 'type' parameter is not present stopCnt() StopFnd <- TRUE xmsg <- paste0('***0114 CARG-PD The required "type" named list is missing in the ',pDName,' panelDesc data.frame.') warning(xmsg,call.=FALSE) } # get type vector as characters no factor, etc. type = as.character(panelDesc$type) # test contents of type vector for validity PDTmatch = match(type,valid) if ( any( is.na(PDTmatch) ) ) { PDErrorList <- paste0(type[is.na(PDTmatch)],collapse=" ") StopFnd <- TRUE stopCnt() xmsg <- paste0("***0115 CARG-PD The ",pDName," type named list contains one or more invalid glyph name(s): ",PDErrorList) stop(xmsg, call. = FALSE) } PDMap <- (PDTmatch <= 4) # the first four are maps (TRUE if columns is a Map). xSeq <- seq(1,length(PDMap),by=1) PDMapCol <- xSeq[PDMap] # Get column number of maps #print(paste0("Map columns=",PDMapCol)) # Set up number of glyphs columns numCol <- nrow(panelDesc) # number of glyphs columns numPDRow <- nrow(panelDesc) # number of values in each parameter in panelDesc numPDCol <- ncol(panelDesc) # number of parameters present in panelDesc # #_________________panelDesc$labx____________________ # blank <- rep('',numCol) # empty vector for labels NAList <- rep(NA,numCol) # NA vector oneList <- rep(1,numCol) # numeric vector of all 1s. zeroList <- rep(0,numCol) # a NULL column cannot exist in a data.frame. If the name is present, it exist! 
# lab1 if (is.na(match('lab1',PDUsed))) { lab1 <- blank } else { lab1 <- as.character(panelDesc$lab1) # convert to character xlna <- is.na(lab1) # find NA values in vector if (any(xlna)) lab1[xlna] <- "" # change NAs to "" } # lab2 if (is.na(match('lab2',PDUsed))) { lab2 <- blank } else { lab2 <- as.character(panelDesc$lab2) # convert to character xlna <- is.na(lab2) # find NA values in vector if (any(xlna)) lab2[xlna] <- "" # change NAs to "" } # lab3 if (is.na(match('lab3',PDUsed))) { lab3 <- blank } else { lab3 <- as.character(panelDesc$lab3) # convert to character xlna <- is.na(lab3) # find NA values in vector if (any(xlna)) lab3[xlna] <- "" # change NAs to "" numBotHeaderRows <- numBotHeaderRows + 1 } # lab4 if (is.na(match('lab4',PDUsed))) { lab4 <- blank } else { lab4 <- as.character(panelDesc$lab4) # convert to character xlna <- is.na(lab4) # find NA values in vector if (any(xlna)) lab4[xlna] <- "" # change NAs to "" } # All labels (1-4) are either text or "" entries. Don't have to check for missing, NULL or NA. #_________Save panelDesc Parameters in to namespace____________ # assign('lab1',lab1) assign('lab2',lab2) assign('lab3',lab3) assign('lab4',lab4) #print(find("lab1")) # print environment # more panelDesc checks and setups after the function definitions. 
# #_______________________panelDesc$colx_____________________ # # Process - # 1) check entire panelDesc variable vector and convert to numbers "CheckCol" # 2) In glyph check value and get data "CheckPDCol" # 3) check data vector for valid data "CheckNum" # # number of columns based on the presence of Descriptions for Column # col1 if (!is.na(match('col1',PDUsed))) { # col1 is present litcol1 <- as.character(panelDesc$col1) col1 <- CheckColx2(litcol1,"col1",1,panelDesc$type,wSFNameList,len_wSFName) x <- (col1 == 0) #print(x) if (any(x,na.rm=TRUE)) { StopFnd <- TRUE } } else { litcol1 <- NAList col1 <- NAList } #cat("col1:",paste0(col1,collapse=", "),">>",paste0(litcol1,collapse=", "),"\n") # col2 if (!is.na(match('col2',PDUsed))) { # col2 is present litcol2 <- as.character(panelDesc$col2) col2 <- CheckColx2(litcol2,"col2",2,panelDesc$type,wSFNameList,len_wSFName) x <- (col2 == 0) #print(x) if (any(x,na.rm=TRUE)) { StopFnd <- TRUE } } else { litcol2 <- NAList col2 <- NAList } #cat("col2:",paste0(col2,collapse=", "),">>",paste0(litcol2,collapse=", "),"\n") # col3 if(!is.na(match('col3',PDUsed))) { # col3 is present litcol3 <- as.character(panelDesc$col3) col3 <- CheckColx2(litcol3,"col3",3,panelDesc$type,wSFNameList,len_wSFName) x <- (col3 == 0) #print(x) if (any(x,na.rm=TRUE)) { StopFnd <- TRUE } } else { litcol3 <- NAList col3 <- NAList } #cat("col3:",paste0(col3,collapse=", "),">>",paste0(litcol3,collapse=", "),"\n") # #_____________panelDesc$rmin and rmax______________ # if (is.na(match('rmin',PDUsed))) rmin = NAList else rmin = as.numeric(panelDesc$rmin) if (is.na(match('rmax',PDUsed))) rmax = NAList else rmax = as.numeric(panelDesc$rmax) # #_____________panelDesc$refxxx________________ # if (!is.na(match('refVals',PDUsed))) { assign('lRefVals',as.numeric(panelDesc$refVals)) # detail test in glyphs } else { assign('lRefVals',NAList) } # no check if RefVals are numeric. ???? 
if (!is.na(match('refTexts',PDUsed))) { assign('lRefTexts',str_trim(panelDesc$refTexts)) lRefTexts[lRefTexts == ""] <- NA # convert blanks. numBotHeaderRows <- numBotHeaderRows + 1 } else { assign('lRefTexts',NAList) } # no check if RefTexts are character. ???? # # Make adjustments for color or grays # if (colFull) { # set color values to work variables iRef.Val.col <- Ref.Val.col iRef.Text.col <- Ref.Text.col } else { # set gray values to work variables iRef.Val.col <- Ref.Val.BW.col iRef.Text.col <- Ref.Text.BW.col } #_____________panelDesc$panelData_______________ # # if present is the typeof correct ? - check within the glyph - it may be different. if (is.na(match('panelData',PDUsed))) { wPanelData <- NAList } else { wPanelData <- as.character(panelDesc$panelData) # save pointer to panelD } assign('panelData',wPanelData) rm(wPanelData) # #_________________- # #cat("Check on header row counts - top:",numTopHeaderRows," bot:",numBotHeaderRows,"\n") #cat(" top mar:",numTopHeaderRows * 0.2, " bot mar:",numBotHeaderRows* 0.2,"\n") #cat(" compare to 1.1 and 0.5/0.75\n") #___panelDesc$colSize_________User specificed column width processing and checking # ____________________Column Size layout (initial) # IdW set up in plotNames check numCol = length(type) # get number of columns to support #cat("Building cparm table for run - Number of columns:",numCol,"\n") cparm <- data.frame(cSize=numeric(0),lSep=numeric(0),rSep=numeric(0),rMinH=numeric(0),rMaxH=numeric(0)) # empty data.frame # Build column width table based on the types of columns specified. for (j in 1:numCol) { # Test type of column to be built and call build routine. 
#cat("top of loop - type=",type[j],"\n") cparm2 = switch(type[j], # colSize, col width, left sep, right sep, row min, row max) "map"= c(max(banner.max["map","width"],Map.Min.width),0,0,Map.MinH, Map.MaxH), "mapcum"= c(max(banner.max["mapcum","width"],Map.Min.width),0,0,Map.MinH, Map.MaxH), "maptail"= c(max(banner.max["maptail","width"],Map.Min.width),0,0,Map.MinH, Map.MaxH), "mapmedian"=c(max(banner.max["mapmed","width"],Map.Min.width),0,0,Map.MinH, Map.MaxH), "id"= c(IdColWidth,0,0,0,0), "dot"= c(0,0,0,0,0), "dotse"= c(0,0,0,0,0), "dotconf"= c(0,0,0,0,0), "dotsignif"=c(0,0,0,0,0), "arrow"= c(0,0,0,0,0), "bar"= c(0,0,0,0,0), "boxplot"= c(0,0,0,0,0), "ts" = c(0,.175,0,0,0), "tsconf" = c(0,.175,0,0,0), "scatdot" = c(0,.175,0,0,0), "segbar" = c(0,0,0,0,0), "normbar" = c(0,0,0,0,0), "ctrbar" = c(0,0,0,0,0), "rank" = c(Rank.width,0,0,0,0), "nomatch" = c(0,0,0,0,0) ) #cat("cparm2:",paste0(cparm2,collapse=", "),"\n") cparm <- rbind(cparm,cparm2) } # now have one row per column in the user panelDesc data.frame. colnames(cparm) <- c("cSize","lSep","rSep","rMinH","rMaxH") #cat("Column Sizing Table completed.\n") # dump table. #print(cparm) #cat("\n") # one row per column. borders = rep(borderSize,4) # set borders widths to 0.5 inches ### Add check of column type to table of miniumal or statics column widths. ### Must have details lists processed to do this. ### Recreate plotX as done in panelLayout # Pick up row height min and max from types used. rowMinH <- max(cparm[,"rMinH"],rowSizeMn) # Largest mininum for all glyphs involved and system minimum size (inches) rowMaxH <- max(cparm[,"rMaxH"],rowSizeMx) # Largest maximum for all glyphs involved #cat("rowMinH:",rowMinH," rowMaxH:",rowMaxH,"\n") # Same formula as panelLayout xPlotWidth = (par("din")[1])-borders[1]-borders[2]-leftMar-rightMar # width in inches - (colSep, borders, left and right margins). 
#cat("xPlotWidth:", xPlotWidth,"\n") # done IdW = Id.width[1] # bigger value for full names [2] is for abbreviations - default # #_____________panelDesc$colSize____________________ # colWidths <- cparm[,1] # get list of fixed width glyphs that have been requested (column # 1) from cparm. # In this table, a value of zero is NO Fixed Width. colFlex <- !(colWidths > 0) # save list of glyphs that use don't have fixed widths - flexible values ( not maps and id ) colNumID <- c(1:length(colWidths)) colGood <- rep(TRUE,length(colWidths)) #cat("colSize-Start colWidths:",colWidths," len:",length(colWidths)," colFlex:",colFlex,"\n") DoColSize <- FALSE # check for parameter? if (!is.na(match('colSize',PDUsed))) { # colSize is present DoColSize <- TRUE # colSize is present - do proportional space allocation. wColS <- panelDesc$colSize #cat("Processing colSize parameter:",wColS," len:",length(wColS),"\n") if (length(wColS) != length(colWidths)) stop # check for NA's in colSize fields. - Error. Clear to NULL "" wColBad <- is.na(wColS[colFlex]) if (any(wColBad)) { # yes, invalid value by user. wColBadList <-colNumID[colFlex & is.na(wColS)] if (length(wColBad)<=0) stop warnCnt() xmsg <- paste0("***01F1 CARG-CS The 'colSize' parameter in ",pDName," contains NA values in columns: ",paste0(wColBadList,collapse=","),". "," Values must be numeric and > 0.") warning(xmsg,call.=FALSE) colGood[wColBadList] <- FALSE # mark problem column } #cat("1-wColS:",wColS," colGood:",colGood,"\n") # check for invalid fixed width fields in colSize - NA, "", " ", 0 -> OK. Else - Bad and report. # Set to NA all valid fixed width column values in the colSize vector. # NA is valid wColS[!colFlex & wColS == 0 ] <- NA # 0 is valid wColS[!colFlex & str_trim(wColS) == "" ] <- NA # "", " ", etc is valid # What we have left is possible invalid entries. # if any fixed width column is not NA, problem if (any(!is.na(wColS[!colFlex]))) { # fixed width columns have characters or numeric or logical vlaues - OUCH! 
wColBadList <- wColS[!colFlex & !is.na(wColS)] # get list of bad values. if (length(wColBad)<=0) stop # check on programmer warnCnt() xmsg <- paste0("***01F2 CARG-CS The 'colSize' parameter in ",pDName," has values for fixed width glyphs. Value(s): ",paste0(wColBadList,collapse=","),". ", "Value(s) are ignored and set to NA.") warning(xmsg,call.=FALSE) # at this point the fixed columns are NA or can be set to NA. wColS[!colFlex] <- NA } #cat("2-wColS:",wColS," colFlex:",colFlex," colGood:",colGood,"\n") # Convert to numeric, if NA in colSize fields - eError report and set to NULL or "". # Fixed Width Columns are NA, so we not work on flexible columns that can have values. suppressWarnings(numColS <- as.numeric(wColS)) # make sure it's numeric. # Any flex column that is not a number or can not be converted to number -> NA. # also check for "Inf" values. Will use as marker later. wColFG <- colFlex & colGood wColSize <- numColS[wColFG] wColNum <- colNumID[wColFG] wColBad <- is.na(numColS[wColFG]) if (any(wColBad)) { # have colSize value(s) that is not numeric or are "Inf". wColBadList <- wColSize[wColBad] # get list of bad entries. # invalid colSize entries, not numeric, could be character, logical, etc. warnCnt() xmsg <- paste0("***01F3 CARG-CS The 'colSize' parameter in ",pDName," does not contain numeric values : ",paste0(wColBadList,collapse=","),".") warning(xmsg,call.=FALSE) # wColBadList <- wColNum[wColBad] # get index numbers colGood[wColBadList] <- FALSE } #cat("3-wColS:",wColS," numColS:",numColS," colGood:",colGood,"\n") # colSize check range. wColFG <- colFlex & colGood # only range check good (so far) colSize values. wColSize <- numColS[wColFG] # list of values wColNum <- colNumID[wColFG] # indexes to vector # run the test. wColBad <- ( wColSize <= 0 | wColSize > 200 ) # Only look at remaining good entries. if (any(wColBad)) { # colSize values out of acceptable range. 
wColBadList <- wColS[wColNum[wColBad]] # get list of bad entries # colSize entries are out of range <= 0 or > 200. warnCnt() xmsg <- paste0("***01F4 CARG-CS The 'colSize' entries in ",pDName," are out of range ( <= 0 or > 200 ). Values: ", paste0(wColBadList,collapse=","), ".") warning(xmsg,call.=FALSE) colGood[wColNum[wColBad]] <- FALSE # set all out of range values as no bad. } #cat("4-wColS:",wColS," numColS:",numColS," colGood:",colGood,"\n") numColS[!colGood] <- 0 # set bad values to zero. #cat("5-wColS:",wColS," numColS:",numColS,"\n") # Fix colSize columsn to Mean - "" columns in colFlex range. # Get sum of valid colSize entries. wColFG <- colFlex & colGood sumFixCol <- sum(colWidths) # sum of fixed widths sumColSize <- sum(numColS[wColFG]) # sum of values in user provided colSize # bad values were set to zero. meanColSize <- mean(numColS[wColFG]) # mean of values #cat("6-sumFix:",sumFixCol," sum colSize:",sumColSize," mean:",meanColSize,"\n") if (sumColSize == 0) { DoColSize <- FALSE } # sum of colSize = zero. if (all(!colGood[colFlex])) { DoColSize <- FALSE } # if all entries are bad - ignore colSize if (DoColSize) { # All flex columns must have a value # replace bad values with mean of good values. repColS <- colFlex & !colGood if (any(repColS)) { # we have come bad values to change to mean. wColBadList <- wColS[repColS] # get list of values being changed. warnCnt() xmsg <- paste0("***01F5 CARG-CS The reviewed 'colSize' parameter in ",pDName," has bad values (see above) and have been replaced by the mean of the good values: ", meanColSize,". Bad Values:", paste0(wColBadList,collapse=",")) warning(xmsg,call.=FALSE) numColS[repColS] <- meanColSize } colSize <- numColS # transfer back to colSize. litColSize <- as.character(numColS) # common starting point - either character or numeric. 
#cat("final colSize:",colSize,"\n") } else { warnCnt() xmsg <- paste0("***01F6 CARG-CS The 'colSize' parameter in ",pDName," contains no useful information and will be ignored.") warning(xmsg,call.=FALSE) colSize <- NAList } } else { # no parameter specified. colSize <- NAList DoColSize <- FALSE } # # Only keep colSize entires for flexible glyphs # #cat("Finish pre-processing colSize -- DoColSize:",DoColSize," colSize:",paste0(colSize,collapse=", "),"\n") #cat("Starting column width calculations\n") # colWidths has column widths in inches or zero is not set yet. (initially fixed width columsn.) # colFlex has TRUE for columns that are width is undetermined. # colSize edited vector of relative ratio values for each column. # basic column separators (0 on edges, colSepGap for all internal) colSep <- c(0,rep(colSepGap,numCol-1),0) # based on column type, add more space on left or right. (cparm[,2] for left, cparm[,3] for right.) - Y Axis. colSep[1:numCol] <- colSep[1:numCol] + cparm[,2] # add space on left of panel colSep[2:(numCol+1)] <- colSep[2:(numCol+1)] + cparm[,3] # add space on right of panel #cat("colSep:",colSep,"\n") colSepSum <- sum(colSep) # total width used by separators xPlotWidthOrg <- xPlotWidth xPlotWidth <- xPlotWidth - colSepSum # space - subtract separator space = Available X width # available space. usedSpace <- sum(colWidths) # get amount of allocated space. freeSpace <- xPlotWidth - usedSpace # available space #cat("Setup-Space:",xPlotWidthOrg," colSepSum:",colSepSum," Avail:",xPlotWidth," freeSpace:",freeSpace," usedSpace:",usedSpace,"\n") if (DoColSize) { #cat("Doing colSize - colSize:",colSize," colWidths:",colWidths,"\n") if (length(colSize) <= 0) stop # Cycle 1 - calculate and adjust for minimum column widths sumColSize <- sum(colSize,na.rm=TRUE) # sum values wColSizeP <- colSize/sumColSize # get proportion. wColSize <- wColSizeP * freeSpace # calculate allocations. wColMinE <- (wColSize < colSizeMin) # find too small columns. 
colWidths[wColMinE] <- colSizeMin # set low values to min. (if they exist) colSize[wColMinE] <- 0 # remove low values from colSize calculation. #cat("C1-colSize:",colSize," wColSizeP:",wColSizeP," colWidths:",colWidths,"\n") # Cycle 2 - calculate (again) and adjust for maximum column widths usedSpace <- sum(colWidths) freeSpace <- xPlotWidth - usedSpace #cat("C2-usedSpace:",usedSpace," freeSpace:",freeSpace,"\n") sumColSize <- sum(colSize,na.rm=TRUE) # sum values wColSizeP <- colSize/sumColSize # get proportion. wColSize <- wColSizeP * freeSpace # calculate allocations. wColMaxE <- (wColSize > colSizeMax) #cat("C2-Max test - sumColSize:",sumColSize," wColSizeP:",wColSizeP," wColSize:",wColSize," wColMaxE:",wColMaxE,"\n") if (any(wColMaxE,na.rm=TRUE)) { # only do one more cycle if a value > max is found. colWidths[wColMaxE] <- colSizeMax # set high values to max. colSize[wColMaxE] <- 0 # remove high values from colSize calculation. #cat("C2-Max adj-colSize:",colSize," wColSizeP:",wColSizeP," colWidths:",colWidths,"\n") # Cycle 3 - if max adjustments - do it one more time. usedSpace <- sum(colWidths) freeSpace <- xPlotWidth - usedSpace #cat("C3-usedSpace:",usedSpace," freeSpace:",freeSpace,"\n") # Repeat for final values. sumColSize <- sum(colSize,na.rm=TRUE) # sum values wColSizeP <- colSize/sumColSize # get proportion. wColSize <- wColSizeP * freeSpace # calculate allocations. } # Last step - place the widths in to colWidths # colSize columns hitting the minimum and maximum values have already been set in colWidths vector. # last calculation setup wColSize with the last columns. wColValFlag <- (wColSize > 0 ) # list of values to merge into colWidths wColValFlag[is.na(wColValFlag)] <- FALSE # NA are fixed columns, so make FALSE (no update) colWidths[wColValFlag] <- wColSize[wColValFlag] # put values into wColWidths } else { # no colSize - do old way - equal subdivide. zeroCol <- !(colWidths > 0) # TRUE for any column with no width assigned. 
numberCol <- sum(zeroCol,na.rm=TRUE) # get number of TRUEs = number of columns that need widths. (sum 1s) equalCol <- freeSpace / numberCol # get width of each column. #cat("Initial equalCol:",equalCol," FreeSpace:",freeSpace,"\n") if (equalCol > colSizeMax) { equalCol <- colSizeMax } if (equalCol < colSizeMin) { warnCnt() ErrFnd <- TRUE xmsg <- paste0("***0420 PANEL Calculated column widths is less than minimum ",colSizeMin," inches - too many columns specified.") warning(xmsg,call.=FALSE) if (equalCol < colSizeMin/2) { stopCnt() StopFnd <- TRUE xmsg <- paste0("***0421 PANEL Column width is too small to be useful, Package stopped.") stop(xmsg,call.=FALSE) } } colWidths[zeroCol] <- equalCol } #cat("Final-colWidths:",colWidths,"\n") # savedColWidths <- colWidths # save a copy of the column size parameters. savedColSep <- colSep # save a copy legfactor <- 1 # add space if reference values provided. # JP-2010/07/23 0 change to refVals to be consistent. #cat("numTopHeaderRows:",numTopHeaderRows," numBotHeaderRows:",numBotHeaderRows,"\n") if(!is.null(panelDesc$refVals)){ # if field present. if(any(!is.na(panelDesc$refVals))){ # if value provided, provide room in the bottom margin for the reference test. botMar <- botMarLegend # revisit calculation below to be more precise legfactor <- 9/(9-botMardif) # ???? #### Check on the use and need for "legfactor" in older code. } } #cat("botMar:",botMar,"\n") #assign('legfactor',legfactor) ######## # # Check for warnings or stops that should terminate the package/function # if (StopFnd) { xmsg <- "***01Z9 CARG Errors have been found in parameters and data. Please review program log and fix problems. Packaged stopped." stop(xmsg, call.=FALSE) } if (ErrFnd) { warnCnt() xmsg <- "***01Z8 CARG Warnings have been found in the parameters and data. Package continues, but results are unpredictable. Review log and fix errors." warning(xmsg, call.=FALSE) } ######## # # # Process and calculate column spacing and offsets. 
# # # should not need to set this again. numCol <- length(type) ##### # # We build three panel layouts: # 1) layout of all glyphs panels (ngroups by ncols) # 2) layout of general blocks of glyphs (top, median, bottom groups) (3 or 2 by ncols) # 3) layout of page blocks (top, median, bottom groups) but only 1 column (3 o4 2 by 1) # ##### # # USStatesBG set up - 50 or 51 rows -> 10 or 11 groups - median - single element # # USSeerBG set up - 9 to 20 rows -> 3(of 3,3,3) to 4(of 5,5,5,5) groups # # KansasBG set up - 105 rows -> 21 groups - median - 5 rows/group (11 groups) # # NewYorkBG set up - 62 rows -> 13 groups (5..5,4,4,4,5...5) # # MarylandBG set up - 24 rows (counties + 1 city) -> 5 groups (5,5,4,5,5) # # UtahBG set up - 29 rows -> (5.5.4.1.4.5.5) -> 7 groups # # ChinaBG set up - 34 rows -> (5,5,5, <1,2,3,4> ,5,5,5) -> 7 groups # # UKIrelandBG set up - 218 rows -> (5,5,5,...,4,4,...,5,5,5) # # SeoulKoreaBG set up - 25 rows (districts) -> (5,5,5,5,5) -> 5 groups # # AfricaBG set up - 52 rows (countries) -> (5,5,5,5,5,2,5,5,5,5,5) -> 11 groups # # #printPanelsParms() # build layout for glyphs panels (numGrps x ncol) (Individual) #cat("panelLayout - panels\n") assign("panels",panelLayout( vnrow = numGrps, # num of Row/Groups vncol = numCol, # num of columns topMargin = topMar, # 0.95 bottomMargin= botMar, # 0.5 leftMargin = 0, rightMargin = 0, rowSep = rowSep, # vector rowSize = rowSize, # vector colSize = colWidths, # calculated column widths (inches) colSep = colSep, # vector rSizeMx = rowMaxH, rSizeMn = rowMinH, rSizeMaj = rowSizeMaj, # 7 rows per group/row rMapCol = PDMapCol, disErr = FALSE, rDebug = MST.Debug) ) # c(.1,.1,.1) for 3 # Done above by "micromapSetPanelDef" #grounpedRowSize = details[["groupedRowSize"]] # c(35,1.65,35) -> USStatesBG (51) # c(7,7,7) or c(7,7,7,7) -> USSeerBG (9 -- 20) # c(70,7,70) -> KansasBG (105) # c(42,7,42) -> NewYorkBG (62) #groupedRowSep = details[["groupedRowSep"]] # c(0,0.1,0.1,0) or c(0,0.1,0) #print("panels;") 
#print(panels) #cat("medGrp:",medGrp,"\n") # Major panel group title-top, panels, title-bottom by columns (overlays panels) # section of panels (top(25), median(1), bottom(25) and "N" columns wide. ### generalize settings - main panels (middle level) (3 rows - "N" cols) ### rows= title, glypics, footnotes cols=one for each glyph panelBlocks <- 2 # Number of blocks for an even number of group/Rows if (medGrp > 0) { panelBlocks <- 3 # Number of blocks for an odd number of group/Rows } #printPanelParms("panelGroup") #cat("panelLayout - panelGroup\n") # build layout for top, median(if present) and bottom cover panels (3 or 2 x numCol) assign("panelGroup",panelLayout( vnrow = panelBlocks, # 2 or 3 vncol = numCol, # numCols topMargin = topMar, bottomMargin = botMar, leftMargin = 0, rightMargin = 0, rowSize = groupedRowSize, rowSep = groupedRowSep, colSize = colWidths, colSep = colSep, rSizeMx = rowMaxH, rSizeMn = rowMinH, rSizeMaj = rowSizeMaj, rMapCol = PDMapCol, disErr = TRUE, rDebug = MST.Debug) ) #print("panelGroup:") #print(panelGroup) #cat("panelLayout - panelOne\n") # build layout for page (3 or 2 x 1) assign("panelOne",panelLayout( vnrow = panelBlocks, # 2 or 3 vncol = 1, # 1 topMargin = topMar, bottomMargin = botMar, leftMargin = 0, rightMargin = 0, rowSize = groupedRowSize, rowSep = groupedRowSep, rSizeMx = rowMaxH, rSizeMn = rowMinH, rSizeMaj = rowSizeMaj, rMapCol = PDMapCol, disErr = TRUE, rDebug = MST.Debug) ) #print("panelOne:") #print(panelOne) # # Variables that span glyphs # #staggered <- FALSE # Flag to indicate where the current column should start staggering numbers # FALSE = first label on line 1, TRUE = first label on line 2. # This value is set when staggered labels are proceed based on if the last value # in the atRx1 is greater thatn atRx2 = TRUE then value is TRUE. ##### # ____________________Main loop______________________________ # # Future of main loop. 
# This will change to do: Setup, Page 1-Page Header, Glyph "n1" to "n2", and then the next page. # ##### #cat("Main Loop\n") # Build images of each column for (j in 1:numCol) { #cat("Doing Type:",type[j],"\n") # Test type of column to be built and call build routine. switch(type[j], "map"= rlAreaMap(j), "mapcum"= rlAreaMapCum(j), "maptail"= rlAreaMapTail(j), "mapmedian"=rlAreaMapMedian(j), "id"= rlAreaID(j), "dot"= rlAreaDot(j, dSignif=FALSE), "dotse"= rlAreaDotSe(j), "dotconf"= rlAreaDotConf(j), "dotsignif"=rlAreaDot(j, dSignif=TRUE), "arrow"= rlAreaArrow(j), "bar"= rlAreaBar(j), "boxplot"= rlAreaBoxplot(j, as.character(panelDesc$panelData[j]) ), "ts" = rlAreaTSConf(j, as.character(panelDesc$panelData[j]), conf=FALSE), "tsconf" = rlAreaTSConf(j, as.character(panelDesc$panelData[j]), conf=TRUE), "scatdot" = rlAreaScatDot(j), "segbar" = rlAreaSegBar(j), "normbar" = rlAreaSegBar(j, SBnorm=TRUE), "ctrbar" = rlAreaCtrBar(j), "rank" = rlAreaRank(j), "nomatch" ) #cat("End of glyphs Call - lastSpace Lab2:",lastLab2Space," Lab3:", lastLab3Space,"\n") } # All columns are built and sitting in the panel. ##### # # Fill in the top Page Titles # #cat("panelSelect - panelOne - margin='top'\n") panelSelect(panelOne,margin="top") # full page top label area. x <- panelScale() if (length(mstTitle)==1){ text(.5,.77,mstTitle,cex=Title.cex) } else { # only use the first two title character strings text(0.5, 0.9, mstTitle[1],cex=Title.cex) text(0.5, 0.65,mstTitle[2],cex=Title.cex) } # ##### ##### # # Time to report on the warnings and errors # message("End of micromapST processing.\n\n") warnNum <- get("i",envir=environment(warnCnt)) # get warnings counter if (warnNum > 0) { message(paste0(warnNum," warnings messages were logged. Please review the run log and resolve any issues.")) } else { message("No warnings were logged.") } stopNum <- get("i",envir=environment(stopCnt)) # get stop message counter if (stopNum > 0) { message(paste0(stopNum," Stop messages were logged. 
Please resolve issues and rerun.")) } else { message("No stop messages were logged.") } if (( warnNum + stopNum ) > 0) { message("If warnings and error messages did not appear on your R console, please execute 'warnings()' to list them.\n") } message(" ") # change the following to call end of run report. - set at start so R stops will be caught. # ##### #x <- Sys.setlocale('LC_ALL',Saved_Locale) on.exit(print("micromapST Ends")) } # end of micromapST Function ### End of micromapST #### # # .onLoad function - executed when the package is loaded initially. # builds a non-changeable micromapGDefault data.frame for use # as the default when colors and/or details are not specified. # # Added by JP - Oct, 2012 - Setup permanent micromapGDefault data.frame for # use as the default values on the call. # # No longer required. # #### #.onLoad = function (libraryName, pkgName) # # { # #packageStartupMessage(".onLoad") # #packageStartupMessage(libraryName) # #packageStartupMessage(pkgName) # # generate default data.frame for micromapST. # #rlmicromapGDefaults <- micromapGSetDefaults() # #micromapGDefaults <<- rlmicromapGDefaults # # } # # #### # # End of load and variable initialization # #### ###### #### ADD CHECK to make sure values are numeric when required. (content of columns.) #### Done for Arrow, the Dot set, Bar, SegBar/NormBar, CtrBar #### Not yet for BoxPlot and TS. ######
/R/micromapST.r
no_license
jianruchen33/micromapST
R
false
false
537,714
r
# # Updated Package Version 120828 # Updated Package Version 130426 # Updated Package Version 130506 - V0.94 # Updated Package Version 130510 - V0.95 - fixes. # Updated Package Version 130511 - V0.96 - attempt to complete - # Updated Package Version 130511 - V0.97 (8:00pm) - fixes # Updated Package Version 130513 - V0.98 (8:00am) - fixes and testing # Updated Package Version 130517 - V0.99 - fixes and work with BW. # - correct ref line color and minor updates. # - corrected micromapSTDefaults and Arrows errors. # - label adjustment and fix parameter checking for boxplots # Updated Package Version 130604 - V1.0.0 - Final Edit and fixes for release. # - Dynamically defined variables must be globalVariables add. # - Formal Release of package. # Updated Package Version 131127 - V1.0.1 - Correct segmented and centered bars to handle only two data columns # Updated Package Version 140104 - V1.0.2 - Add diagonal line in scatter plot with equal x and y values. # - Update NormSeg, Seg, Centered Seg to use variable width bars. # - Changed method of providing colors and details parameters. # - Correct median dot in scatter plots # - Add logic to allow numeric (integer) or column names in col1, col2, col3 # - Correct logic to handle multiple columns in sortVar. # Updated Package Version 140307 - V1.0.3 - Add Rank Glyph # - Remove limit on number of time series elements. # - Plot the median time series data in the panels above and below # the median row. # - Adjusted defaults on stacked bar graphs # Updated Package Version 140712 - V1.0.4 - Correct usage of single and double quote marks in examples. # Updated Package Version 141107 - V1.0.5 - Parameter checking of the panelDesc arguments is incorrect. # Logic is rewritten and migrated into this package. # # Updated Pagkage micromapSEER - 141023 - V0.90 - Modified package to meet NCI Seer's requirements. # Rewrote map... 
logic to handle different number of rows # per panel, scaled glyphs to be the same size in # panels of 1, 2, 3, 4 or 5 areas. # Modified SegBar, NormBar, Bar, and BoxPlot glyphs # to handle different number of areas per panel and # present the same sized glyph # Modified logic to accept a SEER border and area dataset or # the full US States area dataset. # Fixed logic in mapcum, mapmedian and maptail to correctly # draw a square rectangle in the title, independent on the # number of columns or rows of panels. # Fixed ID glyph to dynamic determine width of column # based on the abbreviated or fullname text in the SEER # or US state datasets. Corrected code to properly draw # the same sized square box and align with text for all # ID glyph lines. # Added logic to force min. and max column widths. # Added logic to force min. and max panel row height. # Correct distance from axis labels to tics to be the same # on the top and bottom axis labels. # Initially setup tables to provide uniform distribution # of areas across panels. This caused to many 3 and 4 # area panels. Re-did the setup tables to minimize the # number of panels and use 4 and 5 areas per panel when # ever possible. # Correct datasets to contain all UPPER case abbreviations # and properly capitalized Full Names. # Internal to program all matching is done using UPPER # case strings. # Added logic to include a "like" string for each SEER area # to allow matching with SEERStat generated datasets. # Since data.frames are mostly constructed with factors, # the user may pass us a statsDFrame containing factors # instead of numeric values. Code added to check for # numerics in the statistical data provided, convert # from character is required, and convert from factors # if required. # User data may have extra line at the end of the data, added # option to delete last line. if not a match. # Fixed validating the data in the statsDFrame columns in # each glyhpic. 
# Fixed logic handling odd number of groups with the # middle group having > 1 areas. # Added logic to detect empty data columns in the statsDFrame. # character contain had to be checked if it can be converted to # numeric. # Corrected logic to handle multiple border groups. Default for # Seer is "USSeerBG". However, not providing the argument # set no values or the wrong value in BordGrpName. # The aspect of the US maps was off. Corrected the maximum # height value from 0.6 to 0.8 inches. # Changed the name of the main module to micromapPLUS. Add two front-end # functions - micromapST and micromapSEER to provide a dual interface for # existing users. # Separated micromapGSetDefaults and micromapGSetPanelDef functions into # a separate ".r" to share with micromapSEER. # # - 150112 Updates: # Corrected problem with printing US "DC", "HI", "AK" labels on non-US # maps. Used the areaParms$areaUSData to control this feature. This # bordGrp parameter should only be set to "TRUE" when the full US map # and states are used in the bordGrp. # Changed the deleteLast option to ignoreNoMatch options and # redid the code to do this function and generate the information # and error messages. # Changed module name back to micromapST. # Changed version number to 1.1.0 to note major revision. # # - 150312 Updates: # Change USStatesBG user data.frame check from: must be 51 rows to # must be 51 or less rows. Allow data with subsets of states. # - 150713 Updates: # Update structure of areaParms table in border groups # Add several more border groups to the package: UK-Ireland, Seoul, Utah # Add staggered x-Axis labels to keep ends from overlapping. # Add feature to allow user to specify x-Axis for glyph # Update glyphs to formally handle NA values in data. # Update X-Axis to include labels if grid is drawn (???) # Update map code to enforce minimum width to make sure # space is wide enough for all titles and labels. 
# Add "Alt_Abbr" option for rowNames # Update code to use "LINK" or make sure "Abbr" works. # Changed Border Group .rda file name from ????DF.rda to ????BG.rda. # Added MapLabel field to areaNamesAbbrsIDs tables - to be used to generalize # the over printing of sub area names on first map - AK, HI, DC like. # - 150715 Updates: # Changing name table structure to have "full", "ab", "alt_ab", "id" # pointing to "key" rather than abbr. This is to handle any cases # down the road that don't have abbr, full or ID. If the # column is not present, the option will not be available. # - 160807 Updates: # Fix position of first title above the columns. Too close to # axis by about 1/2 a line. # - 160812 Updates: # add individual DOT symbol control to dotconf and dotSE. # updated detailsVariables to reflect the new details options # and future conversions. Check code and added all missing # variables. # - 160816 Updates: # modified labels code to use odd number of labels and a minimum of 3. # - 161120 Updates: # added regional IDs and Names to the name table. # Added better overlay print control for L2, Reg, and L3. # Corrected X Axis label logic - removed duplications and # parameter resetting. # - 161206 Updates: # Changed NAMESPACE, DESCRIPTION files to meet new # CRAN requirements. # Modified code to not directly use assigns for # variables in the .GlobalEnv space. # Modified all data() function calls to load # the data into the current environment. # # # discussion points: not all border groups have abbreviations or IDs. Names yes, but need to # handle the value inputed by the user and link data to boundaries. May be needed # to build "internal" link and have all else point to it? (if present.) # # Update Log and change details by Jim Pearson # May 31, 2009 - corrected dates on three column micromap # 1990-2000 to 2001-5 --> 1996-2000 to 2001-5 # June 7, 2009 - Added VerStr as a parameter to be able to determine # which output files are from this version. 
# - Updated book Micromap-Dot-Arrow-Box plot to use new # data files: # WFAgeAdjLungMort2000-4CountyAgeAdj2000.csv # WFLungMort19951999AgeAdj2000State.csv # WFLungMort20002004AgeAdj2000State.csv # and change the titles for the columns in the output to match. # - Updated sections to use labels instead of column numbers. # - Updated Book micromap to merge two files instead of using # one file. This also changed the column number by +1. # Note: future update should look at using column names instead of # numbers. # - Updated ARROW chart to plot DOT when difference is zero. # - Reduce white space between columns (just a little, cannot be eliminate to # maintain readibility. # July 22, 2010 - Correct reference value (refVals) code. # - add variable for reference value label text (refTexts) per column. # panelDesc$refTexts -> vector, one per column. # - add variable to color the reference value label test # details$Ref.Text.col # - No reference label (legend) is printed if no refTexts for the # column is provided. # January 30, 2011 - Determine running directory and load # panelFunctions.r, panelLayout.Rdata, and micromapST.Rdata # from directory. # August 28, 2012 - Cleaned up code and re-packaged it with .onLoad # - duplicate variable cleaned up, and unused code removed. # - integrated the test/demo code correctly. # - made adjustments to handle the micromapST namespace. # - changed refVals and refTexts to local variables (lRefVals and lRefTexts) to add clarity. # - changed parameter for BoxPlots colMedian to BoxP.Median.col to kill duplication with the colMedian # used on the general graphic # - Modified "Details" and "Colors" variable to be unique and # re-ordered by subroutine usage. # October 5, 2012 - update documentation for review. # - deleted second version of panelGroupOutline- in panelFunctions.r # - Changed rlAreaRefText function to build a legend with a line followed by # the reference text. 
Problem was line was on both sides of the label and # in some cases overlaid the text. This way the line is on the left of the text. # - changed default value for reference text from black to mid green to match the line # color. # April 26, 2013 - add new panel graphic function - TS and TSConf # - added Time Series where each state has a strip within the panel for the line graph. # - changed boxPlot argument to panelData to represent more types of auxilary data for the program. # May 1-2, 2013 - add new panel graphic functions - ScatDot, StackedBar, and Normalized Bar # - add graduated colors to stacked bars and normalized stacked bars. # - changed normalized axis labels to percentages. # - add Time Series with all plots in one panels (one x-y graph) # - change TS confidence band to lighter shade = 10% transparency. # - attempted to fix order issues. On TS series of panels, assume order of the panelData is the # same as the original col1, col2, col3, stateId orders. When they are re-ordered, Save the # index change to remap back to the old order. Use this to re-order panelData. # - On scatdot and segbar panels, the panelData contains a stateId. Reordering is # done by using the sorted stateId column in the statsDFrame to re-order the panelData frames. # - added programing feature to permit adjustments to colsize, left and right margins of a # panel based on the type of panel to be created. Needed to allow space for the # left axis labels for the time series panels (4). # May 4, 2013 - remove prototype strip time series - did not work, code deleted. # - Added centered stacked bars. # - changed circle size on Scatdot of non-colored dots to 75 smaller. # - Changed source of data for "scatdot", "segbar", "normbar", and "ctrbar" from # an extra panelData structure to using columns in the statsDFrame call parameters data.frame. 
# Now the col1 and col2 parameters in the panelDesc data.frame indicate which columns or # range of columns in the startFrame data.frame to use for the X,Y coordinates or the # set of bar segment values per state. # May 6, 2013 - change package name from stateMicromap to micromapST. # - updated documentation and added new examples to micromapST.Rd # May 8, 2013 - Fixes - change colData to panelData to avoid confusion. # - Add parameter value checks to Arrow, Bar, dot, dotSE, dotconf, TS, ScatDot, segbar, normbar, and ctrbar functions. # - fix examples # May 9, 2013 - switch the TS Array to be 1=x, 2=y, 3=low-y, 4=high-y. # May 10, 2013 - add support for rownames on the time series arrays. # - added validation of state ids in boxplots and time series. # - added new time series dataset to package. # - added panelInBound to generating x and y axis labels. # May 11, 2013 - reduced Y axis labels size to get more detail # - replaced wflung00cnty data file. # - created segbar data file. # - fixed problem with saving new time series file - needed names on all dimensions. # - fixed problem with at and labels argments on mtext calls. # - saved original tests in init/tests directory and replace them # in the micromapST.Rd with the master 6 examples. # - cleaned up examples. # - added code to try and ensure the min and max values on the y axis # are always printed for the median area (middle). # - add code to do Dan's color mixing to get opaque colors in bars. # May 17, 2013 - make adjustment for publishing package # - adjust grey colors to allow a grey scale color pattern to be used. (based on # ColorBrewer "Greys" for 5 colors. # - fixed grey/gray colors issues with dots, etc. using outline colors. # - added circles around dots to make the grey standout more. # May 20, 2013 - Added "grays" as an equivalent palette name. # May 21, 2013 - Fix ref line color to mid-green, change reftext to black. # - check fill color for scat dot, fixed. 
# - changed scat dot median symbol from triangle to dot and filled with blakc. # - adjusted box positions on maptail, mapcum, and mapmedian titles. # - fixed grays to work with ref lines. # May 24, 2013 - finish clean up - fix micromapSTDefaults error during loading. # - Final Testing. # May 25, 2013 - fixed micromapSTDefaults error on initial load # - fixed arror warning by using > .005 as zero. # - moved up titles printing to let INTERRUPTED pdf build have titles. # May 28, 2013 - fix parameter checking for boxplot list. # - Added names check for box plot, # - Added "list" type check for box plot. # - Reorganized test to not cause a secondary error. # - Added Id.Text.adj parameter to details and rlAreaID to adjust text alignment. # June 2, 2013 - fix DotSE missing X1 variable - should be x. # - Added code to do proper capitalization of state abbreviations and full state names. # - Added code to intercept common names for Washington, D. C. and convert to "D.C." # June 3, 2013 - Released to CRAN. # June 4, 2013 - cran check does not handle automatic variable assignments (around line 3100.) # register them with R via globalVariable function to add them to the list for rcmd check. # During testing, the variables do not show up as globals and are protected within the # micromapST namespace. - re-released. # Nov. 27, 2013 - Correct the parameter check for segmented and centered bars to permit a # minimum of 2 data columns. # Jan 4-9, 2014 - The diagonal line added to the scatter plots must reflect equal x and y values. # Current line is diagonal to the box not the data. # - Add option to vary the segment bar width from small to larger from left to right for # the NormSeg, SegBar, and Centered SegBar glyphs. # - Changed method of setting up details variables within the micromapST namespace. # Originally, user had to provide a complete list of all of the details variables. If # one was missing or misspelled, no detection or correction. 
New method, starts by # assigning all of the variables from the default values. Then takes the provided details # list from the user and merges it into the already declared variables. If a variable # does not exist or is misspelled, it is caught by checking against the default list of names # and not processed. In the future, a similar structure will be used to check the # ranges or types of information to validate the user provided details variable values. # - Correct median dot in scatter dot plots to only appear in the 4 and 6 rows (just either side # of the median row. # - Update logic in sortVar option to correctly handle multiple sort columns. # - Add ability to reference data.frame columns by name in the col1, col2, col3 and sortVar # parameters. # - Enhanced parameter verification and error checking to help user understand the specific # problem and correct it fast. Don't allow R to abort if possible. # March 7, 2014 - Removed limit on the number of points in Time Series # - Add code for Rank glyph # - The time series line and confidence band are squeezed in the median row space and do not # properly show the data. The median time series data is plotted in the panel above and below # to median row to properly present the data using the same aspect ratio as the other data. # - Adjusted the defaults for the segbar, ctrbar, and normbar graphics to have no center dot # and fixed bar height. # July 12, 2014 - Corrected single and double quote marks usage in examples. # November 7, 2014 - Rewrote panelDesc argument checking. # November 1-7, 2014 - Updated logic to handle the number of areas dynamically and support # US States or US Seer areas and data. # - Added logic to handle the specification of the link row names as a column of the # area Data.frame columns instead of requiring the link to be the Abbr of the area # as the row.names of the statsDFrame data.frame. # April, 2015 - generalize package for NCI and CRAN release. Add additional border groups. 
# Work on the scaling issues for the larger maps and number of rows and columns. # July, 2015 - Updated code to handle new border group structures # - Add "***" to the beginning of each error message and restructuring the message with a new # message id, and to include name of the glyphs and the panel column number. # - Found error in multiple column sort feature. Rewrote code to handle. # - Found rank functions and code can not handle multiple columns. Implemented # rank code to only handle 1 column. But on new feature list. # - Updated code to work with Abbr, Alt_Abbr, Full Names, ID or Alias and # map them to the border Vis Files key value. This was done to handle # cases where the user border group may not have an abbreviation to use as # the link. If at least one exist, then it can be linked to the key. # - Updated code to correctly calculate the width of the mapxxx and id glyphs # columns using the basic font and text.cex sizes. Must update when scaling is # implemented. # - Modified the colors table to include a color for a 6th row in each group/rows # and two more shading colors for mapmedian and maptail to compensate for issues # when there is a median group/row with more than 1 row. # - Modified all glyphs to handle situations when an NA is present in the user data. # The general rule is not, all of the data or no plot. Ploting anything would # possibly lead to an incorrect reading by the user. # August 2, 2015 - Rewrote the mapping routine to properly handle holes when filling # the polygons in the right order and to draw the borders in the order of # Not Used polygons, background polygons, highlighted polygons, and active polygons. # This code also supported NotUsed sub-areas (color very light grey) and two color # highlights of sub-areas above and below the median when a map is used for the median # group row. # - Fixed problem with title parameter checking to handle 1 or 2 values in the vector. 
# - Tested 8.5x14 and 11x17 page sizes for Kansas, New York and UKIreland. UKIreland is # still very small but works. Noticed line weight need to be adjusted when images are # small. # - added two colors for the median map to show above and below clear. This is important # then the area has median group row with more than one row. The above and below # are shown on the same map, so must be distinquished. # - corrected the calculations and implementation of the number of Groups, number of rows # per group, number of median group, number of median rows to handle no median group (even # number of groups), a median group with 1 row, and a median group with > 1 row. Adjusted # the code in all glyphs to handle the new implementation. # - implemented MapPolySetup function to do common setup and calculations for all Map functions. # - added check to warn user if there are more data rows, then sub-areas in the border group. # There are move checks later to identify the extra data.frame rows to the user. # - remove any check, stop or warning if the number of data rows are less than then number # of sub-areas in the border group. # - Changed the selecting the number of rows per group pattern from a very large table to a # a calculation with a table override when needed. User is also allowed to specify a # pattern to override micromapST's calculation. # - changed titles on Mapmedian map from "Featured above/below" to "above median" and # "below median". Mapcum map from "Featured above/below" to "above/below panel rows" # Still thinking about the maptail titles. # - Implemented function to center box and text for column headers. # August 4, 2015 - Updated logic for x-Axis labeling. # August 8, 2015 - Fixed/Add ability to specify symbol for the ID glyphs (half implemented, now working.) # - Added details option "Map.Median.text" to allow the Median for Sorted Panels text to be changed. # - Added below column label ("lab3") to the map and id columns. 
# - Added the ability to change the areaParms variables via the details=list() feature. # - Corrected and re-implemented Id.Dot.pch feature for the ID glyph # August 16, 2015 - Corrected the reference text and line drawing logic - rewrote. Line can now be what's left # up to 1/2 inch in length. Text and line centered in column. # - Added options to specify type of scaling for the axis. Original = pretty function limited by # range of data. Scaled range = subtitle used to identify units and values scaled by the units. # Scaled number = each number in axis scaled, adjusted, and label with suffix to scaling (e.g., M, B, etc.) # Scaling below 1 is also done and properly identified using the International Standards of Units (SI) # symbols. # - Added option to stagger the axis labels on the X axis to prevent overlaying. # August 20, 2015 - changed default labeling algorithm from rpretty to wilkinson. ("o" to "w") # - Implement test X axis labeling and column titling function (DrawXAxisAndTitles) in # all glyphs. # - Reduced size of ID symbols by 20% - looks better. # - Added ConvLinesToUnits function to help convert line coordinates to Unit coordinates and # handle the offset if the bottom left corner is not 0,0. # - Fixed the refText and line problem to place the line in the middle of the text. # September 14, 2015 # - Add additional panelDesc column "adv" to support "new" parameters on a glyph column # basis. Column is a list of lists. The lists in the column is contains # new and old options/parameters. panelDesc column name is "adv". # Any of the old panelDesc columns can have values in the adv list. # - (FUTURE) add ability to detect if panelDesc is the original data.frame, # or the new list of list format. # - Cleaned up warning messages by adding "call.=FALSE" option to remove # calling routine information from warning. # - Started adding validation code for user provided details and colors. This # will later be applied to the glyph parameters set by the user. 
# September 19, 2015 # - constructed table of details Variables and properties to be used in verifying # the details variables (from system or user). The table also contains information # to permit translation of existing details variables into glyphs based variables. # January 20, 2016 # - Added ability to save list of called variable names for warnings and error messages. # saved the values in list in details. # - Added and tested "regions" call argument to allow only regions (l2) to be mapped # if no data in other regions. # - Added code to capture call variable names (not values) for use in warning messages. # - Added check for rowNames = NULL # February 20, 2016 # - Updated warning message identifiers and documentation to match. # - Corrected statsDFrame column checking routines to handle character numbers and # provided the correct warning messages. # - Add CheckParmColx function to properly handle checking statsDFrame column names # and numbers for rowNamesCol and sortVar call arguments. # February 29, 2016 # - Changed wilkinson labeling algorithm to extended. The option is also changed from "w" to "e". # The wilkinson algorithm generated to many grid lines and labels vs. what was # requested. # May 5, 2016 # - changed alias code to edit user strings to delete special characters, blanks (multiple, trailing and # leading), control characters, punctuation characters, and convert the string to all upper case. # Seer Stat has changed the default registry names to include "_"s instead of " " character between # the words. The extra editing neutralizes the impact. Function CleanString was added to handle lists # of registry names. # August 7, 2016 # - first line of column titles too close to plot area by about 1/2 a line. Found calculation off. # Re-implemented using table of line heights and intra line spacing requirements. # August 8, 2016 # - Started reimplementation of colSize call parameter in the panelDesc data.frame. 
Document feature, # Added code to validate parameter. Implemented code in panelLayout function. # August 10, 2016 # - Changed the min and max column sizes to 0.25 to 2.5 inches. # - Changed the calculation for the user coordinates width of a panel to include 1/2 the "usr" width of a # a character instead of a fixed amount to ensure the symbol for a dot or arrow head fits within the panel. # - Glyphs that don't use dots or symbols that occupy space around the low and high data # points were offset/padded resulting in the graphics incorrectly floating inside the graph. # Example: bar graphs that not start at left or right edge of graph. time series graphs # don't tough sides of the graph. All of these issues have been corrected to only pad # (expand graph X range) when required - dot, dotconf, dotsignif, dotse, arrow, scatdot. # Any graph that is anchored to the left or right edge is not padded - bar, segbar, normbar. # Changes made in DrawXAxisandTitles function using generalize call perameters. # August 12, 2016 # - Fixed reversed glyph header titles Lab1 and Lab2 problem. # August 13-16, 2016 # - Cleaned up the colSize implementation and added validate checks and warning messages. # Values of NA, "", and " " are acceptable in 'mapxxx', and 'id' columns. Cannot set colSize # for these columns. Other columns must have a numerical value from 0.01 to 200 to use # as the width proportion. Algorithm is each column gets "N1"/sum(all "Ns") percentage of the # available space is allocated to each column. If a column is below the minimum width, # it is set to the minimum. The calculation is then repeated minus the minimum width columns. # The column widths are then compared to the maximum width allows. Any columns over the # maximum are reduced to the maximum width. The algorithm is run one more time minus the # columns set to the minimum or maximum values. 
# - During the testing of the colSize feature when setting column to small sizes, it was # found the "extended" label algorithm does not behave well when the number of labels is set # less than 2. Also, zero labels were being lost. The general goal of the labeling algorithm # was changed to at a minimum request three labels, even on small columns. The number of labels # per inch was increased from 4 to 5. The algorthim was also modified to handle staggering of # labels when only one label is present. The routine now also gets an odd number of labels # when less or equal 7 labels are wanted. If the column is near the minimum width, any labels # outside the range of the data are stripped, except zero. If the column is over 1", # and > 7 labels, the range is increased to include the label values. # These are signicant changes and will be tested and monitored over the next couple of weeks and # tuned as needed. # - To help stablize the axis labeling, the extended and wilkinson algorithms will be compared. # - Update VisBorders structures and name table to add regional Vis Border support. Also updated all # border groups to new variable names and to support regions features. # - Renamed "regions" feature to "dataRegionsOnly" feature. # - Added "regionB" options to control overlaying region boundaries when "dataRegionsOnly" not active. # - Fixed mapping with region boundaries to do overlays in the correct order. # - Fixed correction of washington DC and all of its forms to a pure character # string with no punctuation. "DC" instead of "D.C." or "DISTRICT OF COLUMBIA". # - Added code to do the washington dc comparisons in all upper case only. # December 7, 2016 (releasing to CRAN) # - Added envir=environment() to all load and data functions # - Hide assign to .GlobalEnv in eval-parse # - Save and restore Sys.getlocale() to compensate for other # country settings that can interfer with the operation of the # package. 
# # # Used packages: RColorBrewer, stringr, R.rsp, labeling, # # Used internal packages: utils, graphics, R.utils, # # ######## ######## # # Copyrighted 2013, 2014, 2015, 2016 - by: Dan Carr, GMU and Linda Pickle and Jim Pearson of StatNet Consulting, LLC. # ######## ######## # # functions used from RColorBrewer: brewer.pal # # functions used from graphics: plot, lines, arrows, polygon, axis, text, mtext, boxplot, # points, legend, plot.new, plot.default, plot.design, plot.function, # plot.xy, plot.windows, abline, axTicks, barplot, matplot, # matpoints, title # # functions used from stats: qnorm # # functions used from grDevices: rgb, col2rgb # # functions used from stringr: str_trim, str_split, str_replace_all, str_sub # # functions used from labeling: extended, wilkinson, # ######## # # With the generalization of micromapST to cover other geographic area beyond the US, micromapST will # still be called micromapST. A separate function call has been added to help migrate # uses of the test/prototype SEER version "micromapSEER". The default border group will be # "USStatesBG" to support existing users of micromapST. # # Initial Variables that require setting before running this file: # # The current r directory <- location of the three micromapST source files # micromapST.r # panelFunctions.r # micromapDefSets.r # # The current data directory <- location of the supporting border Group datasets and test datasets # USStatesBG.rda # USSeerBG.rda # KansasBG.rda # NewYorkBG,rda # MarylandBG.rda # ChinaBG.rda # UtahBG.rda # UKIrelandBG.rda # SeoulSKoreaBG.rda # AfricaBG.rda # # Future plans are to do the county map for all U. S. States containing Seer Registries, # include a function to validate a user provided Border Group, and to provide functions or # guideance on how to charaterize a collection of boundaries. # # The following datasets must be included in the package to provide the boundaries: # # Each border group contains five R objects. 
These objects provide the unique # data for the border group's geographic areas, names, abbreviations, and numerical ID. # # The areaParms object provides defaults for several run parameters that tune micromapST # execution. The list of variables are: # bordGrp = a character vector - name of the border group. Must be the same # as the dataset filename minus the ".rda" extension. # Map.Hdr1 = a character vector - title header for the Map and ID glyphs. # This is set to the general name of the geographic area, e.g., "U. S." # or "Kansas". # Map.Hdr2 = a character vector - title header for the Map and ID glyphs. # This identifies the areas used in the linked micromap, e.g., "States" # or "Counties" # Map.L2Borders = a logical variable - if the L2VisBorders need to be overlaid on the # maps, this variable must be set to TRUE. So far, only the U. S. 18 # Seer Areas have required this feature. Most other border groups # will have this set to FALSE. (Old variable name = mapL2Borders) # Map.Aspect = a numerical value. The micromapST package does not know what the # correct aspect ratio is for the map boundaries. Rather than guess, # Map.Aspect is set to the map's aspect ratio when the boundary data # is converted into the micromapST boundary data format. The value # is used to control the width of the map glyph column to ensure # the map is properly presented. Only values between 0.5 and 2.0 are # allowed. This aspect is y/x (height / width) # # Map.MinH = Minimum height for the row if maps are included - units = inches. # Default is 0.5 inches. # Map.MaxH = Maximum height for the row if maps included - units - inches. # Default value is 1 inch. # # Id,Hdr1 = First line of ID glyph column title # Id.Hdr2 = Second line of ID glyph column title. 
# # areaUSData = a logical variable - if set to TRUE, the package assumes the geographic # areas and boundaries are the USStatesBG or USSeerBG datasets and will # overlay the first map in the column with labels for "AK", "HI", and "DC". # This variable should be set to FALSE for all other border groups. # # enableAlias = Some data may not contain the names or abbreviations contained in # the border group dataset. In the case of the U. S. Seer data, the # Seer Stat output has the area names hidden in the "Registry" label. # The alias feature provides a means of doing a partial match or # "contains" to link the data labels to the geographic objects. # This variable should be TRUE only for the USSeerBG border group. # In all other cases, it should be FALSE. # aP_Proj = proj4 string describing the project used on the boundary data. # aP_Units = x and y coordinates units of boundary data (lat-long, meters, kilometers) # aP_Regions = a logical variable - diaables or enables the regional area mapping feature. # If TRUE, the areasNamesAbbrsIDs data.frame must contain the information # to group sub-areas by regions. Indicates dataRegionsOnly can be used. # Map.RegBorders = Mostly an internal variable - indicated the RegVisBorders bounaries # should be drawn (TRUE). Works with the "regionsB" call option to # control regional area boundary overlay. # Map.L3Border - a logical variable - mostly for internal use - To indicate if # the L3 borders should be drawn. # # All variable names in the areaParms data.frame must be unique within the micromapST package. # # The areaNamesAbbrsIDs R object is a table of the full names, abbreviations, alias strings, and # numeric ID for each geographical area in the boundary dataset. The abbreviation is used as # the internal link between the data and the boundary of the area. 
The table provides a means # of allowing the user to use the area's full name, abbreviation, or the IDs as the area's label # in the data provided micromapST in the statsDFrame parameter. The full names, abbreviations, # and IDs must match entries in this table or the user is notified and data ignored. # See the documentation on the areaNamesAbbrsIDs for the data structure of this object and # the documentation on each border group for the values for that specific border group. # # The areaVisBorders R object contain sets of boundary data points for each area listed in the # areaNamesAbbrsIDs table. Since the space for the map is limited, these boundaries should be # very simplified or characterized to permit fast drawing and keep the size of the data to a # minimum. See the documentation on the areaVisBorders R object for more details on the structure # of this object. # # The L2VisBorders R object contains a set of boundary data points to outline a set of area # like U. S. states when the areaVisBorders represents subareas. This layer is overlayed # optionally and is only used in the USSeerBG border group, at the present time. # # The L3VisBorders R object contains the outline of the geographic area that contains the # the areaVisBorders' areas. This would be the outline of a country (U.S. or China) or a # state (Kansas, New York, Maryland). This provides a accent to the region's borders. # # Regional mapping feature allows a subset of an area (a collections of sub-areas) to # be mapped based on the data provided by the caller. Sub-areas in regions not # referenced in the statsDFrame are not mapped. When a subset is mapped, the L3VisBorders # and related L2VisBorders outlines are NOT drawn. The regional groupping is based # on the region field in the areaNamesAbbrsIDs table (regID). There are no boundaries # for regions. # # See the documentation on each object for its particular structure and usage. # # See the documentation on each border group for details. 
# ###### ######
#
#  Basic data structures to convey information and controls between main function and sub-functions.
#
#  mmSys$sDFName - name of the statsDFrame data frame provided by caller.  Not the data itself,
#          the name of the variable.
#
#  mmSys$pDName  - name of the panelDesc data frame provided by caller.  Not the data itself,
#          the name of the variable.
#
#
#  ######
#
#  gC contains the full information for each glyph column (gC).  The index is 1 to "n".
#  general items for all glyphs.
#
#  gC[j]$cIdx      - integer index of the current glyph column (1 to "n")
#
#  gC[j]$cTxt      - text version of the integer index of the current glyph column (1 to "n")
#
#  gC[j]$type      - glyph type
#
#  gC[j]$lab1      - character
#  gC[j]$lab2      - character
#  gC[j]$lab3      - character
#  gC[j]$lab4      - character
#  gC[j]$refText
#  gC[j]$refVal
#  gC[j]$col1Name  - statsDFrame column name
#  gC[j]$col1Num   - statsDFrame column number
#
#  gC[j]$col2Name
#  gC[j]$col2Num
#
#  gC[j]$col3Name
#  gC[j]$col3Num
#  gC[j]$panelData - data structure name for column in panelData.
#  gC[j]$...       - glyph specific parameters and variables (panelDesc expanded.)
#
##### ######
#
# Intent:
#   This function suppresses the following notes generated by "R CMD check":
#   - "Note: no visible binding for global variable '.->ConfigString'"
#   - "Note: no visible binding for '<<-' assignment to 'ConfigString'"
# Usage:
#   Add the following right in the beginning of the .r file (before the Reference
#   class is defined in the sourced .r file):
#      suppressBindingNotes(c(".->ConfigString","ConfigString"))
#
# suppressBindingNotes(variablesMentionedInNotes)
#   variablesMentionedInNotes : character vector of variable names to register.
#
#   For each name, a NULL binding is created in the global environment so the
#   static checker sees a binding for the variable.  NOTE: the assign() call is
#   deliberately built as a text string and executed via eval(parse(...)) so
#   that the literal ".GlobalEnv" reference is hidden from "R CMD check"
#   (see change log entry of December 7, 2016).  Do not "simplify" this to a
#   direct assign() call - that would re-expose the note this code suppresses.
#
suppressBindingNotes <- function(variablesMentionedInNotes) {
   for(variable in variablesMentionedInNotes) {
      wstr <- paste0("assign(variable,NULL,envir=",".GlobalEnv)")
      eval(parse(text=wstr))
   }
}
#
#
# ###### ######
#
# counter function definition in Global Environment to be accessible from all functions.
#
# NewCounter() returns a closure over a private counter "i" (starting at 0).
# Each call of the returned function advances "i" by one via "<<-"; the
# updated value is the (invisible) result of the assignment expression.
#
NewCounter <- function() {
   i <- 0
   function() {
      i <<- i + 1
   }
}
#
#
# ###### #####
#
# asc and chr
#
# chr(x) returns character value for "x".
# if x is a character, x is returned.
# if x is numeric, it is converted to character value
#
# chr(x) - return the character form of "x".
#   character input  : returned unchanged.
#   numeric input    : treated as a byte code and converted to the
#                      corresponding character (e.g., 65 -> "A").
#   anything else    : the NAK control character "\025" is returned as a flag.
#
chr <- function(x) {
   if (is.character(x)) {
      return(x)
   } else {
      if (is.numeric(x)) {
         as.character(rawToChar(as.raw(x)))
      } else {
         return("\025")
      }
   }
}

#
# asc(x) returns the numerical value for the character "x"
#
#   A numeric input is first converted to its character form; only the first
#   character of the string is used.  Input that is neither numeric nor
#   character yields NA.
#
asc <- function(x) {
   wX <- x
   if (is.numeric(wX)) {
      # numeric - turn into character
      wX <- as.character(wX)
   }
   if (is.character(wX)) {
      if (nchar(wX) > 1) { wX <- substr(wX,1,1) }   # get only one character
      # Convert the single character to its numeric code.  charToRaw yields
      # the byte in hex form, so strtoi with base 16 recovers the code value.
      # Bug fix: operate on the trimmed working copy wX, not the original x.
      # Using x bypassed the one-character truncation above (multi-character
      # input returned a vector) and crashed charToRaw() for numeric input.
      strtoi(charToRaw(wX),16L)
   } else {
      NA
   }
}

#
#
#### Global functions ######
#
# Update --- If a variable is used but does not seem to be set, RCMD
# generates an error.  This compensates for the dynamic reference.
#
gVarList <- c("lastLab2Space","lastLab3Space", "staggered")

suppressBindingNotes(gVarList)

#
# Create key global variables before they are referenced - These variables are referencable
# by all subroutines and functions in this package.
#
# Register the package's dynamically-created variables with R so that
# "R CMD check" does not flag them as undefined globals.  The names are
# grouped by the subsystem / glyph that creates them.
#
utils::globalVariables(c(
      # Call Parameters
      "sDFName", "pDName", "wSFName", "callVarList",
      # panel variables and parameters
      "numRows", "numGrps", "rowSep", "rowSepGap", "rowSize", "rowSizeMaj",
      "rowSizeMin", "rowSizeMx", "rowSizeMn", "colSepGap", "colSizeMax",
      "colSizeMin", "rcRatioMin", "rcRatioMax", "groupedRowSize", "groupedRowSep",
      "medGrp", "medGrpSize", "medRow", "medRowAbv", "medRowBlw",
      "ib", "ie", "sc", "pad", "padex", "padMinus",
      "topMar", "botMar", "botMarLegend", "botMardif", "borderSize",
      # System
      "detailsVariables", "varName", "mstColorNames",
      # Axis adjustments
      "mgpTop", "mgpBottom", "padjBottom", "mgpLeft", "leftMarAxis",
      "leftMar", "rightMar",
      # Axis Lab variables
      "staggered", "lastLab2Space", "lastLab3Space",
      # Call Parameters
      "ignoreNoMatch", "bordGrp", "bordDir", "grpPattern",
      # Counter functions
      "warnCnt", "stopCnt",
      # glyphs variables
      # General
      "Title.Line.1.pos", "Title.Line.2.pos", "Title.Line.2x.pos",
      "Title.Line.3.pos", "Title.Line.4.pos", "Title.Line.5.pos", "Title.cex",
      "Grid.Line.col", "Grid.Line.lwd",
      "Panel.Fill.col", "Panel.Outline.col",
      "Text.cex",
      "XAxis.L.mcex", "XAxis.M.mcex", "XAxis.S.mcex", "XAxis.Sp.mcex",
      "XAxis.offset", "XAxis.indent", "XAxis.nGridpIn", "XAxis.staggered",
      "XAxis.gapPC",
      "YAxis.cex", "YAxis.offset", "YAxis.nGridpIn", "YAxis.width",
      # Arrow
      "Arrow.Head.length", "Arrow.lwd", "Arrow.cex",
      "Arrow.Shadow.col", "Arrow.Shadow.lwd",
      "Arrow.Dot.pch", "Arrow.Dot.pch.size", "Arrow.Dot.pch.lwd",
      "Arrow.Dot.Outline", "Arrow.Dot.Outline.col", "Arrow.Dot.Outline.lwd",
      # Bar
      "Bar.barht", "Bar.Outline.col", "Bar.Outline.lwd", "Bar.Outline.lty",
      # Boxplot
      "BoxP.thin", "BoxP.thick", "BoxP.Use.Black", "BoxP.Median.Line",
      "BoxP.Median.col", "BoxP.Median.Dot.col", "BoxP.Median.Dot.pch",
      "BoxP.Median.Dot.cex", "BoxP.Median.Dot.lwd",
      "BoxP.Outline.col", "BoxP.Outlier.BW.col", "BoxP.Outlier.lwd",
      "BoxP.Outlier.cex",
      # Center Stacked Bars
      "CBar.varht", "CBar.two.ended",
      "CBar.Zero.Line.col", "CBar.Zero.Line.lwd", "CBar.Zero.Line.lty",
      # Center, Segmented, and Normalized Stacked Bars
      "CSNBar.barht", "CSNBar.Outline.col", "CSNBar.Outline.lwd",
      "CSNBar.Outline.lty", "CSNBar.First.barht", "CSNBar.Last.barht",
      # Dot, Dotsignif, Dotconf, Dotse
      "Dot.pch", "Dot.pch.size", "Dot.pch.lwd",
      "Dot.Outline", "Dot.Outline.col", "Dot.Outline.lwd",
      "Dot.Conf.pch", "Dot.Conf.pch.size", "Dot.Conf.pch.lwd", "Dot.Conf.lwd",
      "Dot.Conf.Outline", "Dot.Conf.Outline.lwd", "Dot.Conf.Outline.col",
      "Dot.SE", "Dot.SE.pch", "Dot.SE.pch.size", "Dot.SE.pch.lwd", "Dot.SE.lwd",
      "Dot.SE.Outline", "Dot.SE.Outline.lwd", "Dot.SE.Outline.col",
      # Dotsignif
      "Dot.Signif.pch", "Dot.Signif.pch.size", "Dot.Signif.pch.col",
      "Dot.Signif.pch.lwd",
      "Dot.Signif.Outline", "Dot.Signif.Outline.col", "Dot.Signif.Outline.lwd",
      "Dot.Signif.pvalue", "Dot.Signif.range",
      # Dotconf, Dotse
      "Dot.conf.pch", "Dot.conf.pch.size", "Dot.conf", "Dot.conf.lwd",
      "Dot.conf.size",
      # Id
      "Id.Hdr1", "Id.Hdr2", "Id.Title.1.pos", "Id.Title.2.pos",
      "Id.Start", "Id.Space", "Id.Cex.mod", "Id.Text.cex", "Id.Text.adj",
      "Id.Dot.pch", "Id.Dot.lwd", "Id.Dot.cexm", "Id.Dot.width",
      "Id.Dot.Outline.col", "Id.Dot.Outline.lwd",
      # map, mapcum, mapmedian, maptail
      "Map.Min.width",       # will become dynamic
      "Map.Max.width",       #
      "Map.Aspect",          # from areaParms
      "Map.L2Borders", "Map.RegBorders", "Map.L3Borders",
      "Map.MinH", "Map.MaxH", "Map.Lab.Box.Width", "Map.Median.text",
      "Map.Bg.col", "Map.Bg.Line.col", "Map.Bg.Line.lwd",
      "Map.Fg.Line.col", "Map.Fg.Line.lwd",
      "Map.L2.Fill.col", "Map.L2.Line.col", "Map.L2.Line.lwd",
      "Map.L3.Fill.col", "Map.L3.Line.col", "Map.L3.Line.lwd",
      "Map.Area.Spec.cex",
      # rank
      "Rank.width",
      # Support - refVal, refText
      "Ref.Val.col", "Ref.Val.BW.col", "Ref.Val.lwd", "Ref.Val.lty",
      "Ref.Text.col", "Ref.Text.BW.col", "Ref.Text.cex",
      # ScatDot
      "SCD.Bg.pch", "SCD.Bg.pch.size", "SCD.Bg.pch.fill", "SCD.Bg.pch.col",
      "SCD.Bg.pch.lwd",
      "SCD.Fg.pch", "SCD.Fg.pch.size", "SCD.Fg.pch.col", "SCD.Fg.pch.lwd",
      "SCD.Median.pch", "SCD.Median.pch.size", "SCD.Median.pch.fill",
      "SCD.Median.pch.col", "SCD.Median.pch.lwd",
      "SCD.Axis.cex", "SCD.xsc", "SCD.ysc", "SCD.hGrid",
      "SCD.DiagLine", "SCD.DiagLine.col", "SCD.DiagLine.lwd", "SCD.DiagLine.lty",
      # Normalized and Segmented stacked bar
      "SNBar.varht", "SNBar.two.ended", "SNBar.Middle.Dot",
      "SNBar.MDot.pch", "SNBar.MDot.pch.fill", "SNBar.MDot.pch.lwd",
      "SNBar.MDot.pch.size",
      "SNBar.MDot.pch.border.col", "SNBar.MDot.pch.border.lwd",
      # TS and TSConf
      "TS.lwd", "TS.Axis.cex", "TS.hGrid",
      # debug
      "MST.Debug"),
      "micromapST", add=TRUE)
#
# Would rather have these variable in the local "micromapST" environment.
#
# ###### ######
#
# GlobalEnv Level Functions / micromapST Namespace Functions
#    accessible by everyone, but can't access variables within caller's space.
#
# groupPanelOutline(panelGroup, j)
#    panelGroup : a panelLayout structure; panelGroup$dim[1] is the number of
#                 panel rows in the group.
#    j          : the glyph column number to outline.
#
#    For each row of glyph column "j", select the panel, scale it, and draw
#    its outline.  Used in the micromapST function - assumes 3 rows in the panels.
#
groupPanelOutline <- function (panelGroup, j )
   {
     iE <- panelGroup$dim[1]
     # seq_len() (instead of 1:iE) makes the loop a no-op when the group has
     # zero rows, rather than iterating over c(1, 0).
     for (i in seq_len(iE)) {
        panelSelect(panelGroup,i,j)   # select a space
        panelScale()                  # scale it (called for its side effect)
        panelOutline()                # outline it.
     }
   }

####
#
# Clean up strings - remove
#   1) special single and double quotes (open and closed)
#   2) tick mark
#   3) general punctuation (periods, etc.)
# Designed to allow strings that may have different types of quotes, apos. to be compared.
#
# ClnStr(x) : returns "x" with punctuation and typographic quote characters
#   (U+2018 through U+201F) removed and leading/trailing whitespace trimmed.
#
ClnStr <- function(x) {
    z <- gsub("[[:punct:]\u2018-\u201F]", "", x, perl=TRUE)
    z <- str_trim(z)
    return(z)
  }

#
#### ####
#
# Find shortest format for Axis labels
#
# Test the following formats on the Axis Labels and determine
# the narrowest format.
# The formats checked are:
#    fixed format (up to 1 decimal place)
#    general format (including scientific notation)
#    fixed with KMB modification
#    fixed with "in thousands" type label
#
FindShorest <- function(x, w) {
   # Determine candidate axis label sequences for a column of width 'w'.
   #   x - vector of numbers to be labeled
   #   w - width of the target column (inches)
   # NOTE(review): this function is an acknowledged unfinished stub - it
   # computes two candidate label sets (wilkinson/extended, presumably from
   # the 'labeling' package - confirm) but never compares their widths.
   n  <- as.integer(w / 4)      # number of labels required
   xr <- range(x)               # get range of the values
   if (!odd(n)) n <- n + 1      # force an odd number of labels
   xW <- wilkinson(xr[1], xr[2], n, mrange=c(n/2, n))
   xE <- extended( xr[1], xr[2], n, w = c(0.25, 0.2, 0.5, 0.05))   # simp, cover, densi, legible
   # Function is incomplete...
}
#
####

####
#
# is.Color takes a hex string, the name of a color (from grDevices::colors()),
# or a palette number and validates it as a color variable.
# TRUE - is a valid color, FALSE - not a color.
#
# Inputs: values can be any color name that matches the grDevices::colors()
#     name list, a 6 or 8 character hex string starting with a "#" character,
#     or the palette color number (1 to 8) as integer or character.
#
# Examples: "white", "red", "lightgreen", "#232323", "#234Ad3", or "#FFDDCC80"
#     1, or "1"
#
# On hex strings, the alpha value is optional (last 2 hex digits).
#
is.Color <- function(x) {
   # Vectorized wrapper: one logical per element of x (names preserved).
   vapply(x, is.Color2, logical(1))
}
#
####

####
#
# Color string to hex string conversion (handles vectors of values).
# Returns "#RRGGBB" strings on success; on failure returns the "try-error"
# object from the col2rgb() attempt (historic behavior, kept for callers).
#
col2hex <- function(cname) {
   res <- try(colMat <- col2rgb(cname), silent=TRUE)
   # inherits() is the correct test: class(res) may have length > 1.
   if (!inherits(res, "try-error")) {
      rgb(red=colMat[1,]/255, green=colMat[2,]/255, blue=colMat[3,]/255)
   } else {
      res
   }
}
#
####

####
#
# Single value test function for colors.
#
# The test is done against the standard color list and the micromapST color
# list (mstColorNames).  The value can be a color name, a hex string, or a
# palette index (numeric or character "1".."8").
#
is.Color2 <- function(x) {
   # Palette index: a numeric value is a relative color number in the palette.
   if (is.numeric(x)) {
      if (x < 0) {      # cannot be a negative value
         warnCnt()
         xmsg <- paste0("***0910 is.color2 The color value must be a positive number. Value seen:", x, "\n")
         stop(xmsg, call.=FALSE)
      }
      x <- as.character(x)    # convert palette number to character form
   }
   if (is.factor(x)) x <- as.character(x)
   if (!is.character(x)) {
      return(FALSE)           # not numeric, factor, or character -> not a color
   }
   # Fall back to an empty list when the package global mstColorNames is not
   # available (lets the helper work outside the loaded package).
   mstC <- if (exists("mstColorNames")) mstColorNames else character(0)
   if (!is.na(match(x, c(as.character(1:8), grDevices::colors(), mstC)))) {
      return(TRUE)            # palette number or known color name
   }
   # Last resort: if col2rgb() can decode it (e.g. "#RRGGBB[AA]"), it's a color.
   res <- try(col2rgb(x), silent=TRUE)
   !inherits(res, "try-error")
}
#
####

####
#
# Test if "x" is between or equal to a and b (order of a and b does not
# matter).  Vectorized in x; a and b are scalars.
#
is.between <- function(x, a, b) {
   if (a > b) {
      (x >= b & x <= a)
   } else {
      (x >= a & x <= b)
   }
}
#
####

####
#
# Test if "x" is within or equal to the range "r".
# "r" must be a vector of length 2; otherwise a warning is issued and a
# vector of FALSE (one per element of x) is returned.
#
is.between.r <- function(x, r) {
   if (length(r) != 2) {
      warnCnt()
      xmsg <- "***0491 INB is.between.r The r range value is not a vector with length of 2. FALSE returned."
      warning(xmsg, call.=FALSE)
      return(rep(FALSE, length(x)))    # not a valid range
   }
   is.between(x, r[1], r[2])
}
#
####

####
#
# Testing function - print out key par() plot parameters to the console.
# Debugging aid only; side effects are the cat() lines (and par() may open
# the default graphics device if none is active).
#
printPar <- function() {
   cFin <- par("fin")     # figure region size
   cat("cFin:", cFin, " (w,h)\n")
   cFig <- par("fig")     # figure region coordinates
   cat("cFig:", cFig, " (x,x,y,y)\n")
   cPin <- par("pin")     # plot region size
   cat("cPin:", cPin, " (w,h)\n")
   cPlt <- par("plt")     # plot region coordinates
   cat("cPlt:", cPlt, " (x,x,y,y)\n")
   cMai <- par("mai")     # margins in inches
   cat("cMai:", cMai, " (b,l,t,r)\n")
   cMar <- par("mar")     # margins in lines
   cat("cMar:", cMar, " (b,l,t,r)\n")
   cUsr <- par("usr")     # user coordinate extremes
   cat("cUsr:", cUsr, " (x,x,y,y)\n")
   cPs  <- par("ps")      # point size
   cat("cPs :", cPs, " pt.\n")
}
#
####

####
#
# odd - check if a number is odd (TRUE) or even (FALSE).  Vectorized.
# (x %% 2 == 1 is TRUE only for odd integers; non-integers yield FALSE.)
#
odd <- function(x) {
   x %% 2 == 1
}
#
####

####
#
# CleanString - clean up a character string: collapse runs of whitespace,
# control characters and punctuation to a single blank, trim the ends, and
# upper-case the result (for case/punctuation-insensitive comparisons).
# Base gsub()/trimws() replace the previous stringr calls - the POSIX
# classes are identical, so behavior is unchanged and the helper no longer
# needs stringr.
#
CleanString <- function(wstr) {
   nstr <- toupper(trimws(gsub("[[:space:][:cntrl:][:punct:]]+", " ", wstr)))
   return(nstr)
}
#
####

####
#
# Scaler1 - find scale for range and appropriate axis sub-title.
#
# Find the size of the maximum value, then select the scaling label and
# division factor to use on the data.
#
Scaler1 <- function(var) {
   # var : a single numeric axis-label value (a negative value is judged by
   #       its absolute magnitude).
   # Returns a character vector of length 2: c(<divisor>, <axis sub-title>),
   # e.g. c("1000","in thousands"); c("1","") when no scaling applies.
   var1 <- as.numeric(var)
   if (var1 < 0) var1 <- abs(var1)

   vc <- c(1, "")     # default: divide by 1, no sub-title

   if (var1 > 1) {
      varLog <- as.integer(log10(var1))    # order of magnitude
      # BUG FIX: the original indexed switch() directly with varLog, so
      # varLog == 0 (values < 10) and varLog > 14 returned NULL instead of
      # c("1","").  Guard both cases and keep the default for them.
      if (varLog >= 1 && varLog <= 14) {
         vc <- switch(varLog,
            c(1,""),                               #  1 - < 100
            c(1,""),                               #  2 - < 1,000
            c(100,"in hundreds"),                  #  3 - < 10,000
            c(1000,"in thousands"),                #  4 - < 100,000
            c(10000,"in ten thousands"),           #  5 - < 1,000,000
            c(100000,"in hundred thousands"),      #  6 - < 10,000,000
            c(1000000,"in millions"),              #  7 - < 100,000,000
            c(10000000,"in ten millions"),         #  8 - < 1,000,000,000
            c(100000000,"in hundred millions"),    #  9 - < 10,000,000,000
            c(1000000000,"in billions"),           # 10 - < 100,000,000,000
            c(10000000000,"in ten billions"),      # 11 - < 1,000,000,000,000
            c(100000000000,"in hundred billions"), # 12 - < 10,000,000,000,000
            c(1000000000000,"in trillions"),       # 13 - < 100,000,000,000,000
            c(1,""))                               # 14
      }
   } else {
      # Values <= 1: pick the first power-of-ten bucket the value reaches.
      # (This table replaces the original repeat{} chain, which looped
      # forever for var == 0 and for non-zero values below 1e-17.)
      divs <- c(1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9, 1e-10,
                1e-11, 1e-12, 1e-13, 1e-14, 1e-15, 1e-16, 1e-17)
      labs <- c("in the tenth", "in the hundredth", "in the thousandth",
                "in the ten thousandth", "in the hundred thousandth",
                "in the millionth", "in the ten millionth",
                "in the hundred millionth", "in the billionth",
                "in the ten billionth", "in the hundred billionth",
                "in the trillionth", "in the ten trillionth",
                "in the hundred trillionth", "in the quadrillionth",
                "in the ten quadrillionth", "in the hundred quadrillionth")
      hit <- which(var1 >= divs)
      if (length(hit) > 0) {
         vc <- c(divs[hit[1]], labs[hit[1]])
      }
   }
   # vc <- c(divisor, <axis sub-title string>)   (coerced to character)
   return(vc)
   # need to add code to handle width range of number, getting duplicates at low end.
}
#
####

####
#
# Scaler2 ("Alt_Scaler")
#
# Find the scale of the number (not a list of numbers), find the divisor and
# apply it, change the number to a string, and append the scale character to
# the end of the string.
#
# Need to add logic to convert labels back to numbers and return both.
#
# var is a vector of numeric values for the Axis labels.
# lower is a logical flag. If FALSE, the resulting strings are returned as is.
#       If TRUE, the resulting strings are converted to lower case.
#
# Scaler2 - scale a single number into a short suffixed string.
#   var   : a single numeric value (vectors are not supported here).
#   lower : if TRUE the result string is lower-cased (e.g. "5k").
# Returns the value as a character string with a K/M/B/T suffix for large
# magnitudes or a metric d/c/m/u/n/p/f suffix for small ones.
#
Scaler2 <- function(var,lower=FALSE) {
     var1 <- as.numeric(var)
     minusFlag <- ""
     if (var1 < 0) {
        # save fact the number was minus
        minusFlag = "-"
        var1 <- abs(var1)
     }
     vc <- c(1,"")      # default: divisor 1, no suffix
     var2 <- var1
     if (var1 != 0) {   # number zero, quick exit
        varLog <- as.integer(log10(var1))
        #cat("varLog:",varLog,"\n")
        if (varLog != 0) {
           if (varLog > 0) {
              # NOTE(review): switch() indexed with varLog returns NULL for
              # varLog > 15 (values >= 1e16), which makes var2 a zero-length
              # value below - confirm and guard if such magnitudes can occur.
              vc <- switch(varLog,       # 0 - < 10  =>  [0.10000000001 to 10)
                 c(1,""),                # 1 - < 100 => [10 to 100)
                 # hecto (hunderds)
                 c(1,""),                # 2 - < 1,000 => [100 to 1000)
                 # kilo (thousands)
                 c(1000,"K"),            # 3 - < 10,000 => [1,000 to 10,000)
                 c(1000,"K"),            # 4 - < 100,000 => [10,000 to 100,000)
                 c(1000,"K"),            # 5 - < 1,000,000 => [100,000 to 1,000K)
                 # mega (million)
                 c(1000000,"M"),         # 6 - < 10,000,000 => [1,000K to 10,000K)
                 c(1000000,"M"),         # 7 - < 100,000,000 => [10,000K to 100,000K)
                 c(1000000,"M"),         # 8 - < 1,000,000,000 => [100,000K to 1,000M)
                 # giga (billion)
                 c(1000000000,"B"),      # 9 - < 10,000,000,000 => [1,000M to 10,000M)
                 c(1000000000,"B"),      # 10 - < 100,000,000,000 => [10,000M to 100,000M)
                 c(1000000000,"B"),      # 11 - < 1,000,000,000,000 => [100,000M to 1,000B)
                 # tera (trillion)
                 c(1000000000000,"T"),   # 12 - < 10,000,000,000,000 => [1,000B to 10,000B)
                 c(1000000000000,"T"),   # 13 - < 100,000.000,000,000 => [10,000B to 100,000B)
                 c(1000000000000,"T"),   # 14 - < 1,000,000,000,000,000 => [100,000B to 1,000T)
                 c(1,"")
                 )
              var2 <- var1/as.numeric(vc[1])
           } else {
              # negative log values are small numbers, so invert to 1 to N
              varLog <- (-varLog)   # (-1 => 1)  NOTE(review): unused below.
              # NOTE(review): this repeat{} has no terminating default - a
              # non-zero value below 1e-15 loops forever.  Confirm inputs are
              # always >= 1e-15 or add a final break.
              repeat {
                 vc <- c(1,"")
                 if (var1 >= 0.1) {
                    # 0.999999 => to >= 0.1 -> 9.99999 -> 1.0
                    vc <- c(10,"d")        # deci
                    break
                 }
                 if (var1 >= 0.01) {
                    # 0.0999999 => to >= 0.01 -> 9.99999 -> 1.0
                    vc <- c(100,"c")       # centi
                    break
                 }
                 if (var1 >= 0.001) {
                    # 0.00999999 => to >= 0.001 -> 9.99999 -> 1.0
                    vc <- c(1000,"m")      # milli
                    break
                 }
                 if (var1 >= 0.000001) {
                    # 0.000999999 => to >= 0.000001 -> 999.999 -> 1.0
                    vc <- c(1000000,"u")   # micro
                    break
                 }
                 if (var1 >= 0.000000001) {
                    # 0.000000999999 => to >= 0.000000001 -> 999.999 -> 1.0
                    vc <- c(1000000000,"n")        # nano
                    break
                 }
                 if (var1 >= 0.000000000001) {
                    # 0.000000000999999 => to >= 0.000000000001 -> 999.999 -> 1.0
                    vc <- c(1000000000000,"p")     # pico
                    break
                 }
                 if (var1 >= 0.000000000000001) {
                    # 0.000000000000999999 => to >= 0.000000000000001 -> 999.999 -> 1.0
                    vc <- c(1000000000000000,"f")  # femto
                    break
                 }
              }
              var2 <- var1*as.numeric(vc[1])
           }
        }
     }
     #cat("minus:",minusFlag," vc:",vc,"\n")
     # format the scaled value (up to 4 significant digits) and append suffix
     cvx <- paste0(minusFlag, str_trim(formatC(var2,format="fg",width=5,digits=4,drop0trailing=TRUE)),vc[2])
     if (lower) { cvx <- tolower(cvx) }
     return(cvx)
     # Need to check to see what happens if we have lowe end numbers that may be duplicated.
  }
#
####

####
#
# simpleCap - capitalize each word in a phrase and remove "."s, "_"s, and
# extra blanks.  Not vectorized - apply over a vector (e.g. with vapply).
#   x - a single character string; returns the re-capitalized string.
#
simpleCap <- function (x) {
     s <- strsplit(x,"[ ._]")[[1]]     # split on boundaries " ", "." or "_".
     s1 <- s[s != ""]                  # skip empty strings
     paste0(toupper(substring(s1,1,1)),tolower(substring(s1,2)),collapse=" ")
  }
#
# Alternative:
#   gsub("(^|[[:space:]])([[:alpha:]])", "\\1\\U\\2", name, perl=TRUE)
#
####

#####
#
# plotPoint - takes a given x,y and any type of point (0:18, 19:25, > 32 or
# character) and correctly plots it at x,y.  The outline parameters are
# required in case the symbol is a fillable one (pch 19:25) with a border.
#   ppX, ppY      - coordinates of the point
#   ppPch         - pch symbol number, or a character (first char is used)
#   ppColor       - symbol (fill) color
#   ppSize        - symbol size (cex)
#   ppLwd         - line width for line-drawn symbols (pch 0:18)
#   ppOutline     - logical: draw a border around filled symbols (pch 19:25)?
#   ppOutline.col - border color
#   ppOutline.lwd - border line width
# Side effects only: draws via graphics::points().
#
plotPoint <- function(ppX, ppY, ppPch, ppColor, ppSize, ppLwd, ppOutline, ppOutline.col, ppOutline.lwd)  {
     #
     # Call parameters: pchValue, x, y, pch.size, outline.lwd, outline.col, mstColor,
     #
     pchValue <- ppPch
     # non-numeric pch values yield NA here (warning suppressed on purpose)
     suppressWarnings(chrValue <- as.numeric(pchValue))
     if (is.na(chrValue)) {
        # the pch value is not a numeric - check for character
        if (is.character(pchValue)) {
           # character type value. Get first character. assume > 31
           pchValue <- str_sub(str_trim(pchValue),1,1)
           points(ppX, ppY, pch=pchValue, cex=ppSize, col=ppColor )
           #cat("points of character:",pchValue)
        } else {
           # set to default since we can't decode it. Set to numeric value.
           chrValue <- 21
           #cat("not a character-typeof:",typeof(pchValue)," setting chrValue to 21.","\n")
        }
     }
     if (!is.na(chrValue)) {
        #cat("numeric - typeof:",typeof(pchValue), " ", typeof(chrValue)," ",typeof(chrValue)," ",pchValue," ",chrValue,"\n")
        # have a numeric value (still), got conversion - 0:255 range.
        # if it's NA, it's character and has been plotted.
        if (chrValue > 31) {
           #cat("chrValue > 31 - normal points\n")
           # normal symbols (numeric) (no border)  - > 31 characters
           points(ppX,ppY,pch=chrValue, cex=ppSize, col=ppColor )
        } else {
           # <= 31
           if (chrValue > 25) {
              # 26:31 -> not used character use default
              chrValue <- 21
              #cat("char 26:31 not used -> use default 21\n")
           }
           if (chrValue > 18) {
              # 19:25 value characters.
              # Dot.Conf.Outline set by user or by BW/Greya/Grays color scheme
              if (ppOutline) {
                 # 19:25 with outline around symbol
                 #cat("19:25 -> filled with borders symbols - outline ON \n")
                 points(ppX, ppY, pch=chrValue, cex=ppSize, lwd=ppOutline.lwd, col=ppOutline.col, bg=ppColor )
              } else {
                 # 19:25 with no outline (border)
                 #cat("19:25 -> filled with borders symbols - outline OFF \n")
                 points(ppX, ppY, pch=chrValue, cex=ppSize, col=NA, bg=ppColor )
              }
           } else {
              # 0:18 symbols - line drawings
              #cat("0:18 symbols - standard print.\n")
              points(ppX, ppY, pch=chrValue, cex = ppSize, lwd = ppLwd, col = ppColor )
           }
        }
     }
  }
#
# end of point ploter.
#
#####

####
#
# micromapSEER - to support previous users of the micromapSEER NCI package.
# Thin wrapper: forwards everything to micromapST with the USSeerBG border
# group preselected.
#
micromapSEER <- function(statsFrame,panelDesc,...) {
     micromapST(statsFrame,panelDesc,..., bordGrp="USSeerBG", bordDir=NULL)
  }
#
####

####
#
# Get micromapST Version - returns the package version/build string.
#
micromapST.Version <- function() {
     return ("micromapST V1.1.1 built 2016-12-07 11:02am")
  }
#
####

####
#
# micromapST
#
# Using the technique of setting parameters to NULL. Later during verification, if
# NULL, set to the default. If not NULL, then verify the parameters value.
#
#
micromapST = function(
    statsDFrame,
    panelDesc,
    rowNamesCol = NULL,       # Name of name link column.
rowNames = NULL, # default = "ab" ### modify to SEER IDs sortVar = NULL, # default = sort on plotNames values ascend = TRUE, # default = ascending sorting order title = c("",""), # default = empty plotNames = NULL, # default = "full" ### modify to SEER Abv and Names axisScale = NULL, # axis Scale Method, default = "e" -> extended staggerLab = NULL, # stagger Axis Labels, default = FALSE bordGrp = NULL, # border and names group to use with micromapST, Def = "USStatesBG" bordDir = NULL, # data directory containing the bordGrp .RDa file to use. # If null or NA, a DATA statement is used to load the # bordGrp from the included package datasets. dataRegionsOnly = NULL, # when regions are defined, permit package to map only regions containing data. Default=FALSE, regionsB = NULL, # when regional boundaries are present, map regional overlays. Default = FALSE. grpPattern = NULL, # Override areas per panel/group pattern ignoreNoMatches = FALSE, # How to handle statsDFrames that don't match. colors = NULL, # Override colors structure details = NULL ) # Override details parameters. { # # Routine: micromapST (and micromapSEER) # # Created by: Dr. Dan Carr # Updated and Extended by: Jim Pearson, April 20, 2009 # Updated and Extended by: Jim Pearson, August 28, 2012 # Updated and Extended by: Jim Pearson, May and June, 2013 # Updated and Extended by: Jim Pearson, Nov, 2013 # Updated and Extended by Jim Pearson, Jan, 2014 # Updated and Extended by: Jim Pearson, March, 2014 # Updated and Extended by: Jim Pearson, October-November, 2014 # Updated impacted every function and feature of the package to generalize the panel layouts. # Updated and Extended by: Jim Pearson, December 2014 and January 2015 # Updated and Extended by: Jim Pearson, March 2015, generalized the package for other geospatial areas. # and refined the scaling and sizing of the rows and columns. 
# Updated and Extended by: Jim Pearson, September, 2015 and February, 2016 # Updated and Extended by: Jim Pearson, November, 2016 # # Packaged by: Jim Pearson # # Dependencies: micromapGSetDefaults # $colors # $details # micromapGSetPanelDef # panelFunctions.r # # Included bordGrp DataSets: # USStatesBG - equivalent to original micromapST setup # USSeerBG - new setup for borders and behavior for US Seer Areas. # KansasBG - new setup for borders and behavior for Kansas County Areas. # NewYorkBG - new setup for borders and behavior for New York County Areas. # MarylandBG - new setup for borders and behavior for Maryland County Areas. # ChinaBG - new setup for borders and behavior for China. # UKIrelandBG - new setup for borders and behavior for UK-Ireland area # UtahBG - new setup for borders and behavior for Utah County Areas # SeoulSKoreaBG - net setup for borders and behavior for the districts in the city of Seoul South Korea. # AfricaBG - net setup for borders and behavior for the countries of Africa. # # Each contain the following DataFrames, Lists and Vectors:: # Run Parameters: areaParms # Data Level Names, Abbrs. IDs, and Labels: areaNamesAbbrIDs (Old stateNamesFips) # Data Level Boundaries: areaVisBorders (Old stateVisBorders) # L3 (national) Level Boundaries L3VisBorders (Old stateNationVisBorders) # # L2 (state) Level Boundaries (Optional) L2VisBorders (Old stateNationVisBorders) # # Currently the L2 Boundaries are only used with the "USSeerBG" border group at this time. # # If L2 Boundaries are not included in the bordGrp, the L3 Boundaries are copied into # the L2 boundaries as a place holder. # # Source Files: panelFunctions.r, micromapDefSets.r # ##### ##### # # # Call Parameters: # # Defaults List for call simulation # statsDFrame <- data # panelDesc <- panel description data.frame or panel description list of lists. 
# rowNames <- "ab" # global # sortVar <- NULL # global # ascend <- TRUE # global # title <- c("titles") # global # plotNames <- "full" # global and glyph # axisScale <- "e" # new extended method - global and glyph # staggerLab <- FALSE # global and glyph # colors <- NULL # global # details <- NULL # global and glyph # bordGrp <- "USStatesBG" # global # bordDir <- NULL # global # ignoreNoMatches <- FALSE # global # grpPattern <- NULL # global - default = calculated row / panel pattern # regionsB <- NULL # global - default = FALSE # dataRegionsOnly <- NULL # global - default = FALSE # # colors and details are used to override/modify the basic default structure for the colors # and the operational details information. # # ##### # # statsDFrame data.frame of area ID and data for micromaps. # # rownames must be area abbreviations, names, or fips codes # # Provides the data for the dot, dotConf, dotSE, dotSignif, arrows, bars, # segbar, ctrbar, and normbar glyph panels. # # Not used for boxplots or time series column panels. Pointers to their # data is provided in the panelDesc data.frame. # # The statsDFrame must have the area's abbr, name or ID code (like fips code) as # the rownames of the data.frame. As an alternate a column can contain the # area's identifier and the "rowNameCol" parameter can be used to point to # the column. Once the column is verified, it is assigned to the rownames # of the statsDFrame. # # The data.frame must be at least 2 columns for some of the functions # in R. To compensate for possible 1 column data.frames, a column of zero # is appended to the right side of the data.frame to ensure there is always # 2 columns. (Work-a-round) # # An example of the problem: # When the structure is ordered xxx[ord,] and then assigned to the working # variable "dat", the dimensions are preserved. # If the data.frame has only one column, the ordering and assigned, # strips the rownames and leaves the dim(dat) = NULL. 
# # The numerical data in the statsDFrame data frame may be in a numerical vector # or a character vector. If the data is found to be a factor, it is converted to # a character vector. If the data is a character vector, then the format of the # numbers is validated. The acceptable numerical value formats are: # # 1, 1.1, 0.1, .1, +1, +1.1, -0.1, -.1, -13434.3 -1234, # 1.1e+01, 1e+01, 0.1e+01, 1e-1, 1.12355e-01, +1.23e+99, # 1,000; -1,343; +1,234; 23,235.00; -23,234.00001 # # Errors will be flagged if there is more than 3 digits between commas and commas or # decimal point, the exponent value is greater than 2 digits, a space is found # between any parts of the number, etc. # # The name of the user provided statsDFrame data frame is stored in # callVarList$statsDFrame variable for later reference. # ###### # # panelDesc data.frame # data frame for panel descriptions/definitions # Example # panelDesc = data.frame( # type=c('mapcum','id','dotconf','dotconf'), # manditory column # lab1=c('','','White Males','White Females'), # recommended # lab2=c('','','Rate and 95% CI','Rate and 95% CI'), # optional # lab3=c('','','Deaths per 100,000','Deaths per 100,000'), # optional # lab4=c('','','',''), # col1=c(NA,NA,2,9), # dependent on "type" # col2=c(NA,NA,4,11), # dependent on "type" # col3=c(NA,NA,5,12), # dependent on "type" # colSize=c(NA,NA,1,1), # rmin=c(NA,NA,NA,1), # rmax=c(NA,NA,NA,5), # refVals=c(NA,NA,NA,wflungbUS[,1]), # optional # refTexts=c(NA,NA,NA,'US Rate'), # optional # panelData=c('','','',''), # required if boxplot or time series used. # adv=list('',list(a=v),'','') # advanced parameters # ) # # The first description row describes the first column of panels # an so on. This is a candidate for change since each column # describing a column avoids a mental transposition. # # The name of the user provided panelDesc data frame (or list) is stored in # callVarList$panelDesc variable for later reference. 
# # The alternate form of the panelDesc variable is a list of list. # panelDesc is a list. Each glyph column in the linked micromap is represented # by a list in this list. The glyph column list contains all of the # panelDesc variable related and valid for the glyph indicated in the type= variable # in this list. A example is provide at the end of the discussion on the panelDesc # variabls below. # # The type parameter must be present for each panel column. The other parameters are optionals. # However, if a parameter is required for any column, it is present for all columns. # If not used by a column, the parameter's value for that column should be set to "NA". # # type refers the graphic panel type to be used. The valid types are # "map", "mapcum","maptail","mapmedian", for maps # "id", for area ids # "dot", "dotse","dotconf", "dotsignif" for dot plots # "arrow", for arrow plots # "bar", for simple bar plots # "ts", "tsconf", for time series plots # "scatdot", for scatter dot plots # "normbar","segbar","ctrbar", for stacked bar plots # "boxplot", for box plot # "rank" for ranking (not fully implemented) # # For non-highlighted contours: # map accumulates areas top to bottom # maptail accumulates areas outside in # mapMedian feature above median area above the median and vis versa # # bar will accept negative values and plot from 0 in that direction. # # col1, col2, col3 # These values idenfity the column numbers oe names in statsDFrame to be # used as data for most of the panel glyph types. They are used by: # "dot", "bar", "dotse", "dotsignif", "dotconf", "scatdot", # "segbar", "ctrbar", "normbar" # # Panel types using only one column parameter (one data item) are: # # dot: col1 = dot value (estimate) # bar: col1 = bar height from zero (0) # # Panel types using two column parameters (two data items) are: # # dotse, dotsignif, arrow, and scatdot glyphs. 
# # dotse: col1 = dot value (estimate), col2 = standard error value # dotsignif: col1 = dot value (estimate), col2 = P Value for dot value # arrow: col1 = starting value, col2 = ending value for arrow. The arrow head # is on the ending value. # scatdot: col1 = "x" value of dot, col2 = "y" value of dot. # # Panel types using two column parameters to specify a range of data columns are: # # segbar, ctrbar, normbar: col1 = first data column in statsDFrame, # col2 = last data column in statsDFrame. The data from # columns col1 to col2 are used as the length (values) for each # stacked bar segment. The number of data columns must be between # 3 to 9. # # Panel type dotconf using three column parameters: (col1, col2, col3): # # dotconf: col1 = dot value (estimate), col2 = lower bound and col3 = upper bound # # Panel following types do not requiring any column parameters: # # boxplots uses the "panelData" vector in panelDesc to provide the name of a saved # boxplot structure. The boxplot structure is created by saving the # results of aboxplot(...,plot=F) call. # # ts and tsconf use the "panelData" vector in the panelDesc to obtain the name of # a matrix the data for the time series. The name represents a array(51,"x",4). # The first dimension represents the states (51) for the US # or the number of areas in the border data. The number of entries must # match the number of entries in the statsDFrame. The second dimension # represents the number of samples in the time series. The third dimension # are the "x", "low.y", "y", and "high.y" values for each sample. # For ts glyphs, the "low.y" and "high.y" values are ignored, but required. # colSize # Specifies the proportional size of a glyph column in relation to the other glyph columns. # This is a numeric vector with one element for each glyph column. The sum of the vector # is used as the denominator to calculate the percentage of available width is to be allocated # to the column. 
For example: colSize = c(NA, NA, 10, 10, 5, 15). The first two columns are # map and id glyphs and are not involved in this feature. The remaining 4 columns have a total # value of 40. The percentage for each column is 25%, 25%, 12.5% and 37.5% = 100%. If 4" of # space is available, then the width of each column will be 1", 1", 0.5", and 1.5". # # # rmin, rmax # Specify the min and/or max values for the X axis range for the any of the graphic # glyphs. If no value is specified, the package will use the range of the # data provided. NA must be used when a value is not being specified. # The user provide range is checked against the range of the data to make sure # all of the data is contained in the range. rmin must be less than rmax. # (in planning stages) # # lab1, lab2 # Two label lines at the top of columns. Use "" for blank, not NA or MULL. # # lab3 # One label line at the bottom of a each column, typically measurement units. # Supported under the "map" and "id" columns for use as a sub-title. # # lab4 # One label line for used with the Y axis on each panel. Only used with time series and ScatDot panels. # # refVals # P-2010/07/23 changed variable from refvals to refVals # # to be consistant. # name of objects providing a reference values shown # as a line down the column # # refTexts # JP-2010/07/23 - New # texts to be used as the legend for the reference values. # If refTexts for column is NA or "", then no legend is added. # # colSize # 8/8/16 - implemented to provide proportional column size control. # A vector of numeric values used to set a proportional column size within the # space provided by the user. The sum of all of colSize values are used as the # demoninator to determine the percentage of the available space to allocate to the # column. The default value for each column is "1". If a column's value is NA, NULL, or <=0.1, # then the column is allocated the 1/"n" of the available space, where "n" is the number # of columns. 
The map and id columns are fixed width columns and are not effected by the # colSize calculations. # # example: micromapST has 6 columns: map, id, dot, bar, arrow, dotconf. # The available width provided is 6.5" in a PDF. # colSize = c(0,0,5,5,10,3) # Once the map and id column widths are subtracted, the available width for the # four columns is 4". The total value of all columns is 23 (sum(5,5,10,3). # The width of the dot and bar columns will be set at 5/23 * 4 = 0.87 ", # arrow is allocated 1.74" and dotconf is allocated 0.52 ". # # The values in this vector must be positive numerical values. They can range from 0.1 to 100. # The sum of the values is used as the demoninator to calculate the percentage for each column. # # # panelData # (old boxplot column) # names a list object with a boxplot data or time series data (x/y or x/yl/ym/yh # data for each area. # # The boxplot list the xxxx$names list must be the abbreviated area id # for the entry and the related data in the structure. #. # Used to link graphic to additional data beyond the 3 data elements # provided in col1, col2, col3 indexes into the statsDFrame.. # # For boxplot graphics, a list of "boxplot" function values for each area and DC # with the names (2 characters) used as the row.names. # # For time series graphics, the object must be an array(51,"x",4), # where the 1st index is the areas (1 to n), the second index is the number # of time periods ("x") with a minimum of 2 and maximum of 30, and # the third index is the type of variable. The rownames of array must # be the associate area id (a 2 character abbreviation if states). This # is required so the time series array can be properly associated # with the data in the statsDFrame when it's sorted. # For time series with no confidence band, column 1 is the x value and # column 2 is the y value. 
# For time series with a confidence band, column 1 is the x value, # column 2 is the y-low value, column 3 is the y-median value, # and column 4 is the y-high value. # The number of entries must be equal to the number of areas in the statsDFrame. # # Note: Some descriptors may be omitted if none of the panel plots need them. # often refValues and boxplots can be omitted # # adv = list of parameter lists of for each glyph column. Each item in the list # represents a list of named parameters for that glyphs column. # Example: # # An example of the list form of panelDesc is: # # GC1 <- list(type="map",lab3="bottom lab") # GC2 <- list(type="id") # GC3 <- list(type="dot",lab1="Population",lab2="2010",col1="RATE.10",refVal=100,refTxt="Pop. Ref") # GC4 <- list(type="boxplot",lab1="boxplot",lab2="risk",panelData="BoxData") # # panelDesc <- list(GC1, GC2, GC3, GC4) # # > str(panelDesc) # List of 4 # $ :List of 2 # ..$ type : chr "map" # ..$ lab3 : chr "bottom lab" # $ :List of 1 # ..$ type : chr "id" # $ :List of 6 # ..$ type : chr "dot" # ..$ lab1 : chr "Population" # ..$ lab2 : chr "2010" # ..$ col1 : chr "RATE.10" # ..$ refVal : num 100 # ..$ refTxt : chr "Pop. Ref" # $ :List of 4 # ..$ type : chr "boxplot" # ..$ lab1 : chr "boxplot" # ..$ lab2 : chr "risk" # ..$ panelData: chr "BoxData" # > # # Each list in panelDesc represents a single glyph column in the output generated. # This makes it easier to create the glyph description, you only have to provide # the information needed for the glyph, and allows you to quickly change the # order of the glyphs in the results. As new glyph variables are defined, the # only have to be included in the list for the specific glyph and column. The # same glyph may be used several times with different glyph variables settings. # Currently the glyph (details) variable names must contain the glyph name and # a variable name. 
With this approach, the variable names are simplified and # have the same meaning across all of the glyphs but are specific to the glyph # and column. For more details see the panelDesc section of the documentation. # #### # # Individual Call Arguments/Parameters: # # rowNamesCol: Optionally the name of the column in the area data.frame that # contains the link names associated with the rows. If not specified, # the row.names of of the statsDFrame are used as the area names. # Using the row.names is the default method of linking the data to the # border data. # # rowNames: Type of area id used as row.names in statsDFrame data.frame. # Acceptable values are: "ab", "alt_ab", "full", "id", Two additional options # have been added to accomodate the SEER data requirements: "seer" or "alias". # This rowNames option requests the packet to do partial matches of an alias for # area against the "registry" column/list outputted by SeerStat. If the partial # match succeeds, the associated area abbreviation is used. # By default the row.names of the statsDFrame are used. Based on # this option, the value is treated as an abbreviation, full area name, # or the numeric ID of the area.. # The default is "ab" for abbreviation, # # ignoreNoMatches is a logical parameter. The default is FALSE. If FALSE, all of the # data rows in the statsDFrame MUST match the area list in the boundaries datasets. # The there is not a match, an error is generated and the call is terminated. # If set to TRUE, any data row that does not match the areas in the boundaries dataset # are ignored and the user is notified of the situation. This may be helpful, if you # know the full names or abbreviations are correct, but the data has a row with "US" or "ALL" # as the link value or the source of the data generated comment lines that should be ignored. # # plotNames: When the ID glyphs is selected, this options informs the # package which form of labels to use. 
The options are "full" area name # or the abbreviated area name. The default is the "full" for full name. # Acceptable values are: "ab", "full" # The values of the "ab" and "full" labels are provided in the areaNamesAbbrsIDs # data.frame associated with the border structures provided to the package. # # sortVar The column name or number in the statsDFrame to be used as the variable # in sorting. Can be a vector of column subscripts to break ties. # Warning: The sortVar parameter cannot be used to sort a boxplot # or time series, since data is not contained in the statsDFrame. # # ascend TRUE default sorts in ascending order. FALSE indicated descending order. # # title A vector with one or two character strings to use the title.for the page. # # BORDER GROUPS # # bordDir (optional) The path name to a directory containing the border group specified in # bordGrp. The file must be an ".rda" type file that contains the four border group # R objects: areaParms, areaVisBorders, L2VisBorders, L3VisBorders. This parameter # can be used when the user has their own border group dataset or during developement # of a new border group or testing a modified border group before a package is created. # When this field is specified, the internal border groups are ignored. # # bordGrp The package contains two border Groups: USStatesBG and USSeerBG. # When using the "USStatesBG" border group, allows the package to function identically # to the original micromapST package. When the "USSeerBG" border group is # used, the Seer Areas and structures are available to the micromapST user. # The USSeerBG border group contains the names, abbreviations, aliases, and border # structures to support the micromap generation for US Seer Area data. # # NOTE: For border groups to work, lazyloading and lazydata must be DISABLED. # If enabled, the package is unable to load the correct border group dataset based # on the bordGrp parameter value. 
#
# PANEL LAYOUT:
#
#  grpPattern  A user provided area to panel group/row mapping. The sum of the vector must
#          be equal to the number of areas provided in the statsDFrame data structure.
#          The values are the number of areas in each panel created by micromapST.
#          The values must be in the range of 2 through 5. The value of 1 is allowed,
#          but only if the number of areas is odd, and in the median position of the
#          vector. Examples:
#             For 9 areas grpPattern = c(3,3,3) for 3 areas per panel row.
#             For 9 areas grpPattern = c(4,1,4) for a pattern of 4 areas, 1 area,
#                 and 4 areas per panel.
#             For 17 areas grpPattern = c(5,3,1,3,5) or c(4,4,1,4,4) or c(4,3,3,3,4)
#             For 18 areas grpPattern = c(5,4,4,5)
#          The grouping pattern must also be symmetric about the median point and have
#          the number of rows per panel descend toward the median point. This is required
#          to make the micromap graphics presentable. A grpPattern = c(3,4,4,5) or c(3,4,4,3)
#          is not allowed. The maximum value for the rows per group is 5.
#
# MAPPING:
#
#  dataRegionsOnly  is a logical parameter. The default is FALSE. If FALSE, the data is
#          not inspected to determine if a subset of regions could be drawn, saving
#          mapping space in the map glyphs. If set to TRUE, the data sub-areas
#          are inspected to determine if a sub-set of regions can be drawn to
#          save graphic space. This feature is only active if the border group's
#          name table contains region identifiers for each sub-area. This information
#          is used to determine how many sub-areas are required to be drawn and
#          how to organize the map for presentation. As before, any sub-areas
#          in the mapped regions without data are only flagged with warning messages
#          and colored white, but still drawn. If regional boundaries are present,
#          the boundaries are overlaid for regions with data.
#
#  regionsB  is a logical parameter. The default is FALSE. If FALSE, no regional
#          boundaries are drawn.
If set to TRUE, if regional boundaries are # present, they are drawn on the micromap. # # # Glyph Global parameters: # # axisScale A character string indicating the type of axis labels to be used # on the Y and X axis for glyphs with axis labels. The acceptable # values are: # "o" -> original (pretty function) # "e" -> extended algorithm - no scaling. (new default) # "s" -> numbers scaled to millions, billions, etc. with # extra header line # example: # 0 10 20 30 40 # in millions # # "sn" -> numbers scaled individually and marked with # the scaling factor. # example: # 0 500M 1B 1.5B 2B # # "s" and "sn" are based on the "e" algorithm (extended.) # # This call arugment can be overriden for a specific glyph column by # including "axisScale=" in the panelDesc list for the column. # # staggerLab A true/false flag to specify if the axis labels are staggered # alternating low and high labels. The default = FALSE. If FALSE # the axis labels are NOT staggered. If TRUE, two axis label # lines are drawn, with the axis labels alternated low and high lines. # # This call arugment can be overriden for a specific glyph column by # including "staggeredLab=" in the panelDesc list for the column. # # ##### ##### # # List/Control Parameters: (package default data.frames are used if the colors and # details parameters do not specify an alternate data.frame. # It is strongly recommended to use the default data.frame) # # colors a color palette as a vectors of strings (character-vectors) # 6 colors for areas in a group of 6 # 1 color for the median area # 3 foreground color for non-highlighted areas in the map # 2 background colors for not referenced and non-active sub-areas, # and 12 matching colors with 20% transparency for time series. # # If a color vector is provided, it's length must = 24. # # If the value of colors is "bw" or "greys", a grey scale is used instead # of the default or user provided colors vector. 
# The default is NULL, which indicates the package default colors should used. # # see rlmicromapGDefaults$colors for more details # # # details defines the spacing, line widths, colors and many many other details # controlling the style and apparence of the generated glyphs. # # see the micromapGDefaults$details section for more details. # # The function automatically loads the default values into the code when the # function is started. The user can use the details parameter to override # any of the items and values in the micromapST package. To override a value, # create a list as follows: # # details = list(<variable name> = <value>,,, ) # # See the micromapGSetDefaults function below for a definition of each # micromapST variable and it's default. # ##### ##### # # Load working tables for verifications # # details variable list # data(detailsVariables,envir=environment()) # get validation and translation table for details variables to panelDesc variables. # ##### ##### # # Counter Initialization (Global) - research code = to be removed. # # Variable at the micromapST level. # #Saved_Locale <- Sys.getlocale(category='LC_CTYPE') # save existing locale #x <- Sys.setlocale('LC_ALL','C') # set to 'C' mstColorNames <- "black" mmSTEnvir <- environment() xmsg <- capture.output(mmSTEnvir) #cat("micromapST envir:",xmsg,"\n") # # Set up global variables values. # # # create warning and stop counters - must be in .GlobalEnv so the panelXXXX functions can use them. # var <- "warnCnt" wstr <- paste0("assign(var,NewCounter(),envir=.GlobalEnv)") eval(parse(text=wstr)) var <- "stopCnt" wstr <- paste0("assign(var,NewCounter(),envir=.GlobalEnv)") eval(parse(text=wstr)) # # this should get the global variables set up so they can be referenced within all functions. 
# # Cross column variables # lastLab2Space <- NULL lastLab3Space <- NULL staggered <- NULL staggering <- NULL var <- "lastLab2Space" wstr <- paste0("assign(var,0,envir=.GlobalEnv)") eval(parse(text=wstr)) var <- "lastLab3Space" wstr <- paste0("assign(var,0,envir=.GlobalEnv)") eval(parse(text=wstr)) var <- "staggered" wstr <- paste0("assign(var,FALSE,envir=.GlobalEnv)") eval(parse(text=wstr)) var <- "staggering" wstr <- paste0("assign(var,FALSE,envir=.GlobalEnv)") eval(parse(text=wstr)) # # glyph variables - at this time this is required to allow us to validate this variable. # Id.Dot.pch <- NULL var <- "Id.Dot.pch" wstr <- paste0("assign(var,22,envir=.GlobalEnv)") # assign default of 22. eval(parse(text=wstr)) #cat("envir=warnCnt:", find("warnCnt"),"\n") # ##### ##### # # Save call parameter values for warning and error messages, not content, name of variables. # # Can't do this in a function because the environment and frames will change. # frml <- formals() # get list of call parameters - the formals - for the function and default values. (as defined). frmlNames <- names(formals()) # get the name of the parameters (as we validate the parameter, we will back file the defaults. callVar <- as.list(match.call())[-1] # get the names and values used on the current call. callVarNames <- names(callVar) # get the names of the used call parameters # merge the formals parameter list with the parameter list used at the time of the micromapST call with user set values. callVL <- frml # Seed the call variable list with the formals and default values callVL[callVarNames] <- callVar[callVarNames] # copy the values used in the call . 
# Save the call parameter list and values to .GlobalEnv so the panelXXXX
# functions can reference them in warning/error messages.
# (direct assign() replaces the old eval(parse(text="assign(...)")) pattern -
#  behavior is identical.)
assign("callValList", callVL, envir = .GlobalEnv)

# Extract the statsDFrame variable name used on the call.
assign("sDFName", callVL$statsDFrame, envir = .GlobalEnv)

# Extract the panelDesc variable name used on the call.
assign("pDName", callVL$panelDesc, envir = .GlobalEnv)

#print(paste0("statsDFrame=",sDFName))
#print(paste0("panelDesc  =",pDName ))
#
# callValList is now a named list: the names are the call parameter names and
# the content is the values at the time of the call. Unevaluated variables show
# up with a typeof "symbol" and class "name"; the value of the variable is not
# captured here.
#
#####
#
# Verify run parameters.
#
# Order of importance:
#   a) bordDir and bordGrp - needed to get the border group loaded and its particular parameter defaults
#   b) Validate statsDFrame (but not contents)
#   c) Validate panelDesc (but not contents, yet)
#
# bordDir and bordGrp - first parameters to check - they set up the information
# for all of the other parameters.
#
# Border groups contained in the package:
#
PkgBGs <- c("USStatesBG", "USSeerBG", "KansasBG", "MarylandBG", "NewYorkBG",
            "UtahBG", "AfricaBG", "ChinaBG", "UKIrelandBG", "SeoulSKoreaBG")

UserBordGrpLoad <- FALSE   # FALSE -> load from package with data(); TRUE -> load from directory with load()

#
# bordDir - if a directory is provided, a private (user) border group is used.
# The bordDir call parameter directs the border group load to a user directory,
# e.g. while developing or testing a new or modified border group.
#
if (is.null(bordDir) || is.na(bordDir)) {
   bordDir <- NULL                        # no directory provided - use package border groups.
} else {
   # Validate that the directory exists and is referencable.
   bordDir <- str_trim(bordDir)           # trim blanks.
   if (!dir.exists(bordDir)) {
      # bordDir path does not exist.
      xmsg <- paste0("***0153 BGBD The directory specified in the bordDir call parameter does not exist. Value=",bordDir)
      stopCnt()
      # fixed: was stop(xmsg,call=FALSE) - after "..." only the exact name
      # "call." matches, so FALSE was being appended to the message text.
      stop(xmsg, call. = FALSE)
   } else {
      UserBordGrpLoad <- TRUE             # load() from directory, don't data()  (fixed '=' -> '<-')
      xc <- str_sub(bordDir, -1, -1)      # last character of the path
      if (xc != "/" && xc != "\\") {
         bordDir <- paste0(bordDir, "/")  # add trailing slash if not present (either slash style).
      }
   }
}
callVL$bordDir <- bordDir

BordGrpName <- bordGrp
bgFile      <- NA

if (!UserBordGrpLoad) {
   # No valid bordDir directory -> the bordGrp must be an .rda in this package.
   # If no bordGrp parameter, default to USStatesBG.
   if (is.null(bordGrp) || is.na(bordGrp)) {
      BordGrpName <- "USStatesBG"         # which structure .rda file in the package to load.
   } else {
      BordGrpName <- bordGrp
      bGM <- match(BordGrpName, PkgBGs)   # must be one of the packaged border groups
      if (is.na(bGM)) {
         # No match to the border groups supported within the package.
         # Build the message dynamically so it stays correct as border groups are added.
         ymsg <- paste0(shQuote(PkgBGs), collapse = ", ")
         xmsg <- paste0("***0150 BGBD The bordDir call parameter was set to NULL, the bordGrp must be one contain in the package:\n", ymsg, "\n")
         stopCnt()
         stop(xmsg, call.=FALSE)
         # alternative is to check for file in working directory and verify its structure.
         # (an unreachable rm(ymsg) after the stop() was removed.)
      }
      rm(bGM)
      # DATA bordGrp
   }
} else {
   if (is.null(bordGrp) || is.na(bordGrp)) {
      # bordDir provided, but no bordGrp - error.
      stopCnt()
      xmsg <- paste0("***0156 BGBD The bordGrp call parameter has not been specified. It is required when the bordDir is provided.")
      stop(xmsg, call.=FALSE)
   } else {
      # Check whether the .rda file exists.
      fnSplit     <- str_split(bordGrp,"[.]")[[1]]   # split the user-provided name on "."
      BordGrpName <- fnSplit[1]
      if (is.na(fnSplit[2])) {
         # no extension - add .rda
         bordGrp <- paste0(bordGrp,".rda")
      } else {
         # Extension present - must be .rda or .RData.
         # fixed: str_split on "[.]" strips the dot, so the pieces are "rda" /
         # "RData"; the old comparison against ".rda" / ".RData" rejected every
         # valid filename and always raised error 0154.
         if (fnSplit[2] != "rda" && fnSplit[2] != "RData") {
            xmsg <- paste0("***0154 BGBD The bordGrp filename must have an '.rda' or '.RData' file extension.")
            stopCnt()
            stop(xmsg, call. = FALSE)
         }
      }
   }
   # Verify the directory and file exist before trying to load.
   bgFile <- paste0(bordDir, bordGrp)
   if (!file.exists(bgFile)) {
      xmsg <- "***0155 BGBD The bordGrp file in the bordDir directory does not exist."
      stopCnt()
      stop(xmsg, call. = FALSE)
   }
}

# Got this far - the variables needed to load/data() the border group appear good.
callVL$bordGrp     <- bordGrp
callVL$bgFile      <- bgFile
callVL$BordGrpName <- BordGrpName

# Publish the updated call variable list (direct assign(), see note above).
assign("callVarList", callVL, envir = .GlobalEnv)

#cat("bordDir = ",bordDir,"\n","bordGrp = ",bordGrp,"\n")
#cat("BordGrpName = ",BordGrpName,"\n")
#cat("bgFile = ",bgFile,"\n")
#
######
#
# load micromap border and names tables based on type of run
# Currently supported: USStatesBG and USSeerBG
#
#
## add code to pick up on "bordGrp" provided by user.
## If one of ours use data, otherwise use load or "copy" from structure of that name.
## bordGrp must be data.frame containing "areaNamesAbbrsIDs, areaVisBorders, L2VisBorders, RegVisBorders,
## L3VisBorders, and areaParms.
#
# Thoughts on border group verification:
#  1) Do it once and get a md5 check sum on the files that pass.
#  2) Place name of border group file and directory and MD5 in
#     file in micromapST/data folder under the "library".
#  3) Prior to using private border group check library information to see if verification must be done.
#
## for testing - use load instead of data.
# initialize border group variables to determine if they are correctly loaded.
areaParms <- NULL areaNamesAbbrsIDs <- NULL areaVisBorders <- NULL L2VisBorders <- NULL RegVisBorders <- NULL L3VisBorders <- NULL if (!UserBordGrpLoad) { # System border group #print (paste0("reading border group ",BordGrpName, " via a data statement.")) data(list=BordGrpName,envir=environment()) # Group Border tables and parameters distributed with package. } else { # user border group #print (paste0("reading border group ",BordGrpName, " via LOAD since bordDir = ",bordDir)) # need to put a try around this incase there is a problem with the user data file. res <- try(load(bgFile)) # only error should be a lock or error in reading file. if (class(res)=="try-error") { # error occurred during user border group file loading. stopCnt() xmsg <- paste0("***0157 BGBD System error encountered when loading the border group. See error message:") ymsg <- paste0("***0157 >>",res[1]) # get message from error warning(xmsg, call.=FALSE) stop(ymsg, call.=FALSE) # stopped. } } # # Basic Verify that all of the bordGrp data.frames have been loaded. # MissInBG <- NULL ErrFnd <- FALSE if (is.null(areaParms)) { ErrFnd <- TRUE MissInBG <- paste0(MissInBG,", areaParms") } if (is.null(areaNamesAbbrsIDs)) { ErrFnd <- TRUE MissInBG <- paste0(MissInBG,", areaNamesAbbrsIDs") } if (is.null(areaVisBorders)) { ErrFnd <- TRUE MissInBG <- paste0(MissInBG,", areaVisBorders") } if (is.null(L3VisBorders)) { ErrFnd <- TRUE MissInBG <- paste0(MissInBG,", L3VisBorders") } #if (is.null(L2VisBorders)) { # No action at this time. Check later when processing areaParms. # L2VisBorders is only needed if Map.L2Borders is TRUE. # If there is no L2VisBorders data.frame, set L2VisBorders to L3VisBorders, # or is equal to NA, then the Map.L2Borders are set to FALSE # and a warning message generated. #} #if (is.null(RegVisBorders)) { # No action at this time. Check later when processing areaParms. # RegVisBorders is only needed if aP_Regions or Map.RegBorders are set to TRUE. 
# If there is no RegVisBorders data.frame, it is set to L3VisBorders, # then Map.RegRegions is set to FALSE. It is possible to do regional mapping without # regional boundaries. #} #str(areaNamesAbbrsIDs) if (ErrFnd) { # if ErrFnd, the MissInBG must contain at least one entry. MissInBG <- substr(MissInBG,3,nchar(MissInBG)) # Kill leading ", " stopCnt() xmsg <- paste0("***0151 BGBD After loading ",BordGrpName," border group data set, the following objects are missing: ",MissInBG) stop(xmsg, call.=FALSE) } rm(MissInBG,ErrFnd) # Clean up and move data into old structures # # Later add code to validate possibly USER provided border groups. # if (UserBordGrpLoad) { # verify border group objects. (columns, same number of rows, etc.) # objective is to only check data once - mark the data for future reference. # Want to keep run times VERY VERY low and not keep re-checking user data. # Lot of work to be done. # OR set flag in BG.rda indicating it has been verified. Do Once on request. # Check Validation by BGValidate function. # md5sum file is in .Library directory under the name BGmd5.rda # Contents is BG name and md5 check sum. # run md5sum over the BG file and compare values with this file. # if it matches, then BG file does not have to validated and waste time and CPU. # } ########## # # Merge the "areaParms" variables into the global variables. # # They may still be overridden by the details=list(...) parameter in the call. # # Set the type of everything to protect against factors on data.frames. 
bordGrp <- as.character(areaParms$bordGrp) Map.Hdr1 <- as.character(areaParms$Map.Hdr1) Map.Hdr2 <- as.character(areaParms$Map.Hdr2) Map.MinH <- as.numeric(areaParms$Map.MinH) Map.MaxH <- as.numeric(areaParms$Map.MaxH) Map.Aspect <- as.numeric(areaParms$Map.Aspect) if (is.null(areaParms$ID.Hdr1)) { # New variable names Id.Hdr1 <- as.character(areaParms$Id.Hdr1) Id.Hdr2 <- as.character(areaParms$Id.Hdr2) } else { # Old variable names Id.Hdr1 <- as.character(areaParms$ID.Hdr1) Id.Hdr2 <- as.character(areaParms$ID.Hdr2) } # Map.L2Borders - draw L2 borders if (is.null(areaParms$Map.L2Borders)) { Map.L2Borders <- FALSE } else { Map.L2Borders <- as.logical(areaParms$Map.L2Borders) } # Map.L3Borders - draw L3 borders (option - also turned off if limited regional drawing is done.) if (is.null(areaParms$Map.L3Borders)) { Map.L3Borders <- TRUE } else { Map.L3Borders <- as.logical(areaParms$Map.L3Borders) } areaUSData <- as.logical(areaParms$areaUSData) enableAlias <- as.logical(areaParms$enableAlias) #print("areaParms:") #print(str(areaParms)) # fix up areaParms to unique names # check for old field names. If present - copy to new names. if (!is.null(areaParms$Regions)) { areaParms$aP_Regions <- as.logical(areaParms$Regions) areaParms$Regions <- NULL } if (!is.null(areaParms$Units)) { areaParms$aP_Units <- as.character(areaParms$Units) areaParms$Units <- NULL } if (!is.null(areaParms$Proj)) { areaParms$aP_Proj <- as.character(areaParms$Proj) areaParms$Proj <- NULL } # Check regions, and boundary overlay flags. # # aP_Regions - feature enabler - if RegVisBorder is present and information in name table. # Referres to name table information, not regional boundaries # if (is.null(areaParms$aP_Regions)) { areaParms$aP_Regions <- FALSE # no regional mapping feature } # region borders can be drawn or not. If regions feature enable, default = TRUE. 
If not, FALSE # if (is.null(areaParms$Map.RegBorders)) { areaParms$Map.RegBorders <- FALSE # no regional boundaries available } # copy from data frame into work variables. aP_Regions <- as.logical(areaParms$aP_Regions) aP_Units <- areaParms$aP_Units aP_Proj <- areaParms$aP_Proj Map.RegBorders <- as.logical(areaParms$Map.RegBorders) #cat("Initial areaParms - Map. L2:",Map.L2Borders," Reg:",Map.RegBorders," L3:",Map.L3Borders," aP_Regions:",aP_Regions,"\n") #print(str(areaParms)) # # after this point we do not reference areaParms again. # #### #### # # The following variables may be included in details, but are not configured here # with defaults. They are variables initialized in the border group areaParms table. # detailExtra <- colnames(areaParms) # get list of parameters from areaParms x <- match("bordGrp",detailExtra) # if list contains "bordGrp" if (!is.na(x)) { detailExtra <- detailExtra[-x] # remove it from the list. } # When "bordGrp" is excluded, this would leave: # "Id.Hdr1","Id.Hdr2", "Map.Hdr1", "Map.Hdr2", "Map.MinH", "Map.MaxH", "Map.L2Borders", # "areaUSData", "enableAlias", "aP_Regions" , "aP_Proj" , "aP_Units" # # This list is appended to the colname list derived from the default details data.frame to # create a list of valid variables. # ########## ########## # # areaNamesAbbrsIDs and xxxVisBorders tables # #cat("dim of areaNamesAbbrsIDs:",dim(areaNamesAbbrsIDs),"\n") #cat("names of areaNamesAbbrsIDs:",names(areaNamesAbbrsIDs),"\n") row.names(areaNamesAbbrsIDs) <- areaNamesAbbrsIDs$Key # ensure row.names match the keys if (is.null(areaNamesAbbrsIDs$regID)) { # current name table does not have regional information. 
areaNamesAbbrsIDs$regID <- "<NA>" areaNamesAbbrsIDs$regName <- "<NONE>" } rlAreaNamesAbbrsIDs <- areaNamesAbbrsIDs # save copy of name table #cat("dim of rlAreaNamesAbbrsIDs:",dim(rlAreaNamesAbbrsIDs),"\n") #print(str(areaNamesAbbrsIDs)) # sub-area boundaries rlAreaVisBorders <- areaVisBorders # save copy of sub-area boundaries #cat("dim of rlAreaVisBorders :",dim(rlAreaVisBorders),"\n") #print(str(areaVisBorders)) # total area boundaries rlL3VisBorders <- L3VisBorders # save copy of area boundary #cat("dim of rlL3VisBorders :",dim(rlL3VisBorders),"\n") #print(str(L3VisBorders)) # Check on L2VisBorder and complete set up. if (Map.L2Borders) { if (is.null(L2VisBorders) || identical(L2VisBorders,L3VisBorders)) { # no L2VisBorders or L2VisBorders is the same as L3VisBorders # Map.L2Borders set on - but no boundaries to draw. xmsg <- paste0("***0158 BGBD In the areaParms data.frame the Map.L2Borders is TRUE, but no level 2 boundaries are provided, Level 2 overlay is disabled.\n") warnCnt() warning(xmsg,call.=FALSE) L2VisBorders <- L3VisBorders # copy L3 to L2 data.frame (Place holder) Map.L2Borders <- FALSE } } rlL2VisBorders <- L2VisBorders #print(str(rlL2VisBorders)) # # Check on RegVisBorder and complete set up. if (Map.RegBorders) { if (is.null(RegVisBorders) || identical(RegVisBorders,L3VisBorders)) { # no RegVisBorders or RegVisBorders == L3VisBorders xmsg <- paste0("***0159 BGBD The areaParms variable aP_Regions is TRUE and/or Map.RegBorders is TRUE, but no regional boundaries exist in border group. Regions overlay is disabled.") warnCnt() warning(xmsg,call.=FALSE) Map.RegBorders <- FALSE RegVisBorders <- L3VisBorders # copy L3 to L2 data.frame (placeholder) } } # rlRegVisBorders <- RegVisBorders #print(str(rlRegVisBorders)) # # Implementation change note: The regions feature will be implemented using the # regID field in the areaNamesAbbrsIDs table and a RegVisBorders boundary data.frame. # The regID field associates the sub-areas to regions. 
# If a RegVisBorder file is present, the boundaries # are grouped by the regID codes as it's keys. This permits # sub mapping of its boundaries - hopefully they will # match up with the area boundaries. # # Map.L2Borders > controls if L2VisBorders is drawn. # Map.RegBorders > controls if RegVisBorders is drawn. # Map.L3Borders > controls if L3VisBorders is drawn. # # Map.L3Borders is TRUE by default, but reset to FALSE when a sub-set of regions are drawn. # Map.RegBorders is only TRUE when there is a valid RegVisBorders data.frame. This is not # independent of the aP_Regions feature control flag. # Map.L2Borders is TRUE when a valid L2VisBorders data.frame is present. # # When a subset of the regions in a border group are to be drawn, # a) The areaNamesAbbrsIDs name table is not modified. # b) L2VisBorders, RegVisBorders data.frames are edited to the limited group of areas. # It is assumed L2 is a subset of Reg. # c) Map.L3Borders is set to FALSE to not draw the outline of the total space. # # # If both L2 and Reg are persent, The name table is used to know witch L2 boundaries to draw. # In regions mode: # # listUsedArea <- areaNamesAbbrsIDs[IndexDFtoNT,"Key"] # listUsedL2 <- unique(areaNamesAbbrsIDs[IndexDFtoNT,"L2_ID"]) # listUsedRegions <- unique(areaNamesAbbrsIDs[IndexDFtoNT,"regID"]) # # a) if dataRegionsOnly=TRUE - enable regions feature. # # regMatch <- !is.na(match(areaNamesAbbrsIDs$regID,listUsedRegions)) # list of sub-areas in regions # listAllAreas <- areaNamesAbbrsIDs[regMatch,"Key"] # listAllL2 <- unique(areaNamesAbbrsIDs[regMatch,"L2_ID"]) # listAllRegions <- unique(areaNamesAbbrsIDs[regMatch,"regID"]) # # b) if dataRegionsOnly=FALSE or not enabled. # # listAllAreas <- areaNamesAbbrsIDs[,"Key"] # listAllL2 <- unique(areaNamesAbbrsIDs[,"L2_ID"]) # listAllRegions <- unique(areaNamesAbbrsIDs[,"regID"]) # # ensure Abbreviations are all CAPS. 
# # The package carries two sets of names for each area # (in the areaNamesAbbrsIDs table and areaVisBorders matrix.) # # abbreviated - always in CAPS. # fullname - always with proper capitalization. # but is CAP'd for all comparisons. # fix up Name Table for sub-area matching. #cat("dim of areaNamesAbbrsIDs:",dim(areaNamesAbbrsIDs),"\n") # Matching strings rlAreaNamesAbbrsIDs$Abbr <- toupper(rlAreaNamesAbbrsIDs$Abbr) # Abbr Must be uppercase. areaNTAbbr <- (rlAreaNamesAbbrsIDs$Abbr) # Get list of abbrevations. (All CAPS) areaNTName <- (toupper(rlAreaNamesAbbrsIDs$Name)) # get list of full area names in uppercase (All CAPS) areaNTAAbbr <- (toupper(rlAreaNamesAbbrsIDs$Alt_Abbr)) # get list of alternate abbreviations. areaNTKey <- (toupper(rlAreaNamesAbbrsIDs$Key)) # get key as uppercase. (links into VisBorder files.) # Presentation strings ID.Abbr <- areaNTAbbr # (All CAPS) # for ID.Name force proper capitalization on the name ID.Name <- as.vector(sapply(areaNTName,function(x) simpleCap(x))) # proper cap. areaNTName <- ClnStr(areaNTName) areaNTAbbr <- ClnStr(areaNTAbbr) areaNTAAbbr <- ClnStr(areaNTAAbbr) areaNTID <- ClnStr(toupper(rlAreaNamesAbbrsIDs$ID)) # fix up areaVisBorders data frame rlAreaVisBorders$Key <- toupper(rlAreaVisBorders$Key) rlRegVisBorders$Key <- toupper(rlRegVisBorders$Key) rlL2VisBorders$Key <- toupper(rlL2VisBorders$Key) # Working vectors for PRINT out. # #### xps <- par("ps") xpin <- par("pin") xdin <- par("din") #cat("check point on par - ps:",xps," pin:",xpin," din:",xdin,"\n") #print("Border Group Read and Setup") #### # #________________ Type of micromap Variable (for now)_______________ # # extend hdr strings to include the other types of maps # # #### ##################### # # Border Group now loaded and name table initial setup completed. # ##################### # # Finish check after the glyph function definitions. 
#
#####################
#####################
#
# Define panel glyph functions =====================================
#
# All of these glyph creating functions are internal to the micromapST function.
#
#####
#
# type = 'arrow' =========================================================
#
# rlAreaArrow - draws one "arrow" glyph column: one arrow per area, from the
#    col1 value (start point) to the col2 value (end point).
#
# JP - fixed error when difference is zero.
# JP - generalize for any collections of area
#
# PDUsed - list of column names from the panelDesc data.frame provided.
#
# The col1 and col2 values have already been converted to column numbers and verified.
#
rlAreaArrow <- function(j) {
   # j = current glyph column number in the panel layout.
   #
   # col1[j] - index of the statsDFrame column holding the first arrow end point value.
   # col2[j] - index of the statsDFrame column holding the second arrow end point value.
   #
   # NOTE(review): this function reads many variables from the enclosing
   # micromapST environment (dat, col1, col2, lRefVals, lRefTexts, panels,
   # numGrps, ib, ie, medGrp, medGrpSize, pad, mstColors, and the Arrow.*,
   # Panel.*, Grid.* settings) - it can only run inside micromapST.

   ### glyph column setup (header) section

   wstname  <- names(dat)      # column names of the statsDFrame
   wstdim   <- dim(dat)
   wstMax   <- wstdim[2]       # number of columns in statsDFrame
   ErrFnd   <- FALSE
   # NOTE(review): pdColNum is currently unreferenced in this glyph; kept for
   # parity with the other glyph functions - confirm before removing.
   pdColNum <- formatC(j, format = "f", digits = 0, width = 2, flag = "0")

   # Validate "col1" (beginning value of each arrow) and fetch its data.
   stColName1 <- wstname[col1[j]]
   if (is.null(stColName1)) { stColName1 <- as.character(col1[j]) }
   pdUmsg <- "(Beginning value of arrow)"
   xr <- CheckPDCol('col1', 'ARROW', col1[j], stColName1, j, 1, wstMax, dat, pdUmsg)
   if (xr$Err) { ErrFnd <- TRUE } else { xdat1 <- xr$Dat }

   # Validate "col2" (end value of each arrow) and fetch its data.
   stColName2 <- wstname[col2[j]]
   if (is.null(stColName2)) { stColName2 <- as.character(col2[j]) }
   pdUmsg <- "(End value of arrow)"   # fixed malformed message: was "{End value of arrow)"
   xr <- CheckPDCol('col2', 'ARROW', col2[j], stColName2, j, 2, wstMax, dat, pdUmsg)
   if (xr$Err) { ErrFnd <- TRUE } else { xdat2 <- xr$Dat }

   if (ErrFnd) return()        # errors already reported - skip this column.

   refval <- lRefVals[j]       # reference value for this column (may be NA)
   reftxt <- lRefTexts[j]      # reference line legend text for this column

   # Rows where BOTH end points are present; rows with any NA are skipped.
   # (replaces the unused good1/good2 intermediates.)
   goodrow <- !is.na(xdat1 + xdat2)

   # x range over all start and end values for all areas.
   rx <- range(xdat1, xdat2, na.rm = TRUE)
   lPad <- TRUE
   rPad <- TRUE
   ry <- c(0, 1)               # y axis range = 0 to 1

   # Set up and draw the top/bottom titles and the X axis for this column
   # (header and trailer), returning the adjusted ranges and tick positions.
   Res  <- DrawXAxisAndTitles(j, panels, rx, ry, reftxt, refval, leftPad = lPad, rightPad = rPad)
   atRx <- Res$atRx
   rx   <- Res$rx
   ry   <- Res$ry

   ### glyph body - drawing loop, one panel per group/row (1 to 5/6 areas each).
   for (i in seq_len(numGrps)) {
      gsubs <- ib[i]:ie[i]     # area indexes for this panel row
      ke    <- length(gsubs)   # number of areas in this panel (1 up to 5/6)
      laby  <- ke:1            # y positions, first area on the top line

      # A median group of size 1 uses the median color (slot 7); otherwise 1:ke.
      # (scalar condition - '&&' replaces the elementwise '&'.)
      pen <- if (i == medGrp && medGrpSize == 1) 7 else 1:ke

      panelSelect(panels, i, j)                # select current panel
      panelScale(rx, c(1 - pad, ke + pad))     # called for its side effect of scaling the panel
      panelFill(col = Panel.Fill.col)

      # Arrows shorter than this are drawn as dots instead; avoids the
      # "zero-length arrow" warning from arrows() on rounding-error lengths.
      arrLim <- max(diff(rx) / par("pin") / 1000) * 1.05

      # vertical grid lines
      axis(side = 1, tck = 1, labels = FALSE, at = atRx, col = Grid.Line.col, lwd = Grid.Line.lwd)

      # If a refval is provided and within the rx range, add the reference line.
      AddRefLine(refval, ke, rx)
      panelOutline(col = Panel.Outline.col)    # outline panel

      oldpar <- par(lend = "butt")             # square line ends; restored below
      for (k in seq_len(ke)) {
         m <- gsubs[k]                         # row index into the data
         if (goodrow[m]) {
            if (abs(xdat1[m] - xdat2[m]) > arrLim) {
               # normal arrow from start value to end value.
               arrows(xdat1[m], laby[k], xdat2[m], laby[k], col = mstColors[pen[k]],
                      length = Arrow.Head.length, lwd = Arrow.lwd)
            } else {
               # effectively zero-length arrow - draw a dot instead.
               plotPoint(xdat1[m], laby[k], Arrow.Dot.pch, mstColors[pen[k]],
                         Arrow.Dot.pch.size, Arrow.Dot.pch.lwd, Arrow.Dot.Outline,
                         Arrow.Dot.Outline.col, Arrow.Dot.Outline.lwd)
            }
         }
      }
      par(oldpar)
      ### end of one Arrow glyph panel (row/group)
   }

   ### glyph column trailer.
   groupPanelOutline(panelGroup, j)   # outline the full group (column)

   # Column done; reference-line footnote text is handled by the shared helpers.
}
#
# End of Arrow Glyph
#
#####
#####
#
# type = 'bar' =========================================================
#
# rlAreaBar
#
rlAreaBar = function(j){
   # j = current panel column number
   #
   # col1[j] points to the statsDFrame column holding the bar height from zero.
#
#cat("Bar Startup staggered:",staggered,"\n")
#
# Section: validate the bar-height column, force the x-range to include zero
# (so every bar has a base line), draw the column titles/axis, then render
# one panel per group with one horizontal bar per area.
#
wstname  <- names(dat)      # names of columns in statsDFrame
wstdim   <- dim(dat)
wstMax   <- wstdim[2]       # number of columns in statsDFrame

ErrFnd   <- FALSE
pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0")

# "col1"
stColName1 <- litcol1[j]    # NOTE(review): sibling glyphs use wstname[col1[j]] here;
                            # 'litcol1' is presumably the literal panelDesc column
                            # name vector - confirm it exists in the enclosing scope.
pdUmsg     <- "(Bar length)"

xr <- CheckPDCol('col1', 'BAR', col1[j], stColName1, j, 1, wstMax, dat, pdUmsg)

if (xr$Err) { ErrFnd <- TRUE } else { xdat1 <- xr$Dat }

if (ErrFnd) return ()       # error warning found - return.

# py is the y-offset pattern (in row units) tracing one bar rectangle; the
# trailing NA terminates the polygon.
py <- Bar.barht*c(-.5,-.5,.5,.5,NA)   # Bar.barht = 2/3 (0.6667) - basic pattern to form a bar.
ry <- c(0,1)

refval <- lRefVals[j]       # changed to lRefVals - JP-2010/07/23
reftxt <- lRefTexts[j]      # new - JP-2010/07/23

# ________scale x axis________________________

good <- !is.na(xdat1)

# get x axis range
rx <- range(xdat1,na.rm=T)  # get range of values (min-1, max-2)
#cat("bar-rx:",rx,"\n")

lPad <- TRUE
rPad <- TRUE

# Bars are anchored at zero, so widen the range to include 0 and suppress
# axis padding on the side where zero becomes the edge.
if (rx[2]<=0){                 # max < 0..
   rx[2] <- 0                  # set max to zero
   #rx[1] <- mean(1,sc)*rx[1]  # adjust min. (average of 1 and sc)
   rPad <- FALSE
} else if ( rx[1] >= 0 ) {     # min > 0
   rx[1] <- 0                  # set min to zero
   #rx[2] <- rx[2]*(1+sc)/2    # adjust max
   lPad <- FALSE
} else {                       # min and max are both > 0
   #rx <- sc*diff(rx)*c(-.5,.5)+mean(rx)
}  # end of if / else if group
#cat("bar-rx adjusted:",rx,"\n")

# ____________label axis_______________
#####
#
# Bar Setup and draw top and bottom titles and axis for column
#
# No padding if Zero is left or right side. Otherwise minor padding.
#
#cat("Bar-Calling DrawXAxisAndTitle.\n")

Res  <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad)
atRx <- Res$atRx
rx   <- Res$rx
ry   <- Res$ry
#cat("Bar-After DrawXAxisAndTitles staggered:",staggered,"\n")
#
#####

#####
#
# Bar Glyph body section
#
#####

# _______________drawing loop___________________

for (i in 1:numGrps){
   ###
   # Glyph group/row body
   ###
   gsubs <- ib[i]:ie[i]      # index of elements in panel
   ke    <- length(gsubs)
   # median group (single row) draws with pen 7; otherwise one pen per row.
   pen   <- if(i==medGrp & medGrpSize == 1 ) 7 else 1:ke   # Pen indexes.
   laby  <- ke:1             # laby (1 or 1:2, 3, 4, 5, or 6)

   panelSelect(panels,i,j)   # select current panel
   x <- panelScale(rx,c(1-pad,ke+pad))   # re-scale to 1 or 5/6 entries per panel/row (same physical height used.)
                                         # for 1 ->
   panelFill(col=Panel.Fill.col)

   # grid lines for bar
   axis(side=1, tck=1, labels=F, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd)  # grids

   # if a refval is provided and in the rx range, then add line.
   AddRefLine(refval, ke, rx)

   panelOutline(col=Panel.Outline.col)   # outline panel/row

   ksc <- SetKsc(ke)         # get scaler for bar height
   # play like we have 5 areas in this panel/row
   wpy <- py * ksc
   #
   # All panel/rows are the same height (except when the a single area is used in the median panel/row.
   # All graphic element heights are calculated based on 5 areas per panel/row.
   # This keeps the height the same in all panel/rows and provided a uniform graphic.
   #
   for (k in 1:ke){
      m <- gsubs[k]          # draw each entry (1 to ke), get index from gsubs
      if (good[m]){          # good value - draw bars as polygons.
val <- xdat1[m] # get value for bar height polygon(c(0, val, val, 0, NA), rep(laby[k], 5) + wpy, col=mstColors[pen[k]] ) # fill color polygon(c(0, val, val, 0, NA), rep(laby[k], 5) + wpy, col=Bar.Outline.col, lwd=Bar.Outline.lwd, density=0) # outline of bar } lines(c(0,0), c(1-.5*Bar.barht,ke+.5*Bar.barht), col=1) # re-draw base line of bars } ##### # # End of one Group/Row Body for Bar # ##### } # end of bar glyph column # ____________________________PanelOutline____________________ groupPanelOutline(panelGroup,j) # outline column of glyphs ##### # # End of Bar Glyph Body section # ##### } # # End of Bar Glyph # ##### ##### # # type = 'boxplot' ====================================================== # # rlAreaBoxplot # rlAreaBoxplot <- function(j, boxnam){ # boxnam -> name of the panelData value for the boxplot data structure. wstname <- names(dat) # names of columns in statsDFrame wstdim <- dim(dat) wstMax <- wstdim[2] # number of columns in statsDFrame ErrFnd <- FALSE pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0") # can we get access to the boxplot list? boxlist <- tryCatch(get(boxnam, pos=1),error=function(e) e) if (inherits(boxlist,"error")) { # could not find object named in boxnam. ErrFnd <- TRUE warnCnt() xmsg <- paste0("***02B1 BOXPLOT ",pdColNum," The panelData value of ",boxnam," in the ", pDName," data frame does not exist or is not accessible.") warning(xmsg, call.=FALSE) } else { if (!is.list(boxlist)) { ErrFnd <- TRUE warnCnt() xmsg <- paste0("***02B3 BOXPLOT ",pdColNum," The ", boxnam, " data for the boxplot is not a list.") warning(xmsg, call.=FALSE) } else { lnam <- names(boxlist) # names of lists in boxlist data, one per variable if (is.null(lnam) || is.na(lnam)) { ErrFnd <- TRUE warnCnt() xmsg <- paste0("***02B4 BOXPLOT ", pdColNum, " The ", boxnam, " structure does not have any name attributes for the boxplot data.") warning(xmsg, call.=FALSE) } else { # The correct structure should have 6 names of elements in the list. 
if (length(lnam) != 6) { # must have at least 1 element and name ErrFnd <- TRUE warnCnt() xmsg <- paste0("***02B5 BOXPLOT ", pdColNum, " The ", boxnam, " boxplot data is not a valid structure. Must contain 6 boxplot sub lists.") warning(xmsg, call.=FALSE) } else { nbox <- c("stats","n","conf","out","group","names") # correct list of names for boxplot data. # all should be present to be a valid boxplot structure. if (any(is.na(match(lnam,nbox)))) { # at least one of the list names does not match or is missing. ErrFnd <- TRUE warnCnt() xmsg <- paste0("***02B6 BOXPLOT ", pdColNum, " The ", boxnam," boxplot data does not contain all of the lists of boxplot function output. ","Invalid structure.") warning(xmsg, call.=FALSE) } else { # check on the number of rows/etc. - the $names list must be present after the last check. boxlist$names <- toupper(boxlist$names) # force to upper case for match with areaNamesAbbrsIDs table (name table). goodn <- !is.na(boxlist$names) nNams <- length(boxlist$names) # get number of names in structure if (any(!goodn)) { # one of the boxlist names is "NA" - no match warnCnt() xmsg <- paste0("***02B7 BOXPLOT ", pdColNum, " In the ",boxnam, " boxplot data, the $name named list contains one or more missing values.") warning(xmsg, call.=FALSE) } # how to find and edit out entries not being used. ---- boxlist2 <- boxlist[good,] # get only the entires with names != NA listUNames <- unique(boxlist$names) # get list of unique area ids used nn <- length(listUNames) nn2 <- length(boxlist$names) if (nn != nn2) { warnCnt() xmsg <- paste0("***02B8 BOXPLOT ", pdColNum, "There are duplicate sets of boxplot data for the same sub-area. ","Only the first one will be used.") warning(xmsg, call.=FALSE) } ## how to edit out duplicates. - search may be from ID to boxlist, if so only first found will be used. bpNumRows <- nn # number of unique rows of data. 
# Validate the $stats matrix: must be 5 statistics per sub-area and one
# column per name in $names; warn (non-fatal) on missing values; then verify
# every boxplot name matches a border-group key.
nr <- dim(boxlist$stats)[1]   # get number of data elements per area
                              # (was boxlist$stat - worked only via $ partial matching)
if (nr != 5) {
   ErrFnd <- TRUE
   warnCnt()
   xmsg <- paste0("***02BA BOXPLOT ", pdColNum, " The $stats matrix in the ", boxnam, " boxplot data does not have 5 values per area.")
   warning(xmsg,call.=FALSE)
}
nc <- dim(boxlist$stats)[2]   # number of rows in boxplot stats data list (is this needed?).
if (nc != nNams) {
   ErrFnd <- TRUE
   warnCnt()
   xmsg <- paste0("***02BB BOXPLOT ", pdColNum, " The $stats matrix in the ", boxnam, " boxplot data must have ", nNams, " elements.")
   warning(xmsg, call.=FALSE)
}
goods <- !is.na(colSums(boxlist$stats))  # good rows from a missing value view point.
if (any(!goods)) {
   # data contains missing values
   #ErrFnd <- TRUE    not used - not a stopping warning.
   warnCnt()
   xmsg <- paste0("***02BC BOXPLOT ", pdColNum, " The $stat matrix in the ", boxnam, " boxplot data has missing values. ", "Sub-areas with missing values will not be drawn.")
   warning(xmsg, call.=FALSE)
}
#if (is.null(nn) || is.na(nn)) {
#   ErrFnd <- TRUE
#   warnCnt()
#   xmsg <- paste0("***xxxx BOXPLOT ", pdColNum, " BOXP-03 The list of names is missing or incomplete in the boxplot data."
#   warning(xmsg, call.=FALSE)
#
#} else {
#   if (length(nn) != numRows) {   # is this needed?
#      ErrFnd <- TRUE
#      warnCnt()
#      xmsg <- paste0("***xxxx BOXPLOT ", pdColNum, " BOXP-07 The boxplot list does not contain ",numRows," unique entries.")
#      warning(xmsg, call.=FALSE)
#}
#cat("boxplot - areaDatKey:",areaDatKey,"\n")
tnn <- is.na(match(listUNames,areaDatKey))   # match should be against the plotNames variable.
if (any(tnn)) {
   # test to see if any did NOT match
   ErrFnd <- TRUE
   warnCnt()
   # BUG FIX: was paste0(nn[tnn],...) - 'nn' is the scalar count of unique
   # names, so the message never listed the offenders.  Index the unique
   # name vector instead.
   lnn  <- paste0(listUNames[tnn],collapse=" ")
   xmsg <- paste0("***02BD BOXPLOT ", pdColNum, " The sub-area names/abbreviations found in the ", boxnam, " boxplot data $names values do not match the border group names: ",lnn)
   warning(xmsg, call.=FALSE)
}  # end of missing sub-areas.
}  # end of look at boxplot lists.
}  # end of number of boxplot lists check.
} # end of get boxplot named list names (null check) } # end of boxplot list structure test. } # end of fetch of boxplot boxnam variable. if (ErrFnd) return () # End of basic validation for BoxPlot glyph refval <- lRefVals[j] # get referrence to object, changed reftxt <- lRefTexts[j] # new - JP-2010/07/23 #_______________Good Rows__________ #cat("Boxplot - goodn:",length(goodn)," goods:",length(goods),"\n") # if off, why is number of names and number of stats groups different? if (length(goodn) != length(goods)) { print("good vectors for boxplot - Problem") print(goodn) print(goods) } goodAll <- goodn | goods # must have name match and no NAs in data. #_______________Scaling____________ # # normally 5/7 - USStatesBG # Since same height, # y boxplot scaling # standard - horizontal box - no vertical # (y) dimensions py <- c(-.5,-.5,.5,.5) thiny <- BoxP.thin*py thicky <- BoxP.thick*py medy <- BoxP.Median.Line*c(-.5,.5) #cat("point sets for -- thiny:",thiny," thicky:", thicky, " medy:",medy,"\n") ry <- c(0,1) # used in y scaling for grid lines #_______________Gather stats and put in Area Order______________ # For the moment match on names # Boxlist = names, stats, out, group, # # Boxplot function generates a list value containing: # stats = matrix - each column is lower, lower hinge, median, upper hinge, # upper wicker for plot/group # n = vector of number of observ in each group # conf = a matrix which each col contins the low/upper extremes # out = valies of any data points which lie extremes of whiskers # group = vector (same length as out) whose elements indicate to which group # names = vector of names for the groups (must be 2 char area names) # There must be unique names that match the area abbreviation list. # stats <- boxlist$stats # statistics: 1-low,2-25%,3-median,4-75%,5-high # - 5 variables for each sub-area. # indexes to boxplot values. 
(pull values into thin and thick) (set up for "boxes") thin <- stats[c(1,5,5,1),] # a column for each area - thin line - outliers (Lower, upper wickers) # - columns in boxlist (1,5,5,1) thick <- stats[c(2,4,4,2),] # a column for each area - thick line - 25% to 75% (lower and upper hinge) # - columns in boxlist(2,4,4,2) med <- stats[3,] # a single value for each sub-area (median data value) nam <- boxlist$names # area name list of boxplots # conf <- boxlist$conf # matrix of extremes - not used. Don't check for. outlier <- rep(F,length(med)) # build vector of all outliers - set to False # outlier length = number of boxplots precented by user. if (length(boxlist$out)>0) { # changed from is.null to length check (is.null does not work) # if outliers exist out <- boxlist$out group <- boxlist$group # group and out are same length.. outlier[unique(group)] <- T # get list of groups with outliners and set indicater TRUE # set to True if we have an outlier to graph. } # if group length = 0 error -- message. #### Need to put in order (boxlist may not be in the same order as the statsDFrame) zBPord <- match(dat$RN, nam) # ord based on match between boxplot$names and original link names in statsDFrame (dat). # (Convert XX to index. ord is the sequence of boxplot data to graph. # zBPord is in the statsDFrame order and points to the supporting boxplot row. # if NA, it means the statsDFrame row does not have a boxplot to go with it. IndexDattoBP <- zBPord # should be one boxplot entry per user data.frame entry. 
# Warn about statsDFrame rows that have no matching boxplot entry, then
# compute the x-axis range over the stats (and outliers, when present).
if (any(is.na(zBPord))) {
   ErrFnd <- TRUE
   warnCnt()
   xmsg <- paste0("***02BE BOXPLOT ",pdColNum," There are one or more of rows in the ",sDFName, " that does not have matching boxplot data, (", boxnam, ") entries.")
   warning(xmsg, call.=FALSE)
   wx   <- is.na(zBPord)
   xmsg <- paste0("***02BF BOXPLOT ",pdColNum," The missing sub-areas are: ", paste0(areaDatAbbr[wx],collapse=", ") )
   warning(xmsg, call.=FALSE)
}

# what about missing values - if NA do not plot on that line
# What about name type inconsistency
#   I will require use of area name abbreviation
#   area ID codes be useful
#   split() based on first two digits of county fips
#   I could stash area fips in statsDFrame sorted order
# For Boxplot median sorting
#   Currently the user would need to sort the
#   medians in the statsDFrame making sure the row.names were correct.
#
# JP-no data in col1, col2, or col3 to sort like the other columns... All of the data is in these structures.
#
#  boxlist$stats[3,]   # the median.
#
# at present no re-ordering of the boxplots like the other plots.
# JP-if other column is sorted, boxplots will follow that order via the indexes.
#

# ___________ scale x axis_______________

lPad <- TRUE
rPad <- TRUE

# BUG FIX: was 'if (is.null(out))'.  'out' is only assigned when
# length(boxlist$out) > 0 (see the outlier gathering above), so with no
# outliers 'is.null(out)' raised "object 'out' not found".  Test the source
# list directly; 'out' is guaranteed to exist in the else branch.
if (length(boxlist$out) == 0) {
   rx <- range(stats,na.rm=TRUE)        # if no outliers - range only on stats
} else {
   rx <- range(stats,out,na.rm=TRUE)    # if outliers - range on stats and outliers
}
#cat("boxplot-rx:",rx,"\n")

#rx <- sc*diff(rx)*c(-.5,.5)+mean(rx)   # min to max range with expansion factors.
#cat("boxplot-rx after padding:",rx,"\n")

# are these used.
dx <- diff(rx)/200          # difference / 200  (??? use)
px <- c(-dx,-dx,dx,dx)      # is this used???

# ____________titles and labeling axes_______________
#####
#
# Setup and draw top and bottom titles and axis for column
#
# Needs Padding on both sides (again none if one is zero.)
# Res <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad) atRx <- Res$atRx rx <- Res$rx ry <- Res$ry #cat("BoxPlot-Result staggering:",staggering," staggered:",staggered,"\n") ##### # # Basic setup and validation for BoxPlot Glyph # ##### # _______________drawing loop___________________ oldpar = par(lend="butt") for (i in 1:numGrps){ # Cycle through the Row/Groups in the micromap column gsubs <- ib[i]:ie[i] # get beginning to end row number in group ke <- length(gsubs) # get number of rows in group pen <- if(i==medGrp & medGrpSize == 1) 7 else 1:ke # if median single group, then pen=6, otherwise pen = c(1...x) laby <- ke:1 # laby = reverse order list for row index. ksc <- SetKsc(ke) panelSelect(panels,i,j) # select panel for group i in column j) panelScale(rx,c(1-pad,ke+pad)) # set scale for panel (0.33333 to 1.666666667) # should work, box plot is set up on 1 as base and + or - 0.5 from the base. panelFill(col=Panel.Fill.col) # set fill for panel # Grid lines axis(side=1, tck=1, labels=F, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd) # grid lines # if a refval is provided and in the rx range, then add line. AddRefLine(refval, ke, rx) panelOutline(col=Panel.Outline.col) # outline panel for (k in 1:ke){ # cycle through row-groups and build each box plot m <- zBPord[gsubs[k]] # m is the location of the area in panelData item list (a boxplot element) if (is.na(m)) next # if no boxplot data - skip box plot drawing for sub-area if (goodAll[m]) { #cat("Grp:",i," k;",k," m:",m,"\n") kp <- pen[k] # color number ht <- laby[k] # vector of the index into the panel (for a 5/6 row group -> 6,5,4,3,2,1 (top to bottom) # for the median group/row -> 1 (that's it.) 1.65 box is set to [0:2] 7 box is set to [0:6] # plot outlier points on graph if (outlier[m]) { # flag indicator - saves scaning. # plot points for outliers (rings) vals <- out[group==m] # get the list of values. 
if (colFull) { # full color do the correct color points(vals, rep(ht,length(vals)), pch=1, col=ifelse(BoxP.Use.Black,"black",mstColors[kp]), cex=BoxP.Outlier.cex, lwd=BoxP.Outlier.lwd) } else { # Greys - do the a grey. points(vals, rep(ht,length(vals)), pch=1, col=BoxP.Outlier.BW.col, cex=BoxP.Outlier.cex, lwd=BoxP.Outlier.lwd) } } # Draw thin line for lower to upper confidence values - box (ht high). wthiny <- thiny * ksc polygon(thin[,m], rep(ht,4)+ wthiny, col=mstColors[kp], border=NA) # Draw middle think box (25% to 75%) wthicky <- thicky * ksc polygon(thick[,m], rep(ht,4)+ wthicky, col=mstColors[kp], border=NA) # Median Bar - Lines looked crooked (Median verical bar) segments(med[m], ht+medy[1], med[m], ht+medy[2], # use segment line. col=BoxP.Median.col, lwd=BoxP.Median.Dot.lwd) } } # end k loop } # end i loop par(oldpar) # ____________________________PanelOutline____________________ groupPanelOutline(panelGroup,j) } # # End of Box Plot Glyph # ##### ##### # # type = 'ctrbar' ==================================== # # rlAreaCtrBar (Centered Bar chart) # # The centered bars is a stacked bar chart with the middle segment centered on the "0" value # of the chart and extending 1/2 it's value in both directions (plus and minus). # The other bar segments are plotted to it's left and right as appropriate. # # # The data structure can have between 2 to 9 data values per area. # Each area must have the same number of values. This limitation may be removed in the future. # # panelData => data.frame where each row is a area with the areaDatKey as the row.name. # The columns are the bar segment values. # rlAreaCtrBar = function(j) { # j = the panel column number # # col1 and col2 indentify the starting column and ending column number in the statsDFrame # that contains the bar values for each area. 
#
# Validate the panelDesc col1/col2 references for the CTRBAR glyph: both
# columns must exist in the statsDFrame, col1 must precede col2, and the
# number of segments (col2-col1+1) must be 2..9.
wstname  <- names(dat)     # names of columns in statsDFrame
wstdim   <- dim(dat)
wstMax   <- wstdim[2]      # number of columns in statsDFrame

ErrFnd   <- FALSE
pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0")

# "col1"
stColName1 <- wstname[col1[j]]
# BUG FIX: the message was assigned to 'pdUMsg' (capital M) but passed to
# CheckPDColnCN as 'pdUmsg', so the intended text never reached the checker
# (and 'pdUmsg' could be stale or unbound).  Use 'pdUmsg' consistently, as
# the other glyph functions do.
pdUmsg     <- "(First data column)"

xr <- CheckPDColnCN('col1','CTRBAR', col1[j], stColName1, j, 1, wstMax, dat, pdUmsg)

if (xr$Err) { ErrFnd <- TRUE
#} else {
#   xdat1 <- xr$Dat     # with CheckPDColnCN, no xr$Dat is returned
}

# "col2"
stColName2 <- wstname[col2[j]]
pdUmsg     <- "(Last data column)"     # BUG FIX: was 'pdUMsg' (see above).

xr <- CheckPDColnCN('col2','CTRBAR', col2[j], stColName2, j, 2, wstMax, dat, pdUmsg)

if (xr$Err) { ErrFnd <- TRUE
#} else {
#   xdat2 <- xr$Dat     # why
}

if (!ErrFnd) {
   if (col1[j] >= col2[j]) {
      ErrFnd <- TRUE
      warnCnt()
      xmsg <- paste0("***020A CTRBAR ", pdColNum, " The first column name/number (", stColName1, ") ","must proceed the last column name/number (", stColName2, ") in the ", sDFName," data frame.")
      warning(xmsg, call.=FALSE)
   } else {
      wD <- ( col2[j] - col1[j] + 1 )   # corrected to properly calculate the number of data columns.
      if (wD < 2 || wD > 9) {
         ErrFnd <- TRUE
         warnCnt()
         xmsg <- paste0("***020B CTRBAR ", pdColNum, " The number of segments is ", wD, ". It must be between 2 and 9. If over 9, only the first 9 will be used.")
         warning(xmsg, call.=FALSE)
      }
   }
}

if (ErrFnd) return ()      # if either column is missing or invalid - skip this column.

# Now check the data in the statsDFrame columns..

stColNums <- c(col1[j]:col2[j])
workCB    <- dat[,stColNums]   # get bar segment data from the statsDFrame.
colNums <- c(1:dim(workCB)[2]) for (ind in colNums) { # check and convert each column iC <- stColNums[ind] # get stDF column number stColNam <- wstname[iC] # get stDF column name F_ind <- formatC(ind,format="f",digits=0,width=1) segNam <- paste0("seg",F_ind) pdUmsg <- paste0("(Bar segment ",F_ind," length)") x <- CheckNum(workCB[,ind],'CTRBAR', ind, pdColNum, segNam, stColNam, pdUmsg) if (x$Err) { ErrFnd <- TRUE } else { workCB[,ind] <- x$Dat } } # ind for loop #print("end of verification in CTRBAR - length of good") good <- !is.na(rowSums(workCB)) # good values (one per row) #print(length(good)) # refval <- lRefVals[j] # get referrence to object, changed reftxt <- lRefTexts[j] # new - JP-2010/07/23 # # mstColors - series of lighter colors of the base colors for each bar. # Use an adjusted list of percentages based on the Number of Segments. # 2 step = 50, 100 # 3 step = 33.3, 66.6, 100 # 4 step = 25, 50, 75, 100 # 5 step = 20, 40, 60, 80, 100 # 6 step = 16.6, 33.3, 50, 66,6, 83.3, 100 # etc. # 1/(NumSegs)*step = transparency or lightness level (100% full) # Dan's addition ==> # as the colors are generated from the base color # # pInc = 1 / NumSegs # # cSteps = cumsum(rep(pInc,NumSegs))^1.35 # # thickness = constant vs. very based on 2 to 9th segment # #_______________Gather stats and put in area Order______________ # # Sorting has already been done - by areaDatKey or value. # The areaID list has therefore been re-ordered accordingly. # Reorder the DataList to match. The assumption was that the input data order for the panelData # matched the order of the original data in the statsDFrame. # workMatCB <- as.matrix(workCB) CBLen <- apply(workMatCB,1,length) # get length of each row. 
CBLRange <- range(CBLen,na.rm=TRUE) NumSegs <- CBLRange[2] # number of segments CBBarPt <- cbind(rep(0,numRows),workMatCB) CBBarPt <- t(apply(CBBarPt,1,cumsum)) # _____________ Color Patterns _______________ # Inputs, NSegments, mstColors[1:7] Output baseColRgb baseColRgb <- BuildSegColors(NumSegs) #_____________Centering_____________ CtrSeg <- as.integer(NumSegs/2) + 1 # center segment if ((NumSegs %% 2) != 0) { # old number of segments CtrPt <- workMatCB[,CtrSeg]/2 + CBBarPt[,CtrSeg] } else { # even number of segments CtrPt <- CBBarPt[,CtrSeg] } CBPlotPts <- CBBarPt - CtrPt #_______________Scaling____________ # x scaling lPad <- TRUE rPad <- TRUE rx <- range(CBPlotPts,na.rm=TRUE) #cat("ctrbar-rx:",rx,"\n") #rx <- sc*diff(rx)*c(-.5,.5)+mean(rx) #cat("ctrbar-rx after padding:",rx,"\n") ry <- c(0,1) pyPat <- c(-.5,-.5,.5,.5,NA) py <- CSNBar.barht * pyPat # CBar.barht = 2/3 (0.6667) (fixed) # py <- c( -1/3, -1/3, +1/3, +1/3, NA) # variable bar height calculations wYPdelta <- (CSNBar.Last.barht - CSNBar.First.barht)/(NumSegs-1) # increment wYP1 <- CSNBar.First.barht - wYPdelta # ____________titles and labeling axes_______________ ##### # # Setup and draw top and bottom titles and axis for column # # General padding on left or right if not zero. # Res <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad) atRx <- Res$atRx rx <- Res$rx ry <- Res$ry #cat("ctrbar-Result staggering:",staggering," staggered:",staggered,"\n") # # # End of Basic Validation and Setup for CtrBar segmented glyph # ##### # ___________________drawing loop_____________________ oldpar <- par(lend="butt") # build each panel for each stacked bar set. 
for (i in 1:numGrps) { gsubs <- ib[i]:ie[i] # get beginning to end index row number in this group ke <- length(gsubs) # get number of rows in group (5 or 1) # adjust if median group pen <- if(i==medGrp & medGrpSize == 1 ) 7 else 1:ke # if median group (7)(black), then pen=6, otherwise pen = c(1...x) laby <- ke:1 ksc <- SetKsc(ke) panelSelect(panels,i,j) x <- panelScale(rx,c(1-pad,ke+pad)) # 1 to 5/6 are the y values for each bar. panelFill(col=Panel.Fill.col) axis(side=1, tck=1, labels=F, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd) # grid # if a refval is provided and in the rx range, then add line. AddRefLine(refval, ke, rx) # # Process each area's line. # for (k in 1:ke) { # cycle through row-groups and assign colors to associated areas dots. m <- gsubs[k] if (good[m]) { wX <- CBPlotPts[m,] # Get Row of data. #wYP <- rep(laby[k],5)+py wYP <- rep(laby[k],5) # calculate box for each segment wYPht <- wYP1 for (ik in 1:NumSegs) { # Y values for segment box if (CBar.varht) { # variable height bar segments wYPht <- wYPht + wYPdelta wYP2 <- wYP + ((pyPat * wYPht) * ksc) } else { # fixed height bar segments wYP2 <- wYP + ( py * ksc ) } # X values for segment box val0 <- wX[ik] # start val1 <- wX[ik+1] # end position wXP <- c(val0,val1,val1,val0,NA) # good value - draw bars are polygons. (why to polygon) polygon(wXP, wYP2, col=baseColRgb[pen[k],ik], lwd=CSNBar.Outline.lwd, border=CSNBar.Outline.col, lty=CSNBar.Outline.lty) #polygon(wXP, wYP2, col=CSNBar.Outline.col, density=0) } } } # end of k loop # finish up panel # draw vertical line at zero. lines(rep(0,2), c(1-padMinus,ke+padMinus), lty=CBar.Zero.Line.lty, lwd=CBar.Zero.Line.lwd, col=CBar.Zero.Line.col) panelOutline(Panel.Outline.col) } # end of i loop par(oldpar) # ____________________________PanelOutline____________________ groupPanelOutline(panelGroup,j) } # # End of ctrbar Glyph # ##### # NA check, if any NA exist in the series from col1 to col2, then the stacked bar will not # be drawn. 
#
#####

#####
#
# type = 'dot' and 'dotsignif' =====================================================
#
# rlAreaDot
#
# Draws a single dot per area.  With dSignif=TRUE (dotsignif) a second column
# of P-values is read and non-significant dots are over-printed with an "x".
#
rlAreaDot = function(j,dSignif=FALSE){
   #
   # j       = current panel column number
   # dSignif = FALSE: plain dot glyph; col1[j] points to the statsDFrame
   #           column holding the dot value.
   #           TRUE:  dot with significance over print; col1[j] points to the
   #           dot value and col2[j] to the P-value - if P value > 0.05 then
   #           overprint "x" on the dot.
   #
   wstname  <- names(dat)     # names of columns in statsDFrame
   wstdim   <- dim(dat)
   wstMax   <- wstdim[2]      # number of columns in statsDFrame

   ErrFnd   <- FALSE
   pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0")

   dotMsgHdr <- "DOT"
   if (dSignif) dotMsgHdr <- "DOTSIG"

   good1   <- TRUE            # vector length of xdat1. TRUE = not NA, FALSE = NA.
   good2   <- TRUE
   goodrow <- TRUE

   pdColNum <- formatC(j,format="f",width=2,digits=0,flag="0")   # (repeat of above - harmless)

   # "col1"
   stColNum1  <- col1[j]
   stColName1 <- wstname[stColNum1]
   pdVarName1 <- "col1"
   pdUmsg     <- "(Dot value)"

   xr <- CheckPDCol(pdVarName1, dotMsgHdr, stColNum1, stColName1, j, 1, wstMax, dat, pdUmsg)

   if (xr$Err) { ErrFnd <- TRUE } else {
      xdat1 <- xr$Dat          # get column of data (xr$Dat returned by CheckPDCol)
      good1 <- !is.na(xdat1)
   }

   if (dSignif) {
      # "col2"
      stColNum2  <- col2[j]
      # BUG FIX: was 'wSFName[stColNum2]' - 'wSFName' is not defined; every
      # sibling glyph resolves column names through 'wstname'.
      stColName2 <- wstname[stColNum2]
      pdVarName2 <- 'col2'
      pdUmsg     <- "(Confidence P-Values)"

      xr <- CheckPDCol(pdVarName2, dotMsgHdr, stColNum2, stColName2, j, 2, wstMax, dat, pdUmsg)

      if (xr$Err) { ErrFnd <- TRUE } else {
         xdat2 <- xr$Dat       # get column of data.
         good2 <- !is.na(xdat2)
         # some may be missing, but we have some, check range.
if (any(xdat2[good2] > Dot.Signif.range[2] | xdat2[good2] < Dot.Signif.range[1] )) { # some values are out of range # ErrFnd <- TRUE # allow missing values in data column, send warning but do not stop plotting glyph. warnCnt() xmsg <- paste0("***022Q", dotMsgHdr, " ", pdColNum, " One or more P_value data entries in the ", stColName2, " for the panelDesc ", pdVarName2 ," variable are out of range." ) warning(xmsg, call.=FALSE) } } } # # Change 7/24/15 - allow missing values in a column for a row. # Change 7/24/15 - if not signif, copy good1 to good2 # Change 7/24/15 - plot row, only if both data columns are not NA. # if (!dSignif) { # dot function goodrow <- good1 } else { # dotsignif function goodrow <- good1 & good2 } if (ErrFnd) return () # error/warning found and can't plot glyph - return # JB - add "as.double(as.vector(" to handle variation in how objects are converted. #____________ref values_____________ refval <- lRefVals[j] # get reference value for this column, changed reftxt <- lRefTexts[j] # new - JP-2010/07/23 xps <= par("ps") #cat("dot-par(ps):",xps,"\n") #_____________y axis________________ ry <- c(0,1) #____________scale x axis______________________ lPad <- TRUE rPad <- TRUE rx <- range(xdat1,na.rm=TRUE) #cat("dot-rx:",rx," cxy:",par("cxy"),"\n") #cxyAdj <- par("cxy")/2 #rx <- sc*diff(rx)*c(-.5,.5)+mean(rx) # + c(-cxyAdj,cxyAdj) # range = mean(rx)/2 * c(-1,1) * 1.08 #cat("dot-rx after padding:",rx,"\n") # ____________labeling axis_______________ ##### # # Setup and draw top and bottom titles and axis for dot and dotsignif glyph column # # Padding for the dot, regardless if zero is left or right. 
# Res <- DrawXAxisAndTitles(j, panels, rx, ry, reftxt, refval, leftPad=lPad, rightPad=rPad) atRx <- Res$atRx rx <- Res$rx ry <- Res$ry xps <= par("ps") #cat("dot-par(ps)2:",xps,"\n") # # Basic validation and setup done for dot and dotsignif glyph # ##### ##### # # _______________drawing loop___________________ # for (i in 1:numGrps){ gsubs <- ib[i]:ie[i] ke <- length(gsubs) pen <- if(i==medGrp & medGrpSize == 1 ) 7 else 1:ke laby <- ke:1 panelSelect(panels,i,j) x <- panelScale(rx,c(1-pad,ke+pad)) panelFill(col=Panel.Fill.col) # grid lines axis(side=1, tck=1,labels=F, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd) # grid # updated 7/24/15 to include at= # if a refval is provided and in the rx range, then add line. AddRefLine(refval, ke, rx) panelOutline(Panel.Outline.col) for (k in 1:ke) { # step through values for this panel m <- gsubs[k] if (goodrow[m]) { # change 7/24/15 - goodrow reflect both columns of data. # can't plot dot, if signif data if missing. # data good for dot - plot dot. plotPoint(xdat1[m], laby[k], Dot.pch, mstColors[pen[k]], Dot.pch.size, Dot.pch.lwd, Dot.Outline, Dot.Outline.col,Dot.Outline.lwd) if (dSignif) { if (xdat2[m] > Dot.Signif.pvalue) { dsCol <- Dot.Signif.pch.col # if color is NA, then follow color for the row. if (is.na(dsCol)) { dsCol <- mstColors[pen[k]] } plotPoint(xdat1[m], laby[k], Dot.Signif.pch, dsCol, Dot.Signif.pch.size, Dot.Signif.pch.lwd, Dot.Signif.Outline, Dot.Signif.Outline.col, Dot.Signif.Outline.lwd) } } # how to link an overprinting with criteria.. } } # end of k loop } # end of i loop # ____________________________PanelOutline____________________ groupPanelOutline(panelGroup,j) } # # End of dot and dotsignif glyphs # ##### ##### # # type = 'dotconf' ==================================================== # # flAreaDotConf # rlAreaDotConf <- function(j){ # # j is the current panel column index # # col1 indicates the column number for the dot value in the statsDFrame. 
# col2 indicates the column number for the lower confidence value in the statsDFrame. # col3 indicates the column number for the upper confidence value in the statsDFrame. #cat("\nDotConf:","\n") wstname <- names(dat) # names of columns in statsDFrame wstdim <- dim(dat) wstMax <- wstdim[2] # number of columns in statsDFrame ErrFnd <- FALSE pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0") # "col1" stColNum1 <- col1[j] stColName1 <- wstname[stColNum1] pdVarName1 <- 'col1' pdUmsg <- "(Dot value)" xr <- CheckPDCol(pdVarName1, 'DOTCONF', stColNum1, stColName1, j, 1, wstMax, dat, pdUmsg) if (xr$Err) { ErrFnd <- TRUE } else { xmn <- xr$Dat # get column of data (x$Dat returned by CheckPDCol) good1 <- !is.na(xmn) } # "col2" stColNum2 <- col2[j] stColName2 <- wstname[stColNum2] pdVarName2 <- 'col2' pdUmsg <- "(Lower Confidence Value)" xr <- CheckPDCol(pdVarName2, 'DOTCONF', stColNum2, stColName2, j, 2, wstMax, dat, pdUmsg) if (xr$Err) { ErrFnd <- TRUE } else { lower <- xr$Dat # get column of data (x$Dat returned by CheckPDCol) good2l <- !is.na(lower) } # "col3" stColNum3 <- col3[j] stColName3 <- wstname[stColNum3] pdVarName3 <- 'col3' pdUmsg <- "(Upper Confidence Value)" xr <- CheckPDCol(pdVarName3, 'DOTCONF', stColNum3, stColName3, j, 3, wstMax, dat, pdUmsg) if (xr$Err) { ErrFnd <- TRUE } else { upper <- xr$Dat # get column of data (x$Dat returned by CheckPDCol) good2u <- !is.na(upper) } if (ErrFnd) return () # error warning found - return #cat("dotconf: data OK - plot","\n") # setup column good arrays # xmn <- dat[,col1[j]] # Col 1 = DOT - median/mean # lower <- dat[,col2[j]] # Col 2 = lower # upper <- dat[,col3[j]] # Col 3 = upper good2 <- !is.na(upper+lower) goodrow <- good1 & good2l & good2u # sum of all checks. 
refval <- lRefVals[j] # changed to lRefVals, JP-2010/07/23 reftxt <- lRefTexts[j] # new - JP-2010/07/23 # Select the first panel in column to allow code to reference its characteristics panelSelect(panels, 1, j) #x <- panelScale(rx, ry) #par(xpd=T) #_____________ y axis ____________________ ry <- c(0,1) #_____________scale x axis________________ lPad <- TRUE rPad <- TRUE rx <- range(upper,lower,xmn,na.rm=TRUE) #cat("dotConf-rx:",rx,"\n") # # NOW DONE in DrawXAxisAndTitle # # dealing with a dot, so padding should be 1/2 width of dot in rx units. #wP <- par("pin")[1] # width of panel #wD <- strwidth(" ",cex=Dot.Conf.pch.size)/2 # get 1/2 of character width #rwD <- (wD/wP) * diff(rx) # dot width as percentage of panel width "times" number of x units to graph #rx <- rx + c(-rwD,rwD) # make room for dot and no more. #cat("dotconf - dot adjust - widthPanel:",wP," widthSp:",wD," diff(rx):",diff(rx)," rwD:",rwD,"\n") # The above is not done in DrawXAxis... # x may not be needed??? #rx_old <- sc*diff(rx)*c(-.5,.5)+mean(rx) #cat("dotConf-rx after padding:",rx," old way:",rx_old,"\n") # ____________labeling axes_______________ ##### # # Setup and draw top and bottom titles and axis for column # # padding on left and right for confidence and dot. 
# #cat("DotConf-calling DrawXAxisAndTitles","\n") Res <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad) atRx <- Res$atRx rx <- Res$rx ry <- Res$ry #cat("DotConf-back from DrawXAxisAndTitles","\n") #cat("dotconf-Result staggered:",staggered,"\n") # # Basic setup and validation done for dotconf glyph # ##### #cat("Dot.Conf.pch:",Dot.Conf.pch," Dot.Conf.pch.size:",Dot.Conf.pch.size, # " Dot.Conf.Outline:",Dot.Conf.Outline,"\n") doDotOutline <- Dot.Conf.Outline #cat("doDotOutline:",doDotOutline,"\n") #cat("Dot.Conf.Outline.lwd:",Dot.Conf.Outline.lwd," .col:",Dot.Conf.Outline.col,"\n") #cat("dotconf - drawing loop col:",j,"\n") #_____________drawing loop___________________ for (i in 1:numGrps){ gsubs <- ib[i]:ie[i] ke <- length(gsubs) pen <- if(i==medGrp & medGrpSize == 1 ) 7 else 1:ke laby <- ke:1 panelSelect(panels,i,j) panelScale(rx,c(1-pad,ke+pad)) # Adjusted scale for interior panelFill(col=Panel.Fill.col) axis(side=1, tck=1, labels=F, at=atRx, # change 7/24/15 - add at= to get grids at the same points as the ticks col=Grid.Line.col, lwd=Grid.Line.lwd) # vertical grid lines # if a refval is provided and in the rx range, then add line. AddRefLine(refval, ke, rx) panelOutline(col=Panel.Outline.col) # outline scaled image. for (k in 1:ke){ m <- gsubs[k] if (goodrow[m]) { # if valid upper value. # 7/25/15 changed to goodrow and covered all plotting. # draw confidence line. lines(c(lower[m],upper[m]), rep(laby[k],2), col=mstColors[pen[k]], lwd=Dot.Conf.lwd) # plot dot. #cat("m:",m," lower:",lower[m]," upper[m]:",upper[m], # " k:",k," laby[k]:",laby[k]," pen[k]:",pen[k],"\n") #cat("Dot.Conf.lwd:",Dot.Conf.lwd,"\n") # #cat("xmn[m]:",xmn[m],"\n") # # doDotOutline - mostly related to black and white printing. # However, users can also request it. # # 0:25 pch's are at 75% of cex. # 0:18 S compatible, vector symbols - uses lwd(lines), col(borders & fill) # 1, 10, 13, 16 are circles. # 15:18 filled characters have no borders. 
# 0:14 line drawings # 15:18 fills, but no lines (lwd not used, but col is the fill, not bg) # 19:25 R vector symbols - uses lwd(lines-borders), col(border), bg(fill) # 26:31 not used # 32:127 Ascii Char # 128:255 local characters. # # The issue not is these points are written for 19:25 not the other. # if 19:25 then bg = fill color, col = border color, lwd = weight of border, # pchValue <- Dot.Conf.pch pchOutline <- Dot.Conf.Outline # enable outline of 19:25 characters. plotPoint(xmn[m], laby[k], Dot.Conf.pch, mstColors[pen[k]], Dot.Conf.pch.size, Dot.Conf.pch.lwd, Dot.Conf.Outline, Dot.Conf.Outline.col, Dot.Outline.lwd ) } } # end of k loop } # end of i loop # ____________________________PanelOutline____________________ groupPanelOutline(panelGroup,j) #cat("DotConf: END.\n") } # # End of dotconf glyph # ##### ##### # # type = 'dotse' ======================================================= # # rlAreaDotSe # rlAreaDotSe = function(j){ # j = current panel column # # col1 indicates the column number for the dot value in the stamicroteFrame. # col2 indicates the column number for the SE value in the statsDFrame. wstname <- names(dat) # names of columns in statsDFrame wstdim <- dim(dat) wstMax <- wstdim[2] # number of columns in statsDFrame ErrFnd <- FALSE pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0") # "col1" stColName1 <- wstname[col1[j]] pdUmsg <- "(Dot Value)" xr <- CheckPDCol('col1', 'DOTSE', col1[j], stColName1, j, 1, wstMax, dat, pdUmsg) if (xr$Err) { ErrFnd = TRUE } else { xdat1 <- xr$Dat } # "col2" stColName2 <- wstname[col2[j]] pdUmsg <- "{Standard Error Value)" xr <- CheckPDCol('col2', 'DOTSE', col2[j], stColName2, j, 2, wstMax, dat, pdUmsg) if (xr$Err) { ErrFnd = TRUE } else { xdat2 <- xr$Dat } if (ErrFnd) return () # error warning found - return good1 <- !is.na(xdat1) good2 <- !is.na(xdat2) goodrow <- good1 & good2 # get sum of the checks - both must be their to plot dot and Se. 
zval <- qnorm(.5+Dot.SE/200) inc <- zval * xdat2 upper <- xdat1 + inc lower <- xdat1 - inc if (ErrFnd) return () # error warning found - return #______________Ref data______________ refval <- lRefVals[j] # changed to lRefVals, JP-2010/07/23 reftxt <- lRefTexts[j] # new - JP-2010/07/23 #______________y range_______________ ry <- c(0,1) #_______________scale x axis__________________ lPad <- TRUE rPad <- TRUE rx <- range(upper,lower,xdat1,na.rm=TRUE) # use upper, lower and xdat1 to find "range" of x # x may not be needed at all. But best to leave. #cat("dotSE-rx:",rx,"\n") #rx <- sc * diff(rx) * c(-.5,.5) + mean(rx) #cat("dotSE-rx after padding:",rx,"\n") # ____________labeling axes_______________ ##### # # Setup and draw top and bottom titles and axis for column # # Padding on left and right for dot and confidence # Res <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad) atRx <- Res$atRx rx <- Res$rx ry <- Res$ry #cat("dotSE-Result staggering:",staggering," staggered:",staggered,"\n") # # Setup and validation for dotse glyph completed. # ##### #__________________drawing loop________________ for (i in 1:numGrps) { gsubs <- ib[i]:ie[i] ke <- length(gsubs) pen <- if(i==medGrp & medGrpSize == 1 ) 7 else 1:ke laby <- ke:1 panelSelect(panels,i,j) x <- panelScale(rx,c(1-pad,ke+pad)) panelFill(col=Panel.Fill.col) axis(side=1, tck=1, labels=F, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd) # grid lines - 7/24/15 add at=atRx to force Grid line to match ticks. # if a refval is provided and in the rx range, then add line. AddRefLine(refval, ke, rx) panelOutline(Panel.Outline.col) for (k in 1:ke){ m <- gsubs[k] # change 7/24/15 - only plot glyph if both data column are not NA. if (goodrow[m]) { # if all values are good # confidence interval based on SE - line . 
lines(c(lower[m],upper[m]), rep(laby[k], 2), col=mstColors[pen[k]],lwd=Dot.SE.lwd) plotPoint(xdat1[m], laby[k], Dot.SE.pch, mstColors[pen[k]], Dot.SE.pch.size, Dot.SE.pch.lwd, Dot.SE.Outline, Dot.SE.Outline.col, Dot.SE.Outline.lwd ) } } } # ____________________________PanelOutline____________________ groupPanelOutline(panelGroup,j) } # # End of dotse glyph # ##### ##### # # type = 'id' ======================================================= # # rlAreaID # rlAreaID = function(j){ # j = panel column number #_____________ Scaling ______________________ # get corners for the boxes. rx <- c(0,diff(panels$coltabs[j+1,])) # column width in inches - index to coltabs is +1 the column number ry <- c(0,1) # not inches, but 0-1 #______________________panel labels_____________ panelSelect(panels,1,j) # start at I = 1, but j= is the current column. x <- panelScale(rx,ry) # # ID text set based on Text.cex.. for 12 point text in a 3/4 to over 1" height boxes. # xusr <- par("usr") # base decision on first panel - they should all be the same. xpin <- par("pin") IDcex.mod <- Id.Cex.mod # get multiplier based on 12 pt. pchSize <- Id.Text.cex * IDcex.mod * Id.Dot.cexm if (xpin[2] < 0.75) { # panel height is getting smaller. reduce text and symbol size. IDcex.mod <- (1 - (( 1 - xpin[2]/0.75 ) ^ 2 )) # get ratio. #cat(" IDcex.mod change from 1 to :",IDcex.mod,"\n") } ### request to lower title into axis label space. xLab1 <- banner["id","H2"] xLab2 <- banner["id","H3"] if (xLab2 == "") { xLab2 <- xLab1 xLab1 <- "" } # column titles if (xLab1 != "") { mtext(xLab1,side=3,line=Id.Title.1.pos,cex=Text.cex) } mtext(xLab2,side=3,line=Id.Title.2.pos,cex=Text.cex) widthPanel <- xpin[1] # inches widthxLab2 <- strwidth(xLab2,units="inch",cex=Text.cex) # one label for ID column. It's centered, so use 1/2 of the width. lastLab2Space <<- ( widthPanel + colSepGap - widthxLab2 ) / 2 # pos - space (have), neg - overhang (need). 
#cat("ID - widthPanel:",widthPanel," width xLab2:",widthxLab2," lastLab2Space:",lastLab2Space," staggered:",staggered,"\n") # ______Bottom Label/Title - Lab3 ______ lastLab3Space <<- ( widthPanel + colSepGap ) / 2 if (lab3[j] != "") { panelSelect(panels,numGrps,j) x <- panelScale(rx,ry) # bottom of column footnote (title) mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex) # bottom labels. widthxLab3 <- strwidth(lab3[j],units="inch", cex=Text.cex) lastLab3Space <<- ( widthPanel + colSepGap - widthxLab3 ) / 2 } #_____________________Square Sizing and Symbol Placement # square width # xstart = Id.Start # inches from left margins #### idstart = 0.137 # inches from base line (not relative) (appears to be replaced below..) TextH2 <- max(strheight(areaDatIDNames,units="inch",cex=(Id.Text.cex * IDcex.mod) )) / 2 # maximum length value /2 par(pch = Id.Dot.pch) # set up the character. #______________________main loop________________ # Cycle thought the GROUPS (numGrps) for (i in 1:numGrps){ npad <- ifelse((i==medGrp & medGrpSize == 1),0.57,pad) # single row = 0.57, or pad list for multiple rows. gsubs <- ib[i]:ie[i] # first element of group to last element of group. ke <- length(gsubs) # number of elements. (rows per group) # since each panel may have different number of rows, this now must be done for each group. ryusr <- c(1-npad,ke+npad) # set scale for the number of rows in group, plus padding. # y axis value = 1 to nRows.. laby <- ke:1 # y index vector - like 5:1 for 5 areas per panel/row. # ke is the number of area per panel/row. pen <- if(i==medGrp & medGrpSize == 1 ) 7 else 1:ke panelSelect(panels,i,j) # select and setup panel for this group of rows. 
x <- panelScale(rx,ryusr) gnams <- areaDatIDNames[gsubs] xusr <- par("usr") xpin <- par("pin") xUnitsPerInch <- diff(xusr[1:2]) / xpin[1] # x units per inch yUnitsPerInch <- diff(xusr[3:4]) / xpin[2] # y units per inch #cat("xUPI:",xUnitsPerInch," usr:",xusr," xpin:",xpin," TextH2:",TextH2,"\n") xHalfSym <- ((Id.Dot.width * Id.Cex.mod) + Id.Space)/2 * xUnitsPerInch xStartu <- xHalfSym # ID offset in units. (a little more than 1/2 width of symbole xSymWu <- xHalfSym - 0.25*Id.Space # ID symbol now in units. #cat("xStartu:",xStartu," xHalfSym:",xHalfSym,"\n") xPosu <- rep(xStartu,ke) xPos2u <- xPosu + xSymWu yPosu <- laby yPos2u <- laby - TextH2 * 0.3 * yUnitsPerInch # offset down by half the height #cat("xPosu:",xPosu," xPos2u:",xPos2u,"\n") #cat("yPosu:",yPosu," yPos2u:",yPos2u,"\n") #cat("Id.Text.cex:",Id.Text.cex," IDcex.mod:",IDcex.mod," prod:",(Id.Text.cex * IDcex.mod),"\n") text(xPos2u, yPos2u, gnams, cex=(Id.Text.cex * IDcex.mod ), xpd=T, pos=4) # Note: the xPosu and yPosu coordinates is the center of the point not the starting edge of a character. plotPoint(xPosu, yPosu, Id.Dot.pch, mstColors[pen], Id.Dot.cexm, "black", TRUE, "black", Id.Dot.lwd ) } # No reference values for this type of column # as we exit loop, we are in the last panel.. xpin <- par("pin") lastLab3Space <<- xpin[1]/2 if (lab3[j] != "") { #panelSelect(panels,numGrps,j) #x <- panelScale(rx,ry) # ______Bottom Label/Title - Lab3 ______ mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex) # bottom column titles lastLab3Space <<- ( xpin[1] - strwidth(lab3[j], units="inch", cex=Text.cex) ) / 2 } } # # End of id glyph # ##### ###### how to get right abbr or full ######## ##### MAP glyphs ##### # # General Notes: # # Data is always represented in the areaDatKey[] vector order. The sDFdat data.frame has been pre-sorted per "sortVar". # # VisKey contains the list of keys per polygons in areaVisBorders # VisCol should contain the mstColors for each polygon to allow a single "polygon" print. 
# # # General Map Notes: # # NotUsed = NT T/F list of sub-areas not referenced in the data. # # back = NT T/F list of not active sub-areas # # high = NT T/F list of secondary sub-areas (not active or background or Not Used.) (Color = pale Yellow) # for map -> not used # for mapcum -> accumulative list, colored pale yellow # for mapmedian -> areas below or above median value (two colors and cross in median group.) # for maptail -> accumulative list to median then subtractive list to end. # highU = NT T/F list of above median sub-areas (not active or Not used.) (color = pale red) # highL = NT T/F list of below median sub-areas (not active or Not used.) (color = pale blue) # # gnams = NT T/F list of active colored to match links. # # VisCol = NT list of polygon keys. # # # Map.Hdr1 and Map.Hdr2 # Map.Hdr2 -> Type of sub-areas (Counties, Provinces, States, etc.) # Map.Hdr1 -> Top title in "map" (reserved) # # column titles: # # map mapcum mapmedian maptail # # 1) Cummulative Maps Median Based Contours # 2) Highlighted b zzzzz Featured Above b zzzzz Featured Above Two Ended Cumulative Maps # 3) States b zzzzz Featured Below b zzzzz Featured Below zzzzz Highlighted # # Map.Hdr2 Map.Hdr2, X "Featured Above" Map.Hdr2 X "Featured Above" Map.Hdr2 "Highlighted" # Map.Hdr2, X "Featured Below" Map.Hdr2 X "Featured Below" # # # "Median For Sorted Panel" # # Calculate width of each phrase. # ##### # # type = 'map' ========================================================= # # rlAreaMap # rlAreaMap = function(j) { # Works using area abbreviations # bnd.ord gives abbreviations in the # the boundary are stored. # areaDatKey give the abbreviations in the order plotted # # Areas are colors if associated with active rows # # j = column number, i = row number # bnd.ord is a list of Keys (one per polygon) in the border file. bnd.ord = rlAreaVisBorders$Key[is.na(rlAreaVisBorders$x)] # Area abbrev based on "NA" in point List. 
#cat("bnd.ord:",bnd.ord,"\n") #cat("Map-Overlays L2:",Map.L2Borders," Reg:",Map.RegBorders," L3:",Map.L3Borders,"\n") # the x,y limits must be based on the biggest area plotted, if the data level # does not cover the entire area, check the L2 and L3 borders. rPoly <- MapPolySetup("map",panels,rlAreaVisBorders,rlL2VisBorders,rlRegVisBorders,rlL3VisBorders,Map.L3Borders) rxpoly2 <- rPoly$rxpoly2 rypoly2 <- rPoly$rypoly2 # must be done before panel is scaled. # Issue: The median single row group does not print a map. So, there aspect ratio normalizations # could cause problems with median text. Keep an eye on. # ____________labeling and axes_______________ panelSelect(panels,1,j) x <- panelScale() par(xpd=T) xpin <- par("pin") #printPar() # column titles - centered no boxes. # Use lines 2 and 3(tick) for two row title. # no need for centering logic - no boxes. xLab1 <- banner["map","H2"] xLab2 <- banner["map","H3"] if (xLab2 == "") { xLab2 <- xLab1 xLab1 <- "" } if (xLab1 != "") mtext(xLab1,side=3,line=Title.Line.2.pos,cex=Text.cex) mtext(xLab2,side=3,line=Title.Line.2x.pos,cex=Text.cex) lastLab2Space <<- - ( xpin[1] - strwidth(xLab2,units="inch",cex=Text.cex) ) / 2 # Put the initial colors for all sub-areas into a vector. VisNodes <- is.na(rlAreaVisBorders$x) # end of point elements for each polygon VisKeys <- rlAreaVisBorders$Key[VisNodes] # key for that polygon VisHoles <- rlAreaVisBorders$hole[VisNodes] # is it a hole NotUsed <- !is.na(match(rlAreaVisBorders$Key,NotUsedKeys)) # list of not used points. NotUsedFlag <- any(NotUsed) # flag to indicate not used exists VisNU <- !is.na(match(VisKeys,NotUsedKeys)) # T/F list of not used polygons. 
# # Panel Setup already calculated the following variables # # numGrps - number of group/rows # medGrp - the number of the median group/rows (if number of groups is odd, otherwize = 0) # medGrpSize - number of rows in the median group/row (if no median group, value = 0) # medRow - the number of the median row (if number of rows is odd, othersize = 0) # medRowBlw - the number of the row just below the median # medRowAbv - the number of the row just above the median # # #cat("map - areaDatKey:",areaDatKey,"\n") # Drawing Loop for (i in 1:numGrps) { if ( i == medGrp & medGrpSize == 1 ){ # line break in maps. Group n/2 - middle group of n (odd) # Setup Panel for single row median group panelSelect(panels,i,j) x <- panelScale() panelFill (col=Panel.Fill.col) panelOutline() # inform xmsg <- banner["map","M1"] # Insert median - single group/row - centered on the middle of the rectangle (0.5, 0.5) text (.5,.5,xmsg,cex= Text.cex*0.8) # center around 0.5, 0.5 (center) next # skip to next FOR item ### EXIT } # handle groups with 2 or more rows panelSelect(panels,i,j) # Do map in - Panels by group... x <- panelScale(rxpoly2,rypoly2) # apply the required scalling gsubs <- ib[i]:ie[i] # get the index range for this panel blkAreaCol <- 0 if (medGrp > 0 & medGrpSize == 1) { # If this setup has a median group with 1 row - then we must watch for the panel above and below it # to highlight the median row in these panels. # Add median sub-area coloring to the row above and below the median line. if (i == (medGrp-1)) { gsubs <- c(gsubs,medRow) # slot med-1 - add med-row to this group blkAreaCol <- length(gsubs) # indicate median/black area flag saves the "color" number the will be seen in the final. } if (i == (medGrp+1)) { gsubs <- c(gsubs,medRow) # slot med+1 - add med-row to this group blkAreaCol <- length(gsubs) # indicate median/black area and the length of the gsubs vector } # blkAreaCol uses length(gsubs) as key - 2,3,4,5,6 used the index to match up later. 
} #print(paste0("gsubs:",paste0(gsubs,collapse=" "))) # medRow - the median row, if number of rows is old. # will always be in the medGrp group gnams <- areaDatKey[gsubs] # index to sub-area keys (translation) #print(paste0("gnams:",paste0(gnams,collapse=" "))) # # Even though a hole in a sub-area may be later filled with a color or grey, # it is always filled with tthe background map color. The order of the # Polygons in the VisBorder files always have the holes following the basic sub-area # and sub-areas filling other sub-areas holes after that's area's polygons. # # mstColors = 1-6 -> active sub-area colors # mstColors = 7 -> median sub-area color in panels above and below the median # mstColors = 8-10 -> highlighted colors used in mapcum, mapmedian and maptail # mstColors = 11 -> unreferenced sub-area # 12 -> background sub-area (non-active) # # Run: sequence # Set all colors in VisCol (based on polygons) # Set all background and unused borders in VisLinexx # # Separate highlight sub-area borders (2) # Separate foreground sub-area borders (current set) # # draw fill colors for all (VisCol) # # draw background/Not Referenced lines # draw highlighted lines # draw foreground lines # # Get set up T/F vector for each type of group of sub-areas to plot #cat("length(NotUsedKeys):",length(NotUsedKeys)," with keys:",paste0(NotUsedKeys,collapse=", "),"\n") #cat("gnams:",paste0(gnams,collapse=", "),"\n") VisCol <- rep(11,length(VisKeys)) # reduced size - color per polygon # isolate foreground (active) sub-areas. 
foreKeys <- gnams # get list of keys fore <- !is.na(match(rlAreaVisBorders$Key,foreKeys)) # find polygon points for each fore sub-areas and assign color based on order in gnams foreFlag <- any(fore) # set flags if any found VisForeCol <- match(VisKeys,foreKeys) # get color index for each foreg subarea (1-6) # NA=not foreground, #=foreground and order (foreKeys 1 to 6) VisFore <- !is.na(VisForeCol) # T/F vector of VisKeys that are foreground VisCol[VisFore] <- VisForeCol[VisFore] # transfer color index for each foreground polygon. if (blkAreaCol>0) { VisCol[VisCol == blkAreaCol] <- 7 # set to black # if VisCol == BlkAreaCol set previously - reset to "black" index (7) } # not really used - can we delete or set to empty? Trying to standardize code?? highKeys <- NA # clear high light vector - always none for "map" high <- !is.na(match(rlAreaVisBorders$Key,highKeys)) highFlag <- any(high) VisHigh <- !is.na(match(VisKeys,highKeys)) VisCol[VisHigh] <- 8 # what is left - the background sub-areas. back <- !(fore | high | NotUsed) # background is anything not active and not used. T/F list backFlag <- any(back) backKeys <- unique(rlAreaVisBorders$Key[back]) VisBack <- !is.na(match(VisKeys,backKeys)) VisCol[VisBack] <- 12 VisCol2 <- mstColors[VisCol] # translate to real colors VisCol2[VisHoles] <- Map.Bg.col # set all holes to the panels background color. # colors are ready for ploting polygons #### # Map background - Layer 2 borders (regional areas (US -> states)) # if (Map.L2Borders) { # area area overlay # map fill sub-areas polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=-1, col=Map.L2.Fill.col, border=FALSE) # map borders of sub-areas polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=0, col=Map.L2.Line.col, lwd=Map.L2.Line.lwd) # white #cat("Drew L2 Borders\n") } # #### #### # # Map sub-areas # # Draw the colors for all active sub-areas. 
# polygon(rlAreaVisBorders$x,rlAreaVisBorders$y, density=-1, col=VisCol2, border=FALSE) #cat("Drew active sub-area colors.\n") # # setup each group of sub-areas and draw polygons. # Not Referenced sub-areas if (NotUsedFlag) { wVisBorders <- NULL wVisBorders <- rlAreaVisBorders[NotUsed,] # map sub-areas without data (not used) polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd) # white #cat("Drew not used sub-areas borders.\n") } # # Background (not-active) sub-areas # if (backFlag) { wVisBorders <- NULL wVisBorders <- rlAreaVisBorders[back,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd) # white #cat("Drew not-active sub-areas borders.\n") } # # Highlighted sub-areas # if (highFlag) { wVisBorders <- NULL wVisBorders <- rlAreaVisBorders[high,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd) # black #cat("Drew highlighted sub-areas borders.\n") } # # Foreground (active) sub-areas # if (foreFlag) { wVisBorders <- NULL wVisBorders <- rlAreaVisBorders[fore,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd) # black #cat("Drew Active sub-areas borders.\n") } #### # # map boundaries for regions. # if (Map.RegBorders && regionsB) { # regions boundaries overlay polygon(rlRegVisBorders$x, rlRegVisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd) # black #cat("Drew Region Borders\n") } # #### #### # # Outline L3 (total) area (complete area boundary) # if (Map.L3Borders) { polygon(rlL3VisBorders$x, rlL3VisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd) # black - outside US boundary # # If U. S. map, add extra labels for sub-areas moved. # if (areaUSData) { ##### replace with feature based code. 
if (i==1) { # if first map in column text(135,31,'DC',cex=Map.Area.Spec.cex, adj=.5, col=1) text(22, 17,'AK',cex=Map.Area.Spec.cex, adj=.5, col=1) text(47, 8, 'HI',cex=Map.Area.Spec.cex, adj=.5, col=1) } } } # no reference values for this type of column. If present - ignor. } # i loop # as we finish i loop, we end up in the last panel xpin <- par("pin") lastLab3Space <<- xpin[1]/2 if (lab3[j] != "") { #panelSelect(panels,numGrps,j) #x <- panelScale(rxpoly2,rypoly2) # ______Bottom Label/Title - Lab3 ______ mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex) # bottom column titles lastLab3Space <<- (xpin[1] - strwidth(lab3[j], cex=Text.cex, units="inch")) / 2 } } ##### # # type = 'mapcum' ======================================================== # # rlAreaMapCum # rlAreaMapCum = function(j) { # Works using area abbreviations # bnd.ord gives abbreviations in the order the boundary are stored. # areaDatKey give the abbreviations in the order plotted # # Areas are colored if active in row. # Areas are colored cream is they were active in previous groups/rows. # # bnd.ord is a list of Keys (one per polygon) in the border file. bnd.ord = rlAreaVisBorders$Key[is.na(rlAreaVisBorders$x)] # area abbrev for areas with boundaries # the x,y limits must be based on the biggest area plotted, if the data level # does not cover the entire area, check the L2 and L3 borders. rPoly <- MapPolySetup("mapcum",panels,rlAreaVisBorders,rlL2VisBorders,rlRegVisBorders,rlL3VisBorders,Map.L3Borders) rxpoly2 <- rPoly$rxpoly2 rypoly2 <- rPoly$rypoly2 # must be done before panel is scaled. # Issue: The median single row group does not print a map. So, there aspect ratio normalizations # could cause problems with median text. Keep an eye on. 
# ____________labeling and axes_______________ panelSelect(panels,1,j) x <- panelScale() # default scale 0:1, 0:1 not very useful par(xpd=T) xpin <- par("pin") # make adjustments to handle variable scaling of first panel - at this point its 0,1 by 0,1 # par("fin") has width and height in inches.. (2.4 x 3.6) # par("pin") has plot width and height in inches ( 1.4 x 1.111 ) # So, at 0,1 by 0,1 the aspect is really 1.111/1.4 = 0.79 about. # # # draw box for title label (convert inches into points for the panel.) # # line 1 - title, no boxes. mtext(banner["mapcum","H1"],side=3,line=Title.Line.1.pos,cex=Text.cex) # use line position.. # Line 2 - box and title DrawBoxAndText(banner["mapcum","H2"], Text.cex, Map.Lab.Box.Width, mstColors[8], "black", Title.Line.2.pos) DrawBoxAndText(banner["mapcum","H3"], Text.cex, Map.Lab.Box.Width, Map.Bg.col, "black", Title.Line.2x.pos) lastLab2Space <<- - ( xpin[1] - ( strwidth(banner["mapcum","H3"],units="inch",cex=Text.cex) + 0.15 ) ) / 2 VisNodes <- is.na(rlAreaVisBorders$x) VisKeys <- rlAreaVisBorders$Key[VisNodes] VisHoles <- rlAreaVisBorders$hole[VisNodes] NotUsed <- !is.na(match(rlAreaVisBorders$Key,NotUsedKeys)) NotUsedFlag <- any(NotUsed) VisNU <- !is.na(match(VisKeys,NotUsedKeys)) # # ##### # # Alternative is to must plot the text at x,y points. # # Drawing Loop #cat("mapcum - areaDatKey:",areaDatKey,"\n") for (i in 1:numGrps) { if (i==medGrp & medGrpSize == 1) { panelSelect(panels,i,j) x <- panelScale() panelFill (col=Panel.Fill.col) panelOutline() text (.5,.5,banner["mapcum","M1"],cex=Text.cex*0.8) # centered around 0.5 0.5 next } panelSelect(panels,i,j) x <- panelScale(rxpoly2,rypoly2) gsubs <- ib[i]:ie[i] blkAreaCol <- 0 ke = length(gsubs) # get number of rows. ## if a single row is not the median then the middle group is the median. 
if ( medGrp > 0 & medGrpSize == 1) { if (i == (medGrp-1)) { gsubs <- c(gsubs,medRow) blkAreaCol <- length(gsubs) } if (i == (medGrp+1)) { gsubs <- c(gsubs,medRow) blkAreaCol <- length(gsubs) } } gnams = areaDatKey[gsubs] # translate from sequence number to sorted order of areas (abbrev) # list of areas in this row (group) panel. #### # Map background - Layer 2 borders # if (Map.L2Borders) { # area area overlay # map fill areas polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=-1, col=Map.L2.Fill.col, border=FALSE) # map borders polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=0, col=Map.L2.Line.col, lwd=Map.L2.Line.lwd) # white } # #### #### # # map boundaries for regions. # if (Map.RegBorders && regionsB) { # regions boundary overlay polygon(rlRegVisBorders$x, rlRegVisBorders$y, density=0, col=Map.L2.Line.col, lwd=Map.L2.Line.lwd) } # #### VisCol <- rep(11,length(VisKeys)) # reduced size foreKeys <- gnams fore <- !is.na(match(rlAreaVisBorders$Key,foreKeys)) # find fore sub-areas and assign color based on order in gnams foreFlag <- any(fore) VisForeCol <- match(VisKeys,foreKeys) VisFore <- !is.na(VisForeCol) VisCol[VisFore] <- VisForeCol[VisFore] if (blkAreaCol>0) { VisCol[VisCol == blkAreaCol] <- 7 # set to black } highKeys <- areaDatKey[1:ib[i]-1] # vector of names used areas include this panel. high <- !is.na(match(rlAreaVisBorders$Key,highKeys)) highFlag <- any(high) VisHigh <- !is.na(match(VisKeys,highKeys)) VisCol[VisHigh] <- 8 # what is left - the background sub-areas. back <- !(fore | high | NotUsed) # background is anything not active and not used. T/F list backFlag <- any(back) backKeys <- unique(rlAreaVisBorders$Key[back]) VisBack <- !is.na(match(VisKeys,backKeys)) VisCol[VisBack] <- 12 VisCol <- mstColors[VisCol] # translate to real colors VisCol[VisHoles] <- Map.Bg.col polygon(rlAreaVisBorders$x,rlAreaVisBorders$y, density=-1, col=VisCol, border=FALSE) # setup each group of sub-areas and draw polygons. 
# Not Referenced sub-areas if (NotUsedFlag) { wVisBorders <- rlAreaVisBorders[NotUsed,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd) # white } # Background (not-active) sub-areas if (backFlag) { wVisBorders <- rlAreaVisBorders[back,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd) # white } # Highlighted sub-areas if (highFlag) { wVisBorders <- rlAreaVisBorders[high,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd) # black } # Foreground (active) sub-areas if (foreFlag) { wVisBorders <- rlAreaVisBorders[fore,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd) # black } #### # # map boundaries for regions. # if (Map.RegBorders && regionsB) { # regions boundaries overlay polygon(rlRegVisBorders$x, rlRegVisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd) # black } # #### #### # # Outline Country area (total area). # if (Map.L3Borders) { polygon(rlL3VisBorders$x, rlL3VisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd) # black - outside US boundary if (areaUSData) { if (i==1) { text(135,31,'DC',cex=Map.Area.Spec.cex,adj=.5, col=1) text(22, 17,'AK',cex=Map.Area.Spec.cex,adj=.5, col=1) text(47, 8, 'HI',cex=Map.Area.Spec.cex,adj=.5, col=1) } } } } # i loop # no reference values for this type of column. If present - ignor. 
# as we leave i loop - we are in the last group panel xpin <- par("pin") lastLab3Space <<- xpin[1]/2 if (lab3[j] != "") { #panelSelect(panels,numGrps,j) #x <- panelScale(rxpoly2,rypoly2) # ______Bottom Label/Title - Lab3 ______ mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex) # bottom column titles lastLab3Space <<- (xpin[1] - strwidth(lab3[j], cex=Text.cex, units="inch")) / 2 } } ##### # # type = 'mapmedian' ================================================= # # rlAreaMapMedian # rlAreaMapMedian = function(j){ # Works using area abbreviations # bnd.ord gives abbreviations in the # the boundary are stored. # areaDatKey give the abbreviations in the order plotted # This MapMedian cream colors all areas above and below the median area. # Areas < median are colored very light red in upper half of groups, # Areas > median are colored very light blue in lower half of groups. # In the median group when there is more than one row, both above and below # shading are done as a cross over. # bnd.ord = rlAreaVisBorders$Key[is.na(rlAreaVisBorders$x)] # area abbrev # the x,y limits must be based on the biggest area plotted, if the data level # does not cover the entire area, check the L2 and L3 borders. rPoly <- MapPolySetup("mapmedian",panels,rlAreaVisBorders,rlL2VisBorders,rlRegVisBorders,rlL3VisBorders,Map.L3Borders) rxpoly2 <- rPoly$rxpoly2 rypoly2 <- rPoly$rypoly2 # ____________labeling and axes_______________ panelSelect(panels,1,j) x <- panelScale() par(xpd=T) xpin <- par("pin") # # draw box for title label (convert inches into points for the panel.) # # line 1 - title, no boxes. mtext(banner["mapmed","H1"],side=3,line=Title.Line.1.pos,cex=Text.cex) # use line position.. 
# Line 2 - box and title: legend swatches for the two median-split shades.
# mstColors[9] marks "above median" rows, mstColors[10] marks "below median" rows.
DrawBoxAndText(banner["mapmed","H2"], Text.cex, Map.Lab.Box.Width, mstColors[9], "black", Title.Line.2.pos)
DrawBoxAndText(banner["mapmed","H3"], Text.cex, Map.Lab.Box.Width, mstColors[10], "black", Title.Line.2x.pos)
# Record the (negative) left offset of the second legend line for later layout use.
lastLab2Space <<- - ( xpin[1] - ( strwidth(banner["mapmed","H3"],units="inch",cex=Text.cex) + 0.15 ) ) / 2
#cat("mapmed - areaDatKey:",areaDatKey,"\n")
#
# Per-polygon bookkeeping.  In the VisBorders structure an NA x-coordinate
# terminates a polygon, so the rows where x is NA give one entry per polygon
# (same convention as the bnd.ord extraction used by the other map glyphs).
VisNodes <- is.na(rlAreaVisBorders$x)
VisKeys  <- rlAreaVisBorders$Key[VisNodes]    # area key owning each polygon
VisHoles <- rlAreaVisBorders$hole[VisNodes]   # TRUE when the polygon is a hole
# Sub-areas present in the boundary file but with no data row.
NotUsed     <- !is.na(match(rlAreaVisBorders$Key,NotUsedKeys))
NotUsedFlag <- any(NotUsed)
VisNU       <- !is.na(match(VisKeys,NotUsedKeys))
# Rows sorted above the median row (upper half) and below it (lower half).
highUKeys <- areaDatKey[1:medRowAbv]
highU     <- !is.na(match(rlAreaVisBorders$Key,highUKeys))
highUFlag <- any(highU)
VisHighU  <- !is.na(match(VisKeys,highUKeys))
highLKeys <- areaDatKey[medRowBlw:numRows]
highL     <- !is.na(match(rlAreaVisBorders$Key,highLKeys))
highLFlag <- any(highL)
VisHighL  <- !is.na(match(VisKeys,highLKeys))
# Drawing Loop
# In the median group both the above- and below-median shadings are applied.
for (i in 1:numGrps) {
   # Median Group/Row with 1 row
   # NOTE(review): scalar `&` used in an `if` condition here and below; works
   # for length-1 operands but `&&` is the conventional scalar operator - confirm
   # before changing, as this file uses `&` consistently.
   if (i==medGrp & medGrpSize == 1) {
      # median group/row with 1 row - draw explanatory text instead of a map.
      panelSelect(panels,i,j)
      x <- panelScale()
      panelFill (col=Panel.Fill.col)
      panelOutline()
      text (.5,.5,banner["mapmed","M1"],cex=Text.cex*0.8)
      next   # exit for loop to next group/row
   }
   # All panels now have 2 or more rows
   panelSelect(panels,i,j)
   x <- panelScale(rxpoly2,rypoly2)
   gsubs <- ib[i]:ie[i]        # data-row indexes belonging to this group panel
   blkAreaCol <- 0             # 0 = no extra accented (black) median area in panel
   # Median Group/Row Panel
   if (medGrp > 0 & medGrpSize == 1) {
      # if we had a median group/row with 1 row, then accent the median row
      # in the panels immediately above and below it.
      if (i == medGrp-1) {
          gsubs <- c(gsubs,medRow)        # add median row to list
          blkAreaCol <- length(gsubs)     # accent in above panel
      }
      if (i == medGrp+1) {
          gsubs <- c(gsubs,medRow)        # add median row to list
          blkAreaCol <- length(gsubs)     # accent in below panel
      }
   }
   # gsubs <- current area list
   gnams <- areaDatKey[gsubs]   # set of areas for normal coloring. (get keys from index #s)
   #
   # Sub Divide into four groups:
   #   1) background, 2) Above Median with data, 3) Below Median with data, 4) Active
   #   What's left:   1:medRowAbv    medRowBlw:numRows    gsubs
   # note: medRowBlw:numRows will catch NA data items (in a sorted column NAs sit at the bottom).
   # note: a non-data sub-area will not have any row in the data, but will have a row in
   #   the areaNamesAbbrsIDs table; if we don't reference them, their boundaries may not
   #   be completely drawn.
   #
   ####
   # Map background - Layer 2 borders (regional areas (US -> states))
   #
   if (Map.L2Borders) {      # Layer-2 area overlay
      # map fill areas
      polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=-1, col=Map.L2.Fill.col, border=FALSE)
      # map borders
      polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=0, col=Map.L2.Line.col, lwd=Map.L2.Line.lwd)  # white
   }
   #
   ####
   # Color index per polygon: 11 = default/background shade until re-classified.
   VisCol <- rep(11,length(VisKeys))
   highUbdr <- FALSE
   highLbdr <- FALSE
   # Panels above the median group shade the upper tail (9); panels below shade
   # the lower tail (10); the median panel shades both (cross over).
   if (i < medGrp ) {
      high <- highU
      highUbdr <- TRUE
      VisCol[VisHighU] <- 9
   }
   if (i > medGrp) {
      high <- highL
      highLbdr <- TRUE
      VisCol[VisHighL] <- 10
   }
   if (i == medGrp) {
      high <- highU | highL
      highUbdr <- TRUE
      highLbdr <- TRUE
      VisCol[VisHighU] <- 9
      VisCol[VisHighL] <- 10
   }
   foreKeys <- gnams
   fore <- !is.na(match(rlAreaVisBorders$Key,foreKeys))  # find fore sub-areas and assign color based on order in gnams
   foreFlag <- any(fore)
   # Active areas get color index = their position within gnams (1..ke),
   # overriding any tail shading set above.
   VisForeCol <- match(VisKeys,foreKeys)
   VisFore <- !is.na(VisForeCol)
   VisCol[VisFore] <- VisForeCol[VisFore]
   if (blkAreaCol>0) {
      VisCol[VisCol == blkAreaCol] <- 7   # accented median area -> black
   }
   # what is left - the background sub-areas.
   back <- !(fore | high | NotUsed)   # background is anything not active and not used.  T/F list
   backFlag <- any(back)
   if (backFlag) {
      backKeys <- unique(rlAreaVisBorders$Key[back])
      VisBack <- !is.na(match(VisKeys,backKeys))
      VisCol[VisBack] <- 12
   }
   VisCol <- mstColors[VisCol]       # translate index values to real colors
   VisCol[VisHoles] <- Map.Bg.col    # holes are painted with the page background
   # Fill every polygon in one call, then draw outlines per class below.
   polygon(rlAreaVisBorders$x,rlAreaVisBorders$y, density=-1, col=VisCol, border=FALSE)
   # setup each group of sub-areas and draw outline polygons.
   # Not Referenced sub-areas
   if (NotUsedFlag) {
      wVisBorders <- rlAreaVisBorders[NotUsed,]
      polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd)  # white
   }
   # Background (not-active) sub-areas
   if (backFlag) {
      wVisBorders <- rlAreaVisBorders[back,]
      polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd)  # white
   }
   # Highlighted sub-areas (2 tails)
   if (highUbdr) {
      wVisBorders <- rlAreaVisBorders[highU,]
      polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)  # black
   }
   if (highLbdr) {
      wVisBorders <- rlAreaVisBorders[highL,]
      polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)  # black
   }
   # Foreground (active) sub-areas
   if (foreFlag) {
      wVisBorders <- rlAreaVisBorders[fore,]
      polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd)  # black
   }
   ####
   #
   #  map boundaries for regions.
   #
   if (Map.RegBorders && regionsB) {   # regions boundaries overlay
      polygon(rlRegVisBorders$x, rlRegVisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd)  # black
   }
   #
   ####
   ####
   #
   #  Outline Country area (total area).
   #
   if (Map.L3Borders) {
      polygon(rlL3VisBorders$x, rlL3VisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd)  # black - outside US boundary
      if (areaUSData) {
         # US-specific annotations for the small inset areas; drawn in the
         # top panel only.
         if (i==1) {
            text(135,31,'DC',cex=Map.Area.Spec.cex,adj=.5, col=1)
            text(22, 17,'AK',cex=Map.Area.Spec.cex,adj=.5, col=1)
            text(47, 8, 'HI',cex=Map.Area.Spec.cex,adj=.5, col=1)
         }
      }
   }
}  # i loop
# no reference values for this type of column. If present - ignore.
# as we finish the i loop - we are in the last group panel.
xpin <- par("pin")
lastLab3Space <<- xpin[1]/2
if (lab3[j] != "") {
   #panelSelect(panels,numGrps,j)
   #x <- panelScale(rxpoly2,rypoly2)
   # ______Bottom Label/Title - Lab3 ______
   mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex)   # bottom column titles
   # remaining horizontal space (inches) beside the centered bottom title
   lastLab3Space <<- (xpin[1] - strwidth(lab3[j], cex=Text.cex, units="inch")) / 2
}
}
#####
#
# type = 'maptail' ====================================================
#
# rlAreaMapTail
#
rlAreaMapTail = function(j){
   # j = panel column number.
   # Works using area abbreviations:
   #   bnd.ord gives abbreviations in the order the boundaries are stored;
   #   areaDatKey gives the abbreviations in the order plotted.
   # MapTail shows the current group's areas as colored and a "tail" of
   # already-plotted areas (in cream color) accumulating from the outside inward.
   #
   #browser()
   bnd.ord <- rlAreaVisBorders$Key[is.na(rlAreaVisBorders$x)]    # area Key (NA x marks polygon end)
   # Compute common x/y plot limits over all border layers for this glyph.
   rPoly <- MapPolySetup("maptail",panels,rlAreaVisBorders,rlL2VisBorders,rlRegVisBorders,rlL3VisBorders,Map.L3Borders)
   rxpoly2 <- rPoly$rxpoly2
   rypoly2 <- rPoly$rypoly2
   #cat("maptail - areaDatKey:",areaDatKey,"\n")
   # ____________labeling and axes_______________
   # Panel # 1 - header
   # column header titles and "box"
   panelSelect(panels,1,j)
   # Line 1 and Line 2 - panel 1
   x <- panelScale()
   # NOTE(review): `T` used for TRUE - reassignable in R; `TRUE` is safer.
   par(xpd=T)        # allow drawing (titles) outside the panel region
   xpin <- par("pin")
   #
   # draw box for title label (convert inches into points for the panel.)
   #
   # Line 1 - Not used
   # line 2 - title, no boxes.
   mtext(banner["maptail","H2"],side=3,line=Title.Line.2.pos,cex=Text.cex)   # use line position..
   # Line 3 - box and title; mstColors[8] is the tail (cream) legend swatch.
   DrawBoxAndText(banner["maptail","H3"], Text.cex, Map.Lab.Box.Width, mstColors[8], "black", Title.Line.2x.pos)
   # Record the (negative) left offset of the legend line for later layout use.
   lastLab2Space <<- - ( xpin[1] - ( strwidth(banner["maptail","H3"],units="inch",cex=Text.cex) + 0.15 ) ) / 2
   # If needed, this would be the place for Panel # N - Trailer code.
# JP - removed - temp # mtext('Further From Median',side=3,line=Title.Line.2x.pos,at=.15,cex=Text.cex,adj=0) # need a median group point for calculations on the two tailed maps if (medGrp > 0 ) { # odd number of groups medGrpPt <- medGrp } else { medGrpPt <- (numGrps/2) # + one lower } VisNodes <- is.na(rlAreaVisBorders$x) VisKeys <- rlAreaVisBorders$Key[VisNodes] VisHoles <- rlAreaVisBorders$hole[VisNodes] NotUsed <- !is.na(match(rlAreaVisBorders$Key,NotUsedKeys)) NotUsedFlag <- any(NotUsed) VisNU <- !is.na(match(VisKeys,NotUsedKeys)) # Drawing Loop for (i in 1:numGrps) { if(i==medGrp & medGrpSize == 1 ) { panelSelect(panels,i,j) panelScale() panelFill (col=Panel.Fill.col) panelOutline() text (.5,.5,banner["maptail","M1"],cex=Text.cex*0.8) next } panelSelect(panels,i,j) x <- panelScale(rxpoly2,rypoly2) # get list of areas in this group. gsubs <- ib[i]:ie[i] ke <- length(gsubs) blkAreaCol <- 0 if (medGrp > 0 & medGrpSize == 1) { if (i==(medGrp-1)) { gsubs <- c(gsubs,medRow) blkAreaCol <- length(gsubs) } if (i==(medGrp+1)) { gsubs <- c(gsubs,medRow) blkAreaCol <- length(gsubs) } } # get list of group area names gnams = areaDatKey[gsubs] #### # # Map background - Layer 2 borders (regional areas (US -> states)) # if (Map.L2Borders) { # area area overlay # map fill areas polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=-1, col=Map.L2.Fill.col, border=FALSE) # map borders polygon(rlL2VisBorders$x, rlL2VisBorders$y, density=0, col=Map.L2.Line.col, lwd=Map.L2.Line.lwd) # white } # #### VisCol <- rep(11,length(VisKeys)) highKeys <- NA highFlag <- FALSE if (i < medGrpPt) highKeys <- areaDatKey[1:ib[i]] # areas below the median highlighted. 
if (i > medGrpPt) highKeys <- areaDatKey[ie[i]:numRows] if (length(highKeys) > 0) { high <- !is.na(match(rlAreaVisBorders$Key,highKeys)) highFlag <- any(high) VisHigh <- !is.na(match(VisKeys,highKeys)) VisCol[VisHigh] <- 8 } foreKeys <- gnams fore <- !is.na(match(rlAreaVisBorders$Key,foreKeys)) # find fore sub-areas and assign color based on order in gnams foreFlag <- any(fore) VisForeCol <- match(VisKeys,foreKeys) VisFore <- !is.na(VisForeCol) VisCol[VisFore] <- VisForeCol[VisFore] if (blkAreaCol>0) { VisCol[VisCol == blkAreaCol] <- 7 # set to black } # what is left - the background sub-areas. back <- !(fore | high | NotUsed) # background is anything not active and not used. T/F list backFlag <- any(back) if (backFlag) { backKeys <- unique(rlAreaVisBorders$Key[back]) VisBack <- !is.na(match(VisKeys,backKeys)) VisCol[VisBack] <- 12 } VisCol <- mstColors[VisCol] # translate to real colors VisCol[VisHoles] <- Map.Bg.col # draw the combined fill colors in VisBorder file order. polygon(rlAreaVisBorders$x,rlAreaVisBorders$y, # plot all polygons density=-1, col = VisCol, border = FALSE) # fill in all areas. (1 to 6, 7, hole) # setup each group of sub-areas and draw polygons. # Not Referenced sub-areas if (NotUsedFlag) { wVisBorders <- rlAreaVisBorders[NotUsed,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd) # white } # Background (not-active) sub-areas if (backFlag) { wVisBorders <- rlAreaVisBorders[back,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Bg.Line.col, lwd=Map.Bg.Line.lwd) # white } # Highlighted sub-areas if (highFlag) { wVisBorders <- rlAreaVisBorders[high,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd) # black } # Foreground (active) sub-areas if (foreFlag) { wVisBorders <- rlAreaVisBorders[fore,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= Map.Fg.Line.col, lwd=Map.Fg.Line.lwd) # black } #### # # map boundaries for regions. 
# if (Map.RegBorders && regionsB) { # regions boundaries overlay polygon(rlRegVisBorders$x, rlRegVisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd) # black } # #### #### # # Outline Country area (total area). # if (Map.L3Borders) { polygon(rlL3VisBorders$x, rlL3VisBorders$y, density=0, col=Map.L3.Line.col, lwd=Map.L3.Line.lwd) # black - outside US boundary if (areaUSData) { if (i==1) { text(135,31,'DC',cex=Map.Area.Spec.cex, adj=.5, col=1) text(22, 17,'AK',cex=Map.Area.Spec.cex, adj=.5, col=1) text(47, 8, 'HI',cex=Map.Area.Spec.cex, adj=.5, col=1) } } } } # i loop # no reference values for this type of column. If present - ignor. # as we finish i loop - we are in the last group panel xpin <- par("pin") lastLab3Space <<- xpin[1]/2 if (lab3[j] != "") { #panelSelect(panels,numGrps,j) #x <- panelScale(rxpoly2,rypoly2) # ______Bottom Label/Title - Lab3 ______ mtext(side=1,lab3[j],line=Title.Line.3.pos,cex=Text.cex) # bottom column titles lastLab3Space <<- (xpin[1] - strwidth(lab3[j], cex=Text.cex, units="inch")) / 2 } } # # ##### # # Area Rank Number ================================================================ # # rlAreaRank # based ID dot. # display the sorted rank. # need to update to reflect RANKing based on sorted value. Could have ties. # ##### # # Re-Think and rewrite before documenting. 
# #####
rlAreaRank = function(j){
   # Area Rank glyph: prints each area's sorted rank number in its group panel.
   # j = panel column number
   #________________ Scaling _______________
   rx <- c(0,1)
   ry <- c(0,1)
   rankstart <- 0.137    # x position (panel fraction) where rank text begins
   #______________________panel labels_____________
   panelSelect(panels,1,j)
   panelScale(rx,ry)
   mtext('Area Rank',side=3,line=Title.Line.1.pos,cex=Text.cex)
   # mtext('areas',side=3,line=Title.Line.2.pos,cex=Text.cex)
   for (i in 1:numGrps){
      gsubs <- ib[i]:ie[i]            # data-row indexes for this group panel
      ke <- length(gsubs)             # number of rows in the group
      laby <- ke:1                    # y slots, top row first
      rsubs <- xDFrame$Rank[gsubs]    # rank values to print
      # NOTE(review): `pen` is computed but never used in this function -
      # looks vestigial (copied from the colored glyphs); confirm before removing.
      pen <- if(i==medGrp & medGrpSize == 1) 7 else 1:ke
      panelSelect(panels, i, j)
      x <- panelScale(rx, c(1-pad, ke+pad))
      Fgsubs <- formatC(rsubs, format="f", width=3, digits=0)   # fixed-width rank labels
      text(rep(rankstart, ke), laby+.1, Fgsubs, adj=0, cex=Text.cex)
   }
   # No reference values for this type of column.
}
#####
#
# type = 'ScatDot' =====================================================
#
# rlAreaScatDot  (Scattered Plot Dots)
#
rlAreaScatDot = function(j){
   #
   # j = panel column number
   #
   # col1 and col2 point to the X and Y data values in the statsDFrame data.frame
   # (known here as "dat").
   #
   #
   wstname <- names(dat)     # names of columns in statsDFrame
   wstdim <- dim(dat)
   wstMax <- wstdim[2]       # number of columns in statsDFrame
   ErrFnd <- FALSE
   pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0")   # zero-padded column number for messages
   # "col1" - validate the X-coordinate column reference.
   stColName1 <- wstname[col1[j]]
   pdUmsg <- "(X coordinates)"
   xr <- CheckPDCol('col1', 'SCATDOT', col1[j], stColName1, j, 1, wstMax, dat, pdUmsg)
   if (xr$Err) { ErrFnd <- TRUE } else { xdat1 <- xr$Dat }
   # "col2" - validate the Y-coordinate column reference.
   stColName2 <- wstname[col2[j]]
   pdUmsg <- "(Y coordinates)"
   xr <- CheckPDCol('col2', 'SCATDOT', col2[j], stColName2, j, 2, wstMax, dat, pdUmsg)
   if (xr$Err) { ErrFnd <- TRUE } else { xdat2 <- xr$Dat }
   if (ErrFnd) return ()    # validation failed - skip drawing this column
   # NOTE(review): good1/good2 appear unused downstream (goodrow covers both);
   # confirm against the remainder of the function.
   good1 <- !is.na(xdat1)            # test to see if both values are present.
   good2 <- !is.na(xdat2)
   goodrow <- !is.na(xdat1 + xdat2)  # TRUE only where both x and y are present
   # x and y data loaded into workSCD data.frame
   workSCD <- data.frame(x=xdat1,y=xdat2)   # get x and y data from the statsDFrame.
# x and y are the coordinates of each dot. # # other fields added later # $pch - symbol code (only 19:25 are supported) # $cex - symbol size # $bg - background color - symbol fill color # $col - color of line # $lwd - line weight of outline of symbol # rownames(workSCD) <- rownames(dat) # transfer row.names refval <- lRefVals[j] # get referrence to object, changed reftxt <- lRefTexts[j] # new - JP-2010/07/23 #_______________Gather stats and put in area Order______________ # Sorting has already been done of the statsDFrame (dat) by areaDatKey or value # in the function startup. #_______________Scaling____________ # x scaling lPad <- TRUE rPad <- TRUE rx <- range(workSCD$x,na.rm=TRUE) # range of X values #cat("scatdot-rx:",rx,"\n") #rx <- SCD.xsc*diff(rx)*c(-.5,.5)+mean(rx) # min to max range with expansion factors. #cat("scatdot-rx after padding:",rx,"\n") # y scaling ry <- range(workSCD$y,na.rm=TRUE) # range of Y values ry <- SCD.ysc*diff(ry)*c(-.5,.5)+mean(ry) # diagonal end points dx <- max(rx[1],ry[1]) diagr <- c(max(rx[1],ry[1]), min(rx[2],ry[2])) # ____________titles and labeling axes_______________ ##### # # Setup and draw top and bottom titles and axis for column # # Padding on left and right for dots. # Res <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad, YAxisPad=TRUE) atRx <- Res$atRx rx <- Res$rx ry <- Res$ry #cat("ScatDot-Result staggering:",staggering," staggered:",staggered,"\n") # ##### # ___________________drawing loop_____________________ # in the ordered list, the median should be 26 of 51 items. changed because of generalization. 
oldpar <- par(lend="butt") # build each panel for scatter plot dots # Y axis & text - can do once for all YAxis_cex <- TS.Axis.cex * 0.75 xPs <- par("ps") xHPsLU <- strheight("00000",cex=1,units="user") xHDesPsLU <- strheight("00000",cex=YAxis_cex,units="user") xDifHLU <- xHPsLU - xHDesPsLU YAxis_adj <- xDifHLU / xHPsLU #cat("YAxis adjustment - YAxis_adj:",YAxis_adj," YAxis_cex:",YAxis_cex,"\n") for (i in 1:numGrps) { # groups from 1 to 5, 6, 7 to 11 ## 6 is the median group. # Cycle through the Row/Groups in the micromap column # This glyph is special in that it draws the data in every panel for all of the scatdot data points. # Only the ones related to the group/row are modified and colored. # Set defaults values for all dots for this panel workSCD$pch <- SCD.Bg.pch # default pch code. workSCD$cex <- SCD.Bg.pch.size # default size, except median workSCD$bg <- SCD.Bg.pch.fill # default symbol color file - was SCD.Bg.pch.fill workSCD$col <- SCD.Bg.pch.col # default line color of outline ("black") workSCD$lwd <- SCD.Bg.pch.lwd # default line weight of outline if (medGrp > 0 & medGrpSize == 1) { # if there is a median Group/Row and it contains one row, then if (i >= medGrp-1 && i <= medGrp + 1) { # force median dot to be highlighted in median and near groups. # modify characteristics of the point in previous and following group/rows to the median group/row workSCD$pch[medRow] <- SCD.Median.pch workSCD$cex[medRow] <- SCD.Median.pch.size workSCD$bg[medRow] <- SCD.Median.pch.fill workSCD$col[medRow] <- SCD.Median.pch.col workSCD$lwd[medRow] <- SCD.Median.pch.lwd } } # plot points. # get list of active rows in this group/row gsubs <- ib[i]:ie[i] # get beginning to end index row number in this group ke <- length(gsubs) # get number of rows in group (5 or 1) # Get color indexes. 
# adjust if median group pen <- if(i==medGrp & medGrpSize == 1 ) 7 else 1:ke # if median group (6)(black), then pen=6, otherwise pen = c(1...x) panelSelect(panels,i,j) # select panel for group i in column j) x <- panelScale(rx,ry) # set scale for panel (should this be ry * 5 or 1?) panelFill(col=Panel.Fill.col) # set fill for panel # vertical grid lines. axis(side=1, tck=1, labels=F, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd) # grid lines # y axis labels if (i==medGrp & medGrpSize == 1) { # median panel # special for median group/row with one row atRy <- c(saveAtRy[1],saveAtRy[length(saveAtRy)]) # for margin panel, print the lowest and highest. } else { atRy <- panelInbounds(ry) # prettyprint a range. } # optional horizontal grid. if (SCD.hGrid) { axis(side=2,tck=1,labels=F,col=Grid.Line.col,lwd=Grid.Line.lwd, at=atRy) # Grid lines } # parameters and variable setup outside of loop. axis(side=2, tick=F, cex.axis=YAxis_cex, mgp=mgpLeft, line= -YAxis_adj*0.3, at=atRy, labels=as.character(atRy)) mtext(lab4[j],side=2, line=Title.Line.5.pos, cex=TS.Axis.cex) panelOutline(col=Panel.Outline.col) # outline panel # dv <- c(gsubs[1:ke],medRow) # was 26. # # draw diagonal line of symetry from c(min (x, y),min(x,y)) to # c(max(x,y), max(x,y)), all point have x=y. # if ((diagr[1] < diagr[2]) && SCD.DiagLine) { # draw symetric line if within box range. dx <- c(diagr[1],diagr[2]) dy <- c(diagr[1],diagr[2]) lines(dx,dy, col=SCD.DiagLine.col, lwd=SCD.DiagLine.lwd, lty=SCD.DiagLine.lty) # place a diagonal line on plot. # print out the statistics for the line if (MST.Debug == 1) { print(paste0("line:",paste0(c(dx,dy),collapse=" "))) print(paste0("usr:",paste0(par("usr"),collapse=" "))) print(paste0("pin:",paste0(par("pin"),collapse=" "))) MST.Debug = 0 # turn off. } } # plot points if (i == medGrp & medGrpSize == 1) { wS <- workSCD[gsubs[1],] # get one entry - the median (Median group/row with 1 row). } else { # standard group/row or median without single row. 
for (k in 1:ke) { # Process each slot of panel - step 1 to 5/6 or 1 to 1 # cycle through row-groups and assign colors to associated area's dots. m <- gsubs[k] workSCD$pch[m] <- SCD.Fg.pch # only 19:25 are supported. workSCD$cex[m] <- SCD.Fg.pch.size workSCD$bg[m] <- mstColors[pen[k]] # set approvate color to circle fill. workSCD$col[m] <- SCD.Fg.pch.col # color of outline of symbol workSCD$lwd[m] <- SCD.Fg.pch.lwd # weight of outline of symbol } wS <- workSCD[order(workSCD$cex,decreasing=FALSE),] # sort by text size to get active point on top. # plot all points by size, others first, colored and median last. } # Have lists of points to plot in wS # Since the points we plot must have outlines and have fill colors, # only the graphic points 19:25 are supported. # points(wS$x, wS$y, pch=wS$pch, col=wS$col, bg=wS$bg, cex=wS$cex, lwd=wS$lwd) # removed # col = border of symbol, bg = background color of symbol. # related to NA processing, points will just not draw a symbol if one of the x,y coordinates is NA. saveAtRy <- atRy # save for possible use on median panel. } # end of i loop par(oldpar) # ____________________________PanelOutline____________________ groupPanelOutline(panelGroup,j) } ############################################ #### # # type = 'segbar' and 'normbar' ==================================== # # rlAreaSegBar (Segmented Bar chart) # # Segmented bars is actually a stacked bar chart. Each segment is the length of one value. # The total length is the sum of the lengths of all segments. # The x scale of the column panels will be set to the "max" length of any bar. # # In the normalized mode, the total for the segments is divided into value of each # segment to get a percentage (0 to 100%). The segments are then plotted as stacked # bars using the percentage. The complete bar will be drawn from the left to right edge of # the panel. # # The data structure can have between 2 to 9 values per area. # Each area must have the same number of values. 
This limitation may be removed in the future. # # Feature added to make each segment a different thickness. 1/4/2014 # # panelData => data.frame where each row is a area with the areaIUKey as the row.name. # The columns are the bar segment values. # rlAreaSegBar = function(j, SBnorm=FALSE) { # j = the panel column number # SBnorm (FALSE = stacked, TRUE = normalized) # col1 indicates the starting or first column in the statsDFrame data for bar segment values. # col2 indicates the ending or last column in the statsDFrame data. # # The bar segment values are in the statsDFrame for each area in columns "col1" to "col2". # wstname <- names(dat) # names of columns in statsDFrame wstdim <- dim(dat) wstMax <- wstdim[2] # number of columns in statsDFrame ErrFnd <- FALSE pdColNum <- formatC(j,format="f",digits=0,width=2,flag="0") gName <- "SEGBAR" if (SBnorm) gName <- "NORMBAR" # "col1" stColName1 <- wstname[col1[j]] #print("col1") pdUmsg <- "(First Segment Data Column)" xr <- CheckPDColnCN('col1', gName, col1[j], stColName1, j, 1, wstMax, dat, pdUmsg) #print(xr) if (xr$Err) { ErrFnd <- TRUE #} else { # xdat1 <- xr$Dat } # "col2" stColName2 <- wstname[col2[j]] #print("col2") pdUmsg <- "{Last Segment Data Column)" xr <- CheckPDColnCN('col2', gName, col2[j], stColName2, j, 2, wstMax, dat, pdUmsg) #print(xr) if (xr$Err) { ErrFnd <- TRUE #} else { # xdat1 <- xr$Dat } if (!ErrFnd) { if (col1[j] >= col2[j]) { ErrFnd <- TRUE warnCnt() xmsg <- paste0("***020A ", gName, " ", pdColNum, " The first column name/number (", stColName1,") must proceed the last column name/number (", stColName2,") in the ", sDFName," data frame.") warning(xmsg, call.=FALSE) } else { wD <- ( col2[j] - col1[j] + 1 ) # corrected to calculate the number of data columns if (wD < 2 || wD > 9) { ErrFnd <- TRUE warnCnt() xmsg <- paste0("***020B", gName, " ", pdColNum, " The number of segments is ", wD, ". It must be between 2 and 9. 
If over 9, only the first 9 will be used.") warning(xmsg, call.=FALSE) } } } if (ErrFnd) return () # error warning found - return stColNums <- c(col1[j]:col2[j]) workSB <- dat[,stColNums] # get bar segment data from the statsDFrame. colNums <- c(1:dim(workSB)[2]) for (ind in colNums) { # check and convert each column iC <- stColNums[ind] # get stDF column number stColNam <- wSFName[iC] # get stDF column name F_ind <- formatC(ind,format="f",digits=0,width=1) segNam <- paste0("seg",F_ind) pdUmsg <- paste0("(Bar segment ",F_ind," length)") x <- CheckNum(workSB[,ind], gName, ind, pdColNum, segNam, stColNam, pdUmsg) if (x$Err) { ErrFnd <- TRUE } else { workSB[,ind] <- x$Dat } } good <- !is.na(rowSums(workSB)) # all good values. if any are na # refval <- lRefVals[j] # get referrence to object, changed reftxt <- lRefTexts[j] # new - JP-2010/07/23 # # # Colors - added transparency from x in steps of number of Segments up to 100% # so 2 step = 50, 100 # 3 step = 33.3, 66.6, 100 # 4 step = 25, 50, 75, 100 # 5 step = 20, 40, 60, 80, 100 # 6 step = 16.6, 33.3, 50, 66,6, 83.3, 100 # etc. # 1/(NumSegs)*step = transparency # # Dan's addition ==> # as the colors are generated from the base color # # pInc = 1 / NumSegs # # cSteps = cumsum(rep(pInc,NumSegs))^1.35 # # thickness = constant vs. very based on 2 to 9th segment # #_______________Gather stats and put in area Order______________ # Sorting has already been done - by areaDatKey or value. # The areaID list has therefore been re-ordered accordingly. # Reorder the DataList to match. The assumption was that the input data order for the panelData # matched the order of the original data in the statsDFrame. # #cat("SBBar - areaDatKey:",areaDatKey,"\n") workMatSB <- as.matrix(workSB) SBLen <- apply(workMatSB,1,length) # get length of each row. 
SBLRange <- range(SBLen,na.rm=TRUE) NumSegs <- SBLRange[2] # number of segments (Max Length) SBBarPt <- cbind(rep(0,numRows),workMatSB) SBBarPt <- t(apply(SBBarPt,1,cumsum)) #_______________Scaling____________ # x scaling lPad <- TRUE rPad <- TRUE rMax <- max(SBBarPt) if (SBnorm) { rx <- c(0,100) lPad <- FALSE rPad <- FALSE } else { rx <- c(0,rMax*1.02) lPad <- FALSE } #cat("seg/normbar-rx:",rx,"\n") ry <- c(0,1) pyPat <- c(-0.5,-0.5,0.5,0.5,NA) py <- CSNBar.barht * pyPat # SNBar.barht = 2/3 (0.6667) (fixed) # py <- c( -1/3, -1/3, +1/3, +1/3, NA) # variable bar height calculations wYPdelta <- (CSNBar.Last.barht - CSNBar.First.barht)/(NumSegs-1) # increment wYP1 <- CSNBar.First.barht - wYPdelta # _____________ Color Patterns _______________ baseColRgb <- BuildSegColors(NumSegs) # ___________titles and labeling axes_______________ ##### # # Setup and draw top and bottom titles and axis for column # # if segmented stacked - no padding on side with zero. # if normalized stacked - no padding on either side. # Res <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad) atRx <- Res$atRx rx <- Res$rx ry <- Res$ry #cat("SN-staggering:",staggering," Result staggered:",staggered,"\n") # ##### # ___________________drawing loop_____________________ oldpar <- par(lend="butt") # build each panel for each stacked bar set. #printPar() #print(paste0("rx:",paste0(rx,collapse=" ")," ry:",paste0(c(1-pad,ke+pad),collapse=" "))) for (i in 1:numGrps) { gsubs <- ib[i]:ie[i] # get beginning to end index row number in this group ke <- length(gsubs) # get number of rows in group (5 or 1) # adjust if median group pen <- if(i==medGrp & medGrpSize == 1) 7 else 1:ke # if median group (6)(black), then pen=6, otherwise pen = c(1...x) laby <- ke:1 ksc <- SetKsc(ke) panelSelect(panels,i,j) x <- panelScale(rx,c(1-pad,ke+pad)) # 1 to 5 are the y values for each bar. 
panelFill(col=Panel.Fill.col) axis(side=1, tck=1, labels=F, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd) # grid # if a refval is provided and in the rx range, then add line. AddRefLine(refval, ke, rx) # # Not checking "good" values provided. # # # Process each area's line. # for (k in 1:ke) { # cycle through row-groups and assign colors to associated areas dots. m <- gsubs[k] if (good[m]) { wX <- SBBarPt[m,] # Get Row of data. if (SBnorm) { wX <- wX / wX[NumSegs+1] * 100 # last segment value is in NumSegs + 1 to get last column (end point) } #wYP <- rep(laby[k],5)+py # height of segment (laby[k] => center line of segbar) wYP <- rep(laby[k],5) # height of segment (laby[k] => center line of segbar) # calculate box for each segment wYPht <- wYP1 for (ik in 1:NumSegs) { if (SNBar.varht) { # variable height bar segments wYPht <- wYPht + wYPdelta wYP2 <- wYP + ((pyPat * wYPht) * ksc ) #print(paste0("Seg:",ik," wYP2:",wYP2)) } else { # fixed height bar segments wYP2 <- wYP + (py * ksc) } val0 <- wX[ik] # start val1 <- wX[ik+1] # end position wXP <- c(val0,val1,val1,val0,NA) # good value - draw bars are polygons. (why to polygon) polygon(wXP,wYP2,col=baseColRgb[pen[k],ik],lwd=CSNBar.Outline.lwd,border=CSNBar.Outline.col,lty=CSNBar.Outline.lty) #polygon(wXP,wYP2,col=CSNBar.Outline.col,density=0) } # end of ik loop (plotting Segments) # if (SNBar.Middle.Dot) { # do we graph a middle dot on the row? mY <- laby[k] # get Y position # put dot on boundary if even number of segments or in middle of middle segment if odd. if ((NumSegs %% 2)==1) { # put dot in middle of middle segment. mSeg <- NumSegs %/% 2 + 1 mX <- (wX[mSeg] + wX[mSeg+1])/2 # middle of segment } else { # put dot on border between two middle segments. mSeg <- NumSegs %/% 2 mX <- wX[mSeg+1] } if (SNBar.MDot.pch >= 21 && SNBar.MDot.pch <= 25) { # treat filled and non-filled symbols differently - get close to same results. 
              # with filled, fill is bg, col and lwd deal with border
              # with non-filled, fill is col, lwd deals with border using col.
              # filled symbol
              points(mX,mY,pch=SNBar.MDot.pch,
                     cex=SNBar.MDot.pch.size,
                     bg=SNBar.MDot.pch.fill,            # fill color
                     col = SNBar.MDot.pch.border.col,   # border color
                     lwd = SNBar.MDot.pch.border.lwd)
           } else {
              # non filled symbol
              points(mX,mY,pch=SNBar.MDot.pch,
                     cex=SNBar.MDot.pch.size,
                     col = SNBar.MDot.pch.fill,         # fill and border color
                     lwd = SNBar.MDot.pch.border.lwd)
           }
        }   # end of Middle Dot drawing.
     }   # end of "good" check for row.
   }   # end of k loop (group/row)

   # finish up panel
   panelOutline(Panel.Outline.col)
  }   # end of i loop

 par(oldpar)

 # ____________________________PanelOutline____________________

 groupPanelOutline(panelGroup,j)
}

###################################################
#
#  For TS, and TSConf I could not find a way to use to have areaDatKeys as the names of
#  each area matrix, in list or data.frame.  So, the out at this time is
#  to assume the original panelData array is in the order of the original statsDFrame data.frame.
#  When statsDFrame is re-ordered, I have captured the re-ordering.  Using the "order" index
#  the raw panelData is used via the order index to associate the line on the micromap to the data.
#
#  Boxplot uses $names to look up to find out the record and link the Boxplot list to the
#  statsDFrame data.
#
#####

#####
#
# type = TS and TSConf =====================================================
#
# rlAreaTSConf  (Time Series with and without confidence interval in panel groups)
#
# Plot all data for panel's areas as one graph in panel.
#
rlAreaTSConf = function(j,dataNam,conf=TRUE){
   #
   # Draw one time-series glyph column: one line (plus optional confidence band)
   # per sub-area, grouped into the micromap's row panels.
   #
   # j       = panel column number
   #
   # dataNam = Name of large data array containing the x, y (or y low, med and high) values
   #           for each time period and area.  Data element is three dimensions (area, sample, value)
   #           The area index is limited to 1:51.  The value index is limited to 1:4.
   #           The sample index is not limited, but a practical limit is around 200-250 samples.
   #
   # conf    = logical.
   #           If TRUE, do the confidence band using y-low, y-med, and y-high values (columns 2, 3, 4)
   #           If FALSE, only plot the Y value (column 2)
   #
   # NOTE(review): this glyph reads many names set up by the caller elsewhere in the
   #   package (areaDatKey, lRefVals, lRefTexts, sc, numGrps, ib, ie, medGrp, medGrpSize,
   #   mstColors, panels, TS.* options, warnCnt, MMVSplit, ...) - confirm against full file.
   #
   #cat("TS - areaDatKey:",areaDatKey,"\n")

   ErrFnd     <- FALSE
   TSMsgLabel <- "TS"
   if (conf) TSMsgLabel <- "TSCONF"

   pdColNum   <- formatC(j,format="f",digits=0,width=2,flag="0")

   # Check data - look up the named panelData array in the user's environment.
   DataList = tryCatch(get(dataNam,pos=1),error=function(e) e)   # get name of array data object list.

   if (inherits(DataList,"error")) {
      ErrFnd  <- TRUE
      warnCnt()
      xmsg    <-paste0("***02T1", TSMsgLabel, " ", pdColNum, " column in data.frame ", dataNam, " does not exist or is not valid.")
      warning(xmsg, call.=FALSE)
   } else {
      # data.frame (r object) exists - can do other checks
      workDArr   <- DataList
      wDArrNames <- rownames(workDArr)      # get rownames (one per sub-area)

      if (!is.array(workDArr)) {
         ErrFnd  <- TRUE
         warnCnt()
         xmsg    <- paste0("***02T2", TSMsgLabel, " ", pdColNum, " The ", dataNam, " data structured in the panelData field is not an array.")
         warning(xmsg, call.=FALSE)
      }

      dimDArr <- dim(workDArr)

      #if (dimDArr[1] != numRows) {     # RETIRE..
      #   ErrFnd  <- TRUE
      #   warnCnt()
      #   xmsg    <- paste0("***02T3", TSMsgLabel, " ", pdColNum, " The " , dataNam, " array\'s 1st dimension is not ", numRows, " areas. It is ", dimDArr[1], ".")
      #   warning(xmsg, call.=FALSE)
      #}

      #if (dimDArr[2] < 2 || dimDArr[2] > 31)    # removed upper limit of the number of time points.
      if (dimDArr[2] < 2 ) {
         ErrFnd  <- TRUE
         warnCnt()
         xmsg    <- paste0("***02T4", TSMsgLabel, " ", pdColNum," The ", dataNam, " array\'s 2nd dimension (time periods) must have at least 2 points. It is ", dimDArr[2], ".")
         warning(xmsg, call.=FALSE)
      }

      if (conf) {
         # TSCONF option. # Time Series with Confidence Bands - needs x, low, mid, high.
         if (dimDArr[3] !=4) {
            ErrFnd  <- TRUE
            warnCnt()
            xmsg    <- paste0("***02T5", TSMsgLabel, " ", pdColNum, " The ", dataNam, " array\'s 3rd dimension is not 4. 
It is ", dimDArr[3], ",")
            warning(xmsg, call.=FALSE)
         }
      } else {
         # Time Series without Confidence Bands - needs at least x and y.
         if (dimDArr[3] < 2) {
            ErrFnd  <- TRUE
            warnCnt()
            # NOTE(review): message text says "3nd" - typo in the user-visible string,
            #   left unchanged here since it is runtime output.
            xmsg    <- paste0("***02TA", TSMsgLabel, " ", pdColNum, " The time series array\'s 3nd dimension must be at least 2. It is ", dimDArr[3], ".")
            warning(xmsg, call.=FALSE)
         }
         if (dimDArr[3] != 2 && dimDArr[3] != 4) {
            # accept confidence data - don't stop run.
            ErrFnd  <- TRUE
            warnCnt()
            xmsg    <- paste0("***02T6", TSMsgLabel, " ", pdColNum, " The time series array\'s 3rd dimension must be 2 or 4. It is ", dimDArr[3], ".")
            warning(xmsg,call.=FALSE)
         }
      }

      if (is.null(wDArrNames)) {
         # names are not present - cannot pair array rows with sub-areas.
         ErrFnd  <- TRUE
         warnCnt()
         xmsg    <- paste0("***02TB", TSMsgLabel, " ", pdColNum, " The time series array does not have rownames assigned to the 1st dimension. Data cannot be paired up with area.")
         warning(xmsg, call.=FALSE)
      } else {
         tnn <- is.na(match(wDArrNames,areaDatKey))
         if (any(tnn)) {
            # non-match found.
            ErrFnd  <- TRUE
            warnCnt()
            lnn     <- paste0(wDArrNames[tnn],collapse=" ")
            xmsg    <- paste0("***02T7", TSMsgLabel, " ", pdColNum," Rownames on array do not match subarea ID list. The bad area IDs are:", lnn)
            warning(xmsg, call.=FALSE)
         }
      }
   }

   if (ErrFnd) return ()     # if any errors found - don't draw column.

   refval <- lRefVals[j]     # get reference to object, changed
   reftxt <- lRefTexts[j]    # new - JP-2010/07/23

   # structure of dataArr
   #   dataList is a 3 dim array :
   #      a * b * c, where:
   #      a is the area index number (1 to "n")  (sub-area)
   #      b is the time period index (2 to "n" range)  (Limited only by R and memory)
   #      c is the type of value (1=x, 2=low, 3=mid, 4=high)  or  (1=x, 2=y)
   #
   #_______________Scaling of TS Axis____________
   # x scaling
   lPad <- FALSE
   rPad <- FALSE
   rx   <- range(workDArr[,,1],na.rm=TRUE)     # x range from all values in vector
   #cat("ts-rx:",rx,"\n")
   #rx   <- sc*diff(rx)*c(-.5,.5)+mean(rx)     # min to max range with expansion factors.
   #cat("ts-rx after padding:",rx,"\n")

   # y scaling
   if (conf) {
      # range of line, high and low.  (drop column 1 = x values)
      ry <- range(workDArr[,,c(-1)],na.rm=TRUE)    # range of all Y values
   } else {
      # range of line.
      ry <- range(workDArr[,,2],na.rm=TRUE)        # range for the one Y value
   }
   #cat("ts-ry:",ry,"\n")
   ry <- sc*diff(ry)*c(-.5,.5)+mean(ry)            # min to max range with expansion factors.
   #cat("ts-ry after padding:",ry,"\n")

   #_______________Find range/min/max of median row line/high/low.____________
   #_______________Gather stats and put in area Order______________
   #
   # JP-no data in col1, col2, or col3 to sort like the other columns...
   # All of the data is in these structures.
   #
   # at present no re-ordering of the time series like the other plots.
   # JP-if other column is sorted, time series will follow that order via the indexes.
   #
   ####
   # ____________column titles and axis_______________
   #####
   #
   #  Setup and draw top and bottom titles and axis for column
   #
   #  TS, TS-Conf no padding on either side - graph starts at first data point to last data point.
   #  Check out this effects labeling.
   #
   Res  <- DrawXAxisAndTitles(j, panels, rx, ry,reftxt, refval, leftPad=lPad, rightPad=rPad, YAxisPad=TRUE)
   atRx <- Res$atRx
   rx   <- Res$rx
   ry   <- Res$ry
   #cat("Ts-Result staggering:",staggering,"  staggered:",staggered,"\n")
   #
   #####

   oldpar <- par(lend="butt")

   ##### Can be done once for all iterations of loop.
   # Pre-compute the Y axis label size and a small vertical adjustment so the
   # smaller axis font sits on the tick line correctly.
   YAxis_cex <- TS.Axis.cex * 0.75
   xPs       <- par("ps")
   xHPsLU    <- strheight("00000",cex=1,units="user")
   xHDesPsLU <- strheight("00000",cex=YAxis_cex,units="user")
   xDifHLU   <- xHPsLU - xHDesPsLU
   YAxis_adj <- xDifHLU / xHPsLU
   #cat("YAxis adjustment - YAxis_adj:",YAxis_adj,"  YAxis_cex:",YAxis_cex,"\n")

   # _______________drawing loop (panels 1->11)___________________

   for (i in 1:numGrps) {     # 1,2,3,4,5, 6, 7,8,9,10,11    ng=11 (for US)
      # Cycle through the Row/Groups in the micromap column

      gsubs <- ib[i]:ie[i]                  # get beginning to end index row number in group
      ke    <- length(gsubs)                # get number of rows in group (5/6 or 1)

      # if middle group (median row), then pen=7 (Black), otherwise pen = c(1...5) or c(1...6)
      pen   <- if(i==medGrp & medGrpSize == 1) 7 else 1:ke

      # NOTE(review): kcol is computed but not referenced below in this function -
      #   possibly leftover from an earlier version; verify before removing.
      kcol  <- c(mstColors[c(1:ke,7)])      # get major colors

      addBlack <- 0
      if (medGrp > 0 & medGrpSize == 1) {
         # When there is a single-row median panel, the panels just above and
         # below it also draw the median row's series (in black, pen 7).
         if (i == medGrp-1) {
            # panel before the median row
            gsubs    <- c(gsubs,ib[i+1]:ie[i+1])   # extend one more to get median row
            addBlack <- 7
         }
         if (i == medGrp+1) {
            # panel after the median row
            gsubs    <- c(gsubs,ib[i-1]:ie[i-1])   # extend to include at end of the list
            addBlack <- 7
         }
      }

      gnams <- areaDatKey[gsubs]            # get list of area ids for data group of data.

      # adjust if middle group
      if ( addBlack > 0 )  pen <- c( pen, 7 )

      # do panel -
      panelSelect(panels,i,j)    # select panel for group i in column j)
      panelScale(rx,ry)          # set scale for panel (should this be ry * 5 or 1?)
                                 # scale x and y to the shape of the panel (6 - median is squeezed.)
      panelFill(col=Panel.Fill.col)                 # set fill for panel

      # draw grid lines in panel - vertical (x axis)
      axis(side=1, tck=1, labels=F, at=atRx, col=Grid.Line.col, lwd=Grid.Line.lwd)   # grid lines (x axis)

      if (i==medGrp & medGrpSize == 1 ) {
         # median panel
         # NOTE(review): saveAtRy is set at the bottom of each loop pass, so the
         #   median panel reuses the previous panel's first/last tick positions.
         atRy <- c(saveAtRy[1],saveAtRy[length(saveAtRy)])   # median panel range (Get copy of first and last number)
      } else {
         # all other panels
         atRy <- panelInbounds(ry)          # get labels for y-axis
      }

      if (TS.hGrid) {
         # horizontal grids on Y axis
         axis(side=2,tck=1,labels=F,col=Grid.Line.col,lwd=Grid.Line.lwd, at=atRy)    # Grid lines
      }

      ## Y axis values and labels
      #axis(side=2, tick=F, mgp=mgpLeft, cex.axis= TS.Axis.cex*.75 ,
      #     at=atRy, labels=as.character(atRy))                       # Y axis labels
      #mtext(lab4[j],side=2,line=Title.Line.5.pos,cex=TS.Axis.cex)    # Y axis title
      #
      axis(side=2, tick=F, cex.axis=YAxis_cex, mgp=mgpLeft, line= -YAxis_adj*0.3, at=atRy, labels=as.character(atRy))
      mtext(lab4[j],side=2, line=Title.Line.5.pos, cex=TS.Axis.cex)

      panelOutline(col=Panel.Outline.col)   # outline panel

      #####
      #  Issue with median row - line drawing.  The y axis is squeezed
      #  to about 1/5 of the scale used in the other rows.  This distorts
      #  the line graph and any confidence band.
      #####

      #####
      #
      #  Current take each row and:
      #     draw confidence (if required)
      #     draw line
      #     next row.
      #  This leads to confidence overlaying the lines of rows. - need to do confidence blocks, then all lines.
      #  Change Sept 1, 2015
      #
      #####

      # handle confidence bands - drawn first so all lines sit on top of all bands.
      if (conf) {
         for (k in 1:ke) {
            # Process each slot of panel - step 1 to 5 or 1 to 1
            # cycle through row-groups and build each time series
            kp    = pen[k]                  # color number
            wDArr <- workDArr[gnams[k],,]
            wX    <- wDArr[,1]              # get X values for line and polygon plots
            wLine = wDArr[,2]               # Get Y values for mid line

            # build polygon of confidence band to fill (y-low to y-high) and draw first.
            # new logic to handle NA in X or Y data.  Have to break up the polygons into separate plots.
            cX    <- c(wX,NA)
            cY1   <- c(wDArr[,3],NA)        # lower Y data points
            cY2   <- c(wDArr[,4],NA)        # upper Y data points
            #cat("cY1:",paste0(cY1,collapse=", "),"\n")
            #cat("cY2:",paste0(cY2,collapse=", "),"\n")
            #cat("cX :",paste0(wX ,collapse=", "),"\n")

            # any NA in x, low, or high marks a break in the band.
            Breaks <- is.na(c(cX+cY1+cY2))
            #cat("Breaks:",paste0(Breaks,collapse=", "),"\n")

            # we found at least one NA in the data.
            wXz    <- MMVSplit(wX, Breaks)
            wY1z   <- MMVSplit(cY1,Breaks)
            wY2z   <- MMVSplit(cY2,Breaks)
            #cat("wY1z:",paste0(wY1z,collapse=", "),"\n")
            #cat("wY2z:",paste0(wY2z,collapse=", "),"\n")
            #cat("wXz :",paste0(wXz ,collapse=", "),"\n")

            vL     <- length(wXz)    # if only one list - then length = 15 instead of one.  *****************
            #cat("vL:",vL,"\n")

            # draw confidence shades - one polygon per unbroken run of data.
            for (ind in c(1:vL)) {
               if (length(wXz[[ind]])>0) {
                  xL    <- c(wXz[[ind]], rev(wXz[[ind]] ), NA)
                  yL    <- c(wY1z[[ind]], rev(wY2z[[ind]]), NA)
                  wPoly <- data.frame(x=xL, y=yL)
                  #print(wPoly)
                  #cat("colors:", mstColors[kp+12],"  kp+12:", kp+12,"\n")
                  # +12 offsets into the lighter "shading" entries of mstColors.
                  polygon(wPoly, col=mstColors[kp+12], border=NA)
               }
            }
            # shaped polygons of confidence band have been plotted.
         }  # end of k loop rows.
      }  # end of confidence test.

      # draw lines
      for (k in 1:ke) {
         # Process each slot of panel - step 1 to 5 or 1 to 1
         # cycle through row-groups and build each time series
         kp    = pen[k]                 # color number
         wDArr <- workDArr[gnams[k],,]
         wX    <- wDArr[,1]             # get X values for line and polygon plots
         wLine = wDArr[,2]              # Get Y values for mid line

         # Plot mid Line
         lines(wX,wLine,col=mstColors[kp],lwd=TS.lwd)
         # NA processing, in the lines call, the missing point (x,y) is just not drawn or other points connected to it.
         # a gap is generated.
      }  # end of k loop rows.

      saveAtRy <- atRy    # remember this panel's ticks for the median panel.
   }

   par(oldpar)

   # ____________________________PanelOutline____________________

   groupPanelOutline(panelGroup,j)

}

#####
#
#  ##### end of glyph functions #####
#
#
#############################
#############################

#print("Glyph functions loaded")

#############################
#############################
#
#
#  General Functions for micromapST and glyphs
#
#
#  AddRefLine - adds the reference line to the current panel (wKe).
#
AddRefLine <- function (wRefVal, wKe, wRx) {
      # Draw a vertical reference line at wRefVal spanning the wKe rows of the
      # current panel, but only when the value falls inside the x range wRx.
      # (is.between.r, padMinus, Ref.Val.* and iRef.Val.col are package-level.)
      if (!is.na(wRefVal)) {
         if(is.between.r(wRefVal,wRx)) {
            # reference line
            lines(rep(wRefVal,2),c(1-padMinus,wKe+padMinus),lty=Ref.Val.lty,lwd=Ref.Val.lwd,col=iRef.Val.col)
         }
      }
   }

#
#_________ function to pattern match alias names
#
AliasToIndex <- function(xR,aNAIAlias) {
      #
      #  xR is the string list, aNAIAlias is the Name Table $ Alias column
      #  return index into the NAI table (NA where no alias matched)
      #
      #  The user string must be cleaned up to make sure it can match one of the wildcard alias strings.
      #  The user strings are edited to convert any punctuation marks, control characters, spaces, tabs, cr, etc.
      #  into blanks, multiple blanks, leading and trailing blanks are eliminated and the string is converted to
      #  all uppercase.
      #
      #  xR  --> a vector of the registry names from SeerStat output

      wReg      <- CleanString(xR)
      wIndex    <- rep(NA,length(wReg))      # match results - NA default - no match

      # wild card match of input character vector to alias in name table.
      # NOTE(review): the ifelse(..., return, return) form works because return()
      #   executes inside the anonymous function, but it is fragile - verify intent.
      xouta  <- t( sapply(c(1:length(aNAIAlias)), function(x) {
            y=grep(aNAIAlias[x],wReg,ignore.case=TRUE)    # user string list against each entry.
            ifelse(length(y)==0,return(c(NA,NA)),return(c(x,y)))   # if result length = 0 -> no match. otherwise return the string and index.
         } ))
      # result - matrix is column 1 = aNAI index that matched, column 2 = index into char vector .

      xoutb  <- xouta[!is.na(xouta[,1]),]    # keep only matches.
      wIndex[xoutb[,2]] <- xoutb[,1]

      wMissing     <- is.na(wIndex)
      wMissingList <- paste0(xR[wMissing],collapse=", ")

      #if (any(wMissing)) {
      #
      #    xmsg <- paste0("***0195 ALIAS Alias Name(s) in the data does not match the name table for the area. The unmatched data rows are:",wMissingList)
      #    stopCnt()
      #    stop(xmsg, call.=FALSE)
      #
      # }
      # let duplicate and missing through.  Handled by caller.

      return(wIndex)    # return index to name table
   }
#
### ###
#

#_________ function to pattern match alias names
#
AliasToKey <- function(xR,aNAI) {
      #  xR is the string list, aNAI is the Name Table
      #  return key (abbreviation) per input string, NA where no match.
      #  x  --> a vector of the registry names from SeerStat output

      ErrFnd <- FALSE
      wReg   <- toupper(xR)
      wIndex <- rep(NA,length(wReg))     # NA results of keys
      wKey   <- rep(NA,length(wReg))     # NA results of keys

      # one entry per aNAI row, NA or # of wReg Row of match.
      xout1  <- sapply(c(1:length(aNAI$Alias)), function (x) grep(aNAI$Alias[x], wReg, ignore.case=TRUE))

      xout1a <- unlist(xout1)   # list of matched locations for each item.
                                # NA's and lists removes, just a list of matches.

      # Get list of those items that did not find a match. - find list of wReg item that did not match.
      xout2  <- !is.na( lapply( xout1, function(x) ifelse(length(x)==0,NA,x) ) )
                                # xout2 is converts results from "" into NA.
      xout3  <- unlist( lapply( xout1, function(x) { if(length(x[])>1) { x } else { NA } } ) )
                                # xout3 is string or NA - string if no match.

      if (any(!is.na(xout3))) {
         # a single data row matched more than one alias - fatal.
         ErrFnd  <- TRUE
         StopFnd <- TRUE
         xout4   <- paste0(xout3[!is.na(xout3)], collapse=" ")
         xmsg    <- paste0("***0196 ALIAS Sub-area names in the data have duplicate name in rows:",xout4, " Only one row per sub-area is permitted.\n")
         stopCnt()
         stop(xmsg, call.=FALSE)
      }

      # NOTE(review): xout1a indexes positions in wReg while xout2 is a logical over
      #   aNAI rows - the alignment of these two subscripts should be verified.
      wIndex[xout1a] <- aNAI$Key[xout2]
      wKey[xout1a]   <- aNAI$Key[xout2]

      return(wKey)    # return list of abbreviates or NA if no match.
   }
#
### ###
#

#  Function to generate the segment blended colors for the stacked bar glyphs.
#  It takes the base 5 or 6 colors used in the maps and other glyphs
#  and generates a progression of light to full color for use in the
#  segments of a stacked bar glyph.
#
BuildSegColors <- function(NumSegs) {
      #
      # Build the per-segment color ramp used by all stacked-bar glyphs.
      #
      # NumSegs : number of segments in each stacked bar.
      # Returns : character matrix [color 1:7, segment 1:NumSegs] of "#RRGGBB"
      #           strings - lightest tint in segment 1, full color in the last
      #           segment.  (Reads the package-level palette `mstColors`.)
      #
      # Per-segment intensity: an exponential ramp (power 1.9) rescaled to run
      # from 0.4 up to 1.0, so even the first segment stays visibly tinted.
      segShare <- cumsum(rep(1/NumSegs, NumSegs))     # 1/NumSegs ... 1
      rampFrac <- (segShare ^ 1.9) * 0.6 + 0.4        # 0.4+ ... 1.0

      # Base palette and background as RGB fractions in [0,1].
      fgUnit <- t(col2rgb(mstColors[1:7])) / 255      # [color, rgb]
      bgUnit <- t(col2rgb("white")) / 255             # [1, rgb]

      # Scale the palette by each segment's fraction.  sapply flattens each
      # 7x3 product column-wise, so refold into [color(7), rgb(3), segment].
      fgScaled <- array(sapply(rampFrac, function(f) fgUnit * f),
                        c(7, 3, NumSegs))

      # Complementary share of white per segment -> [segment, rgb].
      bgScaled <- t(sapply(1 - rampFrac, function(f) bgUnit * f))

      # Blend foreground + background per (color, segment) and encode as hex.
      segColTbl <- matrix(rep(0, 7 * NumSegs), nrow = 7, ncol = NumSegs)
      for (segNo in 1:NumSegs) {
         for (colorNo in 1:7) {
            mixRgb <- fgScaled[colorNo, , segNo] + bgScaled[segNo, ]
            segColTbl[colorNo, segNo] <- rgb(mixRgb[1], mixRgb[2], mixRgb[3])
         }
      }

      #
      # Resulting colors are in segColTbl[color, segment]:
      #   rows    - color base 1 to 7 (we use 1 to 6)
      #   columns - segment 1:NumSegs (light -> full color)
      #
      return(segColTbl)
   }
#
### ###
#

#  Subroutine to take values in the col<x> vectors (panelDesc variable),
#  convert numerics to integer, convert character (column names)
#  by matching with statsDFrame column names to statsDFrame column numbers.
#  NA's (no name match) and out of range numbers are set to "0" - NOT VALID.
#
#  Used to check column specifications for sortVar, rowNamesCol and colx variables during
#  initial setup.  By the time the glyphs runs, the col1,...,col3 variables are translated
#  into column numbers and no long needs to be checked.  Except to validate they exist when needed.
#
#  This routine takes any number/name of columns provided by user and validates it and translates to
#  column number.  Will not translate "NA", missing, "" or "0" values.  glyph will test if
#  data is missing.
#
#  This routine does a general check of a named list of statsDFrame column names or numbers.
#  At the end of the verification, the names are translated into statsDFrame column numbers.
#
#  The caller should save the original named list vectors for diagnostic messages.
#
#  Used mostly used by sortVar, rowColName, and other arguments.
#
#####
#
#  CheckColx appears to not be used any more.  Verify.
#
#####
#
#
CheckColx  <- function(wcol, colname, wnam2, len_wnam) {
      #
      # Validate a panelDesc column-reference vector and translate names to
      # statsDFrame column numbers.  (Per the note above, this routine appears
      # to be superseded by CheckColx2 - verify before removing.)
      #
      #   wcol     = col vector of names/number in statsDFrame from panelDesc
      #   colname  = literal character name of col vector for error message.(panelDesc variable name (col1, col2, col3))
      #   wnam2    = character list of column names and row numbers (in character format)
      #   len_wnam = number of original set of columns. (length(wcol)
      #
      #  Results Rules:  "0" means invalid number, out of range number or invalid name.
      #        NAs are converted to "0" values.
      #        glyphs check for valid values based on need.
      #
      xwcol  <- wcol
      l_wcol <- length(wcol)
      ErrFnd <- FALSE

      if (is.factor(xwcol)) {
         xwcol <- as.character(xwcol)
      }

      if (is.numeric(xwcol) || is.logical(xwcol)) {
         # have number (double, single, integer, or logical)
         # we are dealing with numeric or logical
         rcol <- as.integer(xwcol)     # convert numeric to integer.
         rcol[is.na(rcol)] <- 0        # get rid of NA. Turn to zeros doesn't get rid of negatives.

         if (any(rcol < 0)) {
            ErrFnd <- TRUE
            xmsg   <- paste0("***0201 PDCOL In the ",colname," named list in the ", pDName," panelDesc structure there are one or more negative values: ",paste0(rcol,collapse=", ")," Literal:",wcol)
            warnCnt()
            warning(xmsg,call.=FALSE)
         } else {
            if (any(rcol > len_wnam)) {
               ErrFnd <- TRUE
               # NOTE(review): the "\var{...}" fragments below are leftover Rd markup;
               #   in an R string "\v" is a vertical-tab escape.  Left as-is (runtime text).
               xmsg   <- paste0("***0202 PDCOL One or more of the values in the \var{<pdVarName>} named list in the \var{<panelDesc>} structure is greater than the columns in the \var{<statsDFrame>} data.frame: ",paste0(rcol,collapse=", "))
               warnCnt()
               warning(xmsg,call.=FALSE)
            }
         }
         # if ErrFnd = FALSE, the all number in vector are within range.
         # check valid range in glyph (NA become zeros.)  Leave the final check to the glyphs.

      } else {
         if (is.character(xwcol)) {
            # have character - may be name or number - check each
            # get number for other code, if column name.
            xcol <- match(xwcol,wnam2,nomatch=0)    # match against column names and numbers (as characters)
            rcol <- ifelse(xcol > len_wnam, xcol-len_wnam, xcol)  # adjust matches to row numbers to real row numbers.
            # name and character number converted to integer
            # bad and NA values are "0" and will be caught in the glyph
         } else {
            # invalid variable type
            ErrFnd <- TRUE
            xmsg   <- paste0("***CCOL-03 The type of ",colname," panelDesc variable is invalid. ",typeof(xwcol),". Must be integer or character.")
            warnCnt()
            warning(xmsg,call.=FALSE)
         }
      }

      if (ErrFnd) {
         return (rep.int(0,l_wcol))
      } else {
         # clean up any NAs in list, set to 0
         rcol[is.na(rcol)] <- 0     # set NA to 0 (invalid)
         return (rcol)
         #print(rcol)
      }
   }
#
### ###
#
#
CheckColx2 <- function(colValues, varName, varNum, gNameList, stColNames, len_sCN) {
      #  xx <- gsub(",","",<value>,fixed=TRUE)
      #
      gc4real <- "^[-+]?[ ]?[0-9]{1,3}(,[0-9]{3})*(\\.[0-9]*)?$|^[-+]?[ ]?[0-9]*(\\.[0-9]*)?$"  # is real number with commas
      gc4int  <- "^[-+]?[ ]?[0-9]{1,3}(,[0-9]{3})*$|^[-+]?[ ]?[0-9]*$"                          # is integer number with commas

      #cat("colValues:",paste0(colValues,collapse=", "),"\n")
      #cat("varName  :",varName,"\n")
      #cat("varNum   :",varNum,"\n")
      #cat("gNameList:",paste0(gNameList,collapse=", "),"\n")
      #cat("stColNames:",stColNames,"\n")
      #cat("len_sCN  :",len_sCN,"\n")
      #
      #
      # Routine is used to check out the information provided by the user.  If
      # vector contains a number or a character string, it will validate the number against
      # the column number range of statsDFrame.  If a character string vector is provided,
      # each items is checked for being a number or non-number string.  If numeric,
      # the value is converted to integer and validated for <= 0 and range.  If a non-numeric
      # string, the string to match against the column names on statsDFrame and translated to
      # the column number.  "" and NA values are ignored and not translated or matched.
      # If a string does not match, then it resolves to NA.  "" are converted to NA.
      #
      # The glyphs are left to determine if all of the needed data columns are provided.
      # This only validates the information present.  If the data column is not used, we don't care.
      #
      # pdVarData - colValues = character or numeric vector of column names/numbers in statsDFrame.
      #             Can be a list from sortVar, rowColName, or panelDesc col1, col2, or col3..
      # pdVarName = name of variable - vector being checked. (used in messages.). (example: col1, col2, sortVar, rowColName, etc.)
      # pdVarNum  = 3rd character in message identifiers: "0" for sortVar and rowColNames and 1 to 3 for panelDesc columns
      # gNameList = associated "type" list of glyphs per entry in vector.  Used in messages.  Must be the same length as
      #             colValues.  For sortVar and rowColName, this parameter is set to "".
      # stColNames = character list of column names and numbers (in character format) (statsDFname column names)
      # len_sCN   = number of original number of columns.  dim(statsDFrame)[2]  The stColNames list is 2 x this value.
      #
      # Rules: Not provided = "" and NA.
      #        "0" means invalid number or name.  Error message already generated.
      #        glyphs check for valid values based on need.  We just make sure the column has a
      #        a valid reference and can be accessed.  Not valid content.
      #
      # Working variables:
      #   FvarNum = 1 character version of varNum (if positive) othersize set to "0" (single value)
      #
      #cat("len_sCN:",len_sCN,"  varNum:",varNum,"\n")

      ErrFnd      <- FALSE              # no errors indicator
      xwcol       <- colValues          # get working copy of panelDesc contains of a variable list.
      l_xwcol     <- length(colValues)  # length of variable contents vector
      l_gNameList <- length(gNameList)  # length of type list (number of glyphs)

      rcol        <- rep(NA,l_xwcol)    # results column number list.

      if (varNum >= 0) {
         FvarNum <- formatC(varNum,format="f",digits=0,width=1)
      } else {
         FvarNum <- "0"
      }

      if (l_xwcol != l_gNameList) {
         # panelDesc variable not same length as number of types
         if (l_gNameList == 0) {
            # gNameList is absent - possible sortVar or rowColNames arguments.
            l_gNameList <- l_xwcol
         } else {
            # error - they should be the same length, possible type-o in variable list.
            ErrFnd <- TRUE
            warnCnt()
            xmsg   <- paste0("***0205 ",gNameList," The length of the glyph type list is different the length of the variables list.")
            # FIX: the message was composed and counted but never emitted -
            # without this call the length-mismatch diagnostic was silently lost.
            warning(xmsg, call.=FALSE)
         }
      }
      #cat("l_xwcol:",l_xwcol,"  len_sCN:",len_sCN,"  varNum:",varNum,"  FvarNum:",FvarNum,"\n")
      #print(stColNames)

      FndNames <- rep(TRUE,l_xwcol)
      skipList <- (is.na(xwcol) | xwcol == "")   # no values provided in entry ("", "" from ,, or NA)

      if (is.factor(xwcol)) {
         # if a factor convert to character - should not be a factor if it's a numeric,
         xwcol <- as.character(xwcol)    # remove factor index.
      }

      if (is.numeric(xwcol)) {
         # have number (double, single, integer)
         rcol <- as.integer(xwcol)   # convert numeric to integer (column indexes are integers)
         # NA values still show up as NA.  Still need to validate.
         #print("numeric")
      } else {
         if (is.character(xwcol)) {
            xTcol <- grepl(gc4int,xwcol)   # T/F vector of type of string  T=Numeric
            xTcol[skipList] <- FALSE
            if (any(xTcol)) {
               rcol[xTcol] <- as.integer(xwcol[xTcol])   # convert chars to numbers
            }
            xTcol <- !xTcol                # reverse flags  T = character
            xTcol[skipList] <- FALSE       # don't check any NA fields.
            if (any(xTcol)) {
               # check all, but we will only update the character ones.
               xcol  <- match(xwcol,stColNames)    # match against column names and numbers (as characters)
                                                   # and translate column names and numbers to integers.
               xcol2 <- ifelse(xcol>len_sCN,xcol-len_sCN,xcol)   # adjust matches to verified/matched column numbers to real column numbers.
               rcol[xTcol]     <- xcol2[xTcol]
               FndNames[xTcol] <- !is.na(xcol)[xTcol]
            }
         } else {
            # invalid type of vector, numeric, integer, or character
            # is this possible because prior checks???
            stopCnt()
            StopFnd <- TRUE
            xmsg    <- paste0("***02",FvarNum,"0 PDCOL The ",varName," named list is type ",typeof(xwcol)," is invalid. Must be a numeric, integer or character vector.")
            stop(xmsg, call.=FALSE)
         }
      }

      # validate
      #print("xwcol")
      #print(xwcol)
      #print("rcol")
      #print(rcol)
      #cat("FndNames:",FndNames,"\n")
      #cat("len_sCN:",len_sCN,"\n")

      for (ind in c(1:l_xwcol)) {
         # validate each one - rcol.
         pdColNum <- formatC(ind, format="f",digits=0, width=2, flag="0")
         xRcol    <- rcol[ind]
         if (!is.na(xRcol)) {
            #cat("ind:",ind,"  xRcol:",xRcol,"\n")
            if (xRcol <= 0) {
               warnCnt()
               ErrFnd <- TRUE
               xmsg   <- paste0("***02",FvarNum,"2 PDCOL ",gNameList[ind]," ",pdColNum," The column number of ",xwcol[ind]," in '",varName,"' is negative or zero. Must be a positive integer or the name of a column in statsDFrame.")
               warning(xmsg,call.=FALSE)
               rcol[ind] <- 0
            } else {
               if(xRcol > len_sCN) {
                  warnCnt()
                  ErrFnd <- TRUE
                  xmsg   <- paste0("***02",FvarNum,"3 PDCOL ",gNameList[ind]," ",pdColNum," The column number of ",xwcol[ind]," in '",varName,"' is greater than the number of columns ",len_sCN," in the statsDFrame data.frame.")
                  warning(xmsg,call.=FALSE)
                  rcol[ind] <- 0
               }
            }
         }
         if (!FndNames[ind]) {
            # did not find the name in statsDFrame
            warnCnt()
            ErrFnd <- TRUE
            xmsg   <- paste0("***02",FvarNum,"1 PDCOL ",gNameList[ind]," ",pdColNum," The column name of ",xwcol[ind]," in '",varName,"' does not exist in the statsDFrame data.frame.")
            warning(xmsg,call.=FALSE)
            rcol[ind] <- 0
         }
      }

      return(rcol)
   }
#
###

CheckParmColx <- function(colNames, parmCode, wSDFNames, len_wSDFNames) {
      #  This function validates the statsDFrame column name/numbers for call arguments/parameters.
      #  It is essentually the same function as CheckColx, but does not generate error messages
      #  related to panelDesc variables or lists.  If the list of names/numbers is limited to "N",
      #  then this check is done prior to calling this function.
      #
      #  Used by sortVar and rowNamesCol argument checks
      #
      #  colNames  = col Name vector of names/number in statsDFrame from panelDesc
      #  parmCode  = is a vector containing the error message identifier and string and the parameter name.
      #              parmCode[1] = second part of the "CARG-" tag.
      #              parmCode[2] = name of the calling argument/parameter
      #              c("RNC","rowNamesCol")
      #              Any invalid names/numbers are passed by as 0.
      #  wSDFNames = character list of column names and numbers (in character format)
      #  len_wSDFNames= number of original set of columns. (length(wcol)
      #
      #  Results Rules:  "0" means invalid number, out of range number or invalid name.
      #        NAs are converted to "0" values.
      #        glyphs check for valid values based on need.
      #
      #  The check for zero length value is done before the call to this routine.
      #
      xColNames <- colNames
      l_wcol    <- length(colNames)    # number of values
      ErrFnd    <- FALSE

      if (l_wcol <= 0) {
         ErrFnd <- TRUE
         xmsg   <- paste0("***0124 CARG-",parmCode[1]," The ",parmCode[2]," call argument is empty. Argument ignored.")
         warnCnt()
         warning(xmsg, call.=FALSE)
         res    <- NA
      } else {
         # number of values are 1 or more
         res    <- rep(0,l_wcol)    # default results are none found.

         if (is.factor(xColNames)) {
            xColNames <- as.character(xColNames)
         }

         if (is.character(xColNames) || is.numeric(xColNames)) {
            # what are each element in the vector - a number or name?
            xColType <- unlist(gregexpr("^[ \t]*[+-]?[0-9]*[ \t]*$",xColNames))   # find out number or name
                 # NA - NA (missing)
                 # -1 - Character
                 #  1 - Number
            #print("parameter value to check:")
            #print(xColNames)
            #print(xColType)

            # Loop through list and check each one based on its type.
            for (ind in c(1:l_wcol)) {
               # get value type
               wCT    <- xColType[ind]     # get type
               wCName <- xColNames[ind]    # get value

               if (is.na(wCT)) {
                  # NA value - pass it back to caller as 0 - not found.
                  res[ind] <- 0
               } else {
                  if (wCT < 0) {
                     # have character type value (not a just numbers) - should be column name
                     wColN <- match(wCName,wSDFNames,nomatch=0)   # match against column names and numbers (as characters)
                     wColN <- ifelse(wColN > len_wSDFNames, wColN-len_wSDFNames, wColN)  # adjust matches to row numbers to real row numbers.
                     res[ind] <- wColN    # save resulting column index number

                     # check if column name found.
                     if (wColN <= 0) {
                        # if it was a no match ...
                        ErrFnd <- TRUE
                        xmsg   <- paste0("***0123 CARG-",parmCode[1]," A column names in the ",parmCode[2],
                                        " call argument does not exist in the ",sDFName," data.frame:", wCName)
                        warnCnt()
                        warning(xmsg,call.=FALSE)
                     }  # end of name valid check.
                  } else {
                     # numeric value - (integer, numeric, or character format) - convert and check
                     wColN <- as.integer(wCName)    # convert to number

                     if (is.na(wColN)) {
                        # string did not convert to integer - Error. (unexpected since we validated character string first.
                        ErrFnd <- TRUE
                        xmsg   <- paste0("***0125 CARG-",parmCode[1]," A column index number in the ",parmCode[2]," call argument did not convert from character to integer: ",wColN)
                        warnCnt()
                        warning(xmsg, call.=FALSE)
                     } else {
                        if (wColN < 0) {
                           ErrFnd <- TRUE
                           xmsg   <- paste0("***0120 CARG-",parmCode[1]," A column index number in the ",parmCode[2],
                                           " call argument is a negative or zero: ",wColN)
                           warnCnt()
                           warning(xmsg, call.=FALSE)
                        } else {
                           if (wColN > len_wSDFNames) {
                              ErrFnd <- TRUE
                              xmsg   <- paste0("***0121 CARG-",parmCode[1]," A column index number in the ",parmCode[2]," call argument is a greater than the number of columns in ",sDFName," data.frame: ", wColN)
                              warnCnt()
                              warning(xmsg, call.=FALSE)
                           } else {
                              res[ind] = wColN    # save the column number
                           }  # end of range check.
                        }  # end of neg check.
                     }  # end of integer convert error check
                  }  # end of char vs numeric check
               }  # end of NA vs other type check
            }  # end of for loop
         } else {
            # invalid variable type
            ErrFnd <- TRUE
            xmsg   <- paste0("***0122 CARG-",parmCode[1]," The call argument/parameter, ",parmCode[2]," is not a valid variable type. It must be a numeric or character type value.")
            warnCnt()
            warning(xmsg,call.=FALSE)
         }  # end of type check.
      }  # end of zero length check

      #cat("CheckParmColx Results:",paste0(res,collapse=", "),"\n")
      return(res)
   }
#
### ###
#

#  function CheckNum takes a vector or data.frame of numbers provided in the statsDFrame by the
#  user.  It checks to make sure they are numeric via "is.numeric" and a grep string comparison.
# In the process, it checks for factors and converts them to character vectors. # Character vectors are scan to eliminate commas in numbers and verify the string is only # digits and decimal points. A list of Err and Dat is returned. # If an error is found, Err is set to TRUE, The cleaned up numeric vector is returned as Dat. # # Input: xd <- data column (from statsDFrame data.frame) # gName <- glyph Name (character) # pdVarNum <- pd variable (col1, col2, col3) number (integer) # pdColNum <- glyph Column Number (2 character) # pdVarName <- pd variable name (col1, col2, col3) # stColName <- stDF column reference # pdUsage <- brief usage description for error messages. # CheckNum <- function(xd, gName, pdVarNum, pdColNum, pdVarName, stColName, pdUsage) { # for error messages, the last digit of 7 and 8 is reserved for this check. ErrFnd <- FALSE xn <- formatC(pdVarNum,format="f",width=1,digits=0) #cat("CheckNum - gName:",gName," pdVarNum:",pdVarNum," pdColNum:", pdColNum," pdVarName:",pdVarName,"\n") #cat(" stColName:",stColName," pdUsage:",pdUsage," xn:",xn," length(xd):",length(xd),"\n") #cat(" xd:",paste0(xd,collapse=", "),"\n") if (length(xd) == 0) { # invalid vector - length = 0 -> no data ErrFnd <- TRUE warnCnt() xmsg <- paste0("***02", xn, "D ", gName, " ", pdColNum, " The ", stColName, " data column in the ", sDFName, " data frame does not contain any data. Data vector length has length of zero. ", pdUsage) warning(xmsg, call.=FALSE) # can't process or check return NULL vector xdr <- xd # return short vector #print("zero length vector") } else { xdr <- rep(NA,length(xd)) # default results - vector of NAs. # have data to check # Convert factors to characters - this applies even if vector is numeric or character. # Normally only strings are saved as factors in data.frames, but a numeric vector can also # be converted to a factor. It then becomes a character value. # if it is a factor, we will eventually be headed down the character path. 
if (is.factor(xd)) { xd <- as.character(xd) # convert factors to characters #print("converted from factor to character") } # check for missing values in the vector # Check # 1 - all missing if (all(is.na(xd))) { warnCnt() ErrFnd <- TRUE # no data can be converted. ALL NA. could be all blanks. xmsg <- paste0("***02", xn, "A ", gName, " ", pdColNum, " The data provided in the ", stColName, " column of the ", sDFName, " data frame does not contain any numerical data. No rows will be drawn. ", pdUsage) warning(xmsg,call.=FALSE) # return all NA vector #print("all are NA") } else { # Check # 2 - one or more missing. if (any(is.na(xd))) { lenxd <- length(xd) seqxd <- seq(1,lenxd) BadSeqNum <- seqxd[is.na(xd)] # one or more entires are NA (missing) - This check should be done before manipulating the vectors. # check is primarily if user leaves entries missing, not is the translation to numeric leaves them NA. warnCnt() xmsg <- paste0("***02", xn, "B ", gName, " ", pdColNum, " The ", stColName, " data column in the ", sDFName, " data frame contains one or more missing values. "," Rows with missing values will not be drawn. ", pdUsage) warning(xmsg, call.=FALSE) xmsg <- paste0("***02", xn, "C ",gName," ",pdColNum," The rows with missing data are:",paste0(BadSeqNum,collapse=", ")) warning(xmsg, call.=FALSE) #print("one or more are NA") } # we may have missing values, but we can still check the vector. if (!is.numeric(xd)) { #print("not numeric") # no numeric - better be character type.. if (is.character(xd)) { #print("character") # its character (from factor or has always been character) # check character string for valid numerical format and allow for commas. Any NA values are passed through as NA in the results. x <- gregexpr("^[ \t]*[+-]?((([0-9]{1,3}[,])?([0-9]{3}[,])*[0-9]{3})|([0-9]*))?(([.][0-9]*)|)([eE][-+]?[0-9]+)?[ \t]*$",xd) # verify characters are all numeric (not scientific notation) # regexpr notes: # ^ - begin of string. 
# [ \t]* - any number of leading spaces or tabs. # [-+]? - optional at most one (sign) # ( - leading digits patterns # leading digits pattern 1 - leading numbers with commas - logic catches 1,000 and higher, 999 falls through to second pattern. # (([0-9]{1,3}[,])?([0-9]{3}[,])*[0-9]{3}) # ( - leading digits # [0-9]{1,3} - 1 to 3 digits (could be 1,2, but left 1,3 # [,] - comma # )? - leading 1,2,3 digits and comma, optional - no more than once. # ( - body 3 digits # [0-9]{3} - body 3 digit sets # [,] - comma # )* - zero or more times # ( - last section of digits # [0-9]{3} - body 3 digits # ) - one time # # or alternate pattern 2 - leading numbers without commas # # ( # [0-9]* - zero or more digits # ) # )? - leading digits are optional, but can happen just once # # section to handle option decimal point and following digits # # ([.][0-9]*) - decimal and digits # # or # # () - nothing. (maybe I could have used ? after the {[.][0-9]*) group # # section to handle possible scientific expression after decimal point and digits or nothing. # # ([eE][+-]?[0-9]*)? - optional scientific expression appendage # # [ \t]* - any number of trailing spaces or tabs # $ - end of string # # February 15-16, 2016 - improved reg-exp to handle: # a) leading + or - # b) commas in number - correct format. Needed to do this before # removed commas since an incorrect format could be handled. # c) redid how decimal point and following digits are handled. # d) added logic for scientific notation (e+10) # # This led to redoing the other validation coding since we had more # information on valid numbers. # xtf <- unlist(x) > 0 # get list of valid numbers in vector. (TRUE = good number / FALSE = bad number) # use this vector to only convert valid numbers. xd <- gsub(",","",xd) # eliminate commas in number xdr <- rep(NA,length(xd)) # default return value. # all checking for missing numbers has already been done. xdr[xtf] <- as.numeric(xd[xtf]) # only convert good formats. 
xtf2 <- !is.na(xdr[xtf]) # check the conversion and see if all were converted? if (any(xtf2)) { # something happened and a number we thought was good did not get converted to numeric. print("Internal Note - good numeric format did not get converted") print(paste0("Input :",paste0(xd[xtf], collapse=", "))) print(paste0("Output:",paste0(xdr[xtf],collapse=", "))) } } else { # not a numeric or character type vector ErrFnd <- TRUE warnCnt() xmsg <- paste0("***02", xn, "9 ", gName, " ", pdColNum, " The ", stColName, " data column in the ", sDFName, " data frame is not a character or numeric vector. ", pdUsage) warning(xmsg, call.=FALSE) } # end of character/invalid } else { # numeric xdr <- xd } # end of not numeric } # end of all missing or process. } # end of vector length check return(list(Error=ErrFnd,Dat=xdr)) } # ### ### # # # Input: xd <- data column (from statsDFrame data.frame) # gName <- glyph Name (character) # pdVarNum <- pd variable (col1, col2, col3) number (integer) # pdColNum <- glyph Column Number (2 character) # pdVarName <- pd variable name (col1, col2, col3) # stColName <- stDF column reference # # Input: xd <- data column (from statsDFrame data.frame) # gName <- glyph Name (character) # pdColNum <- glyph Column Number (2 character) # pdVarName <- pd variable name (col1, col2, col3) # pdVarNum <- pd variable (col1, col2, col3) number (integer) # stColName <- stDF column reference # # x <- CheckNum2(xd, gName, pdColNum, pdVarName, pdVarNum, stColName) - retired. # #### # # Why was checknum2 created??? now CheckNum is much better. - This routine appears to be RETIRED> # #### CheckNum2 <- function(xd, gName, pdColNum, PDVarName, pdVarNum, stColName) { # for error messages, the last digit of 3 and 4 is reserved for this check. ErrFnd <- FALSE xn <- formatC(pdVarNum,format="f",digits=0,width=1) if (is.factor(xd)) { # numeric or character can be factored - must go to character to get to numeric. xd <- as.character(xd) } # check for missing values. 
if (is.na(xd)) { # one or more entires are NA (missing) warnCnt() xmsg <- paste0("***02",xn,"B ",gName," ",pdColNum," The ",stColName," data column in the ",sDFName," data frame contains one or more missing values. Rows with missing values will not be drawn.") warning(xmsg,call.=FALSE) } # Check for factors if (is.factor(xd)) { xd <- as.character(xd) } # if numeric vector - just return the vector. if (!is.numeric(xd)) { if (is.character(xd)) { # can it be translated without error to numeric. # determine if string is a valid number format. x <- gregexpr("^[ \t]*[+-]?((([0-9]{1,3}[,])?([0-9]{3}[,])*[0-9]{3})|([0-9]*))?(([.][0-9]*)|)([eE][-+]?[0-9]+)?[ \t]*$",xd) # verify characters are all numeric (not scientific notation) # eliminate commas from number text. xd <- gsub(",","",xd) # regexpr notes: # ^ - begin of string. # [ \t]* - any number of leading spaces or tabs. # [-+]? - optional at most one (sign). # [0-9]* - any number of digits # [.]? - optional at most one (decimal point) # [0-9]* - any number of digits # (...)? - option field at most once (scientic exponent) # ... => [e][-+][0-9]{1,2} # [ \t]* - any number of trailing spaces or tabs # $ - end of string # if (any(x < 0)) { # one of the values failed the numeric test. ErrFnd <- TRUE warnCnt() xmsg <- paste0("***02",xn,"7 ",gName," ",pdColNum," The data provided in the ",stColName," column in the ",sDFName," data frame contains one or more non-numeric characters.") warning(xmsg,call.=FALSE) } else { # convert to numeric for the return. xd <- as.numeric(xd) areNA <- is.na(xd) if (all(areNA)) { ErrFnd <- TRUE # no data can be converted. ALL NA. could be all blanks. warnCnt() xmsg <- paste0("***02", xn, "A ", gName, " ", pdColNum, " The ", stColName, " data column in the ", sDFName, " data frame contains one or more missing values. 
"," Rows with missing values will not be drawn.") warning(xmsg, call.=FALSE) } else { if (any(areNA)) { warnCnt() xmsg <- paste0("***02", xn, "8 ", gName, " ", pdColNum, " The data provided in the ", stColName, " column in the ", sDFName, " data frame contains one or more entries have non-numeric characters.") warning(xmsg,call.=FALSE) } } } } else { # not a valid type of vector ErrFnd <- TRUE warnCnt() xmsg <- paste0("***02",xn,"9 ",gName," ",pdColNum," The ",stColName," data column in the ",sDFName," data frame is not a character or numeric vector.") warning(xmsg,call.=FALSE) } } return(list(Error=ErrFnd,Dat=xd)) } # ### ### # # function to verify the presents and type of data in a statsDFrame column. # CheckPDCol <- function(pdVarName, gName, stColNum, stColName, gColNum, pdVarNum, stMaxColNum, stDat, pdUsage) { # xr <- list(Err = FALSE, Dat = c(0)) xn <- formatC(pdVarNum,format="f",width=1,digits=0) # get last character (number of col1, 2, 3) pdColNum <- formatC(gColNum,format="f",width=2,digits=0,flag="0") wstname <- names(stDat) wstMax <- dim(stDat)[2] #cat("CheckPDCol-pdVarName:",pdVarName," gName:",gName," stColNum:",stColNum," stColName:",stColName," gColNum:",gColNum,"\n") #cat(" pdVarNum:",pdVarNum," stMaxColNum:", stMaxColNum," pdUsage:",pdUsage," xn:",xn," pdColNum:",pdColNum,"\n") #cat(" stDat:",paste0(stDat,collapse=", "),"\n") #cat(" wstname:",paste0(wstname,collapse=", "),"\n") #cat(" wstMax :",wstMax,"\n") if (is.na(match(pdVarName,PDUsed))) { # pdVarName is not present in the panelDesc data.frame variable lists. xr$Err <- TRUE warnCnt() xmsg <- paste0("***02",xn,",5 ", gName," ",pdColNum," The required panelDesc variable ", pdVarName, " is missing from the ", pDName, " data.frame. ", pdUsage) warning(xmsg, call.=FALSE) } if (stColNum == 0) { xr$Err <- TRUE # if stColNum is zero, then error message already generated. So signal error and stop. } if (!xr$Err) { # no error found yet.... 
if (is.na(stColNum)) { # missing stColName xr$Err <- TRUE warnCnt() xmsg <- paste0("***02", xn, "4 ", gName, " ", pdColNum, " There is no ",sDFName, " column was specified in ", pdVarName, " variable in the ", pDName, " panelDesc data.frame.", " A data column name/number is required. ", pdUsage) warning(xmsg, call.=FALSE) } else { xr <- CheckNum(stDat[,stColNum], gName, pdVarNum, pdColNum, pdVarName, stColName, pdUsage) # check and get the data in col"x" } } #print("CheckPDCol - Output") #print(xr) return(xr) } # ### ### # # function to verify the presents and type of data in a statsDFrame column. # Same as the CheckPDCol function, but without any CheckNum call to verify the data. # Used by ctrbar, segbar, normbar glyphs. They do a CheckNum on each column as they # pull the data. # CheckPDColnCN <- function(pdVarName, gName, stColNum, stColName, gColNum, pdVarNum, stMaxColNum, stDat, pdUsage) { xr <- list(Err = FALSE, Dat = c(0)) xn <- formatC(pdVarNum,format="f",width=1,digits=0) # get last character (number of col1, 2, 3) pdColNum <- formatC(gColNum,format="f",width=2,digits=0,flag="0") wstname <- names(stDat) wstMax <- dim(stDat)[2] # Can't create stColName - if not valid, stColNum was set to 0 if bad or NA if pdVarName variable vector was missing. # Check if the pdVarName exist in the panelDesc data.frame if (is.na(match(pdVarName, PDUsed))) { xr$Err <- TRUE warnCnt() xmsg <- paste0("***02",xn,",5 ", gName, " ", pdColNum, " The required panelDesc variable ", pdVarName, " is missing from the ", pDName, " data.frame. ", pdUsage) warning(xmsg, call.=FALSE) } if (!xr$Err) { # no error found yet.... # Check to see if statsDFrame column in the panelDesc variable was found to be valid by CheckColx function earlier. 
if (is.na(stColNum) || stColNum == 0) { # invalid name or column number in statsDFrame xr$Err <- TRUE warnCnt() xmsg <- paste0("***02",xn,"6 ", gName, " ", pdColNum, " The specified column name or number in ", pdVarName, " panelDesc variable (", stColName, ") does not exist in the for ", sDFName, " data frame or is out of range. ", pdUsage) warning(xmsg, call.=FALSE) } } return(xr) } # ### ### # # function to verify the presents and type of data in a statsDFrame column, Used with "col1" "col2", and "col3" panelDesc lists. # remember - convertion to numeric column number - done # verification of column names - done # NA value = means was not provided # 0 value = means invalid value or name provided and error message already generated. # col1, col2, col3 values not needed by a glyph are not checked or processed. # # xr <- CheckPDCol2(pdVarName, pdVarNum, pdColNum, gName, stColNum, stDat) ---- Retired # CheckPDCol2 <- function(pdVarName, pdVarNum, pdColNum, gName, stColNum, stDat, pdUsage) { # # PDVarName = "col1", "col2", or "col3" - character string name of the panelDesc list (field): # pdVarNum = numeric 1, 2, 3 for messages. # pdColNum = panelDesc glyph column number # gName = character string name of the calling glyph # stColNum = column number in statsDFrame data.frame # stDat = statsDFrame data.frame # # xr = result - xr$Err - error indicator # xr#Dat - statsDFrame data column # xr <- data.frame(Err = FALSE, Dat = c(0)) xn <- formatC(pdVarNum,format="f",digits=0,width=1) stColName <- wSFName[stColNum] # it's not a given that the colx was in the panelDesc data.frame # is the panelDesc variable list (col1, col2, etc.) exit in the panelDesc? if (is.na(match(pdVarName,PDUsed))) { # no - not present xr$Err <- TRUE xmsg <- paste0("***02", xn, "5 ", gName, " ", pdColNum, " The required panelDesc variable ", pdVarName, " is missing from the ", pDName, " data frame. 
", pdUsage) warnCnt() warning(xmsg, call.=FALSE) } if (stColNum == 0) { xr$Err <- TRUE # if stColNum is zero, then error message already generated. So signal error and stop. } if (!xr$Err) { # no error found yet.... if (is.na(stColNum)) { # Required col value not provided. xr$Err <- TRUE xmsg <- paste0("***02", xn, "4 ", gName, " ", pdColNum, " There is no ", sDFName, " column was specified in ", pdVarName, " variable in the ", pDName, " panelDesc data.frame."," A data column name/number is required. ", pdUsage) warnCnt() warning(xmsg, call.=FALSE) } else { # stColNum is in range - already checked before and error message generated. xr <- CheckNum(stDat[,stColNum], gName, pdVarNum, pdColNum, pdVarName, stColName, pdUsage) } } } # ### ### # # ConvertDV - Converts original details variable list into the new by glyph variable list. # ConvertDV <- function(DV) { # This routine converts an old details variables structure into a new structure. # Each named list in panelDesc is the same length, but may or may not be used # by the glyph. # # Generate a list containing a list for each glyph column. The glyph list # contains all of the variable (named lists) for it operation. # This is organized vertically, instead of horizontally. # The glyph list need only contain the variables required/used for a glyph. # # Variables and table for Convertion of PD from old format to new format. # # DV is the details variable structure. a list of named lists. # # Return value is the "NewDV" with new variable names grouped by glyph name. # # #data(detailsVariables) # already loaded. 
# # For testing - load #DVFile <- "c:/projects/statnet/r code/micromapST/data/detailsVariables.rda" #load(DVFile) # loads detailsVariables structure glyphNames <- c("arrow", "bar","boxplot", "ctrbar", "dot", "dotsignif", "dotconf", "dotse", "id", "map", "mapcum", "mapmedian", "maptail", "normbar", "panel", "rank", "scatdot", "segbar", "system", "ts", "tsconf" ) initDVList <- function(glyphNames) { NewDV <- NULL NewDV <- list() for (iDx in seq_along(glyphNames)) { NewDV[[glyphNames[iDx]]] <- list() } return(NewDV) } DVTable <- detailsVariables DVTable$varName <- str_trim(DVTable$varName) DVTable$newVarName <- str_trim(DVTable$newVarName) # ErrFnd <- FALSE if (!is.list(DV)) { ErrFnd <- TRUE xmsg <- "convertDV - DV structure is not a list." warning(xmsg,call.=FALSE) } varsNum <- length(DV) # number of variables varsName <- names(DV) # names of variables #cat("varsNum :",varsNum,"\n") #cat("varsName:",paste0(varsName,"\n"),"\n") # NewDV <- initDVList(glyphNames) # initializes each glyph list to a list. for (ind in seq_along(varsName)) { # step through each variable name # validate value vName <- names(DV)[ind] # get name vValue <- DV[[ind]] # get value xIndex <- match(vName,DVTable$varName) #cat("vName:",vName," vValue:",vValue," xIndex:",xIndex,"\n") if (is.na(xIndex)) { xmsg <- paste0("variable: ",vName," not found in master variable list. 
Name is not valid, skipped") warning(xmsg,call.=FALSE) } else { varData <- DVTable[xIndex,] # get info to validate and translate #cat("validate-method:",varData$method," v1:",varData$v1," v2:",varData$v2,"\n") tag <- paste0(varName," variable") res <- switch(varData$method, "colors" = { is.Color(vValue) }, "numeric" = { if (is.numeric(vValue)) { (is.between(vValue,as.numeric(varData$v1),as.numeric(varData$v2))) } }, "integer" = { if (is.numeric(vValue)) { (is.between(as.integer(vValue),varData$v1,varData$v2)) } }, "lines" = { wS <- c('1','2','3','4','5','6','blank','solid','dashed','dotted','dotdash','longdash','twodash') wV <- as.character(vValue) xIdx <- match(wV,wS) !is.na(xIdx) }, "logical" = { is.logical(vValue) }, "match" = { wS <- eval(parse(text=varData$v1)) # must do this to build vector. wV <- as.character(vValue) xIdx <- match(wV,wS) !is.na(xIdx) }, "text" = { if (is.character(vValue)) { (is.between(nchar(vValue),as.integer(varData$v1),as.integer(varData$v2))) } }, "vectOf3" = { if (is.atomic(vValue)) { if (length(vValue) == 3) { (all(is.between(vValue,varData$v1,varData$v2))) } } }, { FALSE } ) # res has the validation results #cat("res:",res," typeof(res):", typeof(res)," ",class(res),"\n") if (!res) { xmsg <- paste0("***01N0 DETS The ",tag," does not have a valid value: ",vValue," Check type ",varData$method," used.") warning(xmsg) } else { # translate # replicate variable for each glyph that uses it. newVarN <- varData$newVarName #cat("usedBy:",varData$usedBy,"\n") GNList <- eval(parse(text=varData$usedBy)) # list of glyph that use this variable. # build the new variable for each glyph. for (jnd in seq_along(GNList)) { GName <- GNList[jnd] #cat("Added GN:",GName," / ",newVarN," = ",vValue,"\n") NewDV[[GName]][[newVarN]] <- vValue # add list with single element. } # end of jnd loop } # end of test results from validation. } # end of check for match variable name. 
#cat("Check next variable in list.\n") } # end of ind loop return(NewDV) } # end of ConvertDV function # ### ### # # How to convert old panelDesc structure to a new panelDesc structure # # "advanced" named list used to add new variables to the panelDesc instead # of keep adding named lists across all of the glyph columns. # # Old Structure: # # panelDesc # type = c( 1, 2, 3, 4, 5, 6, ...) # lab1 = c( 1, 2, 3, 4, 5, 6, ...) # lab2 = c( 1, 2, 3, 4, 5, 6, ...) # lab3 = c( 1, 2, 3, 4, 5, 6, ...) # col1 = c( 1, 2, 3, 4, 5, 6, ...) # col2 = c( 1, 2, 3, 4, 5, 6, ...) # col3 = c( 1, 2, 3, 4, 5, 6, ...) # colSize = c( 1, 2, 3, 4, 5, 6, ...) # lab4 = c( 1, 2, 3, 4, 5, 6, ...) # refText = c( 1, 2, 3, 4, 5, 6, ...) # refVal = c( 1, 2, 3, 4, 5, 6, ...) # panelData=c( 1, 2, 3, 4, 5, 6, ...) # # types: # "map" lab1, lab3 # "mapcum" lab1, lab3 # "mapmedian" lab1, lab3 # "maptail" lab1, lab3 # "id" lab1, lab3 # # "arrow" lab1, lab2, lab3, col1, col2, refText, refVal # "bar" lab1, lab2, lab3, col1, refText, refVal # "dot" lab1, lab2, lab3, col1, refText, refVal # "dotsignif" lab1, lab2, lab3, col1, col2, refText, refVal # "dotse" lab1, lab2, lab3, col1, col2, refText, refVal # "dotconf" lab1, lab2, lab3, col1, col2, col3, refText, refVal # "scatdot" lab1, lab2, lab3, col1, col2, refText, refVal # "rank" lab1, lab2, lab3, # "normbar" lab1, lab2, lab3, col1, col2, refText, refVal # "segbar" lab1, lab2, lab3, col1, col2, refText, refVal # "ctrbar" lab1, lab2, lab3, col1, col2, refText, refVal # "ts" lab1, lab2, lab3, panelData # "tsconf" lab1, lab2, lab3, panelData # "boxplot" lab1, lab2, lab3, panelData, refText, refVal # # ConvertPD <- function(PD) { # This routine converts an old panelDesc structure into a new structure. # Each named list in panelDesc is the same length, but may or may not be used # by the glyph. # # Generate a list containing a list for each glyph column. The glyph list # contains all of the variable (named lists) for it operation. 
# This is organized vertically, instead of horizontally. # The glyph list need only contain the variables required/used for a glyph. # # Variables and table for Convertion of PD from old format to new format. # PDFldDef <- c("type", "lab1", "lab2", "lab3", "col1", "col2", "col3", "colSize", "panelData","refTexts", "refVals", "rmin", "rmax", "adv" ) PDGlyphReq <- matrix(c( # glyph lab1, 2, 3, col1, 2, 3, colSize, panelData, refT, refV, rmin, rmax, adv c("map", TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE), c("mapcum", TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE), c("mapmedian",TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE), c("maptail", TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE), c("id", TRUE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE), c("arrow", TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE), c("bar", TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE), c("dot", TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE), c("dotsignif",TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE), c("dotse", TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE), c("dotconf", TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE), c("scatdot", TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE), c("rank", TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE), c("segbar", TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE), c("normbar", TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE), c("ctrbar", TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE), c("ts", TRUE, 
TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE), c("tsconf", TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE), c("boxplot", TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE) ), ncol=11, byrow=TRUE) PDGlyphDF <- as.data.frame(PDGlyphReq,stringsAsFactors=FALSE) colnames(PDGlyphDF) <- PDFldDef # numVars <- dim(PD)[2] numMMCols <- dim(PD)[1] # wNames <- colnames(PDGlyphDF)[2:11] wPDNames <- colnames(PD) # #print(wNames) #print(wPDNames) # NewPD <- list() for (ind in c(1:numMMCols)) { # step through each column and convert vertically # step through by glyph column wType <- as.character(PD$type[ind]) # get glyph type for column wSel <- (PDGlyphDF$type == wType) wList <- as.logical(unlist(PDGlyphDF[wSel,c(2:11)])) # get associated usage row wNames2 <- wNames[wList] # get associated variable names gList <- list(type=wType) # initialize output list for column xVar <- " " for (jnd in c(1:length(wNames2))) { # step through possible variables varName <- wNames2[jnd] # get next variable name if (!is.na(match(varName,wPDNames))) { # PD variable is present in the panelDesc # build string for command and execute to get value of variable cmdStr1 <- paste0("xVar <- as.character(PD$",varName,"[",ind,"])") #print(cmdStr1) eval(parse(text=cmdStr1)) # get value of variable into xVar if ((!is.na(xVar)) && (xVar != "") && (xVar != "NA")) { # check to see if the value is "not present" # Only process if the variable contains something. if (varName == "adv") { # if the variable is "adv", then process a little differently # adv is a list of variables and values, must add it to gList. 
gList <- c(gList, xVar) # add adv list to the output list } else { # other variables, built string and set up variable in output # xVar is the value of varName, so create the variable and set the value cmdStr2 <- paste0("gList$",varName," <- xVar") #print(cmdStr2) eval(parse(text=cmdStr2)) } } # end of check for good data to convert (not NA, "", "NA") } # end of check if variable present } # end of jnd loop #str(gList) NewPD[[ind]] <- gList } # end if ind loop # return(NewPD) } # ### #### # # The panels work on scales user units. In some cases this is 0,1 on both axis. # In many cases, this is the rx and ry ranges for the graphs. # The axis and titles lines are outside of the plotting area and # most of the text is written with 'mtext' function that place the text using # the "line" offset from the plot area. In the case a line must be draw and # text must be written, this must be done with the line and text functions # under par(xpd=TRUE) using user units to position the text and line. # # The drawing of the refTexts and dotted line must use this approach. # ConvLineToUser <- function(iSide, iLine) { # iSide is the side of the plot to draw the text and line. # 1 = bottom, 2 = left, 3 = top, 4 = right margins # iLine is the line offset from the plotting area. # 0 = next to the plot box, 4 = 5th line away from the box. # # Returns the iLine position in user scales in units. 
   #
   # Current device geometry: plot size (inches), user coordinate range,
   # and margins in both lines (mar) and inches (mai).
   xpin <- par("pin")
   xusr <- par("usr")
   xmar <- par("mar")
   xmai <- par("mai")

   #printPar()

   if (iSide == 1 || iSide == 3) {
      # for top and bottom sides, get Y units per inch
      UnitsPerInch <- diff(xusr[3:4])/xpin[2]
   } else {
      # for left and right sides, get X units per inch
      UnitsPerInch <- diff(xusr[1:2])/xpin[1]
   }
   InchesPerUnit <- 1/UnitsPerInch

   #cat("iSide:",iSide," UnitsPerInch:",UnitsPerInch," InchesPerInch:",InchesPerUnit,"\n")

   # per-side conversion constants; one row per side = below, left, above, right.
   distZerou <- NULL
   distZerou <- switch(iSide,
                  #     1        2 3 4   5      6
                  #     usrZoffs d s s   adjA   adjL
                  c(-xusr[3],-1,1,1,  0.25,  0),    # bottom 1   # 5 was -0.25 changed to 0.25
                  c(-xusr[1],-1,2,2, -0.05,  0),    # left   2
                  c( xusr[4], 1,1,3, -0.30,  0),    # top    3
                  c( xusr[2], 1,2,4,  0.10,  0),    # right  4
                  c( 0,       0,0,0,  0,     0)     # null
                )
   #  item 1 =>  base value at edge line.
   #  item 2 =>  sign + or -  (add or subtract distance calculated)
   #  item 3 =>  not used.
   #  item 4 =>  mar and mai reference indexes.
   #  item 5 =>  basic adjustment amount. (offset)
   #  item 6 =>  extra per-line adjustment (currently 0 for every side).

   #cat("distZerou:",distZerou,"\n")

   # margin lines per inch on the chosen side (mar is in lines, mai in inches).
   LinesPerInch  <- xmar[distZerou[4]]/xmai[distZerou[4]]   # below, left, above, right  mar/mai -> "5"
   InchesPerLine <- 1/LinesPerInch                          # 1/5 = 0.2  inches per line.
   UnitsPerLine  <- InchesPerLine * UnitsPerInch + distZerou[6]   # adjust line height

   #cat("distZerou:",distZerou,"\n")
   #cat("LinesPerInch:",LinesPerInch,"  InchesPerLine:",InchesPerLine,"  UnitsPerLine:",UnitsPerLine,"\n")

   # if convert line to user scale  Position in user scale
   #   Line to user scale Pos conversion
   Posu <- distZerou[2] * ( ( ( iLine + distZerou[5] ) * UnitsPerLine ) + distZerou[1] )
   #       direction    * ( ( ( line # + offset      ) * Units per Line ) + unit offset (base axis line value.) )

   #cat("iLine:",iLine,"  Posu:",Posu,"\n")

   return(Posu)
}
#
#  ###  ###
#
#   Function used by mapping functions to draw the column titles for MapCum,
#   MapMedian, and MapTail.  These titles have colored boxes preceeding
#   the titles. This function adds four blanks lead of the title as placeholders,
#   draws the text center, then overlays the boxes as required.
#
DrawBoxAndText <- function(wTxt, wTxt.cex, sq.width, sq.col, sq.border.col, yposl) {
   #
   # function to draw and center the glyphs column titles with a preceeding
   # colored box.  Used by the MapMedian, MapTail, and MapCum mapping
   # functions.
   #
   #   wTxt          = title text
   #   wTxt.cex      = cex scaling for the title text
   #   sq.width      = base width of the colored square (inches, before font scaling)
   #   sq.col        = fill color of the square
   #   sq.border.col = border color of the square
   #   yposl         = mtext line position - 0 on top edge to 3 lines???
   #
   xps  <- par("ps")
   xpin <- par("pin")
   xusr <- par("usr")
   xmar <- par("mar")
   xmai <- par("mai")
   #cat("xmai:",xmai,"  xmar:",xmar,"  xusr:",xusr,"  xpin:",xpin,"  xps:",xps,"\n")

   # inches-to-user-units conversion factors for X and Y.
   itouX <- diff(xusr[c(1,2)])/xpin[1]
   itouY <- diff(xusr[c(3,4)])/xpin[2]

   inchPerLine <- xmai[1]/xmar[1]    # top lines -> inches per line. (line position to inches).

   sqSize <- sq.width * ( xps / 9 ) * wTxt.cex   # scale size of square based on the point size of the font

   # may need to add logic to change number of leading blanks based on point size.
   # NOTE(review): the header comment says FOUR leading blanks, but the literal below
   # shows a single space - possibly whitespace lost in transcription; confirm against
   # the upstream source before relying on the padding width.
   wLeni  <- strwidth(paste0(" ",wTxt),units="in", cex=wTxt.cex)
   #wLenu <- strwidth(paste0(" ",wTxt),units="us", cex=wTxt.cex)
   #cat("len i:", wLeni, "  len u:",wLenu,"  ratio:",wLenu/wLeni,"\n")

   # left edge (inches, then user units) needed to center the padded title.
   nStr1i <- (xpin[1]/2) - (wLeni/2)
   nStr1u <- nStr1i * itouX

   #wUseru <- diff(xusr[c(1,2)])
   #nStr2u <- (wUseru/2) - (wLenu/2)
   #cat("nStr1u:",nStr1u,"  nStr2u:", nStr2u,"\n")

   yadji <- 0.045     # inches (subtracted)
   #xadji <- 0.10
   # empirical x offset that grows with the effective point size (observed values below).
   xadji <- (1.25 ^ ( xps * wTxt.cex )) / 2100    # + ( 0.1 * 1/ScR)   # inches
   if (xadji > 0.05)  xadji = 0.05
   #  0.08  at 28pt
   #  0.04  at 24pt
   #  value of 0.04 at 20pt.
   #  0.025 at 16pt.
   #  0.01  at 14pt.
   #  0.005 at 12pt.
   #  0.005 at 10pt.
   #  0.001 at 9pt.
   #  0.001 at 8pt.
   #  0.001 at 6pt.
   #
   #  Going to try -->  ( 1.25 ^ ( Points * wTxt.cex ) ) / 2100  = xadji
   #
   # square corner coordinates in inches (NA terminates the polygon path).
   box.xi <- c(0, 0, sqSize, sqSize, NA) + xadji
   box.yi <- c(0, sqSize, sqSize, 0, NA) + yadji

   # y baseline = line positiion * inchToLIne + height of plot area.
   yposi  <- yposl * inchPerLine + xpin[2]    # add base position and convert to units.
   box.yu <- ( ( box.yi + yposi ) * itouY )   # then convert to units
   box.xu <- ( ( box.xi + nStr1i ) * itouX )
   #cat("yposl:",yposl," yposi",yposi, "\n")
   #cat("box.xu:", box.xu, "\n  box.yu:", box.yu,"\n")

   # use text to print the string centered.
   #   line one. (four blanks for box padding.  May have to vary as font size changes.
   # write text (centered)
   mtext(paste0(" ",wTxt),line=yposl,side=3, cex=wTxt.cex)    # pos = below centered.

   # draw square over the blanks in the title on the left.
   polygon(box.xu, box.yu, col=sq.col, border=sq.border.col)
   #polygon(bpx/xu, box.yu, col="black",density = 0)   # draw borders if needed.
}
#
#  ###  ######
##
##  CleanXLabels  - clean up set of labels  ## (retained commented-out predecessor) ##
##
#
#CleanXLabels <- function(rx,atRx,nTicks)  {
#   #
#   ## expand range of x if needed.
#   rAtRx <- range(atRx)
#   #if(rAtRx[1] < rx[1]) rx[1] <- rAtRx[1]
#   #if(rAtRx[2] > rx[2]) rx[2] <- rAtRx[2]
#
#   # get number of labels
#   lAtRx <- length(atRx)
#   #cat("CXL-lAtRx:",lAtRx,"  trim.\n")
#   # trim labels outside of data range.
#
#   nT <- 7
#   if (lAtRx <= nT) {
#      # Delete labels below actual data point
#      if((atRx[1] < rx[1]) & (atRx[1] != 0) )  atRx <- atRx[-1]       # delete grid line below rx minimum
#   }
#   # Delete labels above actual data points
#   lAtRx <- length(atRx)
#   if (lAtRx <= nT) {
#      if((atRx[lAtRx] > rx[2]) & (atRx[lAtRx] != 0) )  atRx <- atRx[-lAtRx]   # delete grid line above rx maximum
#   }
#   lAtRx <- length(atRx)
#   #cat("DrawXAxis s adjusted atRx:",paste0(atRx,collapes=", "),"\n")
#
#   return(atRx)
#  }
#
##  ######  #####
#
#  CleanXLabels2 -
#  If greater than 3 labels - trims off any label point outside of the range of the data and not zero.
#  expands data range(rx) to cover remaining edge labels.
#
CleanXLabels2 <- function(rx, atRx) {
      lAtRx <- length(atRx)     # length of atRx and number of labels.
      #cat("CXL2-lAtRx:",lAtRx,"  trim.\n")
      if (lAtRx > 3) {
         # if greater than 3 labels - large number of labels - trim labels that are out of range.
# Check low end label if (atRx[1] < rx[1] & atRx[1] !=0 ) { atRx <- atRx[-1] # trim first value lAtRx <- length(atRx) } # Check high end label if (atRx[lAtRx] > rx[2] & atRx[lAtRx] != 0 ) { atRx <- atRx[-lAtRx] lAtRx <- length(atRx) } } # Extend data range based on labels and grid lines # Check low end data range vs. label if (atRx[1] < rx[1]) { # first label outside of data range. rx[1] <- atRx[1] # expand low end. } # Check high end data range vs. label if (atRx[lAtRx] > rx[2]) { # last label outside of data range. rx[2] <- atRx[lAtRx] # expand high end } #cat("After Extended - rx:",rx," atRx:",atRx,"\n") return(list(rx=rx,atRx=atRx)) } # ##### ##### # TestOverlap <- function(Acex, atLab, atRx, nSp) { lAtLab <- length(atLab) widthSp <- strwidth("0",cex=Acex,units="user") widthSpN <- widthSp * nSp #cat("TestOverlap-cex:",Acex," nSp:",nSp," widthSpN:",widthSpN," len(atLab):",lAtLab,"\n") widthAtLabH <- strwidth(atLab,cex=Acex,units="user")/2 SrtLab <- atRx - widthAtLabH EndLab <- atRx + widthAtLabH #cat("SrtLab:",SrtLab,"\n") #cat("EndLab:",EndLab,"\n") # number of labels 1 to n, so check space between 1-2, 2-3, ... , nm1-n OverLapFnd <- FALSE # Check to see if any labels would overlap each other based on width and grid point location. for (ind in c(1:(lAtLab-1)) ) { wX <- SrtLab[ind+1] - EndLab[ind] #cat("ind:",ind," wX:",wX,"\n") if (wX < widthSpN) { OverLapFnd <- TRUE } } #cat("OverLapFnd:",OverLapFnd,"\n") return(OverLapFnd) } # ##### ##### # # Test to see if labels overlap text from neighboring columns. TestLabAtEdge <- function(atLab,atRx,YAxisPad,rx,lineAxisSizes) { # function to test edges for possible shift. # returns atRx adjusted. xusr <- par("usr") xpin <- par("pin") xupi <- diff(xusr[1:2])/xpin[1] #cat(" TestLabAtEdge - xusr:",xusr," xpin:",xpin," xupi:",xupi,"\n") # width of each label. 
WidthOfLabs <- strwidth(atLab,cex=lineAxisSizes["Ax1"],units="user") # half of the width of each label HalfWidthOfLabs <- WidthOfLabs/2 # starting "x" position of each label SrtOfLabs <- atRx - HalfWidthOfLabs # ending "x" position of each label EndOfLabs <- atRx + HalfWidthOfLabs # number of labels. lAtLab <- length(atLab) # #cat("Label Specifcations: (width, half, srt, end)\n") #print(WidthOfLabs) #print(HalfWidthOfLabs) #print(SrtOfLabs) #print(EndOfLabs) # get 1/2 of the column sep gap (in units) wColSepGapHU <- (colSepGap/2)*xupi #cat("half of colSepGap in units:",wColSepGapHU,"\n") # Viable left edge of column (rx[1] - col sep gap) leftEdge <- rx[1] - wColSepGapHU # 1/2 col sep converted to units. # adjust left edge is Y Axis is present - have more room. if (YAxisPad) { # y Axis present - add standard 0.2 inches of padding. wYAGapHU <- (YAxis.width * xupi) leftEdge <- leftEdge - wYAGapHU #cat("wYAGapU:", wYAGapHU," added to leftEdge.\n") } # Viable right edge of column (rx[2] + col sep gap) rightEdge <- rx[2] + wColSepGapHU #cat("leftEdge:",leftEdge," rightEdge:",rightEdge," units.\n") #cat("atRx:",atRx," rx:",rx,"\n") # # Adjust first and last label point inward for apperance. # # Check overhangs of last column and this column. # pos values - have space (inches) # neg values - need space # sum < 0 - needed more space then available - problem - go do stagger # sum >=0 - had enough space - no problem. # wAtRx <- atRx lAtRx <- length(atRx) rAtRx <- range(atRx) WidthRx <- diff(rAtRx) edgeRxAdj <- (WidthRx / 1000) * XAxis.indent #cat("edgeRxAdj:",edgeRxAdj,"\n") # # Is not getting applied if staggered. Problem. # # # Adjustments label atRx to bring the first and last "atRx" points in a little. # if (SrtOfLabs[1] < leftEdge) { #cat("overlap left edge:", leftEdge - SrtOfLabs[1], " units\n") # Adjust both edge at points inwared by 1/1000 the range of labels * XAxis.indent(5) wAtRx[1] <- wAtRx[1] + edgeRxAdj # key adjustment move inward. 
SrtOfLabs[1] <- SrtOfLabs[1] + edgeRxAdj EndOfLabs[1] <- EndOfLabs[1] + edgeRxAdj #cat("adj - SrtOfLabs[1]:",SrtOfLabs[1]," EndOfLabs[1]:",EndOfLabs[1],"\n") } if (EndOfLabs[lAtRx] > rightEdge) { wAtRx[lAtRx] <- wAtRx[lAtRx] - edgeRxAdj # key adjustment EndOfLabs[lAtRx] <- EndOfLabs[lAtLab] - edgeRxAdj SrtOfLabs[lAtRx] <- SrtOfLabs[lAtLab] - edgeRxAdj #cat("adj - SrtOfLabs[lAtRx]:",SrtOfLabs[lAtRx]," EndOfLabs[lAtRx]:",EndOfLabs[lAtRx],"\n") } # add check to see if shift causses overlap with neighbor label. #cat("after 1st and last shift-rx:",rx," atRx:",wAtRx,"\n") #cat(" atLab:",atLab," axisSubTitle:",axisSubTitle,"\n") atRx <- wAtRx # update label points. Shift completed, if needed. # # Deal with overlap to over columns. ( see is overlap is happening ) # # Check for overlap with previous column. w1stLabOverU <- SrtOfLabs[1] - leftEdge # have number of units over the edge of the plot. w1stLabOverI <- (w1stLabOverU / xupi) # Convert units to inches of overhang. # if negative, then label is extended into next column # if positive or zero, then label is within column # add the values: if negative - OVERLAP. # if positive - space was available. # # TEST for overlap done outside of this routine, we just calculate the variable. # # Calculate the right edge overlap being used. Will use as lastLab2Space handoff to next column. # wLastLabOverU <- rightEdge - EndOfLabs[lAtRx] wLastLabOverI <- (wLastLabOverU / xupi) # if pos value - we have room. neg - we need room. return(list(atRx=atRx,w1stLabOverI=w1stLabOverI,wLastLabOverI=wLastLabOverI)) } ##### # # DrawXAxisAndTitles - This functions takes the rx range of the data and calculates the X axis labels and # grid line positions. Four methods are supported: # original ("o") - the original method of labeling best on panelInbound and pretty functions # extended ("e") - use of the extended algorithm and no panelInbound limiting. # wilkinson("w") - use the wilkinson algorithm. 
#          scale    ("s") - use of the extended algorithm and then scaling based on the largest value
#                           and sub-titling the scale used. (e.g., 100000 -> 10 in the ten thousands.)
#          scale number ("sn") - use of the extended algorithm and then scaling each number and adding a suffix
#                           to the number to indicate the scale used. (e.g., 10000 -> 10M)
#
#  New Feature - lastLab2Space and lastLab3Space.  These allow us to determine whether the lab2 or lab3
#     title lines on the map and id columns collide with the axis labels on the glyph columns.
#  Process:
#     1) if staggered, exit
#     2) get width of axis first label.
#     3) discount offset (indent)
#     4) get amount of room for handover of label - space between plot and mid point.
#     5) see if room for remainder in lastLab2Space (and lab3).  If no room,
#        instigate staggerLab.
#
#  Take into account the user's request for scaling and staggering first.
#  If the labels don't fit, warn the user, do scaling first ("sn") and try again.  If
#  unsuccessful, then force staggering of labels.
#
#  Other options to consider:
#     Force zero to be gridded.
#     Optional edge grid line labels.
#     Enlarge edge buffer space to handle labels.
#     Modification of the "preferred" number created by the extended algorithm.
#
#  Titles may run into each other.
#
#  The function also handles the staggering of labels if requested.
#
#  Since the type of axis labeling impacts the lab1, lab2, lab3, and reftxt titles, this function also
#  handles the placement and drawing of the column titles and footnotes.
#
#  Subdivide into X-Axis and Title processing.  Let X-Axis find out how much space it needs, fill it
#  and pass to Titles, where to pick up the labeling.  If no X-Axis, then a known spacing will be passed
#  to the titles.
#
#  Basically start at 0 off the axis (panel) line.
#  Simple X-Axis is font "9" and takes up 1 line of space.
#  Staggered X-Axis is font "9" * 0.65(?) and takes up 1.5 lines of space.
#  Scaled-with-subtitle X-Axis is font "9" * 0.75 and takes up 0.8 lines of space.
# # Combinations are (on top of title labels (1 or 2): # # Simple --------------- 1 line (font = 9) = axis label(0.75) + space(0.25) = 1 # Staggered, Simple ---- 1.5 lines (font = 9) = axis small label(2*0.625) + space(0.125) + space(0.125) = 1.5 # Scaled with subtitle - 1.5 lines (font = 9) = axis small label(2*0.625) + space(0.125) + space(0.125) = 1.5 # Stag. Scaled --------- 2.0 lines (font = 9) = axis small label(3*0.625) + space(2*0.125) + space(0.125)= 2.25 # one or two labels -- 2.0 lines # # So header can range from 1 line (no X-Axis) to 2 lines X-Axis with 1 label or 3 lines X-Axis and 2 labels # to a complex X-Axis > 1 to 2.05 lines plue the 1 or 2 lines of title. # # Need space for 1 to 4.05 lines with gaps. # # The same applies to the bottom labels. Lab3 is a title, and refText is the other title. # # Other discussion: Indenting edge labels. # 1) get length of labels # 2) determine how much room is available from edge to next inner label (length of that label and position.) # 3) How much to move to position inside box (or at least no further then 0.05" over the edge?) # 4) Is staggering label requested or required. # If labels fit, staggered may need to be turned off. # 5) If size of labels (all) do not fit, will staggering help? # 6) How to keep key values like "0" always labeled? What does the Axis algorithm use to omit labels. # # DrawXAxisAndTitles <- function(j, panels, rx, ry, reftxt, refval, leftPad=TRUE, rightPad=TRUE, YAxisPad=FALSE ) { ##### Start of Scaling and alternate labeling algorithms # # parameters needed: rx, ry, j, panels, reftxt, refval, XAxis=TRUE # # globals: Title.Line.X.pos set of variables. # axisMethod # Text.cex # staggerLab # staggered # lab1 # lab2 # lab3 # refTexts # refVals # # functions: Scaler1, Scaler2, extended, panelSelect, panelScale, warning # # must initially select panel to start getting widths and heights for scaling. 
#cat("\n\nEntered DrawXAxisAndTitle function","\n") #cat("DX01-panels and j:\n") #print(panels) #cat("i:",1," j:",j," rx:",rx," ry:",ry,"\n") panelSelect(panels, 1, j) # select panel x <- panelScale(rx, ry) # set scale for panel based on rx, ry xpin <- par("pin") xusr <- par("usr") xupi <- diff(xusr[1:2])/xpin[1] xps <- par("ps") staggering <- staggerLab # default is the user request. May be changed if needed. #cat("DXAT-start staggered:",staggered," staggerLab:",staggerLab," staggering:",staggering,"\n") #cat("Initial rx :",rx,"\n") # range adjustment xcxy <- par("cxy") # must be in the scale of the panel to get the right character width. #cat("xcxy:", xcxy," usr :",xusr," pin :",xpin," upi :",xupi," ps:",xps,"\n") xcxyMod <- xcxy[1]/4 # assume dot is least then the width of a average character. Use 1/4 for spacing. #cat("xcxyMod:",xcxyMod,"\n") if (leftPad) rx[1] <- rx[1] - xcxyMod if (rightPad) rx[2] <- rx[2] + xcxyMod #cat("Adjustment made for dot size - new rx:",rx,"\n") # reset scaling based on new rx range. x <- panelScale(rx, ry) # re-scale panel # get new values for user and units per inch xusr <- par("usr") xupi <- diff(xusr[1:2])/xpin[1] #cat("After dot re-scaling - usr :",xusr," pin :",xpin," upi :",xupi,"\n") par(xpd=T) # turn off clipping for panel # based on the axis method - prepare the atRx tick points and the atLab vector of labels # # Setup axis variables and get the axis "at" points and labels. # ##### ##### # # Scan possible labels and save heights. Lab1, Lab2, Lab3, refTexts # Check axis scaling and staggered and setup Axis1, Axis2 and subTitles # adjust Labx heights and spacings if required.l # Spaces and Heights are the constants, not the positions. We set them here. # # This also makes it simplier to have generic code further on. # # This will be coded to automatically: # labels at 3 points lower (25%) reduction of given point size. # Axis Large at 1 point lower than labels. 
(about 11.11%) # ability to reduce Axis Large by 1 pt for sizing of labels. (another 11.11%) # Axis Small (stagger) at 2 point lower than large labels. (22.22% below labels.) # # axisSubTitle <- "" # starts empty. # everything is based on a starting pointsize of 12. atLabcex <- Text.cex # Text.cex # 0.75 of 12 pt, -> 0.75 % (9 pt.) #cat("atLabcex:",atLabcex,"\n") # # Build elements to construct header and footer title and axis label positions. # xps <- par("ps") # current point size. ippt <- 1/72 # inches per point lppt <- 1/xps # line per point at par("ps") value (default = 12 pt. for 12 pt per line) ipl <- xps * ippt # points * inches per point at par("ps") -> inches per line. #cat("Step size 1 pt:",ippt," in. ",ippt*xupi," usr - lppt:",lppt," pt/line. \n") # 12pt * 0.75 -> 9pt, 18pt * 0.75 -> 13.5pt, 24pt * 0.75 -> 18 point lineNLabSize <- Text.cex # par("ps") - 3 points # 0.75 -> 0.75 % 1 line (0.75% of point size) (9 pt) lineNSpLabSize <- lineNLabSize * XAxis.Sp.mcex # PS * 15% # 0.75 * 0.2 -> 0.15 -> 20% of title line (1.8 pt) axisNLabSize <- lineNLabSize - (lppt) # - 1 pt delta / alternate -> XAxis.S.mcex = 0.666667 # 0.75 (9pt) - 1 pt -> 0.6667 % 89% line (8 pt) axisMLabSize <- lineNLabSize - (2 * lppt ) # - 2 pt delta # 0.75 (9pt) - 2 pt -> 0.5833 % 78% line (7 pt) axisSLabSize <- lineNLabSize - (3 * lppt ) # - 3 pt delta # 0.75 (9pt) - 3 pt -> 0.5 % 66.7% line (6 pt) axisLowestSize <- lineNLabSize - (4 * lppt ) # - 4 pt delta (lowest limit.) (5 pt) # 0.75 (9pt) - 4 pt -> 0.4167 % 55.5% line (5 pt) axisSubTSize <- axisSLabSize lineSSpLabSize <- lineNSpLabSize * 0.5 # 0.15 * 0.5 -> 0.075 % 10% line # calculations are dynamic - using ratios and percentages. # # Two labels and Axis = 0.66667 + 0.15 + 0.75 + 0.75 => 2.316667 + line height. # Axis Stag, Title, two labels = 0.5 + 0.5 + 0.075 + 0.75 + 0.75 -> 2.575 + line height. # must have at least 3.25 lines available. 
# #cat("lineNLabSize :",lineNLabSize,"\n") #cat("axisNLabSize :",axisNLabSize,"\n") #cat("axisMLabSize :",axisMLabSize,"\n") #cat("axisSLabSize :",axisSLabSize,"\n") #cat("axisSubTSize :",axisSubTSize,"\n") #cat("\n") #cat("lineNSpLabSize:",lineNSpLabSize,"\n") #cat("lineSSpLabSize:",lineSSpLabSize,"\n") #cat("\n") #cat("lineNLabSize-ps:",lineNLabSize*xps,"\n") #cat("axisNLabSize-ps:",axisNLabSize*xps,"\n") #cat("axisMLabSize-ps:",axisMLabSize*xps,"\n") #cat("axisSLabSize-ps:",axisSLabSize*xps,"\n") #cat("axisSubTSize-ps:",axisSubTSize*xps,"\n") #cat("\n") xusr <- par("usr") xupi <- diff(xusr[1:2])/xpin[1] #cat("A-usr:",xusr," xupi:",xupi,"\n") # # Work pattern, list for which to draw and where # lineTopSizes <- c(0, 0) # Lab2 Lab1 lineBotSizes <- c(0, 0) # Lab3 refText lineAxisSizes <- c(0, 0, 0, 0, 0) # Ax2 Ax1 SP AST`SP names(lineAxisSizes) <- c("Ax2","Ax1","SPT","AST","SP") # Axis spacing names(lineTopSizes) <- c("L2","L1") names(lineBotSizes) <- c("L3","L4") lineDo <- c( F, F, F, F, F, F, F, F, F) names(lineDo) <- c("Ax2","Ax1","SPT","AST","SP","L2","L1","L3","L4") xAxisDo <- FALSE xAxisDoOverlap <- TRUE lineMultiT <- c(1, 0.9, 0.9, 0.9, 1, 1, 1, 1) # size multiplier for proper spacing. names(lineMultiT) <- c("srt","Ax2","Ax1","SPT","AST","SP","L2", "L1") lineMultiB <- c(1, 0.9, 0.9, 0.9, 1, 1, 1, 1) # size multiplier for proper spacing. names(lineMultiB) <- c("srt","Ax2","Ax1","SPT","AST","SP","L3", "L4") # as of 8/17/16, we always print double axis labels to get them all printed. # atLab1 and atLab2 with atRx1 and atRx2 are created as the two halfs of the labels. # Set indicators if title/labels are present. 
if (lab1[j] != "") { lineDo["L1"] <- TRUE lineTopSizes["L1"] <- lineNLabSize } if (lab2[j] != "") { lineDo["L2"] <- TRUE lineTopSizes["L2"] <- lineNLabSize } if (lab3[j] != "") { lineDo["L3"] <- TRUE lineBotSizes["L3"] <- lineNLabSize } if (!is.na(reftxt)) { if (reftxt != "" || reftxt != " ") { lineDo["L4"] <- TRUE lineBotSizes["L4"] <- lineNLabSize } } # test to see if we have an axis to label. rx is not null. if (!is.null(rx)) { # X axis range present # initialize - we will have at least 1 X Axis line. - minimum setup. xAxisDo <- TRUE lineDo["Ax1"] <- TRUE # X Axis labels # 1 lineAxisSizes["Ax1"] <- axisNLabSize lineDo["Ax2"] <- TRUE # X Axis labels # 2 lineAxisSizes["Ax2"] <- 0 # zero to allow the overlap. lineDo["SP"] <- TRUE # Add spacing between title and X Axis. lineAxisSizes["SP"] <- lineNSpLabSize } # Use lineAxisSizes["Ax2"] to allow overlaying of Ax1 and Ax2 and use lineAxisSizes["Ax1"] # as the cex/font size for both Ax1 and Ax2 lines. # # if scales to TextCex = 0.7 then all times cex. = 4 * 0.7 => 2.8 lines of margin. # therefore, must have space for 3 mcex=1 height lines. # ######### # # Processing XAxis and rx. # Generate axis labels, scale and subtitle as required. # Results may be - single XAxis labels or XAxis labels with subtitle #cat("axisMethod:",axisMethod, " rx:",rx,"\n") if (axisMethod < 1 || axisMethod > 5) { #cat("***01X1 CARG-AX The Value for axisMethod internal variable is out of range 1-5 : ",axisMethod,"\n") axisMethod <- 4 } ### # # methods: # 1 = "o" use pretty to generate labels (original method), no scaling of labels. # 2 = "s" scale full range of numbers, # 3 = "sn" scale each number in label list. # 4 = "e" use extended labeling method. # 5 = "w" use wilkinson method # 6 = "a" automatics - evalute number, range, possible results of labeling calls, # edge number requiredments, range containing zero - and pick best set of tools. # (Future not coded - using "4" code. 
# # Future - add automatic - look at spacing and do scaling if required # auto scale to be done. # look at edges and do edge labels if required # # make sure zero is seen # wilkinson an extended handle # do staggered if edges overlap. # implemented # check for overlap with map or id column. # ID done # # Rules: # a) 3.4 labels per inch # b) number of labels must be at least 3. # c) request odd number of labels 3, 5, 7, 9 (expect no more than 9 labels on 2.5") # d) if number of labels > 3, trim labels not within rx data range, except zero value. # e) if panel width < 0.5, trim first and/or last labels if not within data range and zero # f) Never trim Zero. # ErrFnd <- FALSE # note errors #DoNarCol <- FALSE # indicate we are in the "narrow" column situtation. #cat("start of label fitting\n") #cat("lastLab2Space:",lastLab2Space,"\n") #cat("par('pin') :",par('pin'),"\n") #cat("par('usr') :",par('usr'),"\n") #cat("xupi :",xupi,"\n") ### # # estimated number of labels for glyph and make it an odd number. # reqNumLabels <- ((( xpin[1] * XAxis.nGridpIn ) %/% 2) * 2) + 1 # average 3.4 ticks/grid per inch made an odd number # average of 3.4 per inch * width in inches of panel. # force a minimum of 3 labels. if (reqNumLabels < 3) reqNumLabels <- 3 #cat("Start-reqNumLabels:", reqNumLabels," width in:",xpin[1]," XAxis.nGridlIn:",XAxis.nGridpIn,"\n") #cat("rx :",rx," axisMethod:",axisMethod,"\n") #cat("setup - colSepGap:",colSepGap," staggered:",staggered,"\n") # get sequence of possible number of labels listNumLabels <- seq(reqNumLabels, 3,by=-2) if (axisMethod==1) listNumLabels <- c(reqNumLabels) # method 1 does not use # of labels seed. # # main loop to find a set of X Axis labels that will file the space for the range. # # The major steps are repeated until a fit is found. 
# for (numLabels in listNumLabels) { #cat("Loop Start:",numLabels,"\n") #cat("lineAxisSizes:\n") #print(lineAxisSizes) #cat("lineDo :\n") #print(lineDo) ##### start of big loop to get solution between font size and number of labels. # Step 1 - generate list of labels for the requested number of labels. # do requested label generation and scaling. # Label Generation: o = panelInbounds, e = extended. # Scaling Methods : None, Scale range, Scale individual number. switch (axisMethod, { # method 1 - pretty - the "original" "o" #cat("Method 1-atRx:",atRx,"\n") # get reference points. atRx <- panelInbounds(rx) # list of ticks within range of x. (n=5 with triming) # pretty does n=5 by default. res <- CleanXLabels2(rx, atRx) atRx <- res$atRx rx <- res$rx # convert to character. atLab <- as.character(atRx) }, { # method 2 - scale range with subtitle "s" # scaling range - may have subtitle to include #cat("Method 2-atRx:",atRx,"\n") # get reference points atRx <- extended(rx[1],rx[2],numLabels) res <- CleanXLabels2(rx, atRx) atRx <- res$atRx rx <- res$rx # get Scaler1 results on max. atLabVc <- Scaler1(rx[2]) # get divisor and subtitle based on max value #cat("atLabVc:",atLabVc,"\n") axisSubTitle <- atLabVc[2] # get sub-title (if any)[2] [1] multiplier # scale the values into the character string. atLab <- formatC(atRx / as.numeric(atLabVc[1]), format="f", digits=2, drop0trailing=TRUE) if (axisSubTitle != "") { # add sub-title to header #cat("Add - axisSubTitle:",axisSubTitle,"\n") # Make adjustments # Scale each number (S) # Add subtitle and spacer at small axis size (Norm to Med - 1 pt) lineAxisSizes["AST"] <- axisMLabSize lineDo["AST"] <- TRUE lineAxisSizes["SPT"] <- lineSSpLabSize # use 1/2 of axis to titles spacing. lineDo["SPT"] <- TRUE # reduce size of axis labels lineAxisSizes["Ax1"] <- axisMLabSize lineDo["Ax1"] <- TRUE lineAxisSizes["Ax2"] <- 0 # no staggering yet - Ax1 and Ax2 on same line. lineDo["Ax2"] <- TRUE # include spacing between title and axis. 
lineAxisSizes["SP"] <- lineNSpLabSize # normal spacing because we have not staggered, yet. lineDo["SP"] <- TRUE lineMultiB["SP"] <- 2.25 # need a fudge on the Bottom. } }, { # method 3 - scale numbers with suffix "sn" # no subtitle will be added. #cat("Method 3-atRx:",atRx,"\n") atRx <- extended(rx[1],rx[2], numLabels) res <- CleanXLabels2(rx, atRx) atRx <- res$atRx rx <- res$rx atLab <- sapply(atRx, Scaler2) # scale the X axis labels. Scaler2 does label formating for each value. }, { # method 4 - extended algorithm (no scaling) "e" # no scaling - no subtitles # replaced wilkinson algorithm with extended - better behaved in number of labels generated vs. request. #cat("Method4 - extended rx:",rx," numLabels:",numLabels,"\n") atRx <- extended(rx[1],rx[2],numLabels) res <- CleanXLabels2(rx, atRx) atRx <- res$atRx rx <- res$rx atLab <- as.character(atRx) }, { # method 5 - wilkinson algorithm (no scaling) "w" # no scaling - no subtitles # replaced wilkinson algorithm with extended - better behaved in number of labels generated vs. request. #cat("Method5 - wilkinson rx:",rx," numLabels:",numLabels,"\n") atRx <- wilkinson(rx[1],rx[2],numLabels) res <- CleanXLabels2(rx, atRx) atRx <- res$atRx rx <- res$rx atLab <- as.character(atRx) }, { # method 6 - placeholder for automatic scaling, edge numbers, and staggering of labels. "e" # for now same as 4 # Future Coding - place holder. # # Do each scaling and see which creates the smallest set of labels. # Which way to do: 1) number of characters, 2) strwidth each summed, # 3) concat. labels with 1, 2 spaces? # #cat("Method6 - extended rx:",rx," numLabels:",numLabels,"\n") atRx0 <- panelInbounds(rx) # list of ticks within range of x. 
(n=5 with triming) res <- CleanXLabels2(rx, atRx0) atRx0 <- res$atRx rx0 <- res$rx atLab0 <- as.character(atRx0) atRx1 <- extended(rx[1],rx[2],numLabels) res <- CleanXLabels2(rx, atRx1) atRx1 <- res$atRx rx1 <- res$rx atLab1 <- as.character(atRx1) atLabVc <- Scaler1(rx1[2]) # get divisor and subtitle based on max value axisSubTitle <- atLabVc[2] # get sub-title (if any) #cat("atLabVc:",atLabVc,"\n") atLab2 <- formatC(atRx1 / as.numeric(atLabVc[1]), format="f", digits=2, drop0trailing=TRUE) atLab3 <- sapply(atRx1, Scaler2) stop }, { # default call #cat("axisMethod value unknown:",axisMethod,"\n") ErrFnd <- TRUE stopCnt() xmsg <- paste0("***0490 DMP Error in axisMethod set to ",axisMethod," in DrawXAxisAndTitles. ERROR. Default used.") stop(xmsg,call.=FALSE) atRx <- c(0,1) } ) # #cat("Method executed\n") #cat("atRx :",atRx ,"\n") #cat("atLab:",atLab,"\n") #cat("rx :",rx,"\n") #print(lineAxisSizes) #print(lineDo) #### Labels selected and Scaling done. # # Step 2 - Split the labels into two overlaping vectors. # and initialize for finding fit. # lAtRx <- length(atRx) rAtRx <- range(atRx) # get first and last label values lAtLab <- length(atLab) #cat("lAtRx:",lAtRx," rAtRx:",rAtRx," lAtLab:",lAtLab," rx:",rx,"\n") #cat(" par(usr):",par('usr')," par(pin):",par('pin')," xupi:",xupi,"\n") #cat("staggered :",staggered," staggerLab:",staggerLab," staggering:",staggering,"\n") FndFit <- FALSE MakeStag <- FALSE # # at this point we have: # title1 (opt) # title2 (opt) (but title 1 or title 2 must be present) # subtitle (optional) # axis # 1 & 2 (both used to overlay axis label plotting. # # Adjust the first and last atRx values to move number inward a little. 
# atLab1 <- atLab[seq(1,length(atLab),by=2)] atLab2 <- atLab[seq(2,length(atLab),by=2)] atRx1 <- atRx[seq(1,length(atRx),by=2)] atRx2 <- atRx[seq(2,length(atRx),by=2)] #cat("Split label list\n") #cat("atLab1:", atLab1 ,"\n") #cat("atRx1 :", atRx1 ,"\n") #cat("atLab2:", atLab2 ,"\n") #cat("atRx2 :", atRx2 ,"\n") # # test to see how axis may draw the labels. # if they will not fit our calculations, then must likely # will be dropped by R's axis function. We are trying to out # guess R. # # Test fitting of single line axis (if not staggerLab) at Normal, -1pt, and -2pt # font sizes. Then test stagger labels at Normal and -1 pt font size. # If these don't work = punt and let the main loop try few labels. # # # Step 3 - Test single line style, if staggerLab not requested by caller. # if (!staggering) { # labels will not be stagger - by us or caller - at least not yet - so check single line format. #cat("NOT STAGGERING - Single Line Style Test\n") # check the fit of the axis labels, adjust as needed up to a point. wX <- lineAxisSizes["Ax1"] # original font size res <- TestOverlap(wX, atLab, atRx, 1) # space between must be 1 space. #cat("test1 - ces=wX:",wX," res:",res,"\n") # check X Axis fit as full non-staggered labels. if (!res) { #cat("full axis no staggered at font - OK - wX:",wX,"\n") # leave parameters as set. FndFit <- TRUE } else { # did not fit single line normal point size. wX <- wX - lppt # back up 1 point # orig font - 1 pt res <- TestOverlap(wX,atLab, atRx, 1) #cat("test2 - ces=wX:",wX," res:",res,"\n") if (!res) { # Good solution - update axis parameters lineAxisSizes["Ax1"] <- wX FndFit <- TRUE } else { # did not fit single line normal-1pt size. wX <- wX - lppt # back up 2 points # orig font - 2 pt res <- TestOverlap(wX,atLab,atRx, 1) #cat("test3 - ces=wX:",wX," res:",res,"\n") if (!res) { # Good Solution - 2 point. - update parameters lineAxisSizes["Ax1"] <- wX FndFit <- TRUE } else { # will not fit as single line axis labels. 
FndFit <- FALSE } } } # Note: if single line fits, it's still drawn as two overlapping label sets. } # end of single line checks. # # Step 4 - if not fit as single or staggerLab requested, test a staggered label style # if (!FndFit) { # no fit found for single line (or it was bypassed), do staggerLab style. #cat("Testing staggering style\n") # find longest staggered label list. wX <- lineAxisSizes["Ax1"] # remember this is already small. lAtLab1 <- nchar(paste0(atLab1,collapse=" ")) # space added between each label lAtLab2 <- nchar(paste0(atLab2,collapse=" ")) # space added between each label # Find the longest label set to use for test based on characters. if (lAtLab1 > lAtLab2) { wAtLab <- atLab1 wAtRx <- atRx1 } else { wAtLab <- atLab2 wAtRx <- atRx2 } # wAtLab is the longest set of labels based on character count. lwAtLab <- length(wAtLab) #cat("Longest of labels - wAtLab:",wAtLab," lwAtLab:",lwAtLab," axisLowestSize:",axisLowestSize,"\n") FndFit <- FALSE res <- TestOverlap(wX, wAtLab, wAtRx, 2) #cat("testS1 - cex=wX:",wX," 2 space res:",res,"\n") if (!res) { # Good should fit using standard height and staggered #cat("Initial values are good - keep them:",wX," Fit found\n") MakeStag <- TRUE FndFit <- TRUE } else { # no fit - try one small font wX <- wX - lppt # reduce size 1 point. if (wX > axisLowestSize) { # if bigger then smallest permitted. continue. # test labels and cex res <- TestOverlap(wX, wAtLab, wAtRx, 2) #cat("test s2 - cex=wX:",wX," 2 space res:",res,"\n") if (!res) { # good fit at small font. lineAxisSizes["Ax1"] <- wX #cat("fit found at ",wX,"\n") MakeStag <- TRUE FndFit <- TRUE } else { wX <- wX - lppt # reduce size 1 point. if (wX > axisLowestSize) { # if bigger then smallest permitted. continue. # test labels and cex - 2 pts. 
res <- TestOverlap(wX, wAtLab, wAtRx, 2) #cat("test s3 - cex=wX:",wX," 2 space res:",res,"\n") if (!res) { # goo fit at smaller font lineAxisSizes["Ax1"] <- wX #cat("fit found at ",wX,"\n") MakeStag <- TRUE FndFit <- TRUE } } } } } } if (FndFit) break # if have solution - stop looping. # if not fit, try reducing number of labels. #cat("End of Single and Staggered - FndFit:",FndFit," numLabels:",numLabels," len(atRx):",length(atRx),"\n") } # end of for loop on number of labels. # # Checking is done. Have fit or not. # #cat("exit numLabels loop\n") ##### end of loop - have solution??? if (!FndFit) { # no solution found???? cat("no XAxis labels fit found!!!\n") MakeStag <- TRUE } #cat("end of numLabels loop - FndFit:",FndFit,"\n") #cat("atLab1:",atLab1,"\n") #cat("atRx1 :",atRx1, "\n") #cat("atLab2:",atLab2,"\n") #cat("atRx2 :",atRx2, "\n") #cat("lineDo:\n") #print(lineDo) #cat("lineAxisSizes:\n") #print(lineAxisSizes) #cat("lineTopSizes:\n") #print(lineTopSizes) #cat("lineBotSizes:\n") #print(lineBotSizes) #cat("staggering:", staggering," staggered:",staggered," MakeStag:",MakeStag,"\n") #cat("start of edge checking - lastLab2Space:",lastLab2Space,"\n") ##### # # issues with labels - if label/grid near the edge - label hangs over the edge to next column. # solutions: a) move edge labels inward. labels like 0 may not need to be moved. # b) enforce staggered, so next columns number is on a different level. # c) delete edge label (if > 3 labels) # ##### # # Step 5 - check edge labels to see if indenting them will help. # # # Have to sets of labels atLab1 and atLab2... # # Situations: # rx[1] = edge (always), no atRx is outside this value. # atRx[1] - rx[1], is units from edge to grid for label # colSepGap can also be used as working space. # #cat("par('usr'):",par("usr"),"\n") #cat("par('pin'):",par("pin"),"\n") #cat("atRx :",atRx,"\n") #cat("atRx1 :",atRx1,"\n") res <- TestLabAtEdge(atLab1,atRx1,YAxisPad,rx,lineAxisSizes) # get results. 
w1stLabOverI <- res$w1stLabOverI wLastLabOverI <- res$wLastLabOverI atRx1 <- res$atRx #cat("1-res$atRx:",res$atRx," $1st:",res$w1stLabOverI," $Last:",res$wLastLabOverI,"\n") #cat("atRx2 :",atRx2,"\n") res <- TestLabAtEdge(atLab2,atRx2,YAxisPad,rx,lineAxisSizes) # get results. #cat("2-res$atRx:",res$atRx," $1st:",res$w1stLabOverI," $Last:",res$wLastLabOverI,"\n") atRx2 <- res$atRx if (res$w1stLabOverI < w1stLabOverI) { w1stLabOverI <- res$w1stLabOverI } if (res$wLastLabOverI < wLastLabOverI) { wLastLabOverI <- res$wLastLabOverI } #cat("results -> w1st:",w1stLabOverI," in. wLast:",wLastLabOverI," in.\n") #cat("lastLab2Space :",lastLab2Space," in.\n") # check the column overlap: xW <- strwidth("0",cex=lineAxisSizes["Ax1"],units="inch") # get size of a digit in inches. xW <- xW * XAxis.gapPC # 75% of the width. #cat("sum column overlap:",(w1stLabOverI+lastLab2Space)," in. Size Digit:",xW," in.\n") if ((w1stLabOverI + lastLab2Space) <= xW ) { # overlap condition. Force staggered. #cat("Lab2 text overlapping between columns - MakeStag set to TRUE\n") MakeStag <- TRUE # set staggering active flag. (column request.) } # lastLab2Space is the number of inches the left column has intruded into our column. #cat("lastLab2Space:",lastLab2Space," last column: + need space, - has space. lab 2 row.\n") # lastLab2Space < 0, last column needs space from us. # If sum(lastLab2Space,w1stLabOverI) => 0 there is room. # < 0 not enough room - overlap issue. # # lastLab2Space =>0, last column has space for us. # if sum(lastLab2Space,w1stLabOverI) => 0 there is room. # < 0 not enough room - overlap issue. # lastLab2Space <<- wLastLabOverI #cat("Setting lastLab2Space:",lastLab2Space,"\n") #cat("lastLab3Space:",lastLab3Space,"\n") #cat("staggering:", staggering," staggered:",staggered," MakeStag:",MakeStag,"\n") # # Step 6 - if staggered was requested or found to be the solution, set up all parameters. 
# if (MakeStag) { # take the two label sets and make a staggered XAxis #cat("MakeStag = TRUE - Modifying vector to do staggered.\n") # Adjust the sizes of font and spaces between lines for staggered style. # put in right order for neighboring column # check status of last column - staggered = TRUE, ended HIGH, = FALSE, ended LOW. #cat("Last Column position - staggered:",staggered,"\n") if (!staggered) { # staggered = FALSE (no stagger or ended low.) start high. # last column had no stagger, no stagger done, or ends in low position. # move to start in high position. # get updated information. # # No change. # #s1 <- seq(1,lAtLab,by=2) #s2 <- seq(2,lAtLab,by=2) # start high (ax1) #cat("HIGH position, keep labels in same order - 1st value LOW - atLab1.\n") } else { # start low #s1 <- seq(2,lAtLab,by=2) #s2 <- seq(1,lAtLab,by=2) # switch them wAtLab <- atLab1 atLab1 <- atLab2 atLab2 <- wAtLab wAtRx <- atRx1 atRx1 <- atRx2 atRx2 <- wAtRx #cat("LOW position, swap labels - 1st value HIGH - atLab2.\n") } #cat("lineAxisSizes:\n") #print(lineAxisSizes) #if (lineAxisSizes["Ax1"] == axisNLabSize ) { # # # change is not modified previously. # lineAxisSizes["Ax1"] <- axisSLabSize # set new height for axis # 1 #} lineDo["Ax1"] <- TRUE # enable # Change line size same as Ax1 - it may have been reduced. lineAxisSizes["Ax2"] <- lineAxisSizes["Ax1"] lineDo["Ax2"] <- TRUE # enable # If subtitle, change it's size and spacing. if (lineDo["AST"]) { # if subtitle present from before. # Scale Subtitle is present with staggered. lineAxisSizes["AST"] <- lineAxisSizes["Ax1"] # reduce title to axis line spacing # set new subtitle height lineAxisSizes["SPT"] <- lineSSpLabSize # set new subtitle space height } if (length(atRx1) != length(atRx2)) { # if not the same length the pattern is AVAVA or VAVAV pattern. # in the AVAVA case, staggered must have been FALSE to start high. # in the VAVAV case, staggered must have been TRUE to start low. 
# in either case, reverse staggered staggered <<- xor(staggered, TRUE) } else { # same length pair - AVAV or VAVA pattern. Leave staggered set the same. } # reduce spacing between titles and axis labels/subtitles. lineAxisSizes["SP"] <- lineSSpLabSize # reduce space to labels/titles # set new title to axis space height. lineMultiB["SP"] <- 2.25 # fudge on bottom. #cat("Make Staggered - settings - lineAxisSizes:\n") #print(lineAxisSizes) #cat("lineDo:\n") #print(lineDo) } # ##### # # # # Process StaggerLab option. # # # split up the labels for overlapping. # # #cat("Done with methods - on to plotting.\n") # ## now this is already done. just need to change font sizes. # ## if label staggering requested - add space for line NOW! ## don't know by whom!!! ## ## staggerLab set by package call parameter (user) ## staggering set by internal code to force staggering for this column. ## # ##cat("staggerLab:",staggerLab," staggering:",staggering," staggered:",staggered,"\n") # #if (staggerLab) { staggering <- TRUE } # ## atLab1, atLab2, atRx1, atRx2 already setup for the overlaid axis, with sequence normal 1... and 2.... # #if (staggering) { # # Check to make sure we have 2 or more labels. # # if (length(atLab) > 1) { # # # Can only stagger labels if more than one. Code should not let this happen now. # # #cat("Process staggering request from user or axis label processing - staggerLab:", staggerLab," staggering:",staggering,"\n") # # we have enough labels ( > 1 ) to stagger. # # # We have already split the labels up into atLab1 and atLab2 for the overlap printing. # # The only think to do to stagger the drawing is to change the # # spacing of the lines (especially "Ax2"). # # # # we are doing staggered axis labels (2 lines) # # # Staggered Labels - setup # # #cat("adjusting label sizes for staggering with two axis rows. Axis/SubT Size set to small.\n") # # # Change line size. 
# if (lineAxisSizes["Ax1"] == axisNLabSize ) { # lineAxisSizes["Ax1"] <- axisSLabSize # set new height for axis # 1 # } # lineDo["Ax1"] <- TRUE # enable # # lineAxisSizes["Ax2"] <- lineAxisSizes["Ax1"] # add axis # 2 (will cause two rows.) # lineDo["Ax2"] <- TRUE # enable # # # If subtitle, change it's size and spacing. # if (lineDo["AST"]) { # if subtitle present from before. # # Scale Subtitle is present with staggered. # lineAxisSizes["AST"] <- lineAxisSizes["Ax1"] # reduce title to axis line spacing # set new subtitle height # lineAxisSizes["SPT"] <- lineSSpLabSize # set new subtitle space height # } # # # reduce spacing between titles and axis labels/subtitles. # # lineAxisSizes["SP"] <- lineSSpLabSize # reduce space to labels/titles # set new title to axis space height. # lineMultiB["SP"] <- 2.25 # fudge on bottom. # # #cat("Stagger settings - lineAxisSizes:\n") # #print(lineAxisSizes) # # # # # Done setting up labels and dual axis with sizes and spaces # # # # # Now handle the order (high and low.) # # # # Check what happened in the last column to see where to start the staggering. # # # # staggered = false (def) - last column was low (also no staggered may have happened.) # # if staggering = false, ignore leave along, not doing stagger (not in this code) # # = true, process (go high and process last label. # # # # true - last column was staggered and ended HIGH. # # if staggering = false, ignore set staggered <- FALSE (not in this code) # # = true, process (go low and process last label. # # # # # # # we are set to start low. # # if (!staggered) { # staggered = FALSE (no stagger or ended low.) start high. # # last column had no stagger, no stagger done, or ends in low position. # # move to start in high position. # # get updated information. 
# s1 <- seq(1,length(atLab),by=2) # s2 <- seq(2,length(atLab),by=2) # # start high (ax1) # } else { # # start low # s1 <- seq(2,length(atLab),by=2) # s2 <- seq(1,length(atLab),by=2) # } # atLab1 <- atLab[s1] # atLab2 <- atLab[s2] # atRx1 <- atRx[s1] # atRx2 <- atRx[s2] # lAtRx1 <- length(atRx1) # lAtRx2 <- length(atRx2) # # if (lAtRx1 != lAtRx2) { # # if not the same length the pattern is AVAVA or VAVAV pattern. # # in the AVAVA case, staggered must have been FALSE to start high. # # in the VAVAV case, staggered must have been TRUE to start low. # # in either case, reverse staggered # staggered <<- xor(staggered, TRUE) # # } else { # # same length pair - AVAV or VAVA pattern. Leave staggered set the same. # } # } #} # #cat("Staggered - atRx1:",atRx1," atRx2:",atRx2," len(1):",length(atRx1)," len(2):",length(atRx2),"\n") #cat("staggering:",staggering," staggered:",staggered," MakeStag:",MakeStag,"\n") # ##### #cat("atLab1:",atLab1,"\n") #cat("atLab2:",atLab2,"\n") #cat("atRx1 :",atRx1 ,"\n") #cat("atRx2 :",atRx2,"\n") #cat("\n") #cat("lineAxisSizes:\n") #print(lineAxisSizes) #cat("lineTopSizes:\n") #print(lineTopSizes) #cat("lineBotSizes:\n") #print(lineBotSizes) # # Step 7 - finish setting up the top and bottom labels. # # Top margin titles/axis lineSizesT <- c(0,lineAxisSizes,lineTopSizes) # combine axis and top titles spacings names(lineSizesT) <- c("N","Ax2","Ax1","SPT","AST","SP","L2","L1") lineSizesTM <- lineSizesT * lineMultiT #cat("lineSizesT&TM:\n") #print(lineSizesT) #print(lineSizesTM) # calculate the positions of each and add offset. linePosT <- cumsum(lineSizesTM) + 0.01 # get line position of each element names(linePosT) <- c("Ax2","Ax1", "SPT", "AST", "SP", "L2", "L1") #cat("linePosT:\n") #print(linePosT) # if overlaped but not staggered, linePosT "Ax1" and "Ax2" should be the same. # ##### ##### # # Bottom margin titles/axis # lineSizesB <- c(0,lineAxisSizes,lineBotSizes) # combine axis and bottom title spacings. 
names(lineSizesB) <- c("N","Ax2","Ax1","SPT","AST","SP","L3","L4") lineSizesBM <- lineSizesB * lineMultiB #cat("lineSizesB&BM:\n") #print(lineSizesB) #print(lineSizesBM) # calculate the positions of each and add offset. linePosB <- cumsum(lineSizesBM) + 0.01 # get line position of each elements names(linePosB) <- c("Ax2","Ax1", "SPT", "AST", "SP", "L3", "L4") #cat("linePosB:\n") #print(linePosB) titleLab3 <- linePosB["L3"] # make any adjustments in the trailer code. titleLab4 <- linePosB["L4"] #cat("lineDo:\n") #print(lineDo) # ##### ###### ## ## Calculate the lastLab2Space to hand off to next column. ## ## if not staggered Labs - then calculate the space left # #LabLastOverU <- rightEdge - EndOfLabs[lAtRx] #LabLastOverI <- LabLastOverU / xupi #lastLab2Space <<- LabLastOverI ##cat("Setting lastLab2Space & LabLastOverI",lastLab2Space,"\n") # ## if pos value - we have room. neg - we need room. # # End of Xaxis processing. # ######## ######## # # Column Headers - printing # # Note: mgp(a,b,c) - a => position for axis labels, b,c => position for axis values and line, # in mcex values. def = c(3,1,0) # # # Select panel and re-scale - 1st panel (top) to do title/labels and axis labels #cat("DX02-column headers printing - rx:",rx," ry:",ry," i:",1," j:",j,"\n") panelSelect(panels,1,j) x <- panelScale(rx,ry) par(xpd=T) # print in margin space above panel 1 of column. 
# # column titles # if (lineDo["L1"]) mtext(lab1[j],side=3, line=linePosT["L1"], cex=lineTopSizes["L1"]) if (lineDo["L2"]) mtext(lab2[j], side=3, line=linePosT["L2"], cex=lineTopSizes["L2"]) # # axis sub-title # if (lineDo["AST"]) { mtext(axisSubTitle, side=3, line=linePosT["AST"], cex=lineAxisSizes["AST"]) # line 2 or 3 } # # column top axis(es) # if (lineDo["Ax1"]) { # line 1 or 2 (above axis # 2) #cat("Top-axis calls - atLab1:",atLab1," atRx1:",atRx1,"\n") #cat(" mgp:linePosT['Ax1']:",linePosT["Ax1"],"\n", # " lineAxisSizes['Ax1']:",lineAxisSizes["Ax1"],"\n") axis(side=3, tick=F, at=atRx1, labels=atLab1, mgp=c(3.2,linePosT["Ax1"],0), cex.axis=lineSizesT["Ax1"] ) } if (lineDo["Ax2"]) { # line 1 #cat("Top-axis calls - atLab2:",atLab2," atRx2:",atRx2,"\n") #cat(" mgp:linePosT['Ax2']:",linePosT["Ax2"],"\n", # " lineAxisSizes['Ax1']:",lineAxisSizes["Ax1"],"\n") axis(side=3, tick=F, at=atRx2, labels=atLab2, mgp=c(3.2,linePosT["Ax2"],0), cex.axis=lineAxisSizes["Ax1"]) # this is not an error, Ax2 is always printed the same size as Ax1 } # # ######## end of column header ##### # # Column Trailers # # Select and Scale to bottom panel in column #cat("DX03-trailer column headers - numGrps-i:",numGrps," j:",j," numGrps:",numGrps," rx:",rx," ry:",ry,"\n") panelSelect(panels,numGrps,j) x <- panelScale(rx,ry) par(xpd=T) # print in margin space below bottom panel # padj in axis needed to make grid line label close ##### # # Adjustment values to make bottom labels have the same space as the top labels. # old method. 
# #botLAdj <- -0.05 # label height adjustment (title/reftext) #botAdj <- -lineSizesB["Ax1"] # ->> (-0.666667 or -0.5 ) (axis row height in lines) #botAxisAdj <- - 0.2 #-lineSizesB["Ax1"] * 0.3333 # - 0.05 # 1/2 * 0.8 of row height in lines #botAxisBase <- Title.Line.2x.pos - ( ( 1 - lineSizesB["Ax1"] ) * 0.6333333 ) #cat("bottom title/labels-botLAdj:",botLAdj,"\n") #cat(" botAdj:",botAdj,"\n") #cat(" botAxisAdj:",botAxisAdj,"\n") #cat(" botAxisBase:",botAxisBase,"\n") # # ##### ##### # # new bottom margin line adjustment algorithm # desiredCex <- lineAxisSizes["Ax1"] xPs <- par("ps") # get current system base point size being used. Everything is based on this value. xHPsLU <- strheight("00000",cex=1,units="user") xHDesPsLU <- strheight("00000",cex=desiredCex,units="user") xDifHU <- xHPsLU - xHDesPsLU # different between system line and our line xBotAdj <- xDifHU / xHPsLU # ratio of dif (not used) and full line. % percent of line. botAxisBase <- 0.15 - xBotAdj # in lines. botAxisBAdj <- botAxisBase # + 0.05 botLAdj <- 0.05 #cat("New Bottom - botAxisBase:",botAxisBase," botAxisBAdj:",botAxisBAdj," botLAdj:",botLAdj,"\n") # column bottom axis lines if (lineDo["Ax1"]) { #cat("Bot-axis #1 - linePosB['Ax1']:",linePosB["Ax1"],"\n", # " lineAxisSizes['Ax1']:",lineAxisSizes["Ax1"],"\n", # " botAxisBase:",botAxisBase,"\n", # " botAxisBAdj:",botAxisBAdj,"\n") #cat(" atRx1:",atRx1," atLab1:",atLab1,"\n") axis(side=1, tick=F, at=atRx1, labels=atLab1, line=botAxisBAdj, mgp=c(3.2, linePosB["Ax1"],0), cex.axis=lineAxisSizes["Ax1"]) } if (lineDo["Ax2"]) { #cat("Bot-axis #2 - linePosB['Ax2']:",linePosB["Ax2"],"\n", # " lineSizesB['Ax1']:",lineSizesB["Ax1"],"\n", # #" botAxisAdj:",botAxisAdj,"\n") # " botAxisBase:",botAxisBase,"\n") #cat(" atRx2:",atRx2," atLab2:",atLab2,"\n") axis(side=1, tick=F, at=atRx2, labels=atLab2, line=botAxisBAdj, mgp=c(3.2, linePosB["Ax2"],0), cex.axis=lineAxisSizes["Ax1"]) } # if axis sub-title if (lineDo["AST"]) { wAST <- linePosB["AST"] + botAxisBAdj 
#cat("BotAST - linePosB['AST']:",linePosB["AST"],"\n", # " lineAxisSizes['AST']:",lineAxisSizes["AST"],"\n", # #" botAxisAdj:",botAxisAdj,"\n") # " botAxisBase:",botAxisBase,"\n") #cat(" line=wAST:",wAST,"\n") mtext(axisSubTitle, side=1, line = wAST, cex=lineAxisSizes["AST"]) } # ______Bottom Label/Title - Lab3 ______ # bottom of column footnote (title) if (lineDo["L3"]) { titleLab3 <- linePosB["L3"] + botAxisBase - 0.05 #cat("BotAxis # 3 - linePosB['L3']:",linePosB["L3"],"\n", # " botAxisBAdj:", botAxisBAdj, "\n", # " botAxisBase:", botAxisBase, "\n", # #" botLAdj :",botLAdj,"\n", # " line=titleLab3:",titleLab3,"\n") mtext(side=1,lab3[j], line=titleLab3, cex=lineBotSizes["L3"]) # bottom labels. } # _______Reference Value Legend titleLab4 <- linePosB["L4"] + botAxisBase #cat("reftxt:",reftxt," refval:",refval," lineDo[L4]:",lineDo["L4"],"\n") #cat("BotAxis # 4 (reftext) - linePosB['L4']:",linePosB["L4"],"\n", # " botAxisBase:",botAxisBase,"\n", # #" botAxisAdj:",botAxisAdj,"\n", # #" botLAdj:",botLAdj,"\n", # " line=titleLab4:",titleLab4,"\n") # Handle special needs of the reftext and it's line. if (!is.na(refval)) { if (is.between.r(refval,rx)) { # refval must be in the range of the data. Otherwize - ignore. if (!is.na(reftxt) ) { # Get y pos in user units to draw the line and text. # select panel done before this call. xpin <- par("pin") # distances in inches xusr <- par("usr") # distances in user scale (points) xmar <- par("mar") xmai <- par("mai") #fpc <- 0.95 # fudge adjustment #cat("xpin:",xpin," xusr:",xusr," xmar:",xmar," xmai:",xmai,"\n") # # Calculate X positions for the line and text in the margin. 
(units=user) # xCenterU <- mean(xusr[1:2]) # center of the glyph column xWidthU <- diff(xusr[1:2]) # unit width of glyph column => diff(rx) - user units xUnitsPerInch <- xWidthU/xpin[1] # units / inch for x xHalfInchU <- xUnitsPerInch * 0.5 #* fpc # 1/2" of units #cat(" center of glyph-xCenterU:",xCenterU, "\n", # " width of glyph -xWidthU :",xWidthU, "\n", # " xUnitsPerInch :",xUnitsPerInch,"\n", # " xHalfInchU :",xHalfInchU,"\n") # # line length will be whats left after taking away room for text or 1/2 inch # xTxt <- str_trim(reftxt) # get refText and trim side blanks. # length of texts in units xTxtLenU <- strwidth(xTxt,units="user", cex=lineSizesB["L4"]) #* fpc # length text # xHalfFreeLenU <- ((xWidthU - xTxtLenU) / 2 ) #* fpc # half space left for line #cat("xTxtLenU:", xTxtLenU," half free avail-xHalfFreeLenU:",xHalfFreeLenU,"\n") xLineLenU <- xHalfFreeLenU # see if room for half inche line, if not use shorter value. if (xLineLenU > xHalfInchU) xLineLenU <- xHalfInchU # get length of line to 1/2 inch # calculate start of line. xLineStartu <- xCenterU - (xLineLenU + xTxtLenU) / 2 # center - half (text length + line length) xTxtStartu <- xLineStartu + xLineLenU #cat("xLineStartu:",xLineStartu," xTxtStartu:",xTxtStartu,"\n") # # Calculate the Y positions for the line and text in the margin for the refText. # line needs units=users, text needs "lines" # xTitleLab4 <- titleLab4 # + botLAdj # Text Line offset from Axis line. #cat("ConvLineToUser call-xTitleLab4:",xTitleLab4,"\n") yTextPosu <- ConvLineToUser(1, xTitleLab4) # position text position in user units. yTextHu <- strheight(xTxt, units="user", cex=lineSizesB["L4"]) # find height of text in user units. #cat("yTextPosu:",yTextPosu," yTextHu:",yTextHu,"\n") # position of line based on Text position(user) - 60% of the text height. yLinePosu <- yTextPosu - (yTextHu * 0.6) # lines y coord. is 1/2 text height toward plot. 
#cat("Y Position for L4 - line(u):",yLinePosu," text(u):",yTextPosu," text(l) xTitleLab4:",xTitleLab4," titleLab4:",titleLab4,"\n") #cat("xTitleLab4:",xTitleLab4," titleLab4:", titleLab4,"\n", # " linePosB['L4'] :",linePosB["L4"],"\n", # " lineSizesB['L4']:",lineSizesB["L4"],"\n", # " botAxisAdj :",botAxisAdj," botLAdj:",botLAdj,"\n") # way to find graphic length of string --> sw <- strwidth(reftxt,cex=Text.cex) # add text definition for legend. (5/21/13 - added color to line) # draw line. lines(c(xLineStartu, xTxtStartu), rep(yLinePosu, 2), lty=Ref.Val.lty, lwd=Ref.Val.lwd, col=iRef.Val.col) # draw length line up to 1/2 inch. # mtext does not let you set the X position of the text, so the old text function must be used with x, y coordinates. text(xTxtStartu, y=yLinePosu, labels=xTxt, cex=lineBotSizes["L4"], col=iRef.Text.col, offset=0, adj=c(0,NA)) # text starting at line end. #cat("Line%Start:", xLineStartu/xWidthu, " Txt%Start:",xTxtStartu/xWidthu," titleLab4:", titleLab4,"\n") } } } # ##### end of trailer #cat("Returned staggered:",staggered,"\n") return(list(atRx=atRx, rx=rx, ry=ry)) # return the set of tick points for grid lines. } # ### ### # # MapDrawer # MapDrawer <- function(wAreaVisBorders, wL2VisBorders, wRegVisBorders, wL3VisBorders, WorkList) { # # # wLen <- dim(WorkList)[1] # get number of entries for (ind in c(1:wLen)) { wEntry <- WorkList[ind,] if (wEntry$Borders==1) { # L2 borders # Map background - Layer 2 borders (regional areas (US -> states)) polygon(wL2VisBorders$x, wL2VisBorders$y, density=-1, col=wEntry$Fill.col, border=FALSE) polygon(wL2VisBorders$x, wL2VisBorders$y, density=0, col=wEntry$Line.col, lwd=wEntry$Line.lwd) } if (wEntry$Borders==2) { # L1 colors polygon(wAreaVisBorders$x,wAreaVisBorders$y, density=-1, col=wEntry$Fill.col, border=FALSE) } if (wEntry$Borders==3) { # L1 borders # setup each group of sub-areas and draw polygons. 
# Not Referenced sub-areas wVisBorders <- wAreaVisBorders[wEntry$Selected,] polygon(wVisBorders$x,wVisBorders$y, density=0, col= wEntry$Line.col, lwd=wEntry$Line.lwd) } if (wEntry$Borders==4) { # L3 borders # Outline Country area (total area). polygon(wL3VisBorders$x, wL3VisBorders$y, density=0, col=wEntry$Line.col, lwd=wEntry$Line.lwd) # outside US boundary } } } # ### ### # # MapPolySetup function - used by all areaMap glyphs to process the panel dimensions # and adjust the x and y ranges and scales for the particular map used. # # MapPolySetup <- function(mapType,wPanels,wAreaVisBorders,wL2VisBorders,wRegVisBorders, wL3VisBorders, DL3) { # entire area.. (what if subset is used.) # all but L3VisBorders if (DL3) { # all sets of boundaries rxpoly <- range(wL3VisBorders$x,wRegVisBorders$x,wL2VisBorders$x,wAreaVisBorders$x,na.rm=TRUE) rypoly <- range(wL3VisBorders$y,wRegVisBorders$y,wL2VisBorders$y,wAreaVisBorders$y,na.rm=TRUE) } else { rxpoly <- range(wRegVisBorders$x,wL2VisBorders$x,wAreaVisBorders$x,na.rm=TRUE) rypoly <- range(wRegVisBorders$y,wL2VisBorders$y,wAreaVisBorders$y,na.rm=TRUE) } rxadj <- diff(rxpoly) * 0.02 # adjust x by + and - 2% of the size of the range rxVadj <- c(-rxadj,rxadj) rxpoly <- rxpoly + rxVadj ryadj <- diff(rypoly) * 0.05 # adjust y by + and - 5% of the size of the range. ryVadj <- c(-ryadj,ryadj) rypoly <- rypoly + ryVadj yxA <- diff(rypoly) / diff(rxpoly) # calculated aspect from MAP information. #cat("Map yxAspect:", yxA, "\n") #print(paste0("rxpoly:",paste0(rxpoly,collapse=" ")," rypoly:",paste0(rypoly,collapse=" "))) # aspect ratio is y / x... 
# size of space in panel = panelW <- diff(wPanels$coltabs[j+1,]) panelH <- diff(wPanels$rowtabs[2,]) # grap first row as model - All should be the same except median row #cat("Panel W:",panelW," H:",panelH,"\n") #cat("banner.max:",banner.max[mapType,"width"],"\n") rxDiff <- diff(rxpoly) ryDiff <- diff(rypoly) rxpoly2 <- rxpoly rypoly2 <- rypoly # # Adjust rx and ry - rule: NEVER NEVER decrease rx or ry. # if map Aspect (y/x) is lower then panel (h/w) then # example: 90/150 = 0.6 and 0.78/1.117 -> 0.698 # 150 <> 90 * 1.117 / 0.78 # map in this space is about 104/150 map will be taller then it should be. # increase y range # if map Aspect (y/x) is high than panel (h/w) then # example: 90/150 = 0.6 and 0.66/1.117 -> 0.59 # map in this space is about 88/150 map will be wider then it should be # increase x range. # # One assumption is that the original panel width and height were laid out # to accomodate the minimum/maximum height, aspect ratio, and title lengths. # # This is to adjust to fit the space. # Objective: # ryDiff panelH # --------- = -------- --> rxDiff =? ryDiff * panelW / panelH # rxDiff panelW # wfx <- ryDiff * panelW / panelH if (wfx > rxDiff) { # change rx (expand) wfxd <- abs(wfx - rxDiff) vfx <- c(-wfxd/2,wfxd/2) rxpoly2 <- rxpoly + vfx } else { # change ry (expand) wfy <- rxDiff * panelH / panelW wfyd <- abs(wfy - ryDiff) # change needed. vfy <- c(-wfyd/2, wfyd/2) rypoly2 <- rypoly + vfy } #cat("rxpoly2:",rxpoly2," rypoly2:",rypoly2,"\n") return(list(rxpoly2=rxpoly2, rypoly2=rypoly2)) } # ### ### # # Function to split numeric X,Y coordinate vectors based on NA. # # Return is a list of parts of the original vector up to the NA. # MMVSplit <- function(wX,Brks) { #print(Brks) wXa <- wX wXa[Brks] <- NA wXs <- split(wXa, cumsum(Brks)) # split up vector into smaller vectors in list wXz <- sapply(wXs, function(x) x[!is.na(x)]) # remove NAs #print(wXz) return(wXz) } # ### ### # # printPanelsParms - prints the associated parameter in creating a panel. 
#
# printPanelParms - debug helper: prints the panel-layout variables that
# drive panel construction.  All values are read from the enclosing
# micromapST environment (numGrps, topMar, rowSize, ...), so this routine
# is only meaningful when called from within that scope.
#
# t - a tag/object printed first to identify the call site.
#
printPanelParms <- function(t) {
   print(t)
   cat("numGrps:",numGrps,"\n")
   cat("numCol :",numCol,"\n")
   cat("topMar :",topMar,"\n")
   cat("botMar :",botMar,"\n")
   cat("rowSize:",paste0(rowSize,collapse=" "),"\n")
   cat("rowSep :",paste0(rowSep,collapse=" "),"\n")
   cat("colSize:",paste0(colSize,collapse=" "),"\n")
   cat("colWidths",paste0(colWidths,collapse=" "),"\n")
   cat("colSep :",paste0(colSep,collapse=" "),"\n")
   cat("rSizeMx:",rowMaxH,"\n")
   cat("rSizeMn:",rowMinH,"\n")
   cat("rSizeMaj:",rowSizeMaj,"\n")
   cat("rMapCol:",PDMapCol,"\n")
   cat("\n")
}
#
###

###
#
#_________ function to pattern match alias names
#
# SeerToAbbr - map registry names (as found in SeerStat output) to the
# border-group area abbreviations via alias (regex) pattern matching.
#
# xR   - character vector of registry names from the user's data.
# aNAI - name table (areaNamesAbbrsIDs); $Alias holds the match pattern
#        for each area and $Abbr the corresponding abbreviation.
#
# Returns a vector the length of xR holding the matched abbreviation, or
# NA where no alias matched.  Stops (***MST-30) if any single alias
# matches more than one registry row (duplicate rows in the data).
#
SeerToAbbr <- function(xR,aNAI) {

   ErrFnd  <- FALSE
   wReg    <- toupper(xR)                # compare case-insensitively
   wAbbr   <- rep(NA,length(wReg))       # default result - no match

   # For each alias pattern, find which registry entries it matches.
   # FIX: seq_along() replaces c(1:length(aNAI$Alias)) - the old form
   # yields c(1,0) when the alias list is empty.
   xout1   <- sapply(seq_along(aNAI$Alias), function (x) grep(aNAI$Alias[x], wReg, ignore.case=TRUE))

   xout1a  <- unlist(xout1)              # registry positions that matched (in alias order)

   # logical per alias - TRUE if that alias matched at least one registry.
   xout2   <- !is.na(lapply(xout1, function(x) ifelse(length(x)==0,NA,x)))

   # aliases that matched MORE than one registry row -> duplicate data rows.
   xout3   <- unlist( lapply( xout1, function(x) { if(length(x[])>1) { x } else { NA } } ) )

   if (any(!is.na(xout3))) {
      # one alias matched several rows - report the duplicated rows and stop.
      xout4   <- paste0(xout3[!is.na(xout3)], collapse=" ")
      xmsg    <- paste0("***MST-30 Registries in the data have duplicate name in rows:",xout4, " Only one row per area is permitted.\n")
      ErrFnd  <- TRUE
      stopCnt()
      stop(xmsg, call.=FALSE)
   }

   if (!ErrFnd) {
      # continue - store each matched alias's abbreviation into the
      # registry slot(s) it matched.
      wAbbr[xout1a] <- aNAI$Abbr[xout2]
   }
   return(wAbbr)    # return list of abbreviates or NA if no match.
}
#
###

###
#
#  function to calculate and return scaling variable - ksc
#
#  based on the value of xke => 1 to 5.
#  UPDATE to pass real height, and handle 1 to 6 properly. This code assumes height used for 5.
#
# SetKsc - compute the vertical scaling factor (ksc) for a glyph drawing
# xke (1..5) stacked elements, keeping bar heights uniform relative to
# the 5-element case.
#
SetKsc <- function(xke) {
    C13  <- 0.33333                   # one-third fudge constant used by the layout
    if (xke == 1) {
       wKsc <- 1
    } else {
       wKsc <- (xke + C13)/(5 + C13)  # scale value for the "y" element of the bar to keep uniformity
    }
    return(wKsc)
}
#
###
#
#
####  end of micromap functions (glyphs and micromapST internal functions)
#
#
###########################
###########################
###########################

#print("micromapST functions loaded")

################################################################################
#
#   Continue startup - verification code.
#
################################################################################
################################################################################
#
#  Call Argument validation
#
#
#  Previously Checked:
#
#  bordDir
#  bordGrp
#
#  load border group
#
#  Start setting up .GlobalEnv variables.
#
#
#  1) statsDFrame -> present
#  2) panelDesc   -> present
#
#  3) statsDFrame -> get column names and number of columns;
#  4) statsDFrame & rowColName -> locate row names for later linking.
#  5) Compare row names and name table
#  6) Check for duplicate statsDFrame rows
#  7) Handle dataRegionsOnly call parameter - sub-map Setup
#  8) Set values for regional or full mapping.
#
#  9) rowName
# 10)
#
#
#  Basic checks to make sure statsDFrame and panelDesc arguments are present and usable.
#  More detailed checks done later.
#
StopFnd <- FALSE

#
#_________ 1) statsDFrame (basic check) argument
#
#  check to see if the statsDFrame was provided and is the correct datatype
#
#  FIX: removed "is.na(statsDFrame)" from the || chain.  For any real
#  data.frame, is.na() returns a full logical matrix, and || requires a
#  length-one condition (an error as of R 4.3).  A scalar NA argument is
#  still rejected here because NA is not a data.frame.
#
if (missing(statsDFrame) || is.null(statsDFrame) || !is.data.frame(statsDFrame)) {
    stopCnt()
    StopFnd <- TRUE
    xmsg    <- paste0("***0101 CARG-DF First argument (statsDFrame) is missing or not a data.frame.")
    warning(xmsg, call. = FALSE)
}

#
#_________ 2) panelDesc - Basic initial check - Process the arguments
#
# check to see if the panelDesc was provided and is the correct datatype.
# FIX: removed "is.na(panelDesc)" from the || chain - is.na() on a
# data.frame or list returns a multi-element result which || rejects
# (an error as of R 4.3).  A scalar NA still fails !is.data.frame().
#
# NOTE(review): "!is.data.frame(panelDesc) || !is.list(panelDesc)" rejects
# a plain list (a data.frame is itself a list), even though the message
# implies a list is acceptable.  Behavior kept as-is - confirm the intent.
#
if (missing(panelDesc) || is.null(panelDesc) ||
     !is.data.frame(panelDesc) || !is.list(panelDesc)) {
   stopCnt()
   StopFnd <- TRUE
   xmsg    <- paste0("***0111 CARG-PD The second argument, the panelDesc structure, is missing or not a data.frame or list.")
   warning(xmsg, call. = FALSE)
}
#
# Stop now if either of the two required arguments failed the basic check.
if (StopFnd) {
   stopCnt()
   xmsg    <- paste0("***01Z0 CARG Key call arguments are missing, Execution stopped.")
   stop(xmsg, call. = FALSE)
}

###  most of panelDesc is validated later.

#print("statsDFrame and panelDesc variables are present.")

#
#  Now get the column names of the statsDFrame and verify the match up of the rownames with
#  the border group names, abbreviations or IDs.
#
#_________ Get list of column name in statsDFrame for parameter verification

wSFName      <- names(statsDFrame)      # get the column names from data.frame
len_wSFName  <- length(wSFName)         # record the number of "named" rows in list (at front.)
# FIX: seq_len() replaces seq(from=1,to=len) - seq(1,0) yields c(1,0) for a
# zero-column data.frame (that case is stopped with ***0103 just below).
wSFNameList  <- c(wSFName,seq_len(len_wSFName))   # add valid row numbers to the list.
numRows      <- nrow(statsDFrame)       # get number of rows in statsDFrame

#
#  wSFNameList now contains a list of the column names and column numbers
#  as character strings.  This string will be used to verify any user
#  provided column names or character column numbers.
#

#
#  Start Accumulating the micromapST System variable list
#
mmSys <- list(SFVarName = sDFName,
              SFNameL   = wSFNameList,
              SFColNum  = len_wSFName,
              SFRowNum  = numRows)
#print("mmSys")
#print(mmSys)

#
#  Check to make sure user provide data frame has at least 1 row and at least 1 column.
#
if (len_wSFName == 0 || numRows == 0) {
   xmsg    <- paste0("***0103 CARG-DF The ",sDFName," statsDFrame data.frame has no columns or rows. ")
   StopFnd <- TRUE
   stopCnt()
   stop(xmsg, call. = FALSE)
}

#
# statsDFrame - data rows
#
# headers or total area rate rows should not be included in data.format structure..
#
#
#______________statsDFrame - data frame - verify row links/names______________
#
numRowsBG <- nrow(areaNamesAbbrsIDs)     # get number of rows in name table
#
#  Must validate statsDFrame row names against the area list - process rowNamesCol to be able to proceed with
#  the link verification.
#
#  Step 1 - find out where the row names for the sub-area are in the statsDFrame data.frame.
#
###
###  If user provided a column with the sub-area "names", then we have to check to make
###  sure there are no duplicates in the statsDFrame data.frame.  If they were in the row.names,
###  R already makes sure there are no duplicates.
###
###  Dont care what type of link it is at this point.
###
#
#_____________Check and Process the rowNamesCol call argument/parameter option___________________
#
StopFnd <- FALSE
ErrFnd  <- FALSE

# FIX: is.na() is now guarded by a length test.  A multi-element
# rowNamesCol previously made || fail (is.na() returns a vector; an error
# as of R 4.3) before the length>1 warning below could ever run.
if (missing(rowNamesCol) || is.null(rowNamesCol) ||
    (length(rowNamesCol) == 1 && is.na(rowNamesCol)) ) {

   # rowNamesCol is missing or not provided - no action - use the row.names on statsDFrame for the sub-area names.
   statsDFrame$RN <- rownames(statsDFrame)   # get a copy of the rownames. (row.names)

   # If no rowNamesCol provided, then we must assume the row names (sub-area names) are being
   # provided in the row.names of the data.frame.  If the row.names were not assigned a sub-area
   # identifier (full name, abbr, alias, or ID) by the user, then
   # we will just get "1", "2", ... as the row.names and they will not match anything.
   # could be dangerous - later it may be best to STOP.
   #print("No rowNamesCol provided - must be in row.names")

} else {
   # Have the rowNameCol call argument/parameter and a statsDFrame column name/number retrievve the sub-area links.
   #
   if (length(rowNamesCol) > 1) {
      # rowNamesCol can only be a vector of length 1.
      warnCnt()
      ErrFnd <- TRUE
      xmsg   <- paste0("***0173 CARG-RNC The rowNamesCol argument value must have a length = 1. Only first value used.")
      warning(xmsg, call. = FALSE)

      rowNamesCol <- as.vector(unlist(rowNamesCol))[1]  # pick off the first entry.  dont care what type of variable it is.
      #
      # Cheat and do unlist and as.vector first.  This kills must structures.
      # "unlist" destroys any lists and most data.frames.
      # as.vector linearizes what is left so one element can be picked off and kills factors.
   }

   # Look up the name and covert it into a column number - or - verify column number..
   if (!is.character(rowNamesCol) && !is.numeric(rowNamesCol) && !is.integer(rowNamesCol)) {
      # rowNamesCol is not the correct type of vector.
      stopCnt()
      StopFnd <- TRUE      # stop because user did specify, but its wrong.
      xmsg    <- paste0("***0172 CARG-RNC The rowNamesCol argument value must be a character or numeric value. ",
                        "It was found to be: ",class(rowNamesCol),".")
      stop(xmsg, call. = FALSE)
   }

   litrowNamesCol <- rowNamesCol   # Save the original literal value from rowNamesCol (could be number or name)
                                   # FIX: "<-" replaces "=" for assignment (file convention).
   rowNamesCol    <- CheckParmColx(litrowNamesCol, c('RNC','rowNamesCol'), wSFNameList, len_wSFName)  # see if value is good.
   # if error, CheckParmColx issues the warning message and return 0.
   # got column number if good.

   if (!all(rowNamesCol>0)) {   # check to see if the value is good (>0 -> a valid column number)
      # Bad column name or column number found.  Error message was generated by CheckColx.
      stopCnt()
      StopFnd <- TRUE          # again stop because user specified, but its wrong.
      xmsg    <- paste0("***01Z1 CARG Errors found in call arguments. Execution stopped.")
      stop(xmsg, call.=FALSE)
      #
      # Cant continue. User provided rowNamesCol, so must have a valid column name/number and a valid list of links.
      # If not, then looking at the row.names of the data.frame does not make sense. Why would they specify a rowNameCol?
      #
   }

   #
   # if problems are identified prior to this line, the package has stopped.
   #
   # At this point, the rolColName exists and is a valid column name.
   #
   #cat("rowNamesCol is valid : ",rowNamesCol," - Now check for duplicates.","\n")

   ###
   #
   # get copy of column, convert column into row.names, but first
   # need to check for duplicates before we do this.  Dont have to do this check if sub-area names are in row.names of the data.frame.
   #
   dupNames <- duplicated(statsDFrame[,rowNamesCol])
   dupRows  <- which(dupNames)   # FIX: which() replaces c(seq_along(dupNames))[dupNames] - identical result.

   if (any(dupNames)) {
      StopFnd <- TRUE
      stopCnt()
      xmsg <- paste0("***0171 CARG-RNC The row names in column ",rowNamesCol,
                     " of the ",sDFName," statsDFrame data frame contain duplicates. Only one row per sub-area is permitted. Duplicate rows are:",
                     paste0(dupRows,collapse=","),".")
      stop(xmsg, call. = FALSE)
      # possible work a round - later - is to delete the second occurance.
   }

   #
   # No duplicate sub-area row names in the statsDFrame data provided by user and
   # column name or number is good - move column to $RN
   #
   #print("Moved to $RN")
   statsDFrame$RN <- statsDFrame[,rowNamesCol]
}

statsDFrame$rawRN      <- statsDFrame$RN           # save raw format of row name.
statsDFrame$RN         <- toupper(statsDFrame$RN)  # upper case for comparisons.
row.names(statsDFrame) <- statsDFrame$RN           # save in statsDFrame$RN as the row.names

#
###

###
#
#  At this point the sub-area names from the row.names on statsDFrame or
#  the sub-area names in a column of the statsDFrame have been added to the
#  internal statsDFrame data.frame in the $RN column.  The values were
#  checked for duplicates if provided in a data.frame column.
#
#  Next step is to validate the names against the programmed name list.
#
#  If provided in column (rowNamesCol), they are checked and moved to row.names.
#  We only know they are unique.  Another check is needed to see if they match
#  the area name/abbr/ID list.
#
###

##____________statsDFrame rows OK to count
#
#  JP - Make sure the input data.frame is at least two columns - add one.  A single column data.frame
#  acts differently then a two or more column data.frame under many operations.
#
#  JP - Dot code (at least) has problems with single column statsDFrame structures.
#
#  To protect code and any other areas that may have problems,
#  quick fix is to append "0" column to the right of the provided data.frame.
#  This forces the data.frame to be at least 2 columns.
#
numRows <- nrow(statsDFrame)
Ex      <- rep(0,numRows)
ADFrame <- cbind(statsDFrame,Ex)    # move to ADFrame and add Zero column.
# a 1 column data.frame has a little different behavior the s 2 column data.frame
#cat("Add 0 column to statsDFrame\n")

#
# statsDFrame number of rows - validated.
#

#####
#
#  Get general defaults -> colors and details
#
par(fin = par("din"))   # safety value to get moving.
plot.new()

#
#  ________________Load Colors and Details defaults_______________________________
#
#print("Calling micromapGSetDefaults")
micromapGDefaults <- micromapGSetDefaults()   # get master list of variables and defaults
#print("Got data.frame from micromapGSetDefaults")

#####
#
#_________________colors _______________________________________
#
#  Must do after completing the details list processing
#
#  Verify "colors=" argument
#
#  Second purpose is to set the graphics colors not in the "colors" vector to grays or colors.
#
#  Read defaults into memory
#
#print("Validate colors")

colFull      <- TRUE    # control logical = TRUE doing Color, FALSE doing Greys
NoErrs       <- TRUE
doDotOutline <- FALSE

mstColors <- colors

if (missing(colors) || is.null(mstColors) ) {
   mstColors <- micromapGDefaults$colors   # use package defaults.
} else {
   if (typeof(mstColors) == "character") {
      if (length(mstColors) != 24) {
         if (length(mstColors) == 12) {   # check for the basic colors.
            # we have the basic 12 colors.  Expand to the list of 24.
            colorlab    <- names(mstColors)
            TransColors <- adjustcolor(mstColors,0.2)
            # FIX: was "mtColors <- c(mstColors, TransColors)" (typo) - the
            # expanded 24-color vector was discarded, leaving mstColors at
            # 12 entries and making the names() assignment below fail
            # (24 names onto 12 values).
            mstColors   <- c(mstColors, TransColors)
            if (!is.null(colorlab)) {
               names(mstColors) <- c(colorlab,paste0("l_",colorlab))
            }
         } else {
            if (length(mstColors) == 1) {
               wStr <- toupper(mstColors)
               if (wStr == "BW" || wStr == "GRAYS" || wStr == "GREYS" ) {
                  # set up the colors for printing in BW or Gray tones
                  # Get the main greys for the 6 colors (the middle 3-7/8 grays in the RColorBrewer scale.
                  # and add the black for the median and a grey for the area highlight color.
                  xbw        <- brewer.pal(name="Greys",9)
                  greyColors <- c(xbw[c(3:8)],"#000000","#E8E8E8")

                  # Build the transparent colors for the segmented bar charts.
                  TransColors <- adjustcolor(greyColors,0.2)

                  # Set up the grey color vector as requested.
                  mstColors   <- c(greyColors,TransColors)

                  # Set up running parameters.
                  colFull          <- FALSE
                  Dot.Outline      <- TRUE
                  Dot.Conf.Outline <- TRUE
                  Dot.SE.Outline   <- TRUE
                  doDotOutline     <- TRUE   # outline dots in dot glyphs.
               } else {
                  mstColors <- micromapGDefaults$colors
                  warnCnt()
                  xmsg <- paste0("***01K0 COLORS A invalid single value is provided for the colors argument. It must be 'BW', 'greys', or 'grays'. The argument is ignored.")
                  warning(xmsg,call.=FALSE)
               }
            } else {
               warnCnt()
               xmsg <- paste0("***01K1 COLORS The colors vector has the incorrect number of elements. It must have 1 or 24 entries. ",length(mstColors)," provided.")
               warning(xmsg,call.=FALSE)
            }
         }
      } else {
         # have 24 values in vector
      }
   } else {
      mstColors <- micromapGDefaults$colors
      warnCnt()
      xmsg <- "***01K2 COLORS The colors vector type is invalid. It must be a character vector."
      warning(xmsg,call.=FALSE)
   }
}

# NOTE(review): as.character() strips any names from mstColors before the
# assign(), so mstColorNames below reads names from the re-assigned
# (unnamed) vector - confirm this is intended.
assign("mstColors",as.character(mstColors))
mstColorNames <- names(mstColors)

rm(colors)

#____ end of color check and adjustments.___
#
#
#______________________Process details Defaults_________________________
#
#print("Validate details")
# Process defaults into the local variables as before.
# Setting the defaults into the system.  User provided overrides.
# Copy the package's master details list; each named entry is assigned as an
# individual variable in the current scope so downstream code can reference
# the option by name (e.g. Id.Dot.pch) instead of indexing a list.
wDetails <- micromapGDefaults$details
#print(wDetails)
oldDefNam = "none"
defNam    = names(wDetails)
for (i in 1:length(wDetails)) {
   # Guard against a malformed defaults list (unnamed entry).
   if (nchar(defNam[i]) <= 0) {
      warnCnt()
      xmsg <- paste0("***01N3 DETS Zero length variable name found in the details list after the ", oldDefNam, " variable.")
      warning(xmsg,call.=FALSE)
   }
   oldDefNam <- defNam[i]
   assign(defNam[i],wDetails[[i]])   # assign default values into scope.
}
# All details names must be in the globalVariable call to be visible to CRAN checks.
# The valid details variable name list is the "defNam" from above and the detailsExtra list
# for the areaParms parameters.
DetailNames <- c(defNam,detailExtra)
#print(DetailNames)
#
# The defaults have been moved to the individual variables.
# Keep the list of names around to verify user supplied names.
#
#________________ Process user provided details - merge into memory. ________
#
# Overlay the defaults with any values provided by the user via the
# "details" call argument: each valid named entry replaces the variable
# assigned above; invalid names are warned about and ignored.
#
#print("Merge user details with default details.")
#
numOverlaid <- 0
# NOTE(review): is.na(details) on a multi-element list makes "||" error in
# R >= 4.3 - presumably details is expected to be NULL/NA or a list; confirm.
if (!(missing(details) || is.null(details) || is.na(details))) {
   if (typeof(details) == "list") {
      nam       <- names(details)        # parse the details list into variables that can be
      nam_match <- match(nam,defNam)     # referenced using the list's name.
      for (i in 1:length(details)) {
         if (is.na(nam_match[i])) {
            # invalid variable name in details - warn and ignore.
            warnCnt()
            xmsg <- paste0("***01N2 DETS Invalid details variable name: ",nam[i], " in the details list. Variable is IGNORED.")
            warning(xmsg,call.=FALSE)
         } else {
            # valid name - overlay the default value.
            numOverlaid <- numOverlaid + 1
            assign(nam[i],details[[i]])
            #print(paste0("details overlay of ",nam[i]," with ",details[i]))
         }
      }
   } else {
      stopCnt()
      xmsg <- "***01N1 DETS The details parameter is not a list."
      stop(xmsg, call.=FALSE)
   }
}
#cat("envir=Id.Dot.pch:",find("Id.Dot.pch"),"\n")
#cat("envir=topMar:",find("topMar"),"\n")
#if (numOverlaid>0) {
#   xmsg <- paste0("***0501 PANEL Number of parameters overlaid = ",numOverlaid)
#   message(xmsg)
#}
#
# Verify and adjust details variables
#
#cat("In micromapST - processing parameters.\n")
#cat("envir=warnCnt:",find("warnCnt"),"\n")
#cat("envir=staggered:",find("staggered"),"\n")
#cat("envir=lastLab2Space:", find("lastLab2Space"), "\n")
####
#
# Set in colors with BW or gray requested. This resets it - to Dot.Outline value? OUCH!
#doDotOutline <- Dot.Outline
#
####
####
#
# Id.Dot.pch - validate the plotting symbol; must be a standard pch code.
#
#print("Validate Id.Dot.pch")
if (!is.between.r(Id.Dot.pch,c(1,25))) {
   # not an acceptable pch value - force the default.
   #cat("envir=Id.Dot.pch:", find("Id.Dot.pch"),"\n")
   Id.Dot.pch <<- 22    # set to default (filled square)
   warnCnt()
   xmsg <- paste0("***01NA DETS The Id.Dot.pch variable can only be set to a range from 1 to 25. Using the default of 22.")
   warning(xmsg,call.=FALSE)
}
#
# This is the code the rcmd check could not detect the scope of the detail$ variables.
#
#####
# Need to get ID width values before setting the panel defaults
#
#______________Function Call Argument Checks______________________
#
#------- Working variables for map and id glyphs.
#------- Start getting widths of labels and titles to help setup column widths.
#
# This will have to be re-written to handle user provided labels and titles for the glyph columns.
#
medianBanner <- Map.Median.text
#cat("Calculating banners and column fixed widths.","\n")
#print(medianBanner)
#print(Map.Hdr1)
#print(Map.Hdr2)
#print(Id.Hdr1)
#print(Id.Hdr1)
#
# Map titles with symbols
#
sw = Map.Lab.Box.Width + 0.05 + 0.04   # square width and spaces on each side. (inches)
#cat("Size of Box Symbols (guess) sw:",sw,"\n")
# empty banner data.frame - one row per map glyph type is added below.
banner <- data.frame(H1=character(),H2=character(),H3=character(),M1=character(),stringsAsFactors=FALSE)
# add "Highlighted" titles for default.
banner <- rbind(banner,t(c("","Highlighted",Map.Hdr2,medianBanner))) # add headers for cumulative banner <- rbind(banner,t(c("Cumulative Maps", paste0(Map.Hdr2,' Above Featured Rows'), paste0(Map.Hdr2,' Below Featured Rows'), medianBanner) ) ) # add headers for median banner <- rbind(banner,t(c("Median Based Contours", paste0(Map.Hdr2,' Above the Median'), paste0(Map.Hdr2,' Below the Median'), medianBanner) ) ) # add headers for two ended (tail) banner <- rbind(banner,t(c("", "Two Ended Cumulative Maps", paste0(Map.Hdr2," Highlighted"), medianBanner) ) ) banner <- rbind(banner,t(c("",Id.Hdr1,Id.Hdr2,"") ) ) bcn <- c("H1","H2","H3","M1") # h1, h2, h3, median brn <- c("map","mapcum","mapmed","maptail","id") row.names(banner) <- brn colnames(banner) <- bcn banner$H1 <- as.character(banner$H1) banner$H2 <- as.character(banner$H2) banner$H3 <- as.character(banner$H3) banner$M1 <- as.character(banner$M1) #cat("banner header data.frame:\n") #print(banner) # .adj -> which lines in each header have symbols? banner.adj <- data.frame(H1=c(0,0,0,0,0),H2=c(0,sw,sw,0,0),H3=c(0,sw,sw,0,0),M1=c(0,0,0,0,0)) row.names(banner.adj) <- brn banner.m <- c(1,1,1,0.8) # text size multiplier for H1, H2, H3, Med1 banner.tc <- Text.cex * banner.m #cat("CEX for headers and median - banner.tc:",banner.tc,"\n") banner.w <- banner # replace strings with width values for current font and Text.cex values. for (iH in c(1:4)) { for (iT in c(1:5)) { banner.w[iT,iH] <- strwidth(banner[iT,iH],units="inches",cex=banner.tc[iH]) } } # banner.w <- as.data.frame(sapply(banner.w, function(x) as.numeric(x))) # convert numeric. row.names(banner.w) <- brn #cat("widths in banners - banner.w:\n") #print(banner.w) banner.max <- as.data.frame(sapply(c(1:5), function(x) max(banner.w[x,]+banner.adj[x,]))) colnames(banner.max) <- "width" row.names(banner.max) <- brn #cat("maximum widths for each type of header - banner.max:\n") #print(banner.max) # Make subroutine to be able to do again later. 
ID.Abbr.width <- max(strwidth(ID.Abbr,units="inches",cex=(Id.Text.cex * Id.Cex.mod))) ID.Name.width <- max(strwidth(ID.Name,units="inches",cex=(Id.Text.cex * Id.Cex.mod))) #cat("ID.Abbr.width:",ID.Abbr.width,"\n ") #cat("ID.Name.width:",ID.Name.width,"\n\n") Id.OverH <- Id.Dot.width*(Id.Dot.cexm * Id.Cex.mod) + Id.Space*2.5 # two spaces left and right of name. #cat("ID overhead (Id.Start, Dot.width, Space (box to letters), space (letter to edge):",Id.OverH,"\n") #cat("banner.max ID:",banner.max["id","width"]," IDName:",Id.OverH+ID.Name.width," IDAbbr:",Id.OverH+ID.Abbr.width,"\n") # width of ID glyph with border Group names/abbreviations Id.width <- c(1.5,1) # initialize Id.width[1] <- max((Id.OverH + ID.Name.width ),banner.max["id","width"]) # plus padding. FULLNAMES Id.width[2] <- max((Id.OverH + ID.Abbr.width ),banner.max["id","width"]) # ABBREVIATIONS #cat("Id.width:",Id.width,"\n") # # Build title lists for maps and get width for point size. # #cat("Map.Aspect:",Map.Aspect,"\n\n") # #print("Column Hdrs - Done") #_____________Set up for Area Names and Abbreviation links. # #_____________Borders to data Link ---- rowNames and rowNamesCol # #_____________Process rowNames option___________________ # # if (missing(rowNames) || is.null(rowNames) || is.na(rowNames) ) { # no rowNames provided up front. Set to default rowNames <- "ab" } #cat("Validate rowNames : ", rowNames,"\n") #__________________ # # Verify the rownames are valid and can be translated into abbrevation versions. # # # The user can enter abbr, full, alt_ab, alias, or ID with the data. # Which everone is picked, it must be the string in the data.frame and the panelData-data.frames to # allow matching to the boundaries VisBorderr data. # # Each value is translated to the Key that is used to link the data to the # key in the boundary data in areaVisBorders. # # AD.link is the user value in the order of the data table. 
#
# areaIndex is in the order of the data table (AD.link) and points to the
# matching entry in the name table, based on the proper match for the type of value.
#
#print("Clean up rownames in $RN")
#cat("Border Group Name:",BordGrpName,"\n")
statsDFrame$RN <- ClnStr(as.character(statsDFrame$RN))
AD.link <- (as.character(statsDFrame$RN))   # row link values (link to borders), may be changed below.
#cat("Initial AD.link:",AD.link,"\n")
if (BordGrpName == "USStatesBG") {
   ### If US States Patterns - look for the many ways Washington DC might be
   ### entered in the user data and normalize them to "DC".
   if (rowNames == "full") {
      AD.Test <- toupper(AD.link)    # get capitalized version for the DC conversion.
      # Build DC name table (all caps) - only clean up full names.
      DCnames <- c("WASHINGTON, D. C.", "WASHINGTON D. C.", "WASHINGTON, D C",
                   "WASHINGTON D C", "WASHINGTON, DC", "WASHINGTON DC",
                   "DISTRICT COLUMBIA", "DISTRICT OF COLUMBIA",
                   "DC", "D C", "D, C.","D.C","D C.","D.C.")
      AD.link[!is.na(match(AD.Test,DCnames))] <- "DC"   ### match short form in border group
   }
   #cat("Updated AD.link:",AD.link,"\n")
}
# rowNames values only supported by specific border groups - stop early if misused.
if (rowNames == "alias" && enableAlias == FALSE) {
   stopCnt()
   StopFnd <- TRUE
   xmsg    <- paste0("***0191 CARG-RN rowNames='alias' is not supported for this bordGrp.")
   stop(xmsg, call.=FALSE)
}
if (rowNames == "seer" && BordGrpName != "USSeerBG") {
   stopCnt()
   StopFnd <- TRUE
   xmsg    <- paste0("***0192 CARG-RN rowNames='seer' is only supported for the 'USSeerBG' bordGrp.")
   stop(xmsg, call.=FALSE)
}
# IndexDFtoNT: pointer into the Name Table, in the order of the user
# data.frame, built according to the rowNames link type.
IndexDFtoNT <- switch(rowNames,
   # if "ab", use current name - get index
   "ab"     = {match(AD.link, areaNTAbbr)},
   # if "id", convert to index
   "id"     = {match(as.integer(AD.link), as.integer(rlAreaNamesAbbrsIDs$ID))},
   # if "FIPS", convert to index (alias for "id")
   "FIPS"   = (match(as.integer(AD.link), as.integer(rlAreaNamesAbbrsIDs$ID))),
   # if "full" sub-area name, convert to index
   "full"   = {match(AD.link, areaNTName)},
   # if "seer" seer sub-area names from SeerStat (read and convert to index)
   "seer"   = {AliasToIndex(AD.link,rlAreaNamesAbbrsIDs$Alias)},
   # if "alias" seer sub-area names from SeerStat (read and convert to index)
   "alias"  = {AliasToIndex(AD.link,rlAreaNamesAbbrsIDs$Alias)},
   # if "alt_ab" alternate name abbreviation used in data, convert to index
   "alt_ab" = {match(AD.link, rlAreaNamesAbbrsIDs$Alt_Abbr)},
   # No match - invalid rowNames value.
   {  stopCnt()
      StopFnd <- TRUE
      xmsg    <- paste0("***0190 CARG-RN Invalid rowNames call parameter value. The value must be 'ab', 'alt_ab', 'id', 'alias', or 'full'.")
      stop(xmsg, call.=FALSE)
   }
)
#
# IndexDFtoNT is index from caller's data.frame rows into the name table
# (areaNamesAbbrsIDs) data.frame.
#
# By default, we will handle cases where statsDFrame does not contain
# all sub-areas in the border group.
#
callVL$rowNames <- rowNames
var             <- "callVarList"
# FIX: replaced eval(parse(text="assign(var,callVL,envir=.GlobalEnv)")) with
# the equivalent direct call - eval/parse on a constant string is unnecessary.
assign(var, callVL, envir=.GlobalEnv)
#cat("Initial IndexDFtoNT:",IndexDFtoNT,"\n")
######
#
# Process ignoreNoMatches - the case where data is provided, but there is no row
# in the name table (and therefore no boundaries in the border group).
#
# This also deals with data rows that don't match boundary information:
#  a) all match - all data and all boundaries
#  b) all data match boundaries (all data is matched, but not all boundaries are used)
#     b1) dataRegionsOnly option enabled - find regions and only do regions with data
#     b2) no regions - draw all
#  c) not all data matches boundaries (data without boundary)
#     c1) ignoreNoMatches = false -> warning message and stop.
#     c2) ignoreNoMatches = true -> warning message, delete data rows, continue.
#  d) no data matches in boundaries (total mismatch) - warning and stop.
#
# Check and implement delete of non-matching rows as the no-match ignore option.
#
# set defaults for ignoreNoMatches call parameter
if (is.null(ignoreNoMatches)) ignoreNoMatches <- FALSE
if (is.na(ignoreNoMatches))   ignoreNoMatches <- FALSE
#cat("ignoreNoMatches : ",ignoreNoMatches,"\n")
#cat("number of rows  : ",numRows,"\n")
DFtoNTMissed <- is.na(IndexDFtoNT)    # TRUE for each data row with no name-table match
#cat("DFtoNTMissed:",DFtoNTMissed,"\n")
#cat("any(DFtoNTMissed):",any(DFtoNTMissed),"\n")
if (any(DFtoNTMissed)) {
   # one or more of the data rows didn't match the name table.
   # NOTE(review): "rawRN" is presumably the untranslated row name column set
   # earlier in this function - confirm against the code above this chunk.
   BadList <- statsDFrame[DFtoNTMissed,"rawRN"]   # rows that did not match.
   xmsg    <- paste0("***0106 CARG-DF The following rows in the ",sDFName," data.frame do not match any boundary name:")
   warning(xmsg,call.=FALSE)
   xmsg    <- paste0("***0107 ",paste0(BadList,collapse=", "))
   warning(xmsg,call.=FALSE)
   if (ignoreNoMatches) {
      # ignore data rows that do not match the name table - remove them.
      xmsg <- paste0("***0108 CARG-DF The rows not matched to boundaries will be removed and not mapped.")
      warning(xmsg,call.=FALSE)
      KeepList <- !DFtoNTMissed          # TRUE = good (matched) entries
      #cat("Good data rows:",paste0(KeepList,collapse=" "))
      # Keep only rows that matched the name table.
      IndexDFtoNT <- IndexDFtoNT[KeepList]    # clean up index
      statsDFrame <- statsDFrame[KeepList,]   # clean up data frame
      AD.link     <- AD.link[KeepList]        # clean up AD.link
   } else {
      # stop if a missing match - at least one NA in list.
      # FIX: message was missing the word "match".
      xmsg <- paste0("***0109 CARG-DF Data row names in the ",sDFName," data.frame must match the boundary names in the name table. Call stopped.")
      stop(xmsg,call.=FALSE)
   }
}
#cat("Adjusted data.frames - statsDFrame, AD.link, IndexDFtoNT:\n")
numRows <- length(IndexDFtoNT)   # update number of rows in data frame
#cat("numRows:",numRows,"\n")
#
####
####
#
# grpPattern argument - default = NULL (use calculated pattern)
#
#print("Validate - grpPattern")
# FIX: is.na(grpPattern) inside "||" errors for vector input in R >= 4.3;
# use any(is.na(...)) like the regionsB/dataRegionsOnly checks below.
if (!(is.null(grpPattern) || any(is.na(grpPattern)))) {
   # we have a user specified grpPattern
   if (!is.numeric(grpPattern)) {
      warnCnt()
      ErrFnd <- TRUE
      xmsg   <- paste0("***01C0 CARG-GP The grpPattern call parameter must be an integer vector. grpPattern ignored.")
      warning(xmsg, call.=FALSE)
      grpPattern <- NULL
   } else {
      xg <- sum(grpPattern)
      if (xg != numRows) {
         # grpPattern total does not match the statsDFrame row count.
         warnCnt()
         ErrFnd <- TRUE
         xmsg   <- paste0("***01C1 CARG-GP The total number of rows in the grpPattern call parameter must be equal to the number of rows in the ",
                          sDFName," data.frame. grpPattern ignored.")
         warning(xmsg, call.=FALSE)
         grpPattern <- NULL
      } else {
         # check for correct group formats: no element greater than 5.
         xg <- max(grpPattern)
         if (xg > 5) {
            warnCnt()
            ErrFnd <- TRUE
            xmsg   <- paste0("***01C2 CARG-GP Each value in grpPattern call parameter vector must be <= 5 (rows per group). A value of ",xg," was found.")
            warning(xmsg, call.=FALSE)
            grpPattern <- NULL
         } else {
            # Row counts must descend toward the middle (median) group:
            # lower half non-increasing, upper half non-decreasing.
            xl   <- length(grpPattern)          # number of groups in grpPattern
            xlh  <- ceiling(xl/2)               # number of groups to median point
            grpL <- grpPattern[1:xlh]           # lower half groups
            grpU <- grpPattern[(xl-xlh+1):xl]   # upper half groups
            # FIX: "descreasing" was a silent typo (swallowed by sort's "..."),
            # so the descending-order check never actually sorted.
            if ( !all(grpL == sort(grpL,decreasing=TRUE)) || !all(grpU == sort(grpU)) ) {
               warnCnt()
               ErrFnd <- TRUE
               xmsg   <- paste0("***01C3 CARG-GP The grpPattern call parameter is not properly ordered. ",
                                "The number of rows per group must be in desending order toward the median sub-area.")
               warning(xmsg, call.=FALSE)
               grpPattern <- NULL
            }
         }
      }
   }
}
#
#####
#####
#
# regionsB argument - default = FALSE.
#
#print("regionsB parameter Check.")
def_regionsB <- FALSE
regionsBFlag <- def_regionsB
if (! (is.null(RegVisBorders) || identical(RegVisBorders,L3VisBorders)) ) {
   # RegVisBorders boundary data.frame is present and different from L3 -
   # regional boundary overlays are available; validate the parameter.
   if ( is.null(regionsB) || any(is.na(regionsB)) ) {
      # argument is missing or not provided - use the default.
      regionsB     <- def_regionsB
      regionsBFlag <- def_regionsB
      #cat("regionsB support enabled - but no regionsB call parameter provided - regionsB set to FALSE.\n")
   } else {
      regionsB <- regionsB[[1]][1]      # reduce to a single scalar value
      if ( !is.logical(regionsB) ) {
         ErrFnd <- TRUE
         warnCnt()
         xmsg <- "***01G0 CARG-RB The regionsB call argument is not a logical variable. The default of FALSE will be used."
         warning(xmsg,call. = FALSE)
         regionsBFlag <- def_regionsB
         regionsB     <- def_regionsB
      } else {
         regionsBFlag <- regionsB
      }
   }
}
#cat("regionsBFlag parameter:",regionsBFlag," regionsB:",regionsB,"\n")
#
#####
#####
#
# dataRegionsOnly argument - default = FALSE.
# #print("dataRegionsOnly parameter Check.") def_dataRegionsOnly <- FALSE dataRegionsOnlyFlag <- def_dataRegionsOnly if ( aP_Regions ) { # border group supports regions (feature enabled) # validate parameter if ( is.null(dataRegionsOnly) || any(is.na(dataRegionsOnly)) ) { # argument is missing or not provided dataRegionsOnly <- def_dataRegionsOnly dataRegionsOnlyFlag <- def_dataRegionsOnly # default #cat("regions support enabled - but no regions call parameter provided - regions set to TRUE.\n") } else { dataRegionsOnly <- dataRegionsOnly[[1]][1] if ( !is.logical(dataRegionsOnly) ) { ErrFnd <- TRUE warnCnt() xmsg <- "***01G5 CARG-DRO The dataRegionsOnly call argument is not a logical variable. The default of FALSE will be used." warning(xmsg,call. = FALSE) dataRegionsOnlyFlag <- def_dataRegionsOnly dataRegionsOnly <- def_dataRegionsOnly } else { dataRegionsOnlyFlag <- dataRegionsOnly } } } #cat("dataRegionsOnlyFlag parameter:",dataRegionsOnlyFlag," dataRegionsOnly:",dataRegionsOnly,"\n") # # If duplicated rows exist, Notify user and stop. # # Is this now a duplicate test to the previous test??? Yes it is. (retire) # #print("check for duplicate statsDF rows - duplicate?") dupL <- duplicated(IndexDFtoNT) # check for duplicate references to Name Table if (any(dupL)) { # some of the matches are duplicates - not allowed. One row per sub-area. DupList <- paste0(AD.link[dupL],collapse=", ") stopCnt() xmsg <- paste0("***0104 CARG-DF There are duplicate entries in the statsDFrame data.frame. Duplicate entries are ignored.\n", "***0105 CARG-DF The duplicate rows are: ",DupList,"\n") stop(xmsg, call.=FALSE) rm(DupList) } rm(dupL) # one of the names provided abrv, alt_abrv, ID or full names are not valid # and did not match the data in the Name Table. Can't link to any boundary data. # What link to use for boxplot and TS type data? 
#print("Get panelData Key.")
# panelDataKey: for each data row, the key value that links glyph panelData
# structures (boxplot, time series) back to the name table.
panelDataKey <- switch(rowNames,
       "ab"    = areaNamesAbbrsIDs$Abbr[IndexDFtoNT],
       "full"  = areaNamesAbbrsIDs$Name[IndexDFtoNT],
       "id"    = areaNamesAbbrsIDs$ID[IndexDFtoNT],
       "alias" = areaNamesAbbrsIDs$Abbr[IndexDFtoNT],
       "seer"  = areaNamesAbbrsIDs$Abbr[IndexDFtoNT],
       "alt_ab"= areaNamesAbbrsIDs$Alt_Abbr[IndexDFtoNT]
    )
#cat("panelDataKey:",panelDataKey,"\n")
# IndexDFtoNT is an index list: position = statsDFrame row, value = name table row.
#
# statsDFrame$RN (AD.link) is the cleaned up string initially used for linking;
# it should be reusable to link to any panelData structure.
#
# sub-areas to regions to Area processing:
# get the list of sub-areas in the regions referenced by the data, and set up
# the used regions as the only spaces to map.
#
areaNamesAbbrsIDs$NotUsed <- FALSE
# Full (border group) lists of L2 areas, regions and sub-area keys.
#print("Build regions lists from NT regID")
listAllL2      <- unique(areaNamesAbbrsIDs$L2_ID)
listAllRegions <- unique(areaNamesAbbrsIDs$regID)
listAllAreas   <- areaNamesAbbrsIDs$Key
#cat("dataRegionsOnlyFlag:",dataRegionsOnlyFlag,"\n")
if (dataRegionsOnlyFlag) {
   # Only regions containing data will be mapped - build the "used" lists
   # from the rows actually present in the caller's data.
   statsDFrame$L2_ID <- areaNamesAbbrsIDs$L2_ID[IndexDFtoNT]   # put L2_ID into statsDFrame
   listUsedL2        <- unique(statsDFrame$L2_ID)
   statsDFrame$regID <- areaNamesAbbrsIDs$regID[IndexDFtoNT]   # put regID into statsDFrame
   listUsedRegions   <- unique(statsDFrame$regID)              # regions with data
   # All sub-areas that fall inside a region with data (not just matched rows).
   areaRegMatch  <- match(areaNamesAbbrsIDs$regID,listUsedRegions)
   areaRegKeep   <- !is.na(areaRegMatch)
   listUsedAreas <- areaNamesAbbrsIDs[areaRegKeep,"Key"]
} else {
   # Map everything.
   listUsedRegions <- listAllRegions
   listUsedL2      <- listAllL2
   listUsedAreas   <- listAllAreas
   #cat("regionsFlag=FALSE -> reset listUsed to listAll\n")
}
#cat("UsedRegions:",listUsedRegions,"\n")
#cat("UsedL2     :",listUsedL2,"\n")
#cat("UsedAreas  :",listUsedAreas,"\n")
#cat("Overlays - L2:",Map.L2Borders," Reg:",Map.RegBorders," L3:",Map.L3Borders,"\n")
if (length(listUsedRegions) != length(listAllRegions)) {
   # Fewer regions are used than exist in the border group. This only happens
   # when dataRegionsOnly=TRUE and aP_Regions is TRUE; the rlXXXX boundary
   # tables must be subset to the used regions.
   # mark not-used name table entries.
   SubAallR_Match <- match(areaNamesAbbrsIDs$regID,listUsedRegions)
   SubAallR_Good  <- !is.na(SubAallR_Match)     # valid name table entries
   #cat("SubAallR_Good:",SubAallR_Good,"\n")
   areaNamesAbbrsIDs$NotUsed[!SubAallR_Good] <- TRUE
   # all sub-area keys within the used regions
   listAllAreas <- listNTKeysUsed <- areaNamesAbbrsIDs$Key[SubAallR_Good]
   # sub-area keys that match data rows
   listUsedAreas <- areaNamesAbbrsIDs$Key[IndexDFtoNT]
   # sub-divide areaVisBorders
   rlAreaVM   <- match(rlAreaVisBorders$Key,listAllAreas)
   rlAreaKeep <- !is.na(rlAreaVM)
   rlAreaVisBorders <- rlAreaVisBorders[rlAreaKeep,]
   # sub-divide RegVisBorders
   rlRegVM   <- match(rlRegVisBorders$Key,listUsedRegions)
   rlRegKeep <- !is.na(rlRegVM)
   rlRegVisBorders <- rlRegVisBorders[rlRegKeep,]
   if (!identical(RegVisBorders,L3VisBorders)) {
      # RegVisBorders has real boundaries (not just a copy of L3) - ensure
      # the regional overlays are drawn.
      Map.RegBorders <- TRUE
      regionsBFlag   <- TRUE
   }
   # sub-divide L2VisBorders
   rlL2VM   <- match(rlL2VisBorders$Key,listUsedL2)
   rlL2Keep <- !is.na(rlL2VM)
   rlL2VisBorders <- rlL2VisBorders[rlL2Keep,]
   # When only some regions are drawn, the full L3 outline no longer applies.
   Map.L3Borders <- FALSE
   #print("-end-")
}  # End of regional VisBorder processing sub-dividing.
#cat("Overlays - L2:",Map.L2Borders," Reg:",Map.RegBorders," L3:",Map.L3Borders,"\n")
#print("Completed regions subsetting of boundary data.")
# Can't do much more until after the sortVar is handled.
#
#_______________plotNames option__________________
#
# Get area names or abbreviations to plot in the id glyph.
#print("Validate plotNames.")
# Set the default if not present or NA.
if (is.null(plotNames) || is.na(plotNames) ) {
   plotNames <- "ab"
}
# areaIDNames are used in the ID glyph as the literal for the area.
# Default - abbreviations.
areaUAbbr   <- areaNamesAbbrsIDs$Abbr[IndexDFtoNT]
areaUFull   <- areaNamesAbbrsIDs$Name[IndexDFtoNT]
areaIDNames <- areaUAbbr
IdW         <- Id.width[2]     # temp initialization (abbreviation)
# Pick the id literals and the matching column width.
areaIDNames <- switch(plotNames,
       "ab"   = {IdW <- Id.width[2]; areaUAbbr},   # set IdW and return names vector.
       "full" = {IdW <- Id.width[1]; areaUFull},
       {  # no match - warn and fall back to abbreviations.
          plotNames <- "ab"
          warnCnt()
          xmsg <- "***01B0 CARG-PN Invalid plotNames argument value. The value must be 'ab' or 'full'. The default of 'ab' will be used."
          warning(xmsg,call.=FALSE)
          # FIX: this branch previously ended with warning(), so the switch
          # returned the warning message string as areaIDNames; it must set
          # the width and return the abbreviation vector.
          IdW <- Id.width[2]
          areaUAbbr
       }
    )
# areaIDNames are in statsDFrame order - the ab or full name for each data row.
statsDFrame$IDNames <- as.character(areaIDNames)   # set IDNames for sorting and ID into statsDFrame
IdColWidth <- max(banner.max["id","width"],IdW)
#cat("ID column width to use - IdColWidth:",IdColWidth,"\n")
# statsDFrame$IDNames is in the order of the user data, not the name table.
# The name or abbreviation values are pulled from the Name Table in case an
# alias or alt_abbreviation was used to link the data to boundaries.
#cat("areaIDNames:",areaIDNames,"\n")
#_______________title option (mstTitle)______________________
#
#print("title validation.")
# Validate the title argument: must be a character vector of length 1 or 2.
mstTitle <- title
rm(title)
if (is.null(mstTitle)) {
   # set to the default (empty title).
   mstTitle <- c("")
}
if (length(mstTitle) < 1) {
   mstTitle <- c("")
   warnCnt()
   xmsg <- "***01A2 CARG-TL The title parameter is empty. Recommend providing a title for the linked micromap."
   warning(xmsg,call.=FALSE)
}
if (length(mstTitle) == 1) {
   if (is.na(mstTitle)) {
      # single NA - treat as the default empty title.
      mstTitle <- c("")
   }
}
if (typeof(mstTitle) != "character" || class(mstTitle) != "character") {
   mstTitle <- as.character(unlist(mstTitle))
   warnCnt()
   xmsg <- paste0("***01A1 CARG-TL The typeof/class of the title parameter is not character. ","Only character vectors are supported. The 'title' argument is ignored.")
   warning(xmsg,call.=FALSE)
}
if (length(mstTitle) > 2) {
   mstTitle <- mstTitle[1:2]
   warnCnt()
   xmsg <- paste0("***01A0 CARG-TL The title argument contains more than 2 items. ", "Only the first two will be used.")
   warning(xmsg,call.=FALSE)
}
#print("statsDFrame before sort")
#print(str(statsDFrame))
#_______________ascend option_____________________
#
# default value is ascending.
# (default is ascending = TRUE)
#print("Validate ascend")
# ordDecr: TRUE => sort descending; derived from the ascend logical argument.
ordDecr <- FALSE
# FIX: any(is.na(...)) - a bare is.na() on a vector inside "||" errors in R >= 4.3.
if (!(missing(ascend) || is.null(ascend) || any(is.na(ascend)))) {
   if (is.logical(ascend)) {
      ordDecr <- !(unlist(ascend)[[1]])
   } else {
      warnCnt()
      xmsg <- "***0186 CARG-AS The ascend parameter is not a logical variable. Must be TRUE or FALSE."
      warning(xmsg,call.=FALSE)
   }
}
#_______________sortVar option____________________
#
#print("Validate sortVar")
# Sort and store statsDFrame, areaID, and areaNames.
# Rules for sortVar data columns:
#  a) list of columns collected from the sortVar parameter
#  b) the numbers in the column are trimmed of blanks and "," separators
#  c) the numbers in the column are converted to numeric
#  d) if the column has no numbers, it is left as character (blanks trimmed only)
#
# Set default sort order results: sort by the sub-area names/abbr.
ord     <- order(statsDFrame$IDNames, na.last=TRUE, decreasing=ordDecr)
# NOTE(review): rank(sort(x)) is always 1..n for unique values - presumably
# rank(statsDFrame$IDNames) was intended; confirm against downstream use.
rankOrd <- rank(sort(statsDFrame$IDNames),ties.method="min",na.last=TRUE)
# ord and rankOrd are re-ordered (sorted) but point to the user data.frame.
# The data data.frame must be edited by now or the sort will not work; the
# names table must stay the same from now on.
#
# process sortVar
# FIX: any(is.na(...)) - sortVar may be a multi-column vector; a vector in
# "||" errors in R >= 4.3.
if (missing(sortVar) || is.null(sortVar) || any(is.na(sortVar)) ) {
   # if field omitted (null/NA) sort uses the default values above.
   sortVar <- NULL
} else {
   # value/name provided - verify it. Column names and numbers are verified
   # and converted to column numbers; 0 represents "no match".
   litsortVar <- sortVar
   sortVar    <- CheckParmColx(litsortVar,c('SORT','sortVar'),wSFNameList,len_wSFName)
   #print("sortVar returned by CheckParmColx")
   #print(sortVar)
   wSortVar <- sortVar[sortVar > 0]     # keep good column indexes
   if (length(wSortVar) > 0) {
      wSv      <- lapply(wSortVar, function(x) str_trim(statsDFrame[,x]))   # pull columns and trim blanks
      wSv2     <- lapply(wSv, function(y) as.numeric(gsub(",","",y)))       # kill "," and convert to numeric
      wSv9Test <- lapply(wSv2, function(z) all(is.na(z)))                   # did the column fail numeric conversion?
      # If numeric conversion failed for a column, keep the trimmed character
      # version; otherwise use the numeric version.
      wSv3 <- lapply(seq_along(wSv9Test),
                function(a) if(wSv9Test[[a]]) {
                      wSv[[a]]     # character column (trimmed)
                   } else {
                      wSv2[[a]]    # numeric column
                   } )
      wSv3$na.last    <- TRUE       # set na.last = TRUE option
      wSv3$decreasing <- ordDecr    # set sort order
      ord     <- do.call(order,wSv3)
      rankOrd <- rank(statsDFrame[,sortVar[1]],ties.method="min",na.last=TRUE)
   } else {
      # can't use sortVar - treat the argument as if it was not present.
      sortVar <- NULL
   }
}
#cat("sortVar - ord:",ord,"\n")
#print(rankOrd)
#
#--------------Set up working vectors based on the sort
#
# sortedOrd is the sorted row order of the statsDFrame data.frame.
sortedOrd <- ord
#print("sort completed.")
#cat("sortedOrd:",sortedOrd,"\n")
#
#_______________SORT the data array as requested____________
#
### are assigns needed in our mode? Data area for all calls below...
assign("dat",statsDFrame[sortedOrd,])   # "dat" holds the sorted statsDFrame
#cat("dim(dat):",dim(dat),"\n")
#
# From now on, the "dat" structure is the primary data.frame containing the user's data.
#
IndexDattoNT <- IndexDFtoNT[sortedOrd]    # index list from "dat" to Name table
#cat("IndexDFtoNT:",IndexDFtoNT,"\n")
#cat("IndexDattoNT:",IndexDattoNT,"\n")

areaDatIDNames <- areaIDNames[sortedOrd]

# IndexDattoNT is in data.frame order pointing to the name table
areaDatKey      <- areaNTKey[IndexDattoNT]    # keys in order of the user data.
areaDatAbbr     <- areaNTAbbr[IndexDattoNT]
areaDatFull     <- areaNamesAbbrsIDs$Name[IndexDattoNT]
areaDatID       <- areaNamesAbbrsIDs$ID[IndexDattoNT]
areaDatAlt_Abbr <- areaNamesAbbrsIDs$Alt_Abbr[IndexDattoNT]

#cat("dim(dat):",dim(dat),"\n")
#cat("length of areaID (areaDatKey): ",length(areaDatKey),"\n")
#cat("areaID (areaDatKey) : ",paste0(areaDatKey,collapse=" "),"\n")

naADK <- is.na(areaDatKey)
#cat("areaDatKey-NA:",naADK,"\n")
#cat("length(naADK):",length(naADK)," any(naADK):",any(naADK)," all:",all(naADK)," sum:",sum(naADK),"\n")

if (any(naADK)) {
   # internal consistency check - every data row should have resolved to a name-table key.
   cat("bad areaDatKey:\n")
   print(dat[naADK,])
   print("SHOULD not get here.")
}
#print(dat)
#cat("areaDatKey:",areaDatKey,"\n")
#cat("row.names(dat):",row.names(dat),"\n")

row.names(dat) <- areaDatKey    # reset the row.names to the Key

xDFrame <- data.frame(Key=areaDatKey, Abbr=areaDatAbbr, Full=areaDatFull, ID=areaDatID, IDNames=areaIDNames, Rank=rankOrd, Index=IndexDattoNT)
#cat("xDFrame:\n")
#print(xDFrame)

# build index from name table to statsDFrame
IndexNTtoDat <- rep(NA,length(areaNamesAbbrsIDs$Key))
for (ind in c(1:length(IndexDattoNT))) {
   IndexNTtoDat[IndexDattoNT[ind]] <- ind
}
#cat("IndexNTtoDat:",paste0(IndexNTtoDat,collapse=", "),"\n")
# IndexNTtoDat is in the name table order pointing to the data.frame.

NotUsedList  <- is.na(IndexNTtoDat)
NotUsedKeys  <- areaNTKey[NotUsedList]     # get list of unreferred sub-areas.
NotUsedNames <- areaNTName[NotUsedList]    # get list of names not referenced.
#cat("NotUsedKeys>",paste0(NotUsedKeys,collapse=", "),"<\n")

#if (any(NotUsedList)) {
#   # better message?
#   warnCnt()
#   xmsg <- paste0("***0102 CARG-DF The following sub-area(s) in the name table were not referenced in the user data.")
#   warning(xmsg,call.=FALSE)
#   xmsg <- paste0("***0102 CARG-DF >",paste0(NotUsedNames, collapse=", "),"<")
#   warning(xmsg,call.=FALSE)
#}
#cat("NotUsedKeys:",paste0(NotUsedKeys,collapse=", "),"\n")
#cat("NotUsedList:\n")
#print(NotUsedList)
#cat("\n")

# Promote the sorted working vectors into the package's working environment.
assign("areaDatAbbr"     ,areaDatAbbr)               # area Abbr     "area Abbr"     in order of the dat
assign("areaDatID"       ,areaDatID)                 # area ID       "area ID"       in order of the dat
assign("areaDatFull"     ,areaDatFull)               # area Full     "area Full"     in order of the dat
assign("areaDatKey"      ,areaDatKey)                # area Key      "area Key"      in order of the dat
assign("areaDatAlt_Abbr" ,areaDatAlt_Abbr)           # area Alt_Abbr "area Alt_Abbr" in order of the dat
assign("areaIDNames"     ,areaIDNames[sortedOrd])    # area Display Names "areaNames in order of the dat.
assign("NotUsedKeys"     ,NotUsedKeys)               # area keys that were not referenced in the data.
assign("NotUsedList"     ,NotUsedList)               # T/F list of not used sub-areas.
assign("datOrder",sortedOrd)                         # data order for use with panelData back to statsDFrame

# Note: sDFdat is the statsDFrame in sorted order. All areaDatxxx are in the same sorted order.
#
#print("done with Not Used Key List.")
#
# Working references on VisBorders
#
#
# axisScale
#
# Default Call = NULL, Default value = "e" new extended
#
#cat("axisScale>",axisScale,"<\n")
#print("Validating axisScale:")

# Map the axisScale keyword to the internal axisMethod code:
#   "o"=1 (original), "s"=2 (titled scaling), "sn"=3 (number w/ suffix), "e"=4 (extended, default).
axisMethod = 0
if (!(missing(axisScale) || is.null(axisScale) || is.na(axisScale))) {
   if (axisScale == "s") {
      # set up axis to use titled scaling
      axisMethod <- 2
   }
   if (axisScale =="sn") {
      # set up axis to use number scaling with suffix.
      axisMethod <- 3
   }
   if (axisScale == "e") {
      axisMethod <- 4
   }
   if (axisScale == "o") {
      # set up axis to use titled scaling
      axisMethod <- 1
   }
   if (axisMethod == 0) {
      # if still set, but bad value
      warnCnt()
      xmsg <- paste0("***01D0 CARG-SC The axisScale argument set to ",axisScale,", must be set to 'o', 'e', 's', or 'sn'. The default of 'e' will be used.")
      warning(xmsg,call.=FALSE)
      axisScale  <- "e"    # extended algorithm
      axisMethod <- 4
   }
} else {
   # parameter not present or set to NULL/NA
   axisScale  <- "e"       # extended algorithm
   axisMethod <- 4
}
if (axisMethod == 0) {
   warnCnt()
   xmsg <- "***01D1 CARG-SC The axisScale argument is Missing, NULL or NA. It must be set to 'o', 'e', 's', or 'sn'. The default of 'e' will be used."
   warning(xmsg,call.=FALSE)
   axisScale  <- "e"       # extended algorithm
   axisMethod <- 4
}
#cat("axisScale:",axisScale," axisMethod:",axisMethod,"\n")
#
# staggerLab
#
# Default Call = NULL, Default value = FALSE
#
#print("Validating staggered:")

staggered <<- FALSE    # start with a lower value.

if (!(missing(staggerLab) || is.null(staggerLab) || is.na(staggerLab))) {
   if (!is.logical(staggerLab)) {
      staggerLab <- FALSE
      warnCnt()
      xmsg <- "***01E0 CARG-SL The staggerLab argument is not a logical value. Setting staggerLab to FALSE."
      warning(xmsg,call.=FALSE)
   }
} else {
   # parameter not present or set to NULL/NA
   staggerLab <- FALSE    # default = FALSE - don't stagger axis labels.
}
#cat("staggerLab:",staggerLab,"\n")
#cat("staggered:",staggered,"\n")
#
######
######
#
# Now that the row names and any deletions have been done, then
# panels can finally be setup.
#
numRows <- nrow(dat)
#
######

#print("done call parameters - on to panelDesc..")

######
#_________________________ Get Panel Default Values ______________________
# use details in memory - now that we have merged them with users.

micromapGPanelDefaults <- micromapGSetPanelDef(numRows,rowSizeMaj,rowSizeMin,rowSepGap, 5, grpPattern)

#__________________________ Save Panel Defaults to memory
# get copy of panel defaults
wPanelDet <- micromapGPanelDefaults    # copy to micromapST memory space.
defNam = names(wPanelDet)

# Promote each panel-default entry to a variable of the same name in the
# current environment.
for (i in 1:length(wPanelDet)) {
   assign(defNam[i],wPanelDet[[i]])
}

# cGrpRatios <- c(1.333/5.333, 2.333/5.333, 3.333/5.333, 4.333/5.333, 5.333/5.333)
#
#####

#########
#
# Call arguments are checked - on to panelDesc
#
#
#########

ErrFnd  <- FALSE
StopFnd <- FALSE
#
#_________________ Check panel description content and formats _____________
#
#
# Since the panelDesc is a data.frame, it is a given the number of items in each
# variable list is the same number.
#
# When we move to list of lists, this is no longer true, but we don't care.
#
# If the objective is the list of list, then we can't do a full scan of each
# variable at this stage of the processing.
#
#______________Check for panelDesc$type validity______________

valid = c("map","mapcum","maptail","mapmedian",
          "rank","id","arrow","bar",
          "dot","dotse","dotconf","dotsignif",
          "ts","tsconf",
          "scatdot",
          "segbar","normbar","ctrbar",
          "boxplot")
# idDot and rank are not currently implemented

#____________________ List of expected and valid parameters in the panelDesc

PDParms <- c('type',
             'lab1','lab2','lab3','lab4',
             'col1','col2','col3',
             'colSize',
             'rmin','rmax',
             'refVals','refTexts',
             'panelData',
             'adv' )

# get list of names/options in panelDesc
PDUsed   <- names(panelDesc)       # used by every glyph function to check for parameters
PDPmatch <- match(PDUsed,PDParms)  # is if all entries in panelDesc are valid

if (any(is.na(PDPmatch))) {
   # one of more panelDesc parameters are bad
   stopCnt()
   StopFnd <- TRUE
   #PDErrorList <- paste0(PDUsed[is.na(PDPmatch)],collapse=" ")
   xmsg <- paste0("***0113 CARG-PD The following named lists in ",pDName," panelDesc data.frame are not valid: ",paste0(PDUsed[is.na(PDPmatch)],collapse=" "))
   warning(xmsg,call.=FALSE)
}

#___________________the panelDesc parameters (column names) are good _____
#
numTopHeaderRows <- 4.25    # start with 1-Titles, 2-lab & 1-X Axis two lines. (have to cover ID and Map headers)
numBotHeaderRows <- 1       # bottom 1-X axis lines.
# if (axisScale=="s") { # add 1/2 line for reduced size and sub-title on units. numTopHeaderRows <- numTopHeaderRows + 0.5 numBotHeaderRows <- numBotHeaderRows + 0.5 } if (staggerLab) { # if staggerLab is specified (forces) add 0.25. Will know until it too late if is dyn turned on. numTopHeaderRows <- numTopHeaderRows + 0.25 numBotHeaderRows <- numBotHeaderRows + 0.25 } if (length(mstTitle)>1) numTopHeaderRows <- numTopHeaderRows + 1.25 # # May be able to do a better job - later - future enhancement # # #________________type parameter # if (is.na(match('type',PDUsed))) { # Error 'type' parameter is not present stopCnt() StopFnd <- TRUE xmsg <- paste0('***0114 CARG-PD The required "type" named list is missing in the ',pDName,' panelDesc data.frame.') warning(xmsg,call.=FALSE) } # get type vector as characters no factor, etc. type = as.character(panelDesc$type) # test contents of type vector for validity PDTmatch = match(type,valid) if ( any( is.na(PDTmatch) ) ) { PDErrorList <- paste0(type[is.na(PDTmatch)],collapse=" ") StopFnd <- TRUE stopCnt() xmsg <- paste0("***0115 CARG-PD The ",pDName," type named list contains one or more invalid glyph name(s): ",PDErrorList) stop(xmsg, call. = FALSE) } PDMap <- (PDTmatch <= 4) # the first four are maps (TRUE if columns is a Map). xSeq <- seq(1,length(PDMap),by=1) PDMapCol <- xSeq[PDMap] # Get column number of maps #print(paste0("Map columns=",PDMapCol)) # Set up number of glyphs columns numCol <- nrow(panelDesc) # number of glyphs columns numPDRow <- nrow(panelDesc) # number of values in each parameter in panelDesc numPDCol <- ncol(panelDesc) # number of parameters present in panelDesc # #_________________panelDesc$labx____________________ # blank <- rep('',numCol) # empty vector for labels NAList <- rep(NA,numCol) # NA vector oneList <- rep(1,numCol) # numeric vector of all 1s. zeroList <- rep(0,numCol) # a NULL column cannot exist in a data.frame. If the name is present, it exist! 
# lab1 - normalize each labX panelDesc entry to a character vector with ""
# in place of missing/NA entries.
if (is.na(match('lab1',PDUsed))) {
   lab1 <- blank
} else {
   lab1 <- as.character(panelDesc$lab1)    # convert to character
   xlna <- is.na(lab1)                     # find NA values in vector
   if (any(xlna)) lab1[xlna] <- ""         # change NAs to ""
}

# lab2
if (is.na(match('lab2',PDUsed))) {
   lab2 <- blank
} else {
   lab2 <- as.character(panelDesc$lab2)    # convert to character
   xlna <- is.na(lab2)                     # find NA values in vector
   if (any(xlna)) lab2[xlna] <- ""         # change NAs to ""
}

# lab3 - presence of lab3 adds one bottom header row.
if (is.na(match('lab3',PDUsed))) {
   lab3 <- blank
} else {
   lab3 <- as.character(panelDesc$lab3)    # convert to character
   xlna <- is.na(lab3)                     # find NA values in vector
   if (any(xlna)) lab3[xlna] <- ""         # change NAs to ""
   numBotHeaderRows <- numBotHeaderRows + 1
}

# lab4
if (is.na(match('lab4',PDUsed))) {
   lab4 <- blank
} else {
   lab4 <- as.character(panelDesc$lab4)    # convert to character
   xlna <- is.na(lab4)                     # find NA values in vector
   if (any(xlna)) lab4[xlna] <- ""         # change NAs to ""
}

# All labels (1-4) are either text or "" entries. Don't have to check for missing, NULL or NA.

#_________Save panelDesc Parameters in to namespace____________
#
assign('lab1',lab1)
assign('lab2',lab2)
assign('lab3',lab3)
assign('lab4',lab4)

#print(find("lab1"))    # print environment

# more panelDesc checks and setups after the function definitions.
#
#_______________________panelDesc$colx_____________________
#
# Process -
#  1) check entire panelDesc variable vector and convert to numbers "CheckCol"
#  2) In glyph check value and get data "CheckPDCol"
#  3) check data vector for valid data "CheckNum"
#
# number of columns based on the presence of Descriptions for Column

# col1 - verify/convert column names or numbers; 0 means no match (fatal).
if (!is.na(match('col1',PDUsed))) {
   # col1 is present
   litcol1 <- as.character(panelDesc$col1)
   col1    <- CheckColx2(litcol1,"col1",1,panelDesc$type,wSFNameList,len_wSFName)
   x       <- (col1 == 0)
   #print(x)
   if (any(x,na.rm=TRUE)) { StopFnd <- TRUE }
} else {
   litcol1 <- NAList
   col1    <- NAList
}
#cat("col1:",paste0(col1,collapse=", "),">>",paste0(litcol1,collapse=", "),"\n")

# col2
if (!is.na(match('col2',PDUsed))) {
   # col2 is present
   litcol2 <- as.character(panelDesc$col2)
   col2    <- CheckColx2(litcol2,"col2",2,panelDesc$type,wSFNameList,len_wSFName)
   x       <- (col2 == 0)
   #print(x)
   if (any(x,na.rm=TRUE)) { StopFnd <- TRUE }
} else {
   litcol2 <- NAList
   col2    <- NAList
}
#cat("col2:",paste0(col2,collapse=", "),">>",paste0(litcol2,collapse=", "),"\n")

# col3
if(!is.na(match('col3',PDUsed))) {
   # col3 is present
   litcol3 <- as.character(panelDesc$col3)
   col3    <- CheckColx2(litcol3,"col3",3,panelDesc$type,wSFNameList,len_wSFName)
   x       <- (col3 == 0)
   #print(x)
   if (any(x,na.rm=TRUE)) { StopFnd <- TRUE }
} else {
   litcol3 <- NAList
   col3    <- NAList
}
#cat("col3:",paste0(col3,collapse=", "),">>",paste0(litcol3,collapse=", "),"\n")
#
#_____________panelDesc$rmin and rmax______________
#
if (is.na(match('rmin',PDUsed))) rmin = NAList else rmin = as.numeric(panelDesc$rmin)
if (is.na(match('rmax',PDUsed))) rmax = NAList else rmax = as.numeric(panelDesc$rmax)
#
#_____________panelDesc$refxxx________________
#
if (!is.na(match('refVals',PDUsed))) {
   assign('lRefVals',as.numeric(panelDesc$refVals))    # detail test in glyphs
} else {
   assign('lRefVals',NAList)
}
# no check if RefVals are numeric. ????
if (!is.na(match('refTexts',PDUsed))) {
   assign('lRefTexts',str_trim(panelDesc$refTexts))
   lRefTexts[lRefTexts == ""] <- NA    # convert blanks.
   numBotHeaderRows <- numBotHeaderRows + 1
} else {
   assign('lRefTexts',NAList)
}
# no check if RefTexts are character. ????
#
# Make adjustments for color or grays
#
if (colFull) {
   # set color values to work variables
   iRef.Val.col  <- Ref.Val.col
   iRef.Text.col <- Ref.Text.col
} else {
   # set gray values to work variables
   iRef.Val.col  <- Ref.Val.BW.col
   iRef.Text.col <- Ref.Text.BW.col
}

#_____________panelDesc$panelData_______________
#
# if present is the typeof correct ? - check within the glyph - it may be different.

if (is.na(match('panelData',PDUsed))) {
   wPanelData <- NAList
} else {
   wPanelData <- as.character(panelDesc$panelData)    # save pointer to panelD
}
assign('panelData',wPanelData)
rm(wPanelData)
#
#_________________-
#
#cat("Check on header row counts - top:",numTopHeaderRows," bot:",numBotHeaderRows,"\n")
#cat(" top mar:",numTopHeaderRows * 0.2, " bot mar:",numBotHeaderRows* 0.2,"\n")
#cat(" compare to 1.1 and 0.5/0.75\n")

#___panelDesc$colSize_________User specified column width processing and checking
# ____________________Column Size layout (initial)
#
# IdW set up in plotNames check

numCol = length(type)    # get number of columns to support
#cat("Building cparm table for run - Number of columns:",numCol,"\n")

cparm <- data.frame(cSize=numeric(0),lSep=numeric(0),rSep=numeric(0),rMinH=numeric(0),rMaxH=numeric(0))    # empty data.frame

# Build column width table based on the types of columns specified.
for (j in 1:numCol) {
   # Test type of column to be built and call build routine.
   #cat("top of loop - type=",type[j],"\n")

   cparm2 = switch(type[j],
      # colSize, col width, left sep, right sep, row min, row max)
      "map"=       c(max(banner.max["map","width"],Map.Min.width),0,0,Map.MinH, Map.MaxH),
      "mapcum"=    c(max(banner.max["mapcum","width"],Map.Min.width),0,0,Map.MinH, Map.MaxH),
      "maptail"=   c(max(banner.max["maptail","width"],Map.Min.width),0,0,Map.MinH, Map.MaxH),
      "mapmedian"= c(max(banner.max["mapmed","width"],Map.Min.width),0,0,Map.MinH, Map.MaxH),
      "id"=        c(IdColWidth,0,0,0,0),
      "dot"=       c(0,0,0,0,0),
      "dotse"=     c(0,0,0,0,0),
      "dotconf"=   c(0,0,0,0,0),
      "dotsignif"= c(0,0,0,0,0),
      "arrow"=     c(0,0,0,0,0),
      "bar"=       c(0,0,0,0,0),
      "boxplot"=   c(0,0,0,0,0),
      "ts"      =  c(0,.175,0,0,0),
      "tsconf"  =  c(0,.175,0,0,0),
      "scatdot" =  c(0,.175,0,0,0),
      "segbar"  =  c(0,0,0,0,0),
      "normbar" =  c(0,0,0,0,0),
      "ctrbar"  =  c(0,0,0,0,0),
      "rank"    =  c(Rank.width,0,0,0,0),
      "nomatch" =  c(0,0,0,0,0)
   )
   #cat("cparm2:",paste0(cparm2,collapse=", "),"\n")

   cparm <- rbind(cparm,cparm2)
}
# now have one row per column in the user panelDesc data.frame.

colnames(cparm) <- c("cSize","lSep","rSep","rMinH","rMaxH")
#cat("Column Sizing Table completed.\n")
# dump table.
#print(cparm)
#cat("\n")

# one row per column.
borders = rep(borderSize,4)    # set borders widths to 0.5 inches

### Add check of column type to table of minimal or static column widths.
### Must have details lists processed to do this.
### Recreate plotX as done in panelLayout

# Pick up row height min and max from types used.
rowMinH <- max(cparm[,"rMinH"],rowSizeMn)    # Largest minimum for all glyphs involved and system minimum size (inches)
rowMaxH <- max(cparm[,"rMaxH"],rowSizeMx)    # Largest maximum for all glyphs involved
#cat("rowMinH:",rowMinH," rowMaxH:",rowMaxH,"\n")

# Same formula as panelLayout
xPlotWidth = (par("din")[1])-borders[1]-borders[2]-leftMar-rightMar    # width in inches - (colSep, borders, left and right margins).
#cat("xPlotWidth:", xPlotWidth,"\n") # done IdW = Id.width[1] # bigger value for full names [2] is for abbreviations - default # #_____________panelDesc$colSize____________________ # colWidths <- cparm[,1] # get list of fixed width glyphs that have been requested (column # 1) from cparm. # In this table, a value of zero is NO Fixed Width. colFlex <- !(colWidths > 0) # save list of glyphs that use don't have fixed widths - flexible values ( not maps and id ) colNumID <- c(1:length(colWidths)) colGood <- rep(TRUE,length(colWidths)) #cat("colSize-Start colWidths:",colWidths," len:",length(colWidths)," colFlex:",colFlex,"\n") DoColSize <- FALSE # check for parameter? if (!is.na(match('colSize',PDUsed))) { # colSize is present DoColSize <- TRUE # colSize is present - do proportional space allocation. wColS <- panelDesc$colSize #cat("Processing colSize parameter:",wColS," len:",length(wColS),"\n") if (length(wColS) != length(colWidths)) stop # check for NA's in colSize fields. - Error. Clear to NULL "" wColBad <- is.na(wColS[colFlex]) if (any(wColBad)) { # yes, invalid value by user. wColBadList <-colNumID[colFlex & is.na(wColS)] if (length(wColBad)<=0) stop warnCnt() xmsg <- paste0("***01F1 CARG-CS The 'colSize' parameter in ",pDName," contains NA values in columns: ",paste0(wColBadList,collapse=","),". "," Values must be numeric and > 0.") warning(xmsg,call.=FALSE) colGood[wColBadList] <- FALSE # mark problem column } #cat("1-wColS:",wColS," colGood:",colGood,"\n") # check for invalid fixed width fields in colSize - NA, "", " ", 0 -> OK. Else - Bad and report. # Set to NA all valid fixed width column values in the colSize vector. # NA is valid wColS[!colFlex & wColS == 0 ] <- NA # 0 is valid wColS[!colFlex & str_trim(wColS) == "" ] <- NA # "", " ", etc is valid # What we have left is possible invalid entries. # if any fixed width column is not NA, problem if (any(!is.na(wColS[!colFlex]))) { # fixed width columns have characters or numeric or logical vlaues - OUCH! 
wColBadList <- wColS[!colFlex & !is.na(wColS)] # get list of bad values. if (length(wColBad)<=0) stop # check on programmer warnCnt() xmsg <- paste0("***01F2 CARG-CS The 'colSize' parameter in ",pDName," has values for fixed width glyphs. Value(s): ",paste0(wColBadList,collapse=","),". ", "Value(s) are ignored and set to NA.") warning(xmsg,call.=FALSE) # at this point the fixed columns are NA or can be set to NA. wColS[!colFlex] <- NA } #cat("2-wColS:",wColS," colFlex:",colFlex," colGood:",colGood,"\n") # Convert to numeric, if NA in colSize fields - eError report and set to NULL or "". # Fixed Width Columns are NA, so we not work on flexible columns that can have values. suppressWarnings(numColS <- as.numeric(wColS)) # make sure it's numeric. # Any flex column that is not a number or can not be converted to number -> NA. # also check for "Inf" values. Will use as marker later. wColFG <- colFlex & colGood wColSize <- numColS[wColFG] wColNum <- colNumID[wColFG] wColBad <- is.na(numColS[wColFG]) if (any(wColBad)) { # have colSize value(s) that is not numeric or are "Inf". wColBadList <- wColSize[wColBad] # get list of bad entries. # invalid colSize entries, not numeric, could be character, logical, etc. warnCnt() xmsg <- paste0("***01F3 CARG-CS The 'colSize' parameter in ",pDName," does not contain numeric values : ",paste0(wColBadList,collapse=","),".") warning(xmsg,call.=FALSE) # wColBadList <- wColNum[wColBad] # get index numbers colGood[wColBadList] <- FALSE } #cat("3-wColS:",wColS," numColS:",numColS," colGood:",colGood,"\n") # colSize check range. wColFG <- colFlex & colGood # only range check good (so far) colSize values. wColSize <- numColS[wColFG] # list of values wColNum <- colNumID[wColFG] # indexes to vector # run the test. wColBad <- ( wColSize <= 0 | wColSize > 200 ) # Only look at remaining good entries. if (any(wColBad)) { # colSize values out of acceptable range. 
wColBadList <- wColS[wColNum[wColBad]] # get list of bad entries # colSize entries are out of range <= 0 or > 200. warnCnt() xmsg <- paste0("***01F4 CARG-CS The 'colSize' entries in ",pDName," are out of range ( <= 0 or > 200 ). Values: ", paste0(wColBadList,collapse=","), ".") warning(xmsg,call.=FALSE) colGood[wColNum[wColBad]] <- FALSE # set all out of range values as no bad. } #cat("4-wColS:",wColS," numColS:",numColS," colGood:",colGood,"\n") numColS[!colGood] <- 0 # set bad values to zero. #cat("5-wColS:",wColS," numColS:",numColS,"\n") # Fix colSize columsn to Mean - "" columns in colFlex range. # Get sum of valid colSize entries. wColFG <- colFlex & colGood sumFixCol <- sum(colWidths) # sum of fixed widths sumColSize <- sum(numColS[wColFG]) # sum of values in user provided colSize # bad values were set to zero. meanColSize <- mean(numColS[wColFG]) # mean of values #cat("6-sumFix:",sumFixCol," sum colSize:",sumColSize," mean:",meanColSize,"\n") if (sumColSize == 0) { DoColSize <- FALSE } # sum of colSize = zero. if (all(!colGood[colFlex])) { DoColSize <- FALSE } # if all entries are bad - ignore colSize if (DoColSize) { # All flex columns must have a value # replace bad values with mean of good values. repColS <- colFlex & !colGood if (any(repColS)) { # we have come bad values to change to mean. wColBadList <- wColS[repColS] # get list of values being changed. warnCnt() xmsg <- paste0("***01F5 CARG-CS The reviewed 'colSize' parameter in ",pDName," has bad values (see above) and have been replaced by the mean of the good values: ", meanColSize,". Bad Values:", paste0(wColBadList,collapse=",")) warning(xmsg,call.=FALSE) numColS[repColS] <- meanColSize } colSize <- numColS # transfer back to colSize. litColSize <- as.character(numColS) # common starting point - either character or numeric. 
#cat("final colSize:",colSize,"\n") } else { warnCnt() xmsg <- paste0("***01F6 CARG-CS The 'colSize' parameter in ",pDName," contains no useful information and will be ignored.") warning(xmsg,call.=FALSE) colSize <- NAList } } else { # no parameter specified. colSize <- NAList DoColSize <- FALSE } # # Only keep colSize entires for flexible glyphs # #cat("Finish pre-processing colSize -- DoColSize:",DoColSize," colSize:",paste0(colSize,collapse=", "),"\n") #cat("Starting column width calculations\n") # colWidths has column widths in inches or zero is not set yet. (initially fixed width columsn.) # colFlex has TRUE for columns that are width is undetermined. # colSize edited vector of relative ratio values for each column. # basic column separators (0 on edges, colSepGap for all internal) colSep <- c(0,rep(colSepGap,numCol-1),0) # based on column type, add more space on left or right. (cparm[,2] for left, cparm[,3] for right.) - Y Axis. colSep[1:numCol] <- colSep[1:numCol] + cparm[,2] # add space on left of panel colSep[2:(numCol+1)] <- colSep[2:(numCol+1)] + cparm[,3] # add space on right of panel #cat("colSep:",colSep,"\n") colSepSum <- sum(colSep) # total width used by separators xPlotWidthOrg <- xPlotWidth xPlotWidth <- xPlotWidth - colSepSum # space - subtract separator space = Available X width # available space. usedSpace <- sum(colWidths) # get amount of allocated space. freeSpace <- xPlotWidth - usedSpace # available space #cat("Setup-Space:",xPlotWidthOrg," colSepSum:",colSepSum," Avail:",xPlotWidth," freeSpace:",freeSpace," usedSpace:",usedSpace,"\n") if (DoColSize) { #cat("Doing colSize - colSize:",colSize," colWidths:",colWidths,"\n") if (length(colSize) <= 0) stop # Cycle 1 - calculate and adjust for minimum column widths sumColSize <- sum(colSize,na.rm=TRUE) # sum values wColSizeP <- colSize/sumColSize # get proportion. wColSize <- wColSizeP * freeSpace # calculate allocations. wColMinE <- (wColSize < colSizeMin) # find too small columns. 
colWidths[wColMinE] <- colSizeMin # set low values to min. (if they exist) colSize[wColMinE] <- 0 # remove low values from colSize calculation. #cat("C1-colSize:",colSize," wColSizeP:",wColSizeP," colWidths:",colWidths,"\n") # Cycle 2 - calculate (again) and adjust for maximum column widths usedSpace <- sum(colWidths) freeSpace <- xPlotWidth - usedSpace #cat("C2-usedSpace:",usedSpace," freeSpace:",freeSpace,"\n") sumColSize <- sum(colSize,na.rm=TRUE) # sum values wColSizeP <- colSize/sumColSize # get proportion. wColSize <- wColSizeP * freeSpace # calculate allocations. wColMaxE <- (wColSize > colSizeMax) #cat("C2-Max test - sumColSize:",sumColSize," wColSizeP:",wColSizeP," wColSize:",wColSize," wColMaxE:",wColMaxE,"\n") if (any(wColMaxE,na.rm=TRUE)) { # only do one more cycle if a value > max is found. colWidths[wColMaxE] <- colSizeMax # set high values to max. colSize[wColMaxE] <- 0 # remove high values from colSize calculation. #cat("C2-Max adj-colSize:",colSize," wColSizeP:",wColSizeP," colWidths:",colWidths,"\n") # Cycle 3 - if max adjustments - do it one more time. usedSpace <- sum(colWidths) freeSpace <- xPlotWidth - usedSpace #cat("C3-usedSpace:",usedSpace," freeSpace:",freeSpace,"\n") # Repeat for final values. sumColSize <- sum(colSize,na.rm=TRUE) # sum values wColSizeP <- colSize/sumColSize # get proportion. wColSize <- wColSizeP * freeSpace # calculate allocations. } # Last step - place the widths in to colWidths # colSize columns hitting the minimum and maximum values have already been set in colWidths vector. # last calculation setup wColSize with the last columns. wColValFlag <- (wColSize > 0 ) # list of values to merge into colWidths wColValFlag[is.na(wColValFlag)] <- FALSE # NA are fixed columns, so make FALSE (no update) colWidths[wColValFlag] <- wColSize[wColValFlag] # put values into wColWidths } else { # no colSize - do old way - equal subdivide. zeroCol <- !(colWidths > 0) # TRUE for any column with no width assigned. 
numberCol <- sum(zeroCol,na.rm=TRUE) # get number of TRUEs = number of columns that need widths. (sum 1s) equalCol <- freeSpace / numberCol # get width of each column. #cat("Initial equalCol:",equalCol," FreeSpace:",freeSpace,"\n") if (equalCol > colSizeMax) { equalCol <- colSizeMax } if (equalCol < colSizeMin) { warnCnt() ErrFnd <- TRUE xmsg <- paste0("***0420 PANEL Calculated column widths is less than minimum ",colSizeMin," inches - too many columns specified.") warning(xmsg,call.=FALSE) if (equalCol < colSizeMin/2) { stopCnt() StopFnd <- TRUE xmsg <- paste0("***0421 PANEL Column width is too small to be useful, Package stopped.") stop(xmsg,call.=FALSE) } } colWidths[zeroCol] <- equalCol } #cat("Final-colWidths:",colWidths,"\n") # savedColWidths <- colWidths # save a copy of the column size parameters. savedColSep <- colSep # save a copy legfactor <- 1 # add space if reference values provided. # JP-2010/07/23 0 change to refVals to be consistent. #cat("numTopHeaderRows:",numTopHeaderRows," numBotHeaderRows:",numBotHeaderRows,"\n") if(!is.null(panelDesc$refVals)){ # if field present. if(any(!is.na(panelDesc$refVals))){ # if value provided, provide room in the bottom margin for the reference test. botMar <- botMarLegend # revisit calculation below to be more precise legfactor <- 9/(9-botMardif) # ???? #### Check on the use and need for "legfactor" in older code. } } #cat("botMar:",botMar,"\n") #assign('legfactor',legfactor) ######## # # Check for warnings or stops that should terminate the package/function # if (StopFnd) { xmsg <- "***01Z9 CARG Errors have been found in parameters and data. Please review program log and fix problems. Packaged stopped." stop(xmsg, call.=FALSE) } if (ErrFnd) { warnCnt() xmsg <- "***01Z8 CARG Warnings have been found in the parameters and data. Package continues, but results are unpredictable. Review log and fix errors." warning(xmsg, call.=FALSE) } ######## # # # Process and calculate column spacing and offsets. 
#
#
# should not need to set this again.
numCol <- length(type)

#####
#
# We build three panel layouts:
#   1) layout of all glyphs panels (ngroups by ncols)
#   2) layout of general blocks of glyphs (top, median, bottom groups) (3 or 2 by ncols)
#   3) layout of page blocks (top, median, bottom groups) but only 1 column (3 o4 2 by 1)
#
#####
#
# USStatesBG set up   - 50 or 51 rows -> 10 or 11 groups - median - single element
#
# USSeerBG set up     - 9 to 20 rows  -> 3(of 3,3,3) to 4(of 5,5,5,5) groups
#
# KansasBG set up     - 105 rows -> 21 groups - median - 5 rows/group (11 groups)
#
# NewYorkBG set up    - 62 rows  -> 13 groups (5..5,4,4,4,5...5)
#
# MarylandBG set up   - 24 rows (counties + 1 city) -> 5 groups (5,5,4,5,5)
#
# UtahBG set up       - 29 rows  -> (5.5.4.1.4.5.5) -> 7 groups
#
# ChinaBG set up      - 34 rows  -> (5,5,5, <1,2,3,4> ,5,5,5) -> 7 groups
#
# UKIrelandBG set up  - 218 rows -> (5,5,5,...,4,4,...,5,5,5)
#
# SeoulKoreaBG set up - 25 rows (districts) -> (5,5,5,5,5) -> 5 groups
#
# AfricaBG set up     - 52 rows (countries) -> (5,5,5,5,5,2,5,5,5,5,5) -> 11 groups
#
#
#printPanelsParms()

# build layout for glyphs panels (numGrps x ncol)  (Individual)
#cat("panelLayout - panels\n")

assign("panels",panelLayout(
                  vnrow        = numGrps,      # num of Row/Groups
                  vncol        = numCol,       # num of columns
                  topMargin    = topMar,       # 0.95
                  bottomMargin = botMar,       # 0.5
                  leftMargin   = 0,
                  rightMargin  = 0,
                  rowSep       = rowSep,       # vector
                  rowSize      = rowSize,      # vector
                  colSize      = colWidths,    # calculated column widths (inches)
                  colSep       = colSep,       # vector
                  rSizeMx      = rowMaxH,
                  rSizeMn      = rowMinH,
                  rSizeMaj     = rowSizeMaj,   # 7 rows per group/row
                  rMapCol      = PDMapCol,
                  disErr       = FALSE,
                  rDebug       = MST.Debug)
      )

# c(.1,.1,.1) for 3

# Done above by "micromapSetPanelDef"
#grounpedRowSize = details[["groupedRowSize"]]   # c(35,1.65,35)           -> USStatesBG (51)
                                                 # c(7,7,7) or c(7,7,7,7)  -> USSeerBG (9 -- 20)
                                                 # c(70,7,70)              -> KansasBG (105)
                                                 # c(42,7,42)              -> NewYorkBG (62)
#groupedRowSep = details[["groupedRowSep"]]      # c(0,0.1,0.1,0) or c(0,0.1,0)

#print("panels;")
#print(panels)
#cat("medGrp:",medGrp,"\n")

# Major panel group title-top, panels, title-bottom by columns (overlays panels)
# section of panels (top(25), median(1), bottom(25) and "N" columns wide.
### generalize settings - main panels (middle level) (3 rows - "N" cols)
###    rows= title, glypics, footnotes   cols=one for each glyph

panelBlocks <- 2       # Number of blocks for an even number of group/Rows
if (medGrp > 0) {
   panelBlocks <- 3    # Number of blocks for an odd number of group/Rows
}

#printPanelParms("panelGroup")
#cat("panelLayout - panelGroup\n")

# build layout for top, median(if present) and bottom cover panels (3 or 2 x numCol)
assign("panelGroup",panelLayout(
                  vnrow        = panelBlocks,   # 2 or 3
                  vncol        = numCol,        # numCols
                  topMargin    = topMar,
                  bottomMargin = botMar,
                  leftMargin   = 0,
                  rightMargin  = 0,
                  rowSize      = groupedRowSize,
                  rowSep       = groupedRowSep,
                  colSize      = colWidths,
                  colSep       = colSep,
                  rSizeMx      = rowMaxH,
                  rSizeMn      = rowMinH,
                  rSizeMaj     = rowSizeMaj,
                  rMapCol      = PDMapCol,
                  disErr       = TRUE,
                  rDebug       = MST.Debug)
      )
#print("panelGroup:")
#print(panelGroup)

#cat("panelLayout - panelOne\n")

# build layout for page (3 or 2 x 1)
assign("panelOne",panelLayout(
                  vnrow        = panelBlocks,   # 2 or 3
                  vncol        = 1,             # 1
                  topMargin    = topMar,
                  bottomMargin = botMar,
                  leftMargin   = 0,
                  rightMargin  = 0,
                  rowSize      = groupedRowSize,
                  rowSep       = groupedRowSep,
                  rSizeMx      = rowMaxH,
                  rSizeMn      = rowMinH,
                  rSizeMaj     = rowSizeMaj,
                  rMapCol      = PDMapCol,
                  disErr       = TRUE,
                  rDebug       = MST.Debug)
      )
#print("panelOne:")
#print(panelOne)

#
# Variables that span glyphs
#
#staggered <- FALSE    # Flag to indicate where the current column should start staggering numbers
                       # FALSE = first label on line 1, TRUE = first label on line 2.
                       # This value is set when staggered labels are processed based on if the last value
                       # in the atRx1 is greater than atRx2 = TRUE then value is TRUE.

#####
# ____________________Main loop______________________________
#
# Future of main loop.
# This will change to do: Setup, Page 1-Page Header, Glyph "n1" to "n2", and then the next page. # ##### #cat("Main Loop\n") # Build images of each column for (j in 1:numCol) { #cat("Doing Type:",type[j],"\n") # Test type of column to be built and call build routine. switch(type[j], "map"= rlAreaMap(j), "mapcum"= rlAreaMapCum(j), "maptail"= rlAreaMapTail(j), "mapmedian"=rlAreaMapMedian(j), "id"= rlAreaID(j), "dot"= rlAreaDot(j, dSignif=FALSE), "dotse"= rlAreaDotSe(j), "dotconf"= rlAreaDotConf(j), "dotsignif"=rlAreaDot(j, dSignif=TRUE), "arrow"= rlAreaArrow(j), "bar"= rlAreaBar(j), "boxplot"= rlAreaBoxplot(j, as.character(panelDesc$panelData[j]) ), "ts" = rlAreaTSConf(j, as.character(panelDesc$panelData[j]), conf=FALSE), "tsconf" = rlAreaTSConf(j, as.character(panelDesc$panelData[j]), conf=TRUE), "scatdot" = rlAreaScatDot(j), "segbar" = rlAreaSegBar(j), "normbar" = rlAreaSegBar(j, SBnorm=TRUE), "ctrbar" = rlAreaCtrBar(j), "rank" = rlAreaRank(j), "nomatch" ) #cat("End of glyphs Call - lastSpace Lab2:",lastLab2Space," Lab3:", lastLab3Space,"\n") } # All columns are built and sitting in the panel. ##### # # Fill in the top Page Titles # #cat("panelSelect - panelOne - margin='top'\n") panelSelect(panelOne,margin="top") # full page top label area. x <- panelScale() if (length(mstTitle)==1){ text(.5,.77,mstTitle,cex=Title.cex) } else { # only use the first two title character strings text(0.5, 0.9, mstTitle[1],cex=Title.cex) text(0.5, 0.65,mstTitle[2],cex=Title.cex) } # ##### ##### # # Time to report on the warnings and errors # message("End of micromapST processing.\n\n") warnNum <- get("i",envir=environment(warnCnt)) # get warnings counter if (warnNum > 0) { message(paste0(warnNum," warnings messages were logged. Please review the run log and resolve any issues.")) } else { message("No warnings were logged.") } stopNum <- get("i",envir=environment(stopCnt)) # get stop message counter if (stopNum > 0) { message(paste0(stopNum," Stop messages were logged. 
Please resolve issues and rerun.")) } else { message("No stop messages were logged.") } if (( warnNum + stopNum ) > 0) { message("If warnings and error messages did not appear on your R console, please execute 'warnings()' to list them.\n") } message(" ") # change the following to call end of run report. - set at start so R stops will be caught. # ##### #x <- Sys.setlocale('LC_ALL',Saved_Locale) on.exit(print("micromapST Ends")) } # end of micromapST Function ### End of micromapST #### # # .onLoad function - executed when the package is loaded initially. # builds a non-changeable micromapGDefault data.frame for use # as the default when colors and/or details are not specified. # # Added by JP - Oct, 2012 - Setup permanent micromapGDefault data.frame for # use as the default values on the call. # # No longer required. # #### #.onLoad = function (libraryName, pkgName) # # { # #packageStartupMessage(".onLoad") # #packageStartupMessage(libraryName) # #packageStartupMessage(pkgName) # # generate default data.frame for micromapST. # #rlmicromapGDefaults <- micromapGSetDefaults() # #micromapGDefaults <<- rlmicromapGDefaults # # } # # #### # # End of load and variable initialization # #### ###### #### ADD CHECK to make sure values are numeric when required. (content of columns.) #### Done for Arrow, the Dot set, Bar, SegBar/NormBar, CtrBar #### Not yet for BoxPlot and TS. ######
library(geosphere)
library(sp)

# Usage: Rscript nearest.R <nodefile.tsv> <ref_lon> <ref_lat>
# Prints the node in <nodefile> closest (great-circle distance) to the
# reference point, as: "lon lat node dept".
args <- commandArgs(trailingOnly = TRUE)  # collect arguments
nodefilepath <- args[1]
refpoint.lon <- as.numeric(args[2])
refpoint.lat <- as.numeric(args[3])
refpoint <- c(refpoint.lon, refpoint.lat)

# Tab-separated, no header.  Column meaning (from the original script):
# V1 = lon, V2 = lat, V3 = dept, V4 = node id
f <- read.csv(file = nodefilepath, sep = "\t", header = FALSE)

# distm() is vectorized over point matrices: compute the distance from the
# reference point to every node in a single call and take the index of the
# minimum.  This replaces the original scalar loop, which (a) evaluated
# distm() twice per candidate and (b) crashed on single-row files because
# `2:length(f$V1)` counts backwards (c(2, 1)) when there is only one row.
distances <- distm(cbind(f$V1, f$V2), matrix(refpoint, nrow = 1))
i <- which.min(distances)

closestpoint.lon  <- f$V1[i]
closestpoint.lat  <- f$V2[i]
closestpoint.dept <- f$V3[i]
closestpoint.node <- f$V4[i]

# Output order preserved from the original: lon lat node dept
sprintf("%s %s %s %s", closestpoint.lon, closestpoint.lat,
        closestpoint.node, closestpoint.dept)
/extreme/selectnodes/resources/R/nearest.R
no_license
rchailan/mirmidon-toolbox
R
false
false
1,100
r
library(geosphere)
library(sp)

# Usage: Rscript nearest.R <nodefile.tsv> <ref_lon> <ref_lat>
# Prints the node in <nodefile> closest (great-circle distance) to the
# reference point, as: "lon lat node dept".
args <- commandArgs(trailingOnly = TRUE)  # collect arguments
nodefilepath <- args[1]
refpoint.lon <- as.numeric(args[2])
refpoint.lat <- as.numeric(args[3])
refpoint <- c(refpoint.lon, refpoint.lat)

# Tab-separated, no header.  Column meaning (from the original script):
# V1 = lon, V2 = lat, V3 = dept, V4 = node id
f <- read.csv(file = nodefilepath, sep = "\t", header = FALSE)

# distm() is vectorized over point matrices: compute the distance from the
# reference point to every node in a single call and take the index of the
# minimum.  This replaces the original scalar loop, which (a) evaluated
# distm() twice per candidate and (b) crashed on single-row files because
# `2:length(f$V1)` counts backwards (c(2, 1)) when there is only one row.
distances <- distm(cbind(f$V1, f$V2), matrix(refpoint, nrow = 1))
i <- which.min(distances)

closestpoint.lon  <- f$V1[i]
closestpoint.lat  <- f$V2[i]
closestpoint.dept <- f$V3[i]
closestpoint.node <- f$V4[i]

# Output order preserved from the original: lon lat node dept
sprintf("%s %s %s %s", closestpoint.lon, closestpoint.lat,
        closestpoint.node, closestpoint.dept)
#=============================================================================#
# Authors: Alex Perkins, Sean Cavany, Sean Moore, Rachel Oidtman, Anita Lerch, Marya Poterek
# project: Estimating unobserved SARS-CoV-2 infections in the United States
# Year: 2020
#
# Code to generate all figures and results from supplementary text
#
#=============================================================================#
# set up workspace
#=============================================================================#

# load libraries
library(extraDistr)
library(doParallel)  # NOTE: foreach is used with %do%, so execution is sequential
library(mc2d)
library(MASS)
library(boot)

# load function to simulate autochthonous transmission
source('simOutbreak.R')

# set random number seed
set.seed(1234)

#=============================================================================#
# load in and process data
#=============================================================================#

# read in line list data for US
# updated 20200312
# data from https://github.com/midas-network/COVID-19/tree/master/data/cases/global/line_listings_nihfogarty
linelist = read.csv('../data/2020_03_12_1800EST_linelist_NIHFogarty.csv')
yesUS = subset(linelist, country=='USA')

# remove Diamond Princess repatriated cases
yesUS = yesUS[grep("Diamond",yesUS$summary,invert=TRUE),]

# fit gamma parameters for symptom to report delay
data.delay = as.Date(yesUS$reporting.date) - as.Date(yesUS$symptom_onset)
data.delay = as.numeric(data.delay[which(!is.na(data.delay))])
delay.shape.baseline = MASS::fitdistr(data.delay,dgamma,start=list(shape=0.5,rate=0.5))$estimate[1]
delay.rate = MASS::fitdistr(data.delay,dgamma,start=list(shape=0.5,rate=0.5))$estimate[2]

# number of travelers that were cases or died
num.CF = c(
  nrow(subset(yesUS,international_traveler>0))-sum(subset(yesUS,international_traveler>0)$death>0, na.rm=TRUE),
  sum(subset(yesUS,international_traveler>0)$death>0,na.rm=TRUE))

# read in case data internationally
# updated 20200307
# data from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
ts = read.csv('../data/time_series_19-covid-Confirmed.csv')
ts.natl = matrix(0,length(unique(ts$Country.Region)),ncol(ts)-4)
for(ii in seq_len(ncol(ts.natl))){
  ts.natl[,ii] = aggregate(ts[,4+ii],by=list(ts$Country.Region),FUN=sum)[,2]
}
row.names(ts.natl) = aggregate(ts[,4+ii],by=list(ts$Country.Region),FUN=sum)[,1]
# convert cumulative counts to daily increments (floored at zero)
for(ii in seq_len(nrow(ts.natl))){
  ts.natl[ii,-1] = pmax(0,diff(ts.natl[ii,]))
}

# correct for travel ban from China (for non-US citizens starting 2/2 at 5pm) - so 0 out starting 2/3
colnames(ts.natl) = 22:(ncol(ts.natl)+21)  # columns named by day of year
ts.natl['China',which(colnames(ts.natl)==34):ncol(ts.natl)] = 0

# read in death data internationally
# updated 20200307
# data from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
tsd = read.csv('../data/time_series_19-covid-Deaths.csv')
tsd.natl = matrix(0,length(unique(tsd$Country.Region)),ncol(tsd)-4)
# BUG FIX: loop bound was ncol(ts.natl) (the cases matrix); it must be
# ncol(tsd.natl) so the deaths matrix is filled correctly even when the two
# CSVs cover different numbers of dates.
for(ii in seq_len(ncol(tsd.natl))){
  tsd.natl[,ii] = aggregate(tsd[,4+ii],by=list(tsd$Country.Region),FUN=sum)[,2]
}
row.names(tsd.natl) = aggregate(tsd[,4+ii],by=list(tsd$Country.Region),FUN=sum)[,1]
for(ii in seq_len(nrow(tsd.natl))){
  tsd.natl[ii,-1] = pmax(0,diff(tsd.natl[ii,]))
}
colnames(tsd.natl) = 22:(ncol(tsd.natl)+21)

# count up local cases by day in the US
cases.US.total = c(rep(0,21),ts.natl['US',])
cases.US.imported = table(
  as.Date(as.character(subset(yesUS,international_traveler>0)$reporting.date)) -
  as.Date('2019-12-31'))
tmp = rep(0,length(cases.US.total))
tmp[as.numeric(names(cases.US.imported))] = cases.US.imported
cases.US.imported = tmp
rm(tmp)
cases.US.local = pmax(0, cases.US.total - cases.US.imported)

# count up local deaths by day in the US
deaths.US.total = c(rep(0,21),tsd.natl['US',])
deaths.US.imported = table(
  as.Date(as.character(subset(yesUS,international_traveler>0&death>0)$reporting.date)) -
  as.Date('2019-12-31'))
tmp = rep(0,length(deaths.US.total))
tmp[as.numeric(names(deaths.US.imported))] = deaths.US.imported
deaths.US.imported = tmp
rm(tmp)
deaths.US.local = pmax(0, deaths.US.total - deaths.US.imported)

#=============================================================================#
# simulate imported infections
#=============================================================================#

# sample replicates of how many infections have been imported into the US
maxUS = 2e4
rangeUS = sum(yesUS$international_traveler>0,na.rm=TRUE):maxUS

# estimate for asymptomatic proportion based on
# https://www.medrxiv.org/content/10.1101/2020.02.20.20025866v2
PrAsymptomatic = exp(optim(par=c(0,0),fn=function(par){
  sum((
    qbeta(c(0.5,0.025,0.975),exp(par[1]),exp(par[2])) -
    c(0.179,0.155,0.202)) ^ 2)})$par)

# estimate for proportion of symptomatic infections resulting in death based on
# http://weekly.chinacdc.cn/en/article/id/e53946e2-c6c4-41e9-9a9b-fea8db1a8f51
PrDeathSymptom = c(1+1023,1+44672-1023)

# set values of unknown parameters
# note that these values seem to maximize the probability of the cumulative
# deaths in the US as of March 8, 2020 predicted by the model
replicates = 1000
load("../results/sensitivity/param_estimates_posterior_1.rda", verbose=TRUE)
indices = sample(seq_along(PrCaseSymptom.trav_posterior), replicates, replace=TRUE)
PrCaseSymptom.trav = PrCaseSymptom.trav_posterior[indices]
asympRFraction = asympRFraction_posterior[indices]

# sample from uncertainty about proportions of infection outcomes
# (columns: Asymptomatic, Symptomatic-unreported, Case, Fatality)
propns.ASCF = cbind(
  rbeta(replicates,PrAsymptomatic[1],PrAsymptomatic[2]),
  rbeta(replicates,PrDeathSymptom[1],PrDeathSymptom[2]))
propns.ASCF = cbind(
  propns.ASCF[,1],
  (1-propns.ASCF[,1]) * (1-PrCaseSymptom.trav) * (1-propns.ASCF[,2]),
  (1-propns.ASCF[,1]) * PrCaseSymptom.trav * (1-propns.ASCF[,2]),
  (1-propns.ASCF[,1]) * propns.ASCF[,2])

# draw samples of the number of imported infections
imports = numeric(length=replicates)
for(ii in 1:replicates){
  PrImportedInfections = dmultinomial(
    x = cbind(
      0:(maxUS-sum(num.CF)),
      num.CF[1],num.CF[2]),
    prob = c(sum(propns.ASCF[ii,1:2]),propns.ASCF[ii,3:4]))
  imports[ii] = sample(
    sum(num.CF):maxUS, 1,
    prob=PrImportedInfections, replace=TRUE)
}

# draw samples of the day on which imported infections arrived.
# rep(day, count) repeats each day-of-year once per imported case on that
# day — equivalent to the original growing-vector loop, without O(n^2) copies.
case.days = rep(seq_along(cases.US.imported), times = cases.US.imported)
import.case.density = density(
  case.days, from = 1, to = length(cases.US.imported),
  n = length(cases.US.imported))$y

# estimate the day of the year on which imports occur
import.doy = list()
for(ii in 1:replicates){
  import.doy[[ii]] = sample(
    seq_along(cases.US.imported),
    imports[ii],
    prob=import.case.density,
    replace=TRUE)
}

#=============================================================================#
# simulate local transmission
#=============================================================================#

# simulate local transmission for each draw of imported infections
local = foreach(ii = 1:replicates) %do% {
  simOutbreak(
    timeImport = import.doy[[ii]], # timing of each imported infection
    R = 1.97, # reproduction number
    k = 1e3, # dispersion parameter
    si_mean = 4.56, # mean of serial interval distribution
    si_sd = 0.95, # standard deviation of serial interval distribution
    inc_shape = 1.88, # shape parameter of incubation period distribution
    inc_scale = 7.97, # scale parameter of incubation period distribution
    symp_to_death_mean = 14, # mean of time between symptom onset and death
    symp_to_death_sd = 5.37, # std. dev. of time between symptom onset and death
    report_delay_shape = delay.shape.baseline, # shape parameter for delay between symptom and reporting
    report_delay_rate = delay.rate, # rate parameter for delay between symptom and reporting
    stopSimulationDay = length(cases.US.imported), # day of year since Jan 1 when simulation stops
    asympProp = propns.ASCF[ii,1], # proportion of infections that are asymptomatic
    asympRFraction = asympRFraction[ii], # relative infectiousness of asymptomatics
    lnormFlag = FALSE # toggles whether serial interval distribution is lognormal
  )
}

# simulate deaths out but turn transmission off on 12 March
local.predict = foreach(ii = 1:replicates) %do% {
  simOutbreakR0Change(
    timeImport = import.doy[[ii]], # timing of each imported infection
    R = 1.97, # reproduction number
    k = 1e3, # dispersion parameter
    si_mean = 4.56, # mean of serial interval distribution
    si_sd = 0.95, # standard deviation of serial interval distribution
    inc_shape = 1.88, # shape parameter of incubation period distribution
    inc_scale = 7.97, # scale parameter of incubation period distribution
    symp_to_death_mean = 14, # mean of time between symptom onset and death
    symp_to_death_sd = 5.37, # std. dev. of time between symptom onset and death
    report_delay_shape = delay.shape.baseline, # shape parameter for delay between symptom and reporting
    report_delay_rate = delay.rate, # rate parameter for delay between symptom and reporting
    stopSimulationDay = 180, # day of year since Jan 1 when simulation stops
    asympProp = propns.ASCF[ii,1], # proportion of infections that are asymptomatic
    asympRFraction = asympRFraction[ii], # relative infectiousness of asymptomatics
    lnormFlag = FALSE, # toggles whether serial interval distribution is lognormal
    RChangeDay = length(cases.US.imported), # determines when R changes
    RChange = 0 # determines what R drops to at R0ChangeDay
  )
}

# load the following to generate the objects used to generate the figures in the paper
load("../results/objects_used_in_paper.RData",verbose=TRUE)

#=============================================================================#
# produce plots and results for all supplementary text figures
#=============================================================================#

# set figure margins
par(mar=c(4,5,1,1))

# Figure S1 plot reporting delay distribution
pdf("../plots/gamma_reporting_delay.pdf", width=4, height=4, pointsize=10)
h = hist(data.delay, plot=FALSE)
h$counts = h$counts / sum(h$counts)
plot(h, freq=TRUE, ylab="Relative Frequency", xlab="Delay (days)",
     xaxs='i',yaxs='i',las=1, xlim = c(0,16),main='')
lines(seq(0.1,16,0.1),dgamma(seq(0.1,16,0.1),
      delay.shape.baseline, delay.rate), lwd=2)
dev.off()

# Figure S2 = smooth vs non-smoothed methods of pLocal
updateDaily = FALSE # turn on Bayesian daily updating
cases.mat = t(matrix(
  unlist(lapply(local, function(x) x$cases)),
  length(local[[1]]$cases), replicates))
pdf('../plots/smooth_vs_nonsmooth_pLocal.pdf', width=9,height=4.8, pointsize=14)
par(mfrow=c(1,2))
for (smoothSpline in c(FALSE, TRUE)) {
  p.mat = matrix(NA,nrow(cases.mat),ncol(cases.mat))
  for(ii in 1:nrow(cases.mat)){
    alpha.old=1
    beta.old=1
    for(jj in 1:ncol(cases.mat)){
      if(cases.mat[ii,jj] > 0){
        actual.cases = rbinom(1,cases.mat[ii,jj], sum(propns.ASCF[ii,2:3]))
        alpha.new = alpha.old+cases.US.local[jj]
        beta.new = beta.old+actual.cases-cases.US.local[jj]
        # beta.new can go non-positive; floor at 1 so rbeta stays valid
        p.mat[ii,jj] = rbeta(1,alpha.new,max(1,beta.new))
        if (updateDaily) {
          alpha.old=alpha.new
          beta.old=beta.new
        }
      }
    }
    if (smoothSpline) {
      non.NA.indices = which(!is.na(p.mat[ii,]))
      # only smooth if at least a third of the days have estimates
      if(length(non.NA.indices) > ncol(p.mat) / 3){
        temp.sp = smooth.spline((1:ncol(p.mat))[non.NA.indices],
                                logit(p.mat[ii,non.NA.indices]),
                                nknots=floor((ncol(p.mat) - non.NA.indices[1])/7 + 0.5))
        p.mat[ii,non.NA.indices[1]:ncol(p.mat)] =
          inv.logit(predict(temp.sp, non.NA.indices[1]:ncol(p.mat))$y)
      } else {
        p.mat[ii,non.NA.indices[1]:ncol(p.mat)] = NA
      }
    }
  }
  plot(
    as.Date('2019-12-31') + 1:ncol(p.mat),
    apply(p.mat,2,function(ii)median(ii,na.rm=TRUE)),
    ylim=c(0,1),col=1,lwd=2,type='l',xaxs='i',yaxs='i',las=1,
    xlim=as.Date('2019-12-31') + c(31,ncol(p.mat)),
    xlab='Date',ylab='Symptomatics reporting', main='')
  polygon(
    c(as.Date('2019-12-31') + 1:ncol(p.mat),
      rev(as.Date('2019-12-31') + 1:ncol(p.mat))),
    c(apply(p.mat,2,function(ii)quantile(ii,0.025,na.rm=TRUE)),
      rev(apply(p.mat,2,function(ii)quantile(ii,0.975,na.rm=TRUE)))),
    border=NA,col=rgb(0,0,0,0.25))
  mtext(ifelse(smoothSpline,"B","A"),side=3,line=0,
        at=par("usr")[1]+0.05*diff(par("usr")[1:2]), cex=1.2)
}
dev.off()

# Figure S3 - plot alternative importation scenario
## Plots of import densities
alt.import.density=c(rep(0,21),
  colSums(ts.natl[-which(row.names(ts.natl)=='US'),])/sum(ts.natl[-which(row.names(ts.natl)=='US'),]))
pdf('../plots/importation_patterns.pdf', width=7,height=7, pointsize=14)
par(mar = c(5, 4, 4, 4) + 0.3)
barplot(cases.US.imported,ylab='Imported cases',xlab='Date',ylim=c(0,35),
        xaxs='i',yaxs='i',las=1,cex.axis=1.2,cex.lab=1.3,width=0.85)
axis(1,at=seq(1,length(cases.US.imported),by=15),
     labels=seq(as.Date('2019-12-31'),as.Date('2019-12-31')+length(cases.US.imported)+1,by=15),
     cex.axis=1.2,cex.lab=1.3)
box()
par(new = TRUE)
plot(as.Date('2019-12-31') + 1:length(import.case.density),
     import.case.density, type="l", col="red",
     axes=FALSE, bty = "n", xlab = "", ylab = "", lwd=2,
     xaxs='i',yaxs='i',cex.axis=1.2,cex.lab=1.3)
lines(as.Date('2019-12-31') + 1:length(import.case.density),
      alt.import.density, type="l", col="blue",lwd=2)
axis(side=4, at = pretty(range(import.case.density)),
     col.axis="black",las=1,cex.axis=1.2,cex.lab=1.3)
mtext("Importation timing", side=4, line=3.2, lwd=2,cex=1.3)
legend("topleft", col=c("black","red", "blue"),
       lty=c(0,1,1),#fill=c("grey",NA,NA),
       legend=(c("Imported cases","Baseline importation",
                 "Alternative importation")),cex=1.2,
       pch=c(22,NA,NA),pt.bg=c("grey",NA,NA),pt.cex=2,
       bty="n", lwd=2)
dev.off()

# Figure S4 - compare actual cases with model predicted cases
# NOTE: p.mat here is the one left over from the LAST iteration of the
# Figure S2 loop (i.e. the smoothed version) — intentional reuse.
pdf('../plots/cases_daily_resample.pdf',width=5,height=4)
p.mat.resample = apply(p.mat,2,function(x) sample(x,length(x),replace=TRUE))
det.cases.mat.obs = rbinom(length(cases.mat), as.vector(cases.mat),
                           rowSums(propns.ASCF[,2:3])*p.mat.resample)
det.cases.mat = matrix(det.cases.mat.obs, replicates, ncol(cases.mat))
plot(
  as.Date('2019-12-31') + 1:ncol(det.cases.mat),
  apply(det.cases.mat,2,function(ii)median(ii,na.rm=TRUE)),
  ylim=c(0,800),col=1,lwd=2,type='l',xaxs='i',yaxs='i',las=1,
  xlim=as.Date('2019-12-31') + c(31,ncol(det.cases.mat)),
  xlab='Date',ylab='Reported cases',main='')
polygon(
  c(as.Date('2019-12-31') + 1:ncol(det.cases.mat),
    rev(as.Date('2019-12-31') + 1:ncol(det.cases.mat))),
  c(apply(det.cases.mat,2,function(ii)quantile(ii,0.025,na.rm=TRUE)),
    rev(apply(det.cases.mat,2,function(ii)quantile(ii,0.975,na.rm=TRUE)))),
  border=NA,col=rgb(0,0,0,0.25))
legend("topleft",lty=rep("solid",2),lwd=2,
       legend=c("Data", "Model"),col=c("red","black"), bty='n')
lines(as.Date('2019-12-31') + 1:ncol(det.cases.mat), cases.US.local,
      type='l',lwd=2,col=2,xaxs='i',yaxs='i',
      xaxt='n',yaxt='n',xlab='',ylab='')
dev.off()
/code/script_supplement.R
permissive
kmfolgar/sarscov2_unobserved
R
false
false
15,699
r
#=============================================================================#
# Authors: Alex Perkins, Sean Cavany, Sean Moore, Rachel Oidtman, Anita Lerch, Marya Poterek
# project: Estimating unobserved SARS-CoV-2 infections in the United States
# Year: 2020
#
# Code to generate all figures and results from supplementary text
#
#=============================================================================#
# set up workspace
#=============================================================================#

# load libraries
library(extraDistr)
library(doParallel)  # NOTE: foreach is used with %do%, so execution is sequential
library(mc2d)
library(MASS)
library(boot)

# load function to simulate autochthonous transmission
source('simOutbreak.R')

# set random number seed
set.seed(1234)

#=============================================================================#
# load in and process data
#=============================================================================#

# read in line list data for US
# updated 20200312
# data from https://github.com/midas-network/COVID-19/tree/master/data/cases/global/line_listings_nihfogarty
linelist = read.csv('../data/2020_03_12_1800EST_linelist_NIHFogarty.csv')
yesUS = subset(linelist, country=='USA')

# remove Diamond Princess repatriated cases
yesUS = yesUS[grep("Diamond",yesUS$summary,invert=TRUE),]

# fit gamma parameters for symptom to report delay
data.delay = as.Date(yesUS$reporting.date) - as.Date(yesUS$symptom_onset)
data.delay = as.numeric(data.delay[which(!is.na(data.delay))])
delay.shape.baseline = MASS::fitdistr(data.delay,dgamma,start=list(shape=0.5,rate=0.5))$estimate[1]
delay.rate = MASS::fitdistr(data.delay,dgamma,start=list(shape=0.5,rate=0.5))$estimate[2]

# number of travelers that were cases or died
num.CF = c(
  nrow(subset(yesUS,international_traveler>0))-sum(subset(yesUS,international_traveler>0)$death>0, na.rm=TRUE),
  sum(subset(yesUS,international_traveler>0)$death>0,na.rm=TRUE))

# read in case data internationally
# updated 20200307
# data from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
ts = read.csv('../data/time_series_19-covid-Confirmed.csv')
ts.natl = matrix(0,length(unique(ts$Country.Region)),ncol(ts)-4)
for(ii in seq_len(ncol(ts.natl))){
  ts.natl[,ii] = aggregate(ts[,4+ii],by=list(ts$Country.Region),FUN=sum)[,2]
}
row.names(ts.natl) = aggregate(ts[,4+ii],by=list(ts$Country.Region),FUN=sum)[,1]
# convert cumulative counts to daily increments (floored at zero)
for(ii in seq_len(nrow(ts.natl))){
  ts.natl[ii,-1] = pmax(0,diff(ts.natl[ii,]))
}

# correct for travel ban from China (for non-US citizens starting 2/2 at 5pm) - so 0 out starting 2/3
colnames(ts.natl) = 22:(ncol(ts.natl)+21)  # columns named by day of year
ts.natl['China',which(colnames(ts.natl)==34):ncol(ts.natl)] = 0

# read in death data internationally
# updated 20200307
# data from https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_time_series
tsd = read.csv('../data/time_series_19-covid-Deaths.csv')
tsd.natl = matrix(0,length(unique(tsd$Country.Region)),ncol(tsd)-4)
# BUG FIX: loop bound was ncol(ts.natl) (the cases matrix); it must be
# ncol(tsd.natl) so the deaths matrix is filled correctly even when the two
# CSVs cover different numbers of dates.
for(ii in seq_len(ncol(tsd.natl))){
  tsd.natl[,ii] = aggregate(tsd[,4+ii],by=list(tsd$Country.Region),FUN=sum)[,2]
}
row.names(tsd.natl) = aggregate(tsd[,4+ii],by=list(tsd$Country.Region),FUN=sum)[,1]
for(ii in seq_len(nrow(tsd.natl))){
  tsd.natl[ii,-1] = pmax(0,diff(tsd.natl[ii,]))
}
colnames(tsd.natl) = 22:(ncol(tsd.natl)+21)

# count up local cases by day in the US
cases.US.total = c(rep(0,21),ts.natl['US',])
cases.US.imported = table(
  as.Date(as.character(subset(yesUS,international_traveler>0)$reporting.date)) -
  as.Date('2019-12-31'))
tmp = rep(0,length(cases.US.total))
tmp[as.numeric(names(cases.US.imported))] = cases.US.imported
cases.US.imported = tmp
rm(tmp)
cases.US.local = pmax(0, cases.US.total - cases.US.imported)

# count up local deaths by day in the US
deaths.US.total = c(rep(0,21),tsd.natl['US',])
deaths.US.imported = table(
  as.Date(as.character(subset(yesUS,international_traveler>0&death>0)$reporting.date)) -
  as.Date('2019-12-31'))
tmp = rep(0,length(deaths.US.total))
tmp[as.numeric(names(deaths.US.imported))] = deaths.US.imported
deaths.US.imported = tmp
rm(tmp)
deaths.US.local = pmax(0, deaths.US.total - deaths.US.imported)

#=============================================================================#
# simulate imported infections
#=============================================================================#

# sample replicates of how many infections have been imported into the US
maxUS = 2e4
rangeUS = sum(yesUS$international_traveler>0,na.rm=TRUE):maxUS

# estimate for asymptomatic proportion based on
# https://www.medrxiv.org/content/10.1101/2020.02.20.20025866v2
PrAsymptomatic = exp(optim(par=c(0,0),fn=function(par){
  sum((
    qbeta(c(0.5,0.025,0.975),exp(par[1]),exp(par[2])) -
    c(0.179,0.155,0.202)) ^ 2)})$par)

# estimate for proportion of symptomatic infections resulting in death based on
# http://weekly.chinacdc.cn/en/article/id/e53946e2-c6c4-41e9-9a9b-fea8db1a8f51
PrDeathSymptom = c(1+1023,1+44672-1023)

# set values of unknown parameters
# note that these values seem to maximize the probability of the cumulative
# deaths in the US as of March 8, 2020 predicted by the model
replicates = 1000
load("../results/sensitivity/param_estimates_posterior_1.rda", verbose=TRUE)
indices = sample(seq_along(PrCaseSymptom.trav_posterior), replicates, replace=TRUE)
PrCaseSymptom.trav = PrCaseSymptom.trav_posterior[indices]
asympRFraction = asympRFraction_posterior[indices]

# sample from uncertainty about proportions of infection outcomes
# (columns: Asymptomatic, Symptomatic-unreported, Case, Fatality)
propns.ASCF = cbind(
  rbeta(replicates,PrAsymptomatic[1],PrAsymptomatic[2]),
  rbeta(replicates,PrDeathSymptom[1],PrDeathSymptom[2]))
propns.ASCF = cbind(
  propns.ASCF[,1],
  (1-propns.ASCF[,1]) * (1-PrCaseSymptom.trav) * (1-propns.ASCF[,2]),
  (1-propns.ASCF[,1]) * PrCaseSymptom.trav * (1-propns.ASCF[,2]),
  (1-propns.ASCF[,1]) * propns.ASCF[,2])

# draw samples of the number of imported infections
imports = numeric(length=replicates)
for(ii in 1:replicates){
  PrImportedInfections = dmultinomial(
    x = cbind(
      0:(maxUS-sum(num.CF)),
      num.CF[1],num.CF[2]),
    prob = c(sum(propns.ASCF[ii,1:2]),propns.ASCF[ii,3:4]))
  imports[ii] = sample(
    sum(num.CF):maxUS, 1,
    prob=PrImportedInfections, replace=TRUE)
}

# draw samples of the day on which imported infections arrived.
# rep(day, count) repeats each day-of-year once per imported case on that
# day — equivalent to the original growing-vector loop, without O(n^2) copies.
case.days = rep(seq_along(cases.US.imported), times = cases.US.imported)
import.case.density = density(
  case.days, from = 1, to = length(cases.US.imported),
  n = length(cases.US.imported))$y

# estimate the day of the year on which imports occur
import.doy = list()
for(ii in 1:replicates){
  import.doy[[ii]] = sample(
    seq_along(cases.US.imported),
    imports[ii],
    prob=import.case.density,
    replace=TRUE)
}

#=============================================================================#
# simulate local transmission
#=============================================================================#

# simulate local transmission for each draw of imported infections
local = foreach(ii = 1:replicates) %do% {
  simOutbreak(
    timeImport = import.doy[[ii]], # timing of each imported infection
    R = 1.97, # reproduction number
    k = 1e3, # dispersion parameter
    si_mean = 4.56, # mean of serial interval distribution
    si_sd = 0.95, # standard deviation of serial interval distribution
    inc_shape = 1.88, # shape parameter of incubation period distribution
    inc_scale = 7.97, # scale parameter of incubation period distribution
    symp_to_death_mean = 14, # mean of time between symptom onset and death
    symp_to_death_sd = 5.37, # std. dev. of time between symptom onset and death
    report_delay_shape = delay.shape.baseline, # shape parameter for delay between symptom and reporting
    report_delay_rate = delay.rate, # rate parameter for delay between symptom and reporting
    stopSimulationDay = length(cases.US.imported), # day of year since Jan 1 when simulation stops
    asympProp = propns.ASCF[ii,1], # proportion of infections that are asymptomatic
    asympRFraction = asympRFraction[ii], # relative infectiousness of asymptomatics
    lnormFlag = FALSE # toggles whether serial interval distribution is lognormal
  )
}

# simulate deaths out but turn transmission off on 12 March
local.predict = foreach(ii = 1:replicates) %do% {
  simOutbreakR0Change(
    timeImport = import.doy[[ii]], # timing of each imported infection
    R = 1.97, # reproduction number
    k = 1e3, # dispersion parameter
    si_mean = 4.56, # mean of serial interval distribution
    si_sd = 0.95, # standard deviation of serial interval distribution
    inc_shape = 1.88, # shape parameter of incubation period distribution
    inc_scale = 7.97, # scale parameter of incubation period distribution
    symp_to_death_mean = 14, # mean of time between symptom onset and death
    symp_to_death_sd = 5.37, # std. dev. of time between symptom onset and death
    report_delay_shape = delay.shape.baseline, # shape parameter for delay between symptom and reporting
    report_delay_rate = delay.rate, # rate parameter for delay between symptom and reporting
    stopSimulationDay = 180, # day of year since Jan 1 when simulation stops
    asympProp = propns.ASCF[ii,1], # proportion of infections that are asymptomatic
    asympRFraction = asympRFraction[ii], # relative infectiousness of asymptomatics
    lnormFlag = FALSE, # toggles whether serial interval distribution is lognormal
    RChangeDay = length(cases.US.imported), # determines when R changes
    RChange = 0 # determines what R drops to at R0ChangeDay
  )
}

# load the following to generate the objects used to generate the figures in the paper
load("../results/objects_used_in_paper.RData",verbose=TRUE)

#=============================================================================#
# produce plots and results for all supplementary text figures
#=============================================================================#

# set figure margins
par(mar=c(4,5,1,1))

# Figure S1 plot reporting delay distribution
pdf("../plots/gamma_reporting_delay.pdf", width=4, height=4, pointsize=10)
h = hist(data.delay, plot=FALSE)
h$counts = h$counts / sum(h$counts)
plot(h, freq=TRUE, ylab="Relative Frequency", xlab="Delay (days)",
     xaxs='i',yaxs='i',las=1, xlim = c(0,16),main='')
lines(seq(0.1,16,0.1),dgamma(seq(0.1,16,0.1),
      delay.shape.baseline, delay.rate), lwd=2)
dev.off()

# Figure S2 = smooth vs non-smoothed methods of pLocal
updateDaily = FALSE # turn on Bayesian daily updating
cases.mat = t(matrix(
  unlist(lapply(local, function(x) x$cases)),
  length(local[[1]]$cases), replicates))
pdf('../plots/smooth_vs_nonsmooth_pLocal.pdf', width=9,height=4.8, pointsize=14)
par(mfrow=c(1,2))
for (smoothSpline in c(FALSE, TRUE)) {
  p.mat = matrix(NA,nrow(cases.mat),ncol(cases.mat))
  for(ii in 1:nrow(cases.mat)){
    alpha.old=1
    beta.old=1
    for(jj in 1:ncol(cases.mat)){
      if(cases.mat[ii,jj] > 0){
        actual.cases = rbinom(1,cases.mat[ii,jj], sum(propns.ASCF[ii,2:3]))
        alpha.new = alpha.old+cases.US.local[jj]
        beta.new = beta.old+actual.cases-cases.US.local[jj]
        # beta.new can go non-positive; floor at 1 so rbeta stays valid
        p.mat[ii,jj] = rbeta(1,alpha.new,max(1,beta.new))
        if (updateDaily) {
          alpha.old=alpha.new
          beta.old=beta.new
        }
      }
    }
    if (smoothSpline) {
      non.NA.indices = which(!is.na(p.mat[ii,]))
      # only smooth if at least a third of the days have estimates
      if(length(non.NA.indices) > ncol(p.mat) / 3){
        temp.sp = smooth.spline((1:ncol(p.mat))[non.NA.indices],
                                logit(p.mat[ii,non.NA.indices]),
                                nknots=floor((ncol(p.mat) - non.NA.indices[1])/7 + 0.5))
        p.mat[ii,non.NA.indices[1]:ncol(p.mat)] =
          inv.logit(predict(temp.sp, non.NA.indices[1]:ncol(p.mat))$y)
      } else {
        p.mat[ii,non.NA.indices[1]:ncol(p.mat)] = NA
      }
    }
  }
  plot(
    as.Date('2019-12-31') + 1:ncol(p.mat),
    apply(p.mat,2,function(ii)median(ii,na.rm=TRUE)),
    ylim=c(0,1),col=1,lwd=2,type='l',xaxs='i',yaxs='i',las=1,
    xlim=as.Date('2019-12-31') + c(31,ncol(p.mat)),
    xlab='Date',ylab='Symptomatics reporting', main='')
  polygon(
    c(as.Date('2019-12-31') + 1:ncol(p.mat),
      rev(as.Date('2019-12-31') + 1:ncol(p.mat))),
    c(apply(p.mat,2,function(ii)quantile(ii,0.025,na.rm=TRUE)),
      rev(apply(p.mat,2,function(ii)quantile(ii,0.975,na.rm=TRUE)))),
    border=NA,col=rgb(0,0,0,0.25))
  mtext(ifelse(smoothSpline,"B","A"),side=3,line=0,
        at=par("usr")[1]+0.05*diff(par("usr")[1:2]), cex=1.2)
}
dev.off()

# Figure S3 - plot alternative importation scenario
## Plots of import densities
alt.import.density=c(rep(0,21),
  colSums(ts.natl[-which(row.names(ts.natl)=='US'),])/sum(ts.natl[-which(row.names(ts.natl)=='US'),]))
pdf('../plots/importation_patterns.pdf', width=7,height=7, pointsize=14)
par(mar = c(5, 4, 4, 4) + 0.3)
barplot(cases.US.imported,ylab='Imported cases',xlab='Date',ylim=c(0,35),
        xaxs='i',yaxs='i',las=1,cex.axis=1.2,cex.lab=1.3,width=0.85)
axis(1,at=seq(1,length(cases.US.imported),by=15),
     labels=seq(as.Date('2019-12-31'),as.Date('2019-12-31')+length(cases.US.imported)+1,by=15),
     cex.axis=1.2,cex.lab=1.3)
box()
par(new = TRUE)
plot(as.Date('2019-12-31') + 1:length(import.case.density),
     import.case.density, type="l", col="red",
     axes=FALSE, bty = "n", xlab = "", ylab = "", lwd=2,
     xaxs='i',yaxs='i',cex.axis=1.2,cex.lab=1.3)
lines(as.Date('2019-12-31') + 1:length(import.case.density),
      alt.import.density, type="l", col="blue",lwd=2)
axis(side=4, at = pretty(range(import.case.density)),
     col.axis="black",las=1,cex.axis=1.2,cex.lab=1.3)
mtext("Importation timing", side=4, line=3.2, lwd=2,cex=1.3)
legend("topleft", col=c("black","red", "blue"),
       lty=c(0,1,1),#fill=c("grey",NA,NA),
       legend=(c("Imported cases","Baseline importation",
                 "Alternative importation")),cex=1.2,
       pch=c(22,NA,NA),pt.bg=c("grey",NA,NA),pt.cex=2,
       bty="n", lwd=2)
dev.off()

# Figure S4 - compare actual cases with model predicted cases
# NOTE: p.mat here is the one left over from the LAST iteration of the
# Figure S2 loop (i.e. the smoothed version) — intentional reuse.
pdf('../plots/cases_daily_resample.pdf',width=5,height=4)
p.mat.resample = apply(p.mat,2,function(x) sample(x,length(x),replace=TRUE))
det.cases.mat.obs = rbinom(length(cases.mat), as.vector(cases.mat),
                           rowSums(propns.ASCF[,2:3])*p.mat.resample)
det.cases.mat = matrix(det.cases.mat.obs, replicates, ncol(cases.mat))
plot(
  as.Date('2019-12-31') + 1:ncol(det.cases.mat),
  apply(det.cases.mat,2,function(ii)median(ii,na.rm=TRUE)),
  ylim=c(0,800),col=1,lwd=2,type='l',xaxs='i',yaxs='i',las=1,
  xlim=as.Date('2019-12-31') + c(31,ncol(det.cases.mat)),
  xlab='Date',ylab='Reported cases',main='')
polygon(
  c(as.Date('2019-12-31') + 1:ncol(det.cases.mat),
    rev(as.Date('2019-12-31') + 1:ncol(det.cases.mat))),
  c(apply(det.cases.mat,2,function(ii)quantile(ii,0.025,na.rm=TRUE)),
    rev(apply(det.cases.mat,2,function(ii)quantile(ii,0.975,na.rm=TRUE)))),
  border=NA,col=rgb(0,0,0,0.25))
legend("topleft",lty=rep("solid",2),lwd=2,
       legend=c("Data", "Model"),col=c("red","black"), bty='n')
lines(as.Date('2019-12-31') + 1:ncol(det.cases.mat), cases.US.local,
      type='l',lwd=2,col=2,xaxs='i',yaxs='i',
      xaxt='n',yaxt='n',xlab='',ylab='')
dev.off()
## Sample analysis: build a yearly station-level dataset of Indian monsoon
## rainfall medians merged with climate covariates (MJO, teleconnections,
## solar flux, temperature anomaly), then save it as a CSV.
## NOTE(review): rm(list = ls()) and setwd() are kept from the original
## script; both are side effects best avoided in shared code.
rm(list = ls())
#setwd("\\\\dfs.com/root/Dept-Decision/Dept-Users/Majumdar/Rain")
setwd('C:/Study/My projects/Climate-indian monsoon/Codes')

# read in data: per-station yearly medians of rainfall and covariates
rain = read.csv("../data/rain_1973_2013_test.csv")
rainsmall1 = aggregate(PRCP ~ year + STATION_NAME, data = rain, FUN = median)
rainsmall2 = aggregate(cbind(LATITUDE, LONGITUDE, ELEVATION, TMAX, TMIN,
                             del_TT_Deg_Celsius, DMI, Nino34,
                             u_wind_200, u_wind_600, u_wind_850,
                             v_wind_200, v_wind_600, v_wind_850) ~
                         year + STATION_NAME,
                       data = rain, FUN = median)
rainsmall = cbind(rainsmall1, rainsmall2)
# drop the duplicated year/STATION_NAME columns introduced by cbind()
rainsmall[, 4:5] = list(NULL)
rm(rainsmall1, rainsmall2)

# Madden-Julian Oscillation data (pentad resolution); keep years <= 2013
MJO = read.table("../data/madden_julien_1978_2015.txt", header = TRUE)
MJO$year = as.numeric(substr(paste(MJO$PENTAD), 1, 4))
# FIX: the original used MJO[-which(MJO$year > 2013), ], which drops ALL rows
# when no year exceeds 2013 (negative empty index). Keep rows whose year is
# <= 2013 or NA, matching the original's treatment of unparseable years.
MJO = MJO[is.na(MJO$year) | MJO$year <= 2013, ]
# coerce every column (read as factor/character) to numeric
for (i in seq_len(ncol(MJO))) {
  MJO[, i] = as.numeric(paste(MJO[, i]))
}
MJOsmall = aggregate(cbind(X20E, X70E, X80E, X100E, X120E, X140E, X160E,
                           X120W, X40W, X10W) ~ year,
                     data = MJO, FUN = median)
rainsmall = merge(rainsmall, MJOsmall)

# teleconnections data: yearly medians of NH teleconnection indices
tele = read.table("../data/teleconnections_1950_2015.txt", header = TRUE)
telesmall = aggregate(cbind(NAO, EA, WP, EPNP, PNA, EAWR, SCA, TNH, POL) ~ yyyy,
                      data = tele, FUN = median)
names(telesmall)[1] = 'year'
rainsmall = merge(rainsmall, telesmall)

# solar flux data: first column is the year, remaining columns are values
solar = read.table('../data/solar_flux_1948_2015.txt', header = FALSE)
solarsmall = data.frame(year = solar[, 1],
                        SolarFlux = apply(solar[, -1], 1, median))
rainsmall = merge(rainsmall, solarsmall)

# Temperature anomaly data: same layout as the solar flux file
temp = read.table('../data/temp_anomaly_index_1948_2012.txt', header = FALSE)
tempsmall = data.frame(year = temp[, 1],
                       TempAnomaly = apply(temp[, -1], 1, median))
rainsmall = merge(rainsmall, tempsmall)

# save data
# NOTE(review): '../Data' differs in case from the '../data' directory read
# above; harmless on Windows but writes elsewhere on case-sensitive
# filesystems -- confirm intent.
write.csv(rainsmall, '../Data/rainsmall.csv', row.names = FALSE)
/Codes/Process_data.R
no_license
shubhobm/Climate-indian-monsoon
R
false
false
2,016
r
## Sample analysis: build a yearly station-level dataset of Indian monsoon
## rainfall medians merged with climate covariates (MJO, teleconnections,
## solar flux, temperature anomaly), then save it as a CSV.
## NOTE(review): rm(list = ls()) and setwd() are kept from the original
## script; both are side effects best avoided in shared code.
rm(list = ls())
#setwd("\\\\dfs.com/root/Dept-Decision/Dept-Users/Majumdar/Rain")
setwd('C:/Study/My projects/Climate-indian monsoon/Codes')

# read in data: per-station yearly medians of rainfall and covariates
rain = read.csv("../data/rain_1973_2013_test.csv")
rainsmall1 = aggregate(PRCP ~ year + STATION_NAME, data = rain, FUN = median)
rainsmall2 = aggregate(cbind(LATITUDE, LONGITUDE, ELEVATION, TMAX, TMIN,
                             del_TT_Deg_Celsius, DMI, Nino34,
                             u_wind_200, u_wind_600, u_wind_850,
                             v_wind_200, v_wind_600, v_wind_850) ~
                         year + STATION_NAME,
                       data = rain, FUN = median)
rainsmall = cbind(rainsmall1, rainsmall2)
# drop the duplicated year/STATION_NAME columns introduced by cbind()
rainsmall[, 4:5] = list(NULL)
rm(rainsmall1, rainsmall2)

# Madden-Julian Oscillation data (pentad resolution); keep years <= 2013
MJO = read.table("../data/madden_julien_1978_2015.txt", header = TRUE)
MJO$year = as.numeric(substr(paste(MJO$PENTAD), 1, 4))
# FIX: the original used MJO[-which(MJO$year > 2013), ], which drops ALL rows
# when no year exceeds 2013 (negative empty index). Keep rows whose year is
# <= 2013 or NA, matching the original's treatment of unparseable years.
MJO = MJO[is.na(MJO$year) | MJO$year <= 2013, ]
# coerce every column (read as factor/character) to numeric
for (i in seq_len(ncol(MJO))) {
  MJO[, i] = as.numeric(paste(MJO[, i]))
}
MJOsmall = aggregate(cbind(X20E, X70E, X80E, X100E, X120E, X140E, X160E,
                           X120W, X40W, X10W) ~ year,
                     data = MJO, FUN = median)
rainsmall = merge(rainsmall, MJOsmall)

# teleconnections data: yearly medians of NH teleconnection indices
tele = read.table("../data/teleconnections_1950_2015.txt", header = TRUE)
telesmall = aggregate(cbind(NAO, EA, WP, EPNP, PNA, EAWR, SCA, TNH, POL) ~ yyyy,
                      data = tele, FUN = median)
names(telesmall)[1] = 'year'
rainsmall = merge(rainsmall, telesmall)

# solar flux data: first column is the year, remaining columns are values
solar = read.table('../data/solar_flux_1948_2015.txt', header = FALSE)
solarsmall = data.frame(year = solar[, 1],
                        SolarFlux = apply(solar[, -1], 1, median))
rainsmall = merge(rainsmall, solarsmall)

# Temperature anomaly data: same layout as the solar flux file
temp = read.table('../data/temp_anomaly_index_1948_2012.txt', header = FALSE)
tempsmall = data.frame(year = temp[, 1],
                       TempAnomaly = apply(temp[, -1], 1, median))
rainsmall = merge(rainsmall, tempsmall)

# save data
# NOTE(review): '../Data' differs in case from the '../data' directory read
# above; harmless on Windows but writes elsewhere on case-sensitive
# filesystems -- confirm intent.
write.csv(rainsmall, '../Data/rainsmall.csv', row.names = FALSE)
#' Return average hourly weather data and a plot showing the location of weather
#' stations for a particular county.
#'
#' Given a particular county FIPS code, this function returns a list with
#' hourly average weather values, station metadata, a map of contributing
#' weather stations, and the county's center coordinates and search radius.
#'
#' @inheritParams hourly_df
#'
#' @param station_label TRUE / FALSE to indicate if you want your plot of
#'    weather station locations to include labels indicating station usaf id
#'    numbers.
#' @param verbose TRUE / FALSE to indicate if you want the function to print
#'    out the name of the county it's processing.
#'
#' @return A list with six elements. The first element (\code{hourly_data}) is a
#'    dataframe of hourly weather data averaged across multiple stations, as
#'    well as columns (\code{"var"_reporting}) for each weather variable showing
#'    the number of stations contributing to the average for that variable for
#'    that hour. The second element (\code{station_metadata}) is a dataframe of
#'    station metadata for stations included in the \code{hourly_data}
#'    dataframe, as well as statistical information about the values contributed
#'    to each weather variable by each station. The third element
#'    (\code{station_map}) is a plot showing points for all weather stations for
#'    a particular county satisfying the conditions present in
#'    \code{hourly_fips}'s arguments (year, coverage, and/or weather variables).
#'    \code{radius} is the calculated radius within which stations were pulled
#'    from the county's center. Elements \code{lat_center} and \code{lon_center}
#'    are the latitude and longitude of the county's center.
#'
#' @note Observation times are based on Coordinated Universal Time Code (UTC).
#'
#' @examples
#' \dontrun{
#'
#' ex <- hourly_fips("12086", coverage = 0.90, year = c(1994, 1995),
#'                   var = "temperature")
#'
#' data <- ex$hourly_data
#' station_data <- ex$station_metadata
#' station_map <- ex$station_map
#' }
#' @export
hourly_fips <- function(fips, year, var = "all", coverage = NULL,
                        average_data = TRUE, station_label = FALSE,
                        verbose = TRUE) {

  census_data <- countyweather::county_centers
  loc_fips <- which(census_data$fips == fips)

  if (verbose) {
    message(paste0("Getting hourly weather data for ",
                   census_data[loc_fips, "name"], ".",
                   " This may take a while."))
  }

  weather_data <- hourly_df(fips = fips, year = year, var = var,
                            coverage = coverage, average_data = average_data)

  station_map <- hourly_stationmap(fips = fips, hourly_data = weather_data,
                                   station_label = station_label)

  # avoid shadowing base::list with a local of the same name
  out <- list("hourly_data" = weather_data$hourly_data,
              "station_metadata" = weather_data$station_df,
              "station_map" = station_map,
              "radius" = weather_data$radius,
              "lat_center" = weather_data$lat_center,
              "lon_center" = weather_data$lon_center)
  return(out)
}

#' Return average hourly weather data for a particular county.
#'
#' Returns a dataframe of average hourly weather values
#' for a particular county, year, weather variables, and/or specified coverage.
#'
#' This function serves as a wrapper to several functions from the \code{rnoaa}
#' package, which provides weather data from all relevant stations in a county.
#' This function filters and averages across NOAA ISD/ISH stations based on
#' user-specified coverage specifications.
#'
#' @param fips A character string of the five-digit U.S. FIPS code of a U.S.
#'    county.
#' @param year A four-digit number or vector of numbers indicating the year or
#'    years for which you want to pull hourly data. Values for \code{year} can
#'    be in the range from 1901 to the current year.
#' @param var A character vector specifying desired weather variables. For
#'    example, \code{var = c("wind_speed", "temperature")} pulls data on hourly
#'    wind speed and temperature. The core weather variables
#'    available include \code{"wind_direction"}, \code{"wind_speed"},
#'    \code{"ceiling_height"}, \code{"visibility_distance"},
#'    \code{"temperature"}, \code{"temperature_dewpoint"},
#'    \code{"air_pressure"}. Alternatively, you can specify var = "all" to
#'    include additional flag and quality codes.
#' @param average_data TRUE / FALSE to indicate if you want the function to
#'    average hourly weather data across multiple monitors.
#' @param coverage A numeric value in the range of 0 to 1 that specifies
#'    the desired percentage coverage for the weather variable (i.e., what
#'    percent of each weather variable must be non-missing to include data from
#'    a monitor when calculating hourly values averaged across monitors).
#'
#' @return A list with five elements. The first element, \code{hourly_data}, is
#'    a dataframe of hourly weather data averaged across multiple stations,
#'    as well as columns (\code{"var"_reporting}) for each weather variable
#'    showing the number of stations contributing to the average for that
#'    variable for each hour. \code{station_df} is a dataframe of station
#'    metadata for each station contributing weather data. A weather station
#'    will have one row per weather variable to which it contributes data. In
#'    addition to information such as USAF and WBAN ids and station names, this
#'    dataframe includes statistical information about weather values
#'    contributed by each station for each weather variable, including
#'    calculated coverage (\code{calc_coverage}), \code{standard_dev},
#'    \code{max}, \code{min}, and \code{range}. The element \code{radius} is the
#'    calculated radius within which stations were pulled from the county's
#'    center. Elements \code{lat_center} and \code{lon_center} are the latitude
#'    and longitude of the county's geographic center.
#'
#' @note Observation times are based on Coordinated Universal Time Code (UTC).
#'
#' @references For more information on this dataset and available weather and
#'    flag/quality variables, see
#'    \url{ftp://ftp.ncdc.noaa.gov/pub/data/noaa/ish-format-document.pdf}.
#'
#' @examples
#' \dontrun{
#' df <- hourly_df(fips = "12086", year = 1992,
#'                 var = c("wind_speed", "temperature"))
#' head(df$hourly_data)
#' head(df$station_df)
#' df$radius
#' }
#'
#' @export
hourly_df <- function(fips, year, var = "all", average_data = TRUE,
                      coverage = NULL) {

  # hourly data for all monitors in the county, one list element per year
  hourly_list <- lapply(year, function(x) {
    isd_monitors_data(fips = fips, year = x, var = var)
  })

  # stack per-year observations and (unfiltered) station metadata;
  # bind_rows over lapply avoids growing a dataframe inside a loop
  data <- dplyr::bind_rows(lapply(hourly_list, function(x) x$df))
  station_metadata <- dplyr::bind_rows(lapply(hourly_list, function(x) x$ids))

  filtered_list <- filter_hourly(fips = fips, hourly_data = data,
                                 coverage = coverage)
  station_stats <- filtered_list$stations
  filtered_stations <- unique(station_stats$station)

  # ISD uses all-9 sentinel codes for missing station identifiers
  station_metadata[station_metadata == "999999"] <- NA
  station_metadata[station_metadata == "99999"] <- NA

  # deprecated underscore verbs (mutate_/filter_/select_) replaced with
  # their data-masking equivalents
  station_metadata <- unique(station_metadata) %>%
    dplyr::mutate(station = paste(usaf, wban, sep = "-")) %>%
    dplyr::filter(station %in% filtered_stations)

  # combine station_metadata and station_stats
  station_metadata <- dplyr::right_join(station_metadata, station_stats,
                                        by = "station") %>%
    dplyr::select(usaf, wban, station, station_name, var, calc_coverage,
                  standard_dev, range, ctry, state, elev_m, begin, end,
                  lon, lat)

  # keep only observations from stations that passed the coverage filter
  data <- data %>%
    dplyr::mutate(station = paste(usaf_station, wban_station, sep = "-")) %>%
    dplyr::filter(station %in% filtered_stations) %>%
    dplyr::select(-station)

  if (average_data == TRUE) {
    ave_hourly_quietly <- purrr::quietly(ave_hourly)
    # FIX: purrr::quietly() wraps the return value in a list
    # (result/output/warnings/messages); the averaged dataframe is in
    # $result -- the original assigned the whole wrapper list.
    data <- ave_hourly_quietly(data)$result
  }

  data <- tibble::as_tibble(data)

  radius <- hourly_list[[1]]$radius
  lat_center <- hourly_list[[1]]$lat_center
  lon_center <- hourly_list[[1]]$lon_center

  out <- list("hourly_data" = data,
              "station_df" = station_metadata,
              "radius" = radius,
              "lat_center" = lat_center,
              "lon_center" = lon_center)
  return(out)
}

# Format a vector of FIPS codes for a user-facing message:
# "a", "a and b", or "a, b, and c" (matches the original message wording).
format_fips_codes <- function(fips) {
  n <- length(fips)
  if (n == 1) {
    fips
  } else if (n == 2) {
    paste0(fips[1], " and ", fips[2])
  } else {
    paste0(paste(fips[-n], collapse = ", "), ", and ", fips[n])
  }
}

#' Write hourly weather time series files for U.S. counties.
#'
#' Given a vector of U.S. county FIPS codes, this function saves each element of
#' the lists created from the function \code{hourly_fips} to a separate folder
#' within a given directory. The dataframe \code{hourly_data} is saved to a
#' subdirectory of the given directory called "data." This time series dataframe
#' gives the values for specified weather variables and the number of
#' weather stations contributing to the average for each hour within the
#' specified year(s). Metadata about the weather stations and
#' county are saved in a list with four elements in a subdirectory called
#' "metadata." These elements include \code{station_metadata} (station metadata
#' for stations contributing to the time series dataframe), \code{radius}
#' (the radius, in km, within which weather stations were pulled from each
#' county's center), \code{lat_center}, and \code{lon_center} (the latitude
#' and longitude of the county's geographic center). If the user specifies "csv"
#' output for the \code{metadata_type} argument, \code{radius},
#' \code{lat_center}, and \code{lon_center} are added to the
#' \code{station_metadata} dataframe as three additional columns.
#'
#' @return Writes out three subdirectories of a given directory, with hourly
#'    weather files saved in "data", station and county metadata saved in
#'    "metadata", and a map of weather station locations saved in "maps" for
#'    each FIPS code specified. The user can specify either .rds or .csv files
#'    for the data and metadata files, using the arguments \code{data_type} and
#'    \code{metadata_type}, respectively. Maps are saved as .png files.
#'
#' @inheritParams hourly_df
#' @inheritParams write_daily_timeseries
#' @param out_directory The absolute or relative pathname for the directory
#'    where you would like the time series files to be saved.
#' @param data_type A character string indicating that you would like either
#'    .rds files (\code{data_type = "rds"}) or .csv files
#'    (\code{data_type = "csv"}) for the time series output. This option
#'    defaults to .rds files.
#' @param metadata_type A character string indicating that you would like either
#'    .rds files (\code{metadata_type = "rds"}) or .csv files
#'    (\code{metadata_type = "csv"}) for the station and county metadata output.
#'    This option defaults to .rds files, in which case a list of four elements
#'    is saved (\code{station_metadata}, \code{radius}, \code{lat_center}, and
#'    \code{lon_center}). If the user specifies "csv" output, \code{radius},
#'    \code{lat_center}, and \code{lon_center} are added to the
#'    \code{station_metadata} dataframe as additional columns.
#' @param keep_map TRUE / FALSE indicating if a map of the stations should
#'    be included. The map can substantially increase the size of the files. If
#'    FALSE, the "maps" subdirectory will not be created.
#' @param verbose TRUE / FALSE to indicate if you want the function to print
#'    out the county or vector of counties it's saving files for.
#'
#' @note If the function is unable to pull weather data for a particular county
#'    given the specified percent coverage, date range, and/or weather
#'    variables, \code{write_hourly_timeseries} will not produce a file for that
#'    county.
#'
#' @examples
#' \dontrun{
#' write_hourly_timeseries(fips = c("08031", "12086"), year = c(1994, 1995),
#'                         coverage = 0.90,
#'                         var = c("wind_speed", "temperature"),
#'                         out_directory = "~/timeseries_hourly")
#' }
#' @export
write_hourly_timeseries <- function(fips, year, coverage = NULL, var = "all",
                                    out_directory, data_type = "rds",
                                    metadata_type = "rds", average_data = TRUE,
                                    station_label = FALSE, keep_map = TRUE,
                                    verbose = TRUE) {

  if (verbose) {
    # FIX: original tested `length(fips == 2)` (always truthy) instead of
    # `length(fips) == 2`; message construction consolidated in a helper.
    # FIX: messages said "daily weather files" in this hourly writer.
    codes <- format_fips_codes(fips)
    label <- if (length(fips) > 1) "FIPS codes " else "FIPS code "
    message(paste0("Saving hourly weather files for ", label, codes,
                   " in the directory ", out_directory, ".",
                   " This may take a while."))
  }

  if (!dir.exists(out_directory)) {
    dir.create(out_directory)
  }
  if (!dir.exists(paste0(out_directory, "/data"))) {
    dir.create(paste0(out_directory, "/data"))
  }
  if (!dir.exists(paste0(out_directory, "/metadata"))) {
    dir.create(paste0(out_directory, "/metadata"))
  }

  for (i in seq_along(fips)) {
    possibleError <- tryCatch({
      out_list <- hourly_fips(fips = fips[i], year = year, var = var,
                              coverage = coverage,
                              average_data = average_data,
                              station_label = station_label,
                              verbose = FALSE)

      out_data <- out_list$hourly_data
      # keep station_metadata, radius, lat_center, lon_center
      meta <- c(2, 4:6)
      out_metadata <- out_list[meta]

      if (data_type == "rds") {
        data_file <- paste0(out_directory, "/data", "/", fips[i], ".rds")
        saveRDS(out_data, file = data_file)
      } else if (data_type == "csv") {
        data_file <- paste0(out_directory, "/data", "/", fips[i], ".csv")
        utils::write.csv(out_data, file = data_file, row.names = FALSE)
      }

      if (metadata_type == "rds") {
        metadata_file <- paste0(out_directory, "/metadata", "/", fips[i],
                                ".rds")
        saveRDS(out_metadata, file = metadata_file)
      } else if (metadata_type == "csv") {
        # flatten the metadata list into extra columns on station_metadata
        out_metadata[[1]]$radius <- out_metadata[[2]]
        out_metadata[[1]]$lat_center <- out_metadata[[3]]
        out_metadata[[1]]$lon_center <- out_metadata[[4]]
        out_metadata <- out_metadata[[1]]
        metadata_file <- paste0(out_directory, "/metadata", "/", fips[i],
                                ".csv")
        utils::write.csv(out_metadata, file = metadata_file,
                         row.names = FALSE)
      }

      if (keep_map == TRUE) {
        if (!dir.exists(paste0(out_directory, "/maps"))) {
          dir.create(paste0(out_directory, "/maps"))
        }
        out_map <- out_list$station_map
        map_file <- paste0(out_directory, "/maps")
        map_name <- paste0(fips[i], ".png")
        suppressMessages(ggplot2::ggsave(file = map_name, path = map_file,
                                         plot = out_map))
      }
    },
    error = function(e) {
      message(paste0("Unable to pull weather data for FIPS code ", fips[i],
                     " for the specified percent coverage, year(s), and/or",
                     " weather variables."))
      # FIX: return the condition so the inherits() check below actually
      # works (the original handler returned message()'s NULL).
      e
    })
    if (inherits(possibleError, "error")) next
  }
}

#' Write plot files for hourly weather time series dataframes.
#'
#' Writes a directory with plots for every weather data time series file
#' present in the specified directory (as produced by the
#' \code{write_hourly_timeseries} function) for a particular weather variable.
#' These plots are meant to aid in initial exploratory analysis.
#'
#' @return Writes out a directory with plots of time series data for a given
#'    weather variable for each file present in the directory specified.
#'
#' @inheritParams plot_daily_timeseries
#' @param year A year or vector of years giving the year(s) present in the
#'    time series dataframe.
#'
#' @examples
#' \dontrun{
#' write_hourly_timeseries(fips = c("08031", "12086"), year = c(1994, 1995),
#'                         coverage = 0.90,
#'                         var = c("wind_speed", "temperature"),
#'                         out_directory = "~/timeseries_hourly")
#' plot_hourly_timeseries(var = "wind_speed", year = c(1994, 1995),
#'                        data_directory = "~/timeseries_hourly/data",
#'                        plot_directory = "~/timeseries_hourly/plots_wind_speed")
#' plot_hourly_timeseries(var = "temperature", year = c(1994, 1995),
#'                        data_directory = "~/timeseries_hourly/data",
#'                        plot_directory = "~/timeseries_hourly/plots_temperature")
#' }
#' @importFrom dplyr %>%
#' @export
plot_hourly_timeseries <- function(var, year, data_directory, plot_directory,
                                   data_type = "rds") {

  files <- list.files(data_directory)

  # common x-axis limits spanning the requested years (UTC)
  date_min <- as.POSIXct(paste0(min(year), "-01-01"), tz = "UTC")
  date_max <- as.POSIXct(paste0(max(year), "-12-31 23:00:00"), tz = "UTC")

  if (!dir.exists(plot_directory)) {
    dir.create(plot_directory)
  }

  # FIX: original csv branch called gsub(".csv", files) with no replacement
  # argument, which errors at runtime; also anchor/escape the extension.
  if (data_type == "rds") {
    file_names <- gsub("\\.rds$", "", files)
  } else if (data_type == "csv") {
    file_names <- gsub("\\.csv$", "", files)
  }

  # read and plot via full paths instead of juggling setwd() side effects
  for (i in seq_along(files)) {
    data_path <- file.path(data_directory, files[i])
    if (data_type == "csv") {
      # FIX: original always called readRDS, even for csv input
      dat <- utils::read.csv(data_path, stringsAsFactors = FALSE)
      # csv round-trip stores date_time as character; restore POSIXct so
      # plotting against the date axis limits works
      dat$date_time <- as.POSIXct(dat$date_time, tz = "UTC")
    } else {
      dat <- readRDS(data_path)
    }

    # extract the variable as a plain vector (avoids "'x' and 'y' lengths
    # differ" when dat is a tibble)
    y <- dat[[var]]

    grDevices::png(filename = file.path(plot_directory,
                                        paste0(file_names[i], ".png")))
    graphics::plot(dat$date_time, y,
                   type = "l", col = "red",
                   main = file_names[i], xlab = "date", ylab = var,
                   xlim = c(date_min, date_max))
    grDevices::dev.off()
  }
}
/R/hourly_fips.R
no_license
leighseverson/countyweather
R
false
false
19,619
r
#' Return average hourly weather data and a plot showing the location of weather #' stations for a particular county. #' #' Given a particular county FIPS code, this function returns a list with two #' elements: \code{data}, a dataframe of hourly average weather values, and #' \code{plot}, a plot showing the location of weather stations contributing to #' the average weather in \code{data}. #' #' @inheritParams hourly_df #' #' @param station_label TRUE / FALSE to indicate if you want your plot of #' weather station locations to include labels indicating station usaf id #' numbers. #' @param verbose TRUE / FALSE to indicate if you want the function to print #' out the name of the county it's processing. #' #' @return A list with six elements. The first element (\code{hourly_data}) is a #' dataframe of daily weather data averaged across multiple stations, as well #' as columns (\code{"var"_reporting}) for each weather variable showing the #' number of stations contributing to the average for that variable for that #' hour. The second element (\code{station_metadata}) is a dataframe of station #' metadata for stations included in the \code{daily_data} dataframe, as #' well as statistical information about the values contriuted to each #' weather variable by each station. The third element (\code{station_map}) #' is a plot showing points for all weather stations for a particular county #' satisfying the conditions present in \code{hourly_fips}'s arguments #' (year, coverage, and/or weather variables). \code{radius} is the #' calculated radius within which stations were pulled from the county's #' center. Elements \code{lat_center} and \code{lon_center} are the latitude #' and longitude of the county's center. #' #' @note Observation times are based on Coordinated Universal Time Code (UTC). 
#' #' @examples #' \dontrun{ #' #' ex <- hourly_fips("12086", coverage = 0.90, year = c(1994, 1995), #' var = "temperature") #' #' data <- ex$hourly_data #' station_data <- ex$station_metadata #' station_map <- ex$station_map #' } #' @export hourly_fips <- function(fips, year, var = "all", coverage = NULL, average_data = TRUE, station_label = FALSE, verbose = TRUE) { census_data <- countyweather::county_centers loc_fips <- which(census_data$fips == fips) if (verbose) { message(paste0("Getting hourly weather data for ", census_data[loc_fips, "name"], ".", " This may take a while.")) } weather_data <- hourly_df(fips = fips, year = year, var = var, coverage = coverage, average_data = average_data) station_map <- hourly_stationmap(fips = fips, hourly_data = weather_data, station_label = station_label) list <- list("hourly_data" = weather_data$hourly_data, "station_metadata" = weather_data$station_df, "station_map" = station_map, "radius" = weather_data$radius, "lat_center" = weather_data$lat_center, "lon_center" = weather_data$lon_center) return(list) } #' Return average hourly weather data for a particular county. #' #' Returns a dataframe of average daily weather values #' for a particular county, year, weather variables, and/or specified coverage. #' #' This function serves as a wrapper to several functions from the \code{rnoaa} #' package, which provides weather data from all relevant stations in a county. #' This function filters and averages across NOAA ISD/ISH stations based on #' user-specified coverage specifications. #' #' @param fips A character string of the five-digit U.S. FIPS code of a U.S. #' county. #' @param year A four-digit number or vector of numbers indicating the year or #' years for which you want to pull hourly data. Values for \code{year} can #' be in the range from 1901 to the current year. #' @param var A character vector specifying desired weather variables. 
#' For example, \code{var = c("wind_speed", "temperature")} pulls data on
#' hourly wind speed and temperature. The core weather variables available
#' include \code{"wind_direction"}, \code{"wind_speed"},
#' \code{"ceiling_height"}, \code{"visibility_distance"},
#' \code{"temperature"}, \code{"temperature_dewpoint"}, and
#' \code{"air_pressure"}. Alternatively, you can specify \code{var = "all"}
#' to include additional flag and quality codes.
#' @param average_data TRUE / FALSE to indicate if you want the function to
#'   average hourly weather data across multiple monitors.
#' @param coverage A numeric value in the range of 0 to 1 that specifies
#'   the desired percentage coverage for the weather variable (i.e., what
#'   percent of each weather variable must be non-missing to include data
#'   from a monitor when calculating daily values averaged across monitors).
#'
#' @return A list with five elements. The first element, \code{hourly_data},
#'   is a dataframe of hourly weather data averaged across multiple stations,
#'   as well as columns (\code{"var"_reporting}) for each weather variable
#'   showing the number of stations contributing to the average for that
#'   variable for each hour. \code{station_df} is a dataframe of station
#'   metadata for each station contributing weather data. A weather station
#'   will have one row per weather variable to which it contributes data. In
#'   addition to information such as USAF and WBAN ids and station names,
#'   this dataframe includes statistical information about weather values
#'   contributed by each station for each weather variable. These statistics
#'   include calculated coverage (\code{calc_coverage}), which is the percent
#'   of non-missing values for each station and variable for the specified
#'   date range, \code{standard_dev} (standard deviation), \code{max},
#'   \code{min}, and \code{range} values for each station-weather variable
#'   combination. The element \code{radius} is the calculated radius within
#'   which stations were pulled from the county's center. Elements
#'   \code{lat_center} and \code{lon_center} are the latitude and longitude
#'   of the county's geographic center.
#'
#' @note Observation times are based on Coordinated Universal Time Code (UTC).
#'
#' @references For more information on this dataset and available weather and
#'   flag/quality variables, see
#'   \url{ftp://ftp.ncdc.noaa.gov/pub/data/noaa/ish-format-document.pdf}.
#'
#' @examples
#' \dontrun{
#' df <- hourly_df(fips = "12086", year = 1992,
#'                 var = c("wind_speed", "temperature"))
#' head(df$hourly_data)
#' head(df$station_df)
#' df$radius
#' }
#'
#' @export
hourly_df <- function(fips, year, var = "all", average_data = TRUE,
                      coverage = NULL) {

  # Pull hourly data for all monitors in the county, one list element per
  # requested year.
  hourly_list <- lapply(year, function(x) {
    isd_monitors_data(fips = fips, year = x, var = var)
  })

  # Stack hourly observations and (unfiltered) station metadata across years.
  data <- dplyr::bind_rows(lapply(hourly_list, function(x) x$df))
  station_metadata <- dplyr::bind_rows(lapply(hourly_list, function(x) x$ids))

  filtered_list <- filter_hourly(fips = fips, hourly_data = data,
                                 coverage = coverage)
  station_stats <- filtered_list$stations
  filtered_stations <- unique(station_stats$station)

  # "999999" / "99999" are missing-value codes in the NOAA station metadata.
  station_metadata[station_metadata == "999999"] <- NA
  station_metadata[station_metadata == "99999"] <- NA

  station_metadata <- unique(station_metadata) %>%
    dplyr::mutate(station = paste(.data$usaf, .data$wban, sep = "-")) %>%
    dplyr::filter(.data$station %in% filtered_stations)

  # Combine station metadata with per-station coverage statistics.
  station_metadata <- dplyr::right_join(station_metadata, station_stats,
                                        by = "station") %>%
    dplyr::select("usaf", "wban", "station", "station_name", "var",
                  "calc_coverage", "standard_dev", "range", "ctry", "state",
                  "elev_m", "begin", "end", "lon", "lat")

  # Restrict the hourly data to stations that passed the coverage filter.
  data <- data %>%
    dplyr::mutate(station = paste(.data$usaf_station, .data$wban_station,
                                  sep = "-")) %>%
    dplyr::filter(.data$station %in% filtered_stations) %>%
    dplyr::select(-"station")

  if (average_data) {
    # quietly() wraps ave_hourly so its messages are captured instead of
    # printed; the averaged dataframe is in the $result component.
    ave_hourly_quietly <- purrr::quietly(ave_hourly)
    data <- ave_hourly_quietly(data)$result
  }

  data <- tibble::as_tibble(data)

  # Geographic metadata is the same for every year; take it from the first.
  radius <- hourly_list[[1]]$radius
  lat_center <- hourly_list[[1]]$lat_center
  lon_center <- hourly_list[[1]]$lon_center

  out <- list("hourly_data" = data,
              "station_df" = station_metadata,
              "radius" = radius,
              "lat_center" = lat_center,
              "lon_center" = lon_center)
  return(out)
}

#' Write hourly weather time series files for U.S. counties.
#'
#' Given a vector of U.S. county FIPS codes, this function saves each element
#' of the lists created from the function \code{hourly_fips} to a separate
#' folder within a given directory. The dataframe \code{hourly_data} is saved
#' to a subdirectory of the given directory called "data." This time series
#' dataframe gives the values for specified weather variables and the number
#' of weather stations contributing to the average for each hour within the
#' specified year(s). Metadata about the weather stations and county are
#' saved in a list with four elements in a subdirectory called "metadata."
#' These elements include \code{station_metadata} (station metadata for
#' stations contributing to the time series dataframe), \code{radius} (the
#' radius, in km, within which weather stations were pulled from each
#' county's center), \code{lat_center}, and \code{lon_center} (the latitude
#' and longitude of the county's geographic center). If the user specifies
#' "csv" output for the \code{metadata_type} argument, \code{radius},
#' \code{lat_center}, and \code{lon_center} are added to the
#' \code{station_metadata} dataframe as three additional columns.
#'
#' @return Writes out three subdirectories of a given directory, with hourly
#'   weather files saved in "data", station and county metadata saved in
#'   "metadata", and a map of weather station locations saved in "maps" for
#'   each FIPS code specified. The user can specify either .rds or .csv files
#'   for the data and metadata files, using the arguments \code{data_type}
#'   and \code{metadata_type}, respectively. Maps are saved as .png files.
#'
#' @inheritParams hourly_df
#' @inheritParams write_daily_timeseries
#' @param out_directory The absolute or relative pathname for the directory
#'   where you would like the time series files to be saved.
#' @param data_type A character string indicating that you would like either
#'   .rds files (\code{data_type = "rds"}) or .csv files
#'   (\code{data_type = "csv"}) for the time series output. This option
#'   defaults to .rds files.
#' @param metadata_type A character string indicating that you would like
#'   either .rds files (\code{metadata_type = "rds"}) or .csv files
#'   (\code{metadata_type = "csv"}) for the station and county metadata
#'   output. This option defaults to .rds files, in which case a list of four
#'   elements is saved (\code{station_metadata}, \code{radius},
#'   \code{lat_center}, and \code{lon_center}). If the user specifies "csv"
#'   output, \code{radius}, \code{lat_center}, and \code{lon_center} are
#'   added to the \code{station_metadata} dataframe as additional columns.
#' @param keep_map TRUE / FALSE indicating if a map of the stations should
#'   be included. The map can substantially increase the size of the files.
#'   If FALSE, the "maps" subdirectory will not be created.
#' @param verbose TRUE / FALSE to indicate if you want the function to print
#'   out the county or vector of counties it's saving files for.
#'
#' @note If the function is unable to pull weather data for a particular
#'   county given the specified percent coverage, date range, and/or weather
#'   variables, \code{write_hourly_timeseries} will not produce a file for
#'   that county.
#'
#' @examples
#' \dontrun{
#' write_hourly_timeseries(fips = c("08031", "12086"), year = c(1994, 1995),
#'                         coverage = 0.90,
#'                         var = c("wind_speed", "temperature"),
#'                         out_directory = "~/timeseries_hourly")
#' }
#' @export
write_hourly_timeseries <- function(fips, year, coverage = NULL, var = "all",
                                    out_directory, data_type = "rds",
                                    metadata_type = "rds",
                                    average_data = TRUE,
                                    station_label = FALSE, keep_map = TRUE,
                                    verbose = TRUE) {

  if (verbose) {
    # Build a human-readable listing of the FIPS codes being processed.
    # (The original tested `length(fips == 2)`, which is always the length
    # of `fips`, so the two-code branch was unreachable.)
    if (length(fips) == 1) {
      codes <- fips
    } else if (length(fips) == 2) {
      codes <- paste(fips, collapse = " and ")
    } else {
      codes <- paste0(paste(fips[-length(fips)], collapse = ", "),
                      ", and ", fips[length(fips)])
    }
    message(paste0("Saving hourly weather files for FIPS code",
                   if (length(fips) > 1) "s " else " ",
                   codes, " in the directory ", out_directory, ".",
                   " This may take a while."))
  }

  if (!dir.exists(out_directory)) {
    dir.create(out_directory)
  }
  if (!dir.exists(paste0(out_directory, "/data"))) {
    dir.create(paste0(out_directory, "/data"))
  }
  if (!dir.exists(paste0(out_directory, "/metadata"))) {
    dir.create(paste0(out_directory, "/metadata"))
  }

  for (i in seq_along(fips)) {
    possibleError <- tryCatch({
      out_list <- hourly_fips(fips = fips[i], year = year, var = var,
                              coverage = coverage,
                              average_data = average_data,
                              station_label = station_label,
                              verbose = FALSE)

      out_data <- out_list$hourly_data
      # Positions 2 and 4:6 of the hourly_fips() return list hold, in order,
      # station_metadata, radius, lat_center and lon_center (position 3 is
      # the station map, which is saved separately below).
      meta <- c(2, 4:6)
      out_metadata <- out_list[meta]

      if (data_type == "rds") {
        data_file <- paste0(out_directory, "/data", "/", fips[i], ".rds")
        saveRDS(out_data, file = data_file)
      } else if (data_type == "csv") {
        data_file <- paste0(out_directory, "/data", "/", fips[i], ".csv")
        utils::write.csv(out_data, file = data_file, row.names = FALSE)
      }

      if (metadata_type == "rds") {
        metadata_file <- paste0(out_directory, "/metadata", "/",
                                fips[i], ".rds")
        saveRDS(out_metadata, file = metadata_file)
      } else if (metadata_type == "csv") {
        # For csv output, flatten the metadata list into one dataframe.
        out_metadata[[1]]$radius <- out_metadata[[2]]
        out_metadata[[1]]$lat_center <- out_metadata[[3]]
        out_metadata[[1]]$lon_center <- out_metadata[[4]]
        out_metadata <- out_metadata[[1]]
        metadata_file <- paste0(out_directory, "/metadata", "/",
                                fips[i], ".csv")
        utils::write.csv(out_metadata, file = metadata_file,
                         row.names = FALSE)
      }

      if (keep_map == TRUE) {
        if (!dir.exists(paste0(out_directory, "/maps"))) {
          dir.create(paste0(out_directory, "/maps"))
        }
        out_map <- out_list$station_map
        map_file <- paste0(out_directory, "/maps")
        map_name <- paste0(fips[i], ".png")
        suppressMessages(ggplot2::ggsave(file = map_name, path = map_file,
                                         plot = out_map))
      }
    }, error = function(e) {
      message(paste0("Unable to pull weather data for FIPS code ", fips[i],
                     " for the specified percent coverage, year(s), and/or",
                     " weather variables."))
      # Return the condition so the inherits() check below works.
      e
    })
    if (inherits(possibleError, "error")) next
  }
}

#' Write plot files for hourly weather time series dataframes.
#'
#' Writes a directory with plots for every weather data time series file
#' present in the specified directory (as produced by the
#' \code{write_hourly_timeseries} function) for a particular weather
#' variable. These plots are meant to aid in initial exploratory analysis.
#'
#' @return Writes out a directory with plots of time series data for a given
#'   weather variable for each file present in the directory specified.
#'
#' @inheritParams plot_daily_timeseries
#' @param year A year or vector of years giving the year(s) present in the
#'   time series dataframe.
#'
#' @examples
#' \dontrun{
#' write_hourly_timeseries(fips = c("08031", "12086"), year = c(1994, 1995),
#'                         coverage = 0.90,
#'                         var = c("wind_speed", "temperature"),
#'                         out_directory = "~/timeseries_hourly")
#' plot_hourly_timeseries(var = "wind_speed", year = c(1994, 1995),
#'                        data_directory = "~/timeseries_hourly/data",
#'                        plot_directory = "~/timeseries_hourly/plots_wind_speed")
#' plot_hourly_timeseries(var = "temperature", year = c(1994, 1995),
#'                        data_directory = "~/timeseries_hourly/data",
#'                        plot_directory = "~/timeseries_hourly/plots_temperature")
#' }
#' @importFrom dplyr %>%
#' @export
plot_hourly_timeseries <- function(var, year, data_directory, plot_directory,
                                   data_type = "rds") {
  files <- list.files(data_directory)

  # Fix the x-axis to the full requested date range so all plots share it.
  date_min <- as.POSIXct(paste0(min(year), "-01-01"), tz = "UTC")
  date_max <- as.POSIXct(paste0(max(year), "-12-31 23:00:00"), tz = "UTC")

  if (!dir.exists(plot_directory)) {
    dir.create(plot_directory)
  }

  # Strip the file extension to recover the FIPS code used as the plot title.
  # (The original csv branch called gsub(".csv", files) with no replacement
  # argument, which errors.)
  if (data_type == "rds") {
    file_names <- gsub("\\.rds$", "", files)
  } else if (data_type == "csv") {
    file_names <- gsub("\\.csv$", "", files)
  }

  for (i in seq_along(files)) {
    data_file <- file.path(data_directory, files[i])
    if (data_type == "rds") {
      dat <- readRDS(data_file)
    } else {
      # csv files cannot be read with readRDS; parse the timestamp column
      # back into POSIXct so plotting against time works.
      dat <- utils::read.csv(data_file, stringsAsFactors = FALSE)
      dat$date_time <- as.POSIXct(dat$date_time, tz = "UTC")
    }

    # Extract the plotted variable as a plain vector (avoids the
    # "'x' and 'y' lengths differ" error when the column is a tibble).
    y <- dat[[var]]

    # Write the plot with full paths instead of setwd() round-trips, so an
    # error cannot leave the working directory changed.
    grDevices::png(filename = file.path(plot_directory,
                                        paste0(file_names[i], ".png")))
    graphics::plot(dat$date_time, y,
                   type = "l", col = "red",
                   main = file_names[i],
                   xlab = "date", ylab = var,
                   xlim = c(date_min, date_max))
    grDevices::dev.off()
  }
}
#' Download dose-response model database from QMRAwiki
#'
#' @param fromInternet download from internet (default: FALSE); if FALSE
#'   import from local copy bundled with the package
#' @return tibble for different microbial parameters
#' @source
#' http://qmrawiki.canr.msu.edu/index.php?title=Table_of_Recommended_Best-Fit_Parameters
#'
#' @export
#'
dr.db_download <- function(fromInternet = FALSE) {

  # Load the database from the bundled CSV file if requested
  if (! fromInternet) {
    file <- system.file("extdata/doseresponse/dr_db.csv", package = "kwb.qmra")
    return(readr::read_csv(file))
  }

  # Otherwise scrape the three pathogen tables from the QMRAwiki pages
  pathogens <- c("Bacteria", "Viruses", "Protozoa")

  url <- "http://qmrawiki.canr.msu.edu/index.php"
  url_fmt <- "%s?title=Table_of_Recommended_Best-Fit_Parameters#tab=%s"
  xpath_fmt <- "/html/body/div[3]/div/div[4]/div/div/div[%d]/div/table"

  dr.db <- do.call(rbind, lapply(seq_along(pathogens), function(i) {
    result <- xml2::read_html(sprintf(url_fmt, url, pathogens[i])) %>%
      rvest::html_node(xpath = sprintf(xpath_fmt, i)) %>%
      rvest::html_table()
    result$Link <- sprintf("%s/%s", url, gsub(" ", "_", result$Agent))
    result$PathogenGroup <- pathogens[i]
    result
  }))

  dr.db <- dr.db %>% dplyr::filter(.data$Agent != "TestPage")

  # Pathogen name is the part of "Agent" before the first colon
  dr.db$PathogenName <- stringr::str_replace(
    string = dr.db$Agent,
    pattern = ":.*",
    replacement = ""
  )

  # Turn the agent name into a markdown link to its wiki page
  dr.db$Agent <- sprintf("[%s](%s)", dr.db$Agent, dr.db$Link)

  best_fit_model <- dr.db$`Best fit model*`
  parameters <- dr.db$`Optimized parameter(s)`

  # Exponential models report a single parameter "k = <value>"
  dr.db$k <- as.numeric(ifelse(
    test = (best_fit_model == "exponential"),
    yes = stringr::str_replace(parameters, "k\\s*=", ""),
    no = NA
  ))

  # Beta-Poisson models report two scientific-notation values:
  # alpha first, N50 second
  dr.db$alpha <- as.numeric(ifelse(
    test = (best_fit_model == "beta-Poisson"),
    yes = stringr::str_extract_all(
      parameters, pattern = "\\d+\\.\\d+E[+-]\\d{2}", simplify = TRUE
    )[, 1],
    no = NA
  ))

  dr.db$N50 <- as.numeric(ifelse(
    test = (best_fit_model == "beta-Poisson"),
    yes = stringr::str_extract_all(
      parameters, pattern = "\\d+\\.\\d+E[+-]\\d{2}", simplify = TRUE
    )[, 2],
    no = NA
  ))

  dr.db$PathogenID <- seq_len(nrow(dr.db))

  dr.db
}

#' Dose-response model: exponential
#'
#' @param dose vector of dose data (default:
#'   \code{sfsmisc::lseq(from = 1, to = 10^10, length = 1000)})
#' @param k k-value (default: 5.72E-01)
#' @return tibble with columns model, dose, infectionProbability and k
#' @export
#' @importFrom sfsmisc lseq
#'
dr.expo <- function(
  dose = sfsmisc::lseq(from = 1, to = 10^10, length = 1000),
  k = 5.72E-01
) {
  dplyr::as_tibble(data.frame(
    model = "exponential",
    dose = dose,
    # P(infection) = 1 - exp(-k * dose)
    infectionProbability = 1 - exp(-k * dose),
    k = k
  ))
}

#' Dose-response model: beta-poisson
#'
#' @param dose vector of dose data (default:
#'   \code{sfsmisc::lseq(from = 1, to = 10^10, length = 1000)})
#' @param alpha alpha (default: 3.28E-01)
#' @param N50 N50 (default: 5.43E+03)
#' @return tibble with columns model, dose, infectionProbability, alpha
#'   and N50
#' @export
#' @importFrom sfsmisc lseq
#'
dr.betapoisson <- function(
  dose = sfsmisc::lseq(from = 1, to = 10^10, length = 1000),
  alpha = 3.28E-01,
  N50 = 5.43E+03
) {
  dplyr::as_tibble(data.frame(
    model = "betapoisson",
    dose = dose,
    # Approximate beta-Poisson: 1 - (1 + dose * (2^(1/alpha) - 1) / N50)^-alpha
    infectionProbability = 1 - (1 + dose * (2 ^ (1/alpha) - 1)/N50) ^ -alpha,
    alpha = alpha,
    N50 = N50
  ))
}

#' Generate table with different doses for dr.db_download()
#'
#' @param dr.db as retrieved by dr.db_download(), default: dr.db_download()
#' @param dose vector of dose data (default:
#'   \code{sfsmisc::lseq(from = 1, to = 10^10, length = 1000)})
#' @return tibble
#' @export
#' @importFrom plyr rbind.fill
#'
dr.db_model <- function(
  dr.db = dr.db_download(),
  dose = sfsmisc::lseq(from = 1, to = 10^10, length = 1000)
) {
  is_expo <- (dr.db$`Best fit model*` == "exponential")

  dr_model_expo <- if (any(is_expo)) {
    get_dr_model(dr.db[is_expo, ], dose, exponential = TRUE)
  } # else NULL

  dr_model_poisson <- if (any(! is_expo)) {
    get_dr_model(dr.db[! is_expo, ], dose, exponential = FALSE)
  } # else NULL

  plyr::rbind.fill(dr_model_expo, dr_model_poisson) %>%
    dplyr::as_tibble()
}

# get_dr_model -----------------------------------------------------------------
# Evaluate one dose-response model per pathogen row and bind the results,
# carrying the pathogen key columns along.
get_dr_model <- function(dose_response_db, dose, exponential) {
  key_columns <- c("PathogenID", "PathogenGroup", "PathogenName")

  result_rows <- lapply(seq_len(nrow(dose_response_db)), function(i) {
    pars <- dose_response_db[i, ]
    model <- if (exponential) {
      dr.expo(dose = dose, k = pars$k)
    } else {
      dr.betapoisson(dose = dose, alpha = pars$alpha, N50 = pars$N50)
    }
    cbind(pars[, key_columns], model)
  })

  do.call(plyr::rbind.fill, result_rows)
}

# Interactive exploration snippet; never run when the package is loaded.
if (FALSE) {
  dr.db <- dr.db_download()
  dr.model <- dr.db_model(dr.db = dr.db)

  ggplot2::ggplot(dr.model, ggplot2::aes_string(
    x = "dose", y = "infectionProbability", col = "PathogenGroup"
  )) +
    ggplot2::geom_point() +
    ggplot2::scale_x_log10() +
    ggplot2::theme_bw()

  tt <- dr.model %>%
    dplyr::filter(
      .data$infectionProbability > 0.49,
      .data$infectionProbability < 0.51
    ) %>%
    dplyr::group_by(
      .data$PathogenID, .data$PathogenGroup, .data$PathogenName
    ) %>%
    dplyr::summarise(
      infectionProbability = round(median(.data$infectionProbability), 2),
      dose = median(.data$dose)
    ) %>%
    dplyr::arrange(.data$dose)

  ggplot2::ggplot(tt, ggplot2::aes_string(
    "PathogenGroup", "dose", col = "PathogenGroup"
  )) +
    ggplot2::geom_point(position = ggplot2::position_jitter(w = 0, h = 0)) +
    ggrepel::geom_text_repel(ggplot2::aes_string(label = "PathogenName")) +
    ggplot2::scale_y_log10() +
    ggplot2::theme_bw() +
    ggplot2::guides(fill = FALSE) +
    ggplot2::ylab("Dose with 50% infection probability")
}
/R/doseresponse.R
permissive
KWB-R/kwb.qmra
R
false
false
5,978
r
#' Download dose-response model database from QMRAwiki
#'
#' @param fromInternet download from internet (default: FALSE); if FALSE
#'   import from local copy bundled with the package
#' @return tibble for different microbial parameters
#' @source
#' http://qmrawiki.canr.msu.edu/index.php?title=Table_of_Recommended_Best-Fit_Parameters
#'
#' @export
#'
dr.db_download <- function(fromInternet = FALSE) {

  # Load the database from the bundled CSV file if requested
  if (! fromInternet) {
    file <- system.file("extdata/doseresponse/dr_db.csv", package = "kwb.qmra")
    return(readr::read_csv(file))
  }

  # Otherwise scrape the three pathogen tables from the QMRAwiki pages
  pathogens <- c("Bacteria", "Viruses", "Protozoa")

  url <- "http://qmrawiki.canr.msu.edu/index.php"
  url_fmt <- "%s?title=Table_of_Recommended_Best-Fit_Parameters#tab=%s"
  xpath_fmt <- "/html/body/div[3]/div/div[4]/div/div/div[%d]/div/table"

  dr.db <- do.call(rbind, lapply(seq_along(pathogens), function(i) {
    result <- xml2::read_html(sprintf(url_fmt, url, pathogens[i])) %>%
      rvest::html_node(xpath = sprintf(xpath_fmt, i)) %>%
      rvest::html_table()
    result$Link <- sprintf("%s/%s", url, gsub(" ", "_", result$Agent))
    result$PathogenGroup <- pathogens[i]
    result
  }))

  dr.db <- dr.db %>% dplyr::filter(.data$Agent != "TestPage")

  # Pathogen name is the part of "Agent" before the first colon
  dr.db$PathogenName <- stringr::str_replace(
    string = dr.db$Agent,
    pattern = ":.*",
    replacement = ""
  )

  # Turn the agent name into a markdown link to its wiki page
  dr.db$Agent <- sprintf("[%s](%s)", dr.db$Agent, dr.db$Link)

  best_fit_model <- dr.db$`Best fit model*`
  parameters <- dr.db$`Optimized parameter(s)`

  # Exponential models report a single parameter "k = <value>"
  dr.db$k <- as.numeric(ifelse(
    test = (best_fit_model == "exponential"),
    yes = stringr::str_replace(parameters, "k\\s*=", ""),
    no = NA
  ))

  # Beta-Poisson models report two scientific-notation values:
  # alpha first, N50 second
  dr.db$alpha <- as.numeric(ifelse(
    test = (best_fit_model == "beta-Poisson"),
    yes = stringr::str_extract_all(
      parameters, pattern = "\\d+\\.\\d+E[+-]\\d{2}", simplify = TRUE
    )[, 1],
    no = NA
  ))

  dr.db$N50 <- as.numeric(ifelse(
    test = (best_fit_model == "beta-Poisson"),
    yes = stringr::str_extract_all(
      parameters, pattern = "\\d+\\.\\d+E[+-]\\d{2}", simplify = TRUE
    )[, 2],
    no = NA
  ))

  dr.db$PathogenID <- seq_len(nrow(dr.db))

  dr.db
}

#' Dose-response model: exponential
#'
#' @param dose vector of dose data (default:
#'   \code{sfsmisc::lseq(from = 1, to = 10^10, length = 1000)})
#' @param k k-value (default: 5.72E-01)
#' @return tibble with columns model, dose, infectionProbability and k
#' @export
#' @importFrom sfsmisc lseq
#'
dr.expo <- function(
  dose = sfsmisc::lseq(from = 1, to = 10^10, length = 1000),
  k = 5.72E-01
) {
  dplyr::as_tibble(data.frame(
    model = "exponential",
    dose = dose,
    # P(infection) = 1 - exp(-k * dose)
    infectionProbability = 1 - exp(-k * dose),
    k = k
  ))
}

#' Dose-response model: beta-poisson
#'
#' @param dose vector of dose data (default:
#'   \code{sfsmisc::lseq(from = 1, to = 10^10, length = 1000)})
#' @param alpha alpha (default: 3.28E-01)
#' @param N50 N50 (default: 5.43E+03)
#' @return tibble with columns model, dose, infectionProbability, alpha
#'   and N50
#' @export
#' @importFrom sfsmisc lseq
#'
dr.betapoisson <- function(
  dose = sfsmisc::lseq(from = 1, to = 10^10, length = 1000),
  alpha = 3.28E-01,
  N50 = 5.43E+03
) {
  dplyr::as_tibble(data.frame(
    model = "betapoisson",
    dose = dose,
    # Approximate beta-Poisson: 1 - (1 + dose * (2^(1/alpha) - 1) / N50)^-alpha
    infectionProbability = 1 - (1 + dose * (2 ^ (1/alpha) - 1)/N50) ^ -alpha,
    alpha = alpha,
    N50 = N50
  ))
}

#' Generate table with different doses for dr.db_download()
#'
#' @param dr.db as retrieved by dr.db_download(), default: dr.db_download()
#' @param dose vector of dose data (default:
#'   \code{sfsmisc::lseq(from = 1, to = 10^10, length = 1000)})
#' @return tibble
#' @export
#' @importFrom plyr rbind.fill
#'
dr.db_model <- function(
  dr.db = dr.db_download(),
  dose = sfsmisc::lseq(from = 1, to = 10^10, length = 1000)
) {
  is_expo <- (dr.db$`Best fit model*` == "exponential")

  dr_model_expo <- if (any(is_expo)) {
    get_dr_model(dr.db[is_expo, ], dose, exponential = TRUE)
  } # else NULL

  dr_model_poisson <- if (any(! is_expo)) {
    get_dr_model(dr.db[! is_expo, ], dose, exponential = FALSE)
  } # else NULL

  plyr::rbind.fill(dr_model_expo, dr_model_poisson) %>%
    dplyr::as_tibble()
}

# get_dr_model -----------------------------------------------------------------
# Evaluate one dose-response model per pathogen row and bind the results,
# carrying the pathogen key columns along.
get_dr_model <- function(dose_response_db, dose, exponential) {
  key_columns <- c("PathogenID", "PathogenGroup", "PathogenName")

  result_rows <- lapply(seq_len(nrow(dose_response_db)), function(i) {
    pars <- dose_response_db[i, ]
    model <- if (exponential) {
      dr.expo(dose = dose, k = pars$k)
    } else {
      dr.betapoisson(dose = dose, alpha = pars$alpha, N50 = pars$N50)
    }
    cbind(pars[, key_columns], model)
  })

  do.call(plyr::rbind.fill, result_rows)
}

# Interactive exploration snippet; never run when the package is loaded.
if (FALSE) {
  dr.db <- dr.db_download()
  dr.model <- dr.db_model(dr.db = dr.db)

  ggplot2::ggplot(dr.model, ggplot2::aes_string(
    x = "dose", y = "infectionProbability", col = "PathogenGroup"
  )) +
    ggplot2::geom_point() +
    ggplot2::scale_x_log10() +
    ggplot2::theme_bw()

  tt <- dr.model %>%
    dplyr::filter(
      .data$infectionProbability > 0.49,
      .data$infectionProbability < 0.51
    ) %>%
    dplyr::group_by(
      .data$PathogenID, .data$PathogenGroup, .data$PathogenName
    ) %>%
    dplyr::summarise(
      infectionProbability = round(median(.data$infectionProbability), 2),
      dose = median(.data$dose)
    ) %>%
    dplyr::arrange(.data$dose)

  ggplot2::ggplot(tt, ggplot2::aes_string(
    "PathogenGroup", "dose", col = "PathogenGroup"
  )) +
    ggplot2::geom_point(position = ggplot2::position_jitter(w = 0, h = 0)) +
    ggrepel::geom_text_repel(ggplot2::aes_string(label = "PathogenName")) +
    ggplot2::scale_y_log10() +
    ggplot2::theme_bw() +
    ggplot2::guides(fill = FALSE) +
    ggplot2::ylab("Dose with 50% infection probability")
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JointAI.R
\name{sharedParams}
\alias{sharedParams}
\title{Parameters used by several functions in testpack}
\arguments{
\item{object}{object inheriting from class 'testpack'}

\item{no_model}{optional; vector of names of variables for which no model
should be specified. Note that this is only possible for completely observed
variables and implies the assumption of independence between the excluded
variable and the incomplete variables.}

\item{timevar}{name of the variable indicating the time of the measurement
of a time-varying covariate in a proportional hazards survival model (also
in a joint model). The variable specified in "timevar" will automatically
be added to "no_model".}

\item{assoc_type}{named vector specifying the type of the association used
for a time-varying covariate in the linear predictor of the survival model
when using a "JM" model. Implemented options are "underl.value" (linear
predictor; default for covariates modelled using a Gaussian, Gamma, beta or
log-normal distribution) and "obs.value" (the observed/imputed value;
default for covariates modelled using other distributions).}

\item{subset}{subset of parameters/variables/nodes (columns in the MCMC
sample).
Follows the same principle as the argument \code{monitor_params} in \code{\link[testpack:model_imp]{*_imp}}.} \item{exclude_chains}{optional vector of the index numbers of chains that should be excluded} \item{start}{the first iteration of interest (see \code{\link[coda]{window.mcmc}})} \item{end}{the last iteration of interest (see \code{\link[coda]{window.mcmc}})} \item{n.adapt}{number of iterations for adaptation of the MCMC samplers (see \code{\link[rjags]{adapt}})} \item{n.iter}{number of iterations of the MCMC chain (after adaptation; see \code{\link[rjags]{coda.samples}})} \item{n.chains}{number of MCMC chains} \item{quiet}{logical; if \code{TRUE} then messages generated by \strong{rjags} during compilation as well as the progress bar for the adaptive phase will be suppressed, (see \code{\link[rjags]{jags.model}})} \item{thin}{thinning interval (integer; see \code{\link[coda]{window.mcmc}}). For example, \code{thin = 1} (default) will keep the MCMC samples from all iterations; \code{thin = 5} would only keep every 5th iteration.} \item{nrow}{optional; number of rows in the plot layout; automatically chosen if unspecified} \item{ncol}{optional; number of columns in the plot layout; automatically chosen if unspecified} \item{use_ggplot}{logical; Should ggplot be used instead of the base graphics?} \item{warn}{logical; should warnings be given? Default is \code{TRUE}. (Note: this applies only to warnings given directly by \strong{testpack}.)} \item{mess}{logical; should messages be given? Default is \code{TRUE}. (Note: this applies only to messages given directly by \strong{testpack}.)} \item{xlab, ylab}{labels for the x- and y-axis} \item{idvars}{name of the column that specifies the multi-level grouping structure} \item{ridge}{logical; should the parameters of the main model be penalized using ridge regression? Default is \code{FALSE}} \item{parallel}{logical; should the chains be sampled using parallel computation? 
Default is \code{FALSE}} \item{n.cores}{number of cores to use for parallel computation; if left empty all except two cores will be used} \item{seed}{optional; seed value (for reproducibility)} \item{ppc}{logical: should monitors for posterior predictive checks be set? (not yet used)} } \description{ Parameters used by several functions in testpack }
/man/sharedParams.Rd
permissive
NErler/testpack
R
false
true
3,623
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JointAI.R
\name{sharedParams}
\alias{sharedParams}
\title{Parameters used by several functions in testpack}
\arguments{
\item{object}{object inheriting from class 'testpack'}

\item{no_model}{optional; vector of names of variables for which no model
should be specified. Note that this is only possible for completely observed
variables and implies the assumption of independence between the excluded
variable and the incomplete variables.}

\item{timevar}{name of the variable indicating the time of the measurement
of a time-varying covariate in a proportional hazards survival model (also
in a joint model). The variable specified in "timevar" will automatically
be added to "no_model".}

\item{assoc_type}{named vector specifying the type of the association used
for a time-varying covariate in the linear predictor of the survival model
when using a "JM" model. Implemented options are "underl.value" (linear
predictor; default for covariates modelled using a Gaussian, Gamma, beta or
log-normal distribution) and "obs.value" (the observed/imputed value;
default for covariates modelled using other distributions).}

\item{subset}{subset of parameters/variables/nodes (columns in the MCMC
sample).
Follows the same principle as the argument \code{monitor_params} in \code{\link[testpack:model_imp]{*_imp}}.} \item{exclude_chains}{optional vector of the index numbers of chains that should be excluded} \item{start}{the first iteration of interest (see \code{\link[coda]{window.mcmc}})} \item{end}{the last iteration of interest (see \code{\link[coda]{window.mcmc}})} \item{n.adapt}{number of iterations for adaptation of the MCMC samplers (see \code{\link[rjags]{adapt}})} \item{n.iter}{number of iterations of the MCMC chain (after adaptation; see \code{\link[rjags]{coda.samples}})} \item{n.chains}{number of MCMC chains} \item{quiet}{logical; if \code{TRUE} then messages generated by \strong{rjags} during compilation as well as the progress bar for the adaptive phase will be suppressed, (see \code{\link[rjags]{jags.model}})} \item{thin}{thinning interval (integer; see \code{\link[coda]{window.mcmc}}). For example, \code{thin = 1} (default) will keep the MCMC samples from all iterations; \code{thin = 5} would only keep every 5th iteration.} \item{nrow}{optional; number of rows in the plot layout; automatically chosen if unspecified} \item{ncol}{optional; number of columns in the plot layout; automatically chosen if unspecified} \item{use_ggplot}{logical; Should ggplot be used instead of the base graphics?} \item{warn}{logical; should warnings be given? Default is \code{TRUE}. (Note: this applies only to warnings given directly by \strong{testpack}.)} \item{mess}{logical; should messages be given? Default is \code{TRUE}. (Note: this applies only to messages given directly by \strong{testpack}.)} \item{xlab, ylab}{labels for the x- and y-axis} \item{idvars}{name of the column that specifies the multi-level grouping structure} \item{ridge}{logical; should the parameters of the main model be penalized using ridge regression? Default is \code{FALSE}} \item{parallel}{logical; should the chains be sampled using parallel computation? 
Default is \code{FALSE}} \item{n.cores}{number of cores to use for parallel computation; if left empty all except two cores will be used} \item{seed}{optional; seed value (for reproducibility)} \item{ppc}{logical: should monitors for posterior predictive checks be set? (not yet used)} } \description{ Parameters used by several functions in testpack }
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is regenerated by Rcpp::compileAttributes(); any
# hand-written comments here will be lost on the next regeneration.

# Internal wrapper around the compiled routine registered as
# '_lidRplugins_C_lascoplanar'; all arguments are passed through unchanged.
C_lascoplanar <- function(las, k, th1, th2) {
    .Call('_lidRplugins_C_lascoplanar', PACKAGE = 'lidRplugins', las, k, th1, th2)
}

# Internal wrapper around the compiled routine registered as
# '_lidRplugins_C_hamraz_segmentation'; all arguments are passed through
# unchanged.
C_hamraz_segmentation <- function(disc, nps, sensitivity, MDCW, Epsilon, CLc, CLs, Oc, Os, radius) {
    .Call('_lidRplugins_C_hamraz_segmentation', PACKAGE = 'lidRplugins', disc, nps, sensitivity, MDCW, Epsilon, CLc, CLs, Oc, Os, radius)
}

# Internal wrapper around the compiled routine registered as
# '_lidRplugins_C_lastrees_ptrees'; 'segmentation' defaults to TRUE on the
# R side and is forwarded to the compiled code.
C_lastrees_ptrees <- function(las, k_values, hmin, nmax, segmentation = TRUE) {
    .Call('_lidRplugins_C_lastrees_ptrees', PACKAGE = 'lidRplugins', las, k_values, hmin, nmax, segmentation)
}
/R/RcppExports.R
no_license
jtpils/lidRplugins
R
false
false
694
r
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is regenerated by Rcpp::compileAttributes(); any
# hand-written comments here will be lost on the next regeneration.

# Internal wrapper around the compiled routine registered as
# '_lidRplugins_C_lascoplanar'; all arguments are passed through unchanged.
C_lascoplanar <- function(las, k, th1, th2) {
    .Call('_lidRplugins_C_lascoplanar', PACKAGE = 'lidRplugins', las, k, th1, th2)
}

# Internal wrapper around the compiled routine registered as
# '_lidRplugins_C_hamraz_segmentation'; all arguments are passed through
# unchanged.
C_hamraz_segmentation <- function(disc, nps, sensitivity, MDCW, Epsilon, CLc, CLs, Oc, Os, radius) {
    .Call('_lidRplugins_C_hamraz_segmentation', PACKAGE = 'lidRplugins', disc, nps, sensitivity, MDCW, Epsilon, CLc, CLs, Oc, Os, radius)
}

# Internal wrapper around the compiled routine registered as
# '_lidRplugins_C_lastrees_ptrees'; 'segmentation' defaults to TRUE on the
# R side and is forwarded to the compiled code.
C_lastrees_ptrees <- function(las, k_values, hmin, nmax, segmentation = TRUE) {
    .Call('_lidRplugins_C_lastrees_ptrees', PACKAGE = 'lidRplugins', las, k_values, hmin, nmax, segmentation)
}
#' Cleans nsub output
#'
#' Removes every title row and all but the first header row from a
#' NONMEM-style table file, then writes the cleaned lines back out.
#'
#' @param path path to file
#' @param title keyword to determine title rows (eg. TABLE)
#' @param header keyword to determine header rows (eg. DV)
#' @param overwrite whether to overwrite the original file. If FALSE, will add
#'   a '_clean' to the existing file name
#' @return invisibly returns NULL; called for its side effect of writing the
#'   cleaned file
#' @export
clean_nsub <- function(path, title = "TABLE", header = "DV",
                       overwrite = TRUE) {
  path <- normalizePath(path)
  temp <- readLines(path)

  # Drop every title row. Use the `title` argument (previously "TABLE" was
  # hard-coded) and guard against grep() returning integer(0):
  # x[-integer(0)] would silently drop *all* lines.
  gtable <- grep(pattern = title, x = temp)
  if (length(gtable) > 0) {
    temp <- temp[-gtable]
  }

  # Keep only the first header row, dropping any repeats (same guard, and
  # the `header` argument is now honoured instead of a hard-coded "DV").
  gr <- grep(header, temp)
  if (length(gr) > 1) {
    temp <- temp[-gr[-1]]
  }

  if (isTRUE(overwrite)) {
    message(paste0("writing to ", path))
    writeLines(temp, path)
    return(invisible(NULL))
  }

  # Build "<name>_clean<ext>" next to the original file.
  end <- stringr::str_extract(path, "\\..*$")
  path <- gsub("\\..*$", "", path)
  path <- paste0(path, "_clean", end)
  message(paste0("writing to ", path))
  writeLines(temp, path)
  invisible(NULL)
}
/R/clean_nsub.R
no_license
DuyTran16/PKPDmisc
R
false
false
896
r
#' Cleans nsub output
#'
#' Removes every title row and all but the first header row from a
#' NONMEM-style table file, then writes the cleaned lines back out.
#'
#' @param path path to file
#' @param title keyword to determine title rows (eg. TABLE)
#' @param header keyword to determine header rows (eg. DV)
#' @param overwrite whether to overwrite the original file. If FALSE, will add
#'   a '_clean' to the existing file name
#' @return invisibly returns NULL; called for its side effect of writing the
#'   cleaned file
#' @export
clean_nsub <- function(path, title = "TABLE", header = "DV",
                       overwrite = TRUE) {
  path <- normalizePath(path)
  temp <- readLines(path)

  # Drop every title row. Use the `title` argument (previously "TABLE" was
  # hard-coded) and guard against grep() returning integer(0):
  # x[-integer(0)] would silently drop *all* lines.
  gtable <- grep(pattern = title, x = temp)
  if (length(gtable) > 0) {
    temp <- temp[-gtable]
  }

  # Keep only the first header row, dropping any repeats (same guard, and
  # the `header` argument is now honoured instead of a hard-coded "DV").
  gr <- grep(header, temp)
  if (length(gr) > 1) {
    temp <- temp[-gr[-1]]
  }

  if (isTRUE(overwrite)) {
    message(paste0("writing to ", path))
    writeLines(temp, path)
    return(invisible(NULL))
  }

  # Build "<name>_clean<ext>" next to the original file.
  end <- stringr::str_extract(path, "\\..*$")
  path <- gsub("\\..*$", "", path)
  path <- paste0(path, "_clean", end)
  message(paste0("writing to ", path))
  writeLines(temp, path)
  invisible(NULL)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataprocessing.R
\name{eq_clean_data}
\alias{eq_clean_data}
\title{Clean NOAA Significant Earthquake data}
\usage{
eq_clean_data(data)
}
\arguments{
\item{data}{the raw data frame to clean}
}
\value{
This function returns a dataset where Longitude and Latitude are numeric
variables and there is a new Date column containing the date. The old year,
month and day columns are dropped.
}
\description{
This function applies some cleaning processes to a dataset downloaded from
the \href{https://www.ngdc.noaa.gov/hazel/view/hazards/earthquake/search}{NOAA
Significant Earthquake Database}. Specifically, it converts the Latitude and
Longitude columns to numeric class and combines year, month and day into a
single variable in the Date class.
}
\examples{
\dontrun{
cleandata <- eq_clean_data(rawdata)
}
}
/man/eq_clean_data.Rd
no_license
doctormanuel/earthquake
R
false
true
1,231
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataprocessing.R
\name{eq_clean_data}
\alias{eq_clean_data}
\title{Clean a raw NOAA Significant Earthquake dataset}
\usage{
eq_clean_data(data)
}
\arguments{
\item{data}{the raw data frame to clean}
}
\value{
This function returns a dataset where Longitude and Latitude are numeric
variables and there is a new Date column containing the date. The old year,
month and day columns are dropped.
}
\description{
This function applies some cleaning processes to a dataset downloaded from the
\href{https://www.ngdc.noaa.gov/hazel/view/hazards/earthquake/search}{NOAA
Significant Earthquake Database}. Specifically, it converts the Latitude and
Longitude columns to numeric class and combines year, month and day into a
single variable in the Date class.
}
\examples{
\dontrun{
cleandata <- eq_clean_data(rawdata)
}
}
## explore() method for the "rtcor" (retention-time correction) protocol of an
## xcmsSet pipeline: stacks the TIC traces, the correction-fit scatter and a
## GGobi display of the component chain into one linked view.
## NOTE(review): depends on chromatoplots/commandr/rggobi infrastructure not
## visible in this file (pipeline, perform, stack_plots, gg_all_comps_disp,
## loadSample, restoreDataMode, protocolClass, ...) -- confirm against those.
setMethod("explore", c("xcmsSet", protocolClass("rtcor")),
          function(object, protocol, raw=NULL, xscale=NULL,geom=c("line","heatmap"),sample=NULL,log=FALSE,gg = ggobi()) {
  ## Pre-correction raw data are not cached on the object; regenerating them is
  ## slow, so ask the user before reprocessing.
  if(is.null(raw)){
    message('Raw data after finding components and before retention time correction is missing,\n data neet to be regenerated again(maybe time-consuming),\n are you sure you want to reprocess these data?')
    res <- menu(c("Yes","No"))
    if(res==1){
      rawpipeline <- pipeline(pipeline(processRawsProto(object)), outtype = "xcmsRaw")
      rawfiles <- object@filepaths
      raw <- perform(rawpipeline,rawfiles[sample])
    }else{
      return()
    }
  }
  ## raw <- perform(processProto(object@pipeline,'findComps'),
  ## object@filepaths)
  ## s <- object@comps[,"sample"] == sample
  ## retcor_d <- data.frame(raw = raw@comps[s,"rt"],
  ## cor = object@comps[s,"rt"])
  ## p_retcor_fit <- qplot(raw, cor-raw, data = retcor_d)+
  ## scale_x_continuous(limits=c(360,1000)) + geom_smooth()
  ## p_tic_cor <- plot_tic_rtcor(raws, object@rt$corrected,sample)
  ## p_tic_cor$title <- NULL
  gg_comp_chain <- gg_all_comps_disp(object@comps,gg)
  ## stack_plots("Retention Time Correction",
  ## list(p_tic_cor, p_retcor_fit, gg_comp_chain))
  ## match.arg() already collapses the default vector to its first element, so
  ## the length check below is purely defensive.
  geom <- match.arg(geom)
  if(length(geom)>1) geom <- "line"
  p_retcor_fit <- cplotRtFit(object,raw,xscale,sample)
  p_rt <- cplotRT(object,xscale=xscale,sample=sample,geom=geom,log=log)
  stack_plots("Retention Time Correction", list(p_rt,p_retcor_fit,gg_comp_chain))
})

## Plot total-ion-current chromatograms before vs. after retention-time
## correction, faceted by correction stage, either as overlaid per-sample
## lines or as a per-sample "heatmap" of coloured segments.
## NOTE(review): uses the pre-0.9.x ggplot2 `opts()` API; modern ggplot2
## replaced it with `theme()`.
cplotRT <- function(object,xscale=NULL,geom=c("line","heatmap"),sample=NULL,log=F){
  path <- object@filepaths
  tic <- list()
  sample.names <- rownames(object@phenoData)
  ## Default to every sample referenced in the component table.
  if(is.null(sample)) { sample=unique(object@comps[,'sample']) }
  sample=sample.names[sample]
  ## raw <- getData("loadSample")
  ## if(is.null(raw)){
  ## NOTE(review): the value of this option lookup is discarded; presumably it
  ## is meant to interact with commandr's data-caching mode -- confirm.
  getOption("BioC")$commandr$data_mode
  ## Re-read each raw file just to collect its TIC vector.
  for(i in 1:length(path)){
    raw <- loadSample(path[i])
    tic[[i]] <- raw@tic
  }
  restoreDataMode()
  ## }
  rt <- object@rt
  if(is.null(xscale)){
    xscale <- range(unlist(rt))
  }
  ## Build one long data frame: one row per (stage, sample, scan) with rt/tic.
  df2 <- data.frame()
  for(i in 1:length(rt)){
    x <-
      unlist(rt[[i]])
    df <- data.frame(rt=x,tic=unlist(tic),sample=rep(sample.names,each=length(rt[[1]][[1]])),
                     type=names(rt)[i],level=rep(1:length(rt[[1]]),each=length(rt[[1]][[1]])))
    df2 <- rbind(df2,df)
  }
  ## Restrict to the requested samples.
  df2 <- df2[df2$sample%in%sample,]
  df2$sample <- factor(df2$sample)
  if(geom=="line"){
    if(log){
      p<- qplot(rt,log(tic+1),group=sample,colour=sample,data=df2,geom="path",facets=type~.)+
        scale_x_continuous(limits=xscale)+
        scale_y_continuous(limits=range(df2[which(df2$rt<xscale[2]&df2$rt>xscale[1]),'tic']))+
        opts(legend.position="none")
    }else{
      p<- qplot(rt,tic,group=sample,colour=sample,data=df2,geom="path",facets=type~.)+
        scale_x_continuous(limits=xscale)+
        scale_y_continuous(limits=range(df2[which(df2$rt<xscale[2]&df2$rt>xscale[1]),'tic']))+
        opts(legend.position="none")
    }}
  if(geom=="heatmap"){
    ## One horizontal band of segments per sample, coloured by (log) TIC.
    if(log==FALSE){
      p <- qplot(x=rt,y=level,xend=rt,yend=level-0.9,data=df2,colour=tic,geom="segment",facets=type~.,ylab="sample")+
        scale_colour_gradient(low="yellow",high="red")+
        scale_x_continuous(limits=xscale)+
        scale_y_continuous(breaks=1:length(rt[[1]]))+
        opts(legend.position="none")
    }else{
      p <- qplot(x=rt,y=level,xend=rt,yend=level-0.9,data=df2,colour=log(tic+1),geom="segment",facets=type~.,ylab="sample")+
        scale_colour_gradient(low="yellow",high="red")+
        scale_x_continuous(limits=xscale)+
        scale_y_continuous(breaks=1:length(rt[[1]]))+
        opts(legend.position="none")
    }
  }
  p
}

## Scatter of (corrected - raw) retention time against raw retention time for
## the selected samples, with a per-sample smoother overlaid.
cplotRtFit <- function(object,raw,xscale=NULL,sample=NULL){
  sample.names <- rownames(object@phenoData)
  if(is.null(sample)) sample=unique(object@comps[,'sample'])
  s <- object@comps[,'sample']%in%sample
  retcor_d <- data.frame(raw=raw@comps[s,'rt'],
                         cor=object@comps[s,'rt'],
                         sample=sample.names[object@comps[s,'sample']])
  retcor_d$sample <- factor(retcor_d$sample)
  if(is.null(xscale)) xscale=range(c(retcor_d$raw,retcor_d$cor))
  p_retcor_fit <- qplot(raw,cor-raw,data=retcor_d,color=sample,group=sample,size=1)+
    scale_x_continuous(limits=xscale)+geom_smooth(se=FALSE)
  p_retcor_fit
}

## Overlay the TIC chromatograms of all samples on one (corrected) time axis.
## NOTE(review): the aes() built inside the loop captures `i` lazily, so every
## added layer may evaluate with the final value of `i` -- confirm before
## relying on this function.
plot_tic_rtcor <- function(raws, time,
                           sample,label = "corrected") {
  init <- data.frame(time = time[[sample]], tic = raws[[sample]]@tic)
  p <- ggplot(init, aes(x = time, y = tic)) + geom_line() +
    scale_x_continuous(limits=c(360,500)) +
    scale_y_continuous(limits=c(0,5e6))
  for (i in tail(seq_along(raws), -1)) {
    p <- p + geom_line(aes(x = time[[i]], y = raws[[i]]@tic))
  }
  p$title <- paste("TIC Chromatograms (", label, ")", sep="")
  p
}
/R/rtcor-plots.R
no_license
mariev/chromatoplots
R
false
false
5,326
r
## explore() method for the "rtcor" (retention-time correction) protocol of an
## xcmsSet pipeline: stacks the TIC traces, the correction-fit scatter and a
## GGobi display of the component chain into one linked view.
## NOTE(review): depends on chromatoplots/commandr/rggobi infrastructure not
## visible in this file (pipeline, perform, stack_plots, gg_all_comps_disp,
## loadSample, restoreDataMode, protocolClass, ...) -- confirm against those.
setMethod("explore", c("xcmsSet", protocolClass("rtcor")),
          function(object, protocol, raw=NULL, xscale=NULL,geom=c("line","heatmap"),sample=NULL,log=FALSE,gg = ggobi()) {
  ## Pre-correction raw data are not cached on the object; regenerating them is
  ## slow, so ask the user before reprocessing.
  if(is.null(raw)){
    message('Raw data after finding components and before retention time correction is missing,\n data neet to be regenerated again(maybe time-consuming),\n are you sure you want to reprocess these data?')
    res <- menu(c("Yes","No"))
    if(res==1){
      rawpipeline <- pipeline(pipeline(processRawsProto(object)), outtype = "xcmsRaw")
      rawfiles <- object@filepaths
      raw <- perform(rawpipeline,rawfiles[sample])
    }else{
      return()
    }
  }
  ## raw <- perform(processProto(object@pipeline,'findComps'),
  ## object@filepaths)
  ## s <- object@comps[,"sample"] == sample
  ## retcor_d <- data.frame(raw = raw@comps[s,"rt"],
  ## cor = object@comps[s,"rt"])
  ## p_retcor_fit <- qplot(raw, cor-raw, data = retcor_d)+
  ## scale_x_continuous(limits=c(360,1000)) + geom_smooth()
  ## p_tic_cor <- plot_tic_rtcor(raws, object@rt$corrected,sample)
  ## p_tic_cor$title <- NULL
  gg_comp_chain <- gg_all_comps_disp(object@comps,gg)
  ## stack_plots("Retention Time Correction",
  ## list(p_tic_cor, p_retcor_fit, gg_comp_chain))
  ## match.arg() already collapses the default vector to its first element, so
  ## the length check below is purely defensive.
  geom <- match.arg(geom)
  if(length(geom)>1) geom <- "line"
  p_retcor_fit <- cplotRtFit(object,raw,xscale,sample)
  p_rt <- cplotRT(object,xscale=xscale,sample=sample,geom=geom,log=log)
  stack_plots("Retention Time Correction", list(p_rt,p_retcor_fit,gg_comp_chain))
})

## Plot total-ion-current chromatograms before vs. after retention-time
## correction, faceted by correction stage, either as overlaid per-sample
## lines or as a per-sample "heatmap" of coloured segments.
## NOTE(review): uses the pre-0.9.x ggplot2 `opts()` API; modern ggplot2
## replaced it with `theme()`.
cplotRT <- function(object,xscale=NULL,geom=c("line","heatmap"),sample=NULL,log=F){
  path <- object@filepaths
  tic <- list()
  sample.names <- rownames(object@phenoData)
  ## Default to every sample referenced in the component table.
  if(is.null(sample)) { sample=unique(object@comps[,'sample']) }
  sample=sample.names[sample]
  ## raw <- getData("loadSample")
  ## if(is.null(raw)){
  ## NOTE(review): the value of this option lookup is discarded; presumably it
  ## is meant to interact with commandr's data-caching mode -- confirm.
  getOption("BioC")$commandr$data_mode
  ## Re-read each raw file just to collect its TIC vector.
  for(i in 1:length(path)){
    raw <- loadSample(path[i])
    tic[[i]] <- raw@tic
  }
  restoreDataMode()
  ## }
  rt <- object@rt
  if(is.null(xscale)){
    xscale <- range(unlist(rt))
  }
  ## Build one long data frame: one row per (stage, sample, scan) with rt/tic.
  df2 <- data.frame()
  for(i in 1:length(rt)){
    x <-
      unlist(rt[[i]])
    df <- data.frame(rt=x,tic=unlist(tic),sample=rep(sample.names,each=length(rt[[1]][[1]])),
                     type=names(rt)[i],level=rep(1:length(rt[[1]]),each=length(rt[[1]][[1]])))
    df2 <- rbind(df2,df)
  }
  ## Restrict to the requested samples.
  df2 <- df2[df2$sample%in%sample,]
  df2$sample <- factor(df2$sample)
  if(geom=="line"){
    if(log){
      p<- qplot(rt,log(tic+1),group=sample,colour=sample,data=df2,geom="path",facets=type~.)+
        scale_x_continuous(limits=xscale)+
        scale_y_continuous(limits=range(df2[which(df2$rt<xscale[2]&df2$rt>xscale[1]),'tic']))+
        opts(legend.position="none")
    }else{
      p<- qplot(rt,tic,group=sample,colour=sample,data=df2,geom="path",facets=type~.)+
        scale_x_continuous(limits=xscale)+
        scale_y_continuous(limits=range(df2[which(df2$rt<xscale[2]&df2$rt>xscale[1]),'tic']))+
        opts(legend.position="none")
    }}
  if(geom=="heatmap"){
    ## One horizontal band of segments per sample, coloured by (log) TIC.
    if(log==FALSE){
      p <- qplot(x=rt,y=level,xend=rt,yend=level-0.9,data=df2,colour=tic,geom="segment",facets=type~.,ylab="sample")+
        scale_colour_gradient(low="yellow",high="red")+
        scale_x_continuous(limits=xscale)+
        scale_y_continuous(breaks=1:length(rt[[1]]))+
        opts(legend.position="none")
    }else{
      p <- qplot(x=rt,y=level,xend=rt,yend=level-0.9,data=df2,colour=log(tic+1),geom="segment",facets=type~.,ylab="sample")+
        scale_colour_gradient(low="yellow",high="red")+
        scale_x_continuous(limits=xscale)+
        scale_y_continuous(breaks=1:length(rt[[1]]))+
        opts(legend.position="none")
    }
  }
  p
}

## Scatter of (corrected - raw) retention time against raw retention time for
## the selected samples, with a per-sample smoother overlaid.
cplotRtFit <- function(object,raw,xscale=NULL,sample=NULL){
  sample.names <- rownames(object@phenoData)
  if(is.null(sample)) sample=unique(object@comps[,'sample'])
  s <- object@comps[,'sample']%in%sample
  retcor_d <- data.frame(raw=raw@comps[s,'rt'],
                         cor=object@comps[s,'rt'],
                         sample=sample.names[object@comps[s,'sample']])
  retcor_d$sample <- factor(retcor_d$sample)
  if(is.null(xscale)) xscale=range(c(retcor_d$raw,retcor_d$cor))
  p_retcor_fit <- qplot(raw,cor-raw,data=retcor_d,color=sample,group=sample,size=1)+
    scale_x_continuous(limits=xscale)+geom_smooth(se=FALSE)
  p_retcor_fit
}

## Overlay the TIC chromatograms of all samples on one (corrected) time axis.
## NOTE(review): the aes() built inside the loop captures `i` lazily, so every
## added layer may evaluate with the final value of `i` -- confirm before
## relying on this function.
plot_tic_rtcor <- function(raws, time,
                           sample,label = "corrected") {
  init <- data.frame(time = time[[sample]], tic = raws[[sample]]@tic)
  p <- ggplot(init, aes(x = time, y = tic)) + geom_line() +
    scale_x_continuous(limits=c(360,500)) +
    scale_y_continuous(limits=c(0,5e6))
  for (i in tail(seq_along(raws), -1)) {
    p <- p + geom_line(aes(x = time[[i]], y = raws[[i]]@tic))
  }
  p$title <- paste("TIC Chromatograms (", label, ")", sep="")
  p
}
## Regression tests for repMean()'s cross-difference standard-error handling.
## NOTE(review): assumes the example data set `lsa` and repMean() (package
## eatRep) are made available by the testthat setup -- not visible here.

## Reading-domain records from 2015; rd15_1 keeps only the first nest.
rd <- lsa[which(lsa[,"domain"] == "reading"),]
rd15 <- rd[rd$year == 2015, ]
rd15_1 <- rd15[rd15$nest == 1, ]

## Fit once without and once with cross differences (crossDiffSE = "old");
## console chatter is captured so the runs stay quiet.
suppressMessages(txt <- capture.output ( m_withoutCross <- repMean(datL = rd15, ID="idstud", type = "JK2", PSU = "jkzone", repInd = "jkrep", imp="imp", nest="nest", groups = c("sex"), group.splits = 0:1, cross.differences = FALSE, dependent = "score", na.rm=FALSE, doCheck=TRUE, linkErr = "leScore", crossDiffSE="old", engine = "BIFIEsurvey")))
suppressMessages(txt2 <- capture.output ( m_oldCross <- repMean(datL = rd15, ID="idstud", type = "JK2", PSU = "jkzone", repInd = "jkrep", imp="imp", nest="nest", groups = c("sex"), group.splits = 0:1, cross.differences = TRUE, dependent = "score", na.rm=FALSE, doCheck=TRUE, linkErr = "leScore", crossDiffSE="old", engine = "BIFIEsurvey")))

## Without cross differences no SE_correction element should exist at all.
test_that("No cross differences", {
  expect_equal(m_withoutCross[["SE_correction"]], NULL)
  expect_false("SE_correction" %in% names(m_withoutCross))
})

## With the "old" method the element exists (classed "old"/"list") but its
## first entry is empty.
test_that("Old cross differences", {
  expect_equal(class(m_oldCross[["SE_correction"]]), c("old", "list"))
  expect_equal(m_oldCross[["SE_correction"]][[1]], NULL)
})

## Grouping variables whose factor levels overlap must be rejected.
rd15$sex_logic <- as.logical(as.numeric(rd15$sex) - 1)
test_that("error for two logical grouping variables", {
  expect_error(capture.output(repMean(datL = rd15, ID="idstud", type = "JK2", PSU = "jkzone", repInd = "jkrep", imp="imp", nest="nest", groups = c("sex_logic", "mig"), group.splits = 0:1, cross.differences = FALSE, dependent = "score", na.rm=FALSE, doCheck=TRUE, linkErr = "leScore", crossDiffSE="old")),
               "Factor levels of grouping variables are not disjunct.")
})

## A character dependent variable must be rejected with a clear message.
test_that("error for string with multiple categories to jk2.mean", {
  rd15_2 <- rd15_1
  rd15_2$country <- as.character(rd15_2$country)
  expect_error(test <- repMean(datL = rd15_2, wgt = "wgt", imp = "imp", dependent = "country", ID = "idstud"),
               "Dependent variable 'country' has to be of class 'integer' or 'numeric'.")
})

### PISA method
## The replication-based ("rep") cross-difference method must run silently.
test_that("PISA runs through", {
  expect_silent(suppressWarnings(suppressMessages(txt2 <- capture.output(m_oldCross <- repMean(datL = rd15, ID="idstud", type = "JK2", PSU = "jkzone", repInd = "jkrep", imp="imp", nest="nest", groups = c("sex"), group.splits = 0:1, cross.differences = TRUE, dependent = "score", na.rm=FALSE, doCheck=TRUE, linkErr = "leScore", crossDiffSE="rep")))))
})
/tests/testthat/test_jk2_mean.R
no_license
weirichs/eatRep
R
false
false
2,777
r
## Regression tests for repMean()'s cross-difference standard-error handling.
## NOTE(review): assumes the example data set `lsa` and repMean() (package
## eatRep) are made available by the testthat setup -- not visible here.

## Reading-domain records from 2015; rd15_1 keeps only the first nest.
rd <- lsa[which(lsa[,"domain"] == "reading"),]
rd15 <- rd[rd$year == 2015, ]
rd15_1 <- rd15[rd15$nest == 1, ]

## Fit once without and once with cross differences (crossDiffSE = "old");
## console chatter is captured so the runs stay quiet.
suppressMessages(txt <- capture.output ( m_withoutCross <- repMean(datL = rd15, ID="idstud", type = "JK2", PSU = "jkzone", repInd = "jkrep", imp="imp", nest="nest", groups = c("sex"), group.splits = 0:1, cross.differences = FALSE, dependent = "score", na.rm=FALSE, doCheck=TRUE, linkErr = "leScore", crossDiffSE="old", engine = "BIFIEsurvey")))
suppressMessages(txt2 <- capture.output ( m_oldCross <- repMean(datL = rd15, ID="idstud", type = "JK2", PSU = "jkzone", repInd = "jkrep", imp="imp", nest="nest", groups = c("sex"), group.splits = 0:1, cross.differences = TRUE, dependent = "score", na.rm=FALSE, doCheck=TRUE, linkErr = "leScore", crossDiffSE="old", engine = "BIFIEsurvey")))

## Without cross differences no SE_correction element should exist at all.
test_that("No cross differences", {
  expect_equal(m_withoutCross[["SE_correction"]], NULL)
  expect_false("SE_correction" %in% names(m_withoutCross))
})

## With the "old" method the element exists (classed "old"/"list") but its
## first entry is empty.
test_that("Old cross differences", {
  expect_equal(class(m_oldCross[["SE_correction"]]), c("old", "list"))
  expect_equal(m_oldCross[["SE_correction"]][[1]], NULL)
})

## Grouping variables whose factor levels overlap must be rejected.
rd15$sex_logic <- as.logical(as.numeric(rd15$sex) - 1)
test_that("error for two logical grouping variables", {
  expect_error(capture.output(repMean(datL = rd15, ID="idstud", type = "JK2", PSU = "jkzone", repInd = "jkrep", imp="imp", nest="nest", groups = c("sex_logic", "mig"), group.splits = 0:1, cross.differences = FALSE, dependent = "score", na.rm=FALSE, doCheck=TRUE, linkErr = "leScore", crossDiffSE="old")),
               "Factor levels of grouping variables are not disjunct.")
})

## A character dependent variable must be rejected with a clear message.
test_that("error for string with multiple categories to jk2.mean", {
  rd15_2 <- rd15_1
  rd15_2$country <- as.character(rd15_2$country)
  expect_error(test <- repMean(datL = rd15_2, wgt = "wgt", imp = "imp", dependent = "country", ID = "idstud"),
               "Dependent variable 'country' has to be of class 'integer' or 'numeric'.")
})

### PISA method
## The replication-based ("rep") cross-difference method must run silently.
test_that("PISA runs through", {
  expect_silent(suppressWarnings(suppressMessages(txt2 <- capture.output(m_oldCross <- repMean(datL = rd15, ID="idstud", type = "JK2", PSU = "jkzone", repInd = "jkrep", imp="imp", nest="nest", groups = c("sex"), group.splits = 0:1, cross.differences = TRUE, dependent = "score", na.rm=FALSE, doCheck=TRUE, linkErr = "leScore", crossDiffSE="rep")))))
})
#Portugal Vaccine
## Download the Portuguese Ministry of Health vaccination CSV, harmonise it to
## the COVerAGE-DB long format and archive the raw source file.
## NOTE(review): drive_auth/gs4_auth/log_update and the dplyr/lubridate/readr
## functions are loaded by 00_Functions_automation.R -- confirm there.
library(here)
source(here("Automation", "00_Functions_automation.R"))

# assigning Drive credentials in the case the script is verified manually
if (!"email" %in% ls()){
  email <- "jessica_d.1994@yahoo.de"
}

# info country and N drive address
ctr <- "Portugal_Vaccine" # it's a placeholder
dir_n <- "N:/COVerAGE-DB/Automation/Hydra/"

# Drive credentials
drive_auth(email = email)
gs4_auth(email = email)

#Read data in
url <- ("https://covid19.min-saude.pt/wp-content/uploads/2021/04/Dataset-Vacinac%CC%A7a%CC%83o-11.csv")
In <- read.csv(url, sep = ';')

#Process
## Keep age-specific and overall rows, rename to standard columns, pivot the
## three cumulative dose counters into long format, then recode the Portuguese
## age labels to lower age bounds plus interval widths (AgeInt).
Out <- In %>%
  subset(TYPE== "AGES" | TYPE== "GENERAL")%>% # there are totals by some regions, but not in iso format, they use some regional health administration
  select(Date= DATE, Region= REGION, Age= AGEGROUP,
         Vaccination1= CUMUL_VAC_1, Vaccination2= CUMUL_VAC_2, Vaccinations= CUMUL)%>%
  pivot_longer(!Age & !Date & !Region, names_to= "Measure", values_to= "Value")%>%
  mutate(Age=recode(Age,
                    `0-17 anos`="0",
                    `18-24 anos`="18",
                    `25-49 anos`="25",
                    `50-64 anos`="50",
                    `65-79 anos`="65",
                    `80 ou mais anos`="80",
                    `All`="TOT",
                    `Desconhecido`="UNK"))%>%
  ## Unmatched labels come back NA from recode(); treat them as unknown age.
  mutate(Age = case_when(is.na(Age) ~ "UNK", TRUE~ as.character(Age))) %>%
  mutate(AgeInt = case_when(
    Age == "0" ~ 18L,
    Age == "18" ~ 7L,
    Age == "25" ~ 25L,
    Age == "80" ~ 25L,
    Age == "UNK" ~ NA_integer_,
    Age == "TOT" ~ NA_integer_,
    TRUE ~ 15L))%>%
  mutate(
    Metric = "Count",
    Sex= "b") %>%
  ## Reformat the date as dd.mm.yyyy and build the COVerAGE-DB record code.
  mutate(
    Date = dmy(Date),
    Date = paste(sprintf("%02d",day(Date)), sprintf("%02d",month(Date)), year(Date),sep="."),
    Code = paste0("PT_All",Date),
    Country = "Portugal",
    Region = "All",)%>%
  select(Country, Region, Code, Date, Sex, Age, AgeInt, Metric, Measure, Value)

#save output data
write_rds(Out, paste0(dir_n, ctr, ".rds"))

# This command append new rows at the end of the sheet
log_update(pp = ctr, N = nrow(Out))

# now archive
data_source <- paste0(dir_n, "Data_sources/", ctr, "/vaccine_age_",today(), ".csv")
write_csv(In, data_source)

zipname <- paste0(dir_n, "Data_sources/", ctr, "/", ctr, "_data_", today(), ".zip")

zip::zipr(zipname,
          data_source,
          recurse = TRUE,
          compression_level = 9,
          include_directories = TRUE)

## Remove the flat CSV once it has been zipped.
file.remove(data_source)
/Automation/00_hydra/Portugal_Vaccine.R
permissive
alburezg/covid_age
R
false
false
2,714
r
#Portugal Vaccine
## Download the Portuguese Ministry of Health vaccination CSV, harmonise it to
## the COVerAGE-DB long format and archive the raw source file.
## NOTE(review): drive_auth/gs4_auth/log_update and the dplyr/lubridate/readr
## functions are loaded by 00_Functions_automation.R -- confirm there.
library(here)
source(here("Automation", "00_Functions_automation.R"))

# assigning Drive credentials in the case the script is verified manually
if (!"email" %in% ls()){
  email <- "jessica_d.1994@yahoo.de"
}

# info country and N drive address
ctr <- "Portugal_Vaccine" # it's a placeholder
dir_n <- "N:/COVerAGE-DB/Automation/Hydra/"

# Drive credentials
drive_auth(email = email)
gs4_auth(email = email)

#Read data in
url <- ("https://covid19.min-saude.pt/wp-content/uploads/2021/04/Dataset-Vacinac%CC%A7a%CC%83o-11.csv")
In <- read.csv(url, sep = ';')

#Process
## Keep age-specific and overall rows, rename to standard columns, pivot the
## three cumulative dose counters into long format, then recode the Portuguese
## age labels to lower age bounds plus interval widths (AgeInt).
Out <- In %>%
  subset(TYPE== "AGES" | TYPE== "GENERAL")%>% # there are totals by some regions, but not in iso format, they use some regional health administration
  select(Date= DATE, Region= REGION, Age= AGEGROUP,
         Vaccination1= CUMUL_VAC_1, Vaccination2= CUMUL_VAC_2, Vaccinations= CUMUL)%>%
  pivot_longer(!Age & !Date & !Region, names_to= "Measure", values_to= "Value")%>%
  mutate(Age=recode(Age,
                    `0-17 anos`="0",
                    `18-24 anos`="18",
                    `25-49 anos`="25",
                    `50-64 anos`="50",
                    `65-79 anos`="65",
                    `80 ou mais anos`="80",
                    `All`="TOT",
                    `Desconhecido`="UNK"))%>%
  ## Unmatched labels come back NA from recode(); treat them as unknown age.
  mutate(Age = case_when(is.na(Age) ~ "UNK", TRUE~ as.character(Age))) %>%
  mutate(AgeInt = case_when(
    Age == "0" ~ 18L,
    Age == "18" ~ 7L,
    Age == "25" ~ 25L,
    Age == "80" ~ 25L,
    Age == "UNK" ~ NA_integer_,
    Age == "TOT" ~ NA_integer_,
    TRUE ~ 15L))%>%
  mutate(
    Metric = "Count",
    Sex= "b") %>%
  ## Reformat the date as dd.mm.yyyy and build the COVerAGE-DB record code.
  mutate(
    Date = dmy(Date),
    Date = paste(sprintf("%02d",day(Date)), sprintf("%02d",month(Date)), year(Date),sep="."),
    Code = paste0("PT_All",Date),
    Country = "Portugal",
    Region = "All",)%>%
  select(Country, Region, Code, Date, Sex, Age, AgeInt, Metric, Measure, Value)

#save output data
write_rds(Out, paste0(dir_n, ctr, ".rds"))

# This command append new rows at the end of the sheet
log_update(pp = ctr, N = nrow(Out))

# now archive
data_source <- paste0(dir_n, "Data_sources/", ctr, "/vaccine_age_",today(), ".csv")
write_csv(In, data_source)

zipname <- paste0(dir_n, "Data_sources/", ctr, "/", ctr, "_data_", today(), ".zip")

zip::zipr(zipname,
          data_source,
          recurse = TRUE,
          compression_level = 9,
          include_directories = TRUE)

## Remove the flat CSV once it has been zipped.
file.remove(data_source)
#6/19/2017 (Week3)
# Determine which ghrelin-related phenotypes and which islet gene-expression
# traits differ significantly between sexes; save the split gene lists and
# plot a histogram of all expression p-values.
#start in project directory
#setwd("/Users/s-allens/Documents/ssp/summer_project")
library(tidyverse)
library(plotly)
library(qtl2)
library(ggplot2)
library(dplyr)
library(corrplot)
library(reshape2)

#loading in mRNA data (provides rankz.mrna and annot.mrna)
load("/Users/s-allens/Documents/ssp/summer_project/data/DO378_islet.RData")

# getting access functions that are stored in a separate file
# (provides pval_vec() and significance())
source("scripts/functions.R")

#get phenotype data
matched_phenotypes <- read.csv("/Users/s-allens/Documents/ssp/summer_project/data/matched_pheno_clin.csv", as.is=TRUE)

#which phenotypes/gene expression traits differ between sexes?
# Example single-phenotype ANOVA; the p-value is fit1[1, 5] and tells you
# whether the covariate (sex) has a significant effect on the phenotype.
fit1 <- anova(lm(Glu_10wk ~ sex, data = matched_phenotypes))

#Goal 1: p-values for the effect of sex on each ghrelin-relevant phenotype
# (the original select() listed `sex` twice; the duplicate is dropped --
# dplyr::select() de-duplicates anyway, so the result is identical)
ghrelin_phenos <- matched_phenotypes %>%
  select(sex, Glu_0min, Glu_tAUC, Glu_iAUC, Glu_6wk, Glu_10wk, Glu_14wk,
         Glu_sac, Ins_0min, Ins_tAUC, Ins_iAUC, Ins_6wk, Ins_10wk, Ins_14wk,
         Ins_sac, food_ave, weight_sac, G33_ins_secrete, G83_ins_secrete,
         G167_ins_secrete, KCl_G33_ins_secrete, GLP1_G83_ins_secrete,
         AA_G83_ins_secrete, PA_G167_ins_secrete)

#pval_vec gets the list of values, significance sorts them by significance
ghrelin_pvals <- pval_vec(ghrelin_phenos, "sex")
sex_insig_ghrelin <- significance(ghrelin_pvals)
#sex has a significant effect on all of these phenotypes (p < 0.05)

#Goal 2: Use P-val functions to test all gene expression data
sex <- matched_phenotypes$sex
mrna_z_data <- data.frame(rankz.mrna)
rankz_and_gender <- cbind(sex, mrna_z_data)

# change all expression columns to numerical (column 1 is `sex`)
for(i in 2:ncol(rankz_and_gender)) {
  rankz_and_gender[,i] <- as.numeric(as.character(rankz_and_gender[,i]))
}

#generate one list of genes that are affected and one of those that are not
gene_exp_pvals <- pval_vec(rankz_and_gender, "sex")
sex_signif_genexp <- significance(gene_exp_pvals)
genes_affected <- sex_signif_genexp$significant_effect
genes_unaffected <- sex_signif_genexp$insignificant_effect

# Map transcript ids to gene symbols with one vectorized lookup instead of a
# growing-vector loop.  BUG FIX: the original loop ran
# `names(genes_affected[i]) <- gene_name`, which sets names on a temporary
# copy and therefore never renamed the real vectors; `names(x) <- ...` on the
# whole vector is used here instead.
signif_genes <- annot.mrna$symbol[match(names(genes_affected), annot.mrna$id)]
insig_genes <- annot.mrna$symbol[match(names(genes_unaffected), annot.mrna$id)]
names(genes_affected) <- signif_genes
names(genes_unaffected) <- insig_genes

name_p_sig <- data.frame(name = signif_genes, pval = genes_affected)
name_p_insig <- data.frame(name = insig_genes, pval = genes_unaffected)

#Save this data!
#write.csv(name_p_sig, file = "sex_sig_genes")
#write.csv(name_p_insig, file = "sex_insig_genes")

#if you ever need it again:
name_p_sig <- read.csv("/Users/s-allens/Documents/ssp/summer_project/data/sex_sig_genes", as.is=TRUE)
name_p_insig <- read.csv("/Users/s-allens/Documents/ssp/summer_project/data/sex_insig_genes", as.is=TRUE)

#time to make some histograms?
sex_all_expr <- rbind(name_p_insig, name_p_sig)

#long form of dataframe; the value column is named `value`
# BUG FIX: the original called melt(vector, variable.name=, value_name=) --
# `value_name` is a misspelling of `value.name` and both arguments were
# silently ignored.  Melt the data-frame column explicitly instead.
pvals_long <- melt(sex_all_expr, measure.vars = "pval", value.name = "value")

#make histogram
hist(pvals_long$value, breaks = 100)
/scripts/week_3_scripts/determining_covar_significance.R
no_license
sadiela/summer_project
R
false
false
3,592
r
#6/19/2017 (Week3)
# Determine which ghrelin-related phenotypes and which islet gene-expression
# traits differ significantly between sexes; save the split gene lists and
# plot a histogram of all expression p-values.
#start in project directory
#setwd("/Users/s-allens/Documents/ssp/summer_project")
library(tidyverse)
library(plotly)
library(qtl2)
library(ggplot2)
library(dplyr)
library(corrplot)
library(reshape2)

#loading in mRNA data (provides rankz.mrna and annot.mrna)
load("/Users/s-allens/Documents/ssp/summer_project/data/DO378_islet.RData")

# getting access functions that are stored in a separate file
# (provides pval_vec() and significance())
source("scripts/functions.R")

#get phenotype data
matched_phenotypes <- read.csv("/Users/s-allens/Documents/ssp/summer_project/data/matched_pheno_clin.csv", as.is=TRUE)

#which phenotypes/gene expression traits differ between sexes?
# Example single-phenotype ANOVA; the p-value is fit1[1, 5] and tells you
# whether the covariate (sex) has a significant effect on the phenotype.
fit1 <- anova(lm(Glu_10wk ~ sex, data = matched_phenotypes))

#Goal 1: p-values for the effect of sex on each ghrelin-relevant phenotype
# (the original select() listed `sex` twice; the duplicate is dropped --
# dplyr::select() de-duplicates anyway, so the result is identical)
ghrelin_phenos <- matched_phenotypes %>%
  select(sex, Glu_0min, Glu_tAUC, Glu_iAUC, Glu_6wk, Glu_10wk, Glu_14wk,
         Glu_sac, Ins_0min, Ins_tAUC, Ins_iAUC, Ins_6wk, Ins_10wk, Ins_14wk,
         Ins_sac, food_ave, weight_sac, G33_ins_secrete, G83_ins_secrete,
         G167_ins_secrete, KCl_G33_ins_secrete, GLP1_G83_ins_secrete,
         AA_G83_ins_secrete, PA_G167_ins_secrete)

#pval_vec gets the list of values, significance sorts them by significance
ghrelin_pvals <- pval_vec(ghrelin_phenos, "sex")
sex_insig_ghrelin <- significance(ghrelin_pvals)
#sex has a significant effect on all of these phenotypes (p < 0.05)

#Goal 2: Use P-val functions to test all gene expression data
sex <- matched_phenotypes$sex
mrna_z_data <- data.frame(rankz.mrna)
rankz_and_gender <- cbind(sex, mrna_z_data)

# change all expression columns to numerical (column 1 is `sex`)
for(i in 2:ncol(rankz_and_gender)) {
  rankz_and_gender[,i] <- as.numeric(as.character(rankz_and_gender[,i]))
}

#generate one list of genes that are affected and one of those that are not
gene_exp_pvals <- pval_vec(rankz_and_gender, "sex")
sex_signif_genexp <- significance(gene_exp_pvals)
genes_affected <- sex_signif_genexp$significant_effect
genes_unaffected <- sex_signif_genexp$insignificant_effect

# Map transcript ids to gene symbols with one vectorized lookup instead of a
# growing-vector loop.  BUG FIX: the original loop ran
# `names(genes_affected[i]) <- gene_name`, which sets names on a temporary
# copy and therefore never renamed the real vectors; `names(x) <- ...` on the
# whole vector is used here instead.
signif_genes <- annot.mrna$symbol[match(names(genes_affected), annot.mrna$id)]
insig_genes <- annot.mrna$symbol[match(names(genes_unaffected), annot.mrna$id)]
names(genes_affected) <- signif_genes
names(genes_unaffected) <- insig_genes

name_p_sig <- data.frame(name = signif_genes, pval = genes_affected)
name_p_insig <- data.frame(name = insig_genes, pval = genes_unaffected)

#Save this data!
#write.csv(name_p_sig, file = "sex_sig_genes")
#write.csv(name_p_insig, file = "sex_insig_genes")

#if you ever need it again:
name_p_sig <- read.csv("/Users/s-allens/Documents/ssp/summer_project/data/sex_sig_genes", as.is=TRUE)
name_p_insig <- read.csv("/Users/s-allens/Documents/ssp/summer_project/data/sex_insig_genes", as.is=TRUE)

#time to make some histograms?
sex_all_expr <- rbind(name_p_insig, name_p_sig)

#long form of dataframe; the value column is named `value`
# BUG FIX: the original called melt(vector, variable.name=, value_name=) --
# `value_name` is a misspelling of `value.name` and both arguments were
# silently ignored.  Melt the data-frame column explicitly instead.
pvals_long <- melt(sex_all_expr, measure.vars = "pval", value.name = "value")

#make histogram
hist(pvals_long$value, breaks = 100)
library(shiny)
library(rCharts)

# Shiny UI for the food-preference estimator: the user picks a gender, an age
# group and a preferred food type, presses "Go!", and the server renders the
# estimate plus summary tables and per-gender highcharts plots.
shinyUI(
  pageWithSidebar(
    headerPanel("Food Preferences in People"),
    sidebarPanel(
      radioButtons("gender", label = h5("Please Select Gender:"),
                   c("Males" = "Males",
                     "Females" = "Females"),
                   selected = 0, inline = FALSE),
      # BUG FIX: the original listed the "15-17 yrs" choice twice; the
      # duplicate entry is removed.
      selectInput("age", label = h5("Please Select the Age group:"),
                  choices = list(" " = " ",
                                 "15-17 yrs" = "15-17",
                                 "18-24 yrs" = "18-24",
                                 "25-34 yrs" = "25-34",
                                 "35+ yrs" = "35+"),
                  selected = 0),
      radioButtons("food", label = h5("\nPlease Select the Food Type You Like & Prefer:"),
                   c("Junk" = "Junk",
                     "Healthy" = "Healthy",
                     "All Kind of Food" = "All Kind of Food"),
                   selected = 0),
      actionButton("goButton", "Go!")
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Estimation",
                 h3('Food Preferences Outputs'),
                 # typo fixed: "Selected Gener" -> "Selected Gender"
                 h6('Selected Gender: '),
                 textOutput("gender"),
                 h6('Selected Age Group'),
                 textOutput("age"),
                 h6('Selected Food Type'),
                 textOutput("food"),
                 h3("Result"),
                 h2(verbatimTextOutput("estimation"))
        ),
        tabPanel('Data Summary',
                 h3('Food Preferences Data'),
                 dataTableOutput("dt")
        ),
        tabPanel("Food Preference in Males Plot",
                 showOutput("Plot1", "highcharts")
        ),
        tabPanel("Food Preference in Females Plot",
                 showOutput("Plot2", "highcharts")
        )
      ),
      # typos fixed: "Healther and Hegenic ... Prosper Society"
      p(strong("Healthy and Hygienic Food is key to a Prosperous Society"))
    )
  ))
/ui.R
no_license
mankubindal/foodpref
R
false
false
3,375
r
library(shiny)
library(rCharts)

# Shiny UI for the food-preference estimator: the user picks a gender, an age
# group and a preferred food type, presses "Go!", and the server renders the
# estimate plus summary tables and per-gender highcharts plots.
shinyUI(
  pageWithSidebar(
    headerPanel("Food Preferences in People"),
    sidebarPanel(
      radioButtons("gender", label = h5("Please Select Gender:"),
                   c("Males" = "Males",
                     "Females" = "Females"),
                   selected = 0, inline = FALSE),
      # BUG FIX: the original listed the "15-17 yrs" choice twice; the
      # duplicate entry is removed.
      selectInput("age", label = h5("Please Select the Age group:"),
                  choices = list(" " = " ",
                                 "15-17 yrs" = "15-17",
                                 "18-24 yrs" = "18-24",
                                 "25-34 yrs" = "25-34",
                                 "35+ yrs" = "35+"),
                  selected = 0),
      radioButtons("food", label = h5("\nPlease Select the Food Type You Like & Prefer:"),
                   c("Junk" = "Junk",
                     "Healthy" = "Healthy",
                     "All Kind of Food" = "All Kind of Food"),
                   selected = 0),
      actionButton("goButton", "Go!")
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Estimation",
                 h3('Food Preferences Outputs'),
                 # typo fixed: "Selected Gener" -> "Selected Gender"
                 h6('Selected Gender: '),
                 textOutput("gender"),
                 h6('Selected Age Group'),
                 textOutput("age"),
                 h6('Selected Food Type'),
                 textOutput("food"),
                 h3("Result"),
                 h2(verbatimTextOutput("estimation"))
        ),
        tabPanel('Data Summary',
                 h3('Food Preferences Data'),
                 dataTableOutput("dt")
        ),
        tabPanel("Food Preference in Males Plot",
                 showOutput("Plot1", "highcharts")
        ),
        tabPanel("Food Preference in Females Plot",
                 showOutput("Plot2", "highcharts")
        )
      ),
      # typos fixed: "Healther and Hegenic ... Prosper Society"
      p(strong("Healthy and Hygienic Food is key to a Prosperous Society"))
    )
  ))
# Fit a 10-fold cross-validated elastic-net (alpha = 0.5, gaussian family,
# MSE criterion, no standardization) regressing column 1 of the
# ReliefF-selected NSCLC training set on the remaining feature columns
# (columns 4..ncol), and append the fitted model path to a log file.
library(glmnet)

nsclc <- read.table("./TrainingSet/ReliefF/NSCLC.csv", header = TRUE, sep = ",")

# Predictors: every column from the 4th onward.  Response: column 1.
features <- as.matrix(nsclc[, 4:ncol(nsclc)])
response <- as.matrix(nsclc[, 1])

# Fixed seed so the cross-validation fold assignment is reproducible.
set.seed(123)
fit <- cv.glmnet(features, response,
                 nfolds = 10,
                 type.measure = "mse",
                 alpha = 0.5,
                 family = "gaussian",
                 standardize = FALSE)

# Append the per-lambda glmnet fit summary to the model log.
sink('./Model/EN/ReliefF/NSCLC/NSCLC_058.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
/Model/EN/ReliefF/NSCLC/NSCLC_058.R
no_license
leon1003/QSMART
R
false
false
349
r
# Fit a 10-fold cross-validated elastic-net (alpha = 0.5, gaussian family,
# MSE criterion, no standardization) regressing column 1 of the
# ReliefF-selected NSCLC training set on the remaining feature columns
# (columns 4..ncol), and append the fitted model path to a log file.
library(glmnet)

nsclc <- read.table("./TrainingSet/ReliefF/NSCLC.csv", header = TRUE, sep = ",")

# Predictors: every column from the 4th onward.  Response: column 1.
features <- as.matrix(nsclc[, 4:ncol(nsclc)])
response <- as.matrix(nsclc[, 1])

# Fixed seed so the cross-validation fold assignment is reproducible.
set.seed(123)
fit <- cv.glmnet(features, response,
                 nfolds = 10,
                 type.measure = "mse",
                 alpha = 0.5,
                 family = "gaussian",
                 standardize = FALSE)

# Append the per-lambda glmnet fit summary to the model log.
sink('./Model/EN/ReliefF/NSCLC/NSCLC_058.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
## Plot 2: line chart of Global Active Power over time, written to plot2.png.

#Read file and transform date
# NOTE(review): readfile.R is expected to create a data frame `df` with
# columns `Time` and `Global_active_power` -- confirm against readfile.R.
source("readfile.R")

# Plot a simple line graph
plot(df$Time,df$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")

# Target file name consumed by createpng.R when writing the device to disk.
pngfile="plot2.png"
#Source for function for creating png
source("createpng.R")
/plot2.R
no_license
AnneLeroy/ExData_Plotting1
R
false
false
276
r
#Read file and transform date
# NOTE(review): readfile.R presumably creates the data frame `df` with columns
# Time and Global_active_power used below -- confirm against that script.
source("readfile.R")
# Plot a simple line graph of global active power over time (drawn to the
# currently active graphics device)
plot(df$Time,df$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
# Target file name for the PNG copy.
# NOTE(review): createpng.R presumably reads `pngfile` and copies the current
# device to it -- confirm.
pngfile="plot2.png"
#Source for function for creating png
source("createpng.R")
# datasets.R
# Create and interact with MicroStrategy datasets

#' @title Create, update, and delete MicroStrategy datasets
#'
#' @description When creating a new dataset, provide a dataset name and an optional description.
#' When updating a pre-existing dataset, provide the dataset identifier. Tables are added to the
#' dataset in an iterative manner using `add_table()`.
#' @field connection MicroStrategy connection object
#' @field name Name of the dataset
#' @field description Description of the dataset. Must be less than or equal to 250 characters
#' @field dataset_id Identifier of a pre-existing dataset. Used when updating a pre-existing dataset
#' @field verbose Print API requests to console. Used for debugging
#' @examples
#' \donttest{
#' # Create data frames
#' df1 <- data.frame("id" = c(1, 2, 3, 4, 5),
#'                   "first_name" = c("Jason", "Molly", "Tina", "Jake", "Amy"),
#'                   "last_name" = c("Miller", "Jacobson", "Turner", "Milner", "Cooze"))
#'
#' df2 <- data.frame("id" = c(1, 2, 3, 4, 5),
#'                   "age" = c(42, 52, 36, 24, 73),
#'                   "state" = c("VA", "NC", "WY", "CA", "CA"),
#'                   "salary" = c(50000, 100000, 75000, 85000, 250000))
#'
#' # Add one or more tables, then create the dataset. By default create() also
#' # uploads the data and publishes the dataset.
#' my_dataset <- Dataset$new(connection = conn, name = "HR Analysis")
#' my_dataset$add_table("Employees", df1, "add")
#' my_dataset$add_table("Salaries", df2, "add")
#' my_dataset$create()
#'
#' # To only create the definition, use Dataset$create(auto_upload = FALSE),
#' # followed by Dataset$update() and Dataset$publish().
#'
#' # To refresh a previously created dataset:
#' ds <- Dataset$new(connection = conn, dataset_id = "...")
#' ds$add_table(name = "Stores", data_frame = stores_df, update_policy = 'update')
#' ds$add_table(name = "Sales", data_frame = stores_df, update_policy = 'upsert')
#' ds$update()
#' ds$publish()
#'
#' # Data is sent in increments of 100,000 rows by default; on very large
#' # datasets (> 1 GB) raise the rows sent per request:
#' ds$update(chunksize = 500000)
#' }
#' @docType class
#' @importFrom R6 R6Class
#' @importFrom jsonlite toJSON
#' @export
Dataset <- R6Class("Dataset",

  public = list(

    # instance variables
    name = NULL,
    description = NULL,
    folder_id = NULL,
    dataset_id = NULL,
    session_id = NULL,
    upload_body = NULL,
    verbose = NULL,
    VALID_POLICY = c("add", "update", "replace", "upsert"),
    MAX_DESC_LEN = 250,

    initialize = function(connection, name=NULL, description=NULL, dataset_id=NULL,
                          verbose=FALSE) {
      # Initialize dataset constructor; validates types and lengths of the
      # optional name / description / dataset_id.
      private$connection <- connection

      if (!is.null(name)) {
        private$check_param_str(name, msg="Dataset name should be a string.")
        private$check_param_len(name, msg="Dataset name should be <= 250 characters.",
                                length=self$MAX_DESC_LEN)
      }
      self$name <- name

      if (!is.null(description)) {
        private$check_param_str(description, msg="Dataset description should be a string.")
        private$check_param_len(description,
                                msg="Dataset description should be <= 250 characters.",
                                length=self$MAX_DESC_LEN)
      }
      self$description <- description

      if (!is.null(dataset_id)) {
        private$check_param_str(dataset_id, msg="Dataset ID should be a string.")
      }
      self$dataset_id <- dataset_id
      self$verbose <- verbose
    },

    add_table = function(name, data_frame, update_policy, to_metric=NULL, to_attribute=NULL) {
      # Add a data.frame to the collection of tables that later create/update the
      # MicroStrategy dataset. `to_metric` / `to_attribute` override the default
      # column-type mapping for the named columns.
      if (!is.data.frame(data_frame)) {  # FIX: inherits-based test instead of class() ==
        stop("data_frame must be a valid R data.frame.")
      }
      if (!update_policy %in% self$VALID_POLICY) {
        stop("Invalid update policy. Only 'add', 'update', 'replace', and 'upsert' are supported.")
      }

      table <- list("table_name" = name,
                    "data_frame" = data_frame,
                    "update_policy" = tolower(update_policy))

      if (any(to_attribute %in% to_metric)) {
        stop(paste0("Column name(s) present in `to_attribute` also present in 'to_metric'."))
      }
      if (!is.null(to_attribute)) {
        if (!all(to_attribute %in% names(data_frame))) {
          stop(paste0("Column name(s) in `to_attribute` were not found in `names(data_frame)`."))
        } else {
          # FIX: [[ ]] so a multi-element vector is stored intact
          # ([ ]<- silently keeps only the first element, with a warning)
          table[["to_attribute"]] <- to_attribute
        }
      }
      if (!is.null(to_metric)) {
        if (!all(to_metric %in% names(data_frame))) {
          stop(paste0("Column name(s) in `to_metric` were not found in `names(data_frame)`."))
        } else {
          table[["to_metric"]] <- to_metric  # FIX: [[ ]] as above
        }
      }

      # add the new table to the list of tables
      private$tables <- c(private$tables, list(table))
    },

    create = function(folder_id=NULL, auto_upload=TRUE) {
      # Create a new dataset definition on the server. With auto_upload=TRUE
      # (default) the data is also uploaded, published, and the publication
      # status is polled until complete.
      private$check_tables(private$tables)

      if (!is.null(folder_id)) {
        self$folder_id <- folder_id
      } else {
        self$folder_id <- ""
      }

      # generate model of the dataset and request the definition
      private$build_model()
      response <- create_multitable_dataset(private$connection, body=private$model_list$json,
                                            verbose=self$verbose)
      if (http_error(response)) {
        # response_handler() always stops
        private$response_handler(response, msg="Error creating new dataset definition.")
      }
      response <- content(response, as="parsed", type="application/json")
      self$dataset_id <- response$id
      if (self$verbose) {
        # FIX: sprintf() alone returns a string without printing it
        message(sprintf("Created dataset %s with ID: %s", self$name, self$dataset_id))
      }

      # if desired, automatically upload and publish the data to the new dataset
      if (auto_upload) {
        self$update()
        self$publish()
        status <- 6  # any value != 1 ("published") to enter the poll loop
        while (status != 1) {
          pub <- publish_status(connection=private$connection, dataset_id=self$dataset_id,
                                session_id=self$session_id, verbose=self$verbose)
          if (http_error(pub)) {
            # FIX: report the failing status response (the original passed the
            # stale create() response with a misleading message)
            private$response_handler(pub, msg="Error checking dataset publication status.")
          }
          pub <- content(pub, as="parsed", type="application/json")
          status <- pub$status
          if (status != 1) {
            Sys.sleep(1)  # avoid hammering the API while polling
          }
        }
      }
    },

    update = function(chunksize=100000) {
      # Upload the added tables to an existing dataset, `chunksize` rows per request.
      private$check_tables(private$tables)

      # form request body and create a session for data uploads
      private$form_upload_body()
      response <- upload_session(connection=private$connection, dataset_id=self$dataset_id,
                                 body=self$upload_body$json, verbose=self$verbose)
      if (http_error(response)) {  # http != 200
        private$response_handler(response, msg="Error creating new data upload session")
      }
      response <- content(response, as="parsed", type="application/json")
      self$session_id <- response$uploadSessionId

      # upload each table, chunk by chunk
      for (table in private$tables) {
        rows <- 0L
        total <- nrow(table$data_frame)
        chunks <- split(table$data_frame,
                        rep(seq_len(ceiling(total / chunksize)),
                            each=chunksize, length.out=total))
        for (i in seq_along(chunks)) {
          # base64 encode the chunk
          enc <- Encoder$new(chunks[[i]], "multi")
          b64_enc <- enc$encode()

          # form body of the request
          body <- toJSON(list("tableName"=table$table_name, "index"=i, "data"=b64_enc),
                         auto_unbox=TRUE)

          # make request to upload the data
          response <- upload(private$connection, dataset_id=self$dataset_id,
                             session_id=self$session_id, body=body, verbose=self$verbose)
          if (http_error(response)) {  # http != 200
            # FIX: cancel the session BEFORE raising -- response_handler() stops,
            # so the original publish_cancel() call was unreachable
            publish_cancel(private$connection, self$dataset_id, self$session_id,
                           verbose=self$verbose)
            private$response_handler(response, msg="Error uploading data.")
          }

          # FIX: chunks[i] is a one-element list, so nrow() on it returned NULL
          # and the row counter collapsed to numeric(0)
          rows <- rows + nrow(chunks[[i]])
          if (self$verbose) {
            private$upload_progress(table$table_name, rows, total)
          }
        }
      }
    },

    publish = function() {
      # Publish the uploaded data to the selected dataset.
      response <- publish(connection=private$connection, dataset_id=self$dataset_id,
                          session_id=self$session_id, verbose=self$verbose)
      if (http_error(response)) {  # http != 200
        # FIX: cancel before raising (see update()); response_handler() stops
        publish_cancel(private$connection, self$dataset_id, self$session_id,
                       verbose=self$verbose)
        private$response_handler(response,
                                 msg="Error publishing updated data. Cancelling upload.")
      }
      return(response)
    },

    publish_status = function() {
      # Check the status of data uploaded to a dataset; returns the parsed JSON body.
      response <- publish_status(connection=private$connection, dataset_id=self$dataset_id,
                                 session_id=self$session_id, verbose=self$verbose)
      status <- content(response, as="parsed", type="application/json")
      return(status)
    },

    delete = function() {
      # Delete a dataset that was previously created using the REST API.
      response <- delete_dataset(connection=private$connection, dataset_id=self$dataset_id,
                                 verbose=self$verbose)
      if (http_error(response)) {  # http != 200
        # FIX: `dataset_id` was referenced without `self$` and is undefined here
        private$response_handler(response,
                                 msg=paste("Error deleting dataset with ID:", self$dataset_id))
      } else {
        print(paste("Successfully deleted dataset with ID:", self$dataset_id))
      }
    }
  ),

  private = list(

    connection = NULL,
    tables = list(),
    definition = NULL,
    model_list = NULL,

    build_model = function() {
      # generate model of the dataset using the Model class
      model <- Model$new(tables=private$tables, name=self$name,
                         description=self$description, folder_id=self$folder_id)
      private$model_list <- model$get_model()
    },

    form_upload_body = function() {
      # Form request body for creating an upload session for data uploads
      body <- list("tables" = lapply(private$tables, function(x) {
        list("name" = x$table_name,
             "updatePolicy" = x$update_policy,
             "columnHeaders" = names(x$data_frame))
      }))
      body_json <- toJSON(body, auto_unbox=TRUE)
      self$upload_body <- list("raw" = body, "json" = body_json)
    },

    load_definition = function() {
      # Load definition of an existing dataset and sync name/id onto this object
      response <- dataset_definition(connection=private$connection,
                                     dataset_id=self$dataset_id, verbose=self$verbose)
      if (http_error(response)) {  # http != 200
        private$response_handler(response=response,
                                 msg="Error loading dataset definition. Check dataset ID.")
      } else {
        private$definition <- content(response, as="parsed", type="application/json")
        self$name <- private$definition$name
        self$dataset_id <- private$definition$id
      }
    },

    upload_progress = function(table_name, rows, total) {
      # Print the status of a dataset upload.
      # FIX: the original sprintf() result was discarded (nothing was printed)
      # and its format mixed a percentage with a row count ("40 of 2 rows").
      message(sprintf("%s status: %.0f%% (%s of %s rows)",
                      table_name, 100 * rows / total, rows, total))
    },

    response_handler = function(response, msg) {
      # Generic error handler for transactions against datasets; always stops.
      status <- http_status(response)
      errors <- content(response)
      stop(sprintf("%s\n HTTP Error: %s %s %s\n I-Server Error: %s %s",
                   msg, response$status_code, status$reason, status$message,
                   errors$code, errors$message), call.=FALSE)
    },

    check_param_len = function(param, msg, length) {
      # FIX: use > so a value of exactly `length` characters is accepted,
      # matching the "<= 250" wording of the error messages
      if (nchar(param) > length) {
        stop(msg)
      } else {
        return(TRUE)
      }
    },

    check_param_str = function(param, msg) {
      if (!is.character(param)) {  # FIX: inherits-based test instead of class() ==
        stop(msg)
      } else {
        return(TRUE)
      }
    },

    check_tables = function(tables) {
      if (length(tables) == 0) {
        stop("No tables have been added to the dataset. Use `Dataset$add_table()` to add a table.")
      }
    }
  )
)
/R/datasets.R
permissive
apiotrowskiMicroStrategy/mstrio
R
false
false
13,501
r
# datasets.R
# Create and interact with MicroStrategy datasets

#' @title Create, update, and delete MicroStrategy datasets
#'
#' @description When creating a new dataset, provide a dataset name and an optional description.
#' When updating a pre-existing dataset, provide the dataset identifier. Tables are added to the
#' dataset in an iterative manner using `add_table()`.
#' @field connection MicroStrategy connection object
#' @field name Name of the dataset
#' @field description Description of the dataset. Must be less than or equal to 250 characters
#' @field dataset_id Identifier of a pre-existing dataset. Used when updating a pre-existing dataset
#' @field verbose Print API requests to console. Used for debugging
#' @examples
#' \donttest{
#' # Create data frames
#' df1 <- data.frame("id" = c(1, 2, 3, 4, 5),
#'                   "first_name" = c("Jason", "Molly", "Tina", "Jake", "Amy"),
#'                   "last_name" = c("Miller", "Jacobson", "Turner", "Milner", "Cooze"))
#'
#' df2 <- data.frame("id" = c(1, 2, 3, 4, 5),
#'                   "age" = c(42, 52, 36, 24, 73),
#'                   "state" = c("VA", "NC", "WY", "CA", "CA"),
#'                   "salary" = c(50000, 100000, 75000, 85000, 250000))
#'
#' # Add one or more tables, then create the dataset. By default create() also
#' # uploads the data and publishes the dataset.
#' my_dataset <- Dataset$new(connection = conn, name = "HR Analysis")
#' my_dataset$add_table("Employees", df1, "add")
#' my_dataset$add_table("Salaries", df2, "add")
#' my_dataset$create()
#'
#' # To only create the definition, use Dataset$create(auto_upload = FALSE),
#' # followed by Dataset$update() and Dataset$publish().
#'
#' # To refresh a previously created dataset:
#' ds <- Dataset$new(connection = conn, dataset_id = "...")
#' ds$add_table(name = "Stores", data_frame = stores_df, update_policy = 'update')
#' ds$add_table(name = "Sales", data_frame = stores_df, update_policy = 'upsert')
#' ds$update()
#' ds$publish()
#'
#' # Data is sent in increments of 100,000 rows by default; on very large
#' # datasets (> 1 GB) raise the rows sent per request:
#' ds$update(chunksize = 500000)
#' }
#' @docType class
#' @importFrom R6 R6Class
#' @importFrom jsonlite toJSON
#' @export
Dataset <- R6Class("Dataset",

  public = list(

    # instance variables
    name = NULL,
    description = NULL,
    folder_id = NULL,
    dataset_id = NULL,
    session_id = NULL,
    upload_body = NULL,
    verbose = NULL,
    VALID_POLICY = c("add", "update", "replace", "upsert"),
    MAX_DESC_LEN = 250,

    initialize = function(connection, name=NULL, description=NULL, dataset_id=NULL,
                          verbose=FALSE) {
      # Initialize dataset constructor; validates types and lengths of the
      # optional name / description / dataset_id.
      private$connection <- connection

      if (!is.null(name)) {
        private$check_param_str(name, msg="Dataset name should be a string.")
        private$check_param_len(name, msg="Dataset name should be <= 250 characters.",
                                length=self$MAX_DESC_LEN)
      }
      self$name <- name

      if (!is.null(description)) {
        private$check_param_str(description, msg="Dataset description should be a string.")
        private$check_param_len(description,
                                msg="Dataset description should be <= 250 characters.",
                                length=self$MAX_DESC_LEN)
      }
      self$description <- description

      if (!is.null(dataset_id)) {
        private$check_param_str(dataset_id, msg="Dataset ID should be a string.")
      }
      self$dataset_id <- dataset_id
      self$verbose <- verbose
    },

    add_table = function(name, data_frame, update_policy, to_metric=NULL, to_attribute=NULL) {
      # Add a data.frame to the collection of tables that later create/update the
      # MicroStrategy dataset. `to_metric` / `to_attribute` override the default
      # column-type mapping for the named columns.
      if (!is.data.frame(data_frame)) {  # FIX: inherits-based test instead of class() ==
        stop("data_frame must be a valid R data.frame.")
      }
      if (!update_policy %in% self$VALID_POLICY) {
        stop("Invalid update policy. Only 'add', 'update', 'replace', and 'upsert' are supported.")
      }

      table <- list("table_name" = name,
                    "data_frame" = data_frame,
                    "update_policy" = tolower(update_policy))

      if (any(to_attribute %in% to_metric)) {
        stop(paste0("Column name(s) present in `to_attribute` also present in 'to_metric'."))
      }
      if (!is.null(to_attribute)) {
        if (!all(to_attribute %in% names(data_frame))) {
          stop(paste0("Column name(s) in `to_attribute` were not found in `names(data_frame)`."))
        } else {
          # FIX: [[ ]] so a multi-element vector is stored intact
          # ([ ]<- silently keeps only the first element, with a warning)
          table[["to_attribute"]] <- to_attribute
        }
      }
      if (!is.null(to_metric)) {
        if (!all(to_metric %in% names(data_frame))) {
          stop(paste0("Column name(s) in `to_metric` were not found in `names(data_frame)`."))
        } else {
          table[["to_metric"]] <- to_metric  # FIX: [[ ]] as above
        }
      }

      # add the new table to the list of tables
      private$tables <- c(private$tables, list(table))
    },

    create = function(folder_id=NULL, auto_upload=TRUE) {
      # Create a new dataset definition on the server. With auto_upload=TRUE
      # (default) the data is also uploaded, published, and the publication
      # status is polled until complete.
      private$check_tables(private$tables)

      if (!is.null(folder_id)) {
        self$folder_id <- folder_id
      } else {
        self$folder_id <- ""
      }

      # generate model of the dataset and request the definition
      private$build_model()
      response <- create_multitable_dataset(private$connection, body=private$model_list$json,
                                            verbose=self$verbose)
      if (http_error(response)) {
        # response_handler() always stops
        private$response_handler(response, msg="Error creating new dataset definition.")
      }
      response <- content(response, as="parsed", type="application/json")
      self$dataset_id <- response$id
      if (self$verbose) {
        # FIX: sprintf() alone returns a string without printing it
        message(sprintf("Created dataset %s with ID: %s", self$name, self$dataset_id))
      }

      # if desired, automatically upload and publish the data to the new dataset
      if (auto_upload) {
        self$update()
        self$publish()
        status <- 6  # any value != 1 ("published") to enter the poll loop
        while (status != 1) {
          pub <- publish_status(connection=private$connection, dataset_id=self$dataset_id,
                                session_id=self$session_id, verbose=self$verbose)
          if (http_error(pub)) {
            # FIX: report the failing status response (the original passed the
            # stale create() response with a misleading message)
            private$response_handler(pub, msg="Error checking dataset publication status.")
          }
          pub <- content(pub, as="parsed", type="application/json")
          status <- pub$status
          if (status != 1) {
            Sys.sleep(1)  # avoid hammering the API while polling
          }
        }
      }
    },

    update = function(chunksize=100000) {
      # Upload the added tables to an existing dataset, `chunksize` rows per request.
      private$check_tables(private$tables)

      # form request body and create a session for data uploads
      private$form_upload_body()
      response <- upload_session(connection=private$connection, dataset_id=self$dataset_id,
                                 body=self$upload_body$json, verbose=self$verbose)
      if (http_error(response)) {  # http != 200
        private$response_handler(response, msg="Error creating new data upload session")
      }
      response <- content(response, as="parsed", type="application/json")
      self$session_id <- response$uploadSessionId

      # upload each table, chunk by chunk
      for (table in private$tables) {
        rows <- 0L
        total <- nrow(table$data_frame)
        chunks <- split(table$data_frame,
                        rep(seq_len(ceiling(total / chunksize)),
                            each=chunksize, length.out=total))
        for (i in seq_along(chunks)) {
          # base64 encode the chunk
          enc <- Encoder$new(chunks[[i]], "multi")
          b64_enc <- enc$encode()

          # form body of the request
          body <- toJSON(list("tableName"=table$table_name, "index"=i, "data"=b64_enc),
                         auto_unbox=TRUE)

          # make request to upload the data
          response <- upload(private$connection, dataset_id=self$dataset_id,
                             session_id=self$session_id, body=body, verbose=self$verbose)
          if (http_error(response)) {  # http != 200
            # FIX: cancel the session BEFORE raising -- response_handler() stops,
            # so the original publish_cancel() call was unreachable
            publish_cancel(private$connection, self$dataset_id, self$session_id,
                           verbose=self$verbose)
            private$response_handler(response, msg="Error uploading data.")
          }

          # FIX: chunks[i] is a one-element list, so nrow() on it returned NULL
          # and the row counter collapsed to numeric(0)
          rows <- rows + nrow(chunks[[i]])
          if (self$verbose) {
            private$upload_progress(table$table_name, rows, total)
          }
        }
      }
    },

    publish = function() {
      # Publish the uploaded data to the selected dataset.
      response <- publish(connection=private$connection, dataset_id=self$dataset_id,
                          session_id=self$session_id, verbose=self$verbose)
      if (http_error(response)) {  # http != 200
        # FIX: cancel before raising (see update()); response_handler() stops
        publish_cancel(private$connection, self$dataset_id, self$session_id,
                       verbose=self$verbose)
        private$response_handler(response,
                                 msg="Error publishing updated data. Cancelling upload.")
      }
      return(response)
    },

    publish_status = function() {
      # Check the status of data uploaded to a dataset; returns the parsed JSON body.
      response <- publish_status(connection=private$connection, dataset_id=self$dataset_id,
                                 session_id=self$session_id, verbose=self$verbose)
      status <- content(response, as="parsed", type="application/json")
      return(status)
    },

    delete = function() {
      # Delete a dataset that was previously created using the REST API.
      response <- delete_dataset(connection=private$connection, dataset_id=self$dataset_id,
                                 verbose=self$verbose)
      if (http_error(response)) {  # http != 200
        # FIX: `dataset_id` was referenced without `self$` and is undefined here
        private$response_handler(response,
                                 msg=paste("Error deleting dataset with ID:", self$dataset_id))
      } else {
        print(paste("Successfully deleted dataset with ID:", self$dataset_id))
      }
    }
  ),

  private = list(

    connection = NULL,
    tables = list(),
    definition = NULL,
    model_list = NULL,

    build_model = function() {
      # generate model of the dataset using the Model class
      model <- Model$new(tables=private$tables, name=self$name,
                         description=self$description, folder_id=self$folder_id)
      private$model_list <- model$get_model()
    },

    form_upload_body = function() {
      # Form request body for creating an upload session for data uploads
      body <- list("tables" = lapply(private$tables, function(x) {
        list("name" = x$table_name,
             "updatePolicy" = x$update_policy,
             "columnHeaders" = names(x$data_frame))
      }))
      body_json <- toJSON(body, auto_unbox=TRUE)
      self$upload_body <- list("raw" = body, "json" = body_json)
    },

    load_definition = function() {
      # Load definition of an existing dataset and sync name/id onto this object
      response <- dataset_definition(connection=private$connection,
                                     dataset_id=self$dataset_id, verbose=self$verbose)
      if (http_error(response)) {  # http != 200
        private$response_handler(response=response,
                                 msg="Error loading dataset definition. Check dataset ID.")
      } else {
        private$definition <- content(response, as="parsed", type="application/json")
        self$name <- private$definition$name
        self$dataset_id <- private$definition$id
      }
    },

    upload_progress = function(table_name, rows, total) {
      # Print the status of a dataset upload.
      # FIX: the original sprintf() result was discarded (nothing was printed)
      # and its format mixed a percentage with a row count ("40 of 2 rows").
      message(sprintf("%s status: %.0f%% (%s of %s rows)",
                      table_name, 100 * rows / total, rows, total))
    },

    response_handler = function(response, msg) {
      # Generic error handler for transactions against datasets; always stops.
      status <- http_status(response)
      errors <- content(response)
      stop(sprintf("%s\n HTTP Error: %s %s %s\n I-Server Error: %s %s",
                   msg, response$status_code, status$reason, status$message,
                   errors$code, errors$message), call.=FALSE)
    },

    check_param_len = function(param, msg, length) {
      # FIX: use > so a value of exactly `length` characters is accepted,
      # matching the "<= 250" wording of the error messages
      if (nchar(param) > length) {
        stop(msg)
      } else {
        return(TRUE)
      }
    },

    check_param_str = function(param, msg) {
      if (!is.character(param)) {  # FIX: inherits-based test instead of class() ==
        stop(msg)
      } else {
        return(TRUE)
      }
    },

    check_tables = function(tables) {
      if (length(tables) == 0) {
        stop("No tables have been added to the dataset. Use `Dataset$add_table()` to add a table.")
      }
    }
  )
)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DiceRoll.R
\name{multiRoll}
\alias{multiRoll}
\title{composite die-roll closure function}
\usage{
multiRoll(rollFuncList, evalFunc = sum)
}
\arguments{
\item{rollFuncList}{list of die-roll functors to combine}

\item{evalFunc}{optional combining operation (default is sum)}
}
\value{
a parameterless function that will make the combined roll
}
\description{
\code{multiRoll} creates a parameterless functor that sums a list of
other rolls, e.g. composite effects from weapons and magic.
}
\examples{
# Short-sword two-weapon fighting for a 3rd-level rogue
shortSwordTwFRoll <- multiRoll( c(makeRoll(1,6,2), makeRoll(1,6,0)) )
advantageAttack <- multiRoll( rep( c(makeRoll(1,20,0)), 2 ), evalFunc = max )
}
\seealso{
\code{\link{multiRoll}}
}
/man/multiRoll.Rd
permissive
Normanator/DnD-RLib
R
false
true
787
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DiceRoll.R
\name{multiRoll}
\alias{multiRoll}
\title{composite die-roll closure function}
\usage{
multiRoll(rollFuncList, evalFunc = sum)
}
\arguments{
\item{rollFuncList}{list of die-roll functors to combine}

\item{evalFunc}{optional combining operation (default is sum)}
}
\value{
a parameterless function that will make the combined roll
}
\description{
\code{multiRoll} creates a parameterless functor that sums a list of
other rolls, e.g. composite effects from weapons and magic.
}
\examples{
# Short-sword two-weapon fighting for a 3rd-level rogue
shortSwordTwFRoll <- multiRoll( c(makeRoll(1,6,2), makeRoll(1,6,0)) )
advantageAttack <- multiRoll( rep( c(makeRoll(1,20,0)), 2 ), evalFunc = max )
}
\seealso{
\code{\link{multiRoll}}
}
#!/usr/bin/env Rscript
# test3_predictingAgainsENDBpedia.R
# test_exp3_Holiday_vs_SeveralFirstLevel.R
#
# Evaluate the Holiday-vs-first-level-types binary model against the English
# DBpedia SPARQL endpoint and save the collected predictions.
source(paste0(getwd(), "/get_predictionsFromClassBinaryModel.R"))

# Positive class: 1000 Holiday instances
pc1 <- "<http://dbpedia.org/ontology/Holiday>"
npc1 <- 1000

# Negative classes: 19 first-level DBpedia ontology types.
# FIX: replaces 19 repetitive nc1[[k]] <- assignments with a vectorized build.
negative_types <- c("Activity", "Agent", "AnatomicalStructure", "Award",
                    "Biomolecule", "ChemicalSubstance", "Colour", "Currency",
                    "Device", "Disease", "Event", "Food", "Language",
                    "MeanOfTransportation", "Species", "SportCompetitionResult",
                    "TopicalConcept", "Work", "Place")
nc1 <- as.list(paste0("<http://dbpedia.org/ontology/", negative_types, ">"))

# 50 negative cases per class, except 100 for the final class (Place).
# FIX: replaces 19 repetitive nnc1[[k]] <- assignments.
nnc1 <- as.list(c(rep(50, length(negative_types) - 1), 100))

nr <- 1  # number of requests per query
# urlEsDBpedia <- "http://es.dbpedia.org/sparql"
urlEnDBpedia <- "https://dbpedia.org/sparql"
qL <- 10000  # SPARQL query LIMIT

rf_HvsL1T <- "randomForest_HolidayVsLevel1Types1"
rf_path <- paste0(getwd(), "/")

# Loads `test_experiment3` (the fitted binary model) into the workspace.
load(file = paste0(getwd(), "/exp3_Holiday_vs_SeveralFirstLevel.RData"))
binaryModel <- test_experiment3

test_exp3_ENDBpedia <- get_predictionsFromClassBinaryModel(
  classBinaryModel_object = binaryModel,
  positiveClass = pc1,
  numberPositiveCases = npc1,
  negativeClasses = nc1,
  numberNegativeCases = nnc1,
  nameFile = rf_HvsL1T,
  pathModel = rf_path,
  numberOfRequest = nr,
  urlEndpoint = urlEnDBpedia,
  queryLimit = qL)

save(test_exp3_ENDBpedia,
     file = paste0(getwd(), "/exp3_ENDBpedia_JustCollectAndPrepare.RData"))
/experiments_andTestCode/test3_predictingAgainsENDBpedia.R
no_license
Pedrodpj92/BinaryClassifiers_DBpediaTypes
R
false
false
2,612
r
#!/usr/bin/env Rscript
# test3_predictingAgainsENDBpedia.R
# test_exp3_Holiday_vs_SeveralFirstLevel.R
#
# Evaluate the Holiday-vs-first-level-types binary model against the English
# DBpedia SPARQL endpoint and save the collected predictions.
source(paste0(getwd(), "/get_predictionsFromClassBinaryModel.R"))

# Positive class: 1000 Holiday instances
pc1 <- "<http://dbpedia.org/ontology/Holiday>"
npc1 <- 1000

# Negative classes: 19 first-level DBpedia ontology types.
# FIX: replaces 19 repetitive nc1[[k]] <- assignments with a vectorized build.
negative_types <- c("Activity", "Agent", "AnatomicalStructure", "Award",
                    "Biomolecule", "ChemicalSubstance", "Colour", "Currency",
                    "Device", "Disease", "Event", "Food", "Language",
                    "MeanOfTransportation", "Species", "SportCompetitionResult",
                    "TopicalConcept", "Work", "Place")
nc1 <- as.list(paste0("<http://dbpedia.org/ontology/", negative_types, ">"))

# 50 negative cases per class, except 100 for the final class (Place).
# FIX: replaces 19 repetitive nnc1[[k]] <- assignments.
nnc1 <- as.list(c(rep(50, length(negative_types) - 1), 100))

nr <- 1  # number of requests per query
# urlEsDBpedia <- "http://es.dbpedia.org/sparql"
urlEnDBpedia <- "https://dbpedia.org/sparql"
qL <- 10000  # SPARQL query LIMIT

rf_HvsL1T <- "randomForest_HolidayVsLevel1Types1"
rf_path <- paste0(getwd(), "/")

# Loads `test_experiment3` (the fitted binary model) into the workspace.
load(file = paste0(getwd(), "/exp3_Holiday_vs_SeveralFirstLevel.RData"))
binaryModel <- test_experiment3

test_exp3_ENDBpedia <- get_predictionsFromClassBinaryModel(
  classBinaryModel_object = binaryModel,
  positiveClass = pc1,
  numberPositiveCases = npc1,
  negativeClasses = nc1,
  numberNegativeCases = nnc1,
  nameFile = rf_HvsL1T,
  pathModel = rf_path,
  numberOfRequest = nr,
  urlEndpoint = urlEnDBpedia,
  queryLimit = qL)

save(test_exp3_ENDBpedia,
     file = paste0(getwd(), "/exp3_ENDBpedia_JustCollectAndPrepare.RData"))
#!/usr/bin/env Rscript
# Church-style pair: a cons cell encoded as a closure. The closure captures
# head and tail and returns one of them depending on a boolean selector.
kons <- function(hd, tl = NULL) {
  function(want_head) {
    if (want_head) {
      hd
    } else {
      tl
    }
  }
}

# car: select the head of a pair
virst <- function(pair) pair(TRUE)

# cdr: select the tail of a pair
rrest <- function(pair) pair(FALSE)

# Build the list (1 2) and extract the head of its tail, i.e. 2.
lstt <- kons(1, kons(2))
virst(rrest(lstt))
/R/lambdaconscarcdr.Rscript
no_license
JoergWMittag/lambdaconscarcdr
R
false
false
193
rscript
#!/usr/bin/env Rscript
# Church-style pair: a cons cell encoded as a closure. The closure captures
# head and tail and returns one of them depending on a boolean selector.
kons <- function(hd, tl = NULL) {
  function(want_head) {
    if (want_head) {
      hd
    } else {
      tl
    }
  }
}

# car: select the head of a pair
virst <- function(pair) pair(TRUE)

# cdr: select the tail of a pair
rrest <- function(pair) pair(FALSE)

# Build the list (1 2) and extract the head of its tail, i.e. 2.
lstt <- kons(1, kons(2))
virst(rrest(lstt))
# HackerRank Day 0: mean, median, and mode.
# Reads a count N on line 1 and N whitespace-separated numbers on line 2 from
# stdin, then prints mean, median and the (smallest) mode, one per line.
input_lines <- readLines("/dev/stdin")
n <- as.numeric(input_lines[1])
values <- as.numeric(strsplit(input_lines[2], "\\s+")[[1]])
# table() sorts its names ascending, so which.max() lands on the first (i.e.
# smallest) value with maximal frequency; min() over that single value is a
# no-op kept for safety.
freq <- table(values)
mode_value <- min(as.numeric(names(freq)[which.max(freq)]))
cat(paste(mean(values), median(values), mode_value, sep = "\n"))
/Day 0 Mean, Median, and Mode.R
no_license
23devanshi/hackerrank
R
false
false
204
r
# HackerRank Day 0: mean, median, and mode.
# Reads a count N on line 1 and N whitespace-separated numbers on line 2 from
# stdin, then prints mean, median and the (smallest) mode, one per line.
input_lines <- readLines("/dev/stdin")
n <- as.numeric(input_lines[1])
values <- as.numeric(strsplit(input_lines[2], "\\s+")[[1]])
# table() sorts its names ascending, so which.max() lands on the first (i.e.
# smallest) value with maximal frequency; min() over that single value is a
# no-op kept for safety.
freq <- table(values)
mode_value <- min(as.numeric(names(freq)[which.max(freq)]))
cat(paste(mean(values), median(values), mode_value, sep = "\n"))
library(tikzDevice)

### run all four fake models
# NOTE(review): these scripts presumably create pdMod(), effs, datF and the
# fitModels/*.RData fits loaded below -- confirm against the sourced files.
source('R/fakePrep.r')
source('R/fakeNoEff.r')
source('R/fakeConstEff.r')
source('R/fakeLinEff.r')
source('R/fakeQuadEff.r')

# ensure extract() refers to rstan's version (guards against masking)
extract <- rstan::extract

######################
### results from fake models
#####################

# estEff collects the posterior summary of (b0, b1) for each simulated-effect model
estEff <- list()

# no-effect model: true effect curve is identically zero
print(load('fitModels/noEffect.RData'))
pd <- pdMod(noEff,1,1,function(x) x*0)
estEff$noEff <- summary(noEff,par=c('b0','b1'))[[1]]
rm(noEff);gc()  # free each large fit object before loading the next

# constant-effect model: true effect is 0.18 everywhere
print(load('fitModels/constEff.RData'))
pd <- rbind(pd,pdMod(constEff,1,2,function(x) x*0+0.18))
estEff$constEff <- summary(constEff,par=c('b0','b1'))[[1]]
rm(constEff); gc()

# linear-effect model: true effect line uses the mean b0/b1 from `effs`
print(load('fitModels/linEff.RData'))
pd <- rbind(pd,pdMod(linEff,2,1,function(x) mean(effs$b0)+x*mean(effs$b1)))
estEff$linEff <- summary(linEff,par=c('b0','b1'))[[1]]
rm(linEff);gc()

# quadratic-effect model: true effect is a centered, rescaled quadratic in U
print(load('fitModels/quadEff.RData'))
U <- datF$U
mux <- mean(U)               # center of the quadratic
te <- -(U-mux)^2             # raw (unscaled) quadratic effect
sigte <- sd(te)              # scaling so te/sigte has unit SD
mute <- mean(te/sigte*0.1)   # mean of the rescaled effect, subtracted below
pd <- rbind(pd,pdMod(quadEff,2,2,function(x) -(x-mux)^2*0.1/sigte-mute+0.13))
estEff$quadEff <- summary(quadEff,par=c('b0','b1'))[[1]]
rm(quadEff);gc()

# Build one LaTeX facet title per panel: the true effect formula followed by
# the estimated line, with the sign of b1-hat spelled out for typesetting.
pd$title <- NA
pd <- within(pd,{
  title[row==1 & column==1] <- paste0('$\\tau=0$\n$\\hat{\\tau}=',sprintf("%.2f",estEff$noEff['b0',1]),
                                      ifelse(estEff$noEff['b1',1]>0,'+',''),
                                      sprintf("%.2f",estEff$noEff['b1',1]),'\\eta_T$')
  title[row==1 & column==2] <- paste0('$\\tau=0.18+\\epsilon$\n$\\hat{\\tau}=',
                                      sprintf("%.2f",estEff$constEff['b0',1]),
                                      ifelse(estEff$constEff['b1',1]>0,'+',''),
                                      sprintf("%.2f",estEff$constEff['b1',1]),'\\eta_T$')
  title[row==2 & column==1] <- paste0('$\\tau=', round(mean(effs$b0),2),'+',
                                      round(mean(effs$b1),2), '\\eta_T$\n$\\hat{\\tau}=',
                                      sprintf("%.2f",estEff$linEff['b0',1]),
                                      ifelse(estEff$linEff['b1',1]>0,'+',''),
                                      (sprintf("%.2f",estEff$linEff['b1',1])),'\\eta_T$')
  title[row==2 & column==2] <- paste0('$\\tau=-0.04\\eta_T^2+0.01\\eta_T+0.02$\n$\\hat{\\tau}=',
                                      sprintf("%.2f",estEff$quadEff['b0',1]),
                                      ifelse(estEff$quadEff['b1',1]>0,'+',' '),
                                      sprintf("%.2f",estEff$quadEff['b1',1]),'\\eta_T$')})

# Fix the facet display order: no effect, constant, linear, quadratic
pd <- within(pd, {
  title <- factor(title,levels=c(title[row==1 & column==1][1],
                                 title[row==1 & column==2][1],
                                 title[row==2 & column==1][1],
                                 title[row==2 & column==2][1]))})

# Render the 2x2 faceted plot (posterior effect lines in red over the truth /
# average lines) to a standalone TikZ file sized for a paper figure.
tikz('output/fakePlots.tex',standAlone=TRUE,width=6,height=6)
print(ggplot(pd)+
  geom_abline(aes(intercept=b0,slope=b1,group=id),color='red')+
  coord_cartesian(xlim=c(min(pd$xmin),max(pd$xmax)),ylim=c(min(pd$ymin),max(pd$ymax)),expand=FALSE)+
  geom_line(aes(x=x,y=y,group=truthOrAvg,linetype=truthOrAvg,color=truthOrAvg,alpha=truthOrAvg),size=1.5)+
  facet_wrap(~title,ncol=2)+xlab('$\\eta_T$')+ylab('$\\hat{\\tau}(\\eta_T)$')+
  labs(group=NULL,color=NULL,linetype=NULL)+
  #theme(strip.background = element_blank(),strip.text.x = element_blank(),strip.text.y=element_blank())+
  scale_color_manual(values=c('black','red','black'))+scale_linetype_manual(values=c('solid','solid','dotted'))+scale_alpha_manual(values=c(1,0,1),guide=FALSE)+theme(legend.position='top')+theme(text=element_text(size=15))+theme(legend.key.width=unit(.5,'in')))
dev.off()

# Compile the TikZ output to PDF from inside output/, then return to the
# project root.
# NOTE(review): `pdf = T` / `clean = T` use the reassignable T alias; TRUE
# would be safer, and setwd() here assumes no error interrupts the line.
setwd('output'); tools::texi2dvi('fakePlots.tex', pdf = T, clean = T); setwd('..')
/R/fakeModels.r
no_license
adamSales/logDataCaseStudy
R
false
false
3,825
r
library(tikzDevice) ### run all four fake models source('R/fakePrep.r') source('R/fakeNoEff.r') source('R/fakeConstEff.r') source('R/fakeLinEff.r') source('R/fakeQuadEff.r') extract <- rstan::extract ###################### ### results from fake models ##################### estEff <- list() print(load('fitModels/noEffect.RData')) pd <- pdMod(noEff,1,1,function(x) x*0) estEff$noEff <- summary(noEff,par=c('b0','b1'))[[1]] rm(noEff);gc() print(load('fitModels/constEff.RData')) pd <- rbind(pd,pdMod(constEff,1,2,function(x) x*0+0.18)) estEff$constEff <- summary(constEff,par=c('b0','b1'))[[1]] rm(constEff); gc() print(load('fitModels/linEff.RData')) pd <- rbind(pd,pdMod(linEff,2,1,function(x) mean(effs$b0)+x*mean(effs$b1))) estEff$linEff <- summary(linEff,par=c('b0','b1'))[[1]] rm(linEff);gc() print(load('fitModels/quadEff.RData')) U <- datF$U mux <- mean(U) te <- -(U-mux)^2 sigte <- sd(te) mute <- mean(te/sigte*0.1) pd <- rbind(pd,pdMod(quadEff,2,2,function(x) -(x-mux)^2*0.1/sigte-mute+0.13)) estEff$quadEff <- summary(quadEff,par=c('b0','b1'))[[1]] rm(quadEff);gc() pd$title <- NA pd <- within(pd,{ title[row==1 & column==1] <- paste0('$\\tau=0$\n$\\hat{\\tau}=',sprintf("%.2f",estEff$noEff['b0',1]), ifelse(estEff$noEff['b1',1]>0,'+',''), sprintf("%.2f",estEff$noEff['b1',1]),'\\eta_T$') title[row==1 & column==2] <- paste0('$\\tau=0.18+\\epsilon$\n$\\hat{\\tau}=', sprintf("%.2f",estEff$constEff['b0',1]), ifelse(estEff$constEff['b1',1]>0,'+',''), sprintf("%.2f",estEff$constEff['b1',1]),'\\eta_T$') title[row==2 & column==1] <- paste0('$\\tau=', round(mean(effs$b0),2),'+', round(mean(effs$b1),2), '\\eta_T$\n$\\hat{\\tau}=', sprintf("%.2f",estEff$linEff['b0',1]), ifelse(estEff$linEff['b1',1]>0,'+',''), (sprintf("%.2f",estEff$linEff['b1',1])),'\\eta_T$') title[row==2 & column==2] <- paste0('$\\tau=-0.04\\eta_T^2+0.01\\eta_T+0.02$\n$\\hat{\\tau}=', sprintf("%.2f",estEff$quadEff['b0',1]), ifelse(estEff$quadEff['b1',1]>0,'+',' '), sprintf("%.2f",estEff$quadEff['b1',1]),'\\eta_T$')}) 
pd <- within(pd, { title <- factor(title,levels=c(title[row==1 & column==1][1], title[row==1 & column==2][1], title[row==2 & column==1][1], title[row==2 & column==2][1]))}) tikz('output/fakePlots.tex',standAlone=TRUE,width=6,height=6) print(ggplot(pd)+ geom_abline(aes(intercept=b0,slope=b1,group=id),color='red')+ coord_cartesian(xlim=c(min(pd$xmin),max(pd$xmax)),ylim=c(min(pd$ymin),max(pd$ymax)),expand=FALSE)+ geom_line(aes(x=x,y=y,group=truthOrAvg,linetype=truthOrAvg,color=truthOrAvg,alpha=truthOrAvg),size=1.5)+ facet_wrap(~title,ncol=2)+xlab('$\\eta_T$')+ylab('$\\hat{\\tau}(\\eta_T)$')+ labs(group=NULL,color=NULL,linetype=NULL)+ #theme(strip.background = element_blank(),strip.text.x = element_blank(),strip.text.y=element_blank())+ scale_color_manual(values=c('black','red','black'))+scale_linetype_manual(values=c('solid','solid','dotted'))+scale_alpha_manual(values=c(1,0,1),guide=FALSE)+theme(legend.position='top')+theme(text=element_text(size=15))+theme(legend.key.width=unit(.5,'in'))) dev.off() setwd('output'); tools::texi2dvi('fakePlots.tex', pdf = T, clean = T); setwd('..')
if (!require("gridExtra")) { install.packages("gridExtra") library(gridExtra) } if (!require("moments")) { install.packages("moments") library(moments) } if (!require("lubridate")) { install.packages("lubridate") library(lubridate) } if(!require('dplyr')) { install.packages("dplyr") library(dplyr) } if (!require("ggplot2")) { install.packages("ggplot2") library(ggplot2) } if (!require("vader")) { install.packages("vader") library(vader) } if (!require("egg")) { install.packages("egg") library(egg) } discrete_sentiment_barplot <- function(tweet.vectors.df, graph_shape, sentiment_threshold) { # throw out cluster and subcluster centers tweets.df <- tweet.vectors.df[tweet.vectors.df$vector_type == "tweet",] tweets.df$created_at <- as.POSIXct(strptime(tweets.df$created_at, format="%a %b %d %H:%M:%S +0000 %Y", tz="UTC")) tweets.df$week <- epiweek(tweets.df$created_at) # CDC epidemiological week # create the data frame which will be used for the bar plot num_two_weeks <- max(as.integer(tweets.df$week / 2)) # num_two_weeks <- tweets.df$created_at %>% week() / 2 %>% max(as.integer()) summary.df <- data.frame(two_week = rep(c(1:num_two_weeks), each=3), sentiment = factor(rep(c("positive", "neutral", "negative"), num_two_weeks), levels = c("negative", "neutral", "positive"), ordered=TRUE), count = 0, binned_date = ymd("2019-12-22")) summary.df$binned_date <- summary.df$binned_date + (14 * summary.df$two_week) # because summarize() brings about mysterious errors # take counts and mean of sentiment for (i in 1:length(tweets.df$week)) { # temporary hack / TODO j <- as.integer(tweets.df[i,]$week / 2) if (tweets.df[i,]$sentiment >= sentiment_threshold) { summary.df[3*j-2,]$count = summary.df[3*j-2,]$count + 1 } else if (tweets.df[i,]$sentiment <= - sentiment_threshold) { summary.df[3*j,]$count = summary.df[3*j,]$count + 1 } else { summary.df[3*j -1,]$count = summary.df[3*j -1,]$count + 1 } } # colors source: Color Brewer 2.0 colors <- c("positive" = "#91BFDB", "neutral" = 
"#FFFFBF", "negative" = "#FC8D59") # colorblind friendly # bar plot showing sentiment over time ggplot(summary.df, aes(x = binned_date, y = count, fill=sentiment)) + geom_bar(stat = "identity", color = "azure3", position = graph_shape, width=11) + scale_color_manual(values = colors, aesthetics = c("colour", "fill")) + coord_cartesian(xlim = c(ymd("2020-03-08"), ymd("2020-08-01"))) + ggtitle("Tweet Counts by Sentiment", subtitle = "Tweets binned into two week periods") + ylab("Tweet Count") + theme(axis.title.x = element_blank(), panel.background = element_rect(fill = "#33352C", colour = "#EFF0F0")) } continuous_sentiment_barplot <- function(tweet.vectors.df) { # filter out centers from the dataframe tweets.df <- tweet.vectors.df[tweet.vectors.df$vector_type == "tweet",] tweets.df$created_at <- as.POSIXct(strptime(tweets.df$created_at, format="%a %b %d %H:%M:%S +0000 %Y", tz="UTC")) tweets.df$week <- epiweek(tweets.df$created_at) # find CDC epidemiological week num_weeks <- max(tweets.df$week) # create the data frame which will be used for the bar plot summary.df <- data.frame(week = c(1:num_weeks), count = 0, sentiment = "", sentiment_mean = 0, binned_date = ymd("2019-12-22")) summary.df$binned_date <- summary.df$binned_date + (7 * summary.df$week) for (i in 1:length(tweets.df$week)) { j <- tweets.df[i,]$week summary.df[j,]$count = summary.df[j,]$count + 1 summary.df[j,]$sentiment_mean = summary.df[j,]$sentiment_mean + tweets.df[i,]$sentiment } summary.df$sentiment_mean = summary.df$sentiment_mean / summary.df$count summary.df$sentiment <- summary.df$sentiment %>% factor(levels = c("negative", "neutral", "positive"), ordered = TRUE) #set NaNs to 0 summary.df$sentiment_mean[is.na(summary.df$sentiment_mean)] <- 0 # discretize sentiment for (i in 1:num_weeks) { if (summary.df[i,]$sentiment_mean >= sentiment_threshold) { summary.df[i,]$sentiment <- "positive" } else if (summary.df[i,]$sentiment_mean <= - sentiment_threshold) { summary.df[i,]$sentiment <- "negative" } 
else { summary.df[i,]$sentiment <- "neutral" } } # colors source: Color Brewer 2.0 colors <- c("positive" = "#91BFDB", "neutral" = "#FFFFBF", "negative" = "#FC8D59") # colorblind friendly # bar plot showing sentiment over time ggplot(summary.df, aes(x = binned_date, y = sentiment_mean, fill=sentiment)) + geom_bar(stat = "identity", color = "azure3", position = graph_shape) + scale_color_manual(values = colors, aesthetics = c("colour", "fill")) + coord_cartesian(xlim = c(ymd("2020-03-08"), ymd("2020-08-01"))) + ggtitle("Sentiment over Time", subtitle = "Tweets binned in one week intervals") + ylab("Tweet Count") + theme(axis.title.x = element_blank(), panel.background = element_rect(fill = "#33352C", colour = "#EFF0F0")) } cluster_sentiments_plots <- function(tweet.vectors.df) { # filter out centers from the dataframe tweets.df <- tweet.vectors.df[tweet.vectors.df$vector_type == "tweet",] tweets.df$created_at <- as.POSIXct(strptime(tweets.df$created_at, format="%a %b %d %H:%M:%S +0000 %Y", tz="UTC")) tweets.df$week <- epiweek(tweets.df$created_at) # find CDC epidemiological week num_weeks <- max(tweets.df$week) # define titles of clusters titles <- rep("Cluster", k) titles <- titles %>% paste(c(1:k)) titles <- factor(titles, levels = titles, ordered = TRUE) # create the data frame which will be used for the bar plot summary.df <- data.frame(week = rep(c(1:num_weeks), k), cluster = rep(titles, each=num_weeks), count = 0, sentiment_mean = 0, binned_date = ymd("2019-12-22")) summary.df$binned_date <- summary.df$binned_date + (7 * summary.df$week) for (i in 1:length(tweets.df$week)) { wk <- tweets.df[i,]$week cl <- tweets.df[i,]$cluster # temporary hack j <- (cl - 1) * num_weeks + wk #summary.df[summary.df$week == wk && summary.df$cluster == cl,]$count = summary.df[summary.df$week == wk && summary.df$cluster == cl,]$count + 1 ##summary.df$count <- (summary.df %>% filter(week == wk & cluster == cl))$count + 1 summary.df[j,]$count = summary.df[j,]$count + 1 
#summary.df[summary.df$week == wk && summary.df$cluster == cl,]$sentiment_mean = summary.df[summary.df$week == wk && summary.df$cluster == cl,]$sentiment_mean + tweets.df[i,]$sentiment summary.df[j,]$sentiment_mean = summary.df[j,]$sentiment_mean + tweets.df[i,]$sentiment } summary.df$sentiment_mean = summary.df$sentiment_mean / summary.df$count #set NaNs to 0 summary.df$sentiment_mean[is.na(summary.df$sentiment_mean)] <- 0 # bar plot showing sentiment over time ggplot(summary.df, aes(x = binned_date, y = count, fill = sentiment_mean)) + geom_bar(stat = "identity", color = "azure3") + scale_fill_gradient2(name = "Sentiment Average", limits = c(-0.5,0.5), low = "#FC8D59", mid = "white", high = "#91BFDB", midpoint = 0) + ggtitle("Sentiment by Week for each Cluster", subtitle = "Tweets binned in one-week intervals") + ylab("Tweet Count") + coord_cartesian(xlim = c(ymd("2020-03-08"), ymd("2020-08-01"))) + theme(axis.title.x = element_blank()) + facet_wrap(~ cluster, ncol = 3) }
/analysis/archive/sentiment_graphs.R
permissive
shwehtom89/COVID-Twitter
R
false
false
7,389
r
if (!require("gridExtra")) { install.packages("gridExtra") library(gridExtra) } if (!require("moments")) { install.packages("moments") library(moments) } if (!require("lubridate")) { install.packages("lubridate") library(lubridate) } if(!require('dplyr')) { install.packages("dplyr") library(dplyr) } if (!require("ggplot2")) { install.packages("ggplot2") library(ggplot2) } if (!require("vader")) { install.packages("vader") library(vader) } if (!require("egg")) { install.packages("egg") library(egg) } discrete_sentiment_barplot <- function(tweet.vectors.df, graph_shape, sentiment_threshold) { # throw out cluster and subcluster centers tweets.df <- tweet.vectors.df[tweet.vectors.df$vector_type == "tweet",] tweets.df$created_at <- as.POSIXct(strptime(tweets.df$created_at, format="%a %b %d %H:%M:%S +0000 %Y", tz="UTC")) tweets.df$week <- epiweek(tweets.df$created_at) # CDC epidemiological week # create the data frame which will be used for the bar plot num_two_weeks <- max(as.integer(tweets.df$week / 2)) # num_two_weeks <- tweets.df$created_at %>% week() / 2 %>% max(as.integer()) summary.df <- data.frame(two_week = rep(c(1:num_two_weeks), each=3), sentiment = factor(rep(c("positive", "neutral", "negative"), num_two_weeks), levels = c("negative", "neutral", "positive"), ordered=TRUE), count = 0, binned_date = ymd("2019-12-22")) summary.df$binned_date <- summary.df$binned_date + (14 * summary.df$two_week) # because summarize() brings about mysterious errors # take counts and mean of sentiment for (i in 1:length(tweets.df$week)) { # temporary hack / TODO j <- as.integer(tweets.df[i,]$week / 2) if (tweets.df[i,]$sentiment >= sentiment_threshold) { summary.df[3*j-2,]$count = summary.df[3*j-2,]$count + 1 } else if (tweets.df[i,]$sentiment <= - sentiment_threshold) { summary.df[3*j,]$count = summary.df[3*j,]$count + 1 } else { summary.df[3*j -1,]$count = summary.df[3*j -1,]$count + 1 } } # colors source: Color Brewer 2.0 colors <- c("positive" = "#91BFDB", "neutral" = 
"#FFFFBF", "negative" = "#FC8D59") # colorblind friendly # bar plot showing sentiment over time ggplot(summary.df, aes(x = binned_date, y = count, fill=sentiment)) + geom_bar(stat = "identity", color = "azure3", position = graph_shape, width=11) + scale_color_manual(values = colors, aesthetics = c("colour", "fill")) + coord_cartesian(xlim = c(ymd("2020-03-08"), ymd("2020-08-01"))) + ggtitle("Tweet Counts by Sentiment", subtitle = "Tweets binned into two week periods") + ylab("Tweet Count") + theme(axis.title.x = element_blank(), panel.background = element_rect(fill = "#33352C", colour = "#EFF0F0")) } continuous_sentiment_barplot <- function(tweet.vectors.df) { # filter out centers from the dataframe tweets.df <- tweet.vectors.df[tweet.vectors.df$vector_type == "tweet",] tweets.df$created_at <- as.POSIXct(strptime(tweets.df$created_at, format="%a %b %d %H:%M:%S +0000 %Y", tz="UTC")) tweets.df$week <- epiweek(tweets.df$created_at) # find CDC epidemiological week num_weeks <- max(tweets.df$week) # create the data frame which will be used for the bar plot summary.df <- data.frame(week = c(1:num_weeks), count = 0, sentiment = "", sentiment_mean = 0, binned_date = ymd("2019-12-22")) summary.df$binned_date <- summary.df$binned_date + (7 * summary.df$week) for (i in 1:length(tweets.df$week)) { j <- tweets.df[i,]$week summary.df[j,]$count = summary.df[j,]$count + 1 summary.df[j,]$sentiment_mean = summary.df[j,]$sentiment_mean + tweets.df[i,]$sentiment } summary.df$sentiment_mean = summary.df$sentiment_mean / summary.df$count summary.df$sentiment <- summary.df$sentiment %>% factor(levels = c("negative", "neutral", "positive"), ordered = TRUE) #set NaNs to 0 summary.df$sentiment_mean[is.na(summary.df$sentiment_mean)] <- 0 # discretize sentiment for (i in 1:num_weeks) { if (summary.df[i,]$sentiment_mean >= sentiment_threshold) { summary.df[i,]$sentiment <- "positive" } else if (summary.df[i,]$sentiment_mean <= - sentiment_threshold) { summary.df[i,]$sentiment <- "negative" } 
else { summary.df[i,]$sentiment <- "neutral" } } # colors source: Color Brewer 2.0 colors <- c("positive" = "#91BFDB", "neutral" = "#FFFFBF", "negative" = "#FC8D59") # colorblind friendly # bar plot showing sentiment over time ggplot(summary.df, aes(x = binned_date, y = sentiment_mean, fill=sentiment)) + geom_bar(stat = "identity", color = "azure3", position = graph_shape) + scale_color_manual(values = colors, aesthetics = c("colour", "fill")) + coord_cartesian(xlim = c(ymd("2020-03-08"), ymd("2020-08-01"))) + ggtitle("Sentiment over Time", subtitle = "Tweets binned in one week intervals") + ylab("Tweet Count") + theme(axis.title.x = element_blank(), panel.background = element_rect(fill = "#33352C", colour = "#EFF0F0")) } cluster_sentiments_plots <- function(tweet.vectors.df) { # filter out centers from the dataframe tweets.df <- tweet.vectors.df[tweet.vectors.df$vector_type == "tweet",] tweets.df$created_at <- as.POSIXct(strptime(tweets.df$created_at, format="%a %b %d %H:%M:%S +0000 %Y", tz="UTC")) tweets.df$week <- epiweek(tweets.df$created_at) # find CDC epidemiological week num_weeks <- max(tweets.df$week) # define titles of clusters titles <- rep("Cluster", k) titles <- titles %>% paste(c(1:k)) titles <- factor(titles, levels = titles, ordered = TRUE) # create the data frame which will be used for the bar plot summary.df <- data.frame(week = rep(c(1:num_weeks), k), cluster = rep(titles, each=num_weeks), count = 0, sentiment_mean = 0, binned_date = ymd("2019-12-22")) summary.df$binned_date <- summary.df$binned_date + (7 * summary.df$week) for (i in 1:length(tweets.df$week)) { wk <- tweets.df[i,]$week cl <- tweets.df[i,]$cluster # temporary hack j <- (cl - 1) * num_weeks + wk #summary.df[summary.df$week == wk && summary.df$cluster == cl,]$count = summary.df[summary.df$week == wk && summary.df$cluster == cl,]$count + 1 ##summary.df$count <- (summary.df %>% filter(week == wk & cluster == cl))$count + 1 summary.df[j,]$count = summary.df[j,]$count + 1 
#summary.df[summary.df$week == wk && summary.df$cluster == cl,]$sentiment_mean = summary.df[summary.df$week == wk && summary.df$cluster == cl,]$sentiment_mean + tweets.df[i,]$sentiment summary.df[j,]$sentiment_mean = summary.df[j,]$sentiment_mean + tweets.df[i,]$sentiment } summary.df$sentiment_mean = summary.df$sentiment_mean / summary.df$count #set NaNs to 0 summary.df$sentiment_mean[is.na(summary.df$sentiment_mean)] <- 0 # bar plot showing sentiment over time ggplot(summary.df, aes(x = binned_date, y = count, fill = sentiment_mean)) + geom_bar(stat = "identity", color = "azure3") + scale_fill_gradient2(name = "Sentiment Average", limits = c(-0.5,0.5), low = "#FC8D59", mid = "white", high = "#91BFDB", midpoint = 0) + ggtitle("Sentiment by Week for each Cluster", subtitle = "Tweets binned in one-week intervals") + ylab("Tweet Count") + coord_cartesian(xlim = c(ymd("2020-03-08"), ymd("2020-08-01"))) + theme(axis.title.x = element_blank()) + facet_wrap(~ cluster, ncol = 3) }
# title: make shots data script # description: In this script, we will be making data charts of different players. # input(s): .csv # output(s): .txt, .csv library(stringr) library(dplyr) # Reading file with relative path curry <- read.csv("../data/stephen-curry.csv", stringsAsFactors = FALSE , sep = "," , header = TRUE, na.strings = c("", " ", "NA"), colClasses = c( "character", "factor", "double", "double", "double", "double", "character", "character", "character", "double", "character", "double", "double" )) # add new column "name" curry <- cbind(name = "Stephen Curry", curry) # replacing shot_made_flag n <- curry$shot_made_flag == "n" curry$shot_made_flag[n] <- "shot_no" y <- curry$shot_made_flag == "y" curry$shot_made_flag[y] <- "shot_yes" # adding minute column curry <- mutate(curry, minute = period * 12 - minutes_remaining) #sink summary to output sink(file = '../output/stephen-curry.txt') summary(curry) sink() iguodala <- read.csv("../data/andre-iguodala.csv", stringsAsFactors = FALSE , sep = "," , header = TRUE, na.strings = c("", " ", "NA"), colClasses = c( "character", "factor", "double", "double", "double", "double", "character", "character", "character", "double", "character", "double", "double" )) # add a new column 'name' iguodala <- cbind(name = "Andre Iguodala", iguodala) # replacing shot_made_flag n <- iguodala$shot_made_flag == "n" iguodala$shot_made_flag[n] <- "shot_no" y <- iguodala$shot_made_flag == "y" iguodala$shot_made_flag[y] <- "shot_yes" iguodala <- mutate(iguodala, minute = period * 12 - minutes_remaining) sink(file = '../output/andre-iguodala.txt') summary(iguodala) sink() green <- read.csv("../data/draymond-green.csv", stringsAsFactors = FALSE, sep = "," , header = TRUE, na.strings = c("", " ", "NA"), colClasses = c( "character", "factor", "double", "double", "double", "double", "character", "character", "character", "double", "character", "double", "double" )) # add a new column 'name' green <- cbind(name = "Draymond Green", green) # 
replacing shot_made_flag n <- green$shot_made_flag == "n" green$shot_made_flag[n] <- "shot_no" y <- green$shot_made_flag == "y" green$shot_made_flag[y] <- "shot_yes" green <- mutate(green, minute = period * 12 - minutes_remaining) sink(file = '../output/draymond-green.txt') summary(green) sink() durant <- read.csv("../data/kevin-durant.csv", stringsAsFactors = FALSE, sep = "," , header = TRUE, na.strings = c("", " ", "NA"), colClasses = c( "character", "factor", "double", "double", "double", "double", "character", "character", "character", "double", "character", "double", "double" )) # add a new column 'name' durant <- cbind(name = "Kevin Durant", durant) # replacing shot_made_flag n <- durant$shot_made_flag == "n" durant$shot_made_flag[n] <- "shot_no" y <- durant$shot_made_flag == "y" durant$shot_made_flag[y] <- "shot_yes" durant <- mutate(durant, minute = period * 12 - minutes_remaining) sink(file = '../output/kevin-durant.txt') summary(durant) sink() thompson <- read.csv("../data/klay-thompson.csv", stringsAsFactors = FALSE, sep = "," , header = TRUE, na.strings = c("", " ", "NA"), colClasses = c( "character", "factor", "double", "double", "double", "double", "character", "character", "character", "double", "character", "double", "double" )) # add a new column 'name' thompson <- cbind(name = "Klay Thompson", thompson) # replacing shot_made_flag n <- thompson$shot_made_flag == "n" thompson$shot_made_flag[n] <- "shot_no" y <- thompson$shot_made_flag == "y" thompson$shot_made_flag[y] <- "shot_yes" thompson <- mutate(thompson, minute = period * 12 - minutes_remaining) sink(file = '../output/klay-thompson.txt') summary(thompson) sink() # stack tables into one single data frame using rbind() shots_data <- rbind(curry, durant) shots_data <- rbind(shots_data, green) shots_data <- rbind(shots_data, iguodala) shots_data <- rbind(shots_data, thompson) # export shots-data to data folder write.csv(shots_data, "../data/shots-data.csv") # export shots-data-summary.txt 
sink(file = '../output/shots-data-summary.txt') summary(shots_data) sink()
/workout01/code/make-shots-data-script.R
no_license
nahyungkim1220/workout_01
R
false
false
4,440
r
# title: make shots data script # description: In this script, we will be making data charts of different players. # input(s): .csv # output(s): .txt, .csv library(stringr) library(dplyr) # Reading file with relative path curry <- read.csv("../data/stephen-curry.csv", stringsAsFactors = FALSE , sep = "," , header = TRUE, na.strings = c("", " ", "NA"), colClasses = c( "character", "factor", "double", "double", "double", "double", "character", "character", "character", "double", "character", "double", "double" )) # add new column "name" curry <- cbind(name = "Stephen Curry", curry) # replacing shot_made_flag n <- curry$shot_made_flag == "n" curry$shot_made_flag[n] <- "shot_no" y <- curry$shot_made_flag == "y" curry$shot_made_flag[y] <- "shot_yes" # adding minute column curry <- mutate(curry, minute = period * 12 - minutes_remaining) #sink summary to output sink(file = '../output/stephen-curry.txt') summary(curry) sink() iguodala <- read.csv("../data/andre-iguodala.csv", stringsAsFactors = FALSE , sep = "," , header = TRUE, na.strings = c("", " ", "NA"), colClasses = c( "character", "factor", "double", "double", "double", "double", "character", "character", "character", "double", "character", "double", "double" )) # add a new column 'name' iguodala <- cbind(name = "Andre Iguodala", iguodala) # replacing shot_made_flag n <- iguodala$shot_made_flag == "n" iguodala$shot_made_flag[n] <- "shot_no" y <- iguodala$shot_made_flag == "y" iguodala$shot_made_flag[y] <- "shot_yes" iguodala <- mutate(iguodala, minute = period * 12 - minutes_remaining) sink(file = '../output/andre-iguodala.txt') summary(iguodala) sink() green <- read.csv("../data/draymond-green.csv", stringsAsFactors = FALSE, sep = "," , header = TRUE, na.strings = c("", " ", "NA"), colClasses = c( "character", "factor", "double", "double", "double", "double", "character", "character", "character", "double", "character", "double", "double" )) # add a new column 'name' green <- cbind(name = "Draymond Green", green) # 
replacing shot_made_flag n <- green$shot_made_flag == "n" green$shot_made_flag[n] <- "shot_no" y <- green$shot_made_flag == "y" green$shot_made_flag[y] <- "shot_yes" green <- mutate(green, minute = period * 12 - minutes_remaining) sink(file = '../output/draymond-green.txt') summary(green) sink() durant <- read.csv("../data/kevin-durant.csv", stringsAsFactors = FALSE, sep = "," , header = TRUE, na.strings = c("", " ", "NA"), colClasses = c( "character", "factor", "double", "double", "double", "double", "character", "character", "character", "double", "character", "double", "double" )) # add a new column 'name' durant <- cbind(name = "Kevin Durant", durant) # replacing shot_made_flag n <- durant$shot_made_flag == "n" durant$shot_made_flag[n] <- "shot_no" y <- durant$shot_made_flag == "y" durant$shot_made_flag[y] <- "shot_yes" durant <- mutate(durant, minute = period * 12 - minutes_remaining) sink(file = '../output/kevin-durant.txt') summary(durant) sink() thompson <- read.csv("../data/klay-thompson.csv", stringsAsFactors = FALSE, sep = "," , header = TRUE, na.strings = c("", " ", "NA"), colClasses = c( "character", "factor", "double", "double", "double", "double", "character", "character", "character", "double", "character", "double", "double" )) # add a new column 'name' thompson <- cbind(name = "Klay Thompson", thompson) # replacing shot_made_flag n <- thompson$shot_made_flag == "n" thompson$shot_made_flag[n] <- "shot_no" y <- thompson$shot_made_flag == "y" thompson$shot_made_flag[y] <- "shot_yes" thompson <- mutate(thompson, minute = period * 12 - minutes_remaining) sink(file = '../output/klay-thompson.txt') summary(thompson) sink() # stack tables into one single data frame using rbind() shots_data <- rbind(curry, durant) shots_data <- rbind(shots_data, green) shots_data <- rbind(shots_data, iguodala) shots_data <- rbind(shots_data, thompson) # export shots-data to data folder write.csv(shots_data, "../data/shots-data.csv") # export shots-data-summary.txt 
sink(file = '../output/shots-data-summary.txt') summary(shots_data) sink()
# Script para el proyecto final de simulación library(ggplot2) ############# Teorema de la convergencia dominada de Lebesgue ### f1 set.seed(18) N <- 1000000 ns <- c(1, seq(from = 10, to = 1000, by = 10)) enes <- length(ns) f1n <- function(n, x){ return((n*x+x)/n) } f1 <- function(x){ return(x) } # Monte Carlo crudo exps <- rexp(N, rate = 2) indicadora <- function(x)ifelse(x<=3 & x>= 1, 1, 0) # Para la funcion limite x <- f1(exps)*indicadora(exps) # Sacamos el estimador de MC (Int_f1C <- mean(x)) # Ahora lo hacemos para la sucesion Int_f1nC <- rep(0, enes) for (k in 1:enes){ Int_f1nC[k] <- mean(f1n(ns[k], exps)*indicadora(exps)) } # Variadas antiteticas mi_rexp <- function(n) { u1 <- runif(n/2) u2 <- 1-u1 u <- c(u1, u2) return( -0.5*log(-u+1) ) } expsA <- mi_rexp(N) # Para la funcion limite x <- f1(expsA)*indicadora(expsA) # Sacamos el estimador de MC (Int_f1A <- mean(x)) # Ahora lo hacemos para la sucesion Int_f1nA <- rep(0, enes) for (k in 1:enes){ Int_f1nA[k] <- mean(f1n(ns[k], expsA)*indicadora(expsA)) } #(Int_f1nA[990:1001]) ### f2 set.seed(18) N <- 1000000 ns <- c(1, seq(from = 10, to = 10000, by = 10)) f2n <- function(n, x){ return( (n*x^3-10*n*x^2 + 25*n*x - 10*x + 25)/(1+n*x) ) } f2 <- function(x){ return((x-5)^2) } # Monte Carlo crudo gammas <- rgamma(N, 5, 1) # Para la funcion limite # Sacamos el estimador de MC (Int_f2C <- mean(f2(gammas))) # Ahora lo hacemos para la sucesion Int_f2nC <- rep(0, enes) for (k in 1:enes){ Int_f2nC[k] <- mean(f2n(ns[k], gammas)) } #(Int_f2nC[990:1001]) # f3, cuando no se pueden conmutar set.seed(19) f3 <- function(x){ return(rep(0, length(x))) } f3n <- function(n, x){ return(ifelse(x> 0 & x<=1/n, n, 0)) } # Para la funcion limite u <- runif(N) Int_f3C <- mean(f3(u)) # Para la sucesion Int_f3nC <- rep(0, enes) for (k in 1:enes){ Int_f3nC[k] <- mean(f3n(ns[k], u)) } #(Int_f3nC[990:1001]) # Con variadas antiteticas u1 <- runif(N) u2 <- 1-u1 u <- c(u1, u2) Int_f3A <- mean(f3(u)) # Para la sucesion Int_f3nA <- rep(0, enes) for 
(k in 1:enes){ Int_f3nA[k] <- mean(f3n(ns[k], u)) } #(Int_f3nA[990:1001]) datosGraficarCrudo <- data.frame(N = ns, f1Crudo = Int_f1nC, f2Crudo = Int_f2nC, f3Crudo = Int_f3nC) datosGraficarAnti <- data.frame(N = ns, f1Anti = Int_f1nA, f3Anti = Int_f3nA) library(reshape2) datosGraficarCrudo.m <- melt(datosGraficarCrudo, id.vars=c("N")) datosGraficarAnti.m <- melt(datosGraficarAnti, id.vars=c("N")) plotCrudo <- ggplot(datosGraficarCrudo.m, aes(x = N, y = value, color = variable)) + geom_point(size = 0.1) + ggtitle("Convergencia integrales", "Monte Carlo crudo") + theme_light() + scale_colour_manual(values = c("#00197d", "#6d008e", "#007013")) + geom_hline(yintercept = Int_f1C, color = "#00197d", alpha = 0.3) + geom_hline(yintercept = Int_f2C, color = "#8801ff", alpha = 0.3) + geom_hline(yintercept = Int_f3C, color = "#00ab1d", alpha = 0.3) + ylab("Valor de la integral") + xlab("Termino de la sucesión") show(plotCrudo) plotAnti <- ggplot(datosGraficarAnti.m, aes(x = N, y = value, color = variable)) + geom_point(size = 0.1) + ggtitle("Convergencia integrales", "Tecnicas de reducción de varianza") + theme_light() + scale_colour_manual(values = c("#00197d", "#007013")) + geom_hline(yintercept = Int_f1A, color = "#00197d", alpha = 0.3) + geom_hline(yintercept = Int_f3A, color = "#00ab1d", alpha = 0.3) + ylab("Valor de la integral") + xlab("Termino de la sucesión") show(plotAnti)
/TCDL.R
no_license
mtzmarianaa/Simulacion-ProyectoFinal
R
false
false
3,541
r
# Script para el proyecto final de simulación library(ggplot2) ############# Teorema de la convergencia dominada de Lebesgue ### f1 set.seed(18) N <- 1000000 ns <- c(1, seq(from = 10, to = 1000, by = 10)) enes <- length(ns) f1n <- function(n, x){ return((n*x+x)/n) } f1 <- function(x){ return(x) } # Monte Carlo crudo exps <- rexp(N, rate = 2) indicadora <- function(x)ifelse(x<=3 & x>= 1, 1, 0) # Para la funcion limite x <- f1(exps)*indicadora(exps) # Sacamos el estimador de MC (Int_f1C <- mean(x)) # Ahora lo hacemos para la sucesion Int_f1nC <- rep(0, enes) for (k in 1:enes){ Int_f1nC[k] <- mean(f1n(ns[k], exps)*indicadora(exps)) } # Variadas antiteticas mi_rexp <- function(n) { u1 <- runif(n/2) u2 <- 1-u1 u <- c(u1, u2) return( -0.5*log(-u+1) ) } expsA <- mi_rexp(N) # Para la funcion limite x <- f1(expsA)*indicadora(expsA) # Sacamos el estimador de MC (Int_f1A <- mean(x)) # Ahora lo hacemos para la sucesion Int_f1nA <- rep(0, enes) for (k in 1:enes){ Int_f1nA[k] <- mean(f1n(ns[k], expsA)*indicadora(expsA)) } #(Int_f1nA[990:1001]) ### f2 set.seed(18) N <- 1000000 ns <- c(1, seq(from = 10, to = 10000, by = 10)) f2n <- function(n, x){ return( (n*x^3-10*n*x^2 + 25*n*x - 10*x + 25)/(1+n*x) ) } f2 <- function(x){ return((x-5)^2) } # Monte Carlo crudo gammas <- rgamma(N, 5, 1) # Para la funcion limite # Sacamos el estimador de MC (Int_f2C <- mean(f2(gammas))) # Ahora lo hacemos para la sucesion Int_f2nC <- rep(0, enes) for (k in 1:enes){ Int_f2nC[k] <- mean(f2n(ns[k], gammas)) } #(Int_f2nC[990:1001]) # f3, cuando no se pueden conmutar set.seed(19) f3 <- function(x){ return(rep(0, length(x))) } f3n <- function(n, x){ return(ifelse(x> 0 & x<=1/n, n, 0)) } # Para la funcion limite u <- runif(N) Int_f3C <- mean(f3(u)) # Para la sucesion Int_f3nC <- rep(0, enes) for (k in 1:enes){ Int_f3nC[k] <- mean(f3n(ns[k], u)) } #(Int_f3nC[990:1001]) # Con variadas antiteticas u1 <- runif(N) u2 <- 1-u1 u <- c(u1, u2) Int_f3A <- mean(f3(u)) # Para la sucesion Int_f3nA <- rep(0, enes) for 
(k in 1:enes){ Int_f3nA[k] <- mean(f3n(ns[k], u)) } #(Int_f3nA[990:1001]) datosGraficarCrudo <- data.frame(N = ns, f1Crudo = Int_f1nC, f2Crudo = Int_f2nC, f3Crudo = Int_f3nC) datosGraficarAnti <- data.frame(N = ns, f1Anti = Int_f1nA, f3Anti = Int_f3nA) library(reshape2) datosGraficarCrudo.m <- melt(datosGraficarCrudo, id.vars=c("N")) datosGraficarAnti.m <- melt(datosGraficarAnti, id.vars=c("N")) plotCrudo <- ggplot(datosGraficarCrudo.m, aes(x = N, y = value, color = variable)) + geom_point(size = 0.1) + ggtitle("Convergencia integrales", "Monte Carlo crudo") + theme_light() + scale_colour_manual(values = c("#00197d", "#6d008e", "#007013")) + geom_hline(yintercept = Int_f1C, color = "#00197d", alpha = 0.3) + geom_hline(yintercept = Int_f2C, color = "#8801ff", alpha = 0.3) + geom_hline(yintercept = Int_f3C, color = "#00ab1d", alpha = 0.3) + ylab("Valor de la integral") + xlab("Termino de la sucesión") show(plotCrudo) plotAnti <- ggplot(datosGraficarAnti.m, aes(x = N, y = value, color = variable)) + geom_point(size = 0.1) + ggtitle("Convergencia integrales", "Tecnicas de reducción de varianza") + theme_light() + scale_colour_manual(values = c("#00197d", "#007013")) + geom_hline(yintercept = Int_f1A, color = "#00197d", alpha = 0.3) + geom_hline(yintercept = Int_f3A, color = "#00ab1d", alpha = 0.3) + ylab("Valor de la integral") + xlab("Termino de la sucesión") show(plotAnti)
# Recipe 11c: choropleth-style map of GB level-1 administrative regions,
# once with sp/lattice graphics and once with ggplot2.
source("comboPlot.R")
library("sp")
library("maptools")
library("ggplot2")
gpclibPermit()

# Import data: GADM level-1 boundaries for Great Britain (loads `gadm`).
load(url("http://gadm.org/data/rda/GBR_adm1.RData"))
uk.gadm <- gadm

#define common text
#main.text="Unit Sales in the month\nof January 2010"
#ylab.text="Number of units sold"

# Base/lattice version: shade polygons by their area.
base.recipe.11c <- function() {
  print(spplot(gadm, "Shape_Area"))
}

# ggplot data shaping: flatten the SpatialPolygonsDataFrame into a plain
# data frame and bin the polygon group ids into 20 colour codes.
uk.polygon <- fortify(uk.gadm, region = "ID_1")
uk.polygon$grCodes <- factor(cut(as.double(as.character(uk.polygon$group)), 20, labels = FALSE))

## ggplot
# FIX: opts() and theme_text() were removed from ggplot2 (>= 0.9.2);
# the modern equivalents, kept here for reference, are:
#   theme(legend.position = "none")                                  # remove legend
#   theme(axis.title.x = element_text(vjust = -0.25))                # push x title down
#   theme(axis.text.x = element_text(angle = 30, hjust = 1, vjust = 0.5))  # rotate x labels
#   theme(axis.title.y = element_text(angle = 90, vjust = 0.02))     # move y title left
#   theme(plot.title = element_text(vjust = 1.25, face = "bold"))    # bold title, more space
#   theme(plot.margin = unit(c(2, 2, 2, 1), "lines"))                # margins around plot
plot11.3 <- ggplot(uk.polygon, aes(long, lat, group = group)) +
  geom_polygon(aes(fill = grCodes)) +
  theme(legend.position = "none")

ggRecipe11.3 <- function(){
  print(plot11.3)
}
/chapter/01/R/recipe11c.R
no_license
Keniajin/graphsCookbook
R
false
false
1,253
r
# Recipe 11c: choropleth-style map of GB level-1 administrative regions,
# once with sp/lattice graphics and once with ggplot2.
source("comboPlot.R")
library("sp")
library("maptools")
library("ggplot2")
gpclibPermit()

# Import data: GADM level-1 boundaries for Great Britain (loads `gadm`).
load(url("http://gadm.org/data/rda/GBR_adm1.RData"))
uk.gadm <- gadm

#define common text
#main.text="Unit Sales in the month\nof January 2010"
#ylab.text="Number of units sold"

# Base/lattice version: shade polygons by their area.
base.recipe.11c <- function() {
  print(spplot(gadm, "Shape_Area"))
}

# ggplot data shaping: flatten the SpatialPolygonsDataFrame into a plain
# data frame and bin the polygon group ids into 20 colour codes.
uk.polygon <- fortify(uk.gadm, region = "ID_1")
uk.polygon$grCodes <- factor(cut(as.double(as.character(uk.polygon$group)), 20, labels = FALSE))

## ggplot
# FIX: opts() and theme_text() were removed from ggplot2 (>= 0.9.2);
# the modern equivalents, kept here for reference, are:
#   theme(legend.position = "none")                                  # remove legend
#   theme(axis.title.x = element_text(vjust = -0.25))                # push x title down
#   theme(axis.text.x = element_text(angle = 30, hjust = 1, vjust = 0.5))  # rotate x labels
#   theme(axis.title.y = element_text(angle = 90, vjust = 0.02))     # move y title left
#   theme(plot.title = element_text(vjust = 1.25, face = "bold"))    # bold title, more space
#   theme(plot.margin = unit(c(2, 2, 2, 1), "lines"))                # margins around plot
plot11.3 <- ggplot(uk.polygon, aes(long, lat, group = group)) +
  geom_polygon(aes(fill = grCodes)) +
  theme(legend.position = "none")

ggRecipe11.3 <- function(){
  print(plot11.3)
}
library(magrittr)
library(purrr)
library(gh)

# Create repo `repoName` under organization `orgName`, unless a repo with
# that name already exists (then warn and skip).
make_student_repo <- function(orgName, repoName, auth, private = TRUE, auto_init = TRUE){
  # FIX: `.limit = Inf` pages through all org repos; the API default only
  # returns the first page (30 repos), so an existing repo could be
  # missed and creation would fail.
  repo_names <- gh("GET /orgs/:org/repos", org = orgName, .limit = Inf, .token = auth) %>%
    map_chr("name")
  # FIX: plain if/else instead of ifelse() -- ifelse() is vectorized and
  # is the wrong tool for scalar control flow.
  if (repoName %in% repo_names) {
    warning(glue::glue("{repoName} already exists in {orgName}. Skipping creation."))
  } else {
    gh("POST /orgs/:org/repos",
       org = orgName,
       name = repoName,
       private = private,
       has_issues = TRUE,
       has_projects = FALSE,
       has_wiki = FALSE,
       auto_init = auto_init,
       .token = auth)
  }
}

# Look up the numeric id of team `teamName` in `orgName`; errors if the
# team does not exist.
get_team_id <- function(orgName, teamName, auth){
  teams <- gh("GET /orgs/:org/teams", org = orgName, .token = auth) %>%
    lapply(data.frame) %>%
    dplyr::bind_rows()
  if (!(teamName %in% teams$name)){
    stop(glue::glue("Error: {teamName} is not a team of {orgName}"))
  }
  teams %>%
    dplyr::filter(name == teamName) %>%
    .$id
}

# Grant an existing team access to a repo.
# from: https://stackoverflow.com/questions/43498035/add-github-team-in-org-to-a-repo
# `teamId` comes from `get_team_id`; `permission` is e.g. "pull" or "push".
assign_team_to_repo <- function(orgName, repoName, teamId, permission = "pull", auth){
  gh("PUT /teams/:id/repos/:org/:repo",
     repo = repoName,
     org = orgName,
     id = teamId,
     permission = permission,
     .token = auth)
}

# Add a user as collaborator on a repo (push access by GitHub default).
add_student_to_repo <- function(orgName, repoName, userName, auth){
  gh("PUT /repos/:owner/:repo/collaborators/:username",
     owner = orgName,
     repo = repoName,
     username = userName,
     .token = auth)
}

# Add a user to a team by team id.
add_student_to_team <- function(orgName, teamId, userName, auth){
  #gh("PUT /teams/:id/:org/memberships/:username",
  gh("PUT /teams/:id/memberships/:username",
     id = teamId,
     org = orgName,
     username = userName,
     .token = auth)
}

# Stop watching (receiving notifications for) a repo.
unwatch_repo <- function(orgName, repoName, auth){
  gh("DELETE /repos/:owner/:repo/subscription",
     owner = orgName,
     repo = repoName,
     .token = auth)
}

# Permanently delete a repo -- destructive; token needs delete scope.
delete_student_repo <- function(orgName, repoName, auth){
  gh("DELETE /repos/:owner/:repo",
     owner = orgName,
     repo = repoName,
     .token = auth)
}

# setup:
#   make a students team on github; collect usernames and student names.
# steps per student:
#   1. create the student's repo (auto-initialized with README.md by default)
#   2. add the student to the student team; give that team read access to
#      the other student repos (instructors should have push by default)
#   3. add the student as a collaborator with push access to their own repo
#   4. unwatch the student repo
setup_course_repos <- function(repoNames, userNames, orgName, studentTeamName,
                               instructorTeamName, auth, private = TRUE,
                               auto_init = TRUE, student_team_repo_permission = "pull"){
  # repoNames: vector of repository names
  # userNames: vector of usernames, parallel to the repo they are assigned to
  if (length(repoNames) != length(userNames)){
    stop("ERROR: repoNames and userNames must be equal width")
  }
  studentTeamId <- get_team_id(orgName, studentTeamName, auth)
  instructorTeamId <- get_team_id(orgName, instructorTeamName, auth)
  map2(repoNames, userNames, ~{
    repo <- .x
    user <- .y
    add_student_to_team(orgName, studentTeamId, userName = user, auth)
    make_student_repo(orgName, repoName = repo, auth, private = private, auto_init = auto_init)
    assign_team_to_repo(orgName, repoName = repo, studentTeamId, student_team_repo_permission, auth)
    assign_team_to_repo(orgName, repoName = repo, instructorTeamId, "push", auth)
    add_student_to_repo(orgName, repoName = repo, userName = user, auth)
    unwatch_repo(orgName, repoName = repo, auth)
  })
}
/github_functions.R
no_license
How-to-Learn-to-Code/git_course_management
R
false
false
3,904
r
library(magrittr)
library(purrr)
library(gh)

# Create repo `repoName` under organization `orgName`, unless a repo with
# that name already exists (then warn and skip).
make_student_repo <- function(orgName, repoName, auth, private = TRUE, auto_init = TRUE){
  # FIX: `.limit = Inf` pages through all org repos; the API default only
  # returns the first page (30 repos), so an existing repo could be
  # missed and creation would fail.
  repo_names <- gh("GET /orgs/:org/repos", org = orgName, .limit = Inf, .token = auth) %>%
    map_chr("name")
  # FIX: plain if/else instead of ifelse() -- ifelse() is vectorized and
  # is the wrong tool for scalar control flow.
  if (repoName %in% repo_names) {
    warning(glue::glue("{repoName} already exists in {orgName}. Skipping creation."))
  } else {
    gh("POST /orgs/:org/repos",
       org = orgName,
       name = repoName,
       private = private,
       has_issues = TRUE,
       has_projects = FALSE,
       has_wiki = FALSE,
       auto_init = auto_init,
       .token = auth)
  }
}

# Look up the numeric id of team `teamName` in `orgName`; errors if the
# team does not exist.
get_team_id <- function(orgName, teamName, auth){
  teams <- gh("GET /orgs/:org/teams", org = orgName, .token = auth) %>%
    lapply(data.frame) %>%
    dplyr::bind_rows()
  if (!(teamName %in% teams$name)){
    stop(glue::glue("Error: {teamName} is not a team of {orgName}"))
  }
  teams %>%
    dplyr::filter(name == teamName) %>%
    .$id
}

# Grant an existing team access to a repo.
# from: https://stackoverflow.com/questions/43498035/add-github-team-in-org-to-a-repo
# `teamId` comes from `get_team_id`; `permission` is e.g. "pull" or "push".
assign_team_to_repo <- function(orgName, repoName, teamId, permission = "pull", auth){
  gh("PUT /teams/:id/repos/:org/:repo",
     repo = repoName,
     org = orgName,
     id = teamId,
     permission = permission,
     .token = auth)
}

# Add a user as collaborator on a repo (push access by GitHub default).
add_student_to_repo <- function(orgName, repoName, userName, auth){
  gh("PUT /repos/:owner/:repo/collaborators/:username",
     owner = orgName,
     repo = repoName,
     username = userName,
     .token = auth)
}

# Add a user to a team by team id.
add_student_to_team <- function(orgName, teamId, userName, auth){
  #gh("PUT /teams/:id/:org/memberships/:username",
  gh("PUT /teams/:id/memberships/:username",
     id = teamId,
     org = orgName,
     username = userName,
     .token = auth)
}

# Stop watching (receiving notifications for) a repo.
unwatch_repo <- function(orgName, repoName, auth){
  gh("DELETE /repos/:owner/:repo/subscription",
     owner = orgName,
     repo = repoName,
     .token = auth)
}

# Permanently delete a repo -- destructive; token needs delete scope.
delete_student_repo <- function(orgName, repoName, auth){
  gh("DELETE /repos/:owner/:repo",
     owner = orgName,
     repo = repoName,
     .token = auth)
}

# setup:
#   make a students team on github; collect usernames and student names.
# steps per student:
#   1. create the student's repo (auto-initialized with README.md by default)
#   2. add the student to the student team; give that team read access to
#      the other student repos (instructors should have push by default)
#   3. add the student as a collaborator with push access to their own repo
#   4. unwatch the student repo
setup_course_repos <- function(repoNames, userNames, orgName, studentTeamName,
                               instructorTeamName, auth, private = TRUE,
                               auto_init = TRUE, student_team_repo_permission = "pull"){
  # repoNames: vector of repository names
  # userNames: vector of usernames, parallel to the repo they are assigned to
  if (length(repoNames) != length(userNames)){
    stop("ERROR: repoNames and userNames must be equal width")
  }
  studentTeamId <- get_team_id(orgName, studentTeamName, auth)
  instructorTeamId <- get_team_id(orgName, instructorTeamName, auth)
  map2(repoNames, userNames, ~{
    repo <- .x
    user <- .y
    add_student_to_team(orgName, studentTeamId, userName = user, auth)
    make_student_repo(orgName, repoName = repo, auth, private = private, auto_init = auto_init)
    assign_team_to_repo(orgName, repoName = repo, studentTeamId, student_team_repo_permission, auth)
    assign_team_to_repo(orgName, repoName = repo, instructorTeamId, "push", auth)
    add_student_to_repo(orgName, repoName = repo, userName = user, auth)
    unwatch_repo(orgName, repoName = repo, auth)
  })
}
#! /usr/bin/env Rscript

# Command-line script: computes the strand cross-correlation (SCC) curve
# for a PE ChIP-seq sample and a table of ChIP-seq QC metrics.
# Depends on the project package ChIPUtils (loaded via devtools::load_all),
# which provides the "reads" S4 class, gr2dt/dt2gr, strand_cross_corr,
# nreads and PBC -- their exact contracts are not visible here.

args <- commandArgs(TRUE)

# Print usage and quit when --help is passed (message kept verbatim).
if("--help" %in% args){ cat(" Name: SCC_curve_and_QC_ChIPseq_SE.R - Calculates the SCC curve for a given SE ChIP-seq sample. Arguments: -- bedfile File in bed format with the 5' ends of the paired reads -- pairsfile Text file with complete already paired fragments -- outfile Name of the file, where the output is gonna be saved -- summaryfile Name of the file, where a collection of ChIP-Seq QC metrics are gonna be saved -- sizefile File without header and with two column indicating the respective chromosome and it's length. If the data was aligned to any of the dm3, hg19, mm9 or mm10 genomes, then it loads the file automatically by using dm3, hg19, etc. -- isPET Boolean variable indicating if the reads in the bamfile are paired -- maxShift Max possible shift for the curve, i.e. the output is gonna be a data.table with format [shift = 1:maxShift , cross.corr] Author: Rene Wech, Department of Statistics, University of Wisconsin - Madison ");q()}

# Positional argument parsing and validation.
stopifnot(length(args) == 7)
bedfile <- args[1]
pairsfile <- args[2]
outfile <- args[3]
summaryfile <- args[4]
sizefile <- args[5]
isPET <- as.logical(args[6])   # NOTE(review): parsed but never used below
maxShift <- as.numeric(args[7])
stopifnot(file.exists(bedfile))
stopifnot(file.exists(pairsfile))
stopifnot(maxShift > 0)

library(parallel)
library(data.table)
library(GenomicAlignments)
library(devtools)
# Project-local package with the "reads" class and SCC machinery.
load_all("~/Desktop/Docs/Code/ChIPUtils")

mc <- detectCores()   # NOTE(review): computed but not passed anywhere visible

# Build a "reads" object for a PE sample from a bed file of read 5' ends
# and a text file of paired fragments.
# NOTE(review): the first parameter `dt` is never used in the body, and
# the call below passes an undefined global `dt` -- this only works
# because R evaluates arguments lazily.  Consider removing the parameter.
create_reads_dt_pet <- function(dt,reads_file,pairs_file){
  dt1 <- fread(reads_file)
  dt2 <- fread(pairs_file)
  # Forward-read ranges from the bed columns (chrom, start, end, strand).
  greads1 <- dt1[,GRanges(seqnames = V1, ranges = IRanges(start = V2,end = V3),strand = V6)]
  rl <- floor(mean(width(greads1)))   # NOTE(review): computed but unused
  gr1 <- gr2dt(greads1)
  setkey(gr1,strand)
  # Forward-strand reads, split per chromosome.
  fwd <- gr1["+",nomatch = 0]
  fwd <- split(fwd,fwd[,(seqnames)])
  # Reverse-strand single-base ranges ending at the pair coordinate V2.
  greads2 <- dt2[,GRanges(seqnames = seqnames(greads1), ranges = IRanges(width = 1,end = V2),strand = "-")]
  gr2 <- gr2dt(greads2)
  setkey(gr2,strand)
  bwd <- gr2["-",nomatch = 0]
  bwd <- split(bwd,bwd[,(seqnames)])
  # Combined table, only used here for the total read count.
  gr <- rbind(gr1["+",nomatch = 0],gr2["-",nomatch = 0])
  out <- new("reads",readsFile = reads_file,readsF = fwd,readsR = bwd, nReads = nrow(gr),isPET = TRUE)
  return(out)
}

reads <- create_reads_dt_pet(dt,bedfile,pairsfile)

# Resolve a genome keyword (hg19/mm9/mm10/dm3) to the chrom.sizes file
# bundled with ChIPUtils; otherwise sizefile is taken as a path.
if(tolower(sizefile) %in% c("hg19","mm9","mm10","dm3")){
  sizedir <- system.file("extdata","chrom.sizes", package = "ChIPUtils")
  sizefiles <- list.files(sizedir)
  sizefile <- sizefiles[grep(sizefile,sizefiles)]
  sizefile <- file.path(sizedir,sizefile)
  rm(sizedir,sizefiles)
}
sizes <- data.table(read.table(sizefile,header = FALSE))

# SCC curve over shifts 1..maxShift; written as a tab-separated table.
scc <- strand_cross_corr(reads,shift = 1:maxShift, chrom.sizes = sizes,parallel = TRUE)
write.table(format(scc,digits = 6),file = outfile,quote = FALSE, sep = "\t",row.names = FALSE,col.names = TRUE)

# Fraction of reads on the forward strand (FSR metric).
strand_ratio <- function(reads){
  fwd <- length(dt2gr(do.call(rbind,readsF(reads))))
  bwd <- length(dt2gr(do.call(rbind,readsR(reads))))
  out <- fwd / (fwd + bwd)
  return(out)
}

# Modal read width across both strands, taken as the read length.
read_length <- function(reads){
  fwd <- dt2gr(do.call(rbind,readsF(reads)))
  bwd <- dt2gr(do.call(rbind,readsR(reads)))
  out <- c(fwd,bwd)
  out <- table(width(out))
  out <- as.numeric(names(which.max(out)))
  return(out)
}

# Normalized strand coefficient: max / min of the SCC curve.
NSC <- function(scc)scc[,max(cross.corr) /min(cross.corr)]

# Relative strand correlation, variant 1: max SCC over SCC at read length.
RSC1 <- function(scc,read_length){
  out <- scc[,max(cross.corr)] / scc[shift == read_length, (cross.corr)]
  return(out)
}

# Relative strand correlation, variant 2: same ratio after subtracting
# the curve minimum (background) from both terms.
RSC2 <- function(scc,read_length){
  mm <- scc[,min(cross.corr)]
  out <- (scc[,max(cross.corr)] - mm) /( scc[shift == read_length, (cross.corr)] - mm)
  return(out)
}

rl <- read_length(reads)
# Fragment length estimate: the shift maximizing the SCC curve.
fl <- scc[which.max(cross.corr),(shift)]

# One-row QC summary (nreads/PBC come from ChIPUtils); note `summary`
# shadows base::summary for the remainder of the script.
summary <- data.table(depth = nreads(reads), PBC = PBC(reads), FSR = strand_ratio(reads), read_length = rl, frag_length = fl, NSC = NSC(scc), RSC1 = RSC1(scc,rl), RSC2 = RSC2(scc,rl))
write.table(format(summary,digits = 6),file = summaryfile,quote = FALSE, sep = "\t",row.names = FALSE,col.names = TRUE)
/rscripts/scripts/SCC_curve_and_QC_ChIPseq_PE.R
no_license
welch16/ChIPexo
R
false
false
4,315
r
#! /usr/bin/env Rscript

# Command-line script: computes the strand cross-correlation (SCC) curve
# for a PE ChIP-seq sample and a table of ChIP-seq QC metrics.
# Depends on the project package ChIPUtils (loaded via devtools::load_all),
# which provides the "reads" S4 class, gr2dt/dt2gr, strand_cross_corr,
# nreads and PBC -- their exact contracts are not visible here.

args <- commandArgs(TRUE)

# Print usage and quit when --help is passed (message kept verbatim).
if("--help" %in% args){ cat(" Name: SCC_curve_and_QC_ChIPseq_SE.R - Calculates the SCC curve for a given SE ChIP-seq sample. Arguments: -- bedfile File in bed format with the 5' ends of the paired reads -- pairsfile Text file with complete already paired fragments -- outfile Name of the file, where the output is gonna be saved -- summaryfile Name of the file, where a collection of ChIP-Seq QC metrics are gonna be saved -- sizefile File without header and with two column indicating the respective chromosome and it's length. If the data was aligned to any of the dm3, hg19, mm9 or mm10 genomes, then it loads the file automatically by using dm3, hg19, etc. -- isPET Boolean variable indicating if the reads in the bamfile are paired -- maxShift Max possible shift for the curve, i.e. the output is gonna be a data.table with format [shift = 1:maxShift , cross.corr] Author: Rene Wech, Department of Statistics, University of Wisconsin - Madison ");q()}

# Positional argument parsing and validation.
stopifnot(length(args) == 7)
bedfile <- args[1]
pairsfile <- args[2]
outfile <- args[3]
summaryfile <- args[4]
sizefile <- args[5]
isPET <- as.logical(args[6])   # NOTE(review): parsed but never used below
maxShift <- as.numeric(args[7])
stopifnot(file.exists(bedfile))
stopifnot(file.exists(pairsfile))
stopifnot(maxShift > 0)

library(parallel)
library(data.table)
library(GenomicAlignments)
library(devtools)
# Project-local package with the "reads" class and SCC machinery.
load_all("~/Desktop/Docs/Code/ChIPUtils")

mc <- detectCores()   # NOTE(review): computed but not passed anywhere visible

# Build a "reads" object for a PE sample from a bed file of read 5' ends
# and a text file of paired fragments.
# NOTE(review): the first parameter `dt` is never used in the body, and
# the call below passes an undefined global `dt` -- this only works
# because R evaluates arguments lazily.  Consider removing the parameter.
create_reads_dt_pet <- function(dt,reads_file,pairs_file){
  dt1 <- fread(reads_file)
  dt2 <- fread(pairs_file)
  # Forward-read ranges from the bed columns (chrom, start, end, strand).
  greads1 <- dt1[,GRanges(seqnames = V1, ranges = IRanges(start = V2,end = V3),strand = V6)]
  rl <- floor(mean(width(greads1)))   # NOTE(review): computed but unused
  gr1 <- gr2dt(greads1)
  setkey(gr1,strand)
  # Forward-strand reads, split per chromosome.
  fwd <- gr1["+",nomatch = 0]
  fwd <- split(fwd,fwd[,(seqnames)])
  # Reverse-strand single-base ranges ending at the pair coordinate V2.
  greads2 <- dt2[,GRanges(seqnames = seqnames(greads1), ranges = IRanges(width = 1,end = V2),strand = "-")]
  gr2 <- gr2dt(greads2)
  setkey(gr2,strand)
  bwd <- gr2["-",nomatch = 0]
  bwd <- split(bwd,bwd[,(seqnames)])
  # Combined table, only used here for the total read count.
  gr <- rbind(gr1["+",nomatch = 0],gr2["-",nomatch = 0])
  out <- new("reads",readsFile = reads_file,readsF = fwd,readsR = bwd, nReads = nrow(gr),isPET = TRUE)
  return(out)
}

reads <- create_reads_dt_pet(dt,bedfile,pairsfile)

# Resolve a genome keyword (hg19/mm9/mm10/dm3) to the chrom.sizes file
# bundled with ChIPUtils; otherwise sizefile is taken as a path.
if(tolower(sizefile) %in% c("hg19","mm9","mm10","dm3")){
  sizedir <- system.file("extdata","chrom.sizes", package = "ChIPUtils")
  sizefiles <- list.files(sizedir)
  sizefile <- sizefiles[grep(sizefile,sizefiles)]
  sizefile <- file.path(sizedir,sizefile)
  rm(sizedir,sizefiles)
}
sizes <- data.table(read.table(sizefile,header = FALSE))

# SCC curve over shifts 1..maxShift; written as a tab-separated table.
scc <- strand_cross_corr(reads,shift = 1:maxShift, chrom.sizes = sizes,parallel = TRUE)
write.table(format(scc,digits = 6),file = outfile,quote = FALSE, sep = "\t",row.names = FALSE,col.names = TRUE)

# Fraction of reads on the forward strand (FSR metric).
strand_ratio <- function(reads){
  fwd <- length(dt2gr(do.call(rbind,readsF(reads))))
  bwd <- length(dt2gr(do.call(rbind,readsR(reads))))
  out <- fwd / (fwd + bwd)
  return(out)
}

# Modal read width across both strands, taken as the read length.
read_length <- function(reads){
  fwd <- dt2gr(do.call(rbind,readsF(reads)))
  bwd <- dt2gr(do.call(rbind,readsR(reads)))
  out <- c(fwd,bwd)
  out <- table(width(out))
  out <- as.numeric(names(which.max(out)))
  return(out)
}

# Normalized strand coefficient: max / min of the SCC curve.
NSC <- function(scc)scc[,max(cross.corr) /min(cross.corr)]

# Relative strand correlation, variant 1: max SCC over SCC at read length.
RSC1 <- function(scc,read_length){
  out <- scc[,max(cross.corr)] / scc[shift == read_length, (cross.corr)]
  return(out)
}

# Relative strand correlation, variant 2: same ratio after subtracting
# the curve minimum (background) from both terms.
RSC2 <- function(scc,read_length){
  mm <- scc[,min(cross.corr)]
  out <- (scc[,max(cross.corr)] - mm) /( scc[shift == read_length, (cross.corr)] - mm)
  return(out)
}

rl <- read_length(reads)
# Fragment length estimate: the shift maximizing the SCC curve.
fl <- scc[which.max(cross.corr),(shift)]

# One-row QC summary (nreads/PBC come from ChIPUtils); note `summary`
# shadows base::summary for the remainder of the script.
summary <- data.table(depth = nreads(reads), PBC = PBC(reads), FSR = strand_ratio(reads), read_length = rl, frag_length = fl, NSC = NSC(scc), RSC1 = RSC1(scc,rl), RSC2 = RSC2(scc,rl))
write.table(format(summary,digits = 6),file = summaryfile,quote = FALSE, sep = "\t",row.names = FALSE,col.names = TRUE)
# Overfitting experiment: fit Legendre-basis models of order 2 and 10 to
# noisy samples of a degree-10 Legendre target, and map the overfit
# measure E_out(g10) - E_out(g2) over (sample size, noise level).
set.seed(58)
#library(polynom)
library(colorRamps)
library(plot3D)

## HELP functions ----

# Normalized Legendre polynomial of order q, evaluated at x (vectorized).
make_N_legendre <- function(q, x){
  y <- rep(0, length(x))
  for(k in 0:q){
    y <- y + x**k * choose(q, k) * choose((q + k - 1)/2, q)
  }
  return(2**q * y)
}

# Target function: linear combination of Legendre polynomials of order 0..Q.
targetFunction2 <- function(x, beta_set, Q){
  func <- rep(0, length(x))
  for(q in 0:Q){
    func <- func + beta_set[q + 1] * make_N_legendre(q, x)
  }
  return(func)
}

# n draws from Uniform(-1, 1).
generate_uniform_values <- function(n){
  runif(n, min = -1, max = 1)
}

# Design matrix for a model of order Q: columns are Legendre polynomials
# of order 1..Q (order 0 is dropped; lm() supplies its own intercept).
make_model_g <- function(Q, x){
  matrix_legendre <- matrix(0L, nrow = length(x), ncol = Q + 1)
  for(i in 0:Q){
    matrix_legendre[, i + 1] <- make_N_legendre(i, x)
  }
  return(matrix_legendre[, 2:ncol(matrix_legendre)])
}

## TASK 1 i ----

# Out-of-sample MSE of fitted coefficients on `number_new` fresh points.
# NOTE(review): sd = sigma**2 passes sigma^2 as the *standard deviation*;
# if sigma was meant to be the sd this is a variance/sd mix-up.  Kept as
# in the rest of the script for consistency -- confirm with the author.
calculate_e_out_2 <- function(number_new, beta_set_tf, beta_set_g, Q_tf, sigma){
  new_x_values <- generate_uniform_values(number_new)
  new_y_values <- targetFunction2(new_x_values, beta_set_tf, Q_tf) +
    rnorm(n = length(new_x_values), mean = 0, sd = sigma**2)
  new_fit_values <- targetFunction2(new_x_values, beta_set_g, Q_tf)
  mse <- mean((new_y_values - new_fit_values)**2)
  return(mse)
}

# Squared difference between target and fit; used by the commented-out
# analytic bias computation in calculate_e_out.
find_difference <- function(x, beta_set_tf, beta_set_g){
  diff <- 0
  for(i in 1:length(beta_set_tf)){
    diff <- diff + (beta_set_tf[i] - beta_set_g[i]) * make_N_legendre(i, x)
  }
  return(diff^2)
}

# Fit a model of order Q_g to (x_tf, y_tf) and estimate its E_out.
calculate_e_out <- function(x_tf, y_tf, beta_set_tf, Q_g, sigma){
  calculated_coefficients <- lm(y_tf ~ make_model_g(Q_g, x_tf))
  beta_set_g <- unname(coef(calculated_coefficients))
  # FIX: pad by the difference of coefficient-vector *lengths*.  The
  # original used length(beta_set_tf) - Q_g, which is off by one because
  # beta_set_g has Q_g + 1 entries (intercept included).
  size_difference_beta <- length(beta_set_tf) - length(beta_set_g)
  if(size_difference_beta != 0){
    if(size_difference_beta > 0){
      beta_set_g <- c(beta_set_g, rep(0, size_difference_beta))
    } else {
      beta_set_tf <- c(beta_set_tf, rep(0, -size_difference_beta))
    }
  }
  # bias_g <- integrate(find_difference, -1, 1, beta_set_tf, beta_set_g)
  # return (bias_g$value)
  error_mse <- calculate_e_out_2(30, beta_set_tf, beta_set_g,
                                 length(beta_set_tf) - 1, sigma)
  return(error_mse)
}

# Overfit measure: E_out of the second model minus E_out of the first
# (here g10 - g2; positive means the higher-order model overfits).
find_overfit <- function(x_tf, y_tf, beta_set_tf, Q_set, sigma){
  err_g_1 <- calculate_e_out(x_tf, y_tf, beta_set_tf, Q_set[1], sigma)
  err_g_2 <- calculate_e_out(x_tf, y_tf, beta_set_tf, Q_set[2], sigma)
  return(err_g_2 - err_g_1)
}

# Overfit measure over a grid of sample sizes (rows) and noise levels
# (columns), clipped to [-0.2, 0.2] for a readable colour scale.
make_error_matrix_i <- function(N_set, sigma_set, beta_set_tf, Q_tf){
  final_matrix <- matrix(0L, nrow = length(N_set), ncol = length(sigma_set))
  for(k in 1:length(N_set)){
    x_tf <- sort(generate_uniform_values(N_set[k]))   # tf = target function
    # Clean signal, computed once per sample size.
    y_clean <- targetFunction2(x_tf, beta_set_tf, Q_tf)
    for(l in 1:length(sigma_set)){
      sigma <- sigma_set[l]
      # FIX: draw fresh noise from the clean signal each iteration.  The
      # original did y_tf <- y_tf + rnorm(...), so noise accumulated
      # across the sigma loop and later columns were far noisier than
      # the nominal sigma.
      y_noisy <- y_clean + rnorm(n = length(x_tf), mean = 0, sd = sigma**2)
      overfit_measure <- find_overfit(x_tf, y_noisy, beta_set_tf, c(2, 10), sigma)
      # Clip extreme values so the colour scale stays informative.
      if(abs(overfit_measure) > 0.2){
        overfit_measure <- 0.2 * sign(overfit_measure)
      }
      final_matrix[k, l] <- overfit_measure
    }
  }
  return(final_matrix)
}

# Running average of n_averages error matrices; rewrites the pdfs after
# every iteration so progress can be watched in a self-refreshing viewer.
avg_runs_i <- function(Q_tf, n_averages, N_set, sigma_set){
  avg_matrix <- matrix(0L, nrow = length(N_set), ncol = length(sigma_set))
  pb <- txtProgressBar(min = 0, max = n_averages)
  beta_set_tf <- runif(Q_tf + 1, min = -1, max = 1)
  for(i in 1:n_averages){
    setTxtProgressBar(pb, i)
    new_matrix <- make_error_matrix_i(N_set, sigma_set, beta_set_tf, Q_tf)
    # Incremental mean: avg_i = ((i-1) * avg_{i-1} + new) / i.
    matrix_list <- list(avg_matrix*(i - 1), new_matrix)
    avg_matrix <- Reduce("+", matrix_list) / i
    print(min(avg_matrix))
    print(max(avg_matrix))
    # Live plot: open the pdf with a reader that refreshes on change.
    pdf("error_matrix.pdf")
    image(avg_matrix, zlim = c(-0.2, 0.2), col = colorRamps::matlab.like(100))
    dev.off()
    pdf("3D-plot2.pdf")
    persp3D(x = N_set, y = sigma_set, z = avg_matrix, colvar = avg_matrix,
            clim = c(-0.2, 0.2), zlim = c(-0.2, 0.2))
    dev.off()
  }
  close(pb)
  return(avg_matrix)
}

# Entry point for task i.
taski <- function(){
  Q_tf <- 10   # order of the target function
  beta_set_tf <- runif(Q_tf + 1, min = -1, max = 1)
  N <- seq(20, 110, by = 1)
  sigma <- seq(0.2, 1.1, by = 0.02)
  # (The original computed one throw-away matrix here before averaging;
  # that redundant expensive call has been removed.)
  error_matrix <- avg_runs_i(Q_tf, 100, N, sigma)
  print(error_matrix)
  image(error_matrix, zlim = c(-0.5, 0.5), col = colorRamps::matlab.like(10))
  # A commented-out demonstration that the fitting works (plotting the
  # target against degree-2/degree-10 fits) was removed for brevity.
}

# Plot each column of a model design matrix over x.
plot_model_g <- function(x, model_g){
  plot(x, model_g[, 1], type = "l", xlim = c(-1, 1),
       ylim = c(min(model_g), max(model_g)))
  for(i in 2:ncol(model_g)){
    lines(x, model_g[, i])
  }
}

taski()
/Assignment1/src/Remake/Task2/task2.R
no_license
iZome/MachineLearning
R
false
false
6,302
r
# Overfitting experiment: fit Legendre-basis models of order 2 and 10 to
# noisy samples of a degree-10 Legendre target, and map the overfit
# measure E_out(g10) - E_out(g2) over (sample size, noise level).
set.seed(58)
#library(polynom)
library(colorRamps)
library(plot3D)

## HELP functions ----

# Normalized Legendre polynomial of order q, evaluated at x (vectorized).
make_N_legendre <- function(q, x){
  y <- rep(0, length(x))
  for(k in 0:q){
    y <- y + x**k * choose(q, k) * choose((q + k - 1)/2, q)
  }
  return(2**q * y)
}

# Target function: linear combination of Legendre polynomials of order 0..Q.
targetFunction2 <- function(x, beta_set, Q){
  func <- rep(0, length(x))
  for(q in 0:Q){
    func <- func + beta_set[q + 1] * make_N_legendre(q, x)
  }
  return(func)
}

# n draws from Uniform(-1, 1).
generate_uniform_values <- function(n){
  runif(n, min = -1, max = 1)
}

# Design matrix for a model of order Q: columns are Legendre polynomials
# of order 1..Q (order 0 is dropped; lm() supplies its own intercept).
make_model_g <- function(Q, x){
  matrix_legendre <- matrix(0L, nrow = length(x), ncol = Q + 1)
  for(i in 0:Q){
    matrix_legendre[, i + 1] <- make_N_legendre(i, x)
  }
  return(matrix_legendre[, 2:ncol(matrix_legendre)])
}

## TASK 1 i ----

# Out-of-sample MSE of fitted coefficients on `number_new` fresh points.
# NOTE(review): sd = sigma**2 passes sigma^2 as the *standard deviation*;
# if sigma was meant to be the sd this is a variance/sd mix-up.  Kept as
# in the rest of the script for consistency -- confirm with the author.
calculate_e_out_2 <- function(number_new, beta_set_tf, beta_set_g, Q_tf, sigma){
  new_x_values <- generate_uniform_values(number_new)
  new_y_values <- targetFunction2(new_x_values, beta_set_tf, Q_tf) +
    rnorm(n = length(new_x_values), mean = 0, sd = sigma**2)
  new_fit_values <- targetFunction2(new_x_values, beta_set_g, Q_tf)
  mse <- mean((new_y_values - new_fit_values)**2)
  return(mse)
}

# Squared difference between target and fit; used by the commented-out
# analytic bias computation in calculate_e_out.
find_difference <- function(x, beta_set_tf, beta_set_g){
  diff <- 0
  for(i in 1:length(beta_set_tf)){
    diff <- diff + (beta_set_tf[i] - beta_set_g[i]) * make_N_legendre(i, x)
  }
  return(diff^2)
}

# Fit a model of order Q_g to (x_tf, y_tf) and estimate its E_out.
calculate_e_out <- function(x_tf, y_tf, beta_set_tf, Q_g, sigma){
  calculated_coefficients <- lm(y_tf ~ make_model_g(Q_g, x_tf))
  beta_set_g <- unname(coef(calculated_coefficients))
  # FIX: pad by the difference of coefficient-vector *lengths*.  The
  # original used length(beta_set_tf) - Q_g, which is off by one because
  # beta_set_g has Q_g + 1 entries (intercept included).
  size_difference_beta <- length(beta_set_tf) - length(beta_set_g)
  if(size_difference_beta != 0){
    if(size_difference_beta > 0){
      beta_set_g <- c(beta_set_g, rep(0, size_difference_beta))
    } else {
      beta_set_tf <- c(beta_set_tf, rep(0, -size_difference_beta))
    }
  }
  # bias_g <- integrate(find_difference, -1, 1, beta_set_tf, beta_set_g)
  # return (bias_g$value)
  error_mse <- calculate_e_out_2(30, beta_set_tf, beta_set_g,
                                 length(beta_set_tf) - 1, sigma)
  return(error_mse)
}

# Overfit measure: E_out of the second model minus E_out of the first
# (here g10 - g2; positive means the higher-order model overfits).
find_overfit <- function(x_tf, y_tf, beta_set_tf, Q_set, sigma){
  err_g_1 <- calculate_e_out(x_tf, y_tf, beta_set_tf, Q_set[1], sigma)
  err_g_2 <- calculate_e_out(x_tf, y_tf, beta_set_tf, Q_set[2], sigma)
  return(err_g_2 - err_g_1)
}

# Overfit measure over a grid of sample sizes (rows) and noise levels
# (columns), clipped to [-0.2, 0.2] for a readable colour scale.
make_error_matrix_i <- function(N_set, sigma_set, beta_set_tf, Q_tf){
  final_matrix <- matrix(0L, nrow = length(N_set), ncol = length(sigma_set))
  for(k in 1:length(N_set)){
    x_tf <- sort(generate_uniform_values(N_set[k]))   # tf = target function
    # Clean signal, computed once per sample size.
    y_clean <- targetFunction2(x_tf, beta_set_tf, Q_tf)
    for(l in 1:length(sigma_set)){
      sigma <- sigma_set[l]
      # FIX: draw fresh noise from the clean signal each iteration.  The
      # original did y_tf <- y_tf + rnorm(...), so noise accumulated
      # across the sigma loop and later columns were far noisier than
      # the nominal sigma.
      y_noisy <- y_clean + rnorm(n = length(x_tf), mean = 0, sd = sigma**2)
      overfit_measure <- find_overfit(x_tf, y_noisy, beta_set_tf, c(2, 10), sigma)
      # Clip extreme values so the colour scale stays informative.
      if(abs(overfit_measure) > 0.2){
        overfit_measure <- 0.2 * sign(overfit_measure)
      }
      final_matrix[k, l] <- overfit_measure
    }
  }
  return(final_matrix)
}

# Running average of n_averages error matrices; rewrites the pdfs after
# every iteration so progress can be watched in a self-refreshing viewer.
avg_runs_i <- function(Q_tf, n_averages, N_set, sigma_set){
  avg_matrix <- matrix(0L, nrow = length(N_set), ncol = length(sigma_set))
  pb <- txtProgressBar(min = 0, max = n_averages)
  beta_set_tf <- runif(Q_tf + 1, min = -1, max = 1)
  for(i in 1:n_averages){
    setTxtProgressBar(pb, i)
    new_matrix <- make_error_matrix_i(N_set, sigma_set, beta_set_tf, Q_tf)
    # Incremental mean: avg_i = ((i-1) * avg_{i-1} + new) / i.
    matrix_list <- list(avg_matrix*(i - 1), new_matrix)
    avg_matrix <- Reduce("+", matrix_list) / i
    print(min(avg_matrix))
    print(max(avg_matrix))
    # Live plot: open the pdf with a reader that refreshes on change.
    pdf("error_matrix.pdf")
    image(avg_matrix, zlim = c(-0.2, 0.2), col = colorRamps::matlab.like(100))
    dev.off()
    pdf("3D-plot2.pdf")
    persp3D(x = N_set, y = sigma_set, z = avg_matrix, colvar = avg_matrix,
            clim = c(-0.2, 0.2), zlim = c(-0.2, 0.2))
    dev.off()
  }
  close(pb)
  return(avg_matrix)
}

# Entry point for task i.
taski <- function(){
  Q_tf <- 10   # order of the target function
  beta_set_tf <- runif(Q_tf + 1, min = -1, max = 1)
  N <- seq(20, 110, by = 1)
  sigma <- seq(0.2, 1.1, by = 0.02)
  # (The original computed one throw-away matrix here before averaging;
  # that redundant expensive call has been removed.)
  error_matrix <- avg_runs_i(Q_tf, 100, N, sigma)
  print(error_matrix)
  image(error_matrix, zlim = c(-0.5, 0.5), col = colorRamps::matlab.like(10))
  # A commented-out demonstration that the fitting works (plotting the
  # target against degree-2/degree-10 fits) was removed for brevity.
}

# Plot each column of a model design matrix over x.
plot_model_g <- function(x, model_g){
  plot(x, model_g[, 1], type = "l", xlim = c(-1, 1),
       ylim = c(min(model_g), max(model_g)))
  for(i in 2:ncol(model_g)){
    lines(x, model_g[, i])
  }
}

taski()
# Linear models of car registrations ("Turismos") against regional GDP
# ("PIB"): pooled, Andalucia-only and Madrid-only, plus 2018 predictions
# from GDP growth estimates.
library(dplyr)

turismos <- read.csv2("data/turismos.csv")
summary(turismos)
turismos$Año <- as.factor(turismos$Año)

# Pooled model over all regions and years.
modeloturismos <- lm(Turismos ~ PIB, data = turismos)
summary(modeloturismos)
plot(turismos$PIB, turismos$Turismos, xlab = "PIB", ylab = "Turismos")
abline(modeloturismos)
# Residual diagnostics for the pooled model.
hist(modeloturismos$residuals)
qqnorm(modeloturismos$residuals); qqline(modeloturismos$residuals, col = 2)
confint(modeloturismos, level = 0.95)

#ANDALUCIA
# FIX: the subset must exist before the model is fitted; the original
# called lm(..., data = Andalucia) before Andalucia was created.
Andalucia <- turismos[turismos$Comunidad == "ANDALUCÍA", ]
modeloandalucia <- lm(Turismos ~ PIB, data = Andalucia)
summary(modeloandalucia)
Andalucia$Predict <- predict(modeloandalucia, type = 'response')
plot(Andalucia$PIB, Andalucia$Turismos, xlab = "PIB", ylab = "Turismos")
abline(modeloandalucia)
Andalucia <- cbind(Andalucia,
                   predict(modeloandalucia, Andalucia,
                           interval = 'prediction', level = 0.90))

#MADRID
Madrid <- turismos[turismos$Comunidad == "MADRID, COMUNIDAD DE", ]
modelomadrid <- lm(Turismos ~ PIB, data = Madrid)
summary(modelomadrid)
plot(Madrid$PIB, Madrid$Turismos, xlab = "PIB", ylab = "Turismos")
abline(modelomadrid)
Madrid$Predict <- predict(modelomadrid, type = 'response')
# NOTE(review): Madrid uses level = 0.95 here but 0.90 further down --
# confirm which confidence level is intended.
Madrid <- cbind(Madrid,
                predict(modelomadrid, Madrid,
                        interval = 'prediction', level = 0.95))

#ESTIMACIONES PIB
estimaciones <- read.csv2("data/estimacionpib.csv")
turismos2017 <- turismos[turismos$Año == '2017', c(1, 3)]
estimaciones2018 <- left_join(estimaciones, turismos2017,
                              by = c("Comunidad" = "Comunidad"))
# Project 2018 GDP from the 2017 level and the estimated growth rate (var, %).
estimaciones2018$PIB <- estimaciones2018$PIB * ((estimaciones2018$var/100) + 1)

#PREDICCIÓN MADRID
prediccionmadrid <- estimaciones2018[estimaciones2018$Comunidad == 'MADRID, COMUNIDAD DE', c(1, 3)]
predict(modelomadrid, prediccionmadrid) #455111.7
intervalo <- predict(modelomadrid, prediccionmadrid,
                     interval = "prediction", level = 0.90)
confint(modelomadrid, level = 0.95)
prediccionmadrid$predict <- predict(modelomadrid, prediccionmadrid)
prediccionmadrid <- cbind(prediccionmadrid, intervalo)

#PREDICCIÓN ANDALUCIA
prediccionandalucia <- estimaciones2018[estimaciones2018$Comunidad == 'ANDALUCÍA', c(1, 3)]
predict(modeloandalucia, prediccionandalucia) #207719.7
predict(modeloandalucia, prediccionandalucia,
        interval = 'prediction', level = 0.9)
prediccionandalucia$predict <- predict(modeloandalucia, prediccionandalucia)
/R/Modelo lineal Turismos.R
no_license
franciscojdiezc/DataScience
R
false
false
2,352
r
# Linear models of car registrations ("Turismos") against regional GDP
# ("PIB"): pooled, Andalucia-only and Madrid-only, plus 2018 predictions
# from GDP growth estimates.
library(dplyr)

turismos <- read.csv2("data/turismos.csv")
summary(turismos)
turismos$Año <- as.factor(turismos$Año)

# Pooled model over all regions and years.
modeloturismos <- lm(Turismos ~ PIB, data = turismos)
summary(modeloturismos)
plot(turismos$PIB, turismos$Turismos, xlab = "PIB", ylab = "Turismos")
abline(modeloturismos)
# Residual diagnostics for the pooled model.
hist(modeloturismos$residuals)
qqnorm(modeloturismos$residuals); qqline(modeloturismos$residuals, col = 2)
confint(modeloturismos, level = 0.95)

#ANDALUCIA
# FIX: the subset must exist before the model is fitted; the original
# called lm(..., data = Andalucia) before Andalucia was created.
Andalucia <- turismos[turismos$Comunidad == "ANDALUCÍA", ]
modeloandalucia <- lm(Turismos ~ PIB, data = Andalucia)
summary(modeloandalucia)
Andalucia$Predict <- predict(modeloandalucia, type = 'response')
plot(Andalucia$PIB, Andalucia$Turismos, xlab = "PIB", ylab = "Turismos")
abline(modeloandalucia)
Andalucia <- cbind(Andalucia,
                   predict(modeloandalucia, Andalucia,
                           interval = 'prediction', level = 0.90))

#MADRID
Madrid <- turismos[turismos$Comunidad == "MADRID, COMUNIDAD DE", ]
modelomadrid <- lm(Turismos ~ PIB, data = Madrid)
summary(modelomadrid)
plot(Madrid$PIB, Madrid$Turismos, xlab = "PIB", ylab = "Turismos")
abline(modelomadrid)
Madrid$Predict <- predict(modelomadrid, type = 'response')
# NOTE(review): Madrid uses level = 0.95 here but 0.90 further down --
# confirm which confidence level is intended.
Madrid <- cbind(Madrid,
                predict(modelomadrid, Madrid,
                        interval = 'prediction', level = 0.95))

#ESTIMACIONES PIB
estimaciones <- read.csv2("data/estimacionpib.csv")
turismos2017 <- turismos[turismos$Año == '2017', c(1, 3)]
estimaciones2018 <- left_join(estimaciones, turismos2017,
                              by = c("Comunidad" = "Comunidad"))
# Project 2018 GDP from the 2017 level and the estimated growth rate (var, %).
estimaciones2018$PIB <- estimaciones2018$PIB * ((estimaciones2018$var/100) + 1)

#PREDICCIÓN MADRID
prediccionmadrid <- estimaciones2018[estimaciones2018$Comunidad == 'MADRID, COMUNIDAD DE', c(1, 3)]
predict(modelomadrid, prediccionmadrid) #455111.7
intervalo <- predict(modelomadrid, prediccionmadrid,
                     interval = "prediction", level = 0.90)
confint(modelomadrid, level = 0.95)
prediccionmadrid$predict <- predict(modelomadrid, prediccionmadrid)
prediccionmadrid <- cbind(prediccionmadrid, intervalo)

#PREDICCIÓN ANDALUCIA
prediccionandalucia <- estimaciones2018[estimaciones2018$Comunidad == 'ANDALUCÍA', c(1, 3)]
predict(modeloandalucia, prediccionandalucia) #207719.7
predict(modeloandalucia, prediccionandalucia,
        interval = 'prediction', level = 0.9)
prediccionandalucia$predict <- predict(modeloandalucia, prediccionandalucia)
## Put comments here that give an overall description of what your ## functions do ## Function in charge of caching the object makeCacheMatrix <- function(k = matrix()) { m<- NULL ## ===== setter====== set <- function (y){ k<<-y m<<- NULL } ##===== getter ===== get <- function () k setInverse <- function(inverse) m<<- inverse getInverse <- function() m list (set = set, get=get, setInverse= setInverse, getInverse=getInverse) } ## Checks if cache exists, if yes, delivers the object from cache, if not create cacheSolve <- function(k, ...) { ## Return a matrix that is the inverse of 'x' m <- k$getInverse() if (!is.null(m)){ message ('getting from the chache') return (m) } data<- k$get() m<-solve(data,...) k$setInverse(m) m }
/cachematrix.R
no_license
lnitzu/ProgrammingAssignment2
R
false
false
844
r
## Put comments here that give an overall description of what your ## functions do ## Function in charge of caching the object makeCacheMatrix <- function(k = matrix()) { m<- NULL ## ===== setter====== set <- function (y){ k<<-y m<<- NULL } ##===== getter ===== get <- function () k setInverse <- function(inverse) m<<- inverse getInverse <- function() m list (set = set, get=get, setInverse= setInverse, getInverse=getInverse) } ## Checks if cache exists, if yes, delivers the object from cache, if not create cacheSolve <- function(k, ...) { ## Return a matrix that is the inverse of 'x' m <- k$getInverse() if (!is.null(m)){ message ('getting from the chache') return (m) } data<- k$get() m<-solve(data,...) k$setInverse(m) m }
testlist <- list(a = 0, b = 0) result <- do.call(BayesMRA::rmvn_arma_scalar,testlist) str(result)
/BayesMRA/inst/testfiles/rmvn_arma_scalar/libFuzzer_rmvn_arma_scalar/rmvn_arma_scalar_valgrind_files/1612726010-test.R
no_license
akhikolla/updatedatatype-list1
R
false
false
97
r
testlist <- list(a = 0, b = 0) result <- do.call(BayesMRA::rmvn_arma_scalar,testlist) str(result)
groupSummarise = function(D,minNumSpeciesWithImages=1) { groupingVar= c("friendlyName","friendlyKey","basisOfRecord","country","license") D = D %>% group_by_(.dots=groupingVar) %>% summarise(numSpeciesWithImages = n()) %>% filter(numSpeciesWithImages >= !!minNumSpeciesWithImages) return(D) }
/R/gbifimagedata/R/groupSummarise.R
permissive
gbif/machine-vision
R
false
false
303
r
groupSummarise = function(D,minNumSpeciesWithImages=1) { groupingVar= c("friendlyName","friendlyKey","basisOfRecord","country","license") D = D %>% group_by_(.dots=groupingVar) %>% summarise(numSpeciesWithImages = n()) %>% filter(numSpeciesWithImages >= !!minNumSpeciesWithImages) return(D) }
############################################################################################################################################# ###################################################### main Lasso function ################################################################# est.glmmLasso<-function(fix,rnd,data,lambda,family=gaussian(link = "identity"), final.re=FALSE,switch.NR=FALSE,control=list()) { if(class(data)[1]=="tbl_df") data <- as.data.frame(data) if(!is.null(rnd)) { return.obj <- est.glmmLasso.RE(fix=fix,rnd=rnd,data=data,lambda=lambda,family=family, final.re=final.re,switch.NR=switch.NR,control=control) }else{ return.obj <- est.glmmLasso.noRE(fix=fix,data=data,lambda=lambda,family=family, final.re=final.re,switch.NR=switch.NR,control=control) } return(return.obj) } ############################################################################################################################################# ############################################################################################################################################# glmmLasso <- function(fix=formula, rnd=formula, data, lambda, family=gaussian(link = "identity"), switch.NR = FALSE, final.re=FALSE, control=list()){ est <- est.glmmLasso(fix,rnd,data=data,lambda=lambda,family=family, switch.NR=switch.NR,final.re=final.re,control=control) est$fitted.values <- est$y_hat est$StdDev <- est$Q est$call <- match.call() class(est) <- "glmmLasso" est } print.glmmLasso <- function(x, ...) { cat("Call:\n") print(x$call) cat("\nFixed Effects:\n") cat("\nCoefficients:\n") print(x$coefficients) if(!is.null(x$smooth)) { cat("\nSmooth Effects:\n") print(colnames(x$B)) } if(!is.null(x$rnd)) { cat("\nRandom Effects:\n") cat("\nStdDev:\n") print(x$StdDev) }else{ cat("\nNo random effects included!\n") } } summary.glmmLasso <- function(object, ...) 
{ se <- object$fixerror zval <- coefficients(object) / se TAB <- cbind(Estimate = coefficients(object), StdErr = se, z.value = zval, p.value = 2*pnorm(-abs(zval))) res <- list(call=object$call, coefficients=TAB,smooth.eff=colnames(object$B),StdDev=object$StdDev,rnd=object$rnd) class(res) <- "summary.glmmLasso" res } print.summary.glmmLasso <- function(x, ...) { cat("Call:\n") print(x$call) cat("\n") cat("\nFixed Effects:\n") cat("\nCoefficients:\n") printCoefmat(x$coefficients, P.values=TRUE, has.Pvalue=TRUE) if(!is.null(x$smooth)) { cat("\nSmooth Effects:\n") print(x$smooth.eff) } if(!is.null(x$rnd)) { cat("\nRandom Effects:\n") cat("\nStdDev:\n") print(x$StdDev) }else{ cat("\nNo random effects included!\n") } } predict.glmmLasso <- function(object,newdata=NULL,new.random.design=NULL,...) { if(is.null(newdata)) { y<-fitted(object) }else{ X <- model.matrix(formula(object$fix), newdata) family<-object$family if(!is.null(family$multivariate)){ K <- object$K if(all(X[,1]==1)){ X <- X[,-1] } names.x <- colnames(X) theta <- matrix(rep(diag(1,K),nrow(X)),ncol=K,byrow=TRUE) X <- cbind(theta, matrix(rep(X,each=K),ncol=ncol(X))) colnames(X) <- c(paste0("theta",1:K),names.x) } if(!is.null(object$rnd)) { rnd.len<-object$rnd.len if(is.null(new.random.design)) { if(rnd.len==1) { subj.new<-levels(as.factor(newdata[,object$subject])) subj.old<-levels(as.factor(object$data[,object$subject])) subj.test<-is.element(subj.new,subj.old) subj.ok<-subj.new[subj.test] krit.random<-!all(!is.element(subj.new,subj.old)) if(krit.random) { W_start <- model.matrix(formula(object$newrndfrml), newdata) rnlabels<-terms(formula(object$newrndfrml)) random.labels<-attr(rnlabels,"term.labels") s<-length(random.labels) k<-table(newdata[,colnames(newdata)==(object$subject)]) n<-length(k) if(s>1) { subj.test<-rep(subj.test,s) #for (i in 2:s) #subj.test<-cbind(subj.test,subj.test) subj.test<-as.vector(t(subj.test)) } if(s>1) { W<-W_start[,seq(from=1,to=1+(s-1)*n,by=n)] for (i in 2:n) 
W<-cbind(W,W_start[,seq(from=i,to=i+(s-1)*n,by=n)]) }else{ W<-W_start } y<- as.vector(family$linkinv(X[,is.element(colnames(X),names(object$coef))]%*%object$coef[is.element(names(object$coef),colnames(X))])) rand.ok<-is.element(newdata[,object$subject],subj.ok) W.neu<-W[,subj.test] if(!is.null(family$multivariate)) { names.of.W <- colnames(W.neu) W.neu <- matrix(rep(W,each=K),ncol=ncol(W.neu)) colnames(W.neu) <- names.of.W } if(nrow(X)!=1) { y[rand.ok]<- family$linkinv(cbind(X[,is.element(colnames(X),names(object$coef))],W.neu)[rand.ok,]%*%c(object$coef[is.element(names(object$coef),colnames(X))],object$ranef[match(colnames(W.neu),names(object$ranef))])) }else{ y[rand.ok]<- family$linkinv(c(X[,is.element(colnames(X),names(object$coef))],W.neu)[rand.ok]%*%c(object$coef[is.element(names(object$coef),colnames(X))],object$ranef[match(names(W.neu),names(object$ranef))])) } }else{ W<-NULL y<- as.vector(family$linkinv(X[,is.element(colnames(X),names(object$coef))]%*%object$coef[is.element(names(object$coef),colnames(X))])) } }else{ rnlabels<-list() random.labels<-list() s<-numeric() k<-NULL n<-numeric() W<- NULL subj.test.long<-numeric() subj.ok<-character() krit.random<-logical() subj.ok<-list() W.single <- list() for(zu in 1:rnd.len) { subj.new<-levels(as.factor(newdata[,object$subject[zu]])) subj.old<-levels(as.factor(object$data[,object$subject[zu]])) subj.test<-is.element(subj.new,subj.old) subj.ok[[zu]]<-subj.new[subj.test] krit.random[zu]<-!all(!is.element(subj.new,subj.old)) if(krit.random[zu]) { rnlabels[[zu]]<-terms(formula(object$newrndfrml[[zu]])) random.labels[[zu]]<-attr(rnlabels[[zu]],"term.labels") s[zu]<-length(random.labels[[zu]]) k1<-table(newdata[,colnames(newdata)==(object$subject[zu])]) k<-c(k,k1) n[zu]<-length(k1) W_start <- model.matrix(formula(object$newrndfrml[[zu]]), newdata) if(s[zu]>1) { W2<-W_start[,seq(from=1,to=1+(s[zu]-1)*n[zu],by=n[zu])] for (i in 2:n[zu]) W2<-cbind(W2,W_start[,seq(from=i,to=i+(s[zu]-1)*n[zu],by=n[zu])]) }else{ 
W2<-W_start } W<-cbind(W,W2) W.single[[zu]]<-W2 if(s[zu]>1) { subj.test<- rep(subj.test,s[zu]) #for (i in 2:s[zu]) #subj.test<-cbind(subj.test,subj.test) subj.test<-as.vector(t(subj.test)) } subj.test.long<-c(subj.test.long,subj.test) }} dim.W.single<-rep(0,rnd.len+1) for(zu in 1:rnd.len) { if(krit.random[zu]) dim.W.single[zu+1]<-dim(W.single[[zu]])[2] } if(!all(!krit.random)) { rand.ok<-matrix(0,dim(newdata)[1],rnd.len) for(zu in 1:rnd.len) rand.ok[,zu]<-is.element(newdata[,object$subject[zu]],subj.ok[[zu]]) W.rnd<-matrix(0,dim(W)[1],dim(W)[2]) for(ur in 1:dim(newdata)[1]) { for (zu in 1:rnd.len) { if(rand.ok[ur,zu]==1) W.rnd[ur,sum(dim.W.single[1:zu])+1:sum(dim.W.single[zu+1])]<-W.single[[zu]][ur,] } } W.neu<-W.rnd[,as.logical(subj.test.long)] if(!is.matrix(W.neu)) W.neu<-t(as.matrix(W.neu)) colnames(W.neu)<-colnames(W)[as.logical(subj.test.long)] if(!is.null(family$multivariate)) { names.of.W <- colnames(W.neu) W.neu <- matrix(rep(W.neu,each=K),ncol=ncol(W.neu)) colnames(W.neu) <- names.of.W } if(dim(X)[1]!=1) { y<- family$linkinv(cbind(X[,is.element(colnames(X),names(object$coef))],W.neu)%*%c(object$coef[is.element(names(object$coef),colnames(X))],object$ranef[match(colnames(W.neu),names(object$ranef))])) }else{ y<- family$linkinv(c(X[,is.element(colnames(X),names(object$coef))],W.neu)%*%c(object$coef[is.element(names(object$coef),colnames(X))],object$ranef[match(colnames(W.neu),names(object$ranef))])) } }else{ y<- as.vector(family$linkinv(X[,is.element(colnames(X),names(object$coef))]%*%object$coef[is.element(names(object$coef),colnames(X))])) }} }else{ Design<-cbind(X,new.random.design) Delta<-c(object$coef,object$ranef) if(ncol(Design)!=length(Delta)) stop("Wrong dimension of random effects design matrix!") y<- as.vector(family$linkinv(Design%*%Delta)) }}else{ y <- family$linkinv(X%*%object$coef) } } y } plot.glmmLasso <- function(x,which=NULL,plot.data=TRUE,include.icept=FALSE,ylab=NULL,main=NULL,...) 
{ if(is.null(ylab)) ylab<-"" if(is.null(main)) main<-"" if(is.null(x$B)) stop("No smooth terms to plot!") Phi<-x$B m<-dim(Phi)[2] if(is.null(which)) which<-1:m p<-length(which) if(p>9) stop("Too many smooth functions! Please specify at maximum nine.") a<-ceiling(sqrt(p)) b<-round(sqrt(p)) if(b==0) b<-1 nbasis<-x$nbasis diff.ord<-x$diff.ord spline.degree<-x$spline.degree knots.no<-nbasis-1 if(spline.degree<3 && (spline.degree-diff.ord)<2) knots.no<-knots.no+1 spline.ma<-list() Design<-list() smooth.ma<-matrix(0,m,dim(Phi)[1]) for(i in which) { if(plot.data) { spline.ma[[i]]<-bs.design(sort(Phi[,i]), diff.ord=diff.ord, spline.degree=spline.degree, knots.no=knots.no) }else{ smooth.ma<-matrix(0,m,1000) data.seq<-seq(min(Phi[,i]),max(Phi[,i]),length.out=1000) spline.ma[[i]]<-bs.design(data.seq, diff.ord=diff.ord, spline.degree=spline.degree, knots.no=knots.no) } Design[[i]]<-cbind(spline.ma[[i]]$X[,-1],spline.ma[[i]]$Z) smooth.ma[i,]<-Design[[i]]%*%x$smooth[((i-1)*nbasis+1):(i*nbasis)] } par(mfrow=c(a,b)) for(i in which) { if(include.icept && is.element("(Intercept)",names(x$coef))) { if(plot.data) { plot(sort(Phi[,i]), x$coef[match("(Intercept)",names(x$coef))]+smooth.ma[i,], type = "l", lwd=2, xlab=paste(colnames(Phi)[i]), ylab=ylab, main=main,cex.lab=2,cex.axis=2,...) }else{ data.seq<-seq(min(Phi[,i]),max(Phi[,i]),length.out=1000) plot(data.seq, x$coef[match("(Intercept)",names(x$coef))]+smooth.ma[i,], type = "l", lwd=2, xlab=paste(colnames(Phi)[i]), ylab=ylab,main=main,cex.lab=2,cex.axis=2,...) } rug(jitter(Phi[,i])) }else{ if(plot.data) { plot(sort(Phi[,i]), smooth.ma[i,], type = "l", lwd=2, xlab=paste(colnames(Phi)[i]), ylab=ylab,main=main,cex.lab=2,cex.axis=2,...) }else{ data.seq<-seq(min(Phi[,i]),max(Phi[,i]),length.out=1000) plot(data.seq, smooth.ma[i,], type = "l", lwd=2, xlab=paste(colnames(Phi)[i]), ylab=ylab,main=main,cex.lab=2,cex.axis=2,...) } rug(jitter(Phi[,i])) } } }
/R/glmmLasso.r
no_license
cran/glmmLasso
R
false
false
12,019
r
############################################################################################################################################# ###################################################### main Lasso function ################################################################# est.glmmLasso<-function(fix,rnd,data,lambda,family=gaussian(link = "identity"), final.re=FALSE,switch.NR=FALSE,control=list()) { if(class(data)[1]=="tbl_df") data <- as.data.frame(data) if(!is.null(rnd)) { return.obj <- est.glmmLasso.RE(fix=fix,rnd=rnd,data=data,lambda=lambda,family=family, final.re=final.re,switch.NR=switch.NR,control=control) }else{ return.obj <- est.glmmLasso.noRE(fix=fix,data=data,lambda=lambda,family=family, final.re=final.re,switch.NR=switch.NR,control=control) } return(return.obj) } ############################################################################################################################################# ############################################################################################################################################# glmmLasso <- function(fix=formula, rnd=formula, data, lambda, family=gaussian(link = "identity"), switch.NR = FALSE, final.re=FALSE, control=list()){ est <- est.glmmLasso(fix,rnd,data=data,lambda=lambda,family=family, switch.NR=switch.NR,final.re=final.re,control=control) est$fitted.values <- est$y_hat est$StdDev <- est$Q est$call <- match.call() class(est) <- "glmmLasso" est } print.glmmLasso <- function(x, ...) { cat("Call:\n") print(x$call) cat("\nFixed Effects:\n") cat("\nCoefficients:\n") print(x$coefficients) if(!is.null(x$smooth)) { cat("\nSmooth Effects:\n") print(colnames(x$B)) } if(!is.null(x$rnd)) { cat("\nRandom Effects:\n") cat("\nStdDev:\n") print(x$StdDev) }else{ cat("\nNo random effects included!\n") } } summary.glmmLasso <- function(object, ...) 
{ se <- object$fixerror zval <- coefficients(object) / se TAB <- cbind(Estimate = coefficients(object), StdErr = se, z.value = zval, p.value = 2*pnorm(-abs(zval))) res <- list(call=object$call, coefficients=TAB,smooth.eff=colnames(object$B),StdDev=object$StdDev,rnd=object$rnd) class(res) <- "summary.glmmLasso" res } print.summary.glmmLasso <- function(x, ...) { cat("Call:\n") print(x$call) cat("\n") cat("\nFixed Effects:\n") cat("\nCoefficients:\n") printCoefmat(x$coefficients, P.values=TRUE, has.Pvalue=TRUE) if(!is.null(x$smooth)) { cat("\nSmooth Effects:\n") print(x$smooth.eff) } if(!is.null(x$rnd)) { cat("\nRandom Effects:\n") cat("\nStdDev:\n") print(x$StdDev) }else{ cat("\nNo random effects included!\n") } } predict.glmmLasso <- function(object,newdata=NULL,new.random.design=NULL,...) { if(is.null(newdata)) { y<-fitted(object) }else{ X <- model.matrix(formula(object$fix), newdata) family<-object$family if(!is.null(family$multivariate)){ K <- object$K if(all(X[,1]==1)){ X <- X[,-1] } names.x <- colnames(X) theta <- matrix(rep(diag(1,K),nrow(X)),ncol=K,byrow=TRUE) X <- cbind(theta, matrix(rep(X,each=K),ncol=ncol(X))) colnames(X) <- c(paste0("theta",1:K),names.x) } if(!is.null(object$rnd)) { rnd.len<-object$rnd.len if(is.null(new.random.design)) { if(rnd.len==1) { subj.new<-levels(as.factor(newdata[,object$subject])) subj.old<-levels(as.factor(object$data[,object$subject])) subj.test<-is.element(subj.new,subj.old) subj.ok<-subj.new[subj.test] krit.random<-!all(!is.element(subj.new,subj.old)) if(krit.random) { W_start <- model.matrix(formula(object$newrndfrml), newdata) rnlabels<-terms(formula(object$newrndfrml)) random.labels<-attr(rnlabels,"term.labels") s<-length(random.labels) k<-table(newdata[,colnames(newdata)==(object$subject)]) n<-length(k) if(s>1) { subj.test<-rep(subj.test,s) #for (i in 2:s) #subj.test<-cbind(subj.test,subj.test) subj.test<-as.vector(t(subj.test)) } if(s>1) { W<-W_start[,seq(from=1,to=1+(s-1)*n,by=n)] for (i in 2:n) 
W<-cbind(W,W_start[,seq(from=i,to=i+(s-1)*n,by=n)]) }else{ W<-W_start } y<- as.vector(family$linkinv(X[,is.element(colnames(X),names(object$coef))]%*%object$coef[is.element(names(object$coef),colnames(X))])) rand.ok<-is.element(newdata[,object$subject],subj.ok) W.neu<-W[,subj.test] if(!is.null(family$multivariate)) { names.of.W <- colnames(W.neu) W.neu <- matrix(rep(W,each=K),ncol=ncol(W.neu)) colnames(W.neu) <- names.of.W } if(nrow(X)!=1) { y[rand.ok]<- family$linkinv(cbind(X[,is.element(colnames(X),names(object$coef))],W.neu)[rand.ok,]%*%c(object$coef[is.element(names(object$coef),colnames(X))],object$ranef[match(colnames(W.neu),names(object$ranef))])) }else{ y[rand.ok]<- family$linkinv(c(X[,is.element(colnames(X),names(object$coef))],W.neu)[rand.ok]%*%c(object$coef[is.element(names(object$coef),colnames(X))],object$ranef[match(names(W.neu),names(object$ranef))])) } }else{ W<-NULL y<- as.vector(family$linkinv(X[,is.element(colnames(X),names(object$coef))]%*%object$coef[is.element(names(object$coef),colnames(X))])) } }else{ rnlabels<-list() random.labels<-list() s<-numeric() k<-NULL n<-numeric() W<- NULL subj.test.long<-numeric() subj.ok<-character() krit.random<-logical() subj.ok<-list() W.single <- list() for(zu in 1:rnd.len) { subj.new<-levels(as.factor(newdata[,object$subject[zu]])) subj.old<-levels(as.factor(object$data[,object$subject[zu]])) subj.test<-is.element(subj.new,subj.old) subj.ok[[zu]]<-subj.new[subj.test] krit.random[zu]<-!all(!is.element(subj.new,subj.old)) if(krit.random[zu]) { rnlabels[[zu]]<-terms(formula(object$newrndfrml[[zu]])) random.labels[[zu]]<-attr(rnlabels[[zu]],"term.labels") s[zu]<-length(random.labels[[zu]]) k1<-table(newdata[,colnames(newdata)==(object$subject[zu])]) k<-c(k,k1) n[zu]<-length(k1) W_start <- model.matrix(formula(object$newrndfrml[[zu]]), newdata) if(s[zu]>1) { W2<-W_start[,seq(from=1,to=1+(s[zu]-1)*n[zu],by=n[zu])] for (i in 2:n[zu]) W2<-cbind(W2,W_start[,seq(from=i,to=i+(s[zu]-1)*n[zu],by=n[zu])]) }else{ 
W2<-W_start } W<-cbind(W,W2) W.single[[zu]]<-W2 if(s[zu]>1) { subj.test<- rep(subj.test,s[zu]) #for (i in 2:s[zu]) #subj.test<-cbind(subj.test,subj.test) subj.test<-as.vector(t(subj.test)) } subj.test.long<-c(subj.test.long,subj.test) }} dim.W.single<-rep(0,rnd.len+1) for(zu in 1:rnd.len) { if(krit.random[zu]) dim.W.single[zu+1]<-dim(W.single[[zu]])[2] } if(!all(!krit.random)) { rand.ok<-matrix(0,dim(newdata)[1],rnd.len) for(zu in 1:rnd.len) rand.ok[,zu]<-is.element(newdata[,object$subject[zu]],subj.ok[[zu]]) W.rnd<-matrix(0,dim(W)[1],dim(W)[2]) for(ur in 1:dim(newdata)[1]) { for (zu in 1:rnd.len) { if(rand.ok[ur,zu]==1) W.rnd[ur,sum(dim.W.single[1:zu])+1:sum(dim.W.single[zu+1])]<-W.single[[zu]][ur,] } } W.neu<-W.rnd[,as.logical(subj.test.long)] if(!is.matrix(W.neu)) W.neu<-t(as.matrix(W.neu)) colnames(W.neu)<-colnames(W)[as.logical(subj.test.long)] if(!is.null(family$multivariate)) { names.of.W <- colnames(W.neu) W.neu <- matrix(rep(W.neu,each=K),ncol=ncol(W.neu)) colnames(W.neu) <- names.of.W } if(dim(X)[1]!=1) { y<- family$linkinv(cbind(X[,is.element(colnames(X),names(object$coef))],W.neu)%*%c(object$coef[is.element(names(object$coef),colnames(X))],object$ranef[match(colnames(W.neu),names(object$ranef))])) }else{ y<- family$linkinv(c(X[,is.element(colnames(X),names(object$coef))],W.neu)%*%c(object$coef[is.element(names(object$coef),colnames(X))],object$ranef[match(colnames(W.neu),names(object$ranef))])) } }else{ y<- as.vector(family$linkinv(X[,is.element(colnames(X),names(object$coef))]%*%object$coef[is.element(names(object$coef),colnames(X))])) }} }else{ Design<-cbind(X,new.random.design) Delta<-c(object$coef,object$ranef) if(ncol(Design)!=length(Delta)) stop("Wrong dimension of random effects design matrix!") y<- as.vector(family$linkinv(Design%*%Delta)) }}else{ y <- family$linkinv(X%*%object$coef) } } y } plot.glmmLasso <- function(x,which=NULL,plot.data=TRUE,include.icept=FALSE,ylab=NULL,main=NULL,...) 
{ if(is.null(ylab)) ylab<-"" if(is.null(main)) main<-"" if(is.null(x$B)) stop("No smooth terms to plot!") Phi<-x$B m<-dim(Phi)[2] if(is.null(which)) which<-1:m p<-length(which) if(p>9) stop("Too many smooth functions! Please specify at maximum nine.") a<-ceiling(sqrt(p)) b<-round(sqrt(p)) if(b==0) b<-1 nbasis<-x$nbasis diff.ord<-x$diff.ord spline.degree<-x$spline.degree knots.no<-nbasis-1 if(spline.degree<3 && (spline.degree-diff.ord)<2) knots.no<-knots.no+1 spline.ma<-list() Design<-list() smooth.ma<-matrix(0,m,dim(Phi)[1]) for(i in which) { if(plot.data) { spline.ma[[i]]<-bs.design(sort(Phi[,i]), diff.ord=diff.ord, spline.degree=spline.degree, knots.no=knots.no) }else{ smooth.ma<-matrix(0,m,1000) data.seq<-seq(min(Phi[,i]),max(Phi[,i]),length.out=1000) spline.ma[[i]]<-bs.design(data.seq, diff.ord=diff.ord, spline.degree=spline.degree, knots.no=knots.no) } Design[[i]]<-cbind(spline.ma[[i]]$X[,-1],spline.ma[[i]]$Z) smooth.ma[i,]<-Design[[i]]%*%x$smooth[((i-1)*nbasis+1):(i*nbasis)] } par(mfrow=c(a,b)) for(i in which) { if(include.icept && is.element("(Intercept)",names(x$coef))) { if(plot.data) { plot(sort(Phi[,i]), x$coef[match("(Intercept)",names(x$coef))]+smooth.ma[i,], type = "l", lwd=2, xlab=paste(colnames(Phi)[i]), ylab=ylab, main=main,cex.lab=2,cex.axis=2,...) }else{ data.seq<-seq(min(Phi[,i]),max(Phi[,i]),length.out=1000) plot(data.seq, x$coef[match("(Intercept)",names(x$coef))]+smooth.ma[i,], type = "l", lwd=2, xlab=paste(colnames(Phi)[i]), ylab=ylab,main=main,cex.lab=2,cex.axis=2,...) } rug(jitter(Phi[,i])) }else{ if(plot.data) { plot(sort(Phi[,i]), smooth.ma[i,], type = "l", lwd=2, xlab=paste(colnames(Phi)[i]), ylab=ylab,main=main,cex.lab=2,cex.axis=2,...) }else{ data.seq<-seq(min(Phi[,i]),max(Phi[,i]),length.out=1000) plot(data.seq, smooth.ma[i,], type = "l", lwd=2, xlab=paste(colnames(Phi)[i]), ylab=ylab,main=main,cex.lab=2,cex.axis=2,...) } rug(jitter(Phi[,i])) } } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R, R/utilities-grid.R \docType{import} \name{reexports} \alias{reexports} \alias{alpha} \alias{unit} \alias{arrow} \title{Objects exported from other packages} \examples{ ggplot(mpg, aes(displ, hwy)) + geom_point(alpha = 0.5, colour = "blue") ggplot(mpg, aes(displ, hwy)) + geom_point(colour = alpha("blue", 0.5)) } \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. \describe{ \item{grid}{\code{\link[grid]{arrow}}, \code{\link[grid]{unit}}} \item{scales}{\code{\link[scales]{alpha}}} }}
/man/reexports.Rd
no_license
cran/ggplot2
R
false
true
697
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R, R/utilities-grid.R \docType{import} \name{reexports} \alias{reexports} \alias{alpha} \alias{unit} \alias{arrow} \title{Objects exported from other packages} \examples{ ggplot(mpg, aes(displ, hwy)) + geom_point(alpha = 0.5, colour = "blue") ggplot(mpg, aes(displ, hwy)) + geom_point(colour = alpha("blue", 0.5)) } \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. \describe{ \item{grid}{\code{\link[grid]{arrow}}, \code{\link[grid]{unit}}} \item{scales}{\code{\link[scales]{alpha}}} }}
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----eval=FALSE--------------------------------------------------------------- # install.packages("pcLasso") ## ----------------------------------------------------------------------------- library(pcLasso) ## ----------------------------------------------------------------------------- set.seed(944) n <- 100 p <- 20 x <- matrix(rnorm(n*p), n, p) beta <- matrix(c(rep(2, 5), rep(0, 15)), ncol = 1) y <- x %*% beta + rnorm(n) ## ----results="hide"----------------------------------------------------------- fit <- pcLasso(x, y, ratio = 0.8) ## ----------------------------------------------------------------------------- # intercept fit$a0[20] # coefficients fit$beta[, 20] ## ----------------------------------------------------------------------------- fit$nzero ## ----------------------------------------------------------------------------- # get predictions for 20th model predict(fit, x[1:5, ])[, 20] ## ----------------------------------------------------------------------------- groups <- vector("list", 4) for (k in 1:4) { groups[[k]] <- 5 * (k-1) + 1:5 } groups ## ----results="hide"----------------------------------------------------------- fit <- pcLasso(x, y, ratio = 0.8, groups = groups) ## ----------------------------------------------------------------------------- groups[[1]] <- 1:7 groups ## ----results="hide"----------------------------------------------------------- fit <- pcLasso(x, y, ratio = 0.8, groups = groups) ## ----------------------------------------------------------------------------- # intercept at 20th model: same as before fit$a0[20] # coefficients at 20th model: look at origbeta instead fit$origbeta[, 20] ## ----------------------------------------------------------------------------- fit$orignzero ## ----results="hide"----------------------------------------------------------- cvfit <- cv.pcLasso(x, y, 
groups = groups, ratio = 0.8) ## ----results="hide"----------------------------------------------------------- cvfit <- cv.pcLasso(x, y, groups = groups, ratio = 0.8, nfolds = 5) ## ----results="hide"----------------------------------------------------------- foldid <- sample(rep(seq(10), length = n)) cvfit <- cv.pcLasso(x, y, groups = groups, ratio = 0.8, foldid = foldid) ## ----fig.width=5, fig.height=4------------------------------------------------ plot(cvfit) ## ----fig.width=5, fig.height=4------------------------------------------------ plot(cvfit, orignz = FALSE) ## ----------------------------------------------------------------------------- cvfit$lambda.min cvfit$lambda.1se ## ----------------------------------------------------------------------------- predict(cvfit, x[1:5, ]) # s = lambda.1se predict(cvfit, x[1:5, ], s = "lambda.min")
/inst/doc/pcLasso.R
no_license
cran/pcLasso
R
false
false
2,890
r
## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----eval=FALSE--------------------------------------------------------------- # install.packages("pcLasso") ## ----------------------------------------------------------------------------- library(pcLasso) ## ----------------------------------------------------------------------------- set.seed(944) n <- 100 p <- 20 x <- matrix(rnorm(n*p), n, p) beta <- matrix(c(rep(2, 5), rep(0, 15)), ncol = 1) y <- x %*% beta + rnorm(n) ## ----results="hide"----------------------------------------------------------- fit <- pcLasso(x, y, ratio = 0.8) ## ----------------------------------------------------------------------------- # intercept fit$a0[20] # coefficients fit$beta[, 20] ## ----------------------------------------------------------------------------- fit$nzero ## ----------------------------------------------------------------------------- # get predictions for 20th model predict(fit, x[1:5, ])[, 20] ## ----------------------------------------------------------------------------- groups <- vector("list", 4) for (k in 1:4) { groups[[k]] <- 5 * (k-1) + 1:5 } groups ## ----results="hide"----------------------------------------------------------- fit <- pcLasso(x, y, ratio = 0.8, groups = groups) ## ----------------------------------------------------------------------------- groups[[1]] <- 1:7 groups ## ----results="hide"----------------------------------------------------------- fit <- pcLasso(x, y, ratio = 0.8, groups = groups) ## ----------------------------------------------------------------------------- # intercept at 20th model: same as before fit$a0[20] # coefficients at 20th model: look at origbeta instead fit$origbeta[, 20] ## ----------------------------------------------------------------------------- fit$orignzero ## ----results="hide"----------------------------------------------------------- cvfit <- cv.pcLasso(x, y, 
groups = groups, ratio = 0.8) ## ----results="hide"----------------------------------------------------------- cvfit <- cv.pcLasso(x, y, groups = groups, ratio = 0.8, nfolds = 5) ## ----results="hide"----------------------------------------------------------- foldid <- sample(rep(seq(10), length = n)) cvfit <- cv.pcLasso(x, y, groups = groups, ratio = 0.8, foldid = foldid) ## ----fig.width=5, fig.height=4------------------------------------------------ plot(cvfit) ## ----fig.width=5, fig.height=4------------------------------------------------ plot(cvfit, orignz = FALSE) ## ----------------------------------------------------------------------------- cvfit$lambda.min cvfit$lambda.1se ## ----------------------------------------------------------------------------- predict(cvfit, x[1:5, ]) # s = lambda.1se predict(cvfit, x[1:5, ], s = "lambda.min")
###PACOTES library("readxl") library("ggplot2") library("dplyr") library("stringr") library("AggregateR") library("dummy") #LENDO OS DADOS dados=read_excel("Base de dados_Processo Seletivo_com_ID.xlsx",sheet=1) ###ARRUMANDO O BANCO names(dados)=dados[1,]#nomes estavam na primeira linha dados=dados[-1,] str(dados) ###SEPARANDO O BOLSA AUXILIO E MORADIA bolsa_aux=c() moradia=c() dados$`Bolsa Auxílio|Moradia`=str_replace(dados$`Bolsa Auxílio|Moradia`,"[|]","") for (i in c(1:length(dados$`Bolsa Auxílio|Moradia`))){ aux=unlist(str_split(dados$`Bolsa Auxílio|Moradia`[i],"[|]")) bolsa_aux=append(bolsa_aux,aux[1]) moradia=append(moradia,aux[2]) aux=c() } moradia[which(dados$`Bolsa Auxílio|Moradia`==0)]=0 # tinha um valor que era 0 dados$bolsa_aux=bolsa_aux dados$moradia=moradia ##CONVERTENDO FORMATOS id=as.numeric(dados$ID) ano_processo=dados$`Ano do processo seletivo` bolsa=as.factor(dados$Bolsa) vigencia_inicio=as.Date.character(dados$`Vigência Início`,"%Y-%m-%d") vigencia_final=as.Date.character(dados$`Vigência Fim`,"%Y-%m-%d") curso=as.factor(dados$Curso) nivel=as.factor(dados$Nível) ic_momento_contemp=as.numeric(dados$`IC no momento da contemplação`) data_desvinc=as.Date.character(dados$`Desvinculação da bolsa`,"%Y-%m-%d") ic_insc=as.numeric(dados$IC) renda_total=as.numeric(dados$RT) grupo_familiar=as.numeric(dados$GF) doenca_grave=as.factor(dados$DG) gastos_moradia_indice=as.numeric(dados$MT) gastos_transp_inidce=as.numeric(dados$TR) escola_publica=as.factor(dados$EP) bolsa_aux_social_solicitada=as.factor(dados$`BAS (solicitada)`) alimentacao_solicitada=as.factor(dados$`Alimentação (solicitada)`) transp_solicitada=as.factor(dados$`Transporte (solicitada)`) moradia_solictada=as.factor(dados$`Moradia (solicitada)`) bolsa_aux_social_contemp=as.factor(dados$`BAS (contemplada)`) alimetacao_contemp=as.factor(dados$`Alimentação (contemplada)`) transp_contemp=as.factor(dados$`Transporte (contemplada)`) moradia_contemp=as.factor(dados$`Moradia (contemplada)`) 
paais=as.factor(dados$PAAIS) divergencia=as.factor(dados$`Divergência?`) obs=(dados$Observação) bolsa_aux=as.factor(dados$bolsa_aux) moradia=as.factor(dados$moradia) dados1=data.frame(id,ano_processo,bolsa,vigencia_inicio,vigencia_final,curso,nivel,ic_momento_contemp, data_desvinc,ic_insc,renda_total,grupo_familiar,doenca_grave,gastos_moradia_indice, gastos_transp_inidce,escola_publica,bolsa_aux_social_solicitada,alimentacao_solicitada, transp_solicitada,moradia_solictada,bolsa_aux_social_contemp,alimetacao_contemp, transp_contemp,moradia_contemp,paais,divergencia,obs,bolsa_aux,moradia) #AGRUPANDO AS PESSOAS PARA CONSIDERAR A HISTORIA DE CADA PESSOA dados_agrupar=dados1[,c(3,8,11:26)] agrupado=Aggregate(x=dados_agrupar,by=dados1$id,object=categories(dados_agrupar))
/Projeto - SAE.R
no_license
ribeiroleeo/PROJETO---SAE
R
false
false
2,985
r
### PACKAGES
library("readxl")
library("ggplot2")
library("dplyr")
library("stringr")
library("AggregateR")
library("dummy")

# READ THE DATA: selection-process workbook, first sheet.
dados=read_excel("Base de dados_Processo Seletivo_com_ID.xlsx",sheet=1)

### TIDYING THE DATASET
# The real column names are stored in the first data row: promote them to
# names(), then drop that row.
names(dados)=dados[1,]# column names were in the first row
dados=dados[-1,]
str(dados)

### SPLITTING THE "Bolsa Auxílio|Moradia" COLUMN (stipend | housing)
bolsa_aux=c()
moradia=c()
# NOTE(review): str_replace removes only the FIRST "|" of each value, and the
# loop below then splits on "|" again — this yields two pieces only when the
# raw field contains two separators; confirm against the spreadsheet layout.
dados$`Bolsa Auxílio|Moradia`=str_replace(dados$`Bolsa Auxílio|Moradia`,"[|]","")
for (i in c(1:length(dados$`Bolsa Auxílio|Moradia`))){
  # First piece = stipend value, second piece = housing value (NA when absent).
  aux=unlist(str_split(dados$`Bolsa Auxílio|Moradia`[i],"[|]"))
  bolsa_aux=append(bolsa_aux,aux[1])
  moradia=append(moradia,aux[2])
  aux=c()
}
moradia[which(dados$`Bolsa Auxílio|Moradia`==0)]=0 # one raw value was plain 0
dados$bolsa_aux=bolsa_aux
dados$moradia=moradia

## CONVERTING FORMATS
# Each spreadsheet column is pulled into a standalone, explicitly typed vector
# before being reassembled into the data frame `dados1` below.
id=as.numeric(dados$ID)
ano_processo=dados$`Ano do processo seletivo`
bolsa=as.factor(dados$Bolsa)
# NOTE(review): dates are parsed with "%Y-%m-%d"; values stored in any other
# format silently become NA — confirm the workbook's date format.
vigencia_inicio=as.Date.character(dados$`Vigência Início`,"%Y-%m-%d")
vigencia_final=as.Date.character(dados$`Vigência Fim`,"%Y-%m-%d")
curso=as.factor(dados$Curso)
nivel=as.factor(dados$Nível)
ic_momento_contemp=as.numeric(dados$`IC no momento da contemplação`)
data_desvinc=as.Date.character(dados$`Desvinculação da bolsa`,"%Y-%m-%d")
ic_insc=as.numeric(dados$IC)
renda_total=as.numeric(dados$RT)
grupo_familiar=as.numeric(dados$GF)
doenca_grave=as.factor(dados$DG)
gastos_moradia_indice=as.numeric(dados$MT)
gastos_transp_inidce=as.numeric(dados$TR)
escola_publica=as.factor(dados$EP)
bolsa_aux_social_solicitada=as.factor(dados$`BAS (solicitada)`)
alimentacao_solicitada=as.factor(dados$`Alimentação (solicitada)`)
transp_solicitada=as.factor(dados$`Transporte (solicitada)`)
moradia_solictada=as.factor(dados$`Moradia (solicitada)`)
bolsa_aux_social_contemp=as.factor(dados$`BAS (contemplada)`)
alimetacao_contemp=as.factor(dados$`Alimentação (contemplada)`)
transp_contemp=as.factor(dados$`Transporte (contemplada)`)
moradia_contemp=as.factor(dados$`Moradia (contemplada)`)
paais=as.factor(dados$PAAIS)
divergencia=as.factor(dados$`Divergência?`)
obs=(dados$Observação)
bolsa_aux=as.factor(dados$bolsa_aux)
moradia=as.factor(dados$moradia)

# Reassemble the typed columns into one analysis-ready data frame.
dados1=data.frame(id,ano_processo,bolsa,vigencia_inicio,vigencia_final,curso,nivel,ic_momento_contemp,
                  data_desvinc,ic_insc,renda_total,grupo_familiar,doenca_grave,gastos_moradia_indice,
                  gastos_transp_inidce,escola_publica,bolsa_aux_social_solicitada,alimentacao_solicitada,
                  transp_solicitada,moradia_solictada,bolsa_aux_social_contemp,alimetacao_contemp,
                  transp_contemp,moradia_contemp,paais,divergencia,obs,bolsa_aux,moradia)

# GROUPING ROWS BY PERSON TO CAPTURE EACH PERSON'S HISTORY
# Columns 3, 8 and 11:26 (scholarship type, IC at award time, and the
# socio-economic / request-and-award indicators) are aggregated per `id`.
dados_agrupar=dados1[,c(3,8,11:26)]
agrupado=Aggregate(x=dados_agrupar,by=dados1$id,object=categories(dados_agrupar))
# Tests for the EM-like least squares semi-supervised classifier.
context("Expectation Maximization Like Least Squares Classifier")

# Shared fixture: toy dataset bundled with the package.
data(testdata)
modelform <- testdata$modelform
classname <- all.vars(modelform)[1]

# Pull the matrix and formula views of the same data into the local scope.
for (field in c("D", "D_test", "X", "X_u", "y", "X_test", "y_test")) {
  assign(field, testdata[[field]])
}

test_that("Formula and matrix formulation give same results", {
  fit_mat <- EMLeastSquaresClassifier(X, factor(y), X_u)
  fit_frm <- EMLeastSquaresClassifier(modelform, D)

  expect_equal(predict(fit_mat, X_test), predict(fit_frm, D_test))
  expect_equal(loss(fit_mat, X_test, y_test), loss(fit_frm, D_test))
  expect_equal(fit_mat@classnames, fit_frm@classnames)
})

test_that("Different settings return the same loss", {
  fit_a <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, scale = TRUE)
  fit_b <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, x_center = TRUE, scale = TRUE)
  expect_equal(loss(fit_a, X_test, y_test), loss(fit_b, X_test, y_test), tolerance = 10e-6)

  # Centering the output (y_scale) changes the loss value itself, but the two
  # equivalent parameterizations below must still agree with each other.
  fit_c <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, x_center = TRUE, scale = TRUE, y_scale = TRUE)
  fit_d <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, scale = TRUE, x_center = TRUE, y_scale = TRUE)
  expect_equal(loss(fit_c, X_test, y_test), loss(fit_d, X_test, y_test), tolerance = 10e-6)
})
/tests/testthat/test-EMLeastSquaresClassifier.R
no_license
trappmartin/RSSL
R
false
false
1,320
r
# Tests for the EM-like least squares semi-supervised classifier.
context("Expectation Maximization Like Least Squares Classifier")

# Shared fixture: toy dataset bundled with the package.
data(testdata)
modelform <- testdata$modelform
classname <- all.vars(modelform)[1]

# Pull the matrix and formula views of the same data into the local scope.
for (field in c("D", "D_test", "X", "X_u", "y", "X_test", "y_test")) {
  assign(field, testdata[[field]])
}

test_that("Formula and matrix formulation give same results", {
  fit_mat <- EMLeastSquaresClassifier(X, factor(y), X_u)
  fit_frm <- EMLeastSquaresClassifier(modelform, D)

  expect_equal(predict(fit_mat, X_test), predict(fit_frm, D_test))
  expect_equal(loss(fit_mat, X_test, y_test), loss(fit_frm, D_test))
  expect_equal(fit_mat@classnames, fit_frm@classnames)
})

test_that("Different settings return the same loss", {
  fit_a <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, scale = TRUE)
  fit_b <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, x_center = TRUE, scale = TRUE)
  expect_equal(loss(fit_a, X_test, y_test), loss(fit_b, X_test, y_test), tolerance = 10e-6)

  # Centering the output (y_scale) changes the loss value itself, but the two
  # equivalent parameterizations below must still agree with each other.
  fit_c <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, x_center = TRUE, scale = TRUE, y_scale = TRUE)
  fit_d <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, scale = TRUE, x_center = TRUE, y_scale = TRUE)
  expect_equal(loss(fit_c, X_test, y_test), loss(fit_d, X_test, y_test), tolerance = 10e-6)
})
# plot3.R — energy sub-metering time series for 2007-02-01 .. 2007-02-02.
# Reads the UCI "household power consumption" extract and writes plot3.png.

# Load data.
# FIX: "?" marks missing measurements; declare it via na.strings so the
# sub-metering columns are read as numeric-compatible values (under R < 4.0
# they would otherwise become factors and as.double() would return level
# codes, not the measured values).
HPC <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                  na.strings = "?", stringsAsFactors = FALSE)

# FIX: the Date column is day/month/year; as.Date() without an explicit
# format mis-parses it (the default tryFormats are year-first).
hpc_Date <- as.Date(HPC$Date, format = "%d/%m/%Y")
d_begin <- as.Date("01/02/2007", format = "%d/%m/%Y")
d_end   <- as.Date("03/02/2007", format = "%d/%m/%Y")

# First row of Feb 1st and the row just before the first row of Feb 3rd
# delimit the two-day window of interest.
i_begin <- match(d_begin, hpc_Date)
i_end   <- match(d_end, hpc_Date) - 1
hpc <- HPC[i_begin:i_end, 1:9]
hpc$DateTime <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M")
hpc$DayOfWeek <- format(hpc$DateTime, "%a")

# plot 3: the three sub-metering series overlaid on one set of axes.
png(filename = "plot3.png", width = 480, height = 480)

hpc_sm1 <- as.double(as.vector(hpc$Sub_metering_1))
hpc_sm2 <- as.double(as.vector(hpc$Sub_metering_2))
hpc_sm3 <- as.double(as.vector(hpc$Sub_metering_3))

par(mar = c(2, 4, 1, 1))
par(cex.axis = 0.7)
par(cex.lab = 0.8)
par(cex.main = 0.8)

# Common y-range so the three series share one scale (robust to NAs).
y_range <- range(c(hpc_sm1, hpc_sm2, hpc_sm3), na.rm = TRUE)
n_obs <- length(hpc_sm1)  # 2 days * 24 h * 60 min = 2880 one-minute readings

plot(x = seq_len(n_obs), y = hpc_sm1, xlab = "", ylab = "Energy sub metering",
     type = "l", xaxt = "n", yaxt = "n", ylim = y_range)
lines(x = seq_len(n_obs), y = hpc_sm2, col = "red")
lines(x = seq_len(n_obs), y = hpc_sm3, col = "blue")

# Day labels at the midnight boundaries; the window starts on a Thursday.
axis(1, at = c(0:2) * 24 * 60, labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(0:3) * 10)
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1, cex = 0.7, pt.cex = 0.9)

dev.off()
/plot3.R
no_license
ABouyssi/ExDataCourseProject1
R
false
false
1,265
r
# plot3.R — energy sub-metering time series for 2007-02-01 .. 2007-02-02.
# Reads the UCI "household power consumption" extract and writes plot3.png.

# Load data.
# FIX: "?" marks missing measurements; declare it via na.strings so the
# sub-metering columns are read as numeric-compatible values (under R < 4.0
# they would otherwise become factors and as.double() would return level
# codes, not the measured values).
HPC <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                  na.strings = "?", stringsAsFactors = FALSE)

# FIX: the Date column is day/month/year; as.Date() without an explicit
# format mis-parses it (the default tryFormats are year-first).
hpc_Date <- as.Date(HPC$Date, format = "%d/%m/%Y")
d_begin <- as.Date("01/02/2007", format = "%d/%m/%Y")
d_end   <- as.Date("03/02/2007", format = "%d/%m/%Y")

# First row of Feb 1st and the row just before the first row of Feb 3rd
# delimit the two-day window of interest.
i_begin <- match(d_begin, hpc_Date)
i_end   <- match(d_end, hpc_Date) - 1
hpc <- HPC[i_begin:i_end, 1:9]
hpc$DateTime <- strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M")
hpc$DayOfWeek <- format(hpc$DateTime, "%a")

# plot 3: the three sub-metering series overlaid on one set of axes.
png(filename = "plot3.png", width = 480, height = 480)

hpc_sm1 <- as.double(as.vector(hpc$Sub_metering_1))
hpc_sm2 <- as.double(as.vector(hpc$Sub_metering_2))
hpc_sm3 <- as.double(as.vector(hpc$Sub_metering_3))

par(mar = c(2, 4, 1, 1))
par(cex.axis = 0.7)
par(cex.lab = 0.8)
par(cex.main = 0.8)

# Common y-range so the three series share one scale (robust to NAs).
y_range <- range(c(hpc_sm1, hpc_sm2, hpc_sm3), na.rm = TRUE)
n_obs <- length(hpc_sm1)  # 2 days * 24 h * 60 min = 2880 one-minute readings

plot(x = seq_len(n_obs), y = hpc_sm1, xlab = "", ylab = "Energy sub metering",
     type = "l", xaxt = "n", yaxt = "n", ylim = y_range)
lines(x = seq_len(n_obs), y = hpc_sm2, col = "red")
lines(x = seq_len(n_obs), y = hpc_sm3, col = "blue")

# Day labels at the midnight boundaries; the window starts on a Thursday.
axis(1, at = c(0:2) * 24 * 60, labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(0:3) * 10)
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1, cex = 0.7, pt.cex = 0.9)

dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clust_class_viz.R \name{clust_class_viz} \alias{clust_class_viz} \title{Visual contingency table of the true classification (partitioning) and the clustering results} \usage{ clust_class_viz(object, features = NULL, clusters = NULL) } \arguments{ \item{object}{: Clustering result from usual methods of clustering} \item{features}{: True partition (classification) of the observations} \item{clusters}{: Possibility to display a visual contingency table from just clustering results} } \value{ a GGalluvial graph } \description{ Visual contingency table of the true classification (partitioning) and the clustering results } \examples{ # dependencies: #library("ggplot2") #library("ggalluvial") # features : vector of factors # clusters : vector of factors # Data preparation # +++++++++++++++ # data("iris") # head(iris) # # Remove species column (5) and scale the data iris.scaled <- scale(iris[, -5]) # true_class = iris[,5] # # methods from the package stats # library(stats) # # (kmeans method) # km.res <- kmeans(iris.scaled, 5, nstart = 10) # clust_class_viz(km.res, true_class) # # ------------------------------------------------------------------------------ # # methods from the package cluster # library(cluster) # # (pam method) # pam.res = pam(iris.scaled, 3) # clust_class_viz(pam.res, true_class) # # (clara method) # clara.res = clara(iris.scaled, 3) # clust_class_viz(clara.res, true_class) # # (fanny method) # fanny.res = fanny(iris.scaled, 3) # clust_class_viz(fanny.res, true_class) # # ------------------------------------------------------------------------------ # # methods from the package fpc # library(fpc) # # (dbscan method) # dbscan.res = dbscan(iris.scaled, eps = 0.2) # clust_class_viz(dbscan.res, true_class) # # the dbscan method doesn't provide information; clusters are # # required for external measures computation # # 
------------------------------------------------------------------------------ # # methods from the package mclust # library(mclust) # # Mclust method # Mclust.res = Mclust(iris.scaled,4) # clust_class_viz(Mclust.res, true_class) # # ------------------------------------------------------------------------------ # # methods from the package FactoMineR # library(FactoMineR) # # HCPC method # HCPC.res = HCPC(data.frame(iris.scaled),3) # clust_class_viz(HCPC.res,true_class) # # ------------------------------------------------------------------------------ # # methods from the package factoextra # library(factoextra) # # hkmeans method # hkmeans.res = hkmeans(iris.scaled,3) # clust_class_viz(hkmeans.res,true_class) # ------------------------------------------------------------------------------ # using the two arguments of the function : features, clusters #clust_class_viz(object = NULL,features = true_class, clusters=HCPC.res$call$X$clust) } \author{ Mohamed Al Mahdi Tantaoui }
/man/clust_class_viz.Rd
no_license
Mathias-dcm/CCarac
R
false
true
2,933
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clust_class_viz.R \name{clust_class_viz} \alias{clust_class_viz} \title{Visual contingency table of the true classification (partitioning) and the clustering results} \usage{ clust_class_viz(object, features = NULL, clusters = NULL) } \arguments{ \item{object}{: Clustering result from usual methods of clustering} \item{features}{: True partition (classification) of the observations} \item{clusters}{: Possibility to display a visual contingency table from just clustering results} } \value{ a GGalluvial graph } \description{ Visual contingency table of the true classification (partitioning) and the clustering results } \examples{ # dependencies: #library("ggplot2") #library("ggalluvial") # features : vector of factors # clusters : vector of factors # Data preparation # +++++++++++++++ # data("iris") # head(iris) # # Remove species column (5) and scale the data iris.scaled <- scale(iris[, -5]) # true_class = iris[,5] # # methods from the package stats # library(stats) # # (kmeans method) # km.res <- kmeans(iris.scaled, 5, nstart = 10) # clust_class_viz(km.res, true_class) # # ------------------------------------------------------------------------------ # # methods from the package cluster # library(cluster) # # (pam method) # pam.res = pam(iris.scaled, 3) # clust_class_viz(pam.res, true_class) # # (clara method) # clara.res = clara(iris.scaled, 3) # clust_class_viz(clara.res, true_class) # # (fanny method) # fanny.res = fanny(iris.scaled, 3) # clust_class_viz(fanny.res, true_class) # # ------------------------------------------------------------------------------ # # methods from the package fpc # library(fpc) # # (dbscan method) # dbscan.res = dbscan(iris.scaled, eps = 0.2) # clust_class_viz(dbscan.res, true_class) # # the dbscan method doesn't provide information; clusters are # # required for external measures computation # # 
------------------------------------------------------------------------------ # # methods from the package mclust # library(mclust) # # Mclust method # Mclust.res = Mclust(iris.scaled,4) # clust_class_viz(Mclust.res, true_class) # # ------------------------------------------------------------------------------ # # methods from the package FactoMineR # library(FactoMineR) # # HCPC method # HCPC.res = HCPC(data.frame(iris.scaled),3) # clust_class_viz(HCPC.res,true_class) # # ------------------------------------------------------------------------------ # # methods from the package factoextra # library(factoextra) # # hkmeans method # hkmeans.res = hkmeans(iris.scaled,3) # clust_class_viz(hkmeans.res,true_class) # ------------------------------------------------------------------------------ # using the two arguments of the function : features, clusters #clust_class_viz(object = NULL,features = true_class, clusters=HCPC.res$call$X$clust) } \author{ Mohamed Al Mahdi Tantaoui }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/QueryHelper.R \name{projectWriteTable} \alias{projectWriteTable} \title{Write the dataframe to the table} \usage{ projectWriteTable(data, tble_name, db_path = "") } \arguments{ \item{data}{Data frame containing the data to save to the table. This will overwrite any existing data} \item{tble_name}{Name of the table to write the data to. NOTE SQLITE table names are not case sensitive} \item{db_path}{The location of the project database. This defaults to database/project.sqlite and is not expected to change} } \value{ None } \description{ This function writes the dataframe to the sqlite table. It will overwrite any existing data. It is used internally to load project data. }
/man/projectWriteTable.Rd
no_license
deepika060193/plethem
R
false
true
750
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/QueryHelper.R \name{projectWriteTable} \alias{projectWriteTable} \title{Write the dataframe to the table} \usage{ projectWriteTable(data, tble_name, db_path = "") } \arguments{ \item{data}{Data frame containing the data to save to the table. This will overwrite any existing data} \item{tble_name}{Name of the table to write the data to. NOTE SQLITE table names are not case sensitive} \item{db_path}{The location of the project database. This defaults to database/project.sqlite and is not expected to change} } \value{ None } \description{ This function writes the dataframe to the sqlite table. It will overwrite any existing data. It is used internally to load project data. }
# install.packages("cowplot") # install.packages("hrbrthemes") # install.packages("gcookbook") # install.packages("gridExtra") library(tidyverse) library(cowplot) library(gridExtra) ################################################################################ #######################Section Zero. Exact distribution ########################## ################################################################################ qpois(0.95, 24) qpois(0.95, 48) qpois(0.95, 8) qpois(0.95, 16) qexp(p = 0.05, rate = 2) qexp(p = 0.05, rate = 2/3) ################################################################################ #######################Section I. Part (a) simulation ########################## ################################################################################ seed_init <- 20200321 sim <- 10000 lambda <- 2 febrile_rate <- 1/3 # --- I.(a) Number of patients in 12 hours --- # N_pat_12 <- c() for (iter in 1:sim) { set.seed(seed_init+iter) temp_time_interv <- rexp(1, lambda) max_hour <- temp_time_interv N_temp = 0 while (max_hour <= 12) { N_temp = N_temp + 1 temp_time_interv <- rexp(1, lambda) max_hour <- max_hour + temp_time_interv } N_pat_12 <- c(N_pat_12, N_temp) } mean(N_pat_12) var(N_pat_12) sd(N_pat_12) sort(N_pat_12, decreasing = FALSE)[sim - sim*0.05] patient_12 <- data.frame(N_pat_12=N_pat_12) %>% ggplot(., aes(x=N_pat_12)) + geom_histogram(aes(y=..density..), binwidth = 1, fill = "#012169", color = "#0c2340", alpha=0.9, position="identity") + # geom_density(alpha=0.6) + labs(title="General patients: density plot of arrival counts in one 12-hour period", x = "Arrival counts", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=24), linetype="dashed", color = "red")+ 
annotate(geom="text", x=27, size=5, y=0.09, label="E(N_{12})=24", color="red")+ geom_vline(aes(xintercept=33), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=38, y=0.09, label="P(N_{12}>=33)<=0.05", color="red") patient_12 # --- I.(b) Number of patients in 24 hours --- # N_pat_24 <- c() for (iter in 1:sim) { set.seed(seed_init+iter) temp_time_interv <- rexp(1, lambda) max_hour <- temp_time_interv N_temp = 0 while (max_hour <= 24) { N_temp = N_temp + 1 temp_time_interv <- rexp(1, lambda) max_hour <- max_hour + temp_time_interv } N_pat_24 <- c(N_pat_24, N_temp) } mean(N_pat_24) var(N_pat_24) sd(N_pat_24) sort(N_pat_24, decreasing = FALSE)[sim - sim*0.05] qpois(0.95, 48) patient_24 <- data.frame(N_pat_24=N_pat_24) %>% ggplot(., aes(x=N_pat_24)) + geom_histogram(aes(y=..density..), binwidth = 1, fill = "#012169", color = "#0c2340", alpha=0.9, position="identity") + # geom_density(alpha=0.6) + labs(title="General patients: density plot of arrival counts in one 24-hour period", x = "Arrival counts", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=48), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=52, y=0.065, label="E(N_{24})=48", color="red")+ geom_vline(aes(xintercept=60), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=67, y=0.065, label="P(N_{24}>=60)<=0.05", color="red") patient_24 patient12_24<-grid.arrange(patient_12, patient_24, nrow = 1) ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/general_patient_count.png", plot = patient12_24, width = 60, height = 30, units = "cm") # --- I.(c) Number of febrile in 12 hours --- # N_feb_12 <- c() for (iter in 1:sim) { set.seed(seed_init+iter) temp_time_interv <- 
rexp(1, lambda) max_hour <- temp_time_interv N_temp = 0 while (max_hour <= 12) { if (rbinom(1,1,1/3) == 1) { N_temp = N_temp + 1 } temp_time_interv <- rexp(1, lambda) max_hour <- max_hour + temp_time_interv } N_feb_12 <- c(N_feb_12, N_temp) } mean(N_feb_12) var(N_feb_12) sd(N_feb_12) sort(N_feb_12, decreasing = FALSE)[sim - sim*0.05] feb_patient_12 <- data.frame(N_feb_12=N_feb_12) %>% ggplot(., aes(x=N_feb_12)) + geom_histogram(aes(y=..density..), binwidth = 1, fill = "#012169", color = "#0c2340", alpha=0.9, position="identity") + labs(title="Febrile patients: density plot of arrival counts in one 12-hour period", x = "Arrival counts", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=8), linetype="dashed", color = "red")+ annotate(geom="text", x=10, size=5, y=0.16, label="E(M_{12})=8", color="red")+ geom_vline(aes(xintercept=13), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=16.5, y=0.16, label="P(M_{12}>=13)<=0.05", color="red") feb_patient_12 # --- I.(d) Number of febrile in 24 hours --- # N_feb_24 <- c() for (iter in 1:sim) { set.seed(seed_init+iter) temp_time_interv <- rexp(1, lambda) max_hour <- temp_time_interv N_temp = 0 while (max_hour <= 24) { if (rbinom(1,1,1/3) == 1) { N_temp = N_temp + 1 } temp_time_interv <- rexp(1, lambda) max_hour <- max_hour + temp_time_interv } N_feb_24 <- c(N_feb_24, N_temp) } mean(N_feb_24) var(N_feb_24) sd(N_feb_24) sort(N_feb_24, decreasing = FALSE)[sim - sim*0.05] qpois(0.95, 48) feb_patient_24 <- data.frame(N_feb_24=N_feb_24) %>% ggplot(., aes(x=N_feb_24)) + geom_histogram(aes(y=..density..), binwidth = 1, fill = "#012169", color = "#0c2340", alpha=0.9, position="identity") + # geom_density(alpha=0.6) + labs(title="Febrile patients: 
density plot of arrival counts in one 24-hour period", x = "Arrival counts", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=16), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=19, y=0.12, label="E(M_{24})=16", color="red")+ geom_vline(aes(xintercept=23), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=27.5, y=0.12, label="P(M_{24}>=23)<=0.05", color="red") feb_patient_24 feb_patient12_24<-grid.arrange(feb_patient_12, feb_patient_24, nrow = 1) ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/feb_patient_count.png", plot = feb_patient12_24, width = 60, height = 30, units = "cm") ################################################################################ #######################Section II. 
Part b simulation ########################### ################################################################################ # --- II.(a) Number of sick patients in 24 hours --- # t = 24 sim = 5000 df_24_patient <- data.frame() for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) arrivals = sort(unifs) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_24_patient <- rbind(df_24_patient, df_temp) } general_pat_time_interval <- df_24_patient %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance_v = var(Diff, na.rm = T), sd_v = sd(Diff, na.rm = T), N_v=n()) mean(general_pat_time_interval$mean_v) mean(general_pat_time_interval$variance_v,na.rm = T) quantile(general_pat_time_interval$mean_v, 0.05) general_pat_time_interval <- data.frame(general_pat_time_interval, stringsAsFactors = F) general_pat_time_interval_plot <- ggplot(general_pat_time_interval, aes(x=mean_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="General patients: density plot of interarrival intervals", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=mean(general_pat_time_interval$mean_v)), linetype="dashed", color = "red")+ annotate(geom="text", x=0.5+0.06, size=5, y=6, label="Simulated mean: 0.50", color="red")+ geom_vline(aes(xintercept=quantile(general_pat_time_interval$mean_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(general_pat_time_interval$mean_v, 0.05)[[1]]-0.055, y=6, label="5% Quantile: 0.39", color="red") general_pat_time_interval_plot # # --- II.(b) Number of 
sick patients in 12 hours --- # # t = 12 # sim = 5000 # df_12_patient <- data.frame() # for (iter in 1:sim) { # # N = rpois(1, lambda*t) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_patient <- rbind(df_12_patient, df_temp) # } # df2 <- df_12_patient %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) # --- II.(c) Number of febrile in 12 hours --- # t = 24 sim = 5000 df_12_feb <- data.frame() set.seed(123) for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) feb_pat <- rbinom(N,1,1/3) while(sum(feb_pat==1)==1){ feb_pat <- rbinom(N,1,1/3) } unifs_keep <- unifs[as.logical(feb_pat)] arrivals = sort(unifs_keep) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_12_feb <- rbind(df_12_feb, df_temp) } feb_pat_time_interval <- df_12_feb %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance=var(Diff, na.rm = T)) mean(feb_pat_time_interval$mean_v, na.rm = T) mean(feb_pat_time_interval$variance, na.rm = T) feb_pat_time_interval <- data.frame(feb_pat_time_interval, stringsAsFactors = F) feb_pat_time_interval_plot <- ggplot(feb_pat_time_interval, aes(x=mean_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="Febrile patients: density plot of interarrival intervals", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=mean(feb_pat_time_interval$mean_v)), 
linetype="dashed", color = "red")+ annotate(geom="text", x=mean(feb_pat_time_interval$mean_v)+0.5, size=5, y=1.3, label="Simulated mean: 1.50", color="red")+ geom_vline(aes(xintercept=quantile(feb_pat_time_interval$mean_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(feb_pat_time_interval$mean_v, 0.05)[[1]]-0.5, y=1.3, label="5% Quantile: \n0.99", color="red") feb_pat_time_interval_plot feb_patient12_24<-grid.arrange(general_pat_time_interval_plot, feb_pat_time_interval_plot, nrow = 1) ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/time_interval.png", plot = feb_patient12_24, width = 60, height = 30, units = "cm") # # t = 12 # sim = 10000 # df_12_feb <- data.frame() # #set.seed(runif(1, 0, 100000)) # set.seed(1) # for (iter in 1:sim) { # # N = rpois(1, (lambda*t)/3) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_feb <- rbind(df_12_feb, df_temp) # } # df2 <- df_12_feb %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # med = median(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) # ################################################################################ #######################Section II. 
Part b simulation - SD ########################### ################################################################################ # --- II.(a) Number of sick patients in 24 hours --- # t = 24 sim = 5000 df_24_patient <- data.frame() for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) arrivals = sort(unifs) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_24_patient <- rbind(df_24_patient, df_temp) } general_pat_time_interval <- df_24_patient %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance_v = var(Diff, na.rm = T), sd_v = sd(Diff, na.rm = T), N_v=n()) mean(general_pat_time_interval$mean_v) mean(general_pat_time_interval$variance_v,na.rm = T) mean(general_pat_time_interval$sd_v,na.rm = T) quantile(general_pat_time_interval$mean_v, 0.05) quantile(general_pat_time_interval$sd_v, 0.05) general_pat_time_interval <- data.frame(general_pat_time_interval, stringsAsFactors = F) general_pat_time_interval_plot <- ggplot(general_pat_time_interval, aes(x=sd_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="General patients: density plot of standard deviation for interarrival intervals", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=mean(general_pat_time_interval$sd_v)), linetype="dashed", color = "red")+ annotate(geom="text", x=0.6, size=5, y=6, label="Simulated mean of SD: 0.49", color="red")+ geom_vline(aes(xintercept=quantile(general_pat_time_interval$sd_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(general_pat_time_interval$sd_v, 
0.05)[[1]]-0.055, y=6, label="5% Quantile: 0.35", color="red") general_pat_time_interval_plot # # --- II.(b) Number of sick patients in 12 hours --- # # t = 12 # sim = 5000 # df_12_patient <- data.frame() # for (iter in 1:sim) { # # N = rpois(1, lambda*t) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_patient <- rbind(df_12_patient, df_temp) # } # df2 <- df_12_patient %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) # --- II.(c) Number of febrile in 12 hours --- # t = 24 sim = 5000 df_12_feb <- data.frame() set.seed(123) for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) feb_pat <- rbinom(N,1,1/3) while(sum(feb_pat==1)==1){ feb_pat <- rbinom(N,1,1/3) } unifs_keep <- unifs[as.logical(feb_pat)] arrivals = sort(unifs_keep) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_12_feb <- rbind(df_12_feb, df_temp) } feb_pat_time_interval <- df_12_feb %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance=var(Diff, na.rm = T), sd_v = sd(Diff, na.rm = T), N=n()) mean(feb_pat_time_interval$mean_v, na.rm = T) mean(feb_pat_time_interval$variance, na.rm = T) mean(feb_pat_time_interval$sd_v, na.rm = T) feb_pat_time_interval <- data.frame(feb_pat_time_interval, stringsAsFactors = F) feb_pat_time_interval_plot <- ggplot(feb_pat_time_interval, aes(x=sd_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="Febrile patients: density plot of standard deviation for interarrival intervals", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, 
face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=mean(feb_pat_time_interval$sd_v)), linetype="dashed", color = "red")+ annotate(geom="text", x=mean(feb_pat_time_interval$sd_v)+0.5, size=5, y=1.1, label="Simulated mean of SD: 1.41", color="red")+ geom_vline(aes(xintercept=quantile(feb_pat_time_interval$sd_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(feb_pat_time_interval$sd_v, 0.05)[[1]]-0.2, y=1.1, label="5% Quantile: \n0.82", color="red") feb_pat_time_interval_plot feb_patient12_24<-grid.arrange(general_pat_time_interval_plot, feb_pat_time_interval_plot, nrow = 1) ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/time_interval_standard_deviation.png", plot = feb_patient12_24, width = 60, height = 30, units = "cm") # # t = 12 # sim = 10000 # df_12_feb <- data.frame() # #set.seed(runif(1, 0, 100000)) # set.seed(1) # for (iter in 1:sim) { # # N = rpois(1, (lambda*t)/3) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_feb <- rbind(df_12_feb, df_temp) # } # df2 <- df_12_feb %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # med = median(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) # ################################################################################ #######################Section II. 
Part b simulation 72 hour ########################### ################################################################################ # --- II.(a) Number of sick patients in 24 hours --- # t = 120 sim = 5000 df_24_patient <- data.frame() for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) arrivals = sort(unifs) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_24_patient <- rbind(df_24_patient, df_temp) } general_pat_time_interval <- df_24_patient %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance_v = var(Diff, na.rm = T), sd_v = sd(Diff, na.rm = T), N_v=n()) mean(general_pat_time_interval$mean_v) mean(general_pat_time_interval$variance_v,na.rm = T) quantile(general_pat_time_interval$mean_v, 0.05) general_pat_time_interval <- data.frame(general_pat_time_interval, stringsAsFactors = F) general_pat_time_interval_plot <- ggplot(general_pat_time_interval, aes(x=mean_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="General patients: density plot of interarrival intervals on 120-hour interval", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=mean(general_pat_time_interval$mean_v)), linetype="dashed", color = "red")+ annotate(geom="text", x=0.5+0.06, size=5, y=15, label="Simulated mean: 0.50", color="red")+ geom_vline(aes(xintercept=quantile(general_pat_time_interval$mean_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(general_pat_time_interval$mean_v, 0.05)[[1]], y=15, label="5% Quantile: 0.45", color="red") general_pat_time_interval_plot 
# # --- II.(b) Number of sick patients in 12 hours --- # # t = 12 # sim = 5000 # df_12_patient <- data.frame() # for (iter in 1:sim) { # # N = rpois(1, lambda*t) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_patient <- rbind(df_12_patient, df_temp) # } # df2 <- df_12_patient %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) # --- II.(c) Number of febrile in 12 hours --- # t = 120 sim = 5000 df_12_feb <- data.frame() set.seed(123) for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) feb_pat <- rbinom(N,1,1/3) while(sum(feb_pat==1)==1){ feb_pat <- rbinom(N,1,1/3) } unifs_keep <- unifs[as.logical(feb_pat)] arrivals = sort(unifs_keep) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_12_feb <- rbind(df_12_feb, df_temp) } feb_pat_time_interval <- df_12_feb %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance=var(Diff, na.rm = T)) mean(feb_pat_time_interval$mean_v, na.rm = T) mean(feb_pat_time_interval$variance, na.rm = T) feb_pat_time_interval <- data.frame(feb_pat_time_interval, stringsAsFactors = F) feb_pat_time_interval_plot <- ggplot(feb_pat_time_interval, aes(x=mean_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="Febrile patients: density plot of interarrival intervals on 120-hour interval", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + 
geom_vline(aes(xintercept=mean(feb_pat_time_interval$mean_v)), linetype="dashed", color = "red")+ annotate(geom="text", x=mean(feb_pat_time_interval$mean_v), size=5, y=2.6, label="Simulated mean: 1.50", color="red")+ geom_vline(aes(xintercept=quantile(feb_pat_time_interval$mean_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(feb_pat_time_interval$mean_v, 0.05)[[1]], y=2.6, label="5% Quantile: \n1.26", color="red") feb_pat_time_interval_plot feb_patient12_24<-grid.arrange(general_pat_time_interval_plot, feb_pat_time_interval_plot, nrow = 1) ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/time_interval_120_hour.png", plot = feb_patient12_24, width = 60, height = 30, units = "cm") # # t = 12 # sim = 10000 # df_12_feb <- data.frame() # #set.seed(runif(1, 0, 100000)) # set.seed(1) # for (iter in 1:sim) { # # N = rpois(1, (lambda*t)/3) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_feb <- rbind(df_12_feb, df_temp) # } # df2 <- df_12_feb %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # med = median(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) # # --- II.(a) Number of sick patients in 24 hours --- # t = 72 sim = 5000 df_24_patient <- data.frame() for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) arrivals = sort(unifs) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_24_patient <- rbind(df_24_patient, df_temp) } general_pat_time_interval <- df_24_patient %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance_v = var(Diff, na.rm = T), sd_v = sd(Diff, na.rm = T), N_v=n()) mean(general_pat_time_interval$mean_v) mean(general_pat_time_interval$variance_v,na.rm = T) 
mean(general_pat_time_interval$sd_v,na.rm = T) quantile(general_pat_time_interval$mean_v, 0.05) quantile(general_pat_time_interval$sd_v, 0.05) general_pat_time_interval <- data.frame(general_pat_time_interval, stringsAsFactors = F) general_pat_time_interval_plot <- ggplot(general_pat_time_interval, aes(x=sd_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="General patients: density plot of standard deviation for interarrival intervals", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=mean(general_pat_time_interval$sd_v)), linetype="dashed", color = "red")+ annotate(geom="text", x=0.6, size=5, y=6, label="Simulated mean of SD: 0.49", color="red")+ geom_vline(aes(xintercept=quantile(general_pat_time_interval$sd_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(general_pat_time_interval$sd_v, 0.05)[[1]]-0.055, y=6, label="5% Quantile: 0.35", color="red") general_pat_time_interval_plot # # --- II.(b) Number of sick patients in 12 hours --- # # t = 12 # sim = 5000 # df_12_patient <- data.frame() # for (iter in 1:sim) { # # N = rpois(1, lambda*t) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_patient <- rbind(df_12_patient, df_temp) # } # df2 <- df_12_patient %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) # --- II.(c) Number of febrile in 12 hours --- # t = 72 sim = 5000 df_12_feb <- 
data.frame() set.seed(123) for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) feb_pat <- rbinom(N,1,1/3) while(sum(feb_pat==1)==1){ feb_pat <- rbinom(N,1,1/3) } unifs_keep <- unifs[as.logical(feb_pat)] arrivals = sort(unifs_keep) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_12_feb <- rbind(df_12_feb, df_temp) } feb_pat_time_interval <- df_12_feb %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance=var(Diff, na.rm = T), sd_v = sd(Diff, na.rm = T), N=n()) mean(feb_pat_time_interval$mean_v, na.rm = T) mean(feb_pat_time_interval$variance, na.rm = T) mean(feb_pat_time_interval$sd_v, na.rm = T) feb_pat_time_interval <- data.frame(feb_pat_time_interval, stringsAsFactors = F) feb_pat_time_interval_plot <- ggplot(feb_pat_time_interval, aes(x=sd_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="Febrile patients: density plot of standard deviation for interarrival intervals", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=mean(feb_pat_time_interval$sd_v)), linetype="dashed", color = "red")+ annotate(geom="text", x=mean(feb_pat_time_interval$sd_v)+0.5, size=5, y=1.1, label="Simulated mean of SD: 1.41", color="red")+ geom_vline(aes(xintercept=quantile(feb_pat_time_interval$sd_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(feb_pat_time_interval$sd_v, 0.05)[[1]]-0.2, y=1.1, label="5% Quantile: \n0.82", color="red") feb_pat_time_interval_plot feb_patient12_24<-grid.arrange(general_pat_time_interval_plot, feb_pat_time_interval_plot, nrow = 1) 
ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/time_interval_standard_deviation_72_hour.png", plot = feb_patient12_24, width = 60, height = 30, units = "cm") # # t = 12 # sim = 10000 # df_12_feb <- data.frame() # #set.seed(runif(1, 0, 100000)) # set.seed(1) # for (iter in 1:sim) { # # N = rpois(1, (lambda*t)/3) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_feb <- rbind(df_12_feb, df_temp) # } # df2 <- df_12_feb %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # med = median(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) #
/BIOS723_Proj1/BIOS723_Proj1_Code.R
no_license
shijbian/BIOS723
R
false
false
33,216
r
# install.packages("cowplot") # install.packages("hrbrthemes") # install.packages("gcookbook") # install.packages("gridExtra") library(tidyverse) library(cowplot) library(gridExtra) ################################################################################ #######################Section Zero. Exact distribution ########################## ################################################################################ qpois(0.95, 24) qpois(0.95, 48) qpois(0.95, 8) qpois(0.95, 16) qexp(p = 0.05, rate = 2) qexp(p = 0.05, rate = 2/3) ################################################################################ #######################Section I. Part (a) simulation ########################## ################################################################################ seed_init <- 20200321 sim <- 10000 lambda <- 2 febrile_rate <- 1/3 # --- I.(a) Number of patients in 12 hours --- # N_pat_12 <- c() for (iter in 1:sim) { set.seed(seed_init+iter) temp_time_interv <- rexp(1, lambda) max_hour <- temp_time_interv N_temp = 0 while (max_hour <= 12) { N_temp = N_temp + 1 temp_time_interv <- rexp(1, lambda) max_hour <- max_hour + temp_time_interv } N_pat_12 <- c(N_pat_12, N_temp) } mean(N_pat_12) var(N_pat_12) sd(N_pat_12) sort(N_pat_12, decreasing = FALSE)[sim - sim*0.05] patient_12 <- data.frame(N_pat_12=N_pat_12) %>% ggplot(., aes(x=N_pat_12)) + geom_histogram(aes(y=..density..), binwidth = 1, fill = "#012169", color = "#0c2340", alpha=0.9, position="identity") + # geom_density(alpha=0.6) + labs(title="General patients: density plot of arrival counts in one 12-hour period", x = "Arrival counts", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=24), linetype="dashed", color = "red")+ 
annotate(geom="text", x=27, size=5, y=0.09, label="E(N_{12})=24", color="red")+ geom_vline(aes(xintercept=33), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=38, y=0.09, label="P(N_{12}>=33)<=0.05", color="red") patient_12 # --- I.(b) Number of patients in 24 hours --- # N_pat_24 <- c() for (iter in 1:sim) { set.seed(seed_init+iter) temp_time_interv <- rexp(1, lambda) max_hour <- temp_time_interv N_temp = 0 while (max_hour <= 24) { N_temp = N_temp + 1 temp_time_interv <- rexp(1, lambda) max_hour <- max_hour + temp_time_interv } N_pat_24 <- c(N_pat_24, N_temp) } mean(N_pat_24) var(N_pat_24) sd(N_pat_24) sort(N_pat_24, decreasing = FALSE)[sim - sim*0.05] qpois(0.95, 48) patient_24 <- data.frame(N_pat_24=N_pat_24) %>% ggplot(., aes(x=N_pat_24)) + geom_histogram(aes(y=..density..), binwidth = 1, fill = "#012169", color = "#0c2340", alpha=0.9, position="identity") + # geom_density(alpha=0.6) + labs(title="General patients: density plot of arrival counts in one 24-hour period", x = "Arrival counts", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=48), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=52, y=0.065, label="E(N_{24})=48", color="red")+ geom_vline(aes(xintercept=60), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=67, y=0.065, label="P(N_{24}>=60)<=0.05", color="red") patient_24 patient12_24<-grid.arrange(patient_12, patient_24, nrow = 1) ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/general_patient_count.png", plot = patient12_24, width = 60, height = 30, units = "cm") # --- I.(c) Number of febrile in 12 hours --- # N_feb_12 <- c() for (iter in 1:sim) { set.seed(seed_init+iter) temp_time_interv <- 
rexp(1, lambda) max_hour <- temp_time_interv N_temp = 0 while (max_hour <= 12) { if (rbinom(1,1,1/3) == 1) { N_temp = N_temp + 1 } temp_time_interv <- rexp(1, lambda) max_hour <- max_hour + temp_time_interv } N_feb_12 <- c(N_feb_12, N_temp) } mean(N_feb_12) var(N_feb_12) sd(N_feb_12) sort(N_feb_12, decreasing = FALSE)[sim - sim*0.05] feb_patient_12 <- data.frame(N_feb_12=N_feb_12) %>% ggplot(., aes(x=N_feb_12)) + geom_histogram(aes(y=..density..), binwidth = 1, fill = "#012169", color = "#0c2340", alpha=0.9, position="identity") + labs(title="Febrile patients: density plot of arrival counts in one 12-hour period", x = "Arrival counts", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=8), linetype="dashed", color = "red")+ annotate(geom="text", x=10, size=5, y=0.16, label="E(M_{12})=8", color="red")+ geom_vline(aes(xintercept=13), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=16.5, y=0.16, label="P(M_{12}>=13)<=0.05", color="red") feb_patient_12 # --- I.(d) Number of febrile in 24 hours --- # N_feb_24 <- c() for (iter in 1:sim) { set.seed(seed_init+iter) temp_time_interv <- rexp(1, lambda) max_hour <- temp_time_interv N_temp = 0 while (max_hour <= 24) { if (rbinom(1,1,1/3) == 1) { N_temp = N_temp + 1 } temp_time_interv <- rexp(1, lambda) max_hour <- max_hour + temp_time_interv } N_feb_24 <- c(N_feb_24, N_temp) } mean(N_feb_24) var(N_feb_24) sd(N_feb_24) sort(N_feb_24, decreasing = FALSE)[sim - sim*0.05] qpois(0.95, 48) feb_patient_24 <- data.frame(N_feb_24=N_feb_24) %>% ggplot(., aes(x=N_feb_24)) + geom_histogram(aes(y=..density..), binwidth = 1, fill = "#012169", color = "#0c2340", alpha=0.9, position="identity") + # geom_density(alpha=0.6) + labs(title="Febrile patients: 
density plot of arrival counts in one 24-hour period", x = "Arrival counts", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=16), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=19, y=0.12, label="E(M_{24})=16", color="red")+ geom_vline(aes(xintercept=23), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=27.5, y=0.12, label="P(M_{24}>=23)<=0.05", color="red") feb_patient_24 feb_patient12_24<-grid.arrange(feb_patient_12, feb_patient_24, nrow = 1) ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/feb_patient_count.png", plot = feb_patient12_24, width = 60, height = 30, units = "cm") ################################################################################ #######################Section II. 
Part b simulation ########################### ################################################################################ # --- II.(a) Number of sick patients in 24 hours --- # t = 24 sim = 5000 df_24_patient <- data.frame() for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) arrivals = sort(unifs) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_24_patient <- rbind(df_24_patient, df_temp) } general_pat_time_interval <- df_24_patient %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance_v = var(Diff, na.rm = T), sd_v = sd(Diff, na.rm = T), N_v=n()) mean(general_pat_time_interval$mean_v) mean(general_pat_time_interval$variance_v,na.rm = T) quantile(general_pat_time_interval$mean_v, 0.05) general_pat_time_interval <- data.frame(general_pat_time_interval, stringsAsFactors = F) general_pat_time_interval_plot <- ggplot(general_pat_time_interval, aes(x=mean_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="General patients: density plot of interarrival intervals", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=mean(general_pat_time_interval$mean_v)), linetype="dashed", color = "red")+ annotate(geom="text", x=0.5+0.06, size=5, y=6, label="Simulated mean: 0.50", color="red")+ geom_vline(aes(xintercept=quantile(general_pat_time_interval$mean_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(general_pat_time_interval$mean_v, 0.05)[[1]]-0.055, y=6, label="5% Quantile: 0.39", color="red") general_pat_time_interval_plot # # --- II.(b) Number of 
sick patients in 12 hours --- # # t = 12 # sim = 5000 # df_12_patient <- data.frame() # for (iter in 1:sim) { # # N = rpois(1, lambda*t) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_patient <- rbind(df_12_patient, df_temp) # } # df2 <- df_12_patient %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) # --- II.(c) Number of febrile in 12 hours --- # t = 24 sim = 5000 df_12_feb <- data.frame() set.seed(123) for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) feb_pat <- rbinom(N,1,1/3) while(sum(feb_pat==1)==1){ feb_pat <- rbinom(N,1,1/3) } unifs_keep <- unifs[as.logical(feb_pat)] arrivals = sort(unifs_keep) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_12_feb <- rbind(df_12_feb, df_temp) } feb_pat_time_interval <- df_12_feb %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance=var(Diff, na.rm = T)) mean(feb_pat_time_interval$mean_v, na.rm = T) mean(feb_pat_time_interval$variance, na.rm = T) feb_pat_time_interval <- data.frame(feb_pat_time_interval, stringsAsFactors = F) feb_pat_time_interval_plot <- ggplot(feb_pat_time_interval, aes(x=mean_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="Febrile patients: density plot of interarrival intervals", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=mean(feb_pat_time_interval$mean_v)), 
linetype="dashed", color = "red")+ annotate(geom="text", x=mean(feb_pat_time_interval$mean_v)+0.5, size=5, y=1.3, label="Simulated mean: 1.50", color="red")+ geom_vline(aes(xintercept=quantile(feb_pat_time_interval$mean_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(feb_pat_time_interval$mean_v, 0.05)[[1]]-0.5, y=1.3, label="5% Quantile: \n0.99", color="red") feb_pat_time_interval_plot feb_patient12_24<-grid.arrange(general_pat_time_interval_plot, feb_pat_time_interval_plot, nrow = 1) ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/time_interval.png", plot = feb_patient12_24, width = 60, height = 30, units = "cm") # # t = 12 # sim = 10000 # df_12_feb <- data.frame() # #set.seed(runif(1, 0, 100000)) # set.seed(1) # for (iter in 1:sim) { # # N = rpois(1, (lambda*t)/3) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_feb <- rbind(df_12_feb, df_temp) # } # df2 <- df_12_feb %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # med = median(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) # ################################################################################ #######################Section II. 
Part b simulation - SD ########################### ################################################################################ # --- II.(a) Number of sick patients in 24 hours --- # t = 24 sim = 5000 df_24_patient <- data.frame() for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) arrivals = sort(unifs) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_24_patient <- rbind(df_24_patient, df_temp) } general_pat_time_interval <- df_24_patient %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance_v = var(Diff, na.rm = T), sd_v = sd(Diff, na.rm = T), N_v=n()) mean(general_pat_time_interval$mean_v) mean(general_pat_time_interval$variance_v,na.rm = T) mean(general_pat_time_interval$sd_v,na.rm = T) quantile(general_pat_time_interval$mean_v, 0.05) quantile(general_pat_time_interval$sd_v, 0.05) general_pat_time_interval <- data.frame(general_pat_time_interval, stringsAsFactors = F) general_pat_time_interval_plot <- ggplot(general_pat_time_interval, aes(x=sd_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="General patients: density plot of standard deviation for interarrival intervals", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=mean(general_pat_time_interval$sd_v)), linetype="dashed", color = "red")+ annotate(geom="text", x=0.6, size=5, y=6, label="Simulated mean of SD: 0.49", color="red")+ geom_vline(aes(xintercept=quantile(general_pat_time_interval$sd_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(general_pat_time_interval$sd_v, 
0.05)[[1]]-0.055, y=6, label="5% Quantile: 0.35", color="red") general_pat_time_interval_plot # # --- II.(b) Number of sick patients in 12 hours --- # # t = 12 # sim = 5000 # df_12_patient <- data.frame() # for (iter in 1:sim) { # # N = rpois(1, lambda*t) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_patient <- rbind(df_12_patient, df_temp) # } # df2 <- df_12_patient %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) # --- II.(c) Number of febrile in 12 hours --- # t = 24 sim = 5000 df_12_feb <- data.frame() set.seed(123) for (iter in 1:sim) { N = rpois(1, lambda*t) unifs = runif(N,0,t) feb_pat <- rbinom(N,1,1/3) while(sum(feb_pat==1)==1){ feb_pat <- rbinom(N,1,1/3) } unifs_keep <- unifs[as.logical(feb_pat)] arrivals = sort(unifs_keep) df_temp <- data.frame(index = iter, arrivals = c(0, arrivals)) %>% mutate(Diff = arrivals - lag(arrivals)) df_12_feb <- rbind(df_12_feb, df_temp) } feb_pat_time_interval <- df_12_feb %>% group_by(index) %>% summarise(mean_v=mean(Diff, na.rm = T), variance=var(Diff, na.rm = T), sd_v = sd(Diff, na.rm = T), N=n()) mean(feb_pat_time_interval$mean_v, na.rm = T) mean(feb_pat_time_interval$variance, na.rm = T) mean(feb_pat_time_interval$sd_v, na.rm = T) feb_pat_time_interval <- data.frame(feb_pat_time_interval, stringsAsFactors = F) feb_pat_time_interval_plot <- ggplot(feb_pat_time_interval, aes(x=sd_v)) + geom_histogram(aes(y=..density..), #binwidth = 0.025, fill = "#012169", color = "#0c2340", alpha=0.9) + geom_density( alpha=0.6)+ labs(title="Febrile patients: density plot of standard deviation for interarrival intervals", x = "Interarrival intervals", y = "Density") + theme(panel.background = element_rect(fill = "white", colour = "grey50"), plot.title = element_text(color="red", size=20, 
face="bold.italic"), axis.title.x = element_text(color="blue", size=18, face="bold"), axis.title.y = element_text(color="#993333", size=18, face="bold")) + geom_vline(aes(xintercept=mean(feb_pat_time_interval$sd_v)), linetype="dashed", color = "red")+ annotate(geom="text", x=mean(feb_pat_time_interval$sd_v)+0.5, size=5, y=1.1, label="Simulated mean of SD: 1.41", color="red")+ geom_vline(aes(xintercept=quantile(feb_pat_time_interval$sd_v, 0.05)[[1]]), linetype="dashed", color = "red")+ annotate(geom="text", size=5, x=quantile(feb_pat_time_interval$sd_v, 0.05)[[1]]-0.2, y=1.1, label="5% Quantile: \n0.82", color="red") feb_pat_time_interval_plot feb_patient12_24<-grid.arrange(general_pat_time_interval_plot, feb_pat_time_interval_plot, nrow = 1) ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/time_interval_standard_deviation.png", plot = feb_patient12_24, width = 60, height = 30, units = "cm") # # t = 12 # sim = 10000 # df_12_feb <- data.frame() # #set.seed(runif(1, 0, 100000)) # set.seed(1) # for (iter in 1:sim) { # # N = rpois(1, (lambda*t)/3) # unifs = runif(N,0,t) # arrivals = sort(unifs) # # df_temp <- data.frame(index = iter, # arrivals = c(0, arrivals)) %>% # mutate(Diff = arrivals - lag(arrivals)) # df_12_feb <- rbind(df_12_feb, df_temp) # } # df2 <- df_12_feb %>% # group_by(index) %>% # summarise(mean=mean(Diff, na.rm = T), # variance = var(Diff, na.rm = T), # med = median(Diff, na.rm = T), # N=n()) # mean(df2$mean, na.rm = T) # mean(df2$variance, na.rm = T) # ################################################################################ #######################Section II. 
################################################################################
## Part (b): Poisson arrival-process simulation over 120- and 72-hour windows
##
## Depends on `lambda` (hourly arrival rate), dplyr, ggplot2 and gridExtra,
## all set up earlier in this file.
################################################################################

# Simulate `n_sim` realisations of a homogeneous Poisson process on
# [0, t_hours] and return one long data frame of interarrival gaps.
#
#   t_hours   - length of the observation window (hours)
#   n_sim     - number of independent realisations
#   rate      - hourly arrival rate (lambda)
#   thin_prob - when non-NULL, each arrival is kept independently with this
#               probability (Bernoulli thinning, e.g. 1/3 for febrile
#               patients). Realisations with exactly one kept arrival are
#               redrawn, reproducing the original script's behaviour.
#               NOTE(review): this resampling slightly biases the thinned
#               process -- confirm it is intentional.
#
# Returns columns: index (realisation id), arrivals (0 plus the sorted
# arrival times) and Diff (gap to the previous arrival; NA on the leading 0).
simulate_interarrivals <- function(t_hours, n_sim, rate, thin_prob = NULL) {
  pieces <- vector("list", n_sim)  # preallocate instead of rbind-in-loop
  for (iter in seq_len(n_sim)) {
    n_arrivals <- rpois(1, rate * t_hours)
    times <- runif(n_arrivals, 0, t_hours)
    if (!is.null(thin_prob)) {
      keep <- rbinom(n_arrivals, 1, thin_prob)
      while (sum(keep == 1) == 1) {  # redraw when exactly one arrival kept
        keep <- rbinom(n_arrivals, 1, thin_prob)
      }
      times <- times[as.logical(keep)]
    }
    arrivals <- sort(times)
    pieces[[iter]] <- data.frame(index = iter, arrivals = c(0, arrivals)) %>%
      mutate(Diff = arrivals - lag(arrivals))
  }
  bind_rows(pieces)
}

# Per-realisation summary statistics (mean/variance/sd/count) of the gaps.
summarise_interarrivals <- function(sim_df) {
  sim_df %>%
    group_by(index) %>%
    summarise(mean_v = mean(Diff, na.rm = TRUE),
              variance_v = var(Diff, na.rm = TRUE),
              sd_v = sd(Diff, na.rm = TRUE),
              N_v = n()) %>%
    as.data.frame(stringsAsFactors = FALSE)
}

# Histogram + density of one summary column, with dashed reference lines at
# the simulated mean and the 5% quantile. The annotation labels are computed
# from the data (the original script hard-coded them, e.g. "Simulated mean:
# 0.50", so they could silently drift out of sync with the plotted values).
plot_interarrival_density <- function(stats_df, column, title, label_y,
                                      mean_label = "Simulated mean") {
  vals <- stats_df[[column]]
  mean_val <- mean(vals, na.rm = TRUE)
  q05 <- quantile(vals, 0.05, na.rm = TRUE)[[1]]
  ggplot(stats_df, aes(x = .data[[column]])) +
    geom_histogram(aes(y = ..density..),
                   fill = "#012169", color = "#0c2340", alpha = 0.9) +
    geom_density(alpha = 0.6) +
    labs(title = title, x = "Interarrival intervals", y = "Density") +
    theme(panel.background = element_rect(fill = "white", colour = "grey50"),
          plot.title = element_text(color = "red", size = 20,
                                    face = "bold.italic"),
          axis.title.x = element_text(color = "blue", size = 18,
                                      face = "bold"),
          axis.title.y = element_text(color = "#993333", size = 18,
                                      face = "bold")) +
    geom_vline(xintercept = mean_val, linetype = "dashed", color = "red") +
    annotate(geom = "text", size = 5, x = mean_val, y = label_y,
             color = "red",
             label = sprintf("%s: %.2f", mean_label, mean_val)) +
    geom_vline(xintercept = q05, linetype = "dashed", color = "red") +
    annotate(geom = "text", size = 5, x = q05, y = label_y, color = "red",
             label = sprintf("5%% Quantile: %.2f", q05))
}

# --- 120-hour window: mean interarrival time ---------------------------------

# General (all) patients.
df_24_patient <- simulate_interarrivals(t_hours = 120, n_sim = 5000,
                                        rate = lambda)
general_pat_time_interval <- summarise_interarrivals(df_24_patient)
mean(general_pat_time_interval$mean_v)
mean(general_pat_time_interval$variance_v, na.rm = TRUE)
quantile(general_pat_time_interval$mean_v, 0.05)

general_pat_time_interval_plot <- plot_interarrival_density(
  general_pat_time_interval, "mean_v",
  "General patients: density plot of interarrival intervals on 120-hour interval",
  label_y = 15)
general_pat_time_interval_plot

# Febrile patients: each arrival is febrile with probability 1/3.
set.seed(123)
df_12_feb <- simulate_interarrivals(t_hours = 120, n_sim = 5000,
                                    rate = lambda, thin_prob = 1 / 3)
feb_pat_time_interval <- summarise_interarrivals(df_12_feb)
mean(feb_pat_time_interval$mean_v, na.rm = TRUE)
mean(feb_pat_time_interval$variance_v, na.rm = TRUE)

feb_pat_time_interval_plot <- plot_interarrival_density(
  feb_pat_time_interval, "mean_v",
  "Febrile patients: density plot of interarrival intervals on 120-hour interval",
  label_y = 2.6)
feb_pat_time_interval_plot

feb_patient12_24 <- grid.arrange(general_pat_time_interval_plot,
                                 feb_pat_time_interval_plot, nrow = 1)
ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/time_interval_120_hour.png",
       plot = feb_patient12_24, width = 60, height = 30, units = "cm")

# --- 72-hour window: standard deviation of interarrival times ----------------

# General (all) patients.
df_24_patient <- simulate_interarrivals(t_hours = 72, n_sim = 5000,
                                        rate = lambda)
general_pat_time_interval <- summarise_interarrivals(df_24_patient)
mean(general_pat_time_interval$mean_v)
mean(general_pat_time_interval$variance_v, na.rm = TRUE)
mean(general_pat_time_interval$sd_v, na.rm = TRUE)
quantile(general_pat_time_interval$mean_v, 0.05)
quantile(general_pat_time_interval$sd_v, 0.05)

general_pat_time_interval_plot <- plot_interarrival_density(
  general_pat_time_interval, "sd_v",
  "General patients: density plot of standard deviation for interarrival intervals",
  label_y = 6, mean_label = "Simulated mean of SD")
general_pat_time_interval_plot

# Febrile patients.
set.seed(123)
df_12_feb <- simulate_interarrivals(t_hours = 72, n_sim = 5000,
                                    rate = lambda, thin_prob = 1 / 3)
feb_pat_time_interval <- summarise_interarrivals(df_12_feb)
mean(feb_pat_time_interval$mean_v, na.rm = TRUE)
mean(feb_pat_time_interval$variance_v, na.rm = TRUE)
mean(feb_pat_time_interval$sd_v, na.rm = TRUE)

feb_pat_time_interval_plot <- plot_interarrival_density(
  feb_pat_time_interval, "sd_v",
  "Febrile patients: density plot of standard deviation for interarrival intervals",
  label_y = 1.1, mean_label = "Simulated mean of SD")
feb_pat_time_interval_plot

feb_patient12_24 <- grid.arrange(general_pat_time_interval_plot,
                                 feb_pat_time_interval_plot, nrow = 1)
ggsave(filename = "./Dropbox/Emory Courses/Spring 2020/BIOS 723/Project/Result/time_interval_standard_deviation_72_hour.png",
       plot = feb_patient12_24, width = 60, height = 30, units = "cm")

# NOTE(review): the commented-out exploratory 12-hour runs from the original
# script were removed; rerun the helpers with t_hours = 12 to reproduce them.
# Extracted example for Rdpack::list_Rd ("Combine Rd fragments").
# Keywords: RdoBuild. See also the examples for c_Rd.
library(Rdpack)

# A trivial function to document.
dummyfun <- function(x, ...) x

# Assemble an Rd object from named documentation fragments.
u1 <- list_Rd(
  name        = "Dummyname",
  alias       = "dummyfun",
  title       = "Dummy title",
  description = "Dummy description",
  usage       = "dummyfun(x)",
  value       = "numeric vector",
  author      = "A. Author",
  Rd_class    = TRUE
)
Rdo_show(u1)

# reprompt() fills in the arguments section (and corrects the usage).
fn <- tempfile("dummyfun", fileext = "Rd")
reprompt(dummyfun, filename = fn)

# Confirm the result can be parsed, display it, then clean up.
Rdo_show(tools::parse_Rd(fn))
unlink(fn)
/data/genthat_extracted_code/Rdpack/examples/list_Rd.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
728
r
library(Rdpack) ### Name: list_Rd ### Title: Combine Rd fragments ### Aliases: list_Rd ### Keywords: RdoBuild ### ** Examples ## see also the examples for c_Rd dummyfun <- function(x, ...) x u1 <- list_Rd(name = "Dummyname", alias = "dummyfun", title = "Dummy title", description = "Dummy description", usage = "dummyfun(x)", value = "numeric vector", author = "A. Author", Rd_class=TRUE ) Rdo_show(u1) # call reprompt to fill the arguments section # (and correct the usage) fn <- tempfile("dummyfun", fileext="Rd") reprompt(dummyfun, filename=fn) # check that the result can be parsed and show it. Rdo_show(tools::parse_Rd(fn)) unlink(fn)
# Plot 2: Global Active Power over time for 2007-02-01 and 2007-02-02.

# Read the full dataset; "?" marks missing values.
consumption <- read.table("household_power_consumption.txt", header = TRUE,
                          sep = ";", na.strings = "?")

# Keep the two target days (dates are stored as d/m/Y strings).
date_chr <- as.character(consumption$Date)
two_days <- consumption[date_chr == "1/2/2007" | date_chr == "2/2/2007", ]

# Use the C locale so axis labels are locale-independent, then build a
# combined date-time column for the x axis.
Sys.setlocale("LC_TIME", "C")
two_days$datetime <- strptime(paste(two_days$Date, two_days$Time),
                              "%d/%m/%Y %H:%M:%S")

# Render the line plot straight to a PNG device.
png(file = "plot2.png")
plot(two_days$datetime, two_days$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
/plot2.R
no_license
PetruLuta/ExData_Plotting1
R
false
false
621
r
# read data and subset with observations from February 1st and 2nd, 2007 energy <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?") smpl <- energy[as.character(energy$Date)=="1/2/2007"|as.character(energy$Date)=="2/2/2007",] # set system locale and create date-time variable Sys.setlocale("LC_TIME", "C") smpl$datetime <- strptime(paste(smpl$Date, smpl$Time), "%d/%m/%Y %H:%M:%S") #Create plot 2 and write it to a png file png(file="plot2.png") plot(smpl$datetime, smpl$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "") dev.off()
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Lock5withR-package.R
\docType{data}
\name{SandwichAnts}
\alias{SandwichAnts}
\title{Sandwich Ants}
\format{A data frame with 24 observations on the following 5 variables.
\itemize{
\item{\code{Butter}} {Butter on the sandwich? \code{no}}
\item{\code{Filling}} {Type of filling: \code{Ham \& Pickles}, \code{Peanut Butter}, or \code{Vegemite}}
\item{\code{Bread}} {Type of bread: \code{Multigrain}, \code{Rye}, \code{White}, or \code{Wholemeal}}
\item{\code{Ants}} {Number of ants on the sandwich}
\item{\code{Order}} {Trial number}
}}
\source{
Margaret Mackisack, ``Favourite Experiments: An Addendum to What is the Use of Experiments Conducted by Statistics Students?", Journal of Statistics Education (1994)
\url{http://www.amstat.org/publications/jse/v2n1/mackisack.supp.html}
}
\description{
Ant Counts on samples of different sandwiches
}
\details{
As young students, Dominic Kelly and his friends enjoyed watching ants gather on pieces of sandwiches. Later, as a university student, Dominic decided to study this with a more formal experiment. He chose three types of sandwich fillings (vegemite, peanut butter, and ham \& pickles), four types of bread (multigrain, rye, white, and wholemeal), and put butter on some of the sandwiches. \cr To conduct the experiment he randomly chose a sandwich, broke off a piece, and left it on the ground near an ant hill. After several minutes he placed a jar over the sandwich bit and counted the number of ants. He repeated the process, allowing time for ants to return to the hill after each trial, until he had two samples for each combination of the factors. \cr This dataset has only sandwiches with no butter. The data in SandwichAnts2 adds information for samples with butter.
}
\examples{
data(SandwichAnts)
}
\keyword{datasets}
/man/SandwichAnts.Rd
no_license
stacyderuiter/Lock5withR
R
false
false
1,889
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/Lock5withR-package.R \docType{data} \name{SandwichAnts} \alias{SandwichAnts} \title{Sandwich Ants} \format{A data frame with 24 observations on the following 5 variables. \itemize{ \item{\code{Butter}} {Butter on the sandwich? \code{no}} \item{\code{Filling}} {Type of filling: \code{Ham \& Pickles}, \code{Peanut Butter}, or \code{Vegemite}} \item{\code{Bread}} {Type of bread: \code{Multigrain}, \code{Rye}, \code{White}, or \code{Wholemeal}} \item{\code{Ants}} {Number of ants on the sandwich} \item{\code{Order}} {Trial number} }} \source{ Margaret Mackisack, ``Favourite Experiments: An Addendum to What is the Use of Experiments Conducted by Statistics Students?", Journal of Statistics Education (1994) \url{http://www.amstat.org/publications/jse/v2n1/mackisack.supp.html} } \description{ Ant Counts on samples of different sandwiches } \details{ As young students, Dominic Kelly and his friends enjoyed watching ants gather on pieces of sandwiches. Later, as a university student, Dominic decided to study this with a more formal experiment. He chose three types of sandwich fillings (vegemite, peanut butter, and ham \& pickles), four types of bread (multigrain, rye, white, and wholemeal), and put butter on some of the sandwiches. \cr To conduct the experiment he randomly chose a sandwich, broke off a piece, and left it on the ground near an ant hill. After several minutes he placed a jar over the sandwich bit and counteed the number of ants. He repeated the process, allowing time for ants to return to the hill after each trial, until he had two samples for each combination of the factors. \cr This dataset has only sandwiches with no butter. The data in SandwichAnts2 adds information for samples with butter. } \examples{ data(SandwichAnts) } \keyword{datasets}
#!/usr/bin/Rscript
# Purpose: Small initialization script
# Date: 2014-10-09
# Author: Tim Hagmann
# Notes: WINDOWS: In order for it to work, RTools() has to be installed
# R Version: R version 3.1.1 -- "Sock it to Me"
################################################################################

## Library Functions

# Attach every package in `required_packages` via library().
# Any "@<suffix>" (e.g. "pkg@dev") is stripped from the name first.
loadLibraries <- function(required_packages){
  required_packages_cut <- cutTxt(x=required_packages, identifier="@",
                                  cut2="right")  # remove @dev etc.
  for(i in seq_along(required_packages_cut)){
    library(required_packages_cut[i], character.only=TRUE)
  }
}

# Return the entries of `required_packages` that are not installed yet
# (comparison ignores any "@<suffix>" part of the name).
findMissingPackages <- function(required_packages){
  required_packages_cut <- cutTxt(x=required_packages, identifier="@",
                                  cut2="right")  # remove @dev etc.
  required_packages[!(required_packages_cut %in%
                        installed.packages()[, "Package"])]
}

# Install missing CRAN packages (all of them when update=TRUE),
# then attach everything.
packagesCRAN <- function(required_packages, update=FALSE){
  missing_packages <- findMissingPackages(required_packages)
  if(length(missing_packages) > 0 || update){
    if(update){missing_packages <- required_packages}
    install.packages(missing_packages)
  }
  loadLibraries(required_packages)
}

# Install missing Bioconductor packages via a downloaded biocLite.R,
# then attach everything.
packagesBioconductor <- function(required_packages, update=FALSE){
  missing_packages <- findMissingPackages(required_packages)
  if(length(missing_packages) > 0 || update){
    if(update){missing_packages <- required_packages}
    dir.create("tmp")
    download.file(url="https://rawgit.com/greenore/initR/master/biocLite.R",
                  destfile="tmp/biocLite.R",
                  method=ifelse(Sys.info()["sysname"][[1]] == "Linux",
                                "wget", "auto"))
    source("tmp/biocLite.R")
    unlink("tmp", recursive=TRUE)
    biocLite(missing_packages)
  }
  loadLibraries(required_packages)
}

# Install missing GitHub packages from `repo_name`/<package> (optionally
# through a proxy), then attach everything.
packagesGithub <- function(required_packages, repo_name, auth_token=NULL,
                           proxy_url=NULL, port=NULL, update=FALSE){
  packagesCRAN("devtools")
  missing_packages <- findMissingPackages(required_packages)
  if(length(missing_packages) > 0 || update){
    setProxy(proxy_url=proxy_url, port=port)
    full_repo_name <- paste0(repo_name, '/', missing_packages)
    if(update) {
      full_repo_name <- paste0(repo_name, '/', required_packages)
    }
    for(i in seq_along(full_repo_name)){
      install_github(repo=full_repo_name[i], auth_token=auth_token)
    }
  }
  loadLibraries(required_packages)
}

## Proxy Functions

# TRUE when `url` answers a ping, FALSE otherwise.
pingServer <- function(url, stderr=FALSE, stdout=FALSE, ...){
  vec <- suppressWarnings(system2("ping", url, stderr=stderr,
                                  stdout=stdout, ...))
  vec == 0  # system2() returns exit status 0 on success
}

# Configure an httr proxy after interactively asking for credentials.
# Prompts are intentionally in German (original audience).
setProxy <- function(proxy_url, port){
  packagesCRAN("httr")
  port <- as.numeric(port)
  if(pingServer(proxy_url)){
    usr <- readline("Bitte Benutzername eingeben: ")
    pwd <- readline("Bitte Passwort eingeben: ")
    cat("\14")  # form feed: clear the console
    reset_config()
    set_config(use_proxy(url=proxy_url, port=port, username=usr,
                         password=pwd))
  }
}

## Additional helper functions

# Remove the text to the left or right of `identifier` from `x`.
#   cutTxt("pkg@dev", "@")                 -> "pkg"
#   cutTxt("user@host", "@", cut2="left")  -> "host"
# BUG FIX: the old default regex "[[:alnum:]]{1, }" has a space inside the
# repetition bound, which is not a valid POSIX interval, so the "@dev"-style
# suffixes were never actually stripped. "[[:alnum:]]+" is the intended
# pattern.
cutTxt <- function(x, identifier, regex="[[:alnum:]]+", cut2="right"){
  if(cut2=="right"){
    x <- gsub(paste0(identifier, regex), "", x)
  }
  if(cut2=="left"){
    x <- gsub(paste0(regex, identifier), "", x)
  }
  x
}
/01_init.R
no_license
greenore/Rhtml
R
false
false
3,787
r
#!/usr/bin/Rscript # Purpose: Small initialization script # Date: 2014-10-09 # Author: Tim Hagmann # Notes: WINDOWS: In order for it to work, RTools() has to be installed # R Version: R version 3.1.1 -- "Sock it to Me" ################################################################################ ## Library Functions # Function to load libraries loadLibraries <- function(required_packages){ required_packages_cut <- cutTxt(x=required_packages, identifier="@", cut2="right") # Remove @ dev etc. for(i in seq_along(required_packages_cut)){ library(required_packages_cut[i], character.only=TRUE) } } # Function to find missing packages findMissingPackages <- function(required_packages){ required_packages_cut <- cutTxt(x=required_packages, identifier="@", cut2="right") # Remove @ dev etc. missing_packages <- required_packages[!(required_packages_cut %in% installed.packages()[ ,"Package"])] missing_packages } # Function to install and/or load packages from CRAN packagesCRAN <- function(required_packages, update=FALSE){ missing_packages <- findMissingPackages(required_packages) if(length(missing_packages) > 0 || update){ if(update){missing_packages <- required_packages} # Base (required) install.packages(missing_packages) } loadLibraries(required_packages) } # Function to install and/or load missing packages from Bioconductor packagesBioconductor <- function(required_packages, update=FALSE){ missing_packages <- findMissingPackages(required_packages) if(length(missing_packages) > 0 || update){ if(update){missing_packages <- required_packages} # Base (required) dir.create("tmp") download.file(url="https://rawgit.com/greenore/initR/master/biocLite.R", destfile="tmp/biocLite.R", method=ifelse(Sys.info()["sysname"][[1]] == "Linux", "wget", "auto")) source("tmp/biocLite.R") unlink("tmp", recursive=TRUE) biocLite(missing_packages) } loadLibraries(required_packages) } # Function to install and/or load missing packages from Github packagesGithub <- function(required_packages, 
repo_name, auth_token=NULL, proxy_url=NULL, port=NULL, update=FALSE){ packagesCRAN("devtools") missing_packages <- findMissingPackages(required_packages) if(length(missing_packages) > 0 || update){ setProxy(proxy_url=proxy_url, port=port) full_repo_name <- paste0(repo_name, '/', missing_packages) # Base (missing) if(update) { full_repo_name <- paste0(repo_name, '/', required_packages) # Base (required) } for(i in seq_along(full_repo_name)){ install_github(repo=full_repo_name[i], auth_token=auth_token) } } loadLibraries(required_packages) } ## Proxy Functions # Function to ping a server (i.e., does the server exist) pingServer <- function(url, stderr=FALSE, stdout=FALSE, ...){ vec <- suppressWarnings(system2("ping", url, stderr=stderr, stdout=stdout, ...)) if (vec == 0){TRUE} else {FALSE} } # Function to set a proxy setProxy <- function(proxy_url, port){ packagesCRAN("httr") port <- as.numeric(port) if(pingServer(proxy_url)){ usr <- readline("Bitte Benutzername eingeben: ") pwd <- readline("Bitte Passwort eingeben: ") cat("\14") reset_config() set_config(use_proxy(url=proxy_url, port=port, username=usr, password=pwd)) } } ## Additional helper functions # Cut txt to either the left or right of an identifier cutTxt <- function(x, identifier, regex="[[:alnum:]]{1, }", cut2="right"){ if(cut2=="right"){ x <- gsub(paste0(identifier, regex), "", x) } if(cut2=="left"){ x <- gsub(paste0(regex, identifier), "", x) } x }
# This file is part of the 'rich-iannone/pointblank' package.
#
# (c) Richard Iannone <riannone@me.com>
#
# For full copyright and license information, please look at
# https://rich-iannone.github.io/pointblank/LICENSE.html
#

#' Enable logging of failure conditions at the validation step level
#'
#' The `log4r_step()` function can be used as an action in the [action_levels()]
#' function (as a list component for the `fns` list). Place a call to this
#' function in every failure condition that should produce a log (i.e., `warn`,
#' `stop`, `notify`). Only the failure condition with the highest severity for a
#' given validation step will produce a log entry (skipping failure conditions
#' with lower severity) so long as the call to `log4r_step()` is present.
#'
#' @param x A reference to the x-list object prepared by the `agent`. This
#'   version of the x-list is the same as that generated via
#'   `get_agent_x_list(<agent>, i = <step>)` except this version is internally
#'   generated and hence only available in an internal evaluation context.
#' @param message The message to use for the log entry. When not provided, a
#'   default glue string is used for the messaging. This is dynamic since the
#'   internal `glue::glue()` call occurs in the same environment as `x`, the
#'   x-list that's constrained to the validation step. The default message, used
#'   when `message = NULL` is the glue string `"Step {x$i} exceeded the {level}
#'   failure threshold (f_failed = {x$f_failed}) ['{x$type}']"`. As can be seen,
#'   a custom message can be crafted that uses other elements of the x-list with
#'   the `{x$<component>}` construction.
#' @param append_to The file to which log entries at the warn level are
#'   appended. This can alternatively be one or more **log4r** appenders.
#'
#' @family Logging
#' @section Function ID:
#' 5-1
#'
#' @export
log4r_step <- function(x,
                       message = NULL,
                       append_to = "pb_log_file") {

  if (!requireNamespace("log4r", quietly = TRUE)) {

    stop("Using the `log4r_step()` function requires ",
         "the log4r package:\n",
         " * It can be installed with `install.packages(\"log4r\")`.",
         call. = FALSE)
  }

  # nocov start

  type <- x$this_type
  warn_val <- x$warn
  stop_val <- x$stop
  notify_val <- x$notify

  # For each failure condition, determine whether a `log4r_step()` call is
  # registered among its action functions (string match on the deparsed fns).
  log4r_fn_present <-
    vapply(
      c("warn", "stop", "notify"),
      FUN.VALUE = logical(1),
      USE.NAMES = FALSE,
      FUN = function(y) {
        grepl(
          "log4r_step(x",
          paste(
            as.character(x$actions[[paste0("fns.", y)]]),
            collapse = ""
          ),
          fixed = TRUE
        )
      }
    )

  level <- toupper(type)

  # Numeric severity of the condition currently being handled.
  level_val <-
    switch(
      level,
      "WARN" = 3,
      "STOP" = 4,
      "NOTIFY" = 5,
      3
    )

  # Find the most severe triggered condition that also has a `log4r_step()`
  # call attached; only that condition produces a log entry.
  # FIX: initialize `highest_level` so the comparison below cannot fail with
  # "object 'highest_level' not found" when no condition matches.
  highest_level <- 0
  if (warn_val && log4r_fn_present[1]) highest_level <- 3
  if (stop_val && log4r_fn_present[2]) highest_level <- 4
  if (notify_val && log4r_fn_present[3]) highest_level <- 5

  if (highest_level > level_val) {
    return(invisible(NULL))
  }

  if (is.character(append_to)) {
    appenders <- log4r::file_appender(file = append_to[1])
  } else {
    # FIX: honor log4r appender objects passed directly, as documented
    # ("can alternatively be one or more log4r appenders").
    appenders <- append_to
  }

  logger <- log4r::logger(appenders = appenders)

  # FIX: the `message` argument was documented but previously ignored; use it
  # when supplied. `glue::glue()` runs in this environment, so `{x$...}`
  # interpolation works for custom messages too.
  if (is.null(message)) {
    message <-
      "Step {x$i} exceeded the {level} failure threshold \\
      (f_failed = {x$f_failed}) ['{x$type}']"
  }

  log4r::levellog(
    logger = logger,
    level = level_val,
    message = glue::glue(message)
  )

  # nocov end
}
/R/logging.R
permissive
LDalby/pointblank
R
false
false
4,169
r
# # _ _ _ _ _ # (_) | | | | | | | | # _ __ ___ _ _ __ | |_ | |__ | | __ _ _ __ | | __ # | '_ \ / _ \ | || '_ \ | __|| '_ \ | | / _` || '_ \ | |/ / # | |_) || (_) || || | | || |_ | |_) || || (_| || | | || < # | .__/ \___/ |_||_| |_| \__||_.__/ |_| \__,_||_| |_||_|\_\ # | | # |_| # # This file is part of the 'rich-iannone/pointblank' package. # # (c) Richard Iannone <riannone@me.com> # # For full copyright and license information, please look at # https://rich-iannone.github.io/pointblank/LICENSE.html # #' Enable logging of failure conditions at the validation step level #' #' The `log4r_step()` function can be used as an action in the [action_levels()] #' function (as a list component for the `fns` list). Place a call to this #' function in every failure condition that should produce a log (i.e., `warn`, #' `stop`, `notify`). Only the failure condition with the highest severity for a #' given validation step will produce a log entry (skipping failure conditions #' with lower severity) so long as the call to `log4r_step()` is present. #' #' @param x A reference to the x-list object prepared by the `agent`. This #' version of the x-list is the same as that generated via #' `get_agent_x_list(<agent>, i = <step>)` except this version is internally #' generated and hence only available in an internal evaluation context. #' @param message The message to use for the log entry. When not provided, a #' default glue string is used for the messaging. This is dynamic since the #' internal `glue::glue()` call occurs in the same environment as `x`, the #' x-list that's constrained to the validation step. The default message, used #' when `message = NULL` is the glue string `"Step {x$i} exceeded the {level} #' failure threshold (f_failed = {x$f_failed}) ['{x$type}']"`. As can be seen, #' a custom message can be crafted that uses other elements of the x-list with #' the `{x$<component>}` construction. 
#' @param append_to The file to which log entries at the warn level are #' appended. This can alternatively be one or more **log4r** appenders. #' #' @family Logging #' @section Function ID: #' 5-1 #' #' @export log4r_step <- function(x, message = NULL, append_to = "pb_log_file") { if (!requireNamespace("log4r", quietly = TRUE)) { stop("Using the `log4r_step()` function requires ", "the log4r package:\n", " * It can be installed with `install.packages(\"log4r\")`.", call. = FALSE) } # nocov start type <- x$this_type warn_val <- x$warn stop_val <- x$stop notify_val <- x$notify log4r_fn_present <- vapply( c("warn", "stop", "notify"), FUN.VALUE = logical(1), USE.NAMES = FALSE, FUN = function(y) { grepl( "log4r_step(x", paste( as.character(x$actions[[paste0("fns.", y)]]), collapse = "" ), fixed = TRUE ) } ) level <- toupper(type) level_val <- switch( level, "WARN" = 3, "STOP" = 4, "NOTIFY" = 5, 3 ) # Skip logging at this level if a higher severity # condition is present for this validation step *and* # there is a `log4r_step()` function ready for # evaluation at those higher severities if (warn_val && log4r_fn_present[1]) highest_level <- 3 if (stop_val && log4r_fn_present[2]) highest_level <- 4 if (notify_val && log4r_fn_present[3]) highest_level <- 5 if (highest_level > level_val) { return(invisible(NULL)) } if (is.character(append_to)) { appenders <- log4r::file_appender(file = append_to[1]) } logger <- log4r::logger(appenders = appenders) log4r::levellog( logger = logger, level = level_val, message = glue::glue( "Step {x$i} exceeded the {level} failure threshold \\ (f_failed = {x$f_failed}) ['{x$type}']" ) ) # nocov end }
### Getting and Cleaning Data Course Project

## 1. Download and unpack the raw data ----------------------------------------

# Work relative to the course folder.
setwd("C:/Users/Yu-Chen Wang/Documents/My Box Files/R/Coursera/")

# Create the target folder on first run.
if (!file.exists("Getting_Cleaning")) {
  dir.create("Getting_Cleaning")
}

# Fetch and extract the UCI HAR Dataset archive.
file_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(file_url, destfile = "./Getting_Cleaning/Project.zip")
unzip(zipfile = "./Getting_Cleaning/Project.zip", exdir = "./Getting_Cleaning")

## 2. Read the data files ------------------------------------------------------

# Training set: measurements, activity codes, subject ids.
X_train <- read.table("./Getting_Cleaning/UCI HAR Dataset/train/X_train.txt",
                      sep = "", comment.char = "", colClasses = "numeric",
                      header = FALSE)
y_train <- read.table("./Getting_Cleaning/UCI HAR Dataset/train/y_train.txt",
                      sep = "", colClasses = "character")
subject_train <- read.table("./Getting_Cleaning/UCI HAR Dataset/train/subject_train.txt",
                            sep = "", colClasses = "numeric")

# Test set: same three files.
X_test <- read.table("./Getting_Cleaning/UCI HAR Dataset/test/X_test.txt",
                     sep = "", comment.char = "", colClasses = "numeric",
                     header = FALSE)
y_test <- read.table("./Getting_Cleaning/UCI HAR Dataset/test/y_test.txt",
                     sep = "", colClasses = "character")
subject_test <- read.table("./Getting_Cleaning/UCI HAR Dataset/test/subject_test.txt",
                           sep = "", colClasses = "numeric")

# Feature (variable) names.
features <- read.table("./Getting_Cleaning/UCI HAR Dataset/features.txt",
                       sep = "", colClasses = "character")

## 3. Merge training and testing data ------------------------------------------

# Label the subject and activity columns consistently in both halves,
# then stack train on top of test.
colnames(subject_train) <- "Subject"
colnames(subject_test)  <- "Subject"
colnames(y_train) <- "Activity"
colnames(y_test)  <- "Activity"

SummaryData <- rbind(cbind(X_train, subject_train, y_train),
                     cbind(X_test, subject_test, y_test))

## 4. Replace activity codes with descriptive names ----------------------------

# Code-to-name mapping taken from activity_labels.txt.
activity_names <- c("1" = "walking", "2" = "walkingup", "3" = "walkingdown",
                    "4" = "sitting", "5" = "standing", "6" = "laying")
for (code in names(activity_names)) {
  SummaryData$Activity[SummaryData$Activity == code] <- activity_names[[code]]
}

## 5. Locate the mean() and std() variables ------------------------------------

# Indices of features whose names contain the literal strings "mean()" or
# "std()"; `Dataindex` subsets the final output columns.
meanindex <- grep("mean()", features[, 2], value = FALSE, fixed = TRUE)
stdindex  <- grep("std()",  features[, 2], value = FALSE, fixed = TRUE)
Dataindex <- c(meanindex, stdindex)

## 6. Clean up the variable names ----------------------------------------------

# Sequential substitutions; please refer to the codebook.txt file for more
# information on the naming scheme.
var_names <- features[, 2]
var_names <- gsub("\\()-", "in", var_names)
var_names <- gsub("-", "of", var_names)
var_names <- gsub("\\,", "and", var_names)
var_names <- sub("tBody", "TDSBody", var_names)
var_names <- sub("tGravity", "TDSGravity", var_names)
var_names <- sub("fBody", "FDS", var_names)
var_names <- sub("angle", "Angle", var_names)
var_names <- sub("gravity", "Gravity", var_names, fixed = TRUE)
var_names <- gsub("\\)", "", var_names)
var_names <- gsub("\\(", "", var_names)

colnames(SummaryData) <- c(var_names, "Subject", "Activity")

## 7. Assemble the final raw data frame ----------------------------------------

# Identifier columns first, then the selected mean/std measurements.
FinalrawData <- cbind(SummaryData[, c("Subject", "Activity")],
                      SummaryData[, Dataindex])

## 8. Average each variable per activity and subject ---------------------------

library(reshape2)
FinalrawDataMelt <- melt(FinalrawData, id = c("Subject", "Activity"))
FinalData <- dcast(FinalrawDataMelt, Subject + Activity ~ variable, mean,
                   margin = TRUE)

## 9. Write the tidy result ----------------------------------------------------

write.table(FinalData,
            file = "./Getting_Cleaning/UCI HAR Dataset/FinalData.txt",
            sep = "\t", col.names = TRUE, row.names = FALSE)
/run_analysis.R
no_license
yuchenw/Getting_Cleaning
R
false
false
5,539
r
### Getting and Cleaning Data Course Project
### Downloads the UCI HAR dataset, merges train/test, labels activities,
### extracts mean()/std() features, and writes a per-subject/activity summary.

## 1. Download the file
# Set Working Directory
# NOTE(review): hard-coded setwd() ties the script to one machine; kept
# unchanged, but flagged for portability.
setwd("C:/Users/Yu-Chen Wang/Documents/My Box Files/R/Coursera/")
# Check if folder exists, then create the folder if it is needed
if (!file.exists("Getting_Cleaning")){
  dir.create("Getting_Cleaning")
}
# Set Url
file_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Download the file to the working directory
download.file(file_url, destfile = "./Getting_Cleaning/Project.zip")
# Unzip the file
unzip(zipfile="./Getting_Cleaning/Project.zip", exdir = "./Getting_Cleaning")

## 2. Read the files
# Read training Data, save to "X_train", "y_train", and "subject_train"
X_train <- read.table("./Getting_Cleaning/UCI HAR Dataset/train/X_train.txt", sep = "", comment.char = "",colClasses="numeric", header = FALSE)
y_train <- read.table("./Getting_Cleaning/UCI HAR Dataset/train/y_train.txt", sep = "", colClasses="character")
subject_train <- read.table("./Getting_Cleaning/UCI HAR Dataset/train/subject_train.txt", sep = "", colClasses="numeric")
# Read testing Data, save to "X_test", "y_test", "subject_test"
X_test <- read.table("./Getting_Cleaning/UCI HAR Dataset/test/X_test.txt", sep = "", comment.char = "",colClasses="numeric", header = FALSE)
y_test <- read.table("./Getting_Cleaning/UCI HAR Dataset/test/y_test.txt", sep = "", colClasses="character")
subject_test <- read.table("./Getting_Cleaning/UCI HAR Dataset/test/subject_test.txt", sep = "", colClasses="numeric")
# Read variables, save to "features"
features <- read.table("./Getting_Cleaning/UCI HAR Dataset/features.txt", sep = "", colClasses="character")

## 3. Merge Training and Testing Data
# Change the column name of the subject data to "Subject" in both training and testing data
colnames(subject_train) <- "Subject"
colnames(subject_test) <- "Subject"
# Change the column name of the activity data to "Activity" in both training and testing data
colnames(y_train) <- "Activity"
colnames(y_test) <- "Activity"
# Combine the main dataset, subject data, and activity data in both training and testing data
SummaryTraining <- cbind(X_train,subject_train,y_train)
SummaryTesting <- cbind(X_test,subject_test,y_test)
# Combine the "SummaryTraining" and "SummaryTesting" into "SummaryData"
SummaryData <- rbind(SummaryTraining,SummaryTesting)

## 4. Change Activity numbers to descriptive activity names
# I define the activity numbers to be the following terms based on the activity_labels.txt file:
# 1:walking, 2:walkingup, 3:walkingdown, 4:sitting, 5:standing, 6:laying
SummaryData$Activity[SummaryData$Activity == "1"] <- "walking"
SummaryData$Activity[SummaryData$Activity == "2"] <- "walkingup"
SummaryData$Activity[SummaryData$Activity == "3"] <- "walkingdown"
SummaryData$Activity[SummaryData$Activity == "4"] <- "sitting"
SummaryData$Activity[SummaryData$Activity == "5"] <- "standing"
SummaryData$Activity[SummaryData$Activity == "6"] <- "laying"

## 5. Find the variables with mean() and std()
# Create an index called "meanindex" containing variables with the pattern string "mean()" in "features".
# fixed=TRUE makes this a literal substring match, so "meanFreq()" features
# (which do not contain the literal "mean()") are excluded.
meanindex <- grep("mean()",features[,2],value=FALSE,fixed=TRUE)
# Create an index called "stdindex" containing variables with the pattern string "std()" in "features"
stdindex <- grep("std()",features[,2],value=FALSE,fixed=TRUE)
# Create an index called "Dataindex", which combines "meanindex" and "stdindex",
# for later subsetting the "SummaryData" to have the final raw output.
# NOTE(review): this puts all mean columns before all std columns instead of
# preserving the original feature order — presumably intentional; confirm.
Dataindex <- c(meanindex,stdindex)

## 6. Adjust the variable name
# Use gsub and sub function to adjust the variable names.
# Please refer to the codebook.txt file for more information
aa <- features[,2]
bb <- gsub("\\()-","in",aa)
cc <- gsub("-","of",bb)
dd <- gsub("\\,","and",cc)
ee <- sub("tBody","TDSBody",dd)
ff <- sub("tGravity","TDSGravity",ee)
gg <- sub("fBody","FDS",ff)
hh <- sub("angle","Angle",gg)
ii <- sub("gravity","Gravity",hh,fixed=TRUE)
jj <- gsub("\\)","",ii)
kk <- gsub("\\(","",jj)
# Change the column names of the "SummaryData" using the adjusted variable names;
# "Subject" and "Activity" columns keep their names.
colnames(SummaryData) <- c(kk, "Subject","Activity")

## 7. Create the final raw output dataframe
# Subset the "SummaryData" using "Dataindex", resulting in "SubSummaryData1"
SubSummaryData1 <- SummaryData[,Dataindex]
# Subset the "SummaryData" to select the "Subject" and "Activity" columns,
# resulting in "SubSummaryData2"
SubSummaryData2 <- SummaryData[,c("Subject","Activity")]
# Combine "SubSummaryData1" and "SubSummaryData2" to form the "FinalrawData"
FinalrawData <- cbind(SubSummaryData2,SubSummaryData1)

## 8. Calculate the average of each variable for each activity and each subject
# Load reshape2 package
library(reshape2)
# Melt "FinalrawData" based on "Subject" and "Activity" to form "FinalrawDataMelt"
FinalrawDataMelt <- melt(FinalrawData,id=c("Subject","Activity"))
# Calculate the average for each activity and each subject using the "dcast"
# function, resulting in "FinalData".
# NOTE(review): dcast() has no `margin` argument (the argument is `margins`);
# as written, `margin = TRUE` is silently swallowed by mean()'s `...` and
# has no effect — confirm whether margins were intended.
FinalData <- dcast(FinalrawDataMelt, Subject + Activity ~ variable, mean, margin = TRUE)

## Write the FinalData to .txt file
write.table(FinalData,file = "./Getting_Cleaning/UCI HAR Dataset/FinalData.txt",sep="\t", col.names=TRUE,row.names=FALSE)
# Auto-generated libFuzzer/valgrind reproduction case for the unexported
# grattan:::anyOutside function: rebuilds the fuzzed argument list that
# triggered the original run and invokes the function with it.
testlist <- list(a = 0L, b = 0L, x = c(-245L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
# Print the structure of the result so the harness output can be inspected.
str(result)
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610129030-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
188
r
testlist <- list(a = 0L, b = 0L, x = c(-245L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)) result <- do.call(grattan:::anyOutside,testlist) str(result)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wavelet.R \name{createPhaseMatrix} \alias{createPhaseMatrix} \title{Create a matrix of phase angles} \usage{ createPhaseMatrix(wts, period = 1, significantPowerThreshold = 0) } \arguments{ \item{wts}{List of wavelet transform outputs.} \item{period}{The period of interest.} \item{significantPowerThreshold}{A threshold for power, e.g. if it equals 5, phase angles with corresponding power < 5 will be NAs.} } \value{ Returns a matrix with phase angles. } \description{ Creates a matrix of phase angles for a specified period. } \author{ Mikhail Churakov (\email{mikhail.churakov@pasteur.fr}). }
/man/createPhaseMatrix.Rd
permissive
mc30/wasp
R
false
true
675
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wavelet.R \name{createPhaseMatrix} \alias{createPhaseMatrix} \title{Create a matrix of phase angles} \usage{ createPhaseMatrix(wts, period = 1, significantPowerThreshold = 0) } \arguments{ \item{wts}{List of wavelet transform outputs.} \item{period}{The period of interest.} \item{significantPowerThreshold}{A threshold for power, e.g. if equals to 5, phase angles with corresponding power < 5 will be NAs.} } \value{ Returns a matrix with phase angles. } \description{ Creates a matrix of phase angles for a specified period } \author{ Mikhail Churakov (\email{mikhail.churakov@pasteur.fr}). }
# Exercise 1, script MT122.3, page 49.

# Sample vector with the values from example 3 (page 46).
x <- c(1, 3, 1, 3, 2, 2, 5, 4, 3, 2, 3, 4, 6, 3)

# With help from Stack Overflow:
# https://stackoverflow.com/questions/2547402/how-to-find-the-statistical-mode

# Statistical mode: the most frequent value (first one found on ties).
Mode <- function(x) {
  vals <- unique(x)
  freq <- tabulate(match(x, vals))
  vals[which.max(freq)]
}

# Extended version: returns every mode when several values tie for the
# highest frequency.
Modes <- function(x) {
  vals <- unique(x)
  freq <- tabulate(match(x, vals))
  vals[freq == max(freq)]
}

# Compute the mode(s) of x via Mode() / Modes().
xmod <- Mode(x)
xmodes <- Modes(x)

# Print the values of xmod and xmodes.
xmod
xmodes
/MT122.3_Aufgabe1.R
no_license
wernled/wiba_fh_mt122
R
false
false
687
r
#Aufgabe 1 Skritpum MT122.3 Seite 49 #Vektor erstellen mit den Werten aus Beispiel 3 (Seite 46) x <- c(1, 3, 1, 3, 2, 2, 5, 4, 3, 2, 3, 4, 6, 3) #Unter Zuhilfenahme von Stackoverflow: https://stackoverflow.com/questions/2547402/how-to-find-the-statistical-mode #Funktion für Modalwert Mode <- function(x) { ux <- unique(x) ux[which.max(tabulate(match(x,ux)))] } #Extended Funktion für Modalwert falls mehrere auftreten sollten Modes <- function(x) { ux <- unique(x) tab <- tabulate(match(x, ux)) ux[tab == max(tab)] } #Zuweisen der Modus-Werte über die Funktionen Mode bzw. Modes xmod <- Mode(x) xmodes <- Modes(x) #Ausgabe der Werte für xmod und xmodes xmod xmodes
# Data Transformation Load libraries. library(nycflights13) library(tidyverse) Loading tidyverse displays a message that dplyr overwrites some functions in base R. To get rid of the message, one uses stats::filter() and stats::lag() ## dplyr basics filter(): pick observations by their values arrange(): Reordering rows select(): pick variables by their names mutate(): creating new variables with functions of existing variables. summarise(): summary of values group_by() instead of operating on the entire dataset, group_by helps to just operate on the dataset group by group. ### Helper functions that one can use within select(): - starts_with("abc"): matches names that begin with "abc" - ends_with("xyz"): matches names that end with "xyz". - contains("ijk"): matches names that contain "ijk". - matches("(.)\\1"): selects variables that match a regular expression. This particular function will match variables that contain repeated characters. ### num_range("x",1:3); matches x1, x2, and x3 ### instead of using select() to rename variables, it is better to use rename() ### rename(dataset, oldname = newname) ### transmute(): is used when one wants to keep only the new variables. # Examples of Questions that Data Can Answer
/Task3_RScript.R
no_license
exoden1/myrepo
R
false
false
1,242
r
# Data Transformation Load libaries. library(nycsflights13) library(tidyverse) Loading tydiverse displays a message that dplyr overwrites some functions in base R. To get rid of the message, one uses stats::filter() and stats::lag() ## dplyr basics filter(): pack observations by their values arrange(): Reording rows() select(): pick variables by their names mutate():creating new variables with functions of existing variables. summarise(): summary of values group_by() instead of operating the entire dataset, group_by helps to just operate on the dataset group by group. ### Helper functions that one can use within select(): - starts_with("abc"): matches names that begin with "abc" - ends_with("xyz"): matches names that end with "xyz". - contains("ijk"): matches names that contain "ijk". - matches("(.)\\1"): selects variables that match a regular expression. This particular function will match variables that contain repeated characters. ###num_range("x",1:3); matches x1, x2, and x3 ### instead of using select() to rename variables, it is better to use rename() ### rename(dataset, oldname = newname) ### Transmute(): is used when one wants to keep only the new variables. # Examples of Questions that Data Can Answer
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merc.R \name{merc} \alias{merc} \title{Mercator extent} \usage{ merc(x = 146.7, y = -42, wh = 256000) } \arguments{ \item{x}{longitude} \item{y}{latitude} \item{wh}{width and height, in metres - if only one value is provided it is also used for height} } \value{ four values, xmin, xmax, ymin, ymax in global Mercator } \description{ Create an extent in Mercator projection from a longitude, latitude, and a width,height. } \examples{ merc() ## a parochial default library(lazyraster) vearth <- '<GDAL_WMS> <Service name="VirtualEarth"> <ServerUrl>http://a${server_num}.ortho.tiles.virtualearth.net/tiles/a${quadkey}.jpeg?g=90</ServerUrl></Service> <MaxConnections>4</MaxConnections> <Cache/> </GDAL_WMS>' lr <- lazyraster(vearth) \dontrun{\donttest{ raster::plotRGB(as_raster(crop(lr, merc(-90, 52, 256e4)), band = 1:3)) }} }
/man/merc.Rd
no_license
hypertidy/lazyraster
R
false
true
910
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merc.R \name{merc} \alias{merc} \title{Mercator extent} \usage{ merc(x = 146.7, y = -42, wh = 256000) } \arguments{ \item{x}{longitude} \item{y}{latitude} \item{wh}{width and height, in metres - if only one vaue provided it is also used for height} } \value{ four values, xmin, xmax, ymin, ymax in global Mercator } \description{ Create an extent in Mercator projection from a longitude, latitude, and a width,height. } \examples{ merc() ## a parochial default library(lazyraster) vearth <- '<GDAL_WMS> <Service name="VirtualEarth"> <ServerUrl>http://a${server_num}.ortho.tiles.virtualearth.net/tiles/a${quadkey}.jpeg?g=90</ServerUrl></Service> <MaxConnections>4</MaxConnections> <Cache/> </GDAL_WMS>' lr <- lazyraster(vearth) \dontrun{\donttest{ raster::plotRGB(as_raster(crop(lr, merc(-90, 52, 256e4)), band = 1:3)) }} }
## Matrix inversion can be a costly computation, so it pays to cache the
## inverse of a matrix instead of recomputing it on every call. The two
## functions below implement that cache.

## Build a cache-aware matrix "object": a closure environment that holds
## the matrix and its (lazily computed) inverse, exposed through a list of
## getter/setter functions.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # any stored inverse is stale for the new matrix
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}

## Return the inverse of a makeCacheMatrix() object, computing it only on
## the first call and serving the cached copy on subsequent calls.
cacheSolve <- function(x, ...) {
  inverse <- x$getInverse()
  if (is.null(inverse)) {
    # Cache miss: compute, store, then return the inverse.
    message("Calculating new inverse.")
    inverse <- solve(x$get())
    x$setInverse(inverse)
  } else {
    message("Using cached data.")
  }
  inverse
}
/cachematrix.R
no_license
dskripin/ProgrammingAssignment2
R
false
false
1,103
r
## Matrix inversion is usually a costly computation and there may be some benefit ## to caching the inverse of a matrix rather than compute it repeatedly. The ## following two functions are used to cache the inverse of a matrix. ## Creates an "object" which stores matrix and its inverse # and provides list of methods to get/set them makeCacheMatrix <- function(x = matrix()) { inv_cache <- NULL set <- function(y) { x <<- y inv_cache <<- NULL } get <- function() x setInverse <- function(inverse) inv_cache <<- inverse getInverse <- function() inv_cache list(set=set, get=get, setInverse=setInverse, getInverse=getInverse) } ## Calculates inverse matrix, caches result to speed up mutliple ## calls with the same input cacheSolve <- function(x, ...) { # check cache and retrun cached value inverse <- x$getInverse() if (!is.null(inverse)) { message("Using cached data.") return(inverse) } # calculate inverse matrix and cache it message("Calculating new inverse.") inverse <- solve(x$get()) x$setInverse(inverse) # return inverse matrix inverse }
# Hapax legomena (words occurring exactly once) per scene of Twelfth Night,
# plotted as each scene's share of hapaxes among its tokens.
setwd("~/Desktop/RAnalysis/rScripts")
library(ggplot2)  # NOTE(review): loaded but unused; the chart below uses base barplot()

# Read the play as a character vector, one element per line.
playText.a <- scan("~/Desktop/RAnalysis/plainText/twelfthNight.txt", what="character", sep="\n")
playText.a <- unlist(playText.a)

# Lines beginning with "SCENE" mark scene boundaries; append the last line
# index so the final scene has a closing boundary.
scene.positions.v <- grep("^SCENE", playText.a)
last.position.v <- length(playText.a)
scene.positions.v <- c(scene.positions.v, last.position.v)

scene.raws.l <- list()   # per-scene raw word-frequency tables
scene.freqs.l <- list()  # per-scene relative frequencies (%); filled but not used below
for (i in 1:length(scene.positions.v)) {
  if (i != length(scene.positions.v)) {
    # Slice the lines between this scene marker and the next one.
    scene.number <- playText.a[scene.positions.v[i]]
    start.positions <- scene.positions.v[i] +1
    end.positions <- scene.positions.v[i+1] -1
    scene.v <- playText.a[start.positions:end.positions]
    # Lowercase, split on non-word characters, drop empty tokens.
    scene.words.v <- tolower(paste(scene.v, collapse= " "))
    scene.words.l <- strsplit(scene.words.v, "\\W")
    scene.word.v <- unlist(scene.words.l)
    scene.word.v <- scene.word.v[which(scene.word.v != "")]
    # Raw frequency table for this scene, keyed by the scene-heading line.
    scene.freqs.t <- table(scene.word.v)
    scene.raws.l[[scene.number]] <- scene.freqs.t
    scene.freqs.t.rel <- 100*(scene.freqs.t/sum(scene.freqs.t))
    scene.freqs.l[[scene.number]] <- scene.freqs.t.rel
  }
}

# Hapax count per scene = number of words with frequency exactly 1.
textHapax <- sapply(scene.raws.l, function(x) sum(x == 1))
# Scene length = total token count per scene.
scene.lengths.m <- do.call(rbind, lapply(scene.raws.l,sum))
# Hapax proportion per scene (0-1 scale, despite the "Percentage" axis label).
scene.percentage <- textHapax / scene.lengths.m
barplot(scene.percentage, beside=T, col="cadetblue", col.main="maroon", col.lab="maroon", ylim=c(0,.5), xlab="Scene", ylab="Percentage",main="Hapax by Scene in Twelfth Night", names.arg = seq(1:length(scene.raws.l)))

# Acknowledgments: Code adapted from Matthew Jockers’ Text Analysis with R for Students of Literature.
/rScripts/twelfthNightHapaxByScene.R
no_license
cfhh888/RAnalysis
R
false
false
1,601
r
setwd("~/Desktop/RAnalysis/rScripts") library(ggplot2) playText.a <- scan("~/Desktop/RAnalysis/plainText/twelfthNight.txt", what="character", sep="\n") playText.a <- unlist(playText.a) scene.positions.v <- grep("^SCENE", playText.a) last.position.v <- length(playText.a) scene.positions.v <- c(scene.positions.v, last.position.v) scene.raws.l <- list() scene.freqs.l <- list() for (i in 1:length(scene.positions.v)) { if (i != length(scene.positions.v)) { scene.number <- playText.a[scene.positions.v[i]] start.positions <- scene.positions.v[i] +1 end.positions <- scene.positions.v[i+1] -1 scene.v <- playText.a[start.positions:end.positions] scene.words.v <- tolower(paste(scene.v, collapse= " ")) scene.words.l <- strsplit(scene.words.v, "\\W") scene.word.v <- unlist(scene.words.l) scene.word.v <- scene.word.v[which(scene.word.v != "")] scene.freqs.t <- table(scene.word.v) scene.raws.l[[scene.number]] <- scene.freqs.t scene.freqs.t.rel <- 100*(scene.freqs.t/sum(scene.freqs.t)) scene.freqs.l[[scene.number]] <- scene.freqs.t.rel } } textHapax <- sapply(scene.raws.l, function(x) sum(x == 1)) scene.lengths.m <- do.call(rbind, lapply(scene.raws.l,sum)) scene.percentage <- textHapax / scene.lengths.m barplot(scene.percentage, beside=T, col="cadetblue", col.main="maroon", col.lab="maroon", ylim=c(0,.5), xlab="Scene", ylab="Percentage",main="Hapax by Scene in Twelfth Night", names.arg = seq(1:length(scene.raws.l))) # Acknowledgments: Code adapted from Matthew Jockers’ Text Analysis with R for Students of Literature.
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/change_rec_devs.r \name{change_rec_devs} \alias{change_rec_devs} \title{Replace recruitment deviations} \usage{ change_rec_devs(recdevs_new, file_in = "ss3.par", file_out = "ss3.par") } \arguments{ \item{recdevs_new}{A vector of new recruitment deviations.} \item{file_in}{Input SS3 par file.} \item{file_out}{Output SS3 par file.} } \value{ A modified SS3 \code{.par} file. } \description{ This function replaces the recruitment deviations in the \code{ss3.par} file with those specified in \code{recdevs_new}, as well as a comment (for debugging). It then writes a new file with name \code{file_out} into the working directory. } \details{ This function does not need to be specified in a case file if you are running an ss3sim simulation through case files with \code{\link{run_ss3sim}}. } \examples{ # Create a temporary folder for the output: temp_path <- file.path(tempdir(), "ss3sim-recdev-example") dir.create(temp_path, showWarnings = FALSE) par_file <- system.file("extdata", "models", "cod-om", "ss3.par", package = "ss3sim") change_rec_devs(recdevs_new = rlnorm(100), file_in = par_file, file_out = paste0(temp_path, "/test.par")) } \author{ Cole Monnahan }
/man/change_rec_devs.Rd
no_license
ElizabethCouncill/ss3sim
R
false
false
1,273
rd
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/change_rec_devs.r \name{change_rec_devs} \alias{change_rec_devs} \title{Replace recruitment deviations} \usage{ change_rec_devs(recdevs_new, file_in = "ss3.par", file_out = "ss3.par") } \arguments{ \item{recdevs_new}{A vector of new recruitment deviations.} \item{file_in}{Input SS3 par file.} \item{file_out}{Output SS3 par file.} } \value{ A modified SS3 \code{.par} file. } \description{ This function replaces the recruitment deviations in the \code{ss3.par} file with those specified in \code{recdevs_new}, as well as a comment (for debugging). It then writes a new file with name \code{file_out} into the working directory. } \details{ This function does not need to be specified in a case file if you are running and ss3sim simulation through case files with \code{\link{run_ss3sim}}. } \examples{ # Create a temporary folder for the output: temp_path <- file.path(tempdir(), "ss3sim-recdev-example") dir.create(temp_path, showWarnings = FALSE) par_file <- system.file("extdata", "models", "cod-om", "ss3.par", package = "ss3sim") change_rec_devs(recdevs_new = rlnorm(100), file_in = par_file, file_out = paste0(temp_path, "/test.par")) } \author{ Cole Monnahan }
# Paragraph-level topic proportions from a fitted STM model (K=24):
# extracts theta, assigns each paragraph its dominant topic, merges back
# onto the paragraph metadata, and writes the result to CSV.
##library(stminsights)
# NOTE(review): rm(list=ls()) wipes the calling environment; conventional
# advice is to avoid this in scripts, kept unchanged here.
rm(list=ls())

######################
## Declare Data Paths
######################
Sys.info()
# Pick the data directory by machine: desktop user "Ergane" vs anything else.
if(Sys.info()['user']=="Ergane"){## desktop
  dataPathDesktop <- "~/Dropbox/WTO-Data/rdatas/"
  print(paste0("On desktop, data path is ", dataPathDesktop))
}else{ ## any other machine
  dataPathDesktop <- "../../../"
  print(paste0("On remote, data path is ", dataPathDesktop))
}

library(stm)

#####
## Summarize searchK for the full meeting range
##
load(paste0(dataPathDesktop, "tradDevParaM1to113-Pandemic.RData"))
load(paste0(dataPathDesktop, "Pairwise/K24-25Model.Rdata")) ## for the model output
## load(paste0(dataPathDesktop, "stmM1to113ComparisonK20to25.Rdata"))
## looking for the metadata
ls()
meta <- out$meta
head(meta)

#####################
## Summarize output from models
#####################
## K=24 model: the model that we decided to move forward with
## K85 model: look for rates of pandemic
## topic

######################
### K= 24
#####################
attributes(mod.out.24)
summary(mod.out.24)

## Extract topics: theta is the (documents x topics) proportion matrix.
theta24Out <- as.data.frame(round(mod.out.24$theta, 3)) ## round to three places
dim(theta24Out) ## 8553x 24
head(theta24Out)
## replace "V-" in the column names
colnames(theta24Out) <- gsub(pattern="V", replace="M24.Topic", x=as.character(colnames(theta24Out)))
theta24Out <- cbind(meta$pid, theta24Out)
head(theta24Out)

##########
### Extract the highest proportion
## Attach to the paragraph IDs
#### Assigned paragraph topic in the 24 model:
# NOTE(review): columns 2:22 cover only 21 topic columns; for a K=24 model
# the topic columns here are 2:25 — confirm whether 2:22 is intentional.
theta24Out$max24 <- colnames(theta24Out[,2:22])[apply( theta24Out[,2:22],1,which.max)]
table(theta24Out$max24) ## There is some variation in the assignment of topics

## send out the paragraph-level topics:
## will add columns for each topic, which makes a bigger
## dataframe; but might save some hassle later
paraTopics <- merge(x =meta, y=theta24Out, by.x="pid", by.y="meta$pid")
dim(paraTopics) ## 8853 x 41
colnames(paraTopics)
paraTopics$X <- NULL ## No idea what that column is or was
head(paraTopics)
tail(paraTopics)
summary(paraTopics$pid)
write.csv(paraTopics, file=paste0(dataPathDesktop, "M1to113DataAndTopics.csv"))

###########
## 24-topic model topic correlations
cors.24 <- topicCorr(mod.out.24, method = c("simple"), cutoff = 0.2, verbose = TRUE)
class(cors.24)
attributes(cors.24)
plot(cors.24, vertex.size=.75, vertex.color="darkblue")
/dataanalysis/analysis/topic-modeling/05paraLevelTopicProportions.R
no_license
margaretfoster/WTO
R
false
false
2,715
r
##library(stminsights) rm(list=ls()) ###################### ## Declare Data Paths ###################### Sys.info() if(Sys.info()['user']=="Ergane"){## desktop dataPathDesktop <- "~/Dropbox/WTO-Data/rdatas/" print(paste0("On desktop, data path is ", dataPathDesktop)) }else{ ## any other machine dataPathDesktop <- "../../../" print(paste0("On remote, data path is ", dataPathDesktop)) } library(stm) ##### ## Summarize searchK for the full meeting range ## load(paste0(dataPathDesktop, "tradDevParaM1to113-Pandemic.RData")) load(paste0(dataPathDesktop, "Pairwise/K24-25Model.Rdata")) ## for the model output ## load(paste0(dataPathDesktop, "stmM1to113ComparisonK20to25.Rdata")) ## looking for the metadata ls() meta <- out$meta head(meta) ##################### ## Summarize output from models ##################### ## K=24 model: the model that we decided to move forward with ## K85 model: look for rates of pandemic ## topic ###################### ### K= 24 ##################### attributes(mod.out.24) summary(mod.out.24) ##Extract topics: theta24Out <- as.data.frame(round(mod.out.24$theta, 3)) ## round to three places dim(theta24Out) ## 8553x 24 head(theta24Out) ## replace "V-" in the column names colnames(theta24Out) <- gsub(pattern="V", replace="M24.Topic", x=as.character(colnames(theta24Out))) theta24Out <- cbind(meta$pid, theta24Out) head(theta24Out) ########## ### Extract the highest proportion ## Attach to the paragraph IDs #### Assigned paragraph topic in the 24 model: theta24Out$max24 <- colnames(theta24Out[,2:22])[apply( theta24Out[,2:22],1,which.max)] table(theta24Out$max24) ## There is some variation in the assignment of topics ## send out the paragraph-level topics: ## will add columns for each topic, which makes a bigger ## dataframe; but might save some hassle later paraTopics <- merge(x =meta, y=theta24Out, by.x="pid", by.y="meta$pid") dim(paraTopics) ## 8853 x 41 colnames(paraTopics) paraTopics$X <- NULL ## No idea what that column is or was head(paraTopics) 
tail(paraTopics) summary(paraTopics$pid) write.csv(paraTopics, file=paste0(dataPathDesktop, "M1to113DataAndTopics.csv")) ########### ## 24-topic model topic correlations cors.24 <- topicCorr(mod.out.24, method = c("simple"), cutoff = 0.2, verbose = TRUE) class(cors.24) attributes(cors.24) plot(cors.24, vertex.size=.75, vertex.color="darkblue")
# Interaction Terms --------------------------------------
# Fits a linear model with an interaction between lstat and age on the
# Boston housing data.

# LOAD LIBRARY -------------------------------------------
library(MASS)  # provides the Boston data set
library(ISLR)

# DATASET ------------------------------------------------
View(Boston)

# MODEL --------------------------------------------------
# lstat * age expands to lstat + age + lstat:age, i.e. both main effects
# plus their interaction term.
g <- lm(medv ~ lstat * age, data = Boston)
summary(g)
/01-linear-regression/03-interaction-terms.R
no_license
sercandogan/statistical-learning
R
false
false
336
r
# Interaction Terms -------------------------------------- # LOAD LIBRARY ------------------------------------------- library(MASS) library(ISLR) # DATASET ------------------------------------------------ View(Boston) # MODEL -------------------------------------------------- g <- lm(medv ~ lstat * age, data = Boston) summary(g)
library(xml2)

# Augment the cached BGG board-game data with each game's description,
# fetched one row at a time from the BoardGameGeek XML API, then save the
# updated data frame back to the same .RData file.
load("C:/Users/USER/Desktop/BGG/data.RData")

# Keep only rows with a known type. A logical mask is used instead of the
# original `data[-which(is.na(data$type)), ]`: when NO row is NA,
# `-which(...)` is `-integer(0)`, which selects ZERO rows and silently
# empties the data frame.
data <- data[!is.na(data$type), ]

# Pre-allocate the new column (character NA keeps the column type stable
# once descriptions are assigned).
data$description <- rep(NA_character_, nrow(data))

# seq_len() is safe when the data frame is empty (1:nrow(data) would
# produce 1:0 and loop twice with invalid indices).
for (i in seq_len(nrow(data))) {
  url <- paste0("https://api.geekdo.com/xmlapi2/thing?id=", data$id[i])
  Description_xml <- read_xml(x = url)
  Description_list <- as_list(Description_xml)
  # First text node of the <description> element for this item.
  data$description[i] <- Description_list$items$item$description[[1]]
}

save(data, file = "C:/Users/USER/Desktop/BGG/data.RData")
/爬蟲/BGG/api-description.R
no_license
chenxuepu/Record-by-R
R
false
false
475
r
library(xml2) load("C:/Users/USER/Desktop/BGG/data.RData") data <- data[-which(is.na(data$type)),] description <- rep(NA,nrow(data)) data <- cbind(data,description) for(i in 1:nrow(data)){ url <- paste0("https://api.geekdo.com/xmlapi2/thing?id=",data$id[i]) Description_xml <- read_xml(x=url) Description_list <- as_list(Description_xml) data$description[i] <- Description_list$items$item$description[[1]] } save(data,file = "C:/Users/USER/Desktop/BGG/data.RData")
pp_file_pattern <- '.*_pp.csv' #-------------------------# #---FUNCTION DEFINITION---# #-------------------------# getAllDirectoryList <- function(directory) { return(list.dirs(path=directory, full.names=F, recursive=F)) } getMatchedFileNames <- function(directory, file_pattern) { return(list.files(path=directory, pattern=file_pattern, recursive=F)) } getMatchedFileNamesRecursively <- function(directory, file_pattern) { return(list.files(path=directory, pattern=file_pattern, recursive=T)) } getMatchedFileNamesFullPath <- function(directory, file_pattern) { return(list.files(path=directory, pattern=file_pattern, recursive=F, full.names=T)) } copyReExtractedDataToNsfDir <- function() { subj_list <- getAllDirectoryList(re_extract_pp_dir) sapply(subj_list, function(subj_name) { subj_dir <- file.path(getwd(), re_extract_pp_dir, subj_name) subj_serial <- as.numeric(substr(subj_name, 2, 4)) pp_file_name <- getMatchedFileNames(subj_dir, pp_file_pattern) pp_file_full_path <- getMatchedFileNamesFullPath(subj_dir, pp_file_pattern) if(!isEmpty(pp_file_name)) { grp_name <- paste0('Group', toString(subj_serial%/%60 + 1)) pp_new_file_name <- paste0(substr(pp_file_name, 1, nchar(pp_file_name)-4), '_new.csv') pp_new_dest_path <- file.path(getwd(), data_dir, grp_name, subj_name, 'SuperSession', pp_new_file_name) file.copy(from = pp_file_full_path, to = pp_new_dest_path) } }) } #-------------------------# #-------Main Program------# #-------------------------# # CHANGE THIS current_dir <- dirname(dirname(rstudioapi::getSourceEditorContext()$path)) setwd(current_dir) copyReExtractedDataToNsfDir()
/nsf-stress-study-scripts/CopyAndBackUpNewFiles.R
no_license
Shaila96/nsf-email-stress-study
R
false
false
1,691
r
pp_file_pattern <- '.*_pp.csv' #-------------------------# #---FUNCTION DEFINITION---# #-------------------------# getAllDirectoryList <- function(directory) { return(list.dirs(path=directory, full.names=F, recursive=F)) } getMatchedFileNames <- function(directory, file_pattern) { return(list.files(path=directory, pattern=file_pattern, recursive=F)) } getMatchedFileNamesRecursively <- function(directory, file_pattern) { return(list.files(path=directory, pattern=file_pattern, recursive=T)) } getMatchedFileNamesFullPath <- function(directory, file_pattern) { return(list.files(path=directory, pattern=file_pattern, recursive=F, full.names=T)) } copyReExtractedDataToNsfDir <- function() { subj_list <- getAllDirectoryList(re_extract_pp_dir) sapply(subj_list, function(subj_name) { subj_dir <- file.path(getwd(), re_extract_pp_dir, subj_name) subj_serial <- as.numeric(substr(subj_name, 2, 4)) pp_file_name <- getMatchedFileNames(subj_dir, pp_file_pattern) pp_file_full_path <- getMatchedFileNamesFullPath(subj_dir, pp_file_pattern) if(!isEmpty(pp_file_name)) { grp_name <- paste0('Group', toString(subj_serial%/%60 + 1)) pp_new_file_name <- paste0(substr(pp_file_name, 1, nchar(pp_file_name)-4), '_new.csv') pp_new_dest_path <- file.path(getwd(), data_dir, grp_name, subj_name, 'SuperSession', pp_new_file_name) file.copy(from = pp_file_full_path, to = pp_new_dest_path) } }) } #-------------------------# #-------Main Program------# #-------------------------# # CHANGE THIS current_dir <- dirname(dirname(rstudioapi::getSourceEditorContext()$path)) setwd(current_dir) copyReExtractedDataToNsfDir()
library(DBI) # needed to connect to database library(dbplyr) # needed to connect to database library(RPostgreSQL) # needed to connect to our database library(rstudioapi) # just so we can type the password as we run the script, so it is not written in the clear library(tidyverse) # con is short for connection # Create connection to the database con <- DBI::dbConnect( PostgreSQL(), host = "192.168.1.16", dbname = 'bight2018', user = 'b18read', password = '1969$Harbor' # if we post to github, we might want to do rstudioapi::askForPassword() ) # Bring in our tables from the database infauna <- tbl(con, "tbl_infaunalabundance_initial") %>% as_tibble station_occupation <- tbl(con, "tbl_stationoccupation") %>% as_tibble grab <- tbl(con, "tbl_grabevent") %>% as_tibble benthic_data <- grab %>% dplyr::filter(benthicinfauna == 'Yes') %>% dplyr::inner_join(station_occupation, by = c('stationid','sampledate' = 'occupationdate')) %>% dplyr::inner_join(infauna, by = c('stationid','sampledate')) %>% dplyr::select('stationid','replicate','sampledate','latitude','longitude','taxon','abundance','salinity') %>% dplyr::rename(species = taxon)
/MAMBIcalculator/R/benthic_query.R
no_license
jl-perdomo/MAMBI_Calculator
R
false
false
1,201
r
# Database connection packages.
library(DBI)          # needed to connect to database
library(dbplyr)       # needed to connect to database
library(RPostgreSQL)  # needed to connect to our database
library(rstudioapi)   # lets us prompt for the password instead of committing it
library(tidyverse)

# Open the connection ("con" is short for connection).
con <- DBI::dbConnect(
  PostgreSQL(),
  host = "192.168.1.16",
  dbname = 'bight2018',
  user = 'b18read',
  password = '1969$Harbor'  # if we post to github, we might want to do rstudioapi::askForPassword()
)

# Pull the tables we need from the database into local tibbles.
infauna <- tbl(con, "tbl_infaunalabundance_initial") %>% as_tibble
station_occupation <- tbl(con, "tbl_stationoccupation") %>% as_tibble
grab <- tbl(con, "tbl_grabevent") %>% as_tibble

# Keep only grabs with benthic infauna, attach the station-occupation and
# abundance records, and rename "taxon" to "species" for downstream use.
benthic_data <- grab %>%
  dplyr::filter(benthicinfauna == 'Yes') %>%
  dplyr::inner_join(station_occupation,
                    by = c('stationid', 'sampledate' = 'occupationdate')) %>%
  dplyr::inner_join(infauna, by = c('stationid', 'sampledate')) %>%
  dplyr::select('stationid', 'replicate', 'sampledate', 'latitude',
                'longitude', 'taxon', 'abundance', 'salinity') %>%
  dplyr::rename(species = taxon)
library(ggplot2)
library(grid)
library(gridExtra)

#options(scipen=10)
#options(width=250)
#cls <- function() cat(rep("\n",100))
#cls()

# Remove all the variables (functions are kept).
rm(list = setdiff(ls(), lsf.str()))

# Summarise one timing table and draw a horizontal boxplot of elapsed times.
#   df:    data.frame with columns: label, user time, system time, and an
#          elapsed-time arithmetic expression stored as text (e.g. "1+2")
#   chart: plot title
#   range_min, range_max, step: tick positions for the secondary axis
print_stats <- function(df, chart, range_min, range_max, step) {
  label <- as.vector(df[, 1])
  user <- as.vector(df[, 2])
  system <- as.vector(df[, 3])
  elapsed <- sapply(df[, 4], function(x) toString(x))

  real_sec <- user + system
  # Column 4 holds textual arithmetic expressions; evaluate them to seconds.
  elapsed_sec <- sapply(elapsed, function(x) eval(parse(text = x)))

  median <- median(elapsed_sec)
  max <- max(elapsed_sec)
  min <- min(elapsed_sec)

  max_idx <- which(elapsed_sec == max)
  min_idx <- which(elapsed_sec == min)
  # Fixed: with an even number of rows, median() interpolates between two
  # observations, so which(elapsed_sec == median) was empty and the label
  # came out NA. Use the closest observation instead.
  median_idx <- which.min(abs(elapsed_sec - median))

  # Labels keep only the prefix before the first underscore.
  max_label <- gsub("(.*?)_.*", "\\1", label[max_idx[1]])
  min_label <- gsub("(.*?)_.*", "\\1", label[min_idx[1]])
  median_label <- gsub("(.*?)_.*", "\\1", label[median_idx[1]])

  cat("Median:", median, ":", median_label, "\n")
  cat("Max:", max, ":", max_label, "\n")
  cat("Min:", min, ":", min_label, "\n")

  imp_point <- c(min, max)
  imp_label <- c(min_label, max_label)

  boxplot(elapsed_sec, xlab = "Time (secs)", main = chart, axes = TRUE,
          horizontal = TRUE)
  text(y = imp_point, labels = imp_label, x = 1.25)
  axis(2, at = seq(range_min, range_max, step))
}

# Give the chart file a name.
pdf(file = "docs/siv.pdf") print("Stats: Xspec \n") # cat docs/kliPassR.log | parallel "echo {}; echo ==; tail -n3 register-variants/{}/Output/test-xspec.out" > docs/stat_xspec.time df <- read.table("docs/stat_xspec.time", header = FALSE, sep=',') print_stats(df, "Summary Generation Time for X86-64 instructions", 110, 370, 20) print("Stats: Lspec \n") # cat docs/kliPassR.log | parallel "echo {}; echo ==; tail -n3 register-variants/{}/Output/test-lspec.out" > docs/stat_lspec.time df <- read.table("docs/stat_lspec.time", header = FALSE, sep=',') print_stats(df, "Summary generation Time for Lifted LLVM IR", 110, 310, 20) # Save the file. dev.off() png(file = "docs/solver.png") print("Stats: Prove \n") # cat docs/genz3PassR.log | parallel "echo; echo; cd register-variants/{}; make provez3; cd -" |& tee log df <- read.table("docs/stat_prove.time", header = FALSE, sep=',') print_stats(df, "Z3 Solver Time", 0, 30, 1) # Save the file. dev.off()
/tests/single_instruction_translation_validation/mcsema/docs/stat.r
permissive
Mthandazo42/validating-binary-decompilation
R
false
false
2,575
r
library(ggplot2)
library(grid)
library(gridExtra)

#options(scipen=10)
#options(width=250)
#cls <- function() cat(rep("\n",100))
#cls()

# Remove all the variables (functions are kept).
rm(list = setdiff(ls(), lsf.str()))

# Summarise one timing table and draw a horizontal boxplot of elapsed times.
#   df:    data.frame with columns: label, user time, system time, and an
#          elapsed-time arithmetic expression stored as text (e.g. "1+2")
#   chart: plot title
#   range_min, range_max, step: tick positions for the secondary axis
print_stats <- function(df, chart, range_min, range_max, step) {
  label <- as.vector(df[, 1])
  user <- as.vector(df[, 2])
  system <- as.vector(df[, 3])
  elapsed <- sapply(df[, 4], function(x) toString(x))

  real_sec <- user + system
  # Column 4 holds textual arithmetic expressions; evaluate them to seconds.
  elapsed_sec <- sapply(elapsed, function(x) eval(parse(text = x)))

  median <- median(elapsed_sec)
  max <- max(elapsed_sec)
  min <- min(elapsed_sec)

  max_idx <- which(elapsed_sec == max)
  min_idx <- which(elapsed_sec == min)
  # Fixed: with an even number of rows, median() interpolates between two
  # observations, so which(elapsed_sec == median) was empty and the label
  # came out NA. Use the closest observation instead.
  median_idx <- which.min(abs(elapsed_sec - median))

  # Labels keep only the prefix before the first underscore.
  max_label <- gsub("(.*?)_.*", "\\1", label[max_idx[1]])
  min_label <- gsub("(.*?)_.*", "\\1", label[min_idx[1]])
  median_label <- gsub("(.*?)_.*", "\\1", label[median_idx[1]])

  cat("Median:", median, ":", median_label, "\n")
  cat("Max:", max, ":", max_label, "\n")
  cat("Min:", min, ":", min_label, "\n")

  imp_point <- c(min, max)
  imp_label <- c(min_label, max_label)

  boxplot(elapsed_sec, xlab = "Time (secs)", main = chart, axes = TRUE,
          horizontal = TRUE)
  text(y = imp_point, labels = imp_label, x = 1.25)
  axis(2, at = seq(range_min, range_max, step))
}

# Give the chart file a name.
pdf(file = "docs/siv.pdf") print("Stats: Xspec \n") # cat docs/kliPassR.log | parallel "echo {}; echo ==; tail -n3 register-variants/{}/Output/test-xspec.out" > docs/stat_xspec.time df <- read.table("docs/stat_xspec.time", header = FALSE, sep=',') print_stats(df, "Summary Generation Time for X86-64 instructions", 110, 370, 20) print("Stats: Lspec \n") # cat docs/kliPassR.log | parallel "echo {}; echo ==; tail -n3 register-variants/{}/Output/test-lspec.out" > docs/stat_lspec.time df <- read.table("docs/stat_lspec.time", header = FALSE, sep=',') print_stats(df, "Summary generation Time for Lifted LLVM IR", 110, 310, 20) # Save the file. dev.off() png(file = "docs/solver.png") print("Stats: Prove \n") # cat docs/genz3PassR.log | parallel "echo; echo; cd register-variants/{}; make provez3; cd -" |& tee log df <- read.table("docs/stat_prove.time", header = FALSE, sep=',') print_stats(df, "Z3 Solver Time", 0, 30, 1) # Save the file. dev.off()
#!/usr/bin/env Rscript

suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(reshape2))
## suppressPackageStartupMessages (library(doBy))

source("face-flow-style.R")

# Load the pre-processed data (provides `data.run`).
processedFile <- "preprocess.data"
load(file = processedFile)

# Scatter of link utilisation over time with a smoothed trend line.
g <- ggplot(data = data.run, aes(x = Time, y = Kilobytes)) +
  geom_point(size = 3) +
  ylab("Link utilization [Kilobytes]") +
  stat_smooth(se = FALSE) +
  theme_custom()

cat("Writing graph to [per-flow.pdf]\n")
pdf(file = "per-flow.pdf", width = 9, height = 6)
g
x <- dev.off()
/r/perFlow.R
no_license
codeditor2/graphs
R
false
false
541
r
#!/usr/bin/env Rscript

suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(reshape2))
## suppressPackageStartupMessages (library(doBy))

source("face-flow-style.R")

# Load the pre-processed data (provides `data.run`).
processedFile <- "preprocess.data"
load(file = processedFile)

# Scatter of link utilisation over time with a smoothed trend line.
g <- ggplot(data = data.run, aes(x = Time, y = Kilobytes)) +
  geom_point(size = 3) +
  ylab("Link utilization [Kilobytes]") +
  stat_smooth(se = FALSE) +
  theme_custom()

cat("Writing graph to [per-flow.pdf]\n")
pdf(file = "per-flow.pdf", width = 9, height = 6)
g
x <- dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/myboot2.R \name{myboot2} \alias{myboot2} \title{myboot2} \usage{ myboot2(iter = 10000, x, fun = "mean", alpha = 0.05, cx = 1.5, ...) } \arguments{ \item{...}{} } \value{ } \description{ myboot2 }
/man/myboot2.Rd
no_license
z99jacc/MATH4753ouchavv0029
R
false
true
275
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/myboot2.R \name{myboot2} \alias{myboot2} \title{myboot2} \usage{ myboot2(iter = 10000, x, fun = "mean", alpha = 0.05, cx = 1.5, ...) } \arguments{ \item{...}{} } \value{ } \description{ myboot2 }
# Gradient Boosted Machine

# load the package
library(gbm)

# load data
data(iris)

# fit model
fit <- gbm(Species ~ ., data = iris, distribution = "multinomial")

# summarize the fit
print(fit)

# make predictions: class scores using only the first boosting iteration,
# then pick the highest-scoring class per row
probabilities <- predict(fit, iris[, 1:4], n.trees = 1)
predictions <- colnames(probabilities)[apply(probabilities, 1, which.max)]

# summarize accuracy: confusion table of predicted vs. actual species
table(predictions, iris$Species)
/08_Machine_Learning_Mastery_with_R/03_Algorithms/01_Algorithms/5-NonLinearClassiication/gradient_boosted_machine.R
no_license
jggrimesdc-zz/MachineLearningExercises
R
false
false
390
r
# Gradient Boosted Machine

# load the package
library(gbm)

# load data
data(iris)

# fit model
fit <- gbm(Species ~ ., data = iris, distribution = "multinomial")

# summarize the fit
print(fit)

# make predictions: class scores using only the first boosting iteration,
# then pick the highest-scoring class per row
probabilities <- predict(fit, iris[, 1:4], n.trees = 1)
predictions <- colnames(probabilities)[apply(probabilities, 1, which.max)]

# summarize accuracy: confusion table of predicted vs. actual species
table(predictions, iris$Species)
#' A phylosql Function
#'
#' Upload lab data to the mysql database. Rows are keyed on
#' (MetagenNumber, variable); only rows not already present are appended.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_lab_data <- function(data, database = "labdata", con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$MetagenNumber, current$variable)
  incoming_keys <- paste0(data$MetagenNumber, data$variable)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  RMariaDB::dbAppendTable(con, database, value = data[rows_to_send, ])
  message("Complete.")
}

#' A phylosql Function
#'
#' Upload an SV table to the mysql database. Rows are keyed on
#' (MetagenNumber, SV); only rows not already present are appended.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_sv <- function(data, database = NULL, con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  # Preprocess data for sql here
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$MetagenNumber, current$SV)
  incoming_keys <- paste0(data$MetagenNumber, data$SV)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  RMariaDB::dbAppendTable(con, database, value = data[rows_to_send, ])
  message("Complete.")
}

#' A phylosql Function
#'
#' Upload a taxonomy table to the mysql database. Rows are keyed on SV;
#' only rows not already present are appended.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_taxonomy <- function(data, database = NULL, con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  # Preprocess data for sql here
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$SV)
  incoming_keys <- paste0(data$SV)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  RMariaDB::dbAppendTable(con, database, value = data[rows_to_send, ])
  message("Complete.")
}

#' A phylosql Function
#'
#' Upload cms data to the mysql database. Rows are keyed on MetagenNumber;
#' only rows not already present are appended.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_cms_data <- function(data, database = "cmsdata", con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$MetagenNumber)
  incoming_keys <- paste0(data$MetagenNumber)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  RMariaDB::dbAppendTable(con, database, value = data[rows_to_send, ])
  message("Complete.")
}

#' A phylosql Function
#'
#' Upload long-format cms data to the mysql database. Expects exactly three
#' columns and no NA values in `Level`; rows are keyed on
#' (MetagenNumber, Factor).
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_cms_data_Long <- function(data, database = "cmsdatalong", con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  if (ncol(data) != 3) {
    stop("This data is not the correct format")
  }
  if (any(is.na(data$Level))) {
    stop("Some cells contain NAs. 
Delete these and reattempt upload.")
  }
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$MetagenNumber, current$Factor)
  incoming_keys <- paste0(data$MetagenNumber, data$Factor)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  RMariaDB::dbAppendTable(con, database, value = data[rows_to_send, ])
  message("Complete.")
}

#' A phylosql Function
#'
#' Bulk-upload a long-format SV table to the mysql database (quickly).
#' NOTE(review): this closes `con` when done -- callers should not reuse it.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_bulk_sv <- function(data, database = NULL, con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  if (is.null(database)) {
    stop("You need to specify a database")
  }
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$MetagenNumber, current$SV)
  incoming_keys <- paste0(data$MetagenNumber, data$SV)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  uploadData(data = data[rows_to_send, ], database)
  message("Complete.")
  dbDisconnect(con)
}

#' A phylosql Function
#'
#' Bulk-upload a taxonomy table to the mysql database (quickly).
#' NOTE(review): this closes `con` when done -- callers should not reuse it.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_bulk_tax <- function(data, database = NULL, con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  if (is.null(database)) {
    stop("You need to specify a database")
  }
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$SV)
  incoming_keys <- paste0(data$SV)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  uploadData(data = data[rows_to_send, ], database)
  message("Complete.")
  dbDisconnect(con)
}

#' A phylosql Function
#'
#' A backend function for bulk uploading data to a mysql database. Writes
#' the frame to a temporary CSV and ingests it with LOAD DATA LOCAL INFILE,
#' then disconnects the connection.
#' @param data data to upload
#' @param tableName database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
uploadData <- function(data,                          # a data frame
                       tableName,                     # table name, possibly qualified (e.g. "my_db.customers")
                       con = get_mtgn_connection()) { # arguments to DBI::dbConnect
  csv_path <- tempfile(fileext = '.csv')
  csv_path <- normalizePath(csv_path, winslash = "/")
  query <- sprintf("LOAD DATA LOCAL INFILE '%s' INTO TABLE %s FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\n' IGNORE 1 LINES;",
                   csv_path, tableName)
  write.csv(data, csv_path, row.names = FALSE, quote = FALSE)
  #on.exit(file.remove(TEMPFILE))
  # SUBMIT THE UPDATE QUERY AND DISCONNECT
  RMariaDB::dbExecute(con, query)
  dbDisconnect(con)
}
/R/upload.R
no_license
metagen-Au/phylosql
R
false
false
7,065
r
#' A phylosql Function
#'
#' Upload lab data to the mysql database. Rows are keyed on
#' (MetagenNumber, variable); only rows not already present are appended.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_lab_data <- function(data, database = "labdata", con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$MetagenNumber, current$variable)
  incoming_keys <- paste0(data$MetagenNumber, data$variable)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  RMariaDB::dbAppendTable(con, database, value = data[rows_to_send, ])
  message("Complete.")
}

#' A phylosql Function
#'
#' Upload an SV table to the mysql database. Rows are keyed on
#' (MetagenNumber, SV); only rows not already present are appended.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_sv <- function(data, database = NULL, con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  # Preprocess data for sql here
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$MetagenNumber, current$SV)
  incoming_keys <- paste0(data$MetagenNumber, data$SV)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  RMariaDB::dbAppendTable(con, database, value = data[rows_to_send, ])
  message("Complete.")
}

#' A phylosql Function
#'
#' Upload a taxonomy table to the mysql database. Rows are keyed on SV;
#' only rows not already present are appended.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_taxonomy <- function(data, database = NULL, con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  # Preprocess data for sql here
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$SV)
  incoming_keys <- paste0(data$SV)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  RMariaDB::dbAppendTable(con, database, value = data[rows_to_send, ])
  message("Complete.")
}

#' A phylosql Function
#'
#' Upload cms data to the mysql database. Rows are keyed on MetagenNumber;
#' only rows not already present are appended.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_cms_data <- function(data, database = "cmsdata", con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$MetagenNumber)
  incoming_keys <- paste0(data$MetagenNumber)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  RMariaDB::dbAppendTable(con, database, value = data[rows_to_send, ])
  message("Complete.")
}

#' A phylosql Function
#'
#' Upload long-format cms data to the mysql database. Expects exactly three
#' columns and no NA values in `Level`; rows are keyed on
#' (MetagenNumber, Factor).
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_cms_data_Long <- function(data, database = "cmsdatalong", con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  if (ncol(data) != 3) {
    stop("This data is not the correct format")
  }
  if (any(is.na(data$Level))) {
    stop("Some cells contain NAs. 
Delete these and reattempt upload.")
  }
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$MetagenNumber, current$Factor)
  incoming_keys <- paste0(data$MetagenNumber, data$Factor)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  RMariaDB::dbAppendTable(con, database, value = data[rows_to_send, ])
  message("Complete.")
}

#' A phylosql Function
#'
#' Bulk-upload a long-format SV table to the mysql database (quickly).
#' NOTE(review): this closes `con` when done -- callers should not reuse it.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_bulk_sv <- function(data, database = NULL, con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  if (is.null(database)) {
    stop("You need to specify a database")
  }
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$MetagenNumber, current$SV)
  incoming_keys <- paste0(data$MetagenNumber, data$SV)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  uploadData(data = data[rows_to_send, ], database)
  message("Complete.")
  dbDisconnect(con)
}

#' A phylosql Function
#'
#' Bulk-upload a taxonomy table to the mysql database (quickly).
#' NOTE(review): this closes `con` when done -- callers should not reuse it.
#' @param data data to upload
#' @param database database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
upload_bulk_tax <- function(data, database = NULL, con = NULL) {
  if (is.null(con)) {
    stop("You need to specify a database connection")
  }
  if (is.null(database)) {
    stop("You need to specify a database")
  }
  current <- dplyr::as_tibble(dplyr::tbl(con, database))
  existing_keys <- paste0(current$SV)
  incoming_keys <- paste0(data$SV)
  rows_to_send <- which(!incoming_keys %in% existing_keys)
  stopifnot(length(rows_to_send) > 0)
  message(paste0("Uploading ", length(rows_to_send), " samples."))
  uploadData(data = data[rows_to_send, ], database)
  message("Complete.")
  dbDisconnect(con)
}

#' A phylosql Function
#'
#' A backend function for bulk uploading data to a mysql database. Writes
#' the frame to a temporary CSV and ingests it with LOAD DATA LOCAL INFILE,
#' then disconnects the connection.
#' @param data data to upload
#' @param tableName database to send data
#' @param con connection
#' @keywords
#' @import dplyr
#' @import RMariaDB
#' @export
#'
uploadData <- function(data,                          # a data frame
                       tableName,                     # table name, possibly qualified (e.g. "my_db.customers")
                       con = get_mtgn_connection()) { # arguments to DBI::dbConnect
  csv_path <- tempfile(fileext = '.csv')
  csv_path <- normalizePath(csv_path, winslash = "/")
  query <- sprintf("LOAD DATA LOCAL INFILE '%s' INTO TABLE %s FIELDS TERMINATED BY ',' LINES TERMINATED BY '\\n' IGNORE 1 LINES;",
                   csv_path, tableName)
  write.csv(data, csv_path, row.names = FALSE, quote = FALSE)
  #on.exit(file.remove(TEMPFILE))
  # SUBMIT THE UPDATE QUERY AND DISCONNECT
  RMariaDB::dbExecute(con, query)
  dbDisconnect(con)
}
# Install-and-load the packages we depend on.
if (!require(dplyr)) {
  install.packages("dplyr")
}
if (!require(mgsub)) {
  install.packages("mgsub")
}
library(dplyr)
library(mgsub)

# Checks if the dataset exists. If not, downloads and extracts it
if (!file.exists("data.zip")) {
  download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
                destfile = "data.zip")
  unzip("data.zip", exdir = "./data/")
}

# Read all tables into memory
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
s_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
s_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")

# Merge the tables into a single dataset
merged_data <- cbind(rbind(s_train, s_test),
                     rbind(y_train, y_test),
                     rbind(x_train, x_test))

# Remove unneeded temporary variables from memory
rm(x_test, x_train, y_test, y_train, s_test, s_train)

# Read in feature names and assign to columns
features <- read.table("./data/UCI HAR Dataset/features.txt")[[2]]
names(merged_data) <- c("Subject", "Activity", features)

# Extracts the indices for mean and std features and subset the data based off it
pattern <- "mean\\(\\)|std\\(\\)"
mean_std <- grep(pattern, names(merged_data), ignore.case = TRUE)
mean_std <- c(1, 2, mean_std)  # Include prefix binded Subject and Activity cols
merged_data <- merged_data[, mean_std]

# Replace numeric activity codes with descriptive activity names.
# Fixed: the original used lapply() with a `<<-` side effect, an anti-pattern
# that obscured a simple indexed lookup; indexing the label table by the
# activity code (rows are ordered 1..6) yields the same result directly.
act <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
merged_data$Activity <- act[merged_data$Activity, 2]

# Group data, performing mean operation on each Subject/Activity pair
tidy <- summarise_all(group_by(merged_data, Activity, Subject), mean)

# Remove unneeded variables
rm(merged_data, act, features, mean_std, pattern)

# Cleaning column names, making them more human readable
names(tidy) <- mgsub(names(tidy),
                     c("\\(", "\\)", "^t", "^f"),
                     c("", "", "Time", "Frequency"))

# Output to file
write.table(tidy, "tidy_data.txt")
/run_analysis.R
no_license
JulesV3rne/Coursera3Project
R
false
false
2,182
r
# Install-and-load the packages we depend on.
if (!require(dplyr)) {
  install.packages("dplyr")
}
if (!require(mgsub)) {
  install.packages("mgsub")
}
library(dplyr)
library(mgsub)

# Checks if the dataset exists. If not, downloads and extracts it
if (!file.exists("data.zip")) {
  download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
                destfile = "data.zip")
  unzip("data.zip", exdir = "./data/")
}

# Read all tables into memory
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
s_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
s_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")

# Merge the tables into a single dataset
merged_data <- cbind(rbind(s_train, s_test),
                     rbind(y_train, y_test),
                     rbind(x_train, x_test))

# Remove unneeded temporary variables from memory
rm(x_test, x_train, y_test, y_train, s_test, s_train)

# Read in feature names and assign to columns
features <- read.table("./data/UCI HAR Dataset/features.txt")[[2]]
names(merged_data) <- c("Subject", "Activity", features)

# Extracts the indices for mean and std features and subset the data based off it
pattern <- "mean\\(\\)|std\\(\\)"
mean_std <- grep(pattern, names(merged_data), ignore.case = TRUE)
mean_std <- c(1, 2, mean_std)  # Include prefix binded Subject and Activity cols
merged_data <- merged_data[, mean_std]

# Replace numeric activity codes with descriptive activity names.
# Fixed: the original used lapply() with a `<<-` side effect, an anti-pattern
# that obscured a simple indexed lookup; indexing the label table by the
# activity code (rows are ordered 1..6) yields the same result directly.
act <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
merged_data$Activity <- act[merged_data$Activity, 2]

# Group data, performing mean operation on each Subject/Activity pair
tidy <- summarise_all(group_by(merged_data, Activity, Subject), mean)

# Remove unneeded variables
rm(merged_data, act, features, mean_std, pattern)

# Cleaning column names, making them more human readable
names(tidy) <- mgsub(names(tidy),
                     c("\\(", "\\)", "^t", "^f"),
                     c("", "", "Time", "Frequency"))

# Output to file
write.table(tidy, "tidy_data.txt")
# Shiny app globals: package loads and the Mapbox access token.
library(shiny)
library(leaflet)
library(leaflet.mapboxgl)  # devtools::install_github("rstudio/leaflet.mapboxgl")
library(dplyr)
library(arrow)  # install.packages("arrow", repos = "https://packagemanager.rstudio.com/all/__linux__/focal/latest")
library(flexdashboard)
library(shinyjs)
library(waiter)

# NOTE(review): this token is committed in the clear; consider reading it
# from an environment variable instead.
options(mapbox.accessToken = "pk.eyJ1IjoibWR1YmVsIiwiYSI6ImNrNTgweTlwOTAweDczbXBneTJtNTA2Y2UifQ.kIHidFuI7ooK0KU5yigvqg")
/shiny/global.R
no_license
Appsilon/covid-hackathon
R
false
false
421
r
# Shiny app globals: package loads and the Mapbox access token.
library(shiny)
library(leaflet)
library(leaflet.mapboxgl)  # devtools::install_github("rstudio/leaflet.mapboxgl")
library(dplyr)
library(arrow)  # install.packages("arrow", repos = "https://packagemanager.rstudio.com/all/__linux__/focal/latest")
library(flexdashboard)
library(shinyjs)
library(waiter)

# NOTE(review): this token is committed in the clear; consider reading it
# from an environment variable instead.
options(mapbox.accessToken = "pk.eyJ1IjoibWR1YmVsIiwiYSI6ImNrNTgweTlwOTAweDczbXBneTJtNTA2Y2UifQ.kIHidFuI7ooK0KU5yigvqg")
################################################# 1) K-MEANS

# Total within-cluster variability for 1..20 clusters ("elbow" data).
#   x: data frame / matrix of numeric columns
# Returns a numeric vector wss where wss[k] is the total within-cluster
# sum of squares for k clusters (wss[1] is the un-clustered variability).
Cluster.Number = function(x) {
  # Check that every column is numeric.
  # Fixed: the original loop only ever inspected column 1 (x[,1]), broke out
  # of the loop on the first iteration, and merely print()-ed instead of
  # failing; now each column is checked and a real error is raised.
  for (i in 1:ncol(x)) {
    if (is.numeric(x[, i]) == FALSE) {
      stop('All elements from sample vectors must be numeric')
    }
  }
  # Initial (un-clustered) variability.
  wss = sum(apply(x, 2, var)) * (nrow(x) - 1)
  # Total within-cluster variability after clustering with 2..20 centers.
  for (i in 2:20) {
    wss[i] = sum(kmeans(x, centers = i, iter.max = 500)$withinss)
  }
  return(wss)
}

wss = Cluster.Number(x)

# Plot the variability-reduction curve.
#   vetor_de_cluster: vector of within-cluster variabilities
#   subtitulo:        plot subtitle
plotaLinhas = function(vetor_de_cluster, subtitulo) {
  # Convert the vector to a data frame.
  vetor_de_cluster = as.data.frame(vetor_de_cluster)
  # Number-of-clusters axis (one row per candidate cluster count).
  clusters = 1:nrow(vetor_de_cluster)
  vetor_de_cluster$clusters = clusters
  # Draw the line-and-point plot.
  ggplot(vetor_de_cluster, aes(x = clusters, y = vetor_de_cluster)) +
    geom_line() +
    geom_point() +
    ggtitle("Redução da variabilidade", subtitle = subtitulo) +
    ylab("Variabilidade") +
    xlab("Número de Clusters")
}

# Applying the K-MEANS algorithm
Kme = kmeans(x, centers = 5, iter.max = 100, algorithm = 'MacQueen', nstart = 4)
# Algorithm methods:
# a) Hartigan-Wong
# b) Lloyd
# c) Forgy
# d) MacQueen

# Printing data with the calculated clusters.
# Fixed: the original referenced `ncluster$cluster`, which is never defined
# anywhere in this script; the fitted object is `Kme`.
data.frame(x, Kme$cluster)
/Clusterizacao_Nao_hierarquica-Kmeans.R
no_license
fabiorfc/Tecnicas-Agrupamento
R
false
false
1,499
r
################################################# 1) K-MEANS

# Total within-cluster variability for 1..20 clusters ("elbow" data).
#   x: data frame / matrix of numeric columns
# Returns a numeric vector wss where wss[k] is the total within-cluster
# sum of squares for k clusters (wss[1] is the un-clustered variability).
Cluster.Number = function(x) {
  # Check that every column is numeric.
  # Fixed: the original loop only ever inspected column 1 (x[,1]), broke out
  # of the loop on the first iteration, and merely print()-ed instead of
  # failing; now each column is checked and a real error is raised.
  for (i in 1:ncol(x)) {
    if (is.numeric(x[, i]) == FALSE) {
      stop('All elements from sample vectors must be numeric')
    }
  }
  # Initial (un-clustered) variability.
  wss = sum(apply(x, 2, var)) * (nrow(x) - 1)
  # Total within-cluster variability after clustering with 2..20 centers.
  for (i in 2:20) {
    wss[i] = sum(kmeans(x, centers = i, iter.max = 500)$withinss)
  }
  return(wss)
}

wss = Cluster.Number(x)

# Plot the variability-reduction curve.
#   vetor_de_cluster: vector of within-cluster variabilities
#   subtitulo:        plot subtitle
plotaLinhas = function(vetor_de_cluster, subtitulo) {
  # Convert the vector to a data frame.
  vetor_de_cluster = as.data.frame(vetor_de_cluster)
  # Number-of-clusters axis (one row per candidate cluster count).
  clusters = 1:nrow(vetor_de_cluster)
  vetor_de_cluster$clusters = clusters
  # Draw the line-and-point plot.
  ggplot(vetor_de_cluster, aes(x = clusters, y = vetor_de_cluster)) +
    geom_line() +
    geom_point() +
    ggtitle("Redução da variabilidade", subtitle = subtitulo) +
    ylab("Variabilidade") +
    xlab("Número de Clusters")
}

# Applying the K-MEANS algorithm
Kme = kmeans(x, centers = 5, iter.max = 100, algorithm = 'MacQueen', nstart = 4)
# Algorithm methods:
# a) Hartigan-Wong
# b) Lloyd
# c) Forgy
# d) MacQueen

# Printing data with the calculated clusters.
# Fixed: the original referenced `ncluster$cluster`, which is never defined
# anywhere in this script; the fitted object is `Kme`.
data.frame(x, Kme$cluster)
library(jaatha)

### Name: create_jaatha_model.function
### Title: Specify a jaatha model using a simulation function
### Aliases: create_jaatha_model.function

### ** Examples

# Model built from a Poisson simulator: two parameters, each ranging over
# [0.1, 10], summarised by the plain sum of the simulated draws.
create_jaatha_model(
  function(x) rpois(10, x),
  par_ranges = matrix(c(0.1, 0.1, 10, 10), 2, 2),
  sum_stats = list(create_jaatha_stat("sum", sum))
)
/data/genthat_extracted_code/jaatha/examples/create_jaatha_model.function.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
366
r
library(jaatha)

### Name: create_jaatha_model.function
### Title: Specify a jaatha model using a simulation function
### Aliases: create_jaatha_model.function

### ** Examples

# Model built from a Poisson simulator: two parameters, each ranging over
# [0.1, 10], summarised by the plain sum of the simulated draws.
create_jaatha_model(
  function(x) rpois(10, x),
  par_ranges = matrix(c(0.1, 0.1, 10, 10), 2, 2),
  sum_stats = list(create_jaatha_stat("sum", sum))
)
####################################################
# computing standard deviation in the age at death #
# including an attempt at smoothing and            #
# interpolation of sub-integer ages                #
# TOMCOD project                                   #
# A.Remund February 2018                           #
####################################################

#### set working directory ####

if (Sys.info()["nodename"] == "CD-5VV9QK2") {
  setwd("D:/switchdrive/SNC 2.0/TOMCOD")
}

#### load data ####

library(HMDHFDplus)
data <- readHMDweb(CNTRY = "CHE", item = "mltper_1x1",
                   username = "", password = "")
data <- data[, c("Year", "Age", "dx")]

#### interpolate ####
# needs to be fixed

mat <- matrix(data$dx, nrow = length(unique(data$Age)))
library(MortalitySmooth)
fit <- Mort2Dsmooth(x = unique(data$Age), y = unique(data$Year), Z = mat,
                    method = 3, lambdas = c(1, 1))
plot(fit)
xx <- seq(0, max(data$Age), 0.1)
mat <- predict(fit, type = "response",
               newdata = list(y = unique(data$Year), x = xx)) / 10
# something wrong with interpolation => missing values created
data_interpol <- data.frame(Year = rep(unique(data$Year), each = nrow(mat)),
                            Age = rep(xx, times = ncol(mat)))
data_interpol$dx <- as.vector(mat)
matplot(mat, type = "l")

#### function ####

# Standard deviation of the age at death from a (Year, Age, dx) table.
#   smooth: spline-smooth the dx column first
#   plot:   draw the dx profile (and any truncation point)
#   trun:   numeric lower age bound, or "mode" to truncate at the modal age
#           (searched above age 5, skipping the infant-mortality peak)
#   inter:  numeric grid of ages at which to evaluate the smoothed dx
sdfun <- function(x, smooth = FALSE, plot = FALSE, trun = 0, inter = FALSE) {
  if (plot == TRUE) {
    plot(x$Age, x$dx, las = 1, xlab = "age", ylab = "dx")
  }
  if (is.numeric(trun)) {
    x <- x[x$Age >= trun, ]
    if (plot == TRUE) {
      abline(v = trun)
    }
  }
  if (smooth == TRUE) {
    if (is.numeric(inter)) {
      dx <- predict(smooth.spline(x = x$Age, y = x$dx,
                                  control.spar = list(low = 0.3, high = 1)),
                    x = inter)$y
      x <- data.frame(Year = rep(x$Year, length(inter)), Age = inter)
      x$dx <- dx  #* c(diff(inter),1)
    } else {
      x$dx <- predict(smooth.spline(x = x$Age, y = x$dx,
                                    control.spar = list(low = 0.3, high = 1)))$y
    }
    if (plot == TRUE) {
      lines(x$Age, x$dx)
    }
  }
  if (trun == "mode") {
    # Walk down the dx values in decreasing order until the first mode
    # above age 5.
    ranks <- order(x$dx, decreasing = TRUE)
    i <- 1
    idx <- ranks[i]
    mode <- x$Age[idx]
    while (mode < 5) {
      i <- i + 1
      idx <- ranks[i]
      mode <- x$Age[idx]
    }
    x <- x[x$Age >= mode, ]
    if (plot == TRUE) {
      abline(v = mode)
    }
  }
  # dx-weighted mean and standard deviation of age at death.
  mean <- sum(x$Age * x$dx) / sum(x$dx)
  sdev <- sqrt(sum((x$Age - mean)^2 * x$dx) / sum(x$dx))
  return(sdev)
}

#### apply ####

sdfun(data[data$Year == 1942, ], smooth = TRUE, plot = TRUE, trun = "mode",
      inter = seq(0, 110, 0.1))

# NOTE(review): `trun = trun` references a variable that is only defined by
# the comparison loop further down -- confirm the intended value here.
sd <- by(data = data, INDICES = data$Year, FUN = sdfun, trun = trun,
         smooth = TRUE, inter = seq(0, 110, 0.1))
plot(unique(data$Year), sd, type = "l", xlab = "", ylab = "sd", las = 1)

#### comparison ####

pdf("Output/comparison.pdf", width = 10, height = 5)
par(mfrow = c(1, 2))
for (trun in c(0, "mode")) {
  plot(1, 1, type = "n",
       ylim = c(ifelse(trun == 0, 10, 3), ifelse(trun == 0, 35, 6)),
       xlim = c(1900, 2020), xlab = "", ylab = "sd", las = 1,
       main = paste("truncated at", trun))
  for (ctry in c("CHE", "NLD")) {
    cat("\n", ctry)
    for (sex in c("m", "f")) {
      cat("\n", "-", sex)
      data <- readHMDweb(CNTRY = ctry, item = paste(sex, "ltper_1x1", sep = ""),
                         username = "", password = "")
      data <- data[, c("Year", "Age", "dx")]
      sd <- by(data = data, INDICES = data$Year, FUN = sdfun, trun = trun,
               smooth = TRUE, inter = c(seq(0, 110, 0.1)))
      col <- ifelse(sex == "m", "blue", "red")
      lty <- ifelse(ctry == "CHE", 1, 2)
      lines(unique(data$Year), sd, col = col, lty = lty)
    }
  }
  legend("topright",
         legend = c("CH females", "CH males", "NL females", "NL males"),
         lty = c(1, 1, 2, 2), col = c("red", "blue", "red", "blue"))
}
par(mfrow = c(1, 1))
dev.off()
/sd.R
no_license
mvoigt87/SD_Swiss
R
false
false
3,887
r
#################################################### # computing standard deviation in the age at death # # including an attempt at smoothing and # # interpolation of sub-integer ages # # TOMCOD project # # A.Remund February 2018 # #################################################### #### set working directory #### if(Sys.info()["nodename"] == "CD-5VV9QK2"){ setwd("D:/switchdrive/SNC 2.0/TOMCOD") } #### load data #### library(HMDHFDplus) data <- readHMDweb(CNTRY = "CHE", item = "mltper_1x1", username = "", password = "") data <- data[,c("Year","Age","dx")] #### interpolate #### # needs to be fixed mat <- matrix(data$dx, nrow = length(unique(data$Age))) library(MortalitySmooth) fit <- Mort2Dsmooth(x = unique(data$Age), y = unique(data$Year), Z = mat, method = 3, lambdas = c(1,1)) plot(fit) xx <- seq(0,max(data$Age),0.1) mat <- predict(fit, type = "response", newdata = list(y = unique(data$Year), x = xx)) / 10 # something wrong with interpolation => missing values created data_interpol <- data.frame(Year = rep(unique(data$Year), each = nrow(mat)), Age = rep(xx, times = ncol(mat))) data_interpol$dx <- as.vector(mat) matplot(mat, type = "l") #### function #### sdfun <- function(x, smooth = FALSE, plot = FALSE, trun = 0, inter = FALSE){ if(plot == TRUE){ plot(x$Age, x$dx, las = 1, xlab = "age", ylab = "dx") } if(is.numeric(trun)){ x <- x[x$Age >= trun,] if(plot == TRUE){ abline(v = trun) } } if(smooth == TRUE){ if(is.numeric(inter)){ dx <- predict(smooth.spline(x = x$Age, y = x$dx, control.spar = list(low = 0.3, high = 1)), x = inter)$y x <- data.frame(Year = rep(x$Year,length(inter)), Age = inter) x$dx <- dx #* c(diff(inter),1) }else{ x$dx <- predict(smooth.spline(x = x$Age, y = x$dx, control.spar = list(low = 0.3, high = 1)))$y } if(plot == TRUE){ lines(x$Age, x$dx) } } if(trun == "mode"){ ranks <- order(x$dx, decreasing = TRUE) i <- 1 idx <- ranks[i] mode <- x$Age[idx] while(mode < 5){ i <- i + 1 idx <- ranks[i] mode <- x$Age[idx] } x <- x[x$Age >= mode,] if(plot == 
TRUE){ abline(v = mode) } } mean <- sum(x$Age * x$dx) / sum(x$dx) sdev <- sqrt(sum((x$Age - mean)^2 * x$dx) / sum(x$dx)) return(sdev) } #### apply #### sdfun(data[data$Year == 1942,], smooth = TRUE, plot = TRUE, trun = "mode", inter = seq(0,110,0.1)) sd <- by(data = data, INDICES = data$Year, FUN = sdfun, trun = trun, smooth = TRUE, inter = seq(0,110,0.1)) plot(unique(data$Year), sd, type = "l", xlab = "", ylab = "sd", las = 1) #### comparison #### pdf("Output/comparison.pdf", width = 10, height = 5) par(mfrow = c(1,2)) for(trun in c(0,"mode")){ plot(1, 1, type = "n", ylim = c(ifelse(trun == 0, 10, 3),ifelse(trun == 0, 35, 6)), xlim = c(1900, 2020), xlab = "", ylab = "sd", las = 1, main = paste("truncated at",trun)) for(ctry in c("CHE","NLD")){ cat("\n",ctry) for(sex in c("m","f")){ cat("\n","-",sex) data <- readHMDweb(CNTRY = ctry, item = paste(sex,"ltper_1x1",sep = ""), username = "", password = "") data <- data[,c("Year","Age","dx")] sd <- by(data = data, INDICES = data$Year, FUN = sdfun, trun = trun, smooth = TRUE, inter = c(seq(0,110,0.1))) col <- ifelse(sex == "m", "blue", "red") lty <- ifelse(ctry == "CHE", 1, 2) lines(unique(data$Year), sd, col = col, lty = lty) } } legend("topright", legend = c("CH females", "CH males", "NL females", "NL males"), lty = c(1,1,2,2), col = c("red","blue","red","blue")) } par(mfrow = c(1,1)) dev.off()
#' Calculate Area Under the Moment Curve Using Trapezoids #' #' Calculates AUMC using the trapezoidal method. #' Assumes data represent a single profile. #' Despite choice of method, only linear interpolation is used #' for areas of intervals beginning or ending with y: 0. #' @param x variable names of x coordinates #' @param y variable names of y coordinates #' @importFrom dplyr first lag #' @param method method: #' * 1: linear up - linear down #' * 2: linear up - logarithmic down #' * 3: linear before Tmax, logarithmic after Tmax #' @return area (length-one numeric) trapm=function(x=NA, y=NA, method=1) { cm=max(y,na.rm=T) tmax=first(x[y==cm&!is.na(y)]) if (method==1) { z= sum((x-lag(x))*(y*x+lag(y)*lag(x))/2,na.rm=T) } if (method==2) { z= sum(ifelse(lag(y)>y&lag(y)>0&y>0, (x-lag(x))*(y*x-lag(y)*lag(x))/log(y/lag(y))-(x-lag(x))^2*(y-lag(y))/(log(y/lag(y)))^2, (x-lag(x))*(y*x+lag(y)*lag(x))/2 ),na.rm=T) } if (method==3) { z= sum(ifelse(x>tmax&lag(y)>0&y>0&lag(y)!=y, (x-lag(x))*(y*x-lag(y)*lag(x))/log(y/lag(y))-(x-lag(x))^2*(y-lag(y))/(log(y/lag(y)))^2, (x-lag(x))*(y*x+lag(y)*lag(x))/2 ),na.rm=T) } return(z) }
/R/trapm.r
no_license
cran/qpNCA
R
false
false
1,284
r
#' Calculate Area Under the Moment Curve Using Trapezoids #' #' Calculates AUMC using the trapezoidal method. #' Assumes data represent a single profile. #' Despite choice of method, only linear interpolation is used #' for areas of intervals beginning or ending with y: 0. #' @param x variable names of x coordinates #' @param y variable names of y coordinates #' @importFrom dplyr first lag #' @param method method: #' * 1: linear up - linear down #' * 2: linear up - logarithmic down #' * 3: linear before Tmax, logarithmic after Tmax #' @return area (length-one numeric) trapm=function(x=NA, y=NA, method=1) { cm=max(y,na.rm=T) tmax=first(x[y==cm&!is.na(y)]) if (method==1) { z= sum((x-lag(x))*(y*x+lag(y)*lag(x))/2,na.rm=T) } if (method==2) { z= sum(ifelse(lag(y)>y&lag(y)>0&y>0, (x-lag(x))*(y*x-lag(y)*lag(x))/log(y/lag(y))-(x-lag(x))^2*(y-lag(y))/(log(y/lag(y)))^2, (x-lag(x))*(y*x+lag(y)*lag(x))/2 ),na.rm=T) } if (method==3) { z= sum(ifelse(x>tmax&lag(y)>0&y>0&lag(y)!=y, (x-lag(x))*(y*x-lag(y)*lag(x))/log(y/lag(y))-(x-lag(x))^2*(y-lag(y))/(log(y/lag(y)))^2, (x-lag(x))*(y*x+lag(y)*lag(x))/2 ),na.rm=T) } return(z) }
# S3 contour method for "TPCmsm" objects.
# Draws one contour panel per selected transition, either on the
# time x covariate plane ("tc") or covariate x time ("ct"); when
# conf.int = TRUE, extra panel rows show the lower (x$inf) and upper
# (x$sup) confidence surfaces. Overall titles/labels are drawn once,
# on a final full-device overlay.
contour.TPCmsm <- function(x, contour.type="tc", tr.choice, nlevels=20,
	levels=pretty(zlim, nlevels), xlim, ylim, zlim=c(0, 1), col=grey(0.4),
	xlab, ylab, main="", sub="", add=FALSE, las=1, conf.int=FALSE,
	legend=TRUE, curvlab, ...) {
	# ---- argument validation ----
	if ( !inherits(x, "TPCmsm") ) {stop("'x' must be of class 'TPCmsm'");}
	if ( !( contour.type %in% c("tc", "ct") ) ) {stop("Argument 'contour.type' must be one of 'tc' or 'ct'");}
	# default to all transitions stored in the estimate array
	if ( missing(tr.choice) ) {tr.choice <- dimnames(x$est)[[3]];}
	lt <- length(tr.choice);
	if (sum( tr.choice %in% dimnames(x$est)[[3]] ) != lt) {stop("Argument 'tr.choice' and possible transitions must match");}
	if ( anyDuplicated(tr.choice) ) {stop("Argument 'tr.choice' must be unique");}
	# indices of the chosen transitions in the 3rd array dimension
	itr <- match( tr.choice, dimnames(x$est)[[3]] );
	if ( missing(curvlab) ) {curvlab <- tr.choice;}
	# ---- panel layout ----
	# 1 row of panels without CIs, 3 rows with (2*conf.int+1 uses
	# TRUE == 1); graphics state is restored on exit via on.exit()
	mat <- matrix(nrow=2*conf.int+1, ncol=lt);
	par.orig <- par( c("las", "mfrow", "new") );
	on.exit( par(par.orig) );
	if (conf.int) {
		# row 1: upper CI panels, row 2: estimates, row 3: lower CI
		# (panel numbers refer to plotting order below)
		mat[1,1:lt] <- (lt*2+1):(lt*3);
		mat[2,1:lt] <- 1:lt;
		mat[3,1:lt] <- (lt+1):(lt*2);
	} else {mat[1,1:lt] <- 1:lt;}
	layout( mat, widths=rep(1, lt) );
	par(las=las);
	# ---- point-estimate panels ----
	if (contour.type == "tc") {
		# time on the x axis, covariate on the y axis
		if ( missing(xlab) ) {xlab <- "Time";}
		if ( missing(ylab) ) {ylab <- "Covariate";}
		if ( missing(xlim) ) {xlim <- c(x$time[1], x$time[length(x$time)]);}
		if ( missing(ylim) ) {ylim <- c(x$covariate[1], x$covariate[length(x$covariate)]);}
		for ( i in seq_len(lt) ) {
			contour(x=x$time, y=x$covariate, z=x$est[,,itr[i]], nlevels=nlevels, levels=levels,
				xlim=xlim, ylim=ylim, zlim=zlim, col=col, add=add, ...);
			# per-panel title carries the transition label
			if (legend) {title(main=curvlab[i], sub="", xlab="", ylab="", ...);}
		}
	} else if (contour.type == "ct") {
		# covariate on the x axis, time on the y axis: transpose z
		if ( missing(xlab) ) {xlab <- "Covariate";}
		if ( missing(ylab) ) {ylab <- "Time";}
		if ( missing(xlim) ) {xlim <- c(x$covariate[1], x$covariate[length(x$covariate)]);}
		if ( missing(ylim) ) {ylim <- c(x$time[1], x$time[length(x$time)]);}
		for ( i in seq_len(lt) ) {
			contour(x=x$covariate, y=x$time, z=t(x$est[,,itr[i]]), nlevels=nlevels, levels=levels,
				xlim=xlim, ylim=ylim, zlim=zlim, col=col, add=add, ...);
			if (legend) {title(main=curvlab[i], sub="", xlab="", ylab="", ...);}
		}
	}
	# ---- confidence-band panels (only if both bounds are available) ----
	if ( conf.int & !is.null(x$inf) & !is.null(x$sup) ) {
		if (contour.type == "tc") {
			for ( i in seq_len(lt) ) {
				contour(x=x$time, y=x$covariate, z=x$inf[,,itr[i]], nlevels=nlevels, levels=levels,
					xlim=xlim, ylim=ylim, zlim=zlim, col=col, add=add, ...);
			}
			for ( i in seq_len(lt) ) {
				contour(x=x$time, y=x$covariate, z=x$sup[,,itr[i]], nlevels=nlevels, levels=levels,
					xlim=xlim, ylim=ylim, zlim=zlim, col=col, add=add, ...);
			}
		} else if (contour.type == "ct") {
			for ( i in seq_len(lt) ) {
				contour(x=x$covariate, y=x$time, z=t(x$inf[,,itr[i]]), nlevels=nlevels, levels=levels,
					xlim=xlim, ylim=ylim, zlim=zlim, col=col, add=add, ...);
			}
			for ( i in seq_len(lt) ) {
				contour(x=x$covariate, y=x$time, z=t(x$sup[,,itr[i]]), nlevels=nlevels, levels=levels,
					xlim=xlim, ylim=ylim, zlim=zlim, col=col, add=add, ...);
			}
		}
	}
	# ---- overall annotation ----
	# restore the original layout, then overlay one invisible full-device
	# plot so title() writes the global main/sub/xlab/ylab once
	par(par.orig);
	par(new=TRUE);
	plot.new();
	title(main=main, sub=sub, xlab=xlab, ylab=ylab, cex.main=1.2, cex.lab=1.2, ...);
	invisible();
} # contour.TPCmsm
/R/contour.TPCmsm.R
no_license
cran/TPmsm
R
false
false
3,183
r
contour.TPCmsm <- function(x, contour.type="tc", tr.choice, nlevels=20, levels=pretty(zlim, nlevels), xlim, ylim, zlim=c(0, 1), col=grey(0.4), xlab, ylab, main="", sub="", add=FALSE, las=1, conf.int=FALSE, legend=TRUE, curvlab, ...) { if ( !inherits(x, "TPCmsm") ) {stop("'x' must be of class 'TPCmsm'");} if ( !( contour.type %in% c("tc", "ct") ) ) {stop("Argument 'contour.type' must be one of 'tc' or 'ct'");} if ( missing(tr.choice) ) {tr.choice <- dimnames(x$est)[[3]];} lt <- length(tr.choice); if (sum( tr.choice %in% dimnames(x$est)[[3]] ) != lt) {stop("Argument 'tr.choice' and possible transitions must match");} if ( anyDuplicated(tr.choice) ) {stop("Argument 'tr.choice' must be unique");} itr <- match( tr.choice, dimnames(x$est)[[3]] ); if ( missing(curvlab) ) {curvlab <- tr.choice;} mat <- matrix(nrow=2*conf.int+1, ncol=lt); par.orig <- par( c("las", "mfrow", "new") ); on.exit( par(par.orig) ); if (conf.int) { mat[1,1:lt] <- (lt*2+1):(lt*3); mat[2,1:lt] <- 1:lt; mat[3,1:lt] <- (lt+1):(lt*2); } else {mat[1,1:lt] <- 1:lt;} layout( mat, widths=rep(1, lt) ); par(las=las); if (contour.type == "tc") { if ( missing(xlab) ) {xlab <- "Time";} if ( missing(ylab) ) {ylab <- "Covariate";} if ( missing(xlim) ) {xlim <- c(x$time[1], x$time[length(x$time)]);} if ( missing(ylim) ) {ylim <- c(x$covariate[1], x$covariate[length(x$covariate)]);} for ( i in seq_len(lt) ) { contour(x=x$time, y=x$covariate, z=x$est[,,itr[i]], nlevels=nlevels, levels=levels, xlim=xlim, ylim=ylim, zlim=zlim, col=col, add=add, ...); if (legend) {title(main=curvlab[i], sub="", xlab="", ylab="", ...);} } } else if (contour.type == "ct") { if ( missing(xlab) ) {xlab <- "Covariate";} if ( missing(ylab) ) {ylab <- "Time";} if ( missing(xlim) ) {xlim <- c(x$covariate[1], x$covariate[length(x$covariate)]);} if ( missing(ylim) ) {ylim <- c(x$time[1], x$time[length(x$time)]);} for ( i in seq_len(lt) ) { contour(x=x$covariate, y=x$time, z=t(x$est[,,itr[i]]), nlevels=nlevels, levels=levels, xlim=xlim, ylim=ylim, 
zlim=zlim, col=col, add=add, ...); if (legend) {title(main=curvlab[i], sub="", xlab="", ylab="", ...);} } } if ( conf.int & !is.null(x$inf) & !is.null(x$sup) ) { if (contour.type == "tc") { for ( i in seq_len(lt) ) { contour(x=x$time, y=x$covariate, z=x$inf[,,itr[i]], nlevels=nlevels, levels=levels, xlim=xlim, ylim=ylim, zlim=zlim, col=col, add=add, ...); } for ( i in seq_len(lt) ) { contour(x=x$time, y=x$covariate, z=x$sup[,,itr[i]], nlevels=nlevels, levels=levels, xlim=xlim, ylim=ylim, zlim=zlim, col=col, add=add, ...); } } else if (contour.type == "ct") { for ( i in seq_len(lt) ) { contour(x=x$covariate, y=x$time, z=t(x$inf[,,itr[i]]), nlevels=nlevels, levels=levels, xlim=xlim, ylim=ylim, zlim=zlim, col=col, add=add, ...); } for ( i in seq_len(lt) ) { contour(x=x$covariate, y=x$time, z=t(x$sup[,,itr[i]]), nlevels=nlevels, levels=levels, xlim=xlim, ylim=ylim, zlim=zlim, col=col, add=add, ...); } } } par(par.orig); par(new=TRUE); plot.new(); title(main=main, sub=sub, xlab=xlab, ylab=ylab, cex.main=1.2, cex.lab=1.2, ...); invisible(); } # contour.TPCmsm
#' replace.range #' #' I don't recall what this was supposed to do. I am sure it was a good idea. #' # @export replace.range <- function() NULL
/R/replace.range.R
no_license
decisionpatterns/monotone
R
false
false
149
r
#' replace.range #' #' I don't recall what this was supposed to do. I am sure it was a good idea. #' # @export replace.range <- function() NULL
#load data div <- div_data(newdata) train = newdata[div,] print(table(train$V45)) test = newdata[-div,] print(table(test$V45)) train = newdata test = newdata library(randomForest) s = randomForest(fs[[1]],data = train) p = predict(s,test) plot(test$V45,col=1,pch=2) points(p,col=3,pch=20) library(ada) control <- rpart.control(cp = -1, maxdepth = 14,maxcompete = 1,xval = 0) gen1 <- ada(V45~., data = train[c(-1,-2,-3)], test.x = test[,-45], test.y = test[,45], type = "gentle", control = control, iter = 70) p <- predict(gen1,test) confusionMatrix(p,test$V45) summary(gen1) varplot(gen1) library(kernlab) s = ksvm(fs[[1]], data = train, type = "C-bsvc", kernel = "rbfdot", kpar = list(sigma = 0.1), C = 10, prob.model = TRUE) p = predict(s,newdata=test) confusionMatrix(p,test$V45) #lm s = lm(f1,data = train) p = predict(s,test) pos = nrow(health)/nrow(newdata) p[p>=pos] = 1 p[p<pos] = 0 confusionMatrix(p,test$V45) plot(test$V45,col=1,pch=2) points(p,col=3,pch=20) library(RSofia) s <- sofia(fs[[1]],data = newdata,loop_type="balanced-stochastic") p = predict(s,newdata=test) confusionMatrix(p,test$V45)
/single.R
no_license
diegozeng/ECG
R
false
false
1,160
r
#load data div <- div_data(newdata) train = newdata[div,] print(table(train$V45)) test = newdata[-div,] print(table(test$V45)) train = newdata test = newdata library(randomForest) s = randomForest(fs[[1]],data = train) p = predict(s,test) plot(test$V45,col=1,pch=2) points(p,col=3,pch=20) library(ada) control <- rpart.control(cp = -1, maxdepth = 14,maxcompete = 1,xval = 0) gen1 <- ada(V45~., data = train[c(-1,-2,-3)], test.x = test[,-45], test.y = test[,45], type = "gentle", control = control, iter = 70) p <- predict(gen1,test) confusionMatrix(p,test$V45) summary(gen1) varplot(gen1) library(kernlab) s = ksvm(fs[[1]], data = train, type = "C-bsvc", kernel = "rbfdot", kpar = list(sigma = 0.1), C = 10, prob.model = TRUE) p = predict(s,newdata=test) confusionMatrix(p,test$V45) #lm s = lm(f1,data = train) p = predict(s,test) pos = nrow(health)/nrow(newdata) p[p>=pos] = 1 p[p<pos] = 0 confusionMatrix(p,test$V45) plot(test$V45,col=1,pch=2) points(p,col=3,pch=20) library(RSofia) s <- sofia(fs[[1]],data = newdata,loop_type="balanced-stochastic") p = predict(s,newdata=test) confusionMatrix(p,test$V45)
###
# This script takes a shapefile polygon extent and a directory containing
# a bunch of NEON H5 files and returns the flightlines that INTERSECT the
# polygon boundary.
###

# library(devtools)
# install from github
# install_github("lwasser/neon-aop-package/neonAOP")

# load libraries
library(sp)
library(rgeos)
library(rhdf5)
# BUG FIX: readOGR()/writeOGR() come from rgdal, which was never loaded.
# NOTE(review): crs()/crs<- look like raster-package functions -- confirm
# they are re-exported by neonAOP, otherwise add library(raster) too.
library(rgdal)
library(neonAOP)

########## Inputs #####
# Other site configurations (uncomment one block to switch sites):
# site <- "TEAK"; domain <- "D17"; fullDomain <- "D17-California"; year <- "2013"
# site <- "OSBS"; domain <- "D03"; fullDomain <- "D03-Florida";    year <- "2014"
# site <- "SJER"; domain <- "D17"; fullDomain <- "D17-California"; year <- "2013"

## SOAP Clip -- the site processed by this run
site <- "SOAP"
domain <- "D17"
fullDomain <- "D17-California"
year <- "2013"

#### Identify where the shapefile is located on your hard drive
### NOTE: change these to the polygon that contains your crop extent
# define clip file name (this could be a plot)
clipFile <- paste0(site, "_crop")
# shapefile directory
clipFilePath <- file.path("neonData", fullDomain, site, "vector_data")

##### Define your hard drive path.
##### Macs often use "Volumes" for an external hard drive;
##### Windows often uses a letter ("D:" etc.)
drivePath <- "Volumes"
driveName <- "My Passport"

########## Reflectance data variables based upon the hard drive structure
dataProduct <- "Reflectance"
dataType <- "Spectrometer"
level <- paste0(site, "_L1")
productType <- paste0(site, "_", dataType)

### Define the data directory where the h5 files are stored
dataDir <- file.path(drivePath, driveName, domain, site, year,
                     level, productType, dataProduct)
# you only need this if you are using a mac
dataDir <- paste0("/", dataDir)

# get a list of all files in the dir
# if this variable returns no values, your dataDir is wrong OR
# the data dir doesn't have any H5 files in it
h5.files <- list.files(dataDir, pattern = '\\.h5$', full.names = TRUE)

#### Import shapefile / clipping spatial object ####
# note: this could be replaced with a spatial polygon rather than a shapefile
clip.polygon <- readOGR(clipFilePath, clipFile)

####################### Check Extent Function ###########################
# Check whether an H5 file's footprint intersects a clipping polygon.
# Inputs:
#   h5.extent: the spatial extent of the h5 file
#   clipShp:   a spatial polygon object (same CRS assumed!)
# Returns TRUE when the extents intersect, FALSE otherwise.
checkExtent <- function(h5.extent, clipShp) {
  # create polygon extent, assign CRS to extent
  h5.extent.sp <- as(h5.extent, "SpatialPolygons")
  # BUG FIX: the original assigned the CRS to an undefined object
  # 'rasterExtPoly' and intersected against the global 'clip.polygon'
  # instead of the 'clipShp' argument, so the function could never run
  # and ignored its own parameter.
  # note: this ASSUMES the extent and the h5 file are in the same CRS
  crs(h5.extent.sp) <- crs(clipShp)
  gIntersects(h5.extent.sp, clipShp)
}

################ Write Extent Shapefiles Function ########################
# Export one H5 flightline's footprint as a shapefile.
# Inputs:
#   f:        path to an h5 file that you'd like an extent for
#   shpDir:   output directory for the shapefile
#   proj4Str: proj4-formatted CRS string of the H5 file
# NOTE: proj4Str NEEDS to be the same projection as your h5 file.
write_shapefile_bound <- function(f, shpDir, proj4Str) {
  h5.extent <- create_extent(f)
  # create polygon extent, assign CRS to extent
  h5.extent.sp <- as(h5.extent, "SpatialPolygons")
  # create data.frame, add the name of the file to the shapefile
  sp.df <- data.frame(id = basename(f))
  sp.obj <- SpatialPolygonsDataFrame(h5.extent.sp, sp.df)
  # assign CRS
  crs(sp.obj) <- CRS(proj4Str)
  # create shapefile output name (file name without the .h5 suffix)
  outName <- gsub(pattern = ".h5", x = basename(f), replacement = "")
  writeOGR(sp.obj,
           shpDir, # path to export to
           outName,
           driver = "ESRI Shapefile",
           overwrite_layer = TRUE)
}

##################### Run Export Polygon Boundary for Each Flightline ####
# export extent polygon for all flightlines
proj4Str <- "+proj=utm +zone=11 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs"
shpDir <- paste0("exports/", site, "_flightLines")
sapply(h5.files, write_shapefile_bound, proj4Str = proj4Str, shpDir = shpDir)
#################### End Export Polygon Boundary for Each Flightline #####

##################### Find Flightlines that Intersect a Spatial Extent ###
# Return the subset of H5 files whose footprints intersect a polygon.
# Inputs:
#   h5.files:     character vector of h5 file paths to review
#   clip.polygon: the clipping polygon spatial object
# Returns a character vector of intersecting file paths (NA when none).
find_intersect_h5 <- function(h5.files, clip.polygon) {
  recordRaster <- NA
  i <- 0
  # the loop below collects the files that have overlapping extents
  for (afile in h5.files) {
    # get extent of h5 file and turn it into a polygon
    h5Extent <- create_extent(afile)
    h5.poly <- as(h5Extent, "SpatialPolygons")
    # this is assuming both are in the same CRS!
    crs(h5.poly) <- crs(clip.polygon)
    # check to see if the polygons overlap
    if (gIntersects(h5.poly, clip.polygon)) {
      i <- i + 1
      recordRaster[i] <- afile
    } else {
      print("not in")
    }
  }
  recordRaster
}

## Loop through all of the H5 files and return a list of what's "in"
intersect_files <- find_intersect_h5(h5.files, clip.polygon)

################### Next -- see if you can loop through polygons ####
# for this to work the shapefile has to have an "Id" field numbered 1-n
soap.plots <- readOGR("exports/SOAP_subsets_poly/", "SOAP_subsets")
# clip.file <- soap.plots[soap.plots$Id == 1,]
final.files <- vector("list", length(soap.plots))

# Generate, for each polygon in the shapefile, the list of flightlines
# that intersect it (Id field must be numbered 1-n).
for (i in 1:length(soap.plots)) {
  clip.file <- soap.plots[soap.plots$Id == i, ]
  final.files[[i]] <- find_intersect_h5(h5.files, clip.file)
}

first.box <- soap.plots[soap.plots$Id == 1, ]
first.box
summary(soap.plots$Id)
/scripts/neon-create-aop-subset-master/find-aopHSI-flightlines.R
no_license
tresmont/neon_final_project
R
false
false
6,435
r
### # this code takes a shapefile polygon extent and a directory containging # a bunch of NEON H5 files and returns the flightlines that INTERSECT the # polygon boundary ### # library(devtools) # install from github # install_github("lwasser/neon-aop-package/neonAOP") # load libraries library(sp) library(rgeos) library(rhdf5) library(neonAOP) ########## Inputs ##### # shapefile path and name # shape.path <- "NEONdata/D17-California/SJER/vector_data" # clipFile <- "sjer_clip_extent" ########## Inputs ##### ### TEAK Clip #the name of the site # site <- "TEAK" # domain <- "D17" # fullDomain <- "D17-California" # year <- "2013" #### OSBS Clip # # the name of the site # site <- "OSBS" # domain <- "D03" # fullDomain <- "D03-Florida" # year <- "2014" ## SOAP Clip # the name of the site site <- "SOAP" domain <- "D17" fullDomain <- "D17-California" year <- "2013" ### SJER clip # site <- "SJER" # domain <- "D17" # fullDomain <- "D17-California" # year <- "2013" #### Identify where the shapefile is located on your hard drive ### NOTE: change these to the polygon that contains your crop extent # define clip file name (this could be a plot) clipFile <- paste0(site, "_crop") # import shapefile clipFilePath <- file.path("neonData", fullDomain, site, "vector_data") ##### Define your hard drive path. 
Mac's often use "volumes" for an external hard drive ##### Windows often use a letter "D:" etc drivePath <- "Volumes" driveName <- "My Passport" ########## Reflectance data variables based upon the hard drive structure dataProduct <- "Reflectance" dataType <- "Spectrometer" level <- paste0(site,"_L1") productType <- paste0(site,"_", dataType) ### Define the data directory where the h5 files are stored dataDir <- file.path(drivePath, driveName, domain, site, year, level, productType, dataProduct) # you only need this is you are using a mac dataDir <- paste0("/", dataDir) # get a list of all files in the dir # if this variable returns no values, that means your dataDir is wrong OR # the data dir doesn't have any information in it. h5.files <- list.files(dataDir, pattern = '\\.h5$', full.names = TRUE) #### Import shapefile / clipping spatial object #### # note this could be replaced with a spatial polygon rather than importing a shapefile clip.polygon <- readOGR(clipFilePath, clipFile) ####################### Check Extent Function ########################### # this function checks to see if a raster falls within a spatial extent # inputs: # h5.extent: the spatial extent of the h5 file # clipShp: a spatial polygon object checkExtent <- function(h5.extent, clipShp){ # create polygon extent assign CRS to extent h5.extent.sp <- as(h5.extent, "SpatialPolygons") # note this is ASSUMING both the extent and the h5 file are in the same CRS crs(rasterExtPoly) <- crs(clip.polygon) # check to see if the polygons overlap # return a boolean (1= the raster contains pixels within the extent, 0 it doesn't) return(gIntersects(h5.extent.sp, clip.polygon)) } ################ Write Extent Shapefiles Function ######################## # Inputs: # f: a path to an h5 file that you'd like an extent for. # shpDir: path to the output directory where you want to store the data # projf4Str: the proj4 formated string of the CRS that the H5 file is in. 
# NOTE: proj4 NEEDS to be in the same proj as your h5 file write_shapefile_bound <- function(f, shpDir, proj4Str){ # create shapefileName # output h5.extent <- create_extent(f) # create polygon extent assign CRS to extent h5.extent.sp <- as(h5.extent, "SpatialPolygons") # create data.frame, add the name of the file to the shapefile sp.df <- data.frame(id=basename(f)) sp.obj <- SpatialPolygonsDataFrame(h5.extent.sp, sp.df) # assign CRS crs(sp.obj) <- CRS(proj4Str) # create shapefile output name outName <- gsub(pattern = ".h5", x = basename(f), replacement = "") writeOGR(sp.obj, shpDir, #path to export to outName, driver="ESRI Shapefile", overwrite_layer = TRUE) } ##################### Run Export Polygon Boundary for Each Flightline ############## # export extent polygon for all flightlines proj4Str <- "+proj=utm +zone=11 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs" shpDir <- paste0("exports/", site, "_flightLines") sapply(h5.files, write_shapefile_bound, proj4Str = proj4Str, shpDir = shpDir) #################### End Export Polygon Boundary for Each Flightline ########### ##################### Run Find Flightlines that Intersect With Spatial Extent ############## # # initalize counter and list object # h5.files: a list of h5 files (full paths) to review # clip.polygon: the clipping polygon spatial object that you want to find the h5 files that intersect it find_intersect_h5 <- function(h5.files, clip.polygon){ recordRaster <- NA i <- 0 # the loop below returns a LIST of the files that have overlapping extent for(afile in h5.files){ # get extent of h5 file h5Extent <- create_extent(afile) # turn into polygon extent object h5.poly <- as(h5Extent, "SpatialPolygons") # this is assuming both are in the same CRS! 
crs(h5.poly) <- crs(clip.polygon) # check to see if the polygons overlap if(gIntersects(h5.poly, clip.polygon)){ i <- i+1 recordRaster[i] <- afile } else { print("not in") } } return(recordRaster) } ## Loop through all of the H5 files and return a list of what's "in" intersect_files <- find_intersect_h5(h5.files, clip.polygon) ################### Next -- see if you can loop through polygons #### # for this to work the shapefile has to have an "Id" field with numbers 1-x soap.plots <- readOGR("exports/SOAP_subsets_poly/", "SOAP_subsets") #clip.file <- soap.plots[soap.plots$Id == 1,] final.files <- vector("list", length(soap.plots)) # This for loop generates a list of flightlines that intersect each polygon in a # shapefile. Note that the spatial object needs an Id field that is numbered 1-n for(i in 1:length(soap.plots)){ clip.file <- soap.plots[soap.plots$Id == i,] final.files[[i]] <- find_intersect_h5(h5.files, clip.file) } first.box <- soap.plots[soap.plots$Id == 1,] first.box summary(soap.plots$Id)
#' Fit model stack members with non-zero stacking coefficients
#'
#' @description
#' After evaluating a data stack with [blend_predictions()],
#' some number of candidates will have nonzero stacking
#' coefficients. Such candidates are referred to as "members."
#' Since members' predictions will ultimately inform the model
#' stack's predictions, members should be trained on the full
#' training set using `fit_members()`.
#'
#' @param model_stack A `model_stack` object outputted by [blend_predictions()].
#' @inheritParams stacks
#' @inheritParams blend_predictions
#' @return A `model_stack` object with a subclass `linear_stack`---this fitted
#' model contains the necessary components to predict on new data.
#'
#' @details
#' To fit members in parallel, please register a parallel backend function.
#' See the documentation of [foreach::foreach()] for examples.
#'
#' @template note_example_data
#'
#' @examples
#' \donttest{
#' # see the "Example Data" section above for
#' # clarification on the objects used in these examples!
#'
#' # put together a data stack
#' reg_st <-
#'   stacks() %>%
#'   add_candidates(reg_res_lr) %>%
#'   add_candidates(reg_res_svm) %>%
#'   add_candidates(reg_res_sp)
#'
#' reg_st
#'
#' # evaluate the data stack and fit the member models
#' reg_st %>%
#'   blend_predictions() %>%
#'   fit_members()
#'
#' reg_st
#'
#' # do the same with multinomial classification models
#' class_st <-
#'   stacks() %>%
#'   add_candidates(class_res_nn) %>%
#'   add_candidates(class_res_rf) %>%
#'   blend_predictions() %>%
#'   fit_members()
#'
#' class_st
#'
#' # ...or binomial classification models
#' log_st <-
#'   stacks() %>%
#'   add_candidates(log_res_nn) %>%
#'   add_candidates(log_res_rf) %>%
#'   blend_predictions() %>%
#'   fit_members()
#'
#' log_st
#' }
#'
#' @family core verbs
#' @export
fit_members <- function(model_stack, ...)
{
  # validate input is an unfitted model stack (warns/stops otherwise)
  check_model_stack(model_stack)
  dat <- model_stack[["train"]]
  # pick out which submodels have nonzero coefs
  member_names <-
    .get_glmn_coefs(
      model_stack[["coefs"]][["fit"]],
      model_stack[["coefs"]][["spec"]][["args"]][["penalty"]]
    ) %>%
    dplyr::filter(estimate != 0 & terms != "(Intercept)") %>%
    dplyr::pull(terms)
  if (model_stack[["mode"]] == "classification") {
    # classification coefficient names carry ".pred_<level>_" prefixes;
    # strip them and keep a dictionary mapping old -> sanitized names
    member_dict <- sanitize_classification_names(model_stack, member_names)
    member_names <- member_dict$new %>% unique()
  }
  # make model specs with the chosen parameters
  # for chosen sub-models
  metrics_dict <-
    tibble::enframe(model_stack[["model_metrics"]]) %>%
    tidyr::unnest(cols = value) %>%
    dplyr::mutate(.config = process_.config(.config, ., name = name))
  if (model_stack[["mode"]] == "regression") {
    # map member column names back to their workflow + tuning config
    members_map <-
      tibble::enframe(model_stack[["cols_map"]]) %>%
      tidyr::unnest(cols = value) %>%
      dplyr::full_join(metrics_dict, by = c("value" = ".config"))
  } else {
    # classification: route through the sanitized-name dictionary first
    members_map <-
      tibble::enframe(model_stack[["cols_map"]]) %>%
      tidyr::unnest(cols = value) %>%
      dplyr::full_join(member_dict, by = c("value" = "old")) %>%
      dplyr::filter(!is.na(new)) %>%
      dplyr::select(name, value = new) %>%
      dplyr::filter(!duplicated(.$value)) %>%
      dplyr::full_join(metrics_dict, by = c("value" = ".config"))
  }
  # use a parallel foreach operator only when a backend with workers
  # has actually been registered
  if (foreach::getDoParWorkers() > 1) {
    `%do_op%` <- foreach::`%dopar%`
  } else {
    `%do_op%` <- foreach::`%do%`
  }
  # fit each of them
  # (asNamespace() lookup keeps the unexported helper visible on
  # parallel workers)
  member_fits <-
    foreach::foreach(mem = member_names, .inorder = FALSE) %do_op% {
      asNamespace("stacks")$fit_member(
        name = mem,
        wflows = model_stack[["model_defs"]],
        members_map = members_map,
        train_dat = dat
      )
    }
  model_stack[["member_fits"]] <-
    setNames(member_fits, member_names)
  # return the stack only if it still passes the constructor checks
  if (model_stack_constr(model_stack)) {model_stack}
}

# fit one member of the ensemble
# name: sanitized member name; wflows: workflow definitions;
# members_map: name -> workflow/config map built in fit_members();
# train_dat: full training set. Returns the fitted workflow.
fit_member <- function(name, wflows, members_map, train_dat) {
  member_row <- members_map %>% dplyr::filter(value == name)
  # ids of any tuning parameters declared by the member's workflow
  member_params <-
    wflows[[member_row$name.x[1]]] %>%
    dials::parameters() %>%
    dplyr::pull(id)
  needs_finalizing <- length(member_params) != 0
  if (needs_finalizing) {
    # plug the selected tuning-parameter values into the workflow
    # before fitting
    member_metrics <-
      members_map %>%
      dplyr::filter(value == name) %>%
      dplyr::slice(1)
    member_wf <- wflows[[member_metrics$name.x]]
    new_member <-
      tune::finalize_workflow(member_wf, member_metrics[,member_params]) %>%
      generics::fit(data = train_dat)
  } else {
    # no tuning parameters: fit the workflow as-is
    member_model <-
      members_map %>%
      dplyr::filter(value == name) %>%
      dplyr::select(name.x) %>%
      dplyr::pull()
    new_member <- generics::fit(wflows[[member_model[1]]], data = train_dat)
  }
  new_member
}

# creates a map for column / entry names resulting
# from tuning in the classification setting:
# strips ".pred_<outcome level>_" prefixes from coefficient names and
# returns a tibble with columns old (original) and new (sanitized)
sanitize_classification_names <- function(model_stack, member_names) {
  outcome_levels <-
    model_stack[["train"]] %>%
    dplyr::select(!!.get_outcome(model_stack)) %>%
    dplyr::pull() %>%
    as.character() %>%
    unique()
  pred_strings <- paste0(".pred_", outcome_levels, "_")
  new_member_names <-
    gsub(
      pattern = paste0(pred_strings, collapse = "|"),
      x = member_names,
      replacement = ""
    )
  tibble::tibble(
    old = member_names,
    new = new_member_names
  )
}

# validate the fit_members() input: warn if members are already fitted,
# give a targeted error for a data stack, otherwise defer to the
# generic inherits check
check_model_stack <- function(model_stack) {
  if (inherits(model_stack, "model_stack")) {
    if (!is.null(model_stack[["member_fits"]])) {
      glue_warn(
        "The members in the supplied `model_stack` have already been fitted ",
        "and need not be fitted again."
      )
    }
    return(invisible(TRUE))
  } else if (inherits(model_stack, "data_stack")) {
    glue_stop(
      "The supplied `model_stack` argument is a data stack rather than ",
      "a model stack. Did you forget to first evaluate the ensemble's ",
      "stacking coefficients with `blend_predictions()`?"
    )
  } else {
    check_inherits(model_stack, "model_stack")
  }
}
/R/fit_members.R
permissive
YX577/stacks
R
false
false
6,075
r
#' Fit model stack members with non-zero stacking coefficients #' #' @description #' After evaluating a data stack with [blend_predictions()], #' some number of candidates will have nonzero stacking #' coefficients. Such candidates are referred to as "members." #' Since members' predictions will ultimately inform the model #' stack's predictions, members should be trained on the full #' training set using `fit_members()`. #' #' @param model_stack A `model_stack` object outputted by [blend_predictions()]. #' @inheritParams stacks #' @inheritParams blend_predictions #' @return A `model_stack` object with a subclass `linear_stack`---this fitted #' model contains the necessary components to predict on new data. #' #' @details #' To fit members in parallel, please register a parallel backend function. #' See the documentation of [foreach::foreach()] for examples. #' #' @template note_example_data #' #' @examples #' \donttest{ #' # see the "Example Data" section above for #' # clarification on the objects used in these examples! #' #' # put together a data stack #' reg_st <- #' stacks() %>% #' add_candidates(reg_res_lr) %>% #' add_candidates(reg_res_svm) %>% #' add_candidates(reg_res_sp) #' #' reg_st #' #' # evaluate the data stack and fit the member models #' reg_st %>% #' blend_predictions() %>% #' fit_members() #' #' reg_st #' #' # do the same with multinomial classification models #' class_st <- #' stacks() %>% #' add_candidates(class_res_nn) %>% #' add_candidates(class_res_rf) %>% #' blend_predictions() %>% #' fit_members() #' #' class_st #' #' # ...or binomial classification models #' log_st <- #' stacks() %>% #' add_candidates(log_res_nn) %>% #' add_candidates(log_res_rf) %>% #' blend_predictions() %>% #' fit_members() #' #' log_st #' } #' #' @family core verbs #' @export fit_members <- function(model_stack, ...) 
{ check_model_stack(model_stack) dat <- model_stack[["train"]] # pick out which submodels have nonzero coefs member_names <- .get_glmn_coefs( model_stack[["coefs"]][["fit"]], model_stack[["coefs"]][["spec"]][["args"]][["penalty"]] ) %>% dplyr::filter(estimate != 0 & terms != "(Intercept)") %>% dplyr::pull(terms) if (model_stack[["mode"]] == "classification") { member_dict <- sanitize_classification_names(model_stack, member_names) member_names <- member_dict$new %>% unique() } # make model specs with the chosen parameters # for chosen sub-models metrics_dict <- tibble::enframe(model_stack[["model_metrics"]]) %>% tidyr::unnest(cols = value) %>% dplyr::mutate(.config = process_.config(.config, ., name = name)) if (model_stack[["mode"]] == "regression") { members_map <- tibble::enframe(model_stack[["cols_map"]]) %>% tidyr::unnest(cols = value) %>% dplyr::full_join(metrics_dict, by = c("value" = ".config")) } else { members_map <- tibble::enframe(model_stack[["cols_map"]]) %>% tidyr::unnest(cols = value) %>% dplyr::full_join(member_dict, by = c("value" = "old")) %>% dplyr::filter(!is.na(new)) %>% dplyr::select(name, value = new) %>% dplyr::filter(!duplicated(.$value)) %>% dplyr::full_join(metrics_dict, by = c("value" = ".config")) } if (foreach::getDoParWorkers() > 1) { `%do_op%` <- foreach::`%dopar%` } else { `%do_op%` <- foreach::`%do%` } # fit each of them member_fits <- foreach::foreach(mem = member_names, .inorder = FALSE) %do_op% { asNamespace("stacks")$fit_member( name = mem, wflows = model_stack[["model_defs"]], members_map = members_map, train_dat = dat ) } model_stack[["member_fits"]] <- setNames(member_fits, member_names) if (model_stack_constr(model_stack)) {model_stack} } # fit one member of the ensemble fit_member <- function(name, wflows, members_map, train_dat) { member_row <- members_map %>% dplyr::filter(value == name) member_params <- wflows[[member_row$name.x[1]]] %>% dials::parameters() %>% dplyr::pull(id) needs_finalizing <- length(member_params) 
!= 0 if (needs_finalizing) { member_metrics <- members_map %>% dplyr::filter(value == name) %>% dplyr::slice(1) member_wf <- wflows[[member_metrics$name.x]] new_member <- tune::finalize_workflow(member_wf, member_metrics[,member_params]) %>% generics::fit(data = train_dat) } else { member_model <- members_map %>% dplyr::filter(value == name) %>% dplyr::select(name.x) %>% dplyr::pull() new_member <- generics::fit(wflows[[member_model[1]]], data = train_dat) } new_member } # creates a map for column / entry names resulting # from tuning in the classification setting sanitize_classification_names <- function(model_stack, member_names) { outcome_levels <- model_stack[["train"]] %>% dplyr::select(!!.get_outcome(model_stack)) %>% dplyr::pull() %>% as.character() %>% unique() pred_strings <- paste0(".pred_", outcome_levels, "_") new_member_names <- gsub( pattern = paste0(pred_strings, collapse = "|"), x = member_names, replacement = "" ) tibble::tibble( old = member_names, new = new_member_names ) } check_model_stack <- function(model_stack) { if (inherits(model_stack, "model_stack")) { if (!is.null(model_stack[["member_fits"]])) { glue_warn( "The members in the supplied `model_stack` have already been fitted ", "and need not be fitted again." ) } return(invisible(TRUE)) } else if (inherits(model_stack, "data_stack")) { glue_stop( "The supplied `model_stack` argument is a data stack rather than ", "a model stack. Did you forget to first evaluate the ensemble's ", "stacking coefficients with `blend_predictions()`?" ) } else { check_inherits(model_stack, "model_stack") } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{segList_ensemblGeneID} \alias{segList_ensemblGeneID} \title{Stably expressed gene list in EnsemblGeneID for both human and mouse} \format{ An object of class \code{list} of length 2. } \usage{ data(segList_ensemblGeneID, package = 'scMerge') } \description{ A list includes the stably expressed genes for both human and mouse } \keyword{datasets}
/man/segList_ensemblGeneID.Rd
no_license
SydneyBioX/scMerge
R
false
true
454
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{segList_ensemblGeneID} \alias{segList_ensemblGeneID} \title{Stably expressed gene list in EnsemblGeneID for both human and mouse} \format{ An object of class \code{list} of length 2. } \usage{ data(segList_ensemblGeneID, package = 'scMerge') } \description{ A list includes the stably expressed genes for both human and mouse } \keyword{datasets}
#' predict_prepare #' #' Prepares design matrices and other inputs needed for predictive methods #' #' @param x_mat Original data, i.e. row for each unit. #' @param index_list List of indices to use in the matching: #' \describe{ #' \item{\code{treat_train}}{Index of treated units for the training matrix} #' \item{\code{control_train}}{Index of control units for the training matrix} #' \item{\code{treat_test}}{Index of treated units for the test matrix} #' \item{\code{control_test}}{Index of control units for the test matrix} #' } #' @param design The design matrix form to use. #' @return list: #' \describe{ #' \item{\code{x_train}}{Design matrix for training} #' \item{\code{x_test}}{Design matrix for testing} #' \item{\code{y_train}}{Training outcome vector} #' \item{\code{y_test}}{Test outcome vector} #' } #' #' @export predict_prepare <- function(x_mat, index_list, design = c( "cross_all", "cross_random", "differences_random", "differences_plain" )) { design <- match.arg(design) stopifnot(length(index_list[["treat_train"]]) == length(index_list[["control_train"]]) && length(index_list[["treat_test"]]) == length(index_list[["control_test"]])) ## ------------------------------------ treat_train_ind <- index_list[["treat_train"]] control_train_ind <- index_list[["control_train"]] treat_test_ind <- index_list[["treat_test"]] control_test_ind <- index_list[["control_test"]] ## ------------------------------------ x_treat_train <- x_mat[treat_train_ind, , drop = FALSE] x_control_train <- x_mat[control_train_ind, , drop = FALSE] x_treat_test <- x_mat[treat_test_ind, , drop = FALSE] x_control_test <- x_mat[control_test_ind, , drop = FALSE] train_index <- seq_len(length(treat_train_ind)) test_index <- seq_len(length(treat_test_ind)) ## y_train_full <- ## ------------------------------------ if (design %in% c("cross_random", "differences_random")) { ## basically exactly half in each treat_left_train <- train_index %in% sample(length(treat_train_ind), size = 
length(treat_train_ind) / 2L) treat_left_test <- test_index %in% sample(length(treat_test_ind), size = length(treat_test_ind) / 2L) } else { ## cross_all has all, differences_random just has left treat_left_train <- rep(TRUE, times = length(treat_train_ind)) treat_left_test <- rep(TRUE, times = length(treat_test_ind)) } if (design == "cross_all") { treat_right_train <- rep(TRUE, times = length(treat_train_ind)) treat_right_test <- rep(FALSE, times = length(treat_test_ind)) train_order <- seq_len(length(treat_train_ind) * 2L) test_order <- seq_len(length(treat_test_ind)) y_train <- rep(c(1L, 0L), each = length(treat_train_ind) ) y_test <- rep(1L, times = length(treat_test_ind)) } else { treat_right_train <- !treat_left_train treat_right_test <- !treat_left_test train_order <- order(c( train_index[treat_left_train], train_index[treat_right_train] )) test_order <- order(c( test_index[treat_left_test], test_index[treat_right_test] )) y_train <- treat_left_train * 1L y_test <- treat_left_test * 1L } if (design %in% c("cross_all", "cross_random")) { ## these will "cross" the rows, and use all gen_design_mat <- function(x, y) { cbind(x, y) } } else { ## differences regime gen_design_mat <- function(x, y) { x - y } } ## ------------------------------------ x_tc_train <- gen_design_mat( x_treat_train[treat_left_train, , drop = FALSE], x_control_train[treat_left_train, , drop = FALSE] ) x_ct_train <- gen_design_mat( x_control_train[treat_right_train, , drop = FALSE], x_treat_train[treat_right_train, , drop = FALSE] ) x_tc_test <- gen_design_mat( x_treat_test[treat_left_test, , drop = FALSE], x_control_test[treat_left_test, , drop = FALSE] ) x_ct_test <- gen_design_mat( x_control_test[treat_right_test, , drop = FALSE], x_treat_test[treat_right_test, , drop = FALSE] ) ## combining to form full input list( x_train = rbind(x_tc_train, x_ct_train)[train_order, , drop = FALSE], x_test = rbind(x_tc_test, x_ct_test)[test_order, , drop = FALSE], y_train = y_train, y_test = y_test ) }
/R/predict_prepare.R
permissive
rzgross/uRbanmatching
R
false
false
4,590
r
#' predict_prepare #' #' Prepares design matrices and other inputs needed for predictive methods #' #' @param x_mat Original data, i.e. row for each unit. #' @param index_list List of indices to use in the matching: #' \describe{ #' \item{\code{treat_train}}{Index of treated units for the training matrix} #' \item{\code{control_train}}{Index of control units for the training matrix} #' \item{\code{treat_test}}{Index of treated units for the test matrix} #' \item{\code{control_test}}{Index of control units for the test matrix} #' } #' @param design The design matrix form to use. #' @return list: #' \describe{ #' \item{\code{x_train}}{Design matrix for training} #' \item{\code{x_test}}{Design matrix for testing} #' \item{\code{y_train}}{Training outcome vector} #' \item{\code{y_test}}{Test outcome vector} #' } #' #' @export predict_prepare <- function(x_mat, index_list, design = c( "cross_all", "cross_random", "differences_random", "differences_plain" )) { design <- match.arg(design) stopifnot(length(index_list[["treat_train"]]) == length(index_list[["control_train"]]) && length(index_list[["treat_test"]]) == length(index_list[["control_test"]])) ## ------------------------------------ treat_train_ind <- index_list[["treat_train"]] control_train_ind <- index_list[["control_train"]] treat_test_ind <- index_list[["treat_test"]] control_test_ind <- index_list[["control_test"]] ## ------------------------------------ x_treat_train <- x_mat[treat_train_ind, , drop = FALSE] x_control_train <- x_mat[control_train_ind, , drop = FALSE] x_treat_test <- x_mat[treat_test_ind, , drop = FALSE] x_control_test <- x_mat[control_test_ind, , drop = FALSE] train_index <- seq_len(length(treat_train_ind)) test_index <- seq_len(length(treat_test_ind)) ## y_train_full <- ## ------------------------------------ if (design %in% c("cross_random", "differences_random")) { ## basically exactly half in each treat_left_train <- train_index %in% sample(length(treat_train_ind), size = 
length(treat_train_ind) / 2L) treat_left_test <- test_index %in% sample(length(treat_test_ind), size = length(treat_test_ind) / 2L) } else { ## cross_all has all, differences_random just has left treat_left_train <- rep(TRUE, times = length(treat_train_ind)) treat_left_test <- rep(TRUE, times = length(treat_test_ind)) } if (design == "cross_all") { treat_right_train <- rep(TRUE, times = length(treat_train_ind)) treat_right_test <- rep(FALSE, times = length(treat_test_ind)) train_order <- seq_len(length(treat_train_ind) * 2L) test_order <- seq_len(length(treat_test_ind)) y_train <- rep(c(1L, 0L), each = length(treat_train_ind) ) y_test <- rep(1L, times = length(treat_test_ind)) } else { treat_right_train <- !treat_left_train treat_right_test <- !treat_left_test train_order <- order(c( train_index[treat_left_train], train_index[treat_right_train] )) test_order <- order(c( test_index[treat_left_test], test_index[treat_right_test] )) y_train <- treat_left_train * 1L y_test <- treat_left_test * 1L } if (design %in% c("cross_all", "cross_random")) { ## these will "cross" the rows, and use all gen_design_mat <- function(x, y) { cbind(x, y) } } else { ## differences regime gen_design_mat <- function(x, y) { x - y } } ## ------------------------------------ x_tc_train <- gen_design_mat( x_treat_train[treat_left_train, , drop = FALSE], x_control_train[treat_left_train, , drop = FALSE] ) x_ct_train <- gen_design_mat( x_control_train[treat_right_train, , drop = FALSE], x_treat_train[treat_right_train, , drop = FALSE] ) x_tc_test <- gen_design_mat( x_treat_test[treat_left_test, , drop = FALSE], x_control_test[treat_left_test, , drop = FALSE] ) x_ct_test <- gen_design_mat( x_control_test[treat_right_test, , drop = FALSE], x_treat_test[treat_right_test, , drop = FALSE] ) ## combining to form full input list( x_train = rbind(x_tc_train, x_ct_train)[train_order, , drop = FALSE], x_test = rbind(x_tc_test, x_ct_test)[test_order, , drop = FALSE], y_train = y_train, y_test = y_test ) }
# File: 05_bismark_array_job.R # Auth: umar.niazi@kcl.as.uk # DESC: create a parameter file and shell script to run array job on hpc # Date: 16/08/2017 ## set variables and source libraries source('header.R') ## connect to mysql database to get sample information library('RMySQL') ##### connect to mysql database to get samples db = dbConnect(MySQL(), user='rstudio', password='12345', dbname='Projects', host='127.0.0.1') dbListTables(db) # check how many files each sample has g_did q = paste0('select count(File.idSample) as files, Sample.idData, Sample.title, Sample.id as SampleID from File, Sample where (Sample.idData = 14 and File.idSample = Sample.id) group by File.idSample') dfQuery = dbGetQuery(db, q) dfQuery$title = gsub(" ", "", dfQuery$title, fixed = T) dfQuery # get the count of files q = paste0('select File.*, Sample.idData from File, Sample where (Sample.idData = 14) and (File.idSample = Sample.id) ') dfCounts = dbGetQuery(db, q) dfCounts$name = gsub(" ", "", dfCounts$name, fixed = T) head(dfCounts) nrow(dfCounts) dfCounts # for each sample id, get the corresponding files cvQueries = paste0('select File.*, Sample.title from File, Sample where (Sample.idData = 14 and Sample.id =', dfQuery$SampleID, ') and (File.idSample = Sample.id) ') # set header variables cvShell = '#!/bin/bash' cvShell.2 = '#$ -S /bin/bash' cvProcessors = '#$ -pe smp 8' cvWorkingDir = '#$ -cwd' cvJobName = '#$ -N bismark-array' cvStdout = '#$ -j y' cvMemoryReserve = '#$ -l h_vmem=19G' cvArrayJob = paste0('#$ -t 1-', nrow(dfCounts)/2) # using high memory queue with one slot and 19 Gigs of memory # set the directory names cvInput = 'input/' cvOutput = 'output/Aligned/' cvTempDir = 'output/Aligned/temp/' cvBismark = '/users/k1625253/brc_scratch/Programs/bismark_v0.18.1/' cvBowtie2 = '/opt/apps/bioinformatics/bowtie2/2.2.5/bowtie2' cvGeneIndex = '/users/k1625253/brc_scratch/Data/MetaData/GenomeIndex/mm10_bismark/' # create a parameter file and shell script dir.create('AutoScripts') 
oFile.param = file('AutoScripts/bismark_param.txt', 'wt') temp = sapply(cvQueries, function(x){ # get the file names dfFiles = dbGetQuery(db, x) # check for null return if (nrow(dfFiles) == 0) return(); # remove white space from title dfFiles$title = gsub(" ", "", dfFiles$title, fixed=T) # split the file names into paired end 1 and 2, identified by R1 and R2 in the file name f = dfFiles$name d = grepl('_R1_', f) d = as.character(d) d[d == 'TRUE'] = 'R1' d[d == 'FALSE'] = 'R2' lf = split(f, d) # write parameter file in.r1 = paste0(cvInput, 'trim_', lf[[1]]) in.r2 = paste0(cvInput, 'trim_', lf[[2]]) p1 = paste(in.r1, in.r2, sep=' ') writeLines(p1, oFile.param) }) close(oFile.param) oFile = file('AutoScripts/bismark.sh', 'wt') writeLines(c('# Autogenerated script from 05_bismark_array_job.R', paste('# date', date())), oFile) writeLines(c('# make sure directory paths exist before running script'), oFile) writeLines(c(cvShell, cvShell.2, cvProcessors, cvWorkingDir, cvJobName, cvStdout, cvMemoryReserve, cvArrayJob), oFile) writeLines('\n\n', oFile) # module load writeLines(c('module load bioinformatics/bowtie2/2.2.5'), oFile) writeLines(c('module load bioinformatics/samtools/1.3.1'), oFile) writeLines('\n\n', oFile) ## write array job lines writeLines("# Parse parameter file to get variables. number=$SGE_TASK_ID paramfile=bismark_param.txt inr1=`sed -n ${number}p $paramfile | awk '{print $1}'` inr2=`sed -n ${number}p $paramfile | awk '{print $2}'` # 9. Run the program.", oFile) cvProg = paste(cvBismark, 'bismark', sep='') p1 = paste(cvProg, cvGeneIndex, '-1', '$inr1', '-2', '$inr2', '-p 4', '--multicore 8', '-o', cvOutput, '--temp_dir', cvTempDir, sep=' ') com = paste(p1) writeLines(com, oFile) writeLines('\n\n', oFile) close(oFile) dbDisconnect(db)
/S107/05_bismark_array_job.R
permissive
uhkniazi/BRC_NeuralTube_Miho
R
false
false
3,867
r
# File: 05_bismark_array_job.R # Auth: umar.niazi@kcl.as.uk # DESC: create a parameter file and shell script to run array job on hpc # Date: 16/08/2017 ## set variables and source libraries source('header.R') ## connect to mysql database to get sample information library('RMySQL') ##### connect to mysql database to get samples db = dbConnect(MySQL(), user='rstudio', password='12345', dbname='Projects', host='127.0.0.1') dbListTables(db) # check how many files each sample has g_did q = paste0('select count(File.idSample) as files, Sample.idData, Sample.title, Sample.id as SampleID from File, Sample where (Sample.idData = 14 and File.idSample = Sample.id) group by File.idSample') dfQuery = dbGetQuery(db, q) dfQuery$title = gsub(" ", "", dfQuery$title, fixed = T) dfQuery # get the count of files q = paste0('select File.*, Sample.idData from File, Sample where (Sample.idData = 14) and (File.idSample = Sample.id) ') dfCounts = dbGetQuery(db, q) dfCounts$name = gsub(" ", "", dfCounts$name, fixed = T) head(dfCounts) nrow(dfCounts) dfCounts # for each sample id, get the corresponding files cvQueries = paste0('select File.*, Sample.title from File, Sample where (Sample.idData = 14 and Sample.id =', dfQuery$SampleID, ') and (File.idSample = Sample.id) ') # set header variables cvShell = '#!/bin/bash' cvShell.2 = '#$ -S /bin/bash' cvProcessors = '#$ -pe smp 8' cvWorkingDir = '#$ -cwd' cvJobName = '#$ -N bismark-array' cvStdout = '#$ -j y' cvMemoryReserve = '#$ -l h_vmem=19G' cvArrayJob = paste0('#$ -t 1-', nrow(dfCounts)/2) # using high memory queue with one slot and 19 Gigs of memory # set the directory names cvInput = 'input/' cvOutput = 'output/Aligned/' cvTempDir = 'output/Aligned/temp/' cvBismark = '/users/k1625253/brc_scratch/Programs/bismark_v0.18.1/' cvBowtie2 = '/opt/apps/bioinformatics/bowtie2/2.2.5/bowtie2' cvGeneIndex = '/users/k1625253/brc_scratch/Data/MetaData/GenomeIndex/mm10_bismark/' # create a parameter file and shell script dir.create('AutoScripts') 
oFile.param = file('AutoScripts/bismark_param.txt', 'wt') temp = sapply(cvQueries, function(x){ # get the file names dfFiles = dbGetQuery(db, x) # check for null return if (nrow(dfFiles) == 0) return(); # remove white space from title dfFiles$title = gsub(" ", "", dfFiles$title, fixed=T) # split the file names into paired end 1 and 2, identified by R1 and R2 in the file name f = dfFiles$name d = grepl('_R1_', f) d = as.character(d) d[d == 'TRUE'] = 'R1' d[d == 'FALSE'] = 'R2' lf = split(f, d) # write parameter file in.r1 = paste0(cvInput, 'trim_', lf[[1]]) in.r2 = paste0(cvInput, 'trim_', lf[[2]]) p1 = paste(in.r1, in.r2, sep=' ') writeLines(p1, oFile.param) }) close(oFile.param) oFile = file('AutoScripts/bismark.sh', 'wt') writeLines(c('# Autogenerated script from 05_bismark_array_job.R', paste('# date', date())), oFile) writeLines(c('# make sure directory paths exist before running script'), oFile) writeLines(c(cvShell, cvShell.2, cvProcessors, cvWorkingDir, cvJobName, cvStdout, cvMemoryReserve, cvArrayJob), oFile) writeLines('\n\n', oFile) # module load writeLines(c('module load bioinformatics/bowtie2/2.2.5'), oFile) writeLines(c('module load bioinformatics/samtools/1.3.1'), oFile) writeLines('\n\n', oFile) ## write array job lines writeLines("# Parse parameter file to get variables. number=$SGE_TASK_ID paramfile=bismark_param.txt inr1=`sed -n ${number}p $paramfile | awk '{print $1}'` inr2=`sed -n ${number}p $paramfile | awk '{print $2}'` # 9. Run the program.", oFile) cvProg = paste(cvBismark, 'bismark', sep='') p1 = paste(cvProg, cvGeneIndex, '-1', '$inr1', '-2', '$inr2', '-p 4', '--multicore 8', '-o', cvOutput, '--temp_dir', cvTempDir, sep=' ') com = paste(p1) writeLines(com, oFile) writeLines('\n\n', oFile) close(oFile) dbDisconnect(db)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_mothur2phyloseq.R \name{read_mothur2phyloseq} \alias{read_mothur2phyloseq} \title{Read Mothur Output into a Phyloseq Object} \usage{ read_mothur2phyloseq(shared.file, consensus.taxonomy.file, mapping.file = NULL) } \arguments{ \item{shared.file}{A \href{http://www.mothur.org/wiki/Shared_file}{shared file} produced by \emph{mothur}. Identified from the .shared extension} \item{consensus.taxonomy.file}{Consensus taxonomy file produced by \emph{mothur}. Identified from with the .taxonomy extension. See \url{http://www.mothur.org/wiki/ConTaxonomy_file}.} \item{mapping.file}{Metadata/mapping file with .csv extension} } \value{ \code{\link{phyloseq-class}} object. } \description{ Read mothur shared and consensus taxonomy files into a \code{\link{phyloseq-class}} object. } \details{ Mothur shared and consensus taxonomy files will be converted to \code{\link{phyloseq-class}}. } \examples{ \dontrun{ otu.file <- system.file( "extdata/Baxter_FITs_Microbiome_2016_fit.final.tx.1.subsample.shared", package='microbiome') tax.file <- system.file( "extdata/Baxter_FITs_Microbiome_2016_fit.final.tx.1.cons.taxonomy", package='microbiome') meta.file <- system.file( "extdata/Baxter_FITs_Microbiome_2016_mapping.csv", package='microbiome') p0 <- read_mothur2phyloseq( shared.file=otu.file, consensus.taxonomy.file=tax.file, mapping.file=meta.file) } } \author{ Sudarshan A. Shetty \email{sudarshanshetty9@gmail.com} } \keyword{utilities}
/man/read_mothur2phyloseq.Rd
no_license
satopan/microbiome
R
false
true
1,552
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_mothur2phyloseq.R \name{read_mothur2phyloseq} \alias{read_mothur2phyloseq} \title{Read Mothur Output into a Phyloseq Object} \usage{ read_mothur2phyloseq(shared.file, consensus.taxonomy.file, mapping.file = NULL) } \arguments{ \item{shared.file}{A \href{http://www.mothur.org/wiki/Shared_file}{shared file} produced by \emph{mothur}. Identified from the .shared extension} \item{consensus.taxonomy.file}{Consensus taxonomy file produced by \emph{mothur}. Identified from with the .taxonomy extension. See \url{http://www.mothur.org/wiki/ConTaxonomy_file}.} \item{mapping.file}{Metadata/mapping file with .csv extension} } \value{ \code{\link{phyloseq-class}} object. } \description{ Read mothur shared and consensus taxonomy files into a \code{\link{phyloseq-class}} object. } \details{ Mothur shared and consensus taxonomy files will be converted to \code{\link{phyloseq-class}}. } \examples{ \dontrun{ otu.file <- system.file( "extdata/Baxter_FITs_Microbiome_2016_fit.final.tx.1.subsample.shared", package='microbiome') tax.file <- system.file( "extdata/Baxter_FITs_Microbiome_2016_fit.final.tx.1.cons.taxonomy", package='microbiome') meta.file <- system.file( "extdata/Baxter_FITs_Microbiome_2016_mapping.csv", package='microbiome') p0 <- read_mothur2phyloseq( shared.file=otu.file, consensus.taxonomy.file=tax.file, mapping.file=meta.file) } } \author{ Sudarshan A. Shetty \email{sudarshanshetty9@gmail.com} } \keyword{utilities}
monty <- function() { doors <- 1:3 # randomly pick where the car is cardoor <- sample(doors,1) # prompt player print("Monty Hall says `Pick a door, any door!'") # receive the player's choice of door (should be 1,2, or 3) chosen <- scan(what = integer(), nlines = 1, quiet = TRUE) # pick Monty's door (can't be the player's door or the car door) if (chosen != cardoor) montydoor <- doors[-c(chosen, cardoor)] else montydoor <- sample(doors[-chosen],1) # find out whether the player wants to switch doors print(paste("Monty opens door ", montydoor, "!", sep="")) print("Would you like to switch (y/n)?") reply <- scan(what = character(), nlines = 1, quiet = TRUE) # interpret what player wrote as "yes" if it starts with "y" if (substr(reply,1,1) == "y") chosen <- doors[-c(chosen,montydoor)] # announce the result of the game! if (chosen == cardoor) print("You won!") else print("You lost!") } monty <- function() { doors <- 1:3 # randomly pick where the car is cardoor <- sample(doors,1) # prompt player print("Monty Hall says `Pick a door, any door!'") # receive the player's choice of door (should be 1,2, or 3) chosen <- scan(what = integer(), nlines = 1, quiet = TRUE) # pick Monty's door (can't be the player's door or the car door) if (chosen != cardoor) montydoor <- doors[-c(chosen, cardoor)] else montydoor <- sample(doors[-chosen],1) # find out whether the player wants to switch doors print(paste("Monty opens door ", montydoor, "!", sep="")) print("Would you like to switch (y/n)?") reply <- scan(what = character(), nlines = 1, quiet = TRUE) # interpret what player wrote as "yes" if it starts with "y" if (substr(reply,1,1) == "y") chosen <- doors[-c(chosen,montydoor)] # announce the result of the game! 
if (chosen == cardoor) print("You won!") else print("You lost!") } monty.auto <- function(switch.door) { doors <- 1:3 # randomly pick where the car is cardoor <- sample(doors,1) # prompt player # print("Monty Hall says `Pick a door, any door!'") # receive the player's choice of door (should be 1,2, or 3) chosen <-sample(doors,1) # pick Monty's door (can't be the player's door or the car door) if (chosen != cardoor) montydoor <- doors[-c(chosen, cardoor)] else montydoor <- sample(doors[-chosen],1) # find out whether the player wants to switch doors # print(paste("Monty opens door ", montydoor, "!", sep="")) # print("Would you like to switch (y/n)?") # reply <- scan(what = character(), nlines = 1, quiet = TRUE) # interpret what player wrote as "yes" if it starts with "y" if (switch.door == "y") chosen <- doors[-c(chosen,montydoor)] # announce the result of the game! if (chosen == cardoor) return (1) else return(0) }
/monty.R
no_license
janice-mccarthy/Bios701-2015
R
false
false
2,889
r
monty <- function() { doors <- 1:3 # randomly pick where the car is cardoor <- sample(doors,1) # prompt player print("Monty Hall says `Pick a door, any door!'") # receive the player's choice of door (should be 1,2, or 3) chosen <- scan(what = integer(), nlines = 1, quiet = TRUE) # pick Monty's door (can't be the player's door or the car door) if (chosen != cardoor) montydoor <- doors[-c(chosen, cardoor)] else montydoor <- sample(doors[-chosen],1) # find out whether the player wants to switch doors print(paste("Monty opens door ", montydoor, "!", sep="")) print("Would you like to switch (y/n)?") reply <- scan(what = character(), nlines = 1, quiet = TRUE) # interpret what player wrote as "yes" if it starts with "y" if (substr(reply,1,1) == "y") chosen <- doors[-c(chosen,montydoor)] # announce the result of the game! if (chosen == cardoor) print("You won!") else print("You lost!") } monty <- function() { doors <- 1:3 # randomly pick where the car is cardoor <- sample(doors,1) # prompt player print("Monty Hall says `Pick a door, any door!'") # receive the player's choice of door (should be 1,2, or 3) chosen <- scan(what = integer(), nlines = 1, quiet = TRUE) # pick Monty's door (can't be the player's door or the car door) if (chosen != cardoor) montydoor <- doors[-c(chosen, cardoor)] else montydoor <- sample(doors[-chosen],1) # find out whether the player wants to switch doors print(paste("Monty opens door ", montydoor, "!", sep="")) print("Would you like to switch (y/n)?") reply <- scan(what = character(), nlines = 1, quiet = TRUE) # interpret what player wrote as "yes" if it starts with "y" if (substr(reply,1,1) == "y") chosen <- doors[-c(chosen,montydoor)] # announce the result of the game! 
if (chosen == cardoor) print("You won!") else print("You lost!") } monty.auto <- function(switch.door) { doors <- 1:3 # randomly pick where the car is cardoor <- sample(doors,1) # prompt player # print("Monty Hall says `Pick a door, any door!'") # receive the player's choice of door (should be 1,2, or 3) chosen <-sample(doors,1) # pick Monty's door (can't be the player's door or the car door) if (chosen != cardoor) montydoor <- doors[-c(chosen, cardoor)] else montydoor <- sample(doors[-chosen],1) # find out whether the player wants to switch doors # print(paste("Monty opens door ", montydoor, "!", sep="")) # print("Would you like to switch (y/n)?") # reply <- scan(what = character(), nlines = 1, quiet = TRUE) # interpret what player wrote as "yes" if it starts with "y" if (switch.door == "y") chosen <- doors[-c(chosen,montydoor)] # announce the result of the game! if (chosen == cardoor) return (1) else return(0) }
# 6/10/20
# Compares batch-corrected ("batched") vs uncorrected ("unbatched") metaphlan
# relative abundances for omni and/or regular collections, writing PDF plots.

library(tidyverse)

data_dir <- "Data/Tidy"
graph_dir <- "Graphs/Metaphlan/Omni_Batched_vs_Unbatched_Scaled"
# recursive = TRUE so a missing parent directory does not make dir.create fail
if (!dir.exists(graph_dir)) dir.create(graph_dir, recursive = TRUE)
# pin summarize to dplyr in case another attached package masks it
summarize <- dplyr::summarize

ds1 <- "metaphlan"
# loads met_taxa_df (long-format taxa abundance table) into the session
load(file.path(data_dir, paste("Tidy_Scaled_", ds1, ".RData", sep = "")))
metaphlan_df <- met_taxa_df

# Compare batched vs unbatched abundances at one taxonomic level and write
# three PDFs under out_dir: a bar chart of taxa ranked by correction
# difference, a per-sample stacked bar chart of the top taxa, and an
# aggregated stacked bar chart.
#
# met_lev:   taxonomic level to keep (e.g. "phylum", "genus")
# met_reg:   regex prefix stripped from taxa names (e.g. ".*p__")
# met_lab:   plural label used in titles/filenames (e.g. "Phyla")
# sing_lab:  singular label used for axes/legends (e.g. "Phylum")
# N:         number of top taxa by mean relative abundance to plot
# comp_N:    number of taxa by |batched - unbatched| difference to plot
# coll_type: collection types to include ("omni", "regular", or both)
# out_dir:   output root; defaults to the global graph_dir at call time,
#            which preserves the script's original implicit-global behavior
#            while making the dependency explicit and overridable
batch_unbatch_comp <- function(met_lev, met_reg, met_lab, sing_lab, N,
                               comp_N, coll_type, out_dir = graph_dir) {
  graph_dir2 <- file.path(out_dir, paste("Top", N, met_lab, sep = "_"))
  if (!dir.exists(graph_dir2)) dir.create(graph_dir2, recursive = TRUE)
  desc1 <- paste("Batched Corrected vs Uncorrected, Top", comp_N, met_lab,
                 "By Correction Difference", sep = " ")
  desc2 <- paste("Batched Corrected vs Uncorrected, Top", N, met_lab,
                 "By Relative Abundance", sep = " ")

  met_phyla_df <- metaphlan_df %>%
    filter(omni_comparison == "comparison" &
             level == met_lev &
             collection_type %in% coll_type) %>%
    mutate(taxa = str_replace(taxa, met_reg, ""))
  # which() drops NA batch flags, so rows with an NA flag stay "unbatched"
  # instead of propagating NA (matches the original behavior)
  met_phyla_df$batched <- "unbatched"
  met_phyla_df$batched[which(met_phyla_df$batch)] <- "batched"

  phy <- met_phyla_df %>%
    group_by(taxa) %>%
    summarize(mean_val = mean(val)) %>%
    arrange(desc(mean_val))
  # head() instead of [1:N] avoids NA entries when fewer than N taxa exist
  top_N_phy <- head(phy$taxa, N)

  # plot taxa most changed by batch correction, batched vs unbatched
  comp_phyla <- met_phyla_df %>%
    group_by(taxa, batched) %>%
    summarize(mean_val = mean(val)) %>%
    # pivot_wider() replaces the superseded spread()
    pivot_wider(names_from = batched, values_from = mean_val) %>%
    mutate(batch_diff = batched - unbatched,
           batch_diff_abs = abs(batch_diff)) %>%
    arrange(desc(batch_diff_abs))
  diff_N_phy <- head(comp_phyla$taxa, comp_N)
  comp_phyla <- comp_phyla %>% filter(taxa %in% diff_N_phy)

  pdf(file.path(graph_dir2, paste(met_lab, "By_Batch_Diff.pdf", sep = "_")),
      width = 18, height = 12)
  gg <- comp_phyla %>%
    ggplot(aes(x = reorder(taxa, -batch_diff), y = batch_diff)) +
    geom_bar(position = "stack", stat = "identity") +
    xlab(sing_lab) +
    ylab("Relative Abundance Diff (batched - unbatched)") +
    theme(axis.text.x = element_text(size = 10, angle = 90, hjust = 1)) +
    labs(title = desc1)
  plot(gg)
  dev.off()

  # plot top taxa per sample: one bar per study_id x batch status
  met_phyla_df2 <- met_phyla_df %>%
    filter(taxa %in% top_N_phy) %>%
    mutate(study_id = paste(study_id, batched, sep = "_")) %>%
    group_by(taxa, study_id) %>%
    summarize(mean_val = mean(val)) %>%
    arrange(mean_val)
  # View(met_phyla_df)
  pdf(file.path(graph_dir2, paste("Top", N, met_lab, "Stack.pdf", sep = "_")),
      width = 18, height = 12)
  gg <- met_phyla_df2 %>%
    ggplot(aes(x = study_id, y = mean_val, fill = taxa)) +
    geom_bar(position = "stack", stat = "identity") +
    xlab("Sample") +
    ylab("Relative Abundance") +
    theme(axis.text.x = element_text(size = 10, angle = 90, hjust = 1)) +
    guides(fill = guide_legend(title = sing_lab)) +
    labs(title = desc2)
  plot(gg)
  dev.off()

  # plot top taxa aggregated over samples, batched vs unbatched
  met_phyla_df2 <- met_phyla_df %>%
    filter(taxa %in% top_N_phy) %>%
    group_by(batched, taxa) %>%
    summarize(mean_val = mean(val)) %>%
    arrange(mean_val)
  pdf(file.path(graph_dir2, paste(met_lab, "Aggregated.pdf", sep = "_")),
      width = 18, height = 12)
  gg <- met_phyla_df2 %>%
    ggplot(aes(x = batched, y = mean_val, fill = taxa)) +
    geom_bar(position = "stack", stat = "identity") +
    xlab("Collection Type") +
    ylab("Relative Abundance") +
    theme(axis.text.x = element_text(size = 10, angle = 90, hjust = 1)) +
    guides(fill = guide_legend(title = sing_lab)) +
    labs(title = paste(desc2, ", Aggregated", sep = ""))
  plot(gg)
  dev.off()
}

### comparing for metaphlan, phyla
batch_unbatch_comp(met_lev = "phylum", met_reg = ".*p__", met_lab = "Phyla",
                   sing_lab = "Phylum", N = 5, comp_N = 20,
                   coll_type = "omni")
### comparing for metaphlan, genera
batch_unbatch_comp(met_lev = "genus", met_reg = ".*g__", met_lab = "Genera",
                   sing_lab = "Genus", N = 20, comp_N = 20,
                   coll_type = "omni")

### now, same but with regular
graph_dir <- "Graphs/Metaphlan/Regular_Batched_vs_Unbatched_Scaled"
if (!dir.exists(graph_dir)) dir.create(graph_dir, recursive = TRUE)
### comparing for metaphlan, phyla
batch_unbatch_comp(met_lev = "phylum", met_reg = ".*p__", met_lab = "Phyla",
                   sing_lab = "Phylum", N = 5, comp_N = 20,
                   coll_type = "regular")
### comparing for metaphlan, genera
batch_unbatch_comp(met_lev = "genus", met_reg = ".*g__", met_lab = "Genera",
                   sing_lab = "Genus", N = 20, comp_N = 20,
                   coll_type = "regular")

### now, same but with both
graph_dir <- "Graphs/Metaphlan/Comparison_Batched_vs_Unbatched_Scaled"
if (!dir.exists(graph_dir)) dir.create(graph_dir, recursive = TRUE)
### comparing for metaphlan, phyla
batch_unbatch_comp(met_lev = "phylum", met_reg = ".*p__", met_lab = "Phyla",
                   sing_lab = "Phylum", N = 5, comp_N = 20,
                   coll_type = c("omni", "regular"))
### comparing for metaphlan, genera
batch_unbatch_comp(met_lev = "genus", met_reg = ".*g__", met_lab = "Genera",
                   sing_lab = "Genus", N = 20, comp_N = 20,
                   coll_type = c("omni", "regular"))
/Batch_vs_Unbatch_Metaphlan.R
no_license
michaelwilliamtang/ListenToYourHeart
R
false
false
5,133
r
# 6/10/20
# Compares batch vs unbatch for omni or reg metaphlan:
# plots batch-corrected ("batched") against uncorrected ("unbatched")
# relative abundances and saves the results as PDFs.

library(tidyverse)

data_dir <- "Data/Tidy"
graph_dir <- "Graphs/Metaphlan/Omni_Batched_vs_Unbatched_Scaled"
if (!dir.exists(graph_dir)) dir.create(graph_dir)
# pin summarize to dplyr in case another attached package masks it
summarize <- dplyr::summarize

ds1 <- "metaphlan"
# loads met_taxa_df (long-format taxa abundance table) into the session
load(file.path(data_dir, paste("Tidy_Scaled_", ds1, ".RData", sep = "")))
metaphlan_df <- met_taxa_df

# Compare batched vs unbatched abundances at one taxonomic level and write
# three PDFs under the global graph_dir (read implicitly at call time):
# a by-difference bar chart, a per-sample stacked bar chart, and an
# aggregated stacked bar chart.
#
# met_lev:   taxonomic level to keep (e.g. "phylum", "genus")
# met_reg:   regex prefix stripped from taxa names (e.g. ".*p__")
# met_lab:   plural label used in titles/filenames (e.g. "Phyla")
# sing_lab:  singular label used for axes/legends (e.g. "Phylum")
# N:         number of top taxa by mean relative abundance to plot
# comp_N:    number of taxa by |batched - unbatched| difference to plot
# coll_type: collection types to include ("omni", "regular", or both)
batch_unbatch_comp <- function(met_lev, met_reg, met_lab, sing_lab, N,
                               comp_N, coll_type) {
  graph_dir2 <- file.path(graph_dir, paste("Top", N, met_lab, sep = "_"))
  if (!dir.exists(graph_dir2)) dir.create(graph_dir2)
  desc1 <- paste("Batched Corrected vs Uncorrected, Top", comp_N, met_lab,
                 "By Correction Difference", sep = " ")
  desc2 <- paste("Batched Corrected vs Uncorrected, Top", N, met_lab,
                 "By Relative Abundance", sep = " ")

  met_phyla_df <- metaphlan_df %>%
    filter(omni_comparison == "comparison" &
             level == met_lev &
             collection_type %in% coll_type) %>%
    mutate(taxa = str_replace(taxa, met_reg, ""))
  # which() drops NA batch flags, so rows with an NA flag stay "unbatched"
  met_phyla_df$batched <- "unbatched"
  met_phyla_df$batched[which(met_phyla_df$batch)] <- "batched"

  # rank taxa by mean relative abundance and keep the top N names
  phy <- met_phyla_df %>%
    group_by(taxa) %>%
    summarize(mean_val = mean(val)) %>%
    arrange(desc(mean_val))
  top_N_phy <- phy$taxa[1:N]

  # plot most diff phyla, batch vs unbatch
  comp_phyla <- met_phyla_df %>%
    group_by(taxa, batched) %>%
    summarize(mean_val = mean(val)) %>%
    spread(batched, mean_val) %>%
    mutate(batch_diff = batched - unbatched,
           batch_diff_abs = abs(batch_diff)) %>%
    arrange(desc(batch_diff_abs))
  diff_N_phy <- comp_phyla$taxa[1:comp_N]
  comp_phyla <- comp_phyla %>% filter(taxa %in% diff_N_phy)

  pdf(file.path(graph_dir2, paste(met_lab, "By_Batch_Diff.pdf", sep = "_")),
      width = 18, height = 12)
  gg <- comp_phyla %>%
    ggplot(aes(x = reorder(taxa, -batch_diff), y = batch_diff)) +
    geom_bar(position = "stack", stat = "identity") +
    xlab(sing_lab) +
    ylab("Relative Abundance Diff (batched - unbatched)") +
    theme(axis.text.x = element_text(size = 10, angle = 90, hjust = 1)) +
    labs(title = desc1)
  plot(gg)
  dev.off()

  # plot top phyla: one bar per study_id x batch status
  met_phyla_df2 <- met_phyla_df %>%
    filter(taxa %in% top_N_phy) %>%
    mutate(study_id = paste(study_id, batched, sep = "_")) %>%
    group_by(taxa, study_id) %>%
    summarize(mean_val = mean(val)) %>%
    arrange(mean_val)
  # View(met_phyla_df)
  pdf(file.path(graph_dir2, paste("Top", N, met_lab, "Stack.pdf", sep = "_")),
      width = 18, height = 12)
  gg <- met_phyla_df2 %>%
    ggplot(aes(x = study_id, y = mean_val, fill = taxa)) +
    geom_bar(position = "stack", stat = "identity") +
    xlab("Sample") +
    ylab("Relative Abundance") +
    theme(axis.text.x = element_text(size = 10, angle = 90, hjust = 1)) +
    guides(fill = guide_legend(title = sing_lab)) +
    labs(title = desc2)
  plot(gg)
  dev.off()

  # plot top phyla aggregated over samples, batched vs unbatched
  met_phyla_df2 <- met_phyla_df %>%
    filter(taxa %in% top_N_phy) %>%
    group_by(batched, taxa) %>%
    summarize(mean_val = mean(val)) %>%
    arrange(mean_val)
  pdf(file.path(graph_dir2, paste(met_lab, "Aggregated.pdf", sep = "_")),
      width = 18, height = 12)
  gg <- met_phyla_df2 %>%
    ggplot(aes(x = batched, y = mean_val, fill = taxa)) +
    geom_bar(position = "stack", stat = "identity") +
    xlab("Collection Type") +
    ylab("Relative Abundance") +
    theme(axis.text.x = element_text(size = 10, angle = 90, hjust = 1)) +
    guides(fill = guide_legend(title = sing_lab)) +
    labs(title = paste(desc2, ", Aggregated", sep = ""))
  plot(gg)
  dev.off()
}

### comparing for metaphlan, phyla
batch_unbatch_comp(met_lev = "phylum", met_reg = ".*p__", met_lab = "Phyla",
                   sing_lab = "Phylum", N = 5, comp_N = 20,
                   coll_type = "omni")
### comparing for metaphlan, genera
batch_unbatch_comp(met_lev = "genus", met_reg = ".*g__", met_lab = "Genera",
                   sing_lab = "Genus", N = 20, comp_N = 20,
                   coll_type = "omni")

### now, same but with regular
# NOTE(review): the function reads graph_dir from the global environment,
# so reassigning it here redirects all subsequent output.
graph_dir <- "Graphs/Metaphlan/Regular_Batched_vs_Unbatched_Scaled"
if (!dir.exists(graph_dir)) dir.create(graph_dir)
### comparing for metaphlan, phyla
batch_unbatch_comp(met_lev = "phylum", met_reg = ".*p__", met_lab = "Phyla",
                   sing_lab = "Phylum", N = 5, comp_N = 20,
                   coll_type = "regular")
### comparing for metaphlan, genera
batch_unbatch_comp(met_lev = "genus", met_reg = ".*g__", met_lab = "Genera",
                   sing_lab = "Genus", N = 20, comp_N = 20,
                   coll_type = "regular")

### now, same but with both
graph_dir <- "Graphs/Metaphlan/Comparison_Batched_vs_Unbatched_Scaled"
if (!dir.exists(graph_dir)) dir.create(graph_dir)
### comparing for metaphlan, phyla
batch_unbatch_comp(met_lev = "phylum", met_reg = ".*p__", met_lab = "Phyla",
                   sing_lab = "Phylum", N = 5, comp_N = 20,
                   coll_type = c("omni", "regular"))
### comparing for metaphlan, genera
batch_unbatch_comp(met_lev = "genus", met_reg = ".*g__", met_lab = "Genera",
                   sing_lab = "Genus", N = 20, comp_N = 20,
                   coll_type = c("omni", "regular"))
# Generates Plot3 for Exploratory Data Analysis Assignment 2.
#
# Question: of the four source types indicated by the type variable
# (point, nonpoint, onroad, nonroad), which saw decreases and which saw
# increases in emissions from 1999 to 2008 for Baltimore City? Answered
# with a ggplot2 line chart, one colored line per source type.

# Load the NEI/SCC data sets only if a previous run has not already cached
# them in the global environment (they are large and slow to read).
if (!exists("NEI")) {
  NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if (!exists("SCC")) {
  SCC <- readRDS("./data/Source_Classification_Code.rds")
}

library(ggplot2)

# fips "24510" identifies Baltimore City, Maryland (see plot2.R)
subsetNEI <- NEI[NEI$fips == "24510", ]
# total emissions per year within each source type
aggregatedTotalByYearAndType <- aggregate(Emissions ~ year + type,
                                          subsetNEI, sum)

# Render the chart straight to a PNG device.
png("plot3.png", width = 640, height = 480)
g <- ggplot(aggregatedTotalByYearAndType,
            aes(year, Emissions, color = type)) +
  geom_line() +
  xlab("year") +
  ylab(expression('Total PM'[2.5]*" Emissions")) +
  ggtitle('Total Emissions in Baltimore City, Maryland (fips == "24510") from 1999 to 2008')
print(g)
dev.off()
/Plot3.R
no_license
InduVeeramgari/ExploratoryDataAnalysis
R
false
false
1,053
r
##
## This Code generates Plot3 for Exploratory Data Analysis Assignment-2

# Load the NEI/SCC data sets only if a previous run has not already cached
# them in the global environment.
if(!exists("NEI")){
  NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if(!exists("SCC")){
  SCC <- readRDS("./data/Source_Classification_Code.rds")
}

library(ggplot2)

# Of the four types of sources indicated by the type (point, nonpoint,
# onroad, nonroad) variable, which of these four sources have seen decreases
# in emissions from 1999 to 2008 for Baltimore City? Which have seen
# increases in emissions from 1999 to 2008?
# Use the ggplot2 plotting system to make a plot answering this question.

# 24510 is Baltimore, see plot2.R
subsetNEI <- NEI[NEI$fips=="24510", ]
# total emissions per year within each source type
aggregatedTotalByYearAndType <- aggregate(Emissions ~ year + type, subsetNEI, sum)

# Render the line chart (one colored line per source type) to a PNG device.
png("plot3.png", width=640, height=480)
g <- ggplot(aggregatedTotalByYearAndType, aes(year, Emissions, color = type))
g <- g + geom_line() +
  xlab("year") +
  ylab(expression('Total PM'[2.5]*" Emissions")) +
  ggtitle('Total Emissions in Baltimore City, Maryland (fips == "24510") from 1999 to 2008')
print(g)
dev.off()
# Package-level imports for roxygen2. The NULL below gives these tags an
# object to attach to; roxygen2 translates them into NAMESPACE importFrom()
# directives so %>%, tibble(), and the .data pronoun resolve at run time.
#' @importFrom magrittr %>%
#' @importFrom tibble tibble
#' @importFrom rlang .data
NULL
/R/import_packages.R
no_license
cran/rapsimng
R
false
false
97
r
# Package-level imports for roxygen2. The NULL below gives these tags an
# object to attach to; roxygen2 translates them into NAMESPACE importFrom()
# directives so %>%, tibble(), and the .data pronoun resolve at run time.
#' @importFrom magrittr %>%
#' @importFrom tibble tibble
#' @importFrom rlang .data
NULL