content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/em_functions.r \name{beem2biomass} \alias{beem2biomass} \title{beem2biomass} \usage{ beem2biomass(beem) } \arguments{ \item{beem}{a BEEM object} } \description{ extract biomass estimates from a BEEM object } \author{ Chenhao Li, Niranjan Nagarajan }
/man/beem2biomass.Rd
permissive
pythseq/BEEM-static
R
false
true
328
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/em_functions.r \name{beem2biomass} \alias{beem2biomass} \title{beem2biomass} \usage{ beem2biomass(beem) } \arguments{ \item{beem}{a BEEM object} } \description{ extract biomass estimates from a BEEM object } \author{ Chenhao Li, Niranjan Nagarajan }
#' Save list of expression matrices in a format suitable for runPEER.py #' #' @param expression_list list of expression matrices #' @param outdir output directory #' @param file_suffix suffix of the output file #' #' @export #' savePEERData <- function(expression_list, outdir, file_suffix = "exprs"){ #Save eQTL gene expression data to be used with PEER sample_names = names(expression_list) for (sample in sample_names){ matrix = expression_list[[sample]] path = file.path(outdir, paste(sample, file_suffix ,"txt", sep = ".")) print(path) write.table(t(matrix), path, row.names = FALSE, col.names = FALSE, sep = ",") } } importPEERFactors <- function(file_path, design_matrix, remove_mean = TRUE){ #Import output from runPEER.py script back into R peer_factors = read.table(file_path, sep =",") if(remove_mean){ #Remove the first column that contains the mean peer_factors = peer_factors[,2:ncol(peer_factors)] } colnames(peer_factors) = paste("PEER_factor_", c(1:ncol(peer_factors)), sep = "") peer_factors = dplyr::mutate(peer_factors, sample_id = design_matrix$sample_id) %>% dplyr::select(sample_id, everything()) return(peer_factors) }
/seqUtils/R/qtl_PEER.R
permissive
kauralasoo/macrophage-tuQTLs
R
false
false
1,190
r
#' Save list of expression matrices in a format suitable for runPEER.py #' #' @param expression_list list of expression matrices #' @param outdir output directory #' @param file_suffix suffix of the output file #' #' @export #' savePEERData <- function(expression_list, outdir, file_suffix = "exprs"){ #Save eQTL gene expression data to be used with PEER sample_names = names(expression_list) for (sample in sample_names){ matrix = expression_list[[sample]] path = file.path(outdir, paste(sample, file_suffix ,"txt", sep = ".")) print(path) write.table(t(matrix), path, row.names = FALSE, col.names = FALSE, sep = ",") } } importPEERFactors <- function(file_path, design_matrix, remove_mean = TRUE){ #Import output from runPEER.py script back into R peer_factors = read.table(file_path, sep =",") if(remove_mean){ #Remove the first column that contains the mean peer_factors = peer_factors[,2:ncol(peer_factors)] } colnames(peer_factors) = paste("PEER_factor_", c(1:ncol(peer_factors)), sep = "") peer_factors = dplyr::mutate(peer_factors, sample_id = design_matrix$sample_id) %>% dplyr::select(sample_id, everything()) return(peer_factors) }
library(RRTCS) ### Name: ChaudhuriChristofidesDatapij ### Title: Matrix of the second-order inclusion probabilities ### Aliases: ChaudhuriChristofidesDatapij ### Keywords: datasets ### ** Examples data(ChaudhuriChristofidesDatapij) #Now, let select only the first-order inclusion probabilities diag(ChaudhuriChristofidesDatapij)
/data/genthat_extracted_code/RRTCS/examples/ChaudhuriChristofidesDatapij.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
336
r
library(RRTCS) ### Name: ChaudhuriChristofidesDatapij ### Title: Matrix of the second-order inclusion probabilities ### Aliases: ChaudhuriChristofidesDatapij ### Keywords: datasets ### ** Examples data(ChaudhuriChristofidesDatapij) #Now, let select only the first-order inclusion probabilities diag(ChaudhuriChristofidesDatapij)
pacotes <- c("tidyverse","knitr","kableExtra","car","rgl","gridExtra", "PerformanceAnalytics","reshape2","rayshader","psych","pracma", "polynom","rqPen","ggrepel") if(sum(as.numeric(!pacotes %in% installed.packages())) != 0){ instalador <- pacotes[!pacotes %in% installed.packages()] for(i in 1:length(instalador)) { install.packages(instalador, dependencies = T) break()} sapply(pacotes, require, character = T) } else { sapply(pacotes, require, character = T) } #carregando arquivo arquivo1 <- load("notasfatorial.RData") #criando tabela notasfatorial %>% kable() %>% kable_styling(bootstrap_options = "striped", full_width = T, font_size = 12) #fatiando os valores dados <- notasfatorial[1:9,2:5] #calculando a correlação rho <- cor(dados) rho # Observando as correlações entre variáveis chart.Correlation(notasfatorial[,2:5]) # A estatística KMO KMO(r = rho) # O teste de efericidade de Bartlett -------------------------------------- cortest.bartlett(R = rho) #Extraindo os autovalores eigenvalues_rho <- eigen(rho) eigenvalues_rho$values sum(eigenvalues_rho$values) #variancia compartilhada var_compartilhada <- (eigenvalues_rho$values/sum(eigenvalues_rho$values)) var_compartilhada sum(var_compartilhada) principais_componentes <- 1:sum(eigenvalues_rho$values) principais_componentes # Juntando tudo o que temos até o momento: data.frame(principais_componentes = paste0("PC", principais_componentes), eigenvalue = eigenvalues_rho$values, var_compartilhada = var_compartilhada) -> relatorio_eigen # Overview dos resultados até o momento relatorio_eigen %>% kable() %>% kable_styling(bootstrap_options = "striped", full_width = T, font_size = 12) # A determinação dos autovetores a partir de seus respectivos eigenvalues eigenvalues_rho$vectors # Estabelecendo a matriz diagonal de eigenvalues (L2) L2 <- diag(eigenvalues_rho$values) L2 # Assim, com os eigenvectors calculados, podemos provar que V'.rho.V = L2 prova_01 <- t(eigenvalues_rho$vectors) %*% rho %*% eigenvalues_rho$vectors round(x 
= prova_01, digits = 14) # Calculando os scores fatoriais # Relembrando: eigenvalues_rho$values eigenvalues_rho$vectors # De forma simplificada: scores_fatoriais <- t(eigenvalues_rho$vectors)/sqrt(eigenvalues_rho$values) (scores_fatoriais) # Calculando os fatores # O primeiro passo é o de padronização da base de dados pelo procedimento # zscores, utilizando a função scale(): notasfatorial_std <- dados %>% scale() %>% data.frame() # A seguir, vamos criar um objeto que servirá como um receptáculo para os # k fatores (4, no caso estudado) a serem calculados: fatores <- list() # Agora, utilizaremos a função for for(i in 1:nrow(scores_fatoriais)){ fatores[[i]] <- rowSums(x = sweep(x = notasfatorial_std, MARGIN = 2, STATS = scores_fatoriais[i,], FUN = `*`)) ,3 } fatores_df <- data.frame((sapply(X = fatores, FUN = c))) fatores_df fatores_df %>% rename(F1 = X1, F2 = X2, F3 = X3, F4 = X4) %>% kable() %>% kable_styling(bootstrap_options = "striped", full_width = T, font_size = 12) # Podemos verificar que os fatores calculados, de fato, são ortogonais entre # si, isto é, possuem correlações iguais a 0: round(x = cor(fatores_df), digits = 14) # Combinando a base original 'notasfatorial' com o objeto 'fatores_df': notasfatorial_final <- cbind(dados, fatores_df) %>% rename(F1 = X1, F2 = X2, F3 = X3, F4 = X4) # Calculando as correlações entre as variáveis e originais e os fatores correlacoes_entre_fatores <- cor(notasfatorial_final)
/Análise Fatorial - PCA/Exemplo1.R
no_license
Felipe0042/Ciencia_dados_R
R
false
false
4,083
r
pacotes <- c("tidyverse","knitr","kableExtra","car","rgl","gridExtra", "PerformanceAnalytics","reshape2","rayshader","psych","pracma", "polynom","rqPen","ggrepel") if(sum(as.numeric(!pacotes %in% installed.packages())) != 0){ instalador <- pacotes[!pacotes %in% installed.packages()] for(i in 1:length(instalador)) { install.packages(instalador, dependencies = T) break()} sapply(pacotes, require, character = T) } else { sapply(pacotes, require, character = T) } #carregando arquivo arquivo1 <- load("notasfatorial.RData") #criando tabela notasfatorial %>% kable() %>% kable_styling(bootstrap_options = "striped", full_width = T, font_size = 12) #fatiando os valores dados <- notasfatorial[1:9,2:5] #calculando a correlação rho <- cor(dados) rho # Observando as correlações entre variáveis chart.Correlation(notasfatorial[,2:5]) # A estatística KMO KMO(r = rho) # O teste de efericidade de Bartlett -------------------------------------- cortest.bartlett(R = rho) #Extraindo os autovalores eigenvalues_rho <- eigen(rho) eigenvalues_rho$values sum(eigenvalues_rho$values) #variancia compartilhada var_compartilhada <- (eigenvalues_rho$values/sum(eigenvalues_rho$values)) var_compartilhada sum(var_compartilhada) principais_componentes <- 1:sum(eigenvalues_rho$values) principais_componentes # Juntando tudo o que temos até o momento: data.frame(principais_componentes = paste0("PC", principais_componentes), eigenvalue = eigenvalues_rho$values, var_compartilhada = var_compartilhada) -> relatorio_eigen # Overview dos resultados até o momento relatorio_eigen %>% kable() %>% kable_styling(bootstrap_options = "striped", full_width = T, font_size = 12) # A determinação dos autovetores a partir de seus respectivos eigenvalues eigenvalues_rho$vectors # Estabelecendo a matriz diagonal de eigenvalues (L2) L2 <- diag(eigenvalues_rho$values) L2 # Assim, com os eigenvectors calculados, podemos provar que V'.rho.V = L2 prova_01 <- t(eigenvalues_rho$vectors) %*% rho %*% eigenvalues_rho$vectors round(x 
= prova_01, digits = 14) # Calculando os scores fatoriais # Relembrando: eigenvalues_rho$values eigenvalues_rho$vectors # De forma simplificada: scores_fatoriais <- t(eigenvalues_rho$vectors)/sqrt(eigenvalues_rho$values) (scores_fatoriais) # Calculando os fatores # O primeiro passo é o de padronização da base de dados pelo procedimento # zscores, utilizando a função scale(): notasfatorial_std <- dados %>% scale() %>% data.frame() # A seguir, vamos criar um objeto que servirá como um receptáculo para os # k fatores (4, no caso estudado) a serem calculados: fatores <- list() # Agora, utilizaremos a função for for(i in 1:nrow(scores_fatoriais)){ fatores[[i]] <- rowSums(x = sweep(x = notasfatorial_std, MARGIN = 2, STATS = scores_fatoriais[i,], FUN = `*`)) ,3 } fatores_df <- data.frame((sapply(X = fatores, FUN = c))) fatores_df fatores_df %>% rename(F1 = X1, F2 = X2, F3 = X3, F4 = X4) %>% kable() %>% kable_styling(bootstrap_options = "striped", full_width = T, font_size = 12) # Podemos verificar que os fatores calculados, de fato, são ortogonais entre # si, isto é, possuem correlações iguais a 0: round(x = cor(fatores_df), digits = 14) # Combinando a base original 'notasfatorial' com o objeto 'fatores_df': notasfatorial_final <- cbind(dados, fatores_df) %>% rename(F1 = X1, F2 = X2, F3 = X3, F4 = X4) # Calculando as correlações entre as variáveis e originais e os fatores correlacoes_entre_fatores <- cor(notasfatorial_final)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dose_resp_ind.r \name{dose_resp_ind} \alias{dose_resp_ind} \title{Extract the information from the simulation data frame to analyse the dose response effects} \usage{ dose_resp_ind(allsim, dataset, dr = seq(0, 1, 0.1)) } \arguments{ \item{allsim}{dataset with all simulations values} \item{dataset}{dataset with all variables} \item{dr}{a vector with dose response values} } \value{ a data frame with dose response values } \description{ Extract the information from the simulation data frame to analyse the dose response effects }
/man/dose_resp_ind.Rd
no_license
cran/expose
R
false
true
633
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dose_resp_ind.r \name{dose_resp_ind} \alias{dose_resp_ind} \title{Extract the information from the simulation data frame to analyse the dose response effects} \usage{ dose_resp_ind(allsim, dataset, dr = seq(0, 1, 0.1)) } \arguments{ \item{allsim}{dataset with all simulations values} \item{dataset}{dataset with all variables} \item{dr}{a vector with dose response values} } \value{ a data frame with dose response values } \description{ Extract the information from the simulation data frame to analyse the dose response effects }
# https://ourcodingclub.github.io/2017/03/15/mixed-models.html # https://rpsychologist.com/r-guide-longitudinal-lme-lmer # https://stackoverflow.com/questions/49033016/plm-or-lme4-for-random-and-fixed-effects-model-on-panel-data # https://www.princeton.edu/~otorres/Panel101R.pdf # fredr_set_key('d0b9e64aba30b479343a06037a5a10c1') library(rvest) library(httr) library(data.table) library(tidyverse) library(WDI) library(countrycode) library(lmtest) library(tseries) library(plm) library(rvest) library(httr) library(quantmod) # library(fredr) library(scales) library(quantreg) refresh_downloads = T setwd('~\\Public_Policy\\Projects\\Taxes vs. Deficits\\data') ##### Get political data ##### if (refresh_downloads) { the_url = 'https://www.presidency.ucsb.edu/statistics/data/house-and-senate-concurrence-with-presidents' concurrence_table = html_table(GET(the_url) %>% content(), fill = TRUE) concurrence_table[[1]] %>% write.csv('concurrence_with_president.csv') } concurrence_with_president_clean = read_csv('concurrence_with_president_clean.csv') # MIT data lab house_elections = read.csv('1976-2018-house.csv') %>% mutate(candidate_votes = as.numeric(candidatevotes)) %>% filter(stage == 'gen') %>% data.table() senate_elections = read.csv('1976-2018-senate.csv') %>% filter(stage == 'gen') %>% data.table() presidential_elections = read.csv('1976-2016-president.csv') %>% data.table() ##### Combine historical top tax rate data from Brookings and Current OECD Data ##### ## Brookings source: # http://www.taxpolicycenter.org/sites/default/files/legacy/taxfacts/content/PDF/oecd_historical_toprate.pdf ## OECD source: # https://www.oecd.org/tax/tax-policy/tax-database/tax-database-update-note.pdf # get TOP marginal tax rates brookings_oecd_toptax = read_csv('oecd_historical_toprate_raw.csv', na = '--') %>% pivot_longer(cols = paste(c(1975:2013)), names_to = 'Year', values_to = 'TOP_TRATE') current_oecd_toptax = read_csv('TABLE_I7_30102020214927846.csv') %>% pivot_wider(id_cols = 
c('Country', 'Year'), names_from = 'TAX', values_from = 'Value') # get tax revenues to GDP all_oecd_downloads = c("oecd_net_lending.csv" , "oecd_revenue.csv" , "oecd_spending.csv" ) %>% map(read_csv) %>% bind_rows() %>% filter(LOCATION != 'OAVG', MEASURE == 'PC_GDP', SUBJECT == 'TOT') # map country names country_code_name_mappings = data.frame( LOCATION = unique(all_oecd_downloads$LOCATION) ) %>% mutate( Country = map_chr(LOCATION, function(the_country){ countrycode(the_country, 'iso3c', 'country.name') }), Country = recode(Country, `South Korea` = 'Korea') ) stacked_extra_oecd_stats = inner_join(all_oecd_downloads, country_code_name_mappings) # check col classes map_chr(brookings_oecd_toptax, class) map_chr(current_oecd_toptax, class) # check if any countries aren't found in either dataset, may need to change names current_oecd_toptax$Country[!current_oecd_toptax$Country %in% brookings_oecd_toptax$Country] %>% unique() brookings_oecd_toptax$Country[!brookings_oecd_toptax$Country %in% current_oecd_toptax$Country] %>% unique() # check year ranges -- only keep non-overlapping range(brookings_oecd_toptax$Year) range(current_oecd_toptax$Year) # recode columns brookings_oecd_toptax_clean = mutate( brookings_oecd_toptax, Country = recode(Country, `Korea, Republic of` = 'Korea'), Year = as.numeric(Year), TOP_TRATE = TOP_TRATE * 100 # to match OECD ) %>% filter( Year < min(current_oecd_toptax$Year, na.rm = T) ) range(brookings_oecd_toptax_clean$Year) # clear memory rm(brookings_oecd_toptax) # outer join on country and year, keeping the year overlaps from oecd joined_oecd_brookings_toptax = bind_rows( brookings_oecd_toptax_clean, current_oecd_toptax ) %>% arrange( Country, Year ) # take a look at the data ggplot(joined_oecd_brookings_toptax, aes(Year, TOP_TRATE, colour = Country)) + geom_line() # lots of missing values in the middle of the dataset, let's interpolate those joined_oecd_brookings_toptax_dt = data.table(joined_oecd_brookings_toptax) 
joined_oecd_brookings_toptax_filled = joined_oecd_brookings_toptax_dt[, { # cat("\nCountry ==",.BY[[1]],"\n\n") non_na_years = Year[!is.na(TOP_TRATE)] na_years = Year[is.na(TOP_TRATE)] # find the most recent existing value filled_vals = map_dbl(na_years, function(this_year){ years_to_check = non_na_years[non_na_years < this_year] if (length(years_to_check) == 0) { return(NA) } else { closest_year = max(years_to_check) return(TOP_TRATE[Year == closest_year]) } }) filled_df = data.frame( Year = na_years, TOP_TRATE = filled_vals ) full_df = data.frame( Year, TOP_TRATE ) %>% filter(Year %in% non_na_years) stacked_df = bind_rows(filled_df, full_df) %>% arrange(Year) list( Year = stacked_df$Year, TOP_TRATE = stacked_df$TOP_TRATE ) }, by = Country] # add back other variables joined_oecd_brookings_toptax_fin = inner_join( select(joined_oecd_brookings_toptax, -TOP_TRATE), joined_oecd_brookings_toptax_filled ) rm(joined_oecd_brookings_toptax, joined_oecd_brookings_toptax_dt) # looks much better now ggplot(joined_oecd_brookings_toptax_filled, aes(Year, TOP_TRATE, colour = Country)) + geom_line() ##### Get fiscal data from the world bank ##### # WDIsearch('debt') %>% View() start_year = min(joined_oecd_brookings_toptax_filled$Year, na.rm = T) end_year = max(joined_oecd_brookings_toptax_filled$Year, na.rm = T) wdi_indicators = c('NY.GDP.PCAP.KD.ZG', 'GC.TAX.TOTL.GD.ZS', 'DT.DOD.DECT.GN.ZS', 'GC.TAX.YPKG.RV.ZS', 'GC.XPN.INTP.RV.ZS', 'GC.REV.TOTL.CD', 'PAY.TAX.RK.DB19', 'NY.GDP.MKTP.KD.ZG', 'NY.GDP.PCAP.KD', 'GC.DOD.TOTL.GD.ZS') wdi_names = map(wdi_indicators, function(ind){ WDIsearch(ind, field = 'indicator') %>% t() %>% as.data.frame() }) %>% bind_rows() if (refresh_downloads | !file.exists('wdi_download_long.csv')) { wdi_download = WDI(indicator = wdi_indicators, start = start_year, end = end_year, extra = T) %>% filter( income == 'High income' ) wdi_download_long = pivot_longer(wdi_download, cols = wdi_indicators, names_to = 'indicator') %>% inner_join(wdi_names) %>% mutate( 
country = recode(country, `Korea, Rep.` = 'Korea') ) write.csv(wdi_download_long, 'wdi_download_long.csv', row.names = F) } else { wdi_download_long = read_csv('wdi_download_long.csv') } # check country names # wdi_download_long$country[!wdi_download_long$country %in% joined_oecd_brookings_toptax_filled$Country] %>% unique() # joined_oecd_brookings_toptax_filled$Country[!joined_oecd_brookings_toptax_filled$Country %in% wdi_download_long$country] %>% unique() ##### Combine all world bank and OECD data together ##### stacked_oecd_wdi_data = bind_rows( joined_oecd_brookings_toptax_fin %>% mutate(Indicator = 'top_tax_rate') %>% select(Country, Year, Value = TOP_TRATE, Indicator), stacked_extra_oecd_stats %>% select(Country, Year = TIME, Value, Indicator = INDICATOR) ) %>% bind_rows( wdi_download_long %>% select(Country = country, Year = year, Indicator = indicator, Value = value) ) %>% arrange( Country, Indicator, Year ) %>% filter(!is.na(Country)) # test uniqueness of observations unique_obs = select(stacked_oecd_wdi_data, Country, Year, Indicator) %>% unique() stopifnot(nrow(unique_obs) == nrow(stacked_oecd_wdi_data)) stacked_oecd_wdi_data_dt = data.table(stacked_oecd_wdi_data) duped_dt = stacked_oecd_wdi_data_dt[duplicated(stacked_oecd_wdi_data_dt, by = c('Country', 'Year', 'Indicator')),] inner_join(stacked_oecd_wdi_data, duped_dt) %>% dim() ##### Compute new variables ##### # for each country and indicator, compute: # Lags # First differences stacked_oecd_wdi_data_dt = data.table(stacked_oecd_wdi_data) stacked_oecd_wdi_data_lags_diffs = stacked_oecd_wdi_data_dt[, { last_value = dplyr::lag(Value, 1) diff_value = Value - last_value lag_diff_value = dplyr::lag(diff_value, 1) pct_change = diff_value / last_value lag_pct_change = dplyr::lag(pct_change, 1) list( Year = Year, value = Value, # lowercase now last_value = last_value, diff_value = diff_value, pct_change = pct_change, lag_pct_change = lag_pct_change, lag_diff_value = lag_diff_value ) }, by = list(Country, 
Indicator)] wide_oecd_wdi_data = pivot_wider( stacked_oecd_wdi_data_lags_diffs, id_cols = c('Country', 'Year'), names_from = 'Indicator', values_from = c('value', 'last_value', 'diff_value', 'pct_change', 'lag_pct_change', 'lag_diff_value') ) %>% arrange( Country, Year ) write.csv(stacked_oecd_wdi_data_lags_diffs, 'stacked_oecd_wdi_data_lags_diffs.csv', row.names = F) write.csv(wide_oecd_wdi_data, 'wide_oecd_wdi_data.csv', row.names = F)
/Projects/Taxes vs. Deficits/(1)_get_political_fiscal_economic_data.r
permissive
vishalbelsare/Public_Policy
R
false
false
8,936
r
# https://ourcodingclub.github.io/2017/03/15/mixed-models.html # https://rpsychologist.com/r-guide-longitudinal-lme-lmer # https://stackoverflow.com/questions/49033016/plm-or-lme4-for-random-and-fixed-effects-model-on-panel-data # https://www.princeton.edu/~otorres/Panel101R.pdf # fredr_set_key('d0b9e64aba30b479343a06037a5a10c1') library(rvest) library(httr) library(data.table) library(tidyverse) library(WDI) library(countrycode) library(lmtest) library(tseries) library(plm) library(rvest) library(httr) library(quantmod) # library(fredr) library(scales) library(quantreg) refresh_downloads = T setwd('~\\Public_Policy\\Projects\\Taxes vs. Deficits\\data') ##### Get political data ##### if (refresh_downloads) { the_url = 'https://www.presidency.ucsb.edu/statistics/data/house-and-senate-concurrence-with-presidents' concurrence_table = html_table(GET(the_url) %>% content(), fill = TRUE) concurrence_table[[1]] %>% write.csv('concurrence_with_president.csv') } concurrence_with_president_clean = read_csv('concurrence_with_president_clean.csv') # MIT data lab house_elections = read.csv('1976-2018-house.csv') %>% mutate(candidate_votes = as.numeric(candidatevotes)) %>% filter(stage == 'gen') %>% data.table() senate_elections = read.csv('1976-2018-senate.csv') %>% filter(stage == 'gen') %>% data.table() presidential_elections = read.csv('1976-2016-president.csv') %>% data.table() ##### Combine historical top tax rate data from Brookings and Current OECD Data ##### ## Brookings source: # http://www.taxpolicycenter.org/sites/default/files/legacy/taxfacts/content/PDF/oecd_historical_toprate.pdf ## OECD source: # https://www.oecd.org/tax/tax-policy/tax-database/tax-database-update-note.pdf # get TOP marginal tax rates brookings_oecd_toptax = read_csv('oecd_historical_toprate_raw.csv', na = '--') %>% pivot_longer(cols = paste(c(1975:2013)), names_to = 'Year', values_to = 'TOP_TRATE') current_oecd_toptax = read_csv('TABLE_I7_30102020214927846.csv') %>% pivot_wider(id_cols = 
c('Country', 'Year'), names_from = 'TAX', values_from = 'Value') # get tax revenues to GDP all_oecd_downloads = c("oecd_net_lending.csv" , "oecd_revenue.csv" , "oecd_spending.csv" ) %>% map(read_csv) %>% bind_rows() %>% filter(LOCATION != 'OAVG', MEASURE == 'PC_GDP', SUBJECT == 'TOT') # map country names country_code_name_mappings = data.frame( LOCATION = unique(all_oecd_downloads$LOCATION) ) %>% mutate( Country = map_chr(LOCATION, function(the_country){ countrycode(the_country, 'iso3c', 'country.name') }), Country = recode(Country, `South Korea` = 'Korea') ) stacked_extra_oecd_stats = inner_join(all_oecd_downloads, country_code_name_mappings) # check col classes map_chr(brookings_oecd_toptax, class) map_chr(current_oecd_toptax, class) # check if any countries aren't found in either dataset, may need to change names current_oecd_toptax$Country[!current_oecd_toptax$Country %in% brookings_oecd_toptax$Country] %>% unique() brookings_oecd_toptax$Country[!brookings_oecd_toptax$Country %in% current_oecd_toptax$Country] %>% unique() # check year ranges -- only keep non-overlapping range(brookings_oecd_toptax$Year) range(current_oecd_toptax$Year) # recode columns brookings_oecd_toptax_clean = mutate( brookings_oecd_toptax, Country = recode(Country, `Korea, Republic of` = 'Korea'), Year = as.numeric(Year), TOP_TRATE = TOP_TRATE * 100 # to match OECD ) %>% filter( Year < min(current_oecd_toptax$Year, na.rm = T) ) range(brookings_oecd_toptax_clean$Year) # clear memory rm(brookings_oecd_toptax) # outer join on country and year, keeping the year overlaps from oecd joined_oecd_brookings_toptax = bind_rows( brookings_oecd_toptax_clean, current_oecd_toptax ) %>% arrange( Country, Year ) # take a look at the data ggplot(joined_oecd_brookings_toptax, aes(Year, TOP_TRATE, colour = Country)) + geom_line() # lots of missing values in the middle of the dataset, let's interpolate those joined_oecd_brookings_toptax_dt = data.table(joined_oecd_brookings_toptax) 
joined_oecd_brookings_toptax_filled = joined_oecd_brookings_toptax_dt[, { # cat("\nCountry ==",.BY[[1]],"\n\n") non_na_years = Year[!is.na(TOP_TRATE)] na_years = Year[is.na(TOP_TRATE)] # find the most recent existing value filled_vals = map_dbl(na_years, function(this_year){ years_to_check = non_na_years[non_na_years < this_year] if (length(years_to_check) == 0) { return(NA) } else { closest_year = max(years_to_check) return(TOP_TRATE[Year == closest_year]) } }) filled_df = data.frame( Year = na_years, TOP_TRATE = filled_vals ) full_df = data.frame( Year, TOP_TRATE ) %>% filter(Year %in% non_na_years) stacked_df = bind_rows(filled_df, full_df) %>% arrange(Year) list( Year = stacked_df$Year, TOP_TRATE = stacked_df$TOP_TRATE ) }, by = Country] # add back other variables joined_oecd_brookings_toptax_fin = inner_join( select(joined_oecd_brookings_toptax, -TOP_TRATE), joined_oecd_brookings_toptax_filled ) rm(joined_oecd_brookings_toptax, joined_oecd_brookings_toptax_dt) # looks much better now ggplot(joined_oecd_brookings_toptax_filled, aes(Year, TOP_TRATE, colour = Country)) + geom_line() ##### Get fiscal data from the world bank ##### # WDIsearch('debt') %>% View() start_year = min(joined_oecd_brookings_toptax_filled$Year, na.rm = T) end_year = max(joined_oecd_brookings_toptax_filled$Year, na.rm = T) wdi_indicators = c('NY.GDP.PCAP.KD.ZG', 'GC.TAX.TOTL.GD.ZS', 'DT.DOD.DECT.GN.ZS', 'GC.TAX.YPKG.RV.ZS', 'GC.XPN.INTP.RV.ZS', 'GC.REV.TOTL.CD', 'PAY.TAX.RK.DB19', 'NY.GDP.MKTP.KD.ZG', 'NY.GDP.PCAP.KD', 'GC.DOD.TOTL.GD.ZS') wdi_names = map(wdi_indicators, function(ind){ WDIsearch(ind, field = 'indicator') %>% t() %>% as.data.frame() }) %>% bind_rows() if (refresh_downloads | !file.exists('wdi_download_long.csv')) { wdi_download = WDI(indicator = wdi_indicators, start = start_year, end = end_year, extra = T) %>% filter( income == 'High income' ) wdi_download_long = pivot_longer(wdi_download, cols = wdi_indicators, names_to = 'indicator') %>% inner_join(wdi_names) %>% mutate( 
country = recode(country, `Korea, Rep.` = 'Korea') ) write.csv(wdi_download_long, 'wdi_download_long.csv', row.names = F) } else { wdi_download_long = read_csv('wdi_download_long.csv') } # check country names # wdi_download_long$country[!wdi_download_long$country %in% joined_oecd_brookings_toptax_filled$Country] %>% unique() # joined_oecd_brookings_toptax_filled$Country[!joined_oecd_brookings_toptax_filled$Country %in% wdi_download_long$country] %>% unique() ##### Combine all world bank and OECD data together ##### stacked_oecd_wdi_data = bind_rows( joined_oecd_brookings_toptax_fin %>% mutate(Indicator = 'top_tax_rate') %>% select(Country, Year, Value = TOP_TRATE, Indicator), stacked_extra_oecd_stats %>% select(Country, Year = TIME, Value, Indicator = INDICATOR) ) %>% bind_rows( wdi_download_long %>% select(Country = country, Year = year, Indicator = indicator, Value = value) ) %>% arrange( Country, Indicator, Year ) %>% filter(!is.na(Country)) # test uniqueness of observations unique_obs = select(stacked_oecd_wdi_data, Country, Year, Indicator) %>% unique() stopifnot(nrow(unique_obs) == nrow(stacked_oecd_wdi_data)) stacked_oecd_wdi_data_dt = data.table(stacked_oecd_wdi_data) duped_dt = stacked_oecd_wdi_data_dt[duplicated(stacked_oecd_wdi_data_dt, by = c('Country', 'Year', 'Indicator')),] inner_join(stacked_oecd_wdi_data, duped_dt) %>% dim() ##### Compute new variables ##### # for each country and indicator, compute: # Lags # First differences stacked_oecd_wdi_data_dt = data.table(stacked_oecd_wdi_data) stacked_oecd_wdi_data_lags_diffs = stacked_oecd_wdi_data_dt[, { last_value = dplyr::lag(Value, 1) diff_value = Value - last_value lag_diff_value = dplyr::lag(diff_value, 1) pct_change = diff_value / last_value lag_pct_change = dplyr::lag(pct_change, 1) list( Year = Year, value = Value, # lowercase now last_value = last_value, diff_value = diff_value, pct_change = pct_change, lag_pct_change = lag_pct_change, lag_diff_value = lag_diff_value ) }, by = list(Country, 
Indicator)] wide_oecd_wdi_data = pivot_wider( stacked_oecd_wdi_data_lags_diffs, id_cols = c('Country', 'Year'), names_from = 'Indicator', values_from = c('value', 'last_value', 'diff_value', 'pct_change', 'lag_pct_change', 'lag_diff_value') ) %>% arrange( Country, Year ) write.csv(stacked_oecd_wdi_data_lags_diffs, 'stacked_oecd_wdi_data_lags_diffs.csv', row.names = F) write.csv(wide_oecd_wdi_data, 'wide_oecd_wdi_data.csv', row.names = F)
#Surya Aenuganti Ushasri #Assignment: World Happiness Analysis #Loading the data worldh <- read.csv("WH_2017.csv") #Loading packages required for the analysis library(plyr) library(plotly) library(dplyr) library(tidyverse) library(lubridate) library(caTools) library(ggplot2) library(ggthemes) library(reshape2) library(data.table) library(tidyr) library(corrgram) library(corrplot) library(formattable) library(cowplot) library(ggpubr) library(plot3D) #View the data #View(worldh) #Displays the first few rows of the dataset head(worldh) #Display the structure of the attributes str(worldh) # Changing the name of columns colnames (worldh) <- c("Country", "Happiness.Rank", "Happiness.Score", "Whisker.High", "Whisker.Low", "Economy", "Family", "Life.Expectancy", "Freedom", "Generosity", "Trust", "Dystopia.Residual") # Deleting unnecessary columns (Whisker.high and Whisker.low) worldh <- worldh[, -c(4,5)] # Finding the correlation between numerical columns Num.cols <- sapply(worldh, is.numeric) Cor.data <- cor(worldh[, Num.cols]) corrplot(Cor.data, method = 'color') #Analysis: We can see there is an inverse correlation between "Happiness Rank" and all the other numerical variables. In other words, the lower the happiness rank, the higher the happiness score, and the higher the other seven factors that contribute to happiness. So let's remove the happiness rank, and see the correlation again. # Create a correlation plot newdatacor = cor(worldh[c(3:10)]) corrplot(newdatacor, method = "number") #Analysis: In the above cor plot, Economy, life expectancy, and family play the most significant role in contributing to happiness. #Trust and generosity have the lowest impact on the happiness score. 
#Plotting ScatterPLot plot_ly(data = worldh, x=~Economy, y=~Happiness.Score, type = "scatter", text = ~paste("Country:", Country)) %>% layout(title = "Happiness and GDP", xaxis = list(title = "GDP per Capita"), yaxis = list(title = "Happiness Score")) #Analysis: This interactive scatterplot shows that there is a strong positive correlation between GDP and Happiness. #Let's do multiple Regression dat <- worldh[c("Happiness.Score","Economy","Generosity")] head(dat) plot(dat) #It seems like there is a positive correlation between economy and happiness score but this is not true between happiness score #and generosity. #3D plot of same scatter3D(dat$Generosity, dat$Economy, dat$Happiness.Score, phi = 0, bty = "g", pch = 20, cex = 2, ticktype = "detailed", main = "Happiness data", xlab = "Generosity", ylab ="Economy", zlab = "Happiness.Score") #From the scatter plot we cannot determine that combination of high economy and generosity leads to greater happiness score. #This is something we have to conclude after analyzing the effect of these 2 taken together.
/MVA_WorldHappinessAnalysis.R
no_license
ausurya/Multivariate-Analysis
R
false
false
3,022
r
#Surya Aenuganti Ushasri #Assignment: World Happiness Analysis #Loading the data worldh <- read.csv("WH_2017.csv") #Loading packages required for the analysis library(plyr) library(plotly) library(dplyr) library(tidyverse) library(lubridate) library(caTools) library(ggplot2) library(ggthemes) library(reshape2) library(data.table) library(tidyr) library(corrgram) library(corrplot) library(formattable) library(cowplot) library(ggpubr) library(plot3D) #View the data #View(worldh) #Displays the first few rows of the dataset head(worldh) #Display the structure of the attributes str(worldh) # Changing the name of columns colnames (worldh) <- c("Country", "Happiness.Rank", "Happiness.Score", "Whisker.High", "Whisker.Low", "Economy", "Family", "Life.Expectancy", "Freedom", "Generosity", "Trust", "Dystopia.Residual") # Deleting unnecessary columns (Whisker.high and Whisker.low) worldh <- worldh[, -c(4,5)] # Finding the correlation between numerical columns Num.cols <- sapply(worldh, is.numeric) Cor.data <- cor(worldh[, Num.cols]) corrplot(Cor.data, method = 'color') #Analysis: We can see there is an inverse correlation between "Happiness Rank" and all the other numerical variables. In other words, the lower the happiness rank, the higher the happiness score, and the higher the other seven factors that contribute to happiness. So let's remove the happiness rank, and see the correlation again. # Create a correlation plot newdatacor = cor(worldh[c(3:10)]) corrplot(newdatacor, method = "number") #Analysis: In the above cor plot, Economy, life expectancy, and family play the most significant role in contributing to happiness. #Trust and generosity have the lowest impact on the happiness score. 
#Plotting ScatterPLot plot_ly(data = worldh, x=~Economy, y=~Happiness.Score, type = "scatter", text = ~paste("Country:", Country)) %>% layout(title = "Happiness and GDP", xaxis = list(title = "GDP per Capita"), yaxis = list(title = "Happiness Score")) #Analysis: This interactive scatterplot shows that there is a strong positive correlation between GDP and Happiness. #Let's do multiple Regression dat <- worldh[c("Happiness.Score","Economy","Generosity")] head(dat) plot(dat) #It seems like there is a positive correlation between economy and happiness score but this is not true between happiness score #and generosity. #3D plot of same scatter3D(dat$Generosity, dat$Economy, dat$Happiness.Score, phi = 0, bty = "g", pch = 20, cex = 2, ticktype = "detailed", main = "Happiness data", xlab = "Generosity", ylab ="Economy", zlab = "Happiness.Score") #From the scatter plot we cannot determine that combination of high economy and generosity leads to greater happiness score. #This is something we have to conclude after analyzing the effect of these 2 taken together.
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/getHSMetrics.R \name{getHSMetrics} \alias{getHSMetrics} \title{Get HS Metrics} \usage{ getHSMetrics(reports, src = "PANEL_TUMOR_HSMETRICS") } \arguments{ \item{reports}{data frame with reports} \item{src}{string with name of column name to use (from reports)} } \value{ A data table with hs metrics } \description{ Get HS Metrics } \examples{ #dat <- getHSMetrics(reports, src="PANEL_TUMOR_HSMETRICS") #dat <- getHSMetrics(reports, src="PANEL_NORMAL_HSMETRICS") #dat <- getHSMetrics(reports, src="RNASEQCAP_HSMETRICS") }
/man/getHSMetrics.Rd
permissive
dakl/clinseqr
R
false
false
609
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/getHSMetrics.R \name{getHSMetrics} \alias{getHSMetrics} \title{Get HS Metrics} \usage{ getHSMetrics(reports, src = "PANEL_TUMOR_HSMETRICS") } \arguments{ \item{reports}{data frame with reports} \item{src}{string with name of column name to use (from reports)} } \value{ A data table with hs metrics } \description{ Get HS Metrics } \examples{ #dat <- getHSMetrics(reports, src="PANEL_TUMOR_HSMETRICS") #dat <- getHSMetrics(reports, src="PANEL_NORMAL_HSMETRICS") #dat <- getHSMetrics(reports, src="RNASEQCAP_HSMETRICS") }
#' Adjust p-values for multiple comparisons #' #' Depending upon the \code{pval_adjust} method selected, the supplied p_values are compared against an adjusted \code{pval_thresh} value or the provided #' means are used to compute new statistics, p-values are computed and compared against the provided \code{pval_thresh}. A \code{data.frame} that indicates which #' of the tests are significant, 1 if significant or 0 if insignificant. If \code{means} is also provided and the p-value is signficant then the direction #' of the change is indicated by the sign on 1, i.e., means<0 and p_value<pval_thresh will return -1, similarly for means>0. #' #' @param p_values A matrix (or \code{data.frame}) of p-values to be adjusted. #' @param diff_mean A matrix (or \code{data.frame}) of groups means that are to be compared #' @param t_stats A matrix (or \code{data.frame}) of t-test statistics resulting from from standard procedures #' @param sizes A matrix (or \code{data.frame}) of group sizes #' @param pval_adjust character vector specifying the type of multiple comparisons adjustment to implement. A NULL value corresponds to no adjustment. Valid options include: holm, bonferroni, dunnett, tukey or none. 
#' #' @return a data frame with the following columns: group means, global G-test statistic and corresponding p-value #' #' @author Bryan Stanfill #' @examples #' dontrun{ #' library(pmartRdata) #' library(pmartR) #' mypepData <- edata_transform(omicsData = pep_object, data_scale = "log2") #' mypepData <- group_designation(omicsData = mypepData, main_effects = c("Condition")) #' group_df <- attr(mypepData, "group_DF") #' imdanova_Filt <- imdanova_filter(omicsData = mypepData) #' mypepData <- applyFilt(filter_object = imdanova_Filt, omicsData = mypepData, min_nonmiss_anova=2) #' anova_res <- anova_test(omicsData = mypepData) #' #' adjusted_pvalues <- p_adjustment_anova(p_values = anova_res$Fold_change_pvalues, diff_mean = anova_res$Results[, grep("Mean", names(anova_res$Results))]) #' } #' @export p_adjustment_anova <- function(p_values = NULL, diff_mean = NULL, t_stats = NULL, sizes = NULL, pval_adjust = "None"){ #Match provided 'pval_adjust' to available options pval_adjust <- try(match.arg(tolower(pval_adjust),c("bonferroni","none","tukey","dunnett","holm")),silent=TRUE) if(class(pval_adjust)=='try-error') stop("Provided 'pval_adjust' argument is invalid, please select 'holm', 'bonferroni', 'tukey', 'dunnett' or 'none'.") if(pval_adjust=="tukey" | pval_adjust=="dunnett"){ #Tukey-Kramer statistics are t-statistic/sqrt(2) if(is.null(t_stats) | is.null(sizes)){ stop("The standard t-tests and group sizes need to be supplied in order to apply Tukey-Kramer adjustment.") } n_compares <- ncol(t_stats) if(n_compares==1){ #Tukey adjustment is not done if only one comparison is provided pval_adjust <- 'none' }else{ if(pval_adjust=="tukey"){ tukey_stats <- data.matrix(t_stats*sqrt(2)) #Tukey only needs sizes for the rows, not individual group sizes if(is.data.frame(sizes)){ sizes <- rowSums(data.matrix(sizes)) }else if(is.matrix(sizes)){ sizes <- rowSums(sizes) } #Rcpp::sourceCpp('src/tukey_helper.cpp') #Use for debugging adjusted_pvals <- ptukey_speed(tukey_stats,sizes) 
}else{#Dunnett adjustment - Needs to be sped up adjusted_pvals <- matrix(NA,nrow(t_stats), ncol(t_stats)) #colnames(adjusted_pvals) <- colnames(t_stats) for(i in 1:nrow(adjusted_pvals)){ k <- length(which(!is.na(p_values[i,]))) if(k>0){ dfi <- sum(sizes[i,])-k rm_nas <- which(is.na(p_values[i,])) #Modified version of an example from "Additional multcomp Examples" viggnette of the multcomp package #Until we add covariates the correlation matrix is assumed to be the identity if(length(rm_nas)>0){ adjusted_pvals[i,-rm_nas] <- as.numeric(sapply(abs(t_stats[i,-rm_nas]), function(x,k,df) 1 - mvtnorm::pmvt(-rep(x, k), rep(x, k), df = df),k=k,df=dfi)) }else{ adjusted_pvals[i,] <- as.numeric(sapply(abs(t_stats[i,]), function(x,k,df) 1 - mvtnorm::pmvt(-rep(x, k), rep(x, k), df = df),k=k,df=dfi)) } } } } colnames(adjusted_pvals) <- colnames(t_stats) colnames(adjusted_pvals) <- gsub("Test-Stat","p-value",colnames(adjusted_pvals)) #This is a band-aid right now, may need to make more general later } } #Don't make this an "else if" because pval_adjust can be changed to 'none' if n_compares==1 if(pval_adjust%in%c('none',"bonferroni","holm")){ #p_values needs to be supplied to apply bonferroni adjustment, if it's NULL tell them that's a problem if(is.null(p_values)){ stop("The `p_values` argument must be supplied to perform the selected `pval_adjust` method") } if(is.null(dim(p_values)) || ncol(p_values)==1){ # leaving this as "||" because only evaluating the 1st argument is fine in this case, as it returns TRUE when p_values is a vector (and thus ncol(p_values) does not compute and using a "|" gives an error) pval_adjust='none' } if(pval_adjust !='holm'){ #For bonferroni adjustment, multiply p_values by number of comparisons (columns in p_values) else do no adjustment multiplier <- ifelse(pval_adjust=="bonferroni",ncol(p_values),1) adjusted_pvals <- multiplier*p_values }else{ #Rcpp::sourceCpp('src/holm_adjustment.cpp') #For debugging #source('~/pmartR/R/support_p_adjustment.R') 
#For debugging adjusted_pvals <- t(apply(p_values,1,ranked_holm_cpp)) #NaN p-values should stay NaN nan_pvals <- lapply(p_values,is.nan) for(i in 1:ncol(adjusted_pvals)){ adjusted_pvals[,i][nan_pvals[[i]]] <- NaN } } } ############## #Return the adjusted p-values return(pvalues=data.frame(adjusted_pvals)) } #Can be used to replace "adjusted_pvals <- ptukey_speed(tukey_stats,sizes)" if necessary #adjusted_pvals <- matrix(NA,nrow(p_values), ncol(p_values)) #for(i in 1:nrow(tukey_stats)){ # adjusted_pvals[i,] <- ptukey(abs(tukey_stats[i,]), nmeans = n_compares, df = sizes[i]-n_compares,lower.tail=FALSE) #}
/R/p_value_adjustment.R
permissive
rarichardson92/pmartR
R
false
false
6,373
r
#' Adjust p-values for multiple comparisons #' #' Depending upon the \code{pval_adjust} method selected, the supplied p_values are compared against an adjusted \code{pval_thresh} value or the provided #' means are used to compute new statistics, p-values are computed and compared against the provided \code{pval_thresh}. A \code{data.frame} that indicates which #' of the tests are significant, 1 if significant or 0 if insignificant. If \code{means} is also provided and the p-value is signficant then the direction #' of the change is indicated by the sign on 1, i.e., means<0 and p_value<pval_thresh will return -1, similarly for means>0. #' #' @param p_values A matrix (or \code{data.frame}) of p-values to be adjusted. #' @param diff_mean A matrix (or \code{data.frame}) of groups means that are to be compared #' @param t_stats A matrix (or \code{data.frame}) of t-test statistics resulting from from standard procedures #' @param sizes A matrix (or \code{data.frame}) of group sizes #' @param pval_adjust character vector specifying the type of multiple comparisons adjustment to implement. A NULL value corresponds to no adjustment. Valid options include: holm, bonferroni, dunnett, tukey or none. 
#' #' @return a data frame with the following columns: group means, global G-test statistic and corresponding p-value #' #' @author Bryan Stanfill #' @examples #' dontrun{ #' library(pmartRdata) #' library(pmartR) #' mypepData <- edata_transform(omicsData = pep_object, data_scale = "log2") #' mypepData <- group_designation(omicsData = mypepData, main_effects = c("Condition")) #' group_df <- attr(mypepData, "group_DF") #' imdanova_Filt <- imdanova_filter(omicsData = mypepData) #' mypepData <- applyFilt(filter_object = imdanova_Filt, omicsData = mypepData, min_nonmiss_anova=2) #' anova_res <- anova_test(omicsData = mypepData) #' #' adjusted_pvalues <- p_adjustment_anova(p_values = anova_res$Fold_change_pvalues, diff_mean = anova_res$Results[, grep("Mean", names(anova_res$Results))]) #' } #' @export p_adjustment_anova <- function(p_values = NULL, diff_mean = NULL, t_stats = NULL, sizes = NULL, pval_adjust = "None"){ #Match provided 'pval_adjust' to available options pval_adjust <- try(match.arg(tolower(pval_adjust),c("bonferroni","none","tukey","dunnett","holm")),silent=TRUE) if(class(pval_adjust)=='try-error') stop("Provided 'pval_adjust' argument is invalid, please select 'holm', 'bonferroni', 'tukey', 'dunnett' or 'none'.") if(pval_adjust=="tukey" | pval_adjust=="dunnett"){ #Tukey-Kramer statistics are t-statistic/sqrt(2) if(is.null(t_stats) | is.null(sizes)){ stop("The standard t-tests and group sizes need to be supplied in order to apply Tukey-Kramer adjustment.") } n_compares <- ncol(t_stats) if(n_compares==1){ #Tukey adjustment is not done if only one comparison is provided pval_adjust <- 'none' }else{ if(pval_adjust=="tukey"){ tukey_stats <- data.matrix(t_stats*sqrt(2)) #Tukey only needs sizes for the rows, not individual group sizes if(is.data.frame(sizes)){ sizes <- rowSums(data.matrix(sizes)) }else if(is.matrix(sizes)){ sizes <- rowSums(sizes) } #Rcpp::sourceCpp('src/tukey_helper.cpp') #Use for debugging adjusted_pvals <- ptukey_speed(tukey_stats,sizes) 
}else{#Dunnett adjustment - Needs to be sped up adjusted_pvals <- matrix(NA,nrow(t_stats), ncol(t_stats)) #colnames(adjusted_pvals) <- colnames(t_stats) for(i in 1:nrow(adjusted_pvals)){ k <- length(which(!is.na(p_values[i,]))) if(k>0){ dfi <- sum(sizes[i,])-k rm_nas <- which(is.na(p_values[i,])) #Modified version of an example from "Additional multcomp Examples" viggnette of the multcomp package #Until we add covariates the correlation matrix is assumed to be the identity if(length(rm_nas)>0){ adjusted_pvals[i,-rm_nas] <- as.numeric(sapply(abs(t_stats[i,-rm_nas]), function(x,k,df) 1 - mvtnorm::pmvt(-rep(x, k), rep(x, k), df = df),k=k,df=dfi)) }else{ adjusted_pvals[i,] <- as.numeric(sapply(abs(t_stats[i,]), function(x,k,df) 1 - mvtnorm::pmvt(-rep(x, k), rep(x, k), df = df),k=k,df=dfi)) } } } } colnames(adjusted_pvals) <- colnames(t_stats) colnames(adjusted_pvals) <- gsub("Test-Stat","p-value",colnames(adjusted_pvals)) #This is a band-aid right now, may need to make more general later } } #Don't make this an "else if" because pval_adjust can be changed to 'none' if n_compares==1 if(pval_adjust%in%c('none',"bonferroni","holm")){ #p_values needs to be supplied to apply bonferroni adjustment, if it's NULL tell them that's a problem if(is.null(p_values)){ stop("The `p_values` argument must be supplied to perform the selected `pval_adjust` method") } if(is.null(dim(p_values)) || ncol(p_values)==1){ # leaving this as "||" because only evaluating the 1st argument is fine in this case, as it returns TRUE when p_values is a vector (and thus ncol(p_values) does not compute and using a "|" gives an error) pval_adjust='none' } if(pval_adjust !='holm'){ #For bonferroni adjustment, multiply p_values by number of comparisons (columns in p_values) else do no adjustment multiplier <- ifelse(pval_adjust=="bonferroni",ncol(p_values),1) adjusted_pvals <- multiplier*p_values }else{ #Rcpp::sourceCpp('src/holm_adjustment.cpp') #For debugging #source('~/pmartR/R/support_p_adjustment.R') 
#For debugging adjusted_pvals <- t(apply(p_values,1,ranked_holm_cpp)) #NaN p-values should stay NaN nan_pvals <- lapply(p_values,is.nan) for(i in 1:ncol(adjusted_pvals)){ adjusted_pvals[,i][nan_pvals[[i]]] <- NaN } } } ############## #Return the adjusted p-values return(pvalues=data.frame(adjusted_pvals)) } #Can be used to replace "adjusted_pvals <- ptukey_speed(tukey_stats,sizes)" if necessary #adjusted_pvals <- matrix(NA,nrow(p_values), ncol(p_values)) #for(i in 1:nrow(tukey_stats)){ # adjusted_pvals[i,] <- ptukey(abs(tukey_stats[i,]), nmeans = n_compares, df = sizes[i]-n_compares,lower.tail=FALSE) #}
#' Small contact matrix #' #' Example of a contact matrix. #' #' @format A 20x20 symmetric nonnegative integer-valued matrix. #' "C" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 20 and resolutions 25kb (obtained from the Gene Expression Omnibus). #' #' @format A 2498x2498 symmetric nonnegative integer-valued matrix. #' "IMR90_25kb_chr20" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 21 and resolutions 25kb (obtained from the Gene Expression Omnibus). #' #' @format A 1878x1878 symmetric nonnegative integer-valued matrix. #' "IMR90_25kb_chr21" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 20 and resolutions 50kb (obtained from the Gene Expression Omnibus). #' #' @format A 1249x1249 symmetric nonnegative integer-valued matrix. #' "IMR90_50kb_chr20" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 21 and resolutions 50kb (obtained from the Gene Expression Omnibus). #' #' @format A 939x939 symmetric nonnegative integer-valued matrix. #' "IMR90_50kb_chr21" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 20 and resolutions 100kb (obtained from the Gene Expression Omnibus). #' #' @format A 625x625 symmetric nonnegative integer-valued matrix. #' "IMR90_100kb_chr20" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 21 and resolutions 100kb (obtained from the Gene Expression Omnibus). #' #' @format A 470x470 symmetric nonnegative integer-valued matrix. #' "IMR90_100kb_chr21"
/R/data.R
no_license
ElenaTuzhilina/PoisMS
R
false
false
1,597
r
#' Small contact matrix #' #' Example of a contact matrix. #' #' @format A 20x20 symmetric nonnegative integer-valued matrix. #' "C" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 20 and resolutions 25kb (obtained from the Gene Expression Omnibus). #' #' @format A 2498x2498 symmetric nonnegative integer-valued matrix. #' "IMR90_25kb_chr20" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 21 and resolutions 25kb (obtained from the Gene Expression Omnibus). #' #' @format A 1878x1878 symmetric nonnegative integer-valued matrix. #' "IMR90_25kb_chr21" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 20 and resolutions 50kb (obtained from the Gene Expression Omnibus). #' #' @format A 1249x1249 symmetric nonnegative integer-valued matrix. #' "IMR90_50kb_chr20" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 21 and resolutions 50kb (obtained from the Gene Expression Omnibus). #' #' @format A 939x939 symmetric nonnegative integer-valued matrix. #' "IMR90_50kb_chr21" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 20 and resolutions 100kb (obtained from the Gene Expression Omnibus). #' #' @format A 625x625 symmetric nonnegative integer-valued matrix. #' "IMR90_100kb_chr20" #' Hi-C data for IMR90 cells #' #' Contact matrix calculated for chromosomes 21 and resolutions 100kb (obtained from the Gene Expression Omnibus). #' #' @format A 470x470 symmetric nonnegative integer-valued matrix. #' "IMR90_100kb_chr21"
##' @param codon_partitioning Do the partitions follow the codon positions? ##' @rdname beast ##' @export ##' @importFrom httr upload_file ##' @importFrom assertthat assert_that is.count cipres_submit_beast1 <- function(input_file, beast_version = c("1.8.2", "1.8.1", "1.8.0"), use_beagle = TRUE, max_runtime = 10, codon_partitioning = FALSE, use_seed = NULL, job_name = NULL, get_email = TRUE, note = NULL, ...) { ## Documentation here: http://www.phylo.org/rest/beast_tg.html beast_version <- match.arg(beast_version) beast_version <- switch(beast_version, "1.8.0" = "0", "1.8.1" = "1", "1.8.2" = "2" ) input_file <- normalizePath(input_file) check_file(input_file) assertthat::assert_that(is_maxruntime(max_runtime)) assertthat::assert_that(assertthat::is.flag(use_beagle)) assertthat::assert_that(assertthat::is.flag(codon_partitioning)) bdy <- .cipres_submit_beast1(input_file = input_file, beast_version = beast_version, max_runtime = max_runtime, codon_partitioning = codon_partitioning, use_beagle = use_beagle, use_seed = use_seed, job_name = job_name, get_email = get_email) cipres_submit(bdy, tool = "BEAST_TG", job_name = job_name, note = note, ...) 
} .cipres_submit_beast1 <- function(input_file, beast_version, use_beagle, max_runtime, codon_partitioning, use_seed, job_name, get_email) { bdy <- list( `input.infile_` = httr::upload_file(input_file), `vparam.which_beast_` = beast_version, `vparam.no_beagle_` = as.numeric(!use_beagle), ## they use the opposite logic hence the negation `vparam.runtime_` = max_runtime, `vparam.codon_partitioning_` = as.numeric(codon_partitioning) ) alg_info <- parse_beast1_xml(input_file) n_patterns <- alg_info[["n_patterns"]] assertthat::assert_that(assertthat::is.count(n_patterns)) bdy$`vparam.nu_patterns_` <- n_patterns n_partitions <- alg_info[["n_partitions"]] bdy <- beast_check_partitions(bdy, n_partitions, beast2 = FALSE) bdy <- beast_use_seed(bdy, use_seed) bdy <- add_meta_data(bdy, get_email, job_name) bdy } beast_check_partitions <- function(bdy, n_partitions, beast2) { assertthat::assert_that(assertthat::is.count(n_partitions)) if (n_partitions > 1) { if (beast2) { bdy$`vparam.is_partitioned_` <- "1" } bdy$`vparam.nu_partitions_` <- n_partitions } else { bdy$`vparam.is_partitioned_` <- "0" } bdy } beast_use_seed <- function(bdy, use_seed) { if (!is.null(use_seed)) { assertthat::assert_that(assertthat::is.count(use_seed)) bdy$`vparam.spec_seed_` <- "1" bdy$`vparam.seed_val_` <- use_seed } else { bdy$`vparam.spec_seed_` <- "0" } bdy } ##' @importFrom xml2 read_xml xml_find_all xml_contents parse_beast1_xml <- function(input_file) { bst <- xml2::read_xml(x = input_file) ## Get the number of partitions based on the number of elements ## with siteModel n_partitions <- length(xml2::xml_find_all(bst, ".//siteModel[@id]")) ## Get length of sequences by calculating length of the first ## sequence for each partitions seq_info <- xml2::xml_contents( xml2::xml_find_all(bst, ".//alignment/sequence[1]") ) clean_seq <- gsub("\\s|(^<.+)", "", seq_info) clean_seq <- clean_seq[nzchar(clean_seq)] n_patterns <- sum(sapply(clean_seq, nchar)) list(n_partitions = n_partitions, n_patterns = 
n_patterns) }
/R/beast1.R
permissive
fmichonneau/cipresr
R
false
false
4,326
r
##' @param codon_partitioning Do the partitions follow the codon positions? ##' @rdname beast ##' @export ##' @importFrom httr upload_file ##' @importFrom assertthat assert_that is.count cipres_submit_beast1 <- function(input_file, beast_version = c("1.8.2", "1.8.1", "1.8.0"), use_beagle = TRUE, max_runtime = 10, codon_partitioning = FALSE, use_seed = NULL, job_name = NULL, get_email = TRUE, note = NULL, ...) { ## Documentation here: http://www.phylo.org/rest/beast_tg.html beast_version <- match.arg(beast_version) beast_version <- switch(beast_version, "1.8.0" = "0", "1.8.1" = "1", "1.8.2" = "2" ) input_file <- normalizePath(input_file) check_file(input_file) assertthat::assert_that(is_maxruntime(max_runtime)) assertthat::assert_that(assertthat::is.flag(use_beagle)) assertthat::assert_that(assertthat::is.flag(codon_partitioning)) bdy <- .cipres_submit_beast1(input_file = input_file, beast_version = beast_version, max_runtime = max_runtime, codon_partitioning = codon_partitioning, use_beagle = use_beagle, use_seed = use_seed, job_name = job_name, get_email = get_email) cipres_submit(bdy, tool = "BEAST_TG", job_name = job_name, note = note, ...) 
} .cipres_submit_beast1 <- function(input_file, beast_version, use_beagle, max_runtime, codon_partitioning, use_seed, job_name, get_email) { bdy <- list( `input.infile_` = httr::upload_file(input_file), `vparam.which_beast_` = beast_version, `vparam.no_beagle_` = as.numeric(!use_beagle), ## they use the opposite logic hence the negation `vparam.runtime_` = max_runtime, `vparam.codon_partitioning_` = as.numeric(codon_partitioning) ) alg_info <- parse_beast1_xml(input_file) n_patterns <- alg_info[["n_patterns"]] assertthat::assert_that(assertthat::is.count(n_patterns)) bdy$`vparam.nu_patterns_` <- n_patterns n_partitions <- alg_info[["n_partitions"]] bdy <- beast_check_partitions(bdy, n_partitions, beast2 = FALSE) bdy <- beast_use_seed(bdy, use_seed) bdy <- add_meta_data(bdy, get_email, job_name) bdy } beast_check_partitions <- function(bdy, n_partitions, beast2) { assertthat::assert_that(assertthat::is.count(n_partitions)) if (n_partitions > 1) { if (beast2) { bdy$`vparam.is_partitioned_` <- "1" } bdy$`vparam.nu_partitions_` <- n_partitions } else { bdy$`vparam.is_partitioned_` <- "0" } bdy } beast_use_seed <- function(bdy, use_seed) { if (!is.null(use_seed)) { assertthat::assert_that(assertthat::is.count(use_seed)) bdy$`vparam.spec_seed_` <- "1" bdy$`vparam.seed_val_` <- use_seed } else { bdy$`vparam.spec_seed_` <- "0" } bdy } ##' @importFrom xml2 read_xml xml_find_all xml_contents parse_beast1_xml <- function(input_file) { bst <- xml2::read_xml(x = input_file) ## Get the number of partitions based on the number of elements ## with siteModel n_partitions <- length(xml2::xml_find_all(bst, ".//siteModel[@id]")) ## Get length of sequences by calculating length of the first ## sequence for each partitions seq_info <- xml2::xml_contents( xml2::xml_find_all(bst, ".//alignment/sequence[1]") ) clean_seq <- gsub("\\s|(^<.+)", "", seq_info) clean_seq <- clean_seq[nzchar(clean_seq)] n_patterns <- sum(sapply(clean_seq, nchar)) list(n_partitions = n_partitions, n_patterns = 
n_patterns) }
source('./take_input.R') library(party) library(caTools) #library(rpart) #library(rpart.plot) data$game_event_id = NULL data$game_id = NULL data$time_remaining_in_period = (data$minutes_remaining*60 + data$seconds_remaining) data$seconds_remaining = NULL data$minutes_remaining = NULL data$shot_distance = NULL #Banking on shot_zone_range for this data$game_date = NULL data$distance = (data$loc_x^2 + data$loc_y^2)^(1/2) data$angle = atan(data$loc_y/data$loc_x) #angles in radians data$loc_x = NULL data$loc_y = NULL train = subset(data, !is.na(data$shot_made_flag)) sample = sample.split(train$shot_made_flag, SplitRatio = 0.80) train_train = subset(train, sample == TRUE) train_test = subset(train, sample == FALSE) test = subset(data, is.na(data$shot_made_flag)) train$shot_made_flag <- as.factor(train$shot_made_flag) test$shot_made_flag = NULL result= data.frame() for(i in levels(train$season)){ train_season = subset(train_train, as.character(train_train$season) <= i) test_season = subset(train_test, as.character(train_test$season) == i) train_season$season = NULL test_season$season = NULL tree = ctree(shot_made_flag~.-shot_id, data = train_season) pred_tree = predict(tree, type = "prob",newdata = test_season) #for (j in 1:length(pred_tree)){ # result = rbind(result, list(test_season[j,]$shot_id, pred_tree[j][[1]][2])) #} for (j in 1:nrow(test_season)){ result = rbind(result, list(as.numeric(test_season[j,]$shot_made_flag), (as.numeric(pred_tree[j][[1]] > 0.5)))) #result = rbind(result, list(as.numeric(test_season[j,]$shot_made_flag), (as.numeric(pred_tree[j] > 0.5)+1))) } } colnames(result) = c("shot_made_flag", "predicted shot flag") print(nrow(subset(result, (result$shot_made_flag == 1 & result$`predicted shot flag`==1) | ((result$shot_made_flag == 0 & result$`predicted shot flag`==0))))/nrow(result)) #write.csv(result, "cTree_Train_Previous_Seasons_with_Distance_Angle_Time.csv")
/tree_forest.R
permissive
shubh24/kobe
R
false
false
1,959
r
source('./take_input.R') library(party) library(caTools) #library(rpart) #library(rpart.plot) data$game_event_id = NULL data$game_id = NULL data$time_remaining_in_period = (data$minutes_remaining*60 + data$seconds_remaining) data$seconds_remaining = NULL data$minutes_remaining = NULL data$shot_distance = NULL #Banking on shot_zone_range for this data$game_date = NULL data$distance = (data$loc_x^2 + data$loc_y^2)^(1/2) data$angle = atan(data$loc_y/data$loc_x) #angles in radians data$loc_x = NULL data$loc_y = NULL train = subset(data, !is.na(data$shot_made_flag)) sample = sample.split(train$shot_made_flag, SplitRatio = 0.80) train_train = subset(train, sample == TRUE) train_test = subset(train, sample == FALSE) test = subset(data, is.na(data$shot_made_flag)) train$shot_made_flag <- as.factor(train$shot_made_flag) test$shot_made_flag = NULL result= data.frame() for(i in levels(train$season)){ train_season = subset(train_train, as.character(train_train$season) <= i) test_season = subset(train_test, as.character(train_test$season) == i) train_season$season = NULL test_season$season = NULL tree = ctree(shot_made_flag~.-shot_id, data = train_season) pred_tree = predict(tree, type = "prob",newdata = test_season) #for (j in 1:length(pred_tree)){ # result = rbind(result, list(test_season[j,]$shot_id, pred_tree[j][[1]][2])) #} for (j in 1:nrow(test_season)){ result = rbind(result, list(as.numeric(test_season[j,]$shot_made_flag), (as.numeric(pred_tree[j][[1]] > 0.5)))) #result = rbind(result, list(as.numeric(test_season[j,]$shot_made_flag), (as.numeric(pred_tree[j] > 0.5)+1))) } } colnames(result) = c("shot_made_flag", "predicted shot flag") print(nrow(subset(result, (result$shot_made_flag == 1 & result$`predicted shot flag`==1) | ((result$shot_made_flag == 0 & result$`predicted shot flag`==0))))/nrow(result)) #write.csv(result, "cTree_Train_Previous_Seasons_with_Distance_Angle_Time.csv")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stack.R \name{stack_trim} \alias{stack_trim} \title{Trim top call layers from the evaluation stack} \usage{ stack_trim(stack, n = 1) } \arguments{ \item{stack}{An evaluation stack.} \item{n}{The number of call frames (not eval frames) to trim off the top of the stack. In other words, the number of layers of intervening frames to trim.} } \description{ \code{\link[=ctxt_stack]{ctxt_stack()}} can be tricky to use in real code because all intervening frames are returned with the stack, including those at \code{ctxt_stack()} own call site. \code{stack_trim()} makes it easy to remove layers of intervening calls. } \section{Life cycle}{ These functions are in the questioning stage. We are no longer convinced they belong in rlang as they are mostly for REPL interaction and runtime inspection rather than function development. } \examples{ # Intervening frames appear on the evaluation stack: identity(identity(ctxt_stack())) # stack_trim() will trim the first n layers of calls: stack_trim(identity(identity(ctxt_stack()))) # Note that it also takes care of calls intervening at its own call # site: identity(identity( stack_trim(identity(identity(ctxt_stack()))) )) # It is especially useful when used within a function that needs to # inspect the evaluation stack but should nonetheless be callable # within nested calls without side effects: stack_util <- function() { # n = 2 means that two layers of intervening calls should be # removed: The layer at ctxt_stack()'s call site (including the # stack_trim() call), and the layer at stack_util()'s call. stack <- stack_trim(ctxt_stack(), n = 2) stack } user_fn <- function() { # A user calls your stack utility with intervening frames: identity(identity(stack_util())) } # These intervening frames won't appear in the evaluation stack identity(user_fn()) } \keyword{internal}
/man/stack_trim.Rd
no_license
mikmart/rlang
R
false
true
1,934
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stack.R \name{stack_trim} \alias{stack_trim} \title{Trim top call layers from the evaluation stack} \usage{ stack_trim(stack, n = 1) } \arguments{ \item{stack}{An evaluation stack.} \item{n}{The number of call frames (not eval frames) to trim off the top of the stack. In other words, the number of layers of intervening frames to trim.} } \description{ \code{\link[=ctxt_stack]{ctxt_stack()}} can be tricky to use in real code because all intervening frames are returned with the stack, including those at \code{ctxt_stack()} own call site. \code{stack_trim()} makes it easy to remove layers of intervening calls. } \section{Life cycle}{ These functions are in the questioning stage. We are no longer convinced they belong in rlang as they are mostly for REPL interaction and runtime inspection rather than function development. } \examples{ # Intervening frames appear on the evaluation stack: identity(identity(ctxt_stack())) # stack_trim() will trim the first n layers of calls: stack_trim(identity(identity(ctxt_stack()))) # Note that it also takes care of calls intervening at its own call # site: identity(identity( stack_trim(identity(identity(ctxt_stack()))) )) # It is especially useful when used within a function that needs to # inspect the evaluation stack but should nonetheless be callable # within nested calls without side effects: stack_util <- function() { # n = 2 means that two layers of intervening calls should be # removed: The layer at ctxt_stack()'s call site (including the # stack_trim() call), and the layer at stack_util()'s call. stack <- stack_trim(ctxt_stack(), n = 2) stack } user_fn <- function() { # A user calls your stack utility with intervening frames: identity(identity(stack_util())) } # These intervening frames won't appear in the evaluation stack identity(user_fn()) } \keyword{internal}
setwd("C:/Users/Kshiti Mehta/Desktop/Exploratory Data Analysis Coursera") data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"') data1 <- subset(data_full, Date %in% c("1/2/2007","2/2/2007")) data1$Date <- as.Date(data1$Date, format="%d/%m/%Y") hist(data1$Global_active_power, main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
/Plot1.R
no_license
kshitimehta/Exploratory-Data-Analysis
R
false
false
520
r
setwd("C:/Users/Kshiti Mehta/Desktop/Exploratory Data Analysis Coursera") data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"') data1 <- subset(data_full, Date %in% c("1/2/2007","2/2/2007")) data1$Date <- as.Date(data1$Date, format="%d/%m/%Y") hist(data1$Global_active_power, main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
#use kNN to impute TMB library(caret) args <- commandArgs(trailingOnly=TRUE) #data.expr<-read.csv("../FeatureGen/TMB-Syn/GRCh37ERCC_refseq105_genes_tpm.csv",header=T,row.names = 1) #data.anno<-read.csv("../FeatureGen/TMB-Syn/clinical_data.csv",header=T,row.names = 1) data.expr<-read.csv(args[1],header=T,row.names = 1) data.anno<-read.csv(args[2],header=T,row.names = 1) data.anno<-data.anno[colnames(data.expr),] #make sure these two data are the same output<-args[3] #filter data.expr data.expr.sd<-apply(data.expr,1,sd) data.expr.sel<-data.expr[order(data.expr.sd,decreasing = T)[1:2000],] data.merged.tmb<-as.data.frame(cbind(t(data.expr.sel),data.anno$TMB)) colnames(data.merged.tmb)<-c(rownames(data.expr.sel),"TMB") set.seed(300) training <- data.merged.tmb[!is.na(data.merged.tmb$TMB),] testing <- data.merged.tmb[is.na(data.merged.tmb$TMB),] trainX <- training[,names(training) != "TMB"] preProcValues <- preProcess(x = trainX,method = c("center", "scale")) #10 times CV ctrl <- trainControl(method="cv", number = 10) #,classProbs=TRUE,summaryFunction = twoClassSummary) #fit KNN knnFit <- train(TMB~ ., data = training, method = "knn", trControl = ctrl, preProcess = c("center","scale"), tuneLength = 20) prediction<-as.integer(predict(knnFit,newdata = testing)) data.merged.tmb[is.na(data.merged.tmb$TMB),]<-prediction tmb.results<-cbind(rownames(data.merged.tmb),data.merged.tmb$TMB) colnames(tmb.results)<-c("Patient","TMB") #write results write.csv(file=output,tmb.results,quote = F, row.names = F)
/Scripts/run_TMB.R
no_license
yinjun111/dream-pd1
R
false
false
1,593
r
#use kNN to impute TMB library(caret) args <- commandArgs(trailingOnly=TRUE) #data.expr<-read.csv("../FeatureGen/TMB-Syn/GRCh37ERCC_refseq105_genes_tpm.csv",header=T,row.names = 1) #data.anno<-read.csv("../FeatureGen/TMB-Syn/clinical_data.csv",header=T,row.names = 1) data.expr<-read.csv(args[1],header=T,row.names = 1) data.anno<-read.csv(args[2],header=T,row.names = 1) data.anno<-data.anno[colnames(data.expr),] #make sure these two data are the same output<-args[3] #filter data.expr data.expr.sd<-apply(data.expr,1,sd) data.expr.sel<-data.expr[order(data.expr.sd,decreasing = T)[1:2000],] data.merged.tmb<-as.data.frame(cbind(t(data.expr.sel),data.anno$TMB)) colnames(data.merged.tmb)<-c(rownames(data.expr.sel),"TMB") set.seed(300) training <- data.merged.tmb[!is.na(data.merged.tmb$TMB),] testing <- data.merged.tmb[is.na(data.merged.tmb$TMB),] trainX <- training[,names(training) != "TMB"] preProcValues <- preProcess(x = trainX,method = c("center", "scale")) #10 times CV ctrl <- trainControl(method="cv", number = 10) #,classProbs=TRUE,summaryFunction = twoClassSummary) #fit KNN knnFit <- train(TMB~ ., data = training, method = "knn", trControl = ctrl, preProcess = c("center","scale"), tuneLength = 20) prediction<-as.integer(predict(knnFit,newdata = testing)) data.merged.tmb[is.na(data.merged.tmb$TMB),]<-prediction tmb.results<-cbind(rownames(data.merged.tmb),data.merged.tmb$TMB) colnames(tmb.results)<-c("Patient","TMB") #write results write.csv(file=output,tmb.results,quote = F, row.names = F)
#!/usr/bin/env Rscript # Helper script to create QC report # Stephen Zhang, 2018 # Usage: # create_qc.r [bamFilePath] [sampleName] [genomeBioStrings] [genomeTxDb] # [Rmd_script] [outputDir] [nChr] args <- commandArgs(trailingOnly = T) bamFilePath <- args[1] sampleName <- args[2] genomeBioStrings <- args[3] genomeTxDb <- args[4] rmdPath <- args[5] outputDir <- args[6] nchr <- args[7] library(rmarkdown) render(rmdPath, output_format = "html_document", output_dir = outputDir, intermediates_dir = outputDir, knit_root_dir = outputDir, clean = T, params = list( bamFilePath = bamFilePath, sampleName = sampleName, genomeBioStrings = genomeBioStrings, genomeTxDb = genomeTxDb, nchr = nchr ))
/qc/create_qc.r
permissive
zsteve/nf-ATAC
R
false
false
791
r
#!/usr/bin/env Rscript # Helper script to create QC report # Stephen Zhang, 2018 # Usage: # create_qc.r [bamFilePath] [sampleName] [genomeBioStrings] [genomeTxDb] # [Rmd_script] [outputDir] [nChr] args <- commandArgs(trailingOnly = T) bamFilePath <- args[1] sampleName <- args[2] genomeBioStrings <- args[3] genomeTxDb <- args[4] rmdPath <- args[5] outputDir <- args[6] nchr <- args[7] library(rmarkdown) render(rmdPath, output_format = "html_document", output_dir = outputDir, intermediates_dir = outputDir, knit_root_dir = outputDir, clean = T, params = list( bamFilePath = bamFilePath, sampleName = sampleName, genomeBioStrings = genomeBioStrings, genomeTxDb = genomeTxDb, nchr = nchr ))
## Investigation into the quality of articles created by newly created accounts ## (within the first 30 days after registration). library(data.table); library(ggplot2); creations_30days = fread('datasets/article_creations_30days.tsv'); qual_preds = fread('datasets/article_quality_predictions_2009-2017.tsv'); creations_30days_recent = fread('datasets/article_creations_30days_recent.tsv'); qual_preds_recent = fread('datasets/article_quality_predictions_2017.tsv'); ## Set keys to revision ID and do a left join. setkey(creations_30days, revision_id); setkey(qual_preds, rev_id); creations_30days = creations_30days[qual_preds, nomatch = 0]; setkey(creations_30days_recent, revision_id); setkey(qual_preds_recent, rev_id); creations_30days_recent = creations_30days_recent[qual_preds_recent, nomatch = 0]; ## Total creations: 1,353,535 (wc -l) ## Creations that match a quality prediction: 524,271 ## That suggests that 829,264 articles (61.27%) were deleted in such a way that ## we no longer can retrieve the revision and make a prediction (copyright, attack) ## Total recent creations: 18662 ## Creations that match a quality prediction: 8011 ## That suggests that 57.1% were deleted in such a way that we no longer ## can retrieve them. ## That's concerning. I should plot that over time. Also, how does it develop ## since the start of ACTRIAL? all_30day_creations = fread('datasets/article_creations_30days.tsv'); ## Parse the timestamps and split into date/time. 
all_30day_creations[, c("event_date", "event_time") := IDateTime(as.POSIXct(event_timestamp, format='%Y-%m-%d %H:%M:%S.0', tz='UTC'))]; creations_30days[, c("event_date", "event_time") := IDateTime( as.POSIXct(rev_timestamp, format='%Y-%m-%d %H:%M:%S', tz='UTC'))]; surviving_article_data = merge( all_30day_creations[, list(num_creations=sum(.N)), by=event_date], creations_30days[, list(num_surviving_articles=sum(.N)), by=event_date], by='event_date'); surviving_article_data[, prop_surviving := 100*num_surviving_articles/num_creations] ggplot(surviving_article_data, aes(x=event_date, y=prop_surviving)) + geom_line() + scale_x_date(date_breaks='1 year', date_labels = '%Y') + xlab('Date') + ylab('Proportion in %') + ggtitle("Proportion of created articles retrievable in late 2017") + geom_smooth(method='loess', span=0.2) + ylim(0, 65) all_recent_30day_creations = fread('datasets/article_creations_30days_recent.tsv'); all_recent_30day_creations[, c("event_date", "event_time") := IDateTime(as.POSIXct(event_timestamp, format='%Y-%m-%d %H:%M:%S', tz='UTC'))]; creations_30days_recent[, c("event_date", "event_time") := IDateTime( as.POSIXct(event_timestamp, format='%Y-%m-%d %H:%M:%S', tz='UTC'))]; surviving_article_data_recent = merge( all_recent_30day_creations[, list(num_creations=sum(.N)), by=event_date], creations_30days_recent[, list(num_surviving_articles=sum(.N)), by=event_date], by='event_date'); surviving_article_data_recent[, prop_surviving := 100*num_surviving_articles/num_creations] ggplot(surviving_article_data_recent, aes(x=event_date, y=prop_surviving)) + geom_line() + scale_x_date(date_breaks='1 month', date_labels = '%m') + xlab('Date') + ylab('Proportion in %') + ggtitle("Proportion of articles created in 2017 retrievable in early 2018") + geom_vline(xintercept=as.IDate('2017-09-14'), linetype='dashed') + geom_smooth(method='loess', span=0.2) + ylim(0, 100) ## Q1: What is the average quality score per day across time? 
## We use Halfaker's measure: sum(I(c)*P(c)) where 'c' is the class, ## I(c) is 0 for "Stub", "1" for Start, and so on up to "5" for FA. creations_30days[, qual_sum := start_prob + 2*c_prob + 3*b_prob + 4*ga_prob + 5*fa_prob]; ggplot(creations_30days[, list(avg_weigh_sum=mean(qual_sum)), by=event_date], aes(x=event_date, y=avg_weigh_sum)) + geom_line() + scale_x_date(date_breaks='1 year', date_labels = '%Y') + xlab('Date') + ylab('Weighed sum') + ggtitle("Average quality weighed sum per day, all retrievable revisions") + geom_smooth(method='loess', span=0.2) + ylim(0, 1.5); creations_30days_recent[, qual_sum := start_prob + 2*c_prob + 3*b_prob + 4*ga_prob + 5*fa_prob]; ggplot(creations_30days_recent[, list(avg_weigh_sum=mean(qual_sum)), by=event_date], aes(x=event_date, y=avg_weigh_sum)) + geom_line() + scale_x_date(date_breaks='1 month', date_labels = '%m') + xlab('Date') + ylab('Weighed sum') + ggtitle("Average quality weighed sum per day, all retrievable revisions") + geom_smooth(method='loess', span=0.2) + ylim(0, 1.5); ## Q2: What proportion of articles are flagged as not-OK by the draft quality model? 
draft_pred_ok = merge( creations_30days[, list(num_creations=sum(.N)), by=event_date], creations_30days[draft_prediction == 'OK' & ok_prob >= 0.664, list(num_ok=sum(.N)), by=event_date], by='event_date'); ggplot(draft_pred_ok, aes(x=event_date, y=100*num_ok/num_creations)) + geom_line() + scale_x_date(date_breaks='1 year', date_labels = '%Y') + xlab('Date') + ylab('Proportion (%)') + ggtitle("Proportion of articles not flagged by the draft quality model") + geom_smooth(method='loess', span=0.2) + ylim(0, 90); draft_pred_ok_recent = merge( creations_30days_recent[, list(num_creations=sum(.N)), by=event_date], creations_30days_recent[draft_prediction == 'OK' & ok_prob >= 0.664, list(num_ok=sum(.N)), by=event_date], by='event_date'); ggplot(draft_pred_ok_recent, aes(x=event_date, y=100*num_ok/num_creations)) + geom_line() + scale_x_date(date_breaks='1 month', date_labels = '%m') + xlab('Date') + ylab('Proportion (%)') + ggtitle("Proportion of non-flagged articles created in 2017") + geom_vline(xintercept=as.IDate('2017-09-14'), linetype='dashed') + geom_smooth(method='loess', span=0.2) + ylim(0, 100); ## Q3: What proportion of articles are flagged as spam/vandalism/attack. 
draft_pred_split = merge( creations_30days[, list(num_creations=sum(.N)), by=event_date], merge( creations_30days[draft_prediction == 'spam', list(num_spam=sum(.N)), by=event_date], merge( creations_30days[draft_prediction == 'vandalism', list(num_vandalism=sum(.N)), by=event_date], creations_30days[draft_prediction == 'attack', list(num_attack=sum(.N)), by=event_date], by='event_date'), by='event_date'), by='event_date'); ggplot() + geom_line(data=draft_pred_split, aes(x=event_date, y=100*num_spam/num_creations, colour='Spam')) + geom_line(data=draft_pred_split, aes(x=event_date, y=100*num_vandalism/num_creations, colour='Vandalism')) + geom_line(data=draft_pred_split, aes(x=event_date, y=100*num_attack/num_creations, colour='Attack')) + geom_smooth(data=draft_pred_split, aes(x=event_date, y=100*num_spam/num_creations, colour='Spam'), method='loess', span=0.2) + geom_smooth(data=draft_pred_split, aes(x=event_date, y=100*num_vandalism/num_creations, colour='Vandalism'), method='loess', span=0.2) + geom_smooth(data=draft_pred_split, aes(x=event_date, y=100*num_attack/num_creations, colour='Attack'), method='loess', span=0.2) + ylim(0,45) + xlab('Date') + ylab('Proportion (%)') + ggtitle("Proportion of new articles flagged by the draft quality model") + scale_color_manual(values=c( Attack=cbbPalette[1], Spam=cbbPalette[2], Vandalism=cbbPalette[3])) + guides(colour=guide_legend(title='Flagged as')) ## There's a lot of variation in this data, can we do it by month instead? ## Revisiting Q1, isn't it more interesting to look at average quality for articles ## that were _not_ flagged by the draft quality model? Arguably, if the article ## is flagged, we suspect it'll get deleted or otherwise moved out of Main? 
ggplot(creations_30days[draft_prediction == 'OK' & ok_prob >= 0.664, list(avg_weigh_sum=mean(qual_sum)), by=event_date], aes(x=event_date, y=avg_weigh_sum)) + geom_line() + scale_x_date(date_breaks='1 year', date_labels = '%Y') + xlab('Date') + ylab('Weighed sum') + ggtitle("Average quality weighed sum per day, non-flagged articles") + geom_smooth(method='loess', span=0.2) + ylim(0, 1.5); ggplot(creations_30days_recent[draft_prediction == 'OK' & ok_prob >= 0.664, list(avg_weigh_sum=mean(qual_sum)), by=event_date], aes(x=event_date, y=avg_weigh_sum)) + geom_line() + scale_x_date(date_breaks='1 month', date_labels = '%m') + xlab('Date') + ylab('Weighed sum') + ggtitle("Avg quality weighed sum per day, non-flagged 2017 articles") + geom_vline(xintercept=as.IDate('2017-09-14'), linetype='dashed') + geom_smooth(method='loess', span=0.2) + ylim(0, 1.5); ## Question: is the quality of content created by newcomers slowly increasing, ## or is the bar for not getting your article deleted increasing? ## Question: should we investigate average probability of an article not being ## flagged, given that the flags are not exclusive (e.g. an article can be 25% ## spam and 28% vandalism)? ## Q4: linear model for weighed sum of quality predicted by age and number of edits: qualmodel = lm(qual_sum ~ event_user_age + event_user_revision_count, data=creations_30days[draft_prediction == 'OK' & ok_prob >= 0.664]); summary(qualmodel); qualmodel.log = lm(qual_sum ~ log2(1 + event_user_age) + log2(1 + event_user_revision_count), data=creations_30days[draft_prediction == 'OK' & ok_prob >= 0.664]); summary(qualmodel.log); ## Let's redo the same analysis for more recent creations!
/R/article_quality_predictions.R
no_license
nettrom/actrial
R
false
false
10,035
r
## Investigation into the quality of articles created by newly created accounts ## (within the first 30 days after registration). library(data.table); library(ggplot2); creations_30days = fread('datasets/article_creations_30days.tsv'); qual_preds = fread('datasets/article_quality_predictions_2009-2017.tsv'); creations_30days_recent = fread('datasets/article_creations_30days_recent.tsv'); qual_preds_recent = fread('datasets/article_quality_predictions_2017.tsv'); ## Set keys to revision ID and do a left join. setkey(creations_30days, revision_id); setkey(qual_preds, rev_id); creations_30days = creations_30days[qual_preds, nomatch = 0]; setkey(creations_30days_recent, revision_id); setkey(qual_preds_recent, rev_id); creations_30days_recent = creations_30days_recent[qual_preds_recent, nomatch = 0]; ## Total creations: 1,353,535 (wc -l) ## Creations that match a quality prediction: 524,271 ## That suggests that 829,264 articles (61.27%) were deleted in such a way that ## we no longer can retrieve the revision and make a prediction (copyright, attack) ## Total recent creations: 18662 ## Creations that match a quality prediction: 8011 ## That suggests that 57.1% were deleted in such a way that we no longer ## can retrieve them. ## That's concerning. I should plot that over time. Also, how does it develop ## since the start of ACTRIAL? all_30day_creations = fread('datasets/article_creations_30days.tsv'); ## Parse the timestamps and split into date/time. 
all_30day_creations[, c("event_date", "event_time") := IDateTime(as.POSIXct(event_timestamp, format='%Y-%m-%d %H:%M:%S.0', tz='UTC'))]; creations_30days[, c("event_date", "event_time") := IDateTime( as.POSIXct(rev_timestamp, format='%Y-%m-%d %H:%M:%S', tz='UTC'))]; surviving_article_data = merge( all_30day_creations[, list(num_creations=sum(.N)), by=event_date], creations_30days[, list(num_surviving_articles=sum(.N)), by=event_date], by='event_date'); surviving_article_data[, prop_surviving := 100*num_surviving_articles/num_creations] ggplot(surviving_article_data, aes(x=event_date, y=prop_surviving)) + geom_line() + scale_x_date(date_breaks='1 year', date_labels = '%Y') + xlab('Date') + ylab('Proportion in %') + ggtitle("Proportion of created articles retrievable in late 2017") + geom_smooth(method='loess', span=0.2) + ylim(0, 65) all_recent_30day_creations = fread('datasets/article_creations_30days_recent.tsv'); all_recent_30day_creations[, c("event_date", "event_time") := IDateTime(as.POSIXct(event_timestamp, format='%Y-%m-%d %H:%M:%S', tz='UTC'))]; creations_30days_recent[, c("event_date", "event_time") := IDateTime( as.POSIXct(event_timestamp, format='%Y-%m-%d %H:%M:%S', tz='UTC'))]; surviving_article_data_recent = merge( all_recent_30day_creations[, list(num_creations=sum(.N)), by=event_date], creations_30days_recent[, list(num_surviving_articles=sum(.N)), by=event_date], by='event_date'); surviving_article_data_recent[, prop_surviving := 100*num_surviving_articles/num_creations] ggplot(surviving_article_data_recent, aes(x=event_date, y=prop_surviving)) + geom_line() + scale_x_date(date_breaks='1 month', date_labels = '%m') + xlab('Date') + ylab('Proportion in %') + ggtitle("Proportion of articles created in 2017 retrievable in early 2018") + geom_vline(xintercept=as.IDate('2017-09-14'), linetype='dashed') + geom_smooth(method='loess', span=0.2) + ylim(0, 100) ## Q1: What is the average quality score per day across time? 
## We use Halfaker's measure: sum(I(c)*P(c)) where 'c' is the class, ## I(c) is 0 for "Stub", "1" for Start, and so on up to "5" for FA. creations_30days[, qual_sum := start_prob + 2*c_prob + 3*b_prob + 4*ga_prob + 5*fa_prob]; ggplot(creations_30days[, list(avg_weigh_sum=mean(qual_sum)), by=event_date], aes(x=event_date, y=avg_weigh_sum)) + geom_line() + scale_x_date(date_breaks='1 year', date_labels = '%Y') + xlab('Date') + ylab('Weighed sum') + ggtitle("Average quality weighed sum per day, all retrievable revisions") + geom_smooth(method='loess', span=0.2) + ylim(0, 1.5); creations_30days_recent[, qual_sum := start_prob + 2*c_prob + 3*b_prob + 4*ga_prob + 5*fa_prob]; ggplot(creations_30days_recent[, list(avg_weigh_sum=mean(qual_sum)), by=event_date], aes(x=event_date, y=avg_weigh_sum)) + geom_line() + scale_x_date(date_breaks='1 month', date_labels = '%m') + xlab('Date') + ylab('Weighed sum') + ggtitle("Average quality weighed sum per day, all retrievable revisions") + geom_smooth(method='loess', span=0.2) + ylim(0, 1.5); ## Q2: What proportion of articles are flagged as not-OK by the draft quality model? 
draft_pred_ok = merge( creations_30days[, list(num_creations=sum(.N)), by=event_date], creations_30days[draft_prediction == 'OK' & ok_prob >= 0.664, list(num_ok=sum(.N)), by=event_date], by='event_date'); ggplot(draft_pred_ok, aes(x=event_date, y=100*num_ok/num_creations)) + geom_line() + scale_x_date(date_breaks='1 year', date_labels = '%Y') + xlab('Date') + ylab('Proportion (%)') + ggtitle("Proportion of articles not flagged by the draft quality model") + geom_smooth(method='loess', span=0.2) + ylim(0, 90); draft_pred_ok_recent = merge( creations_30days_recent[, list(num_creations=sum(.N)), by=event_date], creations_30days_recent[draft_prediction == 'OK' & ok_prob >= 0.664, list(num_ok=sum(.N)), by=event_date], by='event_date'); ggplot(draft_pred_ok_recent, aes(x=event_date, y=100*num_ok/num_creations)) + geom_line() + scale_x_date(date_breaks='1 month', date_labels = '%m') + xlab('Date') + ylab('Proportion (%)') + ggtitle("Proportion of non-flagged articles created in 2017") + geom_vline(xintercept=as.IDate('2017-09-14'), linetype='dashed') + geom_smooth(method='loess', span=0.2) + ylim(0, 100); ## Q3: What proportion of articles are flagged as spam/vandalism/attack. 
draft_pred_split = merge( creations_30days[, list(num_creations=sum(.N)), by=event_date], merge( creations_30days[draft_prediction == 'spam', list(num_spam=sum(.N)), by=event_date], merge( creations_30days[draft_prediction == 'vandalism', list(num_vandalism=sum(.N)), by=event_date], creations_30days[draft_prediction == 'attack', list(num_attack=sum(.N)), by=event_date], by='event_date'), by='event_date'), by='event_date'); ggplot() + geom_line(data=draft_pred_split, aes(x=event_date, y=100*num_spam/num_creations, colour='Spam')) + geom_line(data=draft_pred_split, aes(x=event_date, y=100*num_vandalism/num_creations, colour='Vandalism')) + geom_line(data=draft_pred_split, aes(x=event_date, y=100*num_attack/num_creations, colour='Attack')) + geom_smooth(data=draft_pred_split, aes(x=event_date, y=100*num_spam/num_creations, colour='Spam'), method='loess', span=0.2) + geom_smooth(data=draft_pred_split, aes(x=event_date, y=100*num_vandalism/num_creations, colour='Vandalism'), method='loess', span=0.2) + geom_smooth(data=draft_pred_split, aes(x=event_date, y=100*num_attack/num_creations, colour='Attack'), method='loess', span=0.2) + ylim(0,45) + xlab('Date') + ylab('Proportion (%)') + ggtitle("Proportion of new articles flagged by the draft quality model") + scale_color_manual(values=c( Attack=cbbPalette[1], Spam=cbbPalette[2], Vandalism=cbbPalette[3])) + guides(colour=guide_legend(title='Flagged as')) ## There's a lot of variation in this data, can we do it by month instead? ## Revisiting Q1, isn't it more interesting to look at average quality for articles ## that were _not_ flagged by the draft quality model? Arguably, if the article ## is flagged, we suspect it'll get deleted or otherwise moved out of Main? 
ggplot(creations_30days[draft_prediction == 'OK' & ok_prob >= 0.664, list(avg_weigh_sum=mean(qual_sum)), by=event_date], aes(x=event_date, y=avg_weigh_sum)) + geom_line() + scale_x_date(date_breaks='1 year', date_labels = '%Y') + xlab('Date') + ylab('Weighed sum') + ggtitle("Average quality weighed sum per day, non-flagged articles") + geom_smooth(method='loess', span=0.2) + ylim(0, 1.5); ggplot(creations_30days_recent[draft_prediction == 'OK' & ok_prob >= 0.664, list(avg_weigh_sum=mean(qual_sum)), by=event_date], aes(x=event_date, y=avg_weigh_sum)) + geom_line() + scale_x_date(date_breaks='1 month', date_labels = '%m') + xlab('Date') + ylab('Weighed sum') + ggtitle("Avg quality weighed sum per day, non-flagged 2017 articles") + geom_vline(xintercept=as.IDate('2017-09-14'), linetype='dashed') + geom_smooth(method='loess', span=0.2) + ylim(0, 1.5); ## Question: is the quality of content created by newcomers slowly increasing, ## or is the bar for not getting your article deleted increasing? ## Question: should we investigate average probability of an article not being ## flagged, given that the flags are not exclusive (e.g. an article can be 25% ## spam and 28% vandalism)? ## Q4: linear model for weighed sum of quality predicted by age and number of edits: qualmodel = lm(qual_sum ~ event_user_age + event_user_revision_count, data=creations_30days[draft_prediction == 'OK' & ok_prob >= 0.664]); summary(qualmodel); qualmodel.log = lm(qual_sum ~ log2(1 + event_user_age) + log2(1 + event_user_revision_count), data=creations_30days[draft_prediction == 'OK' & ok_prob >= 0.664]); summary(qualmodel.log); ## Let's redo the same analysis for more recent creations!
library(shiny) library(glasso) library(d3heatmap) library(igraph) ################### Function Definiton Begin #################### # convert inverse covariance matrix to links wi2link <- function(mat) { mat <- abs(mat) diag(mat) <- 0 min <- min(mat) max <- max(mat) links <- data.frame(from = NA, to = NA, weight = NA) for (i in 1:nrow(mat)) { for (j in 1:ncol(mat)) { if (mat[i,j] != 0 & i != j) { links <- rbind(links, c(i-1,j-1,range01(mat[i,j],min,max))) } } } return(links[-1,]) } # scale to range [0,1] range01 <- function(var, min, max) { res <- (var-min)/(max-min) return(res) } # for d3heatmap use reorderfun <- function(d, w) { return(d) } # get dendrogram for reorder getDendrogram <- function(mat) { distance <- dist(mat) cluster <- hclust(distance, method = "ward.D") dendrogram <- as.dendrogram(cluster) Rowv <- rowMeans(mat) dendrogram <- reorder(dendrogram, Rowv) return(dendrogram) } ################### Function Definiton END #################### ################### Logic Begin ################### # Prepare Data nodes <- read.csv(file = "AAL_Yan.csv") measure = c("Assortivity", "Cliques", "Transitivity") statsTable <- data.frame(measure, value = rep(NA, length(measure)), meaning = c("The assortativity coefficient is positive is similar vertices (based on some external property) tend to connect to each, and negative otherwise.", "The size of the largest clique", "Transitivity measures the probability that the adjacent vertices of a vertex are connected. 
This is sometimes also called the clustering coefficient.")) # shiny server shinyServer(function(input, output) { ### sharable data inputdata <- reactive({ inFile <- input$file if (is.null(inFile)) { return(NULL) } tmp <- read.csv(inFile$datapath, header = TRUE) tmp[,nodes$name] }) # generate inverse covariance matrix given lambda wi <- reactive({ tmp <- glasso(s = cov(inputdata()), rho = input$lambda) tmp <- tmp$wi }) # generate adjacancy matrix adjMat <- reactive({ tmp <- wi() tmp[which(tmp!=0)] <- 1 diag(tmp) <- 0 tmp }) # update statistical table statsTable2 <- reactive({ tmp.g <- graph.adjacency(adjMat(), mode = "undirected") # create igraph object given adjacency matrix # update graph statistics statsTable[statsTable$measure=="Assortivity","value"] <- assortativity_degree(tmp.g) statsTable[statsTable$measure=="Cliques","value"] <- clique_num(tmp.g) statsTable[statsTable$measure=="Transitivity","value"] <- transitivity(tmp.g, type = "undirected") statsTable }) ### output # output: data table output$table <- renderTable({ tmp <- inputdata() tmp[1:15, 1:6] }) # output: interactive network plot networkPlot <- eventReactive(input$updateNet, { links <- wi2link(wi()) forceNetwork(Nodes = nodes, Links = links, Source = "from", Target = "to", Value = "weight", NodeID = "name", Group = "region", zoom = TRUE) }) output$networkPlot <- renderForceNetwork({ networkPlot() }) # output: checkbox network plot checkboxPlot <- eventReactive(input$updateNet, { d3heatmap(adjMat(), Rowv = FALSE, Colv = "Rowv", colors = grey(c(1, 0)), labRow = nodes$name, labCol = nodes$name) }) output$checkboxPlot <- renderD3heatmap({ checkboxPlot() }) # output: visulization for download usage plotIgraph <- function() { # create igraph object given adjacency matrix tmp <- adjMat() colnames(tmp) <- colnames(inputdata()) tmp.g <- graph.adjacency(tmp, mode = "undirected", add.colnames = 'label') plot(tmp.g, layout=layout.lgl) } igraphPlot <- eventReactive(input$updateNet, { plotIgraph() }) 
output$igraph <- renderPlot({ igraphPlot() }) # output: statistics stats <- eventReactive(input$updateNet, { statsTable2() }) output$stats <- renderTable({ stats() }) ## download output$downloadGraph <- downloadHandler( filename = c('brain_connectivity.png'), content = function(file) { png(file) plotIgraph() dev.off() } ) output$downloadStats <- downloadHandler( filename = c('brain_connectivity.csv'), content = function(file) { write.csv(statsTable2(), file) } ) }) # shinyServer end ################### Logic END ###################
/main/server.R
permissive
jyfeather/BrainConn_APP
R
false
false
4,578
r
library(shiny) library(glasso) library(d3heatmap) library(igraph) ################### Function Definiton Begin #################### # convert inverse covariance matrix to links wi2link <- function(mat) { mat <- abs(mat) diag(mat) <- 0 min <- min(mat) max <- max(mat) links <- data.frame(from = NA, to = NA, weight = NA) for (i in 1:nrow(mat)) { for (j in 1:ncol(mat)) { if (mat[i,j] != 0 & i != j) { links <- rbind(links, c(i-1,j-1,range01(mat[i,j],min,max))) } } } return(links[-1,]) } # scale to range [0,1] range01 <- function(var, min, max) { res <- (var-min)/(max-min) return(res) } # for d3heatmap use reorderfun <- function(d, w) { return(d) } # get dendrogram for reorder getDendrogram <- function(mat) { distance <- dist(mat) cluster <- hclust(distance, method = "ward.D") dendrogram <- as.dendrogram(cluster) Rowv <- rowMeans(mat) dendrogram <- reorder(dendrogram, Rowv) return(dendrogram) } ################### Function Definiton END #################### ################### Logic Begin ################### # Prepare Data nodes <- read.csv(file = "AAL_Yan.csv") measure = c("Assortivity", "Cliques", "Transitivity") statsTable <- data.frame(measure, value = rep(NA, length(measure)), meaning = c("The assortativity coefficient is positive is similar vertices (based on some external property) tend to connect to each, and negative otherwise.", "The size of the largest clique", "Transitivity measures the probability that the adjacent vertices of a vertex are connected. 
This is sometimes also called the clustering coefficient.")) # shiny server shinyServer(function(input, output) { ### sharable data inputdata <- reactive({ inFile <- input$file if (is.null(inFile)) { return(NULL) } tmp <- read.csv(inFile$datapath, header = TRUE) tmp[,nodes$name] }) # generate inverse covariance matrix given lambda wi <- reactive({ tmp <- glasso(s = cov(inputdata()), rho = input$lambda) tmp <- tmp$wi }) # generate adjacancy matrix adjMat <- reactive({ tmp <- wi() tmp[which(tmp!=0)] <- 1 diag(tmp) <- 0 tmp }) # update statistical table statsTable2 <- reactive({ tmp.g <- graph.adjacency(adjMat(), mode = "undirected") # create igraph object given adjacency matrix # update graph statistics statsTable[statsTable$measure=="Assortivity","value"] <- assortativity_degree(tmp.g) statsTable[statsTable$measure=="Cliques","value"] <- clique_num(tmp.g) statsTable[statsTable$measure=="Transitivity","value"] <- transitivity(tmp.g, type = "undirected") statsTable }) ### output # output: data table output$table <- renderTable({ tmp <- inputdata() tmp[1:15, 1:6] }) # output: interactive network plot networkPlot <- eventReactive(input$updateNet, { links <- wi2link(wi()) forceNetwork(Nodes = nodes, Links = links, Source = "from", Target = "to", Value = "weight", NodeID = "name", Group = "region", zoom = TRUE) }) output$networkPlot <- renderForceNetwork({ networkPlot() }) # output: checkbox network plot checkboxPlot <- eventReactive(input$updateNet, { d3heatmap(adjMat(), Rowv = FALSE, Colv = "Rowv", colors = grey(c(1, 0)), labRow = nodes$name, labCol = nodes$name) }) output$checkboxPlot <- renderD3heatmap({ checkboxPlot() }) # output: visulization for download usage plotIgraph <- function() { # create igraph object given adjacency matrix tmp <- adjMat() colnames(tmp) <- colnames(inputdata()) tmp.g <- graph.adjacency(tmp, mode = "undirected", add.colnames = 'label') plot(tmp.g, layout=layout.lgl) } igraphPlot <- eventReactive(input$updateNet, { plotIgraph() }) 
output$igraph <- renderPlot({ igraphPlot() }) # output: statistics stats <- eventReactive(input$updateNet, { statsTable2() }) output$stats <- renderTable({ stats() }) ## download output$downloadGraph <- downloadHandler( filename = c('brain_connectivity.png'), content = function(file) { png(file) plotIgraph() dev.off() } ) output$downloadStats <- downloadHandler( filename = c('brain_connectivity.csv'), content = function(file) { write.csv(statsTable2(), file) } ) }) # shinyServer end ################### Logic END ###################
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mpl_morphyex.R \name{mpl_second_up_recon} \alias{mpl_second_up_recon} \title{Finalises the ancestral state reconstructions for characters with inapplicable values.} \usage{ mpl_second_up_recon(node_id, left_id, right_id, anc_id, morphyobj) } \arguments{ \item{node_id}{The index of the node being reconstructed.} \item{left_id}{The index of the left descendant.} \item{right_id}{The index of the right descendant.} \item{anc_id}{The index of the immediate ancestor of the node.} \item{morphyobj}{An instance of the Morphy object.} } \value{ The integral parsimony length (right now) } \description{ Finalises the nodal sets for any characters that may have involved the inapplicable token and counts excess regions of applicability at nodes having at least two descendant subtrees that possess any applicable characters. Because this function needs to be fairly high-performance, it does not do much checking for parameter validity, thus unsafe usage of this function might not be caught. It is up to calling functions to ensure that the appropriate parameters have been set before use. 
} \seealso{ Other Morphy API functions: \code{\link{GapHandler}()}, \code{\link{MorphyErrorCheck}()}, \code{\link{MorphyWeights}()}, \code{\link{PhyDat2Morphy}()}, \code{\link{SingleCharMorphy}()}, \code{\link{UnloadMorphy}()}, \code{\link{is.morphyPtr}()}, \code{\link{mpl_apply_tipdata}()}, \code{\link{mpl_attach_rawdata}()}, \code{\link{mpl_attach_symbols}()}, \code{\link{mpl_delete_Morphy}()}, \code{\link{mpl_delete_rawdata}()}, \code{\link{mpl_first_down_recon}()}, \code{\link{mpl_first_up_recon}()}, \code{\link{mpl_get_charac_weight}()}, \code{\link{mpl_get_gaphandl}()}, \code{\link{mpl_get_num_charac}()}, \code{\link{mpl_get_num_internal_nodes}()}, \code{\link{mpl_get_numtaxa}()}, \code{\link{mpl_get_symbols}()}, \code{\link{mpl_init_Morphy}()}, \code{\link{mpl_new_Morphy}()}, \code{\link{mpl_second_down_recon}()}, \code{\link{mpl_set_charac_weight}()}, \code{\link{mpl_set_num_internal_nodes}()}, \code{\link{mpl_set_parsim_t}()}, \code{\link{mpl_translate_error}()}, \code{\link{mpl_update_lower_root}()}, \code{\link{mpl_update_tip}()}, \code{\link{summary.morphyPtr}()} } \author{ Thomas Guillerme } \concept{Morphy API functions} \keyword{internal}
/man/mpl_second_up_recon.Rd
no_license
cran/TreeSearch
R
false
true
2,412
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mpl_morphyex.R \name{mpl_second_up_recon} \alias{mpl_second_up_recon} \title{Finalises the ancestral state reconstructions for characters with inapplicable values.} \usage{ mpl_second_up_recon(node_id, left_id, right_id, anc_id, morphyobj) } \arguments{ \item{node_id}{The index of the node being reconstructed.} \item{left_id}{The index of the left descendant.} \item{right_id}{The index of the right descendant.} \item{anc_id}{The index of the immediate ancestor of the node.} \item{morphyobj}{An instance of the Morphy object.} } \value{ The integral parsimony length (right now) } \description{ Finalises the nodal sets for any characters that may have involved the inapplicable token and counts excess regions of applicability at nodes having at least two descendant subtrees that possess any applicable characters. Because this function needs to be fairly high-performance, it does not do much checking for parameter validity, thus unsafe usage of this function might not be caught. It is up to calling functions to ensure that the appropriate parameters have been set before use. 
} \seealso{ Other Morphy API functions: \code{\link{GapHandler}()}, \code{\link{MorphyErrorCheck}()}, \code{\link{MorphyWeights}()}, \code{\link{PhyDat2Morphy}()}, \code{\link{SingleCharMorphy}()}, \code{\link{UnloadMorphy}()}, \code{\link{is.morphyPtr}()}, \code{\link{mpl_apply_tipdata}()}, \code{\link{mpl_attach_rawdata}()}, \code{\link{mpl_attach_symbols}()}, \code{\link{mpl_delete_Morphy}()}, \code{\link{mpl_delete_rawdata}()}, \code{\link{mpl_first_down_recon}()}, \code{\link{mpl_first_up_recon}()}, \code{\link{mpl_get_charac_weight}()}, \code{\link{mpl_get_gaphandl}()}, \code{\link{mpl_get_num_charac}()}, \code{\link{mpl_get_num_internal_nodes}()}, \code{\link{mpl_get_numtaxa}()}, \code{\link{mpl_get_symbols}()}, \code{\link{mpl_init_Morphy}()}, \code{\link{mpl_new_Morphy}()}, \code{\link{mpl_second_down_recon}()}, \code{\link{mpl_set_charac_weight}()}, \code{\link{mpl_set_num_internal_nodes}()}, \code{\link{mpl_set_parsim_t}()}, \code{\link{mpl_translate_error}()}, \code{\link{mpl_update_lower_root}()}, \code{\link{mpl_update_tip}()}, \code{\link{summary.morphyPtr}()} } \author{ Thomas Guillerme } \concept{Morphy API functions} \keyword{internal}
ttestInheritedMethods()
/test.R
no_license
Yashimasan/new_git_test
R
false
false
23
r
ttestInheritedMethods()
####################################################################### # stream - Infrastructure for Data Stream Mining # Copyright (C) 2013 Michael Hahsler, Matthew Bolanos, John Forrest # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. DSC_TwoStage <- function(micro, macro) { state <- new.env() state$newdata <- TRUE l <- list( description = paste(micro$description, " + ", macro$description, sep=''), micro_dsc = micro, macro_dsc = macro, macro = state ) czs <- c("DSC_Macro", "DSC") if(is(micro,"DSC_Outlier")) { czs <- c("DSC_Outlier", czs) l$recheck_outliers <- micro$recheck_outliers } if(is(micro,"DSC_SinglePass")) czs <- c("DSC_SinglePass", czs) czs <- c("DSC_TwoStage", czs) structure(l,class = czs) } get_outlier_positions.DSC_TwoStage <- function(x, ...) { get_outlier_positions(x$micro_dsc) } recheck_outlier.DSC_TwoStage <- function(x, outlier_correlated_id, ...) { recheck_outlier(x$micro_dsc, outlier_correlated_id) } ### TwoStage has its own interface (does not use DSC_R) update.DSC_TwoStage <- function(object, dsd, n=1, verbose=FALSE, block=10000L, ...) 
{ ### dsc contains an RObj which is a reference object with a cluster method ### some matrix to be processed in one go if(!is(dsd, "DSD")) { n <- nrow(dsd) dsd <- DSD_Memory(dsd) } n <- as.integer(n) if(n>0) { if(!is(dsd, "DSD_data.frame")) stop("Cannot cluster stream (need a DSD_data.frame.)") ### for DSC_TwoStage if(is.environment(object$macro)) object$macro$newdata <- TRUE ### TODO: Check data for(bl in .make_block(n, block)) { update(object$micro_dsc, dsd, n=bl, ...) if(verbose) cat("Processed", bl, "points -", nclusters(object), "clusters\n") } } # so cl <- cluster(cl, ...) also works invisible(object) } ### accessors get_centers.DSC_TwoStage <- function(x, type=c("auto", "micro", "macro"), ...) { type <- match.arg(type) if(type=="micro") get_centers(x$micro_dsc) else { if(x$macro$newdata) { recluster(x$macro_dsc, x$micro_dsc) x$macro$newdata <- FALSE } get_centers(x$macro_dsc) } } get_weights.DSC_TwoStage <- function(x, type=c("auto", "micro", "macro"), ...) { type <- match.arg(type) if(type=="micro") get_weights(x$micro_dsc, ...) else { if(x$macro$newdata) { recluster(x$macro_dsc, x$micro_dsc) x$macro$newdata <- FALSE } get_weights(x$macro_dsc, ...) } } microToMacro.DSC_TwoStage <- function(x, micro=NULL, ...) { if(x$macro$newdata) { recluster(x$macro_dsc, x$micro_dsc) x$macro$newdata <- FALSE } microToMacro(x$macro_dsc, micro, ...) } get_assignment.DSC_TwoStage <- function(dsc, points, type=c("auto", "micro", "macro"), method="auto", ...) { type <- match.arg(type) if(type=="micro") { if(is(dsc$micro_dsc, "DSC_SinglePass")) dsc$macro$newdata <- TRUE get_assignment(dsc$micro_dsc, points, type, method, ...) } else { if(dsc$macro$newdata) { recluster(dsc$macro_dsc, dsc$micro_dsc) dsc$macro$newdata <- FALSE } get_assignment(dsc$macro_dsc, points, type, method, ...) } } ### make a deep copy get_copy.DSC_TwoStage <- function(x) { copy <- DSC_TwoStage(micro=get_copy(x$micro_dsc), macro=get_copy(x$macro_dsc)) copy$macro$newdata <- x$macro$newdata copy }
/R/DSC_TwoStage.R
no_license
dinarior/stream
R
false
false
4,079
r
####################################################################### # stream - Infrastructure for Data Stream Mining # Copyright (C) 2013 Michael Hahsler, Matthew Bolanos, John Forrest # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. DSC_TwoStage <- function(micro, macro) { state <- new.env() state$newdata <- TRUE l <- list( description = paste(micro$description, " + ", macro$description, sep=''), micro_dsc = micro, macro_dsc = macro, macro = state ) czs <- c("DSC_Macro", "DSC") if(is(micro,"DSC_Outlier")) { czs <- c("DSC_Outlier", czs) l$recheck_outliers <- micro$recheck_outliers } if(is(micro,"DSC_SinglePass")) czs <- c("DSC_SinglePass", czs) czs <- c("DSC_TwoStage", czs) structure(l,class = czs) } get_outlier_positions.DSC_TwoStage <- function(x, ...) { get_outlier_positions(x$micro_dsc) } recheck_outlier.DSC_TwoStage <- function(x, outlier_correlated_id, ...) { recheck_outlier(x$micro_dsc, outlier_correlated_id) } ### TwoStage has its own interface (does not use DSC_R) update.DSC_TwoStage <- function(object, dsd, n=1, verbose=FALSE, block=10000L, ...) 
{ ### dsc contains an RObj which is a reference object with a cluster method ### some matrix to be processed in one go if(!is(dsd, "DSD")) { n <- nrow(dsd) dsd <- DSD_Memory(dsd) } n <- as.integer(n) if(n>0) { if(!is(dsd, "DSD_data.frame")) stop("Cannot cluster stream (need a DSD_data.frame.)") ### for DSC_TwoStage if(is.environment(object$macro)) object$macro$newdata <- TRUE ### TODO: Check data for(bl in .make_block(n, block)) { update(object$micro_dsc, dsd, n=bl, ...) if(verbose) cat("Processed", bl, "points -", nclusters(object), "clusters\n") } } # so cl <- cluster(cl, ...) also works invisible(object) } ### accessors get_centers.DSC_TwoStage <- function(x, type=c("auto", "micro", "macro"), ...) { type <- match.arg(type) if(type=="micro") get_centers(x$micro_dsc) else { if(x$macro$newdata) { recluster(x$macro_dsc, x$micro_dsc) x$macro$newdata <- FALSE } get_centers(x$macro_dsc) } } get_weights.DSC_TwoStage <- function(x, type=c("auto", "micro", "macro"), ...) { type <- match.arg(type) if(type=="micro") get_weights(x$micro_dsc, ...) else { if(x$macro$newdata) { recluster(x$macro_dsc, x$micro_dsc) x$macro$newdata <- FALSE } get_weights(x$macro_dsc, ...) } } microToMacro.DSC_TwoStage <- function(x, micro=NULL, ...) { if(x$macro$newdata) { recluster(x$macro_dsc, x$micro_dsc) x$macro$newdata <- FALSE } microToMacro(x$macro_dsc, micro, ...) } get_assignment.DSC_TwoStage <- function(dsc, points, type=c("auto", "micro", "macro"), method="auto", ...) { type <- match.arg(type) if(type=="micro") { if(is(dsc$micro_dsc, "DSC_SinglePass")) dsc$macro$newdata <- TRUE get_assignment(dsc$micro_dsc, points, type, method, ...) } else { if(dsc$macro$newdata) { recluster(dsc$macro_dsc, dsc$micro_dsc) dsc$macro$newdata <- FALSE } get_assignment(dsc$macro_dsc, points, type, method, ...) } } ### make a deep copy get_copy.DSC_TwoStage <- function(x) { copy <- DSC_TwoStage(micro=get_copy(x$micro_dsc), macro=get_copy(x$macro_dsc)) copy$macro$newdata <- x$macro$newdata copy }
#' Slide along multiple inputs simultaneously relative to an index chunked by period #' #' `slide_period2()` and `pslide_period()` represent the combination #' of [slide2()] and [pslide()] with [slide_period()], allowing you to slide #' over multiple vectors at once, using indices defined by breaking up the #' `.i`-ndex by `.period`. #' #' @inheritParams slide_period #' #' @template param-x-y #' @template param-l #' #' @return #' A vector fulfilling the following invariants: #' #' \subsection{`slide_period2()`}{ #' #' * `vec_size(slide_period2(.x, .y)) == vec_size(unique(warp::warp_distance(.i)))` #' #' * `vec_ptype(slide_period2(.x, .y)) == list()` #' #' } #' #' \subsection{`slide_period2_vec()` and `slide_period2_*()` variants}{ #' #' * `vec_size(slide_period2_vec(.x, .y)) == vec_size(unique(warp::warp_distance(.i)))` #' #' * `vec_size(slide_period2_vec(.x, .y)[[1]]) == 1L` #' #' * `vec_ptype(slide_period2_vec(.x, .y, .ptype = ptype)) == ptype` #' #' } #' #' \subsection{`pslide_period()`}{ #' #' * `vec_size(pslide_period(.l)) == vec_size(unique(warp::warp_distance(.i)))` #' #' * `vec_ptype(pslide_period(.l)) == list()` #' #' } #' #' \subsection{`pslide_period_vec()` and `pslide_period_*()` variants}{ #' #' * `vec_size(pslide_period_vec(.l)) == vec_size(unique(warp::warp_distance(.i)))` #' #' * `vec_size(pslide_period_vec(.l)[[1]]) == 1L` #' #' * `vec_ptype(pslide_period_vec(.l, .ptype = ptype)) == ptype` #' #' } #' #' @examples #' i <- as.Date("2019-01-28") + 0:5 #' #' slide_period2( #' .x = 1:6, #' .y = i, #' .i = i, #' .period = "month", #' .f = ~data.frame(x = .x, i = .y) #' ) #' #' pslide_period( #' .l = list(1:6, 7:12, i), #' .i = i, #' .period = "month", #' .f = ~data.frame(x = .x, y = .y, i = ..3) #' ) #' #' @seealso [slide2()], [slide_index2()], [slide_period()] #' @export slide_period2 <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE) { slide_period2_impl( .x, .y, .i, .period, .f, ..., 
.every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = list(), .constrain = FALSE, .atomic = FALSE ) } #' @rdname slide_period2 #' @export slide_period2_vec <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE, .ptype = NULL) { if (is.null(.ptype)) { out <- slide_period2_vec_simplify( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete ) return(out) } slide_period2_impl( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = .ptype, .constrain = TRUE, .atomic = TRUE ) } slide_period2_vec_simplify <- function(.x, .y, .i, .period, .f, ..., .every, .origin, .before, .after, .complete) { out <- slide_period2_impl( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = list(), .constrain = FALSE, .atomic = TRUE ) vec_simplify(out) } #' @rdname slide_period2 #' @export slide_period2_dbl <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE) { slide_period2_vec( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = double() ) } #' @rdname slide_period2 #' @export slide_period2_int <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE) { slide_period2_vec( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = integer() ) } #' @rdname slide_period2 #' @export slide_period2_lgl <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE) { slide_period2_vec( .x, .y, .i, .period, .f, ..., .every = .every, 
.origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = logical() ) } #' @rdname slide_period2 #' @export slide_period2_chr <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE) { slide_period2_vec( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = character() ) } #' @rdname slide_period2 #' @export slide_period2_dfr <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE, .names_to = NULL, .name_repair = c("unique", "universal", "check_unique")) { out <- slide_period2( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete ) vec_rbind(!!!out, .names_to = .names_to, .name_repair = .name_repair) } #' @rdname slide_period2 #' @export slide_period2_dfc <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE, .size = NULL, .name_repair = c("unique", "universal", "check_unique", "minimal")) { out <- slide_period2( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete ) vec_cbind(!!!out, .size = .size, .name_repair = .name_repair) } # ------------------------------------------------------------------------------ slide_period2_impl <- function(.x, .y, .i, .period, .f, ..., .every, .origin, .before, .after, .complete, .ptype, .constrain, .atomic) { vec_assert(.x) vec_assert(.y) # TODO - Do more efficiently internally by reusing rather than recycling # https://github.com/tidyverse/purrr/blob/e4d553989e3d18692ebeeedb334b6223ae9ea294/src/map.c#L129 # But use `vec_size_common()` to check sizes and get `.size` args <- vec_recycle_common(.x, .y) .f <- as_function(.f) f_call <- expr(.f(.x, .y, ...)) type <- -2L slide_period_common( x = args, i = .i, 
period = .period, f_call = f_call, every = .every, origin = .origin, before = .before, after = .after, complete = .complete, ptype = .ptype, constrain = .constrain, atomic = .atomic, env = environment(), type = type ) }
/R/slide-period2.R
permissive
romainfrancois/slider
R
false
false
10,215
r
#' Slide along multiple inputs simultaneously relative to an index chunked by period #' #' `slide_period2()` and `pslide_period()` represent the combination #' of [slide2()] and [pslide()] with [slide_period()], allowing you to slide #' over multiple vectors at once, using indices defined by breaking up the #' `.i`-ndex by `.period`. #' #' @inheritParams slide_period #' #' @template param-x-y #' @template param-l #' #' @return #' A vector fulfilling the following invariants: #' #' \subsection{`slide_period2()`}{ #' #' * `vec_size(slide_period2(.x, .y)) == vec_size(unique(warp::warp_distance(.i)))` #' #' * `vec_ptype(slide_period2(.x, .y)) == list()` #' #' } #' #' \subsection{`slide_period2_vec()` and `slide_period2_*()` variants}{ #' #' * `vec_size(slide_period2_vec(.x, .y)) == vec_size(unique(warp::warp_distance(.i)))` #' #' * `vec_size(slide_period2_vec(.x, .y)[[1]]) == 1L` #' #' * `vec_ptype(slide_period2_vec(.x, .y, .ptype = ptype)) == ptype` #' #' } #' #' \subsection{`pslide_period()`}{ #' #' * `vec_size(pslide_period(.l)) == vec_size(unique(warp::warp_distance(.i)))` #' #' * `vec_ptype(pslide_period(.l)) == list()` #' #' } #' #' \subsection{`pslide_period_vec()` and `pslide_period_*()` variants}{ #' #' * `vec_size(pslide_period_vec(.l)) == vec_size(unique(warp::warp_distance(.i)))` #' #' * `vec_size(pslide_period_vec(.l)[[1]]) == 1L` #' #' * `vec_ptype(pslide_period_vec(.l, .ptype = ptype)) == ptype` #' #' } #' #' @examples #' i <- as.Date("2019-01-28") + 0:5 #' #' slide_period2( #' .x = 1:6, #' .y = i, #' .i = i, #' .period = "month", #' .f = ~data.frame(x = .x, i = .y) #' ) #' #' pslide_period( #' .l = list(1:6, 7:12, i), #' .i = i, #' .period = "month", #' .f = ~data.frame(x = .x, y = .y, i = ..3) #' ) #' #' @seealso [slide2()], [slide_index2()], [slide_period()] #' @export slide_period2 <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE) { slide_period2_impl( .x, .y, .i, .period, .f, ..., 
.every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = list(), .constrain = FALSE, .atomic = FALSE ) } #' @rdname slide_period2 #' @export slide_period2_vec <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE, .ptype = NULL) { if (is.null(.ptype)) { out <- slide_period2_vec_simplify( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete ) return(out) } slide_period2_impl( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = .ptype, .constrain = TRUE, .atomic = TRUE ) } slide_period2_vec_simplify <- function(.x, .y, .i, .period, .f, ..., .every, .origin, .before, .after, .complete) { out <- slide_period2_impl( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = list(), .constrain = FALSE, .atomic = TRUE ) vec_simplify(out) } #' @rdname slide_period2 #' @export slide_period2_dbl <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE) { slide_period2_vec( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = double() ) } #' @rdname slide_period2 #' @export slide_period2_int <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE) { slide_period2_vec( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = integer() ) } #' @rdname slide_period2 #' @export slide_period2_lgl <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE) { slide_period2_vec( .x, .y, .i, .period, .f, ..., .every = .every, 
.origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = logical() ) } #' @rdname slide_period2 #' @export slide_period2_chr <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE) { slide_period2_vec( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete, .ptype = character() ) } #' @rdname slide_period2 #' @export slide_period2_dfr <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE, .names_to = NULL, .name_repair = c("unique", "universal", "check_unique")) { out <- slide_period2( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete ) vec_rbind(!!!out, .names_to = .names_to, .name_repair = .name_repair) } #' @rdname slide_period2 #' @export slide_period2_dfc <- function(.x, .y, .i, .period, .f, ..., .every = 1L, .origin = NULL, .before = 0L, .after = 0L, .complete = FALSE, .size = NULL, .name_repair = c("unique", "universal", "check_unique", "minimal")) { out <- slide_period2( .x, .y, .i, .period, .f, ..., .every = .every, .origin = .origin, .before = .before, .after = .after, .complete = .complete ) vec_cbind(!!!out, .size = .size, .name_repair = .name_repair) } # ------------------------------------------------------------------------------ slide_period2_impl <- function(.x, .y, .i, .period, .f, ..., .every, .origin, .before, .after, .complete, .ptype, .constrain, .atomic) { vec_assert(.x) vec_assert(.y) # TODO - Do more efficiently internally by reusing rather than recycling # https://github.com/tidyverse/purrr/blob/e4d553989e3d18692ebeeedb334b6223ae9ea294/src/map.c#L129 # But use `vec_size_common()` to check sizes and get `.size` args <- vec_recycle_common(.x, .y) .f <- as_function(.f) f_call <- expr(.f(.x, .y, ...)) type <- -2L slide_period_common( x = args, i = .i, 
period = .period, f_call = f_call, every = .every, origin = .origin, before = .before, after = .after, complete = .complete, ptype = .ptype, constrain = .constrain, atomic = .atomic, env = environment(), type = type ) }
library(ggplot2) library(ggrepel) library(grid) library(gridExtra) library(dplyr) library(ggExtra) library(lattice) #install.packages("reshape2") library(reshape2) #histogram lattice.options(axis.padding=list(factor=0.5)) df<-read.csv('birth-rate.csv') head(df) my.settings <- list(par.main.text = list(just = "left", col = 'dimgray',x = grid::unit(5, "mm")), axis.line = list(col='transparent')) histogram(df$X2008,breaks=10, xlab = "Births per 1,000 population",ylab = "Percent of total(%)",border = 'white',par.settings=my.settings, main="DISTRIBUTION OF BIRTH RATES, 2008",col = c("orange")) #boxplot df1<-read.csv('crimeratesbystate-formatted.csv') head(df1) myvars <- c("state", "robbery", "burglary") df1 <- df1[myvars] head(df1) df1<-melt(data = df1, id.vars = c("state"), measure.vars = c("burglary", "robbery")) head(df1) ggplot(df1, aes(x=variable, y=value, fill=variable)) + geom_boxplot(outlier.size = 2.5)+ theme_classic()+ scale_fill_manual(values=c("gray", "orange"))+ ggtitle("\nBURGLARY AND ROBBERY DISTRIBUTIONS\n")+ ylab("Rates ( per 100,000 population)")+ theme_bw()+ removeGrid()+ theme(panel.border = element_blank(), title=element_text(color = 'dimgray'), axis.ticks.y = element_blank(), axis.ticks.x = element_blank(),axis.text.y = element_text(size = 10), axis.text.x = element_text(size = 11), axis.title.y=element_text(size=12, color = 'dimgray'))+ theme(axis.line = element_line(color = 'white'))+ theme(axis.title.x = element_blank()) #bulletplot library(plotly) library("readxl") df4<-read_excel('sales_devices.xlsx') head(df4) df41<-df4[df4$devices == 'alarm clock', ] head(df41) df42<-df4[df4$devices == 'camera', ] head(df42) df43<-df4[df4$devices == 'printer', ] head(df43) t <- list( family = "sans serif", size = 20, color = 'dimgray', adj=0) fig <- plot_ly() fig <- fig %>% add_trace( type = "indicator", mode = "number+gauge+delta", value = sum(df43$actual_sales), delta = list(reference = sum(df43$expected_sales)), domain = list(x = c(0.25, 1), y = c(0.08, 
0.29)), title =list(text = "printer sales"), gauge = list( shape = "bullet", axis = list(range = c(0, 500)), threshold = list( line= list(color = "black", width = 2), thickness = 0.55, value = sum(df43$expected_sales)), steps = list( list(range = c(0, sum(df43$actual_sales)*60*0.01), color = "gray"), list(range = c(sum(df43$actual_sales)*60*0.01, sum(df43$actual_sales)*80*0.01), color = "lightgray")), bar = list(color = "orange"))) #radar chart fig <- fig %>% add_trace( type = "indicator", mode = "number+gauge+delta", value = sum(df42$actual_sales), delta = list(reference = sum(df42$expected_sales)), domain = list(x = c(0.25, 1), y = c(0.4, 0.6)), title = list(text = "camera sales"), gauge = list( shape = "bullet", axis = list(range = list(NULL, 500)), threshold = list( line = list(color = "black", width= 2), thickness = 0.55, value = sum(df42$expected_sales)), steps = list( list(range = c(0, sum(df42$actual_sales)*60*0.01), color = "gray"), list(range = c(sum(df42$actual_sales)*60*0.01, sum(df42$actual_sales)*80*0.01), color = "lightgray")), bar = list(color = "orange"))) fig <- fig %>% add_trace( type = "indicator", mode = "number+gauge+delta", value = sum(df41$actual_sales), delta = list(reference = sum(df41$expected_sales)), domain = list(x = c(0.25, 1), y = c(0.7, 0.9)), title = list(text = "alarm clock sales"), gauge = list( shape = "bullet", axis = list(range = list(NULL, 500)), threshold = list( line = list(color = "black", width = 2), thickness = 0.55, value = sum(df41$expected_sales)), steps = list( list(range = c(0, sum(df41$actual_sales)*60*0.01), color = "gray"), list(range = c(sum(df41$actual_sales)*60*0.01, sum(df41$actual_sales)*80*0.01), color = "lightgray")), bar = list(color = "orange"))) fig<-fig %>% layout( title="\nACTUAL VS EXPECTED DEVICE SALES", font = t ) fig # RADAR #install.packages('fmsb') library(fmsb) df6<-read.csv('crimeratesbystate-formatted.csv') head(df1) myvars <- c( 'state','murder', 'robbery', 'aggravated_assault', 'burglary', 
'larceny_theft', 'motor_vehicle_theft') df6 <- df6[myvars] df6<-df6 %>% filter(state == 'United States ') df6 my<-c( 'murder', 'robbery', 'aggravated_assault', 'burglary', 'larceny_theft','motor_vehicle_theft') df6 <- df6[my] df6 df6 <- rbind(df6, '2' = c(0,0,0,0,0,0)) df6 df6 <- rbind(df6, '2' = c(2286.3,2286.3,2286.3,2286.3,2286.3, 2286.3)) df6 df6 <- rbind(df6, '2' = c(0,0,0,0,0)) df6 <- rbind(df6, '2' = c(5.6,140.7,291.1,726.7,2286.3,416.7)) df6 newdata <- df6[-c(1, 2), ] names(newdata)[names(newdata) == "motor_vehicle_theft"] <- "motor vehicle theft" names(newdata)[names(newdata) == "larceny_theft"] <- "larceny theft" names(newdata)[names(newdata) == "aggravated_assault"] <- "aggravated asault" head(newdata) radarchart(newdata, pcol=rgb(0.2,0.5,0.5,0.9) , pfcol=rgb(0.2,0.5,0.5,0.5) , seg=5,axistype=1,cglcol="grey", caxislabels=c("0", "500", "1000", "1500","2000", ""),cglty=1,plwd = 2, axislabcol="grey", cglwd=0.5, #custom labels vlcex=0.8 ) mtext(side = 3, line = 2.5, at = 0, cex = 1.75, "CRIME IN THE UNITED STATES", font = 0.1, col = 'dimgray')
/Casey_Natallia_histogram_boxplot_bulletchart_radarchart.R
no_license
natacasey/histogram_boxplot_bulletchart_radarchart
R
false
false
5,521
r
library(ggplot2) library(ggrepel) library(grid) library(gridExtra) library(dplyr) library(ggExtra) library(lattice) #install.packages("reshape2") library(reshape2) #histogram lattice.options(axis.padding=list(factor=0.5)) df<-read.csv('birth-rate.csv') head(df) my.settings <- list(par.main.text = list(just = "left", col = 'dimgray',x = grid::unit(5, "mm")), axis.line = list(col='transparent')) histogram(df$X2008,breaks=10, xlab = "Births per 1,000 population",ylab = "Percent of total(%)",border = 'white',par.settings=my.settings, main="DISTRIBUTION OF BIRTH RATES, 2008",col = c("orange")) #boxplot df1<-read.csv('crimeratesbystate-formatted.csv') head(df1) myvars <- c("state", "robbery", "burglary") df1 <- df1[myvars] head(df1) df1<-melt(data = df1, id.vars = c("state"), measure.vars = c("burglary", "robbery")) head(df1) ggplot(df1, aes(x=variable, y=value, fill=variable)) + geom_boxplot(outlier.size = 2.5)+ theme_classic()+ scale_fill_manual(values=c("gray", "orange"))+ ggtitle("\nBURGLARY AND ROBBERY DISTRIBUTIONS\n")+ ylab("Rates ( per 100,000 population)")+ theme_bw()+ removeGrid()+ theme(panel.border = element_blank(), title=element_text(color = 'dimgray'), axis.ticks.y = element_blank(), axis.ticks.x = element_blank(),axis.text.y = element_text(size = 10), axis.text.x = element_text(size = 11), axis.title.y=element_text(size=12, color = 'dimgray'))+ theme(axis.line = element_line(color = 'white'))+ theme(axis.title.x = element_blank()) #bulletplot library(plotly) library("readxl") df4<-read_excel('sales_devices.xlsx') head(df4) df41<-df4[df4$devices == 'alarm clock', ] head(df41) df42<-df4[df4$devices == 'camera', ] head(df42) df43<-df4[df4$devices == 'printer', ] head(df43) t <- list( family = "sans serif", size = 20, color = 'dimgray', adj=0) fig <- plot_ly() fig <- fig %>% add_trace( type = "indicator", mode = "number+gauge+delta", value = sum(df43$actual_sales), delta = list(reference = sum(df43$expected_sales)), domain = list(x = c(0.25, 1), y = c(0.08, 
0.29)), title =list(text = "printer sales"), gauge = list( shape = "bullet", axis = list(range = c(0, 500)), threshold = list( line= list(color = "black", width = 2), thickness = 0.55, value = sum(df43$expected_sales)), steps = list( list(range = c(0, sum(df43$actual_sales)*60*0.01), color = "gray"), list(range = c(sum(df43$actual_sales)*60*0.01, sum(df43$actual_sales)*80*0.01), color = "lightgray")), bar = list(color = "orange"))) #radar chart fig <- fig %>% add_trace( type = "indicator", mode = "number+gauge+delta", value = sum(df42$actual_sales), delta = list(reference = sum(df42$expected_sales)), domain = list(x = c(0.25, 1), y = c(0.4, 0.6)), title = list(text = "camera sales"), gauge = list( shape = "bullet", axis = list(range = list(NULL, 500)), threshold = list( line = list(color = "black", width= 2), thickness = 0.55, value = sum(df42$expected_sales)), steps = list( list(range = c(0, sum(df42$actual_sales)*60*0.01), color = "gray"), list(range = c(sum(df42$actual_sales)*60*0.01, sum(df42$actual_sales)*80*0.01), color = "lightgray")), bar = list(color = "orange"))) fig <- fig %>% add_trace( type = "indicator", mode = "number+gauge+delta", value = sum(df41$actual_sales), delta = list(reference = sum(df41$expected_sales)), domain = list(x = c(0.25, 1), y = c(0.7, 0.9)), title = list(text = "alarm clock sales"), gauge = list( shape = "bullet", axis = list(range = list(NULL, 500)), threshold = list( line = list(color = "black", width = 2), thickness = 0.55, value = sum(df41$expected_sales)), steps = list( list(range = c(0, sum(df41$actual_sales)*60*0.01), color = "gray"), list(range = c(sum(df41$actual_sales)*60*0.01, sum(df41$actual_sales)*80*0.01), color = "lightgray")), bar = list(color = "orange"))) fig<-fig %>% layout( title="\nACTUAL VS EXPECTED DEVICE SALES", font = t ) fig # RADAR #install.packages('fmsb') library(fmsb) df6<-read.csv('crimeratesbystate-formatted.csv') head(df1) myvars <- c( 'state','murder', 'robbery', 'aggravated_assault', 'burglary', 
'larceny_theft', 'motor_vehicle_theft') df6 <- df6[myvars] df6<-df6 %>% filter(state == 'United States ') df6 my<-c( 'murder', 'robbery', 'aggravated_assault', 'burglary', 'larceny_theft','motor_vehicle_theft') df6 <- df6[my] df6 df6 <- rbind(df6, '2' = c(0,0,0,0,0,0)) df6 df6 <- rbind(df6, '2' = c(2286.3,2286.3,2286.3,2286.3,2286.3, 2286.3)) df6 df6 <- rbind(df6, '2' = c(0,0,0,0,0)) df6 <- rbind(df6, '2' = c(5.6,140.7,291.1,726.7,2286.3,416.7)) df6 newdata <- df6[-c(1, 2), ] names(newdata)[names(newdata) == "motor_vehicle_theft"] <- "motor vehicle theft" names(newdata)[names(newdata) == "larceny_theft"] <- "larceny theft" names(newdata)[names(newdata) == "aggravated_assault"] <- "aggravated asault" head(newdata) radarchart(newdata, pcol=rgb(0.2,0.5,0.5,0.9) , pfcol=rgb(0.2,0.5,0.5,0.5) , seg=5,axistype=1,cglcol="grey", caxislabels=c("0", "500", "1000", "1500","2000", ""),cglty=1,plwd = 2, axislabcol="grey", cglwd=0.5, #custom labels vlcex=0.8 ) mtext(side = 3, line = 2.5, at = 0, cex = 1.75, "CRIME IN THE UNITED STATES", font = 0.1, col = 'dimgray')
library(Rcpp) library(dplyr) #Tarea predecir Sepal.Length con las otras variables # ESTIMATION OF A MEAN data(iris) Y <- iris$Sepal.Length N <- dim(iris)[1] X <- as.matrix(cbind(1,iris[,2:4])) beta.hat<-solve(t(X)%*%X,t(X)%*%Y) pred<-X%*%beta.hat residuals<-Y-pred #hist(residuals,breaks = 20) SS<-sqrt(sum((Y-pred)^2)/(N-dim(iris)[2]+1)) cov.betas<-(SS^2)*solve(t(X)%*%X) sqrt(diag(cov.betas)) # APPROACH 1) POINTWISE ESTIMATORS AND PIVOTAL QUANTITIES lm <- lm(Sepal.Length ~ ., iris) summary(lm) # BAYESIAN APPROACH # beta_j ~ N(0,10) # Se puede jugar con los parámetros de la inicial aquí y en 1). Cuando es muy plana los coeficientes se parecen mucho a los de la regresión lineal prior.beta <- function(x) dnorm(x, 0, .2) prior.sd <- function(x) dgamma(x,5,100) plot(prior.beta, col="darkblue", xlim=c(-50,50), lwd="2", main="Prior for mean", ylab="density") plot(prior.sd, col="darkblue", xlim=c(-0,1), lwd="2", main="Prior for mean", ylab="density") # 1) logposterior distribution (up to a constant) cppFunction(' double objdens(NumericMatrix X, NumericVector y, NumericVector theta){ int i; double lkh, logprior, yhat; int m=X.nrow(), p=X.ncol(); NumericVector beta(m-1); double sd; for (i=0; i<p; i++){ beta[i] = theta[i]; } sd = theta[p]; NumericVector aux(m); // Compute loglikelihood lkh=0; for (int i=0; i<m; i++){ aux = X(i,_)*beta; yhat = std::accumulate(aux.begin(), aux.end(), 0.0); lkh += -.5*pow((y[i] - yhat)/sd,2) - log(sd); } // Compute logprior logprior = 0.0; for(int j=0; j<p; j++){ logprior += R::dnorm(beta[j], 0.0, 0.8, true); // Aquí la inicial!! 
} logprior += R::dgamma(sd, 5.0, 0.1, true); // Log of target density return lkh + logprior; }') # 2) Proposal: random walk in the same dimension as the number of parameters cppFunction(' NumericVector proposal(NumericVector theta, NumericMatrix X){ int nparam = theta.size(); int m=X.nrow(); double jump = 0.25/sqrt(m); //checar paper NumericVector newtheta(nparam); for (int i=0; i<nparam; i++){ newtheta[i] = R::rnorm(theta[i], jump); } return newtheta; }') # 3) METROPOLIS sourceCpp("MHBayesLinReg.cpp") nsim <- 10000 init <- c(0,0,0,0.5) proposal(init,X) mh.samp <- MHBayesLinReg(nsim, init, objdens, proposal, X, Y) estims <- mh.samp$theta # SOME DIAGNOSTIC IMPORTANT STUFF # Exploration graph: library(calibrate) pts <- seq(1,nrow(estims),by=5) plot(estims[pts, ], type="l", asp=1) ###aceptacion rejections <- mh.samp$rejections[-1] trials <- rejections + 1 acc.rate<- 1-cumsum(rejections)/cumsum(trials) plot(100*acc.rate,type = 'l',ylim = c(0,100), main = "Acceptance rate", ylab = "%") ### 2) AUTOCORRELATION par(mfrow=c(2,3)) for(i in 1:ncol(estims)){ acf(estims[ , i],main=paste("theta",i)) } # burnin and subsampling burnin <- round(0.2*sim) estims <- estims[-(1:burnin), ]
/Tarea5/BayesianInference_regresion.R
no_license
montactuaria/CompuStat
R
false
false
3,249
r
library(Rcpp) library(dplyr) #Tarea predecir Sepal.Length con las otras variables # ESTIMATION OF A MEAN data(iris) Y <- iris$Sepal.Length N <- dim(iris)[1] X <- as.matrix(cbind(1,iris[,2:4])) beta.hat<-solve(t(X)%*%X,t(X)%*%Y) pred<-X%*%beta.hat residuals<-Y-pred #hist(residuals,breaks = 20) SS<-sqrt(sum((Y-pred)^2)/(N-dim(iris)[2]+1)) cov.betas<-(SS^2)*solve(t(X)%*%X) sqrt(diag(cov.betas)) # APPROACH 1) POINTWISE ESTIMATORS AND PIVOTAL QUANTITIES lm <- lm(Sepal.Length ~ ., iris) summary(lm) # BAYESIAN APPROACH # beta_j ~ N(0,10) # Se puede jugar con los parámetros de la inicial aquí y en 1). Cuando es muy plana los coeficientes se parecen mucho a los de la regresión lineal prior.beta <- function(x) dnorm(x, 0, .2) prior.sd <- function(x) dgamma(x,5,100) plot(prior.beta, col="darkblue", xlim=c(-50,50), lwd="2", main="Prior for mean", ylab="density") plot(prior.sd, col="darkblue", xlim=c(-0,1), lwd="2", main="Prior for mean", ylab="density") # 1) logposterior distribution (up to a constant) cppFunction(' double objdens(NumericMatrix X, NumericVector y, NumericVector theta){ int i; double lkh, logprior, yhat; int m=X.nrow(), p=X.ncol(); NumericVector beta(m-1); double sd; for (i=0; i<p; i++){ beta[i] = theta[i]; } sd = theta[p]; NumericVector aux(m); // Compute loglikelihood lkh=0; for (int i=0; i<m; i++){ aux = X(i,_)*beta; yhat = std::accumulate(aux.begin(), aux.end(), 0.0); lkh += -.5*pow((y[i] - yhat)/sd,2) - log(sd); } // Compute logprior logprior = 0.0; for(int j=0; j<p; j++){ logprior += R::dnorm(beta[j], 0.0, 0.8, true); // Aquí la inicial!! 
} logprior += R::dgamma(sd, 5.0, 0.1, true); // Log of target density return lkh + logprior; }') # 2) Proposal: random walk in the same dimension as the number of parameters cppFunction(' NumericVector proposal(NumericVector theta, NumericMatrix X){ int nparam = theta.size(); int m=X.nrow(); double jump = 0.25/sqrt(m); //checar paper NumericVector newtheta(nparam); for (int i=0; i<nparam; i++){ newtheta[i] = R::rnorm(theta[i], jump); } return newtheta; }') # 3) METROPOLIS sourceCpp("MHBayesLinReg.cpp") nsim <- 10000 init <- c(0,0,0,0.5) proposal(init,X) mh.samp <- MHBayesLinReg(nsim, init, objdens, proposal, X, Y) estims <- mh.samp$theta # SOME DIAGNOSTIC IMPORTANT STUFF # Exploration graph: library(calibrate) pts <- seq(1,nrow(estims),by=5) plot(estims[pts, ], type="l", asp=1) ###aceptacion rejections <- mh.samp$rejections[-1] trials <- rejections + 1 acc.rate<- 1-cumsum(rejections)/cumsum(trials) plot(100*acc.rate,type = 'l',ylim = c(0,100), main = "Acceptance rate", ylab = "%") ### 2) AUTOCORRELATION par(mfrow=c(2,3)) for(i in 1:ncol(estims)){ acf(estims[ , i],main=paste("theta",i)) } # burnin and subsampling burnin <- round(0.2*sim) estims <- estims[-(1:burnin), ]
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/inspect_cor.R \name{inspect_cor} \alias{inspect_cor} \title{Summarise and compare Pearson's correlation coefficients for numeric columns in one or two dataframes.} \usage{ inspect_cor(df1, df2 = NULL, method = "pearson", with_col = NULL, alpha = 0.05, show_plot = FALSE) } \arguments{ \item{df1}{A data frame.} \item{df2}{An optional second data frame for comparing correlation coefficients. Defaults to \code{NULL}.} \item{method}{a character string indicating which type of correlation coefficient to use, one of \code{"pearson"}, \code{"kendall"}, or \code{"spearman"}, which can be abbreviated.} \item{with_col}{Character vector of column names to calculate correlations with all other numeric features. The default \code{with_col = NULL} returns all pairs of correlations.} \item{alpha}{Alpha level for correlation confidence intervals. Defaults to 0.05.} \item{show_plot}{(Deprecated) Logical flag indicating whether a plot should be shown. Superseded by the function \code{show_plot()} and will be dropped in a future version.} } \value{ A tibble summarising and comparing the correlations for each numeric column in one or a pair of data frames. } \description{ Summarise and compare Pearson's correlation coefficients for numeric columns in one or two dataframes. } \details{ When \code{df2 = NULL}, a tibble containing correlation coefficients for \code{df1} is returned: \itemize{ \item \code{col_1}, \code{co1_2} character vectors containing names of numeric columns in \code{df1}. \item \code{corr} the calculated correlation coefficient. \item \code{lower}, \code{upper} lower and upper values of the confidence interval for the correlations. \item \code{p_value} p-value associated with a test where the null hypothesis is that the numeric pair have 0 correlation. 
} If \code{df1} has class \code{grouped_df}, then correlations will be calculated within the grouping levels and the tibble returned will have an additional column corresponding to the group labels. When both \code{df1} and \code{df2} are specified, the tibble returned contains a comparison of the correlation coefficients across pairs of columns common to both dataframes. \itemize{ \item \code{col_1}, \code{co1_2} character vectors containing names of numeric columns in either \code{df1} or \code{df2}. \item \code{corr_1}, \code{corr_2} numeric columns containing correlation coefficients from \code{df1} and \code{df2}, respectively. \item \code{p_value} p-value associated with the null hypothesis that the two correlation coefficients are the same. Small values indicate that the true correlation coefficients differ between the two dataframes. } Note that confidence intervals for \code{kendall} and \code{spearman} assume a normal sampling distribution for the Fisher z-transform of the correlation. } \examples{ # Load dplyr for starwars data & pipe library(dplyr) # Single dataframe summary inspect_cor(starwars) # Only show correlations with 'mass' column inspect_cor(starwars, with_col = "mass") # Paired dataframe summary inspect_cor(starwars, starwars[1:10, ]) # NOT RUN - change in correlation over time # library(dplyr) # tech_grp <- tech \%>\% # group_by(year) \%>\% # inspect_cor() # tech_grp \%>\% show_plot() }
/man/inspect_cor.Rd
no_license
wulixin/inspectdf
R
false
true
3,377
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/inspect_cor.R \name{inspect_cor} \alias{inspect_cor} \title{Summarise and compare Pearson's correlation coefficients for numeric columns in one or two dataframes.} \usage{ inspect_cor(df1, df2 = NULL, method = "pearson", with_col = NULL, alpha = 0.05, show_plot = FALSE) } \arguments{ \item{df1}{A data frame.} \item{df2}{An optional second data frame for comparing correlation coefficients. Defaults to \code{NULL}.} \item{method}{a character string indicating which type of correlation coefficient to use, one of \code{"pearson"}, \code{"kendall"}, or \code{"spearman"}, which can be abbreviated.} \item{with_col}{Character vector of column names to calculate correlations with all other numeric features. The default \code{with_col = NULL} returns all pairs of correlations.} \item{alpha}{Alpha level for correlation confidence intervals. Defaults to 0.05.} \item{show_plot}{(Deprecated) Logical flag indicating whether a plot should be shown. Superseded by the function \code{show_plot()} and will be dropped in a future version.} } \value{ A tibble summarising and comparing the correlations for each numeric column in one or a pair of data frames. } \description{ Summarise and compare Pearson's correlation coefficients for numeric columns in one or two dataframes. } \details{ When \code{df2 = NULL}, a tibble containing correlation coefficients for \code{df1} is returned: \itemize{ \item \code{col_1}, \code{co1_2} character vectors containing names of numeric columns in \code{df1}. \item \code{corr} the calculated correlation coefficient. \item \code{lower}, \code{upper} lower and upper values of the confidence interval for the correlations. \item \code{p_value} p-value associated with a test where the null hypothesis is that the numeric pair have 0 correlation. 
} If \code{df1} has class \code{grouped_df}, then correlations will be calculated within the grouping levels and the tibble returned will have an additional column corresponding to the group labels. When both \code{df1} and \code{df2} are specified, the tibble returned contains a comparison of the correlation coefficients across pairs of columns common to both dataframes. \itemize{ \item \code{col_1}, \code{co1_2} character vectors containing names of numeric columns in either \code{df1} or \code{df2}. \item \code{corr_1}, \code{corr_2} numeric columns containing correlation coefficients from \code{df1} and \code{df2}, respectively. \item \code{p_value} p-value associated with the null hypothesis that the two correlation coefficients are the same. Small values indicate that the true correlation coefficients differ between the two dataframes. } Note that confidence intervals for \code{kendall} and \code{spearman} assume a normal sampling distribution for the Fisher z-transform of the correlation. } \examples{ # Load dplyr for starwars data & pipe library(dplyr) # Single dataframe summary inspect_cor(starwars) # Only show correlations with 'mass' column inspect_cor(starwars, with_col = "mass") # Paired dataframe summary inspect_cor(starwars, starwars[1:10, ]) # NOT RUN - change in correlation over time # library(dplyr) # tech_grp <- tech \%>\% # group_by(year) \%>\% # inspect_cor() # tech_grp \%>\% show_plot() }
read_r24all <- function(ofile,sta=NULL) { Nsta <- length(sta) r24 <- scan(ofile,skip=11,quiet=TRUE,na.strings=c("****","*****","******")) r24 <- matrix(r24,5,length(r24)/5) r24 <- aperm(r24) if(!is.null(sta)) { r24all <- rep(0,Nsta) r24 <- r24[order(r24[,1]),] index <- diff(r24[,1])>0 index <- c(TRUE,index) r24 <- r24[index,] r24 <- r24[r24[,1] %in% sta,] r24 <- matrix(r24,length(r24)/5,5) r24all[sta %in% r24[,1]] <- r24[,5] r24all[r24all<0] <- 0 } else {r24all <- r24} r24all }
/R/read_r24all.r
no_license
Quandony/RMicapsData
R
false
false
524
r
read_r24all <- function(ofile,sta=NULL) { Nsta <- length(sta) r24 <- scan(ofile,skip=11,quiet=TRUE,na.strings=c("****","*****","******")) r24 <- matrix(r24,5,length(r24)/5) r24 <- aperm(r24) if(!is.null(sta)) { r24all <- rep(0,Nsta) r24 <- r24[order(r24[,1]),] index <- diff(r24[,1])>0 index <- c(TRUE,index) r24 <- r24[index,] r24 <- r24[r24[,1] %in% sta,] r24 <- matrix(r24,length(r24)/5,5) r24all[sta %in% r24[,1]] <- r24[,5] r24all[r24all<0] <- 0 } else {r24all <- r24} r24all }
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.57081998962836e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161)) result <- do.call(meteor:::ET0_PenmanMonteith,testlist) str(result)
/meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615839019-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
826
r
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.57081998962836e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161)) result <- do.call(meteor:::ET0_PenmanMonteith,testlist) str(result)
test_that("type of element translate in correct number of vertexes", { expect_equal(getVertexFromTypeATLAS("102"),2) expect_equal(getVertexFromTypeATLAS("203"),3) expect_equal(getVertexFromTypeATLAS("204"),4) expect_equal(getVertexFromTypeATLAS("304"),4) expect_equal(getVertexFromTypeATLAS("308"),8) })
/tests/testthat/test-type_from_vertex.R
permissive
gabrielpreda/mesh2D
R
false
false
325
r
test_that("type of element translate in correct number of vertexes", { expect_equal(getVertexFromTypeATLAS("102"),2) expect_equal(getVertexFromTypeATLAS("203"),3) expect_equal(getVertexFromTypeATLAS("204"),4) expect_equal(getVertexFromTypeATLAS("304"),4) expect_equal(getVertexFromTypeATLAS("308"),8) })
\name{hist} \alias{hist.gsaResult} \title{ Null distribution histogram and statistic of the input set for enrichment analyses. } \description{ Plots the distribution of gene set statistic values obtained in different resampling settings of an enrichment analysis, and draws the statistic value of the input set as a vertical line. } \usage{ \method{hist}{gsaResult}(x, signLevel = x$signLevel, subset = NULL, ask = FALSE, addLegend = TRUE, ...) } \arguments{ \item{x}{ A result of a call to \code{geneSetAnalysis} (see also Details). } \item{signLevel}{ The significance level that should be applied for the plots. Default is the significance level used for the analysis in \code{x}. } \item{subset}{ Indices for the results that should be included in the diagram. } \item{ask}{ If set to true, the plot function will prompt for a user input for each new plot that is shown on an interactive device (see \code{\link[=par]{par("ask")}}). } \item{addLegend}{ If set to true (default), a \code{\link{legend}} is added to the plot. } \item{...}{ Other parameters which can be used for histograms (see \code{\link[graphics]{hist}}). } } \details{ The function plots the distribution of gene set statistic values under the null hypothesis. It requires the significance assessment step of the enrichment analysis configuration (parameter \code{significance} or \code{\link{gsAnalysis}}) to be a computer-intensive testing procedure that yields a distribution of gene set statistic p-values under the null hypothesis. Predefined configurations for which this plot works are \code{\link{analysis.gsea}}, \code{\link{analysis.averageCorrelation}} and \code{\link{analysis.averageTStatistic}}. A histogram is plotted for the analysis in \code{x}. If \code{x} includes the analyses for several gene sets, one histogram is plotted for each of the gene sets. The statistic value of the input set is depicted as a vertical line. 
The most common graphical parameters can be supplied as vectors (one entry per analyzed gene set) to vary them between the different analyses. These parameters are: \code{main}, \code{xlab}, \code{ylab}. } \seealso{ \code{\link{geneSetAnalysis}}, \code{\link{predefinedAnalyses}}, \code{\link{gsAnalysis}}, \code{\link{evaluateGeneSetUncertainty}}, \code{\link{plot.uncertaintyResult}} } \examples{ # load data require(GlobalAncova) data(vantVeer) data(phenodata) data(pathways) res <- geneSetAnalysis( # global parameters dat = vantVeer, geneSets = pathways[3], analysis = analysis.averageCorrelation(), # additional parameters for analysis.averageCorrelation labs = phenodata$metastases, p = 1, numSamples = 10) # plot the histogram for the cell cycle control gene set hist(res, main = names(pathways[3])) } %\keyword{}
/man/hist.Rd
no_license
cran/GiANT
R
false
false
2,770
rd
\name{hist} \alias{hist.gsaResult} \title{ Null distribution histogram and statistic of the input set for enrichment analyses. } \description{ Plots the distribution of gene set statistic values obtained in different resampling settings of an enrichment analysis, and draws the statistic value of the input set as a vertical line. } \usage{ \method{hist}{gsaResult}(x, signLevel = x$signLevel, subset = NULL, ask = FALSE, addLegend = TRUE, ...) } \arguments{ \item{x}{ A result of a call to \code{geneSetAnalysis} (see also Details). } \item{signLevel}{ The significance level that should be applied for the plots. Default is the significance level used for the analysis in \code{x}. } \item{subset}{ Indices for the results that should be included in the diagram. } \item{ask}{ If set to true, the plot function will prompt for a user input for each new plot that is shown on an interactive device (see \code{\link[=par]{par("ask")}}). } \item{addLegend}{ If set to true (default), a \code{\link{legend}} is added to the plot. } \item{...}{ Other parameters which can be used for histograms (see \code{\link[graphics]{hist}}). } } \details{ The function plots the distribution of gene set statistic values under the null hypothesis. It requires the significance assessment step of the enrichment analysis configuration (parameter \code{significance} or \code{\link{gsAnalysis}}) to be a computer-intensive testing procedure that yields a distribution of gene set statistic p-values under the null hypothesis. Predefined configurations for which this plot works are \code{\link{analysis.gsea}}, \code{\link{analysis.averageCorrelation}} and \code{\link{analysis.averageTStatistic}}. A histogram is plotted for the analysis in \code{x}. If \code{x} includes the analyses for several gene sets, one histogram is plotted for each of the gene sets. The statistic value of the input set is depicted as a vertical line. 
The most common graphical parameters can be supplied as vectors (one entry per analyzed gene set) to vary them between the different analyses. These parameters are: \code{main}, \code{xlab}, \code{ylab}. } \seealso{ \code{\link{geneSetAnalysis}}, \code{\link{predefinedAnalyses}}, \code{\link{gsAnalysis}}, \code{\link{evaluateGeneSetUncertainty}}, \code{\link{plot.uncertaintyResult}} } \examples{ # load data require(GlobalAncova) data(vantVeer) data(phenodata) data(pathways) res <- geneSetAnalysis( # global parameters dat = vantVeer, geneSets = pathways[3], analysis = analysis.averageCorrelation(), # additional parameters for analysis.averageCorrelation labs = phenodata$metastases, p = 1, numSamples = 10) # plot the histogram for the cell cycle control gene set hist(res, main = names(pathways[3])) } %\keyword{}
library(car) mydat<-Prestige dim(mydat) str(mydat) summary(mydat) head(mydat,5)##top 5 values library(ggplot2) result<-ggplot(data = mydat, mapping = aes(x = income, y = education)) + geom_point() ##Average years of education is proportional to the income ggplot(data = mydat, mapping = aes(x = type, y = women)) + geom_point() ##ggplot(data = mydat, mapping = aes(x = education, y = women)) + ##geom_rug(sides = "tr") ##Type of job - Blue collar workers are higher compared to white collared workers. ggplot(mydat) + geom_bar(aes(type)) ##Model building ##Linear model ##using rmse function rmse_fun = function() { return(c(mean( (dtrain$pred - dtrain$income)^2,na.rm = TRUE) ^0.5, mean( (dtest$pred - dtest$income)^2 ,na.rm = TRUE) ^0.5)) } mydat$rowno <- 1:nrow(mydat) dtrain<-subset(mydat,rowno <= nrow(mydat)*0.7) dtest<-subset(mydat,rowno > nrow(mydat)*0.7) model_lm <- lm(income ~ education, data=dtrain) summary(model_lm) dtrain$pred <- predict(model_lm, newdata = dtrain) dtest$pred <- predict(model_lm, newdata = dtest) rmse_fun() cor(mydat$income,mydat$education) ##0.5 shows a moderate positive relationship cor(dtest$income,dtest$education)##test data cor(dtrain$income,dtrain$education)##train data ##visualising the train and test model fit lines plot(dtrain$income ~ dtrain$education,data = dtrain)+ abline(model_lm) plot(dtest$income ~ dtest$education,data = dtest)+ abline(model_lm)
/Prestige_LR.R
no_license
RajashriN/PrestigeLinearRegression
R
false
false
1,558
r
library(car) mydat<-Prestige dim(mydat) str(mydat) summary(mydat) head(mydat,5)##top 5 values library(ggplot2) result<-ggplot(data = mydat, mapping = aes(x = income, y = education)) + geom_point() ##Average years of education is proportional to the income ggplot(data = mydat, mapping = aes(x = type, y = women)) + geom_point() ##ggplot(data = mydat, mapping = aes(x = education, y = women)) + ##geom_rug(sides = "tr") ##Type of job - Blue collar workers are higher compared to white collared workers. ggplot(mydat) + geom_bar(aes(type)) ##Model building ##Linear model ##using rmse function rmse_fun = function() { return(c(mean( (dtrain$pred - dtrain$income)^2,na.rm = TRUE) ^0.5, mean( (dtest$pred - dtest$income)^2 ,na.rm = TRUE) ^0.5)) } mydat$rowno <- 1:nrow(mydat) dtrain<-subset(mydat,rowno <= nrow(mydat)*0.7) dtest<-subset(mydat,rowno > nrow(mydat)*0.7) model_lm <- lm(income ~ education, data=dtrain) summary(model_lm) dtrain$pred <- predict(model_lm, newdata = dtrain) dtest$pred <- predict(model_lm, newdata = dtest) rmse_fun() cor(mydat$income,mydat$education) ##0.5 shows a moderate positive relationship cor(dtest$income,dtest$education)##test data cor(dtrain$income,dtrain$education)##train data ##visualising the train and test model fit lines plot(dtrain$income ~ dtrain$education,data = dtrain)+ abline(model_lm) plot(dtest$income ~ dtest$education,data = dtest)+ abline(model_lm)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/body.R \name{mj_divider} \alias{mj_divider} \title{Divider} \usage{ mj_divider(...) } \arguments{ \item{...}{Unnamed items that comprise this list of MJML tags.} } \description{ Displays a horizontal divider that can be customized like a HTML border. } \examples{ mj_ml( mj_body( mj_text("Some text"), mj_divider(), mj_text("The rest of the body") ) ) } \seealso{ \href{official documentation}{https://mjml.io/documentation/#mjml-divider} }
/man/mj_divider.Rd
no_license
jimsforks/mjml
R
false
true
536
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/body.R \name{mj_divider} \alias{mj_divider} \title{Divider} \usage{ mj_divider(...) } \arguments{ \item{...}{Unnamed items that comprise this list of MJML tags.} } \description{ Displays a horizontal divider that can be customized like a HTML border. } \examples{ mj_ml( mj_body( mj_text("Some text"), mj_divider(), mj_text("The rest of the body") ) ) } \seealso{ \href{official documentation}{https://mjml.io/documentation/#mjml-divider} }
# # ddi.example() # # Return example of DDI-list-structure. # ddi.example = function() { ddi = list( doc_dscr = list(), stdy_dscr = list(), file_dscr = list( file_plong = list( name = "file1", var_dscr = list( var_age = list( name = "age", labl = "Age in 2011", nature = "interval", valid = c(12, 45, 22, 76, 52), invalid = c(NA, NA, NA, NA, NA), sumStat = list( min = 12, max = 76, valid = 5, invalid = 0 ) ), var_sex = list( name = "sex", labl = "Gender", nature = "categorial", valid = c(1, NA, 0, 1, 0), invalid = c(NA, 1, NA, NA, NA), sumStat = list( min = 0, max = 1, valid = 4, invalid = 1 ), catgry = list( cat_1 = list( catValu = 0, labl = "male", valid = TRUE, catStat = list( type = "freq", value = 2 ) ), cat_2 = list( catValu = 1, labl = "female", valid = TRUE, catStat = list( type = "freq", value = 2 ) ) ) ) ) ), file_hlong = list() ) ) class(ddi) = "ddi" return(ddi) } # # print.ddi(ddi) # # DDI-object-specific function for printing a DDI-object. # print.ddi = function(ddi) { result = lapply( ddi$file_dscr, function(x) { x$name } ) return(result) } # # stata2ddi(filename) # # Returns file-element for given filename. # stata2ddi = function(filename) { library("foreign") # Read Stata file stata_file = read.dta( filename, convert.factors=FALSE, convert.dates=FALSE, missing.type=TRUE ) # Generate scaleton for file-object. file = list() class(file) = "ddi.file" file$name = filename file$var_dscr = list() # Internal function: extract_metadata(stata_var) extract_metadata = function(stata_var) { var = list() var$raw = stata_var return(var) } file$var_dscr = lapply(stata_file, extract_metadata) return(file) }
/examples/ddi.R
no_license
mhebing/r2ddi
R
false
false
2,493
r
# # ddi.example() # # Return example of DDI-list-structure. # ddi.example = function() { ddi = list( doc_dscr = list(), stdy_dscr = list(), file_dscr = list( file_plong = list( name = "file1", var_dscr = list( var_age = list( name = "age", labl = "Age in 2011", nature = "interval", valid = c(12, 45, 22, 76, 52), invalid = c(NA, NA, NA, NA, NA), sumStat = list( min = 12, max = 76, valid = 5, invalid = 0 ) ), var_sex = list( name = "sex", labl = "Gender", nature = "categorial", valid = c(1, NA, 0, 1, 0), invalid = c(NA, 1, NA, NA, NA), sumStat = list( min = 0, max = 1, valid = 4, invalid = 1 ), catgry = list( cat_1 = list( catValu = 0, labl = "male", valid = TRUE, catStat = list( type = "freq", value = 2 ) ), cat_2 = list( catValu = 1, labl = "female", valid = TRUE, catStat = list( type = "freq", value = 2 ) ) ) ) ) ), file_hlong = list() ) ) class(ddi) = "ddi" return(ddi) } # # print.ddi(ddi) # # DDI-object-specific function for printing a DDI-object. # print.ddi = function(ddi) { result = lapply( ddi$file_dscr, function(x) { x$name } ) return(result) } # # stata2ddi(filename) # # Returns file-element for given filename. # stata2ddi = function(filename) { library("foreign") # Read Stata file stata_file = read.dta( filename, convert.factors=FALSE, convert.dates=FALSE, missing.type=TRUE ) # Generate scaleton for file-object. file = list() class(file) = "ddi.file" file$name = filename file$var_dscr = list() # Internal function: extract_metadata(stata_var) extract_metadata = function(stata_var) { var = list() var$raw = stata_var return(var) } file$var_dscr = lapply(stata_file, extract_metadata) return(file) }
#' Add weather data to leaflet map. #' #' @param map \code{\link[leaflet]{leaflet}} map object #' @param data owm data #' @param lng numeric vector of longitudes #' (if \code{NULL} it will be taken from \code{data}) #' @param lat numeric vector of latitudes #' (if \code{NULL} it will be taken from \code{data}) #' @param icon vector of owm icon names #' (usually included in weather column of owm data) #' @param template template in the form of \cr #' \code{"<b>{{name}}</b>"} \cr #' where variable names in brackets correspond to #' column names of \code{data} (see also \code{\link{render}}) #' @param popup vector containing (HTML) content for popups, #' skipped in case parameter \code{template} is given #' @param ... see \code{\link[leaflet]{addMarkers}} #' #' @return updated map object #' @export #' #' @examples \dontrun{ #' owm_data <- find_city("Malaga", units = "metric")$list %>% tidy_up_() #' map <- leaflet() %>% addTiles() %>% #' add_weather(owm_data, #' template = "<b>{{name}}</b>, {{main_temp}}°C", #' icon = owm_data$weather_icon) #' } add_weather <- function(map, data, lng = NULL, lat = NULL, icon = NULL, template = NULL, popup = NULL, ...){ if(is.null(lng) | is.null(lat)){ lng <- data[[grep("lon", names(data))]] lat <- data[[grep("lat", names(data))]] } if(!is.null(icon)){ icon %<>% get_icon_url() %>% leaflet::icons() } if(!is.null(template)){ popup <- template %$$% data } leaflet::addMarkers(map, lng, lat, data = data, icon = icon, popup = popup, ...) }
/R/leaflet.R
no_license
rubedawg/owmr
R
false
false
1,575
r
#' Add weather data to leaflet map. #' #' @param map \code{\link[leaflet]{leaflet}} map object #' @param data owm data #' @param lng numeric vector of longitudes #' (if \code{NULL} it will be taken from \code{data}) #' @param lat numeric vector of latitudes #' (if \code{NULL} it will be taken from \code{data}) #' @param icon vector of owm icon names #' (usually included in weather column of owm data) #' @param template template in the form of \cr #' \code{"<b>{{name}}</b>"} \cr #' where variable names in brackets correspond to #' column names of \code{data} (see also \code{\link{render}}) #' @param popup vector containing (HTML) content for popups, #' skipped in case parameter \code{template} is given #' @param ... see \code{\link[leaflet]{addMarkers}} #' #' @return updated map object #' @export #' #' @examples \dontrun{ #' owm_data <- find_city("Malaga", units = "metric")$list %>% tidy_up_() #' map <- leaflet() %>% addTiles() %>% #' add_weather(owm_data, #' template = "<b>{{name}}</b>, {{main_temp}}°C", #' icon = owm_data$weather_icon) #' } add_weather <- function(map, data, lng = NULL, lat = NULL, icon = NULL, template = NULL, popup = NULL, ...){ if(is.null(lng) | is.null(lat)){ lng <- data[[grep("lon", names(data))]] lat <- data[[grep("lat", names(data))]] } if(!is.null(icon)){ icon %<>% get_icon_url() %>% leaflet::icons() } if(!is.null(template)){ popup <- template %$$% data } leaflet::addMarkers(map, lng, lat, data = data, icon = icon, popup = popup, ...) }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset_documentation.R
\name{GeneDropEx_ped}
\alias{GeneDropEx_ped}
\title{Dataset GeneDropEx_ped}
\description{
This dataframe contains a simulated pedigree for use in gene-dropping
}
\details{
This is simulated pedigree data for a population that experiences
density-dependent selection and has modest migration.  It is loosely based on the dynamics seen
in the data collected by the Soay Sheep Project.  There is a small amount of missing data.
It spans 35 years and includes 10000 individuals.  Migrants do not have a known cohort,
but the time that they arrived in the population can be found in the 'Arrived' column.
Please feel free to play around with this data and let me know how realistic,
or otherwise, you think it is.
}
/man/GeneDropEx_ped.Rd
no_license
simplydch/GeneDrop
R
false
true
815
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset_documentation.R
\name{GeneDropEx_ped}
\alias{GeneDropEx_ped}
\title{Dataset GeneDropEx_ped}
\description{
This dataframe contains a simulated pedigree for use in gene-dropping
}
\details{
This is simulated pedigree data for a population that experiences
density-dependent selection and has modest migration.  It is loosely based on the dynamics seen
in the data collected by the Soay Sheep Project.  There is a small amount of missing data.
It spans 35 years and includes 10000 individuals.  Migrants do not have a known cohort,
but the time that they arrived in the population can be found in the 'Arrived' column.
Please feel free to play around with this data and let me know how realistic,
or otherwise, you think it is.
}
#' Extract rank probability matrix
#'
#' A rank probability matrix is a matrix with all entries between 0 and 1. The
#' rows represent treatments while the columns represent ranks.
#'
#' @details
#' This function creates a rank probability matrix by reading information from a
#' model object. For example if \code{nma} is the result from a call to
#' \code{nma.ab.bin} from the \code{pcnetmeta} package, calling
#' \code{rank_probabilities(nma)} returns a matrix.
#'
#' @param x A model object holding rank probabilities.
#' @param ... currently not used
#'
#'
#' @family extractors
#'
#' @return A \code{matrix}
#' @export
rank_probabilities <- function(x, ...) {
  UseMethod("rank_probabilities")
}

#' @rdname rank_probabilities
#'
#' @export
rank_probabilities.nma.ab <- function(x, ...) {
  rp <- x$TrtRankProb
  # Rebuild as a plain numeric array with informative dimnames:
  # rows are treatments (taken from the rownames), columns are
  # ranks 1..ncol.
  array(as.numeric(rp),
        dim = c(nrow(rp), ncol(rp)),
        dimnames = list(treatment = rownames(rp), rank = seq_len(ncol(rp))))
}

#' Extract SUCRA values
#'
#' SUCRA values are computed here as the row-wise cumulative sums of a
#' rank probability matrix.
#'
#' @param x a \code{matrix} or a model object holding rank probabilities
#' @param ... currently not used
#'
#' @family extractors
#'
#' @export
sucra_values <- function(x, ...) {
  UseMethod("sucra_values")
}

#' @rdname sucra_values
#' @export
sucra_values.matrix <- function(x, ...) {
  # Cumulative rank probabilities per treatment (row).
  t(apply(x, 1, cumsum))
}

#' @rdname sucra_values
#' @export
sucra_values.default <- function(x, ...) {
  # Any non-matrix object is first reduced to its rank probability
  # matrix, then handled by the matrix method.
  x <- rank_probabilities(x)
  sucra_values.matrix(x)
}
/pkg/R/rankprobability.R
no_license
markvanderloo/metaplot
R
false
false
1,428
r
#' Extract rank probability matrix #' #' A rank probability matrix is a matrix with all entries between 0 and 1. The #' rows represent treatments while the columns represent ranks. #' #' @details #' This function creats a rank probability matrix by reading information from a #' model object. For example if \code{nma} is the result from a call to #' \code{nma.ab.bin} from the \code{pcnetmeta} package, calling #' \code{rank_probabilities(nma)} returns a matrix. #' #' @param x A model object holding rank probabilities. #' @param ... currently not used #' #' #' @family extractors #' #' @return A \code{matrix} #' @export rank_probabilities <- function(x,...){ UseMethod("rank_probabilities") } #' @rdname rank_probabilities #' #' @export rank_probabilities.nma.ab <- function(x,...){ rp <- x$TrtRankProb array(as.numeric(rp) , dim = c(nrow(rp),ncol(rp)) , dimnames = list(treatment=rownames(rp), rank=seq_len(ncol(rp))) ) } #' Extract scra values #' #' @param x a \code{matrix} or a model object holding rank probabilities #' @param ... currently not used #' #' @family extractors #' #' @export sucra_values <- function(x,...){ UseMethod("sucra_values") } #' @rdname sucra_values #' @export sucra_values.matrix <- function(x,...){ t(apply(x,1,cumsum)) } #' @rdname sucra_values #' @export sucra_values.default <- function(x,...){ x <- rank_probabilities(x) sucra_values.matrix(x) }
# attach_trace_info(): ensure a composition data frame carries the
# cell-address trace columns ("cellAddress_*") that map each composed
# row back to its originating cells.
#
# ca: a cell-analysis object accepted by compose_cells_raw().
# dc: an (optional) already-composed data frame.
#
# Behaviour (as visible in this function):
#   * dc missing, ca given  -> compose ca with tracing and return it.
#   * both arguments missing -> error.
#   * dc given with trace columns -> returned unchanged.
#   * dc given without trace, ca given -> trace columns are joined onto
#     dc by (row, col); every dc row must find a match, otherwise error.
#   * dc given without trace, ca missing -> error.
#
# NOTE(review): the helper columns (chk_this plus the joined
# cellAddress_* columns) remain in the returned dc in the join branch —
# confirm downstream callers expect this.
attach_trace_info <- function(ca, dc) {
  if (missing(dc)) {
    if (missing(ca)) {
      abort("either 'ca' or 'dc' required")
    }
    # Build the composition ourselves, asking for trace-back columns.
    dc_t <- compose_cells_raw(ca, trace_it_back = TRUE, silent = TRUE)
    dc <- dc_t
  } else {
    # Only act when dc lacks trace columns; otherwise pass it through.
    if (!any(stringr::str_detect(colnames(dc), "cellAddress_"))) {
      if (!missing(ca)) {
        # Recompose with tracing, keep only the join keys and the trace
        # columns, and mark each row so join misses can be detected.
        dc_t <- compose_cells_raw(ca, trace_it_back = TRUE, silent = TRUE)
        dc_t <- dc_t[c("row", "col", colnames(dc_t)[stringr::str_detect(colnames(dc_t), "cellAddress_")])]
        dc_t$chk_this <- "ok"
        dc <- dc %>% left_join(dc_t, by = c("row", "col"))
        # An NA marker means a dc row had no (row, col) match in dc_t.
        if (any(is.na(dc$chk_this))) {
          abort("at least one row could not be mapped properly")
        }
      } else {
        abort("supplied composition does not contain trace information and 'ca' is not given")
      }
    }
  }
  dc
}
/R/attach_trace_info.R
permissive
dondealban/tidycells
R
false
false
846
r
attach_trace_info <- function(ca, dc) { if (missing(dc)) { if (missing(ca)) { abort("either 'ca' or 'dc' required") } dc_t <- compose_cells_raw(ca, trace_it_back = TRUE, silent = TRUE) dc <- dc_t } else { if (!any(stringr::str_detect(colnames(dc), "cellAddress_"))) { if (!missing(ca)) { dc_t <- compose_cells_raw(ca, trace_it_back = TRUE, silent = TRUE) dc_t <- dc_t[c("row", "col", colnames(dc_t)[stringr::str_detect(colnames(dc_t), "cellAddress_")])] dc_t$chk_this <- "ok" dc <- dc %>% left_join(dc_t, by = c("row", "col")) if (any(is.na(dc$chk_this))) { abort("at least one row could not be mapped properly") } } else { abort("supplied composition does not contain trace information and 'ca' is not given") } } } dc }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PerMallows.R
\name{read.perms}
\alias{read.perms}
\title{Read a text file with a collection of permutations}
\usage{
read.perms(path)
}
\arguments{
\item{path}{string with a path}
}
\value{
A collection of permutations in matrix form
}
\description{
This function reads the text file in the specified path and checks
if each row is a proper permutation
}
\examples{
path = system.file("test.txt", package="PerMallows")
sample = read.perms(path)
}
/fuzzedpackages/PerMallows/man/read.perms.Rd
no_license
akhikolla/testpackages
R
false
true
526
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PerMallows.R
\name{read.perms}
\alias{read.perms}
\title{Read a text file with a collection of permutations}
\usage{
read.perms(path)
}
\arguments{
\item{path}{string with a path}
}
\value{
A collection of permutations in matrix form
}
\description{
This function reads the text file in the specified path and checks
if each row is a proper permutation
}
\examples{
path = system.file("test.txt", package="PerMallows")
sample = read.perms(path)
}
# titanic is available in your workspace (requires ggplot2 to be loaded)

# 1 - Check the structure of titanic
str(titanic)

# 2 - Bar chart of passenger class, split by sex (dodged bars)
ggplot(titanic, aes(x = factor(Pclass), fill = factor(Sex))) +
  geom_bar(position = "dodge")

# 3 - Same chart, faceted by survival.
#     (The original script repeated plot 2 verbatim here before the
#     faceted version — the duplicate without facet_grid() has been
#     removed.  Pclass is treated as a factor for consistency with the
#     other plots.)
ggplot(titanic, aes(x = factor(Pclass), fill = factor(Sex))) +
  geom_bar(position = "dodge") +
  facet_grid(. ~ Survived)

# 4 - Position object combining jitter and dodge, reused below
posn.jd <- position_jitterdodge(0.5, 0, 0.6)

# 5 - Age by class and sex, faceted by survival
ggplot(titanic, aes(x = factor(Pclass), y = Age, col = factor(Sex))) +
  geom_point(size = 3, alpha = 0.5, position = posn.jd) +
  facet_grid(. ~ Survived)
/R/Titanic.R
no_license
venkatkasarla/SB_DataWrangling
R
false
false
839
r
# titanic is avaliable in your workspace # 1 - Check the structure of titanic str(titanic) # 2 - Use ggplot() for the first instruction ggplot(titanic, aes(x = factor(Pclass), fill = factor(Sex))) + geom_bar(position = "dodge") # 3 - Plot 2, add facet_grid() layer ggplot(titanic, aes(x = factor(Pclass), fill = factor(Sex))) + geom_bar(position = "dodge") # 3 - Plot 2, add facet_grid() layer ggplot(titanic, aes(x = Pclass, fill = Sex)) + geom_bar(position = "dodge") + facet_grid( . ~ Survived) # 4 - Define an object for position jitterdodge, to use below posn.jd <- position_jitterdodge(0.5, 0, 0.6) # 5 - Plot 3, but use the position object from instruction 4 ggplot(titanic, aes(x = factor(Pclass), y =Age, col = factor(Sex))) + geom_point(size = 3, alpha=0.5,position = posn.jd ) + facet_grid( . ~ Survived)
library(faraway)
data(gala)
gala <- gala[, -2]

# Linear model on raw counts: residuals vs fitted shows non-constant variance.
modl <- lm(Species ~ ., gala)
plot(predict(modl), residuals(modl), xlab = "Fitted", ylab = "Residuals")

# Square-root transform stabilises the variance somewhat.
modt <- lm(sqrt(Species) ~ ., gala)
plot(predict(modt), residuals(modt), xlab = "Fitted", ylab = "Residuals")
summary(modt)

# Poisson regression on the counts.
modp <- glm(Species ~ ., family = poisson, gala)
summary(modp)
halfnorm(residuals(modp))

# Check the mean-variance relationship.  The minus signs were lost in
# the original source ("gala$Speciesfitted(modp)" and "(yhat(mu))^2");
# they are restored here.
plot(log(fitted(modp)), log((gala$Species - fitted(modp))^2),
     xlab = expression(hat(mu)), ylab = expression((y - hat(mu))^2))
abline(0, 1)

# Estimate the dispersion parameter and refit summary with it.
(dp <- sum(residuals(modp, type = "pearson")^2) / modp$df.res)
summary(modp, dispersion = dp)
drop1(modp, test = "F")

###########
options(contrasts = c("contr.treatment", "contr.poly"))

#### Logistic regression
r <- c(10, 17, 12, 7, 23, 22, 29, 29, 23)
n <- c(31, 30, 31, 27, 26, 30, 31, 30, 30)
logconc <- c(2.68, 2.76, 2.82, 2.90, 3.02, 3.04, 3.13, 3.20, 3.21)
counts <- cbind(r, n - r)
result <- glm(counts ~ logconc, family = binomial("logit"))
summary(result, correlation = TRUE, symbolic.cor = TRUE)
result$coefficients

#### plot residuals vs. linear predictor
plot(residuals(result, type = "pearson"), result$linear.predictors)

#### plot logconc vs. empirical logits
emplogit <- log((r + 0.5) / (n - r + 0.5))
plot(logconc, emplogit)

#### Adjusting for overdispersion.
#### This should give you the same model but with adjusted covariance
#### matrix, that is SE for your beta's and also changed z-values.
#### First estimate the dispersion parameter based on the MAXIMAL model;
#### in our example this is simple since we have only one model:
#### X^2/df = 4.08.
#### Notice that this does not adjust overall fit statistics.
summary(result, dispersion = 4.08, correlation = TRUE, symbolic.cor = TRUE)

#### You can also use the dispmod package; it gives a slightly
#### different result because it uses G^2/df, and it adjusts the
#### overall fit statistics too.
# install.packages("dispmod")
library(dispmod)
glm.binomial.disp(result)

#### For other diagnostic plots, see donner.R.
#### (A stray bare library() call was removed here: it only opened the
#### installed-packages listing and did nothing for the analysis.)
#### plot.lm() gives a series of plots: residuals vs. fitted values,
#### Q-Q plots, leverage, etc.
plot.lm(result)

#### roc.plot() is adapted from a McMaster University course page (the
#### URL in the original source was garbled beyond recovery).
#### It plots the ROC curve given two vectors of scores: the first for
#### the treatment group (y == 1), the second for the control group
#### (y == 0).
roc.plot <- function(sd, sdc, newplot = TRUE, ...) {
  sall <- sort(c(sd, sdc))
  sens <- 0
  specc <- 0
  # Sweep the cutoff from the highest score down, accumulating
  # sensitivity and (1 - specificity) at each observed score.
  for (i in length(sall):1) {
    sens <- c(sens, mean(sd >= sall[i], na.rm = TRUE))
    specc <- c(specc, mean(sdc >= sall[i], na.rm = TRUE))
  }
  if (newplot) {
    plot(specc, sens, xlim = c(0, 1), ylim = c(0, 1), type = "l",
         xlab = "1-specificity", ylab = "sensitivity", ...)
    abline(0, 1)
  } else {
    lines(specc, sens, ...)
  }
  npoints <- length(sens)
  # Trapezoidal area under the curve.
  area <- sum(0.5 * (sens[-1] + sens[-npoints]) * (specc[-1] - specc[-npoints]))
  # Optimal cutoff: maximise sensitivity - (1 - specificity).
  lift <- (sens - specc)[-1]
  cutoff <- sall[lift == max(lift)][1]
  sensopt <- sens[-1][lift == max(lift)][1]
  specopt <- 1 - specc[-1][lift == max(lift)][1]
  list(area = area, cutoff = cutoff, sensopt = sensopt, specopt = specopt)
}

#### Draw the ROC plot
roc.plot(r, n - r)

######
data(dicentric)
round(xtabs(ca / cells ~ doseamt + doserate, dicentric), 2)
with(dicentric, interaction.plot(doseamt, doserate, ca / cells))
interaction.plot(dicentric$doseamt, dicentric$doserate,
                 dicentric$ca / dicentric$cells)

# Linear model on the rate; residual plot checks the fit.
lmod <- lm(ca / cells ~ log(doserate) * factor(doseamt), dicentric)
summary(lmod)$adj
plot(residuals(lmod) ~ fitted(lmod), xlab = "Fitted", ylab = "Residuals")
abline(h = 0)

# Poisson model with log(cells) first as a covariate, then as an offset.
dicentric$dosef <- factor(dicentric$doseamt)
pmod <- glm(ca ~ log(cells) + log(doserate) * dosef,
            family = poisson, dicentric)
summary(pmod)
rmod <- glm(ca ~ offset(log(cells)) + log(doserate) * dosef,
            family = poisson, dicentric)
summary(rmod)

# Solder skips: Poisson fit, reduced model, and negative binomial.
data(solder)
modp <- glm(skips ~ ., family = poisson, data = solder)
deviance(modp)
summary(modp)
df.residual(modp)

modp2 <- glm(skips ~ Opening + Solder + Mask + PadType + Panel,
             family = poisson, data = solder)
deviance(modp2)
pchisq(deviance(modp2), df.residual(modp2), lower = FALSE)

library(MASS)
modn <- glm(skips ~ ., negative.binomial(1), solder)
modn
modn <- glm.nb(skips ~ ., solder)
summary(modn)
/621 - Data Mining/Logistic_regression_models/practice/count_regression_practice.R
no_license
bvshyam/cuny_data_science_repo
R
false
false
4,446
r
library(faraway) data(gala) gala <- gala[,-2] modl <- lm(Species ~ . , gala) plot(predict(modl),residuals(modl),xlab="Fitted",ylab=" Residuals") modt <- lm(sqrt(Species) ~ . , gala) plot(predict(modt),residuals(modt),xlab="Fitted",ylab=" Residuals") summary(modt) modp <- glm(Species ~ .,family=poisson, gala) summary(modp) halfnorm(residuals(modp)) plot(log(fitted(modp)),log((gala$Speciesfitted(modp))^2), xlab=expression(hat(mu)),ylab=expression((yhat(mu))^2)) abline(0,1) (dp <- sum(residuals(modp,type="pearson")^2)/modp$df.res) summary (modp, dispersion=dp) drop1(modp,test="F") ########### options(contrasts=c("contr.treatment","contr.poly")) #### Logistic regression r=c(10,17,12,7,23,22,29,29,23) n=c(31,30,31,27,26,30,31,30,30) logconc=c(2.68,2.76,2.82,2.90,3.02,3.04,3.13,3.20,3.21) counts=cbind(r,n-r) result=glm(counts~logconc,family=binomial("logit")) summary(result,correlation=TRUE,symbolic.cor = TRUE) result$coefficients #### plot residuals vs. linear predictor plot(residuals(result, type="pearson"),result$linear.predictors) #### plot logconc vs. empirical logits emplogit=log((r+0.5)/(n-r+0.5)) plot(logconc,emplogit) #### adjusting for overdispersion #### This should give you the same model but with adjusted covariance #### matirix, that is SE for your beta's and also changed z-values. #### First estimate the dispersion parameter based on the MAXIMAL model; #### in our example this is simple since we have only one model #### X^2/df=4.08 #### Notice that this does not adjust overall fit statistics summary(result, dispersion=4.08,correlation=TRUE,symbolic.cor = TRUE) #### Notice you can also use new package DISPMOD #### gives a bit different result because it uses G^2/df #### It adjusts the overall fit statistis too #install.packages("dispmod") library(dispmod) glm.binomial.disp(result) #### For other diagonostic plots, see donner.R #### Here is another way to get regression type plots library() #### This gives a series of plots such as; #### residuals vs. 
fitted values #### Q-Q plots #### levarage, etc... plot.lm(result) #### The following is a function adapted from http//www.math.mcmaster.capeters4f03s4f03_0607index.html #### roc.plot() will plot the ROC curve given two vectors of scores, #### the first for the treatment group (y==1) and the second for the control group (y==0). roc.plot <- function (sd, sdc, newplot = TRUE, ...) { sall <- sort(c(sd, sdc)) sens <- 0 specc <- 0 for (i in length(sall):1) { sens <- c(sens, mean(sd >= sall[i], na.rm = T)) specc <- c(specc, mean(sdc >= sall[i], na.rm = T)) } if (newplot) { plot(specc, sens, xlim = c(0, 1), ylim = c(0, 1), type = "l", xlab = "1-specificity", ylab = "sensitivity", ...) abline(0, 1) } else lines(specc, sens, ...) npoints <- length(sens) area <- sum(0.5 * (sens[-1] + sens[-npoints]) * (specc[-1] - specc[-npoints])) lift <- (sens - specc)[-1] cutoff <- sall[lift == max(lift)][1] sensopt <- sens[-1][lift == max(lift)][1] specopt <- 1 - specc[-1][lift == max(lift)][1] list(area = area, cutoff = cutoff, sensopt = sensopt, specopt = specopt) } #### Let us draw the ROC plot roc.plot(r,n-r) ###### data(dicentric) round(xtabs(ca/cells ~ doseamt+doserate, dicentric),2) with(dicentric,interaction.plot(doseamt,doserate,ca/cells)) interaction.plot(dicentric$doseamt,dicentric$doserate,dicentric$ca/dicentric$cells) lmod <- lm(ca/cells ~ log(doserate)*factor(doseamt), dicentric) summary(lmod)$adj plot(residuals(lmod) ~ fitted(lmod),xlab="Fitted",ylab="Residuals") abline(h=0) dicentric$dosef <- factor(dicentric$doseamt) pmod <- glm(ca ~ log(cells)+log(doserate)*dosef, family=poisson,dicentric) summary(pmod) rmod <- glm(ca ~ offset (log(cells))+log(doserate)*dosef, family=poisson,dicentric) summary(rmod) data(solder) modp <- glm(skips ~ . 
, family=poisson, data=solder) deviance(modp) summary(modp) df.residual(modp) modp2 <- glm(skips ~ Opening +Solder + Mask + PadType + Panel ,family=poisson, data=solder) deviance(modp2) pchisq(deviance(modp2),df.residual(modp2),lower=FALSE) library(MASS) modn <- glm(skips ~ . , negative.binomial(1),solder) modn modn <- glm.nb(skips ~ .,solder) summary (modn)
### Required packages ###
library(ggplot2); library(dplyr); library(tidyr);library(haven);library(readstata13);library(foreign);library(mice);library(MatchIt);library(stringdist);library(RecordLinkage);library(modelr)

### Parameters ###
# Hazard-ratio (log scale) coefficients for a Framingham-style risk
# model, by sex.  Only the point tables below are actually used by
# get_frs(); these coefficient lists/scalars are kept for reference.
#f_base10 = 0.95012 # baseline 10-year survival for females
#m_base10 = 0.88936 # baseline 10-year survival for males
f_rf <- list(log_age = 2.32888, log_hdl = -0.70833,log_totchol = 1.20904,log_SBP = 2.82263,Smoking = 0.52873,Diabetes = 0.69154) # log of hazard ratio for each risk factor for females
m_rf <- list(log_age = 3.06117, log_hdl = -0.93263,log_totchol = 1.12370,log_SBP = 1.99881,Smoking = 0.65451,Diabetes = 0.57367) # log of hazard ratio for each risk factor for males
m_log_age = 3.06117; m_log_hdl = -0.93263;m_log_totchol = 1.12370;m_log_SBP = 1.99881;m_Smoking = 0.65451;m_Diabetes = 0.57367 # log of hazard ratio for each risk factor for males
f_log_age = 2.32888; f_log_hdl = -0.70833;f_log_totchol = 1.20904;f_log_SBP = 2.82263;f_Smoking = 0.52873;f_Diabetes = 0.69154 # log of hazard ratio for each risk factor for females
betas <- list(f=f_rf,m=m_rf) # nested list of f_rf and m_rf
# Point-lookup tables: one row per point value; each risk factor has a
# [low, high] interval, and the row whose interval contains the input
# value supplies the points for that factor.  NA means "this point
# value does not apply to this factor".
f_points=data.frame("points"=seq(-3,12),"age_low"=c(NA,NA,NA,30,NA,35,NA,40,45,NA,50,55,60,65,70,75),"age_high"=c(NA,NA,NA,34,NA,39,NA,44,49,NA,54,59,64,69,74,120),"HDL_low"=c(NA,60,50,45,35,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"HDL_high"=c(NA,1000,59,49,44,35,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"TotChol_low"=c(NA,NA,NA,0,160,NA,200,240,280,NA,NA,NA,NA,NA,NA,NA),"TotChol_high"=c(NA,NA,NA,160,199,NA,239,279,1000,NA,NA,NA,NA,NA,NA,NA), "SBP_low"=c(NA,NA,0,NA,NA,NA,120,130,NA,140,150,160,NA,NA,NA,NA),"SBP_high"=c(NA,NA,120,NA,NA,NA,129,139,NA,149,159,1000,NA,NA,NA,NA), "Smoker"=c(NA,NA,NA,"No",NA,NA,"Yes",NA,NA,NA,NA,NA,NA,NA,NA,NA), "Diabetic"=c(NA,NA,NA,"No",NA,NA,NA,"Yes",NA,NA,NA,NA,NA,NA,NA,NA))
m_points=data.frame("points"=seq(-2,15),"age_low"=c(NA,NA,30,NA,35,NA,NA,40,45,NA,50,NA,55,60,65,NA,70,75),"age_high"=c(NA,NA,34,NA,39,NA,NA,44,49,NA,54,NA,59,64,69,NA,74,120),"HDL_low"=c(60,50,45,35,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"HDL_high"=c(1000,59,49,44,35,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"TotChol_low"=c(NA,NA,0,160,200,240,280,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"TotChol_high"=c(NA,NA,160,199,239,279,1000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA), "SBP_low"=c(NA,NA,0,NA,120,130,140,160,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"SBP_high"=c(NA,NA,120,NA,129,139,149,1000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA), "Smoker"=c(NA,NA,"No",NA,NA,NA,"Yes",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA), "Diabetic"=c(NA,NA,"No",NA,NA,"Yes",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA))
################################################################################################################################################################
# This gets the row where the input (30 in this example) satisfies the condition that it is between the age_low and age_high columns of the data frame f_points
i=sapply(30, function(p) { which(f_points$age_low <= p & f_points$age_high >= p)})
# Delete all rows except row "i"
keep_df=f_points[-(setdiff(1:16,i)),]
################################################################################################################################################################
# get_frs(): total Framingham-style risk points for one person.
#   riagendr: sex code (1 = male per the branch below, otherwise female)
#   ridageyr: age in years
#   HDL, TotChol, sbp: values looked up in the point tables above
#   smoker: code compared against 2 -- 2 appears to mean non-smoker
#           here (NHANES-style coding); TODO confirm
#   diabetic: compared against 6.5, so presumably HbA1c percent (the
#             commented-out alternative used fasting glucose >= 126);
#             TODO confirm
# Side effect: assigns per-factor point values (m_age, m_HDL, ...)
# into the global environment via <<-.
# Returns the sum of all per-factor points.
get_frs = function(riagendr,ridageyr,HDL,TotChol,sbp,smoker,diabetic) {
  list_pts=c() # Initiate list of points
  #### MALES ####
  if (riagendr == 1) {
    ## GET POINTS FROM AGE ##
    # Find the table row whose [low, high] interval contains the value,
    # then drop every other row; column 1 of the survivor is the points.
    i=sapply(ridageyr, function(p) { which(m_points$age_low <= p & m_points$age_high >= p)}); keep_df=m_points[-(setdiff(1:length(m_points$points),i)),];m_age <<- keep_df[[1]]
    list_pts=append(list_pts,keep_df[[1]])
    ## GET POINTS FROM HDL ##
    i=sapply(HDL, function(p) { which(m_points$HDL_low <= p & m_points$HDL_high >= p)}); keep_df=m_points[-(setdiff(1:length(m_points$points),i)),];m_HDL <<- keep_df[[1]]
    list_pts=append(list_pts,keep_df[[1]])
    ## GET POINTS FROM TOTAL CHOLESTEROL ##
    i=sapply(TotChol, function(p) { which(m_points$TotChol_low <= p & m_points$TotChol_high >= p)}); keep_df=m_points[-(setdiff(1:length(m_points$points),i)),];m_TotChol <<- keep_df[[1]]
    list_pts=append(list_pts,keep_df[[1]])
    ## GET POINTS FROM SBP ##
    i=sapply(sbp, function(p) { which(m_points$SBP_low <= p & m_points$SBP_high >= p)}); keep_df=m_points[-(setdiff(1:length(m_points$points),i)),];m_SBP <<- keep_df[[1]]
    list_pts=append(list_pts,keep_df[[1]])
    ## GET POINTS FROM SMOKER ##
    if (isTRUE(smoker == 2)) {list_pts=append(list_pts,0)} else {list_pts=append(list_pts,4)} # m_smoke <<- 4
    ## GET POINTS FROM DIABETIC ##
    if (isTRUE(diabetic < 6.5)) {list_pts=append(list_pts,0)} else {list_pts=append(list_pts,3)}
    #if (isTRUE(diabetic >= 126)) {list_pts=append(list_pts,3)} else {list_pts=append(list_pts,0)} # m_diabetic <<- 3
  }
  #### FEMALES ####
  else {
    ## GET POINTS FROM AGE ##
    i=sapply(ridageyr, function(p) { which(f_points$age_low <= p & f_points$age_high >= p)}); keep_df=f_points[-(setdiff(1:length(f_points$points),i)),];f_age <<- keep_df[[1]]
    list_pts=append(list_pts,keep_df[[1]])
    ## GET POINTS FROM HDL ##
    i=sapply(HDL, function(p) { which(f_points$HDL_low <= p & f_points$HDL_high >= p)}); keep_df=f_points[-(setdiff(1:length(f_points$points),i)),];f_HDL <<- keep_df[[1]]
    list_pts=append(list_pts,keep_df[[1]])
    ## GET POINTS FROM TOTAL CHOLESTEROL ##
    i=sapply(TotChol, function(p) { which(f_points$TotChol_low <= p & f_points$TotChol_high >= p)}); keep_df=f_points[-(setdiff(1:length(f_points$points),i)),];f_TotChol <<- keep_df[[1]]
    list_pts=append(list_pts,keep_df[[1]])
    ## GET POINTS FROM SBP ##
    i=sapply(sbp, function(p) { which(f_points$SBP_low <= p & f_points$SBP_high >= p)}); keep_df=f_points[-(setdiff(1:length(f_points$points),i)),];f_SBP <<- keep_df[[1]]
    list_pts=append(list_pts,keep_df[[1]])
    ## GET POINTS FROM SMOKER ##
    if (isTRUE(smoker == 2)) {list_pts=append(list_pts,0)} else {list_pts=append(list_pts,3)} # f_smoke <<- 3
    ## GET POINTS FROM DIABETIC ##
    if (isTRUE(diabetic < 6.5)) {list_pts=append(list_pts,0)} else {list_pts=append(list_pts,4)}
    #if (isTRUE(diabetic >= 126)) {list_pts=append(list_pts,4)} else {list_pts=append(list_pts,0)} # f_diabetic <<- 4
  }
  return(sum(list_pts))
}
##############
#### ATP3 ####
##############
/Code/NHANES_continuous/compute_frs.R
no_license
bryansashakim/Framingham-Risk-Scores
R
false
false
6,336
r
### Required packages ### library(ggplot2); library(dplyr); library(tidyr);library(haven);library(readstata13);library(foreign);library(mice);library(MatchIt);library(stringdist);library(RecordLinkage);library(modelr) ### Parameters ### #f_base10 = 0.95012 # baseline 10-year survival for females #m_base10 = 0.88936 # baseline 10-year survival for males f_rf <- list(log_age = 2.32888, log_hdl = -0.70833,log_totchol = 1.20904,log_SBP = 2.82263,Smoking = 0.52873,Diabetes = 0.69154) # log of hazard ratio for each risk factor for females m_rf <- list(log_age = 3.06117, log_hdl = -0.93263,log_totchol = 1.12370,log_SBP = 1.99881,Smoking = 0.65451,Diabetes = 0.57367) # log of hazard ratio for each risk factor for males m_log_age = 3.06117; m_log_hdl = -0.93263;m_log_totchol = 1.12370;m_log_SBP = 1.99881;m_Smoking = 0.65451;m_Diabetes = 0.57367 # log of hazard ratio for each risk factor for males f_log_age = 2.32888; f_log_hdl = -0.70833;f_log_totchol = 1.20904;f_log_SBP = 2.82263;f_Smoking = 0.52873;f_Diabetes = 0.69154 # log of hazard ratio for each risk factor for females betas <- list(f=f_rf,m=m_rf) # nested list of f_rf and m_rf f_points=data.frame("points"=seq(-3,12),"age_low"=c(NA,NA,NA,30,NA,35,NA,40,45,NA,50,55,60,65,70,75),"age_high"=c(NA,NA,NA,34,NA,39,NA,44,49,NA,54,59,64,69,74,120),"HDL_low"=c(NA,60,50,45,35,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"HDL_high"=c(NA,1000,59,49,44,35,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"TotChol_low"=c(NA,NA,NA,0,160,NA,200,240,280,NA,NA,NA,NA,NA,NA,NA),"TotChol_high"=c(NA,NA,NA,160,199,NA,239,279,1000,NA,NA,NA,NA,NA,NA,NA), "SBP_low"=c(NA,NA,0,NA,NA,NA,120,130,NA,140,150,160,NA,NA,NA,NA),"SBP_high"=c(NA,NA,120,NA,NA,NA,129,139,NA,149,159,1000,NA,NA,NA,NA), "Smoker"=c(NA,NA,NA,"No",NA,NA,"Yes",NA,NA,NA,NA,NA,NA,NA,NA,NA), "Diabetic"=c(NA,NA,NA,"No",NA,NA,NA,"Yes",NA,NA,NA,NA,NA,NA,NA,NA)) 
m_points=data.frame("points"=seq(-2,15),"age_low"=c(NA,NA,30,NA,35,NA,NA,40,45,NA,50,NA,55,60,65,NA,70,75),"age_high"=c(NA,NA,34,NA,39,NA,NA,44,49,NA,54,NA,59,64,69,NA,74,120),"HDL_low"=c(60,50,45,35,0,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"HDL_high"=c(1000,59,49,44,35,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"TotChol_low"=c(NA,NA,0,160,200,240,280,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"TotChol_high"=c(NA,NA,160,199,239,279,1000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA), "SBP_low"=c(NA,NA,0,NA,120,130,140,160,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),"SBP_high"=c(NA,NA,120,NA,129,139,149,1000,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA), "Smoker"=c(NA,NA,"No",NA,NA,NA,"Yes",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA), "Diabetic"=c(NA,NA,"No",NA,NA,"Yes",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA)) ################################################################################################################################################################ # This gets the row where the input (30 in this example) satisfies the condition that it is between the age_low and age_high columns of the data frame f_points i=sapply(30, function(p) { which(f_points$age_low <= p & f_points$age_high >= p)}) # Delete all rows except row "i" keep_df=f_points[-(setdiff(1:16,i)),] ################################################################################################################################################################ get_frs = function(riagendr,ridageyr,HDL,TotChol,sbp,smoker,diabetic) { list_pts=c() # Initiate list of points #### MALES #### if (riagendr == 1) { ## GET POINTS FROM AGE ## i=sapply(ridageyr, function(p) { which(m_points$age_low <= p & m_points$age_high >= p)}); keep_df=m_points[-(setdiff(1:length(m_points$points),i)),];m_age <<- keep_df[[1]] list_pts=append(list_pts,keep_df[[1]]) ## GET POINTS FROM HDL ## i=sapply(HDL, function(p) { which(m_points$HDL_low <= p & m_points$HDL_high >= p)}); keep_df=m_points[-(setdiff(1:length(m_points$points),i)),];m_HDL <<- keep_df[[1]] 
list_pts=append(list_pts,keep_df[[1]]) ## GET POINTS FROM TOTAL CHOLESTEROL ## i=sapply(TotChol, function(p) { which(m_points$TotChol_low <= p & m_points$TotChol_high >= p)}); keep_df=m_points[-(setdiff(1:length(m_points$points),i)),];m_TotChol <<- keep_df[[1]] list_pts=append(list_pts,keep_df[[1]]) ## GET POINTS FROM SBP ## i=sapply(sbp, function(p) { which(m_points$SBP_low <= p & m_points$SBP_high >= p)}); keep_df=m_points[-(setdiff(1:length(m_points$points),i)),];m_SBP <<- keep_df[[1]] list_pts=append(list_pts,keep_df[[1]]) ## GET POINTS FROM SMOKER ## if (isTRUE(smoker == 2)) {list_pts=append(list_pts,0)} else {list_pts=append(list_pts,4)} # m_smoke <<- 4 ## GET POINTS FROM DIABETIC ## if (isTRUE(diabetic < 6.5)) {list_pts=append(list_pts,0)} else {list_pts=append(list_pts,3)} #if (isTRUE(diabetic >= 126)) {list_pts=append(list_pts,3)} else {list_pts=append(list_pts,0)} # m_diabetic <<- 3 } #### FEMALES #### else { ## GET POINTS FROM AGE ## i=sapply(ridageyr, function(p) { which(f_points$age_low <= p & f_points$age_high >= p)}); keep_df=f_points[-(setdiff(1:length(f_points$points),i)),];f_age <<- keep_df[[1]] list_pts=append(list_pts,keep_df[[1]]) ## GET POINTS FROM HDL ## i=sapply(HDL, function(p) { which(f_points$HDL_low <= p & f_points$HDL_high >= p)}); keep_df=f_points[-(setdiff(1:length(f_points$points),i)),];f_HDL <<- keep_df[[1]] list_pts=append(list_pts,keep_df[[1]]) ## GET POINTS FROM TOTAL CHOLESTEROL ## i=sapply(TotChol, function(p) { which(f_points$TotChol_low <= p & f_points$TotChol_high >= p)}); keep_df=f_points[-(setdiff(1:length(f_points$points),i)),];f_TotChol <<- keep_df[[1]] list_pts=append(list_pts,keep_df[[1]]) ## GET POINTS FROM SBP ## i=sapply(sbp, function(p) { which(f_points$SBP_low <= p & f_points$SBP_high >= p)}); keep_df=f_points[-(setdiff(1:length(f_points$points),i)),];f_SBP <<- keep_df[[1]] list_pts=append(list_pts,keep_df[[1]]) ## GET POINTS FROM SMOKER ## if (isTRUE(smoker == 2)) {list_pts=append(list_pts,0)} else 
{list_pts=append(list_pts,3)} # f_smoke <<- 3 ## GET POINTS FROM DIABETIC ## if (isTRUE(diabetic < 6.5)) {list_pts=append(list_pts,0)} else {list_pts=append(list_pts,4)} #if (isTRUE(diabetic >= 126)) {list_pts=append(list_pts,4)} else {list_pts=append(list_pts,0)} # f_diabetic <<- 4 } return(sum(list_pts)) } ############## #### ATP3 #### ##############
# Toom-Cook (Toom-3) big-number multiplication demo.
#
# Operands are handled as decimal strings, zero-padded and split into
# base-10^6 "sections" (6 decimal digits each).  Each operand is viewed as a
# degree-2 polynomial in B = 10^6, evaluated at 5 points, multiplied
# point-wise, and the 5 product coefficients are recovered by solving a
# small linear system.  Finally the coefficients are re-assembled with
# carry propagation into a decimal string.
#
# This revision is base-R only: the original attached stringr but every
# str_c() call is replaced here by paste0()/strrep().
# Fixes over the original:
#   * round() instead of trunc() on solve() output (floating-point results
#     such as 55.9999999 must become 56, not 55);
#   * multiplica_numerotes() now errors on inputs longer than 18 digits
#     instead of silently returning a stale global;
#   * suma_numerotes() no longer reads `tamanio_max_secciones` from the
#     global environment.

# ---- helper functions --------------------------------------------------

# Number of base-(b^k) "digits" needed for the larger of the two operands.
# num_1, num_2: operands as strings; b: working base; k: sections per operand.
calcula_base <- function(num_1, num_2, b, k) {
  i <- max(floor(floor(log(x = as.numeric(num_1), base = b)) / k),
           floor(floor(log(x = as.numeric(num_2), base = b)) / k))
  i + 1
}

# Split a number-as-string into right-aligned chunks of at most
# `tamanio_max_secciones` characters.  Returns a character vector with the
# most significant chunk first.
separa_en_secciones <- function(cadena, tamanio_max_secciones) {
  num_secciones <- ceiling(nchar(cadena) / tamanio_max_secciones)
  numero_seccionado <- vector(mode = "character")
  for (i in 1:num_secciones) {
    numero_seccionado[num_secciones + 1 - i] <-
      substr(x = cadena,
             start = max(1, nchar(cadena) - i * tamanio_max_secciones + 1),
             stop  = nchar(cadena) - (i - 1) * tamanio_max_secciones)
  }
  numero_seccionado
}

# Toom-k interpolation needs k_m + k_n - 1 evaluation points to recover the
# product polynomial.
define_numero_de_puntos_a_evaluar <- function(k_m, k_n) {
  k_m + k_n - 1
}

# Vandermonde-style evaluation matrix: row i holds the powers of
# valores_a_evaluar[i], highest power first, so (matrix %*% coeffs) evaluates
# the polynomial at every point at once.
rellena_matriz_evaluada <- function(valores_a_evaluar, num_columnas,
                                    num_puntos_a_evaluar) {
  matriz_final <- matrix(ncol = num_columnas, nrow = num_puntos_a_evaluar)
  for (i in 1:num_puntos_a_evaluar) {
    for (k in 1:num_columnas) {
      matriz_final[i, k] <- valores_a_evaluar[i]^(num_columnas - k)
    }
  }
  matriz_final
}

# ---- worked example, step by step --------------------------------------

m_original <- "1234567890"
n_original <- "1234567890"

base_b <- 10
exponente_inicial_b <- 4
b <- base_b^exponente_inicial_b   # example working base, 10^4
k <- 3                            # Toom-3: three sections per operand

# Suggested section size from the operands (kept for reference), then fixed
# at 6 digits so that 18-digit operands split into exactly k = 3 sections.
i <- calcula_base(num_1 = m_original, num_2 = n_original,
                  b = base_b^exponente_inicial_b, k = k)
tamanio_max_secciones <- i * exponente_inicial_b
tamanio_max_secciones <- 6

# Zero-pad both operands to 18 digits and split them into sections.
num_digitos <- 18
ceros_m <- strrep("0", num_digitos - nchar(m_original))
m_orig_rellenado <- paste0(ceros_m, m_original)
m_vector <- separa_en_secciones(cadena = m_orig_rellenado,
                                tamanio_max_secciones = tamanio_max_secciones)
ceros_n <- strrep("0", num_digitos - nchar(n_original))
n_orig_rellenado <- paste0(ceros_n, n_original)
n_vector <- separa_en_secciones(cadena = n_orig_rellenado,
                                tamanio_max_secciones = tamanio_max_secciones)

# Number of evaluation points (5 for Toom-3).
num_puntos_a_evaluar <- define_numero_de_puntos_a_evaluar(k_m = k, k_n = k)

# Evaluation matrices at the points 0, 1, -1, 2, -2.
valores <- c(0, 1, -1, 2, -2)
matriz_evaluada_p_q <- rellena_matriz_evaluada(
  valores_a_evaluar = valores, num_columnas = 3,
  num_puntos_a_evaluar = num_puntos_a_evaluar)
matriz_evaluada_r <- rellena_matriz_evaluada(
  valores_a_evaluar = valores, num_columnas = 5,
  num_puntos_a_evaluar = num_puntos_a_evaluar)

# Point-wise products r(x) = p(x) * q(x), then interpolate the coefficients.
vector_r <- (matriz_evaluada_p_q %*% as.numeric(m_vector)) *
  (matriz_evaluada_p_q %*% as.numeric(n_vector))
inversa <- solve(a = matriz_evaluada_r) * 24   # kept for inspection only
sumandos <- solve(a = matriz_evaluada_r, b = vector_r)

# round(), not trunc(): solve() works in floating point, and the true
# coefficients are integers, so e.g. 55.9999999 must become 56, not 55.
sumandos_char <- as.character(round(sumandos))
num_columnas_matriz_final <- length(sumandos_char) + 1

# Place each coefficient's sections into a matrix, shifted by one section
# per power of B; row 1 is reserved for carries.
matriz_suma_posicionada <- matrix(
  data = rep("0", num_columnas_matriz_final * num_columnas_matriz_final),
  ncol = num_columnas_matriz_final, nrow = num_columnas_matriz_final)
sumando_a_acomodar <- vector(mode = "character")
for (i in num_columnas_matriz_final:2) {
  sumando_a_acomodar <- separa_en_secciones(
    cadena = sumandos_char[i - 1],
    tamanio_max_secciones = tamanio_max_secciones)
  for (j in length(sumando_a_acomodar):1) {
    matriz_suma_posicionada[i, i - length(sumando_a_acomodar) + j] <-
      sumando_a_acomodar[j]
  }
}

# Column-wise sum, right to left, propagating overflow through row 1.
# scipen keeps as.character() from emitting scientific notation.
resultado_seccionado <- vector(mode = "character")
options(scipen = 999)
for (i in ncol(matriz_suma_posicionada):1) {
  suma_auxiliar <- as.character(sum(as.numeric(matriz_suma_posicionada[, i])))
  if (nchar(suma_auxiliar) > tamanio_max_secciones && i != 1) {
    # carry the high digits into the next (more significant) column
    matriz_suma_posicionada[1, i - 1] <-
      substr(suma_auxiliar, 1, nchar(suma_auxiliar) - tamanio_max_secciones)
    resultado_seccionado[i] <-
      substr(suma_auxiliar, nchar(suma_auxiliar) - tamanio_max_secciones + 1,
             nchar(suma_auxiliar))
  } else if (nchar(suma_auxiliar) < tamanio_max_secciones) {
    resultado_seccionado[i] <-
      paste0(strrep("0", tamanio_max_secciones - nchar(suma_auxiliar)),
             suma_auxiliar)
  } else {
    resultado_seccionado[i] <- suma_auxiliar
  }
}
resultado_final <- paste(resultado_seccionado, collapse = "")

# ---- reusable functions ------------------------------------------------

# Toom-3 product of two decimal strings of at most 18 digits each.
# Returns the product as a 36-character, zero-padded decimal string.
# Errors on longer inputs (the original silently returned a stale global).
multiplica_numerotes <- function(numerote1, numerote2) {
  if (nchar(numerote1) > 18 || nchar(numerote2) > 18) {
    stop("multiplica_numerotes: inputs must have at most 18 digits",
         call. = FALSE)
  }
  tamanio_max_secciones <- 6
  num_digitos <- 18

  # Zero-pad to 18 digits and split each operand into 3 sections.
  m_rellenado <- paste0(strrep("0", num_digitos - nchar(numerote1)), numerote1)
  m_vec <- separa_en_secciones(m_rellenado, tamanio_max_secciones)
  n_rellenado <- paste0(strrep("0", num_digitos - nchar(numerote2)), numerote2)
  n_vec <- separa_en_secciones(n_rellenado, tamanio_max_secciones)

  # Evaluate both polynomials at 0, 1, -1, 2, -2 and multiply point-wise.
  k <- 3
  num_puntos <- define_numero_de_puntos_a_evaluar(k_m = k, k_n = k)
  valores <- c(0, 1, -1, 2, -2)
  mat_pq <- rellena_matriz_evaluada(valores, 3, num_puntos)
  mat_r <- rellena_matriz_evaluada(valores, 5, num_puntos)
  vec_r <- (mat_pq %*% as.numeric(m_vec)) * (mat_pq %*% as.numeric(n_vec))

  # Interpolate the 5 product coefficients; round() because solve() is
  # floating point and the true coefficients are integers.
  coefs <- solve(a = mat_r, b = vec_r)
  coefs_char <- as.character(round(coefs))

  # Position each coefficient's sections, shifted by one section per power
  # of B = 10^6; row 1 collects carries.
  ncols <- length(coefs_char) + 1
  posicionada <- matrix(rep("0", ncols * ncols), ncol = ncols, nrow = ncols)
  for (fila in ncols:2) {
    secciones <- separa_en_secciones(coefs_char[fila - 1],
                                     tamanio_max_secciones)
    for (j in length(secciones):1) {
      posicionada[fila, fila - length(secciones) + j] <- secciones[j]
    }
  }

  # Column-wise sum with carry propagation, right to left.
  resultado_seccionado <- vector(mode = "character")
  options(scipen = 999)   # side effect: disables scientific notation globally
  for (col in ncols:1) {
    suma_aux <- as.character(sum(as.numeric(posicionada[, col])))
    if (nchar(suma_aux) > tamanio_max_secciones && col != 1) {
      posicionada[1, col - 1] <-
        substr(suma_aux, 1, nchar(suma_aux) - tamanio_max_secciones)
      resultado_seccionado[col] <-
        substr(suma_aux, nchar(suma_aux) - tamanio_max_secciones + 1,
               nchar(suma_aux))
    } else if (nchar(suma_aux) < tamanio_max_secciones) {
      resultado_seccionado[col] <-
        paste0(strrep("0", tamanio_max_secciones - nchar(suma_aux)), suma_aux)
    } else {
      resultado_seccionado[col] <- suma_aux
    }
  }
  paste(resultado_seccionado, collapse = "")
}

# Add two decimal strings of up to 48 digits (enough for the 36-digit
# products above).  Returns a 48-character, zero-padded decimal string.
suma_numerotes <- function(numerote1, numerote2) {
  num_digitos <- 48
  tamanio_max_secciones <- 6   # was read from the global environment before

  # Zero-pad both addends to 48 digits and split into 6-digit sections.
  m_vec <- separa_en_secciones(
    paste0(strrep("0", num_digitos - nchar(numerote1)), numerote1),
    tamanio_max_secciones)
  n_vec <- separa_en_secciones(
    paste0(strrep("0", num_digitos - nchar(numerote2)), numerote2),
    tamanio_max_secciones)

  num_slots <- max(length(m_vec), length(n_vec)) + 1
  vector_suma <- vector(mode = "character", length = num_slots)
  acarreo <- "0"
  # Walk sections right to left, adding the pending carry when present.
  for (seccion in num_slots:2) {
    if (as.numeric(acarreo) != 0) {
      parcial <- as.character(sum(as.numeric(m_vec[seccion - 1]),
                                  as.numeric(n_vec[seccion - 1]),
                                  as.numeric(acarreo)))
      acarreo <- "0"
    } else {
      parcial <- as.character(sum(as.numeric(m_vec[seccion - 1]),
                                  as.numeric(n_vec[seccion - 1])))
    }
    if (nchar(parcial) > tamanio_max_secciones) {
      # overflow: high digits become the carry for the next slot
      acarreo <- substr(parcial, 1, nchar(parcial) - tamanio_max_secciones)
      parcial <- substr(parcial, nchar(parcial) - tamanio_max_secciones + 1,
                        nchar(parcial))
    } else if (nchar(parcial) < tamanio_max_secciones) {
      parcial <- paste0(strrep("0", tamanio_max_secciones - nchar(parcial)),
                        parcial)
    }
    vector_suma[seccion] <- parcial
  }
  paste(vector_suma, collapse = "")
}

# ---- matrix multiplication demo ----------------------------------------

matriz1 <- matrix(data = c("1234567890", "1234567890", "78978978",
                           "7897879", "78979879", "34245322"),
                  ncol = 2, nrow = 3, byrow = TRUE)
matriz2 <- matrix(data = c("1234567890", "1234567890", "78978978",
                           "7897879", "78979879", "34245322"),
                  ncol = 3, nrow = 2, byrow = TRUE)

res_mult_1 <- multiplica_numerotes(matriz1[1, 1], matriz2[1, 1])
res_mult_2 <- multiplica_numerotes(matriz1[1, 2], matriz2[2, 1])

# Classic triple loop: each output cell is a sum of big-number products.
matriz_final <- matrix(data = rep("0", 9), ncol = ncol(matriz2),
                       nrow = nrow(matriz1))
for (rowfinal in 1:nrow(matriz_final)) {
  for (colfinal in 1:ncol(matriz_final)) {
    vectorsin1 <- matriz1[rowfinal, ]
    vectorsin2 <- matriz2[, colfinal]
    sumandos <- vector(mode = "character", length = length(vectorsin2))
    suma_total <- "0"
    for (i in 1:length(vectorsin2)) {
      sumandos[i] <- multiplica_numerotes(vectorsin1[i], vectorsin2[i])
      suma_total <- suma_numerotes(suma_total, sumandos[i])
    }
    matriz_final[rowfinal, colfinal] <- suma_total
  }
}
/MNO/proyecto_final/MNO_2017/proyectos/equipos/equipo_4/avance_22_05_2017/codigoR/toom_cook.R
permissive
fernandatellezg/analisis-numerico-computo-cientifico
R
false
false
13,820
r
# Toom-Cook (Toom-3) multiplication of large decimal numbers, working over
# base-10^6 "sections" of the operands.  The script first walks through the
# algorithm step by step for one pair of numbers, then packages the same
# steps as reusable functions and multiplies two matrices of big numbers.

library(stringr)

# ---- helpers -----------------------------------------------------------

# How many base-(b^k) digits the larger of the two operands needs.
calcula_base <- function(num_1, num_2, b, k) {
  i <- max(floor(floor(log(x = as.numeric(num_1), base = b)) / k),
           floor(floor(log(x = as.numeric(num_2), base = b)) / k))
  i <- i + 1
  return(i)
}

# Split a number given as a string into right-aligned chunks of at most
# `tamanio_max_secciones` characters; most significant chunk comes first.
separa_en_secciones <- function(cadena, tamanio_max_secciones) {
  num_secciones <- ceiling(nchar(cadena) / tamanio_max_secciones)
  numero_seccionado <- vector(mode = "character")
  for (i in 1:num_secciones) {
    ini <- max(1, nchar(cadena) - i * tamanio_max_secciones + 1)
    fin <- nchar(cadena) - (i - 1) * tamanio_max_secciones
    numero_seccionado[num_secciones + 1 - i] <- substr(cadena, ini, fin)
  }
  return(numero_seccionado)
}

# Toom-k interpolation needs k_m + k_n - 1 evaluation points.
define_numero_de_puntos_a_evaluar <- function(k_m, k_n) {
  return(k_m + k_n - 1)
}

# Evaluation (Vandermonde-style) matrix: row i holds the powers of
# valores_a_evaluar[i], highest power first.
rellena_matriz_evaluada <- function(valores_a_evaluar, num_columnas,
                                    num_puntos_a_evaluar) {
  matriz_final <- matrix(ncol = num_columnas, nrow = num_puntos_a_evaluar)
  for (i in 1:num_puntos_a_evaluar) {
    for (k in 1:num_columnas) {
      matriz_final[i, k] <- valores_a_evaluar[i]^(num_columnas - k)
    }
  }
  return(matriz_final)
}

# ---- worked example, step by step --------------------------------------

m_original <- "1234567890"
n_original <- "1234567890"

base_b <- 10
exponente_inicial_b <- 4
b <- base_b^exponente_inicial_b   # example working base, 10^4
k <- 3                            # Toom-3: three sections per operand

# Section size suggested by the operands (kept for reference), then fixed
# at 6 decimal digits so 18-digit operands split into exactly 3 sections.
i <- calcula_base(num_1 = m_original, num_2 = n_original,
                  b = base_b^exponente_inicial_b, k = k)
tamanio_max_secciones <- i * exponente_inicial_b
tamanio_max_secciones <- 6

# Zero-pad both operands to 18 digits and split them into sections.
num_digitos <- 18
ceros_m <- str_c(rep("0", num_digitos - nchar(m_original)), collapse = "")
m_orig_rellenado <- str_c(c(ceros_m, m_original), collapse = "")
m_vector <- separa_en_secciones(cadena = m_orig_rellenado,
                                tamanio_max_secciones = tamanio_max_secciones)
ceros_n <- str_c(rep("0", num_digitos - nchar(n_original)), collapse = "")
n_orig_rellenado <- str_c(c(ceros_n, n_original), collapse = "")
n_vector <- separa_en_secciones(cadena = n_orig_rellenado,
                                tamanio_max_secciones = tamanio_max_secciones)

# Number of evaluation points (5 for Toom-3).
num_puntos_a_evaluar <- define_numero_de_puntos_a_evaluar(k_m = k, k_n = k)

# Evaluation matrices at the points 0, 1, -1, 2, -2.
valores <- c(0, 1, -1, 2, -2)
matriz_evaluada_p_q <- rellena_matriz_evaluada(
  valores_a_evaluar = valores, num_columnas = 3,
  num_puntos_a_evaluar = num_puntos_a_evaluar)
matriz_evaluada_r <- rellena_matriz_evaluada(
  valores_a_evaluar = valores, num_columnas = 5,
  num_puntos_a_evaluar = num_puntos_a_evaluar)

# Point-wise products r(x) = p(x) q(x), then interpolate the coefficients.
vector_r <- (matriz_evaluada_p_q %*% as.numeric(m_vector)) *
  (matriz_evaluada_p_q %*% as.numeric(n_vector))
inversa <- solve(a = matriz_evaluada_r) * 24
sumandos <- solve(a = matriz_evaluada_r, b = vector_r)

# Chop each coefficient into 6-digit sections and place them, shifted by
# one section per power of B = 10^6, into a matrix; row 1 holds carries.
sumandos_char <- as.character(trunc(sumandos))
num_columnas_matriz_final <- length(sumandos_char) + 1
matriz_suma_posicionada <- matrix(
  data = rep("0", num_columnas_matriz_final * num_columnas_matriz_final),
  ncol = num_columnas_matriz_final, nrow = num_columnas_matriz_final)
sumando_a_acomodar <- vector(mode = "character")
for (i in num_columnas_matriz_final:2) {
  sumando_a_acomodar <- separa_en_secciones(
    cadena = sumandos_char[i - 1],
    tamanio_max_secciones = tamanio_max_secciones)
  for (j in length(sumando_a_acomodar):1) {
    matriz_suma_posicionada[i, i - length(sumando_a_acomodar) + j] <-
      sumando_a_acomodar[j]
  }
}

# Column-wise sum, right to left, propagating overflow through row 1.
resultado_seccionado <- vector(mode = "character")
options(scipen = 999)   # keep as.character() away from scientific notation
for (i in ncol(matriz_suma_posicionada):1) {
  suma_auxiliar <- as.character(sum(as.numeric(matriz_suma_posicionada[, i])))
  if (nchar(suma_auxiliar) > tamanio_max_secciones & i != 1) {
    matriz_suma_posicionada[1, i - 1] <-
      substr(suma_auxiliar, start = 1,
             stop = nchar(suma_auxiliar) - tamanio_max_secciones)
    resultado_seccionado[i] <-
      substr(suma_auxiliar,
             start = nchar(suma_auxiliar) - tamanio_max_secciones + 1,
             stop = nchar(suma_auxiliar))
  } else if (nchar(suma_auxiliar) < tamanio_max_secciones) {
    ceros_sumando <- str_c(rep("0", tamanio_max_secciones - nchar(suma_auxiliar)),
                           collapse = "")
    suma_auxiliar_completa <- str_c(c(ceros_sumando, suma_auxiliar),
                                    collapse = "")
    resultado_seccionado[i] <- suma_auxiliar_completa
  } else {
    resultado_seccionado[i] <- suma_auxiliar
  }
}

# Collapse the sections into a single decimal string.
resultado_final <- str_c(resultado_seccionado, collapse = "")

# ---- reusable functions ------------------------------------------------

# Toom-3 product of two decimal strings of at most 18 digits each, returned
# as a 36-character, zero-padded decimal string.
# NOTE(review): for longer inputs the body is skipped and the function
# returns whatever `resultado_final` is visible in the enclosing
# environment — behavior preserved from the original.
multiplica_numerotes <- function(numerote1, numerote2) {
  if (nchar(numerote1) <= 18 & nchar(numerote2) <= 18) {
    m_original <- numerote1
    n_original <- numerote2
    tamanio_max_secciones <- 6
    num_digitos <- 18

    # zero-pad to 18 digits and split each operand into 3 sections
    ceros_m <- str_c(rep("0", num_digitos - nchar(m_original)), collapse = "")
    m_orig_rellenado <- str_c(c(ceros_m, m_original), collapse = "")
    m_vector <- separa_en_secciones(
      cadena = m_orig_rellenado,
      tamanio_max_secciones = tamanio_max_secciones)
    ceros_n <- str_c(rep("0", num_digitos - nchar(n_original)), collapse = "")
    n_orig_rellenado <- str_c(c(ceros_n, n_original), collapse = "")
    n_vector <- separa_en_secciones(
      cadena = n_orig_rellenado,
      tamanio_max_secciones = tamanio_max_secciones)

    # evaluate at 0, 1, -1, 2, -2
    k <- 3
    num_puntos_a_evaluar <- define_numero_de_puntos_a_evaluar(k_m = k, k_n = k)
    valores <- c(0, 1, -1, 2, -2)
    matriz_evaluada_p_q <- rellena_matriz_evaluada(
      valores_a_evaluar = valores, num_columnas = 3,
      num_puntos_a_evaluar = num_puntos_a_evaluar)
    matriz_evaluada_r <- rellena_matriz_evaluada(
      valores_a_evaluar = valores, num_columnas = 5,
      num_puntos_a_evaluar = num_puntos_a_evaluar)

    # point-wise products, then interpolate the coefficients
    vector_r <- (matriz_evaluada_p_q %*% as.numeric(m_vector)) *
      (matriz_evaluada_p_q %*% as.numeric(n_vector))
    inversa <- solve(a = matriz_evaluada_r) * 24
    sumandos <- solve(a = matriz_evaluada_r, b = vector_r)

    # position the coefficient sections; row 1 collects carries
    sumandos_char <- as.character(trunc(sumandos))
    num_columnas_matriz_final <- length(sumandos_char) + 1
    matriz_suma_posicionada <- matrix(
      data = rep("0", num_columnas_matriz_final * num_columnas_matriz_final),
      ncol = num_columnas_matriz_final, nrow = num_columnas_matriz_final)
    sumando_a_acomodar <- vector(mode = "character")
    for (i in num_columnas_matriz_final:2) {
      sumando_a_acomodar <- separa_en_secciones(
        cadena = sumandos_char[i - 1],
        tamanio_max_secciones = tamanio_max_secciones)
      for (j in length(sumando_a_acomodar):1) {
        matriz_suma_posicionada[i, i - length(sumando_a_acomodar) + j] <-
          sumando_a_acomodar[j]
      }
    }

    # column-wise sum with carry propagation, right to left
    resultado_seccionado <- vector(mode = "character")
    options(scipen = 999)
    for (i in ncol(matriz_suma_posicionada):1) {
      suma_auxiliar <-
        as.character(sum(as.numeric(matriz_suma_posicionada[, i])))
      if (nchar(suma_auxiliar) > tamanio_max_secciones & i != 1) {
        matriz_suma_posicionada[1, i - 1] <-
          substr(suma_auxiliar, start = 1,
                 stop = nchar(suma_auxiliar) - tamanio_max_secciones)
        resultado_seccionado[i] <-
          substr(suma_auxiliar,
                 start = nchar(suma_auxiliar) - tamanio_max_secciones + 1,
                 stop = nchar(suma_auxiliar))
      } else if (nchar(suma_auxiliar) < tamanio_max_secciones) {
        ceros_sumando <- str_c(
          rep("0", tamanio_max_secciones - nchar(suma_auxiliar)),
          collapse = "")
        suma_auxiliar_completa <- str_c(c(ceros_sumando, suma_auxiliar),
                                        collapse = "")
        resultado_seccionado[i] <- suma_auxiliar_completa
      } else {
        resultado_seccionado[i] <- suma_auxiliar
      }
    }
    resultado_final <- str_c(resultado_seccionado, collapse = "")
  }
  return(resultado_final)
}

# Add two decimal strings of up to 48 digits, returned as a zero-padded
# string built from 8 sections.
# NOTE(review): reads `tamanio_max_secciones` from the global environment —
# behavior preserved from the original.
suma_numerotes <- function(numerote1, numerote2) {
  num_digitos <- 48

  # zero-pad both addends to 48 digits and split into sections
  ceros_m <- str_c(rep("0", num_digitos - nchar(numerote1)), collapse = "")
  m_orig_rellenado <- str_c(c(ceros_m, numerote1), collapse = "")
  m_vector <- separa_en_secciones(
    cadena = m_orig_rellenado,
    tamanio_max_secciones = tamanio_max_secciones)
  ceros_n <- str_c(rep("0", num_digitos - nchar(numerote2)), collapse = "")
  n_orig_rellenado <- str_c(c(ceros_n, numerote2), collapse = "")
  n_vector <- separa_en_secciones(
    cadena = n_orig_rellenado,
    tamanio_max_secciones = tamanio_max_secciones)

  num_columnas_vector_final <- max(length(m_vector), length(n_vector)) + 1
  vector_suma_seccionada <- vector(mode = "character",
                                   length = num_columnas_vector_final)
  aux_suma_extra <- "0"
  # walk the sections right to left, adding the pending carry when present
  for (seccion in num_columnas_vector_final:2) {
    aux_suma_seccionada <- 0
    if (as.numeric(aux_suma_extra) != 0) {
      aux_suma_seccionada <- as.character(
        sum(as.numeric(m_vector[seccion - 1]),
            as.numeric(n_vector[seccion - 1]),
            as.numeric(aux_suma_extra)))
      aux_suma_extra <- 0
    } else {
      aux_suma_seccionada <- as.character(
        sum(as.numeric(m_vector[seccion - 1]),
            as.numeric(n_vector[seccion - 1])))
    }
    # overflow: high digits become the carry for the next section
    if (nchar(aux_suma_seccionada) > tamanio_max_secciones) {
      aux_suma_extra <- substr(
        x = aux_suma_seccionada, start = 1,
        stop = nchar(aux_suma_seccionada) - tamanio_max_secciones)
      aux_suma_seccionada <- substr(
        x = aux_suma_seccionada,
        start = nchar(aux_suma_seccionada) - tamanio_max_secciones + 1,
        stop = nchar(aux_suma_seccionada))
    } else if (nchar(aux_suma_seccionada) < tamanio_max_secciones) {
      ceros_sumando <- str_c(
        rep("0", tamanio_max_secciones - nchar(aux_suma_seccionada)),
        collapse = "")
      aux_suma_seccionada <- str_c(c(ceros_sumando, aux_suma_seccionada),
                                   collapse = "")
    }
    vector_suma_seccionada[seccion] <- aux_suma_seccionada
  }
  suma_colapsada <- str_c(vector_suma_seccionada, collapse = "")
  return(suma_colapsada)
}

# ---- matrix multiplication demo ----------------------------------------

matriz1 <- matrix(data = c("1234567890", "1234567890", "78978978",
                           "7897879", "78979879", "34245322"),
                  ncol = 2, nrow = 3, byrow = TRUE)
matriz2 <- matrix(data = c("1234567890", "1234567890", "78978978",
                           "7897879", "78979879", "34245322"),
                  ncol = 3, nrow = 2, byrow = TRUE)

res_mult_1 <- multiplica_numerotes(matriz1[1, 1], matriz2[1, 1])
res_mult_2 <- multiplica_numerotes(matriz1[1, 2], matriz2[2, 1])

# Classic triple loop: every output cell is a sum of big-number products.
matriz_final <- matrix(data = rep("0", 9), ncol = ncol(matriz2),
                       nrow = nrow(matriz1))
for (rowfinal in 1:nrow(matriz_final)) {
  for (colfinal in 1:ncol(matriz_final)) {
    vectorsin1 <- matriz1[rowfinal, ]
    vectorsin2 <- matriz2[, colfinal]
    sumandos <- vector(mode = "character", length = length(vectorsin2))
    suma_total <- "0"
    for (i in 1:length(vectorsin2)) {
      sumandos[i] <- multiplica_numerotes(vectorsin1[i], vectorsin2[i])
      suma_total <- suma_numerotes(suma_total, sumandos[i])
    }
    matriz_final[rowfinal, colfinal] <- suma_total
  }
}
# Copyright (c) Microsoft Corporation. All rights reserved. # Third Party Programs. This software enables you to obtain software applications from other sources. # Those applications are offered and distributed by third parties under their own license terms. # Microsoft is not developing, distributing or licensing those applications to you, but instead, # as a convenience, enables you to use this software to obtain those applications directly from # the application providers. # By using the software, you acknowledge and agree that you are obtaining the applications directly # from the third party providers and under separate license terms, and that it is your responsibility to locate, # understand and comply with those license terms. # Microsoft grants you no license rights for third-party software or applications that is obtained using this software. ##PBI_R_VISUAL: VIZGAL_CLUSTERING_WITH_OUTLIERS Graphical display of a clustering applied to point cloud # Computes and visualizes a clustering performed with DBSCAN clustering algorithm. # Allows user to control granularity of clusters or to find it automatically. # Provides several options for scaling the data and for visualization of clusters. 
# INPUT: # The input dataset should include at least two numerical non-constant columns # # # WARNINGS: Time consuming for large datasets # # CREATION DATE: 11/12/2016 # # LAST UPDATE: 11/22/2016 # # VERSION: 0.0.1 # # R VERSION TESTED: 3.2.2 # # AUTHOR: pbicvsupport@microsoft.com # # REFERENCES: https://cran.r-project.org/package=dbscan # https://en.wikibooks.org/wiki/Data_Mining_Algorithms_In_R/Clustering/Density-Based_Clustering ############ User Parameters ######### #DEBUG #save(list = ls(all.names = TRUE), file='C:/Users/boefraty/projects/PBI/R/tempData.Rda') #load(file='C:/Users/boefraty/projects/PBI/R/tempData.Rda') ###############Library Declarations############### libraryRequireInstall = function(packageName) { if(!require(packageName, character.only = TRUE)) warning( paste ("*** Failed to install '", packageName, "' ***", sep = "")) } libraryRequireInstall("scales") libraryRequireInstall("fpc") libraryRequireInstall("car") libraryRequireInstall("dbscan") ###############Internal parameters definitions################# ##PBI_PARAM: the random number generator (RNG) state for random number generation #Type: numeric, Default:42, Range:NA, PossibleValues:NA, Remarks: NA randSeed = 42 ##PBI_PARAM: transparency of points on plot, 0 is invisible, 1 is opaque #Type: numeric, Default:0.25, Range:[0, 1], PossibleValues:NA, Remarks: NA #pointOpacity = 0.5 ##PBI_PARAM: minimum required samples (rows in data table) #Type: positive integer, Default:10, Range:[5, 100], PossibleValues:NA, Remarks: NA minSamplesToRun = 12 ##PBI_PARAM: maximum samples to use inside autoNumClusters function #Type: positive integer, Default:1200, Range:[100, 10000], PossibleValues:NA, Remarks: NA maxSamples4autoGranularity = 1200 ##PBI_PARAM: insignificant principle component threshold # If PCA is applied all dimensions, that explain less than insigPC percentage of variance are removed #Type: positive numeric, Default:0.05, Range:[0, 1], PossibleValues:NA, Remarks: NA insigPC = 0.05 
##PBI_PARAM: type for outlier marker #Type: integer, Default:4, Range:[1:20], PossibleValues:NA, Remarks: NA outLierPch = 4 ##PBI_PARAM: size for legend text #Type: float, Default:1, Range:[0:5], PossibleValues:NA, Remarks: NA legendTextSize = 1 ##PBI_PARAM: size for warning text #Type: float, Default:0.8, Range:[0:2], PossibleValues:NA, Remarks: NA warningCex = 0.8 ###############Internal functions definitions################# # if not enough space --> do not show the legend validateIfToShowLegend = function(numClust, textSize) { ppp = par()$din horFlag = (2.5*textSize < ppp[1]) verFlag = (0.35*numClust*textSize < ppp[2]) return(horFlag && verFlag) } # if not enough space replace "long text" by "long te..." cutStr2Show = function(strText, strCex = 0.8, abbrTo = 100, isH = TRUE, maxChar = 0, partAvailable = 1) { # strText = text to modify # strCex = font size # abbrTo = very long string will be abbreviated to "abbrTo" characters # isH = "is horizontal" ? # maxChar = text smaller than maxChar is replaced by NULL # partAvailable = which portion of window is available for text, in [0,1] if(is.null(strText)) return (NULL) SCL = 0.094*strCex pardin = par()$din gStand = partAvailable*(isH*pardin[1]+(1-isH)*pardin[2]) /SCL # if very very long abbreviate if(nchar(strText)>abbrTo && nchar(strText)> 1) strText = abbreviate(strText, abbrTo) # if looooooong convert to lo... 
if(nchar(strText)>round(gStand) && nchar(strText)> 1) strText = paste(substring(strText,1,floor(gStand)),"...",sep="") # if shorter than maxChar remove if(gStand<=maxChar) strText = NULL return(strText) } #function finds average distance of np1'th to np2'th neighbour, for example: # if np1=np2=1, will find distance to the nearest neighbour # in np1=np2=3, will find distance to the third nearest neighbour # in np1=1, np2=3, will find average distance to the three nearest neighbours avgNearestDist <- function(data, np1 = 1, np2 = np1 ) { nn <- dbscan::kNN(data, k = np2) distances = nn$dist[, c(np1:np2)] if( np1<np2 ) { res <- sort(apply (distances, 1, mean) ) }else{ res = sort(distances) } return(res) } #sum of square errors for linear fit SSE = function(x, y) {sum( abs( lm( formula = y ~ x, data = data.frame(x = x, y = y) )$residuals )^2)} # find knee point which corresponds to best cut-point of two linear fits findKnee <- function( inX, inY ) { orderX = order( inX ) inX = inX[orderX];inY = inY[orderX] L = length(inX) resV = rep(Inf, L) first = 3 last = L-3 for (i in (first+2):(last-2)) { x = inX[first:(i-1)] y = inY[first:(i-1)] resid = SSE(x, y) x = inX[(i+1):last] y = inY[(i+1):last] resV[i]=resid+SSE(x, y) } mi = which.min(resV)-1 return( c(inX[mi], inY[mi]) ) } #verify if the column is numeric and non-constant correctColumn <- function(someColumn){ is.numeric(someColumn)&&length(unique(someColumn)) > 1 } #euclidean distance between two points euc.dist <- function(x1, x2) sqrt(sum((x1 - x2) ^ 2)) #plot convex hull plotCH = function(xcoord, ycoord, lcolor){ hpts <- chull(x = xcoord, y = ycoord) hpts <- c(hpts, hpts[1]) lines( xcoord[hpts], ycoord[hpts], col = lcolor, lty = 3 ) } # get datapoints closest to centers getDelegates <- function(orig_data, clean_data, cluster_centers) { nc <- nrow(cluster_centers) dc <- ncol(cluster_centers) nr <- nrow(clean_data) delegates <- NULL for(clus in seq(1, length.out = nc)) { B <- matrix(rep(cluster_centers[clus, ], times = 
nr), nrow = nr, ncol = dc, byrow = TRUE) D <- clean_data[, c(1:dc)]-B ed <- apply(D^2, 1, sum) delegates <- rbind(delegates, orig_data[which.min(ed), ]) } return(delegates) } # heuristic method for minPts parameter in dbscan # larger number of points for high dimensionality autoParamMinPtsFunc <- function(mydata, defaultMinPts = 5, extraPointPerRowCount = 250) { nc <- ncol( mydata ) nr <- nrow( mydata ) minPts <- max(defaultMinPts, (2*nc-1)) + floor( nr/extraPointPerRowCount ) return(minPts) } #autoGranularity autoEpsFunc <- function(mydata, maxPoints, reachMinPts, alpha = 0.6) { if(nrow(mydata) > maxPoints) mydata <- mydata[sample(nrow(mydata), maxPoints), ] avgNNmp <- avgNearestDist(mydata, 1, reachMinPts) # to reachMinPts nearest avgNN1 <- avgNearestDist(mydata, 1, 1) # to nearest avgNN = alpha*avgNN1+(1-alpha)*avgNNmp eps = findKnee(1:length(avgNNmp), avgNNmp)[2] #eps = findKnee(1:length(avgNN1), avgNN1)[2] return(eps) } #granularity2eps granularity2epsMinMax <- function(mydata, g, maxPoints, reachMinPts) { #subsample if needed if(ncol(mydata) > maxPoints) mydata <- mydata[sample(ncol(mydata), maxPoints), ] avgNN <- avgNearestDist(mydata, reachMinPts, reachMinPts) resEps = as.numeric(quantile(avgNN, g)) return(resEps) } #compute eps for dbscan (not in use) getEps <- function(mydata, frac = 0.04){euc.dist(sapply(mydata, max), sapply(mydata, min))*frac} #get centers from clusters centersFromClusters <- function(mydata, clusters) { NC <- max(clusters) centers <- NULL for (c in seq(1, length.out = NC)) { centers = rbind(centers, apply(mydata[clusters == c, ], 2, mean)) } return(centers) } myPallete = function(n=100,palleteType = "rainbow") { mp = rainbow(n) if(palleteType == "heat") mp = heat.colors(n) if(palleteType == "terrain") mp = terrain.colors(n) if(palleteType == "topo") mp = topo.colors(n) if(palleteType == "cm") mp = cm.colors(n+1)[-1] #remove white if(palleteType == "gray") mp = gray(0:n/ n) return(mp) } # variables to come from group:show 
addLabel2clusterDelegate = TRUE addLabel2points = TRUE addLegend = TRUE ################################################################################# ### The fuction to produce visual ################################################################################# #********* PBI Parameters Block *************** if(!exists("Values")) Values = NULL if(!exists("PointLabels")) PointLabels = NULL addLabel2points = TRUE #default if (exists("settings_labeling_params_show")) { addLabel2points = settings_labeling_params_show } addLabel2clusterDelegate = FALSE #default if (exists("settings_representative_params_show")) { addLabel2clusterDelegate = settings_representative_params_show } addLegend = TRUE #default if (exists("settings_legend_params_show")) { addLegend = settings_legend_params_show } if(exists("settings_prepocessing_params_show") && settings_prepocessing_params_show == FALSE) rm(list= ls(pattern = "settings_prepocessing_params_" )) if(exists("settings_clusterNum_params_show") && settings_clusterNum_params_show == FALSE) rm(list= ls(pattern = "settings_clusterNum_params_" )) if(exists("settings_viz_params_show") && settings_viz_params_show == FALSE) rm(list= ls(pattern = "settings_viz_params_" )) if(exists("settings_labeling_params_show") && settings_labeling_params_show == FALSE) rm(list= ls(pattern = "settings_labeling_params_" )) if(exists("settings_representative_params_show") && settings_representative_params_show == FALSE) rm(list= ls(pattern = "settings_representative_params_" )) if(exists("settings_legend_params_show") && settings_legend_params_show == FALSE) rm(list= ls(pattern = "settings_legend_params_" )) if(exists("settings_additional_params_show") && settings_additional_params_show == FALSE) rm(list= ls(pattern = "settings_additional_params_" )) ##PBI_PARAM: display_name: Scale data, tooltip:Used to standardize the range of features of data # Type: bool, default:FALSE, # Min: , Max: scaleData = FALSE #default if 
(exists("settings_prepocessing_params_scaleData")) { scaleData = settings_prepocessing_params_scaleData } ##PBI_PARAM: display_name: Apply PCA, tooltip:Recommended for data with more than two dimensions # Type: bool, default:FALSE, # Min: , Max: applyPCA = FALSE #default if (exists("settings_prepocessing_params_applyPCA")) { applyPCA = settings_prepocessing_params_applyPCA } ##PBI_PARAM: display_name: Granularity method, tooltip:Select preferable method to set granularity parameter(eps) # Type: enumeration, default:'scale', # Min: , Max: # enumeration options: auto ,scale ,manual , granularityParameterType = 'auto' #default if (exists("settings_clusterNum_params_granularityParameterType")) { granularityParameterType = settings_clusterNum_params_granularityParameterType } ##PBI_PARAM: display_name: Granularity, tooltip:User-defined granularity level, used only if autoGranularity = FALSE. Smaller values correspond to more clusters # Type: numeric, default:50, # Min: 1, Max:100 granularityLevel = 50 #default if (exists("settings_clusterNum_params_percentile")) { granularityLevel = settings_clusterNum_params_percentile granularityLevel = max( min (granularityLevel, 100), 1) } ##PBI_PARAM: display_name: Reachability distance, tooltip:How close points should be to each other to be considered a part of a cluster (eps) # Type: numeric, default:0, # Min: 0, Max:1e+08 eps = 0 #default if (exists("settings_clusterNum_params_eps")) { eps = settings_clusterNum_params_eps eps = max( min (eps, 1e+08), 0) } ##PBI_PARAM: display_name: Find minimum points automatically, tooltip: # Type: bool, default:TRUE, # Min: , Max: autoParamMinPts = TRUE #default if (exists("settings_clusterNum_params_autoParamMinPts")) { autoParamMinPts = settings_clusterNum_params_autoParamMinPts } ##PBI_PARAM: display_name: Minimum points per cluster, tooltip:User-defined minimum points parameter. 
Smaller values correspond to more clusters # Type: numeric, default:5, # Min: 1, Max:1000 minPtsParam = 5 #default if (exists("settings_clusterNum_params_minPtsParam")) { minPtsParam = settings_clusterNum_params_minPtsParam minPtsParam = max( min (minPtsParam, 1000), 1) } ##PBI_PARAM: display_name: Draw ellipse, tooltip: # Type: bool, default:FALSE, # Min: , Max: drawEllipse = FALSE #default if (exists("settings_viz_params_drawEllipse")) { drawEllipse = settings_viz_params_drawEllipse } ##PBI_PARAM: display_name: Draw convex hull, tooltip: # Type: bool, default:FALSE, # Min: , Max: drawConvexHull = FALSE #default if (exists("settings_viz_params_drawConvexHull")) { drawConvexHull = settings_viz_params_drawConvexHull } ##PBI_PARAM: display_name: Draw centroid, tooltip: # Type: bool, default:FALSE, # Min: , Max: drawCenters = FALSE #default if (exists("settings_viz_params_drawCenters")) { drawCenters = settings_viz_params_drawCenters } ##PBI_PARAM: display_name: Point opacity, tooltip: # Type: numeric, default:30, # Min: 0, Max:100 pointOpacity = 30 #default if (exists("settings_viz_params_percentile")) { pointOpacity = settings_viz_params_percentile pointOpacity = max( min (pointOpacity, 100), 0) } ##PBI_PARAM: display_name: Point size, tooltip: # Type: numeric, default:10, # Min: 1, Max:50 pointSize = 10 #default if (exists("settings_viz_params_weight")) { pointSize = settings_viz_params_weight pointSize = max( min (pointSize, 50), 1) } ##PBI_PARAM: display_name: Font size, tooltip: # Type: numeric, default:8, # Min: 8, Max:40 labelingFontSize = 8 #default if (exists("settings_labeling_params_textSize")) { labelingFontSize = settings_labeling_params_textSize labelingFontSize = max( min (labelingFontSize, 40), 8) } ##PBI_PARAM: display_name: Label opacity, tooltip: # Type: numeric, default:100, # Min: 0, Max:100 labelOpacity = 100 #default if (exists("settings_labeling_params_percentile")) { labelOpacity = settings_labeling_params_percentile labelOpacity = max( min 
(labelOpacity, 100), 0) } ##PBI_PARAM: display_name: Limit label length, tooltip:Abbreviate labels to a limited length # Type: numeric, default:5, # Min: 1, Max:100 maxLenPointLabel = 5 #default if (exists("settings_labeling_params_maxLenPointLabel")) { maxLenPointLabel = settings_labeling_params_maxLenPointLabel maxLenPointLabel = max( min (maxLenPointLabel, 100), 1) } ##PBI_PARAM: display_name: Percentage of points labeled, tooltip:Avoids cluttering text # Type: numeric, default:100, # Min: 0, Max:100 percPointsLabeled = 100 #default if (exists("settings_labeling_params_percentile1")) { percPointsLabeled = settings_labeling_params_percentile1 percPointsLabeled = max( min (percPointsLabeled, 100), 0) } ##PBI_PARAM: display_name: Font size, tooltip: # Type: numeric, default:8, # Min: 1, Max:40 representativeFontSize = 8 #default if (exists("settings_representative_params_textSize")) { representativeFontSize = settings_representative_params_textSize representativeFontSize = max( min (representativeFontSize, 40), 1) } ##PBI_PARAM: display_name: Limit label length, tooltip:Abbreviate labels to a limited length # Type: numeric, default:30, # Min: 1, Max:100 maxLenDelegate = 30 #default if (exists("settings_representative_params_maxLenDelegate")) { maxLenDelegate = settings_representative_params_maxLenDelegate maxLenDelegate = max( min (maxLenDelegate, 100), 1) } ##PBI_PARAM: display_name: Pallete type, tooltip:Color pallete type # Type: enumeration, default:'rainbow', # Min: , Max: # enumeration options: rainbow ,gray ,cm ,topo ,terrain ,heat , palleteType = 'rainbow' #default if (exists("settings_legend_params_palleteType")) { palleteType = settings_legend_params_palleteType } ##PBI_PARAM: display_name: Color of outliers, tooltip: # Type: fill, default:'black', # Min: , Max: outLierCol = 'black' #default if (exists("settings_legend_params_outLierCol")) { outLierCol = settings_legend_params_outLierCol } ##PBI_PARAM: display_name: Show warnings, tooltip: # Type: bool, 
default:TRUE, # Min: , Max: showWarnings = TRUE #default if (exists("settings_additional_params_showWarnings")) { showWarnings = settings_additional_params_showWarnings } { if(eps ==0) #not valid "eps" => "auto" mode eps = NULL #addLegend = TRUE delegateCex = representativeFontSize/10 skipLabel2points = max(100/as.numeric(percPointsLabeled) , 1) labelOpacity = labelOpacity/100 pointOpacity = pointOpacity/100 cexLabel2points = labelingFontSize/10 pointMarkerSize = pointSize/10 drawPoints = TRUE if(!is.null(Values)) dataset = Values if(!is.null(PointLabels)) PointLabels[,1] = as.character(PointLabels[,1]) if(!is.null(PointLabels) && !is.null(Values)) dataset = cbind(PointLabels, dataset) if(!is.null(PointLabels) && is.null(Values)) dataset = PointLabels if(addLabel2points && is.null(PointLabels)) addLabel2points = FALSE ###############Upfront input correctness validations (where possible)################# minPtsParam = round(minPtsParam) pbiWarning <- NULL dataset <- na.omit(dataset) # deletion of missing orig_dataset <- dataset #used later for delegates # verify correctness of dataset useColumns <- sapply(dataset, correctColumn) if(showWarnings && sum(useColumns[-1])<ncol(dataset)-1) pbiWarning <- "At least one of the columns was not numeric, or constant" #exclude defect columns dataset <- as.data.frame(dataset[, useColumns]) nc <- ncol(dataset) nr <- nrow(dataset) checkDimiensionality <- TRUE if(nc < 2 || nr < minSamplesToRun || is.null(Values)) { checkDimiensionality <- FALSE if(showWarnings) pbiWarning <- paste(pbiWarning, "\nNot enough input dimensions"); } if(!autoParamMinPts && minPtsParam >= nr) { checkDimiensionality <- FALSE if(showWarnings) pbiWarning <- paste(pbiWarning, "\nParameter minPts is out of limits"); } ##############Main Visualization script########### maxGLevel = 100 # for "scale" mode maxGLevel can be used to squeeze the range set.seed(randSeed) if(!checkDimiensionality) { plot.new() }else { if(scaleData) { dataset <- 
as.data.frame(scale(dataset)) names(dataset) = paste(names(dataset), "scaled", sep = ".") } if(applyPCA) { dataset.pca <- prcomp(dataset, center = TRUE, scale = F) pExplained <- dataset.pca$sdev^2/sum(dataset.pca$sdev^2) flags <- (pExplained > insigPC); flags[1:2] = TRUE #at least 2 dimensions dataset = as.data.frame(dataset.pca$x[, flags])#reduce dimensions with less than 5% variance } if(autoParamMinPts) minPtsParam = autoParamMinPtsFunc(dataset, extraPointPerRowCount = 175) #find eps if(granularityParameterType == "auto") eps = autoEpsFunc(dataset, maxSamples4autoGranularity, minPtsParam) if(granularityParameterType == "scale") eps <- granularity2epsMinMax(dataset, granularityLevel/maxGLevel, maxSamples4autoGranularity, minPtsParam) if(is.null(eps)) eps = autoEpsFunc(dataset, maxSamples4autoGranularity, minPtsParam) #DBSCAN call cl <- dbscan::dbscan(dataset, eps, minPts = minPtsParam, scale = FALSE, borderPoints = TRUE) numOfClusters = max(cl$cluster) cl$centers <- centersFromClusters(dataset, cl$cluster) drawColors <- c(outLierCol, myPallete(numOfClusters,palleteType = palleteType)) drawColorsLeg <- c(rainbow(numOfClusters), outLierCol) drawPch <- c(outLierPch, rep(19, numOfClusters)) #visualize 2 first coordinates if(drawPoints) colpoints = drawColors[cl$cluster+1] else colpoints = NULL pchPoints = drawPch[cl$cluster+1] #in case of legend extend xlim to the right by 20% xrange = range(dataset[, 1]) drange = xrange[2]-xrange[1] xlim = c(xrange[1]-0.01*drange, xrange[2]+0.01*drange+drange*0.20*addLegend) plot(dataset[, 1], dataset[, 2], col = alpha(colpoints, pointOpacity), pch = pchPoints, xlab = cutStr2Show(names(dataset)[1], strCex =1.1, isH = TRUE), ylab = cutStr2Show(names(dataset)[2], strCex =1.1, isH = FALSE), xlim = xlim, cex = pointMarkerSize) leg <- NULL if(!is.null(cl$centers)) leg <- paste("Cluster ", seq(1, length.out = numOfClusters)) #text pc <- c(rep(19, numOfClusters)) # markers colMarkers <- drawColors[-1] if(drawCenters && 
!is.null(cl$centers)) { points(cl$centers, pch = 7, col = drawColors[-1]) leg <- cbind(leg, paste("Cluster center " , seq(1, length.out = numOfClusters))) pc <- cbind(pc, rep(7, numOfClusters)) colMarkers <- rep(colMarkers, 2) } leg <- c(leg, "Outlers") pc = c(pc, outLierPch) colMarkers <- c(colMarkers, outLierCol) if(drawEllipse) { for(clus in seq(1, length.out = numOfClusters)) { iii <- (cl$cluster == clus) if(sum(iii) > 2) dataEllipse(dataset[iii, 1], dataset[iii, 2], add = T, plot.points = F, levels = 0.85, col = drawColorsLeg[clus], lwd = 1, fill = TRUE, fill.alpha = 0.075, center.pch = NULL) } } if(drawConvexHull) { for(clus in seq(1, length.out = numOfClusters)) { iii <- (cl$cluster == clus) if(sum(iii) > 2) plotCH(dataset[iii, 1], dataset[iii, 2], lcolor = drawColorsLeg[clus]) } } if(addLabel2clusterDelegate) { clean_data = dataset cluster_centers = (cl$centers) if(!is.null(cluster_centers)) { deleg <- getDelegates(orig_dataset, dataset, cl$centers) delegateText = abbreviate(apply(deleg, 1, toString),maxLenDelegate) delegateText = sapply(delegateText, cutStr2Show, strCex = delegateCex, partAvailable = 0.75) text(x = cl$centers[, 1], y = cl$centers[, 2], delegateText, col = "black", cex = delegateCex) } } if(addLabel2points) { iii=sample(1:nrow(dataset),max(1,floor(nrow(dataset)/skipLabel2points))) text(x = dataset[iii, 1], y = dataset[iii, 2], labels = abbreviate(orig_dataset[iii,1],maxLenPointLabel), col = alpha(colpoints[iii], labelOpacity), cex = cexLabel2points) } if(addLegend && validateIfToShowLegend(numClust = numOfClusters, textSize = legendTextSize )) legend("topright", legend = leg, pch = pc, col = alpha(colMarkers, 1), cex = legendTextSize) } if(showWarnings && !is.null(pbiWarning)) { pbiWarning = cutStr2Show(pbiWarning, strCex = warningCex) title(main = NULL, sub = pbiWarning, outer = FALSE, col.sub = "gray50", cex.sub = warningCex) } }
/old/PowerBI-visuals-dbscan/script.r
no_license
boefraty74/ProjectR2pbiviz
R
false
false
23,868
r
# Copyright (c) Microsoft Corporation. All rights reserved.
# Third Party Programs. This software enables you to obtain software applications from other sources.
# Those applications are offered and distributed by third parties under their own license terms.
# Microsoft is not developing, distributing or licensing those applications to you, but instead,
# as a convenience, enables you to use this software to obtain those applications directly from
# the application providers.
# By using the software, you acknowledge and agree that you are obtaining the applications directly
# from the third party providers and under separate license terms, and that it is your responsibility to locate,
# understand and comply with those license terms.
# Microsoft grants you no license rights for third-party software or applications that is obtained using this software.

##PBI_R_VISUAL: VIZGAL_CLUSTERING_WITH_OUTLIERS  Graphical display of a clustering applied to point cloud
# Computes and visualizes a clustering performed with DBSCAN clustering algorithm.
# Allows user to control granularity of clusters or to find it automatically.
# Provides several options for scaling the data and for visualization of clusters.
# INPUT:
# The input dataset should include at least two numerical non-constant columns
#
#
# WARNINGS:  Time consuming for large datasets
#
# CREATION DATE: 11/12/2016
#
# LAST UPDATE: 11/22/2016
#
# VERSION: 0.0.1
#
# R VERSION TESTED: 3.2.2
#
# AUTHOR: pbicvsupport@microsoft.com
#
# REFERENCES: https://cran.r-project.org/package=dbscan
#             https://en.wikibooks.org/wiki/Data_Mining_Algorithms_In_R/Clustering/Density-Based_Clustering

############ User Parameters #########

#DEBUG
#save(list = ls(all.names = TRUE), file='C:/Users/boefraty/projects/PBI/R/tempData.Rda')
#load(file='C:/Users/boefraty/projects/PBI/R/tempData.Rda')

###############Library Declarations###############

# Attach a package by name, emitting a warning if it cannot be loaded.
# NOTE(review): despite its name, this helper does NOT install missing packages —
# it only warns when require() fails; confirm whether an install.packages() step
# was intended (the Power BI service pre-installs these packages, so the warning
# path is presumably a deliberate no-op there).
libraryRequireInstall = function(packageName)
{
  if(!require(packageName, character.only = TRUE))
    warning( paste ("*** Failed to install '", packageName, "' ***", sep = ""))
}

# scales: alpha(); fpc: clustering utilities; car: dataEllipse(); dbscan: dbscan()/kNN()
libraryRequireInstall("scales")
libraryRequireInstall("fpc")
libraryRequireInstall("car")
libraryRequireInstall("dbscan")

###############Internal parameters definitions#################

##PBI_PARAM: the random number generator (RNG) state for random number generation
#Type: numeric, Default:42, Range:NA, PossibleValues:NA, Remarks: NA
randSeed = 42

##PBI_PARAM: transparency of points on plot, 0 is invisible, 1 is opaque
#Type: numeric, Default:0.25, Range:[0, 1], PossibleValues:NA, Remarks: NA
# (kept commented out: the live value comes from the PBI settings block further down)
#pointOpacity = 0.5

##PBI_PARAM: minimum required samples (rows in data table)
#Type: positive integer, Default:10, Range:[5, 100], PossibleValues:NA, Remarks: NA
minSamplesToRun = 12

##PBI_PARAM: maximum samples to use inside autoNumClusters function
#Type: positive integer, Default:1200, Range:[100, 10000], PossibleValues:NA, Remarks: NA
maxSamples4autoGranularity = 1200

##PBI_PARAM: insignificant principle component threshold
# If PCA is applied all dimensions, that explain less than insigPC percentage of variance are removed
#Type: positive numeric, Default:0.05, Range:[0, 1], PossibleValues:NA, Remarks: NA
insigPC = 0.05
##PBI_PARAM: type for outlier marker
#Type: integer, Default:4, Range:[1:20], PossibleValues:NA, Remarks: NA
outLierPch = 4

##PBI_PARAM: size for legend text
#Type: float, Default:1, Range:[0:5], PossibleValues:NA, Remarks: NA
legendTextSize = 1

##PBI_PARAM: size for warning text
#Type: float, Default:0.8, Range:[0:2], PossibleValues:NA, Remarks: NA
warningCex = 0.8

###############Internal functions definitions#################

# Decide whether the legend fits into the current device: requires enough
# horizontal room for the text and enough vertical room for one row per cluster.
validateIfToShowLegend = function(numClust, textSize)
{
  ppp = par()$din  # device size in inches (width, height)
  horFlag = (2.5 * textSize < ppp[1])
  verFlag = (0.35 * numClust * textSize < ppp[2])
  return(horFlag && verFlag)
}

# Shorten a string so it fits the plot region:
# very long strings are abbreviated to `abbrTo` chars, strings still too wide for
# the device are truncated with a "..." suffix, and strings shorter than
# `maxChar` are dropped (NULL) entirely.
#   strText       - text to modify (NULL passes through)
#   strCex        - font size used to estimate printed width
#   isH           - TRUE if the text is laid out horizontally
#   partAvailable - fraction of the window available for the text, in [0, 1]
cutStr2Show = function(strText, strCex = 0.8, abbrTo = 100, isH = TRUE, maxChar = 0, partAvailable = 1)
{
  if(is.null(strText))
    return(NULL)

  SCL = 0.094 * strCex  # empirical inches-per-character at cex = 1
  pardin = par()$din
  gStand = partAvailable * (isH * pardin[1] + (1 - isH) * pardin[2]) / SCL  # chars that fit

  # if very very long abbreviate
  if(nchar(strText) > abbrTo && nchar(strText) > 1)
    strText = abbreviate(strText, abbrTo)

  # if looooooong convert to lo...
  if(nchar(strText) > round(gStand) && nchar(strText) > 1)
    strText = paste(substring(strText, 1, floor(gStand)), "...", sep = "")

  # if shorter than maxChar remove
  if(gStand <= maxChar)
    strText = NULL

  return(strText)
}

# Sorted average distance of each point to its np1'th..np2'th nearest neighbour:
#   np1 == np2 == 1 -> distance to the nearest neighbour
#   np1 == np2 == 3 -> distance to the third nearest neighbour
#   np1 = 1, np2 = 3 -> mean distance to the three nearest neighbours
avgNearestDist <- function(data, np1 = 1, np2 = np1 )
{
  nn <- dbscan::kNN(data, k = np2)
  distances = nn$dist[, c(np1:np2)]
  if( np1 < np2 ) {
    res <- sort(apply(distances, 1, mean))
  }else{
    res = sort(distances)  # single column -> already a vector
  }
  return(res)
}

# Sum of squared residuals of a simple linear fit y ~ x.
SSE = function(x, y) {sum( abs( lm( formula = y ~ x, data = data.frame(x = x, y = y) )$residuals )^2)}

# Find the "knee" of a curve: the cut point minimizing the combined SSE of two
# linear fits (left of the cut and right of the cut). Returns c(x, y) at the knee.
# NOTE(review): the trailing `- 1` shifts the reported knee one sample left of the
# SSE minimum; looks intentional (bias toward smaller eps) but is undocumented.
findKnee <- function( inX, inY )
{
  orderX = order( inX )
  inX = inX[orderX]
  inY = inY[orderX]
  L = length(inX)
  resV = rep(Inf, L)
  first = 3      # ignore a few samples at both ends to stabilize the fits
  last = L - 3
  for (i in (first + 2):(last - 2))
  {
    x = inX[first:(i - 1)]
    y = inY[first:(i - 1)]
    resid = SSE(x, y)
    x = inX[(i + 1):last]
    y = inY[(i + 1):last]
    resV[i] = resid + SSE(x, y)
  }
  mi = which.min(resV) - 1
  return( c(inX[mi], inY[mi]) )
}

# TRUE iff the column is numeric and non-constant (usable for clustering).
correctColumn <- function(someColumn){ is.numeric(someColumn) && length(unique(someColumn)) > 1 }

# Euclidean distance between two points.
euc.dist <- function(x1, x2) sqrt(sum((x1 - x2) ^ 2))

# Draw the convex hull of a point set as a dotted closed polyline.
plotCH = function(xcoord, ycoord, lcolor){
  hpts <- chull(x = xcoord, y = ycoord)
  hpts <- c(hpts, hpts[1])  # close the polygon
  lines( xcoord[hpts], ycoord[hpts], col = lcolor, lty = 3 )
}

# For each cluster center, return the row of `orig_data` whose point in
# `clean_data` (first ncol(cluster_centers) columns) is closest to that center.
getDelegates <- function(orig_data, clean_data, cluster_centers)
{
  nc <- nrow(cluster_centers)
  dc <- ncol(cluster_centers)
  nr <- nrow(clean_data)
  delegates <- NULL
  for(clus in seq(1, length.out = nc))
  {
    B <- matrix(rep(cluster_centers[clus, ], times = nr), nrow = nr, ncol = dc, byrow = TRUE)
    D <- clean_data[, c(1:dc)] - B
    ed <- apply(D^2, 1, sum)  # squared euclidean distance to this center
    delegates <- rbind(delegates, orig_data[which.min(ed), ])
  }
  return(delegates)
}

# Heuristic for the dbscan minPts parameter: larger for higher dimensionality
# (2*dim - 1) and grows slowly with the number of rows.
autoParamMinPtsFunc <- function(mydata, defaultMinPts = 5, extraPointPerRowCount = 250)
{
  nc <- ncol( mydata )
  nr <- nrow( mydata )
  minPts <- max(defaultMinPts, (2 * nc - 1)) + floor( nr / extraPointPerRowCount )
  return(minPts)
}

# Automatic eps ("granularity") estimation: knee point of the sorted
# kNN-distance curve, computed on a row subsample of at most maxPoints.
autoEpsFunc <- function(mydata, maxPoints, reachMinPts, alpha = 0.6)
{
  if(nrow(mydata) > maxPoints)
    mydata <- mydata[sample(nrow(mydata), maxPoints), ]

  avgNNmp <- avgNearestDist(mydata, 1, reachMinPts) # to reachMinPts nearest
  avgNN1 <- avgNearestDist(mydata, 1, 1)            # to nearest
  avgNN = alpha * avgNN1 + (1 - alpha) * avgNNmp    # currently unused blend
  eps = findKnee(1:length(avgNNmp), avgNNmp)[2]
  #eps = findKnee(1:length(avgNN1), avgNN1)[2]
  return(eps)
}

# Map a user granularity level g in (0, 1] to an eps value: the g-quantile of
# the sorted reachMinPts-nearest-neighbour distances.
granularity2epsMinMax <- function(mydata, g, maxPoints, reachMinPts)
{
  # subsample rows if needed
  # FIX: was `ncol(mydata) > maxPoints` / `sample(ncol(...))` — the guard was
  # comparing the column count, so large datasets were never subsampled
  # (compare autoEpsFunc above, which correctly uses nrow).
  if(nrow(mydata) > maxPoints)
    mydata <- mydata[sample(nrow(mydata), maxPoints), ]

  avgNN <- avgNearestDist(mydata, reachMinPts, reachMinPts)
  resEps = as.numeric(quantile(avgNN, g))
  return(resEps)
}

# Compute eps as a fraction of the bounding-box diagonal (not in use).
getEps <- function(mydata, frac = 0.04){euc.dist(sapply(mydata, max), sapply(mydata, min)) * frac}

# Per-cluster centroids: row c holds the column means of the points labeled c.
# Outliers (label 0) are excluded.
centersFromClusters <- function(mydata, clusters)
{
  NC <- max(clusters)
  centers <- NULL
  for (c in seq(1, length.out = NC))
  {
    centers = rbind(centers, apply(mydata[clusters == c, ], 2, mean))
  }
  return(centers)
}

# n colors from the requested palette family.
# NOTE(review): the "gray" branch returns n+1 colors (gray(0:n/n)); callers index
# only the first entries, so the extra color is unused — confirm before changing.
myPallete = function(n = 100, palleteType = "rainbow")
{
  mp = rainbow(n)
  if(palleteType == "heat") mp = heat.colors(n)
  if(palleteType == "terrain") mp = terrain.colors(n)
  if(palleteType == "topo") mp = topo.colors(n)
  if(palleteType == "cm") mp = cm.colors(n + 1)[-1] #remove white
  if(palleteType == "gray") mp = gray(0:n / n)
  return(mp)
}

# variables to come from group:show
addLabel2clusterDelegate = TRUE
addLabel2points = TRUE
addLegend = TRUE

#################################################################################
### The function to produce visual
#################################################################################

#********* PBI Parameters Block ***************
# Values / PointLabels are injected by the Power BI host; default them to NULL
# so the script can run (and blank-plot) when data roles are unbound.
if(!exists("Values")) Values = NULL
if(!exists("PointLabels")) PointLabels = NULL

addLabel2points = TRUE #default
if (exists("settings_labeling_params_show")) { addLabel2points = settings_labeling_params_show }

addLabel2clusterDelegate = FALSE #default
if (exists("settings_representative_params_show")) { addLabel2clusterDelegate = settings_representative_params_show }

addLegend = TRUE #default
if (exists("settings_legend_params_show")) { addLegend = settings_legend_params_show }

# For every disabled settings card, remove its variables so the defaults below apply.
if(exists("settings_prepocessing_params_show") && settings_prepocessing_params_show == FALSE) rm(list= ls(pattern = "settings_prepocessing_params_" ))
if(exists("settings_clusterNum_params_show") && settings_clusterNum_params_show == FALSE) rm(list= ls(pattern = "settings_clusterNum_params_" ))
if(exists("settings_viz_params_show") && settings_viz_params_show == FALSE) rm(list= ls(pattern = "settings_viz_params_" ))
if(exists("settings_labeling_params_show") && settings_labeling_params_show == FALSE) rm(list= ls(pattern = "settings_labeling_params_" ))
if(exists("settings_representative_params_show") && settings_representative_params_show == FALSE) rm(list= ls(pattern = "settings_representative_params_" ))
if(exists("settings_legend_params_show") && settings_legend_params_show == FALSE) rm(list= ls(pattern = "settings_legend_params_" ))
if(exists("settings_additional_params_show") && settings_additional_params_show == FALSE) rm(list= ls(pattern = "settings_additional_params_" ))

##PBI_PARAM: display_name: Scale data, tooltip:Used to standardize the range of features of data
# Type: bool, default:FALSE,
# Min: , Max:
scaleData = FALSE #default
if (exists("settings_prepocessing_params_scaleData")) { scaleData = settings_prepocessing_params_scaleData }

##PBI_PARAM: display_name: Apply PCA, tooltip:Recommended for data with more than two dimensions
# Type: bool, default:FALSE,
# Min: , Max:
applyPCA = FALSE #default
if (exists("settings_prepocessing_params_applyPCA")) { applyPCA = settings_prepocessing_params_applyPCA }

##PBI_PARAM: display_name: Granularity method, tooltip:Select preferable method to set granularity parameter(eps)
# Type: enumeration, default:'scale',
# Min: , Max:
# enumeration options: auto ,scale ,manual ,
granularityParameterType = 'auto' #default
if (exists("settings_clusterNum_params_granularityParameterType")) { granularityParameterType = settings_clusterNum_params_granularityParameterType }

##PBI_PARAM: display_name: Granularity, tooltip:User-defined granularity level, used only if autoGranularity = FALSE. Smaller values correspond to more clusters
# Type: numeric, default:50,
# Min: 1, Max:100
granularityLevel = 50 #default
if (exists("settings_clusterNum_params_percentile")) {
  granularityLevel = settings_clusterNum_params_percentile
  granularityLevel = max( min (granularityLevel, 100), 1)
}

##PBI_PARAM: display_name: Reachability distance, tooltip:How close points should be to each other to be considered a part of a cluster (eps)
# Type: numeric, default:0,
# Min: 0, Max:1e+08
eps = 0 #default
if (exists("settings_clusterNum_params_eps")) {
  eps = settings_clusterNum_params_eps
  eps = max( min (eps, 1e+08), 0)
}

##PBI_PARAM: display_name: Find minimum points automatically, tooltip:
# Type: bool, default:TRUE,
# Min: , Max:
autoParamMinPts = TRUE #default
if (exists("settings_clusterNum_params_autoParamMinPts")) { autoParamMinPts = settings_clusterNum_params_autoParamMinPts }

##PBI_PARAM: display_name: Minimum points per cluster, tooltip:User-defined minimum points parameter. Smaller values correspond to more clusters
# Type: numeric, default:5,
# Min: 1, Max:1000
minPtsParam = 5 #default
if (exists("settings_clusterNum_params_minPtsParam")) {
  minPtsParam = settings_clusterNum_params_minPtsParam
  minPtsParam = max( min (minPtsParam, 1000), 1)
}

##PBI_PARAM: display_name: Draw ellipse, tooltip:
# Type: bool, default:FALSE,
# Min: , Max:
drawEllipse = FALSE #default
if (exists("settings_viz_params_drawEllipse")) { drawEllipse = settings_viz_params_drawEllipse }

##PBI_PARAM: display_name: Draw convex hull, tooltip:
# Type: bool, default:FALSE,
# Min: , Max:
drawConvexHull = FALSE #default
if (exists("settings_viz_params_drawConvexHull")) { drawConvexHull = settings_viz_params_drawConvexHull }

##PBI_PARAM: display_name: Draw centroid, tooltip:
# Type: bool, default:FALSE,
# Min: , Max:
drawCenters = FALSE #default
if (exists("settings_viz_params_drawCenters")) { drawCenters = settings_viz_params_drawCenters }

##PBI_PARAM: display_name: Point opacity, tooltip:
# Type: numeric, default:30,
# Min: 0, Max:100
pointOpacity = 30 #default
if (exists("settings_viz_params_percentile")) {
  pointOpacity = settings_viz_params_percentile
  pointOpacity = max( min (pointOpacity, 100), 0)
}

##PBI_PARAM: display_name: Point size, tooltip:
# Type: numeric, default:10,
# Min: 1, Max:50
pointSize = 10 #default
if (exists("settings_viz_params_weight")) {
  pointSize = settings_viz_params_weight
  pointSize = max( min (pointSize, 50), 1)
}

##PBI_PARAM: display_name: Font size, tooltip:
# Type: numeric, default:8,
# Min: 8, Max:40
labelingFontSize = 8 #default
if (exists("settings_labeling_params_textSize")) {
  labelingFontSize = settings_labeling_params_textSize
  labelingFontSize = max( min (labelingFontSize, 40), 8)
}

##PBI_PARAM: display_name: Label opacity, tooltip:
# Type: numeric, default:100,
# Min: 0, Max:100
labelOpacity = 100 #default
if (exists("settings_labeling_params_percentile")) {
  labelOpacity = settings_labeling_params_percentile
  labelOpacity = max( min (labelOpacity, 100), 0)
}

##PBI_PARAM: display_name: Limit label length, tooltip:Abbreviate labels to a limited length
# Type: numeric, default:5,
# Min: 1, Max:100
maxLenPointLabel = 5 #default
if (exists("settings_labeling_params_maxLenPointLabel")) {
  maxLenPointLabel = settings_labeling_params_maxLenPointLabel
  maxLenPointLabel = max( min (maxLenPointLabel, 100), 1)
}

##PBI_PARAM: display_name: Percentage of points labeled, tooltip:Avoids cluttering text
# Type: numeric, default:100,
# Min: 0, Max:100
percPointsLabeled = 100 #default
if (exists("settings_labeling_params_percentile1")) {
  percPointsLabeled = settings_labeling_params_percentile1
  percPointsLabeled = max( min (percPointsLabeled, 100), 0)
}

##PBI_PARAM: display_name: Font size, tooltip:
# Type: numeric, default:8,
# Min: 1, Max:40
representativeFontSize = 8 #default
if (exists("settings_representative_params_textSize")) {
  representativeFontSize = settings_representative_params_textSize
  representativeFontSize = max( min (representativeFontSize, 40), 1)
}

##PBI_PARAM: display_name: Limit label length, tooltip:Abbreviate labels to a limited length
# Type: numeric, default:30,
# Min: 1, Max:100
maxLenDelegate = 30 #default
if (exists("settings_representative_params_maxLenDelegate")) {
  maxLenDelegate = settings_representative_params_maxLenDelegate
  maxLenDelegate = max( min (maxLenDelegate, 100), 1)
}

##PBI_PARAM: display_name: Pallete type, tooltip:Color pallete type
# Type: enumeration, default:'rainbow',
# Min: , Max:
# enumeration options: rainbow ,gray ,cm ,topo ,terrain ,heat ,
palleteType = 'rainbow' #default
if (exists("settings_legend_params_palleteType")) { palleteType = settings_legend_params_palleteType }

##PBI_PARAM: display_name: Color of outliers, tooltip:
# Type: fill, default:'black',
# Min: , Max:
outLierCol = 'black' #default
if (exists("settings_legend_params_outLierCol")) { outLierCol = settings_legend_params_outLierCol }

##PBI_PARAM: display_name: Show warnings, tooltip:
# Type: bool, default:TRUE,
# Min: , Max:
showWarnings = TRUE #default
if (exists("settings_additional_params_showWarnings")) { showWarnings = settings_additional_params_showWarnings }

# ---- Main script: validate input, cluster with DBSCAN, render the plot ----
{
  if(eps == 0) #not valid "eps" => "auto" mode
    eps = NULL

  # derived visual parameters (settings are given on 0..100 / 1..50 scales)
  delegateCex = representativeFontSize/10
  skipLabel2points = max(100/as.numeric(percPointsLabeled) , 1)
  labelOpacity = labelOpacity/100
  pointOpacity = pointOpacity/100
  cexLabel2points = labelingFontSize/10
  pointMarkerSize = pointSize/10
  drawPoints = TRUE

  # assemble the working dataset from the bound data roles
  if(!is.null(Values))
    dataset = Values
  if(!is.null(PointLabels))
    PointLabels[,1] = as.character(PointLabels[,1])
  if(!is.null(PointLabels) && !is.null(Values))
    dataset = cbind(PointLabels, dataset)
  if(!is.null(PointLabels) && is.null(Values))
    dataset = PointLabels
  if(addLabel2points && is.null(PointLabels))
    addLabel2points = FALSE

  ###############Upfront input correctness validations (where possible)#################
  minPtsParam = round(minPtsParam)
  pbiWarning <- NULL
  dataset <- na.omit(dataset) # deletion of missing
  orig_dataset <- dataset #used later for delegates

  # verify correctness of dataset
  useColumns <- sapply(dataset, correctColumn)
  if(showWarnings && sum(useColumns[-1]) < ncol(dataset) - 1)
    pbiWarning <- "At least one of the columns was not numeric, or constant"

  #exclude defect columns
  dataset <- as.data.frame(dataset[, useColumns])
  nc <- ncol(dataset)
  nr <- nrow(dataset)

  checkDimensionality <- TRUE
  if(nc < 2 || nr < minSamplesToRun || is.null(Values))
  {
    checkDimensionality <- FALSE
    if(showWarnings)
      pbiWarning <- paste(pbiWarning, "\nNot enough input dimensions")
  }
  if(!autoParamMinPts && minPtsParam >= nr)
  {
    checkDimensionality <- FALSE
    if(showWarnings)
      pbiWarning <- paste(pbiWarning, "\nParameter minPts is out of limits")
  }

  ##############Main Visualization script###########
  maxGLevel = 100 # for "scale" mode maxGLevel can be used to squeeze the range
  set.seed(randSeed)

  if(!checkDimensionality)
  {
    plot.new()  # empty canvas; only the warning subtitle (below) is shown
  }else{
    if(scaleData)
    {
      dataset <- as.data.frame(scale(dataset))
      names(dataset) = paste(names(dataset), "scaled", sep = ".")
    }
    if(applyPCA)
    {
      dataset.pca <- prcomp(dataset, center = TRUE, scale = F)
      pExplained <- dataset.pca$sdev^2/sum(dataset.pca$sdev^2)
      flags <- (pExplained > insigPC); flags[1:2] = TRUE #at least 2 dimensions
      dataset = as.data.frame(dataset.pca$x[, flags])#reduce dimensions with less than 5% variance
    }

    if(autoParamMinPts)
      minPtsParam = autoParamMinPtsFunc(dataset, extraPointPerRowCount = 175)

    #find eps
    if(granularityParameterType == "auto")
      eps = autoEpsFunc(dataset, maxSamples4autoGranularity, minPtsParam)
    if(granularityParameterType == "scale")
      eps <- granularity2epsMinMax(dataset, granularityLevel/maxGLevel, maxSamples4autoGranularity, minPtsParam)
    if(is.null(eps)) # "manual" mode with eps == 0 falls back to auto
      eps = autoEpsFunc(dataset, maxSamples4autoGranularity, minPtsParam)

    #DBSCAN call (cluster 0 = outliers)
    cl <- dbscan::dbscan(dataset, eps, minPts = minPtsParam, scale = FALSE, borderPoints = TRUE)
    numOfClusters = max(cl$cluster)
    cl$centers <- centersFromClusters(dataset, cl$cluster)

    drawColors <- c(outLierCol, myPallete(numOfClusters,palleteType = palleteType))
    # FIX: was rainbow(numOfClusters) — ellipse/convex-hull colors ignored the
    # user-selected palette and mismatched the point colors for any non-rainbow choice.
    drawColorsLeg <- c(myPallete(numOfClusters, palleteType = palleteType), outLierCol)
    drawPch <- c(outLierPch, rep(19, numOfClusters))

    #visualize 2 first coordinates
    if(drawPoints)
      colpoints = drawColors[cl$cluster+1]
    else
      colpoints = NULL
    pchPoints = drawPch[cl$cluster+1]

    #in case of legend extend xlim to the right by 20%
    xrange = range(dataset[, 1])
    drange = xrange[2]-xrange[1]
    xlim = c(xrange[1]-0.01*drange, xrange[2]+0.01*drange+drange*0.20*addLegend)

    plot(dataset[, 1], dataset[, 2], col = alpha(colpoints, pointOpacity), pch = pchPoints,
         xlab = cutStr2Show(names(dataset)[1], strCex =1.1, isH = TRUE),
         ylab = cutStr2Show(names(dataset)[2], strCex =1.1, isH = FALSE),
         xlim = xlim, cex = pointMarkerSize)

    leg <- NULL
    if(!is.null(cl$centers))
      leg <- paste("Cluster ", seq(1, length.out = numOfClusters)) #text
    pc <- c(rep(19, numOfClusters)) # markers
    colMarkers <- drawColors[-1]

    if(drawCenters && !is.null(cl$centers))
    {
      points(cl$centers, pch = 7, col = drawColors[-1])
      leg <- cbind(leg, paste("Cluster center " , seq(1, length.out = numOfClusters)))
      pc <- cbind(pc, rep(7, numOfClusters))
      colMarkers <- rep(colMarkers, 2)
    }
    # FIX: legend label typo was "Outlers"
    leg <- c(leg, "Outliers")
    pc = c(pc, outLierPch)
    colMarkers <- c(colMarkers, outLierCol)

    if(drawEllipse)
    {
      for(clus in seq(1, length.out = numOfClusters))
      {
        iii <- (cl$cluster == clus)
        if(sum(iii) > 2)
          dataEllipse(dataset[iii, 1], dataset[iii, 2], add = T, plot.points = F, levels = 0.85,
                      col = drawColorsLeg[clus], lwd = 1, fill = TRUE, fill.alpha = 0.075, center.pch = NULL)
      }
    }
    if(drawConvexHull)
    {
      for(clus in seq(1, length.out = numOfClusters))
      {
        iii <- (cl$cluster == clus)
        if(sum(iii) > 2)
          plotCH(dataset[iii, 1], dataset[iii, 2], lcolor = drawColorsLeg[clus])
      }
    }

    # one representative ("delegate") row printed at each cluster center
    if(addLabel2clusterDelegate)
    {
      clean_data = dataset
      cluster_centers = (cl$centers)
      if(!is.null(cluster_centers))
      {
        deleg <- getDelegates(orig_dataset, dataset, cl$centers)
        delegateText = abbreviate(apply(deleg, 1, toString),maxLenDelegate)
        delegateText = sapply(delegateText, cutStr2Show, strCex = delegateCex, partAvailable = 0.75)
        text(x = cl$centers[, 1], y = cl$centers[, 2], delegateText, col = "black", cex = delegateCex)
      }
    }

    # label a random subset of points (percPointsLabeled percent)
    if(addLabel2points)
    {
      iii = sample(1:nrow(dataset), max(1, floor(nrow(dataset)/skipLabel2points)))
      text(x = dataset[iii, 1], y = dataset[iii, 2],
           labels = abbreviate(orig_dataset[iii,1], maxLenPointLabel),
           col = alpha(colpoints[iii], labelOpacity), cex = cexLabel2points)
    }

    if(addLegend && validateIfToShowLegend(numClust = numOfClusters, textSize = legendTextSize ))
      legend("topright", legend = leg, pch = pc, col = alpha(colMarkers, 1), cex = legendTextSize)
  }

  if(showWarnings && !is.null(pbiWarning))
  {
    pbiWarning = cutStr2Show(pbiWarning, strCex = warningCex)
    title(main = NULL, sub = pbiWarning, outer = FALSE, col.sub = "gray50", cex.sub = warningCex)
  }
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rotate3d.R \name{flip3D} \alias{flip3D} \title{Flip a 3D array along an axis.} \usage{ flip3D(volume, axis = 1L, how = "horizontally") } \arguments{ \item{volume}{a 3D image volume} \item{axis}{positive integer in range 1L..3L or an axis name, the axis to use.} \item{how}{character string, one of 'horizontally' / 'h' or 'vertically' / 'v'. How to flip the 2D slices. Note that flipping *horizontally* means that the image will be mirrored along the central *vertical* axis.} } \value{ a 3D image volume, flipped around the axis. The dimensions are identical to the dimensions of the input image. } \description{ Flip the slices of a 3D array horizontally or vertically along an axis. This leads to an output array with identical dimensions. } \seealso{ Other volume math: \code{\link{rotate3D}()} } \concept{volume math}
/man/flip3D.Rd
permissive
dfsp-spirit/freesurferformats
R
false
true
904
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rotate3d.R \name{flip3D} \alias{flip3D} \title{Flip a 3D array along an axis.} \usage{ flip3D(volume, axis = 1L, how = "horizontally") } \arguments{ \item{volume}{a 3D image volume} \item{axis}{positive integer in range 1L..3L or an axis name, the axis to use.} \item{how}{character string, one of 'horizontally' / 'h' or 'vertically' / 'v'. How to flip the 2D slices. Note that flipping *horizontally* means that the image will be mirrored along the central *vertical* axis.} } \value{ a 3D image volume, flipped around the axis. The dimensions are identical to the dimensions of the input image. } \description{ Flip the slices of a 3D array horizontally or vertically along an axis. This leads to an output array with identical dimensions. } \seealso{ Other volume math: \code{\link{rotate3D}()} } \concept{volume math}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods.R \name{outliers} \alias{outliers} \title{Extracting the outliers} \usage{ outliers(object, as.ts = FALSE) } \arguments{ \item{object}{a praislm or twoStepsBenchmark object.} \item{as.ts}{a boolean of length 1. If \code{TRUE}, the returned outliers are returned as a time series with (dim and colnames). If \code{FALSE}, the returned outliers are the named list that was submitted as a function argument.} } \value{ a named list or a time series, depending on the argument \code{"as.ts"}. } \description{ The function \code{outliers} returns the outliers from either a \link{praislm} or a \link{twoStepsBenchmark} object. } \keyword{internal}
/man/outliers.Rd
permissive
InseeFr/disaggR
R
false
true
735
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods.R \name{outliers} \alias{outliers} \title{Extracting the outliers} \usage{ outliers(object, as.ts = FALSE) } \arguments{ \item{object}{a praislm or twoStepsBenchmark object.} \item{as.ts}{a boolean of length 1. If \code{TRUE}, the returned outliers are returned as a time series with (dim and colnames). If \code{FALSE}, the returned outliers are the named list that was submitted as a function argument.} } \value{ a named list or a time series, depending on the argument \code{"as.ts"}. } \description{ The function \code{outliers} returns the outliers from either a \link{praislm} or a \link{twoStepsBenchmark} object. } \keyword{internal}
# Churn prediction on telecom data: split the data into train/test sets, then
# fit a random forest and rpart classification trees on charge/usage features.
rm(list = ls())  # NOTE(review): clearing the workspace inside a script is discouraged
library(dplyr)
library(tidyr)

setwd("~/RStats")  # NOTE(review): hard-coded working directory; prefer project-relative paths

df <- read.csv("data/Churn.csv", stringsAsFactors = FALSE)
names(df)

# The response must be a factor for classification: randomForest() errors on a
# character response, and rpart() would otherwise have to guess the method.
df$Churn <- as.factor(df$Churn)

# Split the columns into a "contract" table and a "usage" table,
# keeping the (Area.Code, Phone) keys in both.
contract <- df %>%
  select(Account.Length, VMail.Message, Day.Mins, Eve.Mins, Night.Mins,
         Intl.Mins, CustServ.Calls, Churn, Int.l.Plan, VMail.Plan,
         Area.Code, Phone)

usage <- df %>%
  select(Area.Code, Phone, Day.Calls, Day.Charge, Eve.Calls, Eve.Charge,
         Night.Calls, Night.Charge, Intl.Calls, Intl.Charge, State)

write.csv(usage, "Phone_Usage.csv", row.names = FALSE)
write.csv(contract, "Contracts.csv", row.names = FALSE)

# Round-trip: join the two halves back together on their shared key columns.
df2 <- inner_join(usage, contract)

# quick check
table(df$Churn)

# Split the dataset into Train and Test (first 80% of rows)
split <- as.integer(nrow(df) * 0.8)
train <- df[1:split, ]
test <- df[(split + 1):nrow(df), ]

# Here's an arguably better way to split: a random 80% sample of row indices.
set.seed(4444)
train_rows <- sample(1:nrow(df), 0.8 * nrow(df))
str(train_rows)
train <- df[train_rows, ]
test <- df[-train_rows, ]
table(train$Churn)
table(test$Churn)

# Random Forest prediction of Churn data
names(df)
library(randomForest)
fit <- randomForest(Churn ~ CustServ.Calls + Day.Charge + Eve.Charge +
                      Night.Charge + Intl.Charge,
                    data = df)
print(fit)         # view results
importance(fit)    # importance of each predictor
varImpPlot(fit)    # fixed: text(fit, pretty = 0) is an rpart idiom, invalid for randomForest

# ----------------------------------------
library(rpart)
# grow tree
tree <- rpart(Churn ~ CustServ.Calls + Day.Charge + Eve.Charge +
                Night.Charge + Intl.Charge,
              method = "class", data = train)

formula2 <- Churn ~ CustServ.Calls + Intl.Charge
fit2 <- rpart(formula2, method = "class", data = train)

printcp(fit2)
plotcp(fit2)  # fixed: was plotcp(fit), but fit is a randomForest, not an rpart model
summary(fit2)

# plot tree
plot(tree, uniform = TRUE, main = "Classification Tree for Churn")
text(tree, use.n = TRUE, all = TRUE, cex = 0.8)

# create attractive postscript plot of tree
post(tree,  # fixed: was post(fit); post() expects an rpart object
     file = "churn_tree.ps",
     title = "Classification Tree for Churn Data")

# Try some predictions
variablesToUse <- c("CustServ.Calls", "Day.Charge", "Eve.Charge",
                    "Night.Charge", "Intl.Charge")
treePredictions <- predict(tree, newdata = test, type = "class")
table(treePredictions, test$Churn)

# predictions using formula 2
pred_fit2 <- predict(fit2, newdata = test, type = "class")
table(pred_fit2, test$Churn)
/usecases/churn_prediction.R
no_license
Ram-N/analysis-class
R
false
false
2,292
r
rm(list=ls()) library(dplyr) library(tidyr) setwd("~/RStats") df <- read.csv('data/Churn.csv', stringsAsFactors = F) names(df) contract <- df %>% select(Account.Length, VMail.Message, Day.Mins, Eve.Mins, Night.Mins, Intl.Mins, CustServ.Calls, Churn, Int.l.Plan, VMail.Plan, Area.Code, Phone) usage <- df%>% select(Area.Code, Phone, Day.Calls, Day.Charge,Eve.Calls, Eve.Charge, Night.Calls, Night.Charge, Intl.Calls, Intl.Charge, State) write.csv(usage, "Phone_Usage.csv", row.names = F) write.csv(contract, "Contracts.csv", row.names = F) df2 <- inner_join(usage, contract) #quick check table(df$Churn) #split the dataset into Train and Test split = as.integer(nrow(df)*0.8) train = df[1:split,] test = df[(split+1):nrow(df), ] ## here's an arguably better way to split set.seed(4444) train_rows = sample(1:nrow(df), 0.8*nrow(df)) str(train_rows) train = df[train_rows, ] test <- df[-train_rows,] table(train$Churn) table(test$Churn) # Random Forest prediction of Churn data names(df) library(randomForest) fit <- randomForest(Churn ~ CustServ.Calls + Day.Charge + Eve.Charge + Night.Charge + Intl.Charge, data=df) print(fit) # view results importance(fit) # importance of each predictor text(fit, pretty=0) #---------------------------------------- library(rpart) # grow tree tree <- rpart(Churn ~ CustServ.Calls + Day.Charge + Eve.Charge + Night.Charge + Intl.Charge, method="class", data=train) formula2 <- Churn ~ CustServ.Calls + Intl.Charge fit2 <- rpart(formula2, method="class", data=train) printcp(fit2) plotcp(fit) summary(fit2) # plot tree plot(tree, uniform=TRUE, main="Classification Tree for Churn") text(tree, use.n=TRUE, all=TRUE, cex=.8) # create attractive postscript plot of tree post(fit, file = "churn_tree.ps", title = "Classification Tree for Churn Data") #Try some predictions variablesToUse <- c("CustServ.Calls", "Day.Charge", "Eve.Charge", "Night.Charge", "Intl.Charge") treePredictions <- predict(tree, newdata=test, type='class') table(treePredictions, test$Churn) 
#predictions using formula 2 pred_fit2 <- predict(fit2, newdata=test, type='class') table(pred_fit2, test$Churn)
# Purpose: collect per-comparison DEG result files (deg_biopsy*.txt) into one
# wide table with an adj.P.Val and rstat column per comparison, with rows
# aligned on the union of gene names across all files.

# Set R terminal width to actual terminal size
#options(width=Sys.getenv("COLUMNS"))

# Make all repositories available for package download:
setRepositories(ind = c(1,2,3,4,5,6,7,8,9))

# load libraries

# define Platform and initial path ----------------------------
if ( grepl(x = R.Version()["os"], pattern = "linux" ) ) {
  pathPrefix <- "/data/SKDngs01/pg/"
} else {
  pathPrefix <- "X:/pg/"
}

# Set your working directory where output files will be saved ---------
workingDir <- paste(pathPrefix , "collaborators/junke/eigenTrait_DEG/Raw", sep = "");
# setwd(dir = workingDir)

# read in file names
# b1_tub_comps <- dir(pattern = "_T_*", all.files = F,full.names = T,include.dirs = F, recursive = F)
# b2_tub_comps <- dir(pattern = "_T_*", all.files = F,full.names = T,include.dirs = F, recursive = F)
# glom_comps <- dir(pattern = "*.txt", all.files = F,full.names = T,include.dirs = F, recursive = T)
#combined_comps <- c(tub_comps,glom_comps)
# NOTE(review): dir() patterns are regular expressions, not globs -- "deg_biopsy*"
# means "deg_biops" followed by zero or more "y" characters, which happens to work here.
combined_comps <- dir(pattern = "deg_biopsy*", all.files = F,full.names = T,include.dirs = F, recursive = F)

# collection of resulting outputs from analysis
res <- list()
# the two statistics kept from each comparison file
colsToExtract <- c("adj.P.Val","rstat")

# isFirst <- TRUE
# union of gene names seen across all files; defines the output row order
genesOrder <- character()

message("Preprocessing comparisons...")
for (i in combined_comps) {
  message("Reading file: ",i)
  # the summary file is not a comparison -- skip it
  if (grepl(pattern = "dge_Summary.txt", x = i, fixed = T)) {
    message("Skipping")
    next;
  }
  output <- read.table(file = i,header = T,sep = "\t",quote = "",as.is = T)
  # NOTE(review): this errors if geneName contains duplicates -- confirm inputs are unique
  rownames(output) <- output[,"geneName"]
  # parse file name into a short comparison label used as the list key
  i <- gsub(pattern = ".txt",replacement = "", x = i, fixed = T)
  i <- gsub(pattern = "./deg_",replacement = "", x = i, fixed = T)
  i <- gsub(pattern = "biopsy_",replacement = "B_", x = i, fixed = T)
  i <- gsub(pattern = "_X.",replacement = "_", x = i, fixed = T)
  message("Name parsed as: ",i)
  # if (isFirst) {
  #   genesOrder <- setNames(object = as.character(output[,"geneName"]), nm = as.character(row.names(output)))
  #   isFirst <- FALSE
  # }
  # res[[i]] <- output[names(genesOrder), colsToExtract]
  genesOrder <- unique(c(genesOrder,as.character(output[,"geneName"])))
  res[[i]] <- output[, colsToExtract]
}

# re-index every comparison to the common gene order
# (genes missing from a file become NA rows)
for (i in names(res)) {
  message("Resorting: ", i)
  res[[i]] <- res[[i]][genesOrder,]
}

# build "<comparison>_<statistic>" column headers, two columns per comparison
FirstHeader <- rep(names(res), each = 2)
SecondHeader <- rep(colsToExtract, length(res))
FinalHeader <- paste(FirstHeader,SecondHeader, sep = "_")

# flatten the list of 2-column data frames into one matrix; unlist() emits each
# data frame column by column, and matrix() fills column-wise, so the column
# order matches FinalHeader
resDF <- as.data.frame(matrix(unlist(res), nrow = length(genesOrder), ncol = length(res)*length(colsToExtract)))
colnames(resDF) <- FinalHeader
FinalDF <- cbind(Symbol = genesOrder, resDF)

# write the combined table both as TSV and as a compressed RDS
write.table(x = FinalDF, file = "DEGTableWithAllComps.txt", sep = "\t", quote = F, row.names = F, col.names = T)
saveRDS(object = FinalDF, compress = T, file = "DEGTableWithAllComps.rds")
/compileDEGTableWithAllComps.R
no_license
Kris69w/BI_Summer
R
false
false
2,877
r
# Set R terminal widht to actual terminal size #options(width=Sys.getenv("COLUMNS")) # Make all repositories available for package download: setRepositories(ind = c(1,2,3,4,5,6,7,8,9)) # load libraries # define Platform and initial path ---------------------------- if ( grepl(x = R.Version()["os"], pattern = "linux" ) ) { pathPrefix <- "/data/SKDngs01/pg/" } else { pathPrefix <- "X:/pg/" } # Set your working directory where output files will be saved --------- workingDir <- paste(pathPrefix , "collaborators/junke/eigenTrait_DEG/Raw", sep = ""); # setwd(dir = workingDir) # read in file names # b1_tub_comps <- dir(pattern = "_T_*", all.files = F,full.names = T,include.dirs = F, recursive = F) # b2_tub_comps <- dir(pattern = "_T_*", all.files = F,full.names = T,include.dirs = F, recursive = F) # glom_comps <- dir(pattern = "*.txt", all.files = F,full.names = T,include.dirs = F, recursive = T) #combined_comps <- c(tub_comps,glom_comps) combined_comps <- dir(pattern = "deg_biopsy*", all.files = F,full.names = T,include.dirs = F, recursive = F) # collection of resulting outputs from analysis res <- list() colsToExtract <- c("adj.P.Val","rstat") # isFirst <- TRUE genesOrder <- character() message("Preprocessing comparisons...") for (i in combined_comps) { message("Reading file: ",i) if (grepl(pattern = "dge_Summary.txt", x = i, fixed = T)) { message("Skipping") next; } output <- read.table(file = i,header = T,sep = "\t",quote = "",as.is = T) rownames(output) <- output[,"geneName"] # parse file name i <- gsub(pattern = ".txt",replacement = "", x = i, fixed = T) i <- gsub(pattern = "./deg_",replacement = "", x = i, fixed = T) i <- gsub(pattern = "biopsy_",replacement = "B_", x = i, fixed = T) i <- gsub(pattern = "_X.",replacement = "_", x = i, fixed = T) message("Name parsed as: ",i) # if (isFirst) { # genesOrder <- setNames(object = as.character(output[,"geneName"]), nm = as.character(row.names(output))) # isFirst <- FALSE # } # res[[i]] <- output[names(genesOrder), 
colsToExtract] genesOrder <- unique(c(genesOrder,as.character(output[,"geneName"]))) res[[i]] <- output[, colsToExtract] } for (i in names(res)) { message("Resorting: ", i) res[[i]] <- res[[i]][genesOrder,] } FirstHeader <- rep(names(res), each = 2) SecondHeader <- rep(colsToExtract, length(res)) FinalHeader <- paste(FirstHeader,SecondHeader, sep = "_") resDF <- as.data.frame(matrix(unlist(res), nrow = length(genesOrder), ncol = length(res)*length(colsToExtract))) colnames(resDF) <- FinalHeader FinalDF <- cbind(Symbol = genesOrder, resDF) write.table(x = FinalDF, file = "DEGTableWithAllComps.txt", sep = "\t", quote = F, row.names = F, col.names = T) saveRDS(object = FinalDF, compress = T, file = "DEGTableWithAllComps.rds")
# Purpose: clean H.J. Andrews phenology observations into the common
# (site, individual, species, phenophase, doy, status) format used downstream.
library(tidyverse)
library(lubridate)
source('./data_preprocessing/processing_utils.R')

species_names = read_csv('raw_data/hjandrews/species_names.csv')
species_status_info = read_csv('raw_data/hjandrews/species_status_info.csv')

# Raw observations: map species codes to names, then stack the vegetative and
# reproductive observation columns into long (obs_type, obs_code) form.
# (the "vegatative" spelling is kept as-is; it is an internal column name only)
observations = read_csv('raw_data/hjandrews/TV07501_v1_without_encoding.csv') %>%
  rename(species_code=SPECIES) %>%
  left_join(species_names, by='species_code') %>%
  select(year = YEAR, plot = PLOT, individual_id = TAG, date = SAMPLEDATE, species,
         vegatative = VEG_CODE, reproductive = RPRO_CODE) %>%
  gather(obs_type, obs_code, vegatative, reproductive)

# Makes sites numeric to work in the python models
site_info = read_csv('non_npn_site_info.csv') %>%
  filter(dataset=='hjandrews') %>%
  select(Site_ID, plot=note)
observations = observations %>%
  left_join(site_info, by='plot') %>%
  select(-plot)

# Convert observations of leaves/flowers to status codes:
# status is 1 when the recorded code equals the target code for that
# species/observation type, 0 otherwise (NA if either code is missing).
observations = observations %>%
  left_join(species_status_info, by=c('species','obs_type')) %>%
  mutate(status = 1*(obs_code == target_obs_code)) %>%
  select(-obs_type, -obs_code, -target_obs_code)

# Add in day-of-year and run the shared cleaning pipeline
# (helpers come from processing_utils.R sourced above)
observations = observations %>%
  mutate(doy = yday(date)) %>%
  select(-date) %>%
  process_phenology_observations() %>%
  #group_sites_together() %>%
  apply_minimum_observation_threshold(min_num_obs = 30)

write_csv(observations, './cleaned_data/hjandrews_observations.csv')

# per species/phenophase observation counts (for inspection)
species_counts = observations %>%
  group_by(species, Phenophase_ID) %>%
  tally()

# Record the species and phenophase type to use in NPN data filter
species = observations %>%
  select(species, Phenophase_ID) %>%
  distinct() %>%
  mutate(dataset='hjandrews')
append_species_file(species)
/data_preprocessing/compile_hjandrews_data.R
no_license
sdtaylor/phenology_dataset_study
R
false
false
1,701
r
library(tidyverse) library(lubridate) source('./data_preprocessing/processing_utils.R') species_names = read_csv('raw_data/hjandrews/species_names.csv') species_status_info = read_csv('raw_data/hjandrews/species_status_info.csv') observations = read_csv('raw_data/hjandrews/TV07501_v1_without_encoding.csv') %>% rename(species_code=SPECIES) %>% left_join(species_names, by='species_code') %>% select(year = YEAR, plot = PLOT, individual_id = TAG, date = SAMPLEDATE, species, vegatative = VEG_CODE, reproductive = RPRO_CODE) %>% gather(obs_type, obs_code, vegatative, reproductive) #Makes sites numerica to work in the python models site_info = read_csv('non_npn_site_info.csv') %>% filter(dataset=='hjandrews') %>% select(Site_ID, plot=note) observations = observations %>% left_join(site_info, by='plot') %>% select(-plot) #Convert observations of leavs/flowers to status codes. observations = observations %>% left_join(species_status_info, by=c('species','obs_type')) %>% mutate(status = 1*(obs_code == target_obs_code)) %>% select(-obs_type, -obs_code, -target_obs_code) #Add in doy and process observations = observations %>% mutate(doy = yday(date)) %>% select(-date) %>% process_phenology_observations() %>% #group_sites_together() %>% apply_minimum_observation_threshold(min_num_obs = 30) write_csv(observations, './cleaned_data/hjandrews_observations.csv') species_counts = observations %>% group_by(species, Phenophase_ID) %>% tally() #Record the species and phenophase type to use in NPN data filter species = observations %>% select(species, Phenophase_ID) %>% distinct() %>% mutate(dataset='hjandrews') append_species_file(species)
# Plot Tor client counts for Latin American countries: one panel per node type,
# first per country and then aggregated over all countries.
library(ggplot2)  # fixed: require() just returns FALSE when the package is missing; library() fails fast

# Load data
b <- read.csv("clients.csv", stringsAsFactors = FALSE)
summary(b)

# Filter by Latin American countries (ISO 3166-1 alpha-2 codes)
b <- b[b$country %in% c("ar", "bo", "br", "cl", "cr", "do", "ec", "sv",
                        "gt", "hn", "mx", "pa", "py", "pe", "sr", "uy",
                        "ve", "gy", "gf", "ni", "cu"), ]
summary(b)

# Plot per country: one coloured line per country, panelled by node type
ggplot(b, aes(x = as.Date(date, "%Y-%m-%d"), y = clients, colour = country)) +
#  geom_point() +
  geom_line() +
  ggtitle("Latin American Tor clients by Country") +
  scale_x_date(name = "Date") +
#  scale_y_log10() +
  facet_grid(node~.,scales = "free_y")

# Aggregated data from all countries (sum of clients per date and node type)
d <- aggregate(list(clients = b$clients), by = list(date = b$date, node = b$node), FUN = sum)
summary(d)

# Plot aggregated data from all countries
ggplot(d, aes(x = as.Date(date, "%Y-%m-%d"), y = clients, colour = node)) +
  geom_line() +
  ggtitle("Latin American Tor clients") +
  scale_x_date(name = "Date") +
#  facet_grid(node~.,scales = "free_y") +
  expand_limits(y = 0)
/latin-america.r
no_license
arthuredelstein/metrics-playground
R
false
false
983
r
require(ggplot2) # Load data b <- read.csv("clients.csv", stringsAsFactors = FALSE) summary(b) # Filter by latin american countries b <- b[b$country %in% c("ar", "bo", "br", "cl", "cr", "do", "ec", "sv", "gt", "hn", "mx", "pa", "py", "pe", "sr", "uy", "ve", "gy", "gf", "ni", "cu"),] summary(b) # Plot per country ggplot(b, aes(x = as.Date(date, "%Y-%m-%d"), y = clients, colour = country)) + # geom_point() + geom_line() + ggtitle("Latin American Tor clients by Country") + scale_x_date(name = "Date") + # scale_y_log10() + facet_grid(node~.,scales = "free_y") # Aggregated data from all countries d <- aggregate(list(clients = b$clients), by = list(date = b$date, node = b$node), FUN = sum) summary(d) # Plot aggregated data from all countries ggplot(d, aes(x = as.Date(date, "%Y-%m-%d"), y = clients, colour = node)) + geom_line() + ggtitle("Latin American Tor clients") + scale_x_date(name = "Date") + # facet_grid(node~.,scales = "free_y") + expand_limits(y = 0)
## Utilities for caching matrix inverses so that repeatedly requesting the
## inverse of the same matrix only computes it once. Useful when the same
## inverse is needed many times.

## makeCacheMatrix: wrap a matrix in a list of four accessor functions that
## can also store (cache) the matrix's inverse in the enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    ## store the inverse for this specific matrix
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    ## retrieve the cached inverse (NULL if not yet computed)
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix. The inverse is computed with solve() on first use and
## served from the cache afterwards.
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    ## not cached yet: compute, store, and return
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
    return(inverse)
  }
  message("getting cached data")
  inverse
}
/cachematrix.R
no_license
rjl1719/Programming-Assignment-2
R
false
false
1,394
r
## These functions can cache matrix inverses in order to reduce the time needed to compute ## the inverses of previously used matrices. This is especially useful when the inverses of ## these matrices are computed repeatedly. ## This function creates a matrix that allows the user to cache its inverse through a list; ## this list contains four different functions. makeCacheMatrix <- function(x = matrix()) { n <- NULL set <- function(y) { x <<- y n <<- NULL } get <- function() x setinverse <- function(solve) n <<- solve ## for storing inverse for specific matrix getinverse <- function() n ## for retrieving cached inverse for specific matrix list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## This function checks if the inverse is calculated. ## If so, this function retrieves the inverse from the cache; ## otherwise, it calculates and stores the inverse. cacheSolve <- function(x, ...) { n <- x$getinverse() if(!is.null(n)) { message("getting cached data") return(n) } data <- x$get() ## skipped if inverse is previously calculated n <- solve(data, ...) ## solves inverse of specific matrix x$setinverse(n) ## stores inverse to cache n }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_gt3x_utils.R \name{check_packets} \alias{check_packets} \title{Check for packets with failed checksum} \usage{ check_packets(packets) } \arguments{ \item{packets}{object containing data packets or subsequently parsed data} } \description{ Check for packets with failed checksum } \keyword{internal}
/man/check_packets.Rd
permissive
muschellij2/AGread
R
false
true
382
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_gt3x_utils.R \name{check_packets} \alias{check_packets} \title{Check for packets with failed checksum} \usage{ check_packets(packets) } \arguments{ \item{packets}{object containing data packets or subsequently parsed data} } \description{ Check for packets with failed checksum } \keyword{internal}
# Read a field-trial data table, coerce column types, and flag outliers in the
# trait named by `traitName` with three methods (Tukey IQR whisker, MAD,
# Bonferroni mean-shift test), within each CO2_Stufe x N_Form combination.
library(data.table)  # fixed: fread() and data.table syntax are used below but the package was never attached
library(car)         # fixed: outlierTest() comes from car

# `traitName` is referenced via eval() inside dt[...] but never defined in this
# script -- it must be supplied by the caller (presumably a quoted column name,
# e.g. traitName <- quote(Knollenvolumen); TODO confirm against the caller).
if (!exists("traitName")) {
  stop("Define 'traitName' (e.g. traitName <- quote(Knollenvolumen)) before running this script.")
}

# read data
dt <- fread("https://raw.githubusercontent.com/SpehleHSGM/Datasets/main/datatable.csv",
            sep = ";", dec = ",", check.names = TRUE)  # fixed: TRUE instead of T

# format: treatment columns as factors, trait columns as numeric
dt[, Wdh := as.factor(Wdh)][, CO2_Stufe := as.factor(CO2_Stufe)][, N_Form := as.factor(N_Form)]
dt[, Knollenvolumen := as.numeric(Knollenvolumen)][, Frischmasse.Knolle := as.numeric(Frischmasse.Knolle)]

# define outlier function: whisker rule by John W. Tukey
# (flags values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR])
outlier.iqr <- function(dp) {
  qt <- quantile(dp, probs = c(0.25, 0.75), na.rm = TRUE)
  iqr <- IQR(dp, na.rm = TRUE)
  out <- ifelse(dp > (qt[1] - 1.5*iqr) & dp < (qt[2] + 1.5*iqr), FALSE, TRUE)
  return(out)
}

# define outlier function: median absolute deviation (MAD)
# raw MAD (constant = 1) with a 5.2 multiplier around the median
outlier.mad <- function(dp) {
  med <- median(dp)
  mad <- mad(dp, constant = 1)
  out <- ifelse((dp > med - 5.2*mad) & (dp < med + 5.2*mad), FALSE, TRUE)
  return(out)
}

# add column Flag_outlier.iqr (flagged within each treatment combination)
dt[, Flag_outlier.iqr := outlier.iqr(eval(traitName)), by = interaction(CO2_Stufe, N_Form)]

# add column Flag_outlier.mad
dt[, Flag_outlier.mad := outlier.mad(eval(traitName)), by = interaction(CO2_Stufe, N_Form)]

# add column Flag_outlier.bonf (mean shift outlier, Bonferroni-adjusted p < 0.05)
dt[, Flag_outlier.bonf := outlierTest(lm(eval(traitName) ~ CO2_Stufe*N_Form),
                                      cutoff = Inf, n.max = Inf, order = FALSE)$bonf.p < 0.05]

# data.table aggregate of trait means by Wdh*CO2_Stufe*N_Form
dt_means <- dt[, .(Knollenvolumen = mean(Knollenvolumen),
                   Frischmasse.Knolle = mean(Frischmasse.Knolle),
                   count = .N,
                   mainPlots = factor(Wdh:CO2_Stufe)),
               by = list(Wdh, CO2_Stufe, N_Form)]

# add column with boolean "Flag_outlier" (TRUE/FALSE) on the aggregated means
dt_means[, Flag_outlier.iqr := outlier.iqr(eval(traitName)), by = interaction(CO2_Stufe, N_Form)]
no_license
SpehleHSGM/Funktions
R
false
false
1,666
r
# read data dt <- fread("https://raw.githubusercontent.com/SpehleHSGM/Datasets/main/datatable.csv",sep = ";",dec = ",",check.names=T) # format dt[,Wdh:=as.factor(Wdh)][,CO2_Stufe:=as.factor(CO2_Stufe)][,N_Form:=as.factor(N_Form)] dt[,Knollenvolumen:=as.numeric(Knollenvolumen)][,Frischmasse.Knolle:=as.numeric(Frischmasse.Knolle)] # define outlier function: Whisker by John W. Tukey outlier.iqr <- function(dp){ qt <- quantile(dp,probs=c(0.25,0.75),na.rm = TRUE) iqr <- IQR(dp,na.rm = TRUE) out <- ifelse(dp>(qt[1]-1.5*iqr) & dp<(qt[2]+1.5*iqr),FALSE,TRUE) return(out) } # define outlier function: median absolute deviation (MAD) outlier.mad <- function(dp){ med <- median(dp) mad <- mad(dp, constant=1) out <- ifelse((dp > med-5.2*mad) & (dp < med + 5.2*mad),FALSE,TRUE) return(out) } # add column Flag_outlier.iqr dt[,Flag_outlier.iqr:=outlier.iqr(eval(traitName)),by=interaction(CO2_Stufe,N_Form)] # add column Flag_outlier.mad dt[,Flag_outlier.mad:=outlier.mad(eval(traitName)),by=interaction(CO2_Stufe,N_Form)] # add column Flag_outlier.bonf (mean shift outlier) dt[,Flag_outlier.bonf:=outlierTest(lm(eval(traitName)~CO2_Stufe*N_Form),cutoff=Inf, n.max=Inf,order=FALSE)$bonf.p < 0.05] # data.table aggregate of trait means by Wdh*CO2_Stufe*N_Form dt_means <- dt[ ,.(Knollenvolumen=mean(Knollenvolumen), Frischmasse.Knolle=mean(Frischmasse.Knolle), count=.N, mainPlots=factor(Wdh:CO2_Stufe)), by=list(Wdh,CO2_Stufe,N_Form)] # add column with boolean "Flag_outlier" (TRUE/FALSE) dt_means[,Flag_outlier.iqr:=outlier.iqr(eval(traitName)),by=interaction(CO2_Stufe,N_Form)]
# We first want to read our data so that R registers it as a dataset!
read.csv("https://michaelfrancenelson.github.io/eco_602_634_2020/data/hab.sta.csv")

# We can then input this read dataset into a vector so we can use it for processing!
dat_habitat <- read.csv("https://michaelfrancenelson.github.io/eco_602_634_2020/data/hab.sta.csv")

read.csv("https://michaelfrancenelson.github.io/eco_602_634_2020/data/bird.sta.csv")
dat_birds <- read.csv("https://michaelfrancenelson.github.io/eco_602_634_2020/data/bird.sta.csv")

# You can also use the 'here' package to download and read data files into R from your local drive
# here() works from your working directory, and you need to input subsequent coordinates within that working directory for it to identify it
# Your working directory is by default based off of the folder which holds your R-script file!
# getwd() - tells you what your current working directory is
# You can also manually set your working directory with setwd() - this is unsafe because your wd can always change
getwd()

require(here)
here("AoED Data", "AoED Birds", "bird.sta.csv")
here("AoED Data", "AoED Birds", "hab.sta.csv")
read.csv(here("AoED Data", "AoED Birds", "bird.sta.csv"))
read.csv(here("AoED Data", "AoED Birds", "hab.sta.csv"))

# file.exists() tells you if you're looking in the right spot for your file
file.exists(here("AoED Data", "AoED Birds", "bird.sta.csv"))
file.exists(here("AoED Data", "AoED Birds", "hab.sta.csv"))
# If the file truly exists, it will output : TRUE!

# Library() and require() allow you to load extra packages that you've installed
# Library() will re-load already loaded packages, while require() will check if it's already loaded and will decide whether or not it needs to activate it (does not reactivate)
# Using this at the beginning of your script is helpful!
# Fixed: the two lines below were live statements with a placeholder package name,
# so sourcing the file errored with "there is no package called 'name_of_package'".
# They are illustrative only and are now commented out:
# library(name_of_package)
# require(name_of_package)
# Require() is much more streamlined because some packages take a long time to load!
# ------------------------------------------------------------------------------------------------------------------- #
# In-Class R-Script!
#

# pairs() draws one scatterplot per pairwise combination of the given columns.
pairs(dat_habitat[, c("slope", "aspect")])
# pairs() creates multiple scatterplots for multiple variables - and most importantly for
# each combination of each variable! In this case we plotted just slope and aspect, but
# it is possible to add more variables which will all be plotted against each other.

# Next, for using the hist() function, we first need to find the maximum data value.
# This is to allow the break argument to function properly.
max(dat_birds[,"AMRO"])
# Output: [1] 4
# This means that there is a maximum value of 4
# In the context of the dat_birds dataset, this means that the most amount of
# American Robins (AMRO) observed at a specific site is 4!
# This tells us our minimum break range.
# The break argument requires that the range be at least the
# total number of observations.

# Now to use the hist() function to plot a histogram in R!
# breaks = 0:5 - .5 places the bin edges at -0.5, 0.5, ..., 4.5 so each bar is
# centered on an integer count 0..4.
hist(dat_birds[,"AMRO"],breaks = 0:5 - .5,
     xlab = "Number of Birds Counted",
     ylab = "Frequency",
     main = "Histogram of American Robin")
# In the dataset there is a max of [4] American Robins observations at a single site,
# which would initially make you want to have a break range of 0:4.
# Though this works, it doesn't work well for discrete data where a break = 0:4
# will give you a histogram with x-values depicted as ranges from 0-1, 1-2, 2-3 etc..
# So in this case, we can fix this by adding 1 to the break range so you are able to more
# accurately describe the discrete nature of the data! By adding the 5th break range,
# you isolate each value as its own discrete (non-continuous) value!
# ------------------------------------------------------------------------------------------------------------------- #
# Individual Assignment Portion
#

?par()
# par() allows us to view plots side by side
# First number is number of rows to arrange
# Second number is number of columns to arrange

dat_all = merge(dat_birds, dat_habitat)

?hist()
par(mfrow = c(1,3))
hist(dat_all$elev,
     xlab = "Elevation (m)",
     main = "Frequency of Elevation (m) Observed",
     xlim = c(0, 1000))
hist(dat_all$slope,
     main = "Frequency of Slope (%) Observed",
     xlab = "Slope (%)",
     xlim = c(0, 120),
     ylim = c(0, 175))
hist(dat_habitat$aspect,
     xlab = "Aspect (degrees)",
     main = "Frequency of Aspect (degrees) Observed",
     ylim = c(0, 175),
     xlim = c(0, 400))

# dev.off() will clear the current plot being plotted!
dev.off()

# ---------------------------------------------------------------------------------- #

# Calculates the value of y for a linear function at x, given the coordinates
# of a known point (x1, y1) and the slope of the line.
line_point_slope = function(x, x1, y1, slope)
{
  # y-intercept from the anchor point and slope
  get_y_intercept = function(x1, y1, slope) return(-(x1 * slope) + y1)
  # evaluate y = yint + slope * x
  linear = function(x, yint, slope) return(yint + x * slope)
  return(linear(x, get_y_intercept(x1, y1, slope), slope))
}

# x1 and y1 are the coordinates which anchor your slope! It can be anywhere on the graph.
# Fixed: the template call below had empty arguments (x1 = , y1 = , slope = ),
# which is a parse error in R and prevented the whole file from being sourced.
# Fill in the anchor point and slope, then uncomment it:
# curve(line_point_slope(x, x1 = , y1 = , slope = ), add = TRUE)

# ---------------------------------------------------------------------------------- #

par(mfrow = c(1,3))
# You can combine this w/ the mtext() and line() to label!
# Scatterplots of total basal area against each terrain variable, each with a
# hand-fit reference line drawn through a chosen anchor point via line_point_slope().
plot(dat_all$elev, y = dat_all$ba.tot,
     xlab = "Elevation (m)",
     ylab = "Basal Area (m^2 per ha)",
     main = "Basal Area versus Elevation")
curve(line_point_slope(x, x1 = 250, y1 = 30, slope = -0.01), add = TRUE)

plot(x = dat_all$slope, y = dat_all$ba.tot,
     xlab = "Slope (%)",
     ylab = "Basal Area (m^2 per ha)",
     main = "Basal Area versus Slope")
curve(line_point_slope(x, x1 = 10, y1 = 20, slope = -0.06), add = TRUE)
# Slope is the steepness or the degree of incline of a surface

plot(dat_all$aspect, y = dat_all$ba.tot,
     xlab = "Aspect (degrees)",
     ylab = "Basal Area (m^2 per ha)",
     main = "Basal Area versus Aspect")
curve(line_point_slope(x, x1 = 0, y1 = 20, slope = 0), add = TRUE)
# Aspect is the orientation of slope, measured clockwise in degrees from 0 to 360
# Where 0 is north-facing, 90 is east-facing, 180 is south-facing, and 270 is west-facing

# ------------------------------------------------------------------------------------------------------------------- #
# Lab 3
#

# Create pair plots using psych to plot slope, elevation and aspect!
# Use the pair plot function from psych to create a pair plot
# of the three terrain variables and basal area from the lecture questions.
# psych package helps make pretty pair plots
?pairs.panels
require(psych)
env_var = data.frame(dat_all$slope, dat_all$elev, dat_all$aspect, dat_all$ba.tot)
env_var
pairs.panels(env_var)

# ---------------------------------------------------------------------------------- #

# We want to merge dat_habitat and dat_birds
# How do we know if there are the same number of rows in both data frames?
# How can we be sure that we associate the correct row of dat_habitat/dat_birds?
dat_all = merge(dat_birds, dat_habitat)
plot(ba.tot ~ elev, data = dat_all)

# How to use R to calculate the total number of waxwings
CEWA_sum = sum(dat_birds[, "CEWA"])
CEWA_sum

# Converting to Presence/Absence using as.numeric *********
CEWA_boolean = dat_birds[, "CEWA"] >= 1
CEWA_boolean
# You could also have inputted the CEWA_boolean vector script directly into the as.numeric() function
CEWA_present_absent = as.numeric(CEWA_boolean)
CEWA_present_absent

# This code creates a binary plot like in the McGarigal reading!
# You can transform these well with logistic models
plot(x = dat_all$elev, y = CEWA_present_absent)

# ---------------------------------------------------------------------------------- #

# Function to calculate the logistic parameter a given the slope and midpoint
get_logistic_param_a = function(slope, midpoint)
{
  # Fixed: removed a dead local assignment (b = slope / 4) whose value was never used.
  return (-midpoint * (slope / 4))
}

# Function to calculate the logistic parameter b given the slope
get_logistic_param_b = function(slope)
{
  return (slope / 4)
}

# Calculate the value of the logistic function at x, given the parameters a and b.
logistic = function(x, a, b)
{
  val = exp(a + b * x)
  return(val / (1 + val))
}

# Calculate the value of the logistic function at x, given a slope and midpoint.
logistic_midpoint_slope = function(x, midpoint, slope) { b = get_logistic_param_b(slope) a = get_logistic_param_a(slope, midpoint) return(logistic(x, a, b)) } plot(x = dat_all$elev, y = CEWA_present_absent) curve(logistic_midpoint_slope(x, midpoint = 400, slope = -0.05), add = TRUE) # ---------------------------------------------------------------------------------- # # Question 1 & 2: Choose Two Bird Species and create plots of presence/absence (y) and basal area (x) # I chose the American Goldfinch (AMGO) and Black-cap Chickadee (BCCH) AMGO_boolean = dat_birds[, "AMGO"] >= 1 AMGO_present_absent = as.numeric(AMGO_boolean) plot(x = dat_habitat$ba.tot, y = AMGO_present_absent, main = "Total Basal Area versus American Goldfinch Observations", xlab = "Total Basal Area (m^2 per ha)", ylab = "Presence/Absence of American Goldfinch") curve(logistic_midpoint_slope(x, midpoint = 12, slope = -0.8), add = TRUE) BCCH_boolean = dat_birds[, "BCCH"] >= 1 BCCH_present_absent = as.numeric(BCCH_boolean) plot(x = dat_habitat$ba.tot, y = BCCH_present_absent, main = "Total Basal Area versus Black Cap Chickadee Observations", xlab = "Total Basal Area (m^2 per ha)", ylab = "Presence/Absence of Black-cap Chickadee") curve(logistic_midpoint_slope(x, midpoint = 25, slope = -1), add = TRUE) # Question 3: Sum of Observed Grey Jays GRJA_sum = sum(dat_all[, "GRJA"]) GRJA_sum # Question 4: Total number of sampling sites in which Gray Jay were observed GRJA_sites = sum(dat_all[, "GRJA"] > 0) GRJA_sites
/individual_assignments/AoED Labs/AoED Lab 3/AoED - Lab 3.R
no_license
JamesMacCabeTsalah/ECo_602_2020
R
false
false
10,523
r
# We first want to read our data so that R registers it as a dataset! read.csv("https://michaelfrancenelson.github.io/eco_602_634_2020/data/hab.sta.csv") # We can then input this read dataset into a vector so we can use it for processing! dat_habitat <- read.csv("https://michaelfrancenelson.github.io/eco_602_634_2020/data/hab.sta.csv") read.csv("https://michaelfrancenelson.github.io/eco_602_634_2020/data/bird.sta.csv") dat_birds <- read.csv("https://michaelfrancenelson.github.io/eco_602_634_2020/data/bird.sta.csv") # You can also use the 'here' package to download and read data files into R from your local drive # here() works from your working directory, and you need to input subsequent coordinates within that working directory for it to identify it # Your working directory is by default based off of the folder which holds your R-script file! # getwd() - tells you what your current working directory is # You can also manually set your working directory with setwd() - this is unsafe because your wd can always change getwd() require(here) here("AoED Data", "AoED Birds", "bird.sta.csv") here("AoED Data", "AoED Birds", "hab.sta.csv") read.csv(here("AoED Data", "AoED Birds", "bird.sta.csv")) read.csv(here("AoED Data", "AoED Birds", "hab.sta.csv")) # file.exists() tells you if you're looking in the right spot for your file file.exists(here("AoED Data", "AoED Birds", "bird.sta.csv")) file.exists(here("AoED Data", "AoED Birds", "hab.sta.csv")) # If the file truly exists, it will output : TRUE! # Library() and require() allow you to load extra packages that you;ve installed # Library() will re-load already loaded packages, while require() will check if it's already loaded and will decide whether or not it needs to activate it (does not reactivate) # Using this at the beginning of your script is helpful! library(name_of_package) require(name_of_package) # Require() is much more streamlined because some packages take a long time to load! 
# ------------------------------------------------------------------------------------------------------------------- # # In-Class R-Script! # pairs(dat_habitat[, c("slope", "aspect")]) # pairs() creates multiple scatterplots for multiple variables - and most importantly for # each combination of each variable! In this case we plotted just slope and aspect, but # it is possible to add more variables which will all be plotted against each other. # Next, for using the his() script, we first need to find the maximum data value. # This is to allow the break argument to function properly. max(dat_birds[,"AMRO"]) # Output: [1] 4 # This means that there is a maximum value of 4 # In the context of the dat_birds dataset, this means that the most amount of # American Robins (AMRO) observed at a specific site is 4! # This tells us our minimum break range. # The break argument requires that the range be at least the # total number of observations. # Now to use the hist() function to plot a histogram in R! hist(dat_birds[,"AMRO"],breaks = 0:5 - .5, xlab = "Number of Birds Counted", ylab = "Frequency", main = "Histogram of American Robin") # In the dataset there is a max of [4] American Robins observations at a single site, # which would initially make you want to have a break range of 0:4. # Though this works, it doesn’t work well for discrete data where a break = 0:4 # will give you a histogram with x-values depicted as ranges from 0-1, 1-2, 2-3 etc.. # So in this case, we can fix this by adding 1 to the break range so you are able to more # accurately describe the discrete nature of the data! By adding the 5th break range, # you isolate each value as its own discrete (non-continuous) value! 
# ------------------------------------------------------------------------------------------------------------------- # # Individual Assignment Portion # ?par() # par() allows us to view plots side by side # First number is number of rows to arrange # Second number is number of columns to arrange dat_all = merge(dat_birds, dat_habitat) ?hist() par(mfrow = c(1,3)) hist(dat_all$elev, xlab = "Elevation (m)", main = "Frequency of Elevation (m) Observed", xlim = c(0, 1000)) hist(dat_all$slope, main = "Frequency of Slope (%) Observed", xlab = "Slope (%)", xlim = c(0, 120), ylim = c(0, 175)) hist(dat_habitat$aspect, xlab = "Aspect (degrees)", main = "Frequency of Aspect (degrees) Observed", ylim = c(0, 175), xlim = c(0, 400)) # dev.off() will clear the current plot being plotted! dev.off() # ---------------------------------------------------------------------------------- # # Calculates the value of x for a linear function, given the coordinates # of a known point (x1, y1) and the slope of the line. line_point_slope = function(x, x1, y1, slope) { get_y_intercept = function(x1, y1, slope) return(-(x1 * slope) + y1) linear = function(x, yint, slope) return(yint + x * slope) return(linear(x, get_y_intercept(x1, y1, slope), slope)) } # x1 and y1 are the coordinates which anchors your slope! It can be anywhere on the graph. curve(line_point_slope(x, x1 = , y1 = , slope = ), add = TRUE) # ---------------------------------------------------------------------------------- # par(mfrow = c(1,3)) # You can combine this w/ the mtext() and line() to label! 
plot(dat_all$elev, y = dat_all$ba.tot, xlab = "Elevation (m)", ylab = "Basal Area (m^2 per ha)", main = "Basal Area versus Elevation") curve(line_point_slope(x, x1 = 250, y1 = 30, slope = -0.01), add = TRUE) plot(x = dat_all$slope, y = dat_all$ba.tot, xlab = "Slope (%)", ylab = "Basal Area (m^2 per ha)", main = "Basal Area versus Slope") curve(line_point_slope(x, x1 = 10, y1 = 20, slope = -0.06), add = TRUE) # Slope is the steepness or the degree of incline of a surface plot(dat_all$aspect, y = dat_all$ba.tot, xlab = "Aspect (degrees)", ylab = "Basal Area (m^2 per ha)", main = "Basal Area versus Aspect") curve(line_point_slope(x, x1 = 0, y1 = 20, slope = 0), add = TRUE) # Aspect is the orientation of slope, measured clockwise in degrees from 0 to 360 # Where 0 is north-facing, 90 is east-facing, 180 is south-facing, and 270 is west-facing # ------------------------------------------------------------------------------------------------------------------- # # Lab 3 # # Create pair plots using psych to plot slope, elevation and aspect! # Use the pair plot function from psych to create a pair plot # of the three terrain variables and basal area from the lecture questions. # psych package helps make pretty pair plots ?pairs.panels require(psych) env_var = data.frame(dat_all$slope, dat_all$elev, dat_all$aspect, dat_all$ba.tot) env_var pairs.panels(env_var) # ---------------------------------------------------------------------------------- # # We want to merge dat_habitat and dat_birds # How do we know if there are the same number of rows in both data frames? 
# How can we be sure that we associate the correct row of dat+habitat/birds dat_all = merge(dat_birds, dat_habitat) plot(ba.tot ~ elev, data = dat_all) # How to use R to to calculate the total number of waxwings CEWA_sum = sum(dat_birds[, "CEWA"]) CEWA_sum # Converting to Presence/Absence using as.numeric ********* CEWA_boolean = dat_birds[, "CEWA"] >= 1 CEWA_boolean # You could also have inputted the CEWA_boolean vector script directly into the as.numeric() function CEWA_present_absent = as.numeric(CEWA_boolean) CEWA_present_absent # This code creates a binary plot like in the McGarigal reading! # You can transform these well with logistic models plot(x = dat_all$elev, y = CEWA_present_absent) # ---------------------------------------------------------------------------------- # # Function to calculate the logistic parameter a given the slope and midpoint get_logistic_param_a = function(slope, midpoint) { b = slope / 4 return (-midpoint * (slope / 4)) } # Function to calculate the logistic parameter b given the slope get_logistic_param_b = function(slope) { return (slope / 4) } # Calculate the value of the logistic function at x, given the parameters a and b. logistic = function(x, a, b) { val = exp(a + b * x) return(val / (1 + val)) } # Calculate the value of the logistic function at x, given a slopoe and midpoint. 
logistic_midpoint_slope = function(x, midpoint, slope) { b = get_logistic_param_b(slope) a = get_logistic_param_a(slope, midpoint) return(logistic(x, a, b)) } plot(x = dat_all$elev, y = CEWA_present_absent) curve(logistic_midpoint_slope(x, midpoint = 400, slope = -0.05), add = TRUE) # ---------------------------------------------------------------------------------- # # Question 1 & 2: Choose Two Bird Species and create plots of presence/absence (y) and basal area (x) # I chose the American Goldfinch (AMGO) and Black-cap Chickadee (BCCH) AMGO_boolean = dat_birds[, "AMGO"] >= 1 AMGO_present_absent = as.numeric(AMGO_boolean) plot(x = dat_habitat$ba.tot, y = AMGO_present_absent, main = "Total Basal Area versus American Goldfinch Observations", xlab = "Total Basal Area (m^2 per ha)", ylab = "Presence/Absence of American Goldfinch") curve(logistic_midpoint_slope(x, midpoint = 12, slope = -0.8), add = TRUE) BCCH_boolean = dat_birds[, "BCCH"] >= 1 BCCH_present_absent = as.numeric(BCCH_boolean) plot(x = dat_habitat$ba.tot, y = BCCH_present_absent, main = "Total Basal Area versus Black Cap Chickadee Observations", xlab = "Total Basal Area (m^2 per ha)", ylab = "Presence/Absence of Black-cap Chickadee") curve(logistic_midpoint_slope(x, midpoint = 25, slope = -1), add = TRUE) # Question 3: Sum of Observed Grey Jays GRJA_sum = sum(dat_all[, "GRJA"]) GRJA_sum # Question 4: Total number of sampling sites in which Gray Jay were observed GRJA_sites = sum(dat_all[, "GRJA"] > 0) GRJA_sites
library(XML) library(jsonlite) # This code snippet downloads a table of Sabah holidays from # the web and creates inserts for the backend db years <- c(2016, 2017) calFun <- function(year){ # load the calendar days url <- sprintf('http://www.officeholidays.com/countries/malaysia/regional.php?list_year=%i&list_region=sabah', year) # extract the first table in the page cal <- readHTMLTable(url, stringsAsFactors=FALSE)[[1]] # drop unneeded columns cal <- cal[,c('Date', 'Holiday')] # format dates cal$Date <- gsub('\n.*', paste('', year), cal$Date) cal$Date <- strptime(cal$Date, '%B %d %Y') return(cal) } # get the calendar blocks for each year and combine calendars <- lapply(years, calFun) calendars <- do.call('rbind', calendars) # create SQL inserts for public_holidays table inserts <- sprintf("insert into public_holidays (date,title) values ('%s','%s');\n", calendars$Date, calendars$Holiday) # tidy out some non-essential ones inserts <- inserts[! grepl('Mother', inserts)] cat(inserts, file='public_holidays.sql')
/static/public_holidays/create_public_holidays.R
permissive
ImperialCollegeLondon/safe_web
R
false
false
1,046
r
library(XML) library(jsonlite) # This code snippet downloads a table of Sabah holidays from # the web and creates inserts for the backend db years <- c(2016, 2017) calFun <- function(year){ # load the calendar days url <- sprintf('http://www.officeholidays.com/countries/malaysia/regional.php?list_year=%i&list_region=sabah', year) # extract the first table in the page cal <- readHTMLTable(url, stringsAsFactors=FALSE)[[1]] # drop unneeded columns cal <- cal[,c('Date', 'Holiday')] # format dates cal$Date <- gsub('\n.*', paste('', year), cal$Date) cal$Date <- strptime(cal$Date, '%B %d %Y') return(cal) } # get the calendar blocks for each year and combine calendars <- lapply(years, calFun) calendars <- do.call('rbind', calendars) # create SQL inserts for public_holidays table inserts <- sprintf("insert into public_holidays (date,title) values ('%s','%s');\n", calendars$Date, calendars$Holiday) # tidy out some non-essential ones inserts <- inserts[! grepl('Mother', inserts)] cat(inserts, file='public_holidays.sql')
train_test_split <- function(df){ smp_size <- floor(0.75* nrow(df)) set.seed(123) train_ind <- sample(seq_len(nrow(df)), size = smp_size) train <- df[train_ind, ] test <- df[-train_ind, ] return (list(train,test)) }
/Aluminium data/train_test_split.R
no_license
onicolini/libs
R
false
false
236
r
train_test_split <- function(df){ smp_size <- floor(0.75* nrow(df)) set.seed(123) train_ind <- sample(seq_len(nrow(df)), size = smp_size) train <- df[train_ind, ] test <- df[-train_ind, ] return (list(train,test)) }
# This script is a little bit different from the other createTables because # we need to have a matrix with all samples to perform the downsampling. # In the other script fpkm and tpm are first calculated for every sample # and later the different data.frames are merged. Here first we have to merge # all the counts and then perform the downsampling and fpkm/tpm. log <- file(snakemake@log[[1]], open = "wt") sink(log) sink(log, type = "message") library(dplyr) library(tibble) source("workflow/scripts/custom_functions.R") # Seed for downsampling set.seed(snakemake@params[["seed"]]) #------------------------------------------------------------------------------------------ # Preapre count table #------------------------------------------------------------------------------------------ sample_names <- unlist(snakemake@input) %>% basename %>% gsub(pattern = ".featureCounts", replacement = "") # Merge all samples keeping geneid and length columns to calculate fpkm/tpm counts <- snakemake@input %>% purrr::map(read.delim, header = TRUE, skip = 1) %>% # Ignore first line of featureCounts output purrr::map(select, 1,6,7) %>% # Select geneid, length and counts purrr::map(setNames, c("Geneid", "Length", "Counts")) %>% plyr::join_all(type='inner', by = c("Geneid", "Length")) %>% setNames(c("Geneid", "Length", sample_names)) #------------------------------------------------------------------------------------------ # Remove excluded samples in case they are defined in config file prior to downsample #------------------------------------------------------------------------------------------ if(!is.null(snakemake@params[["exclude"]])) { counts <- counts %>% select(-one_of(snakemake@params[["exclude"]])) } #------------------------------------------------------------------------------------------ # Downsample counts and calculate fpkm / tpm #------------------------------------------------------------------------------------------ # Downsample count matrix to the sample with less 
reads counts_downsampled <- cbind( select(counts, Geneid, Length), Down_Sample_Matrix( select(counts, -Geneid, -Length) ) ) fpkm_downsampled <- counts_downsampled %>% mutate_at( vars(sample_names), list(~ do_fpkm(., Length)) ) %>% select(-Length) tpm_downsampled <- counts_downsampled %>% mutate_at( vars(sample_names), list(~ do_tpm(., Length)) ) %>% select(-Length) counts_downsampled <- select(counts_downsampled, - Length) #------------------------------------------------------------------------------------------ # Write output #------------------------------------------------------------------------------------------ write.table(counts_downsampled, snakemake@output[["raw_counts"]], sep = "\t", quote = F, row.names = FALSE) write.table(fpkm_downsampled, snakemake@output[["fpkm"]], sep = "\t", quote = F, row.names = FALSE) write.table(tpm_downsampled, snakemake@output[["tpm"]], sep = "\t", quote = F, row.names = FALSE)
/workflow/scripts/createTables_count_rpkm_tpm_downsampled.R
no_license
dfernandezperez/RNAseq-Snakemake
R
false
false
2,977
r
# This script is a little bit different from the other createTables because # we need to have a matrix with all samples to perform the downsampling. # In the other script fpkm and tpm are first calculated for every sample # and later the different data.frames are merged. Here first we have to merge # all the counts and then perform the downsampling and fpkm/tpm. log <- file(snakemake@log[[1]], open = "wt") sink(log) sink(log, type = "message") library(dplyr) library(tibble) source("workflow/scripts/custom_functions.R") # Seed for downsampling set.seed(snakemake@params[["seed"]]) #------------------------------------------------------------------------------------------ # Preapre count table #------------------------------------------------------------------------------------------ sample_names <- unlist(snakemake@input) %>% basename %>% gsub(pattern = ".featureCounts", replacement = "") # Merge all samples keeping geneid and length columns to calculate fpkm/tpm counts <- snakemake@input %>% purrr::map(read.delim, header = TRUE, skip = 1) %>% # Ignore first line of featureCounts output purrr::map(select, 1,6,7) %>% # Select geneid, length and counts purrr::map(setNames, c("Geneid", "Length", "Counts")) %>% plyr::join_all(type='inner', by = c("Geneid", "Length")) %>% setNames(c("Geneid", "Length", sample_names)) #------------------------------------------------------------------------------------------ # Remove excluded samples in case they are defined in config file prior to downsample #------------------------------------------------------------------------------------------ if(!is.null(snakemake@params[["exclude"]])) { counts <- counts %>% select(-one_of(snakemake@params[["exclude"]])) } #------------------------------------------------------------------------------------------ # Downsample counts and calculate fpkm / tpm #------------------------------------------------------------------------------------------ # Downsample count matrix to the sample with less 
reads counts_downsampled <- cbind( select(counts, Geneid, Length), Down_Sample_Matrix( select(counts, -Geneid, -Length) ) ) fpkm_downsampled <- counts_downsampled %>% mutate_at( vars(sample_names), list(~ do_fpkm(., Length)) ) %>% select(-Length) tpm_downsampled <- counts_downsampled %>% mutate_at( vars(sample_names), list(~ do_tpm(., Length)) ) %>% select(-Length) counts_downsampled <- select(counts_downsampled, - Length) #------------------------------------------------------------------------------------------ # Write output #------------------------------------------------------------------------------------------ write.table(counts_downsampled, snakemake@output[["raw_counts"]], sep = "\t", quote = F, row.names = FALSE) write.table(fpkm_downsampled, snakemake@output[["fpkm"]], sep = "\t", quote = F, row.names = FALSE) write.table(tpm_downsampled, snakemake@output[["tpm"]], sep = "\t", quote = F, row.names = FALSE)
utils::globalVariables(c("Pseudotime", "value", "ids", "prin_graph_dim_1", "prin_graph_dim_2", "State", "value", "feature_label", "expectation", "colInd", "rowInd", "value", "source_prin_graph_dim_1", "source_prin_graph_dim_2")) monocle_theme_opts <- function() { theme(strip.background = element_rect(colour = 'white', fill = 'white')) + theme(panel.border = element_blank()) + theme(axis.line.x = element_line(size=0.25, color="black")) + theme(axis.line.y = element_line(size=0.25, color="black")) + theme(panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank()) + theme(panel.grid.major.x = element_blank(), panel.grid.major.y = element_blank()) + theme(panel.background = element_rect(fill='white')) + theme(legend.key=element_blank()) } #' Plots the minimum spanning tree on cells. #' #' @param cds CellDataSet for the experiment #' @param x the column of reducedDimS(cds) to plot on the horizontal axis #' @param y the column of reducedDimS(cds) to plot on the vertical axis #' @param color_by the cell attribute (e.g. the column of pData(cds)) to map to each cell's color #' @param show_tree whether to show the links between cells connected in the minimum spanning tree #' @param show_backbone whether to show the diameter path of the MST used to order the cells #' @param backbone_color the color used to render the backbone. 
#' @param markers a gene name or gene id to use for setting the size of each cell in the plot #' @param use_color_gradient Whether or not to use color gradient instead of cell size to show marker expression level #' @param markers_linear a boolean used to indicate whether you want to scale the markers logarithimically or linearly #' @param show_cell_names draw the name of each cell in the plot #' @param show_state_number show state number #' @param cell_size The size of the point for each cell #' @param cell_link_size The size of the line segments connecting cells (when used with ICA) or the principal graph (when used with DDRTree) #' @param cell_name_size the size of cell name labels #' @param state_number_size the size of the state number #' @param show_branch_points Whether to show icons for each branch point (only available when reduceDimension was called with DDRTree) #' @param theta How many degrees you want to rotate the trajectory #' @param ... Additional arguments passed into scale_color_viridis function #' @return a ggplot2 plot object #' @import ggplot2 #' @importFrom reshape2 melt #' @importFrom igraph get.edgelist #' @importFrom viridis scale_color_viridis #' @export #' @examples #' \dontrun{ #' lung <- load_lung() #' plot_cell_trajectory(lung) #' plot_cell_trajectory(lung, color_by="Pseudotime", show_backbone=FALSE) #' plot_cell_trajectory(lung, markers="MYH3") #' } plot_cell_trajectory <- function(cds, x=1, y=2, color_by="State", show_tree=TRUE, show_backbone=TRUE, backbone_color="black", markers=NULL, use_color_gradient = FALSE, markers_linear = FALSE, show_cell_names=FALSE, show_state_number = FALSE, cell_size=1.5, cell_link_size=0.75, cell_name_size=2, state_number_size = 2.9, show_branch_points=TRUE, theta = 0, ...){ gene_short_name <- NA sample_name <- NA sample_state <- pData(cds)$State data_dim_1 <- NA data_dim_2 <- NA #TODO: need to validate cds as ready for this plot (need mst, pseudotime, etc) lib_info_with_pseudo <- pData(cds) if 
(is.null(cds@dim_reduce_type)){ stop("Error: dimensionality not yet reduced. Please call reduceDimension() before calling this function.") } if (cds@dim_reduce_type == "ICA"){ reduced_dim_coords <- reducedDimS(cds) }else if (cds@dim_reduce_type %in% c("simplePPT", "DDRTree") ){ reduced_dim_coords <- reducedDimK(cds) }else { stop("Error: unrecognized dimensionality reduction method.") } ica_space_df <- data.frame(Matrix::t(reduced_dim_coords[c(x,y),])) colnames(ica_space_df) <- c("prin_graph_dim_1", "prin_graph_dim_2") ica_space_df$sample_name <- row.names(ica_space_df) ica_space_df$sample_state <- row.names(ica_space_df) #ica_space_with_state_df <- merge(ica_space_df, lib_info_with_pseudo, by.x="sample_name", by.y="row.names") #print(ica_space_with_state_df) dp_mst <- minSpanningTree(cds) if (is.null(dp_mst)){ stop("You must first call orderCells() before using this function") } edge_list <- as.data.frame(get.edgelist(dp_mst)) colnames(edge_list) <- c("source", "target") edge_df <- merge(ica_space_df, edge_list, by.x="sample_name", by.y="source", all=TRUE) #edge_df <- ica_space_df edge_df <- plyr::rename(edge_df, c("prin_graph_dim_1"="source_prin_graph_dim_1", "prin_graph_dim_2"="source_prin_graph_dim_2")) edge_df <- merge(edge_df, ica_space_df[,c("sample_name", "prin_graph_dim_1", "prin_graph_dim_2")], by.x="target", by.y="sample_name", all=TRUE) edge_df <- plyr::rename(edge_df, c("prin_graph_dim_1"="target_prin_graph_dim_1", "prin_graph_dim_2"="target_prin_graph_dim_2")) S_matrix <- reducedDimS(cds) data_df <- data.frame(t(S_matrix[c(x,y),])) data_df <- cbind(data_df, sample_state) colnames(data_df) <- c("data_dim_1", "data_dim_2") data_df$sample_name <- row.names(data_df) data_df <- merge(data_df, lib_info_with_pseudo, by.x="sample_name", by.y="row.names") return_rotation_mat <- function(theta) { theta <- theta / 180 * pi matrix(c(cos(theta), sin(theta), -sin(theta), cos(theta)), nrow = 2) } tmp <- return_rotation_mat(theta) %*% t(as.matrix(data_df[, c(2, 3)])) 
data_df$data_dim_1 <- tmp[1, ] data_df$data_dim_2 <- tmp[2, ] tmp <- return_rotation_mat(theta = theta) %*% t(as.matrix(edge_df[, c('source_prin_graph_dim_1', 'source_prin_graph_dim_2')])) edge_df$source_prin_graph_dim_1 <- tmp[1, ] edge_df$source_prin_graph_dim_2 <- tmp[2, ] tmp <- return_rotation_mat(theta) %*% t(as.matrix(edge_df[, c('target_prin_graph_dim_1', 'target_prin_graph_dim_2')])) edge_df$target_prin_graph_dim_1 <- tmp[1, ] edge_df$target_prin_graph_dim_2 <- tmp[2, ] markers_exprs <- NULL if (is.null(markers) == FALSE){ markers_fData <- subset(fData(cds), gene_short_name %in% markers) if (nrow(markers_fData) >= 1){ markers_exprs <- reshape2::melt(as.matrix(exprs(cds[row.names(markers_fData),]))) colnames(markers_exprs)[1:2] <- c('feature_id','cell_id') markers_exprs <- merge(markers_exprs, markers_fData, by.x = "feature_id", by.y="row.names") #print (head( markers_exprs[is.na(markers_exprs$gene_short_name) == FALSE,])) markers_exprs$feature_label <- as.character(markers_exprs$gene_short_name) markers_exprs$feature_label[is.na(markers_exprs$feature_label)] <- markers_exprs$Var1 } } if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0){ data_df <- merge(data_df, markers_exprs, by.x="sample_name", by.y="cell_id") if(use_color_gradient) { if(markers_linear){ g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2)) + geom_point(aes(color= value), size=I(cell_size), na.rm = TRUE) + scale_color_viridis(name = paste0("value"), ...) + facet_wrap(~feature_label) } else { g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2)) + geom_point(aes(color=log10(value + 0.1)), size=I(cell_size), na.rm = TRUE) + scale_color_viridis(name = paste0("log10(value + 0.1)"), ...) 
+ facet_wrap(~feature_label) } } else { if(markers_linear){ g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2, size= (value * 0.1))) + facet_wrap(~feature_label) } else { g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2, size=log10(value + 0.1))) + facet_wrap(~feature_label) } } }else{ g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2)) } if (show_tree){ g <- g + geom_segment(aes_string(x="source_prin_graph_dim_1", y="source_prin_graph_dim_2", xend="target_prin_graph_dim_1", yend="target_prin_graph_dim_2"), size=cell_link_size, linetype="solid", na.rm=TRUE, data=edge_df) } # FIXME: setting size here overrides the marker expression funtionality. # Don't do it! if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0){ if(use_color_gradient) { # g <- g + geom_point(aes_string(color = color_by), na.rm = TRUE) } else { g <- g + geom_point(aes_string(color = color_by), na.rm = TRUE) } }else { if(use_color_gradient) { # g <- g + geom_point(aes_string(color = color_by), na.rm = TRUE) } else { g <- g + geom_point(aes_string(color = color_by), size=I(cell_size), na.rm = TRUE) } } if (show_branch_points && cds@dim_reduce_type == 'DDRTree'){ mst_branch_nodes <- cds@auxOrderingData[[cds@dim_reduce_type]]$branch_points branch_point_df <- subset(edge_df, sample_name %in% mst_branch_nodes)[,c("sample_name", "source_prin_graph_dim_1", "source_prin_graph_dim_2")] branch_point_df$branch_point_idx <- match(branch_point_df$sample_name, mst_branch_nodes) branch_point_df <- branch_point_df[!duplicated(branch_point_df$branch_point_idx), ] g <- g + geom_point(aes_string(x="source_prin_graph_dim_1", y="source_prin_graph_dim_2"), size=5, na.rm=TRUE, data=branch_point_df) + geom_text(aes_string(x="source_prin_graph_dim_1", y="source_prin_graph_dim_2", label="branch_point_idx"), size=4, color="white", na.rm=TRUE, data=branch_point_df) } if (show_cell_names){ g <- g +geom_text(aes(label=sample_name), size=cell_name_size) } if (show_state_number){ g <- g + 
geom_text(aes(label = sample_state), size = state_number_size) } g <- g + #scale_color_brewer(palette="Set1") + monocle_theme_opts() + xlab(paste("Component", x)) + ylab(paste("Component", y)) + theme(legend.position="top", legend.key.height=grid::unit(0.35, "in")) + #guides(color = guide_legend(label.position = "top")) + theme(legend.key = element_blank()) + theme(panel.background = element_rect(fill='white')) g } #' @rdname package-deprecated #' @title Plots the minimum spanning tree on cells. #' This function is deprecated. #' @description This function arranges all of the cells in the cds in a tree and #' predicts their location based on their pseudotime value #' @param cds CellDataSet for the experiment #' @param x the column of reducedDimS(cds) to plot on the horizontal axis #' @param y the column of reducedDimS(cds) to plot on the vertical axis #' @param color_by the cell attribute (e.g. the column of pData(cds)) to map to each cell's color #' @param show_tree whether to show the links between cells connected in the minimum spanning tree #' @param show_backbone whether to show the diameter path of the MST used to order the cells #' @param backbone_color the color used to render the backbone. 
#' @param markers a gene name or gene id to use for setting the size of each cell in the plot #' @param show_cell_names draw the name of each cell in the plot #' @param cell_size The size of the point for each cell #' @param cell_link_size The size of the line segments connecting cells (when used with ICA) or the principal graph (when used with DDRTree) #' @param cell_name_size the size of cell name labels #' @param show_branch_points Whether to show icons for each branch point (only available when reduceDimension was called with DDRTree) #' @return a ggplot2 plot object #' @import ggplot2 #' @importFrom reshape2 melt #' @export #' @seealso plot_cell_trajectory #' @examples #' \dontrun{ #' library(HSMMSingleCell) #' HSMM <- load_HSMM() #' plot_cell_trajectory(HSMM) #' plot_cell_trajectory(HSMM, color_by="Pseudotime", show_backbone=FALSE) #' plot_cell_trajectory(HSMM, markers="MYH3") #' } plot_spanning_tree <- function(cds, x=1, y=2, color_by="State", show_tree=TRUE, show_backbone=TRUE, backbone_color="black", markers=NULL, show_cell_names=FALSE, cell_size=1.5, cell_link_size=0.75, cell_name_size=2, show_branch_points=TRUE){ .Deprecated("plot_cell_trajectory") #include a package argument, too plot_cell_trajectory(cds=cds, x=x, y=y, color_by=color_by, show_tree=show_tree, show_backbone=show_backbone, backbone_color=backbone_color, markers=markers, show_cell_names=show_cell_names, cell_size=cell_size, cell_link_size=cell_link_size, cell_name_size=cell_name_size, show_branch_points=show_branch_points) } #' @title Plots expression for one or more genes as a violin plot #' #' @description Accepts a subset of a CellDataSet and an attribute to group cells by, #' and produces one or more ggplot2 objects that plots the level of expression for #' each group of cells. #' #' @param cds_subset CellDataSet for the experiment #' @param grouping the cell attribute (e.g. 
the column of pData(cds)) to group cells by on the horizontal axis
#' @param min_expr the minimum (untransformed) expression level to use when plotting the genes.
#' @param cell_size the size (in points) of each cell used in the plot
#' @param nrow the number of rows used when laying out the panels for each gene's expression
#' @param ncol the number of columns used when laying out the panels for each gene's expression
#' @param panel_order the order in which genes should be laid out (left-to-right, top-to-bottom)
#' @param color_by the cell attribute (e.g. the column of pData(cds)) to be used to color each cell
#' @param plot_trend whether to plot a trendline tracking the average expression across the horizontal axis.
#' @param label_by_short_name label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param relative_expr Whether to transform expression into relative values
#' @param log_scale a boolean that determines whether or not to scale data logarithmically
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom reshape2 melt
#' @importFrom BiocGenerics sizeFactors
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' my_genes <- HSMM[row.names(subset(fData(HSMM), gene_short_name %in% c("ACTA1", "ID1", "CCNB2"))),]
#' plot_genes_violin(my_genes, grouping="Hours", ncol=2, min_expr=0.1)
#' }
plot_genes_violin <- function(cds_subset, grouping = "State", min_expr = NULL,
                              cell_size = 0.75, nrow = NULL, ncol = 1,
                              panel_order = NULL, color_by = NULL,
                              plot_trend = FALSE, label_by_short_name = TRUE,
                              relative_expr = TRUE, log_scale = TRUE)
{
  # Count-based (negative binomial) expression families can be normalized by
  # size factors; any other family is plotted on its native scale.
  # NOTE(review): this function previously used `=` for assignment throughout,
  # unlike every sibling in this file; normalized to `<-` for consistency.
  if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")) {
    integer_expression <- TRUE
  }
  else {
    integer_expression <- FALSE
    relative_expr <- TRUE
  }
  if (integer_expression) {
    cds_exprs <- exprs(cds_subset)
    if (relative_expr) {
      if (is.null(sizeFactors(cds_subset))) {
        stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first")
      }
      # Divide each cell's counts by its size factor.
      cds_exprs <- Matrix::t(Matrix::t(cds_exprs)/sizeFactors(cds_subset))
    }
    #cds_exprs <- reshape2::melt(round(as.matrix(cds_exprs)))
    cds_exprs <- reshape2::melt(as.matrix(cds_exprs))
  }
  else {
    cds_exprs <- exprs(cds_subset)
    cds_exprs <- reshape2::melt(as.matrix(cds_exprs))
  }
  if (is.null(min_expr)) {
    min_expr <- cds_subset@lowerDetectionLimit
  }
  colnames(cds_exprs) <- c("f_id", "Cell", "expression")
  # Clamp values below the detection limit so the (optional) log axis stays finite.
  cds_exprs$expression[cds_exprs$expression < min_expr] <- min_expr
  cds_pData <- pData(cds_subset)
  cds_fData <- fData(cds_subset)
  # Attach feature- and cell-level metadata to each melted expression row.
  cds_exprs <- merge(cds_exprs, cds_fData, by.x = "f_id", by.y = "row.names")
  cds_exprs <- merge(cds_exprs, cds_pData, by.x = "Cell", by.y = "row.names")
  cds_exprs$adjusted_expression <- log10(cds_exprs$expression)
  # Label the facet panels by gene symbol when available, else by feature id.
  if (label_by_short_name == TRUE) {
    if (is.null(cds_exprs$gene_short_name) == FALSE) {
      cds_exprs$feature_label <- cds_exprs$gene_short_name
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    }
    else {
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  }
  else {
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  if (is.null(panel_order) == FALSE) {
    cds_exprs$feature_label <- factor(cds_exprs$feature_label, levels = panel_order)
  }
  q <- ggplot(aes_string(x = grouping, y = "expression"), data = cds_exprs)
  if (is.null(color_by) == FALSE) {
    q <- q + geom_violin(aes_string(fill = color_by))
  }
  else {
    q <- q + geom_violin()
  }
  if (plot_trend == TRUE) {
    # Overlay bootstrap confidence intervals of the mean, plus a line
    # connecting the group means across the horizontal axis.
    q <- q + stat_summary(fun.data = "mean_cl_boot", size = 0.2)
    q <- q + stat_summary(aes_string(x = grouping, y = "expression", group = color_by),
                          fun.data = "mean_cl_boot", size = 0.2, geom = "line")
  }
  q <- q + facet_wrap(~feature_label, nrow = nrow, ncol = ncol, scales = "free_y")
  if (min_expr < 1) {
    q <- q + expand_limits(y = c(min_expr, 1))
  }
  q <- q + ylab("Expression") + xlab(grouping)
  if (log_scale == TRUE) {
    q <- q + scale_y_log10()
  }
  q
}

#' Plots expression for one or more genes as a jittered, grouped points
#'
#' @description Accepts a subset of a CellDataSet and an attribute to group cells by,
#' and produces one or more ggplot2 objects that plots the level of expression for
#' each group of cells.
#'
#' @param cds_subset CellDataSet for the experiment
#' @param grouping the cell attribute (e.g. the column of pData(cds)) to group cells by on the horizontal axis
#' @param min_expr the minimum (untransformed) expression level to use in plotted the genes.
#' @param cell_size the size (in points) of each cell used in the plot
#' @param nrow the number of rows used when laying out the panels for each gene's expression
#' @param ncol the number of columns used when laying out the panels for each gene's expression
#' @param panel_order the order in which genes should be layed out (left-to-right, top-to-bottom)
#' @param color_by the cell attribute (e.g. the column of pData(cds)) to be used to color each cell
#' @param plot_trend whether to plot a trendline tracking the average expression across the horizontal axis. 
#' @param label_by_short_name label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param relative_expr Whether to transform expression into relative values
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom reshape2 melt
#' @importFrom BiocGenerics sizeFactors
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' my_genes <- HSMM[row.names(subset(fData(HSMM), gene_short_name %in% c("MYOG", "ID1", "CCNB2"))),]
#' plot_genes_jitter(my_genes, grouping="Media", ncol=2)
#' }
plot_genes_jitter <- function(cds_subset, grouping = "State", min_expr=NULL, cell_size=0.75,
                              nrow=NULL, ncol=1, panel_order=NULL, color_by=NULL,
                              plot_trend=FALSE,
                              label_by_short_name=TRUE,
                              relative_expr=TRUE){
  # Only the negative binomial expression families store raw counts that can
  # be normalized by size factors; any other family is plotted as-is.
  if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")){
    integer_expression <- TRUE
  }else{
    integer_expression <- FALSE
    relative_expr <- TRUE
  }
  if (integer_expression) {
    cds_exprs <- exprs(cds_subset)
    if (relative_expr){
      if (is.null(sizeFactors(cds_subset))) {
        stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first")
      }
      # Divide each cell's counts by its size factor before rounding.
      cds_exprs <- Matrix::t(Matrix::t(cds_exprs) / sizeFactors(cds_subset))
    }
    cds_exprs <- reshape2::melt(round(as.matrix(cds_exprs)))
  }else{
    cds_exprs <- exprs(cds_subset)
    cds_exprs <- reshape2::melt(as.matrix(cds_exprs))
  }
  if (is.null(min_expr)){
    min_expr <- cds_subset@lowerDetectionLimit
  }
  colnames(cds_exprs) <- c("f_id", "Cell", "expression")
  # Clamp values below the detection limit so the log axis stays finite.
  cds_exprs$expression[cds_exprs$expression < min_expr] <- min_expr
  cds_pData <- pData(cds_subset)
  cds_fData <- fData(cds_subset)
  # Attach feature- and cell-level metadata to each melted expression row.
  cds_exprs <- merge(cds_exprs, cds_fData, by.x="f_id", by.y="row.names")
  cds_exprs <- merge(cds_exprs, cds_pData, by.x="Cell", by.y="row.names")
  cds_exprs$adjusted_expression <- log10(cds_exprs$expression)
  #cds_exprs$adjusted_expression <- log10(cds_exprs$adjusted_expression + abs(rnorm(nrow(cds_exprs), min_expr, sqrt(min_expr))))
  # Label the facet panels by gene symbol when available, else by feature id.
  if (label_by_short_name == TRUE){
    if (is.null(cds_exprs$gene_short_name) == FALSE){
      cds_exprs$feature_label <- cds_exprs$gene_short_name
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    }else{
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  }else{
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  #print (head(cds_exprs))
  if (is.null(panel_order) == FALSE) {
    cds_exprs$feature_label <- factor(cds_exprs$feature_label, levels=panel_order)
  }
  q <- ggplot(aes_string(x=grouping, y="expression"), data=cds_exprs)
  if (is.null(color_by) == FALSE){
    q <- q + geom_jitter(aes_string(color=color_by), size=I(cell_size))
  }else{
    q <- q + geom_jitter(size=I(cell_size))
  }
  if (plot_trend == TRUE){
    # Overlay bootstrap confidence intervals of the mean plus a connecting line.
    q <- q + stat_summary(aes_string(color=color_by), fun.data = "mean_cl_boot", size=0.35)
    q <- q + stat_summary(aes_string(x=grouping, y="expression", color=color_by, group=color_by), fun.data = "mean_cl_boot", size=0.35, geom="line")
  }
  q <- q + scale_y_log10() + facet_wrap(~feature_label, nrow=nrow, ncol=ncol, scales="free_y")
  # Need this to guard against plotting failures caused by non-expressed genes
  if (min_expr < 1) {
    q <- q + expand_limits(y=c(min_expr, 1))
  }
  q <- q + ylab("Expression") + xlab(grouping)
  q <- q + monocle_theme_opts()
  q
}

#' Plots the number of cells expressing one or more genes as a barplot
#'
#' @description Accepts a CellDataSet and a parameter,"grouping", used for dividing cells into groups.
#' Returns one or more bar graphs (one graph for each gene in the CellDataSet).
#' Each graph shows the percentage of cells that express a gene in the CellDataSet for
#' each sub-group of cells created by "grouping".
#'
#' Let's say the CellDataSet passed in included genes A, B, and C and the "grouping parameter divided
#' all of the cells into three groups called X, Y, and Z. Then three graphs would be produced called A,
#' B, and C. In the A graph there would be three bars one for X, one for Y, and one for Z. 
So the X bar in the
#' A graph would show the percentage of cells in the X group that express gene A.
#'
#' @param cds_subset CellDataSet for the experiment
#' @param grouping the cell attribute (e.g. the column of pData(cds)) to group cells by on the horizontal axis
#' @param min_expr the minimum (untransformed) expression level to use when plotting the genes.
#' @param nrow the number of rows used when laying out the panels for each gene's expression
#' @param ncol the number of columns used when laying out the panels for each gene's expression
#' @param panel_order the order in which genes should be laid out (left-to-right, top-to-bottom)
#' @param plot_as_fraction whether to show the percent instead of the number of cells expressing each gene
#' @param label_by_short_name label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param relative_expr Whether to transform expression into relative values
#' @param plot_limits A pair of numbers specifying the limits of the y axis. If NULL, scale to the range of the data. 
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom plyr ddply
#' @importFrom reshape2 melt
#' @importFrom BiocGenerics sizeFactors
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' MYOG_ID1 <- HSMM[row.names(subset(fData(HSMM), gene_short_name %in% c("MYOG", "ID1"))),]
#' plot_genes_positive_cells(MYOG_ID1, grouping="Media", ncol=2)
#' }
plot_genes_positive_cells <- function(cds_subset,
                                      grouping = "State",
                                      min_expr=0.1,
                                      nrow=NULL,
                                      ncol=1,
                                      panel_order=NULL,
                                      plot_as_fraction=TRUE,
                                      label_by_short_name=TRUE,
                                      relative_expr=TRUE,
                                      plot_limits=c(0,100)){
  # Quiet R CMD check NOTE about non-standard evaluation below.
  percent <- NULL
  # Count-based (negative binomial) expression families can be normalized by
  # size factors; any other family is used as-is.
  if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")){
    integer_expression <- TRUE
  }else{
    integer_expression <- FALSE
    relative_expr <- TRUE
  }
  if (integer_expression) {
    marker_exprs <- exprs(cds_subset)
    if (relative_expr){
      if (is.null(sizeFactors(cds_subset))) {
        stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first")
      }
      # Divide each cell's counts by its size factor before rounding.
      marker_exprs <- Matrix::t(Matrix::t(marker_exprs) / sizeFactors(cds_subset))
    }
    marker_exprs_melted <- reshape2::melt(round(as.matrix(marker_exprs)))
  }else{
    # BUG FIX: this branch previously called reshape2::melt(exprs(marker_exprs)),
    # but `marker_exprs` is only defined in the integer-expression branch above,
    # so any non-count CellDataSet crashed here with an "object not found" error.
    # Melt the expression matrix of the input object instead, mirroring the
    # sibling plotting functions in this file.
    marker_exprs_melted <- reshape2::melt(as.matrix(exprs(cds_subset)))
  }
  colnames(marker_exprs_melted) <- c("f_id", "Cell", "expression")
  # Attach cell- and feature-level metadata to each melted expression value.
  marker_exprs_melted <- merge(marker_exprs_melted, pData(cds_subset), by.x="Cell", by.y="row.names")
  marker_exprs_melted <- merge(marker_exprs_melted, fData(cds_subset), by.x="f_id", by.y="row.names")
  # Label the facet panels by gene symbol when available, else by feature id.
  if (label_by_short_name == TRUE){
    if (is.null(marker_exprs_melted$gene_short_name) == FALSE){
      marker_exprs_melted$feature_label <- marker_exprs_melted$gene_short_name
      marker_exprs_melted$feature_label[is.na(marker_exprs_melted$feature_label)] <- marker_exprs_melted$f_id
    }else{
      marker_exprs_melted$feature_label <- marker_exprs_melted$f_id
    }
  }else{
    marker_exprs_melted$feature_label <- marker_exprs_melted$f_id
  }
  if (is.null(panel_order) == FALSE) {
    marker_exprs_melted$feature_label <- factor(marker_exprs_melted$feature_label, levels=panel_order)
  }
  # Per gene and per group: how many cells exceed min_expr, and what fraction.
  marker_counts <- plyr::ddply(marker_exprs_melted, c("feature_label", grouping), function(x) {
    data.frame(target=sum(x$expression > min_expr),
               target_fraction=sum(x$expression > min_expr)/nrow(x)) } )
  #print (head(marker_counts))
  if (plot_as_fraction){
    marker_counts$target_fraction <- marker_counts$target_fraction * 100
    qp <- ggplot(aes_string(x=grouping, y="target_fraction", fill=grouping), data=marker_counts) +
      ylab("Cells (percent)")
    if (is.null(plot_limits) == FALSE)
      qp <- qp + scale_y_continuous(limits=plot_limits)
  }else{
    qp <- ggplot(aes_string(x=grouping, y="target", fill=grouping), data=marker_counts) +
      ylab("Cells")
  }
  qp <- qp + facet_wrap(~feature_label, nrow=nrow, ncol=ncol, scales="free_y")
  qp <- qp + geom_bar(stat="identity") + monocle_theme_opts()
  return(qp)
}

#' Plots expression for one or more genes as a function of pseudotime
#'
#' @description Plots expression for one or more genes as a function of pseudotime.
#' Plotting allows you determine if the ordering produced by orderCells() is correct
#' and it does not need to be flipped using the "reverse" flag in orderCells
#'
#' @param cds_subset CellDataSet for the experiment
#' @param min_expr the minimum (untransformed) expression level to use in plotted the genes.
#' @param cell_size the size (in points) of each cell used in the plot
#' @param nrow the number of rows used when laying out the panels for each gene's expression
#' @param ncol the number of columns used when laying out the panels for each gene's expression
#' @param panel_order the order in which genes should be layed out (left-to-right, top-to-bottom)
#' @param color_by the cell attribute (e.g. 
the column of pData(cds)) to be used to color each cell
#' @param trend_formula the model formula to be used for fitting the expression trend over pseudotime
#' @param label_by_short_name label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param relative_expr Whether to transform expression into relative values
#' @param vertical_jitter A value passed to ggplot to jitter the points in the vertical dimension. Prevents overplotting, and is particularly helpful for rounded transcript count data.
#' @param horizontal_jitter A value passed to ggplot to jitter the points in the horizontal dimension. Prevents overplotting, and is particularly helpful for rounded transcript count data.
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom plyr ddply .
#' @importFrom reshape2 melt
#' @importFrom ggplot2 Position
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' my_genes <- row.names(subset(fData(HSMM), gene_short_name %in% c("CDK1", "MEF2C", "MYH3")))
#' cds_subset <- HSMM[my_genes,]
#' plot_genes_in_pseudotime(cds_subset, color_by="Time")
#' }
plot_genes_in_pseudotime <-function(cds_subset, min_expr=NULL, cell_size=0.75, nrow=NULL,
                                    ncol=1, panel_order=NULL, color_by="State",
                                    trend_formula="~ sm.ns(Pseudotime, df=3)",
                                    label_by_short_name=TRUE,
                                    relative_expr=TRUE,
                                    vertical_jitter=NULL,
                                    horizontal_jitter=NULL){
  # Dummy bindings to quiet R CMD check NOTEs for non-standard evaluation below.
  f_id <- NA
  Cell <- NA
  # Count-based (negative binomial) expression families can be normalized by
  # size factors; any other family is used as-is.
  if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")) {
    integer_expression <- TRUE
  }
  else {
    integer_expression <- FALSE
    relative_expr <- TRUE
  }
  if (integer_expression) {
    cds_exprs <- exprs(cds_subset)
    if (relative_expr) {
      if (is.null(sizeFactors(cds_subset))) {
        stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first")
      }
      # Divide each cell's counts by its size factor before rounding.
      cds_exprs <- Matrix::t(Matrix::t(cds_exprs)/sizeFactors(cds_subset))
    }
    cds_exprs <- reshape2::melt(round(as.matrix(cds_exprs)))
  }
  else {
    cds_exprs <- reshape2::melt(as.matrix(exprs(cds_subset)))
  }
  if (is.null(min_expr)) {
    min_expr <- cds_subset@lowerDetectionLimit
  }
  colnames(cds_exprs) <- c("f_id", "Cell", "expression")
  cds_pData <- pData(cds_subset)
  cds_fData <- fData(cds_subset)
  # Attach feature- and cell-level metadata to each melted expression row.
  cds_exprs <- merge(cds_exprs, cds_fData, by.x = "f_id", by.y = "row.names")
  cds_exprs <- merge(cds_exprs, cds_pData, by.x = "Cell", by.y = "row.names")
  #cds_exprs$f_id <- as.character(cds_exprs$f_id)
  #cds_exprs$Cell <- as.character(cds_exprs$Cell)
  if (integer_expression) {
    cds_exprs$adjusted_expression <- cds_exprs$expression
  }
  else {
    cds_exprs$adjusted_expression <- log10(cds_exprs$expression)
  }
  # trend_formula <- paste("adjusted_expression", trend_formula,
  #     sep = "")
  # Label the facet panels by gene symbol when available, else by feature id.
  if (label_by_short_name == TRUE) {
    if (is.null(cds_exprs$gene_short_name) == FALSE) {
      cds_exprs$feature_label <- as.character(cds_exprs$gene_short_name)
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    }
    else {
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  }
  else {
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  cds_exprs$f_id <- as.character(cds_exprs$f_id)
  cds_exprs$feature_label <- factor(cds_exprs$feature_label)
  # Fit a smooth expression trend over the observed pseudotime values; the
  # fitted curve is evaluated at each cell's pseudotime.
  new_data <- data.frame(Pseudotime = pData(cds_subset)$Pseudotime)
  model_expectation <- genSmoothCurves(cds_subset, cores=1, trend_formula = trend_formula,
                                       relative_expr = T, new_data = new_data)
  colnames(model_expectation) <- colnames(cds_subset)
  # Look up the fitted value for each (gene, cell) pair in the melted table.
  expectation <- ddply(cds_exprs, .(f_id, Cell), function(x) data.frame("expectation"=model_expectation[x$f_id, x$Cell]))
  cds_exprs <- merge(cds_exprs, expectation)
  #cds_exprs$expectation <- expectation#apply(cds_exprs,1, function(x) model_expectation[x$f_id, x$Cell])
  # Clamp both the observed and fitted values so the log axis stays finite.
  cds_exprs$expression[cds_exprs$expression < min_expr] <- min_expr
  cds_exprs$expectation[cds_exprs$expectation < min_expr] <- min_expr
  if (is.null(panel_order) == FALSE) {
    cds_exprs$feature_label <- factor(cds_exprs$feature_label,
                                      levels = panel_order)
  }
  q <- ggplot(aes(Pseudotime, expression), data = cds_exprs)
  if (is.null(color_by) ==
      FALSE) {
    q <- q + geom_point(aes_string(color = color_by), size = I(cell_size),
                        position=position_jitter(horizontal_jitter, vertical_jitter))
  }
  else {
    q <- q + geom_point(size = I(cell_size),
                        position=position_jitter(horizontal_jitter, vertical_jitter))
  }
  # Draw the fitted trend on top of the per-cell points.
  q <- q + geom_line(aes(x = Pseudotime, y = expectation), data = cds_exprs)
  q <- q + scale_y_log10() + facet_wrap(~feature_label, nrow = nrow,
                                        ncol = ncol, scales = "free_y")
  if (min_expr < 1) {
    q <- q + expand_limits(y = c(min_expr, 1))
  }
  if (relative_expr) {
    q <- q + ylab("Relative Expression")
  }
  else {
    q <- q + ylab("Absolute Expression")
  }
  q <- q + xlab("Pseudo-time")
  q <- q + monocle_theme_opts()
  q
}

#' Plots kinetic clusters of genes.
#'
#' @description returns a ggplot2 object showing the shapes of the
#' expression patterns followed by a set of pre-selected genes.
#' The topographic lines highlight the distributions of the kinetic patterns
#' relative to overall trend lines.
#'
#' @param cds CellDataSet for the experiment
#' @param clustering a clustering object produced by clusterCells
#' @param drawSummary whether to draw the summary line for each cluster
#' @param sumFun whether the function used to generate the summary for each cluster
#' @param ncol number of columns used to layout the faceted cluster panels
#' @param nrow number of columns used to layout the faceted cluster panels
#' @param row_samples how many genes to randomly select from the data
#' @param callout_ids a vector of gene names or gene ids to manually render as part of the plot
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom reshape2 melt
#' @importFrom stringr str_join
#' @importFrom ggplot2 Position
#' @import grid
#' @export
#' @examples
#' \dontrun{
#' full_model_fits <- fitModel(HSMM_filtered[sample(nrow(fData(HSMM_filtered)), 100),],
#' modelFormulaStr="~VGAM::bs(Pseudotime)")
#' expression_curve_matrix <- responseMatrix(full_model_fits)
#' clusters <- clusterGenes(expression_curve_matrix, k=4)
#' 
plot_clusters(HSMM_filtered[ordering_genes,], clusters)
#' }
plot_clusters<-function(cds,
                        clustering,
                        drawSummary=TRUE,
                        sumFun=mean_cl_boot,
                        ncol=NULL,
                        nrow=NULL,
                        row_samples=NULL,
                        callout_ids=NULL){
  .Deprecated("plot_genes_heatmap")
  # Turn the cluster expression matrix into a data.frame carrying the gene
  # ids and each gene's cluster assignment.
  m <- as.data.frame(clustering$exprs)
  m$ids <- rownames(clustering$exprs)
  # Use the clustering's own labels when available; otherwise fall back to
  # the raw cluster indices.
  if (is.null(clustering$labels) == FALSE) {
    m$cluster = factor(clustering$labels[clustering$clustering], levels = levels(clustering$labels))
  }else{
    m$cluster <- factor(clustering$clustering)
  }
  # Facet labels of the form "<cluster> (<size>)".
  cluster_sizes <- as.data.frame(table(m$cluster))
  cluster_sizes$Freq <- paste("(", cluster_sizes$Freq, ")")
  facet_labels <- str_join(cluster_sizes$Var1, cluster_sizes$Freq, sep=" ")
  #update the function
  # Melt to long format and attach the per-cell phenotype data (Pseudotime).
  m.melt <- melt(m, id.vars = c("ids", "cluster"))
  m.melt <- merge(m.melt, pData(cds), by.x="variable", by.y="row.names")
  # Optionally subsample the rows to keep the plot light.
  if (is.null(row_samples) == FALSE){
    m.melt <- m.melt[sample(nrow(m.melt), row_samples),]
  }
  c <- ggplot(m.melt) + facet_wrap("cluster", ncol=ncol, nrow=nrow, scales="free_y")
  #c <- c + stat_density2d(aes(x = Pseudotime, y = value), geom="polygon", fill="white", color="black", size=I(0.1)) + facet_wrap("cluster", ncol=ncol, nrow=nrow)
  # Draw a per-cluster summary trend (sumFun, e.g. bootstrap CI of the mean).
  if (drawSummary) {
    c <- c + stat_summary(aes(x = Pseudotime, y = value, group = 1),
                          fun.data = sumFun, color = "red",
                          alpha = 0.2, size = 0.5, geom = "smooth")
  }
  #cluster_medians <- subset(m.melt, ids %in% clustering$medoids)
  #c <- c + geom_line()
  #c <- c + geom_line(aes(x=Pseudotime, y=value), data=cluster_medians, color=I("red"))
  c <- c + scale_color_hue(l = 50, h.start = 200) + theme(axis.text.x = element_text(angle = 0,
                                                                                     hjust = 0)) + xlab("Pseudo-time") + ylab("Expression")
  c <- c + theme(strip.background = element_rect(colour = 'white', fill = 'white')) +
    theme(panel.border = element_blank()) +
    theme(legend.position="none") +
    theme(panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank()) +
    theme(panel.grid.major.x = element_blank(), panel.grid.major.y = element_blank())
  # if (draw_cluster_size){
  #   cluster_sizes <- as.data.frame(table(m$cluster))
  #   colnames(cluster_sizes) <- c("cluster", "Freq")
  #   cluster_sizes <- cbind (cluster_sizes, Pseudotime = cluster_label_text_x, value = cluster_label_text_y)
  #   c <- c + geom_text(aes(x=Pseudotime, y=value, label=Freq), data=cluster_sizes, size=cluster_label_text_size)
  # }
  # Highlight specific genes of interest on top of the cluster facets.
  if (is.null(callout_ids) == FALSE) {
    callout_melt <- subset(m.melt, ids %in% callout_ids)
    c <- c + geom_line(aes(x=Pseudotime, y=value), data=callout_melt, color=I("steelblue"))
  }
  c <- c + monocle_theme_opts()
  #c <- facet_wrap_labeller(c, facet_labels)
  c
}
# 
# #' Plots a pseudotime-ordered, row-centered heatmap
# #' @export
# plot_genes_heatmap <- function(cds, 
#                                rescaling='row', 
#                                clustering='row', 
#                                labCol=FALSE, 
#                                labRow=TRUE, 
#                                logMode=TRUE, 
#                                use_vst=TRUE,
#                                border=FALSE, 
#                                heatscale=c(low='steelblue',mid='white',high='tomato'), 
#                                heatMidpoint=0,
#                                method="none",
#                                scaleMax=2, 
#                                scaleMin=-2, 
#                                relative_expr=TRUE,
#                                ...){
# 
#   ## the function can be viewed as a two step process
#   ## 1. using the reshape package and other funcs the data is clustered, scaled, and reshaped
#   ## using simple options or by a user supplied function
#   ## 2. 
with the now resahped data the plot, the chosen labels and plot style are built # FM <- exprs(cds) # # if (cds@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")){ # integer_expression <- TRUE # }else{ # integer_expression <- FALSE # relative_expr <- TRUE # } # # if (integer_expression) # { # if (relative_expr){ # if (is.null(sizeFactors(cds))) # { # stop("Error: you must call estimateSizeFactors() first") # } # FM <- Matrix::t(Matrix::t(FM) / sizeFactors(cds)) # } # FM <- round(FM) # } # # m=FM # # if (is.null(fData(cds)$gene_short_name) == FALSE){ # feature_labels <- fData(cds)$gene_short_name # feature_labels[is.na(feature_labels)] <- fData(cds)$f_id # row.names(m) <- feature_labels # } # # #remove genes with no expression in any condition # m=m[!apply(m,1,sum)==0,] # # if (use_vst && is.null(cds@dispFitInfo[["blind"]]$disp_func) == FALSE){ # m = vstExprs(cds, expr_matrix=m) # }else if(logMode){ # m = log10(m+pseudocount) # } # # #remove genes with no sd # #m=m[!apply(m,1,sd)==0,] # # ## you can either scale by row or column not both! 
# ## if you wish to scale by both or use a different scale method then simply supply a scale # ## function instead NB scale is a base funct # # ## I have supplied the default cluster and euclidean distance (JSdist) - and chose to cluster after scaling # ## if you want a different distance/cluster method-- or to cluster and then scale # ## then you can supply a custom function # # if(!is.function(method)){ # method = function(mat){as.dist((1 - cor(Matrix::t(mat)))/2)} # } # # ## this is just reshaping into a ggplot format matrix and making a ggplot layer # # if(is.function(rescaling)) # { # m=rescaling(m) # } else { # if(rescaling=='column'){ # m=m[!apply(m,2,sd)==0,] # m=scale(m, center=TRUE) # m[is.nan(m)] = 0 # m[m>scaleMax] = scaleMax # m[m<scaleMin] = scaleMin # } # if(rescaling=='row'){ # m=m[!apply(m,1,sd)==0,] # m=Matrix::t(scale(Matrix::t(m),center=TRUE)) # m[is.nan(m)] = 0 # m[m>scaleMax] = scaleMax # m[m<scaleMin] = scaleMin # } # } # # # If we aren't going to re-ordering the columns, order them by Pseudotime # if (clustering %in% c("row", "none")) # m = m[,row.names(pData(cds)[order(-pData(cds)$Pseudotime),])] # # if(clustering=='row') # m=m[hclust(method(m))$order, ] # if(clustering=='column') # m=m[,hclust(method(Matrix::t(m)))$order] # if(clustering=='both') # m=m[hclust(method(m))$order ,hclust(method(Matrix::t(m)))$order] # # # rows=dim(m)[1] # cols=dim(m)[2] # # # # # if(logMode) { # # melt.m=cbind(rowInd=rep(1:rows, times=cols), colInd=rep(1:cols, each=rows), reshape2::melt( log10(m+pseudocount))) # # }else{ # # melt.m=cbind(rowInd=rep(1:rows, times=cols), colInd=rep(1:cols, each=rows), reshape2::melt(m)) # # } # # melt.m=cbind(rowInd=rep(1:rows, times=cols), colInd=rep(1:cols, each=rows), reshape2::melt(m)) # # g=ggplot(data=melt.m) # # ## add the heat tiles with or without a white border for clarity # # if(border==TRUE) # g2=g+geom_raster(aes(x=colInd,y=rowInd, fill=value),colour='grey') # if(border==FALSE) # 
g2=g+geom_raster(aes(x=colInd,y=rowInd,ymax=rowInd, fill=value)) # # ## add axis labels either supplied or from the colnames rownames of the matrix # # if(labCol==TRUE) # { # g2=g2+scale_x_continuous(breaks=(1:cols)-0.5, labels=colnames(m)) # } # if(labCol==FALSE) # { # g2=g2+scale_x_continuous(breaks=(1:cols)-0.5, labels=rep('',cols)) # } # # # if(labRow==TRUE) # { # g2=g2+scale_y_continuous(breaks=(1:rows)-0.5, labels=rownames(m)) # } # if(labRow==FALSE) # { # g2=g2+scale_y_continuous(breaks=(1:rows)-0.5, labels=rep('',rows)) # } # # # Get rid of the ticks, they get way too dense with lots of rows # g2 <- g2 + theme(axis.ticks = element_blank()) # # ## get rid of grey panel background and gridlines # # g2=g2+theme(panel.grid.minor=element_line(colour=NA), panel.grid.major=element_line(colour=NA), # panel.background=element_rect(fill=NA, colour=NA)) # # ##adjust x-axis labels # g2=g2+theme(axis.text.x=element_text(angle=-90, hjust=0)) # # #write(paste(c("Length of heatscale is :", length(heatscale))), stderr()) # # if(is.function(rescaling)) # { # # }else{ # if(rescaling=='row' || rescaling == 'column'){ # legendTitle <- "Relative\nexpression" # }else{ # if (logMode) # { # legendTitle <- bquote(paste(log[10]," FPKM + ",.(pseudocount),sep="")) # #legendTitle <- paste(expression(plain(log)[10])," FPKM + ",pseudocount,sep="") # } else { # legendTitle <- "FPKM" # } # } # } # # if (length(heatscale) == 2){ # g2 <- g2 + scale_fill_gradient(low=heatscale[1], high=heatscale[2], name=legendTitle) # } else if (length(heatscale) == 3) { # if (is.null(heatMidpoint)) # { # heatMidpoint = (max(m) + min(m)) / 2.0 # #write(heatMidpoint, stderr()) # } # g2 <- g2 + theme(panel.border = element_blank()) # g2 <- g2 + scale_fill_gradient2(low=heatscale[1], mid=heatscale[2], high=heatscale[3], midpoint=heatMidpoint, name=legendTitle) # }else { # g2 <- g2 + scale_fill_gradientn(colours=heatscale, name=legendTitle) # } # # 
#g2<-g2+scale_x_discrete("",breaks=tracking_ids,labels=gene_short_names)
#
# g2 <- g2 + theme(axis.title.x=element_blank(), axis.title.y=element_blank())
#
# ## finally add the fill colour ramp of your choice (default is blue to red)-- and return
# return (g2)
# }

# Deprecated alias kept for backward compatibility; forwards all arguments
# to plot_pseudotime_heatmap().
plot_genes_heatmap <- function(...){
  .Deprecated("plot_pseudotime_heatmap")
  plot_pseudotime_heatmap(...)
}

#' Plots a pseudotime-ordered, row-centered heatmap
#'
#' @description The function plot_pseudotime_heatmap takes a CellDataSet object
#' (usually containing only a subset of significant genes) and generates smooth expression
#' curves much like plot_genes_in_pseudotime.
#' Then, it clusters these genes and plots them using the pheatmap package.
#' This allows you to visualize modules of genes that co-vary across pseudotime.
#'
#' @param cds_subset CellDataSet for the experiment (normally only the branching genes detected with branchTest)
#' @param cluster_rows Whether to cluster the rows of the heatmap.
#' @param hclust_method The method used by pheatmap to perform hierarchical clustering of the rows.
#' @param num_clusters Number of clusters for the heatmap of branch genes
#' @param hmcols The color scheme for drawing the heatmap.
#' @param add_annotation_row Additional annotations to show for each row in the heatmap. Must be a dataframe with one row for each row in the fData table of cds_subset, with matching IDs.
#' @param add_annotation_col Additional annotations to show for each column in the heatmap. Must be a dataframe with one row for each cell in the pData table of cds_subset, with matching IDs.
#' @param show_rownames Whether to show the names for each row in the table.
#' @param use_gene_short_name Whether to use the short names for each row. If FALSE, uses row IDs from the fData table.
#' @param scale_max The maximum value (in standard deviations) to show in the heatmap. Values larger than this are set to the max.
#' @param scale_min The minimum value (in standard deviations) to show in the heatmap. Values smaller than this are set to the min.
#' @param norm_method Determines how to transform expression values prior to rendering
#' @param trend_formula A formula string specifying the model used in fitting the spline curve for each gene/feature.
#' @param return_heatmap Whether to return the pheatmap object to the user.
#' @param cores Number of cores to use when smoothing the expression curves shown in the heatmap.
#' @return A list of heatmap_matrix (expression matrix for the branch committment), ph (pheatmap heatmap object),
#' annotation_row (annotation data.frame for the row), annotation_col (annotation data.frame for the column).
#' @import pheatmap
#' @importFrom stats sd as.dist cor cutree
#' @export
#'
plot_pseudotime_heatmap <- function(cds_subset,
                                    cluster_rows = TRUE,
                                    hclust_method = "ward.D2",
                                    num_clusters = 6,
                                    hmcols = NULL,
                                    add_annotation_row = NULL,
                                    add_annotation_col = NULL,
                                    show_rownames = FALSE,
                                    use_gene_short_name = TRUE,
                                    norm_method = c("log", "vstExprs"),
                                    scale_max=3,
                                    scale_min=-3,
                                    trend_formula = '~sm.ns(Pseudotime, df=3)',
                                    return_heatmap=FALSE,
                                    cores=1){
  # Never ask pheatmap for more clusters than there are genes to cluster.
  num_clusters <- min(num_clusters, nrow(cds_subset))
  pseudocount <- 1
  # Evaluate each gene's fitted curve at 100 evenly spaced pseudotime points
  # spanning the observed pseudotime range; these become the heatmap columns.
  newdata <- data.frame(Pseudotime = seq(min(pData(cds_subset)$Pseudotime), max(pData(cds_subset)$Pseudotime),length.out = 100))
  # genSmoothCurves is a project helper that fits trend_formula per gene and
  # returns a genes x 100 matrix of smoothed expression values.
  m <- genSmoothCurves(cds_subset, cores=cores, trend_formula = trend_formula,
                       relative_expr = T, new_data = newdata)

  #remove genes with no expression in any condition
  m=m[!apply(m,1,sum)==0,]

  norm_method <- match.arg(norm_method)

  # FIXME: this needs to check that vst values can even be computed. (They can only be if we're using NB as the expressionFamily)
  if(norm_method == 'vstExprs' && is.null(cds_subset@dispFitInfo[["blind"]]$disp_func) == FALSE) {
    m = vstExprs(cds_subset, expr_matrix=m)
  }
  else if(norm_method == 'log') {
    m = log10(m+pseudocount)
  }

  # Row-center the data.
  # Drop constant rows first (scale() would divide by a zero sd), then z-score
  # each gene across pseudotime and clamp to [scale_min, scale_max].
  m=m[!apply(m,1,sd)==0,]
  m=Matrix::t(scale(Matrix::t(m),center=TRUE))
  m=m[is.na(row.names(m)) == FALSE,]
  m[is.nan(m)] = 0
  m[m>scale_max] = scale_max
  m[m<scale_min] = scale_min

  heatmap_matrix <- m

  # Correlation-based distance between gene profiles; NA correlations
  # (e.g. from degenerate rows) are treated as maximally distant.
  row_dist <- as.dist((1 - cor(Matrix::t(heatmap_matrix)))/2)
  row_dist[is.na(row_dist)] <- 1

  if(is.null(hmcols)) {
    bks <- seq(-3.1,3.1, by = 0.1)
    hmcols <- blue2green2red(length(bks) - 1)
  }
  else {
    bks <- seq(-3.1,3.1, length.out = length(hmcols))
  }

  # First (silent) pheatmap call: only used to obtain the row dendrogram so
  # cluster assignments can be derived via cutree below.
  ph <- pheatmap(heatmap_matrix,
                 useRaster = T,
                 cluster_cols=FALSE,
                 cluster_rows=cluster_rows,
                 show_rownames=F,
                 show_colnames=F,
                 clustering_distance_rows=row_dist,
                 clustering_method = hclust_method,
                 cutree_rows=num_clusters,
                 silent=TRUE,
                 filename=NA,
                 breaks=bks,
                 border_color = NA,
                 color=hmcols)

  annotation_row <- data.frame(Cluster=factor(cutree(ph$tree_row, num_clusters)))

  if(!is.null(add_annotation_row)) {
    old_colnames_length <- ncol(annotation_row)
    annotation_row <- cbind(annotation_row, add_annotation_row[row.names(annotation_row), ])
    colnames(annotation_row)[(old_colnames_length+1):ncol(annotation_row)] <- colnames(add_annotation_row)
    # annotation_row$bif_time <- add_annotation_row[as.character(fData(absolute_cds[row.names(annotation_row), ])$gene_short_name), 1]
  }

  if(!is.null(add_annotation_col)) {
    # Columns are the 100 smoothed pseudotime points, so the annotation must
    # have exactly 100 rows to line up with them.
    if(nrow(add_annotation_col) != 100) {
      stop('add_annotation_col should have only 100 rows (check genSmoothCurves before you supply the annotation data)!')
    }
    annotation_col <- add_annotation_col
  }
  else {
    annotation_col <- NA
  }

  # Optionally relabel rows with gene_short_name, falling back to the
  # feature ID wherever the short name is missing.
  if (use_gene_short_name == TRUE) {
    if (is.null(fData(cds_subset)$gene_short_name) == FALSE) {
      feature_label <- as.character(fData(cds_subset)[row.names(heatmap_matrix), 'gene_short_name'])
      feature_label[is.na(feature_label)] <- row.names(heatmap_matrix)

      row_ann_labels <- as.character(fData(cds_subset)[row.names(annotation_row), 'gene_short_name'])
      row_ann_labels[is.na(row_ann_labels)] <- row.names(annotation_row)
    }
    else {
      feature_label <- row.names(heatmap_matrix)
      row_ann_labels <- row.names(annotation_row)
    }
  }
  else {
    feature_label <- row.names(heatmap_matrix)
    row_ann_labels <- row.names(annotation_row)
  }

  row.names(heatmap_matrix) <- feature_label
  row.names(annotation_row) <- row_ann_labels

  colnames(heatmap_matrix) <- c(1:ncol(heatmap_matrix))

  # Second pheatmap call: the real rendering, now with annotations and labels.
  ph_res <- pheatmap(heatmap_matrix[, ], #ph$tree_row$order
                     useRaster = T,
                     cluster_cols = FALSE,
                     cluster_rows = cluster_rows,
                     show_rownames=show_rownames,
                     show_colnames=F,
                     #scale="row",
                     clustering_distance_rows=row_dist, #row_dist
                     clustering_method = hclust_method, #ward.D2
                     cutree_rows=num_clusters,
                     # cutree_cols = 2,
                     annotation_row=annotation_row,
                     annotation_col=annotation_col,
                     treeheight_row = 20,
                     breaks=bks,
                     fontsize = 6,
                     color=hmcols,
                     border_color = NA,
                     silent=TRUE,
                     filename=NA
  )

  # pheatmap was called with silent=TRUE, so draw its gtable explicitly.
  grid::grid.rect(gp=grid::gpar("fill", col=NA))
  grid::grid.draw(ph_res$gtable)
  if (return_heatmap){
    return(ph_res)
  }
}

#' Plot the branch genes in pseudotime with separate branch curves.
#'
#' @description Works similarly to plot_genes_in_pseudotime except it shows
#' one kinetic trend for each lineage.
#'
#' @details This plotting function is used to make the branching plots for a branch dependent gene goes through the progenitor state
#' and bifurcating into two distinct branches (Similar to the pitch-fork bifurcation in dynamic systems). In order to make the
#' bifurcation plot, we first duplicated the progenitor states and by default stretch each branch into maturation level 0-100.
#' Then we fit two natural spline curves for each branch using the VGAM package.
#'
#' @param cds CellDataSet for the experiment
#' @param branch_states The states for two branching branches
#' @param branch_point The ID of the branch point to analyze. Can only be used when reduceDimension is called with method = "DDRTree".
#' @param branch_labels The names for each branching branch
#' @param method The method to draw the curve for the gene expression branching pattern, either loess ('loess') or VGLM fitting ('fitting')
#' @param min_expr The minimum (untransformed) expression level to use in plotting the genes.
#' @param cell_size The size (in points) of each cell used in the plot
#' @param nrow Number of rows used to layout the faceted cluster panels
#' @param ncol Number of columns used to layout the faceted cluster panels
#' @param panel_order A character vector of gene short names (or IDs, if that's what you're using), specifying order in which genes should be laid out (left-to-right, top-to-bottom)
#' @param color_by The cell attribute (e.g. the column of pData(cds)) to be used to color each cell
#' @param expression_curve_linetype_by The cell attribute (e.g. the column of pData(cds)) to be used for the linetype of each branch curve
#' @param trend_formula The model formula to be used for fitting the expression trend over pseudotime
#' @param reducedModelFormulaStr A formula specifying a null model. If used, the plot shows a p value from the likelihood ratio test that uses trend_formula as the full model
#' @param label_by_short_name Whether to label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param relative_expr Whether or not the plot should use relative expression values (only relevant for CellDataSets using transcript counts)
#' @param ... Additional arguments passed on to branchTest. Only used when reducedModelFormulaStr is not NULL.
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom plyr ddply
#' @importFrom reshape2 melt
#' @importFrom BiocGenerics sizeFactors
#' @export
plot_genes_branched_pseudotime <- function (cds,
                                            branch_states = NULL,
                                            branch_point=1,
                                            branch_labels = NULL,
                                            method = "fitting",
                                            min_expr = NULL,
                                            cell_size = 0.75,
                                            nrow = NULL,
                                            ncol = 1,
                                            panel_order = NULL,
                                            color_by = "State",
                                            expression_curve_linetype_by = "Branch",
                                            trend_formula = "~ sm.ns(Pseudotime, df=3) * Branch",
                                            reducedModelFormulaStr = NULL,
                                            label_by_short_name = TRUE,
                                            relative_expr = TRUE,
                                            #gene_pairs = NULL,
                                            ...)
{
  # Placeholder so R CMD check does not flag the non-standard-evaluation use
  # of Branch inside ggplot aesthetics below.
  Branch <- NA
  if (is.null(reducedModelFormulaStr) == FALSE) {
    # Run the full-vs-reduced likelihood ratio test so each facet can display
    # its p value.
    pval_df <- branchTest(cds,
                          branch_states=branch_states,
                          branch_point=branch_point,
                          fullModelFormulaStr = trend_formula,
                          reducedModelFormulaStr = "~ sm.ns(Pseudotime, df=3)",
                          ...)
    fData(cds)[, "pval"] <- pval_df[row.names(cds), 'pval']
  }
  if("Branch" %in% all.vars(terms(as.formula(trend_formula)))) {
    #only when Branch is in the model formula we will duplicate the "progenitor" cells
    cds_subset <- buildBranchCellDataSet(cds = cds,
                                         branch_states = branch_states,
                                         branch_point=branch_point,
                                         branch_labels = branch_labels,
                                         progenitor_method = 'duplicate',
                                         ...)
  }
  else {
    cds_subset <- cds
    pData(cds_subset)$Branch <- pData(cds_subset)$State
  }

  # Count-based families use rounded (optionally size-factor-normalized)
  # counts; other families use the raw expression values.
  if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")) {
    integer_expression <- TRUE
  }
  else {
    integer_expression <- FALSE
  }
  if (integer_expression) {
    CM <- exprs(cds_subset)
    if (relative_expr){
      if (is.null(sizeFactors(cds_subset))) {
        stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first")
      }
      CM <- Matrix::t(Matrix::t(CM)/sizeFactors(cds_subset))
    }
    cds_exprs <- reshape2::melt(round(as.matrix(CM)))
  }
  else {
    cds_exprs <- reshape2::melt(exprs(cds_subset))
  }
  if (is.null(min_expr)) {
    min_expr <- cds_subset@lowerDetectionLimit
  }
  # Long-format table: one row per (gene, cell), joined with feature and
  # phenotype metadata.
  colnames(cds_exprs) <- c("f_id", "Cell", "expression")
  cds_pData <- pData(cds_subset)
  cds_fData <- fData(cds_subset)
  cds_exprs <- merge(cds_exprs, cds_fData, by.x = "f_id", by.y = "row.names")
  cds_exprs <- merge(cds_exprs, cds_pData, by.x = "Cell", by.y = "row.names")
  if (integer_expression) {
    cds_exprs$adjusted_expression <- round(cds_exprs$expression)
  }
  else {
    cds_exprs$adjusted_expression <- log10(cds_exprs$expression)
  }
  if (label_by_short_name == TRUE) {
    if (is.null(cds_exprs$gene_short_name) == FALSE) {
      cds_exprs$feature_label <- as.character(cds_exprs$gene_short_name)
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    }
    else {
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  }
  else {
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  cds_exprs$feature_label <- as.factor(cds_exprs$feature_label)
  # trend_formula <- paste("adjusted_expression", trend_formula,
  # sep = "")
  cds_exprs$Branch <- as.factor(cds_exprs$Branch)

  # Fit the full model at every cell's (Pseudotime, Branch) and look up the
  # fitted value for each (gene, cell) row of the long table.
  new_data <- data.frame(Pseudotime = pData(cds_subset)$Pseudotime, Branch = pData(cds_subset)$Branch)

  full_model_expectation <- genSmoothCurves(cds_subset, cores=1, trend_formula = trend_formula,
                                            relative_expr = T, new_data = new_data)
  colnames(full_model_expectation) <- colnames(cds_subset)

  cds_exprs$full_model_expectation <- apply(cds_exprs,1, function(x) full_model_expectation[x[2], x[1]])
  if(!is.null(reducedModelFormulaStr)){
    reduced_model_expectation <- genSmoothCurves(cds_subset, cores=1, trend_formula = reducedModelFormulaStr,
                                                 relative_expr = T, new_data = new_data)
    colnames(reduced_model_expectation) <- colnames(cds_subset)
    cds_exprs$reduced_model_expectation <- apply(cds_exprs,1, function(x) reduced_model_expectation[x[2], x[1]])
  }

  # FIXME: If you want to show the bifurcation time for each gene, this function
  # should just compute it. Passing it in as a dataframe is just too complicated
  # and will be hard on the user.
  # if(!is.null(bifurcation_time)){
  #   cds_exprs$bifurcation_time <- bifurcation_time[as.vector(cds_exprs$gene_short_name)]
  # }
  if (method == "loess")
    cds_exprs$expression <- cds_exprs$expression + cds@lowerDetectionLimit
  if (label_by_short_name == TRUE) {
    if (is.null(cds_exprs$gene_short_name) == FALSE) {
      cds_exprs$feature_label <- as.character(cds_exprs$gene_short_name)
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    }
    else {
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  }
  else {
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  cds_exprs$feature_label <- factor(cds_exprs$feature_label)
  if (is.null(panel_order) == FALSE) {
    cds_exprs$feature_label <- factor(cds_exprs$feature_label,
                                      levels = panel_order)
  }
  # Floor both observed and fitted values at min_expr so the log-scale plot
  # has no undefined points.
  cds_exprs$expression[is.na(cds_exprs$expression)] <- min_expr
  cds_exprs$expression[cds_exprs$expression < min_expr] <- min_expr
  cds_exprs$full_model_expectation[is.na(cds_exprs$full_model_expectation)] <- min_expr
  cds_exprs$full_model_expectation[cds_exprs$full_model_expectation < min_expr] <- min_expr

  if(!is.null(reducedModelFormulaStr)){
    cds_exprs$reduced_model_expectation[is.na(cds_exprs$reduced_model_expectation)] <- min_expr
    cds_exprs$reduced_model_expectation[cds_exprs$reduced_model_expectation < min_expr] <- min_expr
  }

  cds_exprs$State <- as.factor(cds_exprs$State)
  cds_exprs$Branch <- as.factor(cds_exprs$Branch)

  q <- ggplot(aes(Pseudotime, expression), data = cds_exprs)
  # if (!is.null(bifurcation_time)) {
  #   q <- q + geom_vline(aes(xintercept = bifurcation_time),
  #                       color = "black", linetype = "longdash")
  # }
  if (is.null(color_by) == FALSE) {
    q <- q + geom_point(aes_string(color = color_by), size = I(cell_size))
  }
  if (is.null(reducedModelFormulaStr) == FALSE)
    q <- q + scale_y_log10() + facet_wrap(~feature_label +
                                            pval, nrow = nrow, ncol = ncol, scales = "free_y")
  else q <- q + scale_y_log10() + facet_wrap(~feature_label,
                                             nrow = nrow, ncol = ncol, scales = "free_y")
  if (method == "loess")
    q <- q + stat_smooth(aes(fill = Branch, color = Branch),
                         method = "loess")
  else if (method == "fitting") {
    # NOTE(review): the curve linetype is hard-coded to "Branch" here;
    # the expression_curve_linetype_by argument is not consulted.
    q <- q + geom_line(aes_string(x = "Pseudotime", y = "full_model_expectation",
                                  linetype = "Branch"), data = cds_exprs) #+ scale_color_manual(name = "Type", values = c(colour_cell, colour), labels = c("Pre-branch", "AT1", "AT2", "AT1", "AT2")
  }

  if(!is.null(reducedModelFormulaStr)) {
    q <- q + geom_line(aes_string(x = "Pseudotime", y = "reduced_model_expectation"),
                       color = 'black', linetype = 2, data =  cds_exprs)
  }

  q <- q + ylab("Expression") + xlab("Pseudotime (stretched)")

  q <- q + monocle_theme_opts()
  q + expand_limits(y = min_expr)
}

#' Not sure we're ready to release this one quite yet:
#' Plot the branch genes in pseudotime with separate branch curves
#' @param cds CellDataSet for the experiment
#' @param rowgenes Gene ids or short names to be arrayed on the vertical axis.
#' @param colgenes Gene ids or short names to be arrayed on the horizontal axis #' @param relative_expr Whether to transform expression into relative values #' @param min_expr The minimum level of expression to show in the plot #' @param cell_size A number how large the cells should be in the plot #' @param label_by_short_name a boolean that indicates whether cells should be labeled by their short name #' @param show_density a boolean that indicates whether a 2D density estimation should be shown in the plot #' @param round_expr a boolean that indicates whether cds_expr values should be rounded or not #' @return a ggplot2 plot object #' @import ggplot2 #' @importFrom reshape2 melt plot_coexpression_matrix <- function(cds, rowgenes, colgenes, relative_expr=TRUE, min_expr=NULL, cell_size=0.85, label_by_short_name=TRUE, show_density=TRUE, round_expr=FALSE){ gene_short_name <- NA f_id <- NA adjusted_expression.x <- NULL adjusted_expression.y <- NULL ..density.. <- NULL row_gene_ids <- row.names(subset(fData(cds), gene_short_name %in% rowgenes)) row_gene_ids <- union(row_gene_ids, intersect(rowgenes, row.names(fData(cds)))) col_gene_ids <- row.names(subset(fData(cds), gene_short_name %in% colgenes)) col_gene_ids <- union(col_gene_ids, intersect(colgenes, row.names(fData(cds)))) cds_subset <- cds[union(row_gene_ids, col_gene_ids),] if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")){ integer_expression <- TRUE }else{ integer_expression <- FALSE relative_expr <- TRUE } if (integer_expression) { cds_exprs <- exprs(cds_subset) if (relative_expr){ if (is.null(sizeFactors(cds_subset))) { stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first") } cds_exprs <- Matrix::t(Matrix::t(cds_exprs) / sizeFactors(cds_subset)) } if (round_expr){ cds_exprs <- reshape2::melt(round(as.matrix(cds_exprs))) } else { cds_exprs <- reshape2::melt(as.matrix(cds_exprs)) } }else{ cds_exprs <- 
reshape2::melt(exprs(cds_subset)) } if (is.null(min_expr)){ min_expr <- cds_subset@lowerDetectionLimit } colnames(cds_exprs) <- c("f_id", "Cell", "expression") cds_exprs$expression[cds_exprs$expression < min_expr] <- min_expr cds_pData <- pData(cds_subset) cds_fData <- fData(cds_subset) cds_exprs <- merge(cds_exprs, cds_fData, by.x="f_id", by.y="row.names") cds_exprs$adjusted_expression <- cds_exprs$expression #cds_exprs$adjusted_expression <- log10(cds_exprs$adjusted_expression + abs(rnorm(nrow(cds_exprs), min_expr, sqrt(min_expr)))) if (label_by_short_name == TRUE){ if (is.null(cds_exprs$gene_short_name) == FALSE){ cds_exprs$feature_label <- as.character(cds_exprs$gene_short_name) cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id }else{ cds_exprs$feature_label <- cds_exprs$f_id } }else{ cds_exprs$feature_label <- cds_exprs$f_id } cds_exprs$feature_label <- factor(cds_exprs$feature_label) row_cds_exprs <- subset(cds_exprs, f_id %in% row_gene_ids) col_cds_exprs <- subset(cds_exprs, f_id %in% col_gene_ids) joined_exprs <- merge(row_cds_exprs, col_cds_exprs, by="Cell") cds_exprs <- joined_exprs cds_exprs <- merge(cds_exprs, cds_pData, by.x="Cell", by.y="row.names") cds_exprs <- subset(cds_exprs, adjusted_expression.x > min_expr | adjusted_expression.y > min_expr) q <- ggplot(aes(adjusted_expression.x, adjusted_expression.y), data=cds_exprs, size=I(1)) if (show_density){ q <- q + stat_density2d(geom="raster", aes(fill = ..density..), contour = FALSE) + scale_fill_gradient(low="white", high="red") } q <- q + scale_x_log10() + scale_y_log10() + geom_point(color=I("black"), size=I(cell_size * 1.50)) + geom_point(color=I("white"), size=I(cell_size)) + facet_grid(feature_label.x ~ feature_label.y, scales="free") #scale_color_brewer(palette="Set1") + if (min_expr < 1) { q <- q + expand_limits(y=c(min_expr, 1), x=c(min_expr, 1)) } #q <- q + monocle_theme_opts() q } #The following code is swipped from colorRamps package which is used to make the 
pallette table.ramp <- function(n, mid = 0.5, sill = 0.5, base = 1, height = 1) { x <- seq(0, 1, length.out = n) y <- rep(0, length(x)) sill.min <- max(c(1, round((n - 1) * (mid - sill / 2)) + 1)) sill.max <- min(c(n, round((n - 1) * (mid + sill / 2)) + 1)) y[sill.min:sill.max] <- 1 base.min <- round((n - 1) * (mid - base / 2)) + 1 base.max <- round((n - 1) * (mid + base / 2)) + 1 xi <- base.min:sill.min yi <- seq(0, 1, length.out = length(xi)) i <- which(xi > 0 & xi <= n) y[xi[i]] <- yi[i] xi <- sill.max:base.max yi <- seq(1, 0, length.out = length(xi)) i <- which(xi > 0 & xi <= n) y[xi[i]] <- yi[i] height * y } #' @importFrom grDevices rgb rgb.tables <- function(n, red = c(0.75, 0.25, 1), green = c(0.5, 0.25, 1), blue = c(0.25, 0.25, 1)) { rr <- do.call("table.ramp", as.list(c(n, red))) gr <- do.call("table.ramp", as.list(c(n, green))) br <- do.call("table.ramp", as.list(c(n, blue))) rgb(rr, gr, br) } matlab.like <- function(n) rgb.tables(n) matlab.like2 <- function(n) rgb.tables(n, red = c(0.8, 0.2, 1), green = c(0.5, 0.4, 0.8), blue = c(0.2, 0.2, 1)) blue2green2red <- matlab.like2 #' Create a heatmap to demonstrate the bifurcation of gene expression along two branchs #' #' @description returns a heatmap that shows changes in both lineages at the same time. #' It also requires that you choose a branch point to inspect. #' Columns are points in pseudotime, rows are genes, and the beginning of pseudotime is in the middle of the heatmap. #' As you read from the middle of the heatmap to the right, you are following one lineage through pseudotime. As you read left, the other. #' The genes are clustered hierarchically, so you can visualize modules of genes that have similar lineage-dependent expression patterns. #' #' @param cds_subset CellDataSet for the experiment (normally only the branching genes detected with branchTest) #' @param branch_point The ID of the branch point to visualize. Can only be used when reduceDimension is called with method = "DDRTree". 
#' @param branch_states The two states to compare in the heatmap. Mutually exclusive with branch_point. #' @param branch_labels The labels for the branchs. #' @param cluster_rows Whether to cluster the rows of the heatmap. #' @param hclust_method The method used by pheatmap to perform hirearchical clustering of the rows. #' @param num_clusters Number of clusters for the heatmap of branch genes #' @param hmcols The color scheme for drawing the heatmap. #' @param branch_colors The colors used in the annotation strip indicating the pre- and post-branch cells. #' @param add_annotation_row Additional annotations to show for each row in the heatmap. Must be a dataframe with one row for each row in the fData table of cds_subset, with matching IDs. #' @param add_annotation_col Additional annotations to show for each column in the heatmap. Must be a dataframe with one row for each cell in the pData table of cds_subset, with matching IDs. #' @param show_rownames Whether to show the names for each row in the table. #' @param use_gene_short_name Whether to use the short names for each row. If FALSE, uses row IDs from the fData table. #' @param scale_max The maximum value (in standard deviations) to show in the heatmap. Values larger than this are set to the max. #' @param scale_min The minimum value (in standard deviations) to show in the heatmap. Values smaller than this are set to the min. #' @param norm_method Determines how to transform expression values prior to rendering #' @param trend_formula A formula string specifying the model used in fitting the spline curve for each gene/feature. #' @param return_heatmap Whether to return the pheatmap object to the user. #' @param cores Number of cores to use when smoothing the expression curves shown in the heatmap. #' @param ... 
Additional arguments passed to buildBranchCellDataSet #' @return A list of heatmap_matrix (expression matrix for the branch committment), ph (pheatmap heatmap object), #' annotation_row (annotation data.frame for the row), annotation_col (annotation data.frame for the column). #' @import pheatmap #' @importFrom stats sd as.dist cor cutree #' @export #' plot_genes_branched_heatmap <- function(cds_subset, branch_point=1, branch_states=NULL, branch_labels = c("Cell fate 1", "Cell fate 2"), cluster_rows = TRUE, hclust_method = "ward.D2", num_clusters = 6, hmcols = NULL, branch_colors = c('#979797', '#F05662', '#7990C8'), add_annotation_row = NULL, add_annotation_col = NULL, show_rownames = FALSE, use_gene_short_name = TRUE, scale_max=3, scale_min=-3, norm_method = c("log", "vstExprs"), trend_formula = '~sm.ns(Pseudotime, df=3) * Branch', return_heatmap=FALSE, cores = 1, ...) { cds <- NA new_cds <- buildBranchCellDataSet(cds_subset, branch_states=branch_states, branch_point=branch_point, progenitor_method = 'duplicate', ...) 
new_cds@dispFitInfo <- cds_subset@dispFitInfo if(is.null(branch_states)) { progenitor_state <- subset(pData(cds_subset), Pseudotime == 0)[, 'State'] branch_states <- setdiff(pData(cds_subset)$State, progenitor_state) } col_gap_ind <- 101 # newdataA <- data.frame(Pseudotime = seq(0, 100, length.out = 100)) # newdataB <- data.frame(Pseudotime = seq(0, 100, length.out = 100)) newdataA <- data.frame(Pseudotime = seq(0, 100, length.out = 100), Branch = as.factor(unique(as.character(pData(new_cds)$Branch))[1])) newdataB <- data.frame(Pseudotime = seq(0, 100, length.out = 100), Branch = as.factor(unique(as.character(pData(new_cds)$Branch))[2])) BranchAB_exprs <- genSmoothCurves(new_cds[, ], cores=cores, trend_formula = trend_formula, relative_expr = T, new_data = rbind(newdataA, newdataB)) BranchA_exprs <- BranchAB_exprs[, 1:100] BranchB_exprs <- BranchAB_exprs[, 101:200] #common_ancestor_cells <- row.names(pData(new_cds)[duplicated(pData(new_cds)$original_cell_id),]) common_ancestor_cells <- row.names(pData(new_cds)[pData(new_cds)$State == setdiff(pData(new_cds)$State, branch_states),]) BranchP_num <- (100 - floor(max(pData(new_cds)[common_ancestor_cells, 'Pseudotime']))) BranchA_num <- floor(max(pData(new_cds)[common_ancestor_cells, 'Pseudotime'])) BranchB_num <- BranchA_num norm_method <- match.arg(norm_method) # FIXME: this needs to check that vst values can even be computed. 
(They can only be if we're using NB as the expressionFamily) if(norm_method == 'vstExprs') { BranchA_exprs <- vstExprs(new_cds, expr_matrix=BranchA_exprs) BranchB_exprs <- vstExprs(new_cds, expr_matrix=BranchB_exprs) } else if(norm_method == 'log') { BranchA_exprs <- log10(BranchA_exprs + 1) BranchB_exprs <- log10(BranchB_exprs + 1) } heatmap_matrix <- cBind(BranchA_exprs[, (col_gap_ind - 1):1], BranchB_exprs) heatmap_matrix=heatmap_matrix[!apply(heatmap_matrix, 1, sd)==0,] heatmap_matrix=Matrix::t(scale(Matrix::t(heatmap_matrix),center=TRUE)) heatmap_matrix=heatmap_matrix[is.na(row.names(heatmap_matrix)) == FALSE,] heatmap_matrix[is.nan(heatmap_matrix)] = 0 heatmap_matrix[heatmap_matrix>scale_max] = scale_max heatmap_matrix[heatmap_matrix<scale_min] = scale_min heatmap_matrix_ori <- heatmap_matrix heatmap_matrix <- heatmap_matrix[is.finite(heatmap_matrix[, 1]) & is.finite(heatmap_matrix[, col_gap_ind]), ] #remove the NA fitting failure genes for each branch row_dist <- as.dist((1 - cor(Matrix::t(heatmap_matrix)))/2) row_dist[is.na(row_dist)] <- 1 exp_rng <- range(heatmap_matrix) #bks is based on the expression range bks <- seq(exp_rng[1] - 0.1, exp_rng[2] + 0.1, by=0.1) if(is.null(hmcols)) { hmcols <- blue2green2red(length(bks) - 1) } # prin t(hmcols) ph <- pheatmap(heatmap_matrix, useRaster = T, cluster_cols=FALSE, cluster_rows=TRUE, show_rownames=F, show_colnames=F, #scale="row", clustering_distance_rows=row_dist, clustering_method = hclust_method, cutree_rows=num_clusters, silent=TRUE, filename=NA, breaks=bks, color=hmcols #color=hmcols#, # filename="expression_pseudotime_pheatmap.pdf", ) #save(heatmap_matrix, row_dist, num_clusters, hmcols, ph, branchTest_df, qval_lowest_thrsd, branch_labels, BranchA_num, BranchP_num, BranchB_num, file = 'heatmap_matrix') annotation_row <- data.frame(Cluster=factor(cutree(ph$tree_row, num_clusters))) if(!is.null(add_annotation_row)) { annotation_row <- cbind(annotation_row, add_annotation_row[row.names(annotation_row), ]) # 
annotation_row$bif_time <- add_annotation_row[as.character(fData(absolute_cds[row.names(annotation_row), ])$gene_short_name), 1] } colnames(heatmap_matrix) <- c(1:ncol(heatmap_matrix)) annotation_col <- data.frame(row.names = c(1:ncol(heatmap_matrix)), "Cell Type" = c(rep(branch_labels[1], BranchA_num), rep("Pre-branch", 2 * BranchP_num), rep(branch_labels[2], BranchB_num))) colnames(annotation_col) <- "Cell Type" if(!is.null(add_annotation_col)) { annotation_col <- cbind(annotation_col, add_annotation_col[fData(cds[row.names(annotation_col), ])$gene_short_name, 1]) } names(branch_colors) <- c("Pre-branch", branch_labels[1], branch_labels[2]) annotation_colors=list("Cell Type"=branch_colors) names(annotation_colors$`Cell Type`) = c('Pre-branch', branch_labels) if (use_gene_short_name == TRUE) { if (is.null(fData(cds_subset)$gene_short_name) == FALSE) { feature_label <- as.character(fData(cds_subset)[row.names(heatmap_matrix), 'gene_short_name']) feature_label[is.na(feature_label)] <- row.names(heatmap_matrix) row_ann_labels <- as.character(fData(cds_subset)[row.names(annotation_row), 'gene_short_name']) row_ann_labels[is.na(row_ann_labels)] <- row.names(annotation_row) } else { feature_label <- row.names(heatmap_matrix) row_ann_labels <- row.names(annotation_row) } } else { feature_label <- row.names(heatmap_matrix) row_ann_labels <- row.names(annotation_row) } row.names(heatmap_matrix) <- feature_label row.names(annotation_row) <- row_ann_labels ph_res <- pheatmap(heatmap_matrix[, ], #ph$tree_row$order useRaster = T, cluster_cols=FALSE, cluster_rows=TRUE, show_rownames=show_rownames, show_colnames=F, #scale="row", clustering_distance_rows=row_dist, #row_dist clustering_method = hclust_method, #ward.D2 cutree_rows=num_clusters, # cutree_cols = 2, annotation_row=annotation_row, annotation_col=annotation_col, annotation_colors=annotation_colors, gaps_col = col_gap_ind, treeheight_row = 20, breaks=bks, fontsize = 6, color=hmcols, border_color = NA, silent=TRUE) 
grid::grid.rect(gp=grid::gpar("fill", col=NA)) grid::grid.draw(ph_res$gtable) if (return_heatmap){ return(list(BranchA_exprs = BranchA_exprs, BranchB_exprs = BranchB_exprs, heatmap_matrix = heatmap_matrix, heatmap_matrix_ori = heatmap_matrix_ori, ph = ph, col_gap_ind = col_gap_ind, row_dist = row_dist, hmcols = hmcols, annotation_colors = annotation_colors, annotation_row = annotation_row, annotation_col = annotation_col, ph_res = ph_res)) } } #' Plots genes by mean vs. dispersion, highlighting those selected for ordering #' #' Each gray point in the plot is a gene. The black dots are those that were included #' in the last call to setOrderingFilter. The red curve shows the mean-variance #' model learning by estimateDispersions(). #' #' @param cds The CellDataSet to be used for the plot. #' @export plot_ordering_genes <- function(cds){ if(class(cds)[1] != "CellDataSet") { stop("Error input object is not of type 'CellDataSet'") } disp_table <- dispersionTable(cds) use_for_ordering <- NA mean_expression <- NA dispersion_empirical <- NA dispersion_fit <- NA gene_id <- NA ordering_genes <- row.names(subset(fData(cds), use_for_ordering == TRUE)) g <- qplot(mean_expression, dispersion_empirical, data=disp_table, log="xy", color=I("darkgrey")) + geom_line(aes(y=dispersion_fit), color="red") if (length(ordering_genes) > 0){ g <- g + geom_point(aes(mean_expression, dispersion_empirical), data=subset(disp_table, gene_id %in% ordering_genes), color="black") } g <- g + monocle_theme_opts() g } #' Plots clusters of cells . #' #' @param cds CellDataSet for the experiment #' @param x the column of reducedDimS(cds) to plot on the horizontal axis #' @param y the column of reducedDimS(cds) to plot on the vertical axis #' @param color_by the cell attribute (e.g. 
the column of pData(cds)) to map to each cell's color #' @param markers a gene name or gene id to use for setting the size of each cell in the plot #' @param show_cell_names draw the name of each cell in the plot #' @param cell_size The size of the point for each cell #' @param cell_name_size the size of cell name labels #' @param ... additional arguments passed into the scale_color_viridis function #' @return a ggplot2 plot object #' @import ggplot2 #' @importFrom reshape2 melt #' @importFrom viridis scale_color_viridis #' @export #' @examples #' \dontrun{ #' library(HSMMSingleCell) #' HSMM <- load_HSMM() #' HSMM <- reduceD #' plot_cell_clusters(HSMM) #' plot_cell_clusters(HSMM, color_by="Pseudotime") #' plot_cell_clusters(HSMM, markers="MYH3") #' } plot_cell_clusters <- function(cds, x=1, y=2, color_by="Cluster", markers=NULL, show_cell_names=FALSE, cell_size=1.5, cell_name_size=2, ...){ if (is.null(cds@reducedDimA) | length(pData(cds)$Cluster) == 0){ stop("Error: Clustering is not performed yet. 
Please call clusterCells() before calling this function.") } gene_short_name <- NULL sample_name <- NULL data_dim_1 <- NULL data_dim_2 <- NULL #TODO: need to validate cds as ready for this plot (need mst, pseudotime, etc) lib_info <- pData(cds) tSNE_dim_coords <- reducedDimA(cds) data_df <- data.frame(t(tSNE_dim_coords[c(x,y),])) colnames(data_df) <- c("data_dim_1", "data_dim_2") data_df$sample_name <- colnames(cds) data_df <- merge(data_df, lib_info, by.x="sample_name", by.y="row.names") markers_exprs <- NULL if (is.null(markers) == FALSE){ markers_fData <- subset(fData(cds), gene_short_name %in% markers) if (nrow(markers_fData) >= 1){ markers_exprs <- reshape2::melt(as.matrix(exprs(cds[row.names(markers_fData),]))) colnames(markers_exprs)[1:2] <- c('feature_id','cell_id') markers_exprs <- merge(markers_exprs, markers_fData, by.x = "feature_id", by.y="row.names") #print (head( markers_exprs[is.na(markers_exprs$gene_short_name) == FALSE,])) markers_exprs$feature_label <- as.character(markers_exprs$gene_short_name) markers_exprs$feature_label[is.na(markers_exprs$feature_label)] <- markers_exprs$Var1 } } if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0){ data_df <- merge(data_df, markers_exprs, by.x="sample_name", by.y="cell_id") g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2)) + facet_wrap(~feature_label) }else{ g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2)) } # FIXME: setting size here overrides the marker expression funtionality. # Don't do it! if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0){ g <- g + geom_point(aes(color=log10(value + 0.1)), size=I(cell_size), na.rm = TRUE) + scale_color_viridis(name = paste0("log10(value + 0.1)"), ...) 
}else { g <- g + geom_point(aes_string(color = color_by), size=I(cell_size), na.rm = TRUE) } g <- g + #scale_color_brewer(palette="Set1") + monocle_theme_opts() + xlab(paste("Component", x)) + ylab(paste("Component", y)) + theme(legend.position="top", legend.key.height=grid::unit(0.35, "in")) + #guides(color = guide_legend(label.position = "top")) + theme(legend.key = element_blank()) + theme(panel.background = element_rect(fill='white')) + theme(text = element_text(size = 15)) g } #' Plots the decision map of density clusters . #' #' @param cds CellDataSet for the experiment after running clusterCells_Density_Peak #' @param rho_threshold The threshold of local density (rho) used to select the density peaks for plotting #' @param delta_threshold The threshold of local distance (delta) used to select the density peaks for plotting #' @export #' @examples #' \dontrun{ #' library(HSMMSingleCell) #' HSMM <- load_HSMM() #' plot_rho_delta(HSMM) #' } plot_rho_delta <- function(cds, rho_threshold = NULL, delta_threshold = NULL){ if(!is.null(cds@auxClusteringData[["tSNE"]]$densityPeak) & !is.null(pData(cds)$Cluster) & !is.null(pData(cds)$peaks) & !is.null(pData(cds)$halo) & !is.null(pData(cds)$delta) & !is.null(pData(cds)$rho)) { rho <- NULL delta <- NULL # df <- data.frame(rho = as.numeric(levels(pData(cds)$rho))[pData(cds)$rho], # delta = as.numeric(levels(pData(cds)$delta))[pData(cds)$delta]) if(!is.null(rho_threshold) & !is.null(delta_threshold)){ peaks <- pData(cds)$rho >= rho_threshold & pData(cds)$delta >= delta_threshold } else peaks <- pData(cds)$peaks df <- data.frame(rho = pData(cds)$rho, delta = pData(cds)$delta, peaks = peaks) g <- qplot(rho, delta, data = df, alpha = I(0.5), color = peaks) + monocle_theme_opts() + theme(legend.position="top", legend.key.height=grid::unit(0.35, "in")) + scale_color_manual(values=c("grey","black")) + theme(legend.key = element_blank()) + theme(panel.background = element_rect(fill='white')) } else { stop('Please run 
clusterCells_Density_Peak before using this plotting function') } g } #' Plots the percentage of variance explained by the each component based on PCA from the normalized expression #' data using the same procedure used in reduceDimension function. #' #' @param cds CellDataSet for the experiment after running reduceDimension with reduction_method as tSNE #' @param max_components Maximum number of components shown in the scree plot (variance explained by each component) #' @param norm_method Determines how to transform expression values prior to reducing dimensionality #' @param residualModelFormulaStr A model formula specifying the effects to subtract from the data before clustering. #' @param pseudo_expr amount to increase expression values before dimensionality reduction #' @param return_all A logical argument to determine whether or not the variance of each component is returned #' @param use_existing_pc_variance Whether to plot existing results for variance explained by each PC #' @param verbose Whether to emit verbose output during dimensionality reduction #' @param ... 
additional arguments to pass to the dimensionality reduction function #' @export #' @examples #' \dontrun{ #' library(HSMMSingleCell) #' HSMM <- load_HSMM() #' plot_pc_variance_explained(HSMM) #' } plot_pc_variance_explained <- function(cds, max_components=100, # reduction_method=c("DDRTree", "ICA", 'tSNE'), norm_method = c("log", "vstExprs", "none"), residualModelFormulaStr=NULL, pseudo_expr=NULL, return_all = F, use_existing_pc_variance=FALSE, verbose=FALSE, ...){ set.seed(2016) if(!is.null(cds@auxClusteringData[["tSNE"]]$variance_explained) & use_existing_pc_variance == T){ prop_varex <- cds@auxClusteringData[["tSNE"]]$variance_explained } else{ FM <- normalize_expr_data(cds, norm_method, pseudo_expr) #FM <- FM[unlist(sparseApply(FM, 1, sd, convert_to_dense=TRUE)) > 0, ] xm <- Matrix::rowMeans(FM) xsd <- sqrt(Matrix::rowMeans((FM - xm)^2)) FM <- FM[xsd > 0,] if (is.null(residualModelFormulaStr) == FALSE) { if (verbose) message("Removing batch effects") X.model_mat <- sparse.model.matrix(as.formula(residualModelFormulaStr), data = pData(cds), drop.unused.levels = TRUE) fit <- limma::lmFit(FM, X.model_mat, ...) beta <- fit$coefficients[, -1, drop = FALSE] beta[is.na(beta)] <- 0 FM <- as.matrix(FM) - beta %*% t(X.model_mat[, -1]) }else{ X.model_mat <- NULL } if (nrow(FM) == 0) { stop("Error: all rows have standard deviation zero") } # FM <- convert2DRData(cds, norm_method = 'log') # FM <- FM[rowSums(is.na(FM)) == 0, ] irlba_res <- prcomp_irlba(t(FM), n = min(max_components, min(dim(FM)) - 1), center = TRUE, scale. 
= TRUE) prop_varex <- irlba_res$sdev^2 / sum(irlba_res$sdev^2) # # cell_means <- Matrix::rowMeans(FM_t) # cell_vars <- Matrix::rowMeans((FM_t - cell_means)^2) # # irlba_res <- irlba(FM, # nv= min(max_components, min(dim(FM)) - 1), #avoid calculating components in the tail # nu=0, # center=cell_means, # scale=sqrt(cell_vars), # right_only=TRUE) # prop_varex <- irlba_res$d / sum(irlba_res$d) # # # pca_res <- prcomp(t(FM), center = T, scale = T) # # std_dev <- pca_res$sdev # # pr_var <- std_dev^2 # prop_varex <- pr_var/sum(pr_var) } p <- qplot(1:length(prop_varex), prop_varex, alpha = I(0.5)) + monocle_theme_opts() + theme(legend.position="top", legend.key.height=grid::unit(0.35, "in")) + theme(panel.background = element_rect(fill='white')) + xlab('components') + ylab('Variance explained \n by each component') cds@auxClusteringData[["tSNE"]]$variance_explained <- prop_varex # update CDS slot for variance_explained if(return_all) { return(list(variance_explained = prop_varex, p = p)) } else return(p) } #' @importFrom igraph shortest_paths degree shortest.paths traverseTree <- function(g, starting_cell, end_cells){ distance <- shortest.paths(g, v=starting_cell, to=end_cells) branchPoints <- which(degree(g) == 3) path <- shortest_paths(g, from = starting_cell, end_cells) return(list(shortest_path = path$vpath, distance = distance, branch_points = intersect(branchPoints, unlist(path$vpath)))) } #' Plots the minimum spanning tree on cells. #' @description Plots the minimum spanning tree on cells. #' @param cds CellDataSet for the experiment #' @param x the column of reducedDimS(cds) to plot on the horizontal axis #' @param y the column of reducedDimS(cds) to plot on the vertical axis #' @param root_states the state used to set as the root of the graph #' @param color_by the cell attribute (e.g. 
the column of pData(cds)) to map to each cell's color #' @param show_tree whether to show the links between cells connected in the minimum spanning tree #' @param show_backbone whether to show the diameter path of the MST used to order the cells #' @param backbone_color the color used to render the backbone. #' @param markers a gene name or gene id to use for setting the size of each cell in the plot #' @param show_cell_names draw the name of each cell in the plot #' @param cell_size The size of the point for each cell #' @param cell_link_size The size of the line segments connecting cells (when used with ICA) or the principal graph (when used with DDRTree) #' @param cell_name_size the size of cell name labels #' @param show_branch_points Whether to show icons for each branch point (only available when reduceDimension was called with DDRTree) #' @param ... Additional arguments passed to the scale_color_viridis function #' @return a ggplot2 plot object #' @import ggplot2 #' @importFrom igraph V get.edgelist layout_as_tree #' @importFrom reshape2 melt #' @importFrom viridis scale_color_viridis #' @export #' @examples #' \dontrun{ #' library(HSMMSingleCell) #' HSMM <- load_HSMM() #' plot_complex_cell_trajectory(HSMM) #' plot_complex_cell_trajectory(HSMM, color_by="Pseudotime", show_backbone=FALSE) #' plot_complex_cell_trajectory(HSMM, markers="MYH3") #' } plot_complex_cell_trajectory <- function(cds, x=1, y=2, root_states = NULL, color_by="State", show_tree=TRUE, show_backbone=TRUE, backbone_color="black", markers=NULL, show_cell_names=FALSE, cell_size=1.5, cell_link_size=0.75, cell_name_size=2, show_branch_points=TRUE, ...){ gene_short_name <- NA sample_name <- NA data_dim_1 <- NA data_dim_2 <- NA #TODO: need to validate cds as ready for this plot (need mst, pseudotime, etc) lib_info_with_pseudo <- pData(cds) if (is.null(cds@dim_reduce_type)){ stop("Error: dimensionality not yet reduced. 
Please call reduceDimension() before calling this function.") } if (cds@dim_reduce_type == "ICA"){ reduced_dim_coords <- reducedDimS(cds) }else if (cds@dim_reduce_type %in% c("SimplePPT", "DDRTree", "SGL-tree") ){ reduced_dim_coords <- reducedDimK(cds) closest_vertex <- cds@auxOrderingData[["DDRTree"]]$pr_graph_cell_proj_closest_vertex }else { stop("Error: unrecognized dimensionality reduction method.") } if (is.null(reduced_dim_coords)){ stop("You must first call reduceDimension() before using this function") } dp_mst <- minSpanningTree(cds) if(is.null(root_states)) { if(is.null(lib_info_with_pseudo$Pseudotime)){ root_cell <- row.names(lib_info_with_pseudo)[degree(dp_mst) == 1][1] } else root_cell <- row.names(subset(lib_info_with_pseudo, Pseudotime == 0)) if(cds@dim_reduce_type != "ICA") root_cell <- V(dp_mst)$name[cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[root_cell, ]] } else { candidate_root_cells <- row.names(subset(pData(cds), State %in% root_states)) if(cds@dim_reduce_type == "ICA") { root_cell <- candidate_root_cells[which(degree(dp_mst, candidate_root_cells) == 1)] } else { Y_candidate_root_cells <- V(dp_mst)$name[cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[candidate_root_cells, ]] root_cell <- Y_candidate_root_cells[which(degree(dp_mst, Y_candidate_root_cells) == 1)] } } # #root_cell <- cds@auxOrderingData[[cds@dim_reduce_type]]$root_cell # root_state <- pData(cds)[root_cell,]$State # #root_state <- V(pr_graph_cell_proj_mst)[root_cell,]$State # pr_graph_root <- subset(pData(cds), State == root_state) # closest_vertex <- cds@auxOrderingData[["DDRTree"]]$pr_graph_cell_proj_closest_vertex # root_cell_point_in_Y <- closest_vertex[row.names(pr_graph_root),] tree_coords <- layout_as_tree(dp_mst, root=root_cell) #ica_space_df <- data.frame(Matrix::t(reduced_dim_coords[c(x,y),])) ica_space_df <- data.frame(tree_coords) row.names(ica_space_df) <- colnames(reduced_dim_coords) colnames(ica_space_df) <- c("prin_graph_dim_1", 
"prin_graph_dim_2") ica_space_df$sample_name <- row.names(ica_space_df) #ica_space_with_state_df <- merge(ica_space_df, lib_info_with_pseudo, by.x="sample_name", by.y="row.names") #print(ica_space_with_state_df) if (is.null(dp_mst)){ stop("You must first call orderCells() before using this function") } edge_list <- as.data.frame(get.edgelist(dp_mst)) colnames(edge_list) <- c("source", "target") edge_df <- merge(ica_space_df, edge_list, by.x="sample_name", by.y="source", all=TRUE) #edge_df <- ica_space_df edge_df <- plyr::rename(edge_df, c("prin_graph_dim_1"="source_prin_graph_dim_1", "prin_graph_dim_2"="source_prin_graph_dim_2")) edge_df <- merge(edge_df, ica_space_df[,c("sample_name", "prin_graph_dim_1", "prin_graph_dim_2")], by.x="target", by.y="sample_name", all=TRUE) edge_df <- plyr::rename(edge_df, c("prin_graph_dim_1"="target_prin_graph_dim_1", "prin_graph_dim_2"="target_prin_graph_dim_2")) #S_matrix <- reducedDimS(cds) #data_df <- data.frame(t(S_matrix[c(x,y),])) if(cds@dim_reduce_type == "ICA"){ S_matrix <- tree_coords[,] #colnames(cds) } else if(cds@dim_reduce_type %in% c("DDRTree", "SimplePPT", "SGL-tree")){ S_matrix <- tree_coords[closest_vertex,] closest_vertex <- cds@auxOrderingData[["DDRTree"]]$pr_graph_cell_proj_closest_vertex } data_df <- data.frame(S_matrix) row.names(data_df) <- colnames(reducedDimS(cds)) colnames(data_df) <- c("data_dim_1", "data_dim_2") data_df$sample_name <- row.names(data_df) data_df <- merge(data_df, lib_info_with_pseudo, by.x="sample_name", by.y="row.names") markers_exprs <- NULL if (is.null(markers) == FALSE){ markers_fData <- subset(fData(cds), gene_short_name %in% markers) if (nrow(markers_fData) >= 1){ markers_exprs <- reshape2::melt(as.matrix(exprs(cds[row.names(markers_fData),]))) colnames(markers_exprs)[1:2] <- c('feature_id','cell_id') markers_exprs <- merge(markers_exprs, markers_fData, by.x = "feature_id", by.y="row.names") #print (head( markers_exprs[is.na(markers_exprs$gene_short_name) == FALSE,])) 
markers_exprs$feature_label <- as.character(markers_exprs$gene_short_name) markers_exprs$feature_label[is.na(markers_exprs$feature_label)] <- markers_exprs$Var1 } } if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0){ data_df <- merge(data_df, markers_exprs, by.x="sample_name", by.y="cell_id") #print (head(edge_df)) g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2, I(cell_size))) + facet_wrap(~feature_label) }else{ g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2)) } if (show_tree){ g <- g + geom_segment(aes_string(x="source_prin_graph_dim_1", y="source_prin_graph_dim_2", xend="target_prin_graph_dim_1", yend="target_prin_graph_dim_2"), size=cell_link_size, linetype="solid", na.rm=TRUE, data=edge_df) } # FIXME: setting size here overrides the marker expression funtionality. # Don't do it! if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0){ if(class(data_df[, color_by]) == 'numeric') { g <- g + geom_jitter(aes_string(color = paste0("log10(", color_by, " + 0.1)")), size=I(cell_size), na.rm = TRUE, height=5) + scale_color_viridis(name = paste0("log10(", color_by, ")"), ...) } else { g <- g + geom_jitter(aes_string(color = color_by), size=I(cell_size), na.rm = TRUE, height=5) } }else { if(class(data_df[, color_by]) == 'numeric') { g <- g + geom_jitter(aes_string(color = paste0("log10(", color_by, " + 0.1)")), size=I(cell_size), na.rm = TRUE, height=5) + scale_color_viridis(name = paste0("log10(", color_by, " + 0.1)"), ...) 
} else { g <- g + geom_jitter(aes_string(color = color_by), size=I(cell_size), na.rm = TRUE, height=5) } } if (show_branch_points && cds@dim_reduce_type == 'DDRTree'){ mst_branch_nodes <- cds@auxOrderingData[[cds@dim_reduce_type]]$branch_points branch_point_df <- subset(edge_df, sample_name %in% mst_branch_nodes)[,c("sample_name", "source_prin_graph_dim_1", "source_prin_graph_dim_2")] branch_point_df$branch_point_idx <- match(branch_point_df$sample_name, mst_branch_nodes) branch_point_df <- branch_point_df[!duplicated(branch_point_df$branch_point_idx), ] g <- g + geom_point(aes_string(x="source_prin_graph_dim_1", y="source_prin_graph_dim_2"), size=2 * cell_size, na.rm=TRUE, data=branch_point_df) + geom_text(aes_string(x="source_prin_graph_dim_1", y="source_prin_graph_dim_2", label="branch_point_idx"), size=1.5 * cell_size, color="white", na.rm=TRUE, data=branch_point_df) } if (show_cell_names){ g <- g +geom_text(aes(label=sample_name), size=cell_name_size) } g <- g + #scale_color_brewer(palette="Set1") + theme(strip.background = element_rect(colour = 'white', fill = 'white')) + theme(panel.border = element_blank()) + # theme(axis.line.x = element_line(size=0.25, color="black")) + # theme(axis.line.y = element_line(size=0.25, color="black")) + theme(panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank()) + theme(panel.grid.major.x = element_blank(), panel.grid.major.y = element_blank()) + theme(panel.background = element_rect(fill='white')) + theme(legend.key=element_blank()) + xlab('') + ylab('') + theme(legend.position="top", legend.key.height=grid::unit(0.35, "in")) + #guides(color = guide_legend(label.position = "top")) + theme(legend.key = element_blank()) + theme(panel.background = element_rect(fill='white')) + theme(line = element_blank(), axis.text.x = element_blank(), axis.text.y = element_blank(), axis.ticks = element_blank()) g } # Modified function: Plot heatmap of 3 branches with the same coloring. 
Each CDS subset has to have the same set of genes. #' Create a heatmap to demonstrate the bifurcation of gene expression along multiple branches #' #' @param cds CellDataSet for the experiment (normally only the branching genes detected with BEAM) #' @param branches The terminal branches (states) on the developmental tree you want to investigate. #' @param branches_name Name (for example, cell type) of branches you believe the cells on the branches are associated with. #' @param cluster_rows Whether to cluster the rows of the heatmap. #' @param hclust_method The method used by pheatmap to perform hirearchical clustering of the rows. #' @param num_clusters Number of clusters for the heatmap of branch genes #' @param hmcols The color scheme for drawing the heatmap. #' @param add_annotation_row Additional annotations to show for each row in the heatmap. Must be a dataframe with one row for each row in the fData table of cds_subset, with matching IDs. #' @param add_annotation_col Additional annotations to show for each column in the heatmap. Must be a dataframe with one row for each cell in the pData table of cds_subset, with matching IDs. #' @param show_rownames Whether to show the names for each row in the table. #' @param use_gene_short_name Whether to use the short names for each row. If FALSE, uses row IDs from the fData table. #' @param norm_method Determines how to transform expression values prior to rendering #' @param scale_max The maximum value (in standard deviations) to show in the heatmap. Values larger than this are set to the max. #' @param scale_min The minimum value (in standard deviations) to show in the heatmap. Values smaller than this are set to the min. #' @param trend_formula A formula string specifying the model used in fitting the spline curve for each gene/feature. #' @param return_heatmap Whether to return the pheatmap object to the user. #' @param cores Number of cores to use when smoothing the expression curves shown in the heatmap. 
#' @return When `return_heatmap = TRUE`, the pheatmap object (`ph_res`) produced for the
#' final rendering; otherwise the heatmap is drawn on the current graphics device and
#' nothing is returned.
#' @import pheatmap
#' @export
#'
plot_multiple_branches_heatmap <- function(cds,
                                           branches,
                                           branches_name = NULL,
                                           cluster_rows = TRUE,
                                           hclust_method = "ward.D2",
                                           num_clusters = 6,
                                           hmcols = NULL,
                                           add_annotation_row = NULL,
                                           add_annotation_col = NULL,
                                           show_rownames = FALSE,
                                           use_gene_short_name = TRUE,
                                           norm_method = c("vstExprs", "log"),
                                           scale_max = 3,
                                           scale_min = -3,
                                           trend_formula = '~sm.ns(Pseudotime, df=3)',
                                           return_heatmap = FALSE,
                                           cores = 1){
  # Pseudocount added before log10 when norm_method == "log".
  pseudocount <- 1

  # Every requested branch must be a State present in pData(cds).
  # (The previous check `!all(...) & length(branches) == 1` only fired when a
  # single invalid branch was supplied; multiple invalid branches slipped
  # through and failed later with an obscure error.)
  if (!all(branches %in% pData(cds)$State)) {
    stop('This function only allows to make multiple branch plots where branches is included in the pData')
  }

  branch_label <- branches
  if (!is.null(branches_name)) {
    if (length(branches) != length(branches_name)) {
      stop('branches_name should have the same length as branches')
    }
    branch_label <- branches_name
  }

  # For each branch, walk the MST from the root (Pseudotime == 0) to the
  # branch tip and fit smooth expression curves over 100 pseudotime points;
  # the per-branch 100-column matrices are concatenated column-wise into m.
  g <- cds@minSpanningTree
  m <- NULL
  for (branch_in in branches) {
    branches_cells <- row.names(subset(pData(cds), State == branch_in))
    root_state <- subset(pData(cds), Pseudotime == 0)[, 'State']
    root_state_cells <- row.names(subset(pData(cds), State == root_state))

    if (cds@dim_reduce_type != 'ICA') {
      # DDRTree-like reductions index MST vertices as "Y_<k>"; map cells to
      # their closest principal-graph vertex before querying the tree.
      root_state_cells <- unique(paste('Y_', cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[root_state_cells, ], sep = ''))
      branches_cells <- unique(paste('Y_', cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[branches_cells, ], sep = ''))
    }
    # Degree-1 vertices are the tree's leaves: the root and the branch tip.
    root_cell <- root_state_cells[which(degree(g, v = root_state_cells) == 1)]
    tip_cell <- branches_cells[which(degree(g, v = branches_cells) == 1)]

    traverse_res <- traverseTree(g, root_cell, tip_cell)
    path_cells <- names(traverse_res$shortest_path[[1]])

    if (cds@dim_reduce_type != 'ICA') {
      # Map principal-graph vertices on the path back to the member cells.
      pc_ind <- cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex
      path_cells <- row.names(pc_ind)[paste('Y_', pc_ind[, 1], sep = '') %in% path_cells]
    }

    cds_subset <- cds[, path_cells]

    newdata <- data.frame(Pseudotime = seq(0, max(pData(cds_subset)$Pseudotime), length.out = 100))

    tmp <- genSmoothCurves(cds_subset, cores = cores, trend_formula = trend_formula,
                           relative_expr = T, new_data = newdata)
    if (is.null(m)) {
      m <- tmp
    } else {
      m <- cbind(m, tmp)
    }
  }

  # Remove genes with no expression in any condition.
  m <- m[!apply(m, 1, sum) == 0, ]

  norm_method <- match.arg(norm_method)
  # FIXME: this needs to check that vst values can even be computed. (They can
  # only be if we're using NB as the expressionFamily.)
  if (norm_method == 'vstExprs' && is.null(cds@dispFitInfo[["blind"]]$disp_func) == FALSE) {
    m <- vstExprs(cds, expr_matrix = m)
  } else if (norm_method == 'log') {
    m <- log10(m + pseudocount)
  }

  # Row-center the data, drop zero-variance and unnamed rows, and clamp to the
  # requested z-score range for display.
  m <- m[!apply(m, 1, sd) == 0, ]
  m <- Matrix::t(scale(Matrix::t(m), center = TRUE))
  m <- m[is.na(row.names(m)) == FALSE, ]
  m[is.nan(m)] <- 0
  m[m > scale_max] <- scale_max
  m[m < scale_min] <- scale_min

  heatmap_matrix <- m

  # Correlation-based distance in [0, 1]; NA correlations (constant rows)
  # are treated as maximally distant.
  row_dist <- as.dist((1 - cor(Matrix::t(heatmap_matrix)))/2)
  row_dist[is.na(row_dist)] <- 1

  if (is.null(hmcols)) {
    bks <- seq(-3.1, 3.1, by = 0.1)
    hmcols <- blue2green2red(length(bks) - 1)
  } else {
    bks <- seq(-3.1, 3.1, length.out = length(hmcols))
  }

  # First (silent) pheatmap call is only used to obtain the row dendrogram so
  # that cluster assignments can be shown as a row annotation below.
  ph <- pheatmap(heatmap_matrix,
                 useRaster = T,
                 cluster_cols = FALSE,
                 cluster_rows = T,
                 show_rownames = F,
                 show_colnames = F,
                 clustering_distance_rows = row_dist,
                 clustering_method = hclust_method,
                 cutree_rows = num_clusters,
                 silent = TRUE,
                 filename = NA,
                 breaks = bks,
                 color = hmcols)

  # Each branch contributes exactly 100 columns (the smoothed curve points).
  annotation_col <- data.frame(Branch = factor(rep(rep(branch_label, each = 100))))
  annotation_row <- data.frame(Cluster = factor(cutree(ph$tree_row, num_clusters)))
  # Column gaps between consecutive branches. seq_len() correctly yields an
  # empty vector for a single branch, where 1:(length(branches) - 1) would
  # have produced c(1, 0) and a bogus gap specification.
  col_gaps_ind <- seq_len(length(branches) - 1) * 100

  # NOTE(review): add_annotation_col is accepted but not currently merged into
  # annotation_col; only add_annotation_row is applied.
  if (!is.null(add_annotation_row)) {
    old_colnames_length <- ncol(annotation_row)
    annotation_row <- cbind(annotation_row, add_annotation_row[row.names(annotation_row), ])
    colnames(annotation_row)[(old_colnames_length + 1):ncol(annotation_row)] <- colnames(add_annotation_row)
  }

  # Optionally relabel rows with gene_short_name, falling back to row IDs for
  # features without a short name.
  if (use_gene_short_name == TRUE) {
    if (is.null(fData(cds)$gene_short_name) == FALSE) {
      feature_label <- as.character(fData(cds)[row.names(heatmap_matrix), 'gene_short_name'])
      feature_label[is.na(feature_label)] <- row.names(heatmap_matrix)

      row_ann_labels <- as.character(fData(cds)[row.names(annotation_row), 'gene_short_name'])
      row_ann_labels[is.na(row_ann_labels)] <- row.names(annotation_row)
    } else {
      feature_label <- row.names(heatmap_matrix)
      row_ann_labels <- row.names(annotation_row)
    }
  } else {
    feature_label <- row.names(heatmap_matrix)
    row_ann_labels <- row.names(annotation_row)
  }

  row.names(heatmap_matrix) <- feature_label
  row.names(annotation_row) <- row_ann_labels

  colnames(heatmap_matrix) <- c(1:ncol(heatmap_matrix))

  if (!(cluster_rows)) {
    annotation_row <- NA
  }

  # Second pheatmap call renders the final figure with annotations and gaps.
  ph_res <- pheatmap(heatmap_matrix[, ],
                     useRaster = T,
                     cluster_cols = FALSE,
                     cluster_rows = cluster_rows,
                     show_rownames = show_rownames,
                     show_colnames = F,
                     clustering_distance_rows = row_dist,
                     clustering_method = hclust_method,
                     cutree_rows = num_clusters,
                     annotation_row = annotation_row,
                     annotation_col = annotation_col,
                     gaps_col = col_gaps_ind,
                     treeheight_row = 20,
                     breaks = bks,
                     fontsize = 12,
                     color = hmcols,
                     silent = TRUE,
                     border_color = NA,
                     filename = NA)

  grid::grid.rect(gp = grid::gpar("fill", col = NA))
  grid::grid.draw(ph_res$gtable)
  if (return_heatmap) {
    return(ph_res)
  }
}

#' Create kinetic curves to demonstrate the bifurcation of gene expression along multiple branches
#'
#' @param cds CellDataSet for the experiment (normally only the branching genes detected with BEAM)
#' @param branches The terminal branches (states) on the developmental tree you want to investigate.
#' @param branches_name Name (for example, cell type) of branches you believe the cells on the branches are associated with.
#' @param min_expr The minimum level of expression to show in the plot
#' @param cell_size A number how large the cells should be in the plot
#' @param norm_method Determines how to transform expression values prior to rendering
#' @param nrow the number of rows used when laying out the panels for each gene's expression
#' @param ncol the number of columns used when laying out the panels for each gene's expression
#' @param panel_order the order in which genes should be laid out (left-to-right, top-to-bottom)
#' @param color_by the cell attribute (e.g. the column of pData(cds)) to be used to color each cell
#' @param trend_formula the model formula to be used for fitting the expression trend over pseudotime
#' @param label_by_short_name label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param TPM Whether to convert the expression value into TPM values.
#' @param cores Number of cores to use when smoothing the expression curves shown in the heatmap.
#' @return a ggplot2 plot object
#'
#' @importFrom Biobase esApply
#' @importFrom stats lowess
#'
#' @export
#'
plot_multiple_branches_pseudotime <- function(cds,
                                              branches,
                                              branches_name = NULL,
                                              min_expr = NULL,
                                              cell_size = 0.75,
                                              norm_method = c("vstExprs", "log"),
                                              nrow = NULL,
                                              ncol = 1,
                                              panel_order = NULL,
                                              color_by = "Branch",
                                              trend_formula = '~sm.ns(Pseudotime, df=3)',
                                              label_by_short_name = TRUE,
                                              TPM = FALSE,
                                              cores = 1){
  # Pseudocount added before log10 when norm_method == "log". This was
  # previously referenced without being defined in this function, so
  # norm_method = "log" always failed with "object 'pseudocount' not found".
  pseudocount <- 1

  if (TPM) {
    # Rescale each cell's counts to transcripts-per-million.
    exprs(cds) <- esApply(cds, 2, function(x) x / sum(x) * 1e6)
  }

  # Every requested branch must be a State present in pData(cds).
  # (The previous check `!all(...) & length(branches) == 1` only fired when a
  # single invalid branch was supplied.)
  if (!all(branches %in% pData(cds)$State)) {
    stop('This function only allows to make multiple branch plots where branches is included in the pData')
  }

  branch_label <- branches
  if (!is.null(branches_name)) {
    if (length(branches) != length(branches_name)) {
      stop('branches_name should have the same length as branches')
    }
    branch_label <- branches_name
  }

  # For each branch, walk the MST from the root (Pseudotime == 0) to the
  # branch tip, smooth each gene's expression along pseudotime with lowess,
  # and accumulate the long-format curves in cds_exprs.
  g <- cds@minSpanningTree
  m <- NULL
  cds_exprs <- NULL
  for (branch_in in branches) {
    branches_cells <- row.names(subset(pData(cds), State == branch_in))
    root_state <- subset(pData(cds), Pseudotime == 0)[, 'State']
    root_state_cells <- row.names(subset(pData(cds), State == root_state))

    if (cds@dim_reduce_type != 'ICA') {
      # DDRTree-like reductions index MST vertices as "Y_<k>"; map cells to
      # their closest principal-graph vertex before querying the tree.
      root_state_cells <- unique(paste('Y_', cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[root_state_cells, ], sep = ''))
      branches_cells <- unique(paste('Y_', cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[branches_cells, ], sep = ''))
    }
    # Degree-1 vertices are the tree's leaves: the root and the branch tip.
    root_cell <- root_state_cells[which(degree(g, v = root_state_cells) == 1)]
    tip_cell <- branches_cells[which(degree(g, v = branches_cells) == 1)]

    traverse_res <- traverseTree(g, root_cell, tip_cell)
    path_cells <- names(traverse_res$shortest_path[[1]])

    if (cds@dim_reduce_type != 'ICA') {
      # Map principal-graph vertices on the path back to the member cells.
      pc_ind <- cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex
      path_cells <- row.names(pc_ind)[paste('Y_', pc_ind[, 1], sep = '') %in% path_cells]
    }

    cds_subset <- cds[, path_cells]

    newdata <- data.frame(Pseudotime = pData(cds_subset)$Pseudotime,
                          row.names = colnames(cds_subset))

    # Lowess-smooth each gene over cells ordered by pseudotime.
    tmp <- t(esApply(cds_subset, 1, function(x) lowess(x[order(pData(cds_subset)$Pseudotime)])$y))
    colnames(tmp) <- colnames(cds_subset)[order(pData(cds_subset)$Pseudotime)]

    cds_exprs_tmp <- reshape2::melt(tmp)
    colnames(cds_exprs_tmp) <- c("f_id", "Cell", "expression")
    cds_exprs_tmp$Branch <- branch_label[which(branches == branch_in)]

    if (is.null(cds_exprs)) {
      cds_exprs <- cds_exprs_tmp
    } else {
      cds_exprs <- rbind(cds_exprs, cds_exprs_tmp)
    }

    if (is.null(m)) {
      m <- tmp
    } else {
      m <- cbind(m, tmp)
    }
  }

  # Remove genes with no expression in any condition.
  m <- m[!apply(m, 1, sum) == 0, ]

  norm_method <- match.arg(norm_method)
  # FIXME: this needs to check that vst values can even be computed. (They can
  # only be if we're using NB as the expressionFamily.)
  if (norm_method == 'vstExprs' && is.null(cds@dispFitInfo[["blind"]]$disp_func) == FALSE) {
    m <- vstExprs(cds, expr_matrix = m)
  } else if (norm_method == 'log') {
    m <- log10(m + pseudocount)
  }

  if (is.null(min_expr)) {
    min_expr <- cds@lowerDetectionLimit
  }
  cds_pData <- pData(cds)
  cds_fData <- fData(cds)
  cds_exprs <- merge(cds_exprs, cds_fData, by.x = "f_id", by.y = "row.names")
  cds_exprs <- merge(cds_exprs, cds_pData, by.x = "Cell", by.y = "row.names")

  # Stretch each branch's pseudotime to a common [0, 100] scale so that the
  # branches can share a single x axis.
  cds_exprs <- plyr::ddply(cds_exprs, .(Branch), mutate,
                           Pseudotime = (Pseudotime - min(Pseudotime)) * 100 / (max(Pseudotime) - min(Pseudotime)))

  if (label_by_short_name == TRUE) {
    if (is.null(cds_exprs$gene_short_name) == FALSE) {
      cds_exprs$feature_label <- as.character(cds_exprs$gene_short_name)
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    } else {
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  } else {
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  cds_exprs$feature_label <- as.factor(cds_exprs$feature_label)

  # Honor the documented panel_order argument, which was previously ignored:
  # releveling the facet factor controls the panel layout order.
  if (is.null(panel_order) == FALSE) {
    cds_exprs$feature_label <- factor(cds_exprs$feature_label, levels = panel_order)
  }

  cds_exprs$State <- as.factor(cds_exprs$State)
  cds_exprs$Branch <- as.factor(cds_exprs$Branch)

  q <- ggplot(aes(Pseudotime, expression), data = cds_exprs)
  if (is.null(color_by) == FALSE) {
    q <- q + geom_line(aes_string(color = color_by), size = I(cell_size))
  }
  q <- q + facet_wrap(~feature_label, nrow = nrow, ncol = ncol, scales = "free_y")
  q <- q + ylab("Expression") + xlab("Pseudotime (stretched)")
  q <- q + monocle_theme_opts()
  q + expand_limits(y = min_expr)
}
/R/plotting.R
no_license
andrewwbutler/monocle-release
R
false
false
127,507
r
# Declare NSE column names used inside ggplot2 aes() calls so that
# R CMD check does not flag them as undefined globals.
utils::globalVariables(c("Pseudotime", "value", "ids", "prin_graph_dim_1", "prin_graph_dim_2", "State", "value", "feature_label", "expectation", "colInd", "rowInd", "value", "source_prin_graph_dim_1", "source_prin_graph_dim_2"))

# Shared ggplot2 theme applied by every monocle plotting function:
# white background and facet strips, no panel border or grid lines,
# thin black x/y axis lines, and blank legend keys. Adding several
# theme() objects is equivalent to one theme() call with all settings.
monocle_theme_opts <- function() {
  theme(
    strip.background = element_rect(colour = 'white', fill = 'white'),
    panel.border = element_blank(),
    axis.line.x = element_line(size = 0.25, color = "black"),
    axis.line.y = element_line(size = 0.25, color = "black"),
    panel.grid.minor.x = element_blank(),
    panel.grid.minor.y = element_blank(),
    panel.grid.major.x = element_blank(),
    panel.grid.major.y = element_blank(),
    panel.background = element_rect(fill = 'white'),
    legend.key = element_blank()
  )
}

#' Plots the minimum spanning tree on cells.
#'
#' @param cds CellDataSet for the experiment
#' @param x the column of reducedDimS(cds) to plot on the horizontal axis
#' @param y the column of reducedDimS(cds) to plot on the vertical axis
#' @param color_by the cell attribute (e.g. the column of pData(cds)) to map to each cell's color
#' @param show_tree whether to show the links between cells connected in the minimum spanning tree
#' @param show_backbone whether to show the diameter path of the MST used to order the cells
#' @param backbone_color the color used to render the backbone. 
#' @param markers a gene name or gene id to use for setting the size of each cell in the plot
#' @param use_color_gradient Whether or not to use color gradient instead of cell size to show marker expression level
#' @param markers_linear a boolean used to indicate whether you want to scale the markers logarithimically or linearly
#' @param show_cell_names draw the name of each cell in the plot
#' @param show_state_number show state number
#' @param cell_size The size of the point for each cell
#' @param cell_link_size The size of the line segments connecting cells (when used with ICA) or the principal graph (when used with DDRTree)
#' @param cell_name_size the size of cell name labels
#' @param state_number_size the size of the state number
#' @param show_branch_points Whether to show icons for each branch point (only available when reduceDimension was called with DDRTree)
#' @param theta How many degrees you want to rotate the trajectory
#' @param ... Additional arguments passed into scale_color_viridis function
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom reshape2 melt
#' @importFrom igraph get.edgelist
#' @importFrom viridis scale_color_viridis
#' @export
#' @examples
#' \dontrun{
#' lung <- load_lung()
#' plot_cell_trajectory(lung)
#' plot_cell_trajectory(lung, color_by="Pseudotime", show_backbone=FALSE)
#' plot_cell_trajectory(lung, markers="MYH3")
#' }
plot_cell_trajectory <- function(cds,
                                 x=1,
                                 y=2,
                                 color_by="State",
                                 show_tree=TRUE,
                                 show_backbone=TRUE,
                                 backbone_color="black",
                                 markers=NULL,
                                 use_color_gradient = FALSE,
                                 markers_linear = FALSE,
                                 show_cell_names=FALSE,
                                 show_state_number = FALSE,
                                 cell_size=1.5,
                                 cell_link_size=0.75,
                                 cell_name_size=2,
                                 state_number_size = 2.9,
                                 show_branch_points=TRUE,
                                 theta = 0,
                                 ...){
  # Dummy locals silence R CMD check notes for names used inside aes().
  gene_short_name <- NA
  sample_name <- NA
  sample_state <- pData(cds)$State
  data_dim_1 <- NA
  data_dim_2 <- NA

  #TODO: need to validate cds as ready for this plot (need mst, pseudotime, etc)
  lib_info_with_pseudo <- pData(cds)

  if (is.null(cds@dim_reduce_type)){
    stop("Error: dimensionality not yet reduced. Please call reduceDimension() before calling this function.")
  }

  # Principal graph coordinates: ICA stores them in reducedDimS, tree-based
  # methods (simplePPT / DDRTree) in reducedDimK.
  if (cds@dim_reduce_type == "ICA"){
    reduced_dim_coords <- reducedDimS(cds)
  }else if (cds@dim_reduce_type %in% c("simplePPT", "DDRTree") ){
    reduced_dim_coords <- reducedDimK(cds)
  }else {
    stop("Error: unrecognized dimensionality reduction method.")
  }

  # One row per principal-graph node, with the two requested components.
  ica_space_df <- data.frame(Matrix::t(reduced_dim_coords[c(x,y),]))
  colnames(ica_space_df) <- c("prin_graph_dim_1", "prin_graph_dim_2")
  ica_space_df$sample_name <- row.names(ica_space_df)
  ica_space_df$sample_state <- row.names(ica_space_df)
  #ica_space_with_state_df <- merge(ica_space_df, lib_info_with_pseudo, by.x="sample_name", by.y="row.names")
  #print(ica_space_with_state_df)

  dp_mst <- minSpanningTree(cds)
  if (is.null(dp_mst)){
    stop("You must first call orderCells() before using this function")
  }

  # Build an edge table with source/target coordinates so the principal
  # graph can be drawn with geom_segment.
  edge_list <- as.data.frame(get.edgelist(dp_mst))
  colnames(edge_list) <- c("source", "target")

  edge_df <- merge(ica_space_df, edge_list, by.x="sample_name", by.y="source", all=TRUE)
  #edge_df <- ica_space_df
  edge_df <- plyr::rename(edge_df, c("prin_graph_dim_1"="source_prin_graph_dim_1", "prin_graph_dim_2"="source_prin_graph_dim_2"))
  edge_df <- merge(edge_df, ica_space_df[,c("sample_name", "prin_graph_dim_1", "prin_graph_dim_2")], by.x="target", by.y="sample_name", all=TRUE)
  edge_df <- plyr::rename(edge_df, c("prin_graph_dim_1"="target_prin_graph_dim_1", "prin_graph_dim_2"="target_prin_graph_dim_2"))

  # Per-cell coordinates in the low-dimensional space, joined with pData.
  S_matrix <- reducedDimS(cds)
  data_df <- data.frame(t(S_matrix[c(x,y),]))
  data_df <- cbind(data_df, sample_state)
  colnames(data_df) <- c("data_dim_1", "data_dim_2")
  data_df$sample_name <- row.names(data_df)
  data_df <- merge(data_df, lib_info_with_pseudo, by.x="sample_name", by.y="row.names")

  # 2-D rotation matrix for an angle given in degrees.
  return_rotation_mat <- function(theta) {
    theta <- theta / 180 * pi
    matrix(c(cos(theta), sin(theta), -sin(theta), cos(theta)), nrow = 2)
  }

  # Rotate both the cells and the principal-graph edges by theta degrees.
  tmp <- return_rotation_mat(theta) %*% t(as.matrix(data_df[, c(2, 3)]))
  data_df$data_dim_1 <- tmp[1, ]
  data_df$data_dim_2 <- tmp[2, ]
  tmp <- return_rotation_mat(theta = theta) %*% t(as.matrix(edge_df[, c('source_prin_graph_dim_1', 'source_prin_graph_dim_2')]))
  edge_df$source_prin_graph_dim_1 <- tmp[1, ]
  edge_df$source_prin_graph_dim_2 <- tmp[2, ]
  tmp <- return_rotation_mat(theta) %*% t(as.matrix(edge_df[, c('target_prin_graph_dim_1', 'target_prin_graph_dim_2')]))
  edge_df$target_prin_graph_dim_1 <- tmp[1, ]
  edge_df$target_prin_graph_dim_2 <- tmp[2, ]

  # Optionally melt expression for the requested marker genes so expression
  # can drive point size or a color gradient.
  markers_exprs <- NULL
  if (is.null(markers) == FALSE){
    markers_fData <- subset(fData(cds), gene_short_name %in% markers)
    if (nrow(markers_fData) >= 1){
      markers_exprs <- reshape2::melt(as.matrix(exprs(cds[row.names(markers_fData),])))
      colnames(markers_exprs)[1:2] <- c('feature_id','cell_id')
      markers_exprs <- merge(markers_exprs, markers_fData, by.x = "feature_id", by.y="row.names")
      #print (head( markers_exprs[is.na(markers_exprs$gene_short_name) == FALSE,]))
      markers_exprs$feature_label <- as.character(markers_exprs$gene_short_name)
      markers_exprs$feature_label[is.na(markers_exprs$feature_label)] <- markers_exprs$Var1
    }
  }

  # Base plot: one facet per marker when markers were found, otherwise a
  # single panel of all cells.
  if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0){
    data_df <- merge(data_df, markers_exprs, by.x="sample_name", by.y="cell_id")
    if(use_color_gradient) {
      if(markers_linear){
        g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2)) + geom_point(aes(color= value), size=I(cell_size), na.rm = TRUE) + scale_color_viridis(name = paste0("value"), ...) + facet_wrap(~feature_label)
      } else {
        g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2)) + geom_point(aes(color=log10(value + 0.1)), size=I(cell_size), na.rm = TRUE) + scale_color_viridis(name = paste0("log10(value + 0.1)"), ...) + facet_wrap(~feature_label)
      }
    } else {
      if(markers_linear){
        g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2, size= (value * 0.1))) + facet_wrap(~feature_label)
      } else {
        g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2, size=log10(value + 0.1))) + facet_wrap(~feature_label)
      }
    }
  }else{
    g <- ggplot(data=data_df, aes(x=data_dim_1, y=data_dim_2))
  }

  # Draw the principal graph underneath the cells.
  if (show_tree){
    g <- g + geom_segment(aes_string(x="source_prin_graph_dim_1", y="source_prin_graph_dim_2", xend="target_prin_graph_dim_1", yend="target_prin_graph_dim_2"), size=cell_link_size, linetype="solid", na.rm=TRUE, data=edge_df)
  }

  # FIXME: setting size here overrides the marker expression funtionality.
  # Don't do it!
  if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0){
    if(use_color_gradient) {
      # cells already drawn with the gradient geom_point above
      # g <- g + geom_point(aes_string(color = color_by), na.rm = TRUE)
    } else {
      g <- g + geom_point(aes_string(color = color_by), na.rm = TRUE)
    }
  }else {
    if(use_color_gradient) {
      # cells already drawn with the gradient geom_point above
      # g <- g + geom_point(aes_string(color = color_by), na.rm = TRUE)
    } else {
      g <- g + geom_point(aes_string(color = color_by), size=I(cell_size), na.rm = TRUE)
    }
  }

  # Branch-point badges are only recorded for DDRTree orderings.
  if (show_branch_points && cds@dim_reduce_type == 'DDRTree'){
    mst_branch_nodes <- cds@auxOrderingData[[cds@dim_reduce_type]]$branch_points
    branch_point_df <- subset(edge_df, sample_name %in% mst_branch_nodes)[,c("sample_name", "source_prin_graph_dim_1", "source_prin_graph_dim_2")]
    branch_point_df$branch_point_idx <- match(branch_point_df$sample_name, mst_branch_nodes)
    branch_point_df <- branch_point_df[!duplicated(branch_point_df$branch_point_idx), ]
    g <- g + geom_point(aes_string(x="source_prin_graph_dim_1", y="source_prin_graph_dim_2"), size=5, na.rm=TRUE, data=branch_point_df) + geom_text(aes_string(x="source_prin_graph_dim_1", y="source_prin_graph_dim_2", label="branch_point_idx"), size=4, color="white", na.rm=TRUE, data=branch_point_df)
  }
  if (show_cell_names){
    g <- g +geom_text(aes(label=sample_name), size=cell_name_size)
  }
  if (show_state_number){
    g <- g + geom_text(aes(label = sample_state), size = state_number_size)
  }

  g <- g +
    #scale_color_brewer(palette="Set1") +
    monocle_theme_opts() +
    xlab(paste("Component", x)) +
    ylab(paste("Component", y)) +
    theme(legend.position="top", legend.key.height=grid::unit(0.35, "in")) +
    #guides(color = guide_legend(label.position = "top")) +
    theme(legend.key = element_blank()) +
    theme(panel.background = element_rect(fill='white'))
  g
}

#' @rdname package-deprecated
#' @title Plots the minimum spanning tree on cells.
#' This function is deprecated.
#' @description This function arranges all of the cells in the cds in a tree and
#' predicts their location based on their pseudotime value
#' @param cds CellDataSet for the experiment
#' @param x the column of reducedDimS(cds) to plot on the horizontal axis
#' @param y the column of reducedDimS(cds) to plot on the vertical axis
#' @param color_by the cell attribute (e.g. the column of pData(cds)) to map to each cell's color
#' @param show_tree whether to show the links between cells connected in the minimum spanning tree
#' @param show_backbone whether to show the diameter path of the MST used to order the cells
#' @param backbone_color the color used to render the backbone. 
#' @param markers a gene name or gene id to use for setting the size of each cell in the plot
#' @param show_cell_names draw the name of each cell in the plot
#' @param cell_size The size of the point for each cell
#' @param cell_link_size The size of the line segments connecting cells (when used with ICA) or the principal graph (when used with DDRTree)
#' @param cell_name_size the size of cell name labels
#' @param show_branch_points Whether to show icons for each branch point (only available when reduceDimension was called with DDRTree)
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom reshape2 melt
#' @export
#' @seealso plot_cell_trajectory
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' plot_cell_trajectory(HSMM)
#' plot_cell_trajectory(HSMM, color_by="Pseudotime", show_backbone=FALSE)
#' plot_cell_trajectory(HSMM, markers="MYH3")
#' }
plot_spanning_tree <- function(cds,
                               x=1,
                               y=2,
                               color_by="State",
                               show_tree=TRUE,
                               show_backbone=TRUE,
                               backbone_color="black",
                               markers=NULL,
                               show_cell_names=FALSE,
                               cell_size=1.5,
                               cell_link_size=0.75,
                               cell_name_size=2,
                               show_branch_points=TRUE){
  # Deprecated thin wrapper: forwards every argument to plot_cell_trajectory.
  .Deprecated("plot_cell_trajectory") #include a package argument, too
  plot_cell_trajectory(cds=cds,
                       x=x,
                       y=y,
                       color_by=color_by,
                       show_tree=show_tree,
                       show_backbone=show_backbone,
                       backbone_color=backbone_color,
                       markers=markers,
                       show_cell_names=show_cell_names,
                       cell_size=cell_size,
                       cell_link_size=cell_link_size,
                       cell_name_size=cell_name_size,
                       show_branch_points=show_branch_points)
}

#' @title Plots expression for one or more genes as a violin plot
#'
#' @description Accepts a subset of a CellDataSet and an attribute to group cells by,
#' and produces one or more ggplot2 objects that plots the level of expression for
#' each group of cells.
#'
#' @param cds_subset CellDataSet for the experiment
#' @param grouping the cell attribute (e.g. the column of pData(cds)) to group cells by on the horizontal axis
#' @param min_expr the minimum (untransformed) expression level to use in plotted the genes.
#' @param cell_size the size (in points) of each cell used in the plot
#' @param nrow the number of rows used when laying out the panels for each gene's expression
#' @param ncol the number of columns used when laying out the panels for each gene's expression
#' @param panel_order the order in which genes should be layed out (left-to-right, top-to-bottom)
#' @param color_by the cell attribute (e.g. the column of pData(cds)) to be used to color each cell
#' @param plot_trend whether to plot a trendline tracking the average expression across the horizontal axis.
#' @param label_by_short_name label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param relative_expr Whether to transform expression into relative values
#' @param log_scale a boolean that determines whether or not to scale data logarithmically
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom reshape2 melt
#' @importFrom BiocGenerics sizeFactors
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' my_genes <- HSMM[row.names(subset(fData(HSMM), gene_short_name %in% c("ACTA1", "ID1", "CCNB2"))),]
#' plot_genes_violin(my_genes, grouping="Hours", ncol=2, min_expr=0.1)
#' }
plot_genes_violin <- function (cds_subset, grouping = "State", min_expr = NULL, cell_size = 0.75,
                               nrow = NULL, ncol = 1, panel_order = NULL, color_by = NULL,
                               plot_trend = FALSE, label_by_short_name = TRUE,
                               relative_expr = TRUE, log_scale = TRUE) {
  # Count-based families support size-factor normalization; other families
  # are always treated as relative expression.
  # (Assignments normalized from `=` to `<-` for consistency with the rest of
  # the file; no behavior change.)
  if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")) {
    integer_expression <- TRUE
  }
  else {
    integer_expression <- FALSE
    relative_expr <- TRUE
  }
  if (integer_expression) {
    cds_exprs <- exprs(cds_subset)
    if (relative_expr) {
      if (is.null(sizeFactors(cds_subset))) {
        stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first")
      }
      # Divide each cell's counts by its size factor.
      cds_exprs <- Matrix::t(Matrix::t(cds_exprs)/sizeFactors(cds_subset))
    }
    #cds_exprs <- reshape2::melt(round(as.matrix(cds_exprs)))
    cds_exprs <- reshape2::melt(as.matrix(cds_exprs))
  }
  else {
    cds_exprs <- exprs(cds_subset)
    cds_exprs <- reshape2::melt(as.matrix(cds_exprs))
  }
  if (is.null(min_expr)) {
    min_expr <- cds_subset@lowerDetectionLimit
  }
  colnames(cds_exprs) <- c("f_id", "Cell", "expression")
  # Floor expression at the detection limit so log scales stay finite.
  cds_exprs$expression[cds_exprs$expression < min_expr] <- min_expr
  cds_pData <- pData(cds_subset)

  # # # Custom bit for adding in a group for
  # if(! is.null(show_combined)) {
  #   for(combine_gene in show_combined) {
  #     cds_pData_all <- subset(cds_pData, gene == combine_gene)
  #     cds_pData_all[, grouping] <- paste("All", combine_gene)
  #     cds_pData <- rbind(cds_pData, cds_pData_all)
  #   }
  # }

  cds_fData <- fData(cds_subset)
  cds_exprs <- merge(cds_exprs, cds_fData, by.x = "f_id", by.y = "row.names")
  cds_exprs <- merge(cds_exprs, cds_pData, by.x = "Cell", by.y = "row.names")
  # NOTE(review): adjusted_expression is computed but not used below.
  cds_exprs$adjusted_expression <- log10(cds_exprs$expression)
  # Panel labels: prefer gene_short_name, fall back to the feature id.
  if (label_by_short_name == TRUE) {
    if (is.null(cds_exprs$gene_short_name) == FALSE) {
      cds_exprs$feature_label <- cds_exprs$gene_short_name
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    }
    else {
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  }
  else {
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  if (is.null(panel_order) == FALSE) {
    cds_exprs$feature_label <- factor(cds_exprs$feature_label, levels = panel_order)
  }
  q <- ggplot(aes_string(x = grouping, y = "expression"), data = cds_exprs)
  if (is.null(color_by) == FALSE) {
    q <- q + geom_violin(aes_string(fill = color_by))
  }
  else {
    q <- q + geom_violin()
  }
  if (plot_trend == TRUE) {
    q <- q + stat_summary(fun.data = "mean_cl_boot", size = 0.2)
    q <- q + stat_summary(aes_string(x = grouping, y = "expression", group = color_by), fun.data = "mean_cl_boot", size = 0.2, geom = "line")
  }
  q <- q + facet_wrap(~feature_label, nrow = nrow, ncol = ncol, scales = "free_y")
  # Guard against plotting failures for genes below the detection limit.
  if (min_expr < 1) {
    q <- q + expand_limits(y = c(min_expr, 1))
  }
  q <- q + ylab("Expression") + xlab(grouping)
  if (log_scale == TRUE){
    q <- q + scale_y_log10()
  }
  q
}

#' Plots expression for one or more genes as a jittered, grouped points
#'
#' @description Accepts a subset of a CellDataSet and an attribute to group cells by,
#' and produces one or more ggplot2 objects that plots the level of expression for
#' each group of cells.
#'
#' @param cds_subset CellDataSet for the experiment
#' @param grouping the cell attribute (e.g. the column of pData(cds)) to group cells by on the horizontal axis
#' @param min_expr the minimum (untransformed) expression level to use in plotted the genes.
#' @param cell_size the size (in points) of each cell used in the plot
#' @param nrow the number of rows used when laying out the panels for each gene's expression
#' @param ncol the number of columns used when laying out the panels for each gene's expression
#' @param panel_order the order in which genes should be layed out (left-to-right, top-to-bottom)
#' @param color_by the cell attribute (e.g. the column of pData(cds)) to be used to color each cell
#' @param plot_trend whether to plot a trendline tracking the average expression across the horizontal axis. 
#' @param label_by_short_name label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param relative_expr Whether to transform expression into relative values
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom reshape2 melt
#' @importFrom BiocGenerics sizeFactors
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' my_genes <- HSMM[row.names(subset(fData(HSMM), gene_short_name %in% c("MYOG", "ID1", "CCNB2"))),]
#' plot_genes_jitter(my_genes, grouping="Media", ncol=2)
#' }
plot_genes_jitter <- function(cds_subset,
                              grouping = "State",
                              min_expr=NULL,
                              cell_size=0.75,
                              nrow=NULL,
                              ncol=1,
                              panel_order=NULL,
                              color_by=NULL,
                              plot_trend=FALSE,
                              label_by_short_name=TRUE,
                              relative_expr=TRUE){
  # Count-based families can be size-factor normalized; any other family is
  # always plotted as relative expression.
  if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")){
    integer_expression <- TRUE
  }else{
    integer_expression <- FALSE
    relative_expr <- TRUE
  }
  if (integer_expression) {
    cds_exprs <- exprs(cds_subset)
    if (relative_expr){
      if (is.null(sizeFactors(cds_subset))) {
        stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first")
      }
      # Divide each cell's counts by its size factor.
      cds_exprs <- Matrix::t(Matrix::t(cds_exprs) / sizeFactors(cds_subset))
    }
    # Counts are rounded after normalization before melting to long form.
    cds_exprs <- reshape2::melt(round(as.matrix(cds_exprs)))
  }else{
    cds_exprs <- reshape2::melt(as.matrix(cds_exprs))
  }
  if (is.null(min_expr)){
    min_expr <- cds_subset@lowerDetectionLimit
  }
  colnames(cds_exprs) <- c("f_id", "Cell", "expression")
  # Floor expression at the detection limit so the log-scale plot is finite.
  cds_exprs$expression[cds_exprs$expression < min_expr] <- min_expr
  cds_pData <- pData(cds_subset)
  cds_fData <- fData(cds_subset)
  # Attach feature and cell annotations to the long-format expression table.
  cds_exprs <- merge(cds_exprs, cds_fData, by.x="f_id", by.y="row.names")
  cds_exprs <- merge(cds_exprs, cds_pData, by.x="Cell", by.y="row.names")
  # NOTE(review): adjusted_expression is computed but not used below.
  cds_exprs$adjusted_expression <- log10(cds_exprs$expression)
  #cds_exprs$adjusted_expression <- log10(cds_exprs$adjusted_expression + abs(rnorm(nrow(cds_exprs), min_expr, sqrt(min_expr))))
  # Panel labels: prefer gene_short_name, fall back to the feature id.
  if (label_by_short_name == TRUE){
    if (is.null(cds_exprs$gene_short_name) == FALSE){
      cds_exprs$feature_label <- cds_exprs$gene_short_name
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    }else{
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  }else{
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  #print (head(cds_exprs))
  if (is.null(panel_order) == FALSE) {
    cds_exprs$feature_label <- factor(cds_exprs$feature_label, levels=panel_order)
  }
  q <- ggplot(aes_string(x=grouping, y="expression"), data=cds_exprs)
  if (is.null(color_by) == FALSE){
    q <- q + geom_jitter(aes_string(color=color_by), size=I(cell_size))
  }else{
    q <- q + geom_jitter(size=I(cell_size))
  }
  if (plot_trend == TRUE){
    q <- q + stat_summary(aes_string(color=color_by), fun.data = "mean_cl_boot", size=0.35)
    q <- q + stat_summary(aes_string(x=grouping, y="expression", color=color_by, group=color_by), fun.data = "mean_cl_boot", size=0.35, geom="line")
  }
  q <- q + scale_y_log10() + facet_wrap(~feature_label, nrow=nrow, ncol=ncol, scales="free_y")
  # Need this to guard against plotting failures caused by non-expressed genes
  if (min_expr < 1) {
    q <- q + expand_limits(y=c(min_expr, 1))
  }
  q <- q + ylab("Expression") + xlab(grouping)
  q <- q + monocle_theme_opts()
  q
}

#' Plots the number of cells expressing one or more genes as a barplot
#'
#' @description Accetps a CellDataSet and a parameter,"grouping", used for dividing cells into groups.
#' Returns one or more bar graphs (one graph for each gene in the CellDataSet).
#' Each graph shows the percentage of cells that express a gene in the in the CellDataSet for
#' each sub-group of cells created by "grouping".
#'
#' Let's say the CellDataSet passed in included genes A, B, and C and the "grouping parameter divided
#' all of the cells into three groups called X, Y, and Z. Then three graphs would be produced called A,
#' B, and C. In the A graph there would be three bars one for X, one for Y, and one for Z. So X bar in the
#' A graph would show the percentage of cells in the X group that express gene A.
#'
#' @param cds_subset CellDataSet for the experiment
#' @param grouping the cell attribute (e.g. the column of pData(cds)) to group cells by on the horizontal axis
#' @param min_expr the minimum (untransformed) expression level to use in plotted the genes.
#' @param nrow the number of rows used when laying out the panels for each gene's expression
#' @param ncol the number of columns used when laying out the panels for each gene's expression
#' @param panel_order the order in which genes should be layed out (left-to-right, top-to-bottom)
#' @param plot_as_fraction whether to show the percent instead of the number of cells expressing each gene
#' @param label_by_short_name label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param relative_expr Whether to transform expression into relative values
#' @param plot_limits A pair of number specifying the limits of the y axis. If NULL, scale to the range of the data. 
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom plyr ddply
#' @importFrom reshape2 melt
#' @importFrom BiocGenerics sizeFactors
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' MYOG_ID1 <- HSMM[row.names(subset(fData(HSMM), gene_short_name %in% c("MYOG", "ID1"))),]
#' plot_genes_positive_cells(MYOG_ID1, grouping="Media", ncol=2)
#' }
plot_genes_positive_cells <- function(cds_subset,
                                      grouping = "State",
                                      min_expr=0.1,
                                      nrow=NULL,
                                      ncol=1,
                                      panel_order=NULL,
                                      plot_as_fraction=TRUE,
                                      label_by_short_name=TRUE,
                                      relative_expr=TRUE,
                                      plot_limits=c(0,100)){
  percent <- NULL
  # Count-based families can be size-factor normalized; any other family is
  # always treated as relative expression.
  if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")){
    integer_expression <- TRUE
  }else{
    integer_expression <- FALSE
    relative_expr <- TRUE
  }
  if (integer_expression) {
    marker_exprs <- exprs(cds_subset)
    if (relative_expr){
      if (is.null(sizeFactors(cds_subset))) {
        stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first")
      }
      marker_exprs <- Matrix::t(Matrix::t(marker_exprs) / sizeFactors(cds_subset))
    }
    marker_exprs_melted <- reshape2::melt(round(as.matrix(marker_exprs)))
  }else{
    # BUGFIX: previously melted `exprs(marker_exprs)`, but `marker_exprs` is
    # only assigned in the integer_expression branch, so this errored for
    # non-count expression families. Melt the subset's expression matrix.
    marker_exprs_melted <- reshape2::melt(as.matrix(exprs(cds_subset)))
  }
  colnames(marker_exprs_melted) <- c("f_id", "Cell", "expression")
  # Attach cell and feature annotations to the long-format table.
  marker_exprs_melted <- merge(marker_exprs_melted, pData(cds_subset), by.x="Cell", by.y="row.names")
  marker_exprs_melted <- merge(marker_exprs_melted, fData(cds_subset), by.x="f_id", by.y="row.names")
  # Panel labels: prefer gene_short_name, fall back to the feature id.
  if (label_by_short_name == TRUE){
    if (is.null(marker_exprs_melted$gene_short_name) == FALSE){
      marker_exprs_melted$feature_label <- marker_exprs_melted$gene_short_name
      marker_exprs_melted$feature_label[is.na(marker_exprs_melted$feature_label)] <- marker_exprs_melted$f_id
    }else{
      marker_exprs_melted$feature_label <- marker_exprs_melted$f_id
    }
  }else{
    marker_exprs_melted$feature_label <- marker_exprs_melted$f_id
  }
  if (is.null(panel_order) == FALSE) {
    marker_exprs_melted$feature_label <- factor(marker_exprs_melted$feature_label, levels=panel_order)
  }
  # Per gene and per group: number and fraction of cells above min_expr.
  marker_counts <- plyr::ddply(marker_exprs_melted, c("feature_label", grouping), function(x) {
    data.frame(target=sum(x$expression > min_expr),
               target_fraction=sum(x$expression > min_expr)/nrow(x)) })
  #print (head(marker_counts))
  if (plot_as_fraction){
    marker_counts$target_fraction <- marker_counts$target_fraction * 100
    qp <- ggplot(aes_string(x=grouping, y="target_fraction", fill=grouping), data=marker_counts) +
      ylab("Cells (percent)")
    if (is.null(plot_limits) == FALSE)
      qp <- qp + scale_y_continuous(limits=plot_limits)
  }else{
    qp <- ggplot(aes_string(x=grouping, y="target", fill=grouping), data=marker_counts) +
      ylab("Cells")
  }
  qp <- qp + facet_wrap(~feature_label, nrow=nrow, ncol=ncol, scales="free_y")
  qp <- qp + geom_bar(stat="identity") + monocle_theme_opts()
  return(qp)
}

#' Plots expression for one or more genes as a function of pseudotime
#'
#' @description Plots expression for one or more genes as a function of pseudotime.
#' Plotting allows you determine if the ordering produced by orderCells() is correct
#' and it does not need to be flipped using the "reverse" flag in orderCells
#'
#' @param cds_subset CellDataSet for the experiment
#' @param min_expr the minimum (untransformed) expression level to use in plotted the genes.
#' @param cell_size the size (in points) of each cell used in the plot
#' @param nrow the number of rows used when laying out the panels for each gene's expression
#' @param ncol the number of columns used when laying out the panels for each gene's expression
#' @param panel_order the order in which genes should be layed out (left-to-right, top-to-bottom)
#' @param color_by the cell attribute (e.g. 
#' the column of pData(cds)) to be used to color each cell
#' @param trend_formula the model formula to be used for fitting the expression trend over pseudotime
#' @param label_by_short_name label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param relative_expr Whether to transform expression into relative values
#' @param vertical_jitter A value passed to ggplot to jitter the points in the vertical dimension. Prevents overplotting, and is particularly helpful for rounded transcript count data.
#' @param horizontal_jitter A value passed to ggplot to jitter the points in the horizontal dimension. Prevents overplotting, and is particularly helpful for rounded transcript count data.
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom plyr ddply .
#' @importFrom reshape2 melt
#' @importFrom ggplot2 Position
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' my_genes <- row.names(subset(fData(HSMM), gene_short_name %in% c("CDK1", "MEF2C", "MYH3")))
#' cds_subset <- HSMM[my_genes,]
#' plot_genes_in_pseudotime(cds_subset, color_by="Time")
#' }
plot_genes_in_pseudotime <-function(cds_subset,
                                    min_expr=NULL,
                                    cell_size=0.75,
                                    nrow=NULL,
                                    ncol=1,
                                    panel_order=NULL,
                                    color_by="State",
                                    trend_formula="~ sm.ns(Pseudotime, df=3)",
                                    label_by_short_name=TRUE,
                                    relative_expr=TRUE,
                                    vertical_jitter=NULL,
                                    horizontal_jitter=NULL){
  # Dummy locals silence R CMD check notes for the ddply(.()) column names.
  f_id <- NA
  Cell <- NA
  # Count-based families can be size-factor normalized; any other family is
  # always treated as relative expression.
  if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")) {
    integer_expression <- TRUE
  }
  else {
    integer_expression <- FALSE
    relative_expr <- TRUE
  }
  if (integer_expression) {
    cds_exprs <- exprs(cds_subset)
    if (relative_expr) {
      if (is.null(sizeFactors(cds_subset))) {
        stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first")
      }
      # Divide each cell's counts by its size factor.
      cds_exprs <- Matrix::t(Matrix::t(cds_exprs)/sizeFactors(cds_subset))
    }
    cds_exprs <- reshape2::melt(round(as.matrix(cds_exprs)))
  }
  else {
    cds_exprs <- reshape2::melt(as.matrix(exprs(cds_subset)))
  }
  if (is.null(min_expr)) {
    min_expr <- cds_subset@lowerDetectionLimit
  }
  colnames(cds_exprs) <- c("f_id", "Cell", "expression")
  cds_pData <- pData(cds_subset)
  cds_fData <- fData(cds_subset)
  # Attach feature and cell annotations to the long-format expression table.
  cds_exprs <- merge(cds_exprs, cds_fData, by.x = "f_id", by.y = "row.names")
  cds_exprs <- merge(cds_exprs, cds_pData, by.x = "Cell", by.y = "row.names")
  #cds_exprs$f_id <- as.character(cds_exprs$f_id)
  #cds_exprs$Cell <- as.character(cds_exprs$Cell)
  if (integer_expression) {
    cds_exprs$adjusted_expression <- cds_exprs$expression
  }
  else {
    cds_exprs$adjusted_expression <- log10(cds_exprs$expression)
  }
  # trend_formula <- paste("adjusted_expression", trend_formula,
  # sep = "")
  # Panel labels: prefer gene_short_name, fall back to the feature id.
  if (label_by_short_name == TRUE) {
    if (is.null(cds_exprs$gene_short_name) == FALSE) {
      cds_exprs$feature_label <- as.character(cds_exprs$gene_short_name)
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    }
    else {
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  }
  else {
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  cds_exprs$f_id <- as.character(cds_exprs$f_id)
  cds_exprs$feature_label <- factor(cds_exprs$feature_label)
  # Fit the pseudotime trend for each gene and evaluate it at each cell's
  # pseudotime; the fitted values are drawn as the trend line below.
  new_data <- data.frame(Pseudotime = pData(cds_subset)$Pseudotime)
  model_expectation <- genSmoothCurves(cds_subset, cores=1, trend_formula = trend_formula,
                                       relative_expr = T, new_data = new_data)
  colnames(model_expectation) <- colnames(cds_subset)
  expectation <- ddply(cds_exprs, .(f_id, Cell), function(x) data.frame("expectation"=model_expectation[x$f_id, x$Cell]))
  cds_exprs <- merge(cds_exprs, expectation)
  #cds_exprs$expectation <- expectation#apply(cds_exprs,1, function(x) model_expectation[x$f_id, x$Cell])
  # Floor both observed and fitted values at the detection limit so the
  # log-scale plot stays finite.
  cds_exprs$expression[cds_exprs$expression < min_expr] <- min_expr
  cds_exprs$expectation[cds_exprs$expectation < min_expr] <- min_expr
  if (is.null(panel_order) == FALSE) {
    cds_exprs$feature_label <- factor(cds_exprs$feature_label,
                                      levels = panel_order)
  }
  q <- ggplot(aes(Pseudotime, expression), data = cds_exprs)
  if (is.null(color_by) == FALSE) {
    q <- q + geom_point(aes_string(color = color_by), size = I(cell_size),
                        position=position_jitter(horizontal_jitter, vertical_jitter))
  }
  else {
    q <- q + geom_point(size = I(cell_size),
                        position=position_jitter(horizontal_jitter, vertical_jitter))
  }
  q <- q + geom_line(aes(x = Pseudotime, y = expectation), data = cds_exprs)
  q <- q + scale_y_log10() + facet_wrap(~feature_label, nrow = nrow,
                                        ncol = ncol, scales = "free_y")
  if (min_expr < 1) {
    q <- q + expand_limits(y = c(min_expr, 1))
  }
  if (relative_expr) {
    q <- q + ylab("Relative Expression")
  }
  else {
    q <- q + ylab("Absolute Expression")
  }
  q <- q + xlab("Pseudo-time")
  q <- q + monocle_theme_opts()
  q
}

#' Plots kinetic clusters of genes.
#'
#' @description returns a ggplot2 object showing the shapes of the
#' expression patterns followed by a set of pre-selected genes.
#' The topographic lines highlight the distributions of the kinetic patterns
#' relative to overall trend lines.
#'
#' @param cds CellDataSet for the experiment
#' @param clustering a clustering object produced by clusterCells
#' @param drawSummary whether to draw the summary line for each cluster
#' @param sumFun whether the function used to generate the summary for each cluster
#' @param ncol number of columns used to layout the faceted cluster panels
#' @param nrow number of columns used to layout the faceted cluster panels
#' @param row_samples how many genes to randomly select from the data
#' @param callout_ids a vector of gene names or gene ids to manually render as part of the plot
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom reshape2 melt
#' @importFrom stringr str_join
#' @importFrom ggplot2 Position
#' @import grid
#' @export
#' @examples
#' \dontrun{
#' full_model_fits <- fitModel(HSMM_filtered[sample(nrow(fData(HSMM_filtered)), 100),],
#' modelFormulaStr="~VGAM::bs(Pseudotime)")
#' expression_curve_matrix <- responseMatrix(full_model_fits)
#' clusters <- clusterGenes(expression_curve_matrix, k=4)
#'
#' plot_clusters(HSMM_filtered[ordering_genes,], clusters)
#' }
plot_clusters<-function(cds, clustering, drawSummary=TRUE, sumFun=mean_cl_boot, ncol=NULL, nrow=NULL, row_samples=NULL, callout_ids=NULL){
  # Deprecated entry point, retained only for backward compatibility.
  # NOTE(review): plot_genes_heatmap is itself deprecated in favor of
  # plot_pseudotime_heatmap; consider pointing users there directly.
  .Deprecated("plot_genes_heatmap")
  # Expression values come from the clustering object; keep gene ids alongside
  # so they survive the melt below.
  m <- as.data.frame(clustering$exprs)
  m$ids <- rownames(clustering$exprs)
  # Prefer the clustering's explicit labels when present; otherwise fall back
  # to the raw cluster assignments.
  if (is.null(clustering$labels) == FALSE) {
    m$cluster = factor(clustering$labels[clustering$clustering], levels = levels(clustering$labels))
  }else{
    m$cluster <- factor(clustering$clustering)
  }
  cluster_sizes <- as.data.frame(table(m$cluster))
  cluster_sizes$Freq <- paste("(", cluster_sizes$Freq, ")")
  # Facet strip text of the form "<cluster> (<size>)".
  # NOTE(review): stringr::str_join is long-deprecated; facet_labels is only
  # consumed by the commented-out facet_wrap_labeller call at the bottom.
  facet_labels <- str_join(cluster_sizes$Var1, cluster_sizes$Freq, sep=" ")
  #update the function
  # Long format (one row per gene/cell pair), joined with cell phenotype data.
  m.melt <- melt(m, id.vars = c("ids", "cluster"))
  m.melt <- merge(m.melt, pData(cds), by.x="variable", by.y="row.names")
  # Optional random downsampling of the points, for plotting speed.
  if (is.null(row_samples) == FALSE){
    m.melt <- m.melt[sample(nrow(m.melt), row_samples),]
  }
  c <- ggplot(m.melt) + facet_wrap("cluster", ncol=ncol, nrow=nrow, scales="free_y")
  #c <- c + stat_density2d(aes(x = Pseudotime, y = value), geom="polygon", fill="white", color="black", size=I(0.1)) + facet_wrap("cluster", ncol=ncol, nrow=nrow)
  # Per-cluster summary trend (default: bootstrapped mean) drawn in red.
  if (drawSummary) {
    c <- c + stat_summary(aes(x = Pseudotime, y = value, group = 1),
                          fun.data = sumFun, color = "red",
                          alpha = 0.2, size = 0.5, geom = "smooth")
  }

  #cluster_medians <- subset(m.melt, ids %in% clustering$medoids)
  #c <- c + geom_line()
  #c <- c + geom_line(aes(x=Pseudotime, y=value), data=cluster_medians, color=I("red"))

  c <- c + scale_color_hue(l = 50, h.start = 200) +
    theme(axis.text.x = element_text(angle = 0, hjust = 0)) +
    xlab("Pseudo-time") + ylab("Expression")
  c <- c + theme(strip.background = element_rect(colour = 'white', fill = 'white')) +
    theme(panel.border = element_blank()) +
    theme(legend.position="none") +
    theme(panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank()) +
    theme(panel.grid.major.x = element_blank(), panel.grid.major.y = element_blank())
  #   if (draw_cluster_size){
  #     cluster_sizes <- as.data.frame(table(m$cluster))
  #     colnames(cluster_sizes) <- c("cluster", "Freq")
  #     cluster_sizes <- cbind (cluster_sizes, Pseudotime = cluster_label_text_x, value = cluster_label_text_y)
  #     c <- c + geom_text(aes(x=Pseudotime, y=value, label=Freq), data=cluster_sizes, size=cluster_label_text_size)
  #   }
  # Optionally overlay a hand-picked set of genes on top of the cluster panels.
  if (is.null(callout_ids) == FALSE) {
    callout_melt <- subset(m.melt, ids %in% callout_ids)
    c <- c + geom_line(aes(x=Pseudotime, y=value), data=callout_melt, color=I("steelblue"))
  }
  c <- c + monocle_theme_opts()
  #c <- facet_wrap_labeller(c, facet_labels)
  c
}

# #
# #' Plots a pseudotime-ordered, row-centered heatmap
# #' @export
# plot_genes_heatmap <- function(cds,
#                                rescaling='row',
#                                clustering='row',
#                                labCol=FALSE,
#                                labRow=TRUE,
#                                logMode=TRUE,
#                                use_vst=TRUE,
#                                border=FALSE,
#                                heatscale=c(low='steelblue',mid='white',high='tomato'),
#                                heatMidpoint=0,
#                                method="none",
#                                scaleMax=2,
#                                scaleMin=-2,
#                                relative_expr=TRUE,
#                                ...){
#
# ## the function can be be viewed as a two step process
# ## 1. using the rehape package and other funcs the data is clustered, scaled, and reshaped
# ## using simple options or by a user supplied function
# ## 2.
with the now resahped data the plot, the chosen labels and plot style are built # FM <- exprs(cds) # # if (cds@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")){ # integer_expression <- TRUE # }else{ # integer_expression <- FALSE # relative_expr <- TRUE # } # # if (integer_expression) # { # if (relative_expr){ # if (is.null(sizeFactors(cds))) # { # stop("Error: you must call estimateSizeFactors() first") # } # FM <- Matrix::t(Matrix::t(FM) / sizeFactors(cds)) # } # FM <- round(FM) # } # # m=FM # # if (is.null(fData(cds)$gene_short_name) == FALSE){ # feature_labels <- fData(cds)$gene_short_name # feature_labels[is.na(feature_labels)] <- fData(cds)$f_id # row.names(m) <- feature_labels # } # # #remove genes with no expression in any condition # m=m[!apply(m,1,sum)==0,] # # if (use_vst && is.null(cds@dispFitInfo[["blind"]]$disp_func) == FALSE){ # m = vstExprs(cds, expr_matrix=m) # }else if(logMode){ # m = log10(m+pseudocount) # } # # #remove genes with no sd # #m=m[!apply(m,1,sd)==0,] # # ## you can either scale by row or column not both! 
# ## if you wish to scale by both or use a different scale method then simply supply a scale # ## function instead NB scale is a base funct # # ## I have supplied the default cluster and euclidean distance (JSdist) - and chose to cluster after scaling # ## if you want a different distance/cluster method-- or to cluster and then scale # ## then you can supply a custom function # # if(!is.function(method)){ # method = function(mat){as.dist((1 - cor(Matrix::t(mat)))/2)} # } # # ## this is just reshaping into a ggplot format matrix and making a ggplot layer # # if(is.function(rescaling)) # { # m=rescaling(m) # } else { # if(rescaling=='column'){ # m=m[!apply(m,2,sd)==0,] # m=scale(m, center=TRUE) # m[is.nan(m)] = 0 # m[m>scaleMax] = scaleMax # m[m<scaleMin] = scaleMin # } # if(rescaling=='row'){ # m=m[!apply(m,1,sd)==0,] # m=Matrix::t(scale(Matrix::t(m),center=TRUE)) # m[is.nan(m)] = 0 # m[m>scaleMax] = scaleMax # m[m<scaleMin] = scaleMin # } # } # # # If we aren't going to re-ordering the columns, order them by Pseudotime # if (clustering %in% c("row", "none")) # m = m[,row.names(pData(cds)[order(-pData(cds)$Pseudotime),])] # # if(clustering=='row') # m=m[hclust(method(m))$order, ] # if(clustering=='column') # m=m[,hclust(method(Matrix::t(m)))$order] # if(clustering=='both') # m=m[hclust(method(m))$order ,hclust(method(Matrix::t(m)))$order] # # # rows=dim(m)[1] # cols=dim(m)[2] # # # # # if(logMode) { # # melt.m=cbind(rowInd=rep(1:rows, times=cols), colInd=rep(1:cols, each=rows), reshape2::melt( log10(m+pseudocount))) # # }else{ # # melt.m=cbind(rowInd=rep(1:rows, times=cols), colInd=rep(1:cols, each=rows), reshape2::melt(m)) # # } # # melt.m=cbind(rowInd=rep(1:rows, times=cols), colInd=rep(1:cols, each=rows), reshape2::melt(m)) # # g=ggplot(data=melt.m) # # ## add the heat tiles with or without a white border for clarity # # if(border==TRUE) # g2=g+geom_raster(aes(x=colInd,y=rowInd, fill=value),colour='grey') # if(border==FALSE) # 
g2=g+geom_raster(aes(x=colInd,y=rowInd,ymax=rowInd, fill=value)) # # ## add axis labels either supplied or from the colnames rownames of the matrix # # if(labCol==TRUE) # { # g2=g2+scale_x_continuous(breaks=(1:cols)-0.5, labels=colnames(m)) # } # if(labCol==FALSE) # { # g2=g2+scale_x_continuous(breaks=(1:cols)-0.5, labels=rep('',cols)) # } # # # if(labRow==TRUE) # { # g2=g2+scale_y_continuous(breaks=(1:rows)-0.5, labels=rownames(m)) # } # if(labRow==FALSE) # { # g2=g2+scale_y_continuous(breaks=(1:rows)-0.5, labels=rep('',rows)) # } # # # Get rid of the ticks, they get way too dense with lots of rows # g2 <- g2 + theme(axis.ticks = element_blank()) # # ## get rid of grey panel background and gridlines # # g2=g2+theme(panel.grid.minor=element_line(colour=NA), panel.grid.major=element_line(colour=NA), # panel.background=element_rect(fill=NA, colour=NA)) # # ##adjust x-axis labels # g2=g2+theme(axis.text.x=element_text(angle=-90, hjust=0)) # # #write(paste(c("Length of heatscale is :", length(heatscale))), stderr()) # # if(is.function(rescaling)) # { # # }else{ # if(rescaling=='row' || rescaling == 'column'){ # legendTitle <- "Relative\nexpression" # }else{ # if (logMode) # { # legendTitle <- bquote(paste(log[10]," FPKM + ",.(pseudocount),sep="")) # #legendTitle <- paste(expression(plain(log)[10])," FPKM + ",pseudocount,sep="") # } else { # legendTitle <- "FPKM" # } # } # } # # if (length(heatscale) == 2){ # g2 <- g2 + scale_fill_gradient(low=heatscale[1], high=heatscale[2], name=legendTitle) # } else if (length(heatscale) == 3) { # if (is.null(heatMidpoint)) # { # heatMidpoint = (max(m) + min(m)) / 2.0 # #write(heatMidpoint, stderr()) # } # g2 <- g2 + theme(panel.border = element_blank()) # g2 <- g2 + scale_fill_gradient2(low=heatscale[1], mid=heatscale[2], high=heatscale[3], midpoint=heatMidpoint, name=legendTitle) # }else { # g2 <- g2 + scale_fill_gradientn(colours=heatscale, name=legendTitle) # } # # 
#g2<-g2+scale_x_discrete("",breaks=tracking_ids,labels=gene_short_names)
#
#   g2 <- g2 + theme(axis.title.x=element_blank(), axis.title.y=element_blank())
#
#   ## finally add the fill colour ramp of your choice (default is blue to red)-- and return
#   return (g2)
# }

# Deprecated alias for plot_pseudotime_heatmap(); kept so existing scripts
# keep working. All arguments are forwarded unchanged.
plot_genes_heatmap <- function(...){
  .Deprecated("plot_pseudotime_heatmap")
  plot_pseudotime_heatmap(...)
}

#' Plots a pseudotime-ordered, row-centered heatmap
#'
#' @description The function plot_pseudotime_heatmap takes a CellDataSet object
#' (usually containing a only subset of significant genes) and generates smooth expression
#' curves much like plot_genes_in_pseudotime.
#' Then, it clusters these genes and plots them using the pheatmap package.
#' This allows you to visualize modules of genes that co-vary across pseudotime.
#'
#' @param cds_subset CellDataSet for the experiment (normally only the branching genes detected with branchTest)
#' @param cluster_rows Whether to cluster the rows of the heatmap.
#' @param hclust_method The method used by pheatmap to perform hierarchical clustering of the rows.
#' @param num_clusters Number of clusters for the heatmap of branch genes
#' @param hmcols The color scheme for drawing the heatmap.
#' @param add_annotation_row Additional annotations to show for each row in the heatmap. Must be a dataframe with one row for each row in the fData table of cds_subset, with matching IDs.
#' @param add_annotation_col Additional annotations to show for each column in the heatmap. Must be a dataframe with one row for each cell in the pData table of cds_subset, with matching IDs.
#' @param show_rownames Whether to show the names for each row in the table.
#' @param use_gene_short_name Whether to use the short names for each row. If FALSE, uses row IDs from the fData table.
#' @param scale_max The maximum value (in standard deviations) to show in the heatmap. Values larger than this are set to the max.
#' @param scale_min The minimum value (in standard deviations) to show in the heatmap. Values smaller than this are set to the min.
#' @param norm_method Determines how to transform expression values prior to rendering
#' @param trend_formula A formula string specifying the model used in fitting the spline curve for each gene/feature.
#' @param return_heatmap Whether to return the pheatmap object to the user.
#' @param cores Number of cores to use when smoothing the expression curves shown in the heatmap.
#' @return A list of heatmap_matrix (expression matrix for the branch committment), ph (pheatmap heatmap object),
#' annotation_row (annotation data.frame for the row), annotation_col (annotation data.frame for the column).
#' @import pheatmap
#' @importFrom stats sd as.dist cor cutree
#' @export
#'
plot_pseudotime_heatmap <- function(cds_subset,
                                    cluster_rows = TRUE,
                                    hclust_method = "ward.D2",
                                    num_clusters = 6,
                                    hmcols = NULL,
                                    add_annotation_row = NULL,
                                    add_annotation_col = NULL,
                                    show_rownames = FALSE,
                                    use_gene_short_name = TRUE,
                                    norm_method = c("log", "vstExprs"),
                                    scale_max=3,
                                    scale_min=-3,
                                    trend_formula = '~sm.ns(Pseudotime, df=3)',
                                    return_heatmap=FALSE,
                                    cores=1){
  # Cannot cut the row tree into more clusters than there are genes.
  num_clusters <- min(num_clusters, nrow(cds_subset))
  pseudocount <- 1
  # Evaluate each gene's smoothed trend on a fixed grid of 100 pseudotime points.
  newdata <- data.frame(Pseudotime = seq(min(pData(cds_subset)$Pseudotime), max(pData(cds_subset)$Pseudotime),length.out = 100))
  m <- genSmoothCurves(cds_subset, cores=cores, trend_formula = trend_formula, relative_expr = T, new_data = newdata)

  #remove genes with no expression in any condition
  m=m[!apply(m,1,sum)==0,]

  norm_method <- match.arg(norm_method)

  # FIXME: this needs to check that vst values can even be computed. (They can only be if we're using NB as the expressionFamily)
  if(norm_method == 'vstExprs' && is.null(cds_subset@dispFitInfo[["blind"]]$disp_func) == FALSE) {
    m = vstExprs(cds_subset, expr_matrix=m)
  } else if(norm_method == 'log') {
    m = log10(m+pseudocount)
  }

  # Row-center the data.
  m=m[!apply(m,1,sd)==0,]
  m=Matrix::t(scale(Matrix::t(m),center=TRUE))
  m=m[is.na(row.names(m)) == FALSE,]
  m[is.nan(m)] = 0
  # Clip extreme z-scores so the color ramp is not dominated by outliers.
  m[m>scale_max] = scale_max
  m[m<scale_min] = scale_min

  heatmap_matrix <- m

  # Correlation-based distance in [0, 1]; NA correlations (constant rows)
  # are treated as maximally distant.
  row_dist <- as.dist((1 - cor(Matrix::t(heatmap_matrix)))/2)
  row_dist[is.na(row_dist)] <- 1

  if(is.null(hmcols)) {
    bks <- seq(-3.1,3.1, by = 0.1)
    hmcols <- blue2green2red(length(bks) - 1)
  } else {
    bks <- seq(-3.1,3.1, length.out = length(hmcols))
  }

  # First (silent) pass: only used to obtain the row dendrogram for cutree.
  ph <- pheatmap(heatmap_matrix,
                 useRaster = T,
                 cluster_cols=FALSE,
                 cluster_rows=cluster_rows,
                 show_rownames=F,
                 show_colnames=F,
                 clustering_distance_rows=row_dist,
                 clustering_method = hclust_method,
                 cutree_rows=num_clusters,
                 silent=TRUE,
                 filename=NA,
                 breaks=bks,
                 border_color = NA,
                 color=hmcols)

  annotation_row <- data.frame(Cluster=factor(cutree(ph$tree_row, num_clusters)))

  if(!is.null(add_annotation_row)) {
    old_colnames_length <- ncol(annotation_row)
    # Align user annotations to the heatmap's row order before binding.
    annotation_row <- cbind(annotation_row, add_annotation_row[row.names(annotation_row), ])
    colnames(annotation_row)[(old_colnames_length+1):ncol(annotation_row)] <- colnames(add_annotation_row)
    # annotation_row$bif_time <- add_annotation_row[as.character(fData(absolute_cds[row.names(annotation_row), ])$gene_short_name), 1]
  }

  if(!is.null(add_annotation_col)) {
    # Columns are the 100 pseudotime grid points produced above, so any column
    # annotation must have exactly 100 rows.
    if(nrow(add_annotation_col) != 100) {
      stop('add_annotation_col should have only 100 rows (check genSmoothCurves before you supply the annotation data)!')
    }
    annotation_col <- add_annotation_col
  } else {
    annotation_col <- NA
  }

  # Relabel rows with gene short names where available (NA short names fall
  # back to the feature id).
  if (use_gene_short_name == TRUE) {
    if (is.null(fData(cds_subset)$gene_short_name) == FALSE) {
      feature_label <- as.character(fData(cds_subset)[row.names(heatmap_matrix), 'gene_short_name'])
      feature_label[is.na(feature_label)] <- row.names(heatmap_matrix)
      row_ann_labels <- as.character(fData(cds_subset)[row.names(annotation_row), 'gene_short_name'])
      row_ann_labels[is.na(row_ann_labels)] <- row.names(annotation_row)
    } else {
      feature_label <- row.names(heatmap_matrix)
      row_ann_labels <- row.names(annotation_row)
    }
  } else {
    feature_label <- row.names(heatmap_matrix)
    row_ann_labels <- row.names(annotation_row)
  }
  row.names(heatmap_matrix) <- feature_label
  row.names(annotation_row) <- row_ann_labels

  colnames(heatmap_matrix) <- c(1:ncol(heatmap_matrix))

  # Second pass: the heatmap that is actually rendered/returned.
  ph_res <- pheatmap(heatmap_matrix[, ], #ph$tree_row$order
                     useRaster = T,
                     cluster_cols = FALSE,
                     cluster_rows = cluster_rows,
                     show_rownames=show_rownames,
                     show_colnames=F,
                     #scale="row",
                     clustering_distance_rows=row_dist, #row_dist
                     clustering_method = hclust_method, #ward.D2
                     cutree_rows=num_clusters,
                     # cutree_cols = 2,
                     annotation_row=annotation_row,
                     annotation_col=annotation_col,
                     treeheight_row = 20,
                     breaks=bks,
                     fontsize = 6,
                     color=hmcols,
                     border_color = NA,
                     silent=TRUE,
                     filename=NA
  )

  # Draw the (silent) pheatmap gtable onto the current graphics device.
  grid::grid.rect(gp=grid::gpar("fill", col=NA))
  grid::grid.draw(ph_res$gtable)
  if (return_heatmap){
    return(ph_res)
  }
}

#' Plot the branch genes in pseudotime with separate branch curves.
#'
#' @description Works similarly to plot_genes_in_pseudotime except it shows
#' one kinetic trend for each lineage.
#'
#' @details This plotting function is used to make the branching plots for a branch dependent gene goes through the progenitor state
#' and bifurcating into two distinct branches (Similar to the pitch-fork bifurcation in dynamic systems). In order to make the
#' bifurcation plot, we first duplicated the progenitor states and by default stretch each branch into maturation level 0-100.
#' Then we fit two natural spline curves for each branch using the VGAM package.
#'
#' @param cds CellDataSet for the experiment
#' @param branch_states The states for two branching branchs
#' @param branch_point The ID of the branch point to analyze. Can only be used when reduceDimension is called with method = "DDRTree".
#' @param branch_labels The names for each branching branch
#' @param method The method to draw the curve for the gene expression branching pattern, either loess ('loess') or VGLM fitting ('fitting')
#' @param min_expr The minimum (untransformed) expression level to use in plotting the genes.
#' @param cell_size The size (in points) of each cell used in the plot
#' @param nrow Number of columns used to layout the faceted cluster panels
#' @param ncol Number of columns used to layout the faceted cluster panels
#' @param panel_order The a character vector of gene short names (or IDs, if that's what you're using), specifying order in which genes should be layed out (left-to-right, top-to-bottom)
#' @param color_by The cell attribute (e.g. the column of pData(cds)) to be used to color each cell
#' @param expression_curve_linetype_by The cell attribute (e.g. the column of pData(cds)) to be used for the linetype of each branch curve
#' @param trend_formula The model formula to be used for fitting the expression trend over pseudotime
#' @param reducedModelFormulaStr A formula specifying a null model. If used, the plot shows a p value from the likelihood ratio test that uses trend_formula as the full model and this formula as the reduced model
#' @param label_by_short_name Whether to label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param relative_expr Whether or not the plot should use relative expression values (only relevant for CellDataSets using transcript counts)
#' @param ... Additional arguments passed on to branchTest. Only used when reducedModelFormulaStr is not NULL.
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom plyr ddply
#' @importFrom reshape2 melt
#' @importFrom BiocGenerics sizeFactors
#' @export
plot_genes_branched_pseudotime <- function (cds,
                                            branch_states = NULL,
                                            branch_point=1,
                                            branch_labels = NULL,
                                            method = "fitting",
                                            min_expr = NULL,
                                            cell_size = 0.75,
                                            nrow = NULL,
                                            ncol = 1,
                                            panel_order = NULL,
                                            color_by = "State",
                                            expression_curve_linetype_by = "Branch",
                                            trend_formula = "~ sm.ns(Pseudotime, df=3) * Branch",
                                            reducedModelFormulaStr = NULL,
                                            label_by_short_name = TRUE,
                                            relative_expr = TRUE,
                                            #gene_pairs = NULL,
                                            ...)
{
  # Silence R CMD check NOTE about the non-standard-evaluation variable.
  Branch <- NA
  # If a null model was supplied, run the likelihood ratio test so each facet
  # can display its p value.
  if (is.null(reducedModelFormulaStr) == FALSE) {
    pval_df <- branchTest(cds, branch_states=branch_states,
                          branch_point=branch_point,
                          fullModelFormulaStr = trend_formula,
                          # BUGFIX: this was hard-coded to "~ sm.ns(Pseudotime, df=3)",
                          # silently ignoring the user-supplied null model and making
                          # the p values inconsistent with the plotted reduced curve.
                          reducedModelFormulaStr = reducedModelFormulaStr,
                          ...)
    fData(cds)[, "pval"] <- pval_df[row.names(cds), 'pval']
  }
  # Only when Branch is in the model formula do we duplicate the "progenitor"
  # cells so that each branch has a complete trajectory; otherwise State is
  # used as the branch assignment directly.
  if("Branch" %in% all.vars(terms(as.formula(trend_formula)))) {
    cds_subset <- buildBranchCellDataSet(cds = cds, branch_states = branch_states,
                                         branch_point=branch_point, branch_labels = branch_labels,
                                         progenitor_method = 'duplicate', ...)
  } else {
    cds_subset <- cds
    pData(cds_subset)$Branch <- pData(cds_subset)$State
  }
  # Count-based expression families get size-factor normalization and rounding;
  # other families are used as-is.
  if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")) {
    integer_expression <- TRUE
  } else {
    integer_expression <- FALSE
  }
  if (integer_expression) {
    CM <- exprs(cds_subset)
    if (relative_expr){
      if (is.null(sizeFactors(cds_subset))) {
        stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first")
      }
      CM <- Matrix::t(Matrix::t(CM)/sizeFactors(cds_subset))
    }
    cds_exprs <- reshape2::melt(round(as.matrix(CM)))
  } else {
    cds_exprs <- reshape2::melt(exprs(cds_subset))
  }
  if (is.null(min_expr)) {
    min_expr <- cds_subset@lowerDetectionLimit
  }
  colnames(cds_exprs) <- c("f_id", "Cell", "expression")
  # Attach feature and phenotype metadata to the long-format expression table.
  # After these two merges the first column of cds_exprs is "Cell" and the
  # second is "f_id" (relied upon by the apply() lookups below).
  cds_pData <- pData(cds_subset)
  cds_fData <- fData(cds_subset)
  cds_exprs <- merge(cds_exprs, cds_fData, by.x = "f_id", by.y = "row.names")
  cds_exprs <- merge(cds_exprs, cds_pData, by.x = "Cell", by.y = "row.names")
  if (integer_expression) {
    cds_exprs$adjusted_expression <- round(cds_exprs$expression)
  } else {
    cds_exprs$adjusted_expression <- log10(cds_exprs$expression)
  }
  # Panel labels: prefer gene_short_name when available, fall back to feature id.
  if (label_by_short_name == TRUE) {
    if (is.null(cds_exprs$gene_short_name) == FALSE) {
      cds_exprs$feature_label <- as.character(cds_exprs$gene_short_name)
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    } else {
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  } else {
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  cds_exprs$feature_label <- as.factor(cds_exprs$feature_label)
  # trend_formula <- paste("adjusted_expression", trend_formula,
  # sep = "")
  cds_exprs$Branch <- as.factor(cds_exprs$Branch)
  # Fit the full model and evaluate it at each cell's own pseudotime/branch.
  new_data <- data.frame(Pseudotime = pData(cds_subset)$Pseudotime, Branch = pData(cds_subset)$Branch)
  full_model_expectation <- genSmoothCurves(cds_subset, cores=1,
                                            trend_formula = trend_formula,
                                            relative_expr = T, new_data = new_data)
  colnames(full_model_expectation) <- colnames(cds_subset)
  # x[2] is the f_id (row of the expectation matrix), x[1] the Cell (column);
  # see the merge order note above.
  cds_exprs$full_model_expectation <- apply(cds_exprs,1, function(x) full_model_expectation[x[2], x[1]])
  if(!is.null(reducedModelFormulaStr)){
    reduced_model_expectation <- genSmoothCurves(cds_subset, cores=1,
                                                 trend_formula = reducedModelFormulaStr,
                                                 relative_expr = T, new_data = new_data)
    colnames(reduced_model_expectation) <- colnames(cds_subset)
    cds_exprs$reduced_model_expectation <- apply(cds_exprs,1, function(x) reduced_model_expectation[x[2], x[1]])
  }
  # FIXME: If you want to show the bifurcation time for each gene, this function
  # should just compute it. Passing it in as a dataframe is just too complicated
  # and will be hard on the user.
  # if(!is.null(bifurcation_time)){
  #   cds_exprs$bifurcation_time <- bifurcation_time[as.vector(cds_exprs$gene_short_name)]
  # }
  # loess smoothing cannot handle zeros on a log scale, so offset by the
  # detection limit.
  if (method == "loess")
    cds_exprs$expression <- cds_exprs$expression + cds@lowerDetectionLimit
  if (label_by_short_name == TRUE) {
    if (is.null(cds_exprs$gene_short_name) == FALSE) {
      cds_exprs$feature_label <- as.character(cds_exprs$gene_short_name)
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    } else {
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  } else {
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  cds_exprs$feature_label <- factor(cds_exprs$feature_label)
  if (is.null(panel_order) == FALSE) {
    cds_exprs$feature_label <- factor(cds_exprs$feature_label, levels = panel_order)
  }
  # Clamp everything below the detection limit so the log-scale axes stay finite.
  cds_exprs$expression[is.na(cds_exprs$expression)] <- min_expr
  cds_exprs$expression[cds_exprs$expression < min_expr] <- min_expr
  cds_exprs$full_model_expectation[is.na(cds_exprs$full_model_expectation)] <- min_expr
  cds_exprs$full_model_expectation[cds_exprs$full_model_expectation < min_expr] <- min_expr
  if(!is.null(reducedModelFormulaStr)){
    cds_exprs$reduced_model_expectation[is.na(cds_exprs$reduced_model_expectation)] <- min_expr
    cds_exprs$reduced_model_expectation[cds_exprs$reduced_model_expectation < min_expr] <- min_expr
  }
  cds_exprs$State <- as.factor(cds_exprs$State)
  cds_exprs$Branch <- as.factor(cds_exprs$Branch)
  q <- ggplot(aes(Pseudotime, expression), data = cds_exprs)
  # if (!is.null(bifurcation_time)) {
  #   q <- q + geom_vline(aes(xintercept = bifurcation_time),
  #                       color = "black", linetype = "longdash")
  # }
  if (is.null(color_by) == FALSE) {
    q <- q + geom_point(aes_string(color = color_by), size = I(cell_size))
  }
  # When a null model was supplied, include the LRT p value in the facet strip.
  if (is.null(reducedModelFormulaStr) == FALSE)
    q <- q + scale_y_log10() + facet_wrap(~feature_label + pval, nrow = nrow,
                                          ncol = ncol, scales = "free_y")
  else q <- q + scale_y_log10() + facet_wrap(~feature_label, nrow = nrow,
                                             ncol = ncol, scales = "free_y")
  if (method == "loess")
    q <- q + stat_smooth(aes(fill = Branch, color = Branch), method = "loess")
  else if (method == "fitting") {
    q <- q + geom_line(aes_string(x = "Pseudotime", y = "full_model_expectation",
                                  linetype = "Branch"), data = cds_exprs) #+ scale_color_manual(name = "Type", values = c(colour_cell, colour), labels = c("Pre-branch", "AT1", "AT2", "AT1", "AT2")
  }
  # Reduced-model curve drawn dashed in black for visual comparison.
  if(!is.null(reducedModelFormulaStr)) {
    q <- q + geom_line(aes_string(x = "Pseudotime", y = "reduced_model_expectation"),
                       color = 'black', linetype = 2, data = cds_exprs)
  }
  q <- q + ylab("Expression") + xlab("Pseudotime (stretched)")
  q <- q + monocle_theme_opts()
  q + expand_limits(y = min_expr)
}

#' Not sure we're ready to release this one quite yet:
#' Plot the branch genes in pseudotime with separate branch curves
#' @param cds CellDataSet for the experiment
#' @param rowgenes Gene ids or short names to be arrayed on the vertical axis.
#' @param colgenes Gene ids or short names to be arrayed on the horizontal axis
#' @param relative_expr Whether to transform expression into relative values
#' @param min_expr The minimum level of expression to show in the plot
#' @param cell_size A number how large the cells should be in the plot
#' @param label_by_short_name a boolean that indicates whether cells should be labeled by their short name
#' @param show_density a boolean that indicates whether a 2D density estimation should be shown in the plot
#' @param round_expr a boolean that indicates whether cds_expr values should be rounded or not
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom reshape2 melt
plot_coexpression_matrix <- function(cds,
                                     rowgenes,
                                     colgenes,
                                     relative_expr=TRUE,
                                     min_expr=NULL,
                                     cell_size=0.85,
                                     label_by_short_name=TRUE,
                                     show_density=TRUE,
                                     round_expr=FALSE){
  # Silence R CMD check NOTEs for variables used via non-standard evaluation.
  gene_short_name <- NA
  f_id <- NA
  adjusted_expression.x <- NULL
  adjusted_expression.y <- NULL
  ..density.. <- NULL

  # Accept either gene short names or row ids for both axes.
  row_gene_ids <- row.names(subset(fData(cds), gene_short_name %in% rowgenes))
  row_gene_ids <- union(row_gene_ids, intersect(rowgenes, row.names(fData(cds))))

  col_gene_ids <- row.names(subset(fData(cds), gene_short_name %in% colgenes))
  col_gene_ids <- union(col_gene_ids, intersect(colgenes, row.names(fData(cds))))

  cds_subset <- cds[union(row_gene_ids, col_gene_ids),]

  # Count-based families can be normalized by size factors; for any other
  # family relative expression is forced on (values used as-is).
  if (cds_subset@expressionFamily@vfamily %in% c("negbinomial", "negbinomial.size")){
    integer_expression <- TRUE
  }else{
    integer_expression <- FALSE
    relative_expr <- TRUE
  }

  if (integer_expression) {
    cds_exprs <- exprs(cds_subset)
    if (relative_expr){
      if (is.null(sizeFactors(cds_subset))) {
        stop("Error: to call this function with relative_expr=TRUE, you must call estimateSizeFactors() first")
      }
      cds_exprs <- Matrix::t(Matrix::t(cds_exprs) / sizeFactors(cds_subset))
    }
    if (round_expr){
      cds_exprs <- reshape2::melt(round(as.matrix(cds_exprs)))
    } else {
      cds_exprs <- reshape2::melt(as.matrix(cds_exprs))
    }
  }else{
    cds_exprs <- reshape2::melt(exprs(cds_subset))
  }
  if (is.null(min_expr)){
    min_expr <- cds_subset@lowerDetectionLimit
  }
  colnames(cds_exprs) <- c("f_id", "Cell", "expression")
  # Floor expression at the detection limit so log axes remain finite.
  cds_exprs$expression[cds_exprs$expression < min_expr] <- min_expr
  cds_pData <- pData(cds_subset)
  cds_fData <- fData(cds_subset)
  cds_exprs <- merge(cds_exprs, cds_fData, by.x="f_id", by.y="row.names")

  cds_exprs$adjusted_expression <- cds_exprs$expression
  #cds_exprs$adjusted_expression <- log10(cds_exprs$adjusted_expression + abs(rnorm(nrow(cds_exprs), min_expr, sqrt(min_expr))))

  # Panel labels: prefer gene_short_name when available, fall back to feature id.
  if (label_by_short_name == TRUE){
    if (is.null(cds_exprs$gene_short_name) == FALSE){
      cds_exprs$feature_label <- as.character(cds_exprs$gene_short_name)
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    }else{
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  }else{
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  cds_exprs$feature_label <- factor(cds_exprs$feature_label)

  # Pair every row gene with every column gene per cell: the merge on "Cell"
  # yields .x (row gene) and .y (column gene) suffixed columns.
  row_cds_exprs <- subset(cds_exprs, f_id %in% row_gene_ids)
  col_cds_exprs <- subset(cds_exprs, f_id %in% col_gene_ids)

  joined_exprs <- merge(row_cds_exprs, col_cds_exprs, by="Cell")
  cds_exprs <- joined_exprs
  cds_exprs <- merge(cds_exprs, cds_pData, by.x="Cell", by.y="row.names")
  # Drop cells where both genes sit at the detection floor (uninformative points).
  cds_exprs <- subset(cds_exprs, adjusted_expression.x > min_expr | adjusted_expression.y > min_expr)

  q <- ggplot(aes(adjusted_expression.x, adjusted_expression.y), data=cds_exprs, size=I(1))

  if (show_density){
    q <- q + stat_density2d(geom="raster", aes(fill = ..density..), contour = FALSE) + scale_fill_gradient(low="white", high="red")
  }

  # White-on-black point pairs give each cell a thin outline.
  q <- q + scale_x_log10() + scale_y_log10() +
    geom_point(color=I("black"), size=I(cell_size * 1.50)) +
    geom_point(color=I("white"), size=I(cell_size)) +
    facet_grid(feature_label.x ~ feature_label.y, scales="free")
  #scale_color_brewer(palette="Set1") +
  if (min_expr < 1) {
    q <- q + expand_limits(y=c(min_expr, 1), x=c(min_expr, 1))
  }

  #q <- q + monocle_theme_opts()
  q
}

#The following code is swipped from colorRamps package which is used to make the
# pallette
# (Helpers adapted from the colorRamps package: a piecewise-linear
#  "trapezoid" ramp and the matlab-like palettes built from it.)

# Trapezoidal ramp of length n: a flat top ("sill") of height `height`
# centered at `mid`, with linear shoulders spanning a base of width `base`.
# All of mid/sill/base are fractions of the [0, 1] interval.
table.ramp <- function(n, mid = 0.5, sill = 0.5, base = 1, height = 1)
{
    positions <- seq(0, 1, length.out = n)
    ramp <- rep(0, length(positions))
    # Index range of the flat top, clamped into [1, n].
    top_lo <- max(c(1, round((n - 1) * (mid - sill / 2)) + 1))
    top_hi <- min(c(n, round((n - 1) * (mid + sill / 2)) + 1))
    ramp[top_lo:top_hi] <- 1
    # Shoulder endpoints; these may fall outside [1, n] and are filtered below.
    foot_lo <- round((n - 1) * (mid - base / 2)) + 1
    foot_hi <- round((n - 1) * (mid + base / 2)) + 1
    # Rising shoulder: foot_lo..top_lo maps linearly from 0 up to 1.
    idx <- foot_lo:top_lo
    vals <- seq(0, 1, length.out = length(idx))
    keep <- which(idx > 0 & idx <= n)
    ramp[idx[keep]] <- vals[keep]
    # Falling shoulder: top_hi..foot_hi maps linearly from 1 down to 0.
    idx <- top_hi:foot_hi
    vals <- seq(1, 0, length.out = length(idx))
    keep <- which(idx > 0 & idx <= n)
    ramp[idx[keep]] <- vals[keep]
    height * ramp
}

# Build n hex colors from per-channel (mid, sill, base) ramp specifications.
#' @importFrom grDevices rgb
rgb.tables <- function(n,
                       red = c(0.75, 0.25, 1),
                       green = c(0.5, 0.25, 1),
                       blue = c(0.25, 0.25, 1))
{
    channel <- function(spec) do.call("table.ramp", as.list(c(n, spec)))
    rgb(channel(red), channel(green), channel(blue))
}

# Default matlab-like rainbow palette.
matlab.like <- function(n) rgb.tables(n)

# Alternate matlab-like palette (blue -> green -> red); used as the default
# heatmap color ramp elsewhere in this file via the alias below.
matlab.like2 <- function(n) {
    rgb.tables(n,
               red = c(0.8, 0.2, 1),
               green = c(0.5, 0.4, 0.8),
               blue = c(0.2, 0.2, 1))
}

blue2green2red <- matlab.like2

#' Create a heatmap to demonstrate the bifurcation of gene expression along two branchs
#'
#' @description returns a heatmap that shows changes in both lineages at the same time.
#' It also requires that you choose a branch point to inspect.
#' Columns are points in pseudotime, rows are genes, and the beginning of pseudotime is in the middle of the heatmap.
#' As you read from the middle of the heatmap to the right, you are following one lineage through pseudotime. As you read left, the other.
#' The genes are clustered hierarchically, so you can visualize modules of genes that have similar lineage-dependent expression patterns.
#'
#' @param cds_subset CellDataSet for the experiment (normally only the branching genes detected with branchTest)
#' @param branch_point The ID of the branch point to visualize. Can only be used when reduceDimension is called with method = "DDRTree".
#' @param branch_states The two states to compare in the heatmap. Mutually exclusive with branch_point.
#' @param branch_labels The labels for the branches.
#' @param cluster_rows Whether to cluster the rows of the heatmap.
#' @param hclust_method The method used by pheatmap to perform hierarchical clustering of the rows.
#' @param num_clusters Number of clusters for the heatmap of branch genes
#' @param hmcols The color scheme for drawing the heatmap.
#' @param branch_colors The colors used in the annotation strip indicating the pre- and post-branch cells.
#' @param add_annotation_row Additional annotations to show for each row in the heatmap. Must be a dataframe with one row for each row in the fData table of cds_subset, with matching IDs.
#' @param add_annotation_col Additional annotations to show for each column in the heatmap. Must be a dataframe with one row for each cell in the pData table of cds_subset, with matching IDs.
#' @param show_rownames Whether to show the names for each row in the table.
#' @param use_gene_short_name Whether to use the short names for each row. If FALSE, uses row IDs from the fData table.
#' @param scale_max The maximum value (in standard deviations) to show in the heatmap. Values larger than this are set to the max.
#' @param scale_min The minimum value (in standard deviations) to show in the heatmap. Values smaller than this are set to the min.
#' @param norm_method Determines how to transform expression values prior to rendering
#' @param trend_formula A formula string specifying the model used in fitting the spline curve for each gene/feature.
#' @param return_heatmap Whether to return the pheatmap object to the user.
#' @param cores Number of cores to use when smoothing the expression curves shown in the heatmap.
#' @param ... Additional arguments passed to buildBranchCellDataSet
#' @return A list of heatmap_matrix (expression matrix for the branch commitment), ph (pheatmap heatmap object),
#' annotation_row (annotation data.frame for the row), annotation_col (annotation data.frame for the column).
#' @import pheatmap
#' @importFrom stats sd as.dist cor cutree
#' @export
#'
plot_genes_branched_heatmap <- function(cds_subset,
                                        branch_point = 1,
                                        branch_states = NULL,
                                        branch_labels = c("Cell fate 1", "Cell fate 2"),
                                        cluster_rows = TRUE,
                                        hclust_method = "ward.D2",
                                        num_clusters = 6,
                                        hmcols = NULL,
                                        branch_colors = c('#979797', '#F05662', '#7990C8'),
                                        add_annotation_row = NULL,
                                        add_annotation_col = NULL,
                                        show_rownames = FALSE,
                                        use_gene_short_name = TRUE,
                                        scale_max = 3,
                                        scale_min = -3,
                                        norm_method = c("log", "vstExprs"),
                                        trend_formula = '~sm.ns(Pseudotime, df=3) * Branch',
                                        return_heatmap = FALSE,
                                        cores = 1,
                                        ...) {
  # Duplicate the progenitor cells onto both branches so each branch's kinetic
  # curve can be fit over the full pseudotime range.
  new_cds <- buildBranchCellDataSet(cds_subset,
                                    branch_states = branch_states,
                                    branch_point = branch_point,
                                    progenitor_method = 'duplicate',
                                    ...)

  new_cds@dispFitInfo <- cds_subset@dispFitInfo

  if (is.null(branch_states)) {
    # Default: everything that is not the root (Pseudotime == 0) state.
    progenitor_state <- subset(pData(cds_subset), Pseudotime == 0)[, 'State']
    branch_states <- setdiff(pData(cds_subset)$State, progenitor_state)
  }

  # Column at which the two mirrored branches meet in the rendered heatmap.
  col_gap_ind <- 101

  newdataA <- data.frame(Pseudotime = seq(0, 100, length.out = 100),
                         Branch = as.factor(unique(as.character(pData(new_cds)$Branch))[1]))
  newdataB <- data.frame(Pseudotime = seq(0, 100, length.out = 100),
                         Branch = as.factor(unique(as.character(pData(new_cds)$Branch))[2]))

  # Smooth each gene's expression along both branches on a common 100-point grid.
  BranchAB_exprs <- genSmoothCurves(new_cds[, ], cores = cores,
                                    trend_formula = trend_formula,
                                    relative_expr = TRUE,
                                    new_data = rbind(newdataA, newdataB))

  BranchA_exprs <- BranchAB_exprs[, 1:100]
  BranchB_exprs <- BranchAB_exprs[, 101:200]

  common_ancestor_cells <- row.names(pData(new_cds)[pData(new_cds)$State ==
    setdiff(pData(new_cds)$State, branch_states), ])
  BranchP_num <- (100 - floor(max(pData(new_cds)[common_ancestor_cells, 'Pseudotime'])))
  BranchA_num <- floor(max(pData(new_cds)[common_ancestor_cells, 'Pseudotime']))
  BranchB_num <- BranchA_num

  norm_method <- match.arg(norm_method)

  # FIXME: this needs to check that vst values can even be computed. (They can
  # only be if we're using NB as the expressionFamily)
  if (norm_method == 'vstExprs') {
    BranchA_exprs <- vstExprs(new_cds, expr_matrix = BranchA_exprs)
    BranchB_exprs <- vstExprs(new_cds, expr_matrix = BranchB_exprs)
  } else if (norm_method == 'log') {
    BranchA_exprs <- log10(BranchA_exprs + 1)
  	BranchB_exprs <- log10(BranchB_exprs + 1)
  }

  # Mirror branch A (right-to-left) so the pre-branch region sits in the middle.
  # NOTE: Matrix::cBind was deprecated and removed; base cbind dispatches correctly.
  heatmap_matrix <- cbind(BranchA_exprs[, (col_gap_ind - 1):1], BranchB_exprs)

  heatmap_matrix <- heatmap_matrix[!apply(heatmap_matrix, 1, sd) == 0, ]
  heatmap_matrix <- Matrix::t(scale(Matrix::t(heatmap_matrix), center = TRUE))
  heatmap_matrix <- heatmap_matrix[is.na(row.names(heatmap_matrix)) == FALSE, ]
  heatmap_matrix[is.nan(heatmap_matrix)] <- 0
  heatmap_matrix[heatmap_matrix > scale_max] <- scale_max
  heatmap_matrix[heatmap_matrix < scale_min] <- scale_min

  heatmap_matrix_ori <- heatmap_matrix
  # Remove the NA fitting-failure genes for each branch.
  heatmap_matrix <- heatmap_matrix[is.finite(heatmap_matrix[, 1]) &
                                     is.finite(heatmap_matrix[, col_gap_ind]), ]

  row_dist <- as.dist((1 - cor(Matrix::t(heatmap_matrix))) / 2)
  row_dist[is.na(row_dist)] <- 1

  exp_rng <- range(heatmap_matrix)  # bks is based on the expression range
  bks <- seq(exp_rng[1] - 0.1, exp_rng[2] + 0.1, by = 0.1)
  if (is.null(hmcols)) {
    hmcols <- blue2green2red(length(bks) - 1)
  }

  # First pass: always cluster rows so cutree can assign cluster labels.
  ph <- pheatmap(heatmap_matrix,
                 useRaster = TRUE,
                 cluster_cols = FALSE,
                 cluster_rows = TRUE,
                 show_rownames = FALSE,
                 show_colnames = FALSE,
                 clustering_distance_rows = row_dist,
                 clustering_method = hclust_method,
                 cutree_rows = num_clusters,
                 silent = TRUE,
                 filename = NA,
                 breaks = bks,
                 color = hmcols)

  annotation_row <- data.frame(Cluster = factor(cutree(ph$tree_row, num_clusters)))

  if (!is.null(add_annotation_row)) {
    annotation_row <- cbind(annotation_row,
                            add_annotation_row[row.names(annotation_row), ])
  }

  colnames(heatmap_matrix) <- c(1:ncol(heatmap_matrix))
  annotation_col <- data.frame(row.names = c(1:ncol(heatmap_matrix)),
                               "Cell Type" = c(rep(branch_labels[1], BranchA_num),
                                               rep("Pre-branch", 2 * BranchP_num),
                                               rep(branch_labels[2], BranchB_num)))
  colnames(annotation_col) <- "Cell Type"

  if (!is.null(add_annotation_col)) {
    # BUGFIX: the original indexed `cds`, which was a local NA placeholder;
    # `cds_subset` is the object actually passed in. NOTE(review): indexing
    # fData by annotation_col row names still looks suspect — verify intent.
    annotation_col <- cbind(annotation_col,
                            add_annotation_col[fData(cds_subset[row.names(annotation_col), ])$gene_short_name, 1])
  }

  names(branch_colors) <- c("Pre-branch", branch_labels[1], branch_labels[2])
  annotation_colors <- list("Cell Type" = branch_colors)
  names(annotation_colors$`Cell Type`) <- c('Pre-branch', branch_labels)

  if (use_gene_short_name == TRUE) {
    if (is.null(fData(cds_subset)$gene_short_name) == FALSE) {
      feature_label <- as.character(fData(cds_subset)[row.names(heatmap_matrix), 'gene_short_name'])
      feature_label[is.na(feature_label)] <- row.names(heatmap_matrix)

      row_ann_labels <- as.character(fData(cds_subset)[row.names(annotation_row), 'gene_short_name'])
      row_ann_labels[is.na(row_ann_labels)] <- row.names(annotation_row)
    } else {
      feature_label <- row.names(heatmap_matrix)
      row_ann_labels <- row.names(annotation_row)
    }
  } else {
    feature_label <- row.names(heatmap_matrix)
    row_ann_labels <- row.names(annotation_row)
  }

  row.names(heatmap_matrix) <- feature_label
  row.names(annotation_row) <- row_ann_labels

  # BUGFIX: honor the cluster_rows argument (it was hard-coded TRUE before);
  # mirror plot_multiple_branches_heatmap, which drops the row annotation when
  # rows are not clustered.
  if (!(cluster_rows)) {
    annotation_row <- NA
  }

  ph_res <- pheatmap(heatmap_matrix[, ],
                     useRaster = TRUE,
                     cluster_cols = FALSE,
                     cluster_rows = cluster_rows,
                     show_rownames = show_rownames,
                     show_colnames = FALSE,
                     clustering_distance_rows = row_dist,
                     clustering_method = hclust_method,
                     cutree_rows = num_clusters,
                     annotation_row = annotation_row,
                     annotation_col = annotation_col,
                     annotation_colors = annotation_colors,
                     gaps_col = col_gap_ind,
                     treeheight_row = 20,
                     breaks = bks,
                     fontsize = 6,
                     color = hmcols,
                     border_color = NA,
                     silent = TRUE)

  grid::grid.rect(gp = grid::gpar("fill", col = NA))
  grid::grid.draw(ph_res$gtable)

  if (return_heatmap) {
    return(list(BranchA_exprs = BranchA_exprs,
                BranchB_exprs = BranchB_exprs,
                heatmap_matrix = heatmap_matrix,
                heatmap_matrix_ori = heatmap_matrix_ori,
                ph = ph,
                col_gap_ind = col_gap_ind,
                row_dist = row_dist,
                hmcols = hmcols,
                annotation_colors = annotation_colors,
                annotation_row = annotation_row,
                annotation_col = annotation_col,
                ph_res = ph_res))
  }
}

#' Plots genes by mean vs. dispersion, highlighting those selected for ordering
#'
#' Each gray point in the plot is a gene. The black dots are those that were included
#' in the last call to setOrderingFilter. The red curve shows the mean-variance
#' model learned by estimateDispersions().
#'
#' @param cds The CellDataSet to be used for the plot.
#' @export
plot_ordering_genes <- function(cds) {
  # inherits() also accepts subclasses of CellDataSet, unlike class(cds)[1].
  if (!inherits(cds, "CellDataSet")) {
    stop("Error input object is not of type 'CellDataSet'")
  }

  disp_table <- dispersionTable(cds)

  # Dummy bindings to quiet R CMD check notes about NSE column references below.
  use_for_ordering <- NA
  mean_expression <- NA
  dispersion_empirical <- NA
  dispersion_fit <- NA
  gene_id <- NA

  ordering_genes <- row.names(subset(fData(cds), use_for_ordering == TRUE))

  g <- qplot(mean_expression, dispersion_empirical, data = disp_table,
             log = "xy", color = I("darkgrey")) +
    geom_line(aes(y = dispersion_fit), color = "red")
  if (length(ordering_genes) > 0) {
    g <- g + geom_point(aes(mean_expression, dispersion_empirical),
                        data = subset(disp_table, gene_id %in% ordering_genes),
                        color = "black")
  }
  g <- g + monocle_theme_opts()
  g
}

#' Plots clusters of cells.
#'
#' @param cds CellDataSet for the experiment
#' @param x the column of reducedDimS(cds) to plot on the horizontal axis
#' @param y the column of reducedDimS(cds) to plot on the vertical axis
#' @param color_by the cell attribute (e.g. the column of pData(cds)) to map to each cell's color
#' @param markers a gene name or gene id to use for setting the size of each cell in the plot
#' @param show_cell_names draw the name of each cell in the plot
#' @param cell_size The size of the point for each cell
#' @param cell_name_size the size of cell name labels
#' @param ... additional arguments passed into the scale_color_viridis function
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom reshape2 melt
#' @importFrom viridis scale_color_viridis
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' HSMM <- reduceD
#' plot_cell_clusters(HSMM)
#' plot_cell_clusters(HSMM, color_by="Pseudotime")
#' plot_cell_clusters(HSMM, markers="MYH3")
#' }
plot_cell_clusters <- function(cds,
                               x = 1,
                               y = 2,
                               color_by = "Cluster",
                               markers = NULL,
                               show_cell_names = FALSE,
                               cell_size = 1.5,
                               cell_name_size = 2,
                               ...) {
  # Scalar guard: use short-circuit || rather than elementwise |.
  if (is.null(cds@reducedDimA) || length(pData(cds)$Cluster) == 0) {
    stop("Error: Clustering is not performed yet. Please call clusterCells() before calling this function.")
  }

  # Dummy bindings to quiet R CMD check notes about NSE column references below.
  gene_short_name <- NULL
  sample_name <- NULL
  data_dim_1 <- NULL
  data_dim_2 <- NULL

  lib_info <- pData(cds)

  tSNE_dim_coords <- reducedDimA(cds)
  data_df <- data.frame(t(tSNE_dim_coords[c(x, y), ]))
  colnames(data_df) <- c("data_dim_1", "data_dim_2")
  data_df$sample_name <- colnames(cds)
  data_df <- merge(data_df, lib_info, by.x = "sample_name", by.y = "row.names")

  markers_exprs <- NULL
  if (is.null(markers) == FALSE) {
    markers_fData <- subset(fData(cds), gene_short_name %in% markers)
    if (nrow(markers_fData) >= 1) {
      markers_exprs <- reshape2::melt(as.matrix(exprs(cds[row.names(markers_fData), ])))
      colnames(markers_exprs)[1:2] <- c('feature_id', 'cell_id')
      markers_exprs <- merge(markers_exprs, markers_fData,
                             by.x = "feature_id", by.y = "row.names")
      markers_exprs$feature_label <- as.character(markers_exprs$gene_short_name)
      # BUGFIX: the melt column was renamed from Var1 to feature_id above, so
      # $Var1 was NULL and the NA fallback silently failed.
      markers_exprs$feature_label[is.na(markers_exprs$feature_label)] <- markers_exprs$feature_id
    }
  }
  if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0) {
    data_df <- merge(data_df, markers_exprs, by.x = "sample_name", by.y = "cell_id")
    g <- ggplot(data = data_df, aes(x = data_dim_1, y = data_dim_2)) +
      facet_wrap(~feature_label)
  } else {
    g <- ggplot(data = data_df, aes(x = data_dim_1, y = data_dim_2))
  }

  # FIXME: setting size here overrides the marker expression functionality.
  # Don't do it!
  if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0) {
    g <- g + geom_point(aes(color = log10(value + 0.1)),
                        size = I(cell_size), na.rm = TRUE) +
      scale_color_viridis(name = paste0("log10(value + 0.1)"), ...)
  } else {
    g <- g + geom_point(aes_string(color = color_by),
                        size = I(cell_size), na.rm = TRUE)
  }

  g <- g +
    monocle_theme_opts() +
    xlab(paste("Component", x)) +
    ylab(paste("Component", y)) +
    theme(legend.position = "top", legend.key.height = grid::unit(0.35, "in")) +
    theme(legend.key = element_blank()) +
    theme(panel.background = element_rect(fill = 'white')) +
    theme(text = element_text(size = 15))
  g
}

#' Plots the decision map of density clusters.
#'
#' @param cds CellDataSet for the experiment after running clusterCells_Density_Peak
#' @param rho_threshold The threshold of local density (rho) used to select the density peaks for plotting
#' @param delta_threshold The threshold of local distance (delta) used to select the density peaks for plotting
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' plot_rho_delta(HSMM)
#' }
plot_rho_delta <- function(cds, rho_threshold = NULL, delta_threshold = NULL) {
  # All of these are scalar NULL checks, so use && (short-circuit).
  if (!is.null(cds@auxClusteringData[["tSNE"]]$densityPeak) &&
      !is.null(pData(cds)$Cluster) &&
      !is.null(pData(cds)$peaks) &&
      !is.null(pData(cds)$halo) &&
      !is.null(pData(cds)$delta) &&
      !is.null(pData(cds)$rho)) {
    # Dummy bindings to quiet R CMD check notes about NSE column references.
    rho <- NULL
    delta <- NULL

    if (!is.null(rho_threshold) && !is.null(delta_threshold)) {
      # Re-derive the peaks from user-supplied thresholds (vectorized &).
      peaks <- pData(cds)$rho >= rho_threshold & pData(cds)$delta >= delta_threshold
    } else {
      peaks <- pData(cds)$peaks
    }

    df <- data.frame(rho = pData(cds)$rho,
                     delta = pData(cds)$delta,
                     peaks = peaks)
    g <- qplot(rho, delta, data = df, alpha = I(0.5), color = peaks) +
      monocle_theme_opts() +
      theme(legend.position = "top", legend.key.height = grid::unit(0.35, "in")) +
      scale_color_manual(values = c("grey", "black")) +
      theme(legend.key = element_blank()) +
      theme(panel.background = element_rect(fill = 'white'))
  } else {
    stop('Please run clusterCells_Density_Peak before using this plotting function')
  }
  g
}

#' Plots the percentage of variance explained by each component based on PCA from the normalized expression
#' data using the same procedure used in reduceDimension function.
#'
#' @param cds CellDataSet for the experiment after running reduceDimension with reduction_method as tSNE
#' @param max_components Maximum number of components shown in the scree plot (variance explained by each component)
#' @param norm_method Determines how to transform expression values prior to reducing dimensionality
#' @param residualModelFormulaStr A model formula specifying the effects to subtract from the data before clustering.
#' @param pseudo_expr amount to increase expression values before dimensionality reduction
#' @param return_all A logical argument to determine whether or not the variance of each component is returned
#' @param use_existing_pc_variance Whether to plot existing results for variance explained by each PC
#' @param verbose Whether to emit verbose output during dimensionality reduction
#' @param ... additional arguments to pass to the dimensionality reduction function
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' plot_pc_variance_explained(HSMM)
#' }
plot_pc_variance_explained <- function(cds,
                                       max_components = 100,
                                       norm_method = c("log", "vstExprs", "none"),
                                       residualModelFormulaStr = NULL,
                                       pseudo_expr = NULL,
                                       return_all = FALSE,
                                       use_existing_pc_variance = FALSE,
                                       verbose = FALSE,
                                       ...) {
  # Fixed seed so the (randomized) irlba decomposition is reproducible.
  set.seed(2016)

  if (!is.null(cds@auxClusteringData[["tSNE"]]$variance_explained) &&
      use_existing_pc_variance == TRUE) {
    # Reuse the cached result rather than recomputing the PCA.
    prop_varex <- cds@auxClusteringData[["tSNE"]]$variance_explained
  } else {
    FM <- normalize_expr_data(cds, norm_method, pseudo_expr)

    # Drop zero-variance rows; row-wise sd computed via the mean of squares.
    xm <- Matrix::rowMeans(FM)
    xsd <- sqrt(Matrix::rowMeans((FM - xm)^2))
    FM <- FM[xsd > 0, ]

    if (is.null(residualModelFormulaStr) == FALSE) {
      if (verbose) message("Removing batch effects")
      X.model_mat <- sparse.model.matrix(as.formula(residualModelFormulaStr),
                                         data = pData(cds),
                                         drop.unused.levels = TRUE)

      # Regress out the nuisance covariates and keep the residuals.
      fit <- limma::lmFit(FM, X.model_mat, ...)
      beta <- fit$coefficients[, -1, drop = FALSE]
      beta[is.na(beta)] <- 0
      FM <- as.matrix(FM) - beta %*% t(X.model_mat[, -1])
    } else {
      X.model_mat <- NULL
    }

    if (nrow(FM) == 0) {
      stop("Error: all rows have standard deviation zero")
    }

    irlba_res <- prcomp_irlba(t(FM),
                              n = min(max_components, min(dim(FM)) - 1),
                              center = TRUE, scale. = TRUE)
    prop_varex <- irlba_res$sdev^2 / sum(irlba_res$sdev^2)
  }

  p <- qplot(1:length(prop_varex), prop_varex, alpha = I(0.5)) +
    monocle_theme_opts() +
    theme(legend.position = "top", legend.key.height = grid::unit(0.35, "in")) +
    theme(panel.background = element_rect(fill = 'white')) +
    xlab('components') +
    ylab('Variance explained \n by each component')

  # NOTE(review): this assignment only modifies the local copy of `cds`
  # (R copy-on-modify), so the cache is not persisted to the caller — confirm
  # whether that was intended.
  cds@auxClusteringData[["tSNE"]]$variance_explained <- prop_varex

  if (return_all) {
    return(list(variance_explained = prop_varex, p = p))
  } else {
    return(p)
  }
}

# Walk the principal-graph MST from a starting cell to one or more end cells,
# returning the shortest paths, the pairwise distances, and any degree-3
# branch points encountered along the way.
#' @importFrom igraph shortest_paths degree shortest.paths
traverseTree <- function(g, starting_cell, end_cells) {
  distance <- shortest.paths(g, v = starting_cell, to = end_cells)
  branchPoints <- which(degree(g) == 3)
  path <- shortest_paths(g, from = starting_cell, end_cells)

  return(list(shortest_path = path$vpath,
              distance = distance,
              branch_points = intersect(branchPoints, unlist(path$vpath))))
}

#' Plots the minimum spanning tree on cells.
#' @description Plots the minimum spanning tree on cells.
#' @param cds CellDataSet for the experiment
#' @param x the column of reducedDimS(cds) to plot on the horizontal axis
#' @param y the column of reducedDimS(cds) to plot on the vertical axis
#' @param root_states the state used to set as the root of the graph
#' @param color_by the cell attribute (e.g. the column of pData(cds)) to map to each cell's color
#' @param show_tree whether to show the links between cells connected in the minimum spanning tree
#' @param show_backbone whether to show the diameter path of the MST used to order the cells
#' @param backbone_color the color used to render the backbone.
#' @param markers a gene name or gene id to use for setting the size of each cell in the plot
#' @param show_cell_names draw the name of each cell in the plot
#' @param cell_size The size of the point for each cell
#' @param cell_link_size The size of the line segments connecting cells (when used with ICA) or the principal graph (when used with DDRTree)
#' @param cell_name_size the size of cell name labels
#' @param show_branch_points Whether to show icons for each branch point (only available when reduceDimension was called with DDRTree)
#' @param ... Additional arguments passed to the scale_color_viridis function
#' @return a ggplot2 plot object
#' @import ggplot2
#' @importFrom igraph V get.edgelist layout_as_tree
#' @importFrom reshape2 melt
#' @importFrom viridis scale_color_viridis
#' @export
#' @examples
#' \dontrun{
#' library(HSMMSingleCell)
#' HSMM <- load_HSMM()
#' plot_complex_cell_trajectory(HSMM)
#' plot_complex_cell_trajectory(HSMM, color_by="Pseudotime", show_backbone=FALSE)
#' plot_complex_cell_trajectory(HSMM, markers="MYH3")
#' }
plot_complex_cell_trajectory <- function(cds,
                                         x = 1,
                                         y = 2,
                                         root_states = NULL,
                                         color_by = "State",
                                         show_tree = TRUE,
                                         show_backbone = TRUE,
                                         backbone_color = "black",
                                         markers = NULL,
                                         show_cell_names = FALSE,
                                         cell_size = 1.5,
                                         cell_link_size = 0.75,
                                         cell_name_size = 2,
                                         show_branch_points = TRUE,
                                         ...) {
  # Dummy bindings to quiet R CMD check notes about NSE column references below.
  gene_short_name <- NA
  sample_name <- NA
  data_dim_1 <- NA
  data_dim_2 <- NA

  lib_info_with_pseudo <- pData(cds)

  if (is.null(cds@dim_reduce_type)) {
    stop("Error: dimensionality not yet reduced. Please call reduceDimension() before calling this function.")
  }
  if (cds@dim_reduce_type == "ICA") {
    reduced_dim_coords <- reducedDimS(cds)
  } else if (cds@dim_reduce_type %in% c("SimplePPT", "DDRTree", "SGL-tree")) {
    reduced_dim_coords <- reducedDimK(cds)
    closest_vertex <- cds@auxOrderingData[["DDRTree"]]$pr_graph_cell_proj_closest_vertex
  } else {
    stop("Error: unrecognized dimensionality reduction method.")
  }

  if (is.null(reduced_dim_coords)) {
    stop("You must first call reduceDimension() before using this function")
  }

  dp_mst <- minSpanningTree(cds)

  # Determine the root vertex of the tree layout. For non-ICA reductions the
  # root cell must be mapped onto its closest principal-graph vertex.
  if (is.null(root_states)) {
    if (is.null(lib_info_with_pseudo$Pseudotime)) {
      root_cell <- row.names(lib_info_with_pseudo)[degree(dp_mst) == 1][1]
    } else {
      root_cell <- row.names(subset(lib_info_with_pseudo, Pseudotime == 0))
    }
    if (cds@dim_reduce_type != "ICA")
      root_cell <- V(dp_mst)$name[cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[root_cell, ]]
  } else {
    candidate_root_cells <- row.names(subset(pData(cds), State %in% root_states))
    if (cds@dim_reduce_type == "ICA") {
      root_cell <- candidate_root_cells[which(degree(dp_mst, candidate_root_cells) == 1)]
    } else {
      Y_candidate_root_cells <- V(dp_mst)$name[cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[candidate_root_cells, ]]
      root_cell <- Y_candidate_root_cells[which(degree(dp_mst, Y_candidate_root_cells) == 1)]
    }
  }

  tree_coords <- layout_as_tree(dp_mst, root = root_cell)

  ica_space_df <- data.frame(tree_coords)
  row.names(ica_space_df) <- colnames(reduced_dim_coords)
  colnames(ica_space_df) <- c("prin_graph_dim_1", "prin_graph_dim_2")
  ica_space_df$sample_name <- row.names(ica_space_df)

  if (is.null(dp_mst)) {
    stop("You must first call orderCells() before using this function")
  }

  # Build one row per MST edge with source/target coordinates for geom_segment.
  edge_list <- as.data.frame(get.edgelist(dp_mst))
  colnames(edge_list) <- c("source", "target")

  edge_df <- merge(ica_space_df, edge_list,
                   by.x = "sample_name", by.y = "source", all = TRUE)
  edge_df <- plyr::rename(edge_df, c("prin_graph_dim_1" = "source_prin_graph_dim_1",
                                     "prin_graph_dim_2" = "source_prin_graph_dim_2"))
  edge_df <- merge(edge_df,
                   ica_space_df[, c("sample_name", "prin_graph_dim_1", "prin_graph_dim_2")],
                   by.x = "target", by.y = "sample_name", all = TRUE)
  edge_df <- plyr::rename(edge_df, c("prin_graph_dim_1" = "target_prin_graph_dim_1",
                                     "prin_graph_dim_2" = "target_prin_graph_dim_2"))

  if (cds@dim_reduce_type == "ICA") {
    S_matrix <- tree_coords[, ]
  } else if (cds@dim_reduce_type %in% c("DDRTree", "SimplePPT", "SGL-tree")) {
    # Place each cell at the layout position of its closest principal-graph vertex.
    S_matrix <- tree_coords[closest_vertex, ]
    closest_vertex <- cds@auxOrderingData[["DDRTree"]]$pr_graph_cell_proj_closest_vertex
  }

  data_df <- data.frame(S_matrix)
  row.names(data_df) <- colnames(reducedDimS(cds))
  colnames(data_df) <- c("data_dim_1", "data_dim_2")
  data_df$sample_name <- row.names(data_df)
  data_df <- merge(data_df, lib_info_with_pseudo, by.x = "sample_name", by.y = "row.names")

  markers_exprs <- NULL
  if (is.null(markers) == FALSE) {
    markers_fData <- subset(fData(cds), gene_short_name %in% markers)
    if (nrow(markers_fData) >= 1) {
      markers_exprs <- reshape2::melt(as.matrix(exprs(cds[row.names(markers_fData), ])))
      colnames(markers_exprs)[1:2] <- c('feature_id', 'cell_id')
      markers_exprs <- merge(markers_exprs, markers_fData,
                             by.x = "feature_id", by.y = "row.names")
      markers_exprs$feature_label <- as.character(markers_exprs$gene_short_name)
      # BUGFIX: the melt column was renamed from Var1 to feature_id above, so
      # $Var1 was NULL and the NA fallback silently failed.
      markers_exprs$feature_label[is.na(markers_exprs$feature_label)] <- markers_exprs$feature_id
    }
  }
  if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0) {
    data_df <- merge(data_df, markers_exprs, by.x = "sample_name", by.y = "cell_id")
    g <- ggplot(data = data_df, aes(x = data_dim_1, y = data_dim_2, I(cell_size))) +
      facet_wrap(~feature_label)
  } else {
    g <- ggplot(data = data_df, aes(x = data_dim_1, y = data_dim_2))
  }
  if (show_tree) {
    g <- g + geom_segment(aes_string(x = "source_prin_graph_dim_1",
                                     y = "source_prin_graph_dim_2",
                                     xend = "target_prin_graph_dim_1",
                                     yend = "target_prin_graph_dim_2"),
                          size = cell_link_size, linetype = "solid",
                          na.rm = TRUE, data = edge_df)
  }

  # FIXME: setting size here overrides the marker expression functionality.
  # Don't do it!
  # is.numeric() (rather than class(x) == 'numeric') also handles integer columns.
  if (is.null(markers_exprs) == FALSE && nrow(markers_exprs) > 0) {
    if (is.numeric(data_df[, color_by])) {
      g <- g + geom_jitter(aes_string(color = paste0("log10(", color_by, " + 0.1)")),
                           size = I(cell_size), na.rm = TRUE, height = 5) +
        scale_color_viridis(name = paste0("log10(", color_by, ")"), ...)
    } else {
      g <- g + geom_jitter(aes_string(color = color_by),
                           size = I(cell_size), na.rm = TRUE, height = 5)
    }
  } else {
    if (is.numeric(data_df[, color_by])) {
      g <- g + geom_jitter(aes_string(color = paste0("log10(", color_by, " + 0.1)")),
                           size = I(cell_size), na.rm = TRUE, height = 5) +
        scale_color_viridis(name = paste0("log10(", color_by, " + 0.1)"), ...)
    } else {
      g <- g + geom_jitter(aes_string(color = color_by),
                           size = I(cell_size), na.rm = TRUE, height = 5)
    }
  }

  if (show_branch_points && cds@dim_reduce_type == 'DDRTree') {
    mst_branch_nodes <- cds@auxOrderingData[[cds@dim_reduce_type]]$branch_points
    branch_point_df <- subset(edge_df, sample_name %in% mst_branch_nodes)[, c("sample_name", "source_prin_graph_dim_1", "source_prin_graph_dim_2")]
    branch_point_df$branch_point_idx <- match(branch_point_df$sample_name, mst_branch_nodes)
    branch_point_df <- branch_point_df[!duplicated(branch_point_df$branch_point_idx), ]

    g <- g + geom_point(aes_string(x = "source_prin_graph_dim_1",
                                   y = "source_prin_graph_dim_2"),
                        size = 2 * cell_size, na.rm = TRUE, data = branch_point_df) +
      geom_text(aes_string(x = "source_prin_graph_dim_1",
                           y = "source_prin_graph_dim_2",
                           label = "branch_point_idx"),
                size = 1.5 * cell_size, color = "white", na.rm = TRUE,
                data = branch_point_df)
  }
  if (show_cell_names) {
    g <- g + geom_text(aes(label = sample_name), size = cell_name_size)
  }

  g <- g +
    theme(strip.background = element_rect(colour = 'white', fill = 'white')) +
    theme(panel.border = element_blank()) +
    theme(panel.grid.minor.x = element_blank(), panel.grid.minor.y = element_blank()) +
    theme(panel.grid.major.x = element_blank(), panel.grid.major.y = element_blank()) +
    theme(panel.background = element_rect(fill = 'white')) +
    theme(legend.key = element_blank()) +
    xlab('') +
    ylab('') +
    theme(legend.position = "top", legend.key.height = grid::unit(0.35, "in")) +
    theme(legend.key = element_blank()) +
    theme(panel.background = element_rect(fill = 'white')) +
    theme(line = element_blank(),
          axis.text.x = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks = element_blank())
  g
}

# Modified function: Plot heatmap of 3 branches with the same coloring. Each CDS subset has to have the same set of genes.
#' Create a heatmap to demonstrate the bifurcation of gene expression along multiple branches
#'
#' @param cds CellDataSet for the experiment (normally only the branching genes detected with BEAM)
#' @param branches The terminal branches (states) on the developmental tree you want to investigate.
#' @param branches_name Name (for example, cell type) of branches you believe the cells on the branches are associated with.
#' @param cluster_rows Whether to cluster the rows of the heatmap.
#' @param hclust_method The method used by pheatmap to perform hierarchical clustering of the rows.
#' @param num_clusters Number of clusters for the heatmap of branch genes
#' @param hmcols The color scheme for drawing the heatmap.
#' @param add_annotation_row Additional annotations to show for each row in the heatmap. Must be a dataframe with one row for each row in the fData table of cds_subset, with matching IDs.
#' @param add_annotation_col Additional annotations to show for each column in the heatmap. Must be a dataframe with one row for each cell in the pData table of cds_subset, with matching IDs.
#' @param show_rownames Whether to show the names for each row in the table.
#' @param use_gene_short_name Whether to use the short names for each row. If FALSE, uses row IDs from the fData table.
#' @param norm_method Determines how to transform expression values prior to rendering
#' @param scale_max The maximum value (in standard deviations) to show in the heatmap. Values larger than this are set to the max.
#' @param scale_min The minimum value (in standard deviations) to show in the heatmap. Values smaller than this are set to the min.
#' @param trend_formula A formula string specifying the model used in fitting the spline curve for each gene/feature.
#' @param return_heatmap Whether to return the pheatmap object to the user.
#' @param cores Number of cores to use when smoothing the expression curves shown in the heatmap.
#' @return A list of heatmap_matrix (expression matrix for the branch commitment), ph (pheatmap heatmap object),
#' annotation_row (annotation data.frame for the row), annotation_col (annotation data.frame for the column).
#' @import pheatmap
#' @export
#'
plot_multiple_branches_heatmap <- function(cds,
                                           branches,
                                           branches_name = NULL,
                                           cluster_rows = TRUE,
                                           hclust_method = "ward.D2",
                                           num_clusters = 6,
                                           hmcols = NULL,
                                           add_annotation_row = NULL,
                                           add_annotation_col = NULL,
                                           show_rownames = FALSE,
                                           use_gene_short_name = TRUE,
                                           norm_method = c("vstExprs", "log"),
                                           scale_max = 3,
                                           scale_min = -3,
                                           trend_formula = '~sm.ns(Pseudotime, df=3)',
                                           return_heatmap = FALSE,
                                           cores = 1) {
  pseudocount <- 1

  # NOTE(review): this guard only fires when branches are missing from pData
  # AND exactly one branch is requested; an || may have been intended — confirm.
  if (!(all(branches %in% pData(cds)$State)) && length(branches) == 1) {
    stop('This function only allows to make multiple branch plots where branches is included in the pData')
  }

  branch_label <- branches
  if (!is.null(branches_name)) {
    if (length(branches) != length(branches_name)) {
      stop('branches_name should have the same length as branches')
    }
    branch_label <- branches_name
  }

  # Test whether or not the states passed to branches are true branches
  # (not trunks) or there are terminal cells.
  g <- cds@minSpanningTree
  m <- NULL
  for (branch_in in branches) {
    branches_cells <- row.names(subset(pData(cds), State == branch_in))
    root_state <- subset(pData(cds), Pseudotime == 0)[, 'State']
    root_state_cells <- row.names(subset(pData(cds), State == root_state))

    if (cds@dim_reduce_type != 'ICA') {
      # Map cells onto their closest principal-graph vertices ("Y_<idx>" names).
      root_state_cells <- unique(paste('Y_', cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[root_state_cells, ], sep = ''))
      branches_cells <- unique(paste('Y_', cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[branches_cells, ], sep = ''))
    }
    root_cell <- root_state_cells[which(degree(g, v = root_state_cells) == 1)]
    tip_cell <- branches_cells[which(degree(g, v = branches_cells) == 1)]

    traverse_res <- traverseTree(g, root_cell, tip_cell)
    path_cells <- names(traverse_res$shortest_path[[1]])

    if (cds@dim_reduce_type != 'ICA') {
      pc_ind <- cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex
      path_cells <- row.names(pc_ind)[paste('Y_', pc_ind[, 1], sep = '') %in% path_cells]
    }

    cds_subset <- cds[, path_cells]

    # Smooth each gene along this branch on a common 100-point pseudotime grid.
    newdata <- data.frame(Pseudotime = seq(0, max(pData(cds_subset)$Pseudotime), length.out = 100))
    tmp <- genSmoothCurves(cds_subset, cores = cores,
                           trend_formula = trend_formula,
                           relative_expr = TRUE, new_data = newdata)
    if (is.null(m)) {
      m <- tmp
    } else {
      m <- cbind(m, tmp)
    }
  }

  # Remove genes with no expression in any condition.
  m <- m[!apply(m, 1, sum) == 0, ]

  norm_method <- match.arg(norm_method)
  # FIXME: this needs to check that vst values can even be computed. (They can
  # only be if we're using NB as the expressionFamily)
  if (norm_method == 'vstExprs' &&
      is.null(cds@dispFitInfo[["blind"]]$disp_func) == FALSE) {
    m <- vstExprs(cds, expr_matrix = m)
  } else if (norm_method == 'log') {
    m <- log10(m + pseudocount)
  }

  # Row-center the data and clamp to the displayable z-score range.
  m <- m[!apply(m, 1, sd) == 0, ]
  m <- Matrix::t(scale(Matrix::t(m), center = TRUE))
  m <- m[is.na(row.names(m)) == FALSE, ]
  m[is.nan(m)] <- 0
  m[m > scale_max] <- scale_max
  m[m < scale_min] <- scale_min

  heatmap_matrix <- m

  row_dist <- as.dist((1 - cor(Matrix::t(heatmap_matrix))) / 2)
  row_dist[is.na(row_dist)] <- 1

  if (is.null(hmcols)) {
    bks <- seq(-3.1, 3.1, by = 0.1)
    hmcols <- blue2green2red(length(bks) - 1)
  } else {
    bks <- seq(-3.1, 3.1, length.out = length(hmcols))
  }

  # First pass: always cluster rows so cutree can assign cluster labels.
  ph <- pheatmap(heatmap_matrix,
                 useRaster = TRUE,
                 cluster_cols = FALSE,
                 cluster_rows = TRUE,
                 show_rownames = FALSE,
                 show_colnames = FALSE,
                 clustering_distance_rows = row_dist,
                 clustering_method = hclust_method,
                 cutree_rows = num_clusters,
                 silent = TRUE,
                 filename = NA,
                 breaks = bks,
                 color = hmcols)

  annotation_col <- data.frame(Branch = factor(rep(rep(branch_label, each = 100))))
  annotation_row <- data.frame(Cluster = factor(cutree(ph$tree_row, num_clusters)))
  col_gaps_ind <- c(1:(length(branches) - 1)) * 100

  if (!is.null(add_annotation_row)) {
    old_colnames_length <- ncol(annotation_row)
    annotation_row <- cbind(annotation_row,
                            add_annotation_row[row.names(annotation_row), ])
    colnames(annotation_row)[(old_colnames_length + 1):ncol(annotation_row)] <- colnames(add_annotation_row)
  }

  if (use_gene_short_name == TRUE) {
    if (is.null(fData(cds)$gene_short_name) == FALSE) {
      feature_label <- as.character(fData(cds)[row.names(heatmap_matrix), 'gene_short_name'])
      feature_label[is.na(feature_label)] <- row.names(heatmap_matrix)

      row_ann_labels <- as.character(fData(cds)[row.names(annotation_row), 'gene_short_name'])
      row_ann_labels[is.na(row_ann_labels)] <- row.names(annotation_row)
    } else {
      feature_label <- row.names(heatmap_matrix)
      row_ann_labels <- row.names(annotation_row)
    }
  } else {
    feature_label <- row.names(heatmap_matrix)
    row_ann_labels <- row.names(annotation_row)
  }

  row.names(heatmap_matrix) <- feature_label
  row.names(annotation_row) <- row_ann_labels

  colnames(heatmap_matrix) <- c(1:ncol(heatmap_matrix))

  if (!(cluster_rows)) {
    annotation_row <- NA
  }

  ph_res <- pheatmap(heatmap_matrix[, ],
                     useRaster = TRUE,
                     cluster_cols = FALSE,
                     cluster_rows = cluster_rows,
                     show_rownames = show_rownames,
                     show_colnames = FALSE,
                     clustering_distance_rows = row_dist,
                     clustering_method = hclust_method,
                     cutree_rows = num_clusters,
                     annotation_row = annotation_row,
                     annotation_col = annotation_col,
                     gaps_col = col_gaps_ind,
                     treeheight_row = 20,
                     breaks = bks,
                     fontsize = 12,
                     color = hmcols,
                     silent = TRUE,
                     border_color = NA,
                     filename = NA)

  grid::grid.rect(gp = grid::gpar("fill", col = NA))
  grid::grid.draw(ph_res$gtable)
  if (return_heatmap) {
    return(ph_res)
  }
}

#' Create a kinetic curves to demonstrate the bifurcation of gene expression along multiple branches
#'
#' @param cds CellDataSet for the experiment (normally only the branching genes detected with BEAM)
#' @param branches The terminal branches (states) on the developmental tree you
want to investigate.
#' @param branches_name Name (for example, cell type) of branches you believe the cells on the branches are associated with.
#' @param min_expr The minimum level of expression to show in the plot
#' @param cell_size A number specifying how large the cells should be in the plot
#' @param norm_method Determines how to transform expression values prior to rendering
#' @param nrow the number of rows used when laying out the panels for each gene's expression
#' @param ncol the number of columns used when laying out the panels for each gene's expression
#' @param panel_order the order in which genes should be laid out (left-to-right, top-to-bottom)
#' @param color_by the cell attribute (e.g. the column of pData(cds)) to be used to color each cell
#' @param trend_formula the model formula to be used for fitting the expression trend over pseudotime
#' @param label_by_short_name label figure panels by gene_short_name (TRUE) or feature id (FALSE)
#' @param TPM Whether to convert the expression value into TPM values.
#' @param cores Number of cores to use when smoothing the expression curves shown in the heatmap.
#' @return a ggplot2 plot object
#'
#' @importFrom Biobase esApply
#' @importFrom stats lowess
#'
#' @export
#'
plot_multiple_branches_pseudotime <- function(cds,
                                              branches,
                                              branches_name = NULL,
                                              min_expr = NULL,
                                              cell_size = 0.75,
                                              norm_method = c("vstExprs", "log"),
                                              nrow = NULL,
                                              ncol = 1,
                                              panel_order = NULL,
                                              color_by = "Branch",
                                              trend_formula = '~sm.ns(Pseudotime, df=3)',
                                              label_by_short_name = TRUE,
                                              TPM = FALSE,
                                              cores = 1) {
  # NOTE(review): `panel_order`, `trend_formula`, and `cores` are accepted for
  # backward compatibility but are not used by the current implementation --
  # in the original source they only appear inside commented-out code paths.

  # Optionally rescale each cell's expression profile to transcripts-per-million.
  if (TPM) {
    exprs(cds) <- esApply(cds, 2, function(x) x / sum(x) * 1e6)
  }

  # Validate that the requested branch states exist in pData(cds).
  # (Scalar condition, so use the short-circuit `&&` rather than elementwise `&`.)
  if (!(all(branches %in% pData(cds)$State)) && length(branches) == 1) {
    stop('This function only allows to make multiple branch plots where branches is included in the pData')
  }

  # Legend labels default to the state identifiers themselves.
  branch_label <- branches
  if (!is.null(branches_name)) {
    if (length(branches) != length(branches_name)) {
      stop('branches_name should have the same length as branches')
    }
    branch_label <- branches_name
  }

  g <- cds@minSpanningTree
  m <- NULL          # smoothed expression matrix; one column set appended per branch
  cds_exprs <- NULL  # long-format expression table accumulated across branches

  for (branch_in in branches) {
    branches_cells <- row.names(subset(pData(cds), State == branch_in))
    root_state <- subset(pData(cds), Pseudotime == 0)[, 'State']
    root_state_cells <- row.names(subset(pData(cds), State == root_state))

    # For tree-based reductions, map cells onto their closest principal-graph
    # vertices ('Y_<idx>' names) before walking the minimum spanning tree.
    if (cds@dim_reduce_type != 'ICA') {
      root_state_cells <- unique(paste('Y_', cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[root_state_cells, ], sep = ''))
      branches_cells <- unique(paste('Y_', cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex[branches_cells, ], sep = ''))
    }

    # Degree-one vertices are tips of the tree: the trajectory root and the branch end.
    root_cell <- root_state_cells[which(degree(g, v = root_state_cells) == 1)]
    tip_cell <- branches_cells[which(degree(g, v = branches_cells) == 1)]

    # Cells along the shortest root -> tip path define this branch's trajectory.
    traverse_res <- traverseTree(g, root_cell, tip_cell)
    path_cells <- names(traverse_res$shortest_path[[1]])
    if (cds@dim_reduce_type != 'ICA') {
      pc_ind <- cds@auxOrderingData$DDRTree$pr_graph_cell_proj_closest_vertex
      path_cells <- row.names(pc_ind)[paste('Y_', pc_ind[, 1], sep = '') %in% path_cells]
    }

    cds_subset <- cds[, path_cells]

    # Smooth each gene's expression along pseudotime with lowess; columns are
    # re-labelled with cell names in pseudotime order.
    tmp <- t(esApply(cds_subset, 1, function(x) lowess(x[order(pData(cds_subset)$Pseudotime)])$y))
    colnames(tmp) <- colnames(cds_subset)[order(pData(cds_subset)$Pseudotime)]

    # Accumulate a long-format table tagged with the branch label.
    # (The original also computed melt(log2(tmp + 1)) here and immediately
    # overwrote it -- that dead assignment has been removed.)
    cds_exprs_tmp <- reshape2::melt(tmp)
    colnames(cds_exprs_tmp) <- c("f_id", "Cell", "expression")
    cds_exprs_tmp$Branch <- branch_label[which(branches == branch_in)]
    if (is.null(cds_exprs)) cds_exprs <- cds_exprs_tmp
    else cds_exprs <- rbind(cds_exprs, cds_exprs_tmp)
    if (is.null(m)) m <- tmp
    else m <- cbind(m, tmp)
  }

  # Remove genes with no expression in any condition.
  m <- m[!apply(m, 1, sum) == 0, ]

  norm_method <- match.arg(norm_method)
  # FIXME: this needs to check that vst values can even be computed. (They can
  # only be if we're using NB as the expressionFamily)
  if (norm_method == 'vstExprs' && is.null(cds@dispFitInfo[["blind"]]$disp_func) == FALSE) {
    m <- vstExprs(cds, expr_matrix = m)
  } else if (norm_method == 'log') {
    # BUGFIX: the original referenced an undefined `pseudocount` variable here,
    # which made norm_method = "log" error out. Use a pseudocount of 1, the
    # value used by the companion heatmap-plotting function.
    m <- log10(m + 1)
  }

  if (is.null(min_expr)) {
    min_expr <- cds@lowerDetectionLimit
  }

  # Attach gene- and cell-level metadata, then stretch each branch's pseudotime
  # onto a common 0-100 scale so curves from different branches are comparable.
  cds_pData <- pData(cds)
  cds_fData <- fData(cds)
  cds_exprs <- merge(cds_exprs, cds_fData, by.x = "f_id", by.y = "row.names")
  cds_exprs <- merge(cds_exprs, cds_pData, by.x = "Cell", by.y = "row.names")
  cds_exprs <- plyr::ddply(cds_exprs, .(Branch), mutate,
                           Pseudotime = (Pseudotime - min(Pseudotime)) * 100 / (max(Pseudotime) - min(Pseudotime)))

  # Panels are labelled by gene short name when available, else by feature id.
  if (label_by_short_name == TRUE) {
    if (is.null(cds_exprs$gene_short_name) == FALSE) {
      cds_exprs$feature_label <- as.character(cds_exprs$gene_short_name)
      cds_exprs$feature_label[is.na(cds_exprs$feature_label)] <- cds_exprs$f_id
    } else {
      cds_exprs$feature_label <- cds_exprs$f_id
    }
  } else {
    cds_exprs$feature_label <- cds_exprs$f_id
  }
  cds_exprs$feature_label <- as.factor(cds_exprs$feature_label)
  cds_exprs$State <- as.factor(cds_exprs$State)
  cds_exprs$Branch <- as.factor(cds_exprs$Branch)

  # One smoothed kinetic curve per branch, one facet per gene.
  q <- ggplot(aes(Pseudotime, expression), data = cds_exprs)
  if (is.null(color_by) == FALSE) {
    q <- q + geom_line(aes_string(color = color_by), size = I(cell_size))
  }
  q <- q + facet_wrap(~feature_label, nrow = nrow, ncol = ncol, scales = "free_y")
  q <- q + ylab("Expression") + xlab("Pseudotime (stretched)")
  q <- q + monocle_theme_opts()
  q + expand_limits(y = min_expr)
}
## ********************
## Create the R package
## ********************

## To get started, install R from https://cran.r-project.org/
## and RStudio Desktop https://rstudio.com/products/rstudio/download/#download
## You can install both of them for free.

## Next, open RStudio as the code that will run benefits from running inside
## RStudio for interactivity purposes.

## Next, you might need to install several R packages that you can install with
## the following code:
## (requireNamespace() is used as a guard so nothing is re-installed if it is
## already present.)
if (!requireNamespace("remotes", quietly = TRUE)) {
    install.packages("remotes")
}
remotes::install_cran(
    c(
        "available",
        "BiocManager",
        "devtools",
        "knitcitations",
        "knitr",
        "pkgdown",
        "rmarkdown",
        "rstudioapi",
        "sessioninfo",
        "styler",
        "usethis"
    )
)
## BiocStyle lives on Bioconductor, not CRAN, so it is installed via BiocManager.
if (!requireNamespace("BiocStyle", quietly = TRUE)) {
    BiocManager::install("BiocStyle")
}
## biocthis is installed straight from GitHub.
remotes::install_github("lcolladotor/biocthis")

## Here's a very quick summary of why these packages are useful:
## * available: to check the name of your package
## * BiocManager: to install Bioconductor packages
## * BiocStyle: for styling your vignette and linking to other packages
## * devtools: to develop R packages
## * knitcitations: for citing utilities in your package vignette
## * knitr: for making your vignette
## * pkgdown: for creating beautiful documentation websites
## * rmarkdown: for making the README.md and processing your vignette
## * remotes: for installing R packages from several locations
## * rstudioapi: for navigating across files in RStudio
## * sessioninfo: for detailed R session information useful to you and your users
## * usethis: for creating templates that will jump start your R package work

## Package names have some properties. You can also use:
available::available("JunePackage")
## to check that your package name is not taken and that it doesn't have
## a meaning that you might not be aware of.

usethis::create_package("JunePackage")
## This opens a new window in RStudio

## Note: If you create packages frequently, check the help file for
## usethis::use_description() for more information on how to set some R author
## defaults.

## Add package development files from biocthis
biocthis::use_bioc_pkg_templates()

## Move to the next step: setting up Git and GitHub for your package
rstudioapi::navigateToFile(usethis::proj_path("dev", "02_git_github_setup.R"))

## This template was made using https://lcolladotor.github.io/biocthis/
/dev/01_create_pkg.R
no_license
lahuuki/JunePackage
R
false
false
2,514
r
## ******************** ## Create the R package ## ******************** ## To get started, install R from https://cran.r-project.org/ ## and RStudio Desktop https://rstudio.com/products/rstudio/download/#download ## You can install both of them for free. ## Next, open RStudio as the code that will run benefits from running inside ## RStudio for interactivity purposes. ## Next, you might need to install several R packages that you can install with ## the following code: if (!requireNamespace("remotes", quietly = TRUE)) { install.packages("remotes") } remotes::install_cran( c( "available", "BiocManager", "devtools", "knitcitations", "knitr", "pkgdown", "rmarkdown", "rstudioapi", "sessioninfo", "styler", "usethis" ) ) if (!requireNamespace("BiocStyle", quietly = TRUE)) { BiocManager::install("BiocStyle") } remotes::install_github("lcolladotor/biocthis") ## Here's a very quick summary of why these packages are useful: ## * available: to check the name of your package ## * BiocManager: to install Bioconductor packages ## * BiocStyle: for styling your vignette and linking to other packages ## * devtools: to develop R packages ## * knitcitations: for citing utilities in your package vignette ## * knitr: for making your vignette ## * pkgdown: for creating beautiful documentation websites ## * rmarkdown: for making the README.md and processing your vignette ## * remotes: for installing R packages from several locations ## * rstudioapi: for navigating across files in RStudio ## * sessioninfo: for detailed R session information useful to you and your users ## * usethis: for creating templates that will jump start your R package work ## Package names have some properties. You can also use: available::available("JunePackage") ## to check that your package name is not taken and that it doesn't have ## a meaning that you might not be aware of. 
usethis::create_package("JunePackage") ## This opens a new window in RStudio ## Note: If you create packages frequently, check the help file for ## usethis::use_description() for more information on how to set some R author ## defaults. ## Add package development files from biocthis biocthis::use_bioc_pkg_templates() ## Move to the next step: setting up Git and GitHub for your package rstudioapi::navigateToFile(usethis::proj_path("dev", "02_git_github_setup.R")) ## This template was made using https://lcolladotor.github.io/biocthis/
#' Calibration for the simple linear regression model.
#'
#' The function \code{calibrate} computes the maximum likelihood estimate and a
#' confidence interval for the unknown predictor value that corresponds to an
#' observed value of the response (or vector thereof) or specified value of the
#' mean response. See the reference listed below for more details.
#'
#' @param object A matrix, list, data frame, or object that inherits from class
#'   \code{"lm"}.
#' @param formula A formula of the form \code{y ~ x}.
#' @param data an optional data frame, list or environment (or object coercible
#'   by \code{as.data.frame} to a data frame) containing the variables in the
#'   model. If not found in data, the variables are taken from
#'   \code{environment(formula)}, typically the environment from which \code{lm}
#'   is called.
#' @param subset An optional vector specifying a subset of observations to be
#'   used in the fitting process.
#' @param na.action a function which indicates what should happen when the data
#'   contain \code{NA}s.
#' @param y0 The value of the observed response(s) or specified value of the
#'   mean response.
#' @param interval The method to use for forming a confidence interval.
#' @param level A numeric scalar between 0 and 1 giving the confidence level for
#'   the interval to be calculated.
#' @param mean.response Logical indicating whether confidence intervals should
#'   correspond to an observed response(s) (\code{FALSE}) or a specified value
#'   of the mean response (\code{TRUE}). Default is \code{FALSE}.
#' @param adjust A logical value indicating if an adjustment should be made to
#'   the critical value used in constructing the confidence interval. This is
#'   useful when the calibration curve is to be used k > 0 times.
#' @param k The number of times the calibration curve is to be used for
#'   computing a confidence interval. Only needed when \code{adjust = TRUE}.
#' @param ... Additional optional arguments. At present, no optional arguments
#'   are used.
#'
#' @return An object of class \code{"invest"} containing the following
#'   components:
#' \itemize{
#'   \item \code{estimate} The estimate of x0.
#'   \item \code{lower} The lower confidence limit for x0.
#'   \item \code{upper} The upper confidence limit for x0.
#'   \item \code{se} An estimate of the standard error (Wald interval only).
#'   \item \code{interval} The method used for calculating \code{lower} and
#'     \code{upper} (only used by \code{print} method).
#' }
#'
#' @references
#' Graybill, F. A., and Iyer, H. K. (1994)
#' \emph{Regression analysis: Concepts and Applications}. Duxbury Press.
#'
#' Miller, R. G. (1981)
#' \emph{Simultaneous Statistical Inference}. Springer-Verlag.
#'
#' @rdname calibrate
#'
#' @aliases print.calibrate
#'
#' @export
#'
#' @note The function \code{invest} is more general, but based on numerical
#'   techniques to find the solution. When the underlying model is that of the
#'   simple linear regression model with normal errors, closed-form expressions
#'   exist which are utilized by the function \code{calibrate}.
#'
#' @examples
#' #
#' # Arsenic example (simple linear regression with replication)
#' #
#'
#' # Inverting a prediction interval for an individual response
#' arsenic.lm <- stats::lm(measured ~ actual, data = arsenic)
#' plotFit(arsenic.lm, interval = "prediction", shade = TRUE,
#'         col.pred = "lightblue")
#' (cal <- calibrate(arsenic.lm, y0 = 3, interval = "inversion"))
#' abline(h = 3)
#' segments(cal$estimate, 3, cal$estimate, par()$usr[3])
#' arrows(cal$lower, 3, cal$lower, par()$usr[3])
#' arrows(cal$upper, 3, cal$upper, par()$usr[3])
#'
#' #
#' # Crystal weight example (simple linear regression)
#' #
#'
#' # Inverting a confidence interval for the mean response
#' crystal.lm <- stats::lm(weight ~ time, data = crystal)
#' plotFit(crystal.lm, interval = "confidence", shade = TRUE,
#'         col.conf = "lightblue")
#' (cal <- calibrate(crystal.lm, y0 = 8, interval = "inversion",
#'                   mean.response = TRUE))
#' abline(h = 8)
#' segments(cal$estimate, 8, cal$estimate, par()$usr[3])
#' arrows(cal$lower, 8, cal$lower, par()$usr[3])
#' arrows(cal$upper, 8, cal$upper, par()$usr[3])
#'
#' # Wald interval and approximate standard error based on the delta method
#' calibrate(crystal.lm, y0 = 8, interval = "Wald", mean.response = TRUE)
calibrate <- function(object, ...) {
  # S3 generic; dispatches to calibrate.default, calibrate.formula, or
  # calibrate.lm depending on the class of `object`.
  UseMethod("calibrate")
}

#' @rdname calibrate
#' @export
#' @method calibrate default
calibrate.default <- function(object, y0,
                              interval = c("inversion", "Wald", "none"),
                              level = 0.95, mean.response = FALSE,
                              adjust = c("none", "Bonferroni", "Scheffe"), k,
                              ...) {
  # Extract needed components from fitted model.
  # `object` supplies raw (x, y) data: column 1 / element 1 is the predictor,
  # column 2 / element 2 is the response.
  if (inherits(object, "matrix")) {
    x <- object[, 1]
    y <- object[, 2]
  } else if (inherits(object, "data.frame")) {
    object <- data.matrix(object)
    x <- object[, 1]
    y <- object[, 2]
  } else if (inherits(object, "list")) {
    x <- object[[1]]
    y <- object[[2]]
    if (length(x) != length(y)) {
      stop(paste("Components of '", deparse(substitute(object)),
                 "' not of same length.", sep = ""), call. = FALSE)
    }
  } else {
    stop("'", paste(deparse(substitute(object)),
                    "' is not a valid matrix, list, or data frame.", sep = ""),
         call. = FALSE)
  }

  eta <- mean(y0)  # mean of new observations
  m <- length(y0)  # number of new observations
  if (mean.response && m > 1) stop("Only one mean response value allowed.")

  # Fit a simple linear regression model and compute necessary components
  z <- stats::lm.fit(cbind(1, x), y)
  b <- unname(z$coefficients)
  n <- length(r <- z$residuals)                # sample size and residuals
  DF <- (DF1 <- n - 2) + (DF2 <- m - 1)        # degrees of freedom
  var1 <- sum(r ^ 2) / z$df.residual           # stage 1 variance estimate
  var2 <- if (m == 1) 0 else stats::var(y0)    # stage 2 variance estimate
  var.pooled <- (DF1 * var1 + DF2 * var2) / DF # pooled estimate of variance
  sigma.pooled <- sqrt(var.pooled)             # sqrt of pooled variance estimate
  ssx <- sum((x - mean(x))^2)                  # sum-of-squares for x, Sxx
  x0.mle <- (eta - b[1L])/b[2L]                # MLE of x0

  # Return point estimate only
  interval <- match.arg(interval)
  if (interval == "none") return(x0.mle)

  # Adjustment for simultaneous intervals
  adjust <- match.arg(adjust)
  # FIXME: Does simultaneous work for m > 1?
  crit <- if (m != 1 || adjust == "none") stats::qt((1 + level)/2, n+m-3) else {
    switch(adjust,
           "Bonferroni" = stats::qt((level + 2*k - 1) / (2*k), n+m-3),
           "Scheffe" = sqrt(k * stats::qf(level, k, n+m-3)))
  }

  # Inversion interval --------------------------------------------------------
  if (interval == "inversion") {
    # Define constants; see Graybill and Iyer (1994)
    c1 <- b[2L]^2 - (sigma.pooled^2 * crit^2)/ssx
    c2 <- if (mean.response) {
      c1/n + (eta - mean(y))^2/ssx
    } else {
      c1*(1/m + 1/n) + (eta - mean(y))^2/ssx
    }
    c3 <- b[2L] * (eta - mean(y))
    c4 <- crit * sigma.pooled
    # Calculate inversion interval
    if (c1 < 0 && c2 <= 0) {
      # Throw warning if interval is the real line
      warning("The calibration line is not well determined.", call. = FALSE)
      lwr <- -Inf
      upr <- Inf
    } else {
      # Construct inversion interval
      lwr <- mean(x) + (c3 - c4*sqrt(c2))/c1
      upr <- mean(x) + (c3 + c4*sqrt(c2))/c1
      # Throw error if result is not a single interval
      if (c1 < 0 && c2 > 0) {
        stop(paste("The calibration line is not well determined. The resulting \nconfidence region is the union of two semi-infinite intervals:\n(",
                   -Inf, ",", round(upr, 4), ") U (", round(lwr, 4), ",", Inf,
                   ")"), call. = FALSE)
      }
    }
    # Store results in a list
    res <- list("estimate" = x0.mle,
                "lower" = lwr,
                "upper" = upr,
                "interval" = interval)
  }

  # Wald interval
  if (interval == "Wald") {
    # Compute standard error for Wald interval (delta method)
    se <- if (mean.response) {
      abs((sigma.pooled/b[2]))*sqrt((1/n + (x0.mle - mean(x))^2/ssx))
    } else {
      abs((sigma.pooled/b[2]))*sqrt((1/m + 1/n + (x0.mle - mean(x))^2/ssx))
    }
    # Store results in a list
    res <- list("estimate" = x0.mle,
                "lower" = x0.mle - crit * se,
                "upper" = x0.mle + crit * se,
                "se" = se,
                "interval" = interval)
  }

  # Assign class label and return results
  class(res) <- "invest"
  return(res)
}

#' @rdname calibrate
#' @export
#' @method calibrate formula
calibrate.formula <- function(formula, data = NULL, ..., subset,
                              na.action = stats::na.fail) {
  # Build a model frame from the formula, then delegate to calibrate.default
  # on the resulting (x, y) matrix.
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval(m$data, sys.parent()))) {
    m$data <- as.data.frame(data)
  }
  m$... <- NULL
  m[[1]] <- as.name("model.frame")
  m <- eval(m, sys.parent())
  Terms <- attr(m, "terms")
  # Drop the intercept column so model.matrix returns the predictor only.
  attr(Terms, "intercept") <- 0
  y <- stats::model.extract(m, "response")
  mm <- stats::model.matrix(Terms, m)
  if (ncol(mm) > 1) stop("This function only works for the simple linear regression model (i.e., y ~ x).")
  x <- as.numeric(mm)
  calibrate(cbind(x, y), ...)
}

#' @rdname calibrate
#' @export
#' @method calibrate lm
calibrate.lm <- function(object, y0,
                         interval = c("inversion", "Wald", "none"),
                         level = 0.95, mean.response = FALSE,
                         adjust = c("none", "Bonferroni", "Scheffe"), k, ...)
{
  # Check model formula for correctness
  xname <- all.vars(stats::formula(object)[[3L]])
  yname <- all.vars(stats::formula(object)[[2L]])
  if (length(xname) != 1L) {
    stop("Only one independent variable allowed.")
  }
  if (length(yname) != 1L) {
    stop("Only one dependent variable allowed.")
  }
  # Check for intercept using terms object from model fit. Alternatively, this
  # can also be checked by testing if the first column name in model.matrix is
  # equal to "(Intercept)".
  if (!attr(stats::terms(object), "intercept")) {
    stop(paste(deparse(substitute(object)), "must contain an intercept."))
  }

  # Extract x values and y values from model frame
  mf <- stats::model.frame(object)
  if (ncol(mf) != 2) {
    stop("calibrate only works for the simple linear regression model.")
  }
  x <- stats::model.matrix(object)[, 2]
  y <- stats::model.response(mf)

  # Eta - mean response or mean of observed response values
  eta <- mean(y0)  # mean of new observations
  m <- length(y0)  # number of new observations
  if (mean.response && m > 1) stop("Only one mean response value allowed.")

  # Reuse the fitted model's coefficients/residuals rather than refitting.
  b <- unname(object$coefficients)
  n <- length(r <- object$residuals)           # sample size and residuals
  DF <- (DF1 <- n - 2) + (DF2 <- m - 1)        # degrees of freedom
  var1 <- sum(r ^ 2) / object$df.residual      # stage 1 variance estimate
  var2 <- if (m == 1) 0 else stats::var(y0)    # stage 2 variance estimate
  var.pooled <- (DF1 * var1 + DF2 * var2) / DF # pooled estimate of variance
  sigma.pooled <- sqrt(var.pooled)             # sqrt of pooled variance estimate
  ssx <- sum((x - mean(x))^2)                  # sum-of-squares for x, Sxx
  x0.mle <- (eta - b[1L])/b[2L]                # MLE of x0

  # Return point estimate only
  interval <- match.arg(interval)
  if (interval == "none") return(x0.mle)

  # Adjustment for simultaneous intervals
  adjust <- match.arg(adjust)
  # FIXME: Does simultaneous work for m > 1?
  crit <- if (m != 1 || adjust == "none") stats::qt((1 + level)/2, n+m-3) else {
    switch(adjust,
           "Bonferroni" = stats::qt((level + 2*k - 1) / (2*k), n+m-3),
           "Scheffe" = sqrt(k * stats::qf(level, k, n+m-3)))
  }

  # Inversion interval --------------------------------------------------------
  if (interval == "inversion") {
    # Define constants; see Graybill and Iyer (1994)
    c1 <- b[2L]^2 - (sigma.pooled^2 * crit^2)/ssx
    c2 <- if (mean.response) {
      c1/n + (eta - mean(y))^2/ssx
    } else {
      c1*(1/m + 1/n) + (eta - mean(y))^2/ssx
    }
    c3 <- b[2L] * (eta - mean(y))
    c4 <- crit * sigma.pooled
    # FIXME: catch errors and throw an appropriate warning
    if (c1 < 0 && c2 <= 0) {
      warning("The calibration line is not well determined.", call. = FALSE)
      lwr <- -Inf
      upr <- Inf
    } else {
      lwr <- mean(x) + (c3 - c4*sqrt(c2))/c1
      upr <- mean(x) + (c3 + c4*sqrt(c2))/c1
      if (c1 < 0 && c2 > 0) {
        stop(paste("The calibration line is not well determined. The resulting \nconfidence region is the union of two semi-infinite intervals:\n(",
                   -Inf, ",", round(upr, 4), ") U (", round(lwr, 4), ",", Inf,
                   ")"), call. = FALSE)
      }
    }
    res <- list("estimate" = x0.mle,
                "lower" = lwr,
                "upper" = upr,
                "interval" = interval)
  }

  # Wald interval
  if (interval == "Wald") {
    # Compute standard error for Wald interval (delta method)
    se <- if (mean.response) {
      abs((sigma.pooled/b[2]))*sqrt((1/n + (x0.mle - mean(x))^2/ssx))
    } else {
      abs((sigma.pooled/b[2]))*sqrt((1/m + 1/n + (x0.mle - mean(x))^2/ssx))
    }
    # Store results in a list
    res <- list("estimate" = x0.mle,
                "lower" = x0.mle - crit * se,
                "upper" = x0.mle + crit * se,
                "se" = se,
                "interval" = interval)
  }

  # Assign class label and return results
  class(res) <- "invest"
  return(res)
}
/R/calibrate.R
no_license
PhDMeiwp/investr
R
false
false
13,860
r
#' Calibration for the simple linear regression model. #' #' The function \code{calibrate} computes the maximum likelihood estimate and a #' condfidence interval for the unknown predictor value that corresponds to an #' observed value of the response (or vector thereof) or specified value of the #' mean response. See the reference listed below for more details. #' #' @param object A matrix, list, data frame, or object that inherits from class #' \code{"lm"}. #' @param formula A formula of the form \code{y ~ x}. #' @param data an optional data frame, list or environment (or object coercible #' by \code{as.data.frame} to a data frame) containing the variables in the #' model. If not found in data, the variables are taken from #' \code{environment(formula)}, typically the environment from which \code{lm} #' is called. #' @param subset An optional vector specifying a subset of observations to be #' used in the fitting process. #' @param na.action a function which indicates what should happen when the data #' contain \code{NA}s. #' @param y0 The value of the observed response(s) or specified value of the #' mean response. #' @param interval The method to use for forming a confidence interval. #' @param level A numeric scalar between 0 and 1 giving the confidence level for #' the interval to be calculated. #' @param mean.response Logicial indicating whether confidence intervals should #' correspond to an observed response(s) (\code{FALSE}) or a specified value #' of the mean response (\code{TRUE}). Default is \code{FALSE}. #' @param adjust A logical value indicating if an adjustment should be made to #' the critical value used in constructing the confidence interval. This #' useful when the calibration curve is to be used k > 0 times. #' @param k The number of times the calibration curve is to be used for #' computing a confidence interval. Only needed when \code{adjust = TRUE}. #' @param ... Additional optional arguments. At present, no optional arguments #' are used. 
#' #' @return An object of class \code{"invest"} containing the following #' components: #' \itemize{ #' \item \code{estimate} The estimate of x0. #' \item \code{lwr} The lower confidence limit for x0. #' \item \code{upr} The upper confidence limit for x0. #' \item \code{se} An estimate of the standard error (Wald interval only). #' \item \code{interval} The method used for calculating \code{lower} and #' \code{upper} (only used by \code{print} method). #' } #' #' @references #' Graybill, F. A., and Iyer, H. K. (1994) #' \emph{Regression analysis: Concepts and Applications}. Duxbury Press. #' #' Miller, R. G. (1981) #' \emph{Simultaneous Statistical Inference}. Springer-Verlag. #' #' @rdname calibrate #' #' @aliases print.calibrate #' #' @export #' #' @note The function \code{invest} is more general, but based on numerical #' techniques to find the solution. When the underlying model is that of the #' simple linear regression model with normal errors, closed-form expressions #' exist which are utilized by the function \code{calibrate}. 
#' #' @examples #' # #' # Arsenic example (simple linear regression with replication) #' # #' #' # Inverting a prediction interval for an individual response #' arsenic.lm <- stats::lm(measured ~ actual, data = arsenic) #' plotFit(arsenic.lm, interval = "prediction", shade = TRUE, #' col.pred = "lightblue") #' (cal <- calibrate(arsenic.lm, y0 = 3, interval = "inversion")) #' abline(h = 3) #' segments(cal$estimate, 3, cal$estimate, par()$usr[3]) #' arrows(cal$lower, 3, cal$lower, par()$usr[3]) #' arrows(cal$upper, 3, cal$upper, par()$usr[3]) #' #' # #' # Crystal weight example (simple linear regression) #' # #' #' # Inverting a confidence interval for the mean response #' crystal.lm <- stats::lm(weight ~ time, data = crystal) #' plotFit(crystal.lm, interval = "confidence", shade = TRUE, #' col.conf = "lightblue") #' (cal <- calibrate(crystal.lm, y0 = 8, interval = "inversion", #' mean.response = TRUE)) #' abline(h = 8) #' segments(cal$estimate, 8, cal$estimate, par()$usr[3]) #' arrows(cal$lower, 8, cal$lower, par()$usr[3]) #' arrows(cal$upper, 8, cal$upper, par()$usr[3]) #' #' # Wald interval and approximate standard error based on the delta method #' calibrate(crystal.lm, y0 = 8, interval = "Wald", mean.response = TRUE) calibrate <- function(object, ...) { UseMethod("calibrate") } #' @rdname calibrate #' @export #' @method calibrate default calibrate.default <- function(object, y0, interval = c("inversion", "Wald", "none"), level = 0.95, mean.response = FALSE, adjust = c("none", "Bonferroni", "Scheffe"), k, ...) { # Extract needed components from fitted model if (inherits(object, "matrix")) { x <- object[, 1] y <- object[, 2] } else if (inherits(object, "data.frame")) { object <- data.matrix(object) x <- object[, 1] y <- object[, 2] } else if (inherits(object, "list")) { x <- object[[1]] y <- object[[2]] if (length(x) != length(y)) { stop(paste("Components of '", deparse(substitute(object)), "' not of same length.", sep = ""), call. 
= FALSE) } } else { stop("'", paste(deparse(substitute(object)), "' is not a valid matrix, list, or data frame.", sep = ""), call. = FALSE) } eta <- mean(y0) # mean of new observations m <- length(y0) # number of new observations if (mean.response && m > 1) stop("Only one mean response value allowed.") # Fit a simple linear regression model and compute necessary components z <- stats::lm.fit(cbind(1, x), y) b <- unname(z$coefficients) n <- length(r <- z$residuals) # sample size and residuals DF <- (DF1 <- n - 2) + (DF2 <- m - 1) # degrees of freedom var1 <- sum(r ^ 2) / z$df.residual # stage 1 variance estimate var2 <- if (m == 1) 0 else stats::var(y0) # stage 2 variance estimate var.pooled <- (DF1 * var1 + DF2 * var2) / DF # pooled estimate of variance sigma.pooled <- sqrt(var.pooled) # sqrt of pooled variance estimate ssx <- sum((x - mean(x))^2) # sum-of-squares for x, Sxx x0.mle <- (eta - b[1L])/b[2L] # MLE of x0 # Return point estimate only interval <- match.arg(interval) if (interval == "none") return(x0.mle) # Adjustment for simultaneous intervals adjust <- match.arg(adjust) # FIXME: Does simultaneous work for m > 1? crit <- if (m != 1 || adjust == "none") stats::qt((1 + level)/2, n+m-3) else { switch(adjust, "Bonferroni" = stats::qt((level + 2*k - 1) / (2*k), n+m-3), "Scheffe" = sqrt(k * stats::qf(level, k, n+m-3))) } # Inversion interval -------------------------------------------------------- if (interval == "inversion") { # Define constants; see Graybill and Iyer (1994) c1 <- b[2L]^2 - (sigma.pooled^2 * crit^2)/ssx c2 <- if (mean.response) { c1/n + (eta - mean(y))^2/ssx } else { c1*(1/m + 1/n) + (eta - mean(y))^2/ssx } c3 <- b[2L] * (eta - mean(y)) c4 <- crit * sigma.pooled # Calculate inversion interval if (c1 < 0 && c2 <= 0) { # Throw warning if interval is the real line warning("The calibration line is not well determined.", call. 
= FALSE) lwr <- -Inf upr <- Inf } else { # Construct inversion interval lwr <- mean(x) + (c3 - c4*sqrt(c2))/c1 upr <- mean(x) + (c3 + c4*sqrt(c2))/c1 # Throw error if result is not a single interval if (c1 < 0 && c2 > 0) { stop(paste("The calibration line is not well determined. The resulting \nconfidence region is the union of two semi-infinite intervals:\n(", -Inf, ",", round(upr, 4), ") U (", round(lwr, 4), ",", Inf, ")"), call. = FALSE) } } # Store results in a list res <- list("estimate" = x0.mle, "lower" = lwr, "upper" = upr, "interval" = interval) } # Wald interval if (interval == "Wald") { # Compute standard error for Wald interval se <- if (mean.response) { abs((sigma.pooled/b[2]))*sqrt((1/n + (x0.mle - mean(x))^2/ssx)) } else { abs((sigma.pooled/b[2]))*sqrt((1/m + 1/n + (x0.mle - mean(x))^2/ssx)) } # Store results in a list res <- list("estimate" = x0.mle, "lower" = x0.mle - crit * se, "upper" = x0.mle + crit * se, "se" = se, "interval" = interval) } # Assign class label and return results class(res) <- "invest" return(res) } #' @rdname calibrate #' @export #' @method calibrate formula calibrate.formula <- function(formula, data = NULL, ..., subset, na.action = stats::na.fail) { m <- match.call(expand.dots = FALSE) if (is.matrix(eval(m$data, sys.parent()))) { m$data <- as.data.frame(data) } m$... <- NULL m[[1]] <- as.name("model.frame") m <- eval(m, sys.parent()) Terms <- attr(m, "terms") attr(Terms, "intercept") <- 0 y <- stats::model.extract(m, "response") mm <- stats::model.matrix(Terms, m) if (ncol(mm) > 1) stop("This function only works for the simple linear regression model (i.e., y ~ x).") x <- as.numeric(mm) calibrate(cbind(x, y), ...) } #' @rdname calibrate #' @export #' @method calibrate lm calibrate.lm <- function(object, y0, interval = c("inversion", "Wald", "none"), level = 0.95, mean.response = FALSE, adjust = c("none", "Bonferroni", "Scheffe"), k, ...) 
{ # Check model formula for correctness xname <- all.vars(stats::formula(object)[[3L]]) yname <- all.vars(stats::formula(object)[[2L]]) if (length(xname) != 1L) { stop("Only one independent variable allowed.") } if (length(yname) != 1L) { stop("Only one dependent variable allowed.") } # Check for intercept using terms object from model fit. Alternatively, this # can also be checked by testing if the first column name in model.matrix is # equal to "(Intercept)". if (!attr(stats::terms(object), "intercept")) { stop(paste(deparse(substitute(object)), "must contain an intercept.")) } # Extract x values and y values from model frame mf <- stats::model.frame(object) if (ncol(mf) != 2) { stop("calibrate only works for the simple linear regression model.") } x <- stats::model.matrix(object)[, 2] y <- stats::model.response(mf) # Eta - mean response or mean of observed respone values eta <- mean(y0) # mean of new observations m <- length(y0) # number of new observations if (mean.response && m > 1) stop("Only one mean response value allowed.") # Fit a simple linear regression model and compute necessary components b <- unname(object$coefficients) n <- length(r <- object$residuals) # sample size and residuals DF <- (DF1 <- n - 2) + (DF2 <- m - 1) # degrees of freedom var1 <- sum(r ^ 2) / object$df.residual # stage 1 variance estimate var2 <- if (m == 1) 0 else stats::var(y0) # stage 2 variance estimate var.pooled <- (DF1 * var1 + DF2 * var2) / DF # pooled estimate of variance sigma.pooled <- sqrt(var.pooled) # sqrt of pooled variance estimate ssx <- sum((x - mean(x))^2) # sum-of-squares for x, Sxx x0.mle <- (eta - b[1L])/b[2L] # MLE of x0 # Return point estimate only interval <- match.arg(interval) if (interval == "none") return(x0.mle) # Adjustment for simultaneous intervals adjust <- match.arg(adjust) # FIXME: Does simultaneous work for m > 1? 
crit <- if (m != 1 || adjust == "none") stats::qt((1 + level)/2, n+m-3) else { switch(adjust, "Bonferroni" = stats::qt((level + 2*k - 1) / (2*k), n+m-3), "Scheffe" = sqrt(k * stats::qf(level, k, n+m-3))) } # Inversion interval -------------------------------------------------------- if (interval == "inversion") { c1 <- b[2L]^2 - (sigma.pooled^2 * crit^2)/ssx c2 <- if (mean.response) { c1/n + (eta - mean(y))^2/ssx } else { c1*(1/m + 1/n) + (eta - mean(y))^2/ssx } c3 <- b[2L] * (eta - mean(y)) c4 <- crit * sigma.pooled # FIXME: catch errors and throw an appropriate warning if (c1 < 0 && c2 <= 0) { warning("The calibration line is not well determined.", call. = FALSE) lwr <- -Inf upr <- Inf } else { lwr <- mean(x) + (c3 - c4*sqrt(c2))/c1 upr <- mean(x) + (c3 + c4*sqrt(c2))/c1 if (c1 < 0 && c2 > 0) { stop(paste("The calibration line is not well determined. The resulting \nconfidence region is the union of two semi-infinite intervals:\n(", -Inf, ",", round(upr, 4), ") U (", round(lwr, 4), ",", Inf, ")"), call. = FALSE) } } res <- list("estimate" = x0.mle, "lower" = lwr, "upper" = upr, "interval" = interval) } # Wald interval if (interval == "Wald") { # Compute standard error for Wald interval se <- if (mean.response) { abs((sigma.pooled/b[2]))*sqrt((1/n + (x0.mle - mean(x))^2/ssx)) } else { abs((sigma.pooled/b[2]))*sqrt((1/m + 1/n + (x0.mle - mean(x))^2/ssx)) } # Store results in a list res <- list("estimate" = x0.mle, "lower" = x0.mle - crit * se, "upper" = x0.mle + crit * se, "se" = se, "interval" = interval) } # Assign class label and return results class(res) <- "invest" return(res) }
#Shire project #Shire project #main function for k-folds cross validation and grid search(weighted + lambda) for lasso and ridge regression #the functions can do parallel on k-folds CV and meanwhile can do parallel on grid search for optimum parameter selection #Developter - Jie Zhao #Develop time - Dec2015-Jan2016 rm(list=ls()) library(xlsx) library(ROCR) library(plyr) library(caret) library(dplyr) library(glmnet) library(snow) library(snowfall) library(caTools) library(e1071) #set some constants for lasso grid search and optimum parameter selection #rootPath <- "F:\\Jie\\Shire_project\\" rootPath <- "D:\\Shire_project\\" test_1fold <- T k.folds= ifelse(test_1fold, 1, 5) folder <- ifelse(test_1fold, 'test_1fold', 'CV') n.iter=5 crit <- 0.3 result_list <- list() type <- 'v2' model <- 'svm' ONE <- F kernel <- 'lin' #k.folds=1 wt_list <- c(190, 270) if(ONE){ wt_list <- wt_list[1] } #wt_list <- ifelse(ONE, wt_list[1], wt_list) cost_list <- c(0.00003, 0.00012,0.0008, 0.008, 0.003, 0.011, 0.013) #cost_list <- (0.0005) nu_list <- c(0.3, 0.5, 0.7) # a set of values for nu for one-class SVMs if(ONE){ cost_list <- nu_list } #cost_list <- c(0.001, 0.01, 0.1, 1, 10, 100) #gamma_list <-c(seq(0.001, 0.009, 0.003) # , seq(0.01, 0.09, 0.03) # , seq(0.1, 0.9, 0.1)) #gamma_list <- c(0.1, 0.3, 0.5) gamma_list <- 0.5 DR <- 190 DR_onPrb0 <- F iter <- 1 clsMethod <- ifelse(ONE, 'one-classification', 'C-classification') path_sepData <- paste(rootPath, '04_result\\data_sep\\', type, '\\iter', iter, '\\norm', sep='') wt_str <- ifelse(ONE, NA, paste(wt_list, collapse=',')) cost_str <- paste(cost_list, collapse=',') gamma_str <- paste(gamma_list, collapse=',') if(kernel=='lin'){ grid <- expand.grid(cost = cost_list, weights = wt_list)[,] outPath <- paste(rootPath, '04_result\\', model, '\\', folder, '\\iter', iter, '\\cost(', cost_str, ')_wt(', wt_str, ')_ONE(', ONE, ')_rn_DR0(', DR_onPrb0, ')', sep='') }else if(kernel=='rbf'){ grid <- expand.grid(cost = cost_list, gamma = gamma_list, 
weights = wt_list)[,] outPath <- paste(rootPath, '04_result\\', model, '\\', folder, '\\iter', iter, '\\cost(', cost_str, ')_gm(', gamma_str, ')_wt(', wt_str, ')_ONE(', ONE, ')_rn_DR0(', DR_onPrb0, ')', sep='') } n.cpu <- nrow(grid) if(!file.exists(outPath)){ dir.create(outPath, recursive=T, showWarnings=T) setwd(outPath) }else{ setwd(outPath) } #define the traceFile to trace how the paralle process traceFile <- paste(outPath, '\\traceFile.csv', sep='') #source("F:\\Jie\\Shire_project\\03_code\\svm\\DR_constrain\\svm_subFunc_svm_withFpConstrain.R") source("D:\\Shire_project\\03_code\\Jie\\svm\\DR_constrain\\svm_subFunc_svm_withFpConstrain.R") for(iter in 1:n.iter){ start <- proc.time() cat(file=traceFile, append=T, 'for the model performance\n', 'cost-', cost_list, ' gamma-', gamma_list , ' wt list-', wt_list, ' k.folds-', k.folds, ' running....\n') cat('simulation:', iter, ' start!\n') #load in the separated data for iteration result_list[[iter]] <- get_optimum_model_iter_withCV(traceFile, path_sepData, n.cpu) save(file=paste('result_list_iter', iter, '.RData', sep=''), result_list) cat(file=traceFile, append=TRUE, 'Iteration ', iter, ': grid iteration end!\n') #added by Jie cat(append=TRUE, 'Iteration ', iter, ': grid iteration end!\n') #added by Jie end <- proc.time() execution_time<- (end-start)[3]/60 cat('iteration:', iter, ' end!\n time used:', execution_time, ' min!\n') cat(file=traceFile, append=TRUE, 'iteration:', iter, ' end!\n time used:', execution_time, ' min!\n') } #summarize the results for 5 iterations; pred_list <- lapply(result_list, function(X){ return(X[[1]]) }) #pred_df <- t(ldply(pred_list, quickdf)) pred_df <- pred_list[[iter]] save(pred_df, file="resp_pred_df_forTs.RData") write.csv(pred_df, paste('resp_pred_df_forTs_iter', iter, '.csv', sep=''), row.names = F) optimum_pmt_list <- lapply(result_list, function(X){ return(X[[2]]) }) optimum_pmt_df <- ldply(optimum_pmt_list[iter], quickdf) save(optimum_pmt_df, file="optimum_pmt_df.RData") 
#outOfSmpCurve <- lapply(result_list, function(X){ # return(X[[3]]) #}) #save(outOfSmpCurve, file='outOfSmpCurve.RData') #lasso grid search and pred on test completed #generate the performance measure across all iterations recall_tar_onTs <- sort(c(0.25, seq(0.5, 0.1, -0.1)), decreasing = T) result_msOnTest <- msOnTest_sep_v2(pred_df[, 2], pred_df[, 1], recall_tar_onTs) ms_onTest <- result_msOnTest$ms write.csv(ms_onTest, 'lasso_result.csv', row.names = T) write.csv(result_msOnTest$curve, 'curve_forTs.csv', quote=T, row.names = F) #end2 <- proc.time() #execution_time2<- (end2-start2)[3]/60
/codess/SVM/DR_constrain/svm_sep_NormByRow_lin.R
no_license
jzhao0802/ShireHAE
R
false
false
4,891
r
#Shire project #Shire project #main function for k-folds cross validation and grid search(weighted + lambda) for lasso and ridge regression #the functions can do parallel on k-folds CV and meanwhile can do parallel on grid search for optimum parameter selection #Developter - Jie Zhao #Develop time - Dec2015-Jan2016 rm(list=ls()) library(xlsx) library(ROCR) library(plyr) library(caret) library(dplyr) library(glmnet) library(snow) library(snowfall) library(caTools) library(e1071) #set some constants for lasso grid search and optimum parameter selection #rootPath <- "F:\\Jie\\Shire_project\\" rootPath <- "D:\\Shire_project\\" test_1fold <- T k.folds= ifelse(test_1fold, 1, 5) folder <- ifelse(test_1fold, 'test_1fold', 'CV') n.iter=5 crit <- 0.3 result_list <- list() type <- 'v2' model <- 'svm' ONE <- F kernel <- 'lin' #k.folds=1 wt_list <- c(190, 270) if(ONE){ wt_list <- wt_list[1] } #wt_list <- ifelse(ONE, wt_list[1], wt_list) cost_list <- c(0.00003, 0.00012,0.0008, 0.008, 0.003, 0.011, 0.013) #cost_list <- (0.0005) nu_list <- c(0.3, 0.5, 0.7) # a set of values for nu for one-class SVMs if(ONE){ cost_list <- nu_list } #cost_list <- c(0.001, 0.01, 0.1, 1, 10, 100) #gamma_list <-c(seq(0.001, 0.009, 0.003) # , seq(0.01, 0.09, 0.03) # , seq(0.1, 0.9, 0.1)) #gamma_list <- c(0.1, 0.3, 0.5) gamma_list <- 0.5 DR <- 190 DR_onPrb0 <- F iter <- 1 clsMethod <- ifelse(ONE, 'one-classification', 'C-classification') path_sepData <- paste(rootPath, '04_result\\data_sep\\', type, '\\iter', iter, '\\norm', sep='') wt_str <- ifelse(ONE, NA, paste(wt_list, collapse=',')) cost_str <- paste(cost_list, collapse=',') gamma_str <- paste(gamma_list, collapse=',') if(kernel=='lin'){ grid <- expand.grid(cost = cost_list, weights = wt_list)[,] outPath <- paste(rootPath, '04_result\\', model, '\\', folder, '\\iter', iter, '\\cost(', cost_str, ')_wt(', wt_str, ')_ONE(', ONE, ')_rn_DR0(', DR_onPrb0, ')', sep='') }else if(kernel=='rbf'){ grid <- expand.grid(cost = cost_list, gamma = gamma_list, 
weights = wt_list)[,] outPath <- paste(rootPath, '04_result\\', model, '\\', folder, '\\iter', iter, '\\cost(', cost_str, ')_gm(', gamma_str, ')_wt(', wt_str, ')_ONE(', ONE, ')_rn_DR0(', DR_onPrb0, ')', sep='') } n.cpu <- nrow(grid) if(!file.exists(outPath)){ dir.create(outPath, recursive=T, showWarnings=T) setwd(outPath) }else{ setwd(outPath) } #define the traceFile to trace how the paralle process traceFile <- paste(outPath, '\\traceFile.csv', sep='') #source("F:\\Jie\\Shire_project\\03_code\\svm\\DR_constrain\\svm_subFunc_svm_withFpConstrain.R") source("D:\\Shire_project\\03_code\\Jie\\svm\\DR_constrain\\svm_subFunc_svm_withFpConstrain.R") for(iter in 1:n.iter){ start <- proc.time() cat(file=traceFile, append=T, 'for the model performance\n', 'cost-', cost_list, ' gamma-', gamma_list , ' wt list-', wt_list, ' k.folds-', k.folds, ' running....\n') cat('simulation:', iter, ' start!\n') #load in the separated data for iteration result_list[[iter]] <- get_optimum_model_iter_withCV(traceFile, path_sepData, n.cpu) save(file=paste('result_list_iter', iter, '.RData', sep=''), result_list) cat(file=traceFile, append=TRUE, 'Iteration ', iter, ': grid iteration end!\n') #added by Jie cat(append=TRUE, 'Iteration ', iter, ': grid iteration end!\n') #added by Jie end <- proc.time() execution_time<- (end-start)[3]/60 cat('iteration:', iter, ' end!\n time used:', execution_time, ' min!\n') cat(file=traceFile, append=TRUE, 'iteration:', iter, ' end!\n time used:', execution_time, ' min!\n') } #summarize the results for 5 iterations; pred_list <- lapply(result_list, function(X){ return(X[[1]]) }) #pred_df <- t(ldply(pred_list, quickdf)) pred_df <- pred_list[[iter]] save(pred_df, file="resp_pred_df_forTs.RData") write.csv(pred_df, paste('resp_pred_df_forTs_iter', iter, '.csv', sep=''), row.names = F) optimum_pmt_list <- lapply(result_list, function(X){ return(X[[2]]) }) optimum_pmt_df <- ldply(optimum_pmt_list[iter], quickdf) save(optimum_pmt_df, file="optimum_pmt_df.RData") 
#outOfSmpCurve <- lapply(result_list, function(X){ # return(X[[3]]) #}) #save(outOfSmpCurve, file='outOfSmpCurve.RData') #lasso grid search and pred on test completed #generate the performance measure across all iterations recall_tar_onTs <- sort(c(0.25, seq(0.5, 0.1, -0.1)), decreasing = T) result_msOnTest <- msOnTest_sep_v2(pred_df[, 2], pred_df[, 1], recall_tar_onTs) ms_onTest <- result_msOnTest$ms write.csv(ms_onTest, 'lasso_result.csv', row.names = T) write.csv(result_msOnTest$curve, 'curve_forTs.csv', quote=T, row.names = F) #end2 <- proc.time() #execution_time2<- (end2-start2)[3]/60
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/footywire-calcs.R \name{get_footywire_betting_odds} \alias{get_footywire_betting_odds} \title{Get AFL match betting odds from https://www.footywire.com} \usage{ get_footywire_betting_odds( start_season = "2010", end_season = lubridate::year(Sys.Date()) ) } \arguments{ \item{start_season}{First season to return, in yyyy format. Earliest season with data available is 2010.} \item{end_season}{Last season to return, in yyyy format} } \value{ Returns a data frame containing betting odds and basic match info } \description{ \code{get_footywire_betting_odds} returns a data frame containing betting odds and basic match info for Men's AFL matches. } \details{ The data frame contains the home and away team as well as venue. } \examples{ \dontrun{ get_footywire_betting_odds(2012, 2018) } }
/man/get_footywire_betting_odds.Rd
no_license
DTS098/fitzRoy
R
false
true
873
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/footywire-calcs.R \name{get_footywire_betting_odds} \alias{get_footywire_betting_odds} \title{Get AFL match betting odds from https://www.footywire.com} \usage{ get_footywire_betting_odds( start_season = "2010", end_season = lubridate::year(Sys.Date()) ) } \arguments{ \item{start_season}{First season to return, in yyyy format. Earliest season with data available is 2010.} \item{end_season}{Last season to return, in yyyy format} } \value{ Returns a data frame containing betting odds and basic match info } \description{ \code{get_footywire_betting_odds} returns a data frame containing betting odds and basic match info for Men's AFL matches. } \details{ The data frame contains the home and away team as well as venue. } \examples{ \dontrun{ get_footywire_betting_odds(2012, 2018) } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AutoGeneratedDefinitions.R \name{postEstimationDefinition} \alias{postEstimationDefinition} \title{Post Estimation definition. \lifecycle{maturing}} \usage{ postEstimationDefinition(name, estimationDefinition, baseUrl) } \arguments{ \item{name}{A valid name for the definition. WebApi will use this name (if valid) as the name of the definition. WebApi checks for validity, such as uniqueness, absence of unacceptable character etc. An error might be thrown.} \item{estimationDefinition}{An R list object containing the expression for the specification. This will be converted to JSON expression by function and posted into the WebApi. Note: only limited checks are performed in R to check the validity of this expression.} \item{baseUrl}{The base URL for the WebApi instance, for example: "http://server.org:80/WebAPI". Note, there is no trailing '/'. If trailing '/' is used, you may receive an error.} } \value{ This function will return a dataframe object with one row describing the posted WebApi expression and its details. If unsuccessful a STOP message will be shown. } \description{ Post Estimation definition. \lifecycle{maturing} } \details{ Post Estimation definition to WebAPI } \examples{ \dontrun{ postEstimationDefinition(name = "new valid name", estimationDefinition = definition, baseUrl = "http://server.org:80/WebAPI") } }
/man/postEstimationDefinition.Rd
permissive
George-Argyriou/ROhdsiWebApi
R
false
true
1,473
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AutoGeneratedDefinitions.R \name{postEstimationDefinition} \alias{postEstimationDefinition} \title{Post Estimation definition. \lifecycle{maturing}} \usage{ postEstimationDefinition(name, estimationDefinition, baseUrl) } \arguments{ \item{name}{A valid name for the definition. WebApi will use this name (if valid) as the name of the definition. WebApi checks for validity, such as uniqueness, absence of unacceptable character etc. An error might be thrown.} \item{estimationDefinition}{An R list object containing the expression for the specification. This will be converted to JSON expression by function and posted into the WebApi. Note: only limited checks are performed in R to check the validity of this expression.} \item{baseUrl}{The base URL for the WebApi instance, for example: "http://server.org:80/WebAPI". Note, there is no trailing '/'. If trailing '/' is used, you may receive an error.} } \value{ This function will return a dataframe object with one row describing the posted WebApi expression and its details. If unsuccessful a STOP message will be shown. } \description{ Post Estimation definition. \lifecycle{maturing} } \details{ Post Estimation definition to WebAPI } \examples{ \dontrun{ postEstimationDefinition(name = "new valid name", estimationDefinition = definition, baseUrl = "http://server.org:80/WebAPI") } }
setwd("C:\\Users\\ASUS\\Desktop\\Fiverr\\john\\New folder (3)") library("rgdal") library(raster) library(adehabitatHR) input<- read.csv("2020-10-north-wales-street.csv") input<- input[,1:10] #We only need the first 10 columns input<- input[complete.cases(input),] #This line of code removes rows with NA values in the data. Crime.Spatial<- SpatialPointsDataFrame(input[,5:6], input, proj4string = CRS("+init=epsg:4326")) Crime.Spatial<- spTransform(Crime.Spatial, CRS("+init=epsg:27700")) #We now project from WGS84 for to British National Grid plot(Crime.Spatial) #Plot the data Northwales<- readOGR(".", layer="North_Wales") Extent<- extent(Northwales) #this is the geographic extent of the grid. It is based on the Northwales object. #Here we specify the size of each grid cell in metres (since those are the units our data are projected in). resolution<- 500 #This is some magic that creates the empty grid x <- seq(Extent[1],Extent[2],by=resolution) # where resolution is the pixel size you desire y <- seq(Extent[3],Extent[4],by=resolution) xy <- expand.grid(x=x,y=y) coordinates(xy) <- ~x+y gridded(xy) <- TRUE #You can see the grid here (this may appear solid black if the cells are small) plot(xy) plot(Northwales, border="black", add=T) all <- raster(kernelUD(Crime.Spatial, h="href", grid = xy)) #Note we are running two functions here - first KernelUD then converting the result to a raster object. 
#First results writeRaster(all,filename="file1.asc",datatype='FLT4S',format="ascii",overwrite=TRUE) plot(all) plot(Northwales, border="black", add=T) plot(Crime.Spatial[Crime.Spatial$Crime.type=="Burglary",]) # quick plot of burglary points Burglary<- raster(kernelUD(Crime.Spatial[Crime.Spatial$Crime.type=="Burglary",], h="href", grid = xy)) writeRaster(Burglary,filename="file2.asc",datatype='FLT4S',format="ascii",overwrite=TRUE) plot(Burglary) plot(Northwales, border="black", add=T) both<-Burglary/all writeRaster(both,filename="file3.asc",datatype='FLT4S',format="ascii",overwrite=TRUE) plot(both) plot(Northwales, border="black", add=T) both2 <- both both2[both <= 0] <- NA both2[both >= 1] <- NA writeRaster(both2,filename="file4.asc",datatype='FLT4S',format="ascii",overwrite=TRUE) #Now we can see the hotspots much more clearly. plot(both2) plot(Northwales, add=T) #writeRaster(both2,filename=pointpatternhotspot.asc,datatype='FLT4S',format="ascii",overwrite=TRUE)
/2020-10.R
no_license
ucfnwda/CASA0005
R
false
false
2,451
r
setwd("C:\\Users\\ASUS\\Desktop\\Fiverr\\john\\New folder (3)") library("rgdal") library(raster) library(adehabitatHR) input<- read.csv("2020-10-north-wales-street.csv") input<- input[,1:10] #We only need the first 10 columns input<- input[complete.cases(input),] #This line of code removes rows with NA values in the data. Crime.Spatial<- SpatialPointsDataFrame(input[,5:6], input, proj4string = CRS("+init=epsg:4326")) Crime.Spatial<- spTransform(Crime.Spatial, CRS("+init=epsg:27700")) #We now project from WGS84 for to British National Grid plot(Crime.Spatial) #Plot the data Northwales<- readOGR(".", layer="North_Wales") Extent<- extent(Northwales) #this is the geographic extent of the grid. It is based on the Northwales object. #Here we specify the size of each grid cell in metres (since those are the units our data are projected in). resolution<- 500 #This is some magic that creates the empty grid x <- seq(Extent[1],Extent[2],by=resolution) # where resolution is the pixel size you desire y <- seq(Extent[3],Extent[4],by=resolution) xy <- expand.grid(x=x,y=y) coordinates(xy) <- ~x+y gridded(xy) <- TRUE #You can see the grid here (this may appear solid black if the cells are small) plot(xy) plot(Northwales, border="black", add=T) all <- raster(kernelUD(Crime.Spatial, h="href", grid = xy)) #Note we are running two functions here - first KernelUD then converting the result to a raster object. 
#First results writeRaster(all,filename="file1.asc",datatype='FLT4S',format="ascii",overwrite=TRUE) plot(all) plot(Northwales, border="black", add=T) plot(Crime.Spatial[Crime.Spatial$Crime.type=="Burglary",]) # quick plot of burglary points Burglary<- raster(kernelUD(Crime.Spatial[Crime.Spatial$Crime.type=="Burglary",], h="href", grid = xy)) writeRaster(Burglary,filename="file2.asc",datatype='FLT4S',format="ascii",overwrite=TRUE) plot(Burglary) plot(Northwales, border="black", add=T) both<-Burglary/all writeRaster(both,filename="file3.asc",datatype='FLT4S',format="ascii",overwrite=TRUE) plot(both) plot(Northwales, border="black", add=T) both2 <- both both2[both <= 0] <- NA both2[both >= 1] <- NA writeRaster(both2,filename="file4.asc",datatype='FLT4S',format="ascii",overwrite=TRUE) #Now we can see the hotspots much more clearly. plot(both2) plot(Northwales, add=T) #writeRaster(both2,filename=pointpatternhotspot.asc,datatype='FLT4S',format="ascii",overwrite=TRUE)
rm(list=ls()) library(readr) library(dplyr) library(ggplot2) library(lubridate) fname = "yemen_dat.csv" df = read_csv(fname) df = df %>% select(-date_variable, adm2pcod, longitude, latitude) df = df %>% mutate(year = year(date_value)) min_year = min(df$year) df = df %>% mutate(diff_year = year - min_year, add_weeks = diff_year * 52, new_epiweek = add_weeks + epiweek) %>% select(-year, -diff_year) df = df %>% rename(group_name = adm1name) df %>% filter(date_value == ymd("2017-01-01") | date_value == ymd("2017-01-02") | date_value == ymd("2017-01-08") | date_value == ymd("2016-09-29") | date_value == ymd("2016-12-31") ) %>% select(date_value, epiweek, add_weeks, new_epiweek) %>% unique week_df = df %>% group_by(group_name, date_value) %>% summarize(vweek = var(epiweek), epiweek = unique(epiweek), uweek = unique(lubridate::week(date_value)), dow = unique(wday(date_value, label = TRUE))) %>% ungroup stopifnot(all(week_df$vweek == 0)) week_df = df %>% group_by(group_name, new_epiweek) %>% summarize(y = sum(incidence), date_value = first(date_value)) %>% ungroup date_week_df = week_df %>% mutate(group = group_name, x = date_value) date_week_df = date_week_df %>% select(x, y, group, group_name) week_df = week_df %>% mutate(group = group_name, x = new_epiweek) week_df = week_df %>% select(x, y, group, group_name) df = df %>% group_by(group_name, date_value) %>% summarize(y = sum(incidence)) %>% ungroup df = df %>% mutate(group = group_name, x = date_value) df = df %>% select(x, y, group, group_name) regroup = function(x) { factor(as.numeric(factor(x))) } # remove these when using real data df = df %>% mutate(group = regroup(group)) week_df = week_df %>% mutate(group = regroup(group)) date_week_df = date_week_df %>% mutate(group = regroup(group)) g = df %>% ggplot(aes(x = x, y = y, colour = group)) + geom_line() + ylab("Number of Cases") + xlab("Date") pdf("incidence_plots_over_time.pdf", height = 5, width = 10) print(g) print(g + guides(color = FALSE)) print({g %+% 
week_df}) print({( g + guides(color = FALSE)) %+% week_df}) dev.off() # saveRDS(df, file = "plot_data.rds") saveRDS(week_df, file = "plot_data.rds") saveRDS(date_week_df, file = "plot_data_date.rds")
/summarize_data.R
no_license
muschellij2/epi_click
R
false
false
2,454
r
rm(list=ls()) library(readr) library(dplyr) library(ggplot2) library(lubridate) fname = "yemen_dat.csv" df = read_csv(fname) df = df %>% select(-date_variable, adm2pcod, longitude, latitude) df = df %>% mutate(year = year(date_value)) min_year = min(df$year) df = df %>% mutate(diff_year = year - min_year, add_weeks = diff_year * 52, new_epiweek = add_weeks + epiweek) %>% select(-year, -diff_year) df = df %>% rename(group_name = adm1name) df %>% filter(date_value == ymd("2017-01-01") | date_value == ymd("2017-01-02") | date_value == ymd("2017-01-08") | date_value == ymd("2016-09-29") | date_value == ymd("2016-12-31") ) %>% select(date_value, epiweek, add_weeks, new_epiweek) %>% unique week_df = df %>% group_by(group_name, date_value) %>% summarize(vweek = var(epiweek), epiweek = unique(epiweek), uweek = unique(lubridate::week(date_value)), dow = unique(wday(date_value, label = TRUE))) %>% ungroup stopifnot(all(week_df$vweek == 0)) week_df = df %>% group_by(group_name, new_epiweek) %>% summarize(y = sum(incidence), date_value = first(date_value)) %>% ungroup date_week_df = week_df %>% mutate(group = group_name, x = date_value) date_week_df = date_week_df %>% select(x, y, group, group_name) week_df = week_df %>% mutate(group = group_name, x = new_epiweek) week_df = week_df %>% select(x, y, group, group_name) df = df %>% group_by(group_name, date_value) %>% summarize(y = sum(incidence)) %>% ungroup df = df %>% mutate(group = group_name, x = date_value) df = df %>% select(x, y, group, group_name) regroup = function(x) { factor(as.numeric(factor(x))) } # remove these when using real data df = df %>% mutate(group = regroup(group)) week_df = week_df %>% mutate(group = regroup(group)) date_week_df = date_week_df %>% mutate(group = regroup(group)) g = df %>% ggplot(aes(x = x, y = y, colour = group)) + geom_line() + ylab("Number of Cases") + xlab("Date") pdf("incidence_plots_over_time.pdf", height = 5, width = 10) print(g) print(g + guides(color = FALSE)) print({g %+% 
week_df}) print({( g + guides(color = FALSE)) %+% week_df}) dev.off() # saveRDS(df, file = "plot_data.rds") saveRDS(week_df, file = "plot_data.rds") saveRDS(date_week_df, file = "plot_data_date.rds")
setwd("/home/andrzej/Dokumenty/Mownit/Lab4") results = read.csv("csv.csv", sep=";") avg_results = aggregate( TIME ~ N:TYPE, data=results, FUN=mean) avg_results$sd = aggregate( TIME ~ N:TYPE, data=results, FUN=sd)$TIME naive_r = avg_results[avg_results$TYPE == 'naive',] better_r = avg_results[avg_results$TYPE == 'better',] blas_r = avg_results[avg_results$TYPE == 'blas',] fit1 = lm(naive_r$TIME ~ poly(naive_r$N, 3, raw=TRUE), data=data.frame(naive_r$N, naive_r$TIME)) fit2= lm(better_r $TIME ~ poly(better_r $N, 3, raw=TRUE), data=data.frame(better_r $N, better_r $TIME)) fit3= lm(blas_r$TIME ~ poly(blas_r$N, 3, raw=TRUE), data=data.frame(blas_r$N, blas_r$TIME)) ggplot(avg_results, aes(N,TIME,color=TYPE)) + geom_point() + geom_errorbar(aes(ymin = TIME - sd, ymax = TIME + sd, width=5)) + geom_line(data=naive_r, aes(naive_r$N, predict(fit1, naive_r))) + geom_line(data=better_r, aes(better_r$N, predict(fit2, better_r))) + geom_line(data=blas_r, aes(blas_r$N, predict(fit3, blas_r))) + xlab("N [size of matrix]") + ylab("Time [ms]")
/Lab4/zadanie4.r
no_license
BarkingBad/MOWNIT
R
false
false
1,062
r
setwd("/home/andrzej/Dokumenty/Mownit/Lab4") results = read.csv("csv.csv", sep=";") avg_results = aggregate( TIME ~ N:TYPE, data=results, FUN=mean) avg_results$sd = aggregate( TIME ~ N:TYPE, data=results, FUN=sd)$TIME naive_r = avg_results[avg_results$TYPE == 'naive',] better_r = avg_results[avg_results$TYPE == 'better',] blas_r = avg_results[avg_results$TYPE == 'blas',] fit1 = lm(naive_r$TIME ~ poly(naive_r$N, 3, raw=TRUE), data=data.frame(naive_r$N, naive_r$TIME)) fit2= lm(better_r $TIME ~ poly(better_r $N, 3, raw=TRUE), data=data.frame(better_r $N, better_r $TIME)) fit3= lm(blas_r$TIME ~ poly(blas_r$N, 3, raw=TRUE), data=data.frame(blas_r$N, blas_r$TIME)) ggplot(avg_results, aes(N,TIME,color=TYPE)) + geom_point() + geom_errorbar(aes(ymin = TIME - sd, ymax = TIME + sd, width=5)) + geom_line(data=naive_r, aes(naive_r$N, predict(fit1, naive_r))) + geom_line(data=better_r, aes(better_r$N, predict(fit2, better_r))) + geom_line(data=blas_r, aes(blas_r$N, predict(fit3, blas_r))) + xlab("N [size of matrix]") + ylab("Time [ms]")
#' @export updateGrid = function (ep,grid,nGrid){ #first we again need our solutions as a matrix matEP=lapply(ep,function(x){ return (x$solution) }) matEP=do.call(rbind,matEP) #we take each solution assignments = getAssignmentC(grid, nGrid, matEP) return (assignments) }
/PESAII/R/updateGrid.R
no_license
Malano1988/MOCK
R
false
false
293
r
#' @export updateGrid = function (ep,grid,nGrid){ #first we again need our solutions as a matrix matEP=lapply(ep,function(x){ return (x$solution) }) matEP=do.call(rbind,matEP) #we take each solution assignments = getAssignmentC(grid, nGrid, matEP) return (assignments) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/keyvalue.R \name{is_keyvalue} \alias{is_keyvalue} \title{Is it a keyvalue?} \usage{ is_keyvalue(x) } \arguments{ \item{x}{object to check} } \description{ Is it a keyvalue? }
/man/is_keyvalue.Rd
permissive
jameelalsalam/mapping
R
false
true
253
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/keyvalue.R \name{is_keyvalue} \alias{is_keyvalue} \title{Is it a keyvalue?} \usage{ is_keyvalue(x) } \arguments{ \item{x}{object to check} } \description{ Is it a keyvalue? }
#' Prior projected Polya tree distribution
#'
#' @description Simulates paths of prior projected Polya tree distributions centered
#' around a projected normal distribution.
#'
#' @usage dsimpriorppt(nsim = 5, mm = 4,mu = c(0, 0),
#' sig = 1, ll = 100, aa = 1, delta = 1.1, units = "radians")
#' @param nsim integer indicating the number of simulations.
#' @param mm integer indicating the number of finite levels of the Polya tree.
#' @param mu mean vector of the projected bivariate normal distribution.
#' @param sig standard deviation of the projected bivariate normal distribution. We advise to always use sig = 1.
#' @param ll number of equally spaced points at which the projected distribution will be evaluated.
#' @param aa alpha. Precision parameter of the Polya tree.
#' @param delta controls of the speed at which the variances of the branching probabilities move down in the tree, rho(m)=m^delta.
#' @param units units of the support: "radians", "degrees" or "hours".
#'
#' @examples z <- dsimpriorppt(mu = c(5,5), nsim = 5, units = "radians")
#' priorppt.plot(z, plot.type = "line")
#' summary(z$stats)
#'
#' @seealso \code{\link[PPTcirc]{priorppt.plot}}, \code{\link[PPTcirc]{priorppt.summary}}
#'
#' @return An object with class priorppt.circ whose underlying structure is a list containing the following components:
#' \item{x}{points where the density is evaluated.}
#' \item{ppt.sim}{simulated density paths of the prior projected Polya tree.}
#' \item{stats}{descriptive statistics: mean direction and concentration of each simulated density.}
#' @export
#'
#' @references Nieto-Barajas, L.E. & Nunez-Antonio, G. (2019). Projected Polya tree.
#' https://arxiv.org/pdf/1902.06020.pdf
dsimpriorppt <- function(nsim = 5, mm = 4, mu = c(0, 0), sig = 1, ll = 100,
                         aa = 1, delta = 1.1, units = "radians") {

  # Fail fast on an unsupported unit. Previously an invalid `units` fell
  # through every branch and crashed later with "object 'mt.cc' not found".
  units <- match.arg(units, c("radians", "degrees", "hours"))

  # Evaluation grid: ll equally spaced points on [0, 2*pi).
  dt <- 2 * pi / ll
  t <- seq(from = 0, to = 2 * pi * (1 - 1 / ll), by = dt)

  # One column per simulated density path.
  FT <- matrix(data = NA, nrow = ll, ncol = nsim, byrow = FALSE, dimnames = NULL)
  vt <- mt <- vector("numeric", length = nsim)

  for (i in seq_len(nsim)) {
    # Simulate density path i of the projected Polya tree.
    FT[, i] <- dpptcirc(mm, mu = mu, sig = sig, ll = ll, aa = aa, delta = delta)
    # First trigonometric moments -> mean direction and concentration.
    alpha1 <- dt * sum(cos(t[1:ll]) * FT[, i])
    beta1  <- dt * sum(sin(t[1:ll]) * FT[, i])
    vt[i] <- sqrt(alpha1^2 + beta1^2)
    mt[i] <- ppt.tan(alpha1, beta1)
  }

  # Convert the support and statistics from radians to the requested units
  # (same factors as before: degrees = x/pi*180, hours = x/pi*12).
  scale <- switch(units, radians = 1, degrees = 180 / pi, hours = 12 / pi)
  mt.cc <- mt * scale
  vt.cc <- vt * scale
  t.cc  <- t * scale

  FT <- as.data.frame(FT)
  colnames(FT) <- paste0("sim", seq_len(nsim))

  structure(list(x = t.cc,
                 ppt.sim = FT,
                 stats = data.frame(mean.direction = mt.cc,
                                    concentration = vt.cc)),
            class = "priorppt.circ")
}
/R/dsimpriorppt.R
no_license
Karlampm/PPTcirc
R
false
false
2,983
r
#' Prior projected Polya tree distribution #' #' @description Simulates paths of prior projected Polya tree distributions centered #' around a projected normal distribution. #' #' @usage dsimpriorppt(nsim = 5, mm = 4,mu = c(0, 0), #' sig = 1, ll = 100, aa = 1, delta = 1.1, units = "radians") #' @param nsim integer indicating the number of simulations. #' @param mm integer indicating the number of finite levels of the Polya tree. #' @param mu mean vector of the projected bivariate normal distribution. #' @param sig standard deviation of the projected bivariate normal distribution. We advise to always use sig = 1. #' @param ll number of equally spaced points at which the projected distribution will be evaluated. #' @param aa alpha. Precision parameter of the Polya tree. #' @param delta controls of the speed at which the variances of the branching probabilities move down in the tree, rho(m)=m^delta. #' @param units units of the support: "radians", "degrees" or "hours". #' #' @examples z <- dsimpriorppt(mu = c(5,5), nsim = 5, units = "radians") #' priorppt.plot(z, plot.type = "line") #' summary(z$stats) #' #' @seealso \code{\link[PPTcirc]{priorppt.plot}}, \code{\link[PPTcirc]{priorppt.summary}} #' #' @return An object with class priorppt.circ whose underlying structure is a list containing the following components: #' \item{x}{points where the density is evaluated.} #' \item{ppt.sims}{simulated density paths of the prior projected Polya tree.} #' \item{stats}{descriptive statistics: mean direction and concentration of each simulated density.} #' @export #' #' #' @references Nieto-Barajas, L.E. & Nunez-Antonio, G. (2019). Projected Polya tree. 
https://arxiv.org/pdf/1902.06020.pdf dsimpriorppt <- function(nsim=5 , mm=4, mu = c(0,0), sig=1, ll = 100, aa=1, delta=1.1, units= "radians"){ #Prog_bar <- txtProgressBar(min = 0, max = nsim, style = 3) dt <- 2*pi/ll t <- seq(from = 0, to= 2*pi*(1-1/ll), by = dt) FT <- matrix(data = NA, nrow = ll, ncol = nsim, byrow = FALSE, dimnames = NULL) vt <- mt <- vector("numeric", length = nsim) for(i in 1:nsim){ #Generate simulations FT[,i] <- dpptcirc(mm,mu = mu, sig=sig, ll = ll, aa=aa, delta=delta) #Generate stats alpha1 <- dt*sum(cos(t[1:ll])* FT[,i]) beta1 <- dt*sum(sin(t[1:ll])* FT[,i]) vt[i] <- sqrt(alpha1^2+ beta1^2) mt[i] <- ppt.tan(alpha1, beta1) #setTxtProgressBar(Prog_bar,i) } if (units == "degrees") { mt.cc <- mt/pi *180 vt.cc <- vt/pi*180 t.cc <- t/pi*180 } else if (units == "hours") { mt.cc <- mt/pi*12 vt.cc <- vt/pi*12 t.cc <- t/pi*12 } else if (units== "radians"){ mt.cc <- mt vt.cc <- vt t.cc <- t } FT <- as.data.frame(FT) colnames(FT) <- paste0(rep("sim", nsim),1:nsim ) #close(Prog_bar) return(structure(list(x = t.cc ,ppt.sim = FT, stats=data.frame(mean.direction =mt.cc, concentration=vt.cc)), class = "priorppt.circ")) }
## Construct a "matrix" object that can cache its inverse.
##
## Returns a list of four closures sharing the matrix `x` and a cached
## inverse:
##   set(y)           replace the matrix and invalidate the cache
##   get()            return the current matrix
##   setinverse(inv)  store a computed inverse
##   getinverse()     return the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # A new matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
/makeCacheMatrix.R
no_license
questionedandmarked/R
R
false
false
368
r
makeCacheMatrix <- function(x = matrix()) { inv <- NULL set <- function(y) { x <<- y inv <<- NULL } get <- function() x setinverse <- function(inverse) inv <<- inverse getinverse <- function() inv list(set=set, get=get, setinverse=setinverse, getinverse=getinverse) } ##"matrix" object that can cache its inverse
# Jinja2-templated R script: reads a feature whitelist, keeps only those
# columns from each input file, row-binds everything and writes the
# stripped dataset. The "{{...}}" / "{% ... %}" placeholders are filled
# in by the template engine before this script is executed.
data<-NULL;
# Whitelist: the "result" label plus every name in the {{features}} file,
# flattened to a plain character vector.
features <- c("result", as.vector(as.matrix(read.table("{{features}}", header=TRUE))));
{% for file in files -%}
tmp_data <- read.table("{{file}}", header=TRUE);
# Keep only whitelisted columns.
tmp_data <- tmp_data[, names(tmp_data) %in% features]
data <- rbind(data, tmp_data);
# Explicit gc() between files to bound peak memory on large inputs.
g<-gc();
{% endfor -%}
write.table(data, file="{{strip_data_file}}")
/pygaga/model/r/generate_strip_data.r
no_license
qiaohui/pygaga
R
false
false
328
r
data<-NULL; features <- c("result", as.vector(as.matrix(read.table("{{features}}", header=TRUE)))); {% for file in files -%} tmp_data <- read.table("{{file}}", header=TRUE); tmp_data <- tmp_data[, names(tmp_data) %in% features] data <- rbind(data, tmp_data); g<-gc(); {% endfor -%} write.table(data, file="{{strip_data_file}}")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calculateWeatherVariables.R \name{calculateDewPointDepression} \alias{calculateDewPointDepression} \title{calculateDewPointDepression.} \usage{ calculateDewPointDepression(airTemp, RH, temp) } \arguments{ \item{-}{airTemp: in C - double} \item{-}{RH: 1-100 - double} \item{-}{temp: in C double} } \value{ double } \description{ \code{calculateDewPointDepression} function to calculate the dew point depression } \details{ function to calculate Dew Point Depression definition from Kim, 2002 paper } \examples{ NEED TO INSERT } \references{ see aWhere API Documentation (insert HTTP address) }
/man/calculateDewPointDepression.Rd
no_license
aWhereAPI/CalculateWeatherVars_aWhere
R
false
true
674
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calculateWeatherVariables.R \name{calculateDewPointDepression} \alias{calculateDewPointDepression} \title{calculateDewPointDepression.} \usage{ calculateDewPointDepression(airTemp, RH, temp) } \arguments{ \item{-}{airTemp: in C - double} \item{-}{RH: 1-100 - double} \item{-}{temp: in C double} } \value{ double } \description{ \code{calculateDewPointDepression} function to calculate the dew point depression } \details{ function to calculate Dew Point Depression definition from Kim, 2002 paper } \examples{ NEED TO INSERT } \references{ see aWhere API Documentation (insert HTTP address) }
#--------------------------------------------------------------------------------------------------------------------------------
# R-3.4.1
# mordor
# Objective : Identify cluster specific peaks using scABC's function on GBM, PFA and HF
#--------------------------------------------------------------------------------------------------------------------------------

#----------------------------------------------------------------
# load dependencies
#----------------------------------------------------------------
library(data.table)
# Local copy of scABC's clustering code (original at timydaley/scABC).
source("scripts/scABC.cluster.R")
#source("https://raw.githubusercontent.com/timydaley/scABC/master/R/cluster.R")
# 256-step white-to-red palette for the peak image below.
scalered <- colorRampPalette(c("white", "red"), space = "rgb")(256)

#--------------------------------
## Read in binary matrix
## Columns 1-3 are chrom/start/end (fused into rownames); the remaining
## columns are per-sample counts.
#--------------------------------
binarymatds="data/ConsensusSet/PCSC1/GBM.PFA.HF.Consensus.Catalogue.Binarymat.txt"
binarymat <- read.table(binarymatds, header=T, sep="\t", stringsAsFactors = F,check.names = F)
rownames(binarymat) <- paste(binarymat[,1], binarymat[,2], binarymat[,3], sep="_")
ncounts <- binarymat[,4:ncol(binarymat)]
rownames(ncounts) <- rownames(binarymat)

#--------------------------------
## Annotations
## bg: uniform background medians for all 33 samples.
## Cluster labels: samples 1-23 and 27-33 -> cluster 1 (GBM/PFA),
## samples 24-26 -> cluster 2 (HF). NOTE(review): this hard-codes the
## column order of the input matrix — confirm it matches the catalogue.
#--------------------------------
bg <- c(rep(1,33))
gbmpfa.hf.cl <- c(rep(1,23),rep(2,3), rep(1,7))
names(bg) <- colnames(ncounts)
names(gbmpfa.hf.cl) <- colnames(ncounts)

#--------------------------------
## Run scabc: per-peak cluster-specificity p-values.
#--------------------------------
gbmpfa.hf.scabc = getClusterSpecificPvalue(ForeGround=as.matrix(ncounts),
                                           cluster_assignments =gbmpfa.hf.cl, background_medians = bg)
save(gbmpfa.hf.scabc, file=paste0("results/PCSC1/Cluster/scABC/scABC.GBMPFAvsHF.peakselection.Rdata"))

#--------------------------------
## Extract P values (one column per cluster), sorted by peak id.
#--------------------------------
gbmpfa.hf.scabc.pval = as.data.frame(gbmpfa.hf.scabc$pvalue)
rownames(gbmpfa.hf.scabc.pval) <- rownames(ncounts)
colnames(gbmpfa.hf.scabc.pval) <- c("GBMPFA","HF")
gbmpfa.hf.scabc.pval$id <- rownames(gbmpfa.hf.scabc.pval)
gbmpfa.hf.scabc.pval <- gbmpfa.hf.scabc.pval[order(gbmpfa.hf.scabc.pval$id),]

#--------------------------------
## Combine dataset and write the p-value table.
#--------------------------------
opname="GBMPFAvsHF"
write.table(gbmpfa.hf.scabc.pval, file=paste0("results/PCSC1/Cluster/scABC/scABC.Combined.", opname,".txt"), row.names=F, col.names=T, sep="\t", quote=F)

#--------------------------------
## Plot images: heatmap of peaks specific to either cluster (p < 0.05,
## unadjusted). Peaks in both lists appear twice in the image.
#--------------------------------
gbmsp <- subset(gbmpfa.hf.scabc.pval$id, gbmpfa.hf.scabc.pval$GBMPFA < 0.05)
hfsp <- subset(gbmpfa.hf.scabc.pval$id, gbmpfa.hf.scabc.pval$HF < 0.05)
pdf(paste0("results/PCSC1/Cluster/scABC/scABC.P0.05.Image.", opname,".pdf"))
image(t(apply(as.matrix(ncounts[c(gbmsp,hfsp),]),2,rev)), col=scalered)
dev.off()
/scripts/11_scabc.GBMPFAHF.R
no_license
aditiq/CSC-lupien
R
false
false
3,208
r
#-------------------------------------------------------------------------------------------------------------------------------- #-------------------------------------------------------------------------------------------------------------------------------- # R-3.4.1 # mordor # Objective : Identify cluster specific peaks using scABC's function on GBM, PFA and HF #-------------------------------------------------------------------------------------------------------------------------------- #-------------------------------------------------------------------------------------------------------------------------------- #---------------------------------------------------------------- # load dependencies #---------------------------------------------------------------- library(data.table) source("scripts/scABC.cluster.R") #source("https://raw.githubusercontent.com/timydaley/scABC/master/R/cluster.R") scalered <- colorRampPalette(c("white", "red"), space = "rgb")(256) #---------------------------------------------------------------- # Define function to run scABC on different pairwise comparisons #---------------------------------------------------------------- #-------------------------------- ## Read in binary matrix #-------------------------------- binarymatds="data/ConsensusSet/PCSC1/GBM.PFA.HF.Consensus.Catalogue.Binarymat.txt" binarymat <- read.table(binarymatds, header=T, sep="\t", stringsAsFactors = F,check.names = F) rownames(binarymat) <- paste(binarymat[,1], binarymat[,2], binarymat[,3], sep="_") ncounts <- binarymat[,4:ncol(binarymat)] rownames(ncounts) <- rownames(binarymat) #-------------------------------- ## Annotations #-------------------------------- bg <- c(rep(1,33)) gbmpfa.hf.cl <- c(rep(1,23),rep(2,3), rep(1,7)) names(bg) <- colnames(ncounts) names(gbmpfa.hf.cl) <- colnames(ncounts) #-------------------------------- ## Run scabc #-------------------------------- gbmpfa.hf.scabc = getClusterSpecificPvalue(ForeGround=as.matrix(ncounts), 
cluster_assignments =gbmpfa.hf.cl, background_medians = bg) save(gbmpfa.hf.scabc, file=paste0("results/PCSC1/Cluster/scABC/scABC.GBMPFAvsHF.peakselection.Rdata")) #-------------------------------- ## Extract P values #-------------------------------- gbmpfa.hf.scabc.pval = as.data.frame(gbmpfa.hf.scabc$pvalue) rownames(gbmpfa.hf.scabc.pval) <- rownames(ncounts) colnames(gbmpfa.hf.scabc.pval) <- c("GBMPFA","HF") gbmpfa.hf.scabc.pval$id <- rownames(gbmpfa.hf.scabc.pval) gbmpfa.hf.scabc.pval <- gbmpfa.hf.scabc.pval[order(gbmpfa.hf.scabc.pval$id),] #-------------------------------- ## Combine dataset #-------------------------------- opname="GBMPFAvsHF" write.table(gbmpfa.hf.scabc.pval, file=paste0("results/PCSC1/Cluster/scABC/scABC.Combined.", opname,".txt"), row.names=F, col.names=T, sep="\t", quote=F) #-------------------------------- ## Plot images #-------------------------------- gbmsp <- subset(gbmpfa.hf.scabc.pval$id, gbmpfa.hf.scabc.pval$GBMPFA < 0.05) hfsp <- subset(gbmpfa.hf.scabc.pval$id, gbmpfa.hf.scabc.pval$HF < 0.05) pdf(paste0("results/PCSC1/Cluster/scABC/scABC.P0.05.Image.", opname,".pdf")) image(t(apply(as.matrix(ncounts[c(gbmsp,hfsp),]),2,rev)), col=scalered) dev.off()
#' Get imputed data
#'
#' Get mean imputed data by running E-step once without GQ approximation using the LRT estimates.
#'
#' @param data.obs Observed count matrix for endogeneous genes, rows represent genes, columns represent cells
#' @param par.est LRT estimates, using par.DE by default. Column 1 is the logit of the
#'   zero-inflation probability, the last column the log dispersion, and the columns in
#'   between the regression coefficients multiplying \code{XW}.
#' @param sf A vector containing size factors for all cells
#' @param CE A vector containing capture efficiencies for all cells
#' @param XW A matrix containing information for X (cell-type) and W (other covariates)
#' @param tau cell-specific estimates (intcp,slope) that link Beta-Binomial dispersion parameter to the mean expression.
#' @param parallel If \code{TRUE}, run in parallel
#'
#' @return A matrix containing the mean imputed data.
#'
#' @export
#'
getImputed <- function(data.obs, par.est, sf, CE, XW, tau, parallel = T) {
  ngene <- nrow(data.obs)
  ncell <- ncol(data.obs)
  # Dropout-model coefficients per cell: intercept fixed at logit(CE), slope 0.
  DO.coef <- matrix(0, ncell, 2)
  DO.coef[, 1] <- log(CE/(1-CE))
  # 16-point Gaussian quadrature object, reused across all genes.
  gq <- gauss.quad(16)
  if (parallel) {
    # One foreach task per gene; each task returns a row of
    # c(imputed values, expression probabilities), rbind-combined below.
    temp <- foreach (i = 1:ngene, .combine = 'rbind', .packages = c('VGAM', 'DECENT2')) %dopar% {
      # Inverse logit of column 1: zero-inflation probability for gene i.
      est.pi0 <- exp(par.est[i,1])/(1 + exp(par.est[i,1]))
      # Log-linear mean from the covariates.
      # NOTE(review): unlike getSImputed below, est.mu is NOT scaled by sf
      # here; sf is passed to Estep2ByGene instead — confirm this is intended.
      est.mu <- c(exp( XW %*% as.matrix(par.est[i,-c(1,ncol(par.est))]) ) )
      est.disp<- exp(par.est[i,ncol(par.est)])
      out <- Estep2ByGene(par = DO.coef, z = data.obs[i, ], sf = sf, pi0 = rep(est.pi0,ncell),
                          mu = est.mu, disp = est.disp, k = tau[,2], b = tau[,1], GQ.object=gq)
      # Fall back to naive z/CE scaling where the conditional expectation is NA.
      # Second half of the row: 1 - out$PE0Z0 (presumably the posterior
      # probability the gene is expressed — confirm against Estep2ByGene).
      return(c(ifelse(is.na(out$EYZ0E1), data.obs[i, ]/CE, out$EYZ0E1), 1 - out$PE0Z0))
    }
    data.imp <- temp[, 1:ncell]
    PE <- temp[, (ncell+1):(2*ncell)]
  } else {
    # Serial path: identical computation, filling preallocated matrices.
    data.imp <- data.obs
    PE <- matrix(0, ngene, ncell)
    for (i in 1:ngene) {
      est.pi0 <- exp(par.est[i,1])/(1 + exp(par.est[i,1]))
      est.mu <- c(exp( XW %*% as.matrix(par.est[i,-c(1,ncol(par.est))]) ) )
      est.disp<- exp(par.est[i,ncol(par.est)])
      out <- Estep2ByGene(par = DO.coef, z = data.obs[i, ], sf = sf, pi0 = rep(est.pi0,ncell),
                          mu = est.mu, disp = est.disp, k = tau[,2], b = tau[,1], GQ.object=gq)
      data.imp[i, ] <- ifelse(is.na(out$EYZ0E1),data.obs[i, ]/CE,out$EYZ0E1)
      PE[i, ]<- 1 - out$PE0Z0
    }
  }
  data.imp = as.matrix(data.imp)
  dimnames(data.imp) <- dimnames(data.obs)
  # Mean imputation: conditional expectation weighted by expression probability.
  return(data.imp*PE)
}

#' Get single-imputation data
#'
#' Get single imputed data by sampling from the posterior distribution of the complete data
#' given the observed data and LRT estimates. One draw is generated per gene/cell
#' (M = 1 is passed to \code{SImputeByGene} internally; the exported signature
#' takes no M argument).
#'
#' @param data.obs Observed count matrix for endogeneous genes, rows represent genes, columns represent cells
#' @param par.est LRT/EM estimates, using par.DE by default
#' @param sf A vector containing size factors for all cells
#' @param CE A vector containing capture efficiencies for all cells
#' @param XW A matrix containing information for X (cell-type) and W (other covariates)
#' @param tau cell-specific estimates (intcp,slope) that link Beta-Binomial dispersion parameter to the mean expression.
#' @param parallel If \code{TRUE}, run in parallel
#'
#' @return A matrix containing the single imputed data.
#'
#' @export
#'
getSImputed <- function(data.obs, par.est, sf, CE, XW, tau, parallel = T) {
  ngene <- nrow(data.obs)
  ncell <- ncol(data.obs)
  # Dropout-model coefficients per cell: intercept fixed at logit(CE), slope 0.
  DO.coef <- matrix(0, ncell, 2)
  DO.coef[, 1] <- log(CE/(1-CE))
  if (parallel) {
    # One sampled row of imputed counts per gene, rbind-combined.
    temp <- foreach (i = 1:ngene, .combine = 'rbind', .packages = c('VGAM', 'DECENT2')) %dopar% {
      est.pi0 <- exp(par.est[i,1])/(1 + exp(par.est[i,1]))
      # Here the mean IS scaled by the size factors (contrast with getImputed).
      est.mu <- c(exp( XW %*% as.matrix(par.est[i,-c(1,ncol(par.est))]) ) * sf)
      est.disp<- exp(par.est[i,ncol(par.est)])
      out <- SImputeByGene(par = DO.coef, z = data.obs[i, ], sf=sf, pi0 = rep(est.pi0,ncell),
                           mu = est.mu, disp = est.disp, k = tau[,2], b = tau[,1],M=1)
      return(out)
    }
    data.imp <- temp[,1:ncell]
  } else {
    # Serial path: identical computation, filling a preallocated matrix.
    data.imp <- matrix(0,nrow(data.obs),ncol(data.obs))
    for (i in 1:ngene) {
      est.pi0 <- exp(par.est[i,1])/(1 + exp(par.est[i,1]))
      est.mu <- c(exp( XW %*% as.matrix(par.est[i,-c(1,ncol(par.est))]) ) * sf)
      est.disp<- exp(par.est[i,ncol(par.est)])
      data.imp[i,] <- SImputeByGene(par = DO.coef, z = data.obs[i, ], sf=sf, pi0 = rep(est.pi0,ncell),
                                    mu = est.mu, disp = est.disp, k = tau[,2], b = tau[,1],M=1)
    }
  }
  dimnames(data.imp) <- dimnames(data.obs)
  return(data.imp)
}
/R/get_imputed.R
no_license
RudRho/DECENT
R
false
false
4,545
r
#' Get imputed data #' #' Get mean imputed data by running E-step once without GQ approximation using the LRT estimates. #' #' @param data.obs Observed count matrix for endogeneous genes, rows represent genes, columns represent cells #' @param par.est LRT estimates, using par.DE by default #' @param sf A vector containing size factors for all cells #' @param CE A vector containing capture efficiencies for all cells #' @param XW A matrix containing information for X (cell-type) and W (other covariates) #' @param tau cell-specific estimates (intcp,slope) that link Beta-Binomial dispersion parameter to the mean expression. #' @param parallel If \code{TRUE}, run in parallel #' #' @return A matrix containing the mean imputed data. #' #' @export #' getImputed <- function(data.obs, par.est, sf, CE, XW, tau, parallel = T) { ngene <- nrow(data.obs) ncell <- ncol(data.obs) DO.coef <- matrix(0, ncell, 2) DO.coef[, 1] <- log(CE/(1-CE)) gq <- gauss.quad(16) if (parallel) { temp <- foreach (i = 1:ngene, .combine = 'rbind', .packages = c('VGAM', 'DECENT2')) %dopar% { est.pi0 <- exp(par.est[i,1])/(1 + exp(par.est[i,1])) est.mu <- c(exp( XW %*% as.matrix(par.est[i,-c(1,ncol(par.est))]) ) ) est.disp<- exp(par.est[i,ncol(par.est)]) out <- Estep2ByGene(par = DO.coef, z = data.obs[i, ], sf = sf, pi0 = rep(est.pi0,ncell), mu = est.mu, disp = est.disp, k = tau[,2], b = tau[,1], GQ.object=gq) return(c(ifelse(is.na(out$EYZ0E1), data.obs[i, ]/CE, out$EYZ0E1), 1 - out$PE0Z0)) } data.imp <- temp[, 1:ncell] PE <- temp[, (ncell+1):(2*ncell)] } else { data.imp <- data.obs PE <- matrix(0, ngene, ncell) for (i in 1:ngene) { est.pi0 <- exp(par.est[i,1])/(1 + exp(par.est[i,1])) est.mu <- c(exp( XW %*% as.matrix(par.est[i,-c(1,ncol(par.est))]) ) ) est.disp<- exp(par.est[i,ncol(par.est)]) out <- Estep2ByGene(par = DO.coef, z = data.obs[i, ], sf = sf, pi0 = rep(est.pi0,ncell), mu = est.mu, disp = est.disp, k = tau[,2], b = tau[,1], GQ.object=gq) data.imp[i, ] <- ifelse(is.na(out$EYZ0E1),data.obs[i, 
]/CE,out$EYZ0E1) PE[i, ]<- 1 - out$PE0Z0 } } data.imp = as.matrix(data.imp) dimnames(data.imp) <- dimnames(data.obs) return(data.imp*PE) } #' Get imputed data #' #' Get single imputed data by sampling from the posterior distribution of the complete data given the observed data and LRT estimates. #' #' @param data.obs Observed count matrix for endogeneous genes, rows represent genes, columns represent cells #' @param par.est LRT/EM estimates, using par.DE by default #' @param sf A vector containing size factors for all cells #' @param CE A vector containing capture efficiencies for all cells #' @param XW A matrix containing information for X (cell-type) and W (other covariates) #' @param tau cell-specific estimates (intcp,slope) that link Beta-Binomial dispersion parameter to the mean expression. #' @param M number of imputed values to be generated. #' @param parallel If \code{TRUE}, run in parallel #' #' @return A matrix containing the single imputed data. #' #' @export #' getSImputed <- function(data.obs, par.est, sf, CE, XW, tau, parallel = T) { ngene <- nrow(data.obs) ncell <- ncol(data.obs) DO.coef <- matrix(0, ncell, 2) DO.coef[, 1] <- log(CE/(1-CE)) if (parallel) { temp <- foreach (i = 1:ngene, .combine = 'rbind', .packages = c('VGAM', 'DECENT2')) %dopar% { est.pi0 <- exp(par.est[i,1])/(1 + exp(par.est[i,1])) est.mu <- c(exp( XW %*% as.matrix(par.est[i,-c(1,ncol(par.est))]) ) * sf) est.disp<- exp(par.est[i,ncol(par.est)]) out <- SImputeByGene(par = DO.coef, z = data.obs[i, ], sf=sf, pi0 = rep(est.pi0,ncell), mu = est.mu, disp = est.disp, k = tau[,2], b = tau[,1],M=1) return(out) } data.imp <- temp[,1:ncell] } else { data.imp <- matrix(0,nrow(data.obs),ncol(data.obs)) for (i in 1:ngene) { est.pi0 <- exp(par.est[i,1])/(1 + exp(par.est[i,1])) est.mu <- c(exp( XW %*% as.matrix(par.est[i,-c(1,ncol(par.est))]) ) * sf) est.disp<- exp(par.est[i,ncol(par.est)]) data.imp[i,] <- SImputeByGene(par = DO.coef, z = data.obs[i, ], sf=sf, pi0 = rep(est.pi0,ncell), mu = 
est.mu, disp = est.disp, k = tau[,2], b = tau[,1],M=1) } } dimnames(data.imp) <- dimnames(data.obs) return(data.imp) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/paws.mediastoredata_operations.R \name{delete_object} \alias{delete_object} \title{Deletes an object at the specified path} \usage{ delete_object(Path) } \arguments{ \item{Path}{[required] The path (including the file name) where the object is stored in the container. Format: <folder name>/<folder name>/<file name>} } \description{ Deletes an object at the specified path. } \section{Accepted Parameters}{ \preformatted{delete_object( Path = "string" ) } }
/service/paws.mediastoredata/man/delete_object.Rd
permissive
CR-Mercado/paws
R
false
true
540
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/paws.mediastoredata_operations.R \name{delete_object} \alias{delete_object} \title{Deletes an object at the specified path} \usage{ delete_object(Path) } \arguments{ \item{Path}{[required] The path (including the file name) where the object is stored in the container. Format: <folder name>/<folder name>/<file name>} } \description{ Deletes an object at the specified path. } \section{Accepted Parameters}{ \preformatted{delete_object( Path = "string" ) } }
### Basics of plotting
## Uses Tokyo climate data as the running example.
## Column names are Japanese: 月 = month, 日 = day, 気温 = temperature,
## 日射量 = solar radiation.
mydata <- read.csv("tokyo-weather.csv",fileEncoding="utf8")
## Basic plotting (plotting a vector)
(temp <- subset(mydata,月==10,select=気温,drop=TRUE)) # extract the temperature
plot(temp) # default draws points (type="p")
plot(temp, type="l", col="blue",ylim=c(0,30)) # restrict the y axis; line plot (type="l")
plot(temp, type="s",
     col="orange",
     xlab="day",ylab="temperature",main="Tokyo, Oct") # with a title; step plot (type="s")
## Plotting functions
curve(sin, 0, 4*pi,
      col="blue", # line color
      lwd=2, # line width
      ylab="sin/cos" # y-axis label
      )
curve(cos, add=TRUE, # overlay on the existing plot
      col="red", lwd=2)
## Plotting data points
(x <- seq(0, 2*pi, by=0.1)) # sequence of points from 0 to 2*pi
(y <- sin(x) + rep_len(c(-0.2, 0.1), length(x)))
plot(x, y, type="p", pch="x", ylim=c(-2,2)) # ylim sets the value range
points(x, y, col="green", pch=19) # add points; pch selects the point symbol
curve(sin, add=TRUE, col="orange", lwd=2)
lines(x, y, col="blue") # add a connecting line
## Scatter plots from a data frame
par(family="HiraginoSans-W4") # set a Japanese font (on macOS)
plot(気温 ~ 日, data=subset(mydata,月==8), # temperature over the days of August
     type="l", col="blue", lwd=2) # lwd scales the line width
plot(気温 ~ 日射量, data=subset(mydata,月==8), # solar radiation vs temperature in August
     pch="*", col="red", cex=2) # cex scales the point size
abline(lm(気温 ~ 日射量, data=subset(mydata,月==8)),
       col="blue",lwd=2) # draw the regression line (details in a later lecture)
with(subset(mydata, 月==8), # inside with(), column names are directly usable
     scatter.smooth(日射量,気温,col="red",pch="*",cex=2,
                    lpars=list(col="blue",lwd=2))) # example of a smoothed curve
/docs/autumn/code/03-plot.r
no_license
noboru-murata/sda
R
false
false
1,938
r
### 描画の基礎 ## 描画の例として東京都の気候データを使う mydata <- read.csv("tokyo-weather.csv",fileEncoding="utf8") ## 基本的な描画 (ベクトルの描画) (temp <- subset(mydata,月==10,select=気温,drop=TRUE)) # 温度を取り出す plot(temp) # 既定値は点(type="p")として描画 plot(temp, type="l", col="blue",ylim=c(0,30)) # y軸を制限,折れ線(type="l") plot(temp, type="s", col="orange", xlab="day",ylab="temperature",main="Tokyo, Oct") # タイトル付き,階段(type="s") ## 関数の描画 curve(sin, 0, 4*pi, col="blue", # グラフの線の色 lwd=2, # グラフの線の太さ ylab="sin/cos" # y軸のラベル ) curve(cos, add=TRUE, # グラフを上書き col="red", lwd=2) ## データ点の描画 (x <- seq(0, 2*pi, by=0.1)) # 0から2πの点列 (y <- sin(x) + rep_len(c(-0.2, 0.1), length(x))) plot(x, y, type="p", pch="x", ylim=c(-2,2)) # ylimで値域を指定 points(x, y, col="green", pch=19) # 点を追加. pchは点の形を指定 curve(sin, add=TRUE, col="orange", lwd=2) lines(x, y, col="blue") # 折れ線を追加 ## データフレームを用いた散布図 par(family="HiraginoSans-W4") # 日本語フォントの指定 (MacOSXの場合) plot(気温 ~ 日, data=subset(mydata,月==8), # 8月の気温の推移 type="l", col="blue", lwd=2) # lwdは線の太さの倍率を指定 plot(気温 ~ 日射量, data=subset(mydata,月==8), # 8月の日射量と気温の関係 pch="*", col="red", cex=2) # cexは点の大きさの倍率を指定 abline(lm(気温 ~ 日射量, data=subset(mydata,月==8)), col="blue",lwd=2) # 回帰直線を書く (詳しくは次回) with(subset(mydata, 月==8), # withの中ではデータフレームの列名が使用可能 scatter.smooth(日射量,気温,col="red",pch="*",cex=2, lpars=list(col="blue",lwd=2))) # 滑らかな曲線の例
# Draw the hierarchical-clustering dendrogram (`hc_result` and the sampled
# row indices `idx` are defined earlier in the session), with all leaves
# aligned at the baseline (hang = -1) and labelled by iris species.
plot(hc_result, hang=-1, labels = iris$Species[idx])
# Overlay rectangles marking a cut of the tree into 5 clusters.
rect.hclust(hc_result, k=5)
/work/r데이터분석_예제파일/예제/9_06.R
no_license
bass4th/R
R
false
false
81
r
plot(hc_result, hang=-1, labels = iris$Species[idx]) rect.hclust(hc_result, k=5)
## rnaseq barcode example
## Driver script: loads the barcode function and its fitted parameters,
## aligns the test counts to the reference gene set, then computes a
## binary expression barcode.
source("rnaseq_barcode.R")
load("geoMeans.rda")
load("barcode_params.rda")
load(file="e_test.rda")
## object should be a vector of unnormalized counts
## with the same length and names as geoMeans
# Reorder/subset test counts to the geoMeans gene order; drop=FALSE keeps
# the single-column matrix shape.
e_test <- e_test[names(geoMeans),,drop=FALSE]
# Sanity check: gene names must line up exactly (prints TRUE/FALSE;
# NOTE(review): consider stopifnot() so a mismatch actually aborts).
identical(names(geoMeans), rownames(e_test))
tst <- rnaseq_barcode(object=e_test,
                      gmeans=geoMeans,
                      mu=params$mu_r2,
                      tau=sqrt(params$tau2_r2), # params store variance; function takes sd
                      cutoff = 6.5,
                      output = "binary")
/functions/barcode_example.R
no_license
jaredcara/barcode
R
false
false
470
r
## rnaseq barcode example source("rnaseq_barcode.R") load("geoMeans.rda") load("barcode_params.rda") load(file="e_test.rda") ## object should be a vector of unnormalized counts ## with the same length and names as geoMeans e_test <- e_test[names(geoMeans),,drop=FALSE] identical(names(geoMeans), rownames(e_test)) tst <- rnaseq_barcode(object=e_test, gmeans=geoMeans, mu=params$mu_r2, tau=sqrt(params$tau2_r2), cutoff = 6.5, output = "binary")
library(DT) library(shiny) library(dplyr) library(lubridate) library(tidyverse) library(leaflet) library(patchwork) library(ggpattern) library(scales) library(envreportutils) library(plotly) library(sf) library(bcmaps) library(readr) library(ggplot2) library(envair) library(rcaaqs) dirs_location <- 'https://raw.githubusercontent.com/bcgov/air-zone-reports/master/data/out' if (0) { dirs_location <- './data/out' dirs_location <- '../data/out' #local location, two dots for final, one dot for debug list.files(dirs_location) test <- read_csv('./data/out/annual_results.csv') %>% filter(parameter=='PM25') } #define functions------- map_exceedance <- function(map_a = NULL,exceedances,az_mgmt,year,size = c('200px','400px'), airzone = NULL) { if (0) { source('./level4_page/03_setup.R') source('./level4_page/02_setup.R') dirs_location <- './data/out' year = 2017 map_a <- NULL exceedances <- get_PM_exceedancesummary() az_mgmt <- readr::read_rds(paste(dirs_location,'az_mgmt.Rds',sep='/')) %>% left_join(df_colour) size <- c('200px','400px') } df_colour <- tribble( ~airzone,~colour_01, "Northeast",'#CDC08C', "Georgia Strait" ,'#F4B5BD', "Southern Interior",'#9C964A', "Lower Fraser Valley",'#85D4E3', "Central Interior" ,'#FAD77B', "Coastal",'#CEAB07', "Northwest","#24281A" ) airzone_select <- airzone lst_airzones <- az_mgmt %>% pull(airzone) airzone_exceedance_season <- exceedances$season airzone_exceedance <- exceedances$annual station_exceedance <- exceedances$annual_stations station_exceedance_season <- exceedances$season_stations lst_stations <- exceedances$stations year_select <- year colfunc <- colorRampPalette(c("blue", "red")) # station_exceedance$colorscaled <- color_scales[station_exceedance$days_exceed] if (is.null(map_a)) { #create map for annual station a <- leaflet(width = size[1],height = size[2], options = leafletOptions(attributionControl=FALSE, dragging = TRUE, minZoom = 4, maxZoom=10)) %>% set_bc_view(zoom=3.5) %>% # setView(zoom =5) %>% setMaxBounds(lng1 = 
-110,lat1=45,lng2=-137,lat2=62) %>% addProviderTiles(providers$Stamen.TonerLite, options = providerTileOptions(opacity = 1) ) %>% # addProviderTiles(providers$Stamen.TonerLabels) %>% add_bc_home_button() } else { a <- map_a } #add colour for the station circles max_days <- station_exceedance %>% filter(year == year_select) %>% pull(days_exceed) %>% max() #develop levels for the exceedance legends color_scales <- colfunc(max_days) for (airzone_ in lst_airzones) { if (0) { airzone_ <- lst_airzones[1] } liststations_ <- lst_stations %>% filter(AIRZONE == airzone_, year == year_select) station_exceedance_ <- station_exceedance %>% filter(AIRZONE == airzone_, year == year_select) lst_sites <- station_exceedance %>% filter(AIRZONE == airzone_) %>% pull(site) %>% unique() a <- a %>% addPolygons(data = az_mgmt %>% filter(airzone == airzone_), layerId = airzone_, color = 'black', fillColor = ~colour_01, weight = 1, opacity = 1, fillOpacity = 0.6, label = paste(airzone_,'Air Zone'), labelOptions = labelOptions(textsize = "15px"), highlight = highlightOptions(weight = 3, color = "blue", bringToFront = FALSE)) #add stations if (nrow(liststations_) >0) { # print('trace inside map function') # print(nrow(liststations_)) # print(year_select) a <- a %>% #remove all stations from that airzone removeMarker( layerId = lst_sites) %>% addCircleMarkers(lng=station_exceedance_$LONG, lat = station_exceedance_$LAT, layerId = station_exceedance_$site, label = station_exceedance_$site, color = color_scales[station_exceedance_$days_exceed+1], radius=3 ) # addMarkers(lng=liststations_$LONG, # lat=liststations_$LAT, # # layerId = liststations$AIRZONE, # # group = airzone_, # label = liststations_$site, # options=markerOptions()) } } if (is.null(map_a)) { #add legend based on color_scales tot_ <- length(color_scales) scl <- c(color_scales[5],color_scales[tot_/2],color_scales[tot_]) lbl <- c('<5 days',paste(round(tot_/2),'days'),paste('>',round(tot_*0.8),' days',sep='')) a <- a %>% 
addLegend(position ="bottomleft", colors = scl,label = lbl) } #add for selected airzone if (!is.null(airzone_select)) { print(paste('updating,highlighting area',airzone_select)) a <- a %>% addPolylines(data = az_mgmt %>% filter(airzone == airzone_select), layerId = 'selectedairzone', group = 'airzonehighlight', color = 'blue',weight = 5) } else { a <- a %>% clearGroup('airzonehighlight') } plot_a <- a return(a) } #' Get a count of PM exceedances #' #' get_PM_exceedancesummary <- function(dirs_location = './data/out') { # dirs_location <- './data/out' #local location, two dots for final, one dot for debug if (0) { dirs_location <- './data/out' } # list.files(dirs_location) print('get PM exceedance') # df_stations <- readRDS(paste(dirs_location,'liststations_merged.Rds',sep='/')) df_stations <- envair::listBC_stations(use_CAAQS = TRUE,merge_Stations = TRUE) lst_remove <- df_stations %>% filter(AQMS == 'N') %>% pull(site) #retrieve complete list of stations, to be included in resulting list for that year df_stationdata <- readr::read_csv(paste(dirs_location,'annual_results.csv',sep='/')) %>% filter(!is.na(value)) %>% select(site,year) %>% distinct() %>% filter(!site %in% lst_remove) %>% left_join(df_stations %>% select(site,LAT,LONG,AIRZONE)) #get TFEE list tfee_list <- readr::read_csv(paste(dirs_location,'tfee.csv',sep='/')) %>% left_join(df_stations %>% select(STATION_NAME,site)) %>% select(site,DATE,PARAMETER) %>% distinct() %>% filter(PARAMETER == 'PM25') %>% mutate(tfee = TRUE, DATE= as.Date(DATE)) # df_exceed_annual <- readRDS(paste(dirs_location,'exceed_annual.Rds',sep='/')) %>% # rename(site = STATION_NAME) # df_exceed_month<- readRDS(paste(dirs_location,'exceed_month.Rds',sep='/')) %>% # rename(site = STATION_NAME) # df_exceed_seasonal <- readRDS(paste(dirs_location,'exceed_seasonal.Rds',sep='/')) %>% # rename(site = STATION_NAME) # df_exceedances <- readRDS(paste(dirs_location,'exceedances.Rds',sep='/')) %>% # rename(site = STATION_NAME) df_exceedances <- 
read_csv(paste(dirs_location,'exceedances.csv',sep='/')) %>% rename(site = STATION_NAME) # colnames(df_stations) df_seasons <- tibble( month = 1:12 ) df_seasons$seasons[df_seasons$month %in% c(12,1,2)] <- 'Winter' df_seasons$seasons[df_seasons$month %in% c(3,4,5)] <- 'Spring' df_seasons$seasons[df_seasons$month %in% c(6,7,8)] <- 'Summer' df_seasons$seasons[df_seasons$month %in% c(9,10,11)] <- 'Fall' #need to summarize exceedances to air zone and entire province #do not add the number or will have spatial problems #count for air zones ##count for an entire year---- #no wildfire assessment df_exceedances_year <- df_exceedances %>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% mutate(year = lubridate::year(DATE)) %>% ungroup() %>% select(year,DATE,AIRZONE) %>% distinct() %>% group_by(year,AIRZONE) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() #wildfire (tfee) assessment df_exceedances_year_tfee <- df_exceedances %>% filter((PARAMETER == 'PM25' & name == 'exceed_27') )%>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% left_join(tfee_list) %>% filter(tfee == TRUE) %>% mutate(year = lubridate::year(DATE)) %>% ungroup() %>% select(year,DATE,AIRZONE) %>% distinct() %>% group_by(year,AIRZONE) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() df_exceedances_year_bc <- df_exceedances_year %>% select(year,DATE) %>% distinct() %>% group_by(year) %>% summarise(days_exceed = n()) %>% mutate(AIRZONE = 'BC') df_exceedances_year_bc_tfee <- df_exceedances_year_tfee %>% select(year,DATE) %>% distinct() %>% group_by(year) %>% summarise(days_exceed = n()) %>% mutate(AIRZONE = 'BC') df_exceedances_year <- df_exceedances_year %>% bind_rows(df_exceedances_year_bc) %>% select(year,AIRZONE,days_exceed) %>% distinct() %>% arrange(year,AIRZONE) df_exceedances_year_tfee <- df_exceedances_year_tfee %>% bind_rows(df_exceedances_year_bc_tfee) %>% select(year,AIRZONE,days_exceed) %>% distinct() %>% 
arrange(year,AIRZONE) ##count for entire seasons----- df_exceedances_season <- df_exceedances %>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% mutate(year = lubridate::year(DATE), month = lubridate::month(DATE)) %>% left_join(df_seasons) %>% select(year,seasons,DATE,AIRZONE) %>% distinct() %>% group_by(year,AIRZONE,seasons) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() df_exceedances_season_tfee <- df_exceedances %>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% left_join(tfee_list) %>% filter(tfee == TRUE) %>% mutate(year = lubridate::year(DATE), month = lubridate::month(DATE)) %>% left_join(df_seasons) %>% select(year,seasons,DATE,AIRZONE) %>% distinct() %>% group_by(year,AIRZONE,seasons) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() df_exceedances_season_bc <- df_exceedances_season %>% select(year,seasons,DATE) %>% distinct() %>% group_by(year,seasons) %>% summarise(days_exceed = n())%>% mutate(AIRZONE = 'BC') df_exceedances_season_bc_tfee <- df_exceedances_season_tfee %>% select(year,seasons,DATE) %>% distinct() %>% group_by(year,seasons) %>% summarise(days_exceed = n())%>% mutate(AIRZONE = 'BC') df_exceedances_season <- df_exceedances_season %>% bind_rows(df_exceedances_season_bc)%>% select(year,seasons,AIRZONE,days_exceed) %>% distinct() %>% arrange(year,AIRZONE) df_exceedances_season_tfee <- df_exceedances_season_tfee %>% bind_rows(df_exceedances_season_bc_tfee)%>% select(year,seasons,AIRZONE,days_exceed) %>% distinct() %>% arrange(year,AIRZONE) #create for each station df_exceedances_station_year <- df_exceedances%>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% mutate(year = lubridate::year(DATE)) %>% ungroup() %>% select(year,site,DATE,AIRZONE) %>% distinct() %>% group_by(year,site,AIRZONE) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() %>% 
select(year,site,AIRZONE,days_exceed) %>% distinct() df_exceedances_station_year_tfee <- df_exceedances%>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% left_join(tfee_list) %>% filter(tfee == TRUE) %>% mutate(year = lubridate::year(DATE)) %>% ungroup() %>% select(year,site,DATE,AIRZONE) %>% distinct() %>% group_by(year,site,AIRZONE) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() %>% select(year,site,AIRZONE,days_exceed) %>% distinct() df_exceedances_station_seasonal <- df_exceedances %>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% mutate(year = lubridate::year(DATE), month = lubridate::month(DATE)) %>% left_join(df_seasons) %>% select(year,site,seasons,DATE,AIRZONE) %>% distinct() %>% group_by(year,site,AIRZONE,seasons) %>% dplyr::mutate(days_exceed = n()) %>% ungroup()%>% select(year,seasons,site,AIRZONE,days_exceed) %>% distinct() df_exceedances_station_seasonal_tfee <- df_exceedances %>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% left_join(tfee_list) %>% filter(tfee == TRUE) %>% mutate(year = lubridate::year(DATE), month = lubridate::month(DATE)) %>% left_join(df_seasons) %>% select(year,site,seasons,DATE,AIRZONE) %>% distinct() %>% group_by(year,site,AIRZONE,seasons) %>% dplyr::mutate(days_exceed = n()) %>% ungroup()%>% select(year,seasons,site,AIRZONE,days_exceed) %>% distinct() #fix df_stationdata duplicates df_stationdata <- df_stationdata %>% group_by(site,AIRZONE,year) %>% dplyr::mutate(index=1:n()) %>% filter(index==1) %>% select(-index) %>% ungroup() #left join to display zero values df_exceedances_year <- df_stationdata %>% select(AIRZONE,year) %>% distinct() %>% bind_rows(tibble( AIRZONE ='BC', year = min(df_stationdata$year):max(df_stationdata$year) )) %>% left_join(df_exceedances_year) %>% mutate(days_exceed = 
ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_year_tfee <- df_stationdata %>% select(AIRZONE,year) %>% distinct() %>% bind_rows(tibble( AIRZONE ='BC',year = min(df_stationdata$year):max(df_stationdata$year) )) %>% filter(year>=2014) %>% left_join(df_exceedances_year_tfee) %>% mutate(days_exceed = ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_season <- df_stationdata %>% select(AIRZONE,year) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) %>% distinct() %>% bind_rows(tibble( AIRZONE ='BC', year = min(df_stationdata$year):max(df_stationdata$year) ) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) ) %>% left_join(df_exceedances_season) %>% mutate(days_exceed = ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_season_tfee <- df_stationdata %>% select(AIRZONE,year) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) %>% distinct() %>% bind_rows(tibble( AIRZONE ='BC', year = min(df_stationdata$year):max(df_stationdata$year) ) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) ) %>% filter(year >=2014) %>% left_join(df_exceedances_season_tfee) %>% mutate(days_exceed = ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_station_year <- df_stationdata %>% select(site,AIRZONE,year,LAT,LONG) %>% distinct() %>% left_join(df_exceedances_station_year) %>% mutate(days_exceed = ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_station_year_tfee <- df_stationdata %>% select(site,AIRZONE,year,LAT,LONG) %>% distinct() %>% filter(year>=2014) %>% left_join(df_exceedances_station_year_tfee) %>% mutate(days_exceed = ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_station_seasonal <- df_stationdata %>% select(site,AIRZONE,year,LAT,LONG) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) %>% distinct() %>% left_join(df_exceedances_station_seasonal) %>% mutate(days_exceed= ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_station_seasonal_tfee <- 
df_stationdata %>% select(site,AIRZONE,year,LAT,LONG) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) %>% distinct() %>% filter(year>=2014) %>% left_join(df_exceedances_station_seasonal_tfee) %>% mutate(days_exceed= ifelse(is.na(days_exceed),0,days_exceed)) df_exceedance <- list(annual = df_exceedances_year, annual_tfee = df_exceedances_year_tfee, season = df_exceedances_season, season_tfee = df_exceedances_season_tfee, annual_stations =df_exceedances_station_year, annual_stations_tfee = df_exceedances_station_year_tfee, season_stations = df_exceedances_station_seasonal, season_stations_tfee = df_exceedances_station_seasonal_tfee, stations = df_stationdata) return(df_exceedance) } #' Determine the air zone based on lat longs #' #' @param lat is the latitude, vector OK #' @param long is the longitude, vector OK get_airzone <- function(lat,long) { if (0) { latlong <- c(57.68,-120.614) } az_mgmt_gitURL <- 'https://github.com/bcgov/air-zone-reports/blob/master/data/out/az_mgmt.Rds?raw=true' az_mgmt <- readRDS(url(az_mgmt_gitURL)) #---------------- pnts <- data.frame( "x" = long, "y" = lat) # create a points collection pnts_sf <- do.call("st_sfc",c(lapply(1:nrow(pnts), function(i) {st_point(as.numeric(pnts[i, ]))}), list("crs" = 4326))) pnts_trans <- st_transform(pnts_sf, 2163) # apply transformation to pnts sf tt1_trans <- st_transform(az_mgmt, 2163) # apply transformation to polygons sf # intersect and extract state name pnts$airzone <- apply(st_intersects(tt1_trans, pnts_trans, sparse = FALSE), 2, function(col) { tt1_trans[which(col), ]$airzone }) return(pnts$airzone) } #' create graphs for days exceeding #' #' @param exceedances is the result of get_PM_exceedance() function graph_exceedance <- function(exceedances,AIRZONE = NULL,year = NULL) { if (0) { source('./level4_page/02_setup.R') dirs_location <- './data/out' AIRZONE <- NULL } require(tidyr) #set order for the seasons df_seasons <- tribble( ~seasons,~order, 'Winter',4, 'Spring',3, 
'Summer',2, 'Fall',1 ) year_select <- year df_colour <- tribble( ~airzone,~colour_01, "Northeast",'#CDC08C', "Georgia Strait" ,'#F4B5BD', "Southern Interior",'#9C964A', "Lower Fraser Valley",'#85D4E3', "Central Interior" ,'#FAD77B', "Coastal",'#CEAB07', "Northwest","#24281A" ) list_airzones <- df_colour$airzone # View(exceedances$season_stations) if (is.null(AIRZONE)) { AIRZONE <- 'BC' } df_annual <- exceedances$annual df_seasonal_tfee <- exceedances$season_tfee %>% mutate(tfee = TRUE) df_seasonal <- exceedances$season %>% mutate(tfee = FALSE) %>% bind_rows(df_seasonal_tfee) %>% pivot_wider(names_from = tfee, values_from =days_exceed) %>% dplyr::rename(`Wildfire Days` = `TRUE`,`Total` = `FALSE`) %>% mutate(`Wildfire Days` = ifelse(is.na(`Wildfire Days`),0,`Wildfire Days`))%>% mutate(`No Wildfire`= `Total` - `Wildfire Days`) %>% select(-Total) %>% pivot_longer(cols = c(`Wildfire Days`,`No Wildfire`)) %>% mutate(seasons = paste(seasons,'(',name,')',sep='')) vline <- function(x = 0, color = "red") { list( type = "line", y0 = 0, y1 = 1, yref = "paper", x0 = x, x1 = x, line = list(color = color,dash = 'dash') ) } p_annual <- df_annual %>% filter(!AIRZONE %in% c('BC',NA)) %>% filter(year>=1997) %>% plotly::plot_ly(x=~year,y=~days_exceed,color = ~AIRZONE, type='scatter',mode='lines',showlegend =T, hoverinfo ='y', hovertemplate = paste('%{y:,.0f}',' days',sep='') ) %>% layout(title = 'High PM<sub>2.5</sub> Levels in Air Zones', legend = list(orientation = 'h'), yaxis = list(title = 'Number of Days with High PM<sub>2.5</sub> Levels'), xaxis = list(title = 'Year') ) %>% plotly::layout(hovermode = 'x unified', barmode = 'stack') %>% layout(shapes = list(vline(year_select))) # event_register("plotly_click") #create results for BC, and all air zones { p_list <- NULL i_list <- NULL i <- 0 for (AIRZONE_ in c('BC',lst_airzones)) { i <- i +1 p_seasonal <- df_seasonal %>% left_join(df_seasons,by = 'seasons') %>% filter(AIRZONE == AIRZONE_) %>% filter(year >=2000) %>% 
plotly::plot_ly(x=~year,y=~value,color = ~reorder(seasons,order), type='bar',mode='bar',showlegend =T, hoverinfo ='y', hovertemplate = paste('%{y:,.0f}',' days',sep=''), colors = c("navajowhite2", "navajowhite3", "seagreen3", "seagreen4", "red3", "red4", "slategray2", 'slategray4' ) ) %>% layout(title = paste('<br>High PM<sub>2.5</sub> Levels for', ifelse(AIRZONE == 'BC',AIRZONE_, paste('the',AIRZONE_,'Air Zone') )), legend = list(orientation = 'h'), yaxis = list(title = 'Number of Days with High PM<sub>2.5</sub> Levels'), xaxis = list(title = 'Year') ) %>% plotly::layout(hovermode = 'x unified', barmode = 'stack',legend = list(x = 0.01, y = 0.9))%>% layout(shapes = list(vline(year_select))) # layout(annotations = list(x=2014,y=-1,yref='paper',xref='paper',text = 'wildfire counts only from 2014 onwards')) p_list[[i]] <- p_seasonal i_list <- c(i_list,AIRZONE_) } result <- list(plot_annual = p_annual,plot_seasonal = p_list,plot_definition = i_list, data = exceedances) } return(result) } #end of functions----- #define colours----- df_colour <- tribble( ~airzone,~colour_01, "Northeast",'#CDC08C', "Georgia Strait" ,'#F4B5BD', "Southern Interior",'#9C964A', "Lower Fraser Valley",'#85D4E3', "Central Interior" ,'#FAD77B', "Coastal",'#CEAB07', "Northwest","#24281A" ) lst_airzones <- df_colour$airzone #define external data------ exceedances <- get_PM_exceedancesummary(dirs_location) az_mgmt <- readr::read_rds(paste(dirs_location,'az_mgmt.Rds',sep='/')) %>% left_join(df_colour) print('Load az_mgmt complete') plots_list <- graph_exceedance(exceedances = exceedances,year = max(exceedances$annual$year)) print('Load Complete') #---SHINY SECTION---------------- # Actual shiny part ##ui section---- ui <- { fluidPage( h4(HTML('Fine Particulate Matter (PM<sub>2.5</sub>) Pollution')), tags$head( tags$style(HTML(" body { background-color: #f2efe9; } .container-fluid { background-color: #fff; width: 1200px; padding: 5px; } .topimg { width: 0px; display: block; margin: 0px auto 0px auto; 
} .title { text-align: center; } .toprow { margin: 5px 0px; padding: 5px; background-color: #38598a; } .filters { margin: 0px auto; } .shiny-input-container { width:100% !important; } .table { padding: 0px; margin-top: 0px; } .leaflet-top { z-index:999 !important; } ")) ), # h1("Fine Particulate Pollution", class = "title"), fluidRow(class = "toprow", fluidRow(class = 'filters', column(6, tags$style(type='text/css', '.selectize-input { font-size: 15px; line-height: 10px;} .selectize-dropdown { font-size: 16px; line-height: 20px; } .control-label {font-size: 24px; color: white !important;} .irs-min {font-size: 0px; color: white; !important} .irs-max {font-size: 0px; color: white;} .irs-single {font-size: 20px; color: white;} .irs-grid-text {font-size: 10px; color: white;}' ), sliderInput('year_slider',label ='Year', min = 1997, max = max(exceedances$annual$year), value = max(exceedances$annual$year), sep='') ))), fluidRow( column(4,h6(HTML("Number of Days with High PM<sub>2.5</sub> Levels</br>Click the map to select an air zone")), # fluidRow( leaflet::leafletOutput("map",height = '400px',width = '400px')), column(8,h6(HTML("Scroll through the graph to view the number of days with high PM<sub>2.5</sub> Levels.")), # div(style='height:400px;overflow-y: scroll;'), plotlyOutput("plot1",height = '400px',width = '800px') ) ) ) } ##server section---- server <- {shinyServer(function(input, output) { if (0) { map_exceedance(exceedances = exceedances, az_mgmt = az_mgmt, year = 2010) } #reactive_plot1 can carry over plots across events reactive_plot1 <- reactiveVal(plots_list) a <- map_exceedance(map_a = NULL,exceedances = exceedances ,az_mgmt = az_mgmt,year = max(exceedances$annual$year)) output$map <- renderLeaflet(a) output$plot1 <- renderPlotly({plot_out <- plots_list$plot_annual plot_out %>% ggplotly(source = 'plot1') #%>% event_register("plotly_click") }) observeEvent(input$year_slider, { print('Slider') print(input$year_slider) leafletProxy("map") %>% 
map_exceedance(exceedances = exceedances ,az_mgmt = az_mgmt,year =input$year_slider) plots_list <- graph_exceedance(exceedances = exceedances,year = input$year_slider) reactive_plot1(plots_list) #pass on value to reactive_plot1 # output$plot1 <- renderPlotly(plots_list$plot_annual) output$plot1 <- renderPlotly({ plot_out <- plots_list$plot_annual p <- plot_out %>% ggplotly(source = 'plot1') p }) }) #clicking the map, an airzone is selected observeEvent(input$map_shape_click, { p <- input$map_shape_click try({ airzone_select <- get_airzone(p$lat,p$lng) print(p$lat) print(p$lng) print(airzone_select) plots_list <- reactive_plot1() if (airzone_select != 'Northwest') { output$plot1 <- renderPlotly({ p <- plots_list$plot_seasonal[[which(plots_list$plot_definition == airzone_select)]] # Add a footnote footnote <- list( font = list(size = 10), x = 2000, y = 8, # xref = "paper", # yref = "paper", text = "*Counts for wildfire days before 2014 are not included", showarrow = FALSE ) p <- layout(p, annotations = list(footnote)) print(p) # Check if the footnote is included in the annotations list print('annotations inserted') # Return the plotly graph p }) print('Plot Refreshed') } leafletProxy("map") %>% map_exceedance(exceedances = exceedances ,az_mgmt = az_mgmt,year =input$year_slider,airzone = airzone_select) }) }) #clicking on the graph # observeEvent(event_data("plotly_click", source = "plot1"), { # values$plot.click.results <- event_data("plotly_click", source = "plot1") # print(values$plot.click.results) # }) }) } shinyApp(ui, server)
/level_shiny/pm_exceedance/shiny_exceedances.R
permissive
bcgov/air-zone-reports
R
false
false
28,935
r
library(DT) library(shiny) library(dplyr) library(lubridate) library(tidyverse) library(leaflet) library(patchwork) library(ggpattern) library(scales) library(envreportutils) library(plotly) library(sf) library(bcmaps) library(readr) library(ggplot2) library(envair) library(rcaaqs) dirs_location <- 'https://raw.githubusercontent.com/bcgov/air-zone-reports/master/data/out' if (0) { dirs_location <- './data/out' dirs_location <- '../data/out' #local location, two dots for final, one dot for debug list.files(dirs_location) test <- read_csv('./data/out/annual_results.csv') %>% filter(parameter=='PM25') } #define functions------- map_exceedance <- function(map_a = NULL,exceedances,az_mgmt,year,size = c('200px','400px'), airzone = NULL) { if (0) { source('./level4_page/03_setup.R') source('./level4_page/02_setup.R') dirs_location <- './data/out' year = 2017 map_a <- NULL exceedances <- get_PM_exceedancesummary() az_mgmt <- readr::read_rds(paste(dirs_location,'az_mgmt.Rds',sep='/')) %>% left_join(df_colour) size <- c('200px','400px') } df_colour <- tribble( ~airzone,~colour_01, "Northeast",'#CDC08C', "Georgia Strait" ,'#F4B5BD', "Southern Interior",'#9C964A', "Lower Fraser Valley",'#85D4E3', "Central Interior" ,'#FAD77B', "Coastal",'#CEAB07', "Northwest","#24281A" ) airzone_select <- airzone lst_airzones <- az_mgmt %>% pull(airzone) airzone_exceedance_season <- exceedances$season airzone_exceedance <- exceedances$annual station_exceedance <- exceedances$annual_stations station_exceedance_season <- exceedances$season_stations lst_stations <- exceedances$stations year_select <- year colfunc <- colorRampPalette(c("blue", "red")) # station_exceedance$colorscaled <- color_scales[station_exceedance$days_exceed] if (is.null(map_a)) { #create map for annual station a <- leaflet(width = size[1],height = size[2], options = leafletOptions(attributionControl=FALSE, dragging = TRUE, minZoom = 4, maxZoom=10)) %>% set_bc_view(zoom=3.5) %>% # setView(zoom =5) %>% setMaxBounds(lng1 = 
-110,lat1=45,lng2=-137,lat2=62) %>% addProviderTiles(providers$Stamen.TonerLite, options = providerTileOptions(opacity = 1) ) %>% # addProviderTiles(providers$Stamen.TonerLabels) %>% add_bc_home_button() } else { a <- map_a } #add colour for the station circles max_days <- station_exceedance %>% filter(year == year_select) %>% pull(days_exceed) %>% max() #develop levels for the exceedance legends color_scales <- colfunc(max_days) for (airzone_ in lst_airzones) { if (0) { airzone_ <- lst_airzones[1] } liststations_ <- lst_stations %>% filter(AIRZONE == airzone_, year == year_select) station_exceedance_ <- station_exceedance %>% filter(AIRZONE == airzone_, year == year_select) lst_sites <- station_exceedance %>% filter(AIRZONE == airzone_) %>% pull(site) %>% unique() a <- a %>% addPolygons(data = az_mgmt %>% filter(airzone == airzone_), layerId = airzone_, color = 'black', fillColor = ~colour_01, weight = 1, opacity = 1, fillOpacity = 0.6, label = paste(airzone_,'Air Zone'), labelOptions = labelOptions(textsize = "15px"), highlight = highlightOptions(weight = 3, color = "blue", bringToFront = FALSE)) #add stations if (nrow(liststations_) >0) { # print('trace inside map function') # print(nrow(liststations_)) # print(year_select) a <- a %>% #remove all stations from that airzone removeMarker( layerId = lst_sites) %>% addCircleMarkers(lng=station_exceedance_$LONG, lat = station_exceedance_$LAT, layerId = station_exceedance_$site, label = station_exceedance_$site, color = color_scales[station_exceedance_$days_exceed+1], radius=3 ) # addMarkers(lng=liststations_$LONG, # lat=liststations_$LAT, # # layerId = liststations$AIRZONE, # # group = airzone_, # label = liststations_$site, # options=markerOptions()) } } if (is.null(map_a)) { #add legend based on color_scales tot_ <- length(color_scales) scl <- c(color_scales[5],color_scales[tot_/2],color_scales[tot_]) lbl <- c('<5 days',paste(round(tot_/2),'days'),paste('>',round(tot_*0.8),' days',sep='')) a <- a %>% 
addLegend(position ="bottomleft", colors = scl,label = lbl) } #add for selected airzone if (!is.null(airzone_select)) { print(paste('updating,highlighting area',airzone_select)) a <- a %>% addPolylines(data = az_mgmt %>% filter(airzone == airzone_select), layerId = 'selectedairzone', group = 'airzonehighlight', color = 'blue',weight = 5) } else { a <- a %>% clearGroup('airzonehighlight') } plot_a <- a return(a) } #' Get a count of PM exceedances #' #' get_PM_exceedancesummary <- function(dirs_location = './data/out') { # dirs_location <- './data/out' #local location, two dots for final, one dot for debug if (0) { dirs_location <- './data/out' } # list.files(dirs_location) print('get PM exceedance') # df_stations <- readRDS(paste(dirs_location,'liststations_merged.Rds',sep='/')) df_stations <- envair::listBC_stations(use_CAAQS = TRUE,merge_Stations = TRUE) lst_remove <- df_stations %>% filter(AQMS == 'N') %>% pull(site) #retrieve complete list of stations, to be included in resulting list for that year df_stationdata <- readr::read_csv(paste(dirs_location,'annual_results.csv',sep='/')) %>% filter(!is.na(value)) %>% select(site,year) %>% distinct() %>% filter(!site %in% lst_remove) %>% left_join(df_stations %>% select(site,LAT,LONG,AIRZONE)) #get TFEE list tfee_list <- readr::read_csv(paste(dirs_location,'tfee.csv',sep='/')) %>% left_join(df_stations %>% select(STATION_NAME,site)) %>% select(site,DATE,PARAMETER) %>% distinct() %>% filter(PARAMETER == 'PM25') %>% mutate(tfee = TRUE, DATE= as.Date(DATE)) # df_exceed_annual <- readRDS(paste(dirs_location,'exceed_annual.Rds',sep='/')) %>% # rename(site = STATION_NAME) # df_exceed_month<- readRDS(paste(dirs_location,'exceed_month.Rds',sep='/')) %>% # rename(site = STATION_NAME) # df_exceed_seasonal <- readRDS(paste(dirs_location,'exceed_seasonal.Rds',sep='/')) %>% # rename(site = STATION_NAME) # df_exceedances <- readRDS(paste(dirs_location,'exceedances.Rds',sep='/')) %>% # rename(site = STATION_NAME) df_exceedances <- 
read_csv(paste(dirs_location,'exceedances.csv',sep='/')) %>% rename(site = STATION_NAME) # colnames(df_stations) df_seasons <- tibble( month = 1:12 ) df_seasons$seasons[df_seasons$month %in% c(12,1,2)] <- 'Winter' df_seasons$seasons[df_seasons$month %in% c(3,4,5)] <- 'Spring' df_seasons$seasons[df_seasons$month %in% c(6,7,8)] <- 'Summer' df_seasons$seasons[df_seasons$month %in% c(9,10,11)] <- 'Fall' #need to summarize exceedances to air zone and entire province #do not add the number or will have spatial problems #count for air zones ##count for an entire year---- #no wildfire assessment df_exceedances_year <- df_exceedances %>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% mutate(year = lubridate::year(DATE)) %>% ungroup() %>% select(year,DATE,AIRZONE) %>% distinct() %>% group_by(year,AIRZONE) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() #wildfire (tfee) assessment df_exceedances_year_tfee <- df_exceedances %>% filter((PARAMETER == 'PM25' & name == 'exceed_27') )%>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% left_join(tfee_list) %>% filter(tfee == TRUE) %>% mutate(year = lubridate::year(DATE)) %>% ungroup() %>% select(year,DATE,AIRZONE) %>% distinct() %>% group_by(year,AIRZONE) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() df_exceedances_year_bc <- df_exceedances_year %>% select(year,DATE) %>% distinct() %>% group_by(year) %>% summarise(days_exceed = n()) %>% mutate(AIRZONE = 'BC') df_exceedances_year_bc_tfee <- df_exceedances_year_tfee %>% select(year,DATE) %>% distinct() %>% group_by(year) %>% summarise(days_exceed = n()) %>% mutate(AIRZONE = 'BC') df_exceedances_year <- df_exceedances_year %>% bind_rows(df_exceedances_year_bc) %>% select(year,AIRZONE,days_exceed) %>% distinct() %>% arrange(year,AIRZONE) df_exceedances_year_tfee <- df_exceedances_year_tfee %>% bind_rows(df_exceedances_year_bc_tfee) %>% select(year,AIRZONE,days_exceed) %>% distinct() %>% 
arrange(year,AIRZONE) ##count for entire seasons----- df_exceedances_season <- df_exceedances %>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% mutate(year = lubridate::year(DATE), month = lubridate::month(DATE)) %>% left_join(df_seasons) %>% select(year,seasons,DATE,AIRZONE) %>% distinct() %>% group_by(year,AIRZONE,seasons) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() df_exceedances_season_tfee <- df_exceedances %>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% left_join(tfee_list) %>% filter(tfee == TRUE) %>% mutate(year = lubridate::year(DATE), month = lubridate::month(DATE)) %>% left_join(df_seasons) %>% select(year,seasons,DATE,AIRZONE) %>% distinct() %>% group_by(year,AIRZONE,seasons) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() df_exceedances_season_bc <- df_exceedances_season %>% select(year,seasons,DATE) %>% distinct() %>% group_by(year,seasons) %>% summarise(days_exceed = n())%>% mutate(AIRZONE = 'BC') df_exceedances_season_bc_tfee <- df_exceedances_season_tfee %>% select(year,seasons,DATE) %>% distinct() %>% group_by(year,seasons) %>% summarise(days_exceed = n())%>% mutate(AIRZONE = 'BC') df_exceedances_season <- df_exceedances_season %>% bind_rows(df_exceedances_season_bc)%>% select(year,seasons,AIRZONE,days_exceed) %>% distinct() %>% arrange(year,AIRZONE) df_exceedances_season_tfee <- df_exceedances_season_tfee %>% bind_rows(df_exceedances_season_bc_tfee)%>% select(year,seasons,AIRZONE,days_exceed) %>% distinct() %>% arrange(year,AIRZONE) #create for each station df_exceedances_station_year <- df_exceedances%>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% mutate(year = lubridate::year(DATE)) %>% ungroup() %>% select(year,site,DATE,AIRZONE) %>% distinct() %>% group_by(year,site,AIRZONE) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() %>% 
select(year,site,AIRZONE,days_exceed) %>% distinct() df_exceedances_station_year_tfee <- df_exceedances%>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% left_join(tfee_list) %>% filter(tfee == TRUE) %>% mutate(year = lubridate::year(DATE)) %>% ungroup() %>% select(year,site,DATE,AIRZONE) %>% distinct() %>% group_by(year,site,AIRZONE) %>% dplyr::mutate(days_exceed = n()) %>% ungroup() %>% select(year,site,AIRZONE,days_exceed) %>% distinct() df_exceedances_station_seasonal <- df_exceedances %>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% mutate(year = lubridate::year(DATE), month = lubridate::month(DATE)) %>% left_join(df_seasons) %>% select(year,site,seasons,DATE,AIRZONE) %>% distinct() %>% group_by(year,site,AIRZONE,seasons) %>% dplyr::mutate(days_exceed = n()) %>% ungroup()%>% select(year,seasons,site,AIRZONE,days_exceed) %>% distinct() df_exceedances_station_seasonal_tfee <- df_exceedances %>% filter(PARAMETER == 'PM25',name == 'exceed_27') %>% filter(!site %in% lst_remove) %>% left_join(df_stations) %>% left_join(tfee_list) %>% filter(tfee == TRUE) %>% mutate(year = lubridate::year(DATE), month = lubridate::month(DATE)) %>% left_join(df_seasons) %>% select(year,site,seasons,DATE,AIRZONE) %>% distinct() %>% group_by(year,site,AIRZONE,seasons) %>% dplyr::mutate(days_exceed = n()) %>% ungroup()%>% select(year,seasons,site,AIRZONE,days_exceed) %>% distinct() #fix df_stationdata duplicates df_stationdata <- df_stationdata %>% group_by(site,AIRZONE,year) %>% dplyr::mutate(index=1:n()) %>% filter(index==1) %>% select(-index) %>% ungroup() #left join to display zero values df_exceedances_year <- df_stationdata %>% select(AIRZONE,year) %>% distinct() %>% bind_rows(tibble( AIRZONE ='BC', year = min(df_stationdata$year):max(df_stationdata$year) )) %>% left_join(df_exceedances_year) %>% mutate(days_exceed = 
ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_year_tfee <- df_stationdata %>% select(AIRZONE,year) %>% distinct() %>% bind_rows(tibble( AIRZONE ='BC',year = min(df_stationdata$year):max(df_stationdata$year) )) %>% filter(year>=2014) %>% left_join(df_exceedances_year_tfee) %>% mutate(days_exceed = ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_season <- df_stationdata %>% select(AIRZONE,year) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) %>% distinct() %>% bind_rows(tibble( AIRZONE ='BC', year = min(df_stationdata$year):max(df_stationdata$year) ) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) ) %>% left_join(df_exceedances_season) %>% mutate(days_exceed = ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_season_tfee <- df_stationdata %>% select(AIRZONE,year) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) %>% distinct() %>% bind_rows(tibble( AIRZONE ='BC', year = min(df_stationdata$year):max(df_stationdata$year) ) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) ) %>% filter(year >=2014) %>% left_join(df_exceedances_season_tfee) %>% mutate(days_exceed = ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_station_year <- df_stationdata %>% select(site,AIRZONE,year,LAT,LONG) %>% distinct() %>% left_join(df_exceedances_station_year) %>% mutate(days_exceed = ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_station_year_tfee <- df_stationdata %>% select(site,AIRZONE,year,LAT,LONG) %>% distinct() %>% filter(year>=2014) %>% left_join(df_exceedances_station_year_tfee) %>% mutate(days_exceed = ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_station_seasonal <- df_stationdata %>% select(site,AIRZONE,year,LAT,LONG) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) %>% distinct() %>% left_join(df_exceedances_station_seasonal) %>% mutate(days_exceed= ifelse(is.na(days_exceed),0,days_exceed)) df_exceedances_station_seasonal_tfee <- 
df_stationdata %>% select(site,AIRZONE,year,LAT,LONG) %>% merge(tibble(seasons = c('Winter','Spring','Summer','Fall'))) %>% distinct() %>% filter(year>=2014) %>% left_join(df_exceedances_station_seasonal_tfee) %>% mutate(days_exceed= ifelse(is.na(days_exceed),0,days_exceed)) df_exceedance <- list(annual = df_exceedances_year, annual_tfee = df_exceedances_year_tfee, season = df_exceedances_season, season_tfee = df_exceedances_season_tfee, annual_stations =df_exceedances_station_year, annual_stations_tfee = df_exceedances_station_year_tfee, season_stations = df_exceedances_station_seasonal, season_stations_tfee = df_exceedances_station_seasonal_tfee, stations = df_stationdata) return(df_exceedance) } #' Determine the air zone based on lat longs #' #' @param lat is the latitude, vector OK #' @param long is the longitude, vector OK get_airzone <- function(lat,long) { if (0) { latlong <- c(57.68,-120.614) } az_mgmt_gitURL <- 'https://github.com/bcgov/air-zone-reports/blob/master/data/out/az_mgmt.Rds?raw=true' az_mgmt <- readRDS(url(az_mgmt_gitURL)) #---------------- pnts <- data.frame( "x" = long, "y" = lat) # create a points collection pnts_sf <- do.call("st_sfc",c(lapply(1:nrow(pnts), function(i) {st_point(as.numeric(pnts[i, ]))}), list("crs" = 4326))) pnts_trans <- st_transform(pnts_sf, 2163) # apply transformation to pnts sf tt1_trans <- st_transform(az_mgmt, 2163) # apply transformation to polygons sf # intersect and extract state name pnts$airzone <- apply(st_intersects(tt1_trans, pnts_trans, sparse = FALSE), 2, function(col) { tt1_trans[which(col), ]$airzone }) return(pnts$airzone) } #' create graphs for days exceeding #' #' @param exceedances is the result of get_PM_exceedance() function graph_exceedance <- function(exceedances,AIRZONE = NULL,year = NULL) { if (0) { source('./level4_page/02_setup.R') dirs_location <- './data/out' AIRZONE <- NULL } require(tidyr) #set order for the seasons df_seasons <- tribble( ~seasons,~order, 'Winter',4, 'Spring',3, 
'Summer',2, 'Fall',1 ) year_select <- year df_colour <- tribble( ~airzone,~colour_01, "Northeast",'#CDC08C', "Georgia Strait" ,'#F4B5BD', "Southern Interior",'#9C964A', "Lower Fraser Valley",'#85D4E3', "Central Interior" ,'#FAD77B', "Coastal",'#CEAB07', "Northwest","#24281A" ) list_airzones <- df_colour$airzone # View(exceedances$season_stations) if (is.null(AIRZONE)) { AIRZONE <- 'BC' } df_annual <- exceedances$annual df_seasonal_tfee <- exceedances$season_tfee %>% mutate(tfee = TRUE) df_seasonal <- exceedances$season %>% mutate(tfee = FALSE) %>% bind_rows(df_seasonal_tfee) %>% pivot_wider(names_from = tfee, values_from =days_exceed) %>% dplyr::rename(`Wildfire Days` = `TRUE`,`Total` = `FALSE`) %>% mutate(`Wildfire Days` = ifelse(is.na(`Wildfire Days`),0,`Wildfire Days`))%>% mutate(`No Wildfire`= `Total` - `Wildfire Days`) %>% select(-Total) %>% pivot_longer(cols = c(`Wildfire Days`,`No Wildfire`)) %>% mutate(seasons = paste(seasons,'(',name,')',sep='')) vline <- function(x = 0, color = "red") { list( type = "line", y0 = 0, y1 = 1, yref = "paper", x0 = x, x1 = x, line = list(color = color,dash = 'dash') ) } p_annual <- df_annual %>% filter(!AIRZONE %in% c('BC',NA)) %>% filter(year>=1997) %>% plotly::plot_ly(x=~year,y=~days_exceed,color = ~AIRZONE, type='scatter',mode='lines',showlegend =T, hoverinfo ='y', hovertemplate = paste('%{y:,.0f}',' days',sep='') ) %>% layout(title = 'High PM<sub>2.5</sub> Levels in Air Zones', legend = list(orientation = 'h'), yaxis = list(title = 'Number of Days with High PM<sub>2.5</sub> Levels'), xaxis = list(title = 'Year') ) %>% plotly::layout(hovermode = 'x unified', barmode = 'stack') %>% layout(shapes = list(vline(year_select))) # event_register("plotly_click") #create results for BC, and all air zones { p_list <- NULL i_list <- NULL i <- 0 for (AIRZONE_ in c('BC',lst_airzones)) { i <- i +1 p_seasonal <- df_seasonal %>% left_join(df_seasons,by = 'seasons') %>% filter(AIRZONE == AIRZONE_) %>% filter(year >=2000) %>% 
plotly::plot_ly(x=~year,y=~value,color = ~reorder(seasons,order), type='bar',mode='bar',showlegend =T, hoverinfo ='y', hovertemplate = paste('%{y:,.0f}',' days',sep=''), colors = c("navajowhite2", "navajowhite3", "seagreen3", "seagreen4", "red3", "red4", "slategray2", 'slategray4' ) ) %>% layout(title = paste('<br>High PM<sub>2.5</sub> Levels for', ifelse(AIRZONE == 'BC',AIRZONE_, paste('the',AIRZONE_,'Air Zone') )), legend = list(orientation = 'h'), yaxis = list(title = 'Number of Days with High PM<sub>2.5</sub> Levels'), xaxis = list(title = 'Year') ) %>% plotly::layout(hovermode = 'x unified', barmode = 'stack',legend = list(x = 0.01, y = 0.9))%>% layout(shapes = list(vline(year_select))) # layout(annotations = list(x=2014,y=-1,yref='paper',xref='paper',text = 'wildfire counts only from 2014 onwards')) p_list[[i]] <- p_seasonal i_list <- c(i_list,AIRZONE_) } result <- list(plot_annual = p_annual,plot_seasonal = p_list,plot_definition = i_list, data = exceedances) } return(result) } #end of functions----- #define colours----- df_colour <- tribble( ~airzone,~colour_01, "Northeast",'#CDC08C', "Georgia Strait" ,'#F4B5BD', "Southern Interior",'#9C964A', "Lower Fraser Valley",'#85D4E3', "Central Interior" ,'#FAD77B', "Coastal",'#CEAB07', "Northwest","#24281A" ) lst_airzones <- df_colour$airzone #define external data------ exceedances <- get_PM_exceedancesummary(dirs_location) az_mgmt <- readr::read_rds(paste(dirs_location,'az_mgmt.Rds',sep='/')) %>% left_join(df_colour) print('Load az_mgmt complete') plots_list <- graph_exceedance(exceedances = exceedances,year = max(exceedances$annual$year)) print('Load Complete') #---SHINY SECTION---------------- # Actual shiny part ##ui section---- ui <- { fluidPage( h4(HTML('Fine Particulate Matter (PM<sub>2.5</sub>) Pollution')), tags$head( tags$style(HTML(" body { background-color: #f2efe9; } .container-fluid { background-color: #fff; width: 1200px; padding: 5px; } .topimg { width: 0px; display: block; margin: 0px auto 0px auto; 
} .title { text-align: center; } .toprow { margin: 5px 0px; padding: 5px; background-color: #38598a; } .filters { margin: 0px auto; } .shiny-input-container { width:100% !important; } .table { padding: 0px; margin-top: 0px; } .leaflet-top { z-index:999 !important; } ")) ), # h1("Fine Particulate Pollution", class = "title"), fluidRow(class = "toprow", fluidRow(class = 'filters', column(6, tags$style(type='text/css', '.selectize-input { font-size: 15px; line-height: 10px;} .selectize-dropdown { font-size: 16px; line-height: 20px; } .control-label {font-size: 24px; color: white !important;} .irs-min {font-size: 0px; color: white; !important} .irs-max {font-size: 0px; color: white;} .irs-single {font-size: 20px; color: white;} .irs-grid-text {font-size: 10px; color: white;}' ), sliderInput('year_slider',label ='Year', min = 1997, max = max(exceedances$annual$year), value = max(exceedances$annual$year), sep='') ))), fluidRow( column(4,h6(HTML("Number of Days with High PM<sub>2.5</sub> Levels</br>Click the map to select an air zone")), # fluidRow( leaflet::leafletOutput("map",height = '400px',width = '400px')), column(8,h6(HTML("Scroll through the graph to view the number of days with high PM<sub>2.5</sub> Levels.")), # div(style='height:400px;overflow-y: scroll;'), plotlyOutput("plot1",height = '400px',width = '800px') ) ) ) } ##server section---- server <- {shinyServer(function(input, output) { if (0) { map_exceedance(exceedances = exceedances, az_mgmt = az_mgmt, year = 2010) } #reactive_plot1 can carry over plots across events reactive_plot1 <- reactiveVal(plots_list) a <- map_exceedance(map_a = NULL,exceedances = exceedances ,az_mgmt = az_mgmt,year = max(exceedances$annual$year)) output$map <- renderLeaflet(a) output$plot1 <- renderPlotly({plot_out <- plots_list$plot_annual plot_out %>% ggplotly(source = 'plot1') #%>% event_register("plotly_click") }) observeEvent(input$year_slider, { print('Slider') print(input$year_slider) leafletProxy("map") %>% 
map_exceedance(exceedances = exceedances ,az_mgmt = az_mgmt,year =input$year_slider) plots_list <- graph_exceedance(exceedances = exceedances,year = input$year_slider) reactive_plot1(plots_list) #pass on value to reactive_plot1 # output$plot1 <- renderPlotly(plots_list$plot_annual) output$plot1 <- renderPlotly({ plot_out <- plots_list$plot_annual p <- plot_out %>% ggplotly(source = 'plot1') p }) }) #clicking the map, an airzone is selected observeEvent(input$map_shape_click, { p <- input$map_shape_click try({ airzone_select <- get_airzone(p$lat,p$lng) print(p$lat) print(p$lng) print(airzone_select) plots_list <- reactive_plot1() if (airzone_select != 'Northwest') { output$plot1 <- renderPlotly({ p <- plots_list$plot_seasonal[[which(plots_list$plot_definition == airzone_select)]] # Add a footnote footnote <- list( font = list(size = 10), x = 2000, y = 8, # xref = "paper", # yref = "paper", text = "*Counts for wildfire days before 2014 are not included", showarrow = FALSE ) p <- layout(p, annotations = list(footnote)) print(p) # Check if the footnote is included in the annotations list print('annotations inserted') # Return the plotly graph p }) print('Plot Refreshed') } leafletProxy("map") %>% map_exceedance(exceedances = exceedances ,az_mgmt = az_mgmt,year =input$year_slider,airzone = airzone_select) }) }) #clicking on the graph # observeEvent(event_data("plotly_click", source = "plot1"), { # values$plot.click.results <- event_data("plotly_click", source = "plot1") # print(values$plot.click.results) # }) }) } shinyApp(ui, server)
## Exploratory Data Analysis -- Programming Assignment 1, plot 1.
## Reads household electric power consumption data and draws a histogram of
## Global Active Power for 1-2 Feb 2007, saving it to plot1.png.

# Read the raw file: ';'-separated (read.csv2), '?' marks missing values,
# and keep every column as character for explicit conversion below.
elecdata <- read.csv2("household_power_consumption.txt", header = TRUE,
                      na.strings = "?", stringsAsFactors = FALSE)

# Keep only the two days of interest (dates are d/m/yyyy strings).
elecdata <- subset(elecdata, Date == "1/2/2007" | Date == "2/2/2007")

# Columns 3 to 9 hold the measurements; coerce character -> numeric in one
# vectorized pass instead of a scalar index loop.
elecdata[3:9] <- lapply(elecdata[3:9], function(col) as.numeric(as.character(col)))

# Widen the plot margins, remembering the old settings so they can be restored.
temp <- par(mar = c(5, 5, 4, 1))

# Histogram of global active power in red.
hist(elecdata$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "red")

# Copy the screen plot to a 480x480 PNG, then close that device (writes file).
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()

# Restore the original plotting margins.
par(temp)
/plot1.R
no_license
martinjhughes/ExData_Plotting1
R
false
false
990
r
## Exploratory data Analysis Programming Assignment 1
## This is a script that reads in electric power consumption data and generates the first
## exploratory plot (a histogram of Global Active Power) for a 2 day period in Feb 2007

# Read the ';'-separated file; '?' marks missing values; keep columns as character
elecdata<-read.csv2("household_power_consumption.txt", header=TRUE, na.strings = "?", stringsAsFactors=FALSE)
# Keep only the two days of interest (dates are d/m/yyyy strings)
elecdata<-subset(elecdata,Date == "1/2/2007" | Date == "2/2/2007")
for (i in 3:9) { # columns 3 to 9 hold the measurement columns
elecdata[,i] <- as.numeric(as.character(elecdata[,i])) # coerce character -> numeric
}
temp=par(mar = c(5, 5, 4, 1)) # widen the margins, remembering the old settings
# Histogram of global active power, in red
hist(elecdata$Global_active_power,main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency",col="red")
dev.copy(png,file = "plot1.png",width=480,height=480) ## copy the screen plot to a 480x480 PNG
dev.off() # close the PNG device (this writes the file)
par(temp) # restore the original margins
## Example: winter weather analysis for Mittadalen, season 2019.
library(dplyr)

# Load the bundled weather data set and keep only the 2019 records.
data(weather_mittadalen)
wx_2019 <- dplyr::filter(weather_mittadalen, year == 2019)

# Run the winter analyzer, starting the season at the first permanent snow
# and plotting the first-snow diagnostics as a side effect.
winter_2019 <- with(
  wx_2019,
  analyze_weather(date, snow_depth, prec, temp_min, temp_max, temp_avg,
                  start = "first_permanent_snow", plot_first_snow = TRUE)
)
winter_2019

# Snow depth, cumulative precipitation and the scaled precipitation/snow-depth
# ratio over the season, with a dashed reference line at 100.
ggplot(winter_2019, aes(date)) +
  geom_line(aes(y = snow_depth, color = "snow depth")) +
  geom_line(aes(y = cumulative_precitation, color = "cum prec")) +
  geom_line(aes(y = 100 * prec_snow_ratio, color = "100*cum prec/snow depth")) +
  geom_hline(yintercept = 100, linetype = 2) +
  ylim(0, 250) +
  labs(x = "Date", y = "Amount (mm)", color = "")
/examples/winter_analyzer_example.R
no_license
bniebuhr/witch
R
false
false
746
r
# Example usage of the winter analyzer on the bundled Mittadalen weather data.
library(dplyr)

data(weather_mittadalen)
# Restrict to the 2019 season.
weather_2019 <- weather_mittadalen %>% dplyr::filter(year == 2019)

# analyze_weather() is provided by this package; the season is anchored at the
# first permanent snow, and first-snow diagnostics are plotted as a side effect.
weather_condition_2019 <- with(weather_2019, analyze_weather(date, snow_depth, prec,
                              temp_min, temp_max, temp_avg,
                              start = "first_permanent_snow",
                              plot_first_snow = T))
weather_condition_2019

# Plot snow depth, cumulative precipitation and the scaled precipitation/snow
# ratio over the season, with a dashed reference line at 100.
weather_condition_2019 %>%
  ggplot(aes(date)) +
  geom_line(aes(y = snow_depth, color = "snow depth")) +
  geom_line(aes(y = cumulative_precitation, color = "cum prec")) +
  geom_line(aes(y = 100*prec_snow_ratio, color = "100*cum prec/snow depth")) +
  geom_hline(yintercept = 100, linetype = 2) +
  ylim(0, 250) +
  labs(x = "Date", y = "Amount (mm)", color = "")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/linter_tag_docs.R \name{common_mistakes_linters} \alias{common_mistakes_linters} \title{Common mistake linters} \description{ Linters highlighting common mistakes, such as duplicate arguments. } \seealso{ \link{linters} for a complete list of linters available in lintr. } \section{Linters}{ The following linters are tagged with 'common_mistakes': \itemize{ \item{\code{\link{duplicate_argument_linter}}} \item{\code{\link{equals_na_linter}}} \item{\code{\link{missing_argument_linter}}} \item{\code{\link{missing_package_linter}}} \item{\code{\link{redundant_equals_linter}}} \item{\code{\link{sprintf_linter}}} \item{\code{\link{unused_import_linter}}} } }
/man/common_mistakes_linters.Rd
permissive
cordis-dev/lintr
R
false
true
738
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/linter_tag_docs.R \name{common_mistakes_linters} \alias{common_mistakes_linters} \title{Common mistake linters} \description{ Linters highlighting common mistakes, such as duplicate arguments. } \seealso{ \link{linters} for a complete list of linters available in lintr. } \section{Linters}{ The following linters are tagged with 'common_mistakes': \itemize{ \item{\code{\link{duplicate_argument_linter}}} \item{\code{\link{equals_na_linter}}} \item{\code{\link{missing_argument_linter}}} \item{\code{\link{missing_package_linter}}} \item{\code{\link{redundant_equals_linter}}} \item{\code{\link{sprintf_linter}}} \item{\code{\link{unused_import_linter}}} } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R \name{permission_sets_delete_resources} \alias{permission_sets_delete_resources} \title{Delete a resource in a permission set} \usage{ permission_sets_delete_resources(id, name) } \arguments{ \item{id}{integer required. The ID for this permission set.} \item{name}{string required. The name of this resource.} } \value{ An empty HTTP response } \description{ Delete a resource in a permission set }
/man/permission_sets_delete_resources.Rd
no_license
cran/civis
R
false
true
497
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generated_client.R \name{permission_sets_delete_resources} \alias{permission_sets_delete_resources} \title{Delete a resource in a permission set} \usage{ permission_sets_delete_resources(id, name) } \arguments{ \item{id}{integer required. The ID for this permission set.} \item{name}{string required. The name of this resource.} } \value{ An empty HTTP response } \description{ Delete a resource in a permission set }
# Generation gene-pathway bipartite matrix

#' Generate the bipartite matrix from gene-pathway interaction networks
#'
#' @description The function \code{genepathwayAdj} generates the adjacency
#'   matrix of the gene-pathway bipartite network: entry [p, g] equals
#'   \code{gpweight} when gene g is annotated to pathway p, and 0 otherwise.
#'
#' @param drugAdj drug adjacency matrix (sparse matrix) generated from drug-drug
#'   association network via \code{AdjMatrix_drugs}. NOTE(review): currently
#'   unused in the body; kept for interface compatibility.
#' @param pathwayadj pathway-pathway association adjacency matrix (sparse
#'   matrix); its row names supply the pathway (KEGG) IDs of the result's rows.
#' @param geneadj gene-gene association network adjacency matrix (sparse
#'   matrix); its column names supply the gene symbols of the result's columns.
#' @param gpweight the weight of gene-pathway edges
#' @param load_dir path to load or save modeling data files. NOTE(review): only
#'   referenced by the commented-out \code{read.table()} call below.
#' @return \code{genepathwayAdj} gene-pathway adjacency matrix
#' @export
#'
#' @examples
#'
genepathwayAdj <- function(drugAdj, pathwayadj, geneadj, gpweight, load_dir){
  # load(system.file("data", "KEGG_pw_gene.RData", package = "DComboNet"))

  # Zero-initialised pathway x gene matrix labelled from the two networks.
  adj <- matrix(0, nrow(pathwayadj), nrow(geneadj))
  rownames(adj) <- rownames(pathwayadj)
  colnames(adj) <- colnames(geneadj)

  # genepathway = read.table(paste0(load_dir,"pathway/KEGG_ID_gene.txt"),sep="\t",header = T,stringsAsFactors = F)
  # NOTE(review): `genepathway` is not created in this function -- the load()/
  # read.table() calls above are commented out, so a KEGG pathway-to-gene
  # mapping table named `genepathway` must already exist in the enclosing
  # environment; confirm before use. Renaming below makes a local copy.
  names(genepathway) <- c("KEGG_ID", "pathway_name", "Gene")

  pathways <- rownames(pathwayadj)
  # seq_len() is safe when pathwayadj has zero rows (unlike 1:nrow()).
  for (i in seq_len(nrow(pathwayadj))) {
    # All member genes annotated to this pathway; drop NA annotations.
    gene <- genepathway[which(genepathway$KEGG_ID == pathways[i]), ]$Gene
    gene <- gene[!is.na(gene)]

    # Column positions of those genes via a single match() instead of a
    # per-gene which(%in%) scan (assumes geneadj column names are unique);
    # genes absent from the matrix yield NA and are skipped.
    idx <- match(gene, colnames(adj))
    idx <- idx[!is.na(idx)]
    if (length(idx) > 0) {
      adj[i, idx] <- gpweight
    }
  }

  adj
}
/R/GenePathwayNet.R
no_license
FuChunjin/DComboNet
R
false
false
1,943
r
# Generation gene-pathway bipartite matrix

#' Generate the bipartite matrix from gene-pathway interaction networks
#' @description The function \code{genepathwayAdj} is to generate the adjacency
#'   matrix from gene-pathway interaction network: entry [pathway, gene] is set
#'   to \code{gpweight} when the gene is annotated to the pathway, 0 otherwise.
#' @param drugAdj drug adjacency matrix (sparse matrix) generated from drug-drug
#'   association network via \code{AdjMatrix_drugs} (currently unused in the body)
#' @param pathwayadj pathway-pathway association adjacency matrix (sparse
#'   matrix); its row names supply the pathway (KEGG) IDs
#' @param geneadj gene-gene association network adjacency matrix (sparse matrix);
#'   its column names supply the gene symbols
#' @param gpweight the weight of gene-pathway edges
#' @param load_dir path to load or save modeling data files (only referenced by
#'   the commented-out read.table() call below)
#' @return \code{genepathwayAdj} gene-pathway adjacency matrix
#' @export
#'
#' @examples
#'
genepathwayAdj <- function(drugAdj, pathwayadj, geneadj, gpweight, load_dir){
  # load(system.file("data", "KEGG_pw_gene.RData", package = "DComboNet"))
  # Zero matrix: one row per pathway, one column per gene.
  genepathwayAdj = matrix(0,nrow(pathwayadj),nrow(geneadj))
  rownames(genepathwayAdj) = rownames(pathwayadj)
  colnames(genepathwayAdj) = colnames(geneadj)
  pathways <- rownames(pathwayadj)
  N = nrow(pathwayadj)
  # genepathway = read.table(paste0(load_dir,"pathway/KEGG_ID_gene.txt"),sep="\t",header = T,stringsAsFactors = F)
  # NOTE(review): `genepathway` is not defined in this function -- the load()/
  # read.table() calls above are commented out, so it must already exist in the
  # calling environment (a KEGG pathway-to-gene mapping table); confirm.
  names(genepathway) = c("KEGG_ID","pathway_name","Gene")
  for(i in 1:N){
    pathway <- pathways[i]
    # All genes annotated to this KEGG pathway.
    gene <- genepathway[which(genepathway$KEGG_ID == pathway),]$Gene
    if(length(gene) > 0){
      for(j in 1:length(gene)){
        if(!is.na(gene[j])){
          # identify the gene's column position on the matrix.
          index_gene <- which(colnames(genepathwayAdj) %in% gene[j])
          # Write the edge weight only when the gene maps to exactly one column.
          if(length(index_gene) == 1){
            genepathwayAdj[i,index_gene] <- gpweight
          }
        }
      }
    }
  }
  return(genepathwayAdj)
}
## Springleaf Marketing Response -- XGBoost model combining raw features,
## engineered date features and meta-features stacked from earlier submissions.
library(caret); library(lubridate) ; library(readr) ; library(xgboost)

# Raw competition data.
train <- read_csv("D:/kaggle/Springleaf/DATA/CSV/train.csv")
test <- read_csv("D:/kaggle/Springleaf/DATA/CSV/test.csv")

## Pull out the response and ID columns before feature processing.
response <- train$target
train_ID <- train$ID
test_ID <- test$ID

## Drop ID/target so train and test share the same columns.
training <- subset(train, select = -c(ID, target))
testing <- subset(test, select = -c(ID))
dim(training); dim(testing)

## Process train + test together so the transformations stay consistent. ----
tmp <- rbind(training, testing)
tmp_num <- tmp[, sapply(tmp, is.numeric)]
dim(tmp_num)
tmp_char <- tmp[, sapply(tmp, is.character)]
dim(tmp_char)

# Distinct-value counts per column; constant numeric columns carry no signal.
numeric_ele <- (lapply(tmp_num, function(x) length(unique(x))))
char_ele <- (lapply(tmp_char, function(x) length(unique(x))))
numeric_one <- subset(numeric_ele , subset = c(numeric_ele == 1))
names(numeric_one)
# VAR_0227 / VAR_0228 are identifier-like columns -- drop them as well.
ids <- c("VAR_0227", "VAR_0228")
remove_col <- c(names(numeric_one), ids)
tmp <- tmp[, !(names(tmp) %in% remove_col)]
dim(tmp)

## Re-split into numeric / character views after the column drop. ----
tmp_num <- tmp[, sapply(tmp, is.numeric)]
tmp_char <- tmp[, sapply(tmp, is.character)]

## Date feature engineering. ----
# Character columns whose values contain month-year stamps (e.g. "08JAN12").
tmp_date <- tmp_char[, grep("JAN1|FEB1|MAR1", tmp_char)]
dim(tmp_date)
tmp_dates <- data.frame(sapply(tmp_date, function(x) strptime(x, "%d%B%y :%H:%M:%S")))

tmp_year <- data.frame(sapply(tmp_dates, function(x) year(x)))
names(tmp_year) <- paste("YEAR_", names(tmp_year), sep = "")
tmp_wday <- data.frame(sapply(tmp_dates, function(x) wday(x)))
names(tmp_wday) <- paste("WDAY_", names(tmp_wday), sep = "")
tmp_month <- data.frame(sapply(tmp_dates, function(x) month(x)))
names(tmp_month) <- paste("MONTH_", names(tmp_month), sep = "")
tmp_yday <- data.frame(sapply(tmp_dates, function(x) yday(x)))
names(tmp_yday) <- paste("YDAY_", names(tmp_yday), sep = "")
tmp_mday <- data.frame(sapply(tmp_dates, function(x) mday(x)))
names(tmp_mday) <- paste("MDAY_", names(tmp_mday), sep = "")
tmp_weekdays <- data.frame(sapply(tmp_dates, function(x) weekdays(x)))
# FIX: these previously received the "YEAR_" prefix, colliding with tmp_year's
# column names after cbind().
names(tmp_weekdays) <- paste("WEEKDAY_", names(tmp_weekdays), sep = "")
tmp_hour <- data.frame(sapply(tmp_dates, function(x) hour(x)))
names(tmp_hour) <- paste("HOUR_", names(tmp_hour), sep = "")
tmp_minute <- data.frame(sapply(tmp_dates, function(x) minute(x)))
names(tmp_minute) <- paste("MINUTE_", names(tmp_minute), sep = "")
tmp_second <- data.frame(sapply(tmp_dates, function(x) second(x)))
names(tmp_second) <- paste("SECOND_", names(tmp_second), sep = "")

## Drop the raw date columns; keep the derived features. ----
tmp <- tmp[, !(names(tmp) %in% names(tmp_date))]
dim(tmp)
# NOTE(review): MONTH_/MDAY_ features are computed above but left out of
# `dates` -- confirm whether they should be included.
dates <- cbind(tmp_year, tmp_yday, tmp_weekdays, tmp_wday, tmp_second, tmp_hour, tmp_minute)
## further modification: when will people most likely take a loan?

## Integer-encode character columns with a level set shared by train/test. ----
training <- tmp[1:145231, ]
testing <- tmp[(nrow(training)+1):nrow(tmp), ]
dim(training); dim(testing)

for (f in names(training)) {
  if (is.character(training[[f]])) {
    levels <- unique(c(training[[f]], testing[[f]]))
    training[[f]] <- as.integer(factor(training[[f]], levels = levels))
    testing[[f]] <- as.integer(factor(testing[[f]], levels = levels))
  }
}
# Flag missing values with -1 for XGBoost.
training[is.na(training)] <- -1
testing[is.na(testing)] <- -1
dates[is.na(dates)] <- -1

## Attach the date features. ----
tmp <- rbind(training, testing)
tmp <- cbind(tmp, dates)
training <- tmp[1:145231, ]
testing <- tmp[(nrow(training)+1):nrow(tmp), ]
dim(training); dim(testing)
gc(); rm(train); rm(test)

## Meta-features: predictions from earlier submissions. ----
benchmark <- read_csv("D:/kaggle/Springleaf/SUBMISSION/second.csv")
first <- read_csv("D:/kaggle/Springleaf/SUBMISSION/third.csv")
second <- read_csv("D:/kaggle/Springleaf/SUBMISSION/third_first.csv")
third <- read_csv("D:/kaggle/Springleaf/SUBMISSION/fourth.csv")
fourth <- read_csv("D:/kaggle/Springleaf/sixth.csv")
fifth <- read_csv("D:/kaggle/Springleaf/eight.csv") # NOTE(review): read but never used

# NOTE(review): the training-side meta-features reuse the first 145231 TEST
# predictions, not out-of-fold training predictions -- confirm intent.
training$feature1 <- benchmark$target[1:145231]
testing$feature1 <- benchmark$target
training$feature2 <- first$target[1:145231]
testing$feature2 <- first$target
training$feature3 <- second$target[1:145231]
testing$feature3 <- second$target
training$feature4 <- third$target[1:145231]
testing$feature4 <- third$target
training$feature5 <- fourth$target[1:145231]
testing$feature5 <- fourth$target

# FIX: capture the model's feature list AFTER the date features and the
# meta-features have been attached; previously it was taken before
# cbind(dates) and before feature1..feature5 existed, so none of the
# engineered features ever reached the model.
feature.names <- names(training)

dtraining <- xgb.DMatrix(data.matrix(training[, feature.names]), label = response)

param <- list(
  objective        = "binary:logistic",
  eta              = 0.014,
  max_depth        = 10,
  subsample        = 0.7,
  colsample_bytree = 0.7,
  eval_metric      = "auc"
)

#cv <- xgb.cv(params = param, data = dtraining, nrounds = 700, nfold = 5,
#             showsd = TRUE, metrics = "auc", verbose = 2, maximize = TRUE)

clf_first <- xgb.train(
  params = param,
  data = dtraining,
  nrounds = 2000, # changed from 300
  verbose = 2,
  maximize = TRUE
)

## Build and write the submission file. ----
submission_second <- data.frame(ID = test_ID)
submission_second$target <- predict(clf_first, data.matrix(testing[, feature.names]))
write_csv(submission_second, "ten.csv")
/Springleaf/seventh_2.R
no_license
ronroc/kaggle
R
false
false
5,985
r
## Springleaf Marketing Response -- XGBoost model combining raw features,
## engineered date features and meta-features from earlier submissions.
library(caret); library(lubridate) ; library(readr) ; library(xgboost)

# Raw competition data.
train <- read_csv("D:/kaggle/Springleaf/DATA/CSV/train.csv")
test <- read_csv("D:/kaggle/Springleaf/DATA/CSV/test.csv")

##REMOVE RESPONSE AND ID VARIABLES
response <- train$target
train_ID <- train$ID
test_ID <- test$ID

##MODIFY train and test set -- drop ID/target so both share the same columns
training <- subset(train, select = -c(ID, target))
testing <- subset(test, select = -c(ID))
dim(training); dim(testing)

## Process train and test together so the transformations stay consistent ----
tmp <- rbind(training, testing)
tmp_num <- tmp[, sapply(tmp, is.numeric)]
dim(tmp_num)
tmp_char <- tmp[,sapply(tmp, is.character)]
dim(tmp_char)

# Distinct-value counts per column; constant numeric columns carry no signal.
numeric_ele <- (lapply(tmp_num, function(x) length(unique(x))))
char_ele <- (lapply(tmp_char, function(x) length(unique(x))))
numeric_one <- subset(numeric_ele , subset = c(numeric_ele == 1))
names(numeric_one)
# VAR_0227 / VAR_0228 look identifier-like, so drop them as well.
ids <- c("VAR_0227", "VAR_0228")
remove_col <- c(names(numeric_one), ids)
tmp <- tmp[, !(names(tmp) %in% (remove_col))]
dim(tmp)

## Re-split into numeric / character views after the column drop ----
tmp_num <- tmp[, sapply(tmp, is.numeric)]
tmp_char <- tmp[,sapply(tmp, is.character)]

## Date feature engineering ----
# Character columns whose values contain month-year stamps such as "08JAN12".
tmp_date <- tmp_char[, grep("JAN1|FEB1|MAR1", tmp_char)]
dim(tmp_date)
tmp_dates <- data.frame(sapply(tmp_date, function(x) strptime(x, "%d%B%y :%H:%M:%S")))

tmp_year <- data.frame(sapply(tmp_dates, function(x) year(x)))
names(tmp_year) <- paste("YEAR_", names(tmp_year), sep = "")
tmp_wday <- data.frame(sapply(tmp_dates, function(x) wday(x)))
names(tmp_wday) <- paste("WDAY_", names(tmp_wday), sep = "")
tmp_month <- data.frame(sapply(tmp_dates, function(x) month(x)))
names(tmp_month) <- paste("MONTH_", names(tmp_month), sep = "")
tmp_yday <- data.frame(sapply(tmp_dates, function(x) yday(x)))
names(tmp_yday) <- paste("YDAY_", names(tmp_yday), sep = "")
tmp_mday <- data.frame(sapply(tmp_dates, function(x) mday(x)))
names(tmp_mday) <- paste("MDAY_", names(tmp_mday), sep = "")
tmp_weekdays <- data.frame(sapply(tmp_dates, function(x) weekdays(x)))
# NOTE(review): the "YEAR_" prefix here duplicates tmp_year's column names --
# likely meant to be "WEEKDAY_"; confirm before relying on names downstream.
names(tmp_weekdays) <- paste("YEAR_", names(tmp_weekdays), sep = "")
tmp_hour <- data.frame(sapply(tmp_dates, function(x) hour(x)))
names(tmp_hour) <- paste("HOUR_", names(tmp_hour), sep = "")
tmp_minute <- data.frame(sapply(tmp_dates, function(x) minute(x)))
names(tmp_minute) <- paste("MINUTE_", names(tmp_minute), sep = "")
tmp_second <- data.frame(sapply(tmp_dates, function(x) second(x)))
names(tmp_second) <- paste("SECOND_", names(tmp_second), sep = "")

## Drop the raw date columns; keep the derived features ----
tmp <- tmp[, !(names(tmp) %in% names(tmp_date))]
dim(tmp)
# NOTE(review): MONTH_/MDAY_ are computed above but not included here -- confirm.
dates <- cbind( tmp_year, tmp_yday, tmp_weekdays, tmp_wday, tmp_second, tmp_hour, tmp_minute)
##further modification when will people most likely take a loan

## Integer-encode character columns with a level set shared by train/test ----
training <- tmp[1:145231,]
testing <- tmp[(nrow(training)+1): nrow(tmp), ]
dim(training); dim(testing)

# NOTE(review): feature.names is captured HERE, before `dates` and the
# feature1..feature5 meta-features are attached below, so those columns never
# reach the model -- confirm whether that is intentional.
feature.names <- names(training)

for (f in feature.names) {
  if (class(training[[f]])=="character") {
    levels <- unique(c(training[[f]], testing[[f]]))
    training[[f]] <- as.integer(factor(training[[f]], levels=levels))
    testing[[f]] <- as.integer(factor(testing[[f]], levels=levels))
  }
}
# Flag missing values with -1.
training[is.na(training)] <- -1
testing[is.na(testing)] <- -1
dates[is.na(dates)] <- -1

## Attach the date features ----
tmp <- rbind(training, testing)
tmp <- cbind(tmp, dates)
training <- tmp[1:145231,]
testing <- tmp[(nrow(training)+1): nrow(tmp), ]
dim(training); dim(testing)
gc(); rm(train);rm(test)

## Meta-features: predictions from earlier submissions ----
benchmark <- read_csv("D:/kaggle/Springleaf/SUBMISSION/second.csv")
first <- read_csv("D:/kaggle/Springleaf/SUBMISSION/third.csv")
second <- read_csv("D:/kaggle/Springleaf/SUBMISSION/third_first.csv")
third <- read_csv("D:/kaggle/Springleaf/SUBMISSION/fourth.csv")
fourth <- read_csv("D:/kaggle/Springleaf/sixth.csv")
fifth <- read_csv("D:/kaggle/Springleaf/eight.csv") # NOTE(review): read but never used

# NOTE(review): the training-side meta-features reuse the first 145231 TEST
# predictions (not out-of-fold training predictions) -- confirm intent.
feature_1 <- benchmark$target[1:145231]
training$feature1 <- feature_1
testing$feature1 <- benchmark$target

feature_2 <- first$target[1:145231]
training$feature2 <- feature_2
testing$feature2 <- first$target

feature_3 <- second$target[1:145231]
training$feature3 <- feature_3
testing$feature3 <- second$target

feature_4 <- third$target[1:145231]
training$feature4 <- feature_4
testing$feature4 <- third$target

feature_5 <- fourth$target[1:145231]
training$feature5 <- feature_5
testing$feature5 <- fourth$target

# Training matrix restricted to feature.names (see note above).
dtraining <- xgb.DMatrix(data.matrix(training[,feature.names]), label= response)

param <- list(  objective           = "binary:logistic",
                eta                 = 0.014,
                max_depth           = 10,
                subsample           = 0.7,
                colsample_bytree    = 0.7,
                eval_metric = "auc"
)

#cv <- xgb.cv(params = param,data = dtraining,nrounds = 700, nfold = 5, showsd = T, metrics = "auc"
#, verbose = 2, maximize = TRUE)

clf_first <- xgb.train(   params = param,
                          data = dtraining,
                          nrounds = 2000, # changed from 300
                          verbose = 2,
                          maximize = TRUE)

## Build and write the submission file ----
submission_second <- data.frame(ID=test_ID)
submission_second$target <- NA
submission_second[,"target"] <- predict(clf_first, data.matrix(testing[,feature.names]))
write_csv(submission_second, "ten.csv")
## Statistical analysis of the association between the number of schools in a
## South Korean region (province / city) and the number of COVID-19 patients.
library(data.table)
# FIX: only install missing packages instead of reinstalling on every run.
if (!requireNamespace("bit64", quietly = TRUE)) install.packages('bit64')
library(bit64)
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
library(ggplot2)
library(ggrepel)
library(magrittr)

patient <- fread("PatientInfo.csv", na.strings = "")
region <- fread("Region.csv")

# Analysis of correlation between the number of schools in a province and patients

##### 1. Count patients per province. In Region.csv the province-level summary
# rows appear to be those where province == city, so use them for the school
# counts (kindergartens, elementary schools, universities).
province_patient <- patient[, .(patient_count = .N), by = province]
province_region <- region[province == city,
                          .(province, elementary_school_count,
                            kindergarten_count, university_count)]

##### 2. Merge the two tables and melt, creating a `school` variable and a
# `school_count` value.
merge_province_patient <- merge(province_patient, province_region)
melt_province_patient <- melt(merge_province_patient,
                              id.vars = c("province", "patient_count"),
                              variable.name = "school",
                              value.name = "school_count")

##### 3. Scatter plot: a correlation between schools in each province and the
# number of patients appears to exist.
ggplot(melt_province_patient, aes(school_count, patient_count, label = province))+
  geom_point()+
  facet_grid(~school, scales= c("free_x"))+
  geom_text_repel()

##### 4. Confirm with cor.test. The correlation between the number of
# universities and the number of patients (~0.83) is the strongest.
cor.test(merge_province_patient$elementary_school_count, merge_province_patient$patient_count)
cor.test(merge_province_patient$kindergarten_count, merge_province_patient$patient_count)
cor.test(merge_province_patient$university_count, merge_province_patient$patient_count)
cor.test(merge_province_patient$elementary_school_count, merge_province_patient$patient_count, method = "spearman")
cor.test(merge_province_patient$kindergarten_count, merge_province_patient$patient_count, method = "spearman")
cor.test(merge_province_patient$university_count, merge_province_patient$patient_count, method = "spearman")

# Analysis of correlation between the number of schools in a city and patients

##### 1. Count patients per city, and sum the school counts per city.
city_patient <- patient[, .(patient_count = .N), by = city]
city_region <- region[, .(sum_elementary_school = sum(elementary_school_count),
                          sum_kindergarden = sum(kindergarten_count),
                          sum_university = sum(university_count)), by = city]

##### 2. Merge and melt as above.
merge_city_patient <- merge(city_patient, city_region)
melt_city_patient <- melt(merge_city_patient,
                          id.vars = c("city", "patient_count"),
                          variable.name = "school",
                          value.name = "school_count")

##### 3. Scatter plot: no clear correlation at the city level.
ggplot(melt_city_patient, aes(school_count, patient_count))+
  geom_point()+
  facet_grid(~school, scales= c("free_x"))

##### 4. cor.test at the city level: all correlations are below 0.4.
# FIX: the city-level aggregation named its columns sum_*; the previous code
# referenced elementary_school_count etc., which do not exist in
# merge_city_patient (so cor.test was called on NULL).
cor.test(merge_city_patient$sum_elementary_school, merge_city_patient$patient_count)
cor.test(merge_city_patient$sum_kindergarden, merge_city_patient$patient_count)
cor.test(merge_city_patient$sum_university, merge_city_patient$patient_count)
/association between schools and locations.R
no_license
cory1219/Statistical-Testing-on-correlation-of-COVID-19-in-South-Korea
R
false
false
3,827
r
# Statistical testing of the correlation between the number of schools and the
# number of COVID-19 patients in South Korea, at province level and at city
# level.
#
# NOTE: install.packages() calls removed — installing on every run is a side
# effect; install bit64/ggplot2 once, interactively.
library(data.table)
library(bit64)      # PatientInfo.csv contains 64-bit integer ids
library(ggplot2)
library(ggrepel)
library(magrittr)

patient <- fread("PatientInfo.csv", na.strings = "")
region <- fread("Region.csv")

# Analysis of correlation between the number of schools in a province and patients

# 1. Count patients per province, and keep the province-level rows of the
#    region table (rows where province == city) with the school-count columns
#    (kindergartens, elementary schools, universities).
province_patient <- patient[, .(patient_count = .N), by = province]
province_region <- region[
  province == city,
  .(province, elementary_school_count, kindergarten_count, university_count)
]

# 2. Merge the two tables and melt into long format: one row per
#    province x school type, with a school_count value column.
merge_province_patient <- merge(province_patient, province_region)
melt_province_patient <- melt(
  merge_province_patient,
  id.vars = c("province", "patient_count"),
  variable.name = "school",
  value.name = "school_count"
)

# 3. Scatter plot, one panel per school type: a correlation between school
#    counts and patient counts is visible at the province level.
ggplot(melt_province_patient,
       aes(school_count, patient_count, label = province)) +
  geom_point() +
  facet_grid(~school, scales = "free_x") +
  geom_text_repel()

# 4. Confirm the correlation with cor.test. The correlation between the
#    number of universities and the number of patients (~0.83) is the
#    strongest of the three.
cor.test(merge_province_patient$elementary_school_count,
         merge_province_patient$patient_count)
cor.test(merge_province_patient$kindergarten_count,
         merge_province_patient$patient_count)
cor.test(merge_province_patient$university_count,
         merge_province_patient$patient_count)
cor.test(merge_province_patient$elementary_school_count,
         merge_province_patient$patient_count, method = "spearman")
cor.test(merge_province_patient$kindergarten_count,
         merge_province_patient$patient_count, method = "spearman")
cor.test(merge_province_patient$university_count,
         merge_province_patient$patient_count, method = "spearman")

# Analysis of correlation between the number of schools in a city and patients

# 1. Count patients per city, and sum the school counts per city.
city_patient <- patient[, .(patient_count = .N), by = city]
city_region <- region[, .(
  sum_elementary_school = sum(elementary_school_count),
  sum_kindergarten = sum(kindergarten_count),
  sum_university = sum(university_count)
), by = city]

# 2. Merge and melt, as above.
merge_city_patient <- merge(city_patient, city_region)
melt_city_patient <- melt(
  merge_city_patient,
  id.vars = c("city", "patient_count"),
  variable.name = "school",
  value.name = "school_count"
)

# 3. Scatter plot: no clear correlation at the city level.
ggplot(melt_city_patient, aes(school_count, patient_count)) +
  geom_point() +
  facet_grid(~school, scales = "free_x")

# 4. Quantify with cor.test: all correlations are below 0.4.
#    FIX: the original referenced merge_city_patient$elementary_school_count
#    (and $kindergarten_count, $university_count), but the city-level summed
#    columns are named sum_*, so cor.test() was being called on NULL and
#    errored. Use the columns that actually exist in merge_city_patient.
cor.test(merge_city_patient$sum_elementary_school,
         merge_city_patient$patient_count)
cor.test(merge_city_patient$sum_kindergarten,
         merge_city_patient$patient_count)
cor.test(merge_city_patient$sum_university,
         merge_city_patient$patient_count)
# k-NN classification of reader gender from Age and ad Impressions, using one
# day of NYT web-traffic data chosen interactively.
nyt1 <- read.csv(file.choose())

# Keep only rows with positive impressions, clicks, and age.
nyt1 <- nyt1[which(nyt1$Impressions > 0 & nyt1$Clicks > 0 & nyt1$Age > 0), ]
nnyt1 <- dim(nyt1)[1]

# 90/10 train/test split.
# FIX: floor() the sample size — sampling.rate * nnyt1 is fractional whenever
# nnyt1 is not a multiple of 10, and sample() expects an integral size.
# seq_len() is used instead of 1:nnyt1, which would misbehave for nnyt1 == 0.
sampling.rate <- 0.9
num.test.set.labels <- nnyt1 * (1 - sampling.rate)
training <- sample(seq_len(nnyt1), floor(sampling.rate * nnyt1),
                   replace = FALSE)
train <- subset(nyt1[training, ], select = c(Age, Impressions))
testing <- setdiff(seq_len(nnyt1), training)
test <- subset(nyt1[testing, ], select = c(Age, Impressions))

# Class labels: known genders for the training rows, held-out truth for test.
cg <- nyt1$Gender[training]
true.labels <- nyt1$Gender[testing]

library(class)
classif <- knn(train, test, cg, k = 5)

# FIX: inspect the fitted object directly. The original used
# attributes(.Last.value), which only works interactively on the very next
# top-level expression after the knn() call and is meaningless in a script.
attributes(classif)
/lab4/lab1_nyt.R
no_license
hangyenli/DataAnalytics_2021_Honghao_Li
R
false
false
552
r
# k-NN classification of reader gender from Age and ad Impressions, using one
# day of NYT web-traffic data chosen interactively.
nyt1 <- read.csv(file.choose())

# Keep only rows with positive impressions, clicks, and age.
nyt1 <- nyt1[which(nyt1$Impressions > 0 & nyt1$Clicks > 0 & nyt1$Age > 0), ]
nnyt1 <- dim(nyt1)[1]

# 90/10 train/test split.
# FIX: floor() the sample size — sampling.rate * nnyt1 is fractional whenever
# nnyt1 is not a multiple of 10, and sample() expects an integral size.
# seq_len() is used instead of 1:nnyt1, which would misbehave for nnyt1 == 0.
sampling.rate <- 0.9
num.test.set.labels <- nnyt1 * (1 - sampling.rate)
training <- sample(seq_len(nnyt1), floor(sampling.rate * nnyt1),
                   replace = FALSE)
train <- subset(nyt1[training, ], select = c(Age, Impressions))
testing <- setdiff(seq_len(nnyt1), training)
test <- subset(nyt1[testing, ], select = c(Age, Impressions))

# Class labels: known genders for the training rows, held-out truth for test.
cg <- nyt1$Gender[training]
true.labels <- nyt1$Gender[testing]

library(class)
classif <- knn(train, test, cg, k = 5)

# FIX: inspect the fitted object directly. The original used
# attributes(.Last.value), which only works interactively on the very next
# top-level expression after the knn() call and is meaningless in a script.
attributes(classif)
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/multipass.cppls.R
\name{multipass.cppls}
\alias{multipass.cppls}
\title{Wrapper for cppls function with multiple pass and outliers elimination}
\usage{
multipass.cppls(formula, ncomp, validation, data, nbpass, out = 2.5, Y.add)
}
\arguments{
\item{formula}{a model formula. Most of the lm formula constructs are supported. See below.}

\item{ncomp}{the number of components to include in the model (see below).}

\item{validation}{character. What kind of (internal) validation to use}

\item{data}{an optional data frame with the data to fit the model from.}

\item{nbpass}{Number of passes}

\item{out}{T value threshold for outlier elimination}

\item{Y.add}{a vector or matrix of additional responses containing relevant information about the observations. Only used for cppls.}
}
\value{
A list with the following components: outliers, a vector with outlier names as deduced from data row.names; plsr, the pls model object from the final pass; ncomp, the number of components to be further used; pass, a vector of the same length as outliers indicating at which pass each outlier was removed.
}
\description{
Wrapper for cppls function with multiple pass and outliers elimination
}
\examples{
\dontrun{
}
}
\author{
J.-F. Rami \email{rami@cirad.fr}
}
/man/multipass.cppls.Rd
no_license
jframi/multipass.pls
R
false
false
1,329
rd
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/multipass.cppls.R
\name{multipass.cppls}
\alias{multipass.cppls}
\title{Wrapper for cppls function with multiple pass and outliers elimination}
\usage{
multipass.cppls(formula, ncomp, validation, data, nbpass, out = 2.5, Y.add)
}
\arguments{
\item{formula}{a model formula. Most of the lm formula constructs are supported. See below.}

\item{ncomp}{the number of components to include in the model (see below).}

\item{validation}{character. What kind of (internal) validation to use}

\item{data}{an optional data frame with the data to fit the model from.}

\item{nbpass}{Number of passes}

\item{out}{T value threshold for outlier elimination}

\item{Y.add}{a vector or matrix of additional responses containing relevant information about the observations. Only used for cppls.}
}
\value{
A list with the following components: outliers, a vector with outlier names as deduced from data row.names; plsr, the pls model object from the final pass; ncomp, the number of components to be further used; pass, a vector of the same length as outliers indicating at which pass each outlier was removed.
}
\description{
Wrapper for cppls function with multiple pass and outliers elimination
}
\examples{
\dontrun{
}
}
\author{
J.-F. Rami \email{rami@cirad.fr}
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DRaWR.R
\name{DRaWR}
\alias{DRaWR}
\title{DRaWR}
\usage{
DRaWR(possetfile = "data/sample_inputs/test.setlist",
  unifile = "data/sample_inputs/test.uni",
  networkfile = "data/sample_inputs/test.edge",
  outdir = "data/sample_outputs/results_", restarts = c(0.7), nfolds = 1,
  st2keep = 1, undirected = TRUE, unweighted = FALSE, normalize = "type",
  maxiters = 50, thresh = 1e-04, property_types = c("allen_brain_atlas",
  "chip_binding", "gene_ontology", "motif_u5", "pfam_domain", "T1", "T2"),
  writepreds = 0)
}
\arguments{
\item{possetfile}{(string): location of file containing location of gene sets to test.}

\item{unifile}{(string): location of file listing gene universe.}

\item{networkfile}{(string): location of file containing network contents.}

\item{outdir}{(string): prefix of location of file to write performance results (optionally prediction results).}

\item{restarts}{(vector): vector of restart values to test. Default is c(0.7).}

\item{nfolds}{(int): number of folds for cross validation, 1 is no cross-validation. Default is 1.}

\item{st2keep}{(int): number of property nodes to keep in second stage for each property type. Default is 1.}

\item{undirected}{(bool): boolean to make network undirected.}

\item{unweighted}{(bool): boolean to make network unweighted.}

\item{normalize}{(string): "type" or "none". Default is 'type'.}

\item{maxiters}{(int): maximum number of allowable iterations. Default is 50.}

\item{thresh}{(float): threshold for L1 norm convergence. Default is 1e-04.}

\item{property_types}{(vector): list of possible property types. Default is c("allen_brain_atlas", "chip_binding", "gene_ontology", "motif_u5", "pfam_domain", "T1", "T2").}

\item{writepreds}{(boolean): write predictions out to a file. Default is 0 (FALSE).}
}
\description{
This function runs the DRaWR two stage random walk with restart method.
} \examples{ DRaWR(possetfile = "data/sample_inputs/test.setlist", unifile = "data/sample_inputs/test.uni", networkfile = "data/sample_inputs/test.edge", outdir = "data/sample_outputs/results_", restarts = c(.7), nfolds = 1, st2keep = 1, undirected = TRUE, unweighted = FALSE, normalize = "type", maxiters = 50, thresh = 0.0001, property_types = c("T1", "T2"), writepreds = 0) } \keyword{random} \keyword{restart} \keyword{walk} \keyword{with}
/packages/DRaWR/man/DRaWR.Rd
permissive
minghao2016/DRaWR
R
false
true
2,358
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DRaWR.R
\name{DRaWR}
\alias{DRaWR}
\title{DRaWR}
\usage{
DRaWR(possetfile = "data/sample_inputs/test.setlist",
  unifile = "data/sample_inputs/test.uni",
  networkfile = "data/sample_inputs/test.edge",
  outdir = "data/sample_outputs/results_", restarts = c(0.7), nfolds = 1,
  st2keep = 1, undirected = TRUE, unweighted = FALSE, normalize = "type",
  maxiters = 50, thresh = 1e-04, property_types = c("allen_brain_atlas",
  "chip_binding", "gene_ontology", "motif_u5", "pfam_domain", "T1", "T2"),
  writepreds = 0)
}
\arguments{
\item{possetfile}{(string): location of file containing location of gene sets to test.}

\item{unifile}{(string): location of file listing gene universe.}

\item{networkfile}{(string): location of file containing network contents.}

\item{outdir}{(string): prefix of location of file to write performance results (optionally prediction results).}

\item{restarts}{(vector): vector of restart values to test. Default is c(0.7).}

\item{nfolds}{(int): number of folds for cross validation, 1 is no cross-validation. Default is 1.}

\item{st2keep}{(int): number of property nodes to keep in second stage for each property type. Default is 1.}

\item{undirected}{(bool): boolean to make network undirected.}

\item{unweighted}{(bool): boolean to make network unweighted.}

\item{normalize}{(string): "type" or "none". Default is 'type'.}

\item{maxiters}{(int): maximum number of allowable iterations. Default is 50.}

\item{thresh}{(float): threshold for L1 norm convergence. Default is 1e-04.}

\item{property_types}{(vector): list of possible property types. Default is c("allen_brain_atlas", "chip_binding", "gene_ontology", "motif_u5", "pfam_domain", "T1", "T2").}

\item{writepreds}{(boolean): write predictions out to a file. Default is 0 (FALSE).}
}
\description{
This function runs the DRaWR two stage random walk with restart method.
} \examples{ DRaWR(possetfile = "data/sample_inputs/test.setlist", unifile = "data/sample_inputs/test.uni", networkfile = "data/sample_inputs/test.edge", outdir = "data/sample_outputs/results_", restarts = c(.7), nfolds = 1, st2keep = 1, undirected = TRUE, unweighted = FALSE, normalize = "type", maxiters = 50, thresh = 0.0001, property_types = c("T1", "T2"), writepreds = 0) } \keyword{random} \keyword{restart} \keyword{walk} \keyword{with}
# testthat suite for Crunch project catalogs: listing, creating, renaming, and
# deleting projects; managing project members and editor permissions; and
# adding, loading, and organizing datasets within a project. The first section
# runs against mocked HTTP fixtures (with_mock_HTTP) and asserts on the exact
# request verb/URL/payload; the section guarded by run.integration.tests
# exercises a live test server via with_test_authentication.
context("Projects") with_mock_HTTP({ projects <- session()$projects test_that("Getting projects catalog", { expect_is(projects, "ProjectCatalog") expect_length(projects, 2) expect_identical(names(projects), c("Project One", "Project Two")) }) aproject <- projects[["Project One"]] test_that("Getting project from catalog", { expect_is(projects[[1]], "CrunchProject") expect_is(projects$`Project One`, "CrunchProject") expect_is(projects[["Project One"]], "CrunchProject") expect_null(projects$`Beta Project`) }) test_that("Project attributes", { expect_identical(name(aproject), "Project One") }) test_that("Simple project creation by assignment", { expect_error(projects[["A new project"]] <- list(), 'POST /api/projects.json {"name":"A new project"}', fixed=TRUE) expect_error(projects$`A new project` <- list(), 'POST /api/projects.json {"name":"A new project"}', fixed=TRUE) }) test_that("Project editing", { expect_error(names(projects)[2] <- "New name", paste('PATCH /api/projects.json', '{"/api/projects/project2.json":{"name":"New name"}}'), fixed=TRUE) expect_error(name(projects[[2]]) <- "New name", paste('PATCH /api/projects.json', '{"/api/projects/project2.json":{"name":"New name"}}'), fixed=TRUE) }) test_that("Project deletion", { expect_error(delete(projects[[1]], confirm=TRUE), "Must confirm deleting project") with(consent(), expect_error(delete(projects[[1]], confirm=TRUE), "DELETE /api/projects/project1.json")) }) m <- members(aproject) test_that("Project members catalog", { expect_is(m, "MemberCatalog") expect_identical(names(m), c("Fake User", "Roger User")) expect_identical(emails(m), c("fake.user@example.com", "roger.user@example.com")) }) test_that("Add members by members<-", { expect_error(members(aproject) <- c("new.user@crunch.io", "foo@example.co"), 'PATCH /api/projects/project1/members.json {"new.user@crunch.io":{},"foo@example.co":{}}', fixed=TRUE) }) test_that("Add members doesn't re-add if already a member", { expect_error(members(aproject) <- 
c("new.user@crunch.io", "roger.user@example.com"), 'PATCH /api/projects/project1/members.json {"new.user@crunch.io":{}}', fixed=TRUE) }) test_that("Remove members by <- NULL", { expect_error(members(aproject)[["roger.user@example.com"]] <- NULL, 'PATCH /api/projects/project1/members.json {"roger.user@example.com":null}', fixed=TRUE) }) test_that("is.editor on member catalog", { expect_identical(is.editor(m), c(TRUE, FALSE)) }) test_that("is.editor<- on member catalog", { expect_error(is.editor(m) <- c(TRUE, TRUE), paste('PATCH /api/projects/project1/members.json', '{"/api/users/user2.json":{"permissions":{"edit":true}}}'), fixed=TRUE) expect_error(is.editor(m[2]) <- TRUE, paste('PATCH /api/projects/project1/members.json', '{"/api/users/user2.json":{"permissions":{"edit":true}}}'), fixed=TRUE) expect_error(is.editor(m[2]) <- FALSE, NA) ## No change, so no PATCH request made }) test_that("Print method for MemberCatalog", { expect_output(m, paste(capture.output(print(data.frame( name=c("Fake User", "Roger User"), email=c("fake.user@example.com", "roger.user@example.com"), is.editor=c(TRUE, FALSE) ))), collapse="\n") ) }) d <- datasets(aproject) test_that("Project datasets catalog", { expect_is(d, "DatasetCatalog") expect_identical(names(d), "ECON.sav") }) do <- ordering(d) test_that("Project datasets order", { expect_is(do, "DatasetOrder") expect_identical(do@graph, list(DatasetGroup("Group 1", "/api/datasets/dataset3.json"))) }) test_that("Add datasets to project by <- a dataset (which transfers ownership)", { ds <- loadDataset("test ds") expect_error(datasets(aproject) <- ds, 'PATCH /api/datasets/dataset1.json {"owner":"/api/projects/project1.json"}', fixed=TRUE) }) test_that("Organize datasets", { expect_identical(DatasetOrder(DatasetGroup("new group", datasets(aproject))), DatasetOrder(DatasetGroup("new group", "/api/datasets/dataset3.json"))) expect_error(ordering(datasets(aproject)) <- DatasetOrder(DatasetGroup("new group", datasets(aproject))), 'PUT 
/api/projects/project1/datasets/order.json {"graph":[{"new group":["/api/datasets/dataset3.json"]}]}', fixed=TRUE) nested.ord <- DatasetOrder("/api/datasets/dataset3.json", DatasetGroup("new group", list(DatasetGroup("nested", "/api/datasets/dataset3.json")))) expect_error(ordering(datasets(aproject)) <- nested.ord, 'PUT /api/projects/project1/datasets/order.json {"graph":["/api/datasets/dataset3.json",{"new group":[{"nested":["/api/datasets/dataset3.json"]}]}]}', fixed=TRUE) }) }) if (run.integration.tests) { with_test_authentication({ projects <- session()$projects ucat <- getAccountUserCatalog() my.name <- names(ucat)[urls(ucat) == userURL()] my.email <- emails(ucat)[urls(ucat) == userURL()] nprojects.0 <- length(projects) test_that("Can get project catalog", { expect_is(projects, "ProjectCatalog") }) name.of.project1 <- now() test_that("Can create a project", { expect_false(name.of.project1 %in% names(projects)) projects[[name.of.project1]] <- list() expect_true(name.of.project1 %in% names(projects)) expect_true(length(projects) == nprojects.0 + 1L) expect_is(projects[[name.of.project1]], "CrunchProject") expect_length(members(projects[[name.of.project1]]), 1) expect_identical(names(members(projects[[name.of.project1]])), my.name) }) projects <- refresh(projects) pj <- projects[[name.of.project1]] p_url <- self(pj) name2 <- paste(name.of.project1, "revised") test_that("Can rename a project by name<-", { expect_identical(self(projects[[name.of.project1]]), p_url) expect_null(projects[[name2]]) name(projects[[name.of.project1]]) <- name2 expect_identical(projects[[name.of.project1]], NULL) expect_identical(self(projects[[name2]]), p_url) }) name3 <- paste(name2, "FINAL") test_that("Can rename a project with names<-", { expect_false(name3 %in% names(projects)) names(projects)[urls(projects) == p_url] <- name3 expect_true(name3 %in% names(projects)) expect_identical(self(projects[[name3]]), p_url) }) test_that("Get and set project icon", { ico <- icon(pj) # 
expect_true(nchar(ico) > 0) ## Unskip after #119305641 ships icon(pj) <- "empty.png" expect_false(icon(pj) == "empty.png") expect_true(endsWith(icon(pj), ".png")) expect_false(identical(icon(pj), ico)) }) test_that("Can delete a project by URL", { projects <- refresh(projects) expect_true(p_url %in% urls(projects)) try(crDELETE(p_url)) expect_false(p_url %in% urls(refresh(projects))) }) test_that("Can create a project with members", { skip("TODO") skip_on_jenkins("Jenkins user needs more permissions") projects <- refresh(projects) nprojects.2 <- length(projects) name.of.project2 <- now() expect_false(name.of.project2 %in% names(projects)) with(cleanup(testUser()), as="u", { projects[[name.of.project2]] <- list(members=email(u)) expect_true(name.of.project2 %in% names(projects)) with(cleanup(projects[[name.of.project2]]), as="tp", { expect_true(length(projects) == nprojects.2 + 1L) expect_true(setequal(names(members(tp)), c(name(u), my.name))) }) }) }) test_that("Can add members to a project (and then set as an editor)", { skip_on_jenkins("Jenkins user needs more permissions") with(cleanup(testProject()), as="tp", { with(cleanup(testUser()), as="u", { expect_identical(names(members(tp)), my.name) members(tp) <- email(u) expect_true(setequal(names(members(tp)), c(name(u), my.name))) expect_identical(is.editor(members(tp)), c(TRUE, FALSE)) is.editor(members(tp)[email(u)]) <- TRUE expect_identical(is.editor(members(tp)), c(TRUE, TRUE)) }) }) }) test_that("Can remove members from a project", { skip_on_jenkins("Jenkins user needs more permissions") with(cleanup(testProject()), as="tp", { with(cleanup(testUser()), as="u", { expect_identical(names(members(tp)), my.name) members(tp) <- email(u) expect_true(setequal(names(members(tp)), c(name(u), my.name))) try(members(tp)[[email(u)]] <- NULL) expect_identical(names(members(tp)), my.name) }) }) }) with(test.dataset(), { with(cleanup(testProject()), as="tp", { test_that("Can add datasets to project", { expect_is(tp, 
"CrunchProject") expect_length(datasets(tp), 0) datasets(tp) <- ds expect_identical(names(datasets(tp)), name(ds)) expect_identical(owner(refresh(ds)), self(tp)) }) ds2 <- loadDataset(datasets(tp)[[1]]) test_that("Can load a dataset from a project", { expect_true(is.dataset(ds2)) expect_identical(self(ds2), self(ds)) }) test_that("Can organize datasets", { expect_identical(as.list(urls(datasets(tp))), entities(ordering(datasets(tp)))) ordering(datasets(tp)) <- DatasetOrder(DatasetGroup("A group of one", list(ds))) expect_identical(ordering(datasets(tp))@graph[[1]], DatasetGroup(name="A group of one", entities=self(ds))) }) with(test.dataset(), as="ds3", { ord2 <- DatasetOrder(DatasetGroup("A group of two", c(self(ds), self(ds3)))) test_that("Have to add dataset to project before organizing it", { expect_error(ordering(datasets(tp)) <- ord2, "Dataset URL referenced in Order not present in catalog") expect_identical(ordering(datasets(tp))@graph[[1]], DatasetGroup(name="A group of one", entities=self(ds))) }) owner(ds3) <- tp tp <- refresh(tp) test_that("Can reorganize datasets", { ordering(datasets(tp)) <- ord2 expect_identical(ordering(datasets(tp))@graph[[1]], DatasetGroup(name="A group of two", entities=c(self(ds), self(ds3)))) expect_output(ordering(datasets(tp)), paste("[+] A group of two", paste0(" ", name(ds)), paste0(" ", name(ds3)), sep="\n"), fixed=TRUE) }) ord3 <- DatasetOrder(DatasetGroup("G1", self(ds3)), DatasetGroup("G2", self(ds))) ord3.list <- list(DatasetGroup("G1", self(ds3)), DatasetGroup("G2", self(ds))) ord3.alt <- DatasetOrder( DatasetGroup("G1", datasets(tp)[names(datasets(tp)) == name(ds3)]), DatasetGroup("G2", datasets(tp)[names(datasets(tp)) == name(ds)])) test_that("Can re-reorganize", { expect_identical(ord3, ord3.alt) ordering(datasets(tp)) <- ord3 expect_identical(ordering(datasets(tp))@graph, ord3.list) expect_identical(ordering(datasets(refresh(tp)))@graph, ord3.list) }) test_that("Can create a Group by assigning by name", { 
ordering(datasets(tp))[["New group three"]] <- self(ds) expect_output(ordering(datasets(tp)), paste("[+] G1", paste0(" ", name(ds3)), "[+] G2", " (Empty group)", "[+] New group three", paste0(" ", name(ds)), sep="\n"), fixed=TRUE) }) }) test_that("Can rename a dataset in a project", { newname <- paste(name(ds2), "edited") name(ds2) <- newname expect_identical(name(ds2), newname) expect_identical(name(refresh(ds2)), newname) expect_identical(name(datasets(refresh(tp))[[1]]), newname) }) test_that("Can privatize a dataset belonging to a project", { expect_identical(owner(ds2), self(tp)) owner(ds2) <- me() expect_identical(owner(ds2), self(me())) }) }) }) }) }
/tests/testthat/test-projects.R
no_license
digideskio/rcrunch
R
false
false
14,927
r
# testthat suite for Crunch project catalogs: listing, creating, renaming, and
# deleting projects; managing project members and editor permissions; and
# adding, loading, and organizing datasets within a project. The first section
# runs against mocked HTTP fixtures (with_mock_HTTP) and asserts on the exact
# request verb/URL/payload; the section guarded by run.integration.tests
# exercises a live test server via with_test_authentication.
context("Projects") with_mock_HTTP({ projects <- session()$projects test_that("Getting projects catalog", { expect_is(projects, "ProjectCatalog") expect_length(projects, 2) expect_identical(names(projects), c("Project One", "Project Two")) }) aproject <- projects[["Project One"]] test_that("Getting project from catalog", { expect_is(projects[[1]], "CrunchProject") expect_is(projects$`Project One`, "CrunchProject") expect_is(projects[["Project One"]], "CrunchProject") expect_null(projects$`Beta Project`) }) test_that("Project attributes", { expect_identical(name(aproject), "Project One") }) test_that("Simple project creation by assignment", { expect_error(projects[["A new project"]] <- list(), 'POST /api/projects.json {"name":"A new project"}', fixed=TRUE) expect_error(projects$`A new project` <- list(), 'POST /api/projects.json {"name":"A new project"}', fixed=TRUE) }) test_that("Project editing", { expect_error(names(projects)[2] <- "New name", paste('PATCH /api/projects.json', '{"/api/projects/project2.json":{"name":"New name"}}'), fixed=TRUE) expect_error(name(projects[[2]]) <- "New name", paste('PATCH /api/projects.json', '{"/api/projects/project2.json":{"name":"New name"}}'), fixed=TRUE) }) test_that("Project deletion", { expect_error(delete(projects[[1]], confirm=TRUE), "Must confirm deleting project") with(consent(), expect_error(delete(projects[[1]], confirm=TRUE), "DELETE /api/projects/project1.json")) }) m <- members(aproject) test_that("Project members catalog", { expect_is(m, "MemberCatalog") expect_identical(names(m), c("Fake User", "Roger User")) expect_identical(emails(m), c("fake.user@example.com", "roger.user@example.com")) }) test_that("Add members by members<-", { expect_error(members(aproject) <- c("new.user@crunch.io", "foo@example.co"), 'PATCH /api/projects/project1/members.json {"new.user@crunch.io":{},"foo@example.co":{}}', fixed=TRUE) }) test_that("Add members doesn't re-add if already a member", { expect_error(members(aproject) <- 
c("new.user@crunch.io", "roger.user@example.com"), 'PATCH /api/projects/project1/members.json {"new.user@crunch.io":{}}', fixed=TRUE) }) test_that("Remove members by <- NULL", { expect_error(members(aproject)[["roger.user@example.com"]] <- NULL, 'PATCH /api/projects/project1/members.json {"roger.user@example.com":null}', fixed=TRUE) }) test_that("is.editor on member catalog", { expect_identical(is.editor(m), c(TRUE, FALSE)) }) test_that("is.editor<- on member catalog", { expect_error(is.editor(m) <- c(TRUE, TRUE), paste('PATCH /api/projects/project1/members.json', '{"/api/users/user2.json":{"permissions":{"edit":true}}}'), fixed=TRUE) expect_error(is.editor(m[2]) <- TRUE, paste('PATCH /api/projects/project1/members.json', '{"/api/users/user2.json":{"permissions":{"edit":true}}}'), fixed=TRUE) expect_error(is.editor(m[2]) <- FALSE, NA) ## No change, so no PATCH request made }) test_that("Print method for MemberCatalog", { expect_output(m, paste(capture.output(print(data.frame( name=c("Fake User", "Roger User"), email=c("fake.user@example.com", "roger.user@example.com"), is.editor=c(TRUE, FALSE) ))), collapse="\n") ) }) d <- datasets(aproject) test_that("Project datasets catalog", { expect_is(d, "DatasetCatalog") expect_identical(names(d), "ECON.sav") }) do <- ordering(d) test_that("Project datasets order", { expect_is(do, "DatasetOrder") expect_identical(do@graph, list(DatasetGroup("Group 1", "/api/datasets/dataset3.json"))) }) test_that("Add datasets to project by <- a dataset (which transfers ownership)", { ds <- loadDataset("test ds") expect_error(datasets(aproject) <- ds, 'PATCH /api/datasets/dataset1.json {"owner":"/api/projects/project1.json"}', fixed=TRUE) }) test_that("Organize datasets", { expect_identical(DatasetOrder(DatasetGroup("new group", datasets(aproject))), DatasetOrder(DatasetGroup("new group", "/api/datasets/dataset3.json"))) expect_error(ordering(datasets(aproject)) <- DatasetOrder(DatasetGroup("new group", datasets(aproject))), 'PUT 
/api/projects/project1/datasets/order.json {"graph":[{"new group":["/api/datasets/dataset3.json"]}]}', fixed=TRUE) nested.ord <- DatasetOrder("/api/datasets/dataset3.json", DatasetGroup("new group", list(DatasetGroup("nested", "/api/datasets/dataset3.json")))) expect_error(ordering(datasets(aproject)) <- nested.ord, 'PUT /api/projects/project1/datasets/order.json {"graph":["/api/datasets/dataset3.json",{"new group":[{"nested":["/api/datasets/dataset3.json"]}]}]}', fixed=TRUE) }) }) if (run.integration.tests) { with_test_authentication({ projects <- session()$projects ucat <- getAccountUserCatalog() my.name <- names(ucat)[urls(ucat) == userURL()] my.email <- emails(ucat)[urls(ucat) == userURL()] nprojects.0 <- length(projects) test_that("Can get project catalog", { expect_is(projects, "ProjectCatalog") }) name.of.project1 <- now() test_that("Can create a project", { expect_false(name.of.project1 %in% names(projects)) projects[[name.of.project1]] <- list() expect_true(name.of.project1 %in% names(projects)) expect_true(length(projects) == nprojects.0 + 1L) expect_is(projects[[name.of.project1]], "CrunchProject") expect_length(members(projects[[name.of.project1]]), 1) expect_identical(names(members(projects[[name.of.project1]])), my.name) }) projects <- refresh(projects) pj <- projects[[name.of.project1]] p_url <- self(pj) name2 <- paste(name.of.project1, "revised") test_that("Can rename a project by name<-", { expect_identical(self(projects[[name.of.project1]]), p_url) expect_null(projects[[name2]]) name(projects[[name.of.project1]]) <- name2 expect_identical(projects[[name.of.project1]], NULL) expect_identical(self(projects[[name2]]), p_url) }) name3 <- paste(name2, "FINAL") test_that("Can rename a project with names<-", { expect_false(name3 %in% names(projects)) names(projects)[urls(projects) == p_url] <- name3 expect_true(name3 %in% names(projects)) expect_identical(self(projects[[name3]]), p_url) }) test_that("Get and set project icon", { ico <- icon(pj) # 
expect_true(nchar(ico) > 0) ## Unskip after #119305641 ships icon(pj) <- "empty.png" expect_false(icon(pj) == "empty.png") expect_true(endsWith(icon(pj), ".png")) expect_false(identical(icon(pj), ico)) }) test_that("Can delete a project by URL", { projects <- refresh(projects) expect_true(p_url %in% urls(projects)) try(crDELETE(p_url)) expect_false(p_url %in% urls(refresh(projects))) }) test_that("Can create a project with members", { skip("TODO") skip_on_jenkins("Jenkins user needs more permissions") projects <- refresh(projects) nprojects.2 <- length(projects) name.of.project2 <- now() expect_false(name.of.project2 %in% names(projects)) with(cleanup(testUser()), as="u", { projects[[name.of.project2]] <- list(members=email(u)) expect_true(name.of.project2 %in% names(projects)) with(cleanup(projects[[name.of.project2]]), as="tp", { expect_true(length(projects) == nprojects.2 + 1L) expect_true(setequal(names(members(tp)), c(name(u), my.name))) }) }) }) test_that("Can add members to a project (and then set as an editor)", { skip_on_jenkins("Jenkins user needs more permissions") with(cleanup(testProject()), as="tp", { with(cleanup(testUser()), as="u", { expect_identical(names(members(tp)), my.name) members(tp) <- email(u) expect_true(setequal(names(members(tp)), c(name(u), my.name))) expect_identical(is.editor(members(tp)), c(TRUE, FALSE)) is.editor(members(tp)[email(u)]) <- TRUE expect_identical(is.editor(members(tp)), c(TRUE, TRUE)) }) }) }) test_that("Can remove members from a project", { skip_on_jenkins("Jenkins user needs more permissions") with(cleanup(testProject()), as="tp", { with(cleanup(testUser()), as="u", { expect_identical(names(members(tp)), my.name) members(tp) <- email(u) expect_true(setequal(names(members(tp)), c(name(u), my.name))) try(members(tp)[[email(u)]] <- NULL) expect_identical(names(members(tp)), my.name) }) }) }) with(test.dataset(), { with(cleanup(testProject()), as="tp", { test_that("Can add datasets to project", { expect_is(tp, 
"CrunchProject") expect_length(datasets(tp), 0) datasets(tp) <- ds expect_identical(names(datasets(tp)), name(ds)) expect_identical(owner(refresh(ds)), self(tp)) }) ds2 <- loadDataset(datasets(tp)[[1]]) test_that("Can load a dataset from a project", { expect_true(is.dataset(ds2)) expect_identical(self(ds2), self(ds)) }) test_that("Can organize datasets", { expect_identical(as.list(urls(datasets(tp))), entities(ordering(datasets(tp)))) ordering(datasets(tp)) <- DatasetOrder(DatasetGroup("A group of one", list(ds))) expect_identical(ordering(datasets(tp))@graph[[1]], DatasetGroup(name="A group of one", entities=self(ds))) }) with(test.dataset(), as="ds3", { ord2 <- DatasetOrder(DatasetGroup("A group of two", c(self(ds), self(ds3)))) test_that("Have to add dataset to project before organizing it", { expect_error(ordering(datasets(tp)) <- ord2, "Dataset URL referenced in Order not present in catalog") expect_identical(ordering(datasets(tp))@graph[[1]], DatasetGroup(name="A group of one", entities=self(ds))) }) owner(ds3) <- tp tp <- refresh(tp) test_that("Can reorganize datasets", { ordering(datasets(tp)) <- ord2 expect_identical(ordering(datasets(tp))@graph[[1]], DatasetGroup(name="A group of two", entities=c(self(ds), self(ds3)))) expect_output(ordering(datasets(tp)), paste("[+] A group of two", paste0(" ", name(ds)), paste0(" ", name(ds3)), sep="\n"), fixed=TRUE) }) ord3 <- DatasetOrder(DatasetGroup("G1", self(ds3)), DatasetGroup("G2", self(ds))) ord3.list <- list(DatasetGroup("G1", self(ds3)), DatasetGroup("G2", self(ds))) ord3.alt <- DatasetOrder( DatasetGroup("G1", datasets(tp)[names(datasets(tp)) == name(ds3)]), DatasetGroup("G2", datasets(tp)[names(datasets(tp)) == name(ds)])) test_that("Can re-reorganize", { expect_identical(ord3, ord3.alt) ordering(datasets(tp)) <- ord3 expect_identical(ordering(datasets(tp))@graph, ord3.list) expect_identical(ordering(datasets(refresh(tp)))@graph, ord3.list) }) test_that("Can create a Group by assigning by name", { 
ordering(datasets(tp))[["New group three"]] <- self(ds) expect_output(ordering(datasets(tp)), paste("[+] G1", paste0(" ", name(ds3)), "[+] G2", " (Empty group)", "[+] New group three", paste0(" ", name(ds)), sep="\n"), fixed=TRUE) }) }) test_that("Can rename a dataset in a project", { newname <- paste(name(ds2), "edited") name(ds2) <- newname expect_identical(name(ds2), newname) expect_identical(name(refresh(ds2)), newname) expect_identical(name(datasets(refresh(tp))[[1]]), newname) }) test_that("Can privatize a dataset belonging to a project", { expect_identical(owner(ds2), self(tp)) owner(ds2) <- me() expect_identical(owner(ds2), self(me())) }) }) }) }) }
library(mclust) library(lubridate) library(dplyr) library(mvtnorm) library(mixtools) # Thanks to Chad Young for useful mclust discussions ############# # UNIVARIATE ############ # read in our iPhone daily step counts raw <- read.csv("datasets/iphone_health/stepsData.csv") # day-level data steps <- raw %>% mutate(startDate = factor(as.Date(startDate))) %>% dplyr::select(date = startDate, stepsWalked) %>% group_by(date) %>% summarize(stepsWalked = sum(stepsWalked)) ## mixtools # no mu supplied, data is binned, then K is picked mixauto <- normalmixEM(steps$stepsWalked) plot(mixauto, density=TRUE) # K is supplied mix4 <- normalmixEM(steps$stepsWalked, k=4) plot(mix4, density=TRUE) ## mclust # do density estimation, let mclust pick our distribution count for us dens <- densityMclust(steps$stepsWalked) # look at dianogstics plot(dens, what = "BIC") summary(dens$BIC) summary(dens, parameters = TRUE) plot(dens, what = "density", data=steps$stepsWalked) # extract parameters of the Gaussians params <- dens$parameters nDistros <- length(params$pro) distros <- data.frame(matrix(0,4,4)) colnames(distros) <- c("id","n", "mean","sd") distros$id <- 1:4 # re-sample the Gaussians N <- nrow(steps) for (i in 1:nDistros){ distros[distros$id==i,]$n <- floor(N * params$pro[i]) distros[distros$id==i,]$mean <- params$mean[i] distros[distros$id==i,]$sd <- floor(sqrt(params$variance$sigmasq[i])) } set.seed(1) densities <- apply(distros[, -1], 1, function(x) rnorm(n = x[1], mean = x[2], sd = x[2])) # plot out the estimated densities plot(0, type = "n", xlim = c(min(unlist(lapply(densities, min))),max(unlist(lapply(densities, max)))), ylim = c(0, 1.5e-4)) for (d in 1:length(densities)){ lines(density(densities[[d]]), lty = d) } legend("topright", legend=1:length(densities), lty=1:length(densities)) ############## # MULTIVARIATE ############## # probably not a great idea since running and biking are temporally independent... 
biking <- read.csv("cyclingData.csv", header=T) biking <- biking %>% group_by(startDate) %>% summarize(kcalRun = sum(kcalBurned),milesCycled = sum(milesCycled)) %>% mutate(startDate = as.Date(startDate)) %>% dplyr::filter(milesCycled < 20) %>% dplyr::select(c(startDate,milesCycled)) health <- merge(biking, steps, by="startDate", all=TRUE) health[is.na(health$milesCycled),]$milesCycled <- 0 health <- dplyr::select(health, c(milesCycled, stepsWalked)) mdens <- densityMclust(health, classification=TRUE) # retrieve model summary: model type, log-likelihood, BIC, etc. summary(mdens) # cluster membership mdens$classification # plot the multivariate density plot(mdens, what="density", type="persp") # retrieve the covariance matrix covMat <- cov(health) # resample rmvnorm(n = nrow(health), mean=mdens$parameters$mean, sigma=mdens$parameters$variance$sigma[,,8])
/modeling_gaussian_mixtures.R
no_license
ptvan/R-snippets
R
false
false
2,843
r
library(mclust) library(lubridate) library(dplyr) library(mvtnorm) library(mixtools) # Thanks to Chad Young for useful mclust discussions ############# # UNIVARIATE ############ # read in our iPhone daily step counts raw <- read.csv("datasets/iphone_health/stepsData.csv") # day-level data steps <- raw %>% mutate(startDate = factor(as.Date(startDate))) %>% dplyr::select(date = startDate, stepsWalked) %>% group_by(date) %>% summarize(stepsWalked = sum(stepsWalked)) ## mixtools # no mu supplied, data is binned, then K is picked mixauto <- normalmixEM(steps$stepsWalked) plot(mixauto, density=TRUE) # K is supplied mix4 <- normalmixEM(steps$stepsWalked, k=4) plot(mix4, density=TRUE) ## mclust # do density estimation, let mclust pick our distribution count for us dens <- densityMclust(steps$stepsWalked) # look at dianogstics plot(dens, what = "BIC") summary(dens$BIC) summary(dens, parameters = TRUE) plot(dens, what = "density", data=steps$stepsWalked) # extract parameters of the Gaussians params <- dens$parameters nDistros <- length(params$pro) distros <- data.frame(matrix(0,4,4)) colnames(distros) <- c("id","n", "mean","sd") distros$id <- 1:4 # re-sample the Gaussians N <- nrow(steps) for (i in 1:nDistros){ distros[distros$id==i,]$n <- floor(N * params$pro[i]) distros[distros$id==i,]$mean <- params$mean[i] distros[distros$id==i,]$sd <- floor(sqrt(params$variance$sigmasq[i])) } set.seed(1) densities <- apply(distros[, -1], 1, function(x) rnorm(n = x[1], mean = x[2], sd = x[2])) # plot out the estimated densities plot(0, type = "n", xlim = c(min(unlist(lapply(densities, min))),max(unlist(lapply(densities, max)))), ylim = c(0, 1.5e-4)) for (d in 1:length(densities)){ lines(density(densities[[d]]), lty = d) } legend("topright", legend=1:length(densities), lty=1:length(densities)) ############## # MULTIVARIATE ############## # probably not a great idea since running and biking are temporally independent... 
biking <- read.csv("cyclingData.csv", header=T) biking <- biking %>% group_by(startDate) %>% summarize(kcalRun = sum(kcalBurned),milesCycled = sum(milesCycled)) %>% mutate(startDate = as.Date(startDate)) %>% dplyr::filter(milesCycled < 20) %>% dplyr::select(c(startDate,milesCycled)) health <- merge(biking, steps, by="startDate", all=TRUE) health[is.na(health$milesCycled),]$milesCycled <- 0 health <- dplyr::select(health, c(milesCycled, stepsWalked)) mdens <- densityMclust(health, classification=TRUE) # retrieve model summary: model type, log-likelihood, BIC, etc. summary(mdens) # cluster membership mdens$classification # plot the multivariate density plot(mdens, what="density", type="persp") # retrieve the covariance matrix covMat <- cov(health) # resample rmvnorm(n = nrow(health), mean=mdens$parameters$mean, sigma=mdens$parameters$variance$sigma[,,8])
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simmr.R \name{simmr} \alias{simmr} \alias{simmr-package} \title{simmr: A package for fitting stable isotope mixing models via JAGS in R} \description{ This package runs a simple Stable Isotope Mixing Model (SIMM) and is meant as a longer term replacement to the previous function SIAR.. These are used to infer dietary proportions of organisms consuming various food sources from observations on the stable isotope values taken from the organisms' tissue samples. However SIMMs can also be used in other scenarios, such as in sediment mixing or the composition of fatty acids. The main functions are \code{\link{simmr_load}} and \code{\link{simmr_mcmc}}. The help files contain examples of the use of this package. See also the vignette for a longer walkthrough. } \details{ An even longer term replacement for properly running SIMMs is MixSIAR, which allows for more detailed random effects and the inclusion of covariates. } \examples{ \dontrun{ # A first example with 2 tracers (isotopes), 10 observations, and 4 food sources data(geese_data_day1) simmr_in <- with( geese_data_day1, simmr_load( mixtures = mixtures, source_names = source_names, source_means = source_means, source_sds = source_sds, correction_means = correction_means, correction_sds = correction_sds, concentration_means = concentration_means ) ) # Plot plot(simmr_in) # MCMC run simmr_out <- simmr_mcmc(simmr_in) # Check convergence - values should all be close to 1 summary(simmr_out, type = "diagnostics") # Look at output summary(simmr_out, type = "statistics") # Look at influence of priors prior_viz(simmr_out) # Plot output plot(simmr_out, type = "histogram") } } \references{ Andrew C. Parnell, Donald L. Phillips, Stuart Bearhop, Brice X. Semmens, Eric J. Ward, Jonathan W. Moore, Andrew L. Jackson, Jonathan Grey, David J. Kelly, and Richard Inger. Bayesian stable isotope mixing models. Environmetrics, 24(6):387–399, 2013. 
Andrew C Parnell, Richard Inger, Stuart Bearhop, and Andrew L Jackson. Source partitioning using stable isotopes: coping with too much variation. PLoS ONE, 5(3):5, 2010. } \author{ Andrew Parnell <andrew.parnell@mu.ie> } \keyword{multivariate}
/man/simmr.Rd
no_license
andrewcparnell/simmr
R
false
true
2,271
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simmr.R \name{simmr} \alias{simmr} \alias{simmr-package} \title{simmr: A package for fitting stable isotope mixing models via JAGS in R} \description{ This package runs a simple Stable Isotope Mixing Model (SIMM) and is meant as a longer term replacement to the previous function SIAR.. These are used to infer dietary proportions of organisms consuming various food sources from observations on the stable isotope values taken from the organisms' tissue samples. However SIMMs can also be used in other scenarios, such as in sediment mixing or the composition of fatty acids. The main functions are \code{\link{simmr_load}} and \code{\link{simmr_mcmc}}. The help files contain examples of the use of this package. See also the vignette for a longer walkthrough. } \details{ An even longer term replacement for properly running SIMMs is MixSIAR, which allows for more detailed random effects and the inclusion of covariates. } \examples{ \dontrun{ # A first example with 2 tracers (isotopes), 10 observations, and 4 food sources data(geese_data_day1) simmr_in <- with( geese_data_day1, simmr_load( mixtures = mixtures, source_names = source_names, source_means = source_means, source_sds = source_sds, correction_means = correction_means, correction_sds = correction_sds, concentration_means = concentration_means ) ) # Plot plot(simmr_in) # MCMC run simmr_out <- simmr_mcmc(simmr_in) # Check convergence - values should all be close to 1 summary(simmr_out, type = "diagnostics") # Look at output summary(simmr_out, type = "statistics") # Look at influence of priors prior_viz(simmr_out) # Plot output plot(simmr_out, type = "histogram") } } \references{ Andrew C. Parnell, Donald L. Phillips, Stuart Bearhop, Brice X. Semmens, Eric J. Ward, Jonathan W. Moore, Andrew L. Jackson, Jonathan Grey, David J. Kelly, and Richard Inger. Bayesian stable isotope mixing models. Environmetrics, 24(6):387–399, 2013. 
Andrew C Parnell, Richard Inger, Stuart Bearhop, and Andrew L Jackson. Source partitioning using stable isotopes: coping with too much variation. PLoS ONE, 5(3):5, 2010. } \author{ Andrew Parnell <andrew.parnell@mu.ie> } \keyword{multivariate}
library(devtools) options(repos = c("CRAN" = "http://cran.us.r-project.org")) options(Ncpus = parallel::detectCores()) # Set download method, to avoid the default behavior of using # R's internal HTTP implementation, which doesn't support HTTPS connections. # https://stackoverflow.com/questions/45061272/r-and-ssl-curl-on-ubuntu-linux-failed-ssl-connect-in-r-but-works-in-curl options(download.file.method = "libcurl") # Install the lightGBM installer package install_github("Laurae2/lgbdl") lgbdl::lgb.dl(compiler = "gcc", commit = "tags/v2.3.1") install_github("dgrtwo/widyr") install_github("ellisp/forecastxgb-r-package/pkg") install_github("rstudio/leaflet") # install_github fails for catboost. # Following direct installation instructions instead: https://tech.yandex.com/catboost/doc/dg/installation/r-installation-binary-installation-docpage/ install_url('https://github.com/catboost/catboost/releases/download/v0.23.2/catboost-R-Linux-0.23.2.tgz', INSTALL_opts = c("--no-multiarch")) install_github("sassalley/hexmapr") install_github("hadley/multidplyr") install_github("dselivanov/LSHR") # install latest sparklyr and Spark (for local mode) install_github("rstudio/sparklyr") sparklyr::spark_install() install.packages("genderdata", repos = "http://packages.ropensci.org") install.packages("openNLPmodels.en", repos = "http://datacube.wu.ac.at/", type = "source") install_github("davpinto/fastknn") install_github("mukul13/rword2vec") # These signal processing libraries are on CRAN, but they require apt-get dependences that are # handled in this image's Dockerfile. install.packages("fftw") # https://github.com/Kaggle/docker-rstats/issues/74 install_github("thomasp85/patchwork") # https://github.com/Kaggle/docker-rstats/issues/73 install.packages("topicmodels") install.packages("tesseract") # Try to reinstall igraph and imager her until fixed in rcran. 
install.packages("igraph") install.packages("imager") # Torch: install the full package upfront otherwise it will be installed on loading the package which doesn't work for kernels # without internet (competitions for example). library(torch) install_torch(reinstall = TRUE) # The R Keras package must be reinstalled after installing it in the python virtualenv. install_version("keras", version = "2.6.0", ask=FALSE)
/package_installs.R
permissive
carlsvelz/docker-rstats
R
false
false
2,338
r
library(devtools) options(repos = c("CRAN" = "http://cran.us.r-project.org")) options(Ncpus = parallel::detectCores()) # Set download method, to avoid the default behavior of using # R's internal HTTP implementation, which doesn't support HTTPS connections. # https://stackoverflow.com/questions/45061272/r-and-ssl-curl-on-ubuntu-linux-failed-ssl-connect-in-r-but-works-in-curl options(download.file.method = "libcurl") # Install the lightGBM installer package install_github("Laurae2/lgbdl") lgbdl::lgb.dl(compiler = "gcc", commit = "tags/v2.3.1") install_github("dgrtwo/widyr") install_github("ellisp/forecastxgb-r-package/pkg") install_github("rstudio/leaflet") # install_github fails for catboost. # Following direct installation instructions instead: https://tech.yandex.com/catboost/doc/dg/installation/r-installation-binary-installation-docpage/ install_url('https://github.com/catboost/catboost/releases/download/v0.23.2/catboost-R-Linux-0.23.2.tgz', INSTALL_opts = c("--no-multiarch")) install_github("sassalley/hexmapr") install_github("hadley/multidplyr") install_github("dselivanov/LSHR") # install latest sparklyr and Spark (for local mode) install_github("rstudio/sparklyr") sparklyr::spark_install() install.packages("genderdata", repos = "http://packages.ropensci.org") install.packages("openNLPmodels.en", repos = "http://datacube.wu.ac.at/", type = "source") install_github("davpinto/fastknn") install_github("mukul13/rword2vec") # These signal processing libraries are on CRAN, but they require apt-get dependences that are # handled in this image's Dockerfile. install.packages("fftw") # https://github.com/Kaggle/docker-rstats/issues/74 install_github("thomasp85/patchwork") # https://github.com/Kaggle/docker-rstats/issues/73 install.packages("topicmodels") install.packages("tesseract") # Try to reinstall igraph and imager her until fixed in rcran. 
install.packages("igraph") install.packages("imager") # Torch: install the full package upfront otherwise it will be installed on loading the package which doesn't work for kernels # without internet (competitions for example). library(torch) install_torch(reinstall = TRUE) # The R Keras package must be reinstalled after installing it in the python virtualenv. install_version("keras", version = "2.6.0", ask=FALSE)
compileResults.OUTRIDER <- function(object, padjCutoff=0.05, zScoreCutoff=0, round=2, all=FALSE, ...){ # # input check and parsing # checkOutriderDataSet(object) checkFullAnalysis(object) if(is.null(rownames(object))){ rownames(object) <- paste('feature', seq_len(nrow(object)), sep='_') } if(is.null(colnames(object))){ colnames(object) <- paste('sample', seq_len(ncol(object)), sep='_') } if(isTRUE(round)){ round <- 2 } if(isFALSE(all)){ abByGene <- aberrant(object, padjCutoff=padjCutoff, zScoreCutoff=zScoreCutoff, by="gene") abBySample <- aberrant(object, padjCutoff=padjCutoff, zScoreCutoff=zScoreCutoff, by="sample") object <- object[abByGene > 0, abBySample > 0] } if(nrow(object) == 0){ if(isFALSE(all)){ warning('No significant events: use all=TRUE to print all events.') } else { warning('Please provide an object with at least one feature.') } return(data.table( geneID=NA_character_, sampleID=NA_character_, pValue=NA_real_, padjust=NA_real_, zScore=NA_real_, l2fc=NA_real_, rawcounts=NA_integer_, normcounts=NA_real_, meanCorrected=NA_real_, theta=NA_real_, aberrant=NA, AberrantBySample=NA_integer_, AberrantByGene=NA_integer_, padj_rank=NA_real_)[0]) } # # extract data # ans <- data.table( geneID = rownames(object), sampleID = rep(colnames(object), each=nrow(object)), pValue = c(pValue(object)), padjust = c(padj(object)), zScore = c(zScore(object)), l2fc = c(assay(object, "l2fc")), rawcounts = c(counts(object)), normcounts = c(counts(object, normalized=TRUE)), meanCorrected = rowMeans(counts(object, normalized=TRUE)), theta = theta(object), aberrant = c(aberrant(object, padjCutoff=padjCutoff, zScoreCutoff=zScoreCutoff)), AberrantBySample = rep(each=nrow(object), aberrant(object, padjCutoff=padjCutoff, zScoreCutoff=zScoreCutoff, by="sample")), AberrantByGene = aberrant(object, padjCutoff=padjCutoff, zScoreCutoff=zScoreCutoff, by="gene"), padj_rank = c(apply(padj(object), 2, rank))) # round columns if requested if(is.numeric(round)){ devNull <- lapply( c("normcounts", 
"zScore", "l2fc", "theta", "meanCorrected"), function(x) ans[,c(x):=round(get(x), as.integer(round))] ) } # # keep only aberrent events and sort by padj value # if(isFALSE(all)){ ans <- ans[aberrant == TRUE] } ans <- ans[order(padjust)] return(ans) } #' #' Accessor function for the 'results' object in an OutriderDataSet object. #' #' This function assembles a results table of significant outlier events based #' on the given filter criteria. The table contains various information #' accumulated over the analysis pipeline. #' #' @param object An OutriderDataSet object #' @param padjCutoff The significant threshold to be applied #' @param zScoreCutoff If provided additionally a z score threshold is applied #' @param round Can be TRUE, defaults to 2, or an integer used for rounding #' with \code{\link[base]{round}} to make the output #' more user friendly #' @param all By default FALSE, only significant read counts are listed in the #' results. If TRUE all results are assembled resulting in a #' data.table of length samples x genes #' @param ... Additional arguments, currently not used #' #' @return A data.table where each row is an outlier event and the columns #' contain additional information about this event. Eg padj, l2fc #' #' @examples #' #' ods <- makeExampleOutriderDataSet() #' \dontshow{ #' ods <- ods[1:10,1:10] #' } #' ods <- OUTRIDER(ods) #' #' res <- results(ods, all=TRUE) #' res #' #' @name results #' @rdname results #' @aliases results results,OutriderDataSet-method #' #' @export setMethod("results", "OutriderDataSet", compileResults.OUTRIDER)
/R/method-results.R
permissive
standardgalactic/OUTRIDER
R
false
false
4,529
r
compileResults.OUTRIDER <- function(object, padjCutoff=0.05, zScoreCutoff=0, round=2, all=FALSE, ...){ # # input check and parsing # checkOutriderDataSet(object) checkFullAnalysis(object) if(is.null(rownames(object))){ rownames(object) <- paste('feature', seq_len(nrow(object)), sep='_') } if(is.null(colnames(object))){ colnames(object) <- paste('sample', seq_len(ncol(object)), sep='_') } if(isTRUE(round)){ round <- 2 } if(isFALSE(all)){ abByGene <- aberrant(object, padjCutoff=padjCutoff, zScoreCutoff=zScoreCutoff, by="gene") abBySample <- aberrant(object, padjCutoff=padjCutoff, zScoreCutoff=zScoreCutoff, by="sample") object <- object[abByGene > 0, abBySample > 0] } if(nrow(object) == 0){ if(isFALSE(all)){ warning('No significant events: use all=TRUE to print all events.') } else { warning('Please provide an object with at least one feature.') } return(data.table( geneID=NA_character_, sampleID=NA_character_, pValue=NA_real_, padjust=NA_real_, zScore=NA_real_, l2fc=NA_real_, rawcounts=NA_integer_, normcounts=NA_real_, meanCorrected=NA_real_, theta=NA_real_, aberrant=NA, AberrantBySample=NA_integer_, AberrantByGene=NA_integer_, padj_rank=NA_real_)[0]) } # # extract data # ans <- data.table( geneID = rownames(object), sampleID = rep(colnames(object), each=nrow(object)), pValue = c(pValue(object)), padjust = c(padj(object)), zScore = c(zScore(object)), l2fc = c(assay(object, "l2fc")), rawcounts = c(counts(object)), normcounts = c(counts(object, normalized=TRUE)), meanCorrected = rowMeans(counts(object, normalized=TRUE)), theta = theta(object), aberrant = c(aberrant(object, padjCutoff=padjCutoff, zScoreCutoff=zScoreCutoff)), AberrantBySample = rep(each=nrow(object), aberrant(object, padjCutoff=padjCutoff, zScoreCutoff=zScoreCutoff, by="sample")), AberrantByGene = aberrant(object, padjCutoff=padjCutoff, zScoreCutoff=zScoreCutoff, by="gene"), padj_rank = c(apply(padj(object), 2, rank))) # round columns if requested if(is.numeric(round)){ devNull <- lapply( c("normcounts", 
"zScore", "l2fc", "theta", "meanCorrected"), function(x) ans[,c(x):=round(get(x), as.integer(round))] ) } # # keep only aberrent events and sort by padj value # if(isFALSE(all)){ ans <- ans[aberrant == TRUE] } ans <- ans[order(padjust)] return(ans) } #' #' Accessor function for the 'results' object in an OutriderDataSet object. #' #' This function assembles a results table of significant outlier events based #' on the given filter criteria. The table contains various information #' accumulated over the analysis pipeline. #' #' @param object An OutriderDataSet object #' @param padjCutoff The significant threshold to be applied #' @param zScoreCutoff If provided additionally a z score threshold is applied #' @param round Can be TRUE, defaults to 2, or an integer used for rounding #' with \code{\link[base]{round}} to make the output #' more user friendly #' @param all By default FALSE, only significant read counts are listed in the #' results. If TRUE all results are assembled resulting in a #' data.table of length samples x genes #' @param ... Additional arguments, currently not used #' #' @return A data.table where each row is an outlier event and the columns #' contain additional information about this event. Eg padj, l2fc #' #' @examples #' #' ods <- makeExampleOutriderDataSet() #' \dontshow{ #' ods <- ods[1:10,1:10] #' } #' ods <- OUTRIDER(ods) #' #' res <- results(ods, all=TRUE) #' res #' #' @name results #' @rdname results #' @aliases results results,OutriderDataSet-method #' #' @export setMethod("results", "OutriderDataSet", compileResults.OUTRIDER)
/Kassambara-Ggplot.R
no_license
christianchmejia/GgPlot
R
false
false
66,285
r
### a first look at the basic features ### calc stats for all train.mean.all <- apply(train[, train.colnames.stats.numeric], 2, function(x) mean(x, na.rm = TRUE)) train.median.all <- apply(train[, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.5, na.rm = TRUE)) #train.q20.all <- apply(train[, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.2, na.rm = TRUE)) #train.q80.all <- apply(train[, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.8, na.rm = TRUE)) ### calc stats for humans train.mean.humans <- apply(train[train$outcome == 0, train.colnames.stats.numeric], 2, function(x) mean(x, na.rm = TRUE)) train.median.humans <- apply(train[train$outcome == 0, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.5, na.rm = TRUE)) train.q20.humans <- apply(train[train$outcome == 0, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.2, na.rm = TRUE)) train.q80.humans <- apply(train[train$outcome == 0, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.8, na.rm = TRUE)) ### calc stats for robots train.mean.robots <- apply(train[train$outcome == 1, train.colnames.stats.numeric], 2, function(x) mean(x, na.rm = TRUE)) train.median.robots <- apply(train[train$outcome == 1, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.5, na.rm = TRUE)) train.q20.robots <- apply(train[train$outcome == 1, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.2, na.rm = TRUE)) train.q80.robots <- apply(train[train$outcome == 1, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.8, na.rm = TRUE)) ### create dataframe train.stats.numeric <- data.frame(train.mean.all, train.mean.humans, train.mean.robots, train.median.all, train.median.humans, train.median.robots, train.q20.humans, train.q80.humans, train.q20.robots, train.q80.robots) ### shorten colnames of data frame colnames(train.stats.numeric) <- sapply(colnames(train.stats.numeric), function(x) gsub("train.", "", x, fixed = TRUE)) ### print data frame 
print(round(train.stats.numeric, 2))
/features_basic_analysis.R
permissive
PhilippMohr/UniHH-Project-BigData-Challenge
R
false
false
2,072
r
### a first look at the basic features ### calc stats for all train.mean.all <- apply(train[, train.colnames.stats.numeric], 2, function(x) mean(x, na.rm = TRUE)) train.median.all <- apply(train[, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.5, na.rm = TRUE)) #train.q20.all <- apply(train[, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.2, na.rm = TRUE)) #train.q80.all <- apply(train[, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.8, na.rm = TRUE)) ### calc stats for humans train.mean.humans <- apply(train[train$outcome == 0, train.colnames.stats.numeric], 2, function(x) mean(x, na.rm = TRUE)) train.median.humans <- apply(train[train$outcome == 0, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.5, na.rm = TRUE)) train.q20.humans <- apply(train[train$outcome == 0, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.2, na.rm = TRUE)) train.q80.humans <- apply(train[train$outcome == 0, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.8, na.rm = TRUE)) ### calc stats for robots train.mean.robots <- apply(train[train$outcome == 1, train.colnames.stats.numeric], 2, function(x) mean(x, na.rm = TRUE)) train.median.robots <- apply(train[train$outcome == 1, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.5, na.rm = TRUE)) train.q20.robots <- apply(train[train$outcome == 1, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.2, na.rm = TRUE)) train.q80.robots <- apply(train[train$outcome == 1, train.colnames.stats.numeric], 2, function(x) quantile(x, 0.8, na.rm = TRUE)) ### create dataframe train.stats.numeric <- data.frame(train.mean.all, train.mean.humans, train.mean.robots, train.median.all, train.median.humans, train.median.robots, train.q20.humans, train.q80.humans, train.q20.robots, train.q80.robots) ### shorten colnames of data frame colnames(train.stats.numeric) <- sapply(colnames(train.stats.numeric), function(x) gsub("train.", "", x, fixed = TRUE)) ### print data frame 
print(round(train.stats.numeric, 2))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Write_lt.R \name{Write_lt} \alias{Write_lt} \title{\code{Write_lt} a function to write formatted .txt files for both period and cohort lifetables.} \usage{ Write_lt(WORKING = getwd(), STATSFOLDER = "RSTATS", MPVERSION, XXX = NULL, CountryLong = NULL) } \arguments{ \item{WORKING}{path to working directory, which typically ends with the HMD country abbreviation. Default \code{getwd()}.} \item{STATSFOLDER}{the folder name where output is to be written to (not a full path). Default \code{"RSTATS"}.} \item{MPVERSION}{5 or 6. Default 5. Here this only affects file headers.} \item{XXX}{the HMD country abbreviation. If left \code{NULL}, this is extracted from \code{WORKING} as the last path part.} \item{CountryLong}{the HMD country full name.} } \value{ function called for its side effect of creating the lifetable txt output files, e.g. \code{mltper_1x1.txt} and other time/sex configurations. No value returned. } \description{ This function requires that all pertinent \code{ltper_AxN.Rdata} and \code{ltcoh_AxN()} data objects be present in the folder \code{WORKING/Rbin/} for males, females and both-sex tables. Objects are selected by grepping, which works for the present HMD file naming scheme. \code{.Rdata} files are read in, rounded, formatted and written back out into \code{WORKING/RSTATS/} by default, although this can be changed with the \code{STATSFOLDER} argument. This function must be called after ltper_AxN() (for all time intervals and sexes), which must have been run with the argument \code{save.bin = TRUE}. } \author{ Tim Riffe \email{triffe@demog.berkeley.edu} }
/HMDLifeTables/HMDLifeTables/man/Write_lt.Rd
no_license
timriffe/HMDLifeTables
R
false
true
1,677
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Write_lt.R \name{Write_lt} \alias{Write_lt} \title{\code{Write_lt} a function to write formatted .txt files for both period and cohort lifetables.} \usage{ Write_lt(WORKING = getwd(), STATSFOLDER = "RSTATS", MPVERSION, XXX = NULL, CountryLong = NULL) } \arguments{ \item{WORKING}{path to working directory, which typically ends with the HMD country abbreviation. Default \code{getwd()}.} \item{STATSFOLDER}{the folder name where output is to be written to (not a full path). Default \code{"RSTATS"}.} \item{MPVERSION}{5 or 6. Default 5. Here this only affects file headers.} \item{XXX}{the HMD country abbreviation. If left \code{NULL}, this is extracted from \code{WORKING} as the last path part.} \item{CountryLong}{the HMD country full name.} } \value{ function called for its side effect of creating the lifetable txt output files, e.g. \code{mltper_1x1.txt} and other time/sex configurations. No value returned. } \description{ This function requires that all pertinent \code{ltper_AxN.Rdata} and \code{ltcoh_AxN()} data objects be present in the folder \code{WORKING/Rbin/} for males, females and both-sex tables. Objects are selected by grepping, which works for the present HMD file naming scheme. \code{.Rdata} files are read in, rounded, formatted and written back out into \code{WORKING/RSTATS/} by default, although this can be changed with the \code{STATSFOLDER} argument. This function must be called after ltper_AxN() (for all time intervals and sexes), which must have been run with the argument \code{save.bin = TRUE}. } \author{ Tim Riffe \email{triffe@demog.berkeley.edu} }
\name{condens1} \alias{condens1} \title{Conditional density estimation} \description{Estimates conditional density functions of the form f(y| x) = f(x,y)/f(x). Kernel density estimators are used to estimate f(x,y) and f(x). The conditional density function can be plotted as a three-dimensional surface or as a contour map. Alternatively, the conditional density of y can be graphed for as many as five target values of x. } \usage{ condens1(form,window=.7,bandwidth=0,kern="tcub", mingrid.x=NULL,maxgrid.x=NULL,mingrid.y=NULL,maxgrid.y=NULL,ngrid=50, xlab="x",ylab="y",zlab="fxy/fx",contour=TRUE,level=TRUE,wire=TRUE,dens=TRUE, targetx.dens=NULL,quantile.dens=c(.10,.25,.50,.75,.90),data=NULL) } \arguments{ \item{form}{Model formula} \item{window }{Window size. Default: 0.25. } \item{bandwidth }{Bandwidth. Default: not used. } \item{kern}{Kernel weighting functions. Default is the tri-cube. Options include "rect", "tria", "epan", "bisq", "tcub", "trwt", and "gauss".} \item{mingrid.x, maxgrid.x, mingrid.y, maxgrid.y, ngrid}{The mingrid and maxgrid values are the boundaries for the \emph{ngrid x ngrid} lattice used in the graphs produced by \emph{condens1}. By default, mingrid.x = min(x), maxgrid.x = max(x), mingrid.y = min(y), maxgrid.y = max(y), and ngrid=50. } \item{xlab}{Label for the \emph{x}-axis in graphs. Default: "x"} \item{ylab}{Label for the \emph{y}-axis in graphs. Default: "y"} \item{zlab}{Label for the \emph{z}-axis in graphs. Default: "fxy/fx"} \item{contour}{If \emph{contour=T}, produces a two-dimensional contour plot of the conditional density estimates. Evaluated for an \emph{ngrid} x \emph{ngrid} lattice. Default is \emph{contour=T}.} \item{level}{If \emph{level=T}, produces a two-dimensional level plot of the conditional density estimates. Evaluated for an \emph{ngrid} x \emph{ngrid} lattice. Default is \emph{level=F}.} \item{wire}{If \emph{wire=T}, produces a three-dimensional plot of the conditional density estimates. 
Evaluated for an \emph{ngrid} x \emph{ngrid} lattice. Default is \emph{wire=T}.} \item{dens}{If \emph{dens=T}, produces a plot showing how \emph{f(y|x)} varies over \emph{y} for given target values of \emph{x}. Target values of \emph{x} are provided using the \emph{targetx.dens} or \emph{quantile.dens} options. Default is dens=F.} \item{targetx.dens}{Target values for \emph{x} in the density plots, e.g, \emph{targetx.dens} = c(200,400,600). Maximum number of entries is 5. If \emph{targetx.dens} has more than 5 entries, only the first 5 will be used. Default is \emph{targetx.dens = NULL}, meaning that the target values for \emph{x} are determined by the \emph{quantile.dens} option.} \item{quantile.dens}{Quantiles for the target values for \emph{x} in the density plots, e.g, \emph{quantile.dens} = c(.25,.50,.75). Maximum number of entries is 5. If quantile.dens has more than 5 entries, only the first 5 will be used. Default is \emph{quantile.dens} = c(.10,.25,.50,.75,.90).} \item{data }{A data frame containing the data. Default: use data in the current working directory.} } \value{ \item{fx}{The values of \emph{f(x)}, one for each data point.} \item{fy}{The values of \emph{f(y)}, one for each data point.} \item{fxy}{The values of \emph{f(x,y)}, one for each data point. The conditional densities are fxy/fx for \emph{x} and fxy/fy for \emph{y}.} \item{gridmat}{An (\emph{ngrid*ngrid})x3 matrix used to produce the contour, level, and wire maps. The first column contains the lattice values for \emph{x}, the second column contains the lattice values for \emph{y}, and the third column has the estimated values of \emph{f(y|x)} at the target values for \emph{x} and \emph{y}.} \item{densmat}{The estimated values of \emph{f(y|x)} for the two-dimensional density graphs produced when \emph{dens = TRUE}. 
If the number of observations in the call to \emph{condens1} is \emph{n} and the number of entries in \emph{quantile.dens} is \emph{nq}, then densmat is an \emph{n} x \emph{nq} matrix.} } \references{ Li, Oi and Jeffrey Scott Racine. \emph{Nonparametric Econometrics: Theory and Practice.} Princeton, NJ: Princeton University Press, 2007. Loader, Clive. \emph{Local Regression and Likelihood.} New York: Springer, 1999. Pagan, Adrian and Aman Ullah. \emph{Nonparametric Econometrics.} New York: Cambridge University Press, 1999. } \author{ Daniel McMillen } \seealso{ \link{qreglwr1} } \examples{ data(dupage99) dupage99$ratio <- dupage99$av/dupage99$price dropobs1 <- nptrim_obs(dupage99$ratio,k=3) dropobs2 <- (dupage99$price<quantile(dupage99$price,.01))| (dupage99$price>quantile(dupage99$price,.99)) dupage99 <- dupage99[dropobs1==FALSE&dropobs2==FALSE,] o <- order(dupage99$price) dupage99 <- dupage99[o,] dupage99$price <- dupage99$price/1000 par(ask=TRUE) # show all plots fit <- condens1(ratio~price, window=.7, xlab="Price", ylab="Assessment Ratio", zlab="Density", targetx.dens=c(100,200,300,400), ngrid=40, data=dupage99) # skip plots in base command; add them afterward fit <- condens1(ratio~price, window=.7, contour=FALSE, level=FALSE, wire=FALSE, dens=FALSE, targetx.dens=c(100,200,300,400), ngrid=40, data=dupage99) contourplot(fit$gridmat[,3]~fit$gridmat[,1]*fit$gridmat[,2], xlab="Price", ylab="Assessment Ratio") levelplot(fit$gridmat[,3]~fit$gridmat[,1]*fit$gridmat[,2], xlab="Price", ylab="Assessment Ratio") wireframe(fit$gridmat[,3]~fit$gridmat[,1]*fit$gridmat[,2], xlab="Price", ylab="Assessment Ratio", zlab="Density") } \details{ The \emph{locfit} package is used to find the target values of \emph{x} for \emph{f(x)} and \emph{y} for \emph{f(y)}. The \emph{expand.grid} command is then used to determine the target values of \emph{x} and \emph{y} for \emph{f(x,y)}. 
The \emph{akima} package is used to interpolate \emph{f(x)}, \emph{f(y)}, and \emph{f(x,y)} to the full data set and to the grid of target values for the contour, level, and wire plots. The density functions \emph{f(x)} and \emph{f(y)} are as follows: \deqn{f(x) = \frac{1}{sd(x)*b*n} \sum_i K ( \frac{ x_i - x}{sd(x)*b} )}{(sd(x)*b)^{-1} mean((x_i - x)/(sd(x)*b)) } \deqn{f(y) = \frac{1}{sd(y)*b*n} \sum_i K ( \frac{ y_i - y}{sd(y)*b} )}{(sd(y)*b)^{-1} mean((y_i - y)/(sd(y)*b)) } A product kernel is used for \emph{f(x,y)}: \deqn{f(x,y) = \frac{1}{sd(x)*b*sd(y)*b*n}\sum_i K ( \frac{ x_i - x}{sd(x)*b} ) K ( \frac{ y_i - y}{sd(y)*b} ) }{(sd(x)*b*sd(y)*b)^{-1} mean (K ( (x_i - x)/(sd(x)*b) ) K ( (y_i - y)/(sd(y)*b) ) } where \emph{b} is the bandwidth and the target points are \emph{x} and \emph{y}. The bandwidth, \emph{b}, can be set using the \emph{bandwidth} option. If \emph{b} = 0 (the default), \emph{sd(x)*b} and \emph{sd(y)*b} are replaced by window values, \eqn{h = quantile(dist, window)}, where \eqn{dist = |x_i - x|} or \eqn{dist = |y_i - y|}. The window size is set using the \emph{window} option. By default, \emph{window} = .7 and \emph{bandwidth} = 0. 
Available kernel weighting functions include the following: \tabular{lll}{ Kernel \tab Call abbreviation \tab Kernel function K(z) \cr Rectangular \tab ``rect'' \tab \eqn{\frac{1}{2} I(|z| <1)}{1/2 * I(|z|<1)} \cr Triangular \tab ``tria'' \tab \eqn{(1-|z|)I(|z|<1)}{(1-|z|) * I(|z|<1)}\cr Epanechnikov \tab ``epan'' \tab \eqn{\frac{3}{4} (1-z^2) * I(|z| <1)}{3/4 * (1-z^2)*I(|z| < 1)} \cr Bi-Square \tab ``bisq'' \tab \eqn{\frac{15}{16} (1-z^2)^2 * I(|z| <1)}{15/16 * (1-z^2)^2 * I(|z| < 1)} \cr Tri-Cube \tab ``tcub'' \tab \eqn{\frac{70}{81} (1-|z|^3)^3 * I(|z| <1)}{70/81 * (1-|z|^3)^3 * I(|z| < 1)} \cr Tri-Weight \tab ``trwt'' \tab \eqn{\frac{35}{32} (1-z^2)^3 * I(|z| <1)}{35/32 * (1-z^2)^3 * I(|z| < 1)} \cr Gaussian \tab ``gauss'' \tab \eqn{(2\pi)^{-.5} e^{-z^2/2}}{2pi^{-.5} exp(-z^2/2)} \cr } The contour, level, and wire plots are produced from the values in \emph{gridmat} using the \emph{lattice} package. The two-dimensional density graphs produced when \emph{dens=TRUE} are plots of \emph{f(y,x)/f(x)} at given values of \emph{x}. By default, the values for \emph{x} are the quantiles given in \emph{quantile.dens}. Alternatively, the values of \emph{x} can be specified directly using the \emph{targetx.dens} option. The values used to construct the density graphs are stored in \emph{densmat}. Both \emph{gridmat} and \emph{densmat} are stored by \emph{condens1} even if the printing of the graphs is suppressed. }
/man/condens1.Rd
no_license
cran/aratio
R
false
false
8,598
rd
\name{condens1} \alias{condens1} \title{Conditional density estimation} \description{Estimates conditional density functions of the form f(y| x) = f(x,y)/f(x). Kernel density estimators are used to estimate f(x,y) and f(x). The conditional density function can be plotted as a three-dimensional surface or as a contour map. Alternatively, the conditional density of y can be graphed for as many as five target values of x. } \usage{ condens1(form,window=.7,bandwidth=0,kern="tcub", mingrid.x=NULL,maxgrid.x=NULL,mingrid.y=NULL,maxgrid.y=NULL,ngrid=50, xlab="x",ylab="y",zlab="fxy/fx",contour=TRUE,level=TRUE,wire=TRUE,dens=TRUE, targetx.dens=NULL,quantile.dens=c(.10,.25,.50,.75,.90),data=NULL) } \arguments{ \item{form}{Model formula} \item{window }{Window size. Default: 0.7. } \item{bandwidth }{Bandwidth. Default: not used. } \item{kern}{Kernel weighting functions. Default is the tri-cube. Options include "rect", "tria", "epan", "bisq", "tcub", "trwt", and "gauss".} \item{mingrid.x, maxgrid.x, mingrid.y, maxgrid.y, ngrid}{The mingrid and maxgrid values are the boundaries for the \emph{ngrid x ngrid} lattice used in the graphs produced by \emph{condens1}. By default, mingrid.x = min(x), maxgrid.x = max(x), mingrid.y = min(y), maxgrid.y = max(y), and ngrid=50. } \item{xlab}{Label for the \emph{x}-axis in graphs. Default: "x"} \item{ylab}{Label for the \emph{y}-axis in graphs. Default: "y"} \item{zlab}{Label for the \emph{z}-axis in graphs. Default: "fxy/fx"} \item{contour}{If \emph{contour=T}, produces a two-dimensional contour plot of the conditional density estimates. Evaluated for an \emph{ngrid} x \emph{ngrid} lattice. Default is \emph{contour=T}.} \item{level}{If \emph{level=T}, produces a two-dimensional level plot of the conditional density estimates. Evaluated for an \emph{ngrid} x \emph{ngrid} lattice. Default is \emph{level=T}.} \item{wire}{If \emph{wire=T}, produces a three-dimensional plot of the conditional density estimates. 
Evaluated for an \emph{ngrid} x \emph{ngrid} lattice. Default is \emph{wire=T}.} \item{dens}{If \emph{dens=T}, produces a plot showing how \emph{f(y|x)} varies over \emph{y} for given target values of \emph{x}. Target values of \emph{x} are provided using the \emph{targetx.dens} or \emph{quantile.dens} options. Default is dens=F.} \item{targetx.dens}{Target values for \emph{x} in the density plots, e.g, \emph{targetx.dens} = c(200,400,600). Maximum number of entries is 5. If \emph{targetx.dens} has more than 5 entries, only the first 5 will be used. Default is \emph{targetx.dens = NULL}, meaning that the target values for \emph{x} are determined by the \emph{quantile.dens} option.} \item{quantile.dens}{Quantiles for the target values for \emph{x} in the density plots, e.g, \emph{quantile.dens} = c(.25,.50,.75). Maximum number of entries is 5. If quantile.dens has more than 5 entries, only the first 5 will be used. Default is \emph{quantile.dens} = c(.10,.25,.50,.75,.90).} \item{data }{A data frame containing the data. Default: use data in the current working directory.} } \value{ \item{fx}{The values of \emph{f(x)}, one for each data point.} \item{fy}{The values of \emph{f(y)}, one for each data point.} \item{fxy}{The values of \emph{f(x,y)}, one for each data point. The conditional densities are fxy/fx for \emph{x} and fxy/fy for \emph{y}.} \item{gridmat}{An (\emph{ngrid*ngrid})x3 matrix used to produce the contour, level, and wire maps. The first column contains the lattice values for \emph{x}, the second column contains the lattice values for \emph{y}, and the third column has the estimated values of \emph{f(y|x)} at the target values for \emph{x} and \emph{y}.} \item{densmat}{The estimated values of \emph{f(y|x)} for the two-dimensional density graphs produced when \emph{dens = TRUE}. 
If the number of observations in the call to \emph{condens1} is \emph{n} and the number of entries in \emph{quantile.dens} is \emph{nq}, then densmat is an \emph{n} x \emph{nq} matrix.} } \references{ Li, Oi and Jeffrey Scott Racine. \emph{Nonparametric Econometrics: Theory and Practice.} Princeton, NJ: Princeton University Press, 2007. Loader, Clive. \emph{Local Regression and Likelihood.} New York: Springer, 1999. Pagan, Adrian and Aman Ullah. \emph{Nonparametric Econometrics.} New York: Cambridge University Press, 1999. } \author{ Daniel McMillen } \seealso{ \link{qreglwr1} } \examples{ data(dupage99) dupage99$ratio <- dupage99$av/dupage99$price dropobs1 <- nptrim_obs(dupage99$ratio,k=3) dropobs2 <- (dupage99$price<quantile(dupage99$price,.01))| (dupage99$price>quantile(dupage99$price,.99)) dupage99 <- dupage99[dropobs1==FALSE&dropobs2==FALSE,] o <- order(dupage99$price) dupage99 <- dupage99[o,] dupage99$price <- dupage99$price/1000 par(ask=TRUE) # show all plots fit <- condens1(ratio~price, window=.7, xlab="Price", ylab="Assessment Ratio", zlab="Density", targetx.dens=c(100,200,300,400), ngrid=40, data=dupage99) # skip plots in base command; add them afterward fit <- condens1(ratio~price, window=.7, contour=FALSE, level=FALSE, wire=FALSE, dens=FALSE, targetx.dens=c(100,200,300,400), ngrid=40, data=dupage99) contourplot(fit$gridmat[,3]~fit$gridmat[,1]*fit$gridmat[,2], xlab="Price", ylab="Assessment Ratio") levelplot(fit$gridmat[,3]~fit$gridmat[,1]*fit$gridmat[,2], xlab="Price", ylab="Assessment Ratio") wireframe(fit$gridmat[,3]~fit$gridmat[,1]*fit$gridmat[,2], xlab="Price", ylab="Assessment Ratio", zlab="Density") } \details{ The \emph{locfit} package is used to find the target values of \emph{x} for \emph{f(x)} and \emph{y} for \emph{f(y)}. The \emph{expand.grid} command is then used to determine the target values of \emph{x} and \emph{y} for \emph{f(x,y)}. 
The \emph{akima} package is used to interpolate \emph{f(x)}, \emph{f(y)}, and \emph{f(x,y)} to the full data set and to the grid of target values for the contour, level, and wire plots. The density functions \emph{f(x)} and \emph{f(y)} are as follows: \deqn{f(x) = \frac{1}{sd(x)*b*n} \sum_i K ( \frac{ x_i - x}{sd(x)*b} )}{(sd(x)*b)^{-1} mean((x_i - x)/(sd(x)*b)) } \deqn{f(y) = \frac{1}{sd(y)*b*n} \sum_i K ( \frac{ y_i - y}{sd(y)*b} )}{(sd(y)*b)^{-1} mean((y_i - y)/(sd(y)*b)) } A product kernel is used for \emph{f(x,y)}: \deqn{f(x,y) = \frac{1}{sd(x)*b*sd(y)*b*n}\sum_i K ( \frac{ x_i - x}{sd(x)*b} ) K ( \frac{ y_i - y}{sd(y)*b} ) }{(sd(x)*b*sd(y)*b)^{-1} mean (K ( (x_i - x)/(sd(x)*b) ) K ( (y_i - y)/(sd(y)*b) ) } where \emph{b} is the bandwidth and the target points are \emph{x} and \emph{y}. The bandwidth, \emph{b}, can be set using the \emph{bandwidth} option. If \emph{b} = 0 (the default), \emph{sd(x)*b} and \emph{sd(y)*b} are replaced by window values, \eqn{h = quantile(dist, window)}, where \eqn{dist = |x_i - x|} or \eqn{dist = |y_i - y|}. The window size is set using the \emph{window} option. By default, \emph{window} = .7 and \emph{bandwidth} = 0. 
Available kernel weighting functions include the following: \tabular{lll}{ Kernel \tab Call abbreviation \tab Kernel function K(z) \cr Rectangular \tab ``rect'' \tab \eqn{\frac{1}{2} I(|z| <1)}{1/2 * I(|z|<1)} \cr Triangular \tab ``tria'' \tab \eqn{(1-|z|)I(|z|<1)}{(1-|z|) * I(|z|<1)}\cr Epanechnikov \tab ``epan'' \tab \eqn{\frac{3}{4} (1-z^2) * I(|z| <1)}{3/4 * (1-z^2)*I(|z| < 1)} \cr Bi-Square \tab ``bisq'' \tab \eqn{\frac{15}{16} (1-z^2)^2 * I(|z| <1)}{15/16 * (1-z^2)^2 * I(|z| < 1)} \cr Tri-Cube \tab ``tcub'' \tab \eqn{\frac{70}{81} (1-|z|^3)^3 * I(|z| <1)}{70/81 * (1-|z|^3)^3 * I(|z| < 1)} \cr Tri-Weight \tab ``trwt'' \tab \eqn{\frac{35}{32} (1-z^2)^3 * I(|z| <1)}{35/32 * (1-z^2)^3 * I(|z| < 1)} \cr Gaussian \tab ``gauss'' \tab \eqn{(2\pi)^{-.5} e^{-z^2/2}}{2pi^{-.5} exp(-z^2/2)} \cr } The contour, level, and wire plots are produced from the values in \emph{gridmat} using the \emph{lattice} package. The two-dimensional density graphs produced when \emph{dens=TRUE} are plots of \emph{f(y,x)/f(x)} at given values of \emph{x}. By default, the values for \emph{x} are the quantiles given in \emph{quantile.dens}. Alternatively, the values of \emph{x} can be specified directly using the \emph{targetx.dens} option. The values used to construct the density graphs are stored in \emph{densmat}. Both \emph{gridmat} and \emph{densmat} are stored by \emph{condens1} even if the printing of the graphs is suppressed. }
#' @importFrom dplyr vars `%>%` tibble
/R/utils.R
no_license
m-clark/tidyext
R
false
false
39
r
#' @importFrom dplyr vars `%>%` tibble
# run_analysis.R -- tidies the UCI HAR smartphone dataset.
# Reads the raw test/train files from the working directory, merges them,
# keeps only the mean/std measurements, attaches descriptive activity names,
# cleans up variable names, and writes the per-subject/per-activity averages
# to tidy_dataset.txt.

# 1.1 Import test data
X_test <- read.table("X_test.txt", sep = "", header = FALSE)
y_test <- read.table("y_test.txt", sep = "", header = FALSE)
subject_test <- read.table("subject_test.txt", sep = "", header = FALSE)

# 1.2 Import train data
X_train <- read.table("X_train.txt", sep = "", header = FALSE)
y_train <- read.table("y_train.txt", sep = "", header = FALSE)
subject_train <- read.table("subject_train.txt", sep = "", header = FALSE)

# 1.3 Import features and activity labels
features <- read.table("features.txt", col.names = c("ID", "Name"))
activity_labels <- read.table("activity_labels.txt",
                              col.names = c("IdActivity", "Activity"))

# 1.4 Add column names
colnames(X_test) <- features$Name
colnames(y_test) <- "IdActivity"
colnames(subject_test) <- "IdSubject"
colnames(X_train) <- features$Name
colnames(y_train) <- "IdActivity"
colnames(subject_train) <- "IdSubject"

# 1.5 Merge test and train into a single data set
test.data <- cbind(subject_test, y_test, X_test)
train.data <- cbind(subject_train, y_train, X_train)
all.data <- rbind(test.data, train.data)

# 2. Extract only the mean and std of each measurement (keep the id columns)
all.data_m_std <- all.data[, grepl("mean|std|IdSubject|IdActivity",
                                   colnames(all.data))]

# 3. Use descriptive activity names to name the activities in the data set.
# Install plyr only when it is missing -- an unconditional install.packages()
# call does not belong in an analysis script.
if (!requireNamespace("plyr", quietly = TRUE)) {
  install.packages("plyr")
}
library(plyr)
all.data_m_std <- join(all.data_m_std, activity_labels,
                       by = "IdActivity", match = "first")
all.data_m_std <- all.data_m_std[, -1]           # drop the IdActivity column
all.data_m_std <- all.data_m_std[, c(81, 1:80)]  # move Activity label to the front

# 4. Label the data set with descriptive variable names.
names(all.data_m_std) <- gsub("Acc", "Accelerometer", names(all.data_m_std))
names(all.data_m_std) <- gsub("Gyro", "Gyroscope", names(all.data_m_std))
names(all.data_m_std) <- gsub("Mag", "Magnitude", names(all.data_m_std))
names(all.data_m_std) <- gsub("Freq", "Frequency", names(all.data_m_std))
names(all.data_m_std) <- gsub("angle", "Angle", names(all.data_m_std))
names(all.data_m_std) <- gsub("gravity", "Gravity", names(all.data_m_std))
names(all.data_m_std) <- gsub("^t", "Time", names(all.data_m_std))
names(all.data_m_std) <- gsub("^f", "Frequency", names(all.data_m_std))
# (removed dead assignment `names <- names(all.data_m_std)`, which only
# shadowed base::names and was never used)

# 5. From the data set in step 4, create a second, independent tidy data set
#    with the average of each variable for each activity and each subject.
all.data_m_std_tidy <- aggregate(all.data_m_std[, 3:81],
                                 by = list(all.data_m_std$Activity,
                                           all.data_m_std$IdSubject),
                                 mean)
colnames(all.data_m_std_tidy)[1:2] <- c("Activity", "IdSubject")

# Final step: write the tidy data set of output 5 to disk
write.table(all.data_m_std_tidy, "./tidy_dataset.txt", row.names = FALSE)
/run_analysis.R
no_license
srh-k4/Getting_and_Cleaning_Data
R
false
false
2,708
r
# run_analysis.R -- tidies the UCI HAR smartphone dataset.
# Reads the raw test/train files from the working directory, merges them,
# keeps only the mean/std measurements, attaches descriptive activity names,
# cleans up variable names, and writes the per-subject/per-activity averages
# to tidy_dataset.txt.

# 1.1 Import test data
X_test <- read.table("X_test.txt", sep = "", header = FALSE)
y_test <- read.table("y_test.txt", sep = "", header = FALSE)
subject_test <- read.table("subject_test.txt", sep = "", header = FALSE)

# 1.2 Import train data
X_train <- read.table("X_train.txt", sep = "", header = FALSE)
y_train <- read.table("y_train.txt", sep = "", header = FALSE)
subject_train <- read.table("subject_train.txt", sep = "", header = FALSE)

# 1.3 Import features and activity labels
features <- read.table("features.txt", col.names = c("ID", "Name"))
activity_labels <- read.table("activity_labels.txt",
                              col.names = c("IdActivity", "Activity"))

# 1.4 Add column names
colnames(X_test) <- features$Name
colnames(y_test) <- "IdActivity"
colnames(subject_test) <- "IdSubject"
colnames(X_train) <- features$Name
colnames(y_train) <- "IdActivity"
colnames(subject_train) <- "IdSubject"

# 1.5 Merge test and train into a single data set
test.data <- cbind(subject_test, y_test, X_test)
train.data <- cbind(subject_train, y_train, X_train)
all.data <- rbind(test.data, train.data)

# 2. Extract only the mean and std of each measurement (keep the id columns)
all.data_m_std <- all.data[, grepl("mean|std|IdSubject|IdActivity",
                                   colnames(all.data))]

# 3. Use descriptive activity names to name the activities in the data set.
# Install plyr only when it is missing -- an unconditional install.packages()
# call does not belong in an analysis script.
if (!requireNamespace("plyr", quietly = TRUE)) {
  install.packages("plyr")
}
library(plyr)
all.data_m_std <- join(all.data_m_std, activity_labels,
                       by = "IdActivity", match = "first")
all.data_m_std <- all.data_m_std[, -1]           # drop the IdActivity column
all.data_m_std <- all.data_m_std[, c(81, 1:80)]  # move Activity label to the front

# 4. Label the data set with descriptive variable names.
names(all.data_m_std) <- gsub("Acc", "Accelerometer", names(all.data_m_std))
names(all.data_m_std) <- gsub("Gyro", "Gyroscope", names(all.data_m_std))
names(all.data_m_std) <- gsub("Mag", "Magnitude", names(all.data_m_std))
names(all.data_m_std) <- gsub("Freq", "Frequency", names(all.data_m_std))
names(all.data_m_std) <- gsub("angle", "Angle", names(all.data_m_std))
names(all.data_m_std) <- gsub("gravity", "Gravity", names(all.data_m_std))
names(all.data_m_std) <- gsub("^t", "Time", names(all.data_m_std))
names(all.data_m_std) <- gsub("^f", "Frequency", names(all.data_m_std))
# (removed dead assignment `names <- names(all.data_m_std)`, which only
# shadowed base::names and was never used)

# 5. From the data set in step 4, create a second, independent tidy data set
#    with the average of each variable for each activity and each subject.
all.data_m_std_tidy <- aggregate(all.data_m_std[, 3:81],
                                 by = list(all.data_m_std$Activity,
                                           all.data_m_std$IdSubject),
                                 mean)
colnames(all.data_m_std_tidy)[1:2] <- c("Activity", "IdSubject")

# Final step: write the tidy data set of output 5 to disk
write.table(all.data_m_std_tidy, "./tidy_dataset.txt", row.names = FALSE)
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/pal-dichromat.r \name{dichromat_pal} \alias{dichromat_pal} \title{Dichromat (colour-blind) palette (discrete).} \usage{ dichromat_pal(name) } \arguments{ \item{name}{Name of colour palette. One of: \Sexpr[results=rd,stage=build]{scales:::dichromat_schemes()}} } \description{ Dichromat (colour-blind) palette (discrete). } \examples{ show_col(dichromat_pal("BluetoOrange.10")(10)) show_col(dichromat_pal("BluetoOrange.10")(5)) # Can use with gradient_n to create a continuous gradient cols <- dichromat_pal("DarkRedtoBlue.12")(12) show_col(gradient_n_pal(cols)(seq(0, 1, length = 30))) }
/man/dichromat_pal.Rd
no_license
LluisRamon/scales
R
false
false
676
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/pal-dichromat.r \name{dichromat_pal} \alias{dichromat_pal} \title{Dichromat (colour-blind) palette (discrete).} \usage{ dichromat_pal(name) } \arguments{ \item{name}{Name of colour palette. One of: \Sexpr[results=rd,stage=build]{scales:::dichromat_schemes()}} } \description{ Dichromat (colour-blind) palette (discrete). } \examples{ show_col(dichromat_pal("BluetoOrange.10")(10)) show_col(dichromat_pal("BluetoOrange.10")(5)) # Can use with gradient_n to create a continuous gradient cols <- dichromat_pal("DarkRedtoBlue.12")(12) show_col(gradient_n_pal(cols)(seq(0, 1, length = 30))) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ngchmshaidy.R \name{ngchmGetDataFileShaid} \alias{ngchmGetDataFileShaid} \title{Compute shaid for a data file} \usage{ ngchmGetDataFileShaid(format, filename) } \arguments{ \item{format}{The format of the data file} \item{filename}{The filesystem path to the data file} } \value{ The shaid of the data file } \description{ Compute shaid for a data file }
/man/ngchmGetDataFileShaid.Rd
no_license
MD-Anderson-Bioinformatics/NGCHM-R
R
false
true
434
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ngchmshaidy.R \name{ngchmGetDataFileShaid} \alias{ngchmGetDataFileShaid} \title{Compute shaid for a data file} \usage{ ngchmGetDataFileShaid(format, filename) } \arguments{ \item{format}{The format of the data file} \item{filename}{The filesystem path to the data file} } \value{ The shaid of the data file } \description{ Compute shaid for a data file }