blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
โŒ€
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
โŒ€
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
โŒ€
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
bae2438133a3963a633456eff031eaf35284c829
c55c02f27dc68f5a912a0cb7edf232ddc7197f7b
/exercises/test_02_05.R
98f20234d7e0cd28f90b3e2275a4d94338e90c16
[ "MIT", "CC-BY-4.0" ]
permissive
benmarwick/gams-in-r-course
3e631518be8ab89c9e08d83743aa15053a8bc9d1
ed45f12a183d1ba023ee43e8b2fa557773c9b5ef
refs/heads/master
2020-05-27T19:02:16.685461
2019-05-27T02:01:13
2019-05-27T02:01:13
188,754,422
0
0
null
2019-05-27T02:00:18
2019-05-27T02:00:18
null
UTF-8
R
false
false
186
r
test_02_05.R
# Exercise checker: called by the course framework after the learner submits.
# Unconditionally reports success with feedback pointing to the next step.
test <- function() {
  success("Looking good! Plotting residuals helps you understand the quality of your model fit. Now let's try selecting different parts of your model to visualize.")
}
11ae0bda64e67d9366969100f5ce6402b046f9ff
830e99285dbf49c89fb429147e3986bfd242a759
/R/module_desctools.R
235ca3e824973386fca6fb0a52464b342e568fc8
[]
no_license
DaniloCVieira/iMESc
41501e9f39b79753e2ced4b2dc20f1118eba28d7
e31e1c6fb13a685873bc875a77ef7c914d7171f6
refs/heads/main
2023-09-04T22:00:41.355446
2023-08-16T09:56:41
2023-08-16T09:56:41
412,212,574
2
0
null
null
null
null
UTF-8
R
false
false
138,274
r
module_desctools.R
#button( #input( #link( ##buttons( #' @export module_ui_desctools<-function(id){ ns<-NS(id) tagList( column(12,style="background: white", #inline( actionButton(ns("teste_comb"),"SAVE")), #uiOutput(ns("bug")), uiOutput(ns("upload_selection")), uiOutput(ns("panel_main")) ) ) } # Server #' @export module_server_desctools<-function (input,output,session,vals,df_colors,newcolhabs,df_symbol ){ ns<-session$ns pw_icon <- base64enc::dataURI(file = "inst/app/www/pwrda_icon.png", mime = "image/png") smw_icon <- base64enc::dataURI(file = "inst/app/www/smw_icon.png", mime = "image/png") observeEvent(ignoreInit = T,input$teste_comb,{ savereac() }) symbols<-c("pch1","pch2","pch3","pch4",'pch5','pch6','pch7',"pch8") df_symbol <- data.frame( val = c(16,15,17,18,8,1,5,3) ) for(i in 1:length(symbols)) { symbol1<-base64enc::dataURI(file = paste0('inst/app/www/pch',i,".png"), mime = "image/png") df_symbol$img[i]<- sprintf(paste0(img(src = symbol1, width = '10')))} box_y_cur<-reactiveValues(df=1) filter_box2_cur<-reactiveValues(df=1) filter_box1_cur<-reactiveValues(df=1) boxplot_X_cur<-reactiveValues(df=1) bag_smw<-reactiveValues(df=F) aggreg_reac<-reactiveValues(df=0) updp<-reactiveValues(df=F) updp0<-reactiveValues(df=F) getsolid_col<-reactive({ res<-lapply(vals$newcolhabs, function(x) x(2)) res1<-unlist(lapply(res, function(x) x[1]==x[2])) solid<-names(res1[res1==T]) pic<-which(vals$colors_img$val%in%solid) pic }) observeEvent(ignoreInit = T,input$desc_options,{ if(input$desc_options%in%c('tab_scatter','tab_segrda','tab_rda',"tab_omi")){ shinyjs::hide('data_descX') } if(!input$desc_options%in%c('tab_scatter','tab_segrda','tab_rda',"tab_omi")){ shinyjs::show('data_descX') } }) output$upload_selection<-renderUI({ validate(need(length(vals$saved_data)>0,"No Datalist found")) column(12, strong("Datalist:"), inline(pickerInput(ns("data_descX"),NULL,choices=names(vals$saved_data), selected=vals$cur_data, options=list(container="body","style-base" = "form-control", style = "")))) }) 
observeEvent(ignoreInit = T,input$desc_options,{ vals$cur_desc_options<-input$desc_options }) observe({ req(is.null(vals$cur_desc_options)) vals$cur_desc_options<-'tab1' }) observe({ req(input$desc_options) req(!is.null(vals$cur_desc_options)) req(!vals$cur_desc_options%in%input$desc_options) vals$cur_desc_options<-'tab1' }) output$dtab_boxplot<-renderUI({ column(12,uiOutput(ns("stats_cbox")), div( uiOutput(ns('boxplot_out')) )) }) output$dtab_corr<-renderUI({ div(sidebarLayout(sidebarPanel(fluidRow(class="map_control_style",style="color: #05668D",uiOutput(ns('corr_side')))),mainPanel(uiOutput(ns("corr_plot"))))) }) output$dtab_mds<-renderUI({ div( sidebarLayout( sidebarPanel( fluidRow( class="map_control_style", style="color: #05668D", uiOutput(ns('omds_dist')), uiOutput(ns('mds_options')) ) ), mainPanel( uiOutput(ns("stats_cmds")), uiOutput(ns("stats_pmds")) ) ) ) }) output$dtab_pca<-renderUI({ div( sidebarLayout( sidebarPanel( fluidRow( class="map_control_style", style="color: #05668D", uiOutput(ns('opca_biplot')), uiOutput(ns('pca_options_plot')), uiOutput(ns('pca_summary')) ) ), mainPanel( uiOutput(ns("stats_cpca")), tabsetPanel(id=ns('pca_options'),selected=vals$pca_options, tabPanel("Plot", value="pca_plot", uiOutput(ns("stats_ppca"))), tabPanel("Summary", value="pca_summary", inline(DT::dataTableOutput(ns("summary_pca")))) ) ) ) ) }) output$dtab_rda<-renderUI({ div( div(uiOutput(ns("stats_crda"))), sidebarLayout( sidebarPanel( fluidRow( class="map_control_style", style="color: #05668D", uiOutput(ns('orda_options')), uiOutput(ns('rda_options')) ) ), mainPanel( uiOutput(ns("stats_rda")) ) ) ) }) output$dtab_segrda<-renderUI({ div( div( style="background: white", p(strong("Segmented Redundancy Analysis")), span( inline( span(style="width: 150px", inline( pickerInput(ns("segrda_X"),span("Y Data", tiphelp("Predictors")), choices=names(vals$saved_data), selected=vals$cur_segrda_X)) ) ), inline( pickerInput(ns("segrda_Y"),span("~ X Data", tiphelp("Response 
data")), choices=names(vals$saved_data), selected=vals$cur_segrda_Y)) ) ), uiOutput(ns('segrda_panels')) ) }) output$panel_main<-renderUI({ validate(need(length(vals$saved_data)>0,"No Datalist found")) req(input$data_descX) column(12, tabsetPanel( id=ns('desc_options'),selected = vals$cur_desc_options, tabPanel('1. Summaries', value="tab1", uiOutput(ns('dtab_summaries'))), tabPanel('2. Boxplot', value="tab_box", uiOutput(ns("dtab_boxplot"))), tabPanel('3. Ridges', value="tab2", uiOutput(ns('dtab_rid'))), # tabPanel('Scatter',value="tab_scatter",uiOutput(ns('dtab_scatter'))), tabPanel( '4. Pair plot', value="tab_ggpair", uiOutput(ns('gg_pairs_panel')) ), # tabPanel('Histogram',value="tab_histo",uiOutput(ns("dtab_histogram"))), tabPanel('5. Correlation plot', value="tab_corr", uiOutput(ns("dtab_corr"))), tabPanel('6. MDS', value="tab_mds", uiOutput(ns("dtab_mds"))), tabPanel('7. PCA', value="tab_pca", uiOutput(ns("dtab_pca"))), tabPanel('8. RDA', value="tab_rda", uiOutput(ns("dtab_rda"))), tabPanel('9. 
segRDA', value="tab_segrda", uiOutput(ns("dtab_segrda"))) ) ) }) #### observeEvent(getdata_descX(),{ data=getdata_descX() vals$ggpair.variables<-colnames(data)[1:3] }) observeEvent(ignoreInit = T,input$gg_run,{ vals$ggpair.variables<-input$ggpair.variables }) output$gg_pairs_panel<-renderUI({ req(input$data_descX) data=getdata_descX() req(length(data)>0) if(is.null(vals$ggpair.variables)){ vals$ggpair.variables<-colnames(data)[1:3] } column(12, column(3,class="well3", div(class="map_control_style2",style="color: #05668D", tags$div(id="ggpicker1", pickerInput(ns("ggpair.variables"),span("+ Variables:",class='text_alert'),colnames(data), multiple = T,options=list(`actions-box` = TRUE), selected=vals$ggpair.variables) ), uiOutput(ns("side_msp")), uiOutput(ns("side_msp_pairs")) )), column(9, withSpinner(uiOutput(ns("msp_pairs")),8) )) }) output$side_msp<-renderUI({ div( pickerInput(inputId = ns("fm_palette"), label = '+ Palette', choices = vals$colors_img$val, choicesOpt = list(content =vals$colors_img$img), selected=vals$cm_palette, options=list(container="body")), numericInput(ns("msp_plot_width"), "+ Plot width",550), numericInput(ns("msp_plot_height"), "+ Plot height",400), numericInput(ns("msp_plot_base_size"),"+ Base size",12), ) }) output$side_msp_pairs <- renderUI({ req(input$msp_plot_base_size) if (is.null(vals$ggpair.box.include)) { vals$ggpair.box.include <- FALSE } div( div( div(strong("+ Panels:")), div(style = "margin-left: 20px; border-bottom: 1px solid; margin-bottom: 5px", uiOutput(ns("output_ggpair_upper")), uiOutput(ns("output_ggpair_lower")), uiOutput(ns("output_ggpair_diag")), div("+", inline(checkboxInput(ns("ggpair.box.include"), "Y boxplot", vals$ggpair.box.include))) ) ), div( uiOutput(ns("ggpair.y.variable")) ), uiOutput(ns("output_ggpair_method")), uiOutput(ns("output_ggpair_round")), uiOutput(ns("output_ggpair_switch")), uiOutput(ns("output_ggpair_varnames_size")), uiOutput(ns("output_ggpair_cor_size")), uiOutput(ns("output_ggpair_pch")), 
uiOutput(ns("output_ggpair_points_size")), uiOutput(ns("output_ggpair_legend_text_size")), uiOutput(ns("output_ggpair_legend_title_size")), uiOutput(ns("output_ggpair_alpha_curve")), uiOutput(ns("output_ggpair_title")), uiOutput(ns("output_ggpair_plot_title_size")), uiOutput(ns("output_ggpair_xlab")), uiOutput(ns("output_ggpair_ylab")), uiOutput(ns("output_ggpair_axis_text_size")), uiOutput(ns("output_ggpair_axis_title_size")), div(inline(uiOutput(ns("output_ggpair_title_corr"))), style = "width: 100%"), actionLink( ns("fm_downplot4"), span("+ Download plot 1", icon("fas fa-download"), icon("fas fa-image")), style = "button_active" ) ) }) output$output_ggpair_upper <- renderUI({ pickerInput(ns("ggpair.upper"), "+ Upper", choices = list( "Correlation" = "corr", "Corr + group" = "corr+group", "none" = "blank" ), selected = vals$ggpair.upper) }) output$output_ggpair_lower <- renderUI({ pickerInput(ns("ggpair.lower"), "+ Lower", choices = list( "Points" = "points", "Points + group" = "points+group", "none" = "blank" ), selected = vals$ggpair.lower) }) output$output_ggpair_diag <- renderUI({ pickerInput(ns("ggpair.diag"), "+ Diagonal", choices = list( "Density" = "density", "Density + group" = "density+group", "Hist" = "hist", "Hist+group" = "hist+group", "none" = "blank" ), selected = vals$ggpair.diag) }) output$output_ggpair_method <- renderUI({ pickerInput(ns("ggpair.method"), "+ Correlation method:", c("pearson", "kendall", "spearman", "none")) }) output$output_ggpair_round <- renderUI({ numericInput(ns("ggpair.round"), "+ Digits:", 3) }) output$output_ggpair_switch <- renderUI({ pickerInput(ns("ggpair.switch"), "+ Switch:", list("default" = NULL, "x" = "x", "y" = "y", "both" = "both")) }) output$output_ggpair_varnames_size <- renderUI({ req(input$msp_plot_base_size) bs<-round(input$msp_plot_base_size/12, 2) numericInput(ns("ggpair.varnames.size"), "+ Variable name size:", bs*1.4) }) output$output_ggpair_cor_size <- renderUI({ req(input$msp_plot_base_size) 
numericInput(ns("ggpair.cor.size"), "+ Corr size:", 2) }) output$output_ggpair_pch <- renderUI({ pickerInput(inputId = ns("ggpair.pch"), label = "+ Point shape", choices = df_symbol$val, choicesOpt = list(content = df_symbol$img), options = list(container = "body"), width = "100px", selected = vals$xyf_symbol) }) output$output_ggpair_points_size <- renderUI({ req(input$msp_plot_base_size) bs<-round(input$msp_plot_base_size/12, 2) numericInput(ns("ggpair.points.size"), "+ Points size", bs) }) output$output_ggpair_legend_text_size <- renderUI({ req(input$msp_plot_base_size) bs<-round(input$msp_plot_base_size/12, 2) numericInput(ns("ggpair.legend.text.size"), "+ legend.text.size:", bs) }) output$output_ggpair_legend_title_size <- renderUI({ req(input$msp_plot_base_size) bs<-round(input$msp_plot_base_size/12, 2) numericInput(ns("ggpair.legend.title.size"), "+ legend.title.size:", bs) }) output$output_ggpair_alpha_curve <- renderUI({ numericInput(ns("ggpair.alpha.curve"), "+ Curve transparency:", 0.8) }) output$output_ggpair_title <- renderUI({ textInput(ns("ggpair.title"), "+ Title:", "") }) output$output_ggpair_plot_title_size <- renderUI({ req(input$msp_plot_base_size) bs<-round(input$msp_plot_base_size/12, 2) numericInput(ns("ggpair.plot.title.size"), "+ Title size:", bs) }) output$output_ggpair_xlab <- renderUI({ textInput(ns("ggpair.xlab"), "+ xlab:", "") }) output$output_ggpair_ylab <- renderUI({ textInput(ns("ggpair.ylab"), "+ ylab:", "") }) output$output_ggpair_axis_text_size <- renderUI({ req(input$msp_plot_base_size) bs<-round(input$msp_plot_base_size/12, 2) numericInput(ns("ggpair.axis.text.size"), "+ Axis tick size:", bs) }) output$output_ggpair_axis_title_size <- renderUI({ bs<-round(input$msp_plot_base_size/12, 2) numericInput(ns("ggpair.axis.title.size"), "+ Axis label size:", bs) }) output$output_ggpair_title_corr <- renderUI({ div(inline(textInput(ns("ggpair.title_corr"), "+ Title corr:", "")), style = "width: 100%") }) 
output$ggpair.y.variable<-renderUI({ req(isTRUE(yclude_y())) req(input$data_descX) data0<-getdata_descX() req(length(data0)>0) data<-vals$saved_data[[input$data_descX]] req(length(data)>0) factors<-attr(data,"factors") div( style="border-bottom: 1px solid; margin-bottom: 5px; margin-left: 20px", pickerInput(ns("ggpair.y.variable"),strong("+ Y Variable:", class='text_alert'),colnames(factors), selected=vals$ggpair.y.variable, width="220px") ) }) observeEvent(input$ggpair.y.variable,{ vals$ggpair.y.variable<-input$ggpair.y.variable }) output$ggpair.title_corr<-renderUI({ req(input$ggpair.method) textInput(ns("ggpair.title_corr"),"+ title_corr:",paste(input$ggpair.method,"corr")) }) observeEvent(ignoreInit = T,input$fm_downplot4,{ vals$hand_plot<-"Pairs-plot" module_ui_figs("downfigs") mod_downcenter <- callModule(module_server_figs, "downfigs", vals=vals) }) output$gg_run_btn<-renderUI({ req(is.null(vals$gg_run)) div(class="save_changes", actionButton(ns('gg_run'), 'RUN', icon=icon("fas fa-sync")) ) }) observeEvent(ignoreInit = T,input$gg_run,{ vals$desc_pairplot<-get_ggpair() vals$gg_run<-F }) observeEvent(get_ggpair_args(),{ vals$gg_run<-NULL }) observeEvent(get_ggpair_args(),{ args<-get_ggpair_args() req( class(args)=="iggpair") vals$gg_run<-F vals$desc_pairplot<-get_ggpair() }, once=T) yclude_y<-reactive({ req(input$ggpair.lower) req(input$ggpair.upper) req(input$ggpair.diag) req(length(input$ggpair.box.include)>0) input$ggpair.lower=='points+group'|input$ggpair.upper=='corr+group'|input$ggpair.diag%in%c("density+group","hist+group")| isTRUE( input$ggpair.box.include) }) get_ggpair_args<-reactive({ args<-try(silent = T,{ req(input$data_descX) #input<-readRDS("input.rds") #vals<-readRDS("savepoint.rds") req(input$ggpair.variables) req(input$fm_palette) newdata<-getdata_descX() req(length(newdata)>0) req(input$ggpair.variables%in%colnames(newdata)) data<-newdata[,input$ggpair.variables] pred<-y<-NULL df=data cols<-vals$newcolhabs[[input$fm_palette]](1) 
my_cols<-cols if(yclude_y()){ req(input$data_descX) req(input$ggpair.y.variable) req(input$data_descX %in% names(vals$saved_data)) factors<-attr(vals$saved_data[[input$data_descX]],"factors") req(input$ggpair.y.variable %in% colnames(factors)) y<-pred<-factors[rownames(data),input$ggpair.y.variable, drop=F] #validate(need(nlevels(pred[,1])<=50)) df=cbind(data,pred) cols<-vals$newcolhabs[[input$fm_palette]](nlevels(pred[,1])) my_cols<-cols[pred[,1]] } include.y<-input$ggpair.box.include # library(GGally) size=input$msp_plot_base_size*.09 req(input$ggpair.method) args<-list(x=data,y=y, cols=cols, method=input$ggpair.method, round=input$ggpair.round, switch=input$ggpair.switch, plot.title.size=input$ggpair.plot.title.size, axis.text.size=input$ggpair.axis.text.size, axis.title.size=input$ggpair.axis.title.size, cor.size=input$ggpair.cor.size, varnames.size=input$ggpair.varnames.size, points.size=input$ggpair.points.size, legend.text.size=input$ggpair.legend.text.size, legend.title.size=input$ggpair.legend.title.size, alpha.curve=input$ggpair.alpha.curve, title=input$ggpair.title, xlab=input$ggpair.xlab, ylab=input$ggpair.ylab, title_corr=input$ggpair.title_corr, include.y=include.y, pch=as.numeric(input$ggpair.pch), upper=input$ggpair.upper, lower=input$ggpair.lower, diag=input$ggpair.diag ) # saveRDS(args,"args.rds") req( !any(sapply(args[-2],length)<1)) # attach(args) class(args)<-'iggpair' args }) req(class(args)=="iggpair") args }) get_ggpair<-reactive({ args<-get_ggpair_args() class(args)=="iggpair" p<-do.call(gg_pairplot2,args) p }) output$msp_pairs<-renderUI({ res<-div( uiOutput(ns("gg_run_btn")), renderPlot(vals$desc_pairplot, width=input$msp_plot_width,height=input$msp_plot_height), em(attr(vals$desc_pairplot,"row1"), style="color: gray") ) vals$show_ggrun<-F res }) ### output$corr_cutoff<-renderUI({ req(input$cutoff_hl!="all") if(is.null(vals$cor_cutoff)){vals$cor_cutoff<-0.9} numericInput(ns("cor_cutoff"),NULL,value = vals$cor_cutoff,min = 0.1,max = 1,step 
= .1, width='100px') }) observeEvent(ignoreInit = T,input$cor_cutoff,{ vals$cor_cutoff<-input$cor_cutoff }) output$corr_side<-renderUI({ cor_method = c("pearson", "kendall", "spearman") cor_use=c( "complete.obs","everything", "all.obs", "na.or.complete", "pairwise.complete.obs") cor_dendogram = c("both","row","column","none") cor_scale = c("none","row", "column") cor_Rowv = c('TRUE','FALSE') cor_Colv=c('Rowv',T,F) cor_revC=c('TRUE','FALSE') cor_na.rm=c('TRUE','FALSE') cor_labRow=c('TRUE','FALSE') cor_labCol=c('TRUE','FALSE') cor_cellnote=c('TRUE','FALSE') cor_density.info=c("histogram","density","none") div( div(pickerInput(ns("cor_method"), span("+ Corr method",tiphelp("correlation coefficient to be computed")), choices=cor_method)), div(pickerInput(ns("cor_use"), span("+ Use",tiphelp("method for computing covariances in the presence of missing values")), choices=cor_use)), div( span(strong("+ Filter correlations:"),tiphelp("ifThe pair-wise absolute correlation cutoff "),inline(pickerInput(ns('cutoff_hl'), NULL, choices=list( "All"="all", "lower than"="lower", "higher than"="higher" ), width="100px")), inline(uiOutput(ns("corr_cutoff")))) ), div(pickerInput(ns("cor_dendogram"), span("+ Dendogram",tiphelp("indicating whether to draw 'none', 'row', 'column' or 'both' dendrograms")), choices=cor_dendogram)), div(pickerInput(ns("cor_scale"), span("+ Scale",tiphelp("indicating if the values should be centered and scaled in either the row direction or the column direction, or none. 
")), choices=cor_scale)), div(pickerInput(ns("cor_Rowv"), span("+ Rowv",tiphelp("If is TRUE, which implies dendrogram is computed and reordered based on row means")), choices=cor_Rowv)), div(pickerInput(ns("cor_Colv"), span("+ Colv",tiphelp(" Colv='Rowv' means that columns should be treated identically to the rows.If is TRUE, which implies dendrogram is computed and reordered based on cols means")), choices=cor_Colv)), div(pickerInput(ns("cor_revC"), span("+ revC",tiphelp("Indicating if the column order should be reversed for plotting")), choices=cor_revC)), div(pickerInput(ns("cor_na.rm"), span("+ na.rm",tiphelp("indicating whether NAs should be removed")), choices=cor_na.rm)), div(pickerInput(ns("cor_labRow"), span("+ labRow",tiphelp("show observation labels")), choices=cor_labRow)), div(pickerInput(ns("cor_labCol"), span("+ labCol",tiphelp("show variable labels")), choices=cor_labCol)), div(pickerInput(ns("cor_density.info"), span("+ density.info",tiphelp("indicating whether to superimpose a 'histogram', a 'density' plot, or no plot ('none') on the color-key.")), choices=cor_density.info)), div(class="palette",span("+ Palette:",inline( pickerInput(inputId=ns("cor_palette"), label = NULL, choices = vals$colors_img$val, choicesOpt = list(content = vals$colors_img$img), width="120px", selected=vals$cor_palette) ))), div(class="palette",span(span("+ NA color:",tiphelp("Color to use for missing value")),inline( pickerInput(inputId=ns("cor_na.color"), label = NULL, choices = vals$colors_img$val[getsolid_col()], choicesOpt = list(content = vals$colors_img$img[getsolid_col()]), selected="gray", width="75px") ))), div( span(span("+ X margin"), inline(numericInput(ns("cor_mar_row"),NULL,value = 5,min = 0,step = 1, width='100px') )) ), div( span(span("+ Y margin"), inline(numericInput(ns("cor_mar_col"),NULL,value = 5,min = 0,step = 1, width='100px') )) ), div( span(span("+ sep row width:",tiphelp("space between rows")), inline(numericInput(ns("cor_sepwidth_a"),NULL,value = 
0.05,min = 0.1,max = 1,step = .01, width='100px') )) ), div( span(span("+ sep col width:",tiphelp("space between columns")), inline(numericInput(ns("cor_sepwidth_b"),NULL,value = 0.05,min = 0.1,max = 1,step = .01, width='100px') )) ), div(class="palette",span(span("+ Sep color:",tiphelp("color between rows and coluns")),inline( pickerInput(inputId=ns("cor_sepcolor"), label = NULL, choices = vals$colors_img$val[getsolid_col()], choicesOpt = list(content = vals$colors_img$img[getsolid_col()]), selected="white", width="75px") ))), hr(), pickerInput(ns("cor_cellnote"), span("+ Cell note",tiphelp("Show correlation value")), choices=cor_cellnote), div(class="palette", span(span("+ Note color:",tiphelp("Color of the correlation value")),inline( pickerInput(inputId=ns("cor_noteco"), label = NULL, choices = vals$colors_img$val[getsolid_col()], choicesOpt = list(content = vals$colors_img$img[getsolid_col()]), selected="black", width="75px") ))), div( span(span("+ Note size:", tiphelp("Size of the correlation value")), inline(numericInput(ns("cor_notecex"),NULL,value = 1,step=0.1, width='100px') )) ), div( actionLink(ns('corr_downp'),"+ Download plot", style="button_active") ), div( actionLink(ns('corr_down_results'),span("+ Download Results",icon("fas fa-table")), style="button_active") ) ) }) observeEvent(ignoreInit = T,input$corr_down_results,{ vals$hand_down<-"Corr result" module_ui_downcenter("downcenter") mod_downcenter <- callModule(module_server_downcenter, "downcenter", vals=vals) }) get_corrdata<-reactive({ req(input$cor_method) args<-list(data=getdata_descX(),cor_method=input$cor_method,cor_cutoff=input$cor_cutoff,cor_use=input$cor_use,ret=input$cutoff_hl) # saveRDS(args,"args.rds") #args<-readRDS("args.rds") # attach(args) cordata<-do.call(cordata_filter,args) cordata }) output$corr_plot<-renderUI({ cordata=get_corrdata() vals$corr_results<-cordata args<-list(cordata=cordata, newcolhabs=vals$newcolhabs, cor_palette=input$cor_palette, 
cor_sepwidth_a=input$cor_sepwidth_a, cor_sepwidth_b=input$cor_sepwidth_b, cor_notecex=input$cor_notecex, cor_noteco=input$cor_noteco, cor_na.color=input$cor_na.color, cor_sepcolor=input$cor_sepcolor, cor_dendogram=input$cor_dendogram, cor_scale=input$cor_scale, cor_Rowv=input$cor_Rowv, cor_Colv=input$cor_Colv, cor_revC=input$cor_revC, cor_na.rm=input$cor_na.rm, cor_labRow=input$cor_labRow, cor_labCol=input$cor_labCol, cor_cellnote=input$cor_cellnote, cor_density.info=input$cor_density.info, margins = c(input$cor_mar_row, input$cor_mar_col)) req(!any(unlist(lapply(args,is.null)))) # saveRDS(args,"arg_heat.rds") #args<-readRDS("args.rds") renderPlot({ do.call(i_corplot,args) vals$plot_correlation<-recordPlot() }) }) observeEvent(ignoreInit = T,input$corr_downp,{ vals$hand_plot<-"Correlation Plot" module_ui_figs("downfigs") mod_downcenter<-callModule(module_server_figs, "downfigs", vals=vals) }) getmissing<-reactive({ vals<-readRDS("savepoint.rds") data<-vals$saved_data$zeu req(is.data.frame(vals$saved_data[[input$data_descX]])) data=vals$saved_data[[input$data_descX]] image(as.matrix(data)) res0<-res<-which(is.na(data), arr.ind=TRUE) if(length(res0)>0){ for(i in 1:nrow(res)){ res0[i,1]<-rownames(data)[res[i,1]] res0[i,2]<-colnames(data)[res[i,2]] } colnames(res0)<-c("ID","Variable") rownames(res0)<-NULL res<-data.frame( table(res0[,2])) colnames(res)<-c("Variable","Missing") rownames(res)<-res[,1] pic<-colnames(vals$saved_data[[input$data_descX]])[which(colnames(vals$saved_data[[input$data_descX]])%in%res[,1])] res[,1]<-NULL if(length(pic)>0) res[pic,, drop=F] } }) get_dataord<-reactive({ req(input$missing_reorder!="N missing") data=vals$saved_data[[input$data_descX]] dataord<-if(input$missing_reorder=="Factor"){ attr(data,"factors") } else{data} dataord }) observeEvent(ignoreInit = T,input$missing_id1,{ vals$missing_id1<-input$missing_id1 }) observeEvent(ignoreInit = T,input$missing_id2,{ vals$missing_id2<-input$missing_id2 }) observeEvent(ignoreInit = 
T,input$missing_var1,{ vals$missing_var1<-input$missing_var1 }) observeEvent(ignoreInit = T,input$missing_var2,{ vals$missing_var2<-input$missing_var2 }) observeEvent(ignoreInit = T,input$missing_reorder,{ vals$missing_reorder<-input$missing_reorder }) observeEvent(ignoreInit = T,input$missing_ord,{ vals$missing_ord<-input$missing_ord }) output$missing_data<-renderUI({ sidebarLayout( sidebarPanel(uiOutput(ns('missing_side'))), mainPanel(uiOutput(ns('missing_plot'))) ) }) output$missing_side<-renderUI({ data=vals$saved_data[[input$data_descX]] if(is.null(vals$missing_id1)){ ob1<-rownames(data)[c(1,nrow(data))] va1<-colnames(data)[c(1,ncol(data))] vals$missing_id1<-ob1[1] vals$missing_id2<-ob1[2] vals$missing_var1<-va1[1] vals$missing_var2<-va1[2] } div(class="map_control_style",style="color: #05668D", div("Row:", inline(pickerInput(ns("missing_id1"), NULL,rownames(data), selected=vals$missing_id1, width="100px")), strong("to"),inline( pickerInput(ns("missing_id2"), NULL,rownames(data), selected=vals$missing_id2, width="100px") ) ), div("Col:", inline(pickerInput(ns("missing_var1"), NULL,colnames(data), selected=vals$missing_var1, width="100px")), strong("to"),inline( pickerInput(ns("missing_var2"), NULL,colnames(data), selected=vals$missing_var2, width="100px") ) ), div("Reorder", inline(pickerInput(ns("missing_reorder"), NULL,c( "N missing","Variable","Factor" ), selected=vals$missing_reorder, width="100px")), inline(uiOutput(ns("missing_ord"))) ), div("+ Palette", pickerInput(inputId=ns("missing_palette"),label = NULL,choices = vals$colors_img$val,choicesOpt = list(content = vals$colors_img$img),options=list(container="body"),selected=vals$colors_img$val[1], width='120px') ), div( actionLink(ns('split_missing'),"+ Split into missing and non-missing", style="button_active") ), div( actionLink(ns('missing_downp'),"+ Download plot", style="button_active") ), actionButton(ns("save_teste"),"SAVE") ) }) observeEvent(ignoreInit = T,input$split_missing,{ 
#vals<-readRDS("savepoint.rds") #input<-readRDS('input.rds') data=vals$saved_data[[input$data_descX]] req(any(is.na(data))) factors<-attr(data,"factors") coords<-attr(data,"coords") colmiss<-getcol_missing(data)[,1] comi<-which(colnames(data)%in%as.character(colmiss)) romi<-which(rownames(data)%in%as.character(getrow_missing(data)[,1])) ylist<-list() for(i in 1:length(comi)){ romipic<-which(is.na(data[,comi[i]])) X=data[-romipic,-comi, drop=F] attr(X,"factors")<-factors[rownames(X),] attr(X,"coords")<-coords[rownames(X),] Y=data[-romipic,comi[i], drop=F] fac<-factors[rownames(Y),] n_sample<-round(nrow(Y)*20/100) part<-sample(1:nrow(Y),n_sample) name0<-paste0("Partition_",colnames(Y)) name1<-make.unique(c(colnames(factors),name0), sep="_") name_part<-name1[ncol(factors)+1] fac[name_part]<-NA fac[rownames(Y)[as.vector(part)] ,name_part]<-"test" fac[rownames(Y)[-as.vector(part)] ,name_part]<-"training" fac[name_part]<-factor(fac[,name_part]) attr(Y,"factors")<-fac attr(Y,"coords")<-coords[rownames(Y),] ylist[[i]]<-Y newdata=data[romipic,-comi, drop=F] attr(newdata,"factors")<-factors[rownames(newdata),] attr(newdata,"coords")<-coords[rownames(newdata),] name0<-paste0(input$data_descX,"_COMP_X_to_", colnames(Y)) name1<-make.unique(c(names(vals$saved_data),name0), sep="_") namemissing<-name1[length(vals$saved_data)+1] vals$saved_data[[namemissing]]<-X #name0<-paste0(input$data_descX,"_COMP_Y_", colnames(Y)) # name1<-make.unique(c(names(vals$saved_data),name0), sep="_") #namemissing<-name1[length(vals$saved_data)+1] #vals$saved_data[[namemissing]]<-Y name0<-paste0(input$data_descX,"_MISS_newX_to_",colnames(Y)) name1<-make.unique(c(names(vals$saved_data),name0), sep="_") namemissing<-name1[length(vals$saved_data)+1] vals$saved_data[[namemissing]]<-newdata } datY<-mergedatacol(ylist) name0<-paste0(input$data_descX,"_COMP_Y_") name1<-make.unique(c(names(vals$saved_data),name0), sep="_") namemissing<-name1[length(vals$saved_data)+1] vals$saved_data[[namemissing]]<-datY }) 
# --- Missing-data heatmap: download handler -------------------------------
# Marks the missing-data plot as the active figure and opens the
# figure-download module.
observeEvent(ignoreInit = T, input$missing_downp, {
  vals$hand_plot <- "Missing plot"
  module_ui_figs("downfigs")
  mod_downcenter <- callModule(module_server_figs, "downfigs", vals = vals)
})

# Picker for the column used to reorder the missing-data heatmap
# (choices come from the data or the factor table, via get_dataord()).
output$missing_ord <- renderUI({
  choices <- colnames(get_dataord())
  pickerInput(ns("missing_ord"), NULL, choices, selected = vals$missing_ord, width = "100px")
})

# Missing-data heatmap. Rows/columns are restricted to the span between the
# two selected row ids and the two selected column names, tiles are filled by
# value (NA rendered black), and the plot is reordered either by per-row NA
# count or by a chosen ordering column. The ggplot is cached in
# vals$missing_plot for the download module.
output$missing_plot <- renderUI({
  req(input$missing_reorder)
  data = vals$saved_data[[input$data_descX]]
  # Window of observations: everything between the two selected row names.
  ob1 <- c(input$missing_id1, input$missing_id2)
  obs <- which(rownames(data) %in% ob1)
  # Window of variables: everything between the two selected column names.
  va1 <- c(input$missing_var1, input$missing_var2)
  var <- which(colnames(data) %in% va1)
  pic_var <- seq(var[1], var[2])
  pic_obs <- seq(obs[1], obs[2])
  data <- data[pic_obs, pic_var]
  renderPlot({
    df <- data.frame(data)
    # Per-observation count of missing values, used for the default ordering.
    df$nmissing <- apply(data, 1, function(x) sum(is.na(x)))
    if (input$missing_reorder == 'N missing') {
      # Order rows and columns by number of missing values.
      a <- reshape2::melt(data.frame(id = rownames(df), df), c("id", "nmissing"))
      p <- ggplot(a, aes(reorder(variable, nmissing), reorder(id, nmissing))) +
        geom_tile(aes(fill = value), color = "black") +
        scale_fill_gradientn(colours = vals$newcolhabs[[input$missing_palette]](100), na.value = "black")
    } else {
      # Order by the user-chosen data/factor column instead.
      df <- data.frame(data)
      df$nmissing <- apply(data, 1, function(x) sum(is.na(x)))
      req(input$missing_reorder)
      dataord <- get_dataord()
      ordvar <- dataord[, input$missing_ord]
      df$ordvar <- ordvar
      a <- reshape2::melt(data.frame(id = rownames(df), df), c("id", "nmissing", "ordvar"))
      p <- ggplot(a, aes(reorder(variable, ordvar), reorder(id, ordvar))) +
        geom_tile(aes(fill = value), color = "black") +
        scale_fill_gradientn(colours = vals$newcolhabs[[input$missing_palette]](100), na.value = "black")
    }
    p <- p + theme(axis.text.x = element_text(angle = 45, hjust = 1)) + xlab("Variables") + ylab("Observations")
    # Cache for the download module, then return for display.
    vals$missing_plot <- p
    vals$missing_plot
  })
})

# DEV helper: snapshot the current reactive state to disk for offline
# debugging (writes savepoint.rds / input.rds next to the app).
observeEvent(ignoreInit = T, input$save_teste, {
  saveRDS(reactiveValuesToList(vals), "savepoint.rds")
  saveRDS(reactiveValuesToList(input), "input.rds")
  beep()
  #vals<-readRDS("vals.rds")
  #input<-readRDS('input.rds')
})

# Download link for the correlation plot; only shown after a plot exists.
# NOTE(review): the label string literal contains an embedded line break —
# confirm that is intentional.
output$cor_downplot <- renderUI({
  req(!is.null(vals$corplot))
  div(
    actionLink(ns('cordown'), "+ 
Download plot", style = "button_active")
  )
})

# Invalidate the cached correlation plot when the active datalist changes.
observeEvent(ignoreInit = T, input$data_descX, {
  vals$corplot <- NULL
})

observeEvent(ignoreInit = T, input$cordown, {
  vals$hand_plot <- "Corr plot"
  module_ui_figs("downfigs")
  mod_downcenter <- callModule(module_server_figs, "downfigs", vals = vals)
})

# Maps the human-readable PCA-result choice to the corresponding element
# name of the (summarised) prcomp object.
# NOTE(review): the trailing comma leaves an empty final alternative in this
# switch() call — confirm it is intentional.
pic_pca_results <- reactive({
  req(input$show_pca_results)
  switch (input$show_pca_results,
    'Standard deviations' = 'sdev',
    'Rotation' = 'rotation',
    'Centering' = 'center',
    'Scaling' = 'scale',
    'Scores' = 'x',
    'Importance' = 'importance',
  )
})

# Controls for the PCA summary tab: which result component to show, plus a
# table-download link.
output$pca_summary <- renderUI({
  req(input$pca_options == "pca_summary")
  div(
    tags$div(
      pickerInput(ns("show_pca_results"), "Show result:",
                  c('Importance', 'Scores', "Standard deviations", "Rotation", "Centering", "Scaling")),
      style = "width: var(--parentHeight);"
    ),
    actionLink(ns('down_pca_results'), span("+ Download", icon("fas fa-table")))
  )
})

# Opens the table-download module for the currently displayed PCA result.
observeEvent(ignoreInit = T, input$down_pca_results, {
  vals$hand_down <- "PCA result"
  module_ui_downcenter("downcenter")
  mod_downcenter <- callModule(module_server_downcenter, "downcenter", vals = vals)
})

# Persist UI selections into `vals` so they survive UI re-renders.
observeEvent(ignoreInit = T, input$pca_options, {
  vals$pca_options <- input$pca_options
})
observeEvent(ignoreInit = T, input$scatter_y_datalist, {
  vals$scatter_y_datalist <- input$scatter_y_datalist
})
output$scatter_y_datalist <- renderUI({
  pickerInput(ns("scatter_y_datalist"), 'Datalist Y', choices = names(vals$saved_data), selected = vals$scatter_y_datalist)
})
observeEvent(ignoreInit = T, input$scatter_x_datalist, {
  vals$scatter_x_datalist <- input$scatter_x_datalist
})
output$scatter_x_datalist <- renderUI({
  pickerInput(ns("scatter_x_datalist"), 'Datalist X', choices = names(vals$saved_data), selected = vals$scatter_x_datalist)
})

# Scatter-plot tab: datalist/variable pickers in the sidebar, plot in the
# main panel.
output$dtab_scatter <- renderUI({
  div(style = "background: white",
    p(strong("Scatter plot")),
    sidebarLayout(
      sidebarPanel(
        div(
          class = "map_control_style",
          style = "color: #05668D",
          div(inline(uiOutput(ns("scatter_x_datalist"))),
              inline(uiOutput(ns("scatter_x")))),
          div(
            inline(uiOutput(ns("scatter_y_datalist"))),
            inline(uiOutput(ns("scatter_y_input")))
          ),
          uiOutput(ns('scatter_side'))
        )
      ),
      mainPanel(uiOutput(ns("scatter_plot")))))
})

# Scatter-plot sidebar controls; shown once both variables are chosen.
output$scatter_side <- renderUI({
  req(input$scatter_x)
  req(input$scatter_y)
  div(
    div(span("+ Shape:",
      inline(pickerInput(inputId = ns("scatter_symbol"), label = NULL, choices = df_symbol$val, options = list(container = "body"), choicesOpt = list(content = df_symbol$img), width = '75px')))),
    div(
      span("+ Size:",
        inline(numericInput(ns("scatter_cexpoint"), NULL, value = 1, min = 0.1, max = 3, step = .1, width = '75px')))
    ),
    div(span('+ X label:',
      inline(
        textInput(ns("scatter_xlab"), NULL , value = input$scatter_x, width = "120px")
      ))),
    div(span('+ Y label:',
      inline(
        textInput(ns("scatter_ylab"), NULL , value = input$scatter_y, width = "120px")))),
    div(
      actionLink(ns('scatter_downplot'), "+ Download plot", style = "button_active")
    )
  )
})

observeEvent(ignoreInit = T, input$scatter_downplot, {
  vals$hand_plot <- "Scatter plot"
  module_ui_figs("downfigs")
  mod_downcenter <- callModule(module_server_figs, "downfigs", vals = vals)
})

#vals<-readRDS("savepoint.rds")
# vals$saved_data$ID_tempo_GF2$Ano==
#vals$saved_data$ID_Variรกveis_GF2$Ano

# Base-graphics scatter of the chosen X column against the chosen Y column;
# the plot is recorded so the download module can replay it.
output$scatter_plot <- renderUI({
  datax <- vals$saved_data[[input$scatter_x_datalist]]
  datay <- vals$saved_data[[input$scatter_y_datalist]]
  dataxy <- data.frame(datax[input$scatter_x], datay[input$scatter_y])
  renderPlot({
    plot(dataxy,
         pch = as.numeric(input$scatter_symbol),
         cex = input$scatter_cexpoint,
         xlab = input$scatter_xlab,
         ylab = input$scatter_ylab)
    vals$scatter_plot <- recordPlot()
  })
})

# Variable pickers, populated from the selected datalists.
output$scatter_y_input <- renderUI({
  req(input$scatter_y_datalist)
  data <- vals$saved_data[[input$scatter_y_datalist]]
  div(
    pickerInput(
      ns("scatter_y"), 'Y', choices = colnames(data), selected = vals$scatter_y, width = "200px"
    ))
})
output$scatter_x <- renderUI({
  req(input$scatter_x_datalist)
  data <- vals$saved_data[[input$scatter_x_datalist]]
  div(
    pickerInput(
      ns("scatter_x"), "X", choices = colnames(data), selected = vals$scatter_x, width = "200px"
    ))
})
observeEvent(ignoreInit = T, input$scatter_x, {
  vals$scatter_x <- input$scatter_x
})
observeEvent(ignoreInit = T, input$scatter_y, {
  vals$scatter_y <- input$scatter_y
})

# DEV output: echoes the selected boxplot line colour (debugging aid).
output$bug <- renderUI({
  renderPrint({
    input$box_linecol
  })
})

# Reset every boxplot sidebar control back to its default.
observeEvent(input$box_reset, {
  shinyjs::reset("side_boxplot")
})

# Boxplot sidebar container; each control is its own output so it can be
# rendered (and reset) independently.
output$side_boxplot <- renderUI({
  div(class = "map_control_style2", style = "color: #05668D",
    uiOutput(ns('sidebox_horiz')),
    uiOutput(ns('sidebox_theme')),
    uiOutput(ns('sidebox_palette')),
    uiOutput(ns('sidebox_alpha')),
    uiOutput(ns('sidebox_linecol')),
    uiOutput(ns('sidebox_linewidth')),
    uiOutput(ns('sidebox_base_size')),
    uiOutput(ns('sidebox_cex.main')),
    uiOutput(ns('sidebox_cex.label_panel')),
    uiOutput(ns('sidebox_cex.lab')),
    uiOutput(ns('sidebox_cex.axes')),
    uiOutput(ns('sidebox_title')),
    uiOutput(ns('sidebox_xlab')),
    uiOutput(ns('sidebox_xlab_rotate')),
    uiOutput(ns('sidebox_ylab')),
    uiOutput(ns('sidebox_ylab_rotate')),
    uiOutput(ns('sidebox_grid')),
    uiOutput(ns('sidebox_varwidth')),
    uiOutput(ns('sidebox_violin')),
    uiOutput(ns('sidebox_width')),
    uiOutput(ns('sidebox_height'))
  )
})

# NOTE(review): user-visible label typos "widht"/"heigth" below are kept
# as-is (runtime strings).
output$sidebox_width <- renderUI({
  numericInput(ns('box_width'), "+ Plot widht:", 550, step = 50)
})
output$sidebox_height <- renderUI({
  numericInput(ns('box_heigth'), "+ Plot heigth:", 400, step = 50)
})
output$sidebox_xlab_rotate <- renderUI({
  numericInput(ns('box_xlab_rotate'), "+ x text angle:", 0, step = 5)
})
output$sidebox_ylab_rotate <- renderUI({
  numericInput(ns('box_ylab_rotate'), "+ y text angle:", 0, step = 5)
})
output$sidebox_theme <- renderUI({
  pickerInput(ns("box_theme"), "+ Theme:", c('theme_bw', 'theme_grey', 'theme_linedraw', 'theme_light', 'theme_minimal', 'theme_classic'))
})
output$sidebox_horiz <- renderUI({
  checkboxInput(ns('box_horiz'), "+ Horizontal:", value = F)
})
output$sidebox_palette <- renderUI({
  pickerInput(ns("box_palette"), label = "+ Palette:", choices = vals$colors_img$val, choicesOpt = list(content = vals$colors_img$img))
})
output$sidebox_alpha <- renderUI({
  numericInput(ns('box_alpha'), "+ Lighten:", .3, step = 0.05)
})
# --- Boxplot sidebar controls (continued) ---------------------------------
output$sidebox_linecol <- renderUI({
  div(
    colourpicker::colourInput(ns("box_linecol"), label = "+ Line color:", value = "black", showColour = "background")
  )
})
output$sidebox_linewidth <- renderUI({
  numericInput(ns('box_linewidth'), "+ Line width:", .5, step = .1)
})
output$sidebox_base_size <- renderUI({
  numericInput(ns('box_base_size'), "+ Base size:", 12, step = 1)
})
# The cex.* inputs are multipliers applied to the base font size (see
# stats_pbox below).
output$sidebox_cex.axes <- renderUI({
  numericInput(ns('box_cex.axes'), "+ Axis size:", 1.5, step = .1)
})
output$sidebox_cex.lab <- renderUI({
  numericInput(ns('box_cex.lab'), "+ Label size:", 1.5, step = .1)
})
output$sidebox_cex.main <- renderUI({
  numericInput(ns('box_cex.main'), "+ Title size:", 1.5, step = .1)
})
# Panel-title size; only relevant when more than one Y variable is plotted.
output$sidebox_cex.label_panel <- renderUI({
  req(input$box_y)
  req(length(input$box_y) > 1)
  numericInput(ns('box_cex.label_panel'), "+ Panel Title Size:", 1.5, step = .1)
})
# Default title "y ~ x" for a single Y variable; empty for multiple panels.
output$sidebox_title <- renderUI({
  req(input$boxplot_X)
  req(input$box_y)
  value = ifelse(length(input$box_y) > 1, "", paste(input$box_y, "~", input$boxplot_X))
  textInput(ns('box_title'), "+ Title:", value)
})
output$sidebox_xlab <- renderUI({
  req(input$boxplot_X)
  req(input$box_y)
  value = ifelse(length(input$box_y) > 1, "", input$boxplot_X)
  textInput(ns('box_xlab'), "+ x label:", value)
})
output$sidebox_ylab <- renderUI({
  req(input$box_y)
  value = ifelse(length(input$box_y) > 1, "Value", input$box_y)
  textInput(ns('box_ylab'), "+ y label:", value)
})
output$sidebox_grid <- renderUI({
  checkboxInput(ns('box_grid'), "+ Grid lines:", value = T)
})
output$sidebox_violin <- renderUI({
  checkboxInput(ns('box_violin'), "+ Violin:", value = F)
})
output$sidebox_varwidth <- renderUI({
  checkboxInput(ns("box_varwidth"), span("+ Varwidth:", tiphelp("Drawn boxes with widths proportional to the square-roots of the number of observations in the groups", "right")), F, width = "95px")
})

# Boxplot main panel: collects all sidebar settings, builds the plot via
# ggbox() and caches it in vals$pbox_plot for the download module.
output$stats_pbox <- renderUI({
  req(input$filter_box1)
  if (input$filter_box1 != "none") {
    req(input$filter_box2)
  }
  # Guard against empty/degenerate plot dimensions.
  req(input$box_width)
  req(input$box_width > 10)
  req(input$box_heigth)
  req(input$box_heigth > 10)
  div(
    column(12, renderPlot({
      res <- getbox()
      req(length(res) > 0)
      violin <- input$box_violin
      horiz = input$box_horiz
      base_size = input$box_base_size
      # Font sizes are the user multipliers scaled by the base size.
      cex.axes = input$box_cex.axes * base_size
      cex.lab = input$box_cex.lab * base_size
      cex.main = input$box_cex.main * base_size
      pal <- input$box_palette
      box_alpha = input$box_alpha
      main = input$box_title
      xlab <- input$box_xlab
      ylab <- input$box_ylab
      box_linecol = input$box_linecol
      varwidth = input$box_varwidth
      # The panel-title size input only exists with multiple Y variables;
      # fall back to 1 otherwise.
      if (length(input$box_cex.label_panel) > 0) {
        cex.label_panel = input$box_cex.label_panel * base_size
      } else {
        cex.label_panel = 1
      }
      linewidth = input$box_linewidth
      theme = input$box_theme
      grid = input$box_grid
      xlab_rotate = input$box_xlab_rotate
      ylab_rotate = input$box_ylab_rotate
      vals$pbox_plot <- ggbox(
        res, pal, violin, horiz, base_size, cex.axes, cex.lab, cex.main,
        xlab, ylab, main, box_linecol , box_alpha, vals$newcolhabs, cex.label_panel,
        varwidth = varwidth, linewidth = linewidth, theme = theme,
        grid = grid, xlab_rotate = xlab_rotate, ylab_rotate = ylab_rotate
      )
      vals$pbox_plot
    },
    width = input$box_width,
    height = input$box_heigth
    ))
  )
})

# Download link row for the boxplot.
output$editbox <- renderUI({
  fluidRow(class = "map_control_style", style = "color: #05668D",
    column(12, style = "border-top: 1px solid #05668D;",
      fluidRow(
        actionLink(
          ns("downp_box"), span("+ Download", icon("fas fa-download")),
          style = "button_active"
        )
      ))
  )
})

# --- MDS controls ----------------------------------------------------------
# Distance picker; the last choice is remembered in vals$cur_dist_mds.
output$omds_dist <- renderUI({
  if (is.null(vals$cur_dist_mds)) { vals$cur_dist_mds = "Choose one" }
  div(
    span("+ Distance:",
      inline(
        pickerInput(ns("distance"), NULL, choices = c("Choose one" = "", c('bray', "euclidean", 'jaccard')), selected = vals$cur_dist_mds, width = "125px")
      ))
  )
})
observeEvent(ignoreInit = T, input$distance, {
  vals$cur_dist_mds <- input$distance
})

# Biplot toggle for the PCA plot tab.
output$opca_biplot <- renderUI({
  req(input$pca_options == "pca_plot")
  div(
    span("+",
      inline(
        checkboxInput(ns("biplot"), span("Biplot", pophelp(NULL, "show biplot arrows")), T, width = "75px")
      ))
  )
})

# MDS plot options (symbols / labels / download); only shown once a valid
# distance has been chosen.
output$mds_options <- renderUI({
  req(input$distance %in% c("bray", "euclidean", "jaccard"))
  div(
    div(
      span("+", inline(checkboxInput(ns("mds_show_symbols"), "Symbol" , T, width = '75px')))),
    uiOutput(ns("mds_show_symbols_out")),
    div(span("+", inline(checkboxInput(ns("mds_show_labels"), "Labels", F)))),
    uiOutput(ns("mds_show_labels_out")),
    div(
      actionLink(
        ns('mds_downp'), span("+ Download", icon("fas fa-download")),
        style = "button_active"
      )
    )
  )
})

# Label options for the MDS plot (factor, colour, adj, offset, size).
# NOTE(review): `options=list(containder="body")` looks like a typo for
# "container" — confirm against shinyWidgets/bsplus usage.
# NOTE(review): some tooltip string literals contain embedded line breaks —
# kept verbatim.
output$mds_show_labels_out <- renderUI({
  req(isTRUE(input$mds_show_labels))
  div(style = "margin-left: 5px",
    div(span("+ Factor:",
      inline(tipify(pickerInput(ns("mds_labfactor"), NULL, choices = colnames(attr(getdata_descX(), "factors")), width = "125px"),
                    "label classification factor")))),
    div(span("+ Lab Color:",
      inline(tipify(
        pickerInput(
          inputId = ns("mds_labcolor"),
          label = NULL,
          selected = vals$colors_img$val[12], choices = vals$colors_img$val, choicesOpt = list(content = vals$colors_img$img), width = "75px",
          options = list(container = "body")
        ),
        "label classification factor"
      )))),
    div(span("+ Lab adj:",
      inline(
        tipify(pickerInput(ns("mds_labadj"), NULL, choices = c(1:4), width = "75px", options = list(containder = "body")),
               "a position specifier for the text. If specified this overrides any adj value given. 
Values of 1, 2, 3 and 4, respectively indicate positions below, to the left of, above and to the right of the specified (x,y) coordinates.", placement = "right")
      ))),
    div(span("+ Lab offset:",
      inline(
        tipify(numericInput(ns("mds_offset"), NULL, value = 0, step = .1, width = "75px"),
               "this value controls the distance ('offset') of the text label from the specified coordinate in fractions of a character width.")
      ))),
    div(span("+ Size:",
      numericInput(ns("mds_cextext"), NULL, value = 1, min = 0.1, max = 3, step = .1)))
  )
})

# Symbol options for the MDS plot (shape, size, palette).
output$mds_show_symbols_out <- renderUI({
  req(isTRUE(input$mds_show_symbols))
  div(style = "margin-left: 5px",
    div(
      span("+ Shape:",
        inline(pickerInput(inputId = ns("mds_symbol"), label = NULL, choices = df_symbol$val, options = list(container = "body"), choicesOpt = list(content = df_symbol$img), width = '75px')))),
    div(
      span("+ Size:",
        inline(numericInput(ns("mds_cexpoint"), NULL, value = 1, min = 0.1, max = 3, step = .1, width = '75px')))
    ),
    div(class = "palette",
      span("+ Color:",
        inline(
          tipify(
            pickerInput(inputId = ns("mds_colpalette"), label = NULL, choices = vals$colors_img$val, choicesOpt = list(content = vals$colors_img$img), options = list(container = "body"), selected = vals$colors_img$val[1], width = '120px'),
            "Symbol palette. 
Choose a gradient to color observations by a factor")))),
    uiOutput(ns("mds_fac_palette"))
  )
})

# Label options for the PCA plot — mirrors mds_show_labels_out.
output$pca_show_labels_out <- renderUI({
  req(isTRUE(input$pca_show_labels))
  div(style = "margin-left: 5px",
    div(span("+ Factor:",
      inline(tipify(pickerInput(ns("pca_labfactor"), NULL, choices = colnames(attr(getdata_descX(), "factors")), width = "125px"),
                    "label classification factor")))),
    div(span("+ Lab Color:",
      inline(tipify(
        pickerInput(
          inputId = ns("pca_labcolor"),
          label = NULL,
          selected = vals$colors_img$val[12], choices = vals$colors_img$val, choicesOpt = list(content = vals$colors_img$img), width = "75px",
          options = list(container = "body")
        ),
        "label classification factor"
      )))),
    div(span("+ Lab adj:",
      inline(
        tipify(pickerInput(ns("pca_labadj"), NULL, choices = c(1:4), width = "75px", options = list(containder = "body")),
               "a position specifier for the text. If specified this overrides any adj value given. Values of 1, 2, 3 and 4, respectively indicate positions below, to the left of, above and to the right of the specified (x,y) coordinates.", placement = "right")
      ))),
    div(span("+ Lab offset:",
      inline(
        tipify(numericInput(ns("pca_offset"), NULL, value = 0, step = .1, width = "75px"),
               "this value controls the distance ('offset') of the text label from the specified coordinate in fractions of a character width.")
      ))),
    div(span("+ Size:",
      inline(
        tipify(numericInput(ns("pca_cextext"), NULL, value = 1, min = 0.1, max = 3, step = .1), "label text size")
      )))
  )
})

# Show the MDS symbol-factor picker only when the chosen palette is a
# gradient (a solid palette yields the same colour twice).
output$mds_fac_palette <- renderUI({
  col <- getcolhabs(vals$newcolhabs, input$mds_colpalette, 2)
  req(col[1] != col[2])
  div(
    span("+ Factor:",
      inline(tipify(pickerInput(ns("mds_symbol_factor"), NULL, choices = rev(colnames(attr(getdata_descX(), "factors"))), width = '125px'),
                    "symbol classification factor"))))
})

# PCA plot options (symbols / labels / download).
output$pca_options_plot <- renderUI({
  req(input$pca_options == "pca_plot")
  div(
    div(
      span("+", inline(checkboxInput(ns("pca_show_symbols"), "Symbol" , T, width = '75px')))),
    uiOutput(ns("pca_show_symbols_out")),
    div(span("+",
      inline(checkboxInput(ns("pca_show_labels"), "Labels", F)))),
    uiOutput(ns("pca_show_labels_out")),
    div(
      actionLink(
        ns('pca_downp'), span("+ Download", icon("fas fa-download")),
        style = "button_active"
      )
    )
  )
})

# Symbol options for the PCA plot — mirrors mds_show_symbols_out.
output$pca_show_symbols_out <- renderUI({
  req(isTRUE(input$pca_show_symbols))
  div(style = "margin-left: 5px",
    div(
      span("+ Shape:",
        inline(pickerInput(inputId = ns("pca_symbol"), label = NULL, choices = df_symbol$val, options = list(container = "body"), choicesOpt = list(content = df_symbol$img), width = '75px')))),
    div(
      span("+ Size:",
        inline(numericInput(ns("pca_cexpoint"), NULL, value = 1, min = 0.1, max = 3, step = .1, width = '75px')))
    ),
    div(class = "palette",
      span("+ Color:",
        inline(
          tipify(
            pickerInput(inputId = ns("pca_colpalette"), label = NULL, choices = vals$colors_img$val, choicesOpt = list(content = vals$colors_img$img), options = list(container = "body"), selected = vals$colors_img$val[1], width = '120px'),
            "Symbol palette. Choose a gradient to color observations by a factor")))),
    uiOutput(ns("pca_fac_palette"))
  )
})

# Gradient-only factor picker for PCA symbols — mirrors mds_fac_palette.
output$pca_fac_palette <- renderUI({
  col <- getcolhabs(vals$newcolhabs, input$pca_colpalette, 2)
  req(col[1] != col[2])
  div(
    span("+ Factor:",
      inline(tipify(pickerInput(ns("pca_symbol_factor"), NULL, choices = rev(colnames(attr(getdata_descX(), "factors"))), width = '125px'),
                    "symbol classification factor"))))
})

# --- Figure download handlers ---------------------------------------------
# Each handler names the active plot and opens the figure-download module.
observeEvent(ignoreInit = T, input$downp_summ_num, {
  vals$hand_plot <- "variable summary"
  module_ui_figs("downfigs")
  mod_downcenter <- callModule(module_server_figs, "downfigs", vals = vals)
})
observeEvent(ignoreInit = T, input$downp_stats_fac, {
  vals$hand_plot <- "factor summary"
  module_ui_figs("downfigs")
  mod_downcenter <- callModule(module_server_figs, "downfigs", vals = vals)
})
observeEvent(ignoreInit = T, input$downp_hist, {
  vals$hand_plot <- "histogram"
  module_ui_figs("downfigs")
  mod_downcenter <- callModule(module_server_figs, "downfigs", vals = vals)
})
observeEvent(ignoreInit = T, input$mds_downp, {
  vals$hand_plot <- "mds"
  module_ui_figs("downfigs")
  mod_downcenter <- callModule(module_server_figs, "downfigs", vals = vals)
})
observeEvent(ignoreInit = T, input$pca_downp, {
  vals$hand_plot <- "pca"
  module_ui_figs("downfigs")
  mod_downcenter <- callModule(module_server_figs, "downfigs", vals = vals)
})

# Summary tab layout: navlist with Datalist / Data / Variables / Factors
# panels; the selected panel is persisted in vals$summ_options.
output$dtab_summaries <- renderUI({
  if (is.null(vals$curview_summ_options)) { vals$curview_summ_options <- 'Data' }
  column(class = "side_results", 12, offset = 0,
    navlistPanel(
      widths = c(2, 10),
      id = ns("summ_options"), selected = vals$summ_options,
      tabPanel(
        value = "Datalist",
        title = "Datalist",
        datalist_render(getdata_descX())),
      tabPanel(
        value = "Data",
        title = "Data",
        uiOutput(ns("stats_data"))
      ),
      tabPanel(
        value = "Variables",
        title = "Variables",
        uiOutput(ns("stats_var"))
      ),
      tabPanel(
        value = "Factors",
        title = "Factors",
        uiOutput(ns("stats_fac"))
      )
    )
  )
})
observeEvent(input$summ_options, {
  vals$summ_options <- input$summ_options
})

# Histogram tab: download button plus the histogram panel from phist().
output$dtab_histogram <- renderUI({
  fluidRow(
    column(12, actionButton(ns("downp_hist"), icon("fas fa-image"), icon("fas fa-download"), style = "button_active")),
    column(12, renderPlot({
      vals$phist <- phist(getdata_descX())
      vals$phist
    }))
  )
})

observeEvent(ignoreInit = T, input$cextext, {
  vals$cextext <- input$cextext
})

# Collapsible "Metrics" section of the Variables panel.
output$varhisto_metric <- renderUI({
  div(class = "cogs_in", style = "margin-bottom: 10px; padding:5px",
    div(class = "merge_datalist",
      div(actionLink(ns("show_metrics_act"), "+ Metrics"),
        uiOutput(ns('show_metrics'))
      )
    )
  )
})

# Collapsible variable-selection section of the Variables panel.
output$varhisto_out <- renderUI({
  div(class = "cogs_in", style = "margin-bottom: 10px; padding:5px;",
    actionLink(ns("show_histovar"), "+ Select the Variables"),
    DT::dataTableOutput(ns('histo_var_x'))
    #uiOutput(ns('varhisto_out3'))
  )
})

# Toggle/hide handlers for the collapsible sections; the `once = T` handlers
# collapse the sections after their content first renders.
observeEvent(ignoreInit = T, input$show_metrics_act, {
  shinyjs::toggle("show_metrics")
})
observeEvent(ignoreInit = T, input$show_histovar, {
  shinyjs::toggle("histo_var_x")
})
observeEvent(ignoreInit = T, input$varhisto_w3, {
  shinyjs::hide('show_histo_color')
}, once = T)
observeEvent(ignoreInit = T, input$histo_var_x_rows_selected, {
  shinyjs::hide('show_metrics')
}, once = T)
# Cap the number of pre-selected summary variables at 10 whenever the active
# datalist changes.
observeEvent(input$data_descX, {
  data <- getdata_descX()
  vals$desc_maxhistvar <- ifelse(ncol(data) > 10, 10, ncol(data))
})

# Variable-selection table for the numeric-summary plot; multi-row selection
# with the first `desc_maxhistvar` rows selected by default.
output$histo_var_x = DT::renderDataTable({
  req(is.numeric(vals$desc_maxhistvar))
  data <- vals$saved_data[[input$data_descX]]
  table = data.frame(Variables = colnames(data))
  DT::datatable(table,
    options = list(
      dom = "t",
      lengthMenu = list(c(-1), c("All")),
      scrollX = TRUE,
      scrollY = "200px",
      autoWidth = F
    ),
    class = 'compact cell-border', rownames = F, colnames = "",
    selection = list(mode = 'multiple', selected = c(1:vals$desc_maxhistvar)))
})

# One-time default for the rounding control (runs only while still unset).
observe({
  req(is.null(vals$varhisto_ps_round))
  vals$varhisto_ps_round <- 2
})
observeEvent(ignoreInit = T, input$varhisto_ps_round, {
  vals$varhisto_ps_round <- input$varhisto_ps_round
})

# Metric checkboxes plus the rounding input for the summary plot.
output$show_metrics <- renderUI({
  div(
    checkboxGroupInput(ns("varhisto_metric"), NULL,
      c('Min.' , '1st Qu.', "Mean", 'Median', '3rd Qu.', 'Max.'),
      selected = vals$varhisto_metric),
    div("+ Round",
      numericInput(ns("varhisto_ps_round"), NULL, value = vals$varhisto_ps_round, step = 010, width = "75px")
    )
  )
})

# One-time default metric selection (runs only while still unset).
observe({
  req(is.null(vals$varhisto_metric))
  vals$varhisto_metric <- c('Min.' , "Mean", 'Max.')
})
observeEvent(ignoreInit = T, input$varhisto_metric, {
  vals$varhisto_metric <- input$varhisto_metric
})

# Plot-parameter controls; background/border palettes are restricted to the
# solid colours returned by getsolid_col().
# NOTE(review): trailing comma after the last uiOutput leaves an empty
# argument in div() — confirm it is intentional.
output$show_histo_color <- renderUI({
  if (is.null(vals$cur_varhisto_palette)) {
    vals$cur_varhisto_palette <- "black"
  }
  div(
    style = "margin-left: 10px",
    div(
      div(
        class = "palette",
        span("background:",
          inline(pickerInput(inputId = ns("varhisto_palette"), label = NULL, choices = vals$colors_img$val[getsolid_col()][c(2,1,3,4,5,6)], choicesOpt = list(content = vals$colors_img$img[getsolid_col()])[c(2,1,3,4,5,6)], selected = vals$cur_varhisto_palette, width = '75px')
          )
        )
      ),
      div(
        class = "palette",
        span("border",
          inline(
            pickerInput(inputId = ns("varhisto_border_col"), label = NULL, choices = vals$colors_img$val[getsolid_col()], choicesOpt = list(content = vals$colors_img$img[getsolid_col()]), selected = vals$varhisto_border_col, width = '75px')
          )
        )
      )
    ),
    uiOutput(ns("varhisto_cex")),
    uiOutput(ns("varhisto_w1")),
    uiOutput(ns("varhisto_w2")),
    uiOutput(ns("varhisto_w3")),
  )
})

# Collapsible "Plot params" section wrapper.
output$varhisto_colors <- renderUI({
  div(class = "cogs_in", style = "margin-bottom: 10px; padding:5px",
    actionLink(ns("show_histo_colors_act"), '+ Plot params'),
    uiOutput(ns('show_histo_color'))
  )
})

# Collapsible "Plot size" section wrapper.
output$varhisto_sizeplot <- renderUI({
  div(class = "cogs_in", style = "margin-bottom: 10px; padding:5px",
    actionLink(ns("show_varhisto_sizeplot_act"), '+ Plot size'),
    uiOutput(ns('show_varhisto_sizeplot'))
  )
})

# Width/height inputs for the summary plot.
output$show_varhisto_sizeplot <- renderUI({
  req(!is.null(vals$varhisto_ps_height))
  div(
    div("+ Width",
      numericInput(ns("varhisto_ps_width"), NULL, value = vals$varhisto_ps_width, step = 010, width = "75px")
    ),
    div("+ Height",
      numericInput(ns("varhisto_ps_height"), NULL, value = vals$varhisto_ps_height, step = 10, width = "75px")
    )
  )
})

# One-time default plot width (runs only while still unset).
observe({
  req(is.null(vals$varhisto_ps_width))
  vals$varhisto_ps_width <- 550
})
observeEvent(ignoreInit = T, input$varhisto_ps_height, {
  vals$varhisto_ps_height <- input$varhisto_ps_height
})
observeEvent(ignoreInit = T, input$varhisto_ps_width, {
  vals$varhisto_ps_width <- input$varhisto_ps_width
})
# Collapse the plot-size section once its height input first appears.
observeEvent(ignoreInit = T, input$varhisto_ps_height, {
  shinyjs::hide('show_varhisto_sizeplot')
}, once = T)
observeEvent(ignoreInit = T, input$show_histo_colors_act, {
  shinyjs::toggle("show_histo_color")
})

# Variables panel: sidebar (variable selection, metrics, plot params, plot
# size, download link) plus the numeric-summary plot in the main panel.
output$stats_var <- renderUI({
  column(12,
    sidebarLayout(
      sidebarPanel(
        width = 4,
        div(
          class = "map_control_style",
          style = "color: #05668D",
          uiOutput(ns("varhisto_out")),
          uiOutput(ns("varhisto_metric")),
          uiOutput(ns("varhisto_colors")),
          uiOutput(ns("varhisto_sizeplot")),
          div(
            actionLink(ns('downp_summ_num'), tipify(span("+ Download", icon("fas fa-download")), "Download Plot"), style = "button_active")
          )
        )
      ),
      mainPanel(uiOutput(ns("summ_num")))
    ))
})

# Text-size control; only shown once at least one variable is selected.
output$varhisto_cex <- renderUI({
  req(length(input$histo_var_x_rows_selected) > 0)
  div("+ Text size:",
    numericInput(ns("cextext"), NULL, value = 2, step = 1, width = "100px")
  )
})

# Relative column widths of the summary layout (variable name / metrics /
# histogram).
output$varhisto_w1 <- renderUI({
  div("+ Var width",
    numericInput(ns("varhisto_w1"), NULL, value = vals$varhisto_w1, step = 0.05, width = "75px")
  )
})
output$varhisto_w2 <- renderUI({
  div("+ Metric width",
    numericInput(ns("varhisto_w2"), NULL, value = vals$varhisto_w2, step = 0.05, width = "75px")
  )
})
output$varhisto_w3 <- renderUI({
  div("+ Histo width",
    numericInput(ns("varhisto_w3"), NULL, value = vals$varhisto_w3, step = 0.05, width = "75px")
  )
})

# One-time defaults for the three layout widths.
observe({
  if (is.null(vals$varhisto_w1)) {
    vals$varhisto_w1 <- 0.2
    vals$varhisto_w2 <- vals$varhisto_w3 <- 0.35
  }
})
observeEvent(ignoreInit = T, input$varhisto_w1, {
  vals$varhisto_w1 <- input$varhisto_w1
})
observeEvent(ignoreInit = T, input$varhisto_w2, {
  vals$varhisto_w2 <- input$varhisto_w2
})
observeEvent(ignoreInit = T, input$varhisto_w3, {
  vals$varhisto_w3 <- input$varhisto_w3
})

# Draws the per-variable numeric summary (metric columns + mini histograms)
# via str_numerics(); records the plot for download.
# NOTE(review): req(input$varhisto_w3) is duplicated — harmless but likely
# one occurrence was meant to check a different input.
output$summ_num_plot <- renderPlot({
  req(input$varhisto_palette)
  req(input$varhisto_border_col)
  req(input$varhisto_metric)
  req(input$varhisto_w1)
  req(input$varhisto_w2)
  req(input$varhisto_w3)
  req(input$varhisto_w3)
  req(input$cextext)
  data = getdata_descX()
  req(length(input$histo_var_x_rows_selected) > 0)
  col <- getcolhabs(vals$newcolhabs, input$varhisto_palette, 1)
  col_border <- getcolhabs(vals$newcolhabs, input$varhisto_border_col, 1)
  # Restrict to the rows selected in the variables table.
  selected <- colnames(data)[input$histo_var_x_rows_selected]
  data <- data[, selected, drop = F]
  str_numerics(data, cextext = input$cextext, col = col, border = col_border, show = input$varhisto_metric, width_varname = input$varhisto_w1, width_metrics = input$varhisto_w2, width_histo = input$varhisto_w3, round = input$varhisto_ps_round)
  vals$varplot <- recordPlot()
})

# Sizes the plot output according to the user-chosen width/height.
output$summ_num <- renderUI({
  req(input$varhisto_ps_height)
  req(input$varhisto_ps_width)
  req(input$varhisto_ps_height > 10)
  req(input$varhisto_ps_width > 10)
  plotOutput(ns('summ_num_plot'), height = paste0(input$varhisto_ps_height, "px"), width = paste0(input$varhisto_ps_width, "px"))
})
observeEvent(ignoreInit = T, input$varhisto_border_col, {
  vals$varhisto_border_col <- input$varhisto_border_col
})
observeEvent(ignoreInit = T, input$varhisto_palette, {
  vals$cur_varhisto_palette <- input$varhisto_palette
})

# Factors panel: structure printout plus the factor summary plot with its
# download button.
# NOTE(review): trailing comma after verbatimTextOutput() leaves an empty
# argument in column() — confirm it is intentional.
output$stats_fac <- renderUI({
  column(12,
    column(12,
      h5(strong("Factors:")),
      h5(strong("Structure:")),
      verbatimTextOutput(ns('strlabels')),
    ),
    column(12,
      column(12, actionButton(ns("downp_stats_fac"), icon("fas fa-image"), icon("fas fa-download"), style = "button_active")),
      column(12, plotOutput(ns("factorsplot")))))
})

# Numeric-attribute summary (dimensions + missing-value count) printed via
# ppsummary(), with a progress bar while computing.
output$psummary <- renderPrint({
  data = getdata_descX()
  withProgress(message = "Calculating Numeric-Attribute summary ... 
Please, wait!", min = 1, max = 13, {
    nas = sum(is.na(unlist(data)))
    incProgress(1)
    n = data.frame(rbind(Param = paste('Missing values:', nas)))
    incProgress(1)
    a <- data.frame(rbind(Param = paste('nrow:', nrow(data)), paste('ncol:', ncol(data))))
    incProgress(1)
    ppsummary("-------------------")
    incProgress(1)
    ppsummary(n)
    ppsummary("-------------------")
    incProgress(1)
    ppsummary(a)
    ppsummary("-------------------")
    incProgress(1)
  })
})

# PCA biplot built with factoextra's fviz_pca_biplot, coloured by the chosen
# factor. NOTE(review): this output does not appear to be referenced in the
# visible layout (stats_ppca uses ppca() instead) — presumably an alternate
# implementation kept for reference; confirm before removing.
output$pca_fiz <- renderUI({
  renderPlot({
    pca_symbol_factor <- pca_symbol_factor()
    pca <- prcomp(getdata_descX())
    {
      col_pts = getcolhabs(vals$newcolhabs, input$pca_colpalette, nlevels(pca_symbol_factor))
      gg <- fviz_pca_biplot(pca, geom = "points", label = "var", habillage = pca_symbol_factor, col.var = 'red')
      data2 <- data.frame(id = rownames(pca$x), factor = pca_symbol_factor, pca$x[, 1:2])
      if (isTRUE(input$pca_show_symbols)) {
        gg <- gg + geom_point(data = data2, aes(x = PC1, y = PC2, col = factor), pch = as.numeric(input$pca_symbol), size = input$pca_cexpoint)
      }
      if (isTRUE(input$pca_show_labels)) {
        gg <- gg + geom_text(data = data2, aes(x = PC1, y = PC2, label = factor, col = factor))
      }
      gg
    }
    colorFAC <- data.frame(prev_fac = levels(pca_symbol_factor), col_pts, levels = 1:nlevels(pca_symbol_factor))
    gg <- gg + scale_color_manual(name = input$pca_labfactor, labels = colorFAC$prev_fac, values = colorFAC$col_pts, drop = F)
    # Layer 3 is reordered/dropped to hide or show the biplot arrows.
    if (isFALSE(input$biplot)) {
      gg$layers <- c(gg$layers[-3])
    } else {
      gg$layers <- c(gg$layers[-3], gg$layers[3])
    }
    gg
  })
})

# Table of the PCA result component selected in pic_pca_results(); every
# component is coerced to a data.frame so DT can render it. The shown table
# is cached in vals$pca_out for download.
output$summary_pca <- DT::renderDataTable({
  req(!is.null(vals$pca))
  res <- summary(vals$pca)
  center <- res$center
  scale <- res$scale
  sdev <- res$sdev
  res$center <- data.frame(center)
  res$scale <- data.frame(scale)
  res$sdev <- data.frame(sdev)
  res <- lapply(res, data.frame)
  vals$pca_out <- res[[pic_pca_results()]]
  vals$pca_out
}, options = list(pageLength = 20, info = FALSE, lengthMenu = list(c(20, -1), c("20", "All")), autoWidth = T, dom = 'lt'), rownames = T, class = 'cell-border compact stripe')

# Recompute the PCA whenever the PCA tab is active for the selected datalist
# (requires a complete-case dataset).
observe({
  req(input$desc_options == 'tab_pca')
  req(input$data_descX)
  req(input$desc_options)
  req(input$data_descX)
  validate(need(!anyNA(getdata_descX()), "This functionality does not support missing values; Please use the transformation tool to the handle missing values."))
  data <- vals$saved_data[[input$data_descX]]
  req(is.data.frame(data))
  X = as.matrix(data)
  vals$pca <- prcomp(X)
})

output$mdscustom <- renderPlot({ plot_mds() })

# PCA plot panel: assembles all symbol/label options and draws via ppca();
# the recorded plot is cached in vals$ppca_plot for download.
output$stats_ppca <- renderUI({
  validate(need(!anyNA(getdata_descX()), "This functionality does not support missing values; Please use the transformation tool to the handle missing values."))
  column(
    12,
    #uiOutput(ns('pca_fiz')),
    renderPlot({
      req(!is.null(vals$pca))
      suppressWarnings({
        args <- list(
          pca = vals$pca,
          key = pca_symbol_factor(),
          points = input$pca_show_symbols,
          text = input$pca_show_labels,
          palette = input$pca_colpalette,
          cex.points = input$pca_cexpoint,
          cex.text = input$pca_cextext,
          pch = pca_symbol(),
          keytext = pca_text_factor(),
          biplot = input$biplot,
          newcolhabs = vals$newcolhabs,
          textcolor = input$pca_labcolor,
          pos = input$pca_labadj,
          offset = input$pca_offset
        )
        do.call(ppca, args)
      })
      vals$ppca_plot <- recordPlot()
      vals$ppca_plot
    })
  )
})

# MDS panel: just hosts the mdscustom plot output (complete cases required).
output$stats_pmds <- renderUI({
  validate(need(!anyNA(getdata_descX()), "This functionality does not support missing values; Please use the transformation tool to the handle missing values."))
  res <- list(
    column(12,
      column(12, plotOutput(ns("mdscustom"))))
  )
  res
})

# Factor summary plot, cached for download.
output$factorsplot <- renderPlot({
  vals$factorsplot <- pfac(res_pfac())
  vals$factorsplot
})

# Data panel: numeric summary printout plus the missing-value table.
output$stats_data <- renderUI({
  column(
    12,
    style = "background: white;",
    fluidRow(
      column(12, h5(strong("numeric variables:"))),
      column(6, verbatimTextOutput(ns("psummary"))),
      column(12, uiOutput(ns("Locate_NA")))
    )
  )
})

# Missing-value table wrapper; only rendered when the data contains NAs.
output$Locate_NA <- renderUI({
  req(anyNA(unlist(getdata_descX())))
  div(
    column(12, strong("Missing Values:")),
    column(12,
      div(
        tags$style('#missing_values td {padding: 3px; text-align: left; font-size:12px}'),
        tags$style('#missing_values th {padding: 3px; text-align: left; font-size:12px}'),
        inline(
          DT::dataTableOutput(ns("missing_values"))
        )
      ))
  )
})
## ---------------------------------------------------------------------------
## Descriptive tools: missing-value table, palette helpers, ridge-plot panel,
## box-plot controls and MDS/PCA symbol/label helpers.
## NOTE(review): helpers such as ppsummary(), inline(), tipify(), popify(),
## bsButton(), pickerInput(), getcolhabs(), pmds() and plot_ridges() are
## defined elsewhere in the package -- their contracts are assumed here.
## ---------------------------------------------------------------------------

# Table listing the (row ID, variable name) position of every NA in the
# active Datalist; only rendered when at least one NA exists (req() guard).
output$missing_values<-DT::renderDataTable({
  data=getdata_descX()
  # 'res' keeps the numeric (row, col) indices; 'res0' is overwritten with
  # the corresponding row/column names for display.
  res0<-res<-which(is.na(data), arr.ind=TRUE)
  req(nrow(res)>0)
  for(i in seq_len(nrow(res))){
    res0[i,1]<-rownames(data)[res[i,1]]
    res0[i,2]<-colnames(data)[res[i,2]]
  }
  colnames(res0)<-c("ID","Variable")
  rownames(res0)<-NULL
  res0
},options = list(pageLength = 20, info = FALSE,lengthMenu = list(c(20, -1), c( "20","All")), autoWidth=T,dom = 'lt'), rownames = F,class ='cell-border compact stripe')

# Prints a short summary (NA count + str()) of the factor attributes
# attached to the active Datalist, aligned to its row order.
output$strlabels<-renderPrint({
  ppsummary("----------------")
  ppsummary(paste("Missing values:",sum(is.na(attr(getdata_descX(),"factors")))))
  ppsummary("----------------")
  str(attr(getdata_descX(),"factors")[rownames(getdata_descX()),,drop=F])
})

# Indices (into vals$colors_img$val) of the "solid" palettes, i.e. palette
# functions that return the same color twice when asked for 2 colors.
getsolid_col<-reactive({
  res<-lapply(vals$newcolhabs, function(x) x(2))
  res1<-unlist(lapply(res, function(x) x[1]==x[2]))
  solid<-names(res1[res1==T])
  pic<-which(vals$colors_img$val%in%solid)
  pic
})

# Complement of getsolid_col(): indices of the gradient palettes.
getgrad_col<-reactive({
  res<-lapply(vals$newcolhabs, function(x) x(2))
  res1<-unlist(lapply(res, function(x) x[1]==x[2]))
  grad<-names(res1[res1==F])
  pic<-which(vals$colors_img$val%in%grad)
  pic
})

# "Save breakpoints" button: shown only when SMW breakpoints exist
# (vals$splitBP) and are not already stored as a factor of the X Datalist.
# The CSS class flags unsaved changes (vals$bag_smw is cleared on save).
output$save_breakpoints<-renderUI({
  if(is.null(vals$bag_smw)){ btn_class="novo" } else{ btn_class="save_changes"}
  if(!isFALSE(vals$splitBP)){
    if(!any(unlist(lapply(attr(vals$saved_data[[input$segrda_X]],"factors"), function (x) identical(x,as.vector(vals$splitBP)))))){
      popify(
        div(class=btn_class,id=ns("save_changes_bp"),
            bsButton(ns('tools_saveBP'),icon("fas fa-save"),style='save_button')
        ),"Save breakpoints from DP", "this action divides the observations according to the breakpoints and assigns a factor to each split"
      )
    }
  }
})

# Layout of the ridge-plot tab: controls on the left, plot on the right.
output$dtab_rid<-renderUI({
  sidebarLayout(
    sidebarPanel(
      div( class="map_control_style2", style="color: #05668D",
           uiOutput(ns('rid_side'))
      )),
    mainPanel(
      # trailing comma after this argument removed (empty-argument error)
      uiOutput(ns('runrid_btn'))
      # uiOutput(ns('rid_out'))
    )
  )
})

# RUN button + plot container for the ridge plot.
# BUG FIX: was renderUser() (shinydashboardPlus user-panel renderer);
# uiOutput(ns('runrid_btn')) requires renderUI().
output$runrid_btn<-renderUI({
  div(
    column(12,align="left",
           div(
             div(id=ns("runrid_btn"),
                 actionButton(ns('runrid'), 'RUN', icon=icon("fas fa-sync"))
             )
           )),
    uiOutput(ns('rid_out'))
  )
})

# Sidebar of the ridge-plot tab: grouping factor, variable selector and
# plot options.
output$rid_side<-renderUI({
  req(input$data_descX)
  data<-vals$saved_data[[input$data_descX]]
  factors<-attr(data,"factors")
  div(
    pickerInput(inputId=ns("rid_y"),label = "+ X (factor)", choices =rev(colnames(factors)),selected=vals$rid_y),
    actionLink(ns("show_obs_selection"),"+ Y (numeric)"),
    uiOutput(ns('rid_x_variables')),
    uiOutput(ns("ridplot_options"))
  )
})

# Cosmetic options for the ridge plot. Input IDs ('rid_tittle',
# 'rid_heigth') are kept misspelled for backward compatibility with the
# inputs read elsewhere; only the user-facing labels were corrected.
output$ridplot_options<-renderUI({
  div(
    pickerInput(inputId=ns("rid_col"),label = "+ Palette:",choices = vals$colors_img$val,choicesOpt = list(content = vals$colors_img$img),options=list(container="body"),selected=vals$colors_img$val[1]),
    textInput(ns('rid_tittle'),"+ Title",""),
    numericInput(ns('rid_base_size'),"+ Base size",11),
    numericInput(ns('rid_ncol'),"+ Nº columns",3),
    numericInput(ns('rid_width'),"+ Plot width",700),
    numericInput(ns('rid_heigth'),"+ Plot height",300),
    div(
      actionLink(ns("rid_downp"),"Download plot", style="button_active")
    )
  )
})

# Container for the (toggleable) variable-selection table.
output$rid_x_variables<-renderUI({
  div(class="cogs_in_div",style="margin-bottom: 10px; padding:5px",
      fluidPage(
        DT::dataTableOutput(ns('rid_x'))
      )
  )
})

# Show/hide the variable table when the "+ Y (numeric)" link is clicked.
observeEvent(ignoreInit = T,input$show_obs_selection,{
  shinyjs::toggle("rid_x")
})

# Variable-selection table (multiple selection; first 3 preselected).
output$rid_x <- DT::renderDataTable( {
  data<-vals$saved_data[[input$data_descX]]
  var_tbl=data.frame(Variables=colnames(data))
  DT::datatable(var_tbl, options=list(
    dom="t",
    lengthMenu = list(c(-1), c("All")),
    scrollX = TRUE,
    scrollY = "200px",
    autoWidth=F
  ),
  class ='compact cell-border',rownames=F, colnames="",
  selection = list(mode = 'multiple', selected = c(1:3)))
})

# Builds the ridge plot for the selected variables, grouped by the chosen
# factor; clears the "unsaved" highlight of the RUN button as a side effect.
plot_rid<-reactive({
  req(input$rid_y)
  req(input$data_descX)
  data=vals$saved_data[[input$data_descX]]
  factors<-attr(data,"factors")
  req(length(input$rid_x_rows_selected)>0)
  data<-data[,input$rid_x_rows_selected, drop=F]
  fac<-factors[,input$rid_y]
  data$class<-fac
  args<-list(data=data,
             fac=input$rid_y,
             palette=input$rid_col,
             newcolhabs=vals$newcolhabs,
             ncol=input$rid_ncol,
             title=input$rid_tittle,
             base_size=input$rid_base_size)
  p<- do.call(plot_ridges,args)
  shinyjs::removeClass("runrid_btn","save_changes")
  p
})

# Renders the cached ridge plot with the user-chosen dimensions.
output$rid_out<-renderUI({
  req(!is.null(vals$rid_plot))
  renderPlot(vals$rid_plot, width=input$rid_width,height=input$rid_heigth)
})

# First variable selection triggers an automatic first draw (once only).
observeEvent(input$rid_x_rows_selected,{
  req(input$data_descX)
  req(input$rid_x_rows_selected)
  vals$rid_plot<- plot_rid()
  shinyjs::removeClass("runrid_btn","save_changes")
}, once = T)

# Manual redraw on RUN.
observeEvent(input$runrid,{
  vals$rid_plot<- plot_rid()
  shinyjs::removeClass("runrid_btn","save_changes")
})

# A fresh plot always clears the "unsaved changes" highlight.
observeEvent( vals$rid_plot,{
  shinyjs::removeClass("runrid_btn","save_changes")
})

# Any option change after the first draw flags the RUN button as stale.
# (duplicate input$rid_col entry removed from the dependency list)
observeEvent(list(input$rid_x_rows_selected,
                  input$data_descX,
                  input$rid_col,
                  input$rid_y,
                  input$rid_ncol
) ,ignoreInit = T,{
  req(length(input$rid_x_rows_selected)>0)
  req(!is.null(vals$rid_plot))
  shinyjs::addClass("runrid_btn","save_changes")
})

# Layout of the box-plot tab.
output$boxplot_out<-renderUI({
  sidebarLayout(
    sidebarPanel(
      column(12,style="margin: 0px; margin-bottom: -20px",align="right",actionLink(ns("box_reset"),"+ reset")),
      uiOutput(ns('side_boxplot')),
      uiOutput(ns("editbox"))),
    mainPanel(uiOutput(ns("stats_pbox")))
  )
})

# Box-plot control strip: Y variables, grouping factor and optional filter.
output$stats_cbox<-renderUI({
  div(style="background: white",
      p(strong("Box plot")),
      inline(uiOutput(ns("box_y_input"))),
      inline(uiOutput(ns("boxplot_X"))),
      inline(uiOutput(ns("filter_box1"))),
      inline(uiOutput(ns("filter_box2")))
  )
})

# Y-variable picker (multiple numeric columns of the active Datalist).
output$box_y_input<-renderUI({
  data<-getdata_descX()
  div(
    div(tipify(
      strong("Y ~"),
      " y is the data values to be split into groups according to the grouping variable", options = list(container="body")
    )),
    pickerInput( ns("box_y"),NULL,choices = colnames(data),selected= colnames(data)[1], multiple=T))
})

# Grouping-factor picker for the box plot.
output$boxplot_X<-renderUI({
  div(
    div(strong("Factor:")),
    pickerInput(ns("boxplot_X"),NULL,
                choices =rev(colnames(attr(vals$saved_data[[input$data_descX]], "factors"))),
                selected=vals$boxplot_X,
                width="200px")
  )
})

# Both observers fire on the same event: the first snapshots the current Y
# selection, the second the factor itself (kept as in the original —
# presumably intentional; verify against the reset logic).
observeEvent(ignoreInit = T,input$boxplot_X,{vals$box_y<-input$box_y})
observeEvent(ignoreInit = T,input$boxplot_X,{vals$boxplot_X<-input$boxplot_X})

# Optional filter: pick a factor column ("none" disables filtering)...
output$filter_box1<-renderUI({
  div(
    div(strong("Filter:")),
    pickerInput(ns("filter_box1"),NULL,choices = c("none", colnames(attr(getdata_descX(),"factors"))),selected=filter_box1_cur$df, width="200px"))
})

# ...and, when a factor is chosen, the level to keep.
output$filter_box2<-renderUI({
  req(input$filter_box1)
  if (input$filter_box1 != "none") {
    data = getdata_descX()
    labels<-attr(data,"factors")[rownames(data), input$filter_box1]
    div(
      div(strong("Class:")),
      pickerInput(ns("filter_box2"), NULL, choices = c(levels(as.factor(labels))), selected=filter_box2_cur$df, width="200px")
    )
  }
})

# PCA plotting symbol: NA (no points) when symbols are disabled.
pca_symbol<-reactive({
  if(isFALSE(input$pca_show_symbols)){NA}else{as.numeric(input$pca_symbol)}
})

# The Datalist currently selected on the descriptive-tools tab.
getdata_descX<-reactive({
  req(input$data_descX)
  vals$saved_data[[input$data_descX]]})

# Assembles the data.frame consumed by the box plot: first column is the
# grouping factor, remaining columns the selected Y variables, optionally
# restricted to the rows matching the chosen filter level.
getbox<-reactive({
  req(input$boxplot_X)
  req(input$box_y)
  req(input$filter_box1)
  data=getdata_descX()
  labels<-attr(data,"factors")
  pic<-seq_len(nrow(data))
  req(any(input$boxplot_X%in%colnames(labels)))
  x<-labels[input$boxplot_X]
  req(any(input$box_y%in%colnames(data)))
  y<-data[input$box_y]
  if (input$filter_box1 != "none") {
    filtro<-as.character(input$filter_box1)
    filtro2<-as.character(input$filter_box2)
    pic<-which(as.character(labels[, filtro]) == filtro2)
  }
  res = data.frame(x,y)[pic,]
  res
})

# Factor used to color MDS symbols; NULL when a solid (single-color)
# palette is selected, in which case no factor mapping is applied.
mds_symbol_factor<-reactive({
  req(input$mds_symbol_factor)
  col<-getcolhabs(vals$newcolhabs,input$mds_colpalette,2)
  if(col[1]!=col[2]){
    data = getdata_descX()
    attr(data,"factors")[rownames(data), input$mds_symbol_factor]
  }else{NULL}
})

# Factor supplying the MDS text labels (NULL when labels are disabled).
mds_text_factor<-reactive({
  if(isFALSE(input$mds_show_labels)){NULL} else{
    data = getdata_descX()
    attr(data,"factors")[rownames(data), input$mds_labfactor]}
})

# MDS plotting symbol: NA (no points) when symbols are disabled.
mds_symbol<-reactive({
  if(isFALSE(input$mds_show_symbols)){NA}else{as.numeric(input$mds_symbol)}
})

# PCA analogue of mds_symbol_factor().
pca_symbol_factor<-reactive({
  req(input$pca_symbol_factor)
  col<-getcolhabs(vals$newcolhabs,input$pca_colpalette,2)
  if(col[1]!=col[2]){
    data = getdata_descX()
    attr(data,"factors")[rownames(data), input$pca_symbol_factor]
  }else{NULL}
})

# PCA analogue of mds_text_factor().
pca_text_factor<-reactive({
  if(isFALSE(input$pca_show_labels)){NULL} else{
    data = getdata_descX()
    attr(data,"factors")[rownames(data), input$pca_labfactor]}
})

# Draws the MDS ordination (base graphics via pmds()), records it into
# vals$pmds_plot and returns the recorded plot.
plot_mds<-reactive({
  validate(need(input$distance!='', "Select a distance measure for the mds"))
  mds_data = vals$mds
  # Guard on a real condition: the original tested exists("mds_data"),
  # which is always TRUE right after the assignment above.
  if (!is.null(mds_data)) {
    pmds(
      mds_data = mds_data,
      key = mds_symbol_factor(),
      points =input$mds_show_symbols,
      text = input$mds_show_labels,
      palette = input$mds_colpalette,
      cex.points = input$mds_cexpoint,
      cex.text = input$mds_cextext,
      pch=mds_symbol(),
      keytext=mds_text_factor(),
      newcolhabs=vals$newcolhabs,
      textcolor=input$mds_labcolor,
      pos=input$mds_labadj,
      offset=input$mds_offset
    )
  }
  vals$pmds_plot<-recordPlot()
  # BUG FIX: the original returned `res`, a name never defined in this
  # reactive; return the recorded plot instead.
  vals$pmds_plot
})

# Factor attributes of the active Datalist, aligned to its row order.
res_pfac<-reactive({
  attr(getdata_descX(),"factors")[rownames(getdata_descX()),,drop=F]
})

# Remember the last Datalist selected on this tab.
observeEvent(ignoreInit = T,input$data_descX,{
  req(length(vals$saved_data)>0)
  vals$cur_data<-input$data_descX
})

# Open the figure-download modal for the ridge plot.
observeEvent(ignoreInit = T,input$rid_downp,{
  vals$hand_plot<-"Ridge plot"
  module_ui_figs("downfigs")
  mod_downcenter<-callModule(module_server_figs,"downfigs", vals=vals)})

# Persist box-plot / filter selections so the UI restores them on redraw.
observeEvent(ignoreInit = T,input$rid_y, vals$rid_y<-input$rid_y)
observeEvent(ignoreInit = T,input$box_y,{
  box_y_cur$df<-input$box_y
})
observeEvent(ignoreInit = T,input$filter_box2,{
  filter_box2_cur$df<-input$filter_box2
})
observeEvent(ignoreInit = T,input$filter_box1,{
  filter_box1_cur$df<-input$filter_box1
})
observeEvent(ignoreInit = T,input$boxplot_X,{
  boxplot_X_cur$df<-input$boxplot_X
})

# Open the figure-download modal for the box plot.
observeEvent(ignoreInit = T,input$downp_box,{
  vals$hand_plot<-'boxplot'
  module_ui_figs("downfigs")
  mod_downcenter<-callModule(module_server_figs, "downfigs", vals=vals)
})
observeEvent(list(input$distance,getdata_descX()),{ req (input$distance %in%c("bray","euclidean","jaccard")) vals$mds<-metaMDS1(getdata_descX(), distance = input$distance) }) observeEvent(ignoreInit = T,input$downcenter_rda,{ vals$hand_down<-"rda" module_ui_downcenter("downcenter") mod_downcenter <- callModule(module_server_downcenter, "downcenter", vals=vals) }) ################## ### SEGRDA ### ### ############### output$segrda_header<-renderUI({ div() }) output$segrda_panels<-renderUI({ req(input$segrda_X) req(input$segrda_Y) column(12, tabsetPanel(id=ns("segrda_panels"), selected=vals$segrda_panels, tabPanel("SMW", uiOutput(ns("segrda_smw")) ), tabPanel("DP", uiOutput(ns("segrda_dp")) ), tabPanel("pwRDA", p(strong("Piecewise RDA")), uiOutput(ns("pw_out"))))) }) observeEvent(ignoreInit = T,input$segrda_Y,{ vals$cur_segrda_Y<-input$segrda_Y}) observeEvent(ignoreInit = T,input$segrda_X,{ vals$cur_segrda_X<-input$segrda_X}) output$databank_storage<-renderUI({ div( column(12, div(strong("action:"),em("*",vals$hand_save,style="color: SeaGreen")), div(vals$hand_save2,style="color: gray"), div(vals$hand_save3)) ) }) output$save_confirm<-renderUI({ actionButton(ns("data_confirm"),strong("confirm")) }) output$ord_side<-renderUI({ fluidRow(class="map_control_style",style="color: #05668D", div( span("+", checkboxInput(ns("segrda_scale"),span("Scale variables",tiphelp("Scale variables to unit variance (like correlations)")), value=T, width = "100px") ) ), div( span("+", inline( checkboxInput(ns("segrda_ord"),strong("Axis ordination",pophelp(NULL,"Both the SMW and pwRDA analyses depend on ordered datasets. Defaults to ordering both response and explanatory matrices using one of the axes of the RDA model. 
If unchecked,please make sure all your inputs are already ordered.")), value=T) ), inline(uiOutput(ns("ord_check"))) ) ), uiOutput(ns("ord_sure")) ) }) output$ord_check<-renderUI({ req(isTRUE(input$segrda_ord)) inline(numericInput(ns("axis_ord_segrda"),NULL, value=1, step=1, width="75px")) }) output$ord_sure<-renderUI({ req(isFALSE(input$segrda_ord)) div(style='white-space: normal;', strong("Wargning:", style="color: SeaGreen"),"Make sure both X and Y are previously ordered") }) output$ordplot_matrix<-renderUI({ req(isTRUE(input$segrda_ord)) #mybreaks<-vals$window_pool fluidRow( renderPlot({ sim1o<-getord() #sim1o<-readRDS('sim1o.rds') mybreaks<-vals$window_pool #mybreaks<-c(2,50,141) xo<-sim1o$xo ## ordered explanatory matrix. yo<-sim1o$yo ## ordered community matrix (untransformed). x<-sim1o$y par(mfrow = c(1, 2), mgp = c(1, 1, 0), cex = 0.9) image(x, main = "Original response data", col = topo.colors(100), axes = F, xlab = "Observations", ylab = "Variable values") # abline(v=scales::rescale(mybreaks,c(0,1)), col="red") image(yo, main = "Ordered response data", col = topo.colors(100), axes = F, xlab = "Observations", ylab = "Variable values") #abline(v=scales::rescale(mybreaks,c(0,1)), col="red") }) ) }) output$segrda_smw<-renderUI({ div( sidebarLayout( sidebarPanel(uiOutput(ns('side_smw')), br(), div(style="margin-left: 20px", actionButton(ns("go_smw"),strong( img(src=smw_icon,height='20',width='20'),"run SMW"), style="button_active") ) ), mainPanel( tabsetPanel(id=ns("smw_panels"), tabPanel("Data ordination", value="swm_1", uiOutput(ns("ordplot_matrix"))), tabPanel("SMW results", value="swm_2", uiOutput(ns("go_smw"))) ) ) ) ) }) output$ord_windows<-renderUI({ div( tipify(icon("fas fa-question-circle",style="color: gray"),"Enter a vector of breakpoints (comma delimited, within the data range)"), "+ Windows", textInput(ns('custom_windows'), NULL, paste0(get_windows(),collapse=", "), width="200px"), ) }) get_windows<-reactive({ req(input$segrda_X) 
data<-vals$saved_data[[input$segrda_X]] req(nrow(data)>0) w<-1:(nrow(data)/2) w<- w[which(( w %% 2) == 0)] w<-round(seq(10,w[length(w)], length.out=5)) w[which(( w %% 2) != 0)]<-w[which(( w %% 2) != 0)]+1 w }) is.even<-function(x){ x %% 2 == 0} getpool<-reactive({ mybreaks<-NULL req(input$segrda_X) req(length(input$custom_windows)>0) req(!is.na(input$custom_windows)) mybreaks<-as.numeric(unlist(strsplit(input$custom_windows,","))) data<-vals$saved_data[[input$segrda_X]] cond0<-length(mybreaks)>0 validate(need(cond0,"The windows vector is empty")) cond1<-sum(sapply(mybreaks,is.even))==length(mybreaks) cond2<-min(mybreaks)>=2 cond2_res<-paste("The maximum window size cannot exceed the number of observations:", nrow(data)) validate(need(cond1,"Window sizes must be even")) validate(need(cond2,"The minimum allowed size of the windows is 2")) validate(need(max(mybreaks)<=nrow(data),cond2_res)) mybreaks }) output$smw_tuning<-renderUI({ div( div( span(span(tipify(icon("fas fa-question-circle", style="color: gray"),"Dissimilarity index"),"+ Distance:"), inline( pickerInput(ns("smw_dist"),NULL, choices=c("bray","euclidean","manhattan","jaccard"), width="100px") ) ) ), div( span(span(tipify(actionLink(ns("smw_rand_help"),icon("fas fa-question-circle")),"The type of randomization for significance computation. 
Click for details"),"+ Randomization:"), inline( pickerInput(ns("smw_rand"),NULL, choices=c("shift","plot"), width="70px") ) ) ), div( span(span(tipify(icon("fas fa-question-circle", style="color: gray"),"The number of randomizations"),"+ n.rand:"), inline( numericInput(ns("smw_nrand"),NULL, value=10, width="100px") ) ) ), uiOutput(ns("down_we_out")) ) }) output$down_we_out<-renderUI({ req(length(vals$smw_dp)>0) tipify( actionLink( ns('downp_we'),span("+ Download",icon("fas fa-download")), style="button_active" ), "Download plot" ) }) output$side_smw<-renderUI({ fluidRow( class="map_control_style",style="color: #05668D", uiOutput(ns("ord_side")), uiOutput(ns("ord_windows")), uiOutput(ns("smw_tuning")) ) }) output$go_smw<-renderUI({ getpool() validate(need(!is.null(vals$window_pool),"The window pool is empty. Use the arrow button to include the window sizes")) validate(need(length(vals$smw_dp)>0,"Please click 'run SMW' button")) fluidRow( column(12, renderPlot({ if(length(vals$window_pool)>1){w.effect=TRUE main="Window size effect"} else{w.effect=F main="Dissimilary Profile"} suppressWarnings(plot( vals$smw_dp, w.effect =w.effect , main=main)) vals$plot_we<-recordPlot() }) ) ) }) observeEvent(vals$max_seq,{ req(!is.null(vals$max_seq)) vals$dp_seq.sig<-attr(max_seq(),"min") }) output$plot_dp<-renderPlot({ req(input$dp_view=='Plot') req(input$dp_seq.sig) req(input$dp_cex) req(input$dp_BPs) max_seq<-max_seq() validate(need(input$dp_seq.sig<=max_seq,paste( "DP shows '", sum(DP_smw()[,5]!="ns"),"' significant dissimilarity values but no breakpoint could be determined for seq.sig='",input$dp_seq.sig,"The maximum value for this input must be","max_seq" ))) smw<- vals$smw_dp getDP() dp_BPs<-if(input$dp_BPs==""){NULL} else{input$dp_BPs} dp_w<-if(is.na(input$dp_w)){NULL} else{input$dp_w} par(cex=input$dp_cex) suppressWarnings( suppressMessages( plot(smw,w=dp_w, sig=input$dp_sig, z=input$dp_z, BPs=dp_BPs, seq.sig=input$dp_seq.sig, bg= 
getcolhabs(vals$newcolhabs,input$dp_palette,nlevels(as.factor(vals$splitBP[,1]))),bg_alpha=input$dp_bg,cols=c(getcolhabs(vals$newcolhabs,input$dp_dcol,1),getcolhabs(vals$newcolhabs,input$dp_scol,1),getcolhabs(vals$newcolhabs,input$dp_bcol,1))) ) ) if(isTRUE(updp$df)){ updateTabsetPanel(session,'segrda_panels','pwRDA') updp$df<-F } vals$plot_dp<-recordPlot() }) output$dp_extract<-renderUI({ req(input$dp_view=='DP results') dp<-getDP() fluidRow( column(12,renderPrint({dp})) #renderPrint({vals$splitBP}) ) }) output$dp_view_dp<-renderUI({ req(input$dp_view=='DP results') tipify( actionLink( ns('downcenter_dp_smw'),span("+ Download",icon("fas fa-table")), style="button_active" ), "Download DP results" ) }) observeEvent(ignoreInit = T,input$dp_seq.sig,{ vals$dp_seq.sig<-input$dp_seq.sig }) output$side_dp<-renderUI({ if(is.null(vals$dp_seq.sig)){ vals$dp_seq.sig<-3 } validate(need(length(vals$smw_dp)>0,"You need to run SMW analysis first")) fluidRow( class="map_control_style",style="color: #05668D", div( span(span(tipify(icon("fas fa-question-circle", style="color: gray"),"A target window size from which results will be extracted. If empty return z-scores averaged over the set of window sizes"),'+ w:'), inline( numericInput(ns("dp_w"),NULL, value=NULL, width="75px") ) ) ), div( span(span(actionLink(ns("dp_index_help"),tipify(icon("fas fa-question-circle"),"The result to be extracted. Click for details")),'+ index:'), inline( pickerInput(ns("dp_index"),NULL,choices=c("dp","rdp","md","sd","oem","osd","params"), width="75px") ) ) ), div( span(span(actionLink(ns("dp_sig_help"),tipify(icon("fas fa-question-circle"),"Significance test for detecting dissimilarity values that differs significantly from those appearing in a random pattern. 
Click for details")),'+ sig'), inline( pickerInput(ns("dp_sig"),NULL,choices=c("z","sd","sd2","tail1"), width="75px") ) ) ), div( span(span(tipify(icon("fas fa-question-circle", style="color: gray"),"The critical value for the significance of z-values"),"+ z:"), inline( numericInput(ns("dp_z"),NULL, value=1.85,step=0.01, width="75px") ) ) ), div( span(span(tipify(icon("fas fa-question-circle", style="color: gray"),"Defines if the breakpoints should be chosen as those sample positions corresponding to the maximum dissimilarity in a sequence of significant values (max) or as those sample positions corresponding to the median position of the sequence (median). Defaults to BPs=max. If empty the breakpoints are not computed"),"+ BPs:"), inline( pickerInput(ns("dp_BPs"),NULL,choices=c("","max","median"), selected = "max", width="75px") ) ) ), div( span(span(tipify(icon("fas fa-question-circle", style="color: gray"),"The maximum length of consecutive, significant values of dissimilarity that will be considered in defining the community breakpoints"),"+ seq.sig:"), inline( numericInput(ns("dp_seq.sig"),NULL, value=vals$dp_seq.sig,step=1, width="75px", min=1) ) ) ), div(uiOutput(ns("dp_view_dp"))), div(uiOutput(ns("dp_view_plot"))) ) }) output$dp_view_plot<-renderUI({ req(input$dp_view=='Plot') div( div(class="palette", span("+ Palette", inline( pickerInput(inputId=ns("dp_palette"), label = NULL, choices = vals$colors_img$val[getgrad_col()], choicesOpt = list(content = vals$colors_img$img[getgrad_col()]), width="75px") ) ) ), div( span("+ size", inline( numericInput(ns("dp_cex"), NULL,value=1,min=0.1,step=0.1, width="75px") ) ) ), div( span("+ diss col", inline( pickerInput(inputId=ns("dp_dcol"), label = NULL, choices = vals$colors_img$val[getsolid_col()], choicesOpt = list(content = vals$colors_img$img[getsolid_col()]), width="75px") ) ) ), div( span("+ sig col", inline( pickerInput(inputId=ns("dp_scol"), label = NULL, choices = vals$colors_img$val[getsolid_col()], 
choicesOpt = list(content = vals$colors_img$img[getsolid_col()]), selected= vals$colors_img$val[getsolid_col()][4], width="75px") ) ) ), div( span("+ bp col", inline( pickerInput(inputId=ns("dp_bcol"), label = NULL, choices = vals$colors_img$val[getsolid_col()], choicesOpt = list(content = vals$colors_img$img[getsolid_col()]), selected= vals$colors_img$val[getsolid_col()][3], width="75px") ) ) ), div( span("+ bg", inline(numericInput(ns("dp_bg"), NULL,value=0.5,min=0,max=1,step=0.05, width="75px")) ) ), tipify( actionLink( ns('downp_dp'),span("+ Download",icon("fas fa-download")) ), "Download plot" ) ) }) output$segrda_dp<-renderUI({ validate(need(length(vals$smw_dp)>0,"You need to run SMW analysis first")) fluidRow( column(12, div(strong("Dissimilary Profile"), inline(uiOutput(ns("save_breakpoints")))),), sidebarLayout( sidebarPanel(uiOutput( ns("side_dp") )), mainPanel( tabsetPanel(id=ns("dp_view"),selected=vals$dp_view, tabPanel("Plot", value="Plot", plotOutput(ns("plot_dp"))), tabPanel("DP results", value="DP results", uiOutput(ns("dp_extract")))) ) ) ) }) observeEvent(ignoreInit = T,input$dp_view, vals$dp_view<-input$dp_view) get_breaks_from_factor<-reactive({ x<-vals$saved_data[[input$segrda_X]] y<-vals$saved_data[[input$segrda_Y]] bp<-attr(x,"factors")[,input$bp_column] xord<-x[order(as.numeric(bp)),] yord<-y[order(as.numeric(bp)),] breaks<-bp[order(as.numeric(bp))] breaks<-which(diff(as.numeric(breaks))==1) pw_in<-list( breaks=breaks, xord=xord, yord=yord ) }) observeEvent(ignoreInit = T,input$segrda_X,{ x<-attr(vals$saved_data[[input$segrda_X]],"factors") }) choices_bp_column<-reactive({ req(input$segrda_X) x<-attr(vals$saved_data[[input$segrda_X]],"factors") colnames(x) }) observeEvent(ignoreInit = T,input$bp_column, vals$bp_column<-input$bp_column) output$user_bp<-renderPrint(vals$bag_user_bp) output$getDP<-renderPrint({ req(!isFALSE(vals$splitBP)) suppressWarnings(bp(getDP()))}) output$pw_out<-renderUI({ div( span( inline( div( tipify(icon("fas 
fa-question-circle",style="color: gray"),"Enter a vector of breakpoints (comma delimited, within the data range)"), "+ Split reference [Y Datalist]", pickerInput(ns('bp_column'), NULL,choices_bp_column() , width="200px", selected=vals$bp_column) ) ), inline(numericInput(ns("pw_nrand"), 'n.rand', value=99, width="75px")), inline( div(id=ns('run_pwrda_button'), actionButton(ns("run_pwrda"),strong(img(src=pw_icon,height='20',width='20'),"run pwRDA"), style="button_active") ) ), inline(uiOutput(ns('pwrda_models_out'))), ), uiOutput(ns("pwRDA_out")) ) }) observe({ req(input$segrda_X) req(input$pwrda_models) req(input$run_pwrda) if(!length(attr(vals$saved_data[[input$segrda_X]],"pwrda")[[input$pwrda_models]])>0){ addClass('run_pwrda_button',"save_changes") } else{ removeClass('run_pwrda_button',"save_changes") } }) output$pwrda_models_out<-renderUI({ choices<-names(attr(vals$saved_data[[input$segrda_X]],"pwrda")) req(length(choices)>0) div( inline(pickerInput(ns('pwrda_models'),"Results",choices, width="200px", selected=vals$pwrda_models)), inline(uiOutput(ns('save_pw'))), inline(actionButton(ns("delete_pwrda_models"),icon("fas fa-trash-alt"))) ) }) observeEvent(ignoreInit = T,input$delete_pwrda_models,{ attr(vals$saved_data[[input$segrda_X]],"pwrda")[[input$pwrda_models]]<-NULL }) output$save_pw<-renderUI({ if(is.null(vals$bag_pw)){ class="novo" } else{ class="save_changes"} div(class=class, actionButton(ns("save_pwrda"),icon("fas fa-save"), style="button_active") ) }) observeEvent(ignoreInit = T,input$pwrda_models, vals$pwrda_models<-input$pwrda_models) observeEvent(ignoreInit = T,input$save_pwrda,{ vals$hand_save<-"Save pwRDA model in" vals$hand_save2<-div(span("Target:",em(input$segrda_X,style="color: SeaGreen"))) vals$hand_save3<-NULL showModal(module_desctools()) }) observeEvent(ignoreInit = T,input$disegrda_sp_summ, vals$disegrda_sp_summ<-input$disegrda_sp_summ) output$segrda_view_out<-renderUI({ req(input$segrda_view=='Summary') div( div( span( "+ Results:", 
inline( pickerInput(ns("disegrda_sp_summ"),NULL,choices=c('Summary stats','Importance (unconstrained)',"Importance (constrained)",'Variable scores','Observation scores','Linear constraints','Biplot'), selected=vals$disegrda_sp_summ, options=list(container="body"), width="200px") ) ) ), div( span("+ Axes", inline( numericInput(ns("segrda_axes"),NULL, value=2) ) ) ), div( tipify( actionLink( ns('downcenter_segrda'),span("+ Download",icon("fas fa-download")), style="button_active" ), "Download selected results" ) ) ) }) output$side_pw<-renderUI({ #req(length(vals$segrda_model)>0) #req(length(vals$segrda_model$rda.pw)>0) sidebarPanel( fluidRow(class="map_control_style",style="color: #05668D", uiOutput(ns("segrda_view_out")), uiOutput(ns("segrda_view_plot")) ) ) }) observeEvent(ignoreInit = T,input$segrda_view, vals$segrda_view<-input$segrda_view) output$segrda_sp_shape<-renderUI({ req(input$segrda_sp_display=='Shape') inline(pickerInput( inputId=ns("segrda_spshape"), label = NULL, choices = df_symbol$val, options=list(container="body"), selected=df_symbol$val[8], choicesOpt = list(content = df_symbol$img),width='50px' )) }) output$segrda_sp_on<-renderUI({ req(isTRUE(input$segrda_sp)) div( div( '+ Number:', inline(numericInput(ns("segrda_spnum"),NULL, 10, step=1)),tipify(icon("fas fa-question-circle",style="color: gray"),"Show N variables with the highest scores", placement = "bottom") ), div( "+ Display:", span( inline( pickerInput(ns("segrda_sp_display"),NULL,choices=c("Shape","Label"), width = "75px") ), inline(uiOutput(ns("segrda_sp_shape"))) ) ), div( span("+ Variable Color:", inline( tipify( pickerInput( inputId=ns("segrda_spcolor"),label = NULL,selected= 'firebrick',choices = vals$colors_img$val[getsolid_col()],choicesOpt = list(content = vals$colors_img$img[getsolid_col()]), options=list(container="body")), "Variable Color."))) ) ) }) output$segrda_show_symbol_on<-renderUI({ req(isTRUE(input$segrda_show_symbols)) div(style="margin-left: 5px", div( span("+ 
Shape:", inline(pickerInput(inputId=ns("segrda_symbol"), label = NULL, choices = df_symbol$val, options=list(container="body"), choicesOpt = list(content = df_symbol$img), width='75px')))), div( span("+ Size:", inline(numericInput(ns("segrda_cexpoint"),NULL,value = 1,min = 0.1,max = 3,step = .1) )) ), div(class="palette", span("+ Color:", inline( tipify( pickerInput(inputId=ns("segrda_colpalette"),label = NULL,choices = vals$colors_img$val,choicesOpt = list(content = vals$colors_img$img),options=list(container="body"),selected=vals$colors_img$val[1]), "Symbol palette. Choose a gradient to color observations by a factor")))), uiOutput(ns("segrda_fac_palette")) ) }) output$segrda_show_labels_on<-renderUI({ req(isTRUE(input$segrda_show_labels)) div(style="margin-left: 5px", div(span("+ Factor:", inline(tipify(pickerInput(ns("segrda_labfactor"),NULL,choices = colnames(attr(vals$saved_data[[input$segrda_X]],"factors")), width="125px"), "label classification factor") ))), div(span("+ Lab Color:", inline(tipify( pickerInput( inputId=ns("segrda_labcolor"), label = NULL, selected= vals$colors_img$val[12],choices = vals$colors_img$val,choicesOpt = list(content = vals$colors_img$img),width="75px", options=list(container="body") ), "label classification factor" ) ))), div(span("+ Lab adj:", inline( tipify(pickerInput(ns("segrda_labadj"),NULL,choices=c(1:4), options=list(containder="body")), "a position specifier for the text. If specified this overrides any adj value given. 
Values of 1, 2, 3 and 4, respectively indicate positions below, to the left of, above and to the right of the specified (x,y) coordinates.", placement = "right") ))), div(span("+ Lab offset:", inline( tipify(numericInput(ns("segrda_offset"),NULL,value = 0,step = .1), "this value controls the distance ('offset') of the text label from the specified coordinate in fractions of a character width.") ))), div(span("+ Size:", inline( tipify(numericInput(ns("segrda_cextext"),NULL,value = 1,min = 0.1,max = 3,step = .1), "label text size") ))) ) }) output$segrda_view_plot<-renderUI({ req(input$segrda_view=='Plot') div( div( '+ Scaling:', inline(numericInput(ns("segrda_scaling"),NULL, 2, step=1, min=0, max=3)),tipify(icon("fas fa-question-circle",style="color: gray"),"Scaling for species and site scores. Either species (2) or site (1) scores are scaled by eigenvalues, and the other set of scores is left unscaled, or with 3 both are scaled symmetrically by square root of eigenvalues.", placement = "bottom") ), div(span("+",checkboxInput(ns("segrda_biplot"), span("Biplot", pophelp(NULL,"show biplot arrows")), T))), uiOutput(ns("segrda_biplot_options")), div(span("+",checkboxInput(ns("segrda_sp"), span("Variables", pophelp(NULL,"Variables")), T))), uiOutput(ns("segrda_sp_on")), div( span("+", inline(checkboxInput(ns("segrda_show_symbols"),"Symbol" ,T)))), uiOutput(ns("segrda_show_symbol_on")), div(span("+", inline(checkboxInput(ns("segrda_show_labels"),"Labels",F) ))), uiOutput(ns("segrda_show_labels_on")), div( tipify( actionLink( ns('downp_pw'),span("+ Download",icon("fas fa-download")), style="button_active" ), "Download plot" )) ) }) output$segrda_biplot_options<-renderUI({ req(isTRUE(input$segrda_biplot)) div( span("+ Biplot Color:", inline( tipify( pickerInput( inputId=ns("segrda_biplotcolor"),label = NULL,selected="royalblue",choices = vals$colors_img$val[getsolid_col()],choicesOpt = list(content = vals$colors_img$img[getsolid_col()]), options=list(container="body")), 
"Variable Color."))) ) }) output$segrda_fac_palette<-renderUI({ col<-getcolhabs(vals$newcolhabs,input$segrda_colpalette,2) req(col[1]!=col[2]) div( span("+ Factor:", inline(tipify(pickerInput(ns("segrda_symbol_factor"),NULL,choices = rev(colnames(attr(vals$saved_data[[input$segrda_X]],"factors"))), width='125px'), "symbol classification factor")))) }) output$segrda_print<-renderPrint({ req(input$segrda_view=='Summary') segrda_summary()}) output$pwRDA_out<-renderUI({ # validate(need(length(getBP())>0, "No breakpoints found")) fluidRow( sidebarLayout( uiOutput(ns("side_pw")), mainPanel( tabsetPanel(id=ns("segrda_view"),selected=vals$segrda_view, tabPanel("Plot", value="Plot", uiOutput(ns("ppwRDA"))), tabPanel("Summary", value="Summary", verbatimTextOutput(ns("segrda_print")))) ) ) ) }) max_seq<-reactive({ validate(need(any(DP_smw()[,5]!='ns'),"DP without any significant dissimilarity value: no breakpoint could be determined.")) df<-DP_smw()[,c(1,5)] colnames(df)<-c("a","b") new=transform(df, Counter = ave(a, rleid(b), FUN = seq_along)) max_seq<-max(new[new[,2]=="*",3]) attr(max_seq,"min")<-if(max_seq!=1){ min( new[new[,2]=="*",3][new[new[,2]=="*",3]!=1])} else{ min(new[new[,2]=="*",3]) } vals$max_seq<-max_seq }) DP_smw<-reactive({ req(input$dp_BPs) req(input$dp_w) # savereac() smw<- vals$smw_dp dp_BPs<-if(input$dp_BPs==""){NULL} else{input$dp_BPs} dp_w<-if(is.na(input$dp_w)){NULL} else{input$dp_w} res<-suppressWarnings( suppressMessages( extract(smw,w=dp_w, index=input$dp_index, sig=input$dp_sig, z=input$dp_z, BPs=dp_BPs, seq.sig=input$dp_seq.sig) ) ) vals$dp_smw<-res vals$dp_smw }) getDP<-reactive({ smw<- vals$smw_dp dp_BPs<-if(input$dp_BPs==""){NULL} else{input$dp_BPs} dp_w<-if(is.na(input$dp_w)){NULL} else{input$dp_w} dp<-DP_smw() colnames(dp)[2]<-"SampleID" sim1o<-getord() yo<-data.frame(sim1o$yo) bps<-suppressWarnings(bp(dp)) to_split<-c(1,rep(1:(length(bps)+1), diff(c(1,bps, nrow(yo))))) splits<-lapply(split(yo,to_split),function(x) rownames(x)) 
data_empty<-data.frame(attr(vals$saved_data[[input$segrda_X]],"factors")[,1,drop=F]) data_empty[,1]<-as.numeric(data_empty[,1]) for(i in 1:length(splits)){ data_empty[splits[[i]],1]<-i } vals$splitBP<- data_empty dp }) getord<-reactive({ req(input$segrda_Y) req(input$segrda_X) req(input$axis_ord_segrda) x<-as.matrix(vals$saved_data[[input$segrda_X]]) colnames(x)<-colnames(vals$saved_data[[input$segrda_X]]) y<-na.omit(as.matrix(vals$saved_data[[input$segrda_Y]][rownames(x),,drop=F])) colnames(y)<-colnames(vals$saved_data[[input$segrda_Y]]) x<-na.omit(x[rownames(y),]) if(isTRUE(input$segrda_ord)){ sim1o<-OrdData(x=y,y=x, axis=input$axis_ord_segrda,scale=input$segrda_scale)} else{ sim1o<-list() sim1o$xo<-y sim1o$yo<-x } sim1o }) save_bpfac<-reactive({ vals$bagbp0<-vals$bagbp0+1 dp<-getDP() newfac<-vals$splitBP factors<-attr(vals$saved_data[[input$segrda_X]],"factors") if(input$hand_save=="create") { attr(vals$saved_data[[input$segrda_X]],"factors")[rownames(vals$splitBP),input$newdatalist]<-as.factor(vals$splitBP[,1]) } else{ attr(vals$saved_data[[input$segrda_X]],"factors")[rownames(vals$splitBP),input$over_datalist]<-as.factor(vals$splitBP[,1]) } vals$bag_smw<-NULL }) getBP<-reactive({ pw_in<-get_breaks_from_factor() breaks=pw_in$breaks breaks }) segrda_summary<-reactive({ res<-summary(vals$segrda_model$rda.pw) res<-switch(input$disegrda_sp_summ, "Summary stats"= vals$segrda_model$summ, "Variable scores"=res$species, "Observation scores"=res$sites, "Linear constraints"=res$constraints, "Biplot"=res$biplot, "Importance (unconstrained)"=res$cont$importance, "Importance (constrained)"=res$concont$importance ) vals$segrda_summary<-res[,1:input$segrda_axes] vals$segrda_summary }) observeEvent(ignoreInit = T,input$segrda_panels,{ vals$segrda_panels<-input$segrda_panels }) observeEvent(ignoreInit = T,input$dp_index_help,{ showModal( modalDialog( column(12, p(strong("dp:"),span("The dissimilarity profile (DP) table containing significant discontinuities and suggested 
breakpoints")), p(strong("rdp:"),span("data frame containing the randomized DP;")), p(strong("md:"),span("mean dissimilarity of the randomized DP")), p(strong("sd:"),span("standard deviation for each sample position")), p(strong("ospan:"),span("overall expected mean dissimilarity;")), p(strong("osd:"),span("average standard deviation for the dissimilarities;")), p(strong("params:"),span("list with input arguments")) ), easyClose = T, size="m", title="index for extracting SMW results" ) ) }) observeEvent(ignoreInit = T,input$dp_sig_help,{ showModal( modalDialog( column(12, p(strong("z:"),span("consider normalized dissimilarity (z-scores) discontinuities that exceed a z critical value")), p(strong("sd:"),span("consider dissimilarity discontinuities that exceed mean plus one standard deviation")), p(strong("sd2:"),span("consider dissimilarity discontinuities that exceed mean plus two standard deviation")), p(strong("tail1:"),span("Consider dissimilarity discontinuities that exceed 95 percent confidence limits")) ), easyClose = T, size="m", title="Significance test fort the SMW results" ) ) }) observeEvent(ignoreInit = T,input$inc_bp,{ vals$bag_user_bp<-c(vals$bag_user_bp,input$pw_user) }) observeEvent(ignoreInit = T,input$remove_breaks,{ vals$bag_user_bp<-NULL }) observeEvent(ignoreInit = T,input$downcenter_segrda,{ vals$hand_down<-"segRDA" module_ui_downcenter("downcenter") mod_downcenter <- callModule(module_server_downcenter, "downcenter", vals=vals) }) observeEvent(ignoreInit = T,input$go_smw,{ vals$window_pool<-getpool() updateTabsetPanel(session,"smw_panels",selected='swm_2') vals$bag_smw<-T bag_smw$df<-T req(length(vals$window_pool)>0) sim1o<-getord() xo<-sim1o$xo ## ordered explanatory matrix. 
yo<-sim1o$yo ## ordered community matrix (untransformed) y=yo;ws=vals$window_pool; dist=input$smw_dist;rand=input$smw_rand;n.rand=input$smw_nrand if (n.rand < 2) { stop("number of randomizations not alowed") } if (any(ws%%2 == 1)) { stop("all Window sizes must be enven") } rand<-match.arg(rand, c("shift", "plot")) argg<-c(as.list(environment()), list()) smw<-list() withProgress(message = paste0("SMW analysis (", 1, "/", length(ws), "); w =", ws[1]), min = 1, max = length(ws), { for (j in 1:length(ws)) { w1<-ws[j] DPtable<-smw.root2(yo, w1, dist) OB<-DPtable[, 3] rdp<-data.frame(rep(NA, length(OB))) seq_yo<-1:nrow(yo) withProgress(message="randomizing",min = 1, max = n.rand,{ for (b in 1:n.rand) { if (rand == "shift") { comm.rand<-apply(yo, 2, function(sp) sp[sample(seq_yo)]) rdp[b]<-smw.root2(data.frame(comm.rand), w1, dist)[3] } else if (rand == "plot") { comm.rand<-t(apply(yo, 1, function(sp) sp[sample(seq_yo)])) rdp[b]<-smw.root2(data.frame(comm.rand), w1, dist)[3] } incProgress(1) } }) rownames(rdp)<-DPtable[,1] Dmean<-apply(rdp, 1, mean) SD<-apply(rdp, 1, sd) oem<-sum(Dmean)/(nrow(yo) - w1) osd<-sum(SD)/(nrow(yo) - w1) Dz<-(OB - oem)/osd DPtable$zscore<-Dz smw[[j]]<-list(dp = data.frame(DPtable), rdp = matrix(rdp), md = Dmean, sd = SD, oem = oem, osd = osd, params = argg) class(smw[[j]])<-c("smw") incProgress(1, message=paste0("SMW analysis (", j+1, "/", length(ws), "); w =", ws[j+1])) } }) names(smw)<-paste("w", ws, sep = "") class(smw)<-c("smw") vals$smw_dp<-isolate(smw) }) observeEvent(ignoreInit = T,input$downcenter_dp_smw,{ vals$hand_down<-"DP smw" module_ui_downcenter("downcenter") mod_downcenter <- callModule(module_server_downcenter, "downcenter", vals=vals) }) observeEvent(ignoreInit = T,input$tools_saveBP,{ vals$hand_save<-"Create factor using breakpoints from the dissimilarity profile" vals$hand_save2<-div(span("Target:",em(input$segrda_X,style="color: SeaGreen"))) vals$hand_save3<-NULL showModal(module_desctools()) }) savenames<-reactive({ switch( 
vals$hand_save, "Create factor using breakpoints from the dissimilarity profile"= {c(paste0("BP_"),nlevels(as.factor(vals$splitBP[,1])))}, "Save pwRDA model in"={ paste0(input$segrda_X,"~",input$segrda_Y,"[factor:",input$bp_column,"]") }) }) observeEvent( input$data_confirm,{ req(!is.null(vals$hand_save)) switch( vals$hand_save, "Create factor using breakpoints from the dissimilarity profile"= {save_bpfac()}, "Save pwRDA model in"={ save_pwrda() }) removeModal() }) save_pwrda<-reactive({ attr(vals$saved_data[[input$segrda_X]],"pwrda")[[input$newdatalist]]<-vals$segrda_model attr(vals$saved_data[[input$segrda_X]],"pwrda")[["pwRda (unsaved)"]]<-NULL updatePickerInput(session,'pwrda_models', selected=input$newdatalist) vals$bag_pw<-NULL }) observeEvent(ignoreInit = T,input$downp_we,{ vals$hand_plot<-"we" module_ui_figs("downfigs") mod_downcenter<-callModule(module_server_figs, "downfigs", vals=vals) }) observeEvent(ignoreInit = T,input$downp_dp,{ vals$hand_plot<-"dp" module_ui_figs("downfigs") mod_downcenter<-callModule(module_server_figs, "downfigs", vals=vals) }) observeEvent(ignoreInit = T,input$downp_pw,{ vals$hand_plot<-"segrda" module_ui_figs("downfigs") mod_downcenter<-callModule(module_server_figs, "downfigs", vals=vals) }) observe({ req(input$pwrda_models) names(attr(vals$saved_data[[input$segrda_X]],"pwrda")) vals$segrda_model<-attr(vals$saved_data[[input$segrda_X]],"pwrda")[[input$pwrda_models]] }) observeEvent(ignoreInit = T,input$run_pwrda,{ vals$bag_pw<-T pw_in<-get_breaks_from_factor() breaks=pw_in$breaks sim1o<-list() sim1o$xo<-pw_in$yord sim1o$yo<-pw_in$xord model=suppressMessages( try( pwRDA2(sim1o$xo,sim1o$yo , BPs=breaks,n.rand = input$pw_nrand) ) ) if(is.null(attr(vals$saved_data[[input$segrda_X]],"pwrda"))){ attr(vals$saved_data[[input$segrda_X]],"pwrda")<-list() } attr(vals$saved_data[[input$segrda_X]],"pwrda")[["pwRda (unsaved)"]]<-model }) savereac<-reactive({ tosave<-isolate(reactiveValuesToList(vals)) 
tosave<-tosave[-which(names(vals)%in%c("saved_data","newcolhabs",'colors_img'))] tosave<-tosave[-which(unlist(lapply(tosave,function(x) object.size(x)))>1000)] tosave$saved_data<-vals$saved_data tosave$newcolhabs<-vals$newcolhabs tosave$colors_img<-vals$colors_img saveRDS(tosave,"savepoint.rds") saveRDS(reactiveValuesToList(input),"input.rds") beep() }) segrda_symbol_factor<-reactive({ req(input$segrda_symbol_factor) col<-getcolhabs(vals$newcolhabs,input$segrda_colpalette,2) symbol_factor<-if(col[1]!=col[2]){ data = vals$saved_data[[input$segrda_X]] res<-attr(data,"factors")[rownames(data), input$segrda_symbol_factor] names(res)<-rownames(data) res }else{NULL} symbol_factor }) segrda_text_factor<-reactive({ text_factor<-if(isFALSE(input$segrda_show_labels)){NULL} else{ data = vals$saved_data[[input$segrda_X]] res<-attr(data,"factors")[, input$segrda_labfactor] names(res)<-rownames(data) res } text_factor }) segrda_symbol<-reactive({ rda_symbol<-if(isFALSE(input$segrda_show_symbols)){NA}else{as.numeric(input$segrda_symbol)} }) output$ppwRDA<-renderUI({ req(input$segrda_X) req(input$pwrda_models) all_models<-attr(vals$saved_data[[input$segrda_X]],"pwrda") req(length(all_models)>0) pwrda_model<-all_models[[input$pwrda_models]] symbol_factor<-segrda_symbol_factor() rda_symbol<-segrda_symbol() text_factor<-segrda_text_factor() text_factor<-text_factor[rownames(scores(vals$segrda_model$rda.pw)$sites)] symbol_factor<-symbol_factor[rownames(scores(vals$segrda_model$rda.pw)$sites)] args<-list( model=pwrda_model$rda.pw, key = symbol_factor, points =input$segrda_show_symbols, text = input$segrda_show_labels, biplot=input$segrda_biplot, keytext=text_factor, col.arrow= input$segrda_biplotcolor, show.sp=input$segrda_sp, sp.display=input$segrda_sp_display, n.sp=input$segrda_spnum, palette = input$segrda_colpalette, cex.points = input$segrda_cexpoint, cex.text = input$segrda_cextext, pch=rda_symbol, col.sp=getcolhabs(vals$newcolhabs,input$segrda_spcolor,1), 
pch.sp=as.numeric(input$segrda_spshape), lwd_arrow=1, textcolor=input$segrda_labcolor, scaling=input$segrda_scaling, newcolhabs=vals$newcolhabs, pos=input$segrda_labadj, offset=input$segrda_offset ) #args<-readRDS("args.rds") vals$seg_rda_plot<-do.call(plot_segrda,args) renderPlot(replayPlot( vals$seg_rda_plot)) }) ##### ## RDA ## ## output$rda_fac_palette<-renderUI({ col<-getcolhabs(vals$newcolhabs,input$rda_colpalette,2) req(col[1]!=col[2]) div( span("+ Factor:", inline(tipify(pickerInput(ns("rda_symbol_factor"),NULL,choices = rev(colnames(attr(vals$saved_data[[input$rda_X]],"factors"))), width='125px'), "symbol classification factor")))) }) output$stats_crda<-renderUI({ validate(need(length(vals$saved_data)>1, "This functionality requires at least two datalist as explanatory and response data.")) column(12,style="background: white", p(strong("Redundancy Analysis")), span( inline( span(style="width: 150px", inline(uiOutput(ns("rda_Y"))) ) ), inline(uiOutput(ns("rda_X"))) ) ) }) output$rda_X<-renderUI({ pickerInput(ns("rda_Y"),span("~ X Data", tiphelp("Predictors")), choices=names(vals$saved_data), selected=vals$cur_rda_Y) }) output$rda_Y<-renderUI({ req(input$rda_Y) pickerInput(ns("rda_X"),span("Y Data", tiphelp("Response data")), choices=names(vals$saved_data), selected=vals$cur_rda_X) }) output$rda_options<-renderUI({ req(input$rda_view=='Plot') div( div( span("+", inline(checkboxInput(ns("rda_show_symbols"),"Symbol" ,T, width='75px')))), uiOutput(ns("rda_show_symbols_out")), div(span("+", inline(checkboxInput(ns("rda_show_labels"),"Labels",F) ))), uiOutput(ns("rda_show_labels_out")), div( actionLink( ns('rda_downp'),span("+ Download",icon("fas fa-download")), style="button_active" ) ) ) }) output$rda_show_symbols_out<-renderUI({ req(isTRUE(input$rda_show_symbols)) div(style="margin-left: 5px", div( span("+ Shape:", inline(pickerInput(inputId=ns("rda_symbol"), label = NULL, choices = df_symbol$val, options=list(container="body"), choicesOpt = list(content = 
df_symbol$img), width='75px')))), div( span("+ Size:", inline(numericInput(ns("rda_cexpoint"),NULL,value = 1,min = 0.1,max = 3,step = .1, width='75px') )) ), div(class="palette", span("+ Color:", inline( tipify( pickerInput(inputId=ns("rda_colpalette"),label = NULL,choices = vals$colors_img$val,choicesOpt = list(content = vals$colors_img$img),options=list(container="body"),selected=vals$colors_img$val[1], width='120px'), "Symbol palette. Choose a gradient to color observations by a factor")))), uiOutput(ns("rda_fac_palette")) ) }) rda_symbol_factor<-reactive({ req(input$rda_symbol_factor) col<-getcolhabs(vals$newcolhabs,input$rda_colpalette,2) if(col[1]!=col[2]){ data = vals$saved_data[[input$rda_X]] attr(data,"factors")[rownames(data), input$rda_symbol_factor] }else{NULL} }) output$rda_show_labels_out<-renderUI({ req(isTRUE(input$rda_show_labels)) column(12, div(span("+ Factor:", inline(tipify(pickerInput(ns("rda_labfactor"),NULL,choices = colnames(attr(vals$saved_data[[input$rda_X]],"factors")), width="125px"), "label classification factor") ))), div(span("+ Lab Color:", inline(tipify( pickerInput( inputId=ns("rda_labcolor"), label = NULL, selected= vals$colors_img$val[12],choices = vals$colors_img$val,choicesOpt = list(content = vals$colors_img$img),width="75px", options=list(container="body") ), "label classification factor" ) ))), div(span("+ Lab adj:", inline( tipify(pickerInput(ns("rda_labadj"),NULL,choices=c(1:4), width="75px", options=list(containder="body")), "a position specifier for the text. If specified this overrides any adj value given. 
Values of 1, 2, 3 and 4, respectively indicate positions below, to the left of, above and to the right of the specified (x,y) coordinates.", placement = "right") ))), div(span("+ Lab offset:", inline( tipify(numericInput(ns("rda_offset"),NULL,value = 0,step = .1, width="75px"), "this value controls the distance ('offset') of the text label from the specified coordinate in fractions of a character width.") ))), div(span("+ Size:", inline( tipify(numericInput(ns("rda_cextext"),NULL,value = 1,min = 0.1,max = 3,step = .1), "label text size") ))) ) }) output$rda_view_plot<-renderUI({ req(input$rda_view=='Plot') div( div( '+ Scaling:', inline(numericInput(ns("rda_scaling"),NULL, 2, step=1, min=0, max=3)),tipify(icon("fas fa-question-circle",style="color: gray"),"Scaling for species and site scores. Either species (2) or site (1) scores are scaled by eigenvalues, and the other set of scores is left unscaled, or with 3 both are scaled symmetrically by square root of eigenvalues.", placement = "bottom") ), div(span("+",checkboxInput(ns("biplot_rda"), span("Biplot", pophelp(NULL,"show biplot arrows")), T))), div(span("+",checkboxInput(ns("sp_rda"), span("Variables", pophelp(NULL,"Variables"))))), uiOutput(ns("sp_rda_out")) ) }) output$rda_sp_display<-renderUI({ req(input$rda_sp_display=='Shape') inline(pickerInput( inputId=ns("rda_spshape"), label = NULL, choices = df_symbol$val, options=list(container="body"), selected=df_symbol$val[8], choicesOpt = list(content = df_symbol$img))) }) observeEvent(ignoreInit = T,input$disp_rda_summ, vals$disp_rda_summ<-input$disp_rda_summ) output$rda_view_summary<-renderUI({ req(input$rda_view=='Summary') div( div( span( "+ Results:", inline( pickerInput(ns("disp_rda_summ"),NULL,choices=c('Importance (unconstrained)',"Importance (constrained)",'Variable scores','Observation scores','Linear constraints','Biplot'), selected=vals$disp_rda_summ, options=list(container="body"), width="200px") ) ) ), div( span("+ Axes", inline( 
numericInput(ns("rda_axes"),NULL, value=2) ) ) ), div( tipify( actionLink( ns('downcenter_rda'),span("+ Download",icon("fas fa-download")), style="button_active" ), "Download selected results" ) ) ) }) output$sp_rda_out<-renderUI({ req(isTRUE(input$sp_rda)) div(style="margin-left: 5px", div( '+ Number:', inline(numericInput(ns("rda_spnum"),NULL, 10, step=1, width="100px")),tipify(icon("fas fa-question-circle",style="color: gray"),"Show N variables with the highest scores", placement = "bottom") ), div( "+ Display:", span( inline( pickerInput(ns("rda_sp_display"),NULL,choices=c("Label","Shape"), width="100px") ), inline( uiOutput(ns("rda_sp_display")) ) ) ), div( span("+ Variable Color:", tipify( pickerInput( inputId=ns("rda_spcolor"), label = NULL, selected= vals$colors_img$val[getsolid_col()][4], choices = vals$colors_img$val[getsolid_col()], choicesOpt = list( content = vals$colors_img$img[getsolid_col()] ), options=list(container="body") ), "Variable Color.")) ) ) }) output$rda_plot<-renderPlot({ prda(rda_model(), key = rda_symbol_factor(), points =input$rda_show_symbols, text = input$rda_show_labels, palette = input$rda_colpalette, cex.points = input$rda_cexpoint, cex.text = input$rda_cextext, pch=c(rda_symbol(),3), keytext=rda_text_factor(), biplot=input$biplot_rda, show.sp=input$sp_rda, n.sp=input$rda_spnum, sp.display=input$rda_sp_display, pch.sp=as.numeric(input$rda_spshape), col.sp=getcolhabs(vals$newcolhabs,input$rda_spcolor,1), textcolor=input$rda_labcolor, scaling=input$rda_scaling, newcolhabs=vals$newcolhabs, pos=input$rda_labadj, offset=input$rda_offset ) vals$rda_plot<-recordPlot() rda }) output$stats_rda<-renderUI({ req(input$rda_X) validate(need(!anyNA(vals$saved_data[[input$rda_X]]), "This functionality does not support missing values; Please use the transformation tool to the handle missing values.")) mainPanel( tabsetPanel(id=ns("rda_view"),selected=vals$rda_view, tabPanel("Plot", value="Plot", plotOutput(ns("rda_plot"))), tabPanel("Summary", 
value="Summary", verbatimTextOutput(ns("rda_print")))) ) }) output$rda_print<-renderPrint({ rda_summary()}) output$orda_options<-renderUI({ div( div( span("+", checkboxInput(ns("rda_scale"),span("Scale variables",tiphelp("Scale variables to unit variance (like correlations)")), value=T) ) ), uiOutput(ns("rda_view_summary")), uiOutput(ns("rda_view_plot")), ) }) observeEvent(ignoreInit = T,input$rda_view, vals$rda_view<-input$rda_view) rda_text_factor<-reactive({ if(isFALSE(input$rda_show_labels)){NULL} else{ data = vals$saved_data[[input$rda_X]] attr(data,"factors")[rownames(data), input$rda_labfactor]} }) rda_model<-reactive({ data<- vals$saved_data[[input$rda_X]] x<-as.matrix(data) colnames(x)<-colnames(data) if(length(input$rda_Y)>0){ y<-na.omit(vals$saved_data[[input$rda_Y]][rownames(x),,drop=F]) colnames(y)<-colnames(vals$saved_data[[input$rda_Y]]) x<-na.omit(x[rownames(y),]) dim(data.frame(y)) dim(x) model=vegan::rda(x~.,data=data.frame(y) ,scale=input$rda_scale) } else{model= vegan::rda(x,scale=input$rda_scale)} model}) rda_summary<-reactive({ res<-summary(rda_model()) res<-switch(input$disp_rda_summ, "Variable scores"=res$species, "Observation scores"=res$sites, "Linear constraints"=res$constraints, "Biplot"=res$biplot, "Importance (unconstrained)"=res$cont$importance, "Importance (constrained)"=res$concont$importance ) vals$rda_summary<-res[,1:input$rda_axes] vals$rda_summary }) rda_symbol<-reactive({ if(isFALSE(input$rda_show_symbols)){NA}else{as.numeric(input$rda_symbol)} }) observeEvent(ignoreInit = T,input$rda_downp,{ vals$hand_plot<-"rda" module_ui_figs("downfigs") mod_downcenter<-callModule(module_server_figs, "downfigs", vals=vals) }) observeEvent(ignoreInit = T,input$rda_X, vals$cur_rda_X<-input$rda_X) observeEvent(ignoreInit = T,input$rda_Y, vals$cur_rda_Y<-input$rda_Y) data_overwritte<-reactiveValues(df=F) data_store<-reactiveValues(df=F) newname<-reactiveValues(df=0) get_newname<-reactive({ req(!is.null(vals$hand_save)) newname$df<-switch( 
vals$hand_save, "Create Datalist: Niche results"={name_niche()}, "Create factor using breakpoints from the dissimilarity profile"= {c(paste0("BP_"),nlevels(as.factor(vals$splitBP[,1])))}, "Save pwRDA model in"={ paste0(input$segrda_X,"~",input$segrda_Y,"[factor:",input$bp_column,"]") } )}) output$data_over<-renderUI({ data_overwritte$df<-F data<-vals$saved_data[[input$data_descX]] choices<-c(names(vals$saved_data)) if(vals$hand_save=="Create factor using breakpoints from the dissimilarity profile"){choices<-colnames(attr(data,"factors"))} req(input$hand_save=="over") res<-pickerInput(ns("over_datalist"), NULL,choices, width="350px") data_overwritte$df<-T inline(res) }) output$data_create<-renderUI({ req(newname$df!=0) data_store$df<-F req(input$hand_save=="create") res<-textInput(ns("newdatalist"), NULL, newname$df, width="350px") data_store$df<-T inline(res) }) observeEvent( input$data_confirm,{ req(!is.null(vals$hand_save)) switch( vals$hand_save, "Create Datalist: Niche results"={saveniche()}, "Create factor using breakpoints from the dissimilarity profile"= {save_bpfac()}, "Save pwRDA model in"={save_pwrda()} ) removeModal() }) module_desctools <- function() { ns <- session$ns modalDialog( uiOutput(ns("databank_storage")), title=strong(icon("fas fa-save"),'Save'), footer=column(12, fluidRow(modalButton(strong("cancel")), inline(uiOutput(ns("save_confirm"))) ) ), easyClose = T ) } output$databank_storage<-renderUI({ req(!is.null(vals$hand_save)) newname$df<-0 get_newname() div( column(12, div(strong("action:"),em("*",vals$hand_save,style="color: SeaGreen")), div(vals$hand_save2,style="color: gray"), div(vals$hand_save3)), column(12,style="margin-top: 20px", radioButtons(ns("hand_save"),NULL, choiceNames= list(div(style="height: 40px",span("Create", style="margin-right: 15px"), inline(uiOutput(ns("data_create")))), div(style="height: 40px",span("Overwrite", style="margin-right: 15px"), inline(uiOutput(ns("data_over"))))), choiceValues=list('create',"over"), 
width="800px") ) ) }) output$save_confirm<-renderUI({ req(isTRUE(data_store$df)|isTRUE(data_overwritte$df)) actionButton(ns("data_confirm"),strong("confirm")) }) }
3cb3323139607841e8263a2297e29d3a8a68698c
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/gbp/R/gbp1d_cpp_rd.r
0b2f144b49868f1a54ec063dae96f802ecbd1b12
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
false
1,816
r
gbp1d_cpp_rd.r
#' gbp1d
#' @aliases
#' gbp1d Rcpp_gbp1d Rcpp_gbp1d-class
#' @description
#' generalized bin packing problem in 1 dimension, a.k.a. the knapsack 0-1 problem.
#' @details
#' gbp1d initializes a profit vector p, a weight vector w, and a weight constraint c;
#' the gbp1d solver would solve
#'
#'  maximize   sum_{j=1}^{n} p_{j} x_{j}
#'
#'  subject to sum_{j=1}^{n} w_{j} x_{j} leq c
#'             x_{j} in {0, 1}, j = 1, ...., n
#'
#' and instantiate a gbp1d object with a selection vector x and an objective z.
#'
#' gbp1d is implemented as an rcpp class; an instance can be solved by calling
#' gbp1d_solver_dpp(p, w, c) and gbp1d_solver_min(p, w, c)
#' @family gbp1d
#' @rdname gbp1d
#' @docType class
"gbp1d"

#' gbp1d_solver_dpp
#' @description
#' solve gbp1d via dynamic programming simple - adagio::knapsack()
#' @details
#' a dynamic programming solver on a gbp1d instance - the knapsack 0-1 problem, see gbp1d.
#'
#' gbp1d initializes a profit vector p, a weight vector w, and a weight constraint c;
#' the gbp1d solver would solve
#'
#'  maximize   sum_{j=1}^{n} p_{j} x_{j}
#'
#'  subject to sum_{j=1}^{n} w_{j} x_{j} leq c
#'             x_{j} in {0, 1}, j = 1, ...., n
#'
#' and instantiate a gbp1d object with a selection vector x and an objective z.
#'
#' gbp1d is implemented as an rcpp class; an instance can be solved by calling
#' gbp1d_solver_dpp(p, w, c) and gbp1d_solver_min(p, w, c)
#'
#' @param p
#'  p profit <vector>::<numeric>
#' @param w
#'  w weight <vector>::<integer>
#' @param c
#'  c constraint on weight <integer>
#' @return gbp1d
#'  a gbp1d instance with p profit, w weight, c constraint on weight,
#'  k selection, o objective, and ok an indicator of all fit or not.
#' @family gbp1d
#' @rdname gbp1d_solver_dpp
"gbp1d_solver_dpp"
847509956691b55d888318fcdf81557aae677520
b5ba5c578810105c9148fecadc61f124ae68118c
/man/maxccf.Rd
eebbc2d4d569ebecfbb6ffdb3ad47b6e393f741c
[]
no_license
dangulod/ECTools
cce57dfe0189ee324922d4d014cb7a72bd97817d
a927092249a92ced28c6c50fe7b26588049a07d0
refs/heads/master
2021-01-25T10:51:04.021720
2018-05-16T10:31:25
2018-05-16T10:31:25
93,886,888
1
1
null
null
null
null
UTF-8
R
false
true
648
rd
maxccf.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/maxcorlag.R
\name{maxccf}
\alias{maxccf}
\title{maxccf}
\usage{
maxccf(x = x, y = y, lag.max = 6, allow.negative = T, abs = T)
}
\arguments{
\item{x}{vector}

\item{y}{vector}

\item{lag.max}{(Optional) maximum lag}

\item{allow.negative}{(Optional) logical; whether negative lags are allowed, by default TRUE}

\item{abs}{(Optional) logical; whether the maximum should be taken in absolute value, by default TRUE}
}
\value{
Return the lag with the maximum/minimum correlation between two time series.
If x is a data frame or matrix, you must use the maxccfdf function.
}
\description{
maxccf
}
1f7de2364ff32df6e9c5aab2ffb7743cebca4fd9
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
/B_analysts_sources_github/GuangchuangYu/bioc-release/ClassGeneClusterSet.R
fce1c732db59c44dcc0006fc1d2e406f2f90e406
[]
no_license
Irbis3/crantasticScrapper
6b6d7596344115343cfd934d3902b85fbfdd7295
7ec91721565ae7c9e2d0e098598ed86e29375567
refs/heads/master
2020-03-09T04:03:51.955742
2018-04-16T09:41:39
2018-04-16T09:41:39
128,578,890
5
0
null
null
null
null
UTF-8
R
false
false
1,251
r
ClassGeneClusterSet.R
## S4 class holding a list of gene clusters whose pairwise GO-semantic
## similarity can be computed with sim().
setClass("GeneClusterSet", representation(GeneClusters = "list"))

## sim() method for GeneClusterSet: compute the pairwise semantic-similarity
## matrix between all gene clusters in `object`.
##
## Args:
##   object: a GeneClusterSet (list of gene clusters).
##   params: a Params object; params@combine must be non-empty (set via
##           setCombineMethod) to choose how per-GO scores are combined.
## Returns:
##   A symmetric numeric matrix of cluster-vs-cluster similarities, with
##   rows/columns that are entirely NA dropped.
##   NOTE(review): when only one cluster survives the NA filter, `[` drops
##   the result to a vector -- confirm callers tolerate this before adding
##   drop = FALSE.
setMethod(
  f = "sim",
  signature = "GeneClusterSet",
  definition = function(object, params) {
    if (length(params@combine) == 0) {
      stop("Using setCombineMethod(\"Params\") to specify which method to combine.")
    }
    size <- length(object@GeneClusters)
    # Map every gene of every cluster to its GO annotations up front.
    # seq_len() is safe when size == 0 (1:size would iterate over c(1, 0)).
    cluster_gos <- vector("list", size)
    for (i in seq_len(size)) {
      cluster_gos[[i]] <- sapply(object@GeneClusters[[i]], gene2GO, params)
    }
    # Scratch cache used by the GO semantic-similarity routines; it must live
    # in the global environment. on.exit() guarantees removal even when a
    # sim() call below fails (previously an error leaked the cache env).
    assign("GOSemSimCache", new.env(hash = TRUE), envir = .GlobalEnv)
    on.exit(
      if (exists("GOSemSimCache", envir = .GlobalEnv)) {
        remove("GOSemSimCache", envir = .GlobalEnv)
      },
      add = TRUE
    )
    simScores <- matrix(NA, nrow = size, ncol = size)
    rownames(simScores) <- names(object@GeneClusters)
    colnames(simScores) <- names(object@GeneClusters)
    # Fill the lower triangle (j <= i) and mirror it: the matrix is symmetric.
    for (i in seq_along(object@GeneClusters)) {
      for (j in seq_len(i)) {
        gos1 <- unlist(cluster_gos[[i]])
        gos2 <- unlist(cluster_gos[[j]])
        gos1 <- gos1[!is.na(gos1)]
        gos2 <- gos2[!is.na(gos2)]
        if (length(gos1) == 0 || length(gos2) == 0) {
          # No usable GO annotation on one side: similarity is undefined.
          simScores[i, j] <- NA
        } else {
          goids <- new("GOSet", GOSet1 = gos1, GOSet2 = gos2)
          simScores[i, j] <- sim(goids, params)
        }
        if (i != j) {
          simScores[j, i] <- simScores[i, j]
        }
      }
    }
    # Drop clusters whose row is entirely NA (never comparable to anything).
    removeNA <- rowSums(!is.na(simScores)) > 0
    simScores[removeNA, removeNA]
  }
)
941225985c413ba13ff4ddcb7b98bcfa0e8c3cf7
06c3bb38a86470847c0d56ded74126889e8762ce
/merge_HTcounts_Matrix_V1.2.R
c4e5cfacf8429b185ff316df3af2e973b436db8d
[]
no_license
haojiang9999/R_Data_analysis
7bc10f7babe3b2a6a9bf81d9618471c7712fef27
882c7b4a1519cd9f5d6071808a34b600d075aa90
refs/heads/master
2020-03-29T01:04:07.581137
2019-11-09T02:31:45
2019-11-09T02:31:45
149,369,687
0
0
null
null
null
null
UTF-8
R
false
false
87
r
merge_HTcounts_Matrix_V1.2.R
# Load the precomputed FANTOM / E-MTAB-3929 expression matrix from disk.
# NOTE(review): the .rds file is assumed to sit in the working directory.
all.exp<-readRDS("FANTOM_E_MTAB_3929_exp.rds")
# Spot-check a single value (row 1530, column 1530) -- assumes the object
# has at least 1530 rows and columns; verify against the saved matrix.
all.exp[1530,1530]
# Print the row names (feature/sample identifiers) for inspection.
row.names(all.exp)
#
4f611229da7eb2baf1846db54c882555f75812c2
d38abc97785eb0296a1bcc0764edf1df9af7bcad
/support/array/general.R
29cf23c7ea9399e05783f3aaab47dc7a17fe066d
[ "MIT" ]
permissive
lnsongxf/R4Econ
0f0e1ebbaab669a8d00cc7c0d9b4c06e2bd02207
cc50184e5650e94ea5dfa62cbb61d32e6ada3493
refs/heads/master
2021-02-06T06:08:31.951801
2020-02-26T04:08:48
2020-02-26T04:08:48
null
0
0
null
null
null
null
UTF-8
R
false
false
113
r
general.R
# Drop the final entry from a character vector.
vars.group.bydf <- c("23", "dfa", "wer")
# head() with a negative n keeps everything except the last |n| elements,
# equivalent to vars.group.bydf[-length(vars.group.bydf)].
head(vars.group.bydf, -1)
7752c98e21e03dc8a619f84760aa51fd4af061da
7d31f360f1ece69b09a4b51e3986ac44025efc7c
/package/clinUtils/man/formatTableLabel.Rd
fd73f6e3561d22e1db353ca459e201b0e3e64efa
[]
no_license
Lion666/clinUtils
a0500a773225ffafc29b7d7f7bcc722dd416743c
dc6118f32d311657d410bdeba02f3720f01d62df
refs/heads/master
2023-08-12T08:48:42.923950
2021-09-21T14:56:18
2021-09-21T16:14:51
null
0
0
null
null
null
null
UTF-8
R
false
true
478
rd
formatTableLabel.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/label.R \name{formatTableLabel} \alias{formatTableLabel} \title{Concatenate and format text strings to a label of a table} \usage{ formatTableLabel(...) } \arguments{ \item{...}{string to be concatenated to form label} } \value{ String with chunk label } \description{ This function concatenates and formats text strings to a label of a table for \code{bookdown} package } \author{ Laure Cougnaud }
0968fe8028774f37a92536b75e1edcc1c998b5ae
32e9a6eb06e58bafd7ec0ea1232f850db6455542
/plot2.r
01a7e97d1571b24c50f7837a9cdfac439611f4d8
[]
no_license
Nempecovest1/ExData_Plotting1
c1a7d4219e73b544547188ea53aafd84284737b8
4c79fdbdf744e2c2ebe4c6d60b9d89f9418e4ceb
refs/heads/master
2021-01-18T11:49:23.029293
2014-12-03T18:00:03
2014-12-03T18:00:03
null
0
0
null
null
null
null
UTF-8
R
false
false
538
r
plot2.r
# Plot 2 of the Electric Power Consumption assignment:
# Global active power vs. time for 2007-02-01 and 2007-02-02,
# written to plot2.png (480 x 480 px).

# Load the raw data; fields are ";"-separated and "?" marks missing values.
data_orig <- read.table("household_power_consumption.txt",
                        header = TRUE, sep = ";", na.strings = "?")
data_orig$Date <- as.Date(data_orig$Date, format = "%d/%m/%Y")

# Keep only the two target days (%in% instead of a chain of == comparisons).
data <- data_orig[data_orig$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
rm(data_orig)

# Build a proper date-time column. as.POSIXct is used rather than strptime
# because POSIXlt values are list-like and behave poorly inside data frames.
data$DateTime <- as.POSIXct(paste(data$Date, data$Time),
                            format = "%Y-%m-%d %H:%M:%S")

# Render the line plot to a PNG device; dev.off() closes the file.
png("plot2.png", width = 480, height = 480)
plot(data$DateTime, data$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
c814df991b94c679763f82fef40b470586b68278
21aed1a841b080bb19b900dc0afd0bb21692d72a
/bayes_crank.R
2f54eca11adf40b8984278744dfd125e35c9ffb1
[]
no_license
anthonyjp87/Teach_Bayes_datacamp
dbaad3bef013ff382dbea440af6587afa5e7b60b
b47858c4c5a8a7f2ca23868ba341981c59dabb74
refs/heads/master
2021-01-21T18:29:08.948781
2017-05-22T14:16:33
2017-05-22T14:16:33
92,051,492
0
1
null
null
null
null
UTF-8
R
false
false
1,080
r
bayes_crank.R
## First example: generating all of the values for the Bayesian crank.
## bayesian_crank() is a function in the TeachBayes library that computes the
## posterior from the prior and the likelihood. Here the parameter is discrete:
## only five values of the proportion are possible (0.1 to 0.5).

# Candidate values of the proportion P. A discrete parameter will rarely be
# the case in practice, but it keeps the update easy to follow.
P <- c(0.1, 0.2, 0.3, 0.4, 0.5)

# Prior probability attached to each candidate value (sums to 1).
Prior <- c(0.3, 0.3, 0.2, 0.1, 0.1)

# Binomial likelihood of the observed data (8 successes in 20 trials) at each P.
Likelihood <- dbinom(8, size = 20, prob = P)

# Assemble the Bayes table; bayesian_crank() expects columns named
# P, Prior and Likelihood.
bayes_df <- data.frame(P, Prior, Likelihood)
print(bayes_df)

# Compute and print the posterior probabilities. The crank is fairly simple --
# just Product = Prior * Likelihood and Posterior = Product / sum(Product):
#   function (d)
#   {
#     d$Product <- d$Likelihood * d$Prior
#     d$Posterior <- d$Product/sum(d$Product)
#     d
#   }
bayes_df <- bayesian_crank(bayes_df)

# Graphically compare the prior and the posterior, then show the full table.
prior_post_plot(bayes_df)
print(bayes_df)
67548116cc821f9c4973f59e0e39a6d00bde4306
ef1be2cb902701ee410945a94a2626cc12fdcbeb
/cachematrix.R
eae25a3d56c376304790f86bd8d620ac017d5854
[]
no_license
dckly1976/R-Programming
f578f06ef5f8cb68aee0629635237c16ef9a3d20
02a8e26502f9c5b725b82959df3075a8517cade5
refs/heads/main
2023-08-29T17:31:58.345233
2021-11-12T16:45:56
2021-11-12T16:45:56
427,429,930
0
0
null
null
null
null
UTF-8
R
false
false
2,251
r
cachematrix.R
## Cache-aware matrix inversion.
##
## A matrix is stored together with a cache of its inverse inside the
## closure environment created by makeCacheMatrix(), so the inverse is
## computed at most once and then reused by cacheSolve().

## makeCacheMatrix: wrap a matrix in a list of accessor closures.
##
## Args:
##   x: the matrix to cache (assumed invertible when cacheSolve is called).
## Returns:
##   A list with:
##     set          -- replace the stored matrix and clear the cached inverse
##     get          -- return the stored matrix
##     set_inverted -- store a computed inverse in the cache
##     get_inverted -- return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(x = matrix()) {
  inverted_mtx <- NULL
  # Replace the stored matrix; the cached inverse must be invalidated
  # because it no longer corresponds to the new matrix.
  set_new_matrix <- function(new_matrix) {
    x <<- new_matrix
    inverted_mtx <<- NULL
  }
  # Return the original matrix.
  get <- function() x
  # Store a value in the inverse cache.
  set_inverted <- function(inverted) inverted_mtx <<- inverted
  # Return the cached inverse (NULL until cacheSolve has run).
  get_inverted <- function() inverted_mtx
  # BUG FIX: set_new_matrix was previously defined but never returned, which
  # made it unreachable from outside the closure. Exposing it as "set" adds
  # an element while keeping all original elements intact (backward
  # compatible).
  list(set = set_new_matrix,
       set_inverted = set_inverted,
       get = get,
       get_inverted = get_inverted)
}

## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix().
##
## On the first call the inverse is computed with solve() and cached; later
## calls return the cached value (and emit a message saying so).
##
## Args:
##   x:   a cache object produced by makeCacheMatrix()
##   ...: further arguments passed on to solve()
## Returns:
##   The inverse of the stored matrix.
cacheSolve <- function(x, ...) {
  # Fast path: reuse the cached inverse when it exists.
  get_inv_mtx <- x$get_inverted()
  if (!is.null(get_inv_mtx)) {
    message("getting cached inverted matrix")
    return(get_inv_mtx)
  }
  # Slow path: compute the inverse once and store it for next time.
  original_mtx <- x$get()
  inverted_mtx <- solve(original_mtx, ...)
  x$set_inverted(inverted_mtx)
  inverted_mtx
}

## Smoke tests -------------------------------------------------------------
A <- matrix(c(5, 1, 0, 3, -1, 2, 4, 0, -1), nrow = 3, byrow = TRUE)

# Create the cache wrapper around matrix A (stored inside variable b).
b <- makeCacheMatrix(A)

# The stored matrix equals A; no inverse has been cached yet (NULL).
b$get()
b$get_inverted()

# First call must compute the inverse and populate the cache.
cacheSolve(b)

# The stored matrix is unchanged and the cache now holds the inverse.
b$get()
b$get_inverted()

# Second call is served from the cache.
cacheSolve(b)
66f2c5e094f833cf4829fdbda1acf986317cc986
5bffae3f3c1f40e6cba7dfe334a1c0b220c13613
/man/ui-server.Rd
76414016ae9c33bcfffac0f5de0b7c1add14d87a
[ "MIT" ]
permissive
curso-r/auth0
f1218c40b9a9509d202f3c48866dd73af33d9e50
19d90e7b533e020449b54f67b93cfedcc30303bb
refs/heads/master
2023-07-14T19:57:10.362245
2023-03-22T18:55:50
2023-03-22T18:55:50
154,844,030
139
29
NOASSERTION
2023-06-21T21:57:55
2018-10-26T14:05:40
R
UTF-8
R
false
true
1,073
rd
ui-server.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/shiny.R \name{ui-server} \alias{ui-server} \alias{auth0_ui} \alias{auth0_server} \title{Modifies ui/server objects to authenticate using Auth0.} \usage{ auth0_ui(ui, info) auth0_server(server, info) } \arguments{ \item{ui}{\code{shiny.tag.list} object to generate the user interface.} \item{info}{object returned from \link{auth0_info}. If not informed, will try to find the \verb{_auth0.yml} and create it automatically.} \item{server}{the shiny server function.} } \description{ These functions can be used in a ui.R/server.R framework, modifying the shiny objects to authenticate using Auth0 service with no pain. } \examples{ \donttest{ # first, create the yml file using use_auth0() function if (interactive()) { # ui.R file library(shiny) library(auth0) auth0_ui(fluidPage(logoutButton())) # server.R file library(auth0) auth0_server(function(input, output, session) {}) # console options(shiny.port = 8080) shiny::runApp() } } } \seealso{ \link{auth0_info}. }
2043e07632a388263c875a6bb03cf6dfae4ca1f3
66317f3e1ba137b5a16e339e358350587cc8ad85
/R/convert_flow_unit.R
9496fe54e24b39d95ecf89e36547f2c6fe65fe29
[]
no_license
cran/clinPK
2dd3c8f90cc711186003a47a229c2153f272be8f
935e5cd7f2e0877814a1ce7347da48fc0ffda0a6
refs/heads/master
2022-05-16T16:11:20.119203
2022-05-09T07:10:05
2022-05-09T07:10:05
94,758,508
0
0
null
null
null
null
UTF-8
R
false
false
4,403
r
convert_flow_unit.R
#' Convert flow (e.g. clearance) from / to units
#'
#' Flow units are expected to be specified as a combination
#' of volume per time units, potentially specified per kg
#' body weight, e.g. "mL/min", or "L/hr/kg".
#'
#' Accepted volume units are "L", "dL", and "mL".
#' Accepted time units are "min", "hr", and "day".
#' The only accepted weight unit is "kg".
#'
#' The function is not case-sensitive.
#'
#' @param value flow value
#' @param from from flow unit, e.g. `L/hr`.
#' @param to to flow unit, e.g. `mL/min`
#' @param weight for performing per weight (kg) conversion
#'
#' @examples
#'
#' ## single values
#' convert_flow_unit(60, "L/hr", "ml/min")
#' convert_flow_unit(1, "L/hr/kg", "ml/min", weight = 80)
#'
#' ## vectorized
#' convert_flow_unit(
#'   c(10, 20, 30),
#'   from = c("L/hr", "mL/min", "L/hr"),
#'   to = c("ml/min/kg", "L/hr", "L/hr/kg"),
#'   weight = c(70, 80, 90))
#'
#' @export
convert_flow_unit <- function(value = NULL,
                              from = "l",
                              to = "ml",
                              weight = NULL) {
  ## Input checks:
  if (is.null(from)) {
    stop("`from` argument not specified.")
  }
  if (is.null(to)) {
    stop("`to` argument not specified.")
  }
  if (length(from) != 1 && length(from) != length(value)) {
    stop("`from` argument should be either a single value or a vector of the same length as the `value` argument.")
  }
  if (length(to) != 1 && length(to) != length(value)) {
    stop("`to` argument should be either a single value or a vector of the same length as the `value` argument.")
  }

  ## Normalize the units: lower case, "/" separators replaced by "_":
  from <- gsub("\\/", "_", tolower(from))
  to <- gsub("\\/", "_", tolower(to))

  ## Conversion factors relative to L (volume) and hr (time):
  volume_units <- list("ml" = 1/1000, "dl" = 1/10, "l" = 1)
  time_units <- list("min" = 1/60, "hr" = 1, "day" = 24)

  ## Volume conversion factors (the volume unit always leads, hence "^"):
  tryCatch({
    from_volume_factor <- as.numeric(vapply(from, function(x) {
      find_factor(x, units = volume_units, "^")
    }, FUN.VALUE = numeric(1)))
  }, error = function(e) {
    stop("Volume unit not recognized in `from` argument.")
  })
  tryCatch({
    to_volume_factor <- as.numeric(vapply(to, function(x) {
      find_factor(x, units = volume_units, "^")
    }, FUN.VALUE = numeric(1)))
  }, error = function(e) {
    stop("Volume unit not recognized in `to` argument.")
  })

  ## Per-time conversion factors (time always follows a separator, hence "_"):
  tryCatch({
    from_time_factor <- 1 / as.numeric(vapply(from, function(x) {
      find_factor(x, units = time_units, "_")
    }, FUN.VALUE = numeric(1)))
  }, error = function(e) {
    stop("Time unit not recognized in `from` argument.")
  })
  tryCatch({
    to_time_factor <- 1 / as.numeric(vapply(to, function(x) {
      find_factor(x, units = time_units, "_")
    }, FUN.VALUE = numeric(1)))
  }, error = function(e) {
    stop("Time unit not recognized in `to` argument.")
  })

  ## Weight-based units ("/kg"), detected after "/" -> "_" normalization:
  from_weight <- grepl("_kg", from, fixed = TRUE)
  to_weight <- grepl("_kg", to, fixed = TRUE)
  if (any(from_weight) || any(to_weight)) {
    if (is.null(weight)) {
      stop("Weight required for weight-based conversion of flow rates.")
    }
    if (length(weight) != 1 && length(weight) != length(value)) {
      stop("`weight` argument should be either a single value or a vector of the same length as the `value` argument.")
    }
  }

  ## BUG FIX: the previous ifelse(from_weight, weight, 1) returned a result
  ## the length of `from_weight`, so a vector `weight` was silently truncated
  ## to its first element whenever `from`/`to` had length 1 and every value
  ## was converted with the first subject's weight. Keep the full weight
  ## vector in that case instead.
  weight_factor <- function(has_kg) {
    if (length(has_kg) == 1) {
      if (has_kg) weight else 1
    } else {
      ifelse(has_kg, weight, 1)
    }
  }
  from_weight_factor <- weight_factor(from_weight)
  to_weight_factor <- weight_factor(to_weight)

  ## Combine factors and return:
  value * (from_volume_factor * from_weight_factor * from_time_factor) /
    (to_volume_factor * to_time_factor * to_weight_factor)
}

#' Helper: look up the conversion factor for a unit inside a full unit string
#'
#' @param full_unit full unit, e.g. "ml_min_kg" (already lower-cased and
#'   "_"-separated by the caller)
#' @param units named list of conversion factors,
#'   e.g. `list("ml" = 1/1000, "dl" = 1/10, "l" = 1)`
#' @param prefix regex prefix used when matching: "^" only matches at the
#'   start of the string (volume), while "_" matches units specified after a
#'   separator (time)
find_factor <- function(full_unit, units = NULL, prefix = "^") {
  is_match <- vapply(names(units), function(u) {
    grepl(paste0(prefix, u), full_unit)
  }, FUN.VALUE = logical(1))
  unlist(units[is_match], use.names = FALSE)
}
82522af2f1db681c21062fd1c06b7bb9983c70b3
7efe27117099680642ffb3dc2fd9d9cbf6bf3b95
/decision.R
54cd5e2879f36532e3cfbb6b9e5edc6706e9351c
[]
no_license
rashmibhle/dsr_lab
ff075b40835dbbe4f64090e0471d15e86c30f5c4
9b0a2fa799832fc5074ce83d720ad0300bb4f789
refs/heads/master
2020-08-22T05:04:47.311810
2020-02-03T17:22:20
2020-02-03T17:22:20
216,323,263
0
0
null
null
null
null
UTF-8
R
false
false
328
r
decision.R
## Fit and plot a decision tree predicting mail-campaign Outcome from
## customer attributes, using the information-gain split criterion.
library(rpart)
library(rpart.plot)

# Load the mail-response dataset and echo it to the console.
mail_data <- read.csv("F:/7th sem/7th SEM/DSR/Dataset/Dataset/Mail_Respond.csv")
mail_data

# Grow a classification tree. minsplit = 1 lets the tree keep splitting
# down to single observations; split = "information" selects the
# information-gain criterion (see ?rpart.control and ?rpart).
tree_model <- rpart(
  Outcome ~ District + House.Type + Income + Previous_Customer,
  data = mail_data,
  control = rpart.control(minsplit = 1),
  parms = list(split = "information")
)
tree_model

# Draw the fitted tree (see ?rpart.plot for the type/extra display codes).
rpart.plot(tree_model, type = 2, extra = 4)
0733ea64479c3435b15b7ea6885dda1558cf86a4
1839b1bc21a43384e9c169f0bf5fd0a3e4c68b0a
/w18/man/getRandomPWMsAndFilts.Rd
9e70021989d94f2dcb8aac821cf985be2c0db2df
[]
no_license
CarlosMoraMartinez/worm19
b592fa703896e1bbb6b83e41289674c63a046313
99fb3ef35d13739ee83f08b2ac1107179ea05ee2
refs/heads/master
2020-07-18T23:25:13.542031
2019-07-03T14:53:04
2019-07-03T14:53:04
206,333,433
0
0
null
null
null
null
UTF-8
R
false
true
774
rd
getRandomPWMsAndFilts.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getRandomPWMsAndFilts.R
\name{getRandomPWMsAndFilts}
\alias{getRandomPWMsAndFilts}
\title{getRandomPWMsAndFilts}
\usage{
getRandomPWMsAndFilts(PWMs, filt, n = 10)
}
\arguments{
\item{PWMs}{list of matrices, with names}

\item{filt}{list of filters (regular expression-like, e.g. [AT]TTG[CG]A)}

\item{n}{number of permuted lists (defaults to n = 10).}
}
\value{
n lists of PWMs with shuffled columns (1 shuffled PWM per original PWM) and their
corresponding filters (a list of lists, with n permutations of each PWM).
}
\description{
Given a set of PWMs and filters (e.g. [AT]TTG[CG]A), returns n lists of PWMs with
shuffled columns and their corresponding filters.
}
\keyword{PWMs,}
\keyword{filters}
7c9bafec9fef7c894a1c42526dd7ad53bd5f3052
31362fdab2193f92b64f9a82b0fe1ca732fcf6df
/Eumaeus/ui.R
5ff7a7740aab910fcd3a79a7fa773e5d89697c1b
[]
no_license
OHDSI/ShinyDeploy
a5c8bbd5341c96001ebfbb1e42f3bc60eeceee7c
a9d6f598b10174ffa6a1073398565d108e4ccd3c
refs/heads/master
2023-08-30T17:59:17.033360
2023-08-26T12:07:22
2023-08-26T12:07:22
98,995,622
31
49
null
2023-06-26T21:07:33
2017-08-01T11:50:59
R
UTF-8
R
false
false
13,353
r
ui.R
library(shiny) library(DT) shinyUI( fluidPage(style = 'width:1500px;', titlePanel( title = div(img(src = "logo.png", height = 50, width = 50), "Evaluating Use of Methods for Adverse Event Under Surveillance (EUMAEUS)"), windowTitle = "EUMAEUS" ), tabsetPanel( tabPanel("About", br(), p("For review purposes only. Do not use.") ), tabPanel("Effect-size-estimate-based metrics", fluidRow( column(2, selectInput("exposure", label = div("Vaccine", actionLink("vaccineInfo", "", icon = icon("info-circle"))), choices = exposure$exposureName), selectInput("calibrated", label = div("Empirical calibration:", actionLink("calibrationInfo", "", icon = icon("info-circle"))), choices = c("Uncalibrated", "Calibrated")), selectInput("database", label = div("Database:", actionLink("databaseInfo", "", icon = icon("info-circle"))), choices = database$databaseId), selectInput("timeAtRisk", label = div("Time at risk", actionLink("timeAtRiskInfo", "", icon = icon("info-circle"))), choices = timeAtRisks), selectInput("trueRr", label = div("True effect size:", actionLink("trueRrInfo", "", icon = icon("info-circle"))), choices = trueRrs), checkboxGroupInput("method", label = div("Methods:", actionLink("methodsInfo", "", icon = icon("info-circle"))), choices = unique(analysis$method), selected = unique(analysis$method)) ), column(10, tabsetPanel(type = "pills", tabPanel("Per period", selectInput("period", label = div("Time period", actionLink("periodInfo", "", icon = icon("info-circle"))), choices = timePeriod$label[timePeriod$exposureId == exposure$exposureId[1]]), dataTableOutput("performanceMetrics"), uiOutput("tableCaption"), conditionalPanel(condition = "output.details", div(style = "display:inline-block", h4(textOutput("details"))), tabsetPanel(id = "perPeriodTabSetPanel", tabPanel("Estimates", uiOutput("hoverInfoEstimates"), plotOutput("estimates", height = "270px", hover = hoverOpts("plotHoverInfoEstimates", delay = 100, delayType = "debounce")), div(strong("Figure 1.1."),"Estimates with 
standard errors for the negative and positive controls, stratified by true effect size. Estimates that fall above the red dashed lines have a confidence interval that includes the truth. Hover mouse over point for more information.")), tabPanel("ROC curves", plotOutput("rocCurves", height = "420px"), div(strong("Figure 1.2."),"Receiver Operator Characteristics curves for distinguising positive controls from negative controls. Negative controls not powered for positive control synthesis have been removed.")), tabPanel("Diagnostics", sliderInput("minRateChange", "Minimum relative rate change (%)", min = 0, max = 100, value = 50), plotOutput("monthlyRates", height = "420px"), div(strong("Figure 1.3."),"Monthly incidence rates across the historic and current time windows. Only those outcomes having a relative change greater than the selected threshold are shown.") ) ) )), tabPanel("Across periods", dataTableOutput("performanceMetricsAcrossPeriods"), uiOutput("tableAcrossPeriodsCaption"), conditionalPanel(condition = "output.detailsAcrossPeriods", div(style = "display:inline-block", h4(textOutput("detailsAcrossPeriods"))), tabsetPanel( tabPanel("Estimates", plotOutput("estimatesAcrossPeriods"), div(strong("Figure 1.3."),"Effect-size estimates for the negative and positive controls across time, stratified by true effect size. Closed dots indicate statistical signficance (two-sides) at alpha = 0.05. 
The red dashed line indicates the true effect size.") ) ) ) ), tabPanel("Across periods & methods", fluidRow( column(5, radioButtons("metricAcrossMethods", label = "Performance metrics", choices = c("Sensitivity & specificity", "AUC"))), column(5, radioButtons("inputAcrossMethods", label = "Decision input (rule)", choices = c("P-value (< 0.05)", "Point estimate (> 1)", "Lower bound of 95% CI (> 1)"))) ), plotOutput("sensSpecAcrossMethods", height = "800px"), dataTableOutput("analysesDescriptions") ) ) ) ) ), tabPanel("MaxSPRT-based metrics", fluidRow( column(2, selectInput("exposure2", label = div("Vaccine", actionLink("vaccineInfo2", "", icon = icon("info-circle"))), choices = exposure$exposureName), selectInput("database2", label = div("Database:", actionLink("databaseInfo2", "", icon = icon("info-circle"))), choices = database$databaseId), textInput("minOutcomes", label = div("Minimum outcomes", actionLink("minimumOutcomesInfo2", "", icon = icon("info-circle"))), value = 1), selectInput("timeAtRisk2", label = div("Time at risk", actionLink("timeAtRiskInfo2", "", icon = icon("info-circle"))), choices = timeAtRisks), selectInput("trueRr2", label = div("True effect size:", actionLink("trueRrInfo2", "", icon = icon("info-circle"))), choices = trueRrs), checkboxGroupInput("method2", label = div("Methods:", actionLink("methodsInfo2", "", icon = icon("info-circle"))), choices = unique(analysis$method), selected = unique(analysis$method)) ), column(10, tabsetPanel(type = "pills", tabPanel("Per method", dataTableOutput("performanceMetrics2"), uiOutput("table2Caption"), conditionalPanel(condition = "output.details2", div(style = "display:inline-block", h4(textOutput("details2"))), tabsetPanel( tabPanel("Log Likelihood Ratios", uiOutput("hoverInfoLlrs"), plotOutput("llrs", height = "650px", hover = hoverOpts("plotHoverInfoLlrs", delay = 100, delayType = "debounce")), div(strong("Figure 2.1."),"Log likelihood ratios (LLR) (left axis) for the negative and positive controls 
at various points in time, stratified by true effect size. Closed dots indicate the LLR in that period exceeded the critical value. The critical value depends on sample size within and across periods, and is therefore different for each control. The yellow area indicates the cumulative number of vaccinations (right axis). Hover mouse over point for more information.")), tabPanel("Sensitivity / Specificity", plotOutput("sensSpec", height = "800px"), div(strong("Figure 2.2."),"Sensitivity and specificity per period based on whether the log likehood ratio for a negative or positive control exceeded the critical value in that period or any before. Negative controls not powered for positive control synthesis have been removed.")) ) ) ), tabPanel("Across methods", fluidRow( column(5, radioButtons("metricAcrossMethods2", label = "Performance metrics", choices = c("Sensitivity & specificity", "AUC"))) ), plotOutput("sensSpecAcrossMethods2", height = "800px"), dataTableOutput("analysesDescriptions2") ) ) ) ) ), tabPanel("Database information", plotOutput("databaseInfoPlot", height = "650px"), div(strong("Figure 3.1."),"Overall distributions of key characteristics in each database."), dataTableOutput("databaseInfoTable"), div(strong("Table 3.2."),"Information about each database.") ) ) ) )
025eac216b01dd7cb403dbc128da875b571299c4
c05349aeb7e205ebac614d59d9cc4c3f14ead36f
/stochastic_simulations/analysis_dir/02-create_rds_dataset.R
16ccf151fdedd3e36d6df87de7565c8915e57728
[]
no_license
T-Heide/reply_to_tarabichi_et_al_ng2018
d24327a594ab00745f326cd1dbbf2eeeadb0e180
e6629d74573a70549e688e7755b256c81ce4005e
refs/heads/master
2020-03-19T13:48:02.302694
2018-10-30T15:04:43
2018-10-30T15:04:43
136,595,654
2
0
null
null
null
null
UTF-8
R
false
false
3,699
r
02-create_rds_dataset.R
################################################################################ # FILENAME: '02-create_rds_dataset.R' ################################################################################ # Options: simResultDir <- "./results/simulations" # Dir containing the simulation results. datasetDir <- "./results/datasets" # Libs: ######################################################################## library(neutralitytestr) library(dplyr) # Functions: ################################################################### paramsFromFileNames <- function(x) { exprI <- "([[:digit:]]*)" exprF <- "([[:digit:].]*)" expr <- sprintf("^simulation-mmr_%s-mbr_%s-seed_%s-cst_%s-[[:print:]]*$", exprI, exprF, exprI, exprI) base <- basename(x) match <- regexec(expr, base) params <- do.call(rbind, regmatches(base, match)) # Restructure parameter matrix: colnames(params) <- c("file","sc_mutation_rate","sc_deltaS","seed","clst") rownames(params) <- gsub("-simulated_sequencing[.]tsv$", "", params[,"file"]) params <- params[,-1, drop=FALSE] storage.mode(params) <- "numeric" params <- data.frame(params) return(params) } parseSimFiles <- function(f, ...) { # Extract the params from file names: cat("- Extracting parameters from file names.\n") params <- paramsFromFileNames(f) # Neutrality testing: cat("- Loading the data.\n") data <- lapply(lapply(lapply(f, read.delim), "[", "VAF"), unlist) cat("- Performing neutrality tests.\n") rsqs <- list() wh <- sapply(data, function(x) sum(between(x, 0.12, 0.24)) >= 11) test <- lapply(data[wh], neutralitytestr::neutralitytest, ...) rsqs[wh] <- lapply(test, function(x) unlist(x$rsq["metric"])[1]) params$rsq <- sapply(rsqs, function(x) { if (is.null(x)){return(NA)} else {return(x)}}) params$non_neutral <- params$rsq < 0.98 return(params) } parseInBatches <- function(files, batchSiz=200, ...) 
{ Nf <- length(files) Nb <- ceiling(Nf / batchSiz) index <- head(rep(seq_len(Nb), each=batchSiz), Nf) splFiles <- split(files, index) cat(sprintf("Loading %d simulation result files in %d batch(es):\n\n",Nf,Nb)) res_batches <- lapply(seq_along(splFiles), function(i) { cat(sprintf("Batch %d/%d:\n", i, Nb)) res <- parseSimFiles(splFiles[[i]], ...) cat("\n") return(res) }) return(do.call(rbind, res_batches)) } # Main: ######################################################################## # Detect cell count files: countFileMt <- "^simulation[[:print:]]*-cell_number[.]tsv$" countFiles <- list.files(simResultDir, countFileMt, rec=1, full=1) baseCountFiles <- basename(countFiles) # Load cell count data: cellCountData <- do.call(rbind, lapply(countFiles, read.delim)) cellCountData$subcloneFrac <- cellCountData$clone2 / cellCountData$total cellCountData$simID <- gsub("-cell_number[.]tsv$", "", baseCountFiles) cellCountData <- cbind(cellCountData, paramsFromFileNames(countFiles)) # Detect sequencing result files: resFileMt <- "^simulation[[:print:]]*-simulated_sequencing[.]tsv$" resFiles <- list.files(simResultDir, resFileMt, rec=1, full=1) # Load result data: resultData <- parseInBatches(resFiles) resultDataExt <- parseInBatches(resFiles,fmin=0.025, fmax=0.45) # Save as rds files: dir.create(datasetDir, showWarnings=FALSE, recursive=TRUE) saveRDS(resultData, file.path(datasetDir, "1f_model_fits.rds")) saveRDS(resultDataExt, file.path(datasetDir, "1f_model_fits_ext.rds")) saveRDS(cellCountData, file.path(datasetDir,"cell_counts.rds"))
f9c243755c3a373d319fea407572c2ee4101cdc8
70700fe9f3712da839680c1d7bd0085d8b3ef9f2
/Durbin-Watson statistic.R
77c071749f7b234dd5d8de8163f1417a4403131f
[]
no_license
sharyjose/Regression-Analysis
dfc4f210150f904575a175afd19ec961f7f73a50
a87ce17923872f54d72c046ea01c7600de1df83a
refs/heads/main
2023-08-28T04:28:16.821442
2021-09-14T00:02:59
2021-09-14T00:02:59
406,164,935
0
0
null
null
null
null
UTF-8
R
false
false
1,626
r
Durbin-Watson statistic.R
options(digits = 3)

########################################################
## A classic example: Anscombe's quartet!
## Four datasets with near-identical regression summaries
## but very different shapes -- always plot the data.
anscombe <- read.csv("D:/stat 315/anscombe.csv")
summary(lm(y1 ~ x1, data = anscombe))
summary(lm(y2 ~ x2, data = anscombe))
summary(lm(y3 ~ x3, data = anscombe))
summary(lm(y4 ~ x4, data = anscombe))

# But are the 4 datasets really all the same?
# Set up space for 4 plots on a page:
par(mfrow = c(2, 2))
plot(y1 ~ x1, data = anscombe)
plot(y2 ~ x2, data = anscombe)
plot(y3 ~ x3, data = anscombe)
plot(y4 ~ x4, data = anscombe)

########################################################
## A classic example from the first class: Age vs. Money
x1 <- c(82, 45, 71, 22, 29, 9, 12, 18, 24)
x2 <- c(22, 44, 31, 122, 20, 0, 2, 10, 35)
y <- c(71, 54, 43, 45, 21, 11, 30, 45, 10)
lm(y ~ x1 + x2)
lm(y ~ I(log(x1)) + x2)
plot(y ~ (x1))
plot(y ~ log(x1))
plot(log(y) ~ x1)
plot(log(y) ~ log(x1))

########################################################
## Example for serial correlation: prices over time
price_data <- read.csv("D:/stat 315/price_data.csv")
n <- dim(price_data)[1]
plot(y ~ location, data = price_data)
lmod <- lm(y ~ location, data = price_data)
summary(lmod)

res <- lmod$residuals
plot(res ~ time, data = price_data)

## Shift the residuals by one timepoint:
cbind(res[-1], res[-n])

## Lag-1 correlation of the residuals:
cor1 <- cor(res[-1], res[-n])
cor1
# Nearly the same quantity computed by hand (uncentred version):
sum(res[-1] * res[-n]) / sqrt(sum(res[-1]^2) * sum(res[-n]^2))

## Scatterplot of lagged vs. unlagged residuals:
plot(cbind(res[-1], res[-n]))

## Durbin-Watson statistic by hand:
DW <- sum((res[-1] - res[-n])^2) / sum(res^2)
DW  # approximately equal to: 2 - 2 * cor1

## Durbin-Watson test from the lmtest package:
library(lmtest)
dwtest(lmod)
# BUG FIX: removed a stray trailing "." statement that errored at run time
# with "object '.' not found".
6ad696ab5268b2da20b8c1cec99a3f60b6590791
9fd399fc293811236c60b7f63e11090a0cbb5914
/data-raw/create_academic_badges.R
ebf83b26d64c4bb18a859f6731636c74c3d5aca8
[]
no_license
beatrizmilz/shinyresume
e6b3dc667a37ca73f7d5c7e85ec8f7b909cea2aa
5454278435567185a29e504d91115e7b01158878
refs/heads/master
2023-06-13T07:44:01.271243
2021-07-03T21:08:32
2021-07-03T21:08:32
365,636,375
1
1
null
null
null
null
UTF-8
R
false
false
3,539
r
create_academic_badges.R
library(magrittr, include.only = "%>%") academic <- readr::read_csv2("data/academic_dataset.csv") source("R/add_url_to_authors.R", encoding = "UTF-8") academic_badges <- academic %>% dplyr::mutate( status_badges = dplyr::case_when( status == "Presented" ~ glue::glue( "![Status](https://img.shields.io/badge/Status-Presented-green.svg)" ), status == "Published" ~ glue::glue( "![Status](https://img.shields.io/badge/Status-Published-green.svg)" ), status == "Submitted" ~ glue::glue( "![status](https://img.shields.io/badge/Status-Submitted-orange.svg)" ), status == "Approved" ~ glue::glue( "![status](https://img.shields.io/badge/Status-Approved-lightgreen.svg)" ), TRUE ~ glue::glue( "![Status](https://img.shields.io/badge/Status-{status}-lightgray.svg)" ) ), type_of_publication_badges = dplyr::case_when( type_of_publication == "Book chapter" ~ glue::glue( "![Type of publication](https://img.shields.io/badge/Type of publication-Book chapter-blue.svg)" ), type_of_publication == "Journal Editorial" ~ glue::glue( "![Type of publication](https://img.shields.io/badge/Type of publication-Journal Editorial-yellow.svg)" ), type_of_publication == "Journal" ~ glue::glue( "![Type of publication](https://img.shields.io/badge/Type of publication-Journal-yellowgreen.svg)" ), type_of_publication == "Conference presentation" ~ glue::glue( "![Type of publication](https://img.shields.io/badge/Type of publication-Conference presentation-9cf.svg)" ), TRUE ~ glue::glue( "![Type of publication](https://img.shields.io/badge/Type of publication-{type_of_publication}-lightgray.svg)" ) ), url_text_badges = dplyr::case_when( !is.na(url_text) ~ glue::glue( "[![Read the text](https://img.shields.io/badge/URL-Text-lightgray.svg)]({url_text})" ), TRUE ~ glue::glue("") ), url_code_badges = dplyr::case_when( !is.na(url_code) ~ glue::glue( "[![Read the code](https://img.shields.io/badge/URL-Code-lightgray.svg)]({url_code})" ), TRUE ~ glue::glue("") ), url_slides_badges = dplyr::case_when( 
!is.na(url_slides) ~ glue::glue( "[![Read the slides](https://img.shields.io/badge/URL-Slides-lightgray.svg)]({url_slides})" ), TRUE ~ glue::glue("") ), url_youtube_badges = dplyr::case_when( !is.na(url_youtube) ~ glue::glue( "[![Watch the presentation](https://img.shields.io/badge/URL-Video-lightgray.svg)]({url_youtube})" ), TRUE ~ glue::glue("") ), ) %>% dplyr::mutate(authors_link = add_url_to_authors(authors), item_info_link = add_url_to_authors(item_info), ) academic_text <- academic_badges %>% dplyr::mutate( ano_previsao = dplyr::case_when( status == "Submitted" ~ glue::glue("Not published yet"), status == "Approved" & type_of_publication == "Conference presentation" ~ glue::glue("Not presented yet"), TRUE ~ glue::glue("{year}") ), text = glue::glue( "- {status_badges} {type_of_publication_badges} <br> {url_text_badges} {url_slides_badges} {url_code_badges} {url_youtube_badges} \n - {ano_previsao}. {authors_link}. {title}. {item_info_link}. \n\n \n\n \\<br>" ) ) academic_text %>% readr::write_csv2("data/academic_badges.csv")
75d4d0ad946261accd8b0fc1e100b4c47ce0f142
8ecba2046e47303dfde07cfe0e70886df5f41235
/number_years_in_league_by_player.R
9680722c596036716c5c8e2fa6e666fa5806781d
[]
no_license
drivergit/longest-tenured-college-on-NFL-team
b813bba3378357054bffff85debc7bd1cac7a607
96992121c4d6fbf8123da8c91b5db632bd8044ff
refs/heads/master
2021-01-17T20:55:55.597472
2015-03-30T00:30:41
2015-03-30T00:30:41
null
0
0
null
null
null
null
UTF-8
R
false
false
1,222
r
number_years_in_league_by_player.R
## Count, for every player, the number of seasons they appear on an NFL
## roster, then summarise how long players tend to stay in the league.

# Load the complete roster information.
roster.master.list <- read.csv("complete-roster-info.csv")

# Drop the first column, which was previously just a row number.
roster.master.list <- roster.master.list[, -1]

# Drop the first row, which was the "x" initialization placeholder.
roster.master.list <- roster.master.list[-1, ]

# read.csv converts spaces in column names to periods; restore the
# original column-name format.
colnames(roster.master.list) <- gsub("\\.", " ", colnames(roster.master.list))

library(dplyr)

players.years.active <- roster.master.list %>%
  # Keep the identifying fields plus the roster year for each entry.
  select(name, DOB, college, `roster year`) %>%
  # name + DOB + college should uniquely identify a player.
  group_by(name, DOB, college) %>%
  # Each row is one roster appearance, so n() is the number of seasons.
  summarise(roster.years = n()) %>%
  # Drop the grouping so the result can be sorted freely.
  ungroup %>%
  # Sort by seasons played, longest-tenured players first.
  arrange(desc(roster.years))

# Histogram of how many years a player plays in the league.
hist(players.years.active$roster.years)

# Summary statistics of league tenure.
summary(players.years.active$roster.years)
08282d23f413f9f42ae824cfa4313b66b5bd616e
61b4adde63a7b434e028488d2158ef23014c4cfc
/man/pdf_diff.Rd
f143167be2882b22da3c80a0d2e145490f720574
[]
no_license
SVA-SE/mill
a165deeae9612c1448d287caea73f5de6e87d02a
b5faa7738d6b475759c7f2f980e30628d7f15f35
refs/heads/master
2021-05-10T09:19:22.504502
2020-06-15T12:05:40
2020-06-15T12:05:40
103,141,739
1
0
null
null
null
null
UTF-8
R
false
true
722
rd
pdf_diff.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/images.R
\name{pdf_diff}
\alias{pdf_diff}
\title{pdf_diff}
\usage{
pdf_diff(reference, new, dir = tempdir())
}
\arguments{
\item{reference}{Path to a PDF file}

\item{new}{Path to a PDF file}

\item{dir}{directory to perform the comparison inside}
}
\value{
data.frame A data.frame with 3 columns: page, percent_diff, composite.
}
\description{
Difference of two PDFs
}
\details{
Submit two PDFs with the same number of pages to this function and get back a
data.frame with one row per page, giving the percent pixel difference for that
page and a path to a composite image of the page highlighting the differences
in red.
}
2de0c2f66602f792b3d56e8765724c3d0f4d2e9c
63a770312db431190f9bf7db60dacdb86134fa76
/src_tidy/1.1_norm3groups.R
52907b8ec854ecb4f740877e38d0693d4a1be3b4
[]
no_license
zhilongjia/nCoV2019
0ee4aab7dcc35a4273a36dd4be97e9b6d85c55f3
57b616e83aa638fbfcdd4be09101e0c4331eb0e0
refs/heads/master
2023-02-20T11:17:52.551457
2020-07-25T08:00:21
2020-07-25T08:00:21
236,764,272
0
0
null
null
null
null
UTF-8
R
false
false
2,290
r
1.1_norm3groups.R
load("../results/sample_count_df_symbol.RData") library(edgeR) library(limma) ################################################################################ g3_names <- c("Healthy", "Others", "Viral-like", "nCoV" ) subsample_pheno <- dplyr::filter(sample_meta, Types %in% g3_names ) subsample_pheno$Types <- factor(subsample_pheno$Types, levels = g3_names ) Expdesign <- model.matrix(~subsample_pheno$Types) S_raw <- as.matrix(dplyr::select(sample_count_df_symbol, -SYMBOL) ) rownames(S_raw) <- sample_count_df_symbol$SYMBOL dge <- DGEList(counts=S_raw) subsample_dge <- dge[,subsample_pheno$NewID] # filter and norm keep <- filterByExpr(subsample_dge, Expdesign) subsample_dge <- subsample_dge[keep,,keep.lib.sizes=FALSE] subsample_dge <- calcNormFactors(subsample_dge) v <- voom(subsample_dge, Expdesign, plot=FALSE, normalize="quantile") nCoV_pneu_Heal_norm_symbol_GE <- v$E ################################################################################ #PCA 3 groups load("../results/DEA_pneu_list.RData") DEG_nCoV_Heal <- rownames(DEA_list[["limma_DE"]][["nCoV_Heal"]]) DEG_pneuVir_Heal <- rownames(DEA_list[["limma_DE"]][["Vir_Heal"]]) DEG_pneuBac_Heal <- rownames(DEA_list[["limma_DE"]][["Others_Heal"]]) DEG_nCoV_pneuVir <- rownames(DEA_list[["limma_DE"]][["nCoV_Vir"]]) DEG_united <- unique(c(DEG_nCoV_Heal, DEG_pneuVir_Heal, DEG_pneuBac_Heal)) library(ggfortify) # all union genes autoplot(prcomp(t(v$E[intersect(DEG_united, rownames(v$E) ),]), scale=F), data=subsample_pheno, colour = "Types", size = 5, label = F, label.colour="black", ts.colour="black" ) autoplot(prcomp(t(v$E[intersect(DEG_united, rownames(v$E) ),]), scale=F), x=1,y=3, data=subsample_pheno, colour = "Types", size = 5, label = F, label.colour="black", ts.colour="black" ) autoplot(prcomp(t(v$E[intersect(DEG_united, rownames(v$E) ),]), scale=F), x=1,y=2, data=subsample_pheno, colour = "virus", size = 5, label = F, label.colour="black", ts.colour="black" ) 
readr::write_tsv(tibble::rownames_to_column(as.data.frame(v$E)), path="../results/nCoV_pneu_Heal_norm_symbol_GE.tsv") save.image("../results/1.1_norm3groups.RData") save(nCoV_pneu_Heal_norm_symbol_GE, sample_meta, file="../results/nCoV_pneu_Heal_norm_symbol_GE.RData")
5a241970de240863575b2e262e745ddbfa4fea1c
714e7c6736a2e3d8fd07634427c4a8bb3cef2d61
/R/map_colour_text.R
813ab5f0aff736066694e11b0fd1272982ccb4b8
[ "MIT" ]
permissive
flaneuse/llamar
da7cb58a03b2adbffb6b2fe2e57f3ffeede98afb
ea46e2a9fcb72be872518a51a4550390b952772b
refs/heads/master
2021-01-18T00:10:00.797724
2017-10-24T13:41:21
2017-10-24T13:41:21
48,335,371
0
1
null
null
null
null
UTF-8
R
false
false
3,034
r
map_colour_text.R
#' Modifies a data frame to determine color to overlay on a colored background
#'
#' Takes a data frame with a value to map to a fill colour and determines whether
#' light or dark text should be used as the label on top of the fill. For use with
#' \code{ggplot2::scale_colour_identity()} downstream.
#'
# @import dplyr
#'
#' @param df data frame containing the data
#' @param bckgrnd_column string containing the name of the column to map to fill values
#' @param colour_palette colour palette specification (list of hex values). Can use \code{RColorBrewer::brewer.pal} to generate
#' @param limits (optional) limits for the fill color palette mapping
#' @param sat_threshold (optional) breakpoint between the light and dark text color. 50 percent saturation, by default
#' @param dark_colour (optional) dark color to overlay on low fill values
#' @param light_colour (optional) light color to overlay on high fill values
#'
#' @examples {
#' # Define a Color Brewer palette
#' library(RColorBrewer)
#' # Generate random data
#' df = data.frame(x = 1:9, y = 1:9)
#' pal = 'Reds'
#'
#' limits = c(0,15)
#' df = map_colour_text(df, 'x', brewer.pal(9, pal), limits)
#'
#' library(ggplot2)
#' ggplot(df, aes(x = x, y = y, fill = x, colour = text_colour, label = round(hsv.s,2))) +
#' geom_point(size = 10, shape = 21) +
#' geom_text() +
#' scale_fill_gradientn(colours = brewer.pal(9, pal), limits = limits) +
#' scale_colour_identity() +
#' theme_blank()
#' }
#'
#' @seealso \code{\link{scale_colour_text}}
map_colour_text <- function(df, bckgrnd_column, colour_palette,
                            limits = c(min(df[[bckgrnd_column]]),
                                       max(df[[bckgrnd_column]])),
                            sat_threshold = 0.5,
                            dark_colour = grey90K,
                            light_colour = 'white') {
  # -- Create a colour palette --
  # colorRamp() returns a function mapping [0, 1] onto RGB values.
  ramp <- colorRamp(colour_palette)

  # -- Rescale the background values to [0, 1] --
  # FIX: the original built the expression as a string and evaluated it with
  # the underscore verb mutate_(), which is defunct in current dplyr.
  # Use the `.data` pronoun instead (same result, no string parsing).
  df <- df %>%
    mutate(bckgrnd = (.data[[bckgrnd_column]] - limits[1]) /
             (limits[2] - limits[1])) %>%
    # Clamp out-of-range values to [0, 1]; NA maps to 0.
    mutate(bckgrnd = ifelse(is.na(bckgrnd), 0,
                     ifelse(bckgrnd < 0, 0,
                     ifelse(bckgrnd > 1, 1, bckgrnd))))

  # Map the rescaled values to colours.
  mapped_colours <- ramp(df$bckgrnd)

  # Convert to HSV; rgb2hsv() expects a 3 x n matrix, hence the transpose.
  mapped_colours <- rgb2hsv(t(mapped_colours))
  mapped_colours <- data.frame('hsv' = t(mapped_colours))

  if (all(round(mapped_colours$hsv.s, 1) == 0)) {
    # Greyscale palette (no saturation): choose text colour by value (hsv.v).
    df <- df %>%
      bind_cols(mapped_colours) %>%
      mutate(text_colour = ifelse(hsv.v > sat_threshold,
                                  dark_colour, light_colour))
  } else {
    # Colour palette: choose text colour by saturation (hsv.s).
    df <- df %>%
      bind_cols(mapped_colours) %>%
      mutate(text_colour = ifelse(hsv.s < sat_threshold,
                                  dark_colour, light_colour))
  }

  return(df)
}
ccf46a72b7c1fd15f1206b31be206f1ac96bb635
da0634866b3d3cb67e1770c08e925d8ec30d0714
/app/app_heatmap.R
1db598f7b6dfe2b7168b06555a9bafbdcd6ce306
[]
no_license
TZstatsADS/Spr2017-proj2-grp8
d9796c7c1bcebdd7ac3dfa1b47aea1b5ea8b3cbb
3ef36d5a630d3d4fb4c5f5e0404d79ba8dd17ef4
refs/heads/master
2021-01-18T19:12:06.178211
2017-02-24T21:22:06
2017-02-24T21:22:06
80,873,376
0
3
null
null
null
null
UTF-8
R
false
false
7,540
r
app_heatmap.R
library(shiny)
library(ggplot2)
library(ggmap)
library(choroplethrZip)
library(dtplyr)
library(dplyr)
library(DT)
library(lubridate)

# ---- UI -------------------------------------------------------------------
# Single-tab navbar app: the user picks a city and an aspect; the server
# renders either a point-density heatmap (population, crime) or a zip-level
# choropleth of facility counts (library, restaurant, park, health care).
ui <- shinyUI(navbarPage("Perfect City Go", theme="black.css",
  tabPanel('heatmap',
    titlePanel(h2("heatmap of your city")),
    sidebarLayout(
      sidebarPanel(
        fixed=TRUE, draggable=TRUE,
        top=60, left="auto", right=20, bottom="auto",
        width=330, height="auto",
        selectInput("city",
                    label = "Where are you living?",
                    choices = c("New York" = "New York",
                                "Los Angeles" = "Los Angeles",
                                "San Francisco" = "San Francisco",
                                "Austin" = "Austin",
                                "Chicago" = "Chicago")),
        # Values arrive as the strings "1".."6"; the numeric comparisons in
        # the server rely on R's implicit coercion.
        selectInput("asp",
                    label = "Which aspect you want to learn about?",
                    choices = c("Population" = 1,
                                "crime rate" = 2,
                                "library" = 3,
                                "Restaurant" = 4,
                                "park" = 5,
                                "health care" = 6))
      ),
      mainPanel(plotOutput("heatmap"))
    ))
))

# ---- Helpers --------------------------------------------------------------

# Kernel-density heatmap of point locations (columns Lon/Lat) over a
# terrain basemap of `city`. Shared by the population and crime branches,
# which previously duplicated this code verbatim.
density_heatmap <- function(points, city) {
  base_map <- get_map(location = city, maptype = "terrain", zoom = 12)
  ggmap(base_map, extent = "device") +
    geom_density2d(data = points, aes(x = Lon, y = Lat), size = 0.3) +
    stat_density2d(data = points,
                   aes(x = Lon, y = Lat, fill = ..level.., alpha = ..level..),
                   size = 0.01, bins = 16, geom = "polygon") +
    scale_fill_gradient(low = "green", high = "red") +
    scale_alpha(range = c(0, 0.3), guide = FALSE)
}

# Zip-level choropleth of facility counts read from `csv_path`.
# `valid_regions` guards zip_zoom against zip codes unknown to zip.regions
# (zip_choropleth() errors on unknown zips). BUGFIX: the original library
# branch skipped this filter while the other three branches applied it.
zip_count_map <- function(csv_path, title, legend, valid_regions) {
  counts <- read.csv(csv_path)
  counts <- counts %>%
    filter(ZIP > 0) %>%                     # drop rows with missing/invalid zips
    mutate(region = as.character(ZIP)) %>%  # zip_choropleth keys on character regions
    group_by(region) %>%
    summarise(value = n())                  # one count per zip
  zip_choropleth(counts,
                 title = title,
                 legend = legend,
                 zip_zoom = counts$region[counts$region %in% valid_regions])
}

# ---- Server ---------------------------------------------------------------
server <- shinyServer(function(input, output) {

  data("zip.regions")  # zip reference table shipped with choroplethrZip

  # One reactive that returns the plot for the currently selected aspect.
  # The original used a chain of independent `if`s; `else if` makes the
  # branches mutually exclusive and adds a NULL fallback.
  map <- reactive({
    if (input$asp == 1) {
      # Population: one file for all cities; keep the selected city only
      d <- read.csv("~/GitHub/Spr2017-proj2-proj2_grp8/data/All-in/Population.csv")
      d <- d[d$city == input$city, ]
      d <- na.omit(d)
      density_heatmap(d, input$city)
    } else if (input$asp == 2) {
      # Crime: one file per city
      crime <- read.csv(paste("~/GitHub/Spr2017-proj2-proj2_grp8/data/crime/", input$city, ".csv", sep = ""))
      crime <- na.omit(crime)
      density_heatmap(crime, input$city)
    } else if (input$asp == 3) {
      zip_count_map(paste("~/GitHub/Spr2017-proj2-proj2_grp8/data/City Raw/", input$city, "/Library.csv", sep = ""),
                    title = paste("Library in ", input$city, sep = ""),
                    legend = "Number of Libraries",
                    valid_regions = zip.regions$region)
    } else if (input$asp == 4) {
      zip_count_map(paste("~/GitHub/Spr2017-proj2-proj2_grp8/data/City Raw/", input$city, "/Restaurant.csv", sep = ""),
                    title = paste("Restaurant in ", input$city, sep = ""),
                    legend = "Number of restaurants",
                    valid_regions = zip.regions$region)
    } else if (input$asp == 5) {
      zip_count_map(paste("~/GitHub/Spr2017-proj2-proj2_grp8/data/City Raw/", input$city, "/Park.csv", sep = ""),
                    title = paste("Park in ", input$city, sep = ""),
                    legend = "Number of Parks",
                    valid_regions = zip.regions$region)
    } else if (input$asp == 6) {
      zip_count_map(paste("~/GitHub/Spr2017-proj2-proj2_grp8/data/City Raw/", input$city, "/Health.csv", sep = ""),
                    title = paste("Health care in ", input$city, sep = ""),
                    legend = "Number of Health cares",
                    valid_regions = zip.regions$region)
    } else {
      NULL
    }
  })

  output$heatmap <- renderPlot({
    map()
  })
})

shinyApp(ui = ui, server = server)
164c6dae08a05d01ac2b9a1691dbae4ca72da5e4
547cee3ec07bbe9ea5708651caafb9b569dee332
/filter_runs_avg.R
f6a09142c4243e782015d1314ca6a9a11e287b6e
[]
no_license
ckrosslowe/sr15-scenarios
1d6c4da9aa35255fe70cc525228dc97f06191496
613b03a85e352c6e092a0859d385dc4a33f9ac0a
refs/heads/master
2021-05-26T09:55:11.324015
2021-01-12T19:56:48
2021-01-12T19:56:48
254,086,065
1
0
null
null
null
null
UTF-8
R
false
false
1,943
r
filter_runs_avg.R
# Function to filter runs
require(tidyverse)

# Inputs: run sample, warming categories, region, sustainability limits, models to exclude if region selected
#
# Returns the subset of `runs` (long-format IPCC SR1.5 scenario data) whose
# model/scenario combinations (`mod_scen`) (a) fall in the requested warming
# categories and (b) keep their 2040-2060 *global* averages of BECCS
# sequestration and primary bioenergy within the supplied sustainability
# limits. If a non-World region is requested, rows are additionally restricted
# to that region and models without regional breakdowns (`ms_exclude`) are
# dropped.
#
#   runs       data frame with (at least) columns category, Year, Region,
#              mod_scen, CarbonSequestration.CCS.Biomass,
#              CarbonSequestration.LandUse, PrimaryEnergy.Biomass
#   temp_cats  warming categories to keep (matched against `category`)
#   reg        region to return rows for; "World" by default
#   limits     named numeric vector with elements "bio", "beccs", "af"
#   ms_exclude mod_scen values lacking regional breakdowns (used only when
#              reg != "World")
filter_runs_avg <- function(runs, temp_cats, reg="World", limits, ms_exclude) {

  # Select runs that meet temp criteria
  runs <- runs %>% filter(category %in% temp_cats)

  # calculate 2040-2060 average of the three sustainability indicators,
  # using global rows only, one row per model/scenario; any NA averages
  # (indicator entirely missing for a mod_scen) are set to 0
  runs_avg <- runs %>%
    filter(Year>=2040, Year<=2060, Region %in% "World") %>%
    select(mod_scen, CarbonSequestration.CCS.Biomass, CarbonSequestration.LandUse, PrimaryEnergy.Biomass) %>%
    #replace(is.na(.),0) %>%
    group_by(mod_scen) %>%
    summarise(avg_beccs = mean(CarbonSequestration.CCS.Biomass, na.rm=T),
              avg_af = mean(CarbonSequestration.LandUse, na.rm=T),
              avg_bio = mean(PrimaryEnergy.Biomass, na.rm=T)) %>%
    replace(is.na(.), 0)

  # --- BECCS & Bioenergy - global
  # unpack the named limits vector; af_lim is currently unused because the
  # afforestation-based criterion below is commented out
  bio_lim <- limits["bio"]
  beccs_lim <- limits["beccs"]
  af_lim <- limits["af"]

  # Which models meet these criteria at a global level in 2050?
  #keep_ms <- runs$mod_scen[runs$Year==2050 & runs$Region %in% "World" & runs$CarbonSequestration.CCS.Biomass<=beccs_lim & runs$PrimaryEnergy.Biomass<=bio_lim]
  #keep_ms <- runs$mod_scen[runs$Year==2050 & runs$Region %in% "World" & runs$CarbonSequestration.CCS.Biomass<=beccs_lim & runs$CarbonSequestration.LandUse <= af_lim]
  # active criterion: 2040-2060 averages within the BECCS and bioenergy limits
  keep_ms <- runs_avg$mod_scen[runs_avg$avg_beccs <= beccs_lim & runs_avg$avg_bio <= bio_lim]

  # --- FILTER runs to the surviving mod_scens and the requested region
  runs <- filter(runs, mod_scen %in% keep_ms, Region %in% reg)

  # --- REMOVE runs that don't include regional breakdowns
  # TEST shows which runs don't have Electricity in 2050 (All do in world, after filters)
  #table(runs$mod_scen[runs$Year==2050], !is.na(runs$SecondaryEnergy.Electricity[runs$Year==2050]))
  if (!reg %in% "World") runs <- filter(runs, !mod_scen %in% ms_exclude)

  return(runs)
}
bb33f29d86901d09d4aacdb86d5f157013ab6081
b6ca4c890d29aa7085b47421bc770d37755e3061
/complete.R
bd265c9e9b0d6a3aa1bcbe62b4dff2fcfdecfd9e
[]
no_license
alanmyers/R_Programming
7d72d52a1214d1a008988c20662ff526c3f27cd4
f8d586ca45086570030f34487c5a64e3e3892324
refs/heads/master
2021-03-12T20:45:51.031486
2015-02-15T21:35:58
2015-02-15T21:35:58
30,843,044
0
0
null
null
null
null
UTF-8
R
false
false
972
r
complete.R
complete <- function(directory, id = 1:332, printit = TRUE) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files

  ## 'id' is an integer vector indicating the monitor ID numbers
  ## to be used

  ## 'printit' controls whether a progress line is printed per monitor

  ## Return a data frame of the form:
  ## id nobs
  ## 1  117
  ## 2  1041
  ## ...
  ## where 'id' is the monitor ID number and 'nobs' is the
  ## number of complete observations (both pollutant columns present)

  # Preallocate the result instead of growing a data frame with rbind()
  # inside the loop (the original was O(n^2) in the number of monitors).
  nobs <- integer(length(id))

  for (k in seq_along(id)) {
    i <- id[k]

    # %03d zero-pads to three digits, replacing the original
    # three-way if/else chain ("001.csv", "042.csv", "332.csv").
    fname <- sprintf("%s/%03d.csv", directory, i)
    monitor_data <- read.csv(fname)

    # Count rows where both pollutant columns (2 and 3) are observed and
    # non-negative. na.rm = TRUE reproduces subset()'s behavior of treating
    # NA comparisons as "drop the row".
    nobs[k] <- sum(monitor_data[[2]] >= 0 & monitor_data[[3]] >= 0, na.rm = TRUE)

    if (printit) {
      print(sprintf("Id: %d, nobs: %d", i, nobs[k]))
    }
  }

  data.frame(id = id, nobs = nobs)
}
5f0dfd1fe63e0fc55df4c820ea8e8abe859c1608
0edde7dc03658b64303ffc0d4da539f858cf77b9
/R/match_azure_with_cv2.R
34f71452938779e432740bcfca5b2cbe5bb70b3a
[ "MIT" ]
permissive
Atan1988/alvision
8ff114dbff8297768b0e8ef6196d0355b89b412e
15f771d24f70353c81fa62c7461c4e602dc75b01
refs/heads/master
2021-07-11T00:52:34.072235
2020-12-11T05:43:09
2020-12-11T05:43:09
221,738,729
0
0
null
null
null
null
UTF-8
R
false
false
1,476
r
match_azure_with_cv2.R
#' @title azure lines into cv2 bounding boxes
#' @description Matches OCR lines returned by the Azure Read API against the
#'   bounding boxes detected by OpenCV (\code{bounds_df}). Lines that fall
#'   inside a cv2 box are attached to that box's row in a list-column
#'   \code{az}; lines matching no box are appended as new rows whose geometry
#'   is taken from the Azure bounding box itself.
#' @param bounds_df bounds_df result from crop_out_boxes function
#' @param res_lines lines result from the azure api reading the whole page
#' @return the combined bounding-box data frame (with the \code{az}
#'   list-column) after passing through \code{add_rc_bbox()} -- presumably
#'   adding row/column grid indices; confirm against that helper.
#' @export
az_to_cv2_box <- function(bounds_df, res_lines) {
  if (nrow(bounds_df) == 0) {
    # No cv2 boxes at all: treat every azure line as unmatched.
    match_idx <- rep(NA, length(res_lines))
  } else {
    # Helper: bounding-box data frame -> list of per-box coordinate vectors.
    bounds_list <- bbox_df_to_c(bounds_df)

    # For each azure line, index of the FIRST cv2 box containing it
    # (chk_box_in with a tolerance of 10 -- units as used by chk_box_in),
    # or NA when no box contains the line.
    match_idx <- res_lines %>%
      purrr::map(~pts_to_wh(.$boundingBox)) %>%  # azure polygon -> x/y/w/h
      purrr::map_dbl(function(x) {
        res <- bounds_list %>% purrr::map_lgl(~chk_box_in(., x, 10)) %>% which(.)
        if (length(res) == 0) return(NA)
        return(res[1])
      })

    # Attach to each cv2 box the azure lines matched to it
    # (an empty list when nothing matched).
    bounds_df$az <- 1:nrow(bounds_df) %>% purrr::map(
      function(x) {
        idx <- which(match_idx == x)
        if (length(idx) == 0) return(list())
        return(res_lines[idx])
      }
    )
  }

  # Azure lines that landed in no cv2 box become rows of their own,
  # using the azure bounding box as the geometry.
  not_matched_idx <- which(is.na(match_idx))
  not_matched_bounds_df <- not_matched_idx %>% purrr::map_df(function(x) {
    # pts_to_wh() returns a vector; t() makes it a 1-row matrix so the
    # tibble gets one row with four columns.
    box_ref <- res_lines[[x]]$boundingBox %>% pts_to_wh() %>% t()
    box_ref <- tibble::as_tibble(box_ref)
    names(box_ref) <- c('x', 'y', 'w', 'h')
    #box_ref$az <- list(res_lines[[x]])
    return(box_ref)
  })
  # One-element list of azure lines per unmatched row, mirroring the
  # list-column shape built for matched rows above.
  not_matched_bounds_df$az <- not_matched_idx %>% purrr::map(~res_lines[.])

  # Combine matched and unmatched rows, then add derived row/column info.
  bounds_dfb <- dplyr::bind_rows(bounds_df, not_matched_bounds_df)
  bounds_df1 <- add_rc_bbox(bbox_df = bounds_dfb)
  return(bounds_df1)
}
36fe45701220fca499d4523ddd341dd39f65471c
15e70518da836ba65181d1e0b2f1ef5acc0f7983
/man/multiPredict.Rd
cfe6494b111f277fd4615362c38435c44132e543
[]
no_license
razielmelchor/caretEnsemble
03742a489ff021d7ac0edf5c15109568ad91013c
66f647e1f7994886ba0274dbb8de4124dd9f7dd3
refs/heads/master
2021-01-21T02:35:51.343915
2014-04-22T14:12:45
2014-04-22T14:12:45
null
0
0
null
null
null
null
UTF-8
R
false
false
520
rd
multiPredict.Rd
\name{multiPredict}
\alias{multiPredict}
\title{Make a matrix of predictions from a list of caret models}
\usage{
multiPredict(list_of_models, type, newdata = NULL, ...)
}
\arguments{
\item{list_of_models}{a list of caret models to make predictions for}

\item{type}{Classification or Regression}

\item{...}{additional arguments to pass to predict.train. DO NOT PASS
the "type" argument. Classification models will return probabilities if
possible, and regression models will return "raw".}
}
\description{
Make a matrix of predictions from a list of caret models
}
01dc3d897dd7fd157a4e1f25f29fff3e895b28b5
baa9f522320d708c4ac95e75e63849642a83a124
/man/noop.Rd
02289678145bead5e6c9d6b95bd9761b120ba069
[]
no_license
cbaumbach/miscFun
d9faba0e7f00c3a341c747f8288e8e92c70aae15
b7fb44d006b83655bc9b3a87eebf6cd04899ba01
refs/heads/master
2021-01-17T10:23:14.645478
2017-01-23T11:21:17
2017-01-23T11:21:17
30,698,538
0
0
null
null
null
null
UTF-8
R
false
true
279
rd
noop.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/miscFun.R \name{noop} \alias{noop} \title{Do nothing} \usage{ noop(...) } \arguments{ \item{\dots}{Ignored} } \value{ None. } \description{ Do nothing } \examples{ noop(ignore_whatever_is_in_here) }
3536f420b1e8d273c108715c29143837e0ccc023
3e4acee05323c4195a69109151ab1925688b1033
/11_COS_example.R
ecddbcccdf066ac22511542e2376c3f312f39bf5
[]
no_license
costlysignalling/TheorBiol2020
1a83c95aa76400e208613ea07213b14f33d92338
23e4bea87cb63d5672fb279f869a778e04cb6b7b
refs/heads/main
2023-04-01T17:02:51.703110
2021-04-13T17:58:48
2021-04-13T17:58:48
300,630,454
0
1
null
null
null
null
UTF-8
R
false
false
3,044
r
11_COS_example.R
library(rethinking)

# Corridor of stability (COS) example:
# how large a sample is needed before a regression slope estimate settles
# inside a corridor of +/- `wd` around the true value and stays there?

# Create the dataset, where you know the correct answer (true slope = 0.2)
h <- rnorm(10000, 0, 1)
w <- h*0.2 + rnorm(10000, 0, 0.6)

# Set the sample sizes you are willing to consider
ns <- 5:200

# This function gives you the sequence of beta estimates for one randomly
# shuffled sample: start at n = 5, estimate beta (we know the correct answer,
# because we created the original dataset), add another participant,
# re-estimate, and iterate until n = max(ns).
givebet <- function(){
  sam <- sample(1:10000)
  ds <- list(h=h[sam], w=w[sam])
  betas <- numeric(length(ns))   # preallocate instead of growing the vector
  counter <- 1
  for(n in ns){
    dsub <- list(h=ds$h[1:n], w=ds$w[1:n])
    m <- quap(alist(
      w~dnorm(mu,sigma),
      mu<-a+b*h,
      a~dnorm(0,0.2),
      b~dnorm(0,0.5),
      sigma~dexp(1)
    ), data=dsub, start=list(a=0,b=0,sigma=1))
    betas[counter] <- precis(m)[[1]][2]   # posterior mean of the slope b
    print(n)
    counter <- counter+1
  }
  return(betas)
}

# You need to create multiple curves like this. The more, the better
# (but it takes more computation time)
runs <- 20
curves <- replicate(runs, givebet())
str(curves)

# Set the COS width, where you wish to land
wd <- 0.10

plot(ns, curves[,1], type="n", ylim=c(-0.2,0.5))
for(i in 1:runs){
  lines(ns, curves[,i], col="#00000080")
}

# You can highlight a single simulation run like this
lines(ns, curves[,1], col="#0000FF")

# Plot the correct estimate and the corridor of stability
abline(h=0.2, col=2)
abline(h=0.2+c(-1,1)*wd, col=2, lty=2)

# Stability is usually defined as staying within the corridor.
# First, flag whether each point along each curve is within the corridor.
between <- curves>0.2-wd & curves<0.2+wd
str(between)

# A point counts as stable only if the curve never leaves the COS from that
# point until the end of the simulation. This is checked by comparing the
# cumulative sum of the reversed in-corridor indicator with 1:length(ns).
# This is the procedure for the fourth curve:
i <- 4
rev(cumsum(rev(between[,i]))==c(1:length(ns)))

# This is the procedure for all curves (a matrix is returned).
# BUGFIX: the original used the undefined variable `poc` (which made this
# line error) and hard-coded 1:20 instead of 1:runs.
ins <- sapply(1:runs, function(i){rev(cumsum(rev(between[,i]))==c(1:length(ns)))})

# Another arbitrary parameter you need to define (besides the COS width `wd`)
# is the proportion of curves you want to securely keep within the COS until
# the end of the simulation (until maximum n is reached). 80% selected here.
security <- 0.80

# Calculate the proportion of your simulation runs where the estimate stays
# within the corridor until the end of the simulation
proport <- rowSums(ins)/runs

# Result is the threshold: the first sample size where the requested
# proportion of estimates stays within the requested corridor.
tt <- ns[min(which(proport>=security))]
tt

# You can put it as a vertical line on the plot.
abline(v=tt, col=3)
6c9585cc1ca0ffc243bab701df4bf83d1d9269ee
f1d2bfa8addbbb74cc8a14269c417667884f7672
/R/emisja.R
5a8dad73d7fe2d2a89a123cc6c637a87837e700a
[ "MIT" ]
permissive
prusakk/KPFuels
21f162446a3a538ef77bb626c7fc1111a473a0e7
91d6663eee4520d1f7579cab40e820af62accdad
refs/heads/main
2023-01-30T03:47:38.998695
2020-12-13T21:45:43
2020-12-13T21:45:43
319,713,311
0
0
null
null
null
null
UTF-8
R
false
false
1,479
r
emisja.R
#' Compute the emissions generated by combustion-engine vehicles
#'
#' @param dane dataframe - input data (defaults to the global \code{input} table)
#' @param kategoria character - vehicle category, e.g. "Passenger Cars", "Heavy Duty Trucks"
#' @param euro character - exhaust emission standard, e.g. "Euro II", "Euro III"
#' @param mode character - driving mode, e.g. "Highway", "Urban Peak"
#' @param substancja character - name of the emitted substance, e.g. "CH4", "N2O"
#'
#' @return dataframe
#'
#' @import dplyr
#' @export
#'
#' @details Formula used to compute the emission:
#'
#' (Alpha x Procent^2 + Beta x Procent + Gamma + (Delta/Procent)) /
#' (Epsilon x Procent^2 + Zita x Procent + Hta) x (1 - Reduction)
#'
emisja <- function(dane = input,
                   kategoria = "Passenger Cars",
                   euro = "Euro 4",
                   mode = "",
                   substancja = "CO") {

  # Restrict the emission-factor table (`wskazniki`, a package-level dataset)
  # to the requested category / standard / pollutant / driving mode.
  out <- wskazniki %>%
    filter(Category %in% kategoria) %>%
    filter(Euro.Standard %in% euro) %>%
    filter(Pollutant %in% substancja) %>%
    filter(Mode %in% mode)

  # BUGFIX: join against the `dane` argument. The original joined against the
  # global `input`, silently ignoring whatever data frame the caller passed.
  out <- inner_join(x = out, y = dane, by = c("Segment", "Fuel", "Technology"))

  # Apply the emission formula (see @details) scaled by fleet size `Nat`.
  out <- out %>%
    mutate(Emisja = Nat *
             ((Alpha * Procent ^ 2 + Beta * Procent + Gamma + (Delta/Procent)) /
                (Epsilon * Procent ^ 2 + Zita * Procent + Hta) * (1-Reduction))) %>%
    select(Category, Fuel, Euro.Standard, Technology,
           Pollutant, Mode, Segment, Nat, Emisja)

  # Drop duplicated rows (the original used the right-assign `->` here).
  out <- out[!duplicated(out), ]

  return(out)
}
0f84b1ec3350f7445a7c3d300fb62ebfebe62fd5
b733d3f7e67a62c34d4889c561d2388da835d451
/tests/testthat/test-createLocationID.R
a8e4869a43d20aa35042cc22b29440e76345ffdc
[]
no_license
cran/MazamaCoreUtils
7c3c4c71d2667b4512f203ca5ba7c67df773dc9d
15f2b32ed32835229b1df8cf74243d745ea7fd16
refs/heads/master
2023-09-05T17:48:57.276030
2023-08-29T21:50:02
2023-08-29T23:30:40
154,902,175
0
0
null
null
null
null
UTF-8
R
false
false
1,092
r
test-createLocationID.R
test_that("algorithms work", {

  # Setup: 11 paired coordinates along a diagonal
  longitude <- -120:-110
  latitude <- 30:40

  # Default to "digest"
  expect_identical(
    createLocationID(longitude, latitude),
    c("2579cca9bc8bb160","0bc60b264bab6c8f","242c3d44df97de47","891fb5f2df4f8a39",
      "1e9bb5a927f39726","890cb1a66d1e9e9d","e2105228a0188686","f61bfb636bba4233",
      "c60fc5cd3450730d","ef89fa02bbd43fb5","c389bbe887dcf75f")
  )

  # Explicit "digest"
  # BUGFIX: actually pass the algorithm argument; the original omitted it,
  # making this a duplicate of the default-algorithm test above.
  expect_identical(
    createLocationID(longitude, latitude, "digest"),
    c("2579cca9bc8bb160","0bc60b264bab6c8f","242c3d44df97de47","891fb5f2df4f8a39",
      "1e9bb5a927f39726","890cb1a66d1e9e9d","e2105228a0188686","f61bfb636bba4233",
      "c60fc5cd3450730d","ef89fa02bbd43fb5","c389bbe887dcf75f")
  )

  # Explicit "geohash"
  expect_identical(
    createLocationID(longitude, latitude, "geohash"),
    c("9m6dtm6dtm","9me2k56u54","9msn4c7j88","9mug9x7nym","9qj92me2k5","9qnpp5e9cb",
      "9qquvceepq","9qxdkxeut5","9wb25msn4c","9wcjf5sr2w","9x1g8cu2yh")
  )

  # Stop on unexpected algorithm
  expect_error(createLocationID(longitude, latitude, "paste"))

})
e9e56bbd33ed0eed28bdc84461cec58893d5fdcb
71342669c1ecd5246822806ba02dcfd2886324b2
/run_analysis.R
1ec7611b909df7d06dbfd3ce48497c48b7febe98
[]
no_license
zephyr213/GCD_courseproject
49895cf710ab8cb8c074a210e8a7cc90fc659ebd
051390d299c9a5737d9291728d6a1ee926147507
refs/heads/master
2021-01-10T20:35:16.754484
2014-10-26T23:34:55
2014-10-26T23:34:55
null
0
0
null
null
null
null
UTF-8
R
false
false
2,082
r
run_analysis.R
# Course project: merge the UCI HAR test/train data, keep only mean()/std()
# features, label activities, and build a tidy per-activity/per-subject
# average data set (newdata_2).

# first read in the train, test data
testx <- read.table("./UCI HAR Dataset/test/X_test.txt", header = FALSE)
testy <- read.table("./UCI HAR Dataset/test/y_test.txt", header = FALSE)
tests <- read.table("./UCI HAR Dataset/test/subject_test.txt", header = FALSE)
trainx <- read.table("./UCI HAR Dataset/train/X_train.txt", header = FALSE)
trainy <- read.table("./UCI HAR Dataset/train/y_train.txt", header = FALSE)
trains <- read.table("./UCI HAR Dataset/train/subject_train.txt", header = FALSE)

# read in the variable descriptions
features <- read.table("./UCI HAR Dataset/features.txt")

# find only the mean() and std() variable indices and create a new index
# (fixed = TRUE so "-mean()" is matched literally, not as a regex)
meanindex <- grep("-mean()", features$V2, fixed = TRUE)
stdindex <- grep("-std()", features$V2, fixed = TRUE)
newindex <- sort(c(meanindex, stdindex))

# subset x data to the mean/std columns only
testx1 <- testx[, newindex]
trainx1 <- trainx[, newindex]

# include the activity label and subject id with the measurements
testnew <- cbind(testx1, testy, tests)
trainnew <- cbind(trainx1, trainy, trains)

# tag each half before merging test and train
testnew$group <- "test"
trainnew$group <- "train"

# descriptive variable names
# BUGFIX: the original referenced the undefined object `featurex`;
# the features table was loaded as `features`.
vnames1 <- features$V2[newindex]
vnames <- c(vnames1, "activity", "subject", "group")
names(testnew) <- vnames
names(trainnew) <- vnames
newdata <- rbind(testnew, trainnew)

# substitute descriptive labels for the numeric activity codes
# (vectorized lookup replaces the original six-way if-chain loop)
activity_labels <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                     "SITTING", "STANDING", "LAYING")
newdata$activity <- activity_labels[as.numeric(newdata$activity)]

# now newdata is the merged data set

# now create a second tidy dataset: the average of every mean/std variable
# for each activity and each subject
library(reshape2)
tmpdata <- melt(newdata, id = c("activity", "subject"), measure.vars = vnames1)
newdata_2 <- dcast(tmpdata, activity + subject ~ variable, mean)
# now newdata_2 is the data set required for step 5
37cea900d2d7c2d2b8acf709028c3e9f92cd1e94
1cd5be99e42382f8b7c8aea6b89b59870144b0f4
/codes/plot2Shape.R
0b4a7042e1c29d7025113a10d305b623710998e1
[]
no_license
acdantas/mef2-selexseq
89812eba9216c1277357db2007820dd68f66a276
768f6d2b8b108fedd56429be60db2662e34ba4f5
refs/heads/master
2022-11-22T02:22:22.472942
2020-07-28T06:16:32
2020-07-28T06:16:32
143,241,809
2
0
null
null
null
null
UTF-8
R
false
false
1,402
r
plot2Shape.R
# Plot the column-wise mean profile of a shape-feature matrix (rows = sequences,
# columns = positions), optionally overlaid on the mean profile of a second
# "background" matrix. Per-position means are drawn as translucent dots and
# smoothed with a lowess line (span f = 1/10).
#
#   shapeMatrix  numeric matrix of shape values; NAs are ignored via na.rm
#   background   optional second matrix plotted in grey behind/alongside
#   colDots/colDotsBg    dot colours for foreground / background profiles
#   colLine/colLineBg    lowess line colours for foreground / background
#   cex, lwd     dot size and line width
#   ylim         y-axis limits; defaults to the range of the plotted means
#   ...          forwarded to plot()
#
# NOTE(review): `n` and `span` are computed but only used by the commented-out
# y-label / axis code below; the x axis is currently drawn without ticks.
plot2Shape <- function (shapeMatrix, background = NULL, colDots = rgb(0, 0, 1, 0.1),
    colDotsBg = rgb(0, 0, 0, 0.1), colLine = "steelblue", colLineBg = "gray50",
    cex = 0.5, lwd = 4, ylim, ...)
{
    n <- nrow(shapeMatrix)                     # number of sequences (for the commented-out y label)
    mu <- colMeans(shapeMatrix, na.rm = TRUE)  # mean value at each position
    m <- length(mu)
    span <- round(m/2)                         # center position (for the commented-out axis)
    if (is.null(background)) {
        # Single-profile plot
        if (missing(ylim))
            ylim <- range(mu, na.rm = TRUE)
        plot(mu, col = colDots, pch = 19, cex = cex, xaxt = "n",
            xlab = "", #ylab = paste0("Mean value (n=", n, ")"),
            ylim = ylim, ...)
        # axis(1, at = c(0, m), labels = c(-span, paste0("+", span)))
        # abline(v = span, lty = 2, col = "gray30")
        lines(lowess(mu, f = 1/10), col = colLine, lwd = lwd)
    }
    else {
        # Foreground profile over a background profile; shared y range
        mu1 <- mu
        mu2 <- colMeans(background, na.rm = TRUE)
        if (missing(ylim))
            ylim <- range(mu1, mu2, na.rm = TRUE)
        plot(mu1, col = colDots, pch = 19, cex = cex, xaxt = "n",
            xlab = "", #ylab = paste0("Mean value (n=", n, ")"),
            ylim = ylim, ...)
        points(mu2, pch = 19, cex = cex, col = colDotsBg)
        # axis(1, at = c(0, m), labels = c(-span, paste0("+", span)))
        # abline(v = span, lty = 2, col = "gray30")
        lines(lowess(mu1, f = 1/10), col = colLine, lwd = lwd)
        lines(lowess(mu2, f = 1/10), col = colLineBg, lwd = lwd)
    }
}
70dec6aeaf7a3535d54f4fe39894ad45ec676003
ff6cd64471c3dd38fb4b8ed3d5b5f816e9450063
/rangesurvey/R/check_groups_recorded.R
eebae5979fd0eafb9d527ba9587efac69af462c0
[ "MIT" ]
permissive
JamieCranston/RangeShift_survey
c9dcc97e9a433079138633f23f97b59c39b777e0
71f9318f650bc9e02389c227c77b16cfda2305cb
refs/heads/main
2023-04-14T02:46:13.075353
2022-03-20T12:08:55
2022-03-20T12:08:55
471,427,098
0
0
null
null
null
null
UTF-8
R
false
false
926
r
check_groups_recorded.R
#' check_groups_recorded
#'
#' Joins the manually validated / imputed species-group classification onto the
#' respondent table, keyed by respondent \code{id}, based on the free-text
#' "other" option of the groups-recorded question.
#'
#' @param data respondent character table
#' @param config config file (for paths to validated groups recorded csv).
#'   NOTE(review): currently unused -- the readr load below is commented out,
#'   so \code{species_group_val} presumably comes from package data; confirm.
#'
#' @return respondent data with a column added for the groups the respondent
#'   reported recording, as listed in the text field of the "other" option on
#'   the groups recorded question (e.g. range-shifting species).
#' @export
check_groups_recorded <- function(data, config) {

  #species_group_val <- readr::read_csv(config$validation_dirs$species_group_val, col_types = readr::cols(id = "c"))

  # Surface the imputation table to the analyst before applying it
  print("please see our imputation for respondent answers about other groups they recorded")
  print(species_group_val)

  other_validated <- data %>%
    # Join the validated classification by respondent id; the free-text
    # OtherGroups column is dropped from both sides afterwards.
    dplyr::left_join(
      x = .,
      y = species_group_val %>% dplyr::select(-.data$OtherGroups),
      by = "id"
    ) %>%
    dplyr::select(-.data$OtherGroups) %>%
    # NOTE(review): mutate() with a bare column selection re-adds the column
    # unchanged; this looks like a leftover no-op -- confirm intent.
    dplyr::mutate(.data$imputed_group)

  return(other_validated)
}
9e2ab7a8a521d7cf2677a950e3e1c7791a3e0f34
2e1a9e1d0d038293bc8dba83d0473e9dc2f7f93e
/stats/scripts/altmetrics.analysis/man/write.table.gzip.Rd
2532a050220daab774f812cc42b3ac736c5999ed
[ "MIT", "CC0-1.0" ]
permissive
neostoic/plos_altmetrics_study
92821fa2634e75235f0fc5603b7ee7e25d298c91
5d4bd840763286c77cb834ef351e137eefb7946b
refs/heads/master
2020-12-25T08:29:47.940176
2011-03-22T15:00:48
2011-03-22T15:00:48
null
0
0
null
null
null
null
UTF-8
R
false
false
248
rd
write.table.gzip.Rd
\name{write.table.gzip}
\alias{write.table.gzip}
\title{write table gzip}
\usage{
write.table.gzip(data, basedir, filename)
}
\arguments{
\item{data}{ }

\item{basedir}{ }

\item{filename}{ }
}
\author{Jason Priem and Heather Piwowar}
6aeeb6d6be19a0d3d807e3d6bd6d96a6ee3c0050
eb661d348facb5aee7240fbbedf6e615b7dcb3c3
/plot3.R
3a1e5ecda6ece871d3734bd5a5a96dc47eaea8cd
[]
no_license
swoldetsadick/COUREDAPRO2
9169f4c564530b1619d851022b829d842e01c348
3d2dec9bec6e2d19cd12478aca88a505c98fa43c
refs/heads/master
2021-01-19T17:17:41.004522
2014-09-20T16:04:51
2014-09-20T16:04:51
24,243,472
0
1
null
null
null
null
UTF-8
R
false
false
1,575
r
plot3.R
# Course project plot 3: total PM2.5 emissions in Baltimore City (fips 24510)
# by type of source, 1999-2008, saved as plot3.png.

# Downloading in current working directory (CWD) and Loading data sets
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(url, "exdata-data-NEI_data.zip", mode="wb")
unzip("exdata-data-NEI_data.zip")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Subsetting data related only to Baltimore city and saving it to dataint.
# Loading plyr library to use ddply for summing amount of PM2.5 emission in tons
# by year and type of source. Data obtained is stored in new dataset called data3
# whose first column shows the year, the second the type of source and the last
# the total emission of the above pollutant that year. Columns are labeled
# accordingly.
library(plyr)
library(ggplot2)
dataint <- subset(NEI, as.factor(NEI$fips) == 24510)
data3 <- ddply(dataint, .(as.factor(year),as.factor(type)), summarize,tot=sum(as.numeric(Emissions)))
names(data3)[1] <- "Year"
names(data3)[2] <- "Type"

# plot3 uses the ggplot plotting system and plots a titled lines plot with duly
# labeled x - and y - axis. A legend is included on the plot. The plot is then
# saved in CWD under a png file named plot3.png.
png("./plot3.png")
plot.title="Total Emissions in Baltimore City from 1999 to 2008 from PM"
plot.subtitle="by type of source"
plot3 <- ggplot(data3, aes(Year, tot, group = Type))
plot3 <- plot3 + geom_line(aes(color = Type)) + labs(y=expression("Total Emissions in Tons from PM"[2.5]))
# bquote/atop stacks the main title over an italic subtitle
plot3 <- plot3 + ggtitle(bquote(atop(.(plot.title), atop(italic(.(plot.subtitle)), "")))) + labs(x="Years")
plot3   # print the plot to the open png device
dev.off()
0a4859f9f1ec02057e86ceb03053298241585f8c
81467b617a2cc6e211d0dc0c735a251445f4a163
/uberdata/man/multinomialHierBayesModel.Rd
e8d083b08da2d497e7f51efdb979336437bb0254
[]
no_license
adam-sullivan/uberproject
664d1813296907f0aa148c9848ebdcb252ff7f7e
b0f68bca5b8f7d5df57215300a9fa046b02f4dcd
refs/heads/master
2016-09-06T07:51:59.261832
2015-04-04T22:39:20
2015-04-04T22:39:20
33,405,636
0
0
null
null
null
null
UTF-8
R
false
false
752
rd
multinomialHierBayesModel.Rd
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/prediction.R
\name{multinomialHierBayesModel}
\alias{multinomialHierBayesModel}
\title{Bayesian hierarchical multinomial logistic regression}
\usage{
multinomialHierBayesModel(testTrip)
}
\arguments{
\item{testTrip}{an input of the shortened feature vector}
}
\value{
outMCMCs The multinomial Bayesian model chains for the estimates for beta.
}
\description{
Exploratory function. No guarantee on code safety, included for
demonstration. This function was my top pick for being able to model the
dropoff location. It creates a list structure (one for each unique ID) as
the input and predictor variable. The output is MCMC samples for the
estimates for beta.
}
6ef665665b8ab8b86141b2892c1cd4fa25930493
1d9593d1031d38caef98783af997de9a2c02b901
/R/get_results_aj.R
81ec61531180b31bcaf1f57cc6e37012eaed5dda
[]
no_license
antonmalko/ibextor
6b079cf5e5aac0f07e4ba238bfbefbb8e4e120f1
1396d025907985929f9763f2ae6d07a41e76823c
refs/heads/master
2021-09-09T16:37:23.259768
2018-03-18T04:42:33
2018-03-18T04:42:33
125,311,591
1
0
null
null
null
null
UTF-8
R
false
false
1,779
r
get_results_aj.R
#' @rdname get_results
#' @export
get_results_aj <- function(file_name,
                           elem_number = NULL,
                           del_col = NULL,
                           del_mode = "auto",
                           col_names = NULL,
                           partial_names = TRUE,
                           col_classes = NULL,
                           partial_classes = TRUE,
                           short_subj_ids = TRUE,
                           ...) {

  # Default column names/classes for AcceptabilityJudgment results
  if (is.null(col_names)) {
    col_names <- c("question", "answer", "is_correct", "quest_rt", "sentence")
  }
  if (is.null(col_classes)) {
    col_classes <- c("character", "character", "numeric", "numeric", "character")
  }

  res <- read_ibex(file_name, ...)
  res <- subset_ibex(res, controller = "AcceptabilityJudgment",
                     elem_number = elem_number)
  res <- res[, 1:11]

  # AcceptabilityJudgment results come in pairs of rows: odd rows carry the
  # FlashSentence info (column 8 = the sentence shown), even rows carry the
  # question/answer data.
  res_FlashSentence <- res[(seq(1, NROW(res), by = 2)), 8]
  if (NROW(res_FlashSentence) == 0) stop("Subsetting for sentences data failed")

  res <- res[(seq(2, nrow(res), by = 2)), ]
  if (NROW(res) == 0) stop("Subsetting for questions data failed")
  rownames(res) <- NULL

  # add FlashSentence info into the 12th column, after the main data
  res[, 12] <- res_FlashSentence

  # BUGFIX: the original called droplevels(res) without assigning the result,
  # so unused factor levels were never actually dropped.
  res <- droplevels(res)

  res <- format_ibex(res,
                     col_names = col_names,
                     partial_names = partial_names,
                     col_classes = col_classes,
                     partial_classes = partial_classes)
  res <- recode_subjects(res, short_ids = short_subj_ids)
  res <- delete_columns(res, del_col, del_mode)

  return(res)
}
04c1d2cddcf9cb41f7c2e1b948a8a63cf9001bea
2f92299aa6ba0f054d59a07ea71b755b0a472bdc
/GitHub Setup v1.R
7208619e9129a0d204f4d7743608bf1e5bf7c630
[]
no_license
MichaelLandivar/GitHub-Setup
bb44d8c1ac4a119121ab9ea947492a8e5e0700e1
4c6eb0d841cef35120836eae5e055b3cddd0f4ab
refs/heads/master
2021-01-11T01:50:20.126299
2016-11-21T15:54:59
2016-11-21T15:54:59
70,844,678
0
0
null
null
null
null
UTF-8
R
false
false
2,927
r
GitHub Setup v1.R
#Install R #https://www.r-project.org/ #Install RStudio #https://www.rstudio.com/ #Install Git #https://git-scm.com/downloads #Sign up for & install GitHub #https://github.com/ #Installing & Configuring Git #A. Login to github.com and navigate to repositories #1. Select "New" #2. Provide a repository name #3. Provide a description (optional) #4. Select privacy if applicable #5. Check box "Initialize this repository with a README" that provides initial repository information #6. Select "Create repository" #B. Configure Git on your computer #1. Open "Start" and search for "GitHub" and "Git Shell" #2. Pin both to desktop or other location for easy access #3. Open Git shell and enter the following shell commands: #git config --global user.name "YOUR NAME" #git config --global user.email "YOUR EMAIL" #git config --list #The last command verifies the information entered #C. Open RStudio #1. Go to Tools > Global Options > Git/SVN #2. Check box "Enable version control interface for RStudio projects" #3. Provide file path for Git executable (e.g., C:/Program Files/Git/bin/git.exe) #Be sure that when installing git you keep track of where the program files are saved #4. Select "Ok" #5. Restart RStudio #6. Go to File > New Project > Version Control > Git #7. Provide Repository URL from github account # Login to github #Go to Your Profile > Repositories #Select the repository you would like to connect to #Go to Clone or download #Copy URL and paste in RStudio Provide Repository URL dialogue #8. Create project as subdirectory of... #D. Create New File > R Script #1. Write codes #2. Go to tools > Version Control > Commit... #3. Select "Show staged" #3. Check "Staged" box for desired R file(s) and for the GitHub Project #4. Type a commit message description of the action on the repository #5. 
Select "Commit" #You will be prompted to enter your githum.com credentials #Example of successful commit: #[master YOURHEXADECIMAL] YOUR COMMIT MESSAGE #2 files changed, 63 insertions(+) #create mode 100644 YOURRSCRIPTFILENAME #create mode 100644 YOURGITREPOSITORYNAME #6. Select "Push" #Example of successful branch push: #To https://github.com/YOURGITUSERNAME/YOURGITREPOSITORYNAME #YOURHEXADECIMAL master -> master #7 Go to github.com and refresh page #8 Updates now located in repository > commits #E. Check for track changes/Update with changes #1. Go to Tools > Version Control > Diff "" #2. Type a commit message description of the action on the repository #3. Select "Commit"
7b4db94bf81a259dcef03561b974f2e779e21b99
86b56702a9041a8a17e60d95c212978f388311ad
/follow_seq2ASV.R
7c934154ebe3356f7f9f34d70582fa900ca50d86
[]
no_license
AMCMC/ITS_seq_ASVmapping
c166c72eb4c86b97a409a941dcad486bf6eacea4
b7630c8f083eabe311b85214411f654ee6158226
refs/heads/master
2020-03-27T16:31:50.353858
2018-09-04T14:51:09
2018-09-04T14:51:09
146,789,478
0
0
null
null
null
null
UTF-8
R
false
false
10,407
r
follow_seq2ASV.R
library(dada2);packageVersion("dada2") library(phyloseq);packageVersion("phyloseq") library(ggplot2);packageVersion("ggplot2") source("../../MicrobiotaCentre/MiCA_ITS/Scripts/taxa_facet_barplot_asv.R") #### regular bigdata approach #### fqs <- list.files("./", pattern = "I.*fastq.gz") dereps <- list() for (file in fqs){ dereps[[file]] <- derepFastq(file) } names(dereps) <- gsub("_.*","",names(dereps)) err8 <- readRDS("ITS_0008.R1.RDS") err9 <- readRDS("ITS_0009.R1.RDS") ddF <- list() for (i in names(dereps)){ if (substr(i,2,2)==8){ ddF[[i]] <- dada(derep = dereps[[i]], err = err8) }else{ ddF[[i]] <- dada(derep = dereps[[i]], err = err9) } } topx=10 topxseqs <- c() for (i in dereps){ topxseqs <- c(topxseqs,names(i$uniques[1:topx])) } length(unique(topxseqs)) # track a total of 13 sequences table(table(topxseqs)) #6 sequences are in the top10 in all samples #build dataframe df <- data.frame() for(i in names(dereps)){ df <- rbind(df, data.frame(sequence=names(dereps[[i]]$uniques[1:topx]), Abundance=unname(dereps[[i]]$uniques[1:topx]), ASV=ddF[[i]]$sequence[ddF[[i]]$map[1:topx]], SampleID=i)) } df$ASV2 <- df$ASV levels(df$ASV2) <- paste0("ASV-",1:length(levels(df$ASV2))) df$sequence2 <- df$sequence levels(df$sequence2) <- paste0("sequence-",1:length(levels(df$sequence2))) df$ASVrep <- as.character(df$sequence)==as.character(df$ASV) df$ASVrep2[df$ASVrep] <- as.character(df$ASV2[df$ASVrep]) ggplot(df, aes(x = sequence2, y = Abundance, fill=ASV2)) + geom_bar(stat="identity") + facet_grid(SampleID ~ ., scale="free_y") + theme(axis.text.x = element_text(angle=90, hjust=1)) ggplot(df, aes(x = sequence2, y = Abundance, fill=ASVrep2)) + geom_bar(stat="identity") + facet_grid(SampleID ~ ., scale="free_y") + theme(axis.text.x = element_text(angle=90, hjust=1)) ### identical sequences in the various samples get assigned to different ASVs #### try to resolve this issue with priors #### priors <- unique(topxseqs) ddF2 <- list() for (i in names(dereps)){ if (substr(i,2,2)==8){ 
ddF2[[i]] <- dada(derep = dereps[[i]], err = err8, priors = priors) }else{ ddF2[[i]] <- dada(derep = dereps[[i]], err = err9, priors = priors) } } #build dataframe df2 <- data.frame() for(i in names(dereps)){ df2 <- rbind(df2, data.frame(sequence=names(dereps[[i]]$uniques[1:topx]), Abundance=unname(dereps[[i]]$uniques[1:topx]), ASV=ddF2[[i]]$sequence[ddF2[[i]]$map[1:topx]], SampleID=i)) } df2$ASV2 <- df2$ASV levels(df2$ASV2) <- paste0("ASV-",1:length(levels(df2$ASV2))) df2$sequence2 <- df2$sequence levels(df2$sequence2) <- paste0("sequence-",1:length(levels(df2$sequence2))) df2$ASVrep <- as.character(df2$sequence)==as.character(df2$ASV) df2$ASVrep2[df2$ASVrep] <- as.character(df2$ASV2[df2$ASVrep]) ggplot(df2, aes(x = sequence2, y = Abundance, fill=ASV2)) + geom_bar(stat="identity") + facet_grid(SampleID ~ ., scale="free_y") + theme(axis.text.x = element_text(angle=90, hjust=1)) ggplot(df2, aes(x = sequence2, y = Abundance, fill=ASVrep2)) + geom_bar(stat="identity") + facet_grid(SampleID ~ ., scale="free_y") + theme(axis.text.x = element_text(angle=90, hjust=1)) #### try to resolve this issue with 1 specific prior #### priors <- as.character(df2$sequence[df2$sequence2=="sequence-4"][1]) ddF3 <- list() for (i in names(dereps)){ if (substr(i,2,2)==8){ ddF3[[i]] <- dada(derep = dereps[[i]], err = err8, priors = priors) }else{ ddF3[[i]] <- dada(derep = dereps[[i]], err = err9, priors = priors) } } #build dataframe df3 <- data.frame() for(i in names(dereps)){ df3 <- rbind(df3, data.frame(sequence=names(dereps[[i]]$uniques[1:topx]), Abundance=unname(dereps[[i]]$uniques[1:topx]), ASV=ddF3[[i]]$sequence[ddF3[[i]]$map[1:topx]], SampleID=i)) } df3$ASV2 <- df3$ASV levels(df3$ASV2) <- paste0("ASV-",1:length(levels(df3$ASV2))) df3$sequence2 <- df3$sequence levels(df3$sequence2) <- paste0("sequence-",1:length(levels(df3$sequence2))) df3$ASVrep <- as.character(df3$sequence)==as.character(df3$ASV) df3$ASVrep2[df3$ASVrep] <- as.character(df3$ASV2[df3$ASVrep]) ggplot(df3, aes(x = 
sequence2, y = Abundance, fill=ASV2)) + geom_bar(stat="identity") + facet_grid(SampleID ~ ., scale="free_y") + theme(axis.text.x = element_text(angle=90, hjust=1)) ggplot(df3, aes(x = sequence2, y = Abundance, fill=ASVrep2)) + geom_bar(stat="identity") + facet_grid(SampleID ~ ., scale="free_y") + theme(axis.text.x = element_text(angle=90, hjust=1)) #### how to the sequences relate ? levels(df3$sequence)[c(1:7)] # they clearly differ in the length of the homopolymer #### is the issue due to the indel? nwalign(names(dereps[[1]]$uniques)[2],names(dereps[[1]]$uniques)[3]) drmod <- dereps[[1]] #make a substitution rather than an indel names(drmod$uniques)[2] <- "AAAAGTCGTAACAAGGTTTCCGTAGGTGAACCTGCGGAAGGATCATTAAAGAAATTTAATAATTTTGAAAATGGATTTTTTTTTTTAGTTTTGGCAAGAGCATGAGAGCTTTTACTGGGC" ddFmod <- dada(derep = drmod, err = err8) ddFref <- dada(derep = dereps[[1]], err = err8) #### truncate homopolymers in forward read #### fqs <- list.files("./", pattern = "I.*fastq.gz") dereps <- list() for (file in fqs){ dereps[[file]] <- derepFastq(file) } names(dereps) <- gsub("_.*","",names(dereps)) err8 <- readRDS("ITS_0008.R1.RDS") err9 <- readRDS("ITS_0009.R1.RDS") ddF <- list() for (i in names(dereps)){ if (substr(i,2,2)==8){ ddF[[i]] <- dada(derep = dereps[[i]], err = err8) }else{ ddF[[i]] <- dada(derep = dereps[[i]], err = err9) } } for (i in names(ddF)){ ddF[[i]]$sequence <- gsub("AAAAAA*","AAAAAA",ddF[[i]]$sequence) ddF[[i]]$sequence <- gsub("TTTTTT*","TTTTTT",ddF[[i]]$sequence) ddF[[i]]$sequence <- gsub("CCCCCC*","CCCCCC",ddF[[i]]$sequence) ddF[[i]]$sequence <- gsub("GGGGGG*","GGGGGG",ddF[[i]]$sequence) } topx=10 topxseqs <- c() for (i in dereps){ topxseqs <- c(topxseqs,names(i$uniques[1:topx])) } table(table(topxseqs)) #6 sequences are in the top10 in all samples #build dataframe df <- data.frame() for(i in names(dereps)){ df <- rbind(df, data.frame(sequence=names(dereps[[i]]$uniques[1:topx]), Abundance=unname(dereps[[i]]$uniques[1:topx]), 
ASV=ddF[[i]]$sequence[ddF[[i]]$map[1:topx]], SampleID=i)) } df$ASV2 <- df$ASV levels(df$ASV2) <- paste0("ASV-",1:length(levels(df$ASV2))) df$sequence2 <- df$sequence levels(df$sequence2) <- paste0("sequence-",1:length(levels(df$sequence2))) df$ASVrep <- as.character(df$sequence)==as.character(df$ASV) df$ASVrep2[df$ASVrep] <- as.character(df$ASV2[df$ASVrep]) ggplot(df, aes(x = sequence2, y = Abundance, fill=ASV2)) + geom_bar(stat="identity") + facet_grid(SampleID ~ ., scale="free_y") + theme(axis.text.x = element_text(angle=90, hjust=1)) ggplot(df, aes(x = sequence2, y = Abundance, fill=ASVrep2)) + geom_bar(stat="identity") + facet_grid(SampleID ~ ., scale="free_y") + theme(axis.text.x = element_text(angle=90, hjust=1)) st <- makeSequenceTable(ddF) st2 <- st colnames(st2) <- gsub("AAAAAA*","AAAAAA",colnames(st2)) colnames(st2) <- gsub("TTTTTT*","TTTTTT",colnames(st2)) colnames(st2) <- gsub("CCCCCC*","CCCCCC",colnames(st2)) colnames(st2) <- gsub("GGGGGG*","GGGGGG",colnames(st2)) st2 <- collapseNoMismatch(st2) tt <- cbind(make.unique(substr(colnames(st),1,10)),colnames(st)) rownames(tt) <- tt[,2] ps <- phyloseq(otu_table(st, taxa_are_rows = F), sample_data(data.frame(row.names=names(ddF), Sample_Namex=names(ddF), Lib=substr(names(ddF),1,2), Polymerase=c("T","P","T","P","T","P"))), tax_table(tt) ) ps.rare <- rarefy_even_depth(ps) ord <- ordinate(ps.rare, method = "PCoA", distance = "bray") plot_ordination(ps, ord, label = "Sample_Namex", color="Polymerase") ps.temp <- prune_taxa(colnames(ps.rare@otu_table) %in% colnames(ps.rare@otu_table)[1:10], ps.rare) plot_bar(ps.temp, fill = "ta1") ps.rare <- rarefy_even_depth(ps) ord <- ordinate(ps.rare, method = "PCoA", distance = "bray") plot_ordination(ps, ord, label = "Sample_Namex", color="Polymerase") tt <- cbind(make.unique(substr(colnames(st2),1,10)),colnames(st2)) rownames(tt) <- tt[,2] ps <- phyloseq(otu_table(st2, taxa_are_rows = F), sample_data(data.frame(row.names=names(ddF), Sample_Namex=names(ddF), 
Lib=substr(names(ddF),1,2), Polymerase=c("T","P","T","P","T","P"))), tax_table(tt) ) ps.rare <- rarefy_even_depth(ps) ord <- ordinate(ps.rare, method = "PCoA", distance = "bray") plot_ordination(ps, ord, label = "Sample_Namex", color="Polymerase") ps.temp <- prune_taxa(colnames(ps.rare@otu_table) %in% colnames(ps.rare@otu_table)[1:10], ps.rare) plot_bar(ps.temp, fill = "ta1") #### where to collapse the homopolyer and nomismatch in the workflow? #### fqs <- list.files("./", pattern = "I.*fastq.gz") dereps <- list() for (file in fqs){ dereps[[file]] <- derepFastq(file) } names(dereps) <- gsub("_.*","",names(dereps)) err8 <- readRDS("ITS_0008.R1.RDS") err9 <- readRDS("ITS_0009.R1.RDS") ddF <- list() for (i in names(dereps)){ if (substr(i,2,2)==8){ ddF[[i]] <- dada(derep = dereps[[i]], err = err8) }else{ ddF[[i]] <- dada(derep = dereps[[i]], err = err9) } } for (i in names(ddF)){ ddF[[i]]$sequence <- gsub("AAAAAA*","AAAAAA",ddF[[i]]$sequence) ddF[[i]]$sequence <- gsub("TTTTTT*","TTTTTT",ddF[[i]]$sequence) ddF[[i]]$sequence <- gsub("CCCCCC*","CCCCCC",ddF[[i]]$sequence) ddF[[i]]$sequence <- gsub("GGGGGG*","GGGGGG",ddF[[i]]$sequence) } table(ddF[[1]]$map) aggregate(ddF[[i]]$denoised, by=list(ddF[[i]]$sequence), FUN=sum) aggregate(ddF[[i]]$denoised, by=list(ddF[[i]]$sequence), FUN=sum) aggregate(ddF[[i]]$denoised, by=list(ddF[[i]]$sequence), FUN=sum) aggregate(ddF[[i]]$denoised, by=list(ddF[[i]]$sequence), FUN=sum) aggregate(ddF[[i]]$denoised, by=list(ddF[[i]]$sequence), FUN=sum) x <- c() for (i in names(dereps)){ x[i] <- length(dereps[[i]]$uniques)/sum(dereps[[i]]$uniques) } ps@sam_data$seqcom <- 1-x sequecne()
40491ead1ceda4fd5ed5863066ba135a39e2005e
4cc92a349885a505896de9056887465f5db40c76
/code/NC13GroomR2.r
6b5dddabafbea3af70f500553d3ce1328fe6a8eb
[]
no_license
guanjiahui/Social-Network_rhesus-macaques
cdaba33cbc333c00e67963e7dffd2c622d66e333
aee1f7583c168ca17a9c83bc0659d1e645cb96e6
refs/heads/master
2020-04-18T14:39:45.612811
2019-01-25T18:46:08
2019-01-25T18:46:08
167,594,532
0
0
null
null
null
null
UTF-8
R
false
false
1,509
r
NC13GroomR2.r
NC13_RU2_Grooming_Matrix <- read.csv("~/Dropbox/Research/SNH_health profile data for Fushing-selected/NC13_RU2_Grooming_Matrix.csv") NC13GroomR2=as.matrix(NC13_RU2_Grooming_Matrix [,-1]) colnames(NC13GroomR2)=NC13_RU2_Grooming_Matrix[,1] rownames(NC13GroomR2)=NC13_RU2_Grooming_Matrix[,1] ############## win2=conductance(NC13GroomR2,maxLength = 4) win_prob2=win2$p.hat temp=c(0.0045,0.07,0.15,0.3,0.8,1) Ens13.groom2=Eigen.plot2(temp, selected.id=c(1,2,3,4,5,6),win_prob2) DCG13.groom2=DCGtree.plot(num.clusters.selected=c(1,1,2,3,4,6), "NC13GroomR2 tree",Ens13.groom2,temp) plot(DCG13.groom2,hang=-1,main="NC13GroomR2 tree") G2=cutree(DCG.groom2,k=5) ############################ Eigen.plot2=function(tempinv,selected.id,D){ tempinv.selected <- tempinv[selected.id] ensM<- list() # your ensemble matrices at each temperature. for ( i in 1:length(selected.id)) ensM[[i]]=EstClust(GetSim2(D,tempinv.selected[i]), MaxIt=1000, m=5) #check eigenvalues par(mfrow=c(2,3)) for (j in 1:length(selected.id)){ Ens=ensM[[j]] N <- nrow(Ens) Dinvsqrt <- diag(sapply(1:N, function(i) 1/sqrt(sum(Ens[i,])))) Lsym <- diag(N) - Dinvsqrt %*% Ens %*% Dinvsqrt Eigen <- eigen(Lsym)$values Eigen <- sort(1 - Eigen/Eigen[1], decreasing=TRUE) #cat(Eigen[1:25],"\n") cat("difference","\n",diff(Eigen[1:20]),"\n") plot(Eigen[1:20],type="b",main=j) } return(ensM) }
5b7ebc73b980bee0227e7f4f5eaa8709c0f3aca5
cc7ce923db8885f1b340384a35fd96eb9d0ed797
/R/filter_reformat_vcf_df.R
4d940dc5e5cad878977106625551eac43f24fe09
[ "MIT" ]
permissive
collaborativebioinformatics/snpReportR
0886c8d2924bff70f766948081f523bcbd9c43ca
48066a3a5ca9002d03b5b7a93009207a50d31b11
refs/heads/main
2023-04-09T15:59:09.877204
2021-04-22T23:59:49
2021-04-22T23:59:49
324,649,334
4
2
null
null
null
null
UTF-8
R
false
false
2,726
r
filter_reformat_vcf_df.R
#Jenny Smith #Jan. 8, 2021 #Purpose: reformat the VCF output from v2.5 CTAT Mutations Pipeline #' Filter and Reformat the VCF from v2.5 CTAT Mutations Pipeline #' #' @param vcf.df is a dataframe derived from vcfR package #' @param vcf.s4 is a s4 vectors object from bioconductor VariantAnnotation package #' #' @return #' @export #' #' @examples #'\dontrun{ #' vcf <- vcfR::read.vcfR("/path/to/vcf") #' vcf.df <- cbind(as.data.frame(vcfR::getFIX(vcf)), vcfR::INFO2df(vcf)) #' vcf.s4 <- VariantAnnotation::readVcf("/path/to/vcf") #' vcf.filtered <- filter_ctat_vcf(vcf.df,vcf.s4) #'} #' #' @import dplyr filter_ctat_vcf <- function(vcf.df, vcf.s4){ #Define annotations in VCF header lines functional_annots_names <- VariantAnnotation::header(vcf.s4) %>% VariantAnnotation::info(.) %>% as.data.frame(.) functional_annots_names <- functional_annots_names["ANN","Description"] %>% gsub("^.+\\'(.+)\\'","\\1",.) %>% stringr::str_split(., pattern = "\\|") %>% unlist() %>% gsub("\\s", "", .) functional_annots_names <- functional_annots_names[-length(functional_annots_names)] # expland the annotations from ANN attribute of the VCF file for the first 3 transcripts. 
# https://www.biostars.org/p/226965/ functional_annots.df <- data.frame(do.call(rbind, strsplit(as.vector(vcf.df$ANN), split = "\\|"))) functional_annots.df <- functional_annots.df[,1:45] #keep only the first 3 transcripts colnames(functional_annots.df) <- paste(functional_annots_names,rep(1:3, each=15), sep="_") #Run the filtering function vcf.df.filter <- vcf.df %>% mutate(S4_Vector_IDs=names(SummarizedExperiment::rowRanges(vcf.s4))) %>% bind_cols(., functional_annots.df) %>% mutate(rsID=ifelse(!is.na(RS), paste0("rs", RS), RS)) %>% mutate_at(vars(chasmplus_pval,vest_pval), ~as.numeric(.)) %>% group_by(GENE) %>% mutate(Number_SNVs_per_Gene=n()) %>% ungroup() %>% dplyr::select(GENE,Number_SNVs_per_Gene, COSMIC_ID, rsID,CHROM:ALT, FATHMM,SPLICEADJ, matches("chasmplus_(pval|score)"), matches("vest_(pval|score)"), TISSUE,TUMOR, Annotation_1,Annotation_Impact_1,Feature_Type_1, Transcript_BioType_1, coding_DNA_change_1=HGVS.c_1, protein_change_1=HGVS.p_1, -ANN, everything(), ANN) %>% dplyr::filter(grepl("PATHOGENIC", FATHMM) | !is.na(SPLICEADJ)) %>% dplyr::filter(grepl("HIGH|MODERATE",Annotation_Impact_1) | !is.na(SPLICEADJ)) %>% arrange(desc(chasmplus_score), desc(vest_score), desc(Number_SNVs_per_Gene), Annotation_Impact_1) return(vcf.df.filter) }
7f42c8ded979f948d2aabdda7b844fe092f392ae
1540706522486b205bb278399ba86986e264f906
/plot3.R
42bed4ba70ca700590126bb846cd03163dd7f124
[]
no_license
wunzeco/exdata-project2
136d021bb95f5b6303b9994390de54c862d1d435
54681ddb1385990195252633c6c8f07da6178e3d
refs/heads/master
2016-09-09T21:13:08.006451
2014-08-29T06:46:59
2014-08-29T06:46:59
null
0
0
null
null
null
null
UTF-8
R
false
false
970
r
plot3.R
## Question 3. ## Of the four types of sources indicated by the type (point, nonpoint, onroad, ## nonroad) variable, which of these four sources have seen decreases in emissions ## from 1999โ€“2008 for Baltimore City? Which have seen increases in emissions from ## 1999โ€“2008? ## Use the ggplot2 plotting system to make a plot to answer this question. library(plyr) library(ggplot2) NEI <- readRDS("../summarySCC_PM25.rds") SCC <- readRDS("../Source_Classification_Code.rds") ## Baltimore NEI dataset bNEI <- subset(NEI, fips == "24510") ## Summarised NEI: Total emissions from 1999 to 2008 in Baltimore City sNEI <- ddply(bNEI, .(year, type), summarise, total = sum(Emissions)) ## plot graph to png file png(filename = "plot3.png", width = 960, height = 480) qplot(year, total, data = sNEI, geom = c('point','line'), facets = . ~ type, xlab = "Year", ylab = "PM2.5 Emissions", main = "Baltimore City - Total emissions (1999 to 2008)") dev.off()
44cd1175b96702899ebf389fd40eeb602c845030
065495790d7e78412f26434f8dbe3d67eb48c4ba
/R/testmaf.R
6ba8555a004772820dc85b43a927427cfc095539
[]
no_license
lculibrk/Ploidetect
8202834315ec7c6861ed5332233498c7b65c8930
0bf8e1f8f670717adde284f7d0cc47f162150d98
refs/heads/main
2023-05-12T14:09:58.112373
2023-05-05T20:32:30
2023-05-05T20:35:59
224,064,456
6
0
null
2023-05-05T20:36:00
2019-11-26T00:01:31
R
UTF-8
R
false
false
1,784
r
testmaf.R
#' @export testMAF <- function(CN, tp){ ## This special case results in division by zero if(CN == 0 & tp == 1){ return(c("0" = 0.5)) } #if(CN < 0 | CN > 11){ # stop("Please enter a CN between 0 and 8") #} np <- 1-tp halfcn <- ceiling(CN/2) if(CN < 15){ major_allele_possibilities = seq(from = 0, to = min(10, CN), by = 1) } else if(CN < 60){ major_allele_possibilities = c(seq(from = 0, to = 10, by = 1), seq(from = 15, to = min(50, CN), by = 5)) } else if(CN < 150){ major_allele_possibilities = c(seq(from = 0, to = 10, by = 1), seq(from = 15, to = 50, by = 5), seq(from = 60, to = min(140, CN), by = 10)) } else if(CN < 250){ major_allele_possibilities = c(seq(from = 0, to = 10, by = 1), seq(from = 15, to = 50, by = 5), seq(from = 60, to = 140, by = 10), seq(from = 150, to = CN, by = 50)) } else{ major_allele_possibilities = c(seq(from = 0, to = 10, by = 1), seq(from = 15, to = 50, by = 5), seq(from = 60, to = 140, by = 10), seq(from = 150, to = 240, by = 50), seq(from = 250, to = CN, by = 100)) } output <- c() for(al in major_allele_possibilities){ majoraf <- ((al * tp) + (1 * np))/((CN * tp) + (2 * np)) output[as.character(al)] <- majoraf } return(output) } #' @export testMAF_sc <- function(CN, tp){ CN <- max(CN, 0) fraction = CN - floor(CN) if(fraction == 0){ return(testMAF(CN, tp)) } np = 1-tp base_cn <- floor(CN) which_fraction = c(0, 1) major_allele_possibilities = seq(from = 0, to = base_cn, by = 1) major_allele_possibilities = sort(c(major_allele_possibilities, major_allele_possibilities + fraction)) output <- c() for(al in major_allele_possibilities){ output[paste0(al)] <- ((al * tp) + (1 * np))/((CN * tp) + (2 * np)) } return(output) }
95acf8576640395f515f2984f06d423b987c0003
77bbd565e1da3809dbc758dcf75d90ab9b31c855
/man/get_created.Rd
b5c5be4aa8c97d96976ca44b6659e1ee4f4ea4cc
[]
no_license
gmyrland/fduper
21c285e7a7282e41854aafbd084e665c6cccf249
8faba6c9fca284a0016bcdc57f946286080772f7
refs/heads/master
2021-08-14T20:03:03.558646
2017-11-16T16:32:46
2017-11-16T16:32:46
110,996,016
0
0
null
null
null
null
UTF-8
R
false
true
339
rd
get_created.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/file_properties.R \name{get_created} \alias{get_created} \title{Get file created date} \usage{ get_created(path) } \arguments{ \item{path}{The path to a file} } \value{ The file created date of the file } \description{ Returns the file created date of a file }
29137183eed6a1fccb02e866bd84603370c729ac
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/miceadds/examples/library_install.Rd.R
53df2194a76541839b09234d93b70ef2e918213f
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
331
r
library_install.Rd.R
library(miceadds) ### Name: library_install ### Title: R Utilities: Loading a Package or Installation of a Package if ### Necessary ### Aliases: library_install ### Keywords: R utilities ### ** Examples ## Not run: ##D # try to load packages PP and MCMCglmm ##D library_install( pkg=c("PP", "MCMCglmm") ) ## End(Not run)
891c37ce23282e87426fe68f671d2c2e195353c6
2e6555b08b874efe0a455a3bd284c715a7cff976
/man/mergedCEdata.Rd
014aa69263deeb1639e192e0313440f7a9e72abe
[]
no_license
dutchjes/MSMSsim
99d17ead953cb382392cad70eef3a7877e1ffe20
89ced2837f6f597c79198d8fe152fa5d23a6e2ad
refs/heads/master
2022-01-12T11:12:22.893585
2019-06-26T13:43:59
2019-06-26T13:43:59
75,643,765
3
0
null
null
null
null
UTF-8
R
false
true
561
rd
mergedCEdata.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mergedCEdata_func.R \name{mergedCEdata} \alias{mergedCEdata} \title{Merged CE data for comparison} \usage{ mergedCEdata(all.frag, pairs, dmz = 0.001, intensity.type) } \arguments{ \item{all.frag}{} \item{pairs}{} \item{dmz}{} \item{intensity.type}{} } \value{ list of two lists, first with merged spectra of parent, second with merged spectra of TP. New function mergedSpectra is better because doesn't require input of the pairs. } \description{ Merged CE data for comparison }
63b1039dd582e299962eec30601612c5c8e2f0f3
593420234664a284d6919ccc97d4c5fe0dc396fd
/section_6_1_1/section_6_1_1_tests.R
037cfcabddb247116b94c955cd77e15ae7cd8af4
[]
no_license
bgs25/scope-experiments
db3c7edb03d193fb580a6ce2765b89a12ad96d67
aea4872ffc5504c37262fa37f5c58e700d59329e
refs/heads/main
2023-04-20T12:01:34.768821
2021-05-06T13:45:15
2021-05-06T13:45:15
362,241,334
0
0
null
null
null
null
UTF-8
R
false
false
8,271
r
section_6_1_1_tests.R
source("casanovafit.factor.crossval.R") library(CatReg) library(bazar) library(randomForest) library(tictoc) library(DMRnet) library(rpart) library(effectFusion) # This contains the lists that results will be stored in scopecv_model_list = list() scopes_model_list = list() scopem_model_list = list() scopel_model_list = list() acc_model_list = list() rf_model_list = list() cart_model_list = list() dmr_model_list = list() bayes_model_list = list() train.list = list() response.list = list() # signal regime 1, 2 # noise regime 1, 2, 3, 4 # Just fit models, on monday we'll compute prediction error, estimation error, signal proportion, clustering, et cetera #with this regime, probability of not seeing category in training set is < 0.00001 in 1000 replicates so we ignore that it could happen... running it on loads of computers anyway so not an issue n_jobs = 250 gammaseq = c(0.001, 2^(0:7), 1000) #for the cross-validating gamma gamsol = list() bestcv = rep(0, length(gammaseq)) grid.safe = 200 print("Pausing...") Sys.sleep(rexp(1,0.1)) print("Go!") for(job_id in 1:n_jobs) { model.index = 1 # Main code to run. This should write to a file job.txt in the Results directory for ( setting in 1:3 ) { train.design = train.list[[ setting ]] for ( noise.level in 1:5 ) { # Now actually fit the models y = response.list[[ 5*(setting - 1) + noise.level ]] Sys.sleep(rexp(1,0.5)) print(setting) print(noise.level) # Script for cross-validating gamma. Fixes random elements of the # cross-validation process to ensure that coordinate descent cycle order # is preserved, otherwise the randomness in the different CV curves depending # on this causes a lot of variance in the parameters selected. 
print("scope CV") cycleorder = sample(1:10) cvfold = as.integer(sample(ceiling((1:100)*5/100))) starttime = tic() for ( gam in 1:length(gammaseq) ) { print(paste0("gamma = ", gammaseq[gam])) gamsol[[ gam ]] = scope(y, data.frame(matrix(1, 500, 1) train.design), interceptxlinear = T, gamma = gammaseq[gam], default.length = 150, BICterminate = 40, simply.cross.validated = TRUE, silent = TRUE, blockorder = cycleorder, FoldAssignment = cvfold) bestcv[ gam ] = min(gamsol[[ gam ]][ , 1 ]) } bestgamind = which.min(bestcv) bestgam = gammaseq[ bestgamind ] bestlam = t(gamsol[[ bestgamind ]][ , -1 ]) terminationpoint = which.min(gamsol[[ bestgamind ]][ , 1 ]) grid.safe = 200 bestlam = data.frame(matrix(0, 10, grid.safe), bestlam) lambdaratio = bestlam[ 1, grid.safe + 1 ] / bestlam[ 1, grid.safe + 2 ] for ( i in seq(grid.safe, 1, -1) ) { bestlam[ , i ] = lambdaratio * bestlam[ , i + 1 ] } bestlam = bestlam[ , 1:(terminationpoint + grid.safe) ] print("Cross-validated gamma and lambda, now fitting final version") solution = scope(y, data.frame(matrix(1, 500, 1), train.design), interceptxlinear = T, gamma = bestgam, blockorder = cycleorder, FoldAssignment = cvfold, BICterminate = 40) stoptime = tic() duration = stoptime - starttime scopecv_model_list[[ model.index ]] = list(solution, bestgam, bestcv, duration) print("scope small gamma") starttime = tic() solution = scope(y, data.frame(matrix(1, 500, 1), train.design), interceptxlinear = T, default.length = 150, BICterminate = 40) stoptime = tic() duration = stoptime - starttime scopes_model_list[[ model.index ]] = list(solution, duration) print("scope medium gamma") starttime = tic() solution =scope(y, data.frame(matrix(1, 500, 1), train.design), interceptxlinear = T, default.length = 150, BICterminate = 40, gamma = 16) stoptime = tic() duration = stoptime - starttime scopem_model_list[[ model.index ]] = list(solution, duration) print("scope large gamma") solution =scope(y, data.frame(matrix(1, 500, 1), train.design), 
interceptxlinear = T, default.length = 150, BICterminate = 40, gamma = 32) stoptime = tic() duration = stoptime - starttime scopel_model_list[[ model.index ]] = list(solution, duration) print("Random Forest") starttime = tic() solution = randomForest(y ~ ., data = data.frame(y, train.design)) stoptime = tic() duration = stoptime - starttime rf_model_list[[ model.index ]] = list(solution, duration) print("CART") starttime = tic() cpsolution = rpart(y ~ ., data = data.frame(y, train.design)) cptable = printcp(cpsolution) minerror = which.min(cptable[ , 4 ]) minthresh = cptable[ minerror, 4 ] + cptable[ minerror, 5 ] # This is using the 1-SE rule for pruning these trees bestcp = min(which(cptable[ , 4 ] < minthresh)) if ( bestcp > 1 ) { cpthresh = 0.5*(cptable[ bestcp, 1 ] + cptable[ bestcp - 1, 1 ]) } else { cpthresh = 1 } solution = prune(cpsolution, cp = cpthresh) stoptime = tic() duration = stoptime - starttime cart_model_list[[ model.index ]] = list(solution, cpsolution, duration) print("DMRnet") starttime = tic() if ( noise.level == 1 ) { print("adding small noise to y") cvy = y + 0.1 * rnorm(500) # This is required because DMRnet often errors in exact noiseless case due to diving through by 0 somewhere } else { cvy = y } print("fitting cv solution") cvsolution = cv.DMRnet(train.design, cvy, nfolds = 5) cvmaxp = cvsolution$df.min - 1 print(cvmaxp) print("fitting full solution") solution = DMRnet(train.design, cvy, maxp = cvmaxp) solution = solution$beta[ , 1 ] stoptime = tic() duration = stoptime - starttime dmr_model_list[[ model.index ]] = list(solution, cvsolution, duration) print("Effect fusion") starttime = tic() if ( ( noise.level == 1 ) || ( noise.level == 5 ) ) { solution = NULL } else { solution = effectFusion(y, train.design, types = rep("n", 10), method = "FinMix") solution = solution$refit$beta } # Modelling category levels in a bayesian way as a sparse finite gaussian mixture model stoptime = tic() duration = stoptime - starttime bayes_model_list[[ 
model.index ]] = list(solution, duration) print("2-stage adaptive casanova") starttime = tic() if ( ( noise.level == 1 ) || ( noise.level == 5 ) ) { solution = NULL } else { solution = casanovafit(y, matrix(1, 500, 1), train.design, interceptxlinear = T, prev.coefficients = unlist(cas_model_list[[ model.index ]][[ 1 ]][[ 1 ]]), BICterminate = 50) } stoptime = tic() duration = stoptime - starttime acc_model_list[[ model.index ]] = list(solution, duration) model.index = model.index + 1 } } save(lm_model_list, ols_model_list, cas_model_list, acc_model_list, scopecv_model_list, scopes_model_list, scopem_model_list, scopel_model_list, rf_model_list, cart_model_list, dmr_model_list, bayes_model_list, train.list, response.list, file=paste0("section_6_1_1_raw.Rdata")) }
e37a4e75aac508ccab5e5f3536b9685c121154e3
2ec9ffc060f300b96b34bbc994bcb7d85924c817
/orientationwords/data-raw/make.R
472723f40bba7e0c3d730804c17e0e599e4bba5e
[]
no_license
lupyanlab/orientation-words
da5a34c96f00c2f3c1c3f8410fb3f18caa14b8e6
1de56e4c7b3e1ac86c5a4e486380387b04637401
refs/heads/master
2021-01-10T07:42:57.938815
2015-11-30T03:33:44
2015-11-30T03:33:44
46,096,020
0
0
null
null
null
null
UTF-8
R
false
false
658
r
make.R
library(devtools) library(dplyr) load_all() make_unilateral <- function(overwrite = FALSE) { unilateral_dir <- "data-raw/unilateral/" make_unilateral_version <- function(version) { regex_keys <- list("MOW1", "MOW3") compile(unilateral_dir, regex_keys[[version]]) %>% clean %>% recode %>% mutate(version = version) } unilateral <- plyr::rbind.fill( make_unilateral_version(1), make_unilateral_version(2) ) use_data(unilateral, overwrite = overwrite) } make_bilateral <- function(overwrite = FALSE) { bilateral <- compile("data-raw/bilateral/") %>% clean %>% recode use_data(bilateral, overwrite = overwrite) }
6822f3b219b56a757960a640de1489e70a7bbd15
abbc59b48a40e5190c6c32c86e902c1e986eed20
/10 Basic Inferential Statistics/10c_analysis_of_survey_data.R
f0d19ff4e2d056986318572a5b9197532354c265
[]
no_license
IonelaM/Data-Processing-Analysis-Science-with-R
f49788f5bea439894155d80e6df44cde534ac7a5
cdc6086b0e2b9adf3750cc4682e3d38850389a90
refs/heads/master
2020-12-15T18:06:13.754626
2020-01-12T05:21:00
2020-01-12T05:21:00
null
0
0
null
null
null
null
UTF-8
R
false
false
16,085
r
10c_analysis_of_survey_data.R
############################################################################
### Al.I. Cuza University of Iasi
### Faculty of Economics and Business Administration
### Department of Accounting, Information Systems and Statistics
############################################################################
### Data Processing/Analysis/Science with R
### 10c. Analysis of Survey Data (Likert)
### (comments translated to English; runtime strings deliberately kept
###  in Romanian so the generated plots/labels are unchanged)
############################################################################
### See also the presentation:
### https://github.com/marinfotache/Data-Processing-Analysis-Science-with-R/blob/master/10%20Basic%20Inferential%20Statistics/10_basic_inferential_statistics.pptx
############################################################################
## last update: 2019-03-25

# install.packages("likert")
library(likert)     # Likert-scale summaries and plots
# citation('likert')
library(scales)     # was require(); library() fails loudly when missing
library(tidyverse)

############################################################################
### Download the necessary data sets for this script from:
### https://github.com/marinfotache/Data-Processing-Analysis-Science-with-R/tree/master/DataSets
### then set the download directory as the working directory, e.g.:
############################################################################
# NOTE(review): machine-specific path — adjust before running elsewhere
setwd('/Users/marinfotache/Google Drive/R(Mac)/DataSets')

### Load the survey data (provides `evaluari`, `evaluari.2`, ...)
load(file = 'chestionarSIA2013.RData')

#######################################################################
### For variable visualization and analysis, see scripts 07e and 07f
### (EDA is not covered here; the focus is on Likert data)
#######################################################################

#######################################################################
### I. Overall evaluation: programme, teachers, disciplines (Likert)
#######################################################################
# https://stackoverflow.com/questions/43646659/likert-in-r-with-unequal-number-of-factor-levels/43649056
names(evaluari.2)

# Likert level labels; the answers are stored as the characters "1".."5"
niveluri <- c("foarte scฤƒzut", "scฤƒzut", "mediu", "bun", "foarte bun")
niveluri.en <- c("very poor", "poor", "average", "good", "very good")  # unused below; kept for reference

atrib <- c("evMaster", "evProfi", "evDiscipline")
evGenerala <- evaluari.2[, atrib]
str(evGenerala)
names(evGenerala) <- c('Master', 'Profesori', 'Discipline')

# Recode the numeric codes into the textual labels
evGenerala[evGenerala == "1"] <- "foarte scฤƒzut"
evGenerala[evGenerala == "2"] <- "scฤƒzut"
evGenerala[evGenerala == "3"] <- "mediu"
evGenerala[evGenerala == "4"] <- "bun"
evGenerala[evGenerala == "5"] <- "foarte bun"

# Every column becomes an ordered factor carrying the full level set
for (i in seq_len(ncol(evGenerala))) {
  evGenerala[, i] <- factor(evGenerala[, i], levels = niveluri, ordered = TRUE)
}
names(evGenerala)
str(evGenerala)

###################################################################
### I.a Visualising the Likert data
###################################################################
l.evGenerala <- likert(evGenerala, nlevels = 5)
l.evGenerala
summary(l.evGenerala)
summary(l.evGenerala, center = 2.5)

# Diverging stacked bar chart
plot(l.evGenerala, text.size = 4.5) +
  ggtitle("Evaluare generalฤƒ: profesori, discipline ศ™i master") +
  theme(plot.title = element_text(colour = "black", size = "16")) +
  theme(axis.text.y = element_text(colour = "black", size = "14", hjust = 0)) +
  theme(axis.text.x = element_text(colour = "black", size = "14")) +
  theme(legend.text = element_text(colour = "black", size = "14"))

# Alternative version: non-centered bars with percentages printed on them
library(plyr)
plot(l.evGenerala, plot.percents = TRUE, plot.percent.low = FALSE,
     plot.percent.high = FALSE, text.size = 4.5, centered = FALSE) +
  ggtitle("Evaluare generalฤƒ: profesori, discipline ศ™i master") +
  theme(plot.title = element_text(colour = "black", size = 17)) +
  theme(axis.text.y = element_text(colour = "black", size = 14, hjust = 0)) +
  theme(axis.text.x = element_text(colour = "black", size = 14)) +
  theme(legend.text = element_text(colour = "black", size = 14))

# Heat map
plot(l.evGenerala, type = 'heat', wrap = 30, text.size = 4)
plot(l.evGenerala, type = 'heat', wrap = 30, text.size = 4) +
  ggtitle("Diagrama heatmap a evaluarii \npersonalului didactic, programului si disciplinelor") +
  theme(plot.title = element_text(colour = "black", size = "18")) +
  theme(axis.text.y = element_text(colour = "black", size = "14", hjust = 0)) +
  theme(axis.text.x = element_text(colour = "black", size = "12")) +
  theme(legend.text = element_text(colour = "black", size = "12"))

# Density plot
plot(l.evGenerala, type = 'density')
plot(l.evGenerala, type = 'density', facet = FALSE)

###################################################################
### I.b Visualising the Likert data, with results grouped by gender
###################################################################
names(evaluari.2)
evaluari.3 <- subset(evaluari.2, !is.na(sex))
evaluari.3$sex <- as.factor(evaluari.3$sex)
# Numeric copies are needed later for the non-parametric tests in I.c
evaluari.3$evMaster <- as.numeric(evaluari.3$evMaster)
evaluari.3$evProfi <- as.numeric(evaluari.3$evProfi)
evaluari.3$evDiscipline <- as.numeric(evaluari.3$evDiscipline)

atrib <- c('evMaster', 'evProfi', 'evDiscipline', 'sex')
evGenerala <- evaluari.3[, atrib]
names(evGenerala) <- c('Master', 'Profesori', 'Discipline', 'gen')
evGenerala[evGenerala == "1"] <- "foarte scฤƒzut"
evGenerala[evGenerala == "2"] <- "scฤƒzut"
evGenerala[evGenerala == "3"] <- "mediu"
evGenerala[evGenerala == "4"] <- "bun"
evGenerala[evGenerala == "5"] <- "foarte bun"

# All columns except the last one ('gen') become ordered factors
for (i in seq_len(ncol(evGenerala) - 1)) {
  evGenerala[, i] <- factor(evGenerala[, i], levels = niveluri, ordered = TRUE)
}
names(evGenerala)
str(evGenerala)
# BUG FIX: the original re-assigned only three names to this four-column
# data frame at this point, silently setting the fourth column name to NA;
# the rename above already did the job, so that statement was removed.

l.evGenerala.g1 <- likert(evGenerala[, 1:3], grouping = evaluari.3$sex)
l.evGenerala.g1
summary(l.evGenerala.g1)

# Plots
plot(l.evGenerala.g1)
plot(l.evGenerala.g1, group.order = c('Feminin', 'Masculin'))
plot(l.evGenerala.g1, wrap = 30, text.size = 4.5,
     panel.background = element_rect(size = 1, color = "grey70", fill = NA),
     group.order = c('Feminin', 'Masculin')) +
  ggtitle("Evaluare generalฤƒ: profesori, discipline ศ™i master,\npe genuri/sexe") +
  theme(plot.title = element_text(colour = "black", size = 17)) +
  theme(axis.text.y = element_text(colour = "black", size = 14, hjust = 0)) +
  theme(axis.text.x = element_text(colour = "black", size = 12)) +
  theme(legend.text = element_text(colour = "black", size = 12)) +
  theme(strip.text.x = element_text(size = 14, colour = "black", angle = 0))

# Density curves, by gender
plot(l.evGenerala.g1, type = 'density')

# Mean evaluation, for both genders
evaluari.3 %>%
  group_by(sex) %>%
  dplyr::summarise(
    mean.of.evMaster = mean(evMaster, na.rm = TRUE),
    mean.of.evProfi = mean(evProfi, na.rm = TRUE),
    mean.of.evDiscipline = mean(evDiscipline, na.rm = TRUE)
  )

# Median evaluation, for both genders
evaluari.3 %>%
  group_by(sex) %>%
  dplyr::summarise(
    median.of.evMaster = median(evMaster, na.rm = TRUE),
    median.of.evProfi = median(evProfi, na.rm = TRUE),
    median.of.evDiscipline = median(evDiscipline, na.rm = TRUE)
  )

###################################################################
### I.c Analysing the evaluation data
###################################################################
names(evaluari.3)

## Question: is there a significant difference between male and female
## graduates in the overall evaluation of the master programme?
# H0: there is no significant gender difference in the final evaluation
# of the programme
wilcox.test(evMaster ~ sex, data = evaluari.3)
kruskal.test(evMaster ~ sex, data = evaluari.3)
# p-value = 0.2296; H0 is not rejected, so apparently no significant difference

# install.packages('zoo'); install.packages('coin')
library(coin)
wilcox_test(evMaster ~ sex, alternative = "less", conf.int = TRUE,
            distribution = "exact", data = evaluari.3)
wilcox_test(evMaster ~ sex, alternative = "greater", conf.int = TRUE,
            distribution = "exact", data = evaluari.3)
wilcox_test(evMaster ~ sex, alternative = "two.sided", conf.int = TRUE,
            distribution = "exact", data = evaluari.3)
# effect size: Z / sqrt(n)
1.2015 / sqrt(nrow(evaluari.3))
# 0.1396715

## Question: gender differences in the evaluation of the teachers?
# H0: no significant gender difference in the evaluation of the teachers
wilcox.test(evProfi ~ sex, data = evaluari.3)
kruskal.test(evProfi ~ sex, data = evaluari.3)
# p-value = 0.336; H0 is not rejected

## Question: gender differences in the evaluation of the disciplines?
# H0: no significant gender difference in the evaluation of the disciplines
wilcox.test(evDiscipline ~ sex, data = evaluari.3)
kruskal.test(evDiscipline ~ sex, data = evaluari.3)
# p-value = 0.4791; H0 is not rejected
wilcox_test(evDiscipline ~ sex, alternative = "greater", conf.int = TRUE,
            distribution = "exact", data = evaluari.3)
# effect size: Z / sqrt(n)
1.2015 / sqrt(nrow(evaluari.3))
# 0.139

# The tests above are debatable because they compare medians of
# Likert-scale data; build a composite numeric attribute instead:
# the mean of each respondent's available (non-missing) answers
evaluari.3$PunctajProgram <-
  (ifelse(is.na(evaluari.3$evMaster), 0, evaluari.3$evMaster) +
     ifelse(is.na(evaluari.3$evProfi), 0, evaluari.3$evProfi) +
     ifelse(is.na(evaluari.3$evDiscipline), 0, evaluari.3$evDiscipline)) /
  (ifelse(is.na(evaluari.3$evMaster), 0, 1) +
     ifelse(is.na(evaluari.3$evProfi), 0, 1) +
     ifelse(is.na(evaluari.3$evDiscipline), 0, 1))

atrib <- c("evMaster", "evProfi", "evDiscipline", "PunctajProgram")
evaluari.3[atrib]

# Mean and median composite score, for both genders
evaluari.3 %>%
  group_by(sex) %>%
  dplyr::summarise(
    medie.punctaj = mean(PunctajProgram, na.rm = TRUE),
    mediana.punctaj = median(PunctajProgram, na.rm = TRUE)
  )

# Density plots with semi-transparent fill
ggplot(evaluari.3, aes(x = PunctajProgram, fill = sex)) +
  geom_density(alpha = .3) +
  ggtitle("Punctaj compozit master,\npe genuri/sexe")

## Question: gender differences in the overall (composite) evaluation
## of the programme?
# H0: no significant gender difference in the composite score
wilcox.test(PunctajProgram ~ sex, data = evaluari.3)
kruskal.test(PunctajProgram ~ sex, data = evaluari.3)
# p-value = 0.2223; H0 is not rejected
wilcox_test(PunctajProgram ~ sex, alternative = "two.sided", conf.int = TRUE,
            distribution = "exact", data = evaluari.3)
# Z = -1.2205, p-value = 0.2248
# 95 percent confidence interval: [-0.6666667, 0.0000000]
wilcox_test(PunctajProgram ~ sex, alternative = "greater", conf.int = TRUE,
            distribution = "exact", data = evaluari.3)
# p-value = 0.8886; H0 is not rejected
# effect size: Z / sqrt(n)
1.2205 / sqrt(nrow(evaluari.3))
# 0.1418

#######################################################################
### II. Current (2013) evaluation of the usefulness of the disciplines
#######################################################################
names(evaluari)
atribute <- names(evaluari)
atribute_moment_actual <- atribute[which(str_detect(atribute, "MomActual"))]
evalUtilitActuala <- subset(evaluari, , select = atribute_moment_actual)
head(evalUtilitActuala)
# Drop the first two (non-discipline) columns
evalUtilitActuala <- evalUtilitActuala[, -(1:2)]
head(evalUtilitActuala)
str(evalUtilitActuala)

evalUtilitActuala[evalUtilitActuala == "1"] <- "foarte scฤƒzut"
evalUtilitActuala[evalUtilitActuala == "2"] <- "scฤƒzut"
evalUtilitActuala[evalUtilitActuala == "3"] <- "mediu"
evalUtilitActuala[evalUtilitActuala == "4"] <- "bun"
evalUtilitActuala[evalUtilitActuala == "5"] <- "foarte bun"

# Ordered factors (the original call carried a stray empty argument:
# `factor(x, levels=niveluri, ,ordered=TRUE)` — removed)
for (i in seq_len(ncol(evalUtilitActuala))) {
  evalUtilitActuala[, i] <- factor(evalUtilitActuala[, i], levels = niveluri,
                                   ordered = TRUE)
}

# Strip the common prefix ("evUtilitateMomActual") from the column names
atribute <- names(evalUtilitActuala)
nume.noi <- str_replace(atribute, 'vUtilitateMomActual', '')
nume.noi <- str_replace(nume.noi, '^e', '')
names(evalUtilitActuala) <- nume.noi

l.evalUtilitActuala <- likert(evalUtilitActuala)
l.evalUtilitActuala
summary(l.evalUtilitActuala)
summary(l.evalUtilitActuala, center = 2.5)

plot(l.evalUtilitActuala, text.size = 4) +
  ggtitle("Evaluare actualฤƒ a utilitฤƒศ›ii disciplinelor (domeniilor)") +
  theme(plot.title = element_text(colour = "black", size = "18")) +
  theme(axis.text.y = element_text(colour = "black", size = "12", hjust = 0)) +
  theme(axis.text.x = element_text(colour = "black", size = "10")) +
  theme(legend.text = element_text(colour = "black", size = "11"))

# Keep the y-axis in the original (unsorted) order
plot(l.evalUtilitActuala, ordered = FALSE, group.order = names(evalUtilitActuala))
plot(l.evalUtilitActuala, centered = FALSE, wrap = 30)
plot(l.evalUtilitActuala, center = 2.5, wrap = 30)
plot(l.evalUtilitActuala, center = 2.5, include.center = FALSE, wrap = 30)
plot(l.evalUtilitActuala, center = 2.5, include.center = FALSE, wrap = 20)
plot(l.evalUtilitActuala, plot.percents = TRUE, plot.percent.low = FALSE,
     plot.percent.high = FALSE)

# Density plot
plot(l.evalUtilitActuala, type = 'density', facet = FALSE)
# Heat map
plot(l.evalUtilitActuala, type = 'heat', wrap = 30, text.size = 4.5)
a353cad0abed1a017d27fa4f017a18ab2508ee45
42d6315be4acce738f7838c5ed6ee06ed3059a43
/R/plot3.R
c3d7508546c60f9222098992adafd823515e365a
[]
no_license
cesarggtid/ExData_Plotting1
044aef8f33ba3dcb1ad7839459f9fb55fc057f8d
50bd5e35716312e44ed5c70c768b12f709918f8f
refs/heads/master
2020-12-27T09:33:38.619403
2014-10-12T20:39:32
2014-10-12T20:39:32
null
0
0
null
null
null
null
UTF-8
R
false
false
882
r
plot3.R
# Recreates "plot3.png" (Coursera Exploratory Data Analysis, project 1):
# the three energy sub-metering series drawn as overlaid line graphs.
# NOTE(review): the paths below are machine-specific (Windows) — adjust
# before running elsewhere.
# Set working directory to current R script directory
setwd("D:/workspace-R/coursera/")
# Load R file containing the getData function
source('getData.R')
# Get Tidy data
# (getData is defined in getData.R; presumably it reads the raw file, subsets
# the relevant dates and builds the Date.Time column — confirm in that file)
hpc <- getData("D:/workspace-R/coursera/data/", "household_power_consumption.txt")
# Choose png graphics device and set image dimensions
png(filename = "plot3.png", width = 480, height = 480)
# Plot the first line graph (submetering_1); xlab suppressed on purpose
with(hpc, plot(Date.Time, Sub_metering_1, type = "l", xlab="", ylab="Energy Sub Metering"))
# Add the second line graph (submetering_2)
with(hpc, lines(Date.Time, Sub_metering_2, col="red"))
# Add the third line graph (submetering_3)
with(hpc, lines(Date.Time, Sub_metering_3, col="blue"))
# Add the legend (cex shrinks it so it fits the 480x480 canvas)
legend("topright", lty=1, col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), cex = 0.6)
# Close graphics device so the png file is actually written
dev.off()
a03d5fcf45f48a16be6f9778e5e12040bd0df47a
c71056ad296a655e334ea59206f647f3e7e4a070
/Scripts/R/Quarterly/quarterly_chol.R
b0f4919c0e7005af861705a52b6310bff74ed5b4
[]
no_license
Allisterh/VAR_SVAR-MscThesisICEF
4ed4db1a84f28d8ac40494da3c93ce6a5868865a
22220c3844e66532b12ccf5227f3b21708969083
refs/heads/main
2023-05-31T15:52:43.410287
2021-06-10T16:01:41
2021-06-10T16:01:41
null
0
0
null
null
null
null
UTF-8
R
false
false
4,731
r
quarterly_chol.R
library(seasonal)
library(rio)
library(lubridate)
library(bvarsv)
library(VARsignR)
library(vars)
library(svars)
library(tsDyn)
library(ggplot2)
library(mFilter)
library(BVAR)

# Quarterly data set; keep observations after 2005-05-01 and put them in
# chronological order (the source sheet is sorted newest-first)
dat <- import("/Users/rutra/ะ’ะจะญ/ะœะฐะณะธัั‚ั€ะฐั‚ัƒั€ะฐ/Thesis/Data/Aggregated data/Data Quarterly.xlsx", sheet = 3)
dat <- na.omit(dat[dat$date > as.Date("2005-05-01"), ])
dat <- dat[NROW(dat):1, ]

# Deseasonalization (X-13ARIMA-SEATS via `seasonal`) of the nominal GDP
# index, import prices and every CPI series
to_deseasonalize <- c("gdp_nominal_index", "imp_price_qoq",
                      grep("cpi.*", colnames(dat), value = TRUE))  # was value=T
dat_unseas <- dat
for (colname in to_deseasonalize) {
  current_ts <- ts(dat[, colname], start = c(2005, 2), frequency = 4)
  current_seas <- seas(current_ts,
                       transform.function = "none",
                       outlier = NULL,
                       seats.noadmiss = "yes")
  dat_unseas[, colname] <- as.numeric(final(current_seas))
}

# Output gap via the Hodrick-Prescott filter (lambda = 1600, quarterly data)
dat_unseas$nominal_gdp_gap <- hpfilter(dat_unseas$gdp_nominal_index, freq = 1600)$cycle
dat_unseas$real_gdp_gap <- hpfilter(dat_unseas$gdp_real_SA_index, freq = 1600)$cycle
plot(dat_unseas$date, dat_unseas$real_gdp_gap)

# Cumulated exchange-rate pass-through to CPI for a given structural shock:
# sum of the CPI impulse responses divided by the sum of the exchange-rate
# impulse responses.  Replaces the many copy-pasted sum(...)/sum(...) lines
# of the original script.
pass.through <- function(irf.obj, shock, er.var, cpi.var = "cpi_all_qoq") {
  irf.name <- function(response) sprintf("epsilon[ %s ] %%->%% %s", shock, response)
  sum(irf.obj$irf[[irf.name(cpi.var)]]) / sum(irf.obj$irf[[irf.name(er.var)]])
}

### Cholesky decomposition (NEER as the exchange rate) ----------------------
data_to_model <- dat_unseas[, c("gdp_real_SA_qoq",
                                "oil_USD_qoq",
                                "miacr_31",
                                "neer_qoq",
                                "cpi_all_qoq")]
data_chol <- data_to_model
# Flip the sign so a positive shock means depreciation
data_chol$neer_qoq <- data_chol$neer_qoq * -1

VARselect(data_chol, lag.max = 4)$selection
model_VAR <- VAR(data_chol, p = 1, type = "const")
choldec <- id.chol(model_VAR)
irf_choldec <- irf(choldec, n.ahead = 4, ortho = TRUE)
# plot(irf_choldec)

# REER gives closer results
pass.through(irf_choldec, "oil_USD_qoq", "neer_qoq")      # oil price shock
pass.through(irf_choldec, "miacr_31", "neer_qoq")         # MIACR 31 shock
pass.through(irf_choldec, "neer_qoq", "neer_qoq")         # NEER shock
pass.through(irf_choldec, "gdp_real_SA_qoq", "neer_qoq")  # output shock
pass.through(irf_choldec, "cpi_all_qoq", "neer_qoq")      # CPI shock

### Cholesky decomposition (USD rate as the exchange rate) ------------------
data_chol_usd <- dat_unseas[, c("oil_USD_qoq", "imp_price_qoq",
                                "reserves_USD_qoq", "miacr_31",
                                "nom_usd_qoq", "gdp_real_SA_qoq",
                                "cpi_all_qoq")]
data_chol_usd$nom_usd_qoq <- data_chol_usd$nom_usd_qoq * -1

VARselect(data_chol_usd, lag.max = 4)$selection
model_VAR_usd <- VAR(data_chol_usd, p = 4, type = "const")
choldec_usd <- id.chol(model_VAR_usd)
irf_choldec_usd <- irf(choldec_usd, n.ahead = 4, ortho = TRUE)
plot(irf_choldec_usd)

pass.through(irf_choldec_usd, "reserves_USD_qoq", "nom_usd_qoq")  # reserves
pass.through(irf_choldec_usd, "oil_USD_qoq", "nom_usd_qoq")       # oil
pass.through(irf_choldec_usd, "miacr_31", "nom_usd_qoq")          # MIACR 31
pass.through(irf_choldec_usd, "nom_usd_qoq", "nom_usd_qoq")       # USD rate
# BUG FIX: the original referenced "gdp_nominal_qoq", which is not a
# variable of this model, so sum(NULL) silently produced 0; the model
# variable is "gdp_real_SA_qoq".
pass.through(irf_choldec_usd, "gdp_real_SA_qoq", "nom_usd_qoq")   # output

### Smooth transition identification ----------------------------------------
model_cv <- id.st(model_VAR, c_lower = -5, c_upper = 5, c_step = 0.02,
                  nc = 8, c_fix = 25)
irf_cv <- irf(model_cv, n.ahead = 4, ortho = TRUE)
# plot(irf_cv)

pass.through(irf_cv, "oil_USD_qoq", "neer_qoq")        # oil
# NOTE(review): the original also computed an "imp_price_qoq" ratio here,
# but that variable is not part of model_VAR, so the result was 0/0 = NaN;
# the line was removed.
pass.through(irf_cv, "miacr_31", "neer_qoq")           # MIACR 31
pass.through(irf_cv, "neer_qoq", "neer_qoq")           # NEER
pass.through(irf_cv, "gdp_real_SA_qoq", "neer_qoq")    # output
91bd8e7c426d8432fa1d5e20620b6d96b3c8dbd1
178086eeb8b4158d45b705428d5bf4e3b41d6da4
/demos/nnet_demo.R
1a7335933c73e8595722dd2547272224fa071356
[]
no_license
CameronMSeibel/info201a_final_project
3a90c9a5741f2c276bb3478d65b9c9d361808e8e
50190e57408f203872b34e20162e00f72b85a54d
refs/heads/master
2020-03-17T16:02:51.955682
2018-05-31T19:37:02
2018-05-31T19:37:02
133,734,059
0
0
null
null
null
null
UTF-8
R
false
false
1,432
r
nnet_demo.R
# Cameron Seibel, INFO 201
# NNET Proof of Concept
#
# Proof of concept for using the nnet package to solve a classification
# problem on one of R's built-in data sets, iris.
# Be sure that nnet is already installed on your machine!
library(nnet)
library(dplyr)

# Size of the hidden layer of the neural net; larger values can improve
# training fit but risk overfitting the training data.
HIDDEN_LAYER_SIZE <- 10

iris_df <- iris

# Train on a random sample of the iris data...
iris_subset <- sample_n(iris_df, 50)
# ...and test on the rows that were not in the training set.
iris_test <- iris_df %>% anti_join(iris_subset)

# Instantiate the network: Species modelled as a function of all other
# features, trained on the sampled subset.
iris_classifier <- nnet(Species ~ ., data = iris_subset, size = HIDDEN_LAYER_SIZE)

# Pair each true species with the network's prediction on the test set.
predictions <- data.frame(
  Species = iris_test$Species,
  Prediction = predict(iris_classifier, iris_test, type = "class")
)

# BUG FIX: the original printed the raw number of misclassified rows but
# called it a "percent" (and misspelled "inaccuracy"); report the count and
# the actual error rate.
n_wrong <- sum(predictions$Species != predictions$Prediction)
pct_wrong <- 100 * n_wrong / nrow(predictions)
print(paste0("The network misclassified ", n_wrong, " of ", nrow(predictions),
             " test rows (", round(pct_wrong, 1), "% error)."))
99ff35a84cd29d6e9125b09f844bb961e50e4db8
f17de11f2aa5ba013b442c1fff1ab794b984a792
/man/ros_ping.Rd
ef8a7acb57110cd05149bce3ee01f20a58820cca
[]
no_license
ktargows/rosette
27d136225717254919f8ad0f791ac4fe53a056b4
caea1ecd62fdef278e8927a7eafd043332e7ac19
refs/heads/master
2020-12-30T12:10:45.021008
2016-10-11T22:06:08
2016-10-11T22:06:08
null
0
0
null
null
null
null
UTF-8
R
false
true
214
rd
ros_ping.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ping.r \name{ros_ping} \alias{ros_ping} \title{Rosette API availability} \usage{ ros_ping() } \description{ Rosette API availability }
e1e982411f84b7110ebb14b285c8cddac0bb396e
a6035147e4078ec16659573ee7c7fc89f78efc3d
/R/BPbasis.R
ea86111c8978ebbb3549b3caa847ccc4b7a81069
[]
no_license
Li-Syuan/BayesBP
8b04899d22d6a73b1f10eae1d4c2b9e6c0fb4cd3
182a7eb8d5de2809c8c778bd31725051b0678ca8
refs/heads/main
2022-02-14T01:47:44.753106
2022-01-26T01:50:06
2022-01-26T01:50:06
213,273,058
1
0
null
null
null
null
UTF-8
R
false
false
1,109
r
BPbasis.R
#' Bernstein polynomial basis.
#' @description Build the two-dimensional Bernstein polynomial basis on the
#'   grid spanned by \code{ages} x \code{years}, one basis matrix for every
#'   degree from \code{N} to \code{n0}.
#' @param ages Range of ages (mapped to [0, 1] internally).
#' @param years Range of years (mapped to [0, 1] internally).
#' @param n0 Upper bound of the Poisson random variable (largest degree).
#' @param N Lower bound of the Poisson random variable (smallest degree).
#' @return An object of class \code{"BPbasis"}: a list with one element per
#'   degree n, each a \code{(n+1)^2 x nrow(grid)} matrix of basis values.
#' @examples
#' ages <- 35:85
#' years <- 1988:2007
#' list.basis <- BPbasis(ages,years,10)
#' list.basis
#' @family Bernstein basis
#' @export BPbasis
BPbasis <- function(ages, years, n0, N = 1) {
    # Map both axes to [0, 1]; scale_to_01() is defined elsewhere in the
    # package, as is bin() (the binomial/Bernstein term) used below.
    x <- scale_to_01(ages)
    y <- scale_to_01(years)
    xy <- expand.grid(x, y)
    # Preallocate the result instead of growing the list inside the loop
    basis_list <- vector("list", n0 - N + 1)
    for (n in N:n0) {
        i <- j <- 0:n
        g <- array(0, dim = c(n + 1, n + 1, nrow(xy)))
        for (k in seq_len(nrow(xy))) {
            # Tensor product of the two univariate Bernstein bases of
            # degree n, evaluated at grid point k
            g[, , k] <- outer(i, j, function(i, j) {
                bin(n, i, xy[k, 1]) * bin(n, j, xy[k, 2])
            })
        }
        # Flatten each (n+1)x(n+1) slice into one column per grid point
        basis_list[[n - N + 1]] <- matrix(as.vector(g),
                                          nrow = (n + 1)^2,
                                          ncol = nrow(xy))
    }
    class(basis_list) <- "BPbasis"
    return(basis_list)
}
4a3bb2137eca361fe3d1f77c15044f1de5addb99
4f9015f385c8a02ff258414ba931952afb1e6fac
/R-libraries/spm/R/spm.remove.last.words.R
a816d0c52b416926a509d64fe0b2cf4b55d27d5d
[]
no_license
NIWAFisheriesModelling/SPM
0de0defd30ccc92b47612fa93946ef876196c238
0412f06b19973d728afb09394419df582f1ecbe4
refs/heads/master
2021-06-06T00:15:07.548068
2021-05-27T06:07:46
2021-05-27T06:07:46
21,840,937
6
3
null
null
null
null
UTF-8
R
false
false
255
r
spm.remove.last.words.R
#' Utility function: drop the trailing \code{words} words from a
#' blank-separated string and re-join the remainder with single spaces.
#'
#' @author Alistair Dunn
#'
spm.remove.last.words <- function(string, words = 1) {
  # spm.unpaste() (package-local) splits the string on blanks
  pieces <- spm.unpaste(string, sep = " ")
  # Indices of the last `words` elements, dropped via negative indexing
  drop.idx <- length(pieces) - seq_len(words) + 1
  paste(unlist(pieces[-drop.idx]), collapse = " ")
}
d21d5b3820eec1e3f37a65e144b228d0ba7c126e
305cf81d42d09a94f7948a0a47a7a8d58e03f845
/R/base_pymolr.r
5f8793b0935e1ac23719d04364b0de8911ac37fd
[]
no_license
StefansM/pymolr
8c56df35df19024c3dcce058e671faa360159f23
38e946f07f8724ded51788e62662f1598fb4c24f
refs/heads/master
2021-04-27T10:51:43.971589
2018-02-22T23:30:03
2018-02-22T23:30:03
122,548,133
2
1
null
null
null
null
UTF-8
R
false
false
2,949
r
base_pymolr.r
#' Control PyMol from R.
#'
#' Pymolr makes all PyMol commands available from R, and provides tools to
#' manipulate on PyMol selections.
#'
#' Use the \code{\linkS4class{Pymol}} class to interact with Pymol and the
#' \code{\linkS4class{Selection}} class to create PyMol selections.
"_PACKAGE"

#' Base class for PyMol connections.
#'
#' This base class implements all PyMol commands, but directly returns the data
#' returned by PyMol. The derived class \code{\link{Pymol}} performs
#' post-processing on certain methods and is the recommended interface.
# Fields: pid = OS process id of the spawned PyMol; executable/args = how it
# was launched; url = the XMLRPC endpoint used for every remote call.
BasePymol <- setRefClass("BasePymol",
    fields=list(pid="integer",
                executable="character",
                args="character",
                url="character"))

BasePymol$methods(
  initialize = function(executable=Sys.which("pymol"),
                        show.gui=FALSE,
                        rpc.port=9123) {
    "Initialise a new Pymol class."
    # Bundled python script that runs an XMLRPC server inside PyMol
    rpc.server <- system.file("extdata", "pymol_xmlrpcserver.py",
                              package="pymolr")
    .self$executable <<- executable
    # "-c" = headless mode when no GUI requested; "--rpc-bg" keeps the RPC
    # server in the background when the GUI is shown
    .self$args <<- c("-q", if(!show.gui) "-c", rpc.server,
                     if(show.gui) "--rpc-bg",
                     "--rpc-port", rpc.port)
    .self$url <<- paste0("http://localhost:", rpc.port, "/RPC2")
    # Before we start a pymol server, make sure that there is not one already
    # running on this port.  (A successful ping means the port is taken;
    # a connection error is the expected/good case here.)
    if(tryCatch(.self$is.connected(), error=function(...) "err") != "err"){
      stop(paste("A process is already running on port", rpc.port))
    }
    # Launch PyMol without blocking; we poll it below
    .self$pid <<- sys::exec_background(.self$executable, .self$args)
    # Loop until the RPC server comes up. PyMol can take quite a long time to
    # start, so we might have to Sys.sleep() a few times until it comes up.
    exit.status <- NA
    max.tries <- 10
    connection.tries <- 0
    while(TRUE){
      # Non-blocking status check: NA means the process is still running
      exit.status <- sys::exec_status(.self$pid, wait=FALSE)
      if(!is.na(exit.status)
           || tryCatch(.self$is.connected(), error=function(cond) FALSE)
           || connection.tries == max.tries) {
        break
      }
      Sys.sleep(1)
      connection.tries <- connection.tries + 1
    }
    if(!is.na(exit.status)){
      # PyMol died before the server came up
      stop(paste("Unable to start PyMol process. Exit status:", exit.status))
    }else if(connection.tries == max.tries){
      # Timed out waiting: kill the orphaned process before erroring
      tools::pskill(.self$pid)
      stop("Couldn't connect to PyMol XMLRPC server.")
    }
  },
  finalize = function() {
    "Closes PyMol when this class is garbage collected."
    .self$quit()
  },
  is.connected = function() {
    "Check that the PyMol server is active."
    .self$.rpc("ping") == "pong"
  },
  .rpc = function(method, ...) {
    "Call a remote PyMol method."
    XMLRPC::xml.rpc(.self$url, method, ...)
  }
)
01924e99890df83b3b26db2c0bae4e66814cef2c
8694cb6d3889d64d8b4d6977774512771002a40f
/IntroR/pollutantmean.r
5b32e03b59896534e1dfa5477696acbea88ed376
[]
no_license
jbewald/R
2abee7df1ea6447dc4981a43317d39979cc0f5e8
c838707a810995b674679c7c87f2fbba1d94491f
refs/heads/master
2021-01-10T08:55:57.045066
2016-01-29T16:32:41
2016-01-29T16:32:41
50,674,756
0
0
null
null
null
null
UTF-8
R
false
false
1,655
r
pollutantmean.r
pollutantmean <- function(directory, pollutant, id = 1:332) {
    ## 'directory': character vector of length 1, location of the CSV files
    ## 'pollutant': "sulfate" or "nitrate"
    ## 'id': integer vector of monitor ID numbers to include
    ##
    ## Returns the mean of the pollutant across all monitors listed in the
    ## 'id' vector, ignoring NA values (result is NOT rounded).
    ## Example: pollutantmean('c:/data/specdata', 'sulfate', 2:3)

    # Map the pollutant name onto its column index in the raw files
    # (column 2 = sulfate, column 3 = nitrate); reject anything else.
    col <- switch(pollutant,
                  sulfate = 2L,
                  nitrate = 3L,
                  stop("'pollutant' must be \"sulfate\" or \"nitrate\""))

    # Read every monitor file once and stack them.
    # BUG FIX: the original grew a data frame with rbind() inside a loop and
    # keyed the first iteration on exists("dataset"), which is O(n^2) and
    # silently appends to any pre-existing `dataset` in the calling
    # environment, corrupting the result.
    filelist <- list.files(directory, pattern = "\\.csv$", full.names = TRUE)
    dataset <- do.call(rbind, lapply(filelist, read.csv))

    # Mean over the requested monitors only, dropping NA observations
    mean(dataset[dataset$ID %in% id, col], na.rm = TRUE)
}
a9aa45f2b74eddf6149a009d5c188192796da508
19ece375ca4095b5578efe5332e7f79137435b0c
/man/SNR.Rd
2458bfd72eb6a69fcd5066ab61da282e78f9bea0
[]
no_license
rscharpf/crlmmCompendium
547d80cc477b1eb29f3e7764bb37b5fe23fa83c6
1901b99953ad932c64fb12ffd80bf1e68785bb00
refs/heads/master
2020-04-08T22:57:53.965914
2012-05-03T19:02:29
2012-05-03T19:02:29
null
0
0
null
null
null
null
UTF-8
R
false
false
632
rd
SNR.Rd
\name{SNR} \alias{SNR} \docType{data} \title{ Signal to noise ratio estimated from the CRLMM algorithm. } \description{ The signal to noise ratio (SNR) is an overall measure of the separation of the genotype clusters for a particular sample. The SNR can be useful as a measure of quality control. For Affymetrix 6.0, we generally suggest dropping samples with a SNR below 5. } \usage{data(SNR)} \format{ The format is: num [1:1258] 6.6 7.05 7.83 7.35 6.47 ... } \source{ HapMap phase III } \examples{ data(SNR) \dontrun{ ##for an object of class \code{CNSet}, the SNR can be extracted by object$SNR[] } } \keyword{datasets}
c13fe031f38712d7842ef7ad9f584fac42dbada7
35de14603463a45028bd2aca76fa336c41186577
/R/find_consensus_SNPs_no_PolyFun.R
75b899460b6d167ccb4d28d387deeacb015a8a3b
[ "MIT" ]
permissive
UKDRI/echolocatoR
e3cf1d65cc7113d02b2403960d6793b9249892de
0ccf40d2f126f755074e731f82386e4e01d6f6bb
refs/heads/master
2023-07-14T21:55:27.825635
2021-08-28T17:02:33
2021-08-28T17:02:33
416,442,683
0
1
null
null
null
null
UTF-8
R
false
false
471
r
find_consensus_SNPs_no_PolyFun.R
# Recompute Union Credible Set (UCS) and Consensus SNP support while
# excluding the POLYFUN_SUSIE fine-mapping method, then copy the two
# resulting columns back onto the input under *_noPF names.
#
# @param finemap_dat Fine-mapping results data.frame, as accepted by
#   `find_consensus_SNPs()` (defined elsewhere in this package).
# @param verbose Print a progress message?  (was `T`; `TRUE` is used
#   because `T` can be reassigned)
# @return `finemap_dat` with `Consensus_SNP_noPF` and `Support_noPF` added.
find_consensus_SNPs_no_PolyFun <- function(finemap_dat, verbose = TRUE){
  printer("Identifying UCS and Consensus SNPs without PolyFun", v = verbose)
  newDF <- find_consensus_SNPs(finemap_dat,
                               exclude_methods = "POLYFUN_SUSIE",
                               sort_by_support = FALSE)
  finemap_dat$Consensus_SNP_noPF <- newDF$Consensus_SNP
  finemap_dat$Support_noPF <- newDF$Support
  return(finemap_dat)
}
768173f124ea69e4b1426eb59e150a329b3aa34c
a6ca6b4d428124461ef4184f32e35961b8a4ce9e
/02_rprogramming/assignment3/best.R
81c9608cc1705b50eb11546039a0e0fbf6b0626f
[]
no_license
ryanmcdonnell/datasciencecoursera
c81a95beb7afc05fad209dc3f1d858f489de7d4a
1d005de1d3f29a30cc31a5a0114d99cb7895a995
refs/heads/master
2016-08-07T21:51:58.802527
2015-08-10T23:08:05
2015-08-10T23:08:05
38,704,568
0
0
null
null
null
null
UTF-8
R
false
false
1,250
r
best.R
best <- function(state, outcome) {
    ## Return the name of the hospital in `state` with the lowest 30-day
    ## death rate for `outcome`; ties are broken alphabetically.
    ## Reads "outcome-of-care-measures.csv" from the working directory.

    # Reject unknown outcomes up front
    if (!(outcome %in% c("heart attack", "heart failure", "pneumonia"))) {
        stop("invalid outcome")
    }

    # Load the raw data and keep only the five columns we need:
    # hospital name, state, and the three mortality-rate columns
    raw <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
    cols <- raw[, c(2, 7, 11, 17, 23)]
    names(cols) <- c("name", "state", "heart attack", "heart failure", "pneumonia")

    # Validate the state against the states actually present in the data
    if (!(state %in% unique(cols$state))) {
        stop("invalid state")
    }

    # Keep rows for this state with a reported rate for the outcome
    in.state <- cols[cols$state == state & cols[[outcome]] != "Not Available", ]

    # Lowest rate wins; alphabetical order breaks ties
    rates <- as.numeric(in.state[[outcome]])
    candidates <- in.state$name[rates == min(rates)]
    sort(candidates)[1]
}
17ce26b3c8eb6f03b3035660ffd74653c0f19002
a8c143e36e191984fca19f9f421b0231fbc7bc18
/man/pick.batch.sizes.Rd
b8c00f4f6c7d62916c4a57bb45baf591c45ce2b6
[]
no_license
bschiffthaler/BatchMap
3d74d8315dee0d5ae00413fbfed740134c9a064f
21806c09de4b8839b0625d8b73047bc57c4a02ae
refs/heads/master
2021-01-17T23:33:10.251330
2019-12-10T13:49:21
2019-12-10T13:49:21
84,226,774
3
3
null
null
null
null
UTF-8
R
false
true
1,069
rd
pick.batch.sizes.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/overlapping.batches.R \name{pick.batch.sizes} \alias{pick.batch.sizes} \title{Picking optimal batch size values} \usage{ pick.batch.sizes(input.seq, size = 50, overlap = 15, around = 5) } \arguments{ \item{input.seq}{an object of class \code{sequence}.} \item{size}{The center size around which an optimum is to be searched} \item{overlap}{The desired overlap between batches} \item{around}{The range around the center which is maximally allowed to be searched.} } \value{ An integer value for the size which most evenly divides batches. In case of ties, bigger batch sizes are preferred. } \description{ Suggest an optimal batch size value for use in \code{\link[BatchMap]{map.overlapping.batches}} } \examples{ \dontrun{ LG <- structure(list(seq.num = seq(1,800)), class = "sequence") batchsize <- pick.batch.sizes(LG, 50, 19) } } \seealso{ \code{\link[BatchMap]{map.overlapping.batches}} } \author{ Bastian Schiffthaler, \email{bastian.schiffthaler@umu.se} } \keyword{utilities}
7c000d67249182a910864f7a139a476873433fde
e158e9992ab1d0510134ecc123aac7220c612df0
/R/utilityFunctions.R
78091cb630b2a3202fdb00879392b532e19eff04
[]
no_license
grimbough/biomaRt
a7e2a88276238e71898ea70fcf5ac82e1e043afe
8626e95387ef269ccc355dbf767599c9dc4d6600
refs/heads/master
2023-08-28T13:03:08.141211
2022-11-01T14:38:39
2022-11-01T14:38:39
101,400,172
26
8
null
2022-05-17T08:10:51
2017-08-25T12:08:50
R
UTF-8
R
false
false
14,729
r
utilityFunctions.R
## sometimes results can be returned by getBM() in a different order to we ## asked for them, which messes up the column names. Here we try to match ## results to known attribute names and rename accordingly. .setResultColNames <- function(result, mart, attributes, bmHeader = FALSE) { ## get all available attributes and ## filter only for the ones we've actually asked for att <- listAttributes(mart, what = c("name", "description")) att <- att[which(att[,'name'] %in% attributes), ] if(length(which(duplicated(att[,'description']))) > length(which(duplicated(att)))) { warning("Cannot unambiguously match attribute names Ignoring bmHeader argument and using biomart description field") return(result) } resultNames = colnames(result) ## match the returned column names with the attribute names matches <- match(resultNames, att[,2], NA) if(any(is.na(matches))) { warning("Problems assigning column names.", "Currently using the biomart description field.", "You may wish to set these manually.") return(result) } ## if we want to use the attribute names we specified, do this, ## otherwise we use the header returned with the query if(!bmHeader) { colnames(result) = att[matches, 1] } ## now put things in the order we actually asked for the attributes in result <- result[, match(att[matches,1], attributes), drop=FALSE] return(result) } ## BioMart doesn't work well if the list of values provided to a filter is ## longer than 500 values. It returns only a subset of the requested data ## and does so silently! This function is designed to take a list of provided ## filters, and split any longer than 'maxChunkSize'. It operates recursively ## incase there are multiple filters that need splitting, and should ensure ## all possible groupings of filters are retained. 
.splitValues <- function(valuesList, maxChunkSize = 500) { vLength <- vapply(valuesList[[1]], FUN = length, FUN.VALUE = integer(1)) if(all(vLength <= maxChunkSize)) { return(valuesList) } else { ## pick the next filter to split vIdx <- min(which(vLength > maxChunkSize)) nchunks <- (vLength[vIdx] %/% maxChunkSize) + 1 splitIdx <- rep(1:nchunks, each = ceiling(vLength[vIdx] / nchunks))[ 1:vLength[vIdx] ] ## a new list we will populate with the chunks tmpList <- list() for(i in 1:nchunks) { for( j in 1:length(valuesList) ) { listIdx <- ((i - 1) * length(valuesList)) + j tmpList[[ listIdx ]] <- valuesList[[j]] tmpList[[ listIdx ]][[ vIdx ]] <- tmpList[[ listIdx ]][[ vIdx ]][which(splitIdx == i)] } } ## recursively call the function to process next filter valuesList <- .splitValues(tmpList, maxChunkSize = maxChunkSize) } return(valuesList) } ## Creating the filter XML for a single chunk of values. Returns a character ## vector containing the XML lines for all specified filters & their ## attributes spliced together into a single string. .createFilterXMLchunk <- function(filterChunk, mart) { individualFilters <- vapply(names(filterChunk), FUN = function(filter, values, mart) { ## if the filter exists and is boolean we do this if(filter %in% listFilters(mart, what = "name") && grepl('boolean', filterType(filter = filter, mart = mart)) ) { if(!is.logical(values[[filter]])) stop("'", filter, "' is a boolean filter and needs a ", "corresponding logical value of TRUE or FALSE to ", "indicate if the query should retrieve all data that ", "fulfill the boolean or alternatively that all data ", "that not fulfill the requirement should be retrieved.", call. 
= FALSE) val <- ifelse(values[[filter]], yes = 0, no = 1) val <- paste0("\" excluded = \"", val, "\" ") } else { ## otherwise the filter isn't boolean, or doesn't exist if(is.numeric(values[[filter]])) values[[filter]] <- as.integer(values[[filter]]) val <- paste0(values[[filter]], collapse = ",") ## convert " to ' to avoid truncating the query string val <- gsub(x = val, pattern = "\"", replacement = "'", fixed = TRUE) val <- paste0('" value = "', val, '" ') } filterXML <- paste0("<Filter name = \"", filter, val, "/>") return(filterXML) }, FUN.VALUE = character(1), filterChunk, mart, USE.NAMES = FALSE) filterXML <- paste0(individualFilters, collapse = "") return(filterXML) } .generateFilterXML <- function(filters = "", values, mart, maxChunkSize = 5000) { ## return empty string if no filter specified & this isn't ensembl ## specifying no filter is generally bad, as it will get 'everything' ## and we might encounter the time out problem if(filters[1] == "") { return("") } ## if we have multiple filters, the values must be specified as a list. if(length(filters) > 1 && class(values) != "list") { stop("If using multiple filters, the 'value' has to be a list.", "\nFor example, a valid list for 'value' could be: list(affyid=c('1939_at','1000_at'), chromosome= '16')", "\nHere we select on Affymetrix identifier and chromosome, only results that pass both filters will be returned"); } ## it's easy to not realise you're passing a data frame here, so check if(is.data.frame(values) && ncol(values == 1)) { values <- values[,1] } if(!is.list(values)){ values <- list(values) } names(values) <- filters values <- .splitValues(list(values), maxChunkSize = maxChunkSize) filterXML_list <- lapply(values, .createFilterXMLchunk, mart) return(filterXML_list) } #' it seems like pretty common practice for users to copy and paste the host #' name from a browser if they're not accessing Ensembl. 
Typically this will #' include the "http://" and maybe a trailing "/" and this messes up our #' paste the complete URL strategy and produces something invalid. #' This function tidies that up to catch common variants. .cleanHostURL <- function(host, warn = TRUE) { parsed_url <- httr::parse_url(host) ## just supplying 'ensembl.org' is no longer handled correctly ## stick 'www' in front if we see this if( parsed_url$path == "ensembl.org" ) { parsed_url$path = "www.ensembl.org" } ## only prepend http if needed if(is.null(parsed_url$scheme)) { parsed_url$scheme <- "http" parsed_url$hostname <- parsed_url$path parsed_url$path <- "" } ## warn about Ensembl HTTPS here - later we'll force the change if(grepl("ensembl", parsed_url$hostname) && parsed_url$scheme != "https" && warn == TRUE) { warning( "Ensembl will soon enforce the use of https.\n", "Ensure the 'host' argument includes \"https://\"", call. = FALSE) } host <- httr::build_url(parsed_url) ## strip trailing slash host <- gsub(pattern = "/$", replacement = "", x = host) return(host) } .createErrorMessage <- function( error_code, host = "" ) { ## if we encounter internal server error, suggest using a mirror if( error_code == 500) { err_msg <- 'biomaRt has encountered an unexpected server error.' } else if ( error_code == 509) { err_msg <- 'biomaRt has exceeded the bandwidth allowance with this server.' } else { err_msg <- paste0('biomaRt has encountered an unknown server error. 
HTTP error code: ', error_code, '\nPlease report this on the Bioconductor support site at https://support.bioconductor.org/') } if( grepl("ensembl", x = host) ) { err_msg <- c(err_msg, '\nConsider trying one of the Ensembl mirrors (for more details look at ?useEnsembl)') } return(err_msg) } .submitQueryXML <- function(host, query, httr_config) { res <- httr::POST(url = host, body = list('query' = query), config = httr_config, timeout(300)) if( httr::http_error(res) ) { err_msg <- .createErrorMessage( error_code = status_code(res), host = host ) stop(err_msg, call. = FALSE) } ## content() prints a message about encoding not being supplied ## for ensembl.org - no default, so we suppress it return( suppressMessages(content(res)) ) } #' if parsing of TSV results fails, try this .fetchHTMLresults <- function(host, query, httr_config) { query = gsub(x = query, pattern = "TSV", replacement = "HTML", fixed = TRUE) html_res <- .submitQueryXML(host, query, httr_config) XML::readHTMLTable(html_res, stringsAsFactors = FALSE)[[1]] } #' @param postRes Character vector of length 1 returned by server. We expect #' this to be a tab delimited string that comprises the whole table of results #' including column headers. 
.processResults <- function(postRes, mart, hostURLsep = "?", fullXmlQuery, quote = "\"", numAttributes) { ## we expect only a character vector of length 1 if(!(is.character(postRes) && (length(postRes)==1L))) { stop("The query to the BioMart webservice returned an invalid result\n", "biomaRt expected a character string of length 1.\n", "Please report this on the support site at http://support.bioconductor.org") } if(grepl(pattern = "^Query ERROR", x = postRes)) stop(postRes) ## convert the serialized table into a dataframe result <- tryCatch(read.table(text = postRes, sep="\t", header = TRUE, quote = quote, comment.char = "", stringsAsFactors = FALSE, check.names = FALSE), error = function(e) { ## if the error relates to number of element, try reading HTML version if(grepl(x = e, pattern = "line [0-9]+ did not have [0-9]+ elements")) .fetchHTMLresults(host = paste0(martHost(mart), hostURLsep), query = fullXmlQuery, httr_config = martHTTRConfig(mart)) else stop(e) } ) if(!(is(result, "data.frame") && (ncol(result) == numAttributes))) { stop("The query to the BioMart webservice returned an invalid result.\n", "The number of columns in the result table does not equal the number of attributes in the query.\n", "Please report this on the support site at http://support.bioconductor.org") } return(result) } ############################################## ## searching Attributes, Filters, and Datasets ############################################## #' given a data.frame, searches every column for #' the value in 'pattern' #' returns index of rows containing a match .searchInternal <- function(pattern, data) { colIdx <- vapply(data, FUN = stringr::str_detect, FUN.VALUE = logical(length = nrow(data)), pattern = pattern) rowIdx <- apply(colIdx, 1, any) ## return either the matching rows, or NULL if(any(rowIdx)) { return(data[rowIdx,]) } else { message('No matching datasets found') return(NULL) } } searchDatasets <- function(mart, pattern) { if(missing(mart)) stop("Argument 
'mart' must be specified") if(missing(pattern)) pattern = ".*" datasets <- listDatasets(mart) res <- .searchInternal(pattern = pattern, data = datasets) if(is.null(res)) invisible(res) else res } searchAttributes <- function(mart, pattern) { if(missing(mart)) stop("Argument 'mart' must be specified") if(missing(pattern)) pattern = ".*" attributes <- listAttributes(mart) res <- .searchInternal(pattern = pattern, data = attributes) if(is.null(res)) invisible(res) else res } searchFilters <- function(mart, pattern) { if(missing(mart)) stop("Argument 'mart' must be specified") if(missing(pattern)) pattern = ".*" filters <- listFilters(mart) res <- .searchInternal(pattern = pattern, data = filters) if(is.null(res)) invisible(res) else res } ## Some filters have a predefined list of options that can be selected. ## This function lets us search those values, given a specified filter. searchFilterOptions <- function(mart, filter, pattern) { if(missing(mart)) stop("Argument 'mart' must be specified") if(missing(filter)) stop("Argument 'filter' must be specified") if(missing(pattern)) pattern = ".*" ## first get all filters & their options, then reduce to what's requested filters <- listFilters(mart, what = c("name", "options")) filters <- filters[ filters$name == filter, ] if(nrow(filters) == 0) { stop("Filter '", filter, "' not found.") } options <- gsub(filters$options, pattern = "^\\[|\\]$", replacement = "") options <- strsplit(options, split = ",", fixed = TRUE)[[1]] res <- grep(x = options, pattern = pattern, ignore.case = TRUE, value = TRUE) if(length(res) == 0) message('No matching values found') else res } searchFilterValues <- function(mart, filter, pattern) { .Deprecated(new = "listFilterOptions", msg = c("This function has been renamed searchFilterOptions()", "\nsearchFilterValues() is deprecated and will be removed in the future.")) searchFilterOptions(mart, filter, pattern = pattern) } listFilterOptions <- function(mart, filter) { searchFilterOptions(mart = 
mart, filter = filter) } listFilterValues <- function(mart, filter) { .Deprecated(new = "listFilterOptions", msg = c("This function has been renamed listFilterOptions()", "\nlistFilterValues() is deprecated and will be removed in the future.")) listFilterOptions(mart, filter) }
ff83ea80ff6ffdb0bee875b0068f1a6726ee6493
358fac5c4137d512c32806b994dccd62a0167352
/R/assetservers.R
957ec0ab8090545b717704f505211b421236659c
[]
no_license
paleolimbot/piplyr
b8a8bcf2bcabbfab5ae982517061eeb2ceae6fd0
87758e0390fa8d23b59c9c64c1208e917281f729
refs/heads/master
2020-08-21T08:56:14.859525
2019-10-19T19:22:06
2019-10-19T19:22:06
216,126,268
0
0
null
null
null
null
UTF-8
R
false
false
791
r
assetservers.R
#' Retrieve a list of all Asset Servers known to this service #' #' @inheritParams pi_get #' @param webId The ID of the server. #' @param ... Passed to [pi_get()] #' #' @export #' #' @references #' https://devdata.osisoft.com/piwebapi/help/controllers/assetserver/actions/list #' https://devdata.osisoft.com/piwebapi/help/controllers/assetserver/actions/get #' #' @examples #' con <- pi_connect_public() #' pi_assetserver_list(con) #' pi_assetserver(con, "F1RSIRAQC7zjPUOfBqai218IAwUElTUlYx") #' pi_assetserver_list <- function(.con, ...) { pi_get(.con, "assetservers", ...) } #' @rdname pi_assetserver_list #' @export pi_assetserver <- function(.con, webId, ...) { webId <- pi_web_id(webId) pi_get( .con, glue::glue("assetservers/{webId}"), webId = webId, ... ) }
f3b9716883e23da355271b6901b40688e0d0a70c
b3acea392da5c57fd91bd14ece661853deed1c05
/Hibridacion_monolitica/Hibridacion_monolitica.R
c73b5359608c7987bfbc36307a066450f93b07dd
[]
no_license
Artcs1/SistemasdeRecomendacion_T2
5c674c9dc590903a4ea29dd142505180f61680ea
976e099f5e656d350aba43d19abb1f7ef70dc08f
refs/heads/master
2020-03-18T12:56:32.713718
2018-06-22T04:47:37
2018-06-22T04:47:37
134,751,809
0
3
null
2018-06-03T21:14:35
2018-05-24T18:04:35
R
ISO-8859-1
R
false
false
5,238
r
Hibridacion_monolitica.R
FBC.genres.sim <- function(dataset,testset,genreset) { dataset = as.matrix(dataset) testset = as.matrix(testset) genreset = as.matrix(genreset) testset = testset[,2:3] nmovies = max(c(dataset[,2],testset[,2])) # nรบmero de filmes A = list("Animation"=1,"Adventure"=2,"Comedy"=3,"Action"=4,"Drama"=5,"Thriller"=6,"Crime"=7,"Romance"=8,"Children's"=9,"Documentary"=10,"Sci-Fi"=11,"Horror"=12,"Western"=13,"Mystery"=14,"Film-Noir"=15,"War"=16,"Musical"=17,"Fantasy"=18) genres = matrix(rep(0,18*nmovies),18,nmovies) # generos #construรงรฃo da matriz gรฉnero vs filme for(i in 1:nmovies) { g = strsplit(genreset[i,3],split = "|" , fixed = TRUE )[[1]] for(j in 1:length(g)) { value = as.numeric(A[g[j]]) genres[value,i]=1; } } #Calculo das similiraridades entre os filmes respeito a seus generos con o mรฉtodo Jaccard library(proxy) sim = simil(t(genres), method="Jaccard") sim = as.matrix(sim) sim[is.na(sim)==T] = 0 return (sim) } computeTFIDF <- function(row) { # TF - IDF df = sum(row[1:3564] > 0) w = rep(0, length(row)) w[row > 0] = (1 + log2(row[row > 0])) * log2(3564/df) return(w) } FBC.metadados.sim <- function(dataset,testset,reviewset) { dataset = as.matrix(dataset) testset = as.matrix(testset) reviewset = as.matrix(reviewset) testset = testset[,2:3] nmovies = max(c(dataset[,2],testset[,2])) # nรƒยบmero de filmes library(tm) library(SnowballC) reviews = c() for(i in 1:nmovies) { reviews = c(reviews , paste(reviewset[which(as.numeric(reviewset[,1]) == i),2],collapse = " ")) # Concatenando todos os textos } reviewList = as.list(reviews) # Transformando o vetor para uma lista nDocs = length(reviewList) reviewList = VectorSource(reviewList) corpus = Corpus(reviewList) corpus = tm_map(corpus, removePunctuation) # Tokenizaรƒยงรƒยฃo corpus = tm_map(corpus, content_transformer(tolower))# Normalizaรƒยงรƒยฃo de termos corpus = tm_map(corpus, stripWhitespace) # Normalizaรƒยงรƒยฃo de termos tdm = TermDocumentMatrix(corpus, control=list(stopwords=TRUE,stemWords=TRUE,wordLengths=c(1,15))) 
#Remoรƒยงรƒยฃo de stopwords #Radicalizaรƒยงรƒยฃo m = as.matrix(tdm) # matrix do vocabulario vs termos n = t(apply(m, 1, FUN=computeTFIDF)) # fazer TF - IDF n = scale(n, center=FALSE, scale=sqrt(colSums(n^2))) # normalizaรƒยงรƒยฃo sim = t(n) %*% n # Calculo das similaridades return (sim) } HM.model <- function(dataset,testset,genreset,reviewset) { users = dataset[,1] #ID dos usuรƒยกrios movies = dataset[,2] #ID dos filmes ratings = dataset[,3] # Ratings nusers = max(c(dataset[,1],testset[,2])) # nรƒยบmero de usuarios nmovies = max(c(dataset[,2],testset[,3])) # nรƒยบmero de filmes scores = matrix(rep(0,nusers*nmovies),nusers,nmovies) # interacciรƒยณn usuario filme for(i in 1:length(users)) scores[users[i],movies[i]] = ratings[i] #construรƒยงao da matriz de interaรƒยงรƒยฃo usuario filme sim.genres = FBC.genres.sim(dataset,testset,genreset) sim.metadatos = FBC.metadados.sim(dataset,testset,reviewset) sim = 0.385*sim.genres+0.615*sim.metadatos model = list(score = scores,sim = sim) return (model) } HM.predict <- function(model, user , movie, K) { simil = as.matrix(model$sim) score = as.matrix(model$score) similar.movies = order(-simil[movie,])# Ordenando as similiaridades de um filme en forma decrescente rated.movies = which(score[user,] > 0) # Escolher os items que o usuario avalio most.similar.rated = intersect(similar.movies, rated.movies)[1:min(K,length(rated.movies))] #interseรƒยงรƒยฃo #Calculo de la prediรƒยงรƒยฃo sumSim = 0 sumWeight = 0 if(is.na(most.similar.rated[1])) {#Se la interseรƒยงรƒยฃo e vacia retorna a media return (3.603814) } #Calculo de prediรƒยงรƒยฃo con os k vizinhos mais proximos for(j in most.similar.rated) { sumSim = sumSim + simil[movie, j] sumWeight = sumWeight + simil[movie, j] * score[user, j] } sumSim=sumSim+1e-12 # Em caso sumSim seja 0 return(sumWeight/sumSim) } HM.test <- function(model,testset,vizinhos,name) { testset = as.matrix(testset[,2:3]) testUser = testset[,1] #Usuarios testMovie = testset[,2] #Filmes tam = length(testUser) ids = 
(1:(tam)) ids = ids-1 ratings = rep(0,tam) #vetor para os ratings for ( i in 1:tam) { ratings[i] = HM.predict(model,testUser[i],testMovie[i],vizinhos) #prediรงรฃo } my.dataset <- data.frame(id = ids, rating = ratings) #Criaรงรฃo de um dataframe write.csv(my.dataset,name,row.names=F) #Exportacion } HM.pretest <- function(model,testset) { testset = as.matrix(testset[,2:4]) testUser = testset[,1] #Usuarios testMovie = testset[,2] #Filmes tam = length(testUser) ids = (1:(tam)) ids = ids-1 ratings = rep(0,tam) #vetor para os ratings error = c() for(i in 1:30) # testando os k vizinhos { for(j in 1:tam) { ratings[j] = HM.predict(model,testUser[j],testMovie[j],i) #prediรงรฃo } r = ratings; print(paste("HM_",i,sep="")) e = RMSE(r,testset[,3]) #calculo de Error print(e) error = c(error,e) # lista de errors } return (error/sum(error)) # normalizaรงรฃo }
add1d2008f6f729425bb169d467493991db2762d
84ddf0c12885c83d2c7a32791447f8a56fbb8fa2
/Data_Analysis/downloading file from url.R
5aceb08134df002c419d30691658602a353d3ed6
[]
no_license
Haineycf/R
60fb12c7c12583908663247ccd550a413cbf99f0
468a2c74649cc8a424b86061a92971a108eae5a0
refs/heads/master
2021-01-13T14:47:57.754348
2017-12-11T23:35:12
2017-12-11T23:35:12
76,572,656
0
0
null
null
null
null
UTF-8
R
false
false
221
r
downloading file from url.R
# download a file from a url pate fileUrl <- "https://data.baltimorecity.gov/api/views/dz54-2aru/rows.csv?accessType=DOWNLOAD" download.file(fileUrl, destfile = "./data/cameras.csv", method = "curl") list.files("./data")
8bda21fa5b25d154f6f812767c95994e4cc025f4
04983b845e7fbde889d642890935b8e397cc41ad
/01 carga y limpieza.R
2c71132c448786ecc9afb0bc2292050745079546
[]
no_license
pmtempone/DM_CUP
a0a30ae4d31768443cd84171caeccaf2c03345e9
47ba3b065652c4bff7cf406da71e51906d9b55ff
refs/heads/master
2021-01-19T13:47:32.960958
2017-05-14T18:54:16
2017-05-14T18:54:16
88,108,996
0
0
null
null
null
null
UTF-8
R
false
false
2,416
r
01 carga y limpieza.R
library(ggplot2) library(dplyr) library(data.table) library(dtplyr) library(funModeling) library(doMC) train <- read.delim("/Volumes/Disco_SD/Datamining Cup/DMC_2017_task/train.csv",header = TRUE,sep = "|") items <- read.delim("/Volumes/Disco_SD/Datamining Cup/DMC_2017_task/items.csv",header = TRUE,sep = "|") check.integer <- function(N){ !grepl("[^[:digit:]]", format(N, digits = 20, scientific = FALSE)) } train <- train %>% left_join(items, by = "pid") # Tabla con los NA na_count <- data.frame(sapply(train, function(y) sum(length(which(is.na(y)))))) data_profile <- df_status(train) # na_count #100.687 # CONSTRUCCIร“N DE VARIABLES #Construir units train <- train %>% mutate(units = revenue/price) #Contruir dรญa de la semana train <- train %>% mutate(dia_semana = day %% 7) #Contruir diferencia porcentual con el competidor train <- train %>% mutate(compar_compet = (competitorPrice - price) / price) #Contruir la comparaciรณn con el precio de referencia train <- train %>% mutate(compar_ref = (rrp - price) / price) train <- train %>% mutate(price_changed = !(check.integer(train$revenue/train$price))) distinct_days = train %>% distinct(day) train$orden_dia_producto <- 0 #Porcentaje de acciones que terminan en compra prop.table(table(train$order)) #Ad Flag presente significa que se vende mรกs prop.table(table(train$adFlag,train$order),1) #Availability: Availability 4 casi no se vende!!! A menor avail. mรกs se vende prop.table(table(train$availability,train$order),1) #La comparaciรณn con el precio del competidor no parece influyente prop.table(table(train$compar_compet > 0,train$order),1) #unit. Parece determinante prop.table(table(train$unit,train$order),1) #generico. Se vende mucho mรกs. prop.table(table(train$genericProduct,train$order),1) #sales index. No se quรฉ es. prop.table(table(train$salesIndex,train$order),1) #category. Es factor. Demasiados levels. train$category <- as.factor(train$category) prop.table(table(train$category,train$order),1) #Campaign index. 
NA se parece mucho a B. prop.table(table(train$campaignIndex,train$order),1) # Podrรญa ser significativa. prop.table(table(train$dia_semana,train$order),1) #La comparaciรณn con el precio del competidor no parece influyente prop.table(table(train$orden_dia_producto,train$order),1) #Campaign index. NA se parece mucho a B. prop.table(table(train$campaignIndex,train$order),1)
f844c116bdf377d9221b96dd66ca3fc8bb49f167
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/FateID/examples/reclassify.Rd.R
846f2cfa22078363eb883e2d0a2c858436b4766a
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
258
r
reclassify.Rd.R
library(FateID) ### Name: reclassify ### Title: Reclassification of cells ### Aliases: reclassify ### ** Examples x <- intestine$x y <- intestine$y tar <- c(6,9,13) rc <- reclassify(x,y,tar,z=NULL,nbfactor=5,use.dist=FALSE,seed=NULL,nbtree=NULL,q=.9)
4bde17e4d3eb3849b774254859dd06a45245165b
d3681eaac1eeb5dea3f30aabeb21029d7ad8feaf
/inst/doc/examples/ssqtest.R
e75f009aa24eb5596f18bf19037774b60358bcc5
[]
no_license
cran/optimx
01f3b3546a1c4f0780063d8f541d8f2680830f9e
675f08649655da273365f1efcc0996b39dc8bb46
refs/heads/master
2023-08-17T00:12:18.490641
2023-08-14T07:10:12
2023-08-14T08:30:59
17,698,097
3
7
null
null
null
null
UTF-8
R
false
false
665
r
ssqtest.R
# ssqbtest.R -- a simple sum of squares showing differences between # opm() and optimx() ## author: John C. Nash rm(list=ls()) require(optimx) sessionInfo() ssqb.f<-function(x){ nn<-length(x) yy <- 1:nn f<-sum((yy-x)^2) f } ssqb.g <- function(x){ nn<-length(x) yy<-1:nn gg<- 2*(x - yy) } ssqb.h <- function(x){ nn<-length(x) hh<- 2*diag(nn) } xx <- rep(pi, 4) all4b <- opm(xx, ssqb.f, ssqb.g, hess=ssqb.h, method="ALL") summary(all4b, order=value) all4bx <- optimx(xx, ssqb.f, ssqb.g, control=list(all.methods=TRUE)) summary(all4bx, order=value) cat("\n\nShow structure differences in solution of opm and optimx\n\n") str(all4b) str(all4bx)
841f0a19e56b7d7335dfb2c81e84b1f773dbc862
2802979852f33dc4336c0e0fbc6a601a928efc5e
/R/cutoffs.R
0becf27bef4f1b298381456d8fe2c2497bc785bc
[]
no_license
cran/netgwas
05ee21591f4bc89b295b4d7d6754ec9fb5cc7225
e661e37640b335d4fa515f03411e08bb12b795fa
refs/heads/master
2023-08-31T22:02:45.223899
2023-08-07T14:40:02
2023-08-07T16:35:15
112,773,132
3
2
null
null
null
null
UTF-8
R
false
false
804
r
cutoffs.R
#-------------------------------------------------------------------------------# # Package: Network-Based Genome-Wide Association Studies # # Author: Pariya Behrouzi # # Emails: <pariya.Behrouzi@gmail.com> # # Date: Nov 21th 2017 # #-------------------------------------------------------------------------------# cutoffs = function(y){ p<-ncol(y) n<-nrow(y) k<-unique(sort(unlist(y))) n.levels<-length(k) q<-matrix(nrow=p,ncol=n.levels) for(i in 1:p){ X=factor(y[,i],levels=k) No<-tabulate(X, nbins=n.levels) q[i,]<-qnorm(cumsum(No)/n) } q[ ,n.levels] <- Inf q<-cbind(-Inf,q) return(q) }
a594795b8d3389bdb13c9367c70b1cb95fcbfcc5
3134e07d1cf55e5ee931f036c67c8749e9682428
/R/camel.tiger.clime.mfista.R
0655813888e841232a0f2717114b7d64be9ecdb7
[]
no_license
cran/camel
462474e8df3bde111fae225d4067741ab852e063
1b480116cd340bfad76b5a411c05a02acc1a3574
refs/heads/master
2020-12-24T12:02:01.521103
2013-09-09T00:00:00
2013-09-09T00:00:00
17,694,927
1
0
null
null
null
null
UTF-8
R
false
false
2,223
r
camel.tiger.clime.mfista.R
#----------------------------------------------------------------------------------# # Package: camel # # camel.tiger.clime.hadm(): Coordinate descent method for sparse precision matrix # # estimation # # Author: Xingguo Li # # Email: <xingguo.leo@gmail.com> # # Date: Aug 23th, 2013 # # Version: 0.1.0 # #----------------------------------------------------------------------------------# camel.tiger.clime.mfista <- function(Sigma, d, maxdf, mu, lambda, shrink, prec, max.ite){ d_sq = d^2 Y = diag(d) # lambda = lambda-shrink*prec nlambda = length(lambda) L = eigen(Sigma)$values[1]^2 icov = array(0,dim=c(d,d,nlambda)) ite.ext = rep(0,d*nlambda) obj = array(0,dim=c(max.ite,nlambda)) runt = array(0,dim=c(max.ite,nlambda)) x = array(0,dim=c(d,maxdf,nlambda)) col_cnz = rep(0,d+1) row_idx = rep(0,d*maxdf*nlambda) begt=Sys.time() str=.C("tiger_clime_mfista", as.double(Y), as.double(Sigma), as.double(icov), as.integer(d), as.double(mu), as.integer(ite.ext), as.double(lambda), as.integer(nlambda), as.integer(max.ite), as.double(prec), as.double(L), as.double(x), as.integer(col_cnz), as.integer(row_idx),PACKAGE="camel") runt1=Sys.time()-begt ite.ext = matrix(unlist(str[6]), byrow = FALSE, ncol = nlambda) obj = 0 icov_list = vector("list", nlambda) icov_list1 = vector("list", nlambda) for(i in 1:nlambda){ icov_i = matrix(unlist(str[3])[((i-1)*d_sq+1):(i*d_sq)], byrow = FALSE, ncol = d) icov_list1[[i]] = icov_i icov_list[[i]] = icov_i*(abs(icov_i)<=abs(t(icov_i)))+t(icov_i)*(abs(t(icov_i))<abs(icov_i)) obj[i] = sum(abs(icov_i)) } x = unlist(str[12]) col_cnz = unlist(str[13]) row_idx = unlist(str[14]) return(list(icov=icov_list, icov1=icov_list1,ite=ite.ext, obj=obj,runt=runt1, x=x, col_cnz=col_cnz, row_idx=row_idx)) }
a14c3126a4d41f132360fa1fda2ba8ccbad17140
16b0ef0cfdebe10c0c37207f75c1c457dd452290
/man/iascagpcp.Rd
08f34c376cbd749e3959196876e188d664a14110
[]
no_license
cran/mbgraphic
ad2c5b907e3dd4c026d6fff41d16212ca05f5f52
15acaaa26de781203c6e80c53d2ae8d4d83fdb57
refs/heads/master
2020-12-30T15:42:12.746005
2019-04-28T18:20:03
2019-04-28T18:20:03
91,166,050
0
0
null
null
null
null
UTF-8
R
false
false
1,486
rd
iascagpcp.Rd
\name{iascagpcp} \alias{iascagpcp} \title{ Interactive parallel coordinate plots for exploring scagnostics results } \description{ An interactive parallel coordinate plot for exploring scagnostics results programmed with the package \pkg{shiny}. If \code{sdfdata} is generated by function \code{\link{sdf}}, \emph{Outliers} and \emph{Exemplars} can be explored separately. Selections within the parallel coordinate plot can be made by drawing boxes on the axes around the chosen line. } \usage{ iascagpcp(sdfdata) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{sdfdata}{ A list of class \code{"sdfdata"}. } } \details{ For scaling the three options 'std' (every scagnostic individually by subtraction of mean and division by standard deviation), 'uniminmax' (every scagnostic individually to unit interval) and 'globalminmax' (no scaling) can be used. See also \code{\link[GGally]{ggparcoord}}. } \value{ A shiny app object. } \references{ W. Chang, J. Cheng, J. Allaire, Y. Xie and J. McPherson (2016) shiny: Web Application Framework for R. \url{https://cran.r-project.org/package=shiny}. B. Schloerke et al. (2016) GGally: Extension to ggplot2. \url{https://cran.r-project.org/package=GGally} } \author{ Katrin Grimm } \seealso{ \code{\link{sdf}}, \code{\link{scag2sdf}} } \examples{ \dontrun{ data(Election2005) # some demographic/economic variables sdfdata <- sdf(Election2005[,5:40]) iascagpcp(sdfdata) } } \keyword{interactive apps}
22cb5863c752aa3b02ccd130d861634e56ededd4
3f887566cd63a82f13d2aae590cdea252f2532db
/backup/HotelR.R
3c9a4c299266cd28d558b4a2d9fa3318bfc6f1e9
[]
no_license
seaireal/hotel_rating
b4a9a7bbdee0b76caad3b4d6fd29a71cf9eba439
3e6ad9de14d05dfe0870f256c3c22865a497d585
refs/heads/master
2020-08-09T11:56:24.236767
2019-12-30T17:05:10
2019-12-30T17:05:10
214,081,977
0
0
null
null
null
null
UTF-8
R
false
false
879
r
HotelR.R
# set working directory setwd("/Users/huiwang/Downloads") HotelReviews = read.csv('Hotel_Reviews.csv') # choose and rename the predictors HotelR = cbind(HotelReviews[,c(2,3,4,5,8,9,11,12,13,15,16,17)]) names(HotelR) = c("addscore", "date", "score", "hotel", "negative", "review", "positive", "reviewers", "avescore", "day", "lat", "lng") # deal with the last predictor to take it numeric without "days" HotelR$day=as.numeric(HotelR$day) head(HotelR) # missing data missing = is.na(HotelR) sum(missing) dim(HotelR) str(HotelR) # Average the observations based on the hotel name Hotel = aggregate(cbind(addscore, score, negative, review, positive, reviewers, avescore, day, lat, lng) ~ hotel, HotelR, mean) head(Hotel) # try regression~ lmod = lm(score ~ addscore + negative + review + positive + reviewers + day + lat + lng, Hotel) summary(lmod)
590b533ed61bf538e79ede177fa5dad0cf5f5e06
b0f7d5d2489e761646c6363d2958e9d3a1b75747
/Analytics Edge/NUnit5_Assignment3.R
3d9cef5a6fc89af3c480882ea73d789400487be9
[]
no_license
krishnakalyan3/Edge
4cd5cb55677ed662dda4d4acdf7fba4c7e813735
ad070911bd36c26ff7c612b4adc4150a53579676
refs/heads/master
2021-01-21T04:46:50.714837
2016-07-03T10:48:34
2016-07-03T10:48:34
53,034,825
0
0
null
null
null
null
UTF-8
R
false
false
4,597
r
NUnit5_Assignment3.R
## Analytics Edge, Unit 5, Assignment 3: spam classification on emails.csv.
## Builds a document-term matrix with the tm package, then fits and
## evaluates logistic regression (spamLog), CART (spamCART) and random
## forest (spamRF) models on train/test splits.  Inline "# <number>"
## comments record the answers observed for each assignment question.
## NOTE(review): Corpus()/tm_map() need library(tm), rpart() needs
## library(rpart) and prp() needs library(rpart.plot); none are loaded in
## this file -- presumably attached interactively before running.  Confirm.
getwd()
setwd("/Users/krishna/MOOC/Edge/Data")
email = read.csv("emails.csv",stringsAsFactors=FALSE)

# Problem 1.1 - Loading the Dataset
str(email)
# 'data.frame': 5728 obs. of 2 variables:

# Problem 1.2 - Loading the Dataset
sum(email$spam)
# 1368

# Problem 1.3 - Loading the Dataset
# subject

# Problem 1.4 - Loading the Dataset
# Yes

# Problem 1.5 - Loading the Dataset
max(nchar(email$text))

# Problem 1.6 - Loading the Dataset
which.min(nchar(email$text))

# Problem 2.1 - Preparing the Corpus
# Standard tm preprocessing pipeline: lower-case, strip punctuation,
# remove English stopwords, stem, then keep terms in >= 5% of documents.
sparse = 1 - 0.05
corpus = Corpus(VectorSource(email$text))
corpus = tm_map(corpus , tolower)
corpus = tm_map(corpus, PlainTextDocument)
corpus = tm_map(corpus, removePunctuation)
corpus = tm_map(corpus, removeWords, stopwords('english'))
corpus = tm_map(corpus, stemDocument, language = 'english')
dtm = DocumentTermMatrix(corpus)
spdtm = removeSparseTerms(dtm, sparse)
spdtm
# 28687

# Problem 2.2 - Preparing the Corpus
# 330

# Problem 2.3 - Preparing the Corpus
emailsSparse = as.data.frame(as.matrix(spdtm))
colnames(emailsSparse) = make.names(colnames(emailsSparse))
which.max(colSums(emailsSparse))
#
# enron
# 92

# Problem 2.4 - Preparing the Corpus
emailsSparse$spam = email$spam
ham = subset(emailsSparse,spam ==0)
sort(colSums(ham))
# 6

# Problem 2.5 - Preparing the Corpus
spam = subset(emailsSparse,spam ==1)
(sort(colSums(spam)) )
#3

# Problem 3.1 - Building machine learning models
emailsSparse$spam = as.factor(emailsSparse$spam)
library(caTools)
set.seed(123)
spl = sample.split(emailsSparse$spam, 0.7)
train = subset(emailsSparse, spl ==T)
test = subset(emailsSparse, spl ==F)
spamLog = glm(spam ~ . , data = train , family = "binomial")
spamCART = rpart(spam ~ . , data = train, method="class")
library(randomForest)
spamRF = randomForest(spam ~ . , data = train)
# Training-set predictions for each model
predlog = predict(spamLog)
predlog
predCART = predict(spamCART)
predCART
predRF = predict(spamRF,train)
predRF
# Count fitted log-odds near 0, near 1, and in between
table(predlog < 0.00001)
table(predlog > 0.99999)
table(predlog >= 0.00001 & predlog <= 0.99999)

# Problem 3.2 - Building Machine Learning Models
summary(spamLog)
# 0

# Problem 3.3 - Building Machine Learning Models
prp(spamCART)
# 2

# Problem 3.4 - Building Machine Learning Models
# Confusion matrix and accuracy for spamLog on the training set
cm =table(train$spam,predlog>=0.5)
TN = cm[1,1]
TP = cm[2,2]
FN = cm[2,1]
FP = cm[1,2]
Acc = (TP + TN)/sum(cm)
Acc
# 0.9990025

# Problem 3.5 - Building Machine Learning Models
library(ROCR)
predROCR = prediction(predlog,train$spam)
prefROCR = performance(predROCR,"tpr","fpr")
plot(prefROCR, colorize = T )
performance(predROCR,"auc")@y.values
# 0.9999959

# Problem 3.6 - Building Machine Learning Models
cm =table(train$spam,predCART[,2]>=0.5)
TN = cm[1,1]
TP = cm[2,2]
FN = cm[2,1]
FP = cm[1,2]
Acc = (TP + TN)/sum(cm)
Acc
# 0.942394

# Problem 3.7 - Building Machine Learning Models
library(ROCR)
predROCR = prediction(predCART[,2],train$spam)
prefROCR = performance(predROCR,"tpr","fpr")
plot(prefROCR, colorize = T )
performance(predROCR,"auc")@y.values

# Problem 3.8 - Building Machine Learning Models
cm =table(train$spam,predRF>=0.5)
TN = cm[1,1]
TP = cm[2,2]
FN = cm[2,1]
FP = cm[1,2]
Acc = (TP + TN)/sum(cm)
Acc

# Problem 3.9 - Building Machine Learning Models
library(ROCR)
predROCR = prediction(predRF,train$spam)
prefROCR = performance(predROCR,"tpr","fpr")
plot(prefROCR, colorize = T )
performance(predROCR,"auc")@y.values
# 0.9999928

# Problem 4.1 - Evaluating on the Test Set
cm =table(test$spam, predict(spamLog, test) >=0.5)
TN = cm[1,1]
TP = cm[2,2]
FN = cm[2,1]
FP = cm[1,2]
Acc = (TP + TN)/sum(cm)
Acc
# 0.9511059

# What is the testing set AUC of spamLog?
predROCR = prediction(predict(spamLog, test),test$spam)
prefROCR = performance(predROCR,"tpr","fpr")
plot(prefROCR, colorize = T )
performance(predROCR,"auc")@y.values
# 0.9767994

# Problem 4.3 - Evaluating on the Test Set
cm =table(test$spam, predict(spamCART, test)[,2] >=0.5)
TN = cm[1,1]
TP = cm[2,2]
FN = cm[2,1]
FP = cm[1,2]
Acc = (TP + TN)/sum(cm)
Acc
# 0.9394645

# Problem 4.4 - Evaluating on the Test Set
predROCR = prediction(predict(spamCART, test)[,2],test$spam)
prefROCR = performance(predROCR,"tpr","fpr")
plot(prefROCR, colorize = T )
performance(predROCR,"auc")@y.values

# Problem 4.5 - Evaluating on the Test Set
cm =table(test$spam, predict(spamRF, test) >=0.5)
TN = cm[1,1]
TP = cm[2,2]
FN = cm[2,1]
FP = cm[1,2]
Acc = (TP + TN)/sum(cm)
Acc
# 0.9743888

# Problem 4.6 - Evaluating on the Test Set
predROCR = prediction(predict(spamRF, test),test$spam)
prefROCR = performance(predROCR,"tpr","fpr")
plot(prefROCR, colorize = T )
performance(predROCR,"auc")@y.values
64c909034486b62c9ff11fad2c241fd9a3159dbd
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/metafor/examples/dat.pritz1997.Rd.R
0bbdf83491659ce9aaad1b3bcced53a569f79e1f
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,242
r
dat.pritz1997.Rd.R
## Example script extracted from the Rd file of the 'dat.pritz1997'
## dataset in the metafor package (auto-generated "### ** Examples"
## block) -- meta-analysis of studies on hyperdynamic therapy for
## treating cerebral vasospasm.
library(metafor)

### Name: dat.pritz1997
### Title: Studies on the Effectiveness of Hyperdynamic Therapy for
###   Treating Cerebral Vasospasm
### Aliases: dat.pritz1997
### Keywords: datasets

### ** Examples

### load data
dat <- get(data(dat.pritz1997))

### computation of "weighted average" in Zhou et al. (1999), Table IV
# escalc(measure="PR") adds raw proportions (yi) and their sampling
# variances (vi); add=0 disables the continuity correction.
dat <- escalc(measure="PR", xi=xi, ni=ni, data=dat, add=0)
# sample-size-weighted mean proportion and its standard error
theta.hat <- sum(dat$ni * dat$yi) / sum(dat$ni)
se.theta.hat <- sqrt(sum(dat$ni^2 * dat$vi) / sum(dat$ni)^2)
# 95% Wald confidence interval
ci.lb <- theta.hat - 1.96*se.theta.hat
ci.ub <- theta.hat + 1.96*se.theta.hat
round(c(estimate = theta.hat, se = se.theta.hat, ci.lb = ci.lb, ci.ub = ci.ub), 4)

### this is identical to a FE model with sample size weights
rma(yi, vi, weights=ni, method="FE", data=dat)

### random-effects model with raw proportions
dat <- escalc(measure="PR", xi=xi, ni=ni, data=dat)
res <- rma(yi, vi, data=dat)
predict(res)

### random-effects model with logit transformed proportions
dat <- escalc(measure="PLO", xi=xi, ni=ni, data=dat)
res <- rma(yi, vi, data=dat)
# back-transform the pooled logit to the proportion scale
predict(res, transf=transf.ilogit)

### mixed-effects logistic regression model
res <- rma.glmm(measure="PLO", xi=xi, ni=ni, data=dat)
predict(res, transf=transf.ilogit)
a21ac82d8b3a6842998aed97e7384d0ae408a0b8
dbff7481385f4c5e7ae9dbb726835de7970bb22c
/R/daqDarkThemeProvider.R
36055f477cca785e4e4c38a9ab8f959b847f2210
[ "MIT" ]
permissive
emilhe/dash-daq
f2056ef37eab04ba39c954c35faf96da2b7ebe88
34140134f740e5f5186af8e4727de9a5a0cbe917
refs/heads/master
2022-12-03T15:50:19.026762
2020-08-27T06:59:42
2020-08-27T06:59:42
288,721,296
0
0
MIT
2020-08-19T12:06:54
2020-08-19T12:06:53
null
UTF-8
R
false
false
510
r
daqDarkThemeProvider.R
# AUTO GENERATED FILE - DO NOT EDIT
# (Reviewed: behaviour identical to the generator output; only formatting,
# local names and comments differ.  Regenerating will overwrite this file.)

# Dash wrapper for the dash_daq DarkThemeProvider React component.
# Builds a 'dash_component' list describing the component: props that were
# actually supplied, plus the component type/namespace/package metadata.
daqDarkThemeProvider <- function(children = NULL, theme = NULL) {
  supplied <- list(children = children, theme = theme)
  # Keep only the props the caller actually set; slots left at their NULL
  # defaults are dropped so they are not serialised for the front end.
  supplied <- supplied[!vapply(supplied, is.null, logical(1))]
  structure(
    list(
      props = supplied,
      type = "DarkThemeProvider",
      namespace = "dash_daq",
      propNames = c("children", "theme"),
      package = "dashDaq"
    ),
    class = c("dash_component", "list")
  )
}
68a93ee5a6390ff4cda4fd1b0bc0aeb6de1a2b6e
242730ec15df34c6d3b4850831c44b5bfa13f3ec
/demos/2020-10-30.R
c8bd1054a8dd80069172a2234782644595524b0c
[]
no_license
tdhock/cs499-599-fall-2020
bcf4d81e96b2685d7fcc22e30c9f95eee4ea959e
b270b5f94a3e36c57313d392176a1b6de455b82a
refs/heads/master
2023-07-10T12:12:41.320347
2021-08-18T18:07:16
2021-08-18T18:07:16
255,656,952
1
5
null
null
null
null
UTF-8
R
false
false
5,204
r
2020-10-30.R
## Course demo (2020-10-30): penalized model selection / evaluation for
## changepoint detection with Fpsn (dynamic programming) on the
## neuroblastoma data, ending in an interactive animint ROC plot.
## Requires: data.table, jointseg, penaltyLearning, animint2 and the
## neuroblastoma data package.
loss.dt.list <- list()
change.dt.list <- list()
## penalized model selection / evaluation with Fpsn (dynamic
## programming).
data(neuroblastoma, package="neuroblastoma")
library(data.table)
all.profiles <- data.table(neuroblastoma$profiles)
all.labels <- data.table(neuroblastoma$annotations)
# Count positive/negative labels per profile (used to pick a profile with
# a balanced label set).
label.counts <- all.labels[, .(
  positive.labels=sum(annotation=="breakpoint"),
  negative.labels=sum(annotation=="normal")
), by=.(profile.id)]
label.counts[positive.labels==3 & negative.labels==3]
profile.id.show <- "2"
labels.show <- all.labels[profile.id==profile.id.show]
profiles.show <- all.profiles[
  profile.id==profile.id.show & chromosome %in% labels.show$chromosome]
# Incremental computation: only process labels not already present in
# loss.dt.list (supports re-sourcing this script interactively).
label.i.todo <- 1:nrow(labels.show)
label.i.done <- as.integer(unique(sub(" .*", "", names(loss.dt.list))))
label.i.new <- label.i.todo[! label.i.todo %in% label.i.done]
max.segments <- 10
for(label.i in label.i.new){
  cat(sprintf("label.i=%d\n", label.i))
  one.label <- labels.show[label.i]
  select.dt <- one.label[, .(profile.id, chromosome)]
  pro.dt <- all.profiles[select.dt, on=names(select.dt)]
  ## Code from demos about dynamic programming for changepoint
  ## detection (2020-10-16).
  # Cap the number of segments at the number of data points.
  this.max <- if(nrow(pro.dt) < max.segments){
    nrow(pro.dt)
  }else{
    max.segments
  }
  optimal.models <- jointseg::Fpsn(pro.dt$logratio, this.max)
  # Build one segment table per model size from the optimal ends matrix.
  segs.dt.list <- list()
  for(n.segs in 1:this.max){
    end <- optimal.models$t.est[n.segs, 1:n.segs]
    start <- c(1, end[-length(end)]+1)
    segs.dt.list[[paste(n.segs)]] <- data.table(start, end)[, .(
      segments=n.segs,
      mean=mean(pro.dt$logratio[start:end]),
      algorithm="DP"
    ), by=.(start, end)]
  }
  segs.dt <- do.call(rbind, segs.dt.list)
  # Convert data-point indices to genomic positions.
  for(col.name in c("start", "end")){
    col.value <- segs.dt[[col.name]]
    set(segs.dt, j=paste0(col.name, ".pos"), value=pro.dt$position[col.value])
  }
  segs.dt[, end.before := c(NA, end.pos[-.N]), by=.(segments) ]
  # A changepoint is drawn halfway between the end of one segment and the
  # start of the next.
  change.dt <- data.table(select.dt, segs.dt[1 < start])
  change.dt[, changepoint := (start.pos+end.before)/2]
  this.loss.dt <- data.table(
    segments=1:this.max,
    loss=optimal.models$J.est)
  penalty <- 0.12
  this.loss.dt[, crit.value := loss + penalty*segments]
  loss.dt.list[[paste(label.i)]] <- data.table(
    select.dt, this.loss.dt)
  change.dt.list[[paste(label.i)]] <- change.dt[, data.table(
    select.dt, changepoint, segments)]
}
change.dt <- do.call(rbind, change.dt.list)
loss.dt <- do.call(rbind, loss.dt.list)
## Compute model selection function, which maps penalty (lambda)
## values to model complexity (segments) values.
all.model.selection <- loss.dt[, penaltyLearning::modelSelection(
  .SD, "loss", "segments"), by=.(profile.id, chromosome)]
# One constant predicted penalty (log lambda = log 10) per problem.
pred.penalty.dt <- loss.dt[, data.table(
  pred.log.lambda=log(10)
), by=.(profile.id, chromosome)]
## Compute label error, fp/fn for each selected model.
error.list <- penaltyLearning::labelError(
  models=all.model.selection,
  labels=labels.show,
  changes=change.dt,
  problem.vars=c("profile.id", "chromosome"),
  change.var="changepoint",
  model.vars="segments")
error.list$model.errors[, .(
  profile.id, chromosome, min.lambda, max.lambda, segments, fp, fn)]
## Compute ROC curve, FPR/TPR for every prediction threshold (default
## prediction threshold is zero).
roc.list <- penaltyLearning::ROChange(
  error.list$model.errors, pred.penalty.dt,
  problem.vars=c("profile.id", "chromosome"))
roc.list$roc[, .(min.thresh, max.thresh, FPR, TPR, errors)]
## Visualize ROC curve.
library(animint2)
ggplot()+
  geom_path(aes(
    FPR, TPR),
    data=roc.list$roc)
ggplot()+
  geom_point(aes(
    position, logratio),
    data=profiles.show)+
  geom_rect(aes(
    xmin=min, xmax=max, fill=annotation),
    ymin=-Inf, ymax=Inf,
    alpha=0.5,
    data=labels.show)+
  facet_grid(chromosome ~ .)
# For 50 thresholds along the ROC curve, record which ROC point and which
# set of changepoints is selected, for the interactive linked plot below.
show.changes.list <- list()
show.roc.list <- list()
thresh.vec <- roc.list$roc[, seq(min(max.thresh), max(min.thresh), l=50) ]
for(thresh.i in seq_along(thresh.vec)){
  thresh <- thresh.vec[[thresh.i]]
  show.roc.list[[paste(thresh.i)]] <- data.table(
    thresh.i, roc.list$roc[min.thresh <= thresh & thresh < max.thresh])
  pred.penalty.dt[, new.log.lambda := pred.log.lambda + thresh]
  # Non-equi join: which model size is selected at this penalty?
  thresh.selected <- all.model.selection[pred.penalty.dt, on=.(
    profile.id, chromosome,
    min.log.lambda <= new.log.lambda,
    max.log.lambda > new.log.lambda)]
  thresh.changes <- change.dt[
    thresh.selected, nomatch=0L,
    on=.(profile.id, chromosome, segments)]
  if(nrow(thresh.changes)){
    show.changes.list[[paste(thresh.i)]] <- data.table(
      thresh.i, thresh.changes)
  }
}
show.changes <- do.call(rbind, show.changes.list)
show.roc <- do.call(rbind, show.roc.list)
# Interactive plot: clicking a ROC point updates the displayed changepoints.
animint(
  ggplot()+
    geom_path(aes(
      FPR, TPR),
      data=roc.list$roc)+
    geom_point(aes(
      FPR, TPR),
      clickSelects="thresh.i",
      size=5,
      alpha=0.55,
      data=show.roc),
  ggplot()+
    geom_point(aes(
      position, logratio),
      data=profiles.show)+
    geom_tallrect(aes(
      xmin=min, xmax=max, fill=annotation),
      alpha=0.5,
      data=labels.show)+
    geom_vline(aes(
      xintercept=changepoint),
      showSelected="thresh.i",
      data=show.changes)+
    facet_grid(chromosome ~ .)
)
9cffae9d756afbbb9cc163d55f5e2e2e605cba8a
f78cd948863b0d44fb817d10e28c52a885d425e5
/ValoresHorariosCompletosMeteorologicosAemet.R
8e3250b8982616806da7b1e5bdf73f02f6185aa7
[]
no_license
GuilleHM/TFM_GuillermoHuerta_MasterBigData_2019_UNIR
6044fdc94c32f01559b9ce1b64dac86c79b6172a
6126784ba90740eded8711bb22517463bc2eeb28
refs/heads/master
2020-07-10T17:02:08.893045
2019-09-15T11:08:40
2019-09-15T11:08:40
204,317,561
0
0
null
null
null
null
ISO-8859-1
R
false
false
4,031
r
ValoresHorariosCompletosMeteorologicosAemet.R
## Completes the csv exported from the "meteorologicos" collection of the
## "aemet" MongoDB database by inserting the missing hourly records, so the
## hourly wind-speed series can later be correlated with the FROGGIT
## station's measurements.  The records used here are from station
## "idema":"5972X" (SAN FERNANDO), the station closest to FROGGIT, although
## any station's records would work with this script.
## IMPORTANT NOTE: the input file must be encoded in ANSI format.
## NOTE(review): the original comments were in Spanish (and partially
## mojibake-damaged); they have been translated to English.  A few variable
## names carry the original garbled spelling and are kept byte-identical.

# Set the working directory
setwd("C:\\Users\\GuilleHM\\TFM\\OpendataAEMET")

# Package needed for date manipulation
library(lubridate)

# Load the data exported from the MongoDB collection
aemetorigin <- read.csv(file = "mongoexport_meteoro_5972X_VvFint_may19.csv", header = TRUE, sep = ",", dec = ".")

# Define and initialise the working variables
# ----------------------------------------------------------------
# Start date (the csv holds one month of values): year / month / day
Aรฑo <- substring(as.character(aemetorigin$fint[1]),1,4)
Mes <- substring(as.character(aemetorigin$fint[1]),6,7)
Dรญa <- substring(as.character(aemetorigin$fint[1]),9,10)
# Rolling timestamp used to detect whether a record is missing
FechaControl <- ISOdate(Aรฑo,Mes,Dรญa,00,00,00, tz="GMT")
# Original timestamps, parsed so they can be compared with FechaControl
FechaOriginal <- ymd_hms(aemetorigin$fint, tz ="GMT")
# Final table on which the analysis will be run; the columns are given the
# same values as those of the final table for the FROGGIT station.
aemetfinal <- data.frame(FechaFinal = FechaOriginal[1], VelFinal = aemetorigin$vv[1])
# n -> counts the hourly records missing from the original AEMET file
# m -> pointer for moving through the final AEMET table
# longitud -> number of records in the original AEMET file
n <- m <- 0
longitud <- length(FechaOriginal)
# ----------------------------------------------------------------

# Walk the original table and insert into the final table any missing
# hourly timestamps, with NA for the wind speed.
# NOTE(review): rbind() inside a loop copies the whole data.frame on every
# iteration (quadratic cost) -- acceptable for one month of hourly data,
# slow for longer series.
for (i in 1:longitud){
  if (i == 1) {
    FechaControl <- FechaControl + hours(1)
    next
  }
  if (FechaControl != FechaOriginal[i-n]){
    n <- n + 1
    aemetfinal <- rbind(aemetfinal, list(FechaControl, NA))
  } else{
    aemetfinal <- rbind(aemetfinal, list(FechaControl, aemetorigin$vv[i-n]))
  }
  FechaControl <- FechaControl + hours(1)
}

# Update m with the position from which to continue inserting records
m <- longitud - n + 1

# Repeat the insertions until no record remains to be incorporated into
# the final AEMET table (each pass may reveal further missing hours).
while (n != 0) {
  temp <- n
  for (i in 1:n){
    if (i == 1) {
      n <- 0
    }
    if (FechaControl != FechaOriginal[m]){
      aemetfinal <- rbind(aemetfinal, list(FechaControl, NA))
      n <- n + 1
    } else{
      aemetfinal <- rbind(aemetfinal, list(FechaControl, aemetorigin$vv[m]))
      m <- m + 1
    }
    FechaControl <- FechaControl + hours(1)
  }
  longitud <- longitud + temp
}

# Save the values to the file that will be used for the analyses
write.csv(aemetfinal, file = "SalidaScriptValoresHorariosCompletosAemetMay.csv", row.names = FALSE)

# Clean the environment by deleting all variables
# NOTE(review): rm(list = ls()) wipes the caller's workspace, an
# anti-pattern for scripts; kept unchanged to preserve behaviour.
rm(list=ls())
23a0d392cdf10dd070ea0a5ff2e8c10c6bc38b39
e279d4de1f3d9be6fcdd688f375e59535a44d18e
/cachematrix.R
60146d63b9753acad04883d6208d3fb3d1060066
[]
no_license
eniedling/ProgrammingAssignment2
4dbb0d96f5d6696a4ed09ed2a2af42a2f219dff6
92e21ac778b2ce9f91f510a6d99be09b1eeef7ec
refs/heads/master
2021-01-09T06:44:09.437791
2014-09-21T14:48:27
2014-09-21T14:48:27
null
0
0
null
null
null
null
UTF-8
R
false
false
1,605
r
cachematrix.R
## Caching wrapper around matrix inversion: makeCacheMatrix() builds a
## closure-based "matrix object" that can memoise its inverse, and
## cacheSolve() returns that inverse, reusing the cached value when one
## has already been computed.

## Construct the special "matrix": a list of accessors (set/get/
## setinverse/getinverse) closing over the matrix `x` and its cached
## inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(solve) cached_inverse <<- solve
  getinverse <- function() cached_inverse
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}

## Return the inverse of the special "matrix" created by
## makeCacheMatrix().  On a cache hit a message is printed and the stored
## inverse is returned; otherwise the inverse is computed with solve(),
## stored, and returned.  Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  memo <- x$getinverse()
  if (!is.null(memo)) {
    message("getting cached data")
    return(memo)
  }
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
0354759d1ee2df3184f70f4c9e6becc61b33f681
f28e53f5f9ad06bf51b69f45fd1425cf02c1709b
/man/granplot.Rd
2bc7d20aead8b881ab26b4334b4b8509f361b789
[]
no_license
gallonr/G2Sd
f42ac156bdab5dc1ded6b82563d864bd92245092
31325d40d14e2df08319ac66ddd9aed7c1f1686b
refs/heads/master
2023-07-04T23:08:17.638661
2020-03-06T16:43:48
2020-03-06T16:43:48
170,348,255
4
2
null
2021-08-25T13:05:39
2019-02-12T16:07:25
R
UTF-8
R
false
false
1,893
rd
granplot.Rd
\encoding{UTF-8}
\name{granplot}
\alias{granplot}
\title{
Histogram with a cumulative percentage curve
}
\description{
This function provides a histogram of the grain-size distribution together with a cumulative percentage curve.
}
\usage{
granplot(x, xc = 1, meshmin = 1, hist = TRUE, cum = TRUE, main = "",
  col.cum = "red", col.hist = "darkgray", cexname = 0.9,
  cexlab = 1.3, decreasing = FALSE)
}
\arguments{
  \item{x}{
A numeric matrix or data frame (see the shape of data(granulo))
}
  \item{xc}{
A numeric value or a numeric vector defining the columns to plot
}
  \item{meshmin}{
Defines the size of the smallest mesh size if it is 0 in the raw data
}
  \item{hist}{
If TRUE, display a histogram; if FALSE, do not display a histogram (applies only when a single column is plotted)
}
  \item{cum}{
If TRUE, display a cumulative percentage curve; if FALSE, do not display a cumulative percentage curve (applies only when a single column is plotted)
}
  \item{main}{
Add a title to the current plot
}
  \item{col.cum}{
Color in which the cumulative percentage curve will be drawn
}
  \item{col.hist}{
Color in which the histogram will be drawn
}
  \item{cexname}{
A numerical value giving the amount by which plotting text and symbols should be magnified relative to the default.
}
  \item{cexlab}{
A numerical value giving the amount by which axis labels should be magnified relative to the default.
}
  \item{decreasing}{
A logical value defining the ordering (increasing or decreasing)
}
}
\details{
The obtained graph is the one most commonly used by sedimentologists.
}
\value{
A histogram with a cumulative percentage curve
}
\author{
Regis K. Gallon (MNHN) \email{reg.gallon@gmail.com},
Jerome Fournier (CNRS) \email{fournier@mnhn.fr}
}
\seealso{
\code{\link[G2Sd]{grandistrib}}
}
\examples{
data(granulo)
granplot(granulo, xc = 1, hist = TRUE, cum = TRUE,
  main = "Grain-size Distribution", col.hist = "gray", col.cum = "red")
granplot(granulo, xc = 2:4, main = "Grain-size Distribution")
}
184d92fec62d54cc579ab80ab498baba0f5fee4d
313208dae9bd931bd221bfbbb53f97d29fa7433b
/man/GeneratRateMatrix.ProgressionGraph.Rd
e5952717c2339c4779f9f2c56b461bbbbf3e6d5e
[]
no_license
scientific-computing-solutions/badminton
23cf0a4ad7b14fc001108e5e2a98d2b4d081dbcb
a89625d15aa13d3f7ceac8676cc4aef42fb2a4aa
refs/heads/master
2020-12-25T13:44:47.001850
2016-11-23T19:43:24
2016-11-23T19:43:24
65,361,935
0
0
null
2016-11-23T19:43:25
2016-08-10T07:50:11
R
UTF-8
R
false
true
699
rd
GeneratRateMatrix.ProgressionGraph.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/progressionGraph.R \name{GeneratRateMatrix.ProgressionGraph} \alias{GeneratRateMatrix.ProgressionGraph} \title{Outputs a matrix of rates from the edge Data of a \code{ProgressionGraph} object} \usage{ GeneratRateMatrix.ProgressionGraph(object) } \arguments{ \item{object}{A \code{ProgressionGraph} object} } \value{ A Matrix of transition rates } \description{ The matrix that is produced is such that M[i,j] = the rate from node i to node j, where the nodes are numbered in creation order (\code{nodes(object$graph))}. If the transition rates are formula then these should be evaluated before calling this function }
12e629e67f8d35908ca4f9c0bddb656b71f47da8
304e0912b221eca50a1f511525e196f9958bed7f
/capture_url_information.r
225831eeb45e9e76811bbfac12a22052fba53a23
[]
no_license
thefactmachine/ckan_fun
52733d5863962b1c6e2a30e3c2251cb520c187c7
6b9f5ac0f6776a9a480df5d2b720b39f4b03ad78
refs/heads/master
2020-05-21T08:51:56.892383
2016-11-08T02:55:48
2016-11-08T02:55:48
69,430,255
0
0
null
null
null
null
UTF-8
R
false
false
1,193
r
capture_url_information.r
# Harvest metadata for every dataset published on data.gov.au's CKAN API:
# fetch the list of package ids, request the full record for each id, and
# save the results for later processing.
#
# NOTE(review): the original began with `rm(list = ls())`; it was removed
# because wiping the caller's workspace is a side effect a script should
# not have.
options(stringsAsFactors = FALSE)

library(jsonlite)
library(dplyr)
# library(httr)

# Read the json file and parse it into an R list.
# This is the top CKAN url, which returns a list of CKAN references to
# each data set; its length equals the number of data sets.
lst_ckan <- jsonlite::fromJSON("http://data.gov.au/api/3/action/package_list")

# The "result" element is the only one which is relevant.
vct_results <- lst_ckan$result

# Load in a function that returns some text for each url supplied.
source("fn_read_url.r")

# Main CKAN stem for data.gov.au; appending an id gives the per-dataset url.
str_html_stem <- "http://data.gov.au/api/3/action/package_show?id="

# We now have a vector of urls.
vct_urls <- paste0(str_html_stem, vct_results)

# Cycle through each url and save the resulting json object in a list,
# keyed by the url.  seq_along() (rather than 1:length(...)) is safe if
# the API ever returns zero ids.
lst_ckan_results <- list()
for (i in seq_along(vct_urls)) {
  lst_ckan_results[[vct_urls[i]]] <- fn_read_url(vct_urls[i])
  print(i)  # progress indicator for a long-running crawl
}

# ASSERT: number of list elements == number of urls (duplicate ids would
# collapse here).  stopifnot() now fails loudly instead of just printing
# TRUE/FALSE as the original did.
stopifnot(length(lst_ckan_results) == length(vct_results))

# Save the results for future processing.
save(lst_ckan_results, file = "results.rda")
38f4c3663d88276cc8a381463ac659b9f75c7ff0
c2d4519a1f951ac6b8acfa8810265334508ea20a
/man/BLRM.fit.mwg.Rd
59a6d51e71fd9a379db29afbc70a5bd96116f571
[]
no_license
lcw68/G3proj
ea56788ad44f9223426492b7416170821e9401a6
a419894ba9dc20e5c13568221d53157c238e4496
refs/heads/main
2023-04-24T03:48:54.759692
2021-05-06T17:10:00
2021-05-06T17:10:00
359,679,879
0
2
null
null
null
null
UTF-8
R
false
true
1,307
rd
BLRM.fit.mwg.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppWrapper.R \name{BLRM.fit.mwg} \alias{BLRM.fit.mwg} \title{Bayesian Logistic Regression Model (BLRM) training} \usage{ BLRM.fit.mwg( Y0, X0, PriorVar, propSD0, nMC = 1000, nBI = 250, thin = 5, seed = 1 ) } \arguments{ \item{Y0}{vector of responses} \item{X0}{covariate matrix} \item{PriorVar}{variance of prior distribution of beta} \item{propSD0}{vector of standard deviations for normal proposal density} \item{nMC}{number of MCMC samples} \item{nBI}{number of burn-in samples} \item{thin}{number of samples to skip over in thinning} \item{seed}{set seed for random number generation} } \value{ a nested list of beta samples, and beta acceptance rates } \description{ Performs Bayesian Logistic Regression Model training by sampling beta from posterior distribution with user specified parameters and data } \examples{ ## simulate data; set.seed(1); N = 100; p = 10; X = matrix(data = rnorm(N*p), nrow=N, ncol=p) beta_true = c(rep(1,p/2),rep(0,p/2)) eta = X \%*\% beta_true pi = exp(eta) / (1 + exp(eta)) Y = rbinom(N,1,pi) propSD = rep(1,p) ## fit model; test1 <- G3proj::BLRM.fit.mwg(Y0 = Y, X0 = X, PriorVar = 1000, propSD0 = propSD, nMC = 500, nBI = 100, thin = 5) }
24e77dbc468047b8d08707e2ee7ed87ec9fbe4ba
8f94ccd8d3aed33b418cb9639dc64a159931ae4e
/R/print.sc_rci.R
0fc5579cbdb124323d2b1a56c5a6535e385e5081
[]
no_license
cran/scan
8c9d9b2dc44bbb8c339be3795a62bb5c49be87b0
860599c21c4c5e37746fa8e6234c6f6cc8028070
refs/heads/master
2023-08-22T17:47:22.450439
2023-08-08T13:00:02
2023-08-08T14:31:36
70,917,562
2
1
null
null
null
null
UTF-8
R
false
false
522
r
print.sc_rci.R
#' @rdname print.sc
#' @export
#'
print.sc_rci <- function(x, ...) {
  # S3 print method for "sc_rci" objects (reliable change index results).
  # Reads these list elements of `x`: descriptives, stand.dif,
  # reliability, conf.percent, conf, RCI.
  # NOTE(review): print methods conventionally end with invisible(x);
  # this one returns cat()'s NULL -- confirm no caller uses the value.
  cat("Reliable Change Index\n\n")
  # Difference of the two phase/group means: column 2 of the
  # descriptives table, row 2 minus row 1.
  cat("Mean Difference = ", x$descriptives[2, 2] - x$descriptives[1, 2], "\n")
  cat("Standardized Difference = ", x$stand.dif, "\n")
  cat("\n")
  cat("Descriptives:\n")
  print(x$descriptives)
  cat("\n")
  cat("Reliability = ", x$reliability, "\n")
  cat("\n")
  # conf.percent is stored as a proportion; displayed as a percentage.
  cat(x$conf.percent * 100, "% Confidence Intervals:\n")
  print(x$conf)
  cat("\n")
  cat("Reliable Change Indices:\n")
  print(x$RCI)
  cat("\n")
}
d8f36b786598e218c32d7a58b627cfe037d92c7e
362495bee5b074de6953d17ca22682829e66168d
/tests/testthat/test-pobierz_wartosci_wskaznikow_ewd.R
741170e80cb31569917a2530595bced80d835c38
[ "MIT" ]
permissive
tzoltak/EWDdane
fc8d59aa97d803b347e70a6f70df4154f82bd6b2
8eb5ef560e999cbde8d0dddb78012e4a52bc3480
refs/heads/master
2022-10-22T02:54:33.216359
2022-09-29T15:32:56
2022-09-29T15:32:56
29,207,245
1
0
null
null
null
null
UTF-8
R
false
false
592
r
test-pobierz_wartosci_wskaznikow_ewd.R
context('pobierz_wartosci_wskaznikow_ewd')
# (Comments below translated from Polish.)
# The context string above is simply displayed on screen while the tests
# run, as a hint about what is being tested.
# Tests are grouped into named blocks, but from a technical point of view
# it makes no difference how we do it: they could all sit in a single
# test_that() just as well as each in its own.
# NOTE(review): expect_equal(all(...), TRUE) could be expect_true(...),
# and expect_is() is soft-deprecated in favour of expect_s3_class();
# left unchanged here to avoid altering test behaviour.
test_that('pobierz_wartosci_wskaznikow_ewd dziaล‚a', {
  # Downloads EWD indicator values for school type 'T', year 2013.
  dane = pobierz_wartosci_wskaznikow_ewd('T', 2013)
  expect_is(dane, 'data.frame')
  # Every returned row should describe school type 'T' and carry a
  # non-missing school name.
  expect_equal(all(dane$typ_szkoly %in% 'T'), TRUE)
  expect_equal(all(!is.na(dane$nazwa)), TRUE)
})
25f322a8088cf84e795c4e1bccad87adba571196
ea859cacef237016e55cd5af3765fa8c39561367
/R/global.R
8f85314d0c085bb2b07beb2af1a11b50b4ecc9b7
[]
no_license
Kadambala/GWSDAT
a0253617ff7527c508c82dda8c5b37b350ab88ea
0c725e0273a102f51b5126ee3b32ba4696fd50f5
refs/heads/master
2023-08-22T17:00:30.324738
2021-07-30T15:05:52
2021-07-30T15:05:52
null
0
0
null
null
null
null
UTF-8
R
false
false
839
r
global.R
# This is the place in Shiny to define global _constant_ variables.
# Note that these variables become locked if the software is packaged.
#
# The binding can be unlocked, but instead of doing this rather complicated
# step I put all non-constant variables into the server() function
# (non-global) and pass them to the functions that need them.
#

# Units offered for well coordinates ("" = unspecified).
coord_units <- c("","metres", "feet")

# Units accepted for measured values.
conc_units <- c("ng/l", "ug/l", "mg/l", "Level",
                "metres", # for GW (groundwater depth)
                "feet",   # for GW (groundwater depth)
                "mm",     # for NAPL thickness
                "pH")

# Flags a concentration record may carry ("" = no flag).
conc_flags <- c("", "E-acc", "Omit", "NotInNAPL", "Redox")

# Expected column headers of the concentration input table.
conc_header <- list("WellName", "Constituent", "SampleDate", "Result", "Units", "Flags")

# Expected column headers of the well-location input table.
well_header <- list("WellName", "XCoord", "YCoord", "Aquifer")
9ff99d7cd7113f608bdf70b1ed7111a087edf4a3
da9922c616758fb2beced7407529d974eb892d3c
/R/pssbBind.R
1cb8435382998ec48626f417ab1bee71af7f8bd6
[]
no_license
kcstreamteam1/wlRd-package
714d749b6480d86e5a1e3a0d81047e9ecbc5acf6
46683391740cedbd2c5b8bccef2fe90c2582a634
refs/heads/master
2021-01-01T18:49:53.992958
2017-07-31T18:17:50
2017-07-31T18:17:50
98,440,798
0
0
null
null
null
null
UTF-8
R
false
false
1,244
r
pssbBind.R
# Read and row-bind PSSB export files from one or more directories.
#
# Each element of `file.path` is a directory path whose tab-delimited
# files are read with read.delim() and stacked into one data.frame.
# When `score.type` is supplied, all files from `file.path[i]` are tagged
# with `score.type[i]` in an `id` column.
#
# Args:
#   file.path:  character vector of directory paths.  NOTE: paths are
#               concatenated directly with file names, so each entry must
#               end with a path separator (e.g. "data/"), as in the
#               original contract.
#   score.type: optional character vector, one label per directory.
#   ambient:    if TRUE, subset to the King County ambient-monitoring
#               projects before returning.
# Returns: a data.frame containing the rows of every file read.
pssbBind <- function(file.path, score.type = NULL, ambient = FALSE) {
  # validate lengths of file path vector and score type vector are the same
  if (!is.null(score.type)) {
    if (length(file.path) != length(score.type))
      stop("The number of score types must be equal to the number of file paths")}

  # One list element per directory; seq_along() is safe for empty input.
  all.list <- lapply(seq_along(file.path), function(x) {
    path.files <- list.files(file.path[x])
    # BUG FIX: the original pasted the *whole* `file.path` vector here
    # (and called list.files() on it again), so with more than one
    # directory every iteration re-read files from all directories,
    # duplicating rows and mislabelling ids.  Use only this directory's
    # files, which were already computed in `path.files`.
    list.with.each.file <- lapply(paste(file.path[x], path.files, sep = ''),
                                  function(y) read.delim(y, header=TRUE))
    # append id to each file individually
    if (!is.null(score.type)) {
      list.with.each.file <- lapply(list.with.each.file, function(y) {
        y$id <- score.type[x]
        y})}
    # bind all files from this directory into one data frame
    do.call("rbind.data.frame", list.with.each.file)
  })

  # bind files from all directories into one data frame
  bibi <- do.call("rbind.data.frame", all.list)

  # Optionally restrict to the King County ambient-monitoring projects.
  # NOTE(review): `&` binds tighter than `|`, so the Agency condition only
  # applies to 'Boise Ambient'; the other projects are kept for any
  # agency.  Preserved as-is -- confirm whether parentheses were intended.
  if (ambient == TRUE) {
    bibi <- droplevels(bibi[bibi$Agency=="King County - DNRP" & bibi$Project == 'Boise Ambient' | bibi$Project == 'Ambient Monitoring' | bibi$Project =='Vashon' | bibi$Project =='Seattle',])}
  return(bibi)
}
767a1e0621fc5203cace01834d7444a332ac31f2
e9f2bfee76e2d5a4de154bac6f0f1defcb8f5605
/R/customLosses.R
22af209140771d4d1363276e6cd782bf32f9b194
[]
no_license
devhliu/ANTsRNet
f3f3357df2b4ac54b12a8c02243b4695649d8fde
ba02b69fcb3835d6b9ba178f88c0932d1f3a86f1
refs/heads/master
2020-06-23T08:08:48.638690
2019-07-22T18:41:06
2019-07-22T18:41:06
null
0
0
null
null
null
null
UTF-8
R
false
false
12,116
r
customLosses.R
#' Model loss function for multilabel problems--- multilabel dice coefficient
#'
#' Based on the keras loss function (losses.R):
#'
#' \url{https://github.com/rstudio/keras/blob/master/R/losses.R}
#'
#' @param y_true True labels (Tensor)
#' @param y_pred Predictions (Tensor of the same shape as \code{y_true})
#'
#' @details Loss functions are to be supplied in the loss parameter of the
#' \code{compile()} function.
#'
#' Loss functions can be specified either using the name of a built in loss
#' function (e.g. \code{loss = binary_crossentropy}), a reference to a built in loss
#' function (e.g. \code{loss = binary_crossentropy()}) or by passing an
#' arbitrary function that returns a scalar for each data-point.
#' The actual optimized objective is the mean of the output array across all
#' datapoints.
#' @export
multilabel_dice_coefficient <- function( y_true, y_pred )
{
  # Smoothing constant added to numerator and denominator of the final ratio
  # to avoid division issues on empty masks.
  smoothingFactor <- 1.0

  K <- keras::backend()
  # Force channels-last so the label (one-hot) axis is the final dimension.
  K$set_image_data_format( 'channels_last' )

  y_dims <- unlist( K$int_shape( y_pred ) )
  # Number of segmentation labels = size of the last (channel) axis.
  numberOfLabels <- y_dims[length( y_dims )]

  # Unlike native R, indexing starts at 0.  However, we are
  # assuming the background is 0 so we skip index 0.
  if( length( y_dims ) == 3 )
    {
    # 2-D image: move the channel axis to the front so each label can be
    # selected with a single K$gather call.
    y_true_permuted <- K$permute_dimensions( y_true,
      pattern = c( 3L, 0L, 1L, 2L ) )
    y_pred_permuted <- K$permute_dimensions( y_pred,
      pattern = c( 3L, 0L, 1L, 2L ) )
    } else {
    # 3-D image
    y_true_permuted <- K$permute_dimensions( y_true,
      pattern = c( 4L, 0L, 1L, 2L, 3L ) )
    y_pred_permuted <- K$permute_dimensions( y_pred,
      pattern = c( 4L, 0L, 1L, 2L, 3L ) )
    }

  # Start with 0-based label index 1, i.e. the first non-background label.
  y_true_label <- K$gather( y_true_permuted, indices = c( 1L ) )
  y_pred_label <- K$gather( y_pred_permuted, indices = c( 1L ) )

  y_true_label_f <- K$flatten( y_true_label )
  y_pred_label_f <- K$flatten( y_pred_label )
  intersection <- y_true_label_f * y_pred_label_f
  union <- y_true_label_f + y_pred_label_f - intersection

  numerator <- K$sum( intersection )
  denominator <- K$sum( union )

  if( numberOfLabels > 2 )
    {
    # Pool intersections and unions over the remaining non-background labels
    # (0-based indices 2 .. numberOfLabels - 1).
    for( j in 2L:( numberOfLabels - 1L ) )
      {
      y_true_label <- K$gather( y_true_permuted, indices = c( j ) )
      y_pred_label <- K$gather( y_pred_permuted, indices = c( j ) )
      y_true_label_f <- K$flatten( y_true_label )
      y_pred_label_f <- K$flatten( y_pred_label )

      intersection <- y_true_label_f * y_pred_label_f
      union <- y_true_label_f + y_pred_label_f - intersection

      numerator <- numerator + K$sum( intersection )
      denominator <- denominator + K$sum( union )
      }
    }

  # Jaccard-style overlap of the pooled labels; converted below to a smoothed
  # Dice coefficient via dice = 2 * jaccard / ( 1 + jaccard ).
  unionOverlap <- numerator / denominator

  return ( ( 2.0 * unionOverlap + smoothingFactor ) /
    ( 1.0 + unionOverlap + smoothingFactor ) )
}
attr( multilabel_dice_coefficient, "py_function_name" ) <-
  "multilabel_dice_coefficient"

#' Multilabel dice loss function.
#'
#' Negated \code{multilabel_dice_coefficient} so that minimizing the loss
#' maximizes the dice overlap.
#'
#' @param y_true true encoded labels
#' @param y_pred predicted encoded labels
#'
#' @rdname loss_multilabel_dice_coefficient_error
#' @export
loss_multilabel_dice_coefficient_error <- function( y_true, y_pred )
{
  return( -multilabel_dice_coefficient( y_true, y_pred ) )
}
attr( loss_multilabel_dice_coefficient_error, "py_function_name" ) <-
  "multilabel_dice_coefficient_error"

#' Peak-signal-to-noise ratio.
#'
#' Based on the keras loss function (losses.R):
#'
#' \url{https://github.com/rstudio/keras/blob/master/R/losses.R}
#'
#' @param y_true True labels (Tensor)
#' @param y_pred Predictions (Tensor of the same shape as \code{y_true})
#'
#' @details Loss functions are to be supplied in the loss parameter of the
#' \code{compile()} function.
#'
#' @export
peak_signal_to_noise_ratio <- function( y_true, y_pred )
{
  K <- keras::backend()
  # PSNR computed as -10 * log10( MSE ).  NOTE(review): the usual
  # 20 * log10( maxIntensity ) term is omitted, which implicitly assumes a
  # peak intensity of 1 -- confirm inputs are scaled to [0, 1].
  return( -10.0 * K$log( K$mean( K$square( y_pred - y_true ) ) ) /
    K$log( 10.0 ) )
}
attr( peak_signal_to_noise_ratio, "py_function_name" ) <-
  "peak_signal_to_noise_ratio"

#' Peak-signal-to-noise ratio.
#'
#' @param y_true true encoded labels
#' @param y_pred predicted encoded labels
#'
#' @rdname loss_peak_signal_to_noise_ratio_error
#' @export
loss_peak_signal_to_noise_ratio_error <- function( y_true, y_pred )
{
  # Negated so that minimizing the loss maximizes the PSNR.
  return( -peak_signal_to_noise_ratio( y_true, y_pred ) )
}
attr( loss_peak_signal_to_noise_ratio_error, "py_function_name" ) <-
  "peak_signal_to_noise_ratio_error"

#' Pearson correlation coefficient.
#'
#' Based on the code found here:
#'
#' \url{https://github.com/rstudio/keras/issues/160}
#'
#' @param y_true True labels (Tensor)
#' @param y_pred Predictions (Tensor of the same shape as \code{y_true})
#'
#' @details Loss functions are to be supplied in the loss parameter of the
#' \code{compile()} function.
#'
#' @export
pearson_correlation_coefficient <- function( y_true, y_pred )
{
  K <- keras::backend()

  # Single-pass computational formula for Pearson's r:
  #   r = ( sum(xy) - sum(x) sum(y) / N ) /
  #       sqrt( ( sum(x^2) - sum(x)^2 / N ) * ( sum(y^2) - sum(y)^2 / N ) )
  N <- K$sum( K$ones_like( y_true ) )

  sum_x <- K$sum( y_true )
  sum_y <- K$sum( y_pred )
  sum_x_squared <- K$sum( K$square( y_true ) )
  sum_y_squared <- K$sum( K$square( y_pred ) )
  sum_xy <- K$sum( y_true * y_pred )

  numerator <- sum_xy - ( sum_x * sum_y / N )
  denominator <- K$sqrt( ( sum_x_squared - K$square( sum_x ) / N ) *
    ( sum_y_squared - K$square( sum_y ) / N ) )

  coefficient <- numerator / denominator

  return( coefficient )
}
attr( pearson_correlation_coefficient, "py_function_name" ) <-
  "pearson_correlation_coefficient"

#' Pearson correlation coefficient
#'
#' @param y_true true encoded labels
#' @param y_pred predicted encoded labels
#'
#' @rdname loss_pearson_correlation_coefficient_error
#' @export
loss_pearson_correlation_coefficient_error <- function( y_true, y_pred )
{
  # Negated so that minimizing the loss maximizes the correlation.
  return( -pearson_correlation_coefficient( y_true, y_pred ) )
}
attr( loss_pearson_correlation_coefficient_error, "py_function_name" ) <-
  "pearson_correlation_coefficient_error"

#' Loss function for the SSD deep learning architecture.
#'
#' Creates an R6 class object for use with the SSD deep learning architecture
#' based on the paper
#'
#' W. Liu, D. Anguelov, D. Erhan, C. Szegedy, S. Reed, C-Y. Fu, A. Berg.
#'     SSD: Single Shot MultiBox Detector.
#'
#' available here:
#'
#'         \url{https://arxiv.org/abs/1512.02325}
#'
#' @docType class
#'
#' @section Usage:
#' \preformatted{ssdLoss <- LossSSD$new( dimension = 2L, backgroundRatio = 3L,
#'   minNumberOfBackgroundBoxes = 0L, alpha = 1.0,
#'   numberOfClassificationLabels )
#'
#' ssdLoss$smooth_l1_loss( y_true, y_pred )
#' ssdLoss$log_loss( y_true, y_pred )
#' ssdLoss$compute_loss( y_true, y_pred )
#' }
#'
#' @section Arguments:
#' \describe{
#'  \item{ssdLoss}{A \code{process} object.}
#'  \item{dimension}{image dimensionality.}
#'  \item{backgroundRatio}{The maximum ratio of background to foreground
#'    for weighting in the loss function.  Is rounded to the nearest integer.
#'    Default is 3.}
#'  \item{minNumberOfBackgroundBoxes}{The minimum number of background boxes
#'    to use in loss computation *per batch*.  Should reflect a value in
#'    proportion to the batch size.  Default is 0.}
#'  \item{alpha}{Weighting factor for the localization loss in total loss
#'    computation.}
#'  \item{numberOfClassificationLabels}{number of classes including background.}
#' }
#'
#' @section Details:
#'   \code{$smooth_l1_loss} smooth loss
#'
#'   \code{$log_loss} log loss
#'
#'   \code{$compute_loss} computes total loss.
#'
#' @author Tustison NJ
#'
#' @return an SSD loss function
#'
#' @name LossSSD
NULL

#' @export
LossSSD <- R6::R6Class( "LossSSD",

  public = list(

    # Image dimensionality (2 or 3); determines the number of box coordinates
    # ( 2 * dimension ) appended after the class scores.
    dimension = 2L,

    # Maximum background : foreground ratio used for hard-negative mining.
    backgroundRatio = 3L,

    # Floor on the number of background boxes contributing to the loss.
    minNumberOfBackgroundBoxes = 0L,

    # Weight of the localization term in the total loss.
    alpha = 1.0,

    # Number of classes, including the background class.
    numberOfClassificationLabels = NULL,

    tf = tensorflow::tf,

    initialize = function( dimension = 2L, backgroundRatio = 3L,
      minNumberOfBackgroundBoxes = 0L, alpha = 1.0,
      numberOfClassificationLabels = NULL )
      {
      self$dimension <- as.integer( dimension )
      # Scalars are wrapped as tf constants so they can participate directly
      # in the graph ops used below.
      self$backgroundRatio <- self$tf$constant( backgroundRatio )
      self$minNumberOfBackgroundBoxes <-
        self$tf$constant( minNumberOfBackgroundBoxes )
      self$alpha <- self$tf$constant( alpha )
      self$numberOfClassificationLabels <-
        as.integer( numberOfClassificationLabels )
      },

    # Smooth-L1 (Huber-style) localization loss: quadratic for |error| < 1,
    # linear otherwise; summed over the last (coordinate) axis.
    smooth_l1_loss = function( y_true, y_pred )
      {
      y_true <- self$tf$cast( y_true, dtype = "float32" )
      absoluteLoss <- self$tf$abs( y_true - y_pred )
      squareLoss <- 0.5 * ( y_true - y_pred )^2
      l1Loss <- self$tf$where( self$tf$less( absoluteLoss, 1.0 ),
        squareLoss, absoluteLoss - 0.5 )
      return( self$tf$reduce_sum( l1Loss, axis = -1L, keepdims = FALSE ) )
      },

    # Cross-entropy classification loss; predictions are floored at 1e-15 to
    # avoid log( 0 ).
    log_loss = function( y_true, y_pred )
      {
      y_true <- self$tf$cast( y_true, dtype = "float32" )
      y_pred <- self$tf$maximum( y_pred, 1e-15 )
      logLoss <- -self$tf$reduce_sum( y_true * self$tf$log( y_pred ),
        axis = -1L, keepdims = FALSE )
      return( logLoss )
      },

    # Total SSD loss = ( classification + alpha * localization ) normalized by
    # the number of foreground boxes, with hard-negative mining of background
    # boxes.  NOTE(review): uses TF1-era ops ( to_float, tf$log, tf$cond ) --
    # confirm compatibility with the installed TensorFlow version.
    compute_loss = function( y_true, y_pred )
      {
      y_true$set_shape( y_pred$get_shape() )
      batchSize <- self$tf$shape( y_pred )[1]
      numberOfBoxesPerCell <- self$tf$shape( y_pred )[2]

      # Leading channels hold the one-hot class scores ...
      indices <- 1:self$numberOfClassificationLabels
      classificationLoss <- self$tf$to_float(
        self$log_loss( y_true[,, indices], y_pred[,, indices] ) )

      # ... followed by the 2 * dimension box coordinates.
      indices <- self$numberOfClassificationLabels + 1:( 2 * self$dimension )
      localizationLoss <- self$tf$to_float(
        self$smooth_l1_loss( y_true[,, indices], y_pred[,, indices] ) )

      # The first class channel is background; a box is foreground if any
      # non-background class channel is set.
      backgroundBoxes <- y_true[,, 1]

      if( self$numberOfClassificationLabels > 2 )
        {
        foregroundBoxes <- self$tf$to_float( self$tf$reduce_max(
          y_true[,, 2:self$numberOfClassificationLabels], axis = -1L,
          keepdims = FALSE ) )
        } else {
        foregroundBoxes <- self$tf$to_float( self$tf$reduce_max(
          y_true[,, 2:self$numberOfClassificationLabels], axis = -1L,
          keepdims = TRUE ) )
        }
      numberOfForegroundBoxes <- self$tf$reduce_sum( foregroundBoxes,
        keepdims = FALSE )

      if( self$numberOfClassificationLabels > 2 )
        {
        foregroundClassLoss <- self$tf$reduce_sum(
          classificationLoss * foregroundBoxes, axis = -1L, keepdims = FALSE )
        } else {
        foregroundClassLoss <- self$tf$reduce_sum(
          classificationLoss * foregroundBoxes, axis = -1L, keepdims = TRUE )
        }

      # Hard-negative mining: keep only the highest-loss background boxes,
      # capped at backgroundRatio * #foreground boxes (but at least
      # minNumberOfBackgroundBoxes and at most the number of non-zero losses).
      backgroundClassLossAll <- classificationLoss * backgroundBoxes
      nonZeroIndices <- self$tf$count_nonzero( backgroundClassLossAll,
        dtype = self$tf$int32 )
      numberOfBackgroundBoxesToKeep <- self$tf$minimum( self$tf$maximum(
        self$backgroundRatio * self$tf$to_int32( numberOfForegroundBoxes ),
        self$minNumberOfBackgroundBoxes ), nonZeroIndices )

      # Branch taken when there are no non-zero background losses.
      f1 = function() { return( self$tf$zeros( list( batchSize ) ) ) }

      # Branch computing the mined background loss via top-k selection.
      f2 = function()
        {
        backgroundClassLossAll1d <- self$tf$reshape( backgroundClassLossAll,
          list( -1L ) )
        topK <- self$tf$nn$top_k( backgroundClassLossAll1d,
          numberOfBackgroundBoxesToKeep, FALSE )
        values <- topK$values
        indices <- topK$indices

        # Scatter a 0/1 mask marking the kept background boxes, then reshape
        # back to ( batch, boxes ).
        backgroundBoxesToKeep <- self$tf$scatter_nd(
          self$tf$expand_dims( indices, axis = 1L ),
          updates = self$tf$ones_like( indices, dtype = self$tf$int32 ),
          shape = self$tf$shape( backgroundClassLossAll1d ) )
        backgroundBoxesToKeep <- self$tf$to_float( self$tf$reshape(
          backgroundBoxesToKeep, list( batchSize, numberOfBoxesPerCell ) ) )
        return( self$tf$reduce_sum( classificationLoss * backgroundBoxesToKeep,
          axis = -1L, keepdims = FALSE ) )
        }

      backgroundClassLoss <- self$tf$cond( self$tf$equal( nonZeroIndices,
        self$tf$constant( 0L ) ), f1, f2 )

      classLoss <- foregroundClassLoss + backgroundClassLoss

      # Localization loss only counts foreground boxes.
      localizationLoss <- self$tf$reduce_sum( localizationLoss * foregroundBoxes,
        axis = -1L, keepdims = FALSE )

      # Normalize by the number of foreground boxes (at least 1).
      totalLoss <- ( classLoss + self$alpha * localizationLoss ) /
        self$tf$maximum( 1.0, numberOfForegroundBoxes )

      return( totalLoss )
      }
    )
  )
7db0882a15150b63bce78dc09dfcd8bd3d8b9402
846791f0492405acbf9a8eae8d7cd4fa8a43d03b
/R/read_meso_region_bg.R
ac053ad49d2eb056eb2307ee2f4d6d9727869a74
[]
no_license
Prof-Rodrigo-Silva/geobage
1d523c480bd2850556d873282a51dd077ae636ac
aaec7e8e3f537f99b646669dff00898d371a573d
refs/heads/master
2022-11-15T23:25:28.965541
2022-07-26T03:28:33
2022-07-26T03:28:33
275,835,067
0
0
null
null
null
null
UTF-8
R
false
false
462
r
read_meso_region_bg.R
#' Meso region the municipality of Bage
#'
#' Returns the meso region in which the municipality of Bage is inserted.
#' Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000" and
#' CRS(4674).
#'
#' @export
#' @family general area functions
#' @examples \dontrun{
#'
#' library(geobage)
#'
#' c <- read_meso_region_bg()
#'
#' }
read_meso_region_bg <- function() {
  # Meso-region code 4306 is the region containing Bage; return it directly.
  geobr::read_meso_region(code_meso = 4306)
}
cff0749b4b66416cc5ecfe4d1e4d7643675fbc13
09f9121232947f5e8eec489b5db880e3f5a5fc06
/inst/extra_scripts/build_data.R
d3679c63495631f7423d58abe9123adc40a001c7
[]
no_license
mejihero/clustext
9ac986476509e3ef233011858ff2ca6023af08e5
19963bc5a63148aa40f29f32bee3430128206215
refs/heads/master
2020-03-12T00:06:49.552509
2017-04-14T16:42:47
2017-04-14T16:42:47
null
0
0
null
null
null
null
UTF-8
R
false
false
1,081
r
build_data.R
# Build the pre-computed `assignments` example dataset for the clustext
# package: cluster the 2012 presidential debates with four algorithms and
# store the resulting cluster assignments.
pacman::p_load(clustext, dplyr)

# Build a data store from the Obama/Romney dialogue: strip sub-turn suffixes
# from `tot`, combine via textshape, keep only the two candidates, and drop
# English stopwords and words shorter than 3 characters.
x <- presidential_debates_2012 %>%
    mutate(tot = gsub("\\..+$", "", tot)) %>%
    textshape::combine() %>%
    filter(person %in% c("ROMNEY", "OBAMA")) %>%
    with(data_store(dialogue, stopwords = tm::stopwords("english"), min.char = 3))

# NOTE(review): `myfit2` is not defined anywhere in this script; these calls
# presumably rely on an object from an interactive workspace -- confirm the
# intended second argument to assign_cluster().
set.seed(10)
kmeans_assignment <- kmeans_cluster(x, 50) %>%
    assign_cluster(myfit2)

set.seed(10)
nmf_assignment <- nmf_cluster(x, 50) %>%
    assign_cluster(myfit2)

set.seed(10)
skmeans_assignment <- skmeans_cluster(x, 50) %>%
    assign_cluster(myfit2)

hierarchical_assignment <- hierarchical_cluster(x) %>%
    assign_cluster(k=50)

# Collect the four assignment objects into one named list.
assignments <- list(
    hierarchical_assignment = hierarchical_assignment,
    kmeans_assignment = kmeans_assignment,
    skmeans_assignment = skmeans_assignment,
    nmf_assignment =nmf_assignment
)

# Strip the data_store/model/join attributes from each element, presumably to
# keep the saved dataset small.
assignments <- lapply(assignments, function(x) {
    attributes(x)[['data_store']] <- NULL
    attributes(x)[['model']] <- NULL
    attributes(x)[['join']] <- NULL
    x
})

# Visual check of the attributes remaining on each element.
lapply(assignments, function(x) {names(attributes(x))})

# Write `assignments` into the package's data/ directory.
pax::new_data(assignments)
2e33a5cab2c0b96783a79b3c31c3c403bfca9229
b5ad091aca9e037cdd7750c7c016a9fbc41cc1b1
/FunctionAddNum.R
3f632faaefd313b6005897045dc887396a5097d4
[]
no_license
avdharwadkar/LiveSessionDemoProject
4c2f9f8a577e81e2d1809407046ff42519baa165
548222bbef8425388096383731839947b85f3f9d
refs/heads/master
2021-01-12T15:40:24.868656
2016-10-25T00:44:32
2016-10-25T00:44:32
71,843,124
0
0
null
null
null
null
UTF-8
R
false
false
84
r
FunctionAddNum.R
# Function to Add two number
#
# Returns the (element-wise) sum of its two arguments.
AddNumbers <- function(x, y) {
  x + y
}
08804c1316562c62b1041498be4a120962802453
295d1939656d1b5d20b4eade3e2f9eb622558bec
/scripts/1_MultipleSites_NHA_TemplateGenerator.R
a4ccdf40bbbd15c086f48c2c8738d4be76734814
[]
no_license
PNHP/NHA_newTemplate
1959ad006d0c6af2f93c1cf7a7acb96253d70004
18a0da80db93f0ef8e6e10bb2587d1759133d833
refs/heads/master
2020-04-27T09:30:47.335200
2020-01-06T20:45:01
2020-01-06T20:45:01
174,217,624
2
2
null
2019-06-24T17:27:46
2019-03-06T20:40:53
R
UTF-8
R
false
false
23,801
r
1_MultipleSites_NHA_TemplateGenerator.R
#------------------------------------------------------------------------------- # Name: NHA_TemplateGenerator.r # Purpose: Create a Word template for NHA content for multiple sites at once # Author: Anna Johnson # Created: 2019-10-16 # Updated: # # Updates: # # To Do List/Future ideas: # #------------------------------------------------------------------------------- #################################################### #Set up libraries, paths, and settings # check and load required libraries if (!requireNamespace("here", quietly = TRUE)) install.packages("here") require(here) if (!requireNamespace("arcgisbinding", quietly = TRUE)) install.packages("arcgisbinding") require(arcgisbinding) if (!requireNamespace("RSQLite", quietly = TRUE)) install.packages("RSQLite") require(RSQLite) if (!requireNamespace("knitr", quietly = TRUE)) install.packages("knitr") require(knitr) if (!requireNamespace("xtable", quietly = TRUE)) install.packages("xtable") require(xtable) if (!requireNamespace("flextable", quietly = TRUE)) install.packages("flextable") require(flextable) if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr") require(dplyr) if (!requireNamespace("dbplyr", quietly = TRUE)) install.packages("dbplyr") require(dbplyr) if (!requireNamespace("rmarkdown", quietly = TRUE)) install.packages("rmarkdown") require(rmarkdown) if (!requireNamespace("tmap", quietly = TRUE)) install.packages("tmap") require(tmap) if (!requireNamespace("OpenStreetMap", quietly = TRUE)) install.packages("OpenStreetMap") require(OpenStreetMap) if (!requireNamespace("openxlsx", quietly = TRUE)) install.packages("openxlsx") require(openxlsx) if (!requireNamespace("sf", quietly = TRUE)) install.packages("sf") require(sf) # if (!requireNamespace("odbc", quietly = TRUE)) install.packages("odbc") # require(odbc) # note: we need to install 64bit java: https://www.java.com/en/download/manual.jsp # load in the paths and settings file source(here::here("scripts", "0_PathsAndSettings.r")) 
####################################################
# Select focal NHAs
# Reads the list of Natural Heritage Areas to report on and builds the SQL
# WHERE clause used to pull those records from the enterprise geodatabase.

#Load list of NHAs that you wish to generate site reports for
NHA_list <- read.csv(here("_data", "sourcefiles", "NHAs_SWCounties.csv")) #download list that includes site names and/or (preferably) NHA Join ID

#if you are just running a few sites, you can select individual site by name or NHA join id:
#selected_nhas <- arc.select(nha, where_clause="SITE_NAME='White's Woods' AND STATUS = 'NP'")
#selected_nhas <- arc.select(nha, where_clause="NHA_JOIN_ID IN('alj82942')")

#Select larger number of sites
#Method A) If using site names (but this gets hung up on apostrophes)
NHA_list <- NHA_list[order(NHA_list$Site.Name),] #order alphabetically
Site_Name_List <- as.vector(NHA_list$Site.Name)
Site_Name_List <- as.list(Site_Name_List)

# Build the WHERE clause: quote every site name and restrict to records whose
# status is 'NP' or 'NR'.
# NOTE(review): sQuote() can emit directional ("curly") quotes depending on
# locale/options, which a SQL engine will not treat as string delimiters --
# confirm this produces plain single quotes in practice.
SQLquery_Sites <- paste("SITE_NAME IN(",paste(toString(sQuote(Site_Name_List)),collapse=", "), ") AND STATUS IN('NP','NR')") #use this to input vector of site names to select from into select clause.
#Method B) Or use NHA join ID #Site_NHAJoinID_List <-as.character(NHA_list$NHA.Join.ID) #SQLquery_Sites <- paste("NHA_Join_ID IN(",paste(toString(sQuote(Site_NHAJoinID_List)),collapse=", "), ") AND STATUS IN('NP','NR')") serverPath <- paste("C:/Users/",Sys.getenv("USERNAME"),"/AppData/Roaming/ESRI/ArcGISPro/Favorites/PNHP.PGH-gis0.sde/",sep="") nha <- arc.open(paste(serverPath,"PNHP.DBO.NHA_Core", sep="")) selected_nhas <- arc.select(nha, where_clause=SQLquery_Sites) dim(selected_nhas) #check how many records are returned to ensure it meets expectations selected_nhas <- selected_nhas[order(selected_nhas$SITE_NAME),]#order alphabetically #### #manual check to ensure that your original list of NHAs and the selected NHA data frame both have sites in the same order identical(selected_nhas$SITE_NAME, as.character(NHA_list$Site.Name)) #### #################################################### ## Build the Species Table ######################### # open the related species table and get the rows that match the NHA join ids from the selected NHAs nha_relatedSpecies <- arc.open(paste(serverPath,"PNHP.DBO.NHA_SpeciesTable", sep="")) selected_nha_relatedSpecies <- arc.select(nha_relatedSpecies) Site_ID_list <- as.list(unique(selected_nhas$NHA_JOIN_ID)) #added in unique for occasions where a site might be in the import list multiple times (e.g. 
when it crosses county lines and we want to talk about it for all intersecting counties) #open linked species tables and select based on list of selected NHAs species_table_select <- list() for (i in 1:length(Site_ID_list)) { species_table_select[[i]] <- selected_nha_relatedSpecies[which(selected_nha_relatedSpecies$NHA_JOIN_ID==Site_ID_list[i]),] } species_table_select #list of species tables #merge species lists w/ EO information from Point Reps database #create one big data frame first of all the EOIDs across all the selected NHAs speciestable <- bind_rows(species_table_select, .id = "column_label") SQLquery_pointreps <- paste("EO_ID IN(",paste(toString(speciestable$EO_ID),collapse=", "), ")") #don't use quotes around numbers pointreps <- arc.open("W:/Heritage/Heritage_Data/Biotics_datasets.gdb/eo_ptreps") selected_pointreps <- arc.select(pointreps, c('EO_ID', 'EORANK', 'GRANK', 'SRANK', 'SPROT', 'PBSSTATUS', 'LASTOBS', 'SENSITV_SP', 'SENSITV_EO'), where_clause=SQLquery_pointreps) #select subset of columns from EO pointrep database #if this select command does not work (which sometimes happens to me?), try this method, which will work #selected_pointreps <- arc.select(pointreps, c('EO_ID', 'EORANK', 'GRANK', 'SRANK', 'SPROT', 'PBSSTATUS', 'LASTOBS', 'SENSITV_SP', 'SENSITV_EO')) #selected_pointreps <- subset(selected_pointreps, selected_pointreps$EO_ID %in% speciestable$EO_ID) dim(selected_pointreps) speciestable <- merge(speciestable,selected_pointreps, by="EO_ID") names(speciestable)[c(15:22)] <- c("EORANK","GRANK","SRANK","SPROT","PBSSTATUS","LASTOBS","SENSITIVE","SENSITIVE_EO") #should rewrite this to be resilient to changing order of data frames species_table_select<- split(speciestable, speciestable$column_label) #split back into a list of species tables namevec <- NULL #name species tables so that you can tell if they end up in a weird order for (i in seq_along(species_table_select)){ namevec[i] <- species_table_select[[i]]$NHA_JOIN_ID[1]} 
names(species_table_select) <- namevec #Make a list of all the ELCODES within all the species tables, to pull further info out from databases SD_specieslist <- lapply(seq_along(species_table_select), function(x) species_table_select[[x]][,c("ELCODE")]) SD_specieslist <- unlist(SD_specieslist) #Connect to database and merge ElSubID into species tables TRdb <- dbConnect(SQLite(), dbname=TRdatabasename) #connect to SQLite DB Join_ElSubID <- dbGetQuery(TRdb, paste0("SELECT ELSubID, ELCODE FROM ET"," WHERE ELCODE IN (", paste(toString(sQuote(SD_specieslist)), collapse = ", "), ");")) dbDisconnect(TRdb) SD_speciesTable <- lapply(seq_along(species_table_select), function(x) merge(species_table_select[[x]], Join_ElSubID, by="ELCODE"))# merge in the ELSubID until we get it fixed in the GIS layer names(SD_speciesTable) <- namevec #keep names associated with list of tables #add a column in each selected NHA species table for the image path, and assign image. #Note: this uses the EO_ImSelect function, which I modified in the source script to work with a list of species tables for (i in 1:length(SD_speciesTable)) { for(j in 1:nrow(SD_speciesTable[[i]])){ SD_speciesTable[[i]]$Images <- EO_ImSelect(SD_speciesTable[[i]][j,]) } } # modify image assignments to account for finer groupings of the inverts--this part is not working right, come back to later #for (i in 1:length(SD_speciesTable)) { # for(j in 1:nrow(SD_speciesTable[[i]])){ # SD_speciesTable[[i]]$Images <- EO_ImFix(SD_speciesTable[[i]][j,]) # } #} ############################################ # write species table to the SQLite database speciesTable4db <- SD_speciesTable for (i in 1:length(speciesTable4db)){ speciesTable4db[[i]] <- cbind(selected_nhas$NHA_JOIN_ID[i], speciesTable4db[[i]]) } for (i in 1:length(speciesTable4db)){ names(speciesTable4db[[i]])[1] <- "NHA_JOIN_ID" speciesTable4db[[i]]$NHA_JOIN_ID <- as.character(speciesTable4db[[i]]$NHA_JOIN_ID) } db_nha <- dbConnect(SQLite(), dbname=nha_databasename) # connect 
to the database # delete existing threats and recs for this site if they exist for (i in 1:length(selected_nhas$NHA_JOIN_ID)){ dbExecute(db_nha, paste("DELETE FROM nha_species WHERE NHA_JOIN_ID = ", sQuote(selected_nhas$NHA_JOIN_ID[i]), sep="")) } # add in the new data for (i in 1:length(speciesTable4db)){ dbAppendTable(db_nha, "nha_species", speciesTable4db[[i]]) } dbDisconnect(db_nha) ################################################# ### Pull out info from Biotics for each site eoid_list <- list() #list of EOIDs to pull Biotics records with for (i in 1: length(SD_speciesTable)){ eoid_list[[i]] <- paste(toString(SD_speciesTable[[i]]$EO_ID), collapse = ",") } # make a list of EOIDs to get data from ptreps <- arc.open(paste(biotics_gdb,"eo_ptreps",sep="/")) ptreps_selected <- list() #list of EO records for each selected NHA for (i in 1:length(eoid_list)){ ptreps_selected[[i]] <- arc.select(ptreps, fields=c("EO_ID", "SNAME", "EO_DATA", "GEN_DESC","MGMT_COM","GENERL_COM"), where_clause=paste("EO_ID IN (", eoid_list[[i]], ")",sep="") ) } ################################################ # calculate the site significance rank based on the species present at the site source(here::here("scripts","nha_ThreatsRecDatabase","2_loadSpeciesWeights.r")) #check whether there are multiple EOs in the species table for the same species, and only keep one record for each species, the most recently observed entry for (i in 1:length(SD_speciesTable)) { duplic_Spp <- SD_speciesTable[[i]] duplic_Spp <- duplic_Spp[order(duplic_Spp$LASTOBS, decreasing=TRUE),] SD_speciesTable[[i]] <- duplic_Spp[!duplicated(duplic_Spp[1]),] } sigrankspecieslist <- SD_speciesTable #so if things get weird, you only have to come back to this step #remove species which are not included in thesite ranking matrices--GNR, SNR, SH/Eo Rank H, etc. 
sigrankspecieslist <- lapply(seq_along(sigrankspecieslist), function(x) sigrankspecieslist[[x]][which(sigrankspecieslist[[x]]$GRANK!="GNR"&!is.na(sigrankspecieslist[[x]]$EORANK)),]) #remove EOs which are GNR sigrankspecieslist <- lapply(seq_along(sigrankspecieslist), function(x) sigrankspecieslist[[x]][which(sigrankspecieslist[[x]]$GRANK!="GNA"&!is.na(sigrankspecieslist[[x]]$EORANK)),]) #remove EOs which are GNA sigrankspecieslist <- lapply(seq_along(sigrankspecieslist), function(x) sigrankspecieslist[[x]][which(sigrankspecieslist[[x]]$SRANK!="SNR"&!is.na(sigrankspecieslist[[x]]$EORANK)),]) #remove EOs which are SNR sigrankspecieslist <- lapply(seq_along(sigrankspecieslist), function(x) sigrankspecieslist[[x]][which(sigrankspecieslist[[x]]$SRANK!="SH"&!is.na(sigrankspecieslist[[x]]$EORANK)),]) #remove EOs which are SH sigrankspecieslist <- lapply(seq_along(sigrankspecieslist), function(x) sigrankspecieslist[[x]][which(sigrankspecieslist[[x]]$EORANK!="H"),]) #remove EOs w/ an H quality rank sigrankspecieslist <- lapply(seq_along(sigrankspecieslist), function(x) sigrankspecieslist[[x]][which(sigrankspecieslist[[x]]$SRANK!="SU"&!is.na(sigrankspecieslist[[x]]$EORANK)),]) #remove EOs which are SU #Merge rounded S, G, and EO ranks into individual species tables sigrankspecieslist <- lapply(seq_along(sigrankspecieslist), function(x) merge(sigrankspecieslist[[x]], rounded_grank, by="GRANK")) sigrankspecieslist <- lapply(seq_along(sigrankspecieslist), function(x) merge(sigrankspecieslist[[x]], rounded_srank, by="SRANK")) sigrankspecieslist <- lapply(seq_along(sigrankspecieslist), function(x) merge(sigrankspecieslist[[x]], nha_EORANKweights, by="EORANK")) #Calculate rarity scores for each species within each table RarityScore <- function(x, matt) { matt <- nha_gsrankMatrix if (nrow(x) > 0) { for(i in 1:nrow(x)) { x$rarityscore[i] <- matt[x$GRANK_rounded[i],x$SRANK_rounded[i]] }} else { "NA" } x$rarityscore } res <- lapply(sigrankspecieslist, RarityScore) #calculate rarity 
score for each species table sigrankspecieslist <- Map(cbind, sigrankspecieslist, RarityScore=res) #bind rarity score into each species table names(sigrankspecieslist) <- namevec #reassign the names #Adjust site significance rankings based on presence of G1, G2, and G3 EOs #create flags for sites with a G3 species (which should automatically be at least regional) G3_regional <- lapply(seq_along(sigrankspecieslist), function(x) "G3" %in% sigrankspecieslist[[x]]$GRANK_rounded) #create flags for sites with a G1 or G2 species (which should automatically be a global site) G1_global <- lapply(seq_along(sigrankspecieslist), function(x) "G1" %in% sigrankspecieslist[[x]]$GRANK_rounded) G2_global <- lapply(seq_along(sigrankspecieslist), function(x) "G2" %in% sigrankspecieslist[[x]]$GRANK_rounded) #Calculate scores for each site, aggregating across all species and assign significance rank category. Skip any remaining NA values in the rarity scores TotalScore <- lapply(seq_along(sigrankspecieslist), function(x) sigrankspecieslist[[x]]$RarityScore[!is.na(sigrankspecieslist[[x]]$RarityScore)] * sigrankspecieslist[[x]]$Weight) # calculate the total score for each species SummedTotalScore <- lapply(TotalScore, sum) SummedTotalScore <- lapply(SummedTotalScore, as.numeric) SiteRank <- list() #create empty list object to write into for (i in seq_along(SummedTotalScore)) { if(SummedTotalScore[[i]]==0|is.na(SummedTotalScore[[i]])){ SiteRank[[i]] <- "Local" } else if(is.na(SummedTotalScore[[i]])){ SiteRank[[i]] <- "Local" } else if(SummedTotalScore[[i]]>0 & SummedTotalScore[[i]]<=152) { SiteRank[[i]] <- "State" } else if(SummedTotalScore[i]>152 & SummedTotalScore[[i]]<=457) { SiteRank[[i]] <- "Regional" } else if (SummedTotalScore[[i]]>457) { SiteRank[[i]] <- "Global" } } #manual check step, take a look if you want to see where things are mismatched--do any sites need to have ranks overriden? 
check <- as.data.frame(cbind(SiteRank, SummedTotalScore, G3_regional, G2_global, G1_global, namevec, selected_nhas$NHA_JOIN_ID)) #Do the site ranking overrides automatically for (i in seq_along(SiteRank)) { if(G3_regional[[i]]=="TRUE") { SiteRank[[i]] <-"Regional" } else if(G2_global[[i]]=="TRUE"){ SiteRank[[i]] <- "Global" } else if(G1_global[[i]]=="TRUE"){ SiteRank[[i]] <- "Global" } } #reorder the sites selected_nhas <- selected_nhas[match(namevec, selected_nhas$NHA_JOIN_ID),]#order to match order of species tables #ensure that both data frames have sites in the same order identical(selected_nhas$NHA_JOIN_ID, namevec) #merge significance data into NHA table selected_nhas$site_score <- unlist(SiteRank) #add site significance rankings to NHA data frame selected_nhas$site_rank <- unlist(SummedTotalScore) #add site significance score to NHA data frame summary(as.factor(selected_nhas$site_score)) #manual check step: take a look at distribution of significance ranks ######################################################### #Build pieces needed for each site report #generate list of folder paths and file names for selected NHAs nha_foldername_list <- list() for (i in 1:length(Site_Name_List)) { nha_foldername_list[[i]] <- gsub(" ", "", Site_Name_List[i], fixed=TRUE) nha_foldername_list[[i]] <- gsub("#", "", nha_foldername_list[i], fixed=TRUE) nha_foldername_list[[i]] <- gsub("'", "", nha_foldername_list[i], fixed=TRUE) } nha_foldername_list <- unlist(nha_foldername_list) #list of folder names nha_filename_list <- list() for (i in 1:length(nha_foldername_list)) { nha_filename_list[i] <- paste(nha_foldername_list[i],"_",gsub("[^0-9]", "", Sys.Date() ),".docx",sep="") } nha_filename_list <- unlist(nha_filename_list) #list of file names #generate URLs for each EO at site URL_EOs <- list() for (i in 1:length(ptreps_selected)){ URL_EOs[[i]] <- lapply(seq_along(ptreps_selected[[i]]$EO_ID), function(x) 
paste("https://bioticspa.natureserve.org/biotics/services/page/Eo/",ptreps_selected[[i]]$EO_ID[x],".html", sep="")) URL_EOs[[i]] <- sapply(seq_along(URL_EOs[[i]]), function(x) paste("(",URL_EOs[[i]][x],")", sep="")) } Sname_link <- list() for (i in 1:length(ptreps_selected)){ Sname_link[[i]] <- sapply(seq_along(ptreps_selected[[i]]$SNAME), function(x) paste("[",ptreps_selected[[i]]$SNAME[x],"]", sep="")) } Links <- mapply(paste, Sname_link, URL_EOs, sep="") #for R markdown, list of text plus hyperlinks to create links to biotics page for each EO at each site # set up the directory folders where site account pieces go NHAdest1 <- sapply(seq_along(nha_foldername_list), function(x) paste(NHAdest,"DraftSiteAccounts",nha_foldername_list[x],sep="/")) sapply(seq_along(NHAdest1), function(x) dir.create(NHAdest1[x], showWarnings=FALSE)) # make a folder for each site, if those folders do not exist already sapply(seq_along(NHAdest1), function(x) dir.create(paste(NHAdest1[x],"photos", sep="/"), showWarnings = F)) # make a folder for each site, for photos ####################################################################### #Pull out species-specific threats/recs from the database for each site TRdb <- dbConnect(SQLite(), dbname=TRdatabasename) #connect to SQLite DB ElementTR <- list() # ThreatRecTable <- list() ET <- list() for (i in 1:length(SD_speciesTable)){ ElementTR[[i]] <- dbGetQuery(TRdb, paste0("SELECT * FROM ElementThreatRecs"," WHERE ELSubID IN (", paste(toString(sQuote(SD_speciesTable[[i]]$ELSubID)), collapse = ", "), ");")) ThreatRecTable[[i]] <- dbGetQuery(TRdb, paste0("SELECT * FROM ThreatRecTable"," WHERE TRID IN (", paste(toString(sQuote(ElementTR[[i]]$TRID)), collapse = ", "), ");")) ET[[i]] <- dbGetQuery(TRdb, paste0("SELECT SNAME, ELSubID FROM ET"," WHERE ELSubID IN (", paste(toString(sQuote(ElementTR[[i]]$ELSubID)), collapse = ", "), ");")) } #join general threats/recs table with the element table ELCODE_TR <- list() #create list of threat rec info to 
print for each site, to call in R Markdown for (i in 1:length(ElementTR)){ ELCODE_TR[[i]] <- ElementTR[[i]] %>% inner_join(ET[[i]]) %>% inner_join(ThreatRecTable[[i]]) } ###################################################### # make the maps #convert geometry to simple features for the map slnha <- list() nha_sf_list <- list() nha_sf_list <- arc.data2sf(selected_nhas) a <- st_area(nha_sf_list) #calculate area a <- a*0.000247105 #convert m2 to acres selected_nhas$Acres <- as.numeric(a) mtype <- 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}?' basetiles <- sapply(seq_along(nha_sf_list$geom), function(x) tmaptools::read_osm(nha_sf_list$geom[x], type=mtype, ext=1.5, use.colortable=FALSE)) # plot the maps nha_map <- list() for (i in 1:length(nha_sf_list$geom)) { tmap_mode("plot") nha_map[[i]] <- tm_shape(basetiles[[i]], unit="m") + tm_rgb() + tm_shape(nha_sf_list[i,]) + tm_borders("red", lwd=1.5)+ tm_legend(show=FALSE) + tm_layout(attr.color="white") + tm_compass(type="arrow", position=c("left","bottom")) + tm_scale_bar(position=c("center","bottom")) tmap_save(nha_map[[i]], filename=paste(NHAdest1[i], "/", nha_foldername_list[[i]],"_tempmap.png",sep=""), units="in", width=7) } ################################################################### #Write the output R markdown document for each site, all at once for (i in 1:length(nha_filename_list)) { NHAdest2 <- NHAdest1[i] selectedNhas <- selected_nhas[i,] speciesTable <- SD_speciesTable[[i]] ptrepsSelected <- ptreps_selected[[i]] ELCODETR <- ELCODE_TR[[i]] nhaFoldername <- nha_foldername_list[[i]] LinksSelect <- Links[[i]] SiteRank1 <- SiteRank[[i]] rmarkdown::render(input=here::here("scripts","template_NHAREport_part1v2.Rmd"), output_format="word_document", output_file=nha_filename_list[[i]], output_dir=NHAdest1[i]) } # delete the map, after its included in the markdown for (i in 1:length(nha_filename_list)){ fn <- paste(NHAdest1[i], "/", 
nha_foldername_list[i],"_tempmap.png",sep="") if (file.exists(fn)) #Delete file if it exists file.remove(fn) } #################################################### #output data about NHAs with completed templates to database and summary sheets # insert the NHA data into a sqlite database nha_data <- NULL nha_data <- selected_nhas[,c("SITE_NAME","SITE_TYPE","NHA_JOIN_ID","site_rank","site_score","BRIEF_DESC","COUNTY","Muni","USGS_QUAD","ASSOC_NHA","PROTECTED_LANDS")] nha_data$nha_filename <- unlist(nha_filename_list) nha_data$nha_folderpath <- NHAdest1 nha_data$nha_foldername <- unlist(nha_foldername_list) db_nha <- dbConnect(SQLite(), dbname=nha_databasename) # connect to the database dbAppendTable(db_nha, "nha_main", nha_data) dbDisconnect(db_nha) #Create record of NHA creation for organizing writing and editing tasks #create some summary stats describing EOs at each site by taxa group, to help with determining who should write site accounts #build functions plantperc <- function(x) { p <- nrow(species_table_select[[x]][species_table_select[[x]]$ELEMENT_TYPE == 'P',]) pt <- nrow(species_table_select[[x]]) p/pt } musselperc <- function(x){ u <- nrow(species_table_select[[x]][species_table_select[[x]]$ELEMENT_TYPE == 'U',]) ut <- nrow(species_table_select[[x]]) u/ut } insectperc <- function(x){ i <- nrow(species_table_select[[x]][species_table_select[[x]]$ELEMENT_TYPE %in% c('IM','IB','IA','ID','IT'),]) it <- nrow(species_table_select[[x]]) i/it } herpperc <- function(x){ h <- nrow(species_table_select[[x]][species_table_select[[x]]$ELEMENT_TYPE %in% c('R','A'),]) ht <- nrow(species_table_select[[x]]) h/ht } #calculate for each spp table, using functions PlantEO_percent <- unlist(lapply(seq_along(species_table_select), function(x) plantperc(x))) MusselEO_percent <- unlist(lapply(seq_along(species_table_select), function(x) musselperc(x))) InsectEO_percent <- unlist(lapply(seq_along(species_table_select), function(x) insectperc(x))) HerpEO_percent <- 
unlist(lapply(seq_along(species_table_select), function(x) herpperc(x))) nEOs <- unlist(lapply(seq_along(species_table_select), function(x) nrow(species_table_select[[x]]))) #number of total EOs at site EO_sumtable <- as.data.frame(cbind(nEOs, PlantEO_percent,MusselEO_percent,InsectEO_percent,HerpEO_percent)) #bind summary stats into one table together db_nha <- dbConnect(SQLite(), dbname=nha_databasename) nha_data$Template_Created <- as.character(Sys.Date()) nha_sum <- nha_data[,c("NHA_JOIN_ID","SITE_NAME","COUNTY","nha_folderpath", "site_score")] nha_sum <- cbind(nha_sum, EO_sumtable) dbAppendTable(db_nha, "nha_sitesummary", nha_sum) dbDisconnect(db_nha) #disconnect ## For now, you should hand copy and paste the new rows into the NHA site summary Excel worksheet. I created an exports folder within the database folder where .csv versions can periodically be sent, as batches of NHA templates are created. ########################
f3e380886d5aee530c110ae538171c51d89a4a52
17902a8ed8ac24eaa620ff8da7441c0554d5e06d
/Examples/GSE52870_Analysis.R
aef558c1bf146a7783cb87e9e9638ad61581ce42
[]
no_license
soulj/PhenomeExpress
deb4a9d2a2df9b631a1b6595e6ef22b7eeb502d8
cdc15f801361a791b55c9e7455444c7192ee0593
refs/heads/master
2021-01-18T22:41:11.506795
2015-04-30T11:03:43
2015-04-30T11:03:43
24,099,237
6
2
null
null
null
null
UTF-8
R
false
false
7,405
r
GSE52870_Analysis.R
#Analysis of GSE52870 PAX5 dataset with PhenomeExpress #takes around 15 mins to run require("Matrix") require("igraph") require("data.table") require("DESeq2") # for the RNA-seq analysis require("BioNet") # for comparison purposes - not needed by PhenomeExpress require("VennDiagram") # for making the Venn diagram figures require("RCytoscape") # also requires cytoscape v2.8 to be open with the Cytoscape RPC plugin active setwd("~/PhenomeExpress") #source the methods source("./src/HeterogeneousNetwork.R") source("./src/RHWN.R") source("./src/runGIGA.R") source("./src/runPhenoExpress.R") #calculate the FPKM using the effective gene length and the counts per gene GSE52870_Pax5Restoration.GenewiseCounts <- read.delim("./GSE52870/GSE52870_Pax5Restoration-GenewiseCounts.txt") countmatrix=GSE52870_Pax5Restoration.GenewiseCounts[,3:8] rownames(countmatrix)=GSE52870_Pax5Restoration.GenewiseCounts$EntrezID genelength=GSE52870_Pax5Restoration.GenewiseCounts$GeneLength FPKMtable=(countmatrix * 10^9) /(colSums(countmatrix) * genelength) FPKMtable=ifelse(FPKMtable>1,1,0) countmatrix=countmatrix[Matrix::rowSums(FPKMtable)>2,] #use DESeq2 to analyse the raw data colData=data.frame(colnames=colnames(countmatrix),condition=c(rep("PAX5KD",3),rep("PAX5Rescue",3))) dds=DESeqDataSetFromMatrix(countData=countmatrix,colData=colData,design=~condition) dds$condition=factor(dds$condition, levels =c ( "PAX5KD","PAX5Rescue" )) dds2=DESeq(dds) #get the expression table with the fold changes and p values res=results(dds2) dt=as.data.frame(res[order (res$log2FoldChange),]) dt$EntrezID=rownames(dt) #Anotate the genes with SwissProt names to match the network node names Young_EnteztoSwiss_via_Uniprot <- read.delim("./GSE52870/GenenamesEntreztoUniprot_via_UniProt.txt") Young_EnteztoSwiss_via_David <- read.delim("./GSE52870/GenenamesEntreztoUniprot_via_David.txt", dec=",") Young_EnteztoSwiss_via_David=Young_EnteztoSwiss_via_David[,1:2] 
Young_EnteztoSwiss=rbind(Young_EnteztoSwiss_via_David,Young_EnteztoSwiss_via_Uniprot) Young_EnteztoSwiss=Young_EnteztoSwiss[!duplicated(Young_EnteztoSwiss),] #note 1 entrez gene maps to more than one protein dt=merge(dt,Young_EnteztoSwiss,by.x="EntrezID",by.y="From") dt=na.omit(dt) colnames(dt)[8]="name" #load the high confidence mouse PPI network from STRING load("./Networks/HCString_Mouse_Graph.RData") presentList=na.omit(match(dt$name,V(HCString_Mouse)$name)) #Use pre-existing networks filter based on genes found in the transcriptomics experiment pax5.network=induced.subgraph(HCString_Mouse,presentList) pax5.network=decompose.graph(pax5.network)[[1]] presentList=na.omit(match(V(pax5.network)$name,dt$name)) #filter the expression data based on proteins present in the network dt=dt[presentList,] dt=na.omit(dt) #calculate the Pi value for use in the node scoring stage dt$Pi=abs(dt$log2FoldChange)*-log10(dt$padj) dt$absFC=abs(dt$log2FoldChange) #select the phenotypes from the UberPheno ontology - the Phenomiser tool and manual searching of the ontolgy by relevent keywords is helpful for this Phenotypes=c("HP:0004812","MP:0012431","HP:0012191","MP:0008211","MP:0008189") #run Phenome Express LeukResults=runPhenomeExpress(pax5.network,dt,Phenotypes,"Mouse") #retrieve the significant sub-networks subnetworks=LeukResults[[1]] #retrieve the table of p-values sigTable=LeukResults[[2]] #collapse all the nodes in the subnetworks from PhenomeExpress nodes=c() for(i in 1:length(subnetworks)) { tempGraph=subnetworks[[i]] nodes=c(nodes,V(tempGraph)$name) } #load the results from JActiveModules and GIGA - run externally, subnetworks >= 5 nodes kept leukJAM <-read.table("./JActiveModules/leukJM2107", quote="\"") leukJAM=leukJAM[!duplicated(leukJAM$V1),] GIGA <- read.delim("./GIGA/leukGIGA.txt", header=F) #run BioNet for comparison pval=dt$pvalue names(pval)=dt$name b <- fitBumModel(pval, plot = FALSE) scores <- scoreNodes(network = pax5.network, fb = b,fdr = 1e-25) #FDR produces 
similar sized module to max sized PhenomeExpress sub-network module <- runFastHeinz(pax5.network, scores) #count the number of seed Phenotype annotated proteins present in all the sub-networks for each tool #First get the gene to phenotype associations for labelling seed nodes z=getHeterogeneousNetwork(pax5.network,"Mouse")[["genePheno"]] # note contains all proteins - including ones not present in network phenoAnnotated=z[rownames(z) %in% Phenotypes,] phenoAnnotated=phenoAnnotated[,colSums(phenoAnnotated)>0] phenoAnnotated=colnames(phenoAnnotated) #calculate the number of seed phenotype annotated genes for each tool no.Seeds.PhenomeExpress=table(ifelse(nodes %in% phenoAnnotated,1,0)) no.Seeds.leukJAM=table(ifelse(leukJAM %in% phenoAnnotated,1,0)) no.Seeds.GIGA=table(ifelse(GIGA$V2 %in% phenoAnnotated,1,0)) no.Seeds.BioNet=table(ifelse(V(module)$name %in% phenoAnnotated,1,0)) #make a Venn diagram of protein in subnetworks from each tool nodeList=list(PhenomeExpress=nodes,JActivemodules=leukJAM,GIGA=GIGA$V2,BioNet=V(module)$name) venn.diag=venn.diagram(nodeList,fill = c("red", "green","blue","purple"),alpha = c(0.5, 0.5,0.5,0.5), cex = 2,cat.fontface = 4,lty =2, fontfamily =3, filename=NULL ) grid.draw(venn.diag) #send all the sub-networks from PhenomeExpress to cytoscape #colours the nodes according to the fold change #black border if directly annotated to seed phenotype #useful to assign the node with the entrez ID as well - for downstream analysis in cytoscape i.e mapping to genenames or functional annotation V(pax5.network)$EntrezID=as.character(dt$EntrezID) for(i in 1:length(subnetworks)) { presentList=na.omit(match(V(subnetworks[[i]])$name,V(pax5.network)$name)) tempGraph=induced.subgraph(pax5.network,presentList) FC=dt[na.omit(match(V(tempGraph)$name,dt$name)),] V(tempGraph)$logFC=FC$log2FoldChange seedAnnotatedGenes=ifelse(V(tempGraph)$name %in% phenoAnnotated,1,0) V(tempGraph)$Seed=seedAnnotatedGenes #do the network creation stuff #convert the igraph object 
to a graphNEL object and intialise the attributes tempGraph.NEL=igraph.to.graphNEL(tempGraph) tempGraph.NEL=initEdgeAttribute(tempGraph.NEL,"Confidence","numeric",0) tempGraph.NEL=initEdgeAttribute(tempGraph.NEL,"weight","numeric",0) tempGraph.NEL=initNodeAttribute(tempGraph.NEL,"logFC","numeric",0) tempGraph.NEL=initNodeAttribute(tempGraph.NEL,"Seed","numeric",0) tempGraph.NEL=initNodeAttribute(tempGraph.NEL,"EntrezID","char",0) nodeDataDefaults(tempGraph.NEL, "label") <- "name" nodeData(tempGraph.NEL,V(tempGraph)$name,"label") = V(tempGraph)$name tempGraph.NEL=initNodeAttribute(tempGraph.NEL,"label","char","name") #Open the cytoscape window and send the graph cw1 <- new.CytoscapeWindow (paste("PhenoExpress",as.character(i),sep=""), graph=tempGraph.NEL) #display the graph displayGraph (cw1) #select the layout layoutNetwork (cw1, layout.name='force-directed') #colour according to the logFC control.points <- c(-5,0,5) node.colors <- c ("#00AA00", "#00FF00", "#FFFFFF", "#FF0000", "#AA0000") setNodeColorRule (cw1, node.attribute.name='logFC', control.points, node.colors, mode='interpolate') setDefaultBackgroundColor (cw1, '#FFFFFF') #set the nodeborder to correspond to the seed phenotype annotated genes data.values <- c ("1", "0") line.widths = c ("15","1") setNodeBorderWidthRule (cw1, 'Seed', data.values, line.widths) }
7c110f21cfb8f21761d5e1f3a0779f20a3177c77
419f499346f60b2f341a0a57d5e6842a5ab9b565
/code/09b_ranks_map.R
004bb7dbb61fe6b7aeae46b24f2df49758ded26f
[]
no_license
rhrzic/TreatableMortality
b0d04b6ea44ccd8aa1f4ce238e8947af126ea997
c3cb9f950303687adb7192105fe701c91103878f
refs/heads/master
2022-12-30T06:23:47.796564
2020-10-23T10:37:04
2020-10-23T10:37:04
275,123,796
0
0
null
null
null
null
UTF-8
R
false
false
1,121
r
09b_ranks_map.R
require(eurostat) require(tidyverse) require(sf) male_ranks_overall$Sex = "Male" female_ranks_overall$Sex = "Female" gisco0 <- get_eurostat_geospatial(output_class = "sf", resolution = "03", nuts_level = "0", year = "2016", cache = TRUE) per_country <- rbind(select(male_ranks_overall, Country, Sex, MAD), select(female_ranks_overall, Country, Sex, MAD)) %>% filter(str_length(Country) == 2) map <- left_join(per_country, gisco0, by = c("Country" = "id")) map1 <- ggplot(map) + geom_sf(aes(fill = MAD, geometry = geometry), colour = "transparent") + scale_fill_gradient2(low = "white", mid = "gainsboro", high = "black")+ coord_sf(xlim = c(-10, +30), ylim = c(35, 70)) + theme(panel.grid.major = element_line(colour = 'transparent'), panel.background = element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank()) + labs(fill = "Mean absolute\ndifference\nin rank")+ facet_grid(. ~ Sex) ggsave('plots/m1.png', map1, scale = 2, width = 5, height = 2, units = "in")
1166c138cc6e2432d1ef342f1e5fd1a49be318b2
5f29b8d3189a6526bb6ee0b4dc8e05ab3d2d3069
/001_regularGrid.R
9bcfdfa9fb08e069799ca4af5aa43f0f1e91e48b
[]
no_license
curdon/linguisticDensity_ProcB_derungsEtAl
cb6f6b8acec7b86181146bd00167f539beb79542
318f4ca4e64db4782677e7f189692c3bb35d6fa0
refs/heads/master
2020-03-20T08:56:14.958289
2018-06-14T08:43:18
2018-06-14T08:43:18
137,323,409
0
1
null
null
null
null
UTF-8
R
false
false
1,425
r
001_regularGrid.R
################################################## ## Name: 001_regularGrid ## Script purpose: Creating an even distribution of grid points for different spatial resolutions ## Date: 2018 ## Author: Curdin Derungs ################################################## library(geosphere) library(rgdal) library(plyr) rm(list=ls()) ##three spatial resolutions are defined as numbers of regions used in the following function #the script has the be run for each resolution seperately res<-30 #appr. 1000 grid points # res<-15 #300 # res<-50 #3000 ##rading continental shapes #even grid points are only distributed over continental land masses cont <- readOGR("input", "continents_simple") proj4string(cont)<-CRS("+proj=longlat +datum=WGS84") ##creating regular points using the regularCoordinates() function from the geosphere package #the function solves the Thompson Problem reg.coords<-regularCoordinates(res) ##converting coordinates to spatial points reg.spdf<-SpatialPointsDataFrame(SpatialPoints(reg.coords),data.frame(id=1:nrow(reg.coords))) #defining the spatial projection proj4string(reg.spdf)<-CRS("+proj=longlat +datum=WGS84") ##intersecting evenly distributed points with the continental polygons ov<-over(reg.spdf,cont) #filtering for points on continents reg.spdf<-reg.spdf[!is.na(ov$OBJECTID),] #save the grids save(reg.spdf,file=paste("output/001_regularGrid/randPts_",nrow(reg.spdf),".Rdata",sep=""))
ca5dcaab800eb32ab58810e151a465e6acaf7efd
9217ebebf4325621f8726001596dbf57e922548f
/inst/scripts/make-metadata.R
59bf067a2a805d214b64395fde535e7b5ac1be38
[]
no_license
DKMS-LSL/ipdDb
a851b4ee44a8fab168e59804f12c26ff11d80a0f
57a6201b7fb4337bb3bf1ea5c31f79fa8fb90bbf
refs/heads/master
2020-03-21T17:46:38.184184
2018-10-18T13:20:45
2018-10-18T13:20:45
138,853,086
0
0
null
null
null
null
UTF-8
R
false
false
3,238
r
make-metadata.R
## Build the ExperimentHub metadata table for the ipdDb resources and
## write it to inst/extdata/ipd_metadata.csv.
## Eleven of the record fields are identical across all three resources,
## so a single constructor removes the copy/paste duplication of the
## original three data.frame() literals.

#' Build one ExperimentHub metadata record as a one-row data.frame.
#' Only the fields that differ between resources are parameters; every
#' shared field (BiocVersion, Species, Maintainer, ...) is fixed here.
ipd_record <- function(title, description, source_url, source_version,
                       rdata_path, tags) {
  data.frame(
    Title = title,
    Description = description,
    BiocVersion = "3.8",
    Genome = "no genome data",
    SourceType = "Zip",
    SourceUrl = source_url,
    SourceVersion = source_version,
    Species = "Homo sapiens",
    TaxonomyId = 9606,
    Coordinate_1_based = TRUE,
    DataProvider = "EMBL-EBI",
    Maintainer = "Steffen Klasberg <klasberg@dkms-lab.de>",
    RDataClass = "data.frame, DNAStringSet, GRanges",
    DispatchClass = "SQLiteFile",
    RDataPath = rdata_path,
    Tags = tags
  )
}

## Shared reference text for all descriptions.
ipd_reference <- paste0(
  "Reference: Robinson J, Maccari G, Marsh SGE, Walter L, Blokhuis J, ",
  "Bimber B, Parham P, De Groot NG, Bontrop RE, Guethlein LA, and ",
  "Hammond JA KIR Nomenclature in non-human species Immunogenetics ",
  "(2018), in preparation"
)

hlaMetadata <- rbind(
  ipd_record(
    title = "Allele data from the IPD IMGT/HLA database version 3.32.0",
    description = paste0(
      "Data for all alleles of selected HLA loci (HLA-A, -B, -C, -DPB1, ",
      "-DQB1 and -DRB1). The allele annotation, sequence, gene structure ",
      "and the (sequence-based) closest allele in full-length is stored. ",
      ipd_reference
    ),
    source_url = "https://github.com/ANHIG/IMGTHLA/blob/Latest/xml/hla.xml.zip",
    source_version = "3.32.0",
    rdata_path = "ipdDb/ipdHLA_3.32.0.sqlite",
    tags = "ipd:hla:IMGT/HLA:alleles"
  ),
  ipd_record(
    title = "Allele data from the IPD IMGT/HLA database version 3.33.0",
    # NOTE: the original description for 3.33.0 contained an embedded
    # newline before "Reference:"; preserved here via "\n" so the CSV
    # output is byte-identical.
    description = paste0(
      "Data for all alleles of selected HLA loci (HLA-A, -B, -C, -DPB1, ",
      "-DQB1 and -DRB1). The allele annotation, sequence, gene structure ",
      "and the (sequence-based) closest allele in full-length is stored. \n",
      ipd_reference
    ),
    source_url = "https://github.com/ANHIG/IMGTHLA/blob/Latest/xml/hla.xml.zip",
    source_version = "3.33.0",
    rdata_path = "ipdDb/ipdHLA_3.33.0.sqlite",
    tags = "ipd:hla:IMGT/HLA:alleles"
  )
)

kirMetadata <- ipd_record(
  title = "Allele data from the IPD KIR database version 2.7.1",
  description = paste0(
    "Data for the alleles of all KIR loci in the database. The allele ",
    "annotation, sequence, gene structure and the (sequence-based) ",
    "closest allele in full-length is stored. ",
    ipd_reference
  ),
  source_url = "https://github.com/ANHIG/IPDKIR/blob/Latest/KIR.dat",
  source_version = "2.7.1",
  rdata_path = "ipdDb/ipdKIR_2.7.1.sqlite",
  tags = "ipd:kir:alleles"
)

write.csv(rbind(hlaMetadata, kirMetadata), "inst/extdata/ipd_metadata.csv")
af3a31bfa1f624311a70768ed0f88f0955fa38b9
6e3fa6b477380f245abb385f9c30b1f601f0aa82
/inst/tinytest/test_gdns.R
dee5f4e47e38b04605c86934b5c2a60942b418a9
[]
no_license
cran/gdns
4435597cb607ae897bb597ce02b2bb8d5c677a75
6a2388a6c687f3dc1df3cfc55e8331d7728037bb
refs/heads/master
2020-05-21T08:53:14.863944
2020-05-15T13:00:03
2020-05-15T13:00:03
69,889,209
0
0
null
null
null
null
UTF-8
R
false
false
486
r
test_gdns.R
# tinytest suite for the gdns package.
library(gdns)

# Live-DNS checks are network dependent, so they only run on the
# developer machine (tinytest::at_home()), never on CRAN.
if (at_home()) {
  expect_true(length(gdns::query("example.com")) > 0)
  test_domains <- c("example.com", "example.org", "example.net")
  bulk_res <- gdns::bulk_query(test_domains)
  expect_true(nrow(bulk_res) > 0)
}

# SPF classification: a record ending in "~all" is a soft fail only —
# neither a hard fail nor a pass-all policy.
spf_record <- "v=spf1 include:_spf.apple.com include:_spf-txn.apple.com ~all"
expect_true(is_soft_fail(spf_record))
expect_false(is_hard_fail(spf_record))
expect_false(passes_all(spf_record))
e7aeb016269c92d32599713feae0c7dde4b37ab4
4d265c3f4046c3edd1bac44a9894d466526d9d1d
/chap01/10_string.R
a339f4765d5ac23bba7ed99224f32eb4d72a71f5
[]
no_license
kjy3309/R_study
3caa3c0753c3f32b8aefe69afbb9ae97ed7288ce
6ff03f9b4940116a551aaa5e186559400c7746d0
refs/heads/master
2023-01-22T06:07:11.477448
2020-11-12T04:49:27
2020-11-12T04:49:27
312,169,482
0
0
null
null
null
null
UTF-8
R
false
false
1,844
r
10_string.R
## Chapter exercise: line charts with plotly — US unemployment, then
## Seoul subway ridership for June 2020.
library(plotly)
library(dplyr)

# Draw the unemployment data as a line chart
View(economics)
plot_ly(economics, x = ~date, y = ~unemploy) %>%
  add_lines() %>%
  layout(title = '์‹ค์—…๋ฅ  ์ถ”์ด',
         xaxis = list(title = '์—ฐ๋„'),
         yaxis = list(title = '์‹ค์—…์ž ์ˆ˜'))

## Total subway passengers per day, as a line chart
# stringsAsFactors spelled out (TRUE/FALSE, never the reassignable T/F)
subway <- read.csv('./data/ch02/202006_SUBWAY.csv', stringsAsFactors = FALSE)
View(subway)
class(subway)

# Daily totals = boardings + alightings summed over all stations
ex <- subway %>%
  group_by(์‚ฌ์šฉ์ผ์ž) %>%
  summarise(์ด์Šน๊ฐ์ˆ˜ = sum(์Šน์ฐจ์ด์Šน๊ฐ์ˆ˜ + ํ•˜์ฐจ์ด์Šน๊ฐ์ˆ˜))
View(ex)

# Convert YYYYMMDD dates in June 2020 to plain day-of-month numbers
ex$์‚ฌ์šฉ์ผ์ž <- ifelse(ex$์‚ฌ์šฉ์ผ์ž > 20200600, ex$์‚ฌ์šฉ์ผ์ž - 20200600, ex$์‚ฌ์šฉ์ผ์ž)

# Detailed settings: https://plotly.com/r
plot_ly(ex, x = ~์‚ฌ์šฉ์ผ์ž, y = ~์ด์Šน๊ฐ์ˆ˜) %>%
  add_lines() %>%
  layout(title = '6์›” ์Šน๊ฐ ์ถ”์ด',
         xaxis = list(title = '๋‚ ์งœ', autotick = FALSE),
         yaxis = list(title = '์ด์šฉ๊ฐ ์ˆ˜'))

## Daily passengers per subway line
# Renamed from `list`: that name shadowed base::list().
line_totals <- subway %>%
  group_by(์‚ฌ์šฉ์ผ์ž, ๋…ธ์„ ๋ช…) %>%
  summarise(total = sum(์Šน์ฐจ์ด์Šน๊ฐ์ˆ˜ + ํ•˜์ฐจ์ด์Šน๊ฐ์ˆ˜))
line_totals$์‚ฌ์šฉ์ผ์ž <- ifelse(line_totals$์‚ฌ์šฉ์ผ์ž > 20200600,
                          line_totals$์‚ฌ์šฉ์ผ์ž - 20200600,
                          line_totals$์‚ฌ์šฉ์ผ์ž)
write.csv(file = 'C:/R/chap01/sample.csv', line_totals)
line_totals <- read.csv('C:/R/chap01/sample.csv')
plot_ly(line_totals, x = ~์‚ฌ์šฉ์ผ์ž, y = ~total) %>%
  add_lines(linetype = ~๋…ธ์„ ๋ช…)
View(line_totals)

# Keep only subway lines 1 through 9
library(stringr) # string-handling library
# BUG FIX: the original chained str_detect() calls tested '^6ํ˜ธ์„ ' twice
# and never '^7ํ˜ธ์„ ', silently dropping line 7. A character class covers
# all nine intended prefixes.
g <- line_totals %>%
  filter(str_detect(๋…ธ์„ ๋ช…, '^[1-9]ํ˜ธ์„ '))
View(g)
plot_ly(g, x = ~์‚ฌ์šฉ์ผ์ž, y = ~total) %>%
  add_lines(linetype = ~๋…ธ์„ ๋ช…)
0d383538551fe4da4be299692957d8d7b9c46372
8a6fb400998956cec1dd817a74b90c34c62c6f29
/1st.Version/GP_Simulate.R
34e4171ce77a20fb7fb540dd4db5a2bb179fc8a0
[]
no_license
XGerade/GaussianProcessDynamicSystem
9faed684f141e55aec6b0a417ea26971b697ebad
cc4444eec8c74772f86be9afbd0bafcf36277cd0
refs/heads/master
2021-01-23T11:48:10.335574
2014-04-23T16:28:58
2014-04-23T16:28:58
null
0
0
null
null
null
null
UTF-8
R
false
false
1,114
r
GP_Simulate.R
## Simulate an inverted-pendulum-on-cart style system forward in time,
## using Gaussian-process regression (GP.reg) as the learned model for
## the accelerations theta'' and x'', and a bang-bang control force F.
## Results are saved to GP_Results.RData.

# Training data / outputs (x.data, theta_2_out, x_2_out) are presumably
# loaded from this file — TODO confirm its contents.
load("Simulate_Results.RData")
# Provides GP.reg() (and possibly calculateCovariance / K.xx).
source("GPRegression.R")

tao = 0.02   # integration time step (Euler)
# Controller gains for the switching force law below.
k1 = -1
k2 = -1
k3 = 1
k4 = -0.1
# Initial conditions: angle, angular velocity, position, velocity.
theta_initial <- -0.15
theta_1_initial <- -1
x_initial <- 1
x_1_initial <- 3
timeLength <- 300
# Preallocated trajectory: one row per step,
# columns = (theta, theta', x, x', F).
state <- matrix(0, timeLength, 5)
# Observation-noise variances for the two GP regressions.
sigma.squared1 = 0.01
sigma.squared2 = 0.01
Fm = 2.5     # force magnitude for the bang-bang controller
# NOTE(review): K.xx is used inside the loop below but its computation
# here is commented out — presumably K.xx is defined by
# Simulate_Results.RData or GPRegression.R; verify before running.
#K.xx <- calculateCovariance(x.data, x.data)
for (i in 1: timeLength) {
  print(i)   # progress indicator, one line per time step
  # Record the current state before computing this step's dynamics.
  state[i, 1] <- theta_initial
  state[i, 2] <- theta_1_initial
  state[i, 3] <- x_initial
  state[i, 4] <- x_1_initial
  # Bang-bang control: force of fixed magnitude Fm, sign given by a
  # linear combination of the state. (Note: `F` shadows the FALSE alias
  # within this script.)
  F <- Fm * sign(k1 * x_initial+ k2 * x_1_initial+ k3 * theta_initial+ k4 * theta_1_initial)
  state[i, 5] <- F
  # GP posterior mean of the angular acceleration theta'' at the
  # current 5-dimensional state (theta, theta', x, x', F).
  results1 <- GP.reg(x.data, theta_2_out, matrix(state[i,], 1, 5), sigma.squared1, K.xx)
  theta_2 <- results1$post.mean[1]
  # GP posterior mean of the cart acceleration x'' at the same state.
  results2 <- GP.reg(x.data, x_2_out, matrix(state[i,], 1, 5), sigma.squared2, K.xx)
  x_2 <- results2$post.mean[1]
  # Explicit-Euler update of the four state variables with step tao.
  x_initial <- x_initial + tao * x_1_initial
  x_1_initial <- x_1_initial + tao * x_2
  theta_initial <- theta_initial + tao * theta_1_initial
  theta_1_initial <- theta_1_initial + tao * theta_2
}
# Persist the full simulated trajectory.
save(state, file = "GP_Results.RData")