blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a942ef4f6e8a94b457ec2587ffb426b92d4efe4f
|
1bba1180c422e9789de3c142b8ac13149d5ffd3b
|
/shendure/process_all.R
|
59812a1443fb69ba5db4572555fb1636c8e52c7c
|
[] |
no_license
|
haleykgrant/Shendure_Data
|
f80c0bc54b408582deeecfdf34e9ba7660caa43e
|
72b98be40205ec53ce35fcdc955ce99017e1cc8e
|
refs/heads/master
| 2022-04-23T03:27:41.888631
| 2020-04-23T03:51:49
| 2020-04-23T03:51:49
| 256,798,860
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 698
|
r
|
process_all.R
|
# Split one sample's cleaned bowtie-alignment positions into per-chromosome CSVs.
# Designed to run as an SGE array job: one task per sample file.
library(pacman)
p_load(data.table, tidyverse, plyr)
memory.limit(size = 50000)  # Windows-only; emits a warning elsewhere — TODO confirm target platform
# path to cleaned output files from bowtie alignment
d <- "/data/nfs1/home/hgrant3/cancer_seek/shendure/sam_files/positions/"
all_files <- list.files(d)
samples <- all_files[which(endsWith(all_files, "all_cleaned.txt"))]
# run in parallel - one SGE array task per sample
i <- as.integer(Sys.getenv("SGE_TASK_ID"))
file <- samples[i]
print(file)
# Read the sample file ONCE (the original re-read the same file on every
# loop iteration), then write one filtered CSV per autosome (chr1..chr22).
positions <- read.delim(paste(d, file, sep = ""),
                        col.names = c("chrom", "pos", "length"))
name <- substr(file, 1, 10)
for (j in 1:22) {
  dat <- positions %>%
    filter(chrom == paste("chr", j, sep = "") & as.numeric(length) > 0)
  fwrite(dat, file = paste(name, "_", j, "_all.csv", sep = ""))
}
~
|
176fb10f88c9cf6942558480300de723cc8c5eaf
|
0bb873ad8d55d946a90a49a5b8ae0d9a3bb8990f
|
/EdgeR_analysis/0_de_edgeR.R
|
11548f9f53434abfde89d0f53704414ee12501fa
|
[] |
no_license
|
ComplexityBiosystems/Regeneration
|
5056e3c588f49eeab80d2819530b0fdedd424565
|
49f246e93e5912fcb9d3a0e62f80bb198b9d7168
|
refs/heads/master
| 2021-08-23T14:59:22.775955
| 2017-12-05T09:37:50
| 2017-12-05T09:37:50
| 112,357,293
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,081
|
r
|
0_de_edgeR.R
|
##################
### EdgeR differential expression analysis
### use first
### source("https://bioconductor.org/biocLite.R")
### biocLite("edgeR")
###
##################
### This program is an example of edgeR when a single sample for each time point is available
### the "control" DGEList lets us use the different time points to estimate dispersion
### See the edgeR user guide for other dispersion-estimation methods
######
library(edgeR)
# Counts table: genes in rows (row names in column 1), one column per time point.
rawData = read.table('Expression_Genes.txt', row.names = 1, skip=1)
time = factor(c('t0', 't1', 't2', 't3','t4'))
design = model.matrix(~0+time)
colnames(design) = levels(time)
print(time)
DGEData = DGEList(counts=rawData,group = time)
# Second DGEList with all samples pooled into one group; used only to
# estimate a common dispersion since there are no replicates per time point.
DGEData_control = DGEList(counts=rawData,group=rep(1,each=5))
DGEData = calcNormFactors(DGEData)
#
DGEData_control = calcNormFactors(DGEData_control)
normData3 = cpm(DGEData ,normalized.lib.sizes=FALSE)
normData5 = cpm(DGEData_control,normalized.lib.sizes=FALSE)
#
DGEData_control = estimateGLMCommonDisp(DGEData_control, method="deviance",robust=TRUE, subset=NULL)
DGEData$common.dispersion <- DGEData_control$common.dispersion
fit = glmFit(DGEData, design)
#fit = glmFit(DGEData, design, dispersion=0.1) # dispersion can be assumed a priori
comps = makeContrasts(Time0_1 = t1-t0,
Time0_2 = t2-t0,
Time0_3 = t3-t0,
Time0_4 = t4-t0,
levels=design)
# BUG FIX: topTags() was previously called on the undefined names
# Time0_1..Time0_4; the glmLRT results live in the *_fit objects.
Time0_1_fit = glmLRT(fit, contrast=comps[,'Time0_1'])
write.table(topTags(Time0_1_fit,n=30000,adjust.method = 'BH', sort.by='p.value'),'Time0_1.de',sep='\t',quote=FALSE)
Time0_2_fit = glmLRT(fit, contrast=comps[,'Time0_2'])
write.table(topTags(Time0_2_fit,n=30000,adjust.method = 'BH', sort.by='p.value'),'Time0_2.de',sep='\t',quote=FALSE)
Time0_3_fit = glmLRT(fit, contrast=comps[,'Time0_3'])
write.table(topTags(Time0_3_fit,n=30000,adjust.method = 'BH', sort.by='p.value'),'Time0_3.de',sep='\t',quote=FALSE)
Time0_4_fit = glmLRT(fit, contrast=comps[,'Time0_4'])
write.table(topTags(Time0_4_fit,n=30000,adjust.method = 'BH', sort.by='p.value'),'Time0_4.de',sep='\t',quote=FALSE)
|
5448baf63b21c08d2d6e318de3511be5935e963e
|
4fe3500210c86139245cb897e32f5dceeceb3ed9
|
/kovaleski_john/app.R
|
fc7d1325cb93f04f6cbedb0ce0ba232d9f188f99
|
[] |
no_license
|
jakekova/datascience_final_assignment
|
3f7b1bc87a9643d2e28ec72f547451d5a7320715
|
ad8dca051ac4c6d55c3a9d8712805a1c8a24785b
|
refs/heads/master
| 2022-07-10T13:34:37.827438
| 2020-05-12T20:13:57
| 2020-05-12T20:13:57
| 259,980,606
| 0
| 0
| null | 2020-04-29T16:29:49
| 2020-04-29T16:29:49
| null |
UTF-8
|
R
| false
| false
| 8,834
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Load all libraries ------------------------------------------------------------------------
library(shiny)
library(shinythemes)
library(tidyverse)
library(colourpicker)
source("covid_data_load.R") ## This line runs the Rscript "covid_data_load.R", which is expected to be in the same directory as this shiny app file!
# The variables defined in `covid_data_load.R` are now fully accessible in this shiny app script!!
# UI --------------------------------
# UI definition: a two-tab navbarPage ("cyborg" theme). One tab visualizes the
# NYT state/county data, the other the JHU country-level data. The choice
# vectors usa_states and world_countries_regions are presumably defined by
# covid_data_load.R (sourced above) — verify there if they change.
ui <- shinyUI(
navbarPage( theme = shinytheme("cyborg"), ### Uncomment the theme and choose your own favorite theme from these options: https://rstudio.github.io/shinythemes/
title = "The Kovaleski Covid-19 Tracker", ### Replace title with something reasonable
## All UI for NYT goes in here:
tabPanel("NYT data visualization", ## do not change this name
# All user-provided input for NYT goes in here:
# Inputs: two plot colors, state selector, county-facet toggle,
# linear/log y-axis toggle, and a ggplot theme selector.
sidebarPanel(
colourpicker::colourInput("nyt_color_cases", "Color for plotting COVID cases:", value = "navy"),
colourpicker::colourInput("nyt_color_deaths", "Color for plotting COVID deaths:", value = "firebrick"),
selectInput("which_state",
"Which state would you like to plot?",
choices = usa_states,
selected = "New Jersey"),
radioButtons("facet_county",
"Show individual counties by faceting?",
choices = c("No","Yes"),
selected = "No"),
radioButtons("y_scale",
"Scale for Y-axis?",
choices = c("Linear","Log"),
selected = "Linear"),
selectInput("which_theme",
"Which theme would you like to use?",
choices = c("Classic", "Minimal", "Light", "Dark"),
selected = "Classic")
), # closes NYT sidebarPanel. Note: we DO need a comma here, since the next line opens a new function
# All output for NYT goes in here:
mainPanel(
plotOutput("nyt_plot", height = "800px")
) # closes NYT mainPanel. Note: we DO NOT use a comma here, since the next line closes a previous function
), # closes tabPanel for NYT data
## All UI for JHU goes in here:
tabPanel("JHU data visualization", ## do not change this name
# All user-provided input for JHU goes in here:
# Same layout as the NYT tab minus county faceting; input IDs carry a
# _jhu suffix so the two tabs' controls are independent.
sidebarPanel(
colourpicker::colourInput("jhu_color_cases", "Color for plotting COVID cases:", value = "pink"),
colourpicker::colourInput("jhu_color_deaths", "Color for plotting COVID deaths:", value = "green"),
selectInput("which_country",
"Which country would you like to plot?",
choices = world_countries_regions,
selected = "US"),
radioButtons("y_scale_jhu",
"Scale for Y-axis?",
choices = c("Linear","Log"),
selected = "Linear"),
# dateRangeInput("dateRange_jhu",
# "Select Date Range",
# start = "2020-01-22",
# end = Sys.Date()-2),
selectInput("which_theme_jhu",
"Which theme would you like to use?",
choices = c("Classic", "Minimal", "Light", "Dark"),
selected = "Dark")
), # closes JHU sidebarPanel
# All output for JHU goes in here:
mainPanel(
plotOutput("jhu_plot", height = "800px")
) # closes JHU mainPanel
) # closes tabPanel for JHU data
) # closes navbarPage
) # closes shinyUI
# Server --------------------------------
# Server: builds the NYT and JHU plots from the user's input selections.
server <- function(input, output, session) {

  ## ---------------- NYT panel ----------------

  ## Reactive: NYT rows for the chosen state. Counties are summed together
  ## unless the user asked to facet by county, in which case each county's
  ## cumulative count is kept as its own row (renamed to `y` for plotting).
  nyt_data_subset <- reactive({
    state_data <- nyt_data %>%
      filter(state == input$which_state)
    if (input$facet_county == "No") {
      state_data <- state_data %>%
        group_by(date, covid_type) %>%
        summarize(y = sum(cumulative_number))
    }
    if (input$facet_county == "Yes") {
      state_data <- state_data %>%
        rename(y = cumulative_number)
    }
    state_data
  })

  ## NYT plot: cases/deaths over time with user-chosen colors, optional log
  ## y-axis, optional county facets, and the selected ggplot theme.
  output$nyt_plot <- renderPlot({
    nyt_plot <- ggplot(nyt_data_subset(),
                       aes(x = date, y = y, color = covid_type, group = covid_type)) +
      geom_point() +
      geom_line() +
      scale_color_manual(values = c(input$nyt_color_cases, input$nyt_color_deaths)) +
      labs(title = paste(input$which_state, "cases and deaths"),
           x = "Date", color = "Covid-19 Type", y = "Count")
    if (input$y_scale == "Log") {
      nyt_plot <- nyt_plot + scale_y_log10()
    }
    if (input$facet_county == "Yes") {
      nyt_plot <- nyt_plot + facet_wrap(~county)
    }
    # Adding NULL to a ggplot is a no-op, so an unmatched theme leaves the
    # default theme in place.
    nyt_plot <- nyt_plot + switch(input$which_theme,
                                  "Classic" = theme_classic(),
                                  "Minimal" = theme_minimal(),
                                  "Light"   = theme_light(),
                                  "Dark"    = theme_dark())
    nyt_plot + theme(legend.position = "bottom")
  })

  ## ---------------- JHU panel ----------------

  ## Reactive: JHU rows for the chosen country/region, summed per date and
  ## covid type.
  jhu_data_subset <- reactive({
    country_data <- jhu_data %>%
      filter(country_or_region == input$which_country)
    country_data %>%
      group_by(date, covid_type) %>%
      summarize(y = sum(cumulative_number))
  })

  ## JHU plot: analogous to the NYT plot, but without county faceting and
  ## with the default legend position.
  output$jhu_plot <- renderPlot({
    jhu_plot <- ggplot(jhu_data_subset(),
                       aes(x = date, y = y, color = covid_type, group = covid_type)) +
      geom_point() +
      geom_line() +
      scale_color_manual(values = c(input$jhu_color_cases, input$jhu_color_deaths)) +
      labs(title = paste(input$which_country, "cases and deaths"),
           x = "Date", color = "Covid-19 Type", y = "Count")
    if (input$y_scale_jhu == "Log") {
      jhu_plot <- jhu_plot + scale_y_log10()
    }
    jhu_plot <- jhu_plot + switch(input$which_theme_jhu,
                                  "Classic" = theme_classic(),
                                  "Minimal" = theme_minimal(),
                                  "Light"   = theme_light(),
                                  "Dark"    = theme_dark())
    jhu_plot
  })
}
# Do not touch below this line! ----------------------------------
shinyApp(ui = ui, server = server)
|
1f5ff733191958d7d341d4f519bea8ee5fb4aa11
|
381308baf0c24b971c1b1fef3aaf490615beaabd
|
/sess7r/R/sess7r.R
|
8ba3beafa008adf0b5d6b57d4b5e86328b491d66
|
[] |
no_license
|
merralja/SchoolProbit
|
d03b27ed24ff00108c330b2e956a42efc42ace73
|
6d9bc2c1a06c3642eb0b954bea2add9da1bf8e66
|
refs/heads/master
| 2020-08-01T20:28:34.885665
| 2019-12-20T14:59:54
| 2019-12-20T14:59:54
| 211,106,349
| 1
| 0
| null | 2019-11-13T16:02:52
| 2019-09-26T14:15:54
|
HTML
|
UTF-8
|
R
| false
| false
| 498
|
r
|
sess7r.R
|
#' sess7r: A package with a minimal example of package creation.
#'
#' This package is an exercise in package creation using
#' RStudio. The package will hopefully include a sample
#' function and a sample dataset with their respective
#' documentation, though the last time I tried this it
#' screwed up miserably.
#'
#' @docType package
#' @name sess7r
#' @author John Merrall, McMaster University \email{merralja@@mcmaster.ca}
#' @references \url{https://github.com/merralja/SchoolProbit}
# NULL is the conventional object for roxygen2 package-level documentation:
# the block above documents the package itself, not a function.
NULL
|
515fe75d90c56154ca99d6d9da60b3bf78560444
|
0c1c0e8c68835ca908806372236af6f539490042
|
/lecture/2016.05.31/politico.R
|
da7da9c6988f9c0aca1446ed3180e9b0b1ec7ef8
|
[] |
no_license
|
AylNels/notes
|
2343f78064d1bb07dc71d23dc2b70c69b45e52c3
|
e331c4ff2d736cb65f2313659ab42be469d80209
|
refs/heads/master
| 2021-01-15T20:47:57.257314
| 2016-06-05T20:29:02
| 2016-06-05T20:29:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,385
|
r
|
politico.R
|
# Description:
#
# This file is written as an a self-contained R script so that it can be
# modified easily. The goal here is to scrape the results of primary elections
# from Politico.
#
# The general workflow for R code is to modify the script, save the changes,
# and then reload the functions in R with the command
#
# source("politico.R")
#
# This makes it easy to modify the functions until they do what's needed.
#
# The 3 functions in this file were written in the order they appear.
#
# As you read, notice that none of the CSS selectors are particularly tricky.
# The Politico web page is well-organized and although the task seems large,
# it's not too hard once it's broken into smaller steps.
#
# Also make sure to try these functions out as you read.
library("rvest")
# Download the Politico results page and return its per-state nodes.
read_politico = function() {
  url <- "http://www.politico.com/2016-election/results/map/president"
  page <- read_html(url)
  # The last expression of a function is its return value (no explicit
  # return() needed): one ".timeline-group" node per state.
  html_nodes(page, ".timeline-group")
}
# Scrape both parties' primary results from a single state node
# (e.g. Colorado). Returns one data frame with a `state` column attached.
scrape_state = function(state) {
  # The state's name lives in the header link.
  state_name <- html_text(html_node(state, ".timeline-header h3 a"),
                          trim = TRUE)

  # Delegate the per-party work to scrape_primary(). Writing that helper
  # separately keeps this function short and avoids repeating the same
  # scraping logic for Republicans and Democrats — repetition is usually a
  # sign a piece of code should become its own function.
  republicans <- scrape_primary(html_nodes(state, ".contains-republican"),
                                "Republican")
  democrats <- scrape_primary(html_nodes(state, ".contains-democrat"),
                              "Democrat")

  combined <- rbind(republicans, democrats)
  combined$state <- state_name
  return(combined)
}
# Scrape one party's primaries within a state. Defined after scrape_state(),
# which assumes this function's role; see the notes there.
#
# primary: the party's result nodes for one state (from scrape_state()).
# party:   label ("Republican" or "Democrat") stored in the output.
#
# Returns a data frame of candidate results for the first primary found,
# or NULL when the party had no primary results in this state (NULL rows
# are silently dropped by the caller's rbind()).
scrape_primary = function(primary, party) {
  results = html_table(html_nodes(primary, ".results-table"))
  # Guard first: return nothing (NULL) immediately if no primary results
  # were found.
  if (length(results) == 0) {
    return(NULL)
  }
  # Extract the delegate headings only after the guard — the original did
  # this before checking for results, doing needless work on the empty case.
  delegates = html_text(html_nodes(primary, ".results-headings .pos-1"))
  # Only the first result table is used, which would be a major limitation
  # if parties typically had multiple primaries within a state.
  results = results[[1]]
  colnames(results) = c("candidate", "percent", "votes", "delegates")
  results$total = delegates
  results$party = party
  return(results)
}
|
dd5f311216d10c29e7f63817ac584b700b339c11
|
ba2605fbf416b7f7e4cbdc79711eb614ff3eef02
|
/man/plr_xbx_utc_model.Rd
|
a6a69c33e0b3e0d8814cd60f9d6e043f51ed3520
|
[] |
no_license
|
romainfrancois/PVplr
|
e2422c3b7359df03137d559a806b9ac8ad26cb7e
|
862a626fb9ab2b932b0aebf13e10e80971a217e4
|
refs/heads/master
| 2022-12-27T01:05:37.820493
| 2020-10-07T11:00:20
| 2020-10-07T11:00:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,735
|
rd
|
plr_xbx_utc_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power_corrections.R
\name{plr_xbx_utc_model}
\alias{plr_xbx_utc_model}
\title{UTC Method for PLR Determination}
\usage{
plr_xbx_utc_model(
df,
var_list,
by = "month",
data_cutoff = 30,
predict_data = NULL,
ref_irrad = 900,
irrad_range = 10
)
}
\arguments{
\item{df}{A dataframe containing pv data.}
\item{var_list}{A list of the dataframe's standard variable names, obtained from
the output of \code{\link{plr_variable_check}}.}
\item{by}{String, either "day", "week", or "month". The time periods over which
to group data for regression.}
\item{data_cutoff}{The number of data points needed to keep a value in the
final table. Regressions over less than this number and their data will be discarded.}
\item{predict_data}{optional; Dataframe; If you have preferred estimations of irradiance,
temperature, and wind speed, include them here to skip automatic generation. Format:
Irradiance, Temperature, Wind (optional).}
\item{ref_irrad}{The irradiance value at which to calculate the universal
temperature coefficient. Since irradiance is a much stronger influencer on power generation
than temperature, it is important to specify a small range of irradiance data
from which to estimate the effect of temperature.}
\item{irrad_range}{The range of the subset used to calculate the universal
temperature coefficient. See above.}
}
\value{
Returns dataframe of results per passed time scale from XbX with
universal temperature correction modeling
}
\description{
This function groups data by the specified time interval
and performs a linear regression using the formula:
power_corr ~ irrad_var - 1.
Predicted values of irradiance, temperature, and wind speed (if applicable)
are added for reference. The function uses a universal temperature correction,
rather than the monthly regression correction done in other PLR determining methods.
}
\examples{
# build var_list
var_list <- plr_build_var_list(time_var = "timestamp",
power_var = "power",
irrad_var = "g_poa",
temp_var = "mod_temp",
wind_var = NA)
# Clean Data
test_dfc <- plr_cleaning(test_df, var_list, irrad_thresh = 100,
low_power_thresh = 0.01, high_power_cutoff = NA)
# Perform the power predictive modeling step
test_xbx_wbw_res <- plr_xbx_utc_model(test_dfc, var_list, by = "week",
data_cutoff = 30, predict_data = NULL,
ref_irrad = 900, irrad_range = 10)
}
|
c52f311b1580e8143bf9781f37e91d3e1a689920
|
a7ac30628edb64e8735b7cb6c01174ffba34e777
|
/man/select_dataset.Rd
|
88377ed3d3267e32bad471169b3fa3eb19123457
|
[
"MIT"
] |
permissive
|
kant/esriOpenData
|
142b53354bcc5d60715be310758cf504ea446c9e
|
fd0df4de257b78b3ca2a1289777f688e4a7a1b34
|
refs/heads/master
| 2021-01-01T21:14:57.186572
| 2019-05-09T19:38:46
| 2019-05-09T19:38:46
| 239,342,472
| 0
| 0
|
MIT
| 2020-02-09T17:15:31
| 2020-02-09T17:15:31
| null |
UTF-8
|
R
| false
| true
| 713
|
rd
|
select_dataset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/select_dataset.R
\name{select_dataset}
\alias{select_dataset}
\title{Select a dataset}
\usage{
select_dataset(dataset_df, id)
}
\arguments{
\item{dataset_df}{Dataframe, output of search_datasets().}
\item{id}{row id of the desired dataset, e.g. 1 represents the first dataset of the search results.}
}
\value{
Dataframe, containing only the selected dataset information.
}
\description{
\code{select_dataset} selects one specific dataset from the search result retrieved by
search_datasets().
}
\examples{
datasets <- search_datasets("Climate", aoi = T)
selected <- select_dataset(datasets, 12)
}
\author{
Sandro Groth
}
\keyword{select_dataset}
|
ca3e7da7542b9baccb0a192b5fcc465450dfae04
|
c85a7198653461c25d031f7e93d78368b1eb6833
|
/man/modelDeterministic.Rd
|
2bdd0d3e36a14ec8c58b46e5992dd114a2992a3f
|
[] |
no_license
|
rBatt/timeScales
|
70a6159fde3c5062d6abe37e58811d251dcf384e
|
c25bb8bbc486147a5529bed75ea6b150575ed18e
|
refs/heads/master
| 2021-08-30T16:57:41.955274
| 2021-08-10T23:35:16
| 2021-08-10T23:35:16
| 100,967,320
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,714
|
rd
|
modelDeterministic.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simLakeP.R
\name{modelDeterministic}
\alias{modelDeterministic}
\title{Eutrophication Bifurcation
Model of lake eutrophication via elevated phosphorus loading. As phosphorus loading increases, a fold bifurcation is reached, and the system switches from an oligotrophic state into a eutrophic state.}
\usage{
modelDeterministic(state, pars = c(C = 0.00115), F = 14.6, b = 0.001,
h = 0.15, m = 2.4, r = 0.019, q = 8, s = 0.7, sigma = 0.01)
}
\arguments{
\item{state}{state variables, a vector of length 3 with names "X", "M", and "U"}
\item{pars}{a named vector of parameters; currently only option is "C", a coefficient controlling the fraction of soil P that washes into the lake (units of 'per year'); set up this way so as to be compatible with functions in package "rootSolve" (C is a control parameter affecting water quality).}
\item{F}{input rate of phosphorus to soil}
\item{b}{permanent burial rate of phosphorus in sediments}
\item{h}{hydrologic loss (outflow)}
\item{m}{in the recycling function, the value of X at which recycling is half the maximum rate}
\item{r}{recycling coefficient}
\item{q}{in the recycling function, the exponent q determines the slope of R(X) near m}
\item{s}{sedimentation loss}
\item{sigma}{standard deviation of recycling noise}
\item{C}{coefficient for transfer of soil phosphorus to the lake}
}
\value{
change of state variables
}
\description{
Eutrophication Bifurcation
Model of lake eutrophication via elevated phosphorus loading. As phosphorus loading increases, a fold bifurcation is reached, and the system switches from an oligotrophic state into a eutrophic state.
}
|
eb90ef09ea5e6033377ba3376a385370e54a0e54
|
83546f128c57f01828b5d50f12a880cfc13b0dd3
|
/Fall_2019/Intro_to_R_LemayLab_120419.R
|
157592fa48ade9cb8aee353ede166ea7b78f192a
|
[] |
no_license
|
ebeth-chin/Intro_to_R
|
06a008859fdd9a71e37b6cafe12823ae14ba04d1
|
f337a498e6ff564d484c5306d334ea63ed9174b7
|
refs/heads/master
| 2022-12-12T03:07:58.397765
| 2020-08-28T23:55:09
| 2020-08-28T23:55:09
| 291,166,329
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,012
|
r
|
Intro_to_R_LemayLab_120419.R
|
#####################
# Intro to R #
# Elizabeth Chin #
# 09/27/2019 #
#####################
# Purpose: This session aims to get you:
#* up and running with the R language and R Studio
#* using good practices for reproducible computations and analyses
#* reading, understanding, and manipulating dataframes
#* variable types
#* subsetting data
#There are 5 main areas in the RStudio interface (on EC's computer anyways):
# * Prompt/command panel aka "Console" (top right)
# * Script area (top left)
# * Environment, Workspace & History (bottom left)
# * Plots, Packages, Help, Files (bottom right)
# * Menus that bring up different dialogs
# You can change the layout of these windows by going to the menu at the top > tools > global options > pane layout
# We're going to use the script area to write our R commands into files and then send the commands to the prompt/command panel to execute them.
# By putting the commands into a file, we ensure that we have them after the R session and we can reconstruct
# what we did. We'll refine the text in the script/file so that it works.
## Five tips for writing code
# + annotate your code
# + write code for humans, not computers
# + use new lines and indents to organize your code
# + no one likes a run on sentence
#+ troubleshoot small sections of code at a time
# + baby steps before big steps
#+ use the ? to get more info for functions
# + Dr. Google is also your friend
#+ there can be multiple answers to the same problem
# + don't be afraid to try different things
#+ it's unlikely you're going to permanently break anything
# Importing Data
# First, we are going to tell R which folder to use as the working directory. Next, we can read in our data.
# We are using the example ASA24-2016 ddata set, which can be downloaded from the [ASA24 researcher website.](https://epi.grants.cancer.gov/asa24/researcher/sample.html)
# Be sure to download this file: "Sample Analysis Files and Data Dictionaries for ASA24-2016, ASA24-Canada-2016, and ASA24-Australia-2016"
# It will download as a zip folder to wherever your downloads normally live. Move the folder to the working directory and unzip it.
# We are going to read in the file: Snow_2018-05-30_42236_Items.csv
# We specify that the top row contains the column names using 'header = T'
#######################
# let's get started #
#######################
#set the working directory
#You'll change the directory to wherever your data lives on your computer
setwd("/Users/elizabeth.chin/Desktop/Intro_to_R/Fall_2019/") #press CMD + ENTER (mac) or CTRL + ENTER (win) to run a line
#what's my working directory?
getwd()
#load the data
#don't forget that the data is nested in sub directories
items<- read.csv("./data/Snow_2018-05-30_42236_Items.csv", header=T)
#read-in commands for other types of data: read.delim(), read.table()-- check out ?read.csv() for more
#another way to load the data, without setting the working directory first
#items<- read.csv("/Users/elizabeth.chin/Desktop/Intro_to_R_labmeeting/ASA242016/Studies_created_after_May312018/2016Recall/Snow_2018-05-30_42236_Items.csv", header=T)
#Ideally we'd use version control (e.g. git) for the files in this RIntro directory.
#We can talk about this later if you're interested.
####################################
# Creating and assigning variables #
####################################
#So assignments to create or overwrite variables in R are of the form:
#variable = value
#variable <- value
#(For completeness, you may also see `->` being used, and also a call to the assign() function.)
#You can create new variables yourself, e.g.,
x = 4
name = "Ebeth"
gotIt = TRUE
#Note that these variables appear in the bottom-left panel of the RStudio GUI.
#We can also list the names of the variables we have created with the `ls()` function
ls()
#We can also remove variables when we no longer need their values using `rm()`,
#e.g.,
rm()
rm(x, name)
#Be careful not to remove a variable you do want.
#After a call to rm(), it is gone unless you happened to save()/saveRDS() the variable earlier.
###############################
# Types of variables and data #
###############################
#Adapted from [here](https://www.statmethods.net/input/datatypes.html)
#There are many types of data structures in R, including:
# * vectors (numerical, characters, logical)
# * matrices
# * all columns must have the same mode (numeric, character,etc.) and be the same length
# * arrays
# * similar to matrices, but can have more than 2 dimensions
# * data frames
# * like a matrix, but different columns can have different modes (numeric, character, factor, etc.)
# * lists
# * ordered collection of objects (components)
# * allows you to gather a variety of objects under one name
#R has 6 basic data types:
# * character ("a", "dog")
# * numeric (real or decimal) (2, 1.1)
# * integer (2L (L tells R to store this as an integer))
# * logical (TRUE or FALSE)
# * complex (1 + 4i)
########################
# Inspecting your data #
########################
#We assigned the datafile to an object we called `items`.
#What is it? You can see some details about it in the "Environment" panel in the bottom left panel of the
# RStudio GUI. But that only shows you some information. We can query the object for much more information.
#You should get into the habit of doing this for objects you create.
#The functions we use all the time for this include:
# * size
# * dim() #dimensions
# * nrow() #n of rows
# * ncol() #n of columns
# * length() # n of columns
# + names
# * names() # column names
# * colnames()
# * rownames()
# + summary
# + str() #structure
# + summary() #basic statistics (mean, median...)
# + class()
#Many of these functions are generic and can be applied to different types of data (not just data frames)
#What does these functions do?
#Look them up with the help command, e.g.,
help("class")
?names
names(items)
############
# **DIY:** #
############
#############################################################################################
# ***use the functions listed above to get some summary information about 'items'***
# ***what can you tell me about the data?***
# ***data type:*** DATA FRAME
# ***dimensions:*** 51 x 130
# ***variable types include:*** numeric, integers, factors
# ***how many types of Food Sources (FoodSrce) are there?*** 7
#############################################################################################
str(items)
?factor # categorical variable
#We can also glance at the data using the head() or tail() functions:
head(items) #first six lines (default)
tail(items) #last six lines
head(items, n=3) #first three lines
############
# **DIY:** #
############
#############################################################################################
# ***can you show me the last four lines of 'items'?***
#############################################################################################
# Tutorial: exploring the 'items' data frame (loaded earlier in this file) with base R.
tail(items, n=4) # last 4 rows of the data frame
#The View() function is also an easy way to peep at your data in a new window that you can sort, filter, and search.
#You can also double-click on the name in the Environment window.
View(items)
########################
# Indexing data frames #
########################
#'items' is a data frame consisting of 51 rows and 130 columns.
# We can extract specific elements from 'items' by specifying the "coordinates".
# Row numbers come first then columns.
#object_name[rownum, colnum]
## Extract columns:
items[,21] #get the FoodCode column as a vector
items[21] #get the FoodCode column as a data frame, notice no comma!
items[,-21] #get every column EXCEPT column 21
items[,c(21:24)] #get columns 21:24. notice the 'c', which acts to 'concatenate' multiple items together
#we can also extract using the name
items['FoodCode'] #data frame
items[['FoodCode']] #vector
items[,'FoodCode'] #vector, notice the ,
items$FoodCode #vector
#the last three are equivalent
## Extract rows
items[1,] #get the first row
items[c(1:5),] #get the first five rows
items[-1,] #get everything EXCEPT the first row
#'items' rownames are just 1-n. We can still summon a specific row using the rowname instead of location
#(it just so happens in this case that the rowname is the same as the row number)
rownames(items)
items['1',] #get the first row (indexed by row NAME, which here equals the row number)
items[,'FoodCode']# get the column named FoodCode
## Extracting specific data points
#We can combine the indexing of rows and columns to get specific data points:
items[1,1] #the first row and first column (vector)
items[c(1:3), 21] #the first three elements of the 21st column (vector)
############
# **DIY:** #
############
#############################################################################################
# ***1) extract the column named "Food_Description" and assign to a dataframe called "fd"***
colnames(items) # Food_Description is column 130
fd<- items[130] # single-bracket indexing keeps the result as a data frame
?write.csv #if you wanted to save your object or data frame
#example (file will show up in your working directory): write.csv(fd, "example_name.csv")
# ***2) What is the class of 'fd'? ***
class(fd) # "data.frame"
# ***3) Show me the first 5 rows of 'fd'***
fd[c(1:5),] # by indexing
head(fd,n=5) # or with head()
#############################################################################################
#############################################################################################
# ***extract columns 95, 119, and 1 (in that order)***
#############################################################################################
# ***How would you get rows 6 through 10 and all columns EXCEPT the last one?***
#############################################################################################
# return column names
colnames(items)
#one way to get column "CALC"
items[,37] #inconvenient because we don't always know the column number, we don't always want to have to get it
items[, 'CALC'] #use the column name
#the preferred way/most commonly used
items$CALC #notice the drop down menu that appears
#you can use this to auto-complete your column name
## Column Summary Statistics
#We can also use the summary() function on just one column
?summary() #what does this do?
summary(items$CALC) #easy way
summary(items[,37]) #another way, but more typing :(
#summary(items[,21])
#summary(items)
#Another way to select columns in a data frame
#you will need to know the column names
#uses the $
#get the food description
items$Food_Description #preferred way to select columns
#it's easy to select the wrong column number by indexing
###RECAP###
#select columns
# 1) indexing w/ the column number (same for rows)
# 2) indexing w/ the column name (same for rows)
# 3) directly selecting the col name w/ $
############
# **DIY:** #
############
#############################################################################################
# ***can you get the summary for columns 121 to 124 from 'items'?***
# 1) use the column numbers
# 2) use the column names
#############################################################################################
#1) summary w/ col numbers
summary(items[,c(121:124)])
#2) summary w/ column names
summary(items$D_TOTAL)
summary(items$D_CHEESE)
summary(items[,'D_TOTAL'])
# Conditional Subsetting
# We can return just part of the data that meet certain conditions
#Conditions can be defined using the following:
# + '==' means 'equal to'
# + '!=' means 'not equal to'
# + '>' means 'greater than'
# + '>=' means 'greater than or equal to'
# + '<' means 'less than'
# + '<=' means 'less than or equal to'
#Joiners
# + '|' means 'or'
# + '&' means 'and'
# e.g. Return all the foods (rows) in items where the value for "OILS" is 0
items$OILS == 0 #returns a logical vector
oil_items<-items[items$OILS==0,] #returns all rows in the dataframe where the vector is TRUE. Notice the comma-- it returns all the columns
#another way to return the rows of your dataframe
?subset()
subset(items, OILS==0) #another way using the subset function
#You can also use conditional subsetting on characters (i.e. not numbers)
items$FoodSrce != "Other cafeteria" #notice the ""
subset(items, FoodSrce != "Other cafeteria")
############
# **DIY:** #
############
#############################################################################################
# ***Return all rows where the PROT value is greater than or equal to 5. How many rows are there?***
#############################################################################################
#use subset
prot<-subset(items, PROT >= 5)
dim(prot) #15 x 130
nrow(prot)#15
# Counting factors
#Sometimes we want to know how many of a specific factor we have in our data. We can do this a few ways:
#e.g. how many counts are there for each FOODSRC?
?table
class(items$FoodSrce) #factor; not numeric
table(items$FoodSrce) # table() gives a count per factor level
#######################
# The 'dplyr' package #
#######################
# [dplyr](https://dplyr.tidyverse.org/) is an R library used for data manipulation.
# We'll briefly cover the following functions:
# + select
# + filter
# But these are just a few of many dplyr functions that are helpful for data manipulation and organization!
# First, we need to install and load the dplyr package:
#install the package. you'll only need to do this once
install.packages('dplyr')
#after it's installed, you need to load it. you'll do this EVERY TIME you want to use it in a single R or Rmd file
#(not every time you want to use a specific function though)
library(dplyr)
## dplyr::select
#The 'select' function makes it easy to select columns.
#Let's use it to select the FoodCode and Food_Description from items, and assign it to a new df called "tiny_items"
?select()
dim(items)
tiny_items<- select(items,
                    c(FoodCode, Food_Description)) #remember the c!
#select(items, c("FoodCode", "Food_Description")) #you can also use the "" but you don't need it.
## dplyr::filter
#The 'filter' function is similar to 'subset' and makes it easy to select certain rows from a column given a specific condition.
#E.g. Keep only the rows of tiny_items whose FoodCode is at most 60000000.
#(note: an earlier version of this prompt said "greater than"; the code below uses <=)
?filter()
filter(tiny_items,
       FoodCode <= 60000000) #FoodCode is numeric
#Notice that all columns are returned
#Filter can also be used on columns of a non-numeric class
#E.g. Keep only the rows of tiny_items whose Food_Description is NOT "Cheerios"
#(the code uses !=, i.e. it EXCLUDES Cheerios)
class(tiny_items$Food_Description)
filter(tiny_items,
       Food_Description != "Cheerios") #notice the ""
## Operators usable inside filter():
# == exact match/equal to
# != not equal to
# | "or"
# & "and"
# Here's an example of two rules
filter(tiny_items,
       Food_Description == "Cheerios" |
         Food_Description == "Banana, raw") #case sensitive
############
# **DIY:** #
############
#############################################################################################
# ***1) select the columns: FoodCode, Food_Description, SUGR, and TFAT from 'items' (in that order)
#    and assign to a new dataframe called "df"***
# Base-R answer: index with a character vector of column names.
df <- items[,c("FoodCode","Food_Description", "SUGR", "TFAT")]
View(df)
# dplyr answer: the same selection with select().
df <- select(items,
             c("FoodCode", "Food_Description", "SUGR", "TFAT"))
View(df)
#check out just the column names
colnames(df)
# ***2) filter 'df' to only include rows where the values of SUGR are greater than 4 and the FoodCode is
#     less than or equal to 50000000. Assign to a new dataframe called 'df2'***
# BUG FIX: the original answer used `|` (or) instead of `&` (and), and compared
# the numeric columns against the character strings "4" and "50000000".  Both
# conditions are now numeric and joined with `&`, matching the exercise wording.
df2 <- filter(df,
              SUGR > 4 &
                FoodCode <= 50000000)
View(df2)
dim(df) # dimensions of the unfiltered selection, for comparison
# ***3) what are the dimensions of df2? What is the median of TFAT in df2? What is the median of TFAT in df?***
dim(df2)
summary(df2)       # overall summary of the filtered data
summary(df2$TFAT)  # includes the median of TFAT among the filtered rows
summary(df$TFAT)   # includes the median of TFAT in the full selection
#############################################################################################
|
1a2b0c2580084e4125f13c9f8da9bc8d38d1c415
|
e2beb66552e58cbf67556177b9117f2d0cf59b94
|
/code/R/going_deep_legacy_code/going_deep_package_scripts/model-preds.R
|
9aeb4c36a6695f5e52f600ddbcd2952e7f1ae1f9
|
[] |
no_license
|
mcemerden/bdb-pass-defense
|
8ba712986ce07fd5767000d2c7810c4c02bb4954
|
fcdebcc3614c0457139d67b7ca1f32b22efc7b58
|
refs/heads/main
| 2023-02-12T02:33:16.835505
| 2021-01-08T18:09:53
| 2021-01-08T18:09:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,309
|
r
|
model-preds.R
|
# Francesca Matano
# Purpose: Produce prediction for lstm model
#' Computing lstm model prediction
#'
#' This function takes a scaled matrix in input and it computes the lstm prediction.
#' The matrix is padded to equal-length sequences, reshaped to the 3-D array a
#' keras LSTM expects, predicted on, and then the padding rows are dropped again.
#'
#' @param X scaled matrix of predictors
#' @param model lstm model: you can load model with load_model_hdf5(model_path)
#' @param batch_size batch size used in the lstm model. If don't know try length of the max seq. If null the function will try to use max length
#' @param seq_ids sequence id for the X matrix, needed for padding
#' @param frame_ids column to sort the padded X's by
lstm_pred_fun <- function (X, model, batch_size = NULL, seq_ids, frame_ids) {
  ## Pad the x's so that every sequence has the same length
  message("Padding the design matrix")
  X_new <- padding_fun(X = X, seq_ids = seq_ids, frame_ids = frame_ids)
  ## Creating the array of predictors: (samples, timesteps = 1, features)
  n_predictors <- ncol(X_new)
  x_arr <- array(data = as.matrix(X_new), dim = c(nrow(X_new), 1, n_predictors))
  ## Predictions
  if (is.null(batch_size)) {
    # NOTE(review): 129 is presumably the maximum sequence length in the
    # training data -- confirm against the fitted model before relying on it.
    batch_size = 129
    warning("Not batch_size in input, tried 129 which is the max length
            of a sequence")
  }
  message("Making predictions")
  preds <- model %>%
    predict(x_arr, batch_size = batch_size) %>%
    .[,1,]
  ## Unpad the x's: padding_fun fills padded rows with 0, so rows whose first
  ## column is non-zero are treated as real observations.
  ## NOTE(review): this silently drops genuine rows whose first predictor is
  ## exactly 0 -- verify that column can never legitimately be 0.
  message("Unpadding the predictions")
  wwhich_unpad <- which(X_new[[1]] != 0)
  return(preds[wwhich_unpad])
}
#' Padding the design matrix
#'
#' This function pads the design matrix: each sequence gets a synthetic run of
#' consecutive Dates starting 2000-01-01, then padr::pad() inserts missing
#' dates per sequence (padding rows), which are filled with 0.
#'
#' @param X design matrix
#' @param seq_ids column to use to pad the sequences
#' @param frame_ids column to sort the padded X's by
#' @return the padded design matrix, sorted by (seq_ids, frame_ids), with the
#'   helper Date, seq_ids and frame_ids columns removed
padding_fun <- function(X, seq_ids, frame_ids) {
  ## Create fake date col to pad the sequences (one calendar day per frame,
  ## restarting within each sequence so pad() can align them)
  tmp <- data.frame(X, seq_ids = seq_ids, frame_ids = frame_ids) %>%
    group_by(seq_ids) %>%
    mutate(Date = seq(as.Date("2000/1/1"), by = "day",
                      length.out = length(seq_ids))) %>%
    ungroup() %>%
    as.data.frame()
  ## Pad the sequences based upon date and remove date and ids;
  ## padding rows come back as NA and are zero-filled here
  X_new <- padr::pad(tmp, start_val = min(tmp$Date), end_val = max(tmp$Date),
                     group = "seq_ids") %>%
    replace(., is.na(.), 0)
  X_new <- X_new %>%
    arrange(seq_ids, frame_ids) %>% # need this to make sure the beginning is padded
    select(-c(Date, seq_ids, frame_ids))
  return(X_new)
}
|
c9fd73b73f4c26cf50034dd46d229e06801725a7
|
2aaa784b1244715bb4cd0dccec20c5014701a292
|
/slider_map.R
|
868db4be60df0c8fd6adf142bb4f98b0e301baf3
|
[] |
no_license
|
MarcellGranat/fertilityEU
|
80720ac0baaefaf425309fb1de83ec2084376a52
|
a2fc2a489c7d7f99d52f3493707cc831e571c198
|
refs/heads/main
| 2023-04-06T07:13:12.443749
| 2021-04-21T18:38:12
| 2021-04-21T18:38:12
| 337,162,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,218
|
r
|
slider_map.R
|
# Animated choropleth of fertility across EU NUTS-2 regions: Eurostat CSV
# extracts are stacked into one long table, joined onto the NUTS-2 map, and
# rendered with a plotly time slider (one frame per year).
library(tidyverse)
# NOTE(review): absolute Windows paths below tie this script to one machine --
# consider relative paths or here::here() before sharing.
dat <- rbind(read_csv("C:/school/szem_8/TDK-fertility/fertilityEU/eurostat_datasets/demo_r_frate2_1_Data.csv", locale = readr::locale(encoding = "UTF-8"))%>%
               transmute(time = TIME, geo = GEO, variable = "Fertility", value = Value),
             read_csv("C:/school/szem_8/TDK-fertility/fertilityEU/eurostat_datasets/edat_lfse_16_1_Data.csv", locale = readr::locale(encoding = "UTF-8")) %>%
               filter(SEX == "Total") %>%
               transmute(time = TIME, geo = GEO,
                         variable = "Early leavers from education and training (%)", value = Value)) %>%
  filter(value != ":") %>% # Eurostat encodes missing values as ":"
  mutate(
    value = as.numeric(value)
  )
# NUTS level-2 region polygons
eu_map <- eurostat::get_eurostat_geospatial(nuts_level = 2)
# Fertility only, expanded to every (time, geo) combination so that regions
# with no data for a year still appear (as NA / white) in every frame
used_dat <- dat %>%
  filter(variable == 'Fertility') %>%
  {
    merge(expand.grid(time = unique(.$time), geo = unique(.$geo)), ., all.x = T)
  }
# Join values onto the map and animate over `time` via the plotly frame aes
plotly::ggplotly(
  used_dat %>%
    {merge(eu_map, ., all.x = T)} %>%
    ggplot(aes(fill = as.numeric(value), frame = time)) +
    geom_sf(color = "black", size = .1) +
    scale_fill_viridis_c(na.value = "white") +
    theme_minimal() +
    xlim(c(-30, 44)) + # crop to Europe
    ylim(c(35, 75)) +
    labs(
      fill = 'Fertility'
    )
)
|
570e060ebb9c49d69960a0e74cdd747778f4e454
|
302f09f730d4a0b71f97203e244afe341e8a7c08
|
/R/rScatterView.R
|
1104db3cd1c67f11fdf9c1e4dbcb88f4aae98812
|
[] |
no_license
|
BioAmelie/MHCI_TRAF3
|
79425a8cc8ab39a02de41c999bfecae8dfef2a6b
|
1606d33f61670be7120998cca169594d469bbce1
|
refs/heads/master
| 2023-06-16T04:53:52.624145
| 2021-07-08T21:50:10
| 2021-07-08T21:50:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,967
|
r
|
rScatterView.R
|
#' Scatter (rank) view of per-gene scores
#'
#' Plots gene scores against a random rank position, highlighting the top
#' `top`-scoring genes plus any genes in `genelist` with labels and distinct
#' colors; all other genes are drawn as small points split at `cutoff`.
#' Note: ranks (and extra palette colors) are drawn with sample(), so the
#' layout is non-deterministic unless the caller sets a seed.
#'
#' @param genescore named numeric vector of per-gene scores
#' @param ylimit c(lower, upper) score bounds; NA means unbounded on that side
#' @param cutoff score threshold separating the two background groups
#' @param top number of top-scoring genes to label
#' @param genelist extra gene names to label regardless of rank
#' @param color_pal colors for the two background groups (highlight colors are
#'   sampled from an internal palette when too few are supplied)
#' @param labelsize text size for the gene labels
#' @param title plot title
#' @export
rScatterView <- function(genescore, ylimit = c(0, NA),
                         cutoff = 1, top = 5, genelist=c(),
                         color_pal = c("red", "gray70"),
                         labelsize = 4, title = NULL){
  # Random x-position (Rank) spreads the points horizontally
  gg = data.frame(Label = names(genescore), Score = genescore,
                  Rank = sample(1:length(genescore), length(genescore)),
                  stringsAsFactors = FALSE)
  # Apply optional score bounds (strict inequalities)
  if(!is.na(ylimit[1])) gg = gg[gg$Score>ylimit[1], ]
  if(!is.na(ylimit[2])) gg = gg[gg$Score<ylimit[2], ]
  # Background groups: Z1 above cutoff, Z2 below
  gg$group = "Z2"
  gg$group[gg$Score>cutoff] = "Z1"
  # Highlighted genes: top-`top` by score, plus explicit genelist members
  idx = rank(gg$Score)>(nrow(gg)-top) | gg$Label%in%genelist
  gg$group[idx] = gg$Label[idx]
  # Re-space the highlighted genes evenly (randomly assigned) across the x-axis
  tmp = seq(1, nrow(gg), length.out = sum(idx)+2)[-c(1,sum(idx)+2)]
  gg$Rank[idx] = sample(tmp, sum(idx))
  gg$group = factor(gg$group, levels = c(gg$Label[idx], "Z1", "Z2"))
  # Only highlighted genes keep a text label
  gg$Label = as.character(gg$group)
  gg$Label[gg$Label %in% c("Z1", "Z2")] = ""
  # Fallback palette (ColorBrewer Paired) for highlight colors
  pal = c("#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99", "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a")
  if(length(color_pal)!=length(levels(gg$group))){
    idx = length(levels(gg$group))-length(color_pal)
    if(idx<6) color_pal = c(pal[sample(seq(2,10,2), idx)], color_pal)
    else color_pal = c(pal[sample(1:10, idx)], color_pal)
  }
  # Build the plot: jittered points, sized/colored by group, repelled labels
  p = ggplot(gg, aes(x=Rank, y=Score, color=group, size=group))
  p = p + geom_jitter(alpha = 0.6)
  p = p + scale_color_manual(values = color_pal, guide = "none")
  p = p + scale_size_manual(values = c(rep(2, sum(idx)), 0.4, 0.4), guide = "none")
  p = p + ggrepel::geom_text_repel(aes(label = Label), force = 0.1,
                                   fontface = 'bold', size = labelsize,
                                   segment.color = 'grey50', segment.size = 0.1,
                                   segment.alpha = 0)
  p = p + labs(color = NULL, size = NULL, title = title)
  p = p + theme_bw(base_line_size = NA)
  p = p + theme(plot.title = element_text(hjust = 0.5))
  p
}
|
7d15dea50ec21bba22454a852ff9653935179deb
|
c123ffc8e23813033f1a8c90c23f6ee28d13ed13
|
/Rejection based ABC MCMC ASR final.R
|
8812f0625124c03855243ac03c596636ef0044ad
|
[] |
no_license
|
noahmthomas-nmt/ABC_Chapter
|
6aba6de5dc07bb7b9a1976eac867e3511847c02f
|
73eda7d3fc9535806599063315f674562d6d1bef
|
refs/heads/main
| 2023-09-05T16:26:57.443473
| 2021-11-02T17:42:01
| 2021-11-02T17:42:01
| 423,571,684
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,648
|
r
|
Rejection based ABC MCMC ASR final.R
|
# Rejection base ABC MCMC ASR.R
# Approximate Bayesian Computation via rejection-based MCMC for the ASR model:
# proposals are simulated forward, kept only when the simulated data fall
# within `eps` of the observed data, and then passed through a
# Metropolis-Hastings accept/reject step.
rm(list = ls()) # Clear environment  (NOTE(review): avoid rm(list=ls()) in shared scripts)
# ------------------------------------------------------------- Set up
set.seed(43210) # set seed for reproducibility
library("tidyverse") # import tidy function
source("rasr.R") # simulate ASR
source("rconflict_asr.R") # Runs conflict model with experimental design
# Transform parameters to/from normal or ASR param space
source("transformations_for_asr.R")
source("priors_asr.R") # priors for ASR model (expected to provide param_names, start_sds, sample_prior, dens_prior, ...)
source("ABC_functions.R") # functions to run ABC (expected to provide get_d, rproposal, mh_step, ...)
source("limits.r") # for plotting (expected to provide alpha.x/alpha.y etc. axis limits)
# --------------------------------------------------------- Algorithm Parameters
num_posts <- 1000000 # number posterior draws
# create array for theta: one row per parameter, one column per draw
theta <- array(NA, dim = c(length(param_names), num_posts))
rownames(theta) <- param_names
log_weight <- matrix(NA, num_posts) # create matrix for log_weights
eps <- 65 # determine maximum difference between simulated and observed data
sigma_proposal <- start_sds / 5 # determine sd of proposal distribution (random walk)
# ---------------------------------------------------- Experiment Parameters
conds <- c(0, 1) # names of conditions
n <- 2500 # number of trials for cond (comp/incomp) and stim (left/right)
condition_table <- c(n, n) # make a table of the condition/stimuli
names(condition_table) <- conds
# ------------------------------------------------------------- Import Data
Data <- read_table2("Conflict_Data_2.0 (1).txt",
  col_types = cols(
    Inc = col_integer(),
    SOA = col_integer(), Sub = col_integer()
  )
)
# Reshape Data: keep only the Inc and RT columns, wrapped in a one-element list
data_list <- list()
data_list[[1]] <- Data %>%
  dplyr::select(Inc, RT) %>%
  as.list()
Data <- data_list
# ------------------------------------------------------------- Initialization
# Draw from the prior until one proposal yields simulated data within eps of
# the observed data; that draw seeds the chain.
d <- Inf
while(d >= eps){
  theta_star <- sample_prior() # sample from prior
  print(d) # progress: current distance (Inf until the first simulation)
  if(is.finite(dens_prior(theta_star, LOG = T))){
    # get difference between observed and similar data
    d <- get_d(params = theta_star, data = Data[[1]])
  }
}
# store the first accepted proposal
theta[, 1] <- theta_star
# ------------------------------------------------------------- Sample
I <- 3 # initialize iter (the loop below starts filling column I-1 = 2)
# sample posterior
for (i in (I-1):num_posts) {
  theta_1 <- rproposal(theta[, i-1], sigma_proposal) # sample from proposal
  # get difference between simulated data and
  # observed for this set of parameters
  # NOTE(review): the initialization above calls get_d(..., data = Data[[1]])
  # while this call passes the whole list `Data` -- confirm get_d() handles
  # both forms, otherwise one of the two calls is wrong.
  d_new <- get_d(theta_1, Data)
  if(d_new <= eps){
    # determine weight based on prior and other candidate
    new_weight <- dens_prior(theta_1, LOG = T) +
      log_dens_proposal(theta[, i-1], theta_1)
    old_weight <- dens_prior(theta[, i-1], LOG = T) +
      log_dens_proposal(theta_1, theta[, i-1])
    # perform MH step
    MH <- mh_step(new_weight = new_weight, old_weight = old_weight)
    if(MH == "accept"){
      theta[, i] <- theta_1 # store the accepted value
    }else{
      theta[, i] <- theta[, i-1] # store the previous value
    }
  }else{
    # simulated data too far from the observed data: reject outright
    theta[, i] <- theta[, i-1] # store the previous value
  }
  print(i)
}
I <- i # record where you leave off if interrupting
# load("chains.RData") # save work
# --------------------------------------------- prior/posterior density plots
# Extract posterior draws for each parameter (one row of the chain matrix each)
alpha <- theta[1,]
beta <- theta[2,]
mu <- theta[3,]
sigma <- theta[4,]
lambda <- theta[5,]
# ground truth (NOTE: this OVERWRITES the chain matrix `theta`; the per-
# parameter draws were extracted above, so only the raw chain object is lost)
theta <- c(.0075,.01,350,50,100)
# make plots: posterior density, dotted vertical line at the true value,
# thick horizontal bar for the 95% credible interval, thin curve for the
# prior density; axis limits (alpha.x, alpha.y, ...) come from limits.r
png("../reject_alpha.png",3300,1100)
par(mfrow=c(1,3),cex=2.5)
cs.alpha <- quantile(alpha,c(0,.025,.975,1))
range.alpha <- alpha.y[2]-alpha.y[1]
plot(density(alpha),xlim=alpha.x,ylim=alpha.y,
     xlab="Log Alpha",lwd=3,cex=1.5,main='',ylab="")
lines(rep(log(1/theta[1]),2),alpha.y,lty=3)
lines(cs.alpha[2:3],rep(-.005*range.alpha,2),lwd=5)
x <- seq(alpha.x[1], alpha.x[2], by = .001)
lines(x, dnorm(x, prior_list$alpha$mu[1], prior_list$alpha$mu[2]))
cs.beta <- quantile(beta,c(0,.025,.975,1))
range.beta <- beta.y[2]-beta.y[1]
plot(density(beta),xlim=beta.x,ylim=beta.y,
     xlab="Log Beta",lwd=3,cex=1.5,main='',ylab="")
lines(rep(log(1/theta[2]),2),beta.y,lty=3)
lines(cs.beta[2:3],rep(-.005*range.beta,2),lwd=5)
x <- seq(beta.x[1], beta.x[2], by = .001)
lines(x, dnorm(x, prior_list$beta$mu[1], prior_list$beta$mu[2]))
cs.lambda <- quantile(lambda,c(0,.025,.975,1))
range.lambda <- lambda.y[2]-lambda.y[1]
plot(density(lambda),xlim=lambda.x,ylim=lambda.y,
     xlab="Log Lambda",lwd=3,cex=1.5,main='',ylab="")
lines(rep(log(theta[5]),2),lambda.y,lty=3)
lines(cs.lambda[2:3],rep(-.005*range.lambda,2),lwd=5)
x <- seq(lambda.x[1], lambda.x[2], by = .001)
lines(x, dnorm(x, prior_list$lambda$mu[1], prior_list$lambda$mu[2]))
dev.off()
# Second figure: mu and sigma panels
png("../reject_mu.png",2200,1100)
par(mfrow=c(1,2),cex=2.5)
cs.mu <- quantile(mu,c(0,.025,.975,1))
range.mu <- mu.y[2]-mu.y[1]
plot(density(mu),xlim=mu.x,ylim=mu.y,
     xlab="Log Mu",lwd=3,cex=1.5,main='',ylab="")
lines(rep(log(theta[3]),2),mu.y,lty=3)
lines(cs.mu[2:3],rep(-.005*range.mu,2),lwd=5)
x <- seq(mu.x[1], mu.x[2], by = .001)
lines(x, dnorm(x, prior_list$mu$mu[1], prior_list$mu$mu[2]))
cs.sigma <- quantile(sigma,c(0,.025,.975,1))
range.sigma <- sigma.y[2]-sigma.y[1]
plot(density(sigma),xlim=sigma.x,ylim=sigma.y,
     xlab="Log Sigma",lwd=3,cex=1.5,main='',ylab="")
lines(rep(log(theta[4]),2),sigma.y,lty=3)
lines(cs.sigma[2:3],rep(-.005*range.sigma,2),lwd=5)
x <- seq(sigma.x[1], sigma.x[2], by = .001)
lines(x, dnorm(x, prior_list$sigma$mu[1], prior_list$sigma$mu[2]))
dev.off()
|
355dc68552de7d8141c8d9416f3aeffe6acfe5bc
|
aea74183d6fbd791fe5fb89dea80d32f0b4a822d
|
/man/runStagePopExample.Rd
|
e7b9004a910940e5bbe315a2d86b1e4e93f0642e
|
[] |
no_license
|
cran/stagePop
|
c3fec31a41abc70871ae7eed192e260a817294cf
|
d830266935ddb2d1654d82004927bdfc4ba77e1c
|
refs/heads/master
| 2022-05-23T20:47:55.979477
| 2022-05-10T12:50:02
| 2022-05-10T12:50:02
| 30,255,908
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 489
|
rd
|
runStagePopExample.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runStagePopExample.R
\name{runStagePopExample}
\alias{runStagePopExample}
\title{runStagePopExample}
\usage{
runStagePopExample(name = NULL)
}
\arguments{
\item{name}{Name of the example to run. If Name is NULL the list of examples will be printed.}
}
\description{
This function is similar to the \code{demo()} function but requires less interaction.
It is used to run the canned examples from the stagePop package.
}
|
8c267a6eb515a8160c8feb043ffa02d3f37c5d78
|
5d13d41a6d0c8215c1ce157c80fb689cc23dc22a
|
/04 - Exploratory Data Analysis/Project 2/plot1.R
|
a086d4b55c4430930828b1bcd88e40dcbf663f18
|
[] |
no_license
|
eablamb/datasciencecoursera
|
6435793f054158e5784d72598fa6afdbffcef020
|
bfc4b3a7a615339c875f4dd6361f1225cf98de88
|
refs/heads/master
| 2020-06-05T05:27:48.624304
| 2015-12-04T23:06:23
| 2015-12-04T23:06:23
| 24,222,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,387
|
r
|
plot1.R
|
getPMData <- function(plotFunc) {
    # Downloads and extracts fine particulate matter data from
    # the National Emissions Inventory (NEI) Data Set
    # available at the EPA National Emissions Inventory web site.
    # The extracted data is a subset from 1999, 2002, 2005, and 2008.
    #
    # Args:
    #   plotFunc: a function accepting named arguments NEI and SCC
    #             (the two data frames read from the archive).
    # Returns: whatever plotFunc returns.
    library(utils)
    # BUG FIX: the original called require(RDS); readRDS() is part of base R,
    # and "RDS" is an unrelated CRAN package, so that dependency is dropped.
    fileURL <- 'http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip'
    destFile <- paste(getwd(),'exdata_data_FNEI_data.zip',sep='/')
    print (destFile)
    # Download only once; reuse the cached archive on later runs.
    if (!file.exists(destFile)) {
        download.file(url = fileURL,
                      destfile = destFile)
    }
    # Extract whenever the .rds payload is missing (the original only unzipped
    # right after downloading, so a present zip with deleted .rds files failed).
    if (!file.exists('summarySCC_PM25.rds') ||
        !file.exists('Source_Classification_Code.rds')) {
        unzip(destFile)
    }
    NEI <- readRDS('summarySCC_PM25.rds')
    SCC <- readRDS('Source_Classification_Code.rds')
    plotFunc(NEI = NEI, SCC = SCC)
}
makePlot1 <- function(NEI, ...) {
    ## Plots the total PM2.5 emissions for 1999, 2002, 2005, and 2008 as a
    ## bar chart and writes it to 'plot1.png' in the working directory.
    ##
    ## Args:
    ##   NEI: data frame with (at least) columns 'year' and numeric 'Emissions'.
    ##   ...: ignored; absorbs the extra SCC argument passed by getPMData().
    years <- c(1999, 2002, 2005, 2008)
    # Total emissions per year, vectorized (replaces the original loop that
    # grew `ems` with c() on every iteration).
    ems <- vapply(years,
                  function(y) sum(NEI$Emissions[NEI$year == y]),
                  numeric(1))
    png(file = 'plot1.png')
    # Ensure the device is closed even if barplot() errors.
    on.exit(dev.off(), add = TRUE)
    barplot(ems,
            names.arg = years,
            main = "Total emissions from PM2.5 1999-2008",
            xlab = 'Year',
            ylab = 'Total emissions (in tons)',
            col = 'red')
}
getPMData(makePlot1)
|
47777cd854c1a2783852db997eb2617c6298a3c7
|
10d13c1d476562d40603aaba095d6342ab232b44
|
/R/plot.sim.R
|
885c504857e6431bc21578f44c89c84eef9b3899
|
[] |
no_license
|
cran/WRSS
|
5fe1633c0fabca858ab10d3d792005d557a1cd49
|
45d362ea30f9c86885ad54b26a3de9c445cb4728
|
refs/heads/master
| 2022-06-29T20:52:05.083546
| 2022-05-29T17:10:02
| 2022-05-29T17:10:02
| 110,153,129
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,400
|
r
|
plot.sim.R
|
# S3 plot method for 'sim' objects: for every network node (reservoirs,
# rivers, aquifers, diversions, junctions, demands) it draws summary plots of
# the simulated water volumes, averaged over the simulation-interval cycle
# (monthly / weekly / daily climatology). Appears to belong to a water
# resources simulation package -- each panel waits for user confirmation
# (devAskNewPage).
plot.sim <-
function(x,...)
{
  # Number of nodes of each type present in the simulated system
  nRes<-length(x$operation$operation$reservoirs)
  nRiv<-length(x$operation$operation$rivers)
  nAuq<-length(x$operation$operation$aquifers)
  nJun<-length(x$operation$operation$junctions)
  nDiv<-length(x$operation$operation$diversions)
  nDem<-length(x$operation$operation$demands)
  simulation<- x$operation$operation$simulation
  dates<-seq(as.Date(simulation$start),as.Date(simulation$end),simulation$interval)
  # Helper: fold a time series onto its annual cycle (12 months / 53 weeks /
  # 366 days depending on the simulation interval) and average each position,
  # dropping positions with no data.
  getCycleMean<-function(data)
  {
    if(simulation$interval=='month')
    {
      mat<-t(matrix(NA,(floor(length(data)/12)+1),12))
      m<-months(seq(as.Date('2000-01-01'),to=as.Date('2000-12-01'),'month'))
      # align the series to the calendar month it starts in
      start<-which(months(as.Date(simulation$start))==m)
      if(start==0) start<-1
      mat[start:(start+length(dates)-1)]<-data
      mat<-t(mat)
      out<-apply(mat,2,mean,na.rm=TRUE)
      names(out)<-month.abb
    }
    if(simulation$interval=='week')
    {
      start<-floor(as.numeric(as.Date(simulation$start)-as.Date(paste((strsplit(simulation$start,'-')[[1]])[1],'-','01','-','01',sep='')))/7)+1
      if(start==0) start<-1
      mat<-t(matrix(NA,(floor(length(data)/53)+1),53))
      mat[start:(start+length(dates)-1)]<-data
      mat<-t(mat)
      out<-apply(mat,2,mean,na.rm=TRUE)
      names(out)<-paste('week','-',1:length(out),sep='')
    }
    if(simulation$interval=='day')
    {
      start<-as.numeric(as.Date(simulation$start)-as.Date(paste((strsplit(simulation$start,'-')[[1]])[1],'-','01','-','01',sep='')))
      if(start==0) start<-1
      mat<-t(matrix(NA,(floor(length(data)/366)+1),366))
      mat[start:(start+length(dates)-1)]<-data
      mat<-t(mat)
      out<-apply(mat,2,mean,na.rm=TRUE)
      names(out)<-paste('day','-',1:length(out),sep='')
    }
    # drop cycle positions that had no observations at all
    NANs<-which(is.nan(out))
    if(length(NANs)>0) out<-out[-NANs]
    return(out)
  }
  # Helper: blank out most column names of a bar matrix so that axis labels
  # do not overlap, keeping ~evenly spaced labels.
  # NOTE(review): `Storage` here is resolved from the enclosing environment
  # (it is assigned inside the reservoir loop below) -- when no reservoir has
  # been plotted yet this lookup is fragile; confirm intended behavior.
  labelRemover<-function(data)
  {
    if(simulation$interval=='day') CF<-363
    if(simulation$interval=='week') CF<-51
    if(simulation$interval=='month') CF<-10
    intervals<-floor(length(Storage)*18/CF+2)+2
    names<-colnames(data)
    labels<-rep(NA,length(names))
    selctedLabels<-round(seq(1,ncol(data),length.out=intervals))
    labels[selctedLabels]<-names[selctedLabels]
    colnames(data)<-labels
    return(data)
  }
  # ---- Reservoirs: stacked bars of the water balance terms plus a capacity line
  if(nRes>0)
  {
    for(i in 1:nRes)
    {
      oask <- devAskNewPage(TRUE)
      on.exit(devAskNewPage(oask))
      inflow  <-x$operation$operation$reservoirs[[i]]$operation$inflow
      outflow <-x$operation$operation$reservoirs[[i]]$operation$outflow
      capacity<-x$operation$operation$reservoirs[[i]]$operation$geometry$capacity
      Storage <-getCycleMean(x$operation$operation$reservoirs[[i]]$operation$sim_result$storage)
      Spill   <-getCycleMean(x$operation$operation$reservoirs[[i]]$operation$sim_result$spill)
      Evaporation<-getCycleMean(x$operation$operation$reservoirs[[i]]$operation$sim_result$loss)
      Release <-getCycleMean(apply(x$operation$operation$reservoirs[[i]]$operation$outflow,1,sum))
      Inflow  <-getCycleMean(apply(x$operation$operation$reservoirs[[i]]$operation$inflow,1,sum))
      title<-x$operation$operation$reservoirs[[i]]$operation$name
      bars<-t(cbind(Evaporation,Release,Storage,Inflow,Spill))
      bars<-labelRemover(bars)
      Storage<-Storage-Inflow
      ylim<-c(0,max(apply(bars,2,sum),na.rm=TRUE))*(1+nrow(bars)*0.05)
      barplot(bars,las=2,col=1:5,ylab='Volume (MCM)',ylim=ylim,main=title)
      lines(0:length(dates),rep(capacity,length(dates)+1),col=6,typ='o',lwd=2,pch=19)
      legend('top',
             legend=c('evaporation','release','storage','inflow','spill','capacity'),
             ncol=3,
             fill=c(1:5,NA),
             box.lwd=0,
             box.col=NA,
             lty=c(rep(0,5),1),
             col=1:6,
             lwd=c(rep(0,5),2),
             border=c(rep(1,5),NA))
    }
  }
  # ---- Rivers: stacked inflow bars overlaid with outflow lines
  if(nRiv>0)
  {
    for(i in 1:nRiv)
    {
      oask <- devAskNewPage(TRUE)
      on.exit(devAskNewPage(oask))
      inflow <-x$operation$operation$rivers[[i]]$operation$inflow
      outflow<-x$operation$operation$rivers[[i]]$operation$outflow
      I<-rep(NA,length(getCycleMean(inflow [,1])))
      O<-rep(NA,length(getCycleMean(inflow [,1])))
      for(j in 1:ncol(inflow)) {I<-cbind(I,getCycleMean(inflow [,j]))};I<-I[,-1,drop=FALSE]
      for(j in 1:ncol(outflow)){O<-cbind(O,getCycleMean(outflow[,j]))};O<-O[,-1,drop=FALSE]
      I<-t(I);O<-t(O)
      ylim<-c(0,max(apply(I,2,sum),apply(O,2,sum)))*(1+(ncol(inflow)+ncol(outflow))*0.1)
      title<-x$operation$operation$rivers[[i]]$operation$name
      I<-labelRemover(I)
      middleOfBars<-barplot(I,ylim=ylim,ylab='Volume (MCM)',las=2,main=title,col=gray((1:ncol(inflow))/(1.2*ncol(inflow))))
      for(j in 1:nrow(O)){lines(middleOfBars,O[j,],typ='o',pch=21,bg='white',col=j+1)}
      legend('top',
             legend=c(colnames(inflow),colnames(outflow)),
             ncol=2,
             fill=c(gray((1:ncol(inflow))/(1.2*ncol(inflow))),rep(NA,ncol(outflow))),
             lty=c(rep(0,ncol(inflow)),rep(1,ncol(outflow))),
             col=c(rep(1,ncol(inflow)),2:(ncol(outflow)+1)),
             border=c(rep(1,ncol(inflow)),rep(NA,ncol(outflow))),
             box.lwd=0,box.col=NA)
    }
  }
  # ---- Aquifers: same inflow/outflow panel plus a storage panel on a
  # secondary axis
  if(nAuq>0)
  {
    for(i in 1:nAuq)
    {
      oask <- devAskNewPage(TRUE)
      on.exit(devAskNewPage(oask))
      inflow <-x$operation$operation$aquifers[[i]]$operation$inflow
      outflow<-x$operation$operation$aquifers[[i]]$operation$outflow
      storage<-x$operation$operation$aquifers[[i]]$operation$storage
      I<-rep(NA,length(getCycleMean(inflow [,1])))
      O<-rep(NA,length(getCycleMean(inflow [,1])))
      for(j in 1:ncol(inflow)) {I<-cbind(I,getCycleMean(inflow [,j]))};I<-I[,-1,drop=FALSE]
      for(j in 1:ncol(outflow)){O<-cbind(O,getCycleMean(outflow[,j]))};O<-O[,-1,drop=FALSE]
      I<-t(I);O<-t(O)
      ylim<-c(0,max(apply(I,2,sum),apply(O,2,sum)))*(1+(ncol(inflow)+ncol(outflow))*0.1)
      title<-x$operation$operation$aquifers[[i]]$operation$name
      I<-labelRemover(I)
      middleOfBars<-barplot(I,ylim=ylim,ylab='Volume (MCM)',las=2,main=title,col=gray((1:ncol(inflow))/(1.2*ncol(inflow))))
      for(j in 1:nrow(O)){lines(middleOfBars,O[j,],typ='o',pch=21,bg='white',col=j+1)}
      legend('top',
             legend=c(colnames(inflow),colnames(outflow)),
             ncol=2,
             fill=c(gray((1:ncol(inflow))/(1.2*ncol(inflow))),rep(NA,ncol(outflow))),
             lty=c(rep(0,ncol(inflow)),rep(1,ncol(outflow))),
             col=c(1:ncol(inflow),2:(ncol(outflow)+1)),
             border=c(rep(1,ncol(inflow)),rep(NA,ncol(outflow))),
             box.lwd=0,box.col=NA)
      # Second panel: total inflow/outflow bars with aquifer storage line
      # on a right-hand axis
      inflow <-apply(inflow,1,sum)
      outflow<-apply(outflow,1,sum)
      ylim<-c(0,max(inflow+outflow))*1.2
      par(mar = c(5, 4, 4, 4) + 0.3)
      IO<-rbind(inflow,outflow)
      IO<-labelRemover(IO)
      middleOfBars<-barplot(IO,las=2,col=gray(c(1,3)/4),ylab='Volume of inflow & outflow (MCM)',ylim=ylim, main=title)
      par(new = TRUE)
      plot(middleOfBars,storage$storage,axes = FALSE, bty = "n", xlab = "", ylab = "",typ='o',col=2,pch=21,bg='white')
      axis(side=4, at = pretty(range(storage)))
      mtext("Aquifer storage (MCM)", side=4, line=3)
      legend('top',
             legend=c('inflow','outflow','storage'),
             ncol=3,
             fill=c(gray(c(1,3)/4),NA),
             lty=c(0,0,1),
             col=c(1,1,2),
             border=c(1,1,NA),
             box.lwd=0,box.col=NA)
    }
  }
  # ---- Diversions: inflow/overflow/diverted bars plus an ECDF of the
  # diverted volume
  if(nDiv>0)
  {
    for(i in 1:nDiv)
    {
      oask <- devAskNewPage(TRUE)
      on.exit(devAskNewPage(oask))
      inflow  <-x$operation$operation$diversions[[i]]$operation$inflow
      outflow <-x$operation$operation$diversions[[i]]$operation$outflow
      diverted<-x$operation$operation$diversions[[i]]$operation$sim_result$diverted$diverted
      overflow<-x$operation$operation$diversions[[i]]$operation$sim_result$overflow$overflow
      I<-rep(NA,length(getCycleMean(inflow [,1])))
      O<-getCycleMean(overflow)
      D<-getCycleMean(diverted)
      for(j in 1:ncol(inflow)) {I<-cbind(I,getCycleMean(inflow [,j]))};I<-I[,-1,drop=FALSE]
      I<-apply(I,1,sum)
      ylim<-c(0,max(apply(rbind(I,O,D),2,sum)))*1.1
      title<-x$operation$operation$diversions[[i]]$operation$name
      IOD<-rbind(I,O,D)
      IOD<-labelRemover(IOD)
      barplot(IOD,las=2, ylab=c('Volume (MCM)'),ylim=ylim,col=gray(1:3/4),main=title,)
      legend('top',
             legend=c('inflow','outflow','diverted'),
             ncol=3,
             fill=c(gray(c(1:3)/4),NA),
             box.lwd=0,box.col=NA)
      # Empirical CDF of the diverted volume (probability vs volume)
      plot(ecdf(diverted)(seq(min(diverted)-0.01,max(diverted)+0.01,0.01)),
           seq(min(diverted)-0.01,max(diverted)+0.01,0.01),
           typ='l',xlab='Probability',ylab='Diverted Volume (MCM)', main=title)
    }
  }
  # ---- Junctions: line plot of every inflow series plus the outflow
  if(nJun>0)
  {
    for(i in 1:nJun)
    {
      oask <- devAskNewPage(TRUE)
      on.exit(devAskNewPage(oask))
      inflow  <-x$operation$operation$junctions[[i]]$operation$inflow
      outflow <-x$operation$operation$junctions[[i]]$operation$outflow[,1]
      I<-rep(NA,length(getCycleMean(inflow [,1])))
      for(j in 1:ncol(inflow)) {I<-cbind(I,getCycleMean(inflow [,j]))};I<-I[,-1,drop=FALSE]
      O<-getCycleMean(outflow)
      ylim<-c(0,max(I,O))*(1+0.05*(ncol(I)+1))
      title<-x$operation$operation$junctions[[i]]$operation$name
      I<-t(labelRemover(t(I)))
      plot(I[,1],xaxt='n',ylab='Volume (MCM)',ylim=ylim,typ='o',xlab='',pch=0,main=title)
      axis(1, at=1:nrow(I),labels=rownames(I),las=2)
      if(ncol(inflow)>1)
      {
        for(j in 2:ncol(I))
        {
          lines(I[,j],col=j,pch=j-1,bg='white',typ='o')
        }
      }
      lines(O,typ='o',col=j+1,pch=j,bg='white')
      name<-c(colnames(inflow),colnames(outflow))
      legend('top',
             legend=name,
             ncol=2,
             col=1:(j+1),
             pch=0:j,
             box.lwd=0,box.col=NA)
    }
  }
  # ---- Demands: supplied inflow bars vs the demand time series line
  if(nDem>0)
  {
    for(i in 1:nDem)
    {
      oask <- devAskNewPage(TRUE)
      on.exit(devAskNewPage(oask))
      outflow <-x$operation$operation$demands[[i]]$operation$outflow
      demandTS<-getCycleMean(x$operation$operation$demands[[i]]$operation$demandTS$demand)
      if(ncol(outflow)>1)
      {
        # Demand node passes water on: also overlay the total outflow bars
        inflow <-x$operation$operation$demands[[i]]$operation$inflow
        out<-as.matrix(apply(outflow,1,sum))
        colnames(out)<-colnames(outflow)[1]
        O<-getCycleMean(out)
        I<-rep(NA,length(getCycleMean(inflow [,1])))
        for(j in 1:ncol(inflow)) {I<-cbind(I,getCycleMean(inflow [,j]))};I<-t(I[,-1,drop=FALSE])
        ylim<-c(0,max(apply(rbind(I,O),2,sum),demandTS))*1.1
        title<-x$operation$operation$demands[[i]]$operation$name
        col<-gray(1:(ncol(inflow)+1)/(ncol(inflow)+1))
        I<-labelRemover(I)
        middleOfBars<-barplot(I,las=2, ylab=c('Volume (MCM)'),ylim=ylim,main=title,col=col[1:ncol(inflow)])
        lines(middleOfBars,demandTS,typ='o',pch=21,bg='white',col=2)
        barplot(O,las=2, ylab=c('Volume (MCM)'),ylim=ylim,main=title,col=col[length(col)],add=TRUE)
        legend('top',
               legend=c(colnames(inflow),colnames(outflow)[2],'demand'),
               ncol=2,
               fill=c(col,NA),
               lty=c(rep(0,ncol(inflow)+1),1),
               col=c(rep(0,ncol(inflow)+1),2),
               border=c(rep(1,ncol(inflow)+1),NA),
               box.lwd=0,box.col=NA)
      }else{
        inflow <-x$operation$operation$demands[[i]]$operation$inflow
        I<-rep(NA,length(getCycleMean(inflow [,1])))
        for(j in 1:ncol(inflow)) {I<-cbind(I,getCycleMean(inflow [,j]))};I<-t(I[,-1,drop=FALSE])
        ylim<-c(0,max(apply(I,2,sum),demandTS))*1.1
        title<-x$operation$operation$demands[[i]]$operation$name
        col<-gray(1:ncol(inflow)/ncol(inflow))
        I<-labelRemover(I)
        middleOfBars<-barplot(I,las=2, ylab=c('Volume (MCM)'),ylim=ylim,main=title,col=col)
        lines(middleOfBars,demandTS,typ='o',pch=21,bg='white',col=2)
        legend('top',
               legend=c(colnames(inflow),'demand'),
               ncol=2,
               fill=c(col,NA),
               lty=c(rep(0,ncol(inflow)),1),
               col=c(rep(0,ncol(inflow)),2),
               border=c(rep(1,ncol(inflow)),NA),
               box.lwd=0,box.col=NA)
      }
    }
  }
}
|
ab9e7477770bb30e58bb49119e14265d3cb96198
|
dc79a2186fcacfeb2aef0c1f096da8a835bd5a69
|
/sentiment.R
|
915efb8c785e0b8e32a7a793c54a9e573e7dc5e9
|
[] |
no_license
|
gaka444/BIG5
|
56b676e1fdb5c627d234f59650b53337402eb122
|
23b1d6acaa0ccf973f41a8c4a64dc49dc4ab8866
|
refs/heads/master
| 2021-01-25T00:03:55.971622
| 2018-04-11T17:50:49
| 2018-04-11T17:50:49
| 123,288,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,008
|
r
|
sentiment.R
|
#' Count Big-Five trait words in each sentence.
#'
#' For every input sentence, counts how many of its words appear in each of
#' the five trait word lists (Extraversion, Agreeableness, Openness,
#' Conscientiousness, Neuroticism) and returns one data frame per trait.
#'
#' Bug fixed: the original passed the word lists to the per-sentence worker
#' positionally in a different order than the worker's formals, silently
#' swapping the Extraversion/Agreeableness and Openness/Conscientiousness
#' counts. Counts are now computed against the correct lists. The stringr
#' and plyr dependencies were also dropped in favour of base R.
#'
#' @param sentences character vector of texts to score
#' @param extra.words,agree.words,open.words,cons.words,neuro.words
#'   character vectors of trait words
#' @param .progress unused; kept for backward compatibility
#' @return list of five data frames, in order: Extraversion, Openness,
#'   Agreeableness, Conscientiousness, Neuroticism; each holds the per-text
#'   trait count and the original text
score.sentiment = function(sentences, extra.words, agree.words, open.words,
                           cons.words, neuro.words, .progress = 'none')
{
  # How many of `words` occur in one trait word list.
  count.matches = function(words, trait.words) {
    sum(!is.na(match(words, trait.words)))
  }
  # One named row of counts per sentence; the named FUN.VALUE template keeps
  # the row names stable even for zero-length input.
  counts = t(vapply(sentences, function(sentence) {
    # Strip punctuation, control characters, digits and newlines, then
    # split on whitespace to get individual words.
    sentence = gsub('[[:punct:]]', ' ', sentence)
    sentence = gsub('[[:cntrl:]]', '', sentence)
    sentence = gsub('\\d+', '', sentence)
    sentence = gsub('\n', '', sentence)
    words = unlist(strsplit(sentence, '\\s+'))
    c(extra = count.matches(words, extra.words),
      agree = count.matches(words, agree.words),
      open  = count.matches(words, open.words),
      cons  = count.matches(words, cons.words),
      neuro = count.matches(words, neuro.words))
  }, c(extra = 0, agree = 0, open = 0, cons = 0, neuro = 0),
  USE.NAMES = FALSE))
  extraversion.df      = data.frame(Extraversion      = counts[, 'extra'], text = sentences)
  openness.df          = data.frame(Openness          = counts[, 'open'],  text = sentences)
  agreeableness.df     = data.frame(Agreeableness     = counts[, 'agree'], text = sentences)
  conscientiousness.df = data.frame(Conscientiousness = counts[, 'cons'],  text = sentences)
  neuroticism.df       = data.frame(Neuroticism       = counts[, 'neuro'], text = sentences)
  # Same return order as the original implementation.
  list(extraversion.df, openness.df, agreeableness.df,
       conscientiousness.df, neuroticism.df)
}
|
b6938c058537d65f52a8e458753eedfeff805427
|
a4b603fad7a4655815daa2215fe719188b83dac2
|
/pca.R
|
224b87d09b7c3b7c661fd521de31ea32e7655f33
|
[] |
no_license
|
elurisoto/movierec
|
fc638cec25e0bc5fa10423eb132439107816946d
|
a0a490b39796dbdf16a8fe9aa033830e6a0be73f
|
refs/heads/master
| 2016-09-06T21:21:09.324079
| 2015-08-05T16:16:22
| 2015-08-05T16:16:22
| 34,989,142
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,806
|
r
|
pca.R
|
# PCA preprocessing pipeline for the movie dataset: impute missing numeric
# values, run PCA, draw a biplot, and write intermediate artefacts to disk
# for later use.
require(robCompositions)
require(mice)
# Read the preprocessed dataset; several different strings denote missingness.
data <- read.csv("data/outputAlexPreprocessed.csv", header = TRUE, na.strings = c("N/A","None","NA"), stringsAsFactors=FALSE)
types <- lapply(data,class)
# Keep only numeric/integer columns, dropping column 17
# (presumably the user_rating target column -- TODO confirm).
numerics <- data[types=="integer" | types =="numeric"][-17]
target <- data$user_rating
# Missing value imputation
# k-nearest-neighbour imputation (k = 17); $xImp holds the imputed matrix.
imputed <- impKNNa(numerics, primitive=TRUE, metric = "Euclidean", k=17)$xImp
# PCA on the centred and scaled imputed data.
numerics.pca <- prcomp(imputed, center=TRUE, scale=TRUE)
summary(numerics.pca)
library(devtools)
# NOTE(review): this is the legacy two-argument install_github("repo", "user")
# form; modern devtools expects install_github("vqv/ggbiplot").
install_github("ggbiplot", "vqv")
library(ggbiplot)
# Biplot of observations and variable loadings on the first two PCs.
g <- ggbiplot(numerics.pca, obs.scale = 1, var.scale = 1,
              ellipse = TRUE,
              circle = TRUE)
g <- g + scale_color_discrete(name = '')
g <- g + theme(legend.direction = 'horizontal',
               legend.position = 'top')
print(g)
# Write the data in different files to use later
write.csv(imputed, "data/imputados.csv", row.names = FALSE)
write.csv(numerics.pca$x, "data/pca/data.csv", row.names = FALSE)
write.csv(numerics.pca$scale, "data/pca/scale.csv", row.names = FALSE)
write.csv(numerics.pca$center, "data/pca/center.csv", row.names = FALSE)
write.csv(numerics.pca$rotation, "data/pca/rotation.csv", row.names = FALSE)
write.csv(target, "data/pca/target.csv", row.names = FALSE)
write.csv(data[types=="integer" | types =="numeric"], "data/numericos.csv", row.names = FALSE)
write.csv(data[types!="integer" & types!="numeric"], "data/nonumericos.csv", row.names = FALSE)
write.csv(imputed, "data/imputados.csv", row.names = FALSE)
# Column indices written to generos.csv -- presumably the genre indicator
# columns (Spanish "generos"); verify the indices against the dataset.
col.gender = c(1, 2, 4, 6, 7, 8, 10, 11, 21,22,23,26,31,32,42,44,45,46,47,51,57,58)
write.csv(data[,col.gender], "data/generos.csv", row.names=FALSE)
write.csv(cor(cbind(imputed,target)), "results/matriz correlacion.csv")
# To project new data into the new space: scale(newdata,pca$center,pca$scale) %*% pca$rotation
|
ef07caaf6910176c5b5681a41f7956d9b3195ba3
|
fafcd06a2167b9b9af44ce8061876d56ab0f2cd6
|
/R/stl-seasonal.R
|
3b28c9affcbc99b6e2baa5b4a0897912365ecbd3
|
[] |
no_license
|
cran/ggseas
|
1ab51220956f045c3e17fcf5f61abf6becd79829
|
6b099f9f756708b54e8c3b091fb9c168070b66bd
|
refs/heads/master
| 2021-01-10T13:18:57.074349
| 2018-06-12T12:33:33
| 2018-06-12T12:33:33
| 51,983,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,091
|
r
|
stl-seasonal.R
|
# ggproto Stat that seasonally adjusts y on the fly using STL
# (seasonal-trend decomposition by LOESS). The adjusted series is
# trend + remainder, i.e. the original series with the seasonal
# component removed; it can optionally be converted to an index.
StatSTL <- ggproto("StatSTL", Stat,
                   required_aes = c("x", "y"),
                   compute_group = function(data, scales, frequency, s.window,
                                            index.ref, index.basis, ...) {
                     # STL needs the observations in time order.
                     data <- data[order(data$x), ]
                     # Fixed: use inherits() rather than class(x) == "Date" --
                     # class() may return a vector (e.g. c("POSIXct", "POSIXt")),
                     # making the equality comparison unreliable -- and use the
                     # short-circuiting scalar && inside if().
                     if (inherits(data$x, "Date") && is.null(frequency)) {
                       stop("When x is of class 'Date' you need to specify frequency explicitly.")
                     }
                     if (is.null(frequency)) {
                       # Infer the sampling frequency from the spacing of x;
                       # this only succeeds when the spacing is regular.
                       frequency <- unique(round(1 / diff(data$x)))
                       if (length(frequency) != 1) {
                         stop("Unable to calculate frequency from the data.")
                       }
                       message("Calculating frequency of ", frequency, " from the data.")
                     }
                     y_ts <- ts(data$y, frequency = frequency)
                     y_stl <- stl(y_ts, s.window = s.window)
                     # Seasonally adjusted series = trend + remainder.
                     y_sa <- with(as.data.frame(y_stl$time.series), trend + remainder)
                     result <- data.frame(x = data$x, y = as.numeric(y_sa))
                     if (!is.null(index.ref)) {
                       # Optionally express the series as an index relative to
                       # the reference observations (package helper).
                       result$y <- index_help(result$y, ref = index.ref,
                                              basis = index.basis)
                     }
                     return(result)
                   }
)
#' LOESS seasonal adjustment Stat
#'
#' Performs seasonal adjustment on the fly for ggplot2, based on LOESS
#' seasonal-trend (STL) decomposition.
#'
#' @export
#' @import ggplot2
#' @param frequency The frequency for the time series
#' @param s.window either the character string \code{"periodic"} or the span (in lags) of the
#' loess window for seasonal extraction, which should be odd and at least 7, according to
#' Cleveland et al. This has no default and must be chosen.
#' @param index.ref if not NULL, a vector of integers indicating which elements of
#' the beginning of each series to use as a reference point for converting to an index.
#' If NULL, no conversion takes place and the data are presented on the original scale.
#' @param index.basis if index.ref is not NULL, the basis point for converting
#' to an index, most commonly 100 or 1000. See examples.
#' @param ... other arguments for the geom
#' @inheritParams ggplot2::stat_identity
#' @family time series stats for ggplot2
#' @examples
#' ap_df <- tsdf(AirPassengers)
#'
#' # periodic if fixed seasonality; doesn't work well:
#' ggplot(ap_df, aes(x = x, y = y)) +
#'    stat_stl(s.window = "periodic")
#'
#' # seasonality varies a bit over time, works better:
#' ggplot(ap_df, aes(x = x, y = y)) +
#'    stat_stl(s.window = 7)
#'
#' # Multiple time series example:
#' ggplot(ldeaths_df, aes(x = YearMon, y = deaths, colour = sex)) +
#'    geom_point() +
#'    facet_wrap(~sex) +
#'    stat_stl(s.window = 7) +
#'    ggtitle("Seasonally adjusted lung deaths")
#'
#' # Index so first value is 100:
#' ggplot(ap_df, aes(x = x, y = y)) +
#'    stat_stl(s.window = 7, index.ref = 1)
stat_stl <- function(mapping = NULL, data = NULL, geom = "line",
                     position = "identity", show.legend = NA,
                     inherit.aes = TRUE, frequency = NULL, s.window,
                     index.ref = NULL, index.basis = 100, ...) {
  # Gather the STL-specific parameters in one place. Note that this
  # stat is unforgiving of NAs, so na.rm is fixed to FALSE.
  stl_params <- list(frequency = frequency, s.window = s.window,
                     na.rm = FALSE, index.ref = index.ref,
                     index.basis = index.basis, ...)
  ggplot2::layer(
    stat = StatSTL,
    data = data,
    mapping = mapping,
    geom = geom,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = stl_params
  )
}
|
462432bfd36212bdc87bcee727539603af8d5735
|
be8d70e60dd86be6f9f3b2e33d71226763053f6f
|
/Rpackage/man/get_top_user_posts.Rd
|
ec68360dbeed7a31b32b651f4ee660109119526c
|
[
"MIT"
] |
permissive
|
yoni/insta-sound
|
0101c9d6352eea853b7655dc1a8d3a2fcd9a9b73
|
c9ee4c6be98b065052a566ee9852e8fc0f463ae6
|
refs/heads/master
| 2021-01-18T07:15:01.917677
| 2014-01-27T00:00:34
| 2014-01-27T00:00:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 207
|
rd
|
get_top_user_posts.Rd
|
\name{get_top_user_posts}
\alias{get_top_user_posts}
\title{Reduces posts to only those of top users.}
\usage{
get_top_user_posts(posts, limit = 9)
}
\description{
Reduces posts to only those of top users.
}
|
147004da856babcbfae081824ce26fc8c3e92eff
|
6b03397dfa9526499297b460c80a84c514652449
|
/man/identity.matrix.Rd
|
f6dc17528a7592caac52a7932b96bcac9659f5f2
|
[] |
no_license
|
tomateba/tRanslatome
|
726887b6b6edcbf78950263989d5437da8b5f336
|
1f9595cd994e27027eee93f387c6d4c1361be67f
|
refs/heads/master
| 2021-01-16T19:54:07.390987
| 2015-09-07T09:44:42
| 2015-09-07T09:44:42
| 42,043,270
| 2
| 1
| null | 2015-09-07T09:15:30
| 2015-09-07T09:15:30
| null |
UTF-8
|
R
| false
| false
| 668
|
rd
|
identity.matrix.Rd
|
\name{identity.matrix}
\alias{identity.matrix}
\title{labelLevelsGOSetsHelpfile}
\description{
This function displays an object of class \code{character} specifying the names of the two levels compared in the experiment. It takes as input an object of class \code{\linkS4class{GOsims}}.
}
\usage{identity.matrix(object)}
\arguments{
\item{object}{an object of class \code{\linkS4class{GOsims}}.}
}
\author{
Toma Tebaldi, Erik Dassi, Galena Kostoska
}
\seealso{
\code{\linkS4class{GOsims}}
\code{\link{GOComparison}}
}
\examples{
data(tRanslatomeSampleData)
identity.matrix(CCComparison)
}
\keyword{GOsims}
\keyword{identity.matrix}
|
0a9c8cbfe62675b6a237fd0a3b4ed3aa6ca5a197
|
8f88f02674446584914185eb4e2c33e388ae39e5
|
/man/rcbr.fit.GK.Rd
|
3985099e35c49061fae083e7767a570f4b5aa69f
|
[] |
no_license
|
cran/RCBR
|
4d041ba7a0621ac381387567940f009390fb5d3d
|
eb916095e2bf41a4ff50ef3acf816aec3f8eb6e6
|
refs/heads/master
| 2023-01-19T03:22:53.835631
| 2020-11-16T09:10:05
| 2020-11-16T09:10:05
| 315,991,170
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,328
|
rd
|
rcbr.fit.GK.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcbr.fit.GK.R
\name{rcbr.fit.GK}
\alias{rcbr.fit.GK}
\title{Gautier and Kitamura (2013) bivariate random coefficient binary response}
\usage{
rcbr.fit.GK(X, y, control)
}
\arguments{
\item{X}{the design matrix expected to have an intercept column of
ones as the first column.}
\item{y}{the binary response.}
\item{control}{is a list of tuning parameters for the fitting,see
\code{GK.control} for further details.}
}
\value{
a list with components:
\describe{
\item{u}{grid values}
\item{v}{grid values}
\item{w}{estimated function values on 2d u x v grid}
\item{X}{design matrix}
\item{y}{response vector}
}
}
\description{
This is an implementation based on the matlab version of Gautier and
Kitamura's deconvolution method for the bivariate random coefficient
binary response model. Methods based on the fitted object are provided
for \code{predict}, \code{logLik} and \code{plot}. Requires the
orthopolynom package for Gegenbauer polynomials.
}
\references{
Gautier, E. and Y. Kitamura (2013) Nonparametric estimation in random coefficients
binary choice models, \emph{Ecoonmetrica}, 81, 581-607.
}
\author{
Gautier and Kitamura for original matlab version, Jiaying Gu
and Roger Koenker for the R translation.
}
\keyword{nonparametrics}
|
919e391d99bea5cf1e9cf6dac65a723f4d862241
|
517fce31bb1501401fa19b80c563c29b7b9a8804
|
/functions.R
|
614630dc988b6ba331a9349c2a703b4dac076464
|
[] |
no_license
|
reich-group/Integrative-statistical-methods-for-exposure-mixtures-and-health
|
ba3a19373a2ca4548b384a56b223d4f2f530785c
|
7e095e4c98728049dbc85a96e6f24669dbefb9f4
|
refs/heads/master
| 2022-11-05T18:07:10.958232
| 2020-06-24T13:34:06
| 2020-06-24T13:34:06
| 274,676,293
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,729
|
r
|
functions.R
|
# Logistic (inverse-logit) function with the argument capped at 10, so the
# result saturates at expit(10) instead of the exponent underflowing for
# very large x. Vectorised over x; NA propagates.
expit <- function(x) {
  capped <- pmin(x, 10)
  1 / (1 + exp(-capped))
}
# Exponential link function, capped at exp(10) to avoid overflow for
# large x. Vectorised over x; NA propagates.
link <- function(x) {
  exp(pmin(x, 10))
}
# Negative-binomial log-likelihood: Y ~ NB(size = r, mean = N * link(eta)).
# NOTE(review): this definition is immediately overwritten by the Poisson
# version below; kept here, presumably for easy switching between models.
log_like<-function(Y,r,eta,N=1){
  sum(dnbinom(Y,r,mu=N*link(eta),log=TRUE))
}
# Poisson log-likelihood with mean N * link(eta). This definition shadows
# the negative-binomial version above; `r` is accepted but unused, which
# keeps the two versions call-compatible.
log_like <- function(Y, r, eta, N = 1) {
  rate <- N * link(eta)
  sum(dpois(Y, rate, log = TRUE))
}
# Per-observation Poisson log-likelihood contributions (same model as
# log_like, but without summing), useful for pointwise diagnostics.
# `r` is accepted but unused, for call-compatibility with log_like.
log_like_nosum <- function(Y, r, eta, N = 1) {
  dpois(Y, lambda = N * link(eta), log = TRUE)
}
# Simulate responses from the negative-binomial model: one draw per element
# of eta, with size r and mean N * link(eta).
# NOTE(review): this definition is immediately overwritten by the Poisson
# version below; kept, presumably for switching between models.
rlog_like<-function(r,eta,N=1){
  rnbinom(length(eta),r,mu=N*link(eta))
}
# Simulate responses from the Poisson model with mean N * link(eta); this
# definition shadows the negative-binomial one above. `r` is accepted but
# unused, keeping the two versions call-compatible.
rlog_like<-function(r,eta,N=1){
  rpois(length(eta),N*link(eta))
}
# Draw n samples from a Normal(m, s) truncated to [l, u], using the
# inverse-CDF method: draw uniforms on [F(l), F(u)] and map them back
# through the normal quantile function.
rtnorm <- function(n, m, s, l, u) {
  p.lower <- pnorm(l, m, s)
  p.upper <- pnorm(u, m, s)
  p <- runif(n, p.lower, p.upper)
  qnorm(p, m, s)
}
# Metropolis-Hastings update for the scale parameter `sig` of a zero-mean
# normal model for Y. The dt(./scale, df = 1) terms look like a
# half-Cauchy(scale) prior on sig (the 1/scale normalising constant
# cancels in the ratio) -- TODO confirm against the model write-up.
# `MH` tunes the spread of the proposal. Returns the (possibly updated) sig.
update_s <- function(Y,sig,scale=1/qt(0.99,df=1),MH=1){
  Y <- as.vector(Y)
  # Proposal centred near the empirical SD of Y with t(2) tails; abs()
  # folds negative proposals back onto the positive half-line.
  A <- sd(Y)
  B <- 0.71*MH*A/sqrt(length(Y))
  can <- abs(A+B*rt(1,df=2))
  # Log acceptance ratio: normal log-likelihood ratio + prior ratio +
  # proposal-density correction (the proposal is not symmetric in sig).
  R <- sum(dnorm(Y,0,can,log=TRUE))-
    sum(dnorm(Y,0,sig,log=TRUE))+
    dt(can/scale,df=1,log=TRUE)-
    dt(sig/scale,df=1,log=TRUE)+
    dt((sig-A)/B,df=2,log=TRUE)-
    dt((can-A)/B,df=2,log=TRUE)
  # Accept only finite, positive, non-NA proposals via the usual
  # log(uniform) < R comparison.
  if(!is.na(R)){if(can>0){if(can<Inf){
    sig <- ifelse(log(runif(1))<R,can,sig)
  }}}
  return(sig)}
# Alternative Metropolis-Hastings update for `sig`: proposes from a
# gamma distribution on the precision 1/sig^2 (a conjugate-style
# conditional, offset by what looks like a Gamma(0.1, 0.1) prior --
# TODO confirm) and corrects with an independence-sampler acceptance
# ratio. Returns the (possibly updated) sig.
newsd <- function(Y,sig,scale=1/qt(0.99,df=1),MH=1){
  Y <- as.vector(Y)
  # Shape (A) and rate (B) of the gamma proposal for the precision;
  # `MH` scales the data's contribution.
  A <- MH*length(Y)/2+0.1
  B <- MH*sum(Y^2)/2+0.1
  can <- 1/sqrt(rgamma(1,A,B))
  # Log acceptance ratio: likelihood + prior (dt(./scale, df = 1),
  # half-Cauchy-like) + proposal densities; the -3*log(sig) terms are
  # the Jacobian of the precision-to-sd change of variables.
  R <- sum(dnorm(Y,0,can,log=TRUE))-
    sum(dnorm(Y,0,sig,log=TRUE))+
    dt(can/scale,df=1,log=TRUE)-
    dt(sig/scale,df=1,log=TRUE)+
    (dgamma(1/sig^2,A,B,log=TRUE)-3*log(sig))-
    (dgamma(1/can^2,A,B,log=TRUE)-3*log(can))
  # Accept with the usual log(uniform) < R comparison; NA ratios are
  # rejected outright.
  if(!is.na(R)){
    sig <- ifelse(log(runif(1))<R,can,sig)
  }
  return(sig)}
|
54ac0ab2e0142ecc836a681ef7457acd0323b017
|
7a08cd1d405ddec1545b548860b18a68b19fa1ac
|
/man/generate_kernel.Rd
|
5b59ecd8cc5b28477f7ae53f620400a399875705
|
[] |
no_license
|
wxwx1993/GPSmatching
|
b27d419ce33c0b516b10713a4bd428f639a1caf1
|
d6661dca3a027e00cdea0cc30ada0ed42649bc7b
|
refs/heads/develop
| 2023-08-24T03:11:08.438445
| 2023-02-17T21:53:27
| 2023-02-17T21:53:27
| 233,946,225
| 21
| 8
| null | 2023-02-17T21:52:44
| 2020-01-14T22:12:28
|
R
|
UTF-8
|
R
| false
| true
| 356
|
rd
|
generate_kernel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/causalgps_smooth.R
\name{generate_kernel}
\alias{generate_kernel}
\title{Generate kernel function}
\usage{
generate_kernel(t)
}
\arguments{
\item{t}{standardized vector (z-score)}
}
\value{
probability distribution
}
\description{
Generates kernel function
}
\keyword{internal}
|
c019b0026f1f3ba9e014ed02c9a9c2213c5f5cda
|
9ee0ab61b9d870fa5237a2869b60e9e8877f8403
|
/man/setAttribute.Rd
|
547c4b2e635e3778604897097bfebf16ebbdc53b
|
[] |
no_license
|
AustralianAntarcticDivision/EPOC
|
79390dc3456a9cbacfb64884f10cdcf5fedad393
|
778be35f146197c571bb1ebfb76eb2a629eaad70
|
refs/heads/master
| 2020-09-09T22:12:49.843987
| 2019-11-14T01:55:36
| 2019-11-14T01:55:36
| 221,583,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 987
|
rd
|
setAttribute.Rd
|
\name{setAttribute}
\alias{setAttribute}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
EPOCObject methods
}
\description{
Set an element attribute in its list with value.
}
\usage{
setAttribute(.Object, item = c("character", "missing"), value=c("ANY", "list"))
}
\arguments{
\item{.Object}{
EPOCObject
}
\item{item}{
Name of EPOC attribute to insert or overwrite
}
\item{value}{
Value to insert or overwrite as
}
}
\details{}
\value{
EPOCObject passed
}
\references{}
\author{ Troy Robertson }
\note{}
\seealso{
\code{\linkS4class{EPOCObject}, \link{getAttributeNames}, \link{getAttribute}}
}
\examples{
## Set attribute foo with value "bar"
# setAttribute(element, item="foo", value="bar")
## Set all EPOC attributes with a list
# setAttribute(element, value=list("foo"="bar", "bar"="foo"))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
030ad52ed21d6ee994b30b4d786b5a7653ca288c
|
30334ffb2670b112715e811b4ac3d3cfd37a3a57
|
/Lesson2-Data_Cleanliness.R
|
9273336db044d26963cd70f19fa07ba4a7b905d3
|
[
"CC0-1.0"
] |
permissive
|
mdcu-spark/R-Manual-for-Sapiens
|
2e5c930ab6a14d86b25ebe136c19d056e6b9b2ea
|
0f13eb53ce34d2469abd16f6ce637562ee8de1f6
|
refs/heads/master
| 2022-11-24T00:47:44.045435
| 2020-08-02T13:38:18
| 2020-08-02T13:38:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,911
|
r
|
Lesson2-Data_Cleanliness.R
|
########################################################
# #
# PSYC201 R Lab #
# Lesson 2 - Data Storage & Cleanliness #
# #
########################################################
# From now on, we will often be using the PSYC201 package
# This allows us to use the custom functions written for this class
# Thus you will often see the following header to these lesson scripts:
library(PSYC201)
##############################
# 2.1 - Lists & matrices #
##############################
# We've learned how to store multiple pieces of data in vectors
# But what if we want to structure the data into two dimensions...
# Or store multiple types of data in one variable?
# For this we have matrices and lists
# Matrices are just 2-dimensional vectors
# The way to make one is the function: matrix(data,nrow,ncol)
# Data is a vector of values that gets read off into the columns, one by one
# nrow and ncol tell R how to shape the matrix
# For instance, you can create a matrix with the numbers 1 to 6 and 2 rows, 3 columns with:
matrix(1:6,nrow=2,ncol=3)
# Note how the numbers count down the columns
# This is also different from the matrix with the same data, but 3 rows and 2 columns:
matrix(1:6,nrow=3,ncol=2)
# If you want the data to read across the rows instead of down the columns, set 'byrow' to TRUE
matrix(1:6,nrow=3,ncol=2,byrow=TRUE)
# And finally, you can leave out either the number of row or columns and get the same thing
# R knows how to calculate the other one given the length of the input vector
matrix(1:6,ncol=2,byrow=TRUE)
matrix(1:6,nrow=3,byrow=TRUE)
# Note that the length of the vector must have a length equal to nrow * ncol if you give both arguments
# If not, can you guess what happens?
# (Hint: it starts with 'r', ends with 'ecycling', and is bad)
# Now lets store a matrix from 1-9
junk.mat = matrix(1:9,nrow=3,ncol=3,byrow=TRUE)
junk.mat
# We can access individual matrix items by index, similar to the way we do for vectors
# However, rather than one index, you should use two: [row,col]
junk.mat[2,1] # Should be 4; second row, first column
junk.mat[3,2] # Should be 8; third row, second column
# Accessing by individual indices also work (it flattens the matrix back into a vector), but can get confusing
# You can also take matrix slices by ranges, just like for vectors
junk.mat[1:2,2:3] # Gets the upper right square
# And you can also access individual rows or columns, using the index [row#,] or [,col#]
junk.mat[1,] # Gives you the first row
junk.mat[,3] # Gives you the third column
junk.mat[1:2,] # Gives you rows 1 and 2
# You can assign back to a matrix by index
junk.mat[1,3] = 10 # Change one item
junk.mat
junk.mat[,2] = c(11,12,13) # Change a whole column
junk.mat
# Finally, let's say you also want to find the shape of the matrix
# We can't just use the length command... it works, but only gives us the total number of items in the matrix
length(junk.mat)
# Instead, we need to use the dim() command
# This gives us a vector of two numbers... first the number of rows, then the number of columns
dim(junk.mat)
rm(junk.mat)
# Lists are a way of storing multiple vectors or data types into one data structure
# Remember, if you try to store numbers and characters together in a vector, it changes the numbers to characters
c(1,"A",2)
# Likewise, you can't store vectors within a vector:
c(1,c(2,3),4)
# But with lists you can do both of these things
list(1,"A",2)
list(1,c(2,3),4)
# These look a little funny... that's because a list stores a number of different vectors
# The vector at [[1]] above is just 1, the vector at [[2]] is 2,3, etc.
# Let's store that into a variable
junk.list = list(1,c(2,3),4)
# Accessing items out of a list is a little different - you must use double brackets if you want the data
junk.list[[1]]
junk.list[[2]]
# If you use single brackets, it just gives you a list of length 1 back - which you can't use as a number
junk.list[1]
junk.list[1] + 2 # ERROR!
junk.list[[1]] + 2 # Okay!
class(junk.list[[1]]) # A numeric - we pulled out the vector
class(junk.list[1]) # A list - we just got that element of the list
# We can also create names lists within the list function:
junk.list = list('A' = 1, 'B' = c(2,3), 'C' = 4)
junk.list
names(junk.list)
# We can now access items from that list using the name
junk.list[['A']]
# But there's also a shortcut to the names using $ rather than the double brackets:
junk.list$A
# We can also change members of a list, or even elements of list members:
junk.list$A = c(3,5) # Replaced element A with a vector
junk.list
junk.list[[3]] = 10 # Replaced the third element (C) with 10
junk.list
junk.list[3] = 7 # You also don't have to worry about the double-brackets here
junk.list
junk.list$B[1] = 12 # Replaced the first number in B with 12
junk.list
junk.list[['B']][2] = 16 # Replaced the second number in B with 16
junk.list
# You can also add new items to a list by assigning to a new name within the list
junk.list$newitem = c('Hi','there')
junk.list
# Or remove items from a list by assigning NULL to an existing name
# NULL is a special variable that tells R that nothing is there
junk.list$B = NULL
junk.list
rm(junk.list)
##############################
# 2.2 - Data frames #
##############################
# Now we move on to data frames - this is the way R stores most datasets
# Effectively, it is a list of vectors, but all of the vectors must be of equal length
# You can think of these as storing one data observation per row
# Each different type of information you have on those observations make up the columns
# Let's look at a builtin data frame - mtcars - as an example
mtcars
# Each of the rows is named with the car type
# Then each of the columns has different information about those cars (avg mpg, # of cylinders, etc.)
# This data is rectangular - the number of rows is equal for every column
# You can build a data frame just like a list:
data.frame('Name' = c('A','B','C'), 'Num' = c(1,2,3), 'Logic' = c(TRUE,FALSE,TRUE))
# Note how much easier this is to look at than a list with the same data
# It's easy to tell in a data frame which records go together - not so much in a list
list('Name' = c('A','B','C'), 'Num' = c(1,2,3), 'Logic' = c(TRUE,FALSE,TRUE))
# But note that if the vectors aren't equal length, R doesn't like it:
data.frame('Name' = c('A','B'), 'Num' = c(1,2,3), 'Logic' = c(TRUE,FALSE,TRUE)) # Note Name has 2 items only
# And if you have a rectangular list, you can transform it into a data frame easily
l = list('Name' = c('A','B','C'), 'Num' = c(1,2,3), 'Logic' = c(TRUE,FALSE,TRUE))
data.frame(l)
# So let's store that data frame:
junk.df = data.frame(l)
rm(l)
# In addition to typing the name of a data frame in the console, RStudio lets us look at them in a window
# Remember, you can click on the data frame in the Workspace tab, or use the View() command
View(junk.df)
# We can also access data just like we did for lists:
junk.df$Num
junk.df[[3]]
# Or individual items from the vectors:
junk.df$Num[1] # 1: the first element of Num
# Because all the vectors are the same length, we can also use matrix indices to grab data
# However, it's advised not to use this - it's hard to tell exactly what you're getting back
junk.df[1,2] # 1: the first row and second column (in this case Num)
# Finally, we can access individual rows using matrix indices
# We do this by giving the row number, then a comma, just like with matrices
junk.df[1,] # The first data observation
junk.df[2:3,] # The second and third data observation
# We can also create data frames with row names by giving it the row.names argument
junk.df2 = data.frame('Name' = c('A','B','C'), 'Num' = c(1,2,3), 'Logic' = c(TRUE,FALSE,TRUE),
row.names = c('Elem.1','Elem.2','Elem.3'))
junk.df2
# Now we can access data by names:
junk.df2['Elem.1','Num']
# Or observations by name
junk.df2['Elem.1',]
# We can get these names back by using the dimnames() command
# This provides a list of names for each dimension
dimnames(junk.df2)
# If we use names(), we just get the column names
names(junk.df2)
# We can also set the dimnames by a list of names with an entry for each dimension:
dimnames(junk.df2) = list(c('E1','E2','E3'),c('Name','Number','Logic'))
junk.df2
# Or just the rownames, by changing the second item in the dimnames list:
dimnames(junk.df2)[[1]] = c('El1','El2','El3')
# You can also add new data to data frames, just by assigning it to an empty column
junk.df2$NewCol = c(10,12,14)
junk.df2
# And can add transformed data back to the table:
junk.df2$Times2 = junk.df2$Number * 2
junk.df2
# If you want to access only subsets of the observations based on conditionals, there are two ways of doing this
# First, you can have the conditional in the first part of the index
junk.df2[junk.df2$Logic == TRUE,]
# Or you can just use the subset() command
# This takes two arguments - the data frame and the conditional
# Note that you don't need to tell subset() which data frame you are drawing the columns for
# It assumes that you are using the same data frame you are subsetting
subset(junk.df2,Logic == TRUE)
# And just like conditionals from simulation, you can use multiple conditionals
subset(junk.df2,Logic == TRUE & Number == 3)
# This is very useful when you want to:
# 1) Review/analyze only parts of your data (e.g., only subjects in Condition 1)
# 2) Eliminate outliers or inappropriate data points
# In either case, you can store the reduced table into another variable for easy access
junk.df2.reduced = subset(junk.df2, Logic == TRUE)
junk.df2.reduced
# The final bit of table access we'll go through involve the with() and attach() commands
# Sometimes you'll be accessing a number of variables in the same data frame at once
# It can get long and tedious to tell R that you are accessing the same data frame over and over
# So the with() command tells R that it should just assume the variables you call or from that data frame
# This takes two commands, the data frame, and the expression you want to run
# For instance, this adds the Number data to the NewCol data
with(junk.df2, Number + NewCol)
# This is identical to typing:
junk.df2$Number + junk.df2$NewCol
# This gets very useful when you want to access only a subset of the table:
with(subset(junk.df2,Logic == TRUE), Number + NewCol)
# And then there are other times that you'll be working with one data frame exclusively
# In this case, you might be using those variables on every line, but don't want to write with() over and over
# For this, there's the attach command, which 'attaches' all of the data from the data frame to the console
attach(junk.df2)
# Once you've attached a data frame, you can access the data without having to tell R the frame
Number
NewCol
Number + NewCol
# However, be careful - if you change any of the data, it will create a copy and won't affect the data frame
Number[2] = 10
Number
junk.df2$Number
# Note that 'Number' now shows up in your workspace
# Finally, when you're done with the data frame, you can detach() it
# This will prevent default access to the data, and is good clean-up practice
detach(junk.df2)
NewCol
# But any copied/changed data will remain
Number
# A final note on data frames... sometimes you want to know what type of data is stored in each column
# For this, you can use the str() command
str(junk.df2)
# Note that this tells you what each column is (factor, numeric, logical, etc.) and the first few observations
# This can be very useful if you're given a new dataset and want to know what it contains
# You might also want to get just the first few records
# For this you want to use the head() command - for the first six
# This won't have an effect with junk.df2 (there are only 3 records)
# But look at mtcars
mtcars
head(mtcars)
# It's much easier to get a quick readout with 'head'
# Now let's clean up our data frames
rm(junk.df,junk.df2,Number,junk.df2.reduced)
##############################
# 2.3 - Factors #
##############################
# In the first lesson, we talked about different types of singular data that could be stored in variables
# These could be numbers, character strings, or logical values
# Now it's time to learn about another type of data R uses a lot: factors
# Factors come up often when importing data tables
# For instance, what happens if we look at the class of 'Cond' in sample.data?
class(sample.data$Cond)
# Notice it says "factor"
# If you look in the variable, it looks slightly different than a character string...
# Note the 'Levels'
sample.data$Cond
# So why use factors instead of characters?
# Factors are a way for R to store different conditions in a way that's easy to use and save space
# For instance, let's say you're running an experiment with three drugs, a control group, and 20 subjects
# You could store these conditions a character strings
cond.char = rep(c('DrugA','DrugB','DrugC','Control'),each = 5)
# Okay... that works and differentiates the conditions, but it takes up a lot of space
# Computers need more memory to store character strings than numbers
# This isn't really a problem with 20 subjects, but some datasets can have millions of records
# That space would add up fast
# So how about we store them as a number: 1,2,3,4?
cond.num = rep(c(1,2,3,4),each=5)
# Well, that saves space, but it's not obvious which condition is which
# Did you put control first or last? That could make a huge difference if you forget months down the line!
# Factors are a way of splitting the difference here
# They store information as numbers, but associate those numbers with 'levels'
# We can make factors by using the as.factor() command, but usually they come about from reading data in
# (We'll see data input later in this lesson)
cond.fact = as.factor(cond.char)
# When you print out the factor conditions, it looks like the character list
# But at the bottom, you see something that says 'Levels: Control DrugA DrugB DrugC'
# If you want to get the levels of a factor, you can use the levels() command:
levels(cond.fact)
# You shouldn't worry too much about factors beyond this
# This is the default way categorical data is read into R
# Most of the time, they will act just like character strings when you are doing analysis
# However, sometimes this means we will need to use special commands to deal with factors
# We'll see an example of when this is used later in the lesson
rm(cond.char,cond.num,cond.fact)
##############################
# 2.4 - Input/Output #
##############################
# In many cases you'll want to read and write data from files
# R makes this easy for you with the read.table() and write.table() commands
# To open up a file, first you need to make sure you're in the correct 'working directory'
# This tells the computer where to look for the files
# To learn where your working directory is set, you can use the getwd() command
getwd()
# You can then use the setwd() command with the path argument to set it to where you want to go
# But there are much easier ways of doing this in RStudio
# First, you can use the Files tab
# Just go to the directory you want to access, then click 'More' and select 'Set As Working Directory'
# If you can see the file you want to open in the Files tab, then you're in the right place
# Next, you can select it manually
# Go to the 'Tools' menu, select 'Set Working Directory' and 'Choose Directory'
# You will be able to search through your files to find the right directory
# Finally, if you are working on RScript and your data is in the same directory, this is easy
# Just go to the 'Tools' menu, select 'Set Working Directory' and 'To Source File Location'
# Let's start with the Lesson2_Data1.csv file
# Download it then click on it in the Files pane
# You'll see it's just some text that looks like:
# A,20,1.5
# A,30,0.5
# B,40,2
# B,50,5.5
# This is a typical csv file - different columns of data separated by commas, with each observation on a new line
# You can read this in with the read.table() command
# Note that you must set the 'sep' argument to ',' to tell it that commas count as data breaks
read.table('Lesson2_Data1.csv',sep = ',')
# The columns aren't named - but you can do this with the col.names argument
read.table('Lesson2_Data1.csv',sep = ',',col.names = c('Cond','N1','N2'))
# And can name the rows using the row.names argument
read.table('Lesson2_Data1.csv',sep = ',',col.names = c('Cond','N1','N2'),row.names = c('E1','E2','E3','E4'))
# We can also select only some of the rows
# If you want a certain number of rows, you can use the nrows argument to tell R how many observations you want
# It will only read in the first n rows of the file
read.table('Lesson2_Data1.csv',sep = ',',col.names = c('Cond','N1','N2'),nrows = 2)
# Now let's take a look at the 'Lesson2_Data2.csv' file
# This is exactly like the Data1 file, but column names are already there
# If you just try to read in the table, it will think that the headers are part of the data
read.table('Lesson2_Data2.csv',sep = ',')
# Instead, we want to use the 'header' argument to tell R that the first line is actually a header
# This sets the column names to whatever is in the first line
read.table('Lesson2_Data2.csv',sep=',',header = TRUE)
# A shortcut to all of this is also the read.csv() command
# This is exactly like the read.table command, except it assumes that sep=',' and header=TRUE
read.csv('Lesson2_Data2.csv')
# So now let's store the data:
sample.data = read.csv('Lesson2_Data2.csv',row.names = c('E1','E2','E3','E4'))
sample.data
# It's then a data frame - we can do anything we want to it within R now
sample.data$N3 = sample.data$N1 * 4
sample.data
# But when we're done, we might want to write our data frame back out to a file
# For this, we have the write.table() command
# This needs two arguments, the data frame and the file name
write.table(sample.data,'Lesson2_Output.csv')
# But open it up - this looks a bit messy
# First, we didn't give it a sep argument, so it assumed spaces should separate columns
write.table(sample.data,'Lesson2_Output.csv',sep=',')
# If you open it up again, you'll note that the original data has been overwritten
# We can change this using by setting the append argument to TRUE
# This tells R not to overwrite the file, but instead to write to the end of the file
write.table(sample.data,'Lesson2_Output.csv',sep=',',append=TRUE)
# However, you'll see a warning when you do that - it tells you column names are appended
# Usually if you're just adding data, you don't want to rewrite the column names
# You can turn this off by setting col.names to FALSE
write.table(sample.data,'Lesson2_Output.csv',sep=',',append=TRUE, col.names = FALSE)
# Let's clear all of this and just get the basic data into this file:
write.table(sample.data,'Lesson2_Output.csv',sep=',')
# Also, like read.csv was shorthand for read.table, there is a write.csv command
# This is almost exactly like setting sep=',' in write.table
write.csv(sample.data,'Lesson2_Output2.csv')
# But open these two files side-by-side and you'll notice one important difference
# There is an extra "", at the start of the write.csv file
# If you tried opening the first output in Excel, the column names would be mismatched with the data
# It would put 'Cond' above the element names
# With that extra bit from write.csv, the element names header will be blank and everything else will match
# Finally, we can turn off writing the element names by setting row.names to FALSE
write.csv(sample.data,'Lesson2_Output3.csv',row.names=FALSE)
##############################
# 2.5 - Data cleanliness #
##############################
# So far all we have learned about reading and using data assumes that it's in good condition
# But this often isn't true in reality - there can be missing or misentered data in your files
# While every situation with unclean data is going to be unique,
# right now we'll go over three that can be caught relatively easily
# To do this, first we're going to read in some data
dat = read.csv('Lesson2_DataClean.csv')
# Generally, the first thing you'll want to do when reading in data is use the head() command
head(dat)
# From this you can see that there are four fields - two different conditions, and two data fields
# You might also look at this and think that everything looks good, but there are three problems with this data
# Two can be caught easily by using the str() command
str(dat)
# If you have different conditions, those should be factors...
# But Condition2 looks like an 'int'
# Why? Because Condition2 is tagged with '1' '2' and '3', not letters
# So R has no way to understand that Condition2 should be different conditions as opposed to numbers
# This will cause problems later when we start doing analyses where R treats factors and integers differently
# However, it's an easy fix: we just tell R to treat Condition2 as a factor
dat$Condition2 = factor(dat$Condition2)
# Now R will treat Condition2 appropriately
str(dat)
# But there's another issue - Data2 looks like a factor, but should be a number
# This will cause lots of problems if you ever try to do anything with numbers:
dat$Data2 + 3
# Oops...
# So why did this happen?
# Well, this data frame is small enough that we can look through all of Data2
dat$Data2
# If you look carefully, you'll see '91b' in there
# As if someone fat-fingered the data entry
# If you're not sure that it's fat fingered, and want to get rid of it,
# you can just change it into a character, then a number
dat$Data2Num = as.numeric(as.character(dat$Data2))
# Don't worry about the warning - that's intended because you want to eliminate the 91b
# So why the double transform?
# As we learned in the factors section, factors are stored as numbers with labels
# So if you just use as.numeric, it simply uses the numbers:
as.numeric(dat$Data2)
# By transforming it into characters first, you're making sure that you're using the labels
# But what if you want to correct the fat-finger?
# This isn't easy to do in R...
# But you can easily find where the error is with the which() command
# The which() command returns the indices of a test on a vector
# For instance, if you want to know which rows include Condition1 as 'B', you would write:
which(dat$Condition1 == 'B')
# So if you're testing for where errors occur,
# you want to know which indices get turned into NA when made into numbers
# For this we use the is.na() command, which return TRUE if the value is NA, FALSE otherwise
is.na(dat$Data2Num)
# So we can just throw a which() question around that call:
which(is.na(dat$Data2Num))
# And now we know that when we edit the data, we use the 14th row
# The last issue is harder to find - missing data
# This gets read in without issue and converted to NA with numbers
# And you never get a warning
# R deals with NAs in different ways depending on the command...
# Sometimes it silently removes NAs, sometimes it throws an error
# So you really want to know where NAs exist
# The way to figure out which columns to worry about is the summary() command
summary(dat)
# You'll note that at the bottom of the Data1 and Data2Num columns, there's an NA's row
# This tells you how many NAs are in each of those fields
# We knew there was one in Data2Num, but the Data1 NA is news
# Which means that if we try to use lots of commands (like mean()), they'll fail:
mean(dat$Data1)
# So how do we deal with this?
# One way is to leave it be and deal with errors as they come up
# However, this isn't advised, since the behavior of NAs is always different
# The other option is to eliminate the rows with NAs
# The easiest thing to do here is use the command na.omit()
# This returns a new data frame that eliminates any rows with missing data
dat2 = na.omit(dat)
summary(dat2)
# Note, however, that this eliminates two rows:
nrow(dat)
nrow(dat2)
# That's because there are two rows with NAs...
# The one in Data1, and the one we made in Data2Num
# But what if we didn't care about removing missing data from some columns?
# E.g., we had an 'age' variable that we weren't using but had missing data
# In this case, we can use a more targeted method with subset()
# Here, we can eliminate only the NAs in Data1
# We ask R to return only the parts of dat that are *not* NA
dat3 = subset(dat, !is.na(Data1))
# Note that we now have data that removes the NA in Data1, but keeps the NA in Data2Num
summary(dat3)
# These are some simple ways to make your data acceptable for analysis
# Note that there are some other checks you'll want to run for outliers, etc.
# But we will get to those later when we talk about statistical tests
rm(dat,dat2,dat3)
|
88a4bbf9231a2e82945ef20571dcf627fff72346
|
532017b5ba10c5a56df994331914aed1de3c6180
|
/Lake_NoiseMgt_Spectra_2015-06-07.R
|
8ad54ae31ca957561473e6bdcf72fc1757ed2249
|
[] |
no_license
|
CFL-UWMadison/SOSvariance
|
25ab291bcc9f325bcf69be59f3cb2164624ff5e7
|
68c16c6cd6a672545c96b5cd63285bf7521e0d9e
|
refs/heads/master
| 2020-03-29T16:04:52.698902
| 2015-06-16T20:59:18
| 2015-06-16T20:59:18
| 37,533,145
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,109
|
r
|
Lake_NoiseMgt_Spectra_2015-06-07.R
|
# Program to plot spectra for 2-D lake model
# SRC 2015-05-24
# NOTE(review): rm(list = ls()) wipes the user's entire workspace when this
# script is sourced; tolerable for a standalone analysis script, but avoid
# in shared code.
rm(list = ls())
graphics.off()  # close any graphics devices left open from earlier runs
library(multitaper)  # provides spec.mtm() used in GetSpec() below
# FUNCTIONS <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# 2D rate funtion for simulation
dWM.noh = function(M, W) {
  # Deterministic rates for the 2-D lake phosphorus model at sediment P
  # mass M and water P mass W, excluding load and noise terms.
  # Reads parameters from the global environment: s (sedimentation),
  # h (export), r (max recycling), b (burial), q and mq = m^q
  # (recycling half-saturation).
  # Returns c(dW/dt, dM/dt).
  recycle <- r * M * W^q / (mq + W^q)  # saturating sediment-to-water recycling flux
  c(
    -(s + h) * W + recycle,       # dW/dt: sedimentation + export losses, recycling gain
    s * W - b * M - recycle       # dM/dt: sedimentation gain, burial + recycling losses
  )
}
# Function to return time series given lamda and u
# Simulate nt steps of the lake model for mean P load lamda and
# variance-management coefficient u.
# W is updated through an ARMA-style filter: (u + phi) and u*phi terms
# combine the management filter (u) with AR(1) load autocorrelation (phi);
# eps is white noise with sd sigma.
# Relies on globals: nt, dt, sigma, phi, W0, M0, plus the parameters read
# by dWM.noh(). Floors keep M >= 1 and W >= 0.1 so the recycling term
# stays well-defined.
# Returns list(W series, M series), each of length nt (burn-in included).
Tsim = function(lamda, u) {
  # Preliminaries: noise draws and state vectors; the first three steps are
  # seeded from W0/M0 (the recursion below needs two lags of history)
  eps = sigma*rnorm(nt)
  Wt = rep(0, nt)
  Mt = rep(0, nt)
  Wt[1:3] = W0 + eps[1:3]
  Mt[1:3] = M0 - eps[1:3]
  ratemat = matrix(0, nr=nt, ncol=2)  # column 1 = dW/dt, column 2 = dM/dt
  ratemat[1,] = dWM.noh(Mt[1], Wt[1])
  ratemat[2,] = dWM.noh(Mt[2], Wt[2])
  for(i in 3:(nt-1) ) {
    ratemat[i,] = dWM.noh(Mt[i], Wt[i])
    # Euler step for sediment P, floored at 1
    Mnext = Mt[i] + (ratemat[i,2])*dt
    Mt[i+1] = max(Mnext, 1)
    # Filtered update for water P: AR terms on past W, matching MA terms on
    # past rates, a load term scaled so the filter has unit steady-state
    # gain, plus noise
    Wnext = Wt[i] + (u+phi)*(Wt[i]-Wt[i-1]) - u*phi*(Wt[i-1]-Wt[i-2]) +
      ratemat[i,1]*dt - (u + phi)*ratemat[i-1,1]*dt + u*phi*ratemat[i-2,1]*dt +
      (1-(u+phi)+u*phi)*lamda + eps[i]
    Wt[i+1] = max(Wnext, 0.1)  # floor water P at 0.1
  }
  outlist = list(Wt, Mt)
  return(outlist)
}
# Function to return spectra
GetSpec = function(X) {
  # Multitaper power spectrum of a series via multitaper::spec.mtm()
  # (30 tapers, time-bandwidth product 15, adaptive weighting, no plot).
  # Returns list(frequencies, spectral estimates).
  series <- ts(X, deltat = 1)
  mtm_fit <- spec.mtm(series, k = 30, nw = 15, nFFT = 'default',
                      centreWithSlepians = TRUE, Ftest = FALSE,
                      jackknife = FALSE, maxAdaptiveIterations = 100,
                      plot = FALSE, na.action = na.fail)
  list(mtm_fit$freq, mtm_fit$spec)
}
# END FUNCTIONS >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# Parameters (rates per time step unless noted)
b = 0.002 # this rate is increased from previous papers; burial is 0.001 in E.L. 2006
#h = 0.15 # export coef. from E.L. 2006
h = 1-exp(-0.29) # export coef. from BalanceModelFit_Mendota+2sigmas_2013-08-10.R (Stable balance model)
m = 4 # half-saturation for recycle; wide range 1-15 in Carpenter & Lathrop Ecosystems 2008; 8 in EL 2006
r = 0.019 # max. recycling rate; ~0.002 in Carpenter & Lathrop 2008; 0.019 in E.L. 2006
q = 4 # 4 is near posterior mode in Carpenter & Lathrop Ecosystems 2008; 8 was used in E.L. 2006
mq = m^q  # precomputed m^q used in the recycling denominator (dWM.noh)
#s = 0.7 # sedimentation from E.L. 2006
#s = 1-exp(-0.34) # sedimentation from BalanceModelFit_Mendota+2sigmas_2013-08-10.R (Stable balance model)
s = 1-h # see typed notes on this model from 2014-12-06
# Variables chosen to be near the threshold but within safe operating space
# See Lake2D_U-in-h_ARMA_V0_2014-12-23.R
# Note that mean Mendota load is about 0.85 g m-2 y-1 (range 0.32 to 1.98)
L.mod = 1.2
M.mod = 330
# Noise process
sigma = 0.35  # white-noise sd of the load innovations
phi = 0.1 # AR
# Simulation control: nt includes a 500-step burn-in that is discarded later
nt = (2^11)+500
dt=1
tvec = (1:(nt-500))  # time axis for the retained (post-burn-in) series
# Initial conditions
W0 = 1
M0 = M.mod
# Set up lamda value (mean P load)
lamda=0.5
# Set up u values: 0.6 = variance management on, 0 = nominal (off)
nu=2
uvec = c(0.6,0)
# Output containers: one column of W (water P) and M (sediment P) per u
# value, sized for the series after the 500-step burn-in is removed.
# (Spelled out nrow/ncol — the original relied on partial argument
# matching via nr=/nc=.)
Wmat = matrix(0, nrow = nt - 500, ncol = 2)
Mmat = matrix(0, nrow = nt - 500, ncol = 2)
# Compute time series for each variance-management setting u
for (j in seq_len(nu)) {
  u = uvec[j]
  simlist = Tsim(lamda, u)
  Wsraw = simlist[[1]]
  Wmat[, j] = Wsraw[501:nt] # throw away the burn-in
  Msraw = simlist[[2]]
  Mmat[, j] = Msraw[501:nt] # throw away the burn-in
}
# Compute spectra of each W series after standardizing to zero mean and
# unit sd (so the two spectra are directly comparable)
# Variance control (u = 0.6)
w_std_con <- (Wmat[, 1] - mean(Wmat[, 1])) / sd(Wmat[, 1])
sp.con <- GetSpec(w_std_con)
# No variance control (u = 0)
w_std_nocon <- (Wmat[, 2] - mean(Wmat[, 2])) / sd(Wmat[, 2])
sp.nocon <- GetSpec(w_std_nocon)
# Figure 1: the two W time series (blue = nominal, red = variance managed)
# NOTE(review): windows() only exists on Windows; dev.new() would be portable.
windows()
par(mfrow=c(1,1),mar=c(5, 4.3, 4, 2) + 0.1, cex.axis=1.6,cex.lab=1.6)
yrange=range(Wmat)
plot(tvec,Wmat[,2],type='l',lwd=2,col='blue',ylim=yrange,#log='y',
  xlab='Time Step',ylab='Water P')
points(tvec,Wmat[,1],type='l',lwd=2,col='red')
#points(tvec,Hsim.white,type='l',lwd=2,col='black')
# Figure 2: the two spectra on a log power axis
windows()
par(mfrow=c(1,1),mar=c(5, 4.3, 4, 2) + 0.1, cex.axis=1.6,cex.lab=1.6)
yrange=range(sp.nocon[[2]],sp.con[[2]])
plot(sp.nocon[[1]],sp.nocon[[2]],type='l',lwd=2,col='blue',ylim=yrange,log='y',
  xlab='Frequency',ylab='Spectrum')
points(sp.con[[1]],sp.con[[2]],type='l',lwd=2,col='red')
legend('bottomleft',
  legend=c('Nominal','Var. Management'),
  col=c('blue','red'),
  lwd=c(2,2),cex=1.6)
|
9048bf50cbfca8ee484d55f08ba8ff10e94bc07d
|
3952a65a31e97dfb0d23dfc3e5bbc3fd0d270708
|
/03_func.r
|
2bcac5cd3dfb80c297460530403deb39ed9c9598
|
[] |
no_license
|
jdunic/Dunic-Baum-2017JAE
|
a9fb1d91b232c2530b6d1e6be413c65169ad1c15
|
cb9e7f3673f7d5b082fabcbf94e09315dcb54fa9
|
refs/heads/master
| 2021-01-12T05:49:10.970449
| 2016-12-23T07:22:14
| 2016-12-23T07:22:14
| 77,206,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,197
|
r
|
03_func.r
|
# Change active plot window (quartz())
A <- function(PlotWindow) {
  # Make graphics device number PlotWindow the active device (e.g. to switch
  # between open quartz() windows). Returns the new active device number.
  dev.set(which=PlotWindow)
}
# ggplot default colour list for n colours
gg_colour_hue <- function(n) {
  # n evenly spaced hues around the HCL colour wheel — reproduces ggplot2's
  # default discrete palette (luminance 65, chroma 100).
  hue_angles <- seq(15, 375, length.out = n + 1)[seq_len(n)]
  hcl(h = hue_angles, l = 65, c = 100)
}
################################################################################
############# Graph equations and formatting ############
################################################################################
# Get geometric mean
# Geometric mean of the positive elements of x.
#
# Fixes two defects in the one-liner it replaces:
# * the denominator was length(x), so zeros, negatives, and NAs silently
#   dragged the result toward zero instead of being excluded consistently;
# * with na.rm = TRUE, NAs were dropped from the log-sum but still counted
#   in the denominator.
# Zeros and negatives are excluded (their log is undefined); returns NaN
# when no positive values remain.
gm_mean <- function(x, na.rm = TRUE) {
  if (na.rm) {
    x <- x[!is.na(x)]
  }
  pos <- x[x > 0]
  # mean(log(.)) over the same elements, back-transformed
  exp(mean(log(pos)))
}
# extract ggplot legend to stick beside gridExtra plots as desired:
extract_legend <- function(a.gplot) {
  # Render the ggplot into its grob table and return the legend
  # ("guide-box") grob, e.g. for placing beside a gridExtra arrangement.
  gtab <- ggplot_gtable(ggplot_build(a.gplot))
  grob_names <- sapply(gtab$grobs, function(g) g$name)
  legend_idx <- which(grob_names == "guide-box")
  gtab$grobs[[legend_idx]]
}
write_sma_eqn <- function(df, y){
  # Fit a log-log OLS regression of y on df$SL and return a plotmath string
  # "y = b . x ± a, r2 = ..." for annotating plots (use with parse = TRUE).
  #
  # df: data frame containing a numeric SL column.
  # y:  response vector, same length as df$SL.
  m <- lm(log(y) ~ log(SL), df)
  a <- unname(coef(m)[1])
  b <- unname(coef(m)[2])
  # Format once; the sign is rendered via the +/- branch below, so the
  # displayed intercept is always |a|.
  l <- list(a = format(abs(a), digits = 2),
            b = format(b, digits = 2),
            r2 = format(summary(m)$r.squared, digits = 2, nsmall = 2))
  # Compare the *numeric* intercept with 0. The original compared the
  # formatted character string with 0, which only worked by accident of
  # string ordering ("-..." sorting below "0").
  if (a >= 0) {
    eq <- substitute(italic(y) == b %.% italic(x) + a*","~~italic(r)^2~"="~r2, l)
  } else {
    eq <- substitute(italic(y) == b %.% italic(x) - a*","~~italic(r)^2~"="~r2, l)
  }
  as.character(as.expression(eq))
}
# Build plotmath annotation strings for each group in an SMA summary data
# frame (one row per group, as produced by mk_spp_summary(), plus a 'sig'
# column from get_sig()).
# Returns a data.frame with five columns: group label, combined eqn+r2
# (atop), equation with significance mark, r2 alone, and n.
# NOTE(review): 'l$int >= 0' compares a *formatted character string* with 0;
# it happens to work because negative numbers format with a leading "-",
# but comparing the numeric elevation would be safer.
# NOTE(review): the result matrix is grown with rbind inside the loop;
# fine for small group counts, quadratic for large ones.
write_group_sma_eqn <- function(sma_summary_df, group_column) {
  df <- sma_summary_df
  m = matrix(data=NA, nrow=0, ncol=5)
  count <- length(group_column)
  for (i in (1:count)) {
    # Pre-format the numbers once per group for substitution into plotmath
    l <- list(slp = format(sma_summary_df$slope[i], digits=2),
      int = format(sma_summary_df$elev[i], digits=2),
      r2 = format(sma_summary_df$xy_r2[i], digits=2, nsmall = 2),
      count = sma_summary_df$n[i],
      sig = sma_summary_df$sig[i]
    )
    if (l$int >= 0) {
      # Positive intercept: render "+ int"
      eqn_r2 <- substitute(atop(italic(r)^2~"="~r2, italic(y) ==
        slp%.%italic(x) + int), l)
      eqn <- substitute(italic(y) == slp*italic(x) + int*sig, l)
      r2 <- substitute(italic(r)^2~"="~r2, l)
      n <- substitute(italic(n) ~ "=" ~ count, l)
    } else {
      # Negative intercept: re-format with abs() and render "- int"
      l <- list(slp = format(sma_summary_df$slope[i], digits=2),
        int = format(abs(sma_summary_df$elev[i]), digits=2),
        r2 = format(sma_summary_df$xy_r2[i], digits=2, nsmall = 2),
        count = sma_summary_df$n[i],
        sig = sma_summary_df$sig[i]
      )
      eqn_r2 <- substitute(atop(italic(r)^2~"="~r2, italic(y) ==
        slp%.% italic(x) - int), l)
      eqn <- substitute(italic(y) == slp*italic(x) - int*sig, l)
      r2 <- substitute(italic(r)^2~"="~r2, l)
      n <- substitute(italic(n) ~ "=" ~ count, l)
    }
    #browser()
    # Deparse the plotmath calls into strings usable with parse = TRUE
    eqn_r2 <- as.character(as.expression(eqn_r2))
    eqn <- as.character(as.expression(eqn))
    r2 <- as.character(as.expression(r2))
    n <- as.character(as.expression(n))
    m <- rbind(m, c(as.character(df[[1]][i]), eqn_r2, eqn, r2, n))
    #m <- rbind(m, c(as.character(df[i,1]), lm_eq))
  }
  # Last expression (an assignment) is returned invisibly as the result
  m <- as.data.frame(m)
}
count_spp <- function(df) {
  # Per-species sample sizes: one row per SpeciesCode with the count (len)
  # and a plotmath-ready label string (n, e.g. "n == 12").
  # Requires plyr to be attached; df must contain a SpeciesCode column.
  ddply(.data = df, .(SpeciesCode), summarize,
    len = length(SpeciesCode),
    n = paste("n ==", len)
  )
}
# test whether a slope is allometric
get_allometry <- function(slope, p_val, iso_val = 1) {
  # Classify a fitted slope relative to the isometric expectation iso_val:
  # 'N' = negative allometry, 'P' = positive allometry, 'I' = not
  # distinguishable from isometry (p_val > 0.05 or slope == iso_val).
  if (p_val > 0.05) {
    return('I')
  }
  if (slope < iso_val) {
    'N'
  } else if (slope > iso_val) {
    'P'
  } else {
    'I'
  }
}
get_sig <- function(p_val) {
  # Significance marker for annotations: "*" when p < 0.05, else "".
  if (p_val < 0.05) "*" else ""
}
################################################################################
############# SMA Functions ############
################################################################################
run_sma <- function(df, gapeType = c("gh", "gw", "ga"), robust = TRUE) {
  # Fit a standardized major axis regression of the chosen gape dimension
  # on standard length (smatr::sma) on log10-log10 axes, testing the slope
  # against its isometric expectation.
  #
  # df:       data with columns SL and the chosen gape column (gh/gw/ga).
  # gapeType: gape dimension to model; match.arg() means the default now
  #           resolves to "gh" instead of erroring inside switch().
  # robust:   passed through to sma()'s robust estimation flag.
  gapeType <- match.arg(gapeType)
  # Linear gape dimensions (height/width) are isometric with slope 1;
  # gape *area* scales with length squared, hence slope.test = 2.
  iso_slope <- if (gapeType == "ga") 2 else 1
  # Collapses six near-identical switch arms (robust TRUE/FALSE x three
  # gape columns) into one parameterized call.
  sma(as.formula(paste(gapeType, "~ SL")), data = df, log = "xy",
      method = "SMA", robust = robust, slope.test = iso_slope)
}
check_assump <- function(sma_object, plotTitle) {
  # Diagnostic plots for an smatr::sma fit: a QQ plot of residuals, then a
  # residual plot titled plotTitle with a red zero reference line.
  # Called for its side effects on the active graphics device.
  plot(sma_object, which = "qq")
  plot(sma_object, which = "residual")
  title(main = plotTitle)
  abline(h=0, col="red")
}
mk_sma_df <- function(t) {
  # Flatten the first coefficient table of an sma-style fit object into a
  # one-row data.frame: elevation and slope estimates with their lower and
  # upper CI bounds, plus r2, n, and the correlation p-value.
  ct <- t$coef[[1]]  # rows: elevation, slope; cols: estimate, lower, upper
  data.frame(
    elevation  = ct[1, 1],
    lw_ci_elev = ct[1, 2],
    up_ci_elev = ct[1, 3],
    slope      = ct[2, 1],
    lw_ci_slp  = ct[2, 2],
    up_ci_slp  = ct[2, 3],
    r2   = t$r2[[1]],
    n    = t$n[[1]],
    pval = t$pval[[1]]
  )
}
mk_sma_graph_df <- function(sma_summary_df, num_groups, group_name) {
  # Turn a mk_sma_summary()-style data frame (one column per group, rows
  # 1 = elevation, 3 = slope, 10 = from, 11 = to) into segment endpoints
  # for plotting SMA fit lines on log10-log10 axes, plus the intercept of
  # a slope-2 reference line through the segment midpoint.
  #
  # Fixes two defects in the original: rows were accumulated by rbind-ing
  # character matrices into a data.frame (fragile, quadratic), and every
  # numeric column then had to be repaired with nine copy-pasted
  # as.numeric(as.character(...)) lines. Columns are now built numeric.
  rows <- vector("list", num_groups)
  for (i in seq_len(num_groups)) {
    from <- as.numeric(sma_summary_df[10, i])
    to   <- as.numeric(sma_summary_df[11, i])
    slp  <- as.numeric(sma_summary_df[3, i])
    int  <- as.numeric(sma_summary_df[1, i])
    # Endpoints in data units: the fit is linear in log10 space
    yfrom <- 10^(slp * log10(from) + int)
    yto   <- 10^(slp * log10(to) + int)
    # Geometric midpoints of the segment on each axis
    midpoint_x <- sqrt(from * to)
    midpoint_y <- sqrt(yfrom * yto)
    # Intercept of a slope-2 (area isometry) line through the midpoint
    ref_intercept <- log10(midpoint_y / (midpoint_x^2))
    rows[[i]] <- data.frame(
      group = colnames(sma_summary_df)[i],
      slp = slp, int = int, from = from, to = to,
      yfrom = yfrom, yto = yto,
      midpoint_x = midpoint_x, midpoint_y = midpoint_y,
      ref_intercept = ref_intercept,
      stringsAsFactors = FALSE
    )
  }
  sma_graph_df <- do.call(rbind, rows)
  names(sma_graph_df)[1] <- group_name
  sma_graph_df
}
# Used for single group
# Collapse a single-group smatr::sma fit into a one-column summary
# data.frame (rows: elevation, slope test value, slope, slope CI bounds,
# slope-test p-value, xy r^2, xy correlation p-value, n, and the x-range
# the line was fitted over). The column is named after the fit's group.
# NOTE(review): the 'group' argument is never used, and the commented-out
# grouping branch appears superseded by mk_spp_summary().
mk_sma_summary <- function(sma_object, group="column_name") {
  rows <- c('elev', 'slp_test', 'slope', 'lower_ci', 'upper_ci',
    'slp_p_value', 'xy_r^2', 'xy_corr_p_value', 'n', 'from', 'to')
  #if (grouping==F) {
  # Pull each statistic out of the sma object's nested list structure
  elev = coef(sma_object)[[1]]
  slp_test = sma_object$slopetest[[1]][[4]]
  slope = sma_object$slopetest[[1]][[5]]
  lower = sma_object$slopetest[[1]][6][[1]][[1]]
  upper = sma_object$slopetest[[1]][6][[1]][[2]]
  slp_p_val = sma_object$slopetest[[1]][[3]]
  xy_r2 = sma_object$r2[[1]]
  xy_cor = sma_object$pval[[1]]
  n = sma_object$n[[1]]
  from = sma_object$from[[1]]
  to = sma_object$to[[1]]
  # Assemble as a single named column of statistics
  columns <- c(elev, slp_test, slope, lower, upper, slp_p_val, xy_r2, xy_cor, n,
    from, to)
  sma_df <- data.frame(columns, row.names=rows)
  names(sma_df) <- sma_object$groups
  #sma_df <- format(sma_df[1], digits=3, sci=F)
  return(sma_df)
}
# Summarize SMA fits into one row per species/group.
# Two input shapes are supported:
# * grouping == F: sma_object is a *list* of per-species sma fits (e.g.
#   from dlply); num_spp gives how many to summarize.
# * grouping == T: sma_object is a single sma fit created with a grouping
#   factor (y ~ x * group); one row is produced per group.
# Returns a data.frame with columns: (group,) elev, slp_test, slope,
# lower, upper, slp_p_val, xy_r2, xy_cor, n, from, to.
# NOTE(review): 'group_name' is accepted but never used. In the grouped
# branch, assigning the all-character 'row' into sma_df coerces every
# column to character, hence the as.numeric() repair loop at the end.
mk_spp_summary <- function(sma_object, num_spp=NA, grouping=F, group_name) {
  # Use (grouping == F) when multiple sma_objects are generated using dlply
  # Use (grouping == T) when the sma_object was generated using x~y*group
  if (grouping==F) {
    # Start from an empty typed frame; rows are appended per species
    sma_df <- data.frame(elev=numeric(), slp_test=numeric(), slope=numeric(),
      upper=numeric(), lower=numeric(), slp_p_val=numeric(),
      xy_r2=numeric(), xy_cor=numeric(), n=numeric(),
      from=numeric(), to=numeric()
    )
    for (i in 1:num_spp) {
      #spp = as.factor(names(sma_object[[1]]))
      #spp = as.character(attr(sma_object[i], which="split_labels")[[i]][[1]])
      # Extract statistics from the i-th fit's nested list structure
      elev = coef(sma_object[[i]])[[1]]
      slp_test = sma_object[[i]]$slopetest[[1]][[4]]
      slope = sma_object[[i]]$slopetest[[1]][[5]]
      lower = sma_object[[i]]$slopetest[[1]][6][[1]][[1]]
      upper = sma_object[[i]]$slopetest[[1]][6][[1]][[2]]
      slp_p_val = sma_object[[i]]$slopetest[[1]][[3]]
      xy_r2 = sma_object[[i]]$r2[[1]]
      xy_cor = sma_object[[i]]$pval[[1]]
      n = sma_object[[i]]$n[[1]]
      from = sma_object[[i]]$from[[1]]
      to = sma_object[[i]]$to[[1]]
      row <- c(elev, slp_test, slope, lower, upper, slp_p_val, xy_r2, xy_cor,
        n, from, to)
      sma_df <- rbind(sma_df, row)
      columns <- c("elev", "slp_test", "slope", "lower", "upper", "slp_p_val",
        "xy_r2", "xy_cor", "n", "from", "to")
      #sma_df <- data.frame(columns, row.names=rows)
      names(sma_df) <- columns
      #sma_df <- format(sma_df[1], digits=3, sci=F)
    }
    return(sma_df)
  } else if (grouping==T) {
    sma_df <- data.frame(group=character(), elev=numeric(), slp_test=numeric(),
      slope=numeric(), upper=numeric(), lower=numeric(),
      slp_p_val=numeric(), xy_r2=numeric(), xy_cor=numeric(),
      n=numeric(), from=numeric(), to=numeric(),
      stringsAsFactors=FALSE
    )
    for (i in 1:length(sma_object$groups)) {
      # Per-group statistics live in parallel list slots of the single fit
      elev = sma_object$coef[[i]][[1]][1]
      slp_test = sma_object$slopetest[[i]]$test.value
      slope = sma_object$slopetest[[i]]$b
      lower = sma_object$slopetest[[i]]$ci[1, 1]
      upper = sma_object$slopetest[[i]]$ci[1, 2]
      slp_p_val = sma_object$slopetest[[i]]$p
      xy_r2 = sma_object$r[i][[1]]
      xy_cor = sma_object$pval[i][[1]]
      n = sma_object$n[i][[1]]
      from = sma_object$from[i][[1]]
      to = sma_object$to[i][[1]]
      group = sma_object$groups[i][[1]]
      row <- c("group"=as.character(group), "elev"=as.numeric(elev),
        "slp_test"=as.numeric(slp_test), "slope"=as.numeric(slope),
        "lower"=as.numeric(lower), "upper"=as.numeric(upper),
        "slp_p_val"=as.numeric(slp_p_val), "xy_r2"=as.numeric(xy_r2),
        "xy_cor"=as.numeric(xy_cor), "n"=as.numeric(n),
        "from"=as.numeric(from), "to"=as.numeric(to))
      sma_df[i, ] <- row
    }
    columns <- c("group", "elev", "slp_test", "slope", "lower", "upper",
      "slp_p_val", "xy_r2", "xy_cor", "n", "from", "to")
    #sma_df <- data.frame(columns, row.names=rows)
    names(sma_df) <- columns
    #sma_df <- format(sma_df[1], digits=3, sci=F)
    # Repair the character coercion introduced by the row assignment above
    for (x in 2:12) {
      sma_df[, x] <- as.numeric(sma_df[, x])
    }
    return(sma_df)
  }
}
mk_smaSPP_graph_df <- function(sma_summary_df, num_spp, group_name, iso_slope = 1) {
  # Turn a mk_spp_summary()-style data frame (one *row* per species; cols
  # 1 = group, 2 = elevation, 3 = slope test value, 4 = slope, 11 = from,
  # 12 = to) into segment endpoints for plotting SMA fit lines on
  # log10-log10 axes, plus the intercept of an isometric reference line
  # (slope iso_slope) through each segment's midpoint.
  #
  # Fixes the same defects as mk_sma_graph_df(): rows were accumulated by
  # rbind-ing character matrices and then repaired with ten copy-pasted
  # as.numeric(as.character(...)) lines; columns are now built numeric.
  rows <- vector("list", num_spp)
  for (i in seq_len(num_spp)) {
    from <- as.numeric(sma_summary_df[i, 11])
    to   <- as.numeric(sma_summary_df[i, 12])
    slp  <- as.numeric(sma_summary_df[i, 4])
    int  <- as.numeric(sma_summary_df[i, 2])
    # Segment endpoints in data units (fit is linear in log10 space)
    yfrom <- 10^(slp * log10(from) + int)
    yto   <- 10^(slp * log10(to) + int)
    # Geometric midpoints on each axis
    midpoint_x <- sqrt(from * to)
    midpoint_y <- sqrt(yfrom * yto)
    # Intercept of the isometric (slope = iso_slope) line through the midpoint
    ref_intercept_iso <- log10(midpoint_y / midpoint_x^(iso_slope))
    rows[[i]] <- data.frame(
      group = as.character(sma_summary_df[i, 1]),
      slp = slp, int = int, from = from, to = to,
      yfrom = yfrom, yto = yto,
      midpoint_x = midpoint_x, midpoint_y = midpoint_y,
      ref_intercept_iso = ref_intercept_iso,
      slope_test = as.numeric(sma_summary_df[i, 3]),
      stringsAsFactors = FALSE
    )
  }
  sma_graph_df <- do.call(rbind, rows)
  names(sma_graph_df)[1] <- group_name
  sma_graph_df
}
# Makes SMA plots for Families all on one graph
# ==============================================================================
# Faceted log-log scatterplot of gape size vs standard length with fitted
# SMA segments overlaid.
# df_points supplies the raw observations; df_lines supplies segment
# endpoints (from/to/yfrom/yto) as built by mk_sma_graph_df()/
# mk_smaSPP_graph_df(). 'labels' optionally jitters text labels onto the
# points ("None" to suppress).
# NOTE(review): gapeType, point_colour, labels, and facetting must each be
# supplied as a *single* string; the c(...) defaults only document the
# choices — switch()/if() below would fail if a vector were left in place.
mk_SMAfacets <- function( df_points, df_lines, gapeType = c("gh", "gw", "ga"),
  point_colour = c("j_fg", "Family", "SpeciesCode", "Region", "dissected_by",
    "observer_id"),
  labels = c("dissected_by", "Region", "SpecimenID", "None"),
  facetting = c("j_fg", "Family", "SpeciesCode", "Region", "dissected_by",
    "observer_id"),
  facet_columns ) {
  # Base layer: points coloured by the chosen grouping plus fitted segments
  plot_base <- ggplot(data = df_points, aes_string(x = "SL", y = gapeType)) +
    geom_point( aes_string(colour = point_colour)) +
    geom_segment(data = df_lines, aes(x = from, xend = to, y = yfrom,
      yend = yto)) +
    scale_y_log10() +
    scale_x_log10() +
    xlab("log(standard length, mm)") +
    theme_bw()
  # Axis label matches the gape dimension being plotted
  switch(gapeType,
    "gh" = { plot_base <- plot_base + ylab("log(vertical gape, mm)") },
    "gw" = { plot_base <- plot_base + ylab("log(horizontal gape, mm)") },
    "ga" = { plot_base <- plot_base + ylab(expression(
      paste("log(gape area ", mm^2, ")", sep= ""))) }
  )
  if (labels == "None") {
    plot1 <- plot_base
  } else {
    # Jittered text labels for diagnosing individual observations
    plot1 <- plot_base + geom_text(position = position_jitter(w = 0.02,
      h = 0.02), aes_string(label = labels), size = 2)
  }
  plot1 + facet_wrap( as.formula(sprintf('~ %s', facetting)), ncol = facet_columns )
}
theme_L_border <- function(colour = "black", size = 1, linetype = 1) {
  # ggplot2 "theme element" that draws an L-shaped panel border (bottom and
  # left edges only) as a single polyline grob.
  # NOTE(review): this uses a long-deprecated ggplot2 internal theme
  # protocol (a classed drawing function); confirm it still works with the
  # ggplot2 version in use.
  structure(
    function(x = 0, y = 0, width = 1, height = 1, ...) {
      # Trace bottom-right -> bottom-left -> top-left in npc units.
      # Fix: removed the stray trailing comma after gp = ..., which passed
      # an empty extra argument into polylineGrob().
      polylineGrob(
        x = c(x + width, x, x), y = c(y, y, y + height), ...,
        default.units = "npc",
        gp = gpar(lwd = size, col = colour, lty = linetype)
      )
    },
    class = "theme",
    type = "box",
    call = match.call()
  )
}
# Publication-style faceted SMA plot: grey open points, black fitted
# segments, per-facet equation annotations (eqn_df$eqn, parsed plotmath at
# a fixed position), and a dashed slope-2 reference line through each
# segment midpoint (df_lines$ref_intercept).
# NOTE(review): the two invisible geom_point anchors at (10, 1) and
# (650, 12000) force a common axis range across facets despite
# scales = "free"; adjust them if the data range changes.
# NOTE(review): gapeType, labels, and facetting must be single strings;
# the c(...) defaults only document the choices.
mk_SMAfacets2 <- function( df_points, df_lines, gapeType = c("gh", "gw", "ga"),
  #point_colour = c("j_fg", "Family", "SpeciesCode", "Region", "dissected_by"),
  labels = c("dissected_by", "Region", "SpecimenID", "None"),
  facetting = c("j_fg", "Family", "SpeciesCode", "Region", "dissected_by"),
  facet_columns, eqn_df
  ) {
  plot_base <- ggplot(data = df_points, aes_string(x = "SL", y = gapeType)) +
    geom_point(shape = 1, colour = "grey") +
    geom_segment(data = df_lines, aes(x = from, xend = to, y = yfrom,
      yend = yto)) +
    scale_y_log10() +
    scale_x_log10() +
    xlab("log(standard length, mm)") +
    theme_classic() +
    theme(strip.background = element_blank(),
      plot.background = element_blank(),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      panel.border = element_blank(),
      panel.background = element_blank(),
      axis.line = element_line(colour = "black"),
      axis.line.x = element_line(colour = "black")
    ) +
    # Invisible anchor points: pin a shared axis range across facets
    geom_point(aes(x = 10, y = 1), alpha = 0) +
    geom_point(aes(x = 650, y = 12000), alpha = 0) +
    # Plotmath equation annotation at a fixed data position per facet
    geom_text(data = eqn_df, aes(x=280, y=3.5,
      label=eqn), parse=TRUE, size = 3.5) +
    # Dashed slope-2 (area isometry) reference line through the midpoint
    geom_abline(data = df_lines, aes_string(intercept = "ref_intercept"),
      slope = 2, linetype = 2, colour = "grey50")
  # Axis label matches the gape dimension being plotted
  switch(gapeType,
    "gh" = { plot_base <- plot_base + ylab("log(vertical gape, mm)") },
    "gw" = { plot_base <- plot_base + ylab("log(horizontal gape, mm)") },
    "ga" = { plot_base <- plot_base + ylab(expression(
      paste("log(gape area ", mm^2, ")", sep= ""))) }
  )
  if (labels == "None") {
    plot1 <- plot_base
  } else {
    # Jittered text labels for diagnosing individual observations
    plot1 <- plot_base + geom_text(position = position_jitter(w = 0.02,
      h = 0.02), aes_string(label = labels), size = 2)
  }
  plot1 + facet_wrap( as.formula(sprintf('~ %s', facetting)), ncol = facet_columns,
    scales = "free")
}
# One panel of a multi-species SMA figure: all functional-group points in
# grey, the focal species' points in black with its fitted SMA segment, a
# dashed isometric reference line (slope 1 on log-log axes, intercept
# fg_line_intercept), and plotmath equation / r2 / n annotations at the
# supplied data coordinates (right-justified). The axis-label and
# axis-text toggles let panels share axes in a grid layout; plot_title is
# italicized (species name).
# NOTE(review): 'y_value' is accepted but never used; the y label is
# hard-coded to "gape height" regardless of gape_dim.
mk_multipanel_plots2 <- function(fg_point_df, spp_point_df, spp_line_df_row,
  #ref_intercept_row,
  eqn_df, eqn_x, eqn_y, r2_x, r2_y, n_x, n_y, x_axis_labels=TRUE,
  y_axis_labels=TRUE, fg_line_intercept, y_axis_text = TRUE, x_axis_text = TRUE,
  plot_title = "", y_value, gape_dim = 'gh')
  {
  plotTitle <- substitute(italic(plot_title), list(plot_title = plot_title))
  plot_base <-
    ggplot(data = fg_point_df, aes_string(x = "SL", y = gape_dim)) +
    geom_point(shape = 1, colour = "grey") +
    # Focal species' fitted SMA segment and points
    geom_segment(data = spp_line_df_row, aes_string(x = "from", xend = "to",
      y = "yfrom", yend = "yto")) +
    geom_point(data = spp_point_df, colour = "black", shape = 1) +
    scale_y_log10() +
    scale_x_log10() +
    # Dashed isometric reference (slope 1 in log-log space)
    geom_abline(intercept = fg_line_intercept, slope = 1, linetype = 2,
      colour = "darkgrey") +
    theme_bw() +
    theme(panel.border = element_blank(),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank()) +
    theme(axis.line.x = element_line(color = 'black'),
      axis.line.y = element_line(color = 'black')) +
    # Right-justified plotmath annotations at caller-chosen positions
    geom_text(data = eqn_df, aes_string(x = eqn_x, y = eqn_y,
      label = "eqn"), parse = TRUE, size = 3, hjust = 1) +
    geom_text(data = eqn_df, aes_string(x = r2_x, y = r2_y,
      label = "r2"), parse = TRUE, size = 3, hjust = 1) +
    geom_text(data = eqn_df, aes_string(x = n_x, y = n_y,
      label = "n"), parse = TRUE, size = 3, hjust = 1) +
    labs(title = bquote(plain(.(plotTitle)))) +
    theme(plot.title = element_text(size = 9),
      axis.text = element_text(size = 8),
      axis.ticks.length = unit(-0.1, "cm"),
      axis.text.y = element_text(margin = margin(0, 5, 0, 0)),
      axis.text.x = element_text(margin = margin(5, 0, 0, 0), vjust = 1))
  #plot <- plot_base +
  # Toggle axis titles and tick labels so panels can share axes in a grid
  if (x_axis_labels == TRUE) {
    plot1 <- plot_base + xlab("standard length, mm")
  } else if (x_axis_labels == FALSE) {
    plot1 <- plot_base + theme(axis.title.x = element_blank())
  }
  if (y_axis_labels == TRUE) {
    plot2 <- plot1 + ylab(expression(paste("gape height, ", mm, "", sep= "")))
  } else if (y_axis_labels == FALSE) {
    plot2 <- plot1 + theme(axis.title.y = element_blank())
  }
  if (y_axis_text == TRUE) {
    plot3 <- plot2
  } else if (y_axis_text == FALSE) {
    plot3 <- plot2 + theme(axis.text.y = element_blank())
  }
  if (x_axis_text == TRUE) {
    plot4 <- plot3
  } else if (x_axis_text == FALSE) {
    plot4 <- plot3 + theme(axis.text.x = element_blank())
  }
  plot4
}
# Mass-based variant of mk_multipanel_plots2(): gape dimension plotted
# against body mass ('wt') with a dashed slope-1/3 reference line
# (gape ~ mass^(1/3) under length-mass isometry).
# NOTE(review): the x-axis label still says "standard length, mm" even
# though x is body mass — likely a copy-paste slip worth confirming.
# NOTE(review): 'ref_intercept_row' and 'y_value' are accepted but never
# used; the y label is hard-coded to "gape height" regardless of gape_dim.
mk_multipanel_plots_mass <- function(fg_point_df, spp_point_df, spp_line_df_row,
  ref_intercept_row,
  eqn_df, eqn_x, eqn_y, r2_x, r2_y, n_x, n_y, x_axis_labels=TRUE,
  y_axis_labels=TRUE, fg_line_intercept, y_axis_text = TRUE, x_axis_text = TRUE,
  plot_title = "", y_value, gape_dim = 'gh')
  {
  plotTitle <- substitute(italic(plot_title), list(plot_title = plot_title))
  plot_base <-
    ggplot(data = fg_point_df, aes_string(x = "wt", y = gape_dim)) +
    geom_point(shape = 1, colour = "grey") +
    # Focal species' fitted SMA segment and points
    geom_segment(data = spp_line_df_row, aes_string(x = "from", xend = "to",
      y = "yfrom", yend = "yto")) +
    geom_point(data = spp_point_df, colour = "black", shape = 1) +
    scale_y_log10() +
    scale_x_log10() +
    # Dashed isometric reference: slope 1/3 in log-log space vs mass
    geom_abline(intercept = fg_line_intercept, slope = 1 / 3, linetype = 2,
      colour = "darkgrey") +
    theme_bw() +
    theme(panel.border = element_blank(),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank()) +
    theme(axis.line = element_line(color = 'black')) +
    # Right-justified plotmath annotations at caller-chosen positions
    geom_text(data = eqn_df, aes_string(x = eqn_x, y = eqn_y,
      label = "eqn"), parse = TRUE, size = 3, hjust = 1) +
    geom_text(data = eqn_df, aes_string(x = r2_x, y = r2_y,
      label = "r2"), parse = TRUE, size = 3, hjust = 1) +
    geom_text(data = eqn_df, aes_string(x = n_x, y = n_y,
      label = "n"), parse = TRUE, size = 3, hjust = 1) +
    labs(title = bquote(plain(.(plotTitle)))) +
    theme(plot.title = element_text(size = 9),
      axis.text = element_text(size = 8),
      axis.ticks.length = unit(-0.1, "cm"),
      axis.text.y = element_text(margin = margin(0, 5, 0, 0)),
      axis.text.x = element_text(margin = margin(5, 0, 0, 0), vjust = 1))
  #plot <- plot_base +
  # Toggle axis titles and tick labels so panels can share axes in a grid
  if (x_axis_labels == TRUE) {
    plot1 <- plot_base + xlab("standard length, mm")
  } else if (x_axis_labels == FALSE) {
    plot1 <- plot_base + theme(axis.title.x = element_blank())
  }
  if (y_axis_labels == TRUE) {
    plot2 <- plot1 + ylab(expression(paste("gape height, ", mm, "", sep= "")))
  } else if (y_axis_labels == FALSE) {
    plot2 <- plot1 + theme(axis.title.y = element_blank())
  }
  if (y_axis_text == TRUE) {
    plot3 <- plot2
  } else if (y_axis_text == FALSE) {
    plot3 <- plot2 + theme(axis.text.y = element_blank())
  }
  if (x_axis_text == TRUE) {
    plot4 <- plot3
  } else if (x_axis_text == FALSE) {
    plot4 <- plot3 + theme(axis.text.x = element_blank())
  }
  plot4
}
# Single-species plot for corallivores: a gape dimension (log y) against
# standard length "SL" (log x), species points (black) over functional-group
# points (grey), with a dashed slope-1 reference line (isometry between two
# linear dimensions on log-log axes) and equation / r2 / n annotations from
# eqn_df.  Unlike the other panel builders there is no fitted SMA segment
# (the ref_intercept_row argument is commented out of the signature).
#
# NOTE(review): the y-axis title is always "gape height" regardless of
# gape_dim, and the y_value argument is never used in this body.
mk_corallivore_plot <- function(fg_point_df, spp_point_df,
                            #ref_intercept_row,
  eqn_df, eqn_x, eqn_y, r2_x, r2_y, n_x, n_y, x_axis_labels=TRUE,
  y_axis_labels=TRUE, fg_line_intercept, y_axis_text = TRUE, x_axis_text = TRUE,
  plot_title = "", y_value, gape_dim = 'gh')
  {
  # italicised panel title built from the species name
  plotTitle <- substitute(italic(plot_title), list(plot_title = plot_title))
  plot_base <-
    ggplot(data = fg_point_df, aes_string(x = "SL", y = gape_dim)) +
    geom_point(shape = 1, colour = "grey") +
    geom_point(data = spp_point_df, colour = "black", shape = 1) +
    scale_y_log10() +
    scale_x_log10() +
    # dashed reference: slope 1 on log-log axes
    geom_abline(intercept = fg_line_intercept, slope = 1, linetype = 2,
        colour = "darkgrey") +
    theme_bw() +
    theme(panel.border = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank()) +
    theme(axis.line = element_line(color = 'black')) +
    # equation / r2 / n annotations, right-aligned at given coordinates
    geom_text(data = eqn_df, aes_string(x = eqn_x, y = eqn_y,
        label = "eqn"), parse = TRUE, size = 3, hjust = 1) +
    geom_text(data = eqn_df, aes_string(x = r2_x, y = r2_y,
        label = "r2"), parse = TRUE, size = 3, hjust = 1) +
    geom_text(data = eqn_df, aes_string(x = n_x, y = n_y,
        label = "n"), parse = TRUE, size = 3, hjust = 1) +
    labs(title = bquote(plain(.(plotTitle)))) +
    theme(plot.title = element_text(size = 9),
          axis.text = element_text(size = 8),
          axis.ticks.length = unit(-0.1, "cm"),
          axis.text.y = element_text(margin = margin(0, 5, 0, 0)),
          axis.text.x = element_text(margin = margin(5, 0, 0, 0), vjust = 1))
  #plot <- plot_base +
  # toggle axis titles / tick labels so edge panels carry the labels
  if (x_axis_labels == TRUE) {
    plot1 <- plot_base + xlab("standard length, mm")
  } else if (x_axis_labels == FALSE) {
    plot1 <- plot_base + theme(axis.title.x = element_blank())
  }
  if (y_axis_labels == TRUE) {
    plot2 <- plot1 + ylab(expression(paste("gape height, ", mm, "", sep= "")))
  } else if (y_axis_labels == FALSE) {
    plot2 <- plot1 + theme(axis.title.y = element_blank())
  }
  if (y_axis_text == TRUE) {
    plot3 <- plot2
  } else if (y_axis_text == FALSE) {
    plot3 <- plot2 + theme(axis.text.y = element_blank())
  }
  if (x_axis_text == TRUE) {
    plot4 <- plot3
  } else if (x_axis_text == FALSE) {
    plot4 <- plot3 + theme(axis.text.x = element_blank())
  }
  # return the assembled ggplot object
  plot4
}
# Plot SMA (standardised major axis) fits over log-log gape-dimension data,
# either overlaid in one panel coloured by `grouping` (facets = FALSE) or
# facetted by `grouping` (facets = TRUE).
#
# Args:
#   df_points: data frame of observations; must contain the columns named by
#       `x`, `gapeType`, `grouping`, and any label column used.
#   df_lines:  data frame of fitted-line segments with columns
#       from / to / yfrom / yto.
#   facets:    facet by `grouping` (TRUE) or colour within one panel (FALSE).
#   x:         name of the x variable (default "SL", standard length).
#   gapeType:  which gape dimension goes on y: "gh", "gw" or "ga".
#   grouping:  grouping variable for colour/facetting; must be a single string
#       when supplied (switch() below requires length 1).
#   labels:    what, if anything, to draw as jittered point labels.
#   axis_labels: currently unused -- kept for backward compatibility.
#
# Returns: a ggplot object.
#
# NOTE(review): scale_x_log10/scale_y_log10 appear twice in plot_base; the
# later pretty_breaks versions replace the earlier ones (ggplot emits a
# "scale already present" message but behaviour is as intended).
mk_SMAplot <- function(df_points, df_lines, facets = TRUE, x = "SL", gapeType =
  c("gh", "gw", "ga"), grouping = c("j_fg", "Family", "SpeciesCode", "Region",
  "dissected_by"), labels = c("Region", "Region_colour", "dissected_by",
  "dissected_colour", "SpecimenID", "None"), axis_labels) {
  plot_base <- ggplot(data = df_points, aes_string(x = x, y = gapeType)) +
    scale_y_log10() +
    scale_x_log10() +
    xlab("standard length (mm)") +
    theme_bw() +
    theme(panel.border = element_blank(),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank()) +
    theme(axis.line = element_line(color = 'black')) +
    #labs(title = bquote(plain(.(plotTitle)))) +
    #labs(title = bquote(italic(.(plotTitle)))) +
    theme(axis.ticks.length = unit(-0.2, "cm")) +
    theme(axis.text = element_text(size = 8),
          axis.ticks.length = unit(-0.1, "cm"),
          axis.text.y = element_text(margin = margin(0, 5, 0, 0)),
          axis.text.x = element_text(margin = margin(5, 0, 0, 0), vjust = 1)) +
    scale_x_log10(breaks=scales::pretty_breaks(3)) +
    scale_y_log10(breaks=scales::pretty_breaks(3))
  if (facets == FALSE) {
    # single panel: colour points and fitted segments by the grouping variable
    plot1 <- plot_base + geom_point( aes_string(colour = grouping), size = 1.5 ) +
      geom_segment(data = df_lines, aes(x = from, xend = to, y = yfrom,
        yend = yto)) + aes_string(colour = grouping)
    switch(labels,
      "dissected_by" = { plot1 <- plot1 +
        geom_text(position=position_jitter(w=0.03, h=0.03),
        aes(label=dissected_by), size=3) },
      # FIX: was aes(label=dissected_by) -- copy-paste from the case above;
      # the facetted branch below correctly labels points with Region.
      "Region" = { plot1 <- plot1 +
        geom_text(position=position_jitter(w=0.03, h=0.03),
        aes(label=Region), size=3) },
      "None" = { plot1 <- plot1 }
    )
    # y-axis title depends on which gape dimension is plotted
    switch(gapeType,
      "gh" = { plot1 + ylab("gape height (mm)") },
      "gw" = { plot1 + ylab("gape width, (mm)") },
      "ga" = { plot1 + ylab(expression(paste("gape area (", mm^2, ")",
        sep= ""))) }
    )
  } else if (facets == TRUE) {
    plot2 <- plot_base + geom_point() +
      geom_segment(data = df_lines, aes(x = from, xend = to, y = yfrom,
        yend = yto))
    # optional point labels / colours; unmatched labels values leave plot2
    # unchanged (switch falls through to NULL without assigning)
    switch(labels,
      "dissected_by" = { plot2 <- plot2 +
        geom_point( aes_string(colour=labels) ) +
        geom_text(position=position_jitter(w=0.03, h=0.03),
        aes(label=dissected_by), size=3) },
      "dissected_colour" = { plot2 <- plot2 +
        geom_point( aes(colour=dissected_by)) },
      "Region" = { plot2 <- plot2 +
        geom_point( aes_string(colour=labels) ) +
        geom_text(position=position_jitter(w=0.03, h=0.03),
        aes(label=Region), size=3) },
      "Region_colour" = { plot2 <- plot2 +
        geom_point( aes(colour=Region)) },
      "None" = { plot2 <- plot2 }
    )
    switch(gapeType,
      "gh" = { plot3 <- plot2 + ylab("gape height (mm)") },
      "gw" = { plot3 <- plot2 + ylab("gape width (mm)") },
      "ga" = { plot3 <- plot2 + ylab(expression(paste("gape area (", mm^2, ")",
        sep= ""))) }
    )
    # facet by the requested grouping variable
    switch(grouping,
      "j_fg" = { plot3 + facet_wrap( ~ j_fg) },
      "Family" = { plot3 + facet_wrap( ~ Family) },
      "SpeciesCode" = { plot3 + facet_wrap( ~ SpeciesCode) },
      "Region" = { plot3 + facet_wrap( ~ Region) },
      "dissected_by" = { plot3 + facet_wrap( ~ dissected_by) }
    )
  }
}
# Build a grid viewport addressing the (row, column) cell of the current grid
# layout; used to place individual ggplots into a multipanel page.
set_vp <- function(row, column) {
  viewport(layout.pos.row = row, layout.pos.col = column)
}
################################################################################
############# Predator - Prey Size Functions ############
################################################################################
# groupwise_rq <- function(df, variable) {
# rq <- ddply(df, .(variable), function(z) {
# r <- rq(psize ~ sl, tau = c(0.10, 0.90), data = z)
# })
# }
#
# rq(formula, tau=.5, data, subset, weights, na.action,
# method="br", model = TRUE, contrasts, ...)
#
# groupwise_lm_gw <- function(df, variable) {
# lm <- with(data=df, ddply(df, .(variable), function(z) {
# t <- lm(log(gw)~log(SL), data=z)
# data.frame(int = coefficients(t)[1],
# slope = coefficients(t)[2],
# rsq = summary(t)$r.squared,
# se = summary(t)$coefficients[2,2],
# p_val = summary(t)$coef[2,4])
# }))
# }
# Mapping function taken from SO answer by Joris Meys from:
# http://stackoverflow.com/questions/5353184/fixing-maps-library-data-for-pacific-centred-0-360-longitude-display
# Used to adjust polygons so that they are not left 'open' on the cut when the
# ends (for "world" and "worldHiRes") when the map is pacific ocean-centric.
# xlimits have been added to the final call for map() at the end of the function
# because they were causing islands in the pacific to disappear in first part of
# the function where the polygons are moved around.
# Draw a world map recentred by `center` degrees of longitude (e.g. Pacific-
# centred) without leaving polygons "open" at the cut line.  Fetches the map
# data without plotting, shifts and wraps the longitudes, inserts NA breaks
# into polygons that would otherwise be drawn across the whole plot, and then
# plots the repaired object restricted to `xlimits`.
plot.map <- function(database,center, xlimits, ...){
    Obj <- map(database,...,plot=F)       # fetch polygons without drawing
    coord <- cbind(Obj[[1]],Obj[[2]])     # column 1 = longitude, column 2 = latitude
    # split up the coordinates: NA rows separate individual polygons
    id <- rle(!is.na(coord[,1]))
    id <- matrix(c(1,cumsum(id$lengths)),ncol=2,byrow=T)  # start/end row of each run
    polygons <- apply(id,1,function(i){coord[i[1]:i[2],]})
    # split up polygons that differ too much: after recentring, a polygon that
    # jumps > 300 degrees between consecutive points straddles the cut line,
    # so break it into its negative- and positive-longitude halves with an
    # NA row between them
    polygons <- lapply(polygons,function(x){
        x[,1] <- x[,1] + center
        x[,1] <- ifelse(x[,1]>180,x[,1]-360,x[,1])  # wrap back into (-180, 180]
        if(sum(diff(x[,1])>300,na.rm=T) >0){
          id <- x[,1] < 0
          x <- rbind(x[id,],c(NA,NA),x[!id,])
        }
        x
    })
    # reconstruct the object with the adjusted coordinates
    polygons <- do.call(rbind,polygons)
    Obj[[1]] <- polygons[,1]
    Obj[[2]] <- polygons[,2]
    # xlim applied only here, so Pacific islands survive the recentring above
    map(Obj,..., xlim=xlimits)
}
|
dca5f029a0cbbf1307ecf129ed3f3604b077ca25
|
9b63723928a99c949772da580e3b48fd415de554
|
/man/addContrast.Rd
|
3bbfb14dea434b41283041e3ae31106a767aa12b
|
[] |
no_license
|
auberginekenobi/qsea
|
1441eaaefbeb8f8a33d5050c2a499595dfc80d4e
|
8e38a3aaaf294fef996c4ebf9f2d6280c54dae86
|
refs/heads/master
| 2020-05-26T20:27:58.553027
| 2019-05-24T06:31:47
| 2019-05-24T06:31:47
| 188,364,113
| 0
| 0
| null | 2019-05-24T06:21:20
| 2019-05-24T06:21:20
| null |
UTF-8
|
R
| false
| false
| 1,596
|
rd
|
addContrast.Rd
|
\name{addContrast}
\alias{addContrast}
\title{fit GLMs to reduced model and test for significance}
\description{This function fits negative binomial GLMs to reduced models
defined either by the "contrast" parameter, or by one or several model
coefficients (specified by "coef" parameter) set to zero.
Subsequently, a likelihood ratio test is applied to identify windows
significantly dependent on the tested coefficient.
}
\usage{
addContrast(qs,glm,contrast,coef,name,verbose=TRUE, nChunks = NULL,
parallel = FALSE )
}
\arguments{
\item{qs}{a qseaSet object}
\item{glm}{a qseaGLM object}
\item{contrast}{numeric vector specifying a contrast of the model coefficients.
This contrast can for example be defined using limma::makeContrasts()}
\item{coef}{alternatively defines the contrast by coefficient(s) of the model
tested to be equal to zero.}
\item{name}{short descriptive name for the contrast (such as "TvsN"), used,
for example, in the column names of result tables}
\item{verbose}{more messages that document the process}
\item{nChunks}{fit GLMs in multiple chunks}
\item{parallel}{use multicore processing}
}
\value{This function returns the qseaGLM object, extended by the fitted
coefficients of the reduced GLMs, as well as the test statistics.
Note that one qseaGLM object can contain several contrasts.}
\author{
Mathias Lienhard
}
\seealso{limma::makeContrasts(), fitNBglm(), isSignificant()}
\examples{
qs=getExampleQseaSet()
design=model.matrix(~group, getSampleTable(qs))
TvN_glm=fitNBglm(qs, design, norm_method="beta")
TvN_glm=addContrast(qs,TvN_glm, coef=2, name="TvN")
}
|
9d870849dae9515a212924dff7dc0d9660d5931a
|
fbce12a04f4981b70f110b556bf2087e2ce86f7b
|
/man/ITRLearn-package.Rd
|
65ae257fb556ebfd6e2c7729b40d6dfe586c55ed
|
[] |
no_license
|
cran/ITRLearn
|
70485fc727ecd4096a21c4290df131636ab3cf59
|
b1c68737eafbd0c5b7e159440460f07c44a86110
|
refs/heads/master
| 2020-04-03T17:16:59.263725
| 2018-11-15T04:10:03
| 2018-11-15T04:10:03
| 155,438,685
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,242
|
rd
|
ITRLearn-package.Rd
|
\name{ITRLearn-package}
\alias{ITRLearn-package}
\alias{ITRLearn}
\docType{package}
\title{
Statistical Learning for Individualized Treatment Regime
}
\description{
Maximin-projection learning (MPL, Shi, et al., 2018) is implemented for
recommending a meaningful and reliable individualized treatment regime for future
groups of patients based on the observed data from different populations with
heterogeneity in individualized decision making. Q-learning and A-learning are
implemented for estimating the groupwise contrast function that shares the same
marginal treatment effects. The package contains classical Q-learning and A-learning
algorithms for a single stage study as a byproduct.
}
\details{
\tabular{ll}{
Package: \tab ITRLearn\cr
Type: \tab Package\cr
Version: \tab 1.0-1\cr
Date: \tab 2018-11-14\cr
License: \tab GPL-2\cr
}
}
\author{
Chengchun Shi, Rui Song, Wenbin Lu and Bo Fu
Maintainer: Chengchun Shi <cshi4@ncsu.edu>
}
\references{
Shi, C., Song, R., Lu, W., and Fu, B. (2018). Maximin Projection Learning for Optimal Treatment
Decision with Heterogeneous Individualized Treatment Effects. \emph{Journal of the Royal Statistical
Society, Series B,} \bold{ 80:} 681-702.
}
|
d507d042b191d30f859038388abf3921334b83db
|
3a2a402f434403db37d4f1ad5456185cc5c4f7d7
|
/R/generics.R
|
9cab8068e4883ea6de06f2e2c5e04091997745a4
|
[] |
no_license
|
zdk123/pulsar
|
445090f2814f02a6dfc461fc46737583c29b5996
|
9b78ec8dc01f9bf49497e4ecda9335a40d462e18
|
refs/heads/master
| 2023-02-18T14:26:15.152281
| 2023-01-26T02:25:45
| 2023-01-26T02:25:45
| 40,200,366
| 11
| 8
| null | 2023-02-07T17:19:28
| 2015-08-04T17:59:52
|
R
|
UTF-8
|
R
| false
| false
| 8,821
|
r
|
generics.R
|
#' Print a \code{pulsar.refit} S3 object
#'
#' Print information about the model, path length, graph dimension, criterion and optimal indices and graph sparsity.
#'
#' @param x a \code{pulsar.refit}. output from \code{refit}
#' @param ... ignored
#' @importFrom utils capture.output
#' @export
print.pulsar.refit <- function(x, ...) {
    # Describe the graphical-model estimator and path that were refit.
    fun_text <- capture.output(print(x$fun))
    cat("Pulsar-selected refit of", fun_text, "\n")
    cat("Path length:", length(x$est$path), "\n")
    cat("Graph dim: ", ncol(x$est$path[[1]]), "\n")
    # One line per selection criterion, reporting the sparsity (edge fraction)
    # of the refit adjacency matrix.
    criteria <- names(x$refit)
    if (length(criteria) > 0) {
        header <- if (length(criteria) > 1) "Criteria:" else "Criterion:"
        crit_lines <- vapply(criteria, function(cr) {
            adj      <- x$refit[[cr]]
            sparsity <- sum(adj) / ncol(adj)^2
            paste0("  ", cr, "... sparsity ", signif(sparsity, 3))
        }, character(1))
        cat(header, "\n", paste(crit_lines, collapse="\n"), "\n", sep="")
    }
}
#' Print a \code{pulsar} and \code{batch.pulsar} S3 object
#'
#' Print information about the model, path length, graph dimension, criterion and optimal indices, if defined.
#'
#' @param x a fitted \code{pulsar} or \code{batch.pulsar} object
#' @param ... ignored
#' @export
print.pulsar <- function(x, ...) {
    # Recover the arguments of the original pulsar() call from the stored
    # call and environment (getArgs is a package-internal helper defined
    # elsewhere in this package).
    fin <- getArgs(getCall(x), getEnvir(x))
    # ncores > 1 means the subsampled fits were run in parallel
    mode <- ifelse(fin$ncores > 1, "parallel", "serial")
    cat("Mode:", mode)
    # shared body prints bounds, path length, dimensions and criteria
    .print.pulsar(x, fin)
}
#' @rdname print.pulsar
#' @export
print.batch.pulsar <- function(x, ...) {
    # Same as print.pulsar, but batch.pulsar always runs in batch mode.
    fin <- getArgs(getCall(x), getEnvir(x))
    cat("Mode: batch")
    .print.pulsar(x, fin)
}
#' @keywords internal
#' @keywords internal
.print.pulsar <- function(x, fin) {
    # Shared printing body for pulsar / batch.pulsar objects.  `fin` holds the
    # reconstructed arguments of the original call.
    if (fin$lb.stars) {
        # bounded-StARS run: report the lambda-index bounds
        cat("... bound index: lower ", x$stars$lb.index,
            ", upper ", x$stars$ub.index, "\n", sep="")
    } else {
        cat("\n")
    }
    cat("Path length:", length(fin$fargs$lambda), "\n")
    cat("Subsamples: ", fin$rep.num, "\n")
    cat("Graph dim: ", ncol(fin$data), "\n")
    # One line per criterion; criteria with a selected index also report the
    # optimal lambda.
    header <- if (length(fin$criterion) > 1) "Criteria:" else "Criterion:"
    crit_lines <- vapply(fin$criterion, function(cr) {
        opt_ind <- x[[cr]]$opt.ind
        suffix  <- if (is.null(opt_ind)) "" else
            paste0("... opt: index ", opt_ind, ", lambda ",
                   signif(fin$fargs$lambda[opt_ind], 3))
        paste0("  ", cr, suffix)
    }, character(1))
    cat(header, "\n", paste(crit_lines, collapse="\n"), "\n", sep="")
}
#' Plot a \code{pulsar} S3 object
#'
#' @param x a \code{pulsar} or \code{batch.pulsar} object
#' @param scale Flag to scale non-StARS criterion to max StARS value (or 1)
#' @param invlam Flag to plot 1/lambda
#' @param loglam Flag to plot log[lambda]
#' @param legends Flag to plot legends
#' @param ... ignored
#'
#' @details If both invlam and loglam are given, log[1/lambda] is plotted
#' @export
plot.pulsar <- function(x, scale=TRUE, invlam=FALSE, loglam=FALSE, legends=TRUE, ...) {
    # Thin S3 wrapper; all plotting work happens in the internal .plot.pulsar.
    .plot.pulsar(x, scale, invlam, loglam, legends)
}
#' @importFrom graphics plot points legend
#' @keywords internal
#' @keywords internal
# Base-graphics plot of each criterion's summary statistic along the lambda
# path.  For bounded-StARS fits, points outside the [lb, ub] index window are
# drawn dashed; selected (optimal) lambdas are marked with filled circles.
.plot.pulsar <- function(x, scale=TRUE, invlam=FALSE, loglam=FALSE, legends=TRUE) {
    fin  <- getArgs(getCall(x), getEnvir(x))
    lams <- fin$fargs$lambda
    xlab <- "lambda"
    # optional axis transforms; both together give log[ 1/lambda ]
    if (invlam) {lams <- 1/lams ; xlab <- paste("1/", xlab, sep="")}
    if (loglam) {lams <- log(lams) ; xlab <- paste("log[ ", xlab, " ]", sep="")}
    nlam <- length(lams)
    crits <- fin$criterion
    n <- length(crits)
    if (scale) {
        # rescale non-StARS criteria to the max StARS summary (or 1)
        ylab <- "summary (scaled)"
        if ("stars" %in% crits)
            ymax <- max(x$stars$summary)
        else ymax <- 1
    } else {
        ylab <- "summary"
        ymax <- max(unlist(lapply(crits, function(c) x[[ c ]]$summary)))
    }
    yrange <- c(0, ymax)
    # empty canvas; series are added with points() below
    plot(lams, seq(yrange[1], yrange[2], length.out=nlam),
        xlab=xlab, ylab=ylab, type='n')
    if (!is.null(x$stars$lb.index)) {
        # bounded-StARS: indices between ub.index and lb.index are "in range"
        # (lambda path is decreasing, so ub.index <= lb.index)
        ilams <- 1:length(lams)
        range1 <- ilams < x$stars$ub.index
        range2 <- ilams > x$stars$lb.index
        range <- !(range1 | range2)
        # one extra legend slot for the dashed out-of-bound series
        ccol <- vector('numeric', n+1)
        ltys <- vector('numeric', n+1)
        legs <- vector('numeric', n+1)  # coerced to character on assignment below
    } else {
        range1 <- rep(FALSE, nlam) ; range2 <- range1
        range <- !range1
        ccol <- vector('numeric', n)
        ltys <- vector('numeric', n)
        legs <- vector('numeric', n)
    }
    i <- 1 ; lcol <- 1
    optcrits <- c() ; optcols <- c()
    for (cr in crits) {
        summs <- x[[ cr ]]$summary
        # opt.index is a package function defined elsewhere; NULL if the
        # criterion has no selected lambda
        optind <- opt.index(x, cr)
        if (scale && cr != "stars") summs <- ymax*summs/max(summs)
        if (length(summs) == nlam) {
            # full-path summary: solid inside the bounds, dashed outside
            points(lams[range], summs[range], type='b', col=lcol)
            points(lams[range1], summs[range1], type='b', col=lcol, lty=2)
            points(lams[range2], summs[range2], type='b', col=lcol, lty=2)
            optind2 <- optind
            if (any(range1 | range2)) {
                # add both the dashed ("b-<crit>") and solid legend entries
                ccol[i:(i+1)] <- c(lcol,lcol)
                ltys[i:(i+1)] <- c(2,1)
                legs[i:(i+1)] <- c(paste("b-", cr, sep=""), cr)
                i <- i+1
            } else {
                ccol[i] <- lcol
                ltys[i] <- 1
                legs[i] <- cr
            }
        } else {
            # summary computed only inside the bound window: shift the optimal
            # index to the window-relative position
            points(lams[range], summs, type='b', col=lcol)
            optind2 <- optind-which(range)[1]+1
            ccol[i] <- lcol
            ltys[i] <- 1
            legs[i] <- cr
        }
        if (!is.null(optind)) {
            # filled marker at the selected lambda
            points(lams[optind], summs[optind2], type='p', cex=1.5, pch=16, col=lcol)
            optcrits <- c(optcrits, cr)
            optcols <- c(optcols , lcol)
        }
        lcol <- lcol + 1 ; i <- i + 1
    }
    if (legends) {
        legend('bottomleft', legs, col=ccol, pch=1, lty=ltys, cex=1.4)
        if (length(optcrits) > 0)
            legend('topright', optcrits, pch=16, col=optcols, cex=1.5, title="opt lambda")
    }
}
#' Update a pulsar call
#'
#' Update a pulsar model with new or altered arguments. It does this by extracting the call stored in the object, updating the call and (by default) evaluating it in the environment of the original \code{pulsar} call.
#'
#' @param object a n existing pulsar or batch.pulsar object
#' @param ... arguments to \code{pulsar} to update
#' @param evaluate Flag to evaluate the function. If \code{FALSE}, the updated call is returned without evaluation
#' @details The \code{update} call is evaluated in the environment specified by the \code{pulsar} or \code{batch.pulsar} object, so if any variables were used for arguments to the original call, unless they are purposefully updated, should not be altered. For example, if the variable for the original data is reassigned, the output of \code{update} will not be on the original dataset.
#' @return If \code{evaluate = TRUE}, the fitted object - the same output as \code{pulsar} or \code{batch.pulsar}. Otherwise, the updated call.
#' @examples
#' \dontrun{p <- 40 ; n <- 1200
#' dat <- huge.generator(n, p, "hub", verbose=FALSE, v=.1, u=.3)
#' lams <- getLamPath(getMaxCov(dat$data), .01, len=20)
#'
#' ## Run pulsar with huge
#' hugeargs <- list(lambda=lams, verbose=FALSE)
#' out.p <- pulsar(dat$data, fun=huge::huge, fargs=hugeargs,
#' rep.num=20, criterion='stars')
#'
#' ## update call, adding bounds
#' out.b <- update(out.p, lb.stars=TRUE, ub.stars=TRUE)
#' }
#' @importFrom stats update
#' @seealso \code{eval}, \code{\link{update}}, \code{\link{pulsar}}, \code{\link{batch.pulsar}}
#' @export
update.pulsar <- function(object, ..., evaluate=TRUE) {
    # Capture the ... arguments unevaluated (expand.dots=FALSE keeps them as a
    # single pairlist) so they can be spliced into the stored call.
    extras <- match.call(expand.dots=FALSE)$...
    .update.pulsar(object, extras, evaluate)
}
#' @importFrom stats getCall
#' @keywords internal
#' @keywords internal
.update.pulsar <- function(object, extras, evaluate) {
    # Splice `extras` into the call stored on `object`, then optionally
    # re-evaluate it in the object's recorded environment.
    the_call <- getCall(object)
    # NOTE(review): parent.frame() here is the frame of update.pulsar, not of
    # the user's call site -- kept as in the original.
    if (is.null(getEnvir(object))) {
        object$envir <- parent.frame()
    }
    if (length(extras) > 0) {
        # overwrite arguments that already exist in the call ...
        matched <- names(extras) %in% names(the_call)
        for (arg in names(extras)[matched]) {
            the_call[[arg]] <- extras[[arg]]
        }
        # ... and append any arguments the call did not have yet
        if (!all(matched)) {
            the_call <- as.call(c(as.list(the_call), extras[!matched]))
        }
    }
    if (evaluate) {
        eval(the_call, getEnvir(object))
    } else {
        the_call
    }
}
#' Get calling environment
#'
#' Generic S3 method for extracting an environment from an S3 object. A getter for an explicitly stored environment from an S3 object or list... probably the environment where the original function that created the object was called from. The default method is a wrapper for \code{x$envir}.
#'
#' @param x S3 object to extract the environment
#' @seealso \code{getCall}, \code{environment}, \code{parent.env}, \code{eval}
#' @export
getEnvir <- function(x) {
    # S3 generic; see getEnvir.default for the basic accessor.
    UseMethod("getEnvir")
}
#' @rdname getEnvir
#' @export
getEnvir.default <- function(x) {
    # getElement retrieves x[["envir"]] for lists and the @envir slot for S4
    # objects; returns NULL when no environment was stored.
    getElement(x, "envir")
}
|
f9d8efb86dad84207e90b94780a0ec063748fecf
|
e4f72da406bf3cb54ebb17072c75f131afdb1c08
|
/src/simulation/cross_group_comparison/roc.R
|
f90af8ee64f2889cfc6fcf263f361d846f1df4ab
|
[] |
no_license
|
ZhuoqunWang0120/LTN_analysis-JASA_1st_submission
|
4943114be9b165f0780062e50235c24bc65a8db6
|
22cc297d0fade96cff5ad61bd038602c051f4ee0
|
refs/heads/main
| 2023-07-15T02:20:14.264335
| 2021-08-17T15:42:41
| 2021-08-17T15:42:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 987
|
r
|
roc.R
|
#!/usr/bin/env Rscript
# Build ROC curves for the LTN cross-group comparison simulation.  For each
# scenario (single_otu / multi_otu) and covariance model (sparse / diagonal),
# cached per-replicate PJAP scores from null (H0) and alternative (H1) runs
# are loaded, labelled 0/1, and converted to a tpr/fpr ROCR performance
# object saved under results/.
library(ROCR)
argv=commandArgs(TRUE)
WORK_DIR=argv[1]   # project root containing cache/ and results/
lambda=argv[2]     # penalty parameter encoded in the cached file names
for (s in c('single_otu','multi_otu')){
  system(paste0('mkdir -p ',WORK_DIR,'/results/cross_group_comparison/',s))
  for (m in c('sparse','diagonal')){
    cachedir=paste0(WORK_DIR,"/cache/cross_group_comparison/",s,"/LTN/pjap/")
    # per-replicate PJAP files for null (H0) and alternative (H1) simulations
    # NOTE(review): files carry a .RData extension but are read with readRDS;
    # assumes they were written with saveRDS -- confirm against the writer.
    files0=grep(paste0('H0_',m,'_lambda',lambda,'.RData'),list.files(cachedir,full.names = T),value = T)
    files1=grep(paste0('H1_',m,'_lambda',lambda,'.RData'),list.files(cachedir,full.names = T),value = T)
    pjaps=c(sapply(files0, readRDS),sapply(files1, readRDS))
    labels=c(rep(0,length(files0)),rep(1,length(files1)))
    # need both classes present to compute a ROC curve
    if (length(unique(labels))>1){
      # encode lambda in the output name: "GammaPrior" when the sparse model
      # used lambda == 0, "NA" for the diagonal model, else the value itself
      if (lambda=='0' & m=='sparse'){lambda1='GammaPrior'}else{if (m=='diagonal'){lambda1='NA'}else{lambda1=lambda}}
      roc=performance(prediction(pjaps,labels),'tpr','fpr')
      saveRDS(roc,paste0(WORK_DIR,'/results/cross_group_comparison/',s,'/LTN_',m,'_lambda',lambda1,'.RData'))
    }
  }
}
|
770d5819c44bf5c157563328e13554b0a548f185
|
e40a4bb26b368842360bd98a076587bb4bd5a6fc
|
/Clinical variables/Data correction for plots/Data correction for plots.R
|
dce13923dd60306600f6bda33b4b11cf1a5e0d82
|
[] |
no_license
|
SarithaKodikara/Gene_environment_gut_interactions_in_Huntington-s_disease
|
1e6135c2e1801893c58ce15e3ea2509d34a45aed
|
1cfe61d4bb0832360e81bd61dba0f7afd43078a4
|
refs/heads/main
| 2023-04-12T12:32:00.658947
| 2022-09-09T04:19:52
| 2022-09-09T04:19:52
| 416,519,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,278
|
r
|
Data correction for plots.R
|
library(dplyr)
library(readxl)
library(magrittr)
library(ggplot2)
library(ggpubr)
library(car)
library(nlme)
library(stargazer)
library(dotwhisker)
library(sjPlot)
require(xtable)
library(TSA)
library(jtools)
library(emmeans)
library(graphics)
library(tidyr)
library(ordinal)
library(viridis)
library(lmerTest)
library(xlsx)
## ---- echo=FALSE-----------------------------------------------------------------------------------
# --- Female clinical data: load, harmonise units, filter, select columns ---
data_Female<-read_excel("data/210330_Gubert_EE-EX_Data_All.xlsx","Female_EEEX_Data_Final")
#converting water content into the same format as Males (multiplied by 100)
data_Female$FH20_Wk7<-data_Female$FH20_Wk7*100
data_Female$FH20_Wk8<-data_Female$FH20_Wk8*100
data_Female$FH20_Wk9<-data_Female$FH20_Wk9*100
data_Female$FH20_Wk10<-data_Female$FH20_Wk10*100
data_Female$FH20_Wk11<-data_Female$FH20_Wk11*100
data_Female$FH20_Wk12<-data_Female$FH20_Wk12*100
#Counting, for each row (sample), the number of columns with missing values
countNA <- function(df) apply(df, MARGIN = 1, FUN = function(x) length(x[is.na(x)]))
#Filter out sample with more than 80% missing values (i.e. keep rows with < 60 NA columns)
new_f_data<-data_Female %>% filter(countNA(data_Female)<60)
# Extracting Colon length columns for the analysis
# NOTE(review): rep("Female",69) / rep("Male",70) hard-code the post-filter
# sample sizes; these will break if the filter threshold or data change.
data_female_clean<-new_f_data[,2:4] %>%mutate(Gender=rep("Female",69)) %>%
  mutate(new_f_data[,6],new_f_data[,c(34,35,42:45,48,49,54,55,58:65,68:75)])
# --- Male clinical data: same pipeline ---
data_Male<-read_excel("data/210330_Gubert_EE-EX_Data_All.xlsx","Male_EEEX_Data_Final")
#converting water content into nearest integer
data_Male$FH20_Wk12<-round(data_Male$FH20_Wk12)
#Counting, for each row (sample), the number of columns with missing values
countNA <- function(df) apply(df, MARGIN = 1, FUN = function(x) length(x[is.na(x)]))
#Filter out sample with more than 80% missing values
new_m_data<-data_Male %>% filter(countNA(data_Male)<60)
# Extracting interested columns for the analysis (column indices chosen to
# match the female column order)
data_male_clean<-new_m_data[,c(2,4:5)] %>%mutate(Gender=rep("Male",70)) %>%
  mutate(new_m_data[,1],new_m_data[,c(65, 59,64,60,61,63,39,40,45,46,49:56,66:73)])
#colnames(data_female_clean);colnames(data_male_clean)
colnames(data_female_clean)<-colnames(data_male_clean)
# Combine both Males and Females
data_clean<-rbind(data_female_clean, data_male_clean)
#Converting Genotype, Housing, Gender and Box to be factor variables
# When creating the factors put the reference category first in the levels argument
data_clean$Genotype<-factor(data_clean$Genotype,levels = c("WT", "HD"))
data_clean$Housing <-factor(data_clean$Housing , levels = c("SH","EE","EX"))
data_clean$Gender<-factor(data_clean$Gender, levels = c("Female","Male"))
data_clean$Box <-factor(data_clean$Box )
colnames(data_clean)[which(names(data_clean) == "Gender")] <- "Sex"
## ---- echo=FALSE-----------------------------------------------------------------------------------
# --- Longitudinal (weekly) measurements: reshape to long format ---
data_female_clean_time<-new_f_data[,2:4] %>%mutate(Gender=rep("Female",69)) %>%
  mutate(new_f_data[,c(6:20,28:33,36:41)])
data_male_clean_time<-new_m_data[,c(2,4:5)] %>%mutate(Gender=rep("Male",70)) %>%
  mutate(new_m_data[,1],new_m_data[,c(13:38)])
#colnames(data_female_clean);colnames(data_male_clean)
colnames(data_male_clean_time)<-colnames(data_female_clean_time)
# Combine both Males and Females
data_clean_time<-rbind(data_female_clean_time, data_male_clean_time)
#Converting Genotype, Housing, Gender and Box to be factor variables
data_clean_time$Genotype<-factor(data_clean_time$Genotype, levels=c("WT","HD"))
data_clean_time$Housing <-factor(data_clean_time$Housing , levels = c("SH","EE","EX"))
data_clean_time$Gender<-factor(data_clean_time$Gender, levels=c("Female", "Male"))
data_clean_time$Box <-factor(data_clean_time$Box )
colnames(data_clean_time)[which(names(data_clean_time) == "Gender")] <- "Sex"
# Wide "<Measure>_Wk<week>" columns -> one row per mouse x week, one column
# per measure (gather/separate/spread are the pre-pivot_* tidyr verbs)
long_data<-data_clean_time %>%
  gather(v, value, Weight_Wk6:FH20_Wk12) %>%
  separate(v, c("col", "Week"),sep="_Wk") %>%
  arrange(MOUSE_ID) %>%
  spread(col, value)
long_data$Week <-as.numeric(long_data$Week)
long_data_sorted<-long_data[order(long_data$MOUSE_ID,long_data$Box,long_data$Week),]
# Rotarod analysis excludes the baseline week (Week 6)
rotarod_data<-long_data_sorted[,c(1:6,9)]%>%filter(Week!=6)
######################## Food and Water intake per box by Gender#######
data_intake_Male<-read_excel("data/210330_Gubert_EE-EX_Data_All.xlsx","Male_EEEX_Data_Final_Food_H20")
#Sorting the data by Box number and including a Sex column
data_intake_Male_sorted<-data_intake_Male[order(data_intake_Male$Box),-4]
data_intake_Male_sorted<-data_intake_Male_sorted%>%mutate(Sex=rep("Male", length(data_intake_Male_sorted$Box)), .after=Housing)
data_intake_Female<-read_excel("data/210330_Gubert_EE-EX_Data_All.xlsx","Female_EEEX_Data_Final_Food_H20",skip=1)
# Matching the column order to Males and including a Sex column
# (the female sheet is read without headers, hence the ...3 / ...2 names)
data_intake_Female_sorted<-data_intake_Female[,c(3,1,2,4:15)]
data_intake_Female_sorted<-data_intake_Female_sorted%>%mutate(Sex=rep("Female", length(data_intake_Female_sorted$...3)), .after=...2)
#colnames(data_intake_Female_sorted);colnames(data_intake_Male_sorted)
colnames(data_intake_Female_sorted)<-colnames(data_intake_Male_sorted)
food_intake<-rbind(data_intake_Female_sorted,data_intake_Male_sorted)
# Wide weekly intake columns -> long format, one row per box x week
long_data_food<-food_intake %>%
  gather(v, value, Food_Intake_Wk6:Water_Intake_Wk11) %>%
  separate(v, c("col", "Week"), sep="_Wk") %>%
  arrange(Box) %>%
  spread(col, value)
long_data_food$Week <-as.numeric(long_data_food$Week)
food_sorted<-long_data_food[order(long_data_food$Box,long_data_food$Week),]
food_sorted$Genotype<-factor(food_sorted$Genotype, levels=c("WT","HD"))
food_sorted$Housing <-factor(food_sorted$Housing , levels = c("SH","EE","EX"))
food_sorted$Sex<-factor(food_sorted$Sex, levels=c("Female", "Male"))
food_sorted$Box <-factor(food_sorted$Box )
########Function to correct for Interactions######
# Fit a linear mixed model (random intercept per Box) for `yVariable` against
# the fixed-effect `formula` (a character string), then subtract the fitted
# contribution of the interaction terms (all coefficients NOT in `indexMain`)
# from the response.  Returns the main-effect columns (positions `indexMain`
# of dataSet) cbind-ed with the interaction-corrected response, used for
# plotting.
# NOTE(review): eval(parse(text = yVariable)) and the assign()/get() pair are
# fragile indirections; dataSet[[yVariable]] and a plain local variable would
# do the same -- left untouched here to keep the written column names stable.
corrData<-function( yVariable, dataSet, formula,indexMain){
  # build "<y> ~ <formula>" as a real formula object
  f <- formula(paste(yVariable, formula, sep = "~"))
  # drop rows where the response is missing before fitting
  lmeModel <- lme(fixed = f, random = ~ 1 |Box, data = subset(dataSet, !is.na(eval(parse(text =yVariable )))))
  fixedCoef<-t(as.matrix( lmeModel$coefficients$fixed))
  # interaction coefficients = everything outside the main-effect positions
  fixedCoef_Interaction<-t(as.matrix(fixedCoef[,-indexMain]))
  modMatrix<-t(model.matrix(f,
                            data = subset(dataSet, !is.na(dataSet[,yVariable]))))
  modMatrix_Interaction<-modMatrix[-indexMain,]
  newVarName<-paste0("New",yVariable)
  Cordata = subset(dataSet, !is.na(dataSet[,yVariable]))
  # corrected response = observed - interaction-term fitted contribution
  assign(newVarName,Cordata[,yVariable]-fixedCoef_Interaction%*%modMatrix_Interaction)
  CordataNew=cbind(Cordata[,indexMain], get(newVarName))
  return(CordataNew)
}
# --- Apply the correction and export each variable to one Excel sheet ---
# Longitudinal outcomes: indexMain = 1:6 keeps intercept + the five main
# effects (Genotype, Housing x2, Sex, Week) and removes interactions.
d1<-corrData( "Rotarod", rotarod_data, "Genotype + Housing+ Sex+Week+
               Genotype:Sex+Sex:Housing+
               Genotype:Week+ Sex:Week + Housing:Week+Housing:Genotype", indexMain=1:6)
write.xlsx(d1, file="CorrectedData_v2.xlsx", sheetName="Rotarod", append=TRUE)
vNames=c("Weight","FH20", "FOutput")
for(i in 1:length(vNames)){
  d1<-corrData( vNames[i], long_data_sorted, "Genotype + Housing+ Sex+Week+
               Genotype:Sex+Sex:Housing+
               Genotype:Week+ Sex:Week + Housing:Week+Housing:Genotype", indexMain=1:6)
  write.xlsx(d1, file="CorrectedData_v2.xlsx", sheetName=vNames[i], append=TRUE)
}
vNames=c("Food_Intake", "Water_Intake")
for(i in 1:length(vNames)){
  d1<-corrData( vNames[i], food_sorted, "Genotype + Housing+ Sex+Week+
               Genotype:Sex+Sex:Housing+
               Genotype:Week+ Sex:Week + Housing:Week+Housing:Genotype", indexMain=1:6)
  write.xlsx(d1, file="CorrectedData_v2.xlsx", sheetName=vNames[i], append=TRUE)
}
# Endpoint (non-longitudinal) outcomes: no Week term, indexMain = 1:5.
vNames=c("Colon_Length","Gut_Permeability_FITC","Gut_Transit_Time",
         "Brain_Weight", "Caecum_Weight", "Caecum_Length",
         "DG_Swing_Fore","DG_Swing_Hind","DG_Stride_Fore",
         "DG_Stride_Hind","DG_Stride_Length_Fore","DG_Stride_Length_Hind",
         "DG_Absolute_Paw_Angle_Fore","DG_Absolute_Paw_Angle_Hind",
         "DG_Stance_Width_Fore","DG_Stance_Width_Hind",
         "DG_Propel_Brake_Ratio_FORE","DG_Propel_Brake_Ratio_HIND",
         "Acetate","Proprionate","Isobutyrate","Butyrate",
         "Methylbutyrate2","Isovalerate", "Valerate","Caproate")
for(i in 1:length(vNames)){
  d1<-corrData( vNames[i], data_clean, "Genotype + Housing+ Sex+
               Genotype:Sex+Sex:Housing+Housing:Genotype", indexMain=1:5)
  write.xlsx(d1, file="CorrectedData_v2.xlsx", sheetName=vNames[i], append=TRUE)
}
|
c2ec36985a1317217cceffb60a2d357a16ab954f
|
ceb6862b7e7e67b16a9c0358ae3da58a094e6d73
|
/gpr.R
|
8bd3427a2578311bc0ccd5b83b9041dddaf72d4d
|
[] |
no_license
|
andreaskapou/GP
|
94fb205506dcc5f6109cb5f3ac1764715989b049
|
a9bdce0d10818ebbee332e16b41e423e0bb6deeb
|
refs/heads/master
| 2021-01-20T10:35:40.659056
| 2015-05-05T20:01:48
| 2015-05-05T20:01:48
| 31,949,314
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,025
|
r
|
gpr.R
|
GP.fit <- function(theta=list(lambda=1,sf2=1,sn2=0.05),covFunc,f,Xs,method="cholesky"){
  ##=================================================================
  # Gaussian process regression implementation. A valid covariance  #
  # function can be given as a parameter. Two modes are possible:   #
  #  1) training: if no test data (Xs) are given, function returns  #
  #       the negative log likelihood and its partial derivatives   #
  #       with respect to the hyperparameters; this mode is used    #
  #       to fit the hyperparameters.                               #
  #  2) prediction: If test data are given, then (marginal) Gaussian#
  #       predictions are computed, whose mean and variance are     #
  #       returned. Note that in cases where covariance function    #
  #       has noise contributions, the variance returned in S2 is   #
  #       for noisy test targets; if you want the variance of the   #
  #       noise-free latent function, you must substract the noise  #
  #       variance.                                                 #
  # Also, there are two modes of implementing the matrix inversion  #
  # one is direct computation and the other uses the Cholesky       #
  # factorization. Better to use the Cholesky factorization         #
  #                                                                 #
  # usage: GP$NLML <- gpr(theta, covFunc, f, method)                #
  #    or: (GP$E.f, GP$C.f) <- gpr(theta, covFunc, f, Xs, method)   #
  #                                                                 #
  # where:                                                          #
  #   theta   is a list of hyperparameters                          #
  #   covFunc is the covariance function                            #
  #   f       is a list of D-dim training inputs and 1-dim targets  #
  #   y       is a (column) vector (of size n) of targets           #
  #   Xs      is a nn by D matrix of test inputs                    #
  #   method  is the method used to compute the matrix inversion    #
  #                                                                 #
  #   NLML    is the value of the negative log marginal likelihood  #
  #   DE      is a (column) vector of partial derivatives of the    #
  #             negative log marginal likelihood wrt each log       #
  #             hyperparameter                                      #
  #   E.f     is a (column) vector (of size nn) of prediced means   #
  #   C.f     is a (column) vector (of size nn) of predicted var    #
  #                                                                 #
  # Adapted from (C) copyright 2006 by Carl Edward Rasmussen        #
  # version in matlab.                                              #
  # Notation mainly follows from Rasmussen and Williams's book      #
  # 'Gaussian Processes for Machine Learning'                       #
  #                                                                 #
  ##=================================================================
  x <- f$x
  y <- f$y
  n <- NROW(x)             # Length of the training data
  I <- diag(1, n)          # Identity matrix
  K <- covFunc(theta, x, x)  # Covariance matrix of the training inputs
  # NOTE(review): sn2 is squared here (and sf2 below), i.e. theta$sn2 /
  # theta$sf2 are treated as standard deviations despite the "2" suffix --
  # confirm the parameterisation expected by covFunc.
  noise <- theta$sn2^2 * I  # Calculate the white noise variance
  if (identical(method,"normal")){ # Compute using direct equations
    invXX <- solve(K + noise)      # explicit inverse of (K + sigma^2 I)
    if (missing(Xs)){ # If no test points, just compute the marginal
      # NOTE(review): the sign pattern differs from the cholesky branch:
      # -0.5*log(det(invXX)) equals +0.5*log(det(K+noise)); verify the
      # intended sign convention (R&W eq. 5.8) before relying on this value.
      NLML <- -0.5*t(y) %*% invXX %*% y - 0.5*log(det(invXX)) - 0.5*n*log(2*pi)
    }else{
      # Predictive mean and full covariance at the test inputs (R&W eq. 2.25/2.26)
      E.f <- covFunc(theta,Xs,x) %*% invXX %*% y
      C.f <- covFunc(theta,Xs,Xs)-covFunc(theta,Xs,x)%*%invXX%*%covFunc(theta,x,Xs)
    }
  }else if (identical(method,"cholesky")){ # Compute using Cholesky decomposition
    L <- t(chol(K + noise))          # lower-triangular factor: L %*% t(L) = K + noise
    a <- solve.cholesky(L, y) # solve(t(L), solve(L, f$y)); helper defined elsewhere
    # NOTE(review): this expression matches the (positive) log marginal
    # likelihood although the variable is named NLML; the derivatives below
    # carry the sign of the *negative* log likelihood -- verify consistency
    # before optimising with both value and gradient.
    NLML <- -0.5*t(y) %*% a - sum(log(diag(L))) - 0.5*n*log(2*pi)
    if (missing(Xs)){ # If no test points, compute marginal and derivatives
      # Note that we compute the negative log marginal likelihood!
      # where the derivative is: 1/2*tr((a*a'-K^{-1})* DK/Dtheta_{j})
      DE <- vector(length=length(theta)-1)
      W <- solve.cholesky(L,diag(n)) - a %*% t(a)   # K^{-1} - alpha alpha'
      # Compute only the first two parameter derivatives (ignore noise)
      for (j in 1: (length(theta)-1)){
        C <- covFunc(theta, x, x, j)   # DK/Dtheta_j from the covariance function
        DE[j] <- 0.5 * sum(W*C)        # trace via elementwise product
      }
      return(list(NLML=NLML, DE=DE))
    }else{
      k.star <- covFunc(theta,x,Xs)    # cross-covariance K(x, Xs)
      E.f <- t(k.star) %*% a  # Latent means
      v <- solve(L, k.star)
      #C.f <- covFunc(theta,Xs,Xs) - t(v)%*%v #impractical for large datasets
      # diagonal-only predictive variance: k(xs,xs) - sum(v^2) per test point
      Kss <- rep(theta$sn2^2 + theta$sf2^2, NROW(Xs))
      C.f <- as.matrix(Kss) - as.matrix(colSums(v * v)) # Latent variances
    }
  }
  if (missing(Xs))
    return(list(NLML=NLML))
  else
    return(list(E.f=E.f, C.f=C.f))
}
|
441b987b4b780a08587b746bb2254579888bfa0e
|
44dea5e6256ca234a34271defd2dfabd20f31e49
|
/corpusr/man/corpusr.Rd
|
69dfdfaac269427c178a2c6d38c01e0b4ebb01c4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
charlie-gallagher/text_analysis
|
36af8c8ce3c11965951228f6fd3d0a9e72e057ea
|
5826c0ac47ab71e9c0c45705f8b56534e8041cd3
|
refs/heads/master
| 2023-01-18T21:22:01.255021
| 2020-11-26T17:48:39
| 2020-11-26T17:48:39
| 286,875,989
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,369
|
rd
|
corpusr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corpusr.R
\docType{package}
\name{corpusr}
\alias{corpusr}
\title{Corpusr: A Package for Using \code{gutenbergr} as a Corpus}
\description{
Functions for helping you use the \code{gutenbergr} package as a corpus
in the style of the Oxford English Corpus: a collection of sentences
in which you can search for words you would like to see in context. This
package also contains a few objects for making work with large corpora
faster and easier. These are \code{cps_bk} and \code{cps_corpus}. A
\code{cps_corpus} is simply a list of \code{cps_book}s.
}
\section{Interactive use}{
You can use \code{corpusr} interactively with the functions
\code{guten_search_author} and \code{guten_search_works}. These are best
used in combination with information gleaned from
\code{View(gutenbergr::gutenberg_metadata)}.
}
\section{Corpus use}{
You can also use \code{guten_grab} to build your own corpus by supplying
a vector of works to be included. The package \code{gutenbergr} has several
functions for helping to gather such lists of works. In addition,
I've included several functions for building a corpus for a certain author
or for a list of works. These are \code{corpus_build_author} and
\code{corpus_build_works}. With your corpus compiled, you can search it
with \code{corpus_search}.
}
|
c31367f6bf9b2338d133abd4b008682eab5953c1
|
5f684a2c4d0360faf50fe055c1147af80527c6cb
|
/2022/2022-week_36/rebrickable.R
|
0060c462e65398cebc5b84dbde27c7d8e7af2fef
|
[
"MIT"
] |
permissive
|
gkaramanis/tidytuesday
|
5e553f895e0a038e4ab4d484ee4ea0505eebd6d5
|
dbdada3c6cf022243f2c3058363e0ef3394bd618
|
refs/heads/master
| 2023-08-03T12:16:30.875503
| 2023-08-02T18:18:21
| 2023-08-02T18:18:21
| 174,157,655
| 630
| 117
|
MIT
| 2020-12-27T21:41:00
| 2019-03-06T14:11:15
|
R
|
UTF-8
|
R
| false
| false
| 3,605
|
r
|
rebrickable.R
|
library(tidyverse)
library(camcorder)
library(geomtextpath)
library(ggtext)
# Start camcorder: every plot drawn below is also recorded as a PNG frame.
gg_record(dir = "tidytuesday-temp", device = "png", width = 10, height = 6, units = "in", dpi = 320)
# TidyTuesday 2022-09-06: Rebrickable LEGO database extracts.
elements <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2022/2022-09-06/elements.csv.gz')
cols <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2022/2022-09-06/colors.csv.gz')
sets <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2022/2022-09-06/sets.csv.gz')
inventory_parts <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2022/2022-09-06/inventory_parts.csv.gz')
inventories <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2022/2022-09-06/inventories.csv.gz')
themes <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2022/2022-09-06/themes.csv.gz')
# Join each inventory part to its colour and set metadata, and build a hex
# colour string from the rgb column.
# NOTE(review): `inventories` is joined a second time by set_num, which
# duplicates its columns; the `.x` suffix relied on below (`name.x` = colour
# name from `cols`, vs `name.y` = set name) depends on this join order —
# confirm the second join is intended.
cols_yrs <- inventories %>% 
  left_join(inventory_parts, by = c("id" = "inventory_id")) %>% 
  left_join(cols, by = c("color_id" = "id")) %>% 
  left_join(inventories, by = "set_num") %>% 
  left_join(sets, by = "set_num") %>% 
  mutate(hex = case_when(
    !is.na(rgb) ~ paste0("#", rgb),
    TRUE ~ rgb
  )
  )
# Yearly share of parts in each of the four grey colours, relative to all
# parts released that year (1970 onwards, unknown colours dropped).
cols_freq <- cols_yrs %>% 
  count(year, name.x, hex) %>% 
  filter(year > 1969) %>% 
  filter(!is.na(hex)) %>% 
  group_by(year) %>% 
  mutate(
    total = sum(n),
    freq = n / sum(n)
  ) %>% 
  ungroup() %>% 
  filter(
    # str_detect(name.x, "Light Gray|Light Bluish Gray|Dark Gray|Dark Bluish Gray")
    name.x == "Light Gray" |
      name.x == "Light Bluish Gray" |
      name.x == "Dark Gray" |
      name.x == "Dark Bluish Gray"
  )
# Hand-placed label positions (one per colour) for direct labelling of the
# lines instead of a legend.
col_labels <- cols_freq %>% 
  distinct(name.x, hex) %>% 
  mutate(
    x = c(1970, 1979, 2021, 2022),
    y = c(0.07, 0.03, 0.05, 0.18),
    hjust = c(0, 0, 1, 1)
  )
f1 <- "Outfit"           # font family used throughout
tx_col <- "darkslateblue" # text colour used throughout
# Final chart: one line per grey colour (bluish greys solid, old greys
# dashed) with a vertical marker at the 2004 colour switch.
ggplot(cols_freq) +
  geom_vline(xintercept = 2003.5, color = "coral2", size = 0.75, alpha = 0.7) +
  geom_line(aes(year, freq, color = hex, linetype = if_else(str_detect(name.x, "Blu"), "solid", "4141")), size = 1.4, alpha = 1) +
  geom_text(data = col_labels, aes(x, y, label = name.x, hjust = hjust, color = hex), family = f1, fontface = "bold") +
  scale_color_identity() +
  scale_linetype_identity() +
  scale_y_continuous(limits = c(0, 0.25), breaks = seq(0.1, 0.3, 0.1), labels = scales::percent) +
  labs(
    title = "The LEGO gray switch",
    subtitle = "In <span style = 'color:#EE6A50'>2004</span>, LEGO changed <span style = 'color:#9BA19D'>◼**light gray**</span> to <span style = 'color:#A0A5A9'>◼**light bluish gray**</span> and <span style = 'color:#6D6E5C'>◼**dark gray**</span> to <span style = 'color:#6C6E68'>◼**dark bluish gray**</span>. The chart shows<br>the proportion of parts with the four gray colors by year, relative to parts of all colors released each year.",
    caption = "Source: Rebrickable · Graphic: Georgios Karamanis"
  ) +
  theme_minimal(base_family = f1) +
  theme(
    legend.position = "none",
    plot.background = element_rect(fill = "white", color = NA),
    axis.title = element_blank(),
    axis.text = element_text(color = tx_col, face = "bold"),
    plot.title = element_text(size = 20, color = tx_col, face = "bold"),
    plot.subtitle = element_markdown(size = 12, margin = margin(7, 0, 30, 0), color = tx_col, lineheight = 1.2),
    plot.caption = element_text(margin = margin(20, 0, 0, 0), color = tx_col),
    plot.margin = margin(20, 20, 10, 20)
  )
|
fb95f6e8cc6c3a8f3d99364913e133aecaaff38a
|
fbab6775e9e31ad0ff57cb232bc71a5a3710eef5
|
/R/mInfluence.R
|
fdcf03dbc07aba0aeb569811bb87101adfe8269f
|
[] |
no_license
|
cran/mosaicManip
|
a3fbaa9391b75a0386af6be6f2eaca61cf709a70
|
b517dba71cc73c38c1cfa12b0bd14bbe7e58363a
|
refs/heads/master
| 2021-01-01T06:00:17.652251
| 2011-12-29T00:00:00
| 2011-12-29T00:00:00
| 17,719,040
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,084
|
r
|
mInfluence.R
|
#' Interactive exploration of influential observations in a linear model.
#'
#' Fits `lm(expr, data)` and opens a `manipulate` control panel in which the
#' user picks the predictor to plot and may add a movable "new point"
#' (positioned in standard deviations of the chosen predictor and of the
#' response) to see how it affects the fit.  Observations flagged by any of
#' the `influence.measures()` diagnostics are over-plotted in
#' `col.influence`; fitted values are drawn in `col.fitted`.
#'
#' @param expr a model formula, e.g. `y ~ x + z`
#' @param data a data frame containing the variables in `expr`
#' @param groups optional grouping variable, evaluated inside `data`
#' @param col.fitted colour for the fitted values
#' @param col.influence colour for influential observations
#' @param ... further arguments passed through to the lattice panel functions
mInfluence <- function(expr, data,
                       groups=NULL,
                       col.fitted = 'gray30',
                       col.influence='red', ...){
  # All three packages are needed; `||` short-circuits at the first failure.
  # (Replaces two overlapping checks, one of which used the elementwise `|`
  # and reported a misleading "manipulate only" error message.)
  if (!require(manipulate) || !require(lattice) || !require(grid))
    stop("Must use a manipulate-compatible version of R (e.g. RStudio) with the lattice and grid packages")
  dots <- list(...)
  groups <- eval(substitute(groups), data, environment(expr))
  xvar <- list()
  yvar <- all.vars(expr)[1]       # response is the first variable in the formula
  for(a in all.vars(expr)[-1]){   # picker choices: one entry per predictor name
    xvar[[a]] <- a
  }
  #====================
  # Redraw callback for manipulate: rebuilds the (optionally augmented)
  # data set, refits the model and produces the lattice plot.
  myFun <- function(xpick, multX, multY, newPoint, groups=NULL, ...){
    xdat <- data[[xvar[[xpick]]]]
    ydat <- data[[yvar]]
    # Prototype "new point": medians for numeric columns; non-numeric
    # columns keep whatever the first row of `data` held.
    newRow <- data[1,]
    for(b in 1:length(newRow)){
      if(is.numeric(newRow[1,b]))
        newRow[1,b] <- median(data[,b])
    }
    if(is.numeric(newRow[[xpick]])){
      newRow[[1,xpick]] <- multX*sd(xdat)+mean(xdat)
    }
    if(is.factor(newRow[[xpick]])){
      # Map the (-5, 5) slider range onto the factor levels.
      newRow[[1,xpick]] <- levels(xdat)[floor(length(levels(xdat))*(multX+5)/10.001)+1]
    }
    if(is.numeric(newRow[[1,yvar]])){
      newRow[[1,yvar]] <- multY*sd(ydat)+mean(ydat)
    }
    # Axis limits: +/- 5.2 SD around the mean (maxxlim only exists, and is
    # only used, when the predictor is numeric).
    if(is.numeric(xdat))
      maxxlim <- c(-5.2*sd(xdat)+mean(xdat), 5.2*sd(xdat)+mean(xdat))
    maxylim <- c(-5.2*sd(ydat)+mean(ydat), 5.2*sd(ydat)+mean(ydat))
    modData <- rbind(data, newRow)
    if(newPoint)
      data <- modData            # shadow the outer `data` for this redraw only
    if(is.factor(data[[xvar[[xpick]]]])){
      xlevels <- levels(xdat)
    }
    mod <- lm(expr, data)
    influ <- influence.measures(mod)
    influIdx <- which(apply(influ$is.inf, 1, any))  # rows flagged by any diagnostic
    # NOTE(review): mosaic:::.do.safe.call is an unexported helper used to
    # forward `...` safely to lattice panel functions.
    panel.mInfluence <- function(x,y,groups=NULL,...){
      if(is.factor(x)) {
        set.seed(73241)          # fixed seed keeps the jitter stable across redraws
        x <- jitter(as.numeric(x))
      }
      panel.xyplot(x,y, group=groups, ...) # raw data
      # overplot influential observations
      mosaic:::.do.safe.call( panel.xyplot, groups=groups[influIdx],
                              list(x=x[influIdx], y=y[influIdx], col=col.influence,
                                   cex= if(is.null(dots[['cex']])) .6 else .6*dots[['cex']]),
                              ... )
      # add the fitted values
      mosaic:::.do.safe.call(panel.xyplot, list(x=x, y=fitted(mod), pch=18, col=col.fitted), ... )
      if(newPoint){
        # the appended row is always last, so highlight the final point
        mosaic:::.do.safe.call( panel.xyplot,
                                list(x=x[length(x)], y=y[length(y)], col="orange", lwd=3,
                                     cex= if(is.null(dots[['cex']])) 1.3 else 1.3*dots[['cex']]),
                                ...)
      }
    }
    if(is.factor(xdat)){
      xyplot( data[[yvar]] ~ data[[xvar[[xpick]]]],
              ylab=yvar,
              xlab=xvar[[xpick]],
              groups=groups,
              panel=panel.mInfluence,
              ylim=maxylim,
              scales=list(x=list(labels=xlevels,at=seq(1,length(xlevels),by=1))),
              ...)
    }
    else
      mosaic:::.do.safe.call( lattice:::xyplot.formula, list( x = data[[yvar]] ~ data[[xvar[[xpick]]]],
                                                              ylab=yvar,
                                                              xlab=xvar[[xpick]],
                                                              groups=groups,
                                                              panel=panel.mInfluence,
                                                              xlim=maxxlim,
                                                              ylim=maxylim) ,
                              ...)
  }
  #==================
  controls <- list(xpick=picker(xvar, label="Predictor to plot"),
                   newPoint=checkbox(FALSE, label="Add new point"),
                   multX=slider(-5, 5, initial=1, label="New predictor (measured in SDs)"),
                   multY=slider(-5, 5, initial=1, label="New response (measured in SDs)")
                   )
  manipulate(myFun(xpick=xpick, multY=multY, multX=multX, newPoint=newPoint, groups=groups, ...),
             controls)
}
|
cad546dd1173fbc032336ec929b222b0c17a89ed
|
b45fb01431b05e020bcf12848cc4022cbfc187ab
|
/simulations/simulated_data_buffalo.R
|
201c6867733036e2b4e02f14e5b0b30fe766cb1f
|
[
"MIT"
] |
permissive
|
Florencehinder/safepaths-datascience
|
5510556edf89fcc0a8f75624a3ec82470da0e34b
|
0c1d8750d9c627512bcb655925f24bcc31092224
|
refs/heads/master
| 2022-11-30T14:06:37.041848
| 2020-05-21T03:35:17
| 2020-05-21T03:35:17
| 288,901,370
| 0
| 0
| null | 2020-08-20T03:56:06
| 2020-08-20T03:56:06
| null |
UTF-8
|
R
| false
| false
| 777
|
r
|
simulated_data_buffalo.R
|
## Script for creating city-scale simulated data for testing.
## Takes the simulated dataset at https://zenodo.org/record/2865830#.XoZqadMzaSM
## and converts it into the format used by Private Kit: Safe Paths exports.
library(tidyverse)
## Read the Zenodo simulation.  The source data stores `time` in hours from
## the simulated start and misspells the latitude column as `latitute`;
## both are fixed here: `time` becomes millisecond Unix time (as used by
## Safe Paths exports) and the column is exposed as `latitude`.
D <- as_tibble(readRDS("~/Downloads/buffalo_sim_full.Rds")) %>%
  mutate(
    time = as.numeric(as.POSIXct("2020-04-10 8:00:00 EST")) * 1000 + (time * 60 * 60 * 1000),
    latitude = latitute  # source data misspells "latitude"
  ) %>%
  select("latitude", "longitude", "time", "ID")
## Save as CSV.  `file =` replaces the deprecated `path =` argument of
## readr::write_csv().
write_csv(D, file = "~/projects/covid19_sim_data/buffalo_sim_full.csv")
|
00d5c940b1ea6247a72f4bd40abc8fd54d5cdbac
|
fe63c74a184ab926479de627b178698e9ed16e7c
|
/CPAD2_Cell_Profiles_And_Demographs_V3.R
|
cf9dbd8d760224a002b5059cd065efdd0e49683b
|
[] |
no_license
|
GiacomoGiacomelli/Cell-Profiles-and-Demographs
|
5e388909a1b6751997bd8225342510f77acd5c1f
|
d36b41d292d4e8b12c01f328404817157afb90be
|
refs/heads/main
| 2023-06-24T02:29:06.381577
| 2021-07-27T14:34:17
| 2021-07-27T14:34:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,743
|
r
|
CPAD2_Cell_Profiles_And_Demographs_V3.R
|
############################################################################################################################################################################
#Activate necessary packages
############################################################################################################################################################################
library("gplots")
library("RColorBrewer")
library("ggplot2")
############################################################################################################################################################################
#File input
############################################################################################################################################################################
# NOTE(review): this script navigates with setwd() and relative paths; it
# assumes the Experiment/Condition/Subcondition/Membranes* folder layout.
setwd("~/Experiment/Condition/Subcondition")
fold<-"Membranes*" #The name here should reflect the folder nomenclature that is used to store the profiles
folder<-paste("../",Sys.glob(fold),sep="")
setwd("~/Experiment/Condition/Subcondition/Membranes01")
PlotType<-"CellNorm" #choose between CellNorm (per-cell min/max rescale) and PopNorm (population-wide rescale)
############################################################################################################################################################################
chan<-"Blue*" # filename pattern of the channel to profile
maxim<-0      # longest cell length seen (x-axis maximum)
mayim<-0      # largest fluorescence value seen (used for population normalisation)
minyim<-17000 # smallest fluorescence value seen; NOTE(review): 17000 is presumably above any real intensity so the first profile always lowers it — confirm
# First pass: scan every profile file to find the global x range and, when
# CellNorm is selected, track the per-population y extremes as well.
for (f in folder){
  setwd(f)
  RED<-Sys.glob(chan)
  for(k in 1:length(RED)){
    filer<-read.table(RED[k], sep="\t",dec=".", stringsAsFactors=FALSE,header=TRUE)
    if (max(filer$x)>maxim){
      maxim<-max(filer$x)
    }
    if (PlotType=="CellNorm"){
      if (max(filer$y)>mayim){ #activate if normalize for maximum overall fluorescence
        mayim<-max(filer$y) #
      } #
      if (min(filer$y)<minyim){ #activate if normalize for maximum overall fluorescence
        minyim<-min(filer$y) #
      } #
    }
  }}
a<-seq(0,maxim, by=0.1) #define binwidth (0.02 um for PALM, 0.2 for EPI)
REDTOT<-data.frame() # all profiles stacked: x, y, sortable length key (V3), cell index (V4)
counter<-1
# Second pass: read every profile, tag it with a length-based sort key (V3)
# and a running cell index (V4), and stack everything into REDTOT.
for (f in folder){
  setwd(f)
  RED<-Sys.glob(chan)
  for(k in 1:length(RED)){
    file<-read.table(RED[k], sep="\t",dec=".", stringsAsFactors=FALSE,header=TRUE) # sep="," for ratio
    file<-as.data.frame(cbind(file$x,file$y))
    colnames(file)[1]<-"x"
    colnames(file)[2]<-"y"
    file[3]<-length(file$x)+(0.0001*k) #risk that two cells of the same length from different folders come from a file with same name...think of solution
    file[4]<-counter
    counter<-counter+1
    REDTOT<-rbind(REDTOT,file)
  }
  REDTOT1<-REDTOT[order(REDTOT$V3),] # profiles ordered by cell length
}
# Build the demograph matrix: for each cell and length bin, the (bin, cell)
# pair is repeated in proportion to the normalised mean intensity, so that
# hist2d() later renders intensity as point density.
df2<-data.frame()
for(k in 1:length(unique(REDTOT1$V4))){
  print(k)
  file<-REDTOT1[REDTOT1$V4==unique(REDTOT1$V4)[k],]
  if (PlotType=="CellNorm"){
    mayim<-max(file$y) ###if active it normalizes on max cell intensity
    minyim<-min(file$y) ###if active it normalizes on max cell intensity
  }
  mayim_c<-(mayim-minyim) # dynamic range used for the 0-100 rescale below
  print(mayim)
  for (i in 1:(length(a)-1)){
    if (a[i]<max(file$x)){
      m<-mean(file[file$x>=a[i] & file$x<a[i+1],]$y)
      if (is.nan(m)) { # empty bin: widen the window to include the previous bin
        m<-mean(file[file$x>=a[i-1] & file$x<a[i+1],]$y)
      }
      m_c<-(m-minyim) #correct by minimum
      b<-replicate(1+(m_c*100/mayim_c),k) #for epi: m*100/mayim OR 1+(m_c*100/mayim_c) #for palm: 1+((m/mayim)*1000))
      c<-replicate(1+(m_c*100/mayim_c),a[i]) #for epi: m*100/mayim OR 1+(m_c*100/mayim_c) #for palm: 1+((m/mayim)*1000))
      df1<-as.data.frame(c)
      df1[2]<-b
      df2<-rbind(df2,df1)
    }
  }
}
write.table(REDTOT1, file="../CellNormMinMax_profiles_ordered.txt",sep=",",row.names = FALSE, quote=FALSE) ###save file containing the fluorescence profiles ordered by cell length
write.table(df2, file="../CellNormMinMax_profiles_ordered_matrix.txt",sep=",",row.names = FALSE, quote=FALSE) ###save matrix used to represent the fluorescence profiles as demographs via hist2d()
write.table(maxim, file="../CellNormMinMax_maxim.txt",sep=",",row.names = FALSE, quote=FALSE) ###maximum length among the profiles
#REDTOT1<-read.table(file="../Red_profiles_ordered.txt",sep=",", header=TRUE) ###import file containing the fluorescence profiles ordered by cell length
#df2<-read.table(file="../Red_profiles_ordered_matrix.txt",sep=",", header=TRUE) ###import matrix used to represent the fluorescence profiles as demographs via hist2d()
#maxim<-read.table(file="../Red_maxim.txt",sep=",", header=TRUE) ###import maximum length among the profiles
#a<-seq(0,maxim[1,1], by=0.1) #Use when importing data
# NOTE(review): the black-blue-red ramp below is immediately overwritten by
# the black-white ramp — only the last definition of r is used by hist2d().
rf1<-colorRampPalette(c("black","blue","red")) ####Define color scale
r <- rf1(256)
rf1<-colorRampPalette(c("black","white")) ####Define color scale
r <- rf1(256)
png(file="../Demograph.png",height=3000,width=2500,res=600) ####Demograph
h2<-hist2d(df2, nbins=c(length(a)-1,length(unique(REDTOT1$V4))), col=r)
dev.off()
######################Peaks analysis
#########Establish FindPeak Function
#' Locate local maxima in a numeric vector.
#'
#' A sliding window of width `span` is passed over `y`; an index is reported
#' as a peak when its element is the (first, under ties) maximum of the
#' window centred on it.
#' NOTE(review): for an even `span` the "centre" column is off by half a
#' step, so reported peak positions can be shifted by one index — confirm
#' that odd spans are intended for quantitative use.
#'
#' @param y numeric vector to scan.
#' @param span window width; defaults to ~20% of length(y).  It is clamped
#'   into [2, length(y)] so that very short inputs no longer make embed()
#'   error (span < 1) or flag every index as a peak (span == 1).
#' @return integer vector of peak indices into `y` (possibly empty).
findpeaks <- function(y, span = NULL)
{
  if (is.null(span)) span <- round(.2 * length(y))
  # Guard degenerate spans before calling embed().
  span <- max(2, min(span, length(y)))
  z <- embed(y, span)        # each row is one length-`span` window (reversed in time)
  s <- span %/% 2
  v <- max.col(z, ties.method = "first") == 1 + s  # centre element is the window max
  peaks <- which(c(rep(FALSE, s), v, rep(FALSE, s)))
  # Even spans pad one flag too many; never report an index past the input.
  peaks[peaks <= length(y)]
}
#############################################
# Peak counting: for every cell, plot the red and blue profiles with their
# detected peaks, and record cell length plus peak counts per channel.
# NOTE(review): this section uses "Membranes1" while the demograph section
# used "Membranes01" — confirm which folder name is correct.
setwd("~/Experiment/Condition/Subcondition")
fold<-"Membranes*"
folder<-paste("../",Sys.glob(fold),sep="")
setwd("~/Experiment/Condition/Subcondition/Membranes1")
chan<-"Red*"   # red-channel profile files
chan1<-"Blue*" # blue-channel profile files (paired with red by index k)
cellsTOT<-data.frame()
#f<-folder[1]
for (f in folder){
  setwd(f)
  RED<-Sys.glob(chan)
  BLUE<-Sys.glob(chan1)
  # One row per cell: index, length, and peak count in each channel.
  cells<-as.data.frame(seq(from=1, to=length(RED),by=1))
  colnames(cells)[1]<-"Index"
  cells[2]<-0
  colnames(cells)[2]<-"Length"
  cells[3]<-0
  colnames(cells)[3]<-"RedPeak"
  cells[4]<-0
  colnames(cells)[4]<-"BluePeak"
  for(k in 1:length(RED)){
    filer<-read.table(RED[k], sep="\t",dec=".", stringsAsFactors=FALSE,header=TRUE)
    fileb<-read.table(BLUE[k], sep="\t",dec=".", stringsAsFactors=FALSE,header=TRUE)
    # Per-cell diagnostic plot: both profiles, with peaks that rise above
    # 20% of the channel's min-max range marked as points.
    png(file=paste("Cell",k,".png",sep=""),height=2000,width=3000,res=600) ####Demograph
    print(ggplot(data=fileb, aes(x=x, y=y))+
            geom_line(col="blue")+
            geom_point(data=fileb[findpeaks(fileb$y, span=10),][fileb[findpeaks(fileb$y, span=10),]$y-min(fileb$y)>(0.2*(max(fileb$y)-min(fileb$y))),], aes(x=x, y=y), col="blue")+
            geom_line(data=filer, aes(x=x, y=y), col="red")+
            geom_point(data=filer[findpeaks(filer$y, span=10),][filer[findpeaks(filer$y, span=10),]$y-min(filer$y)>(0.2*(max(filer$y)-min(filer$y))),], aes(x=x, y=y), col="red")+
            theme_bw())
    dev.off()
    cells[k,2]<-max(filer$x)
    # Count peaks above the same 20%-of-range threshold in each channel.
    TempPeakRed<-findpeaks(filer$y, span=10)
    cells[k,3]<-length(filer[TempPeakRed,][filer[TempPeakRed,]$y-min(filer$y)>(0.2*(max(filer$y)-min(filer$y))),]$x)
    TempPeakBlue<-findpeaks(fileb$y, span=10)
    cells[k,4]<-length(fileb[TempPeakBlue,][fileb[TempPeakBlue,]$y-min(fileb$y)>(0.2*(max(fileb$y)-min(fileb$y))),]$x)
  }
  cellsTOT<-rbind(cellsTOT,cells)
  cellsTOT$Index<-seq(1,length(cellsTOT$Index), by=1) # re-number across folders
}
write.table(cellsTOT, file="../CellLength_and_Peaks_M_mc.txt",sep=",",row.names = FALSE, quote=FALSE) ###save file containing cell length and the number of peaks contained in each fluorescence channel
#EXAMPLE of analyis of obtained data
#4h_20210209_new_mito
#RESK<-read.table("X:/Giacomo Giacomelli/DIPS_project/Students/Bente/20210209/ResK/T4h/CellLength_and_Peaks_M_mc.txt", sep=",", header=TRUE)
#dipB1<-read.table("X:/Giacomo Giacomelli/DIPS_project/Students/Bente/20210209/DipB1/T4h/CellLength_and_Peaks_M_mc.txt", sep=",", header=TRUE)
#dipB2<-read.table("X:/Giacomo Giacomelli/DIPS_project/Students/Bente/20210209/DipB2/T4h/CellLength_and_Peaks_M_mc.txt", sep=",", header=TRUE)
#dipB3<-read.table("X:/Giacomo Giacomelli/DIPS_project/Students/Bente/20210209/DipB3/T4h/CellLength_and_Peaks_M_mc.txt", sep=",", header=TRUE)
#ggplot(data=RESK, aes(x=Length, y=..density..))+
# geom_histogram(fill="royalblue4", alpha=1, binwidth = 0.25)+
# geom_histogram(data=dipB3, aes(x=Length, y=..density..), fill="orange", alpha=0.5, binwidth=0.25)+
# geom_histogram(data=dipB2, aes(x=Length, y=..density..), fill="yellow", alpha=0.5, binwidth=0.25)+
# geom_histogram(data=dipB1, aes(x=Length, y=..density..), fill="red", alpha=0.5, binwidth=0.5)+
# theme_bw()
#shapiro.test(RESK$Length)
#kruskal.test(RESK$Length,dipB1$Length)
#wilcox.test(RESK$Length,dipB1$Length)
|
415dd3b6e4c0beb017dbdaf56a3f9537ade90a49
|
e2bd186fbacd40a0f1c60725f1ea19ce948f2721
|
/basic_use.R
|
15e3f89db7755821172a52350044800727ffadd4
|
[] |
no_license
|
watermelonll/R
|
2591cfec1cc0316d7caaeda11d6c53e424c53caf
|
2b67425f8929b4fc7565a45232ede559da25dd6e
|
refs/heads/master
| 2020-08-27T18:07:29.224097
| 2019-11-26T04:52:10
| 2019-11-26T04:52:10
| 217,454,875
| 0
| 0
| null | 2019-10-25T05:16:35
| 2019-10-25T05:04:24
| null |
UTF-8
|
R
| false
| false
| 2,371
|
r
|
basic_use.R
|
# Import the class dataset.
# Note: in Windows paths "\" must be escaped as "\\" (or use "/").
class <- read.table("E:\\R\\class.csv", header = TRUE, sep = ",")
class
# Add a derived numeric variable: BMI from Weight and Height (cm).
# NOTE(review): Weight is halved before the BMI formula — presumably a unit
# conversion in the source data; confirm the intended units.
class <- transform(class, bmi = Weight/2/(Height/100)**2)
class
# Sort: by Height; then by Height ascending / Weight descending.
newcls <- class[order(class$Height),]
newcls <- class[order(class$Height, -class$Weight),]
newcls
# Add a derived categorical variable from Age.
newcls <- within(newcls,{
  agecat <- NA
  agecat[newcls$Age<14] <- "young"
  agecat[newcls$Age>=14] <- "old" })
newcls
# Simple plot of the sorted data.
# Bug fix: the y values previously came from the *unsorted* `class` data
# frame while x came from the sorted `newcls`, mismatching the (x, y) pairs.
attach(newcls)
plot(newcls$Height, newcls$Weight, type = "b", lty = 2, pch=17,
     main = "Test class data",
     xlab="Height", ylab="Weight",
     xlim = c(0,80), ylim = c(0,150))
abline(h=c(70,100),lty=3,col="brown")
abline(v=c(70), lty=3,col="green")
detach(newcls)
library(dplyr)
library(reshape)
# Rename a variable, case 1: dplyr::rename(), argument order is new = old.
newcls <- newcls %>% rename(wddh = bmi)
newcls
# Rename, case 2: by position with base R.
colnames(newcls)
names(newcls)[6] <- "bmi"
newcls
# Write paragraphs and tables to an RTF document.
library(rtf)
rtffile <- RTF("d:\\test.rtf")
addParagraph(rtffile,"this is test1\n")
# NOTE(review): `outtab` is never defined in this script; create it before
# running this section.
addTable(rtffile,outtab)
addParagraph(rtffile,"\n\nthis is test2")
addTable(rtffile, cbind(rownames(mtcars), mtcars))
done(rtffile)
library(dplyr)
# head: get the first 6 records
d1 <- head(mtcars,6)
d2 <- within(d1,{
  cat <- NA
  cat[d1$gear==4] <- "h"
  cat[d1$gear==3] <- "l"})
# mutate: compute and append one or more new columns
# union: set-union of the two data frames
# (bug fix: the trailing comma in union(d2,) produced an "argument is empty" error)
d3 <- mutate(d2,cat="t") %>% union(d2)
View(d3)
filter(d3,carb<2)
## Demos with the built-in Puromycin data set.
dim(Puromycin)
Puromycin
help("Puromycin")
head(Puromycin)
puroa <- subset(Puromycin, state =="treated")
# Three equivalent ways to draw the same scatter plot:
plot(rate ~ conc, data = puroa)
with(puroa, plot(rate ~ conc))
plot(puroa$rate ~ puroa$conc)
# Plotting-character / colour demo.
u <- 1:25
plot(u ~ 1, pch = u, col=u, cex =3)
plot(rate ~ conc, data = puroa, pch = 2, col = 4, cex = 1.5,
     xlim = c(0, 1.2), ylim = c(40, 210),
     ylab = "Concentration",
     xlab = "Rate", cex.lab = 1)
title(main = "Puromycin", cex.main = 2.5)
library(doBy)
# Mean rate per concentration, overlaid on the raw points.
puroa.mean <- summaryBy(rate ~ conc, data = puroa, FUN = mean)
plot(rate ~ conc, data = puroa, pch = 16, col = 4,
     cex = 1.0)
points(rate.mean ~ conc, data = puroa.mean, col = "cyan",
       lwd = 10, pch = "x")
lines(rate.mean ~ conc, data = puroa.mean, col = 2)
abline(v=0.5, col = "tan", lty = 2)
abline(h=120, col = "blue", lty =4)
# Bug fix: a trailing q() (followed by a stray "n" answering its prompt)
# would terminate the R session whenever this script was sourced; removed.
|
61f2b52eac129a7f3989ecd6a6a8f0f1ba96913e
|
6096c4ed54cc123e8e1a98284b608d675ccaaafd
|
/app/R/plot_results.R
|
c2daff39c68c1ebf8b88666ba8f94ad236f86830
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
thibautjombart/covid19_bed_occupancy
|
dbeed370cb80f8e79fbaf2ff6feb49648191525c
|
d7d8ee5df12b1dbaa6b5f7f23575b928fc0541a8
|
refs/heads/master
| 2021-04-17T11:22:59.797456
| 2020-05-29T15:00:55
| 2020-05-29T15:00:55
| 249,441,528
| 12
| 7
|
NOASSERTION
| 2020-05-29T15:00:57
| 2020-03-23T13:41:01
|
R
|
UTF-8
|
R
| false
| false
| 3,966
|
r
|
plot_results.R
|
#' Plot the outputs of `predict_beds`
#'
#' Builds a two-panel ggplot: daily admissions (reported, optionally
#' unreported, and projected) and projected bed occupancy with a 95%
#' interval ribbon.
#'
#' @param results output of `predict_beds`: a list with components `data`
#'   (observed admissions with `date` and `n_admissions` columns), `beds`
#'   and `admissions` (simulation draws summarised via `summarise_beds`)
#' @param reporting assumed percentage of admissions that are reported;
#'   values below 100 add an "Unreported" bar segment
#' @param time minimum number of days of data expected; shorter uploads
#'   trigger an on-plot warning annotation
#' @param warning_text text inserted into that warning annotation
#'
#' @author Sam Clifford
plot_results <- function(results,
                         reporting = 100, 
                         time = 7,
                         warning_text = NULL){
    n_data <- nrow(results$data)
    # NOTE(review): summarise_beds(), my_palette, int_breaks, large_txt and
    # rotate_x are defined elsewhere in this package.
    beds <- summarise_beds(results$beds)
    beds$var <- "Bed occupancy"
    beds$Status <- "Projected"
    results$data$Status <- "Reported"
    if (reporting < 100){
        # Scale observed admissions up to estimate the unreported remainder.
        unreported <- results$data
        unreported$Status <- "Unreported"
        unreported$n_admissions <- round(results$data$n_admissions*100/reporting - results$data$n_admissions)
        results$data <- rbind(results$data, unreported)
        results$data$Status <- factor(results$data$Status, levels = c("Unreported", "Reported"))
    }
    results$data$var <- "Cases per day"
    results$data$Date <- as.Date(results$data$date)
    # Drop the final admissions row before summarising.
    # NOTE(review): presumably the last day is excluded as incomplete — confirm.
    cases <- summarise_beds(results$admissions[-n_data,])
    cases$var <- "Cases per day"
    cases$Status <- "Projected"
    # Only show the "Unreported" legend entry when it is actually drawn.
    palette_to_use <- c("Reported",
                        "Unreported",
                        "Projected")[
                            c(TRUE,
                              reporting < 100,
                              TRUE)]
    results_plot <- 
        ggplot2::ggplot(
                     mapping = aes(x = Date)) +
        ggplot2::geom_col(data = cases,
                          aes(fill = Status,
                              y = Median),
                          width = 0.8) +
        ggplot2::geom_col(data = results$data,
                          aes(fill = Status,
                              y = n_admissions),
                          width = 0.8) +
        ggplot2::geom_linerange(data = cases,
                                aes(ymin = `lower 95%`,
                                    ymax = `upper 95%`)) +
        ggplot2::geom_ribbon(data = beds,
                             aes(ymin = `lower 95%`,
                                 ymax = `upper 95%`,
                                 fill = Status),
                             color = NA,
                             alpha = 0.25) +
        ggplot2::geom_line(data = beds, aes(y = Median,
                                            color = Status)) +
        ggplot2::facet_wrap( ~ var, ncol = 1, scales = "free_y") +
        ggplot2::theme_bw() +
        ggplot2::scale_fill_manual(values = my_palette[palette_to_use],
                                   breaks = palette_to_use,
                                   name   = "Reporting status") +
        ggplot2::scale_color_manual(values = my_palette[palette_to_use],
                                    breaks = palette_to_use,
                                    name   = "Reporting status",
                                    guide  = FALSE) +
        ggplot2::theme(legend.position = "bottom",
                       axis.title = element_blank(),
                       strip.background = element_blank(),
                       panel.border = element_rect(colour = "black"),
                       strip.text = element_text(size = 18)) +
        ggplot2::scale_y_continuous(limits = c(0, NA), breaks = int_breaks) +
        ggplot2::scale_x_date(date_label = "%d %b %y") +
        large_txt + rotate_x
    # Warn on the plot itself when fewer days of data than `time` were given.
    if (n_data < time){
        results_plot <- results_plot +
            ggplot2::annotate("text",
                              x = min(results$data$Date),
                              y = Inf, label = sprintf("Warning: Uploaded data\nshorter than %s", warning_text),
                              hjust=0, vjust=1.25, col = "#FE5000", cex=6,
                              fontface = "bold", alpha = 0.8)
    }
    results_plot
}
|
1f467af481b9a7b59c1575cdde9ed4f7c6773488
|
9e228340172992fbeb1940b4bc24f9041156171e
|
/cachematrix.R
|
f5f2048c54f341538983d69327a26375e4f4fffb
|
[] |
no_license
|
burkhara/ProgrammingAssignment2
|
41ad984e2de3b6b4aacd22ba4184102768bed4db
|
2fc4006bbc4b84489a90cc6593b9c8be06e6d42d
|
refs/heads/master
| 2020-12-26T02:59:34.040176
| 2015-06-18T04:25:08
| 2015-06-18T04:25:08
| 37,379,207
| 0
| 0
| null | 2015-06-13T16:38:01
| 2015-06-13T16:38:00
| null |
UTF-8
|
R
| false
| false
| 1,474
|
r
|
cachematrix.R
|
## makeCacheMatrix: build a cache-aware "matrix" — a list of closures that
## share a stored matrix and a lazily cached inverse:
##   set(y)          store a new matrix and invalidate the cached inverse
##   get()           return the stored matrix
##   setinverse(im)  store a computed inverse in the cache
##   getinverse()    return the cached inverse (NULL until it has been set)
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL
  list(
    set = function(y) {
      x <<- y
      inverse_cache <<- NULL  # new data invalidates any cached inverse
    },
    get = function() x,
    setinverse = function(imatrix) inverse_cache <<- imatrix,
    getinverse = function() inverse_cache
  )
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix.  If an inverse is already cached it is returned directly
## (announced with the message "getting cached data"); otherwise it is
## computed with solve(), stored via setinverse(), and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, store, return.  Assumes an invertible square matrix.
    inv <- solve(x$get())
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
4be380069ec7111640ee2a9d2e042e96893d14b3
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/mortality_code/mortality_estimation/45q15/space_time.r
|
3341d5fbd1b2ab0630103822bb9414823c27cde4
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,830
|
r
|
space_time.r
|
################################################################################
## Description: Defines the space-time model (2nd stage prediction model)
################################################################################
# Log-odds transform: log(p / (1 - p)).
logit <- function(x) {
  odds <- x / (1 - x)
  log(odds)
}
# Inverse log-odds transform: exp(x) / (1 + exp(x)).
inv.logit <- function(x) {
  e <- exp(x)
  e / (1 + e)
}
# Base-10 log-odds transform: log10(p / (1 - p)).
logit10 <- function(x) {
  log(x / (1 - x), base = 10)
}
# Inverse base-10 log-odds transform: 10^x / (1 + 10^x).
inv.logit10 <- function(x) {
  p10 <- 10^x
  p10 / (1 + p10)
}
## Second-stage space-time smoothing of first-stage residuals.
##
## For every region/sex/country/year cell, predicts the residual as a
## data-density-weighted combination of (1) a weighted linear regression and
## (2) a weighted mean of observed residuals, where weights decay with
## distance in time (controlled by `lambda`) and down-weight other countries
## in the region (controlled by `zeta`).
##
## Args:
##   data: data frame with at least ihme_loc_id, region_name, sex, year,
##         resid, vr, keep (and `type` when post_param_selection is TRUE).
##   region: optional character vector of regions to process; defaults to
##           all regions that have non-missing residuals.
##   lambda: time-weight decay parameter.
##   zeta: relative weight of a country's own data vs. the rest of its region.
##   min_year, max_year: inclusive year range to predict over.
##   post_param_selection: when TRUE, lambda/zeta are looked up per country
##         from a parameter file ("filepath" placeholders) instead of the
##         arguments.
##
## Returns (invisibly, as the value of the final assignment): a data frame
## with columns ihme_loc_id, sex, year, pred.2.resid, keep.
resid_space_time <- function(data, region=NULL, lambda=0.5, zeta=0.99, min_year=1950, max_year=2016, post_param_selection=F) {
  ## set up data frame to hold results (list of per-cell data frames)
  preds <- NULL
  count <- 0
  root <- ifelse(Sys.info()[1]=="Windows","filepath","filepath")
  ## calculate the data density: count of non-missing VR residuals per country
  data_density_numerator <- unlist(lapply(unique(data$ihme_loc_id), function(x) sum(data$ihme_loc_id == x & !is.na(data$resid) & !is.na(data$vr) & data$vr == 1)))
  names(data_density_numerator) <- unique(data$ihme_loc_id)
  region_iso3s <- lapply(unique(data$region_name), function(x) unique(data$ihme_loc_id[data$region_name == x]))
  names(region_iso3s) <- unique(data$region_name)
  ## bring in pops for lambda determination
  ## NOTE(review): `root`, `pop20mill`, `large_pops` are loaded but unused in
  ## the visible body; paths are "filepath" placeholders — confirm upstream.
  root <- ifelse(Sys.info()[1]=="Windows","filepath","filepath")
  pop20mill <- read.csv(paste0("filepath"))
  large_pops <- unique(pop20mill$ihme_loc_id)
  if(post_param_selection==T){
    params <- read.csv(paste0("filepath"))
  }
  ## loop through regions
  if (is.null(region)) region <- sort(unique(data$region_name[!is.na(data$resid)])) # Don't loop over a region if they don't have any data/residuals to smooth (ex: subnational Kenya)
  for (rr in region) {
    cat(paste(rr, "\n")); flush.console()
    ## loop through sex
    for (ss in c("male", "female")) {
      region.data <- data[data$region_name == rr & data$sex == ss & !is.na(data$resid),]
      ## loop through country
      countries <- sort(unique(data$ihme_loc_id[data$region_name==rr]))
      for (cc in countries) {
        to_keep <- unique(data$keep[data$ihme_loc_id==cc & data$region_name==rr])
        if(post_param_selection==T){
          type = unique(region.data[region.data$ihme_loc_id==cc,]$type)
          # if there is no data use no data parameters
          if(nrow(region.data[region.data$ihme_loc_id==cc,])==0){
            type="no data"
          }
          lambda=params[params$type==type & params$best==1,]$lambda
          # zeta steps down as the number of years of observed data decreases
          years_data = length(unique(region.data$year[region.data$ihme_loc_id==cc]))
          if(years_data >= 40){
            zeta = 0.99
          } else if(years_data <40 & years_data >= 30){
            zeta = 0.9
          } else if(years_data <30 & years_data >= 20){
            zeta = 0.8
          } else if(years_data <20 & years_data >= 10){
            zeta = 0.7
          } else{
            zeta = 0.6
          }
          # hand-tuned country-specific parameter overrides
          if(cc == "SWE_4944") {
            zeta = 0.99
          }
          if(cc == "SRB"){
            zeta = 0.8
            lambda = 0.5
          }
          if(cc == "CHN_518"){
            lambda = 0.4
          }
        }
        print(paste("zeta is ", zeta, "for country", cc))
        flush.console()
        print(paste("lambda is ", lambda, "for country", cc))
        flush.console()
        in.country <- (region.data$ihme_loc_id == cc)
        other.resids <- (sum(!in.country)>0)
        # for countries with data, find the first and last year of that data, for countries without data, find the first and last year of data in the region
        if (sum(in.country)>0) in.sample <- range(region.data$year[in.country]) else in.sample <- range(region.data$year)
        ## loop through years
        for (yy in min_year:max_year) {
          count <- count + 1
          year <- yy + 0.5
          # if (year<=in.sample[1] & subnational == F) year <- in.sample[1]
          # if (year>=in.sample[2] & subnational == F) year <- in.sample[2]
          # clamp the prediction year to the span of observed data
          if (year<=in.sample[1]) year <- in.sample[1]
          if (year>=in.sample[2]) year <- in.sample[2]
          # calculate time weights (tricube-style decay in |year distance|)
          t <- abs(region.data$year - year)
          w <- (1-(t/(1+max(t)))^lambda)^3
          # calculate space weights: rescale own-country weights so they carry
          # zeta/(1-zeta) of the total weight relative to the rest of the region
          if (other.resids) w[in.country] <- (zeta/(1-zeta))*(sum(w[!in.country])/sum(w[in.country]))*w[in.country]
          # fit variant 1: linear local regression (with a country dummy `dd`
          # when the country has any data of its own)
          model.data <- data.frame(resid=region.data$resid, year=region.data$year, dd=as.numeric(in.country), w=w)
          if (sum(in.country)==0) {
            linear <- predict(lm(resid ~ year, weights=w, data=model.data),
                              newdata=data.frame(year=year))
          } else {
            linear <- predict(lm(resid ~ year + dd, weights=w, data=model.data),
                              newdata=data.frame(year=year, dd=1))
          }
          # fit variant 2: fixed effects local regression (weighted mean)
          constant <- region.data$resid %*% (w/sum(w))
          # combine variants: lean on the linear fit in data-rich countries
          if(max(data_density_numerator[region_iso3s[[rr]]])!=0){
            data_density <- data_density_numerator[cc]/max(data_density_numerator[region_iso3s[[rr]]])
          }else{
            data_density=0
          }
          combined <- linear*data_density + constant*(1-data_density)
          preds[[count]] <- data.frame(ihme_loc_id=cc, sex=ss, year=(yy+0.5), pred.2.resid=combined, keep=to_keep, stringsAsFactors=F)
        } # close year loop
      } # close country loop
    } # close sex loop
  } # close region loop
  # Final assignment: its value is what the function returns (invisibly).
  preds <- do.call("rbind", preds)
} # close function
## Smooth the combined second-stage predictions with a loess fit, separately
## for each country-sex combination.
##
## Args:
##   data: data frame with columns ihme_loc_id, sex, year, pred.2.raw.
##
## Returns: `data` with a smoothed column pred.2.final added/overwritten.
loess_resid <- function(data) {
  for (cc in unique(data$ihme_loc_id)) {
    for (ss in unique(data$sex)) {
      ii <- (data$ihme_loc_id == cc & data$sex == ss)
      # Skip country-sex combinations with no rows: loess() errors on
      # empty data.
      if (!any(ii)) next
      data$pred.2.final[ii] <- predict(loess(pred.2.raw ~ year, span=0.3, data=data[ii,]))
    }
  }
  # BUG FIX: R has copy-on-modify semantics, so the edits above only touch a
  # local copy. The original function ended with the for loop and therefore
  # returned NULL, discarding all the smoothing work. Return the data frame.
  data
}
|
19043f40ee82432f2172045b34423cf5da52f38d
|
903da089f3ac659f7295a2b1d351981394e8bcdc
|
/inst/unitTests/runit.mcPaBaLarge.R
|
afbde627ead8750cba42803a4751831cf2cd6b44
|
[] |
no_license
|
cran/mcr
|
1f27b59cda2a87be199a8f6534bec6882154b042
|
069b879be631491ed07a54a0f348b1adbebf7867
|
refs/heads/master
| 2023-02-06T22:38:44.942104
| 2023-01-26T21:00:19
| 2023-01-26T21:00:19
| 17,697,375
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,254
|
r
|
runit.mcPaBaLarge.R
|
# TODO: Generates 34 testfunctions, one for each testcase taken from the Roche Diagnostics method comparison
# intranet module testcase collection. Results obtained from the approximative Passing-Bablok implementation
# (PaBaLarge) are compared to results of the exact implementation (PaBa).
#
# Author: schueta6
###############################################################################
# one could increase the number of bins (NBins) to minimize the differences, here we use the default setting of NBins=1e06
# Tolerance used when comparing PaBaLarge estimates against the exact PaBa.
PaBaLargePrecision <- 1e-04
cat("\n\n********************************************\nmcPaBaLarge.R method comparison test cases\n********************************************")
# NOTE(review): Windows-style relative path; assumes the working directory
# contains TestCaseCollection/testcases.RData, which defines `testcases`.
load(".\\TestCaseCollection\\testcases.RData", .GlobalEnv)
TCnames <- names(testcases)
#
# Generic test comparing the approximative Passing-Bablok implementation
# ("PaBaLarge") against the exact one ("PaBa") on a single dataset.
#
# Args:
#   Data: two-column matrix/data.frame (X in column 1, Y in column 2).
#   Name: testcase name, used for logging only.
#   Exception: when TRUE, both implementations are expected to fail
#              (checked with checkException); otherwise their parameter
#              estimates must agree within PaBaLargePrecision (checkEquals).
genericPaBaLargeTest <- function(Data, Name, Exception=FALSE)
{
    X <- Data[,1]
    Y <- Data[,2]
    # Log dataset diagnostics. These cat() calls were previously duplicated
    # verbatim in both branches; hoisted here so they cannot drift apart.
    cat("\n\nTestcase:", Name, "\n")
    NData <- nrow(Data)
    cat("\nN(Data) =", NData)
    NUData <- nrow(unique(Data))
    cat("\nN(unique(Data)) =", NUData)
    cat("\nN(Ties) =", NData-NUData)
    cat("\n\n")
    if(Exception)
    {
        # Pathological datasets: both implementations must raise an error.
        checkException(mcreg(X, Y, method.reg="PaBaLarge", method.ci="analytical",NBins=1e06)@para)
        checkException(mcreg(X, Y, method.reg="PaBa", method.ci="analytical")@para)
    }
    else
    {
        # Regular datasets: compare parameter estimates of both methods.
        resPaBaL <- mcreg(X, Y, method.reg="PaBaLarge", method.ci="analytical", NBins=1e06)@para
        resExact <- mcreg(X, Y, method.reg="PaBa", method.ci="analytical")@para
        checkEquals(resPaBaL, resExact, tolerance=PaBaLargePrecision)
    }
}
# imitate call-by-value argument passing
# Build a zero-argument test function that runs genericPaBaLargeTest on a
# fixed dataset. Copies the arguments into this closure's own environment
# ("call-by-value") so later changes to the caller's loop variables cannot
# affect the generated test.
cloneLocalArgs <- function(Data, Name, Exception)
{
    localData <- Data
    localName <- Name
    localExcp <- Exception
    function() {
        genericPaBaLargeTest(localData, localName, localExcp)
    }
}
# generate test-function for each dataset of the testcase collection
# (RUnit discovers functions named "test.*", so each dataset is wrapped in
# its own zero-argument function; cloneLocalArgs captures the loop values)
for( i in 1:length(testcases))
{
    Fname <- paste("test.PaBaLargeTestcase_", TCnames[i], sep="")
    # part_1_dataset_2 and part_1_dataset_12 are expected to raise exceptions
    assign( Fname, cloneLocalArgs(testcases[[i]], TCnames[i], TCnames[i] %in% c("part_1_dataset_2", "part_1_dataset_12")) )
}
# Regression test: PaBa and PaBaLarge must agree (to ~1e-7) for both slope
# measures ("radian" and "tangent"), on the creatinine example data and on
# a constructed dataset from the mcreg Rd documentation.
test.PaBaLarge.angle1 <- function()
{
    data(creatinine)
    crea <- na.omit(creatinine)
    # Exact vs. approximative fits for each slope measure (fixed RNG seeds).
    fit.PaBa.radian <- mcreg(crea[,1], crea[,2], method.reg="PaBa", slope.measure="radian", rng.seed=331, method.ci="analytical")
    fit.PaBa.tangent <- mcreg(crea[,1], crea[,2], method.reg="PaBa", slope.measure="tangent", rng.seed=420, method.ci="analytical")
    fit.PaBaLarge.radian <- mcreg(crea[,1], crea[,2], method.reg="PaBaLarge", slope.measure="radian", rng.seed=331, NBins=1e8, method.ci="analytical")
    fit.PaBaLarge.tangent <- mcreg(crea[,1], crea[,2], method.reg="PaBaLarge", slope.measure="tangent", rng.seed=420, NBins=1e8, method.ci="analytical")
    checkEquals(fit.PaBa.radian@para[,"EST"], fit.PaBaLarge.radian@para[,"EST"], tol=1e-7)
    checkEquals(fit.PaBa.tangent@para[,"EST"], fit.PaBaLarge.tangent@para[,"EST"], tol=1e-7)
    # pathological example from the mcreg-Rdoc
    x1 <- 1:10
    y1 <- 0.5*x1
    x <- c(x1,y1)
    y <- c(y1,x1)
    m1 <- mcreg(x,y,method.reg="PaBa",method.ci="analytical",slope.measure="radian")
    m2 <- mcreg(x,y,method.reg="PaBa",method.ci="analytical",slope.measure="tangent")
    m1.2 <- mcreg(x,y,method.reg="PaBaLarge",method.ci="analytical",slope.measure="radian", NBins=1e8)
    m2.2 <- mcreg(x,y,method.reg="PaBaLarge",method.ci="analytical",slope.measure="tangent", NBins=1e8)
    checkEquals(m1@para[,"EST"], m1.2@para[,"EST"], tol=1e-7)
    checkEquals(m2@para[,"EST"], m2.2@para[,"EST"], tol=1e-7)
}
|
ba8d98e0df3b8e14ce7efcabb710667108ed5d48
|
dee6c5bed839c814c08cad5306d44485352ebfc9
|
/R/ipsi.R
|
ac0fbf6337cbdb1626627d3b3b2cda0fa84b2237
|
[] |
no_license
|
ehkennedy/npcausal
|
108bc3ce94e2454163ca946335ded2d6ee8cc286
|
56a5ac117a29258b67b94874be662a171b5131f7
|
refs/heads/master
| 2022-02-27T03:12:28.468368
| 2021-02-25T01:59:18
| 2021-02-25T01:59:18
| 91,638,398
| 73
| 24
| null | 2022-01-31T22:01:11
| 2017-05-18T02:08:13
|
R
|
UTF-8
|
R
| false
| false
| 9,277
|
r
|
ipsi.R
|
#' @title Estimating effects of incremental propensity score interventions
#'
#' @description \code{ipsi} is used to estimate effects of incremental
#' propensity score interventions, i.e., estimates of mean outcomes if the odds
#' of receiving treatment were multiplied by a factor delta.
#'
#' @usage ipsi(y, a, x.trt, x.out, time, id, delta.seq, nsplits, ci_level = 0.95,
#' progress_bar = TRUE, return_ifvals = FALSE, fit,
#' sl.lib=c("SL.earth","SL.gam","SL.glm","SL.glmnet","SL.glm.interaction", "SL.mean","SL.ranger","rpart"))
#'
#' @param y Outcome of interest measured at end of study.
#' @param a Binary treatment.
#' @param x.trt Covariate matrix for treatment regression.
#' @param x.out Covariate matrix for outcome regression.
#' @param time Measurement time.
#' @param id Subject identifier.
#' @param delta.seq Sequence of delta increment values for incremental
#' propensity score intervention.
#' @param nsplits Integer number of sample splits for nuisance estimation. If
#' \code{nsplits = 1}, sample splitting is not used, and nuisance functions are
#' estimated n full sample (in which case validity of standard errors and
#' confidence intervals requires empirical process conditions). Otherwise must
#' have \code{nsplits > 1}.
#' @param ci_level A \code{numeric} value giving the level (1 - alpha) of the
#' confidence interval to be computed around the point estimate.
#' @param progress_bar A \code{logical} value indicating whether to print a
#' customized progress bar as various stages of computation reach completion.
#' The default is \code{TRUE}, printing a progress bar to inform the user.
#' @param return_ifvals A \code{logical} indicating whether the estimated
#' observation-level values of the influence function ought to be returned as
#' part of the output object. The default is \code{FALSE} as these values are
#' rarely of interest in standard usage.
#' @param fit How nuisance functions should be estimated. Options are "rf" for
#' random forests via the \code{ranger} package, or "sl" for super learner.
#' @param sl.lib sl.lib algorithm library for SuperLearner.
#' Default library includes "earth", "gam", "glm", "glmnet", "glm.interaction",
#' "mean", "ranger", "rpart.
#'
#' @section Details:
#' Treatment and covariates are expected to be time-varying and measured
#' throughout the course of the study. Therefore if \code{n} is the number of
#' subjects and \code{T} the number of timepoints, then \code{a}, \code{time},
#' and \code{id} should all be vectors of length \code{n}x\code{T}, and
#' \code{x.trt} and \code{x.out} should be matrices with \code{n}x\code{T} rows.
#' However \code{y} should be a vector of length \code{n} since it is only
#' measured at the end of the study. The subject ordering should be consistent
#' across function inputs, based on the ordering specified by \code{id}. See
#' example below for an illustration.
#'
#' @return A list containing the following components:
#' \item{res}{ estimates/SEs and uniform CIs for population means.}
#' \item{res.ptwise}{ estimates/SEs and pointwise CIs for population means.}
#' \item{calpha}{ multiplier bootstrap critical value.}
#'
#' @importFrom stats qnorm as.formula
#' @importFrom ranger ranger
#'
#' @export
#'
#' @examples
#' n <- 500
#' T <- 4
#'
#' time <- rep(1:T, n)
#' id <- rep(1:n, rep(T, n))
#' x.trt <- matrix(rnorm(n * T * 5), nrow = n * T)
#' x.out <- matrix(rnorm(n * T * 5), nrow = n * T)
#' a <- rbinom(n * T, 1, .5)
#' y <- rnorm(mean=1,n)
#'
#' d.seq <- seq(0.1, 5, length.out = 10)
#'
#' ipsi.res <- ipsi(y, a, x.trt, x.out, time, id, d.seq)
#' @references Kennedy EH. Nonparametric causal effects based on incremental
#' propensity score interventions.
#' \href{https://arxiv.org/abs/1704.00211}{arxiv:1704.00211}
#
# See the roxygen block above for the public contract. Inline comments below
# annotate the estimation stages; code is unchanged.
ipsi <- function(y, a, x.trt, x.out, time, id, delta.seq,
                 nsplits = 2, ci_level = 0.95,
                 progress_bar = TRUE, return_ifvals = FALSE,
                 fit = "rf",
                 sl.lib = c("SL.earth", "SL.gam", "SL.glm", "SL.glm.interaction", "SL.mean", "SL.ranger", "SL.rpart")) {
  # NOTE(review): require() inside a function is discouraged (it returns
  # FALSE instead of erroring when a package is missing); kept as-is.
  require("SuperLearner")
  require("earth")
  require("gam")
  require("ranger")
  require("rpart")
  # setup storage: n subjects x ntimes timepoints in long format; the outcome
  # y is only observed at the final timepoint.
  ntimes <- length(table(time))
  end <- max(time)
  n <- length(unique(id))
  ynew <- rep(NA, n * ntimes)
  ynew[time == end] <- y
  dat <- data.frame(time = time, id = id, y = ynew, a = a)
  # one column per delta value in the increment sequence
  k <- length(delta.seq)
  ifvals <- matrix(nrow = n, ncol = k)
  est.eff <- rep(NA, k)
  wt <- matrix(nrow = n * ntimes, ncol = k)
  cumwt <- matrix(nrow = n * ntimes, ncol = k)
  rt <- matrix(nrow = n * ntimes, ncol = k)
  vt <- matrix(nrow = n * ntimes, ncol = k)
  x.trt <- data.frame(x.trt)
  x.out <- data.frame(x.out); x.out$a <- a
  if (progress_bar) {
    pb <- txtProgressBar(
      min = 0, max = 2 * nsplits * length(delta.seq) + 3,
      style = 3
    )
  }
  # random sample-split assignment per subject, repeated across timepoints
  s <- sample(rep(seq_len(nsplits), ceiling(n / nsplits))[seq_len(n)])
  slong <- rep(s, rep(ntimes, n))
  if (progress_bar) {
    pbcount <- 0
  }
  for (split in seq_len(nsplits)) {
    if (progress_bar) {
      Sys.sleep(0.1)
      setTxtProgressBar(pb, pbcount)
      pbcount <- pbcount + 1
    }
    # fit treatment model (propensity scores) on out-of-split data,
    # predicting for everyone
    if (fit=="rf"){
      trtmod <- ranger::ranger(stats::as.formula("a ~ ."),
                               dat = cbind(x.trt, a = dat$a)[slong != split, ])
      dat$ps <- predict(trtmod, data = x.trt)$predictions
    }
    if (fit=="sl"){
      trtmod <- SuperLearner(dat$a[slong!=split], x.trt[slong!=split,],
                             newX = x.trt, SL.library = sl.lib, family = binomial)
      dat$ps <- trtmod$SL.predict
    }
    for (j in seq_len(k)) {
      if (progress_bar) {
        Sys.sleep(0.1)
        setTxtProgressBar(pb, pbcount)
        pbcount <- pbcount + 1
      }
      delta <- delta.seq[j]
      # compute weights: incremental propensity score ratio, cumulated over
      # time within subject
      wt[, j] <- (delta * dat$a + 1 - dat$a) / (delta * dat$ps + 1 - dat$ps)
      cumwt[, j] <- as.numeric(t(aggregate(wt[, j],
        by = list(dat$id),
        cumprod
      )[, -1]))
      vt[, j] <- (1 - delta) * (dat$a * (1 - dat$ps) -
        (1 - dat$a) * delta * dat$ps) / delta
      # fit outcome models by backward recursion from the final timepoint
      outmod <- vector("list", ntimes)
      rtp1 <- dat$y[dat$time == end]
      if (progress_bar) {
        Sys.sleep(0.1)
        setTxtProgressBar(pb, pbcount)
        pbcount <- pbcount + 1
      }
      for (i in seq_len(ntimes)) {
        t <- rev(unique(dat$time))[i]
        # counterfactual case for treatment: A = 1
        newx1 <- x.out[dat$time == t, ]
        newx1$a <- 1
        # counterfactual case for no treatment: A = 0
        newx0 <- x.out[dat$time == t, ]
        newx0$a <- 0
        if (fit=="rf"){
          outmod[[i]] <- ranger::ranger(stats::as.formula("rtp1 ~ ."),
                                        dat = cbind(x.out,rtp1)[dat$time == t & slong != split, ])
          m1 <- predict(outmod[[i]], data = newx1)$predictions
          m0 <- predict(outmod[[i]], data = newx0)$predictions
        }
        if (fit=="sl"){
          print(c(i,j)); flush.console()
          outmod[[i]] <- SuperLearner(rtp1[s!=split],
            x.out[dat$time==t & slong!=split,],SL.library = sl.lib,
            newX=rbind(newx1,newx0))
          m1 <- outmod[[i]]$SL.predict[1:dim(newx1)[1]]
          m0 <- outmod[[i]]$SL.predict[(dim(newx1)[1]+1):(dim(newx1)[1]+dim(newx0)[1])]
        }
        # blend the two counterfactual predictions under the shifted
        # (delta-incremented) propensity score, and recurse backwards
        pi.t <- dat$ps[dat$time == t]
        rtp1 <- (delta * pi.t * m1 + (1 - pi.t) * m0) /
          (delta * pi.t + 1 - pi.t)
        rt[dat$time == t, j] <- rtp1
      }
      # compute influence function values for subjects in this split
      ifvals[s == split, j] <- ((cumwt[, j] * dat$y)[dat$time == end] +
        aggregate(cumwt[, j] * vt[, j] * rt[, j],
          by = list(dat$id), sum
        )[, -1])[s == split]
    }
  }
  # compute estimator: mean influence function value per delta
  for (j in seq_len(k)) {
    est.eff[j] <- mean(ifvals[, j])
  }
  # compute asymptotic variance and pointwise Wald confidence limits
  sigma <- sqrt(apply(ifvals, 2, var))
  ci_norm_bounds <- abs(stats::qnorm(p = (1 - ci_level) / 2))
  eff.ll <- est.eff - ci_norm_bounds * sigma / sqrt(n)
  eff.ul <- est.eff + ci_norm_bounds * sigma / sqrt(n)
  # multiplier bootstrap for a uniform (over delta) critical value
  if (progress_bar) {
    Sys.sleep(0.1)
    setTxtProgressBar(pb, pbcount)
    pbcount <- pbcount + 1
  }
  eff.mat <- matrix(rep(est.eff, n), nrow = n, byrow = TRUE)
  sig.mat <- matrix(rep(sigma, n), nrow = n, byrow = TRUE)
  ifvals2 <- (ifvals - eff.mat) / sig.mat
  nbs <- 10000
  # Rademacher multipliers (+1/-1 with equal probability)
  mult <- matrix(2 * rbinom(n * nbs, 1, 0.5) - 1, nrow = n, ncol = nbs)
  maxvals <- sapply(seq_len(nbs), function(col) {
    max(abs(apply(mult[, col] * ifvals2, 2, sum) / sqrt(n)))
  })
  calpha <- quantile(maxvals, ci_level)
  eff.ll2 <- est.eff - calpha * sigma / sqrt(n)
  eff.ul2 <- est.eff + calpha * sigma / sqrt(n)
  if (progress_bar) {
    Sys.sleep(0.1)
    setTxtProgressBar(pb, pbcount)
    close(pb)
  }
  # res uses the uniform (bootstrap) bands; res2 the pointwise Wald bands
  res <- data.frame(
    increment = delta.seq, est = est.eff, se = sigma,
    ci.ll = eff.ll2, ci.ul = eff.ul2
  )
  res2 <- data.frame(
    increment = delta.seq, est = est.eff, se = sigma,
    ci.ll = eff.ll, ci.ul = eff.ul
  )
  # output
  if (return_ifvals) {
    return(invisible(list(
      res = res, res.ptwise = res2, calpha = calpha,
      ifvals = (ifvals - est.eff)
    )))
  } else {
    return(invisible(list(res = res, res.ptwise = res2, calpha = calpha)))
  }
}
|
94c9167ff1c979be7ec707ab01d1bf1bd1375a60
|
eea8d9d765e9129d5daca1c56aae1a555daf7677
|
/SweepingParameters.R
|
06b28f551fc58ff2466a5c02d7a16a551f07326e
|
[
"MIT"
] |
permissive
|
isaacracine/Bio381Scripting
|
8e5e97be38d3d4740da84a0a23ada9e5209828c7
|
c79048416b04540fd4f2ad8e20a255820809a9ce
|
refs/heads/main
| 2023-04-16T21:32:49.354471
| 2021-04-28T01:27:03
| 2021-04-28T01:27:03
| 335,397,286
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,553
|
r
|
SweepingParameters.R
|
# Isaac Racine
# Parameter Sweeping
# 18 Mar 2021
#------------------------------------------------
# writing functions for equations and sweeping over parameters
library(ggplot2)
# S = cA^z describes species area relationship
# the number of species that can be found in relation to the area
#---------------------------------------
# FUNCTION species_area_Curve
# description: creates a power function for S and A
# inputs: A is a vector of island areas
# c is the intercept constant
# z is the slope constant
# outputs: S is a vector of species richness
########################################
#---------------------------------------
# FUNCTION species_area_Curve
# description: species-area power law, S = c * A^z
# inputs: A is a vector of island areas
#         c is the intercept constant
#         z is the slope constant
# outputs: S is a vector of species richness values (same length as A)
########################################
species_area_Curve <- function(A = 1:5000,
                               c = 0.5,
                               z = 0.26) {
  c * A^z
} # end of species_area_Curve
#---------------------------------------
# quick sanity check: first few values of the default curve
head(species_area_Curve())
#---------------------------------------
# FUNCTION species_area_plot
# description: plots the species area curve with parameter values
# inputs: A = vector of area
# c = single value for c parameter
# z = single value for z parameters
# outputs: smoother curve with parameters printed in graph
########################################
#---------------------------------------
# FUNCTION species_area_plot
# description: draws the species-area curve as a line and annotates the
#              parameter values above the plotting region
# inputs: A = vector of areas
#         c = single value for the c parameter
#         z = single value for the z parameter
# outputs: base-graphics line plot with parameters printed in the margin
########################################
species_area_plot <- function(A = 1:5000,
                              c = 0.5,
                              z = 0.26) {
  richness <- species_area_Curve(A, c, z)
  plot(x = A, y = richness,
       type = "l",
       xlab = "Island Area",
       ylab = "S (number of species)",
       ylim = c(0, 2500))
  mtext(paste("c = ", c, " z = ", z), cex = 0.7)
} # end of species_area_plot
#---------------------------------------
species_area_plot()
# build a grid of plots (faceting)
# global variables
c_pars <- c(100, 150, 175)
z_pars <- c(0.10, 0.16, 0.26, 0.3)
par(mfrow = c(3, 4)) # grid arrange with 3 rows and 4 cols
# one panel per (c, z) parameter combination
for(i in seq_along(c_pars)){
  for(j in seq_along(z_pars)){
    species_area_plot(c = c_pars[i], z = z_pars[j])
  }
}
# Nick does not like while loop, would rather use a break in a for loop
#-------------- expand.grid ---------------
expand.grid(c_pars, z_pars)
#---------------------------------------
# FUNCTION sa_output
# description: summary stats for species area power function
# inputs: vector of predicted species richness values
# outputs: list of max - min, coefficient of variation
########################################
# FUNCTION sa_output
# description: summary stats for species area power function
# inputs: vector of predicted species richness values
# outputs: list with s_gain (max - min) and s_cv (coefficient of variation)
########################################
sa_output <- function(S = runif(10)) {
  # IDIOM FIX: the original default was runif(1:10), which only worked
  # because runif() takes length(n) when n is a vector; runif(10) is the
  # equivalent, explicit form.
  list(s_gain = max(S) - min(S),
       s_cv = sd(S) / mean(S))
} # end of sa_output
#---------------------------------------
sa_output()
#Build program body
# Global variables
Area <- 1:5000
c_pars <- c(10, 150, 175)
z_pars <- c(0.10, 0.16, 0.26, 0.30)
# set up model data frame: one row per (c, z) parameter combination
model_frame <- expand.grid(c = c_pars, z = z_pars)
str(model_frame)
# preallocate output columns, filled in the loop below
model_frame$SGain <- NA
model_frame$SCV <- NA
head(model_frame)
# cycle through model calculations
for(i in 1:nrow(model_frame)) {
  # generate S vector
  temp1 <- species_area_Curve(A=Area,
                              c=model_frame[i,1],
                              z=model_frame[i,2])
  #calculate output stats
  temp2 <- sa_output(temp1)
  # pass results to cols in df (columns 3 and 4 = SGain, SCV)
  model_frame[i, c(3,4)] <- temp2
}
print(model_frame)
#############################################
# parameter sweep redux with ggplot graphics
area <- 1:5
c_pars <- c(100, 150, 175)
z_pars <- c(0.1, 0.16, 0.26, 0.3)
#set up model frame: one row per (c, z, A) combination
model_frame <- expand.grid(c =c_pars,
                           z = z_pars,
                           A = area)
head(model_frame)
nrow(model_frame)
# add response variable
model_frame$S <- NA
# loop thru parameters and fill w sa function
for (i in 1:length(c_pars)){
  for (j in 1:length(z_pars)){
    model_frame[model_frame$c == c_pars[i] & model_frame$z == z_pars[j], "S"] <- species_area_Curve(A = area, c = c_pars[i], z = z_pars[j])
  }
}
head(model_frame)
#-------------- Lattice Plots---------------
p1 <- ggplot(data = model_frame)
# full facet grid: one panel per (c, z) combination
p1 + geom_line(mapping = aes(x = A, y = S)) +
  facet_grid(c~z)
p2 <- p1
p2 + geom_line(mapping = aes(x = A, y = S, group = z)) +
  facet_grid(.~c)
# the . in face grid says to combine all Z's onto same plot for c
p3 <- p1
p3 + geom_line(mapping = aes(x = A, y = S, group = c)) +
  facet_grid(z~.)
|
e64a4f4a548cc4f8aa0464a909862321eb6068c1
|
5f10af9b2b74606c75f4f7d2181a68f13c09d31e
|
/visualization_app/shinyApp/ui.R
|
e7ff5d0fee0b440285738a4e842940509badbcb8
|
[] |
no_license
|
latuji/fusion_capstone_project
|
13b5291147a92d495bcc3d7805accb5bff9434f2
|
3579767e7a2e66ba39b9f3fc52965aa5eebd3062
|
refs/heads/master
| 2021-05-30T02:07:24.289109
| 2015-09-09T03:14:13
| 2015-09-09T03:14:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,768
|
r
|
ui.R
|
library(shiny)
library(networkD3)
library(dplyr)
library(DT)
# Root directory of server-side fetched Twitter data.
# NOTE(review): data_path already ends in "/" and paste(..., sep = "/")
# adds another, producing "…twitterdata//tweets/"; harmless on most file
# systems but worth confirming.
data_path <- "../server_get/twitterdata/"
# merge multiple results by desc indexing on retweets amount
tweets <- dir(paste(data_path, "tweets/", sep = "/"))
top20 <- read.csv(paste(data_path, "influencers(20 posts).csv", sep = "/"))
top100 <- read.csv(paste(data_path, "top100.csv", sep = "/"))
tweets_top20 <- unique(top20$t_id)
# keep only tweet ids present both on disk and in the top-20 list,
# ordered by descending retweet count
tweets <- intersect(tweets,tweets_top20)
tweets <- top100 %>%
  filter(t_id %in% tweets) %>%
  arrange(desc(t_retweets))
tweets <- tweets$t_id
# centrality measures offered in the UI (label = internal value)
centra <- c("Alpha" = "alpha",
            "Eigen" = "eigen",
            "Power" = "power")
# Two-tab navbar UI: per-tweet retweet network analysis, and an
# influencer-finding dashboard.
shinyUI(navbarPage(theme="flatly.css",
                   "Fusion On Twitter",
                   tabPanel("Tweet Analysis",
                            fluidRow(
                              column(3,
                                     htmlOutput("website"),
                                     h4("Top 20 Tweet ID"),
                                     helpText("data from 6/12 to 8/11"),
                                     selectInput("tweet",
                                                 NULL,
                                                 tweets,612121494232145920),
                                     textOutput("info1"),
                                     textOutput("info2"),
                                     textOutput("info3"),
                                     hr(),
                                     h4("Alpha Centrality"),
                                     fluidRow(
                                       column(6,
                                              numericInput("alpha", "Alpha", 0.5,
                                                           min = .1, max = 1, step = .1)),
                                       column(6,
                                              numericInput("n", "Top N", 30,
                                                           min = 1, max = 100, step = 1))
                                     )),
                              column(9,
                                     tabsetPanel(
                                       tabPanel("Retweeting Network",
                                                br(),
                                                fluidRow(
                                                  column(8,
                                                         simpleNetworkOutput("rtNetwork")),
                                                  column(4,
                                                         htmlOutput("acTable")))),
                                       # simpleNetworkOutput("rtNetwork")),
                                       tabPanel("All Retweeters",
                                                DT::dataTableOutput("rtTable"))
                                     )
                              )
                            )
                   ),
                   tabPanel("Find Influencers",
                            fluidRow(
                              column(3,
                                     wellPanel(
                                       sliderInput("slider", label = "Number of most popular tweets", min = 0,
                                                   max = 20, value = 20),
                                       dateInput("date", label = "Since date", value = "2015-06-01")
                                     ),
                                     wellPanel(
                                       radioButtons("subject.source", label = "Subject Source",
                                                    choices = list("Section" = "section", "Topic" = "topic", "Hashtag" = "hashtag"),
                                                    selected = "section"
                                       )
                                     ),
                                     wellPanel(
                                       uiOutput("subject")
                                     )
                              ),
                              column(9,
                                     wellPanel(
                                       htmlOutput('barchart')
                                     ),
                                     wellPanel(
                                       dataTableOutput('table')
                                     )
                              )
                            )
                   )
))
|
96bf5ab8a6b53e1366b5a082a35643355d297e41
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkEditableGetChars.Rd
|
9abce743ec0d30a46b1f8639f0e12bf768b1c453
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 807
|
rd
|
gtkEditableGetChars.Rd
|
\alias{gtkEditableGetChars}
\name{gtkEditableGetChars}
\title{gtkEditableGetChars}
\description{Retrieves a sequence of characters. The characters that are retrieved
are those characters at positions from \code{start.pos} up to, but not
including \code{end.pos}. If \code{end.pos} is negative, then the the characters
retrieved are those characters from \code{start.pos} to the end of the text.}
\usage{gtkEditableGetChars(object, start.pos, end.pos)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkEditable}}}
\item{\verb{start.pos}}{start of text}
\item{\verb{end.pos}}{end of text}
}
\details{Note that positions are specified in characters, not bytes.}
\value{[character] a pointer to the contents of the widget as a
string.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
5022c497f89f91b83dc89ad0eb755c1d5ecba1f2
|
18b7a97254bf4f70adc6e4a7f1924e1e032f0878
|
/FOR6934/HW62a.R
|
7b12b9a4b895af3ec9b2c56ee9a8bb7fe36b2a05
|
[] |
no_license
|
hyshenmeng/Phil_Project
|
11ab66fc4d39a00448b38573887d3bc62dea0b42
|
0516d470ac48265c145488c526f7ee3d98ac7ea2
|
refs/heads/master
| 2021-03-22T02:16:45.644126
| 2017-09-25T02:36:05
| 2017-09-25T02:36:05
| 89,524,781
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,866
|
r
|
HW62a.R
|
set.seed(1111)
library(rjags)
#load data
# NOTE(review): hard-coded Windows path; confirm location of HW6.txt.
hw6<-read.table(file="F:/HW6.txt",header=TRUE)
x=hw6$x
dataList=list('x'=x)
#specify model
# Two-group normal model: observations 1-3 use mean mu_f (female), 4-15 use
# mu_m (male); shared precision `prec`, vague normal priors on both means.
modelString_f="
model{
for (i in 1:3){
x[i]~dnorm(mu_f,prec)
}
for (i in 4:15){
x[i]~dnorm(mu_m,prec)
}
mu_f~dnorm(0,0.01)
mu_m~dnorm(0,0.01)
prec~dgamma(1,1)
}
"
writeLines(modelString_f, con="TEMPmodel.txt")
#generate chains
jagSModel=jags.model(file="TEMPmodel.txt",data=dataList,n.chain=4,n.adapt=100)
nchain=4
#burn-in period
update(jagSModel,n.iter=1000)
#generate posterior samples
jagsSamples=jags.samples(jagSModel,c('mu_f','mu_m','prec'),n.iter=1000)
niter=1000
#trace plot for mu_f (one colored line per chain)
par(mfrow=c(1,1),mar=rep(4,4))
plot(NA,NA,xlim=c(0,niter),ylim=range(jagsSamples$mu_f))
for (i in 1:nchain) lines(1:niter,jagsSamples$mu_f[1,,i],col=i)
#trace plot for mu_m
par(mfrow=c(1,1),mar=rep(4,4))
plot(NA,NA,xlim=c(0,niter),ylim=range(jagsSamples$mu_m))
for (i in 1:nchain) lines(1:niter,jagsSamples$mu_m[1,,i],col=i)
#trace plot for prec
par(mfrow=c(1,1),mar=rep(4,4))
plot(NA,NA,xlim=c(0,niter),ylim=range(jagsSamples$prec))
for (i in 1:nchain) lines(1:niter,jagsSamples$prec[1,,i],col=i)
#trace plot for sig2 (variance = 1/precision)
par(mfrow=c(1,1),mar=rep(4,4))
plot(NA,NA,xlim=c(0,niter),ylim=range(1/jagsSamples$prec))
for (i in 1:nchain) lines(1:niter,1/jagsSamples$prec[1,,i],col=i)
densityplot<-function(x){
plot(density(x),type='l',xlab='',main='')
x_ci=quantile(x,c(0.025,0.975))
print(x_ci)
abline(v=x_ci,col='red',lty=3)
x_mean=mean(x)
print(x_mean)
x_var=var(x)
print(x_var)
abline(v=x_mean)
}
#density plot for mu_f
densityplot(jagsSamples$mu_f)
#density plot for mu_m
densityplot(jagsSamples$mu_m)
#density plot for prec
densityplot(jagsSamples$prec)
#density plot for sig2 (variance = 1/precision)
densityplot(1/jagsSamples$prec)
#mu_f v.s. mu_m
# posterior probability that |mu_f| < |mu_m|
diff=abs(jagsSamples$mu_f)-abs(jagsSamples$mu_m)
mean(diff<0)
|
82e3111d0942d7243a1d39da51c33c6cb8b61fdc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/distr/examples/distrMASK.Rd.R
|
2bd00b570157cf223c31590fba3ec60acafdfcea
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 214
|
r
|
distrMASK.Rd.R
|
library(distr)
### Name: distrMASK
### Title: Masking of/by other functions in package "distr"
### Aliases: distrMASK MASKING
### Keywords: programming distribution documentation
### ** Examples
# Extracted Rd example: report masking information for package "distr".
distrMASK()
|
2fe510f2a8d1e1f91c7ff6afa5b680300776f3a6
|
2d4c54ce02f33aa32d45e50f5eae21f3b7646db3
|
/man/transectHeading.Rd
|
f26044eb92c0995691a55f00aace9936f813cbb1
|
[] |
no_license
|
bemeneses/rivSurveyR
|
6100490cbd2934891eeb9eaee3175d1856cc4801
|
097ae8487e6553de2618314915a793bf20e0a8af
|
refs/heads/master
| 2020-03-14T13:23:15.241829
| 2018-04-15T21:24:28
| 2018-04-15T21:24:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,175
|
rd
|
transectHeading.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transectHeading.R
\name{transectHeading}
\alias{transectHeading}
\title{transectHeading}
\usage{
transectHeading(x, y, velE, velN, depth, flowHeading)
}
\arguments{
\item{x}{A vector of x coordinates.}
\item{y}{A vector of y coordinates. Must be the same length as \code{x}.}
\item{velE}{A vector of velocities to the east. Must be the same length as \code{x}.}
\item{velN}{A vector of velocities to the north. Must be the same length as \code{x}.}
\item{depth}{A vector of water depths. Must be the same length as \code{x}.}
\item{flowHeading}{Optional. Mean direction of flow over the cross-section. If provided velE, velN, and depth may be omitted.}
}
\value{
Returns the compass heading of a transect
}
\description{
Determines the compass heading of a cross-section transect, assuming transect starts at river right
}
\examples{
data(vels)
velSub <- vels[transectName == "t1",,]
transectHeading(velSub$UTM_X_Proj, velSub$UTM_Y_Proj, velSub$Mean.Vel.E, velSub$Mean.Vel.N, velSub$depth)
#If flow heading is known
transectHeading(velSub$UTM_X_Proj, velSub$UTM_Y_Proj, flowHeading = 149)
}
|
7bd23aaf02ae3d4c9a553468e2273472a0678a0a
|
551b9335dcc91791535095126beb86b4bd132a06
|
/Analysis/colorfunction.R
|
666a71480213afe67f3f88b5fe74217e5e8ee8b5
|
[
"MIT"
] |
permissive
|
dungates/ImagePlotting
|
214a8d4488327f8e0e6e16c85fd119277c641cf8
|
b8bf80a4a086e6938fa0c159147a9822a13d489c
|
refs/heads/master
| 2023-07-15T15:50:44.380292
| 2021-08-11T17:52:30
| 2021-08-11T17:52:30
| 325,050,470
| 1
| 2
|
NOASSERTION
| 2021-06-25T20:32:55
| 2020-12-28T15:41:32
|
R
|
UTF-8
|
R
| false
| false
| 1,901
|
r
|
colorfunction.R
|
# color distance
library(colordistance)
# Compute summary color statistics (RGB, HSV, hue region, luminance) for an
# image and publish them to the global data frame `Y`.
#
# NOTE(review): this function shadows base::colors(); renaming it would be
# safer, but the name is kept for backward compatibility.
#
# Args:
#   X: path to an image file, forwarded to colordistance::loadImage().
#      BUG FIX: the original ignored X and always read the global
#      `images$local_path`.
#   Y: unused formal argument, retained for interface compatibility. The
#      result is assigned to the *global* variable `Y` via `<<-` (legacy
#      side effect) and also returned invisibly.
colors <- function(X, Y) {
  loader <- loadImage(X, sample.size = 5000)
  # RGB channel summaries, rescaled from [0, 1] back to 0-255
  rgb_px <- data.frame(loader$filtered.rgb.2d)
  mean_red <- mean(rgb_px$r * 255)
  deviation_red <- sd(rgb_px$r * 255)
  mean_blue <- mean(rgb_px$b * 255)
  deviation_blue <- sd(rgb_px$b * 255)
  mean_green <- mean(rgb_px$g * 255)
  deviation_green <- sd(rgb_px$g * 255)
  # HSV channel summaries
  hsv_px <- data.frame(loader$filtered.hsv.2d)
  mean_hue <- mean(hsv_px$h)
  deviation_hue <- sd(hsv_px$h)
  mean_saturation <- mean(hsv_px$s)
  deviation_saturation <- sd(hsv_px$s)
  mean_value <- mean(hsv_px$v)
  deviation_value <- sd(hsv_px$v)
  # Six-hue classification based on the ordering of the mean RGB channels
  # (logic unchanged from the original).
  # ROBUSTNESS FIX: initialize to NA so an unmatched channel ordering no
  # longer crashes the data.frame() call below with "object not found".
  hue_region <- NA_character_
  if (mean_blue > mean_red) {
    if (mean_red >= mean_green) {
      hue_region <- "Violet"
    }
    else {
      if (mean_blue < mean_green) {
        hue_region <- "Spring Green"
      }
    }
  }
  if (mean_green > mean_red) {
    if (mean_red >= mean_blue) {
      hue_region <- "Chartreuse"
    }
    else {
      if (mean_green < mean_blue) {
        hue_region <- "Azure"
      }
    }
  }
  if (mean_red >= mean_green) {
    if (mean_green >= mean_blue) {
      hue_region <- "Orange"
    }
    else {
      if (mean_red < mean_blue) {
        hue_region <- "Violet"
      }
    }
  }
  # luminance: simple mean of the three channel means
  luminance <- (mean_red + mean_blue + mean_green) / 3
  # brightness contrast: mean of the channel standard deviations
  lum_contrast <- (deviation_red + deviation_blue + deviation_green) / 3
  # NOTE(review): deviation_saturation appears twice and deviation_value is
  # omitted, exactly as in the original — kept so downstream column
  # positions are unchanged.
  result <- data.frame(
    mean_red, deviation_red, mean_blue, deviation_blue, mean_green, deviation_green,
    mean_hue, deviation_hue, mean_saturation, deviation_saturation, mean_value,
    hue_region, deviation_saturation, luminance, lum_contrast
  )
  # Legacy side effect: publish to the global environment (Y formal is
  # skipped by `<<-`, so this writes the global Y).
  Y <<- result
  invisible(result)
}
# Populate the global `Y` with color statistics for the downloaded image.
colors(images$local_path)
|
c830b32eefc6afe816bce7993f708c0d1b9dd594
|
93a78245d48f93e652ae3d1717613966e40f21b4
|
/man/knitr_colours.Rd
|
9f82e499947513badeb4069bfd7d09cc57118331
|
[] |
no_license
|
theoroe3/jrNotes
|
9e08737a594909d69ee82c7716711c4df77c4540
|
213bd237edbc6eb0b5caa8f018839279bb200760
|
refs/heads/master
| 2021-08-07T03:06:18.494378
| 2017-11-07T11:27:46
| 2017-11-07T11:27:46
| 108,836,307
| 0
| 0
| null | 2017-10-30T10:36:07
| 2017-10-30T10:36:07
| null |
UTF-8
|
R
| false
| true
| 431
|
rd
|
knitr_colours.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knitr_code_hooks.R
\name{knitr_colours}
\alias{knitr_colours}
\alias{knitr_formatting}
\title{knitr code hooks}
\usage{
knitr_colours()
knitr_formatting()
}
\description{
These functions were hacked by Jamie. No-one really knows what they do, or how
they work. In fact, I'm not convinced that knitr_formatting is correct. What's
color1 and color2????
}
|
4004d564164b479da3cc592a6db985e5ce1ce399
|
df1c4feee3da7e39f233fa45ec4dc34338d1a300
|
/man/plot.pafm.Rd
|
9f6317c8ab48954dc6f80457756480da25ff74a9
|
[] |
no_license
|
AvinashAcharya/factorAnalytics
|
1abf7e436417f63f938504733feb791186805e91
|
dd3572c5454e4f1691bbf76009144931383983a6
|
refs/heads/master
| 2020-04-05T15:17:00.784572
| 2018-10-01T22:14:56
| 2018-10-01T22:14:56
| 59,917,661
| 25
| 20
| null | 2016-05-28T22:55:24
| 2016-05-28T22:55:24
| null |
UTF-8
|
R
| false
| true
| 1,763
|
rd
|
plot.pafm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.pafm.r
\name{plot.pafm}
\alias{plot.pafm}
\title{plot \code{"pafm"} object}
\usage{
\method{plot}{pafm}(x, which.plot = c("none", "1L", "2L", "3L"),
max.show = 6, date = NULL, plot.single = FALSE, fundName,
which.plot.single = c("none", "1L", "2L", "3L"), ...)
}
\arguments{
\item{x}{object of class \code{"pafm"} created by
\code{paFm}.}
\item{which.plot}{Integer indicates which plot to create: "none" will
create a menu to choose. Default is none.\cr
1 = attributed cumulative returns,\cr
2 = attributed returns on date selected by user,\cr
3 = time series of attributed returns}
\item{max.show}{Maximum assets to plot. Default is 6.}
\item{date}{Indicates for attributed returns, the date format should be
xts compatible.}
\item{plot.single}{Plot a single asset of lm class. Default is FALSE.}
\item{fundName}{Name of the portfolio to be plotted.}
\item{which.plot.single}{Integer indicates which plot to create: "none"
will create a menu to choose. Default is none.\cr
1 = attributed cumulative returns,\cr
2 = attributed returns on date selected by user, \cr
3 = time series of attributed returns}
\item{...}{more arguments for \code{chart.TimeSeries} used for plotting
time series}
}
\description{
Generic function of plot method for paFm.
Either plot all assets or choose a single asset to plot.
}
\examples{
\dontrun{
data(managers)
fit <- fitTsfm(asset.names=colnames(managers[,(1:6)]),
factor.names=c("EDHEC LS EQ","SP500 TR"), data=managers)
fm.attr <- paFm(fit)
# plot all
plot(fm.attr, legend.loc="topleft", max.show=6)
dev.off()
# plot only one asset, "HAM1"
plot(fm.attr, plot.single=TRUE, fundName="HAM1")
}
}
\author{
Yi-An Chen.
}
|
6396e6e9d8a3a7738ed850fe6f3cfb898ecfe11f
|
77c813398c5374f0f07598cd3a39af3be8747f2f
|
/Machine Learning with R/Lecture/lecture1_1.R
|
62b1fcb03b4b92c5ba124511840bc665adea054f
|
[] |
no_license
|
Kaicheng1995/CS_Intro
|
5fa2808939f43a27a5fbe1da9308472181dff06e
|
f8efaf57b91029ddc30aaf2bd21fac52de576f24
|
refs/heads/master
| 2023-01-31T02:48:52.735772
| 2020-12-14T19:18:09
| 2020-12-14T19:18:09
| 240,658,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 443
|
r
|
lecture1_1.R
|
# Scrape movie details (director, budget) from an IMDb title page.
# NOTE(review): install.packages() calls in a script re-install on every
# run -- normally these would live outside the script.
install.packages('rvest')
install.packages('stringr')
install.packages('dplyr')
library('rvest')
library('stringr')
library('dplyr')
# download page info
# (the hard-coded title tt4154756 is "Avengers: Infinity War")
imdb = read_html('https://www.imdb.com/title/tt4154756/')
# Director name, extracted from the credit-summary block via CSS selector.
director = imdb %>%
html_nodes(".summary_text+ .credit_summary_item a") %>%
html_text()
# Raw budget line from the "Details" section (label + amount + notes).
budget = imdb %>%
html_nodes("#titleDetails .txt-block:nth-child(12)") %>%
html_text()
# Keep just the amount by slicing off the label and trailing text.
# NOTE(review): the hard-coded offsets (22, -34) are brittle and break if
# IMDb changes the surrounding markup -- confirm before reuse.
str_sub(budget,22,-34)
|
0d5428ebb595a643aa752dbc393e398969726a4f
|
aec871d843c4bdb8297980ed4b7662834413b891
|
/homeworks/taller.R
|
21e31768e732ba627b5802b6c0ef7f732bcacbe0
|
[] |
no_license
|
JoseCortezz25/R-Codes
|
28b7c0f262e15b8b56bd6bfed7ecb839fb803bc7
|
082935263bdb09a75c357c0ee4b3d1ac145e1729
|
refs/heads/master
| 2023-03-20T10:51:11.190228
| 2021-03-11T19:40:02
| 2021-03-11T19:40:02
| 345,161,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,105
|
r
|
taller.R
|
#revisando estructura
class(est_pacientes)
#conversion del tipo
pac <- as.matrix.data.frame(est_pacientes2)
pac2 <- as.matrix.data.frame(est_pacientes2)
pac
pac2
#otra manera
if(pac[,1] == "low"){
pac[1,1]
}
#otra manera (optima pero mas o menos)
ifelse(pac[,1] == "low", "bajo", pac[,1])
#case when (debo importar la libreria)
library(dplyr)
#pac[,column] == paste("",match_value1) ~ paste("",new_value1),
#pac[,column] == paste("",match_value2) ~ paste("",new_value2),
#pac[,column] == paste("",match_value3) ~ paste("",new_value3),
# -------------- PASO 4---------------------
# Recode the values of one column of a character matrix.
#
# Args:
#   matrix: character matrix whose column `column` is recoded.
#   number_options: how many match/replacement pairs to apply (2 or 3);
#     any other value returns NULL invisibly, as before.
#   column: column index to recode.
#   match_value1..match_value3: original values to look for.
#   new_value1..new_value3: replacement values, pairwise with the above.
#
# Returns:
#   A character vector the length of the column; entries matching none of
#   the patterns become NA (dplyr::case_when semantics).
filtercolumn <- function(matrix, number_options, column,
                         match_value1, match_value2, match_value3,
                         new_value1, new_value2, new_value3){
  if(number_options == 3){
    print("opcion 3")
    # Bug fix: the original left a trailing comma after the last formula,
    # which makes dplyr::case_when() reject the call.
    return(case_when(
      matrix[,column] == match_value1 ~ new_value1,
      matrix[,column] == match_value2 ~ new_value2,
      matrix[,column] == match_value3 ~ new_value3
    ))
  }
  if(number_options == 2){
    print("opcion 2")
    return(case_when(
      matrix[,column] == match_value1 ~ new_value1,
      matrix[,column] == match_value2 ~ new_value2
    ))
  }
}
pac3[,1] <- filtercolumn(pac3, 3, 1, "low", "mid", "high", "Bajo", "Medio", "Alto")
pac3[,2] <- filtercolumn(pac3, 3, 2, "low", "mid", "high", "Bajo", "Medio", "Alto")
pac3[,3] <- filtercolumn(pac3, 2, 3, "excellent", "good", "", "Excelente", "Bueno", "")
pac3[,4] <- filtercolumn(pac3, 3, 4, "low", "mid", "high", "Bajo", "Medio", "Alto")
pac3[,5] <- filtercolumn(pac3, 3, 5, "stable", "unstable", "mod-stable", "Estable", "Inestable", "Medio Estable")
pac3[,6] <- filtercolumn(pac3, 3, 6, "stable", "unstable", "mod-stable", "Estable", "Inestable", "Medio Estable")
pac3[,7] <- filtercolumn(pac3, 3, 7, "stable", "unstable", "mod-stable", "Estable", "Inestable", "Medio Estable")
pac3
pac3 <- pac
pac3
# -----------------------------------
# --------------- PASO 5 --------------------
# Paso 5: Cambiar el tipo de dato char a numerico en la columna 8
#pasar a data frame
pac3 <- as.data.frame(pac3)
pac3
#pasando de character a numerico
pac3_new2 <- as.matrix.data.frame(pac3_new2)
pac3_new2[,8] = as.numeric(pac3_new2[,8])
pac3_new2[,8]
columnx8 = as.numeric(pac3_new2[,8])
columnx8
#quitandole los 5
summary(columnx8)
media <- round(mean(columnx8, na.rm=TRUE), 1)
media
#añadiendo
columnx8[47] <- media
columnx8[49] <- media
columnx8[71] <- media
columnx8
#pac3[,8]
#pac3_new <- pac3
#pac <-
mean(pac3[,8])
# ----------------- Paso 7 --------------------
#Paso 7: Eliminar columna 8 de la matriz original y colocarla al final con los datos ya limpios de NA
pac3_new2
pac3_new3 <- pac3_new2
pac3_new3
pac3_new3
pac3_new2 <- pac3_new2[, -8]
pac3_new2
pac3_new2 <- cbind(pac3_new2, columnx8)
datosnume <- columnx8
datosnume
#datosnume
pac3_new2 <- cbind(pac3_new2, datosnume)
pac3_new2
# ---------------------------------------------
# --------------------- Paso 8 ------------------------
#Paso 8: Convertir la matriz a dataframes se usa as.data.frame, revisar igualmente la estructura con str.
pac3_new2 <- as.data.frame(pac3_new2)
class(pac3_new2)
str(pac3_new2)
pac3_new2
# -----------------------------------------------------
# ------------------------Paso 9 ---------------------
#Paso 9: Convertir la ultima columna a dato numérico ya que es un factor.
class(pac3_new2[,9])
pac3_new2[,9] <- as.numeric(pac3_new2[,9])
pac3_new2[,9]
str(pac3_new2)
# -----------------------------------------------------
# ------------------------ Paso 9 ---------------------
#Paso 9: Convertir la ultima columna a dato numérico ya que es un factor.
pac3_new2[,9] <- as.numeric(as.character(pac3_new2[,9]))
str(pac3_new2)
# -----------------------------------------------------
# ------------------------ Paso 10 -----------------------
#Paso 10: Colocarle nombres de columnas y filas al dataframe.
pac3_new2
matriz_datos <- pac3_new2
colnames( pac3_new2 ) <- c("L-CORE", "L-SURF", "L-O2", "L-BP", "SURF-STBL", "CORE-STBL", "BP-STBL", "COMFORT", "ADM-DECS")
pac3_new2
i <- 0
while(i < count(pac3_new2)){
i <- i + 1
rownames(pac3_new2)[i] <- paste("Paciente ", i)
}
rownames(pac3_new2)[90] <- "Paciente 90"
addnamesrows(pac3_new2)
pac3_new2
length(matriz_datos)
paste("parciena", 1)
# -----------------------------------------------------
# ------------------------ Paso 11 -----------------------
#Paso 11: Algunas gráficas de interés.
install.packages("ggplot2")
library("ggplot2")
names <-
Plantas = c(15,16,18,18,12,12,25,10,15,22,14,14,16,4,8,5,7,3,9,12) # Plantas nacidas por m2
Parcela = paste("P", 1:20)
Tratamiento = rep((c("Tratado","Testigo")),c(10,10))
df = data.frame(Tratamiento, Parcela, Plantas)
df
ggplot(data=df, aes(x=Tratamiento, y=Plantas, fill=Parcela)) +
geom_bar(stat="identity", position="dodge")
namesrows = paste("Paciente", 1:90)
namesrows
namescolumns <- c("Excelente", "Bueno")
namescolumns
niveles <- rep((c("Excelente", "Bueno")), pac3_new2["L-O2"])
pac3_new2["L-O2"]
variablita <- table(pac3_new2["L-O2"])
variablita
pac3_new2["L-O2"]
resumen <- summary(pac3_new2["L-O2"])
colors <- c("blue", "red")
barplot(variablita, xlab = "Niveles de saturación de O2", ylab = "Pacientes", main="Niveles de saturación de O2", col = colors)
class(as.matrix.data.frame(pac3_new2["L-O2"]))
class(as.data.frame.factor(pac3_new2["L-O2"]))
#GRAFICA 1
colors <- c("blue", "red")
ggplot(pac3_new2, aes( x = pac3_new2$`L-O2` )) +
geom_bar(fill=colors) +
labs(title="Niveles de saturación de O2", x = "Niveles de saturación de O2", y = "Pacientes") +
theme_dark()
#GRAFICA 2
colors <- c("blue", "yellow", "green")
ggplot(pac3_new2, aes( x = pac3_new2[,"COMFORT"] )) +
geom_bar(fill=colors) +
labs(title="Estado de Pacientes Pendientes de Salida", x = "Estados", y = "Pacientes") +
theme_dark()
#GRAFICA 3
ggplot(pac3_new2, aes( x = pac3_new2[,"ADM-DECS"], y = pac3_new2[,"COMFORT"])) +
geom_bar(stat = "identidad")
pac3_new2
ggplot(pac3_new2, aes( x=as.matrix.data.frame(pac3_new2["L-O2"]))) +
geom_bar(fill=colors) +
labs(title="Niveles de saturación de O2", x = "Niveles de saturación de O2", y = "Pacientes") +
theme_dark()
#SECUENCIA PARA AÑADIRLE NOMBRES A LAS FILAS
seq(1, 90)
# -----------------------------------------------------
pac3
results <- filtercolumn(pac, 3, 1, "low", "mid", "high", "Bajo", "Medio", "Alto")
results
results2 <- filtercolumn(pac, 2, 3, "excellent", "good", "", "Excelente", "Bueno","")
results2
pac[,1]
# Bug fix: each case_when() below ended its last formula with `"Alto".` --
# the stray period is a syntax error in R and has been removed.
conversionColumn1 <- case_when(
  pac[,1] == "low" ~ "Bajo",
  pac[,1] == "mid" ~ "Medio",
  pac[,1] == "high" ~ "Alto"
)
# NOTE(review): columns 2 and 3 were presumably intended here (pac[,2],
# pac[,3]); kept pac[,1] as written -- confirm with the author.
conversionColumn2 <- case_when(
  pac[,1] == "low" ~ "Bajo",
  pac[,1] == "mid" ~ "Medio",
  pac[,1] == "high" ~ "Alto"
)
conversionColumn3 <- case_when(
  pac[,1] == "low" ~ "Bajo",
  pac[,1] == "mid" ~ "Medio",
  pac[,1] == "high" ~ "Alto"
)
#generar vector
#pasarlo a la columna
class(pac[,1])
holaa <- as.factor(pac[,1])
sadasd <- ad.data.frame(matrizzzzzzz)
|
1140be37eaf7f7e6ac224bb27942a50adb2c6923
|
ff6cd64471c3dd38fb4b8ed3d5b5f816e9450063
|
/rangesurvey/man/reorder_table_columns.Rd
|
2274a629ac7ddf0d837108c181a2e9cd56382a39
|
[
"MIT"
] |
permissive
|
JamieCranston/RangeShift_survey
|
c9dcc97e9a433079138633f23f97b59c39b777e0
|
71f9318f650bc9e02389c227c77b16cfda2305cb
|
refs/heads/main
| 2023-04-14T02:46:13.075353
| 2022-03-20T12:08:55
| 2022-03-20T12:08:55
| 471,427,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 373
|
rd
|
reorder_table_columns.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reorder_table_columns.R
\name{reorder_table_columns}
\alias{reorder_table_columns}
\title{reorder_table_columns}
\usage{
reorder_table_columns(data)
}
\arguments{
\item{data}{respondent_table_clean}
}
\value{
respondent_table_clean with reordered columns
}
\description{
reorder_table_columns
}
|
fe83248aace30a26969d5409c12cc7dd58fc91d5
|
56e22dd051b4ecc6bf96a7fa93d6dcecc74eebcb
|
/R/utils.R
|
b55e31b7a23d43ea7312c1f2c1c0da601718802f
|
[
"MIT"
] |
permissive
|
yjunechoe/hrbragg
|
44337b14d1a23b3d3757ac5dae3c2899913ff01b
|
8f8b1f098d02329632fff26a89083237fa8cc75f
|
refs/heads/master
| 2023-06-14T13:32:21.889462
| 2021-07-10T12:36:19
| 2021-07-10T12:36:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 329
|
r
|
utils.R
|
# Lightweight OS-detection helpers for platform-specific behaviour.

# TRUE when running on Windows (the only .Platform$OS.type besides "unix").
is_windows <- function() {
  identical(.Platform$OS.type, "windows")
}

# TRUE when running on macOS.
is_mac <- function() {
  identical(Sys.info()[["sysname"]], "Darwin")
}

# TRUE when running on Linux.
is_linux <- function() {
  identical(Sys.info()[["sysname"]], "Linux")
}

# Short platform tag: "win", "mac" or "linux"; errors on anything else.
# Checks are ordered win -> mac -> linux, matching the original.
platform <- function() {
  detected <- c(win = is_windows(), mac = is_mac(), linux = is_linux())
  if (!any(detected)) {
    stop("unknown platform")
  }
  names(detected)[which(detected)[1]]
}
|
2c433dafb0e31e5da072353f7b4158cc8aeb105d
|
549f43e85a073d6c7b992897c526039da6bcb1b1
|
/code.R
|
7bdd191a24575a19b1caaec79f28ef47ea88b4f2
|
[] |
no_license
|
snekkje/RepData_PeerAssessment2
|
3ddd3baa608fed8932635712d73431a0a8b4a2ee
|
bbb8d1d9becc6887634ce1ee8374b83b42074d03
|
refs/heads/master
| 2021-01-10T03:33:28.404461
| 2016-03-02T09:27:05
| 2016-03-02T09:27:05
| 52,649,465
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,457
|
r
|
code.R
|
# Exploratory analysis of the NOAA Storm Database: which event types are
# most harmful to population health, and which are the most expensive.
library(dplyr)
library(ggplot2)
# Download/read of the raw data is left commented out; `StormData` is
# assumed to already exist in the workspace -- TODO confirm.
#temp <- tempfile()
#download.file("https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2FStormData.csv.bz2",
#temp, mode = "wb")
#StormData <- read.csv("here.csv.bz2")
#unlink(temp)
# --- Population health: fatalities + injuries per event type ----
StormDataHarm <- select(StormData, EVTYPE, FATALITIES, INJURIES)
StormDataHarm <- mutate(StormDataHarm, HARMED = FATALITIES + INJURIES)
# Total people harmed per event type, sorted descending.
aggHarm <- aggregate(StormDataHarm$HARMED, list(Event = StormDataHarm$EVTYPE),
sum, na.rm = TRUE)
aggHarm <- arrange(aggHarm, desc(x))
# Bar chart of the five most harmful event types.
ggplot(data = aggHarm[1:5,], aes(Event, x)) + geom_bar(stat="identity")
# --- Economic damage: property damage scaled by its exponent code ----
StormDataExpense <- select(StormData, EVTYPE, PROPDMG, PROPDMGEXP, CROPDMG, CROPDMGEXP)
# K/M/B exponent codes mean thousands/millions/billions; anything else
# becomes NA. NOTE(review): CROPDMG is selected but never added into
# EXPENSE -- confirm whether crop damage was meant to be included.
StormDataExpense <- mutate(StormDataExpense, EXPENSE =
{ifelse(PROPDMGEXP == "K" | PROPDMGEXP == "k", PROPDMG * 1000,
ifelse(PROPDMGEXP == "M" | PROPDMGEXP == "m", PROPDMG * 1000000,
ifelse(PROPDMGEXP == "B" | PROPDMGEXP == "b", PROPDMG * 1000000000,
NA)))})
# Total expense per event type, sorted descending.
aggExpense <- aggregate(StormDataExpense$EXPENSE, list(Event = StormDataExpense$EVTYPE),
sum, na.rm = TRUE)
aggExpense <- arrange(aggExpense, desc(x))
# Bar chart of the five costliest event types (labels abbreviated).
ggplot(data = aggExpense[1:5,], aes(Event, x)) + geom_bar(stat="identity") +
scale_x_discrete(labels = abbreviate)
head(aggExpense,30)
str(aggExpense)
|
e68547bb4e6df770db94f16b07abef97f21d4f0c
|
1cce4f16e7319b6bc7371158af74662a61c7cb88
|
/ui.R
|
156ba7c5f94f1737720b35920d7f672363369eb1
|
[] |
no_license
|
cralphb/Gears
|
d755655c60c70867ae5927394b4034c0f58d85e2
|
75e4ad3f043dc802136d9aabd9841b1f112b7a62
|
refs/heads/master
| 2020-04-18T08:50:37.958741
| 2016-09-11T12:16:10
| 2016-09-11T12:16:10
| 67,912,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,016
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application
# that plots miles per gallon (mpg) against a user-selected variable.
#
# If viewing this file in RStudio, you can run the application by
# clicking the 'Run App' button above.
#
# You can find out more about building applications with Shiny at
# the following URL:
#
#    http://shiny.rstudio.com/
#
library(shiny)
# Define the User Interface (UI): classic sidebar layout, controls on
# the left and the plot on the right.
shinyUI(pageWithSidebar(
  # Application title
  headerPanel("Miles Per Gallon"),
  # Sidebar with controls to select the parameter to plot against mpg
  # and to specify whether outliers should be included
  sidebarPanel(
    # Dropdown: display label -> underlying column name consumed by the
    # server as input$variable (presumably mtcars columns -- server not
    # shown here, TODO confirm).
    selectInput("variable", "Variable:",
                list("Cylinders" = "cyl",
                     "Transmission" = "am",
                     "Gears" = "gear")),
    # Whether outlier points should be drawn (input$outliers, off by default).
    checkboxInput("outliers", "Show outliers", FALSE)
  ),
  # Show the caption and plot the requested parameter.
  mainPanel(
    h3(textOutput("caption")),
    plotOutput("mpgPlot")
  )
))
|
8468ebf22aa544f8837cbefd79e2a890bbf26cd2
|
7270949bfee2dc82a7bd8dbc6de44c56d77fb514
|
/R/sem.factorcor.R
|
b627aa97918e171db923623a18cca1be6aca8a1a
|
[] |
no_license
|
nemochina2008/semoutput
|
06b654c3474e0884a9d0ff8b804ad9a7463ebed0
|
ac4d03acb80e8b0fd8f07c011f30abe10fb073b6
|
refs/heads/master
| 2020-03-27T19:23:00.649185
| 2018-08-31T20:58:26
| 2018-08-31T20:58:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,248
|
r
|
sem.factorcor.R
|
#' An SEM Output Function
#'
#' This function will display a table of latent factor correlations.
#' @param x results from a cfa() or sem() lavaan model
#' @param standardized logical whether to include standardized loadings (default = TRUE)
#' @param factors list c() of factors included in the model
#' @param print Create a knitr table for displaying as html table (default = TRUE)
#' @export
#' @examples
#' sem.factorcor(x)
sem.factorcor <- function(x, standardized = TRUE, factors = c(), print = TRUE){
  # Keep only covariances ("~~") between distinct latent factors that
  # have a p-value.
  table <- lavaan::parameterEstimates(x, standardized = standardized)
  table <- dplyr::filter(table, op=="~~", lhs %in% factors, !is.na(pvalue), lhs!=rhs)
  # Conventional significance stars.
  table <- dplyr::mutate(table, stars = ifelse(pvalue < .001, "***",
                                        ifelse(pvalue < .01, "**",
                                        ifelse(pvalue < .05, "*", ""))))
  table <- dplyr::select(table, 'Factor 1'=lhs, 'Factor 2'=rhs, r=est, sig=stars)
  # Bug fix: the original tested `print==TRUE` and `else if (print==FALSE)`
  # separately, so any other value fell through both branches and silently
  # returned the raw filtered table. A plain if/else always takes exactly
  # one branch.
  if (print) {
    table <- knitr::kable(table, digits=3, format="html", caption="Latent Factor Correlations")
    table <- kableExtra::kable_styling(table, full_width = FALSE, position = "left")
  } else {
    table <- as.data.frame(table)
  }
  return(table)
}
|
0d65d26cf0a3be4892063405953aa2432af284ea
|
f435725fac5d146472905c0a05a5d76cf1e88077
|
/corr.R
|
223cdd88c0371eaf1af15194e02e0974c9d6f33c
|
[] |
no_license
|
sunilharris/reflections
|
0717f312458a6a41fbb33d49e41f3ac56547d3e8
|
74b0217fdf8387bb1917e27964dfe41e413ea519
|
refs/heads/master
| 2021-01-10T03:10:44.370576
| 2015-10-10T13:42:10
| 2015-10-10T13:42:10
| 44,011,906
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,552
|
r
|
corr.R
|
# Compute the sulfate/nitrate correlation for every monitor file in a
# directory whose number of completely observed rows exceeds a threshold.
#
# Bug fixes vs. the original:
#   * no longer calls setwd() (a permanent side effect guarded by a
#     fragile "does the cwd end with `directory`" heuristic) -- files are
#     read via their full paths instead;
#   * no longer grows the result with c() inside the loop (O(n^2) copies).
#
# Args:
#   directory: path (relative or absolute) containing CSV files, each
#     with `sulfate` and `nitrate` columns.
#   threshold: minimum number of complete cases a file must have for its
#     correlation to be included (default 0).
#
# Returns:
#   Numeric vector of correlations, one per qualifying file, in
#   list.files() order; numeric(0) when no file qualifies.
corr <- function(directory, threshold = 0) {
  paths <- list.files(directory, full.names = TRUE)
  correlations <- lapply(paths, function(path) {
    observations <- read.csv(path)
    # Consider only rows with no missing values before testing the threshold.
    observations <- observations[complete.cases(observations), ]
    if (nrow(observations) > threshold) {
      cor(observations$sulfate, observations$nitrate)
    } else {
      NULL  # dropped by unlist() below
    }
  })
  # unlist() of an empty/all-NULL list is NULL; coerce so the empty result
  # is numeric(0), matching the original's return type.
  as.numeric(unlist(correlations))
}
|
158cf5f825d0dc02edb1e74323cb941bcd962889
|
fb8661959f04f1255789b29a37de80c1bc1aad6a
|
/iSumo.R
|
4796cd8490542701dcea87008844b8f2c0edabf3
|
[] |
no_license
|
xtYao/iSumo
|
b45cd1bf326eb02f1a46a1b387264146f77ac340
|
36f97ade4bf171aa5baddb3ee515bcf108663abe
|
refs/heads/master
| 2020-09-17T10:36:25.746339
| 2017-01-04T04:36:15
| 2017-01-04T04:36:15
| 66,251,681
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,479
|
r
|
iSumo.R
|
## TODO: in the final release, remove these two lines
setwd("~/gitLcl/iSumo/")
load(".RData")
## loading library
print("Loading required R packages...")
tryCatch({
library(data.table)
library(reshape2)
library(RMySQL)
library(ROCR)
library(ggplot2)
library(UniProt.ws) ## bioc
library(gProfileR)
library(h2o)
library(knncat)
}, error=function(e) {
print("At least one required package failed to load.")
print("Make sure you have installed all the depencies.")
stop(conditionMessage(e))
}
)
########## MAIN ##########
## part 1: gather all data
## task 1: establish reference proteome
## decide which organism
taxId = readline(prompt = "Type the taxonomical ID of the organism: ")
orgMap = setNames(c("hsapiens","scerevisiae"), c("9606","559292"))
proteomeFn =
paste("./data/", paste(taxId, "proteome", "rds", sep="."), sep = "")
if (!file.exists(proteomeFn)){
## download ref proteome from Uniprot
uniprotTab = paste("data/", taxId, ".tab", sep="")
proteome = fread(uniprotTab)
if (file.exists(uniprotTab)){
up = UniProt.ws(taxId = as.numeric(taxId))
## munging: unify column names
if (taxId == "9606") {
names(proteome) = gsub(pattern = "Entry",
replacement = "uniprotKb", x = names(proteome))
names(proteome) = gsub(pattern = "Cross-reference",
replacement = "", x = names(proteome))
# names(proteome) = gsub(pattern = " (Ensembl)",
# replacement = "ensembl",
# x = names(proteome), fixed = T)
names(proteome) = gsub(pattern = " (GeneID)",
replacement = "geneId",
x = names(proteome), fixed = T)
names(proteome) = gsub(pattern = "Protein names",
replacement = "protein", x = names(proteome))
names(proteome) = gsub(pattern = "Gene names (primary )",
replacement = "symbol",
x = names(proteome), fixed = T)
names(proteome) = gsub(pattern = "Gene names",
replacement = "gene", x = names(proteome))
names(proteome) = gsub(pattern = " (STRING)",
replacement = "stringId",
x = names(proteome), fixed = T)
## convert numeric value
proteome[, Mass := as.numeric(gsub(",","",Mass))]
proteome[, Length := as.numeric(Length)]
proteome[, Annotation :=
sapply(Annotation,
function(x) as.numeric(strsplit(x, " out")[[1]][1]))]
proteome[, stringId := gsub(";", "", x = stringId)]
## filter1: non-empty HGNC symbol, non-empty geneId
proteome = proteome[symbol != "" & geneId != ""]
## filter2: deduplicate by hgnc, keep the ones with geneId
symbolDup = proteome[which(duplicated(symbol)), unique(symbol)]
## dedup step 1: gid present
proteomeSymbolDedup =
proteome[symbol %in% symbolDup &
geneId != ""]
## dedup step 2:
dedup = proteomeSymbolDedup[numeric(0),]
for (syb in proteomeSymbolDedup[, unique(symbol)]) {
thisProt = proteomeSymbolDedup[symbol == syb]
if (nrow(thisProt)==1) {
dedup = rbindlist(list(dedup, thisProt))
} else {
thisProt = thisProt[which(Annotation==max(Annotation))]
if (nrow(thisProt)==1) {
dedup = rbindlist(list(dedup, thisProt))
} else {
if ("" %in% thisProt[, unique(stringId)]) {
thisProt = thisProt[stringId != ""]
dedup = rbindlist(list(dedup, thisProt[which.max(Mass)]))
} else {
dedup = rbindlist(list(dedup, thisProt[which.max(Mass)]))
}
}
}
}
proteome = rbindlist(list(proteome[!(symbol %in% symbolDup)], dedup))
## filter 3: dedup by geneId
geneIdDup = proteome[duplicated(geneId), unique(geneId)]
proteomeGeneIdDedup = proteome[geneId %in% geneIdDup]
dedup = proteome[numeric(0),]
for (gid in geneIdDup) {
thisProt = proteomeGeneIdDedup[geneId == gid]
dedup = rbindlist(list(dedup, thisProt[which.max(Annotation)]))
}
proteome = rbindlist(list(proteome[!(geneId %in% geneIdDup)], dedup))
saveRDS(proteome, "data/9606.tmpProteome.rds")
## filter 4: Annotation score >= 3
proteome = proteome[Annotation>=3]
## save the data
setkey(proteome, "uniprotKb")
write.table(proteome,
gsub(".tab", ".proteome.txt", uniprotTab),
quote=F, sep = "\t", row.names = F)
} else if (taxId == "559292") {
print("analyzing S cerevisiae (S288c) data.")
##
names(proteome) = gsub(pattern = "Entry",
replacement = "uniprotKb", x = names(proteome))
names(proteome) = gsub(pattern = "Protein names",
replacement = "protein", x = names(proteome))
names(proteome) = gsub(pattern = "Cross-reference",
replacement = "", x = names(proteome))
names(proteome) = gsub(pattern = " (GeneID)",
replacement = "geneId",
x = names(proteome), fixed = T)
names(proteome) = gsub(pattern = " (SGD)",
replacement = "sgd",
x = names(proteome), fixed = T)
## use primary gene name, if empty use first item in gene names
proteome[, gene := `Gene names (primary )`]
proteome[, gene := ifelse(nchar(gene)!=0 & !grepl(";", gene), gene,
sapply(strsplit(proteome$`Gene names`[1:10], " "), head, 1))]
## remove semi colons from sgd and geneId, and stringId
proteome[, geneId := gsub(";", "", x = geneId)]
proteome[, sgd := gsub(";", "", x = sgd)]
## convert Mass and Annotation to numeric
proteome[, Mass := as.numeric(gsub(",","",Mass))]
proteome[, Annotation :=
as.numeric( sapply(strsplit(Annotation, split = " "), head, 1) )]
## get rid of the duplicated sgd
proteome = proteome[!duplicated(sgd), .(uniprotKb, sgd, gene,
protein, geneNames=`Gene names`,
Length, Mass,
Status, Annotation)]
setkey(proteome, "uniprotKb")
}
} else {
stop("Download Uniprot ref proteome in tab-delimited file first!")
}
# Bug fix: the cache filename variable defined above is `proteomeFn`;
# `proteomFn` was an undefined-object typo.
saveRDS(proteome, proteomeFn)
} else {
proteome = readRDS(proteomeFn)
}
## task 2: assemble training labels of SUMO substrates
sumo = fread(paste("data/", taxId, ".sumo.txt", sep = ""))
setkey(sumo, uniprotKb)
sumo[, hits := rowSums(sumo[, -("uniprotKb"), with=F])]
sumo[, isSumo := hits>0]
## task 3: use gProfileR to find significant terms
enrich = gprofiler(query = sumo[isSumo==T, uniprotKb],
organism = orgMap[taxId], ordered_query = F,
exclude_iea = T, custom_bg = proteome$uniprotKb,
significant = F)
enrich = data.table(enrich)
# Select significantly enriched GO terms (MF/CC/BP), excluding SUMO terms
# themselves, with organism-specific term-size cutoffs.
if(taxId == "9606"){
  sigGo = enrich[domain %in% c("MF", "CC", "BP") &
                   significant==T & term.size<2000 &
                   !grepl("sumo", term.name),
                 .(term.id, term.name, term.size, p.value)]
} else if (taxId == "559292") {
  # Bug fix: this branch compared against "599292" (transposed digits),
  # so the yeast taxon id "559292" never matched and sigGo was left
  # undefined for yeast runs.
  sigGo = enrich[domain %in% c("MF", "CC", "BP") &
                   term.size>5 & term.size<1000 &
                   significant==T & !grepl("sumo", term.name),
                 .(term.id, term.name, term.size, p.value)]
}
setkey(sigGo, term.name)
## Cleaning: don't include any annotation directly show SUMO status
sigGo = sigGo[!grepl("sumo", term.name, ignore.case = T)]
## Table 1: significantly enrich GO terms in SUMO set
write.table(sigGo[order(p.value)], paste(taxId,".sigGo.txt",sep = ""),
sep = "\t", row.names = F, quote = F)
## task 4: retrieve GO-gene association for all genes and all selected terms
## set up connection
## Look up the gene products annotated to one or more GO terms for a given
## organism, by querying the public AmiGO "go_latest" MySQL mirror.
##
## Args:
##   term.id: character vector of GO accessions (e.g. "GO:0003723");
##     vectorized -- one query is issued per accession.
##   taxId: NCBI taxon id as a string; only "9606" (human, returns
##     UniProtKB accessions) and "559292" (S. cerevisiae S288c, returns
##     SGD ids) are implemented, anything else stops with an error.
##
## Returns:
##   A named list (names = term.id), each element a character vector of
##   identifiers for that term.
##
## NOTE(review): requires RMySQL and network access to
## mysql-amigo.ebi.ac.uk:4085; the connection is closed before returning,
## but not on error -- a failure inside lapply leaks the connection.
getUniprotKbByTermId = function(term.id, taxId="9606"){
  ## Note: this function is vectorized in respect to term.id
  ## TODO: check if taxId is of length 1
  ## set up GO MySQL connection
  mysql = dbDriver("MySQL")
  # goConn2 = dbConnect(mysql, user='go_select', password='',
  #                     host='spitz.lbl.gov', dbname='go_latest')
  goConn2 = dbConnect(mysql, user='go_select', password='amigo',
                      host='mysql-amigo.ebi.ac.uk', port=4085,
                      dbname='go_latest')
  ## set up query
  ## The graph_path join expands each term to all descendant terms, so
  ## annotations to child terms are included.
  if (taxId=="9606"){
    stmtUniprotKbByTermId = sprintf(
      "SELECT DISTINCT dbxref.xref_key AS uniprotKb
FROM term
INNER JOIN graph_path ON (term.id = graph_path.term1_id)
INNER JOIN association ON (graph_path.term2_id = association.term_id)
INNER JOIN gene_product ON (association.gene_product_id = gene_product.id)
INNER JOIN species ON (gene_product.species_id = species.id)
INNER JOIN dbxref ON (gene_product.dbxref_id = dbxref.id)
WHERE
acc = '%s'
AND
ncbi_taxa_id = '%s'
AND
dbxref.xref_dbname = 'UniprotKB'",
      term.id, taxId)
  } else if (taxId=="559292"){
    stmtUniprotKbByTermId = sprintf(
      "SELECT DISTINCT dbxref.xref_key AS sgd
FROM term
INNER JOIN graph_path ON (term.id = graph_path.term1_id)
INNER JOIN association ON (graph_path.term2_id = association.term_id)
INNER JOIN gene_product ON (association.gene_product_id = gene_product.id)
INNER JOIN species ON (gene_product.species_id = species.id)
INNER JOIN dbxref ON (gene_product.dbxref_id = dbxref.id)
WHERE
acc = '%s'
AND
ncbi_taxa_id = '%s'
AND
dbxref.xref_dbname = 'SGD'",
      term.id, taxId)
  } else {
    stop("Not yet implemented for this organism!!!")
  }
  ## execute the query and return a list
  ## of the same length of term.id, each element is a char vec of UniprotKb
  res = lapply(stmtUniprotKbByTermId,
               function(x){
                 if (taxId=="9606"){
                   dbGetQuery(goConn2, x)$uniprotKb
                 } else if (taxId == "559292") {
                   dbGetQuery(goConn2, x)$sgd
                 }
               })
  res = setNames(res, term.id)
  dbDisconnect(goConn2)
  return(res)
}
## expand proteome with GO assocaition
goMat = as.data.table(lapply(sigGo$term.id, function(x){
ids = unlist(getUniprotKbByTermId(x, taxId = taxId), use.names = F)
if (taxId == "9606"){
proteome$uniprotKb %in% ids
} else if (taxId == "559292"){
proteome$sgd %in% ids
}
}))
colnames(goMat) = sigGo[, term.name]
write.table(goMat, paste("data/", taxId, ".goMat.txt", sep = ""),
quote = F, row.names = F, sep = "\t")
## task 5: retrieve STRING database
## download protein links file from website
if (taxId=="9606"){
protInteract = fread("data/9606.protein.actions.v10.txt")
protInteract = protInteract[mode=="binding",
.(p1=item_id_a, p2=item_id_b)]
## mapping from stringId to UniprotKb
stringId2uniprotKb = proteome[stringId != "", .(stringId, uniprotKb)]
setkey(stringId2uniprotKb, "stringId")
protInteract$u1 = ifelse(protInteract$p1 %in% stringId2uniprotKb$stringId,
stringId2uniprotKb[protInteract$p1, uniprotKb], NA)
protInteract$u2 = ifelse(protInteract$p2 %in% stringId2uniprotKb$stringId,
stringId2uniprotKb[protInteract$p2, uniprotKb], NA)
protInteract = protInteract[!is.na(u1) & !is.na(u2), .(u1, u2)]
write.table(protInteract, paste("data/", taxId, ".stringInt.txt", sep=""),
quote = F, sep = "\t", row.names = F)
} else if (taxId=="559292") {
## NOTE: STRING db doesn't have data for 559292 (S288c), but only 4932 (S.
## cerevisiae). We will use that instead.
protInteract = fread("data/4932.protein.actions.v10.txt")
protInteract = protInteract[mode=="binding",
.(p1=item_id_a, p2=item_id_b)]
if (any(grepl("\\.", protInteract$p1))){
protInteract$p1 = sapply(strsplit(protInteract$p1, split = "\\."),
function(x) x[2])
protInteract$p2 = sapply(strsplit(protInteract$p2, split = "\\."),
function(x) x[2])
}
## mapping of ORF name and sgd
sysName = read.table("data/sysNameSgdMapping.csv",
sep = " ", quote = '"', header = T)
sysName = as.data.table(sysName)[reason=="MATCH"]
setkey(sysName, "secondaryIdentifier")
protInteract$s1 = sysName[protInteract$p1, as.character(primaryIdentifier)]
protInteract$s2 = sysName[protInteract$p2, as.character(primaryIdentifier)]
sgd2uniprotKb = do.call("rbind",
lapply(unique(c(protInteract$s1, protInteract$s2)),
function(x) proteome[grepl(x, sgd), .(sgd, uniprotKb)]))
sgd2uniprotKb = sgd2uniprotKb[!duplicated(sgd)]
setkey(sgd2uniprotKb, "sgd")
protInteract$u1 = sgd2uniprotKb[protInteract$s1, uniprotKb]
protInteract$u2 = sgd2uniprotKb[protInteract$s2, uniprotKb]
protInteract = protInteract[!is.na(u1) & !is.na(u2), .(u1, u2)]
write.table(protInteract, paste("data/", taxId, ".stringInt.txt", sep=""),
quote = F, sep = "\t", row.names = F)
}
## calculate degrees
ppiDegree = sapply(proteome$uniprotKb,
function(x){
nrow(protInteract[u1==x | u2==x,])
})
## task 6: retrieve CORUM database, only if for human data
## summarize if a protein is within a complex
## AND how large is that complex
if (taxId == "9606"){
corum = fread("data/coreCORUM.txt", sep = ";")
corum = corum[organism == "Human", c(2, 5), with=F]
corumSubunits = setNames(
sapply(corum$`subunits (UniProt IDs)`, strsplit, ","),
corum$`Complex name`)
humanComplex = t(sapply(proteome$uniprotKb,
function(x){
inCorum = which(sapply(corumSubunits,
function(y) x %in% y))
nCorum = length(inCorum) # n comp has the prot
avgCorumSz = 0
if (nCorum != 0){
avgCorumSz = # avg n subunit per comp
mean(sapply(corumSubunits[inCorum], length))
}
return(c(nCorum, avgCorumSz))
}))
humanComplex = as.data.table(humanComplex)
colnames(humanComplex) = c("nCorum", "avgCorumSz")
} else if (taxId == "559292"){
## use the same dataset in A Baryshnikova 2016 yeast genetic interaction ppr
complexes = fread("data/559292.complexes.txt", sep = "\t")
## a list of complex compositions
cpxSubunits = setNames(
sapply(complexes$`ORFs annotated to complex`, strsplit, "; "),
complexes$`Protein Complex Name`)
## change key for sysName
sysName$primaryIdentifier = as.character(sysName$primaryIdentifier)
sysName$secondaryIdentifier = as.character(sysName$secondaryIdentifier)
setkey(sysName, "primaryIdentifier")
## convert into complex memberships
yeastComplex =
t(sapply(proteome$uniprotKb,
function(x){
xOrf = sysName[proteome[x, sgd], secondaryIdentifier]
inComp = which(sapply(cpxSubunits, function(y) xOrf %in% y))
nComp = length(inComp) # n comp has the prot
avgCompSz = 0
if (nComp != 0){
avgCompSz = # avg n subunit per comp
mean(sapply(cpxSubunits[inComp], length))
}
return(c(nComp, avgCompSz))
}))
yeastComplex = as.data.table(yeastComplex)
colnames(yeastComplex) = c("nComp", "avgCompSz")
}
## Table 2/Figure 2. analyzing RNA-binding, SUMO in protein complexes
## get proteins that are RNA-binding
humanRnaBinding = unlist(getUniprotKbByTermId(term.id = "GO:0003723"),
use.names = F)
yeastRnaBinding = unlist(getUniprotKbByTermId(term.id = "GO:0003723",
taxId = "559292"),
use.name = F)
yeastRnaBinding = sysName[primaryIdentifier %in% yeastRnaBinding,
secondaryIdentifier]
humanSumo = Reduce(union, readRDS("data/9606.sumoUniprotKb.rds"))
yeastSumo = readRDS("data/559292.sumoSgd.rds")
## assemble human and yeast complex centric data separately
hDt = do.call("rbind", lapply(names(corumSubunits), function(x){
prots = corumSubunits[[x]]
size = length(prots)
nRna = sum(prots %in% humanRnaBinding)
nSumo = sum(prots %in% humanSumo)
return(c(x, size, "human", nRna, nSumo))
}))
yDt = do.call("rbind", lapply(names(cpxSubunits), function(x){
prots = cpxSubunits[[x]]
size = length(prots)
nRna = sum(prots %in% yeastRnaBinding)
nSumo = sum(prots %in% yeastSumoSys)
return(c(x, size, "yeast", nRna, nSumo))
}))
## put them together into one dt, rename, convert classes
complexComp = data.table(rbind(hDt, yDt))
colnames(complexComp) = c("Complex name", "Complex size", "Organism",
"Number of RNA-binding subunits",
"Number of SUMOylated subunits")
class(complexComp$`Complex size`)="numeric"
class(complexComp$`Number of RNA-binding subunits`)="numeric"
class(complexComp$`Number of SUMOylated subunits`)="numeric"
complexComp$Organism = as.factor(complexComp$Organism)
complexComp[, ":="("RNA binding" = `Number of RNA-binding subunits`>0,
"SUMOylated" = `Number of SUMOylated subunits`>0)][,
"RNA.SUMO" := interaction(`RNA binding`, `SUMOylated`)]
## save it.
write.table(complexComp, "tableS3.complexes.txt",
sep = "\t", row.names = F, quote = F)
## analysis of complex size correlation with RNA-binding or SUMO
## TODO: annotate the graph with the following!!!
complexComp[Organism=="human", table(`RNA binding`, SUMOylated)]
complexComp[Organism=="yeast", table(`RNA binding`, SUMOylated)]
## pairwise wilcox rank test
pairwise.wilcox.test(x = complexComp[Organism=="human", `Complex size`],
g = complexComp[Organism=="human", `RNA.SUMO`],
p.adjust.method = "fdr")
pairwise.wilcox.test(x = complexComp[Organism=="yeast", `Complex size`],
g = complexComp[Organism=="yeast", `RNA.SUMO`],
p.adjust.method = "fdr")
## Figure 2a: boxplots of complex size by RNA-binding/SUMOylation status.
## Reload the table written above so this section can run standalone.
complexComp = fread("tableS3.complexes.txt")
## first make boxplot to show complex size related to RNA/SUMO
## (violin and jitter too busy since majority of points are at the bottom)
fig2a = ggplot(data = complexComp,
               mapping = aes(x = RNA.SUMO, y = `Complex size`))
pdf(width = 7.5, height = 5, file = "fig2a.compSzRnaSumo.pdf")
fig2a + geom_boxplot() +
    # geom_violin(draw_quantiles = T) +
    # geom_jitter(width = 0.2) +
    facet_wrap(facets = ~Organism) +
    # annotate("text", )
    theme(plot.background = element_blank(),
          panel.grid = element_blank(),
          panel.background = element_blank(),
          plot.margin = margin(t = 20, r = 60, b = 20, l = 20, unit = "pt"),
          ## FIX: value was misspelled "indside", which ggplot2 rejects
          strip.placement = "inside",
          text = element_text(size = 16),
          axis.line =
              element_line(size=1, colour = "black",
                           arrow = arrow(angle = 30,
                                         length = unit(x = 0.1, units = "inch"),
                                         type = "open", ends = "last")),
          # axis.title.x = element_text(size = 16),
          # axis.title.y = element_text(size = 16),
          axis.text.x = element_text(size = 16, angle = 335, hjust = 0),
          # axis.text.y = element_text(size = 16),
          axis.ticks = element_blank())
dev.off()
## second make scatter plot of nSUMO-nRNA, with size correspond to complex size
fig2b = ggplot(data = complexComp,
               mapping = aes(x = `Number of RNA-binding subunits`,
                             y = `Number of SUMOylated subunits`,
                             color = Organism))
pdf(width = 7.5, height = 5, file = "fig2b.sumoRna.pdf")
## FIX: map point size to the column inside aes() instead of reaching into
## complexComp with `$` -- ggplot2 discourages $-extracted vectors in aes()
## because they silently break if the plot data are subset or faceted.
fig2b + geom_point(aes(size = `Complex size`), alpha=0.5) +
    scale_size(breaks = c(5, 10, 20, 60, 100), range = c(1,8),
               guide = guide_legend(title = "Complex size")) +
    ## labeling human big SUMOs
    ## NOTE(review): `size = 24`/`size = 32` inside aes() are mapped through
    ## scale_size() rather than used as literal point sizes; moving them
    ## outside aes() would change the rendered label size, so kept as-is.
    geom_text(mapping = aes(label = `Complex name`, size = 24),
              data = complexComp[Organism=="human"][
                  order(`Number of SUMOylated subunits`,decreasing = T)][1:6],
              check_overlap = T, nudge_y = 3, nudge_x = 8, show.legend = F) +
    geom_text(mapping = aes(label = `Complex name`, size = 24),
              data = complexComp[Organism=="human"][
                  order(`Number of SUMOylated subunits`,decreasing = T)][7],
              check_overlap = T, nudge_y = 3, nudge_x = 2, show.legend = F) +
    ## labeling yeast big SUMOs
    geom_text(mapping = aes(label = `Complex name`, size = 24),
              data = complexComp[Organism=="yeast"][
                  order(`Number of SUMOylated subunits`,decreasing = T)][1],
              check_overlap = T, nudge_y = 3, nudge_x = 16, show.legend = F) +
    geom_text(mapping = aes(label = `Complex name`, size = 24),
              data = complexComp[Organism=="yeast"][
                  order(`Number of SUMOylated subunits`,decreasing = T)][3],
              check_overlap = T, nudge_y = 3, nudge_x = 2, show.legend = F) +
    geom_text(mapping = aes(label = `Complex name`, size = 32),
              data = complexComp[Organism=="yeast"][
                  order(`Number of SUMOylated subunits`,decreasing = T)][c(2,4)],
              check_overlap = T, nudge_y = -3, nudge_x = 16, show.legend = F) +
    ## labeling human big RNA binding low SUMOs
    geom_text(mapping = aes(label = `Complex name`, size = 32),
              data = complexComp[Organism=="human" &
                                     `Number of SUMOylated subunits`<10][
                  order(`Number of RNA-binding subunits`,decreasing = T)][1:3],
              check_overlap = T, nudge_y = 4, nudge_x = 8, show.legend = F) +
    xlim(-7,133) +
    theme(plot.background = element_blank(),
          panel.grid = element_blank(),
          panel.background = element_blank(),
          plot.margin = margin(t = 20, r = 20, b = 20, l = 20, unit = "pt"),
          text = element_text(size = 16),
          axis.line =
              element_line(size=1, colour = "black",
                           arrow = arrow(angle = 30,
                                         length = unit(x = 0.1, units = "inch"),
                                         type = "open", ends = "last")),
          axis.ticks = element_blank(),
          legend.background = element_blank(),
          legend.key = element_blank(),
          legend.key.size = unit(16, "pt"),
          legend.position = c(0.2, 0.7),
          legend.box = "vertical",
          legend.text = element_text(size = 16))
dev.off()
## task 7: phosphosite
## include number of P/M/A/U modification sites
## Per-protein counts of phospho/methyl/acetyl/ubiquitin sites.
## Human (9606): PhosphoSitePlus flat files; yeast (559292): dbPTM,
## since no PhosphoSitePlus data are available for yeast.
if (taxId == "9606"){
    ## read in phosphosite data
    phos = fread("data/psp/Phosphorylation_site_dataset", skip = 3)
    meth = fread("data/psp/Methylation_site_dataset", skip=3)
    acet = fread("data/psp/Acetylation_site_dataset", skip=3)
    ubiq = fread("data/psp/Ubiquitination_site_dataset", skip = 3)
    ## count how many each protein has
    phosCount = phos[, table(ACC_ID)]
    methCount = meth[, table(ACC_ID)]
    acetCount = acet[, table(ACC_ID)]
    ubiqCount = ubiq[, table(ACC_ID)]
} else if (taxId == "559292"){
    ## no phosphosite data, use dbPTM data instead
    ## (V2 = UniProt accession, V8 = modification type in the dbPTM dump)
    dbPTM = fread("data/dbPTM3.txt")
    yeastPtm = dbPTM[V2 %in% proteome$uniprotKb]
    phosCount = yeastPtm[V8=="Phosphorylation", table(V2)]
    methCount = yeastPtm[V8=="Methylation", table(V2)]
    acetCount = yeastPtm[V8=="Acetylation", table(V2)]
    ubiqCount = yeastPtm[V8=="Ubiquitylation", table(V2)]
}
## construct feature vectors
## One row per proteome entry; proteins absent from a count table get 0.
ptmMat = proteome[, .(uniprotKb)]
ptmMat[, nPhos :=
           ifelse(uniprotKb %in% names(phosCount), phosCount[uniprotKb], 0)]
ptmMat[, nMeth :=
           ifelse(uniprotKb %in% names(methCount), methCount[uniprotKb], 0)]
ptmMat[, nAcet :=
           ifelse(uniprotKb %in% names(acetCount), acetCount[uniprotKb], 0)]
ptmMat[, nUbiq :=
           ifelse(uniprotKb %in% names(ubiqCount), ubiqCount[uniprotKb], 0)]
## task 8: pre-compute GPS-SUMO
## Export proteome sequences as FASTA for the GPS-SUMO web server.
seq = select(x = up, columns = c("UNIPROTKB", "SEQUENCE"),
             keytype = "UNIPROTKB", keys = proteome[, uniprotKb])
seq = data.table(seq)
writeLines(seq[, paste(">", UNIPROTKB, "\n", SEQUENCE, "\n", sep="")],
           con = paste("data/", taxId, ".seq.fa", sep=""), sep = "\n")
## just some amendment to ref proteome
# extraUniprotKb = setdiff(proteome$uniprotKb, oldUniprotKb)
# seq2 = select(x = up, columns = c("UNIPROTKB", "SEQUENCE"),
#               keytype = "UNIPROTKB", keys = extraUniprotKb)
# seq2 = data.table(seq2)
# writeLines(seq2[, paste(">", UNIPROTKB, "\n", SEQUENCE, "\n", sep="")],
#            con = paste("data/", taxId, ".extra.seq.fa", sep=""), sep = "\n")
## Manual step: feed the sequences into GPS-SUMO 2.0 web server
## download the result as text and rename it ./data/[taxId].gps.txt
gps = fread(paste("data/",taxId,".gps.txt", sep=""))
## Count predicted sites per protein, wide by site Type (first 3 Type columns).
gpsCount = data.table(dcast(gps, ID ~ Type, value.var = "Type"))[,1:4,with=F]
setkey(gpsCount, ID)
## Align the counts with the proteome order; zero-fill proteins for which
## GPS-SUMO returned no predictions.
gpsSumo = do.call(rbind,
                  lapply(proteome$uniprotKb,
                         function(x){
                             if (x %in% gpsCount$ID){
                                 return(gpsCount[x, -1, with=F])
                             } else {
                                 res = setNames(c(0,0,0), colnames(gpsCount)[-1])
                                 return(as.data.table(as.list(res)))
                             }
                         }))
## finally, put together the full dataset
## TODO: hardcoded for human, make it generic
## TODO: rerun
## Column-bind all feature blocks with the SUMO label; the two organisms
## differ only in the identifier columns and the complex feature block.
if (taxId=="9606"){
    iSumoData = cbind(proteome[, .(uniprotKb, symbol, Length, Mass)],
                      goMat, ## GO annotation
                      ptmMat[,-1,with=F],
                      ppiDegree, humanComplex,
                      gpsSumo, sumo[, .(isSumo)])
} else if (taxId=="559292"){
    iSumoData = cbind(proteome[, .(uniprotKb, sgd, Length, Mass)],
                      goMat[, , with=F],
                      ptmMat[, -1, with=F],
                      ppiDegree = ppiDegree[proteome$uniprotKb],
                      yeastComplex,
                      gpsSumo, sumo[, .(isSumo)])
}
write.table(iSumoData, paste("data/",taxId,".iSumo.txt",sep=""),
            quote = F, row.names = F, sep = "\t")
## Part 2: fitting RF
## The sourced script trains/saves the models and defines `dt`, `train`,
## `valid`, `test` used below.
source(paste(taxId, ".modelTuning.R", sep=""))
fn = dir(paste("models/", taxId, ".rf.h2o", sep=""), full.names = T)
rf = h2o.loadModel(fn)
## FIX: full.names was missing here (inconsistent with `fn` above), so
## h2o.loadModel received a bare file name instead of a usable path.
fn.null = dir(paste("models/", taxId, ".rf.null.h2o", sep=""), full.names = T)
rf.null = h2o.loadModel(fn.null)
## part 3: visualize results
####################
## Figure 1. Stacked bar of number of SUMO proteins found in each paper,
## decomposed into confirmed by X studies
## Melt the per-study membership columns of `sumo`, count SUMO proteins per
## study stratified by how many studies agree ("hits"), and order studies by
## their total counts for plotting.
if (taxId == "9606"){
    m1 = melt(data = sumo, id.vars = "hits", measure.vars = 1:18)
    m2 = m1[hits>0 & value==TRUE, table(hits, variable)]
    m3 = melt(m2, value.name = "nSumo")
    m3$variable <- reorder(x = m3$variable, X = m3$nSumo, FUN = sum)
    m3 = data.table(m3)
    ## collapse the long tail: 9 or more agreeing studies become one level
    m3[, hits := ifelse(hits>=9, "9+", as.character(hits))]
} else if (taxId == "559292"){
    m1 = melt(data = sumo, id.vars = "hits", measure.vars = 1:6)
    m2 = m1[hits>0 & value==TRUE, table(hits, variable)]
    m3 = melt(m2, value.name = "nSumo")
    m3$variable <- reorder(x = m3$variable, X = m3$nSumo, FUN = sum)
    m3$hits = as.factor(m3$hits)
}
## some species specific pars
## (wider right margin for the longer human study labels)
if (taxId == "9606"){
    pl.mar = margin(t = 0, r = 120, b = 12, l = 0, unit = "pt")
} else if (taxId == "559292"){
    pl.mar = margin(t = 0, r = 100, b = 12, l = 0, unit = "pt")
}
pdf(file = paste(taxId, ".sumoStudies.pdf", sep = ""),
    width = 10, height = 7.5)
fig1 = ggplot(data = m3) +
    geom_bar(mapping = aes(x = variable, y = nSumo, fill = hits),
             stat = "identity") +
    ylab("Number of SUMOylated proteins") +
    scale_fill_brewer(name="Number of studies\nin consensus", type = "seq",
                      palette = "BuGn", direction = -1)+
    theme(axis.line =
              element_line(size=1, colour = "black",
                           arrow = arrow(angle = 30,
                                         length = unit(x = 0.1, units = "inch"),
                                         type = "open", ends = "last")),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank()) +
    theme(axis.title.x = element_blank(),
          axis.title.y = element_text(size = 16),
          axis.text.x = element_text(size = 16, angle = 335, hjust = 0),
          axis.text.y = element_text(size = 16),
          axis.ticks = element_blank()) +
    theme(legend.background = element_blank(),
          legend.position = c(0.2, 0.75),
          legend.text = element_text(size = 16),
          legend.key.size = unit(0.2, "inch"),
          legend.title = element_text(size = 16)) +
    theme(plot.margin = pl.mar)
print(fig1)
dev.off()
###################
## Figure2.
## ROC curve, for either model, plot 10-CV in grey, validation in dotted
## red/blue, and test in solid red/blue. Add guideline y=x.
## Plot one ROC (roc=TRUE) or precision-recall (roc=FALSE) curve via ROCR.
##   predLabelList: list(predictions, labels) as built by getModelPred().
##   cv:     TRUE draws a thin grey cross-validation line.
##   dashed: dashed (validation) versus solid (test) line type.
## Adds to the current device if one is open, otherwise starts a new plot.
plotRoc = function(predLabelList, col=grey(0.25), roc=T, cv=F, dashed=F, lwd=1){
    ## make performance measure
    pred = prediction(predLabelList[[1]], predLabelList[[2]])
    ## BUG FIX: the precision-recall branch referenced undefined `Pred`
    ## (capitalized), which errored whenever roc=FALSE.
    if (roc) perf = performance(pred, 'tpr', 'fpr') else
        perf = performance(pred, 'prec', 'rec')
    ## set line type
    lType = ifelse(dashed, 2, 1)
    ## plot
    if (!cv){
        if (dev.cur()==1) {
            plot(perf, col=col, lty=lType, lwd = lwd)
        } else {
            plot(perf, col=col, lty=lType, lwd = lwd, add=T)
        }
    } else {
        if (dev.cur()==1) plot(perf, col='grey', lwd=0.2) else
            plot(perf, col='grey', lwd=0.2, add=T)
        ##plot(perf, avg='vertical', col=grey(0.3), add=T)
    }
}
## plotting
## rf: cv --> valid --> test
## Collect (predictions, labels) pairs for an h2o model: one pair per
## cross-validation fold, plus the validation and test sets.
## Column 3 of an h2o binomial prediction frame is P(isSumo == TRUE).
getModelPred = function(x, train, valid, test){
    pred = list()
    ## get CV models predictions
    cvFold = as.data.frame(
        h2o.cross_validation_fold_assignment(x))$fold_assignment
    cvPred = lapply(h2o.cross_validation_predictions(x),
                    function(y) as.data.frame(y))
    for (i in 1:length(cvPred)){
        thisName = paste("cv",i,sep="_")
        ## NOTE(review): fold assignments appear to be 0-based while the CV
        ## prediction list is 1-based, hence the (i-1) -- confirm against
        ## the h2o documentation.
        thisPred = as.data.frame(cvPred[[i]])[cvFold==(i-1), 3]
        thisLabel = as.data.frame(train)[cvFold==(i-1), "isSumo"]
        pred[[thisName]] = list(thisPred, thisLabel)
    }
    ## get valid predictions
    validPred = as.data.frame(h2o.predict(x, valid))[, 3]
    validLabel = as.data.frame(valid)[, "isSumo"]
    pred$valid = list(validPred, validLabel)
    ## get test predictions
    testPred = as.data.frame(h2o.predict(x, test))[, 3]
    testLabel = as.data.frame(test)[, "isSumo"]
    pred$test = list(testPred, testLabel)
    return(pred)
}
## make ROC, and P-R curves
pred = getModelPred(rf, train, valid, test)
pred.null = getModelPred(rf.null, train, valid, test)
pdf(file=paste(taxId, ".ROC.pdf", sep=""),
    width=8, height=8)
plot.new()
par(cex=1, cex.lab=1, cex.main=1, tcl=-0.1)
## Overlay ROC curves: thin grey lines per CV fold, dashed for validation,
## solid for test; red = iSUMO model, blue = null (sequence-only) model.
for (pr in names(pred)) {
    thisPred = pred[[pr]]
    thisPred.null = pred.null[[pr]]
    if (grepl("cv",pr)){
        ## grey line: CV
        plotRoc(thisPred, roc = T, cv = T)
        plotRoc(thisPred.null, roc = T, cv = T)
    } else if (pr=="valid"){
        ## dashed line: valid
        plotRoc(thisPred, roc = T, cv = F, dashed = T, col = "red", lwd=2)
        plotRoc(thisPred.null, roc = T, cv = F, dashed = T, col = "blue", lwd=2)
    } else {
        ## solid line: test
        plotRoc(thisPred, roc = T, cv = F, dashed = F, col = "red", lwd=3)
        plotRoc(thisPred.null, roc = T, cv = F, dashed = F, col = "blue", lwd=3)
    }
}
## some garnish
axis(side = 1, at = seq(0,5)/5, labels = seq(0,5)/5)
axis(side = 2, at = seq(0,5)/5, labels = seq(0,5)/5)
title(##main = "Receiver operating characteristics (ROC)
      ##of iSUMO model versus with only predicted sequence motif",
      xlab = "False positive rate", ylab = "True positive rate")
abline(a = 0, b = 1, lty=3, lwd=0.5, col=grey(0.25))
legend(x = 0.8, y = 0.5,
       fill = c("red","blue",rgb(0, 0, 0, 0),rgb(0, 0, 0, 0)),lty = c(0,0,2,1),
       border = F, bty = "n", xjust = 0.5, y.intersp = 1.25,
       legend = c("iSUMO","seq motif only","validation set","test set"))
dev.off()
#################################
## Figure 3.
## collect relative importance of features
varImp = as.data.table(rf@model$variable_importances)
## sort variable levels by importance
varImp$variable = reorder(varImp$variable, varImp$relative_importance)
pdf(file=paste(taxId, ".varImp.pdf", sep=""),
    width=8, height=12)
plot.new()
## Horizontal bar chart of the 30 most important features of the RF model.
fig3 = ggplot(data = varImp[1:30]) +
    geom_bar(mapping = aes(x = variable, y = scaled_importance),
             stat = "identity", width = 0.6) +
    ylab(label = "Relative importance") +
    xlab(label = "Variable name") +
    theme(axis.line =
              element_line(size=1, colour = "black",
                           arrow = arrow(angle = 30,
                                         length = unit(x = 0.1, units = "inch"),
                                         type = "open", ends = "last")),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.border = element_blank(),
          panel.background = element_blank()) +
    theme(axis.title.x = element_blank(),
          axis.title.y = element_text(size = 16),
          axis.text.x = element_text(size = 16),
          axis.text.y = element_text(size = 16),
          axis.ticks = element_blank()) +
    coord_flip() +
    theme(legend.position = c(0.2, 0.8),
          legend.text = element_text(size = 16),
          legend.key.size = unit(0.5, "inch"),
          legend.title = element_text(size = 16))
print(fig3)
dev.off()
#################################
## Table
## Score the full dataset; `dt` is the h2o frame defined by the sourced
## model-tuning script.
finalPerformance = h2o.performance(rf, dt)
## find the threshold maximizing F2 value
threshold = as.data.table(
    finalPerformance@metrics$max_criteria_and_metric_scores)[
        metric=="max f2", threshold]
## dt is the h2o frame used for model tuning
finalPrediction = as.data.table(h2o.predict(rf, newdata = dt))[, -1, with=F]
finalPrediction[, ":="(uniprotKb = proteome$uniprotKb,
                       protein = proteome$protein,
                       gene = proteome$gene,
                       isSumo = iSumoData$isSumo)]
## call a protein SUMOylated when P(TRUE) exceeds the F2-optimal threshold
finalPrediction$predict = finalPrediction$TRUE.>threshold
setkey(finalPrediction, "uniprotKb")
saveRDS(finalPrediction, paste(taxId, ".finalPrediction.rds", sep=""))
write.table(finalPrediction, paste(taxId, ".finalPrediction.csv", sep=""),
            sep = "\t", row.names = F, quote = F)
## post check: how do we do on GO annotated SUMO proteins.
## (presumably GO sumoylation / SUMO transferase terms -- confirm IDs)
goSumo = finalPrediction[getUniprotKbByTermId("GO:0016925", taxId)[[1]]][!is.na(predict)]
goSumoXferase = finalPrediction[getUniprotKbByTermId("GO:0019789", taxId)[[1]]][!is.na(predict)]
##### SAVE WORKSPACE
save.image(file = paste(taxId, "RData", sep = "."))
|
a46f3efedaabc829e248bf073f960feb467767a4
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/gear/R/ganiso_d.R
|
b61d09fc38b7dbeec310d6fb9fdf02a8b6365fed
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,499
|
r
|
ganiso_d.R
|
#' Anisotropic distance-related characteristics
#'
#' Computes necessary distance-related characteristics when
#' there is geometric anisotropy. This is essentially an
#' internal function to \code{\link{evaluate}} a
#' \code{cmodStd} object produced by \code{\link{cmod_std}}
#' when the anisotropy ratio differs from 1.
#'
#' @param coords2 An \eqn{M \times 2} matrix of spatial
#'   coordinates. Is missing, then \code{coords2 = coords1}.
#' @inheritParams angle2d
#'
#' @return A \code{ganisoD} object with components \code{d}
#'   and \code{angles}, which is the distance matrix between
#'   the coordinates and the angles between the coordinates.
#'   The angles are returned in radians.
#' @export
#' @examples
#' ganiso_d(cbind(0, 0), cbind(1, 1))
ganiso_d = function(coords1, coords2, radians = TRUE, invert = TRUE) {
  if (missing(coords2)) {
    coords2 = coords1
  }
  # scalar condition: use short-circuiting `||` (was vectorized `|`)
  if (!is.matrix(coords1) || !is.matrix(coords2)) {
    stop("coords1 and coords2 must be matrices")
  }
  if (ncol(coords1) != 2) {
    stop("coords1 should have only 2 columns")
  }
  if (ncol(coords2) != 2) {
    stop("coords2 should have only 2 columns")
  }
  out = vector("list")
  # pairwise distances between every coords1/coords2 pair
  out$d = geodist(coords1, coords2)
  # angle from each coords1 point to all coords2 points; intentionally
  # computed with radians = TRUE regardless of the `radians` argument,
  # since the returned angles are documented to be in radians (the flag
  # itself is only recorded on the result below)
  out$angles = t(apply(coords1, 1, function(x) {
    angle2d(matrix(x, nrow = nrow(coords2), ncol = 2, byrow = TRUE), coords2, radians = TRUE, invert = invert)
  }))
  out$radians = radians
  out$invert = invert
  class(out) = "ganisoD"
  return(out)
}
|
7f04acbccbc1b09649664e80fbdfb0df00342624
|
3fe7b25ac1e9f824a531fbf7c43bad84e9ac7d9b
|
/WESyS/epa-biogas-rin-HTL-TEA/studies/FY18/sensitivity/src/ee/scratch/ee.test.R
|
6ac4179801c781d10115587991c66a12429b6a3d
|
[] |
no_license
|
irinatsiryapkina/work
|
5f3b67d36ffc18cb1588f8a3e519a76cdfc52e81
|
1aaecb300d4d0082df36fd79748145b22d1d7acb
|
refs/heads/master
| 2021-01-16T10:25:55.539383
| 2020-02-25T18:58:31
| 2020-02-25T18:58:31
| 243,076,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,916
|
r
|
ee.test.R
|
## Script setup.
## NOTE(review): rm(list=ls()) and an absolute setwd() make this script
## fragile for other users/machines; consider a project-relative workflow.
rm(list=ls())
## NOTE(review): this assigns a plain variable named R_LIBS; it does NOT set
## the library search path (that would need .libPaths() or the R_LIBS
## environment variable).
R_LIBS= ("/home/R/library")
options(scipen=999) #turn scientific notation off
options(stringsAsFactors = FALSE)
setwd ("~/GitHub/epa-biogas-rin/studies/FY18/sensitivity/src/ee/")
# Load libraries
library (dplyr)
library (data.table)
# Elementary Effects Function 1: ee
# Compute Morris elementary-effect statistics from a trajectory design.
#
# Args:
#   x: numeric design matrix with (N + 1) * r rows -- r trajectories of
#      N + 1 points, where exactly one factor changes between consecutive
#      points within a trajectory.
#   y: numeric vector of model outputs, one per row of x.
#   N: number of varied input factors (columns of x).
#   r: number of trajectories.
#
# Returns: data.frame with one row per factor: mu (mean elementary effect),
#   mu.star (mean absolute effect), sigma (sd of the effects),
#   sem (sigma / sqrt(r)) and std.errors (|mu| / sem).
ee.test <- function(x = NULL, y = NULL, N = NULL, r = NULL) {
  y <- y - min(y)                  # shift outputs so the minimum is zero
  m <- (N + 1) * r                 # total number of design points
  # Consecutive differences; drop the rows that straddle two trajectories.
  # seq_len() (rather than 1:(r - 1)) also handles the r == 1 case, which
  # previously produced an out-of-bounds negative matrix index.
  cross <- (N + 1) * seq_len(r - 1)
  xdiff <- x[2:m, , drop = FALSE] - x[1:(m - 1), , drop = FALSE]
  ydiff <- y[2:m] - y[1:(m - 1)]
  if (length(cross) > 0) {
    xdiff <- xdiff[-cross, , drop = FALSE]
    ydiff <- ydiff[-cross]
  }
  # Step size of the single factor perturbed at each step (row-wise scan).
  delta <- t(xdiff)[t(xdiff) != 0]
  # Which factor moved at each step, offset so every trajectory occupies
  # its own block of N indices.
  moved <- apply(xdiff, 1, function(step) (1:N)[step != 0])
  index <- rep(0:(r - 1) * (N + 1), each = N) + moved
  # Order the effects factor-major; base order() replaces the dplyr::arrange
  # call (no package dependency) and the matrix() call spells out `nrow`
  # instead of the partially matched `nr`.
  ord <- order(index)
  ratio <- matrix(ydiff[ord] / delta[ord], nrow = N)
  mu <- apply(ratio, 1, mean)
  mu.star <- apply(ratio, 1, function(z) mean(abs(z)))
  sigma <- apply(ratio, 1, sd)
  sem <- sigma / sqrt(r)
  std.e <- abs(mu) / sem
  data.frame(set = seq_len(N), mu = mu, mu.star = mu.star,
             sigma = sigma, sem = sem, std.errors = std.e)
}
## Load the 2000-trajectory EE design, the study design table and the
## WESyS model results.
list.files("results/")
load ("sa.design.ca.2000traj.RDA")
vars <- read.csv ("designs/ca.2000traj.ee.study.design.csv")
vars <- vars [,2:5]
load ("ee.ca.2000traj.wesys.results.RDA")
## Keep only WWTP total-production rows.
## NOTE: `DT[!grep(...)]` is data.table's negated integer subset (drop the
## matching rows); it would NOT behave this way on a plain data.frame.
## The `by=` syntax below implies `out` is indeed a data.table.
data.2040 <- out [grep ("WWTP", out$factor), ]
data.2040 <- data.2040 [!grep ("FT", data.2040$factor), ]
data.2040 <- data.2040 [!grep ("Fuel", data.2040$factor), ]
data.2040 <- data.2040 [!grep ("total", data.2040$factor), ] #just the total production for each individual pathway
data.2040 <- data.2040 [grep ("tot", data.2040$factor), ] #just the total production
data.2040$value <- as.integer(data.2040$value)
df <- unique(data.2040)
## Sum production over pathways within each run and append as "WWTP.total".
total.prod <- df[, sum(value), by=run_id]
total.prod <- data.table (run_id = total.prod$run_id, factor = "WWTP.total", value = total.prod$V1)
data.2040 <- rbind (df, total.prod)
## Elementary effects of total WWTP production (674 factors, 2000 trajectories).
test.ee <- ee.test (SA.design, total.prod$value, 674, 2000)
|
53113d9a002cc5282a0de6cdfa5faffd6efc0f8d
|
0696fb5fab8c614cf887fbbaaa61d4908f7d3ab7
|
/doc/nlraa.R
|
23dc88ed375a2661ffbba8b4998c69641e36edfa
|
[] |
no_license
|
femiguez/nlraa
|
6dbb4fef5b2f0d4a01a73b9d27524ab82b5db3db
|
aeb631e113be069ca5998de344b95bc240a5498d
|
refs/heads/master
| 2023-06-21T23:39:29.731687
| 2023-06-13T15:59:29
| 2023-06-13T15:59:29
| 202,008,439
| 19
| 4
| null | 2021-08-11T18:18:10
| 2019-08-12T21:01:53
|
R
|
UTF-8
|
R
| false
| false
| 2,819
|
r
|
nlraa.R
|
## NOTE(review): this file is knitr-purled R code extracted from the nlraa
## package vignette (the `## ---- ... ----` lines are chunk markers); prefer
## editing the vignette source rather than this generated file.
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE, fig.width = 7, fig.height = 6)
library(ggplot2)
library(nlraa)
## ----apropos------------------------------------------------------------------
## List the self-starting (SS*) model functions exported by nlraa.
apropos("^SS")
## ----sm-----------------------------------------------------------------------
## Sorghum and Maize dataset
data(sm)
ggplot(data = sm, aes(x = DOY, y = Yield, color = Crop)) +
  geom_point() +
  facet_wrap(~ Input)
## ----lfmc---------------------------------------------------------------------
## Live fuel moisture content
data(lfmc)
ggplot(data = lfmc, aes(x = time, y = lfmc, color = leaf.type)) +
  geom_point() +
  ylab("Live fuel moisture content (%)")
## ----swpg---------------------------------------------------------------------
## Soil water and plant growth
data(swpg)
ggplot(data = swpg, aes(x = ftsw, y = lfgr)) +
  geom_point() +
  xlab("Fraction Transpirable Soil Water") +
  ylab("Relative Leaf Growth")
## ----barley-------------------------------------------------------------------
## Response of barley to nitrogen fertilizer
## There is a barley dataset also in package 'lattice'
data(barley, package = "nlraa")
ggplot(data = barley, aes(x = NF, y = yield, color = as.factor(year))) +
  geom_point() +
  xlab("Nitrogen fertilizer (g/m^2)") +
  ylab("Grain (g/m^2)")
## ----maizeleafext-------------------------------------------------------------
## Response of barley to nitrogen fertilizer
## There is a barley dataset also in package 'lattice'
data(maizeleafext, package = "nlraa")
ggplot(data = maizeleafext, aes(x = temp, y = rate)) +
  geom_point() + geom_line() +
  xlab("Temperature (C)") +
  ylab("Leaf Extension Rate (relative)")
## ---- eval = FALSE------------------------------------------------------------
# ## Error in nls(y ~ SSratio(x, a, b, c, d), data = dat) :
# ##   step factor 0.000488281 reduced below 'minFactor' of 0.000976562
## ---- eval = FALSE------------------------------------------------------------
# ## Error in qr.default(.swts * gr) :
# ##  NA/NaN/Inf in foreign function call (arg 1)
## ----barleyG------------------------------------------------------------------
library(nlme)
data(barley, package = "nlraa")
barley$yearf <- as.factor(barley$year)
barleyG <- groupedData(yield ~ NF | yearf, data = barley)
## ----barleyG-mixed------------------------------------------------------------
## Fit the nonlinear model for each year
fit.nlis <- nlsList(yield ~ SSasymp(NF, Asym, R0, lrc), data = barleyG)
## Use this to fit a nonlinear mixed model
fit.nlme <- nlme(fit.nlis)
## Investigate residuals
plot(fit.nlme)
## Look at predictions
plot(augPred(fit.nlme, level = 0:1))
## Compute confidence intervals
intervals(fit.nlme)
## A simpler model is possible...
|
9576433f4a5c8cde29371285b50332dfce9892c9
|
e807cb9f4c9d9f44726f8b70ad048a7c3fbfd566
|
/ui.r
|
2673f345860c343d89207176a338b52487dea6ca
|
[] |
no_license
|
richardsun-voyager/ExponentialDistributionApp
|
7c2f44acde7faa14eb488f66224d751bf901f3d0
|
ceb459d0032da3f26ed0a32de462c79cc397c1bd
|
refs/heads/master
| 2021-05-29T15:17:56.641559
| 2015-05-21T05:06:07
| 2015-05-21T05:06:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,108
|
r
|
ui.r
|
## Shiny UI: two-tab navbar app simulating the distribution of averages of
## exponential draws (a Central Limit Theorem demonstration).
shinyUI(navbarPage("Exponential Distribution Simulation Navbar!",
  tabPanel("Plot",
           sidebarLayout(
             sidebarPanel(
               h2('Exponential Distribution Simulation Settings'),
               ## rate parameter of the exponential distribution
               ## (FIX: label was leftover template text
               ## "Numeric input, labeled lambda")
               sliderInput('lambda', 'Rate parameter (lambda)', 0.05, min = 0.05, max = 1, step = 0.05),
               ## number of exponential draws averaged per repetition
               numericInput('popSize', "Population Size",10,min=10,max=100,step=10),
               ## how many averages to simulate
               numericInput('repTimes', "Repetition Times",100,min=100,max=2000,step=100),
               ## optionally overlay a vertical line at the sample mean
               checkboxInput("meanLine", "Show the Mean Line", FALSE)
             ),
             mainPanel(
               h2('Distribution of Average'),
               plotOutput('figure')
             )
           )
  ),
  tabPanel("Supporting Document",
           h3("This application aims to do simulations on exponential distribution and Central Limit Theorem."),
           p("1.Users can set the values of lambda,population size(step by 10) and repetition times of exponential simulation(step by 100). The generalized averages of each population are calculated and demonstrated in a histogram."),
           p("2.Users can choose to add the mean line or not in order to make comparisons between theoretical mean and simulation result.") )
  )
)
|
e1e315fe11eddc83b5e3a12ee88f111ee205dd7a
|
cf2cb09f424281adde2c9411a6a2cea8011b01e6
|
/R/addCrispraiScores.R
|
d146e5bb202ebf957fb38f761a5b493350ea7aea
|
[
"MIT"
] |
permissive
|
crisprVerse/crisprDesign
|
b58eb93beaf1699b1eb585875939d0249d0dce4c
|
98724dbafe87863723bf829f9300cafa5852c130
|
refs/heads/master
| 2023-07-22T06:35:25.615889
| 2023-07-19T18:09:09
| 2023-07-19T18:09:09
| 523,800,050
| 11
| 3
|
MIT
| 2023-07-19T18:09:11
| 2022-08-11T16:48:52
|
R
|
UTF-8
|
R
| false
| false
| 7,100
|
r
|
addCrispraiScores.R
|
#' @title Add CRISPRa/CRISPRi on-target scores to
#'     a \linkS4class{GuideSet} object.
#' @description Add CRISPRa/CRISPRi on-target scores to a
#'     \linkS4class{GuideSet} object. Only available for SpCas9, and for
#'     hg38 genome. Requires \pkg{crisprScore} package to be installed.
#'
#' @param object A \linkS4class{GuideSet} object or a
#'     \linkS4class{PairedGuideSet} object.
#' @param gr A \linkS4class{GRanges} object derived from \code{queryTss} used
#'     to produce the \code{guideSet} object.
#' @param tssObject  A \linkS4class{GRanges} object containing TSS coordinates
#'     and annotation. The following columns must be present:
#'     "ID", promoter", "tx_id" and "gene_symbol".
#' @param geneCol String specifying which column of the \code{tssObject} should
#'     be used for a unique gene identified. "gene_id" by default.
#' @param modality String specifying which modality is used.
#'     Must be either "CRISPRi" or "CRISPRa".
#' @param chromatinFiles Named character vector of length 3 specifying
#'     BigWig files containing chromatin accessibility data. See
#'     crisprScore vignette for more information.
#' @param fastaFile String specifying fasta file of the hg38 genome.
#' @param ... Additional arguments, currently ignored.
#'
#' @return \code{guideSet} with an added column for the CRISPRai score.
#'
#' @author Jean-Philippe Fortin
#'
#' @seealso \code{\link{addOnTargetScores}} to add other on-target scores.
#'
#' @export
#' @importFrom crisprScore getCrispraiScores
#' @rdname addCrispraiScores
setMethod("addCrispraiScores", "GuideSet",
          function(object,
                   gr,
                   tssObject,
                   geneCol="gene_id",
                   modality=c("CRISPRi", "CRISPRa"),
                   chromatinFiles=NULL,
                   fastaFile=NULL
){
    ## Only the SpCas9 nuclease and the hg38 genome are supported:
    crisprNuclease <- crisprNuclease(object)
    data(SpCas9,
         package="crisprBase",
         envir=environment())
    if (!.identicalNucleases(crisprNuclease, SpCas9)){
        stop("[addCrispraiScores] Only SpCas9 is supported at the moment.")
    }
    if (genome(object)[1]!="hg38"){
        ## FIX: message was missing its opening bracket ("addCrispraiScores]")
        stop("[addCrispraiScores] Only hg38 genome supported at the moment.")
    }
    modality <- match.arg(modality)
    ## Build the TSS and gRNA data.frames expected by crisprScore:
    tssFrame  <- .prepareTssFrame(tssObject,
                                  geneCol=geneCol)
    grnaFrame <- .prepareGrnaFrame(object, gr)
    scores <- crisprScore::getCrispraiScores(sgrna_df=grnaFrame,
                                             tss_df=tssFrame,
                                             chromatinFiles=chromatinFiles,
                                             fastaFile=fastaFile,
                                             modality=modality)
    ## Reorder the scores to match the GuideSet and store them in a
    ## modality-specific metadata column:
    scores <- scores[match(names(object), rownames(scores)),1]
    if (modality=="CRISPRa"){
        mcols(object)$score_crispra <- scores
    } else {
        mcols(object)$score_crispri <- scores
    }
    return(object)
})
#' @rdname addCrispraiScores
#' @export
setMethod("addCrispraiScores", "PairedGuideSet",
          function(object,
                   gr,
                   tssObject,
                   geneCol="gene_id",
                   modality=c("CRISPRi", "CRISPRa"),
                   chromatinFiles=NULL,
                   fastaFile=NULL
){
    ## Flatten the paired set into a single GuideSet, score it via the
    ## GuideSet method, then map the score columns back onto the pairs.
    object <- .validatePairedGuideSet(object)
    unifiedGuideSet <- .pairedGuideSet2GuideSet(object)
    unifiedGuideSet <- addCrispraiScores(unifiedGuideSet,
                                         gr=gr,
                                         tssObject=tssObject,
                                         geneCol=geneCol,
                                         modality=modality,
                                         chromatinFiles=chromatinFiles,
                                         fastaFile=fastaFile)
    out <- .addColumnsFromUnifiedGuideSet(object,
                                          unifiedGuideSet)
    return(out)
})
#' @rdname addCrispraiScores
#' @export
setMethod("addCrispraiScores", "NULL", function(object) {
    ## Nothing to score: a NULL input yields a NULL result.
    NULL
})
# Prepare a data.frame containing TSS information necessary
# to the CRISPRai algorithm.
# Required tssObject metadata columns: ID, promoter, tx_id, and `geneCol`.
# Returns a data.frame with columns:
#   tss_id, gene_symbol, promoter, transcripts, position, strand, chr
# where tss_id must be of the form "<gene_symbol>_<promoter>".
.prepareTssFrame <- function(tssObject,
                             geneCol="gene_id"
){
    tssObject <- as.data.frame(tssObject)
    cols <- c("ID", "promoter","tx_id", geneCol)
    if (!all(cols %in% colnames(tssObject))){
        choices <- setdiff(cols, colnames(tssObject))
        stop("The following columns are missing in the tssObject: \n \t",
             paste0(choices, collapse=", "),".")
    }
    out <- data.frame(tss_id=tssObject$ID,
                      gene_symbol=tssObject[[geneCol]],
                      promoter=tssObject$promoter,
                      transcripts=tssObject$tx_id,
                      position=tssObject$start,
                      strand=tssObject$strand,
                      chr=tssObject$seqnames)
    # Check if there are any missing values:
    if (sum(is.na(out$gene_symbol))>0){
        stop("gene_symbol has some missing values.")
    }
    if (sum(out$gene_symbol=="")>0){
        stop("gene_symbol has some empty values.")
    }
    if (sum(is.na(out$promoter))>0){
        stop("promoter has some missing values.")
    }
    if (sum(out$promoter=="")>0){
        stop("promoter has some empty values.")
    }
    if (sum(is.na(out$position))>0){
        stop("start has some missing values.")
    }
    if (sum(is.na(out$strand))>0){
        stop("strand has some missing values.")
    }
    ## BUG FIX: previously checked the nonexistent out$seqnames column
    ## (the column is named `chr`), so this check could never trigger,
    ## and its error message wrongly said "strand".
    if (sum(is.na(out$chr))>0){
        stop("chr has some missing values.")
    }
    # Checking for final compatibility:
    good <- all(out$tss_id==paste0(out$gene_symbol, "_", out$promoter))
    if (!good){
        stop("The ID does not seem to be of the form geneCol_promoter.")
    }
    return(out)
}
# Prepare a data.frame containing gRNA information necessary
# to the CRISPRai algorithm from a guideSet object and the GRanges
# object (from queryTss) that was used to create the GuideSet.
.prepareGrnaFrame <- function(guideSet, gr){
    spacerLen <- spacerLength(guideSet)
    spacerSeqs <- spacers(guideSet, as.character=TRUE)
    # The scoring model works on 19-mers; trim 20-mers down to 19.
    if (spacerLen == 20){
        spacerSeqs <- substr(spacerSeqs, 2, 20)
    } else if (spacerLen != 19){
        stop("spacer length must be of length 19 or 20.")
    }
    requiredCols <- c("ID")
    missingCols <- setdiff(requiredCols, colnames(mcols(gr)))
    if (length(missingCols) > 0){
        stop("The following columns are missing in the gr object: \n \t",
             paste0(missingCols, collapse=", "),".")
    }
    # Map each guide back to the TSS region it was designed against.
    tssIds <- gr$ID[match(guideSet$region, names(gr))]
    if (sum(is.na(tssIds)) > 0){
        stop("Some of the guideSet regions cannot be found in the gr object.")
    }
    data.frame(grna_id=names(guideSet),
               tss_id=tssIds,
               pam_site=pamSites(guideSet),
               strand=as.character(strand(guideSet)),
               spacer_19mer=spacerSeqs)
}
|
1fc4682fcb94c7052510f3f99f2388e8d3da093a
|
983a7565e1aac16ac2d9a39ef053958df7a1f3f3
|
/Shiny app/evalue/startup.R
|
15a3324dced324d596e355fe898387b111d8d6a8
|
[] |
no_license
|
JBarsotti/evalue
|
f8edf364862b876450720345f8f9107d5b4bdca4
|
3d5e4de24fb2c7f554673f5b5232bc6d24e42409
|
refs/heads/master
| 2023-03-15T19:24:41.315026
| 2020-06-08T16:41:52
| 2020-06-08T16:41:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 184
|
r
|
startup.R
|
## App-wide startup: attach packages and set global options for the
## EValue Shiny application.
library(shiny)
library(EValue)
library(plotly)
# try to fix deployment problem
library(purrr)
library(plogr)
# keeps original error messages
options(shiny.sanitize.errors = FALSE)
|
092c2f03dab7e37e6c8805987372e88f80254f30
|
006c115229805aec6be748937f77ad8b5cb2588f
|
/man/createEvaluationCohort.Rd
|
4ac037910fb1121330e0b5a27f6e6160ee7a452e
|
[
"Apache-2.0"
] |
permissive
|
OHDSI/PheValuator
|
a833780b529406cf76d94ceaf43b75e053000fe0
|
5a04c84102495dd2b6679fd6bf02a12edd0fa2e2
|
refs/heads/master
| 2023-09-01T13:29:40.052357
| 2022-04-25T10:50:54
| 2022-04-25T10:50:54
| 160,824,960
| 18
| 10
| null | 2021-12-17T11:48:51
| 2018-12-07T13:06:25
|
R
|
UTF-8
|
R
| false
| true
| 5,497
|
rd
|
createEvaluationCohort.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CreateEvaluationCohort.R
\name{createEvaluationCohort}
\alias{createEvaluationCohort}
\title{Create the evaluation cohort}
\usage{
createEvaluationCohort(
connectionDetails,
oracleTempSchema = NULL,
xSpecCohortId,
xSensCohortId,
prevalenceCohortId,
xSpecCohortSize = 5000,
cdmDatabaseSchema,
cohortDatabaseSchema,
cohortTable,
workDatabaseSchema,
covariateSettings = createDefaultChronicCovariateSettings(excludedCovariateConceptIds
= c(), addDescendantsToExclude = TRUE),
modelPopulationCohortId = 0,
modelPopulationCohortIdStartDay = 0,
modelPopulationCohortIdEndDay = 0,
evaluationPopulationCohortId = 0,
evaluationPopulationCohortIdStartDay = 0,
evaluationPopulationCohortIdEndDay = 0,
modelBaseSampleSize = 1e+05,
baseSampleSize = 2e+06,
lowerAgeLimit = 0,
upperAgeLimit = 120,
visitLength = 0,
visitType = c(9201, 9202, 9203, 262, 581477),
gender = c(8507, 8532),
race = 0,
ethnicity = 0,
startDate = "19001010",
endDate = "21000101",
cdmVersion = "5",
outFolder = getwd(),
modelId = "main",
evaluationCohortId = "main",
excludeModelFromEvaluation = FALSE,
removeSubjectsWithFutureDates = TRUE,
saveEvaluationCohortPlpData = FALSE,
modelType = "acute"
)
}
\arguments{
\item{connectionDetails}{connectionDetails created using the function
createConnectionDetails in the DatabaseConnector package.}
\item{oracleTempSchema}{A schema where temp tables can be created in Oracle.}
\item{xSpecCohortId}{The number of the "extremely specific (xSpec)" cohort
definition id in the cohort table (for noisy positives).}
\item{xSensCohortId}{The number of the "extremely sensitive (xSens)" cohort
definition id in the cohort table (for noisy negatives).}
\item{prevalenceCohortId}{The number of the cohort definition id to determine the
disease prevalence.}
\item{xSpecCohortSize}{The recommended xSpec sample size to use in model (default = NULL)}
\item{cdmDatabaseSchema}{The name of the database schema that contains the OMOP CDM
instance. Requires read permissions to this database. On SQL
                           Server, this should specify both the database and the
schema, so for example 'cdm_instance.dbo'.}
\item{cohortDatabaseSchema}{The name of the database schema that is the location where
the cohort data used to define the at risk cohort is
available. Requires read permissions to this database.}
\item{cohortTable}{The tablename that contains the at risk cohort. The
expectation is cohortTable has format of COHORT table:
cohort_concept_id, SUBJECT_ID, COHORT_START_DATE,
COHORT_END_DATE.}
\item{workDatabaseSchema}{The name of the database schema that is the location where
a table can be created and afterwards removed.
Requires write permissions to this database.}
\item{covariateSettings}{A covariateSettings object as generated using
createCovariateSettings().}
\item{modelPopulationCohortId}{The number of the cohort to be used as a base population for
the model. If set to 0, the entire database population will be
used.}
\item{modelPopulationCohortIdStartDay}{The number of days relative to the mainPopulationCohortId
cohort start date to begin including visits.}
\item{modelPopulationCohortIdEndDay}{The number of days relative to the mainPopulationCohortId
cohort start date to end including visits.}
\item{evaluationPopulationCohortId}{The number of the cohort to be used as a base population for
                                   the evaluation cohort. If set to 0, the entire database population will be
used.}
\item{evaluationPopulationCohortIdStartDay}{The number of days relative to the evaluationPopulationCohortId
cohort start date to begin including visits.}
\item{evaluationPopulationCohortIdEndDay}{The number of days relative to the evaluationPopulationCohortId
cohort start date to end including visits.}
\item{modelBaseSampleSize}{The number of non-xSpec subjects to include in the model}
\item{baseSampleSize}{The maximum number of subjects in the evaluation cohort.}
\item{lowerAgeLimit}{The lower age for subjects in the model.}
\item{upperAgeLimit}{The upper age for subjects in the model.}
\item{visitLength}{The minimum length of index visit for acute outcomes.}
\item{visitType}{The concept_id for the visit type.}
\item{gender}{The gender(s) to be included.}
\item{race}{The race(s) to be included.}
\item{ethnicity}{The ethnicity(s) to be included.}
\item{startDate}{The starting date for including subjects in the model.}
\item{endDate}{The ending date for including subjects in the model.}
\item{cdmVersion}{The CDM version of the database.}
\item{outFolder}{The folder where the output files will be written.}
\item{modelId}{A string used to generate the file names for this model.}
\item{evaluationCohortId}{A string used to generate the file names for this evaluation cohort.}
\item{excludeModelFromEvaluation}{Should subjects used in the model be excluded from the evaluation cohort?}
\item{removeSubjectsWithFutureDates}{For buggy data with data in the future: ignore subjects with
dates in the future?}
\item{saveEvaluationCohortPlpData}{Should the large PLP file for the evaluation cohort be saved? To be
used for debugging purposes.}
\item{modelType}{The type of health outcome in the model either "acute" or
"chronic".}
}
\description{
Create the evaluation cohort
}
\details{
Fits a diagnostic prediction model, and uses it to create an evaluation cohort with
probabilities for the health outcome of interest.
}
|
fb5a622d34115618e0ac9e1fa1c82ff5214f0f9c
|
b245a9b3d3565994d884a4f0649aad4d8ee267da
|
/admixture analysis/genABEL.R
|
9a258844d3a7d8073ef67713b8f53c0debc116e2
|
[] |
no_license
|
AnjaWestram/Littorina_hybrid_zone_1
|
b14f9cd236edfffa966c646368f6443ad304ec43
|
19a1a94f660e72a5c4aca175703d00c37c10a7df
|
refs/heads/master
| 2020-03-22T13:21:36.643611
| 2018-08-06T08:08:39
| 2018-08-06T08:08:39
| 140,101,937
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,713
|
r
|
genABEL.R
|
# GWAS script using GenABEL: loads phenotype/genotype data, runs marker QC,
# builds a genomic kinship matrix and performs a single-SNP GWA corrected for
# population structure (Price et al. PCA method via egscore()).
# Command-line args: [1] phenofile, [2] genofile, [3] trait name, [4] output file.
args = commandArgs(trailingOnly=TRUE)
library(GenABEL)
#### load and explore data
dat <- load.gwaa.data(phenofile=args[1], genofile=args[2], force=TRUE, makemap=FALSE, sort=TRUE, id = "id")
#### QC
# QC - no control for Hardy-Weinberg here (option p.value=0), control for maf of given value, defaults for SNP and individual call rates
qc1 <- check.marker(dat, p.level=0, maf = 0.01)
data1 <- dat[qc1$idok, qc1$snpok] # data set containing only individuals and snps passing qc1
# prep data for GWAS: calculating kinship matrix, replacing diagonal for egscore function purposes, see ?egscore
# (fixed: the escaped "\@" slot accessors were invalid R syntax)
data1.gkin <- ibs(data1[, data1@gtdata@chromosome != "X"], weight="freq")
diag(data1.gkin) <- hom(data1[,autosomal(data1)])$Var
##########################################################
## single_SNP GWA with correction for population structure#
## using the method of Price et al. (using PCs of the genomic kinship matrix) here implemented in the egscore function (you just have to provide the kinship matrix)
##############################################################################
#GWA
data1.pca <- egscore(args[3], data1, kin=data1.gkin, propPs=0.95, clambda=FALSE, naxes=4 , times = 1000)
#data1.pca <- egscore(args[3]~size, data=data1, kin=data1.gkin, propPs=0.95, clambda=FALSE, naxes=4 , times = 1000) # trait = shape
#data1.pca <- egscore(args[3]~sex, data=data1, kin=data1.gkin, propPs=0.95, clambda=FALSE, naxes=4 , times = 1000) # trait = size
# Fixed: the original script wrote an undefined object `results`; extract the
# per-SNP results table from the scan.gwaa object returned by egscore().
# NOTE(review): results() is GenABEL's accessor for the scan.gwaa results slot
# -- confirm against the installed GenABEL version.
results <- results(data1.pca)
write.table(results, file=args[4], row.names=FALSE, col.names=TRUE, dec=".", na="NA", eol="\n", sep="\t")
# Genomic inflation factor (lambda) from the 1-df test p-values
lamb <- estlambda(results[,"P1df"])
write.table(lamb[[1]] , "lambda_with_correction_for_pop_str", row.names = FALSE)
|
b1aa06eda940f7760aff584452d04b625af8160b
|
da2aded3040e88b9b7e375a5c79bf1a4b90b0556
|
/R/ctm_method.R
|
21b67c1025bbfe2314ff9793c22a27e6be29d8eb
|
[] |
no_license
|
PolMine/polmineR.misc
|
f1c3cca7651dbd91fd7fce92103d11a5e8270be1
|
03dfef42f3242ce3120a45f283a61b89ebb903d4
|
refs/heads/master
| 2023-06-09T01:20:29.818197
| 2022-11-17T13:27:23
| 2022-11-17T13:27:23
| 54,013,177
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 982
|
r
|
ctm_method.R
|
# #' @examples
# #' \dontrun{
# #' sz2010 <- partition("ARENEN", list(text_date=".*2010"), method="grep", pAttribute=NULL)
# #' dates <- sAttributes(sz2010, "text_date")
# #' sz2010 <- partition_bundle("ARENEN", def=list(text_date=".*2010"), var=list(text_date=dates), pAttribute="word")
# #' sz2010tdm <- as.TermDocumentMatrix(sz2010)
# #' }
# setMethod("ctm", "TermDocumentMatrix", function(.Object){
# docLengths <- tapply(.Object$v, .Object$j, sum)
# splittedByDoc <- split(
# x=data.frame(vocab=.Object$i, tf=.Object$v),
# f=.Object$j
# )
# idAndTf <- lapply(splittedByDoc, function(docTf){  # fixed: the list is named splittedByDoc, not splitted
# paste(apply(as.matrix(docTf), 1, function(row) paste(row, collapse=":", sep="")), collapse=" ")
# })
# ctmData <- paste(
# mapply(function(x,y) paste(x, y, sep=" "), docLengths, idAndTf),
# collapse="\n"
# )
# ctmTmpDir <- tempdir()
# # dir.create(file.path(ctmTmpDir), "ctmData")
# cat(ctmData, file=file.path(ctmTmpDir, "ctmData.txt"))
# })
#
|
2596229e5ad5fafb1156dd00dbbb21b583375824
|
c991bd9aa20b6fc005cb619b414f5b190eb2a073
|
/Scripts/BackupHistory/VisualizingBackupHistory.R
|
faf4e22a058118aa256a4cdf94d95bddc3eb6b3f
|
[] |
no_license
|
sqlshep/SQLShepBlog
|
f574080f5ea517c526308185046fdeeafb6d3274
|
ac0fc616759b83a40b4a66f31f1caef9d87fd435
|
refs/heads/master
| 2022-10-15T01:38:57.416362
| 2022-09-30T16:05:36
| 2022-09-30T16:05:36
| 77,758,171
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,443
|
r
|
VisualizingBackupHistory.R
|
# Visualize SQL Server backup history: pulls rows from msdb via ODBC
# (stored procedure usp_GetBackupHist), normalizes backup sizes to GB,
# and plots size over time per database and backup type.
#install.packages("odbc")
#install.packages("ggplot2")
#install.packages("dplyr")
#install.packages("lubridate")
#install.packages("scales")
library(odbc)
library(ggplot2)
library(dplyr)
library(lubridate)
library(scales)
# Pin the session time zone so backup timestamps are not shifted locally
Sys.setenv(TZ='GMT')
# Trusted (Windows-auth) connection to the local msdb system database
MSDB <- dbConnect(odbc::odbc(),
Driver = "SQL Server",
Server = "localhost",
Database = "msdb",
Trusted_Connection = 'Yes')
SQLStmt <- sprintf("exec usp_GetBackupHist")
rs <- dbSendQuery(MSDB, SQLStmt)
msdbBackupHist <- dbFetch(rs)
# house keeping: release the result set and close the connection
dbClearResult(rs)
dbDisconnect(MSDB)
### Save an object to a file
#saveRDS(msdbBackupHist, file = "C:/Users/adminshep/Documents/msdbBackupHist.rds")
### Read from file
# msdbHist <- readRDS(file = "/Users/Shep/Azure Share/DSVM1/msdb/msdbHist.rds")
# Keep an untouched copy of the raw history before filtering/rescaling
keep <- msdbBackupHist
# Restrict to the most recent 60 days of backup history
msdbBackupHist <- filter(msdbBackupHist, backup_start_date >= (max(msdbBackupHist$backup_start_date) - days(60)))
# Convert backup_size to GB in place; column index 12 is presumably
# backup_size -- TODO confirm against usp_GetBackupHist's column order
msdbBackupHist$backup_size[msdbBackupHist$backup_size_unit == 'MB'] = msdbBackupHist[msdbBackupHist$backup_size_unit == 'MB',12]/1000
msdbBackupHist$backup_size[msdbBackupHist$backup_size_unit == 'KB'] = msdbBackupHist[msdbBackupHist$backup_size_unit == 'KB',12]/1000000
msdbBackupHist$backup_size_unit[msdbBackupHist$backup_size_unit == 'KB'] = "GB"
msdbBackupHist$backup_size_unit[msdbBackupHist$backup_size_unit == 'MB'] = "GB"
# Suppress scientific notation on plot axes
options(scipen=999)
# All backups, faceted by backup type and database name
ggplot(msdbBackupHist, aes(x=backup_start_date, y=backup_size)) +
geom_point() +
facet_wrap(type ~ name) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Create a dataframe for just the backups (type 'D' -- presumably full
# database backups; verify against msdb backupset.type codes)
DbBackups <- msdbBackupHist[msdbBackupHist$type == 'D',]
ggplot(DbBackups, aes(x=backup_start_date, y=backup_size)) +
geom_point() +
facet_wrap(~name) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
##Show just the NYSE database with selection done inside the ggplot call
ggplot(DbBackups[DbBackups$name == "NYSE",], aes(x=backup_start_date, y=backup_size)) +
geom_point() +
facet_wrap(~name) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Create a dataframe just for log file backups (type 'L')
LogBackups <- msdbBackupHist[msdbBackupHist$type == 'L',]
ggplot(LogBackups, aes(x=backup_start_date, y=backup_size)) +
geom_point() +
facet_wrap(~ name) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
|
3e4a96399d54151cbd91e4bcf1fc709ceef83950
|
77b59cfb21a8a554e791f7c4d76bf6d0ca71ec30
|
/arraycomfun.R
|
c65a3e75095ded722744c762009ec9ae7a2120a2
|
[] |
no_license
|
christinery/test_code
|
5db7386d0ff75508b1aefedf15a67bcad23a95bc
|
c56c4b01262b28737643bb1425464b7a6b6b42a4
|
refs/heads/master
| 2020-03-28T11:28:10.797291
| 2018-10-23T00:02:49
| 2018-10-23T00:02:49
| 148,216,878
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,924
|
r
|
arraycomfun.R
|
################################################################################
## This file include some elementary computation for array mentioned in the ####
## file "mainQRnoweight" for paper "Copula-based M-estimate for FVCM" ##########
################################################################################
################################################################################
##This function is used to compute the difference for each element of a array###
##minus a same matrix, and get the mean of these difference's square ###########
################################################################################
matrixdiff = function(M1, M2){
  ## M1 is a 3-d array whose first two dimensions match dim(M2); M2 is the
  ## reference matrix. For every grid point, returns the sum over slices of
  ## the squared deviation from M2, divided by (number of slices - 1)
  ## (sample-variance style divisor).
  n_slices = dim(M1)[3]
  ## Subtract M2 from every slice along the third margin, then square
  sq_dev = sweep(M1, c(1, 2), M2)^2
  ## rowSums(..., dims = 2) collapses the third dimension, giving a
  ## dim(M2)[1] x dim(M2)[2] matrix
  rowSums(sq_dev, dims = 2) / (n_slices - 1)
}
###############################################################################
########################## end ################################################
###############################################################################
#############################################################################
##This function is used to compute the mean for a array by sum each element##
##or sqrt element (matrix) of array##########################################
#############################################################################
matrixsqmean = function(A,I){
  ## A is a 3-d array; I is a flag: 0 means take the square root of every
  ## element first and then average across slices, any other value averages
  ## the raw slices. Returns a dim(A)[1] x dim(A)[2] matrix.
  if (I == 0) {
    ## mean over the third dimension of sqrt-transformed slices
    rowMeans(sqrt(A), dims = 2)
  } else {
    ## plain elementwise mean over the third dimension
    rowMeans(A, dims = 2)
  }
}
###############################################################################
########################## end ################################################
###############################################################################
#################################################################################
##This function is used to find the empirical coverage probability for function##
##coefficient discreted by grid points###########################################
#################################################################################
matrixecp = function(A1, A2, A0){
  ## Empirical coverage probability on a grid. A1 holds point estimates and
  ## A2 the matching variances (3-d arrays with identical dimensions); A0 is
  ## the matrix of true values for dimensions 1-2. For each grid point,
  ## returns the fraction of slices whose 95% interval
  ## A1 +/- 1.96*sqrt(A2) covers A0.
  if (!all(dim(A1) == dim(A2)))
    stop("The dimension of the array must be the same!")
  ## Half-width of the 95% normal interval per slice
  half_width = 1.96 * sqrt(A2)
  ## A0 lies inside [A1 - hw, A1 + hw] exactly when |A1 - A0| <= hw, which
  ## is algebraically identical to the original sign test on
  ## (lower - A0) * (upper - A0) <= 0.
  deviation = abs(sweep(A1, c(1, 2), A0))
  covered = deviation <= half_width
  ## Average the coverage indicator over the third dimension
  rowMeans(covered, dims = 2)
}
###############################################################################
########################## end ################################################
###############################################################################
#############################################################################
##This function is used to compute the row mean for each element (matrix)####
##of array.##################################################################
#############################################################################
arraymatrixmean = function(A){
  ## A is a 3-d array. Returns a matrix whose l-th row holds the row means
  ## of slice A[,,l]; the result is dim(A)[3] x dim(A)[1].
  n_slices = dim(A)[3]
  ## Collect each slice's row means first and bind once: the original grew
  ## the result with rbind() inside the loop (O(n^2) copying), and its
  ## 1:index loop also misfired for a zero-slice array; seq_len() is safe.
  rows = lapply(seq_len(n_slices), function(l) rowMeans(A[,,l]))
  do.call(rbind, rows)
}
###############################################################################
########################## end ################################################
###############################################################################
|
d373dfbdc23ac5602c26df435ef2b2b611ea6015
|
d5255e7691f99d2517892e6e690ec291646b850b
|
/football_Kmeans_t_1.R
|
7302716f6d2d5e4fcaf34162e582296e67ba3705
|
[] |
no_license
|
kjshalemraj/Football-Player-Segmentation-using-Kmeans
|
eb94dfb91f7d22866cfdd040e1fa005d304b44d0
|
aca3b853f10b1ed9dd88fa82740628d26566a329
|
refs/heads/main
| 2023-06-25T00:24:00.731031
| 2021-07-30T06:35:42
| 2021-07-30T06:35:42
| 390,958,188
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 63,298
|
r
|
football_Kmeans_t_1.R
|
#God is good all the times
#libraries
library(ggplot2)
install.packages("factoextra")
library(factoextra)
library(RColorBrewer)
library(dplyr)
library(tidyr)
#Loading the data
fb = read.csv("C:/Users/Dr Vinod/Desktop/football_ kmean/data.csv",
stringsAsFactors = TRUE)
fb$Name
dim(fb)
'18207 89'
#Checking the missing values
sapply(fb, function(x) sum(is.null(x)))
sapply(fb, function(x) sum(is.na(x)))
'48 missing values in most of the variables & 48 is less in proportion compare
complete data so removing the missing values'
#Removed the missing values
fb = fb[complete.cases(fb),]
sapply(fb, function(x) sum(is.na(x)))
'No missing values'
dim(fb)
'18147 89'
"18207-18147 = 60 removed"
str(fb)
data.frame(colnames(fb))
"
colnames.fb.
1 ï..
2 ID
3 Name
4 Age
5 Photo
6 Nationality
7 Flag
8 Overall
9 Potential
10 Club
11 Club.Logo
12 Value
13 Wage
14 Special
15 Preferred.Foot
16 International.Reputation
17 Weak.Foot
18 Skill.Moves
19 Work.Rate
20 Body.Type
21 Real.Face
22 Position
23 Jersey.Number
24 Joined
25 Loaned.From
26 Contract.Valid.Until
27 Height
28 Weight
29 LS
30 ST
31 RS
32 LW
33 LF
34 CF
35 RF
36 RW
37 LAM
38 CAM
39 RAM
40 LM
41 LCM
42 CM
43 RCM
44 RM
45 LWB
46 LDM
47 CDM
48 RDM
49 RWB
50 LB
51 LCB
52 CB
53 RCB
54 RB
55 Crossing
56 Finishing
57 HeadingAccuracy
58 ShortPassing
59 Volleys
60 Dribbling
61 Curve
62 FKAccuracy
63 LongPassing
64 BallControl
65 Acceleration
66 SprintSpeed
67 Agility
68 Reactions
69 Balance
70 ShotPower
71 Jumping
72 Stamina
73 Strength
74 LongShots
75 Aggression
76 Interceptions
77 Positioning
78 Vision
79 Penalties
80 Composure
81 Marking
82 StandingTackle
83 SlidingTackle
84 GKDiving
85 GKHandling
86 GKKicking
87 GKPositioning
88 GKReflexes
89 Release.Clause"
#________________1. ï..
str(fb$ï..)
'These are only index values/serial number of rows'
#________________2.ID
str(fb$ID)
'int [1:18147] 158023 20801 190871 193080 192985 183277 177003 176580
155862 200389 ...'
length(unique(fb$ID))
"18147 - All are unique ID's"
#________________3.Name
str(fb$Name)
' Factor w/ 17194 levels "A. Ã-hman","A. Ã-mür",..: 9689 3206 12560 4187 8671
4478 9697 9899 15466 7835 ...'
length(unique(fb$Name))
'17140'
"Observe that some names are repeated, being the ID's are unique, assuming the
data of the persons might be differ"
#________________ 4.Age
str(fb$Age)
'int [1:18147] 31 33 26 27 27 27 32 31 32 25 ...'
summary(fb$Age)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
16.00 21.00 25.00 25.12 28.00 45.00'
#Histogram
hist(fb$Age,
col = brewer.pal(8,'Spectral'),
main = 'Age of the Player',
xlab = 'Age')
#Boxplot
boxplot(fb$Age,
col = 'darkseagreen3',
horizontal = TRUE,
main = 'Age of the Player')
#Checking the outliers from complete data
age_ub = quantile(fb$Age, 0.75)+1.5*IQR(fb$Age)
length(fb$Age[fb$Age>age_ub])
'47 - Small proportion of outliers Ignoring'
#________________5.Photo
str(fb$Photo)
"These column contains the url links of the photos"
head(fb$Photo)
"
[1] https://cdn.sofifa.org/players/4/19/158023.png
[2] https://cdn.sofifa.org/players/4/19/20801.png
[3] https://cdn.sofifa.org/players/4/19/190871.png
[4] https://cdn.sofifa.org/players/4/19/193080.png
[5] https://cdn.sofifa.org/players/4/19/192985.png
[6] https://cdn.sofifa.org/players/4/19/183277.png
18207 Levels: https://cdn.sofifa.org/players/4/19/100803.png ..."
#________________6.Nationality
str(fb$Nationality)
'Factor w/ 164 levels "Afghanistan",..: 7 124 21 141 14 14 36 159 141 138 ...'
table(fb$Nationality)
#Barplot
barplot(table(fb$Nationality),
col = brewer.pal(8,'Accent'),
main = 'Nationality of the Players',
las=2)
#Converting the table output to data frame
fb_nationality = as.data.frame(table(fb$Nationality))
colnames(fb_nationality) = c('Name_of_the_Country', "No_of_Players")
fb_nationality = fb_nationality[order(fb_nationality$No_of_Players,
decreasing = TRUE),]
rownames(fb_nationality) = NULL#Resetting row index, starts from 1
#Top 10 countries having highest number of players
head(fb_nationality,10)
"
Name_of_the_Country No_of_Players
1 England 1657
2 Germany 1195
3 Spain 1071
4 Argentina 936
5 France 911
6 Brazil 825
7 Italy 699
8 Colombia 616
9 Japan 478
10 Netherlands 452"
dim(fb_nationality[fb_nationality$No_of_Players==1,])[1]
'25 countries having only 1 player'
#________________7.Flag
str(fb$Flag)
'Factor w/ 164 levels "https://cdn.sofifa.org/flags/1.png",..: 123 108 125
115 138 138 2 132 115 114 ...'
'All 164 countries having flags, similar to above variable'
head(fb$Flag)
'[1] https://cdn.sofifa.org/flags/52.png
[2] https://cdn.sofifa.org/flags/38.png
[3] https://cdn.sofifa.org/flags/54.png
[4] https://cdn.sofifa.org/flags/45.png
[5] https://cdn.sofifa.org/flags/7.png
[6] https://cdn.sofifa.org/flags/7.png
164 Levels: https://cdn.sofifa.org/flags/1.png ...'
'This variable having the urls of flag images'
#________________8.Overall
str(fb$Overall)
'int [1:18147] 94 94 92 91 91 91 91 91 91 90 ...'
summary(fb$Overall)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
46.00 62.00 66.00 66.25 71.00 94.00 '
#Histogram
hist(fb$Overall,
col = brewer.pal(8,'Set1'),
main = 'Overall of the Player',
xlab = 'Overall')
#Boxplot
boxplot(fb$Overall,
col = 'gold3',
horizontal = TRUE,
main = 'Overall of the Player')
#Upper Boundary
oa_ub = quantile(fb$Overall, 0.75)+1.5*IQR(fb$Overall)
length(fb$Overall[fb$Overall>oa_ub])
'110 - less in proportion so ignoring'
#Lower Boundary
oa_lb = quantile(fb$Overall, 0.25)-1.5*IQR(fb$Overall)
length(fb$Overall[fb$Overall<oa_lb])
'53 - less in proportion so ignoring'
#________________9 Potential
str(fb$Potential)
'int [1:18147] 94 94 93 93 92 91 91 91 91 93 ...'
summary(fb$Potential)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
48.00 67.00 71.00 71.32 75.00 95.00 '
#Histogram
hist(fb$Potential,
col = brewer.pal(8,'Accent'),
main = 'Potential of the Player',
xlab = 'Potential')
#Boxplot
boxplot(fb$Potential,
col = 'deeppink3',
horizontal = TRUE,
main = 'Potential of the Player')
#Upper Boundary
pot_ub = quantile(fb$Potential, 0.75)+1.5*IQR(fb$Potential)
length(fb$Potential[fb$Potential>pot_ub])
'131 - less in proportion so ignoring'
#Lower Boundary
pot_lb = quantile(fb$Potential, 0.25)-1.5*IQR(fb$Potential)
length(fb$Potential[fb$Potential<pot_lb])
'29 - less in proportion so ignoring'
#________________10 Club
str(fb$Club)
' Factor w/ 652 levels ""," SSV Jahn Regensburg",..: 218 333 438 380 379 142
475 218 475 65 ...'
#Barplot
barplot(table(fb$Club),
col = brewer.pal(8,'Accent'),
main = 'Club of the Players',
las=2)
table(fb$Club)
#Noted some blank cells [229], replacing them with No_club
levels(fb$Club)[levels(fb$Club)==''] = 'No_club'
#Converting the table output to data frame
fb_club = as.data.frame(table(fb$Club))
colnames(fb_club) = c('Name_of_the_Club', "No_of_Players")
fb_club = fb_club[order(fb_club$No_of_Players,decreasing = TRUE),]
rownames(fb_club) = NULL#Resetting row index
#Top 10 countries having highest number of players
head(fb_club,10)
'
Name_of_the_Club No_of_Players
1 No_club 229
2 Arsenal 33
3 AS Monaco 33
4 Atlético Madrid 33
5 Borussia Dortmund 33
6 Burnley 33
7 Cardiff City 33
8 CD Leganés 33
9 Chelsea 33
10 Eintracht Frankfurt 33'
barplot(table(fb_club$No_of_Players),
col = brewer.pal(9,'Spectral'),
main = 'No of Clubs vs Players',
xlab = 'Players',
ylab = 'No of clubs', las=2)
'
18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 229
1 2 16 3 3 15 28 52 60 103 177 43 102 5 16 25 1 '
'Each club contains a minimum of 18 players maximum of 33 players. 229 players
club name is not specified'
#________________11 Club Logo
str(fb$Club.Logo)
' Factor w/ 679 levels "https://cdn.sofifa.org/flags/103.png",..: 491
553 638 90 30 577 493 491 493 490 ...'
head(fb$Club.Logo)
'
[1] https://cdn.sofifa.org/teams/2/light/241.png
[2] https://cdn.sofifa.org/teams/2/light/45.png
[3] https://cdn.sofifa.org/teams/2/light/73.png
[4] https://cdn.sofifa.org/teams/2/light/11.png
[5] https://cdn.sofifa.org/teams/2/light/10.png
[6] https://cdn.sofifa.org/teams/2/light/5.png
679 Levels: https://cdn.sofifa.org/flags/103.png ...'
'This variable is having url of Club logo'
#________________12 Value is in Dollars
str(fb$Value)
'Factor w/ 217 levels "â,¬0","â,¬1.1M",..: 17 196 19 191 13 214 183 ...'
table(fb$Value)
#Replace or substitute â,¬ with nothing
fb$Value = sub("â,¬","", fb$Value)
table(fb$Value)
#integer. A penalty to be applied when deciding to print numeric values in
#fixed or exponential notation. Positive values bias towards fixed and negative
#towards scientific notation: fixed notation will be preferred unless it is
#more than scipen digits wider.
options(scipen = 15)
#Removing M & K and keeping all the values to 1000's
'The grep R function searches for matches of certain character pattern in a
vector of character strings and returns the indices that yielded a match.'
fb$Value[grep('K$',fb$Value)] = as.numeric(sub('K',"",fb$Value[grep('K$',fb$Value)]))
fb$Value[grep('M$',fb$Value)] = (as.numeric(sub('M',"",fb$Value[grep('M$',fb$Value)]))*1000000)/1000
fb$Value = as.numeric(fb$Value)
str(fb$Value)
'num [1:18147] 110500 77000 118500 72000 102000 ...'
#table(fb$Value)
summary(fb$Value)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
0 300 675 2418 2000 118500 '
#Table output into a data frame
fb_value = as.data.frame(table(fb$Value))
colnames(fb_value) = c("Value_in_1000s", "No_of_Players")
fb_value = fb_value[order(fb_value$Value_in_1000s,decreasing = TRUE),]
rownames(fb_value) = NULL
fb_value
#Histogram
hist(fb$Value,
col = brewer.pal(8,'Set2'),
main = 'Value of the Player',
xlab = 'Value')
#Boxplot
boxplot(fb$Value,
col = 'lightcoral',
horizontal = TRUE,
main = 'Value of the Player')
#Extreme outliers belongs to the top players whose value is very highest
fb[fb$Value>=80000,]['Name']
' Name
1 L. Messi
3 Neymar Jr
5 K. De Bruyne
6 E. Hazard
8 L. Suárez
16 P. Dybala
17 H. Kane
26 K. Mbappé'
#Upper Boundary
val_ub = quantile(fb$Value, 0.75)+1.5*IQR(fb$Value)
length(fb$Value[fb$Value>val_ub])
'2487 ignoring'
#________________13 Wage
str(fb$Wage)
'Factor w/ 144 levels "â,¬0","â,¬100K",..: 95 75 56 50 67 65 78 82 71 138 ...'
table(fb$Wage)
#Replace or substitute â,¬ with nothing
fb$Wage = sub("â,¬","", fb$Wage)
#Removing K - All the values are in 1000's
'The grep R function searches for matches of certain character pattern in a
vector of character strings and returns the indices that yielded a match.'
fb$Wage = as.numeric(sub('K',"",fb$Wage))
str(fb$Wage)
' num [1:18147] 565 405 290 260 355 340 420 455 380 94 ...'
summary(fb$Wage)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
0.000 1.000 3.000 9.759 9.000 565.000 '
#Table output into a dataframe
fb_Wage = as.data.frame(table(fb$Wage))
colnames(fb_Wage) = c("Wage_in_1000s", "No_of_Players")
fb_Wage = fb_Wage[order(fb_Wage$Wage_in_1000s,decreasing = TRUE),]
rownames(fb_Wage) = NULL
fb_Wage
#Histogram
hist(fb$Wage,
col = brewer.pal(8,'Set2'),
main = 'Wage of the Player',
xlab = 'Wage')
#Boxplot
boxplot(fb$Wage,
col = 'lightcoral',
horizontal = TRUE,
main = 'Wage of the Player')
#Extreme outliers belongs to the top players whose value is very highest
fb[fb$Wage>=300,]['Name']
"
#cs2m[cs2m$Age >= 20, ]['BP']
BP
1 100
4 100
5 95
6 110
7 120
8 150
9 160
10 125
"
# Observed the value & wage is 0 for No_club therefore removing those
# observations
table(fb$Wage)[1]
' 0
229'
fb[fb$Club=='No_club',]$Value
fb[fb$Club=='No_club',]$Wage
fb = fb[fb$Club!='No_club',]
dim(fb)
'17918 89
18147-17918 = 229 observations removed'
#Upper Boundary
wag_ub = quantile(fb$Wage, 0.75)+1.5*IQR(fb$Wage)
length(fb$Wage[fb$Wage>wag_ub])
'2031 ignoring'
#________________14 Special
str(fb$Special)
'int [1:17918] 2202 2228 2143 1471 2281 2142 2280 2346 2201 1331 ...'
summary(fb$Special)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
731 1457 1636 1598 1787 2346 '
#Histogram
hist(fb$Special,
col = brewer.pal(8,'Paired'),
main = 'Special of the Player',
xlab = 'Special')
#Boxplot
boxplot(fb$Special,
col = 'turquoise4',
horizontal = TRUE,
main = 'Special of the Player')
#Upper Boundary
spe_ub = quantile(fb$Special, 0.75)+1.5*IQR(fb$Special)
length(fb$Special[fb$Special>spe_ub])
'1 - less in proportion so ignoring'
#Lower Boundary
spe_lb = quantile(fb$Special, 0.25)-1.5*IQR(fb$Special)
length(fb$Special[fb$Special<spe_lb])
'541 - less in proportion so ignoring'
#________________15 Preferred.Foot
str(fb$Preferred.Foot)
'Factor w/ 3 levels "","Left","Right": 2 3 3 3 3 3 3 3 3 3 ...'
table(fb$Preferred.Foot)
' Left Right
0 4162 13756 '
#Resetting the levels
fb$Preferred.Foot = factor(fb$Preferred.Foot) #this we have seen before also
table(fb$Preferred.Foot)
' Left Right
4162 13756 '
#Barplot
barplot(table(fb$Preferred.Foot),
col = c('darkslateblue', 'firebrick3'),
main = 'Preferred Foot - Players')
#________________16 International.Reputation
str(fb$International.Reputation)
'int [1:17918] 5 5 5 4 4 4 4 5 4 3 ...'
table(fb$International.Reputation)
' 1 2 3 4 5
16305 1248 308 51 6 '
#Barplot
barplot(table(fb$International.Reputation),
col = brewer.pal(5,'Dark2'),
main = 'International Reputation - Players')
#________________17 Weak Foot
str(fb$Weak.Foot)
'int [1:17918] 4 4 5 3 5 4 4 4 3 3 ...'
table(fb$Weak.Foot)
' 1 2 3 4 5
153 3715 11201 2622 227 '
#Barplot
barplot(table(fb$Weak.Foot),
col = brewer.pal(5,'Set1'),
main = 'Weak Foot - Players')
#________________18 Skill.Moves
str(fb$Skill.Moves)
'int [1:17918] 4 5 5 1 4 4 4 3 3 1 ...'
table(fb$Skill.Moves)
' 1 2 3 4 5
1992 8443 6522 911 50 '
#Barplot
barplot(table(fb$Skill.Moves),
col = brewer.pal(5,'Spectral'),
main = 'Skill Moves - Players')
#________________19 Work.Rate
str(fb$Work.Rate)
'Factor w/ 10 levels "","High/ High",..: 10 3 4 10 2 4 2 4 4 10 ...'
table(fb$Work.Rate)
' High/ High High/ Low High/ Medium Low/ High
0 1007 686 3131 435
Low/ Low Low/ Medium Medium/ High Medium/ Low Medium/ Medium
34 440 1660 840 9685 '
#Resetting factors
fb$Work.Rate = factor(fb$Work.Rate) # for removing 0
table(fb$Work.Rate)
' High/ High High/ Low High/ Medium Low/ High Low/ Low
1007 686 3131 435 34
Low/ Medium Medium/ High Medium/ Low Medium/ Medium
440 1660 840 9685'
#Barplot
par(mar=c(8,4,2,2))
barplot(table(fb$Work.Rate),
col = brewer.pal(9,'Paired'),
main = 'Work Rate - Players', las=2)
#________________20.Body.Type
str(fb$Body.Type)
'Factor w/ 11 levels "","Akinfenwa",..: 6 3 7 5 8 8 5 8 8 8 ...'
table(fb$Body.Type)
' Akinfenwa C. Ronaldo Courtois
0 1 1 1
Lean Messi Neymar Normal
6351 1 1 10436
PLAYER_BODY_TYPE_25 Shaqiri Stocky
1 1 1124 '
#Resetting factors
fb$Body.Type = factor(fb$Body.Type) # we have done this before also
table(fb$Body.Type)
' Akinfenwa C. Ronaldo Courtois Lean
1 1 1 6351
Messi Neymar Normal PLAYER_BODY_TYPE_25
1 1 10436 1
Shaqiri Stocky
1 1124 '
#Barplot
par(mar=c(8,4,2,2))
barplot(table(fb$Body.Type),
col = brewer.pal(9,'Paired'),
main = 'Body Type - Players', las=2)
'Data is not proper'
#________________21 Real.Face
str(fb$Real.Face)
'Factor w/ 3 levels "","No","Yes": 3 3 3 3 3 3 3 3 3 3 ...'
table(fb$Real.Face)
' No Yes
0 16264 1654'
#Resetting factors
fb$Real.Face = factor(fb$Real.Face)
table(fb$Real.Face)
' No Yes
16264 1654 '
#Barplot
par(mar=c(5,4,3,2))
barplot(table(fb$Real.Face),
col = c('turquoise4','tan4'),
main = 'Real Face - Players')
#________________22 Position
str(fb$Position)
'Factor w/ 28 levels "","CAM","CB",..: 23 28 16 7 21 13 21 25 20 7 ...'
table(fb$Position)
' CAM CB CDM CF CM GK LAM LB LCB LCM LDM LF LM LS LW
0 948 1754 936 74 1377 1992 21 1305 637 389 239 15 1086 206 374
LWB RAM RB RCB RCM RDM RF RM RS RW RWB ST
78 21 1268 652 387 246 16 1114 201 365 87 2130'
#Resetting factors
fb$Position = factor(fb$Position)
table(fb$Position)
'CAM CB CDM CF CM GK LAM LB LCB LCM LDM LF LM LS LW LWB
948 1754 936 74 1377 1992 21 1305 637 389 239 15 1086 206 374 78
RAM RB RCB RCM RDM RF RM RS RW RWB ST
21 1268 652 387 246 16 1114 201 365 87 2130 '
#Barplot
barplot(table(fb$Position),
col = brewer.pal(8,'Dark2'),
main = 'Position - Players',las=2)
#________________23 Jersey.Number
str(fb$Jersey.Number)
'int [1:17918] 10 7 10 1 7 10 10 9 15 1 ...'
summary(fb$Jersey.Number)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
1.00 8.00 17.00 19.64 26.00 99.00 '
#Histogram
hist(fb$Jersey.Number,
col = brewer.pal(8,'Spectral'),
main = 'Jersey Number - Players',las=2)
#Boxplot
boxplot(fb$Jersey.Number,
horizontal = TRUE,
col = 'coral3',
main = 'Jersey Number - Players')
#Upper Boundary; as its not numeric, statistical definition of outlier
# does not apply
#jn_ub = quantile(fb$Jersey.Number, 0.75)+1.5*IQR(fb$Jersey.Number)
#length(fb$Jersey.Number[fb$Jersey.Number>jn_ub])
'632 - less in proportion so ignoring'
#________________24.Joined # no point to include in knn procedure
str(fb$Joined)
'Factor w/ 1737 levels "","Apr 1, 2008",..: 776 796 249 783 255 ..'
summary(fb$Joined)[1:10]
'Jul 1, 2018 Jul 1, 2017 Jan 1, 2018 Jul 1, 2016 Jul 1, 2015
1538 1264 1133 635 614 368
Jan 1, 2017 Jul 1, 2014 Jan 1, 2016 Jul 1, 2013
231 226 180 156 '
head(fb$Joined)
'[1] Jul 1, 2004 Jul 10, 2018 Aug 3, 2017 Jul 1, 2011 Aug 30, 2015 Jul 1, 2012
1737 Levels: Apr 1, 2008 Apr 1, 2011 Apr 1, 2013 Apr 1, 2015 ... Sep 9, 2018'
#Converting the data type to date
'https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/strptime'
fb$Joined = as.Date(fb$Joined, format = "%b %d, %Y")
str(fb$Joined)
'Date[1:17918], format: "2004-07-01" "2018-07-10" "2017-08-03" ...'
summary(fb$Joined)
# Recorded console output. The apostrophe in "NA's" previously terminated the
# single-quoted string literal early and left an unbalanced quote (an R parse
# error: "unexpected string constant"), so the label is written as NAs here.
'
Min. 1st Qu. Median Mean 3rd Qu. Max.
"1991-06-01" "2016-05-23" "2017-07-03" "2016-11-15" "2018-07-01" "2018-12-20"
NAs
"1264"'
head(as.data.frame(table(fb$Joined)))
' Var1 Freq
1 1991-06-01 1
2 1998-01-01 2
3 1998-07-01 1
4 1999-01-01 1
5 2000-01-01 1
6 2000-07-01 1'
tail(as.data.frame(table(fb$Joined)))
' Var1 Freq
1731 2018-12-05 6
1732 2018-12-06 1
1733 2018-12-07 3
1734 2018-12-08 2
1735 2018-12-10 1
1736 2018-12-20 1'
#________________25. Loaned.From # not to be included
str(fb$Loaned.From)
'Factor w/ 342 levels "","1. FC Köln",..: 1 1 1 1 1 1 1 1 1 1 ...'
table(fb$Loaned.From)
'16654 observations are blank, no use with this column'
#________________26 Contract.Valid.Until # not to be included
str(fb$Contract.Valid.Until)
' Factor w/ 37 levels "","2018","2019",..: 5 6 6 4 7 4 4 5 4 5 ...'
table(fb$Contract.Valid.Until)
#Resetting the levels of factors
fb$Contract.Valid.Until = factor(fb$Contract.Valid.Until)
table(fb$Contract.Valid.Until)
'Noted very few having complete date rest having only year'
#________________27 Height
str(fb$Height)
"Factor w/ 22 levels "",5'1,5'10,..: 10 15 12 17 4 11 11 13 13 15 ..."
head(fb$Height)
"5'7 6'2 5'9 6'4 5'11 5'8"
table(fb$Height)
" 5'1 5'10 5'11 5'2 5'3 5'4 5'5 5'6 5'7 5'8 5'9 6'0 6'1 6'2 6'3
0 3 2452 2132 5 18 30 144 311 892 934 2203 2837 1886 1990 975
6'4 6'5 6'6 6'7 6'8 6'9
741 241 91 21 10 2 "
#Resetting the factors
fb$Height = factor(fb$Height)
barplot(table(fb$Height),
col = brewer.pal(11, 'Paired'),
main = 'Height of the Players',
las=2)
#Split the Height column into two columns (feet and inches), then used those
#values to convert the height to centimetres
fb = fb %>%
separate(Height,c('feet','inch'),
sep = "'", convert = TRUE, remove = FALSE) %>%
mutate(Height = (12*feet+inch)*2.54)
#Dropping the newly created colunms
fb = subset(fb, select = -c(feet,inch))
barplot(table(fb$Height),
col = brewer.pal(11, 'Paired'),
main = 'Height of the Players',
las=2)
#==========================================================================
#________________28 Weight
str(fb$Weight)
'Factor w/ 58 levels "","110lbs","115lbs",..: 23 34 19 27 21 25 17 37 33 38 ...'
#Replace or substitute lbs with nothing
fb$Weight = as.numeric(sub("lbs","", fb$Weight))
#Histogram
hist(fb$Weight,
col = brewer.pal(8,'Set1'),
main = 'Weight of the Player')
#Boxplot
boxplot(fb$Weight,
horizontal = TRUE,
col = 'yellow2',
main = 'Weight - Players')
#Upper Boundary
wt_ub = quantile(fb$Weight, 0.75)+1.5*IQR(fb$Weight)
length(fb$Weight[fb$Weight>wt_ub])
'62 - less in proportion so ignoring'
#Lower Boundary
wt_lb = quantile(fb$Weight, 0.25)-1.5*IQR(fb$Weight)
length(fb$Weight[fb$Weight < wt_lb])
'13 - less in proportion so ignoring'
#________________29 LS , we will drop this
str(fb$LS)
'Factor w/ 94 levels "","31+2","32+2",..: 93 94 88 1 85 87 75 92 67 1 ...'
table(fb$LS)
'1992 not labelled'
#Barplot
barplot(table(fb$LS),
col = brewer.pal(8,'Set1'),
main = 'LS- Player', las=2)
#________________30 ST , we will drop this
str(fb$ST)
'Factor w/ 94 levels "","31+2","32+2",..: 93 94 88 1 85 87 75 92 67 1 ...'
table(fb$ST)
'1992 not labelled'
#Barplot
barplot(table(fb$ST),
col = brewer.pal(8,'Dark2'),
main = 'ST - Player', las=2)
#________________31 RS , drop this
str(fb$RS)
'Factor w/ 94 levels "","31+2","32+2",..: 93 94 88 1 85 87 75 92 67 1 ...'
table(fb$RS)
'1992 not labelled'
#Barplot
barplot(table(fb$RS),
col = brewer.pal(8,'Spectral'),
main = 'RS - Player', las=2)
#________________32 LW, drop this
str(fb$LW)
'Factor w/ 106 levels "","25+2","27+2",..: 106 105 105 1 104 105 101 103 71 1 ...'
table(fb$LW)
'1992 not labelled'
#Barplot
barplot(table(fb$LW),
col = brewer.pal(8,'Paired'),
main = 'LW - Player', las=2)
#________________33 LF, drop this
str(fb$LF)
'Factor w/ 103 levels "","27+2","29+2",..: 103 102 101 1 98 100 95 99 68 1 ...'
table(fb$LF)
'1992 not labelled'
#Barplot
barplot(table(fb$LF),
col = brewer.pal(8,'Set1'),
main = 'LF - Player', las=2)
#________________34 CF, drop this
str(fb$CF)
'Factor w/ 103 levels "","27+2","29+2",..: 103 102 101 1 98 100 95 99 68 1 ...'
table(fb$CF)
'1992 not labelled'
#Barplot
barplot(table(fb$CF),
col = brewer.pal(8,'RdYlGn'),
main = 'CF - Player', las=2)
#________________35 RF, drop this
str(fb$RF)
'Factor w/ 103 levels "","27+2","29+2",..: 103 102 101 1 98 100 95 99 68 1 ...'
table(fb$RF)
'1992 not labelled'
#Barplot
barplot(table(fb$RF),
col = brewer.pal(8,'BrBG'),
main = 'RF - Player', las=2)
#________________36 RW, drop this
str(fb$RW)
'Factor w/ 106 levels "","25+2","27+2",..: 106 105 105 1 104 105 101 103 71 1 ...'
table(fb$RW)
'1992 not labelled'
#Barplot
barplot(table(fb$RW),
col = brewer.pal(8,'PiYG'),
main = 'RW - Player', las=2)
#________________37 LAM, drop this
str(fb$LAM)
'Factor w/ 102 levels "","27+2","28+2",..: 102 100 101 1 100 101 99 97 69 1 ...'
table(fb$LAM)
'1992 not labelled'
#Barplot
barplot(table(fb$LAM),
col = brewer.pal(8,'RdBu'),
main = 'LAM - Player', las=2)
#________________38 CAM, drop this
str(fb$CAM)
'Factor w/ 102 levels "","27+2","28+2",..: 102 100 101 1 100 101 99 97 69 1 ...'
table(fb$CAM)
'1992 not labelled'
#Barplot
barplot(table(fb$CAM),
col = brewer.pal(8,'Set3'),
main = 'CAM - Player', las=2)
#________________39 RAM, drop this
str(fb$RAM)
'Factor w/ 102 levels "","27+2","28+2",..: 102 100 101 1 100 101 99 97 69 1 ...'
table(fb$RAM)
'1992 not labelled'
#Barplot
barplot(table(fb$RAM),
col = brewer.pal(8,'Set1'),
main = 'RAM - Player', las=2)
#________________40 LM, drop this
str(fb$LM)
'Factor w/ 101 levels "","27+2","28+2",..: 101 99 99 1 99 100 98 96 70 1 ...'
table(fb$LM)
'1992 not labelled'
#Barplot
barplot(table(fb$LM),
col = brewer.pal(8,'Accent'),
main = 'LM - Player', las=2)
#________________41 LCM, drop this
str(fb$LCM)
'Factor w/ 93 levels "","30+2","31+2",..: 88 83 83 1 92 85 93 79 70 1 ...'
table(fb$LCM)
'1992 not labelled'
#Barplot
barplot(table(fb$LCM),
col = brewer.pal(8,'YlOrRd'),
main = 'LCM - Player', las=2)
#________________42 CM, drop this
str(fb$CM)
'Factor w/ 93 levels "","30+2","31+2",..: 88 83 83 1 92 85 93 79 70 1 ...'
table(fb$CM)
'1992 not labelled'
#Barplot
barplot(table(fb$CM),
col = brewer.pal(8,'YlGn'),
main = 'CM - Player', las=2)
#________________43 RCM, drop this
str(fb$RCM)
'Factor w/ 93 levels "","30+2","31+2",..: 88 83 83 1 92 85 93 79 70 1 ...'
table(fb$RCM)
'1992 not labelled'
#Barplot
barplot(table(fb$RCM),
col = brewer.pal(8,'Paired'),
main = 'RCM - Player', las=2)
#________________44 RM, drop this
str(fb$RM)
'Factor w/ 101 levels "","27+2","28+2",..: 101 99 99 1 99 100 98 96 70 1 ...'
table(fb$RM)
'1992 not labelled'
#Barplot
barplot(table(fb$RM),
col = brewer.pal(8,'PRGn'),
main = 'RM - Player', las=2)
#________________45 LWB, drop this
str(fb$LWB)
'Factor w/ 96 levels "","30+2","31+2",..: 55 58 58 1 83 60 93 67 91 1 ...'
table(fb$LWB)
'1992 not labelled'
#Barplot
barplot(table(fb$LWB),
col = brewer.pal(8,'RdYlBu'),
main = 'LWB - Player', las=2)
#________________46 LDM, drop this
str(fb$LDM)
'Factor w/ 100 levels "","28+2","29+2",..: 50 51 49 1 84 55 92 66 97 1 ...'
table(fb$LDM)
'1992 not labelled'
fb[is.na(fb$LDM),]
#Barplot
barplot(table(fb$LDM),
col = brewer.pal(8,'Set1'),
main = 'LDM - Player', las=2)
#________________47 CDM, drop this
str(fb$CDM)
'Factor w/ 100 levels "","28+2","29+2",..: 50 51 49 1 84 55 92 66 97 1 ...'
table(fb$CDM)
'1992 not labelled'
#Barplot
barplot(table(fb$CDM),
col = brewer.pal(8,'RdYlGn'),
main = 'CDM - Player', las=2)
#________________48 RDM, drop this
str(fb$RDM)
'Factor w/ 100 levels "","28+2","29+2",..: 50 51 49 1 84 55 92 66 97 1 ...'
table(fb$RDM)
'1992 not labelled'
#Barplot
barplot(table(fb$RDM),
col = brewer.pal(8,'Accent'),
main = 'RDM - Player', las=2)
#________________49 RWB, drop this
str(fb$RWB)
'Factor w/ 96 levels "","30+2","31+2",..: 55 58 58 1 83 60 93 67 91 1 ...'
table(fb$RWB)
'1992 not labelled'
#Barplot
barplot(table(fb$RWB),
col = brewer.pal(8,'Dark2'),
main = 'RWB - Player', las=2)
#________________50 LB, drop this
str(fb$LB)
'Factor w/ 99 levels "","29+2","30+2",..: 49 54 52 1 79 52 91 65 99 1 ...'
table(fb$LB)
'1992 not labelled'
#Barplot
barplot(table(fb$LB),
col = brewer.pal(8,'PuOr'),
main = 'LB - Player', las=2)
#________________51 LCB, drop this
str(fb$LCB)
'Factor w/ 109 levels "","25+2","27+2",..: 30 44 31 1 71 35 81 65 109 1 ...'
table(fb$LCB)
'1992 not labelled'
#Barplot
barplot(table(fb$LCB),
col = brewer.pal(8,'Spectral'),
main = 'LCB - Player', las=2)
#________________52 CB, drop this
str(fb$CB)
'Factor w/ 109 levels "","25+2","27+2",..: 30 44 31 1 71 35 81 65 109 1 ...'
table(fb$CB)
'1992 not labelled'
#Barplot
barplot(table(fb$CB),
col = brewer.pal(8,'Paired'),
main = 'CB - Player', las=2)
#________________53 RCB, drop this
str(fb$RCB)
'Factor w/ 109 levels "","25+2","27+2",..: 30 44 31 1 71 35 81 65 109 1 ...'
table(fb$RCB)
'1992 not labelled'
#Barplot
barplot(table(fb$RCB),
col = brewer.pal(8,'Set2'),
main = 'RCB - Player', las=2)
#________________54 RB, drop this
str(fb$RB)
'Factor w/ 99 levels "","29+2","30+2",..: 49 54 52 1 79 52 91 65 99 1 ...'
table(fb$RB)
'1992 not labelled'
#Barplot
barplot(table(fb$RB),
col = brewer.pal(8,'Paired'),
main = 'RB - Player', las=2)
#&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& ____below we will use
#________________55 Crossing
str(fb$Crossing)
'int [1:17918] 84 84 79 17 93 81 86 77 66 13 ...'
summary(fb$Crossing)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
5.00 38.00 54.00 49.75 64.00 93.00 '
#Histogram
hist(fb$Crossing,
col = brewer.pal(8,'Spectral'),
main = 'Crossing of the Player',
xlab = 'Crossing')
#Boxplot
boxplot(fb$Crossing,
col = 'plum3',
horizontal = TRUE,
main = 'Crossing of the Player')
#________________56 Finishing
str(fb$Finishing)
'int [1:17918] 95 94 87 13 82 84 72 93 60 11 ...'
summary(fb$Finishing)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
2.00 30.00 49.00 45.58 62.00 95.00 '
#Histogram
hist(fb$Finishing,
col = brewer.pal(8,'Paired'),
main = 'Finishing of the Player',
xlab = 'Finishing')
#Boxplot
boxplot(fb$Finishing,
col = 'purple2',
horizontal = TRUE,
main = 'Finishing of the Player')
#________________57 Heading Accuracy
str(fb$HeadingAccuracy)
'int [1:17918] 70 89 62 21 55 61 55 77 91 15 ...'
summary(fb$HeadingAccuracy)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
2.00 30.00 49.00 45.58 62.00 95.00 '
#Histogram
hist(fb$HeadingAccuracy,
col = brewer.pal(8,'Dark2'),
main = 'Heading Accuracy of the Player',
xlab = 'Heading Accuracy')
#Boxplot
boxplot(fb$HeadingAccuracy,
col = 'slateblue3',
horizontal = TRUE,
main = 'Heading Accuracy of the Player')
#Checking the outliers
ha_lb = quantile(fb$HeadingAccuracy, 0.25)-1.5*IQR(fb$HeadingAccuracy)
length(fb$HeadingAccuracy[fb$HeadingAccuracy<ha_lb])
'985'
'Ignoring the outliers being less in proportion'
#________________58 Short Passing
str(fb$ShortPassing)
'int [1:17918] 90 81 84 50 92 89 93 82 78 29 ...'
summary(fb$ShortPassing)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
7.00 54.00 62.00 58.71 68.00 93.00 '
#Histogram
hist(fb$ShortPassing,
col = brewer.pal(8,'RdYlGn'),
main = 'Short Passing of the Player',
xlab = 'Short Passing')
#Boxplot
boxplot(fb$ShortPassing,
col = 'brown2',
horizontal = TRUE,
main = 'Short Passing of the Player')
#Checking the outliers
sp_lb = quantile(fb$ShortPassing, 0.25)-1.5*IQR(fb$ShortPassing)
length(fb$ShortPassing[fb$ShortPassing<sp_lb])
'1685 outliers, not removing'
sp_ub = quantile(fb$ShortPassing, 0.75)+1.5*IQR(fb$ShortPassing)
length(fb$ShortPassing[fb$ShortPassing>sp_ub])
'12 outliers'
#________________59 Volleys
str(fb$Volleys)
'int [1:17918] 86 87 84 13 82 80 76 88 66 13 ...'
summary(fb$Volleys)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
4.00 30.00 44.00 42.93 57.00 90.00 '
#Histogram
hist(fb$Volleys,
col = brewer.pal(8,'Set3'),
main = 'Volleys of the Player',
xlab = 'Volleys')
#Boxplot
boxplot(fb$Volleys,
col = 'seagreen3',
horizontal = TRUE,
main = 'Volleys of the Player')
#________________60 Dribbling
str(fb$Dribbling)
'int [1:17918] 97 88 96 18 86 95 90 87 63 12 ...'
summary(fb$Dribbling)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
4.00 49.00 61.00 55.41 68.00 97.00 '
#Histogram
hist(fb$Dribbling,
col = brewer.pal(8,'Set1'),
main = 'Dribbling of the Player',
xlab = 'Dribbling')
#Boxplot
boxplot(fb$Dribbling,
col = 'orangered1',
horizontal = TRUE,
main = 'Dribbling of the Player')
#Checking the outliers
dr_lb = quantile(fb$Dribbling, 0.25)-1.5*IQR(fb$Dribbling)
length(fb$Dribbling[fb$Dribbling<dr_lb])
'1893 outliers'
dr_ub = quantile(fb$Dribbling, 0.75)+1.5*IQR(fb$Dribbling)
length(fb$Dribbling[fb$Dribbling>dr_ub])
'1 outliers'
#________________61 Curve
str(fb$Curve)
'int [1:17918] 93 81 88 21 85 83 85 86 74 13 ...'
summary(fb$Curve)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
6.00 34.00 49.00 47.22 62.00 94.00'
#Histogram
hist(fb$Curve,
col = brewer.pal(8,'RdYlGn'),
main = 'Curve of the Player',
xlab = 'Curve')
#Boxplot
boxplot(fb$Curve,
col = 'darkgreen',
horizontal = TRUE,
main = 'Curve of the Player')
#________________62 FKAccuracy
str(fb$FKAccuracy)
'int [1:17918] 94 76 87 19 83 79 78 84 72 14 ...'
summary(fb$FKAccuracy)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
3.00 31.00 41.00 42.88 57.00 94.00 '
#Histogram
hist(fb$FKAccuracy,
col = brewer.pal(8,'Set2'),
main = 'FKAccuracy of the Player',
xlab = 'FKAccuracy')
#Boxplot
boxplot(fb$FKAccuracy,
col = 'aquamarine2',
horizontal = TRUE,
main = 'FKAccuracy of the Player')
#________________63 LongPassing
str(fb$LongPassing)
'int [1:17918] 87 77 78 51 91 83 88 64 77 26 ...'
summary(fb$LongPassing)
'Min. 1st Qu. Median Mean 3rd Qu. Max.
9.00 43.00 56.00 52.72 64.00 93.00 '
#Histogram
hist(fb$LongPassing,
col = brewer.pal(8,'PiYG'),
main = 'LongPassing of the Player',
xlab = 'LongPassing')
#Boxplot
boxplot(fb$LongPassing,
col = 'deeppink3',
horizontal = TRUE,
main = 'LongPassing of the Player')
#Checking the outliers
lp_lb = quantile(fb$LongPassing, 0.25)-1.5*IQR(fb$LongPassing)
length(fb$LongPassing[fb$LongPassing<lp_lb])
'17 outliers - Ignoring'
#________________64 BallControl
str(fb$BallControl)
'int [1:17918] 96 94 95 42 91 94 93 90 84 16 ...'
summary(fb$BallControl)
'Min. 1st Qu. Median Mean 3rd Qu. Max.
5.00 54.00 63.00 58.41 69.00 96.00'
#Histogram
hist(fb$BallControl,
col = brewer.pal(8,'Accent'),
main = 'BallControl of the Player',
xlab = 'BallControl')
#Boxplot
boxplot(fb$BallControl,
col = 'khaki1',
horizontal = TRUE,
main = 'BallControl of the Player')
#Checking the outliers
bc_lb = quantile(fb$BallControl, 0.25)-1.5*IQR(fb$BallControl)
length(fb$BallControl[fb$BallControl<bc_lb])
'1994 outliers'
bc_ub = quantile(fb$BallControl, 0.75)+1.5*IQR(fb$BallControl)
length(fb$BallControl[fb$BallControl>bc_ub])
'12 outliers'
#________________65 Acceleration
str(fb$Acceleration)
'int [1:17918] 91 89 94 57 78 94 80 86 76 43 ...'
summary(fb$Acceleration)
'Min. 1st Qu. Median Mean 3rd Qu. Max.
12.0 57.0 67.0 64.6 75.0 97.0'
#Histogram
hist(fb$Acceleration,
col = brewer.pal(8,'RdBu'),
main = 'Acceleration of the Player',
xlab = 'Acceleration')
#Boxplot
boxplot(fb$Acceleration,
col = 'firebrick3',
horizontal = TRUE,
main = 'Acceleration of the Player')
#Checking the outliers
ac_lb = quantile(fb$Acceleration, 0.25)-1.5*IQR(fb$Acceleration)
length(fb$Acceleration[fb$Acceleration<ac_lb])
'475 outliers - Ignore'
#________________66 SprintSpeed
str(fb$SprintSpeed)
'int [1:17918] 86 91 90 58 76 88 72 75 75 60 ...'
summary(fb$SprintSpeed)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
12.00 57.00 67.00 64.72 75.00 96.00'
#Histogram
hist(fb$SprintSpeed,
col = brewer.pal(8,'Paired'),
main = 'SprintSpeed of the Player',
xlab = 'SprintSpeed')
#Boxplot
boxplot(fb$SprintSpeed,
col = 'coral',
horizontal = TRUE,
main = 'SprintSpeed of the Player')
#Checking the outliers
ss_lb = quantile(fb$SprintSpeed, 0.25)-1.5*IQR(fb$SprintSpeed)
length(fb$SprintSpeed[fb$SprintSpeed<ss_lb])
'440 outliers - Ignore'
#________________67 Agility
str(fb$Agility)
'int [1:17918] 86 91 90 58 76 88 72 75 75 60 ...'
summary(fb$Agility)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
12.00 57.00 67.00 64.72 75.00 96.00'
#Histogram
hist(fb$Agility,
col = brewer.pal(8,'BrBG'),
main = 'Agility of the Player',
xlab = 'Agility')
#Boxplot
boxplot(fb$Agility,
col = 'tan3',
horizontal = TRUE,
main = 'Agility of the Player')
#Checking the outliers
ag_lb = quantile(fb$Agility, 0.25)-1.5*IQR(fb$Agility)
length(fb$Agility[fb$Agility<ag_lb])
'187 outliers - Ignore'
#________________68 Reactions
str(fb$Reactions)
'int [1:17918] 95 96 94 90 91 90 90 92 85 86 ...'
summary(fb$Reactions)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
21.00 56.00 62.00 61.82 68.00 96.00 '
#Histogram
hist(fb$Reactions,
col = brewer.pal(8,'Dark2'),
main = 'Reactions of the Player',
xlab = 'Reactions')
#Boxplot
boxplot(fb$Reactions,
col = 'slategray4',
horizontal = TRUE,
main = 'Reactions of the Player')
#Checking the outliers
rc_lb = quantile(fb$Reactions, 0.25)-1.5*IQR(fb$Reactions)
length(fb$Reactions[fb$Reactions<rc_lb])
'98 outliers'
rc_ub = quantile(fb$Reactions, 0.75)+1.5*IQR(fb$Reactions)
length(fb$Reactions[fb$Reactions>rc_ub])
'35 outliers'
#________________69 Balance
str(fb$Balance)
'int [1:17918] 95 70 84 43 77 94 94 83 66 49 ...'
summary(fb$Balance)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
16.00 56.00 66.00 63.96 74.00 96.00 '
#Histogram
hist(fb$Balance,
col = brewer.pal(8,'PRGn'),
main = 'Balance of the Player',
xlab = 'Balance')
#Boxplot
boxplot(fb$Balance,
col = 'purple3',
horizontal = TRUE,
main = 'Balance of the Player')
#Checking the outliers
bl_lb = quantile(fb$Balance, 0.25)-1.5*IQR(fb$Balance)
length(fb$Balance[fb$Balance<bl_lb])
'240 outliers'
#________________70 ShotPower
str(fb$ShotPower)
'int [1:17918] 85 95 80 31 91 82 79 86 79 22 ...'
summary(fb$ShotPower)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
2.00 45.00 59.00 55.49 68.00 95.00 '
#Histogram
hist(fb$ShotPower,
col = brewer.pal(8,'Pastel1'),
main = 'ShotPower of the Player',
xlab = 'ShotPower')
#Boxplot
boxplot(fb$ShotPower,
col = 'lightcyan2',
horizontal = TRUE,
main = 'ShotPower of the Player')
#Checking the outliers
sh_lb = quantile(fb$ShotPower, 0.25)-1.5*IQR(fb$ShotPower)
length(fb$ShotPower[fb$ShotPower<sh_lb])
'15 outliers'
#________________71 Jumping
str(fb$Jumping)
'int [1:17918] 68 95 61 67 63 56 68 69 93 76 ...'
summary(fb$Jumping)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
15.00 58.00 66.00 65.12 73.00 95.00 '
#Histogram
hist(fb$Jumping,
col = brewer.pal(8,'Spectral'),
main = 'Jumping of the Player',
xlab = 'Jumping')
#Boxplot
boxplot(fb$Jumping,
col = 'sienna3',
horizontal = TRUE,
main = 'Jumping of the Player')
#Checking the outliers
ju_lb = quantile(fb$Jumping, 0.25)-1.5*IQR(fb$Jumping)
length(fb$Jumping[fb$Jumping<ju_lb])
'381 outliers'
#________________72 Stamina
str(fb$Stamina)
'int [1:17918] 72 88 81 43 90 83 89 90 84 41 ...'
summary(fb$Stamina)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
15.00 58.00 66.00 65.12 73.00 95.00 '
#Histogram
hist(fb$Stamina,
col = brewer.pal(8,'YlGnBu'),
main = 'Stamina of the Player',
xlab = 'Stamina')
#Boxplot
boxplot(fb$Stamina,
col = 'deepskyblue2',
horizontal = TRUE,
main = 'Stamina of the Player')
#Checking the outliers
st_lb = quantile(fb$Stamina, 0.25)-1.5*IQR(fb$Stamina)
length(fb$Stamina[fb$Stamina<st_lb])
'854 outliers'
#________________73 Strength
str(fb$Strength)
'int [1:17918] 59 79 49 64 75 66 58 83 83 78 ...'
summary(fb$Strength)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
17.00 58.00 67.00 65.32 74.00 97.00 '
#Histogram
hist(fb$Strength,
col = brewer.pal(8,'YlOrRd'),
main = 'Strength of the Player',
xlab = 'Strength')
#Boxplot
boxplot(fb$Strength,
col = 'darkred',
horizontal = TRUE,
main = 'Strength of the Player')
#Checking the outliers
str_lb = quantile(fb$Strength, 0.25)-1.5*IQR(fb$Strength)
length(fb$Strength[fb$Strength<str_lb])
'253 outliers'
#________________74 LongShots
str(fb$LongShots)
'int [1:17918] 94 93 82 12 91 80 82 85 59 12 ...'
summary(fb$LongShots)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
3.00 33.00 51.00 47.13 62.00 94.00'
#Histogram
hist(fb$LongShots,
col = brewer.pal(11,'Set3'),
main = 'LongShots of the Player',
xlab = 'LongShots')
#Boxplot
boxplot(fb$LongShots,
col = 'slategray2',
horizontal = TRUE,
main = 'LongShots of the Player')
#________________75 Aggression
str(fb$Aggression)
'int [1:17918] 48 63 56 38 76 54 62 87 88 34 ...'
summary(fb$Aggression)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
11.00 44.00 59.00 55.88 69.00 95.00 '
#Histogram
hist(fb$Aggression,
col = brewer.pal(8,'Set1'),
main = 'Aggression of the Player',
xlab = 'Aggression')
#Boxplot
boxplot(fb$Aggression,
col = 'yellow2',
horizontal = TRUE,
main = 'Aggression of the Player')
#________________76 Interceptions
str(fb$Interceptions)
' int [1:17918] 22 29 36 30 61 41 83 41 90 19 ...'
summary(fb$Interceptions)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
3.00 26.00 52.00 46.69 64.00 92.00'
#Histogram
hist(fb$Interceptions,
col = brewer.pal(8,'RdGy'),
main = 'Interceptions of the Player',
xlab = 'Interceptions')
#Boxplot
boxplot(fb$Interceptions,
col = 'gray50',
horizontal = TRUE,
main = 'Interceptions of the Player')
#________________77 Positioning
str(fb$Positioning)
'int [1:17918] 94 95 89 12 87 87 79 92 60 11 ...'
summary(fb$Positioning)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
2 39 55 50 64 95'
#Histogram
hist(fb$Positioning,
col = brewer.pal(8,'PRGn'),
main = 'Positioning of the Player',
xlab = 'Positioning')
#Boxplot
boxplot(fb$Positioning,
col = 'mediumseagreen',
horizontal = TRUE,
main = 'Positioning of the Player')
#________________78 Vision
str(fb$Vision)
'int [1:17918] 94 82 87 68 94 89 92 84 63 70 ...'
summary(fb$Vision)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
10.00 44.00 55.00 53.45 64.00 94.00 '
#Histogram
hist(fb$Vision,
col = brewer.pal(8,'PiYG'),
main = 'Vision of the Player',
xlab = 'Vision')
#Boxplot
boxplot(fb$Vision,
col = 'maroon3',
horizontal = TRUE,
main = 'Vision of the Player')
#Checking the outliers
vis_lb = quantile(fb$Vision, 0.25)-1.5*IQR(fb$Vision)
length(fb$Vision[fb$Vision<vis_lb])
'60 outliers'
#________________79 Penalties
str(fb$Penalties)
'int [1:17918] 75 85 81 40 79 86 82 85 75 11 ...'
summary(fb$Penalties)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
5.00 39.00 49.00 48.54 60.00 92.00 '
#Histogram
hist(fb$Penalties,
col = brewer.pal(8,'BrBG'),
main = 'Penalties of the Player',
xlab = 'Penalties')
#Boxplot
boxplot(fb$Penalties,
col = 'darkcyan',
horizontal = TRUE,
main = 'Penalties of the Player')
#Checking the outliers
pen_lb = quantile(fb$Penalties, 0.25)-1.5*IQR(fb$Penalties)
length(fb$Penalties[fb$Penalties<pen_lb])
'2 outliers'
pen_ub = quantile(fb$Penalties, 0.75)+1.5*IQR(fb$Penalties)
length(fb$Penalties[fb$Penalties>pen_ub])
'1 outlier'
#________________80 Composure
str(fb$Composure)
'int [1:17918] 96 95 94 68 88 91 84 85 82 70 ...'
summary(fb$Composure)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
3.00 51.00 60.00 58.66 67.00 96.00 '
#Histogram
hist(fb$Composure,
col = brewer.pal(8,'Accent'),
main = 'Composure of the Player',
xlab = 'Composure')
#Boxplot
boxplot(fb$Composure,
col = 'dodgerblue3',
horizontal = TRUE,
main = 'Composure of the Player')
#Checking the outliers
com_lb = quantile(fb$Composure, 0.25)-1.5*IQR(fb$Composure)
length(fb$Composure[fb$Composure<com_lb])
'145 outliers'
com_ub = quantile(fb$Composure, 0.75)+1.5*IQR(fb$Composure)
length(fb$Composure[fb$Composure>com_ub])
'6 outlier'
#________________81 Marking
str(fb$Marking)
'int [1:17918] 33 28 27 15 68 34 60 62 87 27 ...'
summary(fb$Marking)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
3.00 30.00 53.00 47.26 64.00 94.00'
#Histogram
hist(fb$Marking,
col = brewer.pal(11,'Set3'),
main = 'Marking of the Player',
xlab = 'Marking')
#Boxplot
boxplot(fb$Marking,
col = 'coral',
horizontal = TRUE,
main = 'Marking of the Player')
#________________82 StandingTackle
str(fb$StandingTackle)
' int [1:17918] 28 31 24 21 58 27 76 45 92 12 ...'
summary(fb$StandingTackle)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
2.00 27.00 55.00 47.68 66.00 93.00 '
#Histogram
hist(fb$StandingTackle,
col = brewer.pal(11,'Dark2'),
main = 'StandingTackle of the Player',
xlab = 'StandingTackle')
#Boxplot
boxplot(fb$StandingTackle,
col = 'gold3',
horizontal = TRUE,
main = 'StandingTackle of the Player')
#________________83 SlidingTackle
str(fb$SlidingTackle)
'int [1:17918] 26 23 33 13 51 22 73 38 91 18 ...'
summary(fb$SlidingTackle)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
3.00 24.00 52.00 45.64 64.00 91.00'
#Histogram
hist(fb$SlidingTackle,
col = brewer.pal(8,'YlGnBu'),
main = 'SlidingTackle of the Player',
xlab = 'SlidingTackle')
#Boxplot
boxplot(fb$SlidingTackle,
col = 'cyan4',
horizontal = TRUE,
main = 'SlidingTackle of the Player')
#________________84 GKDiving
str(fb$GKDiving)
' int [1:17918] 6 7 9 90 15 11 13 27 11 86 ...'
summary(fb$GKDiving)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
1.00 8.00 11.00 16.59 14.00 90.00'
#Histogram
hist(fb$GKDiving,
col = brewer.pal(8,'Spectral'),
main = 'GKDiving of the Player',
xlab = 'GKDiving')
#Boxplot
boxplot(fb$GKDiving,
col = 'brown1',
horizontal = TRUE,
main = 'GKDiving of the Player')
#Checking the outliers
gk_ub = quantile(fb$GKDiving, 0.75)+1.5*IQR(fb$GKDiving)
length(fb$GKDiving[fb$GKDiving>gk_ub])
'1996 outlier'
#________________85 GKHandling
str(fb$GKHandling)
'int [1:17918] 11 11 9 85 13 12 9 25 8 92 ...'
summary(fb$GKHandling)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
1.00 8.00 11.00 16.37 14.00 92.00'
#Histogram
hist(fb$GKHandling,
col = brewer.pal(8,'Pastel2'),
main = 'GKHandling of the Player',
xlab = 'GKHandling')
#Boxplot
boxplot(fb$GKHandling,
col = 'burlywood2',
horizontal = TRUE,
main = 'GKHandling of the Player')
#Checking the outliers
gkh_ub = quantile(fb$GKHandling, 0.75)+1.5*IQR(fb$GKHandling)
length(fb$GKHandling[fb$GKHandling>gkh_ub])
'1995 outlier'
#________________86 GKKicking
str(fb$GKKicking)
'int [1:17918] 15 15 15 87 5 6 7 31 9 78 ...'
summary(fb$GKKicking)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
1.00 8.00 11.00 16.21 14.00 91.00 '
#Histogram
hist(fb$GKKicking,
col = brewer.pal(8,'Paired'),
main = 'GKKicking of the Player',
xlab = 'GKKicking')
#Boxplot
boxplot(fb$GKKicking,
col = 'dodgerblue2',
horizontal = TRUE,
main = 'GKKicking of the Player')
#Checking the outliers
gkk_ub = quantile(fb$GKKicking, 0.75)+1.5*IQR(fb$GKKicking)
length(fb$GKKicking[fb$GKKicking>gkk_ub])
'2002 outliers'
#________________87 GKPositioning
str(fb$GKPositioning)
' int [1:17918] 14 14 15 88 10 8 14 33 7 88 ...'
summary(fb$GKPositioning)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
1.00 8.00 11.00 16.36 14.00 90.00 '
#Histogram
hist(fb$GKPositioning,
col = brewer.pal(8,'RdYlGn'),
main = 'GKPositioning of the Player',
xlab = 'GKPositioning')
#Boxplot
boxplot(fb$GKPositioning,
col = 'coral2',
horizontal = TRUE,
main = 'GKPositioning of the Player')
#Checking the outliers
gkp_ub = quantile(fb$GKPositioning, 0.75)+1.5*IQR(fb$GKPositioning)
length(fb$GKPositioning[fb$GKPositioning>gkp_ub])
'1998 Outliers'
#________________88 GKReflexes
str(fb$GKReflexes)
'int [1:17918] 8 11 11 94 13 8 9 37 11 89 ...'
summary(fb$GKReflexes)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
1.00 8.00 11.00 16.68 14.00 94.00 '
#Histogram
hist(fb$GKReflexes,
col = brewer.pal(8,'PuOr'),
main = 'GKReflexes of the Player',
xlab = 'GKReflexes')
#Boxplot
boxplot(fb$GKReflexes,
col = 'gold3',
horizontal = TRUE,
main = 'GKReflexes of the Player')
#Checking the outliers
gkr_ub = quantile(fb$GKReflexes, 0.75)+1.5*IQR(fb$GKReflexes)
length(fb$GKReflexes[fb$GKReflexes>gkr_ub])
'1997 Outliers'
#________________89 Release.Clause
str(fb$Release.Clause)
' Factor w/ 1245 levels "","â,¬1.1M","â,¬1.2M",..: 295 84 296 106 234 ...'
table(fb$Release.Clause)
#Replace or substitute â,¬ with nothing
fb$Release.Clause = sub("â,¬","", fb$Release.Clause)
table(fb$Release.Clause)
#integer. A penalty to be applied when deciding to print numeric values in
#fixed or exponential notation. Positive values bias towards fixed and negative
#towards scientific notation: fixed notation will be preferred unless it is
#more than scipen digits wider.
# Penalise scientific notation so large release-clause values print in full.
options(scipen = 15)
# Strip the K / M suffixes and express every value in thousands:
# e.g. "525K" -> 525, "1.1M" -> 1100.
'The grep R function searches for matches of certain character pattern in a
vector of character strings and returns the indices that yielded a match.'
k_idx = grep('K$', fb$Release.Clause)
m_idx = grep('M$', fb$Release.Clause)
fb$Release.Clause[k_idx] = as.numeric(sub('K', "", fb$Release.Clause[k_idx]))
fb$Release.Clause[m_idx] = (as.numeric(sub('M', "", fb$Release.Clause[m_idx]))*1000000)/1000
fb$Release.Clause = as.numeric(fb$Release.Clause)
str(fb$Release.Clause)
'num [1:17918] 226500 127100 228100 138600 196400 ...'
summary(fb$Release.Clause)
# Recorded console output. The apostrophe in "NA's" previously terminated the
# single-quoted string literal early and left an unbalanced quote (an R parse
# error), so the label is written as NAs here.
' Min. 1st Qu. Median Mean 3rd Qu. Max. NAs
13 525 1100 4585 3500 228100 1275'
#Removing the Na's
fb = na.omit(fb)
dim(fb)
'16643 89
17918-16643 = 1275 obs'
summary(fb$Release.Clause)
' Min. 1st Qu. Median Mean 3rd Qu. Max.
13 525 1100 4585 3500 228100'
#Histogram
hist(fb$Release.Clause,
col = brewer.pal(8,'Set2'),
main = 'Release.Clause of the Player',
xlab = 'Release.Clause')
#Boxplot
boxplot(fb$Release.Clause,
col = 'lightcoral',
horizontal = TRUE,
main = 'Release.Clause of the Player')
#Checking the outliers
rc_ub = quantile(fb$Release.Clause, 0.75)+1.5*IQR(fb$Release.Clause)
length(fb$Release.Clause[fb$Release.Clause>rc_ub])
'2403 Outliers'
# Count how many Release.Clause values exceed a sliding upper bound, stepping
# the bound up from rc_ub towards the maximum in increments of 25000.
thresholds = seq(rc_ub, max(fb$Release.Clause), 25000)
for (thr in thresholds) {
  n_above = sum(fb$Release.Clause > thr)  # same as length(x[x > thr]); no NAs remain after na.omit
  print(paste('No of outliers with ub as', round(thr, 0), 'is', n_above))
}
'[1] "No of outliers with ub as 7962 is 2403"
[1] "No of outliers with ub as 32962 is 370"
[1] "No of outliers with ub as 57962 is 138"
[1] "No of outliers with ub as 82962 is 61"
[1] "No of outliers with ub as 107962 is 36"
[1] "No of outliers with ub as 132962 is 17"
[1] "No of outliers with ub as 157962 is 8"
[1] "No of outliers with ub as 182962 is 3"
[1] "No of outliers with ub as 207962 is 2"'
#no action taken on outliers
head(fb[order(fb$Release.Clause, decreasing = TRUE),][c(3,89)],10)
tail(fb[order(fb$Release.Clause, decreasing = TRUE),][c(3,89)],10)
#===============================================================================
"The file has lots of information which we do not need for clustering.
Only player's attributes such as speed, strength, passing, finish, heading etc
will be taken."
fb1 = fb[,c(3,4,8,9,12,14,16:18,27,28,55:89)]
#For easy calculation selected mainly numerical variables
names(fb) # 46
dim(fb1)
'16643 46'
write.csv(fb1, "C:/Users/Dr Vinod/Desktop/football_ kmean/fb1.csv")
str(fb1)
# NOTE(fix): the original assigned row.names(fb1) = fb1$Name BEFORE removing
# duplicates, which errors at runtime ("duplicate 'row.names' are not
# allowed") because 870 player names repeat. De-duplicate first, then promote
# Name to row names.
#Removing the duplicate names
table(duplicated(fb1$Name))          # how many rows are duplicated names
fb2 = fb1[!duplicated(fb1$Name),]    # keep the first occurrence of each name
row.names(fb2) = fb2$Name            # names are now unique -> safe as row names/index
fb2 = subset(fb2, select = -c(Name)) # Name is redundant once it is the index
head(fb2,3)
dim(fb2)
'15773 45'
'16643-15773 = 870 observations removed'
write.csv(fb2, "C:/Users/Dr Vinod/Desktop/football_ kmean/fb2.csv")
#It is taking lot of time to execute on whole data so selecting only first
#5000 observations
fb_5000 = sample_n(fb2,5000)
str(fb_5000)
#RCB, RB should not be there
#Scaling the data
fb_5000_scale = scale(fb_5000)
#================Model 1 - Kmeans using WSS method==============================
#Plotting to get the optimal number of clusters, kmeans by default checks upto 10
library(factoextra)
fviz_nbclust(fb_5000_scale, kmeans, method = "wss")
fviz_nbclust(fb_5000_scale, kmeans, method = "wss")+
geom_vline(xintercept = 3,linetype=5,col='red')
# Compute k-means with k = 3
set.seed(123)
' we specify nstart = 25. This means that R will try 25 different random
starting assignments and then select the best results corresponding to the one
with the lowest within cluster variation'
km_clusters <- kmeans(fb_5000_scale, 3, nstart = 25)
# Print the results
print(km_clusters)
km_clusters$size
'K-means clustering with 3 clusters of sizes 565, 2414, 2021'
km_clusters$betweenss/km_clusters$totss
'0.4897724'
#Adding the cluster index as a variable to the data frame
fb_5000 = cbind(fb_5000, cluster = km_clusters$cluster)
head(fb_5000)
#Checking the means of values in the fb_5000
aggregate(fb_5000, by=list(cluster=km_clusters$cluster), mean)
#=================Don't run the following code==================================
#Visualizing the clusters
fviz_cluster(km.res, data = fb_5000,
palette = brewer.pal(5,'Dark2'),
ellipse.type = "euclid", # Concentration ellipse
star.plot = TRUE, # Add segments from centroids to items
repel = TRUE, # Avoid label overplotting (slow)
ggtheme = theme_minimal())
#===============================================================================
#==========Kmeans - cluster visual output using 100 data points=================
#It is taking lot of time to execute on whole data so selecting only first
#100 observations
fb_100 = sample_n(fb2,100)
#Scaling
fb_100_scale = scale(fb_100)
#Plotting to get the optimal number of clusters, kmeans by default checks upto 10
fviz_nbclust(fb_100_scale, kmeans, method = "wss")+
geom_vline(xintercept = 3,linetype=2, col='red')
# Compute k-means with k = 4
set.seed(123)
' we specify nstart = 25. This means that R will try 25 different random
starting assignments and then select the best results corresponding to the one
with the lowest within cluster variation'
km.res_100 <- kmeans(fb_100_scale, 3, nstart = 25)
# Print the results
print(km.res_100)
km.res_100$size
'K-means clustering with 3 clusters of sizes 45, 12, 43'
km.res_100$betweenss/km.res_100$totss
'0.5188112'
#Adding the cluster index as a variable to the data frame
fb_100 = cbind(fb_100, cluster = km.res_100$cluster)
head(fb_100)
#Visualizing the clusters
fviz_cluster(km.res_100, data = fb_100,
palette = brewer.pal(3,'Set1'),
ellipse.type = "euclid", # Concentration ellipse
star.plot = TRUE, # Add segments from centroids to items
repel = TRUE, # Avoid label overplotting (slow)
ggtheme = theme_minimal())
#===============================================================================
#================Model 2 - Kmeans using Silhouette method=======================
#Plotting to get the optimal number of clusters, kmeans by default checks upto 10
fviz_nbclust(fb_5000_scale, kmeans, method = "silhouette")+
labs(subtitle = "Silhouette method")
# K-means clustering
km.res1 <- eclust(fb_5000_scale, "kmeans", k = 2, nstart = 25, graph = FALSE)
#Silhouette coefficients
km.res1$silinfo
# Silhouette plot
fviz_silhouette(km.res1, palette = "jco",
ggtheme = theme_classic())
#Visualize k-means clusters
fviz_cluster(km.res1, geom = "point", ellipse.type = "norm",
palette = "jco", ggtheme = theme_minimal())
#===============================================================================
#>>>>>>>>>>>>>>>>>> NOW ON ENTIRE DATA
#Scaling the data
fb1_scale = scale(fb2)
#Plotting to get the optimal number of clusters, kmeans by default checks upto 10
#Method - Silhouette
fviz_nbclust(fb1_scale, kmeans, method = "silhouette")+
labs(subtitle = "Silhouette method")
#Method - WSS - Taking long time=======WITHOUT BELOW CODE , WE CAN GO FORWARD=========================================
fviz_nbclust(fb1_scale, kmeans, method = "wss")+
labs(subtitle = "WSS method")
#===============================================================================
# K-means clustering
km.res_fb <- eclust(fb1_scale, "kmeans", k = 2, nstart = 25, graph = FALSE)
#Silhouette coefficients
km.res_fb$silinfo
#Size of each cluster
km.res_fb$size
'K-means clustering with 2 clusters of sizes 13976 1797'
km.res_fb$betweenss/km.res_fb$totss
'0.3676574'
# Silhouette plot
fviz_silhouette(km.res_fb, palette = "jco",
ggtheme = theme_classic())
#Visualize k-means clusters
fviz_cluster(km.res_fb, geom = "point", ellipse.type = "norm",
palette = "jco", ggtheme = theme_minimal())
#Adding the cluster index as a variable to the data frame
fb3 = cbind(fb2, cluster = km.res_fb$cluster)
head(fb3)
write.csv(fb3, "C:/Users/Dr Vinod/Desktop/football_ kmean/fb3.csv")
#Checking the means of values in the fb_5000
c3 = aggregate(fb3, by=list(cluster=km.res_fb$cluster), mean)
write.csv(c3, "C:/Users/Dr Vinod/Desktop/football_ kmean/c3.csv")
|
095406aaffff99052dfc7b4e03038fdc02332cf1
|
06aa50fc00e7c7ebbdec19450f531222a23aa0d7
|
/man/get_pums_race.Rd
|
d9cec89e70a2f93b609a1de1c408e27a6c1e3db3
|
[
"MIT"
] |
permissive
|
djliden/youthhealthr
|
5c4074d90956a1bf287c67c26124e71f3a9a427f
|
a1c2958a5d46d77b81fb1c01b81e53efa8941ee9
|
refs/heads/master
| 2023-03-17T06:05:39.024302
| 2021-03-16T22:15:38
| 2021-03-16T22:15:38
| 305,777,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 561
|
rd
|
get_pums_race.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/youth_demo.R
\name{get_pums_race}
\alias{get_pums_race}
\title{Race by age group}
\usage{
get_pums_race(
age_breaks = c(0, 14, 24, 99),
age_labels = c("0-13", "14-24", "25+")
)
}
\arguments{
\item{age_breaks}{min age, max age, and any breaks in between (numerical)}
\item{age_labels}{vector of strings showing age ranges e.g. c("14-24", "25+")}
}
\value{
tibble
}
\description{
race by age group
}
\details{
numbers/percentages of Nevadans by race and age group.
}
\author{
Daniel James Liden
}
|
410766b7e1d23bdc384585fd98d7a721aca8b9bf
|
0ccdd0abbf3d39f1c5e971e26a60f53719ba6fb4
|
/inferential_stats/inference/SamplingDist.R
|
8d32b4385d80faf6c8f7037c34fffc66df73bed9
|
[] |
no_license
|
pickle-donut/RScripts
|
9a87bd616ea3cd89a94c98e8438c3bc80432392b
|
2a60daf6cfbeaa194f696daf699b387544f8f163
|
refs/heads/master
| 2022-11-09T00:01:57.999428
| 2020-06-15T23:27:00
| 2020-06-15T23:27:00
| 270,508,904
| 0
| 0
| null | 2020-06-15T23:27:01
| 2020-06-08T03:06:02
|
R
|
UTF-8
|
R
| false
| false
| 5,110
|
r
|
SamplingDist.R
|
# install.packages("moments")
# Samples and Sampling Distributions
#
# Demonstration: draw two independent samples of size 50 from a Normal(10, 5)
# "weight loss" population, print descriptive statistics for each, then look
# at the distribution of the sum W = X1 + X2 and the mean x_bar = W / 2.
# NOTE(review): there is no set.seed() call, so results differ on every run.
#
library("moments")  # provides skewness() and kurtosis()
#
# First sample
X1 <- c(rnorm(50, 10, 5))
#
# Descriptive statistics, rounded to 4 decimal places
print(paste("The sample mean is: ", round(mean(X1),4)))
print(paste("The sample standard deviation is: ", round(sd(X1),4)))
print(paste("The sample median is: ", round(median(X1),4)))
print(paste("The sample skewness is: ", round(skewness(X1),4)))
print(paste("The sample kurtosis is: ", round(kurtosis(X1),4)))
h <- hist(X1, main="Histogram of Weight Loss in a sample of size 50",
xlab="Weight Loss in pounds",
border="blue",
col="green",
xlim=c(-5,30),
las=1,
breaks=14)
#
# Second Sample (an independent draw from the same population)
X2 <- c(rnorm(50, 10, 5))
#
print(paste("The sample mean is: ", round(mean(X2),4)))
print(paste("The sample standard deviation is: ", round(sd(X2),4)))
print(paste("The sample median is: ", round(median(X2),4)))
print(paste("The sample skewness is: ", round(skewness(X2),4)))
print(paste("The sample kurtosis is: ", round(kurtosis(X2),4)))
hist(X2, main="Histogram of Weight Loss in a sample of size 50",
xlab="Weight Loss in pounds",
border="blue",
col="green",
xlim=c(-5,30),
las=1,
breaks=14)
# W = X1 + X2 (elementwise sum of the two samples)
W = X1 + X2
print(paste("The sample mean of X1 + X2 is: ", round(mean(W),4)))
print(paste("The sample standard deviation of X1 + X2 is: ", round(sd(W),4)))
hist(W, main="Histogram of X1 + X2 in a sample of size 50",
xlab="X1 + X2",
border="blue",
col="green",
xlim=c(-5,30),
las=1,
breaks=14)
#
# X-bar = (X1 + X2)/2 = W/2
x_bar = (X1 + X2)/2
print(paste("The sample mean of x_bar is: ", round(mean(x_bar),4)))
print(paste("The sample standard deviation of x_bar is: ", round(sd(x_bar),4)))
# BUG FIX: the original plotted W here while titling and labelling the figure
# "x-bar"; plot x_bar itself so the histogram matches its labels.
hist(x_bar, main="Histogram of x-bar in a sample of size 50",
xlab="x_bar",
border="blue",
col="green",
xlim=c(-5,30),
las=1,
breaks=14)
#
# Generating an empirical sampling distribution of sample mean - X-bar
# Define the x_bar vector
# num_samp = number of repeated samples; samp_size = observations per sample
num_samp = 1000
samp_size = 50
# Preallocate the container for the 1000 sample means
x_bar <- vector("numeric", num_samp)
#
# Each x-bar is the mean of a random sample of size 50 drawn from a Normal(10, 5)
#
# We are generating 1000 X-bars (from 1000 samples) and storing them in the x-bar vector
#
for (i in 1:num_samp) {
x_bar[i] = mean(rnorm(samp_size, 10, 5))
}
#
# Calculate the mean and standard deviation (called standard error) of x-bar
# from the empirical sampling distribution formed by 1000 samples of size 50
#
# NOTE(review): `stderr` masks base R's stderr() connection function for the
# rest of this session; harmless here, but worth renaming if the script grows.
Exp_x_bar <- mean(x_bar)
stderr <- sd(x_bar)
# The empirical standard error should be close to the theoretical
# sigma / sqrt(n) = 5 / sqrt(50), printed alongside for comparison.
print(paste("The Expected value of X-bar is: ", round(Exp_x_bar,4)))
print(paste("The standard error or standard deviation of X-bar is: ", round(stderr,4),
" versus predicted std error ",round(5/sqrt(samp_size),4)))
hist(x_bar,
main=paste("Histogram of Sampling Distribution of X-bar from ",num_samp,
" samples of size ", samp_size, ""),
xlab="X-bar",
border="blue",
col="green",
xlim=c(5, 15),
las=1,
breaks=20)
#
# Central Limit Theorem demo: single samples of size 100 from three
# differently-shaped populations, then an empirical sampling distribution
# of the mean from one of them.
exp_samp <- rexp(100, 1)  # right-skewed: Exponential(rate = 1), mean 1
mean(exp_samp)
hist(exp_samp,
main=paste("Histogram of a sample from a right-skewed Exponential Population" ))
#
unif_samp <- runif(100, 0, 2)  # symmetric: Uniform on [0, 2], mean 1
mean(unif_samp)
hist(unif_samp,
main=paste("Histogram of a sample from a symmetric Uniform Population" ))
#
beta_samp <- rbeta(100, 50, 1, ncp = 0)  # left-skewed: Beta(50, 1), mean 50/51
mean(beta_samp)
hist(beta_samp,
main=paste("Histogram of a sample from a left-skewed Beta Population" ))
#
# Generating an empirical sampling distribution of sample mean - X-bar
# Define the x_bar vector
num_samp = 1000
samp_size = 100
x_bar <- vector("numeric", num_samp)
#
# Each x-bar is the mean of a random sample of size 100 drawn from 3 different
# distributions: exponential (right-skewed), uniform (symmetric), beta (left-skewed).
# We are generating 1000 X-bars (from 1000 samples) and storing them in the x-bar vector
#
for (i in 1:num_samp) {
# Uncomment the distribution to be used in the next three lines; leave the other two commented
# x_bar[i] = mean(rexp(samp_size, 1))
# x_bar[i] = mean(runif(samp_size, 0, 2))
x_bar[i] = mean(rbeta(samp_size, 50, 1, ncp = 0))
}
#
# Calculate the mean and standard deviation (called standard error) of x-bar
# from the empirical sampling distribution formed by the repeated samples.
#
Expec_x_bar <- mean(x_bar)
stderr <- sd(x_bar)
print(paste("The Expected value of X-bar is: ", round(Expec_x_bar,4)))
print(paste("The standard error or standard deviation of X-bar is: ", round(stderr,4)))
hist(x_bar,
main=paste("Histogram of Sampling Distribution of X-bar from ",num_samp,
" samples of size ", samp_size, ""),
xlab="X-bar",
border="blue",
col="green",
# xlim=c(5, 15),
las=1,
breaks=20)
# We also collect other quantities such as skewness and kurtosis of the sampling distribution
#
skewness(x_bar)
kurtosis(x_bar)
#
# Normality diagnostics for the empirical sampling distribution
qqnorm(x_bar)
qqline(x_bar, col = 2)
d <- density(x_bar)
plot(d)
# NOTE(review): the x grid below and the blue reference curve were written for
# the Normal(10, 5) population of the earlier section; for the beta/exponential/
# uniform choices above the overlay falls outside the density's support --
# adjust seq() and the blue-curve parameters if this section is run standalone.
x <- seq(7, 13, by=0.1)
# BUG FIX: the original referenced `Exp_x_bar`, a leftover variable from the
# earlier Normal(10, 5) section; use this section's `Expec_x_bar` so the red
# curve is centred on the current sampling distribution.
y <- dnorm(x,Expec_x_bar,stderr)
lines(x, y, col = "red")
y1 <- dnorm(x,10.00,5/sqrt(samp_size))
lines(x, y1, col = "blue")
|
d32c06223e450e45402cd738815ce1bbded7f9b9
|
da240952753caf3a3b79e777b1bfe24140aaba86
|
/mhl1_inv/summarise_pi_inv.R
|
4757e00dde1adad26332f95bcb240b7c078f3a03
|
[] |
no_license
|
cooplab/hilo
|
ea5ea9d472ee7cf2cab17aa83e8f568c54fce34c
|
64483aaf0abd40d25846969b8732e07abf9b7667
|
refs/heads/master
| 2023-08-18T13:03:07.458675
| 2021-09-20T20:12:10
| 2021-09-20T20:12:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,028
|
r
|
summarise_pi_inv.R
|
#!/usr/bin/env Rscript
# Summarise nucleotide diversity (Watterson's theta and pairwise theta)
# within zea groups for the maize and mexicana allele clusters across the
# putative mhl1 inversion; writes a tab-separated table and a LaTeX table.
library(dplyr)
library(xtable)
# this script summarises pi within zea groups
# for the maize and mexicana allele clusters
# across the putative inversion
# load variables from Snakefile
# (the commented line under each assignment shows an example value for
#  running the script interactively outside Snakemake)
colors_file = snakemake@params[["colors"]]
# colors_file = "colors.R"
PREFIX = snakemake@params[["PREFIX"]]
# PREFIX = "HILO_MAIZE55_PARV50"
K = snakemake@params[["K"]]
# K = 3
Ne = snakemake@params[["Ne"]]
# Ne = 10000
YESNO = snakemake@params[["YESNO"]]
# YESNO = "yes"
tex_out = snakemake@output[["tex"]]
# tex_out = paste0("../hilo_manuscript/tables/", PREFIX, "_K", K, "_Ne", Ne, "_", YESNO, "Boot_summary_pi_inv_mhl1.tex")
tbl_out = snakemake@output[["tbl"]]
# tbl_out = paste0("mhl1_inv/results/", PREFIX, "/K", K, "/Ne", Ne, "_", YESNO, "Boot/summary_pi_inv_mhl1.txt")
# load data
source(colors_file)
# Four (ancestry cluster, zea group) combinations to summarise; `groups`
# becomes the human-readable sample label in the output tables.
ancestries = c("maize", "maize", "mexicana", "mexicana")
zea = c("parv", "maize", "maize", "mexicana")
names = c("parviglumis", "maize", "maize", "mexicana")
groups = paste(names, "within", ancestries, "inversion cluster")
# Read each .pestPG theta file (column layout matches ANGSD thetaStat output --
# presumably produced by that tool; verify upstream in the Snakefile), convert
# the raw theta sums tW/tP to per-site values by dividing by nSites, and stack
# the four summaries into one table sorted by decreasing pairwise theta.
d = do.call(bind_rows, lapply(1:4, function(i)
read.table(paste0("mhl1_inv/results/", PREFIX, "/K", K, "/Ne", Ne, "_", YESNO, "Boot/",
ancestries[i], "_cluster/", zea[i], ".pi.inv.pestPG"),
header = F, skip = 1, sep = "\t") %>%
data.table::setnames(c("region", "Chr", "WinCenter", "tW", "tP", "tF", "tH", "tL", "Tajima", "fuf", "fud", "fayh", "zeng", "nSites")) %>%
dplyr::select("tW", "tP", "nSites") %>%
dplyr::mutate(wattersons_theta = tW/nSites,
pairwise_theta = tP/nSites,
inv_cluster = ancestries[i],
zea = zea[i],
sample = groups[i]))) %>%
dplyr::select(., sample, pairwise_theta, wattersons_theta) %>%
arrange(-pairwise_theta)
# Plain-text output for downstream use
write.table(d, file = tbl_out, col.names = T, row.names = F, quote = F, sep = "\t")
# LaTeX table for the manuscript (3 significant digits)
print(xtable(d,
type = "latex",
latex.environments = NULL,
digits = 3),
include.rownames = F,
file = tex_out)
|
ab64967ce84fd798a6159f524139ebcd76da5cd8
|
3d3f11d6002a505483003a59e0a94264ddd86757
|
/R/nnls.R
|
d744fa749b9740b3b90f35eae95ef1bc5c10e975
|
[] |
no_license
|
cran/lqa
|
75a867677d54e2e1b84dbe90822628adf42e6238
|
fb0b5a6a7e8ce9028ac5f48adee8ffa8b3434d0b
|
refs/heads/master
| 2020-06-03T14:21:37.296678
| 2010-07-12T00:00:00
| 2010-07-12T00:00:00
| 17,697,200
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 96
|
r
|
nnls.R
|
nnls <- function (cvec, nnls.y, nnls.x)
{
  # Residual sum of squares || nnls.y - nnls.x %*% cvec ||^2 for a candidate
  # coefficient vector `cvec` -- the objective value minimised in a
  # (non-negative) least-squares fit of response `nnls.y` on design `nnls.x`.
  resid <- drop(nnls.y - nnls.x %*% cvec)
  sum(resid * resid)
}
|
93eb06c771cc14c5764afefcc180db0f2ffc8b67
|
237bcbdc6b09c57b251191471359eeefb8014410
|
/Analyse_project_MERGE_AML_ANALYSIS_2015_F.r
|
18e77f59f00e36ad95a97127e444e318ab871af8
|
[] |
no_license
|
achalneupane/rcodes
|
d2055b03ca70fcd687440e6262037507407ec7a5
|
98cbc1b65d85bbb6913eeffad62ad15ab9d2451a
|
refs/heads/master
| 2022-10-02T20:35:18.444003
| 2022-09-09T20:53:03
| 2022-09-09T20:53:03
| 106,714,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 385,255
|
r
|
Analyse_project_MERGE_AML_ANALYSIS_2015_F.r
|
## /media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis/Vcf_Merge.GQ-20.Min.ALT-14_2015-03-16_AllAMLandLung.ARE_RELATED.Fri_Jul_03_2015.genetic_QC.txt
#### red in per patiant shared regions....exampls
data<-cbind(c("chr1","chr1","chr1"),c(10,50,50),c(30,70,100))
data2<-cbind(c("chr1","chr1","chr1"),c(10,60,10),c(30,70,40))
Views on a 249250621-length Rle subject
views:
start end width
[1] 10 30 21 [10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10]
[2] 60 70 11 [10 10 10 10 10 10 10 10 10 10 10]
overlapping<-cbind(start(x[["chr1"]]),end(x[["chr1"]]))
colnames(overlapping)<-c("starts","ends")
overlapping
write.table(sample.sheet.full,file="sampe.sheet.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
cyto<-read.delim("/media/TRI-T-DRIVE-tpleo/uqdi/Core_Services/UQCCG/Sequencing/CompleteGenomics/MergedAML_Cytogenetics_etc.csv",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
cg.done<-read.delim("/media/UQCCG/Sequencing/CompleteGenomics/917_Data_Delivery_091214.csv",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
cyto[1:5,]
cg.done[1:5,]
posns<-match(cg.done[,"Cust..Sample.ID"],cyto[,"ID"])
posns
missing<-is.na(posns)
dim(cg.done)
sum(!missing)
cg.done[missing,"Cust..Sample.ID"]
cbind(cg.done[missing,"Cust..Sample.ID"],cg.done[missing,"Cust..Sample.ID"])
cyto.have<-
cyto[posns[!missing],]
compare<-cbind(cg.done[!missing,],cyto[posns[!missing],])
getwd()
setwd("/media/UQCCG/Sequencing/CompleteGenomics")
write.table(compare,file="cg.cyto2.csv",col.names=TRUE,row.names=FALSE,sep="\t")
#######################################
tcga<-read.delim("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/Phenotypes_TCGA.csv",header=T,sep="\t",fill=TRUE,as.is=TRUE,stringsAsFactors=FALSE)
tcga[1:5,1:11]
colnames(tcga)
tail(sample.sheet.full)
cases<-sample.sheet.full[sample.sheet.full$AffectionStatus==2,"ParticipantCode"]
cases.short<-unlist(lapply(strsplit(cases,split="-"),function(x) x[3]))
cases<-cbind(cases,cases.short)
cases[1:5,]
posns<-match(cases[,"cases.short"],tcga[,"TCGA.Patient.ID"])
posns
missing<-is.na(posns)
sum(missing)
cases[missing,] # TCGA-AB-2852-03A-01W-0726-08
extra<-tcga[posns,c("TCGA.Patient.ID","Sex","Race","Age","FAB","X.BM.Blast")]
dim(extra)
dim(caeses)
cases[1:5,]
extra[1:5,]
cases<-cbind(cases,extra)
cases[is.na(cases[,"TCGA.Patient.ID"]),]
cases[is.na(cases[,"TCGA.Patient.ID"]),"TCGA.Patient.ID"]<-2852
cases[is.na(cases[,"TCGA.Patient.ID"]),"Race"]<-"unknown"
tail(sample.sheet.full)
posns<-match(sample.sheet.full[,"ParticipantCode"],cases[,"cases"])
posns
missing<-is.na(posns)
sum(missing)
add.extra<-extra[posns,]
sample.sheet.full<-cbind(sample.sheet.full,add.extra)
a.sheet<-cbind("HIP","ALL",all.possible.samples,0,0,0,1)
a.sheet[1:5,]
getwd()
write.table(a.sheet,file="sample_sheet.csv",col.names=TRUE,row.names=FALSE,sep=",")
################# build the sample sheet
################# build the sample sheet
################# build the sample sheet
################# build the sample sheet
################# build the sample sheet
################# build the sample sheet
the.sample.sheet<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis/sample_sheet.for.analysis.txt"
sample.sheet.full<-read.delim(the.sample.sheet,header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
all.possible.samples<-unique(sample.sheet.full[,"ParticipantCode"])
#the.sample.sheet<-"/media/UQCCG/Sequencing/CompleteGenomics/Chort_descriptions/Sequencing comparisons-ver 4.csv"
sample.sheet.new<-read.delim("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/BAM/2015-03-16_AllAMLandLung.chr2.ALL.Sample_Sheet_NEW.csv",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
colnames(sample.sheet.new)
#"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013/2013-10-27_AML_with_AOGCControl_NoFailedLane/BAM/TGCM-AML-combine_SampleSheet.csv"
qc<-read.delim("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis/Vcf_Merge.GQ-20.Min.ALT-14_2015-03-16_AllAMLandLung.ARE_RELATED.Fri_Jul_03_2015.genetic_QC.txt",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
the.sample.sheet<-"/media/UQCCG/Sequencing/CompleteGenomics/Chort_descriptions/Sequencing comparisons-ver 6.csv"
sample.sheet.full<-read.delim(the.sample.sheet,header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
#sample.sheet.full<-read.delim(the.sample.sheet,header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
colnames(sample.sheet.full)[colnames(sample.sheet.full)=="Sequence.Number"]<-"ParticipantCode"
sample.sheet.full[1:5,1:10]
sample.sheet.new[1:10,]
colnames(sample.sheet.full)
dim(sample.sheet.full)
sample.sheet.full[1:5,1:10]
qc[1:5,]
all.possible.samples[grep("PGB",all.possible.samples)]
#"AMLM12030PGB" "AMLM12PAH030PGB"
all.possible.samples[grep("BJD",all.possible.samples)]
all.possible.samples[grep("1409101",all.possible.samples)]
## > all.possible.samples[grep("PGB",all.possible.samples)]
## [1] "AMLM12030PGB" "AMLM12PAH030PGB" ## PAH is crap coverage
## > all.possible.samples[grep("BJD",all.possible.samples)]
## [1] "AMLM12038BJD" "AMLM12PAH038BJD" ## PAH is crap coverage place in contaminated
## ParticipantCode:AMAS-25.3-Diagnostic;LibraryPlateLocation:NLS-20140812-EDAM/SJ/JH-AMAS-LeoPharma-D11;OldPCode:1420901;
## AMAS-18.3- is 25.3
## 13468(SDDS) AMLM12035D-T are sibs
## AMLM12035D-T G11F
## 13380 AMAS-18.3-DiagnosticMouseBlood
## 13474 AMLM12035D-T
## AMAS-18.3-DiagnosticMouseBlood AMAS-25.3-Diagnostic
## AMAS-18.3-DiagnosticMouseSpleenLiver AMAS-25.3-Diagnostic
## 13380 AMAS-18.3-DiagnosticMouseSpleenLiver
## AMAS-18.3-DiagnosticMouseBlood AMLM12033MD
## 13381 AMAS-18.3-DiagnosticMouseSpleenLiver
## 37.2 AMAS-18.3-DiagnosticMouseBlood
## AMAS-18.3-DiagnosticMouseSpleenLiver AMLM12004VDP
## 13379 AMAS-18.3-DiagnosticMouseSpleenLiver
### chk 25.3 is not rea;ted to 18.3 in any way
# in RSH <- AML
## RG ID:H1178ADXX-1-05 PL:illumina LB:H1178ADXX-1-05 DS:FCID:H1178ADXX;Lane:1;SampleID:H1178ADXX-1-05;SampleRef:Human;IndexSeq:GGACTCCT;Description:NxtD;NxtXR;Control:N;Recipe:101+9+101;Operator:SharonSong;JessicaHarris;SampleProject:RSGB;ParticipantCode:AMLM12RMH026J-N;LibraryPlateLocation:NLA-20130927-RSGB-SS-E04; SM:AMLM12RMH026J-N
## @RG ID:H1178ADXX-1-11 PL:illumina LB:H1178ADXX-1-11 DS:FCID:H1178ADXX;Lane:1;SampleID:H1178ADXX-1-11;SampleRef:Human;IndexSeq:AAGAGGCA;Description:NxtD;NxtXR;Control:N;Recipe:101+9+101;Operator:SharonSong;JessicaHarris;SampleProject:RSGB;ParticipantCode:AMLM12RMH026J-N;LibraryPlateLocation:NLA-20130927-RSGB-SS-E05; SM:AMLM12RMH026J-N
## @RG ID:H1178ADXX-2-05 PL:illumina LB:H1178ADXX-2-05 DS:FCID:H1178ADXX;Lane:2;SampleID:H1178ADXX-2-05;SampleRef:Human;IndexSeq:GGACTCCT;Description:NxtD;NxtXR;Control:N;Recipe:101+9+101;Operator:SharonSong;JessicaHarris;SampleProject:RSGB;ParticipantCode:AMLM12RMH026J-N;LibraryPlateLocation:NLA-20130927-RSGB-SS-E04; SM:AMLM12RMH026J-N
## @RG ID:H1178ADXX-2-11 PL:illumina LB:H1178ADXX-2-11 DS:FCID:H1178ADXX;Lane:2;SampleID:H1178ADXX-2-11;SampleRef:Human;IndexSeq:AAGAGGCA;Description:NxtD;NxtXR;Control:N;Recipe:101+9+101;Operator:SharonSong;JessicaHarris;SampleProject:RSGB;ParticipantCode:AMLM12RMH026J-N;LibraryPlateLocation:NLA-20130927-RSGB-SS-E05; SM:AMLM12RMH026J-
posns<-match(sample.sheet.new[,"ParticipantCode"], sample.sheet.full[,"ParticipantCode"])
missing<-is.na(posns)
sum(missing)
SampleProject<-rep(NA,times=dim(sample.sheet.new)[1])
sample.sheet<-cbind(sample.sheet.new,sample.sheet.full[posns,])
table(sample.sheet.new[missing,"AffectionStatus"])
a.case<-sample.sheet.new[,"AffectionStatus"]==2
a.control<-sample.sheet.new[,"AffectionStatus"]==1
a.unknown<-sample.sheet.new[,"AffectionStatus"]==9
#sample.sheet.new[a.case,]
sample.sheet.new[missing & a.case,"ParticipantCode"]
# "AMLM12PAH030PGB" "AMLM12PAH038BJD"
sample.sheet.new[missing & a.control,"ParticipantCode"]
sample.sheet.new[missing & a.unknown,"ParticipantCode"]
# "0413E1210023" "0413E1211353" "0413E14100212" "0413E14113512" "0413E2100212" "0413E2113512" these are lung samples
asians<-c("AMLM12PAH030PGB","AMLM12PAH038BJD","NSGC-23.2","NSGC-23.3","NSGC-23.4","13380","13381","13455","13456","13457","13379","13474","37.2","37.3","63","74","83","84","9","99","AMAS-25.3-Diagnostic","AMLM12034H-F","AMLM12038BJD","AMLM12004VDP","AMLM12005R-G","AMLM12033MD","AMAS-18.3-Diagnostic","AMAS-18.3-DiagnosticMouseBlood","AMAS-18.3-DiagnosticMouseSpleenLiver","MODY_250.3","SKDP-200.3083","SKDP-200.3065","SKDP-200.3024","SKDP-200.3025","SKDP-200.3023","SKDP-200.7095","SKDP-200.3045","SKDP-200.305","SKDP-200.3084","SKDP-200.7036")
sample.sheet[1:5,1:10]
sample.sheet[a.case,"SampleProject"]<-"AML"
sample.sheet[a.control,"SampleProject"]<-"Control"
sample.sheet[a.unknown,"SampleProject"]<-"Control"
sample.sheet[a.case & (sample.sheet[,"ParticipantCode"] %in% asians) ,"SampleProject"]<-"Asian-AML"
sample.sheet[a.control & (sample.sheet[,"ParticipantCode"] %in% asians) ,"SampleProject"]<-"Asian-Control"
sum(is.na(sample.sheet[,"SampleProject"]))
table(sample.sheet[,"SampleProject"])
## AML Asian-AML Asian-Control Control
## 175 17 23 363
table(sample.sheet[,"Sequence"])
## 1 Blood Culture 1 Spleen+Liver Culture 2m POST ALLOGRAFT 8m POST ALLOGRAFT Diagnosis Pre-AML Relapse Relapse (Chloroma-Skin-biopsy)
## 1 1 1 1 171 1 8 1
## Remission
## 5
not.diagnosis<-sample.sheet[,"Sequence"]!="Diagnosis" | is.na(sample.sheet[,"Sequence"])
sum(not.diagnosis)
sample.sheet[(not.diagnosis & a.case),c("SampleProject","ParticipantCode","Sequence")]
sample.sheet[(not.diagnosis & a.case),"SampleProject"]<-paste(sample.sheet[(not.diagnosis & a.case),"SampleProject"],"NotDiagnosis",sep="-")
sample.sheet[(not.diagnosis & a.case),c("SampleProject","ParticipantCode","Sequence")]
colnames(sample.sheet)
a.child<-as.numeric(sample.sheet[,"AGE"])<=15 | is.na(sample.sheet[,"AGE"])
sample.sheet[(a.child & a.case),c("SampleProject","ParticipantCode","AGE","Sequence")]
sample.sheet[(a.child & a.case),"SampleProject"]<-paste(sample.sheet[(a.child & a.case),"SampleProject"],"Child",sep="-")
sample.sheet[(a.child & a.case),c("SampleProject","ParticipantCode","AGE","Sequence")]
table(sample.sheet[,"SampleProject"])
## AML AML-Child AML-NotDiagnosis-Child Asian-AML Asian-AML-Child Asian-AML-NotDiagnosis-Child Asian-Control Control
## 136 22 17 11 2 4 23 363
#16 11 2 4 23
qc[1:5,]
are.related<-(qc[,"sample_A"] != qc[,"sample_B"] ) & as.numeric(qc[,"IBS"])>=0.2
related<-unique(qc[are.related,"sample_A"])
a.true.case<-sample.sheet[,"SampleProject"]=="AML"
a.true.control<-sample.sheet[,"SampleProject"]=="Control"
are.related<-(sample.sheet[,"ParticipantCode"] %in% related)
sample.sheet[(are.related & a.true.case),c("SampleProject","ParticipantCode","Sequence")]
## 107 AML AMLM12030PGB Diagnosis # realed to AMLM12PAH030PGB
## 104 AML AMLM12035D-T Diagnosis
## 105 AML AMLM12038BJD Diagnosis # AMLM12038BJD AMLM12PAH038BJD 1.19
# AMLM12035D-T is VERY strange !!!!!!!
## AMLM12035D-T AMLM12035D-T 1.34
## AMLM12035D-T G11M 0.506
## AMLM12035D-T G11F 0.488
## AMLM12035D-T G11P 0.476
## 13429 AMLM12035D-T 0.412
## 13467 AMLM12035D-T 0.543
## 13468 AMLM12035D-T 0.559
## 13469 AMLM12035D-T 0.533
## 13473 AMLM12035D-T 0.504
## 13474 AMLM12035D-T 0.48
## 13475 AMLM12035D-T 0.529
## 13429 G11M 0.401
## 13429 G11F 0.398
## unknow if 1406601-PQ is 1411901 ?? has set is true
## RG ID:C4LL7ACXX-3-710-501 PL:illumina LB:C4LL7ACXX-3-710-501 DS:FCID:C4LL7ACXX;Lane:3;SampleID:C4LL7ACXX-3-710-501;SampleRef:Human;IndexSeq:CGAGGCTG;Description:NxtD;NxtXR;Control:N;Recipe:101+7+101;Operator:LisaAnderson;SampleProject:AMAS;ParticipantCode:1406601-PQ;LibraryPlateLocation:NEW2-20140603-AML-LW-C10;Sex:x;OldPCode:1406601-PQ; SM:1406601-PQ
## @RG ID:C4MJJACXX-3-710-501 PL:illumina LB:C4MJJACXX-3-710-501 DS:FCID:C4MJJACXX;Lane:3;SampleID:C4MJJACXX-3-710-501;SampleRef:Human;IndexSeq:CGAGGCTG;Description:NxtD;NxtXR;Control:N;Recipe:101+7+101;Operator:LisaAnderson;SampleProject:AMAS;ParticipantCode:1406601-PQ;LibraryPlateLocation:NEW2-20140603-AML-LW-C10;Sex:x;OldPCode:1406601-PQ; SM:1406601-PQ
#1406601-PQ AMLM12022N-A 0.318 but AMLM12022N-A otherwise looks ok
##### Q2 to andrew does this make sense 1406601-PQ realed to realpse but not diagnosis (is post allograph)
## 1406601-PQ 1406601-PQ 1.01
## 1406601-PQ 1409101-PQ 0.786
## 1406601-PQ AMAS-5.3-Relapse 0.501
## 1406601-PQ AMAS-18.3-DiagnosticMouseSpleenLiver 0.385
## 1406601-PQ AMLM12022N-A 0.318
## 1406601-PQ AMAS-5.3-PostAllograft 0.166
## 1406601-PQ AMAS-5.3-8PostAllograft 0.0934
## 1406601-PQ AMAS-5.3-Diagnostic 0.0822
## 1406601-PQ AOGC-14-2645 0.0687
## 1409101-PQ 1409101-PQ 0.993
## 1409101-PQ AMAS-5.3-Relapse 0.472
## 1409101-PQ AMLM12022N-A 0.309
## 1409101-PQ AMAS-5.3-PostAllograft 0.161
## 1409101-PQ AMLM12036T-S 0.0962
## 1409101-PQ AMAS-5.3-8PostAllograft 0.0753
## 1409101-PQ AMAS-2.3-Relapse2 0.068
## 1409101-PQ AMAS-5.3-Diagnostic 0.0672
bad<-c("AMLM12PAH030PGB","AMLM12PAH038BJD","AMLM12035D-T","1406601-PQ")
are.related<-(qc[,"sample_A"] != qc[,"sample_B"] ) & as.numeric(qc[,"IBS"])>=0.2 & ( !(qc[,"sample_A"] %in% bad) | !(qc[,"sample_B"] %in% bad) )
related<-unique(qc[are.related,"sample_A"])
a.true.case<-sample.sheet[,"SampleProject"]=="AML"
a.true.control<-sample.sheet[,"SampleProject"]=="Control"
are.related<-(sample.sheet[,"ParticipantCode"] %in% related)
sample.sheet[(are.related & a.true.case),c("SampleProject","ParticipantCode","Sequence")]
## SampleProject ParticipantCode Sequence
## 107 AML AMLM12030PGB Diagnosis
## 104 AML AMLM12035D-T Diagnosis
sample.sheet[(are.related & a.true.control),c("SampleProject","ParticipantCode","Sequence")]
a.lung<-grepl("^0413",sample.sheet[,"ParticipantCode"])
a.indg<-grepl("^SKDP.200",sample.sheet[,"ParticipantCode"])
sample.sheet[a.indg,"ParticipantCode"]
# "SKDP-200.3023" "SKDP-200.3024" "SKDP-200.3025" "SKDP-200.3045" "SKDP-200.305" "SKDP-200.3065" "SKDP-200.3083" "SKDP-200.3084" "SKDP-200.7036" "SKDP-200.7093" "SKDP-200.7095" "SKDP-200.7096"
sample.sheet[a.lung,"ParticipantCode"]
# "0413E1210023" "0413E1211353" "0413E14100212" "0413E14113512" "0413E2100212" "0413E2113512"
contaminted<-unique(c(sample.sheet[a.lung,"ParticipantCode"],sample.sheet[a.indg,"ParticipantCode"],sample.sheet[(are.related & a.true.control),c("ParticipantCode")]))
length(contaminted) # 129
contaminted<-c(contaminted,bad)
ok<-!(sample.sheet[,"ParticipantCode"] %in% contaminted)
t(table(sample.sheet[ok,"SampleProject"]))
## AML AML-Child AML-NotDiagnosis-Child Asian-AML Asian-AML-Child Asian-AML-NotDiagnosis-Child Asian-Control Control
## [1,] 135 22 16 11 2 2 13 244
getwd()
setwd( "/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis")
write.table(sample.sheet,file="sample_sheet.for.analysis.txt",col.names=TRUE,row.names=FALSE,sep="\t")
write.table(contaminted,file="related.or.bad.txt",col.names=TRUE,row.names=FALSE,sep="\t")
## "/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis/sample_sheet.for.analysis.txt"
## "/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis/related.or.bad.txt"
######## ONLY NEED TO CHOOSE A DIRECTORY AND EXTENSIONS - used tab delimited files
#source("http://bioconductor.org/biocLite.R")
# biocLite(c("HardyWeinberg"))n
# install.packages("HardyWeinberg")
###############################################
#analysis.dir<-"/media/ga-apps/UQCCG/Data/Sequence_Genotypes/2013-02-27_AML_with_AOGCControl/Analysis"
#annotate.dir<-"/media/ga-apps/UQCCG/Data/Sequence_Genotypes/2013-02-27_AML_with_AOGCControl/Annotate"
analysis.dir<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis"
annotate.dir<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013/2013-10-27_AML_with_AOGCControl_NoFailedLane/Annotate"
project.extension<-".All-maf-filtered.txt"
project.name<-"2013-02-27_AML_with_AOGCControl" ## prefix for output file
fam<-c("TGCM-AML") # ALL or c() ""-one project (the prefix of the summary files to collect
#the.sample.sheet<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-02-27_AML_with_AOGCControl/BAM/TGCM-AML-combine_SampleSheet.csv"
the.sample.sheet<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013/2013-10-27_AML_with_AOGCControl_NoFailedLane/BAM/TGCM-AML-combine_SampleSheet.csv"
remove.from.controls<-c() # expand.labels.to.samples(remove.from.controls,control.samples)
remove.from.all.samples<-c() #expand.labels.to.samples(remove.from.all.samples,all.samples)
remove.cols<-c()
#regions.file<-"/media/scratch2/AOGC-NGS/GFOS/gefos.seq/METHODS/0613-skatmeta-gefos/static/Homo_sapiens.GRCh37.70.protein_coding.genespace_boundaries.5k.split100k.txt"
core.ann<-c("chr","start","end","REF","ALT","TYPE") # out put to annanlsys programs and need foe colun labels
dont.build.summary<-FALSE ##
GATK.SB<-TRUE
maf.threshold.filter.to.use<-c(0.05)
a.label<-"CoVarRun.noControl.AML.regions"
###########################################################################
############################################### Config section: 2014-06-24 AML/RSGB/AOGC HaplotypeCaller run
#analysis.dir<-"/media/ga-apps/UQCCG/Data/Sequence_Genotypes/2013-02-27_AML_with_AOGCControl/Analysis"
#annotate.dir<-"/media/ga-apps/UQCCG/Data/Sequence_Genotypes/2013-02-27_AML_with_AOGCControl/Annotate"
analysis.dir<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-06-24_AML_RSGB_AOGC_withHaplotypeCaller/Analysis"
annotate.dir<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-10-27_AML_with_AOGCControl_NoFailedLane/Annotate" ## NOTE(review): annotate.dir still points at the 2013-10-27 run while analysis.dir is 2014-06-24 -- confirm this cross-project mix is intended
project.extension<-".wanted.All-maf-filtered.txt" # suffix of the per-family summary files to collect
project.name<-"2014-06-24_AML_RSGB_AOGC_withHaplotypeCaller" ## prefix for output file
fam<-c("ALL") # ALL or c() ""-one project (the prefix of the summary files to collect)
#the.sample.sheet<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-02-27_AML_with_AOGCControl/BAM/TGCM-AML-combine_SampleSheet.csv"
the.sample.sheet<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-06-24_AML_RSGB_AOGC_withHaplotypeCaller/BAM/TGCM-AML_RSGB_PILOT_SampleSheet_controls.csv"
remove.from.controls<-c() # expand.labels.to.samples(remove.from.controls,control.samples)
remove.from.all.samples<-c() #expand.labels.to.samples(remove.from.all.samples,all.samples)
remove.cols<-c() # extra genotype columns to drop; empty => keep all
#regions.file<-"/media/scratch2/AOGC-NGS/GFOS/gefos.seq/METHODS/0613-skatmeta-gefos/static/Homo_sapiens.GRCh37.70.protein_coding.genespace_boundaries.5k.split100k.txt"
core.ann<-c("chr","start","end","REF","ALT","TYPE") # output to analysis programs; needed for column labels
dont.build.summary<-FALSE ## FALSE => (re)build the summary tables
GATK.SB<-TRUE # TRUE selects the GATK strand-bias (SB) quality-filter column set defined below
maf.threshold.filter.to.use<-c(0.05) # MAF cutoff(s) used for rare-variant filtering
a.label<-"CoVarRun.noControl.AML.regions" # free-text label used in output file names
###########################################################################
############################################### Config section: 2014-11-04 AML TCGA replication (new HaplotypeCaller run with all data)
#analysis.dir<-"/media/ga-apps/UQCCG/Data/Sequence_Genotypes/2013-02-27_AML_with_AOGCControl/Analysis"
#annotate.dir<-"/media/ga-apps/UQCCG/Data/Sequence_Genotypes/2013-02-27_AML_with_AOGCControl/Annotate"
#/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-10-09_AML_CompleteGenomics_HC/Analysis/2014-10-09_AML_CompleteGenomics_HC.chrALL..ALL.ALL_GENOTYPES_.analysis-maf-filtered.txt
analysis.dir<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis"
annotate.dir<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Annotate"
## /media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-10-09_AML_CompleteGenomics_HC/Analysis/2014-10-09_AML_CompleteGenomics_HC.chr1.ALL.ALL_GENOTYPES_analysis-maf-filtered.txt
##/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-10-09_AML_CompleteGenomics_HC/Analysis/2014-10-09_AML_CompleteGenomics_HC.chrALL.Indel.ALL.ALL_GENOTYPES_analysis.txt
#/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/2014-11-04_AML_TCGA_ReplicationHC.ALL.BEST.chrALL.ACC_SUBSET2.ALL.ALL_GENOTYPES_analysis-maf-filtered.txt
## project.extension<-".analysis-maf-filtered.txt"
## project.name<-"2014-10-09_AML_CompleteGenomics_HC." ## prefix for output file
## fam<-c("chrALL_GENOTYPES") # ALL or c() ""-one project (the prefix of the summary files to collect
#/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-11-04_AML_TCGA_Replication/Analysis/2014-11-04_AML_TCGA_Replication.chrALL.ACC_good_qual.ALL.ALL_GENOTYPES_analysis-maf-filtered.txt
project.extension<-"_analysis-maf-filtered.txt" # suffix of the per-family summary files to collect
project.name<-"2014-10-09_AML_CompleteGenomics_HC." ## prefix for output file; NOTE(review): analysis.dir is the 2014-11-04 project but this prefix is 2014-10-09 -- looks like a copy-paste mismatch, confirm which run is meant
fam<-c("ALL.BEST.chrALL.ACC_SUBSET2.ALL.ALL_GENOTYPES") # fam<-c("chrALL.ACC_good_qual.ALL.ALL_GENOTYPES") ALL or c() ""-one project (the prefix of the summary files to collect
#the.sample.sheet<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-02-27_AML_with_AOGCControl/BAM/TGCM-AML-combine_SampleSheet.csv"
the.sample.sheet<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/Full.sample_sheet.wP.csv"
remove.from.controls<-c() # expand.labels.to.samples(remove.from.controls,control.samples)
remove.from.all.samples<-c() #expand.labels.to.samples(remove.from.all.samples,all.samples)
remove.cols<-c() # extra genotype columns to drop; empty => keep all
#regions.file<-"/media/scratch2/AOGC-NGS/GFOS/gefos.seq/METHODS/0613-skatmeta-gefos/static/Homo_sapiens.GRCh37.70.protein_coding.genespace_boundaries.5k.split100k.txt"
core.ann<-c("chr","start","end","REF","ALT","TYPE") # output to analysis programs; needed for column labels
dont.build.summary<-FALSE ## FALSE => (re)build the summary tables
GATK.SB<-TRUE # TRUE selects the GATK strand-bias (SB) quality-filter column set defined below
maf.threshold.filter.to.use<-c(0.05) # MAF cutoff(s) used for rare-variant filtering
a.label<-"CoVarRun.noControl.AML.regions" # free-text label used in output file names
###########################################################################
############################################### Config section: 2015-03-16 all AML + lung (new HaplotypeCaller run with all data)
## NOTE(review): this is the LAST config section before the processing code, so
## these values are the ones actually in effect for the run below.
#/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis
## /media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis/2015-03-16_AllAMLandLung.BEST.chrALL.ACC_SUBSET.ALL.ALL_GENOTYPES_analysis.txt
## /media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis/2015-03-16_AllAMLandLung.BEST.chrALL.ACC_0.025.ALL.ALL_GENOTYPES_analysis.txt
analysis.dir<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis"
annotate.dir<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Annotate"
## /media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-10-09_AML_CompleteGenomics_HC/Analysis/2014-10-09_AML_CompleteGenomics_HC.chr1.ALL.ALL_GENOTYPES_analysis-maf-filtered.txt
##/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-10-09_AML_CompleteGenomics_HC/Analysis/2014-10-09_AML_CompleteGenomics_HC.chrALL.Indel.ALL.ALL_GENOTYPES_analysis.txt
## project.extension<-".analysis-maf-filtered.txt"
## project.name<-"2014-10-09_AML_CompleteGenomics_HC." ## prefix for output file
## fam<-c("chrALL_GENOTYPES") # ALL or c() ""-one project (the prefix of the summary files to collect
#/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-11-04_AML_TCGA_Replication/Analysis/2014-11-04_AML_TCGA_Replication.chrALL.ACC_good_qual.ALL.ALL_GENOTYPES_analysis-maf-filtered.txt
project.extension<-"_analysis.txt" # suffix of the per-family summary files to collect
project.name<-"2015-03-16_AllAMLandLung." ## prefix for output file
fam<-c("BEST.chrALL.ACC_SUBSET.ALL.ALL_GENOTYPES") # ALL or c() ""-one project (the prefix of the summary files to collect
#fam<-c("BEST.chrALL.ACC_0.025.ALL.ALL_GENOTYPES")
#the.sample.sheet<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-02-27_AML_with_AOGCControl/BAM/TGCM-AML-combine_SampleSheet.csv"
#the.sample.sheet<-"/media/UQCCG/Sequencing/CompleteGenomics/Chort_descriptions/Sequencing comparisons-ver 6.csv"
the.sample.sheet<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis/sample_sheet.for.analysis.txt" # NOTE(review): .txt file, but it is read below with sep="," -- confirm it really is comma-delimited
related.or.bad.file<- "/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis/related.or.bad.txt" # list of related/failed samples to exclude; read further below
remove.from.controls<-c() # expand.labels.to.samples(remove.from.controls,control.samples)
remove.from.all.samples<-c() #expand.labels.to.samples(remove.from.all.samples,all.samples)
remove.cols<-c() # extra genotype columns to drop; empty => keep all
#regions.file<-"/media/scratch2/AOGC-NGS/GFOS/gefos.seq/METHODS/0613-skatmeta-gefos/static/Homo_sapiens.GRCh37.70.protein_coding.genespace_boundaries.5k.split100k.txt"
core.ann<-c("chr","start","end","REF","ALT","TYPE") # output to analysis programs; needed for column labels
dont.build.summary<-FALSE ## FALSE => (re)build the summary tables
GATK.SB<-TRUE # TRUE selects the GATK strand-bias (SB) quality-filter column set defined below
maf.threshold.filter.to.use<-c(0.05) # MAF cutoff(s) used for rare-variant filtering
a.label<-"CoVarRun.noControl.AML.regions" # free-text label used in output file names
###########################################################################
## Load sample-exclusion lists: contaminated AOGC samples (no header) and the
## related/bad-sample table (with header).
contaminated.file<-"/media/UQCCG/UQCCG-Projects/AOGC_exome_chip/Phenotypes/contaminated_AOGC_SEQ_samples.txt"
contaminated<-read.table(contaminated.file,header=F,fill=TRUE,sep="\t",stringsAsFactors=FALSE)
#related.or.bad.file<- "/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis/related.or.bad.txt"
## NOTE(review): the local definition above is commented out, so this read
## depends on related.or.bad.file having been set in the active config section
## earlier -- fragile if config sections are reordered or removed.
related.or.bad<-read.table(related.or.bad.file,header=T,fill=TRUE,sep="\t",stringsAsFactors=FALSE)
## Load analysis packages and pipeline subroutines.
library(skatMeta) ## rare-variant burden/SKAT meta-analysis (seqMeta's predecessor)
#library(SKAT) ## skat method
library(GenomicFeatures)
library(HardyWeinberg)
library(Biostrings)
options(width=250,max.print=5000) # wide console output for inspecting big tables
code.dir<-"/media/UQCCG/Programming/VersionControl_GitRepository/UQCCG_Pipeline_Rscripts"
## NOTE(review): setwd() changes global state for the rest of the session;
## source(file.path(code.dir, ...)) would avoid that, but later code may rely
## on the working directory being code.dir -- confirm before changing.
setwd(code.dir)
source("annotate_SNPs_subroutines.r")
source("hwe.r")
###################################### load old AOGC count data (disabled) ######################3
## load("/media/Bioinform-D/Research/annovar/humandb/aogc.count.data.RData")
## #print(colnames(indels))
## print(colnames(geno.aogc))
## use.key<-build.key(geno.aogc,core.ann)
## insert.location<-70 ### this is where to add AOGC data INSERTS AFTER THE LOCATION
## ################################ add aogc
## ann<-readRDS("/media/scratch2/AOGC-NGS/Analysis/AOGC_sequnnce_LS/AOGC_sequence_10_LS_ANNOTATION.rds")
## Gene-annotation databases passed to ANNOVAR-style annotation (each adds 2 columns).
geneanno.DB<-c("refGene","knownGene","ensGene") # returns 2 extra columns
names(geneanno.DB)<-c("refGene","knownGene","ensGene")
############################################# POPULATION MAF FILTER - PART A
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
maf.threshold<-0.0 # MAF threshold for annovar calling zero useful to get back all results !!do not modify!!
maf.threshold.filter.to.use<-c(0.001,0.005,0.01,0.025) # NOTE(review): overwrites the c(0.05) set in the config section above
maf.threshold.filter.to.use<-sort(as.numeric(maf.threshold.filter.to.use))
## Population databases: "novel" filters (variant absent => novel) ...
filter.cols.novel.use<-c("NHBLI_6500_ANNOVAR_ALL","NHBLI_6500_ALL","NHLBI_5400_ALL","NHLBI_5400_EUR","NHLBI_5400_AFR","1000genome","1000genome_asian","1000genome_mine","snp141","snp141_clinical","snp137","CG69","EUR_ASN_AFR_INDEL","AOGC-NGS_ALL","AOGC-NGS_ALL_OLD","Chinese") ##
## ... and MAF filters (variant kept only if frequency below threshold).
filter.cols.maf.use<-c("NHBLI_6500_ANNOVAR_ALL","NHBLI_6500_ALL","NHBLI_6500_EA","NHBLI_6500_AA","NHLBI_5400_ALL","1000genome","snp141","snp137","snp135")
maf.threshold.filter<-maf.threshold.filter.to.use
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
##################################################### DEFINE A GENE LIST #####################################################
##################################################### DEFINE A GENE LIST #####################################################
##################################################### DEFINE A GENE LIST #####################################################
######################################################################################################################################
######################################################################################################################################
######################################################################################################################################
######################################################################################################################################
############################################ SET UP FUNCTIONAL FILTERS #######################################################
####################### MUTATION TYPE DEFINITIONS
## All mutation classes ANNOVAR can emit, and the subsets considered interesting
## (protein-altering / splicing). "interesting.mutations.use" additionally keeps
## ncRNA exonic variants.
possible.mutations<-c("frameshift substitution","nonframeshift substitution","downstream","frameshift deletion","frameshift insertion","intergenic","intronic","ncRNA_exonic","ncRNA_intronic","ncRNA_splicing","ncRNA_UTR3","ncRNA_UTR5","ncRNA_UTR5;ncRNA_UTR3","nonframeshift deletion","nonframeshift insertion","nonsynonymous SNV","splicing","stopgain SNV","stoploss SNV","synonymous SNV","unknown","upstream","upstream;downstream","UTR3","UTR5","UTR5;UTR3")
interesting.coding.mutations<-c("frameshift substitution","nonframeshift substitution","nonframeshift deletion","nonframeshift insertion","frameshift deletion","frameshift insertion","nonsynonymous SNV","stopgain SNV","stoploss SNV","splicing")
interesting.mutations.use<-c("frameshift substitution","nonframeshift substitution","nonframeshift deletion","nonframeshift insertion","frameshift deletion","frameshift insertion","nonsynonymous SNV","stopgain SNV","stoploss SNV","splicing","ncRNA_exonic")
wanted.noncoding.subtypes<-c("miRNA","lincRNA") # filter by interesting.to.prefilter and vep.noncoding so we don't get ncRNA intronic :: use gerp.score.threshold.low only for these subtypes
interesting.to.prefilter<-c("UTR3","UTR5","UTR5;UTR3","snoRNA","snRNA","antisense","sense_intronic","ncRNA_exonic","ncRNA_splicing") #use gerp.score.threshold
extra.vep.annotations<-c("Uploaded_variation","Gene","Feature","Protein_position","Amino_acids")
#"not_assigned",
## Ensembl VEP consequence terms, split into coding / non-coding / unwanted sets.
vep.types<-c("stop_gained","stop_lost","missense_variant","splice_acceptor_variant","splice_donor_variant","splice_region_variant","initiator_codon_variant","stop_retained_variant","incomplete_terminal_codon_variant","frameshift_variant","inframe_deletion","inframe_insertion","5_prime_UTR_variant","3_prime_UTR_variant","non_coding_exon_variant","NC_stop_gained","NC_stop_lost","NC_splice_acceptor_variant","NC_splice_donor_variant","NC_splice_region_variant","NC_initiator_codon_variant","NC_stop_retained_variant","NC_non_coding_exon_variant","NC_incomplete_terminal_codon_variant","NC_3_prime_UTR_variant","mature_miRNA_variant","NC_5_prime_UTR_variant","TF_binding_site_variant","TFBS_ablation","TFBS_amplification","regulatory_region_variant","intron_variant","NC_intron_variant","synonymous_variant","coding_sequence_variant","NC_synonymous_variant","upstream_gene_variant","downstream_gene_variant","intergenic_variant","NC_intergenic_variant","NMD_transcript_variant","nc_transcript_variant","NC_nc_transcript_variant","feature_truncation","feature_elongation")
## vep.types<-c( "not_assigned","stop_gained","stop_lost","stop_lost,NMD_transcript_variant","stop_gained,splice_region_variant,NMD_transcript_variant","initiator_codon_variant,splice_region_variant","splice_region_variant,3_prime_UTR_variant","stop_gained,NMD_transcript_variant","missense_variant,splice_region_variant","missense_variant","splice_acceptor_variant","splice_acceptor_variant,nc_transcript_variant","splice_region_variant,3_prime_UTR_variant,NMD_transcript_variant","splice_donor_variant,nc_transcript_variant","splice_region_variant,intron_variant,NMD_transcript_variant","splice_donor_variant","splice_region_variant","splice_region_variant,5_prime_UTR_variant","splice_region_variant,synonymous_variant","splice_region_variant,intron_variant,nc_transcript_variant","splice_region_variant,non_coding_exon_variant,nc_transcript_variant","missense_variant,NMD_transcript_variant","splice_region_variant,intron_variant","NMD_transcript_variant","intron_variant,NMD_transcript_variant","mature_miRNA_variant","5_prime_UTR_variant","5_prime_UTR_variant,NMD_transcript_variant","non_coding_exon_variant,nc_transcript_variant","3_prime_UTR_variant,NMD_transcript_variant","non_coding_exon_variant","TF_binding_site_variant","intron_variant,nc_transcript_variant","synonymous_variant,NMD_transcript_variant","3_prime_UTR_variant","regulatory_region_variant","upstream_gene_variant","downstream_gene_variant","intergenic_variant","intron_variant","synonymous_variant")
#"not_assigned",
vep.coding<-c("stop_gained","stop_lost","missense_variant","splice_acceptor_variant","splice_donor_variant","splice_region_variant","initiator_codon_variant","stop_retained_variant","incomplete_terminal_codon_variant","frameshift_variant","inframe_deletion","inframe_insertion")
vep.noncoding<-c("5_prime_UTR_variant","3_prime_UTR_variant","non_coding_exon_variant","NC_stop_gained","NC_stop_lost","NC_splice_acceptor_variant","NC_splice_donor_variant","NC_splice_region_variant","NC_initiator_codon_variant","NC_stop_retained_variant","NC_non_coding_exon_variant","NC_incomplete_terminal_codon_variant","NC_3_prime_UTR_variant","mature_miRNA_variant","NC_5_prime_UTR_variant","TF_binding_site_variant","TFBS_ablation","TFBS_amplification","regulatory_region_variant")
vep.unwanted<-c("intron_variant","NC_intron_variant","synonymous_variant","coding_sequence_variant","NC_synonymous_variant","upstream_gene_variant","downstream_gene_variant","intergenic_variant","NC_intergenic_variant","NMD_transcript_variant","nc_transcript_variant","NC_nc_transcript_variant","feature_truncation","feature_elongation")
missense.variant<-c("nonsynonymous SNV","missense_variant")
## Hardy-Weinberg and conservation (GERP) thresholds for variant filtering.
hwe.control.threshold<-1e-8
gerp.score.threshold.high<-2.5 # gerp score >= will be included
gerp.score.threshold.low<-2.0 # gerp score >= will be included
gerp.score.threshold.unknown<-0
#generic.filter.DB
maf.threshold.filter.to.use<-sort(maf.threshold.filter.to.use)
maf.threshold.filter<-maf.threshold.filter.to.use
interesting.mutations<-interesting.mutations.use
## Quality-filter definition: five parallel vectors (data column label, output
## name, cutoff, type, comparison direction) -- they must stay the same length
## and in the same order.
if(GATK.SB){
global.quality.labs<-c("QUAL","QD","HRun","SB","FILTER","FILTER","PolyPhen.scores","PolyPhen.scores","SIFT.scores","mut.taster::score","phylo::score","PolyPhen.desc","SIFT.desc","GERP::score","GERP::score","GERP::score","MAF.ALL","MAF.HIGH","MAF.LOW","TYPE") ### THESE ARE THE COLUMN LABELS IN THE DATA these become the "good.qual" filter
global.quality.names<-c("QUAL","QD","HRun","SB","FILTER_PASS","FILTER_100","PolyPhen.low","PolyPhen.high","SIFT.high","mut.taster.high","phylo.high","PolyPhen.bad","SIFT.bad","GERP.high","GERP.low","GERP.unknown","MAF.ALL","MAF.HIGH","MAF.LOW","flat") ### THESE ARE THE COLUMN LABELS IN the quality.filter TABLE
#global.quality.cut<-c(50,0.5,5,1,"PASS","TruthSensitivityTranche99.90to100.00",0.1,0.4,0.4,0.4,0.4,"damaging","deleterious",2,2,0.25,0.25,0.25)
global.quality.cut<-c(50,0.5,5,1,"PASS","TruthSensitivityTranche99.90to100.00",0.1,0.4,0.4,0.4,0.4,"damaging","deleterious",gerp.score.threshold.high,gerp.score.threshold.low,gerp.score.threshold.unknown,0.25,0.25,0.25,"flat")
global.quality.type<-c("numeric","numeric","numeric","numeric","factor","factor","numeric","numeric","numeric","numeric","numeric","factor","factor","numeric","numeric","numeric","numeric","numeric","numeric","factor")
global.quality.dirn<-c("greater","greater","less","less","exact","exact","greater","greater","greater","greater","greater","ends_with","exact","greater","greater","exact","greater","greater","greater","ends_with")
}else{
global.quality.labs<-c("QUAL","QD","HRun","FS","FILTER","FILTER") ### these become the "good.qual" filter
global.quality.names<-c("QUAL","QD","HRun","FS","FILTER_PASS","FILTER_100")
global.quality.cut<-c(50,0.5,5,60,"PASS","TruthSensitivityTranche99.90to100.00")
global.quality.type<-c("numeric","numeric","numeric","numeric","factor","factor")
global.quality.dirn<-c("greater","greater","less","less","exact","exact")
}
## NOTE(review): global.quality.labs contains duplicate names ("FILTER",
## "PolyPhen.scores", "GERP::score"), so name-based lookup on the cut/dirn
## vectors only finds the FIRST occurrence -- positional access must be used
## for the later duplicates; confirm downstream code does so.
names(global.quality.cut)<-global.quality.labs
names(global.quality.dirn)<-global.quality.labs
names(global.quality.type)<-global.quality.names
global.labs<-unique(global.quality.labs)
## Bare expressions: echo the filter configuration to the console for the log.
global.quality.cut
global.quality.dirn
global.quality.type
quality.cut<-global.quality.cut
quality.type<-global.quality.type
quality.dirn<-global.quality.dirn
######################################################################################################################
#"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-03-16_AllAMLandLung/Analysis/related.or.bad.txt"
## NOTE(review): the settings and library/source calls below duplicate the ones
## earlier in the file (the variables are simply re-assigned to the same
## values). Harmless but redundant -- candidates for removal once confirmed.
remove.from.controls<-c() # expand.labels.to.samples(remove.from.controls,control.samples)
remove.from.all.samples<-c() #expand.labels.to.samples(remove.from.all.samples,all.samples)
remove.cols<-c()
#regions.file<-"/media/scratch2/AOGC-NGS/GFOS/gefos.seq/METHODS/0613-skatmeta-gefos/static/Homo_sapiens.GRCh37.70.protein_coding.genespace_boundaries.5k.split100k.txt"
core.ann<-c("chr","start","end","REF","ALT","TYPE") # output to analysis programs; needed for column labels
dont.build.summary<-FALSE ## FALSE => (re)build the summary tables
GATK.SB<-TRUE
maf.threshold.filter.to.use<-c(0.05)
a.label<-"CoVarRun.noControl.AML.regions"
###########################################################################
library(skatMeta) ## rare-variant burden/SKAT meta-analysis
#library(SKAT) ## skat method
library(GenomicFeatures)
library(HardyWeinberg)
library(Biostrings)
options(width=250,max.print=5000)
code.dir<-"/media/UQCCG/Programming/VersionControl_GitRepository/UQCCG_Pipeline_Rscripts"
setwd(code.dir) # NOTE(review): changes working directory globally (again)
source("annotate_SNPs_subroutines.r")
source("hwe.r")
###################################### load old AOGC count data (disabled) ######################3
## load("/media/Bioinform-D/Research/annovar/humandb/aogc.count.data.RData")
## #print(colnames(indels))
## print(colnames(geno.aogc))
## use.key<-build.key(geno.aogc,core.ann)
## insert.location<-70 ### this is where to add AOGC data INSERTS AFTER THE LOCATION
## ################################ add aogc
## ann<-readRDS("/media/scratch2/AOGC-NGS/Analysis/AOGC_sequnnce_LS/AOGC_sequence_10_LS_ANNOTATION.rds")
geneanno.DB<-c("refGene","knownGene","ensGene") # returns 2 extra columns
names(geneanno.DB)<-c("refGene","knownGene","ensGene")
############################################# POPULATION MAF FILTER - PART A
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
## NOTE(review): second, slightly different copy of the MAF/functional filter
## setup above; since it runs later, THESE values win. Differences vs the first
## copy: thresholds drop 0.005, filter.cols.maf.use adds "PopFreqMax", and the
## mutation-type vectors add unsuffixed "stopgain"/"stoploss" labels (newer
## ANNOVAR omits the " SNV" suffix).
maf.threshold<-0.0 # MAF threshold for annovar calling zero useful to get back all results !!do not modify!!
maf.threshold.filter.to.use<-c(0.001,0.01,0.025)
maf.threshold.filter.to.use<-sort(as.numeric(maf.threshold.filter.to.use))
filter.cols.novel.use<-c("NHBLI_6500_ANNOVAR_ALL","NHBLI_6500_ALL","NHLBI_5400_ALL","NHLBI_5400_EUR","NHLBI_5400_AFR","1000genome","1000genome_asian","1000genome_mine","snp141","snp141_clinical","snp137","CG69","EUR_ASN_AFR_INDEL","AOGC-NGS_ALL","AOGC-NGS_ALL_OLD","Chinese") ##
filter.cols.maf.use<-c("PopFreqMax","NHBLI_6500_ANNOVAR_ALL","NHBLI_6500_ALL","NHBLI_6500_EA","NHBLI_6500_AA","NHLBI_5400_ALL","1000genome","snp141","snp137","snp135")
maf.threshold.filter<-maf.threshold.filter.to.use
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
##################################################### DEFINE A GENE LIST #####################################################
##################################################### DEFINE A GENE LIST #####################################################
##################################################### DEFINE A GENE LIST #####################################################
######################################################################################################################################
######################################################################################################################################
######################################################################################################################################
######################################################################################################################################
############################################ SET UP FUNCTIONAL FILTERS #######################################################
####################### MUTATION TYPE DEFINITIONS
possible.mutations<-c("frameshift substitution","nonframeshift substitution","downstream","frameshift deletion","frameshift insertion","intergenic","intronic","ncRNA_exonic","ncRNA_intronic","ncRNA_splicing","ncRNA_UTR3","ncRNA_UTR5","ncRNA_UTR5;ncRNA_UTR3","nonframeshift deletion","nonframeshift insertion","nonsynonymous SNV","splicing","stopgain SNV","stoploss SNV","synonymous SNV","unknown","upstream","upstream;downstream","UTR3","UTR5","UTR5;UTR3","stopgain","stoploss")
interesting.coding.mutations<-c("frameshift substitution","nonframeshift substitution","nonframeshift deletion","nonframeshift insertion","frameshift deletion","frameshift insertion","nonsynonymous SNV","stopgain SNV","stoploss SNV","splicing","stopgain","stoploss")
interesting.mutations.use<-c("frameshift substitution","nonframeshift substitution","nonframeshift deletion","nonframeshift insertion","frameshift deletion","frameshift insertion","nonsynonymous SNV","stopgain SNV","stoploss SNV","splicing","ncRNA_exonic","stopgain","stoploss")
wanted.noncoding.subtypes<-c("miRNA","lincRNA") # filter by interesting.to.prefilter and vep.noncoding so we don't get ncRNA intronic :: use gerp.score.threshold.low only for these subtypes
interesting.to.prefilter<-c("UTR3","UTR5","UTR5;UTR3","snoRNA","snRNA","antisense","sense_intronic","ncRNA_exonic","ncRNA_splicing") #use gerp.score.threshold
extra.vep.annotations<-c("Uploaded_variation","Gene","Feature","Protein_position","Amino_acids")
#"not_assigned",
vep.types<-c("stop_gained","stop_lost","missense_variant","splice_acceptor_variant","splice_donor_variant","splice_region_variant","initiator_codon_variant","stop_retained_variant","incomplete_terminal_codon_variant","frameshift_variant","inframe_deletion","inframe_insertion","5_prime_UTR_variant","3_prime_UTR_variant","non_coding_exon_variant","NC_stop_gained","NC_stop_lost","NC_splice_acceptor_variant","NC_splice_donor_variant","NC_splice_region_variant","NC_initiator_codon_variant","NC_stop_retained_variant","NC_non_coding_exon_variant","NC_incomplete_terminal_codon_variant","NC_3_prime_UTR_variant","mature_miRNA_variant","NC_5_prime_UTR_variant","TF_binding_site_variant","TFBS_ablation","TFBS_amplification","regulatory_region_variant","intron_variant","NC_intron_variant","synonymous_variant","coding_sequence_variant","NC_synonymous_variant","upstream_gene_variant","downstream_gene_variant","intergenic_variant","NC_intergenic_variant","NMD_transcript_variant","nc_transcript_variant","NC_nc_transcript_variant","feature_truncation","feature_elongation")
## vep.types<-c( "not_assigned","stop_gained","stop_lost","stop_lost,NMD_transcript_variant","stop_gained,splice_region_variant,NMD_transcript_variant","initiator_codon_variant,splice_region_variant","splice_region_variant,3_prime_UTR_variant","stop_gained,NMD_transcript_variant","missense_variant,splice_region_variant","missense_variant","splice_acceptor_variant","splice_acceptor_variant,nc_transcript_variant","splice_region_variant,3_prime_UTR_variant,NMD_transcript_variant","splice_donor_variant,nc_transcript_variant","splice_region_variant,intron_variant,NMD_transcript_variant","splice_donor_variant","splice_region_variant","splice_region_variant,5_prime_UTR_variant","splice_region_variant,synonymous_variant","splice_region_variant,intron_variant,nc_transcript_variant","splice_region_variant,non_coding_exon_variant,nc_transcript_variant","missense_variant,NMD_transcript_variant","splice_region_variant,intron_variant","NMD_transcript_variant","intron_variant,NMD_transcript_variant","mature_miRNA_variant","5_prime_UTR_variant","5_prime_UTR_variant,NMD_transcript_variant","non_coding_exon_variant,nc_transcript_variant","3_prime_UTR_variant,NMD_transcript_variant","non_coding_exon_variant","TF_binding_site_variant","intron_variant,nc_transcript_variant","synonymous_variant,NMD_transcript_variant","3_prime_UTR_variant","regulatory_region_variant","upstream_gene_variant","downstream_gene_variant","intergenic_variant","intron_variant","synonymous_variant")
#"not_assigned",
vep.coding<-c("stop_gained","stop_lost","missense_variant","splice_acceptor_variant","splice_donor_variant","splice_region_variant","initiator_codon_variant","stop_retained_variant","incomplete_terminal_codon_variant","frameshift_variant","inframe_deletion","inframe_insertion")
vep.noncoding<-c("5_prime_UTR_variant","3_prime_UTR_variant","non_coding_exon_variant","NC_stop_gained","NC_stop_lost","NC_splice_acceptor_variant","NC_splice_donor_variant","NC_splice_region_variant","NC_initiator_codon_variant","NC_stop_retained_variant","NC_non_coding_exon_variant","NC_incomplete_terminal_codon_variant","NC_3_prime_UTR_variant","mature_miRNA_variant","NC_5_prime_UTR_variant","TF_binding_site_variant","TFBS_ablation","TFBS_amplification","regulatory_region_variant")
vep.unwanted<-c("intron_variant","NC_intron_variant","synonymous_variant","coding_sequence_variant","NC_synonymous_variant","upstream_gene_variant","downstream_gene_variant","intergenic_variant","NC_intergenic_variant","NMD_transcript_variant","nc_transcript_variant","NC_nc_transcript_variant","feature_truncation","feature_elongation")
missense.variant<-c("nonsynonymous SNV","missense_variant")
hwe.control.threshold<-1e-8
gerp.score.threshold.high<-2.5 # gerp score >= will be included
gerp.score.threshold.low<-2.0 # gerp score >= will be included
gerp.score.threshold.unknown<-0
#generic.filter.DB
maf.threshold.filter.to.use<-sort(maf.threshold.filter.to.use)
maf.threshold.filter<-maf.threshold.filter.to.use
interesting.mutations<-interesting.mutations.use
## Quality-filter definition (second copy): five parallel vectors that must stay
## the same length and in the same order.
if(GATK.SB){
global.quality.labs<-c("QUAL","QD","HRun","SB","FILTER","FILTER","PolyPhen.scores","PolyPhen.scores","SIFT.scores","mut.taster::score","phylo::score","PolyPhen.desc","SIFT.desc","GERP::score","GERP::score","GERP::score","MAF.ALL","MAF.HIGH","MAF.LOW","TYPE") ### THESE ARE THE COLUMN LABELS IN THE DATA these become the "good.qual" filter
global.quality.names<-c("QUAL","QD","HRun","SB","FILTER_PASS","FILTER_100","PolyPhen.low","PolyPhen.high","SIFT.high","mut.taster.high","phylo.high","PolyPhen.bad","SIFT.bad","GERP.high","GERP.low","GERP.unknown","MAF.ALL","MAF.HIGH","MAF.LOW","flat") ### THESE ARE THE COLUMN LABELS IN the quality.filter TABLE
#global.quality.cut<-c(50,0.5,5,1,"PASS","TruthSensitivityTranche99.90to100.00",0.1,0.4,0.4,0.4,0.4,"damaging","deleterious",2,2,0.25,0.25,0.25)
global.quality.cut<-c(50,0.5,5,1,"PASS","TruthSensitivityTranche99.90to100.00",0.1,0.4,0.4,0.4,0.4,"damaging","deleterious",gerp.score.threshold.high,gerp.score.threshold.low,gerp.score.threshold.unknown,0.25,0.25,0.25,"flat")
global.quality.type<-c("numeric","numeric","numeric","numeric","factor","factor","numeric","numeric","numeric","numeric","numeric","factor","factor","numeric","numeric","numeric","numeric","numeric","numeric","factor")
global.quality.dirn<-c("greater","greater","less","less","exact","exact","greater","greater","greater","greater","greater","ends_with","exact","greater","greater","exact","greater","greater","greater","ends_with")
}else{
global.quality.labs<-c("QUAL","QD","HRun","FS","FILTER","FILTER") ### these become the "good.qual" filter
global.quality.names<-c("QUAL","QD","HRun","FS","FILTER_PASS","FILTER_100")
global.quality.cut<-c(50,0.5,5,60,"PASS","TruthSensitivityTranche99.90to100.00")
global.quality.type<-c("numeric","numeric","numeric","numeric","factor","factor")
global.quality.dirn<-c("greater","greater","less","less","exact","exact")
}
## NOTE(review): duplicate names in global.quality.labs mean name-based lookup
## finds only the first occurrence; positional access is required for the rest.
names(global.quality.cut)<-global.quality.labs
names(global.quality.dirn)<-global.quality.labs
names(global.quality.type)<-global.quality.names
global.labs<-unique(global.quality.labs)
## Bare expressions: echo the filter configuration to the console for the log.
global.quality.cut
global.quality.dirn
global.quality.type
quality.cut<-global.quality.cut
quality.type<-global.quality.type
quality.dirn<-global.quality.dirn
######################################################################################################################
## Sequencing QC / capture-method table: used to split AOGC-NGS samples into
## Nimblegen-capture vs Illumina-capture genotype columns.
seq.type.file<-"/media/UQCCG/Sequencing/Data/QC for all samples summary/Coverage_QC/QC_stat_BAM_Tue_Jul_07_2015.txt"
#seq.type.file<-"/media/UQCCG/Sequencing/Data/QC for all samples summary/Coverage_QC/QC_stat_SAMPLE_Tue_Oct_14_2014.txt"
seq.type<-read.delim(seq.type.file,header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
seq.type[1:5,] # bare expression: echo first rows to the console for the log
## seq.type<-seq.type[seq.type[,"Project"]=="AOGC-NGS",]
nim.samples<-seq.type[seq.type[,"Project"]=="AOGC-NGS" & seq.type[,"Capture.Method"]=="TruD:NimX","Sample"]
ill.samples<-seq.type[seq.type[,"Project"]=="AOGC-NGS" & seq.type[,"Capture.Method"]=="TruD:TruX","Sample"]
## Genotype columns in the merged table are named "<Sample>.GT".
nim.samples<-paste(nim.samples,"GT",sep=".")
ill.samples<-paste(ill.samples,"GT",sep=".")
length(nim.samples)
length(ill.samples)
############################################ Nimblegen and illumina capture loci ####################################################
## hg19 chromosome lengths, used below for coverage calculations.
library("BSgenome.Hsapiens.UCSC.hg19")
the.chroms<-seqlengths(Hsapiens)
## load("/media/UQCCG/Sequencing/Data/Genomes/hg19/Human_Exome_Targets_illumina_v2_hg19_targets.RData")
## ill.gr<-data.gr
## load("/media/UQCCG/Sequencing/Data/Genomes/hg19/Human_Exome_Targets_Nimble_v2_hg19_targets.RData")
## nim.gr<-data.gr
## genome(ill.gr)<-"hg19"
## genome(nim.gr)<-"hg19"
## names(ill.gr)
## the.chromo<- as.character(unique(seqnames(ill.gr)))
## sum(the.chromo!=as.character(unique(seqnames(nim.gr))))==0 ## must be true else chr in different order
## human.chromlens<-the.chroms[the.chromo]
## ## ill.gr<-ill.gr+200
## ## nim.gr<-nim.gr+200
## ## overlaps<-overlapsAny(ill.gr,nim.gr)
## ## possible.loci.ori<-ill.gr[overlaps]
## ill.gr<-reduce(ill.gr)
## nim.gr<-reduce(nim.gr)
## cov.ill.gr<-coverage(ill.gr,weight=5,width=human.chromlens) ## Problems exist here is the ir.gerp are not unique get coverage*weight
## cov.nim.gr<-coverage(nim.gr,weight=5,width=human.chromlens) ## Problems exist here is the ir.gerp are not unique get coverage*weight
## cov.nim.gr<-cov.nim.gr[names(cov.ill.gr)] # make sure in same order
## cov.all<-cov.nim.gr+cov.ill.gr
## x<- slice(cov.all,lower=9,upper=11) ### over is 10 otherwise is 5
## x[[5]][1:5]
## x
## ########check
## regionViews<-x
## the.counts<-{}
## order.chromos<-names(regionViews)
## # ik<-13
## for (ik in 1:length(order.chromos)){
## chromo<-order.chromos[ik]
## ## print( chromo)
## a.chr<-chromo
## a.start<-start(regionViews[[chromo]])
## a.end<-end(regionViews[[chromo]])
## a.width<-width(regionViews[[chromo]])
## a.set<-cbind(a.chr,a.start,a.end,a.width)
## the.counts<-rbind(the.counts,a.set)
## }
## colnames(the.counts) <- c("chr","start","end","length")
## ## }) # system.time
## the.counts[1:5,]
## write.table(the.counts,file="Common_target_loci_between_Nimblegen_v2.and.v3.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
## save(list=c("the.counts"),file="Common_target_loci_between_Nimblegen_v2.and.v3.Rdata")
## getwd()
#load("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis/Common_target_loci_between_Nimblegen_v2.and.v3.Rdata")
#load("/media/UQCCG/Sequencing/Data/Genomes/hg19/NexteraRapidCapture_Exome_TargetedRegions_hg19_targets.RData")
######################################################################################################################################
## gene.gr<-GRanges(seqnames =gene.list[,"CHR"],ranges = IRanges(start=as.numeric(gene.list[,"START"]),end=as.numeric(gene.list[,"END"])),strand="+")
## length(gene.gr)
## length(reduce(gene.gr))
## location.key<-build.key(gene.list,c("CHR","START","END"))
## unique.locations<-unique(location.key)
## length(unique.locations)
## dim(gene.list)
## posns<-grep("GRIA1.",gene.list[,target],fixed=TRUE)
## gene.list[posns,]
## vep.coding<-c("stop_gained","stop_lost","missense_variant","initiator_codon_variant","stop_retained_variant","incomplete_terminal_codon_variant","frameshift_variant","inframe_deletion","inframe_insertion")
## vep.other.coding<-c("synonymous_variant","coding_sequence_variant")
## Read the master sample sheet (CSV; path in the.sample.sheet, set earlier in
## the file) and define the phenotype column(s) to analyse.
## Interspersed bare expressions (dim, table, ...) are interactive inspection.
the.sample.sheet
sample.sheet.full<-read.delim(the.sample.sheet,header=T,sep=",",fill=TRUE,stringsAsFactors=FALSE)
sample.sheet.full[1:5,1:10]
colnames(sample.sheet.full)
dim(sample.sheet.full)
table(sample.sheet.full[,"SampleProject"])
## names<-colnames(sample.sheet.full.rep)[ colnames(sample.sheet.full.rep) %in% colnames(sample.sheet.full)]
## pheno.types: value = sample-sheet column holding the phenotype;
## name = label used for output columns (here both "SampleProject").
pheno.types<-c("SampleProject") ## vales is column header
names(pheno.types)<-c("SampleProject") ### name is output columns
## names
## sample.sheet.full.1<-sample.sheet.full[,names]
## sample.sheet.full.2<-sample.sheet.full.rep[,names]
## sample.sheet.full<-rbind(sample.sheet.full.2,sample.sheet.full.1)
##### fix 0 and 9 for missing to NA
## pheno.types<-c("BMD_EFF_STD_HIP","BMD_EFF_STD_RAD","BMD_EFF_STD_LS","BMD_EFF_STD_FN","EVER_FX_50_EXCL_TRIVIAL")
## names(pheno.types)<-c("HIP","RAD","LS","FN","FX")
######### Check and fix the sample sheet
## Per-sample sequencing QC table (seq.type is loaded earlier in the file).
## Identify which sample-sheet entries belong to the AML sequencing projects
## and build the control mask used throughout the coverage summaries below.
coverage<-seq.type # read.delim("/media/UQCCG/Sequencing/Data/QC for all samples summary/Coverage_QC/QC_stat_SAMPLE_Tue_Oct_14_2014.txt",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
dim(coverage)
coverage[1:5,]
unique(coverage[,"Project"])
aml.projects<-c("AML-exome","RSGB_AML","AMAS", "TGCM-AML")
aml.samples<-coverage[coverage[,"Project"] %in% aml.projects,"Sample"]
length(aml.samples)
## aml.have: sample-sheet rows whose participant was sequenced in an AML project
aml.have<-sample.sheet.full[,"ParticipantCode"] %in% aml.samples
table(sample.sheet.full[,"SampleProject"])
table(sample.sheet.full[aml.have,"SampleProject"])
table(sample.sheet.full[!aml.have,"SampleProject"])
## a.control: logical mask over sample.sheet.full rows (reused repeatedly below)
a.control<-sample.sheet.full[,"SampleProject"]=="Control"
#sample.sheet.full[aml.have & a.control,]
## cg.samples<-read.delim("/media/UQCCG/Sequencing/CompleteGenomics/917_Data_Delivery_091214.csv",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
## cg.samples[1:5,]
## recodes<-read.delim("/media/UQCCG/Sequencing/CompleteGenomics/RecodingSampleID.csv",header=T,sep=",",fill=TRUE,stringsAsFactors=FALSE)
## recodes[1:5,]
## analysis.samples<-read.delim("/media/UQCCG/Sequencing/CompleteGenomics/regions_Jonathan/rosetta.txt",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
## analysis.samples[1:5,]
################################################################### AOGC STATS
## Join the AOGC phenotype file onto the current sample sheet (match on
## participant code) and summarise recruitment centre and age at scan.
## NOTE(review): 'contaminated' is defined elsewhere in this file.
the.sample.sheet.aogc<-"/media/UQCCG/UQCCG-Projects/AOGC_exome_chip/Phenotypes/AOGC_HBM_ALL_PHENOTYPES_RESIDUALS_UPDATED FX OPTIONS.txt"
sum(sample.sheet.full[,"ParticipantCode"] %in% contaminated[,1])
sample.sheet.full.aogc<-read.delim(the.sample.sheet.aogc,header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
sample.sheet.full.aogc[1:5,]
posns<-match(sample.sheet.full[,"ParticipantCode"],sample.sheet.full.aogc[,"PATIENT"])
missing<-is.na(posns)
sum(missing)
## keep only AOGC rows matched to the sample sheet, in sample-sheet order
sample.sheet.full.aogc<-sample.sheet.full.aogc[posns[!missing],]
dim(sample.sheet.full.aogc)
table(sample.sheet.full.aogc$CENTRE) ## center 15 is saliva (NZ cohort)
mean((sample.sheet.full.aogc$AGE_SCAN),na.rm=TRUE)
range((sample.sheet.full.aogc$AGE_SCAN),na.rm=TRUE)
################################################################## GET COVERAGE STATS
## 203 AOGC-02-0395 38.32074
## 222 AOGC-02-0441 47.39313
## 249 AOGC-02-0512 36.96861
## 250 AOGC-02-0513 26.59086
## 270 AOGC-03-0017 49.88467
## 284 AOGC-03-0046 49.70396
## 286 AOGC-03-0049 49.17130
## 407 AOGC-08-0167 48.66813
## 408 AOGC-08-0169 44.18966
## bad.coverage<-the.coverage[,target]<50
## Join per-sample coverage QC onto the sample sheet and summarise the target
## capture metric for controls vs cases; collect low-coverage control samples.
posns<-match(sample.sheet.full[,"ParticipantCode"],coverage[,"Sample"])
missing<-is.na(posns)
sum(missing) ## samples with no coverage QC record
sample.sheet.full[missing,"ParticipantCode"]
the.coverage<-coverage[posns,]
the.coverage<-cbind(sample.sheet.full,the.coverage)
the.coverage[a.control,][1:5,]
target<-"percent.ccds.gt.10" ## QC metric used for the 70% coverage threshold
## target<-"total_reads"
## FIX: bad.cov was previously first used (in the mean() below) before it was
## defined further down this section; define it up front. NA where the metric
## is missing (sample absent from the QC table) or non-numeric.
bad.cov<-as.numeric(the.coverage[,target])<70
mean(the.coverage[a.control & !bad.cov ,target])
median(the.coverage[a.control,target])
range(the.coverage[a.control,target])
## sd(the.coverage[a.control,target])
## table(the.coverage[a.control,"Capture.Method"])
mean(the.coverage[!a.control,target])
median(the.coverage[!a.control,target])
range(the.coverage[!a.control,target])
## sd(the.coverage[!a.control,target])
## table(the.coverage[!a.control,"Capture.Method"])
## FIX: the low-coverage control lookup previously indexed the FULL table with
## a logical vector computed only on the control rows (length mismatch =>
## misaligned/recycled rows); select "control AND low coverage" in one
## row-aligned mask instead. which() drops the NA entries of bad.cov.
the.coverage[which(a.control & bad.cov),c("Sample",target)]
bad.coverge.sample<-the.coverage[which(a.control & bad.cov),"Sample"]
bad.coverge.sample<-bad.coverge.sample[!is.na(bad.coverge.sample)] ## drop samples with no QC record
## mean(the.coverage[!a.control,c("percent.ccds.gt.10")])
## the.coverage[a.control,c("Sample","percent.ccds.gt.10")]
## the.coverage[!a.control,c("Sample","percent.ccds.gt.10")]
########################################## coverage stats
## Define the case/control coding used for the association analysis.
## Assumes the SampleProject column contains exactly the classes
## "Control" (coded 0) and "AML" (coded 1); anything else is a bad sample.
Capture<-coverage[posns,"Capture.Method"]
#cbind(sample.sheet.full,Capture)
############## ASSUME HERE THAT SampleProjuect has classes Control and AML
pheno.types<-c("SampleProject")
names(pheno.types)<-c("SampleProject")
table(sample.sheet.full[,pheno.types[1]])
case.control<-c("SampleProject")
case.control.classes<-c(0,1)
names(case.control.classes)<-c("Control","AML")
case.control.classes
## participants that are neither AML nor Control are excluded downstream
bad.samples<-sample.sheet.full[ (sample.sheet.full[,"SampleProject"]!="AML" & sample.sheet.full[,"SampleProject"]!="Control" ) ,"ParticipantCode"] # "1" "9" "63" "74" "83" "84" "99"
table(sample.sheet.full[ !(sample.sheet.full[,"ParticipantCode"] %in% bad.samples) ,"SampleProject"])
## AML Control
## 136 363
## Project-specific exclusions: for the CompleteGenomics AML run, also drop
## non-white affected samples, contaminated AOGC samples, and related samples.
## NOTE(review): the closing brace of this if() is not visible in this section
## of the file -- confirm it is closed downstream.
## NOTE(review): 'contaminated' and 'related.or.bad' are defined elsewhere.
if(project.name=="2014-10-09_AML_CompleteGenomics_HC."){
sample.sheet.full[1:5,]
tail(sample.sheet.full)
test<-((sample.sheet.full[,"Race"]!="W" | is.na(sample.sheet.full[,"Race"])) & sample.sheet.full[,"AffectionStatus"]==2)
sample.sheet.full[test,]
bad.samples<-sample.sheet.full[test,"ParticipantCode"] # "1" "9" "63" "74" "83" "84" "99"
contaminated.aogc<-sample.sheet.full[sample.sheet.full[,"ParticipantCode"] %in% contaminated[,1],"ParticipantCode"]
related.or.bad[,1]
bad.samples<-unique(c(bad.samples,contaminated.aogc,related.or.bad[,1]))
# ib<-1
## Recode unknown / missing case-control codes (not a declared class, NA, 0,
## or 9) to NA so those samples drop out of the phenotype subset later.
for(ib in 1:length(case.control)){
if(!(case.control[ib] %in% colnames(sample.sheet.full))){next}
sample.sheet.full[( !(sample.sheet.full[,case.control[ib]] %in% names(case.control.classes)) | is.na(sample.sheet.full[,case.control[ib]]) | sample.sheet.full[,case.control[ib]]==0 | sample.sheet.full[,case.control[ib]]==9) ,case.control[ib]]<-NA
}
#tapply(sample.sheet.full[,"SampleProject"],sample.sheet.full[,"SampleProject"],length)
sample.sheet.full[1:5,1:10]
control.samples<-{}
## standardise the sample-id column name used by the rest of the script
colnames(sample.sheet.full)[colnames(sample.sheet.full)=="ParticipantCode"]<-"SAMPLE"
all.samples<-sample.sheet.full[,"SAMPLE"]
length(all.samples)
## o.remove.all<-expand.labels.to.samples(remove.from.all.samples,all.samples)
## to.remove.samples<-unique(to.remove.all)
## remove.cols<-unique(c(remove.cols,to.remove.samples))
#### test fam list
## List genotype files in the analysis directory matching project.extension
## and preview the family/batch suffixes (last dot-separated token).
files<-dir(analysis.dir)
the.extension<-paste(project.extension,"$",sep="")
files<-files[grepl(the.extension ,files)]
toString( unique(unlist( mapply(function(x){x[length(x)]}, strsplit(gsub(the.extension,"",files),split=".",fixed=TRUE) ))) )
toString( files)
files
####
#############################################################################################################
######################################### Predefined variables required
##################################################################################
#### assume has format project.chr.fam.extension or chr.project.fam.extension
## Expected file naming: project.chr.fam.extension (or chr.project.fam...).
## When fam is "ALL"/"All"/"all", derive the actual fam labels from the file
## names (last dot-separated token after stripping the extension).
setwd(analysis.dir)
getwd()
files<-dir(analysis.dir)
the.extension<-paste(project.extension,"$",sep="")
files<-files[grepl(the.extension ,files)]
if(fam=="ALL" | fam=="All" | fam=="all"){
fam<-unique(unlist( mapply(function(x){x[length(x)]}, strsplit(gsub(the.extension,"",files),split=".",fixed=TRUE) )))
}
fam
## ifam<-2
################################ add aogc
## project.files
#
## Outer loops: over family/batch labels (ifam), then over per-chromosome
## genotype files (ichr). Both loops close much further down the file,
## beyond this section. The scalar seeds (ifam<-1, ichr<-1) support running
## the body interactively line-by-line.
ifam<-1
for(ifam in 1:length(fam)){
the.extension<-paste(fam[ifam],project.extension,"$",sep="")
project.files<-files[grepl(the.extension ,files)]
print(sort(paste("Doing: ",project.files,sep=""))) # project.files<-project.files[1:22]
indels<-{}
the.col<-{}
project.files
#
ichr<-1
for(ichr in 1:length(project.files)){ ### loop over chromosomes
setwd(analysis.dir)
## grep("Gene.Name",a.indel[,16])
## save(list=c("column.labels"),file="column.labels.RData")
## load("column.labels.RData")
## 67686
## read.delim(project.files[ichr],header=T,nrows=0,sep="\t",fill=TRUE,stringsAsFactors=FALSE,na.strings="",quote="")
## test<-read.delim(project.files[ichr],header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE,na.strings="",quote="")
## table(test[,1])
## test[67670:67686,1:5]
################## fast read ###########
## Fast file read: header via read.delim (one row), body via scan() into a
## flat character vector, then reshaped (column-major) and transposed into a
## character matrix with one row per variant.
column.labels<-read.delim(project.files[ichr],header=F,nrows=1,sep="\t",fill=TRUE,stringsAsFactors=FALSE,na.strings="",quote="\"")
num.vars<-dim(column.labels)[2]
a.indel<-scan(project.files[ichr],what=character(num.vars),skip=1,sep="\t",fill=TRUE,na.strings="",quote="\"")
num.lines<-length(a.indel)/(num.vars)
num.lines
dim(a.indel)<-c(num.vars,num.lines)
a.indel<-t(a.indel)
colnames(a.indel)<-column.labels
########################################
## load("2015-03-16_AllAMLandLung.BEST.chrALL.ACC_0.025.ALL.ALL_GENOTYPES_analysis.txt_SUBSET.RData")
## column.labels<-read.delim(project.files[ichr],header=F,nrows=1,sep="\t",fill=TRUE,stringsAsFactors=FALSE,na.strings="",quote="\"")
## length(column.labels)
## dim(a.indel)
## colnames(a.indel)<-column.labels
## load("2015-03-16_AllAMLandLung.BEST.chrALL.ACC_0.025.ALL.ALL_GENOTYPES_analysis.txt.RData")
## column.labels<-colnames(a.indel)
## a.indel<-as.matrix(a.indel)
## wanted.genes<-c("DDX41","TET2", "GATA2", "ASXL1", "NOTCH1", "IDH1", "JAK2","WT1","MLL","KRAS","FLT3","IDH2","IDH1","TP53","KIT","NPM1","JAK2","DNMT3A","TET2","RUNX1","NRAS","CEBPA","PTPN11","U2AF1","SMC1A","SMC3","PHF6","STAG2","RAD21","FAM5C","EZH2","HNRNPK","FANCA","FANCB","FANCC","FANCD1","FANCD2","FANCE","FANCF","FANCG","FANCI","BRIP1","FANCL","FANCM","PALB2","RAD51C","SLX4","ERCC4","APITD1","STRA13","C1orf86","C19orf40","C17orf70","SLX1","MUS81","ERCC1","FAN1","EME1","EME2","MRE11A","NBN1","RAD50","FAND1","BRCA1","BARD1","RAD51","RAD51B","RAD51D","XRCC2","XRCC3","RMI1","RMI2","BLM","TOP3A","RPA1","RPA2","RPA3","ATM","ATR","ATRIP","CHECK1","RAD9A","RAD17","CS","DLAT","DLD","DLST","FH","IDH1","IDH2","IDH3A","IDH3B","IDH3G","MDH1","MDH2","ACLY","ACO1","OGDH","ACO2","PC","PCK1","PCK2","PDHA1","PDHA2","PDHB","OGDHL","SDHA","SDHB","SDHC","SDHD","SUCLG2","SUCLG1","SUCLA2")
## wanted.genes2<-read.delim("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis/Gene_Lists/mam_new/ALL_genes.csv",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE,na.strings="",quote="")
## wanted.genes<-unique(c(wanted.genes,wanted.genes2[,1]))
## wanted<-test[,"Gene.Names"] %in% wanted.genes
## sum(wanted)
## a.indel<-test[wanted,]
## save(list=c("a.indel"),file="2015-03-16_AllAMLandLung.BEST.chrALL.ACC_0.025.ALL.ALL_GENOTYPES_analysis.txt_SUBSET.RData")
## Normalise chromosome naming to the "chr" prefix, then build a unique
## variant key (via the project helper build.key over core.ann columns) and
## use it as rownames.
the.chr<-unique(a.indel[,"chr"])
print(paste("Doing Chromosome ",the.chr))
print(the.chr)
if(sum(!grepl("^chr",the.chr))>0){
a.indel[,"chr"]<-paste("chr",a.indel[,"chr"],sep="")
}
a.indel[1:50,"chr"]
key<-build.key(a.indel,core.ann)
rownames(a.indel)<-key
rownames(a.indel)[1:5]
########################### REMOVE BAD SAMPLES HERE
## analysis.samples[1:5,]
## Remove all per-sample columns (GT/AD/DP/GQ) belonging to excluded samples,
## then list the remaining sample names from the .GT columns.
## expand.labels.to.samples is a project helper defined elsewhere.
bad.samples
bad.samples
bad.samples.labels<-expand.labels.to.samples(bad.samples,c("GT","AD","DP","GQ"),paste.after=TRUE)
dim(a.indel)
a.indel<-a.indel[,colnames(a.indel)[!(colnames(a.indel) %in% bad.samples.labels)]]
dim(a.indel)
#colnames(a.indel)[(colnames(a.indel) %in% bad.samples.labels)]
bad.samples
#tapply(gene.list[,"CHR"],gene.list[,"CHR"],length)
all.possible.samples<-gsub(".GT$","",colnames(a.indel)[grep(".GT$",colnames(a.indel))],perl=TRUE)
length(all.possible.samples)
pheno.types
############################################# POPULATION MAF FILTER - PART B
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
############################################# POPULATION MAF FILTER
## Set up population-MAF filtering: discover the annotated "*::maf" database
## columns, drop any requested filter databases that are not annotated, and
## build (via assign) one variable per threshold named
## "filter.cols.maf.<threshold>" listing the filter column labels.
## Threshold 0.0 is the special "novel" case (absent from all novel DBs).
colnames(a.indel)[grep("maf$",colnames(a.indel))]
maf.threshold.filter<-maf.threshold.filter.to.use
filter.cols.maf<-filter.cols.maf.use
filter.cols.novel<-filter.cols.novel.use
#############################get the filters so have minor allele frequencies:
the.combined.DBs<-gsub("::maf$","",colnames(a.indel)[grep("::maf$",colnames(a.indel))]) ## get maf databases
the.combined.DBs<-the.combined.DBs[!(the.combined.DBs %in% c("ID","snp132"))] ## remove artifacts
names.the.combined.DBs<-the.combined.DBs
#the.combined.DBs<-paste(the.combined.DBs,"::maf",sep="")
names(the.combined.DBs)<-names.the.combined.DBs
present.cols<-filter.cols.maf %in% names(the.combined.DBs)
if(sum(!present.cols)>0){ ### asked to filter on columns that don't exist
print("WARNING asked to filter using MAF with databaes that have not been annotated")
print(paste("missing MAF Db:",toString(filter.cols.maf[!present.cols]),"-> ignored",sep=" "))
filter.cols.maf<-filter.cols.maf[present.cols]
}
present.cols<-filter.cols.novel %in% names(the.combined.DBs)
if(sum(!present.cols)>0){ ### asked to filter on columns that don't exist
print("WARNING asked to filter using NOVEL with databaes that have not been annotated")
print(paste("missing MAF Db:",toString(filter.cols.novel[!present.cols]),"-> ignored",sep=" "))
filter.cols.novel<-filter.cols.novel[present.cols]
}
print(filter.cols.maf)
print(filter.cols.novel)
maf.target<-the.combined.DBs[names(the.combined.DBs) %in% filter.cols.maf] # the.combined.DBs
maf.target
for(i in 1:length(maf.threshold.filter)){
a.filter<-paste(filter.cols.maf,"::maf-filter::",maf.threshold.filter[i],sep="")
print(a.filter)
assign(paste("filter.cols.maf",maf.threshold.filter[i],sep="."),value=a.filter)
}
maf.threshold.filter.all<-c(0.0,maf.threshold.filter)
assign(paste("filter.cols.maf",maf.threshold.filter.all[1],sep="."),value=paste(filter.cols.novel,"::maf",sep=""))
print(eval(as.name(paste("filter.cols.maf",maf.threshold.filter.all[1],sep=".")) ) )
# a.indel[1:5,paste(the.combined.DBs,"::maf",sep="")]
################################################################################
## THE FILTER TABLE IS MADE FROM : the.combined.DBs<-c(filter.DB,generic.filter.DB,function.filter.DB) only thes DBs can be used for MAF filtereing
## Build the per-variant MAF filter table: one "DB::maf-filter::threshold"
## logical column per (database, threshold) pair. A variant passes when its
## MAF is below the threshold OR above 1-threshold (ref/alt possibly swapped
## in the database); variants absent from a database pass by default.
target.table<-a.indel[,paste(the.combined.DBs,"::maf",sep="")]
#target.table[1:5,]
extra<-matrix(data=NA,nrow=dim(target.table)[1],ncol=length(maf.target)*length(maf.threshold.filter))
colnames(extra)<-expand.labels.to.samples.complex(maf.target,maf.threshold.filter,paste.after=TRUE,seperator="::maf-filter::")
#extra[1:5,]
target.table<-cbind(target.table,extra)
rm(extra)
####### generate maf.target appreded to the allel frequency
maf.threshold.filter # filters to apply
maf.target #<-the.combined.DBs[names(the.combined.DBs) %in% all.filter.cols.maf] # the.combined.DBs
k<-2
i<-2
for(k in 1:length(maf.threshold.filter)){ #maf.threshold.filter.all contains novel "0"
for(i in 1:length(maf.target)){
an.inversion<-(target.table[, paste(maf.target[i],"maf",sep="::") ] > (1-maf.threshold.filter[k]) ) # 0.999 on a 0.01 filter is also a hit
an.inversion[is.na(an.inversion)]<-FALSE
# a.test<-(target.table[,paste(maf.target[i],"maf",sep="::")] < maf.threshold.filter[k]) # normal test for minot allele
target.table[,paste(maf.target[i],"maf-filter",maf.threshold.filter[k],sep="::")]<-(target.table[,paste(maf.target[i],"maf",sep="::")] < maf.threshold.filter[k]) | an.inversion ## incase ref and alt are inverted
target.table[is.na(target.table[,paste(maf.target[i],"maf-filter",maf.threshold.filter[k],sep="::")]),paste(maf.target[i],"maf-filter",maf.threshold.filter[k],sep="::")]<-TRUE # if not found in database then assume has small allele frequency
# ref.inversion[,k ]<-ref.inversion[,k] & an.inversion
}
print(paste("Done ",maf.threshold.filter[k],sep=""))
}
#target.table[1:5,]
###################
## colnames(target.table)
## the.DBs<-the.combined.DBs
## the.DBs
## names(the.DBs)
## for(i in 1:length(the.DBs)){
## target.string<-paste("^",the.DBs[i],sep="")
## if(grepl("++",target.string,fixed=TRUE)){target.string<-gsub("++","",target.string,fixed=TRUE)} ##gerp++ casles problems in grep (++)
## colnames(target.table)<-gsub(target.string,names(the.DBs)[i],colnames(target.table))
## ## colnames(target.table)<-gsub(paste("^",the.DBs[i],sep=""),names(the.DBs)[i],colnames(target.table))
## }
## colnames(target.table)
## target.table[1:2,]
## filter.table<-target.table
## ########
# filter.cols.maf.0
################################# Get allele frequency table
################# HOWEVER ALT ALLELES ALL IS USED AS MINOR ALLELE FREQUNCY not alternative allele frequencies
## Collapse the per-database filter columns into one "MAF.lt:<threshold>"
## logical per variant (AND across databases). Threshold 0 means "novel":
## the variant must be absent (NA/"NA") from every novel database.
## NOTE: ALT allele frequency is used as if it were the minor allele frequency.
maf.lt.all<-data.frame(key=key,stringsAsFactors=FALSE)
imaf<-1
for(imaf in 1:length(maf.threshold.filter.all)){
a.filter.cols.maf<-eval(as.name( paste("filter.cols.maf",maf.threshold.filter.all[imaf],sep=".") ))
maf.lt<-rep(TRUE,times=dim(target.table)[1])
imaff<-1
for(imaff in 1:length(a.filter.cols.maf)){
if(maf.threshold.filter.all[imaf]==0){ # different case for novel NOT found (rather than less than)
# maf.lt<-maf.lt & !target.table[,a.filter.cols.maf[imaff]] ### this is correct as using the variable string name derived from filter.cols.novel
maf.lt<-maf.lt & ( is.na(target.table[,a.filter.cols.maf[imaff]]) | target.table[,a.filter.cols.maf[imaff]]=="NA" )
}else{
maf.lt<-maf.lt & as.logical(target.table[,a.filter.cols.maf[imaff]])
}
}
# filtered<-maf.lt & wanted.muts.fil
maf.lt.all<-cbind(maf.lt.all,maf.lt)
}
if(dim(maf.lt.all)[2]>1){ colnames(maf.lt.all)<-c("key",paste("MAF.lt:",maf.threshold.filter.all,sep=""))}
maf.lt.all<- maf.lt.all[,colnames(maf.lt.all)!="key"]
maf.lt.all[1:4,]
## repair a historical column-name typo in the annotation table
colnames(a.indel)[colnames(a.indel) == "MAF.lt:0.5"]<-"MAF.lt:0.05"
posns<-match(colnames(maf.lt.all),colnames(a.indel))
missing<-is.na(posns)
## i<-1
## if(i in 1:sum(!missing)){
## a.indel[, posns[!missing][i] ] <- maf.lt.all[,!missing][i]
## }
dim(a.indel)
maf.lt.all[1:4,]
rm(target.table)
############ FIX ALLELE FREQUENCY PROBLEMS
## if(plink.chr=="X"){plink.chr<-23}
## if(plink.chr=="Y"){plink.chr<-24}
## if(plink.chr=="XY"){plink.chr<-25}
## if(plink.chr=="M"){plink.chr<-26}
## plink.chr
## plink.file<-paste(genotype.file.prefix,"_chr",plink.chr,sep="")
## plink.file
## g.indel<-read.plink(paste(genotype.file.location,plink.file,sep="/"))
## ## test<-read.table("plink.frq",header=T)
## ## ori<-read.table("ori.frq",header=T)
## dim(a.indel)
## dim(g.indel)
## target<-"37442658"
## posn1<-match(target,a.indel[,"end"])
## posn2<-match(target,g.indel[,"start"])
## posn1
## posn2
## ####### REMOVE BAD SAMPLES
## a.indel[posn1,1:10]
## g.indel[posn2,1:10]
#########################################################################################################################
#########################################################################################################################
#########################################################################################################################
## all.possible.samples
## colnames(a.indel)[1:20]
## interactive spot-check of the annotation matrix
a.indel[1:5,1:10]
## col<-grepl("REF",a.indel[,"REF"])
## sum(col)
## a.indel[col,1:15]
## a.indel<-a.indel[!col,]
## colnames(a.indel)[1:50]
## dim(a.indel)
## sort(unique(a.indel[,"chr"]))
## sort(unique(a.indel[,"FILTER"]))
## test<-"AOGC-08-0287.GT"
## wanted<-a.indel[,test]!="0/0" | is.na(a.indel[,test])
## sum(wanted)
## a.indel[wanted,c("chr","start", "end","Consequence.Embl",test)][1:10,]
## sort(table(a.indel[wanted, "Consequence.Embl"]))
## sort(table(a.indel[wanted, "TYPE"]))
## test<-all.possible.samples
## ALT.Alleles.Control
#################################### got some missing geen names still.
## all.genes[grep("GTPBP4",names(all.genes))]
## Fill in missing gene symbols, in decreasing order of preference:
## 1) map the Ensembl gene id to an HGNC symbol via a lookup table,
## 2) fall back to the Ensembl gene id itself,
## 3) for "-" entries: Ensembl feature id, then the knownGene annotation
## (with "(dist=...)" suffixes stripped), and finally a chr:start key.
no.gene<-is.na(a.indel[,"Gene.Names"]) | a.indel[,"Gene.Names"]=="NA"
all.genes<-sort(table(a.indel[,"Gene.Names"]),decreasing=TRUE)
ens.to.hgnc<-read.delim("/media/UQCCG/Sequencing/Data/Genomes/hg19/ENSG_to_HGNC.txt",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE,na.strings="",quote="\"")
ens.to.hgnc[1:5,]
genes<-unique(a.indel[no.gene,"Gene.Embl"])
posns<-match(genes,ens.to.hgnc[,"Ensembl.Gene.ID"])
missing<-is.na(posns)
ens.to.hgnc<-ens.to.hgnc[posns[!missing],]
dups<-duplicated(ens.to.hgnc[,"Ensembl.Gene.ID"])
ens.to.hgnc<-ens.to.hgnc[!dups,]
ens.to.hgnc[1:5,]
posns<-match(a.indel[no.gene,"Gene.Embl"],ens.to.hgnc[,"Ensembl.Gene.ID"])
missing<-is.na(posns)
sum(missing)
## unmatched posns are NA here, so some Gene.Names become NA; those are
## re-detected by the no.gene recomputation two lines below and filled
## from Gene.Embl instead.
a.indel[no.gene,"Gene.Names"]<-ens.to.hgnc[posns,"HGNC.symbol"]
a.indel[no.gene,"Gene.Names"][1:10]
ens.to.hgnc[posns,"HGNC.symbol"][1:10]
no.gene<-is.na(a.indel[,"Gene.Names"]) | a.indel[,"Gene.Names"]=="NA"
a.indel[no.gene,"Gene.Names"]<-a.indel[no.gene,"Gene.Embl"]
a.dash<-a.indel[,"Gene.Names"]=="-"
sum(a.dash)
a.indel[a.dash,][1:5,1:50]
a.indel[a.dash,"Gene.Names"]<-a.indel[a.dash,"Feature.Embl"]
a.dash<-a.indel[,"Gene.Names"]=="-"
sum(a.dash)
a.indel[a.dash,][1:5,1:50]
final.names<-a.indel[a.dash,"knownGene::gene"]
final.names<-gsub("\\(dist=\\d*\\)","",final.names)
final.names<-gsub("\\(dist=NONE\\)","",final.names)
final.names[grepl("^NO_ANNOVAR",final.names)]<-"-"
a.indel[a.dash,"Gene.Names"]<-final.names
a.dash<-a.indel[,"Gene.Names"]=="-"
sum(a.dash)
a.indel[a.dash,c("chr","start")]
final.names<-build.key(a.indel[a.dash,],c("chr","start"))
a.indel[a.dash,"Gene.Names"]<-final.names
unannotated.hits<-a.dash
all.genes<-sort(table(a.indel[,"Gene.Names"]),decreasing=TRUE)
all.genes[1:30]
## MUC4 TTN MUC16 MUC5B MUC12 FAM182B ZNF717 NBPF1 FAM182A AHNAK2
## 1357 540 366 347 326 289 284 283 279 269
## MST1L HYDIN FLG SYNE1 NBPF14 POM121L8P NEB MUC6 POM121L1P MST1P2
## 267 255 249 238 233 213 212 211 204 198
## PDE4DIP HLA-DRB1 CTBP2 MUC17 OBSCN FLJ45445 USH2A LRP1B SSPO DNAH11
## 196 192 186 184 184 178 175 172 172 167
## MUC16 MUC4 ANKRD36 TTN HLA-DRB1 SYNE1 FRG1B ZNF717 NEB
## 1176 1171 849 726 712 642 598 576 546
## FRG1 MUC12 HLA-DRB5 ANKRD30BL CSMD1 RYR1 CDC27 DNAH17 FAM182B
## 487 470 467 450 430 429 398 390 378
## DNAH11 KMT2C PKHD1 SSPO ANKRD20A8P FCGBP CTBP2 MUC5B DNAH5
## 376 364 362 355 354 347 342 341 337
## GPR98 MIR1273H LAMA5
## 334 329 328
## common.hit.genes is currently disabled (empty string); the commented
## alternatives would exclude very frequently hit genes (e.g. mucins/TTN).
grep("NOTCH1",names(all.genes))
common.hit.genes<-"" #names(all.genes)[all.genes>250] # common.hit.genes<-names(all.genes)[1:4]
###############################################
###############################################
## Build the per-variant gene annotation table for gene/set-based tests:
## Name = variant key, gene = assigned gene symbol, cluster = grouping label
## (initially the gene itself; pathway clusters are appended further below).
snpinfo.raw<-cbind(key,a.indel[,"Gene.Names"],a.indel[,"Gene.Names"])
colnames(snpinfo.raw)<-c("Name","gene","cluster")
snpinfo.raw[1:5,]
tail(snpinfo.raw)
## Sites annotated to several genes are stored "GENE1::GENE2"; append one
## extra row per component gene so each gene's test sees the variant.
poly.gene.site<-grep("::",snpinfo.raw[,"gene"])
length(poly.gene.site)
all.extra<-{}
if(length(poly.gene.site)>0){
## Vectorised replacement of the previous one-row-at-a-time rbind() loop
## (O(n^2) growth in the number of multi-gene sites). Row order is preserved:
## all genes of the first poly site, then the second, etc.
split.genes<-strsplit(snpinfo.raw[poly.gene.site,"gene"],split="::")
flat.genes<-unlist(split.genes)
flat.names<-rep(snpinfo.raw[poly.gene.site,"Name"],times=lengths(split.genes))
all.extra<-cbind(flat.names,flat.genes,flat.genes)
colnames(all.extra)<-colnames(snpinfo.raw)
} # has a poly
dim(all.extra)
snpinfo.raw<-rbind(snpinfo.raw,all.extra) ## rbind(x,NULL) is a no-op when no poly sites
colnames(snpinfo.raw)<-c("Name","gene","cluster")
dim(snpinfo.raw)
snpinfo.raw[1:5,]
dim(snpinfo.raw)
snpinfo<-snpinfo.raw
## other.clusters<-read.delim("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis/Final_FANC_clusters.csv",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
## other.clusters<-read.delim("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis/other_clusters.csv",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
## other.clusters[1:5,]
## colnames(other.clusters)
## for(ic in 1:dim(other.clusters)[2]){
## cluster.genes<-clusters[clusters[,"Cluster"]==clusters.wanted[ic],"Genes.assigned.to.group"]
## cluster.genes<-unlist(strsplit(cluster.genes,split=", "))
## cluster.genes
## last.cluster.length<-length(cluster.genes)
## if(ic==1){clinical.genes<-cluster.genes}
## if(ic==2){fanc.genes<-cluster.genes}
## snpinfo[ snpinfo[,"gene"] %in% cluster.genes ,"cluster"] <-clusters.wanted[ic]
## }
# /media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis/other_clusters.csv
#clusters<-read.delim("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis/Clusters Definitions.csv",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
## Load the pathway/cluster definitions (one gene list per column) and the
## gene-symbol alias table. unwind() (project helper) expands the
## comma-separated "Aliases" column to one row per alias.
clusters<-read.delim("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis/Final_FANC_clusters.csv",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
#clusters
#clusters.wanted<-c("Clinical","FANC - ACID")
clusters.wanted<-colnames(clusters)
ic<-1
snpinfo[1:5,]
cbind(unique(clusters[,22]),unique(clusters[,22]))
snpinfo[1:5,]
gene.aliases<-read.delim("/media/UQCCG/Software/annovar/humandb/Gene_symbol_aliases.txt",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE)
gene.aliases[1:5,]
gene.aliases<-unwind(gene.aliases, "Aliases",delimit=", ")
gene.aliases[1:5,]
############### all.genes
gene.aliases[grepl("SLX1",gene.aliases[,1]),]
gene.aliases[grepl("SLX1",gene.aliases[,2]),]
############FIX MISSING gene names in pathways tests
#Check clusters have genes that are missing from the sequencing list then make sure have the approved gene name
ic<-5
recode<-{}
## For each cluster column: find member genes absent from the sequenced gene
## list (all.genes) and, where possible, map them to their approved HGNC
## symbol via the alias table, rewriting the cluster definitions in place.
for(ic in 1:length(clusters.wanted)){
# print(ic)
cluster.genes<-clusters[,clusters.wanted[ic]]
## FIX: was 'cluster.genes!="" | is.na(cluster.genes)', which KEPT the NA
## entries it was meant to drop; drop blanks AND NAs.
cluster.genes<-cluster.genes[cluster.genes!="" & !is.na(cluster.genes)]
cluster.genes<-unique(cluster.genes)
missing.name<-!(cluster.genes %in% names(all.genes))
if(sum( missing.name)>0){
posns<-match(cluster.genes[missing.name],gene.aliases[, "Aliases"])
missing<-is.na(posns)
if(sum(missing)>0){
print(paste("in cluster",clusters.wanted[ic], " missing"))
## FIX: was '[!missing]', which printed the genes that WERE resolved via an
## alias; report the genes that could not be resolved at all.
print(cluster.genes[missing.name][missing])
}
## recode: old (alias) symbol -> approved symbol, for the resolvable genes
recode<-cbind(cluster.genes[missing.name][!missing],gene.aliases[posns[!missing], "Approved.Symbol"])
colnames(recode)<-c("old","new")
###### transfer to new gene lists
posns<-match(clusters[,clusters.wanted[ic]],recode[,"old"])
missing<-is.na(posns)
clusters[!missing,clusters.wanted[ic]]<-recode[posns[!missing],"new"] ### redefine the clusters
}
}
#########################################################
## Append, for each pathway cluster, a copy of its member genes' snpinfo rows
## with the "cluster" column set to the cluster name, so gene-based and
## pathway-based set tests share one snpinfo table.
snpinfo<-snpinfo.raw
ic<-1
for(ic in 1:length(clusters.wanted)){
cluster.genes<-clusters[,clusters.wanted[ic]]
## FIX: was 'cluster.genes!="" | is.na(cluster.genes)', which kept the NA
## entries it was meant to drop; drop blanks AND NAs.
cluster.genes<-cluster.genes[cluster.genes!="" & !is.na(cluster.genes)]
print(paste(clusters.wanted[ic],paste(cluster.genes,collapse=","),sep=": "))
## FIX: drop=FALSE so a single-row match stays a matrix (a bare vector would
## break the extra[,"cluster"]<- assignment below).
extra<-snpinfo.raw[snpinfo.raw[,"gene"] %in% cluster.genes,,drop=FALSE]
print(dim(extra))
extra[,"cluster"]<-clusters.wanted[ic]
snpinfo<-rbind(snpinfo, extra)
}
snpinfo.ori<-snpinfo
### HAVE
# snpinfo.raw (original from a.indel)
# snpinfo # with extra clusters
# snpinfo.raw a permanent copy of snpinfo
## "FANCM " "MHF1" "MHF2" "FAAP24"
clusters[,1]
## non-empty gene count per cluster column (sanity check)
chk<-apply(clusters,2,function(x){ length(x[x!=""])})
write.table(clusters,file="clusters_as.using.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
############################
## Apply QC-based genotype filtering (project helper filtered.genotype) in
## parallel: a.indel is split into row chunks, filtered with doParallel
## workers, and the chunks are rbind-ed back in order.
a.indel[1:20,1:5]
#filtered.genotype(a.indel[1:20,],gsub(".GT$","",the.samples),prefix="",suffix="",20,0.02,0.98,0.20,0.80,7,2)
num.cores<-4
num.bits<-num.cores
library("doParallel")
registerDoParallel(cores=num.cores)
## library(doMC)
## registerDoMC(cores=num.cores)
the.samples<-colnames(a.indel)[grepl(".GT$", colnames(a.indel))]
#colnames(a.indel)[grepl(".GQ$", colnames(a.indel))]
## bump chunk count until no chunk has < 2 rows (a 1-row chunk would be
## dropped to a vector and break the rbind combine)
while((dim(a.indel)[1] %% num.bits)< 2){num.bits<-num.bits+1} ### go don't get matrix issues
## fil.genotypes<-foreach(a.indel.bit=iter(a.indel,by='row',chunksize=as.integer(dim(a.indel)[1]/num.bits) ), .combine='rbind', .multicombine=TRUE, .inorder=TRUE) %dopar% filtered.genotype.old(a.indel.bit,gsub(".GT$","",the.samples),prefix="",suffix="",20,0.02,0.98,0.20,0.80,7,2)
fil.genotypes<-foreach(a.indel.bit=iter(a.indel,by='row',chunksize=as.integer(dim(a.indel)[1]/num.bits) ), .combine='rbind', .multicombine=TRUE, .inorder=TRUE) %dopar% filtered.genotype(a.indel.bit,gsub(".GT$","",the.samples),prefix="",suffix="",20,0.02,0.98,0.20,0.80,7,2)
#fil.genotypes<-filtered.genotype(a.indel,gsub(".GT$","",the.samples),prefix="",suffix="",20,0.02,0.98,0.20,0.80,7,2)
dim(fil.genotypes)
colnames(fil.genotypes)[1:5]
rownames(fil.genotypes)[1:5]
dim(fil.genotypes)
dim(a.indel)
tail(rownames(a.indel))
############################### do one phenotype ####################
#
## Per-phenotype analysis loop (closes beyond this section). Builds the
## phenotype data frame 'pheno' restricted to samples with a non-NA phenotype
## and present in the genotype matrix, joins the capture method, and adds
## logical AML/Control indicator columns used for the genotype summaries.
ipheno<-1
for(ipheno in 1:length(pheno.types)){
print(paste("Doing phenotype:",pheno.types[ipheno]))
target.pheno<-names(pheno.types)[ipheno]
target.pheno.col<-pheno.types[ipheno]
length(sample.sheet.full[,target.pheno.col])
pheno<-sample.sheet.full[!is.na(sample.sheet.full[,target.pheno.col]) ,] ## pheno only contains SAMPLES that have a phenotype
print(dim(pheno))
print(paste("Number Samples:",dim(pheno)[1]))
## covariates: successive reassignment leaves only the intercept ("1") active
covars<-c("PCA1","PCA2","PCA3","PCA4")
covars<-c("the.run")
covars<-c("1")
dim(pheno)
pheno[1:5,]
formula<-paste(target.pheno.col,"~",paste(covars,collapse="+"),sep="")
print(formula)
formula<-formula(formula)
#seq.type[1:57,]
posns<-match(pheno[,"SAMPLE"],seq.type[,"Sample"])
missing<-is.na(posns)
sum(missing)
capture<-seq.type[posns,"Capture.Method"]
## the.runs<-seq.type[posns,"Run"]
table(capture) ### all illume here
pheno<-cbind(pheno,capture)
pheno[1:5,]
## keep only samples that actually have genotype columns in a.indel
pheno<-pheno[pheno[,"SAMPLE"] %in% all.possible.samples,]
the.samples<-paste(pheno[,"SAMPLE"],"GT",sep=".") ## samples same order as in pheno
print(paste("Number samples: ",length(the.samples),sep=""))
table(pheno[,"SampleProject"])
## AML Control
## 89 197
## ###############GEFOS SNP type restrictions
## AML Control
## 135 216
######################## make sure pheno has sample samples as A.indel
the.projects<-c("AML","Control")
table(pheno$SampleProject)
AML<-rep(FALSE,times=dim(pheno)[1])
AML[pheno$SampleProject %in% c("AML")]<-TRUE
Control<-rep(FALSE,times=dim(pheno)[1])
Control[pheno$SampleProject %in% c("Control")]<-TRUE
##### AFFSTAT_IN_WORDS is mostly correct to the regression #####
dim(pheno)
#pheno<-pheno[,1:209]
pheno[1:5,]
pheno<-cbind(pheno,AML,Control)
names(the.projects)<-the.projects
colnames(pheno)
pheno[pheno[,the.projects[1]],"SAMPLE"]
pheno[pheno[,the.projects[2]],"SAMPLE"]
for (ir in 1: length(the.projects)){
print(paste(the.projects[ir],"Num. samples:",sum(pheno[,the.projects[ir]])))
}
## Per-group allele/genotype summaries from the UNFILTERED genotypes:
## for each project group (AML, Control), summarise its samples' .GT columns
## via the project helper genotype.summary() and accumulate the group-suffixed
## summary columns into summary.geno.extra.
summary.geno.extra<-{}
####################################################################################
#################################################################################### REGULAR
#### MAY NEED TO ADJUST way use samples in selected based on selection below.
targets<-the.projects #c("NMD","ex.Control","AOGC")
targets
names(targets)<-targets
###### the.samples and pheno in same order but the.samples has .GT extension.
it<-1
the.samples<-paste(pheno[,"SAMPLE"],"GT",sep=".")
for(it in 1:length(targets)){
#use.samples<-the.samples[pheno[,"SampleProject"]==targets[it]]
use.samples<-the.samples[pheno[,targets[it]]]
print(targets[it])
print(use.samples)
length(use.samples)
genotypes<-a.indel[,use.samples]
dim(genotypes)
summary.geno<-genotype.summary(as.matrix(genotypes))
colnames(summary.geno)<-paste(c("MAF","ALT.Alleles","REF.Alleles","TOTAL.Alleles","MISSING.Alleles","ALT_HOMO","ALT_HETRO","GENO"),names(targets)[it],sep=".")
#summary.geno[1:5,]
if(is.null(dim(summary.geno.extra))){
summary.geno.extra<-summary.geno
}else{
summary.geno.extra<-cbind(summary.geno.extra,summary.geno)
}
print(paste("Done: ",targets[it],sep=""))
} ## loop over targets
#################################################################################### FILTERED
## --- Per-group genotype summaries (quality-filtered genotypes) --------------
## Same as the loop above, but summarising 'fil.genotypes' (the individually
## quality-filtered genotype matrix); columns get a ".filt" suffix.
targets<-the.projects #c("NMD","ex.Control","AOGC")
targets
names(targets)<-paste(targets,".filt",sep="")
targets
it<-1
for(it in 1:length(targets)){
#use.samples<-the.samples[pheno[,"SampleProject"]==targets[it]]
use.samples<-the.samples[pheno[,targets[it]]]
print(targets[it])
print(use.samples)
length(use.samples)
genotypes<-fil.genotypes[,use.samples]
dim(genotypes)
summary.geno<-genotype.summary(as.matrix(genotypes))
colnames(summary.geno)<-paste(c("MAF","ALT.Alleles","REF.Alleles","TOTAL.Alleles","MISSING.Alleles","ALT_HOMO","ALT_HETRO","GENO"),names(targets)[it],sep=".")
#summary.geno[1:5,]
if(is.null(dim(summary.geno.extra))){
summary.geno.extra<-summary.geno
}else{
summary.geno.extra<-cbind(summary.geno.extra,summary.geno)
}
print(paste("Done: ",targets[it],sep=""))
} ## loop over targets
#
## ######################################
## colnames(a.indel)[c(1:6,16,28,7,30,34,37:42,43,14,32,33)] # 1276,1310
## annotations<-a.indel[,c(1:6,16,28,7,30,34,37:42,43,14,32,33)]
## annotations<-a.indel[,c(1:8,13,16,28,7,30,34,37:42,43,14,32,33)]
## a.indel[1:5,1:10]
## --- Hardy-Weinberg equilibrium p-values in controls ------------------------
## getHWE() (project helper) takes the "AA,AB,BB" genotype-count strings and
## returns one exact HWE p-value per variant; computed on raw and filtered
## control genotypes, then keyed by variant.
summary.geno.extra[1:5,]
colnames(a.indel)[c(1:8,16,30,37,40,42,43)] # 1276,1310,
## annotations<-a.indel[,c(core.ann,"refGene::gene","FILTER","refGene::location","refGene::type", "Consequence.Embl","Gene.Embl", "Feature.Embl","Protein_position.Embl","Amino_acids.Embl" )]
## annotations[1:50,]
## rownames(summary.geno.extra)==rownames(annotations)
## annotations<-cbind(annotations,summary.geno.extra)
## getwd()
## write.table(annotations,file="all.gene.vars.89.379_final.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
#colnames(summary.geno.extra)
#rownames(summary.geno.extra)<-key
#getHWE(obs_hets, obs_hom1, obs_hom2)
hw.p.control<-getHWE(summary.geno.extra[,"GENO.Control"]) ## used 16 CPUs
hw.p.control.filt<-getHWE(summary.geno.extra[,"GENO.Control.filt"]) ## used 16 CPUs
###
## class(summary.geno.extra)
hw.p.control[1:5]
length(hw.p.control)
names(hw.p.control)<-key
names(hw.p.control.filt)<-key
######### testing
## hw.p.control[200100:200195]
## hw.p.control[22:25]
## summary.geno.extra[22:25,"GENO.Control"]
## test.loc<-c(24,200129:200143)
## geno<-strsplit(summary.geno.extra[test.loc,"GENO.Control"],split=",")
## AA<-as.numeric(unlist(lapply(geno,function(x) x[1])))
## BB<-as.numeric(unlist(lapply(geno,function(x) x[3])))
## AB<-as.numeric(unlist(lapply(geno,function(x) x[2])))
## x<-cbind(AA,AB,BB)
## x
## HWExactMat(x)
## hw.p.control[test.loc] ## correct in original order
######################## inbreeding and no genotypes
## --- Within-cohort frequency / monomorphic flags ----------------------------
## rare.in.group*: variant MAF below group.maf.thresh (or above 1-thresh, i.e.
## the minor allele is the REF) in EITHER group -> "rare" kept; discards
## variants common in this cohort.  no.genotypes*: monomorphic (MAF 0 or NA)
## in BOTH groups, i.e. nothing left to test after per-sample filtering.
group.maf.thresh<-0.20 # group.maf.thresh<-0.10 if more common that this in Group then discard: Discard Rare but common in this cohort ## not a point level
missing.threshold<-0.20 # missing.threshold<-0.50 60 % of genotypes missing
## missing.threshold.nimblgen<-0.20
## missing.threshold.illumina<-0.20
## rare.in.group<-( summary.geno.extra[,c("MAF.HIGH","MAF.LOW")]< group.maf.thresh) | ( summary.geno.extra[,c("MAF.HIGH","MAF.LOW")] > (1-group.maf.thresh)) | (is.na( summary.geno.extra[,c("MAF.HIGH","MAF.LOW")]))
rare.in.group.table<-( summary.geno.extra[,c("MAF.AML","MAF.Control","MAF.AML.filt","MAF.Control.filt")]< group.maf.thresh) | ( summary.geno.extra[,c("MAF.AML","MAF.Control","MAF.AML.filt","MAF.Control.filt")] > (1-group.maf.thresh)) | (is.na( summary.geno.extra[,c("MAF.AML","MAF.Control","MAF.AML.filt","MAF.Control.filt")]))
rare.in.group.table[1:5,]
#rare.in.group<-cbind( indels[,c("MAF.HIGH","MAF.LOW")]< group.maf.thresh) , (indels[,c("MAF.HIGH","MAF.LOW")] > (1-group.maf.thresh)) , (is.na(indels[,c("MAF.HIGH","MAF.LOW")])))
rare.in.group<-combine.boolean(rare.in.group.table,c("MAF.AML","MAF.Control"),"OR")
rare.in.group.filt<-combine.boolean(rare.in.group.table,c("MAF.AML.filt","MAF.Control.filt"),"OR")
sum(!rare.in.group)
sum(!rare.in.group.filt)
#no.genotypes<-(indels[,c("MAF.HIGH","MAF.LOW")]== 0) | (is.na(indels[,c("MAF.HIGH","MAF.LOW")])) # no genotypes in test classes for a mutataion after individaul quality filtering
### checking if BOTH not mono mrphic
no.genotypes.table<-(summary.geno.extra[,c("MAF.AML","MAF.Control","MAF.AML.filt","MAF.Control.filt")]== 0) | summary.geno.extra[,c("MAF.AML","MAF.Control","MAF.AML.filt","MAF.Control.filt")]=="NaN" | (is.na( summary.geno.extra[,c("MAF.AML","MAF.Control","MAF.AML.filt","MAF.Control.filt")])) # no genotypes in test classes for a mutataion after individaul quality filtering
no.genotypes.table[1:5,]
no.genotypes<-combine.boolean(no.genotypes.table,c("MAF.AML","MAF.Control"),"AND") # was AND
no.genotypes.filt<-combine.boolean(no.genotypes.table,c("MAF.AML.filt","MAF.Control.filt"),"AND") # was AND
sum(no.genotypes)
sum(no.genotypes.filt)
summary.geno.extra[1:5,]
## --- Per-group genotype missingness -----------------------------------------
## For each group (raw and filtered AML/Control), compute the fraction of
## alleles that are missing per variant: MISSING / (TOTAL + MISSING).
## Result: numeric matrix, one row per variant (keyed), one column per group.
missing.targets<-c("AML","Control","AML.filt","Control.filt")
high.missing<-matrix(NA_real_,nrow=dim(summary.geno.extra)[1],ncol=length(missing.targets))
for(imt in seq_along(missing.targets)){
  miss.n<-as.numeric(summary.geno.extra[,paste("MISSING.Alleles",missing.targets[imt],sep=".")])
  total.n<-as.numeric(summary.geno.extra[,paste("TOTAL.Alleles",missing.targets[imt],sep=".")])
  # missing fraction = missing alleles over all alleles (called + missing)
  high.missing[,imt]<-miss.n/(total.n+miss.n)
}
colnames(high.missing)<-missing.targets
rownames(high.missing)<-key
## nimblegen.total.missing<-subset(high.missing,select=c("nimblegen"))
## nimblegen.total.missing[1:5,]
## nimblegen.total.missing<-nimblegen.total.missing > missing.threshold.nimblgen
## ## nimblegen.total.missing<-combine.boolean(high.total.missing,c("LOW","HIGH","LOW.pheno","HIGH.pheno"),"OR")
## sum(nimblegen.total.missing)
## illumina.total.missing<-subset(high.missing,select=c("illumina"))
## illumina.total.missing[1:5,]
## illumina.total.missing<-illumina.total.missing > missing.threshold.illumina
## ## nimblegen.total.missing<-combine.boolean(high.total.missing,c("LOW","HIGH","LOW.pheno","HIGH.pheno"),"OR")
## sum(illumina.total.missing)
## sum(high.total.missing | nimblegen.total.missing | illumina.total.missing)
## --- Missingness pass/fail flags --------------------------------------------
## A variant passes if its missing fraction is <= missing.threshold in BOTH
## groups (raw and, separately, filtered).
high.missing[1:5,]
#ok.missing.test[places,]
ok.missing.test<-high.missing <= missing.threshold
ok.missing.test[1:5,]
ok.missing<-combine.boolean(ok.missing.test,c("AML","Control"),"AND") # was AND
ok.missing.filt<-combine.boolean(ok.missing.test,c("AML.filt","Control.filt"),"AND") # was AND
sum(!ok.missing)
sum(!ok.missing.filt)
ok.missing[1:5]
## NOTE(review): this recomputes ok.missing.filt identically to five lines
## above — redundant interactive leftover, harmless.
ok.missing.filt<-combine.boolean(ok.missing.test,c("AML.filt","Control.filt"),"AND")
## ok.missing[places]
###############################################################################################################################################
################# HOWEVER ALT ALLELES ALL IS USED AS MINOR ALLELE FREQUNCY not alternative allele frequencies
#maf.lt.all<-a.indel[,colnames(a.indel)[grepl("MAF.lt:",colnames(a.indel))]]
## NOTE(review): 'maf.lt.all' (table of logical "MAF.lt:<p>" population-MAF
## flags) must already exist from earlier in the file — its defining line
## above is commented out here.  These are interactive sanity peeks only.
maf.lt.all[1:5,]
#maf.lt.all<-as.logical(maf.lt.all)
as.logical(maf.lt.all[1:5,5])
####################################################################################
##################################### make the POSITION filter matrix QUALITY.FILTER FUNATIOAL group-MAF done here
## --- Per-position quality filter matrix -------------------------------------
## Warn if any configured filter label is missing from the available columns,
## then build 'qual': one logical column per configured quality test.
global.labs[!(global.labs %in% c(colnames(a.indel),colnames(summary.geno.extra)) )]
if(sum( !(global.labs %in% c(colnames(a.indel),colnames(summary.geno.extra))) )>0){print(paste("WARNING postion filters missing for data",
toString(global.labs[!(global.labs %in% c(colnames(a.indel))) ])))}
#grep("ljb_gerp",colnames(a.indel))
##############$$$$$$$$$$$$$$ POSITION FILTER: combines a.indel, filter table and filter.table.pholy in this case.
qual<-position.quality.filter( cbind( a.indel[,colnames(a.indel) %in% global.labs],summary.geno.extra[,colnames(summary.geno.extra) %in% global.labs] ), global.quality.cut,global.quality.type,global.quality.dirn)
dim(qual)
dim(a.indel)
rownames(qual)<-key
qual[1:5,]
#########$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
## Sanity check: warn if ANNOVAR reports mutation location types not covered
## by the configured 'possible.mutations' list.
the.types<-names(tapply(a.indel[,"ensGene::location"],a.indel[,"ensGene::location"],length))
the.types<-unique(unlist(strsplit(the.types,split=";")))
if( sum( !(the.types %in% c("NA","exonic",possible.mutations)))>0 ){print("WARNING ANNOVAR HAS NEW MUTATION TYPES DEFINED- REVIEW line 532")
print( the.types[!(the.types %in% c("exonic",possible.mutations))] ) }
# filter.table.pholy[1:5,]
## --- Coding / missense classification ---------------------------------------
## Classify variants as "interesting coding" and "missense" using both the
## ANNOVAR annotation (test.for.coding.type) and the VEP/Ensembl Consequence
## column (test.wanted.mutation); a variant counts if EITHER source flags it.
wanted.muts.coding<-test.for.coding.type(a.indel,geneanno.DB,interesting.coding.mutations)
sum(wanted.muts.coding)
####################### mhairi CHECK
wanted.muts.coding.vep<-test.wanted.mutation(a.indel[,"Consequence.Embl"],vep.coding,delimit.by=",") # filter.table.pholy[,"Consequence"] %in% vep.coding
## a.indel[wanted.muts.coding.vep,]
sum(wanted.muts.coding.vep)
missense.coding<-test.for.coding.type(a.indel,geneanno.DB,missense.variant)
sum(missense.coding)
####################### mhairi CHECK
missense.coding.vep<-test.wanted.mutation(a.indel[,"Consequence.Embl"],missense.variant,delimit.by=",") # filter.table.pholy[,"Consequence"] %in% vep.coding
## a.indel[wanted.muts.coding.vep,]
sum(missense.coding.vep)
is.missense<-missense.coding.vep | missense.coding
## Interactive peeks at the PolyPhen/SIFT score and description columns.
qual[1:50,"PolyPhen.low"] # true is above 0.1
a.indel[1:50,"PolyPhen.scores"]
a.indel[1:50,c("PolyPhen.desc","SIFT.desc")]
qual[1:5,"SIFT.high"]
a.indel[1:5,"SIFT.scores"]
## "Benign" missense = missense that fails the PolyPhen.low damaging test.
is.benign.missense<-is.missense & !qual[,"PolyPhen.low"]
sum(!is.benign.missense)
dim(a.indel)
############## noncoding double filtered for biotype ###
## --- Non-coding classification and combined "bad effect" flag ---------------
## Non-coding variants are kept only for selected biotypes (miRNA/lincRNA etc.
## via 'wanted.noncoding.subtypes') with low/unknown GERP, or UTR/prefilter
## classes with high GERP conservation.
interesting.NONcoding.mutations<-interesting.mutations[!(interesting.mutations %in% interesting.coding.mutations)] # "ncRNA_exonic"
wanted.muts.NONcoding<-test.for.coding.type(a.indel,geneanno.DB,interesting.NONcoding.mutations)
sum(wanted.muts.NONcoding)
wanted.interesting.to.prefilter<-test.for.coding.type(a.indel,geneanno.DB,interesting.to.prefilter) #"UTR3" "UTR5" "UTR5;UTR3" "snoRNA"
sum(wanted.interesting.to.prefilter)
# unique(a.indel[,"Consequence.Embl"])
####################### mhairi CHECK
wanted.interesting.to.prefilter.vep<-test.wanted.mutation(a.indel[,"Consequence.Embl"],vep.noncoding,delimit.by=",") #filter.table.pholy[,"Consequence"] %in% vep.noncoding
sum(wanted.interesting.to.prefilter.vep)
wanted.muts.NONcoding.keep<-rep(FALSE,times=dim(a.indel)[1]) # "ncRNA_exonic" and "ncRNA_exonic"
a.type<-wanted.noncoding.subtypes # list biotypes want to keep
## Normalise the biotype column name before grepl-ing over it.
if(!("gene_biotype" %in% colnames(a.indel))){colnames(a.indel)[colnames(a.indel)=="Gene.Biotype"]<- "gene_biotype"}
for(itype in 1:length(a.type)){
the.test<-grepl(a.type[itype],a.indel[,"gene_biotype"])
the.test[is.na(the.test)]<-FALSE
wanted.muts.NONcoding.keep<- wanted.muts.NONcoding.keep | the.test
}
# HERE wanted.muts.NONcoding.keep JUST DENOTES "miRNA" & "lincRNA" at this point USE wanted.muts.NONcoding to restrict to exones and splice BELOW
wanted.muts.NONcoding.keep<-wanted.muts.NONcoding.keep & wanted.muts.NONcoding
dim(qual)
colnames(qual)
qual[1:5,]
bad.coding<-wanted.muts.coding | wanted.muts.coding.vep | (wanted.muts.NONcoding.keep & (qual[,"GERP.low"] | qual[,"GERP.unknown"]) ) | ( (wanted.interesting.to.prefilter | wanted.interesting.to.prefilter.vep) & (qual[,"GERP.high"]) )
sum(bad.coding)
## NOTE(review): the line below OVERWRITES the combined definition of
## bad.coding just computed — from here on bad.coding is coding-only and the
## non-coding part lives in bad.non.coding; bad.effect re-unites them.
bad.coding<-wanted.muts.coding | wanted.muts.coding.vep
bad.non.coding<-(wanted.muts.NONcoding.keep & (qual[,"GERP.low"] | qual[,"GERP.unknown"]) ) | ( (wanted.interesting.to.prefilter | wanted.interesting.to.prefilter.vep) & (qual[,"GERP.high"]) )
bad.effect<-bad.coding | bad.non.coding
sum(bad.effect)
sum(bad.non.coding)
sum(bad.coding)
# bad.coding<-test.for.coding.type(a.indel,geneanno.DB,c("stopgain SNV","stoploss SNV","frameshift deletion","frameshift insertion"))
#bad.frame<-test.for.coding.type(geneanno.table,geneanno.DB,c("frameshift deletion","frameshift insertion"))
## basic.qual<-combine.boolean(qual,c("QUAL", "QD", "HRun", "SB"),"AND")
## gatk.qual<-combine.boolean(qual,c("FILTER_PASS", "FILTER_100" ),"OR")
## full.qual<-combine.boolean(cbind(basic.qual,gatk.qual),"all","OR")
## full.qual<-gatk.qual
## sum(full.qual)
## --- Overall variant quality and "functional" flags -------------------------
## full.qual: variant passed the GATK FILTER=PASS test only (earlier basic
## QUAL/QD/HRun/SB combinations are commented out above).
full.qual<-qual[,"FILTER_PASS"]
sum(full.qual)
qual[full.qual,c("FILTER_PASS", "FILTER_100")][1:30,]
#any.functional<-combine.boolean(qual,c("PolyPhen.low","SIFT.high","mut.taster.high","phylo.high","PolyPhen.bad","SIFT.bad","GERP.high","ljb_gerp.high"),"OR")
## functional: damaging by any of PolyPhen/SIFT/GERP, OR a coding class hit.
functional<-combine.boolean(qual,c("PolyPhen.low","SIFT.high","PolyPhen.bad","SIFT.bad","GERP.high"),"OR")# include really bad protein changes $$$$ BUT ADD ANY GERP.hiogh might let through a lot of junk
functional.coding<-combine.boolean(qual,c("PolyPhen.low","SIFT.high","PolyPhen.bad","SIFT.bad"),"OR")
sum(functional.coding)
functional<-functional | bad.coding
sum(functional)
######################### FREQUENCY FILTERS
######### given a 0.01 threshold 6sd would allow 10 alt alleles at 6sd
######## given a 0.005 threshold 6sd would allow 7 alt alleles at 6sd
## n<-476 # number of controls
## p<-0.001 # maf threshold
## ## n*p
## ## ## #np(1-p)# sd =sqrt(var)
## ## ## ## n<-length(the.samples[pheno[,"SampleProject"]=="Control"])
## ## ## ## p<-0.01
## ## ## ## p<-0.005 # maf threshold
## ## ## sqrt(n*p*(1-p)) # sd =sqrt(var)
## ## ## ## #z=
## (13- n*p) / sqrt(n*p*(1-p)) # p=0.01 2=0 7=3.5sd 16=6.03sd
## ## ## ## # p=0.005 2=1 4=3 5=4 6=5 7=6sd
## ## ## # p=0.001 2=4sd 3=6sd 5=4 6=5 7=15sd
## ## ## # p=0.001 2=4sd 3=6sd 5=4 6=5 7=15sd
## ## #p=0.05 0=10 20=3.2 29=6.16
## --- Binomial alt-allele count threshold (p = 0.01) -------------------------
## Find the smallest alt-allele count whose normal-approximation z-score
## z = (k - n*p) / sqrt(n*p*(1-p)) exceeds sd.thresh; counts below that are
## consistent with a population MAF of p in the controls.
n<-max(as.integer(summary.geno.extra[,"TOTAL.Alleles.Control"]))
p<-0.01
#p<-0.01 ########### set MAF threshols HEREX1
sd.thresh<-4
n
p
alt.counts.thresh<-1
while( (alt.counts.thresh- n*p) / sqrt(n*p*(1-p)) <= sd.thresh){alt.counts.thresh<-alt.counts.thresh+1}
alt.counts.thresh
summary.geno.extra[1:5,]
## Flag variants whose control alt-allele count is strictly below threshold.
rare.in.controls<-as.numeric(summary.geno.extra[,"ALT.Alleles.Control"])< alt.counts.thresh
rare.in.controls.filt<-as.numeric(summary.geno.extra[,"ALT.Alleles.Control.filt"])< alt.counts.thresh
sum(rare.in.controls)
sum(rare.in.controls.filt)
names(rare.in.controls)<-key
names(rare.in.controls.filt)<-key
length(maf.filter)
length(rare.in.controls)
maf.lt.all[1:5,]
#maf.filter<-as.logical(maf.lt.all[,"MAF.lt:0.001"])
#maf.filter<-as.logical(maf.lt.all[,"MAF.lt:0.01"])
## Population-MAF filter column is selected by the same p ("MAF.lt:0.01").
maf.col<-paste("MAF.lt",p,sep=":")
maf.col
maf.filter<-as.logical(maf.lt.all[,maf.col])
## maf.filter<-as.logical(maf.lt.all[,"MAF.lt:0.5"])
sum(maf.filter)
names(maf.filter)<-key
#pass<- rare.in.group & !no.genotypes & !high.missing & common.loci
####################################
################Poly morphic SITE TESTS
####################################
################Poly morphic SITE TESTS
## --- Repeat-region identification -------------------------------------------
## Flags variants that are themselves short repeats (identify.repeats) or sit
## inside a repeat run in the reference genome (indentify.IN.repeat, checked
## forward then backward, 6 bp context, hg19).  Both helper functions are
## project code — presumably di-/homo-polymer run detection; verify there.
REF.length<-nchar(as.character(a.indel[,"REF"]))
ALT.length<-nchar(as.character(a.indel[,"ALT"]))
large.indel<-REF.length>1 | ALT.length>1
are.repeats<-identify.repeats(a.indel,di.run.max=3,homo.run.max=5)
length(large.indel)
length(are.repeats)
sum(are.repeats)
rownames(a.indel)[are.repeats][1:20]
#################### in repeats looking forward
#chk.in.repeat<-large.indel & !are.repeats
chk.in.repeat<- !are.repeats
are.sub.repeat<-indentify.IN.repeat(a.indel[chk.in.repeat,],looking="forward",bases.about=6,di.run.max=3,homo.run.max=5,genome="BSgenome.Hsapiens.UCSC.hg19")
remove.repeats<-key[chk.in.repeat][are.sub.repeat]
are.in.repeats.forward<- key %in% remove.repeats
remove.repeats[1:20]
sum(are.in.repeats.forward)
## [1] 6988
###################### in repeats looking back are.repeats[789]
sum(chk.in.repeat)
#chk.in.repeat<-large.indel & !are.repeats & !are.in.repeats.forward
## Only re-check positions not already flagged by the forward pass.
chk.in.repeat<- !are.repeats & !are.in.repeats.forward
are.sub.repeat<-indentify.IN.repeat(a.indel[chk.in.repeat,],looking="back",bases.about=6,di.run.max=3,homo.run.max=5,genome="BSgenome.Hsapiens.UCSC.hg19")
remove.repeats<-key[chk.in.repeat][are.sub.repeat]
are.in.repeats.back<- key %in% remove.repeats
remove.repeats[1:20]
sum(are.in.repeats.back)
## [1] 3224
are.in.repeats<- are.in.repeats.back | are.in.repeats.forward
length(are.in.repeats)
sum(are.in.repeats)
##########################################################################
##########################################################################
##########################################################################
## --- Miscellaneous per-variant flags ----------------------------------------
## not.flat.genotype: variant is not an all-identical ("flat") genotype call.
not.flat.genotype<-!qual[,"flat"]
sum(not.flat.genotype)
colnames(qual)
## is.unwound.geno: multi-allelic records that were split ("unwound") into
## per-allele rows, recognised by a trailing ":<digits>" on the TYPE field.
is.unwound.geno<-grepl("snp:\\d+$",a.indel[,"TYPE"]) | grepl("indel:\\d+$",a.indel[,"TYPE"])
#a.indel[!not.flat.genotype,"TYPE"]
#grepl("indel:\\d+$",a.indel[places,"TYPE"])
#is.unwound.geno[places]
## hw.controls.ok*: control genotypes consistent with Hardy-Weinberg.
hwe.control.threshold
hw.p.control.filt[1:50]
hw.p.control[1:50]
hw.controls.ok<-hw.p.control > hwe.control.threshold
hw.controls.ok.filt<-hw.p.control.filt > hwe.control.threshold
sum(hw.controls.ok)
sum(hw.controls.ok.filt)
sum(!hw.controls.ok)
length(hw.controls.ok.filt)
#hw.controls.ok[loci]
## in.common.hit.gene: gene is on the recurrent-artifact ("common hit") list.
in.common.hit.gene <- a.indel[,"Gene.Names"] %in% common.hit.genes
sum(in.common.hit.gene)
the.chr
## on.x.y: sex-chromosome variants (several chromosome naming conventions).
on.x.y<-a.indel[,"chr"] %in% c("X","Y","23","24","chrX","chrY")
sum(on.x.y)
#table(a.indel[,"TYPE"])
snp.only<-grepl("^snp",a.indel[,"TYPE"])
##########################################################################
##########################################################################
##########################################################################
##########################################################################
##########################################################################
##########################################################################
##########################################################################
##########################################################################
##########################################################################
## --- Recode the target phenotype to numeric 0/1 -----------------------------
## For case/control phenotypes, map each class label (names of
## 'case.control.classes') to its numeric code, then coerce the column.
icc<-2
if(target.pheno %in% case.control){
for(icc in 1:length(case.control.classes)){
recode<- pheno[,target.pheno] %in% names(case.control.classes)[icc]
pheno[recode,target.pheno]<-as.numeric(case.control.classes[icc])
}}
pheno[,target.pheno]<-as.numeric(pheno[,target.pheno])
formula
pheno[1:5,]
pheno[,target.pheno]
#no.pheno.samples<-is.na(pheno[,target.pheno]) # these already removed
## pass<-full.qual & functional & maf.filter & rare.in.group & !no.genotypes & not.flat.genotype & !(high.total.missing | nimblegen.total.missing | illumina.total.missing)
## sum(pass)
## 16 3639869 3639869 A C snp nonsynonymous SNV SLX4:NM_032444:exon12:c.T3770G:p.V1257G, (FANCP)
## 17 41197708 41197708 T G snp nonsynonymous SNV BRCA1:NM_007300:exon24:c.A5642C:p.H1881P,BRCA1
## 3 10088285 10088285 T G snp nonsynonymous SNV FANCD2:NM_033084:exon15:c.T1156G:p.F386V
## 3 10088343 10088343 A G snp nonsynonymous SNV FANCD2:NM_033084:exon15:c.A1214G:p.N405S,
## 3 10088412 10088412 G T snp splicing FANCD2(NM_033084:exon15:c.1278+5G>T,NM_001018115:exon15:c.1278+5G>T)
## 3 10089644 10089644 G A snp nonsynonymous SNV FANCD2:NM_033084:exon16:c.G1322A:p.S441N,
## 3 10089738 10089738 A G snp splicing FANCD2(NM_033084:exon16:c.1413+3A>G,NM_001018115:exon16:c.1413+3A>G)
## 3 10105570 10105570 A G snp nonsynonymous SNV FANCD2:NM_033084:exon21:c.A1922G:p.H641R,FANCD2:
## 3 10106408 10106408 C T snp splicing FANCD2(NM_033084:exon23:c.2022-5C>T,NM_001018115:exon23:c.2022-5C>T)
## 3 10106532 10106532 C T snp nonsynonymous SNV FANCD2:NM_033084:exon23:c.C2141T:p.P714L,
## 3 10114944 10114944 A C snp nonsynonymous SNV FANCD2:NM_033084:exon28:c.A2613C:p.K871N,
## 3 10128939 10128939 G A snp nonsynonymous SNV FANCD2:NM_033084:exon34:c.G3457A:p.E1153K,
## 3 10088407 10088410 AGTA - indel frameshift deletion FANCD2:NM_033084:exon15:c.1278_1278del:p.426_426del,
#ENSG00000269323 # junked
## --- Optional: append a dummy FLT3-ITD variant (chr13:28626716) -------------
## NOTE(review): ignore.FLT3 is TRUE, so the condition below is always FALSE
## and this whole block is currently DEAD CODE.  When enabled it fabricates
## one variant row with hand-picked het carriers, summarises it per group,
## and appends it to a.indel / fil.genotypes / summary.geno.extra.
ignore.FLT3<-TRUE
if( ("13" %in% the.chr) & !("chr13:28626716:28626716:C:T:CREST" %in% key) & !ignore.FLT3 ){ # add flt3-ITD in chr213 and not already there
# /media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis/extra_FANC.csv
## extra<-read.delim("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis/extra_FANC.csv",header=T,sep="\t",fill=TRUE,stringsAsFactors=FALSE,na.strings="",quote="\"")
## if(sum(!grepl("^chr",extra[1,"chr"]))>0){
## extra[,"chr"]<-paste("chr",extra[,"chr"],sep="")
## }
## key.extra<-build.key(extra,core.ann)
## figure<- match(key.extra,key)
## pass[figure]
## help[figure,]
## colnames(a.indel)[1:50]
## key[grep("chr17",key)[1:100]]
## grep("chr17:41197708",key)
## key[grep("10088407",key)]
## out<-cbind(a.indel[figure,c(1:6,16,30,34,37:42)],summary.geno.extra[figure,],help[figure,])
## colnames(out)
## out[1:5,]
## write.table(out,file="extra.summary.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
## Start from the first real row as a template, zero every genotype, then
## set "0/1" for the hard-coded carrier sample indices.
extra<-a.indel[1,]
extra[the.samples]<-"0/0"
itd.pos<-paste(c(6,7,12,17,18,22,23,25,26,30,33,42,46:50,54,64,66,75,76,84,91,93,99),"GT",sep=".")
itd.pos
#### Make up some dummy genotypes
extra[itd.pos]<-"0/1"
extra.geno<-extra[the.samples]
## Dummy depth fields: AD "100,0" / DP "100" everywhere, carriers "50,50".
the.samples.AD<-gsub(".GT$",".AD",the.samples)
extra[the.samples.AD]<-"100,0"
the.samples.DP<-gsub(".GT$",".DP",the.samples)
extra[the.samples.DP]<-"100"
itd.pos<-paste(c(6,7,12,17,18,22,23,25,26,30,33,42,46:50,54,64,66,75,76,84,91,93,99),"AD",sep=".")
itd.pos
#### Make up some dummy genotypes
extra[itd.pos]<-"50,50"
extra[core.ann]<-c("chr13","28626716","28626716","C","T","CREST")
extra["Gene.Names"]<-"FLT3"
## Summarise the dummy genotypes per group (AML / Control, raw and .filt),
## matching the column layout of summary.geno.extra.
target<-"AML"
use.samples<-the.samples[pheno[,"SampleProject"]=="1"]
length(use.samples)
genotypes.extra<-t(as.matrix(extra[use.samples]))
dim(genotypes.extra)
aml.extra<-genotype.summary(as.matrix(genotypes.extra))
colnames(aml.extra)<-paste(c("MAF","ALT.Alleles","REF.Alleles","TOTAL.Alleles","MISSING.Alleles","ALT_HOMO","ALT_HETRO","GENO"),target,sep=".")
aml.extra
target<-"Control"
use.samples<-the.samples[pheno[,"SampleProject"]=="0"]
length(use.samples)
genotypes.extra<-t(as.matrix(extra[use.samples]))
dim(genotypes.extra)
control.extra<-genotype.summary(as.matrix(genotypes.extra))
colnames(control.extra)<-paste(c("MAF","ALT.Alleles","REF.Alleles","TOTAL.Alleles","MISSING.Alleles","ALT_HOMO","ALT_HETRO","GENO"),target,sep=".")
control.extra
summary.geno.extra.bit<-cbind(aml.extra,control.extra)
## NOTE(review): the ".filt" summaries below reuse the same raw dummy
## genotypes (no separate filtered matrix exists for this fabricated row).
target<-"AML.filt"
use.samples<-the.samples[pheno[,"SampleProject"]=="1"]
length(use.samples)
genotypes.extra<-t(as.matrix(extra[use.samples]))
dim(genotypes.extra)
control.extra<-genotype.summary(as.matrix(genotypes.extra))
colnames(control.extra)<-paste(c("MAF","ALT.Alleles","REF.Alleles","TOTAL.Alleles","MISSING.Alleles","ALT_HOMO","ALT_HETRO","GENO"),target,sep=".")
control.extra
summary.geno.extra.bit<-cbind(summary.geno.extra.bit,control.extra)
target<-"Control.filt"
use.samples<-the.samples[pheno[,"SampleProject"]=="0"]
length(use.samples)
genotypes.extra<-t(as.matrix(extra[use.samples]))
dim(genotypes.extra)
control.extra<-genotype.summary(as.matrix(genotypes.extra))
colnames(control.extra)<-paste(c("MAF","ALT.Alleles","REF.Alleles","TOTAL.Alleles","MISSING.Alleles","ALT_HOMO","ALT_HETRO","GENO"),target,sep=".")
control.extra
summary.geno.extra.bit<-cbind(summary.geno.extra.bit,control.extra)
## Append the new row everywhere and rebuild the variant key.
summary.geno.extra<-rbind( summary.geno.extra,summary.geno.extra.bit)
a.indel<-rbind(a.indel,extra)
fil.genotypes<-rbind(fil.genotypes,extra.geno)
key<-build.key(a.indel,core.ann)
rownames(a.indel)<-key
## Extend every per-variant filter vector by one element so all filters stay
## in register with the dummy FLT3-ITD row appended to 'a.indel' above.
## TRUE/FALSE values are chosen so the fabricated variant passes each filter.
pass<-c(pass,TRUE)
full.qual<-c(full.qual,TRUE)
bad.effect<-c(bad.effect,TRUE)
maf.filter<-c(maf.filter,TRUE)
rare.in.group<-c(rare.in.group,TRUE)
no.genotypes<-c(no.genotypes,FALSE)
in.common.hit.gene<-c(in.common.hit.gene,FALSE)
not.flat.genotype<-c(not.flat.genotype,TRUE)
hw.controls.ok<-c(hw.controls.ok,TRUE)
## BUG FIX: this line previously appended to 'hw.controls.ok' (which was
## already extended on the line above), producing a vector one element too
## long with wrong values, and making the names()<- assignment further down
## fail on a length mismatch.  Append to hw.controls.ok.filt itself.
hw.controls.ok.filt<-c(hw.controls.ok.filt,TRUE)
on.x.y<-c(on.x.y,FALSE)
unannotated.hits<-c(unannotated.hits,FALSE)
are.repeats<-c(are.repeats,FALSE)
bad.coding<-c(bad.coding,TRUE)
bad.non.coding<-c(bad.non.coding,FALSE)
functional.coding<-c(functional.coding,TRUE)
ok.missing<-c(ok.missing,TRUE)
are.in.repeats<-c(are.in.repeats,FALSE)
ok.missing.filt<-c(ok.missing.filt,TRUE)
## NOTE(review): hw.p.control.filt holds numeric HWE p-values; appending TRUE
## coerces to 1 (i.e. "perfectly in HWE"), which suits the dummy variant.
hw.p.control.filt<-c(hw.p.control.filt,TRUE)
rare.in.group.filt<-c(rare.in.group.filt,TRUE)
no.genotypes.filt<-c(no.genotypes.filt,FALSE)
rare.in.controls.filt<-c(rare.in.controls.filt,TRUE)
is.unwound.geno<-c(is.unwound.geno,FALSE)
## tail(unannotated.hits)
## tail(are.repeats)
## unannotated.hits<-unannotated.hits[1:342792]
## are.repeats<-are.repeats[1:342792]
## Re-key the extended filtered-set vectors ('key' was rebuilt above and now
## includes the FLT3 entry).  BUG FIX: removed the line
## "names(ok.missing.filt.ok)<-key" — no object of that name exists anywhere
## in this script (typo for 'ok.missing.filt', which is named just below),
## so it aborted the run with an "object not found" error.
names(hw.controls.ok.filt)<-key
names(ok.missing.filt)<-key
names(hw.p.control.filt)<-key
names(rare.in.group.filt)<-key
names(no.genotypes.filt)<-key
names(rare.in.controls.filt)<-key
## Attach the rebuilt variant keys to every remaining per-variant vector.
names(pass)<-key
names(full.qual)<-key
names(bad.effect)<-key
names(maf.filter)<-key
names(rare.in.group)<-key
names(no.genotypes)<-key
names(in.common.hit.gene)<-key
names(not.flat.genotype)<-key
names(hw.controls.ok)<-key
names(on.x.y)<-key
names(unannotated.hits)<-key
names(are.repeats)<-key
names(bad.coding)<-key
names(bad.non.coding)<-key
## BUG FIX: was "names(summary.geno.extra)<-key".  summary.geno.extra is a
## per-variant table (one row per variant, many columns), so names()<- either
## errors on a length mismatch or relabels the wrong dimension.  Row names
## are what every other keying of a per-variant table uses in this script
## (cf. rownames(qual)<-key, rownames(high.missing)<-key).
rownames(summary.geno.extra)<-key
names(ok.missing)<-key
names(are.in.repeats)<-key
names(functional.coding)<-key
names(is.unwound.geno)<-key
## Pad maf.lt.all with a duplicate of row 2 as a placeholder MAF record for
## the appended FLT3 variant (keeps row counts aligned; values are dummies).
maf.lt.all<-rbind(maf.lt.all,maf.lt.all[2,]) # dim(maf.lt.all)
## Diagnostic matrix: one column per filter flag, one row per variant.
help<-cbind(full.qual,bad.coding,maf.filter,rare.in.group,no.genotypes,in.common.hit.gene ,hw.controls.ok,on.x.y,unannotated.hits,not.flat.genotype,are.repeats,are.in.repeats,ok.missing,ok.missing.filt,is.unwound.geno,(ok.missing.filt | is.unwound.geno) ,hw.p.control.filt,rare.in.group.filt,no.genotypes.filt,rare.in.controls.filt
)
} # ftl3 additions
## --- Interactive sanity checks ----------------------------------------------
## All filter vectors must have one element per variant (same length as key /
## nrow(summary.geno.extra)); mismatches indicate a vector was not extended.
length(in.common.hit.gene)
length(bad.coding)
length(key)
length(functional.coding)
length(pass)
length(full.qual)
length(bad.effect)
length(maf.filter)
length(rare.in.group)
length(no.genotypes)
length(in.common.hit.gene)
length(not.flat.genotype)
length(hw.controls.ok)
length(on.x.y)
length(unannotated.hits)
length(are.repeats)
length(bad.coding)
length(bad.non.coding)
dim(summary.geno.extra)
length(ok.missing)
length(are.in.repeats)
length(ok.missing.filt)
length(hw.p.control.filt)
length(rare.in.group.filt)
length(no.genotypes.filt)
length(rare.in.controls.filt)
length(is.unwound.geno)
length(hw.controls.ok.filt)
length(hw.controls.ok)
dim( maf.lt.all)
tail(hw.controls.ok.filt)
#hw.controls.ok.filt<-hw.controls.ok.filt[1:342792]
################
#CG indels test output
## no.genotypes.table.AML<-(summary.geno.extra[,c("MAF.AML","MAF.AML.filt")]== 0) | summary.geno.extra[,c("MAF.AML","MAF.AML.filt")]=="NaN" | (is.na( summary.geno.extra[,c("MAF.AML","MAF.AML.filt")])) # no genotypes in test classes for a mutataion after individaul quality filtering
## no.genotypes.table.AML[1:5,]
## no.genotypes.AML<-no.genotypes.table.AML[,"MAF.AML"] # was AND
## no.genotypes.AML.filt<-no.genotypes.table.AML[,"MAF.AML.filt"] # combine.boolean(no.genotypes.table,c("MAF.AML.filt"),"AND") # was AND
## sum(no.genotypes.AML)
## sum(no.genotypes.filt.AML)
## pass<- full.qual & !no.genotypes.AML & !in.common.hit.gene & !are.repeats & !are.in.repeats & !is.unwound.geno & ok.missing & ok.missing.filt & hw.controls.ok.filt & !no.genotypes.filt
## sum(pass)
## sum(full.qual)
## sum(full.qual & !in.common.hit.gene)
## sum(full.qual & !in.common.hit.gene & !are.repeats)
## sum(full.qual & !in.common.hit.gene & !are.repeats & !are.in.repeats)
## sum(full.qual & !in.common.hit.gene & !are.repeats & !are.in.repeats & !is.unwound.geno & hw.controls.ok.filt & hw.controls.ok & !no.genotypes.filt )
## sum(full.qual & !in.common.hit.gene & !are.repeats & !are.in.repeats & !is.unwound.geno & hw.controls.ok.filt & hw.controls.ok & !no.genotypes.filt & ok.missing & ok.missing.filt )
## core<-full.qual & !in.common.hit.gene & !are.repeats & !are.in.repeats & !is.unwound.geno & hw.controls.ok.filt & hw.controls.ok & !no.genotypes.filt
## summary.geno.extra[core & !ok.missing,][1:5,]
## summary.geno.extra[no.genotypes.AML,][1:5,]
## summary.geno.extra[no.genotypes.AML,][1:5,]
#save.image("aml_HC_indel_working.RData")
## --- Interactive sanity checks (repeat of the block above) ------------------
## Duplicated length audit of all per-variant filter vectors.
length(in.common.hit.gene)
length(bad.coding)
length(key)
length(functional.coding)
length(pass)
length(full.qual)
length(bad.coding)
length(maf.filter)
length(rare.in.group)
length(no.genotypes)
length(in.common.hit.gene)
length(not.flat.genotype)
length(hw.controls.ok)
length(on.x.y)
length(unannotated.hits)
length(are.repeats)
length(bad.coding)
length(bad.non.coding)
dim(summary.geno.extra)
length(ok.missing)
length(are.in.repeats)
length(ok.missing.filt)
length(hw.p.control.filt)
length(rare.in.group.filt)
length(rare.in.group)
length(no.genotypes.filt)
length(rare.in.controls.filt)
length(is.unwound.geno)
length(hw.controls.ok.filt)
length(hw.controls.ok)
dim( maf.lt.all)
tail(hw.controls.ok.filt)
## pass<- full.qual & bad.coding & maf.filter & !in.common.hit.gene & !unannotated.hits & not.flat.genotype & !are.repeats & !are.in.repeats &
## ( ok.missing.filt ) & hw.controls.ok.filt & hw.AOGC.ok & hw.AOGC.ok.filt & !no.genotypes.filt & rare.in.controls.filt & rare.in.group & rare.in.AOGC & rare.in.AOGC.filt &
## rare.in.ex.controls.filt & hw.ex.controls.ok.filt & ( ok.missing.filt.ex ) # NMD
######################################################################################################
## --- Binomial alt-allele count threshold (p = 0.001, final) -----------------
## Recompute the rare-in-controls flags at a stricter population MAF (0.001).
## First the scalar threshold (as before), then a PER-VARIANT threshold using
## each variant's own total allele count n: k <= round(sd.thresh*sd + n*p).
n<-max(as.integer(summary.geno.extra[,"TOTAL.Alleles.Control"]))
#n<-max(as.integer(summary.geno.extra[,"TOTAL.Alleles.AOGC"]))
p<-0.001
# p<-0.01 ########### set MAF threshols HEREX1
# p<-0.005 ########### set MAF threshols HEREX1
# p<-0.05 ########### set MAF threshols HEREX1
sd.thresh<-4
n
p
alt.counts.thresh<-1
while( (alt.counts.thresh- n*p) / sqrt(n*p*(1-p)) <= sd.thresh){alt.counts.thresh<-alt.counts.thresh+1}
alt.counts.thresh
## alt.counts.thresh<-1
## while( (alt.counts.thresh- n*p) / sqrt(n*p*(1-p)) <= sd.thresh){alt.counts.thresh<-alt.counts.thresh+1}
## alt.counts.thresh
#colna
## Switch n to the per-variant vector of total control alleles; the scalar
## threshold above is then OVERWRITTEN by this vectorised version.
n<-as.numeric(summary.geno.extra[,"TOTAL.Alleles.Control"])
#n<-as.numeric(summary.geno.extra[,"TOTAL.Alleles.AOGC"])
# n
alt.counts.thresh <- round((sd.thresh*sqrt(n*p*(1-p)) + n*p))
alt.counts.thresh[1:50]
summary.geno.extra[1:5,]
## Note: "<=" here (scalar version earlier used strict "<").
rare.in.Control<-as.numeric(summary.geno.extra[,"ALT.Alleles.Control"])<= alt.counts.thresh
rare.in.Control.filt <-as.numeric(summary.geno.extra[,"ALT.Alleles.Control.filt"])<= alt.counts.thresh
## rare.in.Control<-as.numeric(summary.geno.extra[,"ALT.Alleles.AOGC"])<= alt.counts.thresh
## rare.in.Control.filt <-as.numeric(summary.geno.extra[,"ALT.Alleles.AOGC.filt"])<= alt.counts.thresh
sum(rare.in.Control)
sum(rare.in.Control.filt )
names(rare.in.Control)<-key
names(rare.in.Control.filt )<-key
length(maf.filter)
length(rare.in.Control)
maf.lt.all[1:5,]
maf.lt.all[1:5,]
#maf.filter<-as.logical(maf.lt.all[,"MAF.lt:0.001"])
#maf.filter<-as.logical(maf.lt.all[,"MAF.lt:0.01"])
## maf.filter now follows p = 0.001 ("MAF.lt:0.001").
maf.col<-paste("MAF.lt",p,sep=":")
maf.col
maf.filter<-as.logical(maf.lt.all[,maf.col])
## maf.filter<-as.logical(maf.lt.all[,"MAF.lt:0.5"])
sum(maf.filter)
names(maf.filter)<-key
#pass<- rare.in.group & !no.genotypes & !high.missing & common.loci
## test<-"chr5:68667282:68667282:T:G:snp:68667282"
## summary.geno.extra[test,"ALT.Alleles.Control"]
## rare.in.Control[test]
######################################################################################################
maf.lt.all[1:5,]
#maf.filter<-as.logical(maf.lt.all[,"MAF.lt:0.001"])
#maf.filter<-as.logical(maf.lt.all[,"MAF.lt:0.01"])
#maf.aogc.total<-as.logical(a.indel[,"MAF.lt:0.01"])
## --- Population-MAF flag and benign-missense classification -----------------
## maf.aogc.total: population ("total") MAF-below-threshold flag at maf.col.
maf.aogc.total<-as.logical(a.indel[,maf.col])
## maf.aogc.total<-as.logical(a.indel[,"MAF.lt:0.005"])
maf.aogc.total[1:5]
is.missense<-missense.coding.vep | missense.coding
qual[1:50,"PolyPhen.low"] # true is above 0.1
a.indel[1:50,"PolyPhen.scores"]
a.indel[1:50,c("PolyPhen.desc","SIFT.desc")]
qual[1:5,"SIFT.high"]
a.indel[1:5,"SIFT.scores"]
#predict.benign<-a.indel[,"SIFT.desc"]=="tolerated" & a.indel[,"PolyPhen.desc"]=="benign"
## "Benign" prediction reduced to the PolyPhen.low flag alone (the joint
## SIFT+PolyPhen description test above is commented out).
predict.benign<-qual[,"PolyPhen.low"]
sum((predict.benign))
is.benign.missense<-is.missense & !predict.benign
sum(is.benign.missense)
dim(a.indel)
table(a.indel[,"PolyPhen.desc"])
table(a.indel[,"SIFT.desc"])
## pass<- full.qual & bad.coding & maf.filter & !in.common.hit.gene & !on.x.y & !unannotated.hits & not.flat.genotype & ok.missing.filt & hw.controls.ok.filt & !no.genotypes.filt & rare.in.controls.filt & rare.in.controls & !are.repeats & !are.in.repeats
# & maf.aogc.total # & !is.benign.missense # # & !is.benign.missense # & maf.aogc.total & rare.in.group
pass<- full.qual & bad.coding & maf.filter & !in.common.hit.gene & !unannotated.hits & not.flat.genotype & ok.missing.filt & hw.controls.ok.filt & !no.genotypes.filt & rare.in.Control.filt & rare.in.Control & !are.repeats & !are.in.repeats
#& !are.repeats & !are.in.repeats
## pass.0.001.use<-pass
## pass.0.01.use<-pass
## sum(pass.0.001.use & !pass.0.01.use)
bad.genotypes<-c("chr6:35425714:35425714:-:C:indel","chr11:108126934:108126934:A:T:snp")
bad.genotypes %in% names(pass)
pass[ names(pass) %in% bad.genotypes]<-FALSE
pass.ori<-pass
sum(pass)
sum(pass.ori)
## summary.geno.extra[pass & !pass.3 ,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")][1:5,]
## maf.aogc.total[pass & !pass.3][1:5]
#pass<- full.qual & maf.filter & !in.common.hit.gene & !on.x.y & !unannotated.hits & not.flat.genotype & !are.repeats & !are.in.repeats & ok.missing.filt & hw.controls.ok.filt & !no.genotypes.filt & rare.in.controls.filt & rare.in.group
help<-cbind(full.qual,bad.coding,maf.filter,rare.in.group,no.genotypes,in.common.hit.gene ,hw.controls.ok,on.x.y,unannotated.hits,not.flat.genotype,are.repeats,are.in.repeats,ok.missing,ok.missing.filt,is.unwound.geno,(ok.missing.filt | is.unwound.geno) ,hw.p.control.filt,rare.in.group.filt,no.genotypes.filt,rare.in.controls.filt )
dim(fil.genotypes)
sum(pass)
length(pass)
dim(snpinfo.ori)
snpinfo[1:5,]
a.indel[1:5,1:50]
pass[1:5]
dim(a.indel)
dim(pheno)
length(the.samples)
################################# GEFOS FILTERING cause sending all
# --- Section: extract passing variants and recode genotypes ---------------
# Builds the numeric `genotypes` matrix (samples x SNPs) used by the
# SKAT/burden tests below, restricted to variants where `pass` is TRUE.
## snpinfo[grep("chr13:28626716:28626716:C:T:CREST",snpinfo[,"Name"]),]
## snpinfo.ori[grep("chr13:28626716:28626716:C:T:CREST",snpinfo.ori[,"Name"]),]
## table(a.indel[pass,"refGene::gene"])
## table(a.indel[pass,"Gene.name"])
#pass<-pass[the.snps] ### GEOFS
genotypes<-a.indel[pass,the.samples] ## ordered correctly for phenotypes and have phenotypes
#genotypes<-fil.genotypes[pass,the.samples]
snp.names<-key[pass] ## GEFOS ony name with start
#### snpinfo now A different size than a.indel since added pathways!!!
snpinfo<-snpinfo.ori[snpinfo.ori[,"Name"] %in% snp.names,]
if( sum(!(snp.names %in% snpinfo.ori[,"Name"]))>0){print("WARINING snp.names not in snpinfo- unusual!")}
dim(snpinfo)
length(snp.names)
dim(genotypes)
# 414 639
print("start QC")
#RNPC3
# Recode VCF-style genotype strings to allele dosage (0/1/2).
genotypes[genotypes=="NA"]<-NA
genotypes[genotypes=="0/0"]<-0
genotypes[genotypes=="0/1"]<-1
genotypes[genotypes=="1/1"]<-2
########### prevent any averaging
dim(genotypes)
# Missing genotypes are set to 0 (homozygous reference) rather than being
# imputed/averaged by the association code.
genotypes[is.na(genotypes)]<-0
dim(genotypes)
########### prevent any averaging
########################################## GEFOS MINOR ALLELE TRANSFORMATION
## flip.geno<-gsub("2","3",genotypes[to.flip,])
## #flip.geno[1:15,1:10]
## flip.geno<-gsub("0","2",flip.geno)
## flip.geno<-gsub("3","0",flip.geno)
## genotypes[to.flip,]<-flip.geno
##########################################################################
# Convert the character matrix to numeric, restore dimensions, and
# transpose so rows are samples and columns are SNPs (seqMeta convention).
num.col<-dim(genotypes)[2]
num.row<-dim(genotypes)[1]
## genotypes[1:5,1:20]
genotypes<-as.numeric(as.matrix(genotypes))
dim(genotypes)<-c(num.row,num.col)
genotypes<-t(genotypes) # samples x SNPS
colnames(genotypes)<-snp.names
rownames(genotypes)<-gsub(".GT$","",the.samples)
#################################
dim(genotypes)
dim(pheno)
pheno[1:5,]
snpinfo[1:5,]
genotypes[1:5,1:5]
formula
# --- Section: cluster-level burden / SKAT / SKAT-O association tests ------
# skatCohort precomputes per-SNP scores once; burdenMeta/skatMeta/skatOMeta
# then aggregate by the "cluster" column of snpinfo (genes AND pathway
# clusters). Results are sorted by p-value for inspection.
# cohort.seq.gene<- skatCohort(Z=genotypes,formula, SNPInfo = snpinfo, data=pheno,family=binomial(),aggregateBy="gene",verbose=FALSE)
## meta.results.burden.gene<-burdenMeta(cohort.seq.gene,wts=1,mafRange = c(0,1),SNPInfo = snpinfo,aggregateBy = "gene")
## meta.results.skatO.gene<-skatOMeta(cohort.seq.gene,burden.wts =1,SNPInfo = snpinfo,aggregateBy="gene")
## the.order.gene<- order(meta.results.burden.gene[,"p"])
## meta.results.burden.gene<-meta.results.burden.gene[the.order.gene,]
## meta.results.burden.gene[1:50,]
## the.order.gene<- order(meta.results.skatO.gene[,"p"])
## meta.results.skatO.gene<-meta.results.skatO.gene[the.order.gene,]
## meta.results.skatO.gene[1:50,]
cohort.seq <- skatCohort(Z=genotypes,formula, SNPInfo = snpinfo, data=pheno,aggregateBy="cluster",family=binomial(),verbose=FALSE) ## genes and clusters
meta.results.burden<-burdenMeta(cohort.seq,wts=1,mafRange = c(0,1),SNPInfo = snpinfo,aggregateBy="cluster")
meta.results.skat<-skatMeta(cohort.seq,SNPInfo = snpinfo,aggregateBy="cluster")
meta.results.skatO<-skatOMeta(cohort.seq,burden.wts =1,SNPInfo = snpinfo,aggregateBy="cluster")
the.order<- order(meta.results.burden[,"p"])
sum(is.na(meta.results.burden[,"p"])) ## bad p-values shoudl not happen
meta.results.burden<- meta.results.burden[the.order,]
meta.results.burden[meta.results.burden[,"gene"] %in% clusters.wanted,]
meta.results.burden[1:50,]
the.order<- order(meta.results.skat[,"p"])
meta.results.skat<- meta.results.skat[the.order,]
meta.results.skat[1:50,]
the.order<- order(meta.results.skatO[,"p"])
sum(is.na(meta.results.skatO[,"p"])) ## bad p-values shoudl not happen
meta.results.skatO<- meta.results.skatO[the.order,]
meta.results.skatO[1:50,]
meta.results.skatO[meta.results.skatO[,"gene"] %in% clusters.wanted,]
## meta.results.burden.gene[meta.results.burden.gene[,"gene"] %in% fanc.genes,]
## snpinfo.ori<-snpinfo
## meta.results.burden.gene[meta.results.burden.gene[,"gene"] %in% other.clusters[,2],]
## meta.results.burden.gene[meta.results.burden.gene[,"gene"] %in% other.clusters[,3],]
## meta.results.skatO.gene[meta.results.skatO.gene[,"gene"] %in% clinical.genes,]
## meta.results.skatO.gene[meta.results.skatO.gene[,"gene"] %in% fanc.genes,]
## meta.results.skatO[meta.results.skatO[,"gene"] %in% clusters.wanted,]
# --- Section: choose the output tag for this run --------------------------
# Each `snap.file` assignment overwrites the previous one; only the LAST
# uncommented assignment ("TCGA.coding.0.001.FINAL") takes effect. The
# analyst selects a run by executing one line interactively.
setwd(analysis.dir)
getwd()
snap.file<-"coding.0.01.all"
snap.file<-"coding.0.001.all"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT_no_benign"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT_no_benign"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_wSTRAT"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT_rare"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_no_benign"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper"
snap.file<-"coding.0.01.all.geno.all.filters"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_HC_indels"
snap.file<-"TCGA.coding.0.01.FINAL"
snap.file<-"TCGA.coding.0.001.FINAL"
p
# Write the top-50 and cluster-restricted association results to
# tab-delimited files named <Test>[.<subset>].<snap.file>.txt.
# Fix(review): the "Skat.clusters" file previously subset meta.results.skatO
# rows using a gene mask built from meta.results.skat — the two tables were
# each re-sorted by their own p-values, so rows and mask did not align; it
# now writes meta.results.skat rows selected by meta.results.skat genes.
# Exact duplicate write.table calls (same data, same file) were removed.
write.table(meta.results.burden[1:50,],file=paste("Burden","Top50",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(meta.results.burden[meta.results.burden[,"gene"] %in% clusters.wanted,],file=paste("Burden","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(meta.results.skat[1:50,],file=paste("Skat",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(meta.results.skatO[1:50,],file=paste("SkatO",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(meta.results.skat[meta.results.skat[,"gene"] %in% clusters.wanted,],file=paste("Skat","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(meta.results.skatO[meta.results.skatO[,"gene"] %in% clusters.wanted,],file=paste("SkatO","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
# --- Section: persist annotations and session snapshots -------------------
# `annotations` keeps a fixed set of a.indel columns (selected by position)
# for the per-variant output tables built later. The save()/save.image()
# calls snapshot the run; the trailing load() calls are for interactively
# restoring a previous snapshot (only the last one executed wins).
colnames(a.indel)[c(1:8,13,16,28,7,30,34,37:42,43,14,32,33)] # 1276,1310
annotations<-a.indel[,c(1:8,13,16,28,7,30,34,37:42,43,14,32,33)]
save(list=c("case.control","snpinfo.ori","formula","clusters","pheno.types","ipheno","clusters.wanted","genotypes","p","meta.results.skat","meta.results.skatO","meta.results.burden","pheno","target.pheno.col","snpinfo","fil.genotypes","pass","high.missing.table","a.indel","help","key","summary.geno.extra","full.qual","bad.effect","maf.filter","in.common.hit.gene","on.x.y","unannotated.hits","not.flat.genotype","are.repeats","are.in.repeats","ok.missing","hw.controls.ok.filt","no.genotypes","rare.in.Control","rare.in.Control.filt","in.any.normal","in.any.normal.filt","are.in.repeats.back","are.in.repeats.forward","all.genes"),file=paste(paste(project.files[ichr],".",pheno.types[ipheno],".",snap.file,".small_final.RData",sep="")) )
getwd()
save.image(file=paste(snap.file,"RData",sep="."))
## save(list=c("clusters.wanted"),file="clusters.wanted.RData")
## getwd()
## load(paste(snap.file,"RData",sep="."))
## setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-11-04_AML_TCGA_Replication/Analysis")
## load("AML_TCGA_image_paper.coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP.RData")
## setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis")
# NOTE(review): hard-coded absolute paths below are machine-specific.
load("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis/coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign.RData")
load("AML_HC_image_paper.coding.0.01.all.geno.all.filters_no.imput_paper.RData")
load("AML_AOGC_image.coding.0.001.all.geno.all.filters.NEW.RData")
load("coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign.RData")
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
##################### RELOAD########################
# --- Section: entry point for restarting from a saved snapshot ------------
# Reattaches the packages, reloads the snap.file RData image, and rebuilds
# the (intercept-only) model formula used by skatCohort.
library(skatMeta) ## ridge regression
#library(SKAT) ## skat method
library(GenomicFeatures)
library(HardyWeinberg)
library(Biostrings)
## analysis.dir<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis"
## setwd(analysis.dir)
## getwd()
## snap.file<-"coding.0.01.all.geno.all.filters_no.imput"
## snap.file<-"coding.0.01.all.geno.all.filters.NEW"
## snap.file<-"coding.0.001.all.geno.all.filters.NEW"
# NOTE(review): assumes `snap.file` is already defined in the session.
load(paste(snap.file,"RData",sep="."))
options(width=200)
meta.results.burden[1:50,]
meta.results.skat[1:50,]
meta.results.skatO[1:50,]
# Null-model formula: SampleProject ~ 1 (no covariates).
covars<-"1"
target.pheno.col<-"SampleProject"
formula<-paste(target.pheno.col,"~",paste(covars,collapse="+"),sep="")
print(formula)
formula<-formula(formula)
pheno[1:5,]
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
# --- Section: "unwind" selected clusters into their member genes ----------
# Picks a set of cluster names (to.unwind), expands them to member genes via
# snpinfo, and writes per-gene burden/SKAT-O results for those genes.
# Many alternative `test`/`to.unwind` choices are kept for interactive use;
# only the last executed assignment matters.
## test<-c("MYCBP2","SLC25A24","TMCO3","C13orf35")
## test<-c("MYCBP2","SLC25A24","TMCO3","C13orf35")
## test<-c("chr22:41252435-41252687:ST13")
## test<-c("SETD8")
test<-c("SEC61A1","ST14","GPANK1","EEF1A2")
test<-c("C19orf40")
test<-c("FANCP")
test<-c("IDH1")
test<-c("clinical") # not sig after coverage filtering at 0.01 0.1997688 from 0.000142
test<-clinical.genes
test<-fanc.genes
snpinfo[1:5,]
a.cluster<-"random.745"
test<-snpinfo[snpinfo[,"cluster"]==a.cluster,"gene"]
test
test<-c("LOC100268168") #,"NOTCH1")
snpinfo[1:5,]
## meta.results.skat[meta.results.skat[,"gene"] %in% test,]
meta.results.burden[1:20,]
meta.results.burden[meta.results.burden[,"gene"] %in% clusters.wanted,]
test<-snpinfo[snpinfo[,"cluster"]==a.cluster,"cluster"]
meta.results.burden[meta.results.burden[,"gene"] %in% test,]
to.unwind<-c(meta.results.burden[1:50,"gene"],meta.results.skatO[1:50,"gene"])
#to.unwind<-c("FANCD2_minimal_mono_ubi") #, "MCM7", "RNPC3")
to.unwind<-c("FANC_complex.all") # to.unwind<-meta.results.burden[8,"gene"]
#to.unwind<-c("BLM.Complex_AND_Checkpoint")
#to.unwind<-c("NPM1")
## to.unwind<-c("FLT3")
## to.unwind<-c("Clinical")
## to.unwind<-c("DDX41","TET2", "GATA2", "ASXL1", "NOTCH1", "IDH1", "JAK2","MET")
## to.unwind %in% meta.results.burden[,"gene"]
## to.unwind %in% meta.results.skatO[,"gene"]
## dim( meta.results.burden)
to.unwind<-c(clusters.wanted[!(clusters.wanted %in% c("Ubin.proteo","lipid_raft","caveolae","Checkpoint_extendedx1","Checkpoint_extendedx2"))])
clusters.wanted.subset<-c("FANC_complex.all", "Clinical" ,"BRCA.Genes","RAD51.Paralogues","BLM.Complex","Checkpoint.Proteins","citric","Citric_final","BLM.Complex_AND_Checkpoint","FANCD2_minimal_mono_ubi","MLH_cluster","Richard", "C1Alpha","C1Delta","C1Beta","C2","C3","C4","C5","MRC","MRC_IDH","C1","NDUFA","NDUFB","NDUFS","NDUFCV")
to.unwind<-c(clusters.wanted.subset)
# Output files are tagged with the FIRST unwound cluster's name.
to.unwind.name<-to.unwind[1]
to.unwind
#to.unwind.name<-to.unwind
# to.unwind.name<-"Collaboration_Genes"
# to.unwind.name<-"ALL_significant"
# to.unwind.name<-"ALL_significant"
# First expansion: clusters -> member rows of snpinfo -> unique gene names.
snpinfo.ex<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,]
loci<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,"Name"] # this is IDH1 not IDH1 in cluster # are the snp.names
the.genes<-unique(snpinfo.ex[,"gene"])
the.genes<-the.genes[!(the.genes %in% clusters.wanted)]
the.genes #245 ### if used a cluster name need to do back up to (**)
############repest to clean out cluster names
# Second pass re-runs the same expansion with the gene list, so `loci`
# covers the genes themselves rather than the cluster aliases.
to.unwind<-the.genes
snpinfo.ex<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,]
loci<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,"Name"] # this is IDH1 not IDH1 in cluster # are the snp.names
the.genes<-unique(snpinfo.ex[,"gene"])
the.genes<-the.genes[!(the.genes %in% clusters.wanted)]
the.genes
meta.results.skatO[1:50,]
meta.results.burden[1:50,]
the.genes.burden<-meta.results.burden[meta.results.burden[,"gene"] %in% the.genes,]
# the.genes.burden
write.table(the.genes.burden,file=paste(to.unwind.name,"conponents:","Burden","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
the.genes.burden<-meta.results.skatO[meta.results.skatO[,"gene"] %in% the.genes,]
#the.genes.burden
write.table(the.genes.burden,file=paste(paste(to.unwind.name,collapse="."),"conponents:","SkatO","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
# --- Section: per-locus tests and carrier lists for the unwound loci ------
# Re-runs the association machinery aggregated by single SNP ("Name") and by
# cluster on just the unwound loci, then records which samples carry each
# variant, split by case/control status.
#subset<-(rownames(genotypes) %in% loci) # indicated in genotypes
# NOTE(review): `subset` is only defined by the commented line above (or much
# later at the plotting section); run interactively these two sums relied on
# session state — verify before scripting this.
sum(subset)
length(subset)
snpinfo[1:5,]
dim(genotypes)
genotypes[1:5,1:5]
genotypes.ex<-genotypes[,loci]
dim(genotypes.ex)
genotypes.ex[is.na(genotypes.ex)]<-0
dim(genotypes.ex)
snpinfo.ex<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,]
dim(snpinfo.ex)
dim(genotypes.ex)
dim(pheno)
snpinfo.ex[1:5,]
# summary.geno.extra[loci,]
#high.missing[loci,]
sum(are.in.repeats[loci])
# qual[loci,]
## snpinfo[1:5,]
## qual[1:5,c("FILTER_PASS", "FILTER_100" )]
# Per-SNP (aggregateBy="Name") burden results: one p-value per variant.
cohort.seq.ex <- skatCohort(genotypes.ex, formula, SNPInfo = snpinfo.ex, data=pheno,aggregateBy = "Name",verbose=FALSE)
## meta.results.skat.ex<-skatMeta(cohort.seq,SNPInfo = snpinfo)
meta.results.burden.ex<-burdenMeta(cohort.seq.ex,wts=1,mafRange = c(0,1),SNPInfo = snpinfo.ex,aggregateBy = "Name")
#meta.results.burden.ex
pheno[1:5,]
cohort.seq.test <- skatCohort(genotypes.ex, formula, SNPInfo = snpinfo.ex, data=pheno,aggregateBy = "cluster",verbose=FALSE)
meta.results.burden.test<-burdenMeta(cohort.seq.test,wts=1,mafRange = c(0,1),SNPInfo = snpinfo.ex,aggregateBy = "cluster")
#meta.results.burden.test
## meta.results.skat.ex<-skatMeta(cohort.seq,SNPInfo = snpinfo)
meta.results.skatO.test<-skatOMeta(cohort.seq.test,burden.wts =1,SNPInfo = snpinfo.ex,aggregateBy="cluster")
#meta.results.skatO.test
# Comma-separated sample IDs carrying a non-reference genotype per variant,
# split by SampleProject (1 = one group, 0 = the other).
muts.in.cases<-apply(genotypes.ex[pheno[,"SampleProject"]==1,],2,function(x) { paste(names(x)[x!=0 & !is.na(x)],collapse=",")})
muts.in.controls<-apply(genotypes.ex[pheno[,"SampleProject"]==0,],2,function(x) { paste(names(x)[x!=0 & !is.na(x)],collapse=",")})
figure<- match(loci,key)
########################################################
# --- Section: pull per-carrier sequencing depth (DP) for each locus -------
# For every unwound locus, look up the ".DP" columns of the carrier samples
# in a.indel and collapse them to a comma-separated string, separately for
# cases and controls. `figure` maps loci to a.indel row positions.
check<-16
quality.cases<-rep("",times=length(loci))
quality.controls<-rep("",times=length(loci))
a.indel.sub<-a.indel[figure,]
for(check in 1:length(loci)){
print(check)
#check<-"chr11:130066457:130066457:-:A:indel"
# posn<-grep(loci[check],key)
posn<-check
if(muts.in.cases[check]!=""){
#the.gt<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GT",sep=".")
#the.gq<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GQ",sep=".")
# Despite the name `the.gq`, DP (depth) columns are extracted here.
the.gq<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"DP",sep=".")
quality.cases[check]<-paste(a.indel.sub[posn,the.gq],collapse=",")
## a.indel[posn,the.gq]
## a.indel[posn,the.gt]
## a.indel[posn,the.dp]
}
if(muts.in.controls[check]!=""){
#the.gt<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"GT",sep=".")
#the.gq<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"GQ",sep=".")
the.gq<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"DP",sep=".")
quality.controls[check]<-paste(a.indel.sub[posn,the.gq],collapse=",")
## a.indel[posn,the.gq]
## a.indel[posn,the.gt]
## a.indel[posn,the.dp]
}
} # end check
##########################################################################
# --- Section: assemble and write the per-variant genotype/annotation table
# Combines per-SNP burden results, MAF columns, PolyPhen/SIFT predictions,
# fixed annotation columns, genotype summaries, all individual filters, and
# the carrier/depth strings built above into one table per unwound set.
#figure
length(figure)
dim(meta.results.burden.ex)
length(muts.in.cases)
length(muts.in.controls)
#pass[figure]
#help[figure,]
dim(annotations)
dim(help)
dim(summary.geno.extra)
length(figure)
# Sanity check: per-SNP results should line up 1:1 with `loci` (expect 0).
sum(meta.results.burden.ex[,"gene"]!=loci)
## colnames(a.indel)[1:50]
## key[grep("chr17",key)[1:100]]
## grep("chr17:41197708",key)
## key[grep("10088407",key)]
#out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,28,7,30,34,37:42,43)],summary.geno.extra[figure,],high.missing[figure,],help[figure,])
## out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,28,7,30,34,37:42,43,14,32,33)],summary.geno.extra[figure,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")],high.missing[figure,])
## summary.geno.extra[figure,]
## annotations[figure,]
## help[figure,]
dim(meta.results.burden.ex)
#out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,43,28,7,30,34,37:42)],summary.geno.extra[figure,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")],help[figure,],muts.in.cases,muts.in.controls)
maf.old<-a.indel[,"MAF.lt:0.01" ]
maf.new<-maf.lt.all[,"MAF.lt:0.01"]
maf.aogc<-a.indel[,"AOGC-NGS_ALL::maf"]
a.functions<-a.indel[,c("PolyPhen.scores","SIFT.scores","PolyPhen.desc","SIFT.desc")]
out<-cbind(meta.results.burden.ex,maf.new[figure],maf.old[figure],maf.aogc[figure],a.functions[figure,],is.benign.missense[figure],annotations[figure,],summary.geno.extra[figure,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")],help[figure,],muts.in.cases,quality.cases,muts.in.controls,quality.controls)
#out<-cbind(meta.results.burden.ex,annotations[figure,],muts.in.cases,muts.in.controls)
## table(out[,"refGene::location"])
## table(out[,"Consequence.Embl"])
paste(paste(to.unwind,collapse="."))
paste(to.unwind.name,collapse=".")
paste(paste(to.unwind.name,collapse="."),"GENOTYPE.conponents:","SkatO","clusters",snap.file,"txt",sep=".")
write.table(out,file=paste(paste(to.unwind.name,collapse="."),"GENOTYPE.conponents:","SkatO","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
getwd()
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
# --- Section: genomic-inflation (lambda) and QQ plots ---------------------
# Converts burden p-values (genes only, clusters excluded) to chi-squared
# quantiles, estimates lambda, and draws an annotated QQ plot using
# my.qq.plot()/qq.data() defined further down in this file.
meta.results.burden[1:50,]
meta.results.burden[meta.results.burden[,"gene"] %in% clusters.wanted,]
the.order<- order(meta.results.skat[,"p"])
meta.results.skat<- meta.results.skat[the.order,]
meta.results.skat[1:50,]
the.order<- order(meta.results.skatO[,"p"])
sum(is.na(meta.results.skatO[,"p"])) ## bad p-values shoudl not happen
meta.results.skatO<- meta.results.skatO[the.order,]
meta.results.skatO[1:50,]
dim(clusters)
# Genes that belong to a wanted cluster; excluded from the "clean" QQ set.
snpinfo.sub<-snpinfo[snpinfo[,"cluster"] %in% clusters.wanted,]
genes.cl<-unique(snpinfo.sub[,"gene"])
genes.cl<-genes.cl[!(genes.cl %in% clusters.wanted)]
genes.cl
clusters.wanted
genes.and.clusters<-c(genes.cl,clusters.wanted)
meta.results.burden[1:5,]
#################################### just want to plot
# NOTE(review): `subset` here shadows base::subset (harmless interactively).
subset<-meta.results.burden[ !(meta.results.burden[,"gene"] %in% genes.and.clusters) ,]
subset[1:5,]
dups<-duplicated(subset[,"gene"])
sum(dups)
z<-qchisq(subset[ ,"p"],df=1,ncp=0,lower.tail=FALSE,log.p=FALSE) ## if have no chisq valuse
#z0<-qchisq(meta.results.skatO[ !(meta.results.skatO[,"gene"] %in% genes.and.clusters ) ,"p"],df=1,ncp=0,lower.tail=FALSE,log.p=FALSE)
z[1:5]
subset[1:10,]
p.val<-as.numeric(subset[ ,"p"])
par(mfrow=c(1,1),font=2,font.lab=2,font.axis=2,mgp=c(3.5,1,0),mar=c(5,5,4,2)+0.1)
## z<-rchisq(length(p.val), df=1, ncp = 0) ## null test
median(z,na.rm=TRUE)/0.456 #1.071491
# NOTE(review): z0 is not defined until two lines below — this line errors if
# run top-to-bottom; it only worked against leftover session state.
median(z0,na.rm=TRUE)/0.456 #1.071491
################## p-values
z0=qnorm(p.val/2)
# NOTE(review): the lambda denominator is 0.454 here but 0.456 above —
# confirm which constant (median of chisq_1) was intended; 0.456 is usual.
lambda = round(median(z0^2)/0.454,3)
lambda
## source("http://bioconductor.org/biocLite.R")
## biocLite("GWASTools")
## setRep
## qq.Plot(pvals)
## Reads data
## S <- read.table(input,header=F)
## if (stat_type == "Z")
## z=S[,1]
## if (stat_type == "CHISQ")
## z=sqrt(S[,1])
## if (stat_type == "PVAL")
## z0=qnorm(meta.results.skatO[,"p"]/2)
## ## calculates lambda
lambda = round(median(z0^2)/.454,3)
## lambda
range(z)
the.plot<-my.qq.plot(z,dist="chisq",df=1,ncp=0,col="blue",ylab="Observed chi-squared value",xlab="Expected chi-squared value",main="",cex=1,xlim=c(0,22),ylim=c(0,80),cex.lab=2.0,cex.axis=2.0,font.lab=2,font.axis=2,lwd=2,line="robust",plot.it=TRUE) # function defined below
# Overlay the "all genes" (clusters removed) set in magenta on the same axes.
z.all<-qchisq(meta.results.burden[ !(meta.results.burden[,"gene"] %in% clusters.wanted) ,"p"],df=1,ncp=0,lower.tail=FALSE,log.p=FALSE)
range(z.all)
qq<- qq.data(z.all,plot.it=FALSE) ## qq plot used same method as in car library
points(qq$x,qq$y,col="magenta",pch=21)
symbols<-meta.results.burden[!(meta.results.burden[,"gene"] %in% clusters.wanted),"gene"]
#symbols<-meta.results.skatO[,"gene"]
#####annotate curve
# identify() is interactive: click points on the open device to label them.
selected.data<-identify(qq$x,qq$y,labels=symbols[qq$ord],col="red",cex=1,offset=1,atpen='TRUE') ##plate row col symbol
selected.data<-identify(qq$x,qq$y,labels=labels[qq$ord],col="red",cex=1,atpen='TRUE') ## sybmol
selected.data<-identify(qq$x,qq$y,labels=as.character(round(data.in[qq$ord],2)),col="forestgreen",cex=1.25,atpen='TRUE') # observed score
#####
leg.txt<-c("All Genes","Remove Clinical Genes")
legend(2,60,leg.txt,col=c("magenta","blue"),lty=c(-1,-1,2),pch=c(1,1,-1),cex=2.25)
label<-"AML_paper_0.01_all_filters_NO_STRAT"
savePlot(paste(label,"tiff",sep="."),type="tiff")
savePlot(paste(label,"png",sep="."),type="png")
save.image("qq.AS.paper.final.RData")
# Simple -log10 QQ plot of the same p-values.
o <- -(log10(p.val))
e <- -log10( 1:length(o)/length(o) )
plot(e,o, pch=23, cex=.4, bg="black",main=hlabel, ylab="Observed -log10 P value",xlab="Expected -log10 P value")
abline(coef=c(0,1), col=1, lwd=2)
## qq <- parse.pvals.qq(gene.region.compare[,"p.skatO"],lim=lim)
## qq <- parse.pvals.qq(all.res.gene[,"p.skatO"],lim=lim)
qq <- parse.pvals.qq(gene.region.compare[,"p.skatO"],lim=lim)
qq <- parse.pvals.qq(all.res.gene[,"p.skatO"],lim=lim)
ylab = expression(Observed~~-log[10](italic(p)))
xlab = expression(Expected~~-log[10](italic(p)))
plot(qq$e,
qq$o,
xlim=c(0,lim),ylim=c(0,lim),
pch=20,col='deepskyblue',
xlab=xlab,ylab=ylab, main="Gene Based")
abline(coef=c(0,1), col=1, lwd=2)
savePlot("FilteredGeneBased.jpg",type="jpeg")
#write.table(geno.all,file=paste(project.name,fam[ifam],"geno.all.txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
#geno.all<-read.delim(paste(project.name,fam[ifam],"geno.all.txt",sep="."),header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
# QQ-plot coordinates for a vector of p-values on the -log10 scale.
# Observed values are the sorted p-values transformed by -log10 and truncated
# at `lim`; expected values are -log10 of the uniform plotting positions i/n.
# Returns list(o = observed, e = expected).
parse.pvals.qq <- function(pvector, lim = 7) {
  sorted_p <- sort(pvector, decreasing = FALSE)
  observed <- pmin(-log10(sorted_p), lim)  # cap extreme p-values at lim
  expected <- -log10(seq_along(observed) / length(observed))
  list(o = observed, e = expected)
}
# --- Section: -log10 QQ plot for an external result set (data.van) --------
# Assumes `data.van` has a P column and `hlabel` holds the plot title —
# both must exist in the session; neither is defined in this file chunk.
data <- data.van
Mobspval <- sort(data$P)
# Drop exact zeros before -log10 (they would map to Inf).
Mobspval <- Mobspval[!Mobspval==0]
o <- -(log10(Mobspval))
e <- -log10( 1:length(o)/length(o) )
#Mobsmax <- 3 #trunc(max(Mlogobspval))+1
#Mexpmax <- trunc(max(Mlogexppval))+1
#if (is.infinite(Mobsmax)) {Mobsmax <- 3} else {Mobsmax <- Mobsmax}
#plot(c(0,Mexpmax), c(0,Mexpmax), col="gray", lwd=1, type="l", xlab="Expected -log10 P value", ylab="Observed -log10 P value", xlim=c(0,Mexpmax), ylim=c(0,Mobsmax), las=1, xaxs="i", yaxs="i", bty="l",main=hlabel)
#plot(c(0,Mexpmax), c(0,Mexpmax), col="gray", lwd=1, type="l", xlab="Expected -log10 P value", ylab="Observed -log10 P value", las=1, xaxs="i", yaxs="i", bty="l",main=hlabel)
# plot(c(0,Mexpmax), c(0,Mexpmax), col="gray", lwd=1, type="l", xlab="Expected -log10 P value", ylab="Observed -log10 P value", las=1, xaxs="i", yaxs="i", bty="l",main=hlabel)
#points(Mlogexppval,Mlogobspval, pch=23, cex=.4, bg="black")
plot(e,o, pch=23, cex=.4, bg="black",main=hlabel, ylab="Observed -log10 P value",xlab="Expected -log10 P value")
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
# QQ plot of observed statistics against theoretical quantiles of a reference
# distribution (default chi-squared), patterned after car's qq.plot.
#
# Args:
#   x            numeric vector of observed statistics (NAs are dropped).
#   distribution base-R distribution suffix; "chisq" => qchisq()/dchisq().
#   df, ncp      parameters forwarded to the quantile/density functions.
#   envelope     confidence level of the pointwise envelope, or FALSE to skip.
#   labels       FALSE, TRUE, or a label vector; non-FALSE triggers the
#                interactive identify() step on the plotted points.
#   line         reference line: "quartiles" (through quartile pairs),
#                "robust" (MASS::rlm fit) or "none".
#   plot.it      if FALSE, compute everything but draw nothing.
# Returns (invisibly): list(result, a, b, x, y, ord, upper, lower) where
#   x/y are expected/observed quantiles, a/b the reference-line intercept/
#   slope, upper/lower the envelope, ord the ordering permutation, and
#   result the (sorted) indices picked via identify(), or NULL.
#
# Fix(review): a, b, upper and lower were referenced in the return value even
# when line == "none" or envelope == FALSE, so those paths errored with
# "object 'b' not found"; they are now initialised to NULL. The original also
# computed sort(result) and discarded it; the sorted result is now returned.
my.qq.plot <- function(x, distribution = "chisq", df = 1, ncp = 0,
                       ylab = deparse(substitute(x)),
                       xlab = paste(distribution, "quantiles"),
                       main = NULL, las = par("las"),
                       envelope = 0.95, labels = FALSE,
                       col = palette()[2], lwd = 2,
                       pch = 1, cex = 1,
                       line = c("quartiles", "robust", "none"),
                       xlim = c(0, 100), ylim = c(0, 20),
                       font.lab = 2, font.axis = 2, font.main = 2,
                       cex.lab = 2.5, cex.axis = 1.0, plot.it = TRUE, ...) {
  result <- NULL
  # Only assigned on some paths below; NULL keeps the return value valid.
  a <- NULL
  b <- NULL
  upper <- NULL
  lower <- NULL
  line <- match.arg(line)
  good <- !is.na(x)
  ord <- order(x[good])
  ord.x <- x[good][ord]
  # Resolve the quantile/density functions for the requested distribution.
  q.function <- match.fun(paste("q", distribution, sep = ""))
  d.function <- match.fun(paste("d", distribution, sep = ""))
  n <- length(ord.x)
  P <- ppoints(n)
  z <- q.function(P, df = df, ncp = ncp, ...)
  if (plot.it) {
    plot(z, ord.x, xlab = xlab, ylab = ylab, main = main, las = las,
         col = col, pch = pch, cex = cex, xlim = xlim, ylim = ylim,
         font.lab = font.lab, font.axis = font.axis, font.main = 2,
         cex.lab = cex.lab, cex.axis = cex.axis)
  }
  if (line == "quartiles") {
    # Reference line through the quartile pairs (robust to tail inflation).
    Q.x <- quantile(ord.x, c(0.25, 0.75))
    Q.z <- q.function(c(0.25, 0.75), df = df, ncp = ncp, ...)
    b <- (Q.x[2] - Q.x[1]) / (Q.z[2] - Q.z[1])
    a <- Q.x[1] - b * Q.z[1]
    if (plot.it) {
      abline(a, b, col = "red", lwd = lwd)
    }
  }
  if (line == "robust") {
    # Robust linear fit of observed on expected quantiles (MASS::rlm).
    if (!require("MASS")) {
      stop("MASS package not available")
    }
    coef <- coefficients(rlm(ord.x ~ z))
    a <- coef[1]
    b <- coef[2]
    if (plot.it) {
      abline(a, b, col = "red")
    }
  }
  # Pointwise normal-approximation confidence envelope around the line.
  if (line != "none" && envelope != FALSE) {
    zz <- qnorm(1 - (1 - envelope) / 2)
    SE <- (b / d.function(z, df = df, ncp = ncp, ...)) * sqrt(P * (1 - P) / n)
    fit.value <- a + b * z
    upper <- fit.value + zz * SE
    lower <- fit.value - zz * SE
    if (plot.it) {
      lines(z, upper, lty = 2, lwd = lwd, col = "red")
      lines(z, lower, lty = 2, lwd = lwd, col = "red")
    }
  }
  if (labels[1] == TRUE & length(labels) == 1) {
    labels <- seq(along = z)
  }
  if (labels[1] != FALSE) {
    # Interactive labelling; indices refer to the original ordering of x.
    selected <- identify(z, ord.x, labels[good][ord])
    result <- seq(along = x)[good][ord][selected]
  }
  if (!is.null(result)) {
    result <- sort(result)
  }
  invisible(list(result = result, a = a, b = b, x = z, y = ord.x,
                 ord = ord, upper = upper, lower = lower))
}
# Compute QQ-plot coordinates of x against a theoretical distribution
# (default chi-squared with df/ncp), optionally drawing the scatter plot.
#
# Args:
#   x        numeric vector; NAs are dropped before ordering.
#   plot.it  if TRUE, plot expected (x-axis) vs observed (y-axis) quantiles.
#   ...      forwarded to the quantile function and to plot().
# Returns (invisibly): list(x = expected quantiles, y = sorted observations,
#   ord = ordering permutation) — `ord` lets callers (e.g. identify()) map
#   plotted points back to the input order.
# Fix(review): the quantile function is now looked up with match.fun()
# instead of eval(parse(text = ...)), which is equivalent here and safer.
qq.data <- function(x, plot.it = TRUE, distribution = "chisq", df = 1, ncp = 0,
                    xlab = deparse(substitute(x)),
                    ylab = deparse(substitute(y)), ...) {
  good <- !is.na(x)
  ord <- order(x[good])
  ord.x <- x[good][ord]
  q.function <- match.fun(paste("q", distribution, sep = ""))
  n <- length(ord.x)
  P <- ppoints(n)
  z <- q.function(P, df = df, ncp = ncp, ...)
  if (plot.it) {
    plot(z, ord.x, xlab = xlab, ylab = ylab, ...)
  }
  invisible(list(x = z, y = ord.x, ord = ord))
}
######################################### END SECTION
## qq<- qq.data(data.in,distribution="norm",the.mean=the.mean,the.sd=the.sd,plot.it=FALSE)
## my.qq.plot(data.in,distribution="norm",col="blue",xlab="Expected Score",ylab="Observed score",xlim=range(qq$x), ylim=range(data.in),main=paste("Screen:",the.screen,"with 95% confidence intervals for",":",the.score,sep=" "),the.mean=the.mean,the.sd=the.sd,cex.lab=1.5,cex.axis=1.5,cex.main=1.5,cex=1.5)
########################## USE FUNTIONS BELOW IF YOU REFERNCE FUNCYION IS A NORMAL DISTRIBUTION NOT A CHISQ
# Variant of my.qq.plot() whose reference distribution is parameterised by
# mean/sd (default "norm") instead of df/ncp; always draws the plot.
# Args mirror my.qq.plot(): `line` selects the reference line method
# ("quartiles", MASS::rlm "robust", or "none"); `envelope` is the pointwise
# confidence level or FALSE; non-FALSE `labels` triggers interactive
# identify() on the plotted points.
# Returns (invisibly) the indices selected via identify(), or NULL when no
# labelling was requested.
# NOTE(review): unlike my.qq.plot above there is no plot.it switch, so a
# graphics device is always required.
my.qq.plot.mean<-function (x, distribution = "norm", ylab = deparse(substitute(x)),
xlab = paste(distribution, "quantiles"), main = NULL, las = par("las"),
envelope = 0.95, labels = FALSE, col = palette()[2], lwd = 2, the.mean=0,the.sd=1,cex.lab=2,
pch = 1, cex = 1, line = c("quartiles", "robust", "none"),xlim=c(0,100),ylim=c(0,20),font.lab=2,font.axis=2,font.main=2,cex.axis=1,cex.main=1,
...)
{
result <- NULL
line <- match.arg(line)
# Drop NAs and sort the observed values.
good <- !is.na(x)
ord <- order(x[good])
ord.x <- x[good][ord]
# Build the quantile/density function names from the distribution suffix,
# e.g. "norm" -> qnorm()/dnorm().
q.function <- eval(parse(text = paste("q", distribution,
sep = "")))
d.function <- eval(parse(text = paste("d", distribution,
sep = "")))
n <- length(ord.x)
P <- ppoints(n)
z <- q.function(P, mean=the.mean, sd=the.sd, ...)
plot(z, ord.x, xlab = xlab, ylab = ylab, main = main, las = las,
col = col, pch = pch,cex = cex,xlim=xlim,ylim=ylim,cex.lab=cex.lab,font.lab=font.lab,font.axis=font.axis,font.main=font.main,cex.main=cex.main,cex.axis=cex.axis)
if (line == "quartiles") {
# Reference line through the quartile pairs.
Q.x <- quantile(ord.x, c(0.25, 0.75))
Q.z <- q.function(c(0.25, 0.75), mean=the.mean, sd=the.sd, ...)
b <- (Q.x[2] - Q.x[1])/(Q.z[2] - Q.z[1])
a <- Q.x[1] - b * Q.z[1]
abline(a, b, col = "red", lwd = lwd)
}
if (line == "robust") {
# Robust regression fit of observed on expected quantiles (MASS::rlm).
if (!require("MASS"))
stop("MASS package not available")
coef <- coefficients(rlm(ord.x ~ z))
a <- coef[1]
b <- coef[2]
abline(a, b)
} ################### Envelope function
# Pointwise normal-approximation confidence envelope around the line;
# requires a line to have been fitted (a and b set above).
if (line != "none" & envelope != FALSE) {
zz <- qnorm(1 - (1 - envelope)/2)
SE <- (b/d.function(z, mean=the.mean, sd=the.sd, ...)) * sqrt(P * (1 - P)/n)
fit.value <- a + b * z
upper <- fit.value + zz * SE
lower <- fit.value - zz * SE
lines(z, upper, lty = 2, lwd = lwd/2, col = "red")
lines(z, lower, lty = 2, lwd = lwd/2, col = "red")
} #####################
if (labels[1] == TRUE & length(labels) == 1)
labels <- seq(along = z)
if (labels[1] != FALSE) {
# Interactive labelling; indices refer to the original ordering of x.
selected <- identify(z, ord.x, labels[good][ord])
result <- seq(along = x)[good][ord][selected]
}
if (is.null(result))
invisible(result)
else sort(result)
}
############################################### META ANALYSIS
############################################### META ANALYSIS
############################################### META ANALYSIS
############################################### META ANALYSIS
############################################### META ANALYSIS
############################################### META ANALYSIS
## Pasted console output recording sample counts per cohort (kept as
## comments so the file remains parseable R):
##   1     2     TCGA
##   476   102
##   1     2     DISCOVERY
##   197   89
## ---- META ANALYSIS (run 1): combine TCGA-replication and discovery burden results with METAL ----
## Builds METAL config files from templates via sed, runs the external `metal`
## binary twice (sample-size-weighted and inverse-variance schemes), then merges
## the per-cohort burden tables onto the meta-analysis output by MarkerName.
## NOTE(review): depends on `./metal`, the two template files, and hard-coded
## network paths being present in the working directory — confirm before re-running.
## setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-11-04_AML_TCGA_Replication/Analysis/meta_analyis/")
## files<-c("Burden.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN.txt","Burden.clusters.coding.0.01.all.geno.all.filters_no.imput_paper.txt")
## sizes<-c(150,96)
setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis/")
## files[1] = TCGA replication burden results; files[2] = discovery cohort results.
files<-c("Burden.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT_no_benign.txt","Burden.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign.txt")
## Case counts per cohort (see pasted table above); recorded here as a reminder only.
sizes<-c(102,89)
traits<-c("AML")
i<-1
#### NEED TP HAND MODIFY FILES TO CHNAGES NMISS TO NUMBERS ABOVE
#/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-11-04_AML_TCGA_Replication/Analysis/meta_analyis/ALL_clusters.conponents:.Burden.clusters.coding.0.001.all.geno.all.filters_no.imput_paper.txt
#setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analyis")
## Substitute the two input file names and the nmiss column name into the
## sample-size-weighted METAL config template (two-step sed via *.txt/*.txt1).
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_P_and_BETA_TEMPLATE.txt > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt1",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("sample.size.CONFIG",traits[i],"txt1",sep=".")
system(paste("./metal ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.SAMPLE.SIZE.TRAIT1.tbl","META.SAMPLE.SIZE.TRAIT.p_0.01.txt", sep=" "))
## Same substitution dance for the inverse-variance (STDERR) scheme.
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_inverse_varience_TEMPLATE.txt > ",paste("STDERR.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("STDERR.CONFIG",traits[i],"txt",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("STDERR.CONFIG",traits[i],"txt1",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("STDERR.CONFIG",traits[i],"txt",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("STDERR.CONFIG",traits[i],"txt1",sep=".")
system(paste("./metal ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.STDERR.TRAIT1.tbl","META.STDERR.TRAIT.p_0.01.txt", sep=" "))
## Read both cohort tables plus the METAL output, then align the cohort rows
## to the meta-analysis MarkerName order before binding columns.
tcga<-read.delim(files[1],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
dis<-read.delim(files[2],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
meta<-read.delim("META.STDERR.TRAIT.p_0.01.txt",header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
tcga[1:5,]
dis[1:5,]
meta[1:5,]
colnames(tcga)<-paste(colnames(tcga),"TCGA",sep=".")
colnames(dis)<-paste(colnames(dis),"DIS",sep=".")
posns<-match(meta[,"MarkerName"], tcga[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
tcga<-tcga[posns,]
posns<-match(meta[,"MarkerName"], dis[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
dis<-dis[posns,]
dim(dis)
dim(tcga)
## Column-bind is safe here because both cohort tables were reordered to the
## meta table's MarkerName order above.
meta<-cbind(meta,tcga,dis)
meta[1:5,]
getwd()
write.table(meta,file="META.STDERR.TRAIT.p_0.01.SUMMARY.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
## ---- Variant pass-filter bookkeeping ----
## NOTE(review): the next line is a stray closing parenthesis (leftover from an
## earlier edit) — it will raise a syntax error if this region is sourced
## top-to-bottom; the script appears intended for interactive block execution.
)
## Drop known-bad genotype calls from the pass mask, keeping a pristine copy.
bad.genotypes %in% names(pass)
pass[ names(pass) %in% bad.genotypes]<-FALSE
pass.ori<-pass
sum(pass)
sum(pass.ori)
## summary.geno.extra[pass & !pass.3 ,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")][1:5,]
## maf.aogc.total[pass & !pass.3][1:5]
#pass<- full.qual & maf.filter & !in.common.hit.gene & !on.x.y & !unannotated.hits & not.flat.genotype & !are.repeats & !are.in.repeats & ok.missing.filt & hw.controls.ok.filt & !no.genotypes.filt & rare.in.controls.filt & rare.in.group
## `help` collects every individual filter flag side-by-side so a failing
## variant can be traced to the specific criterion that excluded it.
help<-cbind(full.qual,bad.coding,maf.filter,rare.in.group,no.genotypes,in.common.hit.gene ,hw.controls.ok,on.x.y,unannotated.hits,not.flat.genotype,are.repeats,are.in.repeats,ok.missing,ok.missing.filt,is.unwound.geno,(ok.missing.filt | is.unwound.geno) ,hw.p.control.filt,rare.in.group.filt,no.genotypes.filt,rare.in.controls.filt )
## Interactive sanity checks on dimensions and previews.
dim(fil.genotypes)
sum(pass)
length(pass)
dim(snpinfo.ori)
snpinfo[1:5,]
a.indel[1:5,1:50]
pass[1:5]
dim(a.indel)
################################# GEFOS FILTERING cause sending all
## ---- Build the numeric genotype matrix for seqMeta ----
## Subsets a.indel to passing variants / phenotyped samples, recodes VCF-style
## genotype strings to 0/1/2 allele counts, and transposes to samples x SNPs.
## snpinfo[grep("chr13:28626716:28626716:C:T:CREST",snpinfo[,"Name"]),]
## snpinfo.ori[grep("chr13:28626716:28626716:C:T:CREST",snpinfo.ori[,"Name"]),]
## table(a.indel[pass,"refGene::gene"])
## table(a.indel[pass,"Gene.name"])
#pass<-pass[the.snps] ### GEOFS
genotypes<-a.indel[pass,the.samples] ## ordered correctly for phenotypes and have phenotypes
#genotypes<-fil.genotypes[pass,the.samples]
snp.names<-key[pass] ## GEFOS ony name with start
#### snpinfo now A different size than a.indel since added pathways!!!
snpinfo<-snpinfo.ori[snpinfo.ori[,"Name"] %in% snp.names,]
if( sum(!(snp.names %in% snpinfo.ori[,"Name"]))>0){print("WARINING snp.names not in snpinfo- unusual!")}
dim(snpinfo)
length(snp.names)
dim(genotypes)
# 414 639
print("start QC")
#RNPC3
## Recode genotype strings to additive allele counts (0/1/2); literal "NA"
## strings become real NA first.
genotypes[genotypes=="NA"]<-NA
genotypes[genotypes=="0/0"]<-0
genotypes[genotypes=="0/1"]<-1
genotypes[genotypes=="1/1"]<-2
########### prevent any averaging
dim(genotypes)
## Missing genotypes are set to homozygous-reference (0) rather than being
## mean-imputed downstream — hence "prevent any averaging" above.
genotypes[is.na(genotypes)]<-0
dim(genotypes)
########### prevent any averaging
########################################## GEFOS MINOR ALLELE TRANSFORMATION
## flip.geno<-gsub("2","3",genotypes[to.flip,])
## #flip.geno[1:15,1:10]
## flip.geno<-gsub("0","2",flip.geno)
## flip.geno<-gsub("3","0",flip.geno)
## genotypes[to.flip,]<-flip.geno
##########################################################################
## Coerce the character matrix to numeric, restore its dimensions, then
## transpose so rows are samples and columns are SNPs (seqMeta's layout).
num.col<-dim(genotypes)[2]
num.row<-dim(genotypes)[1]
## genotypes[1:5,1:20]
genotypes<-as.numeric(as.matrix(genotypes))
dim(genotypes)<-c(num.row,num.col)
genotypes<-t(genotypes) # samples x SNPS
colnames(genotypes)<-snp.names
rownames(genotypes)<-gsub(".GT$","",the.samples)
#################################
dim(genotypes)
dim(pheno)
pheno[1:5,]
snpinfo[1:5,]
genotypes[1:5,1:5]
## ---- Run seqMeta tests aggregated by cluster, rank results, write & snapshot ----
formula
# cohort.seq.gene<- skatCohort(Z=genotypes,formula, SNPInfo = snpinfo, data=pheno,family=binomial(),aggregateBy="gene",verbose=FALSE)
## meta.results.burden.gene<-burdenMeta(cohort.seq.gene,wts=1,mafRange = c(0,1),SNPInfo = snpinfo,aggregateBy = "gene")
## meta.results.skatO.gene<-skatOMeta(cohort.seq.gene,burden.wts =1,SNPInfo = snpinfo,aggregateBy="gene")
## the.order.gene<- order(meta.results.burden.gene[,"p"])
## meta.results.burden.gene<-meta.results.burden.gene[the.order.gene,]
## meta.results.burden.gene[1:50,]
## the.order.gene<- order(meta.results.skatO.gene[,"p"])
## meta.results.skatO.gene<-meta.results.skatO.gene[the.order.gene,]
## meta.results.skatO.gene[1:50,]
## Fit per-variant scores once (binomial model), then run burden, SKAT and
## SKAT-O on the same cohort object, all aggregated by the "cluster" column.
cohort.seq <- skatCohort(Z=genotypes,formula, SNPInfo = snpinfo, data=pheno,aggregateBy="cluster",family=binomial(),verbose=FALSE) ## genes and clusters
meta.results.burden<-burdenMeta(cohort.seq,wts=1,mafRange = c(0,1),SNPInfo = snpinfo,aggregateBy="cluster")
meta.results.skat<-skatMeta(cohort.seq,SNPInfo = snpinfo,aggregateBy="cluster")
meta.results.skatO<-skatOMeta(cohort.seq,burden.wts =1,SNPInfo = snpinfo,aggregateBy="cluster")
## Rank each result table by p-value and preview the top hits.
the.order<- order(meta.results.burden[,"p"])
sum(is.na(meta.results.burden[,"p"])) ## bad p-values shoudl not happen
meta.results.burden<- meta.results.burden[the.order,]
meta.results.burden[meta.results.burden[,"gene"] %in% clusters.wanted,]
meta.results.burden[1:50,]
the.order<- order(meta.results.skat[,"p"])
meta.results.skat<- meta.results.skat[the.order,]
meta.results.skat[1:50,]
the.order<- order(meta.results.skatO[,"p"])
sum(is.na(meta.results.skatO[,"p"])) ## bad p-values shoudl not happen
meta.results.skatO<- meta.results.skatO[the.order,]
meta.results.skatO[1:50,]
meta.results.skatO[meta.results.skatO[,"gene"] %in% clusters.wanted,]
## meta.results.burden.gene[meta.results.burden.gene[,"gene"] %in% fanc.genes,]
## snpinfo.ori<-snpinfo
## meta.results.burden.gene[meta.results.burden.gene[,"gene"] %in% other.clusters[,2],]
## meta.results.burden.gene[meta.results.burden.gene[,"gene"] %in% other.clusters[,3],]
## meta.results.skatO.gene[meta.results.skatO.gene[,"gene"] %in% clinical.genes,]
## meta.results.skatO.gene[meta.results.skatO.gene[,"gene"] %in% fanc.genes,]
## meta.results.skatO[meta.results.skatO[,"gene"] %in% clusters.wanted,]
setwd(analysis.dir)
getwd()
## `snap.file` names the analysis snapshot; only the LAST assignment below is
## in effect — earlier ones are alternatives kept for interactive re-use.
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT_no_benign"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT_no_benign"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_wSTRAT"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT_rare"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_paper_no_benign"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput_paper"
snap.file<-"coding.0.01.all.geno.all.filters"
snap.file<-"coding.0.001.all.geno.all.filters_no.imput"
snap.file<-"coding.0.01.all.geno.all.filters_no.imput_HC_indels"
## Dump top-50 and cluster-restricted tables for each test.
## NOTE(review): the SkatO top-50 table and the Burden cluster table are each
## written twice with identical arguments — presumably a copy/paste leftover.
write.table(meta.results.burden[1:50,],file=paste("Burden","Top50",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(meta.results.burden[meta.results.burden[,"gene"] %in% clusters.wanted,],file=paste("Burden","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(meta.results.skat[1:50,],file=paste("Skat",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(meta.results.skatO[1:50,],file=paste("SkatO",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(meta.results.skatO[1:50,],file=paste("SkatO",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(meta.results.skatO[meta.results.skatO[,"gene"] %in% clusters.wanted,],file=paste("SkatO","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(meta.results.burden[meta.results.burden[,"gene"] %in% clusters.wanted,],file=paste("Burden","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
## Keep a compact annotation slice of a.indel, then snapshot the key objects
## plus the full workspace for later reloading.
colnames(a.indel)[c(1:8,13,16,28,7,30,34,37:42,43,14,32,33)] # 1276,1310
annotations<-a.indel[,c(1:8,13,16,28,7,30,34,37:42,43,14,32,33)]
save(list=c("case.control","snpinfo.ori","formula","clusters","pheno.types","ipheno","clusters.wanted","genotypes","p","meta.results.skat","meta.results.skatO","meta.results.burden","pheno","target.pheno.col","snpinfo","fil.genotypes","pass","high.missing.table","a.indel","help","key","summary.geno.extra","full.qual","bad.effect","maf.filter","in.common.hit.gene","on.x.y","unannotated.hits","not.flat.genotype","are.repeats","are.in.repeats","ok.missing","hw.controls.ok.filt","no.genotypes","rare.in.Control","rare.in.Control.filt","in.any.normal","in.any.normal.filt","are.in.repeats.back","are.in.repeats.forward","all.genes"),file=paste(paste(project.files[ichr],".",pheno.types[ipheno],".",snap.file,".small_final.RData",sep="")) )
getwd()
save.image(file=paste(snap.file,"RData",sep="."))
## save(list=c("clusters.wanted"),file="clusters.wanted.RData")
## getwd()
## load(paste(snap.file,"RData",sep="."))
## setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-11-04_AML_TCGA_Replication/Analysis")
## load("AML_TCGA_image_paper.coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP.RData")
## setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis")
## Alternative saved workspaces; only the one matching the current analysis
## should be loaded — loading all three just overwrites in sequence.
load("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis/coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign.RData")
load("AML_HC_image_paper.coding.0.01.all.geno.all.filters_no.imput_paper.RData")
load("AML_AOGC_image.coding.0.001.all.geno.all.filters.NEW.RData")
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
##################### RELOAD########################
## ---- Session restart entry point: reload libraries and a saved snapshot ----
## `snap.file` must already be defined in the session for the load() to work.
library(skatMeta) ## ridge regression
#library(SKAT) ## skat method
library(GenomicFeatures)
library(HardyWeinberg)
library(Biostrings)
## analysis.dir<-"/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2013-10-27_AML_with_AOGCControl_NoFailedLane/Analysis"
## setwd(analysis.dir)
## getwd()
## snap.file<-"coding.0.01.all.geno.all.filters_no.imput"
## snap.file<-"coding.0.01.all.geno.all.filters.NEW"
## snap.file<-"coding.0.001.all.geno.all.filters.NEW"
load(paste(snap.file,"RData",sep="."))
options(width=200)
meta.results.burden[1:50,]
meta.results.skat[1:50,]
meta.results.skatO[1:50,]
## Rebuild the (intercept-only) model formula used by the seqMeta fits.
covars<-"1"
target.pheno.col<-"SampleProject"
formula<-paste(target.pheno.col,"~",paste(covars,collapse="+"),sep="")
print(formula)
formula<-formula(formula)
pheno[1:5,]
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
## test<-c("MYCBP2","SLC25A24","TMCO3","C13orf35")
## test<-c("MYCBP2","SLC25A24","TMCO3","C13orf35")
## test<-c("chr22:41252435-41252687:ST13")
## test<-c("SETD8")
## ---- Unwind clusters into their member genes and export component tables ----
## `test` is a scratch gene/cluster set for interactive lookups; only the last
## assignment in each run matters.
test<-c("SEC61A1","ST14","GPANK1","EEF1A2")
test<-c("C19orf40")
test<-c("FANCP")
test<-c("IDH1")
test<-c("clinical") # not sig after coverage filtering at 0.01 0.1997688 from 0.000142
test<-clinical.genes
test<-fanc.genes
snpinfo[1:5,]
a.cluster<-"random.745"
test<-snpinfo[snpinfo[,"cluster"]==a.cluster,"gene"]
test
test<-c("LOC100268168") #,"NOTCH1")
snpinfo[1:5,]
## meta.results.skat[meta.results.skat[,"gene"] %in% test,]
meta.results.burden[1:20,]
meta.results.burden[meta.results.burden[,"gene"] %in% clusters.wanted,]
test<-snpinfo[snpinfo[,"cluster"]==a.cluster,"cluster"]
meta.results.burden[meta.results.burden[,"gene"] %in% test,]
## `to.unwind` selects which cluster(s) to expand; again, later assignments
## supersede earlier ones.
to.unwind<-c(meta.results.burden[1:50,"gene"],meta.results.skatO[1:50,"gene"])
#to.unwind<-c("FANCD2_minimal_mono_ubi") #, "MCM7", "RNPC3")
to.unwind<-c("FANC_complex.all") # to.unwind<-meta.results.burden[8,"gene"]
#to.unwind<-c("BLM.Complex_AND_Checkpoint")
#to.unwind<-c("NPM1")
## to.unwind<-c("FLT3")
## to.unwind<-c("DDX41","TET2", "GATA2", "ASXL1", "NOTCH1", "IDH1", "JAK2")
to.unwind<-c(clusters.wanted[!(clusters.wanted %in% c("Ubin.proteo","lipid_raft","caveolae","Checkpoint_extendedx1","Checkpoint_extendedx2"))])
to.unwind
#to.unwind.name<-to.unwind
to.unwind.name<-"ALL_clusters_ALL_mutations"
to.unwind.name<-"Collaboration_genes"
# to.unwind.name<-"ALL_clusters"
# to.unwind.name<-"ALL_significant"
# to.unwind.name<-"ALL_significant"
## First pass: expand cluster names to member genes (dropping cluster labels),
## then repeat with the gene list so `loci` contains only real variant names.
snpinfo.ex<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,]
loci<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,"Name"] # this is IDH1 not IDH1 in cluster # are the snp.names
the.genes<-unique(snpinfo.ex[,"gene"])
the.genes<-the.genes[!(the.genes %in% clusters.wanted)]
the.genes #245 ### if used a cluster name need to do back up to (**)
############repest to clean out cluster names
to.unwind<-the.genes
snpinfo.ex<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,]
loci<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,"Name"] # this is IDH1 not IDH1 in cluster # are the snp.names
the.genes<-unique(snpinfo.ex[,"gene"])
the.genes<-the.genes[!(the.genes %in% clusters.wanted)]
the.genes
meta.results.skatO[1:50,]
meta.results.burden[1:50,]
## Write per-gene component results for the unwound cluster set.
the.genes.burden<-meta.results.burden[meta.results.burden[,"gene"] %in% the.genes,]
#the.genes.burden
write.table(the.genes.burden,file=paste(to.unwind.name,"conponents:","Burden","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
the.genes.burden<-meta.results.skatO[meta.results.skatO[,"gene"] %in% the.genes,]
#the.genes.burden
write.table(the.genes.burden,file=paste(paste(to.unwind.name,collapse="."),"conponents:","SkatO","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
## ---- Per-locus genotype extraction and sequencing-depth collection ----
#subset<-(rownames(genotypes) %in% loci) # indicated in genotypes
## NOTE(review): `subset` is only defined by the commented-out line above, so
## the two summaries below rely on a value from an earlier interactive session.
sum(subset)
length(subset)
snpinfo[1:5,]
dim(genotypes)
genotypes[1:5,1:5]
## Restrict the genotype matrix to the unwound loci (columns).
genotypes.ex<-genotypes[,loci]
dim(genotypes.ex)
genotypes.ex[is.na(genotypes.ex)]<-0
dim(genotypes.ex)
snpinfo.ex<-snpinfo[snpinfo[,"cluster"] %in% to.unwind,]
dim(snpinfo.ex)
dim(genotypes.ex)
dim(pheno)
snpinfo.ex[1:5,]
# summary.geno.extra[loci,]
#high.missing[loci,]
sum(are.in.repeats[loci])
# qual[loci,]
## snpinfo[1:5,]
## qual[1:5,c("FILTER_PASS", "FILTER_100" )]
## Re-run the tests on the extracted subset, first per-variant ("Name"),
## then per-cluster, to get single-variant and aggregate p-values.
cohort.seq.ex <- skatCohort(genotypes.ex, formula, SNPInfo = snpinfo.ex, data=pheno,aggregateBy = "Name",verbose=FALSE)
## meta.results.skat.ex<-skatMeta(cohort.seq,SNPInfo = snpinfo)
meta.results.burden.ex<-burdenMeta(cohort.seq.ex,wts=1,mafRange = c(0,1),SNPInfo = snpinfo.ex,aggregateBy = "Name")
#meta.results.burden.ex
pheno[1:5,]
cohort.seq.test <- skatCohort(genotypes.ex, formula, SNPInfo = snpinfo.ex, data=pheno,aggregateBy = "cluster",verbose=FALSE)
meta.results.burden.test<-burdenMeta(cohort.seq.test,wts=1,mafRange = c(0,1),SNPInfo = snpinfo.ex,aggregateBy = "cluster")
#meta.results.burden.test
## meta.results.skat.ex<-skatMeta(cohort.seq,SNPInfo = snpinfo)
meta.results.skatO.test<-skatOMeta(cohort.seq.test,burden.wts =1,SNPInfo = snpinfo.ex,aggregateBy="cluster")
#meta.results.skatO.test
## For every locus, list the carrier sample IDs among cases and controls.
muts.in.cases<-apply(genotypes.ex[pheno[,"SampleProject"]==1,],2,function(x) { paste(names(x)[x!=0 & !is.na(x)],collapse=",")})
muts.in.controls<-apply(genotypes.ex[pheno[,"SampleProject"]==0,],2,function(x) { paste(names(x)[x!=0 & !is.na(x)],collapse=",")})
figure<- match(loci,key)
########################################################
check<-16
## Collect per-carrier sequencing depth (".DP" columns) for each locus, as a
## comma-separated string, separately for cases and controls.
quality.cases<-rep("",times=length(loci))
quality.controls<-rep("",times=length(loci))
a.indel.sub<-a.indel[figure,]
for(check in 1:length(loci)){
print(check)
#check<-"chr11:130066457:130066457:-:A:indel"
# posn<-grep(loci[check],key)
posn<-check
if(muts.in.cases[check]!=""){
#the.gt<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GT",sep=".")
#the.gq<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"GQ",sep=".")
## Despite the variable name, `.DP` (read depth) is collected here, not GQ.
the.gq<-paste(unlist(strsplit(muts.in.cases[check],split=",")),"DP",sep=".")
quality.cases[check]<-paste(a.indel.sub[posn,the.gq],collapse=",")
## a.indel[posn,the.gq]
## a.indel[posn,the.gt]
## a.indel[posn,the.dp]
}
if(muts.in.controls[check]!=""){
#the.gt<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"GT",sep=".")
#the.gq<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"GQ",sep=".")
the.gq<-paste(unlist(strsplit(muts.in.controls[check],split=",")),"DP",sep=".")
quality.controls[check]<-paste(a.indel.sub[posn,the.gq],collapse=",")
## a.indel[posn,the.gq]
## a.indel[posn,the.gt]
## a.indel[posn,the.dp]
}
} # end check
##########################################################################
## ---- Assemble the annotated per-variant output table and write it ----
#figure
length(figure)
dim(meta.results.burden.ex)
length(muts.in.cases)
length(muts.in.controls)
#pass[figure]
#help[figure,]
dim(annotations)
dim(help)
dim(summary.geno.extra)
length(figure)
## Sanity check: per-variant results must line up one-to-one with `loci`.
sum(meta.results.burden.ex[,"gene"]!=loci)
## colnames(a.indel)[1:50]
## key[grep("chr17",key)[1:100]]
## grep("chr17:41197708",key)
## key[grep("10088407",key)]
#out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,28,7,30,34,37:42,43)],summary.geno.extra[figure,],high.missing[figure,],help[figure,])
## out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,28,7,30,34,37:42,43,14,32,33)],summary.geno.extra[figure,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")],high.missing[figure,])
## summary.geno.extra[figure,]
## annotations[figure,]
## help[figure,]
dim(meta.results.burden.ex)
#out<-cbind(meta.results.burden.ex,a.indel[figure,c(1:6,16,43,28,7,30,34,37:42)],summary.geno.extra[figure,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")],help[figure,],muts.in.cases,muts.in.controls)
## Gather MAF columns (old/new thresholds plus AOGC reference), functional
## predictions, annotations, genotype summaries, filter flags, and the
## carrier/depth strings built above into one wide table.
maf.old<-annotations[,"MAF.lt:0.01" ]
maf.new<-maf.lt.all[,"MAF.lt:0.01"]
maf.aogc<-a.indel[,"AOGC-NGS_ALL::maf"]
a.functions<-a.indel[,c("PolyPhen.scores","SIFT.scores","PolyPhen.desc","SIFT.desc")]
out<-cbind(meta.results.burden.ex,maf.new[figure],maf.old[figure],maf.aogc[figure],a.functions[figure,],is.benign.missense[figure],annotations[figure,],summary.geno.extra[figure,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")],help[figure,],muts.in.cases,quality.cases,muts.in.controls,quality.controls)
#out<-cbind(meta.results.burden.ex,annotations[figure,],muts.in.cases,muts.in.controls)
## table(out[,"refGene::location"])
## table(out[,"Consequence.Embl"])
paste(paste(to.unwind,collapse="."))
paste(to.unwind.name,collapse=".")
paste(paste(to.unwind.name,collapse="."),"GENOTYPE.conponents:","SkatO","clusters",snap.file,"txt",sep=".")
write.table(out,file=paste(paste(to.unwind.name,collapse="."),"GENOTYPE.conponents:","SkatO","clusters",snap.file,"txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
getwd()
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
## ---- Genomic-inflation (lambda) estimation and chi-squared QQ plotting ----
meta.results.burden[1:50,]
meta.results.burden[meta.results.burden[,"gene"] %in% clusters.wanted,]
the.order<- order(meta.results.skat[,"p"])
meta.results.skat<- meta.results.skat[the.order,]
meta.results.skat[1:50,]
the.order<- order(meta.results.skatO[,"p"])
sum(is.na(meta.results.skatO[,"p"])) ## bad p-values shoudl not happen
meta.results.skatO<- meta.results.skatO[the.order,]
meta.results.skatO[1:50,]
dim(clusters)
## Expand the wanted clusters to their member genes so both the genes and the
## cluster rows can be excluded from the "null" QQ background.
snpinfo.sub<-snpinfo[snpinfo[,"cluster"] %in% clusters.wanted,]
genes.cl<-unique(snpinfo.sub[,"gene"])
genes.cl<-genes.cl[!(genes.cl %in% clusters.wanted)]
genes.cl
clusters.wanted
genes.and.clusters<-c(genes.cl,clusters.wanted)
meta.results.burden[1:5,]
#################################### just want to plot
subset<-meta.results.burden[ !(meta.results.burden[,"gene"] %in% genes.and.clusters) ,]
subset[1:5,]
dups<-duplicated(subset[,"gene"])
sum(dups)
## Convert burden p-values to 1-df chi-squared quantiles for the QQ plot.
z<-qchisq(subset[ ,"p"],df=1,ncp=0,lower.tail=FALSE,log.p=FALSE) ## if have no chisq valuse
#z0<-qchisq(meta.results.skatO[ !(meta.results.skatO[,"gene"] %in% genes.and.clusters ) ,"p"],df=1,ncp=0,lower.tail=FALSE,log.p=FALSE)
z[1:5]
subset[1:10,]
p.val<-as.numeric(subset[ ,"p"])
par(mfrow=c(1,1),font=2,font.lab=2,font.axis=2,mgp=c(3.5,1,0),mar=c(5,5,4,2)+0.1)
## z<-rchisq(length(p.val), df=1, ncp = 0) ## null test
## Lambda = median observed chi-squared / expected median under the null
## (~0.456 for 1 df); values near 1 indicate no inflation.
median(z,na.rm=TRUE)/0.456 #1.071491
## NOTE(review): `z0` is not defined until a few lines below — this line
## depends on a value left over from a previous interactive run.
median(z0,na.rm=TRUE)/0.456 #1.071491
################## p-values
z0=qnorm(p.val/2)
lambda = round(median(z0^2)/0.454,3)
lambda
## source("http://bioconductor.org/biocLite.R")
## biocLite("GWASTools")
## setRep
## qq.Plot(pvals)
## Reads data
## S <- read.table(input,header=F)
## if (stat_type == "Z")
##   z=S[,1]
## if (stat_type == "CHISQ")
##   z=sqrt(S[,1])
## if (stat_type == "PVAL")
## z0=qnorm(meta.results.skatO[,"p"]/2)
## ## calculates lambda
lambda = round(median(z0^2)/.454,3)
## lambda
range(z)
## Robust-line QQ plot of the filtered set (blue), overlaid with the full set
## (magenta), then interactive labelling via identify().
the.plot<-my.qq.plot(z,dist="chisq",df=1,ncp=0,col="blue",ylab="Observed chi-squared value",xlab="Expected chi-squared value",main="",cex=1,xlim=c(0,22),ylim=c(0,80),cex.lab=2.0,cex.axis=2.0,font.lab=2,font.axis=2,lwd=2,line="robust",plot.it=TRUE) # function defined below
z.all<-qchisq(meta.results.burden[ !(meta.results.burden[,"gene"] %in% clusters.wanted) ,"p"],df=1,ncp=0,lower.tail=FALSE,log.p=FALSE)
range(z.all)
qq<- qq.data(z.all,plot.it=FALSE) ## qq plot used same method as in car library
points(qq$x,qq$y,col="magenta",pch=21)
symbols<-meta.results.burden[!(meta.results.burden[,"gene"] %in% clusters.wanted),"gene"]
#symbols<-meta.results.skatO[,"gene"]
#####annotate curve
## identify() is interactive — click points on the open device to label them.
selected.data<-identify(qq$x,qq$y,labels=symbols[qq$ord],col="red",cex=1,offset=1,atpen='TRUE') ##plate row col symbol
selected.data<-identify(qq$x,qq$y,labels=labels[qq$ord],col="red",cex=1,atpen='TRUE') ## sybmol
selected.data<-identify(qq$x,qq$y,labels=as.character(round(data.in[qq$ord],2)),col="forestgreen",cex=1.25,atpen='TRUE') # observed score
#####
leg.txt<-c("All Genes","Remove Clinical Genes")
legend(2,60,leg.txt,col=c("magenta","blue"),lty=c(-1,-1,2),pch=c(1,1,-1),cex=2.25)
label<-"AML_paper_0.01_all_filters_NO_STRAT"
savePlot(paste(label,"tiff",sep="."),type="tiff")
savePlot(paste(label,"png",sep="."),type="png")
save.image("qq.AS.paper.final.RData")
## Simple -log10(p) QQ plot of the same p-values.
o <- -(log10(p.val))
e <- -log10( 1:length(o)/length(o) )
plot(e,o, pch=23, cex=.4, bg="black",main=hlabel, ylab="Observed -log10 P value",xlab="Expected -log10 P value")
abline(coef=c(0,1), col=1, lwd=2)
## qq <- parse.pvals.qq(gene.region.compare[,"p.skatO"],lim=lim)
## qq <- parse.pvals.qq(all.res.gene[,"p.skatO"],lim=lim)
qq <- parse.pvals.qq(gene.region.compare[,"p.skatO"],lim=lim)
qq <- parse.pvals.qq(all.res.gene[,"p.skatO"],lim=lim)
ylab = expression(Observed~~-log[10](italic(p)))
xlab = expression(Expected~~-log[10](italic(p)))
plot(qq$e,
qq$o,
xlim=c(0,lim),ylim=c(0,lim),
pch=20,col='deepskyblue',
xlab=xlab,ylab=ylab, main="Gene Based")
abline(coef=c(0,1), col=1, lwd=2)
savePlot("FilteredGeneBased.jpg",type="jpeg")
#write.table(geno.all,file=paste(project.name,fam[ifam],"geno.all.txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
#geno.all<-read.delim(paste(project.name,fam[ifam],"geno.all.txt",sep="."),header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
parse.pvals.qq <- function(pvector, lim = 7) {
  ## Prepare observed/expected -log10(p) pairs for a QQ plot.
  ## Observed values are capped at `lim` so a handful of extreme hits
  ## cannot dominate the axis range.
  ## Returns a list with `o` (observed, sorted descending in significance)
  ## and `e` (expected under the uniform null).
  sorted.p <- sort(pvector, decreasing = FALSE)
  observed <- pmin(-log10(sorted.p), lim)
  expected <- -log10(seq_along(observed) / length(observed))
  list(o = observed, e = expected)
}
## ---- -log10(p) QQ plot for the `data.van` result set ----
## NOTE(review): `data.van` and `hlabel` must exist in the session; this
## fragment assumes a data frame with a `P` column of p-values.
data <- data.van
Mobspval <- sort(data$P)
## Drop exact zeros, which would give infinite -log10 values.
Mobspval <- Mobspval[!Mobspval==0]
o <- -(log10(Mobspval))
e <- -log10( 1:length(o)/length(o) )
#Mobsmax <- 3 #trunc(max(Mlogobspval))+1
#Mexpmax <- trunc(max(Mlogexppval))+1
#if (is.infinite(Mobsmax)) {Mobsmax <- 3} else {Mobsmax <- Mobsmax}
#plot(c(0,Mexpmax), c(0,Mexpmax), col="gray", lwd=1, type="l", xlab="Expected -log10 P value", ylab="Observed -log10 P value", xlim=c(0,Mexpmax), ylim=c(0,Mobsmax), las=1, xaxs="i", yaxs="i", bty="l",main=hlabel)
#plot(c(0,Mexpmax), c(0,Mexpmax), col="gray", lwd=1, type="l", xlab="Expected -log10 P value", ylab="Observed -log10 P value", las=1, xaxs="i", yaxs="i", bty="l",main=hlabel)
# plot(c(0,Mexpmax), c(0,Mexpmax), col="gray", lwd=1, type="l", xlab="Expected -log10 P value", ylab="Observed -log10 P value", las=1, xaxs="i", yaxs="i", bty="l",main=hlabel)
#points(Mlogexppval,Mlogobspval, pch=23, cex=.4, bg="black")
plot(e,o, pch=23, cex=.4, bg="black",main=hlabel, ylab="Observed -log10 P value",xlab="Expected -log10 P value")
#######################################################################
#######################################################################
#######################################################################
#######################################################################
#######################################################################
## QQ plot of `x` against a theoretical distribution (default chi-squared),
## modelled on car::qq.plot. Optionally draws a reference line fitted through
## the quartiles or by robust regression (MASS::rlm), plus a pointwise
## confidence envelope, and supports interactive point labelling via identify().
## Returns (invisibly) a list with the fitted line (a, b), the theoretical and
## observed quantiles (x, y), the ordering, the envelope bounds, and any
## identify() selection.
## NOTE(review): when line == "none", `a`/`b`/`upper`/`lower` are never
## assigned, so the final invisible(list(...)) would error — callers always
## pass line = "quartiles" or "robust" in this file; confirm before changing.
my.qq.plot<-function (x, distribution = "chisq",df=1,ncp=0, ylab = deparse(substitute(x)),
    xlab = paste(distribution, "quantiles"), main = NULL, las = par("las"),
    envelope = 0.95, labels = FALSE, col = palette()[2], lwd = 2,
    pch = 1, cex = 1, line = c("quartiles", "robust", "none"),xlim=c(0,100),ylim=c(0,20),font.lab=2,font.axis=2,font.main=2,cex.lab=2.5,cex.axis=1.0,plot.it=TRUE, ...){
    result <- NULL
    line <- match.arg(line)
    good <- !is.na(x)
    ord <- order(x[good])
    ord.x <- x[good][ord]
    ## Resolve the quantile/density functions by name, e.g. qchisq/dchisq.
    q.function <- eval(parse(text = paste("q", distribution,
        sep = "")))
    d.function <- eval(parse(text = paste("d", distribution,
        sep = "")))
    n <- length(ord.x)
    P <- ppoints(n)
    z <- q.function(P,df=df,ncp=ncp, ...)
    if(plot.it){
    plot(z, ord.x, xlab = xlab, ylab = ylab, main = main, las = las,
        col = col, pch = pch,cex = cex,xlim=xlim,ylim=ylim,font.lab=font.lab,font.axis=font.axis,font.main=2,cex.lab=cex.lab,cex.axis=cex.axis)}
    ## Reference line through the first and third quartiles.
    if (line == "quartiles") {
        Q.x <- quantile(ord.x, c(0.25, 0.75))
        Q.z <- q.function(c(0.25, 0.75),df=df,ncp=ncp, ...)
        b <- (Q.x[2] - Q.x[1])/(Q.z[2] - Q.z[1])
        a <- Q.x[1] - b * Q.z[1]
        if(plot.it){
        abline(a, b, col = "red", lwd = lwd)}
    }
    ## Reference line from a robust linear fit (resistant to outliers/hits).
    if (line == "robust") {
        if (!require("MASS"))
            stop("MASS package not available")
        coef <- coefficients(rlm(ord.x ~ z))
        a <- coef[1]
        b <- coef[2]
        if(plot.it){
        abline(a, b,col="red")}
    } ################### Envelope function
    ## Pointwise confidence envelope around the fitted line.
    if (line != "none" & envelope != FALSE) {
        zz <- qnorm(1 - (1 - envelope)/2)
        SE <- (b/d.function(z,df=df,ncp=ncp, ...)) * sqrt(P * (1 - P)/n)
        fit.value <- a + b * z
        upper <- fit.value + zz * SE
        lower <- fit.value - zz * SE
        if(plot.it){
        lines(z, upper, lty = 2, lwd = lwd, col = "red")
        lines(z, lower, lty = 2, lwd = lwd, col = "red")}
    } #####################
    ## Optional interactive labelling of clicked points.
    if (labels[1] == TRUE & length(labels) == 1)
        labels <- seq(along = z)
    if (labels[1] != FALSE) {
        selected <- identify(z, ord.x, labels[good][ord])
        result <- seq(along = x)[good][ord][selected]
    }
    if (is.null(result))
        invisible(list(result=result,a=a,b=b,x=z,y = ord.x,ord=ord,upper=upper,lower=lower))
    else {sort(result)
    invisible(list(result=result,a=a,b=b,x=z,y = ord.x,ord=ord,upper=upper,lower=lower))}
}
## Compute (and optionally plot) theoretical-vs-observed quantile pairs for a
## QQ plot, without drawing a reference line (companion to my.qq.plot).
##
## Args:
##   x:            numeric vector of observed values; NAs are dropped.
##   plot.it:      if TRUE, draw the scatter of theoretical vs observed.
##   distribution: base-R distribution suffix, e.g. "chisq" -> qchisq.
##   df, ncp:      passed to the quantile function (chi-squared defaults).
##   xlab, ylab:   axis labels for the optional plot.
##   ...:          further arguments forwarded to the quantile function/plot.
##
## Returns (invisibly): list(x = theoretical quantiles, y = sorted observed
## values, ord = ordering of the non-NA input — usable with identify()).
qq.data<- function (x, plot.it = TRUE, distribution = "chisq", df=1,ncp=0, xlab = deparse(substitute(x)),
    ylab = deparse(substitute(y)) , ...)
{
    good <- !is.na(x)
    ord <- order(x[good])
    ord.x <- x[good][ord]
    ## Look the quantile function up by name (e.g. "qchisq") with match.fun
    ## instead of eval(parse(...)) — same result, no string evaluation.
    q.function <- match.fun(paste("q", distribution, sep = ""))
    n <- length(ord.x)
    P <- ppoints(n)
    z <- q.function(P,df=df,ncp=ncp, ...)
    if (plot.it)
        plot(z, ord.x, xlab = xlab, ylab = ylab, ...)
    invisible(list(x = z, y = ord.x, ord=ord))
} ##ord is the order if use identify
######################################### END SECTION
## qq<- qq.data(data.in,distribution="norm",the.mean=the.mean,the.sd=the.sd,plot.it=FALSE)
## my.qq.plot(data.in,distribution="norm",col="blue",xlab="Expected Score",ylab="Observed score",xlim=range(qq$x), ylim=range(data.in),main=paste("Screen:",the.screen,"with 95% confidence intervals for",":",the.score,sep=" "),the.mean=the.mean,the.sd=the.sd,cex.lab=1.5,cex.axis=1.5,cex.main=1.5,cex=1.5)
########################## USE FUNTIONS BELOW IF YOU REFERNCE FUNCYION IS A NORMAL DISTRIBUTION NOT A CHISQ
## Variant of my.qq.plot whose reference distribution is parameterised by
## mean/sd (for normal-family distributions) instead of df/ncp. Always plots;
## draws a quartile or robust reference line plus a confidence envelope, and
## supports interactive labelling via identify(). Returns the identify()
## selection (sorted) or NULL, invisibly.
## NOTE(review): as with my.qq.plot, line = "none" leaves `a`/`b` undefined
## for the envelope block — callers in this file always request a line.
my.qq.plot.mean<-function (x, distribution = "norm", ylab = deparse(substitute(x)),
    xlab = paste(distribution, "quantiles"), main = NULL, las = par("las"),
    envelope = 0.95, labels = FALSE, col = palette()[2], lwd = 2, the.mean=0,the.sd=1,cex.lab=2,
    pch = 1, cex = 1, line = c("quartiles", "robust", "none"),xlim=c(0,100),ylim=c(0,20),font.lab=2,font.axis=2,font.main=2,cex.axis=1,cex.main=1,
    ...)
{
    result <- NULL
    line <- match.arg(line)
    good <- !is.na(x)
    ord <- order(x[good])
    ord.x <- x[good][ord]
    ## Resolve quantile/density functions by name, e.g. qnorm/dnorm.
    q.function <- eval(parse(text = paste("q", distribution,
        sep = "")))
    d.function <- eval(parse(text = paste("d", distribution,
        sep = "")))
    n <- length(ord.x)
    P <- ppoints(n)
    z <- q.function(P, mean=the.mean, sd=the.sd, ...)
    plot(z, ord.x, xlab = xlab, ylab = ylab, main = main, las = las,
        col = col, pch = pch,cex = cex,xlim=xlim,ylim=ylim,cex.lab=cex.lab,font.lab=font.lab,font.axis=font.axis,font.main=font.main,cex.main=cex.main,cex.axis=cex.axis)
    ## Reference line through the first and third quartiles.
    if (line == "quartiles") {
        Q.x <- quantile(ord.x, c(0.25, 0.75))
        Q.z <- q.function(c(0.25, 0.75), mean=the.mean, sd=the.sd, ...)
        b <- (Q.x[2] - Q.x[1])/(Q.z[2] - Q.z[1])
        a <- Q.x[1] - b * Q.z[1]
        abline(a, b, col = "red", lwd = lwd)
    }
    ## Robust-regression reference line (MASS::rlm).
    if (line == "robust") {
        if (!require("MASS"))
            stop("MASS package not available")
        coef <- coefficients(rlm(ord.x ~ z))
        a <- coef[1]
        b <- coef[2]
        abline(a, b)
    } ################### Envelope function
    ## Pointwise confidence envelope around the fitted line.
    if (line != "none" & envelope != FALSE) {
        zz <- qnorm(1 - (1 - envelope)/2)
        SE <- (b/d.function(z, mean=the.mean, sd=the.sd, ...)) * sqrt(P * (1 - P)/n)
        fit.value <- a + b * z
        upper <- fit.value + zz * SE
        lower <- fit.value - zz * SE
        lines(z, upper, lty = 2, lwd = lwd/2, col = "red")
        lines(z, lower, lty = 2, lwd = lwd/2, col = "red")
    } #####################
    ## Optional interactive labelling of clicked points.
    if (labels[1] == TRUE & length(labels) == 1)
        labels <- seq(along = z)
    if (labels[1] != FALSE) {
        selected <- identify(z, ord.x, labels[good][ord])
        result <- seq(along = x)[good][ord][selected]
    }
    if (is.null(result))
        invisible(result)
    else sort(result)
}
############################################### META ANALYSIS
############################################### META ANALYSIS
############################################### META ANALYSIS
############################################### META ANALYSIS
############################################### META ANALYSIS
############################################### META ANALYSIS
## (pasted console output — case/control counts per cohort; not executable R)
##     1   2   TCGA
##   475 102
##     1   2   DISCOVERY
##   323 131
## --- Meta-analysis: Burden test, MAF < 0.01, TCGA replication + discovery ---
## Builds METAL config files from sed templates, runs METAL under both the
## sample-size-weighted and inverse-variance (STDERR) schemes, then merges the
## METAL output with the two per-cohort burden tables into one summary file.
## NOTE(review): the sed calls ping-pong between *.txt and *.txt1 config files,
## so statement order is load-bearing -- do not reorder.
## Earlier runs of this section (kept for provenance):
## setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-11-04_AML_TCGA_Replication/Analysis/meta_analyis/")
## files<-c("Burden.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN.txt","Burden.clusters.coding.0.01.all.geno.all.filters_no.imput_paper.txt")
## sizes<-c(150,96)
## setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis/")
## files<-c("Burden.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT_no_benign.txt","Burden.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign.txt")
## sizes<-c(102,89)
setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis_new/")
## Per-cohort burden results: [1] = TCGA replication cohort, [2] = discovery cohort.
files<-c("Burden.clusters.TCGA.coding.0.01.FINAL.txt","Burden.clusters.coding.somatic.with.Indels.AOGC.0.01.FINAL.PCA.txt")
## Cohort case counts (TCGA = 102, discovery = 131). Not used programmatically:
## the nmiss column in the input files must be hand-edited to these values.
sizes<-c(102,131)
traits<-c("AML")
i<-1
#### NEED TO HAND MODIFY FILES TO CHANGE NMISS TO THE NUMBERS ABOVE
#/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014-11-04_AML_TCGA_Replication/Analysis/meta_analyis/ALL_clusters.conponents:.Burden.clusters.coding.0.001.all.geno.all.filters_no.imput_paper.txt
#setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analyis")
## Sample-size-weighted METAL run: substitute the two input file names and the
## nmiss column name into the template, alternating between .txt and .txt1.
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_P_and_BETA_TEMPLATE.txt > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt1",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("sample.size.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
## Keep a named copy of the METAL output for this threshold.
system(paste("cp ","META.SAMPLE.SIZE.TRAIT1.tbl","META.SAMPLE.SIZE.TRAIT.p_0.01.txt", sep=" "))
## Inverse-variance (STDERR-weighted) METAL run, same template substitutions.
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_inverse_varience_TEMPLATE.txt > ",paste("STDERR.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("STDERR.CONFIG",traits[i],"txt",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("STDERR.CONFIG",traits[i],"txt1",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("STDERR.CONFIG",traits[i],"txt",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("STDERR.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.STDERR.TRAIT1.tbl","META.STDERR.TRAIT.p_0.01.txt", sep=" "))
## Merge the STDERR-scheme METAL results with the two cohort tables.
tcga<-read.delim(files[1],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
dis<-read.delim(files[2],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
meta<-read.delim("META.STDERR.TRAIT.p_0.01.txt",header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
tcga[1:5,]   # interactive sanity checks (no effect when source()d)
dis[1:5,]
meta[1:5,]
## Suffix cohort columns so they stay distinguishable after cbind.
colnames(tcga)<-paste(colnames(tcga),"TCGA",sep=".")
colnames(dis)<-paste(colnames(dis),"DIS",sep=".")
## Reorder each cohort table to METAL's marker order (NA rows where a marker
## is absent from that cohort); `sum(missing)` reports how many were unmatched.
posns<-match(meta[,"MarkerName"], tcga[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
tcga<-tcga[posns,]
posns<-match(meta[,"MarkerName"], dis[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
dis<-dis[posns,]
dim(dis)
dim(tcga)
## Combined summary: METAL columns followed by matched TCGA and discovery columns.
meta<-cbind(meta,tcga,dis)
meta[1:5,]
getwd()
write.table(meta,file="META.STDERR.TRAIT.p_0.01.SUMMARY.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
## --- Meta-analysis: Burden test, MAF < 0.001, TCGA replication + discovery ---
## Same METAL pipeline as the 0.01 burden run: sed-generated configs, both
## weighting schemes, then merge METAL output with the per-cohort tables.
## Earlier runs of this section (kept for provenance):
## files<-c("Burden.clusters.coding.0.001.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN.txt","Burden.clusters.coding.0.001.all.geno.all.filters_no.imput_paper.txt")
## sizes<-c(150,96)
## traits<-c("AML")
## setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis/")
## files<-c("Burden.clusters.coding.0.001.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT_no_benign.txt","Burden.clusters.coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign.txt")
## sizes<-c(102,89)
setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis_new/")
## [1] = TCGA replication cohort, [2] = discovery cohort.
files<-c("Burden.clusters.TCGA.coding.0.001.FINAL.txt","Burden.clusters.coding.somatic.with.Indels.AOGC.0.001.FINAL.PCA.txt")
## Cohort case counts; nmiss in the input files must be hand-edited to match.
sizes<-c(102,131)
i<-1
## Sample-size-weighted METAL run (configs alternate between .txt and .txt1).
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_P_and_BETA_TEMPLATE.txt > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt1",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("sample.size.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.SAMPLE.SIZE.TRAIT1.tbl","META.SAMPLE.SIZE.TRAIT.p_0.001.txt", sep=" "))
## Inverse-variance (STDERR-weighted) METAL run.
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_inverse_varience_TEMPLATE.txt > ",paste("STDERR.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("STDERR.CONFIG",traits[i],"txt",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("STDERR.CONFIG",traits[i],"txt1",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("STDERR.CONFIG",traits[i],"txt",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("STDERR.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.STDERR.TRAIT1.tbl","META.STDERR.TRAIT.p_0.001.txt", sep=" "))
## Merge the STDERR-scheme METAL results with the two cohort tables.
tcga<-read.delim(files[1],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
dis<-read.delim(files[2],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
meta<-read.delim("META.STDERR.TRAIT.p_0.001.txt",header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
tcga[1:5,]   # interactive sanity checks (no effect when source()d)
dis[1:5,]
meta[1:5,]
## Suffix cohort columns so they stay distinguishable after cbind.
colnames(tcga)<-paste(colnames(tcga),"TCGA",sep=".")
colnames(dis)<-paste(colnames(dis),"DIS",sep=".")
## Reorder each cohort table to METAL's marker order (NA rows if unmatched).
posns<-match(meta[,"MarkerName"], tcga[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
tcga<-tcga[posns,]
posns<-match(meta[,"MarkerName"], dis[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
dis<-dis[posns,]
dim(dis)
dim(tcga)
## Combined summary: METAL columns followed by matched cohort columns.
meta<-cbind(meta,tcga,dis)
meta[1:5,]
getwd()
write.table(meta,file="META.STDERR.TRAIT.p_0.001.SUMMARY.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
##########################################################################
## --- Meta-analysis: SKAT-O, MAF < 0.001, TCGA replication + discovery ---
## SKAT-O has no usable beta/SE, so only the sample-size-weighted (p-value
## based) METAL scheme is run here; results are merged with both cohort tables.
## Earlier run of this section (kept for provenance):
## setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis/")
## files<-c("SkatO.clusters.coding.0.001.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT_no_benign.txt","SkatO.clusters.coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign.txt")
## sizes<-c(102,89)
setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis_new/")
## [1] = TCGA replication cohort, [2] = discovery cohort.
files<-c("SkatO.clusters.TCGA.coding.0.001.FINAL.txt","SkatO.clusters.coding.somatic.with.Indels.AOGC.0.001.FINAL.PCA.txt")
## Cohort case counts; nmiss in the input files must be hand-edited to match.
sizes<-c(102,131)
i<-1
## Sample-size-weighted METAL run (configs alternate between .txt and .txt1).
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_P_and_BETA_TEMPLATE.txt > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt1",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("sample.size.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.SAMPLE.SIZE.TRAIT1.tbl","META.SAMPLE.SIZE.TRAIT.SKATO.p_0.001.txt", sep=" "))
## Merge the METAL results with the two cohort SKAT-O tables.
tcga<-read.delim(files[1],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
dis<-read.delim(files[2],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
meta<-read.delim("META.SAMPLE.SIZE.TRAIT.SKATO.p_0.001.txt",header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
tcga[1:5,]   # interactive sanity checks (no effect when source()d)
dis[1:5,]
meta[1:5,]
## Suffix cohort columns so they stay distinguishable after cbind.
colnames(tcga)<-paste(colnames(tcga),"TCGA",sep=".")
colnames(dis)<-paste(colnames(dis),"DIS",sep=".")
## Reorder each cohort table to METAL's marker order (NA rows if unmatched).
posns<-match(meta[,"MarkerName"], tcga[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
tcga<-tcga[posns,]
posns<-match(meta[,"MarkerName"], dis[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
dis<-dis[posns,]
dim(dis)
dim(tcga)
## Combined summary: METAL columns followed by matched cohort columns.
meta<-cbind(meta,tcga,dis)
meta[1:5,]
getwd()
write.table(meta,file="META.SAMPLE.SIZE.TRAIT.SKATO.p_0.001.SUMMARY.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
##########################################################################
## --- Meta-analysis: SKAT-O, MAF < 0.01, TCGA replication + discovery ---
## Sample-size-weighted METAL scheme only (SKAT-O provides p-values, not
## beta/SE); results are merged with both cohort tables into a summary file.
## Earlier run of this section (kept for provenance):
## setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis/")
## files<-c("SkatO.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT_no_benign.txt","SkatO.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_wSTRAT_no_benign.txt")
## sizes<-c(102,89)
setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis_new/")
## [1] = TCGA replication cohort, [2] = discovery cohort.
files<-c("SkatO.clusters.TCGA.coding.0.01.FINAL.txt","SkatO.clusters.coding.somatic.with.Indels.AOGC.0.01.FINAL.PCA.txt")
## Cohort case counts; nmiss in the input files must be hand-edited to match.
sizes<-c(102,131)
i<-1
## Sample-size-weighted METAL run (configs alternate between .txt and .txt1).
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_P_and_BETA_TEMPLATE.txt > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt1",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("sample.size.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.SAMPLE.SIZE.TRAIT1.tbl","META.SAMPLE.SIZE.TRAIT.SKATO.p_0.01.txt", sep=" "))
## Merge the METAL results with the two cohort SKAT-O tables.
tcga<-read.delim(files[1],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
dis<-read.delim(files[2],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
meta<-read.delim("META.SAMPLE.SIZE.TRAIT.SKATO.p_0.01.txt",header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
tcga[1:5,]   # interactive sanity checks (no effect when source()d)
dis[1:5,]
meta[1:5,]
## Suffix cohort columns so they stay distinguishable after cbind.
colnames(tcga)<-paste(colnames(tcga),"TCGA",sep=".")
colnames(dis)<-paste(colnames(dis),"DIS",sep=".")
## Reorder each cohort table to METAL's marker order (NA rows if unmatched).
posns<-match(meta[,"MarkerName"], tcga[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
tcga<-tcga[posns,]
posns<-match(meta[,"MarkerName"], dis[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
dis<-dis[posns,]
dim(dis)
dim(tcga)
## Combined summary: METAL columns followed by matched cohort columns.
meta<-cbind(meta,tcga,dis)
meta[1:5,]
getwd()
write.table(meta,file="META.SAMPLE.SIZE.TRAIT.SKATO.p_0.01.SUMMARY.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
## Related output files (pasted paths, kept for reference):
## /media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-08-14_AML_mixedAligners/Analysis/paper/Discovery_paper_final/FANC_complex.all.0.01.GENOTYPE.conponents..Burden.clusters_FINAL.coding.somatic.with.Indels.AOGC.0.01.FINAL.PCA.xlsx
## /media/UQCCG/Sequencing/Data/Sequence_Genotypes/2015/2015-08-14_AML_mixedAligners/Analysis/paper/Discovery_paper_final/EVERYTHING.GENOTYPE.conponents..Burden.clusters_FINAL.coding.somatic.with.Indels.AOGC.ALL.FINAL.PCA.txt
################## NO BENIGN
################## NO BENIGN
################## NO BENIGN
################## NO BENIGN
################## NO BENIGN
################## NO BENIGN
################## NO BENIGN
################## NO BENIGN
## Pasted console output (case/control counts; 1 = control, 2 = case):
## 1 2 TCGA
## 476 102
## 1 2 DISCOVERY
## 197 89
## Burden, MAF < 0.001: stderr- and sample-size-weighted runs; std-error result reported.
## --- Meta-analysis: Burden test, MAF < 0.001, INCLUDING benign missense ---
## (wSTRAT inputs; cohort sizes 102 TCGA / 89 discovery.) Same METAL pipeline:
## sed-generated configs, both weighting schemes, then merge with cohort tables.
setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis/")
## [1] = TCGA replication cohort, [2] = discovery cohort.
files<-c("Burden.clusters.coding.0.001.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT.txt","Burden.clusters.coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT.txt")
## Cohort case counts; nmiss in the input files must be hand-edited to match.
sizes<-c(102,89)
traits<-c("AML")
i<-1
## Sample-size-weighted METAL run (configs alternate between .txt and .txt1).
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_P_and_BETA_TEMPLATE.txt > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt1",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("sample.size.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.SAMPLE.SIZE.TRAIT1.tbl","META.SAMPLE.SIZE.TRAIT.WITH_BENIGN_MISSENSE_p_0.001.txt", sep=" "))
## Inverse-variance (STDERR-weighted) METAL run.
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_inverse_varience_TEMPLATE.txt > ",paste("STDERR.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("STDERR.CONFIG",traits[i],"txt",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("STDERR.CONFIG",traits[i],"txt1",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("STDERR.CONFIG",traits[i],"txt",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("STDERR.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.STDERR.TRAIT1.tbl","META.STDERR.TRAIT.WITH_BENIGN_MISSENSE_p_0.001.txt", sep=" "))
## Merge the STDERR-scheme METAL results with the two cohort tables.
tcga<-read.delim(files[1],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
dis<-read.delim(files[2],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
meta<-read.delim("META.STDERR.TRAIT.WITH_BENIGN_MISSENSE_p_0.001.txt",header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
tcga[1:5,]   # interactive sanity checks (no effect when source()d)
dis[1:5,]
meta[1:5,]
## Suffix cohort columns so they stay distinguishable after cbind.
colnames(tcga)<-paste(colnames(tcga),"TCGA",sep=".")
colnames(dis)<-paste(colnames(dis),"DIS",sep=".")
## Reorder each cohort table to METAL's marker order (NA rows if unmatched).
posns<-match(meta[,"MarkerName"], tcga[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
tcga<-tcga[posns,]
posns<-match(meta[,"MarkerName"], dis[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
dis<-dis[posns,]
dim(dis)
dim(tcga)
## Combined summary: METAL columns followed by matched cohort columns.
meta<-cbind(meta,tcga,dis)
meta[1:5,]
getwd()
write.table(meta,file="META.STDERR.TRAIT.WITH_BENIGN_MISSENSE_p_0.001.SUMMARY.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
########################
## Burden, MAF < 0.01: stderr- and sample-size-weighted runs; std-error result reported.
## --- Meta-analysis: Burden test, MAF < 0.01, INCLUDING benign missense ---
## (wSTRAT inputs; cohort sizes 102 TCGA / 89 discovery.) Same METAL pipeline:
## sed-generated configs, both weighting schemes, then merge with cohort tables.
setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis/")
## [1] = TCGA replication cohort, [2] = discovery cohort.
files<-c("Burden.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT.txt","Burden.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_wSTRAT.txt")
## Cohort case counts; nmiss in the input files must be hand-edited to match.
sizes<-c(102,89)
traits<-c("AML")
i<-1
## Sample-size-weighted METAL run (configs alternate between .txt and .txt1).
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_P_and_BETA_TEMPLATE.txt > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt1",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("sample.size.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.SAMPLE.SIZE.TRAIT1.tbl","META.SAMPLE.SIZE.TRAIT.WITH_BENIGN_MISSENSE_p_0.01.txt", sep=" "))
## Inverse-variance (STDERR-weighted) METAL run.
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_inverse_varience_TEMPLATE.txt > ",paste("STDERR.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("STDERR.CONFIG",traits[i],"txt",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("STDERR.CONFIG",traits[i],"txt1",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("STDERR.CONFIG",traits[i],"txt",sep=".")," > ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("STDERR.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("STDERR.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.STDERR.TRAIT1.tbl","META.STDERR.TRAIT.WITH_BENIGN_MISSENSE_p_0.01.txt", sep=" "))
## Merge the STDERR-scheme METAL results with the two cohort tables.
tcga<-read.delim(files[1],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
dis<-read.delim(files[2],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
meta<-read.delim("META.STDERR.TRAIT.WITH_BENIGN_MISSENSE_p_0.01.txt",header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
tcga[1:5,]   # interactive sanity checks (no effect when source()d)
dis[1:5,]
meta[1:5,]
## Suffix cohort columns so they stay distinguishable after cbind.
colnames(tcga)<-paste(colnames(tcga),"TCGA",sep=".")
colnames(dis)<-paste(colnames(dis),"DIS",sep=".")
## Reorder each cohort table to METAL's marker order (NA rows if unmatched).
posns<-match(meta[,"MarkerName"], tcga[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
tcga<-tcga[posns,]
posns<-match(meta[,"MarkerName"], dis[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
dis<-dis[posns,]
dim(dis)
dim(tcga)
## Combined summary: METAL columns followed by matched cohort columns.
meta<-cbind(meta,tcga,dis)
meta[1:5,]
getwd()
write.table(meta,file="META.STDERR.TRAIT.WITH_BENIGN_MISSENSE_p_0.01.SUMMARY.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
##########################################################################
## SKAT-O, MAF < 0.001: sample-size-weighted run; sample-size result reported.
## --- Meta-analysis: SKAT-O, MAF < 0.001, INCLUDING benign missense ---
## (wSTRAT inputs.) Sample-size-weighted METAL scheme only, then merge with
## both cohort tables into a summary file.
setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis/")
## [1] = TCGA replication cohort, [2] = discovery cohort.
files<-c("SkatO.clusters.coding.0.001.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT.txt","SkatO.clusters.coding.0.001.all.geno.all.filters_no.imput_paper_wSTRAT.txt")
## Cohort case counts; nmiss in the input files must be hand-edited to match.
sizes<-c(102,89)
i<-1
## Sample-size-weighted METAL run (configs alternate between .txt and .txt1).
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_P_and_BETA_TEMPLATE.txt > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt1",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("sample.size.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.SAMPLE.SIZE.TRAIT1.tbl","META.SAMPLE.SIZE.TRAIT.SKATO.WITH_BENIGN_MISSENSE_p_0.001.txt", sep=" "))
## Merge the METAL results with the two cohort SKAT-O tables.
tcga<-read.delim(files[1],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
dis<-read.delim(files[2],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
meta<-read.delim("META.SAMPLE.SIZE.TRAIT.SKATO.WITH_BENIGN_MISSENSE_p_0.001.txt",header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
tcga[1:5,]   # interactive sanity checks (no effect when source()d)
dis[1:5,]
meta[1:5,]
## Suffix cohort columns so they stay distinguishable after cbind.
colnames(tcga)<-paste(colnames(tcga),"TCGA",sep=".")
colnames(dis)<-paste(colnames(dis),"DIS",sep=".")
## Reorder each cohort table to METAL's marker order (NA rows if unmatched).
posns<-match(meta[,"MarkerName"], tcga[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
tcga<-tcga[posns,]
posns<-match(meta[,"MarkerName"], dis[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
dis<-dis[posns,]
dim(dis)
dim(tcga)
## Combined summary: METAL columns followed by matched cohort columns.
meta<-cbind(meta,tcga,dis)
meta[1:5,]
getwd()
write.table(meta,file="META.SAMPLE.SIZE.TRAIT.SKATO.WITH_BENIGN_MISSENSE_p_0.001.SUMMARY.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
##########################################################################
## SKAT-O, MAF < 0.01: sample-size-weighted run; sample-size result reported.
## --- Meta-analysis: SKAT-O, MAF < 0.01, INCLUDING benign missense ---
## (wSTRAT inputs.) Sample-size-weighted METAL scheme only, then merge with
## both cohort tables into a summary file.
setwd("/media/UQCCG/Sequencing/Data/Sequence_Genotypes/2014/2014-11-04_AML_TCGA_Replication/Analysis/meta_analysis/")
## [1] = TCGA replication cohort, [2] = discovery cohort.
files<-c("SkatO.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_TCGA_REP_CLEAN_wSTRAT.txt","SkatO.clusters.coding.0.01.all.geno.all.filters_no.imput_paper_wSTRAT.txt")
## Cohort case counts; nmiss in the input files must be hand-edited to match.
sizes<-c(102,89)
i<-1
## Sample-size-weighted METAL run (configs alternate between .txt and .txt1).
system(paste("sed s/chip.TRAIT/",files[1],"/ run_config_for_P_and_BETA_TEMPLATE.txt > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.TRAIT/",files[2],"/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("sed s/chip.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt1",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt",sep="."),sep=""))
system(paste("sed s/seq.NMISS/","nmiss","/ ",paste("sample.size.CONFIG",traits[i],"txt",sep=".")," > ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
paste("sample.size.CONFIG",traits[i],"txt1",sep=".")  # echo config name (interactive check)
system(paste("./metal ",paste("sample.size.CONFIG",traits[i],"txt1",sep="."),sep=""))
system(paste("cp ","META.SAMPLE.SIZE.TRAIT1.tbl","META.SAMPLE.SIZE.TRAIT.SKATO.WITH_BENIGN_MISSENSE_p_0.01.txt", sep=" "))
## Merge the METAL results with the two cohort SKAT-O tables.
tcga<-read.delim(files[1],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
dis<-read.delim(files[2],header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
meta<-read.delim("META.SAMPLE.SIZE.TRAIT.SKATO.WITH_BENIGN_MISSENSE_p_0.01.txt",header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
tcga[1:5,]   # interactive sanity checks (no effect when source()d)
dis[1:5,]
meta[1:5,]
## Suffix cohort columns so they stay distinguishable after cbind.
colnames(tcga)<-paste(colnames(tcga),"TCGA",sep=".")
colnames(dis)<-paste(colnames(dis),"DIS",sep=".")
## Reorder each cohort table to METAL's marker order (NA rows if unmatched).
posns<-match(meta[,"MarkerName"], tcga[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
tcga<-tcga[posns,]
posns<-match(meta[,"MarkerName"], dis[,1])
missing<-is.na(posns)
sum(missing)
meta[missing,"MarkerName"]
dis<-dis[posns,]
dim(dis)
dim(tcga)
## Combined summary: METAL columns followed by matched cohort columns.
meta<-cbind(meta,tcga,dis)
meta[1:5,]
getwd()
write.table(meta,file="META.SAMPLE.SIZE.TRAIT.SKATO.WITH_BENIGN_MISSENSE_p_0.01.SUMMARY.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
######################################################
######################################################
######################################################
######################################################
######################################################
######################################################
## Export the flagged-sample lists for each cohort, then generate the
## "common"-variant and full-set METAL configs from the sed templates and run
## METAL on each. Behaviour-identical restyle of the original nested
## paste(..., sep = "") chains using paste0() and named config-file variables.
## NOTE(review): relies on `bad.samples`, `traits` and `i` defined earlier in
## the session -- confirm before running this section standalone.
write.table(bad.samples, file = "excluded_samples_TCGA.txt",
            col.names = TRUE, row.names = FALSE, sep = "\t", quote = FALSE)
write.table(bad.samples, file = "excluded_samples_DISCOVERY.txt",
            col.names = TRUE, row.names = FALSE, sep = "\t", quote = FALSE)
## Sample-size-weighted config for the "common" variant set.
cfg.common.size <- paste("sample.size.CONFIG.common", traits[i], "txt", sep = ".")
system(paste0("sed s/TRAIT/common.", traits[i],
              "/ run_config_for_P_and_BETA_TEMPLATE.txt > ", cfg.common.size))
system(paste0("./metal ", cfg.common.size))
## Inverse-variance (STDERR) config for the full variant set.
cfg.stderr <- paste("STDERR.CONFIG", traits[i], "txt", sep = ".")
system(paste0("sed s/TRAIT/", traits[i],
              "/ run_config_for_inverse_varience_TEMPLATE.txt > ", cfg.stderr))
system(paste0("./metal ", cfg.stderr))
## Inverse-variance config for the "common" variant set.
cfg.common.stderr <- paste("STDERR.CONFIG.common", traits[i], "txt", sep = ".")
system(paste0("sed s/TRAIT/common.", traits[i],
              "/ run_config_for_inverse_varience_TEMPLATE.txt > ", cfg.common.stderr))
system(paste0("./metal ", cfg.common.stderr))
####################################
## --- Interactive debugging scratch: inspect specific variants ("targets") ---
## Session-state dependent: relies on `out`, `key`, `diff`, `figure`,
## `a.indel`, `summary.geno.extra`, `the.samples` and the many filter vectors
## created earlier in the session. The repeated `target<-` reassignments below
## were run one at a time interactively -- only the LAST one executed matters.
## Cross-tabulate annotation disagreements between refGene and Ensembl.
chk<-out[,"refGene::location"]=="synonymous SNV"
chk2<-out[,"Consequence.Embl"]=="missense_variant"
sum(chk & chk2)
sort(table(out[chk,"Consequence.Embl"]))
out[chk & chk2,1:25][1:5,]
out
out.idh1<-out #"chr2:209113112:209113112:C:T:snp"
## Alternative snapshot outputs of `out` taken at different points in the
## session (file names reflect the analysis state at the time).
write.table(out,file="fanc-acid.summary.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(out,file="fanc-acid.4 missing.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(out,file="fanc-acid.0.001-2.protein.summary.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
write.table(out,file="clinical.0.001.protein.summary.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
out[1:5,]
meta.results.burden.ex[ meta.results.burden.ex[,"gene"] %in% remove.repeats,]
## #meta.results.skatO.ex<-skatOMeta(cohort.seq.ex,burden.wts =1,SNPInfo = snpinfo)
grep(TRUE,diff)[1:10]
## Candidate variant keys tried one at a time; the grep() form returns row
## indices into `key` rather than key strings.
target<-c("chr1:2116899:2116899:A:G:snp","chr1:2121211:2121211:C:T:snp","chr15:89803883:89803883:T:C:snp","chr14:45652935:45652935:A:G:snp")
target<-"chr19:33465099:33465099:C:T:snp"
target<-"chr11:130059563:130059563:G:A:snp"
target<-grep("chr2:209113113:209113113:G",key)
target
#target<-key[diff][83:84] # target<-key[test]
target<-key[target]
target<-key[figure]
## Recompute the composite pass filter for inspection. NOTE(review): spans two
## lines; both must be run together.
pass<- full.qual & bad.coding & maf.filter & !in.common.hit.gene & !on.x.y & !unannotated.hits & not.flat.genotype & !are.repeats &
( ok.missing.filt | is.unwound.geno) & hw.controls.ok.filt & !no.genotypes.filt & !are.in.repeats & rare.in.controls.filt & rare.in.group
## Inspect annotation, genotype summaries and individual filter verdicts for
## the selected target(s).
a.indel[target,1:40]
out<-cbind(a.indel[target,1:40],summary.geno.extra[target,c("GENO.AML","GENO.Control","GENO.AML.filt","GENO.Control.filt")])
help[target,]
pass[target]
ok.missing.filt[target]
maf.filter[target]
high.missing[target,]
## Map genotype (.GT) sample columns to allele-depth (.AD) columns; the [1:96]
## versions restrict to the first 96 samples, the later two use all samples.
chk<-gsub(".GT$",".AD",the.samples[1:96])
chk.GT<-the.samples[1:96]
chk<-gsub(".GT$",".AD",the.samples)
chk.GT<-the.samples
a.indel[target,chk]
a.indel[target,chk.GT]
fil.genotypes[target,chk.GT]
summary.geno.extra[target,paste("GENO",c("AML","Control","AML.filt","Control.filt"),sep=".")]
hw.p.control.filt[target]
AML Control AML.filt Control.filt
chr2:209113113:209113113:G:A:snp:209113113:flat 0 0 0.5000000 0.29
chr2:209113113:209113113:G:A:snp:209113113 0 0 0.5104167 0.29
chr2:209113113:209113113:G:T:snp:209113113 0 0 0.5312500 0.29
## ## tapply(a.indel[pass,"Consequence.Embl"],a.indel[pass,"Consequence.Embl"],length)
## ## ## dbeta(x, shape1, shape2, ncp = 0, log = FALSE)
## ## ## shape1, shape2: positive parameters of the Beta distribution.
## ## ## ncp: non-centrality parameter.
# Extra annotation columns to carry through to the output tables: filter/type
# fields, any quality-label columns actually present in `a.indel`, and the
# heterozygote read-balance diagnostics.
extra.out<-c("FILTER","TYPE","MAF.lt:0.5","Consequence.Embl","wanted.muts","wanted.muts.coding",unique(global.quality.labs)[unique(global.quality.labs) %in% colnames(a.indel)],"Hetero.ALT.reads","Hetero.REF.reads","Hetero.Read.Balance","culprit")
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
1042 WT1 0.00007100568909052773730 0.94444308 0.2377219 0.13764770 0.13764770 9 9 0
1807 MLL 0.48467366086356300503013 -0.32851222 0.4701054 0.03378378 0.03378378 16 16 0
2187 KRAS 0.07033093737069703865750 1.59454406 0.8810739 0.01013514 0.01013514 5 5 0
3033 FLT3 0.00000000000005425888317 2.38660803 0.3173157 0.06925676 0.06925676 10 10 0
4420 IDH2 0.00000001305450061629625 2.32224967 0.4084620 0.04729730 0.04729730 5 5 0
5478 TP53 0.00004001465115838068794 1.99534188 0.4857925 0.02367167 0.02367167 11 11 0
11171 DNMT3A 0.00000000000019795761198 2.24740471 0.3057624 0.07794105 0.07794105 30 30 0
13567 KIT 0.01521559788883776638546 1.32404414 0.5455013 0.02370054 0.02370054 10 10 0
13787 TET2 0.00000047126510932962245 1.12239340 0.2228007 0.12718477 0.12718477 52 52 0
14788 NPM1 0.00000000000000003337779 1.69694129 0.2012002 0.10979730 0.10979730 6 6 0
17452 JAK2 0.84085058870232365357822 0.04984451 0.2482227 0.15168108 0.15168108 12 12 0
>
5697 TTC19 0.00000000000000000001958964 -1.8425116 0.19887755 0.29245955 0.29245955 6 6 0
15152 NPM1 0.00000000000000000960301848 0.7070687 0.08242233 0.26520270 0.26520270 15 15 0
13394 PFN2 0.00000000000000026646051843 1.9890686 0.24293636 0.12668919 0.12668919 6 5 0
3103 FLT3 0.00000000000005425888316660 2.3866080 0.31731574 0.06925676 0.06925676 10 10 0
11441 DNMT3A 0.00000000000019795761198317 2.2474047 0.30576235 0.07794105 0.07794105 30 30 0
15162 LOC100268168 0.00000000002476538050455725 1.2574656 0.18839142 0.32075104 0.32075104 7 7 0
2875 KSR2 0.00000000003062675194293715 -0.9080621 0.13668377 0.41430453 0.41430453 14 14 0
4910 GGA2 0.00000000010816520700968617 0.4888360 0.07572894 1.01858108 1.01858108 15 15 0
11254 PACSIN2 0.00000000021489113624525686 2.2058416 0.34736005 0.06624158 0.06624158 9 9 0
9451 CD1A 0.00000000127822168012818693 1.9147310 0.31543594 0.09819093 0.09819093 6 6 0
5429 ENSG00000269323 0.00000000390495998358280713 -1.3197635 0.22413815 0.13682432 0.13682432 1 1 0
4536 IDH2 0.00000001305450061629624858 2.3222497 0.40846204 0.04729730 0.04729730 5 5 0
16893 ATXN7L1 0.00000002337016933282044505 0.5516432 0.09877230 0.46168151 0.46168151 20 20 0
721 LOC728407 0.00000013493389842786308576 1.5188057 0.28808826 0.49166667 0.49166667 1 1 0
4800 LOC440335 0.00000017189409453387654263 1.6801551 0.32141208 0.08992448 0.08992448 2 2 0
10638 EEF1A2 0.00000030640515080703841849 1.2747271 0.24899620 0.18978953 0.18978953 2 2 0
209 PARG 0.00000070845983409388752649 1.5324555 0.30902322 0.47445532 0.47445532 3 3 0
3520 STXBP6 0.00000077629917146060417535 1.7819896 0.36063660 0.06460484 0.06460484 5 5 0
14512 NIPBL 0.00000105409135833204367882 1.4049795 0.28783122 0.09060888 0.09060888 16 16 0
17242 KMT2C 0.00000196638747493748888254 -0.9783022 0.20566185 0.26116071 0.26116071 2 2 0
2222 LDHB 0.00000202648349448258055131 -0.6908779 0.14542455 0.37440685 0.37440685 4 4 0
13836 KLF3 0.00000366262309136746226783 1.3769195 0.29741251 0.10642464 0.10642464 6 6 0
3025 ENSR00000430498 0.00000889885831862108461743 1.1566910 0.26037916 0.15593220 0.15593220 1 1 0
2712 FGD6 0.00000989160569885710713636 0.3892227 0.08806881 0.38446519 0.38446519 16 16 0
4123 INO80 0.00001022417375355785278319 1.1280452 0.25565475 0.11323372 0.11323372 17 17 0
12297 IDH1 0.00001125814375019362615672 1.5378205 0.35018271 0.03885135 0.03885135 7 7 0
16833 TSC22D4 0.00001337573547972480456471 1.8395756 0.42251542 0.04641653 0.04641653 5 5 0
540 WBP1L 0.00001442643020075763023451 -1.1895626 0.27426537 0.09799027 0.09799027 8 8 0
16583 OGDH 0.00001894088326779406772924 1.0058254 0.23516949 0.16048446 0.16048446 10 10 0
2062 CD27 0.00002418586493523308856993 -1.6657034 0.39450463 0.05574324 0.05574324 2 2 0
6907 CNN2 0.00003705952582898055575793 -0.9727442 0.23581183 0.09631838 0.09631838 7 7 0
5616 TP53 0.00004001465115838068794391 1.9953419 0.48579254 0.02367167 0.02367167 11 11 0
3191 ESD 0.00004143513725315481078517 0.8959166 0.21855207 0.20439189 0.20439189 4 4 0
12767 CTNNB1 0.00004377859525646177034103 0.7841088 0.19187427 0.25210448 0.25210448 11 11 0
8156 ZNF8 0.00004839096663123216608563 -1.1702579 0.28800923 0.10472973 0.10472973 6 6 0
18290 PTPN3 0.00004851169760004435047018 -0.7725056 0.19014662 0.47316903 0.47316903 8 8 0
6710 GAREM 0.00005049050955042321724500 1.0778286 0.26591091 0.54560811 0.54560811 10 10 0
10023 HNRNPU 0.00005088402190605824181686 0.6647065 0.16406313 0.29325553 0.29325553 10 10 0
13664 CCDC58 0.00005148194684455684465340 1.4406924 0.35583237 0.07094595 0.07094595 3 3 0
18168 SHC3 0.00005324718275289896197771 -0.8552526 0.21164920 0.17059658 0.17059658 4 4 0
1063 WT1 0.00007100568909052773730432 0.9444431 0.23772190 0.13764770 0.13764770 9 9 0
14396 WDFY3-AS1 0.00007448685883887118689903 1.3467120 0.33995181 0.07993197 0.07993197 1 1 0
4010 NEDD8 0.00007814162348186583608497 0.8274176 0.20947125 0.21428571 0.21428571 3 3 0
16229 TULP4 0.00008986033811572452945776 0.5485538 0.14006383 0.43248993 0.43248993 23 23 0
1972 ENSR00000558810 0.00009000456176326984095593 1.8173676 0.46407959 0.03938356 0.03938356 1 1 0
15987 PRDM1 0.00009393036056275626418353 0.3916096 0.10026469 0.82994055 0.82994055 18 18 0
15365 ATXN1 0.00009444772816356851680240 0.1766385 0.04524054 2.69360977 2.69360977 41 41 0
6679 C18orf8 0.00011118710945817823006372 3.1580161 0.81712596 0.01182432 0.01182432 6 6 0
12614 EMC3 0.00011295421737071749813779 0.9412806 0.24379606 0.28210032 0.28210032 4 4 0
14906 SLC23A1 0.00011398259503656368992960 3.1532978 0.81718759 0.01186484 0.01186484 8 7 0
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
1016 FANCF 0.75904796 0.2957806 0.9642988 0.009504010 0.009504010 2 2 0
3495 FANCM 0.39824998 0.3771907 0.4465111 0.042229730 0.042229730 16 16 0
4404 FANCI 0.66334497 -0.2261604 0.5195511 0.030405405 0.030405405 12 12 0
5265 FANCA 0.40425225 -0.3841600 0.4605953 0.035472973 0.035472973 17 17 0
6362 C17orf70 0.05315640 1.2173415 0.6295569 0.020461234 0.020461234 10 10 0
6385 STRA13 0.30527330 -0.9827900 0.9586413 0.005669328 0.005669328 2 2 0
7280 C19orf40 0.08503474 -1.2392754 0.7195919 0.013183442 0.013183442 2 2 0
8073 C1orf86 0.57068855 0.2946712 0.5196677 0.030775238 0.030775238 7 7 0
11359 FANCL 0.17286745 0.6546883 0.4803121 0.032094595 0.032094595 9 9 0
12317 FANCD2 0.39288470 -0.5871702 0.6872329 0.016891892 0.016891892 7 7 0
15277 FANCE 0.48768622 -1.4850169 2.1398136 0.001689189 0.001689189 1 1 0
17575 FANCG 0.32554696 -1.4900680 1.5156478 0.003378378 0.003378378 2 2 0
17737 FANCC 0.65107059 0.1894990 0.4189902 0.041252909 0.041252909 6 6 0
################################### protein 0.01
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
13492 NPM1 0.000000000000000004666874 1.0474066 0.1209304 0.179054054 0.179054054 8 8 0
2743 FLT3 0.000000000000158669842946 2.4365276 0.3301671 0.065878378 0.065878378 8 8 0
10150 DNMT3A 0.000000000000626740759891 2.2837670 0.3174308 0.069471967 0.069471967 25 25 0
3983 IDH2 0.000000013054500616296249 2.3222497 0.4084620 0.047297297 0.047297297 5 5 0
12494 TET2 0.000016213366527191842669 1.2089637 0.2804029 0.070957437 0.070957437 40 40 0
4953 TP53 0.000108295608479553532970 1.9264148 0.4976258 0.021982481 0.021982481 10 10 0
12295 KIT 0.070330937370696955390770 1.5945441 0.8810739 0.010135135 0.010135135 5 5 0
936 WT1 0.070330937370697080290860 1.5945441 0.8810739 0.010135135 0.010135135 5 5 0
1989 KRAS 0.202970214166854234782988 1.5782177 1.2396312 0.005067568 0.005067568 2 2 0
15772 JAK2 0.506363538396016887865869 -0.4805368 0.7231439 0.015202703 0.015202703 8 8 0
1633 MLL 0.732588605446840124280072 -0.2056990 0.6020149 0.018581081 0.018581081 11 11 0
################################### protein 0.01
10317 FANCL 0.0004122055 1.6604799 0.4701054 0.033783784 0.033783784 8 8 0
5744 C17orf70 0.0531564049 1.2173415 0.6295569 0.020461234 0.020461234 10 10 0
5766 STRA13 0.3052732963 -0.9827900 0.9586413 0.005669328 0.005669328 2 2 0
15879 FANCG 0.3255469610 -1.4900680 1.5156478 0.003378378 0.003378378 2 2 0
11180 FANCD2 0.3928846996 -0.5871702 0.6872329 0.016891892 0.016891892 7 7 0
3157 FANCM 0.3982499824 0.3771907 0.4465111 0.042229730 0.042229730 16 16 0
4761 FANCA 0.4042522470 -0.3841600 0.4605953 0.035472973 0.035472973 17 17 0
916 FANCF 0.4498418852 0.8126484 1.0753889 0.006756757 0.006756757 1 1 0
6601 C19orf40 0.4876862217 -1.4850169 2.1398136 0.001689189 0.001689189 1 1 0
13812 FANCE 0.4876862217 -1.4850169 2.1398136 0.001689189 0.001689189 1 1 0
16019 FANCC 0.6244209596 -0.2771562 0.5660935 0.025337838 0.025337838 4 4 0
7354 C1orf86 0.6734397267 0.2314917 0.5492980 0.027254067 0.027254067 5 5 0
3970 FANCI 0.7840547028 -0.1462422 0.5336548 0.028716216 0.028716216 11 11 0
################################### protein 0.001
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
11649 NPM1 0.000000000000000004666874 1.0474066 0.1209304 0.179054054 0.179054054 8 8 0
2369 FLT3 0.000000000000000063908810 3.2306785 0.3865474 0.054054054 0.054054054 6 6 0
8721 DNMT3A 0.000000000000552230211637 2.3671831 0.3282382 0.062650013 0.062650013 21 21 0
3420 IDH2 0.000000000023130646061980 3.3067633 0.4946721 0.033783784 0.033783784 2 2 0
10766 TET2 0.000009553518252604718656 1.3067709 0.2951794 0.062511491 0.062511491 36 36 0
4265 TP53 0.000748277501256778697532 1.7680941 0.5244624 0.018604103 0.018604103 9 9 0
800 WT1 0.003655478822618108813297 3.1255708 1.0753889 0.006756757 0.006756757 4 4 0
10603 KIT 0.011978874707657898024404 3.1149033 1.2396312 0.005067568 0.005067568 3 3 0
1717 KRAS 0.202970214166854234782988 1.5782177 1.2396312 0.005067568 0.005067568 2 2 0
13607 JAK2 0.715431886461324495485314 0.3512669 0.9635083 0.008445946 0.008445946 5 5 0
1401 MLL 0.781879288747886591615099 0.1890884 0.6829481 0.013513514 0.013513514 8 8 0
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
8874 FANCL 0.002711277 1.54750263 0.5160566 0.027027027 0.027027027 5 5 0
3408 FANCI 0.281888405 0.82393519 0.7656772 0.013513514 0.013513514 8 8 0
4950 STRA13 0.305273296 -0.98279000 0.9586413 0.005669328 0.005669328 2 2 0
13700 FANCG 0.325546961 -1.49006803 1.5156478 0.003378378 0.003378378 2 2 0
6349 C1orf86 0.401499884 0.49081068 0.5850328 0.023781175 0.023781175 3 3 0
2725 FANCM 0.434345506 0.56533746 0.7231439 0.015202703 0.015202703 8 8 0
11926 FANCE 0.487686222 -1.48501695 2.1398136 0.001689189 0.001689189 1 1 0
14150 FANCC 0.487686222 -1.48501695 2.1398136 0.001689189 0.001689189 1 1 0
4094 FANCA 0.648916787 -0.34858796 0.7656772 0.013513514 0.013513514 7 7 0
9624 FANCD2 0.973273037 0.04153204 1.2396312 0.005067568 0.005067568 3 3 0
4930 C17orf70 0.976555202 0.03643125 1.2396673 0.005084863 0.005084863 3 3 0
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
546 Clinical 0.0000000000000000000000000000004888122 0.8565784 0.07393617 0.4495089 0.4495089 104 104 0
908 FANC - ACID 0.0867467126344465128129357367470220197 0.4301199 0.25111881 0.1156164 0.1156164 43 43 0
>
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
11649 NPM1 0.000000000000000004666874 1.0474066 0.1209304 0.17905405 0.17905405 8 8 0
2369 FLT3 0.000000000000000063908810 3.2306785 0.3865474 0.05405405 0.05405405 6 6 0
8721 DNMT3A 0.000000000000552230211637 2.3671831 0.3282382 0.06265001 0.06265001 21 21 0
3420 IDH2 0.000000000023130646061980 3.3067633 0.4946721 0.03378378 0.03378378 2 2 0
11550 LOC100268168 0.000000000023533911748733 1.2857857 0.1924188 0.31399428 0.31399428 3 3 0
4127 ENSG00000269323 0.000000003904959983582807 -1.3197635 0.2241381 0.13682432 0.13682432 1 1 0
12234 SERINC1 0.000000094211826775019504 1.8572919 0.3479672 0.12785741 0.12785741 2 2 0
544 LOC728407 0.000000134933898427863086 1.5188057 0.2880883 0.49166667 0.49166667 1 1 0
3636 LOC440335 0.000000171894094533876543 1.6801551 0.3214121 0.08992448 0.08992448 2 2 0
9362 IDH1 0.000000331598514265690453 3.2136150 0.6295600 0.02027027 0.02027027 3 3 0
4276 PER1 0.000000692890021435440133 1.7087936 0.3442826 0.08116412 0.08116412 6 6 0
8107 EEF1A2 0.000001137775237446866202 1.2327350 0.2533272 0.18641115 0.18641115 1 1 0
13094 KMT2C 0.000001966387474937488883 -0.9783022 0.2056618 0.26116071 0.26116071 2 2 0
10766 TET2 0.000009553518252604718656 1.3067709 0.2951794 0.06251149 0.06251149 36 36 0
8299 PCNT 0.000011442460687260778087 1.1341969 0.2584800 0.13554180 0.13554180 12 12 0
1952 LRP1 0.000034034260333051306034 0.8104806 0.1955489 0.22651456 0.22651456 9 9 0
7999 UBE2C 0.000041498170035095508933 1.9445022 0.4743873 0.03914591 0.03914591 1 1 0
10813 KIAA1109 0.000047255810233395838932 2.2193480 0.5454551 0.02364865 0.02364865 13 13 0
8207 RUNX1 0.000047326938703021625054 1.5047527 0.3698590 0.03909016 0.03909016 15 15 0
7302 DDR2 0.000058857795100619874381 -1.6531321 0.4114988 0.05084746 0.05084746 2 2 0
11153 BDP1 0.000061989348153295098726 0.9370378 0.2339607 0.20975925 0.20975925 9 9 0
7023 NRAS 0.000074914710439908353693 2.5831005 0.6522797 0.01525424 0.01525424 8 8 0
3412 KIF7 0.000100898997892848101368 1.2854867 0.3305936 0.09599800 0.09599800 8 8 0
7082 ADAMTSL4 0.000101601001243716289693 1.2695473 0.3266358 0.08385783 0.08385783 7 7 0
7934 CEP250 0.000111187109458178230064 3.1580161 0.8171260 0.01182432 0.01182432 7 7 0
9519 ANKMY1 0.000111187109458178230064 3.1580161 0.8171260 0.01182432 0.01182432 7 7 0
10803 USP53 0.000111187109458178230064 3.1580161 0.8171260 0.01182432 0.01182432 7 7 0
1707 LDHB 0.000116389200518012228438 -0.9555849 0.2479714 0.21114865 0.21114865 2 2 0
6797 CDCP2 0.000122802383916957911060 0.9933946 0.2586653 0.35780486 0.35780486 5 5 0
7359 ASTN1 0.000133839078173962654824 1.9595385 0.5130642 0.02372881 0.02372881 13 13 0
9623 EMC3 0.000182299524791116322091 0.9236984 0.2468224 0.28040541 0.28040541 3 3 0
2504 FARP1 0.000191424749198074471484 1.3362401 0.3582344 0.05514385 0.05514385 5 5 0
12470 TNRC18 0.000234083870525570765389 0.5140810 0.1397311 0.36964260 0.36964260 13 13 0
8218 TTC3 0.000247927027443518377750 1.1483377 0.3133774 0.09534780 0.09534780 10 10 0
14094 NOTCH1 0.000249298429951354749094 1.2113617 0.3307039 0.06834617 0.06834617 11 11 0
7977 GDAP1L1 0.000249478395604726900427 -1.2256653 0.3346257 0.07263514 0.07263514 1 1 0
11848 GPANK1 0.000314055267712844826938 1.2264781 0.3403649 0.07506803 0.07506803 5 5 0
12779 MCM7 0.000354372117981734016494 3.1471264 0.8810739 0.01013514 0.01013514 5 5 0
2037 C12orf50 0.000354372117981734287544 3.1471264 0.8810739 0.01013514 0.01013514 5 5 0
5693 SLC7A9 0.000354372117981735100696 3.1471264 0.8810739 0.01013514 0.01013514 6 6 0
10541 ARAP2 0.000354372117981735100696 3.1471264 0.8810739 0.01013514 0.01013514 6 6 0
11347 SLC23A1 0.000356907628275577887972 3.1455490 0.8810928 0.01014663 0.01014663 6 6 0
217 SPOCK2 0.000358679910400708907257 -0.9409022 0.2636499 0.12325957 0.12325957 3 3 0
9460 SPATA3 0.000366044773861016385728 3.1398917 0.8811439 0.01018702 0.01018702 5 5 0
3974 FUK 0.000368341828536694387754 1.7763957 0.4987375 0.02676248 0.02676248 5 5 0
13467 FER1L6 0.000461698838858031076170 2.1220873 0.6059563 0.02198244 0.02198244 12 12 0
5289 TLE6 0.000473051272974335946641 -1.4972329 0.4283230 0.04250552 0.04250552 3 3 0
3516 METRN 0.000488996172629530792726 0.6917143 0.1983858 0.24723979 0.24723979 5 5 0
5347 LONP1 0.000598160234910257077902 2.2889379 0.6668530 0.15473912 0.15473912 8 8 0
445 TCF7L2 0.000617325306994607092732 1.3700787 0.4001531 0.04918521 0.04918521 4 4 0
>
> meta.results.skatO.gene[1:50,]
gene p pmin rho cmaf nmiss nsnps errflag
13492 NPM1 0.00000000000000006231578 0.000000000000000 0 0.17905405 0 8 3
10150 DNMT3A 0.00000000063524399003710 -0.000000011481598 0 0.06947197 0 25 3
2743 FLT3 0.00000000071406168256950 0.000000000000000 0 0.06587838 0 8 3
3983 IDH2 0.00000000185436090349299 -0.000000001299192 0 0.04729730 0 5 3
4790 ENSG00000269323 0.00000000390495929759913 0.000000003904959 0 0.13682432 0 1 0
10898 IDH1 0.00000003199432113946245 0.000000336874034 1 0.02027027 0 3 0
14171 SERINC1 0.00000005535758817594879 0.000000090207034 1 0.12785741 0 2 0
4222 LOC440335 0.00000007760763769242383 0.000000169958773 1 0.08992448 0 2 0
641 LOC728407 0.00000013493379736382203 0.000000134933797 0 0.49166667 0 1 0
9443 EEF1A2 0.00000113777580490786562 0.000001137775805 0 0.18641115 0 1 0
1745 ST14 0.00000611062893059399721 0.000000774375733 0 0.09877338 0 6 0
8506 DDR2 0.00000635028110935179648 0.000041332534274 1 0.05593220 0 3 0
15171 KMT2C 0.00001624214541258868269 0.000001964479786 1 0.26116071 0 2 0
12494 TET2 0.00001784037973307827301 0.000016213028713 1 0.07095744 0 40 0
7888 CDCP2 0.00002905436713756232202 0.000004173148585 0 0.35950554 0 6 0
8073 RNPC3 0.00003485771459188366722 0.000064092765796 0 0.26271186 0 7 0
9318 UBE2C 0.00004149812741518308111 0.000041498127415 0 0.03914591 0 1 0
9554 RUNX1 0.00006414818616932162719 0.000047326710961 1 0.03909016 0 15 0
4967 PER1 0.00007598954113991147950 0.000076821033159 1 0.12576632 0 17 0
7113 ZNF880 0.00009357194667626277389 0.000083158238226 0 0.23370192 0 7 0
1977 LDHB 0.00011677481253043246702 0.000116389126280 1 0.21114865 0 2 0
8160 NRAS 0.00014215324891966822228 0.000074914501136 1 0.01525424 0 8 0
4953 TP53 0.00014976007670256414337 0.000108295491227 1 0.02198248 0 10 0
16350 NOTCH1 0.00015788371991295785304 0.000064771648038 0 0.09234495 0 17 0
9284 GDAP1L1 0.00021973013925981576796 0.000184702254448 1 0.07432432 0 2 0
6141 TLE6 0.00024465429727435966190 0.000189855323963 1 0.04757308 0 5 0
12108 FGFRL1 0.00030240431511864730848 0.000266572893705 0 0.26207047 0 5 0
11178 EMC3 0.00037495772538978234785 0.000182299540965 1 0.28040541 0 3 0
12936 BDP1 0.00039352602231099734166 0.000180463974624 1 0.24016465 0 18 0
14809 MCM7 0.00048124778478639620993 0.000111186970999 1 0.01182432 0 6 0
7996 COL24A1 0.00055122382437280048259 0.000286734556616 0 0.15423729 0 14 0
2245 NABP2 0.00067773351061196007369 0.000677733510612 0 0.11718750 0 1 0
8243 ANXA9 0.00069925444700109397334 0.000362147710139 1 0.01016949 0 4 0
265 SPOCK2 0.00070550090794654462655 0.000358679931356 1 0.12325957 0 3 0
10317 FANCL 0.00078542350021463982861 0.000412205725316 1 0.03378378 0 8 0
11930 LRRIQ4 0.00078647783891075081532 0.000731331535248 1 0.03716216 0 4 0
2271 LRP1 0.00088440249666421849421 0.000468516330787 1 0.27077381 0 23 0
2040 ADAMTS20 0.00099548907401215600520 0.000629621878342 1 0.15371622 0 13 0
9661 PCNT 0.00103788866818594455697 0.000523955889346 1 0.19473237 0 30 0
12996 ATP6AP1L 0.00109459889658832312809 0.000238452879667 1 0.01520270 0 4 0
11140 CHL1 0.00111354688852394233603 0.000776550554834 1 0.06418919 0 6 0
2874 COMMD6 0.00116585873464020574157 0.001165858734640 0 0.05323194 0 1 0
7037 VRK3 0.00119730526807206960442 0.001073935077417 1 0.01858108 0 7 0
2627 ATP6V0A2 0.00120585642429437236468 0.001079285181628 1 0.01689189 0 5 0
8234 ADAMTSL4 0.00124712487726952921813 0.000650456467526 1 0.10090030 0 15 0
4083 METRN 0.00126149029609892693836 0.000621033308648 1 0.24892898 0 6 0
7073 KLK14 0.00132600922668462458537 0.001141255779499 0 0.02612879 0 3 0
14285 PLEKHG1 0.00144443572977550477310 0.001022375924098 1 0.03209459 0 12 0
1764 ENSG00000254418 0.00144593322772500133995 0.001445933227725 0 0.20992366 0 1 0
13167 SLC23A1 0.00147425012502366554849 0.000356907836456 1 0.01014663 0 6 0
############################with repeats controled
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
11384 NPM1 0.000000000000000004666874 1.0474066 0.1209304 0.179054054 0.179054054 8 8 0
2326 FLT3 0.000000000000000063908810 3.2306785 0.3865474 0.054054054 0.054054054 6 6 0
8506 DNMT3A 0.000000000000552230211637 2.3671831 0.3282382 0.062650013 0.062650013 21 21 0
3331 IDH2 0.000000000023130646061980 3.3067633 0.4946721 0.033783784 0.033783784 2 2 0
10515 TET2 0.000009553518252604718656 1.3067709 0.2951794 0.062511491 0.062511491 36 36 0
4151 TP53 0.000748277501256778697532 1.7680941 0.5244624 0.018604103 0.018604103 9 9 0
786 WT1 0.003655478822618108813297 3.1255708 1.0753889 0.006756757 0.006756757 4 4 0
10354 KIT 0.011978874707657898024404 3.1149033 1.2396312 0.005067568 0.005067568 3 3 0
1686 KRAS 0.202970214166854234782988 1.5782177 1.2396312 0.005067568 0.005067568 2 2 0
13287 JAK2 0.715431886461324495485314 0.3512669 0.9635083 0.008445946 0.008445946 5 5 0
1372 MLL 0.781879288747886591615099 0.1890884 0.6829481 0.013513514 0.013513514 8 8 0
> meta.results.burden.gene[meta.results.burden.gene[,"gene"] %in% fanc.genes,]
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
8657 FANCL 0.06550967 1.41019676 0.7656772 0.013513514 0.013513514 4 4 0
3319 FANCI 0.28188840 0.82393519 0.7656772 0.013513514 0.013513514 8 8 0
4822 STRA13 0.30527330 -0.98279000 0.9586413 0.005669328 0.005669328 2 2 0
13379 FANCG 0.32554696 -1.49006803 1.5156478 0.003378378 0.003378378 2 2 0
6183 C1orf86 0.40149988 0.49081068 0.5850328 0.023781175 0.023781175 3 3 0
2667 FANCM 0.43434551 0.56533746 0.7231439 0.015202703 0.015202703 8 8 0
11657 FANCE 0.48768622 -1.48501695 2.1398136 0.001689189 0.001689189 1 1 0
13820 FANCC 0.48768622 -1.48501695 2.1398136 0.001689189 0.001689189 1 1 0
3988 FANCA 0.64891679 -0.34858796 0.7656772 0.013513514 0.013513514 7 7 0
9390 FANCD2 0.97327304 0.04153204 1.2396312 0.005067568 0.005067568 3 3 0
4802 C17orf70 0.97655520 0.03643125 1.2396673 0.005084863 0.005084863 3 3 0
> meta.results.burden.gene[1:50,]
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
11384 NPM1 0.000000000000000004666874 1.0474066 0.1209304 0.17905405 0.17905405 8 8 0
2326 FLT3 0.000000000000000063908810 3.2306785 0.3865474 0.05405405 0.05405405 6 6 0
8506 DNMT3A 0.000000000000552230211637 2.3671831 0.3282382 0.06265001 0.06265001 21 21 0
3331 IDH2 0.000000000023130646061980 3.3067633 0.4946721 0.03378378 0.03378378 2 2 0
4018 ENSG00000269323 0.000000003904959983582807 -1.3197635 0.2241381 0.13682432 0.13682432 1 1 0
3539 LOC440335 0.000000171894094533876543 1.6801551 0.3214121 0.08992448 0.08992448 2 2 0
9136 IDH1 0.000000331598514265690453 3.2136150 0.6295600 0.02027027 0.02027027 3 3 0
4162 PER1 0.000000692890021435440133 1.7087936 0.3442826 0.08116412 0.08116412 6 6 0
7910 EEF1A2 0.000001137775237446866202 1.2327350 0.2533272 0.18641115 0.18641115 1 1 0
12186 TNRC18 0.000001552073217241876052 1.1277451 0.2347312 0.19070665 0.19070665 10 10 0
10515 TET2 0.000009553518252604718656 1.3067709 0.2951794 0.06251149 0.06251149 36 36 0
8096 PCNT 0.000011442460687260778087 1.1341969 0.2584800 0.13554180 0.13554180 12 12 0
8006 RUNX1 0.000011520978735816074680 1.8369797 0.4187840 0.02877979 0.02877979 13 13 0
1916 LRP1 0.000034034260333051306034 0.8104806 0.1955489 0.22651456 0.22651456 9 9 0
7803 UBE2C 0.000041498170035095508933 1.9445022 0.4743873 0.03914591 0.03914591 1 1 0
10562 KIAA1109 0.000047255810233395838932 2.2193480 0.5454551 0.02364865 0.02364865 13 13 0
7126 DDR2 0.000058857795100619874381 -1.6531321 0.4114988 0.05084746 0.05084746 2 2 0
10896 BDP1 0.000061989348153295098726 0.9370378 0.2339607 0.20975925 0.20975925 9 9 0
6850 NRAS 0.000074914710439908353693 2.5831005 0.6522797 0.01525424 0.01525424 8 8 0
6908 ADAMTSL4 0.000101601001243716289693 1.2695473 0.3266358 0.08385783 0.08385783 7 7 0
7739 CEP250 0.000111187109458178230064 3.1580161 0.8171260 0.01182432 0.01182432 7 7 0
9289 ANKMY1 0.000111187109458178230064 3.1580161 0.8171260 0.01182432 0.01182432 7 7 0
10552 USP53 0.000111187109458178230064 3.1580161 0.8171260 0.01182432 0.01182432 7 7 0
7183 ASTN1 0.000133839078173962654824 1.9595385 0.5130642 0.02372881 0.02372881 13 13 0
9389 EMC3 0.000182299524791116322091 0.9236984 0.2468224 0.28040541 0.28040541 3 3 0
2458 FARP1 0.000191424749198074471484 1.3362401 0.3582344 0.05514385 0.05514385 5 5 0
4229 MYO15A 0.000233148869783894401551 1.3709464 0.3725304 0.04229875 0.04229875 21 21 0
8017 TTC3 0.000247927027443518377750 1.1483377 0.3133774 0.09534780 0.09534780 10 10 0
13765 NOTCH1 0.000249298429951354749094 1.2113617 0.3307039 0.06834617 0.06834617 11 11 0
7781 GDAP1L1 0.000249478395604726900427 -1.2256653 0.3346257 0.07263514 0.07263514 1 1 0
1185 ARHGEF17 0.000253292668381546249353 2.6464736 0.7232961 0.01531949 0.01531949 8 8 0
11579 GPANK1 0.000314055267712844826938 1.2264781 0.3403649 0.07506803 0.07506803 5 5 0
12486 MCM7 0.000354372117981734016494 3.1471264 0.8810739 0.01013514 0.01013514 5 5 0
1999 C12orf50 0.000354372117981734287544 3.1471264 0.8810739 0.01013514 0.01013514 5 5 0
5545 SLC7A9 0.000354372117981735100696 3.1471264 0.8810739 0.01013514 0.01013514 6 6 0
10294 ARAP2 0.000354372117981735100696 3.1471264 0.8810739 0.01013514 0.01013514 6 6 0
11087 SLC23A1 0.000356907628275577887972 3.1455490 0.8810928 0.01014663 0.01014663 6 6 0
3426 METRN 0.000365365220460703169283 0.7069607 0.1983664 0.24496706 0.24496706 4 4 0
9233 SPATA3 0.000366044773861016385728 3.1398917 0.8811439 0.01018702 0.01018702 5 5 0
13155 FER1L6 0.000461698838858031076170 2.1220873 0.6059563 0.02198244 0.02198244 12 12 0
5149 TLE6 0.000473051272974335946641 -1.4972329 0.4283230 0.04250552 0.04250552 3 3 0
7943 PIGU 0.000617045303329858394782 0.8522241 0.2488965 0.26689189 0.26689189 2 2 0
436 TCF7L2 0.000617325306994607092732 1.3700787 0.4001531 0.04918521 0.04918521 4 4 0
421 SORCS3 0.000743219662802089187266 2.5827199 0.7656772 0.01351351 0.01351351 6 6 0
422 SORCS1 0.000743219662802090921989 2.5827199 0.7656772 0.01351351 0.01351351 8 8 0
4151 TP53 0.000748277501256778697532 1.7680941 0.5244624 0.01860410 0.01860410 9 9 0
7329 CD34 0.000782871117631803257325 1.2453682 0.3707798 0.05753230 0.05753230 4 4 0
1728 ADAMTS20 0.000814007272223931277640 0.8533616 0.2548879 0.15033784 0.15033784 11 11 0
3179 IGDCC4 0.000840322856677447847795 0.6763688 0.2025566 0.19018193 0.19018193 7 7 0
6721 COL24A1 0.000841237701234497006059 0.9069638 0.2716390 0.13389831 0.13389831 8 8 0
gene p pmin rho cmaf nmiss nsnps errflag
11384 NPM1 0.00000000000000006231578 0.000000000000000 0 0.17905405 0 8 3
2326 FLT3 0.00000000004159119762795 0.000000000000000 0 0.05405405 0 6 3
8506 DNMT3A 0.00000000065664519662610 0.000000000000000 1 0.06265001 0 21 3
4018 ENSG00000269323 0.00000000390495929759913 0.000000003904959 0 0.13682432 0 1 0
9136 IDH1 0.00000003199432113946245 0.000000336874034 1 0.02027027 0 3 0
3331 IDH2 0.00000004493313973139144 0.000000000000000 0 0.03378378 0 2 3
3539 LOC440335 0.00000007760763769242383 0.000000169958773 1 0.08992448 0 2 0
1916 LRP1 0.00000055844612457180626 0.000034033979921 1 0.22651456 0 9 0
7126 DDR2 0.00000112303401150393009 0.000058857561437 1 0.05084746 0 2 0
7910 EEF1A2 0.00000113777580490786562 0.000001137775805 0 0.18641115 0 1 0
12186 TNRC18 0.00000254387052620701859 0.000001552393842 1 0.19070665 0 10 0
1462 ST14 0.00000798384789661489150 0.000000469149787 0 0.09194148 0 4 0
10515 TET2 0.00000973448554996648209 0.000009551470734 1 0.06251149 0 36 0
4162 PER1 0.00000994446631615928415 0.000000697164039 1 0.08116412 0 6 0
6778 RNPC3 0.00003268132259280769904 0.000067241379935 0 0.25762712 0 4 0
7803 UBE2C 0.00004149812741518308111 0.000041498127415 0 0.03914591 0 1 0
6721 COL24A1 0.00004360811649046598205 0.000096512602278 0 0.13389831 0 8 0
10896 BDP1 0.00005667937916215882738 0.000061989094361 1 0.20975925 0 9 0
13765 NOTCH1 0.00006777611311859203130 0.000007700593850 0 0.06834617 0 11 0
5967 ZNF880 0.00009357194667626277389 0.000083158238226 0 0.23370192 0 7 0
7183 ASTN1 0.00009768630291826020423 0.000133838990002 1 0.02372881 0 13 0
6908 ADAMTSL4 0.00010755872574991883677 0.000101600820104 1 0.08385783 0 7 0
10562 KIAA1109 0.00010841578313424904347 0.000047255556232 1 0.02364865 0 13 0
6850 NRAS 0.00014215324891966822228 0.000074914501136 1 0.01525424 0 8 0
2458 FARP1 0.00018633779647004084152 0.000191424694611 1 0.05514385 0 5 0
7781 GDAP1L1 0.00024947839359046791513 0.000249478393590 0 0.07263514 0 1 0
10197 FGFRL1 0.00025960576293948740313 0.000241706127955 0 0.03828477 0 3 0
9389 EMC3 0.00037495772538978234785 0.000182299540965 1 0.28040541 0 3 0
4229 MYO15A 0.00046589230545476220972 0.000233148713961 1 0.04229875 0 21 0
7248 ASPM 0.00057904110521865776243 0.000424932659092 0 0.20086407 0 13 0
3426 METRN 0.00071442981815005197899 0.000365365399523 1 0.24496706 0 4 0
7739 CEP250 0.00074783202647760822421 0.000111186970999 1 0.01182432 0 7 0
9289 ANKMY1 0.00074783202647760822421 0.000111186970999 1 0.01182432 0 7 0
10552 USP53 0.00074783202647760822421 0.000111186970999 1 0.01182432 0 7 0
5149 TLE6 0.00075323556471991437965 0.000473051548984 1 0.04250552 0 3 0
3047 MGA 0.00078088636041159006224 0.000394817033164 0 0.06250000 0 14 0
8017 TTC3 0.00088817956132411686205 0.000247927107401 1 0.09534780 0 10 0
7329 CD34 0.00091171936144897422916 0.000782871711179 1 0.05753230 0 4 0
9357 CHL1 0.00091824710097400277873 0.000859782542944 0 0.05743243 0 3 0
1185 ARHGEF17 0.00094721919192385626868 0.000253292746374 1 0.01531949 0 8 0
10033 LRRIQ4 0.00108328441973140317603 0.000988094126919 1 0.03547297 0 3 0
2435 COMMD6 0.00116585873464020574157 0.001165858734640 0 0.05323194 0 1 0
11043 SEPT8 0.00120710039548990841381 0.001162522989611 1 0.07311776 0 2 0
7943 PIGU 0.00121815113374191207572 0.000617045957877 1 0.26689189 0 2 0
12486 MCM7 0.00125280761917496356699 0.000354372250721 1 0.01013514 0 5 0
13155 FER1L6 0.00127857000269924439735 0.000461699042854 1 0.02198244 0 12 0
12316 CAMK2B 0.00129517724533686996377 0.000999241709910 1 0.01302213 0 4 0
1728 ADAMTS20 0.00135014904903250606075 0.000814007843442 1 0.15033784 0 11 0
9233 SPATA3 0.00143334790237578148242 0.000366044954399 1 0.01018702 0 5 0
1999 C12orf50 0.00146533556131319894766 0.000354372250721 1 0.01013514 0 5 0
######################### non coding 0.01 repeats
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
8586 PFN2 0.0000000000000002664605 1.9890686 0.24293636 0.12668919 0.12668919 6 5 0
5932 CD1A 0.0000000012442374240917 2.4832436 0.40880228 0.04810997 0.04810997 1 1 0
7133 PACSIN2 0.0000000045295542039248 2.8369042 0.48381665 0.03583618 0.03583618 1 1 0
8920 KLF3 0.0000020035235062399152 1.4215841 0.29908765 0.10472973 0.10472973 5 5 0
1717 FGD6 0.0000024571346728112104 0.4630778 0.09828347 0.33885708 0.33885708 8 8 0
10814 OGDH 0.0000065260074012094771 1.3335337 0.29577625 0.10810811 0.10810811 2 2 0
5042 ZNF8 0.0000086678253943043828 -1.3333883 0.29977335 0.09290541 0.09290541 1 1 0
9370 NIPBL 0.0000100892675686425552 1.8434779 0.41752502 0.04919837 0.04919837 10 10 0
357 WBP1L 0.0000106962172988840060 -1.4606236 0.33176402 0.07432432 0.07432432 5 5 0
6777 ENSR00000403778 0.0000152165703799475389 0.9719161 0.22469295 0.19661017 0.19661017 1 1 0
4556 CNN2 0.0000175535283945161972 -1.6720611 0.38940081 0.05743243 0.05743243 1 1 0
2704 INO80 0.0000226149987306957645 1.1821126 0.27897297 0.09797297 0.09797297 10 10 0
10553 TULP4 0.0000465778583229100321 0.5968424 0.14656630 0.39695946 0.39695946 6 6 0
6139 KDM5B 0.0000535444630009138749 1.6502905 0.40852910 0.04745763 0.04745763 3 3 0
9298 WDFY3-AS1 0.0000744868588388711869 1.3467120 0.33995181 0.07993197 0.07993197 1 1 0
1203 ENSR00000558810 0.0000900045617632698410 1.8173676 0.46407959 0.03938356 0.03938356 1 1 0
10958 TSC22D4 0.0000946051296121490500 1.7679752 0.45285933 0.03956129 0.03956129 2 2 0
10874 PHTF2 0.0000970987856811571758 1.3762771 0.35309713 0.06761385 0.06761385 6 6 0
10638 OSTCP1 0.0001193190917018499715 -0.9609236 0.24975136 0.17398649 0.17398649 1 1 0
336 TLX1 0.0001249485584720845467 1.3695816 0.35701441 0.05513308 0.05513308 1 1 0
8897 KCNIP4-IT1 0.0001255141308119878877 2.4144325 0.62956210 0.02043293 0.02043293 3 3 0
7404 ALMS1 0.0001488084439207132338 0.9730538 0.25653602 0.19584726 0.19584726 2 2 0
7085 FAM118A 0.0001670385597882903701 -1.1808086 0.31368889 0.06250000 0.06250000 2 2 0
680 WT1 0.0001858114946950466728 0.9312925 0.24917096 0.12500000 0.12500000 3 3 0
1373 SLCO1C1 0.0002124346118168629117 1.1325269 0.30577835 0.08360045 0.08360045 2 2 0
1179 FLI1 0.0002446207337969641903 1.4469747 0.39450463 0.05574324 0.05574324 7 7 0
5365 RNF19B 0.0002470716685476660322 0.8887876 0.24248857 0.23898305 0.23898305 5 5 0
9784 SGCD 0.0002471438600087611610 1.0068381 0.27470197 0.12842466 0.12842466 11 11 0
4614 PTPRS 0.0002477966203335093837 1.0306785 0.28125829 0.09094817 0.09094817 5 5 0
6426 IL20 0.0002776610166622033656 -1.2748651 0.35069081 0.06440678 0.06440678 1 1 0
7616 GPD2 0.0003396946912176378457 1.0984161 0.30656483 0.08569442 0.08569442 6 6 0
11108 CREB3L2 0.0003457397710789674047 0.6916436 0.19328438 0.15878378 0.15878378 9 9 0
167 HNRNPH3 0.0003509574215695936269 -1.0435742 0.29195332 0.07939189 0.07939189 6 6 0
2078 CAB39L 0.0003543721179817351007 3.1471264 0.88107388 0.01013514 0.01013514 5 5 0
9796 EBF1 0.0003685039301610075761 0.4282638 0.12024244 0.82650677 0.82650677 25 25 0
8365 ROBO1 0.0003765031993305783392 0.5064511 0.14242042 0.36995557 0.36995557 13 13 0
5593 TTLL7 0.0003805656309857651833 1.3538053 0.38100946 0.05202352 0.05202352 4 4 0
10642 ENSR00001233890 0.0004362132705023125719 -0.5808398 0.16514519 0.56925676 0.56925676 3 3 0
12111 CACNA1B 0.0005414104587663745886 0.7545655 0.21812128 0.18169838 0.18169838 6 6 0
3494 GSE1 0.0005479380695629277763 0.8455973 0.24466411 0.15046024 0.15046024 12 12 0
9548 PJA2 0.0006180456170429991705 1.0213026 0.29831519 0.09206880 0.09206880 8 8 0
3015 ENSR00000405933 0.0006615683494434497275 1.0246404 0.30091964 0.08781362 0.08781362 1 1 0
11303 CHMP7 0.0007242222624701354214 2.4298699 0.71884512 0.01196707 0.01196707 4 4 0
5027 ZNF628 0.0007687302761574642132 0.7849972 0.23336514 0.23742507 0.23742507 2 2 0
6133 PTPN7 0.0008007722223019250740 1.4241425 0.42479665 0.04830215 0.04830215 3 3 0
8095 ZNF385D 0.0009115871559858098249 2.5409567 0.76616044 0.01391300 0.01391300 4 4 0
208 DNAJC9 0.0009439504589197324430 2.5347086 0.76653249 0.01459854 0.01459854 1 1 0
7634 TBR1 0.0009510971873220742251 -1.1039775 0.33407228 0.08277027 0.08277027 2 2 0
4287 VAMP2 0.0009902905940165425587 -0.5612425 0.17042100 0.31418919 0.31418919 2 2 0
6998 MYH9 0.0010931689914459362476 -0.7305933 0.22373907 0.20101351 0.20101351 4 4 0
######################### non coding 0.01 repeats
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
680 WT1 0.0001858115 0.93129252 0.2491710 0.125000000 0.125000000 3 3 0
9095 TET2 0.0299625606 1.15136274 0.5304389 0.025718858 0.025718858 11 11 0
377 SMC3 0.0405430821 3.10430839 1.5156478 0.003378378 0.003378378 1 1 0
2005 FLT3 0.0405430821 3.10430839 1.5156478 0.003378378 0.003378378 2 2 0
8973 KIT 0.0666101830 1.40459975 0.7657436 0.013565400 0.013565400 5 5 0
3684 TP53 0.1482276942 3.09378531 2.1398136 0.001689189 0.001689189 1 1 0
4776 CEBPA 0.1482276942 3.09378531 2.1398136 0.001689189 0.001689189 1 1 0
9824 NPM1 0.1841520121 1.27961512 0.9635083 0.008445946 0.008445946 4 4 0
7199 DNMT3A 0.1855514020 1.27559874 0.9635491 0.008469086 0.008469086 5 5 0
1384 KRAS 0.2029702142 1.57821767 1.2396312 0.005067568 0.005067568 3 3 0
11147 EZH2 0.2261878604 -1.50027397 1.2396528 0.005084746 0.005084746 1 1 0
11843 HNRNPK 0.2967280545 0.27443305 0.2629990 0.120013546 0.120013546 17 17 0
6847 U2AF1 0.3255469610 -1.49006803 1.5156478 0.003378378 0.003378378 2 2 0
1813 PTPN11 0.3389335569 0.43321603 0.4530247 0.025464240 0.025464240 3 3 0
11560 RAD21 0.4876862217 -1.48501695 2.1398136 0.001689189 0.001689189 1 1 0
1112 MLL 0.5063635384 -0.48053684 0.7231439 0.015202703 0.015202703 5 5 0
6097 FAM5C 0.5966678409 0.80208760 1.5156653 0.003389831 0.003389831 2 2 0
11697 JAK2 0.6527915370 0.11713622 0.2603678 0.136478376 0.136478376 4 4 0
5740 NRAS 0.7193820883 0.34618391 0.9635364 0.008474576 0.008474576 4 4 0
6822 RUNX1 0.9240786289 0.02617505 0.2746671 0.128419293 0.128419293 10 10 0
######################### non coding filtered on coverage
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
6806 PFN2 0.0000000000000001114682 2.0241415 0.2441112 0.12530921 0.12530921 6 5 0
3952 ZNF628 0.0000000000763594977791 2.2952454 0.3527021 0.17957746 0.17957746 1 1 0
4071 PLEKHG5 0.0000000003445125198729 2.1833791 0.3478216 0.14858757 0.14858757 4 2 0
2860 TOX3 0.0000000014051499963940 1.2112405 0.2000434 0.40459856 0.40459856 6 6 0
5667 PACSIN2 0.0000000054936038001572 2.8224972 0.4840100 0.03620690 0.03620690 1 1 0
8943 OGDH 0.0000001819885167122744 1.6599143 0.3181831 0.09363958 0.09363958 1 1 0
3232 HLF 0.0000001910567376793365 1.4806432 0.2843107 0.24700530 0.24700530 9 9 0
1367 FGD6 0.0000004676907084027239 0.6008102 0.1192293 0.27386780 0.27386780 7 7 0
3422 ENSR00001346374 0.0000005302364056717515 1.8926496 0.3773950 0.09950249 0.09950249 1 1 0
7113 KLF3 0.0000025701135128080424 1.4229483 0.3025948 0.10135135 0.10135135 4 4 0
2823 RAB11FIP3 0.0000036411347416446038 1.1422161 0.2466520 0.35106264 0.35106264 4 4 0
9719 MIR4668::UGCG 0.0000059064803024314693 1.1747033 0.2593330 0.25735667 0.25735667 2 2 0
2125 CTAGE5 0.0000093042918947091136 2.2217441 0.5012123 0.04587156 0.04587156 1 1 0
8917 ELFN1 0.0000105178969030470281 1.0023946 0.2274942 0.25821881 0.25821881 7 7 0
5385 ENSR00000403778 0.0000107203678988820426 1.0161314 0.2308283 0.18439716 0.18439716 1 1 0
4950 ENSR00000163508 0.0000110620078604507429 1.4339182 0.3262390 0.15422886 0.15422886 1 1 0
264 WBP1L 0.0000127148251343299580 -1.6776411 0.3843432 0.05915065 0.05915065 3 3 0
7413 WDFY3-AS1 0.0000128087409190540472 2.0501932 0.4698671 0.05066079 0.05066079 1 1 0
5651 ENSR00001041998 0.0000153456918746836024 1.3283770 0.3072339 0.19902816 0.19902816 3 3 0
3104 SMARCE1 0.0000317375752644696637 1.1276416 0.2710269 0.12575235 0.12575235 5 5 0
9173 STMN2 0.0000391947788728819934 1.3895546 0.3379123 0.09031778 0.09031778 5 4 0
2398 CHD2 0.0000581461650955096936 1.5328199 0.3812785 0.14252285 0.14252285 8 8 0
6017 GPD2 0.0000629135851035580237 2.3211096 0.5800443 0.05082363 0.05082363 3 3 0
2204 INO80 0.0000638779349663875942 1.3169498 0.3294015 0.08261883 0.08261883 6 5 0
7480 NIPBL 0.0000719068498351913536 1.7726203 0.4465166 0.04223546 0.04223546 9 8 0
8391 TULP4 0.0000731871771328772568 0.5854931 0.1476401 0.38934029 0.38934029 5 5 0
4650 CD1A 0.0000796868558910846452 2.4086200 0.6104975 0.03437500 0.03437500 1 1 0
1602 TESC 0.0000850582623921027536 1.0979859 0.2794083 0.16770021 0.16770021 3 3 0
4227 RNF19B 0.0000942900632103326456 0.9478244 0.2427310 0.23392144 0.23392144 3 3 0
7787 SGCD 0.0001056345600476162592 1.1412240 0.2943369 0.10732379 0.10732379 7 7 0
921 ENSR00000558810 0.0001077384744300534071 2.5975897 0.6707839 0.03900709 0.03900709 1 1 0
4492 ST7L 0.0001105057034761962271 3.1594783 0.8171871 0.01188747 0.01188747 4 4 0
2794 GSE1 0.0001262546609200849513 0.9839160 0.2566524 0.13690552 0.13690552 12 11 0
526 WT1 0.0001274740341488569799 0.9579426 0.2500315 0.12331654 0.12331654 2 2 0
3274 DDX42 0.0001319223475810524021 1.0344252 0.2705906 0.15066531 0.15066531 6 5 0
4805 ADIPOR1 0.0001638439756260418653 1.5537428 0.4122329 0.05239422 0.05239422 2 2 0
8424 TRIM15 0.0001679680242171599633 0.8816094 0.2342911 0.18179003 0.18179003 2 2 0
1018 CCND2 0.0001908784036524938197 0.6188038 0.1658639 0.38348364 0.38348364 8 7 0
6428 SATB1 0.0002040133432748618642 0.8688103 0.2339288 0.18704653 0.18704653 12 11 0
6937 KIAA0226 0.0002356787522383769649 0.8563891 0.2328828 0.33055865 0.33055865 7 7 0
3886 CCDC8 0.0002466912647625096595 1.3877475 0.3785794 0.10700968 0.10700968 3 3 0
8799 ENSR00000632639 0.0002480111822073928583 0.9662919 0.2637040 0.15920398 0.15920398 1 1 0
2424 ENSR00000405933 0.0002491497564689616859 2.0992192 0.5730667 0.04248366 0.04248366 1 1 0
2661 LPCAT2 0.0002760752901902625281 1.9104187 0.5253060 0.04461848 0.04461848 2 2 0
7992 C6orf62 0.0003376895230738654837 0.4669573 0.1302703 0.45821546 0.45821546 10 10 0
5505 GNAZ 0.0003459948047349956221 1.7477164 0.4884372 0.11190389 0.11190389 3 3 0
7098 KCNIP4-IT1 0.0003543721179817342875 3.1471264 0.8810739 0.01013514 0.01013514 2 1 0
3314 GRB2 0.0003691597007200729291 0.5912133 0.1660151 0.33588687 0.33588687 5 5 0
4802 KDM5B 0.0003720231489334260562 2.1581550 0.6063640 0.02608516 0.02608516 2 2 0
7034 ENSR00001378190 0.0003914465133046934474 2.5799237 0.7276031 0.02112676 0.02112676 1 1 0
######################### non coding filtered on coverage
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
526 WT1 0.000127474 0.95794260 0.2500315 0.123316537 0.123316537 2 2 0
7239 TET2 0.028779893 1.43628883 0.6568930 0.019178441 0.019178441 9 8 0
7818 NPM1 0.040543082 3.10430839 1.5156478 0.003378378 0.003378378 2 2 0
1632 FLT3 0.040868247 3.09934770 1.5156741 0.003389869 0.003389869 2 2 0
1431 PTPN11 0.047016610 1.23430223 0.6214545 0.014604318 0.014604318 2 2 0
9480 HNRNPK 0.084284744 0.48249708 0.2794900 0.103299198 0.103299198 15 13 0
3801 CEBPA 0.148227694 3.09378531 2.1398136 0.001689189 0.001689189 1 1 0
2958 TP53 0.149552034 3.08375427 2.1398383 0.001700680 0.001700680 1 1 0
4767 FAM5C 0.151590107 3.06844828 2.1398759 0.001718213 0.001718213 2 1 0
1090 KRAS 0.202916790 1.57844402 1.2396624 0.005084785 0.005084785 3 3 0
843 MLL 0.203449805 1.57655863 1.2396432 0.005073294 0.005073294 3 3 0
7155 KIT 0.205670314 1.56938906 1.2400739 0.005279519 0.005279519 3 3 0
5715 DNMT3A 0.457143001 0.79972571 1.0755369 0.006834352 0.006834352 4 4 0
9242 RAD21 0.478735186 -1.51579254 2.1399273 0.001742160 0.001742160 1 1 0
5424 RUNX1 0.730590306 0.09851200 0.2860890 0.113612033 0.113612033 5 5 0
9677 JAK2 0.857395075 -0.06226895 0.3465333 0.094664396 0.094664396 2 2 0
4507 NRAS 0.976355499 0.03674339 1.2397257 0.005119613 0.005119613 3 3 0
################# coding
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
13492 NPM1 0.000000000000000004666874 1.04740664 0.1209304 0.179054054 0.179054054 8 8 0
2743 FLT3 0.000000000000158669842946 2.43652759 0.3301671 0.065878378 0.065878378 8 8 0
10150 DNMT3A 0.000000000000626740759891 2.28376703 0.3174308 0.069471967 0.069471967 25 25 0
3983 IDH2 0.000000013054500616296249 2.32224967 0.4084620 0.047297297 0.047297297 5 5 0
12494 TET2 0.000016213366527191842669 1.20896367 0.2804029 0.070957437 0.070957437 40 40 0
9554 RUNX1 0.000047326938703021625054 1.50475272 0.3698590 0.039090165 0.039090165 15 15 0
8160 NRAS 0.000074914710439908353693 2.58310051 0.6522797 0.015254237 0.015254237 8 8 0
4953 TP53 0.000108295608479553532970 1.92641482 0.4976258 0.021982481 0.021982481 10 10 0
6607 CEBPA 0.055405577783932999369476 1.83600014 0.9584055 0.005287955 0.005287955 3 3 0
2522 PTPN11 0.067090467507398429680698 1.96910959 1.0753889 0.006756757 0.006756757 4 4 0
9606 U2AF1 0.067090467507398443558486 1.96910959 1.0753889 0.006756757 0.006756757 4 4 0
12295 KIT 0.070330937370696955390770 1.59454406 0.8810739 0.010135135 0.010135135 5 5 0
936 WT1 0.070330937370697080290860 1.59454406 0.8810739 0.010135135 0.010135135 5 5 0
15587 RAD21 0.142886245022605579135799 0.34028191 0.2322542 0.198270728 0.198270728 6 6 0
1989 KRAS 0.202970214166854234782988 1.57821767 1.2396312 0.005067568 0.005067568 2 2 0
517 SMC3 0.325546960976486610128688 -1.49006803 1.5156478 0.003378378 0.003378378 2 2 0
15966 HNRNPK 0.422434173461554707262877 0.61053173 0.7610666 0.010135135 0.010135135 6 6 0
15772 JAK2 0.506363538396016887865869 -0.48053684 0.7231439 0.015202703 0.015202703 8 8 0
8638 FAM5C 0.596667840900447998819800 0.80208760 1.5156653 0.003389831 0.003389831 2 2 0
1633 MLL 0.732588605446840124280072 -0.20569895 0.6020149 0.018581081 0.018581081 11 11 0
##### protein 5%
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
14059 NPM1 0.000000000000000004666874 1.0474066 0.12093043 0.17905405 0.17905405 8 8 0
10574 DNMT3A 0.000000000000626740759891 2.2837670 0.31743081 0.06947197 0.06947197 25 25 0
2869 FLT3 0.000000000042940967530786 1.8258515 0.27691446 0.08952703 0.08952703 10 10 0
11366 IKZF2 0.000000000162553782222710 1.6070721 0.25137595 0.18243243 0.18243243 4 4 0
4964 ENSG00000269323 0.000000003904959983582807 -1.3197635 0.22413815 0.13682432 0.13682432 1 1 0
4141 IDH2 0.000000013054500616296249 2.3222497 0.40846204 0.04729730 0.04729730 5 5 0
9834 EEF1A2 0.000001137775237446866202 1.2327350 0.25332720 0.18641115 0.18641115 1 1 0
755 MRGPRG-AS1 0.000001620994244998847405 1.6952647 0.35349606 0.05434106 0.05434106 6 6 0
9955 RUNX1 0.000011520978735816074680 1.8369797 0.41878401 0.02877979 0.02877979 13 13 0
14247 GABBR1 0.000030668322866356776714 0.5362051 0.12863428 0.26351351 0.26351351 6 6 0
9705 UBE2C 0.000041498170035095508933 1.9445022 0.47438728 0.03914591 0.03914591 1 1 0
5248 FAM83G 0.000074104900624649342042 1.3362992 0.33721882 0.06233780 0.06233780 9 8 0
8480 NRAS 0.000074914710439908353693 2.5831005 0.65227971 0.01525424 0.01525424 8 8 0
5133 TP53 0.000108295608479553532970 1.9264148 0.49762578 0.02198248 0.02198248 10 10 0
13549 ATP6AP1L 0.000120345718131096676338 2.4209233 0.62956005 0.02027027 0.02027027 5 5 0
6916 WDR62 0.000132231614563317053735 0.6136045 0.16053427 0.28490021 0.28490021 13 13 0
3534 ENSG00000268657 0.000142026995963789453041 1.5318377 0.40262660 0.04974230 0.04974230 3 3 0
892 SWAP70 0.000166810470471911846159 1.8081867 0.48031207 0.03209459 0.03209459 4 4 0
2283 RARG 0.000168895995722435617124 0.6830158 0.18158060 0.16816741 0.16816741 6 6 0
16308 KIAA0196 0.000171163465494147894359 1.6485204 0.43865027 0.04391892 0.04391892 6 6 0
9594 PIGU 0.000173165348652006353828 0.9121882 0.24290983 0.27364865 0.27364865 3 3 0
4240 METRN 0.000181467358188834620080 0.6762444 0.18064448 0.26185895 0.26185895 6 6 0
11633 EMC3 0.000182299524791116322091 0.9236984 0.24682237 0.28040541 0.28040541 3 3 0
3949 SLC24A1 0.000185839035721821097761 0.4930339 0.13191447 0.37579767 0.37579767 12 12 0
8859 DDR2 0.000185920484066668837775 -1.3474965 0.36054218 0.06440678 0.06440678 5 4 0
6372 TLE6 0.000189855349907728799532 -1.5249470 0.40859788 0.04757308 0.04757308 5 5 0
16779 ABCA1 0.000210268124718437780015 0.4606855 0.12429628 0.40328369 0.40328369 22 22 0
16684 ROR2 0.000263981755791403032742 1.5034248 0.41208918 0.04243136 0.04243136 10 10 0
690 IFITM3 0.000277277470632597870196 0.6201126 0.17056429 0.21096161 0.21096161 5 5 0
15248 OGDH 0.000325766738098032249127 1.2447853 0.34636073 0.07095744 0.07095744 8 8 0
6284 THEG 0.000332875478878449752862 0.5912790 0.16478094 0.20949989 0.20949989 10 10 0
9515 LOC284788 0.000353468521122107771307 0.8876798 0.24846958 0.13344595 0.13344595 1 1 0
213 PCDH15 0.000359745451234783932253 0.2528272 0.07086007 1.01300444 1.01300444 35 35 0
14094 F13A1 0.000439290968136652447158 -0.5372435 0.15283094 0.31250573 0.31250573 10 10 0
4275 IGFALS 0.000550888288549952298383 0.3747050 0.10846213 0.39657298 0.39657298 5 5 0
14844 SLC18B1 0.000568241175026907826065 1.0389074 0.30145265 0.08108108 0.08108108 4 4 0
14134 ATXN1 0.000592694281715230300231 0.1935701 0.05635334 1.62772567 1.62772567 25 25 0
15388 ZNF804B 0.000657888939569081744249 0.2151153 0.06314752 0.61486486 0.61486486 19 19 0
9668 GDAP1L1 0.000692928251073881293701 -1.0309938 0.30391621 0.09459459 0.09459459 4 4 0
12037 FOXP1 0.000716162631546294215640 -1.2240824 0.36179965 0.05912162 0.05912162 7 7 0
13741 SRA1 0.000719218043990806207497 2.4310968 0.71880295 0.01195183 0.01195183 4 4 0
2294 ATF7 0.000731331122414082554842 -1.5988321 0.47336956 0.03716216 0.03716216 3 3 0
5363 UNC45B 0.000859460792559572591721 -0.6919260 0.20760483 0.23986486 0.23986486 12 12 0
13339 CDH12 0.000882773914947601189181 -0.4779997 0.14374005 0.41216216 0.41216216 9 9 0
6841 SLC7A9 0.000920033854839213418274 1.7001351 0.51303102 0.02364865 0.02364865 8 8 0
7247 BAX 0.000984494814669964338552 0.4477650 0.13589553 0.31189039 0.31189039 7 7 0
4381 SEC14L5 0.000988804896881187618540 1.6903010 0.51319328 0.02386440 0.02386440 11 11 0
16375 ZNF707 0.001036536316145084081139 -0.8043018 0.24518276 0.14020843 0.14020843 3 3 0
8465 LRIG2 0.001051771831947953842953 0.8401587 0.25643530 0.16272928 0.16272928 6 6 0
7303 VRK3 0.001073934611807498456029 1.9688328 0.60201492 0.01858108 0.01858108 7 7 0
>
##############nw data missing done before
> > > > gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
13234 NPM1 0.000000000000000005198732 1.0461338 0.1209554 0.17966102 0.17966102 8 8 0
2711 FLT3 0.000000000000049318060989 2.4659912 0.3273275 0.06768195 0.06768195 9 9 0
9943 DNMT3A 0.000000000000199583042110 2.3521909 0.3200663 0.06791091 0.06791091 24 24 0
3912 IDH2 0.000000013605859243950595 2.3197880 0.4085373 0.04740084 0.04740084 5 5 0
11529 SEC61A1 0.000000124700250433274801 2.1204570 0.4011103 0.06864493 0.06864493 5 5 0
1733 ST14 0.000000209348865691834268 1.9585871 0.3773161 0.05861313 0.05861313 5 5 0
704 MRGPRG-AS1 0.000000738935917701869801 1.7696535 0.3574451 0.05286128 0.05286128 6 6 0
2149 CSAD 0.000002029191699452423640 1.5548682 0.3273066 0.14261090 0.14261090 9 9 0
10401 MYO7B 0.000020847494809388570432 1.3513460 0.3175442 0.09384357 0.09384357 18 18 0
12256 TET2 0.000027671120912236711522 0.7256282 0.1731053 0.14518446 0.14518446 44 44 0
9361 RUNX1 0.000032710565942251596070 1.7704006 0.4262199 0.02740480 0.02740480 12 12 0
14781 ENSG00000257743 0.000042388014398671702702 1.9023615 0.4646635 0.02702703 0.02702703 5 5 0
6425 ZNF254 0.000057467362506624575702 1.6948940 0.4213035 0.04401626 0.04401626 8 8 0
11046 GLB1 0.000064323090770531730193 1.8035849 0.4513066 0.03961539 0.03961539 2 2 0
8207 LMNA 0.000070478854723002212204 1.8877066 0.4749349 0.03979004 0.03979004 4 4 0
2301 HELB 0.000078136748618861764687 1.8325983 0.4639437 0.03900693 0.03900693 10 10 0
1146 VPS37C 0.000084173871405202670506 1.8652174 0.4743451 0.04192747 0.04192747 5 5 0
8066 ANXA9 0.000114012786876461089873 3.1531366 0.8171595 0.01186441 0.01186441 5 5 0
4843 TP53 0.000116093623281911869239 1.9184083 0.4977408 0.02216952 0.02216952 10 10 0
12745 ATP6AP1L 0.000120345718131096676338 2.4209233 0.6295600 0.02027027 0.02027027 5 5 0
7710 ACOT11 0.000124673505871855801967 0.7948071 0.2071564 0.09713898 0.09713898 11 11 0
9968 ATRAID 0.000124818098998251202992 1.6603143 0.4327719 0.03716216 0.03716216 6 6 0
15704 ROR2 0.000160064636162976807118 1.5819575 0.4190714 0.04171823 0.04171823 10 10 0
4184 ABCC6 0.000160453098764581165905 1.1363830 0.3010839 0.06992748 0.06992748 10 10 0
14576 GIGYF1 0.000164915830796788964903 1.4362089 0.3812139 0.03723240 0.03723240 7 7 0
11997 TLR1 0.000169399407266112797272 1.8925524 0.5032370 0.02878576 0.02878576 10 10 0
15350 KIAA0196 0.000171163465494147894359 1.6485204 0.4386503 0.04391892 0.04391892 6 6 0
1752 ENSG00000268844 0.000197390549831760232519 1.9793946 0.5317618 0.05172414 0.05172414 1 1 0
7066 ZNF304 0.000228376293438168156448 1.9157186 0.5198180 0.03078272 0.03078272 2 2 0
6254 MRI1 0.000253049384171016161771 1.8811752 0.5141012 0.02547580 0.02547580 4 4 0
15169 PREX2 0.000277942757790603802402 1.1343359 0.3120564 0.05912162 0.05912162 18 18 0
11177 IP6K2 0.000281234864335923795062 1.8003365 0.4956876 0.03527690 0.03527690 4 4 0
7145 ACAP3 0.000302869423169560743076 1.1385046 0.3151279 0.07570491 0.07570491 5 5 0
12313 KIAA1109 0.000315524229182963754630 1.5873223 0.4406524 0.03558464 0.03558464 19 19 0
4604 ATP2C2 0.000315617699817664996292 1.1454037 0.3179794 0.08658198 0.08658198 19 19 0
9600 KREMEN1 0.000320169949832403081917 2.2510153 0.6255585 0.01691503 0.01691503 7 7 0
1783 PRMT8 0.000321607141685174727306 2.3789902 0.6613369 0.02583449 0.02583449 3 3 0
14805 TAS2R60 0.000362685106858874357211 2.3406188 0.6563994 0.01858108 0.01858108 1 1 0
162 RASSF4 0.000407743552660958842333 2.2096821 0.6250833 0.01751409 0.01751409 5 5 0
786 TRIM3 0.000429704007948382665609 2.3123523 0.6567065 0.01894891 0.01894891 4 4 0
15002 ENTPD4 0.000441509267988472264930 1.2170605 0.3463519 0.04095219 0.04095219 10 10 0
10342 EDAR 0.000448892920076970067863 1.5830456 0.4510700 0.02197675 0.02197675 4 4 0
11362 CRYBG3 0.000479381684615500815432 2.1165303 0.6061048 0.02211152 0.02211152 10 10 0
5904 CTDP1 0.000517015944726954227578 1.0401717 0.2996074 0.05947742 0.05947742 10 10 0
210 RHOBTB1 0.000535126076548453040796 2.0145107 0.5818034 0.02121967 0.02121967 4 4 0
10858 KLHL30 0.000562384633953766409700 2.0860473 0.6048030 0.02474246 0.02474246 8 8 0
7288 TMEM82 0.000568470616489318426694 0.8521315 0.2472650 0.06345112 0.06345112 9 9 0
2483 RAD9B 0.000571678983766462224352 1.8387324 0.5337849 0.02888874 0.02888874 4 4 0
16080 ABCA2 0.000577529196555780135330 1.4510557 0.4215791 0.05815759 0.05815759 12 12 0
15410 PYCRL 0.000609260226812426944516 3.0247655 0.8825098 0.01109613 0.01109613 5 5 0
> meta.results.burden[meta.results.burden[,"gene"] %in% clusters.wanted,]
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
439 Clinical 0.000000000000000000000005269345 1.2476189 0.1234700 0.3602883 0.3602883 142 142 0
670 FANC - ACID 0.529508831193668427772536233533 0.1219868 0.1940129 0.2019703 0.2019703 78 78 0
> meta.results.burden.gene[meta.results.burden.gene[,"gene"] %in% clinical.genes,]
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
13234 NPM1 0.000000000000000005198732 1.04613378 0.1209554 0.179661017 0.179661017 8 8 0
2711 FLT3 0.000000000000049318060989 2.46599120 0.3273275 0.067681945 0.067681945 9 9 0
9943 DNMT3A 0.000000000000199583042110 2.35219094 0.3200663 0.067910908 0.067910908 24 24 0
3912 IDH2 0.000000013605859243950595 2.31978798 0.4085373 0.047400835 0.047400835 5 5 0
12256 TET2 0.000027671120912236711522 0.72562817 0.1731053 0.145184460 0.145184460 44 44 0
9361 RUNX1 0.000032710565942251596070 1.77040060 0.4262199 0.027404805 0.027404805 12 12 0
4843 TP53 0.000116093623281911869239 1.91840825 0.4977408 0.022169522 0.022169522 10 10 0
7980 NRAS 0.003710317430548133472296 3.12063001 1.0754138 0.006779661 0.006779661 4 4 0
12057 KIT 0.066285869450683806980429 1.11727855 0.6083786 0.028400244 0.028400244 6 6 0
928 WT1 0.070330937370697080290860 1.59454406 0.8810739 0.010135135 0.010135135 5 5 0
10687 IDH1 0.115793254911813789376218 0.88463808 0.5625040 0.022127090 0.022127090 2 2 0
1971 KRAS 0.202970214166854234782988 1.57821767 1.2396312 0.005067568 0.005067568 2 2 0
507 SMC3 0.318927922043224509884851 -1.51072866 1.5157862 0.003442433 0.003442433 2 2 0
15679 HNRNPK 0.422434173461554707262877 0.61053173 0.7610666 0.010135135 0.010135135 6 6 0
1620 MLL 0.547357757609294282019619 0.31896817 0.5300903 0.025337838 0.025337838 13 13 0
9415 U2AF1 0.594362502534595771308545 0.80712018 1.5156478 0.003378378 0.003378378 2 2 0
2501 PTPN11 0.595513595715084664838912 0.80460856 1.5156609 0.003384104 0.003384104 2 2 0
8471 FAM5C 0.596667840900447998819800 0.80208760 1.5156653 0.003389831 0.003389831 2 2 0
15485 JAK2 0.751870165974434678801686 -0.18493752 0.5849226 0.023648649 0.023648649 9 9 0
15311 RAD21 0.787526013436009808543758 0.15766029 0.5849569 0.023677673 0.023677673 5 5 0
14821 EZH2 0.962014617123815418686661 0.04196169 0.8810739 0.010135135 0.010135135 5 5 0
> > > > gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
12804 NPM1 0.000000000000000005198732 1.0461338 0.1209554 0.179661017 0.179661017 8 8 0
9610 DNMT3A 0.000000000000199583042110 2.3521909 0.3200663 0.067910908 0.067910908 24 24 0
3787 IDH2 0.000000013605859243950595 2.3197880 0.4085373 0.047400835 0.047400835 5 5 0
11155 SEC61A1 0.000000073049858231976606 2.5227418 0.4686060 0.051323584 0.051323584 3 3 0
2080 CSAD 0.000000200290463446029697 1.8664681 0.3590005 0.132360067 0.132360067 7 7 0
1675 ST14 0.000008025302852671984085 1.5505244 0.3473002 0.075642916 0.075642916 5 5 0
11855 TET2 0.000012074718120871907201 1.2367108 0.2825970 0.070860136 0.070860136 39 39 0
6289 U2AF1L4 0.000015314117990915600986 1.8070436 0.4178985 0.063863900 0.063863900 4 4 0
10051 MYO7B 0.000020847494809388570432 1.3513460 0.3175442 0.093843572 0.093843572 18 18 0
9052 RUNX1 0.000032710565942251596070 1.7704006 0.4262199 0.027404805 0.027404805 12 12 0
7642 RNPC3 0.000058432551854491600416 1.6933303 0.4213257 0.044085210 0.044085210 4 4 0
15545 ABCA2 0.000091213341086191364023 1.7045241 0.4356222 0.053713149 0.053713149 11 11 0
14058 MCM7 0.000111187109458177864145 3.1580161 0.8171260 0.011824324 0.011824324 6 6 0
4699 TP53 0.000116093623281911869239 1.9184083 0.4977408 0.022169522 0.022169522 10 10 0
1695 ENSG00000268844 0.000197390549831760232519 1.9793946 0.5317618 0.051724138 0.051724138 1 1 0
12331 ATP6AP1L 0.000238452901613662066353 2.6570861 0.7231439 0.015202703 0.015202703 4 4 0
4055 ABCC6 0.000281867651053124270918 1.5647591 0.4308947 0.049657213 0.049657213 7 7 0
11907 KIAA1109 0.000315524229182963754630 1.5873223 0.4406524 0.035584641 0.035584641 19 19 0
1727 PRMT8 0.000321607141685174727306 2.3789902 0.6613369 0.025834488 0.025834488 3 3 0
6903 C19orf33 0.000333359051844815310571 1.4745121 0.4109686 0.043071161 0.043071161 1 1 0
7806 ANXA9 0.000362147663061860734714 3.1422261 0.8811048 0.010169492 0.010169492 4 4 0
9446 CYP2D6 0.000385127214111089725794 2.2221026 0.6259319 0.017335119 0.017335119 9 9 0
12496 SLC23A1 0.000428671337539374803453 3.1037873 0.8813139 0.010658217 0.010658217 6 6 0
10995 CRYBG3 0.000479381684615500815432 2.1165303 0.6061048 0.022111523 0.022111523 10 10 0
9025 MRAP 0.000667145971961554166106 1.5671786 0.4605642 0.055685526 0.055685526 2 2 0
959 NR1H3 0.000702887484659180214637 2.4355644 0.7187841 0.011905512 0.011905512 7 7 0
3100 ESR2 0.000743219662802089295686 2.5827199 0.7656772 0.013513514 0.013513514 7 7 0
10310 NRP2 0.000765713948857463851810 2.5767097 0.7657611 0.013571927 0.013571927 8 8 0
8336 CENPF 0.000963201086109111061923 1.5206316 0.4606494 0.035593220 0.035593220 16 16 0
13565 PLEKHG1 0.001022374605895130881411 1.5774870 0.4803121 0.032094595 0.032094595 12 12 0
6653 VRK3 0.001073934611807498456029 1.9688328 0.6020149 0.018581081 0.018581081 7 7 0
2520 ATP6V0A2 0.001079284197161915715910 2.2465641 0.6872329 0.016891892 0.016891892 5 5 0
12508 SRA1 0.001103427165890638605994 2.3479072 0.7196133 0.012931967 0.012931967 4 4 0
202 RHOBTB1 0.001133542127159656255284 3.1363116 0.9635083 0.008445946 0.008445946 3 3 0
7064 TMEM82 0.001152063413686501332917 0.8161676 0.2510907 0.058181408 0.058181408 6 6 0
8137 ASTN1 0.001152957272663993407158 1.3527126 0.4161848 0.037449589 0.037449589 17 17 0
8801 L3MBTL1 0.001158574963517203845059 3.1305133 0.9635648 0.008480538 0.008480538 5 5 0
12629 NDST1 0.001236116432906644337086 1.8172006 0.5625289 0.022173643 0.022173643 7 7 0
10488 COL6A3 0.001289237272950148746262 0.8789480 0.2731039 0.100100072 0.100100072 33 33 0
9456 PACSIN2 0.001379644627650715655101 1.6620740 0.5195806 0.030434114 0.030434114 8 8 0
9933 FER1L5 0.001447720681670354924905 1.7490471 0.5491576 0.027084758 0.027084758 16 16 0
8522 TMEM234 0.002092515051375646445431 1.5683870 0.5097499 0.037848606 0.037848606 1 1 0
11530 WFS1 0.002102272226138989126565 1.0002333 0.3252378 0.066281244 0.066281244 20 20 0
14498 ENTPD4 0.002181773594185488270719 1.1121746 0.3629453 0.034195435 0.034195435 9 9 0
7615 ABCA4 0.002191838480875605600640 0.9061983 0.2958603 0.076288481 0.076288481 29 29 0
1024 OR5M3 0.002283666905670040866982 2.4928257 0.8171528 0.011841620 0.011841620 5 5 0
4434 CNTNAP4 0.002291739615674785561505 2.4919049 0.8171349 0.011830050 0.011830050 5 5 0
9635 ATRAID 0.002306314748613086298284 2.4903213 0.8171260 0.011824324 0.011824324 5 5 0
1736 GALNT8 0.002306314748613088900370 2.4903213 0.8171260 0.011824324 0.011824324 7 7 0
9707 CDKL4 0.002311738543013478613952 2.4898404 0.8171575 0.011847229 0.011847229 5 5 0
> meta.results.burden.gene[meta.results.burden.gene[,"gene"] %in% clinical.genes,]
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
12804 NPM1 0.000000000000000005198732 1.04613378 0.1209554 0.179661017 0.179661017 8 8 0
9610 DNMT3A 0.000000000000199583042110 2.35219094 0.3200663 0.067910908 0.067910908 24 24 0
3787 IDH2 0.000000013605859243950595 2.31978798 0.4085373 0.047400835 0.047400835 5 5 0
11855 TET2 0.000012074718120871907201 1.23671084 0.2825970 0.070860136 0.070860136 39 39 0
9052 RUNX1 0.000032710565942251596070 1.77040060 0.4262199 0.027404805 0.027404805 12 12 0
4699 TP53 0.000116093623281911869239 1.91840825 0.4977408 0.022169522 0.022169522 10 10 0
7724 NRAS 0.003710317430548133472296 3.12063001 1.0754138 0.006779661 0.006779661 4 4 0
10330 IDH1 0.003760195379923354525725 3.11642276 1.0755154 0.006872852 0.006872852 1 1 0
2627 FLT3 0.007293311413177344139369 1.50846696 0.5622008 0.022073837 0.022073837 7 7 0
894 WT1 0.070330937370697080290860 1.59454406 0.8810739 0.010135135 0.010135135 5 5 0
11666 KIT 0.073757958808056370281214 1.57580826 0.8812694 0.010265529 0.010265529 5 5 0
1907 KRAS 0.202970214166854234782988 1.57821767 1.2396312 0.005067568 0.005067568 2 2 0
494 SMC3 0.318927922043224509884851 -1.51072866 1.5157862 0.003442433 0.003442433 2 2 0
15155 HNRNPK 0.422434173461554707262877 0.61053173 0.7610666 0.010135135 0.010135135 6 6 0
14968 JAK2 0.506363538396016887865869 -0.48053684 0.7231439 0.015202703 0.015202703 8 8 0
9100 U2AF1 0.594362502534595771308545 0.80712018 1.5156478 0.003378378 0.003378378 2 2 0
2418 PTPN11 0.595513595715084664838912 0.80460856 1.5156609 0.003384104 0.003384104 2 2 0
8193 FAM5C 0.596667840900447998819800 0.80208760 1.5156653 0.003389831 0.003389831 2 2 0
1566 MLL 0.732588605446840124280072 -0.20569895 0.6020149 0.018581081 0.018581081 11 11 0
14795 RAD21 0.961099484015240812517789 0.04297574 0.8811217 0.010164159 0.010164159 4 4 0
14320 EZH2 0.962014617123815418686661 0.04196169 0.8810739 0.010135135 0.010135135 5 5 0
####### filtered genotypes: pass<- full.qual & bad.coding & maf.filter & !in.common.hit.gene & !on.x.y & !unannotated.hits & not.flat.genotype & !are.repeats &( ok.missing.filt | is.unwound.geno) & hw.controls.ok.filt & !no.genotypes.filt & !are.in.repeats & rare.in.controls.filt & rare.in.group
> > > gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
13351 NPM1 0.000000000000000005198732 1.0461338 0.1209554 0.17966102 0.17966102 8 8 0
10030 DNMT3A 0.000000000000199583042110 2.3521909 0.3200663 0.06791091 0.06791091 24 24 0
13594 GPANK1 0.000000011804785007255658 2.0650544 0.3621300 0.10794559 0.10794559 6 6 0
3944 IDH2 0.000000013605859243950595 2.3197880 0.4085373 0.04740084 0.04740084 5 5 0
11630 SEC61A1 0.000000124700250433274801 2.1204570 0.4011103 0.06864493 0.06864493 5 5 0
711 MRGPRG-AS1 0.000000738935917701869801 1.7696535 0.3574451 0.05286128 0.05286128 6 6 0
2166 CSAD 0.000002029191699452423640 1.5548682 0.3273066 0.14261090 0.14261090 9 9 0
7002 ZNF880 0.000004229308603069661974 1.8574131 0.4038045 0.11837622 0.11837622 8 8 0
1749 ST14 0.000006837954464606166843 1.5324597 0.3406472 0.07902129 0.07902129 6 6 0
13536 HLA-A 0.000011041783483456846593 0.5465112 0.1243287 0.28932879 0.28932879 19 19 0
6549 U2AF1L4 0.000011740494742377153553 1.7422707 0.3975652 0.06555309 0.06555309 5 5 0
2318 HELB 0.000025771180707801876196 1.9108266 0.4541005 0.04395743 0.04395743 11 11 0
6481 ZNF254 0.000027691079140785789095 0.9712906 0.2317193 0.08286761 0.08286761 9 9 0
9443 RUNX1 0.000032710565942251596070 1.7704006 0.4262199 0.02740480 0.02740480 12 12 0
14915 ENSG00000257743 0.000042388014398671702702 1.9023615 0.4646635 0.02702703 0.02702703 5 5 0
7967 RNPC3 0.000058432551854491600416 1.6933303 0.4213257 0.04408521 0.04408521 4 4 0
11141 GLB1 0.000064323090770531730193 1.8035849 0.4513066 0.03961539 0.03961539 2 2 0
8281 LMNA 0.000070478854723002212204 1.8877066 0.4749349 0.03979004 0.03979004 4 4 0
10488 MYO7B 0.000111318148685442098816 1.1076194 0.2866141 0.11425173 0.11425173 19 19 0
8139 ANXA9 0.000114012786876461089873 3.1531366 0.8171595 0.01186441 0.01186441 5 5 0
4887 TP53 0.000116093623281911869239 1.9184083 0.4977408 0.02216952 0.02216952 10 10 0
12855 ATP6AP1L 0.000120345718131096676338 2.4209233 0.6295600 0.02027027 0.02027027 5 5 0
10055 ATRAID 0.000124818098998251202992 1.6603143 0.4327719 0.03716216 0.03716216 6 6 0
10776 IDH1 0.000147522836223088713590 1.7863450 0.4706850 0.04067390 0.04067390 4 4 0
15843 ROR2 0.000160064636162976807118 1.5819575 0.4190714 0.04171823 0.04171823 10 10 0
12102 TLR1 0.000169399407266112797272 1.8925524 0.5032370 0.02878576 0.02878576 10 10 0
15486 KIAA0196 0.000171163465494147894359 1.6485204 0.4386503 0.04391892 0.04391892 6 6 0
12361 TET2 0.000185530298357044874527 0.5103515 0.1365326 0.17558987 0.17558987 45 45 0
14474 OGDH 0.000194889275992655281160 1.3032912 0.3498251 0.06953254 0.06953254 7 7 0
1768 ENSG00000268844 0.000197390549831760232519 1.9793946 0.5317618 0.05172414 0.05172414 1 1 0
7129 ZNF304 0.000228376293438168156448 1.9157186 0.5198180 0.03078272 0.03078272 2 2 0
6309 MRI1 0.000253049384171016161771 1.8811752 0.5141012 0.02547580 0.02547580 4 4 0
7210 ACAP3 0.000302869423169560743076 1.1385046 0.3151279 0.07570491 0.07570491 5 5 0
12419 KIAA1109 0.000315524229182963754630 1.5873223 0.4406524 0.03558464 0.03558464 19 19 0
4647 ATP2C2 0.000315617699817664996292 1.1454037 0.3179794 0.08658198 0.08658198 19 19 0
9686 KREMEN1 0.000320169949832403081917 2.2510153 0.6255585 0.01691503 0.01691503 7 7 0
1799 PRMT8 0.000321607141685174727306 2.3789902 0.6613369 0.02583449 0.02583449 3 3 0
2103 BCDIN3D 0.000355552591566583072857 1.3655719 0.3824006 0.06073695 0.06073695 3 3 0
14939 TAS2R60 0.000362685106858874357211 2.3406188 0.6563994 0.01858108 0.01858108 1 1 0
949 ABTB2 0.000383803694537056662526 1.3745016 0.3870770 0.06083512 0.06083512 6 6 0
794 TRIM3 0.000429704007948382665609 2.3123523 0.6567065 0.01894891 0.01894891 4 4 0
15137 ENTPD4 0.000441509267988472264930 1.2170605 0.3463519 0.04095219 0.04095219 10 10 0
10429 EDAR 0.000448892920076970067863 1.5830456 0.4510700 0.02197675 0.02197675 4 4 0
7781 ACOT11 0.000503425798686950938705 0.7018158 0.2017334 0.11438036 0.11438036 12 12 0
5956 CTDP1 0.000517015944726954227578 1.0401717 0.2996074 0.05947742 0.05947742 10 10 0
213 RHOBTB1 0.000535126076548453040796 2.0145107 0.5818034 0.02121967 0.02121967 4 4 0
10948 KLHL30 0.000562384633953766409700 2.0860473 0.6048030 0.02474246 0.02474246 8 8 0
2501 RAD9B 0.000571678983766462224352 1.8387324 0.5337849 0.02888874 0.02888874 4 4 0
16222 ABCA2 0.000577529196555780135330 1.4510557 0.4215791 0.05815759 0.05815759 12 12 0
14711 ZAN 0.000596752484487304423084 0.5176847 0.1507928 0.25183902 0.25183902 39 39 0
> gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
13351 NPM1 0.000000000000000005198732 1.04613378 0.1209554 0.179661017 0.179661017 8 8 0
10030 DNMT3A 0.000000000000199583042110 2.35219094 0.3200663 0.067910908 0.067910908 24 24 0
3944 IDH2 0.000000013605859243950595 2.31978798 0.4085373 0.047400835 0.047400835 5 5 0
9443 RUNX1 0.000032710565942251596070 1.77040060 0.4262199 0.027404805 0.027404805 12 12 0
4887 TP53 0.000116093623281911869239 1.91840825 0.4977408 0.022169522 0.022169522 10 10 0
10776 IDH1 0.000147522836223088713590 1.78634504 0.4706850 0.040673902 0.040673902 4 4 0
12361 TET2 0.000185530298357044874527 0.51035154 0.1365326 0.175589866 0.175589866 45 45 0
8053 NRAS 0.007276758615167416496816 2.66958354 0.9946636 1.840112994 1.840112994 8 8 0
2731 FLT3 0.016007804546955266278285 0.93099866 0.3865090 0.045722486 0.045722486 9 9 0
12162 KIT 0.066285869450683806980429 1.11727855 0.6083786 0.028400244 0.028400244 6 6 0
936 WT1 0.070330937370697080290860 1.59454406 0.8810739 0.010135135 0.010135135 5 5 0
1987 KRAS 0.202970214166854234782988 1.57821767 1.2396312 0.005067568 0.005067568 2 2 0
512 SMC3 0.318927922043224509884851 -1.51072866 1.5157862 0.003442433 0.003442433 2 2 0
15818 HNRNPK 0.422434173461554707262877 0.61053173 0.7610666 0.010135135 0.010135135 6 6 0
1633 MLL 0.547357757609294282019619 0.31896817 0.5300903 0.025337838 0.025337838 13 13 0
9498 U2AF1 0.594362502582130414197081 0.80712018 1.5156478 1.003378378 1.003378378 4 4 0
8547 FAM5C 0.596667840900447998819800 0.80208760 1.5156653 0.003389831 0.003389831 2 2 0
2519 PTPN11 0.635146991466079557930868 0.64281004 1.3547247 0.753384104 0.753384104 4 4 0
15623 JAK2 0.751870165974434678801686 -0.18493752 0.5849226 0.023648649 0.023648649 9 9 0
15447 RAD21 0.787526013436009808543758 0.15766029 0.5849569 0.023677673 0.023677673 5 5 0
14955 EZH2 0.962014617123815418686661 0.04196169 0.8810739 0.010135135 0.010135135 5 5 0
> gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
10190 FANCL 0.009713958 1.630338920 0.6304851 0.021112253 0.021112253 7 7 0
5673 C17orf70 0.116968166 0.787652229 0.5024487 0.030952513 0.030952513 10 10 0
13691 FANCE 0.123163401 -1.059459207 0.6872329 0.016891892 0.016891892 2 2 0
11051 FANCD2 0.139117951 -0.768517923 0.5195906 0.030451214 0.030451214 8 8 0
6498 C19orf40 0.158692635 -1.515847751 1.0754643 0.006825939 0.006825939 1 1 0
916 FANCF 0.451435530 0.544661693 0.7232993 0.015410959 0.015410959 2 2 0
15873 FANCC 0.624420960 -0.277156188 0.5660935 0.025337838 0.025337838 4 4 0
3136 FANCM 0.656014862 0.092626079 0.2079513 0.165998622 0.165998622 23 23 0
4703 FANCA 0.750213021 -0.131985079 0.4145801 0.045684388 0.045684388 18 18 0
15727 FANCG 0.945737438 0.042848200 0.6295600 0.020270270 0.020270270 4 4 0
3930 FANCI 0.948995022 -0.030280911 0.4733696 0.037162162 0.037162162 12 12 0
7237 C1orf86 0.989379441 -0.008377649 0.6293642 0.021303642 0.021303642 2 2 0
>
########################################## BEST
####### full genotypes: pass<- full.qual & bad.coding & maf.filter & !in.common.hit.gene & !on.x.y & !unannotated.hits & not.flat.genotype & !are.repeats &( ok.missing.filt | is.unwound.geno) & hw.controls.ok.filt & !no.genotypes.filt & !are.in.repeats & rare.in.controls.filt & rare.in.group
## from regular genotypes maf 0.01-10
52 Clinical 0.00000000000000000000000000002083576 0.68524895 0.06086059 0.7760321 0.7760321 188 188 0
707 FANC - ACID 0.77103190867222715088047380049829371 0.03695912 0.12699620 0.4411895 0.4411895 93 93 0
> > > >
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
12729 NPM1 0.000000000000000004666874 1.0474066 0.1209304 0.179054054 0.179054054 8 8 0
2711 FLT3 0.000000000000049318060989 2.46599120 0.3273275 0.067681945 0.067681945 9 9 0
9540 DNMT3A 0.000000000000185717732325 2.3545462 0.3199679 0.067717581 0.067717581 24 24 0
3762 IDH2 0.000000013054500616296249 2.3222497 0.4084620 0.047297297 0.047297297 5 5 0
10256 IDH1 0.000000331598514265690453 3.2136150 0.6295600 0.020270270 0.020270270 3 3 0
1660 ST14 0.000000396027727362394940 1.9358463 0.3817592 0.055818388 0.055818388 4 4 0
11779 TET2 0.000019808388425572099386 1.2116587 0.2839577 0.067567568 0.067567568 38 38 0
8986 RUNX1 0.000029515227742021809366 1.7799352 0.4261098 0.027067462 0.027067462 12 12 0
7576 RNPC3 0.000058475485235818226389 1.6931988 0.4213111 0.044067797 0.044067797 4 4 0
7658 NRAS 0.000074914710439908353693 2.5831005 0.6522797 0.015254237 0.015254237 8 8 0
4664 TP53 0.000108295608479553532970 1.9264148 0.4976258 0.021982481 0.021982481 10 10 0
13979 MCM7 0.000111187109458177864145 3.1580161 0.8171260 0.011824324 0.011824324 6 6 0
9377 CYP2D6 0.000114075565379125875849 2.3233540 0.6021360 0.018726133 0.018726133 10 10 0
12256 ATP6AP1L 0.000238452901613662066353 2.6570861 0.7231439 0.015202703 0.015202703 4 4 0
5756 TLE6 0.000258761091211884709361 -1.5154747 0.4148088 0.045872403 0.045872403 4 4 0
12948 GPANK1 0.000264697418450174023780 1.2990350 0.3561338 0.071655059 0.071655059 4 4 0
11831 KIAA1109 0.000308685940444626853371 1.5895272 0.4405686 0.035472973 0.035472973 19 19 0
7740 ANXA9 0.000362147663061860734714 3.1422261 0.8811048 0.010169492 0.010169492 4 4 0
10922 CRYBG3 0.000457354068794540470633 2.1235191 0.6059294 0.021959459 0.021959459 10 10 0
14521 PRKDC 0.000481131631454768124781 0.8484258 0.2430291 0.134599946 0.134599946 22 22 0
949 NR1H3 0.000687113422372042552747 2.4398835 0.7187395 0.011858915 0.011858915 7 7 0
3078 ESR2 0.000743219662802089295686 2.5827199 0.7656772 0.013513514 0.013513514 7 7 0
10236 NRP2 0.000752044732056882016952 2.5803454 0.7657113 0.013536653 0.013536653 8 8 0
8271 CENPF 0.000963201086109111061923 1.5206316 0.4606494 0.035593220 0.035593220 16 16 0
13488 PLEKHG1 0.001022374605895130881411 1.5774870 0.4803121 0.032094595 0.032094595 12 12 0
6595 VRK3 0.001073934611807498456029 1.9688328 0.6020149 0.018581081 0.018581081 7 7 0
2498 ATP6V0A2 0.001079284197161915715910 2.2465641 0.6872329 0.016891892 0.016891892 5 5 0
2590 RNF17 0.001099120584976548842954 1.6422868 0.5031758 0.028716216 0.028716216 11 11 0
201 RHOBTB1 0.001133542127159656255284 3.1363116 0.9635083 0.008445946 0.008445946 3 3 0
8736 L3MBTL1 0.001137640815658285363410 3.1353567 0.9635184 0.008451672 0.008451672 5 5 0
12421 SLC23A1 0.001141781308313696103804 3.1343953 0.9635286 0.008457437 0.008457437 5 5 0
6010 C19orf57 0.001141783739387275311422 3.1343871 0.9635262 0.008457398 0.008457398 4 4 0
12553 NDST1 0.001143436272736687679685 1.8291797 0.5623700 0.021959459 0.021959459 7 7 0
6488 PPP5C 0.001171382463498273291194 3.1275459 0.9635794 0.008497715 0.008497715 3 3 0
10413 COL6A3 0.001203607223687304774232 0.8839309 0.2729847 0.099679379 0.099679379 33 33 0
9387 PACSIN2 0.001366863775572994561119 1.6633733 0.5195511 0.030405405 0.030405405 8 8 0
4257 MMP15 0.001452027290110410352730 -1.4203910 0.4460881 0.042384569 0.042384569 5 5 0
8240 CD34 0.001864112754271283686758 1.1082620 0.3562319 0.059227213 0.059227213 5 5 0
8071 ASTN1 0.002088028437452790157108 1.2603555 0.4095498 0.038988816 0.038988816 17 17 0
12433 SRA1 0.002125094123910511306752 2.3383450 0.7611382 0.010227693 0.010227693 3 3 0
7549 ABCA4 0.002185309251448841645626 0.9064358 0.2958516 0.076271186 0.076271186 29 29 0
1014 OR5M3 0.002283666905670040866982 2.4928257 0.8171528 0.011841620 0.011841620 5 5 0
9635 CDKL4 0.002306314748613079793071 2.4903213 0.8171260 0.011824324 0.011824324 5 5 0
14671 NIPAL2 0.002306314748613079793071 2.4903213 0.8171260 0.011824324 0.011824324 3 3 0
6186 SLC7A9 0.002306314748613086298284 2.4903213 0.8171260 0.011824324 0.011824324 7 7 0
9565 ATRAID 0.002306314748613086298284 2.4903213 0.8171260 0.011824324 0.011824324 5 5 0
4403 CNTNAP4 0.002306314748613087165646 2.4903213 0.8171260 0.011824324 0.011824324 5 5 0
1720 GALNT8 0.002306314748613088900370 2.4903213 0.8171260 0.011824324 0.011824324 7 7 0
6702 TARM1 0.002306314748613088900370 2.4903213 0.8171260 0.011824324 0.011824324 4 4 0
6311 SAMD4B 0.002333289395956713706964 2.4875738 0.8171619 0.011847464 0.011847464 4 4 0
9294 TMPRSS6 0.002339903226242971787802 2.4868960 0.8171679 0.011853033 0.011853033 7 7 0
> gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
12729 NPM1 0.000000000000000004666874 1.04740664 0.1209304 0.179054054 0.179054054 8 8 0
9540 DNMT3A 0.000000000000185717732325 2.35454622 0.3199679 0.067717581 0.067717581 24 24 0
3762 IDH2 0.000000013054500616296249 2.32224967 0.4084620 0.047297297 0.047297297 5 5 0
10256 IDH1 0.000000331598514265690453 3.21361502 0.6295600 0.020270270 0.020270270 3 3 0
11779 TET2 0.000019808388425572099386 1.21165871 0.2839577 0.067567568 0.067567568 38 38 0
8986 RUNX1 0.000029515227742021809366 1.77993519 0.4261098 0.027067462 0.027067462 12 12 0
7658 NRAS 0.000074914710439908353693 2.58310051 0.6522797 0.015254237 0.015254237 8 8 0
4664 TP53 0.000108295608479553532970 1.92641482 0.4976258 0.021982481 0.021982481 10 10 0
2606 FLT3 0.007139709251128845308998 1.51291969 0.5623700 0.021959459 0.021959459 7 7 0
2397 PTPN11 0.067090467507398429680698 1.96910959 1.0753889 0.006756757 0.006756757 4 4 0
9034 U2AF1 0.067090467507398443558486 1.96910959 1.0753889 0.006756757 0.006756757 4 4 0
11592 KIT 0.070330937370696955390770 1.59454406 0.8810739 0.010135135 0.010135135 5 5 0
886 WT1 0.070330937370697080290860 1.59454406 0.8810739 0.010135135 0.010135135 5 5 0
1891 KRAS 0.202970214166854234782988 1.57821767 1.2396312 0.005067568 0.005067568 2 2 0
492 SMC3 0.325546960976486610128688 -1.49006803 1.5156478 0.003378378 0.003378378 2 2 0
15071 HNRNPK 0.422434173461554707262877 0.61053173 0.7610666 0.010135135 0.010135135 6 6 0
14884 JAK2 0.506363538396016887865869 -0.48053684 0.7231439 0.015202703 0.015202703 8 8 0
8127 FAM5C 0.596667840900447998819800 0.80208760 1.5156653 0.003389831 0.003389831 2 2 0
1551 MLL 0.732588605446840124280072 -0.20569895 0.6020149 0.018581081 0.018581081 11 11 0
14240 EZH2 0.962014617123815418686661 0.04196169 0.8810739 0.010135135 0.010135135 5 5 0
14713 RAD21 0.962014617123815418686661 0.04196169 0.8810739 0.010135135 0.010135135 4 4 0
> gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
9698 FANCL 0.009701225 1.6282316 0.6295600 0.020270270 0.020270270 7 7 0
5409 C17orf70 0.136874385 1.0759111 0.7232861 0.015330052 0.015330052 7 7 0
14987 FANCG 0.325546961 -1.4900680 1.5156478 0.003378378 0.003378378 2 2 0
10524 FANCD2 0.392884700 -0.5871702 0.6872329 0.016891892 0.016891892 7 7 0
2993 FANCM 0.398249982 0.3771907 0.4465111 0.042229730 0.042229730 16 16 0
4490 FANCA 0.404252247 -0.3841600 0.4605953 0.035472973 0.035472973 17 17 0
866 FANCF 0.449841885 0.8126484 1.0753889 0.006756757 0.006756757 1 1 0
13037 FANCE 0.487686222 -1.4850169 2.1398136 0.001689189 0.001689189 1 1 0
15122 FANCC 0.624420960 -0.2771562 0.5660935 0.025337838 0.025337838 4 4 0
8451 C1orf86 0.703563876 -0.2497855 0.6564424 0.018644068 0.018644068 1 1 0
3749 FANCI 0.784054703 -0.1462422 0.5336548 0.028716216 0.028716216 11 11 0
>
####### full genotypes: pass<- full.qual & bad.coding & maf.filter & !in.common.hit.gene & !on.x.y & !unannotated.hits & not.flat.genotype & !are.repeats &( ok.missing.filt | is.unwound.geno) & hw.controls.ok.filt & !no.genotypes.filt & !are.in.repeats & rare.in.controls.filt & rare.in.group
## from regular genotypes maf 0.001-2
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
371 Clinical 0.00000000000000000000000000000000004925784 0.9007790 0.07294287 0.50365483 0.50365483 147 147 0
1248 FANC - ACID 0.43818185292591255164396102372847963124514 0.2559758 0.33017759 0.06589567 0.06589567 36 36 0
10597 NPM1 0.000000000000000004666874 1.047407 0.1209304 0.179054054 0.179054054 8 8 0
7891 DNMT3A 0.000000000000552230211637 2.367183 0.3282382 0.062650013 0.062650013 21 21 0
3110 IDH2 0.000000000023130646061980 3.306763 0.4946721 0.033783784 0.033783784 2 2 0
8489 IDH1 0.000000331598514265690453 3.213615 0.6295600 0.020270270 0.020270270 3 3 0
5516 ZNF880 0.000001625172471612332086 1.456413 0.3037235 0.085255050 0.085255050 5 5 0
9793 TET2 0.000011451729355498889545 1.314288 0.2995343 0.059121622 0.059121622 34 34 0
9836 KIAA1109 0.000012012177824843884501 2.461700 0.5623700 0.021959459 0.021959459 12 12 0
7428 RUNX1 0.000029515227742021809366 1.779935 0.4261098 0.027067462 0.027067462 12 12 0
6332 NRAS 0.000074914710439908353693 2.583101 0.6522797 0.015254237 0.015254237 8 8 0
8621 COL6A3 0.000075092562780992657442 1.914991 0.4836391 0.035484464 0.035484464 19 19 0
3936 MYO15A 0.000084047277731249094360 1.627472 0.4138458 0.033841318 0.033841318 19 19 0
7184 CEP250 0.000111187109458178230064 3.158016 0.8171260 0.011824324 0.011824324 7 7 0
8638 ANKMY1 0.000111187109458178230064 3.158016 0.8171260 0.011824324 0.011824324 7 7 0
9827 USP53 0.000111187109458178230064 3.158016 0.8171260 0.011824324 0.011824324 7 7 0
6654 ASTN1 0.000133839078173962654824 1.959539 0.5130642 0.023728814 0.023728814 13 13 0
11632 MCM7 0.000354372117981734016494 3.147126 0.8810739 0.010135135 0.010135135 5 5 0
1867 C12orf50 0.000354372117981734287544 3.147126 0.8810739 0.010135135 0.010135135 5 5 0
5126 SLC7A9 0.000354372117981735100696 3.147126 0.8810739 0.010135135 0.010135135 6 6 0
9582 ARAP2 0.000354372117981735100696 3.147126 0.8810739 0.010135135 0.010135135 6 6 0
12254 FER1L6 0.000362685106858877338767 2.340619 0.6563994 0.018581081 0.018581081 11 11 0
8583 SPATA3 0.000366044773861016385728 3.139892 0.8811439 0.010187021 0.010187021 5 5 0
1014 PCNXL3 0.000367371992882505613368 3.139089 0.8811535 0.010192866 0.010192866 6 6 0
1646 MLL2 0.000742263683766061733646 1.769209 0.5244475 0.018581081 0.018581081 11 11 0
403 SORCS3 0.000743219662802089187266 2.582720 0.7656772 0.013513514 0.013513514 6 6 0
404 SORCS1 0.000743219662802090921989 2.582720 0.7656772 0.013513514 0.013513514 8 8 0
1825 HELB 0.000743219662802090921989 2.582720 0.7656772 0.013513514 0.013513514 5 5 0
3863 TP53 0.000748277501256778697532 1.768094 0.5244624 0.018604103 0.018604103 9 9 0
6796 CD34 0.000782871117631803257325 1.245368 0.3707798 0.057532298 0.057532298 4 4 0
11994 ENTPD4 0.000919245595398182320859 2.161655 0.6522515 0.015202703 0.015202703 5 5 0
1282 DSCAML1 0.000928345275044910908484 2.159931 0.6522738 0.015225607 0.015225607 9 9 0
4411 MYO15B 0.001079143986350689348591 1.360112 0.4160590 0.037202362 0.037202362 19 19 0
7983 THADA 0.001079284197161923739006 2.246564 0.6872329 0.016891892 0.016891892 10 10 0
4423 EVPL 0.001130514823533310337159 2.237922 0.6873530 0.016995823 0.016995823 10 10 0
67 MLLT10 0.001133542127159656688964 3.136312 0.9635083 0.008445946 0.008445946 5 5 0
328 CRTAC1 0.001133542127159656688964 3.136312 0.9635083 0.008445946 0.008445946 5 5 0
751 PAMR1 0.001133542127159656688964 3.136312 0.9635083 0.008445946 0.008445946 5 5 0
5456 VRK3 0.001133542127159656688964 3.136312 0.9635083 0.008445946 0.008445946 5 5 0
8143 SMYD1 0.001133542127159656688964 3.136312 0.9635083 0.008445946 0.008445946 4 4 0
8528 TTLL4 0.001133542127159656688964 3.136312 0.9635083 0.008445946 0.008445946 5 5 0
9059 CRYBG3 0.001133542127159656688964 3.136312 0.9635083 0.008445946 0.008445946 5 5 0
9808 CFI 0.001133542127159656688964 3.136312 0.9635083 0.008445946 0.008445946 5 5 0
12106 TOX 0.001133542127159656688964 3.136312 0.9635083 0.008445946 0.008445946 5 5 0
10885 UNC5CL 0.001137640815658282978165 3.135357 0.9635184 0.008451672 0.008451672 5 5 0
10323 SLC23A1 0.001141781308313696103804 3.134395 0.9635286 0.008457437 0.008457437 5 5 0
2971 IGDCC4 0.001145964219784313762457 3.133427 0.9635388 0.008463241 0.008463241 5 5 0
8472 NRP2 0.001150190176437126780840 3.132453 0.9635491 0.008469086 0.008469086 5 5 0
6073 RAD54L 0.001154481159148085288660 3.131391 0.9635364 0.008474576 0.008474576 5 5 0
6279 CELSR2 0.001154595264666994063746 2.233908 0.6873848 0.017042346 0.017042346 10 10 0
6732 PKP1 0.001158678519010327128552 3.130429 0.9635465 0.008480341 0.008480341 5 5 0
4758 TMPRSS9 0.001162843638144572117055 3.129539 0.9635751 0.008486381 0.008486381 5 5 0
> gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
10597 NPM1 0.000000000000000004666874 1.0474066 0.1209304 0.179054054 0.179054054 8 8 0
7891 DNMT3A 0.000000000000552230211637 2.3671831 0.3282382 0.062650013 0.062650013 21 21 0
3110 IDH2 0.000000000023130646061980 3.3067633 0.4946721 0.033783784 0.033783784 2 2 0
8489 IDH1 0.000000331598514265690453 3.2136150 0.6295600 0.020270270 0.020270270 3 3 0
9793 TET2 0.000011451729355498889545 1.3142885 0.2995343 0.059121622 0.059121622 34 34 0
7428 RUNX1 0.000029515227742021809366 1.7799352 0.4261098 0.027067462 0.027067462 12 12 0
6332 NRAS 0.000074914710439908353693 2.5831005 0.6522797 0.015254237 0.015254237 8 8 0
3863 TP53 0.000748277501256778697532 1.7680941 0.5244624 0.018604103 0.018604103 9 9 0
2164 FLT3 0.002032749790892977338752 2.3481990 0.7610666 0.010135135 0.010135135 5 5 0
736 WT1 0.003655478822618108813297 3.1255708 1.0753889 0.006756757 0.006756757 4 4 0
9639 KIT 0.011978874707657898024404 3.1149033 1.2396312 0.005067568 0.005067568 3 3 0
12229 RAD21 0.040543082123378960945903 3.1043084 1.5156478 0.003378378 0.003378378 2 2 0
1980 PTPN11 0.067090467507398429680698 1.9691096 1.0753889 0.006756757 0.006756757 4 4 0
7470 U2AF1 0.067090467507398443558486 1.9691096 1.0753889 0.006756757 0.006756757 4 4 0
1573 KRAS 0.202970214166854234782988 1.5782177 1.2396312 0.005067568 0.005067568 2 2 0
12530 HNRNPK 0.422434173461554707262877 0.6105317 0.7610666 0.010135135 0.010135135 6 6 0
11839 EZH2 0.449841885210977288078738 0.8126484 1.0753889 0.006756757 0.006756757 4 4 0
408 SMC3 0.487686221714785539393944 -1.4850169 2.1398136 0.001689189 0.001689189 1 1 0
6702 FAM5C 0.596667840900447998819800 0.8020876 1.5156653 0.003389831 0.003389831 2 2 0
12373 JAK2 0.715431886461324495485314 0.3512669 0.9635083 0.008445946 0.008445946 5 5 0
1292 MLL 0.781879288747886591615099 0.1890884 0.6829481 0.013513514 0.013513514 8 8 0
> gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
8032 FANCL 0.06709047 1.96910959 1.0753889 0.006756757 0.006756757 3 3 0
3098 FANCI 0.28188840 0.82393519 0.7656772 0.013513514 0.013513514 8 8 0
12462 FANCG 0.32554696 -1.49006803 1.5156478 0.003378378 0.003378378 2 2 0
2482 FANCM 0.43434551 0.56533746 0.7231439 0.015202703 0.015202703 8 8 0
10852 FANCE 0.48768622 -1.48501695 2.1398136 0.001689189 0.001689189 1 1 0
12879 FANCC 0.48768622 -1.48501695 2.1398136 0.001689189 0.001689189 1 1 0
3719 FANCA 0.64891679 -0.34858796 0.7656772 0.013513514 0.013513514 7 7 0
8725 FANCD2 0.97327304 0.04153204 1.2396312 0.005067568 0.005067568 3 3 0
4472 C17orf70 0.97655520 0.03643125 1.2396673 0.005084863 0.005084863 3 3 0
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
1233 ATM 0.01923871 1.32515302 0.5660935 0.025337838 0.025337838 15 15 0
3941 TOP3A 0.02192947 2.20796334 0.9635083 0.008445946 0.008445946 5 5 0
8032 FANCL 0.06709047 1.96910959 1.0753889 0.006756757 0.006756757 3 3 0
2791 FAN1 0.15753607 1.15493162 0.8171260 0.011824324 0.011824324 7 7 0
3098 FANCI 0.28188840 0.82393519 0.7656772 0.013513514 0.013513514 8 8 0
3119 BLM 0.32554696 -1.49006803 1.5156478 0.003378378 0.003378378 2 2 0
12462 FANCG 0.32554696 -1.49006803 1.5156478 0.003378378 0.003378378 2 2 0
2482 FANCM 0.43434551 0.56533746 0.7231439 0.015202703 0.015202703 8 8 0
10852 FANCE 0.48768622 -1.48501695 2.1398136 0.001689189 0.001689189 1 1 0
12879 FANCC 0.48768622 -1.48501695 2.1398136 0.001689189 0.001689189 1 1 0
1189 MRE11A 0.54921451 -0.57708133 0.9635083 0.008445946 0.008445946 5 5 0
3064 FAH 0.59551360 0.80460856 1.5156609 0.003384104 0.003384104 2 2 0
3719 FANCA 0.64891679 -0.34858796 0.7656772 0.013513514 0.013513514 7 7 0
8725 FANCD2 0.97327304 0.04153204 1.2396312 0.005067568 0.005067568 3 3 0
4472 C17orf70 0.97655520 0.03643125 1.2396673 0.005084863 0.005084863 3 3 0
> sum(meta.results.burden[,"gene"] %in% fanc.cluster)
Error in meta.results.burden[, "gene"] %in% fanc.cluster :
error in evaluating the argument 'table' in selecting a method for function '%in%': Error: object 'fanc.cluster' not found
> meta.results.burden[meta.results.burden[,"gene"] %in% clusters.wanted,]
gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
269 Clinical 0.00000000000000000000000000000000004925784 0.9007790 0.07294287 0.5036548 0.5036548 147 147 0
425 FANC - ACID 0.01778860926305391235158204210620169760659 0.5807776 0.24505496 0.1267122 0.1267122 72 72 0
### real genos 0.01 no filter tests
## pass<- full.qual & bad.coding & maf.filter & !in.common.hit.gene & !on.x.y & !unannotated.hits & not.flat.genotype & !are.repeats & ( ok.missing | is.unwound.geno) & hw.controls.ok & !no.genotypes & !are.in.repeats & rare.in.controls & rare.in.group
> > > > gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
13158 NPM1 0.000000000000000004666874 1.0474066 0.1209304 0.179054054 0.179054054 8 8 0
9885 DNMT3A 0.000000000000626740759891 2.2837670 0.3174308 0.069471967 0.069471967 25 25 0
3871 IDH2 0.000000013054500616296249 2.3222497 0.4084620 0.047297297 0.047297297 5 5 0
10623 IDH1 0.000000331598514265690453 3.2136150 0.6295600 0.020270270 0.020270270 3 3 0
1711 ST14 0.000000398444428453536362 1.8902375 0.3728500 0.059389817 0.059389817 5 5 0
9307 RUNX1 0.000011520978735816074680 1.8369797 0.4187840 0.028779791 0.028779791 13 13 0
12182 TET2 0.000016213366527191842669 1.2089637 0.2804029 0.070957437 0.070957437 40 40 0
7867 RNPC3 0.000017560009932915971728 1.6933256 0.3943606 0.047457627 0.047457627 6 6 0
9081 UBE2C 0.000041498170035095508933 1.9445022 0.4743873 0.039145907 0.039145907 1 1 0
7951 NRAS 0.000074914710439908353693 2.5831005 0.6522797 0.015254237 0.015254237 8 8 0
4813 TP53 0.000108295608479553532970 1.9264148 0.4976258 0.021982481 0.021982481 10 10 0
14448 MCM7 0.000111187109458177864145 3.1580161 0.8171260 0.011824324 0.011824324 6 6 0
15951 NOTCH1 0.000142414483804481471372 1.0743503 0.2824312 0.092344953 0.092344953 17 17 0
12676 ATP6AP1L 0.000238452901613662066353 2.6570861 0.7231439 0.015202703 0.015202703 4 4 0
12845 SLC23A1 0.000356907628275577887972 3.1455490 0.8810928 0.010146626 0.010146626 6 6 0
8034 ANXA9 0.000362147663061860734714 3.1422261 0.8811048 0.010169492 0.010169492 4 4 0
516 TCF7L2 0.000617325306994607092732 1.3700787 0.4001531 0.049185215 0.049185215 4 4 0
12235 KIAA1109 0.000664814726269654884296 1.4730227 0.4327719 0.037162162 0.037162162 20 20 0
984 NR1H3 0.000687113422372042552747 2.4398835 0.7187395 0.011858915 0.011858915 7 7 0
12857 SRA1 0.000719218043990806207497 2.4310968 0.7188029 0.011951831 0.011951831 4 4 0
3162 ESR2 0.000743219662802089295686 2.5827199 0.7656772 0.013513514 0.013513514 7 7 0
1772 GALNT8 0.000743219662802090921989 2.5827199 0.7656772 0.013513514 0.013513514 8 8 0
10603 NRP2 0.000752044732056882016952 2.5803454 0.7657113 0.013536653 0.013536653 8 8 0
8573 CENPF 0.000963201086109111061923 1.5206316 0.4606494 0.035593220 0.035593220 16 16 0
4099 SEC14L5 0.000988804896881187618540 1.6903010 0.5131933 0.023864397 0.023864397 11 11 0
14243 CAMK2B 0.000999240785028693507056 2.3601313 0.7172037 0.013022127 0.013022127 4 4 0
13941 PLEKHG1 0.001022374605895130881411 1.5774870 0.4803121 0.032094595 0.032094595 12 12 0
7180 NPHP4 0.001059862888942325089003 1.2595164 0.3846869 0.059876832 0.059876832 24 24 0
6846 VRK3 0.001073934611807498456029 1.9688328 0.6020149 0.018581081 0.018581081 7 7 0
2572 ATP6V0A2 0.001079284197161915715910 2.2465641 0.6872329 0.016891892 0.016891892 5 5 0
205 RHOBTB1 0.001133542127159656255284 3.1363116 0.9635083 0.008445946 0.008445946 3 3 0
9042 L3MBTL1 0.001137640815658285363410 3.1353567 0.9635184 0.008451672 0.008451672 5 5 0
12978 NDST1 0.001143436272736687679685 1.8291797 0.5623700 0.021959459 0.021959459 7 7 0
9771 MLC1 0.001145907487241422167620 3.1334404 0.9635387 0.008463163 0.008463163 5 5 0
6729 PPP5C 0.001171382463498273291194 3.1275459 0.9635794 0.008497715 0.008497715 3 3 0
11808 FGFRL1 0.001196865201944575943585 1.5303958 0.4723989 0.038284767 0.038284767 3 3 0
9716 CYP2D6 0.001222570793866454858662 1.8189709 0.5625286 0.022162559 0.022162559 11 11 0
12299 TMEM154 0.001234952964252972055018 3.1137951 0.9638198 0.008622428 0.008622428 4 4 0
272 DNAJC9 0.001325403525163939633461 2.2092866 0.6881609 0.017976919 0.017976919 2 2 0
9726 PACSIN2 0.001366863775572994561119 1.6633733 0.5195511 0.030405405 0.030405405 8 8 0
11302 CRYBG3 0.001406269910620657143455 1.8678690 0.5849226 0.023648649 0.023648649 11 11 0
2272 HELB 0.001419332295452793235926 1.7519940 0.5490956 0.027027027 0.027027027 10 10 0
10784 COL6A3 0.001723090950845458397406 0.8514486 0.2716617 0.101368568 0.101368568 34 34 0
14895 ENTPD4 0.002006604281164279298538 1.1211255 0.3629115 0.033789510 0.033789510 9 9 0
4753 RPAIN 0.002059928109132580942991 2.3454000 0.7611344 0.010199924 0.010199924 6 6 0
8369 ASTN1 0.002088028437452790157108 1.2603555 0.4095498 0.038988816 0.038988816 17 17 0
1049 OR5M3 0.002283666905670040866982 2.4928257 0.8171528 0.011841620 0.011841620 5 5 0
9983 CDKL4 0.002306314748613079793071 2.4903213 0.8171260 0.011824324 0.011824324 5 5 0
15158 NIPAL2 0.002306314748613079793071 2.4903213 0.8171260 0.011824324 0.011824324 3 3 0
6415 SLC7A9 0.002306314748613086298284 2.4903213 0.8171260 0.011824324 0.011824324 7 7 0
> gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
13158 NPM1 0.000000000000000004666874 1.04740664 0.1209304 0.179054054 0.179054054 8 8 0
9885 DNMT3A 0.000000000000626740759891 2.28376703 0.3174308 0.069471967 0.069471967 25 25 0
3871 IDH2 0.000000013054500616296249 2.32224967 0.4084620 0.047297297 0.047297297 5 5 0
10623 IDH1 0.000000331598514265690453 3.21361502 0.6295600 0.020270270 0.020270270 3 3 0
9307 RUNX1 0.000011520978735816074680 1.83697974 0.4187840 0.028779791 0.028779791 13 13 0
12182 TET2 0.000016213366527191842669 1.20896367 0.2804029 0.070957437 0.070957437 40 40 0
7951 NRAS 0.000074914710439908353693 2.58310051 0.6522797 0.015254237 0.015254237 8 8 0
4813 TP53 0.000108295608479553532970 1.92641482 0.4976258 0.021982481 0.021982481 10 10 0
2678 FLT3 0.007139709251128845308998 1.51291969 0.5623700 0.021959459 0.021959459 7 7 0
6423 CEBPA 0.055405577783932999369476 1.83600014 0.9584055 0.005287955 0.005287955 3 3 0
2468 PTPN11 0.067090467507398429680698 1.96910959 1.0753889 0.006756757 0.006756757 4 4 0
9357 U2AF1 0.067090467507398443558486 1.96910959 1.0753889 0.006756757 0.006756757 4 4 0
11989 KIT 0.070330937370696955390770 1.59454406 0.8810739 0.010135135 0.010135135 5 5 0
914 WT1 0.070330937370697080290860 1.59454406 0.8810739 0.010135135 0.010135135 5 5 0
1946 KRAS 0.202970214166854234782988 1.57821767 1.2396312 0.005067568 0.005067568 2 2 0
505 SMC3 0.325546960976486610128688 -1.49006803 1.5156478 0.003378378 0.003378378 2 2 0
15570 HNRNPK 0.422434173461554707262877 0.61053173 0.7610666 0.010135135 0.010135135 6 6 0
15377 JAK2 0.506363538396016887865869 -0.48053684 0.7231439 0.015202703 0.015202703 8 8 0
8426 FAM5C 0.596667840900447998819800 0.80208760 1.5156653 0.003389831 0.003389831 2 2 0
1599 MLL 0.732588605446840124280072 -0.20569895 0.6020149 0.018581081 0.018581081 11 11 0
14714 EZH2 0.962014617123815418686661 0.04196169 0.8810739 0.010135135 0.010135135 5 5 0
15201 RAD21 0.962014617123815418686661 0.04196169 0.8810739 0.010135135 0.010135135 4 4 0
> gene p beta se cmafTotal cmafUsed nsnpsTotal nsnpsUsed nmiss
10050 FANCL 0.009701225 1.6282316 0.6295600 0.020270270 0.020270270 7 7 0
5589 C17orf70 0.053156405 1.2173415 0.6295569 0.020461234 0.020461234 10 10 0
5611 STRA13 0.305273296 -0.9827900 0.9586413 0.005669328 0.005669328 2 2 0
15483 FANCG 0.325546961 -1.4900680 1.5156478 0.003378378 0.003378378 2 2 0
10894 FANCD2 0.392884700 -0.5871702 0.6872329 0.016891892 0.016891892 7 7 0
3075 FANCM 0.398249982 0.3771907 0.4465111 0.042229730 0.042229730 16 16 0
4627 FANCA 0.404252247 -0.3841600 0.4605953 0.035472973 0.035472973 17 17 0
894 FANCF 0.449841885 0.8126484 1.0753889 0.006756757 0.006756757 1 1 0
6417 C19orf40 0.487686222 -1.4850169 2.1398136 0.001689189 0.001689189 1 1 0
13473 FANCE 0.487686222 -1.4850169 2.1398136 0.001689189 0.001689189 1 1 0
15622 FANCC 0.624420960 -0.2771562 0.5660935 0.025337838 0.025337838 4 4 0
7155 C1orf86 0.673439727 0.2314917 0.5492980 0.027254067 0.027254067 5 5 0
3858 FANCI 0.784054703 -0.1462422 0.5336548 0.028716216 0.028716216 11 11 0
## the.an<-
## target<-"1"
## use.samples<-the.samples[pheno[,"SampleProject"]==target]
## the.an<-gsub(".GT",".AD",use.samples)
## use.samples
## snpinfo[1:5,]
## genos<-a.indel[loci,use.samples]
## a.indel[loci,the.an]
## colnames(genos)[genos[2,]!="0/0"]
## snpinfo[1:5,]
## ## Warning message:
## ## In check_format_skat(Z, SNPInfo, nullmodel, aggregateBy, snpNames) :
## ## Some missing genotypes - will be imputed to average dose
## exclude.samples<-gsub(".GT$","",nim.samples)
## dim(genotypes)
## dim(pheno)
## exclude<-rownames(genotypes) %in% exclude.samples
## sum(exclude)
## genotypes.ex<-genotypes[!exclude,]
## pheno.ex<-pheno[!exclude,]
## dim(genotypes.ex)
## dim(pheno.ex)
## cohort.seq.ex <- skatCohort(genotypes.ex, formula, SNPInfo = snpinfo, data=pheno.ex,verbose=FALSE)
## meta.results.skat.ex<-skatMeta(cohort.seq,SNPInfo = snpinfo)
## meta.results.burden.ex<-burdenMeta(cohort.seq.ex,wts=1,mafRange = c(0,1),SNPInfo = snpinfo)
## #meta.results.skatO.ex<-skatOMeta(cohort.seq.ex,burden.wts =1,SNPInfo = snpinfo)
## meta.results.skat.ex[meta.results.skat.ex[,"gene"] %in% test,]
## meta.results.burden.ex[meta.results.burden.ex[,"gene"] %in% test,]
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
###############$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
## "51.GT" "52.GT" "53.GT" "54.GT" "55.GT" "56.GT" "57.GT" "58.GT" "59.GT" "60.GT" "61.GT" "62.GT" "63.GT" "64.GT" "65.GT" "66.GT" "67.GT" "68.GT" "69.GT" "70.GT" "71.GT" "72.GT" "73.GT" "74.GT" "75.GT" "76.GT" "77.GT" "78.GT" "79.GT" "80.GT" "81.GT" "82.GT" "83.GT" "84.GT" "85.GT" "86.GT" "87.GT" "88.GT" "89.GT" "91.GT" "92.GT" "93.GT" "94.GT" "96.GT" "97.GT" "98.GT" "99.GT" "100.GT
http://www.hindawi.com/journals/ijcb/2012/161837/
http://www.genome.jp/dbget-bin/get_linkdb?-t+8+path:hsa00020
KEGG GENES
hsa:1431 CS; citrate synthase (EC:2.3.3.1); K01647 citrate synthase [EC:2.3.3.1]
hsa:1737 DLAT, DLTA, PDC-E2, PDCE2; dihydrolipoamide S-acetyltransferase (EC:2.3.1.12); K00627 pyruvate dehyd
hsa:1738 DLD, DLDD, DLDH, E3, GCSL, LAD, PHE3; dihydrolipoamide dehydrogenase (EC:1.8.1.4); K00382 dihydrolip
hsa:1743 DLST, DLTS; dihydrolipoamide S-succinyltransferase (E2 component of 2-oxo-glutarate complex) (EC:2.3
hsa:2271 FH, HLRCC, LRCC, MCL, MCUL1; fumarate hydratase (EC:4.2.1.2); K01679 fumarate hydratase, class II [E
hsa:3417 IDH1, HEL-216, HEL-S-26, IDCD, IDH, IDP, IDPC, PICD; isocitrate dehydrogenase 1 (NADP+), soluble (EC
hsa:3418 IDH2, D2HGA2, ICD-M, IDH, IDHM, IDP, IDPM, mNADP-IDH; isocitrate dehydrogenase 2 (NADP+), mitochondr
hsa:3419 IDH3A; isocitrate dehydrogenase 3 (NAD+) alpha (EC:1.1.1.41); K00030 isocitrate dehydrogenase (NAD+)
hsa:3420 IDH3B, H-IDHB, RP46; isocitrate dehydrogenase 3 (NAD+) beta (EC:1.1.1.41); K00030 isocitrate dehydro
hsa:3421 IDH3G, H-IDHG; isocitrate dehydrogenase 3 (NAD+) gamma (EC:1.1.1.41); K00030 isocitrate dehydrogenas
hsa:4190 MDH1, HEL-S-32, MDH-s, MDHA, MGC:1375, MOR2; malate dehydrogenase 1, NAD (soluble) (EC:1.1.1.37 1.1.
hsa:4191 MDH2, M-MDH, MDH, MGC:3559, MOR1; malate dehydrogenase 2, NAD (mitochondrial) (EC:1.1.1.37); K00026
hsa:47 ACLY, ACL, ATPCL, CLATP; ATP citrate lyase (EC:2.3.3.8); K01648 ATP citrate (pro-S)-lyase [EC:2.3.3.
hsa:48 ACO1, ACONS, HEL60, IREB1, IREBP, IREBP1, IRP1; aconitase 1, soluble (EC:4.2.1.3); K01681 aconitate
hsa:4967 OGDH, AKGDH, E1k, OGDC; oxoglutarate (alpha-ketoglutarate) dehydrogenase (lipoamide) (EC:1.2.4.2); K
hsa:50 ACO2, ACONM, ICRD; aconitase 2, mitochondrial (EC:4.2.1.3); K01681 aconitate hydratase [EC:4.2.1.3]
hsa:5091 PC, PCB; pyruvate carboxylase (EC:6.4.1.1); K01958 pyruvate carboxylase [EC:6.4.1.1]
hsa:5105 PCK1, PEPCK-C, PEPCK1, PEPCKC; phosphoenolpyruvate carboxykinase 1 (soluble) (EC:4.1.1.32); K01596 p
hsa:5106 PCK2, PEPCK, PEPCK-M, PEPCK2; phosphoenolpyruvate carboxykinase 2 (mitochondrial) (EC:4.1.1.32); K01
hsa:5160 PDHA1, PDHA, PDHCE1A, PHE1A; pyruvate dehydrogenase (lipoamide) alpha 1 (EC:1.2.4.1); K00161 pyruvat
hsa:5161 PDHA2, PDHAL; pyruvate dehydrogenase (lipoamide) alpha 2 (EC:1.2.4.1); K00161 pyruvate dehydrogenase
hsa:5162 PDHB, PDHBD, PDHE1-B, PHE1B; pyruvate dehydrogenase (lipoamide) beta (EC:1.2.4.1); K00162 pyruvate d
hsa:55753 OGDHL; oxoglutarate dehydrogenase-like (EC:1.2.4.-); K00164 2-oxoglutarate dehydrogenase E1 componen
hsa:6389 SDHA, CMD1GG, FP, PGL5, SDH1, SDH2, SDHF; succinate dehydrogenase complex, subunit A, flavoprotein (
hsa:6390 SDHB, CWS2, IP, PGL4, SDH, SDH1, SDH2, SDHIP; succinate dehydrogenase complex, subunit B, iron sulfu
hsa:6391 SDHC, CYB560, CYBL, PGL3, QPS1, SDH3; succinate dehydrogenase complex, subunit C, integral membrane
hsa:6392 SDHD, CBT1, CII-4, CWS3, PGL, PGL1, QPs3, SDH4, cybS; succinate dehydrogenase complex, subunit D, in
hsa:8801 SUCLG2, GBETA; succinate-CoA ligase, GDP-forming, beta subunit (EC:6.2.1.4); K01900 succinyl-CoA syn
hsa:8802 SUCLG1, GALPHA, MTDPS9, SUCLA1; succinate-CoA ligase, alpha subunit (EC:6.2.1.4 6.2.1.5); K01899 suc
hsa:8803 SUCLA2, A-BETA, MTDPS5, SCS-betaA; succinate-CoA ligase, ADP-forming, beta subunit (EC:6.2.1.5); K01
http://www.genome.jp/dbget-bin/get_linkdb?-t+genes+path:hsa04666
Fc gamma R-mediated phagocytosis (KEGG pathway hsa04666)
ID Definition
----------------------------------------------------------------------------------------------------
hsa:10000 AKT3, MPPH, PKB-GAMMA, PKBG, PRKBG, RAC-PK-gamma, RAC-gamma, STK-2; v-akt murine thymoma viral oncog
hsa:100137049 PLA2G4B, HsT16992, cPLA2-beta; phospholipase A2, group IVB (cytosolic) (EC:3.1.1.4); K16342 cytosoli
hsa:10092 ARPC5, ARC16, dJ127C7.3, p16-Arc; actin related protein 2/3 complex, subunit 5, 16kDa; K05754 actin
hsa:10093 ARPC4, ARC20, P20-ARC; actin related protein 2/3 complex, subunit 4, 20kDa; K05755 actin related pro
hsa:10094 ARPC3, ARC21, p21-Arc; actin related protein 2/3 complex, subunit 3, 21kDa; K05756 actin related pro
hsa:10095 ARPC1B, ARC41, p40-ARC, p41-ARC; actin related protein 2/3 complex, subunit 1B, 41kDa; K05757 actin
hsa:10109 ARPC2, ARC34, PNAS-139, p34-Arc; actin related protein 2/3 complex, subunit 2, 34kDa; K05758 actin r
hsa:10163 WASF2, IMD2, SCAR2, WASF4, WAVE2, dJ393P12.2; WAS protein family, member 2; K05748 WAS protein famil
hsa:10451 VAV3; vav 3 guanine nucleotide exchange factor; K05730 guanine nucleotide exchange factor VAV
hsa:10552 ARPC1A, Arc40, HEL-68, SOP2Hs, SOP2L; actin related protein 2/3 complex, subunit 1A, 41kDa; K05757 a
hsa:1072 CFL1, CFL, HEL-S-15; cofilin 1 (non-muscle); K05765 cofilin
hsa:1073 CFL2, NEM7; cofilin 2 (muscle); K05765 cofilin
hsa:10810 WASF3, Brush-1, SCAR3, WAVE3; WAS protein family, member 3; K06083 WAS protein family, member 3
hsa:123745 PLA2G4E; phospholipase A2, group IVE (EC:3.1.1.4); K16342 cytosolic phospholipase A2 [EC:3.1.1.4]
hsa:1398 CRK, CRKII, p38; v-crk avian sarcoma virus CT10 oncogene homolog; K04438 proto-oncogene C-crk
hsa:1399 CRKL; v-crk avian sarcoma virus CT10 oncogene homolog-like; K04438 proto-oncogene C-crk
hsa:1785 DNM2, CMT2M, CMTDI1, CMTDIB, DI-CMTB, DYN2, DYNII, LCCS5; dynamin 2 (EC:3.6.5.5); K01528 dynamin GTP
hsa:1794 DOCK2; dedicator of cytokinesis 2; K12367 dedicator of cytokinesis protein 2
hsa:207 AKT1, AKT, CWS6, PKB, PKB-ALPHA, PRKBA, RAC, RAC-ALPHA; v-akt murine thymoma viral oncogene homolog
hsa:208 AKT2, HIHGHH, PKBB, PKBBETA, PRKBB, RAC-BETA; v-akt murine thymoma viral oncogene homolog 2 (EC:2.7.
hsa:2209 FCGR1A, CD64, CD64A, FCRI, IGFR1; Fc fragment of IgG, high affinity Ia, receptor (CD64); K06498 high
hsa:2212 FCGR2A, CD32, CD32A, CDw32, FCG2, FCGR2, FCGR2A1, FcGR, IGFR2; Fc fragment of IgG, low affinity IIa,
hsa:2213 FCGR2B, CD32, CD32B, FCG2, FCGR2, IGFR2; Fc fragment of IgG, low affinity IIb, receptor (CD32); K125
hsa:2214 FCGR3A, CD16, CD16A, FCG3, FCGR3, FCGRIII, FCR-10, FCRIII, FCRIIIA, IGFR3, IMD20; Fc fragment of IgG
hsa:23396 PIP5K1C, LCCS3, PIP5K-GAMMA, PIP5Kgamma, PIPKIg_v4; phosphatidylinositol-4-phosphate 5-kinase, type
hsa:23533 PIK3R5, F730038I15Rik, FOAP-2, P101-PI3K, p101; phosphoinositide-3-kinase, regulatory subunit 5; K02
hsa:255189 PLA2G4F, PLA2G4FZ; phospholipase A2, group IVF (EC:3.1.1.4); K16342 cytosolic phospholipase A2 [EC:3
hsa:27040 LAT, LAT1, pp36; linker for activation of T cells; K07362 linker for activation of T cells
hsa:273 AMPH, AMPH1; amphiphysin; K12562 amphiphysin
hsa:283748 PLA2G4D, cPLA2delta; phospholipase A2, group IVD (cytosolic) (EC:3.1.1.4); K16342 cytosolic phosphol
hsa:2934 GSN, ADF, AGEL; gelsolin; K05768 gelsolin
hsa:3055 HCK, JTK9, p59Hck, p61Hck; hemopoietic cell kinase (EC:2.7.10.2); K08893 hemopoietic cell kinase [EC
hsa:3635 INPP5D, SHIP, SHIP-1, SHIP1, SIP-145, hp51CN, p150Ship; inositol polyphosphate-5-phosphatase, 145kDa
hsa:3636 INPPL1, OPSMD, SHIP2; inositol polyphosphate phosphatase-like 1 (EC:3.1.3.86); K15909 phosphatidylin
hsa:382 ARF6; ADP-ribosylation factor 6; K07941 ADP-ribosylation factor 6
hsa:3984 LIMK1, LIMK, LIMK-1; LIM domain kinase 1 (EC:2.7.11.1); K05743 LIM domain kinase 1 [EC:2.7.11.1]
hsa:3985 LIMK2; LIM domain kinase 2 (EC:2.7.11.1); K05744 LIM domain kinase 2 [EC:2.7.11.1]
hsa:4067 LYN, JTK8, p53Lyn, p56Lyn; v-yes-1 Yamaguchi sarcoma viral related oncogene homolog (EC:2.7.10.2); K
hsa:4082 MARCKS, 80K-L, MACS, PKCSL, PRKCSL; myristoylated alanine-rich protein kinase C substrate; K12561 my
hsa:4651 MYO10; myosin X; K12559 myosin X
hsa:5058 PAK1, PAKalpha; p21 protein (Cdc42/Rac)-activated kinase 1 (EC:2.7.11.1); K04409 p21-activated kinas
hsa:50807 ASAP1, AMAP1, CENTB4, DDEF1, PAG2, PAP, ZG14P; ArfGAP with SH3 domain, ankyrin repeat and PH domain
hsa:5290 PIK3CA, CLOVE, CWS5, MCAP, MCM, MCMTC, PI3K, p110-alpha; phosphatidylinositol-4,5-bisphosphate 3-kin
hsa:5291 PIK3CB, P110BETA, PI3K, PI3KBETA, PIK3C1; phosphatidylinositol-4,5-bisphosphate 3-kinase, catalytic
hsa:5293 PIK3CD, APDS, IMD14, P110DELTA, PI3K, p110D; phosphatidylinositol-4,5-bisphosphate 3-kinase, catalyt
hsa:5294 PIK3CG, PI3CG, PI3K, PI3Kgamma, PIK3, p110gamma, p120-PI3K; phosphatidylinositol-4,5-bisphosphate 3-
hsa:5295 PIK3R1, AGM7, GRB1, p85, p85-ALPHA; phosphoinositide-3-kinase, regulatory subunit 1 (alpha); K02649
hsa:5296 PIK3R2, MPPH, P85B, p85, p85-BETA; phosphoinositide-3-kinase, regulatory subunit 2 (beta); K02649 ph
hsa:5321 PLA2G4A, PLA2G4, cPLA2-alpha; phospholipase A2, group IVA (cytosolic, calcium-dependent) (EC:3.1.1.5
hsa:5335 PLCG1, NCKAP3, PLC-II, PLC1, PLC148, PLCgamma1; phospholipase C, gamma 1 (EC:3.1.4.11); K01116 phosp
hsa:5336 PLCG2, APLAID, FCAS3, PLC-IV, PLC-gamma-2; phospholipase C, gamma 2 (phosphatidylinositol-specific)
hsa:5337 PLD1; phospholipase D1, phosphatidylcholine-specific (EC:3.1.4.4); K01115 phospholipase D1/2 [EC:3.1
hsa:5338 PLD2; phospholipase D2 (EC:3.1.4.4); K01115 phospholipase D1/2 [EC:3.1.4.4]
hsa:55616 ASAP3, ACAP4, CENTB6, DDEFL1, UPLC1; ArfGAP with SH3 domain, ankyrin repeat and PH domain 3; K12488
hsa:5578 PRKCA, AAG6, PKC-alpha, PKCA, PRKACA; protein kinase C, alpha (EC:2.7.11.13); K02677 classical prote
hsa:5579 PRKCB, PKC-beta, PKCB, PRKCB1, PRKCB2; protein kinase C, beta (EC:2.7.11.13); K02677 classical prote
hsa:5580 PRKCD, CVID9, MAY1, PKCD, nPKC-delta; protein kinase C, delta (EC:2.7.10.2 2.7.11.13); K06068 novel
hsa:5581 PRKCE, PKCE, nPKC-epsilon; protein kinase C, epsilon (EC:2.7.11.13); K18050 novel protein kinase C e
hsa:5582 PRKCG, PKC-gamma, PKCC, PKCG, SCA14; protein kinase C, gamma (EC:2.7.11.13); K02677 classical protei
hsa:5594 MAPK1, ERK, ERK-2, ERK2, ERT1, MAPK2, P42MAPK, PRKM1, PRKM2, p38, p40, p41, p41mapk, p42-MAPK; mitog
hsa:5595 MAPK3, ERK-1, ERK1, ERT2, HS44KDAP, HUMKER1A, P44ERK1, P44MAPK, PRKM3, p44-ERK1, p44-MAPK; mitogen-a
hsa:5604 MAP2K1, CFC3, MAPKK1, MEK1, MKK1, PRKMK1; mitogen-activated protein kinase kinase 1 (EC:2.7.12.2); K
hsa:56848 SPHK2, SK_2, SK-2, SPK_2, SPK-2; sphingosine kinase 2 (EC:2.7.1.91); K04718 sphingosine kinase [EC:2
hsa:5788 PTPRC, B220, CD45, CD45R, GP180, L-CA, LCA, LY5, T200; protein tyrosine phosphatase, receptor type,
hsa:5879 RAC1, Rac-1, TC-25, p21-Rac1; ras-related C3 botulinum toxin substrate 1 (rho family, small GTP bind
hsa:5880 RAC2, EN-7, Gx, HSPC022, p21-Rac2; ras-related C3 botulinum toxin substrate 2 (rho family, small GTP
hsa:5894 RAF1, CRAF, NS5, Raf-1, c-Raf; v-raf-1 murine leukemia viral oncogene homolog 1 (EC:2.7.11.1); K0436
hsa:6198 RPS6KB1, PS6K, S6K, S6K-beta-1, S6K1, STK14A, p70_S6KA, p70(S6K)-alpha, p70-S6K, p70-alpha; ribosoma
hsa:6199 RPS6KB2, KLS, P70-beta, P70-beta-1, P70-beta-2, S6K-beta2, S6K2, SRK, STK14B, p70(S6K)-beta, p70S6Kb
hsa:65108 MARCKSL1, F52, MACMARCKS, MLP, MLP1, MRP; MARCKS-like 1; K13536 MARCKS-related protein
hsa:653361 NCF1, NCF1A, NOXO2, SH3PXD1A, p47phox; neutrophil cytosolic factor 1; K08011 neutrophil cytosolic fa
hsa:6850 SYK, p72-Syk; spleen tyrosine kinase (EC:2.7.10.2); K05855 spleen tyrosine kinase [EC:2.7.10.2]
hsa:7408 VASP; vasodilator-stimulated phosphoprotein; K06274 vasodilator-stimulated phosphoprotein
hsa:7409 VAV1, VAV; vav 1 guanine nucleotide exchange factor; K05730 guanine nucleotide exchange factor VAV
hsa:7410 VAV2, VAV-2; vav 2 guanine nucleotide exchange factor; K05730 guanine nucleotide exchange factor VAV
hsa:7454 WAS, IMD2, SCNX, THC, THC1, WASP; Wiskott-Aldrich syndrome; K05747 Wiskott-Aldrich syndrome protein
hsa:81873 ARPC5L, ARC16-2; actin related protein 2/3 complex, subunit 5-like; K05754 actin related protein 2/3
hsa:8394 PIP5K1A; phosphatidylinositol-4-phosphate 5-kinase, type I, alpha (EC:2.7.1.68); K00889 1-phosphatid
hsa:8395 PIP5K1B, MSS4, STM7; phosphatidylinositol-4-phosphate 5-kinase, type I, beta (EC:2.7.1.68); K00889 1
hsa:8398 PLA2G6, CaI-PLA2, GVI, INAD1, IPLA2-VIA, NBIA2, NBIA2A, NBIA2B, PARK14, PLA2, PNPLA9, iPLA2, iPLA2be
hsa:8503 PIK3R3, p55, p55-GAMMA; phosphoinositide-3-kinase, regulatory subunit 3 (gamma); K02649 phosphoinosi
hsa:85477 SCIN; scinderin; K05768 gelsolin
hsa:8611 PPAP2A, LLP1a, LPP1, PAP-2a, PAP2; phosphatidic acid phosphatase type 2A (EC:3.1.3.4); K01080 phosph
hsa:8612 PPAP2C, LPP2, PAP-2c, PAP2-g; phosphatidic acid phosphatase type 2C (EC:3.1.3.4); K01080 phosphatida
hsa:8613 PPAP2B, Dri42, LPP3, PAP2B, VCIP; phosphatidic acid phosphatase type 2B (EC:3.1.3.4); K01080 phospha
hsa:8853 ASAP2, AMAP2, CENTB3, DDEF2, PAG3, PAP, Pap-alpha, SHAG1; ArfGAP with SH3 domain, ankyrin repeat and
hsa:8877 SPHK1, SPHK; sphingosine kinase 1 (EC:2.7.1.91); K04718 sphingosine kinase [EC:2.7.1.91]
hsa:8936 WASF1, SCAR1, WAVE, WAVE1; WAS protein family, member 1; K05753 WAS protein family, member 1
hsa:8976 WASL, N-WASP, NWASP; Wiskott-Aldrich syndrome-like; K05747 Wiskott-Aldrich syndrome protein
hsa:9846 GAB2; GRB2-associated binding protein 2; K08091 growth factor receptor bound protein 2-associated pr
hsa:998 CDC42, CDC42Hs, G25K; cell division cycle 42; K04393 cell division control protein 42
http://pid.nci.nih.gov/MoleculePage?molid=503236
http://pid.nci.nih.gov/search/advanced_landing.shtml?what=graphic&svg=&jpg=true&xml=&biopax=&complex_uses=on&family_uses=on°ree=1&molecule=&pathway=FANCONI¯o_process=&source_id=5&evidence_code=NIL&evidence_code=IAE&evidence_code=IC&evidence_code=IDA&evidence_code=IFC&evidence_code=IGI&evidence_code=IMP&evidence_code=IOS&evidence_code=IPI&evidence_code=RCA&evidence_code=RGE&evidence_code=TAS&output-format=graphic&Submit=Go
colnames(clusters)[1:13]
for
combine<-rbind(clusters[,1],)
Extra Fanconi stuff (complex-membership notes below)
FANCD2 FANCD2/FANCI/BRCA2/PALB2,FANCG/BRCA2/FANCD2/XRCC3,FANCD2/FANCI,FANCD2/FANCI/FAN1,FANCD2/FANCI/H2AX,FANCD2/FANCI,FANCD2/FANCI
FANCJ FANCJ/BLM/TOP3A/BLAP75
FANCM FA core complex/FANCM/FAAP24/MHF1/MHF2/BLM/TOP3A/BLAP75,FANCM/FAAP24/MHF1/MHF2
H2AX FANCD2/FANCI/H2AX
PALB2 FANCD2/FANCI/BRCA2/PALB2
FANCG FA core complex/FANCM/FAAP24/MHF1/MHF2/BLM/TOP3A/BLAP75,FANCA/FANCB/FANCC/FANCF/FANCG/FANCL/UBE2T/HES1/FAAP100,FANCG/BRCA2/FANCD2/XRCC3,FA core complex,FA core complex
FAAP24 FA core complex/FANCM/FAAP24/MHF1/MHF2/BLM/TOP3A/BLAP75,FANCM/FAAP24/MHF1/MHF2
beta TrCP1-2
BRCA2 FANCD2/FANCI/BRCA2/PALB2,FANCG/BRCA2/FANCD2/XRCC3
FAN1 FANCD2/FANCI/FAN1
BRCA1
XRCC3 FANCG/BRCA2/FANCD2/XRCC3
FANCE FA core complex/FANCM/FAAP24/MHF1/MHF2/BLM/TOP3A/BLAP75,FA core complex,FA core complex
CHK1
BLM
BLAP75
TOP3A
BRCA2
RMI1
BRCA1
FAH
ATM
TOP3A
MRE11A
BLM
XRCC3
FAN1
Molecules Uses in complexes (duplicate names indicate differences in component properties)
FANCD2 FANCD2/FANCI/BRCA2/PALB2,FANCG/BRCA2/FANCD2/XRCC3,FANCD2/FANCI,FANCD2/FANCI/FAN1,FANCD2/FANCI/H2AX,FANCD2/FANCI,FANCD2/FANCI
FANCJ FANCJ/BLM/TOP3A/BLAP75
FANCM FA core complex/FANCM/FAAP24/MHF1/MHF2/BLM/TOP3A/BLAP75,FANCM/FAAP24/MHF1/MHF2
H2AX FANCD2/FANCI/H2AX
PALB2 FANCD2/FANCI/BRCA2/PALB2
FANCG FA core complex/FANCM/FAAP24/MHF1/MHF2/BLM/TOP3A/BLAP75,FANCA/FANCB/FANCC/FANCF/FANCG/FANCL/UBE2T/HES1/FAAP100,FANCG/BRCA2/FANCD2/XRCC3,FA core complex,FA core complex
FAAP24 FA core complex/FANCM/FAAP24/MHF1/MHF2/BLM/TOP3A/BLAP75,FANCM/FAAP24/MHF1/MHF2
beta TrCP1-2
BRCA2 FANCD2/FANCI/BRCA2/PALB2,FANCG/BRCA2/FANCD2/XRCC3
FAN1 FANCD2/FANCI/FAN1
BRCA1
XRCC3 FANCG/BRCA2/FANCD2/XRCC3
FANCE FA core complex/FANCM/FAAP24/MHF1/MHF2/BLM/TOP3A/BLAP75,FA core complex,FA core complex
CHK1
ubiquitin-mediated proteolysis
K02207 UBE2R, UBC3, CDC34; ubiquitin-conjugating enzyme E2 R [EC:6.3.2.19]
K03094 SKP1, CBF3D; S-phase kinase-associated protein 1
K03175 TRAF6; TNF receptor-associated factor 6
K03178 UBE1, UBA1; ubiquitin-activating enzyme E1 [EC:6.3.2.19]
K03347 CUL1, CDC53; cullin 1
K03348 APC1; anaphase-promoting complex subunit 1
K03349 APC2; anaphase-promoting complex subunit 2
K03350 APC3, CDC27; anaphase-promoting complex subunit 3
K03351 APC4; anaphase-promoting complex subunit 4
K03352 APC5; anaphase-promoting complex subunit 5
K03353 APC6, CDC16; anaphase-promoting complex subunit 6
K03354 APC7; anaphase-promoting complex subunit 7
K03355 APC8, CDC23; anaphase-promoting complex subunit 8
K03356 APC9; anaphase-promoting complex subunit 9
K03357 APC10, DOC1; anaphase-promoting complex subunit 10
K03358 APC11; anaphase-promoting complex subunit 11
K03359 APC12, CDC26; anaphase-promoting complex subunit 12
K03360 GRR1; F-box and leucine-rich repeat protein GRR1
K03361 CDC4; F-box and WD-40 domain protein CDC4
K03362 FBXW1_11, BTRC, beta-TRCP; F-box and WD-40 domain protein 1/11
K03363 CDC20; cell division cycle 20, cofactor of APC complex
K03364 CDH1; cell division cycle 20-like protein 1, cofactor of APC complex
K03868 RBX1, ROC1; RING-box protein 1
K03869 CUL3; cullin 3
K03870 CUL2; cullin 2
K03871 VHL; von Hippel-Lindau disease tumor supressor
K03872 TCEB1; transcription elongation factor B, polypeptide 1
K03873 TCEB2; transcription elongation factor B, polypeptide 2
K03875 SKP2, FBXL1; F-box and leucine-rich repeat protein 1 (S-phase kinase-associated protein 2)
K03876 MEL26; BTB domain ubiquitin protein ligase cofactor
K04416 MAP3K1, MEKK1; mitogen-activated protein kinase kinase kinase 1 [EC:2.7.11.25]
K04506 SIAH1; E3 ubiquitin-protein ligase SIAH1 [EC:6.3.2.19]
K04552 UBE2L3, UBCH7; ubiquitin-conjugating enzyme E2 L3 [EC:6.3.2.19]
K04553 UBE2L6, UBCH8; ubiquitin-conjugating enzyme E2 L6 [EC:6.3.2.19]
K04554 UBE2J2, NCUBE2, UBC6; ubiquitin-conjugating enzyme E2 J2 [EC:6.3.2.19]
K04555 UBE2G2, UBC7; ubiquitin-conjugating enzyme E2 G2 [EC:6.3.2.19]
K04556 PARK2; parkin [EC:6.3.2.19]
K04649 HIP2, UBC1; ubiquitin-conjugating enzyme (huntingtin interacting protein 2) [EC:6.3.2.19]
K04678 SMURF; E3 ubiquitin ligase SMURF1/2 [EC:6.3.2.19]
K04694 SOCS1, JAB; suppressor of cytokine signaling 1
K04696 SOCS3, CIS3; suppressor of cytokine signaling 3
K04706 PIAS1; E3 SUMO-protein ligase PIAS1 [EC:6.3.2.-]
K04707 CBL; E3 ubiquitin-protein ligase CBL [EC:6.3.2.19]
K04725 XIAP, BIRC4; E3 ubiquitin-protein ligase XIAP
K05630 WWP2, AIP2; atrophin-1 interacting protein 2 (WW domain containing E3 ubiquitin protein ligase 2) [E
K05632 AIP4, ITCH; atrophin-1 interacting protein 4 [EC:6.3.2.19]
K05633 AIP5, WWP1; atrophin-1 interacting protein 5 (WW domain containing E3 ubiquitin protein ligase 1) [E
K06643 MDM2; E3 ubiquitin-protein ligase Mdm2 [EC:6.3.2.19]
K06688 UBE2C, UBC11; ubiquitin-conjugating enzyme E2 C [EC:6.3.2.19]
K06689 UBE2D_E, UBC4, UBC5; ubiquitin-conjugating enzyme E2 D/E [EC:6.3.2.19]
K07868 RHOBTB1_2; Rho-related BTB domain-containing protein 1/2
K08285 TRIM18, MID1; midline 1 [EC:6.3.2.19]
K09561 STUB1, CHIP; STIP1 homology and U-box containing protein 1 [EC:6.3.2.19]
K10054 PML, TRIM19; probable transcription factor PML
K10099 FBXO2, NFB42; F-box protein 2
K10140 DDB2; DNA damage-binding protein 2
K10143 RFWD2, COP1; E3 ubiquitin-protein ligase RFWD2 [EC:6.3.2.19]
K10144 RCHY1, PIRH2; RING finger and CHY zinc finger domain-containing protein 1 [EC:6.3.2.19]
K10259 MET30; F-box and WD-40 domain protein MET30
K10260 FBXW7, SEL10; F-box and WD-40 domain protein 7
K10264 FBXW8; F-box and WD-40 domain protein 8
K10291 FBXO4; F-box protein 4
K10447 KLHL9_13; kelch-like protein 9/13
K10456 KLHL19, KEAP1, INRF2; kelch-like protein 19
K10570 ERCC8, CKN1, CSA; DNA excision repair protein ERCC-8
K10571 DET1; de-etiolated-1
K10573 UBE2A, UBC2, RAD6A; ubiquitin-conjugating enzyme E2 A [EC:6.3.2.19]
K10574 UBE2B, RAD6B; ubiquitin-conjugating enzyme E2 B [EC:6.3.2.19]
K10575 UBE2G1, UBC7; ubiquitin-conjugating enzyme E2 G1 [EC:6.3.2.19]
K10576 UBE2H, UBC8; ubiquitin-conjugating enzyme E2 H [EC:6.3.2.19]
K10577 UBE2I, UBC9; ubiquitin-conjugating enzyme E2 I [EC:6.3.2.19]
K10578 UBE2J1, NCUBE1, UBC6; ubiquitin-conjugating enzyme E2 J1 [EC:6.3.2.19]
K10579 UBE2M, UBC12; ubiquitin-conjugating enzyme E2 M [EC:6.3.2.19]
K10580 UBE2N, BLU, UBC13; ubiquitin-conjugating enzyme E2 N [EC:6.3.2.19]
K10581 UBE2O; ubiquitin-conjugating enzyme E2 O [EC:6.3.2.19]
K10582 UBE2Q; ubiquitin-conjugating enzyme E2 Q [EC:6.3.2.19]
K10583 UBE2S, E2EPF; ubiquitin-conjugating enzyme E2 S [EC:6.3.2.19]
K10584 UBE2U; ubiquitin-conjugating enzyme E2 U [EC:6.3.2.19]
K10585 UBE2Z; ubiquitin-conjugating enzyme E2 Z [EC:6.3.2.19]
K10586 BIRC6, BRUCE; baculoviral IAP repeat-containing protein 6 (apollon) [EC:6.3.2.19]
K10587 UBE3A, E6AP; ubiquitin-protein ligase E3 A [EC:6.3.2.19]
K10588 UBE3B; ubiquitin-protein ligase E3 B [EC:6.3.2.19]
K10589 UBE3C; ubiquitin-protein ligase E3 C [EC:6.3.2.19]
K10590 TRIP12; E3 ubiquitin-protein ligase TRIP12 [EC:6.3.2.19]
K10591 NEDD4, RSP5; E3 ubiquitin-protein ligase NEDD4 [EC:6.3.2.19]
K10592 HUWE1, MULE, ARF-BP1; E3 ubiquitin-protein ligase HUWE1 [EC:6.3.2.19]
K10593 EDD1, UBR5; E3 ubiquitin-protein ligase EDD1 [EC:6.3.2.19]
K10594 HERC1; E3 ubiquitin-protein ligase HERC1 [EC:6.3.2.19]
K10595 HERC2; E3 ubiquitin-protein ligase HERC2 [EC:6.3.2.19]
K10596 UBE4A; ubiquitin conjugation factor E4 A [EC:6.3.2.19]
K10597 UBE4B, UFD2; ubiquitin conjugation factor E4 B [EC:6.3.2.19]
K10598 PPIL2, CYC4, CHP60; peptidyl-prolyl cis-trans isomerase-like 2 [EC:5.2.1.8]
K10599 PRPF19, PRP19; pre-mRNA-processing factor 19 [EC:6.3.2.19]
K10600 UBOX5, UIP5; U-box domain-containing protein 5
K10601 SYVN1, HRD1; E3 ubiquitin-protein ligase synoviolin [EC:6.3.2.19]
K10602 NHLRC1; E3 ubiquitin-protein ligase NHLRC1 [EC:6.3.2.19]
K10603 AIRE; autoimmune regulator
K10604 MGRN1; E3 ubiquitin-protein ligase MGRN1 [EC:6.3.2.19]
K10605 BRCA1; breast cancer type 1 susceptibility protein
K10606 FANCL, PHF9; E3 ubiquitin-protein ligase FANCL [EC:6.3.2.19]
K10607 TRIM32, HT2A; tripartite motif-containing protein 32 [EC:6.3.2.19]
K10608 TRIM37, MUL; tripartite motif-containing protein 37 [EC:6.3.2.19]
K10609 CUL4; cullin 4
K10610 DDB1; DNA damage-binding protein 1
K10611 RBX2, ROC2, RNF7; RING-box protein 2
K10612 CUL5; cullin 5
K10613 CUL7; cullin 7
K10614 HERC3; E3 ubiquitin-protein ligase HERC3 [EC:6.3.2.19]
K10615 HERC4; E3 ubiquitin-protein ligase HERC4 [EC:6.3.2.19]
K10684 UBLE1A, SAE1; ubiquitin-like 1-activating enzyme E1 A [EC:6.3.2.19]
K10685 UBLE1B, SAE2, UBA2; ubiquitin-like 1-activating enzyme E1 B [EC:6.3.2.19]
K10686 UBE1C, UBA3; ubiquitin-activating enzyme E1 C [EC:6.3.2.19]
K10687 UBE2F; ubiquitin-conjugating enzyme E2 F [EC:6.3.2.19]
K10688 UBE2W, UBC16; ubiquitin-conjugating enzyme E2 W [EC:6.3.2.19]
K10698 UBE1L, UBA7; ubiquitin-activating enzyme E1-like [EC:6.3.2.19]
K10699 UBE1L2, UBA6; ubiquitin-activating enzyme E1-like protein 2 [EC:6.3.2.19]
K12416 SWM1, APC13; anaphase-promoting complex subunit SWM1
K12456 APC13; anaphase-promoting complex subunit 13
K13305 NEDD4L; E3 ubiquitin-protein ligase NEDD4-like [EC:6.3.2.19]
K16060 BIRC2_3; baculoviral IAP repeat-containing protein 2/3
K16061 BIRC7_8; baculoviral IAP repeat-containing protein 7/8
K16063 PIAS2; E3 SUMO-protein ligase PIAS2 [EC:6.3.2.-]
K16064 PIAS3; E3 SUMO-protein ligase PIAS3 [EC:6.3.2.-]
K16065 PIAS4; E3 SUMO-protein ligase PIAS4 [EC:6.3.2.-]
myotubularin related protein 15 (1017 aa)
# ---- Save per-(chromosome, phenotype, region) results as RDS files ----
# File names embed the.chr, target.pheno and a.label, which are set by the
# enclosing loops (opened above this section of the file), so every
# combination writes its own set of outputs.
# NOTE(review): collapse="T" is effectively inert here -- paste() only uses
# `collapse` to join multi-element results, and with length-1 inputs these
# calls already produce a single string. Verify it was not meant to be sep.
print("Saving results")
out.file<-paste("AOGC_sequence_", the.chr,"_",target.pheno,a.label,".rds", collapse="T", sep="")
out.file.res.skat<-paste("AOGC_sequence_skat", the.chr,"_",target.pheno,a.label,".rds", collapse="T", sep="")
out.file.res.burden<-paste("AOGC_sequence_burden", the.chr,"_",target.pheno,a.label,".rds", collapse="T", sep="")
out.file.res.skatO<-paste("AOGC_sequence_skatO", the.chr,"_",target.pheno,a.label,".rds", collapse="T", sep="")
out.ann.file<-paste("AOGC_sequence_", the.chr,"_",target.pheno,a.label,"_ANNOTATION.rds", collapse="T", sep="")
out.ann.extra.file<-paste("AOGC_sequence_", the.chr,"_",target.pheno,a.label,"_ANNOTATION_EXTRA.rds", collapse="T", sep="")
# Objects below come from the analysis code above this section; these same
# files are re-read by the aggregation phase later in the file.
saveRDS(cohort.seq, file=out.file)                       # per-cohort fitted object
saveRDS(meta.results.skat, file=out.file.res.skat)       # SKAT results table
saveRDS(meta.results.burden, file=out.file.res.burden)   # burden-test results table
saveRDS(meta.results.skatO, file=out.file.res.skatO)     # SKAT-O results table
saveRDS(ann,file=out.ann.file)                           # variant/gene annotation
saveRDS(summary.geno.extra.out,file=out.ann.extra.file)  # extra genotype summary
##################################ADD extras here
#setwd(code.dir)
# source("MERGE_alt_alleles_AOGC.r")
#source("MERGE_alt_alleles_AOGC_as_controls.r")
###############################
# ---- Accumulate per-chromosome indel tables into one summary ----
# NOTE(review): the closing braces below terminate loops (ipheno / ichr /
# ifam) whose opening lines are above this section of the file.
if(dont.build.summary){indels<-{}}  # reset accumulator when no summary is wanted
## setwd(code.dir)
## source("ucsc.table.names.r")
} # ipheno
if(!dont.build.summary){
# Only accumulate during the first phenotype iteration; later phenotypes
# reuse the same variant table.
if(ipheno==1){
if(is.null(dim(indels))){
# First chromosome: seed the accumulator and remember its column layout.
indels<-a.indel
the.col<-colnames(a.indel)
}else{
# Later chromosomes must match the first chromosome's columns in both
# directions; otherwise report the mismatch and skip this chromosome.
if(sum(!(the.col %in% colnames(a.indel)))>0 | sum(!(colnames(a.indel) %in% the.col))>0){
print("error colnames don't match")
print(the.col[!(the.col %in% colnames(a.indel))])
print(colnames(a.indel)[!(colnames(a.indel) %in% the.col)])
next
} # columns don't match
# NOTE(review): this branch looks unreachable -- any extra column in
# a.indel already triggered the `next` above. Verify the intended check.
if(sum(!(colnames(a.indel) %in% the.col))>0 ){
a.indel<-a.indel[,the.col] } # reduce a.indels }
# Append with columns forced into the canonical (first-chromosome) order.
indels<-rbind(indels,a.indel[,the.col])
} ## is null so not first
} # make for one phenotype.
}
} # ichr
# After all chromosomes: write the combined indel table as one TSV.
setwd(annotate.dir)
write.table(indels,file="RunCovar_AML_pass_noControl_Coding_filtered.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
} # ifam loop
q()
getwd()
#####################
######################
# ---- Aggregation phase: combine the per-chromosome RDS results ----
# Phenotype lookup: value = trait column name, name = short label that was
# embedded in the result file names by the saving code above.
pheno.types<-c("BMD_EFF_STD_HIP")
names(pheno.types)<-c("HIP")
## files<-dir(getwd())
## files<-files[files %in% "*burden*"]
## files
## burden.files<-grepl("^AOGC_sequence_skatO",files)
## the.extension<-paste(fam[ifam],project.extension,"$",sep="")
## project.files<-files[grepl(the.extension ,files)]
## print(sort(paste("Doing: ",project.files,sep=""))) # project.files<-project.files[1:22]
#indels<-{}
the.col<-{}
project.file  # NOTE(review): bare symbol -- leftover debug echo; errors if project.file is undefined
setwd(analysis.dir)
ipheno<-1
for(ipheno in 1:length(pheno.types)){
target.pheno<-names(pheno.types)[ipheno]
iregion<-1 # iregion<-1
# Region label variants: each assignment overwrites the previous one, so
# only the last ("Coding.noControl.AML.regions") is actually used.
region.labels<-c("PILOT.GENE.regions","GENE.regions")
region.labels<-c("FILTERED.PILOT.GENE.regions","FILTERED.GENE.regions")
region.labels<-c("AML.regions")
region.labels<-c("Coding.noControl.AML.regions")
for(iregion in 1:length(region.labels)){
a.label<-region.labels[iregion]
# Chromosome list: the second assignment (autosomes + X, no Y) wins.
all.chr<-paste("chr",c(1:22,"X","Y"),sep="")
all.chr<-paste("chr",c(1:22,"X"),sep="")
#all.res
# ichr<-10
all.res<-{}  # accumulator for the combined per-gene results across chromosomes
# Read the per-chromosome SKAT / burden / SKAT-O results and annotation
# written by the saving section above, align them by gene, and stack into
# one combined table (all.res) plus combined annotation (ann, ann.extra).
for(ichr in 1:length(all.chr)){
the.chr<-all.chr[ichr]
print(the.chr)
res.skat<-readRDS( paste("AOGC_sequence_skat", the.chr,"_",target.pheno,a.label,".rds", collapse="T", sep=""))
res.burden<-readRDS(paste("AOGC_sequence_burden", the.chr,"_",target.pheno,a.label,".rds", collapse="T", sep=""))
res.skatO<-readRDS(paste("AOGC_sequence_skatO", the.chr,"_",target.pheno,a.label,".rds", collapse="T", sep=""))
ann.chr<-readRDS(paste("AOGC_sequence_", the.chr,"_",target.pheno,a.label,"_ANNOTATION.rds", collapse="T", sep=""))
ann.extra.chr<-readRDS(paste("AOGC_sequence_", the.chr,"_",target.pheno,a.label,"_ANNOTATION_EXTRA.rds", collapse="T", sep=""))
# Interactive previews of the first rows (no effect in a sourced script).
res.skatO[1:5,]
res.burden[1:5,]
res.skat[1:5,]
ann.chr[1:5,]
# Sort by SKAT-O p-value, then reorder burden and SKAT tables so the three
# result sets share the same gene order row-for-row.
order.by<-order(res.skatO[,"p"],decreasing=FALSE)
res.skatO<-res.skatO[order.by,]
posns<-match(res.skatO[,"gene"],res.burden[,"gene"])
res.burden<-res.burden[posns,]
posns<-match(res.skatO[,"gene"],res.skat[,"gene"])
res.skat<-res.skat[posns,]
res.skatO[1:5,]
res.burden[1:5,]
res.skat[1:5,]
# Suffix column names with the test type so the cbind below stays unambiguous.
colnames(res.skatO)<-paste(colnames(res.skatO),"skatO",sep=".")
colnames(res.burden)<-paste(colnames(res.burden),"burden",sep=".")
colnames(res.skat)<-paste(colnames(res.skat),"skat",sep=".")
a.res<-cbind(the.chr,res.skatO,res.burden,res.skat)
a.res[1:5,]
# First chromosome seeds the accumulators; later chromosomes are appended.
if(is.null(dim(all.res))){
all.res<-a.res
ann<-ann.chr
ann.extra<-ann.extra.chr
}else{
all.res<-rbind(all.res,a.res)
ann<-rbind(ann,ann.chr)
ann.extra<-rbind(ann.extra,ann.extra.chr)
}
} ## loop over chr
# ---- Rank the genome-wide results, annotate, write tables, QQ plot ----
# Sort the combined table by SKAT-O p-value and keep a curated column set.
order.by<-order(all.res[,"p.skatO"],decreasing=FALSE)
all.res<-all.res[order.by,]
all.res[1:45,]
wanted<-c("gene.skatO","p.skatO","p.burden","p.skat","nmiss.skatO","nsnps.skatO","errflag.skatO","beta.burden","se.burden","cmafTotal.burden","cmafUsed.burden","nsnpsTotal.burden","nsnpsUsed.burden","Qmeta.skat","cmaf.skat")
all.res<-all.res[,wanted]
# Region 1 = gene-based results: prepend gene description / biotype / OMIM
# columns pulled from the combined annotation.
if(iregion==1){
posns<-match(all.res[,"gene.skatO"],ann[,"Gene.Names"])
ann[posns,][1:5,c("Gene.Names","description","gene_biotype","OMIM (Gene::Status::OMIM::description::disease)" )]
all.res<-cbind(ann[posns,c("Gene.Names","description","gene_biotype","OMIM (Gene::Status::OMIM::description::disease)" )],all.res)
all.res.gene<-all.res
}
# Region 2 = exon/pilot-based results, merged with the external gene.list.
if(iregion==2){
posns<-match(all.res[,"gene.skatO"],gene.list[,"GENE_NAME"])
gene.list[posns,][1:5,]
gene.list[1:5,]
all.res<-cbind(all.res,gene.list[posns,])
all.res.pilot<-all.res
}
write.table(all.res.gene,file="AML.coding.noControl.filtered.Gene.based.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
# NOTE(review): region.labels above has length 1, so iregion==2 never runs
# and all.res.pilot is undefined here -- this write.table (and the EXON
# comparison below) will error unless a second region label is restored.
write.table(all.res.pilot,file="filtered.Gene.EXON.based.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
all.res[1:15,1:5]
all.res.pilot[1:5,]
all.res.gene[1:6,]
# Side-by-side comparison of gene-based vs exon-based statistics per gene.
region.cols.wanted<-c("gene.skatO","p.skatO","p.burden","p.skat","nmiss.skatO","nsnps.skatO","nsnpsTotal.burden","errflag.skatO")
posns<-match(all.res.gene[,"Gene.Names"],all.res.pilot[,"hgnc_symbol"])
all.res.pilot.subset<-all.res.pilot[posns,region.cols.wanted]
colnames(all.res.pilot.subset)<-paste(colnames(all.res.pilot.subset),"EXONS",sep=".")
cbind(all.res.gene,all.res.pilot[posns,region.cols.wanted])[1:5,]
cbind(all.res.gene[,1:8],all.res.pilot.subset,all.res.gene[,9:dim(all.res.gene)[2]])[1:5,]
gene.region.compare<-cbind(all.res.gene[,1:8],all.res.pilot.subset,all.res.gene[,9:dim(all.res.gene)[2]])
write.table(gene.region.compare,file="filtered.Compare.Gene.EXON.based.txt",col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
# QQ plot of the gene-based SKAT-O p-values; observed values are capped at
# -log10(p) = 28. parse.pvals.qq is defined further down in this file.
lim = 28
# NOTE(review): the first qq assignment is immediately overwritten -- only
# the all.res.gene plot is produced.
qq <- parse.pvals.qq(gene.region.compare[,"p.skatO"],lim=lim)
qq <- parse.pvals.qq(all.res.gene[,"p.skatO"],lim=lim)
ylab = expression(Observed~~-log[10](italic(p)))
xlab = expression(Expected~~-log[10](italic(p)))
plot(qq$e,
qq$o,
xlim=c(0,lim),ylim=c(0,lim),
pch=20,col='deepskyblue',
xlab=xlab,ylab=ylab, main="Gene Based")
abline(coef=c(0,1), col=1, lwd=2)  # y = x null expectation line
savePlot("FilteredGeneBased.jpg",type="jpeg")
#write.table(geno.all,file=paste(project.name,fam[ifam],"geno.all.txt",sep="."),col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
#geno.all<-read.delim(paste(project.name,fam[ifam],"geno.all.txt",sep="."),header=T,skip=0,fill=TRUE,sep="\t",stringsAsFactors=FALSE,check.names=FALSE)
# Build observed/expected -log10(p) pairs for a QQ plot.
#
# pvector: numeric vector of p-values (NAs are dropped by sort()).
# lim:     cap applied to the observed -log10(p) values so extreme hits
#          stay inside the plotting window.
# Returns a list with components o (observed, ascending, capped) and
# e (expected quantiles under the uniform null).
parse.pvals.qq <- function(pvector, lim = 7) {
  observed <- -log10(sort(pvector, decreasing = FALSE))
  n <- length(observed)
  expected <- -log10(seq_len(n) / n)
  # Truncate very small p-values at the plotting limit.
  observed <- pmin(observed, lim)
  list(o = observed, e = expected)
}
## test<-c("MYCBP2","SLC25A24","TMCO3","C13orf35")
## test<-c("MYCBP2","SLC25A24","TMCO3","C13orf35")
## test<-c("chr22:41252435-41252687:ST13")
rownames(all.res.gene)<-1:dim(all.res.gene)[1]
test<-c("FLT3","NPM1","DNMT3A","IDH2","IDH1","TET2","RUNX1","TP53","NRAS","CEBPA","WT1","PTPN11","KIT","U2AF1","KRAS","SMC1A","SMC3","PHF6","STAG2","RAD21","FAM5C","EZH2","NHRNPK")
all.res.gene[all.res.gene[,"gene.skatO"] %in% test,]
meta.results.burden[meta.results.burden[,"gene"] %in% test,]
## loci<-snpinfo[snpinfo[,"gene"] %in% test,"Name"]
## #summary.geno.extra.out[loci,]
## high.missing.out[loci,]
## qual[loci,]
## a.indel[loci,extra]
## snpinfo[1:5,]
## qual[1:5,c("FILTER_PASS", "FILTER_100" )]
library(plyr)
options(stringsAsFactors=FALSE)
setwd("~/Documents/Data/trial_GWAS/run1/assoc_results")
SNP = read.table("LM-cases-assoc.csv", header=T, na.strings = "NA") # skip importing the the #eigenvalue labels
head(SNP)
str(SNP)
#----------my script------------------------------------
# the expected dist of P values is NOT normal but an even distribution from 0-1
# the expected dist of STAT is TDist which seems to == rnorm in this case anyway (see below)
# dof for t tests is n1+n2 -2 and thus for its distribution too
# SNP = subset(file1, TEST=="ADD")
# check normality of data
plot(sort(SNP$P[20000:70000]), cex=0.2, col="grey")
lines(sort(seq(0,1, length.out=50000)), col="red", lwd=2, lty=2)
plot(sort(SNP$STAT[40000:70000]), col="grey")
lines(sort(rt(30000, df=99998)), col="red", lwd=4, lty=2)
lines(sort(rt(30000, df=120)), col="green", lwd=4, lty=2)
lines(sort(rt(30000, df=12)), col="cyan", lwd=4, lty=2)
# --- my script for T STATISTIC
# i think negative t scores are as important as positive ones, they just have mean1- mean2 backwards
dof = (length(SNP$STAT) *2) -2
t = sort(-log10(abs(rt(length(SNP$STAT), df=dof))))
# you can't log a negative number. the distribution is symmetrical around 0 anyway, so abs() or chop it off here
test = sort(-log10(abs(SNP$STAT)))
# test2= qqnorm(SNP$STAT[1:20000], col="forestgreen")
par(bg="white")
jpeg("run2-STAT-myscript.jpg", width=800, height=600)
plot(t, test, main="QQ of run2 (no controls) -log STAT (dof=540k)", col="forestgreen", xlim=c(0,7), ylim=c(0,7), ylab="-log(STAT)", xlab="-log(tDist)")
abline(coef=c(0,1), col=1, lwd=2)
dev.off()
# ------------- my P value QQ
# using a vector to generate expected (adrian's method) appears better than runif
#runif = -log10(sort(runif(length(SNP$P), min=0, max=1))) # other alternative is ppoints
vector = -log10(1:length(SNP$P)/length(SNP$P))
test.p = -log10(sort((SNP$P[1:length(SNP$P)]), decreasing=F))
par(bg="white")
jpeg("QQ-run2-myscript.jpg", width=800, height=600)
#plot(runif, test.p, main="QQ of log p", col="forestgreen", xlim=c(0,6), ylim=c(0,6), ylab="-log(P)", xlab="-log(sequence)")
plot (vector, test.p, main="QQ of run2 no controls",col="blue", type="p", cex=1, pch=20)
abline(coef=c(0,1), col=1, lwd=1)
# legend("bottomright", legend=c("runif","vector","seq","rnorm"), col=c("forestgreen","navy","deeppink","purple"), lty=1, title="gradient", cex=0.8, lwd=2)
dev.off()
# ------------------- broad institute script modified for STAT
observed <- sort(abs(SNP$STAT), decreasing=T)
lobs <- -(log10(observed))
dof = (length(SNP$STAT) *2) -2
lexp = sort(-log10(abs(rt(length(SNP$STAT), df=dof))))
png("QQ-run2-broad-STAT.png", units="px", width=800, height=600)
plot(c(-5,7), c(-5,7), main="QQ no controls run2",col="red", lwd=3, type="l", xlab="Expected (-logSTAT)", ylab="Observed (-logP)", xlim=c(0,7), ylim=c(0,7), las=1, xaxs="i", yaxs="i", bty="l")
points(lexp, lobs, pch=20, cex=.8, bg="deepskyblue")
dev.off()
# ------------------- RAW broad script
observed <- sort(SNP$P)
lobs <- -(log10(observed))
expected <- c(1:length(observed))
lexp <- -(log10(expected / (length(expected)+1)))
plot(-log10(expected/(length(expected)+1)), type="l")
lines(lexp, col="red", add=T)
png("QQ_run2-broad-P.png", units="px", width=800, height=600)
plot(c(0,7), c(0,7), col="red", lwd=3, type="l", xlab="Expected (-logP)", ylab="Observed (-logP)", xlim=c(0,7), ylim=c(0,7), las=1, xaxs="i", yaxs="i", bty="l")
points(lexp, lobs, pch=20, cex=.8, bg="grey")
dev.off()
# adrian's script --------------------------------
# QQ-plot helper: sorted observed -log10 p-values (capped at `lim`)
# paired with their expected uniform quantiles.
parse.pvals.qq <- function(pvector, lim = 7) {
  obs <- sort(pvector, decreasing = FALSE)
  obs <- -log10(obs)
  exp_quantiles <- -log10(seq_along(obs) / length(obs))
  # Clamp extreme observations to the axis limit.
  obs[obs > lim] <- lim
  return(list(o = obs, e = exp_quantiles))
}
lim = 9
qq <- parse.pvals.qq(SNP[,"P"],lim=lim)
ylab = expression(Observed~~-log[10](italic(p)))
xlab = expression(Expected~~-log[10](italic(p)))
plot(qq$e,
qq$o,
xlim=c(0,lim),ylim=c(0,lim),
pch=20,col='deepskyblue',
xlab=xlab,ylab=ylab, main="title")
abline(coef=c(0,1), col=1, lwd=2) # my abline
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
### list of snps use this on command line :: 74th is the column with the rs ids...
## head -50 chr10.AOGC-NGS.2013.pointwise.txt | cut -f 74
## /media/ga-apps/UQCCG/Programming/scripts/PerlScripts/GrepMafFiles.pl 35SNPs_or_proxies.csv 74 chr10.AOGC-NGS.2013.pointwise.txt
## the.samples<-sample.sheet.full[,"ParticipantCode"]
## the.samples
## indels<-indels[,paste(the.samples,"GT",sep=".")]
# Asian-AML Control EXOME-AML
|
72cbb04cd8616c44dc35b3d8af04bf670e7a9f23
|
9b19a0739e65e8307c13a25640a5f965966913e7
|
/ui.R
|
126414c5982af0c064a603a967b0631de6490bb6
|
[] |
no_license
|
Lrem32/Final_Project
|
512a2fc503d9096b93983cbb5a9a75361f098c76
|
7ab68699e8af75341bd3bf4c2ed15189a32e6ca2
|
refs/heads/master
| 2020-11-24T04:27:56.034888
| 2019-12-14T04:50:13
| 2019-12-14T04:50:13
| 227,964,743
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,260
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)
# Define UI for application that draws a histogram
shinyUI(fluidPage(theme = shinytheme("superhero"),
# Application title
titlePanel("Understanding Crime in Chicago"),
navbarPage("UCC App",
tabPanel(h2("Introduction"),br(),
title = "Introduction",
h5("If you want to check out the dataset for",
a("chicago crime", href = "https://data.cityofchicago.org/Public-Safety/Crimes-2019/w98m-zvie"),
", its available online, they have a really cool interface for browsing through the data."), br(),
h4("Goal of the UCC App"),
p(h5("
This UCC App looks to help a user navigate the expansive Chicago Crime dataset,
while also helping them implement certain modeling methods and approaches. The
overall goal is to better understand how crime patterns change depending on
different factors, additionally we'd like to test our ability to predict the
likelyhood of certain crimes based on the factors the user will be
able to explore!
")), br(),
h4("About the Dataset"),
p(h5("
The City of Chicago does an incredibly good job at compiling much
of it's crime data. One can download it or even look at it and
visualize it on their respective website. We are going to be downloading
the dataset directly from their website. After some formatting and data
cleaning the data will be presented to the user for exploration.
This app will focus on having 4 main uses when exploring the Chicago Crime
Dataset."), br(),
h5("1) A Data exploration page where we can see some numeric
and graphical summaries using some user designated variables", style = "position"
),
h6("- Here the user will be able to define variables and types of graphs to create as well as save and export
the plots as well as any subset datasets they create to a .png and .csv file respectively."), br(),
h6("2) A page with clustering model approach where the user can specify
aspects of the algorithm"
),
h6("3) A page for modeling, where there will be two supervised learning models
with dynamic features for exploration of how different variables
affect modeling"),
h6("4) A simple representation of or dataset for the user to observe it.
Additionally they will be able to subset on this dataset to look for
more precise sections."
))
),
# This will be our Data Exploration tab allowing users to explore the data and produce some basic graphs.
tabPanel("Data Exploration",
sidebarLayout(
sidebarPanel(
selectInput("plote", h4("Graphing Options:"), c("Overview Of Crime Type"="crime", "Crimes Base On Time Of Day"="time",
"Crimes Based On Weekday"="day", "Crimes Based On Month" = "month")),
downloadButton("downloadData", "Download Dataset"),
downloadButton("downloadPlot", "Download Generated Plot"),
checkboxInput("selector_Option", h4("Show Optional Crime Variables")),
conditionalPanel(
condition = "input.selector_Option ==1",
checkboxGroupInput("crime_Selector", h4("Crime Variables Considered:"),
choices = c("Arson" = "Arson", "Assault" = "Assault",
"Battery" = "Battery", "Burglary" = "Burglary",
"Children Involvement" = "Child", "Damage" = "Damage",
"Drug Crime" = "Drug", "Fraud" = "Fraud", "Homicide" = "Homicide",
"Motor Vehicle Theft" = "MVT",
"Non-Violent Crime" = "NV-Crime", "Other" = "Other",
"Robbery" = "Robbery", "Sex Crime" = "Sex", "Theft" = "Theft",
"Human Trafficking" = "Traffick", "Tresspassing" = "Tresspass"),
selected = c("Arson", "Assault", "Battery", "Drug", "MVT", "Other"))),
checkboxInput("data_Filter", h4("Show Dataset Filtering Options")),
conditionalPanel(
condition = "input.data_Filter == 1",
checkboxGroupInput("variable_Selector", h4("Variables In Dataset"),
choices = c("ID" = "ID", "Case" = "Case Number", "Full Date" = "Date", "Date" = "Date 2",
"Month" = "Month", "Time" = "Time", "Block" = "Block", "Crime" = "Crime",
"IUCR" = "IUCR", "Primary Type" = "Primary Type",
"Result in Arrest" = "Arrest", "Domestic Violence" = "Domestic",
"Police Beat Area" = "Beat", "District" = "District",
"Ward" = "Ward", "FBI Code" = "FBI Code",
"Year" = "Year", "Latitude" = "Latitude",
"Longitude" = "Longitude"),
selected = c("Case Number", "Date 2", "Time", "Crime", "Arrest"))
)
),
mainPanel(
plotOutput("plot"),
dataTableOutput("exploreTable")
#textOutput("summaryStats")
)
)),
# This is our Modeling Tab for letting users implement supervised learning methods.
tabPanel("Supervised Learning Methods",
sidebarLayout(
sidebarPanel(
selectInput("modelType", h4("Modeling Options:"), c("Linear Regression Model"="lm",
"Random Forest Model" = "rf")),
conditionalPanel(
condition = "input.modelType == 'glm'",
selectInput("responseVar", h4("Select response variable"), c("Ward" = "Ward", "Type of Crime" = "Primary Type",
"Arrest" = "Arrest"))),
conditionalPanel(
condition = "input.responseVar == 'Ward'",
radioButtons("predictorVars", "Predictors to use in model:", c("Case Number" = "`Case Number`", "Date" = "`Date 2`",
"Primary Date" = "`Primary Date`", "Arrest" = "Arrest",
"Domestic" = "Domestic", "Beat" = "Beat",
"District" = "District", "Ward" = "Ward",
"FBI Code" = "`FBI Code`", "Year" = "Year",
"Latitude" = "Latitude", "Longitude" = "Longitude"),
select = "Beat"),
checkboxGroupInput("predictorVarsQuad", "Predictors to use in model(Quadratic):", c("Case Number" = "I(`Case Number`^2)", "Date" = "I(Date^2)",
"Primary Date" = "I(`Primary Date`^2)", "Arrest" = "I(Arrest^2)",
"Domestic" = "I(Domestic^2)", "Beat" = "I(Domestic^2)",
"District" = "I(District^2)", "Ward" = "I(Ward^2)",
"FBI Code" = "I(`FBI Code`^2)", "Year" = "I(Year^2)",
"Latitude" = "I(Latitude^2)", "Longitude" = "I(Longitude^2)"),
)
)
),
mainPanel(
verbatimTextOutput("glmModel"),
plotOutput("glmPlot")
)
)),
tabPanel("Clustering")
)))
|
67a8650173e18ca9d834ba34be3de6aa4bd7527c
|
fa6d897dd17890085b98ddd3a1f89f3cc144c4aa
|
/R/mod_plot.R
|
1316c2c5363549d8097d94503c161538d17a0511
|
[
"MIT"
] |
permissive
|
ColinFay/puzzlemath
|
6fa47ca5b71283a3cca6f61c83bebfc91a535bd2
|
53c4d90e3fb92cec84312feec69dc4b5b2a5a451
|
refs/heads/main
| 2023-01-24T01:47:53.479415
| 2020-11-26T21:37:35
| 2020-11-26T21:37:35
| 316,332,112
| 0
| 0
|
NOASSERTION
| 2020-11-26T20:37:23
| 2020-11-26T20:37:23
| null |
UTF-8
|
R
| false
| false
| 2,286
|
r
|
mod_plot.R
|
#' plot UI Function
#'
#' @description UI half of the plot shiny module: a single plot output
#'   whose output ID is namespaced by `id`.
#'
#' @param id Module id used to namespace the UI elements.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList plotOutput
plotUI <- function(id) {
  namespace <- shiny::NS(id)
  shiny::tagList(shiny::plotOutput(namespace("plot")))
}
#' start Server Function
#'
#' Server half of the plot shiny module. Renders a ggplot whose background
#' is an image, with a raster layer revealing cells and optional red arrows
#' pointing at the currently asked cell.
#'
#' @param id Module id (must match the one passed to the matching UI function).
#' @param r Shared reactive state; this function reads r$game, r$ans,
#'   r$arrows and r$mat_dim. NOTE(review): presumably a reactiveValues
#'   object owned by the app server — confirm against the caller.
#'
#' @noRd
#' @import shiny
#' @importFrom whereami cat_where whereami
#' @import ggplot2
#' @importFrom ggpubr background_image
#' @importFrom grid arrow
plotServer <- function(id, r){
  shiny::moduleServer(id,
    function(input, output, session){

      output$plot <- shiny::renderPlot({
        # Development aid: log which reactive context is executing.
        whereami::cat_where(whereami::whereami())
        # Render only once a game is active and a question is pending.
        # NOTE(review): `this` is not defined in this file — presumably a
        # package-level environment holding the current question (`qp`),
        # image (`img`) and data (`df`); confirm elsewhere in the package.
        req(r$game)
        req(this$qp)
        tqp <- this$qp
        # `plot_data()` is defined elsewhere; builds the grid (x, y, z, a)
        # used for the raster layer below.
        dat <- plot_data(tqp, r$ans)
        aa <- tqp$y
        bb <- tqp$x
        p <- ggplot2::ggplot(
          data = dat,
          ggplot2::aes(x = y, y = x, fill = z)
        ) +
          ggpubr::background_image(this$img)

        # Cells with a == 1 appear to be "revealed" — TODO confirm.
        if(any(dat$a==1)){
          if(all(dat$a==1)){
            # Everything revealed: draw the raster fully opaque.
            p <- p + ggplot2::geom_raster(show.legend = FALSE)
          }else{
            # Partially revealed: use `a` as per-cell alpha.
            p <- p + ggplot2::geom_raster(ggplot2::aes(alpha = a),show.legend = FALSE)
          }
          if(r$arrows){
            # Red helper arrows from each axis toward the asked cell (aa, bb).
            p <- p +
              ggplot2::geom_segment(
                x = aa, xend = aa, y = 0.5, yend = bb-0.25,
                arrow = grid::arrow(),
                colour = 'red',
                size = 1) +
              ggplot2::geom_segment(
                x = 0.5, xend = aa-0.25, y = bb, yend = bb,
                arrow = grid::arrow(),
                colour = 'red',
                size = 1)
          }
          p <- p + ggplot2::scale_fill_viridis_b()
        }
        # Axis breaks are the matrix indices 1..mat_dim, labelled with the
        # 'v1'/'v2' attributes stored on this$df.
        p + ggplot2::scale_x_continuous(
          expand = c(0,0),
          breaks = seq(r$mat_dim),
          labels = attr(this$df,'v2')
        ) +
          ggplot2::scale_y_continuous(
            expand = c(0,0),
            breaks = seq(r$mat_dim),
            labels = attr(this$df,'v1')
          ) +
          ggplot2::theme(
            axis.text = ggplot2::element_text(size = 20),
            axis.title = ggplot2::element_blank()
          )
      })
    })
}
|
09ad315e71c9c0f9a4924c424fd77d168ac29325
|
eef7754e99418c7c040e2bc18e95b3263d542426
|
/man/reordercategories.Rd
|
37f2a7213539363d373c7659d44e35e26872ff9e
|
[] |
no_license
|
cran/cabootcrs
|
e63b5ce9d86b5f2b973c19b8a250095f81f81c55
|
a5e3efc3f1e555b547a9b0ffb266052bee11a770
|
refs/heads/master
| 2022-03-06T06:23:19.926820
| 2022-03-02T22:00:02
| 2022-03-02T22:00:02
| 17,694,910
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,210
|
rd
|
reordercategories.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defineclassesandfunctions.R
\name{reordercategories}
\alias{reordercategories}
\title{Reorder categories for chosen variable in MCA case only}
\usage{
reordercategories(x, varno, newcats)
}
\arguments{
\item{x}{An object of class \code{\linkS4class{cabootcrsresults}}}
\item{varno}{The number of the variable to be reordered}
\item{newcats}{A vector of length equal to the number of categories for this variable,
giving the new order for the categories
(e.g. c(4,1,2,3) means that the original 4th category is moved to first)}
}
\value{
An object of class \code{\linkS4class{cabootcrsresults}}
}
\description{
\code{reordercategories} reorders the principal and standard coordinates, CTR, REP,
variances and covariances of the categories for a single MCA variable
}
\details{
This may be useful when comparing results between different data sets or from different packages.
Note: does not reorder anything in the \code{\linkS4class{cabasicresults}}
part of the \code{\linkS4class{cabootcrsresults}} object
}
\examples{
bd3 <- cabootcrs(DreamData223by3, catype="mca", nboots=0, showresults=FALSE)
bd3reorderedvar2 <- reordercategories(bd3, 2, c(3,2,4,1))
summaryca(bd3)
summaryca(bd3reorderedvar2)
\dontrun{
# Can be used when comparing results in different packages,
# or when adding ellipses from this package to output from others
library(FactoMineR)
library(ca)
data(tea)
# remove duplicated age variable
teamod <- tea[,c(1:18,20:36)]
# ca package uses standardised coordinates and inertias by default
catea <- mjca(teamod)
btea <- cabootcrs(teamod, catype="mca", showresults=FALSE, nboots=0, varandcat=FALSE)
# FactoMineR package uses unstandardised coordinates and inertias by default
fmtea <- MCA(teamod, method="Burt", graph=FALSE)
bteaunstd <- cabootcrs(teamod, catype="mca", showresults=FALSE, nboots=0,
mcaadjustinertias = FALSE, mcaadjustcoords = FALSE, varandcat=FALSE)
summary(fmtea)
summaryca(bteaunstd)
summary(catea)
summaryca(btea)
# slight difference due to different orderings of categories for these two
fmtea$var$coord / bteaunstd@Colprinccoord[,1:5]
catea$colpcoord[,1:5] / btea@Colprinccoord[,1:5]
fmtea$var$coord / catea$colpcoord[,1:5]
# Variables 22 and 23, in columns 57-65, are the problem
# The coordinates agree (apart from reflection) but the categories are in a different order
fmtea$var$coord[57:65,1:3]
bteaunstd@Colprinccoord[57:65,1:3]
catea$colpcoord[57:65,1:3]
btea@Colprinccoord[57:65,1:3]
# Coordinates agree when categories reordered and axes reflected
bteaunstdreord <- reordercategories(bteaunstd,22,c(2:5,1))
bteaunstdreord <- reordercategories(bteaunstdreord,23,c(3,2,1,4))
bteaunstdreordreflect <- reflectaxes(bteaunstdreord,c(1,4))
fmtea$var$coord / bteaunstdreordreflect@Colprinccoord[,1:5]
bteareord <- reordercategories(btea,22,c(2:5,1))
bteareord <- reordercategories(bteareord,23,c(3,2,1,4))
bteareordreflect <- reflectaxes(bteareord,c(2,5))
catea$colpcoord[,1:5] / bteareordreflect@Colprinccoord[,1:5]
}
}
\seealso{
\code{\link{cabootcrs-package}}, \code{\link{cabootcrs}}, \code{\link{reflectaxes}}, \code{\linkS4class{cabootcrsresults}}
}
|
055cdd1b78effe843e83db7d4a1bf82455a4d447
|
13d8fceceaa2f2a7b774490d61537e9f416a8d11
|
/Tea_analysis_Prinn.R
|
4462246c95c84a85e19dd0441ef5e412276ddd14
|
[] |
no_license
|
JoeRothwell/Polyphenols
|
5e9be000a17692a32b00bbc7a49e28b15934faa0
|
e31c6303b1247c7d867ecff1801bc9490620d129
|
refs/heads/master
| 2023-06-27T18:08:46.790634
| 2023-06-16T21:06:52
| 2023-06-16T21:06:52
| 180,323,803
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,008
|
r
|
Tea_analysis_Prinn.R
|
# Tea analysis for Prinn
# Original dataset ----
# had one replicate per sample only
# tea <- read.csv("data/Tea_PCA_pp.csv", header=T)
# new set of data 19-feb 2015
teanew <- read.csv("data/Teanew.csv", header=T)
# discard altitude data and subset observation names separately
# columns 5:71 hold the compound measurements; column 1 = sample label,
# column 4 = group membership
cmpds <- teanew[ , 5:71]
# BUG FIX: the prcomp argument is `scale.`, not `.scale`. The misspelled
# name was silently swallowed by `...`, so the analysis only ran unscaled
# because FALSE happens to be the default. Spell the argument correctly
# so the intent is actually honoured.
pc <- prcomp(cmpds, scale. = FALSE)
tlabs <- teanew[, 1]
groups <- teanew[ , 4]
#biplot and scree plot
biplot(pc, xlabs=tlabs)
plot(pc)
# Spearman correlation heatmap of variables
library(corrplot)
tcor <- cor(cmpds, method="spearman")
corrplot(tcor, is.corr=TRUE, method="color", mar=c(1,1,1,1),
shade.col=NA, tl.col="black", tl.srt=90, order="hclust", type="full",
addrect=NULL, tl.cex=0.8, cl.cex=0.7)
# Scatterplot to give loadings
library(ggplot2)
ggplot(tload, aes(x=PC1, y=PC2)) + geom_point() + theme_bw() +
geom_text(data=tload, mapping=aes(x = PC1, y = PC2+0.015, label = .names), size=4, alpha=0.8) +
labs(x = "PC1", y = "PC2") + #ylim(-0.12, 0.08) + xlim(-0.10, 0.10) +
geom_hline(yintercept=0, linetype="dashed") + geom_vline(xintercept=0, linetype="dashed") +
theme(panel.border=element_rect(colour="black"))
ggsave("Tea loadings2.png", height=150, width=200, units="mm")
#cluster dendrogram
cmpdsT <- t(cmpds)
hc <- hclust(dist(cmpdsT))
plot(hc)
# New data: four replicates per sample ----
tea <- read.csv("data/Tea new four replicates.csv", header=T, row.names=1)
tea0 <- read.csv("data/Tea four reps no impute.csv", header=T, row.names=1)
tealabs <- rep(c("SVR", "MK", "BR", "PKML", "CF", "OHO", "DC", "YW"), each = 4)
logtea <- log2(tea)
#logtea2 <- log2(tea0)
pclog <- prcomp(logtea, scale.=T)
#pc <- prcomp(tea, scale.=T)
pcnoscale <- prcomp(tea, scale.=F)
#pcnoimpute <- prcomp(logtea2, scale.=T)
#pcnoimpute2 <- prcomp(tea0, scale.=T)
logtea <- log2(tea)
#biplot and scree plot
library(ggbiplot)
plot(pclog)
ggbiplot(pclog, var.axes=F, labels=rownames(tea), groups=tealabs) + theme_bw() +
stat_ellipse(type="norm", linetype="dashed") +
geom_hline(linetype="dashed") + geom_vline(linetype="dashed") +
coord_fixed(ratio=0.75) +
theme(panel.border=element_rect(colour="black", size=1), panel.grid.major=element_blank(),
legend.position="none") + ggtitle("log2 transformed and UV scaled")
ggsave("Tea scores 4 reps log2 scaled2.png", height=150, width=200, units="mm")
#correlation heatmap of variables
library(corrplot)
tcor <- cor(tea, method="pearson")
corrplot(tcor, is.corr=TRUE, method="color", mar=c(1,1,1,1),
shade.col=NA, tl.col="black", tl.srt=90, order="hclust", type="full",
addrect=NULL, tl.cex=0.8, cl.cex=0.7)
#plot loadings separately from prcomp output
#make a data frame from the rotations calculated by prcomp
tload <- data.frame(pclog$rotation, .names = row.names(pclog$rotation))
#scatterplot to give loadings
library(ggplot2)
ggplot(tload, aes(x=PC1, y=PC2)) + geom_point() + theme_bw() +
geom_text(data=tload, mapping=aes(x = PC1, y = PC2+0.008, label = .names), size=4, alpha=0.8) +
labs(x = "PC1", y = "PC2") + #ylim(-0.12, 0.08) + xlim(-0.10, 0.10) +
geom_hline(yintercept = 0, linetype="dashed") + geom_vline(xintercept = 0, linetype="dashed") +
theme(panel.border=element_rect(colour="black")) +
ggtitle("loadings log2 transformed and UV scaled")
ggsave("Tea loadings 4 reps log2 and scaled.png", height=150, width=200, units="mm")
#cluster dendrogram
teaT <- t(tea)
hc <- hclust(dist(teaT))
plot(hc)
#ggplot2
library(reshape2)
mtcor <- data.frame(melt(tcor))
ggplot(mtcor, aes(x=Var1, y=Var2, fill=value)) + geom_tile() + theme_bw()
# New PCA (formerly Tea new PCA.R)
pclog <- prcomp(logtea, scale.=T)
pcnoscale <- prcomp(tea, scale.=F)
groups <- tealabs
d <- data.frame(pclog$x) %>% mutate(Tea = groups)
e <- data.frame(unscaled$x) %>% mutate(Tea = groups)
#ggplot2 settings
theme_pca <- theme_bw() + theme(panel.border=element_rect(colour="black", size=1),
panel.grid.major=element_blank(), legend.key = element_rect(colour="white"), legend.title=element_blank())
ggplot(d, aes(x=PC1, y=PC2)) + geom_point(data=d, mapping = aes(shape=Tea)) +
stat_ellipse(type="norm", size=0.2) + geom_hline(yintercept = 0, size=0.2) + theme_pca +
geom_vline(xintercept = 0, size=0.2) + xlab("Scores on PC1") + ylab("Scores on PC2") +
scale_shape_manual(values=c(1,2,5,6,15,17,18,19)) + xlab("Scores on PC1") + ylab("Scores on PC2")
ggplot(e, aes(x=PC1, y=PC2)) + geom_point(data=e, mapping = aes(shape=Tea)) +
stat_ellipse(type="norm", size=0.2) + theme_pca + geom_hline(yintercept = 0, size=0.2) +
geom_vline(xintercept = 0, size=0.2) +
scale_shape_manual(values=c(1,2,5,6, 15, 17, 18,19)) + xlab("Scores on PC1") + ylab("Scores on PC2")
ggsave("Scores plot symbols scaled bw.png", height=100, width=160, units="mm")
#make a data frame from the rotations calculated by prcomp
f <- data.frame(pclog$rotation, .names = c(1:nrow(pclog$rotation)))
g <- data.frame(pcnoscale$rotation, .names = c(1:nrow(pcnoscale$rotation)))
#scatterplot to give loadings
ggplot(f, aes(x=PC1, y=PC2)) + geom_point() + theme_pca +
geom_hline(yintercept = 0, size=0.2) + theme_pca + geom_vline(xintercept = 0, size=0.2) +
geom_text(data=f, mapping=aes(x = PC1-0.002, y = PC2-0.009, label = .names), size=3, alpha=0.8) +
labs(x = "Loadings on PC1", y = "Loadings on PC2")
ggplot(g, aes(x=PC1, y=PC2)) + geom_point() + theme_pca +
geom_hline(yintercept = 0, size=0.2) + theme_pca + geom_vline(xintercept = 0, size=0.2) +
geom_text(data=g, mapping=aes(x = PC1-0.005, y = PC2-0.025, label = .names), size=3, alpha=0.8) +
labs(x = "Loadings on PC1", y = "Loadings on PC2")
ggsave("Loadings plot numbers unscaled.png", height=100, width=160, units="mm")
#filtered variables
filter <- c(3,6,11,12,14,18,19,29,31,34,45) #positions of variables to be retained (from email 22/2/16)
teafilter <- logtea[, filter]
pclog <- prcomp(teafilter, scale.=T)
unscaled <- prcomp(teafilter, scale.=F) #after this obtain d and e variables from above
#ggplot2 settings
theme_pca <- theme_bw() + theme(panel.border=element_rect(colour="black", size=1),
panel.grid.major=element_blank(), legend.key = element_rect(colour="white"), legend.title=element_blank())
ggplot(e, aes(x=PC1, y=PC2)) + geom_point(data=e, mapping = aes(shape=Tea)) +
stat_ellipse(type="norm", size=0.2) + geom_hline(yintercept = 0, size=0.2) + theme_pca +
geom_vline(xintercept = 0, size=0.2) + xlab("Scores on PC1") + ylab("Scores on PC2") +
scale_shape_manual(values=c(1,2,5,6,15,17,18,19)) + xlab("Scores on PC1") + ylab("Scores on PC2")
ggsave("Scores reduced variables unscaled.png", height=100, width=160, units="mm")
# PCPR2 (formerly PCPR2 Tea.R)
tea <- read.csv("data/Tea mod joe.csv")
# Subset X and Y variables
X_DataMatrix <- log2(tea[, 6:72]) %>% as.matrix
Y_var <- tea[, 2:4]
Y_var <- Y_var %>% mutate_at(vars(Replicate, Experiment), as.factor)
library(pcpr2)
output <- runPCPR2(X_DataMatrix, Y_var)
plot(output, col = "red")
|
fd6b18bfbf04235186b686d1362b6ace8b59cb6a
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.database/man/memorydb_create_acl.Rd
|
ad6b82f9d5492bf9b27f6e6b2711ec6d5d5d06e3
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 893
|
rd
|
memorydb_create_acl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/memorydb_operations.R
\name{memorydb_create_acl}
\alias{memorydb_create_acl}
\title{Creates an Access Control List}
\usage{
memorydb_create_acl(ACLName, UserNames = NULL, Tags = NULL)
}
\arguments{
\item{ACLName}{[required] The name of the Access Control List.}
\item{UserNames}{The list of users that belong to the Access Control List.}
\item{Tags}{A list of tags to be added to this resource. A tag is a key-value pair.
A tag key must be accompanied by a tag value, although null is accepted.}
}
\description{
Creates an Access Control List. For more information, see \href{https://docs.aws.amazon.com/memorydb/latest/devguide/clusters.acls.html}{Authenticating users with Access Control Lists (ACLs)}.
See \url{https://www.paws-r-sdk.com/docs/memorydb_create_acl/} for full documentation.
}
\keyword{internal}
|
ddd7278a114c5c349bffce30ff0ff2bea351b9c5
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/eigenmodel/R/rmvnorm.R
|
afc9cedf1eb0e17a0b3f547bfd66658f310fc92d
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 168
|
r
|
rmvnorm.R
|
"rmvnorm" <-
function(mu,Sig2){
## sample from multivariate normal distribution
R<-t(chol(Sig2))
t(R%*%(rnorm(length(mu),0,1)) +mu)
}
|
f868af15646f963fa811ec388cb75dd12576bbdc
|
8577e9d70acc9b3383684b8aedd5bdc36b72751d
|
/data-raw/cat_documentation.R
|
5b321743e14769ae3e03ddcaeb06d49f590aa16a
|
[
"Artistic-2.0"
] |
permissive
|
Liuyangying/curatedMetagenomicData
|
f6a9bffb45e2e3b71d3429d798a8bcd2fe309aa2
|
9c5f2bfa7c15c839f30049a9ec41ca57c6393d3f
|
refs/heads/master
| 2020-05-20T05:13:08.761177
| 2019-04-25T14:34:05
| 2019-04-25T14:34:05
| 185,399,986
| 1
| 0
|
Artistic-2.0
| 2019-05-07T12:43:53
| 2019-05-07T12:43:52
| null |
UTF-8
|
R
| false
| false
| 1,452
|
r
|
cat_documentation.R
|
# Write one roxygen documentation file for a dataset title.
#
# title_str:        dataset title to look up in documentation_df$title;
#                   the exact match supplies most fields, every partial
#                   (grep) match supplies the alias/dataset lists.
# documentation_df: data.frame of documentation metadata with a `title`
#                   column, consumed by the documentation_* helpers
#                   (defined elsewhere in this package).
cat_documentation <- function(title_str, documentation_df) {
  first_element <- match(title_str, documentation_df$title)
  every_element <- grep(title_str, documentation_df$title)
  # Assemble the eleven pieces of the generated file, in output order.
  documentation_str <- c(
    "## generated by make_data_documentation(): do not edit by hand",
    "\n## see source in data-raw/make_data_documentation.R",
    documentation_title(first_element, documentation_df),
    documentation_aliases(every_element, documentation_df),
    documentation_section("Datasets:"),
    documentation_datasets(every_element, documentation_df),
    documentation_section("Source:"),
    documentation_source(first_element, documentation_df),
    documentation_examples(first_element, documentation_df),
    documentation_name(first_element, documentation_df),
    "\nNULL\n"
  )
  out_file <- documentation_filename(first_element, documentation_df)
  cat(documentation_str, file = out_file)
}
|
346633d0448c982c7162525071751bc4d879592b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/oceanmap/examples/parameter_definitions.Rd.R
|
5d1d34c1e988d9b247f3894698049d54d3c15d7c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 375
|
r
|
parameter_definitions.Rd.R
|
# Auto-extracted example script for the oceanmap::parameter_definitions dataset.
library(oceanmap)
### Name: parameter_definitions
### Title: parameter definitions dataframe
### Aliases: parameter_definitions
### Keywords: parameter_definitions
### ** Examples
## Example
# Load the bundled dataset into the workspace and preview it.
data(parameter_definitions)
head(parameter_definitions)
# selecting sea surface temperature parameter definition
parameter_definitions[parameter_definitions$param == "sst2",]
|
426ead1fe22211891d086eb1e32005db2224ca6b
|
530ffa1be925736fc1ee238e07d77f96dbb1c17e
|
/R/make_bins.R
|
048978353af07e89bdffd38bc838abf74e3fa0e2
|
[] |
no_license
|
qenvio/dryhic
|
b70acbb07847c64add1b026e378cc191d669bdc6
|
5b1fdaec3b635869e9c78ac0e1140eb497723fa6
|
refs/heads/master
| 2021-01-09T06:31:11.028762
| 2020-03-10T18:29:22
| 2020-03-10T18:29:22
| 81,000,207
| 3
| 3
| null | 2017-06-12T08:01:02
| 2017-02-05T15:03:56
|
R
|
UTF-8
|
R
| false
| false
| 1,056
|
r
|
make_bins.R
|
#' Bin the genome
#'
#' Reads the header of a HiC-BAM file (via `samtools view -H`) and tiles
#' every chromosome into fixed-size bins of the requested resolution.
#' @import magrittr
#' @importFrom dplyr mutate
#' @importFrom dplyr filter
#' @param inbam HiC-BAM file (only the header will be used)
#' @param resolution Desired resolution (bin size)
#' @return A \code{data.frame} containing chromosome, position and bin ID
#' @export
#' @examples
#' plot(0)
make_bins <- function(inbam, resolution){
  # Parse the BAM header; @SQ lines carry the chromosome name (SN:) and
  # length (LN:) in columns V2 and V3.
  header <- read.delim(pipe(paste("samtools view -H", inbam)), head = F)
  sq <- header[header$V1 == "@SQ", ]
  chrom_sizes <- as.numeric(gsub("^LN:", "", sq$V3))
  names(chrom_sizes) <- gsub("^SN:", "", sq$V2)
  # Bin start positions per chromosome, from 0 up to the chromosome end.
  starts <- lapply(chrom_sizes, function(len) as.integer(seq(0, len, resolution)))
  bins <- data.frame(chr = rep(names(starts), sapply(starts, length)),
                     pos = unlist(starts),
                     stringsAsFactors = F) %>%
    mutate(bin = paste(chr, pos, sep = ":"))
  bins
}
|
8663bdec7ed5869be9ea460453a2cd8351b6414d
|
ba4b34a9f81f25dbe905433ed55d32282adebd70
|
/Rcode/getWeights.R
|
9b4bad87bc2ec577a202600ad103ecdcb301cea8
|
[] |
no_license
|
somegek/DataPreprocess
|
10099b89e5d0aae2f151a63fbea0513b1ced5baf
|
434fb92cad6ed4a87a4632ef03f985b801abb947
|
refs/heads/master
| 2020-03-31T13:39:51.338239
| 2019-03-15T09:49:52
| 2019-03-15T09:49:52
| 152,264,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,699
|
r
|
getWeights.R
|
# Compute optimal forecast-combination weights.
#
# NOTE(review): heavy use of data.table reference semantics — `:=` inside the
# helpers mutates `trainDT`/`DT` in place; statement order matters, so the
# body is documented here rather than restructured.
#
# Input : DT — preprocessed long-format forecast data; expected to carry at
#         least FCT_SOURCE, FCT_TOPIC, FCT_HORIZON, TIME_PERIOD, OBS_VALUE
#         and ERR_VALUE columns (inferred from the accesses below — confirm).
# Output: DT expanded per test period, with WEIGHT_FULL, WEIGHT_SUB and
#         WEIGHT_EQUAL columns appended.
getWeights <- function(DT) {
  # this function calculates the optimal weights
  # input: preprocessed data
  # output: data with optimal weights

  # Extract the sub-covariance matrix for the given forecaster names.
  getSubCov <- function(Cov, colNameList) {
    colInd <- which(colnames(Cov) %in% colNameList)
    Cov[colInd, colInd]
  }

  # Minimum-variance combination weights: solve Cov * w = 1, normalise to sum 1.
  calculateWeight <- function(Cov) {
    weight <- solve(Cov, rep(1, ncol(Cov)))
    weight <- weight / sum(weight)
    weight
  }

  # Estimate the systematic bias in each forecaster's errors
  # (ERR = alpha + beta * OBS) and de-bias the training errors.
  # NOTE(review): mutates trainDT in place via `:=`.
  getBias <- function(tempDT, trainDT) {
    # calculate demeaned error and forecast
    colNamesList <- c("OBS_DEMEAN", "ERR_DEMEAN")
    colSelectList <- c("OBS_VALUE", "ERR_VALUE")
    trainDT[, (colNamesList) := lapply(.SD, function(x) x - mean(x)), .SDcols = colSelectList, by = c("FCT_SOURCE")]
    # use ERR_DEMEAN ~ OBS_DEMEAN - 1 to calculate beta per forecaster
    trainDT[, ERR_B := lm(ERR_DEMEAN ~ OBS_DEMEAN - 1, data = .SD[TRUE])$coefficients, by = c("FCT_SOURCE")]
    # calculate alpha over the different forecasters
    trainDT[, ERR_A := mean(ERR_VALUE - ERR_B * OBS_VALUE)]
    trainDT[, BIAS := ERR_A + ERR_B * OBS_VALUE]
    # combine the fitted alpha/beta with the test data
    tempDT <- merge(tempDT, unique(trainDT[, .(FCT_SOURCE, ERR_B, ERR_A)]), by = c("FCT_SOURCE"))
    # forecast the bias component of ERR_VALUE
    tempDT[, BIAS := ERR_A + ERR_B * OBS_VALUE]
    # remove the bias from trainDT errors before the covariance calculation
    trainDT[, ERR_VALUE := ERR_VALUE - BIAS]
    tempCov <- getCovariance(tempDT, trainDT)
    # bias of the latest period + covariance of the de-biased errors
    list(bias = tempDT[TIME_PERIOD == last(sort(TIME_PERIOD)), BIAS], Cov = tempCov)
  }

  # Bias-adjusted weights: add the bias outer product to the error covariance
  # (mean-squared-error matrix) before solving for weights.
  getBiasWeights <- function(tempDT, trainDT) {
    biasList <- getBias(tempDT, trainDT)
    Cov_Err <- getSubCov(biasList$Cov, tempDT[TIME_PERIOD == last(sort(TIME_PERIOD)), FCT_SOURCE]) + biasList$bias %o% biasList$bias
    weight <- calculateWeight(Cov_Err)
    weight
  }

  # Pairwise error covariance across forecasters, computed over the periods
  # both forecasters have in common; projected to the nearest p.d. matrix.
  getCovariance <- function(tempDT, trainDT) {
    # list of all forecasters
    forcastersList <- unique(tempDT$FCT_SOURCE)
    amountOfForecasters <- length(forcastersList)
    # allocate NA covariance
    tempCov <- matrix(nrow = amountOfForecasters, ncol = amountOfForecasters)
    # covariance is calculated with trainDT; fill element by element
    for (i in 1:amountOfForecasters) {
      for (j in 1:amountOfForecasters) {
        # errors per forecaster, keyed on TIME_PERIOD
        ei <- trainDT[FCT_SOURCE == forcastersList[[i]], .(TIME_PERIOD, ERR_VALUE)]
        ej <- trainDT[FCT_SOURCE == forcastersList[[j]], .(TIME_PERIOD, ERR_VALUE)]
        setkey(ei, TIME_PERIOD)
        setkey(ej, TIME_PERIOD)
        # inner join: keep only periods both forecasters cover
        e <- ei[ej, nomatch = 0]
        # mean cross-product of errors (0 when no overlap)
        if (nrow(e) == 0) {
          tempCov[[i, j]] <- 0
        } else {
          tempCov[[i, j]] <- sum(e$ERR_VALUE * e$i.ERR_VALUE) / nrow(e)
        }
      }
    }
    # make sure it is p.d. (choice according to the paper)
    tempCov <- nearPD(tempCov)
    # take the matrix from the output structure of nearPD
    tempCov <- as.data.frame.matrix(tempCov$mat)
    colnames(tempCov) <- forcastersList
    tempCov
  }

  # Compute weights for one topic/horizon/test-period group and return them
  # aligned to tempDT's rows (0 for forecasters without a weight).
  getWeightsSubFunc <- function(isFull, tempDT, trainDT) {
    # weights are appended to tempDT
    if (isFull) {
      # calculate weights using the full (bias-adjusted) covariance
      tempWeight <- getBiasWeights(tempDT, trainDT)
    } else {
      tempCov <- getCovariance(tempDT, trainDT)
      # weights from the sub-matrix of forecasters active in the last period
      tempWeight <- calculateWeight(getSubCov(tempCov, tempDT[TIME_PERIOD == last(sort(TIME_PERIOD)), FCT_SOURCE]))
    }
    # fill in the weights, joining on forecaster name
    tempWeightDT <- as.data.table(tempWeight, keep.rownames = "FCT_SOURCE")
    setkey(tempWeightDT, FCT_SOURCE)
    setkey(tempDT, FCT_SOURCE)
    tempDT <- merge(tempDT, tempWeightDT, all.x = TRUE)
    tempDT[is.na(tempWeight), tempWeight := 0]
    return(tempDT$tempWeight)
  }

  # Replicate the data once per test period (quarterly grid 2014Q2..2018Q2).
  testingList <- seq(2014.25, 2018.25, 0.25)
  testingColumn <- sort(rep(testingList, nrow(DT)))
  DT <- data.table(DT, TEST_PERIOD = testingColumn)
  # take only the values up to the forecast date to evaluate covariance
  DT <- DT[TIME_PERIOD <= TEST_PERIOD]
  # calculate the weights by running getWeightsSubFunc() per topic, horizon and test period
  # .SD[TRUE] is because unmodified .SD is locked, so modify a copy by taking all rows with [TRUE]
  print("Full start")
  startPoint <- Sys.time()
  DT[, WEIGHT_FULL := getWeightsSubFunc(isFull = TRUE, .SD[TRUE], .SD[TIME_PERIOD < TEST_PERIOD]), by = c("FCT_TOPIC", "FCT_HORIZON", "TEST_PERIOD")]
  print("Full done")
  midPoint <- Sys.time()
  print(midPoint - startPoint)
  DT[, WEIGHT_SUB := getWeightsSubFunc(isFull = FALSE, .SD[TRUE], .SD[TIME_PERIOD < TEST_PERIOD]), by = c("FCT_TOPIC", "FCT_HORIZON", "TEST_PERIOD")]
  print("Sub done")
  endPoint <- Sys.time()
  print(endPoint - midPoint)
  print("Total time")
  print(endPoint - startPoint)
  # equal weights: 1/N over the forecasters present in the test period itself
  DT[TIME_PERIOD == TEST_PERIOD, WEIGHT_EQUAL := 1 / .N, by = c("FCT_TOPIC", "FCT_HORIZON", "TEST_PERIOD")]
  # set order so that, within each test period, the last obs carries the equal weight
  setkey(DT, FCT_TOPIC, FCT_HORIZON, TEST_PERIOD, TIME_PERIOD)
  # back-fill that weight upwards within each test period (na.locf from zoo)
  DT[, WEIGHT_EQUAL := na.locf(WEIGHT_EQUAL, fromLast = T)]
  DT
}
|
b70bc0bdba4540af3bb000943f9ef1ac950dabcf
|
4e9a08420ce21f33b02d48b83c17deee36e97d77
|
/low-birth-weight.R
|
407f9ec053c9c11aeb2aefd2f58f276b0a5332ca
|
[] |
no_license
|
nichaelmeilson-kul/low-birth-weight
|
d220edeb5b8c6628dc6eb2b4835f05a91f621ccf
|
954ac675e866bd884c1af8d4417fde0e56fb420f
|
refs/heads/master
| 2021-01-08T05:22:36.815215
| 2020-03-16T20:14:51
| 2020-03-16T20:14:51
| 241,924,849
| 0
| 1
| null | 2020-03-16T19:11:32
| 2020-02-20T15:52:56
|
HTML
|
UTF-8
|
R
| false
| false
| 1,268
|
r
|
low-birth-weight.R
|
# To run in R Studio:
# Dropdown panel: Session > Set Working Directory > To Source File Location

# BUG FIX: the data were read into `birth.table` while every glm() call below
# referenced the undefined object `lowbwt.small`, so nothing ran. Bind both
# names so the models work and any other use of `birth.table` is preserved.
birth.table <- read.table('lowbwt.dat')
lowbwt.small <- birth.table

### PART I (magda /// 26.02.2019)

# Starting model: main effects plus the interaction terms we hypothesize
# might be relevant.
summary(glm(LOW ~ AGE + LWT + FTV + RACE + LWT*FTV + AGE*FTV,
            data = lowbwt.small,
            family = binomial(link = "logit")))
# Residual deviance: 211.99

# Remove LWT*FTV - high p-value.
summary(glm(LOW ~ AGE + LWT + FTV + RACE + AGE*FTV,
            data = lowbwt.small,
            family = binomial(link = "logit")))
# Residual deviance: 212.62

# Remove RACE - high p-value (AGE's is higher, but we can't remove AGE if we
# want to keep AGE*FTV, which appears to be very significant).
summary(glm(LOW ~ AGE + LWT + FTV + AGE*FTV,
            data = lowbwt.small,
            family = binomial(link = "logit")))
# Residual deviance: 215.37
# These terms are all significant.

## Technically residual deviance goes up as we remove terms (expected — this
## inevitably happens as variables are dropped), but AIC can be used to check
## that removing these terms does actually improve the model.
## BUT how is this different from the backwards selection we have to do in
## part 2 of analysis 1?
|
3eda942bb88df0a53192e8887f46c9a1ed40b0c7
|
b32baa2b38043d0bbf497eade8f934c557f490cc
|
/R/similarProducts.R
|
07830521be578c7ff9ed6205d0b303370b9bdda2
|
[] |
no_license
|
byapparov/recommender
|
21a83204528c3cbb0696a805b840d743584fec36
|
9fb394be260e46683b13cb5434a711461fd689fe
|
refs/heads/master
| 2020-07-27T08:38:52.952805
| 2018-06-09T08:17:26
| 2018-06-09T08:17:26
| 73,433,864
| 0
| 3
| null | 2018-06-09T08:17:27
| 2016-11-11T00:59:30
|
R
|
UTF-8
|
R
| false
| false
| 3,126
|
r
|
similarProducts.R
|
#' Recommends products similar to given products
#'
#' Based on the similarity matrix and given products, recommends top products.
#' @export
#'
#' @importFrom utils head
#' @param sim.matrix similarity matrix based on implicit interactions
#' @param skus identifiers of the implicit interaction products
#'   (must match row names in `sim.matrix`; unknown skus are dropped with a warning)
#' @param values required number of recommendations
#' @param exclude.same excludes values in `skus` from recommendations
#' @param groups named vector of sku categories; when supplied, at most one
#'   recommendation is kept per group
#' @return Character vector of up to `values` recommended skus, best first.
recommendSimilar <- function(sim.matrix, skus, values, exclude.same, groups = NULL) {
  # NSE guards for R CMD check
  sku.rec <- sim <- group <- NULL

  # Warn about (and drop) skus absent from the similarity matrix.
  missing.skus <- setdiff(skus, rownames(sim.matrix))
  if (length(missing.skus) > 0) {
    warning("Following skus are missing from the sim.matrix: ", paste(missing.skus, collapse = ", "))
  }
  # only keep skus that are in the similarity matrix
  skus <- setdiff(skus, missing.skus)

  similarity.scores <- combineSimilarity(sim.matrix, skus, exclude.same)
  similarity.scores <- keepOnePerGroup(similarity.scores, groups)

  # Highest combined similarity first, limited to the requested count.
  # (was `decreasing = T`: always spell out TRUE/FALSE)
  head(similarity.scores[order(sim, decreasing = TRUE)]$sku, values)
}
#' Combined similarity to given products
#'
#' Melts the recommendation matrix into a normalised data.table holding the
#' mean similarity score for each candidate product.
#' @inherit recommendSimilar
combineSimilarity <- function(sim.matrix, skus, exclude.same) {
  # NSE guards for R CMD check
  sim <- sku.rec <- NULL

  # Columns to keep (optionally excluding the input skus themselves).
  keep.cols <- notInWhich(colnames(sim.matrix), skus, exclude.same)

  # Long format: one row per (source sku, recommended sku, similarity).
  long <- melt(sim.matrix[skus, keep.cols, drop = FALSE], na.rm = TRUE)
  colnames(long) <- c("sku", "sku.rec", "sim")
  long <- data.table(long, key = c("sku", "sku.rec"))

  # Average the similarity scores over the source skus, keyed by candidate.
  scores <- long[, list(sim = mean(sim)), by = sku.rec]
  setnames(scores, "sku.rec", "sku")
  setkey(scores, "sku")
  scores
}
#' Creates permutation index for exclusion of values
#'
#' @param x vector to filter
#' @param y vector of values to exclude from `x`
#' @param filter flag to indicate whether the exclusion filter should be applied
#' @return Integer index vector for subsetting `x`: negative positions of the
#'   elements of `x` found in `y` when `filter` is TRUE and at least one match
#'   exists; otherwise the full index `seq_along(x)`.
notInWhich <- function(x, y, filter) {
  # seq_along() is safe for zero-length x; the original `1:length(x)`
  # produced c(1, 0) when x was empty.
  no.filter <- seq_along(x)
  if (filter) {
    index <- -which(x %in% y)
    # No matches: `-integer(0)` would select nothing, so fall back to all.
    if (length(index) == 0L) {
      index <- no.filter
    }
    return(index)
  }
  no.filter
}
#' Gets top value per group
#'
#' Keeps, for each product group, only the row(s) with the maximal similarity
#' score. Rows whose sku has no entry in `groups` are dropped by the inner
#' join (nomatch = 0).
#'
#' @param dt data.table with an `sku` column and a `sim` similarity score
#' @param groups named vector of product groups (names = sku); when NULL the
#'   input is returned unchanged
#' @return Filtered data.table. NOTE(review): ties on max(sim) keep every
#'   tied row, so "one per group" holds only when the maximum is unique.
keepOnePerGroup <- function(dt, groups) {
  # NSE guards for R CMD check
  sim <- group <- NULL
  if(is.null(groups)) return(dt)
  # Append group data to the affinity table (inner join on sku).
  groups.table <- data.table(sku = names(groups), group = groups, key = "sku")
  dt <- dt[groups.table, nomatch = 0]
  # Get the best performing sku per group, where "group" can be a
  # combination of several columns, e.g.: c("visitor.id", "group").
  by.cols <- setdiff(colnames(dt), c("sku", "sim"))
  # .I trick to subset to the max-sim row per group, see:
  # http://stackoverflow.com/questions/16573995/subset-by-group-with-data-table
  dt <- dt[dt[, .I[sim == max(sim)], by = by.cols]$V1]
}
|
600869e2ae509a4591ffa11949196a3e23367809
|
5ffb742e954dd8f7376d694000f2a6a0201bf359
|
/Sociologia-Economica/sbs_sociologia_economica02.R
|
e963230def902088e2e7a1d42402dfdccc212ed9
|
[] |
no_license
|
neylsoncrepalde/Big-Data
|
9427187b77f0f4d0b6e83445a884ace9d58e61f2
|
94f5ced2580b47c0e057b57f8e80704481c3c2aa
|
refs/heads/master
| 2021-01-20T10:56:27.953269
| 2018-04-09T01:53:23
| 2018-04-09T01:53:23
| 64,178,077
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 451
|
r
|
sbs_sociologia_economica02.R
|
# SBS: inspect the keyword table and select the economic-sociology articles.

setwd('~/Documentos/Neylson Crepalde/Doutorado/GIARS/SBS_sociologia_economica')

library(readr)
library(descr)
library(magrittr)
library(dplyr)

# Load and inspect the keyword table.
keys.df <- read_csv('keywords_df.csv')
View(keys.df)

#keys <- unique(keys.df$keyword)

# Rows whose keyword matches "econom-" variants (accented or not, any case of e).
selecao <- grep("(e|E)con(ô|o)m", keys.df$keyword)
soc.econ <- keys.df[selecao, ]
#write.table(soc.econ, 'artigos_soc_econ.csv', sep=';', row.names = F)

# Number of distinct keywords among the selected articles.
length(unique(soc.econ$keyword))
|
7ea92e0e3875effe3b3fe04358ff3e67f41139dc
|
cf09c35b67ddbb4f158accf0dc95228e68dde863
|
/man/polya.Rd
|
915e1485e8596d9c95f873f0f0c6fb05e270a461
|
[] |
no_license
|
FlorisRoelofs/polya
|
4f220c5f113d88423f570e9247a920b18a0803c0
|
d8a97b184b95defc41a2acfff18887fdf2745469
|
refs/heads/master
| 2020-03-18T23:49:39.750985
| 2018-06-22T10:26:07
| 2018-06-22T10:26:07
| 135,431,377
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 247
|
rd
|
polya.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polya.R
\name{polya}
\alias{polya}
\title{polya urn function, call with polya()}
\usage{
polya()
}
\value{
value
}
\description{
polya urn function, call with polya()
}
|
6a958f0f43d7a29e6255db6e68d2c84b1709c04d
|
6c237020b6c5988fb7cd115905a13945d515cbd9
|
/docker/R/install_packages.R
|
6de20ba60d5173b140d9d44ff6c685a507e2a156
|
[
"BSD-2-Clause"
] |
permissive
|
sensecollective/OpenStudio-server
|
772278e842b7cbba485e4988a542199c7e194cba
|
50c99721c2c76cb84f86a3c867c1a0df2fd91f9b
|
refs/heads/master
| 2021-01-18T21:08:54.149001
| 2017-06-26T04:31:22
| 2017-06-26T04:31:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,134
|
r
|
install_packages.R
|
# Install Packages
# BUG FIX: the first line began with a stray `q` (`q# Install Packages`),
# which evaluated the base `q` function when the script was sourced.

# Shared CRAN mirrors (previously repeated in every call).
repos <- c('http://cloud.r-project.org', 'http://cran.r-project.org')

# CRAN packages required by the server, installed one by one so a single
# failure is easy to spot in the log.
pkgs <- c('lhs', 'e1071', 'triangle', 'RUnit', 'R.methodsS3', 'R.oo',
          'R.utils', 'NMOF', 'mco', 'rgenoud', 'conf.design', 'vcd',
          'combinat', 'DoE.base', 'xts', 'rjson', 'RSQLite', 'Rcpp',
          'plyr', 'ggplot2', 'sensitivity')
for (pkg in pkgs) {
  install.packages(pkg, repos = repos)
}

# Rserve needs a custom compile flag so it does not daemonize.
install.packages('Rserve', configure.args = c('PKG_CPPFLAGS=-DNODAEMON'), repos = repos)
|
30c24fb8b3a185b46421984b82d37145b566fdfb
|
6579fa137eda608c0cdb22fa7236667972fb57de
|
/man/deparse_js.Rd
|
19f914d7ed04b82166e42f22be1e4f4d204fd16b
|
[
"MIT"
] |
permissive
|
nischalshrestha/sketch
|
258a700bb0de38b5832861c7365605640c75cdbb
|
ab3258c3707aa768d31bfc7fe9c9d8916c8e1632
|
refs/heads/master
| 2022-11-24T16:54:46.185048
| 2019-12-09T07:57:32
| 2019-12-09T07:57:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 276
|
rd
|
deparse_js.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deparse.R
\name{deparse_js}
\alias{deparse_js}
\title{Expression Deparsing for JS}
\usage{
deparse_js(ast)
}
\arguments{
\item{ast}{A language object.}
}
\description{
Expression Deparsing for JS
}
|
433c431806450a8ff74543fdb6416082385afffb
|
97c2cfd517cdf2a348a3fcb73e9687003f472201
|
/R/src/Java1/R/JQQueue.R
|
5e8ebb15f11004fbebeb03c870719afd7edc274a
|
[] |
no_license
|
rsheftel/ratel
|
b1179fcc1ca55255d7b511a870a2b0b05b04b1a0
|
e1876f976c3e26012a5f39707275d52d77f329b8
|
refs/heads/master
| 2016-09-05T21:34:45.510667
| 2015-05-12T03:51:05
| 2015-05-12T03:51:05
| 32,461,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,433
|
r
|
JQQueue.R
|
# R wrappers around the Java class `jms.QQueue`, built with the project's
# `constructor`/`method` OO DSL; `jNew`/`jCall`/`jArray`/`.jcast` are
# rJava-style bridges — presumably provided by the project's Java layer
# (TODO confirm against the framework sources).

# Wrap an existing Java object reference (or NULL) as a JQQueue.
constructor("JQQueue", function(jobj = NULL) {
	extend(JObject(), "JQQueue", .jobj = jobj)
})

# Overloaded constructors: from an existing javax.jms.Queue plus broker,
# from a queue name plus broker, or from a queue name alone.
method("by_Queue_String", "JQQueue", enforceRCC = TRUE, function(static, q = NULL, broker = NULL, ...) {
	JQQueue(jNew("jms/QQueue", .jcast(q$.jobj, "javax.jms.Queue"), the(broker)))
})

method("by_String_String", "JQQueue", enforceRCC = TRUE, function(static, name = NULL, broker = NULL, ...) {
	JQQueue(jNew("jms/QQueue", the(name), the(broker)))
})

method("by_String", "JQQueue", enforceRCC = TRUE, function(static, name = NULL, ...) {
	JQQueue(jNew("jms/QQueue", the(name)))
})

# Shut the underlying Java queue down (void Java call).
method("shutdown", "JQQueue", enforceRCC = TRUE, function(this, ...) {
	jCall(this$.jobj, "V", "shutdown")
})

# Register a receiver object with the queue; returns the Java result
# wrapped as a generic JObject.
method("register_by_Object", "JQQueue", enforceRCC = TRUE, function(this, receiver = NULL, ...) {
	JObject(jobj = jCall(this$.jobj, "Ljava/lang/Object;", "register", .jcast(receiver$.jobj, "java.lang.Object")))
})

# Send a text response through the queue, with a heartbeat frequency in ms.
method("response_by_String_MessageReceiver_int", "JQQueue", enforceRCC = TRUE, function(this, text = NULL, receiver = NULL, heartbeatFrequencyMillis = NULL, ...) {
	jCall(this$.jobj, "V", "response", the(text), .jcast(receiver$.jobj, "jms.MessageReceiver"), theInteger(heartbeatFrequencyMillis))
})

# Static entry point forwarding to Java's QQueue.main(String[]).
method("main_by_StringArray", "JQQueue", enforceRCC = TRUE, function(static, args = NULL, ...) {
	jCall("jms/QQueue", "V", "main", jArray(args, "[Ljava/lang/String;"))
})
|
0addddfeb5ed3d45673296041148d98f768614c4
|
014a09363ab8169a989848d12e1c84992bbfe38f
|
/man/rown_col.Rd
|
fa93e5fe61043442f702fad97ad7edf0b0b5c856
|
[] |
no_license
|
bassam-abulnoor/TestDimorph
|
45b412e5ab0747b51fa6cb68ad3c9a5c19b4958a
|
f5d733b3bf2ee8d684d21fe5888afd5ceb2e60f4
|
refs/heads/master
| 2021-07-20T05:36:15.983925
| 2021-01-24T18:36:44
| 2021-01-24T18:36:44
| 234,365,880
| 1
| 0
| null | 2021-01-02T19:29:37
| 2020-01-16T16:46:23
|
R
|
UTF-8
|
R
| false
| true
| 325
|
rd
|
rown_col.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{rown_col}
\alias{rown_col}
\title{rown_col}
\usage{
rown_col(x, var)
}
\arguments{
\item{x}{dataframe with rownames}
\item{var}{new column name}
}
\description{
rown_col
}
\details{
convert rownames to column
}
\keyword{internal}
|
7780d0741f0336d79884a1a3f06197ed3e32d1a0
|
5d0bc9fa9c48a468d115e9930f5eac66a0764789
|
/inst/snippets/Exploration9.1.6.R
|
a143827d19240b80aeecec71e9e9f9e4a827b0a1
|
[] |
no_license
|
rpruim/ISIwithR
|
a48aac902c9a25b857d2fd9c81cb2fc0eb0e848e
|
7703172a2d854516348267c87319ace046508eef
|
refs/heads/master
| 2020-04-15T20:36:55.171770
| 2015-05-21T09:20:21
| 2015-05-21T09:20:21
| 21,158,247
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 50
|
r
|
Exploration9.1.6.R
|
favstats(brain_change ~ treatment, data = Brain)
|
cc33f1fffb8af34ae4cdbc9691b70085e772031a
|
03f4102ab9929f3c1e8e6c38172d85294eaa7fa5
|
/Nearest Neighbor Code.R
|
3faf9a539452d131993bababfa04cf3ccd2ba5d6
|
[] |
no_license
|
treypujats/Heuristics
|
1d180b4149b1a77d6a65ceb31dc1a3ae38694367
|
4f94f5b15946bb024e261c1846f014d6d22facda
|
refs/heads/main
| 2023-06-21T05:57:19.552504
| 2021-08-12T15:43:55
| 2021-08-12T15:43:55
| 395,366,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 668
|
r
|
Nearest Neighbor Code.R
|
# Greedy cheapest-edge tour construction ("Nearest Neighbor" lab script).
# Reads a square arc-cost table and repeatedly adds the cheapest unused arc
# whose two endpoints each still have degree < 2.
#
# Fixes over the original:
#   * `arcs(i, j)` called the data frame like a function (runtime error)
#     -> matrix subscript `arcs[i, j]` after coercing the tibble to a matrix.
#   * `for (i in n)` looped over the single value n -> `seq_len(n)`.
#   * the chosen arc was recorded as `arcstoured[i, j]` using stale loop
#     indices -> record the selected endpoints `[k, z]` (both directions).
#   * added a stall guard: without it the while-loop spins forever once no
#     feasible arc remains. (The algorithm still has no subtour elimination,
#     so the result may be several disjoint cycles — TODO confirm intent.)
#   * dropped `rm(list = ls())`: scripts should not wipe the global env.

library(readxl)

arcs <- as.matrix(read_excel("OPER 623 - Heuristics/Lab 1 Data.xlsx"))
n <- nrow(arcs)

visited <- rep(0, n)                         # current degree of each node
arcstoured <- matrix(0, nrow = n, ncol = n)  # 1 = arc already selected
num_visited <- 0
total_cost <- 0
nearest_cost <- Inf

while (num_visited < 2 * n) {
  k <- z <- NA_integer_
  # Scan all ordered pairs for the cheapest feasible, unused arc.
  for (i in seq_len(n)) {
    for (j in seq_len(n)) {
      if (i != j && arcs[i, j] < nearest_cost && arcstoured[i, j] != 1 &&
          visited[i] < 2 && visited[j] < 2) {
        nearest_cost <- arcs[i, j]
        k <- i
        z <- j
      }
    }
  }
  if (is.na(k)) {
    warning("no feasible arc left; stopping early")
    break
  }
  total_cost <- total_cost + nearest_cost
  visited[k] <- visited[k] + 1
  visited[z] <- visited[z] + 1
  num_visited <- sum(visited)
  nearest_cost <- Inf
  # Mark the chosen arc in both directions so it cannot be picked again.
  arcstoured[k, z] <- 1
  arcstoured[z, k] <- 1
}

arcstoured
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.