content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)

# States of Malaysia offered in the scatterplot's "Select State" dropdown.
choices <- list("Johor", "Kelantan", "Melaka",
                "N. Sembilan", "P.Pinang", "Pahang", "Perak", "Perlis", "Sabah",
                "Sarawak", "Selangor", "Terengganu", "WPKL")

# Define UI for the dengue-outbreak explorer.
shinyUI(fluidPage(
  # Application title
  titlePanel("2012-2015 Malaysia Dengue Outbreak"),
  # Sidebar holds usage notes plus the year slider shared by the tab panels.
  sidebarLayout(
    sidebarPanel(h3('Github URL'),
                 h5('https://github.com/hongt/DevelopDataProduct'),
                 h1(' '),
                 h5('Tab panel Overview Dengue Table Data(30 rows) - Choose Year in Slider below'),
                 h5('Tab panel PLOT DENGUE Cases Barchart - Overall of State and Year overview ONLY'),
                 h5('Tab panel PLOT DENGUE Outbreak ScatterPlot - Choose Year in Slider below and then choose State in Tab Panel'),
                 # BUG FIX: the initial slider value must lie within
                 # [min, max]; the previous value of 1 was silently
                 # clamped by Shiny. Start at the first year instead.
                 sliderInput("yyear",
                             "Year:",
                             min = 2012,
                             max = 2015,
                             value = 2012, sep = "")
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Overview Dengue Table", tableOutput("data_table")),
        tabPanel("PLOT DENGUE Cases Barchart", plotOutput("distPlot")),
        tabPanel("PLOT DENGUE Outbreak ScatterPlot",
                 tabPanel("Panel1", selectInput("select_State", h3("Select State"),
                                                choices)),
                 plotOutput("distScatter")))
    )
  )
))
| /ui.R | no_license | hongt/DevelopDataProduct | R | false | false | 1,748 | r | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)

# States of Malaysia offered in the scatterplot's "Select State" dropdown.
choices <- list("Johor", "Kelantan", "Melaka",
                "N. Sembilan", "P.Pinang", "Pahang", "Perak", "Perlis", "Sabah",
                "Sarawak", "Selangor", "Terengganu", "WPKL")

# Define UI for the dengue-outbreak explorer.
shinyUI(fluidPage(
  # Application title
  titlePanel("2012-2015 Malaysia Dengue Outbreak"),
  # Sidebar holds usage notes plus the year slider shared by the tab panels.
  sidebarLayout(
    sidebarPanel(h3('Github URL'),
                 h5('https://github.com/hongt/DevelopDataProduct'),
                 h1(' '),
                 h5('Tab panel Overview Dengue Table Data(30 rows) - Choose Year in Slider below'),
                 h5('Tab panel PLOT DENGUE Cases Barchart - Overall of State and Year overview ONLY'),
                 h5('Tab panel PLOT DENGUE Outbreak ScatterPlot - Choose Year in Slider below and then choose State in Tab Panel'),
                 # BUG FIX: the initial slider value must lie within
                 # [min, max]; the previous value of 1 was silently
                 # clamped by Shiny. Start at the first year instead.
                 sliderInput("yyear",
                             "Year:",
                             min = 2012,
                             max = 2015,
                             value = 2012, sep = "")
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Overview Dengue Table", tableOutput("data_table")),
        tabPanel("PLOT DENGUE Cases Barchart", plotOutput("distPlot")),
        tabPanel("PLOT DENGUE Outbreak ScatterPlot",
                 tabPanel("Panel1", selectInput("select_State", h3("Select State"),
                                                choices)),
                 plotOutput("distScatter")))
    )
  )
))
|
## @knitr load.lib
# Attach every package used downstream (text mining, tidyverse, graphs).
# NOTE: the chunk previously began with rm(list = ls()); clearing the
# global environment from inside a sourced script is an anti-pattern
# (it destroys the caller's objects), so it has been removed — restart
# the R session instead when a clean slate is needed.
library(wordcloud)
library(tidyverse)
library(dplyr)
library(stringr)
library(tidytext)
library(tm)
library(textstem)
library(tidyr)
library(forcats)
library(igraph)
library(ggraph)
library(widyr)
## @knitr read.data
# Load the raw job adverts and keep only analytics-related postings:
# titles containing any of the keywords below (matched case-insensitively
# by lower-casing first; "statisti" catches statistician/statistics/
# statistical).
data <- read_csv("Data/Train_rev1.csv")
train_data_jobs <- data %>% mutate(Title = as.character(tolower(Title))) %>%
filter(str_detect(Title, paste(c("analyst", "data", "business intelligence",
"statisti"),collapse = '|')))
## @knitr corpus
# n_distinct(train_data_jobs$Category)
# Build a tm corpus with one document per advert's full description.
job_corpus <- VCorpus(VectorSource(train_data_jobs$FullDescription))
# lapply(job_corpus[1:5], as.character)
## @knitr cleaning
# Custom stop words for job adverts: boilerplate terms carrying no signal
# about the job's content.  Defined ONCE so the tm pipeline and the
# tidytext anti_join (via mystopwords) cannot drift out of sync — the
# original repeated the list verbatim in both places.
extra_stopwords <- c('will', 'work', 'skill', 'job', 'role', 'required', 'within', 'please',
                     'opportunity', 'successful', 'requirements', 'working', 'ability', 'looking',
                     'knowledge', 'skills', 'good', 'key', 'strong', 'new', 'requirement',
                     'abilities')
# changing to lower case
job_corpus_clean <- tm_map(job_corpus, content_transformer(tolower))
# remove numbers from the text
job_corpus_clean <- tm_map(job_corpus_clean, removeNumbers)
# remove standard English stop words plus the custom list above
job_corpus_clean <- tm_map(job_corpus_clean, removeWords,
                           c(stopwords(), extra_stopwords))
# data-frame form of the custom list, for anti_join() in later chunks
mystopwords <- data.frame(word = extra_stopwords)
# remove punctuations
job_corpus_clean <- tm_map(job_corpus_clean, removePunctuation)
# stemming intentionally disabled; lemmatization is applied later instead
# job_corpus_clean <- tm_map(job_corpus_clean, stemDocument)
# collapse runs of whitespace left behind by the removals above
job_corpus_clean <- tm_map(job_corpus_clean, stripWhitespace)
# lapply(job_corpus_clean[1:5], as.character)
## @knitr word_tf
# Per-category term frequency: tokenize descriptions, lemmatize each
# token, drop standard and custom stop words, then compute TF as the
# word's share of all tokens in its category.
Word_TF <- train_data_jobs_clean %>%
unnest_tokens(word, FullDescription) %>%
mutate(word=lemmatize_strings(word)) %>%
anti_join(stop_words, by = 'word') %>%
anti_join(mystopwords, by = 'word') %>%
group_by(Category) %>%
count(word,sort=TRUE) %>%
ungroup() %>% group_by(Category) %>%
mutate(TF = n/sum(n))
# Rank words within each category by descending TF (for the Zipf plot).
freq_by_rank <- Word_TF %>% arrange(Category, desc(TF)) %>%
group_by(Category) %>%
mutate(rank = row_number())
## @knitr frequency.plot
# Zipf's-law style plot: log-log rank vs frequency per category; the
# dashed abline marks a fitted power-law reference (slope -1.1).
p1 <- freq_by_rank %>%
ggplot(aes(rank, TF, color = Category)) +
geom_abline(intercept = -0.62, slope = -1.1, color = "gray50", linetype = 2) +
geom_line(size = 1, alpha = 0.5, show.legend = FALSE) +
scale_x_log10() +
scale_y_log10() +
theme_bw()
## @knitr tf.idf
# Inverse document frequency, treating each CATEGORY as one "document":
# nDoc = number of categories containing the word, idf = log(N / nDoc),
# where N (ndoc) is the total number of categories.
Word_IDF <- train_data_jobs_clean %>%
unnest_tokens(word, FullDescription) %>%
mutate(word=lemmatize_strings(word)) %>%
anti_join(stop_words, by = 'word') %>%
anti_join(mystopwords, by = 'word') %>%
group_by(Category) %>%
count(word,sort=TRUE) %>% ungroup() %>%
group_by(word) %>%
summarise(nDoc=n()) %>%
mutate(nTotalDoc=ndoc,
idf = log(nTotalDoc/nDoc)) %>%
arrange(idf)
# Combine TF and IDF into TF-IDF per (word, Category).
Word_Tf_IDF <- merge(Word_TF,Word_IDF,by="word") %>%
mutate(TFIDF = TF*idf) %>%
arrange(Category,desc(TFIDF)) %>% ungroup()
# Share of adverts per category, with cumulative proportion.
category_dist <- train_data_jobs_clean %>% group_by(Category) %>%
summarise(Freq = n()) %>%
mutate(prop = Freq/sum(Freq)*100) %>% #Computing proportion of each industry in data
arrange(desc(prop)) %>%
mutate(cum_prop = cumsum(prop))
## @knitr proportion.plot
# Horizontal bar chart of category shares, largest at the top.
p2 <- category_dist %>%
ggplot(aes(x=fct_reorder(Category, prop), y = prop)) +
geom_col() +
coord_flip() +
labs(x='Job Category', y = 'Proportion in Data', title = 'Proportion of Categories in Data') +
theme_bw()
## @knitr word.tf.idf
# Restrict the facet plot to the 10 largest categories, minus the
# catch-all "Other/General Jobs" bucket.
selected_categories <- category_dist %>% slice(1:10) %>% select(Category) %>%
filter(Category != "Other/General Jobs")
# stringr::str_detect(Word_Tf_IDF$word, '^www')
# Top TF-IDF words per selected category (URLs starting 'www' excluded).
# NOTE(review): top_n(15) ranks by the LAST column, which here is TFIDF;
# consider spelling it out as top_n(15, TFIDF) for clarity.
p3 <- Word_Tf_IDF %>% filter(!str_detect(word, '^www')) %>%
inner_join(selected_categories, by = "Category") %>%
arrange(desc(TFIDF)) %>%
mutate(word = factor(word, levels = rev(unique(word)))) %>%
group_by(Category) %>%
top_n(15) %>%
ungroup %>%
ggplot(aes(word, TFIDF, fill = Category)) +
geom_col(show.legend = FALSE) +
labs(x = NULL, y = "tf-idf") +
facet_wrap(~Category, ncol = 3, scales = "free") +
coord_flip() +
theme_bw()
## @knitr wordcloud1
# Fixed seed so wordcloud's random placement is reproducible.
set.seed(1234)
# Word frequencies per (word, Category), stop words removed, sorted by
# category then descending frequency; shared by the three clouds below.
tokens <- train_data_jobs_clean %>%
select(Category, FullDescription) %>%
unnest_tokens(word, FullDescription) %>%
anti_join(stop_words, by = 'word') %>%
count(word, Category, sort = TRUE) %>%
ungroup() %>%
group_by(word, Category) %>%
summarise(freq = sum(n)) %>%
arrange(Category, desc(freq))
# Word cloud for IT adverts (top 300 words, 35% rotated).
tokens %>%
filter(Category == "IT") %>%
select(word, freq) %>%
with(wordcloud(word, freq, max.words=300, random.order=FALSE,
rot.per=0.35, colors=brewer.pal(8, 'Dark2')))
# wordcloud2::wordcloud2(color = brewer.pal(8, "Dark2"),minRotation = -pi/6, maxRotation = -pi/6, minSize = 10,
# rotateRatio = 1)
## @knitr wordcloud2
# Word cloud for Accounting & Finance adverts.
tokens %>%
filter(Category == "Accounting & Finance") %>%
select(word, freq) %>%
with(wordcloud(word, freq, max.words=300, random.order=FALSE,
rot.per=0.35, colors=brewer.pal(8, 'Dark2')))
# wordcloud2::wordcloud2(color = brewer.pal(8, "Dark2"),minRotation = pi/6, maxRotation = pi/6, minSize = 10,
# rotateRatio = 1)
## @knitr wordcloud3
# Word cloud for Healthcare & Nursing adverts.
tokens %>%
filter(Category == "Healthcare & Nursing") %>%
select(word, freq) %>%
with(wordcloud(word, freq, max.words=300, random.order=FALSE,
rot.per=0.35, colors=brewer.pal(8, 'Dark2')))
# wordcloud2::wordcloud2(color = brewer.pal(8, "Dark2"))
## @knitr biagramsNetwork
# Bigram counts across all descriptions; bigrams containing a stop word
# in either position are discarded after splitting into word1/word2.
wordFreq <- train_data_jobs_clean %>%
unnest_tokens(bigram, FullDescription, token = "ngrams", n = 2) %>%
count(bigram) %>%
separate(bigram, c("word1", "word2"), sep = " ") %>%
filter(!word1 %in% stop_words$word) %>%
filter(!word2 %in% stop_words$word)
# Directed graph of bigrams appearing more than 200 times.
bigram_graph <- wordFreq %>%
filter(n > 200) %>%
graph_from_data_frame()
arrows <- grid::arrow(type = "closed", length = unit(.1, "inches"), angle = 30)
# Force-directed ("fr") network plot; edge opacity encodes bigram count.
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE, arrow = arrows) +
geom_node_point(color = "lightblue", size = 2) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1, check_overlap = TRUE) +
theme_void()
## @knitr correlation
# Key terms whose strongest pairwise word correlations we inspect below.
keyWords <- c("analyst","excel","manager","qa", 'sql')

# Pairwise word correlations (widyr::pairwise_cor, phi coefficient over
# advert Ids) within one job category.  min_n drops words appearing
# fewer than min_n times so the correlation matrix stays tractable —
# IT / Finance are large (200), Healthcare is small (20).
category_word_cor <- function(category, min_n) {
  train_data_jobs_clean %>%
    filter(Category == category) %>%
    unnest_tokens(word, FullDescription) %>%
    anti_join(stop_words, by = 'word') %>%
    group_by(word) %>%
    filter(n() >= min_n) %>%
    pairwise_cor(word, Id, sort = TRUE) %>%
    ungroup()
}

# Top-20 correlated partners for each word of interest, plus an xOrder
# column giving each row a unique plotting position for the facets.
top_cor_words <- function(word_cor, words) {
  word_cor %>%
    filter(item1 %in% words) %>%
    group_by(item1) %>%
    arrange(desc(correlation)) %>%
    slice(1:20) %>%
    ungroup() %>%
    mutate(xOrder = n():1)
}

# Faceted horizontal bar chart of the correlations computed above.
plot_top_cor <- function(plDF, title) {
  plDF %>%
    ggplot(aes(x = xOrder, y = correlation, fill = item1)) +
    geom_bar(stat = "identity", show.legend = FALSE) +
    facet_wrap(~ item1, scales = 'free', nrow = 1) +
    scale_x_continuous(breaks = plDF$xOrder, labels = plDF$item2, expand = c(0, 0)) +
    coord_flip() +
    theme_bw() +
    theme(legend.position = "none") +
    labs(x = 'Word', y = 'Correlations', title = title)
}

# The three categories reuse the helpers; output names are unchanged so
# downstream chunks keep working.
wordCorIT <- category_word_cor('IT', 200)
plDF.it <- top_cor_words(wordCorIT, keyWords)
cor.it <- plot_top_cor(plDF.it, 'Top Correlated Words in IT')

wordCorFIN <- category_word_cor('Accounting & Finance', 200)
plDF.fin <- top_cor_words(wordCorFIN, c(keyWords, 'sas'))
cor.fin <- plot_top_cor(plDF.fin, 'Top Correlated Words in Accounting & Finance')

wordCorHealth <- category_word_cor('Healthcare & Nursing', 20)
plDF.health <- top_cor_words(wordCorHealth, c(keyWords, 'sas', 'statistics', 'database'))
cor.health <- plot_top_cor(plDF.health, 'Top Correlated Words in Healthcare & Nursing')
## @knitr topic.modelling
# LDA topic model on the cleaned corpus: 9 topics, fixed seed for
# reproducibility.
jobs_dtm <- DocumentTermMatrix(job_corpus_clean)
library(topicmodels)
jobs_lda <- LDA(jobs_dtm, k = 9, control = list(seed = 1234))
# Per-topic word probabilities (beta matrix) in tidy form.
job_topics <- tidy(jobs_lda, matrix = "beta")
job_topics
# Top-20 words per topic by beta.
job_top_terms <- job_topics %>%
group_by(topic) %>%
top_n(20, beta) %>%
ungroup() %>%
arrange(topic, -beta)
# Faceted bar chart of the top words for each topic.
job_top_terms %>%
mutate(term = fct_reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~topic, scales = "free") +
coord_flip() +
theme_bw()
# Per-document topic proportions (gamma matrix) in tidy form.
job_documents <- tidy(jobs_lda, matrix = "gamma")
job_documents
| /Code_v2.R | no_license | linpingyu/Unstructured-Data | R | false | false | 10,652 | r |
## @knitr load.lib
# Attach every package used downstream (text mining, tidyverse, graphs).
# NOTE: the chunk previously began with rm(list = ls()); clearing the
# global environment from inside a sourced script is an anti-pattern
# (it destroys the caller's objects), so it has been removed — restart
# the R session instead when a clean slate is needed.
library(wordcloud)
library(tidyverse)
library(dplyr)
library(stringr)
library(tidytext)
library(tm)
library(textstem)
library(tidyr)
library(forcats)
library(igraph)
library(ggraph)
library(widyr)
## @knitr read.data
data <- read_csv("Data/Train_rev1.csv")
train_data_jobs <- data %>% mutate(Title = as.character(tolower(Title))) %>%
filter(str_detect(Title, paste(c("analyst", "data", "business intelligence",
"statisti"),collapse = '|')))
## @knitr corpus
# n_distinct(train_data_jobs$Category)
job_corpus <- VCorpus(VectorSource(train_data_jobs$FullDescription))
# lapply(job_corpus[1:5], as.character)
## @knitr cleaning
#changing to lower case
job_corpus_clean <- tm_map(job_corpus, content_transformer(tolower))
#remove numbers from the text
job_corpus_clean <- tm_map(job_corpus_clean, removeNumbers)
#remove stop words
job_corpus_clean <- tm_map(job_corpus_clean, removeWords,
c(stopwords(), 'will', 'work', 'skill', 'job', 'role', 'required', 'within', 'please',
'opportunity', 'successful', 'requirements', 'working', 'ability', 'looking',
'knowledge', 'skills', 'work', 'good', 'key', 'strong', 'new', 'requirement',
'abilities'))
mystopwords <- data.frame(word = c("will", 'work', 'skill', 'job', 'role', 'required', 'within', 'please',
'opportunity', 'successful', 'requirements', 'working', 'ability', 'looking',
'knowledge', 'skills', 'work', 'good', 'key', 'strong', 'new', 'requirement',
'abilities'))
#remove punctuations
job_corpus_clean <- tm_map(job_corpus_clean, removePunctuation)
#stemming the words to their root words - fucking things
#job_corpus_clean <- tm_map(job_corpus_clean, stemDocument)
# removing extra spaces from the text
job_corpus_clean <- tm_map(job_corpus_clean, stripWhitespace)
# lapply(job_corpus_clean[1:5], as.character)
FullDescription <- data.frame(text=unlist(sapply(job_corpus_clean, `[`, "content")),
stringsAsFactors=F)
train_data_jobs_clean <- train_data_jobs %>% select(-FullDescription) %>% cbind(FullDescription)
ndoc <- length(unique(train_data_jobs_clean$Category))
train_data_jobs_clean <- train_data_jobs_clean %>%
mutate(text = as.character(text)) %>% rename(FullDescription = text) %>%
mutate(Category = gsub(' Jobs', '', Category),
Category = as.factor(Category))
train_data_jobs_clean$FullDescription <- sapply(train_data_jobs_clean$FullDescription,
function(row) iconv(row, "latin1", "ASCII", sub=""))
## @knitr word_tf
Word_TF <- train_data_jobs_clean %>%
unnest_tokens(word, FullDescription) %>%
mutate(word=lemmatize_strings(word)) %>%
anti_join(stop_words, by = 'word') %>%
anti_join(mystopwords, by = 'word') %>%
group_by(Category) %>%
count(word,sort=TRUE) %>%
ungroup() %>% group_by(Category) %>%
mutate(TF = n/sum(n))
freq_by_rank <- Word_TF %>% arrange(Category, desc(TF)) %>%
group_by(Category) %>%
mutate(rank = row_number())
## @knitr frequency.plot
p1 <- freq_by_rank %>%
ggplot(aes(rank, TF, color = Category)) +
geom_abline(intercept = -0.62, slope = -1.1, color = "gray50", linetype = 2) +
geom_line(size = 1, alpha = 0.5, show.legend = FALSE) +
scale_x_log10() +
scale_y_log10() +
theme_bw()
## @knitr tf.idf
Word_IDF <- train_data_jobs_clean %>%
unnest_tokens(word, FullDescription) %>%
mutate(word=lemmatize_strings(word)) %>%
anti_join(stop_words, by = 'word') %>%
anti_join(mystopwords, by = 'word') %>%
group_by(Category) %>%
count(word,sort=TRUE) %>% ungroup() %>%
group_by(word) %>%
summarise(nDoc=n()) %>%
mutate(nTotalDoc=ndoc,
idf = log(nTotalDoc/nDoc)) %>%
arrange(idf)
Word_Tf_IDF <- merge(Word_TF,Word_IDF,by="word") %>%
mutate(TFIDF = TF*idf) %>%
arrange(Category,desc(TFIDF)) %>% ungroup()
category_dist <- train_data_jobs_clean %>% group_by(Category) %>%
summarise(Freq = n()) %>%
mutate(prop = Freq/sum(Freq)*100) %>% #Computing proportion of each industry in data
arrange(desc(prop)) %>%
mutate(cum_prop = cumsum(prop))
## @knitr proportion.plot
p2 <- category_dist %>%
ggplot(aes(x=fct_reorder(Category, prop), y = prop)) +
geom_col() +
coord_flip() +
labs(x='Job Category', y = 'Proportion in Data', title = 'Proportion of Categories in Data') +
theme_bw()
## @knitr word.tf.idf
selected_categories <- category_dist %>% slice(1:10) %>% select(Category) %>%
filter(Category != "Other/General Jobs")
# stringr::str_detect(Word_Tf_IDF$word, '^www')
p3 <- Word_Tf_IDF %>% filter(!str_detect(word, '^www')) %>%
inner_join(selected_categories, by = "Category") %>%
arrange(desc(TFIDF)) %>%
mutate(word = factor(word, levels = rev(unique(word)))) %>%
group_by(Category) %>%
top_n(15) %>%
ungroup %>%
ggplot(aes(word, TFIDF, fill = Category)) +
geom_col(show.legend = FALSE) +
labs(x = NULL, y = "tf-idf") +
facet_wrap(~Category, ncol = 3, scales = "free") +
coord_flip() +
theme_bw()
## @knitr wordcloud1
set.seed(1234)
tokens <- train_data_jobs_clean %>%
select(Category, FullDescription) %>%
unnest_tokens(word, FullDescription) %>%
anti_join(stop_words, by = 'word') %>%
count(word, Category, sort = TRUE) %>%
ungroup() %>%
group_by(word, Category) %>%
summarise(freq = sum(n)) %>%
arrange(Category, desc(freq))
tokens %>%
filter(Category == "IT") %>%
select(word, freq) %>%
with(wordcloud(word, freq, max.words=300, random.order=FALSE,
rot.per=0.35, colors=brewer.pal(8, 'Dark2')))
# wordcloud2::wordcloud2(color = brewer.pal(8, "Dark2"),minRotation = -pi/6, maxRotation = -pi/6, minSize = 10,
# rotateRatio = 1)
## @knitr wordcloud2
tokens %>%
filter(Category == "Accounting & Finance") %>%
select(word, freq) %>%
with(wordcloud(word, freq, max.words=300, random.order=FALSE,
rot.per=0.35, colors=brewer.pal(8, 'Dark2')))
# wordcloud2::wordcloud2(color = brewer.pal(8, "Dark2"),minRotation = pi/6, maxRotation = pi/6, minSize = 10,
# rotateRatio = 1)
## @knitr wordcloud3
tokens %>%
filter(Category == "Healthcare & Nursing") %>%
select(word, freq) %>%
with(wordcloud(word, freq, max.words=300, random.order=FALSE,
rot.per=0.35, colors=brewer.pal(8, 'Dark2')))
# wordcloud2::wordcloud2(color = brewer.pal(8, "Dark2"))
## @knitr biagramsNetwork
wordFreq <- train_data_jobs_clean %>%
unnest_tokens(bigram, FullDescription, token = "ngrams", n = 2) %>%
count(bigram) %>%
separate(bigram, c("word1", "word2"), sep = " ") %>%
filter(!word1 %in% stop_words$word) %>%
filter(!word2 %in% stop_words$word)
bigram_graph <- wordFreq %>%
filter(n > 200) %>%
graph_from_data_frame()
arrows <- grid::arrow(type = "closed", length = unit(.1, "inches"), angle = 30)
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE, arrow = arrows) +
geom_node_point(color = "lightblue", size = 2) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1, check_overlap = TRUE) +
theme_void()
## @knitr correlation
keyWords <- c("analyst","excel","manager","qa", 'sql')
wordCorIT <- train_data_jobs_clean %>%
filter(Category == 'IT') %>%
unnest_tokens(word, FullDescription) %>%
anti_join(stop_words) %>%
group_by(word) %>%
filter(n() >= 200) %>%
pairwise_cor(word, Id, sort = TRUE) %>%
ungroup()
plDF.it <- wordCorIT %>%
filter(item1 %in% keyWords) %>%
group_by(item1) %>%
arrange(desc(correlation)) %>%
slice(1:20) %>%
ungroup() %>%
mutate(xOrder = n():1)
cor.it <- plDF.it %>%
ggplot(aes(x=xOrder,y=correlation,fill=item1)) +
geom_bar(stat = "identity", show.legend = FALSE) +
facet_wrap(~ item1,scales='free',nrow=1) +
scale_x_continuous(breaks = plDF.it$xOrder,labels = plDF.it$item2,expand = c(0,0)) +
coord_flip()+
theme_bw()+
theme(legend.position = "none")+
labs(x='Word',y='Correlations',
title = 'Top Correlated Words in IT')
wordCorFIN <- train_data_jobs_clean %>%
filter(Category == 'Accounting & Finance') %>%
unnest_tokens(word, FullDescription) %>%
anti_join(stop_words) %>%
group_by(word) %>%
filter(n() >= 200) %>%
pairwise_cor(word, Id, sort = TRUE) %>%
ungroup()
plDF.fin <- wordCorFIN %>%
filter(item1 %in% c(keyWords, 'sas')) %>%
group_by(item1) %>%
arrange(desc(correlation)) %>%
slice(1:20) %>%
ungroup() %>%
mutate(xOrder = n():1)
cor.fin <- plDF.fin %>%
ggplot(aes(x=xOrder,y=correlation,fill=item1)) +
geom_bar(stat = "identity", show.legend = FALSE) +
facet_wrap(~ item1,scales='free',nrow=1) +
scale_x_continuous(breaks = plDF.fin$xOrder,labels = plDF.fin$item2,expand = c(0,0)) +
coord_flip()+
theme_bw()+
theme(legend.position = "none")+
labs(x='Word',y='Correlations',
title = 'Top Correlated Words in Accounting & Finance')
wordCorHealth <- train_data_jobs_clean %>%
filter(Category == 'Healthcare & Nursing') %>%
unnest_tokens(word, FullDescription) %>%
anti_join(stop_words) %>%
group_by(word) %>%
filter(n() >= 20) %>%
pairwise_cor(word, Id, sort = TRUE) %>%
ungroup()
plDF.health <- wordCorHealth %>%
filter(item1 %in% c(keyWords,'sas','statistics','database')) %>%
group_by(item1) %>%
arrange(desc(correlation)) %>%
slice(1:20) %>%
ungroup() %>%
mutate(xOrder = n():1)
cor.health <- plDF.health %>%
ggplot(aes(x=xOrder,y=correlation,fill=item1)) +
geom_bar(stat = "identity", show.legend = FALSE) +
facet_wrap(~ item1,scales='free',nrow=1) +
scale_x_continuous(breaks = plDF.health$xOrder,labels = plDF.health$item2,expand = c(0,0)) +
coord_flip()+
theme_bw()+
theme(legend.position = "none")+
labs(x='Word',y='Correlations',
title = 'Top Correlated Words in Healthcare & Nursing')
## @knitr topic.modelling
jobs_dtm <- DocumentTermMatrix(job_corpus_clean)
library(topicmodels)
jobs_lda <- LDA(jobs_dtm, k = 9, control = list(seed = 1234))
job_topics <- tidy(jobs_lda, matrix = "beta")
job_topics
job_top_terms <- job_topics %>%
group_by(topic) %>%
top_n(20, beta) %>%
ungroup() %>%
arrange(topic, -beta)
job_top_terms %>%
mutate(term = fct_reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = factor(topic))) +
geom_col(show.legend = FALSE) +
facet_wrap(~topic, scales = "free") +
coord_flip() +
theme_bw()
job_documents <- tidy(jobs_lda, matrix = "gamma")
job_documents
|
# Test that chunk outputs match original data system
context("oldnew")
library(readr)

# Master switch for the expensive old-vs-new comparison tests.
# Use TRUE/FALSE rather than the re-assignable shorthand T/F.
OLDNEW <- FALSE # Run old-new tests?
if (OLDNEW){
  test_that("matches old data system output", {
    # Skip under code-coverage runs: this test is slow, and its purpose is
    # comparing chunk outputs with the old data system, not exercising
    # chunk functionality.
    if(isTRUE(as.logical(Sys.getenv("gcamdata.is_coverage_test")))) {
      skip("Skip old new when only interested in code coverage")
    }
    # Output data lives in OUTPUTS_DIR under the package top level
    # (this file runs from tests/testthat, hence the ../..).
    outputs_dir <- normalizePath(file.path("../..", OUTPUTS_DIR))
    xml_dir <- normalizePath(file.path("../..", XML_DIR))
    if(identical(Sys.getenv("GITHUB_ACTIONS"), "true")) {
      # On GitHub Actions we must run the driver so chunk outputs are
      # saved; locally we don't, to speed things up.  XML is not written
      # because CI has no gcamdata.xml_cmpdir for the XML OLD/NEW anyway.
      gcam_data_map <- driver(write_outputs = TRUE, write_xml = FALSE, quiet = TRUE,
                              outdir = outputs_dir, xmldir = xml_dir,
                              return_data_map_only = TRUE)
      # The next two expectations run only on CI: R CMD CHECK removes
      # outputs/ locally, so they would fail there.
      expect_equivalent(file.access(outputs_dir, mode = 4), 0, # outputs_dir exists and is readable
                        info = paste("Directory", outputs_dir, "unreadable or does not exist from", getwd()))
      expect_true(file.info(outputs_dir)$isdir)
      # Compare the data map returned by the driver with the pre-packaged
      # version; they should match.
      # See https://github.com/JGCRI/gcamdata/pull/751#issuecomment-331578990
      # Put both into the same order first (order can vary by platform).
      gcam_data_map <- arrange(gcam_data_map, name, output)
      data("GCAM_DATA_MAP")
      gdm_internal <- arrange(GCAM_DATA_MAP, name, output)
      # The data map generated on CI lacks the proprietary IEA data, so
      # its comments and units may differ; those columns are not compared.
      expect_true(tibble::is_tibble(gdm_internal))
      expect_true(tibble::is_tibble(gcam_data_map))
      expect_identical(dim(gdm_internal), dim(gcam_data_map), info =
                         "GCAM_DATA_MAP dimensions don't match. Rerun generate_package_data to update.")
      expect_identical(gdm_internal$name, gcam_data_map$name, info =
                         "GCAM_DATA_MAP name doesn't match. Rerun generate_package_data to update.")
      expect_identical(gdm_internal$output, gcam_data_map$output, info = "GCAM_DATA_MAP output doesn't match")
      expect_identical(gdm_internal$precursors, gcam_data_map$precursors, info =
                         "GCAM_DATA_MAP precursors doesn't match. Rerun generate_package_data to update.")
    }
    # Files in OUTPUTS_DIR for which we will make OLD/NEW comparisons.
    new_files <- list.files(outputs_dir, full.names = TRUE)
    if(length(new_files) == 0) {
      # No "NEW" outputs available, nothing to compare.
      skip("no output data found for comparison")
    } else if(!require("gcamdata.compdata", quietly = TRUE)) {
      # The "OLD" outputs come from the gcamdata.compdata package.
      skip("gcamdata.compdata package not available")
    } else {
      # Accumulates the names of outputs that no longer match.
      changed_outputs <- c()
      # For each file in OUTPUTS_DIR, find its comparison data, load
      # both, and compare.
      for(newf in list.files(outputs_dir, full.names = TRUE)) {
        # Chunks tag data that cannot be compared; save_chunkdata puts
        # the flag(s) on the first line of the file.
        new_firstline <- readLines(newf, n = 1)
        if(grepl(FLAG_NO_TEST, new_firstline)) {
          next
        }
        newdata <- read_csv(newf, comment = COMMENT_CHAR)
        oldf <- sub('.csv$', '', basename(newf))
        # Comparison data comes from the gcamdata.compdata package.
        olddata <- get_comparison_data(oldf)
        expect_is(olddata, "data.frame", info = paste("No comparison data found for", oldf))
        if(is.null(olddata)) {
          # The expectation above has already failed; guard the
          # calculations below from crashing on NULL.
          next
        }
        # TEMPORARY FIX TO PASS CHECKS
        if (grepl("L131.in_EJ_R_Senduse_F_Yh.csv", newf)){
          olddata <- olddata %>%
            mutate(value = if_else(is.nan(value), as.double(NA), value))
        }
        # Round numeric columns to a sensible number of digits to avoid
        # spurious mismatches, and promote integer columns to numeric so
        # <int> vs <dbl> columns compare equal.
        DIGITS <- 3
        round_df <- function(x, digits = DIGITS) {
          integer_columns <- sapply(x, class) == "integer"
          x[integer_columns] <- lapply(x[integer_columns], as.numeric)
          numeric_columns <- sapply(x, class) == "numeric"
          x[numeric_columns] <- round(x[numeric_columns], digits)
          return(x)
        }
        expect_identical(dim(olddata), dim(newdata), info = paste("Dimensions are not the same for", basename(newf)))
        if(isTRUE(all.equal(olddata, newdata, tolerance = 0.02))){
          expect_true(TRUE)
        }
        # NOTE(review): data.table() below relies on the data.table
        # package being attached by something outside this file — it is
        # never loaded here.  Confirm, or qualify the calls explicitly.
        else if(isTRUE(all.equal(data.table(distinct(olddata)), data.table(distinct(newdata)), ignore.row.order = TRUE, ignore.col.order = TRUE, tolerance = 0.02))){
          expect_true(TRUE)
        }
        else if(isTRUE(dplyr::all_equal(round_df(olddata), round_df(newdata)))){
          expect_true(TRUE)
        } else {
          changed_outputs <- c(changed_outputs, oldf)
        }
      }
      # BUG FIX: the condition previously read length(changed_outputs > 0),
      # which takes the length of a comparison vector; test the length of
      # the vector itself.
      if (length(changed_outputs) > 0){
        print("The following outputs have changed:")
        print(changed_outputs)
        expect_true(FALSE)
      }
    }
  })
  test_that('New XML outputs match old XML outputs', {
    ## The XML comparison data is huge, so it is not shipped with the
    ## package.  An option points at its location; if the option is not
    ## set, skip this test.
    xml_cmp_dir <- getOption('gcamdata.xml_cmpdir')
    if(is.null(xml_cmp_dir)) {
      skip("XML comparison data not provided. Set option 'gcamdata.xml_cmpdir' to run this test.")
    }
    else {
      xml_cmp_dir <- normalizePath(xml_cmp_dir)
    }
    expect_true(file.exists(xml_cmp_dir))
    xml_dir <- normalizePath(file.path("../..", XML_DIR))
    expect_true(file.exists(xml_dir))
    for(newxml in list.files(xml_dir, full.names = TRUE)) {
      # Look for a comparison file with exactly the same basename
      # anywhere under xml_cmp_dir.
      oldxml <- list.files(xml_cmp_dir, pattern = paste0('^', basename(newxml), '$'), recursive = TRUE,
                           full.names = TRUE)
      if(length(oldxml) > 0) {
        expect_equal(length(oldxml), 1,
                     info = paste('Testing file', newxml, ': Found', length(oldxml),
                                  'comparison files. There can be only one.'))
        ## If multiple matching files came back, run the test anyhow,
        ## taking the first one as the true comparison.
        expect_true(cmp_xml_files(oldxml[1], newxml),
                    info = paste('Sorry to be the one to tell you, but new XML file',
                                 newxml, "is not equivalent to its old version."))
      }
      else {
        ## No comparison file found: report it, but don't fail the test.
        message('No comparison file found for ', newxml, '. Skipping.')
      }
    }
  })
} else {
  # When the oldnew tests are disabled, at least check that the driver
  # runs without error; no need to save outputs.
  test_that("driver runs", {
    expect_error(driver(write_outputs = FALSE, write_xml = FALSE), NA)
  })
}
| /input/gcamdata/tests/testthat/test_oldnew.R | permissive | JGCRI/gcam-core | R | false | false | 8,589 | r | # Test that chunk outputs match original data system
context("oldnew")
library(readr)
OLDNEW <- F # Run old-new tests?
if (OLDNEW){
test_that("matches old data system output", {
# If we are running the code coverage tests then let's skip this since
# it will take a long to time run and the purpose of this test is to
# make sure the chunk outputs match the old data system and not to test
# the functionality of any chunks
if(isTRUE(as.logical(Sys.getenv("gcamdata.is_coverage_test")))) {
skip("Skip old new when only interested in code coverage")
}
# If we're on GitHub Actions, need to run the driver to ensure chunk outputs saved
# Don't do this locally, to speed things up
# Look for output data in OUTPUTS_DIR under top level
# (as this code will be run in tests/testthat)
outputs_dir <- normalizePath(file.path("../..", OUTPUTS_DIR))
xml_dir <- normalizePath(file.path("../..", XML_DIR))
if(identical(Sys.getenv("GITHUB_ACTIONS"), "true")) {
# Run the driver and save chunk outputs
# Note we are not going to bother writing the XML since GitHub Actions will not have
# any gcamdata.xml_cmpdir to do the OLD/NEW on the XML files anyways.
gcam_data_map <- driver(write_outputs = TRUE, write_xml = FALSE, quiet = TRUE, outdir = outputs_dir, xmldir = xml_dir, return_data_map_only = TRUE)
# The following two tests are only run on GitHub Actions because they will fail
# during the R CMD CHECK process locally (as the R build process removes outputs/)
expect_equivalent(file.access(outputs_dir, mode = 4), 0, # outputs_dir exists and is readable
info = paste("Directory", outputs_dir, "unreadable or does not exist from", getwd()))
expect_true(file.info(outputs_dir)$isdir)
# Now we compare the data map returned above with the pre-packaged version
# They should match! See https://github.com/JGCRI/gcamdata/pull/751#issuecomment-331578990
# First put what the driver returns and the internal GCAM_DATA_MAP into the same order (can vary if run on PIC for example)
gcam_data_map <- arrange(gcam_data_map, name, output)
data("GCAM_DATA_MAP")
gdm_internal <- arrange(GCAM_DATA_MAP, name, output)
# The gcam_data_map that's generated on GitHub Actions won't have the proprietary IEA data, so its comments
# and units may differ
expect_true(tibble::is_tibble(gdm_internal))
expect_true(tibble::is_tibble(gcam_data_map))
expect_identical(dim(gdm_internal), dim(gcam_data_map), info =
"GCAM_DATA_MAP dimensions don't match. Rerun generate_package_data to update.")
expect_identical(gdm_internal$name, gcam_data_map$name, info =
"GCAM_DATA_MAP name doesn't match. Rerun generate_package_data to update.")
expect_identical(gdm_internal$output, gcam_data_map$output, info = "GCAM_DATA_MAP output doesn't match")
expect_identical(gdm_internal$precursors, gcam_data_map$precursors, info =
"GCAM_DATA_MAP precursors doesn't match. Rerun generate_package_data to update.")
}
# Get a list of files in OUTPUTS_DIR for which we will make OLD/NEW comparisons
new_files <- list.files(outputs_dir, full.names = TRUE)
if(length(new_files) == 0) {
# There was no "NEW" outputs in the OUTPUTS_DIR to make comparisons
# so we will skip this test
skip("no output data found for comparison")
} else if(!require("gcamdata.compdata", quietly = TRUE)) {
# We couldn't get the "OLD" outputs from the gcamdata.compdata repo
# so we will skip this test
skip("gcamdata.compdata package not available")
} else {
# Create a list of changed outputs
changed_outputs <- c()
# For each file in OUTPUTS_DIR, look for corresponding file in our
# comparison data. Load them, reshape new data if necessary, compare.
for(newf in list.files(outputs_dir, full.names = TRUE)) {
# In this rewrite, we're not putting X's in front of years,
# nor are we going to spend time unnecessarily reshaping datasets
# (i.e. wide to long and back). But we still need to be able to
# verify old versus new datasets! Chunks tag the data if it's
# reshaped, and save_chunkdata puts flag(s) at top of the file.
new_firstline <- readLines(newf, n = 1)
if(grepl(FLAG_NO_TEST, new_firstline)) {
next
}
newdata <- read_csv(newf, comment = COMMENT_CHAR)
oldf <- sub('.csv$', '', basename(newf))
# get the comparison data which is coming from the gcamdata.compdata package
olddata <- get_comparison_data(oldf)
expect_is(olddata, "data.frame", info = paste("No comparison data found for", oldf))
if(is.null(olddata)) {
# will have already failed the above test but we need to protect
# from crashing in the calculations below
next
}
# TEMPORARY FIX TO PASS CHECKS
if (grepl("L131.in_EJ_R_Senduse_F_Yh.csv", newf)){
olddata <- olddata %>%
mutate(value = if_else(is.nan(value), as.double(NA), value))
}
# Finally, test (NB rounding numeric columns to a sensible number of
# digits; otherwise spurious mismatches occur)
# Also first converts integer columns to numeric (otherwise test will
# fail when comparing <int> and <dbl> columns)
DIGITS <- 3
# Round all numeric columns of a data frame to `digits` places so old/new
# comparisons are not tripped up by floating-point noise.  Integer columns
# are first promoted to double so <int> vs <dbl> columns compare equal.
# Uses vapply(is.integer)/vapply(is.numeric) instead of the original
# `sapply(x, class) == "integer"`, which silently misaligns the logical
# mask for columns whose class attribute has length > 1 (e.g. POSIXct).
round_df <- function(x, digits = DIGITS) {
integer_columns <- vapply(x, is.integer, logical(1))
x[integer_columns] <- lapply(x[integer_columns], as.numeric)
numeric_columns <- vapply(x, is.numeric, logical(1))
x[numeric_columns] <- round(x[numeric_columns], digits)
x
}
expect_identical(dim(olddata), dim(newdata), info = paste("Dimensions are not the same for", basename(newf)))
if(isTRUE(all.equal(olddata, newdata, tolerance = 0.02))){
expect_true(TRUE)
}
else if(isTRUE(all.equal(data.table(distinct(olddata)), data.table(distinct(newdata)), ignore.row.order = TRUE, ignore.col.order = TRUE, tolerance = 0.02))){
expect_true(TRUE)
}
else if(isTRUE(dplyr::all_equal(round_df(olddata), round_df(newdata)))){
expect_true(TRUE)
} else {
changed_outputs <- c(changed_outputs, oldf)
}
}
# Fail the test if any outputs changed, printing their names for debugging.
# The original condition was `length(changed_outputs > 0)` -- the length of
# a *logical* vector, i.e. always length(changed_outputs).  That only
# behaved correctly because `if` treats any nonzero number as TRUE; the
# comparison belongs outside length().
if (length(changed_outputs) > 0) {
  print("The following outputs have changed:")
  print(changed_outputs)
  expect_true(FALSE)
}
}
})
test_that('New XML outputs match old XML outputs', {
## The XML comparison data is huge, so we don't want to try to include it in
## the package. Instead, we look for an option that indicates where the data
## can be found. If the option isn't set, then we skip this test.
xml_cmp_dir <- getOption('gcamdata.xml_cmpdir')
if(is.null(xml_cmp_dir)) {
skip("XML comparison data not provided. Set option 'gcamdata.xml_cmpdir' to run this test.")
}
else {
xml_cmp_dir <- normalizePath(xml_cmp_dir)
}
expect_true(file.exists(xml_cmp_dir))
xml_dir <- normalizePath(file.path("../..", XML_DIR))
expect_true(file.exists(xml_dir))
for(newxml in list.files(xml_dir, full.names = TRUE)) {
oldxml <- list.files(xml_cmp_dir, pattern = paste0('^',basename(newxml),'$'), recursive = TRUE,
full.names = TRUE)
if(length(oldxml) > 0) {
expect_equal(length(oldxml), 1,
info = paste('Testing file', newxml, ': Found', length(oldxml),
'comparison files. There can be only one.'))
## If we come back with multiple matching files, we'll try to run the test anyhow, selecting
## the first one as the true comparison.
expect_true(cmp_xml_files(oldxml[1], newxml),
info = paste('Sorry to be the one to tell you, but new XML file',
newxml, "is not equivalent to its old version."))
}
else {
## If no comparison file found, issue a message, but don't fail the test.
message('No comparison file found for ', newxml, '. Skipping.')
}
}
})
} else {
# If we're not running oldnew tests, then we should test that driver can run without any errors
# no need to save outputs
# Smoke test: when the old/new comparison is disabled, just verify that the
# data-system driver runs end to end without raising an error.
# expect_error(expr, NA) is testthat's idiom for "expect no error".
test_that("driver runs", {
expect_error(driver(write_outputs = FALSE, write_xml = FALSE), NA)
})
}
|
# Tidy-data pipeline for the UCI HAR ("Human Activity Recognition") dataset:
# merges the train/test splits, keeps the mean/std measurements, attaches
# descriptive activity names, and averages every variable per
# activity/subject pair.
library(dplyr)
library(tidyr)
# Column-name vectors reused for the train and test reads below.
column_names_for_labels <- c("index", "label")
column_name_for_activity <- c("activity")
column_name_for_subject <- c("subject")
# 1. Read the 561 feature names from features.txt; these become the column
#    names of the measurement files.
column_labels <- read.table("UCI HAR Dataset/features.txt", col.names=column_names_for_labels)
# 2. Read the training split (measurements, activity codes, subject ids)
#    and attach activity/subject as extra columns.
trainset <- read.table("UCI HAR Dataset/train/X_train.txt", col.names=column_labels$label)
activity <- read.table("UCI HAR Dataset/train/y_train.txt", col.names=column_name_for_activity)
subject <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names=column_name_for_subject)
trainset <- trainset %>% mutate(activity=activity$activity, subject=subject$subject)
# 3. Same for the test split.
testset <- read.table("UCI HAR Dataset/test/X_test.txt", col.names=column_labels$label)
activity <- read.table("UCI HAR Dataset/test/y_test.txt", col.names=column_name_for_activity)
subject <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names=column_name_for_subject)
testset <- testset %>% mutate(activity=activity$activity, subject=subject$subject)
# 4. Combine the two splits into one dataset.
dataset <- bind_rows(testset, trainset)
# 5. Keep only the mean and standard-deviation measurements.
#    ignore.case = FALSE deliberately excludes columns such as gravityMean.
reduced_dataset <- dataset %>% select(
  contains("mean", ignore.case=FALSE),
  contains("std", ignore.case=FALSE),
  activity,
  subject)
# 6. Replace numeric activity codes with the descriptive names shipped in
#    activity_labels.txt (`by = "activity"` is equivalent to the verbose
#    c("activity" = "activity") spelling).
activity_label <- read.table("UCI HAR Dataset/activity_labels.txt", col.names=c("activity", "names"))
reduced_dataset <- left_join(reduced_dataset, activity_label, by = "activity") %>%
  mutate(activity=names) %>%
  select(contains("mean"), contains("std"), activity, subject)
# 7. Average every variable for each activity and each subject.
#    funs() was deprecated in dplyr 0.8; a one-sided formula (~) is the
#    supported, behaviorally identical replacement.
analysed_dataset <- reduced_dataset %>%
  group_by(activity, subject) %>%
  summarise_all(~ mean(., na.rm = TRUE))
# Save both datasets:
write.csv(reduced_dataset, file="tidy_dataset.csv")
write.csv(analysed_dataset, file="analysed_dataset.csv") | /run_analysis.R | no_license | Quath/ProgrammingAssignment4 | R | false | false | 2,417 | r | # The imports!
library(dplyr)
library(tidyr)
# Make some variables we are going to reuse
column_names_for_labels <- c("index", "label")
column_name_for_activity <- c("activity")
column_name_for_subject <- c("subject")
# 1 read the column names from the features.txt file. There should be 561 columns (aaargh)
column_labels <- read.table("UCI HAR Dataset/features.txt", col.names=column_names_for_labels)
# This we'll repeat for the train and test dataset.
trainset <- read.table("UCI HAR Dataset/train/X_train.txt", col.names=column_labels$label)
activity <- read.table("UCI HAR Dataset/train/y_train.txt", col.names=column_name_for_activity)
subject <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names=column_name_for_subject)
# Add columns for the activity and subject to the trainset
trainset <- trainset %>% mutate(activity=activity$activity, subject=subject$subject)
# Same for the test dataset
testset <- read.table("UCI HAR Dataset/test/X_test.txt", col.names=column_labels$label)
activity <- read.table("UCI HAR Dataset/test/y_test.txt", col.names=column_name_for_activity)
subject <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names=column_name_for_subject)
testset <- testset %>% mutate(activity=activity$activity, subject=subject$subject)
# bind the two dataset togther
dataset <- bind_rows(testset, trainset)
# Select only the measurements of the mean and standard deviation:
reduced_dataset <- dataset %>% select(
contains("mean", ignore.case=FALSE), # I don't want the gravityMean etc
contains("std", ignore.case=FALSE),
activity,
subject)
# Use descriptive names for the activity. Not super clear whether this is really required.
# Luckily those names exists already:
activity_label <- read.table("UCI HAR Dataset/activity_labels.txt", col.names=c("activity", "names"))
reduced_dataset <- left_join(reduced_dataset, activity_label, by=c("activity" = "activity")) %>%
mutate(activity=names) %>%
select(contains("mean"), contains("std"), activity, subject)
# Use the reduced dataset to get the average of each variable for each activity and for each subject
analysed_dataset <- reduced_dataset %>%
group_by(activity, subject) %>%
summarise_all(funs(mean(., na.rm=TRUE)))
# Save both datasets:
write.csv(reduced_dataset, file="tidy_dataset.csv")
write.csv(analysed_dataset, file="analysed_dataset.csv") |
# This script checks the qgraph functions that build a partial-correlation
# network with p-values corrected by Holm's method (or local FDR).
# Result: these methods did not work here -- every corrected p-value comes
# back as 1 (non-significant).
library(RCurl) # run this package for load the data form the website
file <- getURL("https://docs.google.com/spreadsheets/d/1zB7gNdI7Nk7SuHuPWcjzaKnjuwkvL6sOVMo0zMfuV-c/pub?gid=558862364&single=true&output=csv") # load data from the google drive
data <- read.csv(text = file) # read data which is formated as the csv
data[data == "-"] <- NA # replace '-' with NA
data[data == ""] <- NA # replace 'missing data' with NA
#==== to lower variable names ====
names(data) <- tolower(names(data)) # for more consistancy
# Drop survey variables that are not used in the network analysis.  This
# replaces a long run of repeated `data$<col> <- NULL` statements with one
# loop over a named vector; assigning NULL via [[<- removes the column and
# is a no-op if it is absent, so behavior is identical.
# Rationale (from the original comments): phase and lfm each have a single
# level; identifier, field area, farmer name, village, year/season, lat/long
# and the crop-establishment/harvest dates are out of scope; fertiliser,
# pesticide and yield inputs are not analysed; varcoded will be recoded;
# mu, rbpx and nplsqm have no usable records.
drop_vars <- c(
  "phase", "identifier", "ccs", "ccd", "village", "year", "season",
  "lat", "long", "fa", "fn", "fp", "pc", "cem", "ast", "vartype", "fym",
  "n", "p", "k", "mf", "wcp", "iu", "hu", "fu", "cs", "ldg", "yield",
  "dscum", "wecum", "ntmax", "npmax", "nltmax", "nlhmax", "lfm", "ced",
  "cedjul", "hd", "hdjul", "cvr", "varcoded", "fymcoded", "mu", "nplsqm",
  "rbpx"
)
for (v in drop_vars) data[[v]] <- NULL
#======================================================================
#=================== correct the variable types =======================
#======================================================================
# The survey columns are read in as character/factor; convert country to a
# factor and every pest/disease/injury measurement to numeric.  Equivalent
# to the original giant transform() call, without repeating as.numeric()
# 38 times.  (Note: as.numeric() on a factor yields level codes in both
# versions, so behavior is unchanged.)
numeric_vars <- c(
  "waa", "wba", "dhx", "whx", "ssx", "wma", "lfa", "lma", "rha", "thrx",
  "pmx", "defa", "bphx", "wbpx", "awx", "rbx", "rbbx", "glhx", "stbx",
  "hbx", "bbx", "blba", "lba", "bsa", "blsa", "nbsa", "rsa", "lsa",
  "shbx", "shrx", "srx", "fsmx", "nbx", "dpx", "rtdx", "rsdx", "gsdx",
  "rtx"
)
data$country <- as.factor(data$country)
data[numeric_vars] <- lapply(data[numeric_vars], as.numeric)
num.data <- apply(data[, -c(1,2)], 2, as.numeric) # create dataframe to store the numerical transformation of raw data excluded fno and country
num.data <- as.data.frame(as.matrix(num.data)) # convert from vector to matrix
data <- cbind(data[ , c("fno", "country")], num.data)
data <- data[ , apply(data[, -c(1,2)], 2, var, na.rm = TRUE) != 0] # exclude the column with variation = 0
data <- data[complete.cases(data), ] # exclude row which cantain NA
start.IP <- "dhx" # set to read the data from column named "dhx"
end.IP <- "rtx" # set to read the data from column named "rtx"
start.col.IP <- match(start.IP, names(data)) # match function for check which column of the data mactch the column named following the conditons above
end.col.IP <- match(end.IP, names(data)) # match function for check which column of the data mactch the column named following the conditons above
IP.data <- data[start.col.IP:end.col.IP] # select the columns of raw data which are following the condition above
IP.data <- IP.data[ ,apply(IP.data, 2, var, na.rm = TRUE) != 0] # exclude the column (variables) with variation = 0
#
country <- data$country #combine two cloumn names country and PS
IP.data <- cbind(country, IP.data)
IP.data[is.na(IP.data)] <- 0
name.country <- as.vector(unique(IP.data$country))
#======= Experiment 1: Philippines subset ======= #
# NOTE(review): filter() and %>% come from dplyr/magrittr, which this script
# never loads (only RCurl is attached above) -- presumably they were loaded
# in the calling session; TODO confirm and add library(dplyr) at the top.
PHL <- IP.data %>% filter(country == "PHL")
# Drop the country column, then any zero-variance variables.
PHL <- PHL[-1][ ,apply(PHL[-1], 2, var, na.rm = TRUE) != 0]
PHLCors <- cor(PHL, method = "spearman")
# BUG FIX: the original call passed `layout = corGraph$layout` while first
# *creating* corGraph, i.e. it referenced the object before it existed
# ("object 'corGraph' not found").  Let qgraph compute its default layout.
corGraph <- qgraph(PHLCors, graph = "pcor", threshold = "bonferroni", sampleSize = nrow(PHL))
optGraph <- findGraph(as.data.frame(PHLCors), nrow(PHL), type = "cor")
| /chapter4/results/withqgraph.R | no_license | sithjaisong/network.project | R | false | false | 6,074 | r | # this script used for runing check the functions from qgraqph to create the partial correlation and corrected p-value by holm or even the fdrlocal
# the result these methods are not working because the output giving the p-value nonsignificant they are all pvalue is 1
library(RCurl) # run this package for load the data form the website
file <- getURL("https://docs.google.com/spreadsheets/d/1zB7gNdI7Nk7SuHuPWcjzaKnjuwkvL6sOVMo0zMfuV-c/pub?gid=558862364&single=true&output=csv") # load data from the google drive
data <- read.csv(text = file) # read data which is formated as the csv
data[data == "-"] <- NA # replace '-' with NA
data[data == ""] <- NA # replace 'missing data' with NA
#==== to lower variable names ====
names(data) <- tolower(names(data)) # for more consistancy
data$phase <- NULL # there is only one type yype of phase in the survey
data$identifier <- NULL # this variable is not included in the analysis
data$ccs <- NULL
data$ccd <- NULL
data$village <- NULL # remove name of village
data$year <- NULL # remove year data
data$season <- NULL # remove season data
data$lat <- NULL # remove latitude data
data$long <- NULL # remove longitude data
data$fa <- NULL # field area is not include in the analysis
data$fn <- NULL # farmer name can not be included in this survey analysis
data$fp <- NULL # I do not know what is fp
data$pc <- NULL
data$cem <- NULL
data$ast <- NULL
data$vartype <- NULL
data$fym <- NULL
data$n <- NULL
data$p <- NULL
data$k <- NULL
data$mf <- NULL
data$wcp <- NULL
data$iu <- NULL
data$hu <- NULL
data$fu <- NULL
data$cs <- NULL
data$ldg <- NULL
data$yield <- NULL
data$dscum <- NULL
data$wecum <- NULL
data$ntmax <- NULL
data$npmax <- NULL
data$nltmax <- NULL
data$nlhmax <- NULL
data$lfm <- NULL # there is only one type of land form in this survey
data$ced <- NULL # Date data can not be included in the network analysis
data$cedjul <- NULL # remove crop establisment julian date data
data$hd <- NULL # Date data can not be included in the network analysis
data$hdjul <- NULL # remove harvest julian date
data$cvr <- NULL # reove crop varieties
data$varcoded <- NULL # I will recode them
data$fymcoded <- NULL # remove code data of fym
data$mu <- NULL # no record of mullucicide data
data$nplsqm <- NULL # remove number of plant per square meter
data$rbpx <- NULL # no record of rice bug p
#======================================================================
#=================== corract the variable type ========================
#======================================================================
data <- transform(data,
country = as.factor(country),
waa = as.numeric(waa),
wba = as.numeric(wba) ,
dhx = as.numeric(dhx),
whx = as.numeric(whx),
ssx = as.numeric(ssx),
wma = as.numeric(wma),
lfa = as.numeric(lfa),
lma = as.numeric(lma),
rha = as.numeric(rha) ,
thrx = as.numeric(thrx),
pmx = as.numeric(pmx),
defa = as.numeric(defa),
bphx = as.numeric(bphx),
wbpx = as.numeric(wbpx),
awx = as.numeric(awx),
rbx =as.numeric(rbx),
rbbx = as.numeric(rbbx),
glhx = as.numeric(glhx),
stbx=as.numeric(stbx),
hbx= as.numeric(hbx),
bbx = as.numeric(bbx),
blba = as.numeric(blba),
lba = as.numeric(lba),
bsa = as.numeric(bsa),
blsa = as.numeric(blsa),
nbsa = as.numeric(nbsa),
rsa = as.numeric(rsa),
lsa = as.numeric(lsa),
shbx = as.numeric(shbx) ,
shrx = as.numeric(shrx),
srx= as.numeric(srx),
fsmx = as.numeric(fsmx),
nbx = as.numeric(nbx),
dpx = as.numeric(dpx),
rtdx = as.numeric(rtdx),
rsdx = as.numeric(rsdx),
gsdx =as.numeric(gsdx),
rtx = as.numeric(rtx)
)
num.data <- apply(data[, -c(1,2)], 2, as.numeric) # create dataframe to store the numerical transformation of raw data excluded fno and country
num.data <- as.data.frame(as.matrix(num.data)) # convert from vector to matrix
data <- cbind(data[ , c("fno", "country")], num.data)
data <- data[ , apply(data[, -c(1,2)], 2, var, na.rm = TRUE) != 0] # exclude the column with variation = 0
data <- data[complete.cases(data), ] # exclude row which cantain NA
start.IP <- "dhx" # set to read the data from column named "dhx"
end.IP <- "rtx" # set to read the data from column named "rtx"
start.col.IP <- match(start.IP, names(data)) # match function for check which column of the data mactch the column named following the conditons above
end.col.IP <- match(end.IP, names(data)) # match function for check which column of the data mactch the column named following the conditons above
IP.data <- data[start.col.IP:end.col.IP] # select the columns of raw data which are following the condition above
IP.data <- IP.data[ ,apply(IP.data, 2, var, na.rm = TRUE) != 0] # exclude the column (variables) with variation = 0
#
country <- data$country #combine two cloumn names country and PS
IP.data <- cbind(country, IP.data)
IP.data[is.na(IP.data)] <- 0
name.country <- as.vector(unique(IP.data$country))
#======= Experiment 1 ======= #
PHL <- IP.data %>% filter(country == "PHL")
PHL <- PHL[-1][ ,apply(PHL[-1], 2, var, na.rm = TRUE) != 0] # exclude the column (variables) with variation = 0
PHLCors <- cor(PHL, method = "spearman")
corGraph <- qgraph(PHLCors, layout = corGraph$layout, graph = "pcor", threshold = "bonferroni", sampleSize = nrow(PHL))
optGraph <- findGraph(as.data.frame(PHLCors), nrow(PHL), type = "cor")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/registration.R
\name{adult_gene_expression_analysis}
\alias{adult_gene_expression_analysis}
\title{Gene expression spatial enrichment}
\usage{
adult_gene_expression_analysis(
anatomy_statistics,
gene_expression_paths = NULL,
target_threshold,
contrast_threshold = NULL,
symmetric_statistics = F,
ABI_registration = list(anatomy_template = NULL, anatomy_mask = NULL,
allen_ccf3v_template = NULL, allen_ccf3v_mask = NULL, anatomy_transform = NULL),
gene_expression_analyis_options = list(interpolate_gene = F, reflect_gene = T,
brain_mask = NULL),
tmp_files_dir = NULL,
parallel = NULL,
conf_file = getOption("RMINC_BATCH_CONF")
)
}
\arguments{
\item{anatomy_statistics}{MINC file or vector denoting statistics at each voxel}
\item{gene_expression_paths}{Filenames or URLs pointing to gene expression data. If NULL, then genome-wide gene expression analysis is conducted and gene expression data is downloaded to temporary files.}
\item{target_threshold}{statistics greater than this value constitute the ROI}
\item{contrast_threshold}{statistics less than this value constitute the contrast region. If NULL, then the contrast region is assumed to be the whole brain.}
\item{symmetric_statistics}{Should the absolute value of statistics at each voxel be considered instead of the signed value?}
\item{ABI_registration}{arguments that can be supplied if you want to perform a registration prior to analysis. If you provide an XFM filepath as 'anatomy_transform', then the statistics will be transformed and resampled prior to gene expression analysis. If you provide 'anatomy_template' (filepath or MINC vector denoting the anatomy where statistics were conducted), 'anatomy_mask' (filepath or MINC vector denoting brain mask for anatomy), then the anatomy is registered to the ABI template and the resulting transformation applied to the statistics prior to gene expression computation. In this case, ABI template and mask are downloaded unless 'allen_ccf3v_template' and 'allen_ccf3v_mask' point to filepaths containing the respective information.}
\item{gene_expression_analyis_options}{options for gene expression analysis. 'interpolate_gene' flags whether nearest-neighbour imputation should be done to fill in missing gene expression voxels. If FALSE, then these voxels are ignored. 'reflect_gene' flags whether gene expression signal should be reflected across the sagittal midplane to fill in missing gene expression voxels in the opposite hemisphere. 'brain_mask' is a MINC file, RAW file, or vector identifying which voxels are in the brain. If NULL, the mask will be downloaded from the ABI.}
\item{tmp_files_dir}{location to store temporary files from the registration}
\item{parallel}{how many processors to run on (default=single processor). Specified as a two element vector, with the first element corresponding to the type of parallelization, and the second to the number of processors to use. For local running set the first element to "local" or "snowfall" for back-compatibility, anything else will be run with batchtools. Leaving this argument NULL runs sequentially.}
\item{conf_file}{A batchtools configuration file defaulting to \code{getOption("RMINC_BATCH_CONF")}}
}
\value{
gene expression enrichment expressed as fold-change
}
\description{
Conducts a gene expression spatial enrichment analysis for arbitrary anatomy statistics. Statistics above the target threshold constitute the target region-of-interest (ROI). Statistics below a contrast threshold constitute the contrast region. Gene expression spatial enrichment is calculated for any number of genes in the ABI gene expression atlas. Enrichment is computed using a fold-change measure: expression in target ROI divided by expression in contrast region. If the contrast threshold is not supplied, then the contrast region is assumed to be the whole brain.
}
| /man/adult_gene_expression_analysis.Rd | no_license | DJFernandes/ABIgeneRMINC | R | false | true | 3,949 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/registration.R
\name{adult_gene_expression_analysis}
\alias{adult_gene_expression_analysis}
\title{Gene expression spatial enrichment}
\usage{
adult_gene_expression_analysis(
anatomy_statistics,
gene_expression_paths = NULL,
target_threshold,
contrast_threshold = NULL,
symmetric_statistics = F,
ABI_registration = list(anatomy_template = NULL, anatomy_mask = NULL,
allen_ccf3v_template = NULL, allen_ccf3v_mask = NULL, anatomy_transform = NULL),
gene_expression_analyis_options = list(interpolate_gene = F, reflect_gene = T,
brain_mask = NULL),
tmp_files_dir = NULL,
parallel = NULL,
conf_file = getOption("RMINC_BATCH_CONF")
)
}
\arguments{
\item{anatomy_statistics}{MINC file or vector denoting statistics at each voxel}
\item{gene_expression_paths}{Filenames or URLs pointing to gene expression data. If NULL, then genome-wide gene expression analysis is conducted and gene expression data is downloaded to temporary files.}
\item{target_threshold}{statistics greater than this value constitute the ROI}
\item{contrast_threshold}{statistics less than this value constitute the contract region. If NULL, then contrast region is assumed to be the whole brain.}
\item{symmetric_statistics}{Should the absolute value of statistics at each voxel be considered instead of the signed value?}
\item{ABI_registration}{arguments that can be supplied if you want to perform a registration prior to analysis. If you provide an XFM filepath as 'anatomy_transform', then the statistics will be transformed and resampled prior to gene expression analysis. If you provide 'anatomy_template' (filepath or MINC vector denoting the anatomy where statistics were conducted), 'anatomy_mask' (filepath or MINC vector denoting brain mask for anatomy), then the anatomy is registered to the ABI template and the resulting transformation applied to the statistics prior to gene expression computation. In this case, ABI template and mask are downloaded unless 'allen_ccf3v_template' and 'allen_ccf3v_mask' point to filepaths containing the respective information.}
\item{gene_expression_analyis_options}{options for gene expression analysis. 'interpolate_gene' flags whether nearest-neighbour imputation should be done to fill in missing gene expression voxels. If FALSE, then these voxels are ignored. 'reflect_gene' flags whether gene expression signal should be reflected across the sagittal midplane to fill in missing gene expression voxels in the opposite hemisphere. 'brain_mask' is a MINC file, RAW file, or vector identifying which voxels are in the brain. If NULL, the mask will be downloaded from the ABI.}
\item{tmp_files_dir}{location to store temporary files from the registration}
\item{parallel}{how many processors to run on (default=single processor). Specified as a two element vector, with the first element corresponding to the type of parallelization, and the second to the number of processors to use. For local running set the first element to "local" or "snowfall" for back-compatibility, anything else will be run with batchtools. Leaving this argument NULL runs sequentially.}
\item{conf_file}{A batchtools configuration file defaulting to \code{getOption("RMINC_BATCH_CONF")}}
}
\value{
gene expression enrichment expressed as fold-change
}
\description{
Conducts a gene expression spatial enrichment analysis for arbitrary anatomy statistics. Statistics above the target threshold constitute the target region-of-interest (ROI). Statistics below a contrast threshold constitute the contrast region. Gene expression spatial enrichment is calculated for any number of genes in the ABI gene expression atlas. Enrichments is computed using a fold-change measure: expression in target ROI divided by expression in contrast region. If contrast threshold is not suppled, then the contrast regions is assumed to be the whole brain.
}
|
# Package load hook: attaches the compiled (shared-library) code shipped
# with DNAcopy and emits a startup notice about the postponed change to the
# CNA object's data format.
.onLoad <- function(libname, pkgname) {
  # Load the package's native routines.
  library.dynam("DNAcopy", pkgname, libname)
  # packageStartupMessage() (rather than message/cat) lets users suppress
  # this banner with suppressPackageStartupMessages().
  banner <- "\n**************************************************************************\n The plan to change the data format for CNA object has been postponed \n in order to ensure backward compatibility with older versions of DNAcopy \n**************************************************************************\n"
  packageStartupMessage(banner)
}
| /R/zzz.R | no_license | genome-vendor/r-bioc-dnacopy | R | false | false | 421 | r | .onLoad <- function(libname, pkgname) {
library.dynam("DNAcopy", pkgname, libname)
packageStartupMessage("\n**************************************************************************\n The plan to change the data format for CNA object has been postponed \n in order to ensure backward compatibility with older versions of DNAcopy \n**************************************************************************\n")
}
|
# plot2.R -- Exploratory Data Analysis course project, plot 2.
# Reads the UCI "Individual household electric power consumption" data,
# subsets the two days 2007-02-01 and 2007-02-02, plots Global Active Power
# over time, and saves the plot to plot2.png (480x480).
if (!file.exists("household_power_consumption.txt")) {
  stop("Please place the unzipped data file [household_power_consumption.txt] in the working directory.")
}
# Read the semicolon-separated file into a data frame.
datafile <- "household_power_consumption.txt"
DT <- read.table(datafile, header = TRUE, sep = ";",
                 na.strings = "?") # missing values are coded as '?'
# Convert to proper date/time classes for easier handling.
# Wrap strptime() in as.POSIXct(): strptime() returns POSIXlt, which is a
# list-like class that behaves poorly as a data-frame column; POSIXct is
# the recommended column type and plots identically.
DT$Time <- as.POSIXct(strptime(paste(DT$Date, DT$Time, sep = " "), "%d/%m/%Y %H:%M:%S"))
DT$Date <- as.Date(DT$Date, "%d/%m/%Y")
# Keep only 2007-02-01 and 2007-02-02.
DT2Days <- subset(DT, DT$Date > as.Date("2007-01-31") & DT$Date < as.Date("2007-02-03"))
# Single plot, default text size.
par(mfrow = c(1, 1), cex = 1)
# Line graph of global active power over the two days.
plot(DT2Days$Time, DT2Days$Global_active_power, type = "l",
     xlab = NA, ylab = "Global Active Power (kilowatts)")
# Copy the on-screen plot to a PNG file.
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off()
| /plot2.R | no_license | safwannaqi/ExData_Plotting1 | R | false | false | 943 | r | if (!file.exists("household_power_consumption.txt")) {
stop("Please place the unzipped data file [household_power_consumption.txt] in the working directory.")
}
# reading the data file into a data frame
datafile <- "household_power_consumption.txt"
DT <- read.table(datafile, header = TRUE, sep = ";",
na.strings = "?") # missing values are coded as '?'
# converting data types for easier handling
DT$Time <- strptime(paste(DT$Date, DT$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
DT$Date <- as.Date(DT$Date, "%d/%m/%Y")
# subsetting data
DT2Days <- subset(DT, DT$Date > as.Date("2007-01-31") & DT$Date < as.Date("2007-02-03"))
# setting global parameters
par(mfrow = c(1,1), cex = 1)
# plotting line graph
plot(DT2Days$Time ,DT2Days$Global_active_power, type = "l",
xlab = NA, ylab = "Global Active Power (kilowatts)")
# copying plot to PNG file
dev.copy(png, file = "plot2.png", width=480, height=480)
dev.off()
|
#' @title get_chapter - Download a single chapter from ctext.org
#' @description Unvectorized function. Used as a building block for get_chapters and get_books.
#' @param book name of book
#' @param chapter name or number of chapter in book
#' @export
#' @return Returns a dataframe with 4 columns: text, book, chapter name in English, & chapter name in Chinese.
#' In the case for the dao-de-jing, chapter param not needed.
#' If you reach the API call limit or the connection has an error, function will retry 3 times.
#' If still unsuccessful, an NA value will be returned in the "text" column.
#' @examples
#' \dontrun{
#' get_chapter("analects", "xue-er")
#' get_chapter("analects", 1)
#' get_chapter("dao-de-jing")
#' }
get_chapter <- function(book, chapter = NA) {
  # NOTE(review): because book_list$book is character-valued, is.character(book)
  # alone satisfies this check, making the %in% clause unreachable; the intended
  # operator may have been `&` -- confirm before tightening the validation.
  stopifnot(is.character(book) | book %in% ctextclassics::book_list$book)
  counter <- 0  # API attempts made so far (at most 3)
  ok <- FALSE   # becomes TRUE once a usable response arrives
  # A numeric chapter index is translated to its chapter name via the
  # package's book index.
  if (is.numeric(chapter)) {
    indexed_book <-
      book_list[ctextclassics::book_list$book == book, c("book", "chapter")]
    chapter <- indexed_book[chapter, 2]
  }
  # The dao-de-jing URN carries no chapter component.
  if (book == "dao-de-jing") {
    path <- paste0("/gettext?urn=", "ctp:", book)
  } else {
    path <- paste0("/gettext?urn=", "ctp:", book, "/", chapter[[1]])
  }
  # Retry (up to 3 total attempts) when the API answers with an error or
  # rate-limit message instead of chapter text.
  while (!ok && counter < 3) {
    raw_data <- ctextclassics::ctext_api(path)
    counter <- counter + 1
    if (grepl("requires|request", raw_data[[1]][1], ignore.case = TRUE)) {
      cat(" API error, trying", 3 - counter, "more times\n", sep = " ")
      # Placeholder result; kept if every retry fails so the caller gets NA text.
      raw_data <- list(text = NA, chapter = chapter)
      Sys.sleep(2)  # back off briefly before retrying
    } else {
      ok <- TRUE
    }
  }
  # Flatten the response into a 2-column (text, chapter_cn) matrix and attach
  # the book/chapter identifiers.
  raw_data <- do.call(rbind, raw_data)
  setNames(
    data.frame(
      book,
      chapter,
      matrix(unlist(raw_data), ncol = 2,
             byrow = TRUE),
      stringsAsFactors = FALSE
    ),
    c("book", "chapter", "word", "chapter_cn")
  )
}
#' @title get_chapters - download multiple chapters from ctext.org.
#' @description Vectorized form of get_chapter. Download multiple chapters from a single book or multple books of your
#' specification. Can partially download books, and can mix and match books.
#' @param book name of book or vector of book names
#' @param chapter name or number of chapter, or vector of chapters
#' @export
#' @return Returns a dataframe with 4 columns: text, book, chapter name in English, & chapter name in Chinese.
#' In the case for the dao-de-jing, only book arg is needed. If you reach the API call limit or
#' the connection has an error, function will retry 3 times. If still unsuccessful, an NA value will be returned in the "text" column.
#' @examples
#' \dontrun{
#' # Most basic use is to call a book and a chapter:
#' get_chapters("analects", "xue-er")
#' get_chapters("analects", 1)
#' # Can accept multiple chapters in a book, e.g.
#' get_chapters("analects", c("xue-er", "wei-zheng"))
#' get_chapters("analects", 1:2)
#' # Can specify multiple books and chapters:
#' get_chapters(c("analects", "mengzi"), c("xue-er", "liang-hui-wang-i"))
#' get_chapters(c("analects", "mengzi"), 1:2)
#' }
get_chapters <- function(book, chapter) {
  # Recycle the shorter vector so the two arguments pair up one-to-one.
  # length.out guarantees equal lengths even when one length is not a
  # multiple of the other; the previous rep(x, times) form over-replicated
  # (e.g. 2 books x 4 chapters gave 8 books) and made purrr::map2() fail
  # on mismatched lengths.
  if (length(book) < length(chapter)) {
    book <- rep(book, length.out = length(chapter))
  }
  if (length(book) > length(chapter)) {
    chapter <- rep(chapter, length.out = length(book))
  }
  # Download each (book, chapter) pair and stack the results row-wise.
  do.call(rbind, purrr::map2(book, chapter, ~ get_chapter(..1, ..2)))
}
#' @title get_books - Get an entire book or several books in book_list
#' @description A wrapper around get_chapters that downloads an entire book or books.
#' Different from get_chapters in that it directly downloads all chapters of specified
#' books.
#' @param ... one or more book names (character), each of which must appear in book_list$book
#' @export
#' @return Returns a dataframe with 4 columns: text, book, chapter in English, & chapter in Chinese.
#' In the case that the connection experiences a problem or you meet your daily API imit,
#' an NA value will be returned in the "text" column.
#' @examples
#' \dontrun{
#' get_books("analects")
#' get_books("dao-de-jing")
#' # On average you can download around 3 books before hitting the API limit.
#' my_booklist <- c("analects", "mengzi", "zhuangzi")
#' my_books <- get_books(my_booklist)
#' }
get_books <- function(...) {
  books <- c(...)
  # Every requested name must exist in the package's book index; the
  # length check also rejects a no-argument call. On failure, show the
  # unknown names before stopping.
  if (!all(books %in% ctextclassics::book_list$book) || length(books) == 0) {
    missing_book <- setdiff(books, ctextclassics::book_list$book)
    print(missing_book)
    stop("Books unable to be indexed. Print 'book_list$book' for details\n")
  }
  # Restrict the index to the requested books, then download every chapter
  # of each and stack the per-book data frames row-wise.
  selected_books <- ctextclassics::book_list[ctextclassics::book_list$book %in% books, ]
  my_books <- purrr::map2(selected_books$book, selected_books$chapter,
                          ~ get_chapters(..1, ..2))
  data.frame(do.call(rbind, my_books), stringsAsFactors = FALSE)
}
| /R/get_text.R | no_license | Jjohn987/ctextclassics | R | false | false | 4,623 | r | #' @title get_chapter - Download a single chapter from ctext.org
#' @description Unvectorized function. Used as a building block for get_chapters and get_books.
#' @param book name of book
#' @param chapter name or number of chapter in book
#' @export
#' @return Returns a dataframe with 4 columns: text, book, chapter name in English, & chapter name in Chinese.
#' In the case for the dao-de-jing, chapter param not needed.
#' If you reach the API call limit or the connection has an error, function will retry 3 times.
#' If still unsuccessful, an NA value will be returned in the "text" column.
#' @examples
#' \dontrun{
#' get_chapter("analects", "xue-er")
#' get_chapter("analects", 1)
#' get_chapter("dao-de-jing")
#' }
get_chapter <- function(book, chapter = NA) {
  # NOTE(review): because book_list$book is character-valued, is.character(book)
  # alone satisfies this check, making the %in% clause unreachable; the intended
  # operator may have been `&` -- confirm before tightening the validation.
  stopifnot(is.character(book) | book %in% ctextclassics::book_list$book)
  counter <- 0  # API attempts made so far (at most 3)
  ok <- FALSE   # becomes TRUE once a usable response arrives
  # A numeric chapter index is translated to its chapter name via the
  # package's book index.
  if (is.numeric(chapter)) {
    indexed_book <-
      book_list[ctextclassics::book_list$book == book, c("book", "chapter")]
    chapter <- indexed_book[chapter, 2]
  }
  # The dao-de-jing URN carries no chapter component.
  if (book == "dao-de-jing") {
    path <- paste0("/gettext?urn=", "ctp:", book)
  } else {
    path <- paste0("/gettext?urn=", "ctp:", book, "/", chapter[[1]])
  }
  # Retry (up to 3 total attempts) when the API answers with an error or
  # rate-limit message instead of chapter text.
  while (!ok && counter < 3) {
    raw_data <- ctextclassics::ctext_api(path)
    counter <- counter + 1
    if (grepl("requires|request", raw_data[[1]][1], ignore.case = TRUE)) {
      cat(" API error, trying", 3 - counter, "more times\n", sep = " ")
      # Placeholder result; kept if every retry fails so the caller gets NA text.
      raw_data <- list(text = NA, chapter = chapter)
      Sys.sleep(2)  # back off briefly before retrying
    } else {
      ok <- TRUE
    }
  }
  # Flatten the response into a 2-column (text, chapter_cn) matrix and attach
  # the book/chapter identifiers.
  raw_data <- do.call(rbind, raw_data)
  setNames(
    data.frame(
      book,
      chapter,
      matrix(unlist(raw_data), ncol = 2,
             byrow = TRUE),
      stringsAsFactors = FALSE
    ),
    c("book", "chapter", "word", "chapter_cn")
  )
}
#' @title get_chapters - download multiple chapters from ctext.org.
#' @description Vectorized form of get_chapter. Download multiple chapters from a single book or multple books of your
#' specification. Can partially download books, and can mix and match books.
#' @param book name of book or vector of book names
#' @param chapter name or number of chapter, or vector of chapters
#' @export
#' @return Returns a dataframe with 4 columns: text, book, chapter name in English, & chapter name in Chinese.
#' In the case for the dao-de-jing, only book arg is needed. If you reach the API call limit or
#' the connection has an error, function will retry 3 times. If still unsuccessful, an NA value will be returned in the "text" column.
#' @examples
#' \dontrun{
#' # Most basic use is to call a book and a chapter:
#' get_chapters("analects", "xue-er")
#' get_chapters("analects", 1)
#' # Can accept multiple chapters in a book, e.g.
#' get_chapters("analects", c("xue-er", "wei-zheng"))
#' get_chapters("analects", 1:2)
#' # Can specify multiple books and chapters:
#' get_chapters(c("analects", "mengzi"), c("xue-er", "liang-hui-wang-i"))
#' get_chapters(c("analects", "mengzi"), 1:2)
#' }
get_chapters <- function(book, chapter) {
  # Recycle the shorter vector so the two arguments pair up one-to-one.
  # length.out guarantees equal lengths even when one length is not a
  # multiple of the other; the previous rep(x, times) form over-replicated
  # (e.g. 2 books x 4 chapters gave 8 books) and made purrr::map2() fail
  # on mismatched lengths.
  if (length(book) < length(chapter)) {
    book <- rep(book, length.out = length(chapter))
  }
  if (length(book) > length(chapter)) {
    chapter <- rep(chapter, length.out = length(book))
  }
  # Download each (book, chapter) pair and stack the results row-wise.
  do.call(rbind, purrr::map2(book, chapter, ~ get_chapter(..1, ..2)))
}
#' @title get_books - Get an entire book or several books in book_list
#' @description A wrapper around get_chapters that downloads an entire book or books.
#' Different from get_chapters in that it directly downloads all chapters of specified
#' books.
#' @param ... one or more book names (character), each of which must appear in book_list$book
#' @export
#' @return Returns a dataframe with 4 columns: text, book, chapter in English, & chapter in Chinese.
#' In the case that the connection experiences a problem or you meet your daily API imit,
#' an NA value will be returned in the "text" column.
#' @examples
#' \dontrun{
#' get_books("analects")
#' get_books("dao-de-jing")
#' # On average you can download around 3 books before hitting the API limit.
#' my_booklist <- c("analects", "mengzi", "zhuangzi")
#' my_books <- get_books(my_booklist)
#' }
get_books <- function(...) {
  books <- c(...)
  # Every requested name must exist in the package's book index; the
  # length check also rejects a no-argument call. On failure, show the
  # unknown names before stopping.
  if (!all(books %in% ctextclassics::book_list$book) || length(books) == 0) {
    missing_book <- setdiff(books, ctextclassics::book_list$book)
    print(missing_book)
    stop("Books unable to be indexed. Print 'book_list$book' for details\n")
  }
  # Restrict the index to the requested books, then download every chapter
  # of each and stack the per-book data frames row-wise.
  selected_books <- ctextclassics::book_list[ctextclassics::book_list$book %in% books, ]
  my_books <- purrr::map2(selected_books$book, selected_books$chapter,
                          ~ get_chapters(..1, ..2))
  data.frame(do.call(rbind, my_books), stringsAsFactors = FALSE)
}
|
#' Gives the subgroup specific optimal dose vector.
#' Returns a list containing the optimal doses to enroll each subgroup at and the subgroups that should have their accrual suspended temporarily.
#' @param Y Vector containing observed event or censoring times.
#' @param I Vector containing event indicators (1 if patient experiences an event for a patient).
#' @param Doses Vector containing numerical doses assigned to patients in the trial.
#' @param Groups Vector containing group assignment of patients, 1 is baseline group.
#' @param Include Binary vector indicating whether each patient record should be included in the decision making process.
#' @param ID Vector of patient IDs. Can be numeric or character valued.
#' @param T1 Reference time for toxicity.
#' @param Target Target cumulative toxicity probability vector at time T1.
#' @param Dose Vector containing the standardized doses considered.
#' @param Upper Cutoff values used to determine if accrual in a subgroup should be suspended.
#' @param cohort Number of patients needed to be assigned at a dose level prior to escalation.
#' @param Conservative Binary Indicator of Whether conservative escalation, i.e. not allowing escalation until cohort patients have been fully evaluated at the highest tried dose level.
#' @param meanmu Prior mean for baseline intercept.
#' @param meanslope Prior mean for baseline slope.
#' @param MeanInts Vector of prior means for the group specific intercept parameters.
#' @param MeanSlopes Vector of prior means for the group specific slope parameters.
#' @param varint Prior variance for the intercept parameters.
#' @param varbeta Prior variance for the slope parameters.
#' @param phetero Prior probability of heterogeneous subgroups.
#' @param Borrow Parameter to specify subgroup borrowing/clustering. 0=No borrowing, 1=Borrowing but no clustering, 2=Borrowing and clustering.
#' @param B Number of Iterations to run for MCMC
#' @return Returns a list containing the design parameters, a vector of optimal doses for each subgroup, a matrix of posterior toxicity probabilities at each dose level within each subgroup, subgroup stopping probabilities, clustering summaries (when Borrow=2), the patient-level data, and counts of fully evaluated patients at each dose.
#' @references
#' [1] Chapple and Thall (2017), Subgroup Specific Dose Finding in Phase I Clinical Trials Based on Time to Toxicity Within a Fixed Follow Up Period.
#' @examples
#' T1=28 ##Reference time for toxicity
#' Target=rep(.3,2) ##Target toxicity probability
#' Upper=rep(.95,2) ##Upper cutoffs for excessive toxicity
#' ##How many patients in each subgroup have been assigned at each dose level?
#' cohort=3 ##Cohort size required for escalation
#' Conservative = 1 ##Conservative escalation
#' ##Only can escalate with a fully evaluated cohort at the highest dose level.
#' ##Matrix of number of patients tried or fully evaluated at each dose level.
#' ##Hyperparameters
#' meanmu=-0.4467184 ##Common Intercept hypermean
#' meanslope= 0.8861634 ##Common slope hypermean
#' MeanInts = -0.5205379 ##Group Intercept hypermeans
#' MeanSlopes = 0.1888923 ##Group slope hypermeans
#' varint=5 #Prior Variance of the intercept betas
#' varbeta=1 ##Prior Variance of slope betas
#' phetero=.9 ##Prior Probability of heterogeneity
#' Borrow=0 ##Borrowing specification, 0=none, 1=some, 2=clustering.
#' B=5000 ##Number of iterations
#' Borrow=2
#' Y=c(28,26,29,28,29,5,1)
#' RawDose=c(350,420,530,660,825)
#' Dose=(RawDose-mean(RawDose))/sd(RawDose)
#' I <- c(0,0,0,0,0,0,0)
#' Doses <- rep(2,7)
#' Groups <- c(0,1,1,0,0,1,1)
#' Include <- rep(1,7)
#' ID=1:length(Y)
#' Z=GetSubTite(Y, I,Doses, Groups, Include,ID,cohort, Conservative,
#' T1,Target, Upper, Dose, meanmu, meanslope,
#' MeanInts, MeanSlopes ,varint,varbeta,phetero, Borrow,B)
#' Z
#' @export
GetSubTite=function(Y, I,Doses, Groups, Include, ID, cohort,Conservative,T1, Target,
                    Upper, Dose, meanmu, meanslope,
                    MeanInts, MeanSlopes ,varint,varbeta,phetero,Borrow,B){
  ## Copy of the dose-level assignments as supplied (not referenced again below).
  Doses2=Doses
  ## Patients must be recorded by dose NUMBER (1,2,...), not standardized dose value.
  if(sum(Doses %in% Dose)>0){
    warning("Doses assigned to patients should be numbered.")
  }
  ###
  ## Patient-level snapshot, returned verbatim in the output list under "Data".
  DATAHOLD = data.frame(cbind(ID,round(Y,3),I,Doses,Groups,Include))
  colnames(DATAHOLD)=c("Patient ID","Toxicity/Censoring Time","Toxicity Indicator","Assigned Dose #","Subgroup","Included?")
  ###Re-write Y so that it is within the appropriate follow up window.
  Y[Y>T1]=T1 ##
  ## These lengths must all equal the number of subgroups; the hypermean
  ## vectors exclude the baseline group, hence the +1.
  ERRHOLD=c(length(Target), length(Upper), length(MeanInts)+1, length(MeanSlopes)+1)
  ## Error flag. NOTE(review): the name HOLD is re-used later for unrelated
  ## objects (a counts matrix, then a cluster-probability vector).
  HOLD=0
  ##Check for errors in dimension specification
  for(k in 1:length(ERRHOLD)){
    for(m in 1:length(ERRHOLD)){
      if(ERRHOLD[k] != ERRHOLD[m]){
        HOLD=1
      }
    }
  }
  if(HOLD==1){
    message("Target toxicity vector, toxicity threshold, or subgroup hyperparameter vector has incorrect dimensions")
  }else{
    ###Contains Design parameters
    DESIGN = as.list(rep(NA,14))
    names(DESIGN) = c("Standardized dose levels:",
                      "Target toxicity probabilities:",
                      "Posterior probability thresholds for overly toxic subgroups:",
                      "Escalation scheme",
                      "Prior mean for the baseline intercept = ",
                      "Prior mean for the baseline slope = ",
                      "Prior means for other subgroup intercepts = ",
                      "Prior means for other subgroup slopes = ",
                      "Prior intercept variance = ",
                      "Prior slope variance = ",
                      "Borrow/Clustering Settings",
                      "Borrow Indicator",
                      "DLT observation time: ",
                      "Number of MCMC iterations = ")
    DESIGN[[5]]=meanmu
    DESIGN[[6]]=meanslope
    DESIGN[[7]]=MeanInts
    DESIGN[[8]]=MeanSlopes
    DESIGN[[9]]=varint
    DESIGN[[10]]=varbeta
    DESIGN[[12]]=Borrow
    DESIGN[[13]]=T1
    DESIGN[[14]]=B
    DESIGN[[1]]=Dose
    names(Target)=paste0("Subgroup ",1:length(Target))
    DESIGN[[2]]=Target
    names(Upper)=paste0("Subgroup ",1:length(Target))
    DESIGN[[3]]=Upper
    ## Human-readable description of the escalation scheme.
    if(Conservative==1){
      DESIGN[[4]]=paste0("Conservative escalation requiring ", cohort," patients to be fully evaluated.")
    }else{
      DESIGN[[4]]=paste0("Aggressive escalation requiring ", cohort," patients to be treated, but not fully evaluated.")
    }
    ## Human-readable description of the borrowing/clustering setting.
    if(Borrow==2){
      DESIGN[[11]]=paste0("Clustering and borrowing with a prior probability of heterogeneity of: ",phetero)
    }
    if(Borrow==1){
      DESIGN[[11]]=paste0("Borrowing between the subgroups but no clustering")
    }
    if(Borrow==0){
      DESIGN[[11]]=paste0("No borrowing or clustering between subgroups")
    }
    ## Time-stamp and version-stamp the design summary.
    TIME = paste0("Model run on: ",Sys.time())
    DESIGN=c(TIME,DESIGN)
    names(DESIGN)[[1]]="Date/Time of escalation decision"
    DESIGN = c("Sub-TITE Package Version: 4.0.0",DESIGN)
    ###Check if the Groups are labeled incorrectly
    ##Groups should be labeled 0, ..., G-1
    ##If they are not labeled right, re-write them
    if(min(Groups)!=0 | max(Groups) != (length(Target)-1)){
      warning("Subgroup vector is not labeled correctly! Please re-format from 0,...,G-1.")
    }else{
      ###Let's write our dose-tried matrix...
      ###The vector Doses will now have the numeric values
      DoseTried = matrix(nrow=length(Upper),ncol=length(Dose))
      NumTreated = DoseTried
      ##Let's fill this in manually....
      ## NumTreated[k, j] = number of subgroup-(k-1) patients assigned dose level j.
      for(j in 1:length(Dose)){
        for(k in 1:length(Upper)){
          NumTreated[k,j]=sum(Groups==(k-1) & Doses==j)
        }
      }
      ###Holder for number of patients tried
      NUMTREAT=NumTreated
      ## NUMTOX[k, j] = number of DLTs observed in subgroup k-1 at dose level j.
      NUMTOX = NUMTREAT
      for(j in 1:length(Dose)){
        for(k in 1:length(Upper)){
          NUMTOX[k,j]=sum(Groups==(k-1) & I==1 & Doses==j)
        }
      }
      ###re-write lower doses....
      ## A skipped-over lower dose is treated as if a full cohort had been seen
      ## there, so the escalation rule below is not blocked by it.
      for(k in 1:nrow(NumTreated)){
        for(j in 1:(ncol(NumTreated)-1)){
          if(NumTreated[k,j]==0){
            ###Have any doses above this been used?
            if(sum(NumTreated[k,(j+1):ncol(NumTreated)])>0){
              ##Some doses above this have been tried already
              NumTreated[k,j]=cohort
            }
          }
        }
      }
      ###Output the number of patients treated and dlt at each dose
      ## HOLD here becomes a "DLTs/treated" character matrix for display.
      HOLD = NUMTREAT
      for(k in 1:nrow(NUMTREAT)){
        for(j in 1:ncol(NUMTREAT)){
          HOLD[k,j]=paste0(NUMTOX[k,j],"/",NUMTREAT[k,j])
        }
      }
      colnames(HOLD)=1:ncol(NUMTREAT)
      rownames(HOLD)=paste0("Subgroup ",1:nrow(HOLD))
      HOLDTHIS = noquote(HOLD)
      ###Do we have conservative escalation?
      if(Conservative==1){
        ###We have conservative escalation, so let's re-write the NumTreated (DoesTried) Matrix
        ###So that we cannot escalate unless we have fully evaluated the largest dose level
        for(k in 1:nrow(NumTreated)){
          ###What is the highest TRIED dose level?
          which1=max(which(NumTreated[k,]>0))
          ##Are we at the highest dose level?
          if(which1<ncol(NumTreated)){
            ###We can still escalate
            ###Let's check to make sure that this dose level has the right number of fully evaluated patients
            ## "Fully evaluated" = followed to T1 without a DLT, or had a DLT.
            NUM = sum( (Y[Doses==which1 & Groups==(k-1)]>=T1 & I[Doses==which1 & Groups==(k-1)]==0) | I[Doses==which1 & Groups==(k-1)]==1)
            ###Now re-write this with NUM
            NumTreated[k,which1]=NUM
          }
        }
      }
      DoseTried=NumTreated
      ##Repackage MeanInts and MeanSlopes
      ## Prepend the baseline group's (zero) hypermeans for the MCMC sampler.
      MeanInts=c(0,MeanInts)
      MeanSlopes=c(0,MeanSlopes)
      Stopped=rep(0,nrow(DoseTried))
      ##This matrix contains posterior mean toxicity probabilities at each dose for each subgroup.
      ##The last column in the matrix has whether or not each group should be stopped.
      RESULTS=MCMC( Y,I, Dose[Doses], Groups, T1, Target, Upper, Dose, meanmu, meanslope,
                    MeanInts, MeanSlopes, varint, varbeta, phetero, Stopped, length(Y), Borrow,B)
      CLUST=RESULTS[[2]]
      ## NOTE(review): RESULTS1 is only referenced in a commented-out print below.
      RESULTS1=RESULTS ##HOLDER FOR STOPPED GROUPS
      RESULTS=RESULTS[[1]]
      Stopped= RESULTS[,ncol(DoseTried)+1]
      ##Get optimal Dose
      OptDose= Stopped
      ##Check if ALL groups are stopped, if so run a separate trial in each.
      if(Borrow>0){
        if(sum(Stopped)==length(Stopped)){
          message("Borrowing has caused all groups to stop due to excessive toxicity.
Separate models will be fit to each group to ensure the design is not stopping too early due to borrowing.")
          ## Refit with Borrow=0 (independent subgroup models).
          RESULTS=MCMC( Y,I, Dose[Doses], Groups, T1, Target, Upper, Dose, meanmu, meanslope,
                        MeanInts, MeanSlopes, varint, varbeta, phetero, Stopped=rep(0,nrow(DoseTried)), length(Y), 0,B)
          RESULTS1=RESULTS ##HOLDER FOR STOPPED GROUPS
          RESULTS=RESULTS[[1]]
        }
      }
      PROBS = data.frame(RESULTS[,1:ncol(DoseTried)]) ##Used for looping over probs
      ##Can only choose a dose level if we've escalated correctly
      PROBS1=PROBS
      Y1=DoseTried<cohort ##Flag which dose levels haven't been treated enough.
      ##Cant escalate past that dose
      for(k in 1:length(Stopped)){
        j=1
        if(sum(1-Y1[k,])<ncol(DoseTried)){
          ##Checks if all doses meet cohort criteria
          ## Find the first dose level without a full cohort for this subgroup.
          while(Y1[k,j]==FALSE){
            j=j+1
          }
          ##We can go up one more
          j=j+1
          ##Are we at the highest dose level? If not, we need to check if we can escalate.
          if(j<ncol(DoseTried)){
            ##Reset PROB1 with -1000, so now we can't pick it
            ## (sentinel makes |prob - Target| huge, so those doses never win).
            PROBS1[k,j:ncol(DoseTried)]=-10000
          }
        }
      }
      ##Now get optimal doses
      for(k in 1:length(Stopped)){
        if(Stopped[k]==0){
          a1 = abs(PROBS1[k,]-Target[k])
          OptDose[k]=which(a1==min(a1)) ##Minimum distance from Target probability
        }else{
          OptDose[k]=NA ##Na for stopped groups
        }
      }
      Z=as.list(c(0,0,0,0,0,0))
      ## Print the per-subgroup recommendations to the console.
      cat("
Decisions for next patient or optimal dose.
")
      for(k in 1:length(Stopped)){
        if(!is.na(OptDose[k])){
          cat(paste0("Next reccomended dose level for subgroup ",k,": Dose ",OptDose[k],"
"))
        }else{
          cat(paste0("Subgroup ",k," is too toxic, do not enroll these patients at this time.
"))
        }
      }
      Z[[1]]=OptDose
      for(k in 1:nrow(PROBS)){
        rownames(PROBS)[k]=paste0("Subgroup ", k)
      }
      ##Columns by dose levels
      colnames(PROBS)=1:ncol(DoseTried)
      Z[[2]]=PROBS
      ##Posterior probability of stopping
      Z[[3]]=RESULTS[,ncol(RESULTS)]
      Z[[5]]=HOLDTHIS
      names(Z)=c("Optimal Dose","Posterior Mean Toxicity Probability",
                 "Posterior Probability of Overly Toxic Subgroup",
                 "Clustering Parameters", "Number of Treated and DLTs",
                 "Data")
      if(Borrow==2){
        ###Cluster membership...
        ## Shift 0-based cluster labels from the sampler to 1-based.
        CLUST=CLUST+1
        ##Cluster assignment
        ## CLUST1[k, j] = posterior probability subgroup k belongs to latent cluster j.
        CLUST1 = as.data.frame(matrix(nrow=(nrow(PROBS)),ncol=nrow(PROBS)))
        for(k in 1:nrow(PROBS)){
          for(j in 1:nrow(PROBS)){
            CLUST1[k,j]=mean(CLUST[,k]==j)
          }
        }
        for(k in 1:nrow(PROBS)){
          rownames(CLUST1)[k]=paste0("Subgroup ", k)
          colnames(CLUST1)[k]=paste0("Latent Subgroup",k)
        }
        ## NCLUST[j] = number of distinct clusters in MCMC draw j.
        NCLUST=rep(0, nrow(CLUST))
        for(j in 1:nrow(CLUST)){
          NCLUST[j] = length(unique(CLUST[j,]))
        }
        ## print(RESULTS1[[2]])
        Z2=as.list(c(0,0,0))
        Z2[[1]]=CLUST1
        Z2[[2]]=table(NCLUST)/nrow(CLUST)
        ##Finally, let's get the clustering breakdown
        ##For G groups, there are (G choose 2) + (G choose 3) + .... (G choose G-1) + 2 cluster configurations
        G=nrow(DoseTried)
        if(G==2){
          HOLD = c(0,0)
          HOLD[1]=mean(CLUST[,1]==CLUST[,2])
          HOLD[2]=mean(CLUST[,1]!=CLUST[,2])
          names(HOLD)=c("1-2","1,2")
        }else{
          if(G<=4){
            NClust=2
            for(j in 2:(G-1)){
              NClust = NClust + choose(G,j)
            }
            ##Make Matrix
            HOLD = rep(NA,NClust)
            ##First do the pairs
            if(G==3){
              ##5 clusters
              ##1-2,3
              ##2-3, 1
              ##1-3, 2
              ##1,2,3
              ##1-2-3
              HOLD[1]=mean((CLUST[,1]==CLUST[,2])*(CLUST[,3]!=CLUST[,1]))
              HOLD[2]=mean((CLUST[,2]==CLUST[,3])*(CLUST[,2]!=CLUST[,1]))
              HOLD[3]=mean((CLUST[,1]==CLUST[,3])*(CLUST[,2]!=CLUST[,1]))
              HOLD[4]=mean(NCLUST==G)
              HOLD[5]=mean(NCLUST==1)
              names(HOLD)=c("1-2,3", "2-3,1","1-3,2","1,2,3","1-2-3")
            }
            if(G==4){
              HOLD=rep(NA,15)
              ## NOTE(review): the label "3-4,1,3" below looks like a typo for
              ## "3-4,1,2", and HOLD[9] repeats the 2-3 pairing already tested
              ## by HOLD[7] instead of testing CLUST[,3]==CLUST[,4] -- confirm
              ## against the intended partition before relying on this entry.
              names(HOLD)=c("1-2,3-4", "1-3,2-4","1-4,2-3",
                            "1-2,3,4", "1-3,2,4", "1-4,2,3",
                            "2-3,1,4","2-4,1,3","3-4,1,3",
                            "1-2-3,4", "1-2-4,3","1-3-4,2",
                            "2-3-4,1","1,2,3,4","1-2-3-4")
              ##15 clusters
              ##1-2,3-4
              ##1-3, 2-4
              ##1-4, 2-3 *
              HOLD[1]=mean((CLUST[,1]==CLUST[,2])*(CLUST[,3]==CLUST[,4])*(CLUST[,3] != CLUST[,1]))
              HOLD[2]=mean((CLUST[,1]==CLUST[,3])*(CLUST[,2]==CLUST[,4])*(CLUST[,4] != CLUST[,1]))
              HOLD[3]=mean((CLUST[,1]==CLUST[,4])*(CLUST[,2]==CLUST[,3])*(CLUST[,3] != CLUST[,1]))
              ##1-2, 3,4
              #1-3, 2,4
              #1-4, 2,3
              #2-3,1,4
              #2-4,1,3
              #3-4, 1, 2
              HOLD[4]=mean((CLUST[,1]==CLUST[,2])*(CLUST[,3]!=CLUST[,1])*(CLUST[,3] != CLUST[,4])*(CLUST[,4] != CLUST[,1]))
              HOLD[5]=mean((CLUST[,1]==CLUST[,3])*(CLUST[,2]!=CLUST[,4])*(CLUST[,4] != CLUST[,1])*(CLUST[,4] != CLUST[,2]))
              HOLD[6]=mean((CLUST[,1]==CLUST[,4])*(CLUST[,2]!=CLUST[,3])*(CLUST[,3] != CLUST[,1])*(CLUST[,2] != CLUST[,1]))
              HOLD[7]=mean((CLUST[,2]==CLUST[,3])*(CLUST[,1]!=CLUST[,2])*(CLUST[,1] != CLUST[,4])*(CLUST[,4] != CLUST[,2]))
              HOLD[8]=mean((CLUST[,2]==CLUST[,4])*(CLUST[,1]!=CLUST[,4])*(CLUST[,1] != CLUST[,2])*(CLUST[,3] != CLUST[,2]))
              HOLD[9]=mean((CLUST[,2]==CLUST[,3])*(CLUST[,1]!=CLUST[,2])*(CLUST[,4] != CLUST[,2])*(CLUST[,1] != CLUST[,4]))
              ##1-2-3,4
              ##1-2-4,3
              ##1-3-4,2*
              ##2-3-4,1
              ##1,2,3,4
              ##1-2-3-4
              ##3 pairs
              ##4 3 ways
              HOLD[10]=mean((CLUST[,1]==CLUST[,2])*(CLUST[,1]==CLUST[,3])*(CLUST[,1] != CLUST[,4]))
              HOLD[11]=mean((CLUST[,1]==CLUST[,2])*(CLUST[,1]==CLUST[,4])*(CLUST[,1] != CLUST[,3]))
              HOLD[12]=mean((CLUST[,1]==CLUST[,3])*(CLUST[,1]==CLUST[,4])*(CLUST[,1] != CLUST[,2]))
              HOLD[13]=mean((CLUST[,2]==CLUST[,3])*(CLUST[,3]==CLUST[,4])*(CLUST[,1] != CLUST[,4]))
              HOLD[14]=mean(NCLUST==G)
              HOLD[15]=mean(NCLUST==1)
            }
          }else{
            HOLD = "Cannot Return Unique Clusters for more than 4 subgroups"
          }
        }
        Z2[[3]]=HOLD
        names(Z2)= c("Latent Subgroup Posterior","Posterior # of Clusters","Posterior Probability Cluster Configuration")
        Z[[4]]=Z2
      }else{
        Z[[4]]="No Clustering"
      }
      ###Write the dataframe into the last item of the list
      Z[[length(Z)]]=DATAHOLD
      ## Prepend the design summary as the first list entry.
      Z=c(1,Z)
      Z[[1]]=DESIGN
      names(Z)[[1]]="Design Parameters"
      ###Add last... # completely evaluated at each dose...
      Z=c(Z,1)
      names(Z)[[length(Z)]]="Number Fully Evaluated At Each Dose"
      ## MAT[j, k] = subgroup-(j-1) patients fully evaluated at dose k
      ## (either had a DLT, or were followed to T1 without one).
      MAT = matrix(nrow=length(Target),ncol=length(Dose))
      rownames(MAT)=paste0("Subgroup ",1:length(Target))
      colnames(MAT)=1:length(Dose)
      ##Fill in....
      for(j in 1:nrow(MAT)){
        for(k in 1:ncol(MAT)){
          MAT[j,k]=sum(I[Groups==(j-1) & Doses==k]) + sum((1-I[Groups==(j-1) & Doses==k]) & Y[Groups==(j-1) & Doses==k]>=T1)
        }
      }
      Z[[length(Z)]]=MAT
      return(Z)
    }
  }
}
| /fuzzedpackages/SubTite/R/GetSubTite.R | no_license | akhikolla/testpackages | R | false | false | 18,729 | r | #' Gives the subgroup specific optimal dose vector.
#' Returns a list containing the optimal doses to enroll each subgroup at and the subgroups that should have their accrual suspended temporarily.
#' @param Y Vector containing observed event or censoring times.
#' @param I Vector containing event indicators (1 if patient experiences an event for a patient).
#' @param Doses Vector containing numerical doses assigned to patients in the trial.
#' @param Groups Vector containing group assignment of patients, 1 is baseline group.
#' @param Include Binary vector indicating whether each patient record should be included in the decision making process.
#' @param ID Vector of patient IDs. Can be numeric or character valued.
#' @param T1 Reference time for toxicity.
#' @param Target Target cumulative toxicity probability vector at time T1.
#' @param Dose Vector containing the standardized doses considered.
#' @param Upper Cutoff values used to determine if accrual in a subgroup should be suspended.
#' @param cohort Number of patients needed to be assigned at a dose level prior to escalation.
#' @param Conservative Binary Indicator of Whether conservative escalation, i.e. not allowing escalation until cohort patients have been fully evaluated at the highest tried dose level.
#' @param meanmu Prior mean for baseline intercept.
#' @param meanslope Prior mean for baseline slope.
#' @param MeanInts Vector of prior means for the group specific intercept parameters.
#' @param MeanSlopes Vector of prior means for the group specific slope parameters.
#' @param varint Prior variance for the intercept parameters.
#' @param varbeta Prior variance for the slope parameters.
#' @param phetero Prior probability of heterogeneous subgroups.
#' @param Borrow Parameter to specify subgroup borrowing/clustering. 0=No borrowing, 1=Borrowing but no clustering, 2=Borrowing and clustering.
#' @param B Number of Iterations to run for MCMC
#' @return Returns a list with two objects, a vector of optimal doses for each subgroup and matrix of posterior toxicity probabilities at each dose level within each subgroup.
#' @references
#' [1] Chapple and Thall (2017), Subgroup Specific Dose Finding in Phase I Clinical Trials Based on Time to Toxicity Within a Fixed Follow Up Period.
#' @examples
#' T1=28 ##Reference time for toxicity
#' Target=rep(.3,2) ##Target toxicity probability
#' Upper=rep(.95,2) ##Upper cutoffs for excessive toxicity
#' ##How many patients in each subgroup have been assigned at each dose level?
#' cohort=3 ##Cohort size required for escalation
#' Conservative = 1 ##Conservative escalation
#' ##Only can escalate with a fully evaluated cohort at the highest dose level.
#' ##Matrix of number of patients tried or fully evaluated at each dose level.
#' ##Hyperparameters
#' meanmu=-0.4467184 ##Common Intercept hypermean
#' meanslope= 0.8861634 ##Common slope hypermean
#' MeanInts = -0.5205379 ##Group Intercept hypermeans
#' MeanSlopes = 0.1888923 ##Group slope hypermeans
#' varint=5 #Prior Variance of the intercept betas
#' varbeta=1 ##Prior Variance of slope betas
#' phetero=.9 ##Prior Probability of hetergeneity
#' Borrow=0 ##Borrowing specification, 0=none, 1=some, 2=clustering.
#' B=5000 ##Number of iterations
#' Borrow=2
#' Y=c(28,26,29,28,29,5,1)
#' RawDose=c(350,420,530,660,825)
#' Dose=(RawDose-mean(RawDose))/sd(RawDose)
#' I <- c(0,0,0,0,0,0,0)
#' Doses <- rep(2,7)
#' Groups <- c(0,1,1,0,0,1,1)
#' Include <- rep(1,7)
#' ID=1:length(Y)
#' Z=GetSubTite(Y, I,Doses, Groups, Include,ID,cohort, Conservative,
#' T1,Target, Upper, Dose, meanmu, meanslope,
#' MeanInts, MeanSlopes ,varint,varbeta,phetero, Borrow,B)
#' Z
#'@export
GetSubTite=function(Y, I,Doses, Groups, Include, ID, cohort,Conservative,T1, Target,
Upper, Dose, meanmu, meanslope,
MeanInts, MeanSlopes ,varint,varbeta,phetero,Borrow,B){
Doses2=Doses
if(sum(Doses %in% Dose)>0){
warning("Doses assigned to patients should be numbered.")
}
###
DATAHOLD = data.frame(cbind(ID,round(Y,3),I,Doses,Groups,Include))
colnames(DATAHOLD)=c("Patient ID","Toxicity/Censoring Time","Toxicity Indicator","Assigned Dose #","Subgroup","Included?")
###Re-write Y so that it is within the appropriate follow up window.
Y[Y>T1]=T1 ##
ERRHOLD=c(length(Target), length(Upper), length(MeanInts)+1, length(MeanSlopes)+1)
HOLD=0
##Check for errors in dimension specification
for(k in 1:length(ERRHOLD)){
for(m in 1:length(ERRHOLD)){
if(ERRHOLD[k] != ERRHOLD[m]){
HOLD=1
}
}
}
if(HOLD==1){
message("Target toxicity vector, toxicity threshold, or subgroup hyperparameter vector has incorrect dimensions")
}else{
###Contains Design parameters
DESIGN = as.list(rep(NA,14))
names(DESIGN) = c("Standardized dose levels:",
"Target toxicity probabilities:",
"Posterior probability thresholds for overly toxic subgroups:",
"Escalation scheme",
"Prior mean for the baseline intercept = ",
"Prior mean for the baseline slope = ",
"Prior means for other subgroup intercepts = ",
"Prior means for other subgroup slopes = ",
"Prior intercept variance = ",
"Prior slope variance = ",
"Borrow/Clustering Settings",
"Borrow Indicator",
"DLT observation time: ",
"Number of MCMC iterations = ")
DESIGN[[5]]=meanmu
DESIGN[[6]]=meanslope
DESIGN[[7]]=MeanInts
DESIGN[[8]]=MeanSlopes
DESIGN[[9]]=varint
DESIGN[[10]]=varbeta
DESIGN[[12]]=Borrow
DESIGN[[13]]=T1
DESIGN[[14]]=B
DESIGN[[1]]=Dose
names(Target)=paste0("Subgroup ",1:length(Target))
DESIGN[[2]]=Target
names(Upper)=paste0("Subgroup ",1:length(Target))
DESIGN[[3]]=Upper
if(Conservative==1){
DESIGN[[4]]=paste0("Conservative escalation requiring ", cohort," patients to be fully evaluated.")
}else{
DESIGN[[4]]=paste0("Aggressive escalation requiring ", cohort," patients to be treated, but not fully evaluated.")
}
if(Borrow==2){
DESIGN[[11]]=paste0("Clustering and borrowing with a prior probability of heterogeneity of: ",phetero)
}
if(Borrow==1){
DESIGN[[11]]=paste0("Borrowing between the subgroups but no clustering")
}
if(Borrow==0){
DESIGN[[11]]=paste0("No borrowing or clustering between subgroups")
}
TIME = paste0("Model run on: ",Sys.time())
DESIGN=c(TIME,DESIGN)
names(DESIGN)[[1]]="Date/Time of escalation decision"
DESIGN = c("Sub-TITE Package Version: 4.0.0",DESIGN)
###Check if the Groups are labeled incorrectly
##Groups should be labeled 0, ..., G-1
##If they are not labeled right, re-write them
if(min(Groups)!=0 | max(Groups) != (length(Target)-1)){
warning("Subgroup vector is not labeled correctly! Please re-format from 0,...,G-1.")
}else{
###Let's write our dose-tried matrix...
###The vector Doses will now have the numeric values
DoseTried = matrix(nrow=length(Upper),ncol=length(Dose))
NumTreated = DoseTried
##Let's fill this in manually....
for(j in 1:length(Dose)){
for(k in 1:length(Upper)){
NumTreated[k,j]=sum(Groups==(k-1) & Doses==j)
}
}
###Holder for number of patients tried
NUMTREAT=NumTreated
NUMTOX = NUMTREAT
for(j in 1:length(Dose)){
for(k in 1:length(Upper)){
NUMTOX[k,j]=sum(Groups==(k-1) & I==1 & Doses==j)
}
}
###re-write lower doses....
for(k in 1:nrow(NumTreated)){
for(j in 1:(ncol(NumTreated)-1)){
if(NumTreated[k,j]==0){
###Have any doses above this been used?
if(sum(NumTreated[k,(j+1):ncol(NumTreated)])>0){
##Some doses above this have been tried already
NumTreated[k,j]=cohort
}
}
}
}
###Output the number of patients treated and dlt at each dose
HOLD = NUMTREAT
for(k in 1:nrow(NUMTREAT)){
for(j in 1:ncol(NUMTREAT)){
HOLD[k,j]=paste0(NUMTOX[k,j],"/",NUMTREAT[k,j])
}
}
colnames(HOLD)=1:ncol(NUMTREAT)
rownames(HOLD)=paste0("Subgroup ",1:nrow(HOLD))
HOLDTHIS = noquote(HOLD)
###Do we have conservative escalation?
if(Conservative==1){
###We have conservative escalation, so let's re-write the NumTreated (DoesTried) Matrix
###So that we cannot escalate unless we have fully evaluated the largest dose level
for(k in 1:nrow(NumTreated)){
###What is the highest TRIED dose level?
which1=max(which(NumTreated[k,]>0))
##Are we at the highest dose level?
if(which1<ncol(NumTreated)){
###We can still escalate
###Let's check to make sure that this dose level has the right number of fully evaluated patients
NUM = sum( (Y[Doses==which1 & Groups==(k-1)]>=T1 & I[Doses==which1 & Groups==(k-1)]==0) | I[Doses==which1 & Groups==(k-1)]==1)
###Now re-write this with NUM
NumTreated[k,which1]=NUM
}
}
}
DoseTried=NumTreated
##Repackage MeanInts and MeanSlopes
MeanInts=c(0,MeanInts)
MeanSlopes=c(0,MeanSlopes)
Stopped=rep(0,nrow(DoseTried))
##This matrix contains posterior mean toxicity probabilities at each dose for each subgroup.
##The last column in the matrix has whether or not each group should be stopped.
RESULTS=MCMC( Y,I, Dose[Doses], Groups, T1, Target, Upper, Dose, meanmu, meanslope,
MeanInts, MeanSlopes, varint, varbeta, phetero, Stopped, length(Y), Borrow,B)
CLUST=RESULTS[[2]]
RESULTS1=RESULTS ##HOLDER FOR STOPPED GROUPS
RESULTS=RESULTS[[1]]
Stopped= RESULTS[,ncol(DoseTried)+1]
##Get optimal Dose
OptDose= Stopped
##Check if ALL groups are stopped, if so run a separate trial in each.
if(Borrow>0){
if(sum(Stopped)==length(Stopped)){
message("Borrowing has caused all groups to stop due to excessive toxicity.
Separate models will be fit to each group to ensure the design is not stopping too early due to borrowing.")
RESULTS=MCMC( Y,I, Dose[Doses], Groups, T1, Target, Upper, Dose, meanmu, meanslope,
MeanInts, MeanSlopes, varint, varbeta, phetero, Stopped=rep(0,nrow(DoseTried)), length(Y), 0,B)
RESULTS1=RESULTS ##HOLDER FOR STOPPED GROUPS
RESULTS=RESULTS[[1]]
}
}
PROBS = data.frame(RESULTS[,1:ncol(DoseTried)]) ##Used for looping over probs
##Can only choose a dose level if we've escalated correctly
PROBS1=PROBS
Y1=DoseTried<cohort ##Flag which dose levels haven't been treated enough.
##Cant escalate past that dose
for(k in 1:length(Stopped)){
j=1
if(sum(1-Y1[k,])<ncol(DoseTried)){
##Checks if all doses meet cohort criteria
while(Y1[k,j]==FALSE){
j=j+1
}
##We can go up one more
j=j+1
##Are we at the highest dose level? If not, we need to check if we can escalate.
if(j<ncol(DoseTried)){
##Reset PROB1 with -1000, so now we can't pick it
PROBS1[k,j:ncol(DoseTried)]=-10000
}
}
}
##Now get optimal doses
for(k in 1:length(Stopped)){
if(Stopped[k]==0){
a1 = abs(PROBS1[k,]-Target[k])
OptDose[k]=which(a1==min(a1)) ##Minimum distance from Target probability
}else{
OptDose[k]=NA ##Na for stopped groups
}
}
Z=as.list(c(0,0,0,0,0,0))
cat("
Decisions for next patient or optimal dose.
")
for(k in 1:length(Stopped)){
if(!is.na(OptDose[k])){
cat(paste0("Next reccomended dose level for subgroup ",k,": Dose ",OptDose[k],"
"))
}else{
cat(paste0("Subgroup ",k," is too toxic, do not enroll these patients at this time.
"))
}
}
Z[[1]]=OptDose
for(k in 1:nrow(PROBS)){
rownames(PROBS)[k]=paste0("Subgroup ", k)
}
##Columns by dose levels
colnames(PROBS)=1:ncol(DoseTried)
Z[[2]]=PROBS
##Posterior probability of stopping
Z[[3]]=RESULTS[,ncol(RESULTS)]
Z[[5]]=HOLDTHIS
names(Z)=c("Optimal Dose","Posterior Mean Toxicity Probability",
"Posterior Probability of Overly Toxic Subgroup",
"Clustering Parameters", "Number of Treated and DLTs",
"Data")
if(Borrow==2){
###Cluster membership...
CLUST=CLUST+1
##Cluster assignment
CLUST1 = as.data.frame(matrix(nrow=(nrow(PROBS)),ncol=nrow(PROBS)))
for(k in 1:nrow(PROBS)){
for(j in 1:nrow(PROBS)){
CLUST1[k,j]=mean(CLUST[,k]==j)
}
}
for(k in 1:nrow(PROBS)){
rownames(CLUST1)[k]=paste0("Subgroup ", k)
colnames(CLUST1)[k]=paste0("Latent Subgroup",k)
}
NCLUST=rep(0, nrow(CLUST))
for(j in 1:nrow(CLUST)){
NCLUST[j] = length(unique(CLUST[j,]))
}
## print(RESULTS1[[2]])
Z2=as.list(c(0,0,0))
Z2[[1]]=CLUST1
Z2[[2]]=table(NCLUST)/nrow(CLUST)
##Finally, let's get the clustering breakdown
##For G groups, there are (G choose 2) + (G choose 3) + .... (G choose G-1) + 2 cluster configurations
G=nrow(DoseTried)
if(G==2){
HOLD = c(0,0)
HOLD[1]=mean(CLUST[,1]==CLUST[,2])
HOLD[2]=mean(CLUST[,1]!=CLUST[,2])
names(HOLD)=c("1-2","1,2")
}else{
if(G<=4){
NClust=2
for(j in 2:(G-1)){
NClust = NClust + choose(G,j)
}
##Make Matrix
HOLD = rep(NA,NClust)
##First do the pairs
if(G==3){
##5 clusters
##1-2,3
##2-3, 1
##1-3, 2
##1,2,3
##1-2-3
HOLD[1]=mean((CLUST[,1]==CLUST[,2])*(CLUST[,3]!=CLUST[,1]))
HOLD[2]=mean((CLUST[,2]==CLUST[,3])*(CLUST[,2]!=CLUST[,1]))
HOLD[3]=mean((CLUST[,1]==CLUST[,3])*(CLUST[,2]!=CLUST[,1]))
HOLD[4]=mean(NCLUST==G)
HOLD[5]=mean(NCLUST==1)
names(HOLD)=c("1-2,3", "2-3,1","1-3,2","1,2,3","1-2-3")
}
if(G==4){
HOLD=rep(NA,15)
names(HOLD)=c("1-2,3-4", "1-3,2-4","1-4,2-3",
"1-2,3,4", "1-3,2,4", "1-4,2,3",
"2-3,1,4","2-4,1,3","3-4,1,3",
"1-2-3,4", "1-2-4,3","1-3-4,2",
"2-3-4,1","1,2,3,4","1-2-3-4")
##15 clusters
##1-2,3-4
##1-3, 2-4
##1-4, 2-3 *
HOLD[1]=mean((CLUST[,1]==CLUST[,2])*(CLUST[,3]==CLUST[,4])*(CLUST[,3] != CLUST[,1]))
HOLD[2]=mean((CLUST[,1]==CLUST[,3])*(CLUST[,2]==CLUST[,4])*(CLUST[,4] != CLUST[,1]))
HOLD[3]=mean((CLUST[,1]==CLUST[,4])*(CLUST[,2]==CLUST[,3])*(CLUST[,3] != CLUST[,1]))
##1-2, 3,4
#1-3, 2,4
#1-4, 2,3
#2-3,1,4
#2-4,1,3
#3-4, 1, 2
HOLD[4]=mean((CLUST[,1]==CLUST[,2])*(CLUST[,3]!=CLUST[,1])*(CLUST[,3] != CLUST[,4])*(CLUST[,4] != CLUST[,1]))
HOLD[5]=mean((CLUST[,1]==CLUST[,3])*(CLUST[,2]!=CLUST[,4])*(CLUST[,4] != CLUST[,1])*(CLUST[,4] != CLUST[,2]))
HOLD[6]=mean((CLUST[,1]==CLUST[,4])*(CLUST[,2]!=CLUST[,3])*(CLUST[,3] != CLUST[,1])*(CLUST[,2] != CLUST[,1]))
HOLD[7]=mean((CLUST[,2]==CLUST[,3])*(CLUST[,1]!=CLUST[,2])*(CLUST[,1] != CLUST[,4])*(CLUST[,4] != CLUST[,2]))
HOLD[8]=mean((CLUST[,2]==CLUST[,4])*(CLUST[,1]!=CLUST[,4])*(CLUST[,1] != CLUST[,2])*(CLUST[,3] != CLUST[,2]))
HOLD[9]=mean((CLUST[,2]==CLUST[,3])*(CLUST[,1]!=CLUST[,2])*(CLUST[,4] != CLUST[,2])*(CLUST[,1] != CLUST[,4]))
##1-2-3,4
##1-2-4,3
##1-3-4,2*
##2-3-4,1
##1,2,3,4
##1-2-3-4
##3 pairs
##4 3 ways
HOLD[10]=mean((CLUST[,1]==CLUST[,2])*(CLUST[,1]==CLUST[,3])*(CLUST[,1] != CLUST[,4]))
HOLD[11]=mean((CLUST[,1]==CLUST[,2])*(CLUST[,1]==CLUST[,4])*(CLUST[,1] != CLUST[,3]))
HOLD[12]=mean((CLUST[,1]==CLUST[,3])*(CLUST[,1]==CLUST[,4])*(CLUST[,1] != CLUST[,2]))
HOLD[13]=mean((CLUST[,2]==CLUST[,3])*(CLUST[,3]==CLUST[,4])*(CLUST[,1] != CLUST[,4]))
HOLD[14]=mean(NCLUST==G)
HOLD[15]=mean(NCLUST==1)
}
}else{
HOLD = "Cannot Return Unique Clusters for more than 4 subgroups"
}
}
Z2[[3]]=HOLD
names(Z2)= c("Latent Subgroup Posterior","Posterior # of Clusters","Posterior Probability Cluster Configuration")
Z[[4]]=Z2
}else{
Z[[4]]="No Clustering"
}
###Write the dataframe into the last item of the list
Z[[length(Z)]]=DATAHOLD
Z=c(1,Z)
Z[[1]]=DESIGN
names(Z)[[1]]="Design Parameters"
###Add last... # completely evaluated at each dose...
Z=c(Z,1)
names(Z)[[length(Z)]]="Number Fully Evaluated At Each Dose"
MAT = matrix(nrow=length(Target),ncol=length(Dose))
rownames(MAT)=paste0("Subgroup ",1:length(Target))
colnames(MAT)=1:length(Dose)
##Fill in....
for(j in 1:nrow(MAT)){
for(k in 1:ncol(MAT)){
MAT[j,k]=sum(I[Groups==(j-1) & Doses==k]) + sum((1-I[Groups==(j-1) & Doses==k]) & Y[Groups==(j-1) & Doses==k]>=T1)
}
}
Z[[length(Z)]]=MAT
return(Z)
}
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cor_bakers_gamma.R
\name{cor_bakers_gamma}
\alias{cor_bakers_gamma}
\alias{cor_bakers_gamma.default}
\alias{cor_bakers_gamma.dendlist}
\alias{cor_bakers_gamma.dendrogram}
\alias{cor_bakers_gamma.hclust}
\title{Baker's Gamma correlation coefficient}
\usage{
cor_bakers_gamma(dend1, ...)
\method{cor_bakers_gamma}{default}(dend1, dend2, ...)
\method{cor_bakers_gamma}{dendrogram}(dend1, dend2,
use_labels_not_values = TRUE, to_plot = FALSE,
warn = dendextend_options("warn"), ...)
\method{cor_bakers_gamma}{hclust}(dend1, dend2, use_labels_not_values = TRUE,
to_plot = FALSE, warn = dendextend_options("warn"), ...)
\method{cor_bakers_gamma}{dendlist}(dend1, which = c(1L, 2L), ...)
}
\arguments{
\item{dend1}{a tree (dendrogram/hclust/phylo)}
\item{...}{Passed to \link[dendextend]{cutree}.}
\item{dend2}{a tree (dendrogram/hclust/phylo)}
\item{use_labels_not_values}{logical (TRUE). Should labels be used in the
k matrix when using cutree? Set to FALSE will make the function a bit faster
BUT, it assumes the two trees have the exact same leaves order values for
each labels. This can be assured by using \link{match_order_by_labels}.}
\item{to_plot}{logical (FALSE). Passed to \link{bakers_gamma_for_2_k_matrix}}
\item{warn}{logical (default from dendextend_options("warn") is FALSE).
Set if warning are to be issued, it is safer to keep this at TRUE,
but for keeping the noise down, the default is FALSE.
should a warning be issued when using \link[dendextend]{cutree}?}
\item{which}{an integer vector of length 2, indicating
which of the trees in the dendlist object should be plotted (relevant for dendlist)}
}
\value{
Baker's Gamma association Index between two trees (a number between -1 to 1)
}
\description{
Calculate Baker's Gamma correlation coefficient for two trees
(also known as Goodman-Kruskal-gamma index).
Assumes the labels in the two trees fully match. If they do not
please first use \link{intersect_trees} to have them matched.
WARNING: this can be quite slow for medium/large trees.
}
\details{
Baker's Gamma (see reference) is a measure of association (similarity)
between two trees of hierarchical clustering (dendrograms).
It is calculated by taking two items, and seeing what is the highest
possible level of k (number of cluster groups created when cutting the tree)
for which the two items still belong to the same tree. That k is returned,
and the same is done for these two items for the second tree.
There are n over 2 combinations of such pairs of items from the items in
the tree, and all of these numbers are calculated for each of the two trees.
Then, these two sets of numbers (a set for the items in each tree)
are paired according to the pairs of items compared, and a spearman
correlation is calculated.
The value can range between -1 to 1. With near 0 values meaning that
the two trees are not statistically similar.
For an exact p-value one should resort to a permutation test. One such option
would be to permute the labels of one tree many times, calculating
the distribution under the null hypothesis (keeping the tree topologies
constant).
Notice that this measure is not affected by the height of a branch but only
of its relative position compared with other branches.
}
\examples{
\dontrun{
set.seed(23235)
ss <- sample(1:150, 10 )
hc1 <- hclust(dist(iris[ss,-5]), "com")
hc2 <- hclust(dist(iris[ss,-5]), "single")
dend1 <- as.dendrogram(hc1)
dend2 <- as.dendrogram(hc2)
# cutree(dend1)
cor_bakers_gamma(hc1, hc2)
cor_bakers_gamma(dend1, dend2)
dend1 <- match_order_by_labels(dend1, dend2) # if you are not sure
cor_bakers_gamma(dend1, dend2, use_labels_not_values = FALSE)
library(microbenchmark)
microbenchmark(
with_labels = cor_bakers_gamma(dend1, dend2, try_cutree_hclust=FALSE) ,
with_values = cor_bakers_gamma(dend1, dend2,
use_labels_not_values = FALSE, try_cutree_hclust=FALSE) ,
times=10
)
cor_bakers_gamma(dend1, dend1, use_labels_not_values = FALSE)
cor_bakers_gamma(dend1, dend1, use_labels_not_values = TRUE)
}
}
\references{
Baker, F. B., Stability of Two Hierarchical Grouping Techniques Case
1: Sensitivity to Data Errors. Journal of the American Statistical
Association, 69(346), 440 (1974).
}
\seealso{
\link{cor_cophenetic}
}
| /man/cor_bakers_gamma.Rd | no_license | JohnMCMa/dendextend | R | false | true | 4,362 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cor_bakers_gamma.R
\name{cor_bakers_gamma}
\alias{cor_bakers_gamma}
\alias{cor_bakers_gamma.default}
\alias{cor_bakers_gamma.dendlist}
\alias{cor_bakers_gamma.dendrogram}
\alias{cor_bakers_gamma.hclust}
\title{Baker's Gamma correlation coefficient}
\usage{
cor_bakers_gamma(dend1, ...)
\method{cor_bakers_gamma}{default}(dend1, dend2, ...)
\method{cor_bakers_gamma}{dendrogram}(dend1, dend2,
use_labels_not_values = TRUE, to_plot = FALSE,
warn = dendextend_options("warn"), ...)
\method{cor_bakers_gamma}{hclust}(dend1, dend2, use_labels_not_values = TRUE,
to_plot = FALSE, warn = dendextend_options("warn"), ...)
\method{cor_bakers_gamma}{dendlist}(dend1, which = c(1L, 2L), ...)
}
\arguments{
\item{dend1}{a tree (dendrogram/hclust/phylo)}
\item{...}{Passed to \link[dendextend]{cutree}.}
\item{dend2}{a tree (dendrogram/hclust/phylo)}
\item{use_labels_not_values}{logical (TRUE). Should labels be used in the
k matrix when using cutree? Set to FALSE will make the function a bit faster
BUT, it assumes the two trees have the exact same leaves order values for
each labels. This can be assured by using \link{match_order_by_labels}.}
\item{to_plot}{logical (FALSE). Passed to \link{bakers_gamma_for_2_k_matrix}}
\item{warn}{logical (default from dendextend_options("warn") is FALSE).
Set if warning are to be issued, it is safer to keep this at TRUE,
but for keeping the noise down, the default is FALSE.
should a warning be issued when using \link[dendextend]{cutree}?}
\item{which}{an integer vector of length 2, indicating
which of the trees in the dendlist object should be plotted (relevant for dendlist)}
}
\value{
Baker's Gamma association Index between two trees (a number between -1 to 1)
}
\description{
Calculate Baker's Gamma correlation coefficient for two trees
(also known as Goodman-Kruskal-gamma index).
Assumes the labels in the two trees fully match. If they do not
please first use \link{intersect_trees} to have them matched.
WARNING: this can be quite slow for medium/large trees.
}
\details{
Baker's Gamma (see reference) is a measure of association (similarity)
between two trees of hierarchical clustering (dendrograms).
It is calculated by taking two items, and seeing what is the highest
possible level of k (number of cluster groups created when cutting the tree)
for which the two items still belong to the same tree. That k is returned,
and the same is done for these two items for the second tree.
There are n over 2 combinations of such pairs of items from the items in
the tree, and all of these numbers are calculated for each of the two trees.
Then, these two sets of numbers (a set for the items in each tree)
are paired according to the pairs of items compared, and a spearman
correlation is calculated.
The value can range between -1 to 1. With near 0 values meaning that
the two trees are not statistically similar.
For an exact p-value one should resort to a permutation test. One such option
would be to permute the labels of one tree many times, calculating
the distribution under the null hypothesis (keeping the tree topologies
constant).
Notice that this measure is not affected by the height of a branch but only
of its relative position compared with other branches.
}
\examples{
\dontrun{
set.seed(23235)
ss <- sample(1:150, 10 )
hc1 <- hclust(dist(iris[ss,-5]), "com")
hc2 <- hclust(dist(iris[ss,-5]), "single")
dend1 <- as.dendrogram(hc1)
dend2 <- as.dendrogram(hc2)
# cutree(dend1)
cor_bakers_gamma(hc1, hc2)
cor_bakers_gamma(dend1, dend2)
dend1 <- match_order_by_labels(dend1, dend2) # if you are not sure
cor_bakers_gamma(dend1, dend2, use_labels_not_values = FALSE)
library(microbenchmark)
microbenchmark(
with_labels = cor_bakers_gamma(dend1, dend2, try_cutree_hclust=FALSE) ,
with_values = cor_bakers_gamma(dend1, dend2,
use_labels_not_values = FALSE, try_cutree_hclust=FALSE) ,
times=10
)
cor_bakers_gamma(dend1, dend1, use_labels_not_values = FALSE)
cor_bakers_gamma(dend1, dend1, use_labels_not_values = TRUE)
}
}
\references{
Baker, F. B., Stability of Two Hierarchical Grouping Techniques Case
1: Sensitivity to Data Errors. Journal of the American Statistical
Association, 69(346), 440 (1974).
}
\seealso{
\link{cor_cophenetic}
}
|
############################################################
# DATA PROCESSING
############################################################
############################################################
# 1. Turning email strings into a word frequency matrix
############################################################
# Based on a tm-package tutorial:
# https://eight2late.wordpress.com/2015/05/27/a-gentle-introduction-to-text-mining-using-r/
## Libraries
library(tm)
library(stringr)
library(SnowballC)
library(wordcloud)
## Load data (paths are relative to the project root; setwd there first).
## Train: V1 = sender label, V2 = email text.  Test: V1 = email text.
emails <- read.table("data/HRC_train.tsv", sep="\t", header=FALSE, stringsAsFactors = FALSE)
test <- read.table("data/HRC_test.tsv", sep = "\t", header=FALSE, stringsAsFactors = FALSE)
## Split each email into a vector of words.
## strsplit() is vectorized, so no element-by-element loop is needed.
emails$wordvec <- strsplit(emails$V2, " ")
test$wordvec <- strsplit(test$V1, " ")
## Train and test go into ONE corpus so both get identical term columns.
wordvec <- c(emails$wordvec, test$wordvec)
emailsC <- Corpus(VectorSource(wordvec))
## Standard cleanup pipeline: lowercase, strip punctuation/numbers/stopwords,
## then stem.  Intermediate document-term matrices are kept for comparison.
processed <- tm_map(emailsC, content_transformer(tolower))
processed <- tm_map(processed, removePunctuation)
processed <- tm_map(processed, removeNumbers)
dtm_raw <- DocumentTermMatrix(processed)
processed <- tm_map(processed, removeWords, stopwords("english"))
dtm_stop <- DocumentTermMatrix(processed)
processed <- tm_map(processed, stemDocument, language = "english")
dtm_stem <- DocumentTermMatrix(processed)
## Inspect the most frequent stems.
freq <- colSums(as.matrix(dtm_stem))
ord <- order(freq, decreasing = TRUE)
freq[head(ord, n = 20)]
# Most frequent terms ("state", "depart", "case", "date", "doc", "subject",
# "sent", "will") are boilerplate, not meaningful.
# Keep only words whose document frequency lies within [3, 1200].
dtmr <- DocumentTermMatrix(processed, control = list(bounds = list(global = c(3, 1200))))
freqr <- colSums(as.matrix(dtmr))
ordr <- order(freqr, decreasing = TRUE)
freqr[head(ordr)]
freqr[tail(ordr)]
############################################################
# 2. Exploring other possible features from training set
############################################################
# Exploration plots
library(ggplot2)
png("images/SenderHistogram.png")
qplot(V1, data=emails, geom="histogram", col=I("white"), binwidth=1,
      main = "Histogram for Sender",
      xlab = "Sender")
dev.off()
png("images/SenderNchar.png")
plot(nchar(emails$V2), emails$V1, ylab="Sender", xlab ="Characters per Email",
     main="Characters per Email by Sender")
dev.off()
# Compute per-email summary statistics and test each one for a sender effect.
# Vectors are preallocated instead of grown with c() inside the loop.
n_train <- nrow(emails)
total_chars <- numeric(n_train)         # characters per email (whitespace excluded)
mean_chars <- numeric(n_train)          # mean word length
num_words <- numeric(n_train)           # words per email
ampersands <- numeric(n_train)
qmarks <- numeric(n_train)
semicolons <- numeric(n_train)
qmarks_per_word <- numeric(n_train)
semicolons_per_word <- numeric(n_train)
uppercase_per_word <- numeric(n_train)  # uppercase letters per word (was never filled in)
periods <- numeric(n_train)
commas <- numeric(n_train)
hyphens <- numeric(n_train)
hyphens_per_word <- numeric(n_train)
parentheses <- numeric(n_train)
numerals <- numeric(n_train)
for (i in seq_len(n_train)) {
  vec <- as.vector((emails$wordvec[i])[[1]])
  total_chars[i] <- sum(nchar(vec))
  mean_chars[i] <- mean(nchar(vec))
  num_words[i] <- length(vec)
  # "?", "." and "(" are regex metacharacters, so they must be matched
  # literally via fixed(); the original patterns "/?" and "." counted every
  # position / every character instead of the punctuation mark.
  ampersands[i] <- str_count(emails$V2[i], pattern = fixed("&"))
  qmarks[i] <- str_count(emails$V2[i], pattern = fixed("?"))
  qmarks_per_word[i] <- qmarks[i] / num_words[i]
  semicolons[i] <- str_count(emails$V2[i], pattern = fixed(";"))
  semicolons_per_word[i] <- semicolons[i] / num_words[i]
  uppercase_per_word[i] <- str_count(emails$V2[i], pattern = "[A-Z]") / num_words[i]
  periods[i] <- str_count(emails$V2[i], pattern = fixed("."))
  commas[i] <- str_count(emails$V2[i], pattern = fixed(","))
  hyphens[i] <- str_count(emails$V2[i], pattern = fixed("-"))
  hyphens_per_word[i] <- hyphens[i] / num_words[i]
  parentheses[i] <- str_count(emails$V2[i], pattern = fixed("(")) +
    str_count(emails$V2[i], pattern = fixed(")"))
  numerals[i] <- str_count(emails$V2[i], pattern = "[0-9]")
}
# One-way ANOVA of each candidate feature against the sender.
# (Verdict comments reflect the original run and may shift slightly now that
# the punctuation counts are correct.)
summary(aov(total_chars ~ emails$V1))          # Significant
summary(aov(mean_chars ~ emails$V1))           # Significant
summary(aov(num_words ~ emails$V1))            # Significant - related to total_chars
summary(aov(ampersands ~ emails$V1))           # Not significant
summary(aov(qmarks ~ emails$V1))               # Significant
summary(aov(qmarks_per_word ~ emails$V1))      # Significant
summary(aov(semicolons_per_word ~ emails$V1))  # Not significant
summary(aov(uppercase_per_word ~ emails$V1))   # Not significant
words_per_sentence <- num_words / periods
summary(aov(words_per_sentence ~ emails$V1))   # Not significant
commas_per_word <- commas / num_words
summary(aov(commas_per_word ~ emails$V1))      # Not significant
parentheses_per_word <- parentheses / num_words
summary(aov(parentheses_per_word ~ emails$V1)) # Not Significant
numerals_per_word <- numerals / num_words
summary(aov(numerals_per_word ~ emails$V1))    # Not Significant
# Wasn't able to find anything too distinct in writing styles
# But will add number of words in email as a feature
############################################################
# 3. Creating full dataframe
############################################################
# Create df for analysis: sender label + word count + term frequencies.
m <- as.matrix(dtmr)
m_train <- m[1:1200, ]    # first 1200 corpus documents are the training emails
df <- data.frame(emails$V1, num_words, m_train)
# Write base set to csv
write.csv(df, file="wrongdata/data/processed_train_df.csv")
# After adding new punctuation-style features.
# hyphens_per_word was referenced but never defined in the original script.
hyphens_per_word <- hyphens / num_words
df2 <- data.frame(emails$V1, num_words, qmarks_per_word,
                  hyphens_per_word, m_train)
write.csv(df2, file="wrongdata/data/processed_train_df_2.csv")
# Process Test Data for use in final submission
m_test <- m[1201:1305, ]  # remaining corpus documents are the test emails
n_test <- nrow(test)
test_num_words <- numeric(n_test)
test_qmarks_per_word <- numeric(n_test)
test_hyphens_per_word <- numeric(n_test)
for (i in seq_len(n_test)) {
  vec <- as.vector((test$wordvec[i])[[1]])
  test_num_words[i] <- length(vec)
  # Count punctuation in the raw email string (test$V1), not in the deparsed
  # word-vector list element, and match "-" and "?" literally (the original
  # regex "/?" matched at every position).
  test_hyphens_per_word[i] <- str_count(test$V1[i], pattern = fixed("-")) / test_num_words[i]
  test_qmarks_per_word[i] <- str_count(test$V1[i], pattern = fixed("?")) / test_num_words[i]
}
df_test <- data.frame(test_num_words, m_test)
write.csv(df_test, file="wrongdata/data/processed_test_df.csv")
# After adding some additional features: our best model
df_test_2 <- data.frame(test_num_words, test_qmarks_per_word,
                        test_hyphens_per_word, m_test)
write.csv(df_test_2, file="wrongdata/data/processed_test_df_2.csv")
############################################################
# 4. FEATURE REDUCTION: UNIVARIATE ANOVA
############################################################
# For every term, run a one-way ANOVA of its training counts against the
# sender; keep only terms with p < 0.05.
words <- as.vector(colnames(m))
word_aov <- numeric(length(words))   # preallocated instead of grown with c()
for (i in seq_along(words)) {
  col <- m_train[, i]
  summ <- summary(aov(col ~ emails$V1))
  word_aov[i] <- summ[[1]][["Pr(>F)"]][1]
}
aov_df <- data.frame(words, word_aov)
# Take only words with p-values less than 5%
sig_df <- aov_df[aov_df$word_aov < .05, ]
sig_cols <- sig_df$words
sig_dtmr <- m[, sig_cols]
# Split the significant-term matrix back into train/test rows.
# (The original hard-coded 1:3505 / 3506:3894, which does not match the
# 1200 train + 105 test emails actually loaded above.)
train_rows <- seq_len(nrow(emails))
test_rows <- (nrow(emails) + 1):nrow(sig_dtmr)
# NOTE(review): the train frame uses numerals_per_word while the test frame
# uses test_qmarks_per_word -- confirm which feature set is intended.
hyphens_per_word <- hyphens / num_words   # was referenced but never defined
df3 <- data.frame(emails$V1, num_words, numerals_per_word,
                  hyphens_per_word, sig_dtmr[train_rows, ])
write.csv(df3, file="wrongdata/data/processed_train_df_3.csv")
df_test_3 <- data.frame(test_num_words, test_qmarks_per_word,
                        test_hyphens_per_word, sig_dtmr[test_rows, ])
write.csv(df_test_3, file="wrongdata/data/processed_test_df_3.csv")
###########################################################
# 5. BORUTA FEATURE SELECTION
# With ~9000 features, we try to run a feature selection method
############################################################
library(caret)
library(Boruta)
library(dplyr)
train <- read.csv("data/processed_train_df_2.csv")
set.seed(1234)
# Boruta is slow, so run it on a small (1%) stratified sample of the data.
idx <- createDataPartition(train$emails.V1, p=0.01, list=FALSE)
sample.df <- train[idx,]
explanatory.attributes <- setdiff(names(sample.df), c("X","emails.V1"))
data.classes <- sapply(explanatory.attributes, function(x){class(sample.df[,x])})
unique.classes <- unique(data.classes)
attr.by.data.types <- lapply(unique.classes, function(x){names(data.classes[data.classes==x])})
names(attr.by.data.types) <- unique.classes
comment(attr.by.data.types) <- "list that categorize training data types"
# Median-impute any missing numeric/integer values before running Boruta.
pp <- preProcess(sample.df[c(attr.by.data.types$numeric,attr.by.data.types$integer)],
                 method=c("medianImpute"))
pp.sample.df <- predict(pp,sample.df[c(attr.by.data.types$numeric,attr.by.data.types$integer)])
df <- cbind(pp.sample.df,sample.df[attr.by.data.types$character])
# Boruta reserves "shadow"-named attributes for its own permuted copies, so
# prefix any real columns containing "shadow".  paste0 keeps the new name a
# single token; the original paste() inserted a space into the column name.
cn <- colnames(df)
for (i in seq_along(cn)) {
  if (str_detect(cn[i], "shadow")) {
    print(cn[i])
    cn[i] <- paste0("X", cn[i])
  }
}
colnames(df) <- cn
# Run Boruta
bor.results <- Boruta(df, factor(sample.df$emails.V1),
                      maxRuns=18, pValue = 0.4,
                      doTrace=0)
# Unfortunately, no features are confirmed as important.
# Only about 10 are considered possibly important, the rest are confirmed unimportant.
# We raised the p-value in the hopes of getting more features, but got similar results.
decision <- bor.results$finalDecision
decision <- as.matrix(decision)
decision <- data.frame(rownames(decision), decision)
tentative <- decision[decision$decision == "Tentative",]
tentativeWords <- as.vector(tentative$rownames.decision)
new_df <- train[, tentativeWords]
# From the tentative features, compute all pairwise (quadratic) and triple
# (cubic) products.  Columns are accumulated in a list and bound once at the
# end rather than growing a data.frame inside the loops (O(n^2) copies).
power_cols <- list()
for (i in seq_len(ncol(new_df))) {
  for (j in seq_len(ncol(new_df))) {
    newvar <- paste(colnames(new_df)[i], colnames(new_df)[j], sep = "*")
    x <- data.frame(new_df[, i] * new_df[, j])
    colnames(x) <- newvar
    power_cols[[length(power_cols) + 1]] <- x
  }
}
for (i in seq_len(ncol(new_df))) {
  for (j in seq_len(ncol(new_df))) {
    for (k in seq_len(ncol(new_df))) {
      newvar <- paste(colnames(new_df)[i], colnames(new_df)[j], colnames(new_df)[k], sep = "*")
      x <- data.frame(new_df[, i] * new_df[, j] * new_df[, k])
      colnames(x) <- newvar
      power_cols[[length(power_cols) + 1]] <- x
    }
  }
}
powerFeats <- do.call(data.frame, c(list(train[, 1]), power_cols))
new_df <- data.frame(train[, 1], new_df, powerFeats[, -1])
# Contains about 1100 features
write.csv(file="wrongdata/data/processed_train_df_4.csv", new_df)
| /wrongdata/Processing-wrong.R | no_license | renswny/STAT154-Group04 | R | false | false | 9,982 | r | ############################################################
# DATA PROCESSING
############################################################
############################################################
# 1. Turning email strings into a word frequency matrix
############################################################
# Based on a tm-package tutorial:
# https://eight2late.wordpress.com/2015/05/27/a-gentle-introduction-to-text-mining-using-r/
## Libraries
library(tm)
library(stringr)
library(SnowballC)
library(wordcloud)
## Load data (paths are relative to the project root; setwd there first).
## Train: V1 = sender label, V2 = email text.  Test: V1 = email text.
emails <- read.table("data/HRC_train.tsv", sep="\t", header=FALSE, stringsAsFactors = FALSE)
test <- read.table("data/HRC_test.tsv", sep = "\t", header=FALSE, stringsAsFactors = FALSE)
## Split each email into a vector of words.
## strsplit() is vectorized, so no element-by-element loop is needed.
emails$wordvec <- strsplit(emails$V2, " ")
test$wordvec <- strsplit(test$V1, " ")
## Train and test go into ONE corpus so both get identical term columns.
wordvec <- c(emails$wordvec, test$wordvec)
emailsC <- Corpus(VectorSource(wordvec))
## Standard cleanup pipeline: lowercase, strip punctuation/numbers/stopwords,
## then stem.  Intermediate document-term matrices are kept for comparison.
processed <- tm_map(emailsC, content_transformer(tolower))
processed <- tm_map(processed, removePunctuation)
processed <- tm_map(processed, removeNumbers)
dtm_raw <- DocumentTermMatrix(processed)
processed <- tm_map(processed, removeWords, stopwords("english"))
dtm_stop <- DocumentTermMatrix(processed)
processed <- tm_map(processed, stemDocument, language = "english")
dtm_stem <- DocumentTermMatrix(processed)
## Inspect the most frequent stems.
freq <- colSums(as.matrix(dtm_stem))
ord <- order(freq, decreasing = TRUE)
freq[head(ord, n = 20)]
# Most frequent terms ("state", "depart", "case", "date", "doc", "subject",
# "sent", "will") are boilerplate, not meaningful.
# Keep only words whose document frequency lies within [3, 1200].
dtmr <- DocumentTermMatrix(processed, control = list(bounds = list(global = c(3, 1200))))
freqr <- colSums(as.matrix(dtmr))
ordr <- order(freqr, decreasing = TRUE)
freqr[head(ordr)]
freqr[tail(ordr)]
############################################################
# 2. Exploring other possible features from training set
############################################################
# Exploration plots
library(ggplot2)
png("images/SenderHistogram.png")
qplot(V1, data=emails, geom="histogram", col=I("white"), binwidth=1,
      main = "Histogram for Sender",
      xlab = "Sender")
dev.off()
png("images/SenderNchar.png")
plot(nchar(emails$V2), emails$V1, ylab="Sender", xlab ="Characters per Email",
     main="Characters per Email by Sender")
dev.off()
# Compute per-email summary statistics and test each one for a sender effect.
# Vectors are preallocated instead of grown with c() inside the loop.
n_train <- nrow(emails)
total_chars <- numeric(n_train)         # characters per email (whitespace excluded)
mean_chars <- numeric(n_train)          # mean word length
num_words <- numeric(n_train)           # words per email
ampersands <- numeric(n_train)
qmarks <- numeric(n_train)
semicolons <- numeric(n_train)
qmarks_per_word <- numeric(n_train)
semicolons_per_word <- numeric(n_train)
uppercase_per_word <- numeric(n_train)  # uppercase letters per word (was never filled in)
periods <- numeric(n_train)
commas <- numeric(n_train)
hyphens <- numeric(n_train)
hyphens_per_word <- numeric(n_train)
parentheses <- numeric(n_train)
numerals <- numeric(n_train)
for (i in seq_len(n_train)) {
  vec <- as.vector((emails$wordvec[i])[[1]])
  total_chars[i] <- sum(nchar(vec))
  mean_chars[i] <- mean(nchar(vec))
  num_words[i] <- length(vec)
  # "?", "." and "(" are regex metacharacters, so they must be matched
  # literally via fixed(); the original patterns "/?" and "." counted every
  # position / every character instead of the punctuation mark.
  ampersands[i] <- str_count(emails$V2[i], pattern = fixed("&"))
  qmarks[i] <- str_count(emails$V2[i], pattern = fixed("?"))
  qmarks_per_word[i] <- qmarks[i] / num_words[i]
  semicolons[i] <- str_count(emails$V2[i], pattern = fixed(";"))
  semicolons_per_word[i] <- semicolons[i] / num_words[i]
  uppercase_per_word[i] <- str_count(emails$V2[i], pattern = "[A-Z]") / num_words[i]
  periods[i] <- str_count(emails$V2[i], pattern = fixed("."))
  commas[i] <- str_count(emails$V2[i], pattern = fixed(","))
  hyphens[i] <- str_count(emails$V2[i], pattern = fixed("-"))
  hyphens_per_word[i] <- hyphens[i] / num_words[i]
  parentheses[i] <- str_count(emails$V2[i], pattern = fixed("(")) +
    str_count(emails$V2[i], pattern = fixed(")"))
  numerals[i] <- str_count(emails$V2[i], pattern = "[0-9]")
}
# One-way ANOVA of each candidate feature against the sender.
# (Verdict comments reflect the original run and may shift slightly now that
# the punctuation counts are correct.)
summary(aov(total_chars ~ emails$V1))          # Significant
summary(aov(mean_chars ~ emails$V1))           # Significant
summary(aov(num_words ~ emails$V1))            # Significant - related to total_chars
summary(aov(ampersands ~ emails$V1))           # Not significant
summary(aov(qmarks ~ emails$V1))               # Significant
summary(aov(qmarks_per_word ~ emails$V1))      # Significant
summary(aov(semicolons_per_word ~ emails$V1))  # Not significant
summary(aov(uppercase_per_word ~ emails$V1))   # Not significant
words_per_sentence <- num_words / periods
summary(aov(words_per_sentence ~ emails$V1))   # Not significant
commas_per_word <- commas / num_words
summary(aov(commas_per_word ~ emails$V1))      # Not significant
parentheses_per_word <- parentheses / num_words
summary(aov(parentheses_per_word ~ emails$V1)) # Not Significant
numerals_per_word <- numerals / num_words
summary(aov(numerals_per_word ~ emails$V1))    # Not Significant
# Wasn't able to find anything too distinct in writing styles
# But will add number of words in email as a feature
############################################################
# 3. Creating full dataframe
############################################################
# Create df for analysis: sender label + word count + term frequencies.
m <- as.matrix(dtmr)
m_train <- m[1:1200, ]    # first 1200 corpus documents are the training emails
df <- data.frame(emails$V1, num_words, m_train)
# Write base set to csv
write.csv(df, file="wrongdata/data/processed_train_df.csv")
# After adding new punctuation-style features.
# hyphens_per_word was referenced but never defined in the original script.
hyphens_per_word <- hyphens / num_words
df2 <- data.frame(emails$V1, num_words, qmarks_per_word,
                  hyphens_per_word, m_train)
write.csv(df2, file="wrongdata/data/processed_train_df_2.csv")
# Process Test Data for use in final submission
m_test <- m[1201:1305, ]  # remaining corpus documents are the test emails
n_test <- nrow(test)
test_num_words <- numeric(n_test)
test_qmarks_per_word <- numeric(n_test)
test_hyphens_per_word <- numeric(n_test)
for (i in seq_len(n_test)) {
  vec <- as.vector((test$wordvec[i])[[1]])
  test_num_words[i] <- length(vec)
  # Count punctuation in the raw email string (test$V1), not in the deparsed
  # word-vector list element, and match "-" and "?" literally (the original
  # regex "/?" matched at every position).
  test_hyphens_per_word[i] <- str_count(test$V1[i], pattern = fixed("-")) / test_num_words[i]
  test_qmarks_per_word[i] <- str_count(test$V1[i], pattern = fixed("?")) / test_num_words[i]
}
df_test <- data.frame(test_num_words, m_test)
write.csv(df_test, file="wrongdata/data/processed_test_df.csv")
# After adding some additional features: our best model
df_test_2 <- data.frame(test_num_words, test_qmarks_per_word,
                        test_hyphens_per_word, m_test)
write.csv(df_test_2, file="wrongdata/data/processed_test_df_2.csv")
############################################################
# 4. FEATURE REDUCTION: UNIVARIATE ANOVA
############################################################
# For every term, run a one-way ANOVA of its training counts against the
# sender; keep only terms with p < 0.05.
words <- as.vector(colnames(m))
word_aov <- numeric(length(words))   # preallocated instead of grown with c()
for (i in seq_along(words)) {
  col <- m_train[, i]
  summ <- summary(aov(col ~ emails$V1))
  word_aov[i] <- summ[[1]][["Pr(>F)"]][1]
}
aov_df <- data.frame(words, word_aov)
# Take only words with p-values less than 5%
sig_df <- aov_df[aov_df$word_aov < .05, ]
sig_cols <- sig_df$words
sig_dtmr <- m[, sig_cols]
# Split the significant-term matrix back into train/test rows.
# (The original hard-coded 1:3505 / 3506:3894, which does not match the
# 1200 train + 105 test emails actually loaded above.)
train_rows <- seq_len(nrow(emails))
test_rows <- (nrow(emails) + 1):nrow(sig_dtmr)
# NOTE(review): the train frame uses numerals_per_word while the test frame
# uses test_qmarks_per_word -- confirm which feature set is intended.
hyphens_per_word <- hyphens / num_words   # was referenced but never defined
df3 <- data.frame(emails$V1, num_words, numerals_per_word,
                  hyphens_per_word, sig_dtmr[train_rows, ])
write.csv(df3, file="wrongdata/data/processed_train_df_3.csv")
df_test_3 <- data.frame(test_num_words, test_qmarks_per_word,
                        test_hyphens_per_word, sig_dtmr[test_rows, ])
write.csv(df_test_3, file="wrongdata/data/processed_test_df_3.csv")
###########################################################
# 5. BORUTA FEATURE SELECTION
# With ~9000 features, we try to run a feature selection method
############################################################
library(caret)
library(Boruta)
library(dplyr)
# NOTE(review): earlier sections write to "wrongdata/data/..." but this
# reads "data/..." -- confirm the intended location.
train <- read.csv("data/processed_train_df_2.csv")
set.seed(1234)
# Boruta is expensive, so work on a ~1% partition of the data.
idx <- createDataPartition(train$emails.V1,p=0.01,list=FALSE)
# Take small sample of the data
sample.df <- train[idx,]
# Group predictor columns by storage class so the numeric/integer ones can
# be median-imputed before running Boruta.
explanatory.attributes <- setdiff(names(sample.df),c("X","emails.V1"))
data.classes <- sapply(explanatory.attributes,function(x){class(sample.df[,x])})
unique.classes <- unique(data.classes)
attr.by.data.types <- lapply(unique.classes,function(x){names(data.classes[data.classes==x])})
names(attr.by.data.types) <- unique.classes
comment(attr.by.data.types) <- "list that categorize training data types"
pp <- preProcess(sample.df[c(attr.by.data.types$numeric,attr.by.data.types$integer)],
method=c("medianImpute"))
pp.sample.df <- predict(pp,sample.df[c(attr.by.data.types$numeric,attr.by.data.types$integer)])
df <- cbind(pp.sample.df,sample.df[attr.by.data.types$character])
# Change the colnames that begin with "shadow" as it throws an error
cn <- colnames(df)
for (i in 1:length(cn)) {
if (str_detect(cn[i], "shadow")) {
print(cn[i])
cn[i] <- paste("X", cn[i])
}
}
colnames(df) <- cn
# Run Boruta
# maxRuns / pValue are loosened to coax more features toward "confirmed".
bor.results <- Boruta(df,factor(sample.df$emails.V1),
maxRuns=18, pValue = 0.4,
doTrace=0)
# Unfortunately, no features are confirmed as important.
# Only about 10 are considered possibly important, the rest are confirmed unimportant
# We raised the p-value in the hopes of getting more features, but got similar results
decision <- bor.results$finalDecision
decision <- as.matrix(decision)
decision <- data.frame(rownames(decision), decision)
# Keep only the "Tentative" (possibly important) features.
tentative <- decision[decision$decision == "Tentative",]
tentativeWords <- tentative$rownames.decision
tentativeWords <- as.vector(tentativeWords)
new_df <- train[, tentativeWords]
# From these 10, we compute quadratic and cubic powers
# NOTE: both (i, j) and (j, i) orderings are generated, so commutative
# products appear more than once; growing powerFeats one column at a time
# is O(n^2) but tolerable at ~10 base features. data.frame()'s check.names
# will also sanitise the "*" in the generated column names.
powerFeats <- train[ , 1]
for (i in 1:ncol(new_df)) {
for (j in 1:ncol(new_df)) {
newvar = paste(colnames(new_df)[i], colnames(new_df)[j], sep = "*")
x = data.frame(new_df[ , i]*new_df[ , j])
colnames(x) = newvar
powerFeats <- data.frame(powerFeats, x)
}
}
for (i in 1:ncol(new_df)) {
for (j in 1:ncol(new_df)) {
for (k in 1:ncol(new_df)) {
newvar = paste(colnames(new_df)[i], colnames(new_df)[j],colnames(new_df)[k] , sep = "*")
x = data.frame(new_df[ , i]*new_df[ , j]*new_df[ , k])
colnames(x) = newvar
powerFeats <- data.frame(powerFeats, x)
}
}
}
# NOTE(review): `y` is assigned but never used -- the next line rebuilds
# powerFeats[ , -1] directly.
y <- powerFeats[ , -1]
new_df <- data.frame(train[, 1], new_df, powerFeats[ , -1])
# Contains about 1100 features
write.csv(file="wrongdata/data/processed_train_df_4.csv", new_df)
|
\name{avgts}
\alias{avgts}
\title{
TimeSeries averages over cycle phases.
}
\description{
This function returns the averages of the input time series over each of the
phases in the Dating. It omits the NA's in the time series, so internal NA's will cause an error.
}
\usage{
avgts(ts,Dating)
}
\arguments{
\item{ts}{
The input time series.
}
\item{Dating}{
The dating.
}
}
\value{
A ts timeseries.
}
\author{
Majid Einian,\email{m.einian@mbri.ac.ir}, \cr\href{http://www.mbri.ac.ir}{Monetary and Banking Research Institute}, \href{http://cbi.ir/default_en.aspx}{Central Bank of Islamic Republic of Iran}
}
\examples{
data("Iran.non.Oil.GDP.Quarterly.Growth")
data("MBRI.Iran.Dating")
avggrowth <- avgts(Iran.non.Oil.GDP.Quarterly.Growth,MBRI.Iran.Dating)
cbind(avggrowth,Iran.non.Oil.GDP.Quarterly.Growth)
plot(MBRI.Iran.Dating,avggrowth)
plot(MBRI.Iran.Dating,Iran.non.Oil.GDP.Quarterly.Growth,averages=TRUE)
}
\keyword{Averages over Cycle Phases} | /man/avgts.Rd | no_license | freephys/BCDating | R | false | false | 1,020 | rd | \name{avgts}
\alias{avgts}
\title{
TimeSeries averages over cycle phases.
}
\description{
This function returns the averages of the input time series over each of the
phases in the Dating. It omits the NA's in the time series, so internal NA's will cause an error.
}
\usage{
avgts(ts,Dating)
}
\arguments{
\item{ts}{
The input time series.
}
\item{Dating}{
The dating.
}
}
\value{
A ts timeseries.
}
\author{
Majid Einian,\email{m.einian@mbri.ac.ir}, \cr\href{http://www.mbri.ac.ir}{Monetary and Banking Research Institute}, \href{http://cbi.ir/default_en.aspx}{Central Bank of Islamic Republic of Iran}
}
\examples{
data("Iran.non.Oil.GDP.Quarterly.Growth")
data("MBRI.Iran.Dating")
avggrowth <- avgts(Iran.non.Oil.GDP.Quarterly.Growth,MBRI.Iran.Dating)
cbind(avggrowth,Iran.non.Oil.GDP.Quarterly.Growth)
plot(MBRI.Iran.Dating,avggrowth)
plot(MBRI.Iran.Dating,Iran.non.Oil.GDP.Quarterly.Growth,averages=TRUE)
}
\keyword{Averages over Cycle Phases} |
\name{summary.bounds}
\alias{summary.bounds}
\alias{print.summary.bounds}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Summary for Group Sequential Boundaries}
\description{
'summary' method for class '"bounds"'.
}
\usage{
\method{summary}{bounds}(object, ...)
\method{print}{summary.bounds}(x, digit = 5, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{an object of class '"bounds"', a result of a call to
\code{bounds}.}
\item{x}{an object of class '"summary.bounds"', a result of a
call to \code{summary.bounds}.}
\item{digit}{the number of significant digits to use when printing.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
The function 'summary.bounds' returns a list of summary
values of the group sequential boundary calculations given in
'object'.
}
\references{Reboussin, D. M., DeMets, D. L., Kim, K. M., and Lan,
K. K. G. (2000) Computations for group sequential boundaries using the
Lan-DeMets spending function method. \emph{Controlled Clinical Trials},
21:190-207.
Fortran program 'ld98' by the same authors as above.
DeMets, D. L. and Lan, K. K. G. (1995) \emph{Recent Advances in Clinical
Trial Design and Analysis}, Thall, P. F. (ed.). Boston: Kluwer
Academic Publishers.
Lan, K. K. G. and DeMets, D. L. (1983) Discrete sequential boundaries
for clinical trials. \emph{Biometrika}, 70:659-63.
}
\author{Oscar A. Perez and T. Charles Casper \email{charlie.casper@hsc.utah.edu}}
\seealso{\code{\link{bounds}} for computation of boundaries using alpha
spending function method. \code{\link{drift}} for exit probabilities given boundaries or drift
(effect) or confidence interval given power.
}
\examples{
## See function 'bounds'
}
\keyword{misc}
\keyword{methods}
| /man/summary.bounds.Rd | no_license | okjoh/ldbounds | R | false | false | 1,854 | rd | \name{summary.bounds}
\alias{summary.bounds}
\alias{print.summary.bounds}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Summary for Group Sequential Boundaries}
\description{
'summary' method for class '"bounds"'.
}
\usage{
\method{summary}{bounds}(object, ...)
\method{print}{summary.bounds}(x, digit = 5, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{an object of class '"bounds"', a result of a call to
\code{bounds}.}
\item{x}{an object of class '"summary.bounds"', a result of a
call to \code{summary.bounds}.}
\item{digit}{the number of significant digits to use when printing.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
The function 'summary.bounds' returns a list of summary
values of the group sequential boundary calculations given in
'object'.
}
\references{Reboussin, D. M., DeMets, D. L., Kim, K. M., and Lan,
K. K. G. (2000) Computations for group sequential boundaries using the
Lan-DeMets spending function method. \emph{Controlled Clinical Trials},
21:190-207.
Fortran program 'ld98' by the same authors as above.
DeMets, D. L. and Lan, K. K. G. (1995) \emph{Recent Advances in Clinical
Trial Design and Analysis}, Thall, P. F. (ed.). Boston: Kluwer
Academic Publishers.
Lan, K. K. G. and DeMets, D. L. (1983) Discrete sequential boundaries
for clinical trials. \emph{Biometrika}, 70:659-63.
}
\author{Oscar A. Perez and T. Charles Casper \email{charlie.casper@hsc.utah.edu}}
\seealso{\code{\link{bounds}} for computation of boundaries using alpha
spending function method. \code{\link{drift}} for exit probabilities given boundaries or drift
(effect) or confidence interval given power.
}
\examples{
## See function 'bounds'
}
\keyword{misc}
\keyword{methods}
|
#' Make a league table - not limited by seasons or tiers
#'
#' @param df The results dataframe
#' @param Season The Season
#' @param tier The tier
#' @param pts Points for a win. Default is 3.
#' @param begin Earliest date of results to make table from (format Y-m-d)
#' @param end Latest date of results to make table from (format Y-m-d)
#' @param type Whether to show all results together or only home or away results.
#' @section Notes:
#' The table that is produced is based upon 3 points for a win (unless otherwise
#' defined), 1 for a draw and 0 for a loss. The table is sorted based upon descending
#' GD and then descending GF as tie-breakers. Use other 'maketable' functions for
#' more precise tables for each league. `begin` and `end` may be supplied
#' independently or together; each bound is applied only when non-NULL.
#' @return a dataframe with a league table
#' @importFrom magrittr '%>%'
#' @examples
#' maketable_all(df=england[england$tier==1,],begin='1992-08-15',
#' end='2017-07-01') #EPL historical table
#' maketable_all(df=england[england$tier==1,],begin='1992-08-15',
#' end='2017-07-01', type='away') #EPL historical table away results
#' @export
maketable_all <-
  function(df = NULL, Season = NULL, tier = NULL,
           pts = 3, begin = NULL, end = NULL,
           type = c("both", "home", "away"))
  {
    ## Optionally restrict to a single Season and/or tier.
    dfx <- df
    if (!is.null(Season)) {
      dfx <- dfx[dfx$Season == Season, ]
    }
    if (!is.null(tier)) {
      dfx <- dfx[dfx$tier == tier, ]
    }
    ## Date window.
    ## BUG FIX: the original branches were crossed -- when only `begin` was
    ## given it also referenced the NULL `end`, and when both were given it
    ## silently ignored `end`. Applying each bound independently gives the
    ## documented behaviour.
    if (!is.null(begin)) {
      dfx <- dfx[dfx$Date >= begin, ]
    }
    if (!is.null(end)) {
      dfx <- dfx[dfx$Date <= end, ]
    }
    ## One row per (team, match) from the requested perspective(s).
    if (match.arg(type) == "home") {
      temp <- select(dfx, team = home, opp = visitor, GF = hgoal, GA = vgoal)
    } else if (match.arg(type) == "away") {
      temp <- select(dfx, team = visitor, opp = home, GF = vgoal, GA = hgoal)
    } else {
      temp <- bind_rows(select(dfx, team = home, opp = visitor, GF = hgoal, GA = vgoal),
                        select(dfx, team = visitor, opp = home, GF = vgoal, GA = hgoal))
    }
    ## Aggregate to a league table: games played, W/D/L, goals and points,
    ## sorted by points then goal difference then goals for.
    temp <- temp %>%
      mutate(GD = GF - GA) %>%
      group_by(team) %>%
      summarise(GP = n(),                       # was sum(GD <= 100), a hack
                W = sum(GD > 0), D = sum(GD == 0),
                L = sum(GD < 0), gf = sum(GF), ga = sum(GA), gd = sum(GD)) %>%
      mutate(Pts = (W * pts) + D) %>%
      arrange(-Pts, -gd, -gf) %>%
      mutate(Pos = rownames(.)) %>%
      as.data.frame()
    return(temp)
  }
| /R/maketable_all.R | no_license | nturaga/engsoccerdata | R | false | false | 2,861 | r | #' Make a league table - not limited by seasons or tiers
#'
#' @param df The results dataframe
#' @param Season The Season
#' @param tier The tier
#' @param pts Points for a win. Default is 3.
#' @param begin Earliest date of results to make table from (format Y-m-d)
#' @param end Latest date of results to make table from (format Y-m-d)
#' @param type Whether to show all results together or only home or away results.
#' @section Notes:
#' The table that is produced is based upon 3 points for a win (unless otherwise
#' defined), 1 for a draw and 0 for a loss. The table is sorted based upon descending
#' GD and then descending GF as tie-breakers. Use other 'maketable' functions for
#' more precise tables for each league.
#' @return a dataframe with a league table
#' @importFrom magrittr '%>%'
#' @examples
#' maketable_all(df=england[england$tier==1,],begin='1992-08-15',
#' end='2017-07-01') #EPL historical table
#' maketable_all(df=england[england$tier==1,],begin='1992-08-15',
#' end='2017-07-01', type='away') #EPL historical table away results
#' @export
maketable_all <-
  function(df = NULL, Season = NULL, tier = NULL,
           pts = 3, begin = NULL, end = NULL,
           type = c("both", "home", "away"))
  {
    ## Optionally restrict to a single Season and/or tier.
    dfx <- df
    if (!is.null(Season)) {
      dfx <- dfx[dfx$Season == Season, ]
    }
    if (!is.null(tier)) {
      dfx <- dfx[dfx$tier == tier, ]
    }
    ## Date window.
    ## BUG FIX: the original branches were crossed -- when only `begin` was
    ## given it also referenced the NULL `end`, and when both were given it
    ## silently ignored `end`. Applying each bound independently gives the
    ## documented behaviour.
    if (!is.null(begin)) {
      dfx <- dfx[dfx$Date >= begin, ]
    }
    if (!is.null(end)) {
      dfx <- dfx[dfx$Date <= end, ]
    }
    ## One row per (team, match) from the requested perspective(s).
    if (match.arg(type) == "home") {
      temp <- select(dfx, team = home, opp = visitor, GF = hgoal, GA = vgoal)
    } else if (match.arg(type) == "away") {
      temp <- select(dfx, team = visitor, opp = home, GF = vgoal, GA = hgoal)
    } else {
      temp <- bind_rows(select(dfx, team = home, opp = visitor, GF = hgoal, GA = vgoal),
                        select(dfx, team = visitor, opp = home, GF = vgoal, GA = hgoal))
    }
    ## Aggregate to a league table: games played, W/D/L, goals and points,
    ## sorted by points then goal difference then goals for.
    temp <- temp %>%
      mutate(GD = GF - GA) %>%
      group_by(team) %>%
      summarise(GP = n(),                       # was sum(GD <= 100), a hack
                W = sum(GD > 0), D = sum(GD == 0),
                L = sum(GD < 0), gf = sum(GF), ga = sum(GA), gd = sum(GD)) %>%
      mutate(Pts = (W * pts) + D) %>%
      arrange(-Pts, -gd, -gf) %>%
      mutate(Pos = rownames(.)) %>%
      as.data.frame()
    return(temp)
  }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{DEH}
\alias{DEH}
\title{DEH}
\format{A data frame with 479 rows and 3 variables:
\describe{
\item{chromosome}{chromosome of DEH loci}
\item{loci}{location of DEH loci}
\item{strand}{strand of DEH loci}
}}
\usage{
data(DEH)
DEH
}
\value{
A data frame
}
\description{
DEH loci
}
\keyword{datasets}
| /man/DEH.Rd | no_license | xiaowenchenjax/epihet | R | false | true | 408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{DEH}
\alias{DEH}
\title{DEH}
\format{A data frame with 479 rows and 3 variables:
\describe{
\item{chromosome}{chromosome of DEH loci}
\item{loci}{location of DEH loci}
\item{strand}{strand of DEH loci}
}}
\usage{
data(DEH)
DEH
}
\value{
A data frame
}
\description{
DEH loci
}
\keyword{datasets}
|
#
# Download the Coursera Practical Machine Learning datasets into ./datasets/
# (each download is skipped when the file is already present).
# BUG FIX: the original swapped the two URLs -- it saved pml-training.csv's
# contents as pml-testing.csv and vice versa.
if (!file.exists("./datasets/pml-testing.csv")) {
  url <- 'https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv'
  download.file(url, './datasets/pml-testing.csv', mode = 'wb')
  print("The testing dataset was downloaded successfully")
} else {
  print("The testing dataset was previously downloaded")
}
#
if (!file.exists("./datasets/pml-training.csv")) {
  url <- 'https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv'
  download.file(url, './datasets/pml-training.csv', mode = 'wb')
  print("The training dataset was downloaded successfully")
} else {
  print("The training dataset was previously downloaded")
} | /getData.R | no_license | and88x/Human-Activity-Recognition | R | false | false | 797 | r | #
# Download the Coursera Practical Machine Learning datasets into ./datasets/
# (each download is skipped when the file is already present).
# BUG FIX: the original swapped the two URLs -- it saved pml-training.csv's
# contents as pml-testing.csv and vice versa.
if (!file.exists("./datasets/pml-testing.csv")) {
  url <- 'https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv'
  download.file(url, './datasets/pml-testing.csv', mode = 'wb')
  print("The testing dataset was downloaded successfully")
} else {
  print("The testing dataset was previously downloaded")
}
#
if (!file.exists("./datasets/pml-training.csv")) {
  url <- 'https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv'
  download.file(url, './datasets/pml-training.csv', mode = 'wb')
  print("The training dataset was downloaded successfully")
} else {
  print("The training dataset was previously downloaded")
} |
## Code to generate plot1 from scratch
## This code assumes that the data is contained in a folder in the github
## repository named data/
# Read the single header row to recover the column names.
header <- read.table("./data/household_power_consumption.txt", sep = ';', header = FALSE, nrows = 1, stringsAsFactors = FALSE )
# Read only the rows of interest via a fixed skip/nrows window.
# NOTE(review): the 66638-69516 row window is hard-coded -- presumably it
# corresponds to dates 1/2/2007-2/2/2007; verify against the raw file.
pwr <- read.table("./data/household_power_consumption.txt", sep = ';', nrows = (69517 - 66638), skip = 66637, header = FALSE)
colnames(pwr) <- unlist(header)
# Histogram of Global Active Power, saved as a PNG (default 480x480).
png(file = "plot1.png")
hist(pwr$Global_active_power, xlab = "Global Active Power (kilowatts)", col = "red", main = "Global Active Power")
dev.off()
| /plot1.R | no_license | tayebzaidi/ExData_Plotting1 | R | false | false | 580 | r | ## Code to generate plot1 from scratch
## THis code assumes that the data is contained in a folder in the github
## repository name data/
header <- read.table("./data/household_power_consumption.txt", sep = ';', header = FALSE, nrows = 1, stringsAsFactors = FALSE )
pwr <- read.table("./data/household_power_consumption.txt", sep = ';', nrows = (69517 - 66638), skip = 66637, header = FALSE)
colnames(pwr) <- unlist(header)
png(file = "plot1.png")
hist(pwr$Global_active_power, xlab = "Global Active Power (kilowatts)", col = "red", main = "Global Active Power")
dev.off()
|
##
## Date 2016-08-18
## Author: Tiziano Vicentini
##
## This script is part of the final assignment of the Getting and Cleaning Data
## from Coursera Data Science Specialization.
## It processes the data sets from source
## http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
## and downloaded at link
## https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
## Output is a tidy data set tidy.txt with the average of
## each variable for each activity and each subject.
##
library(dplyr)
## STEP 1
# Merges the training and the test sets to create one data set.
# Loads datasets from disk
print("Loading dataset from disk...")
trainx <- read.table("./data/train/X_train.txt", stringsAsFactors = FALSE)
trainy <- read.table("./data/train/y_train.txt", stringsAsFactors = FALSE)
testx <- read.table("./data/test/X_test.txt", stringsAsFactors = FALSE)
testy <- read.table("./data/test/y_test.txt", stringsAsFactors = FALSE)
trainsubj <- read.table("./data/train/subject_train.txt", stringsAsFactors = FALSE)
testsubj <- read.table("./data/test/subject_test.txt", stringsAsFactors = FALSE)
# Joins datasets
# Column order is subject id, activity id, then the feature columns;
# the label rows prepended in STEP 2 rely on this exact order.
mytrain <- cbind(trainsubj, trainy, trainx)
mytest <- cbind(testsubj, testy, testx)
final_step1 <- rbind(mytrain, mytest) # final result STEP 1
# cleaning
rm(list = c("trainx", "trainy", "testx", "testy", "trainsubj", "testsubj", "mytrain", "mytest"))
print("STEP 1: Merges the training and the test sets to create one data set. Done!")
## STEP 2
# Extracts only the measurements on the mean and standard deviation for each measurement.
# Loads features labels
lbdata <- read.table("./data/features.txt")
# Adds ActivityId and SubjectId to labels
# (prepended in reverse so the final label order -- SubjectId, ActivityId,
# features -- matches the cbind() column order built in STEP 1)
lbdata <- rbind(data.frame(V1 = 0, V2 = "ActivityId"), lbdata)
lbdata <- rbind(data.frame(V1 = -1, V2 = "SubjectId"), lbdata)
# Assign labels to final_step1 dataset
names(final_step1) <- t(lbdata[,2]) # transpose the column
# Extracts mean and std only
# NOTE: the pattern also keeps the meanFreq() columns.
final_step2 <- final_step1[,grep("mean|std",names(final_step1))] # final result STEP 2
# cleaning
rm(list = c("lbdata"))
print("STEP 2: Extracts only mean/std measurement. Done!")
## STEP 3
# Uses descriptive activity names to name the activities in the data set
# read label data for activities
lbactivity <- read.table("./data/activity_labels.txt", stringsAsFactors = FALSE)
# substitute "_" with space
lbactivity[,2] <- sub("_"," ", lbactivity[,2])
# all lowercase
lbactivity[,2] <- tolower(lbactivity[,2])
# Set column names for the temp data
names(lbactivity) <- c("ActivityId", "Activity")
# Add Activity and Subject
final_step2 <- cbind("ActivityId" = final_step1$ActivityId, "SubjectId" = final_step1$SubjectId, final_step2)
# Join data
final_step3 <- merge(lbactivity, final_step2, by = "ActivityId", all = TRUE, sort = FALSE)
# Remove unused colum
# NOTE(review): the 4:82 column range is hard-coded to the current feature
# count; 4:ncol(final_step3) would survive a change to the grep above.
final_step3 <- final_step3[,c(3,2,4:82)] # final result STEP 3
# cleaning
rm(list = c("final_step2", "lbactivity"))
print("STEP 3: Use descriptive activity names. Done." )
## STEP 4
# Appropriately labels the data set with descriptive variable names.
# Store temp column label
nametidy <- names(final_step3)
# Set column labels to HumanReadableForm beacuse are very long names
# NOTE: sub() replaces only the FIRST occurrence in each name, and the
# substitution order below matters (e.g. "BodyBody" must be collapsed
# before other renames).
nametidy <- sub("^f", "Frequency", nametidy)
nametidy <- sub("^t", "Time", nametidy)
nametidy <- sub("BodyBody", "Body", nametidy)
nametidy <- sub("Acc", "Acceleration", nametidy)
nametidy <- sub("Gyro", "Gyroscope", nametidy)
nametidy <- sub("mean\\(\\)", "Mean", nametidy)
nametidy <- sub("meanFreq\\(\\)", "MeanFrequency", nametidy)
nametidy <- sub("Mag", "Magnitude", nametidy)
nametidy <- sub("std\\(\\)", "StandardDeviation", nametidy)
nametidy <- sub("X", "XAxis", nametidy)
nametidy <- sub("Y", "YAxis", nametidy)
nametidy <- sub("Z", "ZAxis", nametidy)
nametidy <- gsub("-", "", nametidy)
names(final_step3) <- nametidy
final_step4 <- final_step3 # final result step 4
# cleaning
rm(list = c("nametidy", "final_step3"))
print("STEP 4: Appropriately labels the data set with descriptive variable names.Done!")
## STEP 5
# From the data set in step 4, creates a second, independent tidy data set with
# the average of each variable for each activity and each subject.
# final result STEP 5
tidy <- final_step4 %>%
        group_by(SubjectId, Activity) %>%
        summarise_all(mean)
# write data for assignment
write.table(tidy, "tidy.txt", row.names = FALSE)
# cleaning
rm(list = c("final_step1", "final_step4"))
print("STEP 5: creates a second, independent tidy data set. Done!") | /run_analysis.R | no_license | TizVic/DataScienceGettingAndCleaningData | R | false | false | 4,703 | r | ##
## Date 2016-08-18
## Author: Tiziano Vicentini
##
## This script is part of the final assignemnt of the Getting and Cleaning Data
## from Coursera Data Science Specialization.
## It processes the data sets from sorce
## http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
## and downloaded at link
## https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
## Output is a tidy data set tidy.txt with the average of
## each variable for each activity and each subject.
##
library(dplyr)
## STEP 1
# Merges the training and the test sets to create one data set.
# Loads datasets from disk
print("Loading dataset from disk...")
trainx <- read.table("./data/train/X_train.txt", stringsAsFactors = FALSE)
trainy <- read.table("./data/train/y_train.txt", stringsAsFactors = FALSE)
testx <- read.table("./data/test/X_test.txt", stringsAsFactors = FALSE)
testy <- read.table("./data/test/y_test.txt", stringsAsFactors = FALSE)
trainsubj <- read.table("./data/train/subject_train.txt", stringsAsFactors = FALSE)
testsubj <- read.table("./data/test/subject_test.txt", stringsAsFactors = FALSE)
# Joins datasets
mytrain <- cbind(trainsubj, trainy, trainx)
mytest <- cbind(testsubj, testy, testx)
final_step1 <- rbind(mytrain, mytest) # final result STEP 1
# cleaning
rm(list = c("trainx", "trainy", "testx", "testy", "trainsubj", "testsubj", "mytrain", "mytest"))
print("STEP 1: Merges the training and the test sets to create one data set. Done!")
## STEP 2
# Extracts only the measurements on the mean and standard deviation for each measurement.
# Loads features labels
lbdata <- read.table("./data/features.txt")
# Adds ActivityId and SubjectId to labels
lbdata <- rbind(data.frame(V1 = 0, V2 = "ActivityId"), lbdata)
lbdata <- rbind(data.frame(V1 = -1, V2 = "SubjectId"), lbdata)
# Assign labels to final_step1 dataset
names(final_step1) <- t(lbdata[,2]) # transpose the column
# Extracts mean and std only
final_step2 <- final_step1[,grep("mean|std",names(final_step1))] # final result STEP 2
# cleaning
rm(list = c("lbdata"))
print("STEP 2: Extracts only mean/std measurement. Done!")
## STEP 3
# Uses descriptive activity names to name the activities in the data set
# read label data for activities
lbactivity <- read.table("./data/activity_labels.txt", stringsAsFactors = FALSE)
# substitute "_" with space
lbactivity[,2] <- sub("_"," ", lbactivity[,2])
# all lowercase
lbactivity[,2] <- tolower(lbactivity[,2])
# Set column names for the temp data
names(lbactivity) <- c("ActivityId", "Activity")
# Add Activity and Subject
final_step2 <- cbind("ActivityId" = final_step1$ActivityId, "SubjectId" = final_step1$SubjectId, final_step2)
# Join data
final_step3 <- merge(lbactivity, final_step2, by = "ActivityId", all = TRUE, sort = FALSE)
# Remove unused colum
final_step3 <- final_step3[,c(3,2,4:82)] # final result STEP 3
# cleaning
rm(list = c("final_step2", "lbactivity"))
print("STEP 3: Use descriptive activity names. Done." )
## STEP 4
# Appropriately labels the data set with descriptive variable names.
# Store temp column label
nametidy <- names(final_step3)
# Set column labels to HumanReadableForm beacuse are very long names
nametidy <- sub("^f", "Frequency", nametidy)
nametidy <- sub("^t", "Time", nametidy)
nametidy <- sub("BodyBody", "Body", nametidy)
nametidy <- sub("Acc", "Acceleration", nametidy)
nametidy <- sub("Gyro", "Gyroscope", nametidy)
nametidy <- sub("mean\\(\\)", "Mean", nametidy)
nametidy <- sub("meanFreq\\(\\)", "MeanFrequency", nametidy)
nametidy <- sub("Mag", "Magnitude", nametidy)
nametidy <- sub("std\\(\\)", "StandardDeviation", nametidy)
nametidy <- sub("X", "XAxis", nametidy)
nametidy <- sub("Y", "YAxis", nametidy)
nametidy <- sub("Z", "ZAxis", nametidy)
nametidy <- gsub("-", "", nametidy)
names(final_step3) <- nametidy
final_step4 <- final_step3 # final result step 4
# cleaning
rm(list = c("nametidy", "final_step3"))
print("STEP 4: Appropriately labels the data set with descriptive variable names.Done!")
## STEP 5
# From the data set in step 4, creates a second, independent tidy data set with
# the average of each variable for each activity and each subject.
# final result STEP 5
tidy <- final_step4 %>%
group_by(SubjectId, Activity) %>%
summarise_all(mean)
# write data for assignment
write.table(tidy, "tidy.txt", row.names = FALSE)
# cleaning
rm(list = c("final_step1", "final_step4"))
print("STEP 5: creates a second, independent tidy data set. Done!") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theta_join.R
\name{theta_join_nse}
\alias{theta_join_nse}
\title{Make a theta_join node.}
\usage{
theta_join_nse(a, b, expr, ..., jointype = "INNER", suffix = c("_a",
"_b"), env = parent.frame())
}
\arguments{
\item{a}{source to select from.}
\item{b}{source to select from.}
\item{expr}{unquoted join condition}
\item{...}{force later arguments to be by name}
\item{jointype}{type of join ('INNER', 'LEFT', 'RIGHT', 'FULL').}
\item{suffix}{character vector of length 2; suffixes used to disambiguate columns.}
\item{env}{environment to look for values in.}
}
\value{
theta_join node.
}
\description{
Theta join is a join on an arbitrary predicate.
}
\examples{
if (requireNamespace("DBI", quietly = TRUE) && requireNamespace("RSQLite", quietly = TRUE)) {
my_db <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
d1 <- rq_copy_to(my_db, 'd1',
data.frame(AUC = 0.6, R2 = 0.2))
d2 <- rq_copy_to(my_db, 'd2',
data.frame(AUC2 = 0.4, R2 = 0.3))
optree <- theta_join_nse(d1, d2, AUC >= AUC2)
cat(format(optree))
sql <- to_sql(optree, my_db)
cat(sql)
print(DBI::dbGetQuery(my_db, sql))
DBI::dbDisconnect(my_db)
}
}
| /man/theta_join_nse.Rd | no_license | mstei4176/rquery | R | false | true | 1,237 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theta_join.R
\name{theta_join_nse}
\alias{theta_join_nse}
\title{Make a theta_join node.}
\usage{
theta_join_nse(a, b, expr, ..., jointype = "INNER", suffix = c("_a",
"_b"), env = parent.frame())
}
\arguments{
\item{a}{source to select from.}
\item{b}{source to select from.}
\item{expr}{unquoted join condition}
\item{...}{force later arguments to be by name}
\item{jointype}{type of join ('INNER', 'LEFT', 'RIGHT', 'FULL').}
\item{suffix}{character vector of length 2; suffixes used to disambiguate columns.}
\item{env}{environment to look for values in.}
}
\value{
theta_join node.
}
\description{
Theta join is a join on an arbitrary predicate.
}
\examples{
if (requireNamespace("DBI", quietly = TRUE) && requireNamespace("RSQLite", quietly = TRUE)) {
my_db <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
d1 <- rq_copy_to(my_db, 'd1',
data.frame(AUC = 0.6, R2 = 0.2))
d2 <- rq_copy_to(my_db, 'd2',
data.frame(AUC2 = 0.4, R2 = 0.3))
optree <- theta_join_nse(d1, d2, AUC >= AUC2)
cat(format(optree))
sql <- to_sql(optree, my_db)
cat(sql)
print(DBI::dbGetQuery(my_db, sql))
DBI::dbDisconnect(my_db)
}
}
|
library(lubridate)
# Read the full semicolon-separated dataset.
# NOTE(review): na.strings = "?" is not set, so "?" values become factor
# levels -- confirm the two target days contain no missing values.
data <- read.csv("household_power_consumption.txt",sep=";")
# Keep only 1-2 Feb 2007.
data <- subset(data, Date == '2/2/2007' | Date == '1/2/2007')
head(data)  # exploratory output only
str(data)   # exploratory output only
# Combine Date and Time into a POSIXlt datetime for the x-axis.
data$DateTime <- strptime(paste(dmy(data$Date), data$Time), format= "%Y-%m-%d %H:%M:%S")
png('plot3.png', width=480, height=480)
# Columns were read as factors (pre-R-4.0 stringsAsFactors default), hence
# as.double(as.character(...)) to recover the numeric values.
with(data, plot(DateTime, as.double(as.character(Sub_metering_1)), type="l", ylab = "Energy sub metering", xlab=""))
with(data, lines(DateTime, as.double(as.character(Sub_metering_2)), type="l", col="red"))
with(data, lines(DateTime, as.double(as.character(Sub_metering_3)), type="l", col="blue"))
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col =c("black","red","blue"),lty=c(1,1,1),cex=0.8,text.width=45000)
dev.off()
| /Plot3.R | no_license | paullb514/ExData_Plotting1 | R | false | false | 745 | r | library(lubridate)
data <- read.csv("household_power_consumption.txt",sep=";")
data <- subset(data, Date == '2/2/2007' | Date == '1/2/2007')
head(data)
str(data)
data$DateTime <- strptime(paste(dmy(data$Date), data$Time), format= "%Y-%m-%d %H:%M:%S")
png('plot3.png', width=480, height=480)
with(data, plot(DateTime, as.double(as.character(Sub_metering_1)), type="l", ylab = "Energy sub metering", xlab=""))
with(data, lines(DateTime, as.double(as.character(Sub_metering_2)), type="l", col="red"))
with(data, lines(DateTime, as.double(as.character(Sub_metering_3)), type="l", col="blue"))
legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col =c("black","red","blue"),lty=c(1,1,1),cex=0.8,text.width=45000)
dev.off()
|
########### MACHINE LEARNING COURSE EXERCISE TWO - LOGISTIC REGRESSION ############
# R scripts for exercise 2 (week 3) of coursera machine learning course.
# Read Data & Set Params ---------------------------------------------------------------
# create dataset for plotting
# Columns: two exam scores plus a 0/1 admission flag.
dataset1 <- read_csv("ex2data1.txt", col_names = c('exam_one','exam_two','entry')) #%>%
#mutate(entry = ifelse(entry==1,"Admitted", "Not Admitted"))
# create X and y
# Design matrix: a column of 1s (intercept) followed by the two exam scores.
X <- matrix(c(rep(1,100), dataset1[['exam_one']], dataset1[['exam_two']]), ncol=3)
y <- dataset1[['entry']]
# set theta
theta <- c(0,0,0)
# Plot Data ---------------------------------------------------------------
# create plot
# Scatter of exam scores, coloured/shaped by admission outcome.
ggplot(dataset1, aes(x=exam_one, y=exam_two)) +
geom_point(aes(colour=as.factor(entry), shape =as.factor(entry))) +
theme_bw() +
scale_color_manual(values=c('yellow','navy')) +
labs(color='', shape='', x="Exam One Score", y="Exam Two Score") +
scale_shape_manual(values=c('circle','cross'))
# Define Functions --------------------------------------------------------
# Define Sigmoid Function
# Logistic function mapping any real input to (0, 1); vectorised, so it
# works elementwise on vectors and matrices.
sigmoid <- function(z) {
  denom <- 1 + exp(-z)
  1 / denom
}
# Define Cost Function
# Logistic-regression cost J(theta) = (1/m) * sum(-y*log(h) - (1-y)*log(1-h)),
# where h = sigmoid(X %*% theta).
# Vectorised and generalised: the original looped over rows and hard-coded
# exactly three columns of X; this version accepts any number of features.
costFunction <- function(X, y, theta) {
  m <- length(y)
  h <- 1 / (1 + exp(-(X %*% theta)))   # predicted probabilities
  J <- (1 / m) * sum(-y * log(h) - (1 - y) * log(1 - h))
  return(J)
}
#test cost function - output should be approximately 0.218
costFunction(theta=c(-24,0.2,0.2), X, y)
#define gradient function
# Gradient of the logistic-regression cost: (1/m) * t(X) %*% (h - y),
# where h = sigmoid(X %*% theta). Returns a plain numeric vector with one
# entry per parameter.
# Vectorised and generalised: the original looped over rows and hard-coded
# exactly three columns of X; this version accepts any number of features.
gradFunction <- function(X, y, theta) {
  m <- length(y)
  h <- 1 / (1 + exp(-(X %*% theta)))
  as.vector(crossprod(X, h - y) / m)
}
#test grad function
# NOTE(review): no expected output is recorded here; compare the printed
# gradient against the course handout values before relying on this check.
gradFunction(X, y, theta = c(-24, 0.2, 0.2))
# Optimising and Plotting Decision Boundary -------------------------------
# Reference fit via glm(). BUG FIX: the data frame created above is named
# `dataset1`; the original referenced an undefined object `dataset`.
log_reg <- glm(entry ~ exam_one + exam_two, data = dataset1, family='binomial')
# Decision boundary is where the linear predictor is zero:
#   b0 + b1*x1 + b2*x2 = 0  =>  x2 = -(b0 + b1*x1) / b2
slope <- coef(log_reg)[2]/(-coef(log_reg)[3])
intercept <- coef(log_reg)[1]/(-coef(log_reg)[3])
# Same scatter as before, with the fitted boundary overlaid.
ggplot(dataset1, aes(x=exam_one, y=exam_two)) +
geom_point(aes(colour=as.factor(entry), shape =as.factor(entry))) +
theme_bw() +
scale_color_manual(values=c('yellow','navy')) +
labs(color='', shape='', x="Exam One Score", y="Exam Two Score") +
scale_shape_manual(values=c('circle','cross')) +
geom_abline(slope = slope, intercept = intercept)
# Prediction and Accuracy -------------------------------------------------
# Admission probability for exam scores (45, 85): predict() on a glm returns
# the linear predictor (log-odds) by default, so sigmoid() maps it to (0, 1).
sigmoid(predict(log_reg, tibble(intercept = 1, exam_one = 45, exam_two=85)))
# Regularised Logistic Regression -----------------------------------------
# Second dataset (microchip QA tests); non-linearly separable in the handout.
dataset2 <- read_csv("ex2data2.txt", col_names = c('test_one','test_two','pass'))
# Plotting the Data -------------------------------------------------------
# Scatter of the two test scores, colour/shape coded by pass/fail.
ggplot(dataset2, aes(test_one, test_two)) +
geom_point(aes(color=as.factor(pass), shape=as.factor(pass))) +
scale_color_manual(values=c('yellow','navy')) +
scale_shape_manual(values=c('circle','cross')) + theme_bw() +
labs(x="Test One Score", y="Test Two Score", color="Passed", shape="Passed")
| /machine-learning-ex2/ex2/exercise_two.R | no_license | samtaylor54321/machine_learning_course | R | false | false | 3,350 | r | ########### MACHINE LEARNING COURSE EXERCISE TWO - LOGISTIC REGRESSION ############
# R scripts for exercise 2 (week 3) of coursera machine learning course.
# Read Data & Set Params ---------------------------------------------------------------
# create dataset for plotting
# NOTE(review): read_csv() (readr) and the ggplot2 calls below assume the
# tidyverse is attached elsewhere; confirm library() calls exist upstream.
dataset1 <- read_csv("ex2data1.txt", col_names = c('exam_one','exam_two','entry')) #%>%
#mutate(entry = ifelse(entry==1,"Admitted", "Not Admitted"))
# create X and y
# Design matrix: intercept column of ones plus the two exam scores.
# NOTE(review): rep(1,100) hard-codes 100 rows - ex2data1.txt must have
# exactly 100 observations; rep(1, nrow(dataset1)) would be safer.
X <- matrix(c(rep(1,100), dataset1[['exam_one']], dataset1[['exam_two']]), ncol=3)
y <- dataset1[['entry']]
# set theta
# Initial coefficients (intercept, exam_one, exam_two) all at zero.
theta <- c(0,0,0)
# Plot Data ---------------------------------------------------------------
# create plot
# Scatter of the two exam scores, colour/shape coded by admission outcome.
ggplot(dataset1, aes(x=exam_one, y=exam_two)) +
geom_point(aes(colour=as.factor(entry), shape =as.factor(entry))) +
theme_bw() +
scale_color_manual(values=c('yellow','navy')) +
labs(color='', shape='', x="Exam One Score", y="Exam Two Score") +
scale_shape_manual(values=c('circle','cross'))
# Define Functions --------------------------------------------------------
# Define Sigmoid Function
# Logistic (sigmoid) function: maps any real input onto (0, 1).
# Fully vectorised - z may be a scalar, vector, or matrix.
sigmoid <- function(z) {
  neg_exp <- exp(-z)
  1 / (1 + neg_exp)
}
# Define Cost Function
# Unregularised logistic-regression cost:
#   J(theta) = (1/m) * sum( -y*log(h) - (1-y)*log(1-h) ),  h = sigmoid(X %*% theta)
# Vectorised rewrite of the original element-wise loop (which also grew two
# lists inside the loop); generalises from the hard-coded 3 columns to any
# number of features p.
# X: m x p design matrix (intercept column included); y: 0/1 vector, length m;
# theta: coefficient vector, length p. Returns the scalar cost J.
costFunction <- function(X, y, theta) {
  # plogis(z) is the logistic CDF, i.e. 1/(1+exp(-z)), computed stably (stats).
  h <- plogis(X %*% theta)
  sum(-y * log(h) - (1 - y) * log(1 - h)) / length(y)
}
#test cost function - output should be approximately 0.218
# (reference value from the course handout for theta = (-24, 0.2, 0.2)
# evaluated on the ex2data1 design matrix X and labels y built above)
costFunction(theta=c(-24,0.2,0.2), X, y)
#define gradient function
# Gradient of the logistic-regression cost:
#   grad = (1/m) * t(X) %*% (sigmoid(X %*% theta) - y)
# Vectorised rewrite of the original loop + rep/matrix construction; also
# generalises from the hard-coded 3 columns to any number of features p.
# Returns an unnamed numeric vector of length p (same as the original).
gradFunction <- function(X, y, theta) {
  # plogis(z) == 1/(1+exp(-z)) (logistic CDF from stats, numerically stable)
  err <- plogis(X %*% theta) - y        # m x 1 residuals h - y
  as.vector(t(X) %*% err) / length(y)   # length-p gradient
}
#test grad function
# NOTE(review): no expected output is recorded here; compare the printed
# gradient against the course handout values before relying on this check.
gradFunction(X, y, theta = c(-24, 0.2, 0.2))
# Optimising and Plotting Decision Boundary -------------------------------
# Reference fit via glm(). BUG FIX: the data frame created above is named
# `dataset1`; the original referenced an undefined object `dataset`.
log_reg <- glm(entry ~ exam_one + exam_two, data = dataset1, family='binomial')
# Decision boundary is where the linear predictor is zero:
#   b0 + b1*x1 + b2*x2 = 0  =>  x2 = -(b0 + b1*x1) / b2
slope <- coef(log_reg)[2]/(-coef(log_reg)[3])
intercept <- coef(log_reg)[1]/(-coef(log_reg)[3])
# Same scatter as before, with the fitted boundary overlaid.
ggplot(dataset1, aes(x=exam_one, y=exam_two)) +
geom_point(aes(colour=as.factor(entry), shape =as.factor(entry))) +
theme_bw() +
scale_color_manual(values=c('yellow','navy')) +
labs(color='', shape='', x="Exam One Score", y="Exam Two Score") +
scale_shape_manual(values=c('circle','cross')) +
geom_abline(slope = slope, intercept = intercept)
# Prediction and Accuracy -------------------------------------------------
# Admission probability for exam scores (45, 85): predict() on a glm returns
# the linear predictor (log-odds) by default, so sigmoid() maps it to (0, 1).
sigmoid(predict(log_reg, tibble(intercept = 1, exam_one = 45, exam_two=85)))
# Regularised Logistic Regression -----------------------------------------
# Second dataset (microchip QA tests); non-linearly separable in the handout.
dataset2 <- read_csv("ex2data2.txt", col_names = c('test_one','test_two','pass'))
# Plotting the Data -------------------------------------------------------
# Scatter of the two test scores, colour/shape coded by pass/fail.
ggplot(dataset2, aes(test_one, test_two)) +
geom_point(aes(color=as.factor(pass), shape=as.factor(pass))) +
scale_color_manual(values=c('yellow','navy')) +
scale_shape_manual(values=c('circle','cross')) + theme_bw() +
labs(x="Test One Score", y="Test Two Score", color="Passed", shape="Passed")
|
#' Set the GITHUB_PAT variable on Travis
#'
#' Assigns a GitHub PAT to the `GITHUB_PAT` private variable on Travis CI
#' via [travis_set_var()],
#' among others this works around GitHub API rate limitations.
#' By default, the PAT is obtained from [github_create_pat()].
#'
#' @param pat `[string]`\cr
#' If set, avoids calling `github_create_pat()`
#' @inheritParams travis_repo_info
#' @inheritParams github_create_repo
#'
#' @family Travis CI functions
#'
#' @export
travis_set_pat <- function(pat = NULL,
                           repo = github_repo(),
                           token = travis_token(repo),
                           quiet = FALSE) {
  # Fall back to a freshly created PAT when the caller did not supply one.
  effective_pat <- if (is.null(pat)) github_create_pat(repo = repo) else pat
  # Store it as a private (non-public) environment variable on Travis CI.
  travis_set_var("GITHUB_PAT", effective_pat,
    public = FALSE,
    repo = repo,
    token = token,
    quiet = quiet
  )
}
| /R/travis-pat.R | no_license | zeehio/travis | R | false | false | 839 | r | #' Set the GITHUB_PAT variable on Travis
#'
#' Assigns a GitHub PAT to the `GITHUB_PAT` private variable on Travis CI
#' via [travis_set_var()],
#' among others this works around GitHub API rate limitations.
#' By default, the PAT is obtained from [github_create_pat()].
#'
#' @param pat `[string]`\cr
#' If set, avoids calling `github_create_pat()`
#' @inheritParams travis_repo_info
#' @inheritParams github_create_repo
#'
#' @family Travis CI functions
#'
#' @export
travis_set_pat <- function(pat = NULL,
                           repo = github_repo(),
                           token = travis_token(repo),
                           quiet = FALSE) {
  # Fall back to a freshly created PAT when the caller did not supply one.
  effective_pat <- if (is.null(pat)) github_create_pat(repo = repo) else pat
  # Store it as a private (non-public) environment variable on Travis CI.
  travis_set_var("GITHUB_PAT", effective_pat,
    public = FALSE,
    repo = repo,
    token = token,
    quiet = quiet
  )
}
|
# Test runner entry point, executed by `R CMD check` / devtools::test().
library(testthat)
library(episheet)
# Discover and run every test file under tests/testthat/ for the package.
test_check("episheet")
| /tests/testthat.R | no_license | epijim/episheet | R | false | false | 60 | r | library(testthat)
library(episheet)
test_check("episheet")
|
# Funktion adSim() intialisieren mit Standardeinstellungen: Verteilungsannahme: normal; Bootstrap-Simulationen: 10000
adSim <- function(x, distribution = "normal", b = 10000){
# Fehlerpruefungen durchfuehren
# 1.20 ueberpruefung, dass x ein numerischer Vektor ist
if(mode(x) != "numeric")
stop(paste("\n"," adSim() requires numeric x data"))
# 1.25 ueberpruefung von x auf fehlende Werte
if(any(is.na(x)))
stop(paste("\n"," x data has missing values (NA)"))
# 1.30 ueberpruefung, ob auf die angegebene Verteilung getestet werden kann
# Vektor "distr" enthaelt die testbaren Verteilungsmodelle
distr <- c("exponential","cauchy","gumbel","gamma","log-normal","lognormal","logistic","normal","weibull")
if(any(distr == distribution) == FALSE)
stop(paste("\n"," adSim() can not apply for",distribution,"distribution. Please choose one of the following distributions for testing goodness-of-fit: exponential, cauchy, gumbel, gamma, log-normal, lognormal, logistic, normal, weibull"))
# 1.35 & 1.40 ueberpruefung, ob die gewaehlte Verteilung fuer Werte von x<=0 definiert ist bzw. der MLE berechnet werden kann, wenn x entsprechende Werte hat
if(any(x<=0)){
if(distribution == "exponential" || distribution == "lognormal" || distribution == "log-normal" || distribution == "gamma" || distribution == "weibull"){
stop(paste("\n"," adSim() can not apply for", distribution ,"distribution while x contains negative values or x has values equal zero."))}}
# 1.45 & 1.50: bei log-Normalverteilung wird die Stichprobe logarithmiert und anschliessend auf Normalverteilung getestet
# 1.52 "testDistr" gibt die Verteilung an, auf die im Funktionsablauf getestet werden soll (nicht immer identisch mit der hypothetischen Verteilung des Anwenders)
if(distribution == "lognormal" || distribution == "log-normal"){
x <- log(x)
testDistr <- "normal" }
# 1.54 & 1.56 & 1.58: bei Weibull-Verteilung wird die Stichprobe tranformiert x=-log(x) und anschliessend auf Gumbel-Verteilung getestet
# obwohl die Weibull-Verteilung fuer x>=0 definiert ist, muss aufgrund der Logarithmierung x>0 gelten (die Pruefung erfolgt bereits mit 1.35 & 1.40)
if(distribution == "weibull"){
x <- -log(x)
testDistr <- "gumbel"}
# 1.60 bei den anderen Verteilungen erfolgt der Test an ihrer eigenen Verteilung
if(distribution != "lognormal" & distribution != "log-normal" & distribution != "weibull"){
testDistr <- distribution}
# 1.70 Stichprobenumfang bestimmen
n = length(x)
# 1.80 Pruefung auf eine Mindeststichprobengroesse
if(n<3)
stop(paste("\n"," adSim() can not apply for sample sizes n < 3."))
# 1.90 Ordnungsstatistik bilden (aufsteigende Sortierung)
x = sort(x, decreasing = FALSE)
###################################################################
# 2.00 Parameterschaetzung
###################################################################
# 2.10 & 2.15 Zum Schaetzen der Parameter standardmaessig den MLE benutzen (ausser fuer die Normal-, Cauchy- und Gumbel-Verteilung)
# Parameter der Verteilung aus Stichprobe schaetzen und als Liste "parafit" speichern
if(testDistr != "normal" & testDistr != "gumbel" & testDistr != "cauchy"){
#library(MASS)
parafit <- MASS::fitdistr(x,testDistr)
}
# 2.30 & 2.35 Schaetzen der Parameter der Normalverteilung
# der MLE ist fuer die Standardabweichung der Normalverteilung nicht erwartungstreu
# Parameter der Normalverteilung werden mit mean() und sd() geschaetzt und als Vektor "parafit" gespeichert
if(testDistr == "normal"){
parafit <- numeric(2)
parafit[1] = mean(x)
parafit[2] = sd(x)
# 2.40 Parameterbenennung
if(distribution == "lognormal" || distribution == "log-normal"){
names(parafit) = c( "meanlog", "sdlog") # 2.41
}else{names(parafit) = c( "mean", "sd")} # 2.42
}
# 2.50 Schaetzen der Cauchy-Parameter
# Fuer die Cauchy-Verteilung sind zwei unterschiedliche Schaetzverfahren implementiert
if(testDistr == "cauchy"){
# 2.52 Pruefung, ob simuliert wird oder kritische Werte ausgelesen werden sollen
if(is.na(b) == FALSE){ # die AD-Verteilung soll spaeter simuliert werden
# 2.55 Schaetzung mittels fitdistr()
#library(MASS)
parafit <- MASS::fitdistr(x,testDistr)
}else{ # 2.52 die AD-Verteilung soll spaeter nicht simuliert werden
# 2.60 Parameterschaetzung, basierend auf den Summen der gewichteten Ordnungsstatistik
parafit <- numeric(2)
uWeight = numeric(n)
uWeight[1:n] <- sin( 4*pi* ( 1:n/(n+1) - 0.5) ) / (n*tan( pi* ( 1:n/(n+1) - 0.5 ) ) ) # Berechnung der Gewichte u
# fuer ungerade n koennen nicht alle Gewichte berechnet werden, deshalb wird das fehlende Gewicht geschaetzt
if(ifelse(n %% 2, TRUE, FALSE)){ # ifelse-Bedingung gibt TRUE, wenn n ungerade ist
if(length(na.omit(uWeight)) + 1 == length(uWeight)){ # Bedingung prueft, ob nur ein Wert in uWeight nicht berechnet werden konnte
uWeight[which(is.na(uWeight))] = (n+1)/n - sum(na.omit(uWeight)) # Ersetzen von NaN durch geeigneten Schaetzer
}}
parafit[1] <- uWeight %*% x # Parameterberechnung
vWeight = numeric(n)
vWeight[1:n] <- 8*tan( pi* ( 1:n/(n+1) - 0.5) )*(cos( pi*(1:n/(n+1) - 0.5 )))^4 / n # Berechnung der Gewichte v
parafit[2] <- vWeight %*% x # Parameterberechnung
# 2.65 Parameterbenennung
names(parafit) = c( "location", "scale")
}
}
# 2.70 Schaetzen der Gumbel-Parameter (fitdistr() kann nicht die Gumbel-Verteilung schaetzen)
if(testDistr == "gumbel"){
### 2.72 p-Gumbel-Funktion definieren (aus Library VGAM uebernommen)
pgumbel = function (q, location = 0, scale = 1){
answer = exp(-exp(-(q - location)/scale))
answer[scale <= 0] = NaN
answer}
### 2.75 r-Gumbel-Funktion definieren (aus Library VGAM uebernommen)
is.Numeric = function (x, allowable.length = Inf, integer.valued = FALSE, positive = FALSE){
if (all(is.numeric(x)) && all(is.finite(x)) && (if (is.finite(allowable.length)) length(x) ==
allowable.length else TRUE) && (if (integer.valued) all(x == round(x)) else TRUE) &&
(if (positive) all(x > 0) else TRUE)) TRUE else FALSE}
rgumbel = function (n, location = 0, scale = 1){
use.n = if ((length.n <- length(n)) > 1)
length.n
else if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
stop("bad input for argument 'n'")
else n
answer = location - scale * log(-log(runif(use.n)))
answer[scale <= 0] = NaN
answer}
### 2.80 MLE-Parameterschaetzung fuer die Gumbel-Verteilung (iterative Loesung)
f <- function(p) 1/n*sum(x) - (sum(x*exp(-x/p)))/(sum(exp(-x/p)))-p
### iterative Loesung notwendig, um beta zu schaetzen (D'Agostino/Stephens 1986, S. 146)
itSol <- uniroot(f,c(-100,100),tol = 0.0000001, maxiter = 100000)
beta <- as.numeric(itSol$root)
alpha = - beta * log(sum(exp(-x/beta)/n))
parafit <- numeric(2)
parafit[1] = alpha
parafit[2] = beta
# 2.85 Parameterbenennung
names(parafit) = c( "location", "scale")
}
###################################################################
# 3.00 AD-Teststatistik berechnen (mit Lewis-Formel)
###################################################################
# 3.10 Transformation des Wahrscheinlichkeitsintegrals (PIT) berechnen
pit = numeric(n)
#3.15
if(testDistr == "normal")
pit[1:n] = pnorm(x[1:n],parafit[1],parafit[2])
#3.20
if(testDistr == "exponential")
pit[1:n] = pexp(x[1:n],parafit$estimate["rate"])
#3.25
if(testDistr == "cauchy")
if(is.na(b) == FALSE){
pit[1:n] = pcauchy(x[1:n],parafit$estimate["location"],parafit$estimate["scale"])
}else{ pit[1:n] = pcauchy(x[1:n],parafit[1],parafit[2])}
#3.30
if(testDistr == "gamma")
pit[1:n] = pgamma(x[1:n],parafit$estimate["shape"],parafit$estimate["rate"])
#3.35
if(testDistr == "gumbel")
pit[1:n] = pgumbel(x[1:n],parafit[1],parafit[2])
#3.40
if(testDistr == "logistic")
pit[1:n] = plogis(x[1:n],parafit$estimate["location"],parafit$estimate["scale"])
# 3.50 Anwendung der AD-Formel
# 3.60 fuer jedes i den Summenwert berechnen und in Matrix h schreiben
h = matrix(ncol=n)
h[1,] = (2*col(h)-1)*log(pit) + (2*n + 1 - 2*col(h))* log(1 - pit)
# 3.70 AD-Formel vollstaendig berechnen
AD = -n-(1/n) * rowSums(h)
# 3.80 Fehlerpruefung, ob AD-Wert vorhanden ist
if(is.na(AD) == TRUE || AD<0)
stop(paste("\n"," The calculation of the Anderson Darling statistic fails."))
# Ende der AD-Wert-Berechnung fuer die beobachteten Daten
if(is.na(b) == FALSE){
#####################################################################################
# 4 parametrisches Bootstrapping der AD-Nullverteilung
#####################################################################################
# 4.00 Fehlerpruefung, ob die Simulationshaeufigkeit zulaessig gewaehlt ist
if(b<1000)
stop("b is chosen too small for generate an accurate p-Value.")
if(b>1000000)
stop("b is chosen too big for generate an p-value within a reasonable time.")
# 4.02 Ausgabe, dass simuliert wird
cat("\n"," ... simulating the Anderson-Darling distribution by",b,"bootstraps for",distribution,"distribution...","\n","\n")
# 4.05 Resampling und Parameterschaetzung
# Matrix Y mit b sortierten Zufallsstichproben mit Umfang n erstellen
# Verteilungsparameter zu jeder Bootstrap-Stichprobe schaetzen
# 4.10
if(testDistr == "normal"){
Y = t(replicate(b, sort(rnorm(n,parafit[1],parafit[2]))))
paraMean = rowMeans(Y)
paraSd = sqrt(rowSums((Y-rowMeans(Y))^2) /(ncol(Y)-1))
}
# 4.15
if(testDistr == "exponential"){
Y = t(replicate(b, sort(rexp(n, parafit$estimate["rate"]))))
unParaList <- unlist(apply(Y,1,function(x) fitdistr(x,testDistr,rate = parafit$estimate["rate"])))
paraRate = unParaList[names(unParaList) == "estimate.rate"]
}
# 4.20
if(testDistr == "cauchy"){
Y = t(replicate(b, sort(rcauchy(n, parafit$estimate["location"],parafit$estimate["scale"]))))
unParaList <- unlist(apply(Y,1,function(x) fitdistr(x,testDistr,list(location = parafit$estimate["location"], scale = parafit$estimate["scale"]))))
paraLocation = unParaList[names(unParaList) == "estimate.location"]
paraScale = unParaList[names(unParaList) == "estimate.scale"]
}
# 4.25
if(testDistr == "gamma"){
Y = t(replicate(b, sort(rgamma(n, parafit$estimate["shape"],parafit$estimate["rate"]))))
unParaList <- unlist(apply(Y,1,function(x) fitdistr(x,testDistr,list(shape = parafit$estimate["shape"], rate = parafit$estimate["rate"]))))
paraShape = unParaList[names(unParaList) == "estimate.shape"]
paraRate = unParaList[names(unParaList) == "estimate.rate"]
}
# 4.30
if(testDistr == "gumbel"){
Y = t(replicate(b, sort(rgumbel(n, parafit[1], parafit[2]))))
paraBeta = numeric(b)
paraAlpha = numeric(b)
for(j in 1:b){
### iterative Loesung notwendig, um beta zu schaetzen (D'Agostino/Stephens 1986, S. 146)
itSol <- uniroot(function(p) 1/n*sum(Y[j,])-
(sum(Y[j,]*exp(-Y[j,]/p)))/(sum(exp(-Y[j,]/p)))-p,c(-100,100),tol = 0.0000000001, maxiter = 100000)
paraBeta[j] <- as.numeric(itSol$root)
paraAlpha[j] = - paraBeta[j] * log(sum(exp(-Y[j,]/paraBeta[j])/n))
}
}
#4.35
if(testDistr == "logistic"){
Y = t(replicate(b, sort(rlogis(n, parafit$estimate["location"],parafit$estimate["scale"]))))
unParaList <- unlist(apply(Y,1,function(x) fitdistr(x,testDistr,list(location = parafit$estimate["location"], scale = parafit$estimate["scale"]))))
paraLocation = unParaList[names(unParaList) == "estimate.location"]
paraScale = unParaList[names(unParaList) == "estimate.scale"]
}
######################################################################
### 4.40 b-fache Berechnung der Anderson-Darling-Formel
######################################################################
# PIT fuer jeden Wert der Matrix Y berechnen
# (abhaengig von den geschaetzten Verteilungsparameter jeder Bootstrap-Stichprobe)
#4.45
if(testDistr == "normal")
Y[,1:n] <- pnorm(Y[,1:n],paraMean,paraSd)
#4.50
if(testDistr == "exponential")
Y[,1:n] <- pexp(Y[,1:n],paraRate)
#4.55
if(testDistr == "cauchy")
Y[,1:n] <- pcauchy(Y[,1:n],paraLocation,paraScale)
#4.60
if(testDistr == "gamma")
Y[,1:n] <- pgamma(Y[,1:n],paraShape,paraRate)
#4.65
if(testDistr == "gumbel")
Y[,1:n] <- pgumbel(Y[,1:n],paraAlpha,paraBeta)
#4.70
if(testDistr == "logistic")
Y[,1:n] <- plogis(Y[,1:n],paraLocation,paraScale)
# 4.75 Berechnung der Summenglieder der AD-(Lewis)-Formel zu jeder Bootstrap-Stichprobe (Matrix Y ueberschreiben)
Y[1:b,] <- (2*col(Y)-1)*log(Y[1:b,]) + (2*n + 1 - 2*col(Y)) * log(1 - Y[1:b,])
# 4.77 simulierte AD-Werte berechnen und in Vektor simAD schreiben
d = rowSums(Y)
simAD = numeric(b)
simAD[1:b] = -n-(1/n)*d[1:b]
# 4.80 Fehlerpruefung, ob alle AD-Werte bestimmt werden konnten
if(any(is.na(simAD))){
cat(" The simulated Anderson-Darling distribution contains NAs or NaNs!","\n","\n")}
# 4.90 Bestimmung kritischer Werte
critValues = round (matrix( c( 0.75, 0.90, 0.95, 0.975, 0.990,
quantile(simAD, 0.75,na.rm = TRUE), quantile(simAD, 0.90,na.rm = TRUE), quantile(simAD, 0.95,na.rm = TRUE),
quantile(simAD, 0.975,na.rm = TRUE), quantile(simAD, 0.99,na.rm = TRUE) ), nrow= 2, byrow = TRUE ), digits = 5)
# 4.95 Bestimmung des p-Werts
# die p-Wert Bestimmung ist so programmiert, dass fehlende Werte in simAD ausgelassen werden (na.omit)
pValue = sum(na.omit(simAD) > AD)/length(na.omit(simAD))
# Ende der simulationsbasierten Bestimmung von kritischen Werten bzw. des p-Werts
}else{
# 5.00 (b = NA), d.h. Auslesen tabellierter kritischer Werte bzw. des p-Werts oder Anwendung von Berechnungsformeln zur p-Wert-Bestimmung
# 5.02 simAD wird spaeter in der Ausgabe abgerufen, weshalb die Variable definiert sein muss
simAD = NA
### 5.06 Definieren einer Matrix "critValues" zur Erfassung kritischer Werte
critValues = matrix( c( 0.75, 0.90, 0.95, 0.975, 0.990, NA, NA ,NA, NA, NA), nrow = 2, byrow = TRUE)
# 5.08 Test auf Normal-, Exponential-, Gumbel- oder logistische Verteilung
if(testDistr == "normal" || testDistr == "exponential" ||testDistr == "gumbel" || testDistr == "logistic"){
# 5.10 Test auf Normalverteilung
if(testDistr == "normal"){
###################################################################################################################
### kritische Werte fuer die Normalverteilung nach D'Agostino und Stephens 1986 - S. 123 Tab. 4.7
###################################################################################################################
# 5.12 Tabelle der kritischen Werte
normalMtx = matrix(c( 0.50 , 0.75, 0.85 , 0.90 , 0.95 , 0.975 , 0.99 , 0.995 ,
0.341 , 0.470 , 0.561 , 0.631 , 0.752 , 0.873 , 1.035 , 1.159), nrow=2, byrow = TRUE )
# 5.14 Anpassen der Wertetabelle bezueglich des Stichprobenumfangs
normalMtx[2,1:ncol(normalMtx)] = normalMtx[2,1:ncol(normalMtx)]/(1+0.75/n+2.25/n^2)
refMtx = normalMtx
########################################################################################################
### p-Wert Berechnung fuer die Normalverteilung nach D'Agostino und Stephens 1986 - S. 127
########################################################################################################
# 5.16 Anpassen des AD-Werts bzgl. des Stichprobenumfangs
cAD = AD*(1+0.75/n+2.25/n^2)
# 5.18 Berechnung des p-Werts nach Formel
if(0.600<cAD)
{pValue = exp(1.2937-5.709*cAD+0.0186*cAD^2)}
if(0.340<cAD & cAD<0.600)
{pValue = exp(0.9177-4.279*cAD-1.38*cAD^2)}
if(0.200<cAD & cAD<0.340)
{pValue = 1-exp(-8.318+42.796*cAD-59.938*cAD^2)}
if(cAD<0.200)
{pValue = 1-exp(-13.436+101.14*cAD-223.73*cAD^2)}
}
# 5.20 Test auf Exponentialverteilung
if(testDistr == "exponential"){
#############################################################################################################
### Kritische Werte fuer die Exponentialverteilung nach D'Agostino und Stephens 1986 - S. 135 Tab. 4.11
#############################################################################################################
# 5.22 Tabelle der kritischen Werte
expMtx = matrix( c( 0.75, 0.80, 0.85 , 0.90 , 0.95 , 0.975 , 0.99 , 0.995 , 0.9975,
0.736 , 0.816 , 0.916 , 1.062 , 1.321 , 1.591 , 1.959 , 2.244, 2.534), nrow = 2, byrow = TRUE )
# 5.24 Anpassen der Wertetabelle bezueglich des Stichprobenumfangs
expMtx[2,1:ncol(expMtx)] = expMtx[2,1:ncol(expMtx)]/(1+0.6/n)
refMtx = expMtx
############################################################################################################
### p-Wert-Berechnung fuer die Exponentialverteilung nach D'Agostino und Stephens 1986 - S. 136
### Case 2: origin (bzw. location) = 0 = known , scale = unknown = 1/Lambda = 1/rate
############################################################################################################
# 5.26 Anpassen des AD-Werts bzgl. des Stichprobenumfangs
cAD = AD*(1+0.6/n)
# 5.28 Berechnung des p-Werts nach Formel
if(0.950<cAD)
{pValue = exp(0.731 - 3.009*cAD + 0.15*cAD^2)}
if(0.510<cAD & cAD<0.950)
{pValue = exp(0.9209 - 3.353*cAD + 0.300*cAD^2)}
if(0.260<cAD & cAD<0.510)
{pValue = 1 - exp(-6.1327 + 20.218*cAD - 18.663*cAD^2)}
if(cAD<0.260)
{pValue = 1 - exp(-12.2204 + 67.459*cAD - 110.3*cAD^2)}
}
# 5.30 Test auf logistische Verteilung oder Gumbel-Verteilung
if(testDistr == "gumbel" || testDistr == "logistic"){
# 5.31 Test auf Gumbel-Verteilung
if(testDistr == "gumbel"){
##############################################################################################################
### Kritische Werte fuer die Extremwert-Verteilung nach D'Agostino und Stephens 1986 - S. 146 Tab. 4.17
##############################################################################################################
# 5.32 Tabelle der kritischen Werte
gumbelMtx = matrix(c( 0.75, 0.90 , 0.95 , 0.975 , 0.99 , 0.474, 0.637, 0.757, 0.877, 1.038), nrow = 2, byrow = TRUE )
# 5.34 Anpassen der Wertetabelle bezueglich des Stichprobenumfangs
gumbelMtx[2,1:ncol(gumbelMtx)] = gumbelMtx[2,1:ncol(gumbelMtx)]/(1 + 0.2/sqrt(n))
refMtx = gumbelMtx
}
# 5.35 Test auf logistische Verteilung
if(testDistr == "logistic"){
##############################################################################################################
### kritische Werte fuer die logistische Verteilung nach D'Agostino und Stephens 1986 - S. 157 Tab 4.22
##############################################################################################################
# 5.37 Tabelle der kritischen Werte
logisMtx = matrix(c( 0.75, 0.90 , 0.95 , 0.975 , 0.99 , 0.995 , 0.426, 0.563, 0.660, 0.769, 0.906, 1.010 ), nrow = 2, byrow = TRUE )
# 5.39 Anpassen der Wertetabelle bezueglich des Stichprobenumfangs
logisMtx[2,1:ncol(logisMtx)] = logisMtx[2,1:ncol(logisMtx)]/(1 + 0.25/n)
refMtx = logisMtx
}
##############################################################################################################
### Bestimmung des p-Werts fuer die Gumbel- oder logistische Verteilung
##############################################################################################################
critCheck <- refMtx[2,1:ncol(refMtx)] > AD ### gibt TRUE aus fuer alle Eintraege in der Zeile, die groesser als AD sind
# 5.40 Existiert ein kritischer Wert, der groesser als der AD-Wert ist?
if(any(critCheck)){
# 5.42 firPos gibt die Position in der Zeile vom letzten Wert an, der noch kleiner als der AD-Wert ist
firPos <- min(which(critCheck)) - 1
}else{
# 5.44 letzte Spalte als Position waehlen
firPos <- ncol(refMtx)}
# 5.46 p-Wert entsprechend der ermittelten Position bestimmen
if(firPos == 0){
pValue <- 1 - refMtx[1,1]
pValue <- paste(">",pValue)
}else{
pValue <- 1 - refMtx[1,firPos]
pValue <- paste("<=",pValue)}}
##############################################################################################################
### Auslesen der kritischen Werte fuer Normal-, Exponential-, Gumbel oder logistische Verteilung
##############################################################################################################
for(i in 1:ncol(critValues)){
# 5.50 Ist der kritische Werte fuer das zu spezifizierende Quantil tabelliert?
if(any(refMtx[1,1:ncol(refMtx)] == critValues[1,i])){
# 5.52 dann Position des kritischen Werts bzgl. des betrachteten Quantils erfassen
position <- (1:length(refMtx[1,1:ncol(refMtx)] == critValues[1,i]))[(refMtx[1,1:ncol(refMtx)] == critValues[1,i])]
# 5.54 Auslesen des kritischen Werts bezueglich der Position
critValues[2,i] <- refMtx[2,position] ### liest aus der Matrix den kritschen Wert abhaengig vom gewaehlten Quantil
}else{
# 5.58 nicht-tabellierte Quantile mit "NA" belegen
critValues[2,i] <- NA}}
}
# 5.60 Test auf Gamma-Verteilung
if(testDistr == "gamma"){
###################################################################################################################################
### Bestimmung des kritischen Werts und des p-Werts fuer die Gammaverteilung nach D'Agostino und Stephens 1986 - S. 155 - Tab. 4.21
### case 3: shape = unknown, scale = unknown = 1/rate, origin = known = 0
###################################################################################################################################
# 5.62 Tabelle der kritischen Werte
gammaDF = data.frame( c( 1, 2, 3, 4, 5, 6, 8, 10, 12, 15, 20, Inf ),
c( 0.486, 0.477, 0.475, 0.473, 0.472, 0.472, 0.471, 0.471, 0.471, 0.47, 0.47, 0.47 ),
c( 0.657, 0.643, 0.639, 0.637, 0.635, 0.635, 0.634, 0.633, 0.633, 0.632, 0.632, 0.631),
c( 0.786, 0.768, 0.762, 0.759, 0.758, 0.757, 0.755, 0.754, 0.754, 0.754, 0.753, 0.752),
c( 0.917, 0.894, 0.886, 0.883, 0.881, 0.88, 0.878, 0.877, 0.876, 0.876, 0.875, 0.873),
c( 1.092, 1.062, 1.052, 1.048, 1.045, 1.043, 1.041, 1.04, 1.039, 1.038, 1.037, 1.035),
c( 1.227, 1.19, 1.178, 1.173, 1.17, 1.168, 1.165, 1.164, 1.163, 1.162, 1.161, 1.159))
names(gammaDF) = c( "m", 0.75, 0.90, 0.95, 0.975, 0.99, 0.995)
###################################################################################################################################
######## p-Wert-Bestimmung fuer die Gamma-Verteilung
###################################################################################################################################
# critCheck gibt TRUE aus fuer alle Eintraege in der entsprechenden m-Zeile, die groesser als der AD-Wert sind
# zu beachten ist, dass die Betrachtung erst ab der 2ten Spalte der Tabelle erfolgt, so dass die Indizierung von critCheck
# gegenueber der Indizierung der Tabelle um -1 verschoben ist
critCheck <- gammaDF[min(which(gammaDF$m >= parafit$estimate["shape"])),2:ncol(gammaDF)] > AD
# 5.65 Existiert ein kritischer Wert, der groesser als der AD-Wert ist (in der entsprechenden m-Zeile)?
if(any(critCheck)){
# 5.66 firPos gibt die Spalten-Position der Tabelle in der entsprechenden m-Zeile von dem krit. Wert an,
# der noch kleiner als der AD-Wert ist. Fuer firPos = 1 existiert kein kleiner kritischer Wert
firPos <- min(which(critCheck))
}else{
# 5.67 letzte Spalte als Position waehlen
firPos <- ncol(gammaDF) }
# 5.68 p-Wert entsprechend der ermittelten Position bestimmen
if(firPos == 1){
pValue <- 1 - as.numeric(names(gammaDF)[2])
pValue <- paste(">",pValue)
}else{
pValue <- 1 - as.numeric(names(gammaDF)[firPos])
pValue <- paste("<=",pValue)}
###################################################################################################################################
######## kritischen Wert fuer die Gamma-Verteilung auslesen
###################################################################################################################################
for(i in 1:ncol(critValues)){
# 5.70 Ist der kritische Wert fuer das zu spezifizierende Quantil tabelliert?
if(any(names(gammaDF) == critValues[1,i] )){
# 5.72 Auslesen der kritischen Werte an der Zeilen-Position des entsprechenden Formparameters
# und der Spalten-Position des zu spezifizierenden Quantils
critValues[2,i] <- gammaDF[min(which(gammaDF$m >= parafit$estimate["shape"] )),which(names(gammaDF) == critValues[1,i])]
}else{
# 5.74 nicht-tabellierte Quantile mit "NA" belegen
critValues[2,i] <- NA}}
}
# 5.80 Test auf Cauchy-Verteilung
if(testDistr == "cauchy"){
####################################################################################################################
### Bestimmung des kritischen Werts fuer die Cauchy-Verteilung nach D'Agostino und Stephens 1986 - S. 163 Tab 4.26
### case 3: location = unknown, shape = unknown
####################################################################################################################
# 5.82 Tabelle der kritischen Werte
cauchyDF = data.frame(
c( 5, 8, 10, 12, 15, 20, 25, 30, 40, 50, 60, 100, Inf),
c( 0.835, 0.992, 1.04, 1.04, 1.02, 0.975, 0.914, 0.875, 0.812, 0.774, 0.743, 0.689, 0.615),
c( 1.14, 1.52, 1.63, 1.65, 1.61, 1.51, 1.4, 1.3, 1.16, 1.08, 1.02, 0.927, 0.78),
c( 1.4, 2.06, 2.27, 2.33, 2.28, 2.13, 1.94, 1.76, 1.53, 1.41, 1.3, 1.14, 0.949),
c( 1.77, 3.2, 3.77, 4.14, 4.25, 4.05, 3.57, 3.09, 2.48, 2.14, 1.92, 1.52, 1.225),
c( 2, 4.27, 5.58, 6.43, 7.2, 7.58, 6.91, 5.86, 4.23, 3.37, 2.76, 2.05, 1.52),
c( 2.16, 5.24, 7.5, 9.51, 11.5, 14.57, 14.96, 13.8, 10.2, 7.49, 5.32, 3.3, 1.9))
names(cauchyDF) = c( "n", 0.75, 0.85, 0.90, 0.95, 0.975, 0.99)
##############################################################################################################
### Bestimmung des p-Werts fuer die Cauchy-Verteilung
##############################################################################################################
# 5.84 Existieren kritische Werte fuer den zu untersuchenden Stichprobenumfang n?
if(any(cauchyDF[1:13,1] == n)){
# critCheck gibt TRUE aus fuer alle Eintraege in der entsprechenden n-Zeile, die groesser als der AD-Wert sind
# zu beachten ist, dass die Betrachtung erst ab der 2ten Spalte der Tabelle erfolgt, so dass die Indizierung von critCheck
# gegenueber der Indizierung der Tabelle um -1 verschoben ist
critCheck <- cauchyDF[which(cauchyDF[1:13,1] == n),2:ncol(cauchyDF)] > AD
# 5.85 Existiert ein kritischer Wert, der groesser als der AD-Wert ist (in der entsprechenden n-Zeile)?
if(any(critCheck)){
# 5.86 firPos gibt die Spalten-Position der Tabelle in der entsprechenden n-Zeile von dem krit. Wert an,
# der noch kleiner als der AD-Wert ist; fuer firPos = 1 existiert kein kleiner kritischer Wert
firPos <- min(which(critCheck))
}else{
# 5.87 letzte Spalte als Position waehlen
firPos <- ncol(cauchyDF) }
# 5.88 p-Wert entsprechend der ermittelten Position bestimmen
if(firPos == 1){
pValue <- 1 - as.numeric(names(cauchyDF)[2])
pValue <- paste(">",pValue)
}else{
pValue <- 1 - as.numeric(names(cauchyDF)[firPos])
pValue <- paste("<=",pValue)}
##############################################################################################################
### Kritische Werte fuer die Cauchy-Verteilung auslesen
##############################################################################################################
for(i in 1:ncol(critValues)){
# 5.90 Ist der kritische Wert fuer das zu spezifizierende Quantil tabelliert?
if(any(names(cauchyDF) == critValues[1,i] )){
# 5.92 Auslesen der kritischen Werte an der Zeilen-Position des entsprechenden Stichprobenumfangs n
# und der Spalten-Position des zu spezifizierenden Quantils
critValues[2,i] <- cauchyDF[which(cauchyDF[1:13,1] == n),which(names(cauchyDF) == critValues[1,i])]
}else{
# 5.94 nicht-tabellierte Quantile mit "NA" belegen
critValues[2,i] <- NA}
}
}else{
# 5.96 p-Wert kann nicht ermittelt werden, da fuer "n" keine tabellierten Werte existieren
pValue <- NA
# 5.98 kritische Werte koennen nicht ermittlet werden, da fuer "n" keine tabellierten Werte existieren
critValues[2,1:ncol(critValues)] <- NA
cat("\n","Critical values / p-Values for the Cauchy Distribution are only tabled for sample sizes: n = 5, 8, 10, 12, 15, 20, 25, 30, 40, 50, 60, 100","\n")}
}
}
###########################################################################
#### 6.00 Parameterkorrektur fuer die Weibull-Verteilung
###########################################################################
# 6.10 Sind die beobachteten Zufallszahlen auf Weibull-Verteilung getestet worden?
if(distribution == "weibull"){
# 6.20 Umbenennung der Parameternamen entsprechend der Weibull-Verteilung
names(parafit) = c( "shape", "scale")
# 6.30 Kopie des "parafit"-Vektors
parafitCopy = parafit
# 6.40 Umrechnung der Gumbel-Parameter in Weibull-Parameter
parafit[1] = (1/parafitCopy[2])
parafit[2] = exp(- parafitCopy[1])}
print(list(distribution = distribution, parameter_estimation = parafit,Anderson_Darling = AD, p_value = pValue))
invisible(list(distribution = distribution, parameter_estimation = parafit,Anderson_Darling = AD,p_value = pValue,crititical_values = critValues,simAD = simAD))
}
| /qualityTools/R/adSim.R | no_license | ingted/R-Examples | R | false | false | 31,881 | r |
# Anderson-Darling (AD) goodness-of-fit test with estimated parameters,
# after D'Agostino & Stephens (1986), "Goodness-of-Fit Techniques".
#
# Arguments:
#   x            numeric sample to be tested (no NAs allowed).
#   distribution hypothesised model, one of "exponential", "cauchy", "gumbel",
#                "gamma", "log-normal"/"lognormal", "logistic",
#                "normal" (default) or "weibull".
#   b            number of parametric-bootstrap replications used to simulate
#                the null distribution of the AD statistic (default 10000).
#                Set b = NA to skip the simulation and use the tabulated
#                critical values / p-value approximation formulas instead.
#
# Value: prints a summary and invisibly returns a list with the tested
# distribution, the parameter estimates, the AD statistic, the p-value, the
# critical values and the simulated AD sample (NA when b = NA).
# NOTE: the returned element is spelled "crititical_values" (sic); the typo
# is kept because it is part of the public return-value interface.
adSim <- function(x, distribution = "normal", b = 10000){
  ## ---- input validation --------------------------------------------------
  # 1.20 x must be a numeric vector
  if(mode(x) != "numeric")
    stop(paste("\n"," adSim() requires numeric x data"))
  # 1.25 no missing values
  if(any(is.na(x)))
    stop(paste("\n"," x data has missing values (NA)"))
  # 1.30 the requested distribution must be one of the supported models
  distr <- c("exponential","cauchy","gumbel","gamma","log-normal","lognormal","logistic","normal","weibull")
  if(!(distribution %in% distr))
    stop(paste("\n"," adSim() can not apply for",distribution,"distribution. Please choose one of the following distributions for testing goodness-of-fit: exponential, cauchy, gumbel, gamma, log-normal, lognormal, logistic, normal, weibull"))
  # 1.35 & 1.40 these models are undefined (or the MLE fails) for x <= 0
  if(any(x <= 0)){
    if(distribution == "exponential" || distribution == "lognormal" || distribution == "log-normal" || distribution == "gamma" || distribution == "weibull"){
      stop(paste("\n"," adSim() can not apply for", distribution ,"distribution while x contains negative values or x has values equal zero."))}}
  ## ---- reduction to a directly testable distribution ---------------------
  # 1.45-1.52 log-normal: test log(x) against the normal distribution;
  # testDistr is the distribution actually tested below (not always the
  # user's hypothesised one)
  if(distribution == "lognormal" || distribution == "log-normal"){
    x <- log(x)
    testDistr <- "normal" }
  # 1.54-1.58 Weibull: test -log(x) against the Gumbel distribution
  # (x > 0 was already enforced above, so the log is defined)
  if(distribution == "weibull"){
    x <- -log(x)
    testDistr <- "gumbel"}
  # 1.60 all other distributions are tested directly
  if(distribution != "lognormal" && distribution != "log-normal" && distribution != "weibull"){
    testDistr <- distribution}
  # 1.70 sample size
  n <- length(x)
  # 1.80 minimum sample size
  if(n < 3)
    stop(paste("\n"," adSim() can not apply for sample sizes n < 3."))
  # 1.90 order statistic (ascending)
  x <- sort(x, decreasing = FALSE)
  ###################################################################
  # 2.00 parameter estimation
  ###################################################################
  # 2.10 & 2.15 default: maximum-likelihood fit via MASS::fitdistr()
  # (not used for the normal, Gumbel and Cauchy cases below)
  if(testDistr != "normal" && testDistr != "gumbel" && testDistr != "cauchy"){
    parafit <- MASS::fitdistr(x, testDistr)
  }
  # 2.30-2.42 normal: mean()/sd() (the ML estimator of sigma is biased)
  if(testDistr == "normal"){
    parafit <- numeric(2)
    parafit[1] <- mean(x)
    parafit[2] <- sd(x)
    if(distribution == "lognormal" || distribution == "log-normal"){
      names(parafit) <- c("meanlog", "sdlog")                        # 2.41
    }else{names(parafit) <- c("mean", "sd")}                          # 2.42
  }
  # 2.50 Cauchy: two different estimators, depending on whether the AD
  # distribution will be simulated (b given) or table lookup is used (b = NA)
  if(testDistr == "cauchy"){
    # 2.52
    if(!is.na(b)){
      # 2.55 simulation mode: ML fit
      parafit <- MASS::fitdistr(x, testDistr)
    }else{
      # 2.60 table mode: estimators based on sums of the weighted order
      # statistic
      parafit <- numeric(2)
      uWeight <- numeric(n)
      uWeight[1:n] <- sin( 4*pi* ( 1:n/(n+1) - 0.5) ) / (n*tan( pi* ( 1:n/(n+1) - 0.5 ) ) )
      # for odd n the middle weight is 0/0 (NaN); replace it by the value
      # that completes the weight sum
      if(n %% 2 == 1){
        if(length(na.omit(uWeight)) + 1 == length(uWeight)){
          uWeight[which(is.na(uWeight))] <- (n+1)/n - sum(na.omit(uWeight))
        }}
      parafit[1] <- uWeight %*% x
      vWeight <- numeric(n)
      vWeight[1:n] <- 8*tan( pi* ( 1:n/(n+1) - 0.5) )*(cos( pi*(1:n/(n+1) - 0.5 )))^4 / n
      parafit[2] <- vWeight %*% x
      # 2.65
      names(parafit) <- c("location", "scale")
    }
  }
  # 2.70 Gumbel: fitdistr() has no Gumbel model, so fit manually
  if(testDistr == "gumbel"){
    ### 2.72 Gumbel CDF (adapted from package VGAM)
    pgumbel <- function (q, location = 0, scale = 1){
      answer <- exp(-exp(-(q - location)/scale))
      answer[scale <= 0] <- NaN
      answer}
    ### 2.75 Gumbel RNG and helper (adapted from package VGAM)
    is.Numeric <- function (x, allowable.length = Inf, integer.valued = FALSE, positive = FALSE){
      if (all(is.numeric(x)) && all(is.finite(x)) && (if (is.finite(allowable.length)) length(x) ==
        allowable.length else TRUE) && (if (integer.valued) all(x == round(x)) else TRUE) &&
        (if (positive) all(x > 0) else TRUE)) TRUE else FALSE}
    rgumbel <- function (n, location = 0, scale = 1){
      use.n <- if ((length.n <- length(n)) > 1)
        length.n
      else if (!is.Numeric(n, integ = TRUE, allow = 1, posit = TRUE))
        stop("bad input for argument 'n'")
      else n
      answer <- location - scale * log(-log(runif(use.n)))
      answer[scale <= 0] <- NaN
      answer}
    ### 2.80 ML estimation; beta requires an iterative root search
    ### (D'Agostino/Stephens 1986, p. 146)
    f <- function(p) 1/n*sum(x) - (sum(x*exp(-x/p)))/(sum(exp(-x/p)))-p
    itSol <- uniroot(f, c(-100,100), tol = 0.0000001, maxiter = 100000)
    beta <- as.numeric(itSol$root)
    alpha <- - beta * log(sum(exp(-x/beta)/n))
    parafit <- numeric(2)
    parafit[1] <- alpha
    parafit[2] <- beta
    # 2.85
    names(parafit) <- c("location", "scale")
  }
  ###################################################################
  # 3.00 AD statistic (Lewis formula)
  ###################################################################
  # 3.10 probability integral transform (PIT) under the fitted model
  pit <- numeric(n)
  # 3.15
  if(testDistr == "normal")
    pit[1:n] <- pnorm(x[1:n], parafit[1], parafit[2])
  # 3.20
  if(testDistr == "exponential")
    pit[1:n] <- pexp(x[1:n], parafit$estimate["rate"])
  # 3.25 (parafit is a fitdistr object in simulation mode, a plain numeric
  # vector in table mode)
  if(testDistr == "cauchy"){
    if(!is.na(b)){
      pit[1:n] <- pcauchy(x[1:n], parafit$estimate["location"], parafit$estimate["scale"])
    }else{ pit[1:n] <- pcauchy(x[1:n], parafit[1], parafit[2])}}
  # 3.30
  if(testDistr == "gamma")
    pit[1:n] <- pgamma(x[1:n], parafit$estimate["shape"], parafit$estimate["rate"])
  # 3.35
  if(testDistr == "gumbel")
    pit[1:n] <- pgumbel(x[1:n], parafit[1], parafit[2])
  # 3.40
  if(testDistr == "logistic")
    pit[1:n] <- plogis(x[1:n], parafit$estimate["location"], parafit$estimate["scale"])
  # 3.50-3.70 Anderson-Darling statistic: summands per order statistic,
  # then the complete formula
  h <- matrix(ncol = n)
  h[1,] <- (2*col(h)-1)*log(pit) + (2*n + 1 - 2*col(h))* log(1 - pit)
  AD <- -n-(1/n) * rowSums(h)
  # 3.80 sanity check
  if(is.na(AD) || AD < 0)
    stop(paste("\n"," The calculation of the Anderson Darling statistic fails."))
  # end of the AD computation for the observed data
  if(!is.na(b)){
    #####################################################################
    # 4 parametric bootstrap of the AD null distribution
    #####################################################################
    # 4.00 keep the replication count in a sensible range
    if(b < 1000)
      stop("b is chosen too small for generate an accurate p-Value.")
    if(b > 1000000)
      stop("b is chosen too big for generate an p-value within a reasonable time.")
    # 4.02 progress message
    cat("\n"," ... simulating the Anderson-Darling distribution by",b,"bootstraps for",distribution,"distribution...","\n","\n")
    # 4.05 resampling: matrix Y holds b sorted random samples of size n;
    # the distribution parameters are re-estimated for every bootstrap
    # sample.  NOTE: fitdistr() is namespace-qualified (MASS::) here — the
    # original relied on MASS being attached, which this function never does.
    # 4.10
    if(testDistr == "normal"){
      Y <- t(replicate(b, sort(rnorm(n, parafit[1], parafit[2]))))
      paraMean <- rowMeans(Y)
      paraSd <- sqrt(rowSums((Y-rowMeans(Y))^2) /(ncol(Y)-1))
    }
    # 4.15
    if(testDistr == "exponential"){
      Y <- t(replicate(b, sort(rexp(n, parafit$estimate["rate"]))))
      unParaList <- unlist(apply(Y, 1, function(row) MASS::fitdistr(row, testDistr, rate = parafit$estimate["rate"])))
      paraRate <- unParaList[names(unParaList) == "estimate.rate"]
    }
    # 4.20
    if(testDistr == "cauchy"){
      Y <- t(replicate(b, sort(rcauchy(n, parafit$estimate["location"], parafit$estimate["scale"]))))
      unParaList <- unlist(apply(Y, 1, function(row) MASS::fitdistr(row, testDistr, list(location = parafit$estimate["location"], scale = parafit$estimate["scale"]))))
      paraLocation <- unParaList[names(unParaList) == "estimate.location"]
      paraScale <- unParaList[names(unParaList) == "estimate.scale"]
    }
    # 4.25
    if(testDistr == "gamma"){
      Y <- t(replicate(b, sort(rgamma(n, parafit$estimate["shape"], parafit$estimate["rate"]))))
      unParaList <- unlist(apply(Y, 1, function(row) MASS::fitdistr(row, testDistr, list(shape = parafit$estimate["shape"], rate = parafit$estimate["rate"]))))
      paraShape <- unParaList[names(unParaList) == "estimate.shape"]
      paraRate <- unParaList[names(unParaList) == "estimate.rate"]
    }
    # 4.30
    if(testDistr == "gumbel"){
      Y <- t(replicate(b, sort(rgumbel(n, parafit[1], parafit[2]))))
      paraBeta <- numeric(b)
      paraAlpha <- numeric(b)
      for(j in seq_len(b)){
        ### iterative beta estimate per bootstrap sample
        ### (D'Agostino/Stephens 1986, p. 146)
        itSol <- uniroot(function(p) 1/n*sum(Y[j,])-
          (sum(Y[j,]*exp(-Y[j,]/p)))/(sum(exp(-Y[j,]/p)))-p, c(-100,100), tol = 0.0000000001, maxiter = 100000)
        paraBeta[j] <- as.numeric(itSol$root)
        paraAlpha[j] <- - paraBeta[j] * log(sum(exp(-Y[j,]/paraBeta[j])/n))
      }
    }
    # 4.35
    if(testDistr == "logistic"){
      Y <- t(replicate(b, sort(rlogis(n, parafit$estimate["location"], parafit$estimate["scale"]))))
      unParaList <- unlist(apply(Y, 1, function(row) MASS::fitdistr(row, testDistr, list(location = parafit$estimate["location"], scale = parafit$estimate["scale"]))))
      paraLocation <- unParaList[names(unParaList) == "estimate.location"]
      paraScale <- unParaList[names(unParaList) == "estimate.scale"]
    }
    ######################################################################
    ### 4.40 AD formula evaluated for all b bootstrap samples
    ######################################################################
    # PIT of every entry of Y under its own re-estimated parameters
    # 4.45
    if(testDistr == "normal")
      Y[,1:n] <- pnorm(Y[,1:n], paraMean, paraSd)
    # 4.50
    if(testDistr == "exponential")
      Y[,1:n] <- pexp(Y[,1:n], paraRate)
    # 4.55
    if(testDistr == "cauchy")
      Y[,1:n] <- pcauchy(Y[,1:n], paraLocation, paraScale)
    # 4.60
    if(testDistr == "gamma")
      Y[,1:n] <- pgamma(Y[,1:n], paraShape, paraRate)
    # 4.65
    if(testDistr == "gumbel")
      Y[,1:n] <- pgumbel(Y[,1:n], paraAlpha, paraBeta)
    # 4.70
    if(testDistr == "logistic")
      Y[,1:n] <- plogis(Y[,1:n], paraLocation, paraScale)
    # 4.75 summands of the AD (Lewis) formula; Y is overwritten in place
    Y[1:b,] <- (2*col(Y)-1)*log(Y[1:b,]) + (2*n + 1 - 2*col(Y)) * log(1 - Y[1:b,])
    # 4.77 simulated AD values
    d <- rowSums(Y)
    simAD <- numeric(b)
    simAD[1:b] <- -n-(1/n)*d[1:b]
    # 4.80 warn when not all bootstrap AD values could be computed
    if(any(is.na(simAD))){
      cat(" The simulated Anderson-Darling distribution contains NAs or NaNs!","\n","\n")}
    # 4.90 empirical critical values from the simulated null distribution
    critValues <- round (matrix( c( 0.75, 0.90, 0.95, 0.975, 0.990,
      quantile(simAD, 0.75, na.rm = TRUE), quantile(simAD, 0.90, na.rm = TRUE), quantile(simAD, 0.95, na.rm = TRUE),
      quantile(simAD, 0.975, na.rm = TRUE), quantile(simAD, 0.99, na.rm = TRUE) ), nrow = 2, byrow = TRUE ), digits = 5)
    # 4.95 bootstrap p-value (missing values in simAD are dropped)
    pValue <- sum(na.omit(simAD) > AD)/length(na.omit(simAD))
    # end of the simulation-based branch
  }else{
    # 5.00 (b = NA): tabulated critical values / p-value formulas
    # 5.02 simAD is part of the return value, so it must exist
    simAD <- NA
    ### 5.06 quantile row / critical-value row (the latter is filled below)
    critValues <- matrix( c( 0.75, 0.90, 0.95, 0.975, 0.990, NA, NA, NA, NA, NA), nrow = 2, byrow = TRUE)
    # 5.08 normal / exponential / Gumbel / logistic
    if(testDistr == "normal" || testDistr == "exponential" || testDistr == "gumbel" || testDistr == "logistic"){
      # 5.10 normal
      if(testDistr == "normal"){
        ###############################################################
        ### critical values: D'Agostino & Stephens 1986, p. 123, Tab. 4.7
        ###############################################################
        # 5.12
        normalMtx <- matrix(c( 0.50 , 0.75, 0.85 , 0.90 , 0.95 , 0.975 , 0.99 , 0.995 ,
          0.341 , 0.470 , 0.561 , 0.631 , 0.752 , 0.873 , 1.035 , 1.159), nrow = 2, byrow = TRUE )
        # 5.14 small-sample adjustment
        normalMtx[2,1:ncol(normalMtx)] <- normalMtx[2,1:ncol(normalMtx)]/(1+0.75/n+2.25/n^2)
        refMtx <- normalMtx
        ###############################################################
        ### p-value approximation: ibid., p. 127
        ###############################################################
        # 5.16 adjust the AD value for the sample size
        cAD <- AD*(1+0.75/n+2.25/n^2)
        # 5.18 piecewise approximation formulas
        if(0.600 < cAD)
          {pValue <- exp(1.2937-5.709*cAD+0.0186*cAD^2)}
        if(0.340 < cAD && cAD < 0.600)
          {pValue <- exp(0.9177-4.279*cAD-1.38*cAD^2)}
        if(0.200 < cAD && cAD < 0.340)
          {pValue <- 1-exp(-8.318+42.796*cAD-59.938*cAD^2)}
        if(cAD < 0.200)
          {pValue <- 1-exp(-13.436+101.14*cAD-223.73*cAD^2)}
      }
      # 5.20 exponential
      if(testDistr == "exponential"){
        ###############################################################
        ### critical values: ibid., p. 135, Tab. 4.11
        ### case 2: origin (location) = 0 known, scale = 1/rate unknown
        ###############################################################
        # 5.22
        expMtx <- matrix( c( 0.75, 0.80, 0.85 , 0.90 , 0.95 , 0.975 , 0.99 , 0.995 , 0.9975,
          0.736 , 0.816 , 0.916 , 1.062 , 1.321 , 1.591 , 1.959 , 2.244, 2.534), nrow = 2, byrow = TRUE )
        # 5.24 small-sample adjustment
        expMtx[2,1:ncol(expMtx)] <- expMtx[2,1:ncol(expMtx)]/(1+0.6/n)
        refMtx <- expMtx
        ###############################################################
        ### p-value approximation: ibid., p. 136
        ###############################################################
        # 5.26 adjust the AD value for the sample size
        cAD <- AD*(1+0.6/n)
        # 5.28 piecewise approximation formulas
        if(0.950 < cAD)
          {pValue <- exp(0.731 - 3.009*cAD + 0.15*cAD^2)}
        if(0.510 < cAD && cAD < 0.950)
          {pValue <- exp(0.9209 - 3.353*cAD + 0.300*cAD^2)}
        if(0.260 < cAD && cAD < 0.510)
          {pValue <- 1 - exp(-6.1327 + 20.218*cAD - 18.663*cAD^2)}
        if(cAD < 0.260)
          {pValue <- 1 - exp(-12.2204 + 67.459*cAD - 110.3*cAD^2)}
      }
      # 5.30 Gumbel or logistic: table lookup only (no closed-form p-value)
      if(testDistr == "gumbel" || testDistr == "logistic"){
        # 5.31 Gumbel
        if(testDistr == "gumbel"){
          ###############################################################
          ### critical values: ibid., p. 146, Tab. 4.17
          ###############################################################
          # 5.32
          gumbelMtx <- matrix(c( 0.75, 0.90 , 0.95 , 0.975 , 0.99 , 0.474, 0.637, 0.757, 0.877, 1.038), nrow = 2, byrow = TRUE )
          # 5.34 small-sample adjustment
          gumbelMtx[2,1:ncol(gumbelMtx)] <- gumbelMtx[2,1:ncol(gumbelMtx)]/(1 + 0.2/sqrt(n))
          refMtx <- gumbelMtx
        }
        # 5.35 logistic
        if(testDistr == "logistic"){
          ###############################################################
          ### critical values: ibid., p. 157, Tab. 4.22
          ###############################################################
          # 5.37
          logisMtx <- matrix(c( 0.75, 0.90 , 0.95 , 0.975 , 0.99 , 0.995 , 0.426, 0.563, 0.660, 0.769, 0.906, 1.010 ), nrow = 2, byrow = TRUE )
          # 5.39 small-sample adjustment
          logisMtx[2,1:ncol(logisMtx)] <- logisMtx[2,1:ncol(logisMtx)]/(1 + 0.25/n)
          refMtx <- logisMtx
        }
        ###############################################################
        ### bracketed p-value for the Gumbel / logistic case
        ###############################################################
        critCheck <- refMtx[2,1:ncol(refMtx)] > AD   # TRUE for all critical values above AD
        # 5.40-5.44 firPos = position of the largest critical value still
        # below AD (0 means AD is below all tabled values)
        if(any(critCheck)){
          firPos <- min(which(critCheck)) - 1
        }else{
          firPos <- ncol(refMtx)}
        # 5.46 bracketed p-value from the table position
        if(firPos == 0){
          pValue <- 1 - refMtx[1,1]
          pValue <- paste(">",pValue)
        }else{
          pValue <- 1 - refMtx[1,firPos]
          pValue <- paste("<=",pValue)}}
      ###############################################################
      ### critical values for the requested quantiles (all four cases)
      ###############################################################
      for(i in 1:ncol(critValues)){
        # 5.50 is this quantile tabled?
        if(any(refMtx[1,1:ncol(refMtx)] == critValues[1,i])){
          # 5.52-5.54 read the critical value at the quantile's position
          position <- which(refMtx[1,1:ncol(refMtx)] == critValues[1,i])
          critValues[2,i] <- refMtx[2,position]
        }else{
          # 5.58 untabled quantiles stay NA
          critValues[2,i] <- NA}}
    }
    # 5.60 gamma
    if(testDistr == "gamma"){
      #####################################################################
      ### critical values and p-value: ibid., p. 155, Tab. 4.21
      ### case 3: shape unknown, scale = 1/rate unknown, origin = 0 known
      #####################################################################
      # 5.62 rows are indexed by the (estimated) shape parameter m
      gammaDF <- data.frame( c( 1, 2, 3, 4, 5, 6, 8, 10, 12, 15, 20, Inf ),
        c( 0.486, 0.477, 0.475, 0.473, 0.472, 0.472, 0.471, 0.471, 0.471, 0.47, 0.47, 0.47 ),
        c( 0.657, 0.643, 0.639, 0.637, 0.635, 0.635, 0.634, 0.633, 0.633, 0.632, 0.632, 0.631),
        c( 0.786, 0.768, 0.762, 0.759, 0.758, 0.757, 0.755, 0.754, 0.754, 0.754, 0.753, 0.752),
        c( 0.917, 0.894, 0.886, 0.883, 0.881, 0.88, 0.878, 0.877, 0.876, 0.876, 0.875, 0.873),
        c( 1.092, 1.062, 1.052, 1.048, 1.045, 1.043, 1.041, 1.04, 1.039, 1.038, 1.037, 1.035),
        c( 1.227, 1.19, 1.178, 1.173, 1.17, 1.168, 1.165, 1.164, 1.163, 1.162, 1.161, 1.159))
      names(gammaDF) <- c( "m", 0.75, 0.90, 0.95, 0.975, 0.99, 0.995)
      #####################################################################
      ######## bracketed p-value for the gamma case
      #####################################################################
      # critCheck flags the critical values above AD in the m-row; note the
      # comparison starts at column 2, so critCheck indices are shifted by
      # -1 relative to the table columns (comparing a data.frame row yields
      # a logical matrix, so any()/which() work as expected)
      critCheck <- gammaDF[min(which(gammaDF$m >= parafit$estimate["shape"])),2:ncol(gammaDF)] > AD
      # 5.65-5.67 firPos = 1 means AD is below all tabled values
      if(any(critCheck)){
        firPos <- min(which(critCheck))
      }else{
        firPos <- ncol(gammaDF) }
      # 5.68 bracketed p-value from the table position
      if(firPos == 1){
        pValue <- 1 - as.numeric(names(gammaDF)[2])
        pValue <- paste(">",pValue)
      }else{
        pValue <- 1 - as.numeric(names(gammaDF)[firPos])
        pValue <- paste("<=",pValue)}
      #####################################################################
      ######## critical values for the requested quantiles
      #####################################################################
      for(i in 1:ncol(critValues)){
        # 5.70 is this quantile tabled?
        if(any(names(gammaDF) == critValues[1,i] )){
          # 5.72 row = first tabled shape >= estimate, column = quantile
          critValues[2,i] <- gammaDF[min(which(gammaDF$m >= parafit$estimate["shape"] )),which(names(gammaDF) == critValues[1,i])]
        }else{
          # 5.74 untabled quantiles stay NA
          critValues[2,i] <- NA}}
    }
    # 5.80 Cauchy
    if(testDistr == "cauchy"){
      #####################################################################
      ### critical values: ibid., p. 163, Tab. 4.26
      ### case 3: location unknown, shape unknown
      #####################################################################
      # 5.82 rows are indexed by the sample size n
      cauchyDF <- data.frame(
        c( 5, 8, 10, 12, 15, 20, 25, 30, 40, 50, 60, 100, Inf),
        c( 0.835, 0.992, 1.04, 1.04, 1.02, 0.975, 0.914, 0.875, 0.812, 0.774, 0.743, 0.689, 0.615),
        c( 1.14, 1.52, 1.63, 1.65, 1.61, 1.51, 1.4, 1.3, 1.16, 1.08, 1.02, 0.927, 0.78),
        c( 1.4, 2.06, 2.27, 2.33, 2.28, 2.13, 1.94, 1.76, 1.53, 1.41, 1.3, 1.14, 0.949),
        c( 1.77, 3.2, 3.77, 4.14, 4.25, 4.05, 3.57, 3.09, 2.48, 2.14, 1.92, 1.52, 1.225),
        c( 2, 4.27, 5.58, 6.43, 7.2, 7.58, 6.91, 5.86, 4.23, 3.37, 2.76, 2.05, 1.52),
        c( 2.16, 5.24, 7.5, 9.51, 11.5, 14.57, 14.96, 13.8, 10.2, 7.49, 5.32, 3.3, 1.9))
      names(cauchyDF) <- c( "n", 0.75, 0.85, 0.90, 0.95, 0.975, 0.99)
      #####################################################################
      ### bracketed p-value for the Cauchy case
      #####################################################################
      # 5.84 values are only tabled for selected sample sizes
      if(any(cauchyDF[1:13,1] == n)){
        # critCheck flags the critical values above AD in the n-row; the
        # comparison starts at column 2 (indices shifted by -1, see gamma)
        critCheck <- cauchyDF[which(cauchyDF[1:13,1] == n),2:ncol(cauchyDF)] > AD
        # 5.85-5.87 firPos = 1 means AD is below all tabled values
        if(any(critCheck)){
          firPos <- min(which(critCheck))
        }else{
          firPos <- ncol(cauchyDF) }
        # 5.88 bracketed p-value from the table position
        if(firPos == 1){
          pValue <- 1 - as.numeric(names(cauchyDF)[2])
          pValue <- paste(">",pValue)
        }else{
          pValue <- 1 - as.numeric(names(cauchyDF)[firPos])
          pValue <- paste("<=",pValue)}
        #####################################################################
        ### critical values for the requested quantiles
        #####################################################################
        for(i in 1:ncol(critValues)){
          # 5.90 is this quantile tabled?
          if(any(names(cauchyDF) == critValues[1,i] )){
            # 5.92 row = tabled sample size, column = quantile
            critValues[2,i] <- cauchyDF[which(cauchyDF[1:13,1] == n),which(names(cauchyDF) == critValues[1,i])]
          }else{
            # 5.94 untabled quantiles stay NA
            critValues[2,i] <- NA}
        }
      }else{
        # 5.96-5.98 nothing is tabled for this n
        pValue <- NA
        critValues[2,1:ncol(critValues)] <- NA
        cat("\n","Critical values / p-Values for the Cauchy Distribution are only tabled for sample sizes: n = 5, 8, 10, 12, 15, 20, 25, 30, 40, 50, 60, 100","\n")}
    }
  }
  ###########################################################################
  #### 6.00 back-transformation of the Gumbel fit to Weibull parameters
  ###########################################################################
  # 6.10 was the sample tested via the Weibull -> Gumbel reduction?
  if(distribution == "weibull"){
    # 6.20 rename the parameters for the Weibull distribution
    names(parafit) <- c("shape", "scale")
    # 6.30-6.40 shape = 1/scale_gumbel, scale = exp(-location_gumbel)
    parafitCopy <- parafit
    parafit[1] <- (1/parafitCopy[2])
    parafit[2] <- exp(- parafitCopy[1])}
  # print the summary and return the full result invisibly
  # (the "crititical_values" element name typo is kept on purpose: it is
  # part of the returned interface and renaming it would break callers)
  print(list(distribution = distribution, parameter_estimation = parafit, Anderson_Darling = AD, p_value = pValue))
  invisible(list(distribution = distribution, parameter_estimation = parafit, Anderson_Darling = AD, p_value = pValue, crititical_values = critValues, simAD = simAD))
}
|
##
## USAGE: Rscript p6mA.R [INPUT FASTA FILE] [OUTPUT FILE NAME]
##
## INPUT FASTA FILE: The input .fasta file. The sequences should be 41-bp and the A in 21st base
## OUTPUT FILE NAME: The output file name. Default: output.result
##
##
####################################################################
# Obtain the command-line parameters; when none are given, print the usage
# text and abort.  TRUE/FALSE is used instead of T/F (T and F are ordinary
# variables in R and can be reassigned).
Args <- commandArgs(trailingOnly = TRUE)
if(length(Args) == 0){
  cat("USAGE: Rscript p6mA.R [INPUT FASTA FILE] [OUTPUT FILE NAME]");cat("\n");cat("\n")
  cat("INPUT FASTA FILE: The input .fasta file. The sequences should be 41-bp and the A in 21st base");cat("\n")
  cat("OUTPUT FILE NAME: The output file name. Default: output.result");cat("\n")
  cat("\n");
  stop("Please read the usage.", call. = FALSE)
}
# Load the required packages quietly.
library(RTFE, verbose = FALSE)
library(magrittr, verbose = FALSE)
library(stringr, verbose = FALSE)
library(xgboost, verbose = FALSE)
library(Biostrings, verbose = FALSE)
# Utility definitions
PssZ <- function(object, Z){
  # Single-strand positional trinucleotide lookup (PSTNPss features).
  # For each window position i, take the trinucleotide starting at i from
  # every sequence in object@Seqs and look up its score in column i of the
  # profile table Z (rows of Z are named by trinucleotide).
  # Base substr()/vapply() replace the original lapply + do.call("rbind") +
  # stringr::str_sub chain; seq_len() avoids the 1:ncol(Z) footgun.
  P <- matrix(-1, nrow = object@seq_num, ncol = ncol(Z))
  for (i in seq_len(ncol(Z))) {
    tri <- vapply(object@Seqs, substr, character(1), start = i, stop = i + 2)
    P[, i] <- Z[tri, i]
  }
  as.data.frame(P)
}
PdsZ <- function(object, Z){
  # Double-strand (strand-collapsed) positional trinucleotide lookup
  # (PSTNPds features).  Sequences are first collapsed to a two-letter
  # alphabet by mapping T->A and G->C; chartr() performs both
  # substitutions in one pass (replacing the original pair of
  # str_replace_all calls).  The lookup then works exactly as in PssZ.
  newSeq <- chartr("TG", "AC", object@Seqs)
  P <- matrix(-1, nrow = object@seq_num, ncol = ncol(Z))
  for (i in seq_len(ncol(Z))) {
    tri <- vapply(newSeq, substr, character(1), start = i, stop = i + 2)
    P[, i] <- Z[tri, i]
  }
  as.data.frame(P)
}
# ---- Main pipeline: read sequences, extract features, score with XGBoost ----
cat("Runing");cat("\n")
output<-"output.result"
# try() keeps the script alive here, but if Args[1] is missing `input` is
# never defined and readDNAStringSet() below will fail anyway.
try(input<-Args[1] %>% as.character(),silent = T);
if(!is.na(Args[2])){output<-Args[2]}
cat(paste0("The input file is ",input));cat("\n")
# Pre-trained XGBoost classifier plus the lookup objects used below:
# Zss/Zds (PSTNP score matrices) and feaN (names of the selected features).
xgb.load("total_models/total_train_model")->modelA
load("total_models/Zds_41.rds")
load("total_models/Zss_41.rds")
load("total_models/Features.RData")
#extract features
Seq<-readDNAStringSet(input,format = "fasta",use.names = T)
# One result row per input sequence; type starts as "N" (unclassified).
result<-data.frame(Seqnames=names(Seq),
Score=0,type = "N",stringsAsFactors = F)
Seq<-as.character(Seq)
Bean<-makeBenneat(Seqs = Seq,labs = rep("test",length(Seq)))
All_Feature<-data.frame(seq=names(Seq))
# Physicochemical dinucleotide properties used by the PseKNC encoding.
physio<-read.csv("feature_6_diprogb.csv",
row.names = 1,stringsAsFactors = F)
## Type-2 PseKNC features (lambda = 5, normalised).
PseKNC<-getT2PseKNC(object = Bean,phychem_file = physio,
normalization = T,lambda = 5)
# n is computed but not used anywhere below.
n<-ncol(PseKNC)/nrow(physio)
# Column names: "T2P<lambda><property>" for 5 lambdas x 6 properties.
paste0(paste0("T2P",lapply(1:5,rep,times=6) %>% unlist),
rep(rownames(physio),5)) -> colnames(PseKNC)
All_Feature<-cbind(All_Feature,PseKNC)
rm(PseKNC)
##EIIP features
EIIP<-getEIIP(object = Bean)
colnames(EIIP)<-paste0("EIIP_",colnames(EIIP))
All_Feature<-cbind(All_Feature,EIIP)
rm(EIIP)
##PSTNPss (single-strand position-specific trinucleotide propensity)
PSTNPss<-PssZ(object = Bean,Z = Zss)
colnames(PSTNPss)<-paste0("PSTNPss_",1:ncol(PSTNPss))
All_Feature<-cbind(All_Feature,PSTNPss)
rm(PSTNPss)
##PSTNPds (double-strand variant)
PSTNPds<-PdsZ(object = Bean,Z = Zds)
colnames(PSTNPds)<-paste0("PSTNPds_",1:ncol(PSTNPds))
All_Feature<-cbind(All_Feature,PSTNPds)
rm(PSTNPds)
# Keep only the model's selected features, in training order.
All_Feature<-All_Feature[,feaN]
dtest<-xgb.DMatrix(All_Feature%>% as.matrix())
result$Score<-predict(modelA,dtest)
# Score > 0.5 => 6mA.  NOTE(review): only the second assignment is wrapped in
# try(); the first fails the same way when no row matches (all predictions
# on one side of 0.5) and should arguably be wrapped too.
result[result$Score>0.5,]$type<-"6mA";
try(result[result$Score<0.5,]$type<-"non-6mA",silent = T)
cat("\n")
cat("The procedure has ended.");cat("\n")
# NOTE(review): "Argines" in the message below presumably means adenines
# (6mA = N6-methyladenine); the runtime string is left untouched here.
cat(paste0("There are ",length(Seq)," sequences. And ",sum(result$Score>0.5),
" Argines are identified as 6mA."));cat("\n")
cat(paste0("The Positive Ratio is ",sum(result$Score>0.5)/length(Seq)))
cat("\n")
write.table(result,file=output)
| /p6mA.R | no_license | Konglab404/p6mA | R | false | false | 3,568 | r | ##
## USAGE: Rscript p6mA.R [INPUT FASTA FILE] [OUTPUT FILE NAME]
##
## INPUT FASTA FILE: The input .fasta file. The sequences should be 41-bp and the A in 21st base
## OUTPUT FILE NAME: The output file name. Default: output.result
##
##
####################################################################
# Obtain the command-line parameters; with no arguments, print usage and abort.
Args<-commandArgs(trailingOnly=T)
if(length(Args) == 0){
cat("USAGE: Rscript p6mA.R [INPUT FASTA FILE] [OUTPUT FILE NAME]");cat("\n");cat("\n")
cat("INPUT FASTA FILE: The input .fasta file. The sequences should be 41-bp and the A in 21st base");cat("\n")
cat("OUTPUT FILE NAME: The output file name. Default: output.result");cat("\n")
cat("\n");
stop("Please read the usage.",call.=F)
}
# Load the packages (verbose = F suppresses startup messages).
library(RTFE,verbose = F)
library(magrittr,verbose = F)
library(stringr,verbose = F)
library(xgboost,verbose = F)
library(Biostrings,verbose = F)
# Utility definitions
# Position-specific trinucleotide propensity, single-strand (PSTNPss):
# for each window position i, look up the trinucleotide starting at i in
# the score matrix Z (rows named by trinucleotide, one column per position).
#
# @param object S4 object (built by makeBenneat) with @Seqs (character
#   sequences) and @seq_num (number of sequences).
# @param Z Numeric score matrix with trinucleotide row names.
# @return data.frame with one row per sequence, one column per position.
PssZ <- function(object, Z) {
  P <- matrix(-1, nrow = object@seq_num, ncol = ncol(Z))
  for (i in seq_len(ncol(Z))) {
    # Trinucleotide at positions i..i+2 of every sequence; indexing Z by a
    # character matrix matches against its row names.
    trinucs <- do.call(rbind, lapply(object@Seqs, str_sub, start = i, end = i + 2))
    P[, i] <- Z[trinucs, i]
  }
  as.data.frame(P)
}
# Position-specific trinucleotide propensity, double-strand (PSTNPds):
# sequences are first collapsed to a two-letter alphabet (T -> A, G -> C)
# so complementary bases score identically, then looked up in Z as in PssZ().
#
# @param object S4 object (built by makeBenneat) with @Seqs and @seq_num.
# @param Z Numeric score matrix with (collapsed) trinucleotide row names.
# @return data.frame with one row per sequence, one column per position.
PdsZ <- function(object, Z) {
  # Collapse the alphabet: T/A map to A, G/C map to C.
  collapsed <- str_replace_all(object@Seqs, "T", "A")
  collapsed <- str_replace_all(collapsed, "G", "C")
  P <- matrix(-1, nrow = object@seq_num, ncol = ncol(Z))
  for (i in seq_len(ncol(Z))) {
    trinucs <- do.call(rbind, lapply(collapsed, str_sub, start = i, end = i + 2))
    P[, i] <- Z[trinucs, i]
  }
  as.data.frame(P)
}
# ---- Main pipeline: read sequences, extract features, score with XGBoost ----
cat("Runing");cat("\n")
output<-"output.result"
# try() keeps the script alive here, but if Args[1] is missing `input` is
# never defined and readDNAStringSet() below will fail anyway.
try(input<-Args[1] %>% as.character(),silent = T);
if(!is.na(Args[2])){output<-Args[2]}
cat(paste0("The input file is ",input));cat("\n")
# Pre-trained XGBoost classifier plus the lookup objects used below:
# Zss/Zds (PSTNP score matrices) and feaN (names of the selected features).
xgb.load("total_models/total_train_model")->modelA
load("total_models/Zds_41.rds")
load("total_models/Zss_41.rds")
load("total_models/Features.RData")
#extract features
Seq<-readDNAStringSet(input,format = "fasta",use.names = T)
# One result row per input sequence; type starts as "N" (unclassified).
result<-data.frame(Seqnames=names(Seq),
Score=0,type = "N",stringsAsFactors = F)
Seq<-as.character(Seq)
Bean<-makeBenneat(Seqs = Seq,labs = rep("test",length(Seq)))
All_Feature<-data.frame(seq=names(Seq))
# Physicochemical dinucleotide properties used by the PseKNC encoding.
physio<-read.csv("feature_6_diprogb.csv",
row.names = 1,stringsAsFactors = F)
## Type-2 PseKNC features (lambda = 5, normalised).
PseKNC<-getT2PseKNC(object = Bean,phychem_file = physio,
normalization = T,lambda = 5)
# n is computed but not used anywhere below.
n<-ncol(PseKNC)/nrow(physio)
# Column names: "T2P<lambda><property>" for 5 lambdas x 6 properties.
paste0(paste0("T2P",lapply(1:5,rep,times=6) %>% unlist),
rep(rownames(physio),5)) -> colnames(PseKNC)
All_Feature<-cbind(All_Feature,PseKNC)
rm(PseKNC)
##EIIP features
EIIP<-getEIIP(object = Bean)
colnames(EIIP)<-paste0("EIIP_",colnames(EIIP))
All_Feature<-cbind(All_Feature,EIIP)
rm(EIIP)
##PSTNPss (single-strand position-specific trinucleotide propensity)
PSTNPss<-PssZ(object = Bean,Z = Zss)
colnames(PSTNPss)<-paste0("PSTNPss_",1:ncol(PSTNPss))
All_Feature<-cbind(All_Feature,PSTNPss)
rm(PSTNPss)
##PSTNPds (double-strand variant)
PSTNPds<-PdsZ(object = Bean,Z = Zds)
colnames(PSTNPds)<-paste0("PSTNPds_",1:ncol(PSTNPds))
All_Feature<-cbind(All_Feature,PSTNPds)
rm(PSTNPds)
# Keep only the model's selected features, in training order.
All_Feature<-All_Feature[,feaN]
dtest<-xgb.DMatrix(All_Feature%>% as.matrix())
result$Score<-predict(modelA,dtest)
# Score > 0.5 => 6mA.  NOTE(review): only the second assignment is wrapped in
# try(); the first fails the same way when no row matches (all predictions
# on one side of 0.5) and should arguably be wrapped too.
result[result$Score>0.5,]$type<-"6mA";
try(result[result$Score<0.5,]$type<-"non-6mA",silent = T)
cat("\n")
cat("The procedure has ended.");cat("\n")
# NOTE(review): "Argines" in the message below presumably means adenines
# (6mA = N6-methyladenine); the runtime string is left untouched here.
cat(paste0("There are ",length(Seq)," sequences. And ",sum(result$Score>0.5),
" Argines are identified as 6mA."));cat("\n")
cat(paste0("The Positive Ratio is ",sum(result$Score>0.5)/length(Seq)))
cat("\n")
write.table(result,file=output)
|
###
'quality_assessment.R contains the code used to process genome quality data generated by QUAST and CheckM'
###
library(reshape2)
library(dplyr)
library(ggplot2)
library(stringr)
#plotting
library(cowplot)
library(ggsci)
library(scales)
library(RColorBrewer)
library(egg)
#Tree
library(ape)
library(ggrepel)
library(ggtree)
library(ggstance)
library(phytools)
library(phylogram)
library(dendextend)
library(treespace)
#ASR
library(ggimage)
# Absolute root of the quality-assessment working directories.
main_path <- '/Users/owlex/Dropbox/Documents/Northwestern/Hartmann_Lab/enr_comparison_project/manuscript/ismej_submission/github/fabv_paper/quality_assessments'
#### Section 0 ####
#### CheckM and QUAST quality check for 160 type strains, 30 study isolates, and c japonicus outgroup
#### Output: checkm_quast_metrics.csv, CheckM metrics joined to QUAST metadata.
section_0_path <- file.path(main_path, 'section_0')
setwd(section_0_path)
## Read both CheckM output batches
checkm_batch_a <- read.csv('checkm_output.txt', sep = '\t')
checkm_batch_b <- read.csv('checkm_output2.txt', sep = '\t')
df_meta <- read.csv('processed_genomes_metadata.csv')
## Stack the batches, rebuild the '.fna' filename from each bin ID, and
## attach the QUAST metadata keyed on filename.
df_checkm <- rbind(checkm_batch_a, checkm_batch_b) %>%
  mutate(bID = paste0(Bin.Id, '.fna')) %>%
  merge(df_meta, by.x = 'bID', by.y = 'filename')
write.csv(df_checkm, 'checkm_quast_metrics.csv')
#### Section 1 ####
#### CheckM and QUAST quality check for 8,747 genomes downloaded from NCBI. A subset of the original ~9,000 genomes downloaded was already filtered for contigs
###
#### Output: 'large_ani_analysis.csv', the curated list of genomes that pass
#### the assembly/quality thresholds, plus all representative (type-strain) genomes.
section_1_path <- paste(main_path,'section_1',sep='/')
setwd(section_1_path)
#
## Designate the representative species genomes in the df_meta dataset
df_meta <- read.csv('processed_genomes_metadata.csv')
df_rep <- read.csv('processed_genomes_metadata_typeestrains.csv')%>%filter(source=='REFSEQ')
df_meta <- df_meta%>%mutate(representative=case_when(filename%in%df_rep$filename~1,TRUE~0))
## QUAST filter: <= 300 contigs and N50 >= 10 kb; representatives always pass.
df_meta <- df_meta%>%filter((total_contigs<=300 & N50>=10000)|representative==1)
## Strip the '.fna' extension so filenames match CheckM bin IDs.
## fixed() makes the pattern literal — the bare string '.fna' is a regex in
## which '.' matches any character.
df_meta <- df_meta%>%mutate(filename2=str_replace(filename,fixed('.fna'),''))
#
## Read in compiled checkM results and merge on the stripped filename.
df_checkm <- read.csv('refseq_checkm_complete.csv')
df_meta <- merge(df_meta,df_checkm,by.x='filename2',by.y='Bin.Id')
## CheckM filter: overall quality = completeness - 5 * contamination;
## representatives again bypass the filter.
df_meta <- df_meta%>%mutate(overallquality=Completeness-(5*Contamination))
df_meta <- df_meta%>%filter((overallquality>=50&Completeness>=90&Contamination<=10)|representative==1)
## write to file
write.csv(df_meta,'large_ani_analysis.csv',row.names=FALSE)
#### Section 2 ####
#### Make a large genome strain datatable for publication
#### and Generate figure with completeness, contig, and N50 statistics for all (7,163) genomes in study
section_2_path <- paste(main_path,'section_2',sep='/')
setwd(section_2_path)
#
####
# Contains all metadata, quast, and checkm info for the ~8000 ncbi refseq genomes
df_compiled <- read.csv('large_ani_analysis.csv')
## Filtering for desired checkM values (same thresholds as Section 1)
df_compiled <- df_compiled%>%mutate(overallquality=Completeness-(5*Contamination))
# Top-level nrow() prints the type-strain count before/after filtering
# as a sanity check when run with Rscript.
nrow(df_compiled%>%filter(representative==1))
df_compiled <- df_compiled%>%filter((overallquality>=50&Completeness>=90&Contamination<=10)|(representative==1))
nrow(df_compiled%>%filter(representative==1))
df_metapass <- read.csv('pass_largegenomesanalysis.csv')
# all.y = TRUE keeps every genome in the "pass" list even without metrics.
df_metapass <- merge(df_compiled,df_metapass,by.x='filename',by.y='filename',all.y=TRUE)
df_metapass <- df_metapass%>%select(filename,id,species,mod_species,fabv_pa,type,total_contigs,total_length,N50,Completeness,Contamination,overallquality,description)%>%
rename(refseq_species=species,ANI_species_group=mod_species,fabv_detected=fabv_pa,type_strain=type,overall_quality=overallquality,refseq_description=description)
# Writing the compiled df_metapass dataframe
write.csv(df_metapass,'filtered_processed_genomes_metadata.csv',row.names=FALSE)
#### Generate figure with completeness, contig, and N50 statistics for all (7,134) genomes in study
#
## Reading in study isolate checkm and quast data.
## representative coding: 0 = RefSeq genome, 1 = type strain, 2 = study
## isolate (see the scale_x_discrete labels below).
df_isolate <- read.csv('checkm_output_modified.txt',sep='\t')
df_isolatequast <- read.csv('processed_genomes_metadata.csv')%>%filter(source=='USER')%>%mutate(representative=2)
df_isolate <- merge(df_isolate,df_isolatequast,by.x='Bin.Id',by.y='id')
df_isolate <- df_isolate%>%mutate(overallquality=Completeness-(5*Contamination))
#
## Merge isolate data with refseq/type strain data
df_compiled <- df_compiled%>%select(cleaned_filename,representative,Completeness,Contamination,total_contigs,N50,overallquality)
df_isolate <- df_isolate%>%select(cleaned_filename,representative,Completeness,Contamination,total_contigs,N50,overallquality)
df_compiled <- rbind(df_compiled,df_isolate)
# NOTE(review): filter() with no arguments is a no-op and can be removed.
df_compiled <- df_compiled%>%filter()
# Four stacked jitter + violin panels (completeness, contamination, contigs,
# N50); x-axis tick labels are hidden (white) on all but the bottom panel.
p1 <- ggplot(df_compiled,aes(x=as.factor(representative),y=Completeness))+
#geom_violin(color='maroon')+
geom_point(position=position_jitter(),alpha=.2)+
geom_violin(color='lightblue',alpha=.1)+theme_classic()+
theme(axis.title.x = element_text(size = 0),axis.title.y = element_text(size = 30),
axis.text.x = element_text(size = 30,color='white'),axis.text.y = element_text(size = 30),legend.position = 'none')+
scale_x_discrete(labels=c('0'='RefSeq\nGenomes','1'='Type\nStrains','2'='Study\nIsolates'))
#geom_segment(aes(x=0.5,xend=1.5,y=90,yend=90),color='orange',linetype='dashed')#(yintercept=90.0,linetype='dashed',color='lightblue',size=1,alpha=0.8)
p2 <- ggplot(df_compiled,aes(x=as.factor(representative),y=Contamination))+
#geom_violin()+
geom_point(position=position_jitter(),alpha=.2)+
geom_violin(color='lightblue',alpha=.1)+theme_classic()+
theme(axis.title.x = element_text(size = 0),axis.title.y = element_text(size = 30),
axis.text.x = element_text(size = 30,color='white'),axis.text.y = element_text(size = 30),legend.position = 'none')+
scale_x_discrete(labels=c('0'='RefSeq\nGenomes','1'='Type\nStrains','2'='Study\nIsolates'))#+
#geom_segment(aes(x=0.5,xend=1.5,y=10,yend=10),color='orange',linetype='dashed')#(yintercept=90.0,linetype='dashed',color='lightblue',size=1,alpha=0.8)
p3 <- ggplot(df_compiled,aes(x=as.factor(representative),y=total_contigs))+
#geom_violin()+
geom_point(position=position_jitter(),alpha=.2)+
geom_violin(color='lightblue',alpha=.1)+theme_classic()+
theme(axis.title.x = element_text(size = 0),axis.title.y = element_text(size = 30),
axis.text.x = element_text(size = 30,color='white'),axis.text.y = element_text(size = 30),legend.position = 'none')+
scale_x_discrete(labels=c('0'='RefSeq\nGenomes','1'='Type\nStrains','2'='Study\nIsolates'))#+
#geom_segment(aes(x=0.5,xend=1.5,y=300,yend=300),color='orange',linetype='dashed')#(yintercept=90.0,linetype='dashed',color='lightblue',size=1,alpha=0.8)
p4 <- ggplot(df_compiled,aes(x=as.factor(representative),y=N50))+
#geom_violin()+
geom_point(position=position_jitter(),alpha=.2)+
geom_violin(color='lightblue',alpha=.1)+theme_classic()+
theme(axis.title.x = element_text(size = 0),axis.title.y = element_text(size = 30),
axis.text.x = element_text(size = 30),axis.text.y = element_text(size = 30),legend.position = 'none')+
scale_x_discrete(labels=c('0'='RefSeq\nGenomes','1'='Type\nStrains','2'='Study\nIsolates'))#+
#geom_segment(aes(x=0.5,xend=1.5,y=10000,yend=10000),color='orange',linetype='dashed')#(yintercept=90.0,linetype='dashed',color='lightblue',size=1,alpha=0.8)
# NOTE(review): `t` shadows base::t() from here on.
t <- ggarrange(p1,p2,p3,p4,nrow=4,ncol=1)
save_plot('sec2_genome_quality_stats.png',t,base_height=16,base_aspect_ratio = 1.4)
| /quality_assessments/quality_assessments.R | no_license | hartmann-lab/fabv_paper | R | false | false | 7,618 | r | ###
'quality_assessment.R contains the code used to process genome quality data generated by QUAST and CheckM'
###
library(reshape2)
library(dplyr)
library(ggplot2)
library(stringr)
#plotting
library(cowplot)
library(ggsci)
library(scales)
library(RColorBrewer)
library(egg)
#Tree
library(ape)
library(ggrepel)
library(ggtree)
library(ggstance)
library(phytools)
library(phylogram)
library(dendextend)
library(treespace)
#ASR
library(ggimage)
# Absolute root of the quality-assessment working directories.
main_path <- '/Users/owlex/Dropbox/Documents/Northwestern/Hartmann_Lab/enr_comparison_project/manuscript/ismej_submission/github/fabv_paper/quality_assessments'
#### Section 0 ####
#### CheckM and QUAST quality check for 160 type strains, 30 study isolates, and c japonicus outgroup
#### Output: checkm_quast_metrics.csv, CheckM metrics joined to QUAST metadata.
section_0_path <- file.path(main_path, 'section_0')
setwd(section_0_path)
## Read both CheckM output batches
checkm_batch_a <- read.csv('checkm_output.txt', sep = '\t')
checkm_batch_b <- read.csv('checkm_output2.txt', sep = '\t')
df_meta <- read.csv('processed_genomes_metadata.csv')
## Stack the batches, rebuild the '.fna' filename from each bin ID, and
## attach the QUAST metadata keyed on filename.
df_checkm <- rbind(checkm_batch_a, checkm_batch_b) %>%
  mutate(bID = paste0(Bin.Id, '.fna')) %>%
  merge(df_meta, by.x = 'bID', by.y = 'filename')
write.csv(df_checkm, 'checkm_quast_metrics.csv')
#### Section 1 ####
#### CheckM and QUAST quality check for 8,747 genomes downloaded from NCBI. A subset of the original ~9,000 genomes downloaded was already filtered for contigs
###
#### Output: 'large_ani_analysis.csv', the curated list of genomes that pass
#### the assembly/quality thresholds, plus all representative (type-strain) genomes.
section_1_path <- paste(main_path,'section_1',sep='/')
setwd(section_1_path)
#
## Designate the representative species genomes in the df_meta dataset
df_meta <- read.csv('processed_genomes_metadata.csv')
df_rep <- read.csv('processed_genomes_metadata_typeestrains.csv')%>%filter(source=='REFSEQ')
df_meta <- df_meta%>%mutate(representative=case_when(filename%in%df_rep$filename~1,TRUE~0))
## QUAST filter: <= 300 contigs and N50 >= 10 kb; representatives always pass.
df_meta <- df_meta%>%filter((total_contigs<=300 & N50>=10000)|representative==1)
## Strip the '.fna' extension so filenames match CheckM bin IDs.
## fixed() makes the pattern literal — the bare string '.fna' is a regex in
## which '.' matches any character.
df_meta <- df_meta%>%mutate(filename2=str_replace(filename,fixed('.fna'),''))
#
## Read in compiled checkM results and merge on the stripped filename.
df_checkm <- read.csv('refseq_checkm_complete.csv')
df_meta <- merge(df_meta,df_checkm,by.x='filename2',by.y='Bin.Id')
## CheckM filter: overall quality = completeness - 5 * contamination;
## representatives again bypass the filter.
df_meta <- df_meta%>%mutate(overallquality=Completeness-(5*Contamination))
df_meta <- df_meta%>%filter((overallquality>=50&Completeness>=90&Contamination<=10)|representative==1)
## write to file
write.csv(df_meta,'large_ani_analysis.csv',row.names=FALSE)
#### Section 2 ####
#### Make a large genome strain datatable for publication
#### and Generate figure with completeness, contig, and N50 statistics for all (7,163) genomes in study
section_2_path <- paste(main_path,'section_2',sep='/')
setwd(section_2_path)
#
####
# Contains all metadata, quast, and checkm info for the ~8000 ncbi refseq genomes
df_compiled <- read.csv('large_ani_analysis.csv')
## Filtering for desired checkM values (same thresholds as Section 1)
df_compiled <- df_compiled%>%mutate(overallquality=Completeness-(5*Contamination))
# Top-level nrow() prints the type-strain count before/after filtering
# as a sanity check when run with Rscript.
nrow(df_compiled%>%filter(representative==1))
df_compiled <- df_compiled%>%filter((overallquality>=50&Completeness>=90&Contamination<=10)|(representative==1))
nrow(df_compiled%>%filter(representative==1))
df_metapass <- read.csv('pass_largegenomesanalysis.csv')
# all.y = TRUE keeps every genome in the "pass" list even without metrics.
df_metapass <- merge(df_compiled,df_metapass,by.x='filename',by.y='filename',all.y=TRUE)
df_metapass <- df_metapass%>%select(filename,id,species,mod_species,fabv_pa,type,total_contigs,total_length,N50,Completeness,Contamination,overallquality,description)%>%
rename(refseq_species=species,ANI_species_group=mod_species,fabv_detected=fabv_pa,type_strain=type,overall_quality=overallquality,refseq_description=description)
# Writing the compiled df_metapass dataframe
write.csv(df_metapass,'filtered_processed_genomes_metadata.csv',row.names=FALSE)
#### Generate figure with completeness, contig, and N50 statistics for all (7,134) genomes in study
#
## Reading in study isolate checkm and quast data.
## representative coding: 0 = RefSeq genome, 1 = type strain, 2 = study
## isolate (see the scale_x_discrete labels below).
df_isolate <- read.csv('checkm_output_modified.txt',sep='\t')
df_isolatequast <- read.csv('processed_genomes_metadata.csv')%>%filter(source=='USER')%>%mutate(representative=2)
df_isolate <- merge(df_isolate,df_isolatequast,by.x='Bin.Id',by.y='id')
df_isolate <- df_isolate%>%mutate(overallquality=Completeness-(5*Contamination))
#
## Merge isolate data with refseq/type strain data
df_compiled <- df_compiled%>%select(cleaned_filename,representative,Completeness,Contamination,total_contigs,N50,overallquality)
df_isolate <- df_isolate%>%select(cleaned_filename,representative,Completeness,Contamination,total_contigs,N50,overallquality)
df_compiled <- rbind(df_compiled,df_isolate)
# NOTE(review): filter() with no arguments is a no-op and can be removed.
df_compiled <- df_compiled%>%filter()
# Four stacked jitter + violin panels (completeness, contamination, contigs,
# N50); x-axis tick labels are hidden (white) on all but the bottom panel.
p1 <- ggplot(df_compiled,aes(x=as.factor(representative),y=Completeness))+
#geom_violin(color='maroon')+
geom_point(position=position_jitter(),alpha=.2)+
geom_violin(color='lightblue',alpha=.1)+theme_classic()+
theme(axis.title.x = element_text(size = 0),axis.title.y = element_text(size = 30),
axis.text.x = element_text(size = 30,color='white'),axis.text.y = element_text(size = 30),legend.position = 'none')+
scale_x_discrete(labels=c('0'='RefSeq\nGenomes','1'='Type\nStrains','2'='Study\nIsolates'))
#geom_segment(aes(x=0.5,xend=1.5,y=90,yend=90),color='orange',linetype='dashed')#(yintercept=90.0,linetype='dashed',color='lightblue',size=1,alpha=0.8)
p2 <- ggplot(df_compiled,aes(x=as.factor(representative),y=Contamination))+
#geom_violin()+
geom_point(position=position_jitter(),alpha=.2)+
geom_violin(color='lightblue',alpha=.1)+theme_classic()+
theme(axis.title.x = element_text(size = 0),axis.title.y = element_text(size = 30),
axis.text.x = element_text(size = 30,color='white'),axis.text.y = element_text(size = 30),legend.position = 'none')+
scale_x_discrete(labels=c('0'='RefSeq\nGenomes','1'='Type\nStrains','2'='Study\nIsolates'))#+
#geom_segment(aes(x=0.5,xend=1.5,y=10,yend=10),color='orange',linetype='dashed')#(yintercept=90.0,linetype='dashed',color='lightblue',size=1,alpha=0.8)
p3 <- ggplot(df_compiled,aes(x=as.factor(representative),y=total_contigs))+
#geom_violin()+
geom_point(position=position_jitter(),alpha=.2)+
geom_violin(color='lightblue',alpha=.1)+theme_classic()+
theme(axis.title.x = element_text(size = 0),axis.title.y = element_text(size = 30),
axis.text.x = element_text(size = 30,color='white'),axis.text.y = element_text(size = 30),legend.position = 'none')+
scale_x_discrete(labels=c('0'='RefSeq\nGenomes','1'='Type\nStrains','2'='Study\nIsolates'))#+
#geom_segment(aes(x=0.5,xend=1.5,y=300,yend=300),color='orange',linetype='dashed')#(yintercept=90.0,linetype='dashed',color='lightblue',size=1,alpha=0.8)
p4 <- ggplot(df_compiled,aes(x=as.factor(representative),y=N50))+
#geom_violin()+
geom_point(position=position_jitter(),alpha=.2)+
geom_violin(color='lightblue',alpha=.1)+theme_classic()+
theme(axis.title.x = element_text(size = 0),axis.title.y = element_text(size = 30),
axis.text.x = element_text(size = 30),axis.text.y = element_text(size = 30),legend.position = 'none')+
scale_x_discrete(labels=c('0'='RefSeq\nGenomes','1'='Type\nStrains','2'='Study\nIsolates'))#+
#geom_segment(aes(x=0.5,xend=1.5,y=10000,yend=10000),color='orange',linetype='dashed')#(yintercept=90.0,linetype='dashed',color='lightblue',size=1,alpha=0.8)
# NOTE(review): `t` shadows base::t() from here on.
t <- ggarrange(p1,p2,p3,p4,nrow=4,ncol=1)
save_plot('sec2_genome_quality_stats.png',t,base_height=16,base_aspect_ratio = 1.4)
|
# Event-study configuration and result containers.
# NOTE(review): this script expects return matrices `R` (securities) and `Rm`
# (market) with at least 211 daily columns to already exist in the workspace.
n <- 112   # number of securities
Ei <- 175  # length of the estimation window (days 1..175)
#########
# Market-model residuals over the estimation window, one row per security.
e <- matrix(0, n, 175)
# Rank workspaces for the Corrado-type nonparametric tests
# (estimation window plus the 31-day event window).
r <- res <- matrix(0, n, 175 + 31)
# Per-event-day test statistics and p-values (one row per event day 0..30).
tP <- tCross <- tBW <- tBMP <-
  PvalP <- PvalCross <- PvalBW <- PvalBMP <-
  tMM <- tLR <- tMLR <- tSLR <- tMSLR <-
  PvalMM <- PvalLR <- PvalMLR <- PvalSLR <- PvalMSLR <- matrix(0, 31, 1)
tLRrho <- tMLRrho <- tSLRrho <- tMSLRrho <-
  PvalLRrho <- PvalMLRrho <- PvalSLRrho <- PvalMSLRrho <- matrix(0, 31, 1)
BFstar1 <- BFstar2 <- matrix(0, 31, 1)
# Cumulative and nonparametric statistics.  These were assigned elementwise
# later in the script without ever being created, which fails with
# "object not found" in a fresh session, so initialise them here too.
tBWCum <- PvalBWCum <- tPCum <- PvalPCum <- tBMPCum <- PvalBMPCum <-
  tCrossCum <- PvalCrossCum <-
  Corr <- CorrPval <- CorrCum <- CorrPvalCum <-
  CorrZ <- CorrZPval <- CorrZCum <- CorrZPvalCum <-
  Sign <- SignPval <- SignCum <- SignPvalCum <- matrix(0, 31, 1)
# Abnormal returns, standardised ARs and variance-correction factors
# (securities x event days).
AR <- SAR <- C <- CCum <- matrix(0, n, 31)
# Market-model intercept and slope per security.
Alpha <- Beta <- matrix(0, n, 1)
# Fit the market model per security over the estimation window (days 1..175)
# and compute abnormal returns over the event window (series positions
# 181..211).  Positions 176..180 appear to be a gap between the windows —
# TODO confirm against the study design.
# NOTE(review): the loop bound hard-codes 112 instead of using `n`.
for(i in 1:112)
{
# OLS market model: R_i = alpha + beta * Rm_i + e.
m=lm(R[i,1:175]~Rm[i,1:175])
Alpha[i]=m$coef[1]
Beta[i]=m$coef[2]
e[i,]=m$residuals
for(t in 1:31)
{
# Abnormal return on event day t.
AR[i,t]= R[i,180+t] - (Alpha[i] + Beta[i]*Rm[i,180+t])
#estimation period
Rmmean=mean(Rm[i,1:175])
# Patell-style prediction-error correction factor for day t ...
C[i,t] = (1+ (1/Ei) + ((Rm[i,180+t]-Rmmean)^2/(sum((Rm[i,1:175]-Rmmean)^2))))
# ... and its cumulative analogue over event days 1..t.
CCum[i,t] = (1+ (t/Ei) +((sum(Rm[i,(180+1):(180+t)])- (t*Rmmean))^2)/(t*sum((Rm[i,1:175]-Rmmean)^2)))
# Standardised abnormal return.
SAR[i,t]= AR[i,t]/(sd(e[i,])*sqrt(C[i,t]))
}# for tau
}# for i in 1:n stocks
# Per-event-day test statistics.  For each event day t (1..31) this computes
# parametric tests (Brown-Warner, Patell, BMP, method-of-moments, cross-
# sectional) and nonparametric tests (Corrado rank, Corrado-Zivney, sign),
# each with a cumulative (CAR-style) counterpart over days 1..t.
for(t in 1:31)
{
#BW=================================================
# Brown-Warner: time-series variance of the mean residual over the
# estimation window.  `sum` is reused as a plain accumulator; calls to the
# base function sum() below still resolve correctly, but the shadowing is
# confusing and worth renaming.
sum=0
for(days in 1:175)
{
x=(mean(e[,days])-mean(e))^2
sum= sum + x
}
denomBW=sqrt(sum/174)
tBW[t]=mean(AR[,t])/denomBW
PvalBW[t]= 2*(1-pt(abs(tBW[t]),174))
tBWCum[t]= sum(tBW[1:t])/sqrt(t) #coz denom is const
PvalBWCum[t]= 2*(1-pt(abs(tBWCum[t]),174))
#Patell====================================================================================
tP[t]= sum(SAR[,t])/sqrt(n*(173/171))
PvalP[t]=2*(1-pnorm(abs(tP[t])))
tPCum[t]= sum(tP[1:t])/sqrt(t)
# NOTE(review): this p-value is computed from tP[t]; the cumulative
# statistic tPCum[t] was presumably intended — confirm and fix.
PvalPCum[t]=2*(1-pnorm(abs(tP[t])))
#BMP=====================================================================================
# BMP: cross-sectional variance of the standardised abnormal returns.
tBMP[t]=sum(SAR[,t])/sqrt(var(SAR[,t])*n)
PvalBMP[t]= 2*(1-pt(abs(tBMP[t]),n-1))
tBMPCum[t]=sum(tBMP[1:t])/sqrt(t)
PvalBMPCum[t]=2*(1-pt(abs(tBMPCum[t]),n-1))
#=================================================================
#MM=================
#MM denom: per-security residual variance scaled by the correction factor.
sum=0
for(i in 1:n)
{
x=var(e[i,])*C[i,t]
sum=sum+x
}
tMM[t]= n*mean(AR[,t])/sqrt(sum)
PvalMM[t]=2*(1-pnorm(abs(tMM[t])))
#=====================================================================================
#Corrado===================
# Rank test on estimation residuals plus the event day.
# NOTE(review): rank() runs over the full 175+31-column row of `res`, so
# the untouched tail beyond position 175+t also participates in the
# ranking — verify this matches the source paper.
#==================Do not touch coz coded from original paper
for(i in 1:n)
{
res[i,1:175]=e[i,]
res[i,176:(175+t)]=AR[i,t]
r[i,]=rank(res[i,])
}
#Corr denom
x=matrix(0,175+t,1)
for(days in 1:(175+t))
{
x[days]= (1/n)*sum(r[,days]-mean(1:(Ei+1)))
}
denom=sqrt((1/(175+t))* sum(x^2))
Corr[t]= (1/n)* sum(r[,175+t]- mean(1:(Ei+1)))/ denom
CorrPval[t]=2*(1-pnorm(abs(Corr[t])))
#===================Do not touch
# Cumulative Corrado rank statistic over event days 1..t.
rCum = resCum = matrix(0,n,175+t)
for(i in 1:n)
{
resCum[i,1:175]=e[i,]
resCum[i,176:(175+t)]=AR[i,1:t]
rCum[i,]=rank(resCum[i,])
}
sumrank=matrix(0,n,1)
for(l in 1:t)
{
x = rCum[,175+l]
sumrank = sumrank + x
}
CorrCum[t]= sqrt(1/n)* sum((sumrank - t * mean(1:(Ei+t+1)) ) / (
sqrt(t*Ei*(t+Ei+1)/12)))
CorrPvalCum[t]=2*(1-pnorm(abs(CorrCum[t])))
#============================================================================================
#=============== do not touch
#Corrado & Zivney=================
# Same rank construction on standardised residuals / abnormal returns.
for(i in 1:n)
{
res[i,1:175]=e[i,]/sd(e[i,])
res[i,175+t]=(AR[i,t]/(sd(e[i,])*C[i,t]))/(sd(AR[,t]/sd(AR[,t])))
r[i,]=rank(res[i,])
}
CorrZ[t]=sqrt(1/n)* sum( (r[,175+t] -mean(1:(Ei+1))) /sd(1:(Ei+1)))
CorrZPval[t]=2*(1-pnorm(abs(CorrZ[t])))
#================ do not touch
rCum = resCum = matrix(0,n,175+t)
for(i in 1:n)
{
resCum[i,1:175]=e[i,]/sd(e[i,])
#resCum[i,1:200]=e[i,]
for(l in 1:t)
{
resCum[i,175+l]=AR[i,l]/(sd(e[i,])*C[i,l])/(sd(AR[,l]/sd(AR[,l])))
#resCum[i,200+l]=SAR[i,l]
}
rCum[i,]=rank(resCum[i,])
}
sumrank=matrix(0,n,1)
for(l in 1:t)
{
x = rCum[,175+l]
sumrank = sumrank + x
}
CorrZCum[t]= sqrt(1/n)* sum((sumrank - t *
mean(1:(Ei+t+1)))/(sqrt(t*Ei*(t+Ei+1)/12)))
CorrZPvalCum[t]=2*(1-pnorm(abs(CorrZCum[t])))
#=======================================================================
#sign===========
# Sign test: 1 when the abnormal return exceeds the security's median
# estimation-window residual.  NOTE(review): `sign` shadows base::sign(),
# and this full 31-day table is recomputed identically on every pass of
# the outer t-loop — it could be hoisted out.
x=matrix(0,n,1)
sign=matrix(0,n,31)
for(l in 1:31)
{
for(i in 1:n)
{
x[i]=median(e[i,])
if(AR[i,l]>x[i])
{
sign[i,l]=1
}
if(AR[i,l]<=x[i])
{
sign[i,l]=0
}
}
}
#Sign[t]= sqrt(n)^-1 * sum((sign[,t]- 0.5)/sqrt(0.5*0.5))
Sign[t]= sqrt(n) * (sum(sign[,t])/n - 0.5)/(0.5)
SignPval[t]=2*(1-pnorm(abs(Sign[t])))
x=sumsign=matrix(0,n,1)
for(l in 1:t)
{
x=sign[,l]
sumsign=sumsign+x
}
SignCum[t]=sqrt(n)^-1 * sum((sumsign- t*0.5)/sqrt(t*0.5*0.5))
SignPvalCum[t]=2*(1-pnorm(abs(SignCum[t])))
#==================================================================================
#cross==========================
# Cross-sectional t-test on day-t ARs, and on CARs over days 1..t.
tCross[t]=t.test(AR[,t])$statistic
PvalCross[t]=t.test(AR[,t])$p.value
sum=x=matrix(0,n,1)
for(l in 1:t)
{
x=AR[,l]
sum=sum+x
}
tCrossCum[t]=t.test(sum)$statistic
PvalCrossCum[t]=t.test(sum)$p.value
}
# ---- Export results.  Earlier export variants kept for reference: ----
#df=data.frame(PvalBW,PvalP,PvalBMP,PvalCross,PvalLR,PvalSLR,PvalMLR,PvalMSLR,PvalMM,
# CorrPval,CorrZPval,SignPval)
#days=0:30
#df=data.frame(days,PvalBW,PvalP,PvalBMP,PvalCross,PvalMM,PvalLR,PvalMLR,PvalSLR,PvalMSLR)
#colnames(df)=c("days","Brown and Warner","Patell","BMP", "Cross sectional","Method of Moments","LR","Modified LR","SLR","Modified SLR")
#df=round(df,5)
#write.csv(df,file="9.csv")
#days=0:30
#df=data.frame(days,BFstar1,BFstar2)
#colnames(df)=c("days","BF*1","BF*2")
#df=round(df,5)
#write.csv(df,file="2.csv")
# NOTE(review): PvalLRrho/PvalMLRrho/PvalSLRrho/PvalMSLRrho are never
# assigned in this script (they keep their initial zeros) — presumably
# filled by companion code before this export; confirm.
days=0:30
df=data.frame(days,PvalLRrho,PvalMLRrho,PvalSLRrho,PvalMSLRrho)
colnames(df)=c("days","LR","Modified LR","SLR","Modified SLR")
df=round(df,5)
write.csv(df,file="4.csv")
| /Event_Study Codes.R | permissive | adu3110/Baysian-Event-Study | R | false | false | 5,922 | r |
# Event-study configuration and result containers.
# NOTE(review): this script expects return matrices `R` (securities) and `Rm`
# (market) with at least 211 daily columns to already exist in the workspace.
n <- 112   # number of securities
Ei <- 175  # length of the estimation window (days 1..175)
#########
# Market-model residuals over the estimation window, one row per security.
e <- matrix(0, n, 175)
# Rank workspaces for the Corrado-type nonparametric tests
# (estimation window plus the 31-day event window).
r <- res <- matrix(0, n, 175 + 31)
# Per-event-day test statistics and p-values (one row per event day 0..30).
tP <- tCross <- tBW <- tBMP <-
  PvalP <- PvalCross <- PvalBW <- PvalBMP <-
  tMM <- tLR <- tMLR <- tSLR <- tMSLR <-
  PvalMM <- PvalLR <- PvalMLR <- PvalSLR <- PvalMSLR <- matrix(0, 31, 1)
tLRrho <- tMLRrho <- tSLRrho <- tMSLRrho <-
  PvalLRrho <- PvalMLRrho <- PvalSLRrho <- PvalMSLRrho <- matrix(0, 31, 1)
BFstar1 <- BFstar2 <- matrix(0, 31, 1)
# Cumulative and nonparametric statistics.  These were assigned elementwise
# later in the script without ever being created, which fails with
# "object not found" in a fresh session, so initialise them here too.
tBWCum <- PvalBWCum <- tPCum <- PvalPCum <- tBMPCum <- PvalBMPCum <-
  tCrossCum <- PvalCrossCum <-
  Corr <- CorrPval <- CorrCum <- CorrPvalCum <-
  CorrZ <- CorrZPval <- CorrZCum <- CorrZPvalCum <-
  Sign <- SignPval <- SignCum <- SignPvalCum <- matrix(0, 31, 1)
# Abnormal returns, standardised ARs and variance-correction factors
# (securities x event days).
AR <- SAR <- C <- CCum <- matrix(0, n, 31)
# Market-model intercept and slope per security.
Alpha <- Beta <- matrix(0, n, 1)
# Fit the market model per security over the estimation window (days 1..175)
# and compute abnormal returns over the event window (series positions
# 181..211).  Positions 176..180 appear to be a gap between the windows —
# TODO confirm against the study design.
# NOTE(review): the loop bound hard-codes 112 instead of using `n`.
for(i in 1:112)
{
# OLS market model: R_i = alpha + beta * Rm_i + e.
m=lm(R[i,1:175]~Rm[i,1:175])
Alpha[i]=m$coef[1]
Beta[i]=m$coef[2]
e[i,]=m$residuals
for(t in 1:31)
{
# Abnormal return on event day t.
AR[i,t]= R[i,180+t] - (Alpha[i] + Beta[i]*Rm[i,180+t])
#estimation period
Rmmean=mean(Rm[i,1:175])
# Patell-style prediction-error correction factor for day t ...
C[i,t] = (1+ (1/Ei) + ((Rm[i,180+t]-Rmmean)^2/(sum((Rm[i,1:175]-Rmmean)^2))))
# ... and its cumulative analogue over event days 1..t.
CCum[i,t] = (1+ (t/Ei) +((sum(Rm[i,(180+1):(180+t)])- (t*Rmmean))^2)/(t*sum((Rm[i,1:175]-Rmmean)^2)))
# Standardised abnormal return.
SAR[i,t]= AR[i,t]/(sd(e[i,])*sqrt(C[i,t]))
}# for tau
}# for i in 1:n stocks
# Per-event-day test statistics.  For each event day t (1..31) this computes
# parametric tests (Brown-Warner, Patell, BMP, method-of-moments, cross-
# sectional) and nonparametric tests (Corrado rank, Corrado-Zivney, sign),
# each with a cumulative (CAR-style) counterpart over days 1..t.
for(t in 1:31)
{
#BW=================================================
# Brown-Warner: time-series variance of the mean residual over the
# estimation window.  `sum` is reused as a plain accumulator; calls to the
# base function sum() below still resolve correctly, but the shadowing is
# confusing and worth renaming.
sum=0
for(days in 1:175)
{
x=(mean(e[,days])-mean(e))^2
sum= sum + x
}
denomBW=sqrt(sum/174)
tBW[t]=mean(AR[,t])/denomBW
PvalBW[t]= 2*(1-pt(abs(tBW[t]),174))
tBWCum[t]= sum(tBW[1:t])/sqrt(t) #coz denom is const
PvalBWCum[t]= 2*(1-pt(abs(tBWCum[t]),174))
#Patell====================================================================================
tP[t]= sum(SAR[,t])/sqrt(n*(173/171))
PvalP[t]=2*(1-pnorm(abs(tP[t])))
tPCum[t]= sum(tP[1:t])/sqrt(t)
# NOTE(review): this p-value is computed from tP[t]; the cumulative
# statistic tPCum[t] was presumably intended — confirm and fix.
PvalPCum[t]=2*(1-pnorm(abs(tP[t])))
#BMP=====================================================================================
# BMP: cross-sectional variance of the standardised abnormal returns.
tBMP[t]=sum(SAR[,t])/sqrt(var(SAR[,t])*n)
PvalBMP[t]= 2*(1-pt(abs(tBMP[t]),n-1))
tBMPCum[t]=sum(tBMP[1:t])/sqrt(t)
PvalBMPCum[t]=2*(1-pt(abs(tBMPCum[t]),n-1))
#=================================================================
#MM=================
#MM denom: per-security residual variance scaled by the correction factor.
sum=0
for(i in 1:n)
{
x=var(e[i,])*C[i,t]
sum=sum+x
}
tMM[t]= n*mean(AR[,t])/sqrt(sum)
PvalMM[t]=2*(1-pnorm(abs(tMM[t])))
#=====================================================================================
#Corrado===================
# Rank test on estimation residuals plus the event day.
# NOTE(review): rank() runs over the full 175+31-column row of `res`, so
# the untouched tail beyond position 175+t also participates in the
# ranking — verify this matches the source paper.
#==================Do not touch coz coded from original paper
for(i in 1:n)
{
res[i,1:175]=e[i,]
res[i,176:(175+t)]=AR[i,t]
r[i,]=rank(res[i,])
}
#Corr denom
x=matrix(0,175+t,1)
for(days in 1:(175+t))
{
x[days]= (1/n)*sum(r[,days]-mean(1:(Ei+1)))
}
denom=sqrt((1/(175+t))* sum(x^2))
Corr[t]= (1/n)* sum(r[,175+t]- mean(1:(Ei+1)))/ denom
CorrPval[t]=2*(1-pnorm(abs(Corr[t])))
#===================Do not touch
# Cumulative Corrado rank statistic over event days 1..t.
rCum = resCum = matrix(0,n,175+t)
for(i in 1:n)
{
resCum[i,1:175]=e[i,]
resCum[i,176:(175+t)]=AR[i,1:t]
rCum[i,]=rank(resCum[i,])
}
sumrank=matrix(0,n,1)
for(l in 1:t)
{
x = rCum[,175+l]
sumrank = sumrank + x
}
CorrCum[t]= sqrt(1/n)* sum((sumrank - t * mean(1:(Ei+t+1)) ) / (
sqrt(t*Ei*(t+Ei+1)/12)))
CorrPvalCum[t]=2*(1-pnorm(abs(CorrCum[t])))
#============================================================================================
#=============== do not touch
#Corrado & Zivney=================
# Same rank construction on standardised residuals / abnormal returns.
for(i in 1:n)
{
res[i,1:175]=e[i,]/sd(e[i,])
res[i,175+t]=(AR[i,t]/(sd(e[i,])*C[i,t]))/(sd(AR[,t]/sd(AR[,t])))
r[i,]=rank(res[i,])
}
CorrZ[t]=sqrt(1/n)* sum( (r[,175+t] -mean(1:(Ei+1))) /sd(1:(Ei+1)))
CorrZPval[t]=2*(1-pnorm(abs(CorrZ[t])))
#================ do not touch
rCum = resCum = matrix(0,n,175+t)
for(i in 1:n)
{
resCum[i,1:175]=e[i,]/sd(e[i,])
#resCum[i,1:200]=e[i,]
for(l in 1:t)
{
resCum[i,175+l]=AR[i,l]/(sd(e[i,])*C[i,l])/(sd(AR[,l]/sd(AR[,l])))
#resCum[i,200+l]=SAR[i,l]
}
rCum[i,]=rank(resCum[i,])
}
sumrank=matrix(0,n,1)
for(l in 1:t)
{
x = rCum[,175+l]
sumrank = sumrank + x
}
CorrZCum[t]= sqrt(1/n)* sum((sumrank - t *
mean(1:(Ei+t+1)))/(sqrt(t*Ei*(t+Ei+1)/12)))
CorrZPvalCum[t]=2*(1-pnorm(abs(CorrZCum[t])))
#=======================================================================
#sign===========
# Sign test: 1 when the abnormal return exceeds the security's median
# estimation-window residual.  NOTE(review): `sign` shadows base::sign(),
# and this full 31-day table is recomputed identically on every pass of
# the outer t-loop — it could be hoisted out.
x=matrix(0,n,1)
sign=matrix(0,n,31)
for(l in 1:31)
{
for(i in 1:n)
{
x[i]=median(e[i,])
if(AR[i,l]>x[i])
{
sign[i,l]=1
}
if(AR[i,l]<=x[i])
{
sign[i,l]=0
}
}
}
#Sign[t]= sqrt(n)^-1 * sum((sign[,t]- 0.5)/sqrt(0.5*0.5))
Sign[t]= sqrt(n) * (sum(sign[,t])/n - 0.5)/(0.5)
SignPval[t]=2*(1-pnorm(abs(Sign[t])))
x=sumsign=matrix(0,n,1)
for(l in 1:t)
{
x=sign[,l]
sumsign=sumsign+x
}
SignCum[t]=sqrt(n)^-1 * sum((sumsign- t*0.5)/sqrt(t*0.5*0.5))
SignPvalCum[t]=2*(1-pnorm(abs(SignCum[t])))
#==================================================================================
#cross==========================
# Cross-sectional t-test on day-t ARs, and on CARs over days 1..t.
tCross[t]=t.test(AR[,t])$statistic
PvalCross[t]=t.test(AR[,t])$p.value
sum=x=matrix(0,n,1)
for(l in 1:t)
{
x=AR[,l]
sum=sum+x
}
tCrossCum[t]=t.test(sum)$statistic
PvalCrossCum[t]=t.test(sum)$p.value
}
# ---- Export results.  Earlier export variants kept for reference: ----
#df=data.frame(PvalBW,PvalP,PvalBMP,PvalCross,PvalLR,PvalSLR,PvalMLR,PvalMSLR,PvalMM,
# CorrPval,CorrZPval,SignPval)
#days=0:30
#df=data.frame(days,PvalBW,PvalP,PvalBMP,PvalCross,PvalMM,PvalLR,PvalMLR,PvalSLR,PvalMSLR)
#colnames(df)=c("days","Brown and Warner","Patell","BMP", "Cross sectional","Method of Moments","LR","Modified LR","SLR","Modified SLR")
#df=round(df,5)
#write.csv(df,file="9.csv")
#days=0:30
#df=data.frame(days,BFstar1,BFstar2)
#colnames(df)=c("days","BF*1","BF*2")
#df=round(df,5)
#write.csv(df,file="2.csv")
# NOTE(review): PvalLRrho/PvalMLRrho/PvalSLRrho/PvalMSLRrho are never
# assigned in this script (they keep their initial zeros) — presumably
# filled by companion code before this export; confirm.
days=0:30
df=data.frame(days,PvalLRrho,PvalMLRrho,PvalSLRrho,PvalMSLRrho)
colnames(df)=c("days","LR","Modified LR","SLR","Modified SLR")
df=round(df,5)
write.csv(df,file="4.csv")
|
#### Q1 ####
# Question 1: How does the preference between the two dishes vary with age?
# Question 2: How does the preference between the two dishes vary by city?
# Question 3: What percentage of people in each city voted?
#### Q2 ####
# "Messy" wide table: row 1 = city, row 2 = age bucket, rows 3-4 = Yes/No
# vote counts stored as comma-formatted strings.
messy <- data.frame(c1 = c("","","Yes", "No"), c2 = c("Edinburgh","16-24","80,100", "35,900"), c3 = c("Edinburgh","25+","143,000", "214,800"), c4 = c("Glasgow","16-24","99,400", "43,000"), c5 = c("Glasgow","25+","150,400", "207,000"))
#### Q3 ####
# NOTE(review): loading plyr together with dplyr is a classic conflict
# source — dplyr is attached last, so the bare `summarize` passed to ddply()
# below resolves to dplyr's version; verify the grouped sums are correct.
library(tidyr)
library(plyr)
library(dplyr)
# Reshape to long format: one row per (column, yes/no) vote count.
# gather() is superseded by pivot_longer() in current tidyr.
data <- messy[3:4,] %>% gather(yesno, votes, c2:c5) %>% select(-yesno)
# Rebuild the city labels from header row 1 (each city spans two columns,
# and each column contributes a Yes row and a No row).
city1 <- c(t(as.vector(messy[1,])[-1][1:2]), t(as.vector(messy[1,])[-1][1:2]))
city2 <- c(t(as.vector(messy[1,])[-1][3:4]), t(as.vector(messy[1,])[-1][3:4]))
data$city <- c(city1, city2)
# Rebuild the age buckets from header row 2.
agebucket1 <- c(t(as.vector(messy[2,])[-1][1:1]))
agebucket2 <- c(t(as.vector(messy[2,])[-1][2:2]))
data$age <- c(agebucket1,agebucket1, agebucket2, agebucket2,agebucket1,agebucket1, agebucket2, agebucket2)
# Strip the thousands separators.  extract_numeric() is deprecated;
# readr::parse_number() is the modern replacement.
data$votes <- extract_numeric(data$votes)
#### Q4 ####
#1 Preference by city (ddply sorts group keys, so "No" precedes "Yes"
#  within each city; column 3 holds the summed votes).
byCity <- ddply(data, .(city, c1), summarize, s1 = sum(votes))
EdinPrefer <- byCity[2,3] / (byCity[2,3] + byCity[1,3])
GlasgPrefer <- byCity[4,3] / (byCity[4,3] + byCity[3,3])
#Answer: 47.7% of Edinburgh preferred Cullen skink while 49.97% of Glasgow voters preferred Cullen skink
#2 Preference by age bucket, same row ordering.
byAge <- ddply(data, .(age, c1), summarize, s1 = sum(votes))
youngPrefer <- byAge[2,3] / (byAge[2,3] + byAge[1,3])
oldPrefer <- byAge[4,3] / (byAge[4,3] + byAge[3,3])
#Answer: 69.4% of younger voters (16-24) preferred Cullen skink while 41.1% of older voters (25+) preferred Cullen skink
#3 Turnout: total votes divided by city population.
turnout <- ddply(data, .(city), summarize, s1 = sum(votes))
edinTurn <- turnout[1,2] / 487300 #number found from wikipedia
glasgTurn <- turnout[2,2] / 596550
#Answer: There was a better voter turnout in edinburgh
#### Q5 ####
#Answer: Having gone through the exercise, I think it would have been beneficial to have yes and no votes for each city/agebucket pair on the same row of the dataframe. My first and second
# questions in particularly could have been answered more elegantly if I had structured the data in this way.
| /Week 5/w5Assignment.R | no_license | jhink7/IS607 | R | false | false | 2,167 | r | #### Q1 ####
# Question 1: How does the preference between the two dishes vary with age?
# Question 2: How does the preference between the two dishes vary by city?
# Question 3: What percentage of people in each city voted?
#### Q2 ####
messy <- data.frame(c1 = c("","","Yes", "No"), c2 = c("Edinburgh","16-24","80,100", "35,900"), c3 = c("Edinburgh","25+","143,000", "214,800"), c4 = c("Glasgow","16-24","99,400", "43,000"), c5 = c("Glasgow","25+","150,400", "207,000"))
#### Q3 ####
library(tidyr)
library(plyr)
library(dplyr)
data <- messy[3:4,] %>% gather(yesno, votes, c2:c5) %>% select(-yesno)
city1 <- c(t(as.vector(messy[1,])[-1][1:2]), t(as.vector(messy[1,])[-1][1:2]))
city2 <- c(t(as.vector(messy[1,])[-1][3:4]), t(as.vector(messy[1,])[-1][3:4]))
data$city <- c(city1, city2)
agebucket1 <- c(t(as.vector(messy[2,])[-1][1:1]))
agebucket2 <- c(t(as.vector(messy[2,])[-1][2:2]))
data$age <- c(agebucket1,agebucket1, agebucket2, agebucket2,agebucket1,agebucket1, agebucket2, agebucket2)
data$votes <- extract_numeric(data$votes)
#### Q4 ####
#1
byCity <- ddply(data, .(city, c1), summarize, s1 = sum(votes))
EdinPrefer <- byCity[2,3] / (byCity[2,3] + byCity[1,3])
GlasgPrefer <- byCity[4,3] / (byCity[4,3] + byCity[3,3])
#Answer: 47.7% of Edinburgh preferred Cullen skink while 49.97% of Glasgow voters preferred Cullen skink
#2
byAge <- ddply(data, .(age, c1), summarize, s1 = sum(votes))
youngPrefer <- byAge[2,3] / (byAge[2,3] + byAge[1,3])
oldPrefer <- byAge[4,3] / (byAge[4,3] + byAge[3,3])
#Answer: 69.4% of younger voters (16-24) preferred Cullen skink while 41.1% of older voters (25+) preferred Cullen skink
#3
turnout <- ddply(data, .(city), summarize, s1 = sum(votes))
edinTurn <- turnout[1,2] / 487300 #number found from wikipedia
glasgTurn <- turnout[2,2] / 596550
#Answer: There was a better voter turnout in edinburgh
#### Q5 ####
#Answer: Having gone through the exercise, I think it would have been beneficial to have yes and no votes for each city/agebucket pair on the same row of the dataframe. My first and second
# questions in particularly could have been answered more elegantly if I had structured the data in this way.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chk-length.R
\name{chk_length}
\alias{chk_length}
\alias{vld_length}
\title{Check Length}
\usage{
chk_length(x, length = 1L, upper = length, x_name = NULL)
vld_length(x, length = 1L, upper = length)
}
\arguments{
\item{x}{The object to check.}
\item{length}{A count of the length.}
\item{upper}{A count of the max length.}
\item{x_name}{A string of the name of object x or NULL.}
}
\value{
The \code{chk_} function throws an informative error if the test fails or
returns the original object if successful so it can be used in pipes.
The \code{vld_} function returns a flag indicating whether the test was met.
}
\description{
Checks length is a particular value or range using
\code{length(x) >= length && length(x) <= upper}
}
\section{Functions}{
\itemize{
\item \code{vld_length()}: Validate Length
}}
\examples{
# chk_length
chk_length("text")
try(chk_length("text", length = 2))
# vld_length
vld_length(2:1, 2)
vld_length(2:1, 1)
}
\seealso{
\code{\link[=check_dim]{check_dim()}}
Other chk_misc:
\code{\link{chk_match}()},
\code{\link{chk_missing}()},
\code{\link{chk_named}()},
\code{\link{chk_not_any_na}()},
\code{\link{chk_not_empty}()},
\code{\link{chk_not_missing}()},
\code{\link{chk_sorted}()},
\code{\link{chk_unique}()},
\code{\link{chk_valid_name}()}
}
\concept{chk_misc}
| /man/chk_length.Rd | permissive | poissonconsulting/chk | R | false | true | 1,373 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chk-length.R
\name{chk_length}
\alias{chk_length}
\alias{vld_length}
\title{Check Length}
\usage{
chk_length(x, length = 1L, upper = length, x_name = NULL)
vld_length(x, length = 1L, upper = length)
}
\arguments{
\item{x}{The object to check.}
\item{length}{A count of the length.}
\item{upper}{A count of the max length.}
\item{x_name}{A string of the name of object x or NULL.}
}
\value{
The \code{chk_} function throws an informative error if the test fails or
returns the original object if successful so it can be used in pipes.
The \code{vld_} function returns a flag indicating whether the test was met.
}
\description{
Checks length is a particular value or range using
\code{length(x) >= length && length(x) <= upper}
}
\section{Functions}{
\itemize{
\item \code{vld_length()}: Validate Length
}}
\examples{
# chk_length
chk_length("text")
try(chk_length("text", length = 2))
# vld_length
vld_length(2:1, 2)
vld_length(2:1, 1)
}
\seealso{
\code{\link[=check_dim]{check_dim()}}
Other chk_misc:
\code{\link{chk_match}()},
\code{\link{chk_missing}()},
\code{\link{chk_named}()},
\code{\link{chk_not_any_na}()},
\code{\link{chk_not_empty}()},
\code{\link{chk_not_missing}()},
\code{\link{chk_sorted}()},
\code{\link{chk_unique}()},
\code{\link{chk_valid_name}()}
}
\concept{chk_misc}
|
#!/usr/bin/env Rscript
# MA-style plot of gRNA abundance vs. GFP-negative enrichment, coloured by Tag.
# Usage: maplot.r <counts.tsv> <output.pdf>
#   <counts.tsv>: tab-separated table with a header; must contain the columns
#                 GFPneg4, GFPneg5, Total4, Total5 and Tag.
library(ggplot2)
library(grid)
args <- commandArgs(trailingOnly = TRUE)
# Fail early with a usage message instead of a cryptic read.table/pdf error.
if (length(args) < 2) {
  stop("usage: maplot.r <counts.tsv> <output.pdf>", call. = FALSE)
}
data = read.table(args[1],header=TRUE,sep="\t")
pdf(args[2])
# Earlier plot variants (library baseline and GFP+), kept for reference:
#ggplot(data,aes(x=log2((Library+Total4+Total5)/3), y=log2((Total4+Total5)/2/Library)))+geom_point()+labs(x="log2(Mean Read Counts of gRNA)",y="log2(Baseline/Library)")+theme(axis.title=element_text(size =20),axis.text=element_text(size =15))+geom_hline(yintercept = c(-1,1))
#ggplot(data,aes(x=log2((GFPpos4+GFPpos5+Total4+Total5)/4), y=log2((GFPpos4+GFPpos5)/(Total4+Total5)),color=Tag ))+geom_point()+labs(x="log2(Mean Read Counts of gRNA)",y="log2(GFP+/Baseline)")+theme(axis.title=element_text(size =20),axis.text=element_text(size =15),legend.position=c(0.9,0.1),legend.title=element_blank())+geom_hline(yintercept = c(-1,1))
p <- ggplot(data, aes(x = log2((GFPneg4 + GFPneg5 + Total4 + Total5) / 4),
                      y = log2((GFPneg4 + GFPneg5) / (Total4 + Total5)),
                      color = Tag)) +
  geom_point() +
  labs(x = "log2(Mean Read Counts of gRNA)", y = "log2(GFP-/Baseline)") +
  theme(axis.title = element_text(size = 20),
        axis.text = element_text(size = 15),
        legend.position = c(0.9, 0.1),
        legend.title = element_blank()) +
  geom_hline(yintercept = c(-1, 1))  # guide lines at a 2-fold change
print(p)   # explicit print: reliable under both Rscript and source()
dev.off()  # close the device so the PDF is flushed to disk
| /workspace/CRISPR/new_year/maplot_zhangcheng.r.2018050611 | no_license | ijayden-lung/hpc | R | false | false | 1,127 | 2018050611 | #!/usr/bin/env Rscript
# MA-style plot of gRNA abundance vs. GFP-negative enrichment, coloured by Tag.
# Usage: <script> <counts.tsv> <output.pdf>
#   <counts.tsv>: tab-separated table with a header; must contain the columns
#                 GFPneg4, GFPneg5, Total4, Total5 and Tag.
library(ggplot2)
library(grid)
args <- commandArgs(trailingOnly = TRUE)
# Fail early with a usage message instead of a cryptic read.table/pdf error.
if (length(args) < 2) {
  stop("usage: <script> <counts.tsv> <output.pdf>", call. = FALSE)
}
data = read.table(args[1],header=TRUE,sep="\t")
pdf(args[2])
# Earlier plot variants (library baseline and GFP+), kept for reference:
#ggplot(data,aes(x=log2((Library+Total4+Total5)/3), y=log2((Total4+Total5)/2/Library)))+geom_point()+labs(x="log2(Mean Read Counts of gRNA)",y="log2(Baseline/Library)")+theme(axis.title=element_text(size =20),axis.text=element_text(size =15))+geom_hline(yintercept = c(-1,1))
#ggplot(data,aes(x=log2((GFPpos4+GFPpos5+Total4+Total5)/4), y=log2((GFPpos4+GFPpos5)/(Total4+Total5)),color=Tag ))+geom_point()+labs(x="log2(Mean Read Counts of gRNA)",y="log2(GFP+/Baseline)")+theme(axis.title=element_text(size =20),axis.text=element_text(size =15),legend.position=c(0.9,0.1),legend.title=element_blank())+geom_hline(yintercept = c(-1,1))
p <- ggplot(data, aes(x = log2((GFPneg4 + GFPneg5 + Total4 + Total5) / 4),
                      y = log2((GFPneg4 + GFPneg5) / (Total4 + Total5)),
                      color = Tag)) +
  geom_point() +
  labs(x = "log2(Mean Read Counts of gRNA)", y = "log2(GFP-/Baseline)") +
  theme(axis.title = element_text(size = 20),
        axis.text = element_text(size = 15),
        legend.position = c(0.9, 0.1),
        legend.title = element_blank()) +
  geom_hline(yintercept = c(-1, 1))  # guide lines at a 2-fold change
print(p)   # explicit print: reliable under both Rscript and source()
dev.off()  # close the device so the PDF is flushed to disk
|
# Differential-expression results at the complex and proteoform level.
complex_DiffExprComplex <- readRDS("complex_DiffExprComplex.rda")
complex_DiffExprProteoform <- readRDS("complex_DiffExprProteoform.rda")
# Flag complexes containing at least one proteoform: ids with an underscore
# are treated as proteoform ids (data.table update by reference, per complex).
complex_DiffExprProteoform[, has_proteoform := ifelse(any(grep("_",proteoform_id)),TRUE,FALSE), by = c("complex_id")]
# Transfer the flag onto the complex-level table via (complex_id, apex).
n_proteoform_table <- unique(subset(complex_DiffExprProteoform, select=c("complex_id","apex","has_proteoform")))
complex_DiffExprComplex <- merge(complex_DiffExprComplex, n_proteoform_table, by=c("complex_id","apex"))
# Split complexes into those without / with proteoforms for separate stats.
noProteoforms_DiffExprComplex <- subset(complex_DiffExprComplex, has_proteoform==FALSE)
withProteoforms_DiffExprComplex <- subset(complex_DiffExprComplex, has_proteoform==TRUE)
# Tally complexes by differential-expression status.
# Thresholds: BH-adjusted p < 0.05 and |median log2 FC| > 1.
# `data` must carry complex_id, pBHadj and medianLog2FC columns (data.table).
# Returns a 4-row data.table with counts for the categories
# unchanged / up & down / up / down (all rows labelled level = "complexes").
getCount <- function(data){
  all_ids  <- unique(data$complex_id)
  sig      <- data[pBHadj < 0.05]
  diff_ids <- unique(sig[abs(medianLog2FC) > 1]$complex_id)
  up_ids   <- unique(sig[medianLog2FC > 1]$complex_id)
  down_ids <- unique(sig[medianLog2FC < -1]$complex_id)
  data.table(
    level = rep("complexes", 4L),
    quant = c("unchanged", "up & down", "up", "down"),
    count = c(length(all_ids) - length(diff_ids),
              length(intersect(up_ids, down_ids)),
              length(setdiff(up_ids, down_ids)),
              length(setdiff(down_ids, up_ids)))
  )
}
# Stack the per-category counts for complexes without (proteoform = 1) and
# with (proteoform = 2) proteoforms, then draw them as a stacked bar chart
# written to complexDiffStats.pdf.
complexStats_noProteoforms <- getCount(noProteoforms_DiffExprComplex)
complexStats_noProteoforms[, proteoform := 1]
complexStats_withProteoforms <- getCount(withProteoforms_DiffExprComplex)
complexStats_withProteoforms[, proteoform := 2]
proteinFeatureDiffStats <- rbind(complexStats_noProteoforms, complexStats_withProteoforms)
# Fix the category display order and stack proteoform-containing complexes
# first (factor levels c(2, 1)).
proteinFeatureDiffStats$quant <- factor(proteinFeatureDiffStats$quant,
                                        levels = c("unchanged", "up & down", "up", "down"))
proteinFeatureDiffStats$proteoform <- factor(proteinFeatureDiffStats$proteoform, levels = c(2, 1))
pdf("complexDiffStats.pdf", width = 5, height = 3)
ggplot(proteinFeatureDiffStats, aes(x = quant, y = count, fill = proteoform)) +
  geom_col() +
  theme_classic() +
  theme(axis.title.x = element_blank()) +
  scale_fill_manual(values = c("#3288BD", "#999999"),
                    labels = c(expression(phantom(x) >= "2"), "1"),
                    name = "N proteoforms")
# Alternative legend placement kept from the original:
# theme(legend.position = "bottom")
dev.off()
# Fraction of differentially expressed complexes, split by proteoform status
# (proteoform == 1: no proteoform, == 2: contains proteoforms).
# BUG FIX: the original filtered on level == "proteins", but getCount() labels
# every row of proteinFeatureDiffStats as "complexes", so each sum below was
# taken over an empty subset (totals of 0 and NaN ratios).
all_proteins_noProteoforms <- sum(proteinFeatureDiffStats[proteoform == 1][level=="complexes"]$count)
diff_proteins_noProteoforms <- sum(proteinFeatureDiffStats[proteoform == 1][level=="complexes"][quant != "unchanged"]$count)
all_proteins_noProteoforms
diff_proteins_noProteoforms
diff_proteins_noProteoforms/all_proteins_noProteoforms
all_proteins_withProteoforms <- sum(proteinFeatureDiffStats[proteoform == 2][level=="complexes"]$count)
diff_proteins_withProteoforms <- sum(proteinFeatureDiffStats[proteoform == 2][level=="complexes"][quant != "unchanged"]$count)
all_proteins_withProteoforms
diff_proteins_withProteoforms
diff_proteins_withProteoforms/all_proteins_withProteoforms
###########################
# Quick console counts of significantly changed complexes per direction.
# NOTE(review): "#up" annotates the medianLog2FC < -1 query and "#down" the
# medianLog2FC > 1 query -- the opposite of the getCount() convention above.
# Confirm the intended fold-change sign before citing these numbers.
###########################
###########################
#up
length(unique(complex_DiffExprComplex[pBHadj < 0.05][medianLog2FC < -1]$complex_id))
#down
length(unique(complex_DiffExprComplex[pBHadj < 0.05][medianLog2FC > 1]$complex_id))
###########################
###########################
###########################
# Histogram of how many co-elution features were detected per complex,
# written to complex_nFeatures.pdf; each bar is labelled with its count.
complex_DiffExprComplex[, n_features := .N, by = c("complex_id")]
complex_DiffExprComplex_countDT <- unique(subset(complex_DiffExprComplex,
                                                 select = c("complex_id", "n_features")))
pdf("complex_nFeatures.pdf", width = 4, height = 4)
q <- ggplot(complex_DiffExprComplex_countDT, aes(x = n_features)) +
  stat_bin(binwidth = 1) +
  # ..count.. is the legacy spelling of after_stat(count); kept for
  # compatibility with older ggplot2.
  stat_bin(binwidth = 1, geom = "text", aes(label = ..count..), vjust = -0.5) +
  labs(x = "N co-elution features", y = "N complexes") +
  theme_classic() +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
print(q)
dev.off()
###########################
# Find candidate features where proteoforms of the same protein behave
# differently within a complex feature.
###########################
###########################
# TRUE if any of this feature's proteoform ids contains an underscore.
complex_DiffExprProteoform[, is_proteoform := ifelse(any(grep("_",proteoform_id)),TRUE,FALSE), by = c("feature_id")]
# Keep only proteoform rows of complexes that contain proteoforms.
proteoform_includingComplexes <- complex_DiffExprProteoform[has_proteoform==T][is_proteoform==T]
# Number of proteoforms per protein overall vs. within one complex feature.
proteoform_includingComplexes[,n_proteoforms_total := length(unique(proteoform_id)), by=c("feature_id")]
proteoform_includingComplexes[,n_proteoforms_inFeature := length(unique(proteoform_id)), by=c("feature_id","complex_id","apex")]
# Printed for inspection: features where not all proteoforms co-occur.
proteoform_includingComplexes[n_proteoforms_total!=n_proteoforms_inFeature]
# Spread of the proteoform fold changes within each (feature, complex, apex).
proteoform_includingComplexes[,max_proteoform_FC:=max(medianLog2FC), by=c("feature_id","complex_id","apex")]
proteoform_includingComplexes[,min_proteoform_FC:=min(medianLog2FC), by=c("feature_id","complex_id","apex")]
proteoform_includingComplexes[,diff_proteoform_FC:=max_proteoform_FC-min_proteoform_FC]
proteoform_includingComplexes_countDT <- unique(subset(proteoform_includingComplexes, select=c("feature_id","complex_id","apex","diff_proteoform_FC")))
# Candidates: proteoforms of one protein differ by more than 2 log2 units.
interestingIDs <- unique(proteoform_includingComplexes_countDT[diff_proteoform_FC>2]$feature_id)
# Inputs for the trace/feature plots below (CCprofiler objects).
reassignedTraces_list <- readRDS("traces_list_reassignedProteoforms.rds")
design_matrix <- readRDS("design_matrix.rda")
calibrationFunctions <- readRDS("calibration.rds")
reassignedFeatures <- readRDS("reassignedProteoformFeatures.rds")
protTraces <- readRDS("protein_traces_list.rds")
scoredDataAll <- readRDS("scoredDataAll.rds")
# One pair of pages per candidate feature: first the peptide-level proteoform
# features of the protein, then the protein-level feature of the complex the
# candidate belongs to (first assigned complex only).
pdf("proteoformSpecificComplexes_interestingIDs.pdf", height = 5, width = 8)
for (current_id in interestingIDs) {
  current_complex <- proteoform_includingComplexes_countDT[feature_id == current_id]$complex_id[1]
  # Subset kept from the original; its return value is not used afterwards.
  sub <- subset(reassignedTraces_list,
                trace_subset_ids = current_id,
                trace_subset_type = "protein_id")
  plotFeatures(feature_table = reassignedFeatures,
               traces = reassignedTraces_list,
               calibration = calibrationFunctions,
               feature_id = current_id,
               design_matrix = design_matrix,
               annotation_label = "proteoform_id",
               colour_by = "proteoform_id",
               peak_area = TRUE,
               legend = TRUE,
               onlyBest = FALSE,
               monomer_MW = TRUE)
  plotFeatures(feature_table = scoredDataAll,
               traces = protTraces,
               design_matrix = design_matrix,
               feature_id = current_complex,
               calibration = calibrationFunctions,
               annotation_label = "Entry_name",
               peak_area = TRUE,
               onlyBest = FALSE,
               monomer_MW = TRUE,
               PDF = FALSE,
               name = current_id)
}
dev.off()
# Detailed plots for the Q99613 candidate: its peptide-level proteoform
# features and the protein-level feature of its (first) assigned complex.
id = "Q99613"
complex <- proteoform_includingComplexes_countDT[feature_id == id]$complex_id[1]
sub <- subset(reassignedTraces_list, trace_subset_ids = id, trace_subset_type = "protein_id")
# NOTE(review): 'reassignedTraces' (without the _list suffix) is not defined
# anywhere in this script -- verify it exists before running standalone.
plotPeptideCluster(reassignedTraces, "Q99613", PDF = FALSE)
pdf("proteoformSpecificComplexes_Q99613_peptides.pdf", height = 5, width = 8)
plotFeatures(feature_table = reassignedFeatures,
             traces = reassignedTraces_list,
             calibration = calibrationFunctions,
             feature_id = id,
             design_matrix = design_matrix,
             annotation_label = "proteoform_id",
             colour_by = "proteoform_id",
             peak_area = FALSE,
             apex = FALSE,
             legend = TRUE,
             onlyBest = FALSE,
             monomer_MW = TRUE)
dev.off()
pdf("proteoformSpecificComplexes_Q99613_complex.pdf", height = 7, width = 8)
plotFeatures(feature_table = scoredDataAll,
             traces = protTraces,
             design_matrix = design_matrix,
             feature_id = complex,
             calibration = calibrationFunctions,
             annotation_label = "Entry_name",
             peak_area = TRUE,
             onlyBest = FALSE,
             monomer_MW = TRUE,
             PDF = FALSE,
             name = id)
dev.off()
library(ggrepel)
# Volcano plot (log2 fold change vs. -log10 BH-adjusted p-value) of
# differential expression results.
#
# testResults   table from testDifferentialExpression(); needs (median)Log2FC
#               and pBHadj columns -- prefixed with "global_" when
#               level = "global" -- plus one id column among feature_id /
#               complex_id / protein_id / proteoform_id for highlighting.
# highlight     optional vector of ids drawn as red diamonds.
# FC_cutoff     fold-change cutoff marked by vertical dashed lines.
# pBHadj_cutoff adjusted p-value cutoff marked by a horizontal dashed line.
# name          basename of the output PDF (used only when PDF = TRUE).
# PDF           if TRUE, write to <name>.pdf (feature) or <name>_global.pdf.
# level         which set of columns to plot: "feature" or "global".
plotVolcano <- function(testResults,
                        highlight=NULL,
                        FC_cutoff=2,
                        pBHadj_cutoff=0.01,
                        name="volcanoPlot",
                        PDF=FALSE,
                        level=c("feature","global")) {
  level <- match.arg(level)  # errors on anything but "feature"/"global"

  # Pick the fold-change / p-value columns for the requested level.
  # BUG FIX: the global branch used to test for "medianLog2FC" while plotting
  # "global_medianLog2FC"; it now tests the column it actually plots.
  if (level == "feature") {
    fc_col   <- if ("medianLog2FC" %in% names(testResults)) "medianLog2FC" else "log2FC"
    p_col    <- "pBHadj"
    pdf_file <- paste0(name, ".pdf")
  } else {
    fc_col   <- if ("global_medianLog2FC" %in% names(testResults)) "global_medianLog2FC" else "global_log2FC"
    p_col    <- "global_pBHadj"
    pdf_file <- paste0(name, "_", level, ".pdf")
  }

  if (PDF) {
    pdf(pdf_file, height = 4, width = 4)
    on.exit(dev.off(), add = TRUE)  # close the device even if plotting errors
  }

  p <- ggplot(testResults, aes(x = .data[[fc_col]], y = -log10(.data[[p_col]]))) +
    geom_point(size = 1) +
    theme_classic() +
    geom_hline(yintercept = -log10(pBHadj_cutoff), colour = "red", linetype = "dashed") +
    geom_vline(xintercept = -log2(FC_cutoff), colour = "red", linetype = "dashed") +
    geom_vline(xintercept = log2(FC_cutoff), colour = "red", linetype = "dashed")

  if (!is.null(highlight)) {
    # Work out which id column the highlight values refer to. all() keeps the
    # check scalar, so a vector of ids no longer errors in the `if` condition
    # (length > 1 conditions are an error in current R).
    if ("feature_id" %in% names(testResults)) {
      id_col <- "feature_id"
    } else if ("complex_id" %in% names(testResults)) {
      id_col <- "complex_id"
    } else if (all(highlight %in% testResults$protein_id)) {
      id_col <- "protein_id"
    } else if (all(highlight %in% testResults$proteoform_id)) {
      id_col <- "proteoform_id"
    } else {
      stop("The testResults do not have the proper format. Input should be the result from testDifferentialExpression.")
    }
    marked <- testResults[testResults[[id_col]] %in% highlight, ]
    # Text labels (geom_text_repel) were commented out in the original and
    # remain disabled.
    p <- p + geom_point(data = marked,
                        aes(x = .data[[fc_col]], y = -log10(.data[[p_col]])),
                        colour = "red", fill = "red", size = 3, shape = 23)
  }

  print(p)
}
# Proteoform-level volcano with the Q99613 feature highlighted; writes
# complex_DiffExprComplex_Q99613.pdf (level defaults to "feature").
plotVolcano(complex_DiffExprProteoform, highlight=c("Q99613"), PDF = T, name = "complex_DiffExprComplex_Q99613")
| /thesis/complexFeaturePlots.R | no_license | SpliceosomeDepletion/CCprofilerAnalysis | R | false | false | 11,870 | r | complex_DiffExprComplex <- readRDS("complex_DiffExprComplex.rda")
complex_DiffExprProteoform <- readRDS("complex_DiffExprProteoform.rda")
# Flag complexes containing at least one proteoform: ids with an underscore
# are treated as proteoform ids (data.table update by reference, per complex).
complex_DiffExprProteoform[, has_proteoform := ifelse(any(grep("_",proteoform_id)),TRUE,FALSE), by = c("complex_id")]
# Transfer the flag onto the complex-level table and split on it.
n_proteoform_table <- unique(subset(complex_DiffExprProteoform, select=c("complex_id","apex","has_proteoform")))
complex_DiffExprComplex <- merge(complex_DiffExprComplex, n_proteoform_table, by=c("complex_id","apex"))
noProteoforms_DiffExprComplex <- subset(complex_DiffExprComplex, has_proteoform==FALSE)
withProteoforms_DiffExprComplex <- subset(complex_DiffExprComplex, has_proteoform==TRUE)
# Tally complexes by differential-expression status.
# Thresholds: BH-adjusted p < 0.05 and |median log2 FC| > 1.
# `data` must carry complex_id, pBHadj and medianLog2FC columns (data.table).
# Returns a 4-row data.table with counts for the categories
# unchanged / up & down / up / down (all rows labelled level = "complexes").
getCount <- function(data){
  all_ids  <- unique(data$complex_id)
  sig      <- data[pBHadj < 0.05]
  diff_ids <- unique(sig[abs(medianLog2FC) > 1]$complex_id)
  up_ids   <- unique(sig[medianLog2FC > 1]$complex_id)
  down_ids <- unique(sig[medianLog2FC < -1]$complex_id)
  data.table(
    level = rep("complexes", 4L),
    quant = c("unchanged", "up & down", "up", "down"),
    count = c(length(all_ids) - length(diff_ids),
              length(intersect(up_ids, down_ids)),
              length(setdiff(up_ids, down_ids)),
              length(setdiff(down_ids, up_ids)))
  )
}
# Stack the per-category counts for complexes without (proteoform = 1) and
# with (proteoform = 2) proteoforms, then draw them as a stacked bar chart
# written to complexDiffStats.pdf.
complexStats_noProteoforms <- getCount(noProteoforms_DiffExprComplex)
complexStats_noProteoforms[, proteoform := 1]
complexStats_withProteoforms <- getCount(withProteoforms_DiffExprComplex)
complexStats_withProteoforms[, proteoform := 2]
proteinFeatureDiffStats <- rbind(complexStats_noProteoforms, complexStats_withProteoforms)
# Fix the category display order and stack proteoform-containing complexes
# first (factor levels c(2, 1)).
proteinFeatureDiffStats$quant <- factor(proteinFeatureDiffStats$quant,
                                        levels = c("unchanged", "up & down", "up", "down"))
proteinFeatureDiffStats$proteoform <- factor(proteinFeatureDiffStats$proteoform, levels = c(2, 1))
pdf("complexDiffStats.pdf", width = 5, height = 3)
ggplot(proteinFeatureDiffStats, aes(x = quant, y = count, fill = proteoform)) +
  geom_col() +
  theme_classic() +
  theme(axis.title.x = element_blank()) +
  scale_fill_manual(values = c("#3288BD", "#999999"),
                    labels = c(expression(phantom(x) >= "2"), "1"),
                    name = "N proteoforms")
# Alternative legend placement kept from the original:
# theme(legend.position = "bottom")
dev.off()
# Fraction of differentially expressed complexes, split by proteoform status
# (proteoform == 1: no proteoform, == 2: contains proteoforms).
# BUG FIX: the original filtered on level == "proteins", but getCount() labels
# every row of proteinFeatureDiffStats as "complexes", so each sum below was
# taken over an empty subset (totals of 0 and NaN ratios).
all_proteins_noProteoforms <- sum(proteinFeatureDiffStats[proteoform == 1][level=="complexes"]$count)
diff_proteins_noProteoforms <- sum(proteinFeatureDiffStats[proteoform == 1][level=="complexes"][quant != "unchanged"]$count)
all_proteins_noProteoforms
diff_proteins_noProteoforms
diff_proteins_noProteoforms/all_proteins_noProteoforms
all_proteins_withProteoforms <- sum(proteinFeatureDiffStats[proteoform == 2][level=="complexes"]$count)
diff_proteins_withProteoforms <- sum(proteinFeatureDiffStats[proteoform == 2][level=="complexes"][quant != "unchanged"]$count)
all_proteins_withProteoforms
diff_proteins_withProteoforms
diff_proteins_withProteoforms/all_proteins_withProteoforms
###########################
###########################
###########################
#up
length(unique(complex_DiffExprComplex[pBHadj < 0.05][medianLog2FC < -1]$complex_id))
#down
length(unique(complex_DiffExprComplex[pBHadj < 0.05][medianLog2FC > 1]$complex_id))
###########################
###########################
###########################
complex_DiffExprComplex[, n_features := .N, by = c("complex_id")]
complex_DiffExprComplex_countDT <- unique(subset(complex_DiffExprComplex, select=c("complex_id","n_features")))
pdf("complex_nFeatures.pdf", width=4, height=4)
q <- ggplot(complex_DiffExprComplex_countDT,aes(x=n_features)) +
stat_bin(binwidth=1) +
stat_bin(binwidth=1, geom="text", aes(label=..count..), vjust=-0.5) +
labs(x="N co-elution features",y="N complexes") +
theme_classic() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
print(q)
dev.off()
###########################
###########################
###########################
complex_DiffExprProteoform[, is_proteoform := ifelse(any(grep("_",proteoform_id)),TRUE,FALSE), by = c("feature_id")]
proteoform_includingComplexes <- complex_DiffExprProteoform[has_proteoform==T][is_proteoform==T]
proteoform_includingComplexes[,n_proteoforms_total := length(unique(proteoform_id)), by=c("feature_id")]
proteoform_includingComplexes[,n_proteoforms_inFeature := length(unique(proteoform_id)), by=c("feature_id","complex_id","apex")]
proteoform_includingComplexes[n_proteoforms_total!=n_proteoforms_inFeature]
proteoform_includingComplexes[,max_proteoform_FC:=max(medianLog2FC), by=c("feature_id","complex_id","apex")]
proteoform_includingComplexes[,min_proteoform_FC:=min(medianLog2FC), by=c("feature_id","complex_id","apex")]
proteoform_includingComplexes[,diff_proteoform_FC:=max_proteoform_FC-min_proteoform_FC]
proteoform_includingComplexes_countDT <- unique(subset(proteoform_includingComplexes, select=c("feature_id","complex_id","apex","diff_proteoform_FC")))
interestingIDs <- unique(proteoform_includingComplexes_countDT[diff_proteoform_FC>2]$feature_id)
reassignedTraces_list <- readRDS("traces_list_reassignedProteoforms.rds")
design_matrix <- readRDS("design_matrix.rda")
calibrationFunctions <- readRDS("calibration.rds")
reassignedFeatures <- readRDS("reassignedProteoformFeatures.rds")
protTraces <- readRDS("protein_traces_list.rds")
scoredDataAll <- readRDS("scoredDataAll.rds")
pdf("proteoformSpecificComplexes_interestingIDs.pdf", height=5, width=8)
for (id in interestingIDs) {
complex <- proteoform_includingComplexes_countDT[feature_id==id]$complex_id[1]
sub <- subset(reassignedTraces_list, trace_subset_ids = id, trace_subset_type = "protein_id")
plotFeatures(feature_table = reassignedFeatures,
traces = reassignedTraces_list,
calibration=calibrationFunctions,
feature_id = id,
design_matrix=design_matrix,
annotation_label="proteoform_id",
colour_by="proteoform_id",
peak_area = T,
legend = T,
onlyBest = F,
monomer_MW=T)
plotFeatures(
feature_table=scoredDataAll,
traces=protTraces,
design_matrix = design_matrix,
feature_id = complex,
calibration=calibrationFunctions,
annotation_label="Entry_name",
peak_area=TRUE,
onlyBest = FALSE,
monomer_MW = TRUE,
PDF=F,
name=id
)
}
dev.off()
id = "Q99613"
complex <- proteoform_includingComplexes_countDT[feature_id==id]$complex_id[1]
sub <- subset(reassignedTraces_list, trace_subset_ids = id, trace_subset_type = "protein_id")
plotPeptideCluster(reassignedTraces,"Q99613",PDF = F)
pdf("proteoformSpecificComplexes_Q99613_peptides.pdf", height=5, width=8)
plotFeatures(feature_table = reassignedFeatures,
traces = reassignedTraces_list,
calibration=calibrationFunctions,
feature_id = id,
design_matrix=design_matrix,
annotation_label="proteoform_id",
colour_by="proteoform_id",
peak_area = F,
apex = F,
legend = T,
onlyBest = F,
monomer_MW=T)
dev.off()
pdf("proteoformSpecificComplexes_Q99613_complex.pdf", height=7, width=8)
plotFeatures(
feature_table=scoredDataAll,
traces=protTraces,
design_matrix = design_matrix,
feature_id = complex,
calibration=calibrationFunctions,
annotation_label="Entry_name",
peak_area=TRUE,
onlyBest = FALSE,
monomer_MW = TRUE,
PDF=F,
name=id
)
dev.off()
library(ggrepel)
# Volcano plot (log2 fold change vs. -log10 BH-adjusted p-value) of
# differential expression results.
#
# testResults   table from testDifferentialExpression(); needs (median)Log2FC
#               and pBHadj columns -- prefixed with "global_" when
#               level = "global" -- plus one id column among feature_id /
#               complex_id / protein_id / proteoform_id for highlighting.
# highlight     optional vector of ids drawn as red diamonds.
# FC_cutoff     fold-change cutoff marked by vertical dashed lines.
# pBHadj_cutoff adjusted p-value cutoff marked by a horizontal dashed line.
# name          basename of the output PDF (used only when PDF = TRUE).
# PDF           if TRUE, write to <name>.pdf (feature) or <name>_global.pdf.
# level         which set of columns to plot: "feature" or "global".
plotVolcano <- function(testResults,
                        highlight=NULL,
                        FC_cutoff=2,
                        pBHadj_cutoff=0.01,
                        name="volcanoPlot",
                        PDF=FALSE,
                        level=c("feature","global")) {
  level <- match.arg(level)  # errors on anything but "feature"/"global"

  # Pick the fold-change / p-value columns for the requested level.
  # BUG FIX: the global branch used to test for "medianLog2FC" while plotting
  # "global_medianLog2FC"; it now tests the column it actually plots.
  if (level == "feature") {
    fc_col   <- if ("medianLog2FC" %in% names(testResults)) "medianLog2FC" else "log2FC"
    p_col    <- "pBHadj"
    pdf_file <- paste0(name, ".pdf")
  } else {
    fc_col   <- if ("global_medianLog2FC" %in% names(testResults)) "global_medianLog2FC" else "global_log2FC"
    p_col    <- "global_pBHadj"
    pdf_file <- paste0(name, "_", level, ".pdf")
  }

  if (PDF) {
    pdf(pdf_file, height = 4, width = 4)
    on.exit(dev.off(), add = TRUE)  # close the device even if plotting errors
  }

  p <- ggplot(testResults, aes(x = .data[[fc_col]], y = -log10(.data[[p_col]]))) +
    geom_point(size = 1) +
    theme_classic() +
    geom_hline(yintercept = -log10(pBHadj_cutoff), colour = "red", linetype = "dashed") +
    geom_vline(xintercept = -log2(FC_cutoff), colour = "red", linetype = "dashed") +
    geom_vline(xintercept = log2(FC_cutoff), colour = "red", linetype = "dashed")

  if (!is.null(highlight)) {
    # Work out which id column the highlight values refer to. all() keeps the
    # check scalar, so a vector of ids no longer errors in the `if` condition
    # (length > 1 conditions are an error in current R).
    if ("feature_id" %in% names(testResults)) {
      id_col <- "feature_id"
    } else if ("complex_id" %in% names(testResults)) {
      id_col <- "complex_id"
    } else if (all(highlight %in% testResults$protein_id)) {
      id_col <- "protein_id"
    } else if (all(highlight %in% testResults$proteoform_id)) {
      id_col <- "proteoform_id"
    } else {
      stop("The testResults do not have the proper format. Input should be the result from testDifferentialExpression.")
    }
    marked <- testResults[testResults[[id_col]] %in% highlight, ]
    # Text labels (geom_text_repel) were commented out in the original and
    # remain disabled.
    p <- p + geom_point(data = marked,
                        aes(x = .data[[fc_col]], y = -log10(.data[[p_col]])),
                        colour = "red", fill = "red", size = 3, shape = 23)
  }

  print(p)
}
# Proteoform-level volcano with the Q99613 feature highlighted; writes
# complex_DiffExprComplex_Q99613.pdf (level defaults to "feature").
plotVolcano(complex_DiffExprProteoform, highlight=c("Q99613"), PDF = T, name = "complex_DiffExprComplex_Q99613")
|
# Script to plot an orbit along with its associated precisions
# in the parameterization method of the QBFBP/RTBP around L1/L2 of the Earth-Moon system
# Script for the PhD manuscript.
#
# Note that the variations of the energy are no longer computed as an error,
# but as an absolute value.
#
# BLB Summer 2017
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Init
#-------------------------------------------------------------------------------
source("source/init.R")
#-------------------------------------------------------------------------------
# Select Models & libration point
#-------------------------------------------------------------------------------
Li = "L1"        # libration point ("L1" or "L2" per the file header)
MODEL = "QBCP"   # dynamical model (header mentions QBFBP/RTBP as alternatives)
FWRK = "EM"      # framework: Earth-Moon system
# If from Server
# currentfolder = paste0(plotfolder(MODEL, FWRK, Li), "Serv/")
# size = "-51.0435_test" #1.5 7.5 15 30 45
# sizep = "51.0435_test"
# If local
currentfolder = paste0(plotfolder(MODEL, FWRK, Li), "orbits/")
size = "2.55_test" #40_test
sizep = size# "1.5_test" #40_test
# Orders
dfindex = c(5,10,15,20)   # expansion orders whose data files are loaded below
dfnumber = 1:length(dfindex)
maxOrder = max(dfindex)
#-------------------------------------------------------------------------------
# Parameters
#-------------------------------------------------------------------------------
maxPrec = 1e-6
#-------------------------------------------------------------------------------
#Normalized units (gammaR, c1R)
#-------------------------------------------------------------------------------
# NOTE(review): muR(), gamma(), c1(), Ldist() and SEMperiod() are presumably
# helpers from source/init.R -- verify. Also note that after the first line
# the function muR() is shadowed by its numeric result, and gamma() here
# masks base::gamma.
muR = muR(FWRK);
gammaR = gamma(Li, FWRK);
c1R = c1(Li, FWRK);
L = Ldist(FWRK)
Period = ifelse(MODEL=="QBCP", SEMperiod(FWRK), 2*pi)  # system period (2*pi for the RTBP)
#-------------------------------------------------------------------------------
# Type of plot (can be changed at will)
#-------------------------------------------------------------------------------
if(MODEL == "QBCP")
{
fplot = plotdf_line;
}else
{
fplot = plotdf_line_dashed;
}
fplot_path = plotdf_path;
#-------------------------------------------------------------------------------
# Strings
#-------------------------------------------------------------------------------
ts = "$t$ ($\\times T$)"
ns = "Order $N$"
xs = "\\textit{X} [km]"
ys = "\\textit{Y} [km]"
zs = "\\textit{Z} [km]"
#-------------------------------------------------------------------------------
# vH: Hamiltonian (energy) along the orbit, one data file per order.
#-------------------------------------------------------------------------------
# Load data: files <folder>/eH_Order_<order>_Size_<size>.txt, two columns
# (time, Hamiltonian).
#-------------------------------------------------------------------------------
dflist = list();
dftype = "eH_Order_"
for(i in dfnumber)
{
  dflist[[i]] = read.table(paste0(currentfolder, dftype,dfindex[i],"_Size_", size, ".txt"), header = F)
  colnames(dflist[[i]]) = c("t", "H")
  dflist[[i]]$order = dfindex[i]
  # Energy variation about its mean (delta H).
  dflist[[i]]$VH = dflist[[i]]$H - mean(dflist[[i]]$H)
}
# Concatenate the per-order results. do.call(rbind, ...) is correct for any
# number of orders, unlike the previous rbind(dflist[[1]], dflist[[2]]) +
# for(i in 3:length(dfnumber)) pattern, which failed whenever fewer than
# three orders were requested (3:2 evaluates to c(3, 2)).
vH = do.call(rbind, dflist)
#Plot
#-------------------------------------------------------------------------------
#Actual plot. NOTE(review): the y variable is the raw Hamiltonian "H" while
#the label reads $\delta H$; "VH" (H minus its mean) may have been intended.
pH = fplot(vH, "t", "H", ts, "$\\delta H$", "order", ns, 1)
#Continuous scale on the x axis, graduated in fractions of the period T
pH = pH + scale_x_continuous(breaks = seq(0.0, Period, 0.25*Period), labels = c("0.0", "0.25 $T$", "0.5 $T$", "0.75 $T$", "$T$")) #cont scale on the x axis
#Display the plot
pH = pH + legend_inside_theme
pH
#-------------------------------------------------------------------------------
# eI: invariance error of the parameterization, one data file per order.
#-------------------------------------------------------------------------------
# Load data: files <folder>/eI_Order_<order>_Size_<size>.txt, two columns
# (time, error).
#-------------------------------------------------------------------------------
dflist = list();
dftype = "eI_Order_"
for(i in dfnumber)
{
  dflist[[i]] = read.table(paste0(currentfolder, dftype, dfindex[i],"_Size_", size, ".txt"), header = F)
  colnames(dflist[[i]]) = c("x", "y")
  dflist[[i]]$order = dfindex[i]
}
# Concatenate all orders at once; robust for any number of orders (the
# previous two-step rbind + for(i in 3:length(...)) broke for < 3 orders).
eI = do.call(rbind, dflist)
#Postprocessing
#-------------------------------------------------------------------------------
# Scale the error by gammaR (normalized-coordinate to C-coordinate units).
eI$EI = eI$y*gammaR;
#Plot
#-------------------------------------------------------------------------------
pI = fplot(eI, "x", "EI", ts, "$E_I$", "order", ns, 1)
#Continuous scale on the x axis, graduated in fractions of the period T
pI = pI + scale_x_continuous(breaks = seq(0.0, Period, 0.25*Period),
labels = c("0.0", "0.25 $T$", "0.5 $T$", "0.75 $T$", "$T$")) #cont scale on the x axis
#Logscale on the y axis
pI = pI + scale_y_log10(breaks = 10^(-8:0*2), limits=c(1e-12,1e-2))
#Theme
pI = pI+legend_inside_theme
#Display
pI
#-------------------------------------------------------------------------------
# eO: orbital error (precision of the reduced dynamics), one file per order.
#-------------------------------------------------------------------------------
# Load data: files <folder>/eO_Order_<order>_Size_<size>.txt, two columns
# (time, error).
#-------------------------------------------------------------------------------
dflist = list();
dftype = "eO_Order_"
for(i in dfnumber)
{
  dflist[[i]] = read.table(paste0(currentfolder, dftype,dfindex[i],"_Size_", size, ".txt"), header = F)
  colnames(dflist[[i]]) = c("x", "y")
  dflist[[i]]$order = dfindex[i]
}
# Concatenate all orders at once; robust for any number of orders (the
# previous two-step rbind + for(i in 3:length(...)) broke for < 3 orders).
eO = do.call(rbind, dflist)
#Plot
#-------------------------------------------------------------------------------
pO = fplot(eO, "x", "y", ts, "$E_O$", "order", ns, 1)
#Continuous scale on the x axis, graduated in fractions of the period T
pO = pO + scale_x_continuous(breaks = seq(0.0, Period, 0.25*Period), labels = c("0.0", "0.25 $T$", "0.5 $T$", "0.75 $T$", "$T$")) #cont scale on the x axis
#Logscale on the y axis. NA (not NaN) is the documented sentinel for an
#unset limit in ggplot2 continuous scales.
pO = pO + scale_y_log10(limits=c(1e-10,NA), breaks = 10^(-8:0*2))
#Theme
pO = pO+legend_inside_theme
#Display
pO
#-------------------------------------------------------------------------------
# XYZ: orbit in NC coordinates, one data file per order, for both the full
# trajectory (XYZ_Order_*) and its parameterization counterpart
# (XYZ_PM_Order_*).
#-------------------------------------------------------------------------------
# Read and stack the trajectory files of a given prefix for every order.
# Returns a single data frame with columns t, x, y, z, order.
read_xyz_orbit = function(dftype)
{
  dflist = list()
  for(i in dfnumber)
  {
    dflist[[i]] = read.table(paste0(currentfolder, dftype,dfindex[i],"_Size_", size, ".txt"), header = F)
    colnames(dflist[[i]]) = c("t", "x", "y", "z")
    dflist[[i]]$order = dfindex[i]
  }
  # do.call(rbind, ...) is robust for any number of orders (the previous
  # two-step rbind + for(i in 3:length(...)) pattern broke for < 3 orders).
  do.call(rbind, dflist)
}
# Full orbit
orbit = read_xyz_orbit("XYZ_Order_")
# Orbit reconstructed from the parameterization (PM)
orbit_pm = read_xyz_orbit("XYZ_PM_Order_")
#-------------------------------------------------------------------------------
# Post-processing on coordinates and units
#-------------------------------------------------------------------------------
# From NC coordinates to C coordinates
# Conversion chain: NC -> C -> physical (km) and NC -> SYS -> physical (km).
# NOTE(review): assumed from the later use of xPH/yPH/zPH and x_PH/y_PH/z_PH
# columns that these helpers append new columns rather than overwrite —
# confirm against their definitions in source/init.R.
orbit = NCtoC(orbit, gammaR)
orbit = CtoPH(orbit, L)
orbit = NCtoSYS(orbit, gammaR, c1R)
orbit = SYStoPH(orbit, L)
orbit_pm = NCtoC(orbit_pm, gammaR)
orbit_pm = CtoPH(orbit_pm, L)
orbit_pm = NCtoSYS(orbit_pm, gammaR, c1R)
orbit_pm = SYStoPH(orbit_pm, L)
#-------------------------------------------------------------------------------
#Select half period time
# For each order, keep the sample closest to t = T/4. Requires plyr::ddply
# (presumably attached by source/init.R — confirm). Not used in the
# remainder of this script.
#-------------------------------------------------------------------------------
orbit_half = ddply(orbit, ~order, function(x){x[which.min(abs(x$t-0.25*Period)),]})
#-------------------------------------------------------------------------------
#Select when orbital precision is > maxPrec
# eO_prec: for each order, the sample of eO closest to the maxPrec threshold.
# orbit_prec: the orbit sample closest to the time at which the maxOrder
# solution reaches maxPrec.
#-------------------------------------------------------------------------------
eO_prec = ddply(eO, ~order, function(x){x[which.min(abs(x$y-maxPrec)),]}) #time for which eO ~ maxPrec
orbit_prec = ddply(orbit, ~order, function(orb){orb[which.min(abs(orb$t-eO_prec$x[which(eO_prec$order == maxOrder)])),]}) #select order maxOrder
#-------------------------------------------------------------------------------
#Center manifold
# Subsets of the highest-order parameterization used as reference curves and
# reference points (start, end, precision point) in the plots below.
#-------------------------------------------------------------------------------
orbitMaxOrder = orbit_pm[which(orbit_pm$order == maxOrder),]
orbitMaxOrder_prec = orbit_prec[which(orbit_prec$order==maxOrder),]
orbitMaxOrder_start = orbitMaxOrder[which(orbitMaxOrder$t == 0.0),]
orbitMaxOrder_end = orbitMaxOrder[which(orbitMaxOrder$t == max(orbitMaxOrder$t)),]
#-------------------------------------------------------------------------------
# Plot: orbit projected on the (X, Y) plane, in km, one colour per order,
# with the maxOrder parameterization overlaid as a dashed reference curve.
#-------------------------------------------------------------------------------
pxy = fplot_path (orbit, "xPH", "yPH", xs, ys, "order", ns, 1 )#, "order")
#Grey scale if needed
#pxy = pxy + scale_colour_grey(start = 0.9, end = 0.0, name="Order")
#Center manifold plot
pxy = pxy + geom_path(data = orbitMaxOrder, aes(xPH, yPH, color = "PM"), size = linesize[["line"]], linetype = "dashed", colour = "black")
# Starting point
pxy = pxy + geom_point(data = orbitMaxOrder_start, aes(xPH, yPH), size = 4, colour = "black", fill = "white", pch = 21)
# Point at a given precision
#pxy = pxy + geom_point(data = orbitMaxOrder_end, aes(xPH, yPH), size = 4, pch = 22, colour = "black", fill = "white")
#Scaling
pxy = pxy + scale_x_continuous(limits = c(-3.4e5, -3.1e5)) #cont scale on the x axis
pxy = pxy + scale_y_continuous(limits = c(-2.2e4, 2.2e4)) #cont scale on the y axis
#Theme
#pxy = pxy + theme_bw()+custom_theme
#Add an arrow to give the direction of motion along the orbit
# ai: sample index at which the arrow is anchored (also reused by the yz/xz
# plots below).
ai= 50
# NOTE(review): constants wrapped in aes() with the full layer data draw the
# same segment once per row; annotate("segment", ...) would draw it once.
pxy = pxy + geom_segment(aes(x = orbit$xPH[ai], y = orbit$yPH[ai], xend = orbit$xPH[ai+1], yend = orbit$yPH[ai+1]),
colour = muted("blue"),
arrow = arrow(length = unit(0.4, "cm"), type = "closed"))
pxy = pxy+legend_inside_theme+theme(legend.position = c(0.28,0.02))
pxy
#Add the Moon
#-------------------
moonR = 1737.10 #Moon's radius
moonPos = gammaR*L #Moon position in km wrt to Li
#pxy = addMoon(pxy, x = moonPos, y = 0, moonR, surfSize = 0.4, cratSize = 0.2)+ coord_fixed()
# Add Li (the libration point itself, with a LaTeX label below it)
dfemli = dflibpoint(Li, FWRK)
pxy = pxy + geom_point(data = dfemli, aes(x= x_PH, y = y_PH), size = 4, colour = "black", fill = "black", pch = 21)
pxy = pxy + annotate("text", x = dfemli$x_PH, y = -1.5e3, label = "\\textsc{eml}$_1$", size=10)
pxy
#-------------------------------------------------------------------------------
# Plot (yz): orbit projected on the (Y, Z) plane, in km, one colour per order,
# with the maxOrder parameterization overlaid as a dashed reference curve.
#-------------------------------------------------------------------------------
pyz = fplot_path (orbit, "yPH", "zPH", ys, zs, "order", ns, 1 )#, "order")
#Grey scale if needed
#pyz = pyz + scale_colour_grey(start = 0.9, end = 0.0, name="Order")
#Center manifold plot
pyz = pyz + geom_path(data = orbitMaxOrder, aes(yPH, zPH, color = "PM"), size = linesize[["line"]], linetype = "dashed", colour = "black")
# Starting point
pyz = pyz + geom_point(data = orbitMaxOrder_start, aes(yPH, zPH), size = 4, colour = "black", fill = "white", pch = 21)
# Point at a given precision
#pyz = pyz + geom_point(data = orbitMaxOrder_prec, aes(yPH, zPH), size = 4, pch = 22, colour = "black", fill = "white")
#Scaling
pyz = pyz + scale_x_continuous(limits = c(-2.2e4, 2.2e4)) #cont scale on the x axis
pyz = pyz + scale_y_continuous(limits = c(-3.3e4, 3.1e4)) #cont scale on the y axis
#Theme
#pyz = pyz + theme_bw()+custom_theme
#Annotation: direction-of-motion arrow anchored at sample index ai (defined
#in the xy plot section above).
pyz = pyz + geom_segment(aes(x = orbit$yPH[ai], y = orbit$zPH[ai], xend = orbit$yPH[ai+1], yend = orbit$zPH[ai+1]),
colour = muted("blue"),
arrow = arrow(length = unit(0.4, "cm"), type = "closed"))
pyz = pyz+legend_inside_theme+theme(legend.position = c(0.98,0.55))
# Add Li (the libration point itself, with a LaTeX label below it)
pyz = pyz + geom_point(data = dfemli, aes(x= y_PH, y = z_PH), size = 4, colour = "black", fill = "black", pch = 21)
pyz = pyz + annotate("text", x = dfemli$y_PH, y = -4e3, label = "\\textsc{eml}$_1$", size=10)
#Display the plot
pyz
#-------------------------------------------------------------------------------
# Plot (xz): orbit projected on the (X, Z) plane, in km. Unlike pxy/pyz this
# plot uses a grey colour scale and is not saved below.
#-------------------------------------------------------------------------------
pxz = fplot_path (orbit, "xPH", "zPH", xs, zs, "order", ns, 1 )#, "order")
#Grey scale if needed
pxz = pxz + scale_colour_grey(start = 0.9, end = 0.0, name="Order")
#Center manifold plot
pxz = pxz + geom_path(data = orbitMaxOrder, aes(xPH, zPH, color = "PM"), size = linesize[["line"]], linetype = "dashed", colour = "black")
# Starting point
pxz = pxz + geom_point(data = orbitMaxOrder_start, aes(xPH, zPH), size = 4, colour = "black", fill = "white", pch = 21)
# Point at a given precision
#pxz = pxz + geom_point(data = orbitMaxOrder_prec, aes(xPH, zPH), size = 4, pch = 22, colour = "black", fill = "white")
#Scaling
#pxz = pxz + scale_x_continuous(limits = c(-50, 40)) #cont scale on the x axis
#pxz = pxz + scale_y_continuous(limits = c(-3500, 3500)) #cont scale on the y axis
#Theme
#pxz = pxz + theme_bw()+custom_theme
#Annotation
#pxz = pxz + annotate("text", x = -0.3, y = -0.21, label = "CM \n Order maxOrder", parse = F, size = 8, colour = "limegreen", fontface = "bold")
#pxz = pxz + geom_segment(aes(x = -0.25, y = -0.2, xend = -0.19, yend = -0.2), arrow = arrow(length = unit(0.5, "cm")), size = psize)
#Display the plot
pxz
# Direction-of-motion arrow anchored at sample index ai (defined in the xy
# plot section above).
pxz = pxz + geom_segment(aes(x = orbit$xPH[ai], y = orbit$zPH[ai], xend = orbit$xPH[ai+1], yend = orbit$zPH[ai+1]),
colour = muted("blue"),
arrow = arrow(length = unit(0.4, "cm"), type = "closed"))
pxz = pxz+legend_inside_theme
# Deliberate guard: halts sourcing here so the file-saving section below is
# only executed manually.
stop()
#-------------------------------------------------------------------------------
# Save each figure in its own standalone TikZ/LaTeX file.
# (pI, pO, pH, pxy, pyz are exported; pxz is not.)
#-------------------------------------------------------------------------------
tex_plots = list(eI = pI, eO = pO, eH = pH, XY = pxy, YZ = pyz)
for(tag in names(tex_plots))
{
  ggplot2tikz_phd(tex_plots[[tag]], xSize, ySize,
                  file = paste0(currentfolder, "R_", tag, "_Size_", sizep, ".tex"))
}
#-------------------------------------------------------------------------------
# Save in pdf file (disabled)
#-------------------------------------------------------------------------------
# ggsave(pH, file = paste0(currentfolder, "R_eH_Size_", sizep, ".pdf"))
# ggsave(pI, file = paste0(currentfolder, "R_eI_Size_", sizep, ".pdf"))
# ggsave(pO, file = paste0(currentfolder, "R_eO_Size_", sizep, ".pdf"))
# ggsave(pxy, file = paste0(currentfolder, "R_XY_Size_", sizep, ".pdf"))
# ggsave(pyz, file = paste0(currentfolder, "R_YZ_Size_", sizep, ".pdf"))
| /Single_Orbit/Single_Orbit_PhD_EML1_Big_3D.R | no_license | lebihanbastien/RFTDA | R | false | false | 15,130 | r | # Script to plot an orbit along with its associated precisions
# in the parameterization method of the QBFBP/RTBP around L1/L2 of the Earth-Moon system
# Script for the PhD manuscript.
#
# Note that the variations of the energy are no longer computed as an error,
# but as an absolute value.
#
# BLB Summer 2017
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Init
#-------------------------------------------------------------------------------
# Load the common initialisation routines (plotting helpers, unit conversions,
# constants such as linesize and the custom themes used below).
source("source/init.R")
#-------------------------------------------------------------------------------
# Model, framework and libration point selection
#-------------------------------------------------------------------------------
Li    <- "L1"
MODEL <- "QBCP"
FWRK  <- "EM"
# Data produced on the server:
# currentfolder = paste0(plotfolder(MODEL, FWRK, Li), "Serv/")
# size = "-51.0435_test" #1.5 7.5 15 30 45
# sizep = "51.0435_test"
# Data produced locally:
currentfolder <- paste0(plotfolder(MODEL, FWRK, Li), "orbits/")
size  <- "2.55_test"  # e.g. 40_test
sizep <- size         # e.g. "1.5_test" / 40_test
# Orders of the parameterization that are compared in the plots
dfindex  <- c(5, 10, 15, 20)
dfnumber <- seq_along(dfindex)
maxOrder <- max(dfindex)
#-------------------------------------------------------------------------------
# Parameters
#-------------------------------------------------------------------------------
# Orbital precision threshold used later to select points on the orbit
maxPrec <- 1e-6
#-------------------------------------------------------------------------------
# Normalized units (gammaR, c1R)
#-------------------------------------------------------------------------------
muR    <- muR(FWRK)
gammaR <- gamma(Li, FWRK)
c1R    <- c1(Li, FWRK)
L      <- Ldist(FWRK)
Period <- ifelse(MODEL == "QBCP", SEMperiod(FWRK), 2 * pi)
#-------------------------------------------------------------------------------
# Type of plot (can be changed at will): solid lines for the QBCP,
# dashed lines otherwise.
#-------------------------------------------------------------------------------
fplot      <- if (MODEL == "QBCP") plotdf_line else plotdf_line_dashed
fplot_path <- plotdf_path
#-------------------------------------------------------------------------------
# Axis label strings (LaTeX, consumed by the tikz output)
#-------------------------------------------------------------------------------
ts <- "$t$ ($\\times T$)"
ns <- "Order $N$"
xs <- "\\textit{X} [km]"
ys <- "\\textit{Y} [km]"
zs <- "\\textit{Z} [km]"
#-------------------------------------------------------------------------------
# vH: Hamiltonian (energy) along the orbit, one data file per order.
#-------------------------------------------------------------------------------
# Load data: files <folder>/eH_Order_<order>_Size_<size>.txt, two columns
# (time, Hamiltonian).
#-------------------------------------------------------------------------------
dflist = list();
dftype = "eH_Order_"
for(i in dfnumber)
{
  dflist[[i]] = read.table(paste0(currentfolder, dftype,dfindex[i],"_Size_", size, ".txt"), header = F)
  colnames(dflist[[i]]) = c("t", "H")
  dflist[[i]]$order = dfindex[i]
  # Energy variation about its mean (delta H).
  dflist[[i]]$VH = dflist[[i]]$H - mean(dflist[[i]]$H)
}
# Concatenate the per-order results. do.call(rbind, ...) is correct for any
# number of orders, unlike the previous rbind(dflist[[1]], dflist[[2]]) +
# for(i in 3:length(dfnumber)) pattern, which failed whenever fewer than
# three orders were requested (3:2 evaluates to c(3, 2)).
vH = do.call(rbind, dflist)
#Plot
#-------------------------------------------------------------------------------
#Actual plot. NOTE(review): the y variable is the raw Hamiltonian "H" while
#the label reads $\delta H$; "VH" (H minus its mean) may have been intended.
pH = fplot(vH, "t", "H", ts, "$\\delta H$", "order", ns, 1)
#Continuous scale on the x axis, graduated in fractions of the period T
pH = pH + scale_x_continuous(breaks = seq(0.0, Period, 0.25*Period), labels = c("0.0", "0.25 $T$", "0.5 $T$", "0.75 $T$", "$T$")) #cont scale on the x axis
#Display the plot
pH = pH + legend_inside_theme
pH
#-------------------------------------------------------------------------------
# eI: invariance error of the parameterization, one data file per order.
#-------------------------------------------------------------------------------
# Load data: files <folder>/eI_Order_<order>_Size_<size>.txt, two columns
# (time, error).
#-------------------------------------------------------------------------------
dflist = list();
dftype = "eI_Order_"
for(i in dfnumber)
{
  dflist[[i]] = read.table(paste0(currentfolder, dftype, dfindex[i],"_Size_", size, ".txt"), header = F)
  colnames(dflist[[i]]) = c("x", "y")
  dflist[[i]]$order = dfindex[i]
}
# Concatenate all orders at once; robust for any number of orders (the
# previous two-step rbind + for(i in 3:length(...)) broke for < 3 orders).
eI = do.call(rbind, dflist)
#Postprocessing
#-------------------------------------------------------------------------------
# Scale the error by gammaR (normalized-coordinate to C-coordinate units).
eI$EI = eI$y*gammaR;
#Plot
#-------------------------------------------------------------------------------
pI = fplot(eI, "x", "EI", ts, "$E_I$", "order", ns, 1)
#Continuous scale on the x axis, graduated in fractions of the period T
pI = pI + scale_x_continuous(breaks = seq(0.0, Period, 0.25*Period),
labels = c("0.0", "0.25 $T$", "0.5 $T$", "0.75 $T$", "$T$")) #cont scale on the x axis
#Logscale on the y axis
pI = pI + scale_y_log10(breaks = 10^(-8:0*2), limits=c(1e-12,1e-2))
#Theme
pI = pI+legend_inside_theme
#Display
pI
#-------------------------------------------------------------------------------
# eO: orbital error (precision of the reduced dynamics), one file per order.
#-------------------------------------------------------------------------------
# Load data: files <folder>/eO_Order_<order>_Size_<size>.txt, two columns
# (time, error).
#-------------------------------------------------------------------------------
dflist = list();
dftype = "eO_Order_"
for(i in dfnumber)
{
  dflist[[i]] = read.table(paste0(currentfolder, dftype,dfindex[i],"_Size_", size, ".txt"), header = F)
  colnames(dflist[[i]]) = c("x", "y")
  dflist[[i]]$order = dfindex[i]
}
# Concatenate all orders at once; robust for any number of orders (the
# previous two-step rbind + for(i in 3:length(...)) broke for < 3 orders).
eO = do.call(rbind, dflist)
#Plot
#-------------------------------------------------------------------------------
pO = fplot(eO, "x", "y", ts, "$E_O$", "order", ns, 1)
#Continuous scale on the x axis, graduated in fractions of the period T
pO = pO + scale_x_continuous(breaks = seq(0.0, Period, 0.25*Period), labels = c("0.0", "0.25 $T$", "0.5 $T$", "0.75 $T$", "$T$")) #cont scale on the x axis
#Logscale on the y axis. NA (not NaN) is the documented sentinel for an
#unset limit in ggplot2 continuous scales.
pO = pO + scale_y_log10(limits=c(1e-10,NA), breaks = 10^(-8:0*2))
#Theme
pO = pO+legend_inside_theme
#Display
pO
#-------------------------------------------------------------------------------
# XYZ: orbit in NC coordinates, one data file per order, for both the full
# trajectory (XYZ_Order_*) and its parameterization counterpart
# (XYZ_PM_Order_*).
#-------------------------------------------------------------------------------
# Read and stack the trajectory files of a given prefix for every order.
# Returns a single data frame with columns t, x, y, z, order.
read_xyz_orbit = function(dftype)
{
  dflist = list()
  for(i in dfnumber)
  {
    dflist[[i]] = read.table(paste0(currentfolder, dftype,dfindex[i],"_Size_", size, ".txt"), header = F)
    colnames(dflist[[i]]) = c("t", "x", "y", "z")
    dflist[[i]]$order = dfindex[i]
  }
  # do.call(rbind, ...) is robust for any number of orders (the previous
  # two-step rbind + for(i in 3:length(...)) pattern broke for < 3 orders).
  do.call(rbind, dflist)
}
# Full orbit
orbit = read_xyz_orbit("XYZ_Order_")
# Orbit reconstructed from the parameterization (PM)
orbit_pm = read_xyz_orbit("XYZ_PM_Order_")
#-------------------------------------------------------------------------------
# Post-processing on coordinates and units
#-------------------------------------------------------------------------------
# From NC coordinates to C coordinates
# Conversion chain: NC -> C -> physical (km) and NC -> SYS -> physical (km).
# NOTE(review): assumed from the later use of xPH/yPH/zPH and x_PH/y_PH/z_PH
# columns that these helpers append new columns rather than overwrite —
# confirm against their definitions in source/init.R.
orbit = NCtoC(orbit, gammaR)
orbit = CtoPH(orbit, L)
orbit = NCtoSYS(orbit, gammaR, c1R)
orbit = SYStoPH(orbit, L)
orbit_pm = NCtoC(orbit_pm, gammaR)
orbit_pm = CtoPH(orbit_pm, L)
orbit_pm = NCtoSYS(orbit_pm, gammaR, c1R)
orbit_pm = SYStoPH(orbit_pm, L)
#-------------------------------------------------------------------------------
#Select half period time
# For each order, keep the sample closest to t = T/4. Requires plyr::ddply
# (presumably attached by source/init.R — confirm). Not used in the
# remainder of this script.
#-------------------------------------------------------------------------------
orbit_half = ddply(orbit, ~order, function(x){x[which.min(abs(x$t-0.25*Period)),]})
#-------------------------------------------------------------------------------
#Select when orbital precision is > maxPrec
# eO_prec: for each order, the sample of eO closest to the maxPrec threshold.
# orbit_prec: the orbit sample closest to the time at which the maxOrder
# solution reaches maxPrec.
#-------------------------------------------------------------------------------
eO_prec = ddply(eO, ~order, function(x){x[which.min(abs(x$y-maxPrec)),]}) #time for which eO ~ maxPrec
orbit_prec = ddply(orbit, ~order, function(orb){orb[which.min(abs(orb$t-eO_prec$x[which(eO_prec$order == maxOrder)])),]}) #select order maxOrder
#-------------------------------------------------------------------------------
#Center manifold
# Subsets of the highest-order parameterization used as reference curves and
# reference points (start, end, precision point) in the plots below.
#-------------------------------------------------------------------------------
orbitMaxOrder = orbit_pm[which(orbit_pm$order == maxOrder),]
orbitMaxOrder_prec = orbit_prec[which(orbit_prec$order==maxOrder),]
orbitMaxOrder_start = orbitMaxOrder[which(orbitMaxOrder$t == 0.0),]
orbitMaxOrder_end = orbitMaxOrder[which(orbitMaxOrder$t == max(orbitMaxOrder$t)),]
#-------------------------------------------------------------------------------
# Plot: orbit projected on the (X, Y) plane, in km, one colour per order,
# with the maxOrder parameterization overlaid as a dashed reference curve.
#-------------------------------------------------------------------------------
pxy = fplot_path (orbit, "xPH", "yPH", xs, ys, "order", ns, 1 )#, "order")
#Grey scale if needed
#pxy = pxy + scale_colour_grey(start = 0.9, end = 0.0, name="Order")
#Center manifold plot
pxy = pxy + geom_path(data = orbitMaxOrder, aes(xPH, yPH, color = "PM"), size = linesize[["line"]], linetype = "dashed", colour = "black")
# Starting point
pxy = pxy + geom_point(data = orbitMaxOrder_start, aes(xPH, yPH), size = 4, colour = "black", fill = "white", pch = 21)
# Point at a given precision
#pxy = pxy + geom_point(data = orbitMaxOrder_end, aes(xPH, yPH), size = 4, pch = 22, colour = "black", fill = "white")
#Scaling
pxy = pxy + scale_x_continuous(limits = c(-3.4e5, -3.1e5)) #cont scale on the x axis
pxy = pxy + scale_y_continuous(limits = c(-2.2e4, 2.2e4)) #cont scale on the y axis
#Theme
#pxy = pxy + theme_bw()+custom_theme
#Add an arrow to give the direction of motion along the orbit
# ai: sample index at which the arrow is anchored (also reused by the yz/xz
# plots below).
ai= 50
# NOTE(review): constants wrapped in aes() with the full layer data draw the
# same segment once per row; annotate("segment", ...) would draw it once.
pxy = pxy + geom_segment(aes(x = orbit$xPH[ai], y = orbit$yPH[ai], xend = orbit$xPH[ai+1], yend = orbit$yPH[ai+1]),
colour = muted("blue"),
arrow = arrow(length = unit(0.4, "cm"), type = "closed"))
pxy = pxy+legend_inside_theme+theme(legend.position = c(0.28,0.02))
pxy
#Add the Moon
#-------------------
moonR = 1737.10 #Moon's radius
moonPos = gammaR*L #Moon position in km wrt to Li
#pxy = addMoon(pxy, x = moonPos, y = 0, moonR, surfSize = 0.4, cratSize = 0.2)+ coord_fixed()
# Add Li (the libration point itself, with a LaTeX label below it)
dfemli = dflibpoint(Li, FWRK)
pxy = pxy + geom_point(data = dfemli, aes(x= x_PH, y = y_PH), size = 4, colour = "black", fill = "black", pch = 21)
pxy = pxy + annotate("text", x = dfemli$x_PH, y = -1.5e3, label = "\\textsc{eml}$_1$", size=10)
pxy
#-------------------------------------------------------------------------------
# Plot (yz): orbit projected on the (Y, Z) plane, in km, one colour per order,
# with the maxOrder parameterization overlaid as a dashed reference curve.
#-------------------------------------------------------------------------------
pyz = fplot_path (orbit, "yPH", "zPH", ys, zs, "order", ns, 1 )#, "order")
#Grey scale if needed
#pyz = pyz + scale_colour_grey(start = 0.9, end = 0.0, name="Order")
#Center manifold plot
pyz = pyz + geom_path(data = orbitMaxOrder, aes(yPH, zPH, color = "PM"), size = linesize[["line"]], linetype = "dashed", colour = "black")
# Starting point
pyz = pyz + geom_point(data = orbitMaxOrder_start, aes(yPH, zPH), size = 4, colour = "black", fill = "white", pch = 21)
# Point at a given precision
#pyz = pyz + geom_point(data = orbitMaxOrder_prec, aes(yPH, zPH), size = 4, pch = 22, colour = "black", fill = "white")
#Scaling
pyz = pyz + scale_x_continuous(limits = c(-2.2e4, 2.2e4)) #cont scale on the x axis
pyz = pyz + scale_y_continuous(limits = c(-3.3e4, 3.1e4)) #cont scale on the y axis
#Theme
#pyz = pyz + theme_bw()+custom_theme
#Annotation: direction-of-motion arrow anchored at sample index ai (defined
#in the xy plot section above).
pyz = pyz + geom_segment(aes(x = orbit$yPH[ai], y = orbit$zPH[ai], xend = orbit$yPH[ai+1], yend = orbit$zPH[ai+1]),
colour = muted("blue"),
arrow = arrow(length = unit(0.4, "cm"), type = "closed"))
pyz = pyz+legend_inside_theme+theme(legend.position = c(0.98,0.55))
# Add Li (the libration point itself, with a LaTeX label below it)
pyz = pyz + geom_point(data = dfemli, aes(x= y_PH, y = z_PH), size = 4, colour = "black", fill = "black", pch = 21)
pyz = pyz + annotate("text", x = dfemli$y_PH, y = -4e3, label = "\\textsc{eml}$_1$", size=10)
#Display the plot
pyz
#-------------------------------------------------------------------------------
# Plot (xz): orbit projected on the (X, Z) plane, in km. Unlike pxy/pyz this
# plot uses a grey colour scale and is not saved below.
#-------------------------------------------------------------------------------
pxz = fplot_path (orbit, "xPH", "zPH", xs, zs, "order", ns, 1 )#, "order")
#Grey scale if needed
pxz = pxz + scale_colour_grey(start = 0.9, end = 0.0, name="Order")
#Center manifold plot
pxz = pxz + geom_path(data = orbitMaxOrder, aes(xPH, zPH, color = "PM"), size = linesize[["line"]], linetype = "dashed", colour = "black")
# Starting point
pxz = pxz + geom_point(data = orbitMaxOrder_start, aes(xPH, zPH), size = 4, colour = "black", fill = "white", pch = 21)
# Point at a given precision
#pxz = pxz + geom_point(data = orbitMaxOrder_prec, aes(xPH, zPH), size = 4, pch = 22, colour = "black", fill = "white")
#Scaling
#pxz = pxz + scale_x_continuous(limits = c(-50, 40)) #cont scale on the x axis
#pxz = pxz + scale_y_continuous(limits = c(-3500, 3500)) #cont scale on the y axis
#Theme
#pxz = pxz + theme_bw()+custom_theme
#Annotation
#pxz = pxz + annotate("text", x = -0.3, y = -0.21, label = "CM \n Order maxOrder", parse = F, size = 8, colour = "limegreen", fontface = "bold")
#pxz = pxz + geom_segment(aes(x = -0.25, y = -0.2, xend = -0.19, yend = -0.2), arrow = arrow(length = unit(0.5, "cm")), size = psize)
#Display the plot
pxz
# Direction-of-motion arrow anchored at sample index ai (defined in the xy
# plot section above).
pxz = pxz + geom_segment(aes(x = orbit$xPH[ai], y = orbit$zPH[ai], xend = orbit$xPH[ai+1], yend = orbit$zPH[ai+1]),
colour = muted("blue"),
arrow = arrow(length = unit(0.4, "cm"), type = "closed"))
pxz = pxz+legend_inside_theme
# Deliberate guard: halts sourcing here so the file-saving section below is
# only executed manually.
stop()
#-------------------------------------------------------------------------------
# Save each figure in its own standalone TikZ/LaTeX file.
# (pI, pO, pH, pxy, pyz are exported; pxz is not.)
#-------------------------------------------------------------------------------
tex_plots = list(eI = pI, eO = pO, eH = pH, XY = pxy, YZ = pyz)
for(tag in names(tex_plots))
{
  ggplot2tikz_phd(tex_plots[[tag]], xSize, ySize,
                  file = paste0(currentfolder, "R_", tag, "_Size_", sizep, ".tex"))
}
#-------------------------------------------------------------------------------
# Save in pdf file (disabled)
#-------------------------------------------------------------------------------
# ggsave(pH, file = paste0(currentfolder, "R_eH_Size_", sizep, ".pdf"))
# ggsave(pI, file = paste0(currentfolder, "R_eI_Size_", sizep, ".pdf"))
# ggsave(pO, file = paste0(currentfolder, "R_eO_Size_", sizep, ".pdf"))
# ggsave(pxy, file = paste0(currentfolder, "R_XY_Size_", sizep, ".pdf"))
# ggsave(pyz, file = paste0(currentfolder, "R_YZ_Size_", sizep, ".pdf"))
|
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.84363065730702e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615827606-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 487 | r | testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.84363065730702e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
read_peptide_tsv_MaxQuant_volcano <- function(peptide_file, sample_pattern, min_valid_sample = 2,
                                              intensity_metric = "PSM") {
  # Read a MaxQuant combined peptides.txt and reshape it for volcano plotting.
  #
  # peptide_file:     path to the MaxQuant peptide-level TSV export.
  # sample_pattern:   regex fragment identifying this sample's columns.
  # min_valid_sample: unused here; kept for interface parity with the other
  #                   *_volcano readers (filtering happens downstream in
  #                   combine_two_volcano_dfs()).
  # intensity_metric: "PSM" (Experiment.* columns), "Intensity", or "Area"
  #                   (LFQ.intensity.* columns).
  #
  # Returns the peptide data frame with PEPTIDE / sequence / protein columns
  # plus one Volcano_intensity_<i> column per matched sample column,
  # restricted to peptides observed in at least one sample.
  check_file(peptide_file, "MaxQuant")
  peptide_import <- read.csv(peptide_file, sep = "\t", header = TRUE)
  filetype(peptide_import, "Combined", "MaxQuant")
  names(peptide_import)[names(peptide_import) == "Intensity"] <- "summed_intensity"
  peptides <- peptide_import
  peptides$sequence <- peptides$Sequence
  # PTM site-ID columns ("<mod>.site.IDs") get appended to the bare sequence
  # to build a unique modified-peptide identifier.  drop = FALSE keeps a
  # one-column selection as a data frame (the original collapsed it to a
  # vector, which broke ptms[i, j] indexing).
  ptms <- peptide_import[, grepl(".*site.IDs", names(peptide_import)), drop = FALSE]
  ptm_names <- str_replace_all(string = names(ptms), pattern = "\\.", "")
  ptm_names <- str_replace_all(string = ptm_names, pattern = "siteIDs", "")
  peptides$PEPTIDE <- peptides$sequence
  for (i in seq_len(nrow(peptides))) {
    for (j in seq_len(ncol(ptms))) {
      if (!is.na(ptms[i, j]) && ptms[i, j] != "") {
        peptides$PEPTIDE[i] <- paste0(peptides$PEPTIDE[i], ";", ptm_names[j], ":", ptms[i, j])
      }
    }
  }
  # Collapse "db|ACCESSION|NAME" entries in Proteins down to accessions.
  peptides$protein <- "Not Found"
  for (i in seq_len(nrow(peptides))) {
    if (peptides$Proteins[i] != "") {
      accessions <- vapply(str_split(peptides$Proteins[i], ";")[[1]],
                           function(p) str_split(p, "\\|")[[1]][2],
                           character(1))
      peptides$protein[i] <- paste(accessions, collapse = ";")
    }
  }
  if (length(names(peptides)[grepl(sample_pattern, names(peptides))]) <= 0) {
    stop("Sample Pattern not found in file.")
  }
  pattern <- switch(intensity_metric,
                    "PSM"       = paste0("Experiment", ".*", sample_pattern, ".*"),
                    "Intensity" = paste0("Intensity", ".*", sample_pattern, ".*"),
                    "Area"      = paste0("LFQ.intensity", ".*", sample_pattern, ".*"),
                    stop("Unknown intensity_metric: ", intensity_metric))
  # drop = FALSE again: a single matching sample column must stay a data
  # frame so ncol() / column extraction below keep working.
  dataframe <- peptides[, grepl(pattern, names(peptides)), drop = FALSE]
  dataframe[dataframe == 0] <- NA   # zero intensity == not observed
  sample_count <- ncol(dataframe)
  # Column-wise copy instead of the original O(rows*cols) cell-by-cell loop.
  for (j in seq_len(sample_count)) {
    peptides[[paste0("Volcano_intensity_", j)]] <- dataframe[[j]]
  }
  peptides$count <- rowSums(!is.na(dataframe))
  # Keep only peptides observed in at least one of the matched samples.
  # (The original had a large unreachable block after this return that
  # referenced undefined `sample` / `filter` variables; it has been removed.)
  peptides <- peptides[peptides$count > 0, ]
  peptides
}
read_peptide_tsv_Metamorpheus_volcano <- function(peptide_file, sample_pattern, min_valid_sample = 2,
                                                  intensity_metric = "PSM") {
  # Read a MetaMorpheus QUANTIFIED peptides TSV and reshape it for volcano
  # plotting.
  #
  # peptide_file:     path to the MetaMorpheus peptide-level TSV export; files
  #                   with a Total.Ion.Current column (non-quantified reports)
  #                   are rejected.
  # sample_pattern:   regex fragment identifying this sample's Intensity_ columns.
  # min_valid_sample / intensity_metric: unused here (MetaMorpheus exports
  #                   only intensities); kept for interface parity with the
  #                   other *_volcano readers.
  #
  # Returns the peptide data frame with PEPTIDE / sequence / protein columns
  # plus one Volcano_intensity_<i> column per matched Intensity_ column.
  check_file(peptide_file, "Metamorpheus")
  peptides <- read.csv(peptide_file, sep = "\t", header = TRUE)
  filetype(peptides, "Combined", "Metamorpheus")
  if (length(names(peptides)[grepl("Total.Ion.Current", names(peptides))]) > 0) {
    stop("Only quantified peptide results may be used for volcano plot from MetaMorpheus")
  }
  if (length(names(peptides)[grepl(sample_pattern, names(peptides))]) <= 0) {
    stop("Sample Pattern not found in file.")
  }
  names(peptides)[names(peptides) == "Sequence"] <- "PEPTIDE"
  peptides$protein <- peptides$Protein.Groups
  peptides$sequence <- peptides$Base.Sequence
  pattern <- paste0("Intensity_", ".*", sample_pattern, ".*")
  # drop = FALSE keeps a one-column selection as a data frame (the original
  # collapsed it to a vector, breaking ncol()/[i, j] indexing).
  dataframe <- peptides[, grepl(pattern, names(peptides)), drop = FALSE]
  dataframe[dataframe == 0] <- NA   # zero intensity == not observed
  # Column-wise copy instead of the original cell-by-cell double loop.
  for (j in seq_len(ncol(dataframe))) {
    peptides[[paste0("Volcano_intensity_", j)]] <- dataframe[[j]]
  }
  peptides
}
read_peptide_tsv_MSfragger_volcano <- function(peptide_file, sample_pattern, min_valid_sample = 2,
                                               intensity_metric = "PSM") {
  # Read an MSFragger combined peptide TSV and reshape it for volcano plotting.
  #
  # peptide_file:     path to the MSFragger peptide-level TSV export.
  # sample_pattern:   regex fragment identifying this sample's columns.
  # min_valid_sample: unused here; kept for interface parity with the other
  #                   *_volcano readers.
  # intensity_metric: "PSM" (Spectral.Count columns), "Intensity", or "Area"
  #                   (MaxLFQ.Intensity columns).
  #
  # Returns the peptide data frame with PEPTIDE / sequence / protein columns
  # plus one Volcano_intensity_<i> column per matched sample column.
  check_file(peptide_file, "MSfragger")
  peptide_import <- read.csv(peptide_file, sep = "\t", header = TRUE)
  filetype(peptide_import, "Combined", "MSfragger")
  peptides <- peptide_import
  names(peptides)[names(peptides) == "Sequence" | names(peptides) == "Peptide.Sequence"] <- "sequence"
  peptides$PEPTIDE <- peptides$sequence
  # protein = lead Protein.ID plus accessions parsed from the
  # "db|ACCESSION|NAME" entries in Mapped.Proteins.
  peptides$protein <- NA
  for (i in seq_len(nrow(peptides))) {
    if (peptides$Mapped.Proteins[i] != "") {
      mapped <- str_split(peptides$Mapped.Proteins[i], ",")[[1]]
      accessions <- vapply(mapped, function(p) str_split(p, "\\|")[[1]][2], character(1))
      peptides$protein[i] <- paste0(peptides$Protein.ID[i], ";",
                                    paste(accessions, collapse = ";"))
    } else {
      peptides$protein[i] <- peptides$Protein.ID[i]
    }
  }
  if (length(names(peptides)[grepl(sample_pattern, names(peptides))]) <= 0) {
    stop("Sample Pattern not found in file.")
  }
  # NOTE(review): the "Intensity" pattern also matches MaxLFQ.Intensity
  # columns, and "Spectral\\.Count" also matches Unique/Total.Spectral.Count
  # variants — preserved from the original; tighten if that over-selects.
  pattern <- switch(intensity_metric,
                    "PSM"       = paste0(".*", sample_pattern, ".*", "Spectral\\.Count"),
                    "Intensity" = paste0(".*", sample_pattern, ".*", "Intensity"),
                    "Area"      = paste0(".*", sample_pattern, ".*", "MaxLFQ.Intensity"),
                    stop("Unknown intensity_metric: ", intensity_metric))
  # drop = FALSE keeps a one-column selection as a data frame.
  dataframe <- peptide_import[, grepl(pattern, names(peptide_import)), drop = FALSE]
  dataframe[dataframe == 0] <- NA   # zero == not observed
  # Column-wise copy instead of the original cell-by-cell double loop.
  for (j in seq_len(ncol(dataframe))) {
    peptides[[paste0("Volcano_intensity_", j)]] <- dataframe[[j]]
  }
  peptides
}
read_peptide_csv_PEAKS_volcano <- function(peptide_file, sample_pattern, min_valid_sample = 2,
                                           intensity_metric = "PSM") {
  # Read a PEAKS peptide CSV export and reshape it for volcano plotting.
  #
  # peptide_file:     path to the PEAKS peptide-level CSV export.
  # sample_pattern:   regex fragment identifying this sample's columns.
  # min_valid_sample: unused here; kept for interface parity with the other
  #                   *_volcano readers.
  # intensity_metric: "PSM" (X.Spec columns), "Intensity", or "Area".
  #
  # Returns the peptide data frame with PEPTIDE / sequence / protein columns
  # plus one Volcano_intensity_<i> column per matched sample column.
  check_file(peptide_file, "PEAKS")
  peptide_import <- read.csv(peptide_file)
  filetype(peptide_import, "Combined", "PEAKS")
  names(peptide_import)[names(peptide_import) == "X.Spec"] <- "total_spectra"
  names(peptide_import)[names(peptide_import) == "Avg..Area"] <- "Average_LFQ_value"
  # Rename the ratio columns sandwiched between Sample.Profile and
  # Group.Profile to Average_<idx> so they don't get picked up as samples.
  if (length(names(peptide_import)[grepl("Group.Profile..Ratio.", names(peptide_import))]) > 0) {
    start <- which(grepl(pattern = "Sample.Profile", x = names(peptide_import)))
    end <- which(grepl(pattern = "Group.Profile", x = names(peptide_import)))
    # BUG FIX: when the columns are adjacent, the original (start+1):(end-1)
    # counted DOWN and mangled unrelated column names; guard the range.
    if (end - start > 1) {
      for (i in (start + 1):(end - 1)) {
        names(peptide_import)[i] <- paste0("Average_", i)
      }
    }
  }
  peptides <- peptide_import
  peptides[peptides == 0] <- NA   # whole-frame, matching the original behavior
  peptides$sequence <- str_remove_all(peptides$Peptide, "[a-z1-9()+-:.]")
  peptides$PEPTIDE <- peptides$Peptide
  # Accession entries look like "ACCESSION|NAME"; keep the accession part.
  peptides$protein <- NA
  for (i in seq_len(nrow(peptides))) {
    accessions <- vapply(str_split(peptides$Accession[i], ";")[[1]],
                         function(p) str_split(p, "\\|")[[1]][1],
                         character(1))
    peptides$protein[i] <- paste(accessions, collapse = ";")
  }
  if (length(names(peptides)[grepl(sample_pattern, names(peptides))]) <= 0) {
    stop("Sample Pattern not found in file.")
  }
  pattern <- switch(intensity_metric,
                    "PSM"       = paste0("X\\.Spec", ".*", sample_pattern, ".*"),
                    "Intensity" = paste0("Intensity", ".*", sample_pattern, ".*"),
                    "Area"      = paste0("Area", ".*", sample_pattern, ".*"),
                    stop("Unknown intensity_metric: ", intensity_metric))
  # drop = FALSE keeps a one-column selection as a data frame.
  dataframe <- peptide_import[, grepl(pattern, names(peptide_import)), drop = FALSE]
  # Zero counts/intensities are treated as missing (the original ran this
  # replacement twice for PSM; once is enough).
  dataframe[dataframe == 0] <- NA
  # Column-wise copy instead of the original cell-by-cell double loop.
  for (j in seq_len(ncol(dataframe))) {
    peptides[[paste0("Volcano_intensity_", j)]] <- dataframe[[j]]
  }
  peptides
}
read_peptide_csv_generic_volcano <- function(peptide_file, sample_pattern, min_valid_sample = 2,
                                             intensity_metric = "PSM") {
  # Read a generic peptide CSV and reshape it for volcano plotting.  Expects a
  # "Peptide" column and per-sample columns named like "<sample>.PSM",
  # "<sample>.Intensity" or "<sample>.Area".
  #
  # min_valid_sample is unused here; kept for interface parity with the other
  # *_volcano readers.
  #check_file(peptide_file, "PEAKS")
  peptide_import <- read.csv(peptide_file)
  #filetype(peptide_import, "Combined", "PEAKS")
  peptides <- peptide_import
  peptides$sequence <- str_remove_all(peptides$Peptide, "[a-z1-9()+-:.]")
  peptides$PEPTIDE <- peptides$Peptide
  # BUG FIX: the original condition was inverted — it overwrote an EXISTING
  # "protein" column with NA and never created one when absent.  The
  # downstream full_join() in combine_two_volcano_dfs() keys on "protein", so
  # the column must always exist and real values should be preserved.
  if (length(names(peptides)[grepl("protein", names(peptides))]) == 0) {
    peptides$protein <- NA
  }
  if (length(names(peptides)[grepl(sample_pattern, names(peptides))]) <= 0) {
    stop("Sample Pattern not found in file.")
  }
  pattern <- switch(intensity_metric,
                    "PSM"       = paste0(".*", sample_pattern, ".*", ".PSM"),
                    "Intensity" = paste0(".*", sample_pattern, ".*", ".Intensity"),
                    "Area"      = paste0(".*", sample_pattern, ".*", ".Area"),
                    stop("Unknown intensity_metric: ", intensity_metric))
  # drop = FALSE keeps a one-column selection as a data frame.
  dataframe <- peptide_import[, grepl(pattern, names(peptide_import)), drop = FALSE]
  # Zero values are treated as missing (the original ran this replacement
  # twice for PSM; once is enough).
  dataframe[dataframe == 0] <- NA
  # Column-wise copy instead of the original cell-by-cell double loop.
  for (j in seq_len(ncol(dataframe))) {
    peptides[[paste0("Volcano_intensity_", j)]] <- dataframe[[j]]
  }
  peptides
}
combine_two_volcano_dfs <- function(df_1, df_2, min_valid_sample = 2, fdr = 0.05,
                                    fold_change_cutoff_plot = 1, fold_change_cutoff_sig = 5,
                                    equal_variance_bool = T, remove_na = T, set_na = 0) {
  # Full-join two per-condition peptide tables (from the *_volcano readers)
  # and compute per-peptide fold changes and t-test p-values.
  #
  # df_1 / df_2:          data frames with PEPTIDE, sequence, protein and
  #                       Volcano_intensity_<i> columns.
  # min_valid_sample:     minimum non-missing replicates for a condition to
  #                       count as "valid".
  # fdr / fold_change_cutoff_plot / fold_change_cutoff_sig: unused here
  #                       (thresholds are applied later in
  #                       create_volcano_plot()); kept for interface stability.
  # equal_variance_bool:  forwarded to t.test(var.equal = ...).
  # remove_na:            TRUE -> drop NAs when averaging/testing;
  #                       FALSE -> replace NAs with set_na first.
  #
  # Returns the joined frame annotated with count.x/.y, valid.x/.y,
  # fold_change_category, avg.x/.y, fold_change_xy, l2fc_xy, p_val,
  # neg10logp and p_val_error.
  print(paste0("rows in df_1: ", nrow(df_1)))
  print(paste0("rows in df_2: ", nrow(df_2)))
  print(names(df_1))
  print(head(df_1$PEPTIDE))
  print(head(df_1$sequence))
  print(head(df_1$protein))  # BUG FIX: was df_1$protien, which printed NULL
  combine_df <- full_join(df_1, df_2, by = c("PEPTIDE", "sequence", "protein"), multiple = "any")
  print(paste0("rows in combined: ", nrow(combine_df)))
  print(names(combine_df))
  # Per-condition intensity matrices (.x/.y suffixes come from the join);
  # drop = FALSE keeps single-replicate selections as data frames.
  volcano_df1 <- combine_df[, grepl("Volcano_intensity_.*\\.x", names(combine_df)), drop = FALSE]
  volcano_df2 <- combine_df[, grepl("Volcano_intensity_.*\\.y", names(combine_df)), drop = FALSE]
  combine_df$count.x <- rowSums(!is.na(volcano_df1))
  combine_df$count.y <- rowSums(!is.na(volcano_df2))
  combine_df$valid.x <- combine_df$count.x >= min_valid_sample
  combine_df$valid.y <- combine_df$count.y >= min_valid_sample
  combine_df$fold_change_xy <- NA
  combine_df$fold_change_category <- "Normal"
  combine_df$p_val <- NA
  combine_df$avg.x <- NA
  combine_df$avg.y <- NA
  combine_df$l2fc_xy <- NA
  combine_df$neg10logp <- NA
  combine_df$p_val_error <- FALSE
  # Classify each peptide by where it was (sufficiently) observed.
  combine_df$fold_change_category[!combine_df$valid.x & !combine_df$valid.y] <- "Invalid"
  combine_df$fold_change_category[combine_df$valid.x & combine_df$count.y == 0] <- "Negative_Infinite"
  combine_df$fold_change_category[combine_df$count.x == 0 & combine_df$valid.y] <- "Infinite"
  combine_df$fold_change_category[combine_df$valid.x & !combine_df$valid.y & combine_df$count.y > 0] <- "Compromised.x"
  combine_df$fold_change_category[combine_df$valid.y & !combine_df$valid.x & combine_df$count.x > 0] <- "Compromised.y"
  if (!remove_na) {
    # Hoisted: the original re-ran this whole-frame replacement on every
    # iteration of the loop below.
    volcano_df1[is.na(volcano_df1)] <- set_na
    volcano_df2[is.na(volcano_df2)] <- set_na
  }
  for (i in seq_len(nrow(combine_df))) {
    fc_cat <- combine_df$fold_change_category[i]
    # BUG FIX: the original tested against "Compromixed.x" (typo), so
    # Compromised.x peptides never received averages or p-values.
    if (fc_cat %in% c("Normal", "Compromised.x", "Compromised.y")) {
      combine_df$avg.x[i] <- mean(as.numeric(volcano_df1[i, ]), na.rm = remove_na)
      combine_df$avg.y[i] <- mean(as.numeric(volcano_df2[i, ]), na.rm = remove_na)
      p <- tryCatch(
        t.test(as.numeric(volcano_df1[i, ]),
               as.numeric(volcano_df2[i, ]),
               var.equal = equal_variance_bool)$p.value,
        error = function(e) NULL)  # e.g. too few observations / zero variance
      if (is.null(p)) {
        # BUG FIX: the original assigned combine_df inside the error handler,
        # which only modified a local copy — p_val_error was never set.
        combine_df$p_val_error[i] <- TRUE   # p_val stays NA
      } else {
        combine_df$p_val[i] <- p
      }
      combine_df$fold_change_xy[i] <- combine_df$avg.y[i] / combine_df$avg.x[i]
      combine_df$l2fc_xy[i] <- log(combine_df$fold_change_xy[i], base = 2)
      combine_df$neg10logp[i] <- -log(combine_df$p_val[i], base = 10)
    }
  }
  combine_df
}
create_volcano_plot <- function(df, fdr = 0.05,
                                fold_change_cutoff_plot = 1, fold_change_cutoff_sig = 5,
                                equal_variance_bool = T, intensity_metric = "PSM",
                                sample1 = "Sample_1", sample2 = "Sample_2", BH_correction = F,
                                plotein_of_interest = NULL, display_comp_vals = T, display_infinites = T) {
  stop("placeholder")
}
# Convert a ggplot volcano plot into an interactive plotly widget.
#
# plot: the ggplot object built by create_volcano_plot(); its extra mapped
#       fields (PEPTIDE, sequence, protein, ...) become hover-tooltip data.
#
# Returns the plotly htmlwidget.
create_volcano_plotly <- function(plot) {
  ggplotly(plot)
}
| /www/Volcano_plot_functions.R | permissive | Champion-Lab/PrIntMap-R | R | false | false | 20,186 | r | read_peptide_tsv_MaxQuant_volcano <- function(peptide_file, sample_pattern, min_valid_sample = 2,
intensity_metric = "PSM") {
check_file(peptide_file, "MaxQuant")
peptide_import <- read.csv(peptide_file, sep = "\t", header = T, )
filetype(peptide_import, "Combined", "MaxQuant")
names(peptide_import)[names(peptide_import) == "Intensity"] <- "summed_intensity"
peptides <- peptide_import
peptides$sequence <- peptides$Sequence
ptms <- peptide_import[,grepl(".*site.IDs", names(peptide_import))]
ptm_names <- str_replace_all(string = names(ptms), pattern = "\\.", "")
ptm_names <- str_replace_all(string = ptm_names, pattern = "siteIDs", "")
peptides$PEPTIDE <- peptides$sequence
for (i in 1:nrow(peptides)) {
for (j in 1:ncol(ptms)) {
if (!is.na(ptms[i,j]) && ptms[i,j] != "") {
peptides$PEPTIDE[i] <- paste0(peptides$PEPTIDE[i], ";", ptm_names[j], ":", ptms[i,j])
}
}
}
peptides$protein <- NA
for(i in 1:nrow(peptides)) {
if (peptides$Proteins[i] != "") {
protein <- str_split(peptides$Proteins[i], ";")[[1]]
protein_vec <- rep("", length(protein))
for (j in 1:length(protein)) {
protein_vec[j] <- str_split(protein[j], "\\|")[[1]][2]
}
peptides$protein[i] <- paste(protein_vec, sep = ";", collapse = ";")
} else {
peptides$protein[i] <- "Not Found"
}
}
if(length(names(peptides)[grepl(sample_pattern, names(peptides))])<=0){
stop("Sample Pattern not found in file.")
} else{
if (intensity_metric == "PSM") {
pattern <- paste0("Experiment", ".*", sample_pattern, ".*")
} else if (intensity_metric == "Intensity") {
pattern <- paste0("Intensity", ".*", sample_pattern, ".*")
} else if (intensity_metric == "Area") {
pattern <- paste0("LFQ.intensity", ".*", sample_pattern, ".*")
}
dataframe <- peptides[,grepl(pattern, names(peptides))]
dataframe[dataframe == 0] <- NA
sample_count <- ncol(dataframe)
peptides$count <- NA
for (i in 1:sample_count) {
peptides[[paste0("Volcano_intensity_", i)]] <- NA
}
for (i in 1:nrow(peptides)) {
for (j in 1:sample_count) {
peptides[[paste0("Volcano_intensity_", j)]][i] <- dataframe[i,j]
}
peptides$count[i] <- sum(!is.na(as.numeric(dataframe[i,])))
}
peptides <- peptides[peptides$count > 0,]
return(peptides)
###
PSM_pattern <- paste0("Experiment", ".*", sample_pattern, ".*")
PSM_df <- peptide_import[,grepl(PSM_pattern, names(peptide_import))]
PSM_df[is.na(PSM_df)] <- 0
PSM_vec <- rowSums(as.data.frame(PSM_df))
sample_count <- ncol(as.data.frame(PSM_df))
Intensity_pattern <- paste0("Intensity", ".*", sample_pattern, ".*")
Intensity_df <- peptide_import[,grepl(Intensity_pattern, names(peptide_import))]
Intensity_df[is.na(Intensity_df)] <- 0
Intensity_vec <- rowSums(as.data.frame(Intensity_df))
Area_pattern <- paste0("LFQ.intensity", ".*", sample_pattern, ".*")
Area_df <- peptide_import[,grepl(Area_pattern, names(peptide_import))]
Area_df[is.na(Area_df)] <- 0
Area_vec <- rowSums(as.data.frame(Area_df))
peptides$PSM <- PSM_vec
peptides$Intensity <- Intensity_vec
peptides$Area <- Area_vec
peptides <- peptides[peptides$PSM > 0,]
if (!is.na(sample)) {
peptides$sample <- sample
}
if (!is.na(filter)) {
peptides <- peptides[grepl(filter, peptides$Accession) == F,]
}
return_list <- list(peptides, sample_count)
return(return_list)
}}
read_peptide_tsv_Metamorpheus_volcano <- function(peptide_file, sample_pattern, min_valid_sample = 2,
intensity_metric = "PSM") {
check_file(peptide_file, "Metamorpheus")
peptides <- read.csv(peptide_file, sep = "\t", header = T)
filetype(peptides, "Combined", "Metamorpheus")
if(length(names(peptides)[grepl("Total.Ion.Current", names(peptides))]) >0){
stop("Only quantified peptide results may be used for volcano plot from MetaMorpheus")
} else {
if(length(names(peptides)[grepl(sample_pattern, names(peptides))])<=0){
stop("Sample Pattern not found in file.")
}
else{
names(peptides)[names(peptides) == "Sequence"] <- "PEPTIDE"
peptides$protein <- peptides$Protein.Groups
peptides$sequence <- peptides$Base.Sequence
pattern <- paste0("Intensity_", ".*", sample_pattern, ".*")
dataframe <- peptides[,grepl(pattern, names(peptides))]
dataframe[dataframe == 0] <- NA
sample_count <- ncol(as.data.frame(dataframe))
for (i in 1:sample_count) {
peptides[[paste0("Volcano_intensity_", i)]] <- NA
}
for (i in 1:nrow(peptides)) {
for (j in 1:sample_count) {
peptides[[paste0("Volcano_intensity_", j)]][i] <- dataframe[i,j]
}
}
return(peptides)
}}
}
read_peptide_tsv_MSfragger_volcano <- function(peptide_file, sample_pattern, min_valid_sample = 2,
intensity_metric = "PSM") {
check_file(peptide_file, "MSfragger")
peptide_import <- read.csv(peptide_file, sep = "\t", header = T, )
filetype(peptide_import, "Combined", "MSfragger")
peptides <- peptide_import
names(peptides)[names(peptides)== "Sequence" | names(peptides) == "Peptide.Sequence"] <- "sequence"
peptides$PEPTIDE <- peptides$sequence
peptides$protein <- NA
for(i in 1:nrow(peptides)) {
if (peptides$Mapped.Proteins[i] != "") {
protein <- str_split(peptides$Mapped.Proteins[i], ",")[[1]]
protein_vec <- rep("", length(protein))
for (j in 1:length(protein)) {
protein_vec[j] <- str_split(protein[j], "\\|")[[1]][2]
}
proteins_additional <- paste(protein_vec, sep = ";", collapse = ";")
peptides$protein[i] <- paste0(peptides$Protein.ID[i], ";", proteins_additional)
} else {
peptides$protein[i] <- peptides$Protein.ID[i]
}
}
if(length(names(peptides)[grepl(sample_pattern, names(peptides))])<=0){
stop("Sample Pattern not found in file.")
} else {
if (intensity_metric == "PSM") {
pattern <- paste0(".*", sample_pattern, ".*", "Spectral\\.Count")
} else if (intensity_metric == "Intensity") {
pattern <- paste0(".*", sample_pattern, ".*", "Intensity")
} else if (intensity_metric == "Area") {
pattern <- paste0(".*", sample_pattern, ".*", "MaxLFQ.Intensity")
}
dataframe <- peptide_import[,grepl(pattern, names(peptide_import))]
dataframe[dataframe == 0] <- NA
sample_count <- ncol(as.data.frame(dataframe))
for (i in 1:sample_count) {
peptides[[paste0("Volcano_intensity_", i)]] <- NA
}
for (i in 1:nrow(peptides)) {
for (j in 1:sample_count) {
peptides[[paste0("Volcano_intensity_", j)]][i] <- dataframe[i,j]
}
}
return(peptides)
}
}
read_peptide_csv_PEAKS_volcano <- function(peptide_file, sample_pattern, min_valid_sample = 2,
intensity_metric = "PSM") {
check_file(peptide_file, "PEAKS")
peptide_import <- read.csv(peptide_file)
filetype(peptide_import, "Combined", "PEAKS")
names(peptide_import)[names(peptide_import) == "X.Spec"] <- "total_spectra"
names(peptide_import)[names(peptide_import) == "Avg..Area"] <- "Average_LFQ_value"
if(length(names(peptide_import)[grepl("Group.Profile..Ratio.", names(peptide_import))])>0){
start <- which(grepl(pattern = "Sample.Profile", x = names(peptide_import)))
end <- which(grepl(pattern = "Group.Profile", x = names(peptide_import)))
for (i in (start+1):(end-1)) {
names(peptide_import)[i] <- paste0("Average_", i)
}
}
peptides <- peptide_import
peptides[peptides == 0] <- NA
peptides$sequence <- str_remove_all(peptides$Peptide, "[a-z1-9()+-:.]")
peptides$PEPTIDE <- peptides$Peptide
peptides$protein <- NA
for(i in 1:nrow(peptides)) {
protein <- str_split(peptides$Accession[i], ";")[[1]]
protein_vec <- rep("", length(protein))
for (j in 1:length(protein)) {
protein_vec[j] <- str_split(protein[j], "\\|")[[1]][1]
}
peptides$protein[i] <- paste(protein_vec, sep = ";", collapse = ";")
}
if(length(names(peptides)[grepl(sample_pattern, names(peptides))])<=0){
stop("Sample Pattern not found in file.")
}
else {
if (intensity_metric == "PSM") {
pattern <- paste0("X\\.Spec", ".*", sample_pattern, ".*")
} else if (intensity_metric == "Intensity") {
pattern <- paste0("Intensity", ".*", sample_pattern, ".*")
} else if (intensity_metric == "Area") {
pattern <- paste0("Area", ".*", sample_pattern, ".*")
}
dataframe <- peptide_import[,grepl(pattern, names(peptide_import))]
dataframe[dataframe == 0] <- NA
if (intensity_metric == "PSM") {
dataframe[dataframe == 0] <- NA
}
sample_count <- ncol(as.data.frame(dataframe))
for (i in 1:sample_count) {
peptides[[paste0("Volcano_intensity_", i)]] <- NA
}
for (i in 1:nrow(peptides)) {
for (j in 1:sample_count) {
peptides[[paste0("Volcano_intensity_", j)]][i] <- dataframe[i,j]
}
}
return(peptides)
}}
read_peptide_csv_generic_volcano <- function(peptide_file, sample_pattern, min_valid_sample = 2,
                                             intensity_metric = "PSM") {
  # Read a generic peptide-level CSV and append one "Volcano_intensity_<j>"
  # column per sample column matching `sample_pattern` + `intensity_metric`.
  #
  # peptide_file     : path to a CSV containing at least a "Peptide" column.
  # sample_pattern   : regex fragment identifying the sample columns.
  # min_valid_sample : not used here; kept for interface compatibility with
  #                    the downstream volcano helpers.
  # intensity_metric : one of "PSM", "Intensity", "Area".
  #
  # Returns the imported table plus `sequence`, `PEPTIDE`, optionally
  # `protein`, and the Volcano_intensity_* columns.
  # ROBUSTNESS: fail fast (with a clear message) on an unknown metric instead
  # of leaving `pattern` undefined.
  intensity_metric <- match.arg(intensity_metric, c("PSM", "Intensity", "Area"))
  #check_file(peptide_file, "PEAKS")
  peptide_import <- read.csv(peptide_file)
  #filetype(peptide_import, "Combined", "PEAKS")
  peptides <- peptide_import
  # Strip modification annotations (lowercase letters, digits, punctuation)
  # to recover the bare peptide sequence.
  peptides$sequence <- str_remove_all(peptides$Peptide, "[a-z1-9()+-:.]")
  peptides$PEPTIDE <- peptides$Peptide
  if (length(names(peptides)[grepl("protein", names(peptides))]) > 0) {
    # NOTE(review): this blanks an existing "protein" column rather than
    # creating one when absent -- confirm the condition is intended.
    peptides$protein <- NA
  }
  if (length(names(peptides)[grepl(sample_pattern, names(peptides))]) <= 0) {
    stop("Sample Pattern not found in file.")
  }
  # Same patterns the original built in a three-way if/else chain.
  pattern <- paste0(".*", sample_pattern, ".*", ".", intensity_metric)
  # BUG FIX: coerce to data.frame so a single matching column does not
  # collapse to a bare vector (the old `dataframe[i, j]` indexing errored
  # in that case).
  dataframe <- as.data.frame(peptide_import[, grepl(pattern, names(peptide_import))])
  # Zero intensities are missing values for every metric; the original code
  # applied this twice (once for PSM, once unconditionally) -- once suffices.
  dataframe[dataframe == 0] <- NA
  sample_count <- ncol(dataframe)
  # PERF: copy each sample column with one vectorized assignment instead of
  # the original O(rows x samples) element-by-element double loop.
  for (j in seq_len(sample_count)) {
    peptides[[paste0("Volcano_intensity_", j)]] <- dataframe[[j]]
  }
  return(peptides)
}
# Merge two volcano intensity tables (outputs of the read_peptide_* helpers)
# by peptide/sequence/protein and compute, per peptide: replicate counts,
# validity flags, a fold-change category, group means, fold change, and a
# two-sample t-test p-value.
#
# df_1, df_2          : data frames with Volcano_intensity_* columns.
# min_valid_sample    : minimum non-NA replicates for a side to be "valid".
# fdr, fold_change_*  : accepted for interface compatibility; the thresholds
#                       are applied downstream in create_volcano_plot().
# equal_variance_bool : forwarded to t.test(var.equal = ...).
# remove_na / set_na  : drop NAs before averaging, or substitute set_na.
combine_two_volcano_dfs <- function(df_1, df_2, min_valid_sample = 2, fdr = 0.05,
                                    fold_change_cutoff_plot = 1, fold_change_cutoff_sig = 5,
                                    equal_variance_bool = TRUE, remove_na = TRUE, set_na = 0) {
  print(paste0("rows in df_1: " ,nrow(df_1)))
  print(paste0("rows in df_2: " ,nrow(df_2)))
  print(names(df_1))
  print(head(df_1$PEPTIDE))
  print(head(df_1$sequence))
  # BUG FIX: was df_1$protien (typo), which always printed NULL.
  print(head(df_1$protein))
  combine_df <- full_join(df_1, df_2, by = c("PEPTIDE", "sequence", "protein"), multiple = "any")
  print(paste0("rows in combined: " ,nrow(combine_df)))
  print(names(combine_df))
  # Per-sample intensity blocks for each input (suffixed .x/.y by the join).
  volcano_df1 <- combine_df[,grepl("Volcano_intensity_.*\\.x", names(combine_df))]
  volcano_df2 <- combine_df[,grepl("Volcano_intensity_.*\\.y", names(combine_df))]
  combine_df$count.x <- NA
  combine_df$count.y <- NA
  combine_df$valid.x <- FALSE
  combine_df$valid.y <- FALSE
  combine_df$fold_change_xy <- NA
  combine_df$fold_change_category <- "Normal"
  combine_df$p_val <- NA
  combine_df$avg.x <- NA
  combine_df$avg.y <- NA
  combine_df$l2fc_xy <- NA
  combine_df$neg10logp <- NA
  combine_df$p_val_error <- FALSE
  # Count non-missing replicates per side.
  for (i in seq_len(nrow(combine_df))) {
    combine_df$count.x[i] <- sum(!is.na(as.numeric(volcano_df1[i,])))
    combine_df$count.y[i] <- sum(!is.na(as.numeric(volcano_df2[i,])))
  }
  combine_df$valid.x[combine_df$count.x >= min_valid_sample] <- TRUE
  combine_df$valid.y[combine_df$count.y >= min_valid_sample] <- TRUE
  # Categorize each peptide by where (and how completely) it was observed.
  combine_df$fold_change_category[combine_df$valid.x == FALSE & combine_df$valid.y == FALSE] <- "Invalid"
  combine_df$fold_change_category[combine_df$valid.x == TRUE & combine_df$count.y == 0] <- "Negative_Infinite"
  combine_df$fold_change_category[combine_df$count.x == 0 & combine_df$valid.y == TRUE] <- "Infinite"
  combine_df$fold_change_category[(combine_df$valid.x == TRUE & combine_df$valid.y == FALSE & combine_df$count.y > 0)] <- "Compromised.x"
  combine_df$fold_change_category[(combine_df$valid.y == TRUE & combine_df$valid.x == FALSE & combine_df$count.x > 0)] <- "Compromised.y"
  for (i in seq_len(nrow(combine_df))) {
    # BUG FIX: the original compared against "Compromixed.x" (typo), so
    # Compromised.x peptides never received averages, fold changes, or
    # p-values.
    if (combine_df$fold_change_category[i] %in% c("Normal", "Compromised.x", "Compromised.y")) {
      if (remove_na == TRUE) {
        combine_df$avg.x[i] <- mean(as.numeric(volcano_df1[i,]), na.rm = TRUE)
        combine_df$avg.y[i] <- mean(as.numeric(volcano_df2[i,]), na.rm = TRUE)
        # BUG FIX: the old tryCatch error handler assigned into a local copy
        # of combine_df, so p_val_error was never recorded. Capture the
        # t-test via tryCatch's return value instead.
        tt <- tryCatch(t.test(as.numeric(volcano_df1[i,]),
                              as.numeric(volcano_df2[i,]),
                              var.equal = equal_variance_bool),
                       error = function(e) NULL)
        if (is.null(tt)) {
          combine_df$p_val[i] <- NA
          combine_df$p_val_error[i] <- TRUE
        } else {
          combine_df$p_val[i] <- tt$p.value
        }
      } else {
        # Substitute set_na for missing intensities; this mutates the local
        # blocks and so affects later rows too, matching the original
        # in-loop behavior.
        volcano_df1[is.na(volcano_df1)] <- set_na
        volcano_df2[is.na(volcano_df2)] <- set_na
        combine_df$avg.x[i] <- mean(as.numeric(volcano_df1[i,]), na.rm = FALSE)
        combine_df$avg.y[i] <- mean(as.numeric(volcano_df2[i,]), na.rm = FALSE)
        combine_df$p_val[i] <- t.test(as.numeric(volcano_df1[i,]),
                                      as.numeric(volcano_df2[i,]),
                                      var.equal = equal_variance_bool)$p.value
      }
      combine_df$fold_change_xy[i] <- combine_df$avg.y[i] / combine_df$avg.x[i]
      combine_df$l2fc_xy[i] <- log(combine_df$fold_change_xy[i], base = 2)
      combine_df$neg10logp[i] <- -log(combine_df$p_val[i], base = 10)
    }
  }
  return(combine_df)
}
# Build a peptide volcano plot (log2 fold change on x, -log10 p-value on y)
# from the combined table produced by combine_two_volcano_dfs().
# Returns list(plot, df): the ggplot object and the annotated data frame.
create_volcano_plot <- function(df, fdr = 0.05,
                                fold_change_cutoff_plot = 1, fold_change_cutoff_sig = 5,
                                equal_variance_bool = T, intensity_metric = "PSM",
                                sample1 = "Sample_1", sample2 = "Sample_2", BH_correction = F,
                                protein_of_interest = NULL, display_comp_vals = T, display_infinites = T) {
  # Symmetric x-axis limits around zero based on the largest |log2 FC|.
  minx <- min(df$l2fc_xy, na.rm = T)
  maxx <- max(df$l2fc_xy, na.rm = T)
  maxabs <- max(c(abs(minx), abs(maxx)))
  minx <- -maxabs
  maxx <- maxabs
  maxy <- max(df$neg10logp, na.rm = T)
  rangex <- maxx - minx
  # Optionally drop peptides whose fold change is based on a compromised
  # (under-replicated) side.
  if (display_comp_vals == F) {
    df <- df[df$fold_change_category != "Compromised.x",]
    df <- df[df$fold_change_category != "Compromised.y",]
  }
  if (BH_correction == T) {
    # Benjamini-Hochberg: rank p-values, compare each against (i/m)*fdr, and
    # place the significance line at the largest p-value that passes.
    df <- df[order(df$p_val),]
    BH_df <- df[!is.na(df$p_val),]
    df$critical_val_BH <- NA
    # Indices run over the NA-free subset but write into the full p-ordered
    # df; order() sorts NAs last, so the first nrow(BH_df) rows line up.
    for (i in 1:nrow(BH_df)) {
      df$critical_val_BH[i] <- (i / nrow(BH_df)) * fdr
    }
    # NOTE(review): `df$critical` reaches critical_val_BH only via `$`
    # partial matching -- consider spelling out the full column name.
    df$BH_cutoff <- df$critical - df$p_val
    sigdf <- df[df$BH_cutoff > 0,]
    sigdf <- sigdf[!is.na(sigdf$BH_cutoff),]
    if (nrow(sigdf) == 0) {
      # Nothing passes BH: park the cutoff line above all points.
      y_cutoff <- maxy * 1.1
    } else {
      new_p_val <- max(sigdf$p_val, na.rm = T)
      y_cutoff <- -log(new_p_val, base = 10)
    }
  } else {
    # Fixed p-value threshold (no multiple-testing correction).
    y_cutoff <- -log(fdr, base = 10)
  }
  # Color points passing both the fold-change and p-value cutoffs.
  df$color <- "Not_Significant"
  df$color[df$l2fc_xy > fold_change_cutoff_plot & df$neg10logp > y_cutoff] <- "Significant"
  df$color[df$l2fc_xy < -fold_change_cutoff_plot & df$neg10logp > y_cutoff] <- "Significant"
  if (!is.null(protein_of_interest)) {
    df$color[grepl(protein_of_interest, df$protein.x)] <- "Prot_of_interest"
  }
  # Manual palette sized to however many color classes are present.
  if (length(unique(df$color)) == 1) {
    color_list <- "grey65"
  } else if (length(unique(df$color)) == 2) {
    color_list <- c("grey65", "black")
  } else {
    color_list <- c("grey65", "green", "black")
  }
  # Recompute extents (df may have shrunk above); x stays symmetric via the
  # original maxabs.
  minx <- min(df$l2fc_xy, na.rm = T)
  maxx <- max(df$l2fc_xy, na.rm = T)
  maxy <- max(df$neg10logp, na.rm = T)
  minx <- -maxabs
  maxx <- maxabs
  rangex <- maxx - minx
  pos_range <- maxx - fold_change_cutoff_plot
  neg_range <- minx + fold_change_cutoff_plot
  y_range <- maxy - y_cutoff
  infinite_x <- maxx - (0.1*maxx)
  infinite_y <- maxy - (0.2*maxy)
  neg_infinite_x <- minx + (0.1*abs(minx))
  # Peptides detected on only one side have infinite fold change; scatter
  # them uniformly inside dashed corner boxes instead of plotting +/-Inf.
  infinite <- df[df$fold_change_category == "Infinite",]
  neg_infinite <- df[df$fold_change_category == "Negative_Infinite",]
  infinite$x <- runif(nrow(infinite), (fold_change_cutoff_plot + (pos_range/2)*1.2), maxx)
  infinite$y <- runif(nrow(infinite), (y_cutoff + (y_range/2)*1.4), maxy)
  neg_infinite$x <- -1 * (runif(nrow(neg_infinite), (fold_change_cutoff_plot + (abs(neg_range)/2)*1.2), abs(minx)))
  neg_infinite$y <- runif(nrow(neg_infinite), (y_cutoff + (y_range/2)*1.4), maxy)
  if (is.null(protein_of_interest)) {
    plot_title <- paste0("Peptide Volcano Plot: ", intensity_metric)
  } else {
    plot_title <- paste0("Peptide Volcano Plot: ", intensity_metric, " (", protein_of_interest, ")")
  }
  # PEPTIDE/sequence/protein/p_val/fold_change_category are not real ggplot
  # aesthetics; presumably they feed plotly hover text downstream (ggplot2
  # will warn about unknown aesthetics) -- TODO confirm.
  plot <- ggplot() +
    geom_vline(xintercept = fold_change_cutoff_plot, linetype = 2) +
    geom_vline(xintercept = -fold_change_cutoff_plot, linetype = 2) +
    geom_hline(yintercept = y_cutoff, linetype = 2) +
    geom_point(data = df, aes(x = l2fc_xy, y = neg10logp,
                              PEPTIDE = PEPTIDE,
                              sequence = sequence,
                              protein = protein,
                              colour = color,
                              p_val = p_val,
                              fold_change_category = fold_change_category)) +
    theme_bw(base_size = 15) +
    theme(panel.grid = element_blank(), legend.position = "none") +
    labs(x = (paste("Log2 fold-change: (", sample2, "/", sample1, ")",sep="")),
         y = "-Log10 (p-value)",
         title = plot_title) +
    scale_x_continuous(breaks = round(minx):round(maxx), limits = c(minx*1.2, maxx*1.2)) +
    scale_y_continuous(breaks = 0:round(max(df$neg10logp, na.rm = T)))+
    scale_color_manual(values = color_list)
  if (display_infinites) {
    # Dashed boxes framing the jittered infinite/negative-infinite points.
    rectangle_pos_min_x <- fold_change_cutoff_plot + (pos_range/2)*1.2
    rectangle_min_y <- y_cutoff + (y_range/2)*1.4
    rectangle_neg_min_x <- (fold_change_cutoff_plot + (abs(neg_range)/2)*1.2) * (-1)
    # NOTE(review): `alpha` is passed twice to each geom_rect (1 and 0) --
    # confirm which value is intended to win.
    plot <- plot +
      geom_rect(aes(xmin = rectangle_pos_min_x - (rectangle_pos_min_x*0.05), xmax = maxx + (maxx*0.05),
                    ymin = rectangle_min_y - (rectangle_min_y*0.05), ymax = maxy + (maxy*0.05)), alpha = 1,
                color = "black", fill = NA, alpha = 0, linetype = 5) +
      geom_rect(aes(xmin = minx + (minx*0.05), xmax = rectangle_neg_min_x - (rectangle_neg_min_x*0.05),
                    ymin = rectangle_min_y - (rectangle_min_y*0.05), ymax = maxy + (maxy*0.05)), alpha = 1,
                color = "black", fill = NA, alpha = 0, linetype = 5) +
      geom_point(data = infinite,
                 aes(x = x, y = y,
                     PEPTIDE = PEPTIDE,
                     sequence = sequence,
                     protein = protein,
                     colour = color,
                     fold_change_category = fold_change_category)) +
      geom_point(data = neg_infinite,
                 aes(x = x, y = y,
                     PEPTIDE = PEPTIDE,
                     sequence = sequence,
                     protein = protein,
                     colour = color,
                     fold_change_category = fold_change_category))
  }
  return(list(plot, df))
}
create_volcano_plotly <- function(plot) {
  # Convert a static ggplot volcano plot into an interactive plotly
  # htmlwidget via ggplotly().
  ggplotly(plot)
}
|
# Reformat raw sequence metadata for the Power BI dashboard.
#
# meta   : joined metadata table (primary_* / covv_* / country columns).
# vclist : lineage -> label table; rows labeled "VOI"/"VOC" mark variants.
#
# Returns one row per human strain with ISO3 country codes restricted to
# WHO-listed countries, Date-typed collection dates, and a `type` column
# ("VOI"/"VOC"/"Other").
reformat_meta_to_powerBI <- function(meta, vclist){
  # Standardize columns to the dashboard schema and keep human hosts only.
  reformat_meta <- meta %>%
    mutate(host = primary_host.x) %>%
    filter(host == "Human") %>%
    rename(strain = primary_virus_name) %>%
    rename(nt_id = primary_nt_id.x) %>%
    mutate(gisaid_epi_isl = covv_accession_id) %>%
    mutate(genbank_accession = accession) %>%
    mutate(date = as.Date(primary_collection_date.x)) %>%
    mutate(country = country_iso_name.x) %>%
    mutate(division = primary_state.x) %>%
    mutate(location = zip) %>%
    mutate(length = primary_sequence_length) %>%
    mutate(pango_lineage = lineage) %>%
    mutate(mutations = spike_mutations) %>%
    select(strain,
           nt_id,
           gisaid_epi_isl,
           genbank_accession,
           date,
           country,
           division,
           location,
           length,
           pango_lineage,
           mutations)
  # Map country names to ISO3 codes; Kosovo and Eswatini are special-cased
  # (presumably countrycode() does not resolve them as written -- confirm).
  reformat_meta <- reformat_meta %>%
    mutate(country_code = if_else(
      country == "Kosovo","XKX",
      if_else(country == "Eswatini", "SWZ",
              countrycode(sourcevar = country, origin = "country.name", destination = "iso3c"))))
  # Get the WHO countries
  gitdata.dir <- "https://raw.githubusercontent.com/CDCgov/ITF_Power_BI/master/itf_dashboard/output/"
  who_countries <- read.csv(paste0(gitdata.dir,"country_data.csv"), encoding="UTF-8") %>%
    rename(country_code = iso3code) %>%
    select(country, country_code)
  # Keep only WHO-listed countries, de-duplicate strains, and patch one
  # record whose collection year was mistyped as 1988. rows_update()
  # matches on `strain` (the first column of the update tibble).
  # BUG FIX: the patch date is now a Date, not a character string, so it is
  # type-compatible with the Date `date` column under dplyr's checks.
  new_meta <- reformat_meta %>%
    inner_join(who_countries, by = c("country_code")) %>% # add the iso 3 letter code
    filter(!duplicated(strain)) %>%
    rename(country = country.x) %>%
    select(-c(country.y, nt_id, length)) %>%
    rows_update(tibble(strain = "SARS-CoV-2/Human/USA/AK-CDC-2-4242656/1988",
                       date = as.Date("2021-04-01"))) # fix the typo of 1988
  # Lineages flagged as WHO variants of interest/concern.
  variant_label <- vclist %>% filter(label %in% c("VOI", "VOC"))
  new_meta <- new_meta %>%
    # BUG FIX: replacement value is an explicit Date (was a bare string).
    mutate(date = replace(date, date == "2051-04-05", as.Date("2021-04-05"))) %>%
    # NOTE(review): "NA" here is the literal string, not a missing value --
    # confirm lineages are encoded that way upstream.
    filter(pango_lineage != "NA" & pango_lineage != "none") %>%
    left_join(variant_label, by = "pango_lineage") %>%
    rename(type = label) %>% # rename label to type
    replace_na(list(type = "Other")) # replace NA in type with "Other" for the figure
  return(new_meta)
}
| /reformat_meta_to_powerBI.R | no_license | yyw-informatics/COVID_visuals | R | false | false | 2,251 | r | reformat_meta_to_powerBI <- function(meta, vclist){
  # Standardize raw metadata columns to the dashboard schema: keep human
  # hosts, rename identifiers, and coerce the collection date to Date.
  reformat_meta <- meta %>%
    mutate(host = primary_host.x) %>%
    filter(host == "Human") %>%
    rename(strain = primary_virus_name) %>%
    rename(nt_id = primary_nt_id.x) %>%
    mutate(gisaid_epi_isl = covv_accession_id) %>%
    mutate(genbank_accession = accession) %>%
    mutate(date = as.Date(primary_collection_date.x)) %>%
    mutate(country = country_iso_name.x) %>%
    mutate(division = primary_state.x) %>%
    mutate(location = zip) %>%
    mutate(length = primary_sequence_length) %>%
    mutate(pango_lineage = lineage) %>%
    mutate(mutations = spike_mutations) %>%
    select(strain,
           nt_id,
           gisaid_epi_isl,
           genbank_accession,
           date,
           country,
           division,
           location,
           length,
           pango_lineage,
           mutations)
  # Map country names to ISO3 codes; Kosovo and Eswatini are special-cased
  # (presumably countrycode() does not resolve them as written -- confirm).
  reformat_meta <- reformat_meta %>%
    mutate(country_code = if_else(
      country == "Kosovo","XKX",
      if_else(country == "Eswatini", "SWZ",
              countrycode(sourcevar = country, origin = "country.name", destination = "iso3c"))))
  # Get the WHO countries
  gitdata.dir <- "https://raw.githubusercontent.com/CDCgov/ITF_Power_BI/master/itf_dashboard/output/"
  who_countries <- read.csv(paste0(gitdata.dir,"country_data.csv"), encoding="UTF-8") %>%
    rename(country_code = iso3code) %>%
    select(country, country_code)
  # Restrict to WHO-listed countries, de-duplicate strains, and patch one
  # record whose collection year was mistyped as 1988.
  # NOTE(review): rows_update() is given `date` as a character while the
  # column is Date -- confirm this passes dplyr's type-compatibility check.
  new_meta <- reformat_meta %>%
    inner_join(who_countries, by = c("country_code")) %>% # add the iso 3 letter code
    filter(!duplicated(strain)) %>%
    rename(country = country.x) %>%
    select(-c(country.y, nt_id, length)) %>%
    rows_update(tibble(strain = "SARS-CoV-2/Human/USA/AK-CDC-2-4242656/1988", date = "2021-04-01")) # fix the typo of 1988
  # Lineages flagged as WHO variants of interest/concern; everything else
  # gets type "Other" below.
  variant_label <- vclist %>% filter(label == "VOI" | label == "VOC")
  new_meta <- new_meta %>%
    mutate(date = replace(date, date == "2051-04-05","2021-04-05")) %>%
    # NOTE(review): "NA" here is the literal string, not a missing value.
    filter(pango_lineage != "NA" & pango_lineage != "none") %>%
    left_join(variant_label, by = "pango_lineage") %>%
    rename(type = label) %>% # rename label to type
    replace_na(list(type = "Other")) # replace NA in type with "Other" for the figure
  return(new_meta)
}
|
# Simulated data for Figures 4 and 5 for MEPS paper that take too long to produce when building a vignette.
# See ?sizeSpectra::MLEbin.MEPS.default (and for other objects saved below) for details
# Each object is saved into the package's data/ directory via
# usethis::use_data(), overwriting any previous version.
# Default MLEbin simulation settings.
MLEbin.MEPS.default <- MLEbin.simulate()
usethis::use_data(MLEbin.MEPS.default, overwrite = TRUE)
# Variant with xmin.known = 16 -- presumably treating the minimum size as
# known; see ?MLEbin.simulate for the argument's meaning.
MLEbin.MEPS.xmin16 <- MLEbin.simulate(xmin.known = 16)
usethis::use_data(MLEbin.MEPS.xmin16, overwrite = TRUE)
# Variant with cut.off = 16; see ?MLEbin.simulate.
MLEbin.MEPS.cutoff16 <- MLEbin.simulate(cut.off = 16)
usethis::use_data(MLEbin.MEPS.cutoff16, overwrite = TRUE)
| /data-raw/simulate-data2.R | no_license | andrew-edwards/sizeSpectra | R | false | false | 522 | r | # Simulated data for Figures 4 and 5 for MEPS paper that take too long to produce when building a vignette.
# See ?sizeSpectra::MLEbin.MEPS.default (and for other objects saved below) for details
# Three MLEbin simulation variants, each saved into the package's data/
# directory with usethis::use_data() (overwriting previous versions).
MLEbin.MEPS.default <- MLEbin.simulate()
usethis::use_data(MLEbin.MEPS.default, overwrite = TRUE)
# Variant with xmin.known = 16; see ?MLEbin.simulate for its meaning.
MLEbin.MEPS.xmin16 <- MLEbin.simulate(xmin.known = 16)
usethis::use_data(MLEbin.MEPS.xmin16, overwrite = TRUE)
# Variant with cut.off = 16.
MLEbin.MEPS.cutoff16 <- MLEbin.simulate(cut.off = 16)
usethis::use_data(MLEbin.MEPS.cutoff16, overwrite = TRUE)
|
#' Get Legend, Mapunit and Legend Mapunit Area Overlap Tables
#'
#'
#' @param SS Fetch data from the currently loaded selected set in NASIS or from the entire local database (default: `TRUE`)
#' @param repdmu Return only "representative" data mapunits? Default: `TRUE`
#' @param droplevels Drop unused levels from `farmlndcl` and other factor levels from NASIS domains?
#' @param stringsAsFactors deprecated
#' @param dsn Optional: path to local SQLite database containing NASIS
#' table structure; default: `NULL`
#'
#' @export
get_mapunit_from_NASIS <- function(SS = TRUE, repdmu = TRUE, droplevels = TRUE, stringsAsFactors = NULL, dsn = NULL) {
  # Forward the deprecated stringsAsFactors argument to the package option.
  if (!missing(stringsAsFactors) && is.logical(stringsAsFactors)) {
    .Deprecated(msg = sprintf("stringsAsFactors argument is deprecated.\nSetting package option with `NASISDomainsAsFactor(%s)`", stringsAsFactors))
    NASISDomainsAsFactor(stringsAsFactors)
  }
  # Legend/mapunit query with a correlated per-mapunit component summary.
  # IDIOM: `if () else` replaces ifelse() on a scalar condition (same
  # result, avoids the vectorized-ifelse anti-pattern).
  q.mapunit <- paste("SELECT
  ng.grpname, areasymbol, areatypename, liid, lmapunitiid,
  nationalmusym, muiid, musym, muname, mukind, mutype, mustatus, dmuinvesintens, muacres,
  farmlndcl, dmuiid, repdmu, pct_component, pct_hydric, n_component, n_majcompflag
  FROM
  area a INNER JOIN
  legend_View_1 l ON l.areaiidref = a.areaiid INNER JOIN
  lmapunit_View_1 lmu ON lmu.liidref = l.liid INNER JOIN
  mapunit_View_1 mu ON mu.muiid = lmu.muiidref
  INNER JOIN
  areatype at ON at.areatypeiid = areatypeiidref
  INNER JOIN
  nasisgroup ng ON ng.grpiid = mu.grpiidref
  LEFT OUTER JOIN
  --components
  (SELECT
  cor.muiidref cor_muiidref, dmuiid, repdmu, dmuinvesintens,
  SUM(comppct_r) pct_component,
  SUM(comppct_r * CASE WHEN hydricrating = 1 THEN 1 ELSE 0 END) pct_hydric,
  COUNT(*) n_component,
  SUM(CASE WHEN majcompflag = 1 THEN 1 ELSE 0 END) n_majcompflag
  FROM
  component_View_1 co LEFT OUTER JOIN
  datamapunit_View_1 dmu ON dmu.dmuiid = co.dmuiidref LEFT OUTER JOIN
  correlation_View_1 cor ON cor.dmuiidref = dmu.dmuiid",
  if (repdmu) "AND cor.repdmu = 1" else "", "
  GROUP BY cor.muiidref, dmuiid, repdmu, dmuinvesintens
  ) co ON co.cor_muiidref = mu.muiid
  WHERE
  areatypename IN ('Non-MLRA Soil Survey Area', 'MLRA Soil Survey Area')
  ORDER BY areasymbol, musym
  ;")
  # toggle selected set vs. local DB
  if (SS == FALSE) {
    q.mapunit <- gsub(pattern = '_View_1', replacement = '', x = q.mapunit, fixed = TRUE)
  }
  channel <- dbConnectNASIS(dsn)
  if (inherits(channel, 'try-error'))
    return(data.frame())
  # exec query
  d.mapunit <- dbQueryNASIS(channel, q.mapunit)
  # recode metadata domains
  d.mapunit <- uncode(d.mapunit, droplevels = droplevels, dsn = dsn)
  # hacks to make R CMD check --as-cran happy:
  metadata <- NULL
  # load local copy of metadata (overwrites the NULL placeholder above)
  load(system.file("data/metadata.rda", package = "soilDB")[1])
  # Recode farmland class using the NASIS choice list from the metadata.
  d.mapunit$farmlndcl <- factor(d.mapunit$farmlndcl,
                                levels = metadata[metadata$ColumnPhysicalName == "farmlndcl", "ChoiceValue"],
                                labels = metadata[metadata$ColumnPhysicalName == "farmlndcl", "ChoiceLabel"])
  # Default (stringsAsFactors unset) keeps farmlndcl as character.
  if (is.null(stringsAsFactors) || stringsAsFactors == FALSE) {
    d.mapunit$farmlndcl <- as.character(d.mapunit$farmlndcl)
  }
  # IDIOM: && replaces elementwise & in this scalar condition. Note the
  # `droplevels` argument shadows base::droplevels only as a value; the
  # function call still resolves to base::droplevels.
  if (droplevels == TRUE && is.factor(d.mapunit$farmlndcl)) {
    d.mapunit$farmlndcl <- droplevels(d.mapunit$farmlndcl)
  }
  # CLEANUP: removed the unused `orig_names` cache.
  # done
  return(d.mapunit)
}
#' @export
#' @rdname get_mapunit_from_NASIS
get_legend_from_NASIS <- function(SS = TRUE,
                                  droplevels = TRUE,
                                  stringsAsFactors = NULL,
                                  dsn = NULL) {
  # Honor the deprecated stringsAsFactors argument by forwarding it to the
  # package-level NASIS domain option, exactly as the sibling getters do.
  if (!missing(stringsAsFactors) && is.logical(stringsAsFactors)) {
    .Deprecated(msg = sprintf("stringsAsFactors argument is deprecated.\nSetting package option with `NASISDomainsAsFactor(%s)`", stringsAsFactors))
    NASISDomainsAsFactor(stringsAsFactors)
  }

  # One row per legend: survey-area attributes plus a count of its mapunits.
  legend_sql <- "
  SELECT
  mlraoffice,
  areasymbol, areaname, areatypename, CAST(areaacres AS INTEGER) AS areaacres, ssastatus,
  CAST(projectscale AS INTEGER) projectscale, cordate,
  CAST(liid AS INTEGER) liid, COUNT(lmu.lmapunitiid) n_lmapunitiid, legendsuituse
  FROM
  area a INNER JOIN
  legend_View_1 l ON l.areaiidref = a.areaiid INNER JOIN
  lmapunit_View_1 lmu ON lmu.liidref = l.liid
  INNER JOIN
  areatype at ON at.areatypeiid = areatypeiidref
  WHERE
  areatypename IN ('Non-MLRA Soil Survey Area', 'MLRA Soil Survey Area')
  GROUP BY mlraoffice, areasymbol, areaname, areatypename, areaacres, ssastatus, projectscale, legendsuituse, cordate, liid
  ORDER BY mlraoffice, areasymbol
  ;"

  # Querying the whole local database (rather than the selected set) is done
  # by stripping the "_View_1" suffix from every table reference.
  if (!SS) {
    legend_sql <- gsub(pattern = '_View_1', replacement = '', x = legend_sql, fixed = TRUE)
  }

  conn <- dbConnectNASIS(dsn)
  if (inherits(conn, 'try-error'))
    return(data.frame())

  # Run the query, then translate coded NASIS domain values into labels.
  legend_data <- dbQueryNASIS(conn, legend_sql)
  uncode(legend_data, droplevels = droplevels, dsn = dsn)
}
#' @export
#' @rdname get_mapunit_from_NASIS
get_lmuaoverlap_from_NASIS <- function(SS = TRUE,
                                       droplevels = TRUE,
                                       stringsAsFactors = NULL,
                                       dsn = NULL) {
  # CONSISTENCY FIX: honor the deprecated stringsAsFactors argument the same
  # way get_mapunit_from_NASIS()/get_legend_from_NASIS() do; it was
  # previously accepted but silently ignored here.
  if (!missing(stringsAsFactors) && is.logical(stringsAsFactors)) {
    .Deprecated(msg = sprintf("stringsAsFactors argument is deprecated.\nSetting package option with `NASISDomainsAsFactor(%s)`", stringsAsFactors))
    NASISDomainsAsFactor(stringsAsFactors)
  }
  # Legend area overlap joined to mapunit area overlap, one row per
  # mapunit/overlap-area combination.
  q <- "SELECT
  a.areasymbol, a.areaname, a.areaacres,
  at2.areatypename lao_areatypename, a2.areasymbol lao_areasymbol, a2.areaname lao_areaname, lao.areaovacres lao_areaovacres,
  lmapunitiid, musym, nationalmusym, muname, mustatus, muacres,
  lmuao.areaovacres lmuao_areaovacres
  FROM
  legend_View_1 l INNER JOIN
  lmapunit_View_1 lmu ON lmu.liidref = l.liid INNER JOIN
  mapunit_View_1 mu ON mu.muiid = lmu.muiidref
  INNER JOIN
  area a ON a.areaiid = l.areaiidref INNER JOIN
  areatype at ON at.areatypeiid = a.areatypeiidref
  LEFT OUTER JOIN
  laoverlap_View_1 lao ON lao.liidref = l.liid INNER JOIN
  area a2 ON a2.areaiid = lao.areaiidref INNER JOIN
  areatype at2 ON at2.areatypeiid = a2.areatypeiidref
  LEFT OUTER JOIN
  lmuaoverlap_View_1 lmuao ON lmuao.lmapunitiidref = lmu.lmapunitiid
  AND lmuao.lareaoviidref = lao.lareaoviid
  WHERE
  at.areatypename IN ('Non-MLRA Soil Survey Area', 'MLRA Soil Survey Area')
  ORDER BY a.areasymbol, lmu.musym, lao_areatypename
  ;"
  # toggle selected set vs. local DB
  if (SS == FALSE) {
    q <- gsub(pattern = '_View_1', replacement = '', x = q, fixed = TRUE)
  }
  channel <- dbConnectNASIS(dsn)
  if (inherits(channel, 'try-error'))
    return(data.frame())
  d <- dbQueryNASIS(channel, q)
  # Force musym to character (presumably it can import as numeric in some
  # databases -- confirm).
  d$musym <- as.character(d$musym)
  # recode metadata domains
  d <- uncode(d, droplevels = droplevels, dsn = dsn)
  # done
  return(d)
}
| /R/get_mapunit_from_NASIS.R | no_license | ncss-tech/soilDB | R | false | false | 8,482 | r | #' Get Legend, Mapunit and Legend Mapunit Area Overlap Tables
#'
#'
#' @param SS Fetch data from the currently loaded selected set in NASIS or from the entire local database (default: `TRUE`)
#' @param repdmu Return only "representative" data mapunits? Default: `TRUE`
#' @param droplevels Drop unused levels from `farmlndcl` and other factor levels from NASIS domains?
#' @param stringsAsFactors deprecated
#' @param dsn Optional: path to local SQLite database containing NASIS
#' table structure; default: `NULL`
#'
#' @export
get_mapunit_from_NASIS <- function(SS = TRUE, repdmu = TRUE, droplevels = TRUE, stringsAsFactors = NULL, dsn = NULL) {
  # Forward the deprecated stringsAsFactors argument to the package option.
  if (!missing(stringsAsFactors) && is.logical(stringsAsFactors)) {
    .Deprecated(msg = sprintf("stringsAsFactors argument is deprecated.\nSetting package option with `NASISDomainsAsFactor(%s)`", stringsAsFactors))
    NASISDomainsAsFactor(stringsAsFactors)
  }
  # Legend/mapunit query with a correlated per-mapunit component summary.
  # IDIOM: `if () else` replaces ifelse() on a scalar condition (same
  # result, avoids the vectorized-ifelse anti-pattern).
  q.mapunit <- paste("SELECT
  ng.grpname, areasymbol, areatypename, liid, lmapunitiid,
  nationalmusym, muiid, musym, muname, mukind, mutype, mustatus, dmuinvesintens, muacres,
  farmlndcl, dmuiid, repdmu, pct_component, pct_hydric, n_component, n_majcompflag
  FROM
  area a INNER JOIN
  legend_View_1 l ON l.areaiidref = a.areaiid INNER JOIN
  lmapunit_View_1 lmu ON lmu.liidref = l.liid INNER JOIN
  mapunit_View_1 mu ON mu.muiid = lmu.muiidref
  INNER JOIN
  areatype at ON at.areatypeiid = areatypeiidref
  INNER JOIN
  nasisgroup ng ON ng.grpiid = mu.grpiidref
  LEFT OUTER JOIN
  --components
  (SELECT
  cor.muiidref cor_muiidref, dmuiid, repdmu, dmuinvesintens,
  SUM(comppct_r) pct_component,
  SUM(comppct_r * CASE WHEN hydricrating = 1 THEN 1 ELSE 0 END) pct_hydric,
  COUNT(*) n_component,
  SUM(CASE WHEN majcompflag = 1 THEN 1 ELSE 0 END) n_majcompflag
  FROM
  component_View_1 co LEFT OUTER JOIN
  datamapunit_View_1 dmu ON dmu.dmuiid = co.dmuiidref LEFT OUTER JOIN
  correlation_View_1 cor ON cor.dmuiidref = dmu.dmuiid",
  if (repdmu) "AND cor.repdmu = 1" else "", "
  GROUP BY cor.muiidref, dmuiid, repdmu, dmuinvesintens
  ) co ON co.cor_muiidref = mu.muiid
  WHERE
  areatypename IN ('Non-MLRA Soil Survey Area', 'MLRA Soil Survey Area')
  ORDER BY areasymbol, musym
  ;")
  # toggle selected set vs. local DB
  if (SS == FALSE) {
    q.mapunit <- gsub(pattern = '_View_1', replacement = '', x = q.mapunit, fixed = TRUE)
  }
  channel <- dbConnectNASIS(dsn)
  if (inherits(channel, 'try-error'))
    return(data.frame())
  # exec query
  d.mapunit <- dbQueryNASIS(channel, q.mapunit)
  # recode metadata domains
  d.mapunit <- uncode(d.mapunit, droplevels = droplevels, dsn = dsn)
  # hacks to make R CMD check --as-cran happy:
  metadata <- NULL
  # load local copy of metadata (overwrites the NULL placeholder above)
  load(system.file("data/metadata.rda", package = "soilDB")[1])
  # Recode farmland class using the NASIS choice list from the metadata.
  d.mapunit$farmlndcl <- factor(d.mapunit$farmlndcl,
                                levels = metadata[metadata$ColumnPhysicalName == "farmlndcl", "ChoiceValue"],
                                labels = metadata[metadata$ColumnPhysicalName == "farmlndcl", "ChoiceLabel"])
  # Default (stringsAsFactors unset) keeps farmlndcl as character.
  if (is.null(stringsAsFactors) || stringsAsFactors == FALSE) {
    d.mapunit$farmlndcl <- as.character(d.mapunit$farmlndcl)
  }
  # IDIOM: && replaces elementwise & in this scalar condition. Note the
  # `droplevels` argument shadows base::droplevels only as a value; the
  # function call still resolves to base::droplevels.
  if (droplevels == TRUE && is.factor(d.mapunit$farmlndcl)) {
    d.mapunit$farmlndcl <- droplevels(d.mapunit$farmlndcl)
  }
  # CLEANUP: removed the unused `orig_names` cache.
  # done
  return(d.mapunit)
}
#' @export
#' @rdname get_mapunit_from_NASIS
get_legend_from_NASIS <- function(SS = TRUE,
                                  droplevels = TRUE,
                                  stringsAsFactors = NULL,
                                  dsn = NULL) {
  # Honor the deprecated stringsAsFactors argument by forwarding it to the
  # package-level NASIS domain option, exactly as the sibling getters do.
  if (!missing(stringsAsFactors) && is.logical(stringsAsFactors)) {
    .Deprecated(msg = sprintf("stringsAsFactors argument is deprecated.\nSetting package option with `NASISDomainsAsFactor(%s)`", stringsAsFactors))
    NASISDomainsAsFactor(stringsAsFactors)
  }

  # One row per legend: survey-area attributes plus a count of its mapunits.
  legend_sql <- "
  SELECT
  mlraoffice,
  areasymbol, areaname, areatypename, CAST(areaacres AS INTEGER) AS areaacres, ssastatus,
  CAST(projectscale AS INTEGER) projectscale, cordate,
  CAST(liid AS INTEGER) liid, COUNT(lmu.lmapunitiid) n_lmapunitiid, legendsuituse
  FROM
  area a INNER JOIN
  legend_View_1 l ON l.areaiidref = a.areaiid INNER JOIN
  lmapunit_View_1 lmu ON lmu.liidref = l.liid
  INNER JOIN
  areatype at ON at.areatypeiid = areatypeiidref
  WHERE
  areatypename IN ('Non-MLRA Soil Survey Area', 'MLRA Soil Survey Area')
  GROUP BY mlraoffice, areasymbol, areaname, areatypename, areaacres, ssastatus, projectscale, legendsuituse, cordate, liid
  ORDER BY mlraoffice, areasymbol
  ;"

  # Querying the whole local database (rather than the selected set) is done
  # by stripping the "_View_1" suffix from every table reference.
  if (!SS) {
    legend_sql <- gsub(pattern = '_View_1', replacement = '', x = legend_sql, fixed = TRUE)
  }

  conn <- dbConnectNASIS(dsn)
  if (inherits(conn, 'try-error'))
    return(data.frame())

  # Run the query, then translate coded NASIS domain values into labels.
  legend_data <- dbQueryNASIS(conn, legend_sql)
  uncode(legend_data, droplevels = droplevels, dsn = dsn)
}
#' @export
#' @rdname get_mapunit_from_NASIS
get_lmuaoverlap_from_NASIS <- function(SS = TRUE,
                                       droplevels = TRUE,
                                       stringsAsFactors = NULL,
                                       dsn = NULL) {
  # CONSISTENCY FIX: honor the deprecated stringsAsFactors argument the same
  # way get_mapunit_from_NASIS()/get_legend_from_NASIS() do; it was
  # previously accepted but silently ignored here.
  if (!missing(stringsAsFactors) && is.logical(stringsAsFactors)) {
    .Deprecated(msg = sprintf("stringsAsFactors argument is deprecated.\nSetting package option with `NASISDomainsAsFactor(%s)`", stringsAsFactors))
    NASISDomainsAsFactor(stringsAsFactors)
  }
  # Legend area overlap joined to mapunit area overlap, one row per
  # mapunit/overlap-area combination.
  q <- "SELECT
  a.areasymbol, a.areaname, a.areaacres,
  at2.areatypename lao_areatypename, a2.areasymbol lao_areasymbol, a2.areaname lao_areaname, lao.areaovacres lao_areaovacres,
  lmapunitiid, musym, nationalmusym, muname, mustatus, muacres,
  lmuao.areaovacres lmuao_areaovacres
  FROM
  legend_View_1 l INNER JOIN
  lmapunit_View_1 lmu ON lmu.liidref = l.liid INNER JOIN
  mapunit_View_1 mu ON mu.muiid = lmu.muiidref
  INNER JOIN
  area a ON a.areaiid = l.areaiidref INNER JOIN
  areatype at ON at.areatypeiid = a.areatypeiidref
  LEFT OUTER JOIN
  laoverlap_View_1 lao ON lao.liidref = l.liid INNER JOIN
  area a2 ON a2.areaiid = lao.areaiidref INNER JOIN
  areatype at2 ON at2.areatypeiid = a2.areatypeiidref
  LEFT OUTER JOIN
  lmuaoverlap_View_1 lmuao ON lmuao.lmapunitiidref = lmu.lmapunitiid
  AND lmuao.lareaoviidref = lao.lareaoviid
  WHERE
  at.areatypename IN ('Non-MLRA Soil Survey Area', 'MLRA Soil Survey Area')
  ORDER BY a.areasymbol, lmu.musym, lao_areatypename
  ;"
  # toggle selected set vs. local DB
  if (SS == FALSE) {
    q <- gsub(pattern = '_View_1', replacement = '', x = q, fixed = TRUE)
  }
  channel <- dbConnectNASIS(dsn)
  if (inherits(channel, 'try-error'))
    return(data.frame())
  d <- dbQueryNASIS(channel, q)
  # Force musym to character (presumably it can import as numeric in some
  # databases -- confirm).
  d$musym <- as.character(d$musym)
  # recode metadata domains
  d <- uncode(d, droplevels = droplevels, dsn = dsn)
  # done
  return(d)
}
|
library(streambugs)

### Name: construct.statevariables
### Title: Construct the streambugs ODE state variable names
### Aliases: construct.statevariables

### ** Examples

# Two reaches, a single habitat, and two invertebrate taxa.
Reaches <- paste0("Reach", 1:2)
Habitats <- paste0("Hab", 1)
y.names <- construct.statevariables(Reaches, Habitats,
                                    Invertebrates = c("Baetis", "Ecdyonurus"))
| /data/genthat_extracted_code/streambugs/examples/construct.statevariables.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 351 | r | library(streambugs)
### Name: construct.statevariables
### Title: Construct the streambugs ODE state variable names
### Aliases: construct.statevariables
### ** Examples
# Example inputs: two reach names and a single habitat name.
Reaches <- paste0("Reach",1:2)
Habitats <- paste0("Hab",1:1)
# Combine reaches, habitats, and taxa into the full vector of ODE state
# variable names (see ?construct.statevariables in streambugs).
y.names <- construct.statevariables(Reaches,Habitats,Invertebrates=c("Baetis","Ecdyonurus"))
|
############################################
# Cross-population analysis of high-grade serous ovarian cancer does
# not support four subtypes
#
# Way, G.P., Rudd, J., Wang, C., Hamidi, H., Fridley, L.B,
# Konecny, G., Goode, E., Greene, C.S., Doherty, J.A.
# ~~~~~~~~~~~~~~~~~~~~~
# This script will input a series of datasets and perform Non-negative Matrix
# Factorization (NMF)
suppressMessages(library(checkpoint))
suppressMessages(checkpoint('2016-03-01', checkpointLocation = "."))
args <- commandArgs(trailingOnly = TRUE)
# args <- c(2, 4, 100, 123, "TCGA_eset", "mayo.eset", "GSE32062.GPL6480_eset",
# "GSE9891_eset", "GSE26712_eset", "aaces.eset")
################################
# Load Libraries
################################
library(curatedOvarianData)
library(cluster)
library(NMF)
# Load important kmeans and SAM functions
kmeansfxnpath <- file.path("2.Clustering_DiffExprs", "Scripts", "Functions",
"kmeans_SAM_functions.R")
source(kmeansfxnpath)
# Loads curatedOvarianData
loadovcapath <- file.path("1.DataInclusion", "Scripts", "Functions",
"LoadOVCA_Data.R")
source(loadovcapath)
################################
# Constants
################################
# NMF rank range (rank1..rank2), number of NMF runs, and RNG seed, parsed
# from the command line. IDIOM: `<-` assignment; the original paste() was a
# no-op on character command-line arguments.
rank1 <- as.numeric(args[1])
rank2 <- as.numeric(args[2])
nruns <- as.numeric(args[3])
set.seed(args[4])
############################################
# Load Data
############################################
# Separate the eset arguments from the rest of the commandArgs
argsCurated <- args[grep("eset", args)]
# The Mayo dataset identifier carries no "eset" suffix, so splice it in
# right after the first eset argument.
if ("Mayo" %in% args) {
  argsCurated <- c(argsCurated[1], "Mayo", argsCurated[2:length(argsCurated)])
}
# Use the LoadOVCA_Data function to read in the datasets subset by MAD genes
ExpData <- LoadOVCA_Data(datasets = argsCurated, genelist_subset = "madgenes")
############################################
# Run NMF
############################################
nmfClusters <- list()
# For each dataset: run NMF for every k in rank1:rank2, collect per-sample
# cluster assignments, and write cophenetic diagnostics to PNG files.
for (dataset in seq_along(ExpData)) {
  # Load corresponding k-means cluster membership file
  membfile <- file.path("2.Clustering_DiffExprs",
                        "Tables", "ClusterMembership", "kmeans",
                        paste0("KMembership_", names(ExpData)[[dataset]],
                               ".csv"))
  ClusterAssign <- read.csv(file = membfile, row.names = 1)
  # One column of NMF assignments per k, one row per sample.
  ks <- rank1:rank2
  newMatrix <- matrix(NA, nrow = nrow(ClusterAssign), ncol = length(ks))
  rownames(newMatrix) <- rownames(ClusterAssign)
  # GENERALIZED: the original hard-coded k = centroid + 1, which is only
  # correct when rank1 == 2; index rank1:rank2 directly instead (identical
  # result for the default rank1 = 2).
  for (ki in seq_along(ks)) {
    fname <- paste0("NMF_Clusters_", names(ExpData)[dataset], "_K", ks[ki])
    # Run custom NMF function with `nruns` runs for this k
    newMatrix[ , ki] <- runNMF(ExpData[[dataset]],
                               k = ks[ki], nrun = nruns,
                               KClusterAssign = ClusterAssign,
                               fname = fname)
  }
  # Assign results to list
  nmfClusters[[dataset]] <- newMatrix
  # Run custom NMF function to obtain cophenetic coefficient diagnostics
  coph_coeff <- runNMF(ExpData[[dataset]], coph = TRUE)
  # Base name to save figures (IDIOM: paste0 over paste(..., sep = ""))
  sup_fname <- paste0("NMF_Clusters_", names(ExpData)[dataset])
  # Write the consensus mapping as a figure
  fname <- file.path("2.Clustering_DiffExprs", "Figures", "nmf",
                     "CopheneticMaps", paste0(sup_fname, "_k2-6.png"))
  png(fname, width = 950, height = 550)
  # Plot consensus maps
  consensusmap(coph_coeff$consensus[1:5], labCol = NA, labRow = NA,
               main = "", fontsize = 12)
  # Close the connection
  dev.off()
  # Write the Cophenetic Coefficient as a figure
  fname <- file.path("2.Clustering_DiffExprs", "Figures", "nmf",
                     "CopheneticMaps", paste0(sup_fname, "coph_coeff", ".png"))
  png(fname, width = 270, height = 230)
  # Plot cophenetic coefficient
  par(mar = c(4.5, 4.5, 1.5, 1))
  plot(coph_coeff$measures$cophenetic, xaxt = "n", cex.axis = 1.5,
       cex.lab = 1.5, xlab = 'k', ylab = 'Cophenetic Correlation')
  axis(1, at = 1:7, labels = 2:8, cex.axis = 1.5)
  lines(coph_coeff$measures$cophenetic, lwd = 2)
  points(coph_coeff$measures$cophenetic, col = 'black', pch = 19, cex = 1.2)
  # Close the connection
  dev.off()
}
############################################
# Write NMF clusters
############################################
# Write one cluster-membership CSV per dataset (rows = samples, columns =
# NMF assignments per k). IDIOM: seq_along() is safe for empty lists where
# 1:length() would yield c(1, 0); TRUE over the reassignable alias T.
for (dataset in seq_along(nmfClusters)) {
  filepath <- file.path("2.Clustering_DiffExprs", "Tables", "ClusterMembership",
                        "nmf", paste0(names(ExpData)[dataset], "_nmf.csv"))
  write.table(as.data.frame(nmfClusters[[dataset]]),
              file = filepath, sep = ",", row.names = TRUE, col.names = NA)
}
| /2.Clustering_DiffExprs/Scripts/D.NMF.R | permissive | changt34x/hgsc_subtypes | R | false | false | 4,998 | r | ############################################
# Cross-population analysis of high-grade serous ovarian cancer does
# not support four subtypes
#
# Way, G.P., Rudd, J., Wang, C., Hamidi, H., Fridley, L.B,
# Konecny, G., Goode, E., Greene, C.S., Doherty, J.A.
# ~~~~~~~~~~~~~~~~~~~~~
# This script will input a series of datasets and perform Non-negative Matrix
# Factorization (NMF)
# Pin all CRAN packages to the 2016-03-01 snapshot for reproducibility.
suppressMessages(library(checkpoint))
suppressMessages(checkpoint('2016-03-01', checkpointLocation = "."))
# Expected arguments: rank1, rank2, nruns, seed, then the dataset accessions.
args <- commandArgs(trailingOnly = TRUE)
# Example invocation (kept for interactive debugging):
# args <- c(2, 4, 100, 123, "TCGA_eset", "mayo.eset", "GSE32062.GPL6480_eset",
#          "GSE9891_eset", "GSE26712_eset", "aaces.eset")
################################
# Load Libraries
################################
library(curatedOvarianData)
library(cluster)
library(NMF)
# Load important kmeans and SAM functions (provides runNMF used below)
kmeansfxnpath <- file.path("2.Clustering_DiffExprs", "Scripts", "Functions",
"kmeans_SAM_functions.R")
source(kmeansfxnpath)
# Loads curatedOvarianData (provides LoadOVCA_Data used below)
loadovcapath <- file.path("1.DataInclusion", "Scripts", "Functions",
"LoadOVCA_Data.R")
source(loadovcapath)
################################
# Constants
################################
# Command-line settings: args[1:2] = NMF rank range, args[3] = number of
# NMF runs, args[4] = RNG seed. paste() on a single element was a no-op,
# so the values are coerced with as.numeric() directly; the seed is also
# coerced explicitly rather than relying on set.seed() coercing a string.
rank1 <- as.numeric(args[1])
rank2 <- as.numeric(args[2])
nruns <- as.numeric(args[3])
set.seed(as.numeric(args[4]))
############################################
# Load Data
############################################
# Keep only the curatedOvarianData accession arguments (those containing "eset")
argsCurated <- args[grep("eset", args)]
# The Mayo dataset has no "eset" suffix; splice it in after the first accession
# so the dataset order matches downstream expectations. append() also handles
# a single-element (or empty) argsCurated, where the previous
# argsCurated[2:length(argsCurated)] indexing produced c(2, 1) and
# duplicated/NA entries.
if ("Mayo" %in% args) {
  argsCurated <- append(argsCurated, "Mayo", after = 1)
}
# Read in the datasets, subset to the MAD (most variable) gene list
ExpData <- LoadOVCA_Data(datasets = argsCurated, genelist_subset = "madgenes")
############################################
# Run NMF
############################################
# For each dataset: load the matching k-means cluster assignments, run NMF for
# each k in rank1:rank2 (mapping NMF clusters onto the k-means labels), and
# write consensus-map and cophenetic-correlation figures.
nmfClusters <- list()
for (dataset in seq_along(ExpData)) {
  # Load corresponding k-means cluster membership file
  membfile <- file.path("2.Clustering_DiffExprs",
                        "Tables", "ClusterMembership", "kmeans",
                        paste0("KMembership_", names(ExpData)[[dataset]],
                               ".csv"))
  ClusterAssign <- read.csv(file = membfile, row.names = 1)
  # Initialize a blank matrix with the same number of rows as samples and
  # one column per tested k
  newMatrix <- matrix(NA, nrow = nrow(ClusterAssign),
                      ncol = length(rank1:rank2))
  rownames(newMatrix) <- rownames(ClusterAssign)
  # For each k, perform NMF and store the cluster memberships in newMatrix.
  # NOTE(review): k is computed as centroid + 1, which matches rank1:rank2
  # only when rank1 == 2 (as supplied by this pipeline's arguments).
  for (centroid in seq_along(rank1:rank2)) {
    fname <- paste0("NMF_Clusters_", names(ExpData)[dataset], "_K",
                    centroid + 1)
    # Run custom NMF function with `nruns` runs for this k
    newMatrix[, centroid] <- runNMF(ExpData[[dataset]],
                                    k = centroid + 1, nrun = nruns,
                                    KClusterAssign = ClusterAssign,
                                    fname = fname)
  }
  # Assign results to list
  nmfClusters[[dataset]] <- newMatrix
  # Run custom NMF function again (fewer runs) to output cophenetic diagnostics
  coph_coeff <- runNMF(ExpData[[dataset]], coph = TRUE)
  # Base name shared by both figures below
  sup_fname <- paste0("NMF_Clusters_", names(ExpData)[dataset])
  # Write the consensus mapping as a figure
  fname <- file.path("2.Clustering_DiffExprs", "Figures", "nmf",
                     "CopheneticMaps", paste0(sup_fname, "_k2-6.png"))
  png(fname, width = 950, height = 550)
  # Plot consensus maps
  consensusmap(coph_coeff$consensus[1:5], labCol = NA, labRow = NA,
               main = "", fontsize = 12)
  # Close the graphics device
  dev.off()
  # Write the cophenetic coefficient as a figure
  fname <- file.path("2.Clustering_DiffExprs", "Figures", "nmf",
                     "CopheneticMaps", paste0(sup_fname, "coph_coeff", ".png"))
  png(fname, width = 270, height = 230)
  # Plot cophenetic coefficient across k (x axis relabelled 2..8)
  par(mar = c(4.5, 4.5, 1.5, 1))
  plot(coph_coeff$measures$cophenetic, xaxt = "n", cex.axis = 1.5,
       cex.lab = 1.5, xlab = 'k', ylab = 'Cophenetic Correlation')
  axis(1, at = 1:7, labels = 2:8, cex.axis = 1.5)
  lines(coph_coeff$measures$cophenetic, lwd = 2)
  points(coph_coeff$measures$cophenetic, col = 'black', pch = 19, cex = 1.2)
  # Close the graphics device
  dev.off()
}
############################################
# Write NMF clusters
############################################
# Export one CSV of cluster memberships per dataset (rows = samples,
# one column per tested k) under Tables/ClusterMembership/nmf.
# seq_along() is safe for zero-length lists where 1:length() is not,
# and row.names = TRUE spells out the reassignable shortcut T.
for (dataset in seq_along(nmfClusters)) {
  filepath <- file.path("2.Clustering_DiffExprs", "Tables", "ClusterMembership",
                        "nmf", paste0(names(ExpData)[dataset], "_nmf.csv"))
  write.table(as.data.frame(nmfClusters[[dataset]]),
              file = filepath, sep = ",", row.names = TRUE, col.names = NA)
}
|
# NOTE(review): install.packages() should never run inside app code;
# correctly left commented out.
#install.packages("shiny")
library("shiny")
library("plotly")
library("DT")
# Shiny UI definition: a title, a sidebar of input controls, and a tabbed
# main panel. Input/output ids here must match the server definition
# (presumably in a companion server.R -- not visible in this file).
my_ui <- fluidPage (
titlePanel(strong("United States Mortality Rates Report")),
br(),
sidebarLayout(
# Sidebar inputs: state selector, year slider (1980-2014), mortality type.
sidebarPanel(
h3("Control Panel"),
selectInput("state_select",
label = p("Select State"),
choices = state.name, selected = "Alabama"
),
sliderInput("year",
label = "Year",
min = 1980, max = 2014, value = 1980
),
selectInput("abuse_type",
label = "Mortality Type",
choices = c("Alcohol use disorders", "Drug use disorders", "Interpersonal violence", "Self-harm"),
selected = "Alcohol use disorders",
multiple = FALSE
)
),
# Main panel: one tab per research question.
mainPanel(
h3("Findings Panel"),
tabsetPanel(type = "tabs",
# Tab 1: static description of the data source (no reactive outputs).
tabPanel(
"Data Introduction", h4(
"The data set we are using is mortality rates in the United States by substance abuse disorders
and intentional injurious collected by the Institute for Health Metrics and Evaluation Institute
for Health Metrics of the University of Washington. This data uses US census data for populations
as a base to form mortality rates based on total individual reported cases. We will also be using
the complete data set from the US census that this report uses for further demographic, socio-economic,
and other features of county populations to find deeper trends. This is needed to truly find the potential
reasons behind the data’s mortality rates. The main mortality data set includes information from
1980-2014. This includes mortality rates by state and county. It also breaks down the causes of
mortality by county with a FIPS number attached to that county. This provides vital information that
will aid us in being able to visually map this data, which can help us find initial geographic trends
to further explore. Past geographic data the only demographic segmentation of by gender, which is why
we are including other census data. The domain knowledge for this project will be understanding the most
common types of mortalities in the United States and how certain population features can impact these
types in order to have a starting place for our analysis. This will be through researching causes for
these types through government reports on public policy and its past effects on population health.
This system will allow you to look through our analysis on four core problems listed in the next tabs.
You can filter the data visualizations by state, year, and mortality where it is appropriate."
),
br(),
h4("Data Link: http://ghdx.healthdata.org/record/united-states-substance-use-disorders-and-intentional-injuries-mortality-rates-county-1980")
),
# Tab 2: generational comparison (plots/tables rendered by the server).
tabPanel(
"Generational Data", h3("As this data set contains the lives of those in different generations is there any
major difference between the mortality rates and types for each generation? What could
some of the historical events or factors that could have contributed to this correlations?"),
br(),
h3("Visualizations"),
textOutput("graph_a"),
br(),
plotOutput("question_one_plot_a"),
br(),
textOutput("table_a"),
br(),
DTOutput("question_one_table_a"),
br(),
textOutput("graph_b"),
br(),
plotOutput("question_one_plot_b"),
br(),
h3("Observations and Analysis"),
textOutput("analysis_one"),
br(),
h3("Conclusion"),
textOutput("conclusion_one"),
br(),
h3("Reference"),
htmlOutput("reference_one")
),
# Tab 3: geographic ripple effects; includes a plotly choropleth and
# per-output CSS sizing injected via tags$head(tags$style(...)).
tabPanel("Ripple Effects",
h3("Are there high concentration regions of certain mortality types and do these
concentrated areas have noticeable ripple effects in surrounding states? In
other words, does the mortality rate of a single state affect those of its
neighboring states? What are the possible factors contributing to the
results."),
br(),
h3("Visualizations"),
br(),
plotlyOutput("choropleth_map", height = 500),
DTOutput("mortality_rate_table1"),
br(),
tableOutput("influence"),
textOutput("variable_explained"),
h3("Observations and Analysis"),
tags$head(tags$style("#variable_explained{font-size: 16px;
font-style: italic;}")),
h4("Alcohol"),
textOutput("alcohol_analysis"),
br(),
tags$head(tags$style("#alcohol_analysis{font-size: 18px;}")),
h4("Drug"),
textOutput("drug_analysis"),
br(),
tags$head(tags$style("#drug_analysis{font-size: 18px;}")),
h4("Self-harm"),
textOutput("self_harm_analysis"),
br(),
tags$head(tags$style("#self_harm_analysis{font-size: 18px;}")),
h4("Interpersonal Violence"),
textOutput("interpersonal_violence_analysis"),
br(),
tags$head(tags$style("#interpersonal_violence_analysis{font-size: 18px;}")),
h3("Conclusion"),
textOutput("conclusion"),
tags$head(tags$style("#conclusion{font-size: 18px;}")),
h3("Reference"),
htmlOutput("reference")
),
# Tab 4: economic prosperity vs. mortality.
tabPanel( "Economic Prosperity and Mortality",
h3("Do mortality rate and economic prosperity have an effect on each other?
The two seem unrelated, but are they actually similar or do they have a
relationship to each other? Correlation between socio-economics and how
wealth affects overall mortality rates for populations (are wealthier
populations less prone to death overall/to certain types?)."),
br(),
plotOutput("question_three_table_a"),
br(),
p("*Using sample data from Wyoming 1998"),
br(),
h3("Prosperity and Alcohol Use Disorder Analysis"),
textOutput("money_alcohol_analysis"),
br(),
h3("Prosperity and Self-harm Analysis"),
textOutput("money_self_harm_analysis"),
br(),
h3("Prosperity and Drug Use Disorder Analysis"),
textOutput("money_drug_analysis"),
br(),
h3("Prosperity and Interpersonal Violence Analysis"),
textOutput("money_interpersonal_analysis"),
br(),
h3("Reference"),
htmlOutput("reference_three")
),
# Tab 5: self-harm trends vs. health-bill counts.
tabPanel("Outside-the-Box Analysis: Self-Harm and Health Bills",
h3("Over the past 20 years, with increases in mental illness/disorder awareness
and treatment, is there a decrease in self-harm mortality? How many health
bills have been passed in a given year and how much on average has each bill
changed the rate? Given some bills need time to ramp up, what is this overall
change based on current overall number of bills?"),
br(),
h3("Self-Harm Mortality Rates"),
br(),
p("Let's review the average self-harm mortality rates in the United States every 5 years:"),
br(),
plotOutput("self_harm_review_line"),
DTOutput("self_harm_review_avgs"),
em(textOutput("self_harm_review_explain")),
br(),
h3("Health Bill Counts and Changes in Mortality Rate"),
br(),
p("Now let's see the correlation between health bill counts and change in mortality rates:"),
br(),
DTOutput("mental_health_bills"),
em(textOutput("mental_health_bills_explain")),
br(),
h3("Observations and Analysis"),
textOutput("mental_health_bills_analysis"),
br(),
h3("Conclusion"),
textOutput("conclusion_four"),
br(),
h3("Reference"),
htmlOutput("reference_four")
)
)
)
)
)
shinyUI(my_ui) | /ui.R | no_license | ThiemoLo/final_project | R | false | false | 10,198 | r | #install.packages("shiny")
library("shiny")
library("plotly")
library("DT")
my_ui <- fluidPage (
titlePanel(strong("United States Mortality Rates Report")),
br(),
sidebarLayout(
sidebarPanel(
h3("Control Panel"),
selectInput("state_select",
label = p("Select State"),
choices = state.name, selected = "Alabama"
),
sliderInput("year",
label = "Year",
min = 1980, max = 2014, value = 1980
),
selectInput("abuse_type",
label = "Mortality Type",
choices = c("Alcohol use disorders", "Drug use disorders", "Interpersonal violence", "Self-harm"),
selected = "Alcohol use disorders",
multiple = FALSE
)
),
mainPanel(
h3("Findings Panel"),
tabsetPanel(type = "tabs",
tabPanel(
"Data Introduction", h4(
"The data set we are using is mortality rates in the United States by substance abuse disorders
and intentional injurious collected by the Institute for Health Metrics and Evaluation Institute
for Health Metrics of the University of Washington. This data uses US census data for populations
as a base to form mortality rates based on total individual reported cases. We will also be using
the complete data set from the US census that this report uses for further demographic, socio-economic,
and other features of county populations to find deeper trends. This is needed to truly find the potential
reasons behind the data’s mortality rates. The main mortality data set includes information from
1980-2014. This includes mortality rates by state and county. It also breaks down the causes of
mortality by county with a FIPS number attached to that county. This provides vital information that
will aid us in being able to visually map this data, which can help us find initial geographic trends
to further explore. Past geographic data the only demographic segmentation of by gender, which is why
we are including other census data. The domain knowledge for this project will be understanding the most
common types of mortalities in the United States and how certain population features can impact these
types in order to have a starting place for our analysis. This will be through researching causes for
these types through government reports on public policy and its past effects on population health.
This system will allow you to look through our analysis on four core problems listed in the next tabs.
You can filter the data visualizations by state, year, and mortality where it is appropriate."
),
br(),
h4("Data Link: http://ghdx.healthdata.org/record/united-states-substance-use-disorders-and-intentional-injuries-mortality-rates-county-1980")
),
tabPanel(
"Generational Data", h3("As this data set contains the lives of those in different generations is there any
major difference between the mortality rates and types for each generation? What could
some of the historical events or factors that could have contributed to this correlations?"),
br(),
h3("Visualizations"),
textOutput("graph_a"),
br(),
plotOutput("question_one_plot_a"),
br(),
textOutput("table_a"),
br(),
DTOutput("question_one_table_a"),
br(),
textOutput("graph_b"),
br(),
plotOutput("question_one_plot_b"),
br(),
h3("Observations and Analysis"),
textOutput("analysis_one"),
br(),
h3("Conclusion"),
textOutput("conclusion_one"),
br(),
h3("Reference"),
htmlOutput("reference_one")
),
tabPanel("Ripple Effects",
h3("Are there high concentration regions of certain mortality types and do these
concentrated areas have noticeable ripple effects in surrounding states? In
other words, does the mortality rate of a single state affect those of its
neighboring states? What are the possible factors contributing to the
results."),
br(),
h3("Visualizations"),
br(),
plotlyOutput("choropleth_map", height = 500),
DTOutput("mortality_rate_table1"),
br(),
tableOutput("influence"),
textOutput("variable_explained"),
h3("Observations and Analysis"),
tags$head(tags$style("#variable_explained{font-size: 16px;
font-style: italic;}")),
h4("Alcohol"),
textOutput("alcohol_analysis"),
br(),
tags$head(tags$style("#alcohol_analysis{font-size: 18px;}")),
h4("Drug"),
textOutput("drug_analysis"),
br(),
tags$head(tags$style("#drug_analysis{font-size: 18px;}")),
h4("Self-harm"),
textOutput("self_harm_analysis"),
br(),
tags$head(tags$style("#self_harm_analysis{font-size: 18px;}")),
h4("Interpersonal Violence"),
textOutput("interpersonal_violence_analysis"),
br(),
tags$head(tags$style("#interpersonal_violence_analysis{font-size: 18px;}")),
h3("Conclusion"),
textOutput("conclusion"),
tags$head(tags$style("#conclusion{font-size: 18px;}")),
h3("Reference"),
htmlOutput("reference")
),
tabPanel( "Economic Prosperity and Mortality",
h3("Do mortality rate and economic prosperity have an effect on each other?
The two seem unrelated, but are they actually similar or do they have a
relationship to each other? Correlation between socio-economics and how
wealth affects overall mortality rates for populations (are wealthier
populations less prone to death overall/to certain types?)."),
br(),
plotOutput("question_three_table_a"),
br(),
p("*Using sample data from Wyoming 1998"),
br(),
h3("Prosperity and Alcohol Use Disorder Analysis"),
textOutput("money_alcohol_analysis"),
br(),
h3("Prosperity and Self-harm Analysis"),
textOutput("money_self_harm_analysis"),
br(),
h3("Prosperity and Drug Use Disorder Analysis"),
textOutput("money_drug_analysis"),
br(),
h3("Prosperity and Interpersonal Violence Analysis"),
textOutput("money_interpersonal_analysis"),
br(),
h3("Reference"),
htmlOutput("reference_three")
),
tabPanel("Outside-the-Box Analysis: Self-Harm and Health Bills",
h3("Over the past 20 years, with increases in mental illness/disorder awareness
and treatment, is there a decrease in self-harm mortality? How many health
bills have been passed in a given year and how much on average has each bill
changed the rate? Given some bills need time to ramp up, what is this overall
change based on current overall number of bills?"),
br(),
h3("Self-Harm Mortality Rates"),
br(),
p("Let's review the average self-harm mortality rates in the United States every 5 years:"),
br(),
plotOutput("self_harm_review_line"),
DTOutput("self_harm_review_avgs"),
em(textOutput("self_harm_review_explain")),
br(),
h3("Health Bill Counts and Changes in Mortality Rate"),
br(),
p("Now let's see the correlation between health bill counts and change in mortality rates:"),
br(),
DTOutput("mental_health_bills"),
em(textOutput("mental_health_bills_explain")),
br(),
h3("Observations and Analysis"),
textOutput("mental_health_bills_analysis"),
br(),
h3("Conclusion"),
textOutput("conclusion_four"),
br(),
h3("Reference"),
htmlOutput("reference_four")
)
)
)
)
)
shinyUI(my_ui) |
############# WEEK 4 - ETL ##################
# Note: this section was copied from Professor Hugo's script, available at:
# https://github.com/hugoavmedeiros/etl_com_r/blob/master/scripts/etl1.R
# It walks through a complete ETL process.
######### PART ONE - Extraction ############
# Extraction via .csv
# Load the Recife traffic-accident data sets straight from the city's
# open-data portal (PCR)
sinistrosRecife2020Raw <- read.csv2('http://dados.recife.pe.gov.br/dataset/44087d2d-73b5-4ab3-9bd8-78da7436eed1/resource/fc1c8460-0406-4fff-b51a-e79205d1f1ab/download/acidentes_2020-novo.csv', sep = ';', encoding = 'UTF-8')
sinistrosRecife2021Raw <- read.csv2('http://dados.recife.pe.gov.br/dataset/44087d2d-73b5-4ab3-9bd8-78da7436eed1/resource/2caa8f41-ccd9-4ea5-906d-f66017d6e107/download/acidentes_2021-jan.csv', sep = ';', encoding = 'UTF-8')
# Stack the two yearly data sets with rbind (binds by rows)
sinistrosRecifeRaw <- rbind(sinistrosRecife2020Raw, sinistrosRecife2021Raw)
# Inspect the structure of the combined data
str(sinistrosRecifeRaw)
# Convert the date column to Date format
class(sinistrosRecifeRaw$data) # as you can see, it was still character
sinistrosRecifeRaw$data <- as.Date(sinistrosRecifeRaw$data, format = "%Y-%m-%d")
# Convert the accident-nature column from text to factor
class(sinistrosRecifeRaw$natureza_acidente)
sinistrosRecifeRaw$natureza_acidente <- as.factor(sinistrosRecifeRaw$natureza_acidente)
# (factors are a memory-efficient way to store repeated character values)
# Replace every missing value (NA) in a vector with 0.
# invisible() mirrors the original's invisible return (the value of an
# assignment as the last expression), so interactive calls do not auto-print.
naZero <- function(values) {
  invisible(ifelse(is.na(values), 0, values))
}
# Apply the naZero function to all the count columns (15 through 25)
sinistrosRecifeRaw[, 15:25] <- sapply(sinistrosRecifeRaw[, 15:25], naZero)
# Export in R's native format
saveRDS(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.rds")
# Export in tabular format (.csv) - the standard for interoperability
write.csv2(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.csv")
#### Extraction with a Staging Area ####
#### Staging area and memory usage
# do we keep a staging area??
ls() # list every object in the R session
# let's see how much memory each object is occupying
# NOTE(review): quote=F uses the reassignable shortcut F; prefer FALSE.
for (itm in ls()) {
print(formatC(c(itm, object.size(get(itm))),
format="d",
width=30),
quote=F)
}
ls() # list every object in the R session
# now, let's remove the intermediates
gc() # explicit use of the garbage collector
rm(list = c('sinistrosRecife2020Raw', 'sinistrosRecife2021Raw'))
# to delete every object: rm(list = ls())
# to delete every object except the ones listed:
rm(list=(ls()[ls()!="sinistrosRecifeRaw"]))
saveRDS(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.rds")
write.csv2(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.csv")
#############################LOAD########
##########
# NOTE(review): install.packages() inside a script re-installs on every run;
# usually this belongs in setup, guarded by requireNamespace().
install.packages("microbenchmark")
library(microbenchmark)
# Export in R's native format
saveRDS(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.rds")
# Export in tabular format (.csv) - the standard for interoperability
write.csv2(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.csv")
# Load the data back in R's native format
sinistrosRecife <- readRDS('Semana 4/bases_tratadas/sinistrosRecife.rds')
# Load the data back in tabular format (.csv)
sinistrosRecife <- read.csv2('Semana 4/bases_tratadas/sinistrosRecife.csv', sep = ';')
# Compare the two export (and read) processes with microbenchmark
microbenchmark(a <- saveRDS(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.rds"), b <- write.csv2(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.csv"), times = 30L)
microbenchmark(a <- readRDS('Semana 4/bases_tratadas/sinistrosRecife.rds'), b <- read.csv2('Semana 4/bases_tratadas/sinistrosRecife.csv', sep = ';'), times = 10L)
| /Semana 4/semana 4.R | no_license | marcustorresz/ETLdata | R | false | false | 3,894 | r | ############# SEMANA 4 - ETL ##################
# É importante deixar claro que essa parte foi copiada do script do Professor Hugo,
# disponível em: https://github.com/hugoavmedeiros/etl_com_r/blob/master/scripts/etl1.R
#Essa linha de código mostra o processo de ETL completo
######### PRIMEIRA PARTE- Extração ############
#Extração via .csv
# carrega a base de snistros de transito do site da PCR
sinistrosRecife2020Raw <- read.csv2('http://dados.recife.pe.gov.br/dataset/44087d2d-73b5-4ab3-9bd8-78da7436eed1/resource/fc1c8460-0406-4fff-b51a-e79205d1f1ab/download/acidentes_2020-novo.csv', sep = ';', encoding = 'UTF-8')
sinistrosRecife2021Raw <- read.csv2('http://dados.recife.pe.gov.br/dataset/44087d2d-73b5-4ab3-9bd8-78da7436eed1/resource/2caa8f41-ccd9-4ea5-906d-f66017d6e107/download/acidentes_2021-jan.csv', sep = ';', encoding = 'UTF-8')
# junta as bases de dados com comando rbind (juntas por linhas (row))
sinistrosRecifeRaw <- rbind(sinistrosRecife2020Raw, sinistrosRecife2021Raw)
# observa a estrutura dos dados
str(sinistrosRecifeRaw)
# modifca a data para formato date
class(sinistrosRecifeRaw$data) # como vcs podem ver, antes estava como character
sinistrosRecifeRaw$data <- as.Date(sinistrosRecifeRaw$data, format = "%Y-%m-%d")
# modifica natureza do sinistro de texto para fator
class(sinistrosRecifeRaw$natureza_acidente)
sinistrosRecifeRaw$natureza_acidente <- as.factor(sinistrosRecifeRaw$natureza_acidente)
#forma otimizada de ler caracter
# cria funçaõ para substituir not available (na) por 0
naZero <- function(x) {
x <- ifelse(is.na(x), 0, x)
}
# aplica a função naZero a todas as colunas de contagem
sinistrosRecifeRaw[, 15:25] <- sapply(sinistrosRecifeRaw[, 15:25], naZero)
# exporta em formato nativo do R
saveRDS(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.rds")
# exporta em formato tabular (.csv) - padrão para interoperabilidade
write.csv2(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.csv")
#### Extração em Staging Area ####
#### Staging area e uso de memória
# ficamos com staging area??
ls() # lista todos os objetos no R
# vamos ver quanto cada objeto está ocupando
for (itm in ls()) {
print(formatC(c(itm, object.size(get(itm))),
format="d",
width=30),
quote=F)
}
ls() # lista todos os objetos no R
# agora, vamos remover
gc() # uso explícito do garbage collector
rm(list = c('sinistrosRecife2020Raw', 'sinistrosRecife2021Raw'))
# deletando todos os elementos: rm(list = ls())
# deletando todos os elementos, menos os listados:
rm(list=(ls()[ls()!="sinistrosRecifeRaw"]))
saveRDS(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.rds")
write.csv2(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.csv")
#############################LOAD########
##########
install.packages("microbenchmark")
library(microbenchmark)
# exporta em formato nativo do R
saveRDS(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.rds")
# exporta em formato tabular (.csv) - padrão para interoperabilidade
write.csv2(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.csv")
# carrega base de dados em formato nativo R
sinistrosRecife <- readRDS('Semana 4/bases_tratadas/sinistrosRecife.rds')
# carrega base de dados em formato tabular (.csv) - padrão para interoperabilidade
sinistrosRecife <- read.csv2('Semana 4/bases_tratadas/sinistrosRecife.csv', sep = ';')
# compara os dois processos de exportação, usando a função microbenchmark
microbenchmark(a <- saveRDS(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.rds"), b <- write.csv2(sinistrosRecifeRaw, "Semana 4/bases_tratadas/sinistrosRecife.csv"), times = 30L)
microbenchmark(a <- readRDS('Semana 4/bases_tratadas/sinistrosRecife.rds'), b <- read.csv2('Semana 4/bases_tratadas/sinistrosRecife.csv', sep = ';'), times = 10L)
|
#' Set Print Method
#'
#' The function allows the user to define options relating to the print method for \code{data.table}.
#'
#' @param class should the variable class be printed? Both \code{TRUE} and
#'   \code{FALSE} are applied (\code{options("datatable.print.class")})
#' @param topn the number of rows to print (both head and tail) if \code{nrows(DT) > nrows}. (\code{options("datatable.print.topn")})
#' @param rownames should rownames be printed? (\code{options("datatable.print.rownames")})
#' @param nrows total number of rows to print (\code{options("datatable.print.nrows")})
#' @param trunc.cols if \code{TRUE}, only the columns that fit in the console are printed (with a message stating the variables not shown, similar to \code{tibbles}; \code{options("datatable.print.trunc.cols")}). This only works on \code{data.table} versions higher than \code{1.12.6} (i.e. not currently available but anticipating the eventual release).
#'
#' @return None. This function is used for its side effect of changing options.
#'
#' @examples
#'
#' dt_print_options(
#'   class = TRUE,
#'   topn = 5,
#'   rownames = TRUE,
#'   nrows = 100,
#'   trunc.cols = TRUE
#' )
#' @importFrom utils packageVersion
#'
#' @export
dt_print_options <- function(class = TRUE,
                             topn = 5,
                             rownames = TRUE,
                             nrows = 100,
                             trunc.cols = TRUE) {
  # Set the option unconditionally so `class = FALSE` disables class printing;
  # previously FALSE was silently ignored and the option was left untouched.
  options("datatable.print.class" = isTRUE(class))
  options("datatable.print.topn" = topn)
  options("datatable.print.nrows" = nrows)
  options("datatable.print.rownames" = rownames)
  # datatable.print.trunc.cols is only honoured by data.table >= 1.12.9,
  # so guard on the installed version to avoid setting a dead option.
  if (packageVersion("data.table") >= "1.12.9") {
    options("datatable.print.trunc.cols" = trunc.cols)
  }
}
| /R/print.R | no_license | TysonStanley/tidyfast | R | false | false | 1,693 | r | #' Set Print Method
#'
#' The function allows the user to define options relating to the print method for \code{data.table}.
#'
#' @param class should the variable class be printed? (\code{options("datatable.print.class")})
#' @param topn the number of rows to print (both head and tail) if \code{nrows(DT) > nrows}. (\code{options("datatable.print.topn")})
#' @param rownames should rownames be printed? (\code{options("datatable.print.rownames")})
#' @param nrows total number of rows to print (\code{options("datatable.print.nrows")})
#' @param trunc.cols if \code{TRUE}, only the columns that fit in the console are printed (with a message stating the variables not shown, similar to \code{tibbles}; \code{options("datatable.print.trunc.cols")}). This only works on \code{data.table} versions higher than \code{1.12.6} (i.e. not currently available but anticipating the eventual release).
#'
#' @return None. This function is used for its side effect of changing options.
#'
#' @examples
#'
#' dt_print_options(
#' class = TRUE,
#' topn = 5,
#' rownames = TRUE,
#' nrows = 100,
#' trunc.cols = TRUE
#' )
#' @importFrom utils packageVersion
#'
#' @export
dt_print_options <- function(class = TRUE,
topn = 5,
rownames = TRUE,
nrows = 100,
trunc.cols = TRUE) {
if (isTRUE(class)) options("datatable.print.class" = TRUE)
options("datatable.print.topn" = topn)
options("datatable.print.nrows" = nrows)
options("datatable.print.rownames" = rownames)
if (packageVersion("data.table") >= "1.12.9") {
options("datatable.print.trunc.cols" = trunc.cols)
}
}
|
# ISLR chapter 5 exercise 5.6: bootstrap standard errors for logistic
# regression coefficients on the Default data set.
library(ISLR)
library(boot)
data(Default)
# NOTE(review): attach() is discouraged (it masks names on the search path);
# kept as-is to preserve this script's behavior.
attach(Default)
set.seed(1)
# Fit default ~ income + balance and print the glm() coefficient estimates
# and standard errors for comparison with the bootstrap estimates below.
glm.fit = glm(default~income+balance,family=binomial)
coef(summary(glm.fit))[2:3,1:2]
coef(glm.fit)
# Bootstrap statistic: fit the logistic regression of default on income and
# balance using only the observations selected by `index`, and return the
# fitted coefficient vector (intercept, income, balance).
boot.fn <- function(data, index) {
  fit <- glm(default ~ income + balance, family = binomial,
             data = data, subset = index)
  coef(fit)
}
# Bootstrap the coefficients with 1000 resamples; compare the bootstrap
# standard errors against the glm() standard errors printed above.
boot(Default,boot.fn,1000)
| /Chapter 5 - Resampling Methods - Cross Validation and Bootstrap/Homework 5.6.R | no_license | bpafoshizle/IntroStatLearn | R | false | false | 403 | r | library(ISLR)
library(boot)
data(Default)
attach(Default)
set.seed(1)
glm.fit = glm(default~income+balance,family=binomial)
coef(summary(glm.fit))[2:3,1:2]
coef(glm.fit)
boot.fn = function(data, index){
return(
coef(glm(default~income+balance
,family=binomial
,data=data
,subset=index))
)
}
boot(Default,boot.fn,1000)
|
### Objective: Add zeros for hauls that occurred where species were absent
library(tidyverse)
library(lubridate)
### Read in catch data
### (read_csv picks up the single file in Data/ matching the pattern)
catch <- read_csv(paste0("Data/", list.files("Data", pattern="Groundfish_all")))
### Read in haul data
haul <- read_csv(paste0("Data/", list.files("Data", pattern="Trawl_info_all")))
### Add in column for area_swept_km and day of year
### (old data.table variant kept below for reference only)
#wctri_hauls_spp_zeros2[,"Date":=as.Date(gsub(" .*$", "", START_TIME), format='%m/%d/%Y')]
#wctri_hauls_spp_zeros2[,"Numerical_calendar_date":=yday(Date)]
### Derive day-of-year, calendar year, and area swept in km^2
### (1 ha = 0.01 km^2, hence the 0.01 factor)
haul2 <- haul %>%
mutate(Numerical_calendar_date=yday(ymd(date_yyyymmdd)),
Year=year(ymd(date_yyyymmdd)),
AreaSwept_km2=0.01*area_swept_ha_der)
add.zeros.sp <- function(bio.dt, haul.dt){
bio.dt$pres <- bio.dt$total_catch_wt_kg > 0
### Merge bio with hauls so that every haul is repeated
#spp <- sort(unique(bio.dt$scientific_name))
spp <- data.frame(scientific_name=unique(bio.dt$scientific_name), common_name=unique(bio.dt$common_name))
print("Step1: Create master haul_spp df")
hauls <- unique(haul.dt$trawl_id)
haul.spp.master <- expand.grid(trawl_id=hauls, scientific_name=spp$scientific_name)
haul.spp.master2 <- merge(haul.spp.master, spp, by=c("scientific_name"))
### Merge back with haul info to get those haul meta data associated with file
haul.spp <- merge(haul.dt, haul.spp.master2, all.y=T, by="trawl_id")
cols.shared <- colnames(bio.dt)[colnames(bio.dt)%in%colnames(haul.spp)]
# ### Merge with mean biomass to get those data associated
# haul.spp.mean.bio <- merge(haul.spp, mean.bio.yr, by=c("spp", "year"))
print("Step2: Merge with catch bio and add zeros")
### Merge with biomass
bio.haul.zeros <- merge(haul.spp, bio.dt,
by=cols.shared, all.x=T)
##
### Make pres FALSE where NA
bio.haul.zeros$pres2 <- as.logical(ifelse(is.na(bio.haul.zeros$pres), "FALSE", bio.haul.zeros$pres))
### Make NAs for absences into zero biomass
bio.haul.zeros$total_catch_wt_kg <- ifelse(is.na(bio.haul.zeros$total_catch_wt_kg), 0, bio.haul.zeros$total_catch_wt_kg)
bio.haul.zeros$total_catch_numbers <- ifelse(is.na(bio.haul.zeros$total_catch_numbers), 0, bio.haul.zeros$total_catch_numbers)
return(bio.haul.zeros)
}
# Run the zero-fill and convert to a tibble for the tidyverse steps below
catch.zeros <- as_tibble(add.zeros.sp(bio.dt=catch, haul.dt=haul2))
# Rename/select the columns into the layout expected by VAST; Survey
# distinguishes the Triennial shelf survey from the WCGBTS (any other
# project label becomes NA).
vast.catch <- catch.zeros %>%
transmute(Trawl.Identifier=trawl_id,
Year=Year,
Longitude=longitude_dd,
Latitude=latitude_dd,
Survey=ifelse(project=="Groundfish Triennial Shelf Survey","Tri.Shelf",
ifelse(project=="Groundfish Slope and Shelf Combination Survey", "WCGBTS", NA)),
Numerical_calendar_date=Numerical_calendar_date,
Catch_KG=total_catch_wt_kg,
AreaSwept_km2=AreaSwept_km2,
Species=common_name)
# Check that the processed file keeps the same number of hauls
# (these expressions just print; nothing is asserted)
length(unique(haul$trawl_id))
length(unique(vast.catch$Trawl.Identifier))
### Check that each haul has lat and lon (both sums should be 0)
sum(is.na(vast.catch$Longitude))
sum(is.na(vast.catch$Latitude))
# Test subset to one species (quick interactive sanity check)
filter(vast.catch, Species=="sablefish")
# Persist the VAST-ready table
save(vast.catch, file="Data/catch_for_vast.RData")
| /Code/Hauls_add_zeros_wc.R | no_license | bselden/StockAvailability | R | false | false | 3,260 | r | ### Objective: Add zeros for hauls that occurred where species were absent
library(tidyverse)
library(lubridate)
### Read in catch data
# Assumes exactly one file in Data/ matches each pattern -- confirm
catch <- read_csv(paste0("Data/", list.files("Data", pattern="Groundfish_all")))
### Read in haul data
haul <- read_csv(paste0("Data/", list.files("Data", pattern="Trawl_info_all")))
### Add in column for area_swept_km and day of year
# (commented-out data.table version kept for reference)
#wctri_hauls_spp_zeros2[,"Date":=as.Date(gsub(" .*$", "", START_TIME), format='%m/%d/%Y')]
#wctri_hauls_spp_zeros2[,"Numerical_calendar_date":=yday(Date)]
haul2 <- haul %>%
mutate(Numerical_calendar_date=yday(ymd(date_yyyymmdd)),
Year=year(ymd(date_yyyymmdd)),
# 1 ha = 0.01 km^2
AreaSwept_km2=0.01*area_swept_ha_der)
add.zeros.sp <- function(bio.dt, haul.dt){
bio.dt$pres <- bio.dt$total_catch_wt_kg > 0
### Merge bio with hauls so that every haul is repeated
#spp <- sort(unique(bio.dt$scientific_name))
spp <- data.frame(scientific_name=unique(bio.dt$scientific_name), common_name=unique(bio.dt$common_name))
print("Step1: Create master haul_spp df")
hauls <- unique(haul.dt$trawl_id)
haul.spp.master <- expand.grid(trawl_id=hauls, scientific_name=spp$scientific_name)
haul.spp.master2 <- merge(haul.spp.master, spp, by=c("scientific_name"))
### Merge back with haul info to get those haul meta data associated with file
haul.spp <- merge(haul.dt, haul.spp.master2, all.y=T, by="trawl_id")
cols.shared <- colnames(bio.dt)[colnames(bio.dt)%in%colnames(haul.spp)]
# ### Merge with mean biomass to get those data associated
# haul.spp.mean.bio <- merge(haul.spp, mean.bio.yr, by=c("spp", "year"))
print("Step2: Merge with catch bio and add zeros")
### Merge with biomass
bio.haul.zeros <- merge(haul.spp, bio.dt,
by=cols.shared, all.x=T)
##
### Make pres FALSE where NA
bio.haul.zeros$pres2 <- as.logical(ifelse(is.na(bio.haul.zeros$pres), "FALSE", bio.haul.zeros$pres))
### Make NAs for absences into zero biomass
bio.haul.zeros$total_catch_wt_kg <- ifelse(is.na(bio.haul.zeros$total_catch_wt_kg), 0, bio.haul.zeros$total_catch_wt_kg)
bio.haul.zeros$total_catch_numbers <- ifelse(is.na(bio.haul.zeros$total_catch_numbers), 0, bio.haul.zeros$total_catch_numbers)
return(bio.haul.zeros)
}
# Expand the raw catch records to include explicit zero-catch rows for every
# haul x species combination, keeping the result as a tibble.
catch.zeros <- as_tibble(add.zeros.sp(bio.dt=catch, haul.dt=haul2))
# Reshape/rename into the column layout expected by VAST.
vast.catch <- catch.zeros %>%
transmute(Trawl.Identifier=trawl_id,
Year=Year,
Longitude=longitude_dd,
Latitude=latitude_dd,
# Recode survey names; any other project becomes NA -- assumes only these
# two projects occur in the data (TODO confirm upstream filter)
Survey=ifelse(project=="Groundfish Triennial Shelf Survey","Tri.Shelf",
ifelse(project=="Groundfish Slope and Shelf Combination Survey", "WCGBTS", NA)),
Numerical_calendar_date=Numerical_calendar_date,
Catch_KG=total_catch_wt_kg,
AreaSwept_km2=AreaSwept_km2,
Species=common_name)
#Check that same number of hauls in processed files (counts should match)
length(unique(haul$trawl_id))
length(unique(vast.catch$Trawl.Identifier))
### Check that each haul has lat and lon (both sums should be 0)
sum(is.na(vast.catch$Longitude))
sum(is.na(vast.catch$Latitude))
#Test subset to one species
filter(vast.catch, Species=="sablefish")
# Persist the VAST-ready catch table
save(vast.catch, file="Data/catch_for_vast.RData")
|
##########################################################################
# policyMakingPowersDoubleCluster.R
# GR: November 8, 2017
# This file reads policy-making powers for lower and upper houses and
# executives.
# It first performs principal component analysis of common powers in both houses
# keeping as many dimensions as needed.
# It then finds the optimal number of clusters for both sets of houses
# In a second run, a new cluster analysis is performed that takes as input
# the clusters from the first run, and the "additional" powers that we have
# for lower houses and upper houses (this second step is carried out separately
# in upper and lower houses)
##########################################################################
library (psych)
#################################
#### Import and prepare data ####
#################################
# Raw power codings: one row per chamber/executive per constitutional period
LowerHouse <- read.table ("lowerHousePowers.txt", sep="\t", header=TRUE)
UpperHouse <- read.table ("upperHousePowers.txt", sep="\t", header=TRUE)
Executive <- read.table ("presidentialPowers.txt", sep="\t", header=TRUE)
# Short plot labels: first three letters of the country plus start year
LH.label <- paste(tolower (substr(LowerHouse$country, start=1, stop=3)), LowerHouse$startyear, sep="-")
UH.label <- paste(tolower (substr(UpperHouse$country, start=1, stop=3)), UpperHouse$startyear, sep="-")
PR.label <- paste(tolower (substr(Executive$country, start=1, stop=3)), Executive$startyear, sep="-")
LowerHouse$chamber <- rep("lower", nrow(LowerHouse))
UpperHouse$chamber <- rep("upper", nrow(UpperHouse))
# Powers coded for both chambers vs. powers coded for only one of them
sharedPowers <- c("amend","partialobservationsoverride","vetooverride"
,"budgetspending","question")
lowHouseOnly <- c("committeeslo","stafflo","introdbillslo")
uppHouseOnly <- c("vetoupper","introdbillsup")
# Stack lower- and upper-house rows on the shared powers only
Leg.W <- rbind (LowerHouse[,sharedPowers], UpperHouse[,sharedPowers])
Chamber <- c(LowerHouse$chamber, UpperHouse$chamber)
Leg.add <- rowSums(Leg.W) # simple additive scores
# Labels prefixed with L/U to distinguish chambers in joint plots
Leg.label <- c(paste("L", tolower (substr(LowerHouse$country, start=1, stop=3))
, LowerHouse$startyear, sep="-")
, paste("U", tolower (substr(UpperHouse$country, start=1, stop=3))
, UpperHouse$startyear, sep="-"))
# Principal components Executives
PR.W <- Executive[,!is.element(colnames(Executive), c("country","startyear","endyear"))]
PR.add <- rowSums (PR.W)
pc.PR <- princomp(PR.W, scores=T)
screeplot (pc.PR)
summary(pc.PR) # 2 principal components might be necessary
plot (pc.PR$scores[,1], pc.PR$scores[,2], ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-2.5,2.5), main="Executive")
text (xy.coords(pc.PR$scores[,1], jitter(pc.PR$scores[,2], amount=0.115)), labels=PR.label, cex=0.8)
plot (pc.PR$scores[,1], PR.add, ylab="Additive scores", xlab="PC1 scores"
, type="n", xlim=c(-2.5,2.5), main="Executive")
text (xy.coords(pc.PR$scores[,1], jitter(PR.add, amount=0.5)), labels=PR.label, cex=0.8)
# Keep the first two executive PCs for clustering, keyed by country-period
PR.scores <- as.matrix (pc.PR$scores[,c(1:2)])
rownames(PR.scores) <- paste (Executive$country, Executive$startyear, Executive$endyear, sep="-")
################################
#### First round clustering ####
################################
# Principal components all chambers together
pc.Leg <- princomp(Leg.W, scores=T)
screeplot (pc.Leg)
summary(pc.Leg) # 1 principal component is enough
plot (pc.Leg$scores[,1], pc.Leg$scores[,2],# Leg.add,
ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-2.5,2.5), main="Full Legislature")
text (xy.coords(pc.Leg$scores[,1],pc.Leg$scores[,2]),# jitter(Leg.add, amount=0.5)),
labels=Leg.label, cex=0.8)
# First two PCs of the pooled (lower + upper) shared powers
Leg.scores <- as.matrix (pc.Leg$scores[,1:2])
rownames(Leg.scores) <- c(paste (LowerHouse$country, LowerHouse$startyear
, LowerHouse$endyear, sep="-")
, paste (UpperHouse$country, UpperHouse$startyear
, UpperHouse$endyear, sep="-"))
# Runs with a single clustering round for lower and upper houses
Lower <- LowerHouse[,is.element(colnames(LowerHouse), c(sharedPowers, lowHouseOnly))]
pc.Lower <- princomp (Lower, scores=T)
screeplot (pc.Lower)
summary (pc.Lower) # 2 components
plot (pc.Lower$scores[,1], pc.Lower$scores[,2],# Leg.add,
ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-2.5,2.5), main="Lower House only")
# NOTE(review): the 1:39 index assumes the first 39 entries of Leg.label are
# the lower houses -- confirm against the input files if row counts change
text (xy.coords(pc.Lower$scores[,1],pc.Lower$scores[,2]),# jitter(Leg.add, amount=0.5)),
labels=Leg.label[1:39], cex=0.8)
Lower.scores <- as.matrix (pc.Lower$scores[,1:2])
rownames(Lower.scores) <- paste (LowerHouse$country, LowerHouse$startyear
, LowerHouse$endyear, sep="-")
Upper <- UpperHouse[,is.element(colnames(UpperHouse), c(sharedPowers, uppHouseOnly))]
pc.Upper <- princomp (Upper, scores=T)
screeplot (pc.Upper)
summary (pc.Upper) # 2 components
plot (pc.Upper$scores[,1], pc.Upper$scores[,2],# Leg.add,
ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-2.5,2.5), main="Upper House only")
text (xy.coords(pc.Upper$scores[,1],pc.Upper$scores[,2]),# jitter(Leg.add, amount=0.5)),
labels=Leg.label[40:length(Leg.label)], cex=0.8)
Upper.scores <- as.matrix (pc.Upper$scores[,1:2])
rownames(Upper.scores) <- paste (UpperHouse$country, UpperHouse$startyear
, UpperHouse$endyear, sep="-")
#################################
#### Finding kmeans clusters ####
#################################
# These two functions help find optimal number of clusters visually
# findKmeans iterates from 1 to 8 clusters, producing Between SS and Within SS to be analyzed later
# Fit k-means for k = 1..max.k and return the fitted objects as a list, so
# between/within sums of squares can be compared across k.
#
# x     : numeric matrix (or data frame) of observations to cluster.
# max.k : largest number of centers to try (backward-compatible default 8,
#         matching the previously hard-coded range).
findKmeans <- function (x, max.k = 8) {
  lapply(seq_len(max.k), function (k) kmeans(x, centers = k, nstart = 20))
}
# screeLikePlot produces a scree plot of the ratio of between to total SS
# The idea is to take as many clusters as needed until reaching the "bend" in the "elbow"
# For a list of kmeans fits (as returned by findKmeans), compute the
# percentage of total sum of squares explained between clusters for each fit.
# Despite its name this function does not plot; plot() its result to get a
# scree-like "elbow" chart. (vapply also makes the empty-list case return
# numeric(0) instead of erroring on x[[1]].)
screeLikePlot <- function (x) {
  vapply(x, function (fit) 100 * (fit$betweenss / fit$totss), numeric(1))
}
# Elbow plots: the centers= value below each plot was chosen by eye
# legislature scores
temp <- findKmeans (Leg.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Legislature PC scores, first run")
Leg.cluster <- kmeans(Leg.scores, centers=3, nstart=20) # I see 3 clusters
# Lower house scores
temp <- findKmeans (Lower.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Lower House PC scores, first and only run")
Lower.cluster <- kmeans(Lower.scores, centers=3, nstart=20) # I see 3 clusters
# Upper house scores
temp <- findKmeans (Upper.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Upper House PC scores, first and only run")
Upper.cluster <- kmeans(Upper.scores, centers=3, nstart=20) # I see 3 clusters
# Presidential scores
temp <- findKmeans (PR.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Executive PC scores") # I see 3
PR.cluster <- kmeans(PR.scores, centers=3, nstart=20)
#######################
#### Plots to keep ####
#######################
# Executive
graphicsPath <- "Graphs/" # Save copies here as well
pdf (paste0 (graphicsPath, "ExecutiveClustering.pdf"), h=5, w=7)
plot (pc.PR$scores[,1], pc.PR$scores[,2], ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-3,2.5), main="Executive")
text (xy.coords(pc.PR$scores[,1], jitter(pc.PR$scores[,2], amount=0.115))
, labels=PR.label, cex=0.8, col="gray")
# Overlay cluster centers, labeled by cluster id
points (xy.coords(PR.cluster$centers), pch=rownames(PR.cluster$centers), cex=1.3)
dev.off ()
# Full legislature
plot (pc.Leg$scores[,1],pc.Leg$scores[,2], xlab="PC1 scores",ylab="PC2 scores",
#, xlim=c(-2.5,2.5),
pch="", main="Full Legislature")
text (pc.Leg$scores[,1],pc.Leg$scores[,2], #1,
labels=Leg.label, cex=0.8, col="gray", srt=45)
points (Leg.cluster$centers[,1],Leg.cluster$centers[,2],# rep(1, length(Leg.cluster$centers)),
pch=rownames(Leg.cluster$centers), cex=1.3)
# Lower House 1 round clustering
pdf (paste0 (graphicsPath, "LowerHouseClustering.pdf"), h=5, w=7)
plot (pc.Lower$scores[,1], pc.Lower$scores[,2], ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-3,2.5), main="Lower House (1 round)")
text (xy.coords(pc.Lower$scores[,1], jitter(pc.Lower$scores[,2], amount=0.115))
, labels=Leg.label[1:39], cex=0.8, col="gray")
points (xy.coords(Lower.cluster$centers), pch=rownames(Lower.cluster$centers), cex=1.3)
dev.off ()
# Upper House 1 round clustering
pdf (paste0 (graphicsPath, "UpperHouseClustering.pdf"), h=5, w=7)
plot (pc.Upper$scores[,1], pc.Upper$scores[,2], ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-3,2.5), main="Upper House (1 round)")
text (xy.coords(pc.Upper$scores[,1], jitter(pc.Upper$scores[,2], amount=0.115))
, labels=Leg.label[40:length(Leg.label)], cex=0.8, col="gray")
points (xy.coords(Upper.cluster$centers), pch=rownames(Upper.cluster$centers), cex=1.3)
dev.off ()
#################################
#### Second round clustering ####
#################################
# Principal component step
# Second round: chamber-specific powers plus the first-round cluster
# assignment feed a new PCA (done separately for each chamber)
pc.LH <- princomp(cbind(LowerHouse[,lowHouseOnly],Leg.cluster$cluster[Chamber=="lower"]), scores=T)
screeplot (pc.LH)
summary(pc.LH) # 1 components is enough
LH.scores <- as.matrix (pc.LH$scores[,1])
pc.UH <- princomp(cbind(UpperHouse[,uppHouseOnly],Leg.cluster$cluster[Chamber=="upper"]), scores=T)
screeplot (pc.UH)
summary(pc.UH) # 1 component is enough, there's only two variables
UH.scores <- as.matrix (pc.UH$scores[,1])
temp <- findKmeans (LH.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Lower Houses, second run") # I see 3 clusters
LH.cluster <- kmeans(LH.scores, centers=3, nstart=20)
temp <- findKmeans (UH.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Upper Houses, second run")# I see 4 clusters
# NOTE(review): the comment above says 4 clusters but centers=3 is used -- confirm
UH.cluster <- kmeans(UH.scores, centers=3, nstart=20)
######################################################
#### Hardwire additional column with fixed labels ####
######################################################
# kmeans cluster ids are arbitrary across runs, so readable type labels are
# anchored to reference cases whose classification is known a priori.
## Presidents
#weak
label.weak <- PR.cluster$cluster[which(names (PR.cluster$cluster)=="Argentina-1957-1993")]
#proactive
label.proac <- PR.cluster$cluster[which(names (PR.cluster$cluster)=="Argentina-1994-2015")]
executive.label <- ifelse (PR.cluster$cluster==label.weak, "Weak president",
ifelse (PR.cluster$cluster==label.proac
, "Proactive president"
, "Agenda-setting president"))
## Lower House
label.only <- LH.cluster$cluster[which(names (LH.cluster$cluster)=="Bolivia-2010-2015")]
label.limited <- LH.cluster$cluster[which(names (LH.cluster$cluster)=="Argentina-1983-2015")]
lh.label <- ifelse (LH.cluster$cluster==label.only, "Only budget",
ifelse (LH.cluster$cluster==label.limited
, "Limited powers"
, "Except budget"))
## Upper House
label.only.uh <- UH.cluster$cluster[which(names (UH.cluster$cluster)=="Bolivia-2010-2015")]
label.limited.uh <- UH.cluster$cluster[which(names (UH.cluster$cluster)=="Argentina-1983-2015")]
uh.label <- ifelse (UH.cluster$cluster==label.only.uh, "Only budget",
ifelse (UH.cluster$cluster==label.limited.uh
, "Limited powers"
, "Except budget"))
############################
#### More plots to keep ####
############################
pdf (paste0 (graphicsPath, "LowerHouseClustering2ndRound.pdf"), h=5, w=7)
# Second-round scores are 1-D, so use a strip chart with rotated labels
stripchart (pc.LH$scores[,1], xlab="PC1 scores (second round)"
,type="n", xlim=c(-2.5,2.5), main="Lower House", ylab="")
text (pc.LH$scores[,1], 1,
labels=names(LH.cluster$cluster), cex=0.8, col="gray", srt=45)
# NOTE(review): unique(lh.label) assumes first-appearance order of the labels
# matches the row order of LH.cluster$centers -- confirm
text (LH.cluster$centers, rep(1, length(LH.cluster$centers)), labels=unique(lh.label), cex=1.3, srt=45)
dev.off()
pdf (paste0 (graphicsPath, "UpperHouseClustering2ndRound.pdf"), h=5, w=7)
stripchart (pc.UH$scores[,1], xlab="PC1 scores (second round)"
,type="n", xlim=c(-2.5,2.5), main="Upper House", ylab="")
text (pc.UH$scores[,1], 1,
labels=names(UH.cluster$cluster), cex=0.8, col="gray", srt=45)
text (UH.cluster$centers, rep(1, length(UH.cluster$centers)), labels=unique(uh.label), cex=1.3, srt=45)
dev.off()
# Gather all important variables together
# Output tables: raw codings plus PC scores, cluster ids, and type labels
Executive <- data.frame (cbind (Executive, PR.cluster$cluster, pc.PR$scores[,1], pc.PR$scores[,2], president.type=executive.label))
LH <- data.frame (cbind (LowerHouse[,!is.element(colnames(LowerHouse), c("investigate","amnesty"))]
, firstRoundPC1=as.matrix (pc.Lower$scores[,1])
, firstRoundPC2=as.matrix (pc.Lower$scores[,2])
, clusterOnlyRound1=Lower.cluster$cluster
, secondRoundPC=LH.scores
, clusterRound1=Leg.cluster$cluster[Chamber=="lower"]
, clusterRound2=LH.cluster$cluster
, lower.house.type=lh.label))
UH <- data.frame (cbind (UpperHouse[,!is.element(colnames(UpperHouse), c("investigate","amnesty"))]
, firstRoundPC1=as.matrix (pc.Upper$scores[,1])
, firstRoundPC2=as.matrix (pc.Upper$scores[,2])
, clusterOnlyRound1=Upper.cluster$cluster
, secondRoundPC=UH.scores
, clusterRound1=Leg.cluster$cluster[Chamber=="upper"]
, clusterRound2=UH.cluster$cluster
, upper.house.type=uh.label))
# Save results for Brian
# NOTE(review): setwd() mutates global state; the relative path assumes the
# script is launched from the repository root -- confirm
setwd ("Datasets/OriginalDataFiles/PolicyMakingPowers/")
write.table (LH, file="LowerHouseDoubleClusters.txt", sep="\t", row.names=F)
write.table (UH, file="UpperHouseDoubleClusters.txt", sep="\t", row.names=F)
write.table (Executive, file="ExecutiveClusters.txt", sep="\t", row.names=F)
| /Code/Code8_Linkage2_PolicyMakingProcess/policyMakingPowersDoubleCluster.R | no_license | solivella/ChainOfRepresentation | R | false | false | 14,090 | r | ##########################################################################
# policyMakingPowersDoubleCluster.R
# GR: November 8, 2017
# This file reads policy-making powers for lower and upper houses and
# executives.
# It first performs principal component analysis of common powers in both houses
# keeping as many dimensions as needed.
# It then finds the optimal number of clusters for both sets of houses
# In a second run, a new cluster analysis is performed that takes as input
# the clusters from the first run, and the "additional" powers that we have
# for lower houses and upper houses (this second step is carried out separately
# in upper and lower houses)
##########################################################################
library (psych)
#################################
#### Import and prepare data ####
#################################
# Raw power codings: one row per chamber/executive per constitutional period
LowerHouse <- read.table ("lowerHousePowers.txt", sep="\t", header=TRUE)
UpperHouse <- read.table ("upperHousePowers.txt", sep="\t", header=TRUE)
Executive <- read.table ("presidentialPowers.txt", sep="\t", header=TRUE)
# Short plot labels: first three letters of the country plus start year
LH.label <- paste(tolower (substr(LowerHouse$country, start=1, stop=3)), LowerHouse$startyear, sep="-")
UH.label <- paste(tolower (substr(UpperHouse$country, start=1, stop=3)), UpperHouse$startyear, sep="-")
PR.label <- paste(tolower (substr(Executive$country, start=1, stop=3)), Executive$startyear, sep="-")
LowerHouse$chamber <- rep("lower", nrow(LowerHouse))
UpperHouse$chamber <- rep("upper", nrow(UpperHouse))
# Powers coded for both chambers vs. powers coded for only one of them
sharedPowers <- c("amend","partialobservationsoverride","vetooverride"
,"budgetspending","question")
lowHouseOnly <- c("committeeslo","stafflo","introdbillslo")
uppHouseOnly <- c("vetoupper","introdbillsup")
# Stack lower- and upper-house rows on the shared powers only
Leg.W <- rbind (LowerHouse[,sharedPowers], UpperHouse[,sharedPowers])
Chamber <- c(LowerHouse$chamber, UpperHouse$chamber)
Leg.add <- rowSums(Leg.W) # simple additive scores
# Labels prefixed with L/U to distinguish chambers in joint plots
Leg.label <- c(paste("L", tolower (substr(LowerHouse$country, start=1, stop=3))
, LowerHouse$startyear, sep="-")
, paste("U", tolower (substr(UpperHouse$country, start=1, stop=3))
, UpperHouse$startyear, sep="-"))
# Principal components Executives
PR.W <- Executive[,!is.element(colnames(Executive), c("country","startyear","endyear"))]
PR.add <- rowSums (PR.W)
pc.PR <- princomp(PR.W, scores=T)
screeplot (pc.PR)
summary(pc.PR) # 2 principal components might be necessary
plot (pc.PR$scores[,1], pc.PR$scores[,2], ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-2.5,2.5), main="Executive")
text (xy.coords(pc.PR$scores[,1], jitter(pc.PR$scores[,2], amount=0.115)), labels=PR.label, cex=0.8)
plot (pc.PR$scores[,1], PR.add, ylab="Additive scores", xlab="PC1 scores"
, type="n", xlim=c(-2.5,2.5), main="Executive")
text (xy.coords(pc.PR$scores[,1], jitter(PR.add, amount=0.5)), labels=PR.label, cex=0.8)
# Keep the first two executive PCs for clustering, keyed by country-period
PR.scores <- as.matrix (pc.PR$scores[,c(1:2)])
rownames(PR.scores) <- paste (Executive$country, Executive$startyear, Executive$endyear, sep="-")
################################
#### First round clustering ####
################################
# Principal components all chambers together
pc.Leg <- princomp(Leg.W, scores=T)
screeplot (pc.Leg)
summary(pc.Leg) # 1 principal component is enough
plot (pc.Leg$scores[,1], pc.Leg$scores[,2],# Leg.add,
ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-2.5,2.5), main="Full Legislature")
text (xy.coords(pc.Leg$scores[,1],pc.Leg$scores[,2]),# jitter(Leg.add, amount=0.5)),
labels=Leg.label, cex=0.8)
# First two PCs of the pooled (lower + upper) shared powers
Leg.scores <- as.matrix (pc.Leg$scores[,1:2])
rownames(Leg.scores) <- c(paste (LowerHouse$country, LowerHouse$startyear
, LowerHouse$endyear, sep="-")
, paste (UpperHouse$country, UpperHouse$startyear
, UpperHouse$endyear, sep="-"))
# Runs with a single clustering round for lower and upper houses
Lower <- LowerHouse[,is.element(colnames(LowerHouse), c(sharedPowers, lowHouseOnly))]
pc.Lower <- princomp (Lower, scores=T)
screeplot (pc.Lower)
summary (pc.Lower) # 2 components
plot (pc.Lower$scores[,1], pc.Lower$scores[,2],# Leg.add,
ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-2.5,2.5), main="Lower House only")
# NOTE(review): the 1:39 index assumes the first 39 entries of Leg.label are
# the lower houses -- confirm against the input files if row counts change
text (xy.coords(pc.Lower$scores[,1],pc.Lower$scores[,2]),# jitter(Leg.add, amount=0.5)),
labels=Leg.label[1:39], cex=0.8)
Lower.scores <- as.matrix (pc.Lower$scores[,1:2])
rownames(Lower.scores) <- paste (LowerHouse$country, LowerHouse$startyear
, LowerHouse$endyear, sep="-")
Upper <- UpperHouse[,is.element(colnames(UpperHouse), c(sharedPowers, uppHouseOnly))]
pc.Upper <- princomp (Upper, scores=T)
screeplot (pc.Upper)
summary (pc.Upper) # 2 components
plot (pc.Upper$scores[,1], pc.Upper$scores[,2],# Leg.add,
ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-2.5,2.5), main="Upper House only")
text (xy.coords(pc.Upper$scores[,1],pc.Upper$scores[,2]),# jitter(Leg.add, amount=0.5)),
labels=Leg.label[40:length(Leg.label)], cex=0.8)
Upper.scores <- as.matrix (pc.Upper$scores[,1:2])
rownames(Upper.scores) <- paste (UpperHouse$country, UpperHouse$startyear
, UpperHouse$endyear, sep="-")
#################################
#### Finding kmeans clusters ####
#################################
# These two functions help find optimal number of clusters visually
# findKmeans iterates from 1 to 8 clusters, producing Between SS and Within SS to be analyzed later
# Run k-means for every candidate number of centers from 1 to 8 and collect
# the fitted objects in a list, so the between/within sums of squares can be
# compared across k later on.
findKmeans <- function (x) {
  fits <- vector("list", 8)
  for (k in seq_len(8)) {
    fits[[k]] <- kmeans(x, centers = k, nstart = 20)
  }
  fits
}
# screeLikePlot produces a scree plot of the ratio of between to total SS
# The idea is to take as many clusters as needed until reaching the "bend" in the "elbow"
# For a list of kmeans fits (as returned by findKmeans), compute the
# percentage of total sum of squares explained between clusters for each fit.
# Despite its name this function does not plot; plot() its result to get a
# scree-like "elbow" chart. (vapply also makes the empty-list case return
# numeric(0) instead of erroring on x[[1]].)
screeLikePlot <- function (x) {
  vapply(x, function (fit) 100 * (fit$betweenss / fit$totss), numeric(1))
}
# Elbow plots: the centers= value below each plot was chosen by eye
# legislature scores
temp <- findKmeans (Leg.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Legislature PC scores, first run")
Leg.cluster <- kmeans(Leg.scores, centers=3, nstart=20) # I see 3 clusters
# Lower house scores
temp <- findKmeans (Lower.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Lower House PC scores, first and only run")
Lower.cluster <- kmeans(Lower.scores, centers=3, nstart=20) # I see 3 clusters
# Upper house scores
temp <- findKmeans (Upper.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Upper House PC scores, first and only run")
Upper.cluster <- kmeans(Upper.scores, centers=3, nstart=20) # I see 3 clusters
# Presidential scores
temp <- findKmeans (PR.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Executive PC scores") # I see 3
PR.cluster <- kmeans(PR.scores, centers=3, nstart=20)
#######################
#### Plots to keep ####
#######################
# Executive
graphicsPath <- "Graphs/" # Save copies here as well
pdf (paste0 (graphicsPath, "ExecutiveClustering.pdf"), h=5, w=7)
plot (pc.PR$scores[,1], pc.PR$scores[,2], ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-3,2.5), main="Executive")
text (xy.coords(pc.PR$scores[,1], jitter(pc.PR$scores[,2], amount=0.115))
, labels=PR.label, cex=0.8, col="gray")
# Overlay cluster centers, labeled by cluster id
points (xy.coords(PR.cluster$centers), pch=rownames(PR.cluster$centers), cex=1.3)
dev.off ()
# Full legislature
plot (pc.Leg$scores[,1],pc.Leg$scores[,2], xlab="PC1 scores",ylab="PC2 scores",
#, xlim=c(-2.5,2.5),
pch="", main="Full Legislature")
text (pc.Leg$scores[,1],pc.Leg$scores[,2], #1,
labels=Leg.label, cex=0.8, col="gray", srt=45)
points (Leg.cluster$centers[,1],Leg.cluster$centers[,2],# rep(1, length(Leg.cluster$centers)),
pch=rownames(Leg.cluster$centers), cex=1.3)
# Lower House 1 round clustering
pdf (paste0 (graphicsPath, "LowerHouseClustering.pdf"), h=5, w=7)
plot (pc.Lower$scores[,1], pc.Lower$scores[,2], ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-3,2.5), main="Lower House (1 round)")
text (xy.coords(pc.Lower$scores[,1], jitter(pc.Lower$scores[,2], amount=0.115))
, labels=Leg.label[1:39], cex=0.8, col="gray")
points (xy.coords(Lower.cluster$centers), pch=rownames(Lower.cluster$centers), cex=1.3)
dev.off ()
# Upper House 1 round clustering
pdf (paste0 (graphicsPath, "UpperHouseClustering.pdf"), h=5, w=7)
plot (pc.Upper$scores[,1], pc.Upper$scores[,2], ylab="PC2 scores", xlab="PC1 scores"
, type="n", xlim=c(-3,2.5), main="Upper House (1 round)")
text (xy.coords(pc.Upper$scores[,1], jitter(pc.Upper$scores[,2], amount=0.115))
, labels=Leg.label[40:length(Leg.label)], cex=0.8, col="gray")
points (xy.coords(Upper.cluster$centers), pch=rownames(Upper.cluster$centers), cex=1.3)
dev.off ()
#################################
#### Second round clustering ####
#################################
# Principal component step
# Second round: chamber-specific powers plus the first-round cluster
# assignment feed a new PCA (done separately for each chamber)
pc.LH <- princomp(cbind(LowerHouse[,lowHouseOnly],Leg.cluster$cluster[Chamber=="lower"]), scores=T)
screeplot (pc.LH)
summary(pc.LH) # 1 components is enough
LH.scores <- as.matrix (pc.LH$scores[,1])
pc.UH <- princomp(cbind(UpperHouse[,uppHouseOnly],Leg.cluster$cluster[Chamber=="upper"]), scores=T)
screeplot (pc.UH)
summary(pc.UH) # 1 component is enough, there's only two variables
UH.scores <- as.matrix (pc.UH$scores[,1])
temp <- findKmeans (LH.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Lower Houses, second run") # I see 3 clusters
LH.cluster <- kmeans(LH.scores, centers=3, nstart=20)
temp <- findKmeans (UH.scores)
plot (screeLikePlot (temp), type="b"
, main="optimal # of clusters for Upper Houses, second run")# I see 4 clusters
# NOTE(review): the comment above says 4 clusters but centers=3 is used -- confirm
UH.cluster <- kmeans(UH.scores, centers=3, nstart=20)
######################################################
#### Hardwire additional column with fixed labels ####
######################################################
# kmeans cluster ids are arbitrary across runs, so readable type labels are
# anchored to reference cases whose classification is known a priori.
## Presidents
#weak
label.weak <- PR.cluster$cluster[which(names (PR.cluster$cluster)=="Argentina-1957-1993")]
#proactive
label.proac <- PR.cluster$cluster[which(names (PR.cluster$cluster)=="Argentina-1994-2015")]
executive.label <- ifelse (PR.cluster$cluster==label.weak, "Weak president",
ifelse (PR.cluster$cluster==label.proac
, "Proactive president"
, "Agenda-setting president"))
## Lower House
label.only <- LH.cluster$cluster[which(names (LH.cluster$cluster)=="Bolivia-2010-2015")]
label.limited <- LH.cluster$cluster[which(names (LH.cluster$cluster)=="Argentina-1983-2015")]
lh.label <- ifelse (LH.cluster$cluster==label.only, "Only budget",
ifelse (LH.cluster$cluster==label.limited
, "Limited powers"
, "Except budget"))
## Upper House
label.only.uh <- UH.cluster$cluster[which(names (UH.cluster$cluster)=="Bolivia-2010-2015")]
label.limited.uh <- UH.cluster$cluster[which(names (UH.cluster$cluster)=="Argentina-1983-2015")]
uh.label <- ifelse (UH.cluster$cluster==label.only.uh, "Only budget",
ifelse (UH.cluster$cluster==label.limited.uh
, "Limited powers"
, "Except budget"))
############################
#### More plots to keep ####
############################
pdf (paste0 (graphicsPath, "LowerHouseClustering2ndRound.pdf"), h=5, w=7)
# Second-round scores are 1-D, so use a strip chart with rotated labels
stripchart (pc.LH$scores[,1], xlab="PC1 scores (second round)"
,type="n", xlim=c(-2.5,2.5), main="Lower House", ylab="")
text (pc.LH$scores[,1], 1,
labels=names(LH.cluster$cluster), cex=0.8, col="gray", srt=45)
# NOTE(review): unique(lh.label) assumes first-appearance order of the labels
# matches the row order of LH.cluster$centers -- confirm
text (LH.cluster$centers, rep(1, length(LH.cluster$centers)), labels=unique(lh.label), cex=1.3, srt=45)
dev.off()
pdf (paste0 (graphicsPath, "UpperHouseClustering2ndRound.pdf"), h=5, w=7)
stripchart (pc.UH$scores[,1], xlab="PC1 scores (second round)"
,type="n", xlim=c(-2.5,2.5), main="Upper House", ylab="")
text (pc.UH$scores[,1], 1,
labels=names(UH.cluster$cluster), cex=0.8, col="gray", srt=45)
text (UH.cluster$centers, rep(1, length(UH.cluster$centers)), labels=unique(uh.label), cex=1.3, srt=45)
dev.off()
# Gather all important variables together
# Output tables: raw codings plus PC scores, cluster ids, and type labels
Executive <- data.frame (cbind (Executive, PR.cluster$cluster, pc.PR$scores[,1], pc.PR$scores[,2], president.type=executive.label))
LH <- data.frame (cbind (LowerHouse[,!is.element(colnames(LowerHouse), c("investigate","amnesty"))]
, firstRoundPC1=as.matrix (pc.Lower$scores[,1])
, firstRoundPC2=as.matrix (pc.Lower$scores[,2])
, clusterOnlyRound1=Lower.cluster$cluster
, secondRoundPC=LH.scores
, clusterRound1=Leg.cluster$cluster[Chamber=="lower"]
, clusterRound2=LH.cluster$cluster
, lower.house.type=lh.label))
UH <- data.frame (cbind (UpperHouse[,!is.element(colnames(UpperHouse), c("investigate","amnesty"))]
, firstRoundPC1=as.matrix (pc.Upper$scores[,1])
, firstRoundPC2=as.matrix (pc.Upper$scores[,2])
, clusterOnlyRound1=Upper.cluster$cluster
, secondRoundPC=UH.scores
, clusterRound1=Leg.cluster$cluster[Chamber=="upper"]
, clusterRound2=UH.cluster$cluster
, upper.house.type=uh.label))
# Save results for Brian
# NOTE(review): setwd() mutates global state; the relative path assumes the
# script is launched from the repository root -- confirm
setwd ("Datasets/OriginalDataFiles/PolicyMakingPowers/")
write.table (LH, file="LowerHouseDoubleClusters.txt", sep="\t", row.names=F)
write.table (UH, file="UpperHouseDoubleClusters.txt", sep="\t", row.names=F)
write.table (Executive, file="ExecutiveClusters.txt", sep="\t", row.names=F)
|
#' Run an experiment on cluster
#' @inheritParams default_params_doc
#' @author Giovanni Laudanno
#' @return nothing
#' @export
pocket_experiment <- function(
  account = jap::your_account(),
  projects_folder_name = "Projects",
  github_name = "Giappo",
  project_name = "sls",
  function_name = "sls_main",
  params,
  cluster_folder = "home",
  drive = FALSE
) {
  # Stage everything in the session-wide temporary directory.
  work_dir <- tempdir()
  # Serialize the experiment configuration where the job script expects it.
  arg_names <- c(
    "account",
    "projects_folder_name",
    "github_name",
    "project_name",
    "function_name",
    "params",
    "cluster_folder",
    "drive"
  )
  save(file = file.path(work_dir, "pocket_data.RData"), list = arg_names)
  # Fetch the generic job script from the jap repository on GitHub.
  script_name <- "pocket_script.R"
  script_url <- paste0(
    "https://raw.githubusercontent.com/Giappo/jap/master/job_scripts/",
    script_name
  )
  utils::download.file(script_url, destfile = file.path(work_dir, script_name))
  # Launch the downloaded script as a background RStudio job; importEnv is
  # FALSE so the job reads its inputs from pocket_data.RData instead of this
  # session's environment.
  rstudioapi::jobRunScript(
    path = file.path(work_dir, script_name),
    name = paste0("pocket_", project_name),
    importEnv = FALSE
  )
  return()
}
| /R/pocket_experiment.R | no_license | TheoPannetier/jap | R | false | false | 1,043 | r | #' Run an experiment on cluster
#' @inheritParams default_params_doc
#' @author Giovanni Laudanno
#' @return nothing
#' @export
pocket_experiment <- function(
account = jap::your_account(),
projects_folder_name = "Projects",
github_name = "Giappo",
project_name = "sls",
function_name = "sls_main",
params,
cluster_folder = "home",
drive = FALSE
) {
# Stage all artifacts in the session-wide temporary directory
tempfolder <- tempdir()
# Serialize the experiment configuration for the job script to load
save(
file = file.path(tempfolder, "pocket_data.RData"),
list = c(
"account",
"projects_folder_name",
"github_name",
"project_name",
"function_name",
"params",
"cluster_folder",
"drive"
)
)
filename <- "pocket_script.R"
# Fetch the generic job script from the jap repository on GitHub
url <- paste0(
"https://raw.githubusercontent.com/Giappo/jap/master/job_scripts/",
filename
)
utils::download.file(url, destfile = file.path(tempfolder, filename))
# list.files(tempfolder)
# Run the downloaded script as a background RStudio job; importEnv = FALSE so
# the job reads its inputs from pocket_data.RData rather than this session
rstudioapi::jobRunScript(
path = file.path(tempfolder, filename),
name = paste0("pocket_", project_name),
importEnv = FALSE
)
return()
}
|
library(shiny)
# Define server logic required to draw a plot
shinyServer(function(input, output) {
# Expression that generates a plot. The expression is
# wrapped in a call to renderPlot to indicate that:
# 1) It is "reactive" and therefore should be automatically
# re-executed when inputs change
# 2) Its output type is a plot
output$distPlot <- renderPlot({
# plot a Normal probability distribution
# input$mean and input$std_dev presumably come from controls defined in
# ui.R -- confirm the matching inputIds there
par(mar=c(3, 2, 0, 0), oma=c(0, 0, 0, 0))
curve(expr=dnorm(x, mean=input$mean, sd=input$std_dev), type="l",
xlim=c(-4, 4),
xlab="", ylab="", lwd=2, col="blue")
}, height=300, width=500) # end renderPlot
}) # end shinyServer
| /shiny/normal_dist/server.R | no_license | algoquant/presentations | R | false | false | 668 | r | library(shiny)
# Define server logic required to draw a plot
shinyServer(function(input, output) {
# Expression that generates a plot. The expression is
# wrapped in a call to renderPlot to indicate that:
# 1) It is "reactive" and therefore should be automatically
# re-executed when inputs change
# 2) Its output type is a plot
output$distPlot <- renderPlot({
# plot a Normal probability distribution
par(mar=c(3, 2, 0, 0), oma=c(0, 0, 0, 0))
curve(expr=dnorm(x, mean=input$mean, sd=input$std_dev), type="l",
xlim=c(-4, 4),
xlab="", ylab="", lwd=2, col="blue")
}, height=300, width=500) # end renderPlot
}) # end shinyServer
|
library("astsa")
library("dplyr")
library("ggplot2")

# Load the incident-level data and derive elapsed-time columns
# (days / months / quarters since 2014-01-01).
gunviolence <- read.csv('~/Documents/gun-violence/gun-violence-data.csv') %>%
  mutate(
    incident_date = as.Date(incident_date, "%B %d, %Y"),
    days_since = difftime(incident_date, as.Date('2014-01-01'), units = "days") %>% as.numeric,
    months_since = floor((days_since / 365.0) * 12.0) %>% as.integer,
    # BUG FIX: the original wrote `... * 4.0 %>% as.integer)`; %>% binds
    # tighter than `*`, so only the literal 4.0 was cast and the floored
    # result was never converted. Pipe the floored value, as months_since does.
    quarters_since = floor((days_since / 365.0) * 4.0) %>% as.integer
  ) %>%
  select(-operations)

# Monthly totals of deaths and incidents (months_since already exists on
# gunviolence, so there is no need to re-derive it here).
gundeaths_monthly <- gunviolence %>%
  group_by(months_since) %>%
  summarize(
    n_killed = sum(n_killed),
    n_incidents = n())

# Time series of monthly gun deaths with two mass-shooting annotations.
# BUG FIX: the original mapped `label = annotation_text`, a column that does
# not exist in gundeaths_monthly (ggplot would error at render time), and
# passed `face=` to annotate(); the text parameter is `fontface`.
ggplot(gundeaths_monthly, aes(x = months_since, y = n_killed)) +
  geom_line(color = 'red', alpha = 0.8) +
  labs(y = "Number of Individuals Killed", x = "Months Since Jan 2014") +
  annotate("text", x = 29.2, y = 99, label = "Pulse Nightclub Massacre",
           fontface = "bold", color = "darkblue") +
  annotate("text", x = 45.2, y = 88, label = "Las Vegas Shooting",
           fontface = "bold", color = "darkblue")

# Fit an ARIMA(1, 1, 0) model to the monthly death counts.
sarima(gundeaths_monthly$n_killed, 1, 1, 0)
| /gun-deaths-plot.R | no_license | daikonradish/gun-violence | R | false | false | 1,207 | r | library("astsa")
library("dplyr")
library("ggplot2")
gunviolence <- read.csv('~/Documents/gun-violence/gun-violence-data.csv') %>%
mutate(
incident_date = as.Date(incident_date, "%B %d, %Y"),
days_since = difftime(incident_date, as.Date('2014-01-01'), units="days") %>% as.numeric,
months_since = floor((days_since / 365.0) * 12.0) %>% as.integer,
quarters_since = floor((days_since / 365.0) * 4.0 %>% as.integer)
) %>%
select(-operations)
gundeaths_monthly <- gunviolence %>%
mutate(
days_since = difftime(incident_date, as.Date('2014-01-01'), units="days") %>% as.numeric,
months_since = floor((days_since / 365.0) * 12.0) %>% as.integer) %>%
group_by(months_since) %>%
summarize(
n_killed = sum(n_killed),
n_incidents = n())
ggplot(gundeaths_monthly, aes(x=months_since, y=n_killed, label=annotation_text)) +
geom_line(color='red', alpha=0.8) +
labs(y="Number of Individuals Killed", x="Months Since Jan 2014") +
annotate("text", x=29.2, y=99, label="Pulse Nightclub Massacre", face="bold", color="darkblue") +
annotate("text", x=45.2, y=88, label="Las Vegas Shooting", face="bold", color="darkblue")
sarima(gundeaths_monthly$n_killed, 1, 1, 0)
|
## Functions to cache the (potentially expensive) inverse of a matrix.
## makeCacheMatrix() wraps a matrix together with a cached inverse;
## cacheSolve() returns the inverse, computing it only on the first call.

## Create a special "matrix": a list of getter/setter closures over the
## matrix `x` and its cached inverse. The cache starts empty (NULL) and is
## invalidated whenever a new matrix is stored with set().
makeCacheMatrix <- function(x = matrix()) {
  ## NULL marks "inverse not computed yet". (The original used a 1x1 NA
  ## matrix and tested is.na(m_inv[1,1]), which is a fragile sentinel.)
  m_inv <- NULL
  set <- function(y) {
    x <<- y
    m_inv <<- NULL  # new matrix -> drop any cached inverse
  }
  get <- function() x
  setm_inv <- function(solve_m) m_inv <<- solve_m
  getm_inv <- function() m_inv
  list(set = set, get = get,
       setm_inv = setm_inv,
       getm_inv = getm_inv)
}

## Return the inverse of the special "matrix" created by makeCacheMatrix().
## A cached inverse is returned when available; otherwise the inverse is
## computed with solve() and cached for later calls. Extra arguments are
## forwarded to solve() (the original accepted ... but silently dropped it).
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m_inv <- x$getm_inv()
  if (!is.null(m_inv)) {
    message("getting cached data")
    return(m_inv)
  }
  data <- x$get()
  m_inv <- solve(data, ...)
  x$setm_inv(m_inv)
  m_inv
}
| /cachematrix.R | no_license | datadavidz/ProgrammingAssignment2 | R | false | false | 771 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
m_inv <- matrix()
set <- function(y) {
x <<- y
m_inv <<- matrix()
}
get <- function() x
setm_inv <- function(solve_m) m_inv <<- solve_m
getm_inv <- function() m_inv
list(set = set, get = get,
setm_inv = setm_inv,
getm_inv = getm_inv)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m_inv <- x$getm_inv()
if (!is.na(m_inv[1,1])) {
message("getting cached data")
return(m_inv)
}
data <- x$get()
m_inv <- solve(data)
x$setm_inv(m_inv)
m_inv
}
|
#' Run one replicate of the model
#'
#' Run one replicate of the model; a replicate is a water_year/chinook_run combination
#'
#' @md
#' @param water_year_string Water year (1997-2011) as a string
#' @param chinook_run Run timing classification: Fall, LateFall, Winter, Spring
#' @param ocean_year_type Type of ocean survival relationship used for that year: length or intercept
#'
#' @return A list with elements `Sac` and `Yolo`; each is a data frame that
#'   tracks every daily cohort through that route (abundance, fork length,
#'   timing, adult returns), or an empty data frame if no fish took the route.
#'
#' @export
#'
#'
## probably would have take a different approach to storing output data
## if I was planning to incorporate Delta rearing from beginning
## approach that I'm using in Yolo section is very prone to typos
run_one_rep <- function(water_year_string, chinook_run,
                        ocean_year_type = c("length", "intercept")){

  # Daily cohorts entering the model at Knights Landing.
  knights_dates <- wy_dates[[water_year_string]]
  # seq_along() instead of 1:length(): safe if the water year has no dates.
  knights_dates_index <- seq_along(knights_dates)
  knights_abun <- initial_cohort_abundance(chinook_run, water_year_string)
  knights_fl <- initial_cohort_fork_length(chinook_run, water_year_string, knights_dates_index)
  knights_ww <- length_weight(knights_fl)

  # no travel time, mortality, or growth from Knights Landing to Fremont Weir
  # Split each daily cohort between the Sacramento River and Yolo Bypass.
  entrain_list <- entrainment(water_year_string, knights_dates_index, knights_abun)

  # ---- Sacramento route ----
  sac <- data.frame(KnightsDate = knights_dates,
                    KnightsDateIndex = knights_dates_index,
                    KnightsAbun = knights_abun,
                    KnightsFL = knights_fl,
                    KnightsWW = knights_ww,
                    stringsAsFactors = FALSE)
  sac[["FremontAbun"]] <- entrain_list[["Sac"]]
  # Keep only cohorts with fish on this route.
  sac <- sac[sac[["FremontAbun"]] > 0,]
  if (nrow(sac) > 0){
    # Sacramento River passage
    sac_passage_list <- passage(water_year_string,
                                sac[["KnightsDateIndex"]],
                                sac[["FremontAbun"]],
                                sac[["KnightsFL"]],
                                route = "Sac")
    sac[["DeltaDateIndex"]] <- sac[["KnightsDateIndex"]] + sac_passage_list[["PassageTime"]]
    sac[["DeltaAbun"]] <- sac_passage_list[["Abundance"]]
    # Delta rearing
    sac[["DeltaRearingTime"]] <- rearing_time_delta(water_year_string,
                                                    sac[["DeltaDateIndex"]],
                                                    sac_passage_list[["PassageTime"]],
                                                    sac[["KnightsFL"]])
    sac[["ChippsAbun"]] <- rearing_survival(water_year_string,
                                            sac[["DeltaDateIndex"]],
                                            sac[["DeltaAbun"]],
                                            sac[["DeltaRearingTime"]],
                                            location = "Delta")
    # Growth is tracked in wet weight, then converted back to fork length.
    sac[["ChippsFL"]] <- weight_length(rearing_growth(water_year_string,
                                                      sac[["DeltaDateIndex"]],
                                                      sac[["DeltaRearingTime"]],
                                                      "Delta",
                                                      sac[["KnightsWW"]]))
    # Ocean
    sac[["AdultReturns"]] <- ocean_survival(sac[["ChippsAbun"]],
                                            sac[["ChippsFL"]],
                                            ocean_year_type)
  }else{
    sac <- data.frame()
  }

  # ---- Yolo route ----
  # Columns are duplicated for rearing (_YoloRear) and non-rearing
  # (_YoloNoRear) fractions of each cohort.
  yolo <- data.frame(KnightsDate = knights_dates,
                     KnightsDateIndex = knights_dates_index,
                     KnightsAbun = knights_abun,
                     KnightsFL = knights_fl,
                     KnightsWW = knights_ww,
                     stringsAsFactors = FALSE)
  yolo[["FremontAbun"]] <- entrain_list[["Yolo"]]
  yolo <- yolo[yolo[["FremontAbun"]] > 0,]
  if (nrow(yolo) > 0){
    # Yolo Bypass rearing
    # YoloRearingAbun is proportion of cohorts that "decide" to rear on the Yolo Bypass multipled by abundance
    yolo[["YoloRearingAbun"]] <- rearing_abundance(yolo[["FremontAbun"]], yolo[["KnightsFL"]])
    yolo[["YoloRearingTime"]] <- rearing_time_yolo(water_year_string, yolo[["KnightsDateIndex"]])
    yolo[["PostYoloRearingAbun"]] <- rearing_survival(water_year_string,
                                                      yolo[["KnightsDateIndex"]],
                                                      yolo[["YoloRearingAbun"]],
                                                      yolo[["YoloRearingTime"]],
                                                      location = "Yolo")
    yolo[["PostYoloRearingFL"]] <- weight_length(rearing_growth(water_year_string,
                                                                yolo[["KnightsDateIndex"]],
                                                                yolo[["YoloRearingTime"]],
                                                                "Yolo",
                                                                yolo[["KnightsWW"]]))
    # Yolo Bypass passage
    # yolo passage for rearing individuals (delayed by their rearing time)
    yolo_passage_rear <- passage(water_year_string,
                                 yolo[["KnightsDateIndex"]] + yolo[["YoloRearingTime"]],
                                 yolo[["PostYoloRearingAbun"]],
                                 yolo[["PostYoloRearingFL"]],
                                 route = "Yolo")
    # yolo passage for non-rearing individuals
    yolo_passage_no_rear <- passage(water_year_string,
                                    yolo[["KnightsDateIndex"]],
                                    yolo[["FremontAbun"]] - yolo[["YoloRearingAbun"]],
                                    yolo[["KnightsFL"]],
                                    route = "Yolo")
    yolo[["DeltaAbun_YoloRear"]] <- yolo_passage_rear[["Abundance"]]
    yolo[["DeltaAbun_YoloNoRear"]] <- yolo_passage_no_rear[["Abundance"]]
    yolo[["DeltaDateIndex_YoloRear"]] <- yolo[["KnightsDateIndex"]] + yolo[["YoloRearingTime"]] + yolo_passage_rear[["PassageTime"]]
    yolo[["DeltaDateIndex_YoloNoRear"]] <- yolo[["KnightsDateIndex"]] + yolo_passage_no_rear[["PassageTime"]]
    # Delta rearing
    yolo[["DeltaRearingTime_YoloRear"]] <- rearing_time_delta(water_year_string,
                                                              yolo[["DeltaDateIndex_YoloRear"]],
                                                              yolo_passage_rear[["PassageTime"]],
                                                              yolo[["PostYoloRearingFL"]])
    yolo[["DeltaRearingTime_YoloNoRear"]] <- rearing_time_delta(water_year_string,
                                                                yolo[["DeltaDateIndex_YoloNoRear"]],
                                                                yolo_passage_no_rear[["PassageTime"]],
                                                                yolo[["KnightsFL"]])
    yolo[["ChippsAbun_YoloRear"]] <- rearing_survival(water_year_string,
                                                      yolo[["DeltaDateIndex_YoloRear"]],
                                                      yolo[["DeltaAbun_YoloRear"]],
                                                      yolo[["DeltaRearingTime_YoloRear"]],
                                                      location = "Delta")
    yolo[["ChippsAbun_YoloNoRear"]] <- rearing_survival(water_year_string,
                                                        yolo[["DeltaDateIndex_YoloNoRear"]],
                                                        yolo[["DeltaAbun_YoloNoRear"]],
                                                        yolo[["DeltaRearingTime_YoloNoRear"]],
                                                        location = "Delta")
    yolo[["ChippsFL_YoloRear"]] <- weight_length(rearing_growth(water_year_string,
                                                                yolo[["DeltaDateIndex_YoloRear"]],
                                                                yolo[["DeltaRearingTime_YoloRear"]],
                                                                "Delta",
                                                                length_weight(yolo[["PostYoloRearingFL"]])))
    yolo[["ChippsFL_YoloNoRear"]] <- weight_length(rearing_growth(water_year_string,
                                                                  yolo[["DeltaDateIndex_YoloNoRear"]],
                                                                  yolo[["DeltaRearingTime_YoloNoRear"]],
                                                                  "Delta",
                                                                  yolo[["KnightsWW"]]))
    # Ocean
    yolo[["AdultReturns_YoloRear"]] <- ocean_survival(yolo[["ChippsAbun_YoloRear"]],
                                                      yolo[["ChippsFL_YoloRear"]],
                                                      ocean_year_type)
    yolo[["AdultReturns_YoloNoRear"]] <- ocean_survival(yolo[["ChippsAbun_YoloNoRear"]],
                                                        yolo[["ChippsFL_YoloNoRear"]],
                                                        ocean_year_type)
  }else{
    yolo <- data.frame()
  }
  return(list("Sac" = sac, "Yolo" = yolo))
}
| /R/run_one_rep.R | no_license | hinkelman/YoloBypassSBM | R | false | false | 9,192 | r | #' Run one replicate of the model
#'
#' Run one replicate of the model; a replicate is a water_year/chinook_run combination
#'
#' @md
#' @param water_year_string Water year (1997-2011) as a string
#' @param chinook_run Run timing classification: Fall, LateFall, Winter, Spring
#' @param ocean_year_type Type of ocean survival relationship used for that year: length or intercept
#'
#' @export
#'
#'
## probably would have take a different approach to storing output data
## if I was planning to incorporate Delta rearing from beginning
## approach that I'm using in Yolo section is very prone to typos
run_one_rep <- function(water_year_string, chinook_run,
ocean_year_type = c("length", "intercept")){
knights_dates <- wy_dates[[water_year_string]]
knights_dates_index <- 1:length(knights_dates)
knights_abun <- initial_cohort_abundance(chinook_run, water_year_string)
knights_fl <- initial_cohort_fork_length(chinook_run, water_year_string, knights_dates_index)
knights_ww <- length_weight(knights_fl)
# no travel time, mortality, or growth from Knights Landing to Fremont Weir
entrain_list <- entrainment(water_year_string, knights_dates_index, knights_abun)
# Sacramento route
sac <- data.frame(KnightsDate = knights_dates,
KnightsDateIndex = knights_dates_index,
KnightsAbun = knights_abun,
KnightsFL = knights_fl,
KnightsWW = knights_ww,
stringsAsFactors = FALSE)
sac[["FremontAbun"]] <- entrain_list[["Sac"]]
sac <- sac[sac[["FremontAbun"]] > 0,]
if (nrow(sac) > 0){
# Sacramento River passage
sac_passage_list <- passage(water_year_string,
sac[["KnightsDateIndex"]],
sac[["FremontAbun"]],
sac[["KnightsFL"]],
route = "Sac")
sac[["DeltaDateIndex"]] <- sac[["KnightsDateIndex"]] + sac_passage_list[["PassageTime"]]
sac[["DeltaAbun"]] <- sac_passage_list[["Abundance"]]
# Delta rearing
sac[["DeltaRearingTime"]] <- rearing_time_delta(water_year_string,
sac[["DeltaDateIndex"]],
sac_passage_list[["PassageTime"]],
sac[["KnightsFL"]])
sac[["ChippsAbun"]] <- rearing_survival(water_year_string,
sac[["DeltaDateIndex"]],
sac[["DeltaAbun"]],
sac[["DeltaRearingTime"]],
location = "Delta")
sac[["ChippsFL"]] <- weight_length(rearing_growth(water_year_string,
sac[["DeltaDateIndex"]],
sac[["DeltaRearingTime"]],
"Delta",
sac[["KnightsWW"]]))
# Ocean
sac[["AdultReturns"]] <- ocean_survival(sac[["ChippsAbun"]],
sac[["ChippsFL"]],
ocean_year_type)
}else{
sac = data.frame()
}
# Yolo route
yolo <- data.frame(KnightsDate = knights_dates,
KnightsDateIndex = knights_dates_index,
KnightsAbun = knights_abun,
KnightsFL = knights_fl,
KnightsWW = knights_ww,
stringsAsFactors = FALSE)
yolo[["FremontAbun"]] <- entrain_list[["Yolo"]]
yolo <- yolo[yolo[["FremontAbun"]] > 0,]
if (nrow(yolo) > 0){
# Yolo Bypass rearing
# YoloRearingAbun is proportion of cohorts that "decide" to rear on the Yolo Bypass multipled by abundance
yolo[["YoloRearingAbun"]] <- rearing_abundance(yolo[["FremontAbun"]], yolo[["KnightsFL"]])
yolo[["YoloRearingTime"]] <- rearing_time_yolo(water_year_string, yolo[["KnightsDateIndex"]])
yolo[["PostYoloRearingAbun"]] <- rearing_survival(water_year_string,
yolo[["KnightsDateIndex"]],
yolo[["YoloRearingAbun"]],
yolo[["YoloRearingTime"]],
location = "Yolo")
yolo[["PostYoloRearingFL"]] <- weight_length(rearing_growth(water_year_string,
yolo[["KnightsDateIndex"]],
yolo[["YoloRearingTime"]],
"Yolo",
yolo[["KnightsWW"]]))
# Yolo Bypass passage
# yolo passage for rearing individuals
yolo_passage_rear <- passage(water_year_string,
yolo[["KnightsDateIndex"]] + yolo[["YoloRearingTime"]],
yolo[["PostYoloRearingAbun"]],
yolo[["PostYoloRearingFL"]],
route = "Yolo")
# yolo passage for non-rearing individuals
yolo_passage_no_rear <- passage(water_year_string,
yolo[["KnightsDateIndex"]],
yolo[["FremontAbun"]] - yolo[["YoloRearingAbun"]],
yolo[["KnightsFL"]],
route = "Yolo")
yolo[["DeltaAbun_YoloRear"]] <- yolo_passage_rear[["Abundance"]]
yolo[["DeltaAbun_YoloNoRear"]] <- yolo_passage_no_rear[["Abundance"]]
yolo[["DeltaDateIndex_YoloRear"]] <- yolo[["KnightsDateIndex"]] + yolo[["YoloRearingTime"]] + yolo_passage_rear[["PassageTime"]]
yolo[["DeltaDateIndex_YoloNoRear"]] <- yolo[["KnightsDateIndex"]] + yolo_passage_no_rear[["PassageTime"]]
# Delta rearing
yolo[["DeltaRearingTime_YoloRear"]] <- rearing_time_delta(water_year_string,
yolo[["DeltaDateIndex_YoloRear"]],
yolo_passage_rear[["PassageTime"]],
yolo[["PostYoloRearingFL"]])
yolo[["DeltaRearingTime_YoloNoRear"]] <- rearing_time_delta(water_year_string,
yolo[["DeltaDateIndex_YoloNoRear"]],
yolo_passage_no_rear[["PassageTime"]],
yolo[["KnightsFL"]])
yolo[["ChippsAbun_YoloRear"]] <- rearing_survival(water_year_string,
yolo[["DeltaDateIndex_YoloRear"]],
yolo[["DeltaAbun_YoloRear"]],
yolo[["DeltaRearingTime_YoloRear"]],
location = "Delta")
yolo[["ChippsAbun_YoloNoRear"]] <- rearing_survival(water_year_string,
yolo[["DeltaDateIndex_YoloNoRear"]],
yolo[["DeltaAbun_YoloNoRear"]],
yolo[["DeltaRearingTime_YoloNoRear"]],
location = "Delta")
yolo[["ChippsFL_YoloRear"]] <- weight_length(rearing_growth(water_year_string,
yolo[["DeltaDateIndex_YoloRear"]],
yolo[["DeltaRearingTime_YoloRear"]],
"Delta",
length_weight(yolo[["PostYoloRearingFL"]])))
yolo[["ChippsFL_YoloNoRear"]] <- weight_length(rearing_growth(water_year_string,
yolo[["DeltaDateIndex_YoloNoRear"]],
yolo[["DeltaRearingTime_YoloNoRear"]],
"Delta",
yolo[["KnightsWW"]]))
# Ocean
yolo[["AdultReturns_YoloRear"]] <- ocean_survival(yolo[["ChippsAbun_YoloRear"]],
yolo[["ChippsFL_YoloRear"]],
ocean_year_type)
yolo[["AdultReturns_YoloNoRear"]] <- ocean_survival(yolo[["ChippsAbun_YoloNoRear"]],
yolo[["ChippsFL_YoloNoRear"]],
ocean_year_type)
}else{
yolo = data.frame()
}
return(list("Sac" = sac, "Yolo" = yolo))
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/homr.R
\name{homr}
\alias{homr}
\title{Historical Observing Metadata Repository (HOMR) station metadata}
\usage{
homr(qid = NULL, qidMod = NULL, station = NULL, state = NULL,
county = NULL, country = NULL, name = NULL, nameMod = NULL,
platform = NULL, date = NULL, begindate = NULL, enddate = NULL,
headersOnly = FALSE, phrData = NULL, combine = FALSE, ...)
}
\arguments{
\item{qid}{One of COOP, FAA, GHCND, ICAO, NCDCSTNID, NWSLI, TRANS, WBAN, or WMO, or any
of those plus \code{[a-z0-9]}, or just \code{[a-z0-9]}. (qid = qualified ID)}
\item{qidMod}{(character) One of: is, starts, ends, contains. Specifies how the ID portion of
the qid parameter should be applied within the search. If a qid is passed but the qidMod
parameter is not used, qidMod is assumed to be IS.}
\item{station}{(character) A station id.}
\item{state}{(character) A two-letter state abbreviation. Two-letter code for US states,
Canadian provinces, and other Island areas.}
\item{county}{(character) A two letter county code. US county names, best used with a state
identifier.}
\item{country}{(character) A two letter country code. See the HOMR API
documentation for a list of valid country names.}
\item{name}{(character) One of name=[0-9A-Z]+. Searches on any type of name we have for the
station.}
\item{nameMod}{(character) [is|starts|ends|contains]. Specifies how the name parameter should
be applied within the search. If a name is passed but the nameMod parameter is not used,
nameMod is assumed to be IS.}
\item{platform}{(character) (aka network) [ASOS|USCRN|USHCN|NEXRAD|AL USRCRN|USRCRN|COOP].
Limit the search to stations of a certain platform/network type.}
\item{date}{(character) [YYYY-MM-DD|all] Limits values to only those that occurred on a
specific date. Alternatively, date=all will return all values for matched stations. If this
field is omitted, the search will return only the most recent values for each field.}
\item{begindate,enddate}{[YYYY-MM-DD]. Limits values to only those that occurred within a
date range.}
\item{headersOnly}{(logical) Returns only minimal information for each station found (NCDC
Station ID, Preferred Name, Station Begin Date, and Station End Date), but is much quicker than a
full query. If you are performing a search that returns a large number of stations and intend
to choose only one from that list to examine in detail, headersOnly may give you enough
information to find the NCDC Station ID for the station that you actually want.}
\item{phrData}{(logical) The HOMR web service now includes PHR (element-level) data when
available, in an elements section. Because of how this data is structured, it can substantially
increase the size of any result which includes it. If you don't need this data you can omit it
by including phrData=false. If the parameter is not set, it will default to phrData=true.}
\item{combine}{(logical) Combine station metadata or not.}
\item{...}{Curl options passed on to \code{\link[httr]{GET}} (optional)}
}
\value{
A list, with elements named by the station ids.
}
\description{
Historical Observing Metadata Repository (HOMR) station metadata
}
\details{
Since the definitions for variables are always the same, we don't include the ability
to get description data in this function. Use \code{link[rnoaa]{homr_descriptions}} to get
descriptions information.
}
\examples{
\dontrun{
homr(qid = 'COOP:046742')
homr(headersOnly=TRUE, qid='TRANS:')
homr(qid = ':046742')
homr(qidMod='starts', qid='COOP:0467')
homr(headersOnly=TRUE, state='DE')
homr(headersOnly=TRUE, country='GHANA')
homr(headersOnly=TRUE, state='NC', county='BUNCOMBE')
homr(name='CLAYTON')
res <- homr(state='NC', county='BUNCOMBE', combine=TRUE)
res$id
res$head
res$updates
homr(nameMod='starts', name='CLAY')
homr(headersOnly=TRUE, platform='ASOS')
homr(qid='COOP:046742', date='2011-01-01')
homr(qid='COOP:046742', begindate='2005-01-01', enddate='2011-01-01')
homr(state='DE', headersOnly=TRUE)
homr(station=20002078)
homr(station=20002078, date='all', phrData=FALSE)
# Optionally pass in curl options
homr(headersOnly=TRUE, state='NC', county='BUNCOMBE', config=verbose())
}
}
\references{
\url{http://www.ncdc.noaa.gov/homr/api}
}
| /man/homr.Rd | permissive | jmolina9/rnoaa | R | false | false | 4,255 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/homr.R
\name{homr}
\alias{homr}
\title{Historical Observing Metadata Repository (HOMR) station metadata}
\usage{
homr(qid = NULL, qidMod = NULL, station = NULL, state = NULL,
county = NULL, country = NULL, name = NULL, nameMod = NULL,
platform = NULL, date = NULL, begindate = NULL, enddate = NULL,
headersOnly = FALSE, phrData = NULL, combine = FALSE, ...)
}
\arguments{
\item{qid}{One of COOP, FAA, GHCND, ICAO, NCDCSTNID, NWSLI, TRANS, WBAN, or WMO, or any
of those plus \code{[a-z0-9]}, or just \code{[a-z0-9]}. (qid = qualified ID)}
\item{qidMod}{(character) One of: is, starts, ends, contains. Specifies how the ID portion of
the qid parameter should be applied within the search. If a qid is passed but the qidMod
parameter is not used, qidMod is assumed to be IS.}
\item{station}{(character) A station id.}
\item{state}{(character) A two-letter state abbreviation. Two-letter code for US states,
Canadian provinces, and other Island areas.}
\item{county}{(character) A two letter county code. US county names, best used with a state
identifier.}
\item{country}{(character) A two letter country code. See here for a list of valid country
names.}
\item{name}{(character) One of name=[0-9A-Z]+. Searches on any type of name we have for the
station.}
\item{nameMod}{(character) [is|starts|ends|contains]. Specifies how the name parameter should
be applied within the search. If a name is passed but the nameMod parameter is not used,
nameMod is assumed to be IS.}
\item{platform}{(character) (aka network) [ASOS|USCRN|USHCN|NEXRAD|AL USRCRN|USRCRN|COOP].
Limit the search to stations of a certain platform/network type.}
\item{date}{(character) [YYYY-MM-DD|all] Limits values to only those that occurred on a
specific date. Alternatively, date=all will return all values for matched stations. If this
field is omitted, the search will return only the most recent values for each field.}
\item{begindate,enddate}{[YYYY-MM-DD]. Limits values to only those that occurred within a
date range.}
\item{headersOnly}{(logical) Returns only minimal information for each station found (NCDC
Station ID, Preferred Name, Station Begin Date, and Station End Date), but is much quicker than a
full query. If you are performing a search that returns a large number of stations and intend
to choose only one from that list to examine in detail, headersOnly may give you enough
information to find the NCDC Station ID for the station that you actually want.}
\item{phrData}{(logical) The HOMR web service now includes PHR (element-level) data when
available, in an elements section. Because of how this data is structured, it can substantially
increase the size of any result which includes it. If you don't need this data you can omit it
by including phrData=false. If the parameter is not set, it will default to phrData=true.}
\item{combine}{(logical) Combine station metadata or not.}
\item{...}{Curl options passed on to \code{\link[httr]{GET}} (optional)}
}
\value{
A list, with elements named by the station ids.
}
\description{
Historical Observing Metadata Repository (HOMR) station metadata
}
\details{
Since the definitions for variables are always the same, we don't include the ability
to get description data in this function. Use \code{link[rnoaa]{homr_descriptions}} to get
descriptions information.
}
\examples{
\dontrun{
homr(qid = 'COOP:046742')
homr(headersOnly=TRUE, qid='TRANS:')
homr(qid = ':046742')
homr(qidMod='starts', qid='COOP:0467')
homr(headersOnly=TRUE, state='DE')
homr(headersOnly=TRUE, country='GHANA')
homr(headersOnly=TRUE, state='NC', county='BUNCOMBE')
homr(name='CLAYTON')
res <- homr(state='NC', county='BUNCOMBE', combine=TRUE)
res$id
res$head
res$updates
homr(nameMod='starts', name='CLAY')
homr(headersOnly=TRUE, platform='ASOS')
homr(qid='COOP:046742', date='2011-01-01')
homr(qid='COOP:046742', begindate='2005-01-01', enddate='2011-01-01')
homr(state='DE', headersOnly=TRUE)
homr(station=20002078)
homr(station=20002078, date='all', phrData=FALSE)
# Optionally pass in curl options
homr(headersOnly=TRUE, state='NC', county='BUNCOMBE', config=verbose())
}
}
\references{
\url{http://www.ncdc.noaa.gov/homr/api}
}
|
library(geneplotter)
### Name: GetColor
### Title: A function to get the Red-Blue color scheme used by dChip
### Aliases: GetColor dChip.colors greenred.colors
### Keywords: manip
### ** Examples
set.seed(10)  # fix the RNG so the example output is reproducible
x <- rnorm(10)  # ten standard-normal draws to color
# Map each value to a color in the dChip red-blue scheme
GetColor(x)
# Ten colors from the dChip palette itself
dChip.colors(10)
| /data/genthat_extracted_code/geneplotter/examples/GetColor.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 268 | r | library(geneplotter)
### Name: GetColor
### Title: A function to get the Red-Blue color scheme used by dChip
### Aliases: GetColor dChip.colors greenred.colors
### Keywords: manip
### ** Examples
set.seed(10)
x <- rnorm(10)
GetColor(x)
dChip.colors(10)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sheets_objects.R
\name{CandlestickDomain}
\alias{CandlestickDomain}
\title{CandlestickDomain Object}
\usage{
CandlestickDomain(data = NULL, reversed = NULL)
}
\arguments{
\item{data}{The data of the CandlestickDomain}
\item{reversed}{True to reverse the order of the domain values (horizontal axis)}
}
\value{
CandlestickDomain object
}
\description{
CandlestickDomain Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The domain of a CandlestickChart.
}
| /man/CandlestickDomain.Rd | no_license | key-Mustang/googleSheetsR | R | false | true | 571 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sheets_objects.R
\name{CandlestickDomain}
\alias{CandlestickDomain}
\title{CandlestickDomain Object}
\usage{
CandlestickDomain(data = NULL, reversed = NULL)
}
\arguments{
\item{data}{The data of the CandlestickDomain}
\item{reversed}{True to reverse the order of the domain values (horizontal axis)}
}
\value{
CandlestickDomain object
}
\description{
CandlestickDomain Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The domain of a CandlestickChart.
}
|
# Interactive demo for dynCollapse(): an eventsApp with three toggle panels
# whose content is (re)generated by panel.fun on every click.
examples.dyn.collapse = function() {
  app = eventsApp()
  ui = fluidPage(
    # First label is deliberately long to exercise button text wrapping.
    dynCollapse(id="collapse", labels=c("Adfndskgdsjgkjdkvndcnvkdfhve djjdbidzgfu sjfjnsdfj","B","C"), panel.fun=function(...) {
      args = list(...)
      restore.point("inner")
      cat("I am pressed!!!!")
      # Random suffix makes each re-render visibly different.
      HTML(paste0("I am pressed...", sample.int(10e9,1)))
    })
  )
  app$ui = ui
  # NOTE(review): `rstudio::viewer` is the legacy name; current RStudio
  # exposes this as rstudioapi::viewer -- confirm before changing.
  runEventsApp(app, launch.browser=rstudio::viewer)
}
# Build a dynamic collapse widget: one toggle button per entry of `values`,
# each followed by an (initially empty) uiOutput placeholder that
# dynCollapse.click fills via `panel.fun` when the button is toggled on.
#
# Args:
#   id        base id; button i gets id "<id>__btn<i>", its panel "<id>__ui<i>"
#   labels    button labels
#   values    values forwarded to the click handler (default: labels)
#   panel.fun function(ui.id, ...) returning the UI shown when a panel opens
#   block     render full-width block buttons
#   ...       extra arguments forwarded to the click handler
#
# Returns a list of shiny tags (button, placeholder, button, placeholder, ...).
dynCollapse = function(id, labels, values=labels, panel.fun, block=TRUE,...) {
  restore.point("dynCollapse")
  li = lapply(seq_along(values), function(i) {
    btnid = paste0(id, "__btn", i)
    ui.id = paste0(id, "__ui", i)
    btn = bsButton(btnid, label = labels[[i]], block = block, type = "toggle")
    # Restyle the toggle button so it looks like a collapse-panel heading.
    btn$attribs$class = "btn-default sbs-toggle-button btn-block panel-heading panel-title text-left"
    # Re-render the matching panel body whenever the toggle state changes.
    changeHandler(btnid, dynCollapse.click, collapseId = id,
                  collapseValue = values[[i]], substitute.fun = FALSE,
                  panel.fun = panel.fun, ui.id = ui.id, ...)
    list(btn, uiOutput(ui.id))
  })
  # Flatten the list of (button, placeholder) pairs into one tag list.
  do.call("c", li)
}
# Handler for a dynCollapse toggle button: when toggled on, render the panel
# content produced by panel.fun into ui.id; when toggled off, clear it.
dynCollapse.click = function(panel.fun,ui.id,value,...) {
  args = list(...)
  restore.point("dynCollapse.click")
  cat("\nToogle value: ", value)
  content = if (value) panel.fun(ui.id = ui.id, ...) else HTML("")
  setUI(ui.id, content)
}
| /R/dynCollapse.r | no_license | harshinamdar/RTutor | R | false | false | 1,695 | r | examples.dyn.collapse = function() {
app = eventsApp()
ui = fluidPage(
dynCollapse(id="collapse", labels=c("Adfndskgdsjgkjdkvndcnvkdfhve djjdbidzgfu sjfjnsdfj","B","C"), panel.fun=function(...) {
args = list(...)
restore.point("inner")
cat("I am pressed!!!!")
HTML(paste0("I am pressed...", sample.int(10e9,1)))
})
)
app$ui = ui
runEventsApp(app, launch.browser=rstudio::viewer)
}
dynCollapse = function(id, labels, values=labels, panel.fun, block=TRUE,...) {
restore.point("dynCollapse")
li = lapply(seq_along(values), function(i) {
btnid = paste0(id,"__btn",i)
ui.id = paste0(id,"__ui",i)
btn = bsButton(btnid, label=labels[[i]], block=block, type="toggle")
#btn$attribs$class = "btn btn-default sbs-toggle-button btn-block"
btn$attribs$class = "btn-default sbs-toggle-button btn-block panel-heading panel-title text-left"
#btn = shinyBS:::removeClass(btn,"btn")
#btn = shinyBS:::removeClass(btn,"btn-default")
#btn = shinyBS:::addClass(btn,"panel-heading")
#btn = shinyBS:::addClass(btn,"panel-title")
#btn = shinyBS:::addClass(btn,"btn-default")
#btn = shinyBS:::addClass(btn,"text-left")
#btn = as.character(btn)
changeHandler(btnid, dynCollapse.click, collapseId=id, collapseValue=values[[i]], substitute.fun=FALSE, panel.fun=panel.fun, ui.id=ui.id,...)
list(btn, uiOutput(ui.id))
})
do.call("c",li)
#unlist(li)
}
dynCollapse.click = function(panel.fun,ui.id,value,...) {
args = list(...)
restore.point("dynCollapse.click")
cat("\nToogle value: ", value)
if (value) {
ui = panel.fun(ui.id=ui.id,...)
} else {
ui = HTML("")
}
setUI(ui.id,ui)
}
|
### Script was created and tested on Windows XP SP3 and R v. 3.2.0 (i386)
### Downloads the UCI household power consumption data and writes plot1.png:
### a histogram of Global Active Power for 2007-02-01 and 2007-02-02.

# Setup working directory
# You should change the working directory or comment the next line.
# If you work on Linux you should change the path name.
setwd("D:\\Course\\Exploratory Data Analysis\\CourseProject1")

# Create the data directory if it does not exist yet
if ( !file.exists(".\\data") ) {
  dir.create(".\\data")
}

# Download the zipped data set (mode = "wb": binary-safe on Windows)
dest_file <- ".\\household_power_consumption.zip"
fileUrl <- "http://d396qusza40orc.cloudfront.net/exdata/data/household_power_consumption.zip"
download.file(fileUrl, dest_file, mode = "wb")

# Unzip into the data directory, then locate the extracted file.
# (The original computed `path` once before unzipping -- dead code, removed.)
unzip(dest_file, exdir = ".\\data")
path <- paste0(".\\data\\", list.files(".\\data"))

# Read the data; this data set encodes missing values as "?"
my_data <- read.csv(path, sep = ";", header = TRUE,
                    colClasses = "character", na.strings = "?")

# Keep only the two days of interest (dates are formatted d/m/yyyy)
my_data_on_date <- subset(my_data, (my_data$Date == "1/2/2007") | (my_data$Date == "2/2/2007"))

# Draw plot1: histogram of Global Active Power
hist(as.numeric(my_data_on_date$Global_active_power), col = "red",
     main = "Global Active Power", xlab = "Global Active Power (kilowatts)")

# Write plot1 to file plot1.png
dev.copy(png, filename = "plot1.png", width = 480, height = 480)
graphics.off()
graphics.off() | /plot1.R | no_license | kda795/ExData_Plotting1 | R | false | false | 1,459 | r | ### Script was created and tested on Windows XP SP3 and R v. 3.2.0 (i386)
# Set up the working directory.
# NOTE(review): hard-coded, machine-specific path -- adjust or comment out
# before running on another machine (and change the separators on Linux).
setwd("D:\\Course\\Exploratory Data Analysis\\CourseProject1")
# Create the data directory if it does not already exist.
if ( !file.exists(".\\data") ) {
dir.create(".\\data")
}
# Destination file name for the download.
dest_file<-".\\household_power_consumption.zip"
# URL of the zipped data set.
fileUrl<-"http://d396qusza40orc.cloudfront.net/exdata/data/household_power_consumption.zip"
# Download the archive from the Internet (runs on every execution).
download.file(fileUrl,dest_file)
# NOTE(review): this first `path` assignment runs before unzip(), so it sees
# the directory contents from a previous run (or nothing); it is immediately
# overwritten below and is effectively dead code.
path<-paste0(".\\data\\",list.files(".\\data"))
# Extract the archive into ./data.
unzip(dest_file,exdir=".\\data")
# Path to the extracted data file.
# NOTE(review): assumes ./data now contains exactly one file -- with more
# than one, `path` becomes a vector and read.csv below would fail.
path<-paste0(".\\data\\",list.files(".\\data"))
# Read the data into `my_data`; everything is read as character.
my_data<-read.csv(path, sep=";",header=TRUE, colClasses = "character")
# Keep only the two target days (dates stored as d/m/Y strings).
my_data_on_date<-subset(my_data, (my_data$Date == "1/2/2007") | (my_data$Date == "2/2/2007"))
# Draw plot 1: histogram of global active power.
hist(as.numeric(my_data_on_date$Global_active_power),col="red", main="Global Active Power",xlab="Global Active Power (kilowatts)")
#axis(side=1, at=c(1,1400,2800), labels=c("Thu", "Fri", "Sat"))
#axis(side=2, at=c(0,2,4,6), labels=c("0","2","4","6"))
# Copy the on-screen plot to plot1.png.
dev.copy(png, filename = "plot1.png", width = 480, height = 480)
graphics.off() |
# ComBat: empirical-Bayes adjustment of batch effects in expression data.
#
# Arguments (as documented by the original author):
#   expression_xls:   the expression index file (e.g. outputted by dChip).
#   sample_info_file: tab-delimited text file with columns: Array name,
#                     sample name, Batch, and any other covariates to be
#                     included in the modeling.
#   type:             'txt' for a tab-delimited file, 'csv' for an Excel
#                     .csv file (sometimes R handles the .csv better, so
#                     try it if a .txt file gives problems).
#   write:            if TRUE, writes the adjusted data to a file; if FALSE,
#                     returns the adjusted data matrix (assign it!), e.g.
#                     NewData <- ComBat('exp.xls','Sample info.txt',write=F).
#   covariates:       'all' uses every column of the sample info file
#                     (except array/sample name); otherwise a vector of
#                     column indices that must include the Batch column.
#   par.prior:        TRUE = parametric EB adjustments (run faster; check
#                     the prior plots to see if the priors are reasonable),
#                     FALSE = nonparametric adjustments.
#   filter:           filters genes with absent calls in > 1-filter of the
#                     samples (default in dChip is .8); must be FALSE when
#                     the data has no presence/absence calls.
#   skip:             number of leading columns with probe names/gene info,
#                     so skip=5 means expression values start in column 6.
#   prior.plots:      if TRUE, plots the empirical (black, kernel estimate)
#                     vs parametric (red) batch-effect prior densities.
ComBat <- function(expression_xls, sample_info_file, type='txt', write=T, covariates='all', par.prior=T, filter=F, skip=0, prior.plots=T){
#debug: expression_xls='exp.txt'; sample_info_file='sam.txt'; type='txt'; write=T; covariates='all'; par.prior=T; filter=F; skip=0; prior.plots=T
cat('Reading Sample Information File\n')
saminfo <- read.table(sample_info_file, header=T, sep='\t',comment.char='')
# Errors are reported by returning a string rather than stop() -- callers
# must check the return value.
if(sum(colnames(saminfo)=="Batch")!=1){return('ERROR: Sample Information File does not have a Batch column!')}
cat('Reading Expression Data File\n')
if(type=='csv'){
dat <- read.csv(expression_xls,header=T,as.is=T)
#print(dat[1:2,])
#	dat <- dat[,trim.dat(dat)]
#print(colnames(dat))
# Re-read the header row directly so column names are not mangled by R.
colnames(dat)=scan(expression_xls,what='character',nlines=1,sep=',',quiet=T)[1:ncol(dat)]
#print(colnames(dat))
}
else{
dat <- read.table(expression_xls,header=T,comment.char='',fill=T,sep='\t', as.is=T)
dat <- dat[,trim.dat(dat)]
colnames(dat)=scan(expression_xls,what='character',nlines=1,sep='\t',quiet=T)[1:ncol(dat)]
}
# Split off the leading annotation columns, if any.
if (skip>0){
geneinfo <- as.matrix(dat[,1:skip])
dat <- dat[,-c(1:skip)]}
else{geneinfo=NULL}
#print(geneinfo[1:4])
#print(dat[1:2,])
# Presence/absence filtering: data columns are assumed to alternate
# value,call -- the call columns are dropped afterwards.
if(filter){
ngenes <- nrow(dat)
col <- ncol(dat)/2
present <- apply(dat, 1, filter.absent, filter)
dat <- dat[present, -(2*(1:col))]
if (skip>0){geneinfo <- geneinfo[present,]}
cat('Filtered genes absent in more than',filter,'of samples. Genes remaining:',nrow(dat),'; Genes filtered:',ngenes-nrow(dat),'\n')
}
if(any(apply(dat,2,mode)!='numeric')){return('ERROR: Array expression columns contain non-numeric values! (Check your .xls file for non-numeric values and if this is not the problem, make a .csv file and use the type=csv option)')}
# Align the sample info rows with the expression columns.
tmp <- match(colnames(dat),saminfo[,1])
if(any(is.na(tmp))){return('ERROR: Sample Information File and Data Array Names are not the same!')}
#tmp1 <- match(saminfo[,1],colnames(dat))
#saminfo <- saminfo[tmp1[!is.na(tmp1)],]
saminfo <- saminfo[tmp,] ## Bug fixed 01/04/2011
if(any(covariates != 'all')){saminfo <- saminfo[,c(1:2,covariates)]}
design <- design.mat(saminfo)
batches <- list.batch(saminfo)
n.batch <- length(batches)
n.batches <- sapply(batches, length)
n.array <- sum(n.batches)
## Check for missing values
NAs = any(is.na(dat))
if(NAs){cat(c('Found',sum(is.na(dat)),'Missing Data Values\n'),sep=' ')}
#print(dat[1:2,])
##Standardize Data across genes
cat('Standardizing Data across genes\n')
# OLS fit per gene; the row-wise Beta.NA fallback handles missing values.
if (!NAs){B.hat <- solve(t(design)%*%design)%*%t(design)%*%t(as.matrix(dat))}else{B.hat=apply(dat,1,Beta.NA,design)} #Standarization Model
grand.mean <- t(n.batches/n.array)%*%B.hat[1:n.batch,]
if (!NAs){var.pooled <- ((dat-t(design%*%B.hat))^2)%*%rep(1/n.array,n.array)}else{var.pooled <- apply(dat-t(design%*%B.hat),1,var,na.rm=T)}
stand.mean <- t(grand.mean)%*%t(rep(1,n.array))
# Add back covariate (non-batch) effects to the standardization mean.
if(!is.null(design)){tmp <- design;tmp[,c(1:n.batch)] <- 0;stand.mean <- stand.mean+t(tmp%*%B.hat)}
s.data <- (dat-stand.mean)/(sqrt(var.pooled)%*%t(rep(1,n.array)))
##Get regression batch effect parameters
cat("Fitting L/S model and finding priors\n")
batch.design <- design[,1:n.batch]
if (!NAs){gamma.hat <- solve(t(batch.design)%*%batch.design)%*%t(batch.design)%*%t(as.matrix(s.data))}else{gamma.hat=apply(s.data,1,Beta.NA,batch.design)}
delta.hat <- NULL
for (i in batches){
delta.hat <- rbind(delta.hat,apply(s.data[,i], 1, var,na.rm=T))
}
##Find Priors
gamma.bar <- apply(gamma.hat, 1, mean)
t2 <- apply(gamma.hat, 1, var)
a.prior <- apply(delta.hat, 1, aprior)
b.prior <- apply(delta.hat, 1, bprior)
##Plot empirical and parametric priors (first batch only: index [1,])
if (prior.plots & par.prior){
par(mfrow=c(2,2))
tmp <- density(gamma.hat[1,])
plot(tmp, type='l', main="Density Plot")
xx <- seq(min(tmp$x), max(tmp$x), length=100)
lines(xx,dnorm(xx,gamma.bar[1],sqrt(t2[1])), col=2)
qqnorm(gamma.hat[1,])
qqline(gamma.hat[1,], col=2)
tmp <- density(delta.hat[1,])
invgam <- 1/rgamma(ncol(delta.hat),a.prior[1],b.prior[1])
tmp1 <- density(invgam)
plot(tmp, typ='l', main="Density Plot", ylim=c(0,max(tmp$y,tmp1$y)))
lines(tmp1, col=2)
qqplot(delta.hat[1,], invgam, xlab="Sample Quantiles", ylab='Theoretical Quantiles')
lines(c(0,max(invgam)),c(0,max(invgam)),col=2)
title('Q-Q Plot')
}
##Find EB batch adjustments
gamma.star <- delta.star <- NULL
if(par.prior){
cat("Finding parametric adjustments\n")
for (i in 1:n.batch){
temp <- it.sol(s.data[,batches[[i]]],gamma.hat[i,],delta.hat[i,],gamma.bar[i],t2[i],a.prior[i],b.prior[i])
gamma.star <- rbind(gamma.star,temp[1,])
delta.star <- rbind(delta.star,temp[2,])
}
}else{
cat("Finding nonparametric adjustments\n")
for (i in 1:n.batch){
temp <- int.eprior(as.matrix(s.data[,batches[[i]]]),gamma.hat[i,],delta.hat[i,])
gamma.star <- rbind(gamma.star,temp[1,])
delta.star <- rbind(delta.star,temp[2,])
}
}
### Normalize the Data ###
cat("Adjusting the Data\n")
bayesdata <- s.data
j <- 1
# Remove the EB-shrunk batch effects, then restore scale and mean.
for (i in batches){
bayesdata[,i] <- (bayesdata[,i]-t(batch.design[i,]%*%gamma.star))/(sqrt(delta.star[j,])%*%t(rep(1,n.batches[j])))
j <- j+1
}
bayesdata <- (bayesdata*(sqrt(var.pooled)%*%t(rep(1,n.array))))+stand.mean
if(write){
output_file <- paste('Adjusted',expression_xls,'.xls',sep='_')
#print(geneinfo[1:2])
#print(bayesdata[1:2,1:4])
#cat(c(colnames(geneinfo),colnames(dat),'\n'),file=output_file,sep='\t')
#suppressWarnings(write.table(cbind(geneinfo,formatC(as.matrix(bayesdata), format = "f")), file=output_file, sep="\t", quote=F,row.names=F,col.names=F,append=T))
outdata <- cbind(ProbeID=geneinfo, bayesdata); write.table(outdata, file=output_file, sep="\t")
cat("Adjusted data saved in file:",output_file,"\n")
}else{return(cbind(geneinfo,bayesdata))}
}
# Decides whether one gene (row) passes the presence/absence filter.
# `x` is a row whose even-numbered entries are detection calls; a call of
# "A" (absent) or "M" (marginal) counts as absent. Returns TRUE when the
# absent fraction is at most `pct`.
filter.absent <- function(x, pct){
  n.arrays <- length(x) / 2
  calls <- x[2 * seq_len(n.arrays)]
  pct.absent <- (sum(calls == "A") + sum(calls == "M")) / n.arrays
  # Idiom fix: return the comparison directly instead of mutating a `T`
  # flag (bare T/F literals are reassignable and unsafe).
  pct.absent <= pct
}
# Next two functions make the design matrix (X) from the sample info file

# Appends one 0/1 indicator column per factor level of `vec` (levels
# `start` through nlevels(vec)) to an existing design matrix `des`.
build.design <- function(vec, des=NULL, start=2){
  used.levels <- levels(vec)[start:nlevels(vec)]
  ind <- matrix(0, length(vec), length(used.levels))
  for (k in seq_along(used.levels)) {
    ind[, k] <- vec == used.levels[k]
  }
  cbind(des, ind)
}
# Builds the full design matrix for the model: one indicator column per
# batch level (build.design is called with start=1, so every level gets a
# column), followed by indicator columns for each additional covariate in
# the sample info file (levels 2..k each; the first level is the reference).
design.mat <- function(saminfo){
tmp <- which(colnames(saminfo) == 'Batch')
tmp1 <- as.factor(saminfo[,tmp])
cat("Found",nlevels(tmp1),'batches\n')
design <- build.design(tmp1,start=1)
# Covariates are every column except the first two (array/sample names)
# and the Batch column.
ncov <- ncol(as.matrix(saminfo[,-c(1:2,tmp)]))
cat("Found",ncov,'covariate(s)\n')
if(ncov>0){
for (j in 1:ncov){
tmp1 <- as.factor(as.matrix(saminfo[,-c(1:2,tmp)])[,j])
design <- build.design(tmp1,des=design)
}
}
design
}
# Makes a list with elements pointing to which array belongs to which batch
list.batch <- function(saminfo){
  batch.fac <- as.factor(saminfo[, which(colnames(saminfo) == 'Batch')])
  all.idx <- seq_along(batch.fac)
  # One integer index vector per batch level, in level order.
  lapply(levels(batch.fac), function(lev) all.idx[batch.fac == lev])
}
# Trims the data of extra columns, note your array names cannot be named 'X' or start with 'X.'
trim.dat <- function(dat){
  # Keep columns whose name, up to the first '.', is not the literal "X"
  # (read.table names unlabeled filler columns "X", "X.1", "X.2", ...).
  first.token <- vapply(strsplit(colnames(dat), '\\.'), `[`, character(1), 1)
  first.token != 'X'
}
# Following four find empirical hyper-prior values

# Shape hyper-parameter, estimated from the sample mean/variance of the
# per-batch variance estimates.
aprior <- function(gamma.hat){
  m <- mean(gamma.hat)
  s2 <- var(gamma.hat)
  (2 * s2 + m^2) / s2
}
# Rate hyper-parameter, estimated from the same sample moments.
bprior <- function(gamma.hat){
  m <- mean(gamma.hat)
  s2 <- var(gamma.hat)
  (m * s2 + m^3) / s2
}
# Conditional posterior estimate of the batch effect (given d.star).
postmean <- function(g.hat, g.bar, n, d.star, t2){
  (t2 * n * g.hat + d.star * g.bar) / (t2 * n + d.star)
}
# Conditional posterior estimate of the batch variance (given the
# residual sum of squares sum2).
postvar <- function(sum2, n, a, b){
  (.5 * sum2 + b) / (n / 2 + a - 1)
}
# Pass in one batch's columns of the standardized data (sdat), the batch
# mean estimates (g.hat), batch variance estimates (d.hat), and the prior
# parameters (g.bar, t2, a, b). Uses an EM-style fixed-point iteration to
# find the parametric batch adjustments.
it.sol <- function(sdat,g.hat,d.hat,g.bar,t2,a,b,conv=.0001){
# Per-gene count of observed (non-NA) values.
n <- apply(!is.na(sdat),1,sum)
g.old <- g.hat
d.old <- d.hat
change <- 1
count <- 0
# Alternate the two conditional posterior updates until the largest
# relative change in either estimate drops below `conv`.
while(change>conv){
g.new <- postmean(g.hat,g.bar,n,d.old,t2)
sum2 <- apply((sdat-g.new%*%t(rep(1,ncol(sdat))))^2, 1, sum,na.rm=T)
d.new <- postvar(sum2,n,a,b)
change <- max(abs(g.new-g.old)/g.old,abs(d.new-d.old)/d.old)
g.old <- g.new
d.old <- d.new
count <- count+1
}
#cat("This batch took", count, "iterations until convergence\n")
# Two-row matrix: adjusted batch means (g.star) and variances (d.star).
adjust <- rbind(g.new, d.new)
rownames(adjust) <- c("g.star","d.star")
adjust
}
# Likelihood of data vector x under N(g.hat, d.hat), where d.hat is a
# variance (used by the nonparametric adjustment below).
L <- function(x, g.hat, d.hat){
  densities <- dnorm(x, mean = g.hat, sd = sqrt(d.hat))
  prod(densities)
}
# Monte Carlo integration function to find the nonparametric adjustments.
# For each gene i, the posterior expectations of the batch mean and
# variance are approximated by weighting every OTHER gene's estimates
# (g.hat[-i], d.hat[-i]) with the normal likelihood of gene i's data.
int.eprior <- function(sdat,g.hat,d.hat){
g.star <- d.star <- NULL
r <- nrow(sdat)
for(i in 1:r){
g <- g.hat[-i]
d <- d.hat[-i]
# Gene i's observed (non-NA) standardized values.
x <- sdat[i,!is.na(sdat[i,])]
n <- length(x)
j <- numeric(n)+1
# One row per candidate (g, d) pair, each row repeating gene i's data.
dat <- matrix(as.numeric(x),length(g),n,byrow=T)
resid2 <- (dat-g)^2
# Row sums of squared residuals (matrix product with a ones vector).
sum2 <- resid2%*%j
# Normal likelihood of the data under each candidate mean/variance.
LH <- 1/(2*pi*d)^(n/2)*exp(-sum2/(2*d))
# Guard against NaN weights produced by numerical underflow.
LH[LH=="NaN"]=0
g.star <- c(g.star,sum(g*LH)/sum(LH))
d.star <- c(d.star,sum(d*LH)/sum(LH))
#if(i%%1000==0){cat(i,'\n')}
}
adjust <- rbind(g.star,d.star)
rownames(adjust) <- c("g.star","d.star")
adjust
}
# Fits the L/S model in the presence of missing data values:
# ordinary least squares restricted to the rows of X where y is observed.
# y: response vector (may contain NA); X: full design matrix.
# Returns solve(X'X) %*% X'y computed over the complete cases.
Beta.NA <- function(y, X){
  keep <- !is.na(y)
  # drop = FALSE keeps `des` a matrix even when only one observation
  # remains (the original `X[keep, ]` collapsed it to a vector, which
  # breaks t() / solve() below).
  des <- X[keep, , drop = FALSE]
  y1 <- y[keep]
  solve(t(des) %*% des) %*% t(des) %*% y1
}
| /ComBat.R | no_license | MosheSilverstein/WORK | R | false | false | 11,339 | r | # 'expression_xls' is the expression index file (e.g. outputted by dChip); 'sample_info_file' is a tab-delimited text file containing the colums: Array name, sample name, Batch, and any other covariates to be included in the modeling; 'type' currently supports two data file types 'txt' for a tab-delimited text file and 'csv' for an Excel .csv file (sometimes R handles the .csv file better, so use this if you have problems with a .txt file!); 'write' if 'T' ComBat writes adjusted data to a file, and if 'F' and ComBat outputs the adjusted data matrix if 'F' (so assign it to an object! i.e. NewData <- ComBat('my expression.xls','Sample info file.txt', write=F)); 'covariates=all' will use all of the columns in your sample info file in the modeling (except array/sample name), if you only want use a some of the columns in your sample info file, specify these columns here as a vector (you must include the Batch column in this list); 'par.prior' if 'T' uses the parametric adjustments, if 'F' uses the nonparametric adjustments--if you are unsure what to use, try the parametric adjustments (they run faster) and check the plots to see if these priors are reasonable; 'filter=value' filters the genes with absent calls in > 1-value of the samples. The defaut here (as well as in dchip) is .8. Filter if you can as the EB adjustments work better after filtering. Filter must be numeric if your expression index file contains presence/absence calls (but you can set it >1 if you don't want to filter any genes) and must be 'F' if your data doesn't have presence/absence calls; 'skip' is the number of columns that contain probe names and gene information, so 'skip=5' implies the first expression values are in column 6; 'prior.plots' if true will give prior plots with black as a kernal estimate of the empirical batch effect density and red as the parametric estimate.
# NOTE(review): everything below is a byte-identical duplicate of the
# ComBat implementation that appears earlier in this dump. When the file
# is sourced top to bottom these definitions shadow the earlier ones.
# Kept verbatim; see the earlier copy for detailed documentation.
ComBat <- function(expression_xls, sample_info_file, type='txt', write=T, covariates='all', par.prior=T, filter=F, skip=0, prior.plots=T){
#debug: expression_xls='exp.txt'; sample_info_file='sam.txt'; type='txt'; write=T; covariates='all'; par.prior=T; filter=F; skip=0; prior.plots=T
cat('Reading Sample Information File\n')
saminfo <- read.table(sample_info_file, header=T, sep='\t',comment.char='')
if(sum(colnames(saminfo)=="Batch")!=1){return('ERROR: Sample Information File does not have a Batch column!')}
cat('Reading Expression Data File\n')
if(type=='csv'){
dat <- read.csv(expression_xls,header=T,as.is=T)
#print(dat[1:2,])
#	dat <- dat[,trim.dat(dat)]
#print(colnames(dat))
colnames(dat)=scan(expression_xls,what='character',nlines=1,sep=',',quiet=T)[1:ncol(dat)]
#print(colnames(dat))
}
else{
dat <- read.table(expression_xls,header=T,comment.char='',fill=T,sep='\t', as.is=T)
dat <- dat[,trim.dat(dat)]
colnames(dat)=scan(expression_xls,what='character',nlines=1,sep='\t',quiet=T)[1:ncol(dat)]
}
if (skip>0){
geneinfo <- as.matrix(dat[,1:skip])
dat <- dat[,-c(1:skip)]}
else{geneinfo=NULL}
#print(geneinfo[1:4])
#print(dat[1:2,])
if(filter){
ngenes <- nrow(dat)
col <- ncol(dat)/2
present <- apply(dat, 1, filter.absent, filter)
dat <- dat[present, -(2*(1:col))]
if (skip>0){geneinfo <- geneinfo[present,]}
cat('Filtered genes absent in more than',filter,'of samples. Genes remaining:',nrow(dat),'; Genes filtered:',ngenes-nrow(dat),'\n')
}
if(any(apply(dat,2,mode)!='numeric')){return('ERROR: Array expression columns contain non-numeric values! (Check your .xls file for non-numeric values and if this is not the problem, make a .csv file and use the type=csv option)')}
tmp <- match(colnames(dat),saminfo[,1])
if(any(is.na(tmp))){return('ERROR: Sample Information File and Data Array Names are not the same!')}
#tmp1 <- match(saminfo[,1],colnames(dat))
#saminfo <- saminfo[tmp1[!is.na(tmp1)],]
saminfo <- saminfo[tmp,] ## Bug fixed 01/04/2011
if(any(covariates != 'all')){saminfo <- saminfo[,c(1:2,covariates)]}
design <- design.mat(saminfo)
batches <- list.batch(saminfo)
n.batch <- length(batches)
n.batches <- sapply(batches, length)
n.array <- sum(n.batches)
## Check for missing values
NAs = any(is.na(dat))
if(NAs){cat(c('Found',sum(is.na(dat)),'Missing Data Values\n'),sep=' ')}
#print(dat[1:2,])
##Standardize Data across genes
cat('Standardizing Data across genes\n')
if (!NAs){B.hat <- solve(t(design)%*%design)%*%t(design)%*%t(as.matrix(dat))}else{B.hat=apply(dat,1,Beta.NA,design)} #Standarization Model
grand.mean <- t(n.batches/n.array)%*%B.hat[1:n.batch,]
if (!NAs){var.pooled <- ((dat-t(design%*%B.hat))^2)%*%rep(1/n.array,n.array)}else{var.pooled <- apply(dat-t(design%*%B.hat),1,var,na.rm=T)}
stand.mean <- t(grand.mean)%*%t(rep(1,n.array))
if(!is.null(design)){tmp <- design;tmp[,c(1:n.batch)] <- 0;stand.mean <- stand.mean+t(tmp%*%B.hat)}
s.data <- (dat-stand.mean)/(sqrt(var.pooled)%*%t(rep(1,n.array)))
##Get regression batch effect parameters
cat("Fitting L/S model and finding priors\n")
batch.design <- design[,1:n.batch]
if (!NAs){gamma.hat <- solve(t(batch.design)%*%batch.design)%*%t(batch.design)%*%t(as.matrix(s.data))}else{gamma.hat=apply(s.data,1,Beta.NA,batch.design)}
delta.hat <- NULL
for (i in batches){
delta.hat <- rbind(delta.hat,apply(s.data[,i], 1, var,na.rm=T))
}
##Find Priors
gamma.bar <- apply(gamma.hat, 1, mean)
t2 <- apply(gamma.hat, 1, var)
a.prior <- apply(delta.hat, 1, aprior)
b.prior <- apply(delta.hat, 1, bprior)
##Plot empirical and parametric priors
if (prior.plots & par.prior){
par(mfrow=c(2,2))
tmp <- density(gamma.hat[1,])
plot(tmp, type='l', main="Density Plot")
xx <- seq(min(tmp$x), max(tmp$x), length=100)
lines(xx,dnorm(xx,gamma.bar[1],sqrt(t2[1])), col=2)
qqnorm(gamma.hat[1,])
qqline(gamma.hat[1,], col=2)
tmp <- density(delta.hat[1,])
invgam <- 1/rgamma(ncol(delta.hat),a.prior[1],b.prior[1])
tmp1 <- density(invgam)
plot(tmp, typ='l', main="Density Plot", ylim=c(0,max(tmp$y,tmp1$y)))
lines(tmp1, col=2)
qqplot(delta.hat[1,], invgam, xlab="Sample Quantiles", ylab='Theoretical Quantiles')
lines(c(0,max(invgam)),c(0,max(invgam)),col=2)
title('Q-Q Plot')
}
##Find EB batch adjustments
gamma.star <- delta.star <- NULL
if(par.prior){
cat("Finding parametric adjustments\n")
for (i in 1:n.batch){
temp <- it.sol(s.data[,batches[[i]]],gamma.hat[i,],delta.hat[i,],gamma.bar[i],t2[i],a.prior[i],b.prior[i])
gamma.star <- rbind(gamma.star,temp[1,])
delta.star <- rbind(delta.star,temp[2,])
}
}else{
cat("Finding nonparametric adjustments\n")
for (i in 1:n.batch){
temp <- int.eprior(as.matrix(s.data[,batches[[i]]]),gamma.hat[i,],delta.hat[i,])
gamma.star <- rbind(gamma.star,temp[1,])
delta.star <- rbind(delta.star,temp[2,])
}
}
### Normalize the Data ###
cat("Adjusting the Data\n")
bayesdata <- s.data
j <- 1
for (i in batches){
bayesdata[,i] <- (bayesdata[,i]-t(batch.design[i,]%*%gamma.star))/(sqrt(delta.star[j,])%*%t(rep(1,n.batches[j])))
j <- j+1
}
bayesdata <- (bayesdata*(sqrt(var.pooled)%*%t(rep(1,n.array))))+stand.mean
if(write){
output_file <- paste('Adjusted',expression_xls,'.xls',sep='_')
#print(geneinfo[1:2])
#print(bayesdata[1:2,1:4])
#cat(c(colnames(geneinfo),colnames(dat),'\n'),file=output_file,sep='\t')
#suppressWarnings(write.table(cbind(geneinfo,formatC(as.matrix(bayesdata), format = "f")), file=output_file, sep="\t", quote=F,row.names=F,col.names=F,append=T))
outdata <- cbind(ProbeID=geneinfo, bayesdata); write.table(outdata, file=output_file, sep="\t")
cat("Adjusted data saved in file:",output_file,"\n")
}else{return(cbind(geneinfo,bayesdata))}
}
# filters data based on presence/absence call
filter.absent <- function(x,pct){
present <- T
col <- length(x)/2
pct.absent <- (sum(x[2*(1:col)]=="A") + sum(x[2*(1:col)]=="M"))/col
if(pct.absent > pct){present <- F}
present
}
# Next two functions make the design matrix (X) from the sample info file
build.design <- function(vec, des=NULL, start=2){
tmp <- matrix(0,length(vec),nlevels(vec)-start+1)
for (i in 1:ncol(tmp)){tmp[,i] <- vec==levels(vec)[i+start-1]}
cbind(des,tmp)
}
design.mat <- function(saminfo){
tmp <- which(colnames(saminfo) == 'Batch')
tmp1 <- as.factor(saminfo[,tmp])
cat("Found",nlevels(tmp1),'batches\n')
design <- build.design(tmp1,start=1)
ncov <- ncol(as.matrix(saminfo[,-c(1:2,tmp)]))
cat("Found",ncov,'covariate(s)\n')
if(ncov>0){
for (j in 1:ncov){
tmp1 <- as.factor(as.matrix(saminfo[,-c(1:2,tmp)])[,j])
design <- build.design(tmp1,des=design)
}
}
design
}
# Makes a list with elements pointing to which array belongs to which batch
list.batch <- function(saminfo){
tmp1 <- as.factor(saminfo[,which(colnames(saminfo) == 'Batch')])
batches <- NULL
for (i in 1:nlevels(tmp1)){batches <- append(batches, list((1:length(tmp1))[tmp1==levels(tmp1)[i]]))}
batches
}
# Trims the data of extra columns, note your array names cannot be named 'X' or start with 'X.'
trim.dat <- function(dat){
tmp <- strsplit(colnames(dat),'\\.')
tr <- NULL
for (i in 1:length(tmp)){tr <- c(tr,tmp[[i]][1]!='X')}
tr
}
# Following four find empirical hyper-prior values
aprior <- function(gamma.hat){m=mean(gamma.hat); s2=var(gamma.hat); (2*s2+m^2)/s2}
bprior <- function(gamma.hat){m=mean(gamma.hat); s2=var(gamma.hat); (m*s2+m^3)/s2}
postmean <- function(g.hat,g.bar,n,d.star,t2){(t2*n*g.hat+d.star*g.bar)/(t2*n+d.star)}
postvar <- function(sum2,n,a,b){(.5*sum2+b)/(n/2+a-1)}
# Pass in entire data set, the design matrix for the entire data, the batch means, the batch variances, priors (m, t2, a, b), columns of the data matrix for the batch. Uses the EM to find the parametric batch adjustments
it.sol <- function(sdat,g.hat,d.hat,g.bar,t2,a,b,conv=.0001){
n <- apply(!is.na(sdat),1,sum)
g.old <- g.hat
d.old <- d.hat
change <- 1
count <- 0
while(change>conv){
g.new <- postmean(g.hat,g.bar,n,d.old,t2)
sum2 <- apply((sdat-g.new%*%t(rep(1,ncol(sdat))))^2, 1, sum,na.rm=T)
d.new <- postvar(sum2,n,a,b)
change <- max(abs(g.new-g.old)/g.old,abs(d.new-d.old)/d.old)
g.old <- g.new
d.old <- d.new
count <- count+1
}
#cat("This batch took", count, "iterations until convergence\n")
adjust <- rbind(g.new, d.new)
rownames(adjust) <- c("g.star","d.star")
adjust
}
#likelihood function used below
L <- function(x,g.hat,d.hat){prod(dnorm(x,g.hat,sqrt(d.hat)))}
# Monte Carlo integration function to find the nonparametric adjustments
int.eprior <- function(sdat,g.hat,d.hat){
g.star <- d.star <- NULL
r <- nrow(sdat)
for(i in 1:r){
g <- g.hat[-i]
d <- d.hat[-i]
x <- sdat[i,!is.na(sdat[i,])]
n <- length(x)
j <- numeric(n)+1
dat <- matrix(as.numeric(x),length(g),n,byrow=T)
resid2 <- (dat-g)^2
sum2 <- resid2%*%j
LH <- 1/(2*pi*d)^(n/2)*exp(-sum2/(2*d))
LH[LH=="NaN"]=0
g.star <- c(g.star,sum(g*LH)/sum(LH))
d.star <- c(d.star,sum(d*LH)/sum(LH))
#if(i%%1000==0){cat(i,'\n')}
}
adjust <- rbind(g.star,d.star)
rownames(adjust) <- c("g.star","d.star")
adjust
}
#fits the L/S model in the presence of missing data values
Beta.NA = function(y,X){
des=X[!is.na(y),]
y1=y[!is.na(y)]
B <- solve(t(des)%*%des)%*%t(des)%*%y1
B
}
|
# Base URL of the zipped CASGEM data archive.
casgem_zip_url <- "https://d3.water.ca.gov/owncloud/index.php/s/YPFGd8p47s6jhu6/download"

# Maps friendly dataset names to file names inside the zip archive.
files_in_zip <- list(
  "groundwater_data" = "gwl_file.csv",
  "station_metadata" = "gst_file.csv"
)

#' Download and extract CASGEM well data.
#'
#' @param d a data file to obtain from zip. Available options listed by
#'   names(files_in_zip); when NULL, the user is prompted and, on
#'   confirmation, every file in the archive is extracted.
#' @return (invisibly) the path(s) of the extracted file(s).
get_casgem_data <- function(d=NULL) {
  if (is.null(d)) {
    message("You are attempting to extract all files in this archive, an action that could take a while")
    pr <- readline(prompt = "Proceed? [y/N]")
    if (tolower(pr) != "y") {
      stop("action stopped")
    }
    download.file(casgem_zip_url, "temp.zip")
    on.exit(file.remove("temp.zip"))
    data <- unzip(zipfile = "temp.zip")
    # BUG FIX: without this return, execution fell through to the
    # single-file branch below with d = NULL and errored on
    # files_in_zip[[NULL]] after having already downloaded everything.
    return(invisible(data))
  }
  name_to_extract <- files_in_zip[[d]]
  if (is.null(name_to_extract)) {
    stop("data is not a valid choice for this zip file")
  }
  download.file(casgem_zip_url, "temp.zip")
  on.exit(file.remove("temp.zip"))
  # Use the already-resolved name instead of re-indexing the list.
  data <- unzip(zipfile = "temp.zip", files = name_to_extract)
  invisible(data)
}
| /R/retrieve-data.R | no_license | FlowWest/casgem-well-data | R | false | false | 1,045 | r | casgem_zip_url <- "https://d3.water.ca.gov/owncloud/index.php/s/YPFGd8p47s6jhu6/download"
# Maps friendly dataset names to file names inside the zip archive
# (the casgem_zip_url constant is defined on the preceding line).
files_in_zip <- list(
  "groundwater_data" = "gwl_file.csv",
  "station_metadata" = "gst_file.csv"
)
#' Download and extract CASGEM well data.
#'
#' @param d a data file to obtain from zip. Available options listed by
#'   names(files_in_zip); when NULL, the user is prompted and, on
#'   confirmation, every file in the archive is extracted.
#' @return (invisibly) the path(s) of the extracted file(s).
get_casgem_data <- function(d=NULL) {
  if (is.null(d)) {
    message("You are attempting to extract all files in this archive, an action that could take a while")
    pr <- readline(prompt = "Proceed? [y/N]")
    if (tolower(pr) != "y") {
      stop("action stopped")
    }
    download.file(casgem_zip_url, "temp.zip")
    on.exit(file.remove("temp.zip"))
    data <- unzip(zipfile = "temp.zip")
    # BUG FIX: without this return, execution fell through to the
    # single-file branch below with d = NULL and errored on
    # files_in_zip[[NULL]] after having already downloaded everything.
    return(invisible(data))
  }
  name_to_extract <- files_in_zip[[d]]
  if (is.null(name_to_extract)) {
    stop("data is not a valid choice for this zip file")
  }
  download.file(casgem_zip_url, "temp.zip")
  on.exit(file.remove("temp.zip"))
  # Use the already-resolved name instead of re-indexing the list.
  data <- unzip(zipfile = "temp.zip", files = name_to_extract)
  invisible(data)
}
|
# Auto-extracted example script for the FSAdata::HumpbackWFCR dataset
# (two-sample capture histories of Humpback Whitefish): loads the data
# and prints its structure and first rows.
library(FSAdata)
### Name: HumpbackWFCR
### Title: Capture histories (2 sample) of Humpback Whitefish.
### Aliases: HumpbackWFCR
### Keywords: datasets
### ** Examples
data(HumpbackWFCR)
str(HumpbackWFCR)
head(HumpbackWFCR)
| /data/genthat_extracted_code/FSAdata/examples/HumpbackWFCR.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 232 | r | library(FSAdata)
# Duplicate of the auto-extracted FSAdata::HumpbackWFCR example script
# (its library(FSAdata) call sits on the preceding metadata row of this
# dump); loads the dataset and prints its structure and first rows.
### Name: HumpbackWFCR
### Title: Capture histories (2 sample) of Humpback Whitefish.
### Aliases: HumpbackWFCR
### Keywords: datasets
### ** Examples
data(HumpbackWFCR)
str(HumpbackWFCR)
head(HumpbackWFCR)
|
# Pull Google Trends data for a set of search terms and forecast the next
# year of weekly interest with an unobserved-components model (rucm).
library(gtrend)
library(GTrendsR)
library(dplyr)
library(ggplot2)
library(scales)
library(forecast)
library(rucm)
library(yaml)
# Set working directory (machine-specific; adjust before running).
setwd("C:/Users/jwagg/Documents/R Projects/Gtrends/")
# Load Google credentials (uname / pwrd) from a local YAML file so they
# are not hard-coded in the script.
config <- yaml.load_file('config.txt')
# Search terms to pull trends for.
terms <- c("amazon")
# Call the trends scraper with the credentials from config.txt.
#out <- gtrend_scraper("email@gmail.com", "password", terms)
out <- gtrend_scraper(config$uname, config$pwrd, terms)
# Extract the trend series from the returned data.
que <- extract_trend(out)
# Subset trends into a data frame (618 weekly observations).
x <- data.frame(date = que[[1]][1:618, 2], amazon = que[[1]][1:618, 3])
# Put into a time series representing weekly data.
#x2 <- ts(x$amazon, freq = 365.25/7, start = 2004 + 10/365.25)
# Plot data for a visual check.
plot(x$amazon, type = "l")
# Fit an unobserved-components model: irregular + level + 52-week season,
# no slope.
modelTest <- ucm(amazon~0, data = x, irregular = TRUE, level = TRUE, slope = FALSE, season = TRUE, season.length = 52)
# Plot data and smoothed level values.
plot(x$amazon, ylab = "demand", type = "l")
lines(modelTest$s.level, col = "red")
modelTest$model #Print the model
# Predict the next 52 weekly periods.
x3 <- predict(modelTest$model, n.ahead = 52)
# Convert original and forecast series for stacking in one data frame.
# BUG FIX: the original referenced x$comcast, a column that does not
# exist in this script (the modeled term is "amazon"), so the data frame
# construction below failed.
orig <- as.matrix(x$amazon)
orig2 <- data.frame(period = "orig", demand = orig)
fcast <- as.matrix(x3)
fcast2 <- data.frame(period = "fcast", demand = fcast)
# Bind together and add a sequential index.
x5 <- rbind(orig2, fcast2)
x5$ind <- as.numeric(rownames(x5))
# Subset to recent data so the forecast tail is visible.
x6 <- subset(x5, ind >= 500)
# Plot actuals vs forecast, colored by period (bare column name for
# consistency with the x/y aesthetics).
qplot(
  x = ind,
  y = demand,
  data = x6,
  color = period,
  geom = "line"
)
| /googleTrendsForecast.R | no_license | justinwagg/Gtrends | R | false | false | 1,656 | r | library(gtrend)
# Duplicate copy of the Google Trends forecasting script: pulls trends
# data and forecasts the next year of weekly interest with rucm
# (its leading library(gtrend) call sits on the preceding metadata row).
library(GTrendsR)
library(dplyr)
library(ggplot2)
library(scales)
library(forecast)
library(rucm)
library(yaml)
# Set working directory (machine-specific; adjust before running).
setwd("C:/Users/jwagg/Documents/R Projects/Gtrends/")
# Load Google credentials (uname / pwrd) from a local YAML file.
config <- yaml.load_file('config.txt')
# Search terms to pull trends for.
terms <- c("amazon")
# Call the trends scraper with the credentials from config.txt.
#out <- gtrend_scraper("email@gmail.com", "password", terms)
out <- gtrend_scraper(config$uname, config$pwrd, terms)
# Extract the trend series from the returned data.
que <- extract_trend(out)
# Subset trends into a data frame (618 weekly observations).
x <- data.frame(date = que[[1]][1:618, 2], amazon = que[[1]][1:618, 3])
# Put into a time series representing weekly data.
#x2 <- ts(x$amazon, freq = 365.25/7, start = 2004 + 10/365.25)
# Plot data for a visual check.
plot(x$amazon, type = "l")
# Fit an unobserved-components model: irregular + level + 52-week season,
# no slope.
modelTest <- ucm(amazon~0, data = x, irregular = TRUE, level = TRUE, slope = FALSE, season = TRUE, season.length = 52)
# Plot data and smoothed level values.
plot(x$amazon, ylab = "demand", type = "l")
lines(modelTest$s.level, col = "red")
modelTest$model #Print the model
# Predict the next 52 weekly periods.
x3 <- predict(modelTest$model, n.ahead = 52)
# Convert original and forecast series for stacking in one data frame.
# BUG FIX: the original referenced x$comcast, a column that does not
# exist in this script (the modeled term is "amazon"), so the data frame
# construction below failed.
orig <- as.matrix(x$amazon)
orig2 <- data.frame(period = "orig", demand = orig)
fcast <- as.matrix(x3)
fcast2 <- data.frame(period = "fcast", demand = fcast)
# Bind together and add a sequential index.
x5 <- rbind(orig2, fcast2)
x5$ind <- as.numeric(rownames(x5))
# Subset to recent data so the forecast tail is visible.
x6 <- subset(x5, ind >= 500)
# Plot actuals vs forecast, colored by period (bare column name for
# consistency with the x/y aesthetics).
qplot(
  x = ind,
  y = demand,
  data = x6,
  color = period,
  geom = "line"
)
|
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
# Function to compute power for dyadic IL studies as a function of the number of participants
Summary.model.Dyad.IL = function(Model,N.dyad,N0.dyad,N1.dyad,T.obs,
c.F,c.M,a.FF,p.MF,a.FF2,p.MF2,a.MM,p.FM,a.MM2,p.FM2,
c,a,a.2,p,p.2,
c.F0,c.F1,c.M0,c.M1,a.FF0,a.FF1,a.FF02,a.FF12,p.MF0,p.MF1,p.MF02,p.MF12,
a.MM0,a.MM1,a.MM02,a.MM12,p.FM0,p.FM1,p.FM02,p.FM12,
c0,c1,a0,a1,a02,a12,p0,p1,p02,p12,
b.F,b.M,b.FF,b.MF,b.MM,b.FM,b.FF2,b.MF2,b.MM2,b.FM2,
d.F,d.M,d.FF,d.MF,d.MM,d.FM,d.FF2,d.MF2,d.MM2,d.FM2,
b,b.a,b.a2,b.p,b.p2,
d,d.a,d.a2,d.p,d.p2,
rho.YF,rho.YM,rho.Y,rho.YF0,rho.YF1,rho.YM0,rho.YM1,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,sigma.nu,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,
mu.W,sigma.W,prob.D,
is.center.X,is.center.W,R,alpha,is.REML){
########################################################################################
########################################################################################
########################################################################################
suppressMessages(library(nlme,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(MASS,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(tidyverse,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(future.apply,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(gridExtra,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(formattable,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(htmltools,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(shiny,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(DT,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(ggplot2,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(gridExtra,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(data.table,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(plyr,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(dplyr,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(tidyr,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(shinyjs,warn.conflicts = FALSE, quietly=TRUE))
# Simulate data from APIM model
if (Model == 1){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.1(N.dyad,T.obs,
c.F,c.M,a.FF,p.MF,a.MM,p.FM,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Simulate data from APIM model with indistinguishable dyads
if (Model == 2){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.2(N.dyad,T.obs,
c,a,p,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Simulate data from APIM model: group differences in actor partner effects
if (Model == 3){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.3(N0.dyad,N1.dyad,T.obs,
c.F0,c.F1,c.M0,c.M1,a.FF0,a.FF1,p.MF0,p.MF1,a.MM0,a.MM1,p.FM0,p.FM1,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}}
}
########################################################################################
# Simulate data from APIM model: group differences in actor partner effects
# with indistinguishable dyads
if (Model == 4){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.4(N0.dyad,N1.dyad,T.obs,
c0,c1,a0,a1,p0,p1,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}}
}
########################################################################################
# Simulate data from APIM model with a continuous time-varying dyad moderator
if (Model == 5){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.5(N.dyad,T.obs,
c.F,c.M,a.FF,p.MF,a.MM,p.FM,
b.F,b.M,b.FF,b.MF,b.MM,b.FM,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Simulate data from APIM model with a continuous time-varying dyad moderator
# with indistinguishable dyads
if (Model == 6){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.6(N.dyad,T.obs,
c,a,p,b,b.a,b.p,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Simulate data from APIM model with a dichotomous time-varying dyad moderator
if (Model == 7){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.7(N.dyad,T.obs,
c.F,c.M,a.FF,p.MF,a.MM,p.FM,
d.F,d.M,d.FF,d.MF,d.MM,d.FM,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
prob.D,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Simulate data from APIM model with a dichotomous time-varying dyad moderator
# with indistinguishable dyads
if (Model == 8){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.8(N.dyad,T.obs,
c,a,p,d,d.a,d.p,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
prob.D,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model
if (Model == 9){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.9(N.dyad,T.obs,
c.F,c.M,a.FF,p.MF,a.FF2,p.MF2,a.MM,p.FM,a.MM2,p.FM2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with indistinguishable dyads
if (Model == 10){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.10(N.dyad,T.obs,
c,a,a.2,p,p.2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model: group differences in actor partner effects
if (Model == 11){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.11(N0.dyad,N1.dyad,T.obs,
c.F0,c.F1,c.M0,c.M1,a.FF0,a.FF1,a.FF02,a.FF12,p.MF0,p.MF1,p.MF02,p.MF12,
a.MM0,a.MM1,a.MM02,a.MM12,p.FM0,p.FM1,p.FM02,p.FM12,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model: group differences in actor partner effects
# with indistinguishable dyads
if (Model == 12){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.12(N0.dyad,N1.dyad,T.obs,
c0,c1,a0,a1,a02,a12,p0,p1,p02,p12,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a continuous time-varying dyad moderator
if (Model == 13){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.13(N.dyad,T.obs,
c.F,c.M,a.FF,a.FF2,p.MF,p.MF2,a.MM,a.MM2,p.FM,p.FM2,
b.F,b.M,b.FF,b.FF2,b.MF,b.MF2,b.MM,b.MM2,b.FM,b.FM2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a continuous time-varying dyad moderator
# with indistinguishable dyads
if (Model == 14){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.14(N.dyad,T.obs,
c,a,a.2,p,p.2,b,b.a,b.a2,b.p,b.p2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a dichotomous time-varying dyad moderator
if (Model == 15){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.15(N.dyad,T.obs,
c.F,c.M,a.FF,a.FF2,p.MF,p.MF2,a.MM,a.MM2,p.FM,p.FM2,
d.F,d.M,d.FF,d.FF2,d.MF,d.MF2,d.MM,d.MM2,d.FM,d.FM2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
prob.D,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a dichotomous time-varying dyad moderator
# with indistinguishable dyads
if (Model == 16){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.16(N.dyad,T.obs,
c,a,a.2,p,p.2,d,d.a,d.a2,d.p,d.p2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
prob.D,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Simulate data from APIM model
if (Model == 17){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.1.lag(N.dyad,T.obs,
c.F,c.M,rho.YF,rho.YM,a.FF,p.MF,a.MM,p.FM,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Simulate data from APIM model with indistinguishable dyads
if (Model == 18){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.2.lag(N.dyad,T.obs,
c,rho.Y,a,p,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Simulate data from APIM model: group differences in actor partner effects
if (Model == 19){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.3.lag(N0.dyad,N1.dyad,T.obs,
c.F0,c.F1,c.M0,c.M1,rho.YF0,rho.YF1,rho.YM0,rho.YM1,a.FF0,a.FF1,p.MF0,p.MF1,a.MM0,a.MM1,p.FM0,p.FM1,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}}
}
########################################################################################
# Simulate data from APIM model: group differences in actor partner effects
# with indistinguishable dyads
if (Model == 20){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.4.lag(N0.dyad,N1.dyad,T.obs,
c0,c1,rho.Y0,rho.Y1,a0,a1,p0,p1,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}}
}
########################################################################################
# Simulate data from APIM model with a continuous time-varying dyad moderator
if (Model == 21){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.5.lag(N.dyad,T.obs,
c.F,c.M,rho.YF,rho.YM,a.FF,p.MF,a.MM,p.FM,
b.F,b.M,b.FF,b.MF,b.MM,b.FM,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Simulate data from APIM model with a continuous time-varying dyad moderator
# with indistinguishable dyads
if (Model == 22){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.6.lag(N.dyad,T.obs,
c,rho.Y,a,p,b,b.a,b.p,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Simulate data from APIM model with a dichotomous time-varying dyad moderator
if (Model == 23){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.7.lag(N.dyad,T.obs,
c.F,c.M,rho.YF,rho.YM,a.FF,p.MF,a.MM,p.FM,
d.F,d.M,d.FF,d.MF,d.MM,d.FM,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
prob.D,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Simulate data from APIM model with a dichotomous time-varying dyad moderator
# with indistinguishable dyads
if (Model == 24){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.8.lag(N.dyad,T.obs,
c,rho.Y,a,p,d,d.a,d.p,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
prob.D,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model
if (Model == 25){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.9.lag(N.dyad,T.obs,
c.F,c.M,rho.YF,rho.YM,a.FF,p.MF,a.FF2,p.MF2,a.MM,p.FM,a.MM2,p.FM2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with indistinguishable dyads
if (Model == 26){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.10.lag(N.dyad,T.obs,
c,rho.Y,a,a.2,p,p.2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model: group differences in actor partner effects
if (Model == 27){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.11.lag(N0.dyad,N1.dyad,T.obs,
c.F0,c.F1,c.M0,c.M1,rho.YF0,rho.YF1,rho.YM0,rho.YM1,a.FF0,a.FF1,a.FF02,a.FF12,p.MF0,p.MF1,p.MF02,p.MF12,
a.MM0,a.MM1,a.MM02,a.MM12,p.FM0,p.FM1,p.FM02,p.FM12,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model: group differences in actor partner effects
# with indistinguishable dyads
if (Model == 28){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.12.lag(N0.dyad,N1.dyad,T.obs,
c0,c1,rho.Y0,rho.Y1,a0,a1,a02,a12,p0,p1,p02,p12,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a continuous time-varying dyad moderator
if (Model == 29){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.13.lag(N.dyad,T.obs,
c.F,c.M,rho.YF,rho.YM,a.FF,a.FF2,p.MF,p.MF2,a.MM,a.MM2,p.FM,p.FM2,
b.F,b.M,b.FF,b.FF2,b.MF,b.MF2,b.MM,b.MM2,b.FM,b.FM2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a continuos time-varying dyad moderator
# with indistinguishable dyads
if (Model == 30){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.14.lag(N.dyad,T.obs,
c,rho.Y,a,a.2,p,p.2,b,b.a,b.a2,b.p,b.p2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a dichotonomous time-varying dyad moderator
if (Model == 31){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.15.lag(N.dyad,T.obs,
c.F,c.M,rho.YF,rho.YM,a.FF,a.FF2,p.MF,p.MF2,a.MM,a.MM2,p.FM,p.FM2,
d.F,d.M,d.FF,d.FF2,d.MF,d.MF2,d.MM,d.MM2,d.FM,d.FM2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
prob.D,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a dichotonomous time-varying dyad moderator
# with indistinguishable dyads
if (Model == 32){
plan(multisession)
fit = future_lapply(1:R, function(r)
try(Performance.Dyad.Model.16.lag(N.dyad,T.obs,
c,rho.Y,a,a.2,p,p.2,d,d.a,d.a2,d.p,d.p2,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
prob.D,is.center.X,alpha,is.REML),
silent = FALSE),future.seed = 0xBEEF)
errors = rep(0,R)
for (r in 1:R){errors[r] = length(fit[[r]])}
errors = sum(ifelse(errors==1,1,0))
if (errors>0){
if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}
if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad))}}
}
# End of function ---> Return estimated model
return(fit)}
#####################################################################################
| /R/Summary.model.Dyad.IL.R | no_license | smasongarrison/PowerLAPIM | R | false | false | 35,961 | r | ###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
# Function to compute power for dyadic IL studies as a function of the number of participants
Summary.model.Dyad.IL = function(Model,N.dyad,N0.dyad,N1.dyad,T.obs,
c.F,c.M,a.FF,p.MF,a.FF2,p.MF2,a.MM,p.FM,a.MM2,p.FM2,
c,a,a.2,p,p.2,
c.F0,c.F1,c.M0,c.M1,a.FF0,a.FF1,a.FF02,a.FF12,p.MF0,p.MF1,p.MF02,p.MF12,
a.MM0,a.MM1,a.MM02,a.MM12,p.FM0,p.FM1,p.FM02,p.FM12,
c0,c1,a0,a1,a02,a12,p0,p1,p02,p12,
b.F,b.M,b.FF,b.MF,b.MM,b.FM,b.FF2,b.MF2,b.MM2,b.FM2,
d.F,d.M,d.FF,d.MF,d.MM,d.FM,d.FF2,d.MF2,d.MM2,d.FM2,
b,b.a,b.a2,b.p,b.p2,
d,d.a,d.a2,d.p,d.p2,
rho.YF,rho.YM,rho.Y,rho.YF0,rho.YF1,rho.YM0,rho.YM1,
sigma.eps.F,sigma.eps.M,rho.eps.FM,
sigma.nu.F,sigma.nu.M,rho.nu.F.M,sigma.nu,
mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,
mu.W,sigma.W,prob.D,
is.center.X,is.center.W,R,alpha,is.REML){
########################################################################################
########################################################################################
########################################################################################
# Attach modelling (nlme, MASS), parallel simulation (future.apply) and
# presentation (shiny, DT, ggplot2, ...) dependencies. suppressMessages()
# only silences startup chatter; the packages are still fully attached.
suppressMessages(library(nlme,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(MASS,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(tidyverse,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(future.apply,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(gridExtra,warn.conflicts = FALSE, quietly=TRUE))  # was attached twice; duplicate removed
suppressMessages(library(formattable,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(htmltools,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(shiny,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(DT,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(ggplot2,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(data.table,warn.conflicts = FALSE, quietly=TRUE))
# plyr is attached before dplyr on purpose so dplyr's verbs mask plyr's.
suppressMessages(library(plyr,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(dplyr,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(tidyr,warn.conflicts = FALSE, quietly=TRUE))
suppressMessages(library(shinyjs,warn.conflicts = FALSE, quietly=TRUE))
# Model 1: basic APIM (distinguishable dyads).
if (Model == 1){
plan(multisession)
# One replication per worker; future.seed fixes the parallel RNG stream so the
# simulation is reproducible.
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.1(N.dyad,T.obs,
                               c.F,c.M,a.FF,p.MF,a.MM,p.FM,
                               sigma.eps.F,sigma.eps.M,rho.eps.FM,
                               sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                               mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),silent = FALSE),future.seed = 0xBEEF)
# A failed try() yields a length-1 "try-error" object, while a successful
# replication returns a longer result; length == 1 flags a non-converged run.
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
# Warn when only some replications failed; abort when (nearly) all did.
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 2: APIM with indistinguishable dyads.
if (Model == 2){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.2(N.dyad,T.obs,
                               c,a,p,
                               sigma.eps.F,sigma.eps.M,rho.eps.FM,
                               sigma.nu,
                               mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 3: APIM with group differences in actor/partner effects.
if (Model == 3){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.3(N0.dyad,N1.dyad,T.obs,
                               c.F0,c.F1,c.M0,c.M1,a.FF0,a.FF1,p.MF0,p.MF1,a.MM0,a.MM1,p.FM0,p.FM1,
                               sigma.eps.F,sigma.eps.M,rho.eps.FM,
                               sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                               mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 4: APIM with group differences in actor/partner effects,
# indistinguishable dyads.
if (Model == 4){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.4(N0.dyad,N1.dyad,T.obs,
                               c0,c1,a0,a1,p0,p1,
                               sigma.eps.F,sigma.eps.M,rho.eps.FM,
                               sigma.nu,
                               mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 5: APIM with a continuous time-varying dyad moderator (distinguishable dyads).
if (Model == 5){
plan(multisession)
# One replication per worker; future.seed fixes the parallel RNG stream.
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.5(N.dyad,T.obs,
                               c.F,c.M,a.FF,p.MF,a.MM,p.FM,
                               b.F,b.M,b.FF,b.MF,b.MM,b.FM,
                               sigma.eps.F,sigma.eps.M,rho.eps.FM,
                               sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                               mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                               mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# A failed try() yields a length-1 "try-error"; length == 1 flags a
# non-converged replication.
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
# Warn when only some replications failed; abort when (nearly) all did.
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 6: APIM with a continuous time-varying dyad moderator,
# indistinguishable dyads.
if (Model == 6){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.6(N.dyad,T.obs,
                               c,a,p,b,b.a,b.p,
                               sigma.eps.F,sigma.eps.M,rho.eps.FM,
                               sigma.nu.F,  # NOTE(review): other indistinguishable-dyad models pass sigma.nu -- confirm against Performance.Dyad.Model.6's signature
                               mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                               mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 7: APIM with a dichotomous time-varying dyad moderator (distinguishable dyads).
if (Model == 7){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.7(N.dyad,T.obs,
                               c.F,c.M,a.FF,p.MF,a.MM,p.FM,
                               d.F,d.M,d.FF,d.MF,d.MM,d.FM,
                               sigma.eps.F,sigma.eps.M,rho.eps.FM,
                               sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                               mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                               prob.D,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 8: APIM with a dichotomous time-varying dyad moderator,
# indistinguishable dyads.
if (Model == 8){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.8(N.dyad,T.obs,
                               c,a,p,d,d.a,d.p,
                               sigma.eps.F,sigma.eps.M,rho.eps.FM,
                               sigma.nu,
                               mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                               prob.D,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Curvilinear actor and partner effects
# Model 9: APIM with curvilinear (squared) actor/partner terms (distinguishable dyads).
if (Model == 9){
plan(multisession)
# One replication per worker; future.seed fixes the parallel RNG stream.
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.9(N.dyad,T.obs,
                               c.F,c.M,a.FF,p.MF,a.FF2,p.MF2,a.MM,p.FM,a.MM2,p.FM2,
                               sigma.eps.F,sigma.eps.M,rho.eps.FM,
                               sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                               mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# A failed try() yields a length-1 "try-error"; length == 1 flags a
# non-converged replication.
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
# Warn when only some replications failed; abort when (nearly) all did.
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Curvilinear actor and partner effects
# Model 10: curvilinear APIM with indistinguishable dyads.
if (Model == 10){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.10(N.dyad,T.obs,
                                c,a,a.2,p,p.2,
                                sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                sigma.nu,
                                mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Curvilinear actor and partner effects
# Model 11: curvilinear APIM with group differences in actor/partner effects.
if (Model == 11){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.11(N0.dyad,N1.dyad,T.obs,
                                c.F0,c.F1,c.M0,c.M1,a.FF0,a.FF1,a.FF02,a.FF12,p.MF0,p.MF1,p.MF02,p.MF12,
                                a.MM0,a.MM1,a.MM02,a.MM12,p.FM0,p.FM1,p.FM02,p.FM12,
                                sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                                mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Curvilinear actor and partner effects
# Model 12: curvilinear APIM with group differences in actor/partner effects,
# indistinguishable dyads.
if (Model == 12){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.12(N0.dyad,N1.dyad,T.obs,
                                c0,c1,a0,a1,a02,a12,p0,p1,p02,p12,
                                sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                sigma.nu,
                                mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Curvilinear actor and partner effects
# Model 13: curvilinear APIM with a continuous time-varying dyad moderator
# (distinguishable dyads).
if (Model == 13){
plan(multisession)
# One replication per worker; future.seed fixes the parallel RNG stream.
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.13(N.dyad,T.obs,
                                c.F,c.M,a.FF,a.FF2,p.MF,p.MF2,a.MM,a.MM2,p.FM,p.FM2,
                                b.F,b.M,b.FF,b.FF2,b.MF,b.MF2,b.MM,b.MM2,b.FM,b.FM2,
                                sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                                mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# A failed try() yields a length-1 "try-error"; length == 1 flags a
# non-converged replication.
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
# Warn when only some replications failed; abort when (nearly) all did.
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Curvilinear actor and partner effects
# Model 14: curvilinear APIM with a continuous time-varying dyad moderator,
# indistinguishable dyads.
if (Model == 14){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.14(N.dyad,T.obs,
                                c,a,a.2,p,p.2,b,b.a,b.a2,b.p,b.p2,
                                sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                sigma.nu.F,  # NOTE(review): other indistinguishable-dyad models pass sigma.nu -- confirm against Performance.Dyad.Model.14's signature
                                mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Curvilinear actor and partner effects
# Model 15: curvilinear APIM with a dichotomous time-varying dyad moderator
# (distinguishable dyads).
if (Model == 15){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.15(N.dyad,T.obs,
                                c.F,c.M,a.FF,a.FF2,p.MF,p.MF2,a.MM,a.MM2,p.FM,p.FM2,
                                d.F,d.M,d.FF,d.FF2,d.MF,d.MF2,d.MM,d.MM2,d.FM,d.FM2,
                                sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                                mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                prob.D,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Curvilinear actor and partner effects
# Model 16: curvilinear APIM with a dichotomous time-varying dyad moderator,
# indistinguishable dyads.
if (Model == 16){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.16(N.dyad,T.obs,
                                c,a,a.2,p,p.2,d,d.a,d.a2,d.p,d.p2,
                                sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                sigma.nu,
                                mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                prob.D,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 17: APIM with lagged (autoregressive) outcomes (distinguishable dyads).
if (Model == 17){
plan(multisession)
# One replication per worker; future.seed fixes the parallel RNG stream.
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.1.lag(N.dyad,T.obs,
                                   c.F,c.M,rho.YF,rho.YM,a.FF,p.MF,a.MM,p.FM,
                                   sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                   sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                                   mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),silent = FALSE),future.seed = 0xBEEF)
# A failed try() yields a length-1 "try-error"; length == 1 flags a
# non-converged replication.
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
# Warn when only some replications failed; abort when (nearly) all did.
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 18: APIM with lagged outcomes, indistinguishable dyads.
if (Model == 18){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.2.lag(N.dyad,T.obs,
                                   c,rho.Y,a,p,
                                   sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                   sigma.nu,
                                   mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 19: APIM with lagged outcomes and group differences in actor/partner effects.
if (Model == 19){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.3.lag(N0.dyad,N1.dyad,T.obs,
                                   c.F0,c.F1,c.M0,c.M1,rho.YF0,rho.YF1,rho.YM0,rho.YM1,a.FF0,a.FF1,p.MF0,p.MF1,a.MM0,a.MM1,p.FM0,p.FM1,
                                   sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                   sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                                   mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 20: APIM with lagged outcomes and group differences in actor/partner effects,
# indistinguishable dyads.
if (Model == 20){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.4.lag(N0.dyad,N1.dyad,T.obs,
                                   c0,c1,rho.Y0,rho.Y1,a0,a1,p0,p1,
                                   sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                   sigma.nu,
                                   mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads in Group 0 larger to',
N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 21: APIM with lagged outcomes and a continuous time-varying dyad moderator
# (distinguishable dyads).
if (Model == 21){
plan(multisession)
# One replication per worker; future.seed fixes the parallel RNG stream.
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.5.lag(N.dyad,T.obs,
                                   c.F,c.M,rho.YF,rho.YM,a.FF,p.MF,a.MM,p.FM,
                                   b.F,b.M,b.FF,b.MF,b.MM,b.FM,
                                   sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                   sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                                   mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                   mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# A failed try() yields a length-1 "try-error"; length == 1 flags a
# non-converged replication.
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
# Warn when only some replications failed; abort when (nearly) all did.
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 22: APIM with lagged outcomes and a continuous time-varying dyad moderator,
# indistinguishable dyads.
if (Model == 22){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.6.lag(N.dyad,T.obs,
                                   c,rho.Y,a,p,b,b.a,b.p,
                                   sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                   sigma.nu.F,  # NOTE(review): other indistinguishable-dyad models pass sigma.nu -- confirm against Performance.Dyad.Model.6.lag's signature
                                   mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                   mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 23: APIM with lagged outcomes and a dichotomous time-varying dyad moderator
# (distinguishable dyads).
if (Model == 23){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.7.lag(N.dyad,T.obs,
                                   c.F,c.M,rho.YF,rho.YM,a.FF,p.MF,a.MM,p.FM,
                                   d.F,d.M,d.FF,d.MF,d.MM,d.FM,
                                   sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                   sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                                   mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                   prob.D,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Model 24: APIM with lagged outcomes and a dichotomous time-varying dyad moderator,
# indistinguishable dyads.
if (Model == 24){
plan(multisession)
fit = future_lapply(seq_len(R), function(r)
  try(Performance.Dyad.Model.8.lag(N.dyad,T.obs,
                                   c,rho.Y,a,p,d,d.a,d.p,
                                   sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                   sigma.nu,
                                   mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                   prob.D,is.center.X,alpha,is.REML),
      silent = FALSE),future.seed = 0xBEEF)
# Count replications whose try() failed (length-1 "try-error").
errors = sum(vapply(fit, length, integer(1)) == 1)
if (errors > 0){
msg = paste(errors, 'replications produce convergence errors.
Check the value of the parameters or set the number of dyads larger to',
N.dyad)
if (errors >= (R - 1)){stop(msg)} else {message(msg)}
}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model
if (Model == 25){
  plan(multisession)
  # Run the R replications in parallel; each element of fit is either the
  # model result or, via try(), a try-error object when a replication fails.
  fit = future_lapply(1:R, function(r)
    try(Performance.Dyad.Model.9.lag(N.dyad,T.obs,
                                     c.F,c.M,rho.YF,rho.YM,a.FF,p.MF,a.FF2,p.MF2,a.MM,p.FM,a.MM2,p.FM2,
                                     sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                     sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                                     mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),
        silent = FALSE),future.seed = 0xBEEF)
  # Count failed replications.
  # NOTE(review): a length-1 element is treated as a try-error; testing
  # inherits(fit[[r]], "try-error") would be explicit -- confirm the
  # Performance.* functions never return a legitimate length-1 result.
  errors = rep(0,R)
  for (r in 1:R){errors[r] = length(fit[[r]])}
  errors = sum(ifelse(errors==1,1,0))
  # Warn if some replications failed; abort if (almost) all of them did.
  if (errors>0){
    if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
                                    Check the value of the parameters or set the number of dyads larger to',
                                    N.dyad))}
    if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
                                  Check the value of the parameters or set the number of dyads larger to',
                                  N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with indistinguishable dyads
if (Model == 26){
  plan(multisession)
  fit = future_lapply(1:R, function(r)
    try(Performance.Dyad.Model.10.lag(N.dyad,T.obs,
                                      c,rho.Y,a,a.2,p,p.2,
                                      sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                      sigma.nu,
                                      mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,is.center.X,alpha,is.REML),
        silent = FALSE),future.seed = 0xBEEF)
  # Same failure accounting as Model 25 (length-1 elements counted as errors).
  errors = rep(0,R)
  for (r in 1:R){errors[r] = length(fit[[r]])}
  errors = sum(ifelse(errors==1,1,0))
  if (errors>0){
    if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
                                    Check the value of the parameters or set the number of dyads larger to',
                                    N.dyad))}
    if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
                                  Check the value of the parameters or set the number of dyads larger to',
                                  N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model: group differences in actor partner effects
if (Model == 27){
  plan(multisession)
  # Parallel replications; try() captures per-replication failures so one
  # bad fit does not abort the whole simulation.
  fit = future_lapply(1:R, function(r)
    try(Performance.Dyad.Model.11.lag(N0.dyad,N1.dyad,T.obs,
                                      c.F0,c.F1,c.M0,c.M1,rho.YF0,rho.YF1,rho.YM0,rho.YM1,a.FF0,a.FF1,a.FF02,a.FF12,p.MF0,p.MF1,p.MF02,p.MF12,
                                      a.MM0,a.MM1,a.MM02,a.MM12,p.FM0,p.FM1,p.FM02,p.FM12,
                                      sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                      sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                                      mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
        silent = FALSE),future.seed = 0xBEEF)
  # Count failed replications.
  # NOTE(review): a length-1 element is treated as a try-error; testing
  # inherits(fit[[r]], "try-error") would be explicit -- confirm the
  # Performance.* functions never return a legitimate length-1 result.
  errors = rep(0,R)
  for (r in 1:R){errors[r] = length(fit[[r]])}
  errors = sum(ifelse(errors==1,1,0))
  if (errors>0){
    if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
                                    Check the value of the parameters or set the number of dyads in Group 0 larger to',
                                    N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}
    if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
                                  Check the value of the parameters or set the number of dyads in Group 0 larger to',
                                  N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model: group differences in actor partner effects
# with indistinguishable dyads
if (Model == 28){
  plan(multisession)
  fit = future_lapply(1:R, function(r)
    try(Performance.Dyad.Model.12.lag(N0.dyad,N1.dyad,T.obs,
                                      c0,c1,rho.Y0,rho.Y1,a0,a1,a02,a12,p0,p1,p02,p12,
                                      sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                      sigma.nu,
                                      mu.XF0,mu.XF1,sigma.XF0,sigma.XF1,mu.XM0,mu.XM1,sigma.XM0,sigma.XM1,rho.X0,rho.X1,is.center.X,alpha,is.REML),
        silent = FALSE),future.seed = 0xBEEF)
  # Same failure accounting as above (length-1 elements counted as errors).
  errors = rep(0,R)
  for (r in 1:R){errors[r] = length(fit[[r]])}
  errors = sum(ifelse(errors==1,1,0))
  if (errors>0){
    if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
                                    Check the value of the parameters or set the number of dyads in Group 0 larger to',
                                    N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}
    if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
                                  Check the value of the parameters or set the number of dyads in Group 0 larger to',
                                  N0.dyad,'or the number of dyads in Group 1 larger to',N1.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a continuous time-varying dyad moderator
if (Model == 29){
  plan(multisession)
  # Parallel replications; try() captures per-replication failures.
  # NOTE(review): the curvilinear terms are passed here as a.FF,a.FF2,p.MF,p.MF2
  # whereas the Model-25 call uses the order a.FF,p.MF,a.FF2,p.MF2 -- presumably
  # each matches its own function's signature; worth confirming.
  fit = future_lapply(1:R, function(r)
    try(Performance.Dyad.Model.13.lag(N.dyad,T.obs,
                                      c.F,c.M,rho.YF,rho.YM,a.FF,a.FF2,p.MF,p.MF2,a.MM,a.MM2,p.FM,p.FM2,
                                      b.F,b.M,b.FF,b.FF2,b.MF,b.MF2,b.MM,b.MM2,b.FM,b.FM2,
                                      sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                      sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                                      mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                      mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
        silent = FALSE),future.seed = 0xBEEF)
  # Count failed replications.
  # NOTE(review): a length-1 element is treated as a try-error; testing
  # inherits(fit[[r]], "try-error") would be explicit -- confirm the
  # Performance.* functions never return a legitimate length-1 result.
  errors = rep(0,R)
  for (r in 1:R){errors[r] = length(fit[[r]])}
  errors = sum(ifelse(errors==1,1,0))
  if (errors>0){
    if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
                                    Check the value of the parameters or set the number of dyads larger to',
                                    N.dyad))}
    if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
                                  Check the value of the parameters or set the number of dyads larger to',
                                  N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a continuous time-varying dyad moderator
# with indistinguishable dyads
if (Model == 30){
  plan(multisession)
  # NOTE(review): this call passes sigma.nu.F where the other indistinguishable
  # models (26, 28, 32) pass sigma.nu -- verify this is intentional and not a
  # copy-paste slip from the distinguishable variant.
  fit = future_lapply(1:R, function(r)
    try(Performance.Dyad.Model.14.lag(N.dyad,T.obs,
                                      c,rho.Y,a,a.2,p,p.2,b,b.a,b.a2,b.p,b.p2,
                                      sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                      sigma.nu.F,
                                      mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                      mu.W,sigma.W,is.center.X,is.center.W,alpha,is.REML),
        silent = FALSE),future.seed = 0xBEEF)
  # Same failure accounting as above (length-1 elements counted as errors).
  errors = rep(0,R)
  for (r in 1:R){errors[r] = length(fit[[r]])}
  errors = sum(ifelse(errors==1,1,0))
  if (errors>0){
    if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
                                    Check the value of the parameters or set the number of dyads larger to',
                                    N.dyad))}
    if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
                                  Check the value of the parameters or set the number of dyads larger to',
                                  N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a dichotomous time-varying dyad moderator
if (Model == 31){
  plan(multisession)
  # Parallel replications; try() captures per-replication failures.
  fit = future_lapply(1:R, function(r)
    try(Performance.Dyad.Model.15.lag(N.dyad,T.obs,
                                      c.F,c.M,rho.YF,rho.YM,a.FF,a.FF2,p.MF,p.MF2,a.MM,a.MM2,p.FM,p.FM2,
                                      d.F,d.M,d.FF,d.FF2,d.MF,d.MF2,d.MM,d.MM2,d.FM,d.FM2,
                                      sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                      sigma.nu.F,sigma.nu.M,rho.nu.F.M,
                                      mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                      prob.D,is.center.X,alpha,is.REML),
        silent = FALSE),future.seed = 0xBEEF)
  # Count failed replications.
  # NOTE(review): a length-1 element is treated as a try-error; testing
  # inherits(fit[[r]], "try-error") would be explicit -- confirm the
  # Performance.* functions never return a legitimate length-1 result.
  errors = rep(0,R)
  for (r in 1:R){errors[r] = length(fit[[r]])}
  errors = sum(ifelse(errors==1,1,0))
  if (errors>0){
    if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
                                    Check the value of the parameters or set the number of dyads larger to',
                                    N.dyad))}
    if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
                                  Check the value of the parameters or set the number of dyads larger to',
                                  N.dyad))}}
}
########################################################################################
# Curvilinear actor and partner effects
# Simulate data from APIM model with a dichotomous time-varying dyad moderator
# with indistinguishable dyads
if (Model == 32){
  plan(multisession)
  fit = future_lapply(1:R, function(r)
    try(Performance.Dyad.Model.16.lag(N.dyad,T.obs,
                                      c,rho.Y,a,a.2,p,p.2,d,d.a,d.a2,d.p,d.p2,
                                      sigma.eps.F,sigma.eps.M,rho.eps.FM,
                                      sigma.nu,
                                      mu.XF,sigma.XF,mu.XM,sigma.XM,rho.X,
                                      prob.D,is.center.X,alpha,is.REML),
        silent = FALSE),future.seed = 0xBEEF)
  # Same failure accounting as above (length-1 elements counted as errors).
  errors = rep(0,R)
  for (r in 1:R){errors[r] = length(fit[[r]])}
  errors = sum(ifelse(errors==1,1,0))
  if (errors>0){
    if (errors<(R-1)){message(paste(errors, 'replications produce convergence errors.
                                    Check the value of the parameters or set the number of dyads larger to',
                                    N.dyad))}
    if (errors>=(R-1)){stop(paste(errors, 'replications produce convergence errors.
                                  Check the value of the parameters or set the number of dyads larger to',
                                  N.dyad))}}
}
# End of function ---> Return estimated model
# (fit is the list of per-replication results / try-error objects)
return(fit)}
#####################################################################################
|
library(mgcv)
## Simulate depth profiles at several stations, fit a GAMM with a random
## station intercept, and inspect the fit plus a residual variogram.
n.station <- 20                     # number of stations to simulate
mu        <- 50                     # centre of the Gaussian depth profile
sd        <- 40                     # spread of the Gaussian depth profile
noise.sd  <- 0.2                    # white-noise standard deviation
stn.sd    <- rnorm(n.station, mean = 0.5, sd = 0.2)  # per-station SD of the random effect
mult      <- 1e3                    # overall scaling of the profile
z         <- seq(0, 250, 5)         # explanatory variable (depth grid)
## "True" profile: Gaussian density renormalised over the observed depth range
rho <- mult * dnorm(z, mu, sd) / (pnorm(max(z), mu, sd) - pnorm(min(z), mu, sd))
noise  <- rnorm(n = length(rho), mean = 0, sd = noise.sd)  # white noise shared by stations
stn.re <- rnorm(length(stn.sd), mean = 0, sd = stn.sd)     # station-specific random effect
## Observations = profile + noise + station offset, plus a log-transformed copy
obs   <- rep(rho, n.station) + rep(noise, n.station) + rep(stn.re, each = length(rho))
l.obs <- log(obs + 2)
## Long-format data frame with one factor level per station
glm.spl <- data.frame(obs   = obs,
                      l.obs = l.obs,
                      z     = rep(z, n.station),
                      stn   = factor(rep(seq_len(n.station), each = length(z))))
## GAMM: smooth of depth plus a random station intercept
lme.fit <- gamm(l.obs ~ s(z), random = list(stn = ~1), data = glm.spl)
summary(lme.fit$lme)
## Fitted curve against observed data, with depth increasing downwards
plot(log(rho + 2), z, ylim = c(max(z), min(z)), type = "l",
     xlim = range(na.omit(l.obs)), xlab = "l.fluoro")
points(l.obs, rep(z, n.station), col = glm.spl$stn)
title("gamm with station random effect")
lines(fitted(lme.fit$gam)[glm.spl$stn == 1], z, col = "black", lwd = 2)
## Empirical variogram of the mixed-model residuals
gamm.var <- Variogram(lme.fit$lme)
plot(gamm.var)
| /broke-simulation.R | no_license | gracevaziri/R-simulation-study | R | false | false | 1,350 | r | library(mgcv)
## Simulate depth profiles at several stations, fit a GAMM with a random
## station intercept, and inspect the fit plus a residual variogram.
n.station <- 20                     # number of stations to simulate
mu        <- 50                     # centre of the Gaussian depth profile
sd        <- 40                     # spread of the Gaussian depth profile
noise.sd  <- 0.2                    # white-noise standard deviation
stn.sd    <- rnorm(n.station, mean = 0.5, sd = 0.2)  # per-station SD of the random effect
mult      <- 1e3                    # overall scaling of the profile
z         <- seq(0, 250, 5)         # explanatory variable (depth grid)
## "True" profile: Gaussian density renormalised over the observed depth range
rho <- mult * dnorm(z, mu, sd) / (pnorm(max(z), mu, sd) - pnorm(min(z), mu, sd))
noise  <- rnorm(n = length(rho), mean = 0, sd = noise.sd)  # white noise shared by stations
stn.re <- rnorm(length(stn.sd), mean = 0, sd = stn.sd)     # station-specific random effect
## Observations = profile + noise + station offset, plus a log-transformed copy
obs   <- rep(rho, n.station) + rep(noise, n.station) + rep(stn.re, each = length(rho))
l.obs <- log(obs + 2)
## Long-format data frame with one factor level per station
glm.spl <- data.frame(obs   = obs,
                      l.obs = l.obs,
                      z     = rep(z, n.station),
                      stn   = factor(rep(seq_len(n.station), each = length(z))))
## GAMM: smooth of depth plus a random station intercept
lme.fit <- gamm(l.obs ~ s(z), random = list(stn = ~1), data = glm.spl)
summary(lme.fit$lme)
## Fitted curve against observed data, with depth increasing downwards
plot(log(rho + 2), z, ylim = c(max(z), min(z)), type = "l",
     xlim = range(na.omit(l.obs)), xlab = "l.fluoro")
points(l.obs, rep(z, n.station), col = glm.spl$stn)
title("gamm with station random effect")
lines(fitted(lme.fit$gam)[glm.spl$stn == 1], z, col = "black", lwd = 2)
## Empirical variogram of the mixed-model residuals
gamm.var <- Variogram(lme.fit$lme)
plot(gamm.var)
|
library(fishtree)
library(here)
## Build a community phylogeny for Parana-Paraguai fish: download the Fish
## Tree of Life chronogram, then graft species that are absent from it onto
## their genus, and write out the updated tree.
## NOTE(review): treedata_modif(), force.ultrametric(), add.species.to.genus()
## and bind.tip() are not defined or loaded in this script -- they are assumed
## to be attached elsewhere (phytools & a custom geiger-style helper); confirm.
comm<- read.table(here::here("data", "comm_Parana-Paraguai.txt"), header= TRUE)
spp<- colnames(comm)[-c(1,2)]
tree<- fishtree_phylogeny(species = spp, type = "chronogram") #tree downloaded from fishtree of life Chang et al 2019
tree_insert<- tree
###checking the tree#####
# Dummy trait vector: only the species names matter for the tree/data matching
spp_data<- 1:length(spp)
names(spp_data)<- spp
# Species present in the community data but missing from the downloaded tree
insert_spp<- treedata_modif(phy = tree_insert, spp_data, warnings = F)$nc$data_not_tree
# Tips of the tree whose genus matches a missing species (these species can be
# grafted onto an existing congener)
species_to_genre<- tree_insert$tip.label[match(sub("_.*", "", insert_spp),
                                               sub("_.*", "", tree_insert$tip.label)
                                               )[!is.na(match(sub("_.*", "", insert_spp),
                                                              sub("_.*", "", tree_insert$tip.label)
                                                              )
                                                        )
                                                 ]
                                         ] #genre that must be added
# Missing species whose genus has no representative tip at all
species_not_genre<- insert_spp[-unlist(
  unique(
    lapply(species_to_genre,
           function(x){
             which(sub("_.*", "", insert_spp) == unique(sub("_.*", "", x))
             )
           }
    )
  )
)
]
##download all species###
families<- c("Loricariidae", "Cichlidae", "Characidae", "Heptapteridae", "Synbranchiformes")
symbranc_tree<- fishtree_phylogeny(rank= "Synbranchiformes")
spp_allFamilies<- fishtree_phylogeny(species= unlist(lapply(lapply(families, function(x) fishtree_phylogeny(rank= x)), function(i) i$tip.label))) #tree with all species from families in comm
# Congeners (from the family-level trees) for the genera with no tip yet
spp_nogenre_allFamilies<- unique(spp_allFamilies$tip.label[match( sub("_.*", "", species_not_genre),
                                                                  sub("_.*", "", spp_allFamilies$tip.label)
                                                                  )
                                                           [!is.na(match( sub("_.*", "", species_not_genre), sub("_.*", "", spp_allFamilies$tip.label)))
                                                             ]
                                                           ]
                                 ) #sister taxa on all families
# NOTE(review): sample() without set.seed() makes the chosen placeholder tip,
# and hence the final tree, non-reproducible across runs.
spp_synbranc<- sample(symbranc_tree$tip.label, 1) #extracting one species from Symbranchiformes
include_spp<- c(spp_nogenre_allFamilies, spp_synbranc) #names to be included in spp data
spp_all<- c(spp, include_spp) #all genus to download from FishTree
phylo_raw<- fishtree_phylogeny(species = spp_all) #all genus included
species_not_genre #position
species_to_genre #add species to genus
spp_problem1<- "Synbranchus_marmoratus"
spp_problem2<- "Pyxiloricaria_menezesi"
pos_1<- treedata_modif(phy = phylo_raw, data = spp_data, warnings = F)$nc$tree_not_data #add position (change name) and species to genus (already had a name in the phylo)
pos_1<- pos_1[-which(pos_1 == spp_synbranc)] #excluding species that are not present in the FishTree
phylo_test<- force.ultrametric(phylo_raw)
# For each tree tip that has no community match: if its genus holds exactly one
# community species, just rename the tip; otherwise rename it to the first
# congener and graft the remaining congeners onto the genus.
for(i in 1:length(pos_1)){
  #i= 6
  if((table(sub("_.*", "", names(spp_data)))[sub("_.*", "", pos_1[i])] == 1) == TRUE){
    position<- which(sub("_.*", "", phylo_test$tip.label) == sub("_.*", "", pos_1[i]))
    # NOTE(review): `tab[key] == 1` is evaluated first, so names() here simply
    # recovers the genus name of pos_1[i] -- convoluted but functional.
    phylo_test$tip.label[position]<- names(spp_data[which(sub("_.*", "", names(spp_data)) ==
                                                           names(table(sub("_.*", "", names(spp_data)))[sub("_.*", "", pos_1[i])] == 1))])
  } else {
    position<- which(sub("_.*", "", phylo_test$tip.label) == sub("_.*", "", pos_1[i]))
    names_spp<- names(spp_data[which(sub("_.*", "", names(spp_data)) ==
                                       names(table(sub("_.*", "", names(spp_data)))[sub("_.*", "", pos_1[i])] == 1))])
    phylo_test$tip.label[position]<- names_spp[1]
    # Graft every remaining congener; 2:length() is safe because this branch
    # only runs when the genus holds more than one community species.
    for(j in 2:length(names_spp)){
      phylo_test<- add.species.to.genus(phylo_test, names_spp[j])
    }
  }
}
# Remaining community species that still lack a tip: graft each onto its genus
pos_2<- treedata_modif(phy = phylo_test, data = spp_data, warnings = F)$nc$data_not_tree #only species to genus
pos_2<- pos_2[-match(c(spp_problem1, spp_problem2), pos_2)] #symbranchus
for(i in 1:length(pos_2)){
  phylo_test<- add.species.to.genus(phylo_test, pos_2[i])
}
# Problem taxon 1: rename the randomly sampled Synbranchiformes placeholder tip
position_problem1<- which(phylo_test$tip.label == spp_synbranc)
phylo_test$tip.label[position_problem1]<- spp_problem1 #solving problem 1
plot(phylo_test, cex= 0.5)
nodelabels()
# Problem taxon 2: attach it at a node chosen by eye from the plot above.
# NOTE(review): node 75 is hard-coded and will be wrong if the tree topology
# changes (e.g. because of the unseeded sample() above).
phylo_test<- bind.tip(tree = phylo_test, tip.label = spp_problem2, where = 75, position = 5)
write.tree(phylo_test, here::here("data", "tree_update_20-03-20.new"))
| /R/C_tree_edit_21-03-20.R | no_license | GabrielNakamura/ProjModel | R | false | false | 4,547 | r | library(fishtree)
library(here)
## Build a community phylogeny for Parana-Paraguai fish: download the Fish
## Tree of Life chronogram, then graft species that are absent from it onto
## their genus, and write out the updated tree.
## NOTE(review): treedata_modif(), force.ultrametric(), add.species.to.genus()
## and bind.tip() are not defined or loaded in this script -- they are assumed
## to be attached elsewhere (phytools & a custom geiger-style helper); confirm.
comm<- read.table(here::here("data", "comm_Parana-Paraguai.txt"), header= TRUE)
spp<- colnames(comm)[-c(1,2)]
tree<- fishtree_phylogeny(species = spp, type = "chronogram") #tree downloaded from fishtree of life Chang et al 2019
tree_insert<- tree
###checking the tree#####
# Dummy trait vector: only the species names matter for the tree/data matching
spp_data<- 1:length(spp)
names(spp_data)<- spp
# Species present in the community data but missing from the downloaded tree
insert_spp<- treedata_modif(phy = tree_insert, spp_data, warnings = F)$nc$data_not_tree
# Tips of the tree whose genus matches a missing species (these species can be
# grafted onto an existing congener)
species_to_genre<- tree_insert$tip.label[match(sub("_.*", "", insert_spp),
                                               sub("_.*", "", tree_insert$tip.label)
                                               )[!is.na(match(sub("_.*", "", insert_spp),
                                                              sub("_.*", "", tree_insert$tip.label)
                                                              )
                                                        )
                                                 ]
                                         ] #genre that must be added
# Missing species whose genus has no representative tip at all
species_not_genre<- insert_spp[-unlist(
  unique(
    lapply(species_to_genre,
           function(x){
             which(sub("_.*", "", insert_spp) == unique(sub("_.*", "", x))
             )
           }
    )
  )
)
]
##download all species###
families<- c("Loricariidae", "Cichlidae", "Characidae", "Heptapteridae", "Synbranchiformes")
symbranc_tree<- fishtree_phylogeny(rank= "Synbranchiformes")
spp_allFamilies<- fishtree_phylogeny(species= unlist(lapply(lapply(families, function(x) fishtree_phylogeny(rank= x)), function(i) i$tip.label))) #tree with all species from families in comm
# Congeners (from the family-level trees) for the genera with no tip yet
spp_nogenre_allFamilies<- unique(spp_allFamilies$tip.label[match( sub("_.*", "", species_not_genre),
                                                                  sub("_.*", "", spp_allFamilies$tip.label)
                                                                  )
                                                           [!is.na(match( sub("_.*", "", species_not_genre), sub("_.*", "", spp_allFamilies$tip.label)))
                                                             ]
                                                           ]
                                 ) #sister taxa on all families
# NOTE(review): sample() without set.seed() makes the chosen placeholder tip,
# and hence the final tree, non-reproducible across runs.
spp_synbranc<- sample(symbranc_tree$tip.label, 1) #extracting one species from Symbranchiformes
include_spp<- c(spp_nogenre_allFamilies, spp_synbranc) #names to be included in spp data
spp_all<- c(spp, include_spp) #all genus to download from FishTree
phylo_raw<- fishtree_phylogeny(species = spp_all) #all genus included
species_not_genre #position
species_to_genre #add species to genus
spp_problem1<- "Synbranchus_marmoratus"
spp_problem2<- "Pyxiloricaria_menezesi"
pos_1<- treedata_modif(phy = phylo_raw, data = spp_data, warnings = F)$nc$tree_not_data #add position (change name) and species to genus (already had a name in the phylo)
pos_1<- pos_1[-which(pos_1 == spp_synbranc)] #excluding species that are not present in the FishTree
phylo_test<- force.ultrametric(phylo_raw)
# For each tree tip that has no community match: if its genus holds exactly one
# community species, just rename the tip; otherwise rename it to the first
# congener and graft the remaining congeners onto the genus.
for(i in 1:length(pos_1)){
  #i= 6
  if((table(sub("_.*", "", names(spp_data)))[sub("_.*", "", pos_1[i])] == 1) == TRUE){
    position<- which(sub("_.*", "", phylo_test$tip.label) == sub("_.*", "", pos_1[i]))
    # NOTE(review): `tab[key] == 1` is evaluated first, so names() here simply
    # recovers the genus name of pos_1[i] -- convoluted but functional.
    phylo_test$tip.label[position]<- names(spp_data[which(sub("_.*", "", names(spp_data)) ==
                                                           names(table(sub("_.*", "", names(spp_data)))[sub("_.*", "", pos_1[i])] == 1))])
  } else {
    position<- which(sub("_.*", "", phylo_test$tip.label) == sub("_.*", "", pos_1[i]))
    names_spp<- names(spp_data[which(sub("_.*", "", names(spp_data)) ==
                                       names(table(sub("_.*", "", names(spp_data)))[sub("_.*", "", pos_1[i])] == 1))])
    phylo_test$tip.label[position]<- names_spp[1]
    # Graft every remaining congener; 2:length() is safe because this branch
    # only runs when the genus holds more than one community species.
    for(j in 2:length(names_spp)){
      phylo_test<- add.species.to.genus(phylo_test, names_spp[j])
    }
  }
}
# Remaining community species that still lack a tip: graft each onto its genus
pos_2<- treedata_modif(phy = phylo_test, data = spp_data, warnings = F)$nc$data_not_tree #only species to genus
pos_2<- pos_2[-match(c(spp_problem1, spp_problem2), pos_2)] #symbranchus
for(i in 1:length(pos_2)){
  phylo_test<- add.species.to.genus(phylo_test, pos_2[i])
}
# Problem taxon 1: rename the randomly sampled Synbranchiformes placeholder tip
position_problem1<- which(phylo_test$tip.label == spp_synbranc)
phylo_test$tip.label[position_problem1]<- spp_problem1 #solving problem 1
plot(phylo_test, cex= 0.5)
nodelabels()
# Problem taxon 2: attach it at a node chosen by eye from the plot above.
# NOTE(review): node 75 is hard-coded and will be wrong if the tree topology
# changes (e.g. because of the unseeded sample() above).
phylo_test<- bind.tip(tree = phylo_test, tip.label = spp_problem2, where = 75, position = 5)
write.tree(phylo_test, here::here("data", "tree_update_20-03-20.new"))
|
##Loading relevant data to R
## Reads only the 2880 half-hourly rows for 1-2 Feb 2007 (located via skip/nrow
## so the full data file never has to be held in memory).
options(stringsAsFactors = FALSE)  # fix: was the re-assignable shorthand F
# NOTE(review): with header = TRUE the first row after 'skip' is consumed as a
# bogus header line and then replaced by colnames() below -- confirm the skip
# count accounts for that extra row.
dataset <- read.table("household_power_consumption.txt",sep=";",na.strings="?",skip=66636,nrow=2880,quote="",header=TRUE,colClasses = "character")
colnames(dataset) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Parse Date into Date class and Time into a full POSIX timestamp
transform.data <- transform(dataset,Date=as.Date(strptime(Date,"%d/%m/%Y")),Time=strptime(paste(Date,Time,sep=" "),"%d/%m/%Y %H:%M:%S"))
##Creating the PNG file
png("plot1.png", width = 480, height = 480)
##Creating the histogram
hist(as.numeric(transform.data$Global_active_power),col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)",cex.axis=1,cex.lab = 1)
dev.off() | /plot1.R | no_license | pavithratg/ExData_Plotting1 | R | false | false | 762 | r | ##Loading relevant data to R
## Reads only the 2880 half-hourly rows for 1-2 Feb 2007 (located via skip/nrow
## so the full data file never has to be held in memory).
options(stringsAsFactors = FALSE)  # fix: was the re-assignable shorthand F
# NOTE(review): with header = TRUE the first row after 'skip' is consumed as a
# bogus header line and then replaced by colnames() below -- confirm the skip
# count accounts for that extra row.
dataset <- read.table("household_power_consumption.txt",sep=";",na.strings="?",skip=66636,nrow=2880,quote="",header=TRUE,colClasses = "character")
colnames(dataset) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Parse Date into Date class and Time into a full POSIX timestamp
transform.data <- transform(dataset,Date=as.Date(strptime(Date,"%d/%m/%Y")),Time=strptime(paste(Date,Time,sep=" "),"%d/%m/%Y %H:%M:%S"))
##Creating the PNG file
png("plot1.png", width = 480, height = 480)
##Creating the histogram
hist(as.numeric(transform.data$Global_active_power),col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)",cex.axis=1,cex.lab = 1)
dev.off() |
###### This script deals with transcriptomes and also getting the probe set rename
library(ape)
library(seqinr)
library(stringr)
library(data.table)
library(GenomicRanges)
library(Biostrings)
library(Rsamtools)
#Options
options(stringsAsFactors = FALSE)
#options(warn=2) #for debugging warnings in loops
##########################################################################################################
#Step 1: Settings for everything
##########################################################################################################
#Directory settings
work.dir<-"/Volumes/Armored/Mantellidae_All" #The directory that contains your processed samples
out.dir<-"mtGenomes" #output directory
#Reference files. Place in working directory (work.dir)
reference<-"reference.fa" #Name of the reference. Includes many frog mtGenomes.
gene.file<-"mtGenes.fa" #Name of the reference gene files. Default is from N. parkeri. Can replace with closer taxa.
#Running setups
# NOTE(review): mem, min.id, min.prop and min.len are kept as character
# strings -- mem and min.id are pasted directly into shell commands below,
# and the others presumably later in the pipeline; do not convert to numeric.
threads = 8 #Number of threads
mem = "80" #GB of ram
min.id = "0.7" #Initial value for matching raw reads to reference. Probably should leave alone, for problem taxa.
resume = TRUE #Skips over samples already done
#Alignment settings
secondary.structure = FALSE #Runs mafft-qinsi on mt regions that have secondary structure. Takes structure into acct.
min.taxa = 4 #min number to keep an alignment
min.prop = "0.25" #min proportion of coverage per individual. e.g. for a 100bp gene, needs 25 bp to keep.
min.len = "100" #min length for trimming. Set to this value as you don't usually want to trim t-RNAs
gblocks = FALSE #Use Gblocks for alignment trimming, if you want to.
trimal = TRUE #Use trimAl for alignment trimming.
###############################################################################
######## DO NOT EDIT BELOW THIS POINT ########################################
###############################################################################
###############################################################################
###################### FUNCTIONS #########################
###############################################################################
###############################################################################
pairwise.inf.sites<-function(x, y) {
  # Proportion of mismatching sites between every sequence in an alignment
  # and a chosen reference sequence, counting only sites where both
  # sequences have a real base (gaps "-", ambiguities "n" and NA ignored).
  #
  # x : character matrix (rows = sequences named via rownames, cols = sites)
  # y : rowname of the reference sequence (must match exactly one row)
  #
  # Returns a named numeric vector, one value per row of x; rows that share
  # no sites with the reference get 0 (same convention as before).
  new.align <- x
  # Treat ambiguous bases and missing cells as gaps
  new.align[new.align == "n"] <- "-"
  new.align[is.na(new.align)] <- "-"
  ref <- new.align[rownames(new.align) == y, ]
  # Vectorized per-row comparison (replaces the former per-site double loop)
  summary.data <- apply(new.align, 1, function(tar) {
    both.present <- ref != "-" & tar != "-"
    overlap <- sum(both.present)
    if (overlap == 0) {
      return(0)  # no shared informative sites -> defined as 0, not NaN
    }
    sum(ref[both.present] != tar[both.present]) / overlap
  })
  names(summary.data) <- rownames(new.align)
  return(summary.data)
}
write.phy<-function (x, file = "", interleave = FALSE, strict = FALSE){
    # Write an alignment matrix in PHYLIP format.
    #
    # x          matrix of sequence characters (taxa in rows, sites in
    #            columns); rownames are the taxon labels
    # file       output path; "" prints the file to the console instead
    # interleave FALSE for sequential output, otherwise the number of sites
    #            per interleaved block
    # strict     if TRUE, taxon names are truncated/padded to exactly 10
    #            characters as required by strict PHYLIP
    ntax <- nrow(x)
    nsites <- ncol(x)   # renamed from 'nchar', which shadowed base::nchar
    taxnames <- rownames(x)
    if (strict) {
        # Strict PHYLIP: name field is exactly 10 characters wide, padded
        # with '*'. (Bug fix: 'truncate' was referenced here without ever
        # being defined, so strict = TRUE always failed.)
        truncate <- 10
        taxnames <- substring(taxnames, 1, truncate)
        pad <- truncate - nchar(taxnames)
        for (i in seq(along = taxnames)) taxnames[i] <- paste(taxnames[i],
            paste(rep("*", pad[i]), collapse = ""), sep = "")
        if (any(duplicated(taxnames)))
            cat("WARNING: Truncation of taxon names created",
                "identical strings.")
    }
    else {
        # Pad names so all sequences start in the same column (3 extra spaces)
        xx <- nchar(taxnames)
        diff <- max(xx) - xx + 3
        for (i in 1:ntax) taxnames[i] <- paste(taxnames[i], paste(rep(" ",
            diff[i]), collapse = ""), sep = "")
    }
    if (!interleave)
        interleave <- nsites
    # pt holds the start/end site of every interleaved block
    nbpart <- ceiling(nsites/interleave)
    pt <- matrix(nrow = nbpart, ncol = 2)
    pt[1, ] <- c(1, interleave)
    if (nbpart > 1)
        for (i in 2:(dim(pt)[1])) {
            pt[i, ] <- c(pt[i - 1, 2] + 1, pt[i - 1, 2] + interleave)
            pt[nbpart, 2] <- nsites
        }
    phy <- paste(ntax, nsites)
    for (i in seq(along = pt[, 1])) {
        sm <- as.character(x[, pt[i, 1]:pt[i, 2]])
        # as.character() on a plain character matrix drops the dim attribute;
        # rebuild the taxa-by-sites matrix so each row collapses into one
        # sequence string. (Bug fix: the old code forced a one-column matrix,
        # which emitted one line per character for multi-column blocks.)
        if (is.null(dim(sm)))
            sm <- matrix(sm, nrow = ntax)
        sm <- apply(sm, 1, paste, collapse = "")
        if (i == 1)
            sm <- paste(taxnames, sm)   # names only in the first block
        if (i < max(seq(along = pt[, 1])))
            sm <- c(sm, "")             # blank line between blocks
        phy <- c(phy, sm)
    }
    if (file == "") {
        cat(phy, sep = "\n")
    }
    else {
        write(phy, file = file)
    }
}
#################################################################
#Step 1: Gather read data and assemble mitochondrial genomes
#################################################################
#Creates directories and stuff
dir.create(paste(work.dir, "/", out.dir, sep = ""))
dir.create(paste(work.dir, "/", out.dir, "/", "Species_mtGenomes", sep = ""))
#Sets up the reads
trim.cds<-FALSE #defaults to no trimming for coding sequence. Usually destroys mtGenes
setwd(paste(work.dir, "/", "Processed_Samples", sep = ""))
raw.dir<-"assembly-reads" #Directory of reads used for assembly. Shouldn't need to modify unless the assembly reads are broken
files<-list.files(path = ".", full.names = F, recursive = T)
reads<-files[grep(pattern = raw.dir, x = files)]
#Creates directories and copies files
system(paste("cp ../", reference, " ", work.dir, "/", out.dir, sep = ""))
system(paste("cp ../", gene.file, " ", work.dir, "/", out.dir, sep = ""))
setwd(paste(work.dir, "/", out.dir, sep = ""))
# One sample per directory under Processed_Samples; skip finished samples
# when resume = TRUE (a sample counts as done once its mtGenome fasta exists)
sample.names = list.dirs(paste0(work.dir, "/Processed_Samples"), recursive = F, full.names = F)
done.names = list.files(paste0(work.dir, "/mtGenomes/Species_mtGenomes"))
# NOTE(review): done.names is recomputed inside the if below, so this first
# assignment is redundant (harmless; one of the two could be dropped).
if (resume == TRUE){
  done.names = list.files(paste0(work.dir, "/mtGenomes/Species_mtGenomes"))
  samples = sample.names[!sample.names %in% done.names]
} else { samples = sample.names }
# Iteratively assemble a mitochondrial genome for every remaining sample:
# bait reads against the frog mtGenome reference with bbmap, assemble with
# SPAdes, then re-bait against the growing assembly until its length stops
# changing (or an iteration/repeat cap is hit). Requires bbmap.sh, spades.py
# and cap3 on the PATH; all file manipulation happens via shell calls in the
# current working directory.
if (length(samples) != 0){
  for (i in 1:length(samples)){
    #Change to main directory
    setwd(paste(work.dir, "/", out.dir, sep = ""))
    #Gets reads together for this sample (reset per-sample tuning values)
    min.id<-"0.7"
    reference<-"reference.fa" #Name of the reference. Includes many frog mtGenomes.
    sample.reads<-reads[grep(samples[i], reads)]
    read1<-paste(work.dir, "/Processed_Samples/", sample.reads[grep("READ1", sample.reads)], sep = "")
    read2<-paste(work.dir, "/Processed_Samples/", sample.reads[grep("READ2", sample.reads)], sep = "")
    read3<-paste(work.dir, "/Processed_Samples/", sample.reads[grep("singleton", sample.reads)], sep = "")
    #Pick out matching reads to mt Genomes
    system(paste("bbmap.sh -Xmx8g ref=reference.fa", " in1=", read1, " in2=", read2, " vslow k=12 minid=",min.id,
                 " outm1=read1.fq outm2=read2.fq", sep = ""), ignore.stderr = T)
    system(paste("bbmap.sh -Xmx8g ref=reference.fa", " in=", read3, " vslow k=12 minid=", min.id,
                 " outm=singleton.fq", sep = ""), ignore.stderr = T)
    # Old bwa/samtools-based baiting pipeline, kept for reference:
    #Creates a bam alignment file of reads mapped to reference
   # system(paste("bwa mem -t ", threads, " ref ", read1, " ", read2,
   #              " | samtools sort -n -@", threads, " -O BAM -o paired.bam -", sep = ""))
    #Pulls out the pairs where single reads match to the ref
   # system(paste("samtools view -bh -F 4 -f 8 paired.bam > out1.bam", sep = ""))
    #system(paste("samtools view -bh -F 8 -f 4 paired.bam > out2.bam", sep = ""))
    #Pulls out the pairs where both reads match to the ref
   # system(paste("samtools view -bh -F 12 paired.bam > out3.bam", sep = ""))
    #Merges and sorts bam file
    #system(paste("samtools cat out1.bam out2.bam out3.bam | ",
    #             "samtools sort -n -@", threads, " -O BAM -o match_paired.bam -", sep = ""))
   # system(paste("bedtools bamtofastq -i match_paired.bam -fq output_r1.fastq -fq2 output_r2.fastq", sep = ""))
    system("touch current_seed.fasta")
    # Iterative seed-and-extend: new.len/prev.len track assembly length,
    # repeat.counter guards the repeat-collapse branch below.
    new.len<-0
    counter<-0
    repeat.counter<-0
    seeding<-T
    while (seeding == T){
      #Copy new reference to do recursively
      counter<-counter+1
      prev.len<-new.len
      #skips the first one since its already done
      if (counter >= 2){
        #Pick out matching reads to the current seed and pool them with the
        #originally baited reads (o_*.fq) for the next assembly round
        system(paste("bbmap.sh -Xmx8g ref=current_seed.fasta", " in1=", read1, " in2=", read2, " vslow k=12 minid=",min.id,
                     " outm1=t_read1.fq outm2=t_read2.fq", sep = ""), ignore.stderr = T)
        system(paste("bbmap.sh -Xmx8g ref=current_seed.fasta", " in=", read3, " vslow k=12 minid=", min.id,
                     " outm=t_singleton.fq", sep = ""), ignore.stderr = T)
        system(paste("cat t_read1.fq o_read1.fq >> read1.fq"))
        system(paste("cat t_read2.fq o_read2.fq >> read2.fq"))
        system(paste("cat t_singleton.fq o_singleton.fq >> singleton.fq"))
        system("rm t_read1.fq t_read2.fq t_singleton.fq")
      }#end counter if
      #Run SPADES on sample
      k<-c(9,13,21,33,55,77,99,127)
      k.val<-paste(k, collapse = ",")
      system(paste("spades.py --pe1-1 read1.fq --pe1-2 read2.fq --pe1-s singleton.fq",
                   " -o spades -k ",k.val," --careful -t ", threads, " -m ", mem, sep = ""), ignore.stdout = T)
      #Checks to see if one kmer failed or not
      while (file.exists("spades/contigs.fasta") == F){
        #subtract Ks until it works
        system("rm -r spades")
        k<-k[-length(k)]
        if (length(k) == 0) { break }
        k.val<-paste(k, collapse = ",")
        min.id<-"0.6"
        system(paste("spades.py --pe1-1 read1.fq --pe1-2 read2.fq --pe1-s singleton.fq",
                     " -o spades -k ",k.val," --careful -t ", threads, " -m ", mem, sep = ""), ignore.stdout = T)
      }#end while
      #If the k-mers are all run out, therefore nothing can be assembled
      # NOTE(review): this paste() result is discarded -- print() or message()
      # was presumably intended. Also note seeding = F does not stop the
      # current iteration, so the cp/rm commands below still run (and warn)
      # even though the spades directory is gone -- confirm this is tolerated.
      if (length(k) == 0) {
        paste("k-mer values all used up, cannot assemble!")
        system("rm read1.fq read2.fq singleton.fq t_read1.fq t_read2.fq t_singleton.fq o_read1.fq o_read2.fq o_singleton.fq")
        system("rm -r spades")
        seeding = F
      }# end if
      # On the first pass stash the originally baited reads as o_*.fq so each
      # later round can re-pool them with the newly baited reads
      if (counter == 1){
        system(paste("mv read1.fq o_read1.fq"))
        system(paste("mv read2.fq o_read2.fq"))
        system(paste("mv singleton.fq o_singleton.fq"))
      }
      system("cp spades/contigs.fasta current_seed.fasta")
      if (counter >= 2) { system("rm read1.fq read2.fq singleton.fq") }
      system("rm -r spades")
      reference<-"current_seed.fasta"
      #Check size of the new assembly (total bases in non-header fasta lines)
      temp.count<-scan(file = "current_seed.fasta", what = "character")
      new.len<-sum(nchar(temp.count[-grep(">", temp.count)]))
      no.contigs<-length(temp.count[grep(">", temp.count)])
      print(paste("iteration ", counter, " complete!", sep = ""))
      print(paste("new length: ", new.len, ". Old length: ", prev.len, sep = ""))
      # Converged (no growth) or hit the 20-iteration cap -> stop seeding
      if (new.len == prev.len || counter == 20){
        seeding<-F
        system("rm o_read1.fq o_read2.fq o_singleton.fq")
        print(paste("mitogenome complete after ", counter, " iterations!", sep = ""))
        min.id<-"0.7"
      } #end if
      #If the file gets too large, its due to repeats
      if (new.len >= 23000){
        #runs cap3 to merge similar contigs (pull only clustered contigs out?)
        system(paste("cap3 current_seed.fasta -z 1 -o 16 -e 11 -s 251", " > ",
                     "log.fasta.cap.txt", sep = ""))
        #Reads in results files
        temp.assembled<-scanFa(FaFile(paste("current_seed.fasta.cap.contigs", sep = "")))
        temp.singlets<-scanFa(FaFile(paste("current_seed.fasta.cap.singlets", sep = "")))
        keep.singlets<-temp.singlets[width(temp.singlets) >= 100]
        final.save<-append(temp.assembled, keep.singlets)
        #Writes the merged contigs back out as the new seed
        write.loci<-as.list(as.character(final.save))
        write.fasta(sequences = write.loci, names = names(write.loci),
                    "current_seed.fasta", nbchar = 1000000, as.string = T)
        #Get cap3 files and deletes
        # NOTE(review): "fasta.cap*." is used as a regex here, not a glob --
        # '*' repeats 'p' and '.' matches anything; it happens to match the
        # cap3 outputs but "fasta\\.cap" would be the intended pattern.
        cap.files<-list.files(pattern = "", full.names = F, recursive = F)
        cap.remove<-cap.files[grep(pattern = paste("fasta.cap*.", sep =""), x = cap.files)]
        system(paste("rm ", paste(cap.remove, collapse = " ") ))
        min.id<-"0.95"
        #makes sure this doesn't go on forever and ever
        repeat.counter<-repeat.counter+1
        if (repeat.counter >= 5){
          print(paste("repeat counter hit 5"))
          system("rm o_read1.fq o_read2.fq o_singleton.fq")
          seeding<-F
        }#end if
      }#end length > 30,000 if
    }#end while
    #Save finished genome
    contigs<-scanFa(FaFile("current_seed.fasta")) # loads up fasta file
    #Skips if there are none
    if (length(contigs) == 0){ next }
    #Tries to merge contigs if there are more than 1
    if (length(contigs) >= 2){
      #runs cap3 to merge similar contigs (pull only clustered contigs out?)
      system(paste("cap3 current_seed.fasta -z 1 -o 16 -e 11 -s 251", " > ",
                   "log.fasta.cap.txt", sep = ""))
      #Reads in results files
      temp.assembled<-scanFa(FaFile(paste("current_seed.fasta.cap.contigs", sep = "")))
      temp.singlets<-scanFa(FaFile(paste("current_seed.fasta.cap.singlets", sep = "")))
      keep.singlets<-temp.singlets[width(temp.singlets) >= 100]
      contigs<-append(temp.assembled, keep.singlets)
      #Get cap3 files and deletes
      cap.files<-list.files(pattern = "", full.names = F, recursive = F)
      cap.remove<-cap.files[grep(pattern = paste("fasta.cap*.", sep =""), x = cap.files)]
      system(paste("rm ", paste(cap.remove, collapse = " ") ))
    }#end if
    # Too little sequence to be a usable mitogenome -> skip this sample
    if (sum(width(contigs)) <= 1000) {
      print("less than 1000bp, not enough data to extract")
      next
    }
    #Writes the full mitochondrial genome file
    system("rm current_seed.fasta")
    names(contigs)<-paste("sequence_", seq(1:length(contigs)), sep = "")
    write.loci<-as.list(as.character(contigs))
    write.fasta(sequences = write.loci, names = names(write.loci),
                paste("Species_mtGenomes/", samples[i], ".fa", sep = ""), nbchar = 1000000, as.string = T)
  }#end i loop
  system("rm -r ref")
}#end if
#################################################################
#Step 2: Assess completeness of the mitochondrial genome and annotate
#################################################################
#Extracts each reference mt gene from every species' assembled mtGenome by
#BLAT-matching the gene set (query) against the per-species contigs (target).
#Column names of BLAT's headerless pslx output, used to label fread() results
headers<-c("matches", "misMatches", "repMatches", "nCount", "qNumInsert", "qBaseInsert", "tNumInsert", "tBaseInsert", "strand", "qName",
"qSize", "qStart", "qEnd", "tName", "tSize", "tStart", "tEnd", "blockCount", "blockSize", "qStarts", "tStarts", "qSeq", "tSeq")
#Creates new directory and enters this working directory
setwd(paste(work.dir, "/", out.dir, sep = ""))
dir.create("Species_Loci")
spp.samples<-list.files("Species_mtGenomes/.")
spp.samples<-gsub(".fa$", "", spp.samples)
for (i in 1:length(spp.samples)){
#Load in the data
contigs<-scanFa(FaFile(paste("Species_mtGenomes/", spp.samples[i], ".fa", sep = ""))) # loads up fasta file
#Matches samples to loci: pblat writes per-gene hits into mt_to_genes.pslx
system(paste("mpirun pblat -threads=", threads, " Species_mtGenomes/", spp.samples[i], ".fa ",
gene.file, " -tileSize=8 -minIdentity=60",
" -noHead -out=pslx mt_to_genes.pslx", sep = ""), ignore.stdout = T)
#scan() is used only to detect an empty (no-hit) pslx before fread() would fail
temp.count<-scan(file = "mt_to_genes.pslx", what = "character")
if (length(temp.count) == 0){
print("No matching mitochondrial genes were found.")
next
}
match.data<-fread("mt_to_genes.pslx", sep = "\t", header = F, stringsAsFactors = FALSE)
setnames(match.data, headers)
loci.names<-unique(match.data$qName)
sep.loci<-DNAStringSet()
for (j in 1:length(loci.names)){
#pulls out data that matches to multiple contigs;
#keeps only the single best-scoring hit (max matches) for this gene
sub.data<-match.data[match.data$qName %in% loci.names[j],]
sub.data<-sub.data[sub.data$matches == max(sub.data$matches),][1]
#Extend target coordinates so the extracted slice spans the full query gene.
#NOTE(review): assumes pblat's 0-based qStart/qEnd; the asymmetric +1 between
#the two strands looks intentional but should be confirmed against the PSL spec.
if (sub.data$strand == "+"){
#Cuts the node apart and saves separately
sub.data$tStart<-sub.data$tStart-sub.data$qStart+1
#Fixes ends
sub.data$tEnd<-sub.data$tEnd+(sub.data$qSize-sub.data$qEnd)
} else {
sub.data$tStart<-sub.data$tStart-(sub.data$qSize-sub.data$qEnd)
#Fixes ends
sub.data$tEnd<-sub.data$tEnd+sub.data$qStart+1
}
#If it ends up with a negative start, clamp to the contig's first base
if (sub.data$tStart <= 0){ sub.data$tStart<-1}
#Fixes if the contig is smaller than the full target locus
if (sub.data$tEnd >= sub.data$tSize) { sub.data$tEnd<-sub.data$tSize }
#Gets start and end (order-independent, in case of reversed coordinates)
start.pos<-min(sub.data$tStart, sub.data$tEnd)
end.pos<-max(sub.data$tStart, sub.data$tEnd)
temp.contig<-contigs[names(contigs) == sub.data$tName]
new.seq<-subseq(x = temp.contig, start = start.pos, end = end.pos)
names(new.seq)<-sub.data$qName
sep.loci<-append(sep.loci, new.seq)
}#end j loop
#Writes all extracted genes for this species into one fasta file
write.loci<-as.list(as.character(sep.loci))
write.fasta(sequences = write.loci, names = names(write.loci),
paste("Species_Loci/", spp.samples[i], "_mito_genes.fa", sep = ""), nbchar = 1000000, as.string = T)
system("rm mt_to_genes.pslx")
}#end i loop
#################################################################
#Step 3: Create alignments
#################################################################
#For each reference gene: collect that gene across species, align with MAFFT,
#drop divergent sequences, re-align, and trim to the reference's extent.
setwd(paste(work.dir, "/", out.dir, sep = ""))
#Sets up the loci to align
ref.data<-scanFa(FaFile(gene.file))
species.names<-list.files("Species_Loci/.", full.names = F)
species.names<-species.names[species.names != ""]
dir.create("mtGenes_Fastas")
dir.create("mtGenes_Aligned")
#Aligns each potential locus
for (i in 1:length(ref.data)){
##############
#STEP 1: Gets the locus data from each species
##############
#Gets all species data
final.gene<-DNAStringSet()
for (j in 1:length(species.names)){
#Looks for this gene in the species data
spp.data<-scanFa(FaFile(paste("Species_Loci/", species.names[j], sep = ""))) # loads up fasta file
spp.gene<-spp.data[names(spp.data) == names(ref.data)[i]]
#Skips if none
if (length(spp.gene) == 0){ next }
#Renames the sequence to the species (file) name
names(spp.gene)<-gsub("_mito_genes.fa", "", species.names[j])
final.gene<-append(final.gene, spp.gene)
}#end j loop
##############
#STEP 2: Sets up for alignment
##############
#Checks for a minimum length: each sequence must cover min.prop of the reference
final.gene<-final.gene[width(final.gene) >= width(ref.data)[i]*as.numeric(min.prop)]
#Checks for minimum taxa number
if (length(names(final.gene)) <= min.taxa){
print(paste(names(ref.data)[i], " had too few taxa", sep = ""))
next
}
#Adds reference locus (named "Reference", used later for filtering/trimming)
final.gene<-append(final.gene, ref.data[i])
names(final.gene)[length(final.gene)]<-"Reference"
final.loci<-as.list(as.character(final.gene))
#Saves to folder to run with mafft
write.fasta(sequences = final.loci, names = names(final.loci),
paste("mtGenes_Fastas/", names(ref.data)[i], ".fa", sep = ""), nbchar = 1000000, as.string = T)
##############
#STEP 3: Runs MAFFT to align
##############
#rRNAs optionally use mafft-qinsi, which accounts for secondary structure
mafft.cmd<-"mafft"
if (names(ref.data)[i] == "12S_rRNA" || names(ref.data)[i] == "16S_rRNA"){
if (secondary.structure == TRUE){ mafft.cmd<-"mafft-qinsi" } else { mafft.cmd<-"mafft" }
}
#Runs the mafft command
system(paste(mafft.cmd, " --localpair --maxiterate 1000 --adjustdirection --quiet --op 3 --ep 0.123",
" --thread ", threads, " ", "mtGenes_Fastas/", names(ref.data)[i], ".fa",
" > ", "mtGenes_Fastas/", names(ref.data)[i], "_align.fa", sep = ""))
alignment<-scanFa(FaFile(paste("mtGenes_Fastas/", names(ref.data)[i], "_align.fa", sep = ""))) # loads up fasta file
#Reverses alignment back to the correct orientation: mafft prefixes flipped
#sequences with _R_, so if the Reference was flipped, flip everything back
reversed<-names(alignment)[grep(pattern = "_R_", names(alignment))]
if (length(reversed[grep(pattern = "Reference", reversed)]) == 1){ alignment<-reverseComplement(alignment) }
#Renames sequences to get rid of _R_
names(alignment)<-gsub(pattern = "_R_", replacement = "", x = names(alignment))
new.align<-strsplit(as.character(alignment), "")
mat.align<-lapply(new.align, tolower)
m.align<-as.matrix(as.DNAbin(mat.align))
#Filters out weirdly divergent sequences (>= 45% mismatch vs Reference)
diff<-pairwise.inf.sites(as.character(m.align), "Reference")
bad.seqs<-names(diff)[which(diff >= 0.45)]
rem.align<-alignment[!names(alignment) %in% bad.seqs]
# Moves onto the next locus if there are no good sequences left
if (length(rem.align) <= as.numeric(min.taxa)){
#Deletes old files
system(paste("rm ", "mtGenes_Fastas/", names(ref.data)[i], "_align.fa", sep = ""))
print(paste(names(ref.data)[i], " had too few taxa", sep = ""))
next }
### realign if bad seqs were removed (only worthwhile for loci >= 200 bp)
if (length(bad.seqs) != 0 && width(ref.data)[i] >= 200){
#Aligns using mafft
print(paste(names(ref.data)[i], " was realigned", sep = ""))
#Saves to folder to run with mafft
final.loci<-as.list(as.character(rem.align))
#Saves to folder to run with mafft
write.fasta(sequences = final.loci, names = names(final.loci),
paste("mtGenes_Fastas/", names(ref.data)[i], ".fa", sep = ""), nbchar = 1000000, as.string = T)
system(paste(mafft.cmd, " --localpair --maxiterate 1000 --adjustdirection --quiet --op 3 --ep 0.123",
" --thread ", threads, " ", "mtGenes_Fastas/", names(ref.data)[i], ".fa",
" > ", "mtGenes_Fastas/", names(ref.data)[i], "_align.fa", sep = ""))
alignment<-scanFa(FaFile(paste(work.dir, "/", out.dir, "/mtGenes_Fastas/", names(ref.data)[i], "_align.fa", sep = ""))) # loads up fasta file
#Reverses alignment back to the correct orientation (same logic as above)
reversed<-names(alignment)[grep(pattern = "_R_", names(alignment))]
if (length(reversed[grep(pattern = "Reference", reversed)]) == 1){ alignment<-reverseComplement(alignment) }
#Renames sequences to get rid of _R_
names(alignment)<-gsub(pattern = "_R_", replacement = "", x = names(alignment))
} # end bad.seqs if
#Removes the edge gaps: trim columns to the Reference's first/last non-gap site
ref.aligned<-as.character(alignment['Reference'])
not.gaps<-str_locate_all(ref.aligned, pattern = "[^-]")[[1]][,1]
ref.start<-min(not.gaps)
ref.finish<-max(not.gaps)
trim.align<-subseq(alignment, ref.start, ref.finish)
#readies for saving as a character matrix via DNAbin
write.temp<-strsplit(as.character(trim.align), "")
aligned.set<-as.matrix(as.DNAbin(write.temp) )
write.phy(aligned.set, file=paste("mtGenes_Aligned/", names(ref.data)[i], ".phy", sep = ""), interleave = F)
}#end i loop
#################################################################
#Step 4: Create alignments and partition by codon
#################################################################
#Trims each alignment (Gblocks and/or trimAl), keeping the Reference out of
#the final output, and writes the result as phylip.
#Create directory and loci to trim
dir.create("mtGenes_Trimmed")
locus.names<-list.files("mtGenes_Aligned/.")
#So it doesn't trim the cds.
#NOTE(review): no.trim is only defined when trim.cds == FALSE; with
#trim.cds == TRUE the grep() against no.trim below would error — confirm intent.
if (trim.cds == FALSE){ no.trim<-locus.names[grep("CDS", locus.names)] }#end if
#Loops through each locus and does operations on them
for (i in 1:length(locus.names)){
##############
#STEP 1: Basic steps
##############
#Reads in files
align<-readAAMultipleAlignment(file = paste("mtGenes_Aligned/", locus.names[i], sep =""), format = "phylip")
#Drop the Reference row; only real taxa are written out
tax.names<-rownames(align)
tax.names<-tax.names[!tax.names %in% "Reference"]
new.align<-strsplit(as.character(align), "")
mat.align<-lapply(new.align, tolower)
m.align<-as.matrix(as.DNAbin(mat.align))
t.align<-m.align[rownames(m.align) %in% tax.names,]
save.rownames<-rownames(t.align)
#removes too short loci from trimming: write untouched and move on
if (ncol(align) <= as.numeric(min.len)){
write.phy(t.align, file= paste("mtGenes_Trimmed/", locus.names[i], sep = ""), interleave = F)
next
}
#So it doesn't trim the cds.
#NOTE(review): locus.names[i] is used as a regex here; dots in the file name
#match any character, which is harmless for membership checks but imprecise.
if (length(grep(locus.names[i], no.trim)) != 0) {
write.phy(t.align, file= paste("mtGenes_Trimmed/", locus.names[i], sep = ""), interleave = F)
next
}#end if
#Writes the alignment as fasta for the external trimming tools
t.loci<-as.character(as.list(t.align))
w.loci<-lapply(t.loci, toupper)
write.align<-lapply(w.loci, c2s)
input.file<-paste("mtGenes_Trimmed/", gsub(pattern = "\\..*", "", locus.names[i]), ".fa", sep = "")
write.fasta(sequences = write.align, names = names(write.align),
input.file, nbchar = 1000000, as.string = T)
##############
#STEP 2: GBLOCKS
##############
if (gblocks == TRUE){
system(paste("Gblocks ", input.file, " -t=d -b1=50 -b2=50 -b5=h ", sep = ""))
system(paste("rm ", input.file, " ", input.file, "-gb.htm", sep = ""))
system(paste("mv ", input.file, "-gb ", input.file, sep = ""))
}
##############
#STEP 3: TrimAl
##############
if (trimal == TRUE){
#system(paste("trimal -in ", input.file, " -out ", input.file, "-tm ",
# "-gt 0.75 -st 0.001 -cons 60 -resoverlap 0.75 -seqoverlap 50 -automated1", sep = ""))
system(paste("trimal -in ", input.file, " -out ", input.file, "-tm -automated1", sep = ""))
system(paste("rm ", input.file, sep = ""))
system(paste("mv ", input.file, "-tm ", input.file, sep = ""))
}
##############
#STEP 4: Save as .phy file
##############
#NOTE(review): ".fa" is an unescaped regex here; fine for these names but fragile
locus.save.name<-gsub(pattern = ".fa", replacement = ".phy", x = input.file)
alignment<-scanFa(FaFile(input.file)) # loads up fasta file
#NOTE(review): a single NA name silently break()s the WHOLE locus loop,
#not just this locus — probably intended as a debugging stop; confirm.
temp<-names(alignment)[is.na(names(alignment)) == T]
if (length(temp) > 0){ break }
#trimAl may shorten names; map each back to the original saved row name
new.names<-c()
for (j in 1:length(names(alignment))){
new.names[j]<-save.rownames[grep(pattern = names(alignment)[j], x = save.rownames)]
}
names(alignment)<-new.names
#removes loci with too few taxa after trimming; falls back to untrimmed data
if (length(names(alignment)) <= as.numeric(min.taxa)){
system(paste("rm ", input.file, sep = ""))
print(paste(input.file, "deleted. Too few taxa after trimming."))
write.phy(t.align, file= paste("mtGenes_Trimmed/", locus.names[i], sep = ""), interleave = F)
next
}
write.temp<-strsplit(as.character(alignment), "")
aligned.set<-as.matrix(as.DNAbin(write.temp) )
#readies for saving
write.phy(aligned.set, file= locus.save.name, interleave = F)
system(paste("rm ", input.file, sep = ""))
}
# END SCRIPT
| /3-Post-Processing/02_mtgenome_assembly.R | no_license | chutter/murinae-seq | R | false | false | 26,770 | r | ###### This script deals with transcriptomes and also getting the probe set rename
library(ape)
library(seqinr)
library(stringr)
library(data.table)
library(GenomicRanges)
library(Biostrings)
library(Rsamtools)
#Options
options(stringsAsFactors = FALSE)
#options(warn=2) #for debugging warnings in loops
##########################################################################################################
#Step 1: Settings for everything
##########################################################################################################
#Directory settings
work.dir<-"/Volumes/Armored/Mantellidae_All" #The directory that contains your processed samples
out.dir<-"mtGenomes" #output directory (created inside work.dir)
#Reference files. Place in working directory (work.dir)
reference<-"reference.fa" #Name of the reference. Includes many frog mtGenomes.
gene.file<-"mtGenes.fa" #Name of the reference gene files. Default is from N. parkeri. Can replace with closer taxa.
#Running setups
threads = 8 #Number of threads
mem = "80" #GB of ram
min.id = "0.7" #Initial value for matching raw reads to reference. Probably should leave alone, for problem taxa.
resume = TRUE #Skips over samples already done
#Alignment settings
secondary.structure = FALSE #Runs mafft-qinsi on mt regions that have secondary structure. Takes structure into acct.
min.taxa = 4 #min number to keep an alignment
min.prop = "0.25" #min proportion of coverage per individual. e.g. for a 100bp gene, needs 25 bp to keep.
min.len = "100" #min length for trimming. Set to this value as you don't usually want to trim t-RNAs
gblocks = FALSE #Run Gblocks trimming on non-CDS alignments if TRUE
trimal = TRUE #Run trimAl automated trimming on non-CDS alignments if TRUE
###############################################################################
######## DO NOT EDIT BELOW THIS POINT ########################################
###############################################################################
###############################################################################
###################### FUNCTIONS #########################
###############################################################################
###############################################################################
#Computes, for every row of a character alignment matrix, the proportion of
#mismatched sites relative to a named reference row. Gaps ("-"), ambiguous
#bases ("n"), and NAs are excluded: a column only counts when BOTH the
#reference and the compared sequence have a real base there.
#x: character matrix (rows = sequences, named via rownames; cols = sites)
#y: rowname of the reference sequence
#Returns a named numeric vector (one value per row; 0 when no overlap).
pairwise.inf.sites<-function(x, y) {
  #Normalize: treat ambiguous and missing characters as gaps
  new.align<-x
  new.align[new.align == "n"]<-"-"
  new.align[is.na(new.align)]<-"-"
  ref<-new.align[rownames(new.align) == y,]
  #Vectorized per-row comparison (replaces the former per-column double loop)
  summary.data<-apply(new.align, 1, function(tar) {
    #Columns where both sequences have a base
    both<-ref != "-" & tar != "-"
    n.overlap<-sum(both)
    if (n.overlap == 0) { return(0) }
    sum(ref[both] != tar[both]) / n.overlap
  })
  #Guard against any residual NaN (e.g. zero-overlap rows)
  summary.data[is.nan(summary.data)]<-0
  names(summary.data)<-rownames(new.align)
  return(summary.data)
}
#Writes an alignment matrix in phylip format.
#x: matrix whose as.character() method preserves dimensions (e.g. an ape
#   DNAbin matrix); rownames are the taxon labels
#file: output path; "" prints to the console
#interleave: FALSE for sequential output, or an integer of sites per block
#strict: strict phylip names, truncated/padded (with "*") to `truncate` chars
#truncate: name width used when strict = TRUE (new, backward-compatible
#   parameter; previously `truncate` was an undefined variable here, so any
#   strict = TRUE call errored by resolving to base::truncate)
write.phy<-function (x, file = "", interleave = FALSE, strict = FALSE, truncate = 10){
  ntax <- nrow(x)
  nchar <- ncol(x)
  taxnames <- rownames(x)
  if (strict) {
    #Truncate, then pad every name with "*" out to exactly `truncate` chars
    taxnames <- substring(taxnames, 1, truncate)
    missing <- truncate - unlist(lapply(strsplit(taxnames, ""),
      length))
    for (i in seq(along = taxnames)) taxnames[i] <- paste(taxnames[i],
      paste(rep("*", missing[i]), collapse = ""), sep = "")
    if (any(duplicated(taxnames)))
      cat("WARNING: Truncation of taxon names created",
        "identical strings.")
  }
  else {
    #Relaxed phylip: pad names so sequences line up, plus 3 extra spaces
    xx <- nchar(taxnames)
    diff <- max(xx) - xx + 3
    for (i in 1:ntax) taxnames[i] <- paste(taxnames[i], paste(rep(" ",
      diff[i]), collapse = ""), sep = "")
  }
  #interleave = FALSE means one block containing every site
  if (!interleave)
    interleave <- nchar
  #pt holds the [start, end] site range of each interleaved block
  nbpart <- ceiling(nchar/interleave)
  pt <- matrix(nrow = nbpart, ncol = 2)
  pt[1, ] <- c(1, interleave)
  if (nbpart > 1)
    for (i in 2:(dim(pt)[1])) {
      pt[i, ] <- c(pt[i - 1, 2] + 1, pt[i - 1, 2] + interleave)
      pt[nbpart, 2] <- nchar
    }
  phy <- paste(ntax, nchar)
  for (i in seq(along = pt[, 1])) {
    sm <- as.character(x[, pt[i, 1]:pt[i, 2]])
    if (is.null(dim(sm)))
      sm <- as.matrix(sm, ncol = 1)
    sm <- apply(sm, 1, paste, collapse = "")
    #Taxon names appear only in the first block
    if (i == 1)
      sm <- paste(taxnames, sm)
    #Blank line between interleaved blocks
    if (i < max(seq(along = pt[, 1])))
      sm <- c(sm, "")
    phy <- c(phy, sm)
  }
  if (file == "") {
    cat(phy, sep = "\n")
  }
  else {
    write(phy, file = file)
  }
}
#################################################################
#Step 1: Gather read data and assemble mitochondrial genomes
#################################################################
#Creates output directories and collects the per-sample read files
dir.create(paste(work.dir, "/", out.dir, sep = ""))
dir.create(paste(work.dir, "/", out.dir, "/", "Species_mtGenomes", sep = ""))
#Sets up the reads
trim.cds<-FALSE #defaults to no trimming for coding sequence. Usually destroys mtGenes
setwd(paste(work.dir, "/", "Processed_Samples", sep = ""))
raw.dir<-"assembly-reads" #Directory of reads used for assembly. Only modify if the assembly reads are problematic
files<-list.files(path = ".", full.names = F, recursive = T)
reads<-files[grep(pattern = raw.dir, x = files)]
#Copies the reference genome and gene files into the output directory
system(paste("cp ../", reference, " ", work.dir, "/", out.dir, sep = ""))
system(paste("cp ../", gene.file, " ", work.dir, "/", out.dir, sep = ""))
setwd(paste(work.dir, "/", out.dir, sep = ""))
#Resume support: skip samples that already have an assembled mtGenome
sample.names = list.dirs(paste0(work.dir, "/Processed_Samples"), recursive = F, full.names = F)
done.names = list.files(paste0(work.dir, "/mtGenomes/Species_mtGenomes"))
if (resume == TRUE){
done.names = list.files(paste0(work.dir, "/mtGenomes/Species_mtGenomes"))
samples = sample.names[!sample.names %in% done.names]
} else { samples = sample.names }
#Iterative bait-and-assemble: bbmap pulls reads matching the current seed,
#SPAdes reassembles, and the result becomes the next seed until the assembly
#length stops growing (or hits the iteration cap).
if (length(samples) != 0){
for (i in 1:length(samples)){
#Change to main directory
setwd(paste(work.dir, "/", out.dir, sep = ""))
#Gets reads together for this sample; reset per-sample state
min.id<-"0.7"
reference<-"reference.fa" #Name of the reference. Includes many frog mtGenomes.
sample.reads<-reads[grep(samples[i], reads)]
read1<-paste(work.dir, "/Processed_Samples/", sample.reads[grep("READ1", sample.reads)], sep = "")
read2<-paste(work.dir, "/Processed_Samples/", sample.reads[grep("READ2", sample.reads)], sep = "")
read3<-paste(work.dir, "/Processed_Samples/", sample.reads[grep("singleton", sample.reads)], sep = "")
#Pick out reads matching the mt reference genomes (initial bait)
system(paste("bbmap.sh -Xmx8g ref=reference.fa", " in1=", read1, " in2=", read2, " vslow k=12 minid=",min.id,
" outm1=read1.fq outm2=read2.fq", sep = ""), ignore.stderr = T)
system(paste("bbmap.sh -Xmx8g ref=reference.fa", " in=", read3, " vslow k=12 minid=", min.id,
" outm=singleton.fq", sep = ""), ignore.stderr = T)
#Creates a bam alignment file of reads mapped to reference
# system(paste("bwa mem -t ", threads, " ref ", read1, " ", read2,
# " | samtools sort -n -@", threads, " -O BAM -o paired.bam -", sep = ""))
#Pulls out the pairs where single reads match to the ref
# system(paste("samtools view -bh -F 4 -f 8 paired.bam > out1.bam", sep = ""))
#system(paste("samtools view -bh -F 8 -f 4 paired.bam > out2.bam", sep = ""))
#Pulls out the pairs where both reads match to the ref
# system(paste("samtools view -bh -F 12 paired.bam > out3.bam", sep = ""))
#Merges and sorts bam file
#system(paste("samtools cat out1.bam out2.bam out3.bam | ",
# "samtools sort -n -@", threads, " -O BAM -o match_paired.bam -", sep = ""))
# system(paste("bedtools bamtofastq -i match_paired.bam -fq output_r1.fastq -fq2 output_r2.fastq", sep = ""))
system("touch current_seed.fasta")
new.len<-0
counter<-0
repeat.counter<-0
seeding<-T
while (seeding == T){
#Copy new reference to do recursively
counter<-counter+1
prev.len<-new.len
#skips the first one since its already done;
#from iteration 2 on, bait against the current seed and pool with
#the original baited reads (o_*.fq, renamed below on iteration 1)
if (counter >= 2){
#Pick out matching reads to mt Genomes
system(paste("bbmap.sh -Xmx8g ref=current_seed.fasta", " in1=", read1, " in2=", read2, " vslow k=12 minid=",min.id,
" outm1=t_read1.fq outm2=t_read2.fq", sep = ""), ignore.stderr = T)
system(paste("bbmap.sh -Xmx8g ref=current_seed.fasta", " in=", read3, " vslow k=12 minid=", min.id,
" outm=t_singleton.fq", sep = ""), ignore.stderr = T)
system(paste("cat t_read1.fq o_read1.fq >> read1.fq"))
system(paste("cat t_read2.fq o_read2.fq >> read2.fq"))
system(paste("cat t_singleton.fq o_singleton.fq >> singleton.fq"))
system("rm t_read1.fq t_read2.fq t_singleton.fq")
}#end counter if
#Run SPAdes on the baited reads
k<-c(9,13,21,33,55,77,99,127)
k.val<-paste(k, collapse = ",")
system(paste("spades.py --pe1-1 read1.fq --pe1-2 read2.fq --pe1-s singleton.fq",
" -o spades -k ",k.val," --careful -t ", threads, " -m ", mem, sep = ""), ignore.stdout = T)
#If SPAdes produced no contigs, retry with progressively fewer k-mers
while (file.exists("spades/contigs.fasta") == F){
#subtract Ks until it works
system("rm -r spades")
k<-k[-length(k)]
if (length(k) == 0) { break }
k.val<-paste(k, collapse = ",")
min.id<-"0.6"
system(paste("spades.py --pe1-1 read1.fq --pe1-2 read2.fq --pe1-s singleton.fq",
" -o spades -k ",k.val," --careful -t ", threads, " -m ", mem, sep = ""), ignore.stdout = T)
}#end while
#If the k-mers are all used up, nothing can be assembled: clean up and stop.
#NOTE(review): the bare paste() result below is discarded; likely meant print()
if (length(k) == 0) {
paste("k-mer values all used up, cannot assemble!")
system("rm read1.fq read2.fq singleton.fq t_read1.fq t_read2.fq t_singleton.fq o_read1.fq o_read2.fq o_singleton.fq")
system("rm -r spades")
seeding = F
}# end if
#On the first pass, stash the original baited reads as o_*.fq for re-pooling
if (counter == 1){
system(paste("mv read1.fq o_read1.fq"))
system(paste("mv read2.fq o_read2.fq"))
system(paste("mv singleton.fq o_singleton.fq"))
}
system("cp spades/contigs.fasta current_seed.fasta")
if (counter >= 2) { system("rm read1.fq read2.fq singleton.fq") }
system("rm -r spades")
reference<-"current_seed.fasta"
#Check size: total assembled length (fasta lines minus headers)
temp.count<-scan(file = "current_seed.fasta", what = "character")
new.len<-sum(nchar(temp.count[-grep(">", temp.count)]))
no.contigs<-length(temp.count[grep(">", temp.count)])
print(paste("iteration ", counter, " complete!", sep = ""))
print(paste("new length: ", new.len, ". Old length: ", prev.len, sep = ""))
#Converged (no growth) or hit the 20-iteration cap: stop seeding
if (new.len == prev.len || counter == 20){
seeding<-F
system("rm o_read1.fq o_read2.fq o_singleton.fq")
print(paste("mitogenome complete after ", counter, " iterations!", sep = ""))
min.id<-"0.7"
} #end if
#If the assembly gets too large, it's likely expanding through repeats:
#collapse similar contigs with cap3 and raise the match stringency
if (new.len >= 23000){
#runs cap3 to merge similar contigs (pull only clustered contigs out?)
system(paste("cap3 current_seed.fasta -z 1 -o 16 -e 11 -s 251", " > ",
"log.fasta.cap.txt", sep = ""))
#Reads in results files
temp.assembled<-scanFa(FaFile(paste("current_seed.fasta.cap.contigs", sep = "")))
temp.singlets<-scanFa(FaFile(paste("current_seed.fasta.cap.singlets", sep = "")))
keep.singlets<-temp.singlets[width(temp.singlets) >= 100]
final.save<-append(temp.assembled, keep.singlets)
#Writes the collapsed contigs back out as the new seed
write.loci<-as.list(as.character(final.save))
write.fasta(sequences = write.loci, names = names(write.loci),
"current_seed.fasta", nbchar = 1000000, as.string = T)
#Get cap3 files and deletes
cap.files<-list.files(pattern = "", full.names = F, recursive = F)
cap.remove<-cap.files[grep(pattern = paste("fasta.cap*.", sep =""), x = cap.files)]
system(paste("rm ", paste(cap.remove, collapse = " ") ))
min.id<-"0.95"
#makes sure this doesn't go on forever and ever
repeat.counter<-repeat.counter+1
if (repeat.counter >= 5){
print(paste("repeat counter hit 5"))
system("rm o_read1.fq o_read2.fq o_singleton.fq")
seeding<-F
}#end if
}#end length >= 23,000 if
}#end while
#Save finished genome
contigs<-scanFa(FaFile("current_seed.fasta")) # loads up fasta file
#Skips if there are none
if (length(contigs) == 0){ next }
#Tries to merge contigs if there are more than 1
if (length(contigs) >= 2){
#runs cap3 to merge similar contigs (pull only clustered contigs out?)
system(paste("cap3 current_seed.fasta -z 1 -o 16 -e 11 -s 251", " > ",
"log.fasta.cap.txt", sep = ""))
#Reads in results files
temp.assembled<-scanFa(FaFile(paste("current_seed.fasta.cap.contigs", sep = "")))
temp.singlets<-scanFa(FaFile(paste("current_seed.fasta.cap.singlets", sep = "")))
keep.singlets<-temp.singlets[width(temp.singlets) >= 100]
contigs<-append(temp.assembled, keep.singlets)
#Get cap3 files and deletes
cap.files<-list.files(pattern = "", full.names = F, recursive = F)
cap.remove<-cap.files[grep(pattern = paste("fasta.cap*.", sep =""), x = cap.files)]
system(paste("rm ", paste(cap.remove, collapse = " ") ))
}#end if
#Too little total sequence to be worth annotating
if (sum(width(contigs)) <= 1000) {
print("less than 1000bp, not enough data to extract")
next
}
#Writes the full mitochondrial genome file
system("rm current_seed.fasta")
names(contigs)<-paste("sequence_", seq(1:length(contigs)), sep = "")
write.loci<-as.list(as.character(contigs))
write.fasta(sequences = write.loci, names = names(write.loci),
paste("Species_mtGenomes/", samples[i], ".fa", sep = ""), nbchar = 1000000, as.string = T)
}#end i loop
system("rm -r ref")
}#end if
#################################################################
#Step 2: Assess completeness of the mitochondrial genome and annotate
#################################################################
#Extracts each reference mt gene from every species' assembled mtGenome by
#BLAT-matching the gene set (query) against the per-species contigs (target).
#Column names of BLAT's headerless pslx output, used to label fread() results
headers<-c("matches", "misMatches", "repMatches", "nCount", "qNumInsert", "qBaseInsert", "tNumInsert", "tBaseInsert", "strand", "qName",
"qSize", "qStart", "qEnd", "tName", "tSize", "tStart", "tEnd", "blockCount", "blockSize", "qStarts", "tStarts", "qSeq", "tSeq")
#Creates new directory and enters this working directory
setwd(paste(work.dir, "/", out.dir, sep = ""))
dir.create("Species_Loci")
spp.samples<-list.files("Species_mtGenomes/.")
spp.samples<-gsub(".fa$", "", spp.samples)
for (i in 1:length(spp.samples)){
#Load in the data
contigs<-scanFa(FaFile(paste("Species_mtGenomes/", spp.samples[i], ".fa", sep = ""))) # loads up fasta file
#Matches samples to loci: pblat writes per-gene hits into mt_to_genes.pslx
system(paste("mpirun pblat -threads=", threads, " Species_mtGenomes/", spp.samples[i], ".fa ",
gene.file, " -tileSize=8 -minIdentity=60",
" -noHead -out=pslx mt_to_genes.pslx", sep = ""), ignore.stdout = T)
#scan() is used only to detect an empty (no-hit) pslx before fread() would fail
temp.count<-scan(file = "mt_to_genes.pslx", what = "character")
if (length(temp.count) == 0){
print("No matching mitochondrial genes were found.")
next
}
match.data<-fread("mt_to_genes.pslx", sep = "\t", header = F, stringsAsFactors = FALSE)
setnames(match.data, headers)
loci.names<-unique(match.data$qName)
sep.loci<-DNAStringSet()
for (j in 1:length(loci.names)){
#pulls out data that matches to multiple contigs;
#keeps only the single best-scoring hit (max matches) for this gene
sub.data<-match.data[match.data$qName %in% loci.names[j],]
sub.data<-sub.data[sub.data$matches == max(sub.data$matches),][1]
#Extend target coordinates so the extracted slice spans the full query gene.
#NOTE(review): assumes pblat's 0-based qStart/qEnd; the asymmetric +1 between
#the two strands looks intentional but should be confirmed against the PSL spec.
if (sub.data$strand == "+"){
#Cuts the node apart and saves separately
sub.data$tStart<-sub.data$tStart-sub.data$qStart+1
#Fixes ends
sub.data$tEnd<-sub.data$tEnd+(sub.data$qSize-sub.data$qEnd)
} else {
sub.data$tStart<-sub.data$tStart-(sub.data$qSize-sub.data$qEnd)
#Fixes ends
sub.data$tEnd<-sub.data$tEnd+sub.data$qStart+1
}
#If it ends up with a negative start, clamp to the contig's first base
if (sub.data$tStart <= 0){ sub.data$tStart<-1}
#Fixes if the contig is smaller than the full target locus
if (sub.data$tEnd >= sub.data$tSize) { sub.data$tEnd<-sub.data$tSize }
#Gets start and end (order-independent, in case of reversed coordinates)
start.pos<-min(sub.data$tStart, sub.data$tEnd)
end.pos<-max(sub.data$tStart, sub.data$tEnd)
temp.contig<-contigs[names(contigs) == sub.data$tName]
new.seq<-subseq(x = temp.contig, start = start.pos, end = end.pos)
names(new.seq)<-sub.data$qName
sep.loci<-append(sep.loci, new.seq)
}#end j loop
#Writes all extracted genes for this species into one fasta file
write.loci<-as.list(as.character(sep.loci))
write.fasta(sequences = write.loci, names = names(write.loci),
paste("Species_Loci/", spp.samples[i], "_mito_genes.fa", sep = ""), nbchar = 1000000, as.string = T)
system("rm mt_to_genes.pslx")
}#end i loop
#################################################################
#Step 3: Create alignments
#################################################################
#For each reference gene: collect that gene across species, align with MAFFT,
#drop divergent sequences, re-align, and trim to the reference's extent.
setwd(paste(work.dir, "/", out.dir, sep = ""))
#Sets up the loci to align
ref.data<-scanFa(FaFile(gene.file))
species.names<-list.files("Species_Loci/.", full.names = F)
species.names<-species.names[species.names != ""]
dir.create("mtGenes_Fastas")
dir.create("mtGenes_Aligned")
#Aligns each potential locus
for (i in 1:length(ref.data)){
##############
#STEP 1: Gets the locus data from each species
##############
#Gets all species data
final.gene<-DNAStringSet()
for (j in 1:length(species.names)){
#Looks for this gene in the species data
spp.data<-scanFa(FaFile(paste("Species_Loci/", species.names[j], sep = ""))) # loads up fasta file
spp.gene<-spp.data[names(spp.data) == names(ref.data)[i]]
#Skips if none
if (length(spp.gene) == 0){ next }
#Renames the sequence to the species (file) name
names(spp.gene)<-gsub("_mito_genes.fa", "", species.names[j])
final.gene<-append(final.gene, spp.gene)
}#end j loop
##############
#STEP 2: Sets up for alignment
##############
#Checks for a minimum length: each sequence must cover min.prop of the reference
final.gene<-final.gene[width(final.gene) >= width(ref.data)[i]*as.numeric(min.prop)]
#Checks for minimum taxa number
if (length(names(final.gene)) <= min.taxa){
print(paste(names(ref.data)[i], " had too few taxa", sep = ""))
next
}
#Adds reference locus (named "Reference", used later for filtering/trimming)
final.gene<-append(final.gene, ref.data[i])
names(final.gene)[length(final.gene)]<-"Reference"
final.loci<-as.list(as.character(final.gene))
#Saves to folder to run with mafft
write.fasta(sequences = final.loci, names = names(final.loci),
paste("mtGenes_Fastas/", names(ref.data)[i], ".fa", sep = ""), nbchar = 1000000, as.string = T)
##############
#STEP 3: Runs MAFFT to align
##############
#rRNAs optionally use mafft-qinsi, which accounts for secondary structure
mafft.cmd<-"mafft"
if (names(ref.data)[i] == "12S_rRNA" || names(ref.data)[i] == "16S_rRNA"){
if (secondary.structure == TRUE){ mafft.cmd<-"mafft-qinsi" } else { mafft.cmd<-"mafft" }
}
#Runs the mafft command
system(paste(mafft.cmd, " --localpair --maxiterate 1000 --adjustdirection --quiet --op 3 --ep 0.123",
" --thread ", threads, " ", "mtGenes_Fastas/", names(ref.data)[i], ".fa",
" > ", "mtGenes_Fastas/", names(ref.data)[i], "_align.fa", sep = ""))
alignment<-scanFa(FaFile(paste("mtGenes_Fastas/", names(ref.data)[i], "_align.fa", sep = ""))) # loads up fasta file
#Reverses alignment back to the correct orientation: mafft prefixes flipped
#sequences with _R_, so if the Reference was flipped, flip everything back
reversed<-names(alignment)[grep(pattern = "_R_", names(alignment))]
if (length(reversed[grep(pattern = "Reference", reversed)]) == 1){ alignment<-reverseComplement(alignment) }
#Renames sequences to get rid of _R_
names(alignment)<-gsub(pattern = "_R_", replacement = "", x = names(alignment))
new.align<-strsplit(as.character(alignment), "")
mat.align<-lapply(new.align, tolower)
m.align<-as.matrix(as.DNAbin(mat.align))
#Filters out weirdly divergent sequences (>= 45% mismatch vs Reference)
diff<-pairwise.inf.sites(as.character(m.align), "Reference")
bad.seqs<-names(diff)[which(diff >= 0.45)]
rem.align<-alignment[!names(alignment) %in% bad.seqs]
# Moves onto the next locus if there are no good sequences left
if (length(rem.align) <= as.numeric(min.taxa)){
#Deletes old files
system(paste("rm ", "mtGenes_Fastas/", names(ref.data)[i], "_align.fa", sep = ""))
print(paste(names(ref.data)[i], " had too few taxa", sep = ""))
next }
### realign if bad seqs were removed (only worthwhile for loci >= 200 bp)
if (length(bad.seqs) != 0 && width(ref.data)[i] >= 200){
#Aligns using mafft
print(paste(names(ref.data)[i], " was realigned", sep = ""))
#Saves to folder to run with mafft
final.loci<-as.list(as.character(rem.align))
#Saves to folder to run with mafft
write.fasta(sequences = final.loci, names = names(final.loci),
paste("mtGenes_Fastas/", names(ref.data)[i], ".fa", sep = ""), nbchar = 1000000, as.string = T)
system(paste(mafft.cmd, " --localpair --maxiterate 1000 --adjustdirection --quiet --op 3 --ep 0.123",
" --thread ", threads, " ", "mtGenes_Fastas/", names(ref.data)[i], ".fa",
" > ", "mtGenes_Fastas/", names(ref.data)[i], "_align.fa", sep = ""))
alignment<-scanFa(FaFile(paste(work.dir, "/", out.dir, "/mtGenes_Fastas/", names(ref.data)[i], "_align.fa", sep = ""))) # loads up fasta file
#Reverses alignment back to the correct orientation (same logic as above)
reversed<-names(alignment)[grep(pattern = "_R_", names(alignment))]
if (length(reversed[grep(pattern = "Reference", reversed)]) == 1){ alignment<-reverseComplement(alignment) }
#Renames sequences to get rid of _R_
names(alignment)<-gsub(pattern = "_R_", replacement = "", x = names(alignment))
} # end bad.seqs if
#Removes the edge gaps: trim columns to the Reference's first/last non-gap site
ref.aligned<-as.character(alignment['Reference'])
not.gaps<-str_locate_all(ref.aligned, pattern = "[^-]")[[1]][,1]
ref.start<-min(not.gaps)
ref.finish<-max(not.gaps)
trim.align<-subseq(alignment, ref.start, ref.finish)
#readies for saving as a character matrix via DNAbin
write.temp<-strsplit(as.character(trim.align), "")
aligned.set<-as.matrix(as.DNAbin(write.temp) )
write.phy(aligned.set, file=paste("mtGenes_Aligned/", names(ref.data)[i], ".phy", sep = ""), interleave = F)
}#end i loop
#################################################################
#Step 4: Create alignments and partition by codon
#################################################################
#Create directory and loci to trim
dir.create("mtGenes_Trimmed", showWarnings = FALSE)
locus.names<-list.files("mtGenes_Aligned/.")
#So it doesn't trim the cds
#BUG FIX: 'no.trim' is referenced unconditionally inside the loop below
#(grep(locus.names[i], no.trim)), so it must exist even when
#trim.cds == TRUE. Default it to an empty character vector; grep() against
#character(0) simply never matches, preserving the original behaviour.
no.trim <- character(0)
if (trim.cds == FALSE){ no.trim<-locus.names[grep("CDS", locus.names)] }#end if
#Loops through each locus and does operations on them
# Per-locus trimming loop: reads each aligned locus, drops the Reference row,
# writes short or protected (CDS) loci out untrimmed, otherwise trims via
# external Gblocks and/or trimAl system calls, then saves a .phy alignment.
for (i in 1:length(locus.names)){
##############
#STEP 1: Basic steps
##############
#Reads in files
align<-readAAMultipleAlignment(file = paste("mtGenes_Aligned/", locus.names[i], sep =""), format = "phylip")
#Use taxa remove
# Keep only sample taxa; the "Reference" row is excluded from output.
tax.names<-rownames(align)
tax.names<-tax.names[!tax.names %in% "Reference"]
new.align<-strsplit(as.character(align), "")
mat.align<-lapply(new.align, tolower)
m.align<-as.matrix(as.DNAbin(mat.align))
t.align<-m.align[rownames(m.align) %in% tax.names,]
# Remember the row names so they can be restored after trimming (step 4).
save.rownames<-rownames(t.align)
#removes too short loci
# Loci shorter than min.len are written out as-is, untrimmed.
if (ncol(align) <= as.numeric(min.len)){
write.phy(t.align, file= paste("mtGenes_Trimmed/", locus.names[i], sep = ""), interleave = F)
next
}
#So it doesn't trim the cds
# NOTE(review): locus.names[i] is used here as a regex pattern; '.' in a
# file name matches any character and can over-match -- confirm, or use
# fixed = TRUE. This test also requires 'no.trim' to be defined even when
# trim.cds == TRUE.
if (length(grep(locus.names[i], no.trim)) != 0) {
write.phy(t.align, file= paste("mtGenes_Trimmed/", locus.names[i], sep = ""), interleave = F)
next
}#end if
#Trims the intron data
# Convert to upper-case strings and write a temporary fasta that the
# external trimming tools below modify in place.
t.loci<-as.character(as.list(t.align))
w.loci<-lapply(t.loci, toupper)
write.align<-lapply(w.loci, c2s)
input.file<-paste("mtGenes_Trimmed/", gsub(pattern = "\\..*", "", locus.names[i]), ".fa", sep = "")
write.fasta(sequences = write.align, names = names(write.align),
input.file, nbchar = 1000000, as.string = T)
##############
#STEP 2: GBLOCKS
##############
if (gblocks == TRUE){
system(paste("Gblocks ", input.file, " -t=d -b1=50 -b2=50 -b5=h ", sep = ""))
# Gblocks writes <file>-gb plus an html report; keep only the trimmed fasta.
system(paste("rm ", input.file, " ", input.file, "-gb.htm", sep = ""))
system(paste("mv ", input.file, "-gb ", input.file, sep = ""))
}
##############
#STEP 3: TrimAI
##############
if (trimal == TRUE){
#system(paste("trimal -in ", input.file, " -out ", input.file, "-tm ",
# "-gt 0.75 -st 0.001 -cons 60 -resoverlap 0.75 -seqoverlap 50 -automated1", sep = ""))
system(paste("trimal -in ", input.file, " -out ", input.file, "-tm -automated1", sep = ""))
system(paste("rm ", input.file, sep = ""))
system(paste("mv ", input.file, "-tm ", input.file, sep = ""))
}
##############
#STEP 4: Save as .phy file
##############
# NOTE(review): ".fa" is an unanchored regex pattern; use "\\.fa$" or
# fixed = TRUE if a stem could contain ".fa" elsewhere.
locus.save.name<-gsub(pattern = ".fa", replacement = ".phy", x = input.file)
alignment<-scanFa(FaFile(input.file)) # loads up fasta file
# NOTE(review): a single NA sequence name aborts the ENTIRE loop via
# 'break' (not just this locus) -- confirm this is intended rather than
# 'next'.
temp<-names(alignment)[is.na(names(alignment)) == T]
if (length(temp) > 0){ break }
# Map each (possibly tool-altered) sequence name back to its saved full
# row name via pattern matching.
new.names<-c()
for (j in 1:length(names(alignment))){
new.names[j]<-save.rownames[grep(pattern = names(alignment)[j], x = save.rownames)]
}
names(alignment)<-new.names
#removes loci with too few taxa
if (length(names(alignment)) <= as.numeric(min.taxa)){
system(paste("rm ", input.file, sep = ""))
print(paste(input.file, "deleted. Too few taxa after trimming."))
# Fall back to the untrimmed alignment for this locus.
write.phy(t.align, file= paste("mtGenes_Trimmed/", locus.names[i], sep = ""), interleave = F)
next
}
write.temp<-strsplit(as.character(alignment), "")
aligned.set<-as.matrix(as.DNAbin(write.temp) )
#readies for saving
write.phy(aligned.set, file= locus.save.name, interleave = F)
system(paste("rm ", input.file, sep = ""))
}
# END SCRIPT
|
alldata <- read.table('t1II_stim.txt', header=F)
names(alldata) <- c('cat', 'cat2', 'length', 'orient')
plot(alldata$length, alldata$orient) | /4-category/stim_test.r | no_license | nathanblanco/experiment-archive | R | false | false | 142 | r | alldata <- read.table('t1II_stim.txt', header=F)
names(alldata) <- c('cat', 'cat2', 'length', 'orient')
plot(alldata$length, alldata$orient) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RemoveHyphens.R
\docType{class}
\name{RemoveHyphens}
\alias{RemoveHyphens}
\title{RemoveHyphens}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
RemoveHyphens$new(x)$execute()
}
\arguments{
\item{x}{Object of to be processed. This can be an object of the Corpus, Document, character classes, or a list of character vectors.}
}
\description{
\code{RemoveHyphens} Removes hyphens from text.
}
\section{Methods}{
\describe{
\item{\code{new(...)}}{Instantiates the class object.}
\item{\code{execute()}}{Executes the process on the object.}
}
}
\section{TextStudio Family of Classes Overview}{
The TextStudio family of classes include four types of classes. The manager classes which
are responsible for executing one or more core class objects through the implementation
of a command class object, supported by support class objects.
}
\section{DataStudio Design}{
This DataStudio family of classes is an implementation of the Strategy
design pattern, as described in the book "Design Patterns: Elements of Reusable
Object-Oriented Software" by Erich Gamma, Richard Helm, Ralph Johnson
and John Vlissides (hence Gang of Four). The Strategy pattern defines
the family of text cleaning classes, encapsulates each class, and allows
text cleaning algorithms to be selected and exchanged at runtime.
}
\seealso{
Other TextStudio Classes: \code{\link{AddCommaSpaceCmd}},
\code{\link{AddCommaSpace}}, \code{\link{LowerCaseCmd}},
\code{\link{LowerCase}}, \code{\link{RemoveEmailCmd}},
\code{\link{RemoveEmail}},
\code{\link{RemoveHyphensCmd}},
\code{\link{RemoveNumbersCmd}},
\code{\link{RemoveNumbers}},
\code{\link{RemovePunctCmd}}, \code{\link{RemovePunct}},
\code{\link{RemoveSymbolsCmd}},
\code{\link{RemoveSymbols}},
\code{\link{RemoveTwitterCmd}},
\code{\link{RemoveTwitter}}, \code{\link{RemoveURLCmd}},
\code{\link{RemoveURL}},
\code{\link{RemoveWhiteSpaceCmd}},
\code{\link{RemoveWhiteSpace}}, \code{\link{RepairFile}},
\code{\link{ReplaceAbbreviationsCmd}},
\code{\link{ReplaceAbbreviations}},
\code{\link{ReplaceBacktickCmd}},
\code{\link{ReplaceBacktick}},
\code{\link{ReplaceContractionsCmd}},
\code{\link{ReplaceContractions}},
\code{\link{ReplaceCurlyQuotesCmd}},
\code{\link{ReplaceCurlyQuotes}},
\code{\link{ReplaceEmojiCmd}},
\code{\link{ReplaceEmoji}},
\code{\link{ReplaceEmoticonCmd}},
\code{\link{ReplaceEmoticon}},
\code{\link{ReplaceHTMLCmd}}, \code{\link{ReplaceHTML}},
\code{\link{ReplaceInternetSlangCmd}},
\code{\link{ReplaceInternetSlang}},
\code{\link{ReplaceKernCmd}}, \code{\link{ReplaceKern}},
\code{\link{ReplaceNonAsciiCmd}},
\code{\link{ReplaceNonAscii}},
\code{\link{ReplaceNumbersCmd}},
\code{\link{ReplaceNumbers}},
\code{\link{ReplaceOrdinalCmd}},
\code{\link{ReplaceOrdinal}},
\code{\link{ReplaceSymbolCmd}},
\code{\link{ReplaceSymbol}},
\code{\link{ReplaceTokensCmd}},
\code{\link{ReplaceTokens}},
\code{\link{ReplaceWordElongationCmd}},
\code{\link{ReplaceWordElongation}},
\code{\link{ReshapeCmd}}, \code{\link{Reshape}},
\code{\link{SplitCorpus}}, \code{\link{SplitDocument}},
\code{\link{StripTextCmd}}, \code{\link{StripText}}
}
\author{
John James, \email{jjames@dataScienceSalon.org}
}
\keyword{datasets}
| /man/RemoveHyphens.Rd | no_license | john-james-ai/NLPStudio-2.0 | R | false | true | 3,342 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RemoveHyphens.R
\docType{class}
\name{RemoveHyphens}
\alias{RemoveHyphens}
\title{RemoveHyphens}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
RemoveHyphens$new(x)$execute()
}
\arguments{
\item{x}{Object of to be processed. This can be an object of the Corpus, Document, character classes, or a list of character vectors.}
}
\description{
\code{RemoveHyphens} Removes hyphens from text.
}
\section{Methods}{
\describe{
\item{\code{new(...)}}{Instantiates the class object.}
\item{\code{execute()}}{Executes the process on the object.}
}
}
\section{TextStudio Family of Classes Overview}{
The TextStudio family of classes include four types of classes. The manager classes which
are responsible for executing one or more core class objects through the implementation
of a command class object, supported by support class objects.
}
\section{DataStudio Design}{
This DataStudio family of classes is an implementation of the Strategy
design pattern, as described in the book "Design Patterns: Elements of Reusable
Object-Oriented Software" by Erich Gamma, Richard Helm, Ralph Johnson
and John Vlissides (hence Gang of Four). The Strategy pattern defines
the family of text cleaning classes, encapsulates each class, and allows
text cleaning algorithms to be selected and exchanged at runtime.
}
\seealso{
Other TextStudio Classes: \code{\link{AddCommaSpaceCmd}},
\code{\link{AddCommaSpace}}, \code{\link{LowerCaseCmd}},
\code{\link{LowerCase}}, \code{\link{RemoveEmailCmd}},
\code{\link{RemoveEmail}},
\code{\link{RemoveHyphensCmd}},
\code{\link{RemoveNumbersCmd}},
\code{\link{RemoveNumbers}},
\code{\link{RemovePunctCmd}}, \code{\link{RemovePunct}},
\code{\link{RemoveSymbolsCmd}},
\code{\link{RemoveSymbols}},
\code{\link{RemoveTwitterCmd}},
\code{\link{RemoveTwitter}}, \code{\link{RemoveURLCmd}},
\code{\link{RemoveURL}},
\code{\link{RemoveWhiteSpaceCmd}},
\code{\link{RemoveWhiteSpace}}, \code{\link{RepairFile}},
\code{\link{ReplaceAbbreviationsCmd}},
\code{\link{ReplaceAbbreviations}},
\code{\link{ReplaceBacktickCmd}},
\code{\link{ReplaceBacktick}},
\code{\link{ReplaceContractionsCmd}},
\code{\link{ReplaceContractions}},
\code{\link{ReplaceCurlyQuotesCmd}},
\code{\link{ReplaceCurlyQuotes}},
\code{\link{ReplaceEmojiCmd}},
\code{\link{ReplaceEmoji}},
\code{\link{ReplaceEmoticonCmd}},
\code{\link{ReplaceEmoticon}},
\code{\link{ReplaceHTMLCmd}}, \code{\link{ReplaceHTML}},
\code{\link{ReplaceInternetSlangCmd}},
\code{\link{ReplaceInternetSlang}},
\code{\link{ReplaceKernCmd}}, \code{\link{ReplaceKern}},
\code{\link{ReplaceNonAsciiCmd}},
\code{\link{ReplaceNonAscii}},
\code{\link{ReplaceNumbersCmd}},
\code{\link{ReplaceNumbers}},
\code{\link{ReplaceOrdinalCmd}},
\code{\link{ReplaceOrdinal}},
\code{\link{ReplaceSymbolCmd}},
\code{\link{ReplaceSymbol}},
\code{\link{ReplaceTokensCmd}},
\code{\link{ReplaceTokens}},
\code{\link{ReplaceWordElongationCmd}},
\code{\link{ReplaceWordElongation}},
\code{\link{ReshapeCmd}}, \code{\link{Reshape}},
\code{\link{SplitCorpus}}, \code{\link{SplitDocument}},
\code{\link{StripTextCmd}}, \code{\link{StripText}}
}
\author{
John James, \email{jjames@dataScienceSalon.org}
}
\keyword{datasets}
|
## Coursera Assignment #2 - Caching the Inverse of a Matrix
## R Sangole
# This function accepts an argument of the datatype matrix, in 'mat'
# The function initially sets invr to NULL. This is so that when the cacheSolve
# function is first called, x$getinv() returns a NULL to 'inverse'. This allows
# usage of the if(!is.null(inverse)) formulation in that function to recall a cached
# value.
# Thereafter, 4 functions within the function are defined:
# set: Enables a manual setting of the matrix in 'mat' (Stored in the set function's parent environment
# i.e. makeCacheMatrix function's environment)
# get: Gets the matrix 'mat'
# setinv: Saves the inverse matrix passed to the func to 'invr' (Stored in the setinv function's parent
# environment, i.e. makeCacheMatrix function's environment)
# getinv: Retrieves the inverse matrix stored in invr
# Build a matrix wrapper that can cache its inverse. Returns a list of four
# accessors (set/get/setinv/getinv) closing over the matrix 'mat' and the
# cached inverse 'invr', which lives in this function's environment.
makeCacheMatrix <- function(mat = matrix()) {
  invr <- NULL  # NULL means "inverse not yet computed"
  list(
    # Replace the stored matrix; any cached inverse is now stale, so clear it.
    set = function(y) {
      mat <<- y
      invr <<- NULL
    },
    # Return the stored matrix.
    get = function() mat,
    # Store a computed inverse for later retrieval.
    setinv = function(inv) invr <<- inv,
    # Return the cached inverse, or NULL if none has been stored.
    getinv = function() invr
  )
}
# This function accepts a func list as an argument, in 'x'
# It asks for an inverse via getinv(), and stores in 'inverse'
# If 'inverse' is NOT null, the inverse has been calculated before, and gets returned
# If 'inverse' is null, it needs to be calculated and stored
# Inverse is calculated using solve() and gets stored in the makeCacheMatrix environment
# using setinv()
# Return the inverse of the matrix held in 'x' (a list made by
# makeCacheMatrix). On a cache hit the stored inverse is returned with a
# message; on a miss the inverse is computed with solve(), cached via
# x$setinv(), and returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute, store for future calls, then return.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("Getting cached inverse. Not recalculating.")
  }
  cached
}
| /cachematrix.R | no_license | rsangole/ProgrammingAssignment2 | R | false | false | 2,327 | r | ## Coursera Assignment #2 - Caching the Inverse of a Matrix
## R Sangole
# This function accepts an argument of the datatype matrix, in 'mat'
# The function initially sets invr to NULL. This is so that when the cacheSolve
# function is first called, x$getinv() returns a NULL to 'inverse'. This allows
# usage of the if(!is.null(inverse)) formulation in that function to recall a cached
# value.
# Thereafter, 4 functions within the function are defined:
# set: Enables a manual setting of the matrix in 'mat' (Stored in the set function's parent environment
# i.e. makeCacheMatrix function's environment)
# get: Gets the matrix 'mat'
# setinv: Saves the inverse matrix passed to the func to 'invr' (Stored in the setinv function's parent
# environment, i.e. makeCacheMatrix function's environment)
# getinv: Retrieves the inverse matrix stored in invr
makeCacheMatrix <- function(mat = matrix()) {
invr <- NULL #invr set to NULL every first time makeCacheMatrix is run
set <- function(y){
mat <<- y #Sets 'mat' in makeCacheMatrix func's environment to y
invr <<- NULL #Sets 'invr' in makeCacheMatrix func's envir to NULL, since a new matrix is assigned to mat
}
get <- function(){
mat
}
setinv <- function(inv){
invr <<- inv #sets 'invr' in makeCacheMatrix func's envir to the inverse obtained thru argument 'inv'
}
getinv <- function(){
invr
}
list(set = set, get = get, #function list
setinv = setinv,
getinv = getinv)
}
# This function accepts a func list as an argument, in 'x'
# It asks for an inverse via getinv(), and stores in 'inverse'
# If 'inverse' is NOT null, the inverse has been calculated before, and gets returned
# If 'inverse' is null, it needs to be calculated and stored
# Inverse is calculated using solve() and gets stored in the makeCacheMatrix environment
# using setinv()
cacheSolve <- function(x, ...) {
inverse <- x$getinv() #get existing inverse, possibly NULL
if(!is.null(inverse)) { #if NOT NULL, run this...
message("Getting cached inverse. Not recalculating.")
return(inverse) #return exits the function completely
}
#if NULL, run this...
mat <- x$get() #get the matrix
inverse <- solve(mat, ...) #calculate the inverse
x$setinv(inverse) #store in makeCacheMatrix func's environment
inverse #return inverse
}
|
# Author: Angela Di Serio
# Course: Exploratory Data Analysis
# Date: July 2015
# Course Project 1
# Plot 3
# Script to visualize household energy usage over a 2-day period in February 2007
#-------------------------------------------------------------------------------------
# Step 1.
# This script requires the dplyr and lubridate packages;
# any that are missing are installed first.
list.of.packages <- c("dplyr","lubridate")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
require(dplyr)
require(lubridate)
# Step 2.
# read data from the Electric power consumption dataset from
# https://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption
# The full file is read and then filtered down to the two dates of interest,
# 2007-02-01 and 2007-02-02 (dmy() parses the day/month/year Date column).
# Note: missing values are coded as ?
message("Reading Electric Power Consumption Dataset")
flush.console()
if (!file.exists("./household_power_consumption.txt")) {
stop("File household_power_consumption.txt not found")
}
powCons<- read.table("household_power_consumption.txt",header=T,sep=";",na.strings="?") %>%
filter(dmy(Date)==dmy("01/02/2007") | dmy(Date)==dmy("02/02/2007"))
powCons$Date<-dmy(powCons$Date)
# Step 3.
# Plot of Sub metering: one line per sub-meter over the combined
# date-time axis, plus a legend matching each line colour.
par(mfrow=c(1,1))
plot(strptime(paste(powCons$Date,powCons$Time),format="%Y-%m-%d %H:%M:%S"),powCons$Sub_metering_1,
type="l",xlab="",ylab="Energy sub metering",main="", bg="transparent", col="grey15")
lines(strptime(paste(powCons$Date,powCons$Time),format="%Y-%m-%d %H:%M:%S"),powCons$Sub_metering_2,
col="red",main="", bg="transparent")
lines(strptime(paste(powCons$Date,powCons$Time),format="%Y-%m-%d %H:%M:%S"),powCons$Sub_metering_3,
col="blue",main="", bg="transparent")
legend("topright", lty=1, col = c("grey15", "red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),cex=0.75)
# Step 4. Save the plot to a PNG file (480 x 480 px)
# NOTE(review): dev.copy() duplicates the currently open screen device; in a
# non-interactive/headless session this step may fail -- confirm, or open
# png() before plotting instead.
dev.copy(png, file = "plot3.png",width=480,height=480)
dev.off()
| /plot3.R | no_license | adiserio/ExData_Plotting1 | R | false | false | 2,147 | r | # Author: Angela Di Serio
# Course: Exploratory Data Analysis
# Date: July 2015
# Course Project 1
# Plot 3
# Script to visualize household energy usage over a 2-day period in February 2007
#-------------------------------------------------------------------------------------
# Step 1.
# This script requires the dplyr and lubridate packages
# dplyr and lubridate packages will be installed if necessary
list.of.packages <- c("dplyr","lubridate")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
require(dplyr)
require(lubridate)
# Step 2.
# read data from the Electric power consumption dataset from
# https://archive.ics.uci.edu/ml/datasets/Individual+household+electric+power+consumption
# Since we will only be using data from the dates 2007-02-01 and 2007-02-02,
# only the data from these two days will be read
# Note: missing values are coded as ?
message("Reading Electric Power Consumption Dataset")
flush.console()
if (!file.exists("./household_power_consumption.txt")) {
stop("File household_power_consumption.txt not found")
}
powCons<- read.table("household_power_consumption.txt",header=T,sep=";",na.strings="?") %>%
filter(dmy(Date)==dmy("01/02/2007") | dmy(Date)==dmy("02/02/2007"))
powCons$Date<-dmy(powCons$Date)
# Step 3.
# Plot of Sub metering
par(mfrow=c(1,1))
plot(strptime(paste(powCons$Date,powCons$Time),format="%Y-%m-%d %H:%M:%S"),powCons$Sub_metering_1,
type="l",xlab="",ylab="Energy sub metering",main="", bg="transparent", col="grey15")
lines(strptime(paste(powCons$Date,powCons$Time),format="%Y-%m-%d %H:%M:%S"),powCons$Sub_metering_2,
col="red",main="", bg="transparent")
lines(strptime(paste(powCons$Date,powCons$Time),format="%Y-%m-%d %H:%M:%S"),powCons$Sub_metering_3,
col="blue",main="", bg="transparent")
legend("topright", lty=1, col = c("grey15", "red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),cex=0.75)
# Step 4. Save the plot to a PNG file (480 x 480 px)
dev.copy(png, file = "plot3.png",width=480,height=480)
dev.off()
|
# (1=gill, 2=heart, 3=liver, 4=spleen, 5=kidney)
params<- c("psi_z","psi_organ","p","sifAB","sifBC","sifAC","sifAD","sifBD","sifCD")
inits<- function(t){
list(theta=runif(15),eta=runif(20), beta=runif(20)
)}
# BUNDLE DATA UP
combos<-as.matrix(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1)))
dat<- list(yyy=analysis_dat,nfish=26,combos=combos)
out <- bugs(data=dat,inits=inits,parameters=params,model = filename,
n.chains = 3,n.iter = 500000,n.burnin = 100000, debug=TRUE,n.thin=20,
bugs.directory="C:/Documents and Settings/mcolvin/My Documents/WinBUGS14 - 1",
working.directory=getwd())
out <- bugs(data=dat,inits=inits,parameters=params,model = filename,
n.chains = 3,n.iter = 5000,n.burnin = 1000, debug=TRUE, bugs.directory="C:/Users/colvinm/Documents/WinBUGS14 - 1",
working.directory=getwd())
out$mean
save(file="./output/ms_mcmc.Rdata", out)
### DIAGNOSTICS AND GOF
out_gof <- as.mcmc.list(out)
plot(out_gof, ask=T)
summary(out_gof) # Notice that rss and rss.sim have similar posteriors
# indicating good fit
##### Calculate a Bayesian P-value (aka Posterior predictive check)
gofMat <- as.matrix(out_gof)
PrGreater <- sum(gofMat[,"rss"] > gofMat[,"rss_sim"]) / nrow(gofMat)
# 0.48
Pr2tail <- 2 * min(PrGreater, 1 - PrGreater) # No evidence of lack of fit
# 0.96
#### Generic convergence diagnostics
gelman.diag(out_gof)
gelman.plot(out_gof)
geweke.diag(out_gof)
geweke.plot(out_gof)
crosscorr(out_gof)
crosscorr.plot(out_gof)
autocorr.diag(out_gof)
autocorr.plot(out_gof)
### END
| /analysis/src/7_analysis.R | no_license | mcolvin/ms_occ_model | R | false | false | 1,592 | r | # (1=gill, 2=heart, 3=liver, 4=spleen, 5=kidney)
params<- c("psi_z","psi_organ","p","sifAB","sifBC","sifAC","sifAD","sifBD","sifCD")
inits<- function(t){
list(theta=runif(15),eta=runif(20), beta=runif(20)
)}
# BUNDLE DATA UP
combos<-as.matrix(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1)))
dat<- list(yyy=analysis_dat,nfish=26,combos=combos)
out <- bugs(data=dat,inits=inits,parameters=params,model = filename,
n.chains = 3,n.iter = 500000,n.burnin = 100000, debug=TRUE,n.thin=20,
bugs.directory="C:/Documents and Settings/mcolvin/My Documents/WinBUGS14 - 1",
working.directory=getwd())
out <- bugs(data=dat,inits=inits,parameters=params,model = filename,
n.chains = 3,n.iter = 5000,n.burnin = 1000, debug=TRUE, bugs.directory="C:/Users/colvinm/Documents/WinBUGS14 - 1",
working.directory=getwd())
out$mean
save(file="./output/ms_mcmc.Rdata", out)
### DIAGNOSTICS AND GOF
out_gof <- as.mcmc.list(out)
plot(out_gof, ask=T)
summary(out_gof) # Notice that rss and rss.sim have similar posteriors
# indicating good fit
##### Calculate a Bayesian P-value (aka Posterior predictive check)
gofMat <- as.matrix(out_gof)
PrGreater <- sum(gofMat[,"rss"] > gofMat[,"rss_sim"]) / nrow(gofMat)
# 0.48
Pr2tail <- 2 * min(PrGreater, 1 - PrGreater) # No evidence of lack of fit
# 0.96
#### Generic convergence diagnostics
gelman.diag(out_gof)
gelman.plot(out_gof)
geweke.diag(out_gof)
geweke.plot(out_gof)
crosscorr(out_gof)
crosscorr.plot(out_gof)
autocorr.diag(out_gof)
autocorr.plot(out_gof)
### END
|
rm(list=ls())
library(data.table)
library(reshape2)
library(ggplot2)
library(gridExtra)
library(ggExtra)
library(ggpubr)
library(stringr)
library(stats)
library(zoo)
library(grid)
library(lme4)
set.seed(1)
# Extract the legend ("guide-box") grob from a built ggplot object.
g_legend <- function(a.gplot){
  built <- ggplot_gtable(ggplot_build(a.gplot))
  grob_names <- vapply(built$grobs, function(g) g$name, character(1))
  built$grobs[[which(grob_names == "guide-box")]]
}
# Build the dissimilarity boxplot for one overlap threshold.
#   doc_df: dissimilarity/overlap table (data.table) with Carbon_Source_1/2,
#           Same_Environment, Overlap and Dissimilarity columns
#   o:      overlap threshold; only community pairs with Overlap > o are kept
#   fn:     csv of bootstrapped t statistics used for the displayed p-values
# Returns a ggplot object with three groups: A = Glc-Glc, B = Cit-Cit,
# C = Glc-Cit, annotated with bootstrap p-values.
plot_comp_diff <- function(doc_df, o, fn){
  Ad <- doc_df[Carbon_Source_1 == 'Glucose' & Same_Environment == TRUE & Overlap > o,]
  Bd <- doc_df[Carbon_Source_1 == 'Citrate' & Same_Environment == TRUE & Overlap > o,]
  Cd <- doc_df[Carbon_Source_2 == 'Citrate' & Carbon_Source_1 == 'Glucose' & Overlap > o,]
  A <- rbind(Ad, Bd, Cd)
  # Label the three comparison groups.
  A$Treatment <- 'NA'
  A[Same_Environment == TRUE & Carbon_Source_1 == 'Glucose',]$Treatment <- 'A'
  A[Same_Environment == TRUE & Carbon_Source_1 == 'Citrate',]$Treatment <- 'B'
  A[Same_Environment == FALSE,]$Treatment <- 'C'
  # Replace p-values with ones derived from the bootstrapped t statistics.
  tests <- fread(fn)
  tests <- tests[Threshold == o & !is.na(tests$t)]
  pvals <- data.frame()
  for (x in unique(tests$Comparison)){
    # One-sided bootstrap p: fraction of t draws on the opposite side of 0
    # from the mean t.
    t <- mean(tests[Comparison == x]$t)
    if (t < 0){
      pvals <- rbind(pvals, data.frame(Comparison = x, p = sum(tests[Comparison == x]$t > 0) / nrow(tests[Comparison == x])))
    } else {
      pvals <- rbind(pvals, data.frame(Comparison = x, p = sum(tests[Comparison == x]$t < 0) / nrow(tests[Comparison == x])))
    }
  }
  n <- length(unique(tests$Run))
  # p = 0 can only be resolved down to ~1/n bootstrap runs: report "p < 1/n";
  # otherwise report the p-value truncated to three decimals.
  pvals$adj <- paste('p < ', ceiling(1000 / n) / 1000)
  pvals[pvals$p != 0,]$adj <- as.character(floor(pvals[pvals$p != 0,]$p * 1000) / 1000)
  # (A stray no-op expression `pvals[pvals$p!=0]` was removed here.)
  pvals$group1 <- c('A', 'B', 'A')
  pvals$group2 <- c('B', 'C', 'C')
  p1 <- ggboxplot(A, x = 'Treatment', y = 'Dissimilarity', col = 'Treatment', palette = "jco",
                  add = "jitter", legend = 'right') +
    guides(col = FALSE) + labs(x = '') +
    stat_pvalue_manual(pvals, label = 'adj', y.position = c(0.8, 0.85, 0.9), size = 3) +
    theme(legend.position = '',
          axis.line = element_line(size = 1),
          axis.text = element_text(size = 8)) +
    scale_x_discrete(labels = c(expression('Glc-Glc'), expression('Cit-Cit'), expression('Glc-Cit'))) +
    ggtitle(paste('t =', o))
  return(p1)
}
# Load the dissimilarity-overlap table and drop identical pairs
# (Dissimilarity == 0), which carry no information for the comparison.
doc_df = fread('../Data/Dissimilarity_Overlap.csv')
doc_df = doc_df[Dissimilarity!=0,]
# One panel per overlap threshold (0.90 .. 0.995), all panels using the same
# bootstrapped t-test file for their p-value annotations.
p1<- plot_comp_diff(doc_df,0.9,fn= '../Stat_Outputs/TTest_CSource.csv')
p2<- plot_comp_diff(doc_df,0.91,fn= '../Stat_Outputs/TTest_CSource.csv')
p3<- plot_comp_diff(doc_df,0.92,fn= '../Stat_Outputs/TTest_CSource.csv')
p4<- plot_comp_diff(doc_df,0.93,fn= '../Stat_Outputs/TTest_CSource.csv')
p5<- plot_comp_diff(doc_df,0.94,fn= '../Stat_Outputs/TTest_CSource.csv')
p6<- plot_comp_diff(doc_df,0.95,fn= '../Stat_Outputs/TTest_CSource.csv')
p7<- plot_comp_diff(doc_df,0.96,fn= '../Stat_Outputs/TTest_CSource.csv')
p8<- plot_comp_diff(doc_df,0.97,fn= '../Stat_Outputs/TTest_CSource.csv')
p9<- plot_comp_diff(doc_df,0.98,fn= '../Stat_Outputs/TTest_CSource.csv')
p10<- plot_comp_diff(doc_df,0.99,fn= '../Stat_Outputs/TTest_CSource.csv')
p11<- plot_comp_diff(doc_df,0.9925,fn= '../Stat_Outputs/TTest_CSource.csv')
p12<- plot_comp_diff(doc_df,0.995,fn= '../Stat_Outputs/TTest_CSource.csv')
# p = grid.arrange(pA,pB,layout_matrix=rbind(c(1,1,1,1,1,2,2)))
# Arrange all 12 panels in a 3-column grid and write the supplementary figure.
ggsave('../Final_Figures/Supp5.png',grid.arrange(p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11,p12,ncol=3),height=12,width=9)
| /Figure_Generating_Scripts/Supp5.R | permissive | jccvila/VilaLiuSanchez2020 | R | false | false | 3,480 | r | rm(list=ls())
library(data.table)
library(reshape2)
library(ggplot2)
library(gridExtra)
library(ggExtra)
library(ggpubr)
library(stringr)
library(stats)
library(zoo)
library(grid)
library(lme4)
set.seed(1)
g_legend <- function(a.gplot){
tmp <- ggplot_gtable(ggplot_build(a.gplot))
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
return(legend)}
plot_comp_diff <- function(doc_df,o,fn){
Ad <- doc_df[Carbon_Source_1 == 'Glucose' & Same_Environment == TRUE & Overlap>o,]
Bd <- doc_df[Carbon_Source_1 == 'Citrate' &Same_Environment == TRUE & Overlap>o,]
Cd <- doc_df[Carbon_Source_2 == 'Citrate'& Carbon_Source_1 =='Glucose' & Overlap>o,]
A = rbind(Ad,Bd,Cd)
A$Treatment = 'NA'
A[Same_Environment == TRUE & Carbon_Source_1 == 'Glucose',]$Treatment = 'A'
A[Same_Environment == TRUE & Carbon_Source_1 == 'Citrate',]$Treatment = 'B'
A[Same_Environment == FALSE,]$Treatment ='C'
#Replace P Values by bootstrapped data
tests = fread(fn)
tests = tests[Threshold ==o & !is.na(tests$t)]
pvals = data.frame()
for(x in unique(tests$Comparison)){
t = mean(tests[Comparison==x]$t)
if(t <0){
pvals = rbind(pvals,data.frame(Comparison = x,p = sum(tests[Comparison == x]$t>0)/nrow(tests[Comparison == x])))}
else{
pvals = rbind(pvals,data.frame(Comparison = x,p = sum(tests[Comparison == x]$t<0)/nrow(tests[Comparison == x])))
}
}
n = length(unique(tests$Run))
pvals$adj = paste('p < ', ceiling(1000/n)/1000)
pvals[pvals$p !=0,]$adj = as.character(floor(pvals[pvals$p !=0,]$p*1000)/1000)
pvals[pvals$p!=0]
pvals$group1 = c('A','B','A')
pvals$group2 = c('B','C','C')
p1 <- ggboxplot(A,x='Treatment',y='Dissimilarity',col='Treatment',palette = "jco",
add = "jitter",legend='right') +
# stat_compare_means(comparisons = mycomparisons,method='t.test',size=2) + scale_y_continuous(breaks=c(0,0.8)) +
guides(col=FALSE) + labs(x = '') +
stat_pvalue_manual(pvals,label='adj',y.position = c(0.8,0.85,0.9),size=3) +
theme(legend.position = '' ,
axis.line = element_line(size=1),
axis.text = element_text(size=8)) +
scale_x_discrete(labels=c(expression('Glc-Glc'),expression('Cit-Cit'),expression('Glc-Cit'))) +
ggtitle(paste('t =', o))
return(p1)
}
doc_df = fread('../Data/Dissimilarity_Overlap.csv')
doc_df = doc_df[Dissimilarity!=0,]
p1<- plot_comp_diff(doc_df,0.9,fn= '../Stat_Outputs/TTest_CSource.csv')
p2<- plot_comp_diff(doc_df,0.91,fn= '../Stat_Outputs/TTest_CSource.csv')
p3<- plot_comp_diff(doc_df,0.92,fn= '../Stat_Outputs/TTest_CSource.csv')
p4<- plot_comp_diff(doc_df,0.93,fn= '../Stat_Outputs/TTest_CSource.csv')
p5<- plot_comp_diff(doc_df,0.94,fn= '../Stat_Outputs/TTest_CSource.csv')
p6<- plot_comp_diff(doc_df,0.95,fn= '../Stat_Outputs/TTest_CSource.csv')
p7<- plot_comp_diff(doc_df,0.96,fn= '../Stat_Outputs/TTest_CSource.csv')
p8<- plot_comp_diff(doc_df,0.97,fn= '../Stat_Outputs/TTest_CSource.csv')
p9<- plot_comp_diff(doc_df,0.98,fn= '../Stat_Outputs/TTest_CSource.csv')
p10<- plot_comp_diff(doc_df,0.99,fn= '../Stat_Outputs/TTest_CSource.csv')
p11<- plot_comp_diff(doc_df,0.9925,fn= '../Stat_Outputs/TTest_CSource.csv')
p12<- plot_comp_diff(doc_df,0.995,fn= '../Stat_Outputs/TTest_CSource.csv')
# p = grid.arrange(pA,pB,layout_matrix=rbind(c(1,1,1,1,1,2,2)))
ggsave('../Final_Figures/Supp5.png',grid.arrange(p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11,p12,ncol=3),height=12,width=9)
|
# Bootstrap sampling distribution of the sample variance.
#   s    : size of the simulated source sample
#   b    : size of each bootstrap resample (drawn with replacement)
#   t    : intended number of bootstrap replicates
#   case : source distribution -- 1 = N(0,1), 2 = Poisson(7),
#          3 = chi-square(2), 4 = Beta(2,3)
# Side effect: draws a histogram of the resampled variances.
function (s,b,t,case)
{
C=case
# Draw the source sample from the requested distribution.
if (C == 1) {
S = rnorm(s)
}
else if(C == 2){
S = rpois(s,7)
}
else if(C == 3){
S = rchisq(s,2)
}
else if(C == 4){
S = rbeta(s,2,3)
}
else{
print("Case incorrect")
# NOTE(review): `end` is not a control-flow keyword in R (Matlab-ism); this
# line merely evaluates base stats' `end()` function object and continues.
# Use stop("Case incorrect") to actually abort the function.
end
}
Var = c(1)
# NOTE(review): `0:t-1` parses as `(0:t) - 1`, i.e. k = -1, 0, ..., t-1.
# The k = -1 and k = 0 iterations assign to nothing, so only t-1 variances
# are actually stored. The intended loop was almost certainly
# `for (k in 1:t)` (or seq_len(t)).
for(k in 0:t-1)
{
# NOTE(review): prefer TRUE over the reassignable shorthand T.
X = sample(S,b,replace = T)
Var[k] = var(X)
}
hist(Var)
} | /bootstrap.R | no_license | CityChicken/R-Scripts | R | false | false | 317 | r | function (s,b,t,case)
{
C=case
if (C == 1) {
S = rnorm(s)
}
else if(C == 2){
S = rpois(s,7)
}
else if(C == 3){
S = rchisq(s,2)
}
else if(C == 4){
S = rbeta(s,2,3)
}
else{
print("Case incorrect")
end
}
Var = c(1)
for(k in 0:t-1)
{
X = sample(S,b,replace = T)
Var[k] = var(X)
}
hist(Var)
} |
## ---------------------------
##
## Script name: ui.R
##
## Purpose of script: Specifies user interface for coronaRisk app
##
## Author: Ben Phillips
##
## Date Created: 2020-03-12
##
## Email: phillipsb@unimelb.edu.au
##
## ---------------------------
##
## Notes:
##
##
## --------------------------
## load up the packages we will need
library(shiny)
## ---------------------------
## load up our functions into memory
## source files
source("getData.R")
## ---------------------------
## ---------------------------
options(scipen=9)
# Define UI
## UI definition: a title plus three tabs (10-day forecast, growth-rate
## diagnostics, about).  Each tab is built as a named object first so the
## final shinyUI() call reads as a table of contents.

## --- Tab 1: 10-day forecast ------------------------------------------------
forecastSidebar <- sidebarPanel(
  titlePanel("Location"),
  selectInput(inputId = "stateFinder",
              label = "Select State:",
              choices = ddReg,
              selected = ddNames[1]),
  h5("Raw case numbers:"),
  tableOutput(outputId = "rawStats"),
  h5("Active cases:"),
  tableOutput(outputId = "tablePredConf"),
  h5("Doubling time (days):"),
  textOutput(outputId = "doubTime"),
  hr(),
  sliderInput(inputId = "fitWinSlider", min = 3, max = 10, value = 7,
              label = "Fit window:", post = "days"),
  p("When growth rates are changing fast, reduce the fit window to average growth over more recent history."),
  titlePanel("Detection"),
  h5("Estimated proportion of cases detected:"),
  textOutput(outputId = "detRate"),
  h5("Possible true number of cases now given imperfect detection:"),
  textOutput(outputId = "tablePredTrue"),
  hr(),
  p("Take this last number with a grain of salt; it is rough. But low detection indicates that there are many more deaths in the country than there should be given reported case numbers (so there must be more cases than are reported)."),
  p("Active cases are total number of infections minus deaths and recoveries."),
  p("For more information, see the 'About' tab.")
)

forecastTab <- tabPanel(
  "10-day forecast",
  sidebarLayout(
    forecastSidebar,
    mainPanel(
      plotOutput("rawPlot"),
      plotOutput("logPlot")
    )
  )
)

## --- Tab 2: growth rate and curve flattening -------------------------------
growthTab <- tabPanel(
  "Growth rate and curve flattening",
  sidebarLayout(
    sidebarPanel(
      titlePanel("Location selector"),
      selectInput(inputId = "stateGrowthRate",
                  label = "Select State:",
                  choices = ddReg,
                  selected = ddNames[c(1, 3, 8, 9)],
                  multiple = TRUE)
    ),
    mainPanel(
      h5("Growth rate"),
      p("This is the growth rate of the number of active cases for the last 10 days. It can be thought of as the interest rate, compounded daily."),
      p("Positive is bad, negative is good. Progress in control would be indicated by steady decline in growth rate over time, and holding in negative territory."),
      p("Note, days with low or zero growth followed by large spikes are reporting issues: countries miss a day (or several) of reporting and then aggregate cases into the following day."),
      plotOutput("growthRate"),
      hr(),
      h5("Curve flattening index"),
      p("This is a measure of how well a country is flattening the epidemic curve at any point in time. Positive values mean growth rates are declining at that point in time."),
      p("Note, this last plot covers the entire time period of the pandemic, not just the last ten days."),
      plotOutput("cfi"),
      p("For more information, see the 'About' tab.")
    )
  )
)

## --- Tab 3: about ----------------------------------------------------------
aboutTab <- tabPanel(
  "About", br(),
  fluidRow(column(12,
                  withMathJax(),
                  includeMarkdown("doc/about.Rmd")
  ))
)

## Assemble the page; the navbar title carries the date of the latest data.
shinyUI(fluidPage(
  titlePanel("Coronavirus 10-day forecast -- Australia"),
  navbarPage(
    p("As of", format(dates[length(dates)], "%d %b")),
    forecastTab,
    growthTab,
    aboutTab
  )
))
| /ui.R | permissive | benflips/nCovForecastOz | R | false | false | 4,589 | r | ## ---------------------------
##
## Script name: ui.R
##
## Purpose of script: Specifies user interface for coronaRisk app
##
## Author: Ben Phillips
##
## Date Created: 2020-03-12
##
## Email: phillipsb@unimelb.edu.au
##
## ---------------------------
##
## Notes:
##
##
## --------------------------
## load up the packages we will need
library(shiny)
## ---------------------------
## load up our functions into memory
## source files
source("getData.R")
## ---------------------------
## ---------------------------
options(scipen=9)
# Define UI
shinyUI(fluidPage(
# Application title
titlePanel("Coronavirus 10-day forecast -- Australia"),
navbarPage(p("As of", format(dates[length(dates)], "%d %b")),
##### 10-day forecast #####
tabPanel("10-day forecast",
# Sidebar
sidebarLayout(
sidebarPanel(
titlePanel("Location"),
selectInput(inputId = "stateFinder",
label = "Select State:",
choices = ddReg,
selected = ddNames[1]),
h5("Raw case numbers:"),
tableOutput(outputId = "rawStats"),
h5("Active cases:"),
tableOutput(outputId = "tablePredConf"),
h5("Doubling time (days):"),
textOutput(outputId = "doubTime"),
hr(),
sliderInput(inputId = "fitWinSlider", min = 3, max = 10, value = 7, label = "Fit window:", post = "days"),
p("When growth rates are changing fast, reduce the fit window to average growth over more recent history."),
titlePanel("Detection"),
h5("Estimated proportion of cases detected:"),
textOutput(outputId = "detRate"),
h5("Possible true number of cases now given imperfect detection:"),
textOutput(outputId = "tablePredTrue"),
hr(),
p("Take this last number with a grain of salt; it is rough. But low detection indicates that there are many more deaths in the country than there should be given reported case numbers (so there must be more cases than are reported)."),
p("Active cases are total number of infections minus deaths and recoveries."),
p("For more information, see the 'About' tab.")
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("rawPlot"),
plotOutput("logPlot")
)
)
),
# ##### Growth Rate #####
tabPanel("Growth rate and curve flattening",
# Sidebar
sidebarLayout(
sidebarPanel(
titlePanel("Location selector"),
selectInput(inputId = "stateGrowthRate",
label = "Select State:",
choices = ddReg,
selected = ddNames[c(1, 3, 8, 9)],
multiple = TRUE)
),
mainPanel(
h5("Growth rate"),
p("This is the growth rate of the number of active cases for the last 10 days. It can be thought of as the interest rate, compounded daily."),
p("Positive is bad, negative is good. Progress in control would be indicated by steady decline in growth rate over time, and holding in negative territory."),
p("Note, days with low or zero growth followed by large spikes are reporting issues: countries miss a day (or several) of reporting and then aggregate cases into the following day."),
plotOutput("growthRate"),
hr(),
h5("Curve flattening index"),
p("This is a measure of how well a country is flattening the epidemic curve at any point in time. Positive values mean growth rates are declining at that point in time."),
p("Note, this last plot covers the entire time period of the pandemic, not just the last ten days."),
plotOutput("cfi"),
p("For more information, see the 'About' tab.")
)
)
),
tabPanel("About", br(),
fluidRow(column(12,
withMathJax(),
includeMarkdown("doc/about.Rmd")
)))
)
))
|
#Put Necessary Libraries Here
knitr::opts_chunk$set(echo = TRUE)
library(tidyverse)
library(dplyr)
library(ggthemes)
library(plotly)
library(ggplot2)
library(tidyverse)
library(modelr)
library(broom)
library(glmnet)
library(kableExtra)
library(scales)
#Load cleaned data from Variable_Addition.R
nhl.clean=read.csv('nhl-game-data/cleaned_NHL.csv')
# Collapse the expansion-era franchise ids (52-54) onto the compact 31-33
# range used by the rest of the analysis.  Vectorised assignments replace
# the original row-by-row for-loops (identical results, one pass per column).
remap_team_ids <- function(ids) {
  ids[ids == 52] <- 31
  ids[ids == 53] <- 32
  ids[ids == 54] <- 33
  ids
}
nhl.clean$team_id <- remap_team_ids(nhl.clean$team_id)
nhl.clean$away_team_id <- remap_team_ids(nhl.clean$away_team_id)
nhl.clean$home_team_id <- remap_team_ids(nhl.clean$home_team_id)
# The opponent id is whichever side of the matchup `team_id` is not.
nhl.clean$other.id <- ifelse(nhl.clean$team_id == nhl.clean$away_team_id,
                             nhl.clean$home_team_id,
                             nhl.clean$away_team_id)
#Summarize betting variability (share of games won by the first-scoring
#team) per season: overall ("All") plus one block per team.
season.fgw = nhl.clean %>%
  group_by(Season) %>%
  summarize(rate = mean(first_goal_win)) %>%
  mutate(Season = Season %/% 10000) %>%
  cbind(Team = "All") %>%
  rename(`First Goal Wins` = rate)
# Keep an overall-only copy before the per-team rows are appended.
season.fgw1 = season.fgw
# Build the per-team summaries as a list and bind once at the end instead of
# growing season.fgw with rbind() inside the loop (quadratic copying).
per.team = lapply(unique(nhl.clean$team_id), function(i) {
  filter(nhl.clean, team_id == i) %>%
    select(Season, away_team_id, home_team_id, first_goal_win) %>%
    group_by(Season) %>%
    summarize(rate = mean(first_goal_win)) %>%
    cbind(as.character(i)) %>%
    mutate(Season = Season %/% 10000) %>%
    rename(`First Goal Wins` = rate, Team = "as.character(i)")
})
season.fgw = do.call(rbind, c(list(season.fgw), per.team))
#Summarize Betting Variable by H vs A
## Wide away-by-home table of first-goal-win rates (computed for display;
## the result is deliberately not stored).
nhl.clean %>%
  select(away_team_id, home_team_id, first_goal_win) %>%
  group_by(away_team_id, home_team_id) %>%
  summarize(rate = mean(first_goal_win)) %>%
  spread(away_team_id, rate)
## Long-format rate for every home/away pairing, then a tile heat map.
HA.tile <- nhl.clean %>%
  select(away_team_id, home_team_id, first_goal_win) %>%
  group_by(away_team_id, home_team_id) %>%
  summarize(rate = mean(first_goal_win))
ggplot(HA.tile, aes(away_team_id, home_team_id)) +
  geom_tile(aes(fill = rate)) +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank()) +
  xlab("Away Team ID") +
  ylab("Home Team ID") +
  labs(title = "First Goal Win Rate by H vs A Matchup")
#Summarize Betting Variable with no Regard for H vs A
total.tile <- nhl.clean %>%
  select(team_id, other.id, first_goal_win) %>%
  group_by(team_id, other.id) %>%
  summarize(rate = mean(first_goal_win))
ggplot(total.tile, aes(team_id, other.id)) +
  geom_tile(aes(fill = rate)) +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank()) +
  xlab("Away Team ID") +
  ylab("Home Team ID") +
  labs(title = "First Goal Win Rate by Matchup")
#Average FGW over the course of the dataset
target <- mean(nhl.clean$first_goal_win)
#Summarize Betting Variable within Divisions
division <- filter(nhl.clean, game_type == 1)
#Band each home/away pairing's first-goal-win rate and draw a tile plot.
#  OU = 2 : rate >  0.70  (well over the league-wide target)
#  OU = 1 : rate <  0.66  (under)
#  OU = 0 : 0.66 <= rate <= 0.70 (near target)
#Takes one data frame with home_team_name/away_team_name/first_goal_win and
#returns a ggplot object; the interface is unchanged from the original.
tileplotclose.func = function(data){
  data = data %>% select(home_team_name, away_team_name, first_goal_win) %>%
    group_by(home_team_name, away_team_name) %>%
    summarize(rate=mean(first_goal_win))
  # Vectorised banding replaces the original row-by-row for-loop (which
  # also errored on an NA rate; ifelse propagates NA instead).
  data$OU = ifelse(data$rate > .7, 2, ifelse(data$rate < .66, 1, 0))
  ggplot(data, aes(home_team_name, away_team_name)) +
    geom_tile(aes(fill = OU)) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
## One subset per hosting division for intra-division games (game_type == 1).
atlantic1 <- filter(division, home_division == "Atlantic")
central1 <- filter(division, home_division == "Central")
tileplotclose.func(central1)  # NOTE(review): central1 is plotted again below
Metropolitan1 <- filter(division, home_division == "Metropolitan")
Northeast1 <- filter(division, home_division == "Northeast")
Northwest1 <- filter(division, home_division == "Northwest")
Pacific1 <- filter(division, home_division == "Pacific")
Southeast1 <- filter(division, home_division == "Southeast")
## Same split for in-conference games (game_type == 2).
conference <- filter(nhl.clean, game_type == 2)
atlantic2 <- filter(conference, home_division == "Atlantic")
central2 <- filter(conference, home_division == "Central")
Metropolitan2 <- filter(conference, home_division == "Metropolitan")
Northeast2 <- filter(conference, home_division == "Northeast")
Northwest2 <- filter(conference, home_division == "Northwest")
Pacific2 <- filter(conference, home_division == "Pacific")
Southeast2 <- filter(conference, home_division == "Southeast")
## And for out-of-conference games (game_type == 3).
ooc <- filter(nhl.clean, game_type == 3)
atlantic3 <- filter(ooc, home_division == "Atlantic")
central3 <- filter(ooc, home_division == "Central")
Metropolitan3 <- filter(ooc, home_division == "Metropolitan")
Northeast3 <- filter(ooc, home_division == "Northeast")
Northwest3 <- filter(ooc, home_division == "Northwest")
Pacific3 <- filter(ooc, home_division == "Pacific")
Southeast3 <- filter(ooc, home_division == "Southeast")
## Tile plot for every subset, in the same order as the original script.
tileplotclose.func(atlantic1)
tileplotclose.func(central1)
tileplotclose.func(Metropolitan1)
tileplotclose.func(Northeast1)
tileplotclose.func(Northwest1)
tileplotclose.func(Pacific1)
tileplotclose.func(Southeast1)
tileplotclose.func(atlantic2)
tileplotclose.func(central2)
tileplotclose.func(Metropolitan2)
tileplotclose.func(Northeast2)
tileplotclose.func(Northwest2)
tileplotclose.func(Pacific2)
tileplotclose.func(Southeast2)
tileplotclose.func(atlantic3)
tileplotclose.func(central3)
tileplotclose.func(Metropolitan3)
tileplotclose.func(Northeast3)
tileplotclose.func(Northwest3)
tileplotclose.func(Pacific3)
tileplotclose.func(Southeast3)
#Season trend of first-goal-wins, per team and overall, against the
#league-wide average (red line).  Referencing the column by name instead of
#the fragile positional `season.fgw[,2]` inside aes() keeps the plot correct
#if column order ever changes; the plotted data are identical.
ggplot(season.fgw) + geom_line(aes(Season, `First Goal Wins`, color = Team)) + geom_hline(yintercept = target, colour = "red")+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank())+ ylab("First Goal Wins Percentage")+
labs(title = "First Goal Wins by Season by Team")
ggplot(season.fgw1) + geom_line(aes(Season, `First Goal Wins`)) + geom_hline(yintercept = target, colour = "red")+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank())+ ylab("First Goal Wins Percentage")+
labs(title = "First Goal Wins by Season")
| /Summarize.R | no_license | hufinger/NHL_Research | R | false | false | 6,628 | r | #Put Necessary Libraries Here
knitr::opts_chunk$set(echo = TRUE)
library(tidyverse)
library(dplyr)
library(ggthemes)
library(plotly)
library(ggplot2)
library(tidyverse)
library(modelr)
library(broom)
library(glmnet)
library(kableExtra)
library(scales)
#Load cleaned data from Variable_Addition.R
nhl.clean=read.csv('nhl-game-data/cleaned_NHL.csv')
for(i in 1:nrow(nhl.clean)){
if(nhl.clean$team_id[i] == 52){
nhl.clean$team_id[i] = 31
} else if(nhl.clean$team_id[i] == 53){
nhl.clean$team_id[i] = 32
} else if(nhl.clean$team_id[i] == 54){
nhl.clean$team_id[i] = 33
}
}
for(i in 1:nrow(nhl.clean)){
if(nhl.clean$away_team_id[i] == 52){
nhl.clean$away_team_id[i] = 31
} else if(nhl.clean$away_team_id[i] == 53){
nhl.clean$away_team_id[i] = 32
} else if(nhl.clean$away_team_id[i] == 54){
nhl.clean$away_team_id[i] = 33
}
}
for(i in 1:nrow(nhl.clean)){
if(nhl.clean$home_team_id[i] == 52){
nhl.clean$home_team_id[i] = 31
} else if(nhl.clean$home_team_id[i] == 53){
nhl.clean$home_team_id[i] = 32
} else if(nhl.clean$home_team_id[i] == 54){
nhl.clean$home_team_id[i] = 33
}
if(nhl.clean$team_id[i] == nhl.clean$away_team_id[i]){
nhl.clean$other.id[i] = nhl.clean$home_team_id[i]
}else{
nhl.clean$other.id[i] = nhl.clean$away_team_id[i]
}
}
#Summarize Betting Variablity by Team
season.fgw=nhl.clean %>%
group_by(Season) %>%
summarize(rate=mean(first_goal_win)) %>%
mutate(Season=Season %/% 10000) %>%
cbind(Team="All") %>%
rename(`First Goal Wins`=rate)
season.fgw1 = season.fgw
for(i in unique(nhl.clean$team_id)){
data=filter(nhl.clean,team_id==i) %>%
select(Season,away_team_id,home_team_id, first_goal_win) %>%
group_by(Season) %>%
summarize(rate=mean(first_goal_win)) %>%
cbind(as.character(i)) %>%
mutate(Season=Season %/% 10000) %>%
rename(`First Goal Wins`=rate,Team="as.character(i)")
season.fgw=rbind(season.fgw,data)
}
#Summarize Betting Variable by H vs A
nhl.clean %>%
select(away_team_id,home_team_id,first_goal_win) %>%
group_by(away_team_id,home_team_id) %>%
summarize(rate=mean(first_goal_win)) %>%
spread(away_team_id,rate)
HA.tile = nhl.clean %>%
select(away_team_id,home_team_id,first_goal_win) %>%
group_by(away_team_id,home_team_id) %>%
summarize(rate=mean(first_goal_win))
ggplot(HA.tile, aes(away_team_id, home_team_id)) + geom_tile(aes(fill = rate))+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank())+xlab("Away Team ID") + ylab("Home Team ID")+
labs(title = "First Goal Win Rate by H vs A Matchup")
#Summarize Betting Variable with no Regard for H vs A
total.tile = nhl.clean %>%
select(team_id, other.id, first_goal_win) %>%
group_by(team_id, other.id) %>%
summarize(rate=mean(first_goal_win))
ggplot(total.tile, aes(team_id, other.id)) + geom_tile(aes(fill = rate))+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank())+xlab("Away Team ID") + ylab("Home Team ID")+
labs(title = "First Goal Win Rate by Matchup")
#Average FGW over the course of the dataset
target = mean(nhl.clean$first_goal_win)
#Summarize Betting Variable within Divisions
division = filter(nhl.clean, game_type == 1)
tileplotclose.func = function(data){
data = data %>% select(home_team_name, away_team_name, first_goal_win) %>%
group_by(home_team_name, away_team_name) %>%
summarize(rate=mean(first_goal_win))
data$OU = NA
for(i in 1:nrow(data)){
if(data$rate[i] > .7){
data$OU[i] = 2
}else if(data$rate[i] < .66){
data$OU[i] = 1
}else if(data$rate[i] >= .66 & data$rate[i] <= .7) {
data$OU[i] = 0
}
}
ggplot(data, aes(home_team_name, away_team_name)) + geom_tile(aes(fill = OU))+ theme(axis.text.x=element_text(angle=90, hjust=1))
}
atlantic1 = filter(division, home_division == "Atlantic")
central1 = filter(division, home_division == "Central")
tileplotclose.func(central1)
Metropolitan1 = filter(division, home_division == "Metropolitan")
Northeast1 = filter(division, home_division == "Northeast")
Northwest1 = filter(division, home_division == "Northwest")
Pacific1 = filter(division, home_division == "Pacific")
Southeast1 = filter(division, home_division == "Southeast")
#Summarize Betting Variable within Conferences
conference = filter(nhl.clean, game_type == 2)
atlantic2 = filter(conference, home_division == "Atlantic")
central2 = filter(conference, home_division == "Central")
Metropolitan2 = filter(conference, home_division == "Metropolitan")
Northeast2 = filter(conference, home_division == "Northeast")
Northwest2 = filter(conference, home_division == "Northwest")
Pacific2 = filter(conference, home_division == "Pacific")
Southeast2 = filter(conference, home_division == "Southeast")
##Summarize Betting Variable in out of conference
ooc = filter(nhl.clean, game_type == 3)
atlantic3 = filter(ooc, home_division == "Atlantic")
central3 = filter(ooc, home_division == "Central")
Metropolitan3 = filter(ooc, home_division == "Metropolitan")
Northeast3 = filter(ooc, home_division == "Northeast")
Northwest3 = filter(ooc, home_division == "Northwest")
Pacific3 = filter(ooc, home_division == "Pacific")
Southeast3 = filter(ooc, home_division == "Southeast")
tileplotclose.func(atlantic1)
tileplotclose.func(central1)
tileplotclose.func(Metropolitan1)
tileplotclose.func(Northeast1)
tileplotclose.func(Northwest1)
tileplotclose.func(Pacific1)
tileplotclose.func(Southeast1)
tileplotclose.func(atlantic2)
tileplotclose.func(central2)
tileplotclose.func(Metropolitan2)
tileplotclose.func(Northeast2)
tileplotclose.func(Northwest2)
tileplotclose.func(Pacific2)
tileplotclose.func(Southeast2)
tileplotclose.func(atlantic3)
tileplotclose.func(central3)
tileplotclose.func(Metropolitan3)
tileplotclose.func(Northeast3)
tileplotclose.func(Northwest3)
tileplotclose.func(Pacific3)
tileplotclose.func(Southeast3)
ggplot(season.fgw) + geom_line(aes(Season, season.fgw[,2], color = Team)) + geom_hline(yintercept = target, colour = "red")+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank())+ ylab("First Goal Wins Percentage")+
labs(title = "First Goal Wins by Season by Team")
ggplot(season.fgw1) + geom_line(aes(Season, season.fgw1[,2])) + geom_hline(yintercept = target, colour = "red")+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),panel.background = element_blank())+ ylab("First Goal Wins Percentage")+
labs(title = "First Goal Wins by Season")
|
% Part of the dcemri package for R
% Distributed under the BSD license: see dcemri/COPYING
%
% $Id: buckley.Rd 332 2010-01-29 16:54:07Z bjw34032 $
\name{Buckley}
\alias{buckley}
\alias{breast}
\alias{meningioma}
\title{Simulated Data from Buckley (2002)}
\description{
In Buckley (2002) tissue residue curves for a Meningioma and a Breast
Cancer were simulated using the MMID4 model. Note, the model is
described in detail by Bassingthwaighte, J.B. \emph{et al.} (1984) and
Kroll, K \emph{et al.} (1996). This model accounts for flow
dispersion and heterogeneity, and includes capillaries modeled as
axially distributed blood-tissue exchange units. A plasma
concentration-time curve, AKA arterial input function, was simulated
as an input to the model using measurements made by Fritz-Hansen
\emph{et al.} (1996).
}
\usage{
data("buckley")
}
\format{
Two lists are created (breast and meningioma) that contain the
simulated time curves and all associated kinetic parameter values.
}
\source{
See below.
}
\references{
Buckley, D.L. (2002) Uncertainty in the Analysis of Tracer Kinetics
Using Dynamic Contrast-Enhanced \eqn{T_1}{T1}-weighted MRI,
\emph{Magnetic Resonance in Medicine} \bold{47}, 601-606.
Bassingthwaighte, J.B. and Goresky, C.A. (1984) Modelling in the analysis of
solute and water exchange in the microvasculature. In: Renkin, E.M., Michel,
C.C. and Geiger, S.R., editors. Handbook of physiology. Section 2. The
cardiovascular system. Bethesda: American Physiological Society. p549-626.
Kroll, K., Wilke, N., Jerosch-Herold, M., Wang, Y., Zhang Y., Basche,
R.J. and Bassingthwaighte, J.B. (1996) Modelling regional myocardial
flows from residue functions of an intravascular indicator. \emph{Am J
Physiol} \bold{271}, H1643-H1655.
Fritz-Hansen, T., Rostrup, E., Larsson, H.B., Sondergaard, L., Ring, P. and
Hendriksen, O. (1996) Measurement of the arterial concentration Gd-DTPA using
MRI; a step toward quantitative perfusion imaging. \emph{Magn Reson Med}
\bold{36}, 347-357.
}
\keyword{datasets}
| /branches/dcemriS4lomis/man/buckley.Rd | permissive | bjw34032/dcemri | R | false | false | 2,079 | rd | % Part of the dcemri package for R
% Distributed under the BSD license: see dcemri/COPYING
%
% $Id: buckley.Rd 332 2010-01-29 16:54:07Z bjw34032 $
\name{Buckley}
\alias{buckley}
\alias{breast}
\alias{meningioma}
\title{Simulated Data from Buckley (2002)}
\description{
In Buckley (2002) tissue residue curves for a Meningioma and a Breast
Cancer were simulated using the MMID4 model. Note, the model is
described in detail by Bassingthwaighte, J.B. \emph{et al.} (1984) and
Kroll, K \emph{et al.} (1996). This model accounts for flow
dispersion and heterogeneity, and includes capillaries modeled as
axially distributed blood-tissue exchange units. A plasma
concentration-time curve, AKA arterial input function, was simulated
as an input to the model using measurements made by Fritz-Hansen
\emph{et al.} (1996).
}
\usage{
data("buckley")
}
\format{
Two lists are created (breast and meningioma) that contain the
simulated time curves and all associated kinetic parameter values.
}
\source{
See below.
}
\references{
Buckley, D.L. (2002) Uncertainty in the Analysis of Tracer Kinetics
Using Dynamic Contrast-Enhanced \eqn{T_1}{T1}-weighted MRI,
\emph{Magnetic Resonance in Medicine} \bold{47}, 601-606.
Bassingthwaighte, J.B. and Goresky, C.A. (1984) Modelling in the analysis of
solute and water exchange in the microvasculature. In: Renkin, E.M., Michel,
C.C. and Geiger, S.R., editors. Handbook of physiology. Section 2. The
cardiovascular system. Bethesda: American Physiological Society. p549-626.
Kroll, K., Wilke, N., Jerosch-Herold, M., Wang, Y., Zhang Y., Basche,
R.J. and Bassingthwaighte, J.B. (1996) Modelling regional myocardial
flows from residue functions of an intravascular indicator. \emph{Am J
Physiol} \bold{271}, H1643-H1655.
Fritz-Hansen, T., Rostrup, E., Larsson, H.B., Sondergaard, L., Ring, P. and
Hendriksen, O. (1996) Measurement of the arterial concentration Gd-DTPA using
MRI; a step toward quantitative perfusion imaging. \emph{Magn Reson Med}
\bold{36}, 347-357.
}
\keyword{datasets}
|
#' Create simulation object
#'
#' @param file \code{character} path to the simulation file
#' @param irun \code{character} name of the simulation column
#' @param idv \code{character} name of the ind. variable
#' @export
#' @example inst/examples/vpc.R
pmx_sim <- function(
                    file,
                    irun,
                    idv) {
  if (missing(idv)) idv <- "TIME"
  if (!file.exists(file)) {
    ## The original fell through and silently returned NULL; keep the NULL
    ## return for backward compatibility but make the bad path visible.
    warning("simulation file does not exist: ", file, call. = FALSE)
    return(invisible(NULL))
  }
  sim <- pmx_fread(file)
  ## Normalise the independent-variable column name to upper-case "TIME".
  if (tolower(idv) == "time") {
    idvn <- names(sim)[tolower(names(sim)) == "time"]
    setnames(sim, idvn, "TIME")
    idv <- "TIME"
  }
  ## Normalise the subject identifier column to "ID" (case-insensitive match).
  id_col <- grep("^id$", names(sim), ignore.case = TRUE, value = TRUE)
  setnames(sim, id_col, "ID")
  obj <- list(
    sim = sim,
    idv = idv,
    irun = irun
  )
  structure(obj, class = c("pmxSimClass", "list"))
}
# Ensure a mandatory controller argument was supplied.
# Stops with a message naming the pmx parameter when `value` is missing or
# NULL; otherwise returns `value` unchanged.
check_argument <- function(value, pmxname) {
  call <- match.call()
  ## `||` (not `|`): a missing `value` must short-circuit before is.null()
  ## forces the promise, which raised the generic "argument is missing"
  ## error instead of this message.
  if (missing(value) || is.null(value)) {
    ## The original passed `pmxname` as an *extra* sprintf argument (silently
    ## ignored), so the user-facing parameter name never reached the message.
    stop(
      sprintf(
        "Please set a %s argument (`%s` is missing or NULL)",
        pmxname, deparse(call$value)
      ),
      call. = FALSE
    )
  }
  value
}
#' Create a pmx object
#'
#' Create a pmx object from a data source
#' @param config Can be either :
#' The complete path for the configuration file, the name of configuration within the built-in
#' list of configurations, or a configuration object.
#' @param sys the system name can be MLX/NM
#' @param directory \code{character} modelling output directory.
#' @param input \code{character} complete path to the modelling input file
#' @param dv \code{character} the name of measurable variable used in the input modelling file
#' @param dvid \emph{[Optional]} \code{character} observation type parameter. This is mandatory
#' in case of multiple endpoint (PKPD).
#' @param cats \emph{[Optional]}\code{character} vector of categorical covariates
#' @param conts \emph{[Optional]}\code{character} vector of continuous covariates
#' @param occ \emph{[Optional]}\code{character} occasional covariate variable name
#' @param strats \emph{[Optional]}\code{character} extra stratification variables
#' @param settings \emph{[Optional]}\code{pmxSettingsClass} \code{\link{pmx_settings}}
#' shared between all plots
#' @param endpoint \code{pmxEndpointClass}, \code{integer} or \code{character}; default to NULL.
#' The code of the endpoint to select. \code{\link{pmx_endpoint}}
#' @param sim \code{pmxSimClass} default to NULL. \code{\link{pmx_sim}}
#' @param bloq \code{pmxBLOQClass} default to NULL. \code{\link{pmx_bloq}}
#' @return \code{pmxClass} controller object.
#' @export
#' @example inst/examples/controller.R
pmx <-
  function(config, sys=c("mlx", "nm"), directory, input, dv, dvid, cats=NULL, conts=NULL, occ=NULL, strats=NULL,
           settings=NULL, endpoint=NULL, sim=NULL, bloq=NULL) {
    ## Mandatory arguments: fail early with an informative message.
    directory <- check_argument(directory, "work_dir")
    input <- check_argument(input, "input")
    dv <- check_argument(dv, "dv")
    ## dvid <- check_argument(dvid, "dvid")
    ## NOTE(review): `dvid` is forwarded to pmxClass$new without a missing()
    ## check -- presumably the class handles a missing dvid; confirm.
    ## Normalise optional covariate arguments to "" / NULL defaults.
    if (missing(cats)) cats <- ""
    if (missing(sim)) sim <- NULL
    if (missing(endpoint)) {
      endpoint <- NULL
    }
    assert_that(is_character_or_null(cats))
    if (missing(conts)) conts <- ""
    assert_that(is_character_or_null(conts))
    if (missing(occ)) occ <- ""
    assert_that(is_character_or_null(occ))
    if (missing(strats)) strats <- ""
    assert_that(is_character_or_null(strats))
    ## Load the plot configuration unless a ready pmxConfig object was given.
    ## The presence of populationParameters.txt marks a MONOLIX 2018 run.
    if (!inherits(config, "pmxConfig")) {
      if ("populationParameters.txt" %in% list.files(directory)) sys <- "mlx18"
      config <- load_config(config, sys)
    }
    ## Fall back to default settings when none (or an invalid one) is given.
    if (missing(settings)) settings <- pmx_settings()
    if (!inherits(settings, "pmxSettingsClass")) {
      settings <- pmx_settings()
    }
    if (missing(bloq)) bloq <- NULL
    assert_that(inherits(bloq, "pmxBLOQClass") || is.null(bloq))
    ## Build and return the controller.
    pmxClass$new(directory, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq)
  }
#' @rdname pmx
#' @details
#' \code{pmx_mlx} is a wrapper to mlx for the MONOLIX system ( \code{sys="mlx"})
#' @export
pmx_mlx <-
  function(config, directory, input, dv, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq) {
    ## Thin wrapper fixing sys = "mlx".  Arguments the caller omitted are
    ## forwarded as missing promises, so pmx()'s own missing() checks and
    ## defaults still apply.
    pmx(config, "mlx", directory, input, dv, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq)
  }
#' Create a controller from mlxtran file
#'
#' @param file_name \code{character} mlxtran file path.
#' @param call \code{logical} if TRUE the result is the parameters parsed
#' @param ... extra arguments passed to pmx_mlx.
#' @rdname pmx
#'
#' @export
#' @details
#'
#'
#' \code{pmx_mlxtran} parses the mlxtran file and guesses the \code{\link{pmx_mlx}} arguments. In case of
#' multiple endpoints the first endpoint is selected; you can still set another endpoint through the same argument.
#' When you set \code{call=TRUE}, no controller is created; only the parameters parsed
#' from the mlxtran file are returned. This can be very helpful in case you would like to customize parameters
#' (adding settings via pmx_settings, changing the default endpoint).
pmx_mlxtran <- function(file_name, config="standing", call=FALSE, endpoint, ...) {
  ## Parse the mlxtran file to recover the pmx_mlx arguments.
  params <- parse_mlxtran(file_name)
  params$config <- config
  ## Extra user-supplied arguments (unevaluated) override/extend the parsed
  ## ones.
  rr <- as.list(match.call()[-1])
  ## Drop the arguments handled explicitly in this function, otherwise they
  ## would be appended a second time below -- a duplicated `config` made
  ## the final do.call() fail with "matched by multiple actual arguments".
  rr$file_name <- NULL
  rr$config <- NULL
  rr$call <- NULL
  rr$endpoint <- NULL
  params <- append(params, rr)
  if (!missing(endpoint)) {
    ## Replace (not append to) any endpoint guessed from the mlxtran file.
    params$endpoint <- NULL
    params$endpoint <- endpoint
  }
  if (call) {
    ## Return the assembled argument list without creating a controller.
    return(params)
  }
  do.call(pmx_mlx, params)
}
# Render a formula as readable text, e.g. a ~ b becomes "a and b".
# Non-formula inputs are returned unchanged.  Uses base inherits() rather
# than the non-base `is.formula()` helper, removing an external dependency
# with identical semantics.
formula_to_text <- function(form) {
  if (!inherits(form, "formula")) {
    return(form)
  }
  ## Drop the `~` head and join the remaining terms with " and ".
  paste(as.character(as.list(form)[-1]), collapse = " and ")
}
#' Create controller global settings
#' @param is.draft \code{logical} if FALSE any plot is without draft annotation
#' @param use.abbrev \code{logical} if TRUE use abbreviations mapping for axis names
#' @param color.scales \code{list} list containing elements of scale_color_manual
#' @param use.labels \code{logical} if TRUE replace factor named by cats.labels
#' @param cats.labels \code{list} list of named vectors for each factor
#' @param use.titles \code{logical} FALSE to generate plots without titles
#' @param effects \code{list} list of effects levels and labels
#' @param ... extra parameter not used yet
#' @return pmxSettingsClass object
#' @example inst/examples/pmx-settings.R
#' @export
pmx_settings <-
  function(is.draft=TRUE, use.abbrev=FALSE, color.scales=NULL,
           cats.labels=NULL, use.labels=FALSE, use.titles=TRUE,
           effects=NULL,
           ...) {
    ## Validate the effects specification up front: a list carrying
    ## `levels` and `labels` of equal length.
    if (!missing(effects) && !is.null(effects)) {
      if (!is.list(effects)) stop("effects should be a list")
      if (!exists("levels", effects) || !exists("labels", effects)) {
        stop("effects should be a list that contains levels and labels")
      }
      if (length(effects$labels) != length(effects$levels)) {
        stop("effects should be a list that contains levels and labels have the same length")
      }
    }
    res <- list(
      is.draft = is.draft,
      use.abbrev = use.abbrev,
      color.scales = color.scales,
      use.labels = use.labels,
      cats.labels = cats.labels,
      use.titles = use.titles,
      effects = effects
    )
    ## Build a ggplot2 labeller only when labels were actually supplied.
    ## The original ran do.call("labeller", NULL) and crashed whenever
    ## use.labels = TRUE but cats.labels was left NULL.
    if (use.labels && !is.null(cats.labels)) {
      res$labeller <- do.call("labeller", cats.labels)
    }
    ## Extra attributes from ... are attached verbatim.
    structure(
      res, ...,
      class = "pmxSettingsClass"
    )
  }
#' Creates pmx endpoint object
#'
#' @param code \code{character} endpoint code : used to filter observations DVID==code.
#' @param label \code{character} endpoint label: used to set title and axis labels
#' @param unit \code{character} endpoint unit : used to set title and axis labels
#' @param file.code \code{character} endpoint file code : used to set predictions and finegrid \cr
#' files extensions in case using code parameter is not enough.
#' @param trans \code{list} Transformation parameter not used yet.
#' @export
#'
#' @example inst/examples/endpoint.R
#' @details
#' In case of multiple endpoints (e.g. the pkpd case) we need to pass an endpoint to the pmx call.
#' Internally, ggPMX will filter the observations data set to keep only rows satisfying \code{DVID==code}.
#' The \code{code} is also used to find the right predictions and/or finegrid files.
#' ggPMX uses the configuration file to find the path of the predictions file
#' (like the single endpoint case) and then filters the right file using the code parameter. \cr
#' For example:
#' \itemize{
#' \item predictions\{code\}.txt for mlx16
#' \item predictions\{code\}.txt and y\{code\}_residual for mlx18
#' }
#'
#' For some tricky examples the code parameter is not enough to find the files. In that case the
#' \code{file.code} parameter is used to distinguish the endpoint files.
pmx_endpoint <-
  function(code,
           label="",
           unit="",
           file.code=code,
           trans =NULL) {
    assert_that(is.character(code))
    assert_that(is.character(file.code))
    assert_that(is.character(unit))
    assert_that(is.character(label))
    assert_that(is_character_or_null(trans))
    ## build and class the endpoint descriptor in a single step
    structure(
      list(
        code = code,
        label = label,
        unit = unit,
        file.code = file.code,
        trans = trans
      ),
      class = "pmxEndpointClass"
    )
  }
#' Creates BLOQ object attributes
#'
#' @param cens \code{character} the censoring column name
#' @param limit \code{character} the limit column name (optional)
#' @param colour \code{character} the color of the geom
#' @param size \code{numeric} the size of the geom
#' @param alpha \code{numeric} the alpha of the geom
#' @param show \code{logical} if FALSE remove all censored observations
#' @param ... any other graphical parameter
#'
#' @export
#' @details
#' To define that a measurement is censored, the observation data set should include
#' a CENSORING column ( default to `CENS` ) and put 1 for lower limit or -1 for upper limit. \cr
#' Optionally, the data set can contain a limit column ( default to `LIMIT`) to set the other limit.
pmx_bloq <-
  function(
           cens="CENS",
           limit ="LIMIT",
           colour="pink",
           size=2,
           alpha=0.9,
           show=TRUE,
           ...) {
    ## fixed attributes first, then any extra graphical parameters from `...`
    fixed <- list(
      cens = cens,
      limit = limit,
      show = show,
      colour = colour,
      size = size,
      alpha = alpha
    )
    structure(
      c(fixed, list(...)),
      class = "pmxBLOQClass"
    )
  }
#' Create a new plot of the desired type
#'
#' @param ctr \code{pmxClass} controller object
#' @param ptype plot type can be:
##' \itemize{
##' \item{"IND"}{ Individual plot type: \code{\link{individual}} }
##' \item{"DIS"}{ Distribution plot type : \code{\link{distrib}}}
##' \item{"SCATTER"}{ Residual plot type :\code{\link{residual}}}
##' }
##' @param pname plot name, if missing it will be created using function aesthetics
##' @param filter optional filter which will be applied to plotting data
##' @param strat.facet \code{formula} define categorical stratification as formula
##' @param strat.color \code{character} categorical stratification variable used for coloring
##' @param trans \code{list} transformation operator
##' @param color.scales \code{list} can be used with strat.color to set scale_color_manual
##' @param use.defaults \code{logical} if FALSE do not use defaults defined in yaml init files
#' @param ... other plot parameters to configure \code{\link{pmx_gpar}}.
#'
#' @family pmxclass
#' @return invisible ctr object
#' @export
set_plot <- function(
  ctr,
  ptype = c(
    "IND", "DIS", "SCATTER", "ETA_PAIRS",
    "ETA_COV", "PMX_QQ", "VPC", "PMX_DENS"
  ),
  pname,
  use.defaults=TRUE,
  filter =NULL,
  strat.color=NULL,
  strat.facet=NULL,
  color.scales=NULL,
  trans=NULL, ...) {
  assert_that(is_pmxclass(ctr))
  ptype <- match.arg(ptype)
  assert_that(is_string_or_null(pname))
  assert_that(is_string_or_null(strat.color))
  assert_that(is_string_or_formula_or_null(strat.facet))
  params <- list(...)
  if (use.defaults) {
    # merge user parameters over the package-level defaults shipped in
    # inst/init/defaults.yaml (keyed by lower-case plot type)
    defaults_yaml <-
      file.path(system.file(package = "ggPMX"), "init", "defaults.yaml")
    defaults <- yaml.load_file(defaults_yaml)
    names(defaults) <- tolower(names(defaults))
    def <- if (tolower(ptype) %in% names(defaults)) {
      defaults[[tolower(ptype)]]
    } else {
      # DIS has two default sets depending on the requested geometry
      # NOTE(review): assumes params$type is always supplied for DIS — confirm
      if (ptype == "DIS") {
        if (params$type == "hist") {
          defaults[["dis_hist"]]
        } else {
          defaults[["dis_box"]]
        }
      }
    }
    if (!is.null(def)) {
      params <- l_left_join(def, params)
      params$ptype <- NULL
    }
  }
  if (ptype == "VPC") {
    # VPC needs the simulation's dv/idv column names
    params$dv <- ctr$sim$dv
    params$idv <- ctr$sim$idv
  }
  # build the plot configuration; random-effect plots are skipped (NULL)
  # when the model has no random effects
  conf <-
    switch(ptype,
      IND = do.call(individual, params),
      DIS = if (ctr$has_re) do.call(distrib, params),
      SCATTER = do.call(residual, params),
      ETA_PAIRS = if (ctr$has_re) do.call(eta_pairs, params),
      ETA_COV = if (ctr$has_re) do.call(eta_cov, params),
      PMX_QQ = do.call(pmx_qq, params),
      PMX_DENS = do.call(pmx_dens, params),
      VPC = do.call(pmx_vpc, params)
    )
  # `filter` is captured unevaluated (NSE) and turned into a filter function
  if (!is.null(substitute(filter))) {
    filter <- deparse(substitute(filter))
    filter <- local_filter(filter)
  }
  if (!is.null(conf)) {
    conf[["filter"]] <- filter
    conf[["trans"]] <- trans
    if (!is.null(strat.color)) conf[["strat.color"]] <- strat.color
    if (!is.null(strat.facet)) conf[["strat.facet"]] <- strat.facet
    if (!is.null(color.scales)) conf$gp[["color.scales"]] <- color.scales
    # remember the call parameters so the controller can be re-printed/saved
    ctr[["config"]][["plots"]][[toupper(pname)]] <-
      c(ptype = ptype, list(...))
    ctr$add_plot(conf, pname)
  }
  invisible(ctr)
}
#' update or add a new abbreviation
#'
#' @param ctr \code{pmxClass} controller object
#' @param ... Options to set or add, with the form \code{name = value}.
#' @export
#' @examples
#' ctr <- theophylline()
#' ctr %>% set_abbrev("new_param"="new value")
#' ctr %>% get_abbrev("new_param")
set_abbrev <- function(ctr, ...) {
  assert_that(is_pmxclass(ctr))
  new_entries <- list(...)
  ## merge into the existing table when one is present, otherwise start fresh
  abbrev <-
    if (length(ctr$abbrev) > 0) {
      l_left_join(ctr$abbrev, new_entries)
    } else {
      unlist(new_entries, recursive = FALSE)
    }
  class(abbrev) <- c("abbreviation", "list")
  ctr$abbrev <- abbrev
}
#' S3 print abbreviation
#' @param x object of class abbreviation
#' @param ... pass additional options (not used presently)
#' @return prints each "key : definition" pair to the console
#' @export
print.abbreviation <- function(x, ...) {
  assert_that(inherits(x, "abbreviation"))
  keys <- names(x)
  for (i in seq_along(x)) {
    cat(sprintf("%s : %s \n", keys[i], x[[i]]))
  }
}
#' Get abbreviation definition by key
#'
#' @param ctr \code{pmxClass} controller
#' @param param abbreviation term; when missing, all abbreviations are returned
#'
#' @return \code{character} abbreviation definition, or `param` itself when
#'   no abbreviation is registered for it
#' @export
get_abbrev <- function(ctr, param) {
  keys <- ctr$abbrev
  if (missing(param)) {
    return(keys)
  }
  found <- keys[[param]]
  if (is.null(found)) param else found
}
#' Get plot object
#'
#' @param ctr \code{pmxClass} controller object
#' @param nplot character the plot name (case-insensitive)
#' @param npage integer or integer vector, set page number in case of multi pages plot
#'
#' @family pmxclass
#' @return ggplot object
#' @export
#' @examples
#' \dontrun{
#' library(ggPMX)
#' ctr <- theophylline()
#' p1 <- ctr %>% get_plot("iwres_ipred")
#' ## get all pages or some pages
#' p2 <- ctr %>% get_plot("individual")
#' ## returns one page of individual plot
#' p2 <- ctr %>% get_plot("individual",npage=1)
#' p3 <- ctr %>% get_plot("individual",npage=c(1,3))
#' ## get distribution plot
#' pdistri <- ctr %>% get_plot("eta_hist")
#'
#' }
get_plot <- function(ctr, nplot, npage = NULL) {
  if (is.numeric(npage)) {
    npage <- as.integer(npage)
  }
  assert_that(is_pmxclass(ctr))
  assert_that(is_string(nplot))
  assert_that(is_integer_or_null(npage))
  nplot <- tolower(nplot)
  assert_that(is_valid_plot_name(nplot, plot_names(ctr)))
  plot_or_factory <- ctr$get_plot(nplot)
  ## multi-page plots are stored as a factory function taking the page index
  if (is.function(plot_or_factory)) plot_or_factory(npage) else plot_or_factory
}
#' Get plot names
#'
#' @param ctr \code{pmxClass} controller object
#'
#' @family pmxclass
#' @return \code{character} vector of the names of all plots stored in the controller
#' @export
plot_names <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ctr$plots()
}
#' Get plots description
#'
#' @param ctr \code{pmxClass} controller object
#'
#' @family pmxclass
#' @return data.frame describing each stored plot: its name, its type and the
#'   helper function that generates it
#' @export
plots <- function(ctr) {
  # indirection over existsFunction; presumably to avoid R CMD check
  # notes about the function lookup — TODO confirm
  existsF <- function(...) do.call("existsFunction", list(...))
  assert_that(is_pmxclass(ctr))
  x <- ctr$config
  # map a plot name to its dedicated pmx_plot_* helper when one exists in the
  # ggPMX namespace, otherwise to a generic pmx_plot() call string
  function_name <- function(nn) {
    fn <- sprintf("pmx_plot_%s", nn)
    if (!existsF(fn, where = asNamespace("ggPMX"))) {
      fn <- sprintf("pmx_plot('%s',...)", nn)
    }
    fn
  }
  if (exists("plots", x)) {
    pp <- x$plots
    names(pp) <- tolower(names(pp))
    # keep only the plots actually registered in the controller
    pp <- pp[ctr$plots()]
    data.table(
      plot_name = names(pp),
      plot_type = sapply(pp, "[[", "ptype"),
      plot_function = sapply(names(pp), function_name)
    )
  }
}
#' Get the plot config by name
#'
#' @param ctr the controller object
#' @param pname the plot name (case-insensitive)
#'
#' @family pmxclass
#' @return the config object
#' @export
#'
#' @examples
#' \dontrun{
#' ctr <- theophylline()
#' ctr %>% set_plot("IND", pname = "indiv1")
#' ctr %>% get_plot_config("indiv1")
#' }
get_plot_config <- function(ctr, pname) {
  assert_that(is_pmxclass(ctr))
  ctr$get_config(pname)
}
#' Get controller data set
#'
#' @param ctr the controller object
#' @param data_set the data set name; defaults to "estimates". Any custom data
#'   set registered with \code{\link{set_data}} can also be requested.
#'
#' @family pmxclass
#' @return a data.table copy of the named data set if available.
#' @export
get_data <- function(ctr, data_set = c(
  "estimates", "predictions",
  "eta", "finegrid", "input", "sim",
  "individual"
)) {
  assert_that(is_pmxclass(ctr))
  ## deliberately no match.arg(): user-defined data sets (see set_data) are
  ## also valid names. When the argument is not supplied, the whole default
  ## vector arrives here; take its first element instead of erroring.
  data_set <- data_set[[1]]
  ## "individual" is stored internally under the key "IND"
  if (data_set == "individual") data_set <- "IND"
  if (data_set == "input") {
    copy(ctr[["input"]])
  } else {
    copy(ctr[["data"]][[data_set]])
  }
}
#' Set a controller data set
#'
#' @param ctr the controller object
#' @param ... a named list of data sets (see example); every argument must be named
#' @family pmxclass
#' @details
#' This function can be used to set an existing data set or to create a new one. The basic
#' idea is to change the built-in data set (change the factor level names, change some rows
#' values or apply any other data set operation) and use the new data set using the dname
#' parameter of pmx_plot family functions.
#' @examples
#' ctr <- theophylline()
#' dx <- ctr %>% get_data("eta")
#' dx <- dx[,EFFECT:=factor(
#'   EFFECT,levels=c("ka", "V", "Cl"),
#'   labels=c("Concentration","Volume","Clearance"))]
#' ## update existing data set
#' ctr %>% set_data(eta=dx)
#' ## or create a new data set
#' ctr %>% set_data(eta_long=dx)
#' @export
set_data <- function(ctr, ...) {
  assert_that(is_pmxclass(ctr))
  ## list(...) evaluates the arguments in the caller's frame (the previous
  ## match.call()/eval() approach evaluated them in the wrong environment)
  params <- list(...)
  nms <- names(params)
  ## every data set must be supplied as a named argument; note the scalar
  ## condition — the previous vectorized `if (!nzchar(...))` errored as soon
  ## as more than one data set was passed
  if (is.null(nms) || any(!nzchar(nms))) {
    stop("each data set should be well named")
  }
  ## assign by reference into the controller data store
  invisible(Map(function(n, v) ctr$data[[n]] <- v, nms, params))
}
#' Get categorical covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector
#' @export
get_cats <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ctr$cats
}
#' Get extra stratification variables
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector
#' @export
get_strats <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ctr$strats
}
#' Get covariates variables
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector of the unique categorical and continuous
#'   covariate names, with empty strings removed
#' @export
get_covariates <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  res <- unique(c(ctr$cats, ctr$conts))
  res[nzchar(res)]
}
#' Get continuous covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector
#' @export
get_conts <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ctr$conts
}
#' Get controller occasional covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a character vector
#' @export
get_occ <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ctr$occ
}
# pmxSource (R6 Class) ------------------------------------------------------------
# Reference-semantics container holding the modelling input/output data,
# the configuration, the global settings and all generated plots.
#' @importFrom R6 R6Class
pmxClass <- R6::R6Class(
  "pmxClass",
  # Private methods ------------------------------------------------------------
  private = list(
    .data_path = "",
    .input_path = "",
    .covariates = NULL,
    # generated plots and their configurations, keyed by lower-case plot name
    .plots = list(),
    .plots_configs = list()
  ),
  # Public methods -------------------------------------------------------------
  public = list(
    data = NULL,
    config = NULL,
    input = NULL, input_file = NULL,
    dv = NULL,
    dvid = NULL, cats = NULL, conts = NULL, occ = NULL,
    strats = NULL,
    settings = NULL,
    # random-effect information (set during initialization from the eta data)
    has_re = FALSE, re = NULL,
    abbrev = list(),
    endpoint = NULL,
    warnings = list(),
    footnote = FALSE,
    save_dir = NULL,
    # reporting state: FIFO queue of plot file names and a running counter
    report_queue = list(),
    report_n = 0,
    plot_file_name = "",
    sim = NULL,
    bloq = NULL,
    initialize = function(data_path, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq)
      pmx_initialize(self, private, data_path, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq),
    print = function(data_path, config, ...)
      pmx_print(self, private, ...),
    # register a plot for the report and derive a unique file name for it
    enqueue_plot = function(pname) {
      self$report_n <- self$report_n + 1
      pname_file <- paste0(pname, "-", self$report_n)
      self$plot_file_name <- pname_file
      self$report_queue <- c(self$report_queue, pname_file)
    },
    dequeue_plot = function() pmx_dequeue_plot(self),
    # Operations ---------------------------------------------------------------
    add_plot = function(x, pname)
      pmx_add_plot(self, private, x, pname),
    update_plot = function(pname, strat.facet=NULL, strat.color=NULL,
      filter=NULL, trans=NULL,
      ..., pmxgpar = NULL) {
      pmx_update_plot(
        self, private, pname,
        strat.color = strat.color, strat.facet = strat.facet,
        filter, trans, ..., pmxgpar = pmxgpar
      )
    },
    remove_plot = function(pname, ...)
      pmx_remove_plot(self, private, pname, ...),
    get_config = function(pname)
      pmx_get_config(self, private, pname),
    set_config = function(pname, new)
      pmx_set_config(self, private, pname, new),
    get_plot = function(pname)
      pmx_get_plot(self, private, pname),
    plots = function()
      pmx_plots(self, private),
    post_load = function()
      pmx_post_load(self, private)
  )
)
# Initialize a pmxClass controller: normalize the arguments, read the
# modelling input, load the modelling output data sets, detect random
# effects, merge optional simulations and register all configured plots.
pmx_initialize <- function(self, private, data_path, input, dv,
  config, dvid, cats, conts, occ, strats,
  settings, endpoint, sim, bloq) {
  DVID <- NULL
  ## both the modelling output directory and the configuration are mandatory
  ## (the original condition tested data_path twice and never checked config)
  if (missing(data_path) || missing(config)) {
    stop(
      "Expecting source path(directory ) and a config path",
      call. = FALSE
    )
  }
  if (missing(dvid)) dvid <- NULL
  ## normalize the optional covariate arguments to "".
  ## `||` (short-circuit) is required here: with the previous elementwise `|`
  ## a truly missing argument was forced by is.null() (error), and a NULL
  ## argument yielded logical(0) so the default was never applied.
  if (missing(occ) || is.null(occ) || anyNA(occ)) occ <- ""
  if (missing(cats) || is.null(cats) || anyNA(cats)) cats <- ""
  if (missing(conts) || is.null(conts) || anyNA(conts)) conts <- ""
  if (missing(strats) || is.null(strats) || anyNA(strats)) strats <- ""
  if (missing(settings)) settings <- NULL
  if (missing(bloq)) bloq <- NULL
  private$.data_path <- data_path
  self$save_dir <- data_path
  if (is.character(input)) {
    private$.input_path <- input
  }
  self$config <- config
  self$dv <- dv
  self$dvid <- dvid
  self$cats <- cats
  self$conts <- conts
  self$occ <- toupper(occ)
  self$strats <- strats
  self$settings <- settings
  self$bloq <- bloq
  ## a bare code (character/integer) is promoted to a full endpoint object
  if (!is.null(endpoint) && is.atomic(endpoint)) {
    endpoint <- pmx_endpoint(code = as.character(endpoint))
  }
  self$endpoint <- endpoint
  ## the observations can be a file path or an in-memory data.frame
  if (is.character(input) && file.exists(input)) {
    self$input_file <- input
    self$input <- read_input(input, self$dv, self$dvid, self$cats, self$conts, self$strats, self$occ, self$endpoint)
  } else {
    if (!inherits(input, "data.frame")) {
      stop("observation data should be either a file or a data.frame")
    }
    self$input <- setDT(input)
  }
  self$data <- load_source(
    sys = config$sys, private$.data_path,
    self$config$data, dvid = self$dvid,
    endpoint = self$endpoint,
    occ = self$occ
  )
  ##
  ## check random effect: eta_<name>_(mode|mean) columns signal random effects
  if (!is.null(self$data[["eta"]])) {
    re <- grep("^eta_(.*)_(mode|mean)", names(self$data[["eta"]]), value = TRUE)
    if (length(re) > 0) {
      self$has_re <- TRUE
      self$re <- gsub("^eta_(.*)_(mode|mean)", "\\1", re)
      self$data[["eta"]] <-
        post_load_eta(
          self$data[["eta"]],
          self$input, self$sys, self$occ
        )
    }
  }
  self$post_load()
  if (!is.null(sim)) {
    ## merge the simulation output with the observation covariates
    dx <- sim[["sim"]]
    inn <- copy(self$input)[, self$dv := NULL]
    # check for unique keys in the observation variables
    if (sum(duplicated(inn[, c("ID", "TIME"), with = FALSE])) > 0) {
      warning(
        paste(
          " Different covariates for the same patient same time point\n",
          "--> Duplicated created in the vpc data set."
        ),
        call. = FALSE
      )
    }
    self$data[["sim"]] <- merge(dx, inn, by = c("ID", "TIME"))
    self$sim <- sim
  }
  ## abbrev: load the default abbreviation table shipped with the package
  keys_file <- file.path(
    system.file(package = "ggPMX"), "init", "abbrev.yaml"
  )
  self$abbrev <- set_abbrev(self, yaml.load_file(keys_file))
  ## create all plots declared in the configuration
  for (nn in names(self$config$plots)) {
    x <- self$config$plots[[nn]]
    x$pname <- tolower(nn)
    x$use.defaults <- FALSE
    do.call(set_plot, c(ctr = self, x))
  }
}
# Print a human readable summary of the controller: a small key/value table
# (paths, dv/dvid, covariates) followed by the configuration and plot names.
#' @importFrom knitr kable
pmx_print <- function(self, private, ...) {
  cat("\npmx object:\n")
  # helper: emit a (name, comma-joined values) row, or NULL when all values
  # are empty strings so rbind() silently skips the row
  paste_col <- function(n, x) if (all(x != "")) c(n, paste(x, collapse = ","))
  ctr_table <-
    rbind(
      c(
        "working directory",
        basename(dirname(private$.data_path))
      ),
      c("Modelling input file", basename(private$.input_path)),
      c("dv", self$dv),
      c("dvid", self$dvid),
      paste_col("cats", self %>% get_cats()),
      paste_col("conts", self %>% get_conts()),
      paste_col("strats", self %>% get_strats())
    )
  colnames(ctr_table) <- c("PARAM", "VALUE")
  print(kable(ctr_table))
  print(self$config, ctr = self, plot_names = names(private$.plots))
}
# Apply the transformation `trans` (a function name, e.g. "log") to the
# plot-relevant columns of the data `dx`. The columns depend on the plot
# configuration `x` (its ptype) and on `direction` ("x", "y" or "xy").
# `trans` may also be a compact string like "log_xy" that encodes both.
pmx_transform <- function(x, dx, trans, direction) {
  if (is.character(trans)) {
    # split e.g. "log_xy" into trans = "log" and direction = "xy"
    params <- strsplit(trans, "_")[[1]]
    trans <- params[1]
    direction <- params[2]
  }
  # SCATTER (residual) plots: transform the aesthetic columns
  cols_res <- function(x) {
    with(x, {
      switch(
        direction,
        x = aess$x,
        y = aess$y,
        xy = c(aess$x, aess$y)
      )
    })
  }
  # individual plots: x is TIME, y are the prediction/observation columns
  cols_ind <- function(x) {
    switch(
      direction,
      x = "TIME",
      y = c("PRED", "IPRED", "DV"),
      xy = c("TIME", "PRED", "IPRED", "DV")
    )
  }
  # distribution plots always act on the single VALUE column
  cols_dis <- function(x) {
    switch(
      direction,
      x = c("VALUE"),
      y = c("VALUE"),
      xy = c("VALUE")
    )
  }
  # qq plots: only the x aesthetic can be transformed
  cols_qq <- function(x) {
    switch(
      direction,
      x = x$x
    )
  }
  # eta vs continuous covariates: only the y VALUE column
  cols_eta_conts <- function(x) {
    switch(
      direction,
      y = "VALUE"
    )
  }
  cols <- switch(
    x[["ptype"]],
    SCATTER = cols_res(x),
    IND = cols_ind(x),
    DIS = cols_dis(x),
    PMX_QQ = cols_qq(x),
    ETA_COV = cols_eta_conts(x)
  )
  # only transform columns actually present in the data
  cols <- intersect(cols, names(dx))
  if (length(cols) > 0) {
    # in-place data.table update applying the transformation function
    dx[, (cols) := lapply(.SD, match.fun(trans)), .SDcols = (cols)]
  }
  dx
}
## Drop the plot `pname` and its stored configuration from the controller.
pmx_remove_plot <- function(self, private, pname, ...) {
  for (store in c(".plots_configs", ".plots")) {
    private[[store]][[pname]] <- NULL
  }
  invisible(self)
}
## Look up the stored configuration of plot `pname`
## (lookup is case-insensitive: configs are stored under lower-case names).
pmx_get_config <- function(self, private, pname) {
  private$.plots_configs[[tolower(pname)]]
}
## Store `new` as the configuration of plot `pname` (under its lower-case name).
pmx_set_config <- function(self, private, pname, new) {
  key <- tolower(pname)
  private$.plots_configs[[key]] <- new
}
## Pop and return the oldest entry of the report queue; when the queue is
## empty, emit a message (unregistered plots break footnote numbering).
pmx_dequeue_plot <- function(self) {
  queue <- self$report_queue
  if (!length(queue)) {
    return(message("Warning: Chunk has plots that were not registered within ggPMX. Footnotes may be wrong."))
  }
  head_item <- queue[[1]]
  self$report_queue <- queue[-1]
  head_item
}
## Reset the figure-processing state of the controller before a report run.
## The previous version assigned to *local* variables `report_queue` and
## `report_n`, which had no effect; the fields are now reset on `self`.
pmx_fig_process_init <- function(self) {
  self$report_queue <- list()
  self$report_n <- 0
}
## Assert that every plot queued for the report has been consumed
## (dequeued) before figure processing wraps up.
pmx_fig_process_wrapup <- function(self) {
  assert_that(is_empty_queue(self))
}
## Fetch the stored plot object (or plot factory) named `pname`;
## plots are stored under lower-case names.
pmx_get_plot <- function(self, private, pname) {
  private$.plots[[tolower(pname)]]
}
## Names of all plots currently stored in the controller.
pmx_plots <- function(self, private) {
  stored_plots <- private$.plots
  names(stored_plots)
}
## Re-run post-processing of the loaded data sets (merging with the modelling
## input according to the plot configuration) and record any warnings raised.
pmx_post_load <- function(self, private) {
  res <- post_load(
    self$data, self$input, self$config$sys,
    self$config$plots,
    occ = get_occ(self)
  )
  self$data <- res$data
  self$warnings <- res$warnings
}
#' Print pmxClass object
#'
#' @param x pmxClass object
#' @param ... additional arguments to pass to print
#'
#' @family pmxclass functions
#' @return print object to screen
#' @export
print.pmxClass <- function(x, ...) {
  x$print(...)
}
#' Creates a deep copy of the controller
#'
#' @param ctr \code{pmxClass} object
#' @param keep_globals \code{logical} if TRUE we keep the global parameters changed by pmx_settings
#' @param ... extra parameters passed to \code{pmx_settings}
#'
#' @return an object of \code{pmxClass}
#' @export
#' @details
#'
#' The controller is an `R6` object; it behaves like a reference object.
#' Some functions (methods) can have a side effect on the controller and modify it internally.
#' Technically speaking we talk about chaining not piping here. However,
#' using \code{pmx_copy} the user can work on a copy of the controller.
#'
#' By default the copy doesn't keep global parameters set using pmx_settings.
#'
#' @examples
#' ctr <- theophylline()
#' cctr <- ctr %>% pmx_copy
#' ## Any change in the copy has no side effect on the original and vice versa
pmx_copy <- function(ctr, keep_globals=FALSE, ...) {
  assert_that(is_pmxclass(ctr))
  cctr <- ctr$clone()
  # capture the extra arguments as expressions for later evaluation
  params <- as.list(match.call(expand.dots = TRUE))[-1]
  params <- lang_to_expr(params)
  ## params <- list(...)
  if (!keep_globals) {
    # rebuild the settings from the pmx_settings arguments passed via `...`
    # (the reversed formals minus the trailing `...` give the argument names)
    # NOTE(review): when no pmx_settings arguments are passed, the cloned
    # settings are kept even with keep_globals = FALSE — confirm intended
    nn <- rev(names(formals(pmx_settings)))[-1]
    eff_nn <- intersect(nn, names(params))
    if (length(eff_nn) > 0) {
      cctr$settings <- do.call(pmx_settings, params[eff_nn])
    }
  }
  cctr
}
| /R/pmxClass.R | no_license | agstudy/ggPMX | R | false | false | 30,384 | r |
#' Create simulation object
#'
#' @param file \code{character} path to the simulation file
#' @param irun \code{character} name of the simulation column
#' @param idv \code{character} name of the ind. variable, default "TIME"
#' @export
#' @example inst/examples/vpc.R
pmx_sim <- function(
  file,
  irun,
  idv) {
  if (missing(idv)) idv <- "TIME"
  # NOTE(review): when `file` does not exist the function returns NULL
  # silently (no warning) — confirm this is intended
  if (file.exists(file)) {
    sim <- pmx_fread(file)
    # normalize the independent-variable column name to upper-case "TIME"
    if (tolower(idv) == "time") {
      idvn <- names(sim)[tolower(names(sim)) == "time"]
      setnames(sim, idvn, "TIME")
      idv <- "TIME"
    }
    # normalize the subject identifier column to "ID"
    # NOTE(review): assumes the file always has an id column — setnames would
    # be a no-op/error otherwise; confirm against expected simulation format
    id_col <- grep("^id$", names(sim), ignore.case = TRUE, value = TRUE)
    setnames(sim, id_col, "ID")
    obj <- list(
      sim = sim,
      idv = idv,
      irun = irun
    )
    structure(obj, class = c("pmxSimClass", "list"))
  }
}
# Check that a mandatory pmx argument was supplied.
# `value` is the argument (possibly missing/NULL), `pmxname` is the pmx-side
# name used in the error message. Returns `value` unchanged when valid.
check_argument <- function(value, pmxname) {
  call <- match.call()
  # `||` (short-circuit) is required: with the previous elementwise `|`,
  # `is.null(value)` was evaluated even for a missing argument, raising
  # "argument ... is missing" instead of the intended message. The format
  # string also had a single %s for two data arguments, so `pmxname` was
  # never reported.
  if (missing(value) || is.null(value)) {
    stop(
      sprintf(
        "Please set a %s argument (%s)",
        deparse(call$value), pmxname
      )
    )
  }
  value
}
#' Create a pmx object
#'
#' Create a pmx object from a data source
#' @param config Can be either :
#' The complete path for the configuration file, the name of configuration within the built-in
#' list of configurations, or a configuration object.
#' @param sys the system name can be MLX/NM
#' @param directory \code{character} modelling output directory.
#' @param input \code{character} complete path to the modelling input file
#' @param dv \code{character} the name of measurable variable used in the input modelling file
#' @param dvid \emph{[Optional]} \code{character} observation type parameter. This is mandatory
#' in case of multiple endpoint (PKPD).
#' @param cats \emph{[Optional]}\code{character} vector of categorical covariates
#' @param conts \emph{[Optional]}\code{character} vector of continuous covariates
#' @param occ \emph{[Optional]}\code{character} occasional covariate variable name
#' @param strats \emph{[Optional]}\code{character} extra stratification variables
#' @param settings \emph{[Optional]}\code{pmxSettingsClass} \code{\link{pmx_settings}}
#' shared between all plots
#' @param endpoint \code{pmxEndpointClass} or \code{integer} or \code{character} default to NULL
#' of the endpoint code. \code{\link{pmx_endpoint}}
#' @param sim \code{pmxSimClass} default to NULL. \code{\link{pmx_sim}}
#' @param bloq \code{pmxBLOQClass} default to NULL. \code{\link{pmx_bloq}}
#' @return \code{pmxClass} controller object.
#' @export
#' @example inst/examples/controller.R
pmx <-
  function(config, sys=c("mlx", "nm"), directory, input, dv, dvid, cats=NULL, conts=NULL, occ=NULL, strats=NULL,
           settings=NULL, endpoint=NULL, sim=NULL, bloq=NULL) {
    # mandatory arguments: fail early with an explicit message
    directory <- check_argument(directory, "work_dir")
    input <- check_argument(input, "input")
    dv <- check_argument(dv, "dv")
    ## dvid <- check_argument(dvid, "dvid")
    if (missing(cats)) cats <- ""
    if (missing(sim)) sim <- NULL
    if (missing(endpoint)) {
      endpoint <- NULL
    }
    assert_that(is_character_or_null(cats))
    if (missing(conts)) conts <- ""
    assert_that(is_character_or_null(conts))
    if (missing(occ)) occ <- ""
    assert_that(is_character_or_null(occ))
    if (missing(strats)) strats <- ""
    assert_that(is_character_or_null(strats))
    if (!inherits(config, "pmxConfig")) {
      # Monolix 2018 output is detected by its populationParameters.txt file
      # and overrides the requested system
      if ("populationParameters.txt" %in% list.files(directory)) sys <- "mlx18"
      config <- load_config(config, sys)
    }
    # fall back to default settings when none (or an invalid object) is given
    if (missing(settings)) settings <- pmx_settings()
    if (!inherits(settings, "pmxSettingsClass")) {
      settings <- pmx_settings()
    }
    if (missing(bloq)) bloq <- NULL
    assert_that(inherits(bloq, "pmxBLOQClass") || is.null(bloq))
    pmxClass$new(directory, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq)
  }
#' @rdname pmx
#' @details
#' \code{pmx_mlx} is a wrapper to pmx for the MONOLIX system ( \code{sys="mlx"})
#' @export
pmx_mlx <-
  function(config, directory, input, dv, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq) {
    pmx(config, "mlx", directory, input, dv, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq)
  }
#' Create a controller from mlxtran file
#'
#' @param file_name \code{character} mlxtran file path.
#' @param call \code{logical} if TRUE the result is the parameters parsed
#' @param ... extra arguments passed to pmx_mlx.
#' @rdname pmx
#'
#' @export
#' @details
#'
#' \code{pmx_mlxtran} parses the mlxtran file and guesses the \code{\link{pmx_mlx}} arguments. In case of
#' multi endpoint the first endpoint is selected. You can though set the endpoint through the same argument.
#' When you set \code{call=TRUE}, no controller is created but only the parameters parsed
#' by mlxtran are returned. This can be very helpful in case you would like to customize
#' parameters (adding settings via pmx_settings, changing the default endpoint, ...).
pmx_mlxtran <- function(file_name, config="standing", call=FALSE, endpoint, ...) {
  params <- parse_mlxtran(file_name)
  params$config <- config
  # capture user supplied arguments (unevaluated), dropping file_name
  rr <- as.list(match.call()[-1])
  rr$file_name <- NULL
  # `config` is already stored in params above; keeping a second entry would
  # make do.call() fail with "matched by multiple actual arguments" whenever
  # the user passes config explicitly
  rr$config <- NULL
  params <- append(params, rr)
  if (!missing(endpoint)) {
    # drop any endpoint captured from the call, then keep the evaluated one
    params$endpoint <- NULL
    params$endpoint <- endpoint
  }
  if (call) {
    # return the parameter list only (no controller), e.g. for customization
    params$call <- NULL
    return(params)
  }
  params$call <- NULL
  do.call(pmx_mlx, params)
}
## Render a two-sided formula like `a ~ b` as the text "a and b";
## non-formula input is returned unchanged.
formula_to_text <- function(form) {
  if (!is.formula(form)) {
    return(form)
  }
  parts <- as.character(as.list(form)[-1])
  paste(parts, collapse = " and ")
}
#' Create controller global settings
#' @param is.draft \code{logical} if FALSE any plot is without draft annotation
#' @param use.abbrev \code{logical} if TRUE use abbreviations mapping for axis names
#' @param color.scales \code{list} list containing elements of scale_color_manual
#' @param use.labels \code{logical} if TRUE replace factor named by cats.labels
#' @param cats.labels \code{list} list of named vectors for each factor
#' @param use.titles \code{logical} FALSE to generate plots without titles
#' @param effects \code{list} list of effects levels and labels
#' @param ... extra parameter not used yet
#' @return pmxSettingsClass object
#' @example inst/examples/pmx-settings.R
#' @export
pmx_settings <-
  function(is.draft=TRUE, use.abbrev=FALSE, color.scales=NULL,
           cats.labels=NULL, use.labels=FALSE, use.titles=TRUE,
           effects=NULL,
           ...) {
    ## validate the optional effects specification before building the object
    if (!missing(effects) && !is.null(effects)) {
      if (!is.list(effects)) stop("effects should be a list")
      has_levels <- exists("levels", effects)
      has_labels <- exists("labels", effects)
      if (!(has_levels && has_labels)) {
        stop("effects should be a list that contains levels and labels")
      }
      if (length(effects$labels) != length(effects$levels)) {
        stop("effects should be a list that contains levels and labels have the same length")
      }
    }
    ## collect the settings, preserving the documented field order
    fields <- c(
      "is.draft", "use.abbrev", "color.scales", "use.labels",
      "cats.labels", "use.titles", "effects"
    )
    res <- mget(fields)
    if (use.labels) {
      ## build a ggplot2 labeller from the category labels for facet strips
      res$labeller <- do.call("labeller", cats.labels)
    }
    structure(
      res, ...,
      class = "pmxSettingsClass"
    )
  }
#' Creates pmx endpoint object
#'
#' @param code \code{character} endpoint code : used to filter observations DVID==code.
#' @param label \code{character} endpoint label: used to set title and axis labels
#' @param unit \code{character} endpoint unit : used to set title and axis labels
#' @param file.code \code{character} endpoint file code : used to set predictions and finegrid \cr
#' files extensions in case using code parameter is not enough.
#' @param trans \code{list} Transformation parameter not used yet.
#' @export
#'
#' @example inst/examples/endpoint.R
#' @details
#' In case of multiple endpoints (e.g. the pkpd case) we need to pass an endpoint to the pmx call.
#' Internally, ggPMX will filter the observations data set to keep only rows satisfying \code{DVID==code}.
#' The \code{code} is also used to find the right predictions and/or finegrid files.
#' ggPMX uses the configuration file to find the path of the predictions file
#' (like the single endpoint case) and then filters the right file using the code parameter. \cr
#' For example:
#' \itemize{
#' \item predictions\{code\}.txt for mlx16
#' \item predictions\{code\}.txt and y\{code\}_residual for mlx18
#' }
#'
#' For some tricky examples the code parameter is not enough to find the files. In that case the
#' \code{file.code} parameter is used to distinguish the endpoint files.
pmx_endpoint <-
  function(code,
           label="",
           unit="",
           file.code=code,
           trans =NULL) {
    assert_that(is.character(code))
    assert_that(is.character(file.code))
    assert_that(is.character(unit))
    assert_that(is.character(label))
    assert_that(is_character_or_null(trans))
    ## build and class the endpoint descriptor in a single step
    structure(
      list(
        code = code,
        label = label,
        unit = unit,
        file.code = file.code,
        trans = trans
      ),
      class = "pmxEndpointClass"
    )
  }
#' Creates BLOQ object attributes
#'
#' @param cens \code{character} the censoring column name
#' @param limit \code{character} the limit column name (optional)
#' @param colour \code{character} the color of the geom
#' @param size \code{numeric} the size of the geom
#' @param alpha \code{numeric} the alpha of the geom
#' @param show \code{logical} if FALSE remove all censored observations
#' @param ... any other graphical parameter
#'
#' @export
#' @details
#' To define that a measurement is censored, the observation data set should include
#' a CENSORING column ( default to `CENS` ) and put 1 for lower limit or -1 for upper limit. \cr
#' Optionally, the data set can contain a limit column ( default to `LIMIT`) to set the other limit.
pmx_bloq <-
  function(
           cens="CENS",
           limit ="LIMIT",
           colour="pink",
           size=2,
           alpha=0.9,
           show=TRUE,
           ...) {
    ## fixed attributes first, then any extra graphical parameters from `...`
    fixed <- list(
      cens = cens,
      limit = limit,
      show = show,
      colour = colour,
      size = size,
      alpha = alpha
    )
    structure(
      c(fixed, list(...)),
      class = "pmxBLOQClass"
    )
  }
#' Create a new plot of the desired type
#'
#' @param ctr \code{pmxClass} controller object
#' @param ptype plot type can be:
##' \itemize{
##' \item{"IND"}{ Individual plot type: \code{\link{individual}} }
##' \item{"DIS"}{ Distribution plot type : \code{\link{distrib}}}
##' \item{"SCATTER"}{ Residual plot type :\code{\link{residual}}}
##' }
##' @param pname plot name, if missing it will be created using function aesthetics
##' @param filter optional filter which will be applied to plotting data
##' @param strat.facet \code{formula} define categorical stratification as formula
##' @param strat.color \code{character} categorical stratification variable used for coloring
##' @param trans \code{list} transformation operator
##' @param color.scales \code{list} can be used with strat.color to set scale_color_manual
##' @param use.defaults \code{logical} if FALSE do not use defaults defined in yaml init files
#' @param ... other plot parameters to configure \code{\link{pmx_gpar}}.
#'
#' @family pmxclass
#' @return invisible ctr object
#' @export
set_plot <- function(
  ctr,
  ptype = c(
    "IND", "DIS", "SCATTER", "ETA_PAIRS",
    "ETA_COV", "PMX_QQ", "VPC", "PMX_DENS"
  ),
  pname,
  use.defaults=TRUE,
  filter =NULL,
  strat.color=NULL,
  strat.facet=NULL,
  color.scales=NULL,
  trans=NULL, ...) {
  assert_that(is_pmxclass(ctr))
  ptype <- match.arg(ptype)
  assert_that(is_string_or_null(pname))
  assert_that(is_string_or_null(strat.color))
  assert_that(is_string_or_formula_or_null(strat.facet))
  params <- list(...)
  if (use.defaults) {
    # merge user parameters over the package-level defaults shipped in
    # inst/init/defaults.yaml (keyed by lower-case plot type)
    defaults_yaml <-
      file.path(system.file(package = "ggPMX"), "init", "defaults.yaml")
    defaults <- yaml.load_file(defaults_yaml)
    names(defaults) <- tolower(names(defaults))
    def <- if (tolower(ptype) %in% names(defaults)) {
      defaults[[tolower(ptype)]]
    } else {
      # DIS has two default sets depending on the requested geometry
      # NOTE(review): assumes params$type is always supplied for DIS — confirm
      if (ptype == "DIS") {
        if (params$type == "hist") {
          defaults[["dis_hist"]]
        } else {
          defaults[["dis_box"]]
        }
      }
    }
    if (!is.null(def)) {
      params <- l_left_join(def, params)
      params$ptype <- NULL
    }
  }
  if (ptype == "VPC") {
    # VPC needs the simulation's dv/idv column names
    params$dv <- ctr$sim$dv
    params$idv <- ctr$sim$idv
  }
  # build the plot configuration; random-effect plots are skipped (NULL)
  # when the model has no random effects
  conf <-
    switch(ptype,
      IND = do.call(individual, params),
      DIS = if (ctr$has_re) do.call(distrib, params),
      SCATTER = do.call(residual, params),
      ETA_PAIRS = if (ctr$has_re) do.call(eta_pairs, params),
      ETA_COV = if (ctr$has_re) do.call(eta_cov, params),
      PMX_QQ = do.call(pmx_qq, params),
      PMX_DENS = do.call(pmx_dens, params),
      VPC = do.call(pmx_vpc, params)
    )
  # `filter` is captured unevaluated (NSE) and turned into a filter function
  if (!is.null(substitute(filter))) {
    filter <- deparse(substitute(filter))
    filter <- local_filter(filter)
  }
  if (!is.null(conf)) {
    conf[["filter"]] <- filter
    conf[["trans"]] <- trans
    if (!is.null(strat.color)) conf[["strat.color"]] <- strat.color
    if (!is.null(strat.facet)) conf[["strat.facet"]] <- strat.facet
    if (!is.null(color.scales)) conf$gp[["color.scales"]] <- color.scales
    # remember the call parameters so the controller can be re-printed/saved
    ctr[["config"]][["plots"]][[toupper(pname)]] <-
      c(ptype = ptype, list(...))
    ctr$add_plot(conf, pname)
  }
  invisible(ctr)
}
#' update or add a new abbreviation
#'
#' @param ctr \code{pmxClass} controller object
#' @param ... Options to set or add, with the form \code{name = value}.
#' @export
#' @examples
#' ctr <- theophylline()
#' ctr %>% set_abbrev("new_param"="new value")
#' ctr %>% get_abbrev("new_param")
set_abbrev <- function(ctr, ...) {
  assert_that(is_pmxclass(ctr))
  updates <- list(...)
  ## start from scratch when no abbreviations exist yet, otherwise merge
  ## the new entries into the current table (new values win)
  abbrev <- if (length(ctr$abbrev) == 0) {
    unlist(updates, recursive = FALSE)
  } else {
    l_left_join(ctr$abbrev, updates)
  }
  class(abbrev) <- c("abbreviation", "list")
  ctr$abbrev <- abbrev
}
#' S3 print abbreviation
#' @param x object of class configs
#' @param ... pass additional options (not used presently)
#' @return print abbreviation
#' @export
print.abbreviation <- function(x, ...) {
  assert_that(inherits(x, "abbreviation"))
  ## print one "name : definition" line per entry
  for (idx in seq_along(x)) {
    cat(sprintf("%s : %s \n", names(x)[idx], x[[idx]]))
  }
}
#' Get abbreviation definition by key
#'
#' @param param abbreviation term
#' @param ctr \code{pmxClass} controller
#'
#' @return \code{character} abbreviation definition
#' @export
get_abbrev <- function(ctr, param) {
  dict <- ctr$abbrev
  ## with no key requested, return the whole abbreviation table
  if (missing(param)) {
    return(dict)
  }
  ## fall back to the key itself when it has no definition
  if (is.null(dict[[param]])) param else dict[[param]]
}
#' Get plot object
#'
#' @param ctr \code{pmxClass} controller object
#' @param nplot character the plot name
#' @param npage integer or integer vector, set page number in case of multi pages plot
#'
#' @family pmxclass
#' @return ggplot object
#' @export
#' @examples
#' \dontrun{
#' library(ggPMX)
#' ctr <- theophylline()
#' p1 <- ctr %>% get_plot("iwres_ipred")
#' ## get all pages or some pages
#' p2 <- ctr %>% get_plot("individual")
#' ## returns one page of individual plot
#' p2 <- ctr %>% get_plot("individual",npage=1)
#' p3 <- ctr %>% get_plot("individual",npage=c(1,3))
#' ## get distribution plot
#' pdistri <- ctr %>% get_plot("eta_hist")
#'
#' }
get_plot <- function(ctr, nplot, npage = NULL) {
  ## accept numeric page selections by coercing them to integer first
  if (is.numeric(npage)) {
    npage <- as.integer(npage)
  }
  assert_that(is_pmxclass(ctr))
  assert_that(is_string(nplot))
  assert_that(is_integer_or_null(npage))
  plot_key <- tolower(nplot)
  assert_that(is_valid_plot_name(plot_key, plot_names(ctr)))
  stored <- ctr$get_plot(plot_key)
  ## multi-page plots are stored as closures that take the page selection
  if (is.function(stored)) stored(npage) else stored
}
#' Get plot names
#'
#' @param ctr \code{pmxClass} controller object
#'
#' @family pmxclass
#' @return list of plot names
#' @export
plot_names <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ## names of the plots currently registered in the controller
  ctr$plots()
}
#' Get plots description
#'
#' @param ctr \code{pmxClass} controller object
#'
#' @family pmxclass
#' @return data.frame of plots
#' @export
plots <- function(ctr) {
  ## wrapper so the existence check can be driven by a function-name string
  existsF <- function(...) do.call("existsFunction", list(...))
  assert_that(is_pmxclass(ctr))
  x <- ctr$config
  ## map a plot name to the exported helper pmx_plot_<name> when it exists,
  ## otherwise advertise the generic pmx_plot('<name>', ...) call
  function_name <- function(nn) {
    fn <- sprintf("pmx_plot_%s", nn)
    if (!existsF(fn, where = asNamespace("ggPMX"))) {
      fn <- sprintf("pmx_plot('%s',...)", nn)
    }
    fn
  }
  if (exists("plots", x)) {
    pp <- x$plots
    names(pp) <- tolower(names(pp))
    ## keep only plots actually registered in the controller
    pp <- pp[ctr$plots()]
    data.table(
      plot_name = names(pp),
      plot_type = sapply(pp, "[[", "ptype"),
      plot_function = sapply(names(pp), function_name)
    )
  }
}
#' Get the plot config by name
#'
#' @param ctr the controller object
#' @param pname the plot name
#'
#' @family pmxclass
#' @return the config object
#' @export
#'
#' @examples
#' \dontrun{
#' ctr <- theophylline()
#' ctr %>% set_plot("IND", pname = "indiv1")
#' ctr %>% get_plot_config("distr1")
#' }
get_plot_config <- function(ctr, pname) {
  assert_that(is_pmxclass(ctr))
  ## delegate to the R6 accessor (the name is lower-cased there)
  ctr$get_config(pname)
}
#' Get controller data set
#'
#' @param ctr the controller object
#' @param data_set the data set name
#'
#' @family pmxclass
#' @return a data.table of the named data set if available.
#' @export
get_data <- function(ctr, data_set = c(
                       "estimates", "predictions",
                       "eta", "finegrid", "input", "sim",
                       "individual"
                     )) {
  assert_that(is_pmxclass(ctr))
  ## `match.arg()` is intentionally NOT used here: `set_data()` can register
  ## data sets under arbitrary names, and those must stay retrievable.
  ## BUG FIX: still guard against the unevaluated default (a length-7
  ## character vector), so that `get_data(ctr)` returns the first choice
  ## ("estimates") instead of failing in the scalar `if` conditions below.
  if (length(data_set) > 1) data_set <- data_set[[1]]
  ## "individual" is a public alias for the internal "IND" slot
  if (data_set == "individual") data_set <- "IND"
  ## copy() so callers cannot mutate the controller's data.table by reference
  if (data_set == "input") {
    copy(ctr[["input"]])
  } else {
    copy(ctr[["data"]][[data_set]])
  }
}
#' Set a controller data set
#'
#' @param ctr the controller object
#' @param ... a named list parameters (see example)
#' @family pmxclass
#' @details
#' This function can be used to set an existing data set or to create a new one. The basic
#' idea is to change the built-in data set (change the factor level names, change some rows
#' values or apply any other data set operation) and use the new data set using the dname
#' parameter of pmx_plot family functions.
#' @examples
#' ctr <- theophylline()
#' dx <- ctr %>% get_data("eta")
#' dx <- dx[,EFFECT:=factor(
#' EFFECT,levels=c("ka", "V", "Cl"),
#' labels=c("Concentration","Volume","Clearance"))]
#' ## update existing data set
#' ctr %>% set_data(eta=dx)
#' ## or create a new data set
#' ctr %>% set_data(eta_long=dx)
#' @export
set_data <- function(ctr, ...) {
  assert_that(is_pmxclass(ctr))
  ## capture the unevaluated `name = value` pairs (drop the function name
  ## and the `ctr` argument from the matched call)
  params <- as.list(match.call(expand.dots = TRUE))[-c(1, 2)]
  nms <- names(params)
  ## Every data set must be passed as a *named* argument.
  ## BUG FIX: the previous `if (!nzchar(names(params)))` produced a
  ## zero-length condition for fully unnamed calls (cryptic error) and a
  ## vector condition for partially named calls; check all names explicitly.
  if (is.null(nms) || !all(nzchar(nms))) {
    stop("each data set should be well named")
  }
  ## evaluate each value and store it under its name on the controller
  invisible(Map(function(n, v) ctr$data[[n]] <- eval(v), nms, params))
}
#' Get category covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a charcater vector
#' @export
get_cats <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ## categorical covariate names ("" sentinel when none were declared)
  ctr$cats
}
#' Get extra stratification variables
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a charcater vector
#' @export
get_strats <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ## extra stratification variable names ("" sentinel when none declared)
  ctr$strats
}
#' Get covariates variables
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a charcater vector
#' @export
get_covariates <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ## combine categorical and continuous covariates, dropping duplicates
  ## and the "" placeholder used for unset covariate slots
  covs <- unique(c(ctr$cats, ctr$conts))
  covs[nzchar(covs)]
}
#' Get continuous covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a charcater vector
#' @export
get_conts <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ## continuous covariate names ("" sentinel when none were declared)
  ctr$conts
}
#' Get controller occasional covariates
#'
#' @param ctr the controller object
#'
#' @family pmxclass
#' @return a charcater vector
#' @export
get_occ <- function(ctr) {
  assert_that(is_pmxclass(ctr))
  ## occasional covariate names (upper-cased at initialisation)
  ctr$occ
}
# pmxSource (R6 Class) ------------------------------------------------------------
#' @importFrom R6 R6Class
## R6 controller class holding the model data, configuration and the plots
## registered so far. Methods are thin wrappers delegating to the pmx_*
## helper functions defined below, keeping the class definition readable.
pmxClass <- R6::R6Class(
  "pmxClass",
  # Private methods ------------------------------------------------------------
  private = list(
    .data_path = "",           # directory the model outputs were read from
    .input_path = "",          # path of the modelling input file (if any)
    .covariates = NULL,
    .plots = list(),           # generated plot objects, keyed by lower-case name
    .plots_configs = list()    # matching plot configurations
  ),
  # Public methods -------------------------------------------------------------
  public = list(
    data = NULL,               # named list of loaded data sets
    config = NULL,
    input = NULL, input_file = NULL,
    dv = NULL,                 # dependent-variable column name
    dvid = NULL, cats = NULL, conts = NULL, occ = NULL,
    strats = NULL,
    settings = NULL,
    has_re = FALSE, re = NULL, # random-effects flag and effect names
    abbrev = list(),           # abbreviation dictionary (see set_abbrev)
    endpoint = NULL,
    warnings = list(),
    footnote = FALSE,
    save_dir = NULL,
    report_queue = list(),     # plot file names queued for report footnotes
    report_n = 0,
    plot_file_name = "",
    sim = NULL,
    bloq = NULL,
    initialize = function(data_path, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq)
      pmx_initialize(self, private, data_path, input, dv, config, dvid, cats, conts, occ, strats, settings, endpoint, sim, bloq),
    print = function(data_path, config, ...)
      pmx_print(self, private, ...),
    ## derive a unique plot file name for the report and remember it
    enqueue_plot = function(pname) {
      self$report_n <- self$report_n + 1
      pname_file <- paste0(pname, "-", self$report_n)
      self$plot_file_name <- pname_file
      self$report_queue <- c(self$report_queue, pname_file)
    },
    dequeue_plot = function() pmx_dequeue_plot(self),
    # Operations ---------------------------------------------------------------
    add_plot = function(x, pname)
      pmx_add_plot(self, private, x, pname),
    update_plot = function(pname, strat.facet=NULL, strat.color=NULL,
                           filter=NULL, trans=NULL,
                           ..., pmxgpar = NULL) {
      pmx_update_plot(
        self, private, pname,
        strat.color = strat.color, strat.facet = strat.facet,
        filter, trans, ..., pmxgpar = pmxgpar
      )
    },
    remove_plot = function(pname, ...)
      pmx_remove_plot(self, private, pname, ...),
    get_config = function(pname)
      pmx_get_config(self, private, pname),
    set_config = function(pname, new)
      pmx_set_config(self, private, pname, new),
    get_plot = function(pname)
      pmx_get_plot(self, private, pname),
    plots = function()
      pmx_plots(self, private),
    post_load = function()
      pmx_post_load(self, private)
  )
)
## Initialise a pmxClass controller: normalise arguments, read the
## observation input, load the model outputs, detect random effects and
## pre-build every plot declared in the configuration.
pmx_initialize <- function(self, private, data_path, input, dv,
                           config, dvid, cats, conts, occ, strats,
                           settings, endpoint, sim, bloq) {
  DVID <- NULL # silence R CMD check NOTE about data.table NSE columns
  ## BUG FIX: the original tested `missing(data_path)` twice; per the error
  ## message, the config argument was meant to be checked as well.
  if (missing(data_path) || missing(config)) {
    stop(
      "Expecting source path(directory ) and a config path",
      call. = FALSE
    )
  }
  if (missing(dvid)) dvid <- NULL
  ## Normalise optional covariate arguments to the "" sentinel.
  ## BUG FIX: the previous `any(missing(x) | is.null(x) | is.na(x))` form
  ## forced evaluation of a missing argument (error) and reduced to
  ## logical(0) for NULL input (leaving NULL unconverted); the
  ## short-circuiting `||` chains below handle both cases.
  if (missing(occ) || is.null(occ) || anyNA(occ)) occ <- ""
  if (missing(cats) || is.null(cats) || anyNA(cats)) cats <- ""
  if (missing(conts) || is.null(conts) || anyNA(conts)) conts <- ""
  if (missing(strats) || is.null(strats) || anyNA(strats)) strats <- ""
  if (missing(settings)) settings <- NULL
  if (missing(bloq)) bloq <- NULL
  private$.data_path <- data_path
  self$save_dir <- data_path
  if (is.character(input)) {
    private$.input_path <- input
  }
  self$config <- config
  self$dv <- dv
  self$dvid <- dvid
  self$cats <- cats
  self$conts <- conts
  self$occ <- toupper(occ)
  self$strats <- strats
  self$settings <- settings
  self$bloq <- bloq
  ## a bare endpoint code is promoted to a full pmx_endpoint object
  if (!is.null(endpoint) && is.atomic(endpoint)) {
    endpoint <- pmx_endpoint(code = as.character(endpoint))
  }
  self$endpoint <- endpoint
  ## the observation data is either a file path or an in-memory data.frame
  if (is.character(input) && file.exists(input)) {
    self$input_file <- input
    self$input <- read_input(input, self$dv, self$dvid, self$cats, self$conts, self$strats, self$occ, self$endpoint)
  } else {
    if (!inherits(input, "data.frame")) {
      stop("observation data should be either a file or a data.frame")
    }
    self$input <- setDT(input)
  }
  self$data <- load_source(
    sys = config$sys, private$.data_path,
    self$config$data, dvid = self$dvid,
    endpoint = self$endpoint,
    occ = self$occ
  )
  ##
  ## check random effect: eta columns follow the eta_<name>_<mode|mean> pattern
  if (!is.null(self$data[["eta"]])) {
    re <- grep("^eta_(.*)_(mode|mean)", names(self$data[["eta"]]), value = TRUE)
    if (length(re) > 0) {
      self$has_re <- TRUE
      self$re <- gsub("^eta_(.*)_(mode|mean)", "\\1", re)
      ## NOTE(review): self$sys is not a declared field (always NULL here);
      ## self$config$sys may have been intended -- confirm against
      ## post_load_eta()'s contract before changing.
      self$data[["eta"]] <-
        post_load_eta(
          self$data[["eta"]],
          self$input, self$sys, self$occ
        )
    }
  }
  self$post_load()
  if (!is.null(sim)) {
    dx <- sim[["sim"]]
    ## drop the observed dv before joining covariates onto the simulations
    inn <- copy(self$input)[, self$dv := NULL]
    # check for unique keys in the observation variables
    if (sum(duplicated(inn[, c("ID", "TIME"), with = FALSE])) > 0) {
      warning(
        paste(
          " Different covariates for the same patient same time point\n",
          "--> Duplicated created in the vpc data set."
        ),
        call. = FALSE
      )
    }
    self$data[["sim"]] <- merge(dx, inn, by = c("ID", "TIME"))
    self$sim <- sim
  }
  ## abbrev: seed the abbreviation dictionary from the shipped yaml
  keys_file <- file.path(
    system.file(package = "ggPMX"), "init", "abbrev.yaml"
  )
  self$abbrev <- set_abbrev(self, yaml.load_file(keys_file))
  ## create all plots declared in the configuration (defaults already merged)
  for (nn in names(self$config$plots)) {
    x <- self$config$plots[[nn]]
    x$pname <- tolower(nn)
    x$use.defaults <- FALSE
    do.call(set_plot, c(ctr = self, x))
  }
}
#' @importFrom knitr kable
## Pretty-print a controller summary table (via knitr::kable) followed by
## its configuration object.
pmx_print <- function(self, private, ...) {
  cat("\npmx object:\n")
  ## collapse a character vector into one table row; drop the row entirely
  ## when any value is the "" sentinel (unset covariates)
  paste_col <- function(n, x) if (all(x != "")) c(n, paste(x, collapse = ","))
  ctr_table <-
    rbind(
      c(
        "working directory",
        basename(dirname(private$.data_path))
      ),
      c("Modelling input file", basename(private$.input_path)),
      c("dv", self$dv),
      c("dvid", self$dvid),
      paste_col("cats", self %>% get_cats()),
      paste_col("conts", self %>% get_conts()),
      paste_col("strats", self %>% get_strats())
    )
  colnames(ctr_table) <- c("PARAM", "VALUE")
  print(kable(ctr_table))
  print(self$config, ctr = self, plot_names = names(private$.plots))
}
## Apply a transformation function (e.g. "log") to the plotting-data columns
## selected by the plot type (`x$ptype`) and the direction ("x", "y", "xy").
## NOTE: modifies `dx` in place via data.table `:=` and also returns it.
pmx_transform <- function(x, dx, trans, direction) {
  ## a string such as "log_xy" encodes both the function and the direction
  if (is.character(trans)) {
    params <- strsplit(trans, "_")[[1]]
    trans <- params[1]
    direction <- params[2]
  }
  ## SCATTER plots: transformed columns come from the plot aesthetics
  cols_res <- function(x) {
    with(x, {
      switch(
        direction,
        x = aess$x,
        y = aess$y,
        xy = c(aess$x, aess$y)
      )
    })
  }
  ## the helpers below take `x` only for a uniform signature; they read
  ## `direction` captured from the enclosing scope
  cols_ind <- function(x) {
    switch(
      direction,
      x = "TIME",
      y = c("PRED", "IPRED", "DV"),
      xy = c("TIME", "PRED", "IPRED", "DV")
    )
  }
  cols_dis <- function(x) {
    switch(
      direction,
      x = c("VALUE"),
      y = c("VALUE"),
      xy = c("VALUE")
    )
  }
  cols_qq <- function(x) {
    switch(
      direction,
      x = x$x
    )
  }
  cols_eta_conts <- function(x) {
    switch(
      direction,
      y = "VALUE"
    )
  }
  cols <- switch(
    x[["ptype"]],
    SCATTER = cols_res(x),
    IND = cols_ind(x),
    DIS = cols_dis(x),
    PMX_QQ = cols_qq(x),
    ETA_COV = cols_eta_conts(x)
  )
  ## only transform columns that actually exist in the data
  cols <- intersect(cols, names(dx))
  if (length(cols) > 0) {
    dx[, (cols) := lapply(.SD, match.fun(trans)), .SDcols = (cols)]
  }
  dx
}
## Remove a plot (and its configuration) from the private stores.
pmx_remove_plot <- function(self, private, pname, ...) {
  ## BUG FIX: lower-case the key for consistency with pmx_get_config(),
  ## pmx_set_config() and pmx_get_plot(), which all store/look up plots
  ## under lower-case names; otherwise removal by a differently-cased
  ## name silently does nothing.
  pname <- tolower(pname)
  private$.plots_configs[[pname]] <- NULL
  private$.plots[[pname]] <- NULL
  invisible(self)
}
pmx_get_config <- function(self, private, pname) {
  ## configurations are stored under lower-case names
  private$.plots_configs[[tolower(pname)]]
}
pmx_set_config <- function(self, private, pname, new) {
  ## store under the lower-cased name, mirroring pmx_get_config()
  private$.plots_configs[[tolower(pname)]] <- new
}
pmx_dequeue_plot <- function(self) {
  queue <- self$report_queue
  ## an empty queue means a chunk produced plots ggPMX never registered
  if (length(queue) == 0) {
    message("Warning: Chunk has plots that were not registered within ggPMX. Footnotes may be wrong.")
    return(invisible(NULL))
  }
  ## pop and return the oldest queued plot name
  head_item <- queue[[1]]
  self$report_queue <- queue[-1]
  head_item
}
## Reset the figure-reporting state on the controller.
## BUG FIX: the previous version assigned to *local* variables
## `report_queue`/`report_n`, which were discarded on return, so the
## function had no effect; the fields on `self` are reset instead.
pmx_fig_process_init <- function(self) {
  self$report_queue <- list()
  self$report_n <- 0
}
## Report hook run after processing: every queued plot must have been
## consumed (dequeued) by the footnote machinery by now.
pmx_fig_process_wrapup <- function(self) {
  assert_that(is_empty_queue(self))
}
pmx_get_plot <- function(self, private, pname) {
  ## plots are stored under lower-case names
  private$.plots[[tolower(pname)]]
}
## Names of every plot registered so far (keys of the private store).
pmx_plots <- function(self, private) {
  names(private$.plots)
}
## Re-run the shared post-processing step on the controller data and keep
## any warnings it produced for later display.
pmx_post_load <- function(self, private) {
  res <- post_load(
    self$data, self$input, self$config$sys,
    self$config$plots,
    occ = get_occ(self)
  )
  self$data <- res$data
  self$warnings <- res$warnings
}
#' Print pmxClass object
#'
#' @param x pmxClass object
#' @param ... additional arguments to pass to print
#'
#' @family pmxclass functions
#' @return print object to screen
#' @export
print.pmxClass <- function(x, ...) {
  ## delegate to the R6 print method (pmx_print)
  x$print(...)
}
#' Creates a deep copy of the controller
#'
#' @param ctr \code{pmxClass} object
#' @param keep_globals \code{logical} if TRUE we keep the global parameters changed by pmx_settings
#' @param ... extra parameters passed to \code{pmx_settings}
#'
#' @return an object of \code{pmxClass}
#' @export
#' @details
#'
#' The controller is an `R6` object, so it behaves like a reference object.
#' Some functions (methods) can have a side effect on the controller and modify it internally.
#' Technically speaking we talk about chaining, not piping, here. However,
#' using \code{pmx_copy} the user can work on an independent copy of the controller.
#'
#' By default the copy does not keep global parameters set using pmx_settings.
#'
#' @examples
#' ctr <- theophylline()
#' cctr <- ctr %>% pmx_copy
#' ## Changes to the copy have no side effect on the original and vice versa
pmx_copy <- function(ctr, keep_globals=FALSE, ...) {
  assert_that(is_pmxclass(ctr))
  ## R6 clone() yields an independent object (no shared reference semantics)
  cctr <- ctr$clone()
  ## capture `...` unevaluated, then convert the calls to expressions
  params <- as.list(match.call(expand.dots = TRUE))[-1]
  params <- lang_to_expr(params)
  ## params <- list(...)
  if (!keep_globals) {
    ## rebuild the settings from scratch, keeping only pmx_settings()
    ## arguments that were explicitly passed through `...`
    nn <- rev(names(formals(pmx_settings)))[-1]
    eff_nn <- intersect(nn, names(params))
    if (length(eff_nn) > 0) {
      cctr$settings <- do.call(pmx_settings, params[eff_nn])
    }
  }
  cctr
}
|
## CDF of the zero-truncated Poisson distribution:
##   P(Y <= y | Y >= 1) = (ppois(y, lambda) - P(Y = 0)) / (1 - P(Y = 0))
## Vectorised over `y`; values below the truncation point (y < 1) get 0.
pztp <- function(y, lambda) {
  p_zero <- exp(-lambda)
  cdf <- (ppois(y, lambda) - p_zero) / (1 - p_zero)
  cdf[y < 1] <- 0
  cdf
}
| /R/pztp.R | no_license | cran/CopulaRegression | R | false | false | 269 | r | pztp <-
function(y,lambda){
dummy<-exp(-lambda)
#cat(paste("length of dummy: ",length(dummy),"\n"))
#cat(paste("length of ppois: ",length(ppois(y,lambda)),"\n"))
#cat(paste("---\n"))
out<-(ppois(y,lambda)-dummy)/(1-dummy)
out[y<1]=0
return(out)
}
|
## Fuzz/regression input for the internal C++ routine netrankr:::checkPairs:
## exercise it with a zero-length `x` and a scalar `y` (a case flagged by
## valgrind) and dump the structure of the result.
testlist <- list(x = numeric(0), y = 0)
result <- do.call(netrankr:::checkPairs,testlist)
str(result)
result <- do.call(netrankr:::checkPairs,testlist)
str(result) |
# Drop the last element of a character vector.
vars.group.bydf <- c("23", "dfa", "wer")
head(vars.group.bydf, -1)
| /support/array/general.R | permissive | lnsongxf/R4Econ | R | false | false | 113 | r | # Remove last element of array
vars.group.bydf <- c('23','dfa', 'wer')
vars.group.bydf[-length(vars.group.bydf)]
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{testthat}
\alias{testthat}
\alias{testthat-package}
\title{R package to make testing fun!}
\description{
Try the example below. Have a look at the references and learn more
from function documentation such as \code{\link{expect_that}}.
}
\details{
Software testing is important, but, in part because
it is frustrating and boring, many of us avoid it.
testthat is a new testing framework for R that is easy to learn and use,
and integrates with your existing workflow.
}
\examples{
library(testthat)
a <- 9
expect_that(a, is_less_than(10))
expect_less_than(a, 10)
}
\references{
Wickham, H (2011). testthat: Get Started with Testing.
\strong{The R Journal} \emph{3/1} 5-10.
\url{http://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf}
\url{https://github.com/hadley/testthat}
\url{http://adv-r.had.co.nz/Testing.html}
}
| /testthat/man/testthat.Rd | no_license | radfordneal/R-package-mods | R | false | false | 914 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{testthat}
\alias{testthat}
\alias{testthat-package}
\title{R package to make testing fun!}
\description{
Try the example below. Have a look at the references and learn more
from function documentation such as \code{\link{expect_that}}.
}
\details{
Software testing is important, but, in part because
it is frustrating and boring, many of us avoid it.
testthat is a new testing framework for R that is easy learn and use,
and integrates with your existing workflow.
}
\examples{
library(testthat)
a <- 9
expect_that(a, is_less_than(10))
expect_less_than(a, 10)
}
\references{
Wickham, H (2011). testthat: Get Started with Testing.
\strong{The R Journal} \emph{3/1} 5-10.
\url{http://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf}
\url{https://github.com/hadley/testthat}
\url{http://adv-r.had.co.nz/Testing.html}
}
|
/Scooping Rules.R | no_license | tamaramdg/Software_Actuarial_lll | R | false | false | 431 | r | ||
# Plot 2: Global Active Power over time for 2007-02-01 and 2007-02-02.
data_full <- read.csv("household_power_consumption.txt",
  header = TRUE, sep = ";", na.strings = "?", nrows = 2075259,
  check.names = FALSE, stringsAsFactors = FALSE, comment.char = "", quote = "\""
)
# keep only the two target days (dates are d/m/Y in the raw file)
data1 <- subset(data_full, Date %in% c("1/2/2007", "2/2/2007"))
data1$Date <- as.Date(data1$Date, format = "%d/%m/%Y")
# combine date and time into a single POSIXct timestamp column
datetime <- paste(as.Date(data1$Date), data1$Time)
data1$Datetime <- as.POSIXct(datetime)
with(data1, {
  plot(Global_active_power ~ Datetime,
    type = "l",
    ylab = "Global Active Power (kilowatts)", xlab = ""
  )
})
dev.copy(png, file = "plot2.png", height = 480, width = 480)
# BUG FIX: the PNG device was never closed, so plot2.png could be left
# empty or truncated; dev.off() flushes the copy and closes the device.
dev.off()
| /plot2.R | no_license | florin-dot/ExData_Plotting1 | R | false | false | 562 | r | data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data1 <- subset(data_full, Date %in% c("1/2/2007","2/2/2007"))
data1$Date <- as.Date(data1$Date, format="%d/%m/%Y")
datetime <- paste(as.Date(data1$Date), data1$Time)
data1$Datetime <- as.POSIXct(datetime)
with(data1, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
})
dev.copy(png, file="plot2.png", height=480, width=480)
|
\name{nb_exc}
\alias{nb_exc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Values higher or equal to Zobs
%% ~~function to do ... ~~
}
\description{ Computes how many values of the data set z are greater than or equal to the test statistic x0.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
nb_exc(x0,z)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x0}{ the test statistic - real number.
%% ~~Describe \code{x} here~~
}
\item{z}{ the data set - list of real numbers.
%% ~~Describe \code{x} here~~
}
}
\value{ Returns an integer M which is the number of values of z higher or equal to x0.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\author{ Marion
%% ~~who you are~~
}
\examples{
nb_exc(5,1:10)
## The function is currently defined as
function (x0,z)
{
M<-length(z[z>=x0])
return(M)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /nb_exc.Rd | no_license | mariondechallens/Second-Internship | R | false | false | 1,241 | rd | \name{nb_exc}
\alias{nb_exc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Values higher or equal to Zobs
%% ~~function to do ... ~~
}
\description{ Computes how many values of the data set z are higher or equal to the test statistic x0.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
nb_exc(x0,z)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x0}{ the test statistic - real number.
%% ~~Describe \code{x} here~~
}
\item{z}{ the data set - list of real numbers.
%% ~~Describe \code{x} here~~
}
}
\value{ Returns an integer M which is the number of values of z higher or equal to x0.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\author{ Marion
%% ~~who you are~~
}
\examples{
nb_exc(5,1:10)
## The function is currently defined as
function (x0,z)
{
M<-length(z[z>=x0])
return(M)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
library(tidyverse)
df <- tibble(X1= c(6,5,8,4,7,3,1,2,2,5,3,2), X2= c(7,9,6,9,9,3,6,3,3,1,1,3),X3=c("A","A","A","A","A","B","B","B","C","C","C","C") )
df<-read_table2(file="data/T11-7.txt", col_names = F)
df
group_column <- "X3"
group_column <- "X6"
groupnames<-df %>% select(group_column) %>% unique()
factors <- df %>% select(-group_column) %>% colnames()
n_factors <- length(factors)
X <- df %>% select(-group_column) %>% as.matrix()
X[,1]
groupnames[[1]][1]
g<-nrow(groupnames)
df %>% filter(group_column == groupnames[[1]][1])
df %>% select(group_column) %>% filter( group_column =="Wilhelm")
df<-as.data.frame(df)
#means per group
df[[group_column]]
#overall means regardless of group
df %>% select(-group_column) %>% summarise_all(.funs = mean)
#each col represents a group
group_means <- df %>% group_by_at(group_column) %>% summarise_all(.funs = mean) %>% select(-group_column) %>% as.matrix() %>% t()
#overall means regardless of group
overall_sample_mean <-df %>% select(-group_column) %>% summarise_all(.funs = mean) %>% as.matrix() %>% t()
n_j<-df %>% group_by_at(group_column) %>% tally()
n_j
colnames(n_j) <- c("group","n")
n_j$n
max(n_j$n) #max group number
## Number of observations in group g: looks up the global tally `n_j`
## by the g-th group name from the global `groupnames`.
get_n_from_g <- function(g){
  #groupnames[[1]][g]
  return( n_j %>% filter(group == groupnames[[1]][g] ) %>% select(n) %>% as.numeric() )
}
## Row indices of df belonging to each of the g groups, returned as a list
## of space-separated index strings (decoded by convert_string_to_numeric).
## Relies on the globals `g`, `group_column` and `groupnames`.
get_indices_per_group <- function(df){
  indices_list <- list()
  for (group in 1:g){
    #convert to character since each list element can't have different lengths
    indices_list[group] <- which(df[group_column][[1]] %in% groupnames[[1]][group]) %>% as.character() %>% paste(collapse = " ")
  }
  return(indices_list)
}
group_ind<-get_indices_per_group(df)
## Convert the g-th space-separated index string of group_ind back into a
## numeric vector. Simplified from a magrittr pipe + lapply(...)[[1]]
## indirection to a direct base-R call (same result, no pipe dependency).
convert_string_to_numeric <- function(group_ind, g) {
  tokens <- strsplit(group_ind[[g]], split = " ")[[1]]
  as.numeric(tokens)
}
tmp_matrix <- matrix(NA, nrow=g, ncol=max(n_j$n) )
## Fill a (groups x max-group-size) matrix with the observations of
## variable/column f; rows of groups smaller than the largest stay NA.
## Relies on the globals `g`, `X` and `group_ind`.
fill_observation_matrix <- function(tmp_matrix,f){
  for (group in 1:g){
    #print(convert_string_to_numeric(group_ind, g=group))
    values_at_g <- X[convert_string_to_numeric(group_ind, g=group), f]
    tmp_matrix[group, 1:get_n_from_g(group) ] <- values_at_g
  }
  return(tmp_matrix)
}
obs
#fill mean matrix
mean_value <- overall_sample_mean[1,][[1]] #replace overall_sample_mean[1,] with overall_sample_mean[f,]
overall_sample_mean_matrix <- replace(obs_matrix, obs_matrix != is.na(obs_matrix), mean_value) # replace obs_matrix with its overall mean
fill_group_mean_matrix <- function(tmp_matrix,f){
for (group in 1:g){
tmp_matrix[group, 1:get_n_from_g(group) ] <- group_means[f,group ]
}
return(tmp_matrix)
}
group_mean_matrix <- fill_group_mean_matrix(tmp_matrix,1)
estimated_treatment_effect_matrix <- group_mean_matrix - overall_sample_mean_matrix
residual_matrix <- obs_matrix-group_mean_matrix
obs_matrix == overall_sample_mean_matrix + estimated_treatment_effect_matrix + residual_matrix #TRUE
# Sum of squared entries of a matrix/vector, ignoring missing values.
get_sum_of_squares <- function(mat) {
  sum(mat^2, na.rm = TRUE)
}
SS_obs<- get_sum_of_squares(obs_matrix)
SS_mean<- get_sum_of_squares(overall_sample_mean_matrix)
SS_tr<-get_sum_of_squares(estimated_treatment_effect_matrix)
SS_res<-get_sum_of_squares(residual_matrix)
SS_corrected <- SS_obs-SS_mean
get_SS_decomposition_for_factor_f <- function(f){
tmp_matrix <- matrix(NA, nrow=g, ncol=max(n_j$n))
obs_matrix<-fill_observation_matrix(tmp_matrix,f)
mean_value <- overall_sample_mean[f,][[1]]
overall_sample_mean_matrix <- replace(obs_matrix, obs_matrix != is.na(obs_matrix), mean_value) # replaces obs_matrix with its overall mean
group_mean_matrix <- fill_group_mean_matrix(tmp_matrix,f)
estimated_treatment_effect_matrix <- group_mean_matrix - overall_sample_mean_matrix
residual_matrix <- obs_matrix-group_mean_matrix
SS_obs<- get_sum_of_squares(obs_matrix)
SS_mean<- get_sum_of_squares(overall_sample_mean_matrix)
SS_tr<-get_sum_of_squares(estimated_treatment_effect_matrix)
SS_res<-get_sum_of_squares(residual_matrix)
SS_corrected <- SS_obs-SS_mean
return(
list(
SS_obs = SS_obs,
SS_mean = SS_mean,
SS_tr = SS_tr,
SS_res = SS_res,
SS_corrected = SS_corrected,
obs_matrix=obs_matrix,
overall_sample_mean_matrix = overall_sample_mean_matrix,
group_mean_matrix = group_mean_matrix,
estimated_treatment_effect_matrix = estimated_treatment_effect_matrix,
residual_matrix = residual_matrix
)
)
}
test<-get_SS_decomposition_for_factor_f(2)
test
###################
# TREATMENT MATRIX
B <- matrix(0,nrow = n_factors, ncol = n_factors)
# RESIDUAL MATRIX
W <- matrix(0,nrow = n_factors, ncol = n_factors)
# TOTAL MATRIX
T <- matrix(0,nrow = n_factors, ncol = n_factors)
results_list <- list()
for (f in 1:n_factors){
# step 1: calculate SS for each variable
SS <- get_SS_decomposition_for_factor_f(f)
results_list[[f]] <- SS
B[f,f] <- SS$SS_tr
W[f,f] <- SS$SS_res
T[f,f] <- SS$SS_corrected
}
B
W
T
p<-as.vector(results_list[[1]]$obs_matrix)*as.vector(results_list[[2]]$obs_matrix)
sum(as.vector(results_list[[1]]$obs_matrix)*as.vector(results_list[[2]]$obs_matrix) ,na.rm = T)
sum(p,na.rm = T)
results_list[[1]]$estimated_treatment_effect_matrix[,1]
get_treatment_crossproduct <- function(i,j){
tot <- 0
for (group in 1:g){
group_mean_one <- results_list[[i]]$estimated_treatment_effect_matrix[group,1]
group_mean_two <- results_list[[j]]$estimated_treatment_effect_matrix[group,1]
n<-get_n_from_g(group)
tot = tot + n*group_mean_one*group_mean_two
cat(group ,"\n")
}
return(tot)
}
get_n_from_g(1)*results_list[[1]]$estimated_treatment_effect_matrix[1,1]*results_list[[2]]$estimated_treatment_effect_matrix[1,1] +
get_n_from_g(2)*results_list[[1]]$estimated_treatment_effect_matrix[2,1]*results_list[[2]]$estimated_treatment_effect_matrix[2,1] +
get_n_from_g(3)*results_list[[1]]$estimated_treatment_effect_matrix[3,1]*results_list[[2]]$estimated_treatment_effect_matrix[3,1]
get_treatment_crossproduct(1,2)
# CROSS PRODUCTS
for (f in 1:n_factors){
for(f_col in 1:n_factors){
#if (f!=f_col){
total <- sum(as.vector(results_list[[f]]$obs_matrix)*as.vector(results_list[[f_col]]$obs_matrix) ,na.rm = T)
mean <- sum(as.vector(results_list[[f]]$overall_sample_mean_matrix)*as.vector(results_list[[f_col]]$overall_sample_mean_matrix) ,na.rm = T)
treatment <- get_treatment_crossproduct(f,f_col)
residual<- sum(as.vector(results_list[[f]]$residual_matrix)*as.vector(results_list[[f_col]]$residual_matrix) ,na.rm = T)
obs_corrected <- total-mean
B[f,f_col] <- treatment
W[f,f_col] <- residual
T[f,f_col] <- obs_corrected
#}
}
}
W
T
B
det(W)/det(B+W)
nl <- c(271,138,107)
n<- sum(nl)
x1 <- matrix(c(2.066,0.480,0.082,0.360), nrow=1, ncol=4)
x2 <- matrix(c(2.167,0.596,0.124,0.418), nrow=1, ncol=4)
x3 <- matrix(c(2.273,0.521,0.125,0.383), nrow=1, ncol=4)
xbar <- round((nl[1]*x1 + nl[2]*x2 + nl[3]*x3 )/ n ,digits = 3)
xbar
x1
x2
x3
?manova
manova()
#X3 = group
m1<-manova(cbind(X1, X2) ~ X3, data = df)
summary(m1,test = "Wilks")
aov(data = df, formula = )
| /MANOVA_test.R | no_license | emilwest/multivariate-theory | R | false | false | 7,586 | r | library(tidyverse)
df <- tibble(X1= c(6,5,8,4,7,3,1,2,2,5,3,2), X2= c(7,9,6,9,9,3,6,3,3,1,1,3),X3=c("A","A","A","A","A","B","B","B","C","C","C","C") )
df<-read_table2(file="data/T11-7.txt", col_names = F)
df
group_column <- "X3"
group_column <- "X6"
groupnames<-df %>% select(group_column) %>% unique()
factors <- df %>% select(-group_column) %>% colnames()
n_factors <- length(factors)
X <- df %>% select(-group_column) %>% as.matrix()
X[,1]
groupnames[[1]][1]
g<-nrow(groupnames)
df %>% filter(group_column == groupnames[[1]][1])
df %>% select(group_column) %>% filter( group_column =="Wilhelm")
df<-as.data.frame(df)
#means per group
df[[group_column]]
#overall means regardless of group
df %>% select(-group_column) %>% summarise_all(.funs = mean)
#each col represents a group
group_means <- df %>% group_by_at(group_column) %>% summarise_all(.funs = mean) %>% select(-group_column) %>% as.matrix() %>% t()
#overall means regardless of group
overall_sample_mean <-df %>% select(-group_column) %>% summarise_all(.funs = mean) %>% as.matrix() %>% t()
n_j<-df %>% group_by_at(group_column) %>% tally()
n_j
colnames(n_j) <- c("group","n")
n_j$n
max(n_j$n) #max group number
get_n_from_g <- function(g){
#groupnames[[1]][g]
return( n_j %>% filter(group == groupnames[[1]][g] ) %>% select(n) %>% as.numeric() )
}
get_n_from_g(1)
get_indices_per_group <- function(df){
indices_list <- list()
for (group in 1:g){
#convert to character since each list element can't have different lengths
indices_list[group] <- which(df[group_column][[1]] %in% groupnames[[1]][group]) %>% as.character() %>% paste(collapse = " ")
}
return(indices_list)
}
group_ind<-get_indices_per_group(df)
convert_string_to_numeric <- function(group_ind,g){
test<-group_ind[[g]] %>% strsplit(split = " ") # group_ind[[g]] gets the indices in group g
test<-lapply(test,as.numeric)[[1]] # [[1]] gets the vector only
return(test)
}
tmp_matrix <- matrix(NA, nrow=g, ncol=max(n_j$n) )
fill_observation_matrix <- function(tmp_matrix,f){
for (group in 1:g){
#print(convert_string_to_numeric(group_ind, g=group))
values_at_g <- X[convert_string_to_numeric(group_ind, g=group), f]
tmp_matrix[group, 1:get_n_from_g(group) ] <- values_at_g
}
return(tmp_matrix)
}
obs_matrix<-fill_observation_matrix(tmp_matrix,1)
obs
#fill mean matrix
mean_value <- overall_sample_mean[1,][[1]] #replace overall_sample_mean[1,] with overall_sample_mean[f,]
overall_sample_mean_matrix <- replace(obs_matrix, obs_matrix != is.na(obs_matrix), mean_value) # replace obs_matrix with its overall mean
fill_group_mean_matrix <- function(tmp_matrix,f){
for (group in 1:g){
tmp_matrix[group, 1:get_n_from_g(group) ] <- group_means[f,group ]
}
return(tmp_matrix)
}
group_mean_matrix <- fill_group_mean_matrix(tmp_matrix,1)
estimated_treatment_effect_matrix <- group_mean_matrix - overall_sample_mean_matrix
residual_matrix <- obs_matrix-group_mean_matrix
obs_matrix == overall_sample_mean_matrix + estimated_treatment_effect_matrix + residual_matrix #TRUE
get_sum_of_squares <- function(mat){
return(sum( mat[!is.na(mat)]^2 ))
}
SS_obs<- get_sum_of_squares(obs_matrix)
SS_mean<- get_sum_of_squares(overall_sample_mean_matrix)
SS_tr<-get_sum_of_squares(estimated_treatment_effect_matrix)
SS_res<-get_sum_of_squares(residual_matrix)
SS_corrected <- SS_obs-SS_mean
get_SS_decomposition_for_factor_f <- function(f){
tmp_matrix <- matrix(NA, nrow=g, ncol=max(n_j$n))
obs_matrix<-fill_observation_matrix(tmp_matrix,f)
mean_value <- overall_sample_mean[f,][[1]]
overall_sample_mean_matrix <- replace(obs_matrix, obs_matrix != is.na(obs_matrix), mean_value) # replaces obs_matrix with its overall mean
group_mean_matrix <- fill_group_mean_matrix(tmp_matrix,f)
estimated_treatment_effect_matrix <- group_mean_matrix - overall_sample_mean_matrix
residual_matrix <- obs_matrix-group_mean_matrix
SS_obs<- get_sum_of_squares(obs_matrix)
SS_mean<- get_sum_of_squares(overall_sample_mean_matrix)
SS_tr<-get_sum_of_squares(estimated_treatment_effect_matrix)
SS_res<-get_sum_of_squares(residual_matrix)
SS_corrected <- SS_obs-SS_mean
return(
list(
SS_obs = SS_obs,
SS_mean = SS_mean,
SS_tr = SS_tr,
SS_res = SS_res,
SS_corrected = SS_corrected,
obs_matrix=obs_matrix,
overall_sample_mean_matrix = overall_sample_mean_matrix,
group_mean_matrix = group_mean_matrix,
estimated_treatment_effect_matrix = estimated_treatment_effect_matrix,
residual_matrix = residual_matrix
)
)
}
test<-get_SS_decomposition_for_factor_f(2)
test
###################
# TREATMENT MATRIX
B <- matrix(0,nrow = n_factors, ncol = n_factors)
# RESIDUAL MATRIX
W <- matrix(0,nrow = n_factors, ncol = n_factors)
# TOTAL MATRIX
T <- matrix(0,nrow = n_factors, ncol = n_factors)
results_list <- list()
for (f in 1:n_factors){
# step 1: calculate SS for each variable
SS <- get_SS_decomposition_for_factor_f(f)
results_list[[f]] <- SS
B[f,f] <- SS$SS_tr
W[f,f] <- SS$SS_res
T[f,f] <- SS$SS_corrected
}
B
W
T
p<-as.vector(results_list[[1]]$obs_matrix)*as.vector(results_list[[2]]$obs_matrix)
sum(as.vector(results_list[[1]]$obs_matrix)*as.vector(results_list[[2]]$obs_matrix) ,na.rm = T)
sum(p,na.rm = T)
results_list[[1]]$estimated_treatment_effect_matrix[,1]
get_treatment_crossproduct <- function(i,j){
tot <- 0
for (group in 1:g){
group_mean_one <- results_list[[i]]$estimated_treatment_effect_matrix[group,1]
group_mean_two <- results_list[[j]]$estimated_treatment_effect_matrix[group,1]
n<-get_n_from_g(group)
tot = tot + n*group_mean_one*group_mean_two
cat(group ,"\n")
}
return(tot)
}
get_n_from_g(1)*results_list[[1]]$estimated_treatment_effect_matrix[1,1]*results_list[[2]]$estimated_treatment_effect_matrix[1,1] +
get_n_from_g(2)*results_list[[1]]$estimated_treatment_effect_matrix[2,1]*results_list[[2]]$estimated_treatment_effect_matrix[2,1] +
get_n_from_g(3)*results_list[[1]]$estimated_treatment_effect_matrix[3,1]*results_list[[2]]$estimated_treatment_effect_matrix[3,1]
get_treatment_crossproduct(1,2)
# CROSS PRODUCTS
for (f in 1:n_factors){
for(f_col in 1:n_factors){
#if (f!=f_col){
total <- sum(as.vector(results_list[[f]]$obs_matrix)*as.vector(results_list[[f_col]]$obs_matrix) ,na.rm = T)
mean <- sum(as.vector(results_list[[f]]$overall_sample_mean_matrix)*as.vector(results_list[[f_col]]$overall_sample_mean_matrix) ,na.rm = T)
treatment <- get_treatment_crossproduct(f,f_col)
residual<- sum(as.vector(results_list[[f]]$residual_matrix)*as.vector(results_list[[f_col]]$residual_matrix) ,na.rm = T)
obs_corrected <- total-mean
B[f,f_col] <- treatment
W[f,f_col] <- residual
T[f,f_col] <- obs_corrected
#}
}
}
W
T
B
det(W)/det(B+W)
nl <- c(271,138,107)
n<- sum(nl)
x1 <- matrix(c(2.066,0.480,0.082,0.360), nrow=1, ncol=4)
x2 <- matrix(c(2.167,0.596,0.124,0.418), nrow=1, ncol=4)
x3 <- matrix(c(2.273,0.521,0.125,0.383), nrow=1, ncol=4)
xbar <- round((nl[1]*x1 + nl[2]*x2 + nl[3]*x3 )/ n ,digits = 3)
xbar
x1
x2
x3
?manova
manova()
#X3 = group
m1<-manova(cbind(X1, X2) ~ X3, data = df)
summary(m1,test = "Wilks")
aov(data = df, formula = )
|
library("knitr")
library("rgl")
#knit("4-Methylanilin.Rmd")
#markdownToHTML('4-Methylanilin.md', '4-Methylanilin.html', options=c("use_xhml"))
#system("pandoc -s 4-Methylanilin.html -o 4-Methylanilin.pdf")
knit2html('4-Methylanilin.Rmd')
| /FDA_Pesticide_Glossary/4-Methylanilin.R | permissive | andrewdefries/andrewdefries.github.io | R | false | false | 240 | r | library("knitr")
library("rgl")
#knit("4-Methylanilin.Rmd")
#markdownToHTML('4-Methylanilin.md', '4-Methylanilin.html', options=c("use_xhml"))
#system("pandoc -s 4-Methylanilin.html -o 4-Methylanilin.pdf")
knit2html('4-Methylanilin.Rmd')
|
#讀取檔案NBA2016-17球季全聯盟30隊各項攻擊均數
NBA1617Analysis <- read.csv("NBA1617TeamPerGameStatsAll.csv",header=T,sep = ",")
attach(NBA1617Analysis)
#籃球是以單位時間內得分多寡為勝負的比賽,先利用球隊場均得分進行敘述統計演練
NBATEAMPTS <- NBA1617Analysis$PTS
#設定顯示2X2圖形框
par(mfrow=c(2,2))
#顯示各隊均分圖形
hist(NBATEAMPTS)
dotchart(NBATEAMPTS)
boxplot(NBATEAMPTS)
qqnorm(NBATEAMPTS)
#畫出均得分點狀圖
par(mfrow=c(1,1))
library(epicalc)
dotplot(NBATEAMPTS,pch = 16,axes=F)
#敘述統計
#依據聯盟分組計算分組得分
xtabs(PTS~Area,data =NBA1617Analysis)
NBA1617Analysis.xtabs<-xtabs(PTS~Conference+Area,data=NBA1617Analysis)
#橫列邊際總和
margin.table(NBA1617Analysis.xtabs,margin = 1)
#直行邊際總和
margin.table(NBA1617Analysis.xtabs,margin = 2)
#橫列總和
rowSums(xtabs(PTS~Conference+Area,data=NBA1617Analysis))
#各分組依照分區的平均分數
colMeans(xtabs(PTS~Conference+Area,data=NBA1617Analysis))
#直行總和
colSums(xtabs(PTS~Conference+Area,data=NBA1617Analysis))
#計算各分區各組隊伍得分平均數
aggregate(NBATEAMPTS,by=list(NBA1617Analysis$Conference,NBA1617Analysis$Area),FUN=mean)
#計算分區各組得分比例數據
round(prop.table(xtabs(PTS~Conference+Area,data=NBA1617Analysis)),2)
#各橫列的邊際比例
round(prop.table(xtabs(PTS~Conference+Area,data=NBA1617Analysis),margin = 1),2)
#各直行的邊際比例
round(prop.table(xtabs(PTS~Conference+Area,data=NBA1617Analysis),margin =2),2)
#找出全聯盟均分統計數據
#x=NBATEAMPTS
my.desc=function(x)
{
n=length(x)
x.desc <- c(n,summary(x),var(x),sum(x),sqrt(var(x)),IQR(x),mad(x))
names(x.desc) <- c("樣本數","最小值","Q1","中位數","平均數","Q3","最大值","變異數","總和","標準差","IQR","MAD")
return(x.desc)
}
my.desc(NBATEAMPTS)
#截尾平均數(90%)
mean(NBATEAMPTS,trim = 0.1)
#截尾平均數(80%)
mean(NBATEAMPTS,trim = 0.2)
#計算眾數
t=table(NBATEAMPTS)
t[which(table(NBATEAMPTS)==max(table(NBATEAMPTS)))]
#計算偏態係數與峰態係數
library(TSA)
skewness(NBATEAMPTS)
kurtosis(NBATEAMPTS)
#畫出枝葉圖
stem(NBATEAMPTS)
#常態性檢定 P值0.2283 > 0.05
shapiro.test(NBATEAMPTS)
#判定機率分配---->logistic
library(qAnalyst)
rapidFitFun(NBATEAMPTS)
#查看東區球隊的場均得分屬於何種機率分配--->logistic
rapidFitFun(NBATEAMPTS[NBA1617Analysis$Conference=="Eastern"])
#查看西區球隊的場均得分屬於何種機率分配--->logistic
rapidFitFun(NBATEAMPTS[NBA1617Analysis$Conference=="Western"])
#Logistic線性回歸
#晉級Promotion與場均分PTS
#聯盟東區球隊
Eastern <- read.csv("EasternTeamPerGameStats.csv",header=T,sep = ",")
attach(Eastern)
ETEAMPTS <- Eastern$PTS
Emodellog<- glm(formula = Promotion~ETEAMPTS,data = Eastern,family = binomial(link = "logit"))
summary(Emodellog)
#以場均得分最低預測在聯盟例賽獲勝機率為0.1430553
predict.glm(Emodellog,type="response",newdata = data.frame(ETEAMPTS=101.1))
#以場均得分最高預測在聯盟例賽獲勝機率為0.9673395
predict.glm(Emodellog,type="response",newdata = data.frame(ETEAMPTS=110.3))
Emodellog$coefficients
#場均得分多的0.5628832 可以增加1.755727勝率
exp(0.5628832)
#設定分區及分組為類別變數
Eareaf <- factor(Eastern$Area)
EPromotionf <- factor(Eastern$Promotion)
#驗證分區及分組的變數設定正確
is.factor(Eareaf)
is.factor(EPromotionf)
#進行Logistic迴歸分析
Emodel2 <- glm(formula = Eastern$Promotion~Eareaf+EPromotionf,data=Eastern,family = binomial(link = "logit"))
summary(Emodel2)
epiDisplay::logistic.display(Emodel2)
#分組球隊在分區晉級可能性預測
classtab <- table(Eastern$Area,Eastern$Promotion)
prop.table(classtab,1)
#聯盟西區球隊
Western <- read.csv("WesternTeamPerGameStats.csv",header=T,sep = ",")
attach(Western)
WTEAMPTS <- Western$PTS
Wmodellog<- glm(formula = Promotion~WTEAMPTS,data = Western,family = binomial(link = "logit"))
summary(Wmodellog)
#以場均得分最低預測在聯盟例賽獲勝機率為0.301573
predict.glm(Wmodellog,type="response",newdata = data.frame(WTEAMPTS=97.9))
#以場均得分最高預測在聯盟例賽獲勝機率為0.7797157
predict.glm(Wmodellog,type="response",newdata = data.frame(WTEAMPTS=115.9))
Wmodellog$coefficients
#場均得分多的0.1168794 可以增加1.123984勝率
exp(0.1168794)
#設定分區及分組為類別變數
Wareaf <- factor(Western$Area)
WPromotionf <- factor(Western$Promotion)
#驗證分區及分組的變數設定正確
is.factor(Wareaf)
is.factor(WPromotionf)
#進行Logistic迴歸分析
Wmodel2 <- glm(formula = Western$Promotion~Wareaf+WPromotionf,data=Western,family = binomial(link = "logit"))
summary(model2)
epiDisplay::logistic.display(Wmodel2)
#分組球隊在分區晉級可能性預測
classtab <- table(Western$Area,Western$Promotion)
prop.table(classtab,1)
| /FinalReport/NBA1060502.R | no_license | KuChanTung/R | R | false | false | 4,906 | r |
#讀取檔案NBA2016-17球季全聯盟30隊各項攻擊均數
NBA1617Analysis <- read.csv("NBA1617TeamPerGameStatsAll.csv",header=T,sep = ",")
attach(NBA1617Analysis)
#籃球是以單位時間內得分多寡為勝負的比賽,先利用球隊場均得分進行敘述統計演練
NBATEAMPTS <- NBA1617Analysis$PTS
#設定顯示2X2圖形框
par(mfrow=c(2,2))
#顯示各隊均分圖形
hist(NBATEAMPTS)
dotchart(NBATEAMPTS)
boxplot(NBATEAMPTS)
qqnorm(NBATEAMPTS)
#畫出均得分點狀圖
par(mfrow=c(1,1))
library(epicalc)
dotplot(NBATEAMPTS,pch = 16,axes=F)
#敘述統計
#依據聯盟分組計算分組得分
xtabs(PTS~Area,data =NBA1617Analysis)
NBA1617Analysis.xtabs<-xtabs(PTS~Conference+Area,data=NBA1617Analysis)
#橫列邊際總和
margin.table(NBA1617Analysis.xtabs,margin = 1)
#直行邊際總和
margin.table(NBA1617Analysis.xtabs,margin = 2)
#橫列總和
rowSums(xtabs(PTS~Conference+Area,data=NBA1617Analysis))
#各分組依照分區的平均分數
colMeans(xtabs(PTS~Conference+Area,data=NBA1617Analysis))
#直行總和
colSums(xtabs(PTS~Conference+Area,data=NBA1617Analysis))
#計算各分區各組隊伍得分平均數
aggregate(NBATEAMPTS,by=list(NBA1617Analysis$Conference,NBA1617Analysis$Area),FUN=mean)
#計算分區各組得分比例數據
round(prop.table(xtabs(PTS~Conference+Area,data=NBA1617Analysis)),2)
#各橫列的邊際比例
round(prop.table(xtabs(PTS~Conference+Area,data=NBA1617Analysis),margin = 1),2)
#各直行的邊際比例
round(prop.table(xtabs(PTS~Conference+Area,data=NBA1617Analysis),margin =2),2)
#找出全聯盟均分統計數據
#x=NBATEAMPTS
my.desc=function(x)
{
n=length(x)
x.desc <- c(n,summary(x),var(x),sum(x),sqrt(var(x)),IQR(x),mad(x))
names(x.desc) <- c("樣本數","最小值","Q1","中位數","平均數","Q3","最大值","變異數","總和","標準差","IQR","MAD")
return(x.desc)
}
my.desc(NBATEAMPTS)
#截尾平均數(90%)
mean(NBATEAMPTS,trim = 0.1)
#截尾平均數(80%)
mean(NBATEAMPTS,trim = 0.2)
#計算眾數
t=table(NBATEAMPTS)
t[which(table(NBATEAMPTS)==max(table(NBATEAMPTS)))]
#計算偏態係數與峰態係數
library(TSA)
skewness(NBATEAMPTS)
kurtosis(NBATEAMPTS)
#畫出枝葉圖
stem(NBATEAMPTS)
#常態性檢定 P值0.2283 > 0.05
shapiro.test(NBATEAMPTS)
#判定機率分配---->logistic
library(qAnalyst)
rapidFitFun(NBATEAMPTS)
#查看東區球隊的場均得分屬於何種機率分配--->logistic
rapidFitFun(NBATEAMPTS[NBA1617Analysis$Conference=="Eastern"])
#查看西區球隊的場均得分屬於何種機率分配--->logistic
rapidFitFun(NBATEAMPTS[NBA1617Analysis$Conference=="Western"])
#Logistic線性回歸
#晉級Promotion與場均分PTS
#聯盟東區球隊
Eastern <- read.csv("EasternTeamPerGameStats.csv",header=T,sep = ",")
attach(Eastern)
ETEAMPTS <- Eastern$PTS
Emodellog<- glm(formula = Promotion~ETEAMPTS,data = Eastern,family = binomial(link = "logit"))
summary(Emodellog)
#以場均得分最低預測在聯盟例賽獲勝機率為0.1430553
predict.glm(Emodellog,type="response",newdata = data.frame(ETEAMPTS=101.1))
#以場均得分最高預測在聯盟例賽獲勝機率為0.9673395
predict.glm(Emodellog,type="response",newdata = data.frame(ETEAMPTS=110.3))
Emodellog$coefficients
#場均得分多的0.5628832 可以增加1.755727勝率
exp(0.5628832)
#設定分區及分組為類別變數
Eareaf <- factor(Eastern$Area)
EPromotionf <- factor(Eastern$Promotion)
#驗證分區及分組的變數設定正確
is.factor(Eareaf)
is.factor(EPromotionf)
#進行Logistic迴歸分析
Emodel2 <- glm(formula = Eastern$Promotion~Eareaf+EPromotionf,data=Eastern,family = binomial(link = "logit"))
summary(Emodel2)
epiDisplay::logistic.display(Emodel2)
#分組球隊在分區晉級可能性預測
classtab <- table(Eastern$Area,Eastern$Promotion)
prop.table(classtab,1)
#聯盟西區球隊
Western <- read.csv("WesternTeamPerGameStats.csv",header=T,sep = ",")
attach(Western)
WTEAMPTS <- Western$PTS
Wmodellog<- glm(formula = Promotion~WTEAMPTS,data = Western,family = binomial(link = "logit"))
summary(Wmodellog)
#以場均得分最低預測在聯盟例賽獲勝機率為0.301573
predict.glm(Wmodellog,type="response",newdata = data.frame(WTEAMPTS=97.9))
#以場均得分最高預測在聯盟例賽獲勝機率為0.7797157
predict.glm(Wmodellog,type="response",newdata = data.frame(WTEAMPTS=115.9))
Wmodellog$coefficients
#場均得分多的0.1168794 可以增加1.123984勝率
exp(0.1168794)
#設定分區及分組為類別變數
Wareaf <- factor(Western$Area)
WPromotionf <- factor(Western$Promotion)
#驗證分區及分組的變數設定正確
is.factor(Wareaf)
is.factor(WPromotionf)
#進行Logistic迴歸分析
Wmodel2 <- glm(formula = Western$Promotion~Wareaf+WPromotionf,data=Western,family = binomial(link = "logit"))
summary(model2)
epiDisplay::logistic.display(Wmodel2)
#分組球隊在分區晉級可能性預測
classtab <- table(Western$Area,Western$Promotion)
prop.table(classtab,1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fixtext.R
\name{fixtext}
\alias{fixtext}
\title{standardize text within columns of a dataframe}
\usage{
fixtext(df, colnames)
}
\arguments{
\item{df}{dataframe whose column text you want to standardize}
}
\description{
standardize text within columns of a dataframe
}
| /man/fixtext.Rd | no_license | johnfrye/fryeR | R | false | true | 346 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fixtext.R
\name{fixtext}
\alias{fixtext}
\title{standardize text within columns of a dataframe}
\usage{
fixtext(df, colnames)
}
\arguments{
\item{df}{dataframe whose column text you want to standardize}
}
\description{
standardize text within columns of a dataframe
}
|
###############################################################################
##
## Working with Quandl
##
###############################################################################
library(Quandl)
library(quantmod)
Quandl.auth("9FgxcyZLUuPWmU3Ak6LA")
assignInNamespace('Quandl.host', 'http://www.quandl.com/api', 'Quandl')
options(RCurlOptions = list(proxy = "gl.pl", proxyport = 80))
Quandl.search(query = "petroleum", silent = FALSE)
# Load the Facebook data with the help of Quandl
Facebook <- Quandl("GOOG/NASDAQ_FB", type = "xts", start_date="2014-01-01")
# Plot the chart with the help of candleChart()
candleChart(Facebook)
##Brent crude price up to date
rbrte = Quandl("DOE/RBRTE", start_date="2000-01-01", type = "xts")
futureb1 = Quandl("OFDP/FUTURE_B1", type = "xts")
candleChart(rbrte)
plot(rbrte)
candleChart(futureb1)
plot(futureb1)
| /scripts/oilprice.R | no_license | dainiuxt/RData | R | false | false | 883 | r | ###############################################################################
##
## Working with Quandl
##
###############################################################################
library(Quandl)
library(quantmod)
Quandl.auth("9FgxcyZLUuPWmU3Ak6LA")
assignInNamespace('Quandl.host', 'http://www.quandl.com/api', 'Quandl')
options(RCurlOptions = list(proxy = "gl.pl", proxyport = 80))
Quandl.search(query = "petroleum", silent = FALSE)
# Load the Facebook data with the help of Quandl
Facebook <- Quandl("GOOG/NASDAQ_FB", type = "xts", start_date="2014-01-01")
# Plot the chart with the help of candleChart()
candleChart(Facebook)
##Brent crude price up to date
rbrte = Quandl("DOE/RBRTE", start_date="2000-01-01", type = "xts")
futureb1 = Quandl("OFDP/FUTURE_B1", type = "xts")
candleChart(rbrte)
plot(rbrte)
candleChart(futureb1)
plot(futureb1)
|
# In this example the k-means clustering method is used to cluster a given toy
# data set. In k-means clustering one tries to partition n observations into k
# clusters in which each observation belongs to the cluster with the nearest mean.
# The algorithm class constructor takes the number of clusters and a distance to
# be used as input. The distance used in this example is Euclidean distance.
# After training one can fetch the result of clustering by obtaining the cluster
# centers and their radiuses.
library("sg")
fm_train <- as.matrix(read.table('../data/fm_train_real.dat'))
# KMEANS
print('KMeans')
k <- 3
iter <- 1000
dump <- sg('set_distance', 'EUCLIDIAN', 'REAL')
dump <- sg('set_features', 'TRAIN', fm_train)
dump <- sg('new_clustering', 'KMEANS')
dump <- sg('train_clustering', k, iter)
result <- sg('get_clustering')
radi <- result[[1]]
centers <- result[[2]]
| /build/shogun_lib/examples/documented/r_static/clustering_kmeans.R | no_license | behollis/muViewBranch | R | false | false | 885 | r | # In this example the k-means clustering method is used to cluster a given toy
# data set. In k-means clustering one tries to partition n observations into k
# clusters in which each observation belongs to the cluster with the nearest mean.
# The algorithm class constructor takes the number of clusters and a distance to
# be used as input. The distance used in this example is Euclidean distance.
# After training one can fetch the result of clustering by obtaining the cluster
# centers and their radiuses.
library("sg")
fm_train <- as.matrix(read.table('../data/fm_train_real.dat'))
# KMEANS
print('KMeans')
k <- 3
iter <- 1000
dump <- sg('set_distance', 'EUCLIDIAN', 'REAL')
dump <- sg('set_features', 'TRAIN', fm_train)
dump <- sg('new_clustering', 'KMEANS')
dump <- sg('train_clustering', k, iter)
result <- sg('get_clustering')
radi <- result[[1]]
centers <- result[[2]]
|
tsppath <- function(nodes, startpoint=NULL, type="tsp", method="farthest")
{
setuplocalization("STATS_TSP")
procname=gtxt("Traveling Salesperson Analysis")
warningsprocname = gtxt("Traveling Salesperson Analysis: Warnings")
omsid="STATSTSP"
warns = Warn(procname=warningsprocname,omsid=omsid)
values <- NULL
if (!is.null(startpoint) && !is.element(startpoint, nodes))
{
warns$warn(gtxt("Variable specified on START keyword must also be specified on NODES keyword."),
dostop=TRUE)
}
allvars <- nodes
varNum <- spssdictionary.GetVariableCount()
varNames <- c()
for (i in 0:(varNum-1))
{
varNames <- c(varNames, spssdictionary.GetVariableName(i))
}
allvarsIndex <- c()
for (var in allvars)
{
allvarsIndex <- c(allvarsIndex, match(var, varNames))
}
allvarsIndex <- sort(allvarsIndex)
allvars <- c()
for (i in allvarsIndex)
{
allvars <- c(allvars, varNames[i])
}
len <- length(allvars)
if (len > 0 && len < varNum)
{
cmd <- "COMPUTE filter = "
cmd <- paste(cmd, allvars[[1]], "=0", sep="")
for ( i in 2 : length(allvars))
{
variable <- paste(allvars[[i]], "=0", sep="")
cmd <- paste(cmd, variable, sep=" | ")
}
spsspkg.Submit(cmd)
spsspkg.Submit("FILTER BY filter.")
spsspkg.Submit("EXECUTE.")
values <- spssdata.GetDataFromSPSS(allvars)
spsspkg.Submit("FILTER OFF.")
spsspkg.Submit("DELETE VARIABLES filter.")
}
else
{
values <- spssdata.GetDataFromSPSS(allvars)
}
count = nrow(values)
varname <- unlist(labels(values)[2])
data <- matrix(unlist(values), count, count, dimnames=list(varname, varname))
tryCatch(library("TSP"), error=function(e){
warns$warn(gtxtf("The R %s package is required but could not be loaded.", "TSP"),dostop=TRUE)
}
)
type <- tolower(type)
if (type == "tsp")
tsp_data <- TSP(data)
else if (type == "atsp")
tsp_data <- ATSP(data)
method_name <- switch(tolower(method), nn="nn", repetitive="repetitive_nn", nearest="nearest_insertion", farthest="farthest_insertion", cheapest="cheapest_insertion", arbitrary="arbitrary_insertion", two_opt="2-opt")
tour <- solve_TSP(tsp_data, method=method_name)
labs <- labels(tour)
index <- as.integer(tour)
len <- length(tour)
if (!is.null(startpoint))
{
startIndex <- match(startpoint, labs)
tailCount <- len-startIndex+1
headCount <- startIndex-1
labs <- c(tail(labs, tailCount), head(labs, headCount))
index <- c(tail(index, tailCount), head(index, headCount))
}
#Create pivot table
labs <- c(labs, labs[1])
index <- c(index, index[1])
no_dummy_len <- len + 1
cost <- c(data[(index[1]-1) * len + index[1]])
for (i in 2:no_dummy_len)
{
cost <- c(cost, data[(index[i]-1) * len + index[i-1]])
}
charIndex <- c(1:no_dummy_len, "Total")
labs <- c(labs, "")
cost <- c(cost, tour_length(tour))
StartProcedure(procname, omsid)
method_name_disp <- switch(tolower(method), nn="Nearest neighbor", repetitive="Repetitive nearest neighbor", nearest="Nearest insertion", farthest="Farthest insertion", cheapest="Cheapest insertion", arbitrary="Arbitrary insertion", two_opt="2-Opt improvement heuristic")
spsspivottable.Display(data.frame(labs, cost), title="Solution", caption=paste("Method: ",method_name_disp),
rowdim="Order", hiderowdimtitle=FALSE, rowlabels=charIndex, collabels=c("Node", "Cost"))
spsspkg.EndProcedure()
}
Run <- function(args) {
cmdname = args[[1]]
args = args[[2]]
oobj = spsspkg.Syntax(list(
spsspkg.Template("NODES", subc="", ktype="existingvarlist", var="nodes", islist=TRUE),
spsspkg.Template("START", subc="OPTIONS", ktype="existingvarlist", var="startpoint", islist=FALSE),
spsspkg.Template("TYPE", subc="OPTIONS", ktype="literal", var="type", vallist=list("tsp", "atsp")),
spsspkg.Template("METHOD", subc="OPTIONS", ktype="literal", var="method", vallist=list("nn", "repetitive", "nearest", "farthest", "cheapest", "arbitrary", "two_opt")),
spsspkg.Template("HELP", subc="", ktype="bool")
))
if ("HELP" %in% attr(args,"names")) {
helper("Traveling_Salesperson_Problem")
}
else {
res <- spsspkg.processcmd(oobj, args, "tsppath")
}
}
gtxt <- function(...) {
return(gettext(...,domain="STATS_TSP"))
}
gtxtf <- function(...) {
return(gettextf(...,domain="STATS_TSP"))
}
StartProcedure<-function(procname, omsid){
if (as.integer(substr(spsspkg.GetSPSSVersion(),1, 2)) >= 19)
spsspkg.StartProcedure(procname,omsid)
else
spsspkg.StartProcedure(omsid)
}
Warn = function(procname, omsid) {
# constructor (sort of) for message management
lcl = list(
procname=procname,
omsid=omsid,
msglist = list(), # accumulate messages
msgnum = 0
)
# This line is the key to this approach
lcl = mylist2env(lcl) # makes this list into an environment
lcl$warn = function(msg=NULL, dostop=FALSE, inproc=FALSE) {
# Accumulate messages and, if dostop or no message, display all
# messages and end procedure state
# If dostop, issue a stop.
if (!is.null(msg)) { # accumulate message
assign("msgnum", lcl$msgnum + 1, envir=lcl)
# There seems to be no way to update an object, only replace it
m = lcl$msglist
m[[lcl$msgnum]] = msg
assign("msglist", m, envir=lcl)
}
if (is.null(msg) || dostop) {
lcl$display(inproc) # display messages and end procedure state
if (dostop) {
stop(gtxt("End of procedure"), call.=FALSE) # may result in dangling error text
}
}
}
lcl$display = function(inproc=FALSE) {
# display any accumulated messages as a warnings table or as prints
# and end procedure state, if any
if (lcl$msgnum == 0) { # nothing to display
if (inproc) {
spsspkg.EndProcedure()
}
} else {
if (!inproc) {
procok =tryCatch({
StartProcedure(lcl$procname, lcl$omsid)
TRUE
},
error = function(e) {
FALSE
}
)
}
if (procok) { # build and display a Warnings table if we can
table = spss.BasePivotTable("Warnings ","Warnings", isSplit=FALSE) # do not translate this
rowdim = BasePivotTable.Append(table,Dimension.Place.row,
gtxt("Message Number"), hideName = FALSE,hideLabels = FALSE)
for (i in 1:lcl$msgnum) {
rowcategory = spss.CellText.String(as.character(i))
BasePivotTable.SetCategories(table,rowdim,rowcategory)
BasePivotTable.SetCellValue(table,rowcategory,
spss.CellText.String(lcl$msglist[[i]]))
}
spsspkg.EndProcedure() # implies display
} else { # can't produce a table
for (i in 1:lcl$msgnum) {
print(lcl$msglist[[i]])
}
}
}
}
return(lcl)
}
mylist2env = function(alist) {
env = new.env()
lnames = names(alist)
for (i in 1:length(alist)) {
assign(lnames[[i]],value = alist[[i]], envir=env)
}
return(env)
}
setuplocalization = function(domain) {
# find and bind translation file names
# domain is the root name of the extension command .R file, e.g., "SPSSINC_BREUSCH_PAGAN"
# This would be bound to root location/SPSSINC_BREUSCH_PAGAN/lang
fpath = Find(file.exists, file.path(.libPaths(), paste(domain, ".R", sep="")))
bindtextdomain(domain, file.path(dirname(fpath), domain, "lang"))
}
if (exists("spsspkg.helper")) {
assign("helper", spsspkg.helper)
} | /src/STATS_TSP.R | permissive | IBMPredictiveAnalytics/Traveling_Salesperson_Problem | R | false | false | 7,946 | r | tsppath <- function(nodes, startpoint=NULL, type="tsp", method="farthest")
{
setuplocalization("STATS_TSP")
procname=gtxt("Traveling Salesperson Analysis")
warningsprocname = gtxt("Traveling Salesperson Analysis: Warnings")
omsid="STATSTSP"
warns = Warn(procname=warningsprocname,omsid=omsid)
values <- NULL
if (!is.null(startpoint) && !is.element(startpoint, nodes))
{
warns$warn(gtxt("Variable specified on START keyword must also be specified on NODES keyword."),
dostop=TRUE)
}
allvars <- nodes
varNum <- spssdictionary.GetVariableCount()
varNames <- c()
for (i in 0:(varNum-1))
{
varNames <- c(varNames, spssdictionary.GetVariableName(i))
}
allvarsIndex <- c()
for (var in allvars)
{
allvarsIndex <- c(allvarsIndex, match(var, varNames))
}
allvarsIndex <- sort(allvarsIndex)
allvars <- c()
for (i in allvarsIndex)
{
allvars <- c(allvars, varNames[i])
}
len <- length(allvars)
if (len > 0 && len < varNum)
{
cmd <- "COMPUTE filter = "
cmd <- paste(cmd, allvars[[1]], "=0", sep="")
for ( i in 2 : length(allvars))
{
variable <- paste(allvars[[i]], "=0", sep="")
cmd <- paste(cmd, variable, sep=" | ")
}
spsspkg.Submit(cmd)
spsspkg.Submit("FILTER BY filter.")
spsspkg.Submit("EXECUTE.")
values <- spssdata.GetDataFromSPSS(allvars)
spsspkg.Submit("FILTER OFF.")
spsspkg.Submit("DELETE VARIABLES filter.")
}
else
{
values <- spssdata.GetDataFromSPSS(allvars)
}
count = nrow(values)
varname <- unlist(labels(values)[2])
data <- matrix(unlist(values), count, count, dimnames=list(varname, varname))
tryCatch(library("TSP"), error=function(e){
warns$warn(gtxtf("The R %s package is required but could not be loaded.", "TSP"),dostop=TRUE)
}
)
type <- tolower(type)
if (type == "tsp")
tsp_data <- TSP(data)
else if (type == "atsp")
tsp_data <- ATSP(data)
method_name <- switch(tolower(method), nn="nn", repetitive="repetitive_nn", nearest="nearest_insertion", farthest="farthest_insertion", cheapest="cheapest_insertion", arbitrary="arbitrary_insertion", two_opt="2-opt")
tour <- solve_TSP(tsp_data, method=method_name)
labs <- labels(tour)
index <- as.integer(tour)
len <- length(tour)
if (!is.null(startpoint))
{
startIndex <- match(startpoint, labs)
tailCount <- len-startIndex+1
headCount <- startIndex-1
labs <- c(tail(labs, tailCount), head(labs, headCount))
index <- c(tail(index, tailCount), head(index, headCount))
}
#Create pivot table
labs <- c(labs, labs[1])
index <- c(index, index[1])
no_dummy_len <- len + 1
cost <- c(data[(index[1]-1) * len + index[1]])
for (i in 2:no_dummy_len)
{
cost <- c(cost, data[(index[i]-1) * len + index[i-1]])
}
charIndex <- c(1:no_dummy_len, "Total")
labs <- c(labs, "")
cost <- c(cost, tour_length(tour))
StartProcedure(procname, omsid)
method_name_disp <- switch(tolower(method), nn="Nearest neighbor", repetitive="Repetitive nearest neighbor", nearest="Nearest insertion", farthest="Farthest insertion", cheapest="Cheapest insertion", arbitrary="Arbitrary insertion", two_opt="2-Opt improvement heuristic")
spsspivottable.Display(data.frame(labs, cost), title="Solution", caption=paste("Method: ",method_name_disp),
rowdim="Order", hiderowdimtitle=FALSE, rowlabels=charIndex, collabels=c("Node", "Cost"))
spsspkg.EndProcedure()
}
Run <- function(args) {
    # Entry point invoked by the SPSS Statistics extension framework.
    # args is a two-element list: the command name and the parsed arguments.
    cmdname <- args[[1]]    # command name; unused here, kept for the framework contract
    args <- args[[2]]
    # Declarative syntax specification mapping subcommand keywords onto the
    # R-level parameter names of the worker function.
    templates <- list(
        spsspkg.Template("NODES", subc = "", ktype = "existingvarlist", var = "nodes", islist = TRUE),
        spsspkg.Template("START", subc = "OPTIONS", ktype = "existingvarlist", var = "startpoint", islist = FALSE),
        spsspkg.Template("TYPE", subc = "OPTIONS", ktype = "literal", var = "type", vallist = list("tsp", "atsp")),
        spsspkg.Template("METHOD", subc = "OPTIONS", ktype = "literal", var = "method", vallist = list("nn", "repetitive", "nearest", "farthest", "cheapest", "arbitrary", "two_opt")),
        spsspkg.Template("HELP", subc = "", ktype = "bool")
    )
    oobj <- spsspkg.Syntax(templates)
    if ("HELP" %in% names(args)) {
        # HELP short-circuits: show the topic instead of running the command.
        helper("Traveling_Salesperson_Problem")
    } else {
        res <- spsspkg.processcmd(oobj, args, "tsppath")
    }
}
gtxt <- function(...) {
    # Translate a message in this extension's text domain.
    # Falls back to the untranslated message when no catalog is installed.
    gettext(..., domain = "STATS_TSP")
}
gtxtf <- function(...) {
    # sprintf-style translated formatting in this extension's text domain.
    # With no catalog installed, behaves like gettextf/sprintf on the input.
    gettextf(..., domain = "STATS_TSP")
}
StartProcedure <- function(procname, omsid) {
    # spsspkg.StartProcedure gained a procedure-name argument in Statistics 19;
    # older versions accept only the OMS identifier.
    version <- as.integer(substr(spsspkg.GetSPSSVersion(), 1, 2))
    if (version >= 19) {
        spsspkg.StartProcedure(procname, omsid)
    } else {
        spsspkg.StartProcedure(omsid)
    }
}
Warn = function(procname, omsid) {
    # constructor (sort of) for message management.
    # Returns an environment exposing warn() and display(); messages are
    # accumulated and, at the end, shown as a Warnings pivot table (or
    # printed when no procedure state can be established).
    lcl = list(
        procname=procname,
        omsid=omsid,
        msglist = list(),   # accumulated messages
        msgnum = 0          # number of accumulated messages
    )
    # Turn the list into an environment so the closures below can mutate
    # shared state via assign(); base list2env replaces the local helper.
    lcl = list2env(lcl, envir=new.env())
    lcl$warn = function(msg=NULL, dostop=FALSE, inproc=FALSE) {
        # Accumulate messages and, if dostop or no message, display all
        # messages and end procedure state.
        # If dostop, issue a stop.
        if (!is.null(msg)) { # accumulate message
            assign("msgnum", lcl$msgnum + 1, envir=lcl)
            # There seems to be no way to update an object, only replace it
            m = lcl$msglist
            m[[lcl$msgnum]] = msg
            assign("msglist", m, envir=lcl)
        }
        if (is.null(msg) || dostop) {
            lcl$display(inproc) # display messages and end procedure state
            if (dostop) {
                stop(gtxt("End of procedure"), call.=FALSE) # may result in dangling error text
            }
        }
    }
    lcl$display = function(inproc=FALSE) {
        # display any accumulated messages as a warnings table or as prints
        # and end procedure state, if any
        if (lcl$msgnum == 0) { # nothing to display
            if (inproc) {
                spsspkg.EndProcedure()
            }
        } else {
            # BUG FIX: previously procok was assigned only inside the
            # !inproc branch, so display(inproc=TRUE) with pending messages
            # failed with "object 'procok' not found".  When already inside
            # a procedure the pivot table can be produced directly, so the
            # default is TRUE.
            procok = TRUE
            if (!inproc) {
                procok = tryCatch({
                    StartProcedure(lcl$procname, lcl$omsid)
                    TRUE
                },
                error = function(e) {
                    FALSE
                }
                )
            }
            if (procok) { # build and display a Warnings table if we can
                table = spss.BasePivotTable("Warnings ","Warnings", isSplit=FALSE) # do not translate this
                rowdim = BasePivotTable.Append(table,Dimension.Place.row, 
                    gtxt("Message Number"), hideName = FALSE,hideLabels = FALSE)
                for (i in seq_len(lcl$msgnum)) {
                    rowcategory = spss.CellText.String(as.character(i))
                    BasePivotTable.SetCategories(table,rowdim,rowcategory)
                    BasePivotTable.SetCellValue(table,rowcategory, 
                        spss.CellText.String(lcl$msglist[[i]]))
                }
                spsspkg.EndProcedure()   # implies display
            } else { # can't produce a table
                for (i in seq_len(lcl$msgnum)) {
                    print(lcl$msglist[[i]])
                }
            }
        }
    }
    return(lcl)
}
mylist2env = function(alist) {
    # Copy the elements of a named list into a fresh environment and return it.
    # BUG FIX: the original iterated 1:length(alist), which yields c(1, 0) and
    # fails for an empty list; seq_along() handles that case correctly.
    env = new.env()
    lnames = names(alist)
    for (i in seq_along(alist)) {
        assign(lnames[[i]], value = alist[[i]], envir = env)
    }
    return(env)
}
setuplocalization = function(domain) {
    # find and bind translation file names.
    # domain is the root name of the extension command .R file, e.g., "SPSSINC_BREUSCH_PAGAN"
    # This would be bound to root location/SPSSINC_BREUSCH_PAGAN/lang
    # Locate the first library path that actually contains <domain>.R.
    fpath = Find(file.exists, file.path(.libPaths(), paste(domain, ".R", sep="")))
    # NOTE(review): if the file is not found, Find() returns NULL and
    # dirname(NULL) errors -- this assumes the extension is properly
    # installed on .libPaths(). TODO confirm.
    bindtextdomain(domain, file.path(dirname(fpath), domain, "lang"))
}
# Bind the host-provided help display function when available; helper() is
# what the HELP keyword path of Run() invokes.
if (exists("spsspkg.helper")) {
assign("helper", spsspkg.helper)
}
library(PermAlgo)
### Name: PermAlgo-package
### Title: Generate Event Times Conditional On Time-Dependent Covariates
### Aliases: PermAlgo-package PermAlgo
### Keywords: survival
### ** Examples
# Example - Generating adverse event conditional on use
# of prescription drugs
# Prepare the matrix of covariates (Xmat)
# Here we simulate daily exposures to 2 prescription drugs over a
# year. Drug prescriptions can start any day of follow-up, and their
# duration is a multiple of 7 days. There can be multiple prescriptions
# for each individual over the year and interruptions of drug use in
# between.
# Additionally, there is a time-independent binary covariate (sex).
n=500 # subjects
m=365 # days of follow-up per subject
# Generate the matrix of three covariates, in a 'long' format
# (one row per subject-day, subjects stacked).
Xmat=matrix(ncol=3, nrow=n*m)
# time-independent binary covariate, constant within subject
Xmat[,1] <- rep(rbinom(n, 1, 0.3), each=m)
# Build one subject's day-by-day drug exposure history of length m.
# Draws, in order: the start day, a prescription length (a multiple of 7
# days), and a dose; then alternates drug-free gaps and further
# prescriptions until at least m days are covered.  The order of RNG draws
# matches the original implementation exactly.
TDhist <- function(m){
  first.day <- round(runif(1, 1, m), 0)      # day the first prescription starts
  n.days <- 7 + 7 * rpois(1, 3)              # prescription length, in days
  dose.now <- round(runif(1, 0, 10), 1)      # daily dose for this prescription
  history <- c(rep(0, first.day - 1), rep(dose.now, n.days))
  while (length(history) <= m) {
    gap <- 21 + 7 * rpois(1, 3)              # drug-free interval, in days
    n.days <- 7 + 7 * rpois(1, 3)
    dose.now <- round(runif(1, 0, 10), 1)
    history <- c(history, rep(0, gap), rep(dose.now, n.days))
  }
  # truncate to exactly m days of follow-up
  history[1:m]
}
# create the two time-dependent exposure columns (one history per subject)
Xmat[,2] <- do.call("c", lapply(1:n, function(i) TDhist(m)))
Xmat[,3] <- do.call("c", lapply(1:n, function(i) TDhist(m)))
# generate vectors of event and censoring times prior to calling the
# function for the algorithm
eventRandom <- round(rexp(n, 0.012)+1,0)
censorRandom <- round(runif(n, 1,870),0)
# Generate the survival data conditional on the three covariates
data <- permalgorithm(n, m, Xmat, XmatNames=c("sex", "Drug1", "Drug2"),
eventRandom = eventRandom, censorRandom=censorRandom, betas=c(log(2),
log(1.04), log(0.99)), groupByD=FALSE )
# could use the survival library and check whether the data was generated
# properly using coxph(Surv(Start, Stop, Event) ~ sex + Drug1 + Drug2,
# data)
| /data/genthat_extracted_code/PermAlgo/examples/PermAlgo-package.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,194 | r | library(PermAlgo)
### Name: PermAlgo-package
### Title: Generate Event Times Conditional On Time-Dependent Covariates
### Aliases: PermAlgo-package PermAlgo
### Keywords: survival
### ** Examples
# Example - Generating adverse event conditional on use
# of prescription drugs
# Prepare the matrix of covariates (Xmat)
# Here we simulate daily exposures to 2 prescription drugs over a
# year. Drug prescriptions can start any day of follow-up, and their
# duration is a multiple of 7 days. There can be multiple prescriptions
# for each individual over the year and interruptions of drug use in
# between.
# Additionally, there is a time-independent binary covariate (sex).
n=500 # subjects
m=365 # days of follow-up per subject
# Generate the matrix of three covariates, in a 'long' format.
Xmat=matrix(ncol=3, nrow=n*m)
# time-independent binary covariate, constant within subject
Xmat[,1] <- rep(rbinom(n, 1, 0.3), each=m)
# Function to generate an individual time-dependent exposure history of
# length m, e.g. prescriptions of different durations and doses separated
# by drug-free gaps.
TDhist <- function(m){
start <- round(runif(1,1,m),0) # individual start date (day 1..m)
duration <- 7 + 7*rpois(1,3) # prescription length in days (a multiple of 7)
dose <- round(runif(1,0,10),1)
vec <- c(rep(0, start-1), rep(dose, duration))
while (length(vec)<=m){
intermission <- 21 + 7*rpois(1,3) # drug-free gap in days (a multiple of 7)
duration <- 7 + 7*rpois(1,3) # in days
dose <- round(runif(1,0,10),1)
vec <- append(vec, c(rep(0, intermission), rep(dose, duration)))}
# truncate to exactly m days of follow-up
return(vec[1:m])}
# create the two time-dependent exposure columns (one history per subject)
Xmat[,2] <- do.call("c", lapply(1:n, function(i) TDhist(m)))
Xmat[,3] <- do.call("c", lapply(1:n, function(i) TDhist(m)))
# generate vectors of event and censoring times prior to calling the
# function for the algorithm
eventRandom <- round(rexp(n, 0.012)+1,0)
censorRandom <- round(runif(n, 1,870),0)
# Generate the survival data conditional on the three covariates
data <- permalgorithm(n, m, Xmat, XmatNames=c("sex", "Drug1", "Drug2"),
eventRandom = eventRandom, censorRandom=censorRandom, betas=c(log(2),
log(1.04), log(0.99)), groupByD=FALSE )
# could use the survival library and check whether the data was generated
# properly using coxph(Surv(Start, Stop, Event) ~ sex + Drug1 + Drug2,
# data)
|
## This set of functions allows caching a matrix and its inverse ('makeCacheMatrix')
## or computing the inverse in case it is not already cached ('cacheSolve')
## Matrix-like object that caches its data and, lazily, its inverse.
## Returns a list of accessors: set/get for the matrix itself and
## setinverse/getinverse for the cached inverse.  set() now invalidates the
## cache only when the new matrix actually differs from the stored one,
## resolving the original TODO ("Check if x == y").
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    # Re-assigning an identical matrix keeps the cached inverse valid.
    if (!identical(x, y)) {
      x <<- y
      inv <<- NULL
    }
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## Matrix inverse computation (uses cached data if it's available).
## 'x' is a cache object created by makeCacheMatrix(); additional arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data!")
    return(inv)
  }
  # Cache miss: compute the inverse and store it for subsequent calls.
  data <- x$get()
  inv <- solve(data, ...)   # FIX: '...' was accepted but never forwarded
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | jgsogo/ProgrammingAssignment2 | R | false | false | 920 | r | ## This set of functions allow to cache a matrix and its inverse ('makeCacheMatrix')
## or to compute it in case it is not already cached ('cacheSolve')
## Wrap a matrix together with a memoised slot for its inverse.
## Returns accessor closures: set/get for the matrix, setinverse/getinverse
## for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously stored inverse.
    # (Original TODO: could skip the reset when y is identical to x.)
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Matrix inverse computation (uses cached data if it's available).
## 'x' must be a cache object in the makeCacheMatrix() shape.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, store, and fall through to return.
    cached <- solve(x$get())
    x$setinverse(cached)
  } else {
    message("getting cached data!")
  }
  cached
}
|
library(tidyverse)
# Generative art: each piece maps a dense (x, y) lattice through a pair of
# trigonometric transforms and renders the result as faint points.
# NOTE(review): output paths are hard-coded absolute Windows paths.
silk <- seq(from=-10, to=10, by = 0.04) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(x+pi*sin(y)), y=(y+pi*sin(x)))) +
  geom_point(alpha=.1, shape=20, size=0) +
  theme_void()
ggsave(silk, filename="C://Users/aless/Desktop/silk.png", dpi=500, type = "cairo",width = 8)
# Same lattice; cosine/sine warp applied to each axis independently.
square <- seq(from=-10, to=10, by = 0.04) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(x+pi*cos(x)), y=(y+pi*sin(y)))) +
  geom_point(alpha=.1, shape=20, size=0) +
  theme_void()
ggsave(square, filename="C://Users/aless/Desktop/square.png", dpi=500, type = "cairo",width = 8)
# Variant with quadratic amplitude on the x axis.
square2 <- seq(from=-10, to=10, by = 0.04) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(x^2*cos(x)), y=(y+pi*sin(x)))) +
  geom_point(alpha=.1, shape=20, size=0) +
  theme_void()
ggsave(square2, filename="C://Users/aless/Desktop/square2.png", dpi=500, type = "cairo",width = 8)
# Finer grid over [-3, 3] with a different warp.
silk2 <- seq(from=-3, to=3, by = 0.01) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(1-x-sin(y^2)), y=(1+y-cos(x^2)))) +
  geom_point(alpha=.1, shape=20, size=0) +
  theme_void()
ggsave(silk2, filename="C://Users/aless/Desktop/silk2.png", dpi=500, type = "cairo",width = 8)
# Same transform drawn with geom_line instead of points.
# NOTE(review): geom_line with no grouping connects points in x order;
# presumably intentional for the texture -- confirm.
aa <- seq(from=-3, to=3, by = 0.01) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(1-x-sin(y^2)), y=(1+y-cos(x^2)))) +
  geom_line(alpha=.1) +
  theme_void()
ggsave(aa, filename="C://Users/aless/Desktop/aa.png", dpi=500, type = "cairo",width = 8)
# Polar-coordinate rendering of the silk2 transform.
rose <- seq(-3,3,by=.01) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(1-x-sin(y^2)), y=(1+y-cos(x^2)))) +
  geom_point(alpha=.05, shape=20, size=0)+
  theme_void()+
  coord_polar()
ggsave(rose, filename="C://Users/aless/Desktop/rose.png", dpi=500, type = "cairo",width = 8)
| /Silk/silk.R | no_license | alearrigo/aRt | R | false | false | 1,674 | r | library(tidyverse)
# Generative art: a dense (x, y) lattice is warped by trigonometric
# transforms and rendered as faint points; each figure is saved to a
# hard-coded absolute Windows path via ggsave().
silk <- seq(from=-10, to=10, by = 0.04) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(x+pi*sin(y)), y=(y+pi*sin(x)))) +
  geom_point(alpha=.1, shape=20, size=0) +
  theme_void()
ggsave(silk, filename="C://Users/aless/Desktop/silk.png", dpi=500, type = "cairo",width = 8)
square <- seq(from=-10, to=10, by = 0.04) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(x+pi*cos(x)), y=(y+pi*sin(y)))) +
  geom_point(alpha=.1, shape=20, size=0) +
  theme_void()
ggsave(square, filename="C://Users/aless/Desktop/square.png", dpi=500, type = "cairo",width = 8)
square2 <- seq(from=-10, to=10, by = 0.04) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(x^2*cos(x)), y=(y+pi*sin(x)))) +
  geom_point(alpha=.1, shape=20, size=0) +
  theme_void()
ggsave(square2, filename="C://Users/aless/Desktop/square2.png", dpi=500, type = "cairo",width = 8)
silk2 <- seq(from=-3, to=3, by = 0.01) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(1-x-sin(y^2)), y=(1+y-cos(x^2)))) +
  geom_point(alpha=.1, shape=20, size=0) +
  theme_void()
ggsave(silk2, filename="C://Users/aless/Desktop/silk2.png", dpi=500, type = "cairo",width = 8)
# Line-based rendering of the silk2 transform.
aa <- seq(from=-3, to=3, by = 0.01) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(1-x-sin(y^2)), y=(1+y-cos(x^2)))) +
  geom_line(alpha=.1) +
  theme_void()
ggsave(aa, filename="C://Users/aless/Desktop/aa.png", dpi=500, type = "cairo",width = 8)
# Polar-coordinate rendering of the silk2 transform.
rose <- seq(-3,3,by=.01) %>%
  expand.grid(x=., y=.) %>%
  ggplot(aes(x=(1-x-sin(y^2)), y=(1+y-cos(x^2)))) +
  geom_point(alpha=.05, shape=20, size=0)+
  theme_void()+
  coord_polar()
ggsave(rose, filename="C://Users/aless/Desktop/rose.png", dpi=500, type = "cairo",width = 8)
|
\name{lpmvnorm}
\alias{lpmvnorm}
\alias{slpmvnorm}
\alias{ldmvnorm}
\alias{sldmvnorm}
\alias{ldpmvnorm}
\alias{sldpmvnorm}
\title{
Multivariate Normal Log-likelihood and Score Functions
}
\description{
Computes the log-likelihood (contributions) of multiple exact or
interval-censored observations (or a mix thereof) from multivariate
normal distributions and evaluates corresponding score functions.
}
\usage{
lpmvnorm(lower, upper, mean = 0, center = NULL, chol, invchol, logLik = TRUE,
M = NULL, w = NULL, seed = NULL, tol = .Machine$double.eps, fast = FALSE)
slpmvnorm(lower, upper, mean = 0, center = NULL, chol, invchol, logLik = TRUE,
M = NULL, w = NULL, seed = NULL, tol = .Machine$double.eps, fast = FALSE)
ldmvnorm(obs, mean = 0, chol, invchol, logLik = TRUE)
sldmvnorm(obs, mean = 0, chol, invchol, logLik = TRUE)
ldpmvnorm(obs, lower, upper, mean = 0, chol, invchol, logLik = TRUE, ...)
sldpmvnorm(obs, lower, upper, mean = 0, chol, invchol, logLik = TRUE, ...)
}
\arguments{
\item{lower}{matrix of lower limits (one column for each observation, \eqn{J} rows).
}
\item{upper}{matrix of upper limits (one column for each observation, \eqn{J} rows).
}
\item{obs}{matrix of exact observations (one column for each observation, \eqn{J} rows).
}
\item{mean}{matrix of means (one column for each observation, length is
recycled to length of \code{obs}, \code{lower} and \code{upper}).
}
\item{center}{matrix of negative rescaled means (one column for each observation, length is
recycled to length of \code{lower} and \code{upper}) as returned by
\code{cond_mvnorm(..., center = TRUE)}.
}
\item{chol}{Cholesky factors of covariance matrices as
\code{\link{ltMatrices}} object, length is recycled to length of
\code{obs}, \code{lower} and \code{upper}.
}
\item{invchol}{Cholesky factors of precision matrices as
\code{\link{ltMatrices}} object, length is recycled to length of \code{lower} and
\code{upper}. Either \code{chol} or \code{invchol} must be given.
}
\item{logLik}{logical, if \code{TRUE}, the log-likelihood is returned,
otherwise the individual contributions to the sum are returned.
}
\item{M}{number of iterations, early stopping based on
estimated errors is NOT implemented.
}
\item{w}{an optional matrix of weights with \eqn{J - 1} rows. This allows to replace the default
Monte-Carlo procedure (Genz, 1992) with a quasi-Monte-Carlo approach (Genz &
Bretz, 2002). Note that the same weights for evaluating the
multivariate normal probability are used for all observations when
\code{ncol(w) == M} is specified. If \code{ncol(w) == ncol(lower) * M}, each
likelihood contribution is evaluated on the corresponding sub-matrix.
If \code{w} is \code{NULL}, different uniform numbers are
drawn for each observation.
}
\item{seed}{an object specifying if and how the random number generator
should be initialized, see \code{\link[stats]{simulate}}. Only
applied when \code{w} is \code{NULL}.
}
\item{tol}{tolerance limit, values smaller than \code{tol} are interpreted
as zero.
}
\item{fast}{logical, if \code{TRUE}, a faster but less accurate version
of \code{pnorm} is used internally.
}
\item{\dots}{additional arguments to \code{lpmvnorm}.
}
}
\details{
Evaluates the multivariate normal log-likelihood defined by \code{means} and
\code{chol} over boxes defined by \code{lower} and \code{upper} or for
exact observations \code{obs}.
Monte-Carlo (Genz, 1992, the default) and quasi-Monte-Carlo (Genz & Bretz, 2002)
integration is implemented, the latter with weights obtained, for example,
from packages \pkg{qrng} or \pkg{randtoolbox}. It is the responsibility of
the user to ensure a meaningful lattice is used. In case of doubt, use
plain Monte-Carlo (\code{w = NULL}) or \code{\link{pmvnorm}}.
\code{slpmvnorm} computes both the individual log-likelihood contributions
and the corresponding score matrix (of dimension \eqn{J \times (J + 1) / 2 \times N}) if
\code{chol} contains diagonal elements. Otherwise, the dimension is \eqn{J
\times (J - 1) / 2 \times N}. The scores for exact or mixed exact-interval
observations are computed by \code{sldmvnorm} and \code{sldpmvnorm},
respectively.
More details can be found in the \code{lmvnorm_src} package vignette.
}
\value{
The log-likelihood (\code{logLik = TRUE}) or the individual contributions to the log-likelihood.
\code{slpmvnorm}, \code{sldmvnorm}, and \code{sldpmvnorm} return the score
matrices and, optionally (\code{logLik = TRUE}), the individual log-likelihood contributions
as well as scores for \code{obs}, \code{lower}, \code{upper}, and
\code{mean}.
}
\seealso{\code{\link{dmvnorm}}, \code{vignette("lmvnorm_src", package = "mvtnorm")}}
\references{
Genz, A. (1992). Numerical computation of multivariate normal probabilities.
\emph{Journal of Computational and Graphical Statistics}, \bold{1}, 141--150.
Genz, A. and Bretz, F. (2002), Methods for the computation of multivariate
t-probabilities. \emph{Journal of Computational and Graphical Statistics},
\bold{11}, 950--971.
}
\examples{
### five observations
N <- 5L
### dimension
J <- 4L
### lower and upper bounds, ie interval-censoring
lwr <- matrix(-runif(N * J), nrow = J)
upr <- matrix(runif(N * J), nrow = J)
### Cholesky factor
(C <- ltMatrices(runif(J * (J + 1) / 2), diag = TRUE))
### corresponding covariance matrix
(S <- as.array(Tcrossprod(C))[,,1])
### plain Monte-Carlo (Genz, 1992)
w <- NULL
M <- 25000
### quasi-Monte-Carlo (Genz & Bretz, 2002, but with different weights)
if (require("qrng")) w <- t(ghalton(M * N, J - 1))
### log-likelihood
lpmvnorm(lower = lwr, upper = upr, chol = C, w = w, M = M)
### compare with pmvnorm
exp(lpmvnorm(lower = lwr, upper = upr, chol = C, logLik = FALSE, w = w, M = M))
sapply(1:N, function(i) pmvnorm(lower = lwr[,i], upper = upr[,i], sigma = S))
### log-lik contributions and score matrix
slpmvnorm(lower = lwr, upper = upr, chol = C, w = w, M = M, logLik = TRUE)
}
\keyword{distribution}
| /man/lpmvnorm.Rd | no_license | cran/mvtnorm | R | false | false | 6,107 | rd | \name{lpmvnorm}
\alias{lpmvnorm}
\alias{slpmvnorm}
\alias{ldmvnorm}
\alias{sldmvnorm}
\alias{ldpmvnorm}
\alias{sldpmvnorm}
\title{
Multivariate Normal Log-likelihood and Score Functions
}
\description{
Computes the log-likelihood (contributions) of multiple exact or
interval-censored observations (or a mix thereof) from multivariate
normal distributions and evaluates corresponding score functions.
}
\usage{
lpmvnorm(lower, upper, mean = 0, center = NULL, chol, invchol, logLik = TRUE,
M = NULL, w = NULL, seed = NULL, tol = .Machine$double.eps, fast = FALSE)
slpmvnorm(lower, upper, mean = 0, center = NULL, chol, invchol, logLik = TRUE,
M = NULL, w = NULL, seed = NULL, tol = .Machine$double.eps, fast = FALSE)
ldmvnorm(obs, mean = 0, chol, invchol, logLik = TRUE)
sldmvnorm(obs, mean = 0, chol, invchol, logLik = TRUE)
ldpmvnorm(obs, lower, upper, mean = 0, chol, invchol, logLik = TRUE, ...)
sldpmvnorm(obs, lower, upper, mean = 0, chol, invchol, logLik = TRUE, ...)
}
\arguments{
\item{lower}{matrix of lower limits (one column for each observation, \eqn{J} rows).
}
\item{upper}{matrix of upper limits (one column for each observation, \eqn{J} rows).
}
\item{obs}{matrix of exact observations (one column for each observation, \eqn{J} rows).
}
\item{mean}{matrix of means (one column for each observation, length is
recycled to length of \code{obs}, \code{lower} and \code{upper}).
}
\item{center}{matrix of negative rescaled means (one column for each observation, length is
recycled to length of \code{lower} and \code{upper}) as returned by
\code{cond_mvnorm(..., center = TRUE)}.
}
\item{chol}{Cholesky factors of covariance matrices as
\code{\link{ltMatrices}} object, length is recycled to length of
\code{obs}, \code{lower} and \code{upper}.
}
\item{invchol}{Cholesky factors of precision matrices as
\code{\link{ltMatrices}} object, length is recycled to length of \code{lower} and
\code{upper}. Either \code{chol} or \code{invchol} must be given.
}
\item{logLik}{logical, if \code{TRUE}, the log-likelihood is returned,
otherwise the individual contributions to the sum are returned.
}
\item{M}{number of iterations, early stopping based on
estimated errors is NOT implemented.
}
\item{w}{an optional matrix of weights with \eqn{J - 1} rows. This allows to replace the default
Monte-Carlo procedure (Genz, 1992) with a quasi-Monte-Carlo approach (Genz &
Bretz, 2002). Note that the same weights for evaluating the
multivariate normal probability are used for all observations when
\code{ncol(w) == M} is specified. If \code{ncol(w) == ncol(lower) * M}, each
likelihood contribution is evaluated on the corresponding sub-matrix.
If \code{w} is \code{NULL}, different uniform numbers are
drawn for each observation.
}
\item{seed}{an object specifying if and how the random number generator
should be initialized, see \code{\link[stats]{simulate}}. Only
applied when \code{w} is \code{NULL}.
}
\item{tol}{tolerance limit, values smaller than \code{tol} are interpreted
as zero.
}
\item{fast}{logical, if \code{TRUE}, a faster but less accurate version
of \code{pnorm} is used internally.
}
\item{\dots}{additional arguments to \code{lpmvnorm}.
}
}
\details{
Evaluates the multivariate normal log-likelihood defined by \code{means} and
\code{chol} over boxes defined by \code{lower} and \code{upper} or for
exact observations \code{obs}.
Monte-Carlo (Genz, 1992, the default) and quasi-Monte-Carlo (Genz & Bretz, 2002)
integration is implemented, the latter with weights obtained, for example,
from packages \pkg{qrng} or \pkg{randtoolbox}. It is the responsibility of
the user to ensure a meaningful lattice is used. In case of doubt, use
plain Monte-Carlo (\code{w = NULL}) or \code{\link{pmvnorm}}.
\code{slpmvnorm} computes both the individual log-likelihood contributions
and the corresponding score matrix (of dimension \eqn{J \times (J + 1) / 2 \times N}) if
\code{chol} contains diagonal elements. Otherwise, the dimension is \eqn{J
\times (J - 1) / 2 \times N}. The scores for exact or mixed exact-interval
observations are computed by \code{sldmvnorm} and \code{sldpmvnorm},
respectively.
More details can be found in the \code{lmvnorm_src} package vignette.
}
\value{
The log-likelihood (\code{logLik = TRUE}) or the individual contributions to the log-likelihood.
\code{slpmvnorm}, \code{sldmvnorm}, and \code{sldpmvnorm} return the score
matrices and, optionally (\code{logLik = TRUE}), the individual log-likelihood contributions
as well as scores for \code{obs}, \code{lower}, \code{upper}, and
\code{mean}.
}
\seealso{\code{\link{dmvnorm}}, \code{vignette("lmvnorm_src", package = "mvtnorm")}}
\references{
Genz, A. (1992). Numerical computation of multivariate normal probabilities.
\emph{Journal of Computational and Graphical Statistics}, \bold{1}, 141--150.
Genz, A. and Bretz, F. (2002), Methods for the computation of multivariate
t-probabilities. \emph{Journal of Computational and Graphical Statistics},
\bold{11}, 950--971.
}
\examples{
### five observations
N <- 5L
### dimension
J <- 4L
### lower and upper bounds, ie interval-censoring
lwr <- matrix(-runif(N * J), nrow = J)
upr <- matrix(runif(N * J), nrow = J)
### Cholesky factor
(C <- ltMatrices(runif(J * (J + 1) / 2), diag = TRUE))
### corresponding covariance matrix
(S <- as.array(Tcrossprod(C))[,,1])
### plain Monte-Carlo (Genz, 1992)
w <- NULL
M <- 25000
### quasi-Monte-Carlo (Genz & Bretz, 2002, but with different weights)
if (require("qrng")) w <- t(ghalton(M * N, J - 1))
### log-likelihood
lpmvnorm(lower = lwr, upper = upr, chol = C, w = w, M = M)
### compare with pmvnorm
exp(lpmvnorm(lower = lwr, upper = upr, chol = C, logLik = FALSE, w = w, M = M))
sapply(1:N, function(i) pmvnorm(lower = lwr[,i], upper = upr[,i], sigma = S))
### log-lik contributions and score matrix
slpmvnorm(lower = lwr, upper = upr, chol = C, w = w, M = M, logLik = TRUE)
}
\keyword{distribution}
|
# NOTE(review): rm(list=ls()) wipes the workspace; generally discouraged,
# kept as-is for this batch script.
rm(list=ls())
# One SLURM array task per test subject; the task id seeds the RNG and
# selects this task's data and id.
repenv <- Sys.getenv("SLURM_ARRAY_TASK_ID")
i <- as.numeric(repenv)
set.seed(1000 + i)
# Project-local package library ('packages') and project helper sources.
library(Rcpp, lib.loc = 'packages')
library(RcppArmadillo, lib.loc = 'packages')
library(RcppEigen, lib.loc = 'packages')
library(rstan, lib.loc = 'packages')
source('slurm_RFuns.R')
sourceCpp('slurm_cppFuns.cpp')
ids <- readRDS('idTest.RDS')
datatest <- readRDS('dataTest.RDS')
data <- datatest[[i]]   # this task's dataset (shadows utils::data)
id <- ids[i]
CHprior <- readRDS('CHFit.RDS')
minT <- 50    # size of the initial fitting window (observations)
maxT <- 400   # last observation considered
results <- data.frame()   # NOTE(review): overwritten below before first use
lagsA <- 4
lagsD <- 4
K <- mix <- 6   # number of mixture components
dim <- 2 + lagsA + lagsD   # NOTE(review): shadows base::dim and is not
                           # referenced again in the visible script
# VB Prior Distributions: 1) IH, 2) CH
prior <- list()
prior[[1]] <- c(-5, -5, rep(0, 8), c(chol(diag(10, 10))))
prior[[2]] <- CHprior
# Starting values: single-component and K-component mixture parameterisations.
starting <- list(matrix(c(prior[[2]]$mean[1:10], diag(0.5, 10)), ncol = 1),
                 matrix(c(prior[[2]]$mean, rep(log(0.5), 10*K), rep(1, K)), ncol = 1))
# Initial fit on the first minT observations; fitVB comes from slurm_RFuns.R /
# slurm_cppFuns.cpp -- its [[2]] element is used as timing below.
fitInitial <- fitVB(data[1:minT, 1:2], prior, starting, dim = 10, mix = K, time = TRUE)
S <- c(1, 5, 10, 20)   # update step sizes swept in the loop below
results <- data.frame(timing = fitInitial[[2]],
                      model = c('IH', 'IH', 'CH', 'CH'),
                      inference = c('SVB-Single', 'SVB-Mixture', 'SVB-Single', 'SVB-Mixture', 'UVB-Single',
                                    'UVB-Mixture', 'UVB-Single', 'UVB-Mixture'),
                      k = rep(S, rep(8, 4)),
                      S = minT,
                      id = id)
# Sweep over the update step sizes; for each, compare offline VB (refit from
# scratch) against online/updated VB (UVB) as observations accrue.
for(k in 1:4){   # NOTE(review): 1:4 hard-codes length(S); seq_along(S) is safer
  sSeq <- seq(minT+S[k], maxT, S[k])
  # Incrementally add data to VB fits
  for(s in seq_along(sSeq)){
    if(sSeq[s] > nrow(data)){
      break
    }
    # Offline (Standard VB) model fits on all data up to sSeq[s]
    fitOffline <- fitVB(data[1:sSeq[s], 1:2], prior, starting, dim = 10, mix = K, time = TRUE)
    # Online fit: update the previous fit using only the new slice, plus the
    # lagged observations needed to form the regressors.
    if(s == 1){
      fitOnline <- fitUVB(data[(minT-(max(lagsA, lagsD) - 1)):sSeq[s], 1:2], fitInitial[[1]], starting, dim = 10, mix = K, time = TRUE)
    } else {
      fitOnline <- fitUVB(data[(sSeq[s-1]-(max(lagsA, lagsD) - 1)):sSeq[s], 1:2], fitOnline[[1]], starting, dim = 10, mix = K, time = TRUE)
    }
    # Grab logscores etc. for heterogenous models (here: timings only)
    # NOTE(review): growing a data.frame with rbind inside a loop is O(n^2);
    # collecting rows in a list and binding once would scale better.
    results <- rbind(results,
                     data.frame(timing = c(fitOffline[[2]], fitOnline[[2]]),
                                model = c('IH', 'IH', 'CH', 'CH'),
                                inference = c('SVB-Single', 'SVB-Mixture', 'SVB-Single', 'SVB-Mixture', 'UVB-Single',
                                              'UVB-Mixture', 'UVB-Single', 'UVB-Mixture'),
                                k = S[k],
                                S = sSeq[s],
                                id = id)
                     )
  }
}
# One CSV of timings per subject.
write.csv(results, paste0('timing/car', id, '.csv'), row.names=FALSE)
| /slurm_timing.R | no_license | NTomasetti/OHF | R | false | false | 2,670 | r | rm(list=ls())
# SLURM array task: the task id seeds the RNG and selects this task's subject.
repenv <- Sys.getenv("SLURM_ARRAY_TASK_ID")
i <- as.numeric(repenv)
set.seed(1000 + i)
# Project-local package library and helper sources.
library(Rcpp, lib.loc = 'packages')
library(RcppArmadillo, lib.loc = 'packages')
library(RcppEigen, lib.loc = 'packages')
library(rstan, lib.loc = 'packages')
source('slurm_RFuns.R')
sourceCpp('slurm_cppFuns.cpp')
ids <- readRDS('idTest.RDS')
datatest <- readRDS('dataTest.RDS')
data <- datatest[[i]]   # this task's dataset
id <- ids[i]
CHprior <- readRDS('CHFit.RDS')
minT <- 50    # initial fitting window
maxT <- 400   # last observation considered
results <- data.frame()
lagsA <- 4
lagsD <- 4
K <- mix <- 6
dim <- 2 + lagsA + lagsD
# VB Prior Distributions: 1) IH, 2) CH
prior <- list()
prior[[1]] <- c(-5, -5, rep(0, 8), c(chol(diag(10, 10))))
prior[[2]] <- CHprior
starting <- list(matrix(c(prior[[2]]$mean[1:10], diag(0.5, 10)), ncol = 1),
                 matrix(c(prior[[2]]$mean, rep(log(0.5), 10*K), rep(1, K)), ncol = 1))
# Initial fit on the first minT observations (fitVB from slurm_RFuns.R).
fitInitial <- fitVB(data[1:minT, 1:2], prior, starting, dim = 10, mix = K, time = TRUE)
S <- c(1, 5, 10, 20)   # update step sizes
results <- data.frame(timing = fitInitial[[2]],
                      model = c('IH', 'IH', 'CH', 'CH'),
                      inference = c('SVB-Single', 'SVB-Mixture', 'SVB-Single', 'SVB-Mixture', 'UVB-Single',
                                    'UVB-Mixture', 'UVB-Single', 'UVB-Mixture'),
                      k = rep(S, rep(8, 4)),
                      S = minT,
                      id = id)
# For each step size, compare offline VB (refit) vs online UVB (update).
for(k in 1:4){
  sSeq <- seq(minT+S[k], maxT, S[k])
  # Incrementally add data to VB fits
  for(s in seq_along(sSeq)){
    if(sSeq[s] > nrow(data)){
      break
    }
    # Offline (Standard VB) model fits
    fitOffline <- fitVB(data[1:sSeq[s], 1:2], prior, starting, dim = 10, mix = K, time = TRUE)
    if(s == 1){
      fitOnline <- fitUVB(data[(minT-(max(lagsA, lagsD) - 1)):sSeq[s], 1:2], fitInitial[[1]], starting, dim = 10, mix = K, time = TRUE)
    } else {
      fitOnline <- fitUVB(data[(sSeq[s-1]-(max(lagsA, lagsD) - 1)):sSeq[s], 1:2], fitOnline[[1]], starting, dim = 10, mix = K, time = TRUE)
    }
    # Grab logscores etc. for heterogenous models (timings only here)
    results <- rbind(results,
                     data.frame(timing = c(fitOffline[[2]], fitOnline[[2]]),
                                model = c('IH', 'IH', 'CH', 'CH'),
                                inference = c('SVB-Single', 'SVB-Mixture', 'SVB-Single', 'SVB-Mixture', 'UVB-Single',
                                              'UVB-Mixture', 'UVB-Single', 'UVB-Mixture'),
                                k = S[k],
                                S = sSeq[s],
                                id = id)
                     )
  }
}
# One CSV of timings per subject.
write.csv(results, paste0('timing/car', id, '.csv'), row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sociality-indices.R
\name{fit_dyadic_regression}
\alias{fit_dyadic_regression}
\title{Fit dyadic index regression on subset of data}
\usage{
fit_dyadic_regression(df)
}
\arguments{
\item{df}{A subset of data on which to fit a regression of interactions on observer effort.}
}
\value{
The input data with an additional column containing the regression residuals.
}
\description{
Fit dyadic index regression on subset of data
}
| /man/fit_dyadic_regression.Rd | no_license | amboseli/ramboseli | R | false | true | 504 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sociality-indices.R
\name{fit_dyadic_regression}
\alias{fit_dyadic_regression}
\title{Fit dyadic index regression on subset of data}
\usage{
fit_dyadic_regression(df)
}
\arguments{
\item{df}{A subset of data on which to fit a regression of interactions on observer effort.}
}
\value{
The input data with an additional column containing the regression residuals.
}
\description{
Fit dyadic index regression on subset of data
}
|
# Unzip the file with the data (only if the CSV is not already extracted)
file.zip <- file.path(".", "activity.zip")
file.csv <- file.path(".", "activity.csv")
# NOTE(review): scalar condition -- '&&' would be the idiomatic operator here.
if(file.exists(file.zip) & !file.exists(file.csv)) {unzip(file.zip)}
# Read the CSV file (columns used below: steps, date, interval)
data <- read.csv(file.csv, header = TRUE, stringsAsFactors = FALSE)
# Show first few lines
head(data)
# Transform data in columns (dates only; steps and intervals are fine as read)
data$date <- as.Date(data$date, "%Y-%m-%d")
# Convert table to data.table for improved manipulation
library(data.table)
data <- data.table(data)
# Compute the total no. of steps per date and plot a histogram
library(ggplot2)
# Daily totals (NAs dropped) plus a weekday label used for the fill colour.
data.steps <- data[,
                   .(steps.sum = sum(steps, na.rm = TRUE),
                     wday = weekdays(date)
                   ),
                   by = date
                   ]
# Compute mean and median of the total no. of steps
data.steps.median <- median(data.steps$steps.sum)
data.steps.mean <- mean(data.steps$steps.sum)
print(paste("Median:", data.steps.median))
print(paste("Mean:", data.steps.mean))
# Plot the resulting bar plots (one bar per day; mean/median as hlines)
plot.steps <- ggplot(data = data.steps, aes(x = date, y = steps.sum, fill = wday)) +
  scale_x_date(breaks = data.steps$date[seq(1, length(data.steps$date), 7)]) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
  scale_fill_brewer(palette = "YlOrRd") +
  geom_bar(stat = "identity") +
  xlab("date") +
  ylab("total no. of steps (dotted = median, dashed = mean)") +
  guides(fill = guide_legend(title = "weekdays")) +
  geom_hline(yintercept = data.steps.median, linetype = "dotted") +
  geom_hline(yintercept = data.steps.mean, linetype = "dashed") +
  # NOTE(review): scale_linetype_manual() without 'values' -- presumably
  # intended to label the hlines, but no linetype aesthetic is mapped;
  # verify when the plot is printed.
  scale_linetype_manual(name = "statistics") +
  ggtitle("Total no. of steps in each day")
#print(plot.steps)
# Compute the average no. of steps per 5-minute interval over all days
data.avg <- data[, .(steps.avg = mean(steps, na.rm = TRUE)), by = interval]
# Plot the time series of the average daily pattern
plot.avg <- ggplot(data = data.avg, aes(x = interval, y = steps.avg)) +
  geom_step(direction = "hv") +
  xlab("interval") +
  ylab("average no. of steps") +
  ggtitle("Average no. of steps per interval")
#print(plot.avg)
# Find the interval with the max average no. of steps
max.pos <- which.max(data.avg$steps.avg)
max.val <- data.avg[max.pos,]$interval
# Fix: the original message printed the interval label (max.val) but called
# it the "no. of steps"; report the step count and the interval separately.
print(paste("Max avg no. of steps (", round(data.avg$steps.avg[max.pos], 2),
            ") occurs in interval ", max.val,
            " (the ", max.pos, "th time step)", sep = ""))
# Count missing values per column.  sum() over a logical vector counts the
# TRUEs directly, so the original as.numeric() coercion was redundant.
na.steps <- sum(is.na(data$steps))
na.dates <- sum(is.na(data$date))
na.intvs <- sum(is.na(data$interval))
print(paste("Total no. of NA values in: ",
            "step --> ", na.steps, ", ",
            "date --> ", na.dates, ", ",
            "interval --> ", na.intvs, ".",
            sep = ""
            )
      )
# nrow() is the idiomatic form of dim(...)[1]
print(paste("Total no. of rows with NA values:", nrow(data[is.na(data$steps)])))
# Merge data and its average by interval: every observation gains the
# all-days mean (steps.avg) for its interval, used to impute missing steps
data.byinterval <- merge(data, data.avg, by = "interval")
data.byinterval <- data.byinterval[order(date, interval),]
# steps must be double before the := fill below, because interval means are
# fractional
data.byinterval$steps <- as.double(data.byinterval$steps)
# data.table in-place update: only rows with missing steps get the mean
data.byinterval <- data.byinterval[is.na(steps), steps := steps.avg]
# Show the first few lines of the new dataset
head(data.byinterval)
# Recompute the total no. of steps on the imputed data
data.steps.fill <- data.byinterval[,
                   .(steps.sum = sum(steps, na.rm = TRUE),
                     wday = weekdays(date)
                     ),
                   by = date
                   ]
# Recompute mean and median of the daily totals BEFORE building the plot:
# geom_hline() evaluates yintercept when the layer is created, so the
# original ordering referenced data.steps.fill.median / .mean before they
# were defined (use-before-definition).
data.steps.fill.median <- median(data.steps.fill$steps.sum)
data.steps.fill.mean <- mean(data.steps.fill$steps.sum)
print(paste("Median:", data.steps.fill.median))
print(paste("Mean:", data.steps.fill.mean))
# Plot again the total no. of steps (NAs imputed with the interval mean)
plot.steps.fill <- ggplot(data = data.steps.fill, aes(x = date, y = steps.sum, fill = wday)) +
  scale_x_date(breaks = data.steps.fill$date[seq(1, length(data.steps.fill$date), 7)]) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
  scale_fill_brewer(palette = "YlOrRd") +
  geom_bar(stat = "identity") +
  xlab("date") +
  ylab("total no. of steps (dotted = median, dashed = mean)") +
  guides(fill = guide_legend(title = "weekdays")) +
  geom_hline(yintercept = data.steps.fill.median, linetype = "dotted") +
  geom_hline(yintercept = data.steps.fill.mean, linetype = "dashed") +
  scale_linetype_manual(name = "statistics") +
  ggtitle("Total no. of steps in each day (NA filled w/ interval mean)")
#print(plot.steps.fill)
# Now add a factor distinguishing weekdays from weekends.
# format(date, "%u") yields the ISO weekday as character "1".."7"; %in%
# coerces the numeric sets to character, so the comparison works as intended.
day.factor <- format(data.byinterval$date, "%u")
day.factor[day.factor %in% 1:5] <- "Weekday"
day.factor[day.factor %in% 6:7] <- "Weekend"
day.factor <- factor(day.factor, levels = c("Weekday", "Weekend"))
data.byinterval$day <- day.factor
# Group by interval and compute the average steps, split weekday vs weekend
data.byday <- data.byinterval[,
                              .(steps.avg = mean(steps, na.rm = TRUE)),
                              by = list(interval, day)
                              ]
# Panel plot comparing the weekday and weekend activity patterns
plot.avg.byday <- ggplot(data = data.byday, aes(x = interval, y = steps.avg)) +
  geom_step(direction = "hv") +
  facet_grid(. ~ day) +
  xlab("interval") +
  ylab("average no. of steps") +
  ggtitle("Average no. of steps per interval")
#print(plot.avg.byday)
| /run_analysis.R | no_license | thesfinox/RepData_PeerAssessment1 | R | false | false | 5,906 | r | # Unzip the file with the data
# ---- Load and prepare the activity data (duplicate copy of the script) -----
# Extract activity.csv from activity.zip, but only if not already extracted.
file.zip <- file.path(".", "activity.zip")
file.csv <- file.path(".", "activity.csv")
# NOTE(review): scalar condition -- `&&` would be the idiomatic operator here.
if(file.exists(file.zip) & !file.exists(file.csv)) {unzip(file.zip)}
# Read the CSV file (columns: steps, date, interval)
data <- read.csv(file.csv, header = TRUE, stringsAsFactors = FALSE)
# Show first few lines
head(data)
# Transform data in columns (parse dates; steps and intervals are fine)
data$date <- as.Date(data$date, "%Y-%m-%d")
# Convert table to data.table for improved manipulation
library(data.table)
data <- data.table(data)
# Compute the total no. of steps per date (NAs dropped), plus the weekday
# name used below to colour the bars
library(ggplot2)
data.steps <- data[,
                   .(steps.sum = sum(steps, na.rm = TRUE),
                     wday = weekdays(date)
                     ),
                   by = date
                   ]
# Compute mean and median of the total no. of steps (reference lines below)
data.steps.median <- median(data.steps$steps.sum)
data.steps.mean <- mean(data.steps$steps.sum)
print(paste("Median:", data.steps.median))
print(paste("Mean:", data.steps.mean))
# Bar plot of the daily totals; dotted line = median, dashed line = mean
plot.steps <- ggplot(data = data.steps, aes(x = date, y = steps.sum, fill = wday)) +
  scale_x_date(breaks = data.steps$date[seq(1, length(data.steps$date), 7)]) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
  scale_fill_brewer(palette = "YlOrRd") +
  geom_bar(stat = "identity") +
  xlab("date") +
  ylab("total no. of steps (dotted = median, dashed = mean)") +
  guides(fill = guide_legend(title = "weekdays")) +
  geom_hline(yintercept = data.steps.median, linetype = "dotted") +
  geom_hline(yintercept = data.steps.mean, linetype = "dashed") +
  scale_linetype_manual(name = "statistics") +
  ggtitle("Total no. of steps in each day")
#print(plot.steps)
# Compute the average no. of steps per 5-minute interval over all days
data.avg <- data[, .(steps.avg = mean(steps, na.rm = TRUE)), by = interval]
# Plot the time series of the average daily pattern
plot.avg <- ggplot(data = data.avg, aes(x = interval, y = steps.avg)) +
  geom_step(direction = "hv") +
  xlab("interval") +
  ylab("average no. of steps") +
  ggtitle("Average no. of steps per interval")
#print(plot.avg)
# Find the interval with the max average no. of steps
max.pos <- which.max(data.avg$steps.avg)
max.val <- data.avg[max.pos,]$interval
# Fix: the original message printed the interval label (max.val) but called
# it the "no. of steps"; report the step count and the interval separately.
print(paste("Max avg no. of steps (", round(data.avg$steps.avg[max.pos], 2),
            ") occurs in interval ", max.val,
            " (the ", max.pos, "th time step)", sep = ""))
# Count missing values per column.  sum() over a logical vector counts the
# TRUEs directly, so the original as.numeric() coercion was redundant.
na.steps <- sum(is.na(data$steps))
na.dates <- sum(is.na(data$date))
na.intvs <- sum(is.na(data$interval))
print(paste("Total no. of NA values in: ",
            "step --> ", na.steps, ", ",
            "date --> ", na.dates, ", ",
            "interval --> ", na.intvs, ".",
            sep = ""
            )
      )
# nrow() is the idiomatic form of dim(...)[1]
print(paste("Total no. of rows with NA values:", nrow(data[is.na(data$steps)])))
# Merge data and its average by interval: every observation gains the
# all-days mean (steps.avg) for its interval, used to impute missing steps
data.byinterval <- merge(data, data.avg, by = "interval")
data.byinterval <- data.byinterval[order(date, interval),]
# steps must be double before the := fill below, because interval means are
# fractional
data.byinterval$steps <- as.double(data.byinterval$steps)
# data.table in-place update: only rows with missing steps get the mean
data.byinterval <- data.byinterval[is.na(steps), steps := steps.avg]
# Show the first few lines of the new dataset
head(data.byinterval)
# Recompute the total no. of steps on the imputed data
data.steps.fill <- data.byinterval[,
                   .(steps.sum = sum(steps, na.rm = TRUE),
                     wday = weekdays(date)
                     ),
                   by = date
                   ]
# Recompute mean and median of the daily totals BEFORE building the plot:
# geom_hline() evaluates yintercept when the layer is created, so the
# original ordering referenced data.steps.fill.median / .mean before they
# were defined (use-before-definition).
data.steps.fill.median <- median(data.steps.fill$steps.sum)
data.steps.fill.mean <- mean(data.steps.fill$steps.sum)
print(paste("Median:", data.steps.fill.median))
print(paste("Mean:", data.steps.fill.mean))
# Plot again the total no. of steps (NAs imputed with the interval mean)
plot.steps.fill <- ggplot(data = data.steps.fill, aes(x = date, y = steps.sum, fill = wday)) +
  scale_x_date(breaks = data.steps.fill$date[seq(1, length(data.steps.fill$date), 7)]) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
  scale_fill_brewer(palette = "YlOrRd") +
  geom_bar(stat = "identity") +
  xlab("date") +
  ylab("total no. of steps (dotted = median, dashed = mean)") +
  guides(fill = guide_legend(title = "weekdays")) +
  geom_hline(yintercept = data.steps.fill.median, linetype = "dotted") +
  geom_hline(yintercept = data.steps.fill.mean, linetype = "dashed") +
  scale_linetype_manual(name = "statistics") +
  ggtitle("Total no. of steps in each day (NA filled w/ interval mean)")
#print(plot.steps.fill)
# Now add a factor distinguishing weekdays from weekends.
# format(date, "%u") yields the ISO weekday as character "1".."7"; %in%
# coerces the numeric sets to character, so the comparison works as intended.
day.factor <- format(data.byinterval$date, "%u")
day.factor[day.factor %in% 1:5] <- "Weekday"
day.factor[day.factor %in% 6:7] <- "Weekend"
day.factor <- factor(day.factor, levels = c("Weekday", "Weekend"))
data.byinterval$day <- day.factor
# Group by interval and compute the average steps, split weekday vs weekend
data.byday <- data.byinterval[,
                              .(steps.avg = mean(steps, na.rm = TRUE)),
                              by = list(interval, day)
                              ]
# Panel plot comparing the weekday and weekend activity patterns
plot.avg.byday <- ggplot(data = data.byday, aes(x = interval, y = steps.avg)) +
  geom_step(direction = "hv") +
  facet_grid(. ~ day) +
  xlab("interval") +
  ylab("average no. of steps") +
  ggtitle("Average no. of steps per interval")
#print(plot.avg.byday)
|
#' Multi-Species Indicator
#'
#' @description A simple wrapper for \code{msi_tool} to make it easier to use in R.
#' Multi-Species Indicators (MSI) are biodiversity indicators that combine the population
#' development of species into a single indicator. The MSI-tool calculates an MSI, confidence intervals
#' for the MSIs and linear and flexible (smoothed) trends. The trends are classified in terms like
#' "moderate increase", "strong decrease" or "stable". A number of additional analyses can be performed
#' like testing for changepoints, comparison of trends before and after a changepoint and the calculation
#' and testing of the total change in a time series.
#'
#' @param data a data.frame with 4 columns in this order: 'species', 'year', 'index', 'se' (standard error).
#' The index value in the base year (which need not be the first year), should be set to 100, with se of 0.
#' @param jobname Generic name for output files
#' @param ... other parameters to pass to \code{msi_tool}
#' @return Returns a dataframe with 4 columns: Year, Index, lower2.5, upper97.5. The last two columns are the credible intervals
#' @import reshape2
#' @importFrom boot inv.logit
#' @export
#' @examples
#'
#' # Create some example data in the format required
#' nyr = 20
#' species = rep(letters, each = nyr)
#' year = rev(rep(1:nyr, length(letters)))
#'
#' # Create an index value that increases with time
#' index = rep(seq(50, 100, length.out = nyr), length(letters))
#' # Add randomness to species
#' index = index * runif(n = length(index), 0.7, 1.3)
#' # Add correlated randomness across species, to years
#' index = index * rep(runif(0.8, 1.2, n = nyr), length(letters))
#'
#' se = runif(n = nyr * length(letters), min = 10, max = 20)
#'
#' data <- data.frame(species, year, index, se)
#'
#' # Our species are decreasing
#' plot(data$year, data$index)
#'
#' # Species index values need to be 100 in the base year. Here I use
#' # the first year as my base year and rescale to 100. The standard error
#' # in the base year should be 0.
#' min_year <- min(data$year)
#'
#' for(sp in unique(data$species)){
#'
#' subset_data <- data[data$species == sp, ]
#' multi_factor <- 100 / subset_data$index[subset_data$year == min_year]
#' data$index[data$species == sp] <- data$index[data$species == sp] * multi_factor
#' data$se[data$species == sp] <- data$se[data$species == sp] * multi_factor
#' data$se[data$species == sp][1] <- 0
#'
#' }
#'
#' # Run the MSI function
#' msi_out <- msi(data, plot = FALSE)
#'
#' # Plot the resulting indicator
#' plot(msi_out)
# Wrapper around msi_tool(): validates the input, writes it to a temp
# directory, runs the external MSI tool there, then reads the tool's CSV
# outputs back into an object of class 'MSI'.  See the roxygen block above.
msi <- function(data, jobname = 'MSI_job', ...){
# --- validate the input -----------------------------------------------------
stopifnot(inherits(x = data, what = 'data.frame'))
if(!all(colnames(data) == c("species", "year", "index", "se"))){
stop('column names must be "species" "year" "index" "se"')
}
# order the data by year
data <- data[order(data$year), ]
# check column types; a factor species column is tolerated and converted below
stopifnot(inherits(data$year, 'integer') | inherits(data$year, 'numeric'))
stopifnot(inherits(data$index, 'integer') | inherits(data$index, 'numeric'))
stopifnot(inherits(data$se, 'integer') | inherits(data$se, 'numeric'))
stopifnot(inherits(data$species, 'character') | inherits(data$species, 'factor'))
if(inherits(data$species, 'factor')) data$species <- as.character(data$species)
# The base year index value should be 100 and se in this year should be 0.
# These are warnings (not errors) so the user can proceed deliberately.
all_species <- unique(data$species)
species_with_100_index <- unique(data$species[round(data$index, digits = 5) == 100])
species_with_0_se <- unique(data$species[data$se == 0])
if(!(all(all_species %in% species_with_100_index))){
warning('Species are expected to have an index value of 100 in their base year. Some of',
        ' your species do not have any index values of 100: ',
        paste(all_species[!all_species %in% species_with_100_index], collapse = ', '))
}
if(!(all(all_species %in% species_with_0_se))){
warning('Species are expected to have an se value of 0 in their base year (where index is',
        ' set to 100).',
        ' Some of your species do not have any se values',
        ' of 0: ',
        paste(all_species[!all_species %in% species_with_0_se], collapse = ', '))
}
# --- run the external tool in a temporary working directory -----------------
dir <- tempdir()
write.csv(data, file = file.path(dir, 'input.csv'), row.names = FALSE)
msi_tool(wd = dir, inputFile = 'input.csv', jobname = jobname, ...)
results <- read.table(file.path(dir, paste0(jobname, "_RESULTS.csv")), sep = ';',
                      header = TRUE)
# The tool writes European-style decimals (comma separator); convert columns
# 2-8 back to numeric.  NOTE(review): the 2:8 range is tied to the RESULTS
# layout produced by msi_tool -- confirm if the tool's output ever changes.
for(i in 2:8){
results[,i] <- as.numeric(gsub(',','.',as.character(results[,i])))
}
trends <- read.table(file.path(dir, paste0(jobname, "_TRENDS.csv")), sep = ';',
                     header = TRUE)
colnames(trends)[1] <- 'Measure'
trends$value <- as.numeric(gsub(',','.',as.character(trends$value)))
CV <- read.table(file.path(dir, "species_CV_values.csv"), sep = ',',
                 header = TRUE)
# Bundle the three tables and tag the result so the S3 methods for class
# 'MSI' (e.g. plot.MSI) can dispatch on it.
msi_out <- list(results = results,
                trends = trends,
                CV = CV)
class(msi_out) <- 'MSI'
return(msi_out)
} | /R/msi.R | no_license | BiologicalRecordsCentre/BRCindicators | R | false | false | 5,290 | r | #' Multi-Species Indicator
#'
#' @description A simple wrapper for \code{msi_tool} to make it easier to use in R.
#' Multi-Species Indicators (MSI) are biodiversity indicators that combine the population
#' development of species into a single indicator. The MSI-tool calculates an MSI, confidence intervals
#' for the MSIs and linear and flexible (smoothed) trends. The trends are classified in terms like
#' "moderate increase", "strong decrease" or "stable". A number of additional analyses can be performed
#' like testing for changepoints, comparison of trends before and after a changepoint and the calculation
#' and testing of the total change in a time series.
#'
#' @param data a data.frame with 4 columns in this order: 'species', 'year', 'index', 'se' (standard error).
#' The index value in the base year (which need not be the first year), should be set to 100, with se of 0.
#' @param jobname Generic name for output files
#' @param ... other parameters to pass to \code{msi_tool}
#' @return Returns a dataframe with 4 columns: Year, Index, lower2.5, upper97.5. The last two columns are the credible intervals
#' @import reshape2
#' @importFrom boot inv.logit
#' @export
#' @examples
#'
#' # Create some example data in the format required
#' nyr = 20
#' species = rep(letters, each = nyr)
#' year = rev(rep(1:nyr, length(letters)))
#'
#' # Create an index value that increases with time
#' index = rep(seq(50, 100, length.out = nyr), length(letters))
#' # Add randomness to species
#' index = index * runif(n = length(index), 0.7, 1.3)
#' # Add correlated randomness across species, to years
#' index = index * rep(runif(0.8, 1.2, n = nyr), length(letters))
#'
#' se = runif(n = nyr * length(letters), min = 10, max = 20)
#'
#' data <- data.frame(species, year, index, se)
#'
#' # Our species are decreasing
#' plot(data$year, data$index)
#'
#' # Species index values need to be 100 in the base year. Here I use
#' # the first year as my base year and rescale to 100. The standard error
#' # in the base year should be 0.
#' min_year <- min(data$year)
#'
#' for(sp in unique(data$species)){
#'
#' subset_data <- data[data$species == sp, ]
#' multi_factor <- 100 / subset_data$index[subset_data$year == min_year]
#' data$index[data$species == sp] <- data$index[data$species == sp] * multi_factor
#' data$se[data$species == sp] <- data$se[data$species == sp] * multi_factor
#' data$se[data$species == sp][1] <- 0
#'
#' }
#'
#' # Run the MSI function
#' msi_out <- msi(data, plot = FALSE)
#'
#' # Plot the resulting indicator
#' plot(msi_out)
# Wrapper around msi_tool(): validates the input, writes it to a temp
# directory, runs the external MSI tool there, then reads the tool's CSV
# outputs back into an object of class 'MSI'.  See the roxygen block above.
msi <- function(data, jobname = 'MSI_job', ...){
# --- validate the input -----------------------------------------------------
stopifnot(inherits(x = data, what = 'data.frame'))
if(!all(colnames(data) == c("species", "year", "index", "se"))){
stop('column names must be "species" "year" "index" "se"')
}
# order the data by year
data <- data[order(data$year), ]
# check column types; a factor species column is tolerated and converted below
stopifnot(inherits(data$year, 'integer') | inherits(data$year, 'numeric'))
stopifnot(inherits(data$index, 'integer') | inherits(data$index, 'numeric'))
stopifnot(inherits(data$se, 'integer') | inherits(data$se, 'numeric'))
stopifnot(inherits(data$species, 'character') | inherits(data$species, 'factor'))
if(inherits(data$species, 'factor')) data$species <- as.character(data$species)
# The base year index value should be 100 and se in this year should be 0.
# These are warnings (not errors) so the user can proceed deliberately.
all_species <- unique(data$species)
species_with_100_index <- unique(data$species[round(data$index, digits = 5) == 100])
species_with_0_se <- unique(data$species[data$se == 0])
if(!(all(all_species %in% species_with_100_index))){
warning('Species are expected to have an index value of 100 in their base year. Some of',
        ' your species do not have any index values of 100: ',
        paste(all_species[!all_species %in% species_with_100_index], collapse = ', '))
}
if(!(all(all_species %in% species_with_0_se))){
warning('Species are expected to have an se value of 0 in their base year (where index is',
        ' set to 100).',
        ' Some of your species do not have any se values',
        ' of 0: ',
        paste(all_species[!all_species %in% species_with_0_se], collapse = ', '))
}
# --- run the external tool in a temporary working directory -----------------
dir <- tempdir()
write.csv(data, file = file.path(dir, 'input.csv'), row.names = FALSE)
msi_tool(wd = dir, inputFile = 'input.csv', jobname = jobname, ...)
results <- read.table(file.path(dir, paste0(jobname, "_RESULTS.csv")), sep = ';',
                      header = TRUE)
# The tool writes European-style decimals (comma separator); convert columns
# 2-8 back to numeric.  NOTE(review): the 2:8 range is tied to the RESULTS
# layout produced by msi_tool -- confirm if the tool's output ever changes.
for(i in 2:8){
results[,i] <- as.numeric(gsub(',','.',as.character(results[,i])))
}
trends <- read.table(file.path(dir, paste0(jobname, "_TRENDS.csv")), sep = ';',
                     header = TRUE)
colnames(trends)[1] <- 'Measure'
trends$value <- as.numeric(gsub(',','.',as.character(trends$value)))
CV <- read.table(file.path(dir, "species_CV_values.csv"), sep = ',',
                 header = TRUE)
# Bundle the three tables and tag the result so the S3 methods for class
# 'MSI' (e.g. plot.MSI) can dispatch on it.
msi_out <- list(results = results,
                trends = trends,
                CV = CV)
class(msi_out) <- 'MSI'
return(msi_out)
}
library("RPostgreSQL")
library(DT)
library(tidytext)
library(dplyr)
library(stringr)
library(sentimentr)
library(ggplot2)
library(RColorBrewer)
library(readr)
library(SnowballC)
library(tm)
library(wordcloud)
library(reticulate)
library(crfsuite)
library(ggplot2)
library(textcat)
library(broom)
library(caTools)
library(rpart)
library(rpart.plot)
library(stargazer)
library(traitr)
# Open a PostgreSQL connection to the local DAAN888 database.
# NOTE(review): credentials are hard-coded (empty password) -- move them to
# environment variables or a config file before sharing this script.
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, dbname = "DAAN888", host = "localhost", port = 5432, user = "postgres", password = "")
# Pull raw category rows only for businesses present in the Pittsburgh review set
categories <- dbGetQuery(con, 'Select cats.* from public."Business_Category" cats inner join public."Pitt_Review_Data" revs on cats.business_id = revs.business_id')
# Broader cuisine / venue category keyword lists.  Each vector is later
# collapsed with "|" into a regex and grep-matched as a SUBSTRING against the
# raw Yelp category string (e.g. 'Caf' intentionally hits "Cafe"/"Cafes").
Eastern_Asian <- c('Chinese', 'Szechuan', 'Taiwanese', 'Dim Sum', 'Cantonese',
                   'Korean', 'Asian Fusion', 'Pan Asian', 'Mongolian', 'Uzbek')
Japanese <- c('Sushi Bars', 'Japanese', 'Ramen')
Mexican <- c('Mexican', 'Tex-Mex', 'Tacos', 'New-Mexican Cuisine')
Indian <- c('Pakistani', 'Indian', 'Himalayan/Nepalese')
Southeast_Asian <- c('Vietnamese', 'Thai', 'Burmese', 'Cambodian', 'Filipino',
                     'Bangladeshi', 'Indonesian', 'Laotian', 'Malaysian',
                     'Singaporean')
American <- c('American', 'Southern')  # duplicate 'American' entry removed
Italian <- c('Italian', 'Sicilian')
Middle_Eastern <- c('Middle Eastern', 'Halal', 'Moroccan', 'Egyptian',
                    'Persian/Iranian', 'Syrian')
Latin_American <- c('Latin American', 'Brazilian', 'Venezuelan', 'Argentine',
                    'Colombian', 'Caribbean', 'Cuban')
Mediterranean <- c('Greek', 'Turkish', 'Lebanese')
European <- c('Irish', 'Modern European', 'Polish', 'German', 'Spanish',
              'Portuguese', 'Basque', 'British', 'Hungarian', 'Irish Pub',
              'Iberian', 'French')
Fastfood <- 'Fast Food'
Seafood <- 'Seafood'
Pizza <- 'Pizza'
Breakfast <- c('Breakfast', 'Waffles')
Cafe <- c('Caf')
Diners <- 'Diner'
# Fix: 'Coktail Bars' was misspelled and could never match the real Yelp
# category "Cocktail Bars"; the duplicated 'Beer' entry is also dropped.
# 'Beer' + 'Gardens' look like a split "Beer Gardens" -- 'Gardens' alone
# still matches it, so behaviour is preserved.  TODO confirm intent.
Bars <- c('Pubs', 'Beer', 'Sports', 'Bars', 'Gastropubs', 'Breweries',
          'Gardens', 'Brewpubs', 'Speakeasies', 'Cocktail Bars')
# Flag each raw category row with 0/1 indicators, one column per broad
# category.  The original repeated the same three-line grep pattern 19 times
# and duplicated the 'Indian' block verbatim; a single loop over the keyword
# vectors removes the repetition (and the harmless duplicate) without
# changing any resulting value.
category_keywords <- list(
  Eastern_Asian = Eastern_Asian, Japanese = Japanese, Mexican = Mexican,
  Indian = Indian, Southeast_Asian = Southeast_Asian, American = American,
  Italian = Italian, Middle_Eastern = Middle_Eastern,
  Latin_American = Latin_American, Mediterranean = Mediterranean,
  European = European, Fastfood = Fastfood, Seafood = Seafood, Pizza = Pizza,
  Breakfast = Breakfast, Cafe = Cafe, Diners = Diners, Bars = Bars)
for (nm in names(category_keywords)) {
  pattern <- paste(category_keywords[[nm]], collapse = '|')
  # grepl() returns a logical vector; as.integer() yields the same 0/1
  # encoding that the original assign-0-then-grep-assign-1 pattern produced.
  categories[[nm]] <- as.integer(grepl(pattern, categories$categories))
}
# Collapse to one row per business: a business is flagged 1 for a category
# if ANY of its raw category rows matched that category's keywords.
cats <- categories %>% group_by(business_id) %>% summarise(Eastern_Asian = max(Eastern_Asian),
                                                           Japanese = max(Japanese),
                                                           Mexican = max(Mexican),
                                                           Indian = max(Indian),
                                                           Southeast_Asian = max(Southeast_Asian),
                                                           American = max(American),
                                                           Italian = max(Italian),
                                                           Middle_Eastern = max(Middle_Eastern),
                                                           Latin_American = max(Latin_American),
                                                           Mediterranean = max(Mediterranean),
                                                           European = max(European),
                                                           Fastfood = max(Fastfood),
                                                           Seafood = max(Seafood),
                                                           Pizza = max(Pizza),
                                                           Breakfast = max(Breakfast),
                                                           Cafe = max(Cafe),
                                                           Diners = max(Diners),
                                                           Bars = max(Bars))
# 'other' = none of the CUISINE categories matched.  NOTE(review): the
# venue-type flags (Fastfood..Bars) are not part of this check -- presumably
# intentional, but worth confirming.
cats$other <- 0
cats$other[cats$Japanese == 0 & cats$Eastern_Asian == 0 & cats$Mexican == 0 &
             cats$Indian == 0 & cats$Southeast_Asian == 0 & cats$American == 0 &
             cats$Middle_Eastern == 0 & cats$Latin_American == 0 & cats$Italian == 0 &
             cats$Mediterranean == 0 & cats$European == 0] <- 1
#Build Review Words Categories
# Load the sentiment-scored Pittsburgh reviews produced earlier in the
# pipeline.  NOTE(review): absolute user-specific path -- parameterise it.
reviews <- read_csv("/Users/Kris/Box Sync/Kris Penn State/DAAN 888/ReviewsPAwithSentiScores.csv")
# Lower-case once so the keyword regexes below need only one case
reviews$text <- tolower(reviews$text)
# Keywords signalling that a review talks about service quality (staff roles
# plus politeness/rudeness adjectives).  Collapsed with "|" into a
# substring-matching regex downstream, so entries also hit derived words
# (e.g. 'help' matches "helpful").
service_words <- c(
  'service', 'bartender', 'waiter', 'waitress', 'server', 'waitperson',
  'carhop', 'host', 'hostess', 'staff', 'employee', 'employees', 'help',
  'mean', 'rude', 'abusive', 'blunt', 'boorish', 'coarse', 'crude',
  'ignorant', 'impolite', 'insulting', 'obscene', 'vulgar', 'bad-mannered',
  'badmannered', 'bad mannered', 'curt', 'gruff', 'inconsiderate', 'kind',
  'amiable', 'compassionate', 'considerate', 'cordial', 'courteous',
  'friendly', 'unfriendly', 'gracious', 'kindhearted', 'kindly', 'loving',
  'thoughtful', 'polite', 'attentive'
)
# 1 if the review text mentions any service keyword.  NOTE(review): plain
# substring matching, so e.g. 'host' also hits "ghost" -- confirm acceptable.
reviews$service <- 0
reviews$service[grep(paste(service_words,collapse='|'),reviews$text)] <- 1
summary(reviews$service)
# Keywords about speed of service -- both slow-side and fast-side terms, so
# the flag means "the review mentions pace", not "the service was slow".
# Collapsed with "|" into a substring-matching regex downstream.
speed <- c(
  'slow', 'gradual', 'heavy', 'lackadaisical', 'leisurely', 'lethargic',
  'passive', 'reluctant', 'sluggish', 'stagnant', 'crawling', 'creeping',
  'dawdling', 'delaying', 'deliberate', 'disinclined', 'idle', 'lagging',
  'loitering', 'plodding', 'postponing', 'procrastinating', 'slack',
  'apathetic', 'dilatory', 'dreamy', 'drowsy', 'imperceptible', 'inactive',
  'indolent', 'inert', 'laggard', 'leaden', 'listless', 'phlegmatic',
  'ponderous', 'remiss', 'sleepy', 'slothful', 'slow-moving', 'snaillike',
  'supine', 'tardy', 'torpid', 'tortoiselike', 'fast', 'agile', 'brisk',
  'nimble', 'quick', 'rapid', 'swift', 'accelerated', 'active', 'dashing',
  'electric', 'flashing', 'fleet', 'fleeting', 'flying', 'hurried', 'racing',
  'snap', 'winged', 'blue streak', 'breakneck', 'chop-chop', 'double-time',
  'expeditious', 'expeditive', 'hairtrigger', 'hasty', 'hypersonic',
  'in a jiffy', 'in nothing flat', 'lickety split',
  'like a bat out of hell', 'like all get out', 'like crazy', 'like mad',
  'on the double', 'posthaste', 'presto', 'pronto', 'snappy', 'speedball',
  'supersonic', 'velocious'
)
# 1 if the review text mentions any pace-related keyword (substring match)
reviews$speed <- 0
reviews$speed[grep(paste(speed,collapse='|'),reviews$text)] <- 1
summary(reviews$speed)
# Cleanliness keywords, both positive ('spotless') and negative ('filthy').
# Collapsed with "|" into a substring-matching regex downstream.
clean <- c(
  'clean', 'spotless', 'hygienic', 'orderly', 'neat', 'tidy', 'unblemished',
  'washed', 'cleaned', 'shining', 'stained', 'dirty', 'disinfected',
  'sanitary', 'contaminated', 'dusty', 'filth', 'filthy', 'greasy', 'grimy',
  'messy', 'muddy', 'murky', 'nasty', 'polluted', 'sloppy', 'unkempt',
  'begrimed', 'smudged', 'smudge', 'sullied', 'unsanitary', 'unsightly',
  'sick', 'slimy'
)
# 1 if the review text mentions any cleanliness keyword (substring match)
reviews$cleanliness <- 0
reviews$cleanliness[grep(paste(clean,collapse='|'),reviews$text)] <- 1
summary(reviews$cleanliness)
# Taste/flavour keywords.  The list contains duplicates ('appetizing',
# 'savory', 'yummy', ...) -- harmless inside a regex alternation, so they are
# kept verbatim to preserve behaviour exactly.
tasty <- c(
  'tasty', 'taste', 'appetizing', 'delectable', 'flavorful', 'luscious',
  'pungent', 'savory', 'spicy', 'yummy', 'delish', 'divine', 'flavorsome',
  'flavory', 'full-flavored', 'good-tasting', 'heavenly', 'mellow',
  'palatable', 'piquant', 'sapid', 'scrumptious', 'sugar-coated',
  'sweetened', 'tasteful', 'toothsome', 'toothy', 'zestful', 'bland',
  'distasteful', 'dull', 'flavorless', 'offensive', 'tasteless',
  'unappetizing', 'unsavory', 'appetizing', 'delectable', 'delightful',
  'distinctive', 'enjoyable', 'enticing', 'exquisite', 'heavenly',
  'luscious', 'piquant', 'savory', 'spicy', 'sweet', 'tempting', 'yummy',
  'yum', 'dainty', 'darling', 'divine', 'adorable', 'ambrosial', 'delish',
  'fit for king', 'gratifying', 'mouthwatering', 'nectarous', 'palatable',
  'sapid', 'scrumptious', 'tasteful', 'titillating', 'toothsome',
  'well-prepared', 'well-seasoned', 'aftertaste', 'bitter', 'sour', 'savor',
  'unsavor', 'salty', 'zest', 'flavorless', 'gross', 'delicious'
)
# 1 if the review text mentions any taste/flavour keyword (substring match)
reviews$taste <- 0
reviews$taste[grep(paste(tasty,collapse='|'),reviews$text)] <- 1
summary(reviews$taste)
# Ambience keywords (seating, noise level, atmosphere).  'serene' appears
# twice in the original; kept to preserve behaviour.  NOTE(review): this
# name masks base::environment() in the script's global scope -- harmless
# here, but worth renaming.
environment <- c(
  'seating', 'table', 'seat', 'stool', 'booth', 'outside', 'inside', 'deck',
  'porch', 'ambiance', 'music', 'noise', 'loud', 'deafening', 'rowdy',
  'calm', 'peaceful', 'secluded', 'serene', 'quiet', 'private',
  'comfortable', 'atmosphere', 'space', 'environment', 'climate', 'setting',
  'cozy', 'relaxed', 'serene', 'relaxing', 'beautiful', 'view', 'charming'
)
# 1 if the review text mentions any ambience keyword (substring match)
reviews$environment <- 0
reviews$environment[grep(paste(environment,collapse='|'),reviews$text)] <- 1
summary(reviews$environment)
# Location / parking / convenience keywords, substring-matched downstream.
location <- c('parking', 'site', 'garage', 'location', 'convenient',
              'neighborhood')
# 1 if the review text mentions any location keyword (substring match)
reviews$location <- 0
reviews$location[grep(paste(location,collapse='|'),reviews$text)] <- 1
summary(reviews$location)
# Attach the per-business category flags to every review row (inner join on
# business_id by default, so reviews without category data are dropped)
new_reviews <- merge(reviews,cats,by='business_id')
summary(new_reviews)
| /dummies.r | no_license | ksnyder1986/DAAN888Group8 | R | false | false | 14,659 | r | library("RPostgreSQL")
library(DT)
library(tidytext)
library(dplyr)
library(stringr)
library(sentimentr)
library(ggplot2)
library(RColorBrewer)
library(readr)
library(SnowballC)
library(tm)
library(wordcloud)
library(reticulate)
library(crfsuite)
library(ggplot2)
library(textcat)
library(broom)
library(caTools)
library(rpart)
library(rpart.plot)
library(stargazer)
library(traitr)
# Open a PostgreSQL connection to the local DAAN888 database.
# NOTE(review): credentials are hard-coded (empty password) -- move them to
# environment variables or a config file before sharing this script.
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, dbname = "DAAN888", host = "localhost", port = 5432, user = "postgres", password = "")
# Pull raw category rows only for businesses present in the Pittsburgh review set
categories <- dbGetQuery(con, 'Select cats.* from public."Business_Category" cats inner join public."Pitt_Review_Data" revs on cats.business_id = revs.business_id')
# Broader cuisine / venue category keyword lists.  Each vector is later
# collapsed with "|" into a regex and grep-matched as a SUBSTRING against the
# raw Yelp category string (e.g. 'Caf' intentionally hits "Cafe"/"Cafes").
Eastern_Asian <- c('Chinese', 'Szechuan', 'Taiwanese', 'Dim Sum', 'Cantonese',
                   'Korean', 'Asian Fusion', 'Pan Asian', 'Mongolian', 'Uzbek')
Japanese <- c('Sushi Bars', 'Japanese', 'Ramen')
Mexican <- c('Mexican', 'Tex-Mex', 'Tacos', 'New-Mexican Cuisine')
Indian <- c('Pakistani', 'Indian', 'Himalayan/Nepalese')
Southeast_Asian <- c('Vietnamese', 'Thai', 'Burmese', 'Cambodian', 'Filipino',
                     'Bangladeshi', 'Indonesian', 'Laotian', 'Malaysian',
                     'Singaporean')
American <- c('American', 'Southern')  # duplicate 'American' entry removed
Italian <- c('Italian', 'Sicilian')
Middle_Eastern <- c('Middle Eastern', 'Halal', 'Moroccan', 'Egyptian',
                    'Persian/Iranian', 'Syrian')
Latin_American <- c('Latin American', 'Brazilian', 'Venezuelan', 'Argentine',
                    'Colombian', 'Caribbean', 'Cuban')
Mediterranean <- c('Greek', 'Turkish', 'Lebanese')
European <- c('Irish', 'Modern European', 'Polish', 'German', 'Spanish',
              'Portuguese', 'Basque', 'British', 'Hungarian', 'Irish Pub',
              'Iberian', 'French')
Fastfood <- 'Fast Food'
Seafood <- 'Seafood'
Pizza <- 'Pizza'
Breakfast <- c('Breakfast', 'Waffles')
Cafe <- c('Caf')
Diners <- 'Diner'
# Fix: 'Coktail Bars' was misspelled and could never match the real Yelp
# category "Cocktail Bars"; the duplicated 'Beer' entry is also dropped.
# 'Beer' + 'Gardens' look like a split "Beer Gardens" -- 'Gardens' alone
# still matches it, so behaviour is preserved.  TODO confirm intent.
Bars <- c('Pubs', 'Beer', 'Sports', 'Bars', 'Gastropubs', 'Breweries',
          'Gardens', 'Brewpubs', 'Speakeasies', 'Cocktail Bars')
# Flag each raw category row with 0/1 indicators, one column per broad
# category.  The original repeated the same three-line grep pattern 19 times
# and duplicated the 'Indian' block verbatim; a single loop over the keyword
# vectors removes the repetition (and the harmless duplicate) without
# changing any resulting value.
category_keywords <- list(
  Eastern_Asian = Eastern_Asian, Japanese = Japanese, Mexican = Mexican,
  Indian = Indian, Southeast_Asian = Southeast_Asian, American = American,
  Italian = Italian, Middle_Eastern = Middle_Eastern,
  Latin_American = Latin_American, Mediterranean = Mediterranean,
  European = European, Fastfood = Fastfood, Seafood = Seafood, Pizza = Pizza,
  Breakfast = Breakfast, Cafe = Cafe, Diners = Diners, Bars = Bars)
for (nm in names(category_keywords)) {
  pattern <- paste(category_keywords[[nm]], collapse = '|')
  # grepl() returns a logical vector; as.integer() yields the same 0/1
  # encoding that the original assign-0-then-grep-assign-1 pattern produced.
  categories[[nm]] <- as.integer(grepl(pattern, categories$categories))
}
# Collapse to one row per business: a business is flagged 1 for a category
# if ANY of its raw category rows matched that category's keywords.
cats <- categories %>% group_by(business_id) %>% summarise(Eastern_Asian = max(Eastern_Asian),
                                                           Japanese = max(Japanese),
                                                           Mexican = max(Mexican),
                                                           Indian = max(Indian),
                                                           Southeast_Asian = max(Southeast_Asian),
                                                           American = max(American),
                                                           Italian = max(Italian),
                                                           Middle_Eastern = max(Middle_Eastern),
                                                           Latin_American = max(Latin_American),
                                                           Mediterranean = max(Mediterranean),
                                                           European = max(European),
                                                           Fastfood = max(Fastfood),
                                                           Seafood = max(Seafood),
                                                           Pizza = max(Pizza),
                                                           Breakfast = max(Breakfast),
                                                           Cafe = max(Cafe),
                                                           Diners = max(Diners),
                                                           Bars = max(Bars))
# 'other' = none of the CUISINE categories matched.  NOTE(review): the
# venue-type flags (Fastfood..Bars) are not part of this check -- presumably
# intentional, but worth confirming.
cats$other <- 0
cats$other[cats$Japanese == 0 & cats$Eastern_Asian == 0 & cats$Mexican == 0 &
             cats$Indian == 0 & cats$Southeast_Asian == 0 & cats$American == 0 &
             cats$Middle_Eastern == 0 & cats$Latin_American == 0 & cats$Italian == 0 &
             cats$Mediterranean == 0 & cats$European == 0] <- 1
#Build Review Words Categories
# Load the pre-scored Pennsylvania review data and lower-case the text so the
# keyword matching below is effectively case-insensitive.
# NOTE(review): absolute user-specific path -- parameterize before sharing.
reviews <- read_csv("/Users/Kris/Box Sync/Kris Penn State/DAAN 888/ReviewsPAwithSentiScores.csv")
reviews$text <- tolower(reviews$text)
# Keyword list for service-related language in review text.
# NOTE(review): matching is by substring (no word boundaries), so e.g.
# 'mean' also matches 'meaning' and 'host' matches 'ghost' -- confirm this
# is acceptable.
service_words <- c(
  "service", "bartender", "waiter", "waitress", "server", "waitperson",
  "carhop", "host", "hostess", "staff", "employee", "employees", "help",
  "mean", "rude", "abusive", "blunt", "boorish", "coarse", "crude",
  "ignorant", "impolite", "insulting", "obscene", "vulgar", "bad-mannered",
  "badmannered", "bad mannered", "curt", "gruff", "inconsiderate", "kind",
  "amiable", "compassionate", "considerate", "cordial", "courteous",
  "friendly", "unfriendly", "gracious", "kindhearted", "kindly", "loving",
  "thoughtful", "polite", "attentive"
)
# 1 if any service keyword appears in the (lower-cased) review text, else 0.
reviews$service <- as.numeric(grepl(paste(service_words, collapse = "|"),
                                    reviews$text))
summary(reviews$service)
# Keyword list for speed-of-service mentions. It contains both "slow" and
# "fast" synonyms, so the flag marks that speed was mentioned at all, not
# its polarity.
# NOTE(review): matching below is by substring, so short entries such as
# 'snap' also match e.g. 'snapper' -- confirm acceptable.
speed = c('slow'
          ,'gradual'
          ,'heavy'
          ,'lackadaisical'
          ,'leisurely'
          ,'lethargic'
          ,'passive'
          ,'reluctant'
          ,'sluggish'
          ,'stagnant'
          ,'crawling'
          ,'creeping'
          ,'dawdling'
          ,'delaying'
          ,'deliberate'
          ,'disinclined'
          ,'idle'
          ,'lagging'
          ,'loitering'
          ,'plodding'
          ,'postponing'
          ,'procrastinating'
          ,'slack'
          ,'apathetic'
          ,'dilatory'
          ,'dreamy'
          ,'drowsy'
          ,'imperceptible'
          ,'inactive'
          ,'indolent'
          ,'inert'
          ,'laggard'
          ,'leaden'
          ,'listless'
          ,'phlegmatic'
          ,'ponderous'
          ,'remiss'
          ,'sleepy'
          ,'slothful'
          ,'slow-moving'
          ,'snaillike'
          ,'supine'
          ,'tardy'
          ,'torpid'
          ,'tortoiselike'
          ,'fast'
          ,'agile'
          ,'brisk'
          ,'nimble'
          ,'quick'
          ,'rapid'
          ,'swift'
          ,'accelerated'
          ,'active'
          ,'dashing'
          ,'electric'
          ,'flashing'
          ,'fleet'
          ,'fleeting'
          ,'flying'
          ,'hurried'
          ,'racing'
          ,'snap'
          ,'winged'
          ,'blue streak'
          ,'breakneck'
          ,'chop-chop'
          ,'double-time'
          ,'expeditious'
          ,'expeditive'
          ,'hairtrigger'
          ,'hasty'
          ,'hypersonic'
          ,'in a jiffy'
          ,'in nothing flat'
          ,'lickety split'
          ,'like a bat out of hell'
          ,'like all get out'
          ,'like crazy'
          ,'like mad'
          ,'on the double'
          ,'posthaste'
          ,'presto'
          ,'pronto'
          ,'snappy'
          ,'speedball'
          ,'supersonic'
          ,'velocious')
# 1 if any speed keyword appears in the review text, else 0.
reviews$speed <- 0
reviews$speed[grep(paste(speed,collapse='|'),reviews$text)] <- 1
summary(reviews$speed)
# Keyword list for cleanliness mentions. Contains both positive ('spotless')
# and negative ('filthy') vocabulary, so the flag marks any cleanliness talk,
# not its polarity. Matching is by substring ('clean' already covers
# 'cleaned', 'smudge' covers 'smudged').
clean <- c('clean'
           ,'spotless'
           ,'hygienic'
           ,'orderly'
           ,'neat'
           ,'tidy'
           ,'unblemished'
           ,'washed'
           ,'cleaned'
           ,'shining'
           ,'stained'
           ,'dirty'
           ,'disinfected'
           ,'sanitary'
           ,'contaminated'
           ,'dusty'
           ,'filth'
           ,'filthy'
           ,'greasy'
           ,'grimy'
           ,'messy'
           ,'muddy'
           ,'murky'
           ,'nasty'
           ,'polluted'
           ,'sloppy'
           ,'unkempt'
           ,'begrimed'
           ,'smudged'
           ,'smudge'
           ,'sullied'
           ,'unsanitary'
           ,'unsightly'
           ,'sick'
           ,'slimy')
# 1 if any cleanliness keyword appears in the review text, else 0.
reviews$cleanliness <- 0
reviews$cleanliness[grep(paste(clean,collapse='|'),reviews$text)] <- 1
summary(reviews$cleanliness)
# Keyword list for taste/flavor mentions (positive and negative vocabulary).
# NOTE(review): several entries appear twice (e.g. 'appetizing', 'heavenly',
# 'sapid', 'flavorless') -- harmless for the regex but redundant; also
# 'taste' already subsumes 'tasty'/'tasteful'/'tasteless' as substrings.
tasty <- c('tasty'
           ,'taste'
           ,'appetizing'
           ,'delectable'
           ,'flavorful'
           ,'luscious'
           ,'pungent'
           ,'savory'
           ,'spicy'
           ,'yummy'
           ,'delish'
           ,'divine'
           ,'flavorsome'
           ,'flavory'
           ,'full-flavored'
           ,'good-tasting'
           ,'heavenly'
           ,'mellow'
           ,'palatable'
           ,'piquant'
           ,'sapid'
           ,'scrumptious'
           ,'sugar-coated'
           ,'sweetened'
           ,'tasteful'
           ,'toothsome'
           ,'toothy'
           ,'zestful'
           ,'bland'
           ,'distasteful'
           ,'dull'
           ,'flavorless'
           ,'offensive'
           ,'tasteless'
           ,'unappetizing'
           ,'unsavory'
           ,'appetizing'
           ,'delectable'
           ,'delightful'
           ,'distinctive'
           ,'enjoyable'
           ,'enticing'
           ,'exquisite'
           ,'heavenly'
           ,'luscious'
           ,'piquant'
           ,'savory'
           ,'spicy'
           ,'sweet'
           ,'tempting'
           ,'yummy'
           ,'yum'
           ,'dainty'
           ,'darling'
           ,'divine'
           ,'adorable'
           ,'ambrosial'
           ,'delish'
           ,'fit for king'
           ,'gratifying'
           ,'mouthwatering'
           ,'nectarous'
           ,'palatable'
           ,'sapid'
           ,'scrumptious'
           ,'tasteful'
           ,'titillating'
           ,'toothsome'
           ,'well-prepared'
           ,'well-seasoned'
           ,'aftertaste'
           ,'bitter'
           ,'sour'
           ,'savor'
           ,'unsavor'
           ,'salty'
           ,'zest'
           ,'flavorless'
           ,'gross'
           ,'delicious'
           )
# 1 if any taste keyword appears in the review text, else 0.
reviews$taste <- 0
reviews$taste[grep(paste(tasty,collapse='|'),reviews$text)] <- 1
summary(reviews$taste)
# Keyword list for ambience / seating / setting mentions in review text.
# NOTE(review): the name `environment` masks base::environment here; calls
# to environment() still resolve to the function, but consider renaming.
# 'serene' appears twice in the original list; kept for fidelity.
environment <- c(
  "seating", "table", "seat", "stool", "booth", "outside", "inside",
  "deck", "porch", "ambiance", "music", "noise", "loud", "deafening",
  "rowdy", "calm", "peaceful", "secluded", "serene", "quiet", "private",
  "comfortable", "atmosphere", "space", "environment", "climate",
  "setting", "cozy", "relaxed", "serene", "relaxing", "beautiful",
  "view", "charming"
)
# 1 if any ambience keyword appears in the (lower-cased) review text, else 0.
reviews$environment <- as.numeric(grepl(paste(environment, collapse = "|"),
                                        reviews$text))
summary(reviews$environment)
# Keyword list for location / accessibility mentions in review text.
# NOTE(review): substring matching means 'site' also hits 'website',
# 'opposite', etc. -- confirm acceptable.
location <- c("parking", "site", "garage", "location", "convenient",
              "neighborhood")
# 1 if any location keyword appears in the (lower-cased) review text, else 0.
reviews$location <- as.numeric(grepl(paste(location, collapse = "|"),
                                     reviews$text))
summary(reviews$location)
# Join the per-review keyword flags with the per-business cuisine flags.
# base::merge with only `by=` is an inner join: reviews whose business_id is
# absent from `cats` are dropped.
new_reviews <- merge(reviews,cats,by='business_id')
summary(new_reviews)
|
#' Automatic Statistical Identification in Complex Spectra
#'
#' Quantification of 1D 1H NMR spectra with ASICS method using a library of
#' pure metabolite spectra. The method is presented in Tardivel et al. (2017).
#'
#' @param spectra_obj An object of class \linkS4class{Spectra} obtained with the
#' function \link{createSpectra}.
#' @param exclusion.areas Definition domain of spectra that has to be excluded
#' for the quantification (ppm). By default, the water region is excluded
#' (4.5-5.1 ppm).
#' @param max.shift Maximum chemical shift allowed (in ppm). Default to 0.02.
#' @param pure.library An object of class \linkS4class{PureLibrary} containing
#' the reference spectra (pure metabolite spectra). If \code{NULL}, the library
#' included in the package (that contains 191 reference spectra) is used.
#' @param threshold.noise Threshold for signal noise. Default to 0.02.
#' @param combine Logical. If \code{TRUE}, information from all spectra are
#' taken into account to align individual library.
#' @param seed Random seed to control randomness in the algorithm (used in the
#' estimation of the significativity of a given metabolite concentration).
#' @param ncores Number of cores used in parallel evaluation. Default to
#' \code{1}.
#' @param verbose A boolean value to allow print out process information.
#'
#' @note Since version 2.3.1 small changes were applied in order to improve the
#' speed of metabolite selection algorithm. To reproduce previous results, you
#' have to use an older version.
#'
#' @return An object of type \linkS4class{ASICSResults} containing the
#' quantification results.
#'
#' @importFrom BiocParallel bplapply MulticoreParam multicoreWorkers SerialParam
#' @importFrom stats reshape
#' @export
#'
#' @seealso \linkS4class{ASICSResults} \code{\link{pure_library}}
#' \code{\link{createSpectra}}
#'
#' @references Tardivel P., Canlet C., Lefort G., Tremblay-Franco M., Debrauwer
#' L., Concordet D., Servien R. (2017). ASICS: an automatic method for
#' identification and quantification of metabolites in complex 1D 1H NMR
#' spectra. \emph{Metabolomics}, \strong{13}(10): 109.
#' \url{https://doi.org/10.1007/s11306-017-1244-5}
#'
#' @examples
#' # Import data and create object
#' current_path <- system.file("extdata", package = "ASICS")
#' spectra_data <- importSpectra(name.dir = current_path,
#' name.file = "spectra_example.txt", type.import = "txt")
#' spectra_obj <- createSpectra(spectra_data)
#'
#' # Estimation of relative quantifications
#' to_exclude <- matrix(c(4.5, 10), ncol = 2)
#' resASICS <- ASICS(spectra_obj, exclusion.areas = to_exclude, combine = FALSE)
ASICS <- function(spectra_obj,
                  exclusion.areas = matrix(c(4.5, 5.1), ncol = 2),
                  max.shift = 0.02, pure.library = NULL,
                  threshold.noise = 0.02, combine = TRUE, seed = 1234,
                  ncores = 1, verbose = TRUE) {

  ## Input validation -------------------------------------------------------
  # Use the scalar short-circuit operator `||` here: with the previous
  # vectorized `|`, a non-matrix input (e.g. a plain vector) made ncol()
  # return NULL, `NULL != 2` evaluate to logical(0) and the whole condition
  # collapse, so the intended error message was never reached.
  if(!is.null(exclusion.areas) &&
     (!is.matrix(exclusion.areas) || ncol(exclusion.areas) != 2)){
    stop("'exclusion.areas' must be a matrix with 2 columns.")
  }

  if(max.shift < 0){
    stop("'max.shift' must be non negative.")
  }

  if(threshold.noise < 0){
    stop("'threshold.noise' must be non negative.")
  }

  # Scalar condition: `&&` (short-circuit) instead of vectorized `&`.
  if(!is(pure.library, "PureLibrary") && !is.null(pure.library)){
    stop(paste("'pure.library' must be either NULL or an object of class",
               "'PureLibrary'."))
  }

  # Delegate the actual estimation pipeline to the internal worker.
  res_estimation <- .ASICSInternal(spectra_obj, exclusion.areas, max.shift,
                                   pure.library, threshold.noise, seed, ncores,
                                   combine, verbose)

  return(res_estimation)
}
#' @importFrom methods new
# Internal worker for ASICS(): runs the full quantification pipeline on each
# spectrum of `spectra_obj_raw` (exclusion of ppm areas, cleaning of the pure
# library, alignment by translation and local deformation, concentration
# optimisation) and assembles the results into an "ASICSResults" object.
.ASICSInternal <- function(spectra_obj_raw,
                           exclusion.areas = matrix(c(4.5, 5.1), ncol = 2),
                           max.shift = 0.02, pure.library = NULL,
                           threshold.noise = 0.02, seed = 1234, ncores = 1,
                           combine = TRUE, verbose = TRUE){
  # seed and parallel environment
  set.seed(seed)
  # default library or not
  if(is.null(pure.library)){
    pure.library <- ASICS::pure_library
  }
  # spectra object as a list where each element are 1 spectrum
  spectra_list <- lapply(seq_along(spectra_obj_raw),
                         function(x) spectra_obj_raw[x])
#-----------------------------------------------------------------------------
#### Remove areas from spectrum and library ####
  if (verbose) cat("Remove areas from spectrum and library \n")
  spectra_obj <- bplapply(spectra_list, .removeAreas, exclusion.areas,
                          pure.library,
                          BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                               verbose))
  # number of points on library grid corresponding to maximum shift
  # (in combine mode, max.shift becomes a vector of 5 increasing shifts;
  # downstream steps use its last, i.e. largest, element)
  # NOTE(review): scalar context -- `||` would be more idiomatic than `|`.
  if (length(spectra_list) == 1 | !combine) {
    nb_points_shift <-
      floor(max.shift / (spectra_obj[[1]][["cleaned_library"]]@ppm.grid[2] -
                           spectra_obj[[1]][["cleaned_library"]]@ppm.grid[1]))
  } else {
    max.shift <- seq_len(5) * max.shift / 5
    nb_points_shift <-
      floor(max.shift / (spectra_obj[[1]][["cleaned_library"]]@ppm.grid[2] -
                           spectra_obj[[1]][["cleaned_library"]]@ppm.grid[1]))
  }
#-----------------------------------------------------------------------------
#### Cleaning step: remove metabolites that cannot belong to the mixture ####
  if (verbose) cat("Remove metabolites that cannot belong to the mixture \n")
  spectra_obj <- bplapply(spectra_obj, .cleanLibrary, threshold.noise,
                          nb_points_shift[length(nb_points_shift)],
                          BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                               verbose))
#-----------------------------------------------------------------------------
#### Find the best translation between each pure spectra and mixture ####
#and sort metabolites by regression residuals
  # compute weights
  # Inverse-variance weights for a noise model with a multiplicative (s1)
  # and an additive (s2) component: w_i = 1 / (|y_i| * s1^2 + s2^2).
  s1 <- 0.172 #standard deviation of multiplicative noise
  s2 <- 0.15 #standard deviation of additive noise
  if (verbose) cat("Compute weights \n")
  spectra_obj <-
    bplapply(spectra_obj,
             function(x){x[["mixture_weights"]] <-
               as.numeric(1 / (abs(x[["cleaned_spectrum"]]@spectra) *
                                 s1 ^ 2 + s2 ^ 2)); return(x)},
             BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                  verbose))
  if (verbose) cat("Translate library \n")
  if (length(spectra_list) == 1 | !combine) {
    spectra_obj <- bplapply(spectra_obj, .translateLibrary,
                            nb_points_shift[length(nb_points_shift)],
                            max.shift[length(max.shift)],
                            BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                                 verbose))
  } else {
    # spectra binning
    spectra_to_bin <- data.frame(as.matrix(getSpectra(spectra_obj_raw)))
    rownames(spectra_to_bin) <- spectra_obj_raw@ppm.grid
    norm.param <- c(list(spectra = spectra_to_bin,
                         exclusion.areas = exclusion.areas,
                         ncores = ncores,
                         bin = 0.001,
                         verbose = FALSE,
                         type.norm = spectra_obj_raw@norm.method),
                    spectra_obj_raw@norm.params)
    spec_bin <- do.call("binning", norm.param)
    # drop bins with no signal in any spectrum
    spec_bin <- spec_bin[rowSums(spec_bin) != 0, ]
    spectra_obj <- .translateLibrary_combineVersion(spectra_obj, max.shift,
                                                    nb_points_shift, spec_bin,
                                                    pure.library, ncores,
                                                    length(spectra_obj_raw),
                                                    verbose)
  }
#-----------------------------------------------------------------------------
#### Localized deformations of pure spectra ####
  if (verbose) cat("Deform library peaks \n")
  spectra_obj <- bplapply(spectra_obj, .deformLibrary,
                          BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                               verbose))
#-----------------------------------------------------------------------------
#### Threshold and concentration optimisation for each metabolites ####
  if (verbose) cat("Compute quantifications \n")
  spectra_obj <- bplapply(spectra_obj, .concentrationOpti,
                          BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                               verbose))
#-----------------------------------------------------------------------------
#### Results ####
  if (verbose) cat("Format results... \n")
  sample_name <-
    unlist(vapply(spectra_obj,
                  function(x) return(x[["cleaned_spectrum"]]@sample.name),
                  "character"))
  spectra <-
    do.call("cbind",
            lapply(spectra_obj,
                   function(x) return(x[["cleaned_spectrum"]]@spectra)))
  rec_spectra <-
    do.call("cbind", lapply(spectra_obj,
                            function(x) return(x[["est_mixture"]])))
  # Merge the per-spectrum relative concentrations into one metabolite x
  # sample table; metabolites absent from a spectrum become 0.
  # NOTE(review): join_all() comes from plyr, which is not in this file's
  # @importFrom tags -- confirm the package imports it elsewhere.
  rel_conc <- lapply(spectra_obj, function(x) {
    x[["relative_concentration"]]$row_names <-
      x[["cleaned_library"]]@sample.name ;
    return(x[["relative_concentration"]])})
  metab_conc <- join_all(rel_conc, by = "row_names", type = "full")
  rownames(metab_conc) <- metab_conc$row_names
  metab_conc$row_names <- NULL
  metab_conc[is.na(metab_conc)] <- 0
  pure_lib_format <- do.call("rbind",
                             lapply(spectra_obj,
                                    function(x) return(x[["format_library"]])))
  # Object to return
  res_object <- new(Class = "ASICSResults",
                    sample.name = sample_name,
                    ppm.grid = spectra_obj[[1]][["cleaned_spectrum"]]@ppm.grid,
                    spectra = spectra,
                    reconstructed.spectra = rec_spectra,
                    quantification = metab_conc,
                    deformed.library = pure_lib_format)
  return(res_object)
}
| /R/ASICS.R | no_license | GaelleLefort/ASICS | R | false | false | 10,235 | r | #' Automatic Statistical Identification in Complex Spectra
#'
#' Quantification of 1D 1H NMR spectra with ASICS method using a library of
#' pure metabolite spectra. The method is presented in Tardivel et al. (2017).
#'
#' @param spectra_obj An object of class \linkS4class{Spectra} obtained with the
#' function \link{createSpectra}.
#' @param exclusion.areas Definition domain of spectra that has to be excluded
#' for the quantification (ppm). By default, the water region is excluded
#' (4.5-5.1 ppm).
#' @param max.shift Maximum chemical shift allowed (in ppm). Default to 0.02.
#' @param pure.library An object of class \linkS4class{PureLibrary} containing
#' the reference spectra (pure metabolite spectra). If \code{NULL}, the library
#' included in the package (that contains 191 reference spectra) is used.
#' @param threshold.noise Threshold for signal noise. Default to 0.02.
#' @param combine Logical. If \code{TRUE}, information from all spectra are
#' taken into account to align individual library.
#' @param seed Random seed to control randomness in the algorithm (used in the
#' estimation of the significativity of a given metabolite concentration).
#' @param ncores Number of cores used in parallel evaluation. Default to
#' \code{1}.
#' @param verbose A boolean value to allow print out process information.
#'
#' @note Since version 2.3.1 small changes were applied in order to improve the
#' speed of metabolite selection algorithm. To reproduce previous results, you
#' have to use an older version.
#'
#' @return An object of type \linkS4class{ASICSResults} containing the
#' quantification results.
#'
#' @importFrom BiocParallel bplapply MulticoreParam multicoreWorkers SerialParam
#' @importFrom stats reshape
#' @export
#'
#' @seealso \linkS4class{ASICSResults} \code{\link{pure_library}}
#' \code{\link{createSpectra}}
#'
#' @references Tardivel P., Canlet C., Lefort G., Tremblay-Franco M., Debrauwer
#' L., Concordet D., Servien R. (2017). ASICS: an automatic method for
#' identification and quantification of metabolites in complex 1D 1H NMR
#' spectra. \emph{Metabolomics}, \strong{13}(10): 109.
#' \url{https://doi.org/10.1007/s11306-017-1244-5}
#'
#' @examples
#' # Import data and create object
#' current_path <- system.file("extdata", package = "ASICS")
#' spectra_data <- importSpectra(name.dir = current_path,
#' name.file = "spectra_example.txt", type.import = "txt")
#' spectra_obj <- createSpectra(spectra_data)
#'
#' # Estimation of relative quantifications
#' to_exclude <- matrix(c(4.5, 10), ncol = 2)
#' resASICS <- ASICS(spectra_obj, exclusion.areas = to_exclude, combine = FALSE)
ASICS <- function(spectra_obj,
                  exclusion.areas = matrix(c(4.5, 5.1), ncol = 2),
                  max.shift = 0.02, pure.library = NULL,
                  threshold.noise = 0.02, combine = TRUE, seed = 1234,
                  ncores = 1, verbose = TRUE) {

  ## Input validation -------------------------------------------------------
  # Use the scalar short-circuit operator `||` here: with the previous
  # vectorized `|`, a non-matrix input (e.g. a plain vector) made ncol()
  # return NULL, `NULL != 2` evaluate to logical(0) and the whole condition
  # collapse, so the intended error message was never reached.
  if(!is.null(exclusion.areas) &&
     (!is.matrix(exclusion.areas) || ncol(exclusion.areas) != 2)){
    stop("'exclusion.areas' must be a matrix with 2 columns.")
  }

  if(max.shift < 0){
    stop("'max.shift' must be non negative.")
  }

  if(threshold.noise < 0){
    stop("'threshold.noise' must be non negative.")
  }

  # Scalar condition: `&&` (short-circuit) instead of vectorized `&`.
  if(!is(pure.library, "PureLibrary") && !is.null(pure.library)){
    stop(paste("'pure.library' must be either NULL or an object of class",
               "'PureLibrary'."))
  }

  # Delegate the actual estimation pipeline to the internal worker.
  res_estimation <- .ASICSInternal(spectra_obj, exclusion.areas, max.shift,
                                   pure.library, threshold.noise, seed, ncores,
                                   combine, verbose)

  return(res_estimation)
}
#' @importFrom methods new
# Internal worker for ASICS(): runs the full quantification pipeline on each
# spectrum of `spectra_obj_raw` (exclusion of ppm areas, cleaning of the pure
# library, alignment by translation and local deformation, concentration
# optimisation) and assembles the results into an "ASICSResults" object.
.ASICSInternal <- function(spectra_obj_raw,
                           exclusion.areas = matrix(c(4.5, 5.1), ncol = 2),
                           max.shift = 0.02, pure.library = NULL,
                           threshold.noise = 0.02, seed = 1234, ncores = 1,
                           combine = TRUE, verbose = TRUE){
  # seed and parallel environment
  set.seed(seed)
  # default library or not
  if(is.null(pure.library)){
    pure.library <- ASICS::pure_library
  }
  # spectra object as a list where each element are 1 spectrum
  spectra_list <- lapply(seq_along(spectra_obj_raw),
                         function(x) spectra_obj_raw[x])
#-----------------------------------------------------------------------------
#### Remove areas from spectrum and library ####
  if (verbose) cat("Remove areas from spectrum and library \n")
  spectra_obj <- bplapply(spectra_list, .removeAreas, exclusion.areas,
                          pure.library,
                          BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                               verbose))
  # number of points on library grid corresponding to maximum shift
  # (in combine mode, max.shift becomes a vector of 5 increasing shifts;
  # downstream steps use its last, i.e. largest, element)
  # NOTE(review): scalar context -- `||` would be more idiomatic than `|`.
  if (length(spectra_list) == 1 | !combine) {
    nb_points_shift <-
      floor(max.shift / (spectra_obj[[1]][["cleaned_library"]]@ppm.grid[2] -
                           spectra_obj[[1]][["cleaned_library"]]@ppm.grid[1]))
  } else {
    max.shift <- seq_len(5) * max.shift / 5
    nb_points_shift <-
      floor(max.shift / (spectra_obj[[1]][["cleaned_library"]]@ppm.grid[2] -
                           spectra_obj[[1]][["cleaned_library"]]@ppm.grid[1]))
  }
#-----------------------------------------------------------------------------
#### Cleaning step: remove metabolites that cannot belong to the mixture ####
  if (verbose) cat("Remove metabolites that cannot belong to the mixture \n")
  spectra_obj <- bplapply(spectra_obj, .cleanLibrary, threshold.noise,
                          nb_points_shift[length(nb_points_shift)],
                          BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                               verbose))
#-----------------------------------------------------------------------------
#### Find the best translation between each pure spectra and mixture ####
#and sort metabolites by regression residuals
  # compute weights
  # Inverse-variance weights for a noise model with a multiplicative (s1)
  # and an additive (s2) component: w_i = 1 / (|y_i| * s1^2 + s2^2).
  s1 <- 0.172 #standard deviation of multiplicative noise
  s2 <- 0.15 #standard deviation of additive noise
  if (verbose) cat("Compute weights \n")
  spectra_obj <-
    bplapply(spectra_obj,
             function(x){x[["mixture_weights"]] <-
               as.numeric(1 / (abs(x[["cleaned_spectrum"]]@spectra) *
                                 s1 ^ 2 + s2 ^ 2)); return(x)},
             BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                  verbose))
  if (verbose) cat("Translate library \n")
  if (length(spectra_list) == 1 | !combine) {
    spectra_obj <- bplapply(spectra_obj, .translateLibrary,
                            nb_points_shift[length(nb_points_shift)],
                            max.shift[length(max.shift)],
                            BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                                 verbose))
  } else {
    # spectra binning
    spectra_to_bin <- data.frame(as.matrix(getSpectra(spectra_obj_raw)))
    rownames(spectra_to_bin) <- spectra_obj_raw@ppm.grid
    norm.param <- c(list(spectra = spectra_to_bin,
                         exclusion.areas = exclusion.areas,
                         ncores = ncores,
                         bin = 0.001,
                         verbose = FALSE,
                         type.norm = spectra_obj_raw@norm.method),
                    spectra_obj_raw@norm.params)
    spec_bin <- do.call("binning", norm.param)
    # drop bins with no signal in any spectrum
    spec_bin <- spec_bin[rowSums(spec_bin) != 0, ]
    spectra_obj <- .translateLibrary_combineVersion(spectra_obj, max.shift,
                                                    nb_points_shift, spec_bin,
                                                    pure.library, ncores,
                                                    length(spectra_obj_raw),
                                                    verbose)
  }
#-----------------------------------------------------------------------------
#### Localized deformations of pure spectra ####
  if (verbose) cat("Deform library peaks \n")
  spectra_obj <- bplapply(spectra_obj, .deformLibrary,
                          BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                               verbose))
#-----------------------------------------------------------------------------
#### Threshold and concentration optimisation for each metabolites ####
  if (verbose) cat("Compute quantifications \n")
  spectra_obj <- bplapply(spectra_obj, .concentrationOpti,
                          BPPARAM = .createEnv(ncores, length(spectra_obj_raw),
                                               verbose))
#-----------------------------------------------------------------------------
#### Results ####
  if (verbose) cat("Format results... \n")
  sample_name <-
    unlist(vapply(spectra_obj,
                  function(x) return(x[["cleaned_spectrum"]]@sample.name),
                  "character"))
  spectra <-
    do.call("cbind",
            lapply(spectra_obj,
                   function(x) return(x[["cleaned_spectrum"]]@spectra)))
  rec_spectra <-
    do.call("cbind", lapply(spectra_obj,
                            function(x) return(x[["est_mixture"]])))
  # Merge the per-spectrum relative concentrations into one metabolite x
  # sample table; metabolites absent from a spectrum become 0.
  # NOTE(review): join_all() comes from plyr, which is not in this file's
  # @importFrom tags -- confirm the package imports it elsewhere.
  rel_conc <- lapply(spectra_obj, function(x) {
    x[["relative_concentration"]]$row_names <-
      x[["cleaned_library"]]@sample.name ;
    return(x[["relative_concentration"]])})
  metab_conc <- join_all(rel_conc, by = "row_names", type = "full")
  rownames(metab_conc) <- metab_conc$row_names
  metab_conc$row_names <- NULL
  metab_conc[is.na(metab_conc)] <- 0
  pure_lib_format <- do.call("rbind",
                             lapply(spectra_obj,
                                    function(x) return(x[["format_library"]])))
  # Object to return
  res_object <- new(Class = "ASICSResults",
                    sample.name = sample_name,
                    ppm.grid = spectra_obj[[1]][["cleaned_spectrum"]]@ppm.grid,
                    spectra = spectra,
                    reconstructed.spectra = rec_spectra,
                    quantification = metab_conc,
                    deformed.library = pure_lib_format)
  return(res_object)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.