content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Run MADlib's association-rules algorithm in-database and fetch the results.
# Tested on R 3.1.1 clients on Ubuntu 14.04 Trusty Tahr,
# Mac OS X 10.9 Mavericks, and Windows 8.1 connecting to
# MADlib 1.6 running in PostgreSQL 9.3.5 on CentOS 6.5.
library(RPostgreSQL)
db.drv <- PostgreSQL()
# NOTE(review): hard-coded placeholder credentials; prefer Sys.getenv() for
# real deployments so secrets stay out of source control.
db.conn <- dbConnect(db.drv, host = "hostname",
port = 5432, dbname = "rdb",
user = "ruser",
password = "rpassword")
# Run association rules in MADlib (writes its output table into schema 'public')
dbGetQuery(
db.conn,
"SELECT *
FROM madlib.assoc_rules(
0.001, -- support
0.01, -- confidence
'trans_id', -- tid_col
'item_id', -- item_col
'trans_items', -- input_table
'public', -- output_schema
TRUE -- verbose
);")
# Retrieve results
dbGetQuery(db.conn, 'SELECT * FROM assoc_rules;')
# Fix: release the connection and driver so repeated runs do not leak
# PostgreSQL connections (RPostgreSQL caps open connections per driver).
dbDisconnect(db.conn)
dbUnloadDriver(db.drv)
|
/book/packt/R.High.Performance.Programming/chapter-09/09-04_in-database_madlib.R
|
permissive
|
xenron/sandbox-da-r
|
R
| false
| false
| 838
|
r
|
# Run MADlib's association-rules algorithm in-database and fetch the results.
# Tested on R 3.1.1 clients on Ubuntu 14.04 Trusty Tahr,
# Mac OS X 10.9 Mavericks, and Windows 8.1 connecting to
# MADlib 1.6 running in PostgreSQL 9.3.5 on CentOS 6.5.
library(RPostgreSQL)
db.drv <- PostgreSQL()
# NOTE(review): hard-coded placeholder credentials; prefer Sys.getenv() for
# real deployments so secrets stay out of source control.
db.conn <- dbConnect(db.drv, host = "hostname",
port = 5432, dbname = "rdb",
user = "ruser",
password = "rpassword")
# Run association rules in MADlib (writes its output table into schema 'public')
dbGetQuery(
db.conn,
"SELECT *
FROM madlib.assoc_rules(
0.001, -- support
0.01, -- confidence
'trans_id', -- tid_col
'item_id', -- item_col
'trans_items', -- input_table
'public', -- output_schema
TRUE -- verbose
);")
# Retrieve results
dbGetQuery(db.conn, 'SELECT * FROM assoc_rules;')
# Fix: release the connection and driver so repeated runs do not leak
# PostgreSQL connections (RPostgreSQL caps open connections per driver).
dbDisconnect(db.conn)
dbUnloadDriver(db.drv)
|
# Survey text-mining script: unigram/bigram tf-idf, sentiment clouds,
# Zipf's law, a quanteda dfm + Naive Bayes, LDA topics, and word clouds.
library(textreadr)
library(dplyr)
library(stringr)
library(tidytext)
library(reshape2)
library(ggplot2)
library(wordcloud)
library(tidyr)
library(tm)
library(quanteda)
data(stop_words)
# NOTE(review): setwd() to a personal path makes the script machine-specific;
# kept only for compatibility with the original workflow.
setwd('/Users/user/Desktop/R/Mod B/Project Team')
read_data <- read_document(file="survey.txt")
# Survey-prompt words to strip from answers in addition to standard stop words.
# (Fix: data_frame() is deprecated in favour of tibble(), and rep(x, each = n)
# was a misuse of `each` for a length-one x; rep(x, times = n) is equivalent.)
junk <- tibble(
  word = c("visit","plan","out","on","which","countries","what","why","how","much","are","spent",
           "spend","spending","usually","by","at","it's","i'm","i'd","day","don't","hussein","tend",
           "that's","haven't","i'll"),
  lexicon = rep("SMART", times = 27))
a <- 32 # how many observations do you have (respondents)
b <- 5  # how many variables do you have (questions)
# Answers are stored question-major per respondent: element (i-1)*b + z is
# respondent i's answer to question z, so a single row-major matrix reshape
# replaces the original element-by-element double loop.
my_df <- as.data.frame(matrix(read_data[seq_len(a * b)], nrow = a, ncol = b, byrow = TRUE))
########################
##### 1 word token #####
# One survey question per column; wrap each as a (line, text) frame so
# tidytext can tokenize it. `a` is still 32 here (one line per respondent).
my_txt_1 <- my_df$V1
my_txt_2 <- my_df$V2
my_txt_3 <- my_df$V3
my_txt_4 <- my_df$V4
my_txt_5 <- my_df$V5
mydf_1 <- tibble(line=1:a, text=my_txt_1)
mydf_2 <- tibble(line=1:a, text=my_txt_2)
mydf_3 <- tibble(line=1:a, text=my_txt_3)
mydf_4 <- tibble(line=1:a, text=my_txt_4)
mydf_5 <- tibble(line=1:a, text=my_txt_5)
# Unigram counts per question, with standard stop words removed.
Q1_token <- mydf_1 %>% unnest_tokens(word, text) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q2_token <- mydf_2 %>% unnest_tokens(word, text) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q3_token <- mydf_3 %>% unnest_tokens(word, text) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q4_token <- mydf_4 %>% unnest_tokens(word, text) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q5_token <- mydf_5 %>% unnest_tokens(word, text) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
# (Fix: removed a verbatim duplicate of the ten my_txt_*/mydf_* assignments
# that recomputed identical values right after the counts above.)
# Stack the per-question counts, tagging each row with its question id.
frequency_question <- bind_rows(mutate(Q1_token, question = 'Q1'),
                                mutate(Q2_token, question = 'Q2'),
                                mutate(Q3_token, question = 'Q3'),
                                mutate(Q4_token, question = 'Q4'),
                                mutate(Q5_token, question = 'Q5'))
# Word totals per question, then tf-idf of each word within its question.
total_words <- frequency_question %>% group_by(question) %>% summarize(total=sum(n))
book_words <- left_join(frequency_question, total_words)
# Rank words within each question for the Zipf's-law plot further below.
freq_by_rank <- book_words %>% group_by(question) %>%
  mutate(rank = row_number(), `term frequency` = n/total)
testing <- book_words %>% bind_tf_idf(word, question, n)
haha <- testing %>% arrange(desc(tf_idf))
# (Fix: dropped the redundant haha[order(-haha$tf_idf), ] re-sort -- `haha`
# is already ordered by descending tf_idf, and both sorts are stable.)
# Top-15 tf-idf words per question, one facet per question.
haha %>% mutate(word=factor(word, levels=rev(unique(word)))) %>%
  group_by(question) %>%
  top_n(15) %>%
  ungroup %>%
  ggplot(aes(word, tf_idf, fill=question)) + geom_col(show.legend=FALSE) +
  labs(x=NULL, y="tf-idf") + facet_wrap(~question, ncol=3, scales="free") + coord_flip()
##### Sentiment Graph ###################
# Per-question Bing sentiment counts: tokenize each question's answers and
# keep only words present in the Bing lexicon, counted by (word, sentiment).
Q1_token_sentiment <- mydf_1 %>% unnest_tokens(word, text) %>% inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=T) %>% ungroup()
Q2_token_sentiment <- mydf_2 %>% unnest_tokens(word, text) %>% inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=T) %>% ungroup()
Q3_token_sentiment <- mydf_3 %>% unnest_tokens(word, text) %>% inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=T) %>% ungroup()
Q4_token_sentiment <- mydf_4 %>% unnest_tokens(word, text) %>% inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=T) %>% ungroup()
Q5_token_sentiment <- mydf_5 %>% unnest_tokens(word, text) %>% inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=T) %>% ungroup()
# Comparison word clouds from the NRC lexicon, one cloud per question.
# NOTE(review): these frames already carry a Bing `sentiment` column, so
# inner_join(get_sentiments("nrc")) joins on BOTH word and sentiment, and the
# following count() recounts rows, collapsing every n to 1 -- the clouds end
# up unweighted. The intent was probably to join the raw tokens instead;
# confirm before relying on these plots.
Q1_token_sentiment %>% inner_join(get_sentiments("nrc")) %>% count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>% comparison.cloud(colors = c("grey20", "gray80"),max.words=100)
Q2_token_sentiment %>% inner_join(get_sentiments("nrc")) %>% count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>% comparison.cloud(colors = c("grey20", "gray80"), max.words=100)
Q3_token_sentiment %>%inner_join(get_sentiments("nrc")) %>%count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>%comparison.cloud(colors = c("grey20", "gray80"), max.words=100)
Q4_token_sentiment %>%inner_join(get_sentiments("nrc")) %>%count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>%comparison.cloud(colors = c("grey20", "gray80"), max.words=100)
Q5_token_sentiment %>%inner_join(get_sentiments("nrc")) %>%count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>%comparison.cloud(colors = c("grey20", "gray80"),max.words=100)
# Top-10 words contributing to each Bing sentiment, one panel per sentiment.
Q1_token_sentiment %>%group_by(sentiment) %>%top_n(10) %>%ungroup() %>% mutate(word=reorder(word, n)) %>%
ggplot(aes(word, n, fill=sentiment)) + geom_col(show.legend = FALSE) +facet_wrap(~sentiment, scales = "free_y")+
labs(y="Contribution to sentiment", x=NULL)+coord_flip()
Q2_token_sentiment %>% group_by(sentiment) %>% top_n(10) %>% ungroup() %>% mutate(word=reorder(word, n)) %>%
ggplot(aes(word, n, fill=sentiment)) + geom_col(show.legend = FALSE) + facet_wrap(~sentiment, scales = "free_y")+
labs(y="Contribution to sentiment", x=NULL)+ coord_flip()
Q3_token_sentiment %>% group_by(sentiment) %>%top_n(10) %>%ungroup() %>%mutate(word=reorder(word, n)) %>%
ggplot(aes(word, n, fill=sentiment)) +geom_col(show.legend = FALSE) +facet_wrap(~sentiment, scales = "free_y")+
labs(y="Contribution to sentiment", x=NULL)+ coord_flip()
Q4_token_sentiment %>% group_by(sentiment) %>% top_n(10) %>%ungroup() %>%mutate(word=reorder(word, n)) %>%
ggplot(aes(word, n, fill=sentiment)) + geom_col(show.legend = FALSE) +facet_wrap(~sentiment, scales = "free_y")+
labs(y="Contribution to sentiment", x=NULL)+coord_flip()
Q5_token_sentiment %>%group_by(sentiment) %>%top_n(10) %>%ungroup() %>%mutate(word=reorder(word, n)) %>%
ggplot(aes(word, n, fill=sentiment)) +geom_col(show.legend = FALSE) +facet_wrap(~sentiment, scales = "free_y")+
labs(y="Contribution to sentiment", x=NULL)+ coord_flip()
########## ZIPF's law ################
# Rank/term-frequency plot on log-log axes as a Zipf's-law check; the dashed
# gray reference line has slope close to -1, as the law predicts.
ggplot(freq_by_rank, aes(rank, `term frequency`, color = question)) +
  geom_abline(intercept = -0.62, slope = -1.1, color = 'gray50', linetype = 2) +
  geom_line(size = 1.1, alpha = 0.8, show.legend = FALSE) +
  scale_x_log10() +
  scale_y_log10()
######## two words token #############
# Bigram counts per question. NOTE(review): anti_join(stop_words) here joins
# on the two-word `word` string, which never matches a one-word stop word --
# effectively a no-op; the real stop-word filtering happens after separate().
Q1_2token <- mydf_1 %>%unnest_tokens(word,text, token = "ngrams", n=2) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q2_2token <- mydf_2 %>%unnest_tokens(word,text, token = "ngrams", n=2) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q3_2token <- mydf_3 %>%unnest_tokens(word,text, token = "ngrams", n=2) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q4_2token <- mydf_4 %>%unnest_tokens(word,text, token = "ngrams", n=2) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q5_2token <- mydf_5 %>%unnest_tokens(word,text, token = "ngrams", n=2) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
# Tag each per-question bigram table with its question id and normalise the
# column order before stacking them into one frame.
Q1_2token['question']<-c('Q1')
Q1_2token_m<-select(Q1_2token, "question", "word", "n")
Q2_2token['question']<-c('Q2')
Q2_2token_m<-select(Q2_2token, "question", "word", "n")
Q3_2token['question']<-c('Q3')
Q3_2token_m<-select(Q3_2token, "question", "word", "n")
Q4_2token['question']<-c('Q4')
Q4_2token_m<-select(Q4_2token, "question", "word", "n")
Q5_2token['question']<-c('Q5')
Q5_2token_m<-select(Q5_2token, "question", "word", "n")
all_2_tokenize<-rbind(Q1_2token_m,Q2_2token_m,Q3_2token_m,Q4_2token_m,Q5_2token_m)
# Split each bigram into its two words so each side can be checked against
# the stop-word list, then re-unite the surviving pairs.
final_tesla_separated <- all_2_tokenize %>%separate(word, c("word1", "word2"), sep = " ")
final_2token_filtered <- final_tesla_separated %>% filter(!word1 %in% stop_words$word) %>% filter(!word2 %in% stop_words$word)
# NOTE(review): final_2token_counts is computed but never used below.
final_2token_counts <- final_2token_filtered %>% count(word1, word2, sort = TRUE)
final_2token_united <- final_2token_filtered %>% unite(word, word1, word2, sep = " ")
# Totals, within-question rank/term-frequency, and tf-idf for the bigrams.
final_2token_united_total <- final_2token_united %>%group_by(question) %>%summarize(total=sum(n))
final_2token_words <- left_join(final_2token_united, final_2token_united_total)
final_2token_freq_by_rank <-final_2token_words %>%group_by(question) %>% mutate(rank = row_number(),`term frequency` = n / total)
final_2token_freq_by_rank <- final_2token_freq_by_rank %>% bind_tf_idf(word, question, n)
final_2token_freq_by_rank %>%arrange(desc(tf_idf))
# Top-8 tf-idf bigrams per question, one facet per question.
final_2token_freq_by_rank %>%arrange(desc(tf_idf)) %>%mutate(word=factor(word, levels=rev(unique(word)))) %>%
group_by(question) %>%top_n(8) %>%ungroup %>%ggplot(aes(word, tf_idf, fill=question))+geom_col(show.legend=FALSE)+
labs(x=NULL, y="tf-idf")+facet_wrap(~question, ncol=3, scales="free")+coord_flip()
######## dfm ######
# Transpose so each COLUMN is one respondent (V1..V32) and each row is one of
# the five questions: from here on, the "documents" are people, not questions.
transpose <- t(my_df)
t_df <- as.data.frame(transpose)
# NOTE: deliberately overwrites the earlier a (= 32 respondents); each
# per-person frame below has one line per question.
a <- 5
# Build my_txt_p and mydf_p for every respondent p = 1..32. This replaces 64
# copy-pasted assignments with an equivalent loop creating the same globals.
for (p in 1:32) {
  txt <- t_df[[paste0("V", p)]]
  assign(paste0("my_txt_", p), txt)
  assign(paste0("mydf_", p), tibble(line = 1:a, text = txt))
}
# Stack all respondents into one frame tagged with a person id p01..p32.
# (Fix: the original bind_rows() call ended with a trailing comma after the
# p32 entry, which errors on older dplyr with "argument 33 is empty" and only
# parses at all thanks to rlang's trailing-comma tolerance.)
frequency_person <- bind_rows(lapply(1:32, function(p) {
  mutate(get(paste0("mydf_", p)), person = sprintf("p%02d", p))
}))
# Person-level document-feature matrix for quanteda. Because of the
# group_by(person), count(word) counts words within each person, and
# cast_dfm() yields one row per person and one column per word.
survey_dfm <- frequency_person %>%
group_by(person) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words) %>%
anti_join(junk) %>%
count(word) %>%
cast_dfm(person, word, n)
####predict#####
# Train on the first 8 respondents, predict the remaining 24.
survey_dfm.train<-survey_dfm[1:8,]
survey_dfm.test<-survey_dfm[9:32,]
#building the Naive Bayes model:
# NOTE(review): the 0/1 training labels for the first eight respondents are
# hard-coded; their meaning is not visible in this file -- confirm against
# the survey answer key before reuse.
NB_classifier <- textmodel_nb(survey_dfm.train, c(1,1,1,1,0,1,0,1))
NB_classifier
summary(NB_classifier)
# predicting the testing data
pred <- predict(NB_classifier, survey_dfm.test)
pred
########LDA######
# Document-term matrix: one row per person, one column per word, with
# standard stop words and survey-prompt words removed.
survey_dtm <- frequency_person %>%
group_by(person) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words) %>%
anti_join(junk) %>%
count(word) %>%
cast_dtm(person, word, n)
survey_dtm
# Fix: LDA() comes from the topicmodels package, which was never attached;
# without this the call below fails with 'could not find function "LDA"'.
library(topicmodels)
#calling the Latent Dirichlet Allocation algorithm
ap_lda <- LDA(survey_dtm, k=2, control=list(seed=123)) # k = number of topics
ap_lda
#now we are looking for the per topic per word probabilities aka. beta
#beta - what is the probability that "this term" will be generated by "this topic"
library(tidytext)
ap_topics <- tidy(ap_lda, matrix="beta")
ap_topics
library(ggplot2)
library(dplyr)
# Ten highest-probability terms within each topic.
top_terms <- ap_topics %>%
group_by(topic) %>%
top_n(10, beta) %>%
ungroup() %>%
arrange(topic, -beta)
top_terms
#lets plot the term frequencies by topic
top_terms %>%
mutate(term=reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = factor(topic))) +
geom_col(show.legend=FALSE) +
facet_wrap(~topic, scales = "free") +
coord_flip()
#lets calculate the relative difference between the betas for words in topic 1
#and words in topic 2
beta_spread <- ap_topics %>%
mutate(topic=paste0("topic", topic)) %>%
spread(topic, beta) %>%
filter(topic1>.001 | topic2 >0.001) %>%
mutate(log_rate = log2(topic2/topic1)) %>% # large values = words favoured by topic 2
arrange(desc(log_rate))
beta_spread
# Per-document topic proportions; with k = 2, topic-2 gamma = 1 - topic-1 gamma.
my_gamma <- tidy(ap_lda, matrix="gamma")
######## clustering #######
# Per-person word counts (same preprocessing as the dfm/dtm above).
basic <- frequency_person %>%
group_by(person) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words) %>%
anti_join(junk) %>%
count(word)
# Hand-picked split of respondents into two clusters -- presumably derived
# from the LDA gamma values above; confirm the membership list before reuse.
cluster1 <- basic %>%
filter(person %in% c("p04","p05","p06","p07","p09","p10","p11","p12","p15","p20","p26","p31"))
cluster2 <- basic %>%
filter(!person %in% c("p04","p05","p06","p07","p09","p10","p11","p12","p15","p20","p26","p31"))
# NOTE(review): wordcloud2 is attached here but never used; the calls below
# are wordcloud::wordcloud(), already attached at the top of the script.
library(wordcloud2)
wordcloud(words = cluster1$word, freq = cluster1$n, min.freq = 1,
max.words=100, random.order=FALSE, rot.per=0.35,
colors=brewer.pal(8, "Spectral"))
wordcloud(words = cluster2$word, freq = cluster2$n, min.freq = 1,
max.words=100, random.order=FALSE, rot.per=0.35,
colors=brewer.pal(8, "RdYlGn"))
|
/TOKENISATION IN R .R
|
no_license
|
mable1996/TOKENIZATION-IN-R-
|
R
| false
| false
| 15,912
|
r
|
# Survey text-mining script: unigram/bigram tf-idf, sentiment clouds,
# Zipf's law, a quanteda dfm + Naive Bayes, LDA topics, and word clouds.
library(textreadr)
library(dplyr)
library(stringr)
library(tidytext)
library(reshape2)
library(ggplot2)
library(wordcloud)
library(tidyr)
library(tm)
library(quanteda)
data(stop_words)
# NOTE(review): setwd() to a personal path makes the script machine-specific;
# kept only for compatibility with the original workflow.
setwd('/Users/user/Desktop/R/Mod B/Project Team')
read_data <- read_document(file="survey.txt")
# Survey-prompt words to strip from answers in addition to standard stop words.
# (Fix: data_frame() is deprecated in favour of tibble(), and rep(x, each = n)
# was a misuse of `each` for a length-one x; rep(x, times = n) is equivalent.)
junk <- tibble(
  word = c("visit","plan","out","on","which","countries","what","why","how","much","are","spent",
           "spend","spending","usually","by","at","it's","i'm","i'd","day","don't","hussein","tend",
           "that's","haven't","i'll"),
  lexicon = rep("SMART", times = 27))
a <- 32 # how many observations do you have (respondents)
b <- 5  # how many variables do you have (questions)
# Answers are stored question-major per respondent: element (i-1)*b + z is
# respondent i's answer to question z, so a single row-major matrix reshape
# replaces the original element-by-element double loop.
my_df <- as.data.frame(matrix(read_data[seq_len(a * b)], nrow = a, ncol = b, byrow = TRUE))
########################
##### 1 word token #####
# One survey question per column; wrap each as a (line, text) frame so
# tidytext can tokenize it. `a` is still 32 here (one line per respondent).
my_txt_1 <- my_df$V1
my_txt_2 <- my_df$V2
my_txt_3 <- my_df$V3
my_txt_4 <- my_df$V4
my_txt_5 <- my_df$V5
mydf_1 <- tibble(line=1:a, text=my_txt_1)
mydf_2 <- tibble(line=1:a, text=my_txt_2)
mydf_3 <- tibble(line=1:a, text=my_txt_3)
mydf_4 <- tibble(line=1:a, text=my_txt_4)
mydf_5 <- tibble(line=1:a, text=my_txt_5)
# Unigram counts per question, with standard stop words removed.
Q1_token <- mydf_1 %>% unnest_tokens(word, text) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q2_token <- mydf_2 %>% unnest_tokens(word, text) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q3_token <- mydf_3 %>% unnest_tokens(word, text) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q4_token <- mydf_4 %>% unnest_tokens(word, text) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q5_token <- mydf_5 %>% unnest_tokens(word, text) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
# (Fix: removed a verbatim duplicate of the ten my_txt_*/mydf_* assignments
# that recomputed identical values right after the counts above.)
# Stack the per-question counts, tagging each row with its question id.
frequency_question <- bind_rows(mutate(Q1_token, question = 'Q1'),
                                mutate(Q2_token, question = 'Q2'),
                                mutate(Q3_token, question = 'Q3'),
                                mutate(Q4_token, question = 'Q4'),
                                mutate(Q5_token, question = 'Q5'))
# Word totals per question, then tf-idf of each word within its question.
total_words <- frequency_question %>% group_by(question) %>% summarize(total=sum(n))
book_words <- left_join(frequency_question, total_words)
# Rank words within each question for the Zipf's-law plot further below.
freq_by_rank <- book_words %>% group_by(question) %>%
  mutate(rank = row_number(), `term frequency` = n/total)
testing <- book_words %>% bind_tf_idf(word, question, n)
haha <- testing %>% arrange(desc(tf_idf))
# (Fix: dropped the redundant haha[order(-haha$tf_idf), ] re-sort -- `haha`
# is already ordered by descending tf_idf, and both sorts are stable.)
# Top-15 tf-idf words per question, one facet per question.
haha %>% mutate(word=factor(word, levels=rev(unique(word)))) %>%
  group_by(question) %>%
  top_n(15) %>%
  ungroup %>%
  ggplot(aes(word, tf_idf, fill=question)) + geom_col(show.legend=FALSE) +
  labs(x=NULL, y="tf-idf") + facet_wrap(~question, ncol=3, scales="free") + coord_flip()
##### Sentiment Graph ###################
# Per-question Bing sentiment counts: tokenize each question's answers and
# keep only words present in the Bing lexicon, counted by (word, sentiment).
Q1_token_sentiment <- mydf_1 %>% unnest_tokens(word, text) %>% inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=T) %>% ungroup()
Q2_token_sentiment <- mydf_2 %>% unnest_tokens(word, text) %>% inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=T) %>% ungroup()
Q3_token_sentiment <- mydf_3 %>% unnest_tokens(word, text) %>% inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=T) %>% ungroup()
Q4_token_sentiment <- mydf_4 %>% unnest_tokens(word, text) %>% inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=T) %>% ungroup()
Q5_token_sentiment <- mydf_5 %>% unnest_tokens(word, text) %>% inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=T) %>% ungroup()
# Comparison word clouds from the NRC lexicon, one cloud per question.
# NOTE(review): these frames already carry a Bing `sentiment` column, so
# inner_join(get_sentiments("nrc")) joins on BOTH word and sentiment, and the
# following count() recounts rows, collapsing every n to 1 -- the clouds end
# up unweighted. The intent was probably to join the raw tokens instead;
# confirm before relying on these plots.
Q1_token_sentiment %>% inner_join(get_sentiments("nrc")) %>% count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>% comparison.cloud(colors = c("grey20", "gray80"),max.words=100)
Q2_token_sentiment %>% inner_join(get_sentiments("nrc")) %>% count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>% comparison.cloud(colors = c("grey20", "gray80"), max.words=100)
Q3_token_sentiment %>%inner_join(get_sentiments("nrc")) %>%count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>%comparison.cloud(colors = c("grey20", "gray80"), max.words=100)
Q4_token_sentiment %>%inner_join(get_sentiments("nrc")) %>%count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>%comparison.cloud(colors = c("grey20", "gray80"), max.words=100)
Q5_token_sentiment %>%inner_join(get_sentiments("nrc")) %>%count(word, sentiment, sort=TRUE) %>%
acast(word ~sentiment, value.var="n", fill=0) %>%comparison.cloud(colors = c("grey20", "gray80"),max.words=100)
# Top-10 words contributing to each Bing sentiment, one panel per sentiment.
Q1_token_sentiment %>%group_by(sentiment) %>%top_n(10) %>%ungroup() %>% mutate(word=reorder(word, n)) %>%
ggplot(aes(word, n, fill=sentiment)) + geom_col(show.legend = FALSE) +facet_wrap(~sentiment, scales = "free_y")+
labs(y="Contribution to sentiment", x=NULL)+coord_flip()
Q2_token_sentiment %>% group_by(sentiment) %>% top_n(10) %>% ungroup() %>% mutate(word=reorder(word, n)) %>%
ggplot(aes(word, n, fill=sentiment)) + geom_col(show.legend = FALSE) + facet_wrap(~sentiment, scales = "free_y")+
labs(y="Contribution to sentiment", x=NULL)+ coord_flip()
Q3_token_sentiment %>% group_by(sentiment) %>%top_n(10) %>%ungroup() %>%mutate(word=reorder(word, n)) %>%
ggplot(aes(word, n, fill=sentiment)) +geom_col(show.legend = FALSE) +facet_wrap(~sentiment, scales = "free_y")+
labs(y="Contribution to sentiment", x=NULL)+ coord_flip()
Q4_token_sentiment %>% group_by(sentiment) %>% top_n(10) %>%ungroup() %>%mutate(word=reorder(word, n)) %>%
ggplot(aes(word, n, fill=sentiment)) + geom_col(show.legend = FALSE) +facet_wrap(~sentiment, scales = "free_y")+
labs(y="Contribution to sentiment", x=NULL)+coord_flip()
Q5_token_sentiment %>%group_by(sentiment) %>%top_n(10) %>%ungroup() %>%mutate(word=reorder(word, n)) %>%
ggplot(aes(word, n, fill=sentiment)) +geom_col(show.legend = FALSE) +facet_wrap(~sentiment, scales = "free_y")+
labs(y="Contribution to sentiment", x=NULL)+ coord_flip()
########## ZIPF's law ################
# Rank/term-frequency plot on log-log axes as a Zipf's-law check; the dashed
# gray reference line has slope close to -1, as the law predicts.
ggplot(freq_by_rank, aes(rank, `term frequency`, color = question)) +
  geom_abline(intercept = -0.62, slope = -1.1, color = 'gray50', linetype = 2) +
  geom_line(size = 1.1, alpha = 0.8, show.legend = FALSE) +
  scale_x_log10() +
  scale_y_log10()
######## two words token #############
# Bigram counts per question. NOTE(review): anti_join(stop_words) here joins
# on the two-word `word` string, which never matches a one-word stop word --
# effectively a no-op; the real stop-word filtering happens after separate().
Q1_2token <- mydf_1 %>%unnest_tokens(word,text, token = "ngrams", n=2) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q2_2token <- mydf_2 %>%unnest_tokens(word,text, token = "ngrams", n=2) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q3_2token <- mydf_3 %>%unnest_tokens(word,text, token = "ngrams", n=2) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q4_2token <- mydf_4 %>%unnest_tokens(word,text, token = "ngrams", n=2) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
Q5_2token <- mydf_5 %>%unnest_tokens(word,text, token = "ngrams", n=2) %>% anti_join(stop_words) %>% count(word, sort=TRUE)
# Tag each per-question bigram table with its question id and normalise the
# column order before stacking them into one frame.
Q1_2token['question']<-c('Q1')
Q1_2token_m<-select(Q1_2token, "question", "word", "n")
Q2_2token['question']<-c('Q2')
Q2_2token_m<-select(Q2_2token, "question", "word", "n")
Q3_2token['question']<-c('Q3')
Q3_2token_m<-select(Q3_2token, "question", "word", "n")
Q4_2token['question']<-c('Q4')
Q4_2token_m<-select(Q4_2token, "question", "word", "n")
Q5_2token['question']<-c('Q5')
Q5_2token_m<-select(Q5_2token, "question", "word", "n")
all_2_tokenize<-rbind(Q1_2token_m,Q2_2token_m,Q3_2token_m,Q4_2token_m,Q5_2token_m)
# Split each bigram into its two words so each side can be checked against
# the stop-word list, then re-unite the surviving pairs.
final_tesla_separated <- all_2_tokenize %>%separate(word, c("word1", "word2"), sep = " ")
final_2token_filtered <- final_tesla_separated %>% filter(!word1 %in% stop_words$word) %>% filter(!word2 %in% stop_words$word)
# NOTE(review): final_2token_counts is computed but never used below.
final_2token_counts <- final_2token_filtered %>% count(word1, word2, sort = TRUE)
final_2token_united <- final_2token_filtered %>% unite(word, word1, word2, sep = " ")
# Totals, within-question rank/term-frequency, and tf-idf for the bigrams.
final_2token_united_total <- final_2token_united %>%group_by(question) %>%summarize(total=sum(n))
final_2token_words <- left_join(final_2token_united, final_2token_united_total)
final_2token_freq_by_rank <-final_2token_words %>%group_by(question) %>% mutate(rank = row_number(),`term frequency` = n / total)
final_2token_freq_by_rank <- final_2token_freq_by_rank %>% bind_tf_idf(word, question, n)
final_2token_freq_by_rank %>%arrange(desc(tf_idf))
# Top-8 tf-idf bigrams per question, one facet per question.
final_2token_freq_by_rank %>%arrange(desc(tf_idf)) %>%mutate(word=factor(word, levels=rev(unique(word)))) %>%
group_by(question) %>%top_n(8) %>%ungroup %>%ggplot(aes(word, tf_idf, fill=question))+geom_col(show.legend=FALSE)+
labs(x=NULL, y="tf-idf")+facet_wrap(~question, ncol=3, scales="free")+coord_flip()
######## dfm ######
# Transpose so each COLUMN is one respondent (V1..V32) and each row is one of
# the five questions: from here on, the "documents" are people, not questions.
transpose <- t(my_df)
t_df <- as.data.frame(transpose)
# NOTE: deliberately overwrites the earlier a (= 32 respondents); each
# per-person frame below has one line per question.
a <- 5
# Build my_txt_p and mydf_p for every respondent p = 1..32. This replaces 64
# copy-pasted assignments with an equivalent loop creating the same globals.
for (p in 1:32) {
  txt <- t_df[[paste0("V", p)]]
  assign(paste0("my_txt_", p), txt)
  assign(paste0("mydf_", p), tibble(line = 1:a, text = txt))
}
# Stack all respondents into one frame tagged with a person id p01..p32.
# (Fix: the original bind_rows() call ended with a trailing comma after the
# p32 entry, which errors on older dplyr with "argument 33 is empty" and only
# parses at all thanks to rlang's trailing-comma tolerance.)
frequency_person <- bind_rows(lapply(1:32, function(p) {
  mutate(get(paste0("mydf_", p)), person = sprintf("p%02d", p))
}))
# Person-level document-feature matrix for quanteda. Because of the
# group_by(person), count(word) counts words within each person, and
# cast_dfm() yields one row per person and one column per word.
survey_dfm <- frequency_person %>%
group_by(person) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words) %>%
anti_join(junk) %>%
count(word) %>%
cast_dfm(person, word, n)
####predict#####
# Train on the first 8 respondents, predict the remaining 24.
survey_dfm.train<-survey_dfm[1:8,]
survey_dfm.test<-survey_dfm[9:32,]
#building the Naive Bayes model:
# NOTE(review): the 0/1 training labels for the first eight respondents are
# hard-coded; their meaning is not visible in this file -- confirm against
# the survey answer key before reuse.
NB_classifier <- textmodel_nb(survey_dfm.train, c(1,1,1,1,0,1,0,1))
NB_classifier
summary(NB_classifier)
# predicting the testing data
pred <- predict(NB_classifier, survey_dfm.test)
pred
########LDA######
# Document-term matrix: one row per person, one column per word, with
# standard stop words and survey-prompt words removed.
survey_dtm <- frequency_person %>%
group_by(person) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words) %>%
anti_join(junk) %>%
count(word) %>%
cast_dtm(person, word, n)
survey_dtm
# Fix: LDA() comes from the topicmodels package, which was never attached;
# without this the call below fails with 'could not find function "LDA"'.
library(topicmodels)
#calling the Latent Dirichlet Allocation algorithm
ap_lda <- LDA(survey_dtm, k=2, control=list(seed=123)) # k = number of topics
ap_lda
#now we are looking for the per topic per word probabilities aka. beta
#beta - what is the probability that "this term" will be generated by "this topic"
library(tidytext)
ap_topics <- tidy(ap_lda, matrix="beta")
ap_topics
library(ggplot2)
library(dplyr)
# Ten highest-probability terms within each topic.
top_terms <- ap_topics %>%
group_by(topic) %>%
top_n(10, beta) %>%
ungroup() %>%
arrange(topic, -beta)
top_terms
#lets plot the term frequencies by topic
top_terms %>%
mutate(term=reorder(term, beta)) %>%
ggplot(aes(term, beta, fill = factor(topic))) +
geom_col(show.legend=FALSE) +
facet_wrap(~topic, scales = "free") +
coord_flip()
#lets calculate the relative difference between the betas for words in topic 1
#and words in topic 2
beta_spread <- ap_topics %>%
mutate(topic=paste0("topic", topic)) %>%
spread(topic, beta) %>%
filter(topic1>.001 | topic2 >0.001) %>%
mutate(log_rate = log2(topic2/topic1)) %>% # large values = words favoured by topic 2
arrange(desc(log_rate))
beta_spread
# Per-document topic proportions; with k = 2, topic-2 gamma = 1 - topic-1 gamma.
my_gamma <- tidy(ap_lda, matrix="gamma")
######## clustering #######
# Word counts per person, used to compare two manually-defined respondent
# clusters via word clouds.
basic <- frequency_person %>%
group_by(person) %>%
unnest_tokens(word, text) %>%
anti_join(stop_words) %>%
anti_join(junk) %>%
count(word)
# Cluster membership is hard-coded below.
# NOTE(review): presumably these IDs come from an earlier clustering
# step -- confirm against the analysis that produced them.
cluster1 <- basic %>%
filter(person %in% c("p04","p05","p06","p07","p09","p10","p11","p12","p15","p20","p26","p31"))
cluster2 <- basic %>%
filter(!person %in% c("p04","p05","p06","p07","p09","p10","p11","p12","p15","p20","p26","p31"))
library(wordcloud2)
# NOTE(review): wordcloud2 is loaded here, but wordcloud() below belongs
# to the `wordcloud` package (loaded at the top of the file) -- the
# wordcloud2 load appears unused.
wordcloud(words = cluster1$word, freq = cluster1$n, min.freq = 1,
max.words=100, random.order=FALSE, rot.per=0.35,
colors=brewer.pal(8, "Spectral"))
wordcloud(words = cluster2$word, freq = cluster2$n, min.freq = 1,
max.words=100, random.order=FALSE, rot.per=0.35,
colors=brewer.pal(8, "RdYlGn"))
|
# Exercise 2: indexing and filtering vectors

# Create a vector `first_ten` that has the values 10 through 20 in it (using
# the : operator)
first_ten <- 10:20

# Create a vector `next_ten` that has the values 21 through 30 in it (using the
# seq() function)
next_ten <- seq(21, 30)

# Create a vector `all_numbers` by combining the previous two vectors
all_numbers <- c(first_ten, next_ten)

# Create a variable `eleventh` that contains the 11th element in `all_numbers`
eleventh <- all_numbers[11]

# Create a vector `some_numbers` that contains the 2nd through the 5th elements
# of `all_numbers`
some_numbers <- all_numbers[2:5]

# Create a vector `even` that holds the even numbers from 1 to 100
even <- seq(2, 100, 2)

# Using the `all()` function and `%%` (modulo) operator, confirm that all of the
# numbers in your `even` vector are even
test <- all(even %% 2 == 0)

# Create a vector `phone_numbers` that contains the numbers 8, 6, 7, 5, 3, 0, 9
phone_numbers <- c(8, 6, 7, 5, 3, 0, 9)

# Create a vector `prefix` that has the first three elements of `phone_numbers`
prefix <- phone_numbers[1:3]

# Create a vector `small` that has the values of `phone_numbers` that are
# less than or equal to 5
small <- phone_numbers[phone_numbers <= 5]

# Create a vector `large` that has the values of `phone_numbers` that are
# strictly greater than 5
# Bug fix: was `>=`, which wrongly included 5 despite "strictly greater".
large <- phone_numbers[phone_numbers > 5]

# Replace the values in `phone_numbers` that are larger than 5 with the number 5
phone_numbers[phone_numbers > 5] <- 5

# Replace every odd-numbered value in `phone_numbers` with the number 0
phone_numbers[phone_numbers %% 2 == 1] <- 0
|
/Exercise-2.R
|
no_license
|
Ali-gogo/Chapter-07-exercises.
|
R
| false
| false
| 1,621
|
r
|
# Exercise 2: indexing and filtering vectors

# Create a vector `first_ten` that has the values 10 through 20 in it (using
# the : operator)
first_ten <- 10:20

# Create a vector `next_ten` that has the values 21 through 30 in it (using the
# seq() function)
next_ten <- seq(21, 30)

# Create a vector `all_numbers` by combining the previous two vectors
all_numbers <- c(first_ten, next_ten)

# Create a variable `eleventh` that contains the 11th element in `all_numbers`
eleventh <- all_numbers[11]

# Create a vector `some_numbers` that contains the 2nd through the 5th elements
# of `all_numbers`
some_numbers <- all_numbers[2:5]

# Create a vector `even` that holds the even numbers from 1 to 100
even <- seq(2, 100, 2)

# Using the `all()` function and `%%` (modulo) operator, confirm that all of the
# numbers in your `even` vector are even
test <- all(even %% 2 == 0)

# Create a vector `phone_numbers` that contains the numbers 8, 6, 7, 5, 3, 0, 9
phone_numbers <- c(8, 6, 7, 5, 3, 0, 9)

# Create a vector `prefix` that has the first three elements of `phone_numbers`
prefix <- phone_numbers[1:3]

# Create a vector `small` that has the values of `phone_numbers` that are
# less than or equal to 5
small <- phone_numbers[phone_numbers <= 5]

# Create a vector `large` that has the values of `phone_numbers` that are
# strictly greater than 5
# Bug fix: was `>=`, which wrongly included 5 despite "strictly greater".
large <- phone_numbers[phone_numbers > 5]

# Replace the values in `phone_numbers` that are larger than 5 with the number 5
phone_numbers[phone_numbers > 5] <- 5

# Replace every odd-numbered value in `phone_numbers` with the number 0
phone_numbers[phone_numbers %% 2 == 1] <- 0
|
rotationMatrixToEP <- function(R){
    # Convert a 3x3 rotation matrix to Euler angles in z-y-x order,
    # following: "Computing Euler angles from a rotation matrix" by
    # Gregory G. Slabaugh.
    #
    # R: 3x3 rotation matrix.
    # Returns: numeric vector c(o1, t1, p1) = (z, y, x) angles in radians.
    #
    # Gimbal lock occurs when R[3,1] == +/-1 (theta = -/+ pi/2); the x and
    # z angles are then not unique, so the z angle is fixed at 0.
    # Bug fix: the original condition tested R[3,1] == -1 twice, so the
    # R[3,1] == +1 case fell through to the general branch and produced
    # NaN (division by cos(theta) = cos(-pi/2) = 0).
    if(R[3, 1] == 1 || R[3, 1] == -1){
        o1 <- 0
        if(R[3, 1] == -1){
            t1 <- pi/2
            p1 <- o1 + atan2(R[1, 2], R[1, 3])
        }else{
            t1 <- -pi/2
            p1 <- -o1 + atan2(-R[1, 2], -R[1, 3])
        }
        o2 <- 0
        t2 <- 0
        p2 <- 0
    }else{
        # General case: two valid solutions exist; the first is returned
        t1 <- -asin(R[3, 1])
        t2 <- pi - t1
        p1 <- atan2(R[3, 2]/cos(t1), R[3, 3]/cos(t1))
        p2 <- atan2(R[3, 2]/cos(t2), R[3, 3]/cos(t2))
        o1 <- atan2(R[2, 1]/cos(t1), R[1, 1]/cos(t1))
        o2 <- atan2(R[2, 1]/cos(t2), R[1, 1]/cos(t2))
    }

    # z,y,x
    #list(c(o1, t1, p1), c(o2, t2, p2))
    c(o1, t1, p1)
}
|
/R/rotationMatrixToEP.R
|
no_license
|
aaronolsen/linkR
|
R
| false
| false
| 641
|
r
|
rotationMatrixToEP <- function(R){
    # Convert a 3x3 rotation matrix to Euler angles in z-y-x order,
    # following: "Computing Euler angles from a rotation matrix" by
    # Gregory G. Slabaugh.
    #
    # R: 3x3 rotation matrix.
    # Returns: numeric vector c(o1, t1, p1) = (z, y, x) angles in radians.
    #
    # Gimbal lock occurs when R[3,1] == +/-1 (theta = -/+ pi/2); the x and
    # z angles are then not unique, so the z angle is fixed at 0.
    # Bug fix: the original condition tested R[3,1] == -1 twice, so the
    # R[3,1] == +1 case fell through to the general branch and produced
    # NaN (division by cos(theta) = cos(-pi/2) = 0).
    if(R[3, 1] == 1 || R[3, 1] == -1){
        o1 <- 0
        if(R[3, 1] == -1){
            t1 <- pi/2
            p1 <- o1 + atan2(R[1, 2], R[1, 3])
        }else{
            t1 <- -pi/2
            p1 <- -o1 + atan2(-R[1, 2], -R[1, 3])
        }
        o2 <- 0
        t2 <- 0
        p2 <- 0
    }else{
        # General case: two valid solutions exist; the first is returned
        t1 <- -asin(R[3, 1])
        t2 <- pi - t1
        p1 <- atan2(R[3, 2]/cos(t1), R[3, 3]/cos(t1))
        p2 <- atan2(R[3, 2]/cos(t2), R[3, 3]/cos(t2))
        o1 <- atan2(R[2, 1]/cos(t1), R[1, 1]/cos(t1))
        o2 <- atan2(R[2, 1]/cos(t2), R[1, 1]/cos(t2))
    }

    # z,y,x
    #list(c(o1, t1, p1), c(o2, t2, p2))
    c(o1, t1, p1)
}
|
#' @title Plot percent spliced-in (PSI) values for mutually exclusive exons (MXE) located on negative strand
#'
#' @description
#' \code{PlotPSI.MXE.Neg} computes percent spliced-in (PSI) at each genomic coordinate for mutually exclusive exons (MXE) located on negative (reverse) strand.
#'
#' @details
#' This function computes percent spliced-in (PSI) at each genomic coordinate for mutually exclusive exons (MXE) located on negative (reverse) strand. Formula for computing PSI is number of reads with non-N CIGAR operation divided by the total number of reads. Total number of reads is the sum of reads with non-N CIGAR operation and reads with N-CIGAR operation
#'
#' @param tran_id Character string. Splicing event nomenclature.
#' @param Bam Character string. Path to folder where the BAM files and their corresponding index files are located.
#' @param BamPheno object of class data.frame. Mandatory columns are \code{bam.file.name} and \code{cell.type}. \code{bam.file.name} column indicates BAM file names as per that found in the \code{Bam} folder. \code{cell.type} column indicates the cell group names.
#' @param cell.types Character string. Cell types to plot. Should be the same number of cell groups or less than the \code{cell.type} column of the \code{BamPheno} argument.
#' @param min.coverage Numeric value. Coverage (Total reads) threshold below which the PSI value of the genomic coordinate is annotate as missing value, i.e. no coverage.
#' @param cons.exon.cutoff Numeric value. Limit the number of bases to plot for the constitutive exons. This allow users to focus the plots on the alternative exon.
#' @param method Character string. Statistical test to compare the PSI values across the different cell types. \code{"wilcox"}, \code{"t.test"}, \code{"ks"}, \code{"ad"}, and \code{"dts"} available for 2-group comparison. \code{"ANOVA"} and \code{"kw"} available for 3- or more group comparison. \code{"ks"}, \code{"ad"}, \code{"dts"}, and \code{"kw"}, represent Kolmogorov–Smirnov, Anderson-Darling, DTS, and Kruskal-Wallis test, respectively.
#' @param method.adj Character string. Adjust p-values for multiple testing. Options available as per \code{p.adjust} function.
#' @param sig.pval Numeric value. Adjust p-value, below which, the p-value is considered statistically significant.
#' @param cell.types.colors Character string. Legend colors for each cell type. Should be of same length as \code{cell.types} argument. To use ggplot2 default color scheme, please specify \code{"ggplot.default"}.
#' @param plot.title Character string. Main title for plot. Examples are gene ID, gene names, splicing ID etc..
#' @param plot.width Numeric value. Width of plot.
#' @param plot.height Numeric value. Height of plot.
#' @param plot.out Character string. Path to folder to output plot.
#' @param track Logical. If set to \code{TRUE} (default), a process of reading in the BAM files, which is the rate-limiting step, will be tracked on the console.
#' @param nboots Numeric value. When \code{method} set to \code{"dts"}, the number of bootstrap iterations for computing the p-value.
#' @param show.mean.ci Logical value. If set to \code{FALSE}, the 95 percent confidence interval of the per-cell group mean PSI values will not be shown. Default is \code{TRUE}.
#' @export
#' @return A plot in PDF format located in the folder specified by \code{plot.out} argument.
#' @author Sean Wen <sean.wenwx@gmail.com>
#' @importFrom plyr join
#' @import GenomicAlignments
#' @import GenomicRanges
#' @import IRanges
#' @import Rsamtools
#' @import ggplot2
#' @import pheatmap
#' @import ggplotify
#' @import ggpubr
#' @import scales
#' @importFrom reshape2 dcast
#' @import grDevices
#' @import kSamples
#' @import twosamples
#' @examples
#' \donttest{
#' # DO NOT RUN
#' PlotPSI.MXE.Neg(
#' tran_id="chr11:73720846:73720899:-@chr11:73718785:73718890
#' :-@chr11:73718613:73718718:-@chr11:73716251:73716362",
#' Bam="/Users/BAM/",
#' BamPheno=BamPheno,
#' cell.types=c("iPSC", "Endoderm"),
#' min.coverage=10,
#' cons.exon.cutoff=100,
#' method="ks",
#' method.adj="bonferroni",
#' cell.types.colors="ggplot.default",
#' plot.title="SNRPN",
#' plot.width=5,
#' plot.height=8,
#' plot.out=paste(tempdir(), "Plot.pdf", sep="")
#' )
#' }
PlotPSI.MXE.Neg <- function(tran_id, Bam, BamPheno, cell.types, min.coverage, cons.exon.cutoff, method, method.adj, sig.pval=0.10, cell.types.colors, plot.title, plot.width, plot.height, plot.out, track=TRUE, nboots=2000, show.mean.ci=TRUE) {

    ##########################################################################
    # Determine cell group order (factor levels fix plotting/legend order)
    cell.types <- factor(cell.types, levels=cell.types)

    ##########################################################################
    ############################# PLOT COLORS ################################
    ##########################################################################

    if(cell.types.colors[1]=="ggplot.default") {
        # Emulate ggplot2's default evenly-spaced hue palette
        gg_color_hue <- function(n) {
            hues = seq(15, 375, length = n + 1)
            hcl(h = hues, l = 65, c = 100)[1:n]
        }
        n = length(cell.types)
        cell.types.colors <- gg_color_hue(n)
    } else {
        cell.types.colors <- cell.types.colors
    }

    ##########################################################################
    ############################# TRIM EXON ##################################
    ##########################################################################
    # Negative strand: each exon is written chr:end:start (end < start in
    # transcript orientation). Constitutive exons longer than
    # cons.exon.cutoff are truncated so plots stay focused on the
    # alternative exons.

    # 5' constitutive exon
    . <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]][1]
    chr <- strsplit(., split=":", fixed=TRUE)[[1]][1]
    start <- as.numeric(strsplit(., split=":", fixed=TRUE)[[1]][3])
    end <- as.numeric(strsplit(., split=":", fixed=TRUE)[[1]][2])

    exon.length <- (start - end) + 1

    if(exon.length > cons.exon.cutoff) {
        start.new <- end + (cons.exon.cutoff - 1)
        exon.1 <- paste(chr, end, start.new, sep=":")
    } else {
        exon.1 <- paste(chr, end, start, sep=":")
    }

    # 5' Alt. exon (Do nothing)
    exon.2 <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]][2]

    # 3' Alt. exon (Do nothing)
    exon.3 <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]][3]

    # 3' constitutive exon
    . <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]][4]
    chr <- strsplit(., split=":", fixed=TRUE)[[1]][1]
    start <- as.numeric(strsplit(., split=":", fixed=TRUE)[[1]][3])
    end <- as.numeric(strsplit(., split=":", fixed=TRUE)[[1]][2])

    exon.length <- (start - end) + 1

    if(exon.length > cons.exon.cutoff) {
        end.new <- start - (cons.exon.cutoff - 1)
        exon.4 <- paste(chr, end.new, start, sep=":")
    } else {
        exon.4 <- paste(chr, end, start, sep=":")
    }

    # Merge
    tran_id <- paste(exon.1, exon.2, exon.3, exon.4, sep=":-@")

    ##########################################################################
    ############################# COMPUTE PSI ################################
    ##########################################################################

    # Retrieve BAM files to analyse
    files <- list.files(Bam)

    # Retrieve non-index files
    files <- grep(".bam$", files, value=TRUE)

    # Subset BAM files present in sample metadata
    overlap <- intersect(files, BamPheno$bam.file.name)
    BamPheno <- BamPheno[which(BamPheno$bam.file.name %in% overlap), ]
    files <- files[which(files %in% overlap)]

    # Retrieve cell types to analyse
    BamPheno <- BamPheno[which(BamPheno$cell.type %in% cell.types), ]
    files <- files[which(files %in% BamPheno$bam.file.name)]
    BamPheno$cell.type <- factor(BamPheno$cell.type, levels=cell.types)

    # Check whether chromosome names in the BAM header carry the "chr"
    # prefix, using the first BAM file as representative
    bamfile.GA <- readGAlignments(paste(Bam, BamPheno$bam.file.name[1], sep="/"))
    header <- names(coverage(bamfile.GA))
    header <- grepl("^chr", header[1])

    # Specify event coordinates
    # chr
    exons <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]][1]
    chr <- strsplit(exons, split=":", fixed=TRUE)[[1]][1]

    # Match the chromosome naming style of the BAM header.
    # Bug fix: the original tested length(header)==TRUE (always TRUE, since
    # length() returns 1), and its dead else-branch referenced an undefined
    # variable `chrs`; BAM files without the "chr" prefix would have failed.
    if(!header) {
        chr <- gsub("chr", "", chr, fixed=TRUE)
    }

    # Retrieve start, exon length
    exons <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]]

    ends <- NULL
    widths <- NULL

    for(i in seq_along(exons)) {
        end <- as.numeric(strsplit(exons[i], split=":", fixed=TRUE)[[1]][2])
        start <- as.numeric(strsplit(exons[i], split=":", fixed=TRUE)[[1]][3])
        width <- (start - end) + 1
        ends[i] <- end
        widths[i] <- width
    }

    # Create GRanges object covering all four exons
    gr <- GRanges(seqnames=chr, ranges=IRanges(start=ends, width=widths))

    # Read-in BAM for specified coordinates (rate-limiting step)
    print("Reading in BAM files...")

    bamfileGA.list <- vector(mode="list", length=length(files))

    if(track==TRUE) {
        pb <- txtProgressBar(1, length(files), style=3)
    }

    for(i in seq_along(files)) {
        # Read file, restricted to the event coordinates
        bamfileGA.list[[i]] <- readGAlignments(file=paste(Bam, files[i], sep="/"), index=paste(Bam, files[i], sep="/"), param=ScanBamParam(which=gr))

        # Track progress
        if(track==TRUE) {
            setTxtProgressBar(pb, i)
        }
    }

    # Specify per-base coordinates
    exons <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]]
    range.list <- list()
    for(i in seq_along(exons)) {
        end <- as.numeric(strsplit(exons[i], split=":", fixed=TRUE)[[1]][2])
        start <- as.numeric(strsplit(exons[i], split=":", fixed=TRUE)[[1]][3])
        range.list[[i]] <- seq(start, end)
    }
    coord <- unlist(range.list)

    # Compute PSI
    print("Computing PSI...")

    psi.list <- list()

    for(i in seq_along(files)) {
        # Retrieve GAlignments object
        bamfile.GA <- bamfileGA.list[[i]]

        # Reads with non-N CIGAR operation covering each base
        read.counts <- as.vector(coverage(bamfile.GA)[[chr]])[coord]

        # All reads (non-N + N CIGAR operation) covering each base
        all.counts <- as.vector(coverage(granges(bamfile.GA))[[chr]])[coord]

        # Annotate bases below the coverage threshold as missing
        all.counts[which(all.counts < min.coverage)] <- NA

        # PSI = reads with non-N CIGAR operation / total reads
        psi <- read.counts/all.counts

        # Save as data frame
        psi <- data.frame("bam.file.name"=files[i], "chr.coord"=paste(chr, coord, sep=":"), "chr"=chr, "coord"=coord, "psi"=psi, stringsAsFactors=FALSE)

        # Save PSI in list
        psi.list[[i]] <- psi

        # Remove BAM file
        remove(bamfile.GA)
    }

    df <- do.call(rbind.data.frame, psi.list)

    # Reorder coordinates (ONLY APPLICABLE FOR NEGATIVE STRAND)
    df <- df[order(df$coord, decreasing=TRUE), ]

    ##########################################################################
    ######################### PREPARE TO PLOT ################################
    ##########################################################################

    print("Plotting...")

    # Annotate with sample metadata
    df <- join(df, BamPheno, by="bam.file.name", type="left")

    # Annotate constitutive, alt. exons
    df.1 <- data.frame("chr.coord"=paste(chr, range.list[[1]], sep=":"),
                       "exon.type"="5' Cons. exon",
                       stringsAsFactors=FALSE
                       )
    df.2 <- data.frame("chr.coord"=paste(chr, range.list[[2]], sep=":"),
                       "exon.type"="5' Alt. exon",
                       stringsAsFactors=FALSE
                       )
    df.3 <- data.frame("chr.coord"=paste(chr, range.list[[3]], sep=":"),
                       "exon.type"="3' Alt. exon",
                       stringsAsFactors=FALSE
                       )
    df.4 <- data.frame("chr.coord"=paste(chr, range.list[[4]], sep=":"),
                       "exon.type"="3' Cons. exon",
                       stringsAsFactors=FALSE
                       )
    df.merged <- rbind.data.frame(df.1, df.2, df.3, df.4)
    df <- join(df, df.merged, by="chr.coord", type="left")

    # Set factor levels
    df$cell.type <- factor(df$cell.type, levels=cell.types)
    df$chr.coord <- factor(df$chr.coord, levels=unique(df$chr.coord))
    df$exon.type <- factor(df$exon.type,
                           levels=c("5' Cons. exon", "5' Alt. exon", "3' Alt. exon", "3' Cons. exon")
                           )

    # Compute mean, 95% CI PSI for each base, per cell group
    mean.list <- list()

    for(i in seq_along(cell.types)) {
        # Subset relevant cell type
        df.small <- df[which(df$cell.type==cell.types[i]), ]

        # Mean
        ave <- tapply(df.small$psi, df.small$chr.coord, function(x) {
            y <- x[!is.na(x)]
            mean(y, na.rm=TRUE)
        })

        # Margin of error for 95% CI (t-distribution)
        error <- tapply(df.small$psi, df.small$chr.coord, function(x) {
            y <- x[!is.na(x)]
            qt(0.975, df=length(y)-1)*sd(y)/sqrt(length(y))
        })

        # CI
        ci.lower <- ave - error
        ci.higher <- ave + error

        # Save into data frame
        mean.list[[i]] <- data.frame("chr.coord"=levels(df$chr.coord),
                                     "mean"=ave,
                                     "ci.lower"=ci.lower,
                                     "ci.higher"=ci.higher,
                                     "cell.type"=cell.types[i]
                                     )
    }

    df.mean <- do.call(rbind.data.frame, mean.list)

    # Clamp CI to the valid PSI range [0, 1]
    df.mean$ci.higher[df.mean$ci.higher > 1] <- 1
    df.mean$ci.lower[df.mean$ci.lower < 0] <- 0

    df.mean$chr.coord <- factor(df.mean$chr.coord, levels=unique(df.mean$chr.coord))

    # Collapse PSI by sample: mean PSI across the 5' alt. exon, used to
    # order heatmap rows within each cell group
    df.small <- df[which(df$exon.type=="5' Alt. exon"), ]

    ave <- tapply(df.small$psi, df.small$bam.file.name, function(x) {
        y <- x[!is.na(x)]
        mean(y, na.rm=TRUE)
    })

    df.ave.alt <- data.frame("bam.file.name"=names(ave),
                             "psi"=ave,
                             stringsAsFactors=FALSE)
    row.names(df.ave.alt) <- NULL

    df.ave.alt <- join(df.ave.alt, BamPheno, by="bam.file.name", type="left")

    df.ave.alt <- df.ave.alt[order(df.ave.alt$cell.type, -df.ave.alt$psi), ]

    # Compute p-values at each genomic coordinate.
    # Refactored: the original repeated the same per-coordinate scaffold for
    # every statistical test; the shared logic now lives in one helper.
    # Helper: p-value for one coordinate's data, or NA when any cell group
    # has fewer than 3 non-missing PSI values.
    compute.p.val <- function(df.small) {
        non.na <- tapply(df.small$psi, df.small$cell.type, function(x) {sum(!is.na(x))})
        non.na <- non.na[which(non.na < 3)]
        non.na <- length(non.na)

        if(non.na != 0) {
            return(NA)
        }

        if(method %in% c("ks", "ad", "dts")) {
            # Two-sample distribution tests take the two groups as vectors
            x <- na.omit(df.small[which(df.small$cell.type==cell.types[1]), "psi"])
            y <- na.omit(df.small[which(df.small$cell.type==cell.types[2]), "psi"])
        }

        if(method=="wilcox") {
            wilcox.test(psi ~ cell.type, na.omit(df.small))$p.value
        } else if(method=="t.test") {
            t.test(psi ~ cell.type, na.omit(df.small))$p.value
        } else if(method=="ks") {
            ks.test(x, y)$p.value
        } else if(method=="ad") {
            # ad.test errors on degenerate input; treat that as no signal
            error.check <- tryCatch(ad.test(x, y), error=function(err) "Error")

            if(error.check[1] == "Error") {
                1
            } else {
                ad.test(x, y, method="asymptotic")$ad[1,3]
            }
        } else if(method=="dts") {
            dts_test(x, y, nboots=nboots)[2]
        } else if(method=="ANOVA") {
            summary(aov(psi ~ cell.type, na.omit(df.small)))[[1]][["Pr(>F)"]][1]
        } else if(method=="kw") {
            kruskal.test(psi ~ cell.type, na.omit(df.small))$p.value
        } else {
            stop("Invalid method specified", call.=FALSE)
        }
    }

    coords <- levels(df$chr.coord)
    p.val <- rep(NA_real_, length(coords))

    for(i in seq_along(coords)) {
        p.val[i] <- compute.p.val(df[which(df$chr.coord==coords[i]), ])
    }

    # Adjust for multiple testing (dts p-values are bootstrap-based and
    # left unadjusted, as in the original implementation)
    if(method != "dts") {
        p.val.adj <- p.adjust(p.val, method=method.adj, n=length(p.val))
    } else if(method == "dts"){
        p.val.adj <- p.adjust(p.val, method="none", n=length(p.val))
    }

    # Zero p-values would give -log10 = Inf; replace them with small
    # jittered values near the smallest representable double so they
    # remain plottable
    set.seed(1)
    p.val.adj[p.val.adj == 0 & !is.na(p.val.adj)] <- runif(n=length(p.val.adj[p.val.adj == 0 & !is.na(p.val.adj)]),
                                                           min=2.0e-16,
                                                           max=2.2e-16
                                                           )

    p.val.adj.transformed <- -log10(p.val.adj)

    df.pval <- data.frame("chr.coord"=levels(df$chr.coord),
                          "p.val.adj.transformed"=p.val.adj.transformed
                          )
    df.pval$chr.coord <- factor(df.pval$chr.coord, levels=unique(df.pval$chr.coord))

    ##########################################################################
    ######################### HEATMAP: PSI PLOT ##############################
    ##########################################################################

    # Create expression matrix: samples x genomic coordinates
    df.exp <- dcast(df, bam.file.name ~ chr.coord, value.var="psi")
    row.names(df.exp) <- df.exp$bam.file.name
    df.exp$bam.file.name <- NULL

    # Reorder rows by cell type, then by mean alt. exon PSI (descending);
    # row order now matches df.ave.alt$bam.file.name
    df.exp <- df.exp[df.ave.alt$bam.file.name, ]

    # Define column labels (exon type per coordinate)
    . <- data.frame("chr.coord"=names(df.exp))
    . <- join(., unique(df[,c("chr.coord", "exon.type")]), by="chr.coord", type="left")
    row.names(.) <- .$chr.coord
    .$chr.coord <- NULL
    names(.) <- "exon.type"
    annotation.col.lab <- .

    # Define row labels (cell type per sample)
    . <- data.frame("bam.file.name"=row.names(df.exp))
    . <- join(., unique(df[,c("bam.file.name", "cell.type")]), by="bam.file.name", type="left")
    row.names(.) <- .$bam.file.name
    .$bam.file.name <- NULL
    names(.) <- "cell.type"
    annotation.row.lab <- .

    # Define column and row annotation colors
    annotation.col.color <- c("black", "orange", "brown", "black")
    names(annotation.col.color) <- c("5' Cons. exon", "5' Alt. exon", "3' Alt. exon", "3' Cons. exon")

    annotation.row.color <- cell.types.colors
    names(annotation.row.color) <- cell.types

    # Create color list
    annotation.row.col.color <- list("exon.type"=annotation.col.color, "cell.type"=annotation.row.color)

    # Color range
    color <- grDevices::colorRampPalette(c("yellow", "white", "blue"))((20))

    # Plot (silent=TRUE so the grob can be composed with ggarrange below)
    p1 <- pheatmap(as.matrix(df.exp), cluster_cols=FALSE, cluster_rows=FALSE, scale="column", show_rownames=FALSE, show_colnames=FALSE, color=color, annotation_col=annotation.col.lab, annotation_row=annotation.row.lab, annotation_colors=annotation.row.col.color, border_color=NA, fontsize=10, main=plot.title, annotation_names_row=FALSE, annotation_names_col=FALSE, silent=TRUE)

    # Convert pheatmap class to grob class
    p1 <- as.grob(p1)

    ##########################################################################
    ######################### LINE PLOT: MEAN PSI ############################
    ##########################################################################

    # Axis label precision scales with the magnitude of the column z-scores
    z.max <- max(apply(df.exp, 2, scale), na.rm=TRUE)
    z.min <- min(apply(df.exp, 2, scale), na.rm=TRUE)
    z.max.abs <- max(abs(c(z.max, z.min)))

    if(z.max.abs <= 10) {
        accuracy <- 0.01
    } else {
        accuracy <- 0.001
    }

    # Plot mean PSI per base; optionally overlay the 95% CI ribbon.
    # Refactored: the ribbon layer is added conditionally instead of
    # duplicating the whole plot specification.
    p2 <- ggplot(df.mean, aes(x=chr.coord, y=mean, group = cell.type, col=cell.type, fill=cell.type)) +
        geom_line()

    if(show.mean.ci==TRUE) {
        p2 <- p2 + geom_ribbon(aes(ymin=ci.lower, ymax=ci.higher), alpha=0.2, colour = NA)
    }

    p2 <- p2 +
        labs(y="Mean (PSI)", x=NULL) +
        scale_y_continuous(labels=scales::number_format(accuracy=accuracy), limits=c(0, 1), position="right") +
        scale_fill_manual(values=cell.types.colors) +
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              panel.background = element_blank(),
              panel.border=element_blank(),
              axis.line.y.right = element_line(color="black"),
              axis.title.y.right=element_text(size=11, angle=0, vjust=0.5, margin = margin(t = 0, r = 0, b = 0, l = 20)),
              axis.text=element_text(size=13),
              axis.text.x=element_blank(),
              axis.ticks.x=element_blank(),
              legend.position="none",
              plot.margin = margin(0.5, 0.38, 0, 0.7, "cm")
              )

    ##########################################################################
    ######################### LINE PLOT: P-VALUES ############################
    ##########################################################################

    # Axis label precision depends on both the p-value range and the
    # z-score range (keeps the right-hand axes visually aligned)
    if(max(df.pval$p.val.adj.transformed, na.rm=TRUE) > 10 & z.max.abs <= 10) {
        accuracy <- 0.1
    } else if(max(df.pval$p.val.adj.transformed, na.rm=TRUE) < 10 & z.max.abs <= 10) {
        accuracy <- 0.01
    } else if(max(df.pval$p.val.adj.transformed, na.rm=TRUE) > 10 & z.max.abs > 10) {
        accuracy <- 0.01
    } else if(max(df.pval$p.val.adj.transformed, na.rm=TRUE) < 10 & z.max.abs > 10) {
        accuracy <- 0.001
    }

    # Plot -log10 adjusted p-values with the significance threshold line
    p3 <- ggplot(data=df.pval, aes(x=chr.coord, y=p.val.adj.transformed, group=1)) +
        geom_line() +
        geom_hline(yintercept=-log10(sig.pval), col="red", linetype="dashed") +
        labs(x=NULL, y="-log10(p-value)") +
        scale_y_continuous(labels=scales::number_format(accuracy=accuracy),
                           limits=c(0, max(df.pval$p.val.adj.transformed)), position="right") +
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              panel.background = element_blank(),
              panel.border=element_blank(),
              axis.line.y.right = element_line(color="black"),
              axis.title.y.right=element_text(size=11, angle=0, vjust=0.5),
              axis.text=element_text(size=13),
              axis.text.x=element_blank(),
              axis.ticks.x=element_blank(),
              plot.margin = margin(0.5, 0.16, 0, 0.7, "cm")
              )

    ##########################################################################
    ############################# FINAL PLOT #################################
    ##########################################################################

    # Stack heatmap, mean PSI track and p-value track vertically
    plot.final <- ggarrange(p1, p2, p3, ncol=1, nrow=3, widths=0.25)

    # Save plot
    ggsave(plot.out, plot.final, device="pdf", width=plot.width, height=plot.height)
}
|
/R/Script_01_4_PlotPSI_MXE_NegativeStrand.R
|
no_license
|
wenweixiong/VALERIE
|
R
| false
| false
| 30,384
|
r
|
#' @title Plot percent spliced-in (PSI) values for mutually exclusive exons (MXE) located on negative strand
#'
#' @description
#' \code{PlotPSI.MXE.Neg} computes percent spliced-in (PSI) at each genomic coordinate for mutually exclusive exons (MXE) located on negative (reverse) strand.
#'
#' @details
#' This function computes percent spliced-in (PSI) at each genomic coordinate for mutually exclusive exons (MXE) located on negative (reverse) strand. Formula for computing PSI is number of reads with non-N CIGAR operation divided by the total number of reads. Total number of reads is the sum of reads with non-N CIGAR operation and reads with N-CIGAR operation
#'
#' @param tran_id Character string. Splicing event nomenclature.
#' @param Bam Character string. Path to folder where the BAM files and their corresponding index files are located.
#' @param BamPheno object of class data.frame. Mandatory columns are \code{bam.file.name} and \code{cell.type}. \code{bam.file.name} column indicates BAM file names as per that found in the \code{Bam} folder. \code{cell.type} column indicates the cell group names.
#' @param cell.types Character string. Cell types to plot. Should be the same number of cell groups or less than the \code{cell.type} column of the \code{BamPheno} argument.
#' @param min.coverage Numeric value. Coverage (Total reads) threshold below which the PSI value of the genomic coordinate is annotate as missing value, i.e. no coverage.
#' @param cons.exon.cutoff Numeric value. Limit the number of bases to plot for the constitutive exons. This allow users to focus the plots on the alternative exon.
#' @param method Character string. Statistical test to compare the PSI values across the different cell types. \code{"wilcox"}, \code{"t.test"}, \code{"ks"}, \code{"ad"}, and \code{"dts"} available for 2-group comparison. \code{"ANOVA"} and \code{"kw"} available for 3- or more group comparison. \code{"ks"}, \code{"ad"}, \code{"dts"}, and \code{"kw"}, represent Kolmogorov–Smirnov, Anderson-Darling, DTS, and Kruskal-Wallis test, respectively.
#' @param method.adj Character string. Adjust p-values for multiple testing. Options available as per \code{p.adjust} function.
#' @param sig.pval Numeric value. Adjust p-value, below which, the p-value is considered statistically significant.
#' @param cell.types.colors Character string. Legend colors for each cell type. Should be of same length as \code{cell.types} argument. To use ggplot2 default color scheme, please specify \code{"ggplot.default"}.
#' @param plot.title Character string. Main title for plot. Examples are gene ID, gene names, splicing ID etc..
#' @param plot.width Numeric value. Width of plot.
#' @param plot.height Numeric value. Height of plot.
#' @param plot.out Character string. Path to folder to output plot.
#' @param track Logical. If set to \code{TRUE} (default), a process of reading in the BAM files, which is the rate-limiting step, will be tracked on the console.
#' @param nboots Numeric value. When \code{method} set to \code{"dts"}, the number of bootstrap iterations for computing the p-value.
#' @param show.mean.ci Logical value. If set to \code{FALSE}, the 95 percent confidence interval of the per-cell group mean PSI values will not be shown. Default is \code{TRUE}.
#' @export
#' @return A plot in PDF format located in the folder specified by \code{plot.out} argument.
#' @author Sean Wen <sean.wenwx@gmail.com>
#' @importFrom plyr join
#' @import GenomicAlignments
#' @import GenomicRanges
#' @import IRanges
#' @import Rsamtools
#' @import ggplot2
#' @import pheatmap
#' @import ggplotify
#' @import ggpubr
#' @import scales
#' @importFrom reshape2 dcast
#' @import grDevices
#' @import kSamples
#' @import twosamples
#' @examples
#' \donttest{
#' # DO NOT RUN
#' PlotPSI.MXE.Neg(
#' tran_id="chr11:73720846:73720899:-@chr11:73718785:73718890
#' :-@chr11:73718613:73718718:-@chr11:73716251:73716362",
#' Bam="/Users/BAM/",
#' BamPheno=BamPheno,
#' cell.types=c("iPSC", "Endoderm"),
#' min.coverage=10,
#' cons.exon.cutoff=100,
#' method="ks",
#' method.adj="bonferroni",
#' cell.types.colors="ggplot.default",
#' plot.title="SNRPN",
#' plot.width=5,
#' plot.height=8,
#' plot.out=paste(tempdir(), "Plot.pdf", sep="")
#' )
#' }
PlotPSI.MXE.Neg <- function(tran_id, Bam, BamPheno, cell.types, min.coverage, cons.exon.cutoff, method, method.adj, sig.pval=0.10, cell.types.colors, plot.title, plot.width, plot.height, plot.out, track=TRUE, nboots=2000, show.mean.ci=TRUE) {

    ##########################################################################
    # Determine cell group order (order of `cell.types` fixes factor levels
    # used for grouping, colors and heatmap row annotation)
    cell.types <- factor(cell.types, levels=cell.types)

    ##########################################################################
    ############################# PLOT COLORS ################################
    ##########################################################################

    if(cell.types.colors[1]=="ggplot.default") {
        # Reproduce ggplot2's default evenly-spaced hue palette
        gg_color_hue <- function(n) {
            hues = seq(15, 375, length = n + 1)
            hcl(h = hues, l = 65, c = 100)[1:n]
        }
        n = length(cell.types)
        cell.types.colors <- gg_color_hue(n)
    } else {
        cell.types.colors <- cell.types.colors
    }

    ##########################################################################
    ############################# TRIM EXON ##################################
    ##########################################################################
    # Negative-strand coordinates: each exon is written chr:end:start with
    # start >= end. Constitutive exons longer than cons.exon.cutoff are
    # truncated to the cutoff width nearest the alternative exons.

    # 5' constitutive exon
    . <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]][1]
    chr <- strsplit(., split=":", fixed=TRUE)[[1]][1]
    start <- as.numeric(strsplit(., split=":", fixed=TRUE)[[1]][3])
    end <- as.numeric(strsplit(., split=":", fixed=TRUE)[[1]][2])
    exon.length <- (start - end) + 1

    if(exon.length > cons.exon.cutoff) {
        start.new <- end + (cons.exon.cutoff - 1)
        exon.1 <- paste(chr, end, start.new, sep=":")
    } else {
        exon.1 <- paste(chr, end, start, sep=":")
    }

    # 5' Alt. exon (kept whole)
    exon.2 <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]][2]

    # 3' Alt. exon (kept whole)
    exon.3 <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]][3]

    # 3' constitutive exon
    . <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]][4]
    chr <- strsplit(., split=":", fixed=TRUE)[[1]][1]
    start <- as.numeric(strsplit(., split=":", fixed=TRUE)[[1]][3])
    end <- as.numeric(strsplit(., split=":", fixed=TRUE)[[1]][2])
    exon.length <- (start - end) + 1

    if(exon.length > cons.exon.cutoff) {
        end.new <- start - (cons.exon.cutoff - 1)
        exon.4 <- paste(chr, end.new, start, sep=":")
    } else {
        exon.4 <- paste(chr, end, start, sep=":")
    }

    # Merge trimmed exons back into a single coordinate string
    tran_id <- paste(exon.1, exon.2, exon.3, exon.4, sep=":-@")

    ##########################################################################
    ############################# COMPUTE PSI ################################
    ##########################################################################

    # Retrieve BAM files to analyse
    # Retrieve all files within directory
    files <- list.files(Bam)

    # Retrieve non-index files
    files <- grep(".bam$", files, value=TRUE)

    # Subset BAM files present in sample metadata
    overlap <- intersect(files, BamPheno$bam.file.name)
    BamPheno <- BamPheno[which(BamPheno$bam.file.name %in% overlap), ]
    files <- files[which(files %in% overlap)]

    # Retrieve cell types to analyse
    BamPheno <- BamPheno[which(BamPheno$cell.type %in% cell.types), ]
    files <- files[which(files %in% BamPheno$bam.file.name)]
    BamPheno$cell.type <- factor(BamPheno$cell.type, levels=cell.types)

    # Check if BAM headers use the "chr" prefix (e.g. "chr1" vs "1") by
    # inspecting the first BAM file
    bamfile.GA <- readGAlignments(paste(Bam, BamPheno$bam.file.name[1], sep="/"))
    header <- names(coverage(bamfile.GA))
    header <- grepl("^chr", header[1])

    # Specify event coordinates
    # chr
    exons <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]][1]
    chr <- strsplit(exons, split=":", fixed=TRUE)[[1]][1]

    # BUG FIX: original tested `length(header)==TRUE`, which is always TRUE
    # (a scalar logical has length 1), so the "chr" prefix was never stripped
    # for BAMs without it; the else-branch also referenced an undefined
    # variable `chrs`. Test the logical itself and use `chr`.
    if(header==TRUE) {
        chr <- chr
    } else {
        chr <- gsub("chr", "", chr)
    }

    # Retrieve start, exon length for each of the four exons
    exons <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]]
    ends <- NULL
    widths <- NULL

    for(i in 1:length(exons)) {
        end <- as.numeric(strsplit(exons[i], split=":", fixed=TRUE)[[1]][2])
        start <- as.numeric(strsplit(exons[i], split=":", fixed=TRUE)[[1]][3])
        width <- (start - end) + 1
        ends[i] <- end
        widths[i] <- width
    }

    # Create GRanges object covering all four exons
    gr <- GRanges(seqnames=chr, ranges=IRanges(start=ends, width=widths))

    # Read-in BAM for specified coordinates only (rate-limiting step)
    print("Reading in BAM files...")

    bamfileGA.list <- vector(mode="list", length=length(files))

    if(track==TRUE) {
        pb <- txtProgressBar(1, length(files), style=3)
    }

    for(i in 1:length(files)) {
        # Read file, restricted to the event coordinates
        bamfileGA.list[[i]] <- readGAlignments(file=paste(Bam, files[i], sep="/"), index=paste(Bam, files[i], sep="/"), param=ScanBamParam(which=gr))

        # Track progress
        if(track==TRUE) {
            setTxtProgressBar(pb, i)
        }
    }

    # Specify per-base coordinates (decreasing order: negative strand)
    exons <- strsplit(tran_id, split=":-@", fixed=TRUE)[[1]]
    range.list <- list()

    for(i in 1:length(exons)) {
        end <- as.numeric(strsplit(exons[i], split=":", fixed=TRUE)[[1]][2])
        start <- as.numeric(strsplit(exons[i], split=":", fixed=TRUE)[[1]][3])
        range.list[[i]] <- seq(start, end)
    }

    coord <- unlist(range.list)

    # Compute per-base PSI = junction-aware coverage / total coverage
    print("Computing PSI...")

    psi.list <- list()

    for(i in 1:length(files)) {
        # Retrieve GAlignments object for this cell
        bamfile.GA <- bamfileGA.list[[i]]

        # Retrieve read counts (gaps/introns excluded from coverage)
        read.counts <- as.vector(coverage(bamfile.GA)[[chr]])[coord]

        # Retrieve read + skipped counts (full read extents via granges())
        all.counts <- as.vector(coverage(granges(bamfile.GA))[[chr]])[coord]

        # Mask positions below the minimum coverage threshold
        all.counts[which(all.counts < min.coverage)] <- NA

        # Compute PSI
        psi <- read.counts/all.counts

        # Save as data frame
        psi <- data.frame("bam.file.name"=files[i], "chr.coord"=paste(chr, coord, sep=":"), "chr"=chr, "coord"=coord, "psi"=psi, stringsAsFactors=FALSE)

        # Save PSI in list
        psi.list[[i]] <- psi

        # Free the per-cell alignment object early
        remove(bamfile.GA)
    }

    df <- do.call(rbind.data.frame, psi.list)

    # Reorder coordinates (ONLY APPLICABLE FOR NEGATIVE STRAND)
    df <- df[order(df$coord, decreasing=TRUE), ]

    ##########################################################################
    ######################### PREPARE TO PLOT ################################
    ##########################################################################

    print("Plotting...")

    # Annotate with sample metadata
    df <- join(df, BamPheno, by="bam.file.name", type="left")

    # Annotate constitutive, alt. exons per base
    df.1 <- data.frame("chr.coord"=paste(chr, range.list[[1]], sep=":"),
                       "exon.type"="5' Cons. exon",
                       stringsAsFactors=FALSE
                       )
    df.2 <- data.frame("chr.coord"=paste(chr, range.list[[2]], sep=":"),
                       "exon.type"="5' Alt. exon",
                       stringsAsFactors=FALSE
                       )
    df.3 <- data.frame("chr.coord"=paste(chr, range.list[[3]], sep=":"),
                       "exon.type"="3' Alt. exon",
                       stringsAsFactors=FALSE
                       )
    df.4 <- data.frame("chr.coord"=paste(chr, range.list[[4]], sep=":"),
                       "exon.type"="3' Cons. exon",
                       stringsAsFactors=FALSE
                       )
    df.merged <- rbind.data.frame(df.1, df.2, df.3, df.4)
    df <- join(df, df.merged, by="chr.coord", type="left")

    # Set factor levels
    df$cell.type <- factor(df$cell.type, levels=cell.types)
    df$chr.coord <- factor(df$chr.coord, levels=unique(df$chr.coord))
    df$exon.type <- factor(df$exon.type,
                           levels=c("5' Cons. exon", "5' Alt. exon", "3' Alt. exon", "3' Cons. exon")
                           )

    # Compute mean and 95% CI of PSI for each base within each cell group
    .list <- list()

    for(i in 1:length(cell.types)) {
        # Subset relevant cell type
        df.small <- df[which(df$cell.type==cell.types[i]), ]

        # Mean
        ave <- tapply(df.small$psi, df.small$chr.coord, function(x) {
                   y <- x[!is.na(x)]
                   mean(y, na.rm=TRUE)
               })

        # Error: half-width of the 95% CI from the t distribution
        error <- tapply(df.small$psi, df.small$chr.coord, function(x) {
                     y <- x[!is.na(x)]
                     qt(0.975, df=length(y)-1)*sd(y)/sqrt(length(y))
                 })

        # CI
        ci.lower <- ave - error
        ci.higher <- ave + error

        # Save into data frame
        .list[[i]] <- data.frame("chr.coord"=levels(df$chr.coord),
                                 "mean"=ave,
                                 "ci.lower"=ci.lower,
                                 "ci.higher"=ci.higher,
                                 "cell.type"=cell.types[i]
                                 )
    }

    df.mean <- do.call(rbind.data.frame, .list)

    # Clamp CI bounds into the valid PSI range [0, 1]
    df.mean$ci.higher[df.mean$ci.higher > 1] <- 1
    df.mean$ci.lower[df.mean$ci.lower < 0] <- 0

    df.mean$chr.coord <- factor(df.mean$chr.coord, levels=unique(df.mean$chr.coord))

    # Compute per-cell mean PSI across the 5' alt. exon; used to order
    # heatmap rows within each cell group
    df.small <- df[which(df$exon.type=="5' Alt. exon"), ]

    ave <- tapply(df.small$psi, df.small$bam.file.name, function(x) {
               y <- x[!is.na(x)]
               mean(y, na.rm=TRUE)
           })

    df.ave.alt <- data.frame("bam.file.name"=names(ave),
                             "psi"=ave,
                             stringsAsFactors=FALSE)
    row.names(df.ave.alt) <- NULL

    df.ave.alt <- join(df.ave.alt, BamPheno, by="bam.file.name", type="left")

    df.ave.alt <- df.ave.alt[order(df.ave.alt$cell.type, -df.ave.alt$psi), ]

    # Compute per-base p-values with the chosen test. Bases with fewer than
    # 3 non-NA values in any cell group are set to NA.
    p.val <- NULL

    if(method=="wilcox") {
        coords <- levels(df$chr.coord)

        for(i in 1:length(coords)) {
            df.small <- df[which(df$chr.coord==coords[i]), ]

            non.na <- tapply(df.small$psi, df.small$cell.type, function(x) {sum(!is.na(x))})
            non.na <- non.na[which(non.na < 3)]
            non.na <- length(non.na)

            if(non.na != 0) {
                p.val[i] <- NA
            } else {
                p.val[i] <- wilcox.test(psi ~ cell.type, na.omit(df.small))$p.value
            }
        }

    ###############################
    } else if(method=="t.test") {
        coords <- levels(df$chr.coord)

        for(i in 1:length(coords)) {
            df.small <- df[which(df$chr.coord==coords[i]), ]

            non.na <- tapply(df.small$psi, df.small$cell.type, function(x) {sum(!is.na(x))})
            non.na <- non.na[which(non.na < 3)]
            non.na <- length(non.na)

            if(non.na != 0) {
                p.val[i] <- NA
            } else {
                p.val[i] <- t.test(psi ~ cell.type, na.omit(df.small))$p.value
            }
        }

    ###############################
    } else if(method=="ks") {
        coords <- levels(df$chr.coord)

        for(i in 1:length(coords)) {
            df.small <- df[which(df$chr.coord==coords[i]), ]

            non.na <- tapply(df.small$psi, df.small$cell.type, function(x) {sum(!is.na(x))})
            non.na <- non.na[which(non.na < 3)]
            non.na <- length(non.na)

            if(non.na != 0) {
                p.val[i] <- NA
            } else {
                x <- df.small[which(df.small$cell.type==cell.types[1]), "psi"]
                y <- df.small[which(df.small$cell.type==cell.types[2]), "psi"]
                x <- na.omit(x)
                y <- na.omit(y)
                p.val[i] <- ks.test(x, y)$p.value
            }
        }

    ###############################
    } else if(method=="ad") {
        coords <- levels(df$chr.coord)

        for(i in 1:length(coords)) {
            df.small <- df[which(df$chr.coord==coords[i]), ]

            non.na <- tapply(df.small$psi, df.small$cell.type, function(x) {sum(!is.na(x))})
            non.na <- non.na[which(non.na < 3)]
            non.na <- length(non.na)

            if(non.na != 0) {
                p.val[i] <- NA
            } else {
                x <- df.small[which(df.small$cell.type==cell.types[1]), "psi"]
                y <- df.small[which(df.small$cell.type==cell.types[2]), "psi"]
                x <- na.omit(x)
                y <- na.omit(y)

                # ad.test errors on degenerate input; fall back to p = 1
                error.check <- tryCatch(ad.test(x, y), error=function(err) "Error")

                if(error.check[1] == "Error") {
                    p.val[i] <- 1
                } else {
                    p.val[i] <- ad.test(x, y, method="asymptotic")$ad[1,3]
                }
            }
        }

    ###############################
    } else if(method=="dts") {
        coords <- levels(df$chr.coord)

        for(i in 1:length(coords)) {
            df.small <- df[which(df$chr.coord==coords[i]), ]

            non.na <- tapply(df.small$psi, df.small$cell.type, function(x) {sum(!is.na(x))})
            non.na <- non.na[which(non.na < 3)]
            non.na <- length(non.na)

            if(non.na != 0) {
                p.val[i] <- NA
            } else {
                x <- df.small[which(df.small$cell.type==cell.types[1]), "psi"]
                y <- df.small[which(df.small$cell.type==cell.types[2]), "psi"]
                x <- na.omit(x)
                y <- na.omit(y)
                p.val[i] <- dts_test(x, y, nboots=nboots)[2]
            }
        }

    ###############################
    } else if(method=="ANOVA") {
        coords <- levels(df$chr.coord)

        for(i in 1:length(coords)) {
            df.small <- df[which(df$chr.coord==coords[i]), ]

            non.na <- tapply(df.small$psi, df.small$cell.type, function(x) {sum(!is.na(x))})
            non.na <- non.na[which(non.na < 3)]
            non.na <- length(non.na)

            if(non.na != 0) {
                p.val[i] <- NA
            } else {
                p.val[i] <- summary(aov(psi ~ cell.type, na.omit(df.small)))[[1]][["Pr(>F)"]][1]
            }
        }

    ###############################
    } else if(method=="kw") {
        coords <- levels(df$chr.coord)

        for(i in 1:length(coords)) {
            df.small <- df[which(df$chr.coord==coords[i]), ]

            non.na <- tapply(df.small$psi, df.small$cell.type, function(x) {sum(!is.na(x))})
            non.na <- non.na[which(non.na < 3)]
            non.na <- length(non.na)

            if(non.na != 0) {
                p.val[i] <- NA
            } else {
                p.val[i] <- kruskal.test(psi ~ cell.type, na.omit(df.small))$p.value
            }
        }

    ###############################
    }

    if(method != "dts") {
        p.val.adj <- p.adjust(p.val, method=method.adj, n=length(p.val))
    } else if(method == "dts"){
        # dts p-values are bootstrap-based; no multiple-testing adjustment
        p.val.adj <- p.adjust(p.val, method="none", n=length(p.val))
    }

    # Replace zero p-values with a tiny jitter near machine epsilon so that
    # -log10 stays finite (and ties at 0 do not collapse onto one value)
    set.seed(1)
    p.val.adj[p.val.adj == 0 & !is.na(p.val.adj)] <- runif(n=length(p.val.adj[p.val.adj == 0 & !is.na(p.val.adj)]),
                                                           min=2.0e-16,
                                                           max=2.2e-16
                                                           )

    p.val.adj.transformed <- -log10(p.val.adj)

    df.pval <- data.frame("chr.coord"=levels(df$chr.coord),
                          "p.val.adj.transformed"=p.val.adj.transformed
                          )
    df.pval$chr.coord <- factor(df.pval$chr.coord, levels=unique(df.pval$chr.coord))

    ##########################################################################
    ######################### HEATMAP: PSI PLOT ##############################
    ##########################################################################

    # Create expression matrix: one row per cell, one column per base
    df.exp <- dcast(df, bam.file.name ~ chr.coord, value.var="psi")
    row.names(df.exp) <- df.exp$bam.file.name
    df.exp$bam.file.name <- NULL

    # Reorder rows by cell type, then by mean alt. exon PSI (computed above)
    df.exp <- df.exp[df.ave.alt$bam.file.name, ]

    # Define column labels
    # Create data frame
    . <- data.frame("chr.coord"=names(df.exp))
    . <- join(., unique(df[,c("chr.coord", "exon.type")]), by="chr.coord", type="left")

    # Create row names
    row.names(.) <- .$chr.coord
    .$chr.coord <- NULL
    names(.) <- "exon.type"

    # Save as new object
    annotation.col.lab <- .

    # Define row labels
    # Create data frame
    . <- data.frame("bam.file.name"=row.names(df.exp))
    . <- join(., unique(df[,c("bam.file.name", "cell.type")]), by="bam.file.name", type="left")

    # Create row names
    row.names(.) <- .$bam.file.name
    .$bam.file.name <- NULL
    names(.) <- "cell.type"

    # Save as new object
    annotation.row.lab <- .

    # Define column and row annotation colors
    # Column color
    annotation.col.color <- c("black", "orange", "brown", "black")
    names(annotation.col.color) <- c("5' Cons. exon", "5' Alt. exon", "3' Alt. exon", "3' Cons. exon")

    # Row colors
    annotation.row.color <- cell.types.colors
    names(annotation.row.color) <- cell.types

    # Create color list
    annotation.row.col.color <- list("exon.type"=annotation.col.color, "cell.type"=annotation.row.color)

    # Color range
    color <- grDevices::colorRampPalette(c("yellow", "white", "blue"))((20))

    # Plot
    p1 <- pheatmap(as.matrix(df.exp), cluster_cols=FALSE, cluster_rows=FALSE, scale="column", show_rownames=FALSE, show_colnames=FALSE, color=color, annotation_col=annotation.col.lab, annotation_row=annotation.row.lab, annotation_colors=annotation.row.col.color, border_color=NA, fontsize=10, main=plot.title, annotation_names_row=FALSE, annotation_names_col=FALSE, silent=TRUE)

    # Convert pheatmap class to grob class so ggarrange can stack it
    p1 <- as.grob(p1)

    ##########################################################################
    ######################### LINE PLOT: MEAN PSI ############################
    ##########################################################################

    # Pick axis-label precision from the magnitude of the column z-scores
    z.max <- max(apply(df.exp, 2, scale), na.rm=TRUE)
    z.min <- min(apply(df.exp, 2, scale), na.rm=TRUE)
    z.max.abs <- max(abs(c(z.max, z.min)))

    if(z.max.abs <= 10) {
        accuracy <- 0.01
    } else {
        accuracy <- 0.001
    }

    # Plot
    # NOTE(review): line colour comes from ggplot defaults; scale_fill_manual
    # only affects the ribbon fill — confirm intended.
    if(show.mean.ci==TRUE){
        p2 <- ggplot(df.mean, aes(x=chr.coord, y=mean, group = cell.type, col=cell.type, fill=cell.type)) +
              geom_line() +
              geom_ribbon(aes(ymin=ci.lower, ymax=ci.higher), alpha=0.2, colour = NA) +
              labs(y="Mean (PSI)", x=NULL) +
              scale_y_continuous(labels=scales::number_format(accuracy=accuracy), limits=c(0, 1), position="right") +
              scale_fill_manual(values=cell.types.colors) +
              theme(panel.grid.major = element_blank(),
                    panel.grid.minor = element_blank(),
                    panel.background = element_blank(),
                    panel.border=element_blank(),
                    axis.line.y.right = element_line(color="black"),
                    axis.title.y.right=element_text(size=11, angle=0, vjust=0.5, margin = margin(t = 0, r = 0, b = 0, l = 20)),
                    axis.text=element_text(size=13),
                    axis.text.x=element_blank(),
                    axis.ticks.x=element_blank(),
                    legend.position="none",
                    plot.margin = margin(0.5, 0.38, 0, 0.7, "cm")
                    )
    } else {
        p2 <- ggplot(df.mean, aes(x=chr.coord, y=mean, group = cell.type, col=cell.type, fill=cell.type)) +
              geom_line() +
              labs(y="Mean (PSI)", x=NULL) +
              scale_y_continuous(labels=scales::number_format(accuracy=accuracy), limits=c(0, 1), position="right") +
              scale_fill_manual(values=cell.types.colors) +
              theme(panel.grid.major = element_blank(),
                    panel.grid.minor = element_blank(),
                    panel.background = element_blank(),
                    panel.border=element_blank(),
                    axis.line.y.right = element_line(color="black"),
                    axis.title.y.right=element_text(size=11, angle=0, vjust=0.5, margin = margin(t = 0, r = 0, b = 0, l = 20)),
                    axis.text=element_text(size=13),
                    axis.text.x=element_blank(),
                    axis.ticks.x=element_blank(),
                    legend.position="none",
                    plot.margin = margin(0.5, 0.38, 0, 0.7, "cm")
                    )
    }

    ##########################################################################
    ######################### LINE PLOT: P-VALUES ############################
    ##########################################################################

    # Label precision depends on the p-value range and z-score magnitude
    if(max(df.pval$p.val.adj.transformed, na.rm=TRUE) > 10 & z.max.abs <= 10) {
        accuracy <- 0.1
    } else if(max(df.pval$p.val.adj.transformed, na.rm=TRUE) < 10 & z.max.abs <= 10) {
        accuracy <- 0.01
    } else if(max(df.pval$p.val.adj.transformed, na.rm=TRUE) > 10 & z.max.abs > 10) {
        accuracy <- 0.01
    } else if(max(df.pval$p.val.adj.transformed, na.rm=TRUE) < 10 & z.max.abs > 10) {
        accuracy <- 0.001
    }

    # Plot
    # BUG FIX: max() here needs na.rm=TRUE; with any NA p-value the y-axis
    # upper limit became NA and the panel failed to render.
    p3 <- ggplot(data=df.pval, aes(x=chr.coord, y=p.val.adj.transformed, group=1)) +
          geom_line() +
          geom_hline(yintercept=-log10(sig.pval), col="red", linetype="dashed") +
          labs(x=NULL, y="-log10(p-value)") +
          scale_y_continuous(labels=scales::number_format(accuracy=accuracy),
                             limits=c(0, max(df.pval$p.val.adj.transformed, na.rm=TRUE)), position="right") +
          theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                panel.background = element_blank(),
                panel.border=element_blank(),
                axis.line.y.right = element_line(color="black"),
                axis.title.y.right=element_text(size=11, angle=0, vjust=0.5),
                axis.text=element_text(size=13),
                axis.text.x=element_blank(),
                axis.ticks.x=element_blank(),
                plot.margin = margin(0.5, 0.16, 0, 0.7, "cm")
                )

    ##########################################################################
    ############################# FINAL PLOT #################################
    ##########################################################################

    # Arrange plots: heatmap on top, mean PSI, then p-value track
    plot.final <- ggarrange(p1, p2, p3, ncol=1, nrow=3, widths=0.25)

    # Save plot
    ggsave(plot.out, plot.final, device="pdf", width=plot.width, height=plot.height)

}
|
### Barplot with stacked scatter plot (dots) with mean/s.e. error bars ###
library(ggplot2)

### Import data (long format, tab-separated file chosen interactively) ###
rdata <- read.table(file.choose(), header = TRUE, sep = "\t") ### Data are in long format!!! ###
rdata
dim(rdata)

### Reusable theme for a clean plot (run before plotting) ###
cleanup <- theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                panel.background = element_blank(),
                axis.line = element_line(color = "black", lineend = "square", size = 0.7),
                plot.margin = margin(0.5, 0.5, 0.5, 1, "cm"),
                axis.ticks = element_line(size = 0.7, lineend = "square", color = "black"))

ymax <- 8              ### set y limits manually - optional
ylimits <- c(0, ymax)  ### set y limits manually - optional
yticks <- seq(0, 8, 1) ### set y tick marks manually - optional

### Enter axis labels ###
xlb <- readline(prompt = "Enter x axis label:") ### leave blank if no label is needed
ylb <- readline(prompt = "Enter y axis label:")

### Change order of data ###
rdata$sample <- factor(rdata$sample, levels = c("untr", "tr")) ### change "untr", "tr" to desired conditions in desired order

### Plot data ###
myplot <- ggplot(rdata, aes(x = sample, y = value)) + ### add color=Sample in the parentheses to get group coloring
  ylab(ylb) +
  xlab(xlb) +
  theme(axis.title.y = element_text(face = "bold", size = 14, color = "black", margin = margin(0, 0.3, 0, 0, "cm"))) +
  theme(axis.title.x = element_text(face = "bold", size = 14, color = "black", margin = margin(0.3, 0, 0, 0, "cm"))) +
  theme(axis.text.x = element_text(face = "bold", size = 12, color = "black", angle = 0, vjust = 0.2, hjust = 0.5)) +
  theme(axis.text.y = element_text(face = "bold", size = 10, color = "black", angle = 0, margin = margin(0, 0.1, 0, 0, "cm"))) +
  scale_y_continuous(limits = ylimits, breaks = yticks, expand = c(0, 0)) + ## limits = ylimits, breaks = yticks - add this for manual y ticks and limits
  #scale_x_continuous(expand = c(0,0)) + ## limits = xlimits, breaks = xticks - add this for manual x ticks and limits
  # `fun.y` was deprecated in ggplot2 3.3.0 — use `fun` instead.
  # Change "geom=" to another geom for other chart types.
  stat_summary(fun = mean, geom = "bar", color = "black", fill = "yellow", size = 0.2, width = 0.6) +
  #geom_errorbar(aes(ymin = value - se, ymax = value + se)) +
  #stat_summary(fun.min = function(x) (mean(x) - sd(x)),  ### these 3 lines are for s.d. error bars
  #             fun.max = function(x) (mean(x) + sd(x)),
  #             geom = "errorbar", width = 0.1) +
  stat_summary(geom = "errorbar", fun.data = mean_se, width = 0.2) +
  #geom_point(size = 1, color = "grey40", position = position_jitter(w = 0.1, h = 0)) +
  geom_dotplot(binaxis = 'y', stackdir = 'center', dotsize = 2, binwidth = 0.11, fill = "grey", color = "grey") +
  cleanup ### Alternative cleanup is: theme_bw() - is built-in
myplot

### Significance annotations above the bars ###
label.df <- data.frame(sample = c("untr", "tr"), value = c(4.5, 7.5), signif = c("*", "***"))
myplot + geom_text(data = label.df, label = label.df$signif, size = 10)
|
/Barplot_w_dots.R
|
no_license
|
tschemic/Additional_Scripts
|
R
| false
| false
| 2,922
|
r
|
### Barplot with stacked scatter plot (dots) with mean/s.e. error bars ###
library(ggplot2)

### Import data (long format, tab-separated file chosen interactively) ###
rdata <- read.table(file.choose(), header = TRUE, sep = "\t") ### Data are in long format!!! ###
rdata
dim(rdata)

### Reusable theme for a clean plot (run before plotting) ###
cleanup <- theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                panel.background = element_blank(),
                axis.line = element_line(color = "black", lineend = "square", size = 0.7),
                plot.margin = margin(0.5, 0.5, 0.5, 1, "cm"),
                axis.ticks = element_line(size = 0.7, lineend = "square", color = "black"))

ymax <- 8              ### set y limits manually - optional
ylimits <- c(0, ymax)  ### set y limits manually - optional
yticks <- seq(0, 8, 1) ### set y tick marks manually - optional

### Enter axis labels ###
xlb <- readline(prompt = "Enter x axis label:") ### leave blank if no label is needed
ylb <- readline(prompt = "Enter y axis label:")

### Change order of data ###
rdata$sample <- factor(rdata$sample, levels = c("untr", "tr")) ### change "untr", "tr" to desired conditions in desired order

### Plot data ###
myplot <- ggplot(rdata, aes(x = sample, y = value)) + ### add color=Sample in the parentheses to get group coloring
  ylab(ylb) +
  xlab(xlb) +
  theme(axis.title.y = element_text(face = "bold", size = 14, color = "black", margin = margin(0, 0.3, 0, 0, "cm"))) +
  theme(axis.title.x = element_text(face = "bold", size = 14, color = "black", margin = margin(0.3, 0, 0, 0, "cm"))) +
  theme(axis.text.x = element_text(face = "bold", size = 12, color = "black", angle = 0, vjust = 0.2, hjust = 0.5)) +
  theme(axis.text.y = element_text(face = "bold", size = 10, color = "black", angle = 0, margin = margin(0, 0.1, 0, 0, "cm"))) +
  scale_y_continuous(limits = ylimits, breaks = yticks, expand = c(0, 0)) + ## limits = ylimits, breaks = yticks - add this for manual y ticks and limits
  #scale_x_continuous(expand = c(0,0)) + ## limits = xlimits, breaks = xticks - add this for manual x ticks and limits
  # `fun.y` was deprecated in ggplot2 3.3.0 — use `fun` instead.
  # Change "geom=" to another geom for other chart types.
  stat_summary(fun = mean, geom = "bar", color = "black", fill = "yellow", size = 0.2, width = 0.6) +
  #geom_errorbar(aes(ymin = value - se, ymax = value + se)) +
  #stat_summary(fun.min = function(x) (mean(x) - sd(x)),  ### these 3 lines are for s.d. error bars
  #             fun.max = function(x) (mean(x) + sd(x)),
  #             geom = "errorbar", width = 0.1) +
  stat_summary(geom = "errorbar", fun.data = mean_se, width = 0.2) +
  #geom_point(size = 1, color = "grey40", position = position_jitter(w = 0.1, h = 0)) +
  geom_dotplot(binaxis = 'y', stackdir = 'center', dotsize = 2, binwidth = 0.11, fill = "grey", color = "grey") +
  cleanup ### Alternative cleanup is: theme_bw() - is built-in
myplot

### Significance annotations above the bars ###
label.df <- data.frame(sample = c("untr", "tr"), value = c(4.5, 7.5), signif = c("*", "***"))
myplot + geom_text(data = label.df, label = label.df$signif, size = 10)
# Generate time series data based on different generators
# Author: Brian Lee Yung Rowe
#' @examples
#' mygbm <- function(x) gbm(x, 40, .03/1440)
#' ps <- rprices(mygbm, obs=100)
# lambda.r type declaration: rprices(Function, any, logical, logical, ...) -> xts
rprices(process, start, ohlc, volume, ...) %::% Function:.:logical:logical:...:xts
# Variant 1: apply `process` (a generator taking the number of observations)
# over a business-day calendar starting at `start`. Extra arguments in `...`
# are forwarded to trading_dates() (e.g. obs= or end=).
rprices(process, start=Sys.Date(), ohlc=FALSE, volume=FALSE, ...) %as% {
  dates <- trading_dates(start=start, ...)
  n <- length(dates)
  # One simulated close value per trading date
  prices <- as.xts(process(n), order.by=dates)
  #rownames(prices) <- format(dates)
  colnames(prices) <- 'close'
  # Optionally synthesize open/high/low and volume columns
  if (ohlc) prices <- .add_ohlc(prices, ohlc)
  if (volume) prices <- .add_volume(prices, volume)
  prices
}
# beta_a = cov(r_a, r_m) / var(r_m)
# cov(r_a, r_m) = beta_a * var(r_m)
# cor(r_a, r_m) = cov(r_a, r_m) / (sd(r_a) * sd(r_m))
#' Generate a random vector based on a time period and betas
# Variant 2: build a (1 + length(beta))-square correlation-like matrix from
# the betas, denoise it, then draw correlated normals via its Cholesky factor.
# NOTE(review): the `var` argument is never used in this body — confirm
# whether it was intended to scale the draws (252 trading days/year).
rprices(beta, var=252) %as% {
  n <- 1 + length(beta)
  m <- matrix(rep(0,n^2), nrow=n)
  m[1,] <- c(1, beta)
  m[,1] <- c(1, beta)
  diag(m) <- 1
  cor.mat <- denoise(m, RandomMatrixDenoiser())
  cd <- chol(cor.mat)
  # z: one row per asset; t(cd) %*% z imposes the correlation structure
  z <- matrix(rnorm(length(m)), nrow=nrow(m))
  # Value of this assignment is the (invisible) return value of the block
  v <- t(cd) %*% z
}
# Worked example of the beta-based generator above:
# m <- rprices(c(.5,.8,.3,-.3,1.4))
# md <- denoise(m, RandomMatrixDenoiser())
# cd <- chol(md)
# z <- matrix(rnorm(length(m)), nrow=nrow(m))
# v <- t(cd) %*% z
# Deprecated alias: logs a notice and delegates to trading_dates().
getTradingDates <- function(...) {
  flog.info("This function is deprecated. Use trading_dates instead.")
  trading_dates(...)
}
#' Generate trading dates with a given calendar
#'
#' @param period The date period, defaulting to 1 day (1440 minutes).
#'   Units are in minutes, so values less than 1440 will result in
#'   intraday time steps being added.
#' @examples
#' getTradingDates('2009-02-24',obs=10)
trading_dates(start, end, calendar=holidayNYSE) %::% a:a:Function:Date
# Variant 1: all business days in [start, end] under `calendar` holidays.
trading_dates(start, end, calendar=holidayNYSE) %as% {
  start <- as.Date(start)
  end <- as.Date(end)
  dates <- timeSequence(from=start, to=end)
  dates <- dates[isBizday(dates, holidays=calendar(unique(year(dates))))]
  as.Date(dates)
}
trading_dates(start, obs, calendar=holidayNYSE) %::% a:numeric:Function:Date
# Variant 2 (obs >= 0): the first `obs` business days on/after `start`.
trading_dates(start, obs, calendar=holidayNYSE) %when% { obs>=0 } %as% {
  start <- as.Date(start)
  # This is to get enough dates to account for holidays and weekends
  shimmed <- ceiling(obs * 2)
  dates <- timeSequence(from=start, length.out=shimmed)
  dates <- as.Date(dates[isBizday(dates, holidays=calendar(unique(year(dates))))])
  dates <- dates[dates >= start]
  # NOTE(review): if fewer than `obs` business days survive the filters,
  # this indexing pads the result with NAs — confirm acceptable.
  dates <- dates[1:obs]
}
# Variant 3 (obs < 0): the |obs| business days ending on/before `start`.
trading_dates(start, obs, calendar=holidayNYSE) %as% {
  start <- as.Date(start)
  # This is to get enough dates to account for holidays and weekends
  shimmed <- ceiling(abs(obs) * 2)
  dates <- timeSequence(from=as.Date(start)-shimmed, to=start)
  dates <- as.Date(dates[isBizday(dates, holidays=calendar(unique(year(dates))))])
  dates <- dates[dates <= start]
  dates <- tail(dates, abs(obs))
}
trading_dates(start, obs, period, hours.fn) %::% a:numeric:numeric:Function:POSIXt
# Variant 4: intraday timestamps — `obs` business days from `start`, each
# expanded into `period`-minute ticks within the hours given by hours.fn.
trading_dates(start, obs, period=1, hours.fn) %as% {
  dates <- trading_dates(start, obs)
  hours <- hours.fn(dates)
  ts <- lapply(dates,
    function(d) as.POSIXct(d) + intraday_ticks(period, hours[d]))
  unique(do.call(c, ts))
}
# th <- function(x) trading_hours(x,'cme')
# trading_dates('2014-06-30','2014-01-01', 5, th)
trading_dates(start, end, period, hours.fn) %::% a:a:numeric:Function:POSIXt
# Variant 5: intraday timestamps over a [start, end] date range.
trading_dates(start, end, period=1, hours.fn) %as% {
  dates <- trading_dates(start, end)
  hours <- hours.fn(dates)
  ts <- lapply(dates,
    function(d) as.POSIXct(d) + intraday_ticks(period, hours[d]))
  unique(do.call(c, ts))
}
# OBSOLETE
# Older combined implementation kept for reference; superseded by the
# trading_dates() lambda.r variants above.
.trading_dates <- function(end, start=NULL, obs=NULL, calendar=holidayNYSE, period=1440)
{
  if (is.null(obs) & is.null(start)) stop("Either obs or start must be set")
  end <- as.Date(end)
  if (!is.null(start))
  {
    # Range mode: business days in [start, end]
    start <- as.Date(start)
    dates <- timeSequence(from=start, to=end)
    dates <- dates[isBizday(dates, holidays=calendar())]
  }
  else
  {
    # Count mode: the last `obs` business days ending at `end`
    # This is to get enough dates to account for holidays and weekends
    shimmed <- ceiling(obs * 1.45)
    start <- as.Date(end) - shimmed + 1
    dates <- timeSequence(from=start, length.out=shimmed)
    dates <- dates[isBizday(dates, holidays=calendar())]
    if (length(dates) < obs)
    {
      # It turns out that there are a lot of holidays so add a few more days
      gap <- (2 + obs - length(dates)) * 2
      start.1 <- as.Date(dates[1] - shimmed)
      # NOTE(review): start.1 is computed but never used; `from=start` below
      # looks like it was meant to be `from=start.1` — confirm before relying
      # on this branch (function is marked obsolete).
      dates.1 <- timeSequence(from=start, length.out=gap)
      dates <- c(dates.1[isBizday(dates.1, holidays=calendar())], dates)
    }
    inf <- anylength(dates) - obs + 1
    sup <- anylength(dates)
    dates <- dates[inf:sup]
  }
  ds <- as.Date(dates)
  # Daily period: return Dates; otherwise expand into intraday POSIXct ticks
  if (period == 1440) return(ds)
  ts <- lapply(ds, function(d) as.POSIXct(d) + intraday_ticks(period))
  unique(do.call(c, ts))
}
# Generate n price series using the specified method
# Example
# getPortfolioPrices('TWM', '2009-02-24',obs=10, seed=seed, patterns=pats)
# Deprecated alias: logs a notice and delegates to portfolio_prices().
getPortfolioPrices <- function(...) {
  flog.info("This function is deprecated. Use portfolio_prices instead.")
  portfolio_prices(...)
}
# Generate one fractal-based price series per symbol and cbind them into a
# single multi-column time series indexed by trading dates. Seeds and
# generator patterns default to random picks from the bundled `generators`
# data set.
portfolio_prices <- function(symbols, obs=NULL, end=Sys.Date(), start=NULL,
  calendar=holidayNYSE, seeds=NULL, patterns=NULL, ..., type='uniform')
{
  # Build the shared date index either from a [start, end] range or a count
  if (is.null(obs)) {
    dates <- trading_dates(start, end, calendar)
  } else {
    dates <- trading_dates(start, obs, calendar)
  }
  if (is.null(seeds))
  {
    data(generators)
    seeds = sample(sampleInitiators, anylength(symbols), TRUE)
  }
  if (is.null(patterns))
  {
    data(generators)
    patterns = sample(sampleGenerators, anylength(symbols), TRUE)
  }
  # Loop over symbols names
  set <- NULL
  for (symbol in symbols)
  {
    # Call fractal, and cbind
    # NOTE(review): exists('count')/exists('epochs') inspects the enclosing
    # environments, not this function's arguments — fragile; scalar `&&`
    # would also be conventional here. Confirm intended.
    if (! exists('count') & ! exists('epochs'))
      ts <- fractal(seeds, patterns, ..., count=obs, type=type)
    else
      ts <- fractal(seeds, patterns, ..., type=type)
    #ts <- ts[(nrow(ts)-length(dates)+1):nrow(ts),]
    # Keep only the most recent values and align them to the date index
    ts <- tail(ts, anylength(dates))
    index(ts) <- dates
    set <- cbind(set, ts)
  }
  anynames(set) <- symbols
  return(set)
}
# Deprecated alias: logs a notice and delegates to plot_returns().
plotReturns <- function(series, ...) {
  flog.info("This function is deprecated. Use plot_returns instead.")
  plot_returns(series, ...)
}
# Plot a price series and its period-over-period returns in a 2x1 layout.
#
# Args:
#   series: a price series accepted by plot() and Delt().
#   ...: additional arguments forwarded to the price plot.
# Returns the input series invisibly so the call can be used in a pipeline.
plot_returns <- function(series, ...)
{
  o.par <- par(mfrow=c(2,1), mar=c(3.1, 2.1, 2.1, 1.1))
  # Restore the graphics state even if one of the plots fails
  # (the original only restored par() on the success path)
  on.exit(par(o.par), add=TRUE)
  plot(series, type='l', main='Prices',...)
  plot(Delt(series), main='Returns')
  invisible(series)
}
|
/R/framework.R
|
no_license
|
alexsbromberg/fractalrock
|
R
| false
| false
| 6,420
|
r
|
# Generate time series data based on different generators
# Author: Brian Lee Yung Rowe
#' @examples
#' mygbm <- function(x) gbm(x, 40, .03/1440)
#' ps <- rprices(mygbm, obs=100)
# Dispatch signature (lambda.r): 'process' is a function, start defaults to
# today, ohlc/volume are logical flags, and the result is an xts object.
rprices(process, start, ohlc, volume, ...) %::% Function:.:logical:logical:...:xts
rprices(process, start=Sys.Date(), ohlc=FALSE, volume=FALSE, ...) %as% {
  # Build the trading-date index, forwarding extra args (e.g. obs) on.
  dates <- trading_dates(start=start, ...)
  n <- length(dates)
  # Draw n closing prices from the user-supplied process, indexed by date.
  prices <- as.xts(process(n), order.by=dates)
  #rownames(prices) <- format(dates)
  colnames(prices) <- 'close'
  # Optionally synthesize open/high/low and volume columns.
  if (ohlc) prices <- .add_ohlc(prices, ohlc)
  if (volume) prices <- .add_volume(prices, volume)
  prices
}
# beta_a = cov(r_a, r_m) / var(r_m)
# cov(r_a, r_m) = beta_a * var(r_m)
# cor(r_a, r_m) = cov(r_a, r_m) / (sd(r_a) * sd(r_m))
#' Generate a random vector based on a time period and betas
# Builds a correlation matrix whose first row/column holds the betas, cleans
# it with random-matrix denoising, then imposes that correlation structure
# on iid normal draws via a Cholesky factor.
# NOTE(review): the 'var' argument (default 252) is never used in this
# clause -- confirm whether it was meant to scale the draws.
rprices(beta, var=252) %as% {
  n <- 1 + length(beta)
  m <- matrix(rep(0,n^2), nrow=n)
  m[1,] <- c(1, beta)
  m[,1] <- c(1, beta)
  diag(m) <- 1
  cor.mat <- denoise(m, RandomMatrixDenoiser())
  # Cholesky factor of the denoised correlation matrix.
  cd <- chol(cor.mat)
  z <- matrix(rnorm(length(m)), nrow=nrow(m))
  # The value of this assignment (the correlated draws) is the clause result.
  v <- t(cd) %*% z
}
# m <- rprices(c(.5,.8,.3,-.3,1.4))
# md <- denoise(m, RandomMatrixDenoiser())
# cd <- chol(md)
# z <- matrix(rnorm(length(m)), nrow=nrow(m))
# v <- t(cd) %*% z
# Deprecated alias for trading_dates(); logs the deprecation notice and
# delegates all arguments untouched.
getTradingDates <- function(...) {
  msg <- "This function is deprecated. Use trading_dates instead."
  flog.info(msg)
  trading_dates(...)
}
#' Generate trading dates with a given calendar
#'
#' @param period The date period, defaulting to 1 day (1440 minutes).
#' Units are in minutes, so values less than 1440 will result in
#' intraday time steps being added.
#' @examples
#' getTradingDates('2009-02-24',obs=10)
# Range variant: all business days in [start, end] under 'calendar'.
# Type signature (lambda.r): start/end are anything coercible to Date;
# the result is a Date vector.
trading_dates(start, end, calendar=holidayNYSE) %::% a:a:Function:Date
trading_dates(start, end, calendar=holidayNYSE) %as% {
  start <- as.Date(start)
  end <- as.Date(end)
  dates <- timeSequence(from=start, to=end)
  # Keep business days only, excluding the calendar's holidays for every
  # year spanned by the sequence.
  dates <- dates[isBizday(dates, holidays=calendar(unique(year(dates))))]
  as.Date(dates)
}
# Count variant (obs >= 0): the first 'obs' trading dates on or after
# 'start'.
trading_dates(start, obs, calendar=holidayNYSE) %::% a:numeric:Function:Date
trading_dates(start, obs, calendar=holidayNYSE) %when% { obs>=0 } %as% {
  start <- as.Date(start)
  # This is to get enough dates to account for holidays and weekends
  shimmed <- ceiling(obs * 2)
  dates <- timeSequence(from=start, length.out=shimmed)
  dates <- as.Date(dates[isBizday(dates, holidays=calendar(unique(year(dates))))])
  dates <- dates[dates >= start]
  # NOTE(review): 2x oversampling could in principle fall short across long
  # holiday runs, which would leave trailing NAs here -- confirm.
  dates <- dates[1:obs]
}
# Fallback count variant (negative obs): the last |obs| trading dates
# counting backward so that the sequence ends at 'start'.
trading_dates(start, obs, calendar=holidayNYSE) %as% {
  start <- as.Date(start)
  # This is to get enough dates to account for holidays and weekends
  shimmed <- ceiling(abs(obs) * 2)
  dates <- timeSequence(from=as.Date(start)-shimmed, to=start)
  dates <- as.Date(dates[isBizday(dates, holidays=calendar(unique(year(dates))))])
  dates <- dates[dates <= start]
  dates <- tail(dates, abs(obs))
}
# Intraday count variant: 'obs' trading days of timestamps spaced 'period'
# minutes apart within each day's session hours. 'hours.fn' maps a vector
# of dates to their trading hours (e.g. via trading_hours).
trading_dates(start, obs, period, hours.fn) %::% a:numeric:numeric:Function:POSIXt
trading_dates(start, obs, period=1, hours.fn) %as% {
  dates <- trading_dates(start, obs)
  hours <- hours.fn(dates)
  # Expand each trading day into intraday tick offsets from midnight.
  ts <- lapply(dates,
    function(d) as.POSIXct(d) + intraday_ticks(period, hours[d]))
  unique(do.call(c, ts))
}
# th <- function(x) trading_hours(x,'cme')
# trading_dates('2014-06-30','2014-01-01', 5, th)
# Intraday range variant: same expansion as above, but over a start/end
# date range rather than an observation count.
trading_dates(start, end, period, hours.fn) %::% a:a:numeric:Function:POSIXt
trading_dates(start, end, period=1, hours.fn) %as% {
  dates <- trading_dates(start, end)
  hours <- hours.fn(dates)
  # Expand each trading day into intraday tick offsets from midnight.
  ts <- lapply(dates,
    function(d) as.POSIXct(d) + intraday_ticks(period, hours[d]))
  unique(do.call(c, ts))
}
# OBSOLETE
# Older monolithic implementation superseded by the trading_dates() clauses
# above; kept for reference. Builds business-day (or intraday) timestamps
# ending at 'end', either spanning forward from 'start' or counting back
# 'obs' business days.
.trading_dates <- function(end, start=NULL, obs=NULL, calendar=holidayNYSE, period=1440)
{
    if (is.null(obs) & is.null(start)) stop("Either obs or start must be set")
    end <- as.Date(end)
    if (!is.null(start))
    {
        start <- as.Date(start)
        dates <- timeSequence(from=start, to=end)
        dates <- dates[isBizday(dates, holidays=calendar())]
    }
    else
    {
        # This is to get enough dates to account for holidays and weekends
        shimmed <- ceiling(obs * 1.45)
        start <- as.Date(end) - shimmed + 1
        dates <- timeSequence(from=start, length.out=shimmed)
        dates <- dates[isBizday(dates, holidays=calendar())]
        if (length(dates) < obs)
        {
            # It turns out that there are a lot of holidays so add a few more days
            gap <- (2 + obs - length(dates)) * 2
            start.1 <- as.Date(dates[1] - shimmed)
            # NOTE(review): dates.1 is seeded from 'start', not 'start.1', so
            # start.1 appears unused -- a likely latent bug in this obsolete
            # path; confirm before reuse.
            dates.1 <- timeSequence(from=start, length.out=gap)
            dates <- c(dates.1[isBizday(dates.1, holidays=calendar())], dates)
        }
        # Keep exactly the last 'obs' business days.
        inf <- anylength(dates) - obs + 1
        sup <- anylength(dates)
        dates <- dates[inf:sup]
    }
    ds <- as.Date(dates)
    # A period of 1440 minutes means daily bars; otherwise expand each day
    # into intraday timestamps.
    if (period == 1440) return(ds)
    ts <- lapply(ds, function(d) as.POSIXct(d) + intraday_ticks(period))
    unique(do.call(c, ts))
}
# Generate n price series using the specified method
# Example
# getPortfolioPrices('TWM', '2009-02-24',obs=10, seed=seed, patterns=pats)
# Deprecated alias for portfolio_prices(); announces the deprecation via the
# logger and forwards every argument unchanged.
getPortfolioPrices <- function(...) {
  msg <- "This function is deprecated. Use portfolio_prices instead."
  flog.info(msg)
  portfolio_prices(...)
}
# Generate one synthetic price series per symbol, indexed by real trading
# dates (business days under the supplied holiday calendar).
#
# symbols  - instrument names; determines the number of columns produced
# obs      - number of observations; when NULL the start/end range drives the
#            dates (note: 'end' itself is not forwarded to trading_dates)
# seeds    - initiator patterns for fractal(); sampled from the bundled
#            'generators' data set when NULL
# patterns - generator patterns for fractal(); sampled likewise when NULL
# type     - sampling scheme passed through to fractal()
# Returns an xts-style object with one column per symbol.
portfolio_prices <- function(symbols, obs=NULL, end=Sys.Date(), start=NULL,
  calendar=holidayNYSE, seeds=NULL, patterns=NULL, ..., type='uniform')
{
  if (is.null(obs)) {
    dates <- trading_dates(start, end, calendar)
  } else {
    dates <- trading_dates(start, obs, calendar)
  }
  # Lazily load the sample generator data only when defaults are needed.
  if (is.null(seeds))
  {
    data(generators)
    seeds = sample(sampleInitiators, anylength(symbols), TRUE)
  }
  if (is.null(patterns))
  {
    data(generators)
    patterns = sample(sampleGenerators, anylength(symbols), TRUE)
  }
  # Loop over symbols names
  set <- NULL
  for (symbol in symbols)
  {
    # Call fractal, and cbind
    # NOTE(review): exists() searches the enclosing/global environments, so
    # this branches on whether *any* visible binding named 'count' or
    # 'epochs' exists, not on arguments passed via '...' -- confirm intent.
    if (! exists('count') & ! exists('epochs'))
      ts <- fractal(seeds, patterns, ..., count=obs, type=type)
    else
      ts <- fractal(seeds, patterns, ..., type=type)
    #ts <- ts[(nrow(ts)-length(dates)+1):nrow(ts),]
    # Trim to the requested number of dates and stamp the real date index.
    ts <- tail(ts, anylength(dates))
    index(ts) <- dates
    set <- cbind(set, ts)
  }
  anynames(set) <- symbols
  return(set)
}
# Deprecated alias for plot_returns(); logs the deprecation notice and
# delegates with the series and any extra plotting arguments.
plotReturns <- function(series, ...) {
  msg <- "This function is deprecated. Use plot_returns instead."
  flog.info(msg)
  plot_returns(series, ...)
}
# Draw a two-panel chart: the raw price series on top and its period
# returns (via Delt) below. Graphics parameters are restored before
# returning, and the input series is returned invisibly so the call can
# be chained.
plot_returns <- function(series, ...)
{
  saved.par <- par(mfrow=c(2,1), mar=c(3.1, 2.1, 2.1, 1.1))
  plot(series, type='l', main='Prices',...)
  plot(Delt(series), main='Returns')
  par(saved.par)
  invisible(series)
}
|
#' Calculate Cramer's V for categorical variables
#'
#' Function calculates Cramer's V for two categorical variables based on the table
#' function
#'
#' The function calculates Cramer's V and also returns the associated statistics from
#' Chi-Squared test with the null hypothesis of independence of the two variables.
#'
#' See details in the vignette "Marketing analytics with greybox":
#' \code{vignette("maUsingGreybox","greybox")}
#'
#' @template author
#' @keywords htest
#'
#' @param x First categorical variable.
#' @param y Second categorical variable.
#' @param use What observations to use. See \link[stats]{cor} function for details.
#' The only option that is not available here is \code{"pairwise.complete.obs"}.
#'
#' @return The following list of values is returned:
#' \itemize{
#' \item{value}{The value of Cramer's V;}
#' \item{statistic}{The value of Chi squared statistic associated with the Cramer's V;}
#' \item{p.value}{The p-value of Chi squared test associated with the Cramer's V;}
#' \item{df}{The number of degrees of freedom from the test.}
#' }
#'
#' @seealso \code{\link[base]{table}, \link[greybox]{tableplot}, \link[greybox]{spread},
#' \link[greybox]{mcor}, \link[greybox]{association}}
#'
#' @examples
#'
#' cramer(mtcars$am, mtcars$gear)
#'
#' @importFrom stats chisq.test
#' @export cramer
cramer <- function(x, y, use=c("na.or.complete","complete.obs","everything","all.obs")){
    # Collapse the 'use' choice to its first letter: n / c / e / a.
    use <- substr(use[1],1,1);

    # Assemble the classed result from the enclosing frame; na=TRUE yields
    # the all-NA form used when missing data prevents computation.
    makeResult <- function(na=FALSE){
        if(na){
            structure(list(value=NA,statistic=NA,
                           p.value=NA,df=NA),class="cramer");
        }
        else{
            structure(list(value=cramerValue,statistic=chiTest$statistic,
                           p.value=chiTest$p.value,df=chiTest$parameter),class="cramer");
        }
    }
    failMissing <- function(){
        stop("Missing observations in cramer", call.=FALSE);
    }

    # Warn when an input looks continuous rather than categorical
    # (numeric with more than 10 distinct values).
    if(is.numeric(x) && length(unique(x))>10){
        warning("It seems that x is numeric, not categorical. Other measures of association might be more informative.",
                call.=FALSE);
    }
    if(is.numeric(y) && length(unique(y))>10){
        warning("It seems that y is numeric, not categorical. Other measures of association might be more informative.",
                call.=FALSE);
    }

    # Missing-value policy, mirroring stats::cor's 'use' semantics.
    xMissing <- is.na(x);
    yMissing <- is.na(y);
    if(any(xMissing) || any(yMissing)){
        if(use=="e"){
            # "everything": any NA makes the result NA.
            return(makeResult(na=TRUE));
        }
        if(use=="a"){
            # "all.obs": missing data is an error.
            failMissing();
        }
        if(use %in% c("n","c")){
            # Drop pairwise-incomplete observations.
            keep <- !xMissing & !yMissing;
            x <- x[keep];
            y <- y[keep];
            if(length(x)<2){
                # Fewer than two complete pairs: "complete.obs" returns NA,
                # "na.or.complete" errors.
                if(use=="c") return(makeResult(na=TRUE)) else failMissing();
            }
        }
    }
    # Cramer's V from the chi-squared statistic of the contingency table.
    dataTable <- table(x,y);
    chiTest <- suppressWarnings(chisq.test(dataTable));
    cramerValue <- sqrt(chiTest$statistic/(min(dim(dataTable)-1)*sum(dataTable)));
    makeResult();
}
|
/fuzzedpackages/greybox/R/cramer.R
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 3,350
|
r
|
#' Calculate Cramer's V for categorical variables
#'
#' Function calculates Cramer's V for two categorical variables based on the table
#' function
#'
#' The function calculates Cramer's V and also returns the associated statistics from
#' Chi-Squared test with the null hypothesis of independence of the two variables.
#'
#' See details in the vignette "Marketing analytics with greybox":
#' \code{vignette("maUsingGreybox","greybox")}
#'
#' @template author
#' @keywords htest
#'
#' @param x First categorical variable.
#' @param y Second categorical variable.
#' @param use What observations to use. See \link[stats]{cor} function for details.
#' The only option that is not available here is \code{"pairwise.complete.obs"}.
#'
#' @return The following list of values is returned:
#' \itemize{
#' \item{value}{The value of Cramer's V;}
#' \item{statistic}{The value of Chi squared statistic associated with the Cramer's V;}
#' \item{p.value}{The p-value of Chi squared test associated with the Cramer's V;}
#' \item{df}{The number of degrees of freedom from the test.}
#' }
#'
#' @seealso \code{\link[base]{table}, \link[greybox]{tableplot}, \link[greybox]{spread},
#' \link[greybox]{mcor}, \link[greybox]{association}}
#'
#' @examples
#'
#' cramer(mtcars$am, mtcars$gear)
#'
#' @importFrom stats chisq.test
#' @export cramer
cramer <- function(x, y, use=c("na.or.complete","complete.obs","everything","all.obs")){
    # Collapse the 'use' choice to its first letter: n / c / e / a.
    use <- substr(use[1],1,1);

    # Assemble the classed result from the enclosing frame; na=TRUE yields
    # the all-NA form used when missing data prevents computation.
    makeResult <- function(na=FALSE){
        if(na){
            structure(list(value=NA,statistic=NA,
                           p.value=NA,df=NA),class="cramer");
        }
        else{
            structure(list(value=cramerValue,statistic=chiTest$statistic,
                           p.value=chiTest$p.value,df=chiTest$parameter),class="cramer");
        }
    }
    failMissing <- function(){
        stop("Missing observations in cramer", call.=FALSE);
    }

    # Warn when an input looks continuous rather than categorical
    # (numeric with more than 10 distinct values).
    if(is.numeric(x) && length(unique(x))>10){
        warning("It seems that x is numeric, not categorical. Other measures of association might be more informative.",
                call.=FALSE);
    }
    if(is.numeric(y) && length(unique(y))>10){
        warning("It seems that y is numeric, not categorical. Other measures of association might be more informative.",
                call.=FALSE);
    }

    # Missing-value policy, mirroring stats::cor's 'use' semantics.
    xMissing <- is.na(x);
    yMissing <- is.na(y);
    if(any(xMissing) || any(yMissing)){
        if(use=="e"){
            # "everything": any NA makes the result NA.
            return(makeResult(na=TRUE));
        }
        if(use=="a"){
            # "all.obs": missing data is an error.
            failMissing();
        }
        if(use %in% c("n","c")){
            # Drop pairwise-incomplete observations.
            keep <- !xMissing & !yMissing;
            x <- x[keep];
            y <- y[keep];
            if(length(x)<2){
                # Fewer than two complete pairs: "complete.obs" returns NA,
                # "na.or.complete" errors.
                if(use=="c") return(makeResult(na=TRUE)) else failMissing();
            }
        }
    }
    # Cramer's V from the chi-squared statistic of the contingency table.
    dataTable <- table(x,y);
    chiTest <- suppressWarnings(chisq.test(dataTable));
    cramerValue <- sqrt(chiTest$statistic/(min(dim(dataTable)-1)*sum(dataTable)));
    makeResult();
}
|
#### 11-1 Choropleth map of US violent crime rates by state ####
## -------------------------------------------------------------------- ##
install.packages("ggiraphExtra")
# install.packages("maps")
# install.packages("stringi")
library(ggiraphExtra)
library(maps)
# USArrests: violent crime rates per 100,000 residents by US state, 1973.
# - murder, assault, urban population, and rape figures.
str(USArrests)
head(USArrests)
rownames(USArrests)
library(tibble) # convert rowname to column
crime <- rownames_to_column(USArrests, var = "state")
crime$state <- tolower(crime$state)
str(crime)
library(ggplot2)
states_map <- map_data("state") # requires the maps package.
head(states_map)
# install.packages("mapproj")
library(mapproj)
# the mapproj package is required.
ggiraphExtra::ggChoropleth(data = crime, # data to draw on the map
                           aes(fill = Murder, # variable mapped to fill colour
                               map_id = state), # region key variable
                           map = states_map) # map polygon data
ggChoropleth(data = crime, # data to draw on the map
             aes(fill = Rape, # variable mapped to fill colour
                 map_id = state), # region key variable
             map = states_map, # map polygon data
             interactive = T) # interactive tooltips
#### 11-2 Choropleth of population and tuberculosis cases by Korean province ####
# install.packages("stringi")
# install.packages("processx")
# install.packages("fs")
# install.packages("devtools")
devtools::install_github("cardiomoon/kormaps2014", force = TRUE)
library(kormaps2014)
# Population statistics by province
str(korpop1)
str(changeCode(korpop1))
# Rename the variables to English as well.
library(dplyr)
korpop1 <- rename(korpop1,
                  pop = 총인구_명,
                  name = 행정구역별_읍면동)
# Province-level population map data
str(changeCode(kormap1))
ggChoropleth(data = korpop1, # data to draw on the map
             aes(fill = pop, # variable mapped to fill colour
                 map_id = code, # region key variable
                 tooltip = name), # region name shown on hover
             map = kormap1, # map polygon data
             interactive = T) # interactive tooltips
## ----- Tuberculosis patient choropleth -------------------------------- ##
str(changeCode(tbc)) # data set bundled with kormaps2014
ggChoropleth(data = tbc, # data to draw on the map
             aes(fill = NewPts, # variable mapped to fill colour
                 map_id = code, # region key variable
                 tooltip = name), # region name shown on hover
             map = kormap1, # map polygon data
             interactive = T) # interactive tooltips
|
/R/Doit_R-master/Script/Part11_Teacher.r
|
no_license
|
dico1631/TIL
|
R
| false
| false
| 2,792
|
r
|
#### 11-1 Choropleth map of US violent crime rates by state ####
## -------------------------------------------------------------------- ##
install.packages("ggiraphExtra")
# install.packages("maps")
# install.packages("stringi")
library(ggiraphExtra)
library(maps)
# USArrests: violent crime rates per 100,000 residents by US state, 1973.
# - murder, assault, urban population, and rape figures.
str(USArrests)
head(USArrests)
rownames(USArrests)
library(tibble) # convert rowname to column
crime <- rownames_to_column(USArrests, var = "state")
crime$state <- tolower(crime$state)
str(crime)
library(ggplot2)
states_map <- map_data("state") # requires the maps package.
head(states_map)
# install.packages("mapproj")
library(mapproj)
# the mapproj package is required.
ggiraphExtra::ggChoropleth(data = crime, # data to draw on the map
                           aes(fill = Murder, # variable mapped to fill colour
                               map_id = state), # region key variable
                           map = states_map) # map polygon data
ggChoropleth(data = crime, # data to draw on the map
             aes(fill = Rape, # variable mapped to fill colour
                 map_id = state), # region key variable
             map = states_map, # map polygon data
             interactive = T) # interactive tooltips
#### 11-2 Choropleth of population and tuberculosis cases by Korean province ####
# install.packages("stringi")
# install.packages("processx")
# install.packages("fs")
# install.packages("devtools")
devtools::install_github("cardiomoon/kormaps2014", force = TRUE)
library(kormaps2014)
# Population statistics by province
str(korpop1)
str(changeCode(korpop1))
# Rename the variables to English as well.
library(dplyr)
korpop1 <- rename(korpop1,
                  pop = 총인구_명,
                  name = 행정구역별_읍면동)
# Province-level population map data
str(changeCode(kormap1))
ggChoropleth(data = korpop1, # data to draw on the map
             aes(fill = pop, # variable mapped to fill colour
                 map_id = code, # region key variable
                 tooltip = name), # region name shown on hover
             map = kormap1, # map polygon data
             interactive = T) # interactive tooltips
## ----- Tuberculosis patient choropleth -------------------------------- ##
str(changeCode(tbc)) # data set bundled with kormaps2014
ggChoropleth(data = tbc, # data to draw on the map
             aes(fill = NewPts, # variable mapped to fill colour
                 map_id = code, # region key variable
                 tooltip = name), # region name shown on hover
             map = kormap1, # map polygon data
             interactive = T) # interactive tooltips
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sedproxy.R
\docType{data}
\name{stages.key}
\alias{stages.key}
\title{Description of proxy stages}
\format{
A data frame with 13 rows and 6 variables:
\tabular{ll}{
\cr \code{stage} \tab proxy stage
\cr \code{label} \tab label for proxy stage
\cr \code{description} \tab description of proxy stage
\cr \code{plot.order} \tab default plotting order of stages
\cr \code{plotting.colour} \tab default colour for plotting
\cr \code{plotting.alpha} \tab default alpha level for plotting
}
}
\usage{
stages.key
}
\description{
A description of the proxy stages in the output of \code{ClimToProxyClim}
and default labels, colours and order for plotting
}
\keyword{datasets}
|
/man/stages.key.Rd
|
permissive
|
EarthSystemDiagnostics/sedproxy
|
R
| false
| true
| 769
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sedproxy.R
\docType{data}
\name{stages.key}
\alias{stages.key}
\title{Description of proxy stages}
\format{
A data frame with 13 rows and 6 variables:
\tabular{ll}{
\cr \code{stage} \tab proxy stage
\cr \code{label} \tab label for proxy stage
\cr \code{description} \tab description of proxy stage
\cr \code{plot.order} \tab default plotting order of stages
\cr \code{plotting.colour} \tab default colour for plotting
\cr \code{plotting.alpha} \tab default alpha level for plotting
}
}
\usage{
stages.key
}
\description{
A description of the proxy stages in the output of \code{ClimToProxyClim}
and default labels, colours and order for plotting
}
\keyword{datasets}
|
#' Create `cowplot` label based on a ggplot theme
#'
#' This is a wrapper around `cowplot::draw_label()` that creates a `ggplot`-based label that inherits formatting from a given theme element. It's more or less been superceded by `ggplot`'s new `plot.title.position` theme argument.
#' @param label A string of text for label.
#' @param theme A ggplot theme; if `NULL` (the default), will get current theme with `ggplot2::theme_get()`.
#' @param element Name of a theme element; defaults to base text.
#' @param x x-position; defaults to 0.01
#' @param hjust Horizontal alignment; defaults 0
#' @param ... Any other arguments to pass to `cowplot::draw_label()`.
#' @return A `ggplot` object.
#' @examples
#' \dontrun{
#' if (requireNamespace("ggplot2", quietly = TRUE)) {
#' town_pops <- race_pops %>%
#' dplyr::filter(variable == "total") %>%
#' dplyr::mutate(name = forcats::fct_reorder(as.factor(name), estimate))
#'
#' library(ggplot2)
#' p <- ggplot(town_pops, aes(x = name, y = estimate)) +
#' geom_col() +
#' coord_flip()
#' # With long labels on the left, ggplot's default title placement
#' # aligned to the panel can become awkward
#' p + ggtitle("Total population by town, 2017")
#' # Instead, make a label grob and arrange plot elements how you want
#' title <- themed_label("Total population by town, 2017", element = "plot.title")
#' cowplot::plot_grid(
#' title,
#' p,
#' ncol = 1,
#' rel_heights = c(1, 10)
#' )
#' }
#' }
#' @export
# Build a cowplot label grob whose typography is inherited from a ggplot
# theme element. See the roxygen block above for parameter details.
themed_label <- function(label, theme = NULL, element = "text", x = 0.01, hjust = 0, ...) {
  # Fall back to the active ggplot2 theme when none is supplied.
  if (is.null(theme)) {
    theme <- ggplot2::theme_get()
  }
  # Accept a theme-returning function (e.g. ggplot2::theme_minimal) as well
  # as a realized theme object.
  if (is.function(theme)) {
    theme <- rlang::exec(theme)
  }
  assertthat::assert_that(element %in% names(theme), msg = "Element must be a valid ggplot theme element name")
  # Resolve inherited theme settings for the requested element.
  elements <- ggplot2::calc_element(element, theme)
  # Bug fix: the element field is named 'colour'; the previous
  # 'elements$color' only worked via `$` partial matching and would
  # silently return NULL if the abbreviation ever became ambiguous.
  lbl <- cowplot::draw_label(label,
    fontfamily = elements$family,
    fontface = elements$face,
    colour = elements$colour,
    size = elements$size,
    x = x,
    hjust = hjust,
    ...
  )
  cowplot::ggdraw() + lbl
}
|
/R/themed_label.R
|
permissive
|
camille-s/camiller
|
R
| false
| false
| 2,285
|
r
|
#' Create `cowplot` label based on a ggplot theme
#'
#' This is a wrapper around `cowplot::draw_label()` that creates a `ggplot`-based label that inherits formatting from a given theme element. It's more or less been superceded by `ggplot`'s new `plot.title.position` theme argument.
#' @param label A string of text for label.
#' @param theme A ggplot theme; if `NULL` (the default), will get current theme with `ggplot2::theme_get()`.
#' @param element Name of a theme element; defaults to base text.
#' @param x x-position; defaults to 0.01
#' @param hjust Horizontal alignment; defaults 0
#' @param ... Any other arguments to pass to `cowplot::draw_label()`.
#' @return A `ggplot` object.
#' @examples
#' \dontrun{
#' if (requireNamespace("ggplot2", quietly = TRUE)) {
#' town_pops <- race_pops %>%
#' dplyr::filter(variable == "total") %>%
#' dplyr::mutate(name = forcats::fct_reorder(as.factor(name), estimate))
#'
#' library(ggplot2)
#' p <- ggplot(town_pops, aes(x = name, y = estimate)) +
#' geom_col() +
#' coord_flip()
#' # With long labels on the left, ggplot's default title placement
#' # aligned to the panel can become awkward
#' p + ggtitle("Total population by town, 2017")
#' # Instead, make a label grob and arrange plot elements how you want
#' title <- themed_label("Total population by town, 2017", element = "plot.title")
#' cowplot::plot_grid(
#' title,
#' p,
#' ncol = 1,
#' rel_heights = c(1, 10)
#' )
#' }
#' }
#' @export
# Build a cowplot label grob whose typography is inherited from a ggplot
# theme element. See the roxygen block above for parameter details.
themed_label <- function(label, theme = NULL, element = "text", x = 0.01, hjust = 0, ...) {
  # Fall back to the active ggplot2 theme when none is supplied.
  if (is.null(theme)) {
    theme <- ggplot2::theme_get()
  }
  # Accept a theme-returning function (e.g. ggplot2::theme_minimal) as well
  # as a realized theme object.
  if (is.function(theme)) {
    theme <- rlang::exec(theme)
  }
  assertthat::assert_that(element %in% names(theme), msg = "Element must be a valid ggplot theme element name")
  # Resolve inherited theme settings for the requested element.
  elements <- ggplot2::calc_element(element, theme)
  # Bug fix: the element field is named 'colour'; the previous
  # 'elements$color' only worked via `$` partial matching and would
  # silently return NULL if the abbreviation ever became ambiguous.
  lbl <- cowplot::draw_label(label,
    fontfamily = elements$family,
    fontface = elements$face,
    colour = elements$colour,
    size = elements$size,
    x = x,
    hjust = hjust,
    ...
  )
  cowplot::ggdraw() + lbl
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pmut.edap.cont.R
\name{pmut.edap.cont}
\alias{pmut.edap.cont}
\title{Create Exploratory Visualization for Continuous Feature}
\usage{
pmut.edap.cont(datatable, varstring, targetstring, meta = c(50, 4, 0.01,
0.99), qbin = FALSE, pred.df = NULL)
}
\arguments{
\item{datatable}{Object of class \code{data.frame} or \code{data.table}}
\item{varstring}{Single character string indicating the column name inside \code{datatable} for the continuous feature}
\item{targetstring}{Single character string indicating the column name inside \code{datatable} for the response}
\item{meta}{Numeric vector with length of 4 (default is c(50,4,0.01,0.99)): 1st indicates number of bins,
2nd indicates bin rounding digits, 3rd and 4th indicate the outlier percentile}
\item{qbin}{Logical (default is FALSE), FALSE indicates equal length bins, TRUE indicates equal weight bins (quantile view)}
\item{pred.df}{Object of class \code{data.frame} (optional), with column being prediction from each model}
}
\value{
A view of line plot stacked above the histogram
}
\description{
This function creates visualization with a line plot of a specified continuous feature against the response,
plus a distribution histogram for that feature.
In the line plot, the continuous feature will be cut into bins and then placed on the x-axis. The response will be the y-axis,
which will serve as Actual. Binning characteristics will be controlled by \code{meta} and \code{qbin}. NA will be formed as its own bin.
More lines of Prediction can be created by specifying a prediction \code{data.frame}.
}
\examples{
df = data.frame(ggplot2::diamonds)
pmut.edap.cont(df, "carat", "price")
pmut.edap.cont(df, "carat", "price", meta=c(12,2,0,1), qbin=TRUE)
pmut.edap.cont(df, "carat", "price", pred.df=data.frame(GLM=df$carat*7000-1000))
}
|
/man/pmut.edap.cont.Rd
|
no_license
|
chengjunhou/pmut
|
R
| false
| true
| 1,880
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pmut.edap.cont.R
\name{pmut.edap.cont}
\alias{pmut.edap.cont}
\title{Create Exploratory Visualization for Continuous Feature}
\usage{
pmut.edap.cont(datatable, varstring, targetstring, meta = c(50, 4, 0.01,
0.99), qbin = FALSE, pred.df = NULL)
}
\arguments{
\item{datatable}{Object of class \code{data.frame} or \code{data.table}}
\item{varstring}{Single character string indicating the column name inside \code{datatable} for the continuous feature}
\item{targetstring}{Single character string indicating the column name inside \code{datatable} for the response}
\item{meta}{Numeric vector with length of 4 (default is c(50,4,0.01,0.99)): 1st indicates number of bins,
2nd indicates bin rounding digits, 3rd and 4th indicate the outlier percentile}
\item{qbin}{Logical (default is FALSE), FALSE indicates equal length bins, TRUE indicates equal weight bins (quantile view)}
\item{pred.df}{Object of class \code{data.frame} (optional), with column being prediction from each model}
}
\value{
A view of line plot stacked above the histogram
}
\description{
This function creates visualization with a line plot of a specified continuous feature against the response,
plus a distribution histogram for that feature.
In the line plot, the continuous feature will be cut into bins and then placed on the x-axis. The response will be the y-axis,
which will serve as Actual. Binning characteristics will be controlled by \code{meta} and \code{qbin}. NA will be formed as its own bin.
More lines of Prediction can be created by specifying a prediction \code{data.frame}.
}
\examples{
df = data.frame(ggplot2::diamonds)
pmut.edap.cont(df, "carat", "price")
pmut.edap.cont(df, "carat", "price", meta=c(12,2,0,1), qbin=TRUE)
pmut.edap.cont(df, "carat", "price", pred.df=data.frame(GLM=df$carat*7000-1000))
}
|
# Load the household power-consumption extract.
plotData <- read.csv("./data/DataHousehold.csv")
# Combine the Date and Time columns into a single POSIXlt timestamp column X.
temp<-transform(plotData, X=strptime(paste(as.character(plotData$Date),as.character(plotData$Time)), "%d/%m/%Y %H:%M:%S"))
# Render the global-active-power time series to a PNG device.
png(file="plot2.png")
plot(temp$X, temp$Global_active_power, type="l", xlab=" ", ylab="Global Active Power (kilowatts)")
dev.off()
|
/figure/plot2.R
|
no_license
|
ebnetera/ExData_Plotting1
|
R
| false
| false
| 305
|
r
|
# Load the household power-consumption extract.
plotData <- read.csv("./data/DataHousehold.csv")
# Combine the Date and Time columns into a single POSIXlt timestamp column X.
temp<-transform(plotData, X=strptime(paste(as.character(plotData$Date),as.character(plotData$Time)), "%d/%m/%Y %H:%M:%S"))
# Render the global-active-power time series to a PNG device.
png(file="plot2.png")
plot(temp$X, temp$Global_active_power, type="l", xlab=" ", ylab="Global Active Power (kilowatts)")
dev.off()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Regression test for ARROW-3741: casting a Table to a new schema must
# validate the field set and apply per-column type conversions.
test_that("Table cast (ARROW-3741)", {
  tab <- Table$create(x = 1:10, y = 1:10)
  # Casting fails when the target schema drops or renames columns.
  expect_error(tab$cast(schema(x = int32())))
  expect_error(tab$cast(schema(x = int32(), z = int32())))
  # A schema with matching names but narrower/wider types succeeds and the
  # resulting columns carry the requested types.
  s2 <- schema(x = int16(), y = int64())
  tab2 <- tab$cast(s2)
  expect_equal(tab2$schema, s2)
  expect_equal(tab2$column(0L)$type, int16())
  expect_equal(tab2$column(1L)$type, int64())
})
# The data.frame-like S3 generics on Table should agree with the equivalent
# calls on the source data frame.
test_that("Table S3 methods", {
  tab <- Table$create(example_data)
  for (f in c("dim", "nrow", "ncol", "dimnames", "colnames", "row.names", "as.list")) {
    fun <- get(f)
    # 'info' tags failures with the generic's name for easier diagnosis.
    expect_identical(fun(tab), fun(example_data), info = f)
  }
})
# $column and $field take zero-based indices and must reject NA,
# out-of-range, vector, and character subscripts.
test_that("Table $column and $field", {
  tab <- Table$create(x = 1:10, y = 1:10)
  expect_equal(tab$field(0), field("x", int32()))
  # input validation
  expect_error(tab$column(NA), "'i' cannot be NA")
  expect_error(tab$column(-1), "subscript out of bounds")
  expect_error(tab$column(1000), "subscript out of bounds")
  expect_error(tab$column(1:2))
  expect_error(tab$column("one"))
  expect_error(tab$field(NA), "'i' cannot be NA")
  expect_error(tab$field(-1), "subscript out of bounds")
  expect_error(tab$field(1000), "subscript out of bounds")
  expect_error(tab$field(1:2))
  expect_error(tab$field("one"))
})
# Common fixtures used in some of the following tests
# A tibble covering the main column types (int, double, logical-with-NA,
# character, factor) and its Table counterpart. The lgl column is random,
# which is fine here since the tests below compare tab against tbl rather
# than against literal values.
tbl <- tibble::tibble(
  int = 1:10,
  dbl = as.numeric(1:10),
  lgl = sample(c(TRUE, FALSE, NA), 10, replace = TRUE),
  chr = letters[1:10],
  fct = factor(letters[1:10])
)
tab <- Table$create(tbl)
test_that("[, [[, $ for Table", {
  # Extraction on Table should mirror data.frame/tibble semantics and
  # additionally accept Arrow-native row selectors (Array, ChunkedArray,
  # Expression).
  expect_identical(names(tab), names(tbl))
  expect_data_frame(tab[6:7, ], tbl[6:7, ])
  expect_data_frame(tab[6:7, 2:4], tbl[6:7, 2:4])
  expect_data_frame(tab[, c("dbl", "fct")], tbl[, c(2, 5)])
  expect_as_vector(tab[, "chr", drop = TRUE], tbl$chr)
  # Take within a single chunk
  expect_data_frame(tab[c(7, 3, 5), 2:4], tbl[c(7, 3, 5), 2:4])
  expect_data_frame(tab[rep(c(FALSE, TRUE), 5), ], tbl[c(2, 4, 6, 8, 10), ])
  # bool ChunkedArray (with one chunk)
  expect_data_frame(tab[tab$lgl, ], tbl[tbl$lgl, ])
  # ChunkedArray with multiple chunks
  c1 <- c(TRUE, FALSE, TRUE, TRUE, FALSE)
  c2 <- c(FALSE, FALSE, TRUE, TRUE, FALSE)
  ca <- ChunkedArray$create(c1, c2)
  expect_data_frame(tab[ca, ], tbl[c(1, 3, 4, 8, 9), ])
  # int Array
  expect_data_frame(tab[Array$create(5:6), 2:4], tbl[6:7, 2:4])
  # ChunkedArray
  expect_data_frame(tab[ChunkedArray$create(5L, 6L), 2:4], tbl[6:7, 2:4])
  # Expression
  expect_data_frame(tab[tab$int > 6, ], tbl[tbl$int > 6, ])
  expect_as_vector(tab[["int"]], tbl$int)
  expect_as_vector(tab$int, tbl$int)
  expect_as_vector(tab[[4]], tbl$chr)
  # Missing columns return NULL rather than erroring, like data.frame $
  expect_null(tab$qwerty)
  expect_null(tab[["asdf"]])
  # List-like column slicing
  expect_data_frame(tab[2:4], tbl[2:4])
  expect_data_frame(tab[c(2, 1)], tbl[c(2, 1)])
  expect_data_frame(tab[-3], tbl[-3])
  expect_error(tab[[c(4, 3)]])
  expect_error(tab[[NA]], "'i' must be character or numeric, not logical")
  expect_error(tab[[NULL]], "'i' must be character or numeric, not NULL")
  expect_error(tab[[c("asdf", "jkl;")]], "length(name) not equal to 1", fixed = TRUE)
  expect_error(tab[-3:3], "Invalid column index")
  expect_error(tab[1000], "Invalid column index")
  expect_error(tab[1:1000], "Invalid column index")
  # input validation
  expect_error(tab[, c("dbl", "NOTACOLUMN")], 'Column not found: "NOTACOLUMN"')
  expect_error(tab[, c(6, NA)], "Column indices cannot be NA")
  # Everything after this skip() does not run
  skip("Table with 0 cols doesn't know how many rows it should have")
  expect_data_frame(tab[0], tbl[0])
})
test_that("[[<- assignment", {
  # Mutations accumulate within this test: each assertion reflects all
  # assignments before it, so statement order matters here.
  # can remove a column
  tab[["chr"]] <- NULL
  expect_data_frame(tab, tbl[-4])
  # can remove a column by index
  tab[[4]] <- NULL
  expect_data_frame(tab, tbl[1:3])
  # can add a named column
  tab[["new"]] <- letters[10:1]
  expect_data_frame(tab, dplyr::bind_cols(tbl[1:3], new = letters[10:1]))
  # can replace a column by index
  tab[[2]] <- as.numeric(10:1)
  expect_as_vector(tab[[2]], as.numeric(10:1))
  # can add a column by index (one past the end; gets the name "5")
  tab[[5]] <- as.numeric(10:1)
  expect_as_vector(tab[[5]], as.numeric(10:1))
  expect_as_vector(tab[["5"]], as.numeric(10:1))
  # can replace a column
  tab[["int"]] <- 10:1
  expect_as_vector(tab[["int"]], 10:1)
  # can use $
  tab$new <- NULL
  expect_null(as.vector(tab$new))
  expect_identical(dim(tab), c(10L, 4L))
  tab$int <- 1:10
  expect_as_vector(tab$int, 1:10)
  # recycling: length-1 values recycle; other non-divisor lengths error
  tab[["atom"]] <- 1L
  expect_as_vector(tab[["atom"]], rep(1L, 10))
  expect_error(
    tab[["atom"]] <- 1:6,
    "Can't recycle input of size 6 to size 10."
  )
  # assign Arrow array and chunked_array
  array <- Array$create(c(10:1))
  tab$array <- array
  expect_as_vector(tab$array, 10:1)
  tab$chunked <- chunked_array(1:10)
  expect_as_vector(tab$chunked, 1:10)
  # nonsense indexes
  expect_error(tab[[NA]] <- letters[10:1], "'i' must be character or numeric, not logical")
  expect_error(tab[[NULL]] <- letters[10:1], "'i' must be character or numeric, not NULL")
  expect_error(tab[[NA_integer_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
  expect_error(tab[[NA_real_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
  expect_error(tab[[NA_character_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
  expect_error(tab[[c(1, 4)]] <- letters[10:1], "length(i) not equal to 1", fixed = TRUE)
})
test_that("Table$Slice", {
  # Slice(offset) drops the first `offset` rows; Slice(offset, length)
  # takes a window.
  from_five <- tab$Slice(5)
  expect_data_frame(from_five, tbl[6:10, ])
  window <- tab$Slice(5, 2)
  expect_data_frame(window, tbl[6:7, ])

  # Input validation: offset/length must be scalar, non-NA, non-negative,
  # and within bounds.
  expect_error(tab$Slice("ten"))
  expect_error(tab$Slice(NA_integer_), "Slice 'offset' cannot be NA")
  expect_error(tab$Slice(NA), "Slice 'offset' cannot be NA")
  expect_error(tab$Slice(10, "ten"))
  expect_error(tab$Slice(10, NA_integer_), "Slice 'length' cannot be NA")
  expect_error(tab$Slice(NA_integer_, NA_integer_), "Slice 'offset' cannot be NA")
  expect_error(tab$Slice(c(10, 10)))
  expect_error(tab$Slice(10, c(10, 10)))
  expect_error(tab$Slice(1000), "Slice 'offset' greater than array length")
  expect_error(tab$Slice(-1), "Slice 'offset' cannot be negative")
  expect_error(window$Slice(10, 10), "Slice 'offset' greater than array length")
  expect_error(tab$Slice(10, -1), "Slice 'length' cannot be negative")
  expect_error(tab$Slice(-1, 10), "Slice 'offset' cannot be negative")
})
test_that("head and tail on Table", {
  # Defaults, explicit n (smaller and larger than nrow), and negative n
  # should all match the data.frame behavior.
  expect_data_frame(head(tab), head(tbl))
  expect_data_frame(tail(tab), tail(tbl))
  for (n in c(4, 40, -4, -40)) {
    expect_data_frame(head(tab, n), head(tbl, n))
    expect_data_frame(tail(tab, n), tail(tbl, n))
  }
})
test_that("Table print method", {
  # The print output lists dimensions then one "$name <type>" line per column.
  expected_output <- paste0(
    "Table\n",
    "10 rows x 5 columns\n",
    "$int <int32>\n",
    "$dbl <double>\n",
    "$lgl <bool>\n",
    "$chr <string>\n",
    "$fct <dictionary<values=string, indices=int8>>"
  )
  expect_output(print(tab), expected_output, fixed = TRUE)
})
test_that("table active bindings", {
  # dim and columns are exposed as active bindings on the R6 Table object
  expect_identical(dim(tbl), dim(tab))
  expect_type(tab$columns, "list")
  expect_equal(tab$columns[[1]], tab[[1]])
})
test_that("table() handles record batches with splicing", {
  # Multiple record batches concatenate row-wise into one Table, whether
  # passed as individual arguments or spliced from a list with !!!.
  batch <- record_batch(x = 1:2, y = letters[1:2])
  tab <- Table$create(batch, batch, batch)
  expect_equal(tab$schema, batch$schema)
  expect_equal(tab$num_rows, 6L)
  expect_equal(
    as.data.frame(tab),
    vctrs::vec_rbind(as.data.frame(batch), as.data.frame(batch), as.data.frame(batch))
  )
  batches <- list(batch, batch, batch)
  tab <- Table$create(!!!batches)
  expect_equal(tab$schema, batch$schema)
  expect_equal(tab$num_rows, 6L)
  expect_equal(
    as.data.frame(tab),
    vctrs::vec_rbind(!!!purrr::map(batches, as.data.frame))
  )
})
test_that("table() handles ... of arrays, chunked arrays, vectors", {
  # Mixed input kinds (Array, ChunkedArray, plain vector, spliced tibble)
  # all become columns of a single Table, in argument order.
  arr <- Array$create(1:10)
  chunks <- chunked_array(1:5, 6:10)
  vals <- rnorm(10)
  df <- tibble::tibble(x = 1:10, y = letters[1:10])
  combined <- Table$create(a = arr, b = chunks, c = vals, !!!df)
  expect_equal(
    combined$schema,
    schema(a = int32(), b = int32(), c = float64(), x = int32(), y = utf8())
  )
  round_trip <- as.data.frame(combined)
  expect_equal(names(round_trip), c("a", "b", "c", "x", "y"))
  expect_equal(
    round_trip,
    tibble::tibble(a = 1:10, b = 1:10, c = vals, x = 1:10, y = letters[1:10])
  )
})
test_that("table() auto splices (ARROW-5718)", {
  # Passing a data.frame directly is equivalent to splicing its columns.
  df <- tibble::tibble(x = 1:10, y = letters[1:10])
  spliced <- Table$create(!!!df)
  whole <- Table$create(df)
  expect_equal(whole, spliced)
  expect_equal(whole$schema, schema(x = int32(), y = utf8()))
  expect_equal(as.data.frame(whole), df)

  # The same holds when an explicit schema drives the column types.
  target <- schema(x = float64(), y = utf8())
  whole_typed <- Table$create(df, schema = target)
  spliced_typed <- Table$create(!!!df, schema = target)
  expect_equal(whole_typed, spliced_typed)
  expect_equal(whole_typed$schema, target)
  expect_equal(as.data.frame(whole_typed), df)
})
test_that("Validation when creating table with schema (ARROW-10953)", {
  # A user-supplied schema must match the data both in column count and
  # in field names; mismatches produce descriptive errors.
  expect_error(
    Table$create(data.frame(), schema = schema(a = int32())),
    "incompatible. schema has 1 fields, and 0 columns are supplied",
    fixed = TRUE
  )
  expect_error(
    Table$create(data.frame(b = 1), schema = schema(a = int32())),
    "field at index 1 has name 'a' != 'b'",
    fixed = TRUE
  )
  expect_error(
    Table$create(data.frame(b = 2, c = 3), schema = schema(a = int32())),
    "incompatible. schema has 1 fields, and 2 columns are supplied",
    fixed = TRUE
  )
})
test_that("==.Table", {
  # == compares data and schema; it is symmetric, and tables with missing
  # or extra columns are unequal.
  reference <- Table$create(x = 1:2, y = c("a", "b"))
  duplicate <- Table$create(x = 1:2, y = c("a", "b"))
  fewer_cols <- Table$create(x = 1:2)
  extra_col <- Table$create(x = 1:2, y = c("a", "b"), z = 3:4)

  expect_true(reference == duplicate)
  expect_true(duplicate == reference)
  for (other in list(fewer_cols, extra_col)) {
    expect_false(reference == other)
    expect_false(other == reference)
  }
  expect_true(all.equal(reference, duplicate))
  expect_equal(reference, duplicate)
})
test_that("Table$Equals(check_metadata)", {
  # tab1 and tab2 hold identical data; tab2 additionally carries
  # schema-level metadata. Equality ignores metadata unless
  # check_metadata = TRUE is requested.
  tab1 <- Table$create(x = 1:2, y = c("a", "b"))
  tab2 <- Table$create(
    x = 1:2, y = c("a", "b"),
    schema = tab1$schema$WithMetadata(list(some = "metadata"))
  )
  expect_r6_class(tab1, "Table")
  expect_r6_class(tab2, "Table")
  expect_false(tab1$schema$HasMetadata)
  expect_true(tab2$schema$HasMetadata)
  expect_identical(tab2$schema$metadata, list(some = "metadata"))
  expect_true(tab1 == tab2)
  expect_true(tab1$Equals(tab2))
  expect_false(tab1$Equals(tab2, check_metadata = TRUE))
  expect_failure(expect_equal(tab1, tab2)) # expect_equal has check_metadata=TRUE
  expect_equal(tab1, tab2, ignore_attr = TRUE) # this sets check_metadata=FALSE
  expect_false(tab1$Equals(24)) # Not a Table
})
test_that("Table handles null type (ARROW-7064)", {
  # vctrs::unspecified() columns map to the Arrow null type.
  with_null <- Table$create(a = 1:10, n = vctrs::unspecified(10))
  expect_equal(
    with_null$schema,
    schema(a = int32(), n = null()),
    ignore_attr = TRUE
  )
})
test_that("Can create table with specific dictionary types", {
  # The dictionary index type should come from the requested schema,
  # not be inferred from the factor data.
  fact <- example_data[, "fct"]
  int_types <- c(int8(), int16(), int32(), int64())
  # TODO: test uint types when format allows
  # uint_types <- c(uint8(), uint16(), uint32(), uint64()) # nolint
  for (i in int_types) {
    sch <- schema(fct = dictionary(i, utf8()))
    tab <- Table$create(fact, schema = sch)
    expect_equal(sch, tab$schema)
    if (i != int64()) {
      # TODO: same downcast to int32 as we do for int64() type elsewhere
      expect_identical(as.data.frame(tab), fact)
    }
  }
})
test_that("Table unifies dictionary on conversion back to R (ARROW-8374)", {
  # Batches whose factor columns have different level sets should come
  # back as a single factor whose levels are the union, in first-seen
  # order.
  batch_ab <- record_batch(f = factor(c("a"), levels = c("a", "b")))
  batch_cd <- record_batch(f = factor(c("c"), levels = c("c", "d")))
  batch_na <- record_batch(f = factor(NA, levels = "a"))
  batch_empty <- record_batch(f = factor())

  unified <- Table$create(batch_ab, batch_cd, batch_na, batch_empty)
  expected <- tibble::tibble(
    f = factor(c("a", "c", NA), levels = c("a", "b", "c", "d"))
  )
  expect_identical(as.data.frame(unified), expected)
})
test_that("Table$SelectColumns()", {
  # SelectColumns takes 0-based integer indices; out-of-range or
  # character input errors.
  two_col <- Table$create(x = 1:10, y = 1:10)
  expect_equal(two_col$SelectColumns(0L), Table$create(x = 1:10))
  expect_error(two_col$SelectColumns(2:4))
  expect_error(two_col$SelectColumns(""))
})
test_that("Table name assignment", {
  renamed <- Table$create(x = 1:10, y = 1:10)
  expect_identical(names(renamed), c("x", "y"))
  names(renamed) <- c("a", "b")
  expect_identical(names(renamed), c("a", "b"))

  # Replacement names must be a character vector of exactly ncol length.
  for (bad_names in list("f", letters, character(0), NULL, c(TRUE, FALSE))) {
    expect_error(names(renamed) <- bad_names)
  }
})
test_that("Table$create() with different length columns", {
  # Non-scalar columns of unequal length cannot be combined.
  expect_error(
    Table$create(a = 1:5, b = 1:6),
    "All columns must have the same length"
  )
})
test_that("Table$create() scalar recycling with vectors", {
  # A length-1 R vector is recycled to match the longest column.
  recycled <- Table$create(a = 1:10, b = 5)
  expect_data_frame(recycled, tibble::tibble(a = 1:10, b = 5))
})
test_that("Table$create() scalar recycling with Scalars, Arrays, and ChunkedArrays", {
  # Length-1 Arrow objects of every flavor recycle the same way a
  # length-1 R vector does.
  expected <- tibble::tibble(a = 1:10, b = 5)
  singletons <- list(
    Scalar$create(5),
    Array$create(5),
    ChunkedArray$create(5)
  )
  for (single in singletons) {
    expect_data_frame(
      Table$create(a = Array$create(1:10), b = single),
      expected
    )
  }
})
test_that("Table$create() no recycling with tibbles", {
  # Whole data.frames are never recycled: row counts must agree exactly.
  msg <- "All input tibbles or data.frames must have the same number of rows"
  ten_rows <- tibble::tibble(a = 1:10, b = 5)
  expect_error(
    Table$create(ten_rows, tibble::tibble(a = 1, b = 5)),
    regexp = msg
  )
  expect_error(
    Table$create(ten_rows, tibble::tibble(a = 1)),
    regexp = msg
  )
})
test_that("Tables can be combined with concat_tables()", {
  # Without unify_schemas the schemas must match exactly; with it,
  # fields are merged by name but types must still be compatible.
  expect_error(
    concat_tables(arrow_table(a = 1:10), arrow_table(a = c("a", "b")), unify_schemas = FALSE),
    regexp = "Schema at index 2 does not match the first schema"
  )
  expect_error(
    concat_tables(arrow_table(a = 1:10), arrow_table(a = c("a", "b")), unify_schemas = TRUE),
    regexp = "Unable to merge: Field a has incompatible types: int32 vs string"
  )
  expect_error(
    concat_tables(),
    regexp = "Must pass at least one Table"
  )
  # Columns absent from one input are filled with NA after unification
  expect_equal(
    concat_tables(
      arrow_table(a = 1:5),
      arrow_table(a = 6:7, b = c("d", "e"))
    ),
    arrow_table(a = 1:7, b = c(rep(NA, 5), "d", "e"))
  )
  # concat_tables() with one argument returns identical table
  expected <- arrow_table(a = 1:10)
  expect_equal(expected, concat_tables(expected))
})
test_that("Table supports rbind", {
  # rbind requires matching schemas and concatenates rows; the result
  # should agree with rbind on the equivalent data.frames.
  expect_error(
    rbind(arrow_table(a = 1:10), arrow_table(a = c("a", "b"))),
    regexp = "Schema at index 2 does not match the first schema"
  )
  tables <- list(
    arrow_table(a = 1:10, b = Scalar$create("x")),
    arrow_table(a = 2:42, b = Scalar$create("y")),
    arrow_table(a = 8:10, b = Scalar$create("z"))
  )
  expected <- Table$create(do.call(rbind, lapply(tables, as.data.frame)))
  actual <- do.call(rbind, tables)
  expect_equal(actual, expected, ignore_attr = TRUE)
  # rbind with empty table produces identical table
  expected <- arrow_table(a = 1:10, b = Scalar$create("x"))
  expect_equal(
    rbind(expected, arrow_table(a = integer(0), b = character(0))),
    expected
  )
  # rbind() with one argument returns identical table
  expect_equal(rbind(expected), expected)
})
test_that("Table supports cbind", {
  # cbind combines columns from Tables, Arrow arrays, plain vectors,
  # factors, data.frames (R >= 4.0), and scalars (which recycle).
  # Duplicate column names are an error (snapshot-tested).
  expect_snapshot_error(
    cbind(
      arrow_table(a = 1:10),
      arrow_table(a = c("a", "b"))
    )
  )
  expect_error(
    cbind(arrow_table(a = 1:10), arrow_table(b = character(0))),
    regexp = "Non-scalar inputs must have an equal number of rows"
  )
  actual <- cbind(
    arrow_table(a = 1:10, b = Scalar$create("x")),
    arrow_table(a = 11:20, b = Scalar$create("y")),
    arrow_table(c = 1:10)
  )
  expected <- arrow_table(cbind(
    tibble::tibble(a = 1:10, b = "x"),
    tibble::tibble(a = 11:20, b = "y"),
    tibble::tibble(c = 1:10)
  ))
  expect_equal(actual, expected, ignore_attr = TRUE)
  # cbind() with one argument returns identical table
  expected <- arrow_table(a = 1:10)
  expect_equal(expected, cbind(expected))
  # Handles Arrow arrays and chunked arrays
  expect_equal(
    cbind(arrow_table(a = 1:2), b = Array$create(4:5)),
    arrow_table(a = 1:2, b = 4:5)
  )
  expect_equal(
    cbind(arrow_table(a = 1:2), b = chunked_array(4, 5)),
    arrow_table(a = 1:2, b = chunked_array(4, 5))
  )
  # Handles data.frame
  if (getRversion() >= "4.0.0") {
    # Prior to R 4.0, cbind would short-circuit to the data.frame implementation
    # if **any** of the arguments are a data.frame.
    expect_equal(
      cbind(arrow_table(a = 1:2), data.frame(b = 4:5)),
      arrow_table(a = 1:2, b = 4:5)
    )
  }
  # Handles factors
  expect_equal(
    cbind(arrow_table(a = 1:2), b = factor(c("a", "b"))),
    arrow_table(a = 1:2, b = factor(c("a", "b")))
  )
  # Handles scalar values (recycled to the table's row count)
  expect_equal(
    cbind(arrow_table(a = 1:2), b = "x"),
    arrow_table(a = 1:2, b = c("x", "x"))
  )
  # Handles zero rows
  expect_equal(
    cbind(arrow_table(a = character(0)), b = Array$create(numeric(0)), c = integer(0)),
    arrow_table(a = character(0), b = numeric(0), c = integer(0)),
  )
  # Rejects unnamed arrays, even in cases where no named arguments are passed
  expect_error(
    cbind(arrow_table(a = 1:2), b = 3:4, 5:6),
    regexp = "Vector and array arguments must have names"
  )
  expect_error(
    cbind(arrow_table(a = 1:2), 3:4, 5:6),
    regexp = "Vector and array arguments must have names"
  )
})
test_that("cbind.Table handles record batches and tables", {
  # R 3.6 cbind dispatch rules cause cbind to fall back to default impl if
  # there are multiple arguments with distinct cbind implementations
  skip_if(getRversion() < "4.0.0", "R 3.6 cbind dispatch rules prevent this behavior")
  combined <- cbind(arrow_table(a = 1L:2L), record_batch(b = 4:5))
  expect_equal(combined, arrow_table(a = 1L:2L, b = 4:5))
})
test_that("ARROW-11769/ARROW-17085 - grouping preserved in table creation", {
  # dplyr group metadata on the input data.frame should survive the
  # round trip into a Table (grouping support lives in the dataset code).
  skip_if_not_available("dataset")
  tbl <- tibble::tibble(
    int = 1:10,
    fct = factor(rep(c("A", "B"), 5)),
    fct2 = factor(rep(c("C", "D"), each = 5)),
  )
  # Ungrouped input yields an ungrouped Table
  expect_identical(
    tbl %>%
      Table$create() %>%
      dplyr::group_vars(),
    dplyr::group_vars(tbl)
  )
  # Grouped input keeps its grouping variables, in order
  expect_identical(
    tbl %>%
      dplyr::group_by(fct, fct2) %>%
      Table$create() %>%
      dplyr::group_vars(),
    c("fct", "fct2")
  )
})
test_that("ARROW-12729 - length returns number of columns in Table", {
  # length() on a Table counts columns, like length() on a data.frame.
  three_cols <- tibble::tibble(
    int = 1:10,
    fct = factor(rep(c("A", "B"), 5)),
    fct2 = factor(rep(c("C", "D"), each = 5)),
  )
  expect_identical(length(Table$create(!!!three_cols)), 3L)
})
test_that("as_arrow_table() works for Table", {
  tab_in <- arrow_table(col1 = 1L, col2 = "two")
  # Identity when no schema is given
  expect_identical(as_arrow_table(tab_in), tab_in)
  # Supplying a schema casts the columns to the requested types
  target <- schema(col1 = float64(), col2 = string())
  expect_equal(
    as_arrow_table(tab_in, schema = target),
    arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
  )
})
test_that("as_arrow_table() works for RecordBatch", {
  # A RecordBatch converts to the equivalent single-chunk Table.
  reference <- arrow_table(col1 = 1L, col2 = "two")
  batch_in <- record_batch(col1 = 1L, col2 = "two")
  expect_equal(as_arrow_table(batch_in), reference)
  # Supplying a schema casts the columns during conversion
  target <- schema(col1 = float64(), col2 = string())
  expect_equal(
    as_arrow_table(batch_in, schema = target),
    arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
  )
})
test_that("as_arrow_table() works for data.frame", {
  # A tibble converts column-for-column, with inferred or requested types.
  reference <- arrow_table(col1 = 1L, col2 = "two")
  df_in <- tibble::tibble(col1 = 1L, col2 = "two")
  expect_equal(as_arrow_table(df_in), reference)
  target <- schema(col1 = float64(), col2 = string())
  expect_equal(
    as_arrow_table(df_in, schema = target),
    arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
  )
})
test_that("as_arrow_table() errors for invalid input", {
  # Objects with no as_arrow_table() method raise a classed error so
  # callers can catch it specifically.
  expect_error(
    as_arrow_table("no as_arrow_table() method"),
    class = "arrow_no_method_as_arrow_table"
  )
})
test_that("num_rows method not susceptible to integer overflow", {
  # Gated: allocates multi-gigabyte arrays, so only runs when large-memory
  # tests are explicitly enabled.
  skip_if_not_running_large_memory_tests()
  small_array <- Array$create(raw(1))
  big_array <- Array$create(raw(.Machine$integer.max))
  # One element past .Machine$integer.max rows in total
  big_chunked_array <- chunked_array(big_array, small_array)
  # LargeString array with data buffer > MAX_INT32
  big_string_array <- Array$create(make_big_string())
  small_table <- Table$create(col = small_array)
  big_table <- Table$create(col = big_chunked_array)
  # Counts that can exceed int32 range are returned as doubles instead
  # of overflowing
  expect_type(big_array$nbytes(), "integer")
  expect_type(big_chunked_array$nbytes(), "double")
  expect_type(length(big_array), "integer")
  expect_type(length(big_chunked_array), "double")
  expect_type(small_table$num_rows, "integer")
  expect_type(big_table$num_rows, "double")
  expect_identical(big_string_array$data()$buffers[[3]]$size, 2148007936)
})
test_that("can create empty table from schema", {
  # Table$create(schema = ...) with no data yields a zero-row table whose
  # schema (including extension types) is preserved.
  empty_schema <- schema(
    col1 = float64(),
    col2 = string(),
    col3 = vctrs_extension_type(integer())
  )
  empty_tab <- Table$create(schema = empty_schema)
  expect_r6_class(empty_tab, "Table")
  expect_equal(nrow(empty_tab), 0)
  expect_equal(empty_tab$schema, empty_schema)
})
|
/r/tests/testthat/test-Table.R
|
permissive
|
pitrou/arrow
|
R
| false
| false
| 22,134
|
r
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
test_that("Table cast (ARROW-3741)", {
tab <- Table$create(x = 1:10, y = 1:10)
expect_error(tab$cast(schema(x = int32())))
expect_error(tab$cast(schema(x = int32(), z = int32())))
s2 <- schema(x = int16(), y = int64())
tab2 <- tab$cast(s2)
expect_equal(tab2$schema, s2)
expect_equal(tab2$column(0L)$type, int16())
expect_equal(tab2$column(1L)$type, int64())
})
test_that("Table S3 methods", {
tab <- Table$create(example_data)
for (f in c("dim", "nrow", "ncol", "dimnames", "colnames", "row.names", "as.list")) {
fun <- get(f)
expect_identical(fun(tab), fun(example_data), info = f)
}
})
test_that("Table $column and $field", {
tab <- Table$create(x = 1:10, y = 1:10)
expect_equal(tab$field(0), field("x", int32()))
# input validation
expect_error(tab$column(NA), "'i' cannot be NA")
expect_error(tab$column(-1), "subscript out of bounds")
expect_error(tab$column(1000), "subscript out of bounds")
expect_error(tab$column(1:2))
expect_error(tab$column("one"))
expect_error(tab$field(NA), "'i' cannot be NA")
expect_error(tab$field(-1), "subscript out of bounds")
expect_error(tab$field(1000), "subscript out of bounds")
expect_error(tab$field(1:2))
expect_error(tab$field("one"))
})
# Common fixtures used in some of the following tests
tbl <- tibble::tibble(
int = 1:10,
dbl = as.numeric(1:10),
lgl = sample(c(TRUE, FALSE, NA), 10, replace = TRUE),
chr = letters[1:10],
fct = factor(letters[1:10])
)
tab <- Table$create(tbl)
test_that("[, [[, $ for Table", {
expect_identical(names(tab), names(tbl))
expect_data_frame(tab[6:7, ], tbl[6:7, ])
expect_data_frame(tab[6:7, 2:4], tbl[6:7, 2:4])
expect_data_frame(tab[, c("dbl", "fct")], tbl[, c(2, 5)])
expect_as_vector(tab[, "chr", drop = TRUE], tbl$chr)
# Take within a single chunk
expect_data_frame(tab[c(7, 3, 5), 2:4], tbl[c(7, 3, 5), 2:4])
expect_data_frame(tab[rep(c(FALSE, TRUE), 5), ], tbl[c(2, 4, 6, 8, 10), ])
# bool ChunkedArray (with one chunk)
expect_data_frame(tab[tab$lgl, ], tbl[tbl$lgl, ])
# ChunkedArray with multiple chunks
c1 <- c(TRUE, FALSE, TRUE, TRUE, FALSE)
c2 <- c(FALSE, FALSE, TRUE, TRUE, FALSE)
ca <- ChunkedArray$create(c1, c2)
expect_data_frame(tab[ca, ], tbl[c(1, 3, 4, 8, 9), ])
# int Array
expect_data_frame(tab[Array$create(5:6), 2:4], tbl[6:7, 2:4])
# ChunkedArray
expect_data_frame(tab[ChunkedArray$create(5L, 6L), 2:4], tbl[6:7, 2:4])
# Expression
expect_data_frame(tab[tab$int > 6, ], tbl[tbl$int > 6, ])
expect_as_vector(tab[["int"]], tbl$int)
expect_as_vector(tab$int, tbl$int)
expect_as_vector(tab[[4]], tbl$chr)
expect_null(tab$qwerty)
expect_null(tab[["asdf"]])
# List-like column slicing
expect_data_frame(tab[2:4], tbl[2:4])
expect_data_frame(tab[c(2, 1)], tbl[c(2, 1)])
expect_data_frame(tab[-3], tbl[-3])
expect_error(tab[[c(4, 3)]])
expect_error(tab[[NA]], "'i' must be character or numeric, not logical")
expect_error(tab[[NULL]], "'i' must be character or numeric, not NULL")
expect_error(tab[[c("asdf", "jkl;")]], "length(name) not equal to 1", fixed = TRUE)
expect_error(tab[-3:3], "Invalid column index")
expect_error(tab[1000], "Invalid column index")
expect_error(tab[1:1000], "Invalid column index")
# input validation
expect_error(tab[, c("dbl", "NOTACOLUMN")], 'Column not found: "NOTACOLUMN"')
expect_error(tab[, c(6, NA)], "Column indices cannot be NA")
skip("Table with 0 cols doesn't know how many rows it should have")
expect_data_frame(tab[0], tbl[0])
})
test_that("[[<- assignment", {
# can remove a column
tab[["chr"]] <- NULL
expect_data_frame(tab, tbl[-4])
# can remove a column by index
tab[[4]] <- NULL
expect_data_frame(tab, tbl[1:3])
# can add a named column
tab[["new"]] <- letters[10:1]
expect_data_frame(tab, dplyr::bind_cols(tbl[1:3], new = letters[10:1]))
# can replace a column by index
tab[[2]] <- as.numeric(10:1)
expect_as_vector(tab[[2]], as.numeric(10:1))
# can add a column by index
tab[[5]] <- as.numeric(10:1)
expect_as_vector(tab[[5]], as.numeric(10:1))
expect_as_vector(tab[["5"]], as.numeric(10:1))
# can replace a column
tab[["int"]] <- 10:1
expect_as_vector(tab[["int"]], 10:1)
# can use $
tab$new <- NULL
expect_null(as.vector(tab$new))
expect_identical(dim(tab), c(10L, 4L))
tab$int <- 1:10
expect_as_vector(tab$int, 1:10)
# recycling
tab[["atom"]] <- 1L
expect_as_vector(tab[["atom"]], rep(1L, 10))
expect_error(
tab[["atom"]] <- 1:6,
"Can't recycle input of size 6 to size 10."
)
# assign Arrow array and chunked_array
array <- Array$create(c(10:1))
tab$array <- array
expect_as_vector(tab$array, 10:1)
tab$chunked <- chunked_array(1:10)
expect_as_vector(tab$chunked, 1:10)
# nonsense indexes
expect_error(tab[[NA]] <- letters[10:1], "'i' must be character or numeric, not logical")
expect_error(tab[[NULL]] <- letters[10:1], "'i' must be character or numeric, not NULL")
expect_error(tab[[NA_integer_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
expect_error(tab[[NA_real_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
expect_error(tab[[NA_character_]] <- letters[10:1], "!is.na(i) is not TRUE", fixed = TRUE)
expect_error(tab[[c(1, 4)]] <- letters[10:1], "length(i) not equal to 1", fixed = TRUE)
})
test_that("Table$Slice", {
tab2 <- tab$Slice(5)
expect_data_frame(tab2, tbl[6:10, ])
tab3 <- tab$Slice(5, 2)
expect_data_frame(tab3, tbl[6:7, ])
# Input validation
expect_error(tab$Slice("ten"))
expect_error(tab$Slice(NA_integer_), "Slice 'offset' cannot be NA")
expect_error(tab$Slice(NA), "Slice 'offset' cannot be NA")
expect_error(tab$Slice(10, "ten"))
expect_error(tab$Slice(10, NA_integer_), "Slice 'length' cannot be NA")
expect_error(tab$Slice(NA_integer_, NA_integer_), "Slice 'offset' cannot be NA")
expect_error(tab$Slice(c(10, 10)))
expect_error(tab$Slice(10, c(10, 10)))
expect_error(tab$Slice(1000), "Slice 'offset' greater than array length")
expect_error(tab$Slice(-1), "Slice 'offset' cannot be negative")
expect_error(tab3$Slice(10, 10), "Slice 'offset' greater than array length")
expect_error(tab$Slice(10, -1), "Slice 'length' cannot be negative")
expect_error(tab$Slice(-1, 10), "Slice 'offset' cannot be negative")
})
test_that("head and tail on Table", {
expect_data_frame(head(tab), head(tbl))
expect_data_frame(head(tab, 4), head(tbl, 4))
expect_data_frame(head(tab, 40), head(tbl, 40))
expect_data_frame(head(tab, -4), head(tbl, -4))
expect_data_frame(head(tab, -40), head(tbl, -40))
expect_data_frame(tail(tab), tail(tbl))
expect_data_frame(tail(tab, 4), tail(tbl, 4))
expect_data_frame(tail(tab, 40), tail(tbl, 40))
expect_data_frame(tail(tab, -4), tail(tbl, -4))
expect_data_frame(tail(tab, -40), tail(tbl, -40))
})
test_that("Table print method", {
expect_output(
print(tab),
paste(
"Table",
"10 rows x 5 columns",
"$int <int32>",
"$dbl <double>",
"$lgl <bool>",
"$chr <string>",
"$fct <dictionary<values=string, indices=int8>>",
sep = "\n"
),
fixed = TRUE
)
})
test_that("table active bindings", {
expect_identical(dim(tbl), dim(tab))
expect_type(tab$columns, "list")
expect_equal(tab$columns[[1]], tab[[1]])
})
test_that("table() handles record batches with splicing", {
batch <- record_batch(x = 1:2, y = letters[1:2])
tab <- Table$create(batch, batch, batch)
expect_equal(tab$schema, batch$schema)
expect_equal(tab$num_rows, 6L)
expect_equal(
as.data.frame(tab),
vctrs::vec_rbind(as.data.frame(batch), as.data.frame(batch), as.data.frame(batch))
)
batches <- list(batch, batch, batch)
tab <- Table$create(!!!batches)
expect_equal(tab$schema, batch$schema)
expect_equal(tab$num_rows, 6L)
expect_equal(
as.data.frame(tab),
vctrs::vec_rbind(!!!purrr::map(batches, as.data.frame))
)
})
test_that("table() handles ... of arrays, chunked arrays, vectors", {
a <- Array$create(1:10)
ca <- chunked_array(1:5, 6:10)
v <- rnorm(10)
tbl <- tibble::tibble(x = 1:10, y = letters[1:10])
tab <- Table$create(a = a, b = ca, c = v, !!!tbl)
expect_equal(
tab$schema,
schema(a = int32(), b = int32(), c = float64(), x = int32(), y = utf8())
)
res <- as.data.frame(tab)
expect_equal(names(res), c("a", "b", "c", "x", "y"))
expect_equal(
res,
tibble::tibble(a = 1:10, b = 1:10, c = v, x = 1:10, y = letters[1:10])
)
})
test_that("table() auto splices (ARROW-5718)", {
df <- tibble::tibble(x = 1:10, y = letters[1:10])
tab1 <- Table$create(df)
tab2 <- Table$create(!!!df)
expect_equal(tab1, tab2)
expect_equal(tab1$schema, schema(x = int32(), y = utf8()))
expect_equal(as.data.frame(tab1), df)
s <- schema(x = float64(), y = utf8())
tab3 <- Table$create(df, schema = s)
tab4 <- Table$create(!!!df, schema = s)
expect_equal(tab3, tab4)
expect_equal(tab3$schema, s)
expect_equal(as.data.frame(tab3), df)
})
test_that("Validation when creating table with schema (ARROW-10953)", {
expect_error(
Table$create(data.frame(), schema = schema(a = int32())),
"incompatible. schema has 1 fields, and 0 columns are supplied",
fixed = TRUE
)
expect_error(
Table$create(data.frame(b = 1), schema = schema(a = int32())),
"field at index 1 has name 'a' != 'b'",
fixed = TRUE
)
expect_error(
Table$create(data.frame(b = 2, c = 3), schema = schema(a = int32())),
"incompatible. schema has 1 fields, and 2 columns are supplied",
fixed = TRUE
)
})
test_that("==.Table", {
tab1 <- Table$create(x = 1:2, y = c("a", "b"))
tab2 <- Table$create(x = 1:2, y = c("a", "b"))
tab3 <- Table$create(x = 1:2)
tab4 <- Table$create(x = 1:2, y = c("a", "b"), z = 3:4)
expect_true(tab1 == tab2)
expect_true(tab2 == tab1)
expect_false(tab1 == tab3)
expect_false(tab3 == tab1)
expect_false(tab1 == tab4)
expect_false(tab4 == tab1)
expect_true(all.equal(tab1, tab2))
expect_equal(tab1, tab2)
})
test_that("Table$Equals(check_metadata)", {
tab1 <- Table$create(x = 1:2, y = c("a", "b"))
tab2 <- Table$create(
x = 1:2, y = c("a", "b"),
schema = tab1$schema$WithMetadata(list(some = "metadata"))
)
expect_r6_class(tab1, "Table")
expect_r6_class(tab2, "Table")
expect_false(tab1$schema$HasMetadata)
expect_true(tab2$schema$HasMetadata)
expect_identical(tab2$schema$metadata, list(some = "metadata"))
expect_true(tab1 == tab2)
expect_true(tab1$Equals(tab2))
expect_false(tab1$Equals(tab2, check_metadata = TRUE))
expect_failure(expect_equal(tab1, tab2)) # expect_equal has check_metadata=TRUE
expect_equal(tab1, tab2, ignore_attr = TRUE) # this sets check_metadata=FALSE
expect_false(tab1$Equals(24)) # Not a Table
})
test_that("Table handles null type (ARROW-7064)", {
tab <- Table$create(a = 1:10, n = vctrs::unspecified(10))
expect_equal(tab$schema, schema(a = int32(), n = null()), ignore_attr = TRUE)
})
test_that("Can create table with specific dictionary types", {
fact <- example_data[, "fct"]
int_types <- c(int8(), int16(), int32(), int64())
# TODO: test uint types when format allows
# uint_types <- c(uint8(), uint16(), uint32(), uint64()) # nolint
for (i in int_types) {
sch <- schema(fct = dictionary(i, utf8()))
tab <- Table$create(fact, schema = sch)
expect_equal(sch, tab$schema)
if (i != int64()) {
# TODO: same downcast to int32 as we do for int64() type elsewhere
expect_identical(as.data.frame(tab), fact)
}
}
})
test_that("Table unifies dictionary on conversion back to R (ARROW-8374)", {
b1 <- record_batch(f = factor(c("a"), levels = c("a", "b")))
b2 <- record_batch(f = factor(c("c"), levels = c("c", "d")))
b3 <- record_batch(f = factor(NA, levels = "a"))
b4 <- record_batch(f = factor())
res <- tibble::tibble(f = factor(c("a", "c", NA), levels = c("a", "b", "c", "d")))
tab <- Table$create(b1, b2, b3, b4)
expect_identical(as.data.frame(tab), res)
})
test_that("Table$SelectColumns()", {
tab <- Table$create(x = 1:10, y = 1:10)
expect_equal(tab$SelectColumns(0L), Table$create(x = 1:10))
expect_error(tab$SelectColumns(2:4))
expect_error(tab$SelectColumns(""))
})
test_that("Table name assignment", {
tab <- Table$create(x = 1:10, y = 1:10)
expect_identical(names(tab), c("x", "y"))
names(tab) <- c("a", "b")
expect_identical(names(tab), c("a", "b"))
expect_error(names(tab) <- "f")
expect_error(names(tab) <- letters)
expect_error(names(tab) <- character(0))
expect_error(names(tab) <- NULL)
expect_error(names(tab) <- c(TRUE, FALSE))
})
test_that("Table$create() with different length columns", {
msg <- "All columns must have the same length"
expect_error(Table$create(a = 1:5, b = 1:6), msg)
})
test_that("Table$create() scalar recycling with vectors", {
expect_data_frame(
Table$create(a = 1:10, b = 5),
tibble::tibble(a = 1:10, b = 5)
)
})
test_that("Table$create() scalar recycling with Scalars, Arrays, and ChunkedArrays", {
  expected <- tibble::tibble(a = 1:10, b = 5)
  # A length-1 Scalar, Array, or ChunkedArray is recycled just like an
  # R scalar would be.
  expect_data_frame(
    Table$create(a = Array$create(1:10), b = Scalar$create(5)),
    expected
  )
  expect_data_frame(
    Table$create(a = Array$create(1:10), b = Array$create(5)),
    expected
  )
  expect_data_frame(
    Table$create(a = Array$create(1:10), b = ChunkedArray$create(5)),
    expected
  )
})
test_that("Table$create() no recycling with tibbles", {
  # Whole data frames are never recycled: their row counts must agree.
  row_count_msg <- "All input tibbles or data.frames must have the same number of rows"
  expect_error(
    Table$create(
      tibble::tibble(a = 1:10, b = 5),
      tibble::tibble(a = 1, b = 5)
    ),
    regexp = row_count_msg
  )
  expect_error(
    Table$create(
      tibble::tibble(a = 1:10, b = 5),
      tibble::tibble(a = 1)
    ),
    regexp = row_count_msg
  )
})
test_that("Tables can be combined with concat_tables()", {
  # Mismatched schemas fail outright when unification is disabled.
  expect_error(
    concat_tables(arrow_table(a = 1:10), arrow_table(a = c("a", "b")), unify_schemas = FALSE),
    regexp = "Schema at index 2 does not match the first schema"
  )
  # Unification still fails when a column's types are truly incompatible.
  expect_error(
    concat_tables(arrow_table(a = 1:10), arrow_table(a = c("a", "b")), unify_schemas = TRUE),
    regexp = "Unable to merge: Field a has incompatible types: int32 vs string"
  )
  # At least one table must be supplied.
  expect_error(
    concat_tables(),
    regexp = "Must pass at least one Table"
  )
  # When schemas are unified, columns missing from a table are null-filled.
  expect_equal(
    concat_tables(
      arrow_table(a = 1:5),
      arrow_table(a = 6:7, b = c("d", "e"))
    ),
    arrow_table(a = 1:7, b = c(rep(NA, 5), "d", "e"))
  )
  # A single argument round-trips unchanged.
  single <- arrow_table(a = 1:10)
  expect_equal(single, concat_tables(single))
})
test_that("Table supports rbind", {
  # rbind refuses tables whose schemas differ.
  expect_error(
    rbind(arrow_table(a = 1:10), arrow_table(a = c("a", "b"))),
    regexp = "Schema at index 2 does not match the first schema"
  )
  pieces <- list(
    arrow_table(a = 1:10, b = Scalar$create("x")),
    arrow_table(a = 2:42, b = Scalar$create("y")),
    arrow_table(a = 8:10, b = Scalar$create("z"))
  )
  # Row-binding Tables must match row-binding their data.frame equivalents.
  reference <- Table$create(do.call(rbind, lapply(pieces, as.data.frame)))
  combined <- do.call(rbind, pieces)
  expect_equal(combined, reference, ignore_attr = TRUE)
  # Appending a zero-row table is a no-op.
  base_tbl <- arrow_table(a = 1:10, b = Scalar$create("x"))
  expect_equal(
    rbind(base_tbl, arrow_table(a = integer(0), b = character(0))),
    base_tbl
  )
  # rbind() with a single argument returns the table unchanged.
  expect_equal(rbind(base_tbl), base_tbl)
})
test_that("Table supports cbind", {
  # Duplicate column names across inputs produce a snapshot-recorded error.
  expect_snapshot_error(
    cbind(
      arrow_table(a = 1:10),
      arrow_table(a = c("a", "b"))
    )
  )
  # Non-scalar inputs (here: 10 rows vs 0 rows) must have matching row counts.
  expect_error(
    cbind(arrow_table(a = 1:10), arrow_table(b = character(0))),
    regexp = "Non-scalar inputs must have an equal number of rows"
  )
  # Column-binding several tables should match cbind on their tibble
  # equivalents (scalar columns recycled to 10 rows).
  actual <- cbind(
    arrow_table(a = 1:10, b = Scalar$create("x")),
    arrow_table(a = 11:20, b = Scalar$create("y")),
    arrow_table(c = 1:10)
  )
  expected <- arrow_table(cbind(
    tibble::tibble(a = 1:10, b = "x"),
    tibble::tibble(a = 11:20, b = "y"),
    tibble::tibble(c = 1:10)
  ))
  expect_equal(actual, expected, ignore_attr = TRUE)
  # cbind() with one argument returns identical table
  expected <- arrow_table(a = 1:10)
  expect_equal(expected, cbind(expected))
  # Handles Arrow arrays and chunked arrays
  expect_equal(
    cbind(arrow_table(a = 1:2), b = Array$create(4:5)),
    arrow_table(a = 1:2, b = 4:5)
  )
  expect_equal(
    cbind(arrow_table(a = 1:2), b = chunked_array(4, 5)),
    arrow_table(a = 1:2, b = chunked_array(4, 5))
  )
  # Handles data.frame
  if (getRversion() >= "4.0.0") {
    # Prior to R 4.0, cbind would short-circuit to the data.frame implementation
    # if **any** of the arguments are a data.frame.
    expect_equal(
      cbind(arrow_table(a = 1:2), data.frame(b = 4:5)),
      arrow_table(a = 1:2, b = 4:5)
    )
  }
  # Handles factors
  expect_equal(
    cbind(arrow_table(a = 1:2), b = factor(c("a", "b"))),
    arrow_table(a = 1:2, b = factor(c("a", "b")))
  )
  # Handles scalar values
  expect_equal(
    cbind(arrow_table(a = 1:2), b = "x"),
    arrow_table(a = 1:2, b = c("x", "x"))
  )
  # Handles zero rows
  # NOTE(review): there is a trailing comma after the expected argument below,
  # which creates an empty argument in the call — confirm this evaluates
  # cleanly under expect_equal() and consider removing it.
  expect_equal(
    cbind(arrow_table(a = character(0)), b = Array$create(numeric(0)), c = integer(0)),
    arrow_table(a = character(0), b = numeric(0), c = integer(0)),
  )
  # Rejects unnamed arrays, even in cases where no named arguments are passed
  expect_error(
    cbind(arrow_table(a = 1:2), b = 3:4, 5:6),
    regexp = "Vector and array arguments must have names"
  )
  expect_error(
    cbind(arrow_table(a = 1:2), 3:4, 5:6),
    regexp = "Vector and array arguments must have names"
  )
})
test_that("cbind.Table handles record batches and tables", {
  # R 3.6 cbind dispatch rules cause cbind to fall back to default impl if
  # there are multiple arguments with distinct cbind implementations
  skip_if(getRversion() < "4.0.0", "R 3.6 cbind dispatch rules prevent this behavior")
  combined <- cbind(arrow_table(a = 1L:2L), record_batch(b = 4:5))
  expect_equal(combined, arrow_table(a = 1L:2L, b = 4:5))
})
test_that("ARROW-11769/ARROW-17085 - grouping preserved in table creation", {
  skip_if_not_available("dataset")
  src <- tibble::tibble(
    int = 1:10,
    fct = factor(rep(c("A", "B"), 5)),
    fct2 = factor(rep(c("C", "D"), each = 5))
  )
  # An ungrouped tibble stays ungrouped after conversion to a Table.
  expect_identical(
    src %>%
      Table$create() %>%
      dplyr::group_vars(),
    dplyr::group_vars(src)
  )
  # Grouping applied before conversion survives it.
  expect_identical(
    src %>%
      dplyr::group_by(fct, fct2) %>%
      Table$create() %>%
      dplyr::group_vars(),
    c("fct", "fct2")
  )
})
test_that("ARROW-12729 - length returns number of columns in Table", {
  src <- tibble::tibble(
    int = 1:10,
    fct = factor(rep(c("A", "B"), 5)),
    fct2 = factor(rep(c("C", "D"), each = 5))
  )
  # length() counts columns (like a data.frame), not rows.
  expect_identical(length(Table$create(!!!src)), 3L)
})
test_that("as_arrow_table() works for Table", {
  tbl <- arrow_table(col1 = 1L, col2 = "two")
  # Identity case: a Table passes through unchanged.
  expect_identical(as_arrow_table(tbl), tbl)
  # Supplying a schema casts the columns to the requested types.
  expect_equal(
    as_arrow_table(tbl, schema = schema(col1 = float64(), col2 = string())),
    arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
  )
})
test_that("as_arrow_table() works for RecordBatch", {
  expected <- arrow_table(col1 = 1L, col2 = "two")
  batch <- record_batch(col1 = 1L, col2 = "two")
  # A RecordBatch converts to an equivalent single-chunk Table.
  expect_equal(as_arrow_table(batch), expected)
  # A schema argument casts the batch's columns during conversion.
  expect_equal(
    as_arrow_table(batch, schema = schema(col1 = float64(), col2 = string())),
    arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
  )
})
test_that("as_arrow_table() works for data.frame()", {
  expected <- arrow_table(col1 = 1L, col2 = "two")
  df <- tibble::tibble(col1 = 1L, col2 = "two")
  # A tibble converts to an equivalent Table.
  expect_equal(as_arrow_table(df), expected)
  # A schema argument controls the resulting column types.
  expect_equal(
    as_arrow_table(
      df,
      schema = schema(col1 = float64(), col2 = string())
    ),
    arrow_table(col1 = Array$create(1, type = float64()), col2 = "two")
  )
})
test_that("as_arrow_table() errors for invalid input", {
  # Objects with no as_arrow_table() method raise a classed error so
  # callers can catch it specifically.
  expect_error(
    as_arrow_table("no as_arrow_table() method"),
    class = "arrow_no_method_as_arrow_table"
  )
})
test_that("num_rows method not susceptible to integer overflow", {
  # Gated behind an opt-in flag: allocating .Machine$integer.max bytes is
  # too expensive for routine CI runs.
  skip_if_not_running_large_memory_tests()
  small_array <- Array$create(raw(1))
  big_array <- Array$create(raw(.Machine$integer.max))
  big_chunked_array <- chunked_array(big_array, small_array)
  # LargeString array with data buffer > MAX_INT32
  big_string_array <- Array$create(make_big_string())
  small_table <- Table$create(col = small_array)
  big_table <- Table$create(col = big_chunked_array)
  # Sizes/lengths that fit in int32 come back as integer; ones that would
  # overflow int32 are returned as double instead.
  expect_type(big_array$nbytes(), "integer")
  expect_type(big_chunked_array$nbytes(), "double")
  expect_type(length(big_array), "integer")
  expect_type(length(big_chunked_array), "double")
  expect_type(small_table$num_rows, "integer")
  expect_type(big_table$num_rows, "double")
  # The third buffer's size exceeds 2^31 - 1, confirming no truncation.
  expect_identical(big_string_array$data()$buffers[[3]]$size, 2148007936)
})
test_that("can create empty table from schema", {
  target_schema <- schema(
    col1 = float64(),
    col2 = string(),
    col3 = vctrs_extension_type(integer())
  )
  # A schema alone (no columns) yields a zero-row Table with that schema,
  # including extension types.
  empty_tbl <- Table$create(schema = target_schema)
  expect_r6_class(empty_tbl, "Table")
  expect_equal(nrow(empty_tbl), 0)
  expect_equal(empty_tbl$schema, target_schema)
})
|
\name{disc.ef}
\alias{disc.ef}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Discretization using the method of equal frequencies}
\description{
Unsupervised discretization using intervals of equal frequencies
}
\usage{
disc.ef(data, varcon, k,out=c("symb","num"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{The dataset to be discretized}
\item{varcon}{A vector containing the continuous features}
\item{k}{ The number of intervals to be used}
\item{out}{To get the discretized dataset in a numerical format write "num". To get
the discretized dataset in an interval format write "symb"}
}
\value{
Returns a new data matrix with discretized values.
}
\references{
Kantardzic M. (2003). Data Mining: Concepts, Models, methods, and Algorithms.
John Wiley. New York.}
\author{Edgar Acuna}
\seealso{\code{\link{disc.1r}}, \code{\link{disc.ew}},\code{\link{chiMerge}}}
\examples{
#Discretization using the equal frequency method
data(bupa)
bupa.disc.ef=disc.ef(bupa,1:6,8,out="symb")
}
\keyword{Discretization}
|
/man/disc.ef.Rd
|
no_license
|
neslon/dprep
|
R
| false
| false
| 1,106
|
rd
|
\name{disc.ef}
\alias{disc.ef}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Discretization using the method of equal frequencies}
\description{
Unsupervised discretization using intervals of equal frequencies
}
\usage{
disc.ef(data, varcon, k,out=c("symb","num"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{The dataset to be discretized}
\item{varcon}{A vector containing the continuous features}
\item{k}{ The number of intervals to be used}
\item{out}{To get the discretized dataset in a numerical format write "num". To get
the discretized dataset in an interval format write "symb"}
}
\value{
Returns a new data matrix with discretized values.
}
\references{
Kantardzic M. (2003). Data Mining: Concepts, Models, methods, and Algorithms.
John Wiley. New York.}
\author{Edgar Acuna}
\seealso{\code{\link{disc.1r}}, \code{\link{disc.ew}},\code{\link{chiMerge}}}
\examples{
#Discretization using the equal frequency method
data(bupa)
bupa.disc.ef=disc.ef(bupa,1:6,8,out="symb")
}
\keyword{Discretization}
|
# Regression input for palm:::euc_distances captured by a libFuzzer/valgrind
# harness: empty x1/y1/y2 coordinate vectors paired with an all-zero x2.
# Exercises the degenerate zero-length case; str() prints the result so the
# run fails loudly if the call crashes or returns garbage.
testlist <- list(x1 = numeric(0), x2 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y1 = numeric(0), y2 = numeric(0))
result <- do.call(palm:::euc_distances,testlist)
str(result)
|
/palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612968615-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 386
|
r
|
testlist <- list(x1 = numeric(0), x2 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y1 = numeric(0), y2 = numeric(0))
result <- do.call(palm:::euc_distances,testlist)
str(result)
|
# This script creates the "Categories.txt" file which is used to add group
# membership information to the epigenetic pipeline output.
# Set working directory.
# NOTE(review): hard-coded absolute Windows path; the script fails on any
# other machine — consider a project-relative path or here::here().
setwd("C:/Dev/Projects/Epigenetics/EMAP")
# Load the appropriate annotation file.
VERSION <- "v2"
annotationFolder <- file.path("Annotations", VERSION)
annotations <- read.table(file.path(annotationFolder, "EDMA.Annotation.txt"), header=TRUE, sep="\t")
# Load the relevant helper functions.
source("Scripts/CategoryEnrichment.R")
# Categorize lengths of CpG islands.
# Thresholds are the 20th/80th percentiles of island length, computed only
# over probes that actually have an island (length != 0, non-NA).
thresholds <- quantile(annotations$CpG_Length[annotations$CpG_Length!=0 & !is.na(annotations$CpG_Length)], c(0.20, 0.80))
lengthCategory <- vector(length=nrow(annotations))
lengthCategory[annotations$CpG_Length<thresholds[1]] <- "Small"
lengthCategory[annotations$CpG_Length>thresholds[2]] <- "Long"
lengthCategory[annotations$CpG_Length==0] <- "No-CpG-Island"
lengthCategory[annotations$CpG_Length>=thresholds[1] & annotations$CpG_Length<=thresholds[2]] <- "Intermediate"
lengthCategory[is.na(annotations$CpG_Length)] <- "Unknown"
# Categorize CpG densities.
# NOTE(review): the quantile filter below uses CpG_Length (not CpG_Density),
# presumably to restrict to probes that have a CpG island — confirm intended.
thresholds <- quantile(annotations$CpG_Density[annotations$CpG_Length!=0 & !is.na(annotations$CpG_Length)], c(0.20, 0.80))
densityCategory <- vector(length=nrow(annotations))
densityCategory[annotations$CpG_Density<thresholds[1]] <- "Low-Density"
densityCategory[annotations$CpG_Density>thresholds[2]] <- "High-Density"
densityCategory[annotations$CpG_Density==0] <- "No-CpG-Island"
densityCategory[annotations$CpG_Density>=thresholds[1] & annotations$CpG_Density<=thresholds[2]] <- "Intermediate-Density"
densityCategory[is.na(annotations$CpG_Density)] <- "Unknown"
# Categorize by type of genic region.
# Assignment order matters: later statements overwrite earlier ones, so
# Exon takes precedence over Intron, which takes precedence over the
# promoter categories; Unknown (no chromosome) wins over everything.
geneRegionTypeCategory <- rep("No-nearby-gene", length=nrow(annotations))
geneRegionTypeCategory[annotations$Distal_Promoter != ""] <- "Distal-Promoter"
geneRegionTypeCategory[annotations$Promoter != ""] <- "Promoter"
geneRegionTypeCategory[annotations$Proximal_Promoter != ""] <- "Proximal-Promoter"
geneRegionTypeCategory[annotations$Intron != ""] <- "Intronic"
geneRegionTypeCategory[annotations$Exon != ""] <- "Exonic"
geneRegionTypeCategory[annotations$Chromosome==""] <- "Unknown"
# Categorize by proximity to CpG Islands
proximityCategory <- as.character(annotations$UCSC_CpG_Proximity)
proximityCategory[proximityCategory=="Shore"] <- "CpG Shore"
proximityCategory[proximityCategory=="Shelf"] <- "CpG Shelf"
proximityCategory[proximityCategory=="Island"] <- "CpG Islands"
proximityCategory[proximityCategory==""] <- "Unknown"
# Replace the first space with a hyphen so labels are single tokens.
proximityCategory <- sub(" ", "-", proximityCategory, fixed=TRUE)
# Categorize repeat classes.
# Collapse "family/subfamily" repeat labels down to the family name;
# an empty string means the fragment contains no annotated repeat.
repeatClasses <- sub("/.*$", "", gsub("/.*? ", " ", annotations$Fragment_RepeatClass))
repeatClasses[repeatClasses==""] <- "No_Repeats"
repeatMatrix <- buildCategoryMatrix(repeatClasses)
# Build a matrix containing all categories.
# Only the major repeat families are kept as indicator columns.
categories <- cbind(lengthCategory, densityCategory, geneRegionTypeCategory, proximityCategory,
                    repeatMatrix[,colnames(repeatMatrix) %in% c("No_Repeats", "SINE", "LINE", "Simple_repeat", "LTR", "Low_complexity", "DNA")])
row.names(categories) <- annotations$Probe
# Save the categories.
write.table(categories, file.path(annotationFolder, "Categories.txt"), sep="\t", col.names=TRUE, row.names=TRUE, quote=FALSE)
|
/Scripts/BuildCategoryAnnotation.R
|
no_license
|
ericfournier2/EMAP
|
R
| false
| false
| 3,357
|
r
|
# This script creates the "Categories.txt" file which is used to add group
# membership information to the epigenetic pipeline output.
# Set working directory.
setwd("C:/Dev/Projects/Epigenetics/EMAP")
# Load the appropriate annotation file.
VERSION <- "v2"
annotationFolder <- file.path("Annotations", VERSION)
annotations <- read.table(file.path(annotationFolder, "EDMA.Annotation.txt"), header=TRUE, sep="\t")
# Load the relevant helper functions.
source("Scripts/CategoryEnrichment.R")
# Categorize lengths of CpG islands.
thresholds <- quantile(annotations$CpG_Length[annotations$CpG_Length!=0 & !is.na(annotations$CpG_Length)], c(0.20, 0.80))
lengthCategory <- vector(length=nrow(annotations))
lengthCategory[annotations$CpG_Length<thresholds[1]] <- "Small"
lengthCategory[annotations$CpG_Length>thresholds[2]] <- "Long"
lengthCategory[annotations$CpG_Length==0] <- "No-CpG-Island"
lengthCategory[annotations$CpG_Length>=thresholds[1] & annotations$CpG_Length<=thresholds[2]] <- "Intermediate"
lengthCategory[is.na(annotations$CpG_Length)] <- "Unknown"
# Categorize CpG densities.
thresholds <- quantile(annotations$CpG_Density[annotations$CpG_Length!=0 & !is.na(annotations$CpG_Length)], c(0.20, 0.80))
densityCategory <- vector(length=nrow(annotations))
densityCategory[annotations$CpG_Density<thresholds[1]] <- "Low-Density"
densityCategory[annotations$CpG_Density>thresholds[2]] <- "High-Density"
densityCategory[annotations$CpG_Density==0] <- "No-CpG-Island"
densityCategory[annotations$CpG_Density>=thresholds[1] & annotations$CpG_Density<=thresholds[2]] <- "Intermediate-Density"
densityCategory[is.na(annotations$CpG_Density)] <- "Unknown"
# Categorize by type of genic region.
geneRegionTypeCategory <- rep("No-nearby-gene", length=nrow(annotations))
geneRegionTypeCategory[annotations$Distal_Promoter != ""] <- "Distal-Promoter"
geneRegionTypeCategory[annotations$Promoter != ""] <- "Promoter"
geneRegionTypeCategory[annotations$Proximal_Promoter != ""] <- "Proximal-Promoter"
geneRegionTypeCategory[annotations$Intron != ""] <- "Intronic"
geneRegionTypeCategory[annotations$Exon != ""] <- "Exonic"
geneRegionTypeCategory[annotations$Chromosome==""] <- "Unknown"
# Categorize by proximity to CpG Islands
proximityCategory <- as.character(annotations$UCSC_CpG_Proximity)
proximityCategory[proximityCategory=="Shore"] <- "CpG Shore"
proximityCategory[proximityCategory=="Shelf"] <- "CpG Shelf"
proximityCategory[proximityCategory=="Island"] <- "CpG Islands"
proximityCategory[proximityCategory==""] <- "Unknown"
proximityCategory <- sub(" ", "-", proximityCategory, fixed=TRUE)
# Categorize repeat classes.
repeatClasses <- sub("/.*$", "", gsub("/.*? ", " ", annotations$Fragment_RepeatClass))
repeatClasses[repeatClasses==""] <- "No_Repeats"
repeatMatrix <- buildCategoryMatrix(repeatClasses)
# Build a matrix containing all categories.
categories <- cbind(lengthCategory, densityCategory, geneRegionTypeCategory, proximityCategory,
repeatMatrix[,colnames(repeatMatrix) %in% c("No_Repeats", "SINE", "LINE", "Simple_repeat", "LTR", "Low_complexity", "DNA")])
row.names(categories) <- annotations$Probe
# Save the categories.
write.table(categories, file.path(annotationFolder, "Categories.txt"), sep="\t", col.names=TRUE, row.names=TRUE, quote=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/offinbest.R
\docType{data}
\name{offinmeta}
\alias{offinmeta}
\title{Metadata of Infiltration data of Offin Basin}
\format{A data frame with 15 rows and 18 variables:
\describe{
\item{ID}{Unique ID of the data}
\item{TownName}{Town Name}
\item{longitude}{longitudes in degrees}
\item{latitude}{latitudes in degrees}
\item{Time}{Total time of infiltration in minutes }
\item{N}{the Number of water volumes (cylinders)}
\item{q}{the mean infiltration rate in mm/s}
\item{Texture}{USDA soil texture}
\item{tho}{Initial soil water content (cm3/cm3)}
\item{thr}{residual water content(cm3/cm3)}
\item{pb}{bulk density (g/cm3)}
\item{n}{van Genuchten n parameter}
\item{m}{van Genuchten m parameter}
\item{n2}{Particle Size parameter}
\item{b}{Particle Size distribution parameter}
}}
\description{
A metadata of infiltration data of Offin Basin.
}
\author{
George Owusu
}
|
/man/offinmeta.Rd
|
no_license
|
Mactavish11/vadose
|
R
| false
| true
| 978
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/offinbest.R
\docType{data}
\name{offinmeta}
\alias{offinmeta}
\title{Metadata of Infiltration data of Offin Basin}
\format{A data frame with 15 rows and 18 variables:
\describe{
\item{ID}{Unique ID of the data}
\item{TownName}{Town Name}
\item{longitude}{longitudes in degrees}
\item{latitude}{latitudes in degrees}
\item{Time}{Total time of infiltration in minutes }
\item{N}{the Number of water volumes (cylinders)}
\item{q}{the mean infiltration rate in mm/s}
\item{Texture}{USDA soil texture}
\item{tho}{Initial soil water content (cm3/cm3)}
\item{thr}{residual water content(cm3/cm3)}
\item{pb}{bulk density (g/cm3)}
\item{n}{van Genuchten n parameter}
\item{m}{van Genuchten m parameter}
\item{n2}{Particle Size parameter}
\item{b}{Particle Size distribution parameter}
}}
\description{
A metadata of infiltration data of Offin Basin.
}
\author{
George Owusu
}
|
# Copyright 2017 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title Calculate annual (and seasonal) cumulative flows
#'
#' @description Calculates annual and seasonal total flows, volumetric or runoff yield flows, from a streamflow dataset. Calculates
#'    the statistics from all daily discharge values from all years, unless specified. For water year and seasonal data, the designated
#'    year is the year in which the year or season ends. Two-seasons and four-seasons per year are calculated, with each 6 and 3-month
#'    seasons starting with the first month of the year (Jan for calendar year, specified for water year). Each season is designated
#'    by the calendar or water year in which it occurs.
#'
#' @inheritParams calc_annual_stats
#' @inheritParams add_basin_area
#' @param use_yield Logical value indicating whether to use yield runoff, in mm, instead of volumetric. Default \code{FALSE}.
#' @param include_seasons Logical value indicating whether to include seasonal yields and total discharges. Default \code{FALSE}.
#'
#' @return A tibble data frame with the following columns, ending with '_TotalQ_m3' or '_Yield_mm' based on selection:
#'   \item{Year}{calendar or water year selected}
#'   \item{Annual_*}{annual total flow, in m3 or mm}
#'   Default seasonal columns:
#'   \item{MMM-MMM_*}{first of two season total flows, in m3 or mm}
#'   \item{MMM-MMM_*}{second of two season total flows, in m3 or mm}
#'   \item{MMM-MMM_*}{first of four season total flows, in m3 or mm}
#'   \item{MMM-MMM_*}{second of four season total flows, in m3 or mm}
#'   \item{MMM-MMM_*}{third of four season total flows, in m3 or mm}
#'   \item{MMM-MMM_*}{fourth of four season total flows, in m3 or mm}
#'   Transposing data creates a column of 'Statistics' and subsequent columns for each year selected.
#'
#' @examples
#' \dontrun{
#'
#' calc_annual_cumulative_stats(station_number = "08NM116",
#'                              water_year = TRUE,
#'                              water_year_start = 8,
#'                              include_seasons = TRUE)
#'
#' }
#' @export
calc_annual_cumulative_stats <- function(data = NULL,
                                         dates = Date,
                                         values = Value,
                                         groups = STATION_NUMBER,
                                         station_number = NULL,
                                         use_yield = FALSE,
                                         basin_area = NA,
                                         water_year = FALSE,
                                         water_year_start = 10,
                                         start_year = 0,
                                         end_year = 9999,
                                         exclude_years = NULL,
                                         include_seasons = FALSE,
                                         transpose = FALSE){
  ## ARGUMENT CHECKS
  ## ---------------
  use_yield_checks(use_yield)
  water_year_checks(water_year, water_year_start)
  years_checks(start_year, end_year, exclude_years)
  transpose_checks(transpose)
  include_seasons_checks(include_seasons)
  ## FLOW DATA CHECKS AND FORMATTING
  ## -------------------------------
  # Check if data is provided and import it
  flow_data <- flowdata_import(data = data,
                               station_number = station_number)
  # Save the original columns (to check for STATION_NUMBER col at end) and ungroup if necessary
  orig_cols <- names(flow_data)
  flow_data <- dplyr::ungroup(flow_data)
  # Check and rename columns
  flow_data <- format_all_cols(data = flow_data,
                               dates = as.character(substitute(dates)),
                               values = as.character(substitute(values)),
                               groups = as.character(substitute(groups)),
                               rm_other_cols = TRUE)
  ## SET UP BASIN AREA
  ## -----------------
  if (use_yield){
    flow_data <- add_basin_area(flow_data, basin_area = basin_area)
    # NOTE(review): Basin_Area_sqkm_temp is not read again in this function;
    # presumably consumed by a downstream helper — confirm before removing.
    flow_data$Basin_Area_sqkm_temp <- flow_data$Basin_Area_sqkm
  }
  ## PREPARE FLOW DATA
  ## -----------------
  # Fill missing dates, add date variables, and add AnalysisYear
  flow_data <- analysis_prep(data = flow_data,
                             water_year = water_year,
                             water_year_start = water_year_start,
                             year = TRUE)
  flow_data <- add_seasons(data = flow_data, water_year = water_year, water_year_start = water_year_start)
  # Add cumulative flows (daily yield in mm or daily volume in m3),
  # normalized to a common "daily_total" column for the summaries below
  if (use_yield){
    flow_data <- suppressWarnings(add_daily_yield(data = flow_data, basin_area = basin_area))
    names(flow_data)[names(flow_data) == "Yield_mm"] <- "daily_total"
  } else {
    flow_data <- add_daily_volume(data = flow_data)
    names(flow_data)[names(flow_data) == "Volume_m3"] <- "daily_total"
  }
  # Filter data FOR SELECTED YEARS FOR REMAINDER OF CALCS
  flow_data <- dplyr::filter(flow_data, AnalysisYear >= start_year & AnalysisYear <= end_year)
  ## CALCULATE STATISTICS
  ## --------------------
  # Calculate annual stats (na.rm = FALSE so incomplete years yield NA)
  annual_stats <- dplyr::summarize(dplyr::group_by(flow_data, STATION_NUMBER, AnalysisYear),
                                   Cumulative_total = sum(daily_total, na.rm = FALSE))
  annual_stats <- dplyr::ungroup(annual_stats)
  names(annual_stats)[names(annual_stats) == "Cumulative_total"] <- ifelse(!use_yield,
                                                                          paste("Annual_TotalQ_m3"),
                                                                          paste("Annual_Yield_mm"))
  annual_stats <- dplyr::rename(annual_stats, Year = AnalysisYear)
  # Calculate seasonal stats
  if(include_seasons) {
    # Calculate two-seasons stats
    seasons2_stats <- dplyr::summarize(dplyr::group_by(flow_data, STATION_NUMBER, AnalysisYear, Seasons2),
                                       Cumulative_total = sum(daily_total, na.rm = FALSE))
    seasons2_stats <- dplyr::ungroup(seasons2_stats)
    seasons2_stats <- dplyr::mutate(seasons2_stats, Seasons2 = paste0(Seasons2, "_", ifelse(!use_yield, paste("TotalQ_m3"), paste("Yield_mm"))))
    s2_order <- unique(seasons2_stats$Seasons2)
    seasons2_stats <- tidyr::spread(seasons2_stats, Seasons2, Cumulative_total)
    # all_of() makes the external character vector selection explicit and
    # errors on missing columns instead of selecting ambiguously
    seasons2_stats <- dplyr::select(seasons2_stats, STATION_NUMBER, Year = AnalysisYear, dplyr::all_of(s2_order))
    # Calculate four-seasons stats
    seasons4_stats <- dplyr::summarize(dplyr::group_by(flow_data, STATION_NUMBER, AnalysisYear, Seasons4),
                                       Cumulative_total = sum(daily_total, na.rm = FALSE))
    seasons4_stats <- dplyr::ungroup(seasons4_stats)
    seasons4_stats <- dplyr::mutate(seasons4_stats, Seasons4 = paste0(Seasons4, "_", ifelse(!use_yield, paste("TotalQ_m3"), paste("Yield_mm"))))
    s4_order <- unique(seasons4_stats$Seasons4)
    seasons4_stats <- tidyr::spread(seasons4_stats, Seasons4, Cumulative_total)
    seasons4_stats <- dplyr::select(seasons4_stats, STATION_NUMBER, Year = AnalysisYear, dplyr::all_of(s4_order))
    # Merge with annual stats
    annual_stats <- merge(annual_stats, seasons2_stats, by = c("STATION_NUMBER", "Year"), all = TRUE)
    annual_stats <- merge(annual_stats, seasons4_stats, by = c("STATION_NUMBER", "Year"), all = TRUE)
  }
  # Set any excluded years to NA (all columns except STATION_NUMBER and Year)
  annual_stats[annual_stats$Year %in% exclude_years, -(1:2)] <- NA
  # Transpose data if selected
  if(transpose){
    # NOTE(review): this sets scipen globally without restoring it; if that
    # is not intentional, wrap with on.exit(options(...), add = TRUE)
    options(scipen = 999)
    # Get list of columns to order the Statistic column after transposing
    stat_levels <- names(annual_stats[-(1:2)])
    annual_stats <- tidyr::gather(annual_stats, Statistic, Value, -Year, -STATION_NUMBER)
    annual_stats <- tidyr::spread(annual_stats, Year, Value)
    # Order the columns
    annual_stats$Statistic <- factor(annual_stats$Statistic, levels = stat_levels)
    annual_stats <- dplyr::arrange(annual_stats, STATION_NUMBER, Statistic)
  }
  # Give warning if any NA values
  missing_complete_yr_warning(annual_stats[, 3:ncol(annual_stats)])
  # Recheck if station_number/grouping was in original flow_data and rename or remove as necessary
  if(as.character(substitute(groups)) %in% orig_cols) {
    names(annual_stats)[names(annual_stats) == "STATION_NUMBER"] <- as.character(substitute(groups))
  } else {
    annual_stats <- dplyr::select(annual_stats, -STATION_NUMBER)
  }
  dplyr::as_tibble(annual_stats)
}
|
/R/calc_annual_cumulative_stats.R
|
permissive
|
MonkmanMH/fasstr
|
R
| false
| false
| 9,115
|
r
|
# Copyright 2017 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title Calculate annual (and seasonal) cumulative flows
#'
#' @description Calculates annual and seasonal total flows, volumetric or runoff yield flows, from a streamflow dataset. Calculates
#' the statistics from all daily discharge values from all years, unless specified. For water year and seasonal data, the designated
#' year is the year in which the year or season ends. Two-seasons and four-seasons per year are calculated, with each 6 and 3-month
#' seasons starting with the first month of the year (Jan for calendar year, specified for water year). Each season is designated
#' by the calendar or water year in which it occurs.
#'
#' @inheritParams calc_annual_stats
#' @inheritParams add_basin_area
#' @param use_yield Logical value indicating whether to use yield runoff, in mm, instead of volumetric. Default \code{FALSE}.
#' @param include_seasons Logical value indicating whether to include seasonal yields and total discharges. Default \code{FALSE}.
#'
#' @return A tibble data frame with the following columns, ending with '_TotalQ_m3' or '_Yield_mm' based on selection:
#' \item{Year}{calendar or water year selected}
#' \item{Annual_*}{annual total flow, in m3 or mm}
#' Default seasonal columns:
#' \item{MMM-MMM_*}{first of two season total flows, in m3 or mm}
#' \item{MMM-MMM_*}{second of two season total flows, in m3 or mm}
#' \item{MMM-MMM_*}{first of four season total flows, in m3 or mm}
#' \item{MMM-MMM_*}{second of four season total flows, in m3 or mm}
#' \item{MMM-MMM_*}{third of four season total flows, in m3 or mm}
#' \item{MMM-MMM_*}{fourth of four season total flows, in m3 or mm}
#' Transposing data creates a column of 'Statistics' and subsequent columns for each year selected.
#'
#' @examples
#' \dontrun{
#'
#' calc_annual_cumulative_stats(station_number = "08NM116",
#' water_year = TRUE,
#' water_year_start = 8,
#' include_seasons = TRUE)
#'
#' }
#' @export
calc_annual_cumulative_stats <- function(data = NULL,
dates = Date,
values = Value,
groups = STATION_NUMBER,
station_number = NULL,
use_yield = FALSE,
basin_area = NA,
water_year = FALSE,
water_year_start = 10,
start_year = 0,
end_year = 9999,
exclude_years = NULL,
include_seasons = FALSE,
transpose = FALSE){
## ARGUMENT CHECKS
## ---------------
use_yield_checks(use_yield)
water_year_checks(water_year, water_year_start)
years_checks(start_year, end_year, exclude_years)
transpose_checks(transpose)
include_seasons_checks(include_seasons)
## FLOW DATA CHECKS AND FORMATTING
## -------------------------------
# Check if data is provided and import it
flow_data <- flowdata_import(data = data,
station_number = station_number)
# Save the original columns (to check for STATION_NUMBER col at end) and ungroup if necessary
orig_cols <- names(flow_data)
flow_data <- dplyr::ungroup(flow_data)
# Check and rename columns
flow_data <- format_all_cols(data = flow_data,
dates = as.character(substitute(dates)),
values = as.character(substitute(values)),
groups = as.character(substitute(groups)),
rm_other_cols = TRUE)
## SET UP BASIN AREA
## -----------------
if (use_yield){
flow_data <- add_basin_area(flow_data, basin_area = basin_area)
flow_data$Basin_Area_sqkm_temp <- flow_data$Basin_Area_sqkm
}
## PREPARE FLOW DATA
## -----------------
# Fill missing dates, add date variables, and add AnalysisYear
flow_data <- analysis_prep(data = flow_data,
water_year = water_year,
water_year_start = water_year_start,
year = TRUE)
flow_data <- add_seasons(data = flow_data, water_year = water_year, water_year_start = water_year_start)
# Add cumulative flows
if (use_yield){
flow_data <- suppressWarnings(add_daily_yield(data = flow_data, basin_area = basin_area))
names(flow_data)[names(flow_data) == "Yield_mm"] <- "daily_total"
} else {
flow_data <- add_daily_volume(data = flow_data)
names(flow_data)[names(flow_data) == "Volume_m3"] <- "daily_total"
}
# Filter data FOR SELECTED YEARS FOR REMAINDER OF CALCS
flow_data <- dplyr::filter(flow_data, AnalysisYear >= start_year & AnalysisYear <= end_year)
## CALCULATE STATISTICS
## --------------------
# Calculate annual stats
annual_stats <- dplyr::summarize(dplyr::group_by(flow_data, STATION_NUMBER, AnalysisYear),
Cumulative_total = sum(daily_total, na.rm = FALSE))
annual_stats <- dplyr::ungroup(annual_stats)
names(annual_stats)[names(annual_stats) == "Cumulative_total"] <- ifelse(!use_yield,
paste("Annual_TotalQ_m3"),
paste("Annual_Yield_mm"))
annual_stats <- dplyr::rename(annual_stats, Year = AnalysisYear)
# Calculate seasonal stats
if(include_seasons) {
# Calculate two-seasons stats
seasons2_stats <- dplyr::summarize(dplyr::group_by(flow_data, STATION_NUMBER, AnalysisYear, Seasons2),
Cumulative_total = sum(daily_total, na.rm = FALSE))
seasons2_stats <- dplyr::ungroup(seasons2_stats)
seasons2_stats <- dplyr::mutate(seasons2_stats, Seasons2 = paste0(Seasons2, "_", ifelse(!use_yield, paste("TotalQ_m3"), paste("Yield_mm"))))
s2_order <- unique(seasons2_stats$Seasons2)
seasons2_stats <- tidyr::spread(seasons2_stats, Seasons2, Cumulative_total)
seasons2_stats <- dplyr::select(seasons2_stats, STATION_NUMBER, Year = AnalysisYear, s2_order)
# Calculate four-seasons stats
seasons4_stats <- dplyr::summarize(dplyr::group_by(flow_data, STATION_NUMBER, AnalysisYear, Seasons4),
Cumulative_total = sum(daily_total, na.rm = FALSE))
seasons4_stats <- dplyr::ungroup(seasons4_stats)
seasons4_stats <- dplyr::mutate(seasons4_stats, Seasons4 = paste0(Seasons4, "_", ifelse(!use_yield, paste("TotalQ_m3"), paste("Yield_mm"))))
s4_order <- unique(seasons4_stats$Seasons4)
seasons4_stats <- tidyr::spread(seasons4_stats, Seasons4, Cumulative_total)
seasons4_stats <- dplyr::select(seasons4_stats, STATION_NUMBER, Year = AnalysisYear, s4_order)
# Merge with annual stats
annual_stats <- merge(annual_stats, seasons2_stats, by = c("STATION_NUMBER", "Year"), all = TRUE)
annual_stats <- merge(annual_stats, seasons4_stats, by = c("STATION_NUMBER", "Year"), all = TRUE)
}
# Make an excluded years NA
annual_stats[annual_stats$Year %in% exclude_years, -(1:2)] <- NA
# Transpose data if selected
if(transpose){
options(scipen = 999)
# Get list of columns to order the Statistic column after transposing
stat_levels <- names(annual_stats[-(1:2)])
annual_stats <- tidyr::gather(annual_stats, Statistic, Value, -Year, -STATION_NUMBER)
annual_stats <- tidyr::spread(annual_stats, Year, Value)
# Order the columns
annual_stats$Statistic <- factor(annual_stats$Statistic, levels = stat_levels)
annual_stats <- dplyr::arrange(annual_stats, STATION_NUMBER, Statistic)
}
# Give warning if any NA values
missing_complete_yr_warning(annual_stats[, 3:ncol(annual_stats)])
# Recheck if station_number/grouping was in original flow_data and rename or remove as necessary
if(as.character(substitute(groups)) %in% orig_cols) {
names(annual_stats)[names(annual_stats) == "STATION_NUMBER"] <- as.character(substitute(groups))
} else {
annual_stats <- dplyr::select(annual_stats, -STATION_NUMBER)
}
dplyr::as_tibble(annual_stats)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STDEV.R
\name{STDEV}
\alias{STDEV}
\title{Basic STDEV function from excel}
\usage{
STDEV(
number1,
number2 = NA,
number3 = NA,
number4 = NA,
number5 = NA,
number6 = NA,
number7 = NA,
number8 = NA,
number9 = NA,
number10 = NA,
number11 = NA,
number12 = NA,
number13 = NA,
number14 = NA,
number15 = NA,
number16 = NA,
number17 = NA,
number18 = NA,
number19 = NA,
number20 = NA,
number21 = NA,
number22 = NA,
number23 = NA,
number24 = NA
)
}
\arguments{
\item{number1, number2, number3, number4, number5, number6, number7, number8, number9, number10, number11, number12, number13, number14, number15, number16, number17, number18, number19, number20, number21, number22, number23, number24}{The numbers from which the function should calculate the standard deviation. The same goes for the other number arguments as well. If you want to specify several numbers, simply call STDEV(2,2,1,2); there is no need to put them into a vector.}
}
\value{
In this example we simply calculate standard deviation of the given numbers. Function will always return numeric class.
}
\description{
It acts similarly to Excel's STDEV function. It calculates the standard deviation of the numbers you give it.
}
\examples{
STDEV(2,1,3,1)
}
|
/man/STDEV.Rd
|
no_license
|
cran/ExcelFunctionsR
|
R
| false
| true
| 1,325
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STDEV.R
\name{STDEV}
\alias{STDEV}
\title{Basic STDEV function from excel}
\usage{
STDEV(
number1,
number2 = NA,
number3 = NA,
number4 = NA,
number5 = NA,
number6 = NA,
number7 = NA,
number8 = NA,
number9 = NA,
number10 = NA,
number11 = NA,
number12 = NA,
number13 = NA,
number14 = NA,
number15 = NA,
number16 = NA,
number17 = NA,
number18 = NA,
number19 = NA,
number20 = NA,
number21 = NA,
number22 = NA,
number23 = NA,
number24 = NA
)
}
\arguments{
\item{number1, number2, number3, number4, number5, number6, number7, number8, number9, number10, number11, number12, number13, number14, number15, number16, number17, number18, number19, number20, number21, number22, number23, number24}{The numbers from which the function should calculate the standard deviation. The same goes for the other number arguments as well. If you want to specify several numbers, simply call STDEV(2,2,1,2); there is no need to put them into a vector.}
}
\value{
In this example we simply calculate standard deviation of the given numbers. Function will always return numeric class.
}
\description{
It acts similiarly to Excel's STDEV function. It calculates the standard deviation from the numbers you give it.
}
\examples{
STDEV(2,1,3,1)
}
|
#' Convert a Named Vector to a Data Frame
#'
#' Turns a named vector into a two-column data frame: the element names go
#' into column \code{name} and the element values, coerced to character, go
#' into column \code{value}.
#'
#' @param x named vector
#' @return data frame with columns \code{name}, containing the names of \code{x}
#'   and \code{value}, containing the values of \code{x} (as character)
#' @export
#' @examples
#' namedVectorToDataFrame(c(a = 1, b = 2, c = 3))
namedVectorToDataFrame <- function(x)
{
  element_names <- names(x)
  element_values <- as.character(x)
  noFactorDataFrame(name = element_names, value = element_values)
}
|
/R/namedVectorToDataFrame.R
|
permissive
|
KWB-R/kwb.utils
|
R
| false
| false
| 386
|
r
|
#' Convert a Named Vector to a Data Frame
#'
#' Turns a named vector into a two-column data frame: the element names go
#' into column \code{name} and the element values, coerced to character, go
#' into column \code{value}.
#'
#' @param x named vector
#' @return data frame with columns \code{name}, containing the names of \code{x}
#'   and \code{value}, containing the values of \code{x} (as character)
#' @export
#' @examples
#' namedVectorToDataFrame(c(a = 1, b = 2, c = 3))
namedVectorToDataFrame <- function(x)
{
  element_names <- names(x)
  element_values <- as.character(x)
  noFactorDataFrame(name = element_names, value = element_values)
}
|
# Classification tree on the ISLR Carseats data: fit on all rows, then
# train/test split, cross-validation, and pruning.
# Call Libraries and Attach Data
require(ISLR)
require(tree)
attach(Carseats)
# Visualize Sales
hist(Sales)
# Create New Dataframe with Custom Variable
# FIX: tree() requires a *factor* response for classification; since R 4.0
# data.frame() no longer converts strings to factors, so coerce explicitly.
High <- as.factor(ifelse(Sales <= 8, "Yes", "No"))
Carseats <- data.frame(Carseats, High)
# Create and examine tree model fit on the full data
tree.carseats <- tree(High ~ . - Sales, data = Carseats)
summary(tree.carseats)
plot(tree.carseats)
text(tree.carseats)
# Train on 300 random rows and predict on the held-out rows
set.seed(3)
train <- sample(1:nrow(Carseats), 300)
tree.carseats <- tree(High ~ . - Sales, Carseats, subset = train)
plot(tree.carseats)
text(tree.carseats, pretty = 0)
# BUG FIX: Carseats[-train] with a single index selects *columns* of a data
# frame; Carseats[-train, ] is needed to drop the training *rows*.
tree.pre <- predict(tree.carseats, Carseats[-train, ], type = "class")
with(Carseats[-train, ], table(tree.pre, High))
# Cross Validate Model (prune by misclassification rate)
cv.carseats <- cv.tree(tree.carseats, FUN = prune.misclass)
cv.carseats
plot(cv.carseats)
# Prune Tree and Check Results on the held-out rows
prune.carseats <- prune.misclass(tree.carseats, best = 13)
plot(prune.carseats)
text(prune.carseats)
prune.pre <- predict(prune.carseats, Carseats[-train, ], type = "class")
with(Carseats[-train, ], table(prune.pre, High))
|
/code/10_Decision Tree.R
|
no_license
|
felixschildorfer/Statistical-Learning-in-R
|
R
| false
| false
| 1,090
|
r
|
# Classification tree on the ISLR Carseats data: fit on all rows, then
# train/test split, cross-validation, and pruning.
# Call Libraries and Attach Data
require(ISLR)
require(tree)
attach(Carseats)
# Visualize Sales
hist(Sales)
# Create New Dataframe with Custom Variable
# FIX: tree() requires a *factor* response for classification; since R 4.0
# data.frame() no longer converts strings to factors, so coerce explicitly.
High <- as.factor(ifelse(Sales <= 8, "Yes", "No"))
Carseats <- data.frame(Carseats, High)
# Create and examine tree model fit on the full data
tree.carseats <- tree(High ~ . - Sales, data = Carseats)
summary(tree.carseats)
plot(tree.carseats)
text(tree.carseats)
# Train on 300 random rows and predict on the held-out rows
set.seed(3)
train <- sample(1:nrow(Carseats), 300)
tree.carseats <- tree(High ~ . - Sales, Carseats, subset = train)
plot(tree.carseats)
text(tree.carseats, pretty = 0)
# BUG FIX: Carseats[-train] with a single index selects *columns* of a data
# frame; Carseats[-train, ] is needed to drop the training *rows*.
tree.pre <- predict(tree.carseats, Carseats[-train, ], type = "class")
with(Carseats[-train, ], table(tree.pre, High))
# Cross Validate Model (prune by misclassification rate)
cv.carseats <- cv.tree(tree.carseats, FUN = prune.misclass)
cv.carseats
plot(cv.carseats)
# Prune Tree and Check Results on the held-out rows
prune.carseats <- prune.misclass(tree.carseats, best = 13)
plot(prune.carseats)
text(prune.carseats)
prune.pre <- predict(prune.carseats, Carseats[-train, ], type = "class")
with(Carseats[-train, ], table(prune.pre, High))
|
# Classify a single standard-normal draw into one of three bands:
# (-Inf, -1), [-1, 1], (1, Inf)
# --- -2 --- -1 --- 0 --- 1 --- 2 ---
rm(answer)
x <- rnorm(1)
answer <- if (x > 1) {
  "Greater than 1"
} else if (x >= -1) {
  "Between -1 and 1"
} else {
  "Less than -1"
}
|
/Practice Files/IfStatement.R
|
no_license
|
avinash-vijayakumar/R-Udemy
|
R
| false
| false
| 194
|
r
|
# Classify a single standard-normal draw into one of three bands:
# (-Inf, -1), [-1, 1], (1, Inf)
# --- -2 --- -1 --- 0 --- 1 --- 2 ---
rm(answer)
x <- rnorm(1)
answer <- if (x > 1) {
  "Greater than 1"
} else if (x >= -1) {
  "Between -1 and 1"
} else {
  "Less than -1"
}
|
## plot4.R
## Coursera Data Scientist
## Course 4: Exploratory Data analysis
## Course Project #2
## Due Sept 7 2014
## Renders a 2x2 panel (global active power, energy sub-metering, voltage,
## global reactive power vs. time) for 1-2 Feb 2007 and writes plot4.png.
## Step 1: Manually download and unzip the data into the working directory.
## Ensure that the "household_power_consumption.txt" file is in the same directory as the plot1.R file.
## Set the working directory to that directory.
## Step 2: Read text file. It's a text file, with a header, separated by ;, with ? as missing data.
dataset <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
## We only want certain days, so subset the data. NOTE that the date is not a date
## class, so rows are matched against the literal "d/m/yyyy" strings in column 1.
dataset2 <- dataset[(dataset[,1] == "1/2/2007" | dataset[,1] == "2/2/2007"),]
##########################################
## Plot 4:
## Create a timestamp (POSIXlt) by pasting the Date and Time columns together.
dataset2$Timestamp<-strptime(paste(dataset2$Date, dataset2$Time), format='%d/%m/%Y %H:%M:%S')
## First tell R that you want to save a PNG file. The default is 480p (480 x 480 pixels).
png(filename = "plot4.png")
## Then use the plot command as normal.
## mfcol fills column by column: panels land top-left, bottom-left,
## top-right, bottom-right.
par(mfcol = c(2,2))
## Panel 1 (top-left): global active power over time.
plot (dataset2$Timestamp,
      dataset2$Global_active_power,
      xlab ="",
      ylab ="Global Active Power (kilowatts)",
      type="l")
## Panel 2 (bottom-left): the three sub-metering series overlaid.
## type="n" draws an empty frame scaled to Sub_metering_1; series are added
## with lines() below.
plot (dataset2$Timestamp,
      dataset2$Sub_metering_1,
      xlab ="",
      ylab ="Energy sub metering",
      type="n")
## NOTE(review): ylab is not a graphical parameter of lines() and is ignored
## (may emit a warning); left in place to preserve behavior exactly.
lines (dataset2$Timestamp,
       dataset2$Sub_metering_1,
       ylab ="Energy sub metering",
       type="l")
lines (dataset2$Timestamp,
       dataset2$Sub_metering_2,
       col = "red",
       type="l")
lines (dataset2$Timestamp,
       dataset2$Sub_metering_3,
       col = "blue",
       type="l")
## Legend without a bounding box (bty = "n"), colored to match the series.
legend("topright",
       bty = "n",
       c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       lwd=c(2.5), ## Adjust line width.
       col=c("black","red","blue")
       )
## Panel 3 (top-right): voltage over time.
plot (dataset2$Timestamp,
      dataset2$Voltage,
      xlab ="datetime",
      ylab ="Voltage",
      type="l")
## Panel 4 (bottom-right): global reactive power over time.
plot (dataset2$Timestamp,
      dataset2$Global_reactive_power,
      xlab ="datetime",
      ylab ="Global_reactive_power",
      type="l")
## Then tell R that you are done outputting a file (closes the PNG device).
dev.off()
|
/plot4.R
|
no_license
|
DataJay/ExData_Plotting1
|
R
| false
| false
| 2,361
|
r
|
## plot4.R
## Coursera Data Scientist
## Course 4: Exploratory Data analysis
## Course Project #2
## Due Sept 7 2014
## Renders a 2x2 panel (global active power, energy sub-metering, voltage,
## global reactive power vs. time) for 1-2 Feb 2007 and writes plot4.png.
## Step 1: Manually download and unzip the data into the working directory.
## Ensure that the "household_power_consumption.txt" file is in the same directory as the plot1.R file.
## Set the working directory to that directory.
## Step 2: Read text file. It's a text file, with a header, separated by ;, with ? as missing data.
dataset <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
## We only want certain days, so subset the data. NOTE that the date is not a date
## class, so rows are matched against the literal "d/m/yyyy" strings in column 1.
dataset2 <- dataset[(dataset[,1] == "1/2/2007" | dataset[,1] == "2/2/2007"),]
##########################################
## Plot 4:
## Create a timestamp (POSIXlt) by pasting the Date and Time columns together.
dataset2$Timestamp<-strptime(paste(dataset2$Date, dataset2$Time), format='%d/%m/%Y %H:%M:%S')
## First tell R that you want to save a PNG file. The default is 480p (480 x 480 pixels).
png(filename = "plot4.png")
## Then use the plot command as normal.
## mfcol fills column by column: panels land top-left, bottom-left,
## top-right, bottom-right.
par(mfcol = c(2,2))
## Panel 1 (top-left): global active power over time.
plot (dataset2$Timestamp,
      dataset2$Global_active_power,
      xlab ="",
      ylab ="Global Active Power (kilowatts)",
      type="l")
## Panel 2 (bottom-left): the three sub-metering series overlaid.
## type="n" draws an empty frame scaled to Sub_metering_1; series are added
## with lines() below.
plot (dataset2$Timestamp,
      dataset2$Sub_metering_1,
      xlab ="",
      ylab ="Energy sub metering",
      type="n")
## NOTE(review): ylab is not a graphical parameter of lines() and is ignored
## (may emit a warning); left in place to preserve behavior exactly.
lines (dataset2$Timestamp,
       dataset2$Sub_metering_1,
       ylab ="Energy sub metering",
       type="l")
lines (dataset2$Timestamp,
       dataset2$Sub_metering_2,
       col = "red",
       type="l")
lines (dataset2$Timestamp,
       dataset2$Sub_metering_3,
       col = "blue",
       type="l")
## Legend without a bounding box (bty = "n"), colored to match the series.
legend("topright",
       bty = "n",
       c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       lwd=c(2.5), ## Adjust line width.
       col=c("black","red","blue")
       )
## Panel 3 (top-right): voltage over time.
plot (dataset2$Timestamp,
      dataset2$Voltage,
      xlab ="datetime",
      ylab ="Voltage",
      type="l")
## Panel 4 (bottom-right): global reactive power over time.
plot (dataset2$Timestamp,
      dataset2$Global_reactive_power,
      xlab ="datetime",
      ylab ="Global_reactive_power",
      type="l")
## Then tell R that you are done outputting a file (closes the PNG device).
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workspacesweb_operations.R
\name{workspacesweb_disassociate_ip_access_settings}
\alias{workspacesweb_disassociate_ip_access_settings}
\title{Disassociates IP access settings from a web portal}
\usage{
workspacesweb_disassociate_ip_access_settings(portalArn)
}
\arguments{
\item{portalArn}{[required] The ARN of the web portal.}
}
\description{
Disassociates IP access settings from a web portal.
See \url{https://www.paws-r-sdk.com/docs/workspacesweb_disassociate_ip_access_settings/} for full documentation.
}
\keyword{internal}
|
/cran/paws.end.user.computing/man/workspacesweb_disassociate_ip_access_settings.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 609
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workspacesweb_operations.R
\name{workspacesweb_disassociate_ip_access_settings}
\alias{workspacesweb_disassociate_ip_access_settings}
\title{Disassociates IP access settings from a web portal}
\usage{
workspacesweb_disassociate_ip_access_settings(portalArn)
}
\arguments{
\item{portalArn}{[required] The ARN of the web portal.}
}
\description{
Disassociates IP access settings from a web portal.
See \url{https://www.paws-r-sdk.com/docs/workspacesweb_disassociate_ip_access_settings/} for full documentation.
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AGread_Data.R
\docType{data}
\name{imu_to_check}
\alias{imu_to_check}
\title{IMU data to check}
\format{A data frame with 300 rows and 8 variables:
\describe{
\item{file_source_IMU}{The filename of the IMU file}
\item{date_processed_IMU}{The date the IMU file was processed}
\item{Timestamp}{The corresponding time for each row of data}
\item{Gyroscope_VM_DegPerS}{Gyroscope vector magnitude, in degrees per
second}
\item{mean_abs_Gyroscope_x_DegPerS}{Rotation in x axis, degrees per second}
\item{mean_abs_Gyroscope_y_DegPerS}{Rotation in y axis, degrees per second}
\item{mean_abs_Gyroscope_z_DegPerS}{Rotation in z axis, degrees per second}
\item{mean_magnetometer_direction}{Cardinal direction of magnetometer
signal, averaged over one second}
}}
\usage{
imu_to_check
}
\description{
A dataset for demonstrating checks that are applied to IMU data.
}
\keyword{datasets}
|
/AGread/man/imu_to_check.Rd
|
permissive
|
akhikolla/InformationHouse
|
R
| false
| true
| 998
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AGread_Data.R
\docType{data}
\name{imu_to_check}
\alias{imu_to_check}
\title{IMU data to check}
\format{A data frame with 300 rows and 8 variables:
\describe{
\item{file_source_IMU}{The filename of the IMU file}
\item{date_processed_IMU}{The date the IMU file was processed}
\item{Timestamp}{The corresponding time for each row of data}
\item{Gyroscope_VM_DegPerS}{Gyroscope vector magnitude, in degrees per
second}
\item{mean_abs_Gyroscope_x_DegPerS}{Rotation in x axis, degrees per second}
\item{mean_abs_Gyroscope_y_DegPerS}{Rotation in y axis, degrees per second}
\item{mean_abs_Gyroscope_z_DegPerS}{Rotation in z axis, degrees per second}
\item{mean_magnetometer_direction}{Cardinal direction of magnetometer
signal, averaged over one second}
}}
\usage{
imu_to_check
}
\description{
A dataset for demonstrating checks that are applied to IMU data.
}
\keyword{datasets}
|
#NOTE: the Java wrapper for this script first sources CNVPlottingLibrary.R
# On any error, dump call frames to "plotting_dump" for post-mortem inspection
# and exit with status 1 so the calling GATK process detects the failure.
options(error = quote({dump.frames(dumpto = "plotting_dump", to.file = TRUE); q(status = 1)})) # Useful for debugging
library(optparse)
library(data.table)
source('CNVPlottingLibrary.R')
# Command-line options: sample id, the standardized/denoised copy-ratio TSVs,
# the contig layout (names and lengths joined by a delimiter string), and the
# output directory/prefix for the PNGs and MAD text files.
option_list = list(
    make_option(c("--sample_name", "-s"), dest="sample_name", action="store"),
    make_option(c("--standardized_copy_ratios_file", "-t"), dest="standardized_copy_ratios_file", action="store"),
    make_option(c("--denoised_copy_ratios_file", "-d"), dest="denoised_copy_ratios_file", action="store"),
    make_option(c("--contig_names", "-c"), dest="contig_names", action="store"), #string with elements separated by "CONTIG_DELIMITER"
    make_option(c("--contig_lengths", "-l"), dest="contig_lengths", action="store"), #string with elements separated by "CONTIG_DELIMITER"
    make_option(c("--output_dir", "-o"), dest="output_dir", action="store"),
    make_option(c("--output_prefix", "-p"), dest="output_prefix", action="store"))
opt = parse_args(OptionParser(option_list=option_list))
sample_name = opt[["sample_name"]]
standardized_copy_ratios_file = opt[["standardized_copy_ratios_file"]]
denoised_copy_ratios_file = opt[["denoised_copy_ratios_file"]]
contig_names_string = opt[["contig_names"]]
contig_lengths_string = opt[["contig_lengths"]]
output_dir = opt[["output_dir"]]
output_prefix = opt[["output_prefix"]]
#check that input files exist; if not, quit with error code that GATK will pick up
if (!all(file.exists(c(standardized_copy_ratios_file, denoised_copy_ratios_file)))) {
    quit(save="no", status=1, runLast=FALSE)
}
# Split the delimiter-joined contig names/lengths back into lists.
contig_names = as.list(strsplit(contig_names_string, "CONTIG_DELIMITER")[[1]])
# NOTE(review): strsplit() yields character elements, so contig_lengths holds
# strings here; cumsum() below needs numeric input -- confirm the wrapper/R
# coercion produces numeric values upstream.
contig_lengths = as.list(strsplit(contig_lengths_string, "CONTIG_DELIMITER")[[1]])
# Genome-wide x-axis layout: cumulative contig end positions and the
# corresponding start offsets (first contig starts at 0).
contig_ends = cumsum(contig_lengths)
contig_starts = c(0, head(contig_ends, -1))
# Median of the absolute first differences of successive values -- the noise
# metric used to compare standardized vs. denoised copy ratios (note: this is
# not the classical MAD about the median).
CalculateMedianAbsoluteDeviation = function(dat) {
    successive_changes = abs(diff(dat))
    return(median(successive_changes))
}
#plotting is extracted to a function for debugging purposes
# Reads the standardized and denoised copy-ratio TSVs, writes four single-value
# text files with the median-absolute-deviation (MAD) noise metric
# (standardized, denoised, delta, scaled delta), then writes two two-panel PNGs
# comparing standardized (top) vs. denoised (bottom) copy ratios genome-wide:
# one with the full y range and one clipped at copy ratio 4.
# ReadTSV, SetUpPlot, and PlotCopyRatios come from CNVPlottingLibrary.R.
# NOTE(review): contig_starts and contig_ends are read from the global
# environment rather than passed as arguments.
WriteDenoisingPlots = function(sample_name, standardized_copy_ratios_file, denoised_copy_ratios_file, contig_names, output_dir, output_prefix) {
    standardized_copy_ratios_df = ReadTSV(standardized_copy_ratios_file)
    denoised_copy_ratios_df = ReadTSV(denoised_copy_ratios_file)
    #transform from log2 to linear copy ratio
    standardized_copy_ratios_df[["COPY_RATIO"]] = 2^standardized_copy_ratios_df[["LOG2_COPY_RATIO"]]
    denoised_copy_ratios_df[["COPY_RATIO"]] = 2^denoised_copy_ratios_df[["LOG2_COPY_RATIO"]]
    #determine copy-ratio interval midpoints (x coordinates for plotting)
    standardized_copy_ratios_df[["MIDDLE"]] = round((standardized_copy_ratios_df[["START"]] + standardized_copy_ratios_df[["END"]]) / 2)
    denoised_copy_ratios_df[["MIDDLE"]] = round((denoised_copy_ratios_df[["START"]] + denoised_copy_ratios_df[["END"]]) / 2)
    #write the MAD files (each holds a single rounded number)
    standardizedMAD = CalculateMedianAbsoluteDeviation(standardized_copy_ratios_df[["COPY_RATIO"]])
    denoisedMAD = CalculateMedianAbsoluteDeviation(denoised_copy_ratios_df[["COPY_RATIO"]])
    write.table(round(standardizedMAD, 3), file.path(output_dir, paste(output_prefix, ".standardizedMAD.txt", sep="")), col.names=FALSE, row.names=FALSE)
    write.table(round(denoisedMAD, 3), file.path(output_dir, paste(output_prefix, ".denoisedMAD.txt", sep="")), col.names=FALSE, row.names=FALSE)
    write.table(round(standardizedMAD - denoisedMAD, 3), file.path(output_dir, paste(output_prefix, ".deltaMAD.txt", sep="")), col.names=FALSE, row.names=FALSE)
    write.table(round((standardizedMAD - denoisedMAD) / standardizedMAD, 3), file.path(output_dir, paste(output_prefix, ".scaledDeltaMAD.txt", sep="")), col.names=FALSE, row.names=FALSE)
    #plot standardized and denoised copy ratio on top of each other
    pre_color_blue = "#3B5DFF"
    post_color_green = "#4FC601"
    #plot over full range
    denoising_plot_file = file.path(output_dir, paste(output_prefix, ".denoised.png", sep=""))
    png(denoising_plot_file, 12, 7, units="in", type="cairo", res=300, bg="white")
    par(mfrow=c(2, 1), cex=0.75, las=1)
    SetUpPlot(sample_name, "standardized copy ratio", 0, max(standardized_copy_ratios_df[["COPY_RATIO"]]), paste("median absolute deviation = ", round(standardizedMAD, 3), sep=""), contig_names, contig_starts, contig_ends, FALSE)
    PlotCopyRatios(standardized_copy_ratios_df, pre_color_blue, contig_names, contig_starts)
    SetUpPlot(sample_name, "denoised copy ratio", 0, max(denoised_copy_ratios_df[["COPY_RATIO"]]), paste("median absolute deviation = ", round(denoisedMAD, 3), sep=""), contig_names, contig_starts, contig_ends, TRUE)
    PlotCopyRatios(denoised_copy_ratios_df, post_color_green, contig_names, contig_starts)
    dev.off()
    #plot up to CR = 4 (clips outliers so low-amplitude structure stays visible)
    denoising_limit_plot_file = file.path(output_dir, paste(output_prefix, ".denoisedLimit4.png", sep=""))
    png(denoising_limit_plot_file, 12, 7, units="in", type="cairo", res=300, bg="white")
    par(mfrow=c(2, 1), cex=0.75, las=1)
    SetUpPlot(sample_name, "standardized copy ratio", 0, 4, paste("median absolute deviation = ", round(standardizedMAD, 3), sep=""), contig_names, contig_starts, contig_ends, FALSE)
    PlotCopyRatios(standardized_copy_ratios_df, pre_color_blue, contig_names, contig_starts)
    SetUpPlot(sample_name, "denoised copy ratio", 0, 4, paste("median absolute deviation = ", round(denoisedMAD, 3), sep=""), contig_names, contig_starts, contig_ends, TRUE)
    PlotCopyRatios(denoised_copy_ratios_df, post_color_green, contig_names, contig_starts)
    dev.off()
    #check for created files and quit with error code if not found
    if (!all(file.exists(c(denoising_plot_file, denoising_limit_plot_file)))) {
        quit(save="no", status=1, runLast=FALSE)
    }
}
WriteDenoisingPlots(sample_name, standardized_copy_ratios_file, denoised_copy_ratios_file, contig_names, output_dir, output_prefix)
|
/bin/PlotDenoisedCopyRatios.R
|
no_license
|
Jianhua-Wang/GATK
|
R
| false
| false
| 5,888
|
r
|
#NOTE: the Java wrapper for this script first sources CNVPlottingLibrary.R
# On any error, dump call frames to "plotting_dump" for post-mortem inspection
# and exit with status 1 so the calling GATK process detects the failure.
options(error = quote({dump.frames(dumpto = "plotting_dump", to.file = TRUE); q(status = 1)})) # Useful for debugging
library(optparse)
library(data.table)
source('CNVPlottingLibrary.R')
# Command-line options: sample id, the standardized/denoised copy-ratio TSVs,
# the contig layout (names and lengths joined by a delimiter string), and the
# output directory/prefix for the PNGs and MAD text files.
option_list = list(
    make_option(c("--sample_name", "-s"), dest="sample_name", action="store"),
    make_option(c("--standardized_copy_ratios_file", "-t"), dest="standardized_copy_ratios_file", action="store"),
    make_option(c("--denoised_copy_ratios_file", "-d"), dest="denoised_copy_ratios_file", action="store"),
    make_option(c("--contig_names", "-c"), dest="contig_names", action="store"), #string with elements separated by "CONTIG_DELIMITER"
    make_option(c("--contig_lengths", "-l"), dest="contig_lengths", action="store"), #string with elements separated by "CONTIG_DELIMITER"
    make_option(c("--output_dir", "-o"), dest="output_dir", action="store"),
    make_option(c("--output_prefix", "-p"), dest="output_prefix", action="store"))
opt = parse_args(OptionParser(option_list=option_list))
sample_name = opt[["sample_name"]]
standardized_copy_ratios_file = opt[["standardized_copy_ratios_file"]]
denoised_copy_ratios_file = opt[["denoised_copy_ratios_file"]]
contig_names_string = opt[["contig_names"]]
contig_lengths_string = opt[["contig_lengths"]]
output_dir = opt[["output_dir"]]
output_prefix = opt[["output_prefix"]]
#check that input files exist; if not, quit with error code that GATK will pick up
if (!all(file.exists(c(standardized_copy_ratios_file, denoised_copy_ratios_file)))) {
    quit(save="no", status=1, runLast=FALSE)
}
# Split the delimiter-joined contig names/lengths back into lists.
contig_names = as.list(strsplit(contig_names_string, "CONTIG_DELIMITER")[[1]])
# NOTE(review): strsplit() yields character elements, so contig_lengths holds
# strings here; cumsum() below needs numeric input -- confirm the wrapper/R
# coercion produces numeric values upstream.
contig_lengths = as.list(strsplit(contig_lengths_string, "CONTIG_DELIMITER")[[1]])
# Genome-wide x-axis layout: cumulative contig end positions and the
# corresponding start offsets (first contig starts at 0).
contig_ends = cumsum(contig_lengths)
contig_starts = c(0, head(contig_ends, -1))
# Median of the absolute first differences of successive values -- the noise
# metric used to compare standardized vs. denoised copy ratios (note: this is
# not the classical MAD about the median).
CalculateMedianAbsoluteDeviation = function(dat) {
    successive_changes = abs(diff(dat))
    return(median(successive_changes))
}
#plotting is extracted to a function for debugging purposes
# Reads the standardized and denoised copy-ratio TSVs, writes four single-value
# text files with the median-absolute-deviation (MAD) noise metric
# (standardized, denoised, delta, scaled delta), then writes two two-panel PNGs
# comparing standardized (top) vs. denoised (bottom) copy ratios genome-wide:
# one with the full y range and one clipped at copy ratio 4.
# ReadTSV, SetUpPlot, and PlotCopyRatios come from CNVPlottingLibrary.R.
# NOTE(review): contig_starts and contig_ends are read from the global
# environment rather than passed as arguments.
WriteDenoisingPlots = function(sample_name, standardized_copy_ratios_file, denoised_copy_ratios_file, contig_names, output_dir, output_prefix) {
    standardized_copy_ratios_df = ReadTSV(standardized_copy_ratios_file)
    denoised_copy_ratios_df = ReadTSV(denoised_copy_ratios_file)
    #transform from log2 to linear copy ratio
    standardized_copy_ratios_df[["COPY_RATIO"]] = 2^standardized_copy_ratios_df[["LOG2_COPY_RATIO"]]
    denoised_copy_ratios_df[["COPY_RATIO"]] = 2^denoised_copy_ratios_df[["LOG2_COPY_RATIO"]]
    #determine copy-ratio interval midpoints (x coordinates for plotting)
    standardized_copy_ratios_df[["MIDDLE"]] = round((standardized_copy_ratios_df[["START"]] + standardized_copy_ratios_df[["END"]]) / 2)
    denoised_copy_ratios_df[["MIDDLE"]] = round((denoised_copy_ratios_df[["START"]] + denoised_copy_ratios_df[["END"]]) / 2)
    #write the MAD files (each holds a single rounded number)
    standardizedMAD = CalculateMedianAbsoluteDeviation(standardized_copy_ratios_df[["COPY_RATIO"]])
    denoisedMAD = CalculateMedianAbsoluteDeviation(denoised_copy_ratios_df[["COPY_RATIO"]])
    write.table(round(standardizedMAD, 3), file.path(output_dir, paste(output_prefix, ".standardizedMAD.txt", sep="")), col.names=FALSE, row.names=FALSE)
    write.table(round(denoisedMAD, 3), file.path(output_dir, paste(output_prefix, ".denoisedMAD.txt", sep="")), col.names=FALSE, row.names=FALSE)
    write.table(round(standardizedMAD - denoisedMAD, 3), file.path(output_dir, paste(output_prefix, ".deltaMAD.txt", sep="")), col.names=FALSE, row.names=FALSE)
    write.table(round((standardizedMAD - denoisedMAD) / standardizedMAD, 3), file.path(output_dir, paste(output_prefix, ".scaledDeltaMAD.txt", sep="")), col.names=FALSE, row.names=FALSE)
    #plot standardized and denoised copy ratio on top of each other
    pre_color_blue = "#3B5DFF"
    post_color_green = "#4FC601"
    #plot over full range
    denoising_plot_file = file.path(output_dir, paste(output_prefix, ".denoised.png", sep=""))
    png(denoising_plot_file, 12, 7, units="in", type="cairo", res=300, bg="white")
    par(mfrow=c(2, 1), cex=0.75, las=1)
    SetUpPlot(sample_name, "standardized copy ratio", 0, max(standardized_copy_ratios_df[["COPY_RATIO"]]), paste("median absolute deviation = ", round(standardizedMAD, 3), sep=""), contig_names, contig_starts, contig_ends, FALSE)
    PlotCopyRatios(standardized_copy_ratios_df, pre_color_blue, contig_names, contig_starts)
    SetUpPlot(sample_name, "denoised copy ratio", 0, max(denoised_copy_ratios_df[["COPY_RATIO"]]), paste("median absolute deviation = ", round(denoisedMAD, 3), sep=""), contig_names, contig_starts, contig_ends, TRUE)
    PlotCopyRatios(denoised_copy_ratios_df, post_color_green, contig_names, contig_starts)
    dev.off()
    #plot up to CR = 4 (clips outliers so low-amplitude structure stays visible)
    denoising_limit_plot_file = file.path(output_dir, paste(output_prefix, ".denoisedLimit4.png", sep=""))
    png(denoising_limit_plot_file, 12, 7, units="in", type="cairo", res=300, bg="white")
    par(mfrow=c(2, 1), cex=0.75, las=1)
    SetUpPlot(sample_name, "standardized copy ratio", 0, 4, paste("median absolute deviation = ", round(standardizedMAD, 3), sep=""), contig_names, contig_starts, contig_ends, FALSE)
    PlotCopyRatios(standardized_copy_ratios_df, pre_color_blue, contig_names, contig_starts)
    SetUpPlot(sample_name, "denoised copy ratio", 0, 4, paste("median absolute deviation = ", round(denoisedMAD, 3), sep=""), contig_names, contig_starts, contig_ends, TRUE)
    PlotCopyRatios(denoised_copy_ratios_df, post_color_green, contig_names, contig_starts)
    dev.off()
    #check for created files and quit with error code if not found
    if (!all(file.exists(c(denoising_plot_file, denoising_limit_plot_file)))) {
        quit(save="no", status=1, runLast=FALSE)
    }
}
WriteDenoisingPlots(sample_name, standardized_copy_ratios_file, denoised_copy_ratios_file, contig_names, output_dir, output_prefix)
|
# Econometrics practice script: dates with lubridate, zoo/xts time series,
# external data sources (sophisthse, Quandl, Yahoo, Finam), HAC-robust
# inference, and autocorrelation/heteroskedasticity tests.
# To install the GitHub-only packages, uncomment:
# library(devtools)
# install_github("dgrtwo/broom")
# install_github("cran/bstats")
# install_github("bdemeshev/rusquant")
# install_github("bdemeshev/sophisthse")
library(lubridate)
library(sandwich)
library(lmtest)
library(car)
library(bstats)
library(zoo)
library(xts)
library(dplyr)
library(broom)
library(ggplot2)
library(Ecdat)
library(quantmod) # download quotes from Yahoo/Google Finance
library(rusquant) # download quotes from Finam
library(sophisthse)
library(Quandl)
# work with dates: parse, shift, and extract components
x <- c("2012-04-15", "2011-08-17")
y <- ymd(x)
y + days(20)
y - years(10)
day(y)
month(y)
year(y)
# create time series: 5 random values indexed by consecutive days
x <- rnorm(5)
y <- ymd("2014-01-01") + days(0:4)
ts <- zoo(x, order.by = y)
ts
stats::lag(ts, -1) # yesterday's value aligned to today (compare with ts)
stats::lag(ts, 1) # tomorrow's value aligned to today (compare with ts)
diff(ts) # day-over-day differences, starting from the second date
z <- (stats::lag(ts, -1)+stats::lag(ts, -2)+stats::lag(ts, -3))/3 # trailing 3-day mean
z
# regular series at quarterly and monthly frequency
ts2 <- zooreg(x, start=as.yearqtr("2014-01"), freq=4)
ts2
ts3 <- zooreg(x, start=as.yearmon("2014-01"), freq=12)
ts3
# explore the built-in Investment dataset
data("Investment")
start(Investment)
end(Investment)
time(Investment)
coredata(Investment) # data without the time index
# create missing values; they can be filled either by linear interpolation
# (the mean of the values above and below) or by carrying forward the
# previous value
dna <- Investment
dna[1, 2] <- NA
dna[5, 3] <- NA
na.approx(dna) # linear interpolation
na.locf(dna) # last observation carried forward
# download from outside sources
a <- sophisthse("POPNUM_Y")
b <- Quandl("FRED/GNP")
b
# Yahoo/Google Finance
Sys.setlocale("LC_TIME", "C") # set English locale, because Russian and English date formats are different
getSymbols(Symbols = "AAPL", from="2010-01-01",
           to="2014-03-04", src="yahoo")
head(AAPL)
tail(AAPL)
# Finam
getSymbols(Symbols = "GAZP", from="2010-01-01",
           to="2014-03-04", src="Finam")
head(GAZP)
tail(GAZP)
plot(GAZP)
autoplot(GAZP[,1:4])
autoplot(GAZP[,1:4], facets = NULL)
chartSeries(GAZP)
# HAC-robust confidence intervals for a regression on the Investment data
d <- as.zoo(Investment)
autoplot(d[,1:2], facets = NULL)
model <- lm(data=d, RealInv~RealInt+RealGNP)
summary(model)
coeftest(model)
confint(model)
d_aug <- augment(model, as.data.frame(d))
qplot(data=d_aug, lag(.resid), .resid) # residual vs. lagged residual: eyeball autocorrelation
vcov(model) # classical covariance matrix
vcovHAC(model) # heteroskedasticity-and-autocorrelation-consistent covariance
coeftest(model, vcov. = vcovHAC(model))
conftable <- coeftest(model, vcov. = vcovHAC(model))
ci <- data.frame(estimate=conftable[,1], se_ac=conftable[,2])
ci <- mutate(ci, left_95=estimate-1.96*se_ac, right_95=estimate+1.96*se_ac)
ci
# Durbin-Watson test for first-order autocorrelation
dwt(model)
res <- dwt(model)
res$p
# Breusch-Godfrey test up to order 6
bgtest(model, order = 6)
# if one test lacks the evidence to reject the hypothesis while another test
# does reject it, the hypothesis is rejected
# homework
d <- Griliches
d
model <- lm(lw80 ~ age80 + iq + school80 + expr80, d)
vcov(model)
vcov(model) - vcovHC(model,type = "HC3")
vcovHC(model,type = "HC4m")[2,2]
vcovHC(model,type = "HC3")[2,2]
vcovHC(model,type = "HC5")[2,2]
vcovHC(model,type = "HC1")[2,2]
d <- augment(model, d)
bptest(model, varformula = ~ iq, data = d) # Breusch-Pagan heteroskedasticity test
gqtest(model, order.by = ~expr80, data = d, fraction = 0.2) # Goldfeld-Quandt test
d <- Solow
model <- lm(q ~ k + A, d)
summary(model)
vcov(model) - vcovHAC(model)
model2 <- lm(q ~ k, d)
dw <- dwt(model2)
dw$dw
bgtest(model, order = 3)
Sys.setlocale("LC_TIME","C")
getSymbols(Symbols = "AAPL",from="2010-01-01", to="2014-02-03",src="yahoo")
plot(AAPL$AAPL.Close, main = "")
getSymbols(Symbols = "GOOG",from="2010-01-01", to="2014-02-03",src="yahoo")
plot(GOOG$GOOG.Close, main = "")
getSymbols(Symbols = "INTC",from="2010-01-01", to="2014-02-03",src="yahoo")
plot(INTC$INTC.Close, main = "")
getSymbols(Symbols = "MSFT",from="2010-01-01", to="2014-02-03",src="yahoo")
plot(MSFT$MSFT.Close, main = "")
p <- MSFT$MSFT.Close
# regress the close price on its first two lags (AR(2)-style check)
summary(lm(p ~ stats::lag(p, -1) + stats::lag(p, -2)))
|
/6. Econometrics/6.7. Week 6.R
|
no_license
|
sergeymong/R
|
R
| false
| false
| 4,109
|
r
|
# Econometrics, week 6: working with dates, time series (zoo/xts), downloading
# financial data, and heteroskedasticity/autocorrelation-robust inference.
# One-time installation of GitHub-only packages (commented out):
# library(devtools)
# install_github("dgrtwo/broom")
# install_github("cran/bstats")
# install_github("bdemeshev/rusquant")
# install_github("bdemeshev/sophisthse")
library(lubridate)
library(sandwich)
library(lmtest)
library(car)
library(bstats)
library(zoo)
library(xts)
library(dplyr)
library(broom)
library(ggplot2)
library(Ecdat)
library(quantmod) # download from Google/Yahoo Finance
library(rusquant) # download from Finam
library(sophisthse)
library(Quandl)
# work with dates
x <- c("2012-04-15", "2011-08-17")
y <- ymd(x)
y + days(20)
y - years(10)
day(y)
month(y)
year(y)
# create time series
x <- rnorm(5)
y <- ymd("2014-01-01") + days(0:4)
ts <- zoo(x, order.by = y)
ts
stats::lag(ts, -1) # value shifted to yesterday; compare with ts to see the shift
stats::lag(ts, 1) # value shifted to tomorrow; compare with ts to see the shift
diff(ts) # first differences between consecutive dates, starting from the second date
z <- (stats::lag(ts, -1)+stats::lag(ts, -2)+stats::lag(ts, -3))/3 # moving average of the past 3 days
z
# Regularly-spaced series: quarterly and monthly frequencies.
ts2 <- zooreg(x, start=as.yearqtr("2014-01"), freq=4)
ts2
ts3 <- zooreg(x, start=as.yearmon("2014-01"), freq=12)
ts3
# Explore the Investment data set (from Ecdat).
data("Investment")
start(Investment)
end(Investment)
time(Investment)
coredata(Investment) # data without time
# Introduce missing values; they can be filled either by linear interpolation
# (average of the neighbouring values) or by carrying the last observation forward.
dna <- Investment
dna[1, 2] <- NA
dna[5, 3] <- NA
na.approx(dna)
na.locf(dna)
# download from outside sources
a <- sophisthse("POPNUM_Y")
b <- Quandl("FRED/GNP")
b
# finance google
Sys.setlocale("LC_TIME", "C") # set English locale, because Russian and English date formats are different
getSymbols(Symbols = "AAPL", from="2010-01-01",
           to="2014-03-04", src="yahoo")
head(AAPL)
tail(AAPL)
# finam
getSymbols(Symbols = "GAZP", from="2010-01-01",
           to="2014-03-04", src="Finam")
head(GAZP)
tail(GAZP)
plot(GAZP)
autoplot(GAZP[,1:4])
autoplot(GAZP[,1:4], facets = NULL)
chartSeries(GAZP)
# robust intervals
d <- as.zoo(Investment)
autoplot(d[,1:2], facets = NULL)
model <- lm(data=d, RealInv~RealInt+RealGNP)
summary(model)
coeftest(model)
confint(model)
d_aug <- augment(model, as.data.frame(d))
qplot(data=d_aug, lag(.resid), .resid) # residuals vs lagged residuals: visual check for autocorrelation
vcov(model)
vcovHAC(model) # heteroskedasticity- and autocorrelation-consistent covariance
coeftest(model, vcov. = vcovHAC(model))
conftable <- coeftest(model, vcov. = vcovHAC(model))
ci <- data.frame(estimate=conftable[,1], se_ac=conftable[,2])
# 95% confidence intervals based on the robust standard errors.
ci <- mutate(ci, left_95=estimate-1.96*se_ac, right_95=estimate+1.96*se_ac)
ci
# durbin-watson
dwt(model)
res <- dwt(model)
res$p
# BG-test (Breusch-Godfrey test for serial correlation)
bgtest(model, order = 6)
# If one test did not provide enough evidence to reject the hypothesis but
# another test does reject it, then the hypothesis is rejected.
# homework
d <- Griliches
d
model <- lm(lw80 ~ age80 + iq + school80 + expr80, d)
vcov(model)
vcov(model) - vcovHC(model,type = "HC3")
vcovHC(model,type = "HC4m")[2,2]
vcovHC(model,type = "HC3")[2,2]
vcovHC(model,type = "HC5")[2,2]
vcovHC(model,type = "HC1")[2,2]
d <- augment(model, d)
bptest(model, varformula = ~ iq, data = d) # Breusch-Pagan test for heteroskedasticity
gqtest(model, order.by = ~expr80, data = d, fraction = 0.2) # Goldfeld-Quandt test
d <- Solow
model <- lm(q ~ k + A, d)
summary(model)
vcov(model) - vcovHAC(model)
model2 <- lm(q ~ k, d)
dw <- dwt(model2)
dw$dw
bgtest(model, order = 3)
Sys.setlocale("LC_TIME","C")
# Download and plot closing prices for several tech stocks.
getSymbols(Symbols = "AAPL",from="2010-01-01", to="2014-02-03",src="yahoo")
plot(AAPL$AAPL.Close, main = "")
getSymbols(Symbols = "GOOG",from="2010-01-01", to="2014-02-03",src="yahoo")
plot(GOOG$GOOG.Close, main = "")
getSymbols(Symbols = "INTC",from="2010-01-01", to="2014-02-03",src="yahoo")
plot(INTC$INTC.Close, main = "")
getSymbols(Symbols = "MSFT",from="2010-01-01", to="2014-02-03",src="yahoo")
plot(MSFT$MSFT.Close, main = "")
# AR(2)-style regression of the MSFT close on its first two lags.
p <- MSFT$MSFT.Close
summary(lm(p ~ stats::lag(p, -1) + stats::lag(p, -2)))
|
# Predict a corrected hourly electricity reading from command-line inputs
# using the pre-trained random forest model (rf.model) loaded from disk.
library('randomForest')
source(paste(Sys.getenv('HOME'), '/.rconfig.R', sep=''))  # defines `githome`
load(paste(githome, '/electricity_logging/statistics/hourly_correction_model.RData', sep=''))
# Get args: date, hour of day, and the two channel kWh readings.
args <- commandArgs(trailingOnly=TRUE)
sum_date <- args[1]
hour <- args[2]
kwh_ch1 <- args[3]
kwh_ch2 <- args[4]
# FIX: the original built `pdata` with rbind(sum_date, hour, ...), which
# produces a 4x1 character matrix (one variable per ROW). predict.randomForest
# expects a one-row data frame whose COLUMN names match the training
# predictors, so the model would not receive the variables correctly.
# NOTE(review): hour/kWh values are converted to numeric on the assumption
# that the model was trained on numeric predictors -- confirm against the
# training code in hourly_correction_model.
pdata <- data.frame(sum_date = sum_date,
                    hour = as.numeric(hour),
                    kwh_ch1 = as.numeric(kwh_ch1),
                    kwh_ch2 = as.numeric(kwh_ch2))
pred <- predict(rf.model, pdata)
print(pred)
|
/statistics/hourly_correction_predict.R
|
no_license
|
guitar1999/electricity_logging
|
R
| false
| false
| 397
|
r
|
# Predict a corrected hourly electricity reading from command-line inputs
# using the pre-trained random forest model (rf.model) loaded from disk.
library('randomForest')
source(paste(Sys.getenv('HOME'), '/.rconfig.R', sep=''))  # defines `githome`
load(paste(githome, '/electricity_logging/statistics/hourly_correction_model.RData', sep=''))
# Get args: date, hour of day, and the two channel kWh readings.
args <- commandArgs(trailingOnly=TRUE)
sum_date <- args[1]
hour <- args[2]
kwh_ch1 <- args[3]
kwh_ch2 <- args[4]
# FIX: the original built `pdata` with rbind(sum_date, hour, ...), which
# produces a 4x1 character matrix (one variable per ROW). predict.randomForest
# expects a one-row data frame whose COLUMN names match the training
# predictors, so the model would not receive the variables correctly.
# NOTE(review): hour/kWh values are converted to numeric on the assumption
# that the model was trained on numeric predictors -- confirm against the
# training code in hourly_correction_model.
pdata <- data.frame(sum_date = sum_date,
                    hour = as.numeric(hour),
                    kwh_ch1 = as.numeric(kwh_ch1),
                    kwh_ch2 = as.numeric(kwh_ch2))
pred <- predict(rf.model, pdata)
print(pred)
|
# Plot 3: energy sub-metering over 2007-02-01 and 2007-02-02, written to
# plot3.png (480x480). "?" marks missing values in the raw file.
power <- read.delim2("household_power_consumption.txt", header = TRUE,
                     sep = ";", na.strings = "?")
feb <- subset(power, Date == "1/2/2007" | Date == "2/2/2007")
# Combine the Date and Time columns into a single POSIXlt timestamp.
feb$datetime <- strptime(paste(feb$Date, feb$Time), "%d/%m/%Y %H:%M:%S")
# Coerce the three sub-metering columns to numeric via character.
for (metering_col in c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) {
  feb[[metering_col]] <- as.numeric(as.character(feb[[metering_col]]))
}
png(file = "plot3.png", width = 480, height = 480)
# Empty frame first, then one line per sub-meter.
plot(feb$datetime, feb$Sub_metering_1, type = "n",
     xlab = "", ylab = "Energy sub metering")
lines(feb$datetime, feb$Sub_metering_1)
lines(feb$datetime, feb$Sub_metering_2, col = "red")
lines(feb$datetime, feb$Sub_metering_3, col = "blue")
legend(x = "topright", lwd = c(1, 1, 1), col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
/plot3.R
|
no_license
|
shekars/ExData_Plotting1
|
R
| false
| false
| 881
|
r
|
# Plot 3: energy sub-metering over 2007-02-01 and 2007-02-02, written to
# plot3.png (480x480). "?" marks missing values in the raw file.
power <- read.delim2("household_power_consumption.txt", header = TRUE,
                     sep = ";", na.strings = "?")
feb <- subset(power, Date == "1/2/2007" | Date == "2/2/2007")
# Combine the Date and Time columns into a single POSIXlt timestamp.
feb$datetime <- strptime(paste(feb$Date, feb$Time), "%d/%m/%Y %H:%M:%S")
# Coerce the three sub-metering columns to numeric via character.
for (metering_col in c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) {
  feb[[metering_col]] <- as.numeric(as.character(feb[[metering_col]]))
}
png(file = "plot3.png", width = 480, height = 480)
# Empty frame first, then one line per sub-meter.
plot(feb$datetime, feb$Sub_metering_1, type = "n",
     xlab = "", ylab = "Energy sub metering")
lines(feb$datetime, feb$Sub_metering_1)
lines(feb$datetime, feb$Sub_metering_2, col = "red")
lines(feb$datetime, feb$Sub_metering_3, col = "blue")
legend(x = "topright", lwd = c(1, 1, 1), col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
\name{rLSloadSemiContData}
\alias{rLSloadSemiContData}
\title{Load semi-continuous data into a model structure.}
\description{
R interface function for LINDO API function \code{LSloadSemiContData}. For more information,
please refer to LINDO API User Manual.
}
\usage{
rLSloadSemiContData(model,nSCVars,paiVars,padL,padU)
}
\arguments{
\item{model}{A LINDO API model object, returned by \code{\link{rLScreateModel}}.}
\item{nSCVars}{The number of semi-continuous variables.}
\item{paiVars}{An integer array containing the indices of semi-continuous variables.}
\item{padL}{A double array containing the lower bound associated with each semi-continuous variable.}
\item{padU}{A double array containing the upper bound associated with each semi-continuous variable.}
}
\value{
An R list object with components:
\item{ErrorCode}{Zero if successful, nonzero otherwise.}
}
\references{
LINDO SYSTEMS home page at www.lindo.com
}
|
/man/rLSloadSemiContData.Rd
|
no_license
|
cran/rLindo
|
R
| false
| false
| 974
|
rd
|
\name{rLSloadSemiContData}
\alias{rLSloadSemiContData}
\title{Load semi-continuous data into a model structure.}
\description{
R interface function for LINDO API function \code{LSloadSemiContData}. For more information,
please refer to LINDO API User Manual.
}
\usage{
rLSloadSemiContData(model,nSCVars,paiVars,padL,padU)
}
\arguments{
\item{model}{A LINDO API model object, returned by \code{\link{rLScreateModel}}.}
\item{nSCVars}{The number of semi-continuous variables.}
\item{paiVars}{An integer array containing the indices of semi-continuous variables.}
\item{padL}{A double array containing the lower bound associated with each semi-continuous variable.}
\item{padU}{A double array containing the upper bound associated with each semi-continuous variable.}
}
\value{
An R list object with components:
\item{ErrorCode}{Zero if successful, nonzero otherwise.}
}
\references{
LINDO SYSTEMS home page at www.lindo.com
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{uy_deptos_grid}
\alias{uy_deptos_grid}
\title{Uruguay Departments grid}
\format{
A data frame with 19 rows and 4 variables:
\describe{
\item{name}{name of the "Departamento"}
\item{code}{INE code of the "Departamento"}
\item{row}{row position in the grid}
\item{col}{col position in the grid}
}
}
\usage{
uy_deptos_grid
}
\description{
A dataset containing the codes, names, and other attributes as a geofacet grid
}
\seealso{
Other data:
\code{\link{loc_agr_ine}},
\code{\link{metadata_tables}},
\code{\link{metadata_wms}},
\code{\link{metadata}},
\code{\link{mvd_barrios_grid}}
}
\concept{data}
\keyword{datasets}
|
/man/uy_deptos_grid.Rd
|
no_license
|
cran/geouy
|
R
| false
| true
| 761
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{uy_deptos_grid}
\alias{uy_deptos_grid}
\title{Uruguay Departments grid}
\format{
A data frame with 19 rows and 4 variables:
\describe{
\item{name}{name of the "Departamento"}
\item{code}{INE code of the "Departamento"}
\item{row}{row position in the grid}
\item{col}{col position in the grid}
}
}
\usage{
uy_deptos_grid
}
\description{
A dataset containing the codes, names, and other attributes as a geofacet grid
}
\seealso{
Other data:
\code{\link{loc_agr_ine}},
\code{\link{metadata_tables}},
\code{\link{metadata_wms}},
\code{\link{metadata}},
\code{\link{mvd_barrios_grid}}
}
\concept{data}
\keyword{datasets}
|
# Exact small-sample tests: reproduce the G statistics reported in the paper.
test_that("Paper Example G Stats", {
  g3 <- small_samptest(c(3,0,0,0,0,0,0))
  g2 <- small_samptest(c(2,1,0,0,0,0,0))
  g1 <- small_samptest(c(1,1,1,0,0,0,0))
  observed <- round(c(g3$test_stat, g2$test_stat, g1$test_stat), 2)
  expect_equal(c(11.68, 7.86, 5.08), observed)
})
# https://andrewpwheeler.com/2017/03/31/using-the-exact-reference-distribution-for-small-sample-benford-tests/
test_that("Benford Example Permutation N", {
  probs <- c(0.301029995663981, 0.176091259055681, 0.0579919469776867,
             0.0511525224473813, 0.0457574905606751, 0.367976785294594)
  counts <- c(1,1,3,6,2,0)
  res <- small_samptest(d=counts, p=probs, type="KS")
  n_obs <- sum(counts)
  n_bins <- length(counts)
  # Multiset coefficient: number of distinct count vectors of n_obs items in n_bins bins.
  expected_rows <- choose(n_bins + n_obs - 1, n_bins - 1)
  # Test type is correct
  expect_equal(res$test, 'KS')
  expect_equal(dim(res$CDF)[1], expected_rows)
})
test_that("Power Example", {
  pow_half <- powalt(small_samptest(c(3,0,0,0,0,0,0)), c(1/2,1/2,0,0,0,0,0))
  pow_quarter <- powalt(small_samptest(c(11,0,0,0,0,0,0)), c(1/4,1/4,1/4,1/4,0,0,0))
  expect_equal(round(c(pow_half$pow, pow_quarter$pow), 2), c(0.25, 0.58))
})
test_that("Reuse CDF", {
  counts <- c(3,0,0,0,0,0,0)
  fresh <- small_samptest(counts, type="V")
  reused <- small_samptest(counts, type="V", cdf=fresh$CDF)
  expect_equal(fresh, reused)
})
test_that("Chi/G Equal", {
  counts <- c(3,0,0,0,0,0,0)
  chi_res <- small_samptest(counts, type="Chi")
  g_res <- small_samptest(counts, type="G")
  expect_equal(chi_res$AggregateStatistics[,2:3], g_res$AggregateStatistics[,2:3])
})
|
/tests/testthat/test-exact.R
|
permissive
|
tkante/ptools
|
R
| false
| false
| 1,563
|
r
|
# Exact small-sample tests: reproduce the G statistics reported in the paper.
test_that("Paper Example G Stats", {
  g3 <- small_samptest(c(3,0,0,0,0,0,0))
  g2 <- small_samptest(c(2,1,0,0,0,0,0))
  g1 <- small_samptest(c(1,1,1,0,0,0,0))
  observed <- round(c(g3$test_stat, g2$test_stat, g1$test_stat), 2)
  expect_equal(c(11.68, 7.86, 5.08), observed)
})
# https://andrewpwheeler.com/2017/03/31/using-the-exact-reference-distribution-for-small-sample-benford-tests/
test_that("Benford Example Permutation N", {
  probs <- c(0.301029995663981, 0.176091259055681, 0.0579919469776867,
             0.0511525224473813, 0.0457574905606751, 0.367976785294594)
  counts <- c(1,1,3,6,2,0)
  res <- small_samptest(d=counts, p=probs, type="KS")
  n_obs <- sum(counts)
  n_bins <- length(counts)
  # Multiset coefficient: number of distinct count vectors of n_obs items in n_bins bins.
  expected_rows <- choose(n_bins + n_obs - 1, n_bins - 1)
  # Test type is correct
  expect_equal(res$test, 'KS')
  expect_equal(dim(res$CDF)[1], expected_rows)
})
test_that("Power Example", {
  pow_half <- powalt(small_samptest(c(3,0,0,0,0,0,0)), c(1/2,1/2,0,0,0,0,0))
  pow_quarter <- powalt(small_samptest(c(11,0,0,0,0,0,0)), c(1/4,1/4,1/4,1/4,0,0,0))
  expect_equal(round(c(pow_half$pow, pow_quarter$pow), 2), c(0.25, 0.58))
})
test_that("Reuse CDF", {
  counts <- c(3,0,0,0,0,0,0)
  fresh <- small_samptest(counts, type="V")
  reused <- small_samptest(counts, type="V", cdf=fresh$CDF)
  expect_equal(fresh, reused)
})
test_that("Chi/G Equal", {
  counts <- c(3,0,0,0,0,0,0)
  chi_res <- small_samptest(counts, type="Chi")
  g_res <- small_samptest(counts, type="G")
  expect_equal(chi_res$AggregateStatistics[,2:3], g_res$AggregateStatistics[,2:3])
})
|
# Count AMZN trading days in 2012 and break them down by weekday.
library(quantmod)
amzn <- getSymbols("AMZN", auto.assign = FALSE)
trading_days <- index(amzn)
days_2012 <- trading_days[grep("^2012", trading_days)]
length(days_2012)
table(weekdays(as.Date(days_2012, '%Y-%m-%d')))
|
/HW4.R
|
no_license
|
ckatsulis/Getting-and-Cleaning-Data
|
R
| false
| false
| 200
|
r
|
# Count AMZN trading days in 2012 and break them down by weekday.
library(quantmod)
amzn <- getSymbols("AMZN", auto.assign = FALSE)
trading_days <- index(amzn)
days_2012 <- trading_days[grep("^2012", trading_days)]
length(days_2012)
table(weekdays(as.Date(days_2012, '%Y-%m-%d')))
|
# Plot histograms of total commit counts for node.js applications and modules.
# FIX: read the CSVs via explicit absolute paths instead of setwd(), which
# mutates the global working directory as a side effect that persists after
# the script finishes (and affects any code run afterwards in the session).
apps_csv <- "/home/brito/salt/JSRepairClass/input/applications/repositories_metrics.csv"
apps <- read.csv(apps_csv)
hist(apps$TotalCommits,
     main = paste("Histogram for ", nrow(apps), "node.js applications"),
     xlab = "Total number of commits")
mods_csv <- "/home/brito/salt/JSRepairClass/input/modules/repositories_metrics.csv"
mods <- read.csv(mods_csv)
hist(mods$TotalCommits,
     main = paste("Histogram of", nrow(mods), "top rated node.js modules (from npmjs.com)"),
     xlab = "Total number of commits")
|
/core/tools/histogram_from_repository_metrics.r
|
permissive
|
nashid/CommitMiner
|
R
| false
| false
| 478
|
r
|
# Plot histograms of total commit counts for node.js applications and modules.
# FIX: read the CSVs via explicit absolute paths instead of setwd(), which
# mutates the global working directory as a side effect that persists after
# the script finishes (and affects any code run afterwards in the session).
apps_csv <- "/home/brito/salt/JSRepairClass/input/applications/repositories_metrics.csv"
apps <- read.csv(apps_csv)
hist(apps$TotalCommits,
     main = paste("Histogram for ", nrow(apps), "node.js applications"),
     xlab = "Total number of commits")
mods_csv <- "/home/brito/salt/JSRepairClass/input/modules/repositories_metrics.csv"
mods <- read.csv(mods_csv)
hist(mods$TotalCommits,
     main = paste("Histogram of", nrow(mods), "top rated node.js modules (from npmjs.com)"),
     xlab = "Total number of commits")
|
# Class 1: predictive vs explanatory power of polynomial regressions on mtcars.
# NOTE(review): there is no set.seed() call, so the train/test split and all
# downstream results vary between runs -- confirm whether that is intended.
library(tidyverse)
library(MuMIn)
library(caret)
library(broom)
data("mtcars")
# Random 50/50 split into training and test sets.
index <- sample(1:nrow(mtcars), size = round(nrow(mtcars)/2))
Train <- mtcars[index, ]
Test <- mtcars[-index, ]
# Quadratic and quartic polynomial models of mpg on horsepower.
Modelo <- lm(mpg ~ hp + I(hp^2), data = Train)
Modelo2 <- lm(mpg ~ hp + I(hp^2) + I(hp^3) + I(hp^4), data = Train)
broom::tidy(Modelo)    # coefficient table
broom::glance(Modelo)  # model-level fit statistics
Test$Pred <- predict(Modelo, Test)
Test <- Test %>%
  mutate(resid = mpg - Pred) %>%
  dplyr::select(hp, mpg, Pred, resid)
## To see the predictive power (RMSE, R-squared, MAE on the held-out set)
postResample(pred = Test$Pred, obs = Test$mpg)
# To compute the small-sample corrected AIC
AICc(Modelo)
AICc(Modelo2)
## Predictive power vs explanatory power:
## repeat the random split 100 times, recording out-of-sample and in-sample R-squared.
DF <- data.frame(Predictivo = rep(NA, 100), Explicativo = rep(NA, 100))
for(i in 1:100){
  index <- sample(1:nrow(mtcars), size = round(nrow(mtcars)/2))
  Train <- mtcars[index, ]
  Test <- mtcars[-index, ]
  Modelo2 <- lm(mpg ~ hp + I(hp^2) + I(hp^3) + I(hp^4), data = Train)
  Train$Pred <- predict(Modelo2, Train)
  Test$Pred <- predict(Modelo2, Test)
  ## Element [2] of postResample() is R-squared
  DF$Predictivo[i] <- postResample(pred = Test$Pred, obs = Test$mpg)[2]
  DF$Explicativo[i] <- postResample(pred = Train$Pred, obs = Train$mpg)[2]
}
# Compare the two R-squared distributions side by side.
DF <- DF %>% pivot_longer(cols = everything(), names_to = "Tipo", values_to = "R2")
ggplot(DF, aes(x = Tipo, y = R2)) + geom_boxplot()
|
/Scripts_Clases/Clase1.R
|
no_license
|
derek-corcoran-barrios/Curso_multivariado_2020_2
|
R
| false
| false
| 1,369
|
r
|
# Class 1: predictive vs explanatory power of polynomial regressions on mtcars.
# NOTE(review): there is no set.seed() call, so the train/test split and all
# downstream results vary between runs -- confirm whether that is intended.
library(tidyverse)
library(MuMIn)
library(caret)
library(broom)
data("mtcars")
# Random 50/50 split into training and test sets.
index <- sample(1:nrow(mtcars), size = round(nrow(mtcars)/2))
Train <- mtcars[index, ]
Test <- mtcars[-index, ]
# Quadratic and quartic polynomial models of mpg on horsepower.
Modelo <- lm(mpg ~ hp + I(hp^2), data = Train)
Modelo2 <- lm(mpg ~ hp + I(hp^2) + I(hp^3) + I(hp^4), data = Train)
broom::tidy(Modelo)    # coefficient table
broom::glance(Modelo)  # model-level fit statistics
Test$Pred <- predict(Modelo, Test)
Test <- Test %>%
  mutate(resid = mpg - Pred) %>%
  dplyr::select(hp, mpg, Pred, resid)
## To see the predictive power (RMSE, R-squared, MAE on the held-out set)
postResample(pred = Test$Pred, obs = Test$mpg)
# To compute the small-sample corrected AIC
AICc(Modelo)
AICc(Modelo2)
## Predictive power vs explanatory power:
## repeat the random split 100 times, recording out-of-sample and in-sample R-squared.
DF <- data.frame(Predictivo = rep(NA, 100), Explicativo = rep(NA, 100))
for(i in 1:100){
  index <- sample(1:nrow(mtcars), size = round(nrow(mtcars)/2))
  Train <- mtcars[index, ]
  Test <- mtcars[-index, ]
  Modelo2 <- lm(mpg ~ hp + I(hp^2) + I(hp^3) + I(hp^4), data = Train)
  Train$Pred <- predict(Modelo2, Train)
  Test$Pred <- predict(Modelo2, Test)
  ## Element [2] of postResample() is R-squared
  DF$Predictivo[i] <- postResample(pred = Test$Pred, obs = Test$mpg)[2]
  DF$Explicativo[i] <- postResample(pred = Train$Pred, obs = Train$mpg)[2]
}
# Compare the two R-squared distributions side by side.
DF <- DF %>% pivot_longer(cols = everything(), names_to = "Tipo", values_to = "R2")
ggplot(DF, aes(x = Tipo, y = R2)) + geom_boxplot()
|
# Compute a monthly 'trend' index for a time series: the number of months
# elapsed since January of `start.year` (January of start.year maps to 1).
# Vectorized over `year` and `month`.
trend <- function(year, month, start.year = 1958) {
  (year - start.year) * 12 + month
}
|
/R/old/trend.r
|
no_license
|
bearedo/effdis
|
R
| false
| false
| 161
|
r
|
# Compute a monthly 'trend' index for a time series: the number of months
# elapsed since January of `start.year` (January of start.year maps to 1).
# Vectorized over `year` and `month`.
trend <- function(year, month, start.year = 1958) {
  (year - start.year) * 12 + month
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_item_index_max_information.R
\name{get_item_index_max_information}
\alias{get_item_index_max_information}
\title{Item index maximum information}
\usage{
get_item_index_max_information(available, item_information, estimate, stop_test,
alpha, number_answers)
}
\arguments{
\item{available}{Vector with indices of available items.}
\item{item_information}{Vector with summarized information of each yet available item, with zeros for administered items
(as returned by \code{\link{get_summarized_information}} with \code{pad = TRUE}).}
\item{estimate}{Vector containing the theta estimate, with its covariance matrix as an attribute.}
\item{stop_test}{List indicating rules for when to terminate the test. Should be a list of the form
\code{list(target = ..., max_n = ..., min_n = ..., cutoffs = ...)},
where \code{target} is a vector indicating the maximum acceptable variance per dimension; \code{NULL} means no variance target,
\code{max_n} is the test length at which the test should be terminated (even if the target has not been reached yet),
\code{min_n} is the minimum test length; \code{NULL} means no minimum test length, and
\code{cutoffs} is a matrix containing cut off values per dimension (columns) and test iteration (rows). First row contains cut off values for when no items have been
administered yet, second row for when one item has been administered, etc. If estimate + 3SE < cutoff for each dimension at a certain iteration, test terminates;
\code{NULL} means no cut off values.}
\item{alpha}{Matrix of alpha parameters, one column per dimension, one row per item. Row names should contain the item keys.
Note that so called within-dimensional models still use an alpha matrix, they simply have only one non-zero loading per item.}
\item{number_answers}{The number of answers given thus far (length of \code{administered}).}
}
\value{
Index of item with maximum information.
}
\description{
Get the index of the item with maximum information from the available items.
For multi dimensional models, items that only load on dimensions for
which the variance target has already been reached, will not be returned.
}
|
/man/get_item_index_max_information.Rd
|
no_license
|
Karel-Kroeze/ShadowCAT
|
R
| false
| true
| 2,228
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_item_index_max_information.R
\name{get_item_index_max_information}
\alias{get_item_index_max_information}
\title{Item index maximum information}
\usage{
get_item_index_max_information(available, item_information, estimate, stop_test,
alpha, number_answers)
}
\arguments{
\item{available}{Vector with indices of available items.}
\item{item_information}{Vector with summarized information of each yet available item, with zeros for administered items
(as returned by \code{\link{get_summarized_information}} with \code{pad = TRUE}).}
\item{estimate}{Vector containing the theta estimate, with its covariance matrix as an attribute.}
\item{stop_test}{List indicating rules for when to terminate the test. Should be a list of the form
\code{list(target = ..., max_n = ..., min_n = ..., cutoffs = ...)},
where \code{target} is a vector indicating the maximum acceptable variance per dimension; \code{NULL} means no variance target,
\code{max_n} is the test length at which the test should be terminated (even if the target has not been reached yet),
\code{min_n} is the minimum test length; \code{NULL} means no minimum test length, and
\code{cutoffs} is a matrix containing cut off values per dimension (columns) and test iteration (rows). First row contains cut off values for when no items have been
administered yet, second row for when one item has been administered, etc. If estimate + 3SE < cutoff for each dimension at a certain iteration, test terminates;
\code{NULL} means no cut off values.}
\item{alpha}{Matrix of alpha parameters, one column per dimension, one row per item. Row names should contain the item keys.
Note that so called within-dimensional models still use an alpha matrix, they simply have only one non-zero loading per item.}
\item{number_answers}{The number of answers given thus far (length of \code{administered}).}
}
\value{
Index of item with maximum information.
}
\description{
Get the index of the item with maximum information from the available items.
For multi dimensional models, items that only load on dimensions for
which the variance target has already been reached, will not be returned.
}
|
\name{magnets-package}
\Rdversion{1.1}
\alias{magnets-package}
\alias{magnets}
\docType{package}
\title{
Numerically Simulate n Micro-spins under the Influence of Temperature &
Other External Conditions
}
\description{
In recent years, micro-magnet systems have attracted much theoretical and experimental
interest as a possible candidate for the next-generation circuits to
replace the current electronic
integrated circuits. The micro-magnets on the sizes of micro-meters can be
arranged to form logic gates and even wires to transfer
information. Although the speed of micro-magnet components is
slower than the traditional electronic circuit, the energy that is
needed to change the state of a micro-magnet is much smaller. This
makes it suitable for many special cases where low energy consumption
is the priority. An ideal integrated circuit would be a mix of
the micro-magnet circuit and the traditional electronic
circuit.
Therefore it is important to be able to do numerical simulations about
any configurations of micro-magnets. However there is no package in R
that can do this job yet. My package will fulfill this gap.
As the first step, my package now is able to simulate the
time-dependent dynamics of n magnet spins with any possible
configurations. My package has the ability to display the dynamics as
an animation with the help of animation package, which proves to be very
helpful in my research. Some system related
properties such as the free energy and the average magnitude of the spins can be
measured.
So far this package can be used as a first step simulation to study
the dynamics of micro-magnet wire (a linear array of micro-magnets) and logic gates. The users are
required to input the configuration of the system, i.e. how the spins
are positioned in the 3D space. Different configurations can have
different dynamics, and thus may be used as different functional
components in an integrated circuit.
In future versions, more functions will be added, and the package will be able to simulate the detailed dynamics
of one single magnet spin.
This package should be useful to physicists and electrical engineers,
who are interested in design the next generation integrated circuits.
Numerically simulate \eqn{n} micro-spins under certain temperature,
external magnetic field, external fixed magnets. The
Landau-Lifshitz-Gilbert Equation is used to simulate the system.
}
\details{
\tabular{ll}{
Package: \tab magnets\cr
Type: \tab Package\cr
Version: \tab 0.1\cr
Date: \tab 2009-10-15\cr
License: \tab What license is it under?\cr
LazyLoad: \tab yes\cr
}
Use \code{\link{multispin.evol}} to simulate, use
\code{\link{free.energy}} to calculate the free energy of the system,
use \code{\link{component.animation}}
to view the dynamics directly as an animation.
}
\author{
Hai Qian, Electrical Engineering Department, UCLA
Maintainer: Hai Qian <haiqian@ee.ucla.edu>
}
\references{
check out the references in \code{\link{multispin.evol}}
}
\keyword{ package }
|
/man/magnets-package.Rd
|
no_license
|
cran/magnets
|
R
| false
| false
| 3,020
|
rd
|
\name{magnets-package}
\Rdversion{1.1}
\alias{magnets-package}
\alias{magnets}
\docType{package}
\title{
Numerically Simulate n Micro-spins under the Influence of Temperature &
Other External Conditions
}
\description{
In recent years, micro-magnet systems have attracted much theoretical and experimental
interest as a possible candidate for the next-generation circuits to
replace the current electronic
integrated circuits. The micro-magnets on the sizes of micro-meters can be
arranged to form logic gates and even wires to transfer
information. Although the speed of micro-magnet components is
slower than the traditional electronic circuit, the energy that is
needed to change the state of a micro-magnet is much smaller. This
makes it suitable for many special cases where low energy consumption
is the priority. An ideal integrated circuit would be a mix of
the micro-magnet circuit and the traditional electronic
circuit.
Therefore it is important to be able to do numerical simulations about
any configurations of micro-magnets. However there is no package in R
that can do this job yet. My package will fulfill this gap.
As the first step, my package now is able to simulate the
time-dependent dynamics of n magnet spins with any possible
configurations. My package has the ability to display the dynamics as
an animation with the help of animation package, which proves to be very
helpful in my research. Some system related
properties such as the free energy and the average magnitude of the spins can be
measured.
So far this package can be used as a first step simulation to study
the dynamics of micro-magnet wire (a linear array of micro-magnets) and logic gates. The users are
required to input the configuration of the system, i.e. how the spins
are positioned in the 3D space. Different configurations can have
different dynamics, and thus may be used as different functional
components in an integrated circuit.
In future versions, more functions will be added, and the package will be able to simulate the detailed dynamics
of one single magnet spin.
This package should be useful to physicists and electrical engineers,
who are interested in design the next generation integrated circuits.
Numerically simulate \eqn{n} micro-spins under certain temperature,
external magnetic field, external fixed magnets. The
Landau-Lifshitz-Gilbert Equation is used to simulate the system.
}
\details{
\tabular{ll}{
Package: \tab magnets\cr
Type: \tab Package\cr
Version: \tab 0.1\cr
Date: \tab 2009-10-15\cr
License: \tab What license is it under?\cr
LazyLoad: \tab yes\cr
}
Use \code{\link{multispin.evol}} to simulate, use
\code{\link{free.energy}} to calculate the free energy of the system,
use \code{\link{component.animation}}
to view the dynamics directly as an animation.
}
\author{
Hai Qian, Electrical Engineering Department, UCLA
Maintainer: Hai Qian <haiqian@ee.ucla.edu>
}
\references{
check out the references in \code{\link{multispin.evol}}
}
\keyword{ package }
|
# --- Header -------------------------------------------------------------------
# (C) Joachim Gassen 2021, gassen@wiwi.hu-berlin.de
# License: MIT. See LICENSE file for details.
#
# English language shiny app communicating the results of the pricing experiment
# ------------------------------------------------------------------------------
library(DBI, quietly = TRUE)
library(shiny, quietly = TRUE)
library(DT, quietly = TRUE)
library(shinyjs, quietly = TRUE)
library(tidyverse)
library(kableExtra)
# Path to the SQLite database holding the experiment responses.
dbase_path <- "croom_exp_response.sqlite3"
# Cut-off time after which the results become publicly visible.
# NOTE(review): "CEST" is not an IANA time zone name (e.g. "Europe/Berlin");
# confirm that lubridate resolves it as intended on the deployment host.
end_experiment <- lubridate::as_datetime("2021-04-27 10:00:00", tz = "CEST")
# When TRUE, the results page is shown regardless of end_experiment.
DEBUG <- TRUE
# Build the UI: a placeholder page before the experiment ends (unless in
# debug mode), otherwise the full results page with sample-restriction
# controls and output placeholders filled by the server.
# FIX: use the scalar short-circuit operator `&&` in the `if` condition
# instead of the elementwise `&` (both operands are length-1 logicals here,
# so behavior is unchanged, but `&&` is the correct operator for `if`).
if (Sys.time() < end_experiment && !DEBUG) {
  ui <- fluidPage(p("Nothing to see here yet. Sorry."))
} else {
  ui <- fluidPage(
    titlePanel("A Pricing Task: Experimental Findings"),
    p("These are the findings of our classroom experiment."),
    br(),
    sidebarLayout(
      sidebarPanel(
        # Sample restriction controls feeding the reactive data set.
        radioButtons("data_cutoff", "Do you want to limit the sample?",
                     c("All observations" = "none",
                       "Only observations with a price > $6" = "be6")),
        br(),
        sliderInput("exclude_below_time",
                    "Only observations with response time in seconds larger than...",
                    value = 0,
                    min = 0,
                    max = 60),
        downloadButton("download", "Download the experimental data")
      ),
      mainPanel(
        h3("Descriptive Statistics"),
        p("Below you will find your answering behavior, separated for both",
          "experimental groups. Price in measured in $ and response",
          "time in seconds."),
        tableOutput("descriptive_table"),
        h3("Group Box Plots"),
        plotOutput("box_plots"),
        br(),
        h3("Test Statistics"),
        tableOutput("tests"),
        p(),
        p("The Chi-square test is based on the following contingency table."),
        tableOutput("cont_table"),
        br(),
        HTML("<p>Copyright Joachim Gassen, gassen@wiwi.hu-berlin.de, 2021",
             "See <a href='https://github.com/joachim-gassen/shiny_croom_exp'>",
             "GitHub repository</a> for license, code and details.")
      )
    )
  )
}
# Shiny server: loads the experiment responses, applies the user-selected
# sample restrictions, and renders descriptive statistics, box plots,
# test statistics, the contingency table, and a CSV download.
server <- function(input, output, session) {
  # Read all experiment answers from the SQLite database at `fname` and
  # recode the treatment indicator into a readable group label.
  read_experiment_data <- function(fname) {
    con <- dbConnect(RSQLite::SQLite(), fname)
    res <- dbSendQuery(con, "SELECT * FROM answers")
    df <- dbFetch(res)
    dbClearResult(res)
    dbDisconnect(con)
    df$full_cost <- ifelse (df$full_cost == 1, "Full cost", "Variable cost")
    df
  }
  raw_df <- read_experiment_data(dbase_path)
  # Reactive sample after applying the user-selected restrictions
  # (optional price cutoff and minimum response time).
  d <- reactive({
    if (input$data_cutoff == "be6") df <- raw_df %>% filter(price > 6)
    else df <- raw_df
    df %>% filter(time >= input$exclude_below_time)
  })
  # Descriptive statistics of price and response time by treatment group,
  # rendered as an HTML table via knitr/kableExtra.
  output$descriptive_table <- function() {
    df <- d() %>%
      group_by(full_cost) %>%
      select(price, time, full_cost) %>%
      gather(key = "var", value = "value", -full_cost) %>%
      group_by(full_cost, var) %>%
      summarise(N = n(),
                Mean = mean(value),
                'Standard deviation' = sd(value),
                Minimum = min(value),
                'First quartile' = quantile(value, 0.25),
                Median = median(value),
                'Third quartile' = quantile(value, 0.75),
                Maximum = max(value),
                .groups = "drop") %>%
      rename('Full cost data' = full_cost)
    # Transpose so that statistics become rows and group/variable pairs columns.
    df <- as_tibble(cbind(nms = names(df), t(df)))
    print_df <- df[c(4:10),]
    print_df[, 2:5] <- lapply(print_df[, 2:5], as.numeric)
    names(print_df) <- c("", rep(c("price", "response time"), 2))
    group_header <- c(1, 2, 2)
    names(group_header) <- c(" ",
      sprintf("Full cost data (N = %d)", as.numeric(df[3, 2])),
      sprintf("Variable cost data (N = %d)", as.numeric(df[3, 4])))
    knitr::kable(print_df, "html", digits = 2) %>%
      kable_styling("striped", full_width = F) %>%
      add_header_above(group_header)
  }
  # Box plots of calculated prices by experimental group, with jittered points.
  output$box_plots <- renderPlot(
    ggplot(data = d(), aes(x = full_cost, y = price)) +
      geom_boxplot(outlier.color = NA, fill = "lightblue") +
      geom_jitter(width = 0.2, height = 0, size = 3, stroke = 0, shape = 16, alpha = 0.5) +
      theme_bw() +
      labs(x = "Experimental group",
           y = "Calculated price in $")
  )
  # t-test, Wilcoxon test, and chi-square test for group differences.
  # Observations with price exactly $12 are excluded from the chi-square
  # dichotomization (high_price set to NA).
  output$tests <- function() {
    df <- d()
    tt <- t.test(price ~ full_cost, data = df)
    rt <- wilcox.test(price ~ full_cost, data = df)
    df$high_price <- df$price > 12
    df$high_price[df$price == 12] <- NA
    ct <- chisq.test(df$high_price[!is.na(df$high_price)],
                     df$full_cost[!is.na(df$high_price)])
    print_df <- rbind(c(tt$statistic, tt$p.value),
                      c(rt$statistic, rt$p.value),
                      c(ct$statistic, ct$p.value))
    colnames(print_df) <- c("Statistic", "P-value (two sided)")
    rownames(print_df) <- c("T-test for mean differences",
                            "Wilcoxon test for distribution differences",
                            "Chi-square test for group differences")
    knitr::kable(print_df, "html", digits = c(2, 4)) %>%
      kable_styling("striped", full_width = F)
  }
  # Contingency table underlying the chi-square test above.
  output$cont_table <- function() {
    df <- d()
    df$high_price <- df$price > 12
    df$high_price[df$price == 12] <- NA
    ct <- table(df$high_price, df$full_cost)
    rownames(ct) <- c("Price below $12", "Price above $12")
    knitr::kable(ct, "html") %>%
      kable_styling("striped", full_width = F)
  }
  # CSV download of the raw (unrestricted) data.
  # FIX: the original passed `filename <- function() ...` and
  # `content <- function(con) ...`, i.e. `<-` instead of `=` inside the call.
  # `<-` assigns the two functions into the server environment and passes
  # them positionally, which only works by accident because the positional
  # order matches; named arguments are the intended usage of downloadHandler.
  output$download <- downloadHandler(
    filename = function() {
      paste0('croom_exp_data-', Sys.Date(), '.csv')
    },
    content = function(con) {
      write.csv(raw_df, con)
    }
  )
}
# Run the application: launch the Shiny app with the UI and server built above.
shinyApp(ui = ui, server = server)
|
/app_results_en.R
|
permissive
|
joachim-gassen/shiny_croom_exp
|
R
| false
| false
| 5,904
|
r
|
# --- Header -------------------------------------------------------------------
# (C) Joachim Gassen 2021, gassen@wiwi.hu-berlin.de
# License: MIT. See LICENSE file for details.
#
# English language shiny app communicating the results of the pricing experiment
# ------------------------------------------------------------------------------
library(DBI, quietly = TRUE)
library(shiny, quietly = TRUE)
library(DT, quietly = TRUE)
library(shinyjs, quietly = TRUE)
library(tidyverse)
library(kableExtra)
# SQLite file holding the experimental responses.
dbase_path <- "croom_exp_response.sqlite3"
# "CEST" is a zone abbreviation, not a valid Olson/IANA timezone name, and
# is silently interpreted as UTC on most platforms. Use the canonical name.
end_experiment <- lubridate::as_datetime("2021-04-27 10:00:00",
                                         tz = "Europe/Berlin")
# When TRUE the results page is shown even before the experiment has ended.
DEBUG <- TRUE
# Before the experiment ends, show a placeholder page (unless debugging).
# `&&` (scalar, short-circuiting) is the correct operator inside `if()`;
# `&` is for elementwise vector logic.
if (Sys.time() < end_experiment && !DEBUG) {
  ui <- fluidPage(p("Nothing to see here yet. Sorry."))
} else {
  ui <- fluidPage(
    titlePanel("A Pricing Task: Experimental Findings"),
    p("These are the findings of our classroom experiment."),
    br(),
    sidebarLayout(
      sidebarPanel(
        radioButtons("data_cutoff", "Do you want to limit the sample?",
                     c("All observations" = "none",
                       "Only observations with a price > $6" = "be6")),
        br(),
        sliderInput("exclude_below_time",
                    "Only observations with response time in seconds larger than...",
                    value = 0,
                    min = 0,
                    max = 60),
        downloadButton("download", "Download the experimental data")
      ),
      mainPanel(
        h3("Descriptive Statistics"),
        p("Below you will find your answering behavior, separated for both",
          "experimental groups. Price is measured in $ and response",
          "time in seconds."),
        tableOutput("descriptive_table"),
        h3("Group Box Plots"),
        plotOutput("box_plots"),
        br(),
        h3("Test Statistics"),
        tableOutput("tests"),
        p(),
        p("The Chi-square test is based on the following contingency table."),
        tableOutput("cont_table"),
        br(),
        HTML("<p>Copyright Joachim Gassen, gassen@wiwi.hu-berlin.de, 2021",
             "See <a href='https://github.com/joachim-gassen/shiny_croom_exp'>",
             "GitHub repository</a> for license, code and details.")
      )
    )
  )
}
server <- function(input, output, session) {
read_experiment_data <- function(fname) {
con <- dbConnect(RSQLite::SQLite(), fname)
res <- dbSendQuery(con, "SELECT * FROM answers")
df <- dbFetch(res)
dbClearResult(res)
dbDisconnect(con)
df$full_cost <- ifelse (df$full_cost == 1, "Full cost", "Variable cost")
df
}
  # Load the raw responses once at server start-up.
  raw_df <- read_experiment_data(dbase_path)
  # Reactive view of the data honoring the sidebar filters: optional
  # price cutoff (keep only prices > $6) and a minimum response time.
  d <- reactive({
    if (input$data_cutoff == "be6") df <- raw_df %>% filter(price > 6)
    else df <- raw_df
    df %>% filter(time >= input$exclude_below_time)
  })
  # Renders an HTML table of descriptive statistics (N, mean, sd, quartiles,
  # min/max) for price and response time, split by experimental group, with
  # a two-level column header.
  output$descriptive_table <- function() {
    # Long format: one row per group x variable combination, then summarise.
    df <- d() %>%
      group_by(full_cost) %>%
      select(price, time, full_cost) %>%
      gather(key = "var", value = "value", -full_cost) %>%
      group_by(full_cost, var) %>%
      summarise(N = n(),
                Mean = mean(value),
                'Standard deviation' = sd(value),
                Minimum = min(value),
                'First quartile' = quantile(value, 0.25),
                Median = median(value),
                'Third quartile' = quantile(value, 0.75),
                Maximum = max(value),
                .groups = "drop") %>%
      rename('Full cost data' = full_cost)
    # Transpose so the statistics become rows and the four group/variable
    # combinations become columns (row 3 holds N, rows 4-10 Mean..Maximum).
    df <- as_tibble(cbind(nms = names(df), t(df)))
    print_df <- df[c(4:10),]
    print_df[, 2:5] <- lapply(print_df[, 2:5], as.numeric)
    names(print_df) <- c("", rep(c("price", "response time"), 2))
    # Two-level header: one spacer column plus two columns per group, with
    # the per-group N (taken from row 3 above) shown in the group label.
    group_header <- c(1, 2, 2)
    names(group_header) <- c(" ",
      sprintf("Full cost data (N = %d)", as.numeric(df[3, 2])),
      sprintf("Variable cost data (N = %d)", as.numeric(df[3, 4])))
    knitr::kable(print_df, "html", digits = 2) %>%
      kable_styling("striped", full_width = F) %>%
      add_header_above(group_header)
  }
output$box_plots <- renderPlot(
ggplot(data = d(), aes(x = full_cost, y = price)) +
geom_boxplot(outlier.color = NA, fill = "lightblue") +
geom_jitter(width = 0.2, height = 0, size = 3, stroke = 0, shape = 16, alpha = 0.5) +
theme_bw() +
labs(x = "Experimental group",
y = "Calculated price in $")
)
  # Renders an HTML table of inferential tests comparing the two
  # experimental groups (full cost vs. variable cost):
  #   * t-test for a difference in mean prices,
  #   * Wilcoxon rank-sum test for a difference in price distributions,
  #   * chi-square test on a high/low price dichotomy.
  output$tests <- function() {
    df <- d()
    tt <- t.test(price ~ full_cost, data = df)
    rt <- wilcox.test(price ~ full_cost, data = df)
    # Dichotomize price at $12; responses exactly at the threshold are
    # treated as undecided (NA) and excluded from the chi-square test.
    df$high_price <- df$price > 12
    df$high_price[df$price == 12] <- NA
    ct <- chisq.test(df$high_price[!is.na(df$high_price)],
                     df$full_cost[!is.na(df$high_price)])
    # One row per test: test statistic and two-sided p-value.
    print_df <- rbind(c(tt$statistic, tt$p.value),
                      c(rt$statistic, rt$p.value),
                      c(ct$statistic, ct$p.value))
    colnames(print_df) <- c("Statistic", "P-value (two sided)")
    rownames(print_df) <- c("T-test for mean differences",
                            "Wilcoxon test for distribution differences",
                            "Chi-square test for group differences")
    knitr::kable(print_df, "html", digits = c(2, 4)) %>%
      kable_styling("striped", full_width = F)
  }
  # Renders the 2x2 contingency table (price below/above $12 by
  # experimental group) underlying the chi-square test. Prices of exactly
  # $12 are set to NA and are silently dropped by table().
  output$cont_table <- function() {
    df <- d()
    df$high_price <- df$price > 12
    df$high_price[df$price == 12] <- NA
    ct <- table(df$high_price, df$full_cost)
    # Row order follows the FALSE/TRUE levels of high_price.
    rownames(ct) <- c("Price below $12", "Price above $12")
    knitr::kable(ct, "html") %>%
      kable_styling("striped", full_width = F)
  }
output$download <- downloadHandler(
filename <- function() {
paste0('croom_exp_data-', Sys.Date(), '.csv')
},
content <- function(con) {
write.csv(raw_df, con)
}
)
}
# Run the application
shinyApp(ui = ui, server = server)
|
/FactoMineR/R/descfreq.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,873
|
r
| ||
\name{tickTF-package}
\alias{tickTF-package}
\alias{tickTF}
\docType{package}
\title{
\packageTitle{tickTF}
}
\description{
\packageDescription{tickTF}
}
\details{
This package contains the data and functions used to describe the
infection process of the roe deer fawns by the ticks described by
Bariod et al. (in prep.). The dataset \code{fticks} contains the
original data, and the vignette of the package describes how the
functions of the package were used to fit the model. This package
relies strongly on the package nimble for the fit.
}
\author{
\packageAuthor{tickTF}
Maintainer: \packageMaintainer{tickTF}
}
\references{
Bariod L., Said S., Calenge C., Chabot S., Badeaud V. \& Bourgoin
G. in prep. Infection of Roe Deer Fawns by Ticks from 1992 to 2018 in the
Trois-Fontaines Forest (France).
}
\keyword{ package }
\seealso{
\code{\link{fticks}} for the dataset used; \code{vignette("tickTF")}
for a description of the calculations carried out in Bariod et al. (in
prep.).
}
\examples{
## dataset
head(fticks)
}
|
/man/tickTF-package.Rd
|
no_license
|
ClementCalenge/tickTF
|
R
| false
| false
| 1,056
|
rd
|
\name{tickTF-package}
\alias{tickTF-package}
\alias{tickTF}
\docType{package}
\title{
\packageTitle{tickTF}
}
\description{
\packageDescription{tickTF}
}
\details{
This package contains the data and functions used to describe the
infection process of the roe deer fawns by the ticks described by
Bariod et al. (in prep.). The dataset \code{fticks} contains the
original data, and the vignette of the package describes how the
functions of the package were used to fit the model. This package
relies strongly on the package nimble for the fit.
}
\author{
\packageAuthor{tickTF}
Maintainer: \packageMaintainer{tickTF}
}
\references{
Bariod L., Said S., Calenge C., Chabot S., Badeaud V. \& Bourgoin
G. in prep. Infection of Roe Deer Fawns by Ticks from 1992 to 2018 in the
Trois-Fontaines Forest (France).
}
\keyword{ package }
\seealso{
\code{\link{fticks}} for the dataset used; \code{vignette("tickTF")}
for a description of the calculations carried out in Bariod et al. (in
prep.).
}
\examples{
## dataset
head(fticks)
}
|
# Fiscal-year-to-calendar-quarter conversion: FY quarter 1 maps to Q4 of
# the previous calendar year, i.e. quarters shift back by one (consistent
# with an October-September fiscal year).
test_that("convertFYQToCQ", {
  expect_equal(convertFYQToCQ(1995, 1), "1994Q4")
  expect_equal(convertFYQToCQ(1995, 2), "1995Q1")
  expect_equal(convertFYQToCQ(1995, 3), "1995Q2")
  expect_equal(convertFYQToCQ(1995, 4), "1995Q3")
})
|
/tests/testthat/test-convertFYQToCQ.R
|
permissive
|
pepfar-datim/datimutils
|
R
| false
| false
| 235
|
r
|
# Fiscal-year-to-calendar-quarter conversion: FY quarter 1 maps to Q4 of
# the previous calendar year, i.e. quarters shift back by one (consistent
# with an October-September fiscal year).
test_that("convertFYQToCQ", {
  expect_equal(convertFYQToCQ(1995, 1), "1994Q4")
  expect_equal(convertFYQToCQ(1995, 2), "1995Q1")
  expect_equal(convertFYQToCQ(1995, 3), "1995Q2")
  expect_equal(convertFYQToCQ(1995, 4), "1995Q3")
})
|
##
## 28 Jan 2020
##
## Details: Test coupons, Inconel 718 Plate 02-Build 01
##
##
## Find center axis of test coupons using non-linear least squares
##
##------------------------------------------------------------------------------------------------------------------------
library(fields)
library(plyr)
# Paths are machine-specific; the script relies on setwd() for relative reads.
setwd("C:/Users/barna/Documents/Coupons/datasets")
poreData <- readRDS("porosityData.rds")
position <- readRDS("buildPlatePosition.rds")
setwd("C:/Users/barna/Documents/Coupons/nlsAxis")
source("nlsFunctions.R") # NOTE(review): sourced helpers should not call setwd()
# Fit a center axis for each of the 58 coupons.
for(n in 1:58){
  print(position[n])
  ##--------------------------------------------------------------------
  ## crop coupon
  ##--------------------------------------------------------------------
  poreCoordinates <- cropCoupon(n, poreData)
  ##--------------------------------------------------------------------
  # Non-linear least-squares fit of the coupon's center axis.
  nlsObj <- nlsAxisFit(poreCoordinates)
  nlsCoeff <- coef(nlsObj)
  ##--------------------------------------------------------------------
  ## store data
  ##--------------------------------------------------------------------
  ## store the old coupon coordinates, the "new" rotated coupon coords, and the nls coeff
  ## useful for generating surface plots and histograms for each coupon
  nlsCoupon <- newCoupon(poreCoordinates, nlsCoeff["centroidX"], nlsCoeff["centroidY"],
                         nlsCoeff["axisVectorX"], nlsCoeff["axisVectorY"])
  oldCoupon <- poreCoordinates
  # setwd() is repeated inside the loop, presumably because the sourced
  # helpers may change the working directory -- TODO confirm before hoisting.
  setwd("C:/Users/barna/Documents/Coupons/nlsAxis/porosity/nlsPorosityData/cropped")
  save(oldCoupon, nlsCoupon, nlsCoeff, file = paste0("nlsCoupon", position[n], ".rda"))
} # end of for loop
|
/nlsAxis/porosity/nlsPorosityFit.R
|
no_license
|
dnychka/Coupons
|
R
| false
| false
| 1,636
|
r
|
##
## 28 Jan 2020
##
## Details: Test coupons, Inconel 718 Plate 02-Build 01
##
##
## Find center axis of test coupons using non-linear least squares
##
##------------------------------------------------------------------------------------------------------------------------
library(fields)
library(plyr)
# Paths are machine-specific; the script relies on setwd() for relative reads.
setwd("C:/Users/barna/Documents/Coupons/datasets")
poreData <- readRDS("porosityData.rds")
position <- readRDS("buildPlatePosition.rds")
setwd("C:/Users/barna/Documents/Coupons/nlsAxis")
source("nlsFunctions.R") # NOTE(review): sourced helpers should not call setwd()
# Fit a center axis for each of the 58 coupons.
for(n in 1:58){
  print(position[n])
  ##--------------------------------------------------------------------
  ## crop coupon
  ##--------------------------------------------------------------------
  poreCoordinates <- cropCoupon(n, poreData)
  ##--------------------------------------------------------------------
  # Non-linear least-squares fit of the coupon's center axis.
  nlsObj <- nlsAxisFit(poreCoordinates)
  nlsCoeff <- coef(nlsObj)
  ##--------------------------------------------------------------------
  ## store data
  ##--------------------------------------------------------------------
  ## store the old coupon coordinates, the "new" rotated coupon coords, and the nls coeff
  ## useful for generating surface plots and histograms for each coupon
  nlsCoupon <- newCoupon(poreCoordinates, nlsCoeff["centroidX"], nlsCoeff["centroidY"],
                         nlsCoeff["axisVectorX"], nlsCoeff["axisVectorY"])
  oldCoupon <- poreCoordinates
  # setwd() is repeated inside the loop, presumably because the sourced
  # helpers may change the working directory -- TODO confirm before hoisting.
  setwd("C:/Users/barna/Documents/Coupons/nlsAxis/porosity/nlsPorosityData/cropped")
  save(oldCoupon, nlsCoupon, nlsCoeff, file = paste0("nlsCoupon", position[n], ".rda"))
} # end of for loop
|
\name{SCRdesign}
\alias{SCRdesign}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Computes an SCR design.%% ~~function to do ... ~~
}
\description{
Computes an SCR design given the state-space and number of traps to
deploy. Note: the function is experimental. The math has not been
checked and the theory is tentative. Progress will be made in 2015.
}
\usage{
SCRdesign(S = S, C = C, ntraps = 9, ndesigns = 10, nn = 19, sigma = 2, crit = 3)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{S}{
The state-space of the SCR model, i.e., "study area" within which
traps will be deployed, including a suitable habitat buffer where
individuals exposed to capture might live. S should be an M x 2
matrix of coordinates.
%% ~~Describe \code{S} here~~
}
\item{C}{
The potential (candidate) trap locations. Also called the design space.
Normally this is a strict subset of S.
}
\item{ntraps}{
Number of traps to be deployed.
}
\item{ndesigns}{
Number of random designs to optimize over. More is better in terms of
finding better designs but increases the run time. %% ~~Describe \code{ndesigns} here~~
}
\item{nn}{
Number of nearest neighbors to use in the optimization over the design
space. %% ~~Describe \code{nn} here~~
}
\item{sigma}{
The function uses a "half-normal" hazard rate model of the form:
Pr(encounter) = 1-exp(-lambda0*g(||x-s||)) where
g(||x-s||) is the kernel of a bivariate normal density. sigma is the
parameter.
}
\item{crit}{
Which criterion to use? In the SCR book there are 4: Q1, Q2, Q3 and
Q4. These are numbered 4-7 in this function because some partial
things are used in the debugging. The definition of things is as follows:
## crit=4 = trace(V(alpha)) (crit 1 from the book)
## crit 5 = Var(N) (crit 2 from the book)
## crit 6 = 1-pbar (crit 3 from the book)
## crit 7 = var(pbar) (crit 4 from the book)
}
}
\details{
The function uses a coordinate exchange algorithm to sequentially
improve the objective function by swapping one point at a time out of
the design.
%% ~~ If necessary, more details than the description above ~~
}
\value{
A list with components:
Qvec = the ordered criteria values of the design found for each
starting design
Xid = ID of each design point in the best design over all
Xlst a list of all designs found (one for each starting design)
C
S
Qhistory
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
SCR book chapter 10.
Royle, J. A., & Nychka, D. (1998). An algorithm for the construction of spatial coverage designs with implementation in SPLUS. Computers & Geosciences, 24(5), 479-488.
Nychka, D., Yang, Q., & Royle, J. A. (1997). Constructing spatial designs using regression subset selection. Statistics for the Environment, 3, 131-154.
}
\author{
Andy Royle, aroyle@usgs.gov
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# Define a state-space S and a design space C
lower=9
upper=21
delta<- .2 ##1/3 # spacing of design points
pts<-seq(lower,upper,delta)
s1<- sort(rep(pts,length(pts)))
s2<-rep(pts,length(pts))
S<-cbind(s1,s2)
inX<- (s1 <=20 & s1 >=10) & (s2 <=20 & s2>=10)
C<-S[inX,] # candidate set
dbar<-function(X){
a<-as.matrix(dis(X))
diag(a)<-10^10
mean(apply(a,1,min))
}
library(scrbook)
## crit=4 = trace(V(alpha))
## crit 5 = Var(N)
## crit 6 = 1-pbar
## crit 7 = var(pbar)
a1.1<-SCRdesign(S,C,ntraps=11,ndesigns=10,nn=15,sigma=1,crit=4)
a2.1<-SCRdesign(S,C,ntraps=11,ndesigns=10,nn=15,sigma=1,crit=5)
# Probably use crit=6 right now
a3.1<-SCRdesign(S,C,ntraps=11,ndesigns=10,nn=15,sigma=1,crit=6)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/Rpackage/scrbook/man/SCRdesign.Rd
|
no_license
|
jaroyle/scrbook
|
R
| false
| false
| 4,115
|
rd
|
\name{SCRdesign}
\alias{SCRdesign}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Computes an SCR design.%% ~~function to do ... ~~
}
\description{
Computes an SCR design given the state-space and number of traps to
deploy. Note: the function is experimental. The math has not been
checked and the theory is tentative. Progress will be made in 2015.
}
\usage{
SCRdesign(S = S, C = C, ntraps = 9, ndesigns = 10, nn = 19, sigma = 2, crit = 3)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{S}{
The state-space of the SCR model, i.e., "study area" within which
traps will be deployed, including a suitable habitat buffer where
individuals exposed to capture might live. S should be an M x 2
matrix of coordinates.
%% ~~Describe \code{S} here~~
}
\item{C}{
The potential (candidate) trap locations. Also called the design space.
Normally this is a strict subset of S.
}
\item{ntraps}{
Number of traps to be deployed.
}
\item{ndesigns}{
Number of random designs to optimize over. More is better in terms of
finding better designs but increases the run time. %% ~~Describe \code{ndesigns} here~~
}
\item{nn}{
Number of nearest neighbors to use in the optimization over the design
space. %% ~~Describe \code{nn} here~~
}
\item{sigma}{
The function uses a "half-normal" hazard rate model of the form:
Pr(encounter) = 1-exp(-lambda0*g(||x-s||)) where
g(||x-s||) is the kernel of a bivariate normal density. sigma is the
parameter.
}
\item{crit}{
Which criterion to use? In the SCR book there are 4: Q1, Q2, Q3 and
Q4. These are numbered 4-7 in this function because some partial
things are used in the debugging. The definition of things is as follows:
## crit=4 = trace(V(alpha)) (crit 1 from the book)
## crit 5 = Var(N) (crit 2 from the book)
## crit 6 = 1-pbar (crit 3 from the book)
## crit 7 = var(pbar) (crit 4 from the book)
}
}
\details{
The function uses a coordinate exchange algorithm to sequentially
improve the objective function by swapping one point at a time out of
the design.
%% ~~ If necessary, more details than the description above ~~
}
\value{
A list with components:
Qvec = the ordered criteria values of the design found for each
starting design
Xid = ID of each design point in the best design over all
Xlst a list of all designs found (one for each starting design)
C
S
Qhistory
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
SCR book chapter 10.
Royle, J. A., & Nychka, D. (1998). An algorithm for the construction of spatial coverage designs with implementation in SPLUS. Computers & Geosciences, 24(5), 479-488.
Nychka, D., Yang, Q., & Royle, J. A. (1997). Constructing spatial designs using regression subset selection. Statistics for the Environment, 3, 131-154.
}
\author{
Andy Royle, aroyle@usgs.gov
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# Define a state-space S and a design space C
lower=9
upper=21
delta<- .2 ##1/3 # spacing of design points
pts<-seq(lower,upper,delta)
s1<- sort(rep(pts,length(pts)))
s2<-rep(pts,length(pts))
S<-cbind(s1,s2)
inX<- (s1 <=20 & s1 >=10) & (s2 <=20 & s2>=10)
C<-S[inX,] # candidate set
dbar<-function(X){
a<-as.matrix(dis(X))
diag(a)<-10^10
mean(apply(a,1,min))
}
library(scrbook)
## crit=4 = trace(V(alpha))
## crit 5 = Var(N)
## crit 6 = 1-pbar
## crit 7 = var(pbar)
a1.1<-SCRdesign(S,C,ntraps=11,ndesigns=10,nn=15,sigma=1,crit=4)
a2.1<-SCRdesign(S,C,ntraps=11,ndesigns=10,nn=15,sigma=1,crit=5)
# Probably use crit=6 right now
a3.1<-SCRdesign(S,C,ntraps=11,ndesigns=10,nn=15,sigma=1,crit=6)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Line.Transect.R, R/Point.Transect.R
\name{plot,Line.Transect,ANY-method}
\alias{plot,Line.Transect,ANY-method}
\alias{plot,Point.Transect,ANY-method}
\title{Plot}
\usage{
\S4method{plot}{Line.Transect,ANY}(x, y, ...)
\S4method{plot}{Point.Transect,ANY}(x, y, ...)
}
\arguments{
\item{x}{object of class transect}
\item{y}{not used}
\item{...}{Additional arguments: add (TRUE/FALSE) whether to add to existing
plot, col colour, lwd line width (for line transects) and pch point symbols
(for point transects).}
}
\description{
Plots an S4 object of class 'Transect'
}
|
/man/plot.Transect-methods.Rd
|
no_license
|
cran/dssd
|
R
| false
| true
| 670
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Line.Transect.R, R/Point.Transect.R
\name{plot,Line.Transect,ANY-method}
\alias{plot,Line.Transect,ANY-method}
\alias{plot,Point.Transect,ANY-method}
\title{Plot}
\usage{
\S4method{plot}{Line.Transect,ANY}(x, y, ...)
\S4method{plot}{Point.Transect,ANY}(x, y, ...)
}
\arguments{
\item{x}{object of class transect}
\item{y}{not used}
\item{...}{Additional arguments: add (TRUE/FALSE) whether to add to existing
plot, col colour, lwd line width (for line transects) and pch point symbols
(for point transects).}
}
\description{
Plots an S4 object of class 'Transect'
}
|
#'@title
#'Retrieve data from the sampled logs
#'
#'@description
#'\code{sampled_logs} reads in and parses data from the 1:1000 sampled RequestLogs
#'on stat1002.
#'
#'@param file either the full name of a sampled log file, or the year/month/day of the log file you want,
#'provided as YYYYMMDD
#'
#'@details
#'It does what it says on the tin; pass in a date (formatted as '20140601' or equivalent)
#'and it will retrieve the sampled requestlogs for that day. One caveat worth noting is that
#'the daily dumps are not truncated at precisely the stroke of midnight; for the example,
#'you can expect to see some of the logs from 20140602 and be missing some from the 1st,
#'which will be in 20140531. Slight fuzziness around date ranges may be necessary to get all the
#'traffic you want.
#'
#'It does not return all the fields from the log file, merely the most useful ones - namely timestamp,
#'ip_address, status_code, URL, mime_type, referer, x_forwarded, user_agent, lang and x_analytics.
#'
#'@author Oliver Keyes <okeyes@@wikimedia.org>
#'
#'@seealso
#'\code{\link{log_strptime}} for handling the log timestamp format, \code{\link{parse_uuids}} for parsing out
#'app UUIDs from URLs, \code{\link{log_sieve}} for filtering the sampled logs to "pageviews",
#'and \code{\link{hive_query}} for querying the unsampled RequestLogs.
#'
#'@return a data.frame containing the sampled logs of the day you asked for.
#'
#'@export
sampled_logs <- function(file){
  # Names for the retained log fields, in file order.
  resultnames <- c("timestamp","ip_address","status_code","URL","mime_type",
                   "referer","x_forwarded","user_agent","lang","x_analytics")
  # Check whether a full file path was provided, or a date. A date
  # (YYYYMMDD) contains no slash, so construct the standard archive path.
  if(!grepl(x = file, pattern = "/")){
    origin_path <- paste0("/a/squid/archive/sampled/sampled-1000.tsv.log-",
                          file, ".gz")
  } else {
    # Otherwise, use the filename provided as-is.
    origin_path <- file
  }
  # Work on a temporary copy so the archive itself is never modified.
  output_file <- tempfile()
  save_file <- paste0(output_file, ".gz")
  # Copy the archive to the temp location; warn and bail out if missing.
  if(!file.copy(from = origin_path, to = save_file, overwrite = TRUE)){
    warning("The file ", origin_path, " could not be found")
    return(NULL)
  }
  # gunzip strips the .gz suffix, leaving the data at output_file.
  system(paste("gunzip", save_file))
  # Parse the decompressed log.
  data <- c_sampled_logs(output_file)
  # Remove the temp file.
  file.remove(output_file)
  # Return as a named data.table (a data.frame subclass).
  data <- as.data.table(data)
  setnames(data, 1:ncol(data), resultnames)
  return(data)
}
|
/R/sampled_logs.R
|
permissive
|
Protonk/WMUtils
|
R
| false
| false
| 2,543
|
r
|
#'@title
#'Retrieve data from the sampled logs
#'
#'@description
#'\code{sampled_logs} reads in and parses data from the 1:1000 sampled RequestLogs
#'on stat1002.
#'
#'@param file either the full name of a sampled log file, or the year/month/day of the log file you want,
#'provided as YYYYMMDD
#'
#'@details
#'It does what it says on the tin; pass in a date (formatted as '20140601' or equivalent)
#'and it will retrieve the sampled requestlogs for that day. One caveat worth noting is that
#'the daily dumps are not truncated at precisely the stroke of midnight; for the example,
#'you can expect to see some of the logs from 20140602 and be missing some from the 1st,
#'which will be in 20140531. Slight fuzziness around date ranges may be necessary to get all the
#'traffic you want.
#'
#'It does not return all the fields from the log file, merely the most useful ones - namely timestamp,
#'ip_address, status_code, URL, mime_type, referer, x_forwarded, user_agent, lang and x_analytics.
#'
#'@author Oliver Keyes <okeyes@@wikimedia.org>
#'
#'@seealso
#'\code{\link{log_strptime}} for handling the log timestamp format, \code{\link{parse_uuids}} for parsing out
#'app UUIDs from URLs, \code{\link{log_sieve}} for filtering the sampled logs to "pageviews",
#'and \code{\link{hive_query}} for querying the unsampled RequestLogs.
#'
#'@return a data.frame containing the sampled logs of the day you asked for.
#'
#'@export
sampled_logs <- function(file){
  # Names for the retained log fields, in file order.
  resultnames <- c("timestamp","ip_address","status_code","URL","mime_type",
                   "referer","x_forwarded","user_agent","lang","x_analytics")
  # Check whether a full file path was provided, or a date. A date
  # (YYYYMMDD) contains no slash, so construct the standard archive path.
  if(!grepl(x = file, pattern = "/")){
    origin_path <- paste0("/a/squid/archive/sampled/sampled-1000.tsv.log-",
                          file, ".gz")
  } else {
    # Otherwise, use the filename provided as-is.
    origin_path <- file
  }
  # Work on a temporary copy so the archive itself is never modified.
  output_file <- tempfile()
  save_file <- paste0(output_file, ".gz")
  # Copy the archive to the temp location; warn and bail out if missing.
  if(!file.copy(from = origin_path, to = save_file, overwrite = TRUE)){
    warning("The file ", origin_path, " could not be found")
    return(NULL)
  }
  # gunzip strips the .gz suffix, leaving the data at output_file.
  system(paste("gunzip", save_file))
  # Parse the decompressed log.
  data <- c_sampled_logs(output_file)
  # Remove the temp file.
  file.remove(output_file)
  # Return as a named data.table (a data.frame subclass).
  data <- as.data.table(data)
  setnames(data, 1:ncol(data), resultnames)
  return(data)
}
|
#! /usr/bin/env Rscript
# by caozj
# 18 Dec 2017
# 4:11:20 PM
# This script converts the RDS data downloaded from hemberg website
# into HDF5 format to be used by different methods
suppressPackageStartupMessages({
  library(rhdf5)
  library(SingleCellExperiment)
  library(Seurat)  # NOTE(review): not referenced below -- confirm it is needed
})
# Presumably provides construct_dataset() -- verify in Utilities/data.R.
source("../../Utilities/data.R", chdir = TRUE)
message("Reading data...")
sce <- readRDS("../download/Hemberg/Manno_human/manno_human.rds")
count_mat <- as.matrix(counts(sce))
cdata <- as.data.frame(colData(sce))
# Clean cell type: drop cells whose type is unknown ("Unk") and keep only
# the metadata columns of interest.
mask <- cdata$cell_type1 != "Unk"
count_mat <- count_mat[, mask]
cdata <- cdata[mask, c("cell_type1", "Source", "age", "WellID", "batch")]
construct_dataset("../data/Manno_human", count_mat, cdata)
message("Done!")
|
/Datasets/collect/collect_manno_human.R
|
permissive
|
JiahuaQu/Cell_BLAST
|
R
| false
| false
| 742
|
r
|
#! /usr/bin/env Rscript
# by caozj
# 18 Dec 2017
# 4:11:20 PM
# This script converts the RDS data downloaded from hemberg website
# into HDF5 format to be used by different methods
suppressPackageStartupMessages({
  library(rhdf5)
  library(SingleCellExperiment)
  library(Seurat)  # NOTE(review): not referenced below -- confirm it is needed
})
# Presumably provides construct_dataset() -- verify in Utilities/data.R.
source("../../Utilities/data.R", chdir = TRUE)
message("Reading data...")
sce <- readRDS("../download/Hemberg/Manno_human/manno_human.rds")
count_mat <- as.matrix(counts(sce))
cdata <- as.data.frame(colData(sce))
# Clean cell type: drop cells whose type is unknown ("Unk") and keep only
# the metadata columns of interest.
mask <- cdata$cell_type1 != "Unk"
count_mat <- count_mat[, mask]
cdata <- cdata[mask, c("cell_type1", "Source", "age", "WellID", "batch")]
construct_dataset("../data/Manno_human", count_mat, cdata)
message("Done!")
# Distributed Gaussian-kernel ridge regression benchmark using pbdR
# (pbdDMAT/ScaLAPACK). Run under MPI; each stage below is timed separately.
library(pbdBASE, lib.loc="~/R/lib")
library(pbdDMAT, quiet=TRUE,lib.loc="~/R/lib")
library(openblasctl)
library(pryr)
library(pbdIO, quiet=TRUE)
library(dmx)
# Restrict BLAS to one thread per process (presumably to avoid
# oversubscribing cores under MPI -- confirm for the target machine).
openblas_set_num_threads(1)
# Command-line arguments: n (observations), p (predictors), block size.
args = commandArgs(trailingOnly = TRUE)
init.grid()
n_num <- as.numeric(args[1])
p_num <- as.numeric(args[2])
blm <- as.numeric(args[3])   # ScaLAPACK blocking factor (square blm x blm blocks)
sigma <- p_num               # kernel bandwidth, set equal to p
# Load the pre-generated data matching the requested n and p.
load(paste0("~/src/RData/NGK_PK_GP_Data_n",n_num,"_p",p_num,".RData"))
Data.num <- 1                # use the first replicate only
rm(eps.list)
# First column is the response y; the remaining columns are predictors X.
X <- as.matrix(data.list[[Data.num]][,-1])
y <- as.matrix(data.list[[Data.num]][,1])
n <- nrow(X)
d <- ncol(X)
scale.X <- scale(X)
r_I <- diag(1,n)
rm(data.list)   # free the full data list; X/y copies are kept
# Memory diagnostics around data distribution.
comm.print("rank0 input rank")
comm.print(mem_used())
comm.print("after input")
comm.print(mem_used())
bldim <- c(blm,blm)
# Distribute X and y across the process grid as block-cyclic ddmatrix objects.
input.time <- comm.timer({
  dX <-as.ddmatrix(x=X,bldim=bldim);
  dy <- as.ddmatrix(x=y, bldim=bldim);
  comm.print("mem of dX is ")
  comm.print(object.size(dX))
  comm.print("mem of dy is ")
  comm.print(object.size(dy))
})
if(comm.rank()==0){
  print(mem_used())
  cat("Input time is \n")
}
comm.print(input.time, all.rank=FALSE)
# scale vars
# Standardize the distributed predictors (column-wise) and center/scale the
# response, keeping the original scale factors for later back-transformation.
scale.time <- comm.timer({
  dX.init <- dX;
  dX.init.sd <- apply(dX.init,2,sd)
  dy.init <- dy;
  dy.init.mean <- mean(dy.init);
  dy.init.sd <- sd(dy.init)
  # Pull the (small) distributed summaries back to plain numerics.
  y.init.sd <- as.numeric(as.matrix(dy.init.sd))
  X.init.sd <- as.numeric(as.matrix(dX.init.sd))
  dX <- scale(dX.init)
  dy <- (dy.init - mean(dy.init))/y.init.sd;
})
if(comm.rank()==0){
  print(mem_used())
  cat("scale time is \n")
}
comm.print(scale.time, all.rank=FALSE)
# kernel_matrix
# Gaussian kernel K_ij = exp(-||x_i - x_j||^2 / sigma), built from the
# expansion ||x_i - x_j||^2 = ||x_i||^2 - 2 x_i' x_j + ||x_j||^2.
kernel.time <- comm.timer({
  dX2 <- dX^2;
  d_F_vec <- apply(dX2,1,sum);   # squared row norms ||x_i||^2
  d_D1 <- sweep(-2*crossprod(t(dX)),1,d_F_vec,FUN="+");   # -2 X X' plus row norms
  d_D2 <- sweep(d_D1,2,d_F_vec,FUN="+");                  # ... plus column norms
  dK <- exp(-d_D2/sigma);
  comm.print("mem of dK is")
  comm.print(object.size(dK))
  #pbd_K <- as.matrix(dK, proc.dest=0);
  #diag(pbd_K) <- 1;
})
# Free the distance intermediates; only the kernel dK is needed below.
rm(dX)
rm(dX2)
rm(d_D1)
rm(d_D2)
rm(d_F_vec)
if(comm.rank()==0){
  print(mem_used())
  cat("Kernel time is \n")
}
comm.print(kernel.time, all.rank=FALSE)
# Eigendecomposition
# Symmetric eigendecomposition of the kernel; reused below so that
# (K + lambda I)^-1 can be formed cheaply for many lambda values.
eigen.time <- comm.timer({
  d_Eigenobject <- eigen(dK,only.values=FALSE,symmetric=TRUE);
})
if(comm.rank()==0){
  print(mem_used())
  cat("Eigendecomposition time is \n")
}
comm.print(eigen.time, all.rank=FALSE)
## lambda search
## Golden-section search (ratio 0.381966 = 2 - golden ratio) over the ridge
## penalty lambda, minimizing the leave-one-out error (LOOE) computed from
## G^-1 = (K + lambda I)^-1 via the eigendecomposition above.
lambda.time <- comm.timer({
  n <- nrow(dy);
  tol <- 10^(-3)*n;   # convergence tolerance on the LOOE difference
  # upper_bound_lambda
  # Shrink U until the effective degrees of freedom sum(e_i/(e_i+U)) >= 1.
  U <- n;
  while(sum(d_Eigenobject$values / (d_Eigenobject$values + U)) < 1){
    U <- U-1
  }
  # lower_bound_lambda
  # Grow L until the effective degrees of freedom drop to about q, where q
  # indexes the eigenvalue closest to max(e)/1000.
  q <- which.min(abs(d_Eigenobject$values - (max(d_Eigenobject$values)/1000)));
  L = .Machine$double.eps;
  while(sum(d_Eigenobject$values / (d_Eigenobject$values + L)) > q){
    L <- L + 0.5
  }
  # create new search values #
  X1 <- L + 0.381966*(U - L);
  X2 <- U - 0.381966*(U - L);
  d_diag_eigen <- as.ddmatrix(x=diag(d_Eigenobject$values),bldim=c(blm,blm));
  d_I <- as.ddmatrix(x=diag(1,n),bldim=c(blm,blm));
  # Evaluate LOOE at X1: G^-1 = V (D + lambda I)^-1 V', alpha = G^-1 y,
  # LOOE = || diag(G^-1)^-1 alpha ||^2.
  d_inv_mat <- solve(d_diag_eigen + X1*d_I);
  d_inv_G <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_inv_mat),(d_Eigenobject$vectors));
  d_diag_inv_G <- as.ddmatrix(x=diag(diag(d_inv_G)),bldim=c(blm,blm));
  d_coeffs <- crossprod(d_inv_G,dy);
  LOOE1 <- crossprod(solve(d_diag_inv_G, d_coeffs));
  # Evaluate LOOE at X2 the same way.
  d_inv_mat <- solve(d_diag_eigen + X2*d_I);
  d_inv_G <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_inv_mat),(d_Eigenobject$vectors));
  d_diag_inv_G <- as.ddmatrix(x=diag(diag(d_inv_G)),bldim=c(blm,blm));
  d_coeffs <- crossprod(d_inv_G,dy);
  LOOE2 <- crossprod(solve(d_diag_inv_G, d_coeffs));
  d_diff_LE <- (LOOE1-LOOE2);
  diff_LE <- as.matrix(d_diff_LE);
  # Golden-section interval updates until the LOOE difference is within tol.
  while(abs(diff_LE) > tol){
    if(diff_LE < 0){
      # LOOE1 < LOOE2: minimum lies in [L, X2]; shift the interval left.
      U <- X2
      X2 <- X1
      X1 <- L + 0.381966*(U-L)
      LOOE2 <- LOOE1
      d_inv_mat <- solve(d_diag_eigen + X1*d_I);
      d_inv_G <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_inv_mat),(d_Eigenobject$vectors));
      d_diag_inv_G <- as.ddmatrix(x=diag(diag(d_inv_G)),bldim=c(blm,blm));
      d_coeffs <- crossprod(d_inv_G,dy);
      LOOE1 <- crossprod(solve(d_diag_inv_G, d_coeffs));
    }else{
      # LOOE1 >= LOOE2: minimum lies in [X1, U]; shift the interval right.
      L <- X1
      X1 <- X2
      X2 <- U - (0.381966)*(U-L)
      LOOE1 <- LOOE2
      d_inv_mat <- solve(d_diag_eigen + X2*d_I);
      d_inv_G <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_inv_mat),(d_Eigenobject$vectors));
      d_diag_inv_G <- as.ddmatrix(x=diag(diag(d_inv_G)),bldim=c(blm,blm));
      d_coeffs <- crossprod(d_inv_G,dy);
      LOOE2 <- crossprod(solve(d_diag_inv_G, d_coeffs));
    }
    d_diff_LE <- (LOOE1-LOOE2)
    diff_LE <- as.matrix(d_diff_LE)
  }
  # Take the better of the two interior points as the selected lambda.
  lambda <- ifelse(diff_LE < 0, X1 ,X2);
  lambda <- as.numeric(lambda);
  # Recompute coefficients and LOOE at the selected lambda.
  d_inv_mat <- solve(d_diag_eigen + lambda*d_I);
  d_inv_G <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_inv_mat),(d_Eigenobject$vectors));
  d_diag_inv_G <- as.ddmatrix(x=diag(diag(d_inv_G)),bldim=c(blm,blm));
  d_coeffs <- crossprod(d_inv_G,dy);
  LOOE <- crossprod(solve(d_diag_inv_G,d_coeffs))
})
if(comm.rank()==0){
  print(mem_used())
  cat("search time for lambda is \n")
}
comm.print(lambda.time, all.rank=FALSE)
### fitted y ###
# In-sample fitted values: y_hat = K alpha.
fit.time <- comm.timer({
  d_fit_y <- dK %*% d_coeffs
})
if(comm.rank()==0){
  cat("Fitting time is \n")
}
comm.print(fit.time, all.rank=FALSE)
### var-covariance matrix for coeffs ###
# Plug-in error variance from the residuals, then the covariance of the
# coefficients and (from it) of the fitted values.
vcov.time <- comm.timer({
  d_sigmasq <- as.numeric(as.matrix(crossprod(dy-d_fit_y)/n));   # sigma^2 = RSS/n
  d_inv_mat <- solve(d_diag_eigen + lambda*d_I);
  # Cov(alpha) = sigma^2 V (D + lambda I)^-2 V'
  d_covmatc <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_sigmasq*crossprod(d_inv_mat)),(d_Eigenobject$vectors));
  comm.print(object.size(d_covmatc))
  # Cov(y_hat) = K' Cov(alpha) K
  d_vcovmat_yhat <- crossprod(dK,crossprod(d_covmatc,dK))
  comm.print(object.size(d_vcovmat_yhat))
  rm(d_inv_mat)
})
if(comm.rank()==0){
  print(mem_used())
  cat("Covariance time is \n")
}
comm.print(vcov.time,all.rank=FALSE)
### derivative #####
# Build per-rank Kronecker pieces used by the derivative computation below.
# The many commented-out lines are debugging output and alternative
# constructions left by the author; preserved as-is.
#deriv.time <- comm.timer({
return.time1 <- comm.timer({
  # Gathers the kernel to a regular matrix on each rank -- presumably
  # as.matrix() on a ddmatrix returns the global matrix; confirm memory cost.
  r_K <- as.matrix(dK)
  # Contiguous chunk of column indices owned by this rank.
  kron_row <- comm.chunk(n, type="equal",form="vector")
  j <- comm.rank()   # rank id (not used again in the visible code)
  # This rank's kernel columns stacked into a single column vector.
  vec_K <- matrix(as.vector(r_K[,kron_row]),ncol=1)
  comm.print("vec_K dimension")
  comm.print(dim(vec_K))
  n_kron_row <- length(kron_row)
  r_onevec1 <- matrix(rep(1,n_kron_row),ncol=1)
  #r_coeffs <- as.matrix(d_coeffs,proc.dest=0);
  r_coeffs <- as.matrix(d_coeffs)
  # alpha replicated once per owned column: (1 kron alpha).
  r_kron_alpha <- kronecker(r_onevec1,r_coeffs)
  rm(d_coeffs)
  #if(comm.rank()==0){
  #print("mem of r_kron_alpha is")
  #print(object.size(r_kron_alpha))
  #}
  #barrier()
  r_onevec2 <- matrix(rep(1,n),ncol=1)
  tmp_I <- diag(1,n_kron_row)
  # Block indicator matrix: (I_{n_kron_row} kron 1_n).
  kron_I <- kronecker(tmp_I,r_onevec2)
  #kron_I <- matrix(0,nrow=n_kron_row*n, ncol=n_kron_row)
  #comm.print(kron_col[1], all.rank=TRUE)
  #index <- 1:(n_kron_row*n) + c((kron_row[1]-1)*n)
  #comm.print(index[1], all.rank=TRUE)
  #kron_I[index,] <- tmp_kron_I
  #rm(tmp_kron_I)
  #d_mat <- ddmatrix(tmp.vec, ncol=1, bldim=c(blm,blm))
  #d_kron_I[[i]] <- tmp.vec
  comm.print("mem of tmp.mat is")
  comm.print(object.size(kron_I))
  #d_kron_I <- as.list(1:kron_col)
  #comm.print(tmp.mat[c(index[1]+0:4),c(1:5)],all.rank=TRUE)
  #comm.print(dim(tmp.mat),all.rank=TRUE)
  #d_kron_I <- new("ddmatrix",Data=tmp.mat,dim=c(dim(tmp.mat)[1],allreduce(dim(tmp.mat)[2])), bldim=c(dim(tmp.mat)[1], comm.chunk(d, type="equal",form="bldim")), ldim=dim(tmp.mat), ICTXT=1)
  #comm.print((d_kron_I),all.rank`=TRUE)
  #comm.print("mem of d_mat is")
  #comm.print(object.size(d_kron_I))
})
comm.print(mem_used())
if(comm.rank()==0){
  # rm(r_kron_alpha)
  # rm(r_kron_I)
  print(mem_used())
  print("return time is \n")
}
comm.print(return.time1, all.rank=FALSE)
deriv.time <- comm.timer({
row.num <- comm.chunk(n, type="equal", form="vector")
comm.print(row.num, all.rank=TRUE)
comm.print(mem_used())
if(length(row.num)==0){
r_derivmat <- NULL
distk <- NULL
deriv_col <- NULL
r_dist <- NULL
}else{
rows <- cbind(rep(row.num,each=n),1:n);
r_derivmat <- matrix(NA, nrow=length(row.num), ncol=d)
col.num <- 100
col.end <- d/100
for(j in 1:col.end){
index <- c((j-1)*col.num+1):c(j*col.num)
r_dist <- scale.X[rows[,1],index]-scale.X[rows[,2],index];
comm.print(mem_used())
#rm(rows); rm(scale.X)
comm.print(object.size(r_dist))
distk <- sweep(r_dist,1, vec_K,FUN="*");
#rm(r_dist); rm(vec_K)
comm.print(mem_used())
comm.print(object.size(distk))
deriv_col <- sweep(distk,1, r_kron_alpha, FUN="*");
#rm(distk);rm(r_kron_alpha)
comm.print(mem_used())
comm.print(object.size(deriv_col))
r_derivmat[,index]<- -(2/sigma)*(crossprod(kron_I, deriv_col))
#rm(kron_I); rm(deriv_col)
}
rm(rows, scale.X, r_dist, vec_K, distk, r_kron_alpha, kron_I, deriv_col);
comm.print(mem_used())
comm.print(object.size(r_derivmat))
}
derivmat <- gather(r_derivmat)
rm(r_derivmat)
comm.print(length(derivmat))
comm.print(dim(derivmat[[1]]))
if(comm.rank()==0){
derivmat <- do.call(rbind, derivmat)
print(dim(derivmat))
avgderivmat <- apply(derivmat,2,mean)
}
})
if(comm.rank()==0){
print(mem_used())
cat("derivative time is \n")
}
comm.print(deriv.time, all.rank=FALSE)
## scale.back ##
scale.back.time <- comm.timer({
d_fit_y <- d_fit_y %*% dy.init.sd + dy.init.mean
d_vcov_c <- (y.init.sd^2)*d_covmatc
d_vcov_fitted <- (y.init.sd^2)*d_vcovmat_yhat
LOOE <- LOOE * y.init.sd
if(comm.rank()==0){
derivmat <- y.init.sd*derivmat/X.init.sd
avgderivmat <- y.init.sd*avgderivmat/X.init.sd
}
})
if(comm.rank()==0){
cat("scaling-back time is \n")
}
comm.print(scale.back.time, all.rank=FALSE)
## R-squared ####
R2.time <- comm.timer({
d_R2 <- 1-(crossprod(dy.init-d_fit_y)/(n*dy.init.sd^2))
})
if(comm.rank()==0){
cat("R2 time is \n")
}
comm.print(R2.time,all.rank=FALSE)
## return ###
return.time2 <- comm.timer({
#r_K <- as.matrix(dK, proc.dest=0);
fitted <- as.matrix(d_fit_y, proc.dest=0);
X <- as.matrix(dX.init, proc.dest=0);
y <- as.matrix(dy.init, proc.dest=0);
R2 <- as.matrix(d_R2, proc.dest=0);
vcov.c <- as.matrix(d_vcov_c, proc.dest=0);
vcov.fitted <- as.matrix(d_vcov_fitted, proc.dest=0);
LOOE <- as.matrix(LOOE, proc.dest=0);
#derivmat <- as.matrix(d_derivmat, proc.dest=0)
#avgderiv <- as.matrix(d_avgderivmat, proc.dest=0)
})
if(comm.rank()==0){
print(mem_used())
cat("return time is \n")
}
comm.print(return.time2, all.rank=FALSE)
if(comm.rank()==0){
out <- list(K=r_K,
coeffs=r_coeffs,
LOOE=LOOE,
fitted = fitted,
X=X,
y=y,
sigma=sigma,
lambda=lambda,
R2=R2,
vcov.c=vcov.c,
vcov.fitted=vcov.fitted,
derivatives=derivmat,
avgderivatives=avgderivmat
)
total.time <- sum(input.time[3],scale.time[3],kernel.time[3],eigen.time[3],lambda.time[3],
fit.time[3],vcov.time[3],return.time1[3],deriv.time[3],
R2.time[3],scale.back.time[3],return.time2[3])
print(total.time)
#print(all.equal(scale.X,pbd_X))
save(out, file=paste0("~/src/my_project/parallel_project/pbd_KRLS_p",p_num,".RData"))
}
finalize()
|
/MPI_KRLS/pbd_KRLS3.r
|
no_license
|
ikarus702/pbd_KRLS
|
R
| false
| false
| 10,621
|
r
|
library(pbdBASE, lib.loc="~/R/lib")
library(pbdDMAT, quiet=TRUE,lib.loc="~/R/lib")
library(openblasctl)
library(pryr)
library(pbdIO, quiet=TRUE)
library(dmx)
openblas_set_num_threads(1)
# --- Setup: command-line arguments, process grid, and input data ---
# Expected args: 1 = sample-size tag (n), 2 = dimension tag (p),
# 3 = blocking factor for the pbdDMAT block-cyclic distribution.
args = commandArgs(trailingOnly = TRUE)
init.grid()  # initialize the BLACS process grid for distributed matrices
n_num <- as.numeric(args[1])
p_num <- as.numeric(args[2])
blm <- as.numeric(args[3])
# Gaussian-kernel bandwidth; here tied to the dimension tag of the dataset.
sigma <- p_num
# Loads `data.list` (and `eps.list`) generated elsewhere; every rank loads it.
load(paste0("~/src/RData/NGK_PK_GP_Data_n",n_num,"_p",p_num,".RData"))
Data.num <- 1
rm(eps.list)  # not used here; free memory on every rank
# First column of the dataset is the response y, the remainder is X.
X <- as.matrix(data.list[[Data.num]][,-1])
y <- as.matrix(data.list[[Data.num]][,1])
n <- nrow(X)
d <- ncol(X)
# Centered/scaled covariates kept as a local (non-distributed) copy; the
# derivative section below indexes into it row-wise.
scale.X <- scale(X)
r_I <- diag(1,n)
rm(data.list)
comm.print("rank0 input rank")
comm.print(mem_used())
comm.print("after input")
comm.print(mem_used())
bldim <- c(blm,blm)  # square blocking factor used for all ddmatrix objects
# --- Distribute X and y as block-cyclic ddmatrix objects ---
input.time <- comm.timer({
dX <-as.ddmatrix(x=X,bldim=bldim);
dy <- as.ddmatrix(x=y, bldim=bldim);
comm.print("mem of dX is ")
comm.print(object.size(dX))
comm.print("mem of dy is ")
comm.print(object.size(dy))
})
if(comm.rank()==0){
print(mem_used())
cat("Input time is \n")
}
comm.print(input.time, all.rank=FALSE)
# scale vars
# Standardize the distributed X and y; the unscaled copies and their
# moments are kept so all results can be scaled back to original units
# at the end of the script.
scale.time <- comm.timer({
dX.init <- dX;
dX.init.sd <- apply(dX.init,2,sd)
dy.init <- dy;
dy.init.mean <- mean(dy.init);
dy.init.sd <- sd(dy.init)
# Pull the tiny distributed summaries back to plain numerics for reuse.
y.init.sd <- as.numeric(as.matrix(dy.init.sd))
X.init.sd <- as.numeric(as.matrix(dX.init.sd))
dX <- scale(dX.init)
dy <- (dy.init - mean(dy.init))/y.init.sd;
})
if(comm.rank()==0){
print(mem_used())
cat("scale time is \n")
}
comm.print(scale.time, all.rank=FALSE)
# kernel_matrix
# Gaussian kernel K_ij = exp(-||x_i - x_j||^2 / sigma), built from the
# expansion ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 x_i'x_j so only
# one distributed cross-product is required.
kernel.time <- comm.timer({
dX2 <- dX^2;
d_F_vec <- apply(dX2,1,sum);
d_D1 <- sweep(-2*crossprod(t(dX)),1,d_F_vec,FUN="+");
d_D2 <- sweep(d_D1,2,d_F_vec,FUN="+");
dK <- exp(-d_D2/sigma);
comm.print("mem of dK is")
comm.print(object.size(dK))
#pbd_K <- as.matrix(dK, proc.dest=0);
#diag(pbd_K) <- 1;
})
# Free the scaled design and distance intermediates; only dK is needed on.
rm(dX)
rm(dX2)
rm(d_D1)
rm(d_D2)
rm(d_F_vec)
if(comm.rank()==0){
print(mem_used())
cat("Kernel time is \n")
}
comm.print(kernel.time, all.rank=FALSE)
# Eigendecomposition
# Symmetric eigendecomposition of K; reused throughout the lambda search
# so (K + lambda I)^{-1} reduces to inverting a diagonal each time.
eigen.time <- comm.timer({
d_Eigenobject <- eigen(dK,only.values=FALSE,symmetric=TRUE);
})
if(comm.rank()==0){
print(mem_used())
cat("Eigendecomposition time is \n")
}
comm.print(eigen.time, all.rank=FALSE)
## lambda search
## Golden-section search for the ridge penalty lambda minimizing the
## closed-form leave-one-out error (LOOE), computed via the
## eigendecomposition of K (KRLS methodology).
lambda.time <- comm.timer({
n <- nrow(dy);  # reassigns the global n (same value)
tol <- 10^(-3)*n;
# upper_bound_lambda
# Shrink U until the effective degrees of freedom reach at least 1.
U <- n;
while(sum(d_Eigenobject$values / (d_Eigenobject$values + U)) < 1){
U <- U-1
}
# lower_bound_lambda
# q = index of the eigenvalue nearest max(eigenvalue)/1000; grow L until
# the effective degrees of freedom drop to q.
q <- which.min(abs(d_Eigenobject$values - (max(d_Eigenobject$values)/1000)));
L = .Machine$double.eps;
while(sum(d_Eigenobject$values / (d_Eigenobject$values + L)) > q){
L <- L + 0.5
}
# create new search values #
# 0.381966 = 2 - golden ratio; X1 < X2 are the two interior probe points.
X1 <- L + 0.381966*(U - L);
X2 <- U - 0.381966*(U - L);
d_diag_eigen <- as.ddmatrix(x=diag(d_Eigenobject$values),bldim=c(blm,blm));
d_I <- as.ddmatrix(x=diag(1,n),bldim=c(blm,blm));
# LOOE at X1: G^{-1} = V (D + lambda I)^{-1} V', coeffs = G^{-1} y,
# LOOE = || diag(G^{-1})^{-1} coeffs ||^2.
d_inv_mat <- solve(d_diag_eigen + X1*d_I);
d_inv_G <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_inv_mat),(d_Eigenobject$vectors));
d_diag_inv_G <- as.ddmatrix(x=diag(diag(d_inv_G)),bldim=c(blm,blm));
d_coeffs <- crossprod(d_inv_G,dy);
LOOE1 <- crossprod(solve(d_diag_inv_G, d_coeffs));
# LOOE at X2, same formula.
d_inv_mat <- solve(d_diag_eigen + X2*d_I);
d_inv_G <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_inv_mat),(d_Eigenobject$vectors));
d_diag_inv_G <- as.ddmatrix(x=diag(diag(d_inv_G)),bldim=c(blm,blm));
d_coeffs <- crossprod(d_inv_G,dy);
LOOE2 <- crossprod(solve(d_diag_inv_G, d_coeffs));
d_diff_LE <- (LOOE1-LOOE2);
diff_LE <- as.matrix(d_diff_LE);
# Standard golden-section update: keep the sub-bracket containing the
# smaller LOOE, reuse the surviving probe, and evaluate one new probe.
while(abs(diff_LE) > tol){
if(diff_LE < 0){
# LOOE(X1) < LOOE(X2): minimum lies in [L, X2].
U <- X2
X2 <- X1
X1 <- L + 0.381966*(U-L)
LOOE2 <- LOOE1
d_inv_mat <- solve(d_diag_eigen + X1*d_I);
d_inv_G <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_inv_mat),(d_Eigenobject$vectors));
d_diag_inv_G <- as.ddmatrix(x=diag(diag(d_inv_G)),bldim=c(blm,blm));
d_coeffs <- crossprod(d_inv_G,dy);
LOOE1 <- crossprod(solve(d_diag_inv_G, d_coeffs));
}else{
# LOOE(X1) >= LOOE(X2): minimum lies in [X1, U].
L <- X1
X1 <- X2
X2 <- U - (0.381966)*(U-L)
LOOE1 <- LOOE2
d_inv_mat <- solve(d_diag_eigen + X2*d_I);
d_inv_G <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_inv_mat),(d_Eigenobject$vectors));
d_diag_inv_G <- as.ddmatrix(x=diag(diag(d_inv_G)),bldim=c(blm,blm));
d_coeffs <- crossprod(d_inv_G,dy);
LOOE2 <- crossprod(solve(d_diag_inv_G, d_coeffs));
}
d_diff_LE <- (LOOE1-LOOE2)
diff_LE <- as.matrix(d_diff_LE)
}
# Pick whichever probe achieved the smaller LOOE, then recompute the
# final coefficients and LOOE at that lambda.
lambda <- ifelse(diff_LE < 0, X1 ,X2);
lambda <- as.numeric(lambda);
d_inv_mat <- solve(d_diag_eigen + lambda*d_I);
d_inv_G <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_inv_mat),(d_Eigenobject$vectors));
d_diag_inv_G <- as.ddmatrix(x=diag(diag(d_inv_G)),bldim=c(blm,blm));
d_coeffs <- crossprod(d_inv_G,dy);
LOOE <- crossprod(solve(d_diag_inv_G,d_coeffs))
})
if(comm.rank()==0){
print(mem_used())
cat("search time for lambda is \n")
}
comm.print(lambda.time, all.rank=FALSE)
### fitted y ###
# Fitted values on the standardized scale: y_hat = K alpha.
fit.time <- comm.timer({
d_fit_y <- dK %*% d_coeffs
})
if(comm.rank()==0){
cat("Fitting time is \n")
}
comm.print(fit.time, all.rank=FALSE)
### var-covariance matrix for coeffs ###
# sigma^2 = RSS/n on the standardized scale; vcov(alpha) is formed from the
# eigen-decomposition as sigma^2 * V (D + lambda I)^{-2} V', and the
# covariance of the fitted values follows as K' vcov(alpha) K.
vcov.time <- comm.timer({
d_sigmasq <- as.numeric(as.matrix(crossprod(dy-d_fit_y)/n));
d_inv_mat <- solve(d_diag_eigen + lambda*d_I);
d_covmatc <- tcrossprod(crossprod(t(d_Eigenobject$vectors),d_sigmasq*crossprod(d_inv_mat)),(d_Eigenobject$vectors));
comm.print(object.size(d_covmatc))
d_vcovmat_yhat <- crossprod(dK,crossprod(d_covmatc,dK))
comm.print(object.size(d_vcovmat_yhat))
rm(d_inv_mat)
})
if(comm.rank()==0){
print(mem_used())
cat("Covariance time is \n")
}
comm.print(vcov.time,all.rank=FALSE)
### derivative #####
# Replicate the kernel matrix and coefficient vector locally and build the
# Kronecker helpers used by the pointwise-derivative computation below.
return.time1 <- comm.timer({
# Pull the full kernel to every rank; each rank keeps only its chunk of
# columns (kron_row), stacked into one long column vector.
r_K <- as.matrix(dK)
kron_row <- comm.chunk(n, type="equal",form="vector")
j <- comm.rank()
vec_K <- matrix(as.vector(r_K[,kron_row]),ncol=1)
comm.print("vec_K dimension")
comm.print(dim(vec_K))
n_kron_row <- length(kron_row)
# r_kron_alpha = kronecker(1_{n_kron_row}, alpha): the coefficient vector
# repeated once per local kernel column, aligned with vec_K.
r_onevec1 <- matrix(rep(1,n_kron_row),ncol=1)
r_coeffs <- as.matrix(d_coeffs)
r_kron_alpha <- kronecker(r_onevec1,r_coeffs)
rm(d_coeffs)
# kron_I = kronecker(I_{n_kron_row}, 1_n): crossprod with this sums each
# length-n segment of a stacked column back to one row per local column.
r_onevec2 <- matrix(rep(1,n),ncol=1)
tmp_I <- diag(1,n_kron_row)
kron_I <- kronecker(tmp_I,r_onevec2)
comm.print("mem of tmp.mat is")
comm.print(object.size(kron_I))
})
comm.print(mem_used())
if(comm.rank()==0){
print(mem_used())
# Use cat() like every other timing message; the original print() emitted
# '[1] "return time is \n"' with a literal backslash-n.
cat("return time is \n")
}
comm.print(return.time1, all.rank=FALSE)
# Each rank computes the rows of the pointwise-derivative matrix for its
# chunk of observations (row.num), processing covariates in blocks of
# col.num columns to bound peak memory.
deriv.time <- comm.timer({
row.num <- comm.chunk(n, type="equal", form="vector")
comm.print(row.num, all.rank=TRUE)
comm.print(mem_used())
if(length(row.num)==0){
# This rank was assigned no observations: contribute nothing to gather().
r_derivmat <- NULL
distk <- NULL
deriv_col <- NULL
r_dist <- NULL
}else{
# All (i, j) index pairs of local observations against the full sample;
# the second column recycles 1:n once per local observation.
rows <- cbind(rep(row.num,each=n),1:n);
r_derivmat <- matrix(NA, nrow=length(row.num), ncol=d)
col.num <- 100
# BUG FIX: the original used col.end <- d/100 and index 1:(j*col.num),
# which silently skips the trailing columns (leaving NA in r_derivmat)
# whenever d is not a multiple of 100. ceiling() plus the min() clamp
# below is identical when d %% col.num == 0 and correct otherwise.
col.end <- ceiling(d/col.num)
for(j in seq_len(col.end)){
index <- ((j-1)*col.num+1):min(j*col.num, d)
r_dist <- scale.X[rows[,1],index]-scale.X[rows[,2],index];
comm.print(mem_used())
comm.print(object.size(r_dist))
# Weight each pairwise difference by its kernel value...
distk <- sweep(r_dist,1, vec_K,FUN="*");
comm.print(mem_used())
comm.print(object.size(distk))
# ...and by the replicated coefficient vector.
deriv_col <- sweep(distk,1, r_kron_alpha, FUN="*");
comm.print(mem_used())
comm.print(object.size(deriv_col))
# Sum each length-n segment back to one row per local observation.
r_derivmat[,index]<- -(2/sigma)*(crossprod(kron_I, deriv_col))
}
rm(rows, scale.X, r_dist, vec_K, distk, r_kron_alpha, kron_I, deriv_col);
comm.print(mem_used())
comm.print(object.size(r_derivmat))
}
# Collect per-rank row blocks on rank 0, stack them, and average over
# observations to get the average pointwise derivatives.
derivmat <- gather(r_derivmat)
rm(r_derivmat)
comm.print(length(derivmat))
comm.print(dim(derivmat[[1]]))
if(comm.rank()==0){
derivmat <- do.call(rbind, derivmat)
print(dim(derivmat))
avgderivmat <- apply(derivmat,2,mean)
}
})
if(comm.rank()==0){
print(mem_used())
cat("derivative time is \n")
}
comm.print(deriv.time, all.rank=FALSE)
## scale.back ##
# Undo the standardization so fitted values, covariances, and derivatives
# are reported on the original data scale.
scale.back.time <- comm.timer({
d_fit_y <- d_fit_y %*% dy.init.sd + dy.init.mean
d_vcov_c <- (y.init.sd^2)*d_covmatc
d_vcov_fitted <- (y.init.sd^2)*d_vcovmat_yhat
# NOTE(review): LOOE is a sum of squares, so scaling by y.init.sd (not
# y.init.sd^2) looks inconsistent with the vcov lines above -- confirm.
LOOE <- LOOE * y.init.sd
if(comm.rank()==0){
# BUG FIX: the original `y.init.sd*derivmat/X.init.sd` divides an (N x d)
# matrix by a length-d vector, which recycles element-wise in column-major
# order (down columns), not per covariate column. sweep() applies the
# per-column scaling that the avgderivmat line below performs correctly.
derivmat <- sweep(y.init.sd*derivmat, 2, X.init.sd, "/")
avgderivmat <- y.init.sd*avgderivmat/X.init.sd
}
})
if(comm.rank()==0){
cat("scaling-back time is \n")
}
comm.print(scale.back.time, all.rank=FALSE)
## R-squared ####
# R^2 = 1 - RSS / (n * var(y)) on the original scale.
R2.time <- comm.timer({
d_R2 <- 1-(crossprod(dy.init-d_fit_y)/(n*dy.init.sd^2))
})
if(comm.rank()==0){
cat("R2 time is \n")
}
comm.print(R2.time,all.rank=FALSE)
## return ###
# Gather all distributed results onto rank 0 for assembly and saving.
return.time2 <- comm.timer({
fitted <- as.matrix(d_fit_y, proc.dest=0);
X <- as.matrix(dX.init, proc.dest=0);
y <- as.matrix(dy.init, proc.dest=0);
R2 <- as.matrix(d_R2, proc.dest=0);
vcov.c <- as.matrix(d_vcov_c, proc.dest=0);
vcov.fitted <- as.matrix(d_vcov_fitted, proc.dest=0);
LOOE <- as.matrix(LOOE, proc.dest=0);
})
if(comm.rank()==0){
print(mem_used())
cat("return time is \n")
}
comm.print(return.time2, all.rank=FALSE)
# Rank 0 assembles the KRLS-style result object, reports the total elapsed
# time (third element of each comm.timer result), and saves to disk.
if(comm.rank()==0){
out <- list(K=r_K,
coeffs=r_coeffs,
LOOE=LOOE,
fitted = fitted,
X=X,
y=y,
sigma=sigma,
lambda=lambda,
R2=R2,
vcov.c=vcov.c,
vcov.fitted=vcov.fitted,
derivatives=derivmat,
avgderivatives=avgderivmat
)
total.time <- sum(input.time[3],scale.time[3],kernel.time[3],eigen.time[3],lambda.time[3],
fit.time[3],vcov.time[3],return.time1[3],deriv.time[3],
R2.time[3],scale.back.time[3],return.time2[3])
print(total.time)
save(out, file=paste0("~/src/my_project/parallel_project/pbd_KRLS_p",p_num,".RData"))
}
finalize()
|
\name{coxinterval.control}
\alias{coxinterval.control}
\title{Control model fit}
\description{
Set parameters controlling the model fit.
}
\usage{
coxinterval.control(eps = 1e-7, iter.max = 5000, coef.typ = 1,
coef.max = 10, return.data = FALSE,
eps.norm = c("max", "grad"), armijo = 1/3,
var.coef = TRUE, trace = FALSE, thread.max = 1,
sieve = TRUE, sieve.const = 1, sieve.rate = 1/3,
risk.min = 1)
}
\arguments{
\item{eps}{
threshold value for the norm used to measure convergence in the
parameter estimates.
}
\item{iter.max}{
maximum number of iterations to attempt. This ensures that
the estimation routine will eventually exit, even when the
convergence criteria are not met.
}
\item{coef.typ}{
a scalar or vector of "typical" (absolute) values for the
regression coefficient.
}
\item{coef.max}{
a scalar or vector of probable upper bounds for the regression
coefficient. This and the \code{coef.typ} arguments tune
variance estimation via the curvature in the profile
log-likelihood, following Boruvka and Cook (2015, Section 6).
}
\item{return.data}{
a logical value indicating that the model object returned should
contain an element \code{data} that generally gives the assigned
    support points and model matrix. Further details on the output are
    provided in the documentation for each model function.
}
\item{eps.norm}{
a character string identifying the norm to use in the convergence
criteria for \code{\link{coxaalen}}---either the maximum norm
between the current and previous parameter values
(\code{eps.norm = "max"}) or the absolute inner product between the
current value and the score (\code{eps.norm = "grad"}).
}
\item{armijo}{
a scale factor in (0, 1/2) for Armijo's (1966) rule---a line search
used to ensure that each iteration in the estimation routine for
\code{\link{coxaalen}} achieves an adequate increase in the
log-likelihood. The model fit is typically not very sensitive to
this value.
}
\item{var.coef}{
a logical value indicating that \code{\link{coxaalen}} standard
errors for the multiplicative regression coefficients should be
estimated. This is done via profile likelihood---an approach that
can require an inordinate amount of processing time under many
regression coefficients and larger sample size.
}
\item{trace}{
a logical value indicating that, on execution of
\code{\link{coxaalen}}, CPLEX should print its results to the
screen.
}
\item{thread.max}{
maximum number of CPU threads for \code{\link{coxaalen}} to allocate
to CPLEX. The default value disables multithreading. A value of
zero allows CPLEX to set the number of threads automatically. The
actual number of threads used is limited by the number of available
processors and the CPLEX license.
}
\item{sieve}{
a logical value indicating that the sieve rather than the
semiparametric maximum likelihood estimator should be fit by
\code{\link{coxdual}}. The default \code{TRUE} is recommended to
avoid issues with support finding and convergence.
}
\item{sieve.const}{
a constant factor that, in part, determines the sieve size. The
factor can be made specific to the transition type with
\code{sieve.const} a vector of length three. Indexing the states
from zero, this vector's components correspond to the state 0 to
state 1, 0 to 2, and 1 to 2 transition types, respectively.
}
\item{sieve.rate}{
a scalar in (1/8, 1/2) determining the rate at which the sieve
increases with the sample size.
}
\item{risk.min}{
a positive integer giving the minimum size of risk set for support
points defining the sieve.
}
}
\value{
A list of the above arguments with their final values.
}
\details{
For a given sample size \emph{n}, the sieve for \code{\link{coxdual}}
has size at most \code{sieve.const*}\emph{n}\code{^sieve.rate}. Any
reduction in size from this value is applied to ensure that each
subinterval in the sieve's time partition captures at least one
support point from the semiparametric maximum likelihood estimator
based on the subsample with known progression status (Boruvka and
Cook, 2016).
}
\references{
Boruvka, A. and Cook, R. J. (2015)
\href{http://dx.doi.org/10.1111/sjos.12113}{
A Cox-Aalen model for interval-censored data}.
\emph{Scandinavian Journal of Statistics} \bold{42}, 414--426.
Boruvka, A. and Cook, R. J. (2016)
\href{http://dx.doi.org/10.1093/biostatistics/kxv042}{
Sieve estimation in a Markov illness-death process under dual
censoring}.
\emph{Biostatistics} \bold{17}, 350--363.
Armijo, L. (1966)
\href{http://dx.doi.org/10.2140/pjm.1966.16.1}{
Minimization of functions having Lipschitz continuous first partial
derivatives.}
\emph{Pacific Journal of Mathematics} \bold{16}, 1--3.
}
\seealso{
\code{\link{coxaalen}}, \code{\link{coxdual}}
}
\examples{
if (is.loaded("coxaalen", "coxinterval")) \{
f <- Surv(left, right, type = "interval2") ~ prop(treat)
coxaalen(f, data = cosmesis,
control = coxinterval.control(iter.max = 2, trace = TRUE))
coxaalen(f, data = cosmesis, iter.max = 2)
\}
f <- Surv(start, stop, status) ~ cluster(id) + strata(from, to) +
I(z * (to == 1)) + I(z * (from \%in\% 0 & to == 2)) +
I(z * (from \%in\% c(NA, 1) & to == 2))
coxdual(f, data = dualrc,
control = coxinterval.control(eps = 1e-5, sieve.rate = 2/5))
coxdual(f, data = dualrc, eps = 1e-5, sieve.rate = 2/5)
}
\keyword{survival}
|
/man/coxinterval.control.Rd
|
no_license
|
aboruvka/coxinterval
|
R
| false
| false
| 5,726
|
rd
|
\name{coxinterval.control}
\alias{coxinterval.control}
\title{Control model fit}
\description{
Set parameters controlling the model fit.
}
\usage{
coxinterval.control(eps = 1e-7, iter.max = 5000, coef.typ = 1,
coef.max = 10, return.data = FALSE,
eps.norm = c("max", "grad"), armijo = 1/3,
var.coef = TRUE, trace = FALSE, thread.max = 1,
sieve = TRUE, sieve.const = 1, sieve.rate = 1/3,
risk.min = 1)
}
\arguments{
\item{eps}{
threshold value for the norm used to measure convergence in the
parameter estimates.
}
\item{iter.max}{
maximum number of iterations to attempt. This ensures that
the estimation routine will eventually exit, even when the
convergence criteria are not met.
}
\item{coef.typ}{
a scalar or vector of "typical" (absolute) values for the
regression coefficient.
}
\item{coef.max}{
a scalar or vector of probable upper bounds for the regression
coefficient. This and the \code{coef.typ} arguments tune
variance estimation via the curvature in the profile
log-likelihood, following Boruvka and Cook (2015, Section 6).
}
\item{return.data}{
a logical value indicating that the model object returned should
contain an element \code{data} that generally gives the assigned
    support points and model matrix. Further details on the output are
    provided in the documentation for each model function.
}
\item{eps.norm}{
a character string identifying the norm to use in the convergence
criteria for \code{\link{coxaalen}}---either the maximum norm
between the current and previous parameter values
(\code{eps.norm = "max"}) or the absolute inner product between the
current value and the score (\code{eps.norm = "grad"}).
}
\item{armijo}{
a scale factor in (0, 1/2) for Armijo's (1966) rule---a line search
used to ensure that each iteration in the estimation routine for
\code{\link{coxaalen}} achieves an adequate increase in the
log-likelihood. The model fit is typically not very sensitive to
this value.
}
\item{var.coef}{
a logical value indicating that \code{\link{coxaalen}} standard
errors for the multiplicative regression coefficients should be
estimated. This is done via profile likelihood---an approach that
can require an inordinate amount of processing time under many
regression coefficients and larger sample size.
}
\item{trace}{
a logical value indicating that, on execution of
\code{\link{coxaalen}}, CPLEX should print its results to the
screen.
}
\item{thread.max}{
maximum number of CPU threads for \code{\link{coxaalen}} to allocate
to CPLEX. The default value disables multithreading. A value of
zero allows CPLEX to set the number of threads automatically. The
actual number of threads used is limited by the number of available
processors and the CPLEX license.
}
\item{sieve}{
a logical value indicating that the sieve rather than the
semiparametric maximum likelihood estimator should be fit by
\code{\link{coxdual}}. The default \code{TRUE} is recommended to
avoid issues with support finding and convergence.
}
\item{sieve.const}{
a constant factor that, in part, determines the sieve size. The
factor can be made specific to the transition type with
\code{sieve.const} a vector of length three. Indexing the states
from zero, this vector's components correspond to the state 0 to
state 1, 0 to 2, and 1 to 2 transition types, respectively.
}
\item{sieve.rate}{
a scalar in (1/8, 1/2) determining the rate at which the sieve
increases with the sample size.
}
\item{risk.min}{
a positive integer giving the minimum size of risk set for support
points defining the sieve.
}
}
\value{
A list of the above arguments with their final values.
}
\details{
For a given sample size \emph{n}, the sieve for \code{\link{coxdual}}
has size at most \code{sieve.const*}\emph{n}\code{^sieve.rate}. Any
reduction in size from this value is applied to ensure that each
subinterval in the sieve's time partition captures at least one
support point from the semiparametric maximum likelihood estimator
based on the subsample with known progression status (Boruvka and
Cook, 2016).
}
\references{
Boruvka, A. and Cook, R. J. (2015)
\href{http://dx.doi.org/10.1111/sjos.12113}{
A Cox-Aalen model for interval-censored data}.
\emph{Scandinavian Journal of Statistics} \bold{42}, 414--426.
Boruvka, A. and Cook, R. J. (2016)
\href{http://dx.doi.org/10.1093/biostatistics/kxv042}{
Sieve estimation in a Markov illness-death process under dual
censoring}.
\emph{Biostatistics} \bold{17}, 350--363.
Armijo, L. (1966)
\href{http://dx.doi.org/10.2140/pjm.1966.16.1}{
Minimization of functions having Lipschitz continuous first partial
derivatives.}
\emph{Pacific Journal of Mathematics} \bold{16}, 1--3.
}
\seealso{
\code{\link{coxaalen}}, \code{\link{coxdual}}
}
\examples{
if (is.loaded("coxaalen", "coxinterval")) \{
f <- Surv(left, right, type = "interval2") ~ prop(treat)
coxaalen(f, data = cosmesis,
control = coxinterval.control(iter.max = 2, trace = TRUE))
coxaalen(f, data = cosmesis, iter.max = 2)
\}
f <- Surv(start, stop, status) ~ cluster(id) + strata(from, to) +
I(z * (to == 1)) + I(z * (from \%in\% 0 & to == 2)) +
I(z * (from \%in\% c(NA, 1) & to == 2))
coxdual(f, data = dualrc,
control = coxinterval.control(eps = 1e-5, sieve.rate = 2/5))
coxdual(f, data = dualrc, eps = 1e-5, sieve.rate = 2/5)
}
\keyword{survival}
|
#' rredlist - IUCN Red List Client
#'
#' @section Authentication:
#' IUCN requires you to get your own API key, an alphanumeric string that you
#' need to send in every request. Get it at
#' \url{http://apiv3.iucnredlist.org/api/v3/token}.
#' Keep this key private. You can pass the key in to each function via the
#' \code{key} parameter, but it's better to store the key either as an
#' environment variable (\code{IUCN_REDLIST_KEY}) or an R option
#' (\code{iucn_redlist_key}) - we suggest using the former option.
#'
#' @section High vs. Low level package APIs:
#' \strong{High level API}
#' High level functions do the HTTP request and parse data to a data.frame for
#' ease of downstream use. The high level functions have no underscore on
#' the end of the function name, e.g., \code{\link{rl_search}}
#'
#' \strong{Low level API}
#' The parsing to data.frame in the high level API does take extra time.
#' The low level API only does the HTTP request, and gives back JSON without
#' doing any more parsing. The low level functions DO have an underscore on
#' the end of the function name, e.g., \code{\link{rl_search_}}
#'
#' @section No Spatial:
#' This package does not include support for the spatial API, described at
#' \url{http://apiv3.iucnredlist.org/spatial}
#'
#' @section Citing the Red List API:
#' The citation is
#' \code{IUCN 2015. IUCN Red List of Threatened Species. Version 2015-4 <www.iucnredlist.org>}.
#' You can get this programmatically via \code{\link{rl_citation}}
#'
#' @section Rate limiting:
#' From the IUCN folks: Too many frequent calls, or too many calls per day
#' might get your access blocked temporarily. If you're a heavy API user, the
#' Red List Unit asked that you contact them, as there might be better options.
#' They suggest a 2-second delay between your calls if you plan to make a
#' lot of calls.
#'
#' @importFrom jsonlite fromJSON
#' @name rredlist-package
#' @aliases rredlist
#' @docType package
#' @author Scott Chamberlain \email{myrmecocystus@@gmail.com}
#' @keywords package
NULL
|
/R/rredlist-package.R
|
permissive
|
jlehtoma/rredlist
|
R
| false
| false
| 2,041
|
r
|
#' rredlist - IUCN Red List Client
#'
#' @section Authentication:
#' IUCN requires you to get your own API key, an alphanumeric string that you
#' need to send in every request. Get it at
#' \url{http://apiv3.iucnredlist.org/api/v3/token}.
#' Keep this key private. You can pass the key in to each function via the
#' \code{key} parameter, but it's better to store the key either as an
#' environment variable (\code{IUCN_REDLIST_KEY}) or an R option
#' (\code{iucn_redlist_key}) - we suggest using the former option.
#'
#' @section High vs. Low level package APIs:
#' \strong{High level API}
#' High level functions do the HTTP request and parse data to a data.frame for
#' ease of downstream use. The high level functions have no underscore on
#' the end of the function name, e.g., \code{\link{rl_search}}
#'
#' \strong{Low level API}
#' The parsing to data.frame in the high level API does take extra time.
#' The low level API only does the HTTP request, and gives back JSON without
#' doing any more parsing. The low level functions DO have an underscore on
#' the end of the function name, e.g., \code{\link{rl_search_}}
#'
#' @section No Spatial:
#' This package does not include support for the spatial API, described at
#' \url{http://apiv3.iucnredlist.org/spatial}
#'
#' @section Citing the Red List API:
#' The citation is
#' \code{IUCN 2015. IUCN Red List of Threatened Species. Version 2015-4 <www.iucnredlist.org>}.
#' You can get this programmatically via \code{\link{rl_citation}}
#'
#' @section Rate limiting:
#' From the IUCN folks: Too many frequent calls, or too many calls per day
#' might get your access blocked temporarily. If you're a heavy API user, the
#' Red List Unit asked that you contact them, as there might be better options.
#' They suggest a 2-second delay between your calls if you plan to make a
#' lot of calls.
#'
#' @importFrom jsonlite fromJSON
#' @name rredlist-package
#' @aliases rredlist
#' @docType package
#' @author Scott Chamberlain \email{myrmecocystus@@gmail.com}
#' @keywords package
NULL
|
corr <- function(directory, threshold = 0) {
  ## Compute the correlation between 'sulfate' and 'nitrate' for every
  ## monitor whose number of completely observed cases exceeds 'threshold'.
  ##
  ## 'directory': character vector of length 1 naming the folder holding
  ##   the monitor CSV files ("001.csv" ... "332.csv").
  ## 'threshold': numeric vector of length 1 giving the minimum number of
  ##   complete cases required for inclusion; the default is 0.
  ## Returns a numeric vector of correlations (length 0 when no monitor
  ## passes the threshold).
  ## NOTE: relies on complete(), defined elsewhere in this project, which
  ## returns a data frame with columns 'id' and 'nobs'.
  cc <- complete(directory, id = 1:332)
  ids <- cc[["id"]][cc$nobs > threshold]
  # Preallocate the result (the original grew a variable named `c`,
  # shadowing base::c and reallocating on every iteration).
  correlations <- numeric(length(ids))
  if (length(ids) != 0) {
    files <- file.path(directory, sprintf("%03d.csv", ids))
    # seq_along() is safe for length-0 input, unlike 1:length(ids).
    for (i in seq_along(ids)) {
      data <- na.omit(read.csv(files[i], header = TRUE))
      correlations[i] <- cor(data$nitrate, data$sulfate)
    }
  }
  correlations
}
|
/corr.R
|
no_license
|
raypacheco/datasciencecoursera
|
R
| false
| false
| 790
|
r
|
corr <- function(directory, threshold = 0) {
  ## Compute the correlation between 'sulfate' and 'nitrate' for every
  ## monitor whose number of completely observed cases exceeds 'threshold'.
  ##
  ## 'directory': character vector of length 1 naming the folder holding
  ##   the monitor CSV files ("001.csv" ... "332.csv").
  ## 'threshold': numeric vector of length 1 giving the minimum number of
  ##   complete cases required for inclusion; the default is 0.
  ## Returns a numeric vector of correlations (length 0 when no monitor
  ## passes the threshold).
  ## NOTE: relies on complete(), defined elsewhere in this project, which
  ## returns a data frame with columns 'id' and 'nobs'.
  cc <- complete(directory, id = 1:332)
  ids <- cc[["id"]][cc$nobs > threshold]
  # Preallocate the result (the original grew a variable named `c`,
  # shadowing base::c and reallocating on every iteration).
  correlations <- numeric(length(ids))
  if (length(ids) != 0) {
    files <- file.path(directory, sprintf("%03d.csv", ids))
    # seq_along() is safe for length-0 input, unlike 1:length(ids).
    for (i in seq_along(ids)) {
      data <- na.omit(read.csv(files[i], header = TRUE))
      correlations[i] <- cor(data$nitrate, data$sulfate)
    }
  }
  correlations
}
|
# Moment calculations for a k x k nearest-neighbour contingency table.
# Returns a list with:
#   EN   - k x k matrix of expected cell counts under random labelling
#   VN   - k^2 x k^2 matrix filled in by the helper check() (defined
#          elsewhere in the package; it appears to record which of the
#          numbered case formulas was applied to each cell pair)
#   VarN - k^2 x k^2 variance/covariance matrix of the cell counts
#
# Arguments:
#   n - named vector of per-group point counts; names label the groups
#   R - scalar entering the variance formulas; NOTE(review): presumably the
#       number of reflexive NN pairs from Dixon's method -- confirm against
#       the caller (e.g. mNNinfo)
#   Q - scalar entering the variance formulas; NOTE(review): presumably
#       Dixon's Q statistic (shared-neighbour count term) -- confirm
#
# The 12-way branch below covers every pattern of coincidences among the
# two cell index pairs; statement order and the exact inequality patterns
# are load-bearing, so the code is left untouched.
`mNNinfo2` <-
function (n, R, Q)
{
    N <- sum(n)
    k <- length(n)
    l <- names(n)
    EN <- matrix(0, nrow = k, ncol = k)
    VN <- VarN <- matrix(0, nrow = k * k, ncol = k * k)
    # EN[i, j]: expected number of type-i points whose NN is type j; the
    # (i == j) term removes self-pairing within a group.
    for (i in 1:k) {
        for (j in 1:k) {
            EN[i, j] <- n[i] * (n[j] - (i == j))/(N - 1)
        }
    }
    # l1 and l2 enumerate ordered pairs of table cells; (i, j) and (i2, j2)
    # are the row/column indices they encode (row-major over the k x k
    # table).  Only l2 >= l1 is visited; symmetric entries are assigned
    # together via VarN[l1, l2] <- VarN[l2, l1] <- ...
    for (l1 in 1:(k * k)) {
        i <- 1 + (l1 - 1)%/%k
        j <- 1 + (l1 - 1)%%k
        for (l2 in l1:(k * k)) {
            i2 <- 1 + (l2 - 1)%/%k
            j2 <- 1 + (l2 - 1)%%k
            # Case: same cell twice -> a variance (diagonal of VarN).
            if ((i == i2) & (j == j2)) {
                if (i == j) {
                  p2 <- n[i] * (n[i] - 1)/(N * (N - 1))
                  p3 <- p2 * (n[i] - 2)/(N - 2)
                  p4 <- p3 * (n[i] - 3)/(N - 3)
                  VN <- check(1, VN, l1, l2)
                  VarN[l1, l2] <- (N + R) * p2 + (2 * N - 2 *
                    R + Q) * p3 + (N * (N - 3) - Q + R) * p4 -
                    EN[i, j] * EN[i, j]
                }
                else {
                  p2 <- n[i] * n[j]/(N * (N - 1))
                  p3 <- p2 * (n[i] - 1)/(N - 2)
                  p4 <- p3 * (n[j] - 1)/(N - 3)
                  VN <- check(2, VN, l1, l2)
                  VarN[l1, l2] <- N * p2 + Q * p3 + (N * (N -
                    3) - Q + R) * p4 - EN[i, j] * EN[i, j]
                }
            }
            # Remaining cases: covariances between two distinct cells,
            # classified by which of i, j, i2, j2 coincide.
            else if ((i == j) & (i == i2) & (j != j2)) {
                p3 <- n[i] * (n[i] - 1) * n[j2]/(N * (N - 1) *
                  (N - 2))
                p4 <- p3 * (n[i] - 2)/(N - 3)
                VN <- check(3, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R) * p3 +
                  (N * (N - 3) - Q + R) * p4 - EN[i, j] * EN[i2,
                  j2]
            }
            else if ((i2 == j2) & (i == i2) & (j != j2)) {
                p3 <- n[i2] * (n[i2] - 1) * n[j]/(N * (N - 1) *
                  (N - 2))
                p4 <- p3 * (n[i] - 2)/(N - 3)
                VN <- check(3, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R) * p3 +
                  (N * (N - 3) - Q + R) * p4 - EN[i, j] * EN[i2,
                  j2]
            }
            else if ((i2 == j2) & (j == j2) & (i != i2)) {
                p3 <- n[j] * (n[j] - 1) * n[i]/(N * (N - 1) *
                  (N - 2))
                p4 <- p3 * (n[j] - 2)/(N - 3)
                VN <- check(4, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R + Q) *
                  p3 + (N * (N - 3) - Q + R) * p4 - EN[i, j] *
                  EN[i2, j2]
            }
            else if ((i == j) & (i == j2) & (i != i2)) {
                p3 <- n[i] * (n[i] - 1) * n[i2]/(N * (N - 1) *
                  (N - 2))
                p4 <- p3 * (n[i] - 2)/(N - 3)
                #VN <- check(14, VN, l1, l2) # Probably a typo !!!
                VN <- check(4, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R + Q) *
                  p3 + (N * (N - 3) - Q + R) * p4 - EN[i, j] *
                  EN[i2, j2]
            }
            else if ((i == j) & (i2 == j2) & (i != i2)) {
                p4 <- n[i] * (n[i] - 1) * n[i2] * (n[i2] - 1)/(N *
                  (N - 1) * (N - 2) * (N - 3))
                VN <- check(5, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N * (N - 3) -
                  Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
            else if ((i == j) & (i2 != i) & (j2 != j) & (i2 !=
                j2)) {
                p4 <- n[i] * (n[i] - 1) * n[i2] * n[j2]/(N *
                  (N - 1) * (N - 2) * (N - 3))
                VN <- check(6, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N * (N - 3) -
                  Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
            else if ((i2 == j2) & (i2 != i) & (j2 != j) & (i !=
                j)) {
                p4 <- n[i2] * (n[i2] - 1) * n[i] * n[j]/(N *
                  (N - 1) * (N - 2) * (N - 3))
                VN <- check(6, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N * (N - 3) -
                  Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
            else if ((i == i2) & (i != j) & (i2 != j2) & (j !=
                j2)) {
                p4 <- n[i] * (n[i] - 1) * n[j] * n[j2]/(N * (N -
                  1) * (N - 2) * (N - 3))
                VN <- check(7, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N * (N - 3) -
                  Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
            else if ((i == j2) & (i2 == j) & (i != j)) {
                p2 <- n[i] * n[j]/(N * (N - 1))
                p3 <- p2 * (n[i] - 1 + n[j] - 1)/(N - 2)
                p4 <- p2 * (n[i] - 1) * (n[j] - 1)/((N - 2) *
                  (N - 3))
                VN <- check(8, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- R * p2 + (N -
                  R) * p3 + (N * (N - 3) - Q + R) * p4 - EN[i,
                  j] * EN[i2, j2]
            }
            else if ((i != j) & (j == i2) & (i2 != j2) & (i !=
                j2)) {
                p3 <- n[i] * n[j] * n[j2]/(N * (N - 1) * (N -
                  2))
                p4 <- p3 * (n[j] - 1)/(N - 3)
                VN <- check(9, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R) * p3 +
                  (N * (N - 3) - Q + R) * p4 - EN[i, j] * EN[i2,
                  j2]
            }
            else if ((i != j) & (j == j2) & (i2 != j2) & (i !=
                i2)) {
                p3 <- n[i] * n[j] * n[i2]/(N * (N - 1) * (N -
                  2))
                p4 <- p3 * (n[j] - 1)/(N - 3)
                VN <- check(10, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- Q * p3 + (N *
                  (N - 3) - Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
            else if ((i != j) & (i == j2) & (i2 != j2) & (j !=
                i2)) {
                p3 <- n[i] * n[j] * n[i2]/(N * (N - 1) * (N -
                  2))
                p4 <- p3 * (n[i] - 1)/(N - 3)
                VN <- check(11, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R) * p3 +
                  (N * (N - 3) - Q + R) * p4 - EN[i, j] * EN[i2,
                  j2]
            }
            else if ((i != j) & (i != i2) & (i != j2) & (j !=
                i2) & (j != j2) & (i2 != j2)) {
                p4 <- n[i] * n[j] * n[i2] * n[j2]/(N * (N - 1) *
                  (N - 2) * (N - 3))
                VN <- check(12, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N * (N - 3) -
                  Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
        }
    }
    # Label rows/columns of the k^2 x k^2 matrices with concatenated
    # group-name pairs (e.g. "AB" for base type A, neighbour type B).
    v <- as.vector(t(outer(l, l, paste, sep = "")))
    dimnames(VN) <- dimnames(VarN) <- list(v, v)
    list(EN = EN, VN = VN, VarN = VarN)
}
|
/R/mNNinfo2.R
|
no_license
|
cran/dixon
|
R
| false
| false
| 6,903
|
r
|
# Moment calculations for a k x k nearest-neighbour contingency table.
# Returns a list with:
#   EN   - k x k matrix of expected cell counts under random labelling
#   VN   - k^2 x k^2 matrix filled in by the helper check() (defined
#          elsewhere in the package; it appears to record which of the
#          numbered case formulas was applied to each cell pair)
#   VarN - k^2 x k^2 variance/covariance matrix of the cell counts
#
# Arguments:
#   n - named vector of per-group point counts; names label the groups
#   R - scalar entering the variance formulas; NOTE(review): presumably the
#       number of reflexive NN pairs from Dixon's method -- confirm against
#       the caller (e.g. mNNinfo)
#   Q - scalar entering the variance formulas; NOTE(review): presumably
#       Dixon's Q statistic (shared-neighbour count term) -- confirm
#
# The 12-way branch below covers every pattern of coincidences among the
# two cell index pairs; statement order and the exact inequality patterns
# are load-bearing, so the code is left untouched.
`mNNinfo2` <-
function (n, R, Q)
{
    N <- sum(n)
    k <- length(n)
    l <- names(n)
    EN <- matrix(0, nrow = k, ncol = k)
    VN <- VarN <- matrix(0, nrow = k * k, ncol = k * k)
    # EN[i, j]: expected number of type-i points whose NN is type j; the
    # (i == j) term removes self-pairing within a group.
    for (i in 1:k) {
        for (j in 1:k) {
            EN[i, j] <- n[i] * (n[j] - (i == j))/(N - 1)
        }
    }
    # l1 and l2 enumerate ordered pairs of table cells; (i, j) and (i2, j2)
    # are the row/column indices they encode (row-major over the k x k
    # table).  Only l2 >= l1 is visited; symmetric entries are assigned
    # together via VarN[l1, l2] <- VarN[l2, l1] <- ...
    for (l1 in 1:(k * k)) {
        i <- 1 + (l1 - 1)%/%k
        j <- 1 + (l1 - 1)%%k
        for (l2 in l1:(k * k)) {
            i2 <- 1 + (l2 - 1)%/%k
            j2 <- 1 + (l2 - 1)%%k
            # Case: same cell twice -> a variance (diagonal of VarN).
            if ((i == i2) & (j == j2)) {
                if (i == j) {
                  p2 <- n[i] * (n[i] - 1)/(N * (N - 1))
                  p3 <- p2 * (n[i] - 2)/(N - 2)
                  p4 <- p3 * (n[i] - 3)/(N - 3)
                  VN <- check(1, VN, l1, l2)
                  VarN[l1, l2] <- (N + R) * p2 + (2 * N - 2 *
                    R + Q) * p3 + (N * (N - 3) - Q + R) * p4 -
                    EN[i, j] * EN[i, j]
                }
                else {
                  p2 <- n[i] * n[j]/(N * (N - 1))
                  p3 <- p2 * (n[i] - 1)/(N - 2)
                  p4 <- p3 * (n[j] - 1)/(N - 3)
                  VN <- check(2, VN, l1, l2)
                  VarN[l1, l2] <- N * p2 + Q * p3 + (N * (N -
                    3) - Q + R) * p4 - EN[i, j] * EN[i, j]
                }
            }
            # Remaining cases: covariances between two distinct cells,
            # classified by which of i, j, i2, j2 coincide.
            else if ((i == j) & (i == i2) & (j != j2)) {
                p3 <- n[i] * (n[i] - 1) * n[j2]/(N * (N - 1) *
                  (N - 2))
                p4 <- p3 * (n[i] - 2)/(N - 3)
                VN <- check(3, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R) * p3 +
                  (N * (N - 3) - Q + R) * p4 - EN[i, j] * EN[i2,
                  j2]
            }
            else if ((i2 == j2) & (i == i2) & (j != j2)) {
                p3 <- n[i2] * (n[i2] - 1) * n[j]/(N * (N - 1) *
                  (N - 2))
                p4 <- p3 * (n[i] - 2)/(N - 3)
                VN <- check(3, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R) * p3 +
                  (N * (N - 3) - Q + R) * p4 - EN[i, j] * EN[i2,
                  j2]
            }
            else if ((i2 == j2) & (j == j2) & (i != i2)) {
                p3 <- n[j] * (n[j] - 1) * n[i]/(N * (N - 1) *
                  (N - 2))
                p4 <- p3 * (n[j] - 2)/(N - 3)
                VN <- check(4, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R + Q) *
                  p3 + (N * (N - 3) - Q + R) * p4 - EN[i, j] *
                  EN[i2, j2]
            }
            else if ((i == j) & (i == j2) & (i != i2)) {
                p3 <- n[i] * (n[i] - 1) * n[i2]/(N * (N - 1) *
                  (N - 2))
                p4 <- p3 * (n[i] - 2)/(N - 3)
                #VN <- check(14, VN, l1, l2) # Probably a typo !!!
                VN <- check(4, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R + Q) *
                  p3 + (N * (N - 3) - Q + R) * p4 - EN[i, j] *
                  EN[i2, j2]
            }
            else if ((i == j) & (i2 == j2) & (i != i2)) {
                p4 <- n[i] * (n[i] - 1) * n[i2] * (n[i2] - 1)/(N *
                  (N - 1) * (N - 2) * (N - 3))
                VN <- check(5, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N * (N - 3) -
                  Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
            else if ((i == j) & (i2 != i) & (j2 != j) & (i2 !=
                j2)) {
                p4 <- n[i] * (n[i] - 1) * n[i2] * n[j2]/(N *
                  (N - 1) * (N - 2) * (N - 3))
                VN <- check(6, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N * (N - 3) -
                  Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
            else if ((i2 == j2) & (i2 != i) & (j2 != j) & (i !=
                j)) {
                p4 <- n[i2] * (n[i2] - 1) * n[i] * n[j]/(N *
                  (N - 1) * (N - 2) * (N - 3))
                VN <- check(6, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N * (N - 3) -
                  Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
            else if ((i == i2) & (i != j) & (i2 != j2) & (j !=
                j2)) {
                p4 <- n[i] * (n[i] - 1) * n[j] * n[j2]/(N * (N -
                  1) * (N - 2) * (N - 3))
                VN <- check(7, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N * (N - 3) -
                  Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
            else if ((i == j2) & (i2 == j) & (i != j)) {
                p2 <- n[i] * n[j]/(N * (N - 1))
                p3 <- p2 * (n[i] - 1 + n[j] - 1)/(N - 2)
                p4 <- p2 * (n[i] - 1) * (n[j] - 1)/((N - 2) *
                  (N - 3))
                VN <- check(8, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- R * p2 + (N -
                  R) * p3 + (N * (N - 3) - Q + R) * p4 - EN[i,
                  j] * EN[i2, j2]
            }
            else if ((i != j) & (j == i2) & (i2 != j2) & (i !=
                j2)) {
                p3 <- n[i] * n[j] * n[j2]/(N * (N - 1) * (N -
                  2))
                p4 <- p3 * (n[j] - 1)/(N - 3)
                VN <- check(9, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R) * p3 +
                  (N * (N - 3) - Q + R) * p4 - EN[i, j] * EN[i2,
                  j2]
            }
            else if ((i != j) & (j == j2) & (i2 != j2) & (i !=
                i2)) {
                p3 <- n[i] * n[j] * n[i2]/(N * (N - 1) * (N -
                  2))
                p4 <- p3 * (n[j] - 1)/(N - 3)
                VN <- check(10, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- Q * p3 + (N *
                  (N - 3) - Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
            else if ((i != j) & (i == j2) & (i2 != j2) & (j !=
                i2)) {
                p3 <- n[i] * n[j] * n[i2]/(N * (N - 1) * (N -
                  2))
                p4 <- p3 * (n[i] - 1)/(N - 3)
                VN <- check(11, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N - R) * p3 +
                  (N * (N - 3) - Q + R) * p4 - EN[i, j] * EN[i2,
                  j2]
            }
            else if ((i != j) & (i != i2) & (i != j2) & (j !=
                i2) & (j != j2) & (i2 != j2)) {
                p4 <- n[i] * n[j] * n[i2] * n[j2]/(N * (N - 1) *
                  (N - 2) * (N - 3))
                VN <- check(12, VN, l1, l2)
                VarN[l1, l2] <- VarN[l2, l1] <- (N * (N - 3) -
                  Q + R) * p4 - EN[i, j] * EN[i2, j2]
            }
        }
    }
    # Label rows/columns of the k^2 x k^2 matrices with concatenated
    # group-name pairs (e.g. "AB" for base type A, neighbour type B).
    v <- as.vector(t(outer(l, l, paste, sep = "")))
    dimnames(VN) <- dimnames(VarN) <- list(v, v)
    list(EN = EN, VN = VN, VarN = VarN)
}
|
# Per-FAZ (Forecast Analysis Zone) review comments, keyed by FAZ id as a
# string.  Values may contain embedded "\n" separating comments from
# different reviewers; the reviewing jurisdiction is given in parentheses.
#
# FIX: removed the trailing comma after the last entry -- a trailing comma
# in an R call produces an empty argument, and list(..., ) fails at runtime
# with "argument N is empty", so this file could not be sourced.
comments <- list(
  "325"="Verify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "505"="HH too high in Rural area, 15,500-16,000 in 2020, 18,500-19,000 in 2030, 22,500-23,000 in 2040(Pierce)",
  "506"="HH low given inventory of post-2010 MPDs, 11,000-11,500 in 2030, 14,800-15,400 in 2040(Pierce)",
  "705"="HH low given inventory of 6500 post-2010 MPD HU, 6800-7200 in 2020, 8800-9100 in 2030, 13,300-13,600 in 2040(Pierce)\nVerify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "805"="HH too high in Rural area, 7,300-7,500 in 2020, 8,300-8,600 in 2030, 9,000-9,300 in 2040(Pierce)",
  "1310"="Residential (Potentially) High (Tacoma)\nJobs should not decrease; 5300 - 5500 in 2030, 5500 - 5800 in 2040(Pierce)",
  "1320"="Residential (Potentially) High (Tacoma) \nJobs should not decrease 4200- 4600 in 2020(Pierce)",
  "1330"="Jobs should not decrease 5000-5200 in 2020, 5200-5400 in 2030, 5400-5600 in 2040 (Pierce)\nSector mix issue: Gov(Pierce)\nVerify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "1410"="Residential (Potentially) High (Tacoma), TotHH & MFHH too high (Pierce)\nJobs should not decrease, 12,000-13,000 in 2020, 13,000-14,000 in 2030(Pierce)",
  "1420"="TotHH & MFHH too high(Pierce)",
  "1605"="Residential (Potentially) High(Tacoma)\nJobs should not decrease, 6800-7000 in 2030, 7000-7200 in 2040(Pierce)",
  "1606"="Residential (Potentially) High(Tacoma)\nJobs should not decrease, 3100-3500 in 2040 (Pierce)",
  "1710"="Residential (Potentially) High(Tacoma), TotHH & MFHH too high (Pierce)",
  "1720"="Residential (Potentially) High(Tacoma)\nJobs should not decrease, 6900-7100 in 2030, 7100-7300 in 2040(Pierce)",
  "1810"="Growth low (Tacoma)",
  "1820"="HHs should not decrease 2010-2020, 4350-4400 in 2020,TotHH & MFHH too high in 2040(Pierce)\nJobs too high in 2040(Pierce)",
  "1900"="HHs should not decrease 2020-2030, 450-520 in 2030(Pierce)\nJobs too high by 2040 for Ind area(Pierce)\nSector mix issue: Services(Pierce)",
  "2910"="Too much HH growth in 2030-2040, 1900-2100 in 2040(Pierce)",
  "2925"="Too much HH growth in 2030-2040, 8000-8500 in 2040(Pierce)\nVerify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "2926"="Verify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "2927"="Too much HH growth in 2030-2040, 4500-6000 in 2040(Pierce)",
  "2935"="JBLM: HHs should not decrease, 4100-4200 in 2020, 4200-4300 in 2030, 4300-4400 in 2040(Pierce)\nJobs look low in all years(Pierce)\nMHS: comp to SAF06 not accurate, SAF06 includes enlisted\nJBLM Reset Facility - added maintenance functions(Lakewood)",
  "2936"="Jobs should not decrease, 2600-2800 in 2020, 2800-3000 in 2030, 3000-3500 in 2040(Pierce)\nVerify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "2940"="Too much HH growth in 2030-2040, 9000-9500 in 2040(Pierce)",
  "3046"="Midway Subarea Plan, HHs & Jobs should be higher, area around Highline Comm College 2680 HHs and 2220 jobs(Kent)\n MHS Midway Subarea only part of FAZ",
  "3505"="Increase HH and reduce Jobs(service) - by 2040 Downtown Subarea = 10,700 HHs, 8500 Jobs(Kent)",
  "3600"="Sector Mix issue: more Manuf than Service job growth in MIC(Kent)\nMHS: FAZ 3600 includes Segale, source of most Service sector growth",
  "3705"="Port study est. 18,800 jobs on-site at airport, 00-10 jobs in FAZ appear high at 34,500-26,600(Port)\nSector Mix issue: WTU growth is low given airport location(Port)",
  "4505"="HHs high by 577 in 2030(Bellevue)\nJobs look reasonable, leave as is(Bellevue)",
  "4506"="HHs maybe high but unsure, FAZ contains part of Issaquah(Bellevue)\nEmployment Looks low by 3330 in 2030,but FLU does not include anticipated upzones(Bellevue)",
  "4810"="HHs looks low by 262 in 2030(Bellevue)\nEmployment may be high by 1200 in 2030 - parcel w dev. event should be locked out due to light rail(Bellevue)\nMHS: thats unclear",
  "4820"="HHs a little high - by 125 in 2030, 12605-14207 in 2030, 14805-17807 in 2040(Bellevue)\nJobs perhaps high (how are parking spaces handled?)(Bellevue)",
  "4900"="HHs: 9,500-9,900 in 2020, 12,600-14,200 in 2030, 14,800-17,800 in 2040\nHHPop: 14,200-14,800 in 2020, 18,900-21,300 in 2030, 22,200-26,700 in 2040\nJobs: 52,200-55,800 in 2020, 2030 is OK, 85,400-90,800 in 2040",
  "5010"="HHs a little high - by 286 in 2030(Bellevue)\nJobs high, 3000 being added back into existing: 11,300-12,500 in 2020, 12,100-13,300 in 2030, 12,900-14,200 in 2040(Bellevue)",
  "5020"="HHs may be low by 269 in 2030(Bellevue)\nEmpl. looks reasonable(Bellevue)",
  "5205"="HHs: 6,500-7,200 in 2020, 9,400-9,800 in 2030, 11,850-12,300 in 2040 (also comments on SF/MF mix)(Bellevue)\nHH Pop: 14,500-15,700 in 2020, 19,900-20,800 in 2030, 24,600-25,500 in 2040 (also comments on TotPop)(Bellevue)\nJobs: look reasonable(Bellevue)",
  "5305"="City of Kirkland: HU growth thru 2040 ridiculously low(Kirkland)",
  "5306"="City of Kirkland: HU growth thru 2040 ridiculously low(Kirkland)",
  "5415"="Jobs decline from 2020-30, overall slow growth 2020-40(Known issue)\n Verify likely growth given MSFT expansion plans & other potential development",
  "5425"="Main City of Redmond FAZ, job growth high, MF growth low",
  "5515"="City of Kirkland: HU growth thru 2040 ridiculously low(Kirkland)",
  "5720"="Residential a bit too high(Seattle)",
  "5825"="Sector Mix issue: MIC land getting high retail/services jobs(Port)",
  "5826"="Sector Mix issue: MIC land getting high retail/services jobs(Port)",
  "5916"="Residential too high(Seattle)",
  "5925"="Residential a bit too high(Seattle)",
  "6010"="Jobs really too high, residential moderately too low (2000 units in pipeline now in downtown Sea CBD)(Seattle)",
  "6020"="Jobs really too high, residential moderately too low (2000 units in pipeline now in downtown Sea CBD)(Seattle)",
  "6113"="Job decline doubtful in 2020+, stable due to hospitals and Seattle U(Seattle)",
  "6123"="Jobs OK,HHs low(Seattle)",
  "6216"="Job decline doubtful in 2020 - Childrens Hosp expansion(Seattle)",
  "6224"="Could receive more growth from 6326(Seattle)",
  "6225"="Northgate (growth low relative to 6326, 6223 both HHs and Jobs)(Seattle)",
  "6226"="too much residential(Seattle)",
  "6326"="Both Jobs and HHs quite high - more likely in 6225 Northgate and to a lesser extent 6224(Seattle)",
  "6501"="Verify lack of growth or loss in jobs 2010-20 is reasonable or revise(Known issue)",
  "7015"="Pop in FAZ group(7015,7025,7026) jumps 2010-20 (Mostly SF), then levels off thru 2040\nJobs: 00-10 Gov increase mystery, services decline same period\nJobs: frontloading 2010-20 more likely for post-2020",
  "7025"="Pop in FAZ group(7015,7025,7026) jumps 2010-20 (Mostly SF), then levels off thru 2040\nJobs: 00-10 Gov increase mystery, services decline same period\nJobs: frontloading 2010-20 more likely for post-2020",
  "7026"="Pop in FAZ group(7015,7025,7026) jumps 2010-20 (Mostly SF), then levels off thru 2040\nJobs: 00-10 Gov increase mystery, services decline same period\nJobs: frontloading 2010-20 more likely for post-2020",
  "7100"="TotPop too High: 21,600-24,400 in 2040(?)(MT)\nJobs high esp 30-40 gain, 8,800-12,300 in 2040",
  "7206"="Sector Mix issue: Boeing moved out between 00-10, Manuf should be 2000 jobs lower by 2040",
  "7320"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7335"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7340"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7415"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7526"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7605"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7606"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7806"="Sector Mix issue: Retail jobs too low in 2040 relative to 2010, +400 in 2040",
  "8925"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "8927"="MF HHs decline or stagnate from 10-20 and-or 20-30\nGov jobs much higher in 2010 than other decades, Stillaguamish tribe w casino, +700 Gov jobs in 2040",
  "8935"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "8936"="2010 Retail is higher than other decades, +500 retail in 2040",
  "8937"="Sector Mix issue: Tribal employment Services in 00, 20, 30, 40, and Gov in 2010?",
  "9908"="Bangor base, need to include enlisted personnel(Known issue)"
)
# Load parcel-id edits and the FAZ id -> name lookup table used by the
# report scripts; both files are expected in the current working directory.
source('faz_pidedits.R')
faz_names <- read.table('faz_names.txt', header=TRUE, sep='\t')
|
/data/comments_mhs_updated.R
|
no_license
|
hanase/Rreports
|
R
| false
| false
| 8,377
|
r
|
# Per-FAZ (Forecast Analysis Zone) review comments, keyed by FAZ id as a
# string.  Values may contain embedded "\n" separating comments from
# different reviewers; the reviewing jurisdiction is given in parentheses.
#
# FIX: removed the trailing comma after the last entry -- a trailing comma
# in an R call produces an empty argument, and list(..., ) fails at runtime
# with "argument N is empty", so this file could not be sourced.
comments <- list(
  "325"="Verify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "505"="HH too high in Rural area, 15,500-16,000 in 2020, 18,500-19,000 in 2030, 22,500-23,000 in 2040(Pierce)",
  "506"="HH low given inventory of post-2010 MPDs, 11,000-11,500 in 2030, 14,800-15,400 in 2040(Pierce)",
  "705"="HH low given inventory of 6500 post-2010 MPD HU, 6800-7200 in 2020, 8800-9100 in 2030, 13,300-13,600 in 2040(Pierce)\nVerify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "805"="HH too high in Rural area, 7,300-7,500 in 2020, 8,300-8,600 in 2030, 9,000-9,300 in 2040(Pierce)",
  "1310"="Residential (Potentially) High (Tacoma)\nJobs should not decrease; 5300 - 5500 in 2030, 5500 - 5800 in 2040(Pierce)",
  "1320"="Residential (Potentially) High (Tacoma) \nJobs should not decrease 4200- 4600 in 2020(Pierce)",
  "1330"="Jobs should not decrease 5000-5200 in 2020, 5200-5400 in 2030, 5400-5600 in 2040 (Pierce)\nSector mix issue: Gov(Pierce)\nVerify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "1410"="Residential (Potentially) High (Tacoma), TotHH & MFHH too high (Pierce)\nJobs should not decrease, 12,000-13,000 in 2020, 13,000-14,000 in 2030(Pierce)",
  "1420"="TotHH & MFHH too high(Pierce)",
  "1605"="Residential (Potentially) High(Tacoma)\nJobs should not decrease, 6800-7000 in 2030, 7000-7200 in 2040(Pierce)",
  "1606"="Residential (Potentially) High(Tacoma)\nJobs should not decrease, 3100-3500 in 2040 (Pierce)",
  "1710"="Residential (Potentially) High(Tacoma), TotHH & MFHH too high (Pierce)",
  "1720"="Residential (Potentially) High(Tacoma)\nJobs should not decrease, 6900-7100 in 2030, 7100-7300 in 2040(Pierce)",
  "1810"="Growth low (Tacoma)",
  "1820"="HHs should not decrease 2010-2020, 4350-4400 in 2020,TotHH & MFHH too high in 2040(Pierce)\nJobs too high in 2040(Pierce)",
  "1900"="HHs should not decrease 2020-2030, 450-520 in 2030(Pierce)\nJobs too high by 2040 for Ind area(Pierce)\nSector mix issue: Services(Pierce)",
  "2910"="Too much HH growth in 2030-2040, 1900-2100 in 2040(Pierce)",
  "2925"="Too much HH growth in 2030-2040, 8000-8500 in 2040(Pierce)\nVerify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "2926"="Verify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "2927"="Too much HH growth in 2030-2040, 4500-6000 in 2040(Pierce)",
  "2935"="JBLM: HHs should not decrease, 4100-4200 in 2020, 4200-4300 in 2030, 4300-4400 in 2040(Pierce)\nJobs look low in all years(Pierce)\nMHS: comp to SAF06 not accurate, SAF06 includes enlisted\nJBLM Reset Facility - added maintenance functions(Lakewood)",
  "2936"="Jobs should not decrease, 2600-2800 in 2020, 2800-3000 in 2030, 3000-3500 in 2040(Pierce)\nVerify lack of growth or loss in HH 2010-20 is reasonable or revise(Known issue)",
  "2940"="Too much HH growth in 2030-2040, 9000-9500 in 2040(Pierce)",
  "3046"="Midway Subarea Plan, HHs & Jobs should be higher, area around Highline Comm College 2680 HHs and 2220 jobs(Kent)\n MHS Midway Subarea only part of FAZ",
  "3505"="Increase HH and reduce Jobs(service) - by 2040 Downtown Subarea = 10,700 HHs, 8500 Jobs(Kent)",
  "3600"="Sector Mix issue: more Manuf than Service job growth in MIC(Kent)\nMHS: FAZ 3600 includes Segale, source of most Service sector growth",
  "3705"="Port study est. 18,800 jobs on-site at airport, 00-10 jobs in FAZ appear high at 34,500-26,600(Port)\nSector Mix issue: WTU growth is low given airport location(Port)",
  "4505"="HHs high by 577 in 2030(Bellevue)\nJobs look reasonable, leave as is(Bellevue)",
  "4506"="HHs maybe high but unsure, FAZ contains part of Issaquah(Bellevue)\nEmployment Looks low by 3330 in 2030,but FLU does not include anticipated upzones(Bellevue)",
  "4810"="HHs looks low by 262 in 2030(Bellevue)\nEmployment may be high by 1200 in 2030 - parcel w dev. event should be locked out due to light rail(Bellevue)\nMHS: thats unclear",
  "4820"="HHs a little high - by 125 in 2030, 12605-14207 in 2030, 14805-17807 in 2040(Bellevue)\nJobs perhaps high (how are parking spaces handled?)(Bellevue)",
  "4900"="HHs: 9,500-9,900 in 2020, 12,600-14,200 in 2030, 14,800-17,800 in 2040\nHHPop: 14,200-14,800 in 2020, 18,900-21,300 in 2030, 22,200-26,700 in 2040\nJobs: 52,200-55,800 in 2020, 2030 is OK, 85,400-90,800 in 2040",
  "5010"="HHs a little high - by 286 in 2030(Bellevue)\nJobs high, 3000 being added back into existing: 11,300-12,500 in 2020, 12,100-13,300 in 2030, 12,900-14,200 in 2040(Bellevue)",
  "5020"="HHs may be low by 269 in 2030(Bellevue)\nEmpl. looks reasonable(Bellevue)",
  "5205"="HHs: 6,500-7,200 in 2020, 9,400-9,800 in 2030, 11,850-12,300 in 2040 (also comments on SF/MF mix)(Bellevue)\nHH Pop: 14,500-15,700 in 2020, 19,900-20,800 in 2030, 24,600-25,500 in 2040 (also comments on TotPop)(Bellevue)\nJobs: look reasonable(Bellevue)",
  "5305"="City of Kirkland: HU growth thru 2040 ridiculously low(Kirkland)",
  "5306"="City of Kirkland: HU growth thru 2040 ridiculously low(Kirkland)",
  "5415"="Jobs decline from 2020-30, overall slow growth 2020-40(Known issue)\n Verify likely growth given MSFT expansion plans & other potential development",
  "5425"="Main City of Redmond FAZ, job growth high, MF growth low",
  "5515"="City of Kirkland: HU growth thru 2040 ridiculously low(Kirkland)",
  "5720"="Residential a bit too high(Seattle)",
  "5825"="Sector Mix issue: MIC land getting high retail/services jobs(Port)",
  "5826"="Sector Mix issue: MIC land getting high retail/services jobs(Port)",
  "5916"="Residential too high(Seattle)",
  "5925"="Residential a bit too high(Seattle)",
  "6010"="Jobs really too high, residential moderately too low (2000 units in pipeline now in downtown Sea CBD)(Seattle)",
  "6020"="Jobs really too high, residential moderately too low (2000 units in pipeline now in downtown Sea CBD)(Seattle)",
  "6113"="Job decline doubtful in 2020+, stable due to hospitals and Seattle U(Seattle)",
  "6123"="Jobs OK,HHs low(Seattle)",
  "6216"="Job decline doubtful in 2020 - Childrens Hosp expansion(Seattle)",
  "6224"="Could receive more growth from 6326(Seattle)",
  "6225"="Northgate (growth low relative to 6326, 6223 both HHs and Jobs)(Seattle)",
  "6226"="too much residential(Seattle)",
  "6326"="Both Jobs and HHs quite high - more likely in 6225 Northgate and to a lesser extent 6224(Seattle)",
  "6501"="Verify lack of growth or loss in jobs 2010-20 is reasonable or revise(Known issue)",
  "7015"="Pop in FAZ group(7015,7025,7026) jumps 2010-20 (Mostly SF), then levels off thru 2040\nJobs: 00-10 Gov increase mystery, services decline same period\nJobs: frontloading 2010-20 more likely for post-2020",
  "7025"="Pop in FAZ group(7015,7025,7026) jumps 2010-20 (Mostly SF), then levels off thru 2040\nJobs: 00-10 Gov increase mystery, services decline same period\nJobs: frontloading 2010-20 more likely for post-2020",
  "7026"="Pop in FAZ group(7015,7025,7026) jumps 2010-20 (Mostly SF), then levels off thru 2040\nJobs: 00-10 Gov increase mystery, services decline same period\nJobs: frontloading 2010-20 more likely for post-2020",
  "7100"="TotPop too High: 21,600-24,400 in 2040(?)(MT)\nJobs high esp 30-40 gain, 8,800-12,300 in 2040",
  "7206"="Sector Mix issue: Boeing moved out between 00-10, Manuf should be 2000 jobs lower by 2040",
  "7320"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7335"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7340"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7415"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7526"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7605"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7606"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "7806"="Sector Mix issue: Retail jobs too low in 2040 relative to 2010, +400 in 2040",
  "8925"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "8927"="MF HHs decline or stagnate from 10-20 and-or 20-30\nGov jobs much higher in 2010 than other decades, Stillaguamish tribe w casino, +700 Gov jobs in 2040",
  "8935"="MF HHs decline or stagnate from 10-20 and-or 20-30",
  "8936"="2010 Retail is higher than other decades, +500 retail in 2040",
  "8937"="Sector Mix issue: Tribal employment Services in 00, 20, 30, 40, and Gov in 2010?",
  "9908"="Bangor base, need to include enlisted personnel(Known issue)"
)
# Load parcel-id edits and the FAZ id -> name lookup table used by the
# report scripts; both files are expected in the current working directory.
source('faz_pidedits.R')
faz_names <- read.table('faz_names.txt', header=TRUE, sep='\t')
|
library(ggplot2)
library(ggthemes)
library(scales)
library(reshape2) # For melt()
library(dplyr)
pf <- read.csv('pseudo_facebook.tsv', sep = '\t')
yo <- read.csv('yogurt.csv')
statesInfo <- read.csv('stateData.csv')
data('airquality')
airquality
#####################################################
# 1. LOOKING AT THE DATA
#####################################################
# About a dataset
str(statesInfo) # shows metadata of a dataset
nrow(statesInfo) # count rows
names(statesInfo) # lists the column names in a dataframe
ncol(statesInfo) # count columns
head(statesInfo) # shows first few rows of dataset
unique(statesInfo$X) # shows unique values in the variable
# Indexing/Subsetting columns of a dataframe
subset(statesInfo, state.region == 2 ) # subset function by selecting column headers
statesInfo[,c(1,2,5)] # subsetting: the numbers are the column number
statesInfo[,c("X","income")] # subsetting: based on column headers
statesInfo$population # subsetting a column into a vector
statesInfo[c(1,2,5),] # subsetting ROWS
statesInfo[(statesInfo$state.region == 2
& statesInfo$life.exp > 69),] # subsetting based on conditionals
# Ordering levels in factors within a dataframe
levels(reddit$age.range) # lists all the levels for the factor age.range
reddit$age.range <-
ordered(reddit$age.range, # re-orders the age.range variable factor levels in the reddit dataset
levels = c( "Under 18", "18-24",
"25-34", "35-44", "45-54",
"55-64", "65 or Above"))
reddit$age.range <-
factor(reddit$age.range, # does the same as above, using the factor function
levels = c("Under 18", "18-24",
"25-34", "35-44", "45-54",
"55-64", "65 or Above"),
ordered = TRUE)
#recoding variables with dplyr
applicants <- read.csv("D:/Google Drive/Documents/Analytics/R/Datasets/collegeadmit.csv")
applicants$rank2 <- as.factor(applicants$rank)
library(plyr)
applicants$rank2 <- revalue(applicants$rank2,
c("1" = "Tier1","2" = "Tier2",
"3" = "Tier3","4" = "Tier4"))
#####################################################
# 2. DEALING WITH ROWNS, COLUMNS AND VALUES
#####################################################
# Reordering columns based on their index
aql <- airquality[c(5:6, 1:4)]
# Add/Drop variables
noFrost <- subset(statesInfo, select = -frost) # dropping the frost variable
# Removing row.names variable
row.names(statesInfo) <- NULL # removing artificially created row names
# Extracting records with NA's
NAS <- statesInfo[!complete.cases(statesInfo),] # to see all records that contain atleast one NA
# Dealing with NA's
Clean_Data <- pf[complete.cases(pf), ] # removing all observations with NA
Clean_Data2 <- pf[complete.cases(pf[,1:2]),] # only keeping obs that have no NA's in Column 1 and 2
# Counting NA's
myNA <- is.na(pf) # is.na checks for missing data points and flags them as TRUE/FALSE
sum(myNA) # summing these logical flags yields the total number of mising points
# Sorting data
X[order(X$var1,X$var3),]
aql <- arrange(aql, Ozone) # using the dplyr package for sorting
# adding columns together for each observation using transform
yo <- transform(yo,
all.purchases=(strawberry+blueberry+
pina.colada+plain+mixed.berry))
#####################################################
# 2. DATA LONG/WIDE FORMATS
#####################################################
# Converting dataframe to long format
library('reshape2')
aql <- melt(airquality) # melt essentially converts data to long format but you need to fine tune it
aql <- melt(airquality,
id.vars = c("Month", "Day"), # ID variables are the variables that identify individual rows of data.
variable.name = "Climate_variable", # this names the new created variable
value.name = "Climate_value") # and fills it with the values
# Converting dataframe to wide format
aqlm <- dcast(aql, Month+Day ~ Climate_variable, # left of ~: key, right of ~: column to convert to row
value.var="Climate_value") # data that goes into new columns
#####################################################
# 3. DATA TRANSPOSING
#####################################################
# Transposing dataframes that have strings in the first column. In these cases a smple transpose will
#cause the whole dataframe to convert into a character:
gdp <- read.csv('gdp.csv', header = TRUE)
gdp2 <- t(gdp) # converts the whole dataframe into a character
is.character(gdp2)
is.data.frame(gdp2)
#Storing the country names in column X from the gdp dataframe
n <- as.vector(gdp$X) #country column names are converted to a vector
n <- c('year', n) #'year' is added to the vector
names <- factor(n, levels=n) #'year' is added as a level
# transpose all but the first column(X)
gdp2 <- as.data.frame(t(gdp[,-1])) # transposing the dataframe
gdp2$year <- row.names(gdp2) # assigning 'gdp' rownames(first column) as a column name to'year'
gdp2 <- gdp2[c(189, 1:188)] # re-positioning columns in the new data frame
gdp2$year <- (substring(gdp2$year,2,nchar(gdp2$year))) # removing the "x" in year to convert to numeric format
colnames(gdp2) <- names # assigning column names to new dataset from the 'names' factor
row.names(gdp2) <- NULL # resetting row headers in 'gdp2'
#####################################################
# 3. BINNING, FACTORING & SAMPLING
#####################################################
# Converting a continuous variable to a binary variable
summary(pf$mobile_likes)
mobile_check_in <- NA
pf$mobile_check_in <- ifelse(pf$mobile_likes > 0,1,0) # if likes>0, assign 1 else assign 0
pf$mobile_check_in <- factor(pf$mobile_check_in) # converting to a factor
summary(pf$mobile_check_in)
# Creating class intervals for age buckets
pf$year_joined.bucket <- cut(pf$dob_year, # specify column for bucketing
breaks= seq(1900,2000,10), # specify class interval width - can be non-equal
labels = NULL, # the bins are stored as a factor and levels can be labelled
include.lowest = TRUE, right = TRUE, # specify whether class interval is closed on the right ( ] ) or lefe ( [ )
ordered_result = TRUE)
# Sampling a dataframe
pf_sample<- pf[sample(1:length(pf$age), 10000),] # to obtain 10,000 random samples
sample.ids <- sample(levels(yo$id),16) # creates a sample from our original yogurt dataframe
ggplot(data=(subset(yo, id%in% sample.ids)),
|
/5 - Data Munging.R
|
no_license
|
macjas/Analytics-with-R
|
R
| false
| false
| 8,353
|
r
|
library(ggplot2)
library(ggthemes)
library(scales)
library(reshape2) # For melt()
library(dplyr)
pf <- read.csv('pseudo_facebook.tsv', sep = '\t')
yo <- read.csv('yogurt.csv')
statesInfo <- read.csv('stateData.csv')
data('airquality')
airquality
#####################################################
# 1. LOOKING AT THE DATA
#####################################################
# About a dataset
str(statesInfo) # shows metadata of a dataset
nrow(statesInfo) # count rows
names(statesInfo) # lists the column names in a dataframe
ncol(statesInfo) # count columns
head(statesInfo) # shows first few rows of dataset
unique(statesInfo$X) # shows unique values in the variable
# Indexing/Subsetting columns of a dataframe
subset(statesInfo, state.region == 2 ) # subset function by selecting column headers
statesInfo[,c(1,2,5)] # subsetting: the numbers are the column number
statesInfo[,c("X","income")] # subsetting: based on column headers
statesInfo$population # subsetting a column into a vector
statesInfo[c(1,2,5),] # subsetting ROWS
statesInfo[(statesInfo$state.region == 2
& statesInfo$life.exp > 69),] # subsetting based on conditionals
# Ordering levels in factors within a dataframe
levels(reddit$age.range) # lists all the levels for the factor age.range
reddit$age.range <-
ordered(reddit$age.range, # re-orders the age.range variable factor levels in the reddit dataset
levels = c( "Under 18", "18-24",
"25-34", "35-44", "45-54",
"55-64", "65 or Above"))
reddit$age.range <-
factor(reddit$age.range, # does the same as above, using the factor function
levels = c("Under 18", "18-24",
"25-34", "35-44", "45-54",
"55-64", "65 or Above"),
ordered = TRUE)
#recoding variables with dplyr
applicants <- read.csv("D:/Google Drive/Documents/Analytics/R/Datasets/collegeadmit.csv")
applicants$rank2 <- as.factor(applicants$rank)
library(plyr)
applicants$rank2 <- revalue(applicants$rank2,
c("1" = "Tier1","2" = "Tier2",
"3" = "Tier3","4" = "Tier4"))
#####################################################
# 2. DEALING WITH ROWS, COLUMNS AND VALUES
#####################################################
# Reordering columns based on their index
aql <- airquality[c(5:6, 1:4)]
# Add/Drop variables
noFrost <- subset(statesInfo, select = -frost) # dropping the frost variable
# Removing row.names variable
row.names(statesInfo) <- NULL # removing artificially created row names
# Extracting records with NA's
NAS <- statesInfo[!complete.cases(statesInfo),] # to see all records that contain atleast one NA
# Dealing with NA's
Clean_Data <- pf[complete.cases(pf), ] # removing all observations with NA
Clean_Data2 <- pf[complete.cases(pf[,1:2]),] # only keeping obs that have no NA's in Column 1 and 2
# Counting NA's
myNA <- is.na(pf) # is.na checks for missing data points and flags them as TRUE/FALSE
sum(myNA) # summing these logical flags yields the total number of mising points
# Sorting data
X[order(X$var1,X$var3),]
aql <- arrange(aql, Ozone) # using the dplyr package for sorting
# adding columns together for each observation using transform
yo <- transform(yo,
all.purchases=(strawberry+blueberry+
pina.colada+plain+mixed.berry))
#####################################################
# 2. DATA LONG/WIDE FORMATS
#####################################################
# Converting dataframe to long format
library('reshape2')
aql <- melt(airquality) # melt essentially converts data to long format but you need to fine tune it
aql <- melt(airquality,
id.vars = c("Month", "Day"), # ID variables are the variables that identify individual rows of data.
variable.name = "Climate_variable", # this names the new created variable
value.name = "Climate_value") # and fills it with the values
# Converting dataframe to wide format
aqlm <- dcast(aql, Month+Day ~ Climate_variable, # left of ~: key, right of ~: column to convert to row
value.var="Climate_value") # data that goes into new columns
#####################################################
# 3. DATA TRANSPOSING
#####################################################
# Transposing dataframes that have strings in the first column. In these cases a simple transpose will
#cause the whole dataframe to convert into a character:
gdp <- read.csv('gdp.csv', header = TRUE)
gdp2 <- t(gdp) # converts the whole dataframe into a character
is.character(gdp2)
is.data.frame(gdp2)
#Storing the country names in column X from the gdp dataframe
n <- as.vector(gdp$X) #country column names are converted to a vector
n <- c('year', n) #'year' is added to the vector
names <- factor(n, levels=n) #'year' is added as a level
# transpose all but the first column(X)
gdp2 <- as.data.frame(t(gdp[,-1])) # transposing the dataframe
gdp2$year <- row.names(gdp2) # assigning 'gdp' rownames(first column) as a column name to'year'
gdp2 <- gdp2[c(189, 1:188)] # re-positioning columns in the new data frame
gdp2$year <- (substring(gdp2$year,2,nchar(gdp2$year))) # removing the "x" in year to convert to numeric format
colnames(gdp2) <- names # assigning column names to new dataset from the 'names' factor
row.names(gdp2) <- NULL # resetting row headers in 'gdp2'
#####################################################
# 3. BINNING, FACTORING & SAMPLING
#####################################################
# Converting a continuous variable to a binary variable
summary(pf$mobile_likes)
mobile_check_in <- NA
pf$mobile_check_in <- ifelse(pf$mobile_likes > 0,1,0) # if likes>0, assign 1 else assign 0
pf$mobile_check_in <- factor(pf$mobile_check_in) # converting to a factor
summary(pf$mobile_check_in)
# Creating class intervals for age buckets
pf$year_joined.bucket <- cut(pf$dob_year, # specify column for bucketing
breaks= seq(1900,2000,10), # specify class interval width - can be non-equal
labels = NULL, # the bins are stored as a factor and levels can be labelled
include.lowest = TRUE, right = TRUE, # specify whether class interval is closed on the right ( ] ) or lefe ( [ )
ordered_result = TRUE)
# Sampling a dataframe
pf_sample<- pf[sample(1:length(pf$age), 10000),] # to obtain 10,000 random samples
sample.ids <- sample(levels(yo$id),16) # creates a sample from our original yogurt dataframe
ggplot(data=(subset(yo, id%in% sample.ids)),
|
##' @importFrom grid gList
##' @importFrom grid rectGrob
##' @importFrom grid polygonGrob
##' @importFrom grid gpar
# Candle grob: a rectangular candle body topped with a polygon flame.
# (x, y) is the lower-left anchor of the candle body, in native units.
candleGrob <- function(x, y, color.candle = "orange", color.fire = "red", vp=NULL) {
  body.w <- 0.02
  body.h <- 0.2
  # flame outline, traced around and closed on the first vertex
  flame.x <- x + c(.005, -.01, .01, .03, .015, .005)
  flame.y <- y + c(.2, .23, .26, .23, .2, .2)
  body <- rectGrob(x + body.w/2, y + body.h/2, width = body.w, height = body.h,
                   gp = gpar(fill = color.candle), vp = vp)
  flame <- polygonGrob(flame.x, flame.y, gp = gpar(fill = color.fire), vp = vp)
  gTree(children = gList(body, flame))
}
# Polygon approximation of an ellipse centred at (x, y) with horizontal
# semi-axis a and vertical semi-axis b, using the parametric form
# (x + a*cos(t), y + b*sin(t)).
# `n` is the number of vertices used to approximate the outline
# (generalized from the previously hard-coded 100; default preserves
# the original behavior).
ellipseGrob <- function(x, y, a, b, gp=gpar(), vp=NULL, n=100) {
  t <- seq(0, 2*pi, length.out=n)
  xx <- x + a * cos(t)
  yy <- y + b * sin(t)
  polygonGrob(xx, yy, gp = gp, vp=vp)
}
##' @importFrom grid segmentsGrob
# Cake grob: a plate ellipse, two stacked cake ellipses, a rectangular
# side wall, and two vertical edge segments. Lower-case a/b are the cake
# semi-axes; upper-case A/B are the (larger) plate semi-axes.
cakeGrob <- function(x=.5, y=.5, a=.4, b=.14, A=.44, B=.17, height=.3, gp=gpar(), vp=NULL) {
  # side wall reuses the fill colour as border colour so its edge blends
  # into the cake body
  side.gp <- gp
  if (!is.null(gp$fill)) {
    side.gp$col <- side.gp$fill
  }
  base.y <- y - height
  parts <- gList(
    ellipseGrob(x, base.y, A, B, gp = gp, vp = vp),                  # plate
    ellipseGrob(x, base.y, a, b, gp = gp, vp = vp),                  # cake bottom
    rectGrob(x, y - height/2, a*2, height, gp = side.gp, vp = vp),   # side wall
    segmentsGrob(x - a, base.y, x - a, y, gp = gp, vp = vp),         # left edge
    segmentsGrob(x + a, base.y, x + a, y, gp = gp, vp = vp),         # right edge
    ellipseGrob(x, y, a, b, gp = gp, vp = vp)                        # cake top
  )
  gTree(children = parts)
}
##' @importFrom grid gTree
# Birthday-cake grob: one cake plus six lit candles at fixed positions.
cakeCandleGrob <- function(color.cake = "pink", color.candle="orange", color.fire="red", vp=NULL, name=NULL) {
  # candle anchor points, left to right across the cake top
  candle.x <- c(.25, .3, .4, .5, .6, .7)
  candle.y <- c(.45, .5, .45, .5, .45, .52)
  candles <- Map(function(cx, cy) candleGrob(cx, cy, color.candle, color.fire),
                 candle.x, candle.y)
  cake <- cakeGrob(x=.5, y=.5, a=.4, b=.14, A=.44, B=.17, height=.3,
                   gp=gpar(fill=color.cake))
  kids <- do.call(gList, c(list(cake), candles))
  gTree(children = kids, name = name, vp = vp)
}
##' ggplot2 layer of birthday cake
##'
##' Adds one cake-with-candles glyph per row of the data, centred at the
##' mapped x/y position. Rendering is done by the \code{GeomCake} ggproto
##' object defined below.
##'
##' @title geom_cake
##' @param mapping aes mapping; x and y give the cake centres
##' @param data data frame with one row per cake to draw
##' @param ... additional parameters forwarded to the layer, e.g. size,
##'     angle, color.cake, color.candle, color.fire
##' @return ggplot2 layer
##' @importFrom ggplot2 layer
##' @export
##' @author guangchuang yu
geom_cake <- function(mapping = NULL, data = NULL, ...) {
  layer(
    data = data,
    mapping = mapping,
    geom = GeomCake,
    stat = "identity",        # draw the data as given; no statistical transform
    position = "identity",    # no position adjustment (no dodging/stacking)
    params = list(...),
    check.aes = FALSE         # allow non-standard aesthetics such as color.cake
  )
}
##' @importFrom grid viewport
##' @importFrom ggplot2 ggproto
##' @importFrom ggplot2 Geom
##' @importFrom ggplot2 draw_key_blank
##' @importFrom ggplot2 aes
# ggproto Geom that renders one cakeCandleGrob per data row. Each cake is
# drawn inside its own viewport so that size and angle apply to the whole
# glyph rather than to individual shapes.
GeomCake <- ggproto("GeomCake", Geom,
                    draw_panel = function(data, panel_scales, coord) {
                      # map data coordinates into panel (native) space
                      data <- coord$transform(data, panel_scales)
                      grobs <- lapply(1:nrow(data), function(i) {
                        # one square viewport per row, centred on the point
                        vp <- viewport(x=data$x[i], y=data$y[i],
                                       width=data$size[i], height=data$size[i],
                                       angle = data$angle[i],
                                       just = c("center", "center"),
                                       default.units = "native")
                        cakeCandleGrob(data$color.cake[i], data$color.candle[i], data$color.fire[i], vp=vp, name=i)
                      })
                      class(grobs) <- "gList"
                      # ggname is ggplot2-internal; it only labels the grob tree
                      ggplot2:::ggname("geom_cake",
                                       gTree(children = grobs))
                    },
                    non_missing_aes = c("x", "y", "size", "color.cake", "color.candle", "color.fire"),
                    default_aes = aes(size=.1, color.cake="#FF3399", color.candle = "orange", color.fire="red", angle=0),
                    draw_key = draw_key_blank   # cakes have no meaningful legend key
)
|
/R/geom_cake.R
|
no_license
|
xtmgah/yyplot
|
R
| false
| false
| 3,948
|
r
|
##' @importFrom grid gList
##' @importFrom grid rectGrob
##' @importFrom grid polygonGrob
##' @importFrom grid gpar
candleGrob <- function(x, y, color.candle = "orange", color.fire = "red", vp=NULL) {
width <- 0.02
height <- 0.2
xx = c(x+.005,x-.01,x+.01,x+.03,x+.015,x+0.005)
yy = c(y+.2,y+.23,y+.26,y+.23,y+.2,y+.2)
gTree(children = gList(
rectGrob(x+width/2, y+height/2, width = width, height = height, gp = gpar(fill=color.candle), vp=vp),
polygonGrob(xx, yy, gp = gpar(fill = color.fire), vp=vp)
))
}
# Polygon approximation of an ellipse centred at (x, y) with horizontal
# semi-axis a and vertical semi-axis b, using the parametric form
# (x + a*cos(t), y + b*sin(t)).
# `n` is the number of vertices used to approximate the outline
# (generalized from the previously hard-coded 100; default preserves
# the original behavior).
ellipseGrob <- function(x, y, a, b, gp=gpar(), vp=NULL, n=100) {
  t <- seq(0, 2*pi, length.out=n)
  xx <- x + a * cos(t)
  yy <- y + b * sin(t)
  polygonGrob(xx, yy, gp = gp, vp=vp)
}
##' @importFrom grid segmentsGrob
cakeGrob <- function(x=.5, y=.5, a=.4, b=.14, A=.44, B=.17, height=.3, gp=gpar(), vp=NULL) {
gp2 <- gp
if (!is.null(gp$fill)) {
gp2$col <- gp2$fill
}
gTree(children = gList(
ellipseGrob(x, y-height, A, B, gp=gp, vp=vp),
ellipseGrob(x, y-height, a, b, gp=gp, vp=vp),
rectGrob(x, y-height/2, a*2, height, gp=gp2, vp=vp),
segmentsGrob(x-a, y-height, x-a, y, gp=gp, vp=vp),
segmentsGrob(x+a, y-height, x+a, y, gp=gp, vp=vp),
ellipseGrob(x, y, a, b, gp=gp, vp=vp))
)
}
##' @importFrom grid gTree
cakeCandleGrob <- function(color.cake = "pink", color.candle="orange", color.fire="red", vp=NULL, name=NULL) {
grobs <- gList(cakeGrob(x=.5, y=.5, a=.4, b=.14, A=.44, B=.17, height=.3, gp=gpar(fill=color.cake)),
candleGrob(.25,.45, color.candle, color.fire),
candleGrob(.3,.5, color.candle, color.fire),
candleGrob(.4, .45,color.candle, color.fire),
candleGrob(.5,.5, color.candle, color.fire),
candleGrob(.6, .45, color.candle, color.fire),
candleGrob(.7, .52, color.candle, color.fire)
)
gTree(children=grobs, name=name, vp=vp)
}
##' ggplot2 layer of birthday cake
##'
##'
##' @title geom_cake
##' @param mapping aes mapping
##' @param data data
##' @param ... additional parameters
##' @return ggplot2 layer
##' @importFrom ggplot2 layer
##' @export
##' @author guangchuang yu
geom_cake <- function(mapping = NULL, data = NULL, ...) {
layer(
data = data,
mapping = mapping,
geom = GeomCake,
stat = "identity",
position = "identity",
params = list(...),
check.aes = FALSE
)
}
##' @importFrom grid viewport
##' @importFrom ggplot2 ggproto
##' @importFrom ggplot2 Geom
##' @importFrom ggplot2 draw_key_blank
##' @importFrom ggplot2 aes
GeomCake <- ggproto("GeomCake", Geom,
draw_panel = function(data, panel_scales, coord) {
data <- coord$transform(data, panel_scales)
grobs <- lapply(1:nrow(data), function(i) {
vp <- viewport(x=data$x[i], y=data$y[i],
width=data$size[i], height=data$size[i],
angle = data$angle[i],
just = c("center", "center"),
default.units = "native")
cakeCandleGrob(data$color.cake[i], data$color.candle[i], data$color.fire[i], vp=vp, name=i)
})
class(grobs) <- "gList"
ggplot2:::ggname("geom_cake",
gTree(children = grobs))
},
non_missing_aes = c("x", "y", "size", "color.cake", "color.candle", "color.fire"),
default_aes = aes(size=.1, color.cake="#FF3399", color.candle = "orange", color.fire="red", angle=0),
draw_key = draw_key_blank
)
|
# modeldb/DatasetService.proto
#
# No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
#
# The version of the OpenAPI document: version not set
#
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title ModeldbGetExperimentRunByDatasetResponse
#'
#' @description ModeldbGetExperimentRunByDatasetResponse Class.
#' Generated API model wrapping the optional list of experiment runs
#' returned by the GetExperimentRunByDataset endpoint.
#'
#' @format An \code{R6Class} generator object
#'
#' @field experiment_runs list( \link{ModeldbExperimentRun} ) [optional]
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ModeldbGetExperimentRunByDatasetResponse <- R6::R6Class(
  'ModeldbGetExperimentRunByDatasetResponse',
  public = list(
    # list of ModeldbExperimentRun R6 objects, or NULL when unset
    `experiment_runs` = NULL,
    # Constructor: validates and stores the optional experiment_runs list.
    # Extra named arguments are collected but ignored (generator convention).
    initialize = function(
        `experiment_runs`=NULL, ...
    ) {
      local.optional.var <- list(...)
      if (!is.null(`experiment_runs`)) {
        # must be a non-empty vector/list whose elements are all R6 objects
        stopifnot(is.vector(`experiment_runs`), length(`experiment_runs`) != 0)
        sapply(`experiment_runs`, function(x) stopifnot(R6::is.R6(x)))
        self$`experiment_runs` <- `experiment_runs`
      }
    },
    # Convert this object to a plain R list suitable for jsonlite::toJSON.
    toJSON = function() {
      ModeldbGetExperimentRunByDatasetResponseObject <- list()
      if (!is.null(self$`experiment_runs`)) {
        # serialize each element via its own toJSON method
        ModeldbGetExperimentRunByDatasetResponseObject[['experiment_runs']] <-
          lapply(self$`experiment_runs`, function(x) x$toJSON())
      }
      ModeldbGetExperimentRunByDatasetResponseObject
    },
    # Populate this object from a JSON string; returns self for chaining.
    # NOTE(review): relies on a project-level ApiClient class for
    # deserialization of nested models.
    fromJSON = function(ModeldbGetExperimentRunByDatasetResponseJson) {
      ModeldbGetExperimentRunByDatasetResponseObject <- jsonlite::fromJSON(ModeldbGetExperimentRunByDatasetResponseJson)
      if (!is.null(ModeldbGetExperimentRunByDatasetResponseObject$`experiment_runs`)) {
        self$`experiment_runs` <- ApiClient$new()$deserializeObj(ModeldbGetExperimentRunByDatasetResponseObject$`experiment_runs`, "array[ModeldbExperimentRun]", loadNamespace("openapi"))
      }
      self
    },
    # Hand-build the JSON string representation of this object.
    toJSONString = function() {
      jsoncontent <- c(
        if (!is.null(self$`experiment_runs`)) {
          sprintf(
          '"experiment_runs":
          [%s]
',
          paste(sapply(self$`experiment_runs`, function(x) jsonlite::toJSON(x$toJSON(), auto_unbox=TRUE, digits = NA)), collapse=",")
          )}
      )
      jsoncontent <- paste(jsoncontent, collapse = ",")
      paste('{', jsoncontent, '}', sep = "")
    },
    # Populate this object from a JSON string without the NULL guard used
    # in fromJSON; returns self for chaining.
    fromJSONString = function(ModeldbGetExperimentRunByDatasetResponseJson) {
      ModeldbGetExperimentRunByDatasetResponseObject <- jsonlite::fromJSON(ModeldbGetExperimentRunByDatasetResponseJson)
      self$`experiment_runs` <- ApiClient$new()$deserializeObj(ModeldbGetExperimentRunByDatasetResponseObject$`experiment_runs`, "array[ModeldbExperimentRun]", loadNamespace("openapi"))
      self
    }
  )
)
|
/R/modeldb_get_experiment_run_by_dataset_response.R
|
no_license
|
botchkoAI/VertaDatasetService
|
R
| false
| false
| 2,785
|
r
|
# modeldb/DatasetService.proto
#
# No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
#
# The version of the OpenAPI document: version not set
#
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title ModeldbGetExperimentRunByDatasetResponse
#'
#' @description ModeldbGetExperimentRunByDatasetResponse Class
#'
#' @format An \code{R6Class} generator object
#'
#' @field experiment_runs list( \link{ModeldbExperimentRun} ) [optional]
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ModeldbGetExperimentRunByDatasetResponse <- R6::R6Class(
'ModeldbGetExperimentRunByDatasetResponse',
public = list(
`experiment_runs` = NULL,
initialize = function(
`experiment_runs`=NULL, ...
) {
local.optional.var <- list(...)
if (!is.null(`experiment_runs`)) {
stopifnot(is.vector(`experiment_runs`), length(`experiment_runs`) != 0)
sapply(`experiment_runs`, function(x) stopifnot(R6::is.R6(x)))
self$`experiment_runs` <- `experiment_runs`
}
},
toJSON = function() {
ModeldbGetExperimentRunByDatasetResponseObject <- list()
if (!is.null(self$`experiment_runs`)) {
ModeldbGetExperimentRunByDatasetResponseObject[['experiment_runs']] <-
lapply(self$`experiment_runs`, function(x) x$toJSON())
}
ModeldbGetExperimentRunByDatasetResponseObject
},
fromJSON = function(ModeldbGetExperimentRunByDatasetResponseJson) {
ModeldbGetExperimentRunByDatasetResponseObject <- jsonlite::fromJSON(ModeldbGetExperimentRunByDatasetResponseJson)
if (!is.null(ModeldbGetExperimentRunByDatasetResponseObject$`experiment_runs`)) {
self$`experiment_runs` <- ApiClient$new()$deserializeObj(ModeldbGetExperimentRunByDatasetResponseObject$`experiment_runs`, "array[ModeldbExperimentRun]", loadNamespace("openapi"))
}
self
},
toJSONString = function() {
jsoncontent <- c(
if (!is.null(self$`experiment_runs`)) {
sprintf(
'"experiment_runs":
[%s]
',
paste(sapply(self$`experiment_runs`, function(x) jsonlite::toJSON(x$toJSON(), auto_unbox=TRUE, digits = NA)), collapse=",")
)}
)
jsoncontent <- paste(jsoncontent, collapse = ",")
paste('{', jsoncontent, '}', sep = "")
},
fromJSONString = function(ModeldbGetExperimentRunByDatasetResponseJson) {
ModeldbGetExperimentRunByDatasetResponseObject <- jsonlite::fromJSON(ModeldbGetExperimentRunByDatasetResponseJson)
self$`experiment_runs` <- ApiClient$new()$deserializeObj(ModeldbGetExperimentRunByDatasetResponseObject$`experiment_runs`, "array[ModeldbExperimentRun]", loadNamespace("openapi"))
self
}
)
)
|
library(RSQLite)

# OPEN DATABASE AND QUERY
# Read every row of the `cldata` table into a data.frame, then release
# the connection before writing output.
conn <- dbConnect(RSQLite::SQLite(), dbname = "CLData.db")
sqlData <- dbGetQuery(conn, "SELECT * FROM cldata")
dbDisconnect(conn)

# WRITE TO CSV
# Fixed: the file name was wrapped in a pointless single-argument paste0().
# row.names = FALSE keeps R's artificial row index out of the output file.
write.csv(sqlData, "CLData_R.csv", row.names = FALSE)
cat("Successfully migrated SQL data to CSV!\n")
|
/SQL/SQLtoCSV/SQLtoCSV_R.R
|
no_license
|
ParfaitG/DATA_MIGRATION
|
R
| false
| false
| 308
|
r
|
library(RSQLite)

# OPEN DATABASE AND QUERY
# Read every row of the `cldata` table into a data.frame, then release
# the connection before writing output.
conn <- dbConnect(RSQLite::SQLite(), dbname = "CLData.db")
sqlData <- dbGetQuery(conn, "SELECT * FROM cldata")
dbDisconnect(conn)

# WRITE TO CSV
# Fixed: the file name was wrapped in a pointless single-argument paste0().
# row.names = FALSE keeps R's artificial row index out of the output file.
write.csv(sqlData, "CLData_R.csv", row.names = FALSE)
cat("Successfully migrated SQL data to CSV!\n")
|
#' ISRaD.build builds the database and updates objects in R package
#'
#' Wrapper function that combines tools for rapid deployment of R package data objects.
#' Meant to be used by the maintainers/developers of ISRaD. Interactive:
#' prompts the operator at each checkpoint via utils::menu().
#'
#' @param ISRaD_directory directory where the ISRaD package is found
#' @param geodata_directory directory where geospatial climate datasets are found. Necessary to create ISRaD_Extra
#' @return runs QAQC on all datafiles, moves files that fail QAQC, updates ISRaD_Data, updates ISRaD_Extra
#' @export
#' @examples
#' \dontrun{
#' ISRaD.build(ISRaD_directory="~/ISRaD/", geodata_directory="~/geospatial_datasets")
#' }
ISRaD.build<-function(ISRaD_directory=getwd(), geodata_directory){
  # Install local ISRaD -----------------------------------------------------
  cat("Installing local version of ISRaD...")
  devtools::install("../ISRaD")
  # NOTE(review): library() inside a function alters the caller's search path;
  # acceptable here because this is a maintainer-only build script.
  library(ISRaD)
  # Compile database --------------------------------------------------------
  cat("Compiling the data files in", paste0(ISRaD_directory,"/ISRaD_data_files\n"))
  cat("You must review the compilation report log file when complete... \n\n")
  ISRaD_data_compiled<-compile(dataset_directory = paste0(ISRaD_directory,"/ISRaD_data_files/"), write_report = T, write_out = T, return_type = "list", checkdoi = F)
  cat("\nISRaD_data.xlsx saved to", paste0(ISRaD_directory,"/ISRaD_data_files/database\n\n"))
  reviewed<-utils::menu(c("Yes", "No"), title="Have you reviewed the compilation report log file? (ISRaD_data_files/database/ISRaD_log.txt). I would suggest using the git commit preview window in R to see changes.")
  if (reviewed==2){
    stop("You cannot build the ISRaD database without reviewing the compilation report log file...")
  }
  reviewed<-utils::menu(c("Yes", "No"), title="Did everything in the log file look ok?")
  if (reviewed==2){
    stop("You cannot build the ISRaD database if the log file shows problems...")
  }
  # Replace data objects ----------------------------------------------------
  cat("\nReplacing the ISRaD_data object with the new one...\n")
  cat("\tChecking the number of new rows in the compiled ISRaD_data object...\n")
  for(t in names(ISRaD_data_compiled)){
    cat("\t\t", nrow(ISRaD_data_compiled[[t]])-nrow(ISRaD_data[[t]]), "rows were added to the", t, "table.\n")
  }
  new_entries<-setdiff(ISRaD_data_compiled$metadata$entry_name,ISRaD_data$metadata$entry_name)
  if(length(new_entries)==0) new_entries <- "none"
  cat("\t\t New entry_name values added to the data:", new_entries, "\n")
  removed_entries<-setdiff(ISRaD_data$metadata$entry_name, ISRaD_data_compiled$metadata$entry_name)
  if(length(removed_entries)==0) removed_entries <- "none"
  # BUG FIX: this line previously printed new_entries instead of removed_entries.
  cat("\t\t entry_name values removed from the data:", removed_entries, "\n")
  reviewed<-utils::menu(c("Yes", "No"), title="Are these differences what you expected?")
  if (reviewed==2){
    stop("You cannot replace the ISRaD_data object with a faulty data object...")
  }
  cat("\nCreating the ISRaD_extra object...\n")
  ISRaD_extra_compiled<-ISRaD.extra(database=ISRaD_data_compiled, geodata_directory = geodata_directory)
  cat("Replacing the ISRaD_extra object with the new one...\n")
  cat("\tChecking the number of new rows in the compiled ISRaD_extra object...\n")
  for(t in names(ISRaD_extra_compiled)){
    cat("\t\t", ncol(ISRaD_extra_compiled[[t]])-ncol(ISRaD_extra[[t]]), "ncol were added to the", t, "table.\n")
  }
  reviewed<-utils::menu(c("Yes", "No"), title="Are these differences what you expected?")
  if (reviewed==2){
    stop("You cannot replace the ISRaD_data object with a faulty data object...")
  }
  # Overwrite the packaged data objects (data/ISRaD_data.rda, data/ISRaD_extra.rda)
  ISRaD_data<-ISRaD_data_compiled
  usethis::use_data(ISRaD_data, overwrite = T)
  cat("ISRaD_data has been updated...\n\n")
  ISRaD_extra<-ISRaD_extra_compiled
  usethis::use_data(ISRaD_extra, overwrite = T)
  cat("ISRaD_extra has been updated...\n\n")
  # Flattened data objects --------------------------------------------------
  cat("\tUpdating flattened data objects...\n")
  for(tab in c("flux","layer","interstitial","incubation","fraction")){
    flattened_data<-ISRaD.flatten(database=ISRaD_data, table = tab)
    cat("writing ISRaD_data_flat_", tab, ".csv"," ...\n", sep = "")
    utils::write.csv(flattened_data, paste0(ISRaD_directory,"/ISRaD_data_files/database/", "ISRaD_data_flat_", tab, ".csv"))
    flattened_extra<-ISRaD.flatten(database=ISRaD_extra, table = tab)
    cat("writing ISRaD_extra_flat_", tab, ".csv"," ...\n", sep = "")
    utils::write.csv(flattened_extra, paste0(ISRaD_directory,"/ISRaD_data_files/database/", "ISRaD_extra_flat_", tab, ".csv"))
  }
  # update references -------------------------------------------------------
  #if(removed_entries != "none" & new_entries !="none") {
  cat("\nUpdating credits.md page...this takes about 5 min")
  dois=as.character(ISRaD_data$metadata$doi)
  cleandois=dois[dois[]!="israd"]
  he_doi="10.1126/science.aad4273"
  mathieu_doi="10.1111/gcb.13012"
  # References from clean dois (fetched from CrossRef, APA style)
  a=sapply(cleandois,FUN=rcrossref::cr_cn, format="text", style="apa", USE.NAMES = FALSE)
  he_ref=rcrossref::cr_cn(he_doi,format="text", style="apa")
  mathieu_ref=rcrossref::cr_cn(mathieu_doi,format="text", style="apa")
  # Body
  h1="## Main compilations"
  p1="ISRaD has been built based on two main compilations:"
  h2="## Studies within ISRaD"
  n=length(cleandois)
  p2=paste("Currently, there are", n, "entries in ISRaD, which are from the following publications:")
  # Print markdown file for website
  cat(c(h1, p1, " ", paste("* ",mathieu_ref), paste("* ",he_ref), " ",
        h2, p2, " ", paste("* ",a)), sep="\n", file="ISRaD_data_files/database/credits.md")
  #}
  # document and check ------------------------------------------------------
  cat("\tUpdating documentation and running check()...\n")
  devtools::document(pkg = ISRaD_directory)
  devtools::check(pkg=ISRaD_directory, manual = T, cran = T)
  errors<-1
  while(errors==1){
    errors<-utils::menu(c("Yes", "No"), title="Were there any errors, warnings, or notes?")
    if (errors==1){
      cat("Ok, please fix the issues and confirm below when you are ready to run the check again...\n")
      ready<-utils::menu(c("Yes", "No"), title="Are you ready to run the check again?")
      if (ready==1){
        devtools::check(pkg=ISRaD_directory, manual = T, cran = T)
      }
    }
  }
  # Remove any stale manual before regenerating it; unlink() is portable
  # (the previous system("rm ...") call failed on Windows).
  unlink(file.path(getwd(), "ISRaD.pdf"))
  system(paste(shQuote(file.path(R.home("bin"), "R")),
               "CMD", "Rd2pdf", shQuote(getwd())))
  reviewed<-utils::menu(c("Yes", "No"), title="Are you going to push this to github?")
  if (reviewed==1){
    cat("Ok, the DESCRIPTION file is being updated with a new version...\n")
    DESC<-readLines(paste0(ISRaD_directory,"/DESCRIPTION"))
    version<-strsplit(DESC[3],split = "\\.")
    # Bump the 4th (development) version component, seeding it at 900
    # when the Version field only has three components.
    if(length(version[[1]])<4) version[[1]][4]<-900
    version[[1]][4]<-as.numeric(version[[1]][4])+1
    DESC[3]<-paste(unlist(version), collapse = ".")
    writeLines(DESC, paste0(ISRaD_directory,"/DESCRIPTION"))
    cat("Ok, you can now commit and push this to github!\n You should also then reload R and reinstall ISRaD from github since you changed the data objects.\n")
  }
}
|
/R/ISRaD.build.R
|
no_license
|
olgavinduskova/ISRaD
|
R
| false
| false
| 7,217
|
r
|
#' ISRaD.build builds the database and updates objects in R package
#'
#' Wrapper function that combines tools for rapid deployment of R package data objects.
#' Meant to be used by the maintainers/developers of ISRaD
#'
#' @param ISRaD_directory directory where the ISRaD package is found
#' @param geodata_directory directory where geospatial climate datasets are found. Necessary to create ISRaD_Extra
#' @return runs QAQC on all datafiles, moves files that fail QAQC, updates ISRaD_Data, updates ISRaD_Extra
#' @export
#' @examples
#' \dontrun{
#' ISRaD.build(ISRaD_directory="~/ISRaD/", geodata_directory="~/geospatial_datasets")
#' }
ISRaD.build<-function(ISRaD_directory=getwd(), geodata_directory){
# Install local ISRaD -----------------------------------------------------
cat("Installing local version of ISRaD...")
devtools::install("../ISRaD")
library(ISRaD)
# Compile database --------------------------------------------------------
cat("Compiling the data files in", paste0(ISRaD_directory,"/ISRaD_data_files\n"))
cat("You must review the compilation report log file when complete... \n\n")
ISRaD_data_compiled<-compile(dataset_directory = paste0(ISRaD_directory,"/ISRaD_data_files/"), write_report = T, write_out = T, return_type = "list", checkdoi = F)
cat("\nISRaD_data.xlsx saved to", paste0(ISRaD_directory,"/ISRaD_data_files/database\n\n"))
reviewed<-utils::menu(c("Yes", "No"), title="Have you reviewed the compilation report log file? (ISRaD_data_files/database/ISRaD_log.txt). I would suggest using the git commit preview window in R to see changes.")
if (reviewed==2){
stop("You cannot build the ISRaD database without reviewing the compilation report log file...")
}
reviewed<-utils::menu(c("Yes", "No"), title="Did everything in the log file look ok?")
if (reviewed==2){
stop("You cannot build the ISRaD database if the log file shows problems...")
}
# Replace data objects ----------------------------------------------------
cat("\nReplacing the ISRaD_data object with the new one...\n")
cat("\tChecking the number of new rows in the compiled ISRaD_data object...\n")
for(t in names(ISRaD_data_compiled)){
cat("\t\t", nrow(ISRaD_data_compiled[[t]])-nrow(ISRaD_data[[t]]), "rows were added to the", t, "table.\n")
}
new_entries<-setdiff(ISRaD_data_compiled$metadata$entry_name,ISRaD_data$metadata$entry_name)
if(length(new_entries)==0) new_entries <- "none"
cat("\t\t New entry_name values added to the data:", new_entries, "\n")
removed_entries<-setdiff(ISRaD_data$metadata$entry_name, ISRaD_data_compiled$metadata$entry_name)
if(length(removed_entries)==0) removed_entries <- "none"
cat("\t\t entry_name values removed from the data:", new_entries, "\n")
reviewed<-utils::menu(c("Yes", "No"), title="Are these differences what you expected?")
if (reviewed==2){
stop("You cannot replace the ISRaD_data object with a faulty data object...")
}
cat("\nCreating the ISRaD_extra object...\n")
ISRaD_extra_compiled<-ISRaD.extra(database=ISRaD_data_compiled, geodata_directory = geodata_directory)
cat("Replacing the ISRaD_extra object with the new one...\n")
cat("\tChecking the number of new rows in the compiled ISRaD_extra object...\n")
for(t in names(ISRaD_extra_compiled)){
cat("\t\t", ncol(ISRaD_extra_compiled[[t]])-ncol(ISRaD_extra[[t]]), "ncol were added to the", t, "table.\n")
}
reviewed<-utils::menu(c("Yes", "No"), title="Are these differences what you expected?")
if (reviewed==2){
stop("You cannot replace the ISRaD_data object with a faulty data object...")
}
ISRaD_data<-ISRaD_data_compiled
usethis::use_data(ISRaD_data, overwrite = T)
cat("ISRaD_data has been updated...\n\n")
ISRaD_extra<-ISRaD_extra_compiled
usethis::use_data(ISRaD_extra, overwrite = T)
cat("ISRaD_extra has been updated...\n\n")
# Flattened data objects --------------------------------------------------
cat("\tUpdating flattened data objects...\n")
for(tab in c("flux","layer","interstitial","incubation","fraction")){
flattened_data<-ISRaD.flatten(database=ISRaD_data, table = tab)
cat("writing ISRaD_data_flat_", tab, ".csv"," ...\n", sep = "")
utils::write.csv(flattened_data, paste0(ISRaD_directory,"/ISRaD_data_files/database/", "ISRaD_data_flat_", tab, ".csv"))
flattened_extra<-ISRaD.flatten(database=ISRaD_extra, table = tab)
cat("writing ISRaD_extra_flat_", tab, ".csv"," ...\n", sep = "")
utils::write.csv(flattened_extra, paste0(ISRaD_directory,"/ISRaD_data_files/database/", "ISRaD_extra_flat_", tab, ".csv"))
}
# update references -------------------------------------------------------
#if(removed_entries != "none" & new_entries !="none") {
cat("\nUpdating credits.md page...this takes about 5 min")
dois=as.character(ISRaD_data$metadata$doi)
cleandois=dois[dois[]!="israd"]
he_doi="10.1126/science.aad4273"
mathieu_doi="10.1111/gcb.13012"
# References from clean dois
a=sapply(cleandois,FUN=rcrossref::cr_cn, format="text", style="apa", USE.NAMES = FALSE)
he_ref=rcrossref::cr_cn(he_doi,format="text", style="apa")
mathieu_ref=rcrossref::cr_cn(mathieu_doi,format="text", style="apa")
# Body
h1="## Main compilations"
p1="ISRaD has been built based on two main compilations:"
h2="## Studies within ISRaD"
n=length(cleandois)
p2=paste("Currently, there are", n, "entries in ISRaD, which are from the following publications:")
# Print markdown file for website
cat(c(h1, p1, " ", paste("* ",mathieu_ref), paste("* ",he_ref), " ",
h2, p2, " ", paste("* ",a)), sep="\n", file="ISRaD_data_files/database/credits.md")
#}
# document and check ------------------------------------------------------
cat("\tUpdating documentation and running check()...\n")
devtools::document(pkg = ISRaD_directory)
devtools::check(pkg=ISRaD_directory, manual = T, cran = T)
errors<-1
while(errors==1){
errors<-utils::menu(c("Yes", "No"), title="Were there any errors, warnings, or notes?")
if (errors==1){
cat("Ok, please fix the issues and confim below when you are ready to run the check again...\n")
ready<-utils::menu(c("Yes", "No"), title="Are you ready to run the check again?")
if (ready==1){
devtools::check(pkg=ISRaD_directory, manual = T, cran = T)
}
}
}
system(paste0("rm ", getwd(), "/ISRaD.pdf"))
system(paste(shQuote(file.path(R.home("bin"), "R")),
"CMD", "Rd2pdf", shQuote(getwd())))
reviewed<-utils::menu(c("Yes", "No"), title="Are you going to push this to github?")
if (reviewed==1){
cat("Ok, the DESCRIPTION file is being updated with a new version...\n")
DESC<-readLines(paste0(ISRaD_directory,"/DESCRIPTION"))
version<-strsplit(DESC[3],split = "\\.")
if(length(version[[1]])<4) version[[1]][4]<-900
version[[1]][4]<-as.numeric(version[[1]][4])+1
DESC[3]<-paste(unlist(version), collapse = ".")
writeLines(DESC, paste0(ISRaD_directory,"/DESCRIPTION"))
cat("Ok, you can now commit and push this to github!\n You should also then reload R and reinstall ISRaD from guthub since you changed the data objects.\n")
}
}
|
\encoding{UTF-8}
\docType{methods}
\name{seploc}
\alias{seploc}
\alias{seploc-methods}
\alias{seploc,ANY-method}
\alias{seploc,genind-method}
\alias{seploc,genpop-method}
\alias{seploc,genlight-method}
\title{ Separate data per locus}
\description{
The function \code{seploc} splits an object (\linkS4class{genind},
\linkS4class{genpop} or \linkS4class{genlight}) by marker. For
\linkS4class{genind} and \linkS4class{genpop} objects, the method
returns a list of objects whose components each correspond to a
marker. For \linkS4class{genlight} objects, the method returns blocks
of SNPs.
}
\usage{
\S4method{seploc}{genind}(x,truenames=TRUE,res.type=c("genind","matrix"))
\S4method{seploc}{genpop}(x,truenames=TRUE,res.type=c("genpop","matrix"))
\S4method{seploc}{genlight}(x, n.block=NULL, block.size=NULL, random=FALSE,
parallel=FALSE, n.cores=NULL)
}
\arguments{
\item{x}{a \linkS4class{genind} or a \linkS4class{genpop} object.}
\item{truenames}{a logical indicating whether true names should be
used (TRUE, default) instead of generic labels (FALSE).}
\item{res.type}{a character indicating the type of returned results,
a genind or genpop object (default) or a matrix of data
corresponding to the 'tab' slot.}
\item{n.block}{an integer indicating the number of blocks of SNPs to
be returned.}
\item{block.size}{an integer indicating the size (in number of SNPs)
of the blocks to be returned.}
\item{random}{should blocks be formed of contiguous SNPs, or should
they be made of randomly chosen SNPs.}
\item{parallel}{a logical indicating whether multiple cores -if
available- should be used for the computations (TRUE, default), or
not (FALSE); requires the package \code{parallel} to be installed.}
\item{n.cores}{if \code{parallel} is TRUE, the number of cores to be
used in the computations; if NULL, then the maximum number of cores
available on the computer is used.}
}
\value{The function \code{seploc} returns a list of objects of the
same class as the initial object, or a list of matrices similar to
x\$tab.\cr
}
\author{Thibaut Jombart \email{t.jombart@imperial.ac.uk} }
\seealso{\code{\link{seppop}}, \code{\link{repool}}}
\examples{
\dontrun{
## example on genind objects
data(microbov)
# separate all markers
obj <- seploc(microbov)
names(obj)
obj$INRA5
## example on genlight objects
x <- glSim(100, 1000, 0, ploidy=2) # simulate data
x <- x[,order(glSum(x))] # reorder loci by frequency of 2nd allele
glPlot(x, main="All data") # plot data
foo <- seploc(x, n.block=3) # form 3 blocks
foo
glPlot(foo[[1]], main="1st block") # plot 1st block
glPlot(foo[[2]], main="2nd block") # plot 2nd block
glPlot(foo[[3]], main="3rd block") # plot 3rd block
foo <- seploc(x, block.size=600, random=TRUE) # split data, randomize loci
foo # note the different block sizes
glPlot(foo[[1]])
}
}
\keyword{manip}
|
/man/seploc.Rd
|
no_license
|
gtonkinhill/adegenet
|
R
| false
| false
| 2,934
|
rd
|
\encoding{UTF-8}
\docType{methods}
\name{seploc}
\alias{seploc}
\alias{seploc-methods}
\alias{seploc,ANY-method}
\alias{seploc,genind-method}
\alias{seploc,genpop-method}
\alias{seploc,genlight-method}
\title{ Separate data per locus}
\description{
The function \code{seploc} splits an object (\linkS4class{genind},
\linkS4class{genpop} or \linkS4class{genlight}) by marker. For
\linkS4class{genind} and \linkS4class{genpop} objects, the method
returns a list of objects whose components each correspond to a
marker. For \linkS4class{genlight} objects, the method returns blocks
of SNPs.
}
\usage{
\S4method{seploc}{genind}(x,truenames=TRUE,res.type=c("genind","matrix"))
\S4method{seploc}{genpop}(x,truenames=TRUE,res.type=c("genpop","matrix"))
\S4method{seploc}{genlight}(x, n.block=NULL, block.size=NULL, random=FALSE,
parallel=FALSE, n.cores=NULL)
}
\arguments{
\item{x}{a \linkS4class{genind} or a \linkS4class{genpop} object.}
\item{truenames}{a logical indicating whether true names should be
used (TRUE, default) instead of generic labels (FALSE).}
\item{res.type}{a character indicating the type of returned results,
a genind or genpop object (default) or a matrix of data
corresponding to the 'tab' slot.}
\item{n.block}{an integer indicating the number of blocks of SNPs to
be returned.}
\item{block.size}{an integer indicating the size (in number of SNPs)
of the blocks to be returned.}
\item{random}{should blocks be formed of contiguous SNPs, or should
they be made of randomly chosen SNPs.}
\item{parallel}{a logical indicating whether multiple cores -if
available- should be used for the computations (TRUE, default), or
not (FALSE); requires the package \code{parallel} to be installed.}
\item{n.cores}{if \code{parallel} is TRUE, the number of cores to be
used in the computations; if NULL, then the maximum number of cores
available on the computer is used.}
}
\value{The function \code{seploc} returns a list of objects of the
same class as the initial object, or a list of matrices similar to
x\$tab.\cr
}
\author{Thibaut Jombart \email{t.jombart@imperial.ac.uk} }
\seealso{\code{\link{seppop}}, \code{\link{repool}}}
\examples{
\dontrun{
## example on genind objects
data(microbov)
# separate all markers
obj <- seploc(microbov)
names(obj)
obj$INRA5
## example on genlight objects
x <- glSim(100, 1000, 0, ploidy=2) # simulate data
x <- x[,order(glSum(x))] # reorder loci by frequency of 2nd allele
glPlot(x, main="All data") # plot data
foo <- seploc(x, n.block=3) # form 3 blocks
foo
glPlot(foo[[1]], main="1st block") # plot 1st block
glPlot(foo[[2]], main="2nd block") # plot 2nd block
glPlot(foo[[3]], main="3rd block") # plot 3rd block
foo <- seploc(x, block.size=600, random=TRUE) # split data, randomize loci
foo # note the different block sizes
glPlot(foo[[1]])
}
}
\keyword{manip}
|
#' Get the UID codes from NCBI for taxonomic names.
#'
#' Retrieve the Unique Identifier (UID) of a taxon from NCBI taxonomy browser.
#'
#' @export
#' @param sciname character; scientific name.
#' @param ask logical; should get_uid be run in interactive mode? If TRUE and
#' more than one TSN is found for the species, the user is asked for input. If
#' FALSE NA is returned for multiple matches.
#' @param messages logical; If `TRUE` (default) the actual taxon queried is
#' printed on the console.
#' @param rows numeric; Any number from 1 to infinity. If the default NA, all
#' rows are considered. Note that this function still only gives back a uid
#' class object with one to many identifiers. See
#' \code{\link[taxize]{get_uid_}} to get back all, or a subset, of the raw
#' data that you are presented during the ask process.
#' @param modifier (character) A modifier to the \code{sciname} given. Options
#' include: Organism, Scientific Name, Common Name, All Names, Division,
#' Filter, Lineage, GC, MGC, Name Tokens, Next Level, PGC, Properties, Rank,
#' Subtree, Synonym, Text Word. These are not checked, so make sure they are
#' entered correctly, as is.
#' @param rank_query (character) A taxonomic rank name to modify the query sent
#' to NCBI. See \code{\link{rank_ref}} for possible options. Though note that
#' some data sources use atypical ranks, so inspect the data itself for
#' options. Optional. See \code{Querying} below.
#' @param division_filter (character) A division (aka phylum) name to filter
#' data after retrieved from NCBI. Optional. See \code{Filtering} below.
#' @param rank_filter (character) A taxonomic rank name to filter data after
#' retrieved from NCBI. See \code{\link{rank_ref}} for possible options.
#' Though note that some data sources use atypical ranks, so inspect the data
#' itself for options. Optional. See \code{Filtering} below.
#' @param key (character) NCBI Entrez API key. optional. See Details.
#' @param x Input to \code{\link{as.uid}}
#' @param ... Ignored
#' @param check logical; Check if ID matches any existing on the DB, only used
#' in \code{\link{as.uid}}
#' @template getreturn
#'
#' @section Querying: The parameter \code{rank_query} is used in the search sent
#' to NCBI, whereas \code{rank_filter} filters data after it comes back. The
#' parameter \code{modifier} adds modifiers to the name. For example,
#' \code{modifier="Organism"} adds that to the name, giving e.g.,
#' \code{Helianthus[Organism]}.
#'
#' @section Filtering: The parameters \code{division_filter} and
#' \code{rank_filter} are not used in the search to the data provider, but are
#' used in filtering the data down to a subset that is closer to the target
#' you want. For all these parameters, you can use regex strings since we use
#' \code{\link{grep}} internally to match. Filtering narrows down to the set
#' that matches your query, and removes the rest.
#'
#' @section Beware: NCBI does funny things sometimes. E.g., if you search on
#' Fringella morel, a slight misspelling of the genus name, and a non-existent
#' epithet, NCBI gives back a morel fungal species. In addition, NCBI doesn't
#' really do fuzzy searching very well, so if there is a slight mis-spelling
#' in your names, you likely won't get what you are expecting. The lesson:
#' clean your names before using this function. Other data sources are better
#' about fuzzy matching.
#'
#' @section Authentication:
#' See \code{\link{taxize-authentication}} for help on authentication
#'
#' Note that even though you can't pass in your key to `as.uid` functions,
#' we still use your Entrez API key if you have it saved as an R option
#' or environment variable.
#'
#' @family taxonomic-ids
#' @seealso \code{\link[taxize]{classification}}
#'
#' @author Eduard Szoecs, \email{eduardszoecs@@gmail.com}
#'
#' @examples \dontrun{
#' get_uid(c("Chironomus riparius", "Chaetopteryx"))
#' get_uid(c("Chironomus riparius", "aaa vva"))
#'
#' # When not found
#' get_uid("howdy")
#' get_uid(c("Chironomus riparius", "howdy"))
#'
#' # Narrow down results to a division or rank, or both
#' ## By modifying the query
#' ### w/ modifiers to the name
#' get_uid(sciname = "Aratinga acuticauda", modifier = "Organism")
#' get_uid(sciname = "bear", modifier = "Common Name")
#'
#' ### w/ rank query
#' get_uid(sciname = "Pinus", rank_query = "genus")
#' get_uid(sciname = "Pinus", rank_query = "subgenus")
#' ### division query doesn't really work, for unknown reasons, so not available
#'
#' ## By filtering the result
#' ## Echinacea example
#' ### Results w/o narrowing
#' get_uid("Echinacea")
#' ### w/ division
#' get_uid(sciname = "Echinacea", division_filter = "eudicots")
#' get_uid(sciname = "Echinacea", division_filter = "sea urchins")
#'
#' ## Satyrium example
#' ### Results w/o narrowing
#' get_uid(sciname = "Satyrium")
#' ### w/ division
#' get_uid(sciname = "Satyrium", division_filter = "monocots")
#' get_uid(sciname = "Satyrium", division_filter = "butterflies")
#'
#' ## Rank example
#' get_uid(sciname = "Pinus")
#' get_uid(sciname = "Pinus", rank_filter = "genus")
#' get_uid(sciname = "Pinus", rank_filter = "subgenus")
#'
#' # Fuzzy filter on any filtering fields
#' ## uses grep on the inside
#' get_uid("Satyrium", division_filter = "m")
#'
#' # specify rows to limit choices available
#' get_uid('Dugesia') # user prompt needed
#' get_uid('Dugesia', rows=1) # 2 choices, so returns only 1 row, so no choices
#' get_uid('Dugesia', ask = FALSE) # returns NA for multiple matches
#'
#' # Go to a website with more info on the taxon
#' res <- get_uid("Chironomus riparius")
#' browseURL(attr(res, "uri"))
#'
#' # Convert a uid without class information to a uid class
#' as.uid(get_uid("Chironomus riparius")) # already a uid, returns the same
#' as.uid(get_uid(c("Chironomus riparius","Pinus contorta"))) # same
#' as.uid(315567) # numeric
#' as.uid(c(315567,3339,9696)) # numeric vector, length > 1
#' as.uid("315567") # character
#' as.uid(c("315567","3339","9696")) # character vector, length > 1
#' as.uid(list("315567","3339","9696")) # list, either numeric or character
#' ## dont check, much faster
#' as.uid("315567", check=FALSE)
#' as.uid(315567, check=FALSE)
#' as.uid(c("315567","3339","9696"), check=FALSE)
#' as.uid(list("315567","3339","9696"), check=FALSE)
#'
#' (out <- as.uid(c(315567,3339,9696)))
#' data.frame(out)
#' as.uid( data.frame(out) )
#'
#' # Get all data back
#' get_uid_("Puma concolor")
#' get_uid_("Dugesia")
#' get_uid_("Dugesia", rows=2)
#' get_uid_("Dugesia", rows=1:2)
#' get_uid_(c("asdfadfasd","Pinus contorta"))
#'
#' # use curl options
#' get_uid("Quercus douglasii", verbose = TRUE)
#' }
get_uid <- function(sciname, ask = TRUE, messages = TRUE, rows = NA,
                    modifier = NULL, rank_query = NULL,
                    division_filter = NULL, rank_filter = NULL,
                    key = NULL, ...) {
  # Validate user-facing arguments up front so bad input fails fast.
  assert(ask, "logical")
  assert(messages, "logical")
  assert(modifier, "character")
  assert(rank_query, "character")
  assert(division_filter, "character")
  assert(rank_filter, "character")
  key <- getkey(key, service = "entrez")

  # Worker applied to each name; returns a one-row data.frame carrying the
  # uid plus bookkeeping used for the attributes of the final "uid" object.
  fun <- function(sciname, ask, messages, rows, ...) {
    direct <- FALSE
    mssg(messages, "\nRetrieving data for taxon '", sciname, "'\n")
    # Entrez terms are '+'-separated; optional [modifier] / [Rank] clauses
    # narrow the query server-side.
    sciname <- gsub(" ", "+", sciname)
    if (!is.null(modifier)) sciname <- paste0(sciname,
                                              sprintf("[%s]", modifier))
    term <- sciname
    if (!is.null(rank_query)) term <- paste0(term, sprintf(" AND %s[Rank]", rank_query))
    try_again_errors <- c("Could not resolve host: eutils.ncbi.nlm.nih.gov")
    query_args <- tc(list(db = "taxonomy", term = term, api_key = key))
    raw_xml_result <- repeat_until_it_works(try_again_errors,
                                            "esearch",
                                            query = query_args,
                                            ...)
    xml_result <- xml2::read_xml(raw_xml_result)
    # NCBI limits requests to three per second when no API key is supplied.
    if (is.null(key)) Sys.sleep(0.33)
    uid <- xml2::xml_text(xml2::xml_find_all(xml_result, "//IdList/Id"))
    mm <- length(uid) > 1
    if (length(uid) == 0) { # if taxon name is not found
      uid <- NA_character_
    } else {
      att <- 'found'
    }
    # Not found on NCBI. anyNA() replaces `length(uid) == 0 || is.na(uid)`:
    # uid can be length > 1 at this point, and a vector condition inside
    # `||` is an error in R >= 4.3. Real UIDs are never NA, so anyNA()
    # matches only the NA sentinel assigned just above.
    if (anyNA(uid)) {
      mssg(messages, "Not found. Consider checking the spelling or alternate classification")
      uid <- NA_character_
      att <- 'NA due to not found'
    }
    # More than one hit: pull esummary records, filter, and maybe prompt.
    if (length(uid) > 1) {
      ID <- paste(uid, collapse = ",")
      try_again_errors <- c("Could not resolve host: eutils.ncbi.nlm.nih.gov")
      query_args <- tc(list(db = "taxonomy", ID = ID, api_key = key))
      tt <- repeat_until_it_works(try_again_errors, "esummary",
                                  query_args, ...)
      ttp <- xml2::read_xml(tt)
      df <- parse_ncbi(ttp)
      rownames(df) <- 1:nrow(df)
      # Client-side narrowing by division and/or rank (regex, via filt()).
      if (!is.null(division_filter) || !is.null(rank_filter)) {
        df <- filt(df, "division", division_filter)
        df <- filt(df, "rank", rank_filter)
      }
      df <- sub_rows(df, rows)
      uid <- df$uid
      if (length(uid) == 1) {
        direct <- TRUE
        att <- "found"
      }
      if (length(uid) == 0) {
        # Filtering removed every candidate. Record why the result is NA;
        # previously `att` kept the stale value "found" here.
        uid <- NA_character_
        att <- 'NA due to no match after filtering'
      }
      if (length(uid) > 1) {
        if (!ask) {
          # Non-interactive mode refuses to guess among several candidates.
          # (A dead `length(uid) == 1` branch was removed: this code only
          # runs when length(uid) > 1.)
          warning(
            sprintf("More than one UID found for taxon '%s'; refine query or set ask=TRUE",
                    sciname),
            call. = FALSE
          )
          uid <- NA_character_
          att <- 'NA due to ask=FALSE & > 1 result'
        } else {
          # Interactive prompt: show the candidates and read a row number.
          rownames(df) <- 1:nrow(df)
          message("\n\n")
          message("\nMore than one UID found for taxon '", sciname, "'!\n
            Enter rownumber of taxon (other inputs will return 'NA'):\n")
          print(df)
          take <- scan(n = 1, quiet = TRUE, what = 'raw')
          if (length(take) == 0) {
            take <- 'notake'
            att <- 'nothing chosen'
          }
          if (take %in% seq_len(nrow(df))) {
            take <- as.numeric(take)
            message("Input accepted, took UID '",
                    as.character(df$uid[take]), "'.\n")
            uid <- as.character(df$uid[take])
            att <- 'found'
          } else {
            uid <- NA_character_
            att <- 'NA due to user input out of range'
            mssg(messages, "\nReturned 'NA'!\n\n")
          }
        }
      }
    }
    return(data.frame(uid, att, multiple = mm, direct = direct,
                      stringsAsFactors = FALSE))
  }

  sciname <- as.character(sciname)
  outd <- ldply(sciname, fun, ask, messages, rows, ...)
  out <- structure(outd$uid, class = "uid",
                   match = outd$att,
                   multiple_matches = outd$multiple,
                   pattern_match = outd$direct)
  add_uri(out, 'https://www.ncbi.nlm.nih.gov/taxonomy/%s')
}
repeat_until_it_works <- function(catch, path, query, max_tries = 3, wait_time = 10,
                                  messages = TRUE, ...) {
  # Retry an Entrez E-utilities GET request up to `max_tries` times with a
  # linearly growing back-off (wait_time * attempt number).
  #
  # Only errors whose message appears in `catch` are swallowed and retried;
  # any other error is re-thrown immediately.
  #
  # @param catch character vector of error messages that warrant a retry.
  # @param path E-utilities endpoint name, e.g. "esearch" or "esummary".
  # @param query named list of query parameters (NULLs dropped via tc()).
  # @param max_tries maximum number of attempts before giving up.
  # @param wait_time base sleep in seconds between attempts.
  # @param messages emit a warning when a retryable error is caught?
  # @return the response body as a UTF-8 string, or NA if every try failed.
  error_handler <- function(e) {
    if (e$message %in% catch) {
      if (messages) warning(paste("Caught error:", e$message))
      return(NA)
    } else {
      stop(e$message)
    }
  }
  for (count in seq_len(max_tries)) {
    # BUGFIX: the whole request + parse now sits inside tryCatch. The
    # original only wrapped res$parse(), but connection failures such as
    # "Could not resolve host: ..." are raised by $get(), so the errors
    # this function exists to retry were never actually caught.
    output <- tryCatch({
      cli <- crul::HttpClient$new(url = ncbi_base(), opts = list(...))
      res <- cli$get(sprintf("entrez/eutils/%s.fcgi", path),
                     query = tc(query))
      res$parse("UTF-8")
    }, error = error_handler)
    if (!is.na(output)) return(output)
    Sys.sleep(wait_time * count)
  }
  return(output)
}
# S3 coercion helpers: turn various inputs into a "uid" class object.
# `check = TRUE` is forwarded to make_uid()/make_generic(); presumably it
# validates each id against NCBI (see check_uid) -- confirm in make_generic.
#' @export
#' @rdname get_uid
as.uid <- function(x, check=TRUE) UseMethod("as.uid")
# Already a uid: returned unchanged; `check` is ignored.
#' @export
#' @rdname get_uid
as.uid.uid <- function(x, check=TRUE) x
# A single string becomes one uid; longer vectors go through collapse(),
# which applies make_uid element-wise and merges the results.
#' @export
#' @rdname get_uid
as.uid.character <- function(x, check=TRUE) if(length(x) == 1) make_uid(x, check) else collapse(x, make_uid, "uid", check=check)
# Lists are handled exactly like character vectors (elements may be
# numeric or character).
#' @export
#' @rdname get_uid
as.uid.list <- function(x, check=TRUE) if(length(x) == 1) make_uid(x, check) else collapse(x, make_uid, "uid", check=check)
# Numbers are stringified, then dispatched to the character method.
#' @export
#' @rdname get_uid
as.uid.numeric <- function(x, check=TRUE) as.uid(as.character(x), check)
# Rebuild a uid object from a data.frame shaped like the output of
# as.data.frame.uid() (columns: ids, match, multiple_matches,
# pattern_match, uri). `check` is ignored here.
#' @export
#' @rdname get_uid
as.uid.data.frame <- function(x, check=TRUE) {
  structure(x$ids, class="uid", match=x$match,
            multiple_matches = x$multiple_matches,
            pattern_match = x$pattern_match, uri=x$uri)
}
#' @export
#' @rdname get_uid
as.data.frame.uid <- function(x, ...){
  # Flatten a "uid" object into a one-observation-per-id data.frame,
  # lifting each attribute into its own column.
  fields <- list(
    ids = as.character(unclass(x)),
    class = "uid",
    match = attr(x, "match"),
    multiple_matches = attr(x, "multiple_matches"),
    pattern_match = attr(x, "pattern_match"),
    uri = attr(x, "uri")
  )
  do.call(data.frame, c(fields, list(stringsAsFactors = FALSE)))
}
make_uid <- function(x, check=TRUE) {
  # Build a single "uid" object carrying its NCBI taxonomy URI template.
  # Delegates to the shared make_generic() helper; `check` presumably
  # triggers id validation (see check_uid) -- confirm in make_generic.
  make_generic(x, 'https://www.ncbi.nlm.nih.gov/taxonomy/%s',
               "uid", check)
}
check_uid <- function(x){
  # Ask the Entrez esummary endpoint about `x` and report whether the
  # returned <Id> node(s) exactly match the input id, i.e. whether `x`
  # looks like a real NCBI taxonomy UID.
  api_key <- getkey(NULL, "ENTREZ_KEY")
  client <- crul::HttpClient$new(url = ncbi_base())
  params <- tc(list(db = "taxonomy", id = x, api_key = api_key))
  resp <- client$get("entrez/eutils/esummary.fcgi", query = params)
  resp$raise_for_status()
  doc <- xml2::read_xml(resp$parse("UTF-8"))
  returned_id <- xml2::xml_text(xml2::xml_find_all(doc, "//Id"))
  identical(as.character(x), returned_id)
}
#' @export
#' @rdname get_uid
get_uid_ <- function(sciname, messages = TRUE, rows = NA, key = NULL, ...){
  # Raw-data variant of get_uid(): fetch the full candidate table for each
  # name and return a named list (one element per input name).
  key <- getkey(key, "ENTREZ_KEY")
  results <- lapply(sciname, function(nm) {
    get_uid_help(nm, messages = messages, rows = rows, key = key, ...)
  })
  stats::setNames(results, sciname)
}
get_uid_help <- function(sciname, messages, rows, key, ...) {
  # Workhorse for get_uid_(): run esearch for the name, then esummary for
  # every matching UID; return a data.frame of candidate records (NULL if
  # the name matched nothing), optionally limited via `rows`.
  mssg(messages, "\nRetrieving data for taxon '", sciname, "'\n")
  cli <- crul::HttpClient$new(url = ncbi_base(), opts = list(...))
  res <- cli$get(
    "entrez/eutils/esearch.fcgi",
    query = tc(list(api_key = key,
                    db = "taxonomy", term = gsub(" ", "+", sciname))))
  res$raise_for_status()
  xml_result <- xml2::read_xml(res$parse("UTF-8"))
  # NCBI throttles to three requests per second only when no API key is
  # supplied -- now consistent with the same logic in get_uid(); the
  # original slept unconditionally.
  if (is.null(key)) Sys.sleep(0.33)
  # xml2:: namespace-qualified calls, matching the style used in get_uid().
  uid <- xml2::xml_text(xml2::xml_find_all(xml_result, "//IdList/Id"))
  if (length(uid) == 0) {
    NULL
  } else {
    res <- cli$get("entrez/eutils/esummary.fcgi",
                   query = tc(list(api_key = key, db = "taxonomy",
                                   ID = paste(uid, collapse = ","))))
    res$raise_for_status()
    ttp <- xml2::read_xml(res$parse("UTF-8"))
    df <- parse_ncbi(ttp)
    sub_rows(df, rows)
  }
}
parse_ncbi <- function(x) {
  # Parse an Entrez esummary XML document (taxonomy db) into a data.frame
  # with one row per <DocSum> record, lower-cased column names, and the
  # TaxId column renamed to "uid".
  mget <- c("Status", "Rank", "Division", "ScientificName",
            "CommonName", "TaxId", "Genus", "Species", "Subsp",
            "ModificationDate")
  # XPath fragments like Item[@Name="Rank"] select each summary field.
  nget <- paste0('Item[@Name="', mget, "\"]")
  nodes <- xml_find_all(x, "//DocSum")
  # NOTE: sapply is deliberate here -- a missing field yields a
  # zero-length xml_text() result, and sapply's permissive return type
  # tolerates that; do not swap in vapply without handling that case.
  tmp <- taxize_ldfast(lapply(nodes, function(z) {
    data.frame(as.list(
      setNames(sapply(nget, function(w) xml_text(xml_find_all(z, w))), tolower(mget))),
      stringsAsFactors = FALSE)
  }))
  rename(tmp, c('taxid' = 'uid'))
}
# Base URL for all NCBI Entrez E-utilities requests.
ncbi_base <- function() {
  "https://eutils.ncbi.nlm.nih.gov"
}
|
/R/get_uid.R
|
permissive
|
fozy81/taxize
|
R
| false
| false
| 15,120
|
r
|
#' Get the UID codes from NCBI for taxonomic names.
#'
#' Retrieve the Unique Identifier (UID) of a taxon from NCBI taxonomy browser.
#'
#' @export
#' @param sciname character; scientific name.
#' @param ask logical; should get_uid be run in interactive mode? If TRUE and
#' more than one TSN is found for the species, the user is asked for input. If
#' FALSE NA is returned for multiple matches.
#' @param messages logical; If `TRUE` (default) the actual taxon queried is
#' printed on the console.
#' @param rows numeric; Any number from 1 to infinity. If the default NA, all
#' rows are considered. Note that this function still only gives back a uid
#' class object with one to many identifiers. See
#' \code{\link[taxize]{get_uid_}} to get back all, or a subset, of the raw
#' data that you are presented during the ask process.
#' @param modifier (character) A modifier to the \code{sciname} given. Options
#' include: Organism, Scientific Name, Common Name, All Names, Division,
#' Filter, Lineage, GC, MGC, Name Tokens, Next Level, PGC, Properties, Rank,
#' Subtree, Synonym, Text Word. These are not checked, so make sure they are
#' entered correctly, as is.
#' @param rank_query (character) A taxonomic rank name to modify the query sent
#' to NCBI. See \code{\link{rank_ref}} for possible options. Though note that
#' some data sources use atypical ranks, so inspect the data itself for
#' options. Optional. See \code{Querying} below.
#' @param division_filter (character) A division (aka phylum) name to filter
#' data after retrieved from NCBI. Optional. See \code{Filtering} below.
#' @param rank_filter (character) A taxonomic rank name to filter data after
#' retrieved from NCBI. See \code{\link{rank_ref}} for possible options.
#' Though note that some data sources use atypical ranks, so inspect the data
#' itself for options. Optional. See \code{Filtering} below.
#' @param key (character) NCBI Entrez API key. optional. See Details.
#' @param x Input to \code{\link{as.uid}}
#' @param ... Ignored
#' @param check logical; Check if ID matches any existing on the DB, only used
#' in \code{\link{as.uid}}
#' @template getreturn
#'
#' @section Querying: The parameter \code{rank_query} is used in the search sent
#' to NCBI, whereas \code{rank_filter} filters data after it comes back. The
#' parameter \code{modifier} adds modifiers to the name. For example,
#' \code{modifier="Organism"} adds that to the name, giving e.g.,
#' \code{Helianthus[Organism]}.
#'
#' @section Filtering: The parameters \code{division_filter} and
#' \code{rank_filter} are not used in the search to the data provider, but are
#' used in filtering the data down to a subset that is closer to the target
#' you want. For all these parameters, you can use regex strings since we use
#' \code{\link{grep}} internally to match. Filtering narrows down to the set
#' that matches your query, and removes the rest.
#'
#' @section Beware: NCBI does funny things sometimes. E.g., if you search on
#' Fringella morel, a slight misspelling of the genus name, and a non-existent
#' epithet, NCBI gives back a morel fungal species. In addition, NCBI doesn't
#' really do fuzzy searching very well, so if there is a slight mis-spelling
#' in your names, you likely won't get what you are expecting. The lesson:
#' clean your names before using this function. Other data sources are better
#' about fuzzy matching.
#'
#' @section Authentication:
#' See \code{\link{taxize-authentication}} for help on authentication
#'
#' Note that even though you can't pass in your key to `as.uid` functions,
#' we still use your Entrez API key if you have it saved as an R option
#' or environment variable.
#'
#' @family taxonomic-ids
#' @seealso \code{\link[taxize]{classification}}
#'
#' @author Eduard Szoecs, \email{eduardszoecs@@gmail.com}
#'
#' @examples \dontrun{
#' get_uid(c("Chironomus riparius", "Chaetopteryx"))
#' get_uid(c("Chironomus riparius", "aaa vva"))
#'
#' # When not found
#' get_uid("howdy")
#' get_uid(c("Chironomus riparius", "howdy"))
#'
#' # Narrow down results to a division or rank, or both
#' ## By modifying the query
#' ### w/ modifiers to the name
#' get_uid(sciname = "Aratinga acuticauda", modifier = "Organism")
#' get_uid(sciname = "bear", modifier = "Common Name")
#'
#' ### w/ rank query
#' get_uid(sciname = "Pinus", rank_query = "genus")
#' get_uid(sciname = "Pinus", rank_query = "subgenus")
#' ### division query doesn't really work, for unknown reasons, so not available
#'
#' ## By filtering the result
#' ## Echinacea example
#' ### Results w/o narrowing
#' get_uid("Echinacea")
#' ### w/ division
#' get_uid(sciname = "Echinacea", division_filter = "eudicots")
#' get_uid(sciname = "Echinacea", division_filter = "sea urchins")
#'
#' ## Satyrium example
#' ### Results w/o narrowing
#' get_uid(sciname = "Satyrium")
#' ### w/ division
#' get_uid(sciname = "Satyrium", division_filter = "monocots")
#' get_uid(sciname = "Satyrium", division_filter = "butterflies")
#'
#' ## Rank example
#' get_uid(sciname = "Pinus")
#' get_uid(sciname = "Pinus", rank_filter = "genus")
#' get_uid(sciname = "Pinus", rank_filter = "subgenus")
#'
#' # Fuzzy filter on any filtering fields
#' ## uses grep on the inside
#' get_uid("Satyrium", division_filter = "m")
#'
#' # specify rows to limit choices available
#' get_uid('Dugesia') # user prompt needed
#' get_uid('Dugesia', rows=1) # 2 choices, so returns only 1 row, so no choices
#' get_uid('Dugesia', ask = FALSE) # returns NA for multiple matches
#'
#' # Go to a website with more info on the taxon
#' res <- get_uid("Chironomus riparius")
#' browseURL(attr(res, "uri"))
#'
#' # Convert a uid without class information to a uid class
#' as.uid(get_uid("Chironomus riparius")) # already a uid, returns the same
#' as.uid(get_uid(c("Chironomus riparius","Pinus contorta"))) # same
#' as.uid(315567) # numeric
#' as.uid(c(315567,3339,9696)) # numeric vector, length > 1
#' as.uid("315567") # character
#' as.uid(c("315567","3339","9696")) # character vector, length > 1
#' as.uid(list("315567","3339","9696")) # list, either numeric or character
#' ## dont check, much faster
#' as.uid("315567", check=FALSE)
#' as.uid(315567, check=FALSE)
#' as.uid(c("315567","3339","9696"), check=FALSE)
#' as.uid(list("315567","3339","9696"), check=FALSE)
#'
#' (out <- as.uid(c(315567,3339,9696)))
#' data.frame(out)
#' as.uid( data.frame(out) )
#'
#' # Get all data back
#' get_uid_("Puma concolor")
#' get_uid_("Dugesia")
#' get_uid_("Dugesia", rows=2)
#' get_uid_("Dugesia", rows=1:2)
#' get_uid_(c("asdfadfasd","Pinus contorta"))
#'
#' # use curl options
#' get_uid("Quercus douglasii", verbose = TRUE)
#' }
get_uid <- function(sciname, ask = TRUE, messages = TRUE, rows = NA,
                    modifier = NULL, rank_query = NULL,
                    division_filter = NULL, rank_filter = NULL,
                    key = NULL, ...) {
  # Validate user-facing arguments up front so bad input fails fast.
  assert(ask, "logical")
  assert(messages, "logical")
  assert(modifier, "character")
  assert(rank_query, "character")
  assert(division_filter, "character")
  assert(rank_filter, "character")
  key <- getkey(key, service = "entrez")

  # Worker applied to each name; returns a one-row data.frame carrying the
  # uid plus bookkeeping used for the attributes of the final "uid" object.
  fun <- function(sciname, ask, messages, rows, ...) {
    direct <- FALSE
    mssg(messages, "\nRetrieving data for taxon '", sciname, "'\n")
    # Entrez terms are '+'-separated; optional [modifier] / [Rank] clauses
    # narrow the query server-side.
    sciname <- gsub(" ", "+", sciname)
    if (!is.null(modifier)) sciname <- paste0(sciname,
                                              sprintf("[%s]", modifier))
    term <- sciname
    if (!is.null(rank_query)) term <- paste0(term, sprintf(" AND %s[Rank]", rank_query))
    try_again_errors <- c("Could not resolve host: eutils.ncbi.nlm.nih.gov")
    query_args <- tc(list(db = "taxonomy", term = term, api_key = key))
    raw_xml_result <- repeat_until_it_works(try_again_errors,
                                            "esearch",
                                            query = query_args,
                                            ...)
    xml_result <- xml2::read_xml(raw_xml_result)
    # NCBI limits requests to three per second when no API key is supplied.
    if (is.null(key)) Sys.sleep(0.33)
    uid <- xml2::xml_text(xml2::xml_find_all(xml_result, "//IdList/Id"))
    mm <- length(uid) > 1
    if (length(uid) == 0) { # if taxon name is not found
      uid <- NA_character_
    } else {
      att <- 'found'
    }
    # Not found on NCBI. anyNA() replaces `length(uid) == 0 || is.na(uid)`:
    # uid can be length > 1 at this point, and a vector condition inside
    # `||` is an error in R >= 4.3. Real UIDs are never NA, so anyNA()
    # matches only the NA sentinel assigned just above.
    if (anyNA(uid)) {
      mssg(messages, "Not found. Consider checking the spelling or alternate classification")
      uid <- NA_character_
      att <- 'NA due to not found'
    }
    # More than one hit: pull esummary records, filter, and maybe prompt.
    if (length(uid) > 1) {
      ID <- paste(uid, collapse = ",")
      try_again_errors <- c("Could not resolve host: eutils.ncbi.nlm.nih.gov")
      query_args <- tc(list(db = "taxonomy", ID = ID, api_key = key))
      tt <- repeat_until_it_works(try_again_errors, "esummary",
                                  query_args, ...)
      ttp <- xml2::read_xml(tt)
      df <- parse_ncbi(ttp)
      rownames(df) <- 1:nrow(df)
      # Client-side narrowing by division and/or rank (regex, via filt()).
      if (!is.null(division_filter) || !is.null(rank_filter)) {
        df <- filt(df, "division", division_filter)
        df <- filt(df, "rank", rank_filter)
      }
      df <- sub_rows(df, rows)
      uid <- df$uid
      if (length(uid) == 1) {
        direct <- TRUE
        att <- "found"
      }
      if (length(uid) == 0) {
        # Filtering removed every candidate. Record why the result is NA;
        # previously `att` kept the stale value "found" here.
        uid <- NA_character_
        att <- 'NA due to no match after filtering'
      }
      if (length(uid) > 1) {
        if (!ask) {
          # Non-interactive mode refuses to guess among several candidates.
          # (A dead `length(uid) == 1` branch was removed: this code only
          # runs when length(uid) > 1.)
          warning(
            sprintf("More than one UID found for taxon '%s'; refine query or set ask=TRUE",
                    sciname),
            call. = FALSE
          )
          uid <- NA_character_
          att <- 'NA due to ask=FALSE & > 1 result'
        } else {
          # Interactive prompt: show the candidates and read a row number.
          rownames(df) <- 1:nrow(df)
          message("\n\n")
          message("\nMore than one UID found for taxon '", sciname, "'!\n
            Enter rownumber of taxon (other inputs will return 'NA'):\n")
          print(df)
          take <- scan(n = 1, quiet = TRUE, what = 'raw')
          if (length(take) == 0) {
            take <- 'notake'
            att <- 'nothing chosen'
          }
          if (take %in% seq_len(nrow(df))) {
            take <- as.numeric(take)
            message("Input accepted, took UID '",
                    as.character(df$uid[take]), "'.\n")
            uid <- as.character(df$uid[take])
            att <- 'found'
          } else {
            uid <- NA_character_
            att <- 'NA due to user input out of range'
            mssg(messages, "\nReturned 'NA'!\n\n")
          }
        }
      }
    }
    return(data.frame(uid, att, multiple = mm, direct = direct,
                      stringsAsFactors = FALSE))
  }

  sciname <- as.character(sciname)
  outd <- ldply(sciname, fun, ask, messages, rows, ...)
  out <- structure(outd$uid, class = "uid",
                   match = outd$att,
                   multiple_matches = outd$multiple,
                   pattern_match = outd$direct)
  add_uri(out, 'https://www.ncbi.nlm.nih.gov/taxonomy/%s')
}
repeat_until_it_works <- function(catch, path, query, max_tries = 3, wait_time = 10,
                                  messages = TRUE, ...) {
  # Retry an Entrez E-utilities GET request up to `max_tries` times with a
  # linearly growing back-off (wait_time * attempt number).
  #
  # Only errors whose message appears in `catch` are swallowed and retried;
  # any other error is re-thrown immediately.
  #
  # @param catch character vector of error messages that warrant a retry.
  # @param path E-utilities endpoint name, e.g. "esearch" or "esummary".
  # @param query named list of query parameters (NULLs dropped via tc()).
  # @param max_tries maximum number of attempts before giving up.
  # @param wait_time base sleep in seconds between attempts.
  # @param messages emit a warning when a retryable error is caught?
  # @return the response body as a UTF-8 string, or NA if every try failed.
  error_handler <- function(e) {
    if (e$message %in% catch) {
      if (messages) warning(paste("Caught error:", e$message))
      return(NA)
    } else {
      stop(e$message)
    }
  }
  for (count in seq_len(max_tries)) {
    # BUGFIX: the whole request + parse now sits inside tryCatch. The
    # original only wrapped res$parse(), but connection failures such as
    # "Could not resolve host: ..." are raised by $get(), so the errors
    # this function exists to retry were never actually caught.
    output <- tryCatch({
      cli <- crul::HttpClient$new(url = ncbi_base(), opts = list(...))
      res <- cli$get(sprintf("entrez/eutils/%s.fcgi", path),
                     query = tc(query))
      res$parse("UTF-8")
    }, error = error_handler)
    if (!is.na(output)) return(output)
    Sys.sleep(wait_time * count)
  }
  return(output)
}
# S3 coercion helpers: turn various inputs into a "uid" class object.
# `check = TRUE` is forwarded to make_uid()/make_generic(); presumably it
# validates each id against NCBI (see check_uid) -- confirm in make_generic.
#' @export
#' @rdname get_uid
as.uid <- function(x, check=TRUE) UseMethod("as.uid")
# Already a uid: returned unchanged; `check` is ignored.
#' @export
#' @rdname get_uid
as.uid.uid <- function(x, check=TRUE) x
# A single string becomes one uid; longer vectors go through collapse(),
# which applies make_uid element-wise and merges the results.
#' @export
#' @rdname get_uid
as.uid.character <- function(x, check=TRUE) if(length(x) == 1) make_uid(x, check) else collapse(x, make_uid, "uid", check=check)
# Lists are handled exactly like character vectors (elements may be
# numeric or character).
#' @export
#' @rdname get_uid
as.uid.list <- function(x, check=TRUE) if(length(x) == 1) make_uid(x, check) else collapse(x, make_uid, "uid", check=check)
# Numbers are stringified, then dispatched to the character method.
#' @export
#' @rdname get_uid
as.uid.numeric <- function(x, check=TRUE) as.uid(as.character(x), check)
# Rebuild a uid object from a data.frame shaped like the output of
# as.data.frame.uid() (columns: ids, match, multiple_matches,
# pattern_match, uri). `check` is ignored here.
#' @export
#' @rdname get_uid
as.uid.data.frame <- function(x, check=TRUE) {
  structure(x$ids, class="uid", match=x$match,
            multiple_matches = x$multiple_matches,
            pattern_match = x$pattern_match, uri=x$uri)
}
#' @export
#' @rdname get_uid
as.data.frame.uid <- function(x, ...){
  # Flatten a "uid" object into a one-observation-per-id data.frame,
  # lifting each attribute into its own column.
  fields <- list(
    ids = as.character(unclass(x)),
    class = "uid",
    match = attr(x, "match"),
    multiple_matches = attr(x, "multiple_matches"),
    pattern_match = attr(x, "pattern_match"),
    uri = attr(x, "uri")
  )
  do.call(data.frame, c(fields, list(stringsAsFactors = FALSE)))
}
# Build a single uid (NCBI taxonomy identifier) object, optionally verifying
# it against the service, by delegating to the shared make_generic() helper.
make_uid <- function(x, check = TRUE) {
  uri_template <- 'https://www.ncbi.nlm.nih.gov/taxonomy/%s'
  make_generic(x, uri_template, "uid", check)
}
# Verify that `x` is a real NCBI taxonomy uid: request its esummary record
# and compare the Id field(s) echoed back with the input. Returns TRUE only
# when the service echoes exactly the queried identifier(s).
check_uid <- function(x){
  api_key <- getkey(NULL, "ENTREZ_KEY")
  client <- crul::HttpClient$new(url = ncbi_base())
  params <- tc(list(db = "taxonomy", id = x, api_key = api_key))
  resp <- client$get("entrez/eutils/esummary.fcgi", query = params)
  resp$raise_for_status()
  doc <- xml2::read_xml(resp$parse("UTF-8"))
  echoed <- xml2::xml_text(xml2::xml_find_all(doc, "//Id"))
  identical(as.character(x), echoed)
}
#' @export
#' @rdname get_uid
get_uid_ <- function(sciname, messages = TRUE, rows = NA, key = NULL, ...){
  key <- getkey(key, "ENTREZ_KEY")
  # One lookup per name; the result list is keyed by the input names.
  results <- lapply(sciname, get_uid_help, messages = messages,
                    rows = rows, key = key, ...)
  stats::setNames(results, sciname)
}
# Look up NCBI taxonomy uids for one scientific name.
# Returns NULL when the esearch query has no hits; otherwise a data frame of
# esummary fields (optionally trimmed to `rows` via sub_rows()).
get_uid_help <- function(sciname, messages, rows, key, ...) {
  mssg(messages, "\nRetrieving data for taxon '", sciname, "'\n")
  client <- crul::HttpClient$new(url = ncbi_base(), opts = list(...))
  # Step 1: esearch resolves the name to zero or more uids.
  search_res <- client$get(
    "entrez/eutils/esearch.fcgi",
    query = tc(list(api_key = key,
                    db = "taxonomy", term = gsub(" ", "+", sciname))))
  search_res$raise_for_status()
  search_doc <- xml2::read_xml(search_res$parse("UTF-8"))
  # Brief pause between requests (presumably to stay under NCBI's
  # request-rate limit -- TODO confirm against the EUtils usage policy).
  Sys.sleep(0.33)
  uid <- xml_text(xml_find_all(search_doc, "//IdList/Id"))
  if (length(uid) == 0) {
    return(NULL)
  }
  # Step 2: esummary fetches the taxonomy records for all found uids at once.
  summary_res <- client$get("entrez/eutils/esummary.fcgi",
                            query = tc(list(api_key = key, db = "taxonomy",
                                            ID = paste(uid, collapse = ","))))
  summary_res$raise_for_status()
  summary_doc <- xml2::read_xml(summary_res$parse("UTF-8"))
  sub_rows(parse_ncbi(summary_doc), rows)
}
# Convert an NCBI esummary XML document into a data frame: one row per
# DocSum node, lower-cased column names, and the TaxId column renamed to uid.
parse_ncbi <- function(x) {
  fields <- c("Status", "Rank", "Division", "ScientificName",
              "CommonName", "TaxId", "Genus", "Species", "Subsp",
              "ModificationDate")
  # XPath per field, e.g. Item[@Name="Rank"].
  xpaths <- paste0('Item[@Name="', fields, "\"]")
  docsums <- xml_find_all(x, "//DocSum")
  rows <- lapply(docsums, function(node) {
    vals <- sapply(xpaths, function(xp) xml_text(xml_find_all(node, xp)))
    data.frame(as.list(setNames(vals, tolower(fields))),
               stringsAsFactors = FALSE)
  })
  rename(taxize_ldfast(rows), c('taxid' = 'uid'))
}
ncbi_base <- function() "https://eutils.ncbi.nlm.nih.gov"
|
# Read Data
# na.strings = "?": this dataset encodes missing readings as "?", so read
# them as NA up front instead of relying on as.numeric() coercion warnings
# later on.
Pow_Data<-read.delim('household_power_consumption.txt',header = TRUE,sep=";", stringsAsFactors=FALSE, dec=".", na.strings="?")
#head(Pow_Data)
#subset Data to the first 2 days in Feb 2007
New_Data <- Pow_Data[Pow_Data$Date %in% c("1/2/2007","2/2/2007"),]
#combine Date and Time into a single timestamp
# as.POSIXct: strptime() returns POSIXlt, a list-based class that misbehaves
# when stored as a data-frame column; POSIXct is the safe column type.
New_Data$DateTime <- as.POSIXct(strptime(paste(New_Data$Date,New_Data$Time,sep = " "), "%d/%m/%Y %H:%M:%S"))
#head(New_Data$DateTime)
#Change Sub_metering_* to numeric
New_Data$Sub_metering_1 <- as.numeric(New_Data$Sub_metering_1)
New_Data$Sub_metering_2 <- as.numeric(New_Data$Sub_metering_2)
New_Data$Sub_metering_3 <- as.numeric(New_Data$Sub_metering_3)
#Create plot DateTime against Sub_metering_* and save as png
png("plot3.png", width=480, height=480)
plot(New_Data$DateTime,New_Data$Sub_metering_1,type = "l", xlab=" ", ylab = "Energy sub metering")
lines(New_Data$DateTime,New_Data$Sub_metering_2,type = "l",col="red")
lines(New_Data$DateTime,New_Data$Sub_metering_3,type = "l",col="blue")
legend("topright",lty = 1,lwd=2 ,col = c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
|
/plot3.R
|
no_license
|
dheeraj13/ExData_Plotting1
|
R
| false
| false
| 1,124
|
r
|
# Read Data
# na.strings = "?": this dataset encodes missing readings as "?", so read
# them as NA up front instead of relying on as.numeric() coercion warnings
# later on.
Pow_Data<-read.delim('household_power_consumption.txt',header = TRUE,sep=";", stringsAsFactors=FALSE, dec=".", na.strings="?")
#head(Pow_Data)
#subset Data to the first 2 days in Feb 2007
New_Data <- Pow_Data[Pow_Data$Date %in% c("1/2/2007","2/2/2007"),]
#combine Date and Time into a single timestamp
# as.POSIXct: strptime() returns POSIXlt, a list-based class that misbehaves
# when stored as a data-frame column; POSIXct is the safe column type.
New_Data$DateTime <- as.POSIXct(strptime(paste(New_Data$Date,New_Data$Time,sep = " "), "%d/%m/%Y %H:%M:%S"))
#head(New_Data$DateTime)
#Change Sub_metering_* to numeric
New_Data$Sub_metering_1 <- as.numeric(New_Data$Sub_metering_1)
New_Data$Sub_metering_2 <- as.numeric(New_Data$Sub_metering_2)
New_Data$Sub_metering_3 <- as.numeric(New_Data$Sub_metering_3)
#Create plot DateTime against Sub_metering_* and save as png
png("plot3.png", width=480, height=480)
plot(New_Data$DateTime,New_Data$Sub_metering_1,type = "l", xlab=" ", ylab = "Energy sub metering")
lines(New_Data$DateTime,New_Data$Sub_metering_2,type = "l",col="red")
lines(New_Data$DateTime,New_Data$Sub_metering_3,type = "l",col="blue")
legend("topright",lty = 1,lwd=2 ,col = c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lpca.R
\name{lpca}
\alias{lpca}
\title{Perform local PCA on each cluster of the data}
\usage{
lpca(indicator, X, d)
}
\arguments{
\item{X}{Data.}
\item{d}{intrinsic dimension}
\item{indicator}{cluster membership, or the number of clusters for pam() clustering
if a single positive integer is provided}
}
\value{
a list of representation of data (X.rep), mean normalized reconstruction error (mean_error),
normalized reconstruction error for all data (all_error), cluster membership (cluster_id),
mean normalized reconstruction error in each cluster (each_error), cluster size (cluster_size),
variance explained by each PC in each cluster (variance_proportion),
d/number of PCs needed to explain more than d of the variance in each cluster (num_ev).
}
\description{
Takes in an indicator of cluster membership, the data, and the intrinsic dimension
}
\examples{
############################## example I: Open box
## package for 3d plot
library(rgl)
## package for pam() kmeans clustering
library(cluster)
## load data
data(OpenBox)
## intrinsic dimension of the data
trueDim = 2
## number of clusters
K = 6
indi = pam(OpenBox,K)$clustering
temp = lpca(indi,OpenBox,trueDim)
OpenBox_rep = temp[[1]]
error_rep = temp[[2]]
open3d()
plot3d(OpenBox,col=indi,xlim=c(0,2),ylim=c(0,2),zlim=c(0,2))
open3d()
plot3d(OpenBox_rep,col=indi,xlim=c(0,2),ylim=c(0,2),zlim=c(0,2))
############################## example II: Swiss roll
## package for 3d plot
library(rgl)
## package for pam() kmeans clustering
library(cluster)
## load data
data(SwissRoll)
## intrinsic dimension of the data
trueDim = 2
## number of clusters
K = 8
indi = pam(SwissRoll,K)$clustering
temp = lpca(indi,SwissRoll,trueDim)
SwissRoll_rep = temp[[1]]
error_rep = temp[[2]]
open3d()
plot3d(SwissRoll,col=indi,xlim=c(0,2),ylim=c(0,2),zlim=c(0,2))
open3d()
plot3d(SwissRoll_rep,col=indi,xlim=c(0,2),ylim=c(0,2),zlim=c(0,2))
############################## example III: M-shape
## package for pam() kmeans clustering
library(cluster)
## load data
data(M_shape)
## intrinsic dimension of the data
trueDim = 1
## number of clusters
K = 4
indi = pam(M_shape,K)$clustering
temp = lpca(indi,M_shape,trueDim)
M_shape_rep = temp[[1]]
error_rep = temp[[2]]
indi_true = rep(1:4,each=nrow(M_shape)/4)
temp_true = lpca(indi_true,M_shape,trueDim)
M_shape_rep_true = temp_true[[1]]
par(mfrow=c(1,3))
plot(M_shape,col=indi)
plot(M_shape_rep,col=indi)
plot(M_shape_rep_true,col=indi_true)
}
|
/man/lpca.Rd
|
no_license
|
Yanhao29/GeoRatio
|
R
| false
| true
| 2,520
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lpca.R
\name{lpca}
\alias{lpca}
\title{Perform local PCA on each cluster of the data}
\usage{
lpca(indicator, X, d)
}
\arguments{
\item{X}{Data.}
\item{d}{intrinsic dimension}
\item{indicator}{cluster membership, or the number of clusters for pam() clustering
if a single positive integer is provided}
}
\value{
a list of representation of data (X.rep), mean normalized reconstruction error (mean_error),
normalized reconstruction error for all data (all_error), cluster membership (cluster_id),
mean normalized reconstruction error in each cluster (each_error), cluster size (cluster_size),
variance explained by each PC in each cluster (variance_proportion),
d/number of PCs needed to explain more than d of the variance in each cluster (num_ev).
}
\description{
Takes in an indicator of cluster membership, the data, and the intrinsic dimension
}
\examples{
############################## example I: Open box
## package for 3d plot
library(rgl)
## package for pam() kmeans clustering
library(cluster)
## load data
data(OpenBox)
## intrinsic dimension of the data
trueDim = 2
## number of clusters
K = 6
indi = pam(OpenBox,K)$clustering
temp = lpca(indi,OpenBox,trueDim)
OpenBox_rep = temp[[1]]
error_rep = temp[[2]]
open3d()
plot3d(OpenBox,col=indi,xlim=c(0,2),ylim=c(0,2),zlim=c(0,2))
open3d()
plot3d(OpenBox_rep,col=indi,xlim=c(0,2),ylim=c(0,2),zlim=c(0,2))
############################## example II: Swiss roll
## package for 3d plot
library(rgl)
## package for pam() kmeans clustering
library(cluster)
## load data
data(SwissRoll)
## intrinsic dimension of the data
trueDim = 2
## number of clusters
K = 8
indi = pam(SwissRoll,K)$clustering
temp = lpca(indi,SwissRoll,trueDim)
SwissRoll_rep = temp[[1]]
error_rep = temp[[2]]
open3d()
plot3d(SwissRoll,col=indi,xlim=c(0,2),ylim=c(0,2),zlim=c(0,2))
open3d()
plot3d(SwissRoll_rep,col=indi,xlim=c(0,2),ylim=c(0,2),zlim=c(0,2))
############################## example III: M-shape
## package for pam() kmeans clustering
library(cluster)
## load data
data(M_shape)
## intrinsic dimension of the data
trueDim = 1
## number of clusters
K = 4
indi = pam(M_shape,K)$clustering
temp = lpca(indi,M_shape,trueDim)
M_shape_rep = temp[[1]]
error_rep = temp[[2]]
indi_true = rep(1:4,each=nrow(M_shape)/4)
temp_true = lpca(indi_true,M_shape,trueDim)
M_shape_rep_true = temp_true[[1]]
par(mfrow=c(1,3))
plot(M_shape,col=indi)
plot(M_shape_rep,col=indi)
plot(M_shape_rep_true,col=indi_true)
}
|
# Demo: Kaplan-Meier survival curve with simultaneous confidence bands for
# the package's NHL dataset. Requires the survival package for survfit().
data(nhl)
# Single-group (intercept-only) survival fit on time/status.
fit<-survfit(Surv(time,status)~1,data=nhl)
# Draw the KM estimate together with its confidence band.
plotbands(fit)
|
/kmconfband/demo/nhldemo.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 70
|
r
|
# Demo: Kaplan-Meier survival curve with simultaneous confidence bands for
# the package's NHL dataset. Requires the survival package for survfit().
data(nhl)
# Single-group (intercept-only) survival fit on time/status.
fit<-survfit(Surv(time,status)~1,data=nhl)
# Draw the KM estimate together with its confidence band.
plotbands(fit)
|
# Unit 4 - "Judge, Jury, and Classifier" Lecture
# CART trees, random forests, and cross-validated pruning on the Stevens
# Supreme Court dataset. Script is strictly order-dependent.
# VIDEO 4
# Read in the data
stevens = read.csv("stevens.csv")
str(stevens)
# Split the data: sample.split() stratifies on the outcome so Train/Test
# keep the same proportion of reversed decisions; seed fixed for class.
library(caTools)
set.seed(3000)
spl = sample.split(stevens$Reverse, SplitRatio = 0.7)
Train = subset(stevens, spl==TRUE)
Test = subset(stevens, spl==FALSE)
# Install rpart library
install.packages("rpart")
library(rpart)
install.packages("rpart.plot")
library(rpart.plot)
# CART model. minbucket bounds the minimum observations per leaf; the two
# commented-out calls are the lecture's over-/under-fitting alternatives.
StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=25)
#StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=5)
#StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=100)
prp(StevensTree)
# Make predictions
PredictCART = predict(StevensTree, newdata = Test, type = "class")
table(Test$Reverse, PredictCART)
# Test-set accuracy computed from the confusion matrix above
(41+71)/(41+36+22+71)
# ROC curve
library(ROCR)
PredictROC = predict(StevensTree, newdata = Test)
PredictROC
# Column 2 holds P(Reverse = 1)
pred = prediction(PredictROC[,2], Test$Reverse)
perf = performance(pred, "tpr", "fpr")
plot(perf)
Auc = as.numeric(performance(pred, "auc")@y.values)
Auc
# VIDEO 5 - Random Forests
# Install randomForest package
install.packages("randomForest")
library(randomForest)
set.seed(200)
# Build random forest model
# NOTE(review): Reverse is still numeric here; the lecture apparently runs
# this first call to show what happens before converting to a factor.
StevensForest = randomForest(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, ntree=200, nodesize=25 )
# Convert outcome to factor
Train$Reverse = as.factor(Train$Reverse)
Test$Reverse = as.factor(Test$Reverse)
# Try again
StevensForest = randomForest(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, ntree=200, nodesize=25 )
# Make predictions
PredictForest = predict(StevensForest, newdata = Test)
table(Test$Reverse, PredictForest)
# Random-forest test-set accuracy
(40+73)/(40+37+20+73)
# VIDEO 6
# Install cross-validation packages
install.packages("caret")
library(caret)
install.packages("e1071")
library(e1071)
# Define cross-validation experiment: 10-fold CV over cp in 0.01..0.5
numFolds = trainControl( method = "cv", number = 10 )
cpGrid = expand.grid( .cp = seq(0.01,0.5,0.01))
# Perform the cross validation
train(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method = "rpart", trControl = numFolds, tuneGrid = cpGrid )
# Create a new CART model; cp = 0.18 presumably chosen from the CV output
StevensTreeCV = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", cp = 0.18)
prp(StevensTreeCV)
# Make predictions
PredictCV = predict(StevensTreeCV, newdata = Test, type = "class")
table(Test$Reverse, PredictCV)
# Cross-validated tree accuracy
(59+64)/(59+18+29+64)
|
/Unit4_SupremeCourt.R
|
no_license
|
wuthmone/The-Analytics-Edge
|
R
| false
| false
| 2,745
|
r
|
# Unit 4 - "Judge, Jury, and Classifier" Lecture
# CART trees, random forests, and cross-validated pruning on the Stevens
# Supreme Court dataset. Script is strictly order-dependent.
# VIDEO 4
# Read in the data
stevens = read.csv("stevens.csv")
str(stevens)
# Split the data: sample.split() stratifies on the outcome so Train/Test
# keep the same proportion of reversed decisions; seed fixed for class.
library(caTools)
set.seed(3000)
spl = sample.split(stevens$Reverse, SplitRatio = 0.7)
Train = subset(stevens, spl==TRUE)
Test = subset(stevens, spl==FALSE)
# Install rpart library
install.packages("rpart")
library(rpart)
install.packages("rpart.plot")
library(rpart.plot)
# CART model. minbucket bounds the minimum observations per leaf; the two
# commented-out calls are the lecture's over-/under-fitting alternatives.
StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=25)
#StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=5)
#StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=100)
prp(StevensTree)
# Make predictions
PredictCART = predict(StevensTree, newdata = Test, type = "class")
table(Test$Reverse, PredictCART)
# Test-set accuracy computed from the confusion matrix above
(41+71)/(41+36+22+71)
# ROC curve
library(ROCR)
PredictROC = predict(StevensTree, newdata = Test)
PredictROC
# Column 2 holds P(Reverse = 1)
pred = prediction(PredictROC[,2], Test$Reverse)
perf = performance(pred, "tpr", "fpr")
plot(perf)
Auc = as.numeric(performance(pred, "auc")@y.values)
Auc
# VIDEO 5 - Random Forests
# Install randomForest package
install.packages("randomForest")
library(randomForest)
set.seed(200)
# Build random forest model
# NOTE(review): Reverse is still numeric here; the lecture apparently runs
# this first call to show what happens before converting to a factor.
StevensForest = randomForest(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, ntree=200, nodesize=25 )
# Convert outcome to factor
Train$Reverse = as.factor(Train$Reverse)
Test$Reverse = as.factor(Test$Reverse)
# Try again
StevensForest = randomForest(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, ntree=200, nodesize=25 )
# Make predictions
PredictForest = predict(StevensForest, newdata = Test)
table(Test$Reverse, PredictForest)
# Random-forest test-set accuracy
(40+73)/(40+37+20+73)
# VIDEO 6
# Install cross-validation packages
install.packages("caret")
library(caret)
install.packages("e1071")
library(e1071)
# Define cross-validation experiment: 10-fold CV over cp in 0.01..0.5
numFolds = trainControl( method = "cv", number = 10 )
cpGrid = expand.grid( .cp = seq(0.01,0.5,0.01))
# Perform the cross validation
train(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method = "rpart", trControl = numFolds, tuneGrid = cpGrid )
# Create a new CART model; cp = 0.18 presumably chosen from the CV output
StevensTreeCV = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", cp = 0.18)
prp(StevensTreeCV)
# Make predictions
PredictCV = predict(StevensTreeCV, newdata = Test, type = "class")
table(Test$Reverse, PredictCV)
# Cross-validated tree accuracy
(59+64)/(59+18+29+64)
|
## ------------------------------------------------------------------------- ##
## function 'applyCoeffs':
## ------------------------------------------------------------------------- ##
test_that(desc="testApplyCoeffs", code={
  mat <- matrix(rnorm(10), nrow=2, ncol=5)
  expr_set <- Biobase::ExpressionSet(assayData=mat)
  coeffs <- rnorm(5)
  normed <- TPP:::applyCoeffs(expr_set, coeffs)
  observed <- unname(Biobase::exprs(normed))
  # Each row of the assay matrix should be scaled column-wise by the
  # normalization coefficients.
  expected <- rbind(mat[1,] * coeffs, mat[2,] * coeffs)
  expect_equal(observed, expected)
})
|
/tests/testthat/test_applyCoeffs.R
|
no_license
|
andreaschrader/TPP
|
R
| false
| false
| 531
|
r
|
## ------------------------------------------------------------------------- ##
## function 'applyCoeffs':
## ------------------------------------------------------------------------- ##
test_that(desc="testApplyCoeffs", code={
  mat <- matrix(rnorm(10), nrow=2, ncol=5)
  expr_set <- Biobase::ExpressionSet(assayData=mat)
  coeffs <- rnorm(5)
  normed <- TPP:::applyCoeffs(expr_set, coeffs)
  observed <- unname(Biobase::exprs(normed))
  # Each row of the assay matrix should be scaled column-wise by the
  # normalization coefficients.
  expected <- rbind(mat[1,] * coeffs, mat[2,] * coeffs)
  expect_equal(observed, expected)
})
|
#' Create a page to utilise Groundworks grids
#'
#' Thin wrapper that delegates to gwLIBS(), which presumably injects the
#' GroundworkCSS assets around the supplied UI elements -- TODO confirm
#' against the gwLIBS() definition.
#'
#' @param ... Elements to include within the page
#' @rdname gwfluidPage
#' @export
gwfluidPage <- function(...){
  gwLIBS(...)
}
#' @rdname gwfluidPage
#' @param class additional class definitions to add.
#' @export
gwfluidRow <- function(..., class = NULL){
  # CSS class string: always "row", with any extra classes appended.
  # Plain if/else rather than ifelse(): the condition is scalar here and
  # ifelse() needlessly evaluates both branches.
  row_class <- if (is.null(class)) "row" else paste("row", class)
  div(class = row_class, ...)
}
#' Create a gwcolumn within a UI definition
#'
#' Create a gwcolumn for use within a \code{\link{gwfluidRow}}
#'
#' @param nominator Numerator of the column's fractional width (e.g. 2 for
#'   a "2/3"-wide column). Doc fixed: previously documented as "numerator",
#'   which did not match the argument name.
#' @param denominator Denominator of the fractional width (1 to 12).
#' @param ... Elements to include within the column
#' @param class additional class definitions to add.
#' @param gridBreakpoint By default, grid columns collapse into rows at the $small-tablet breakpoint. The grid breakpoint helpers allow you to target the breakpoint that the grid structure should persist through.
#' For more detail see \url{http://groundworkcss.github.io/groundwork/docs/breakpoints.htm}
#'
#' @return A gwcolumn that can be included within a
#' \code{\link{gwfluidRow}}.
#'
#'
#' @seealso \code{\link{gwfluidRow}}.
#'
#' @export
gwcolumn <- function(nominator = '', denominator = c(1:12), ..., class = NULL
                     , gridBreakpoint = c(NA, "small-tablet", "mobile")){
  gridBreakpoint <- match.arg(gridBreakpoint)
  # Base fractional-width class computed once (the old scalar-ifelse code
  # evaluated fractionalClass() twice and was harder to read).
  col_class <- fractionalClass(as.integer(nominator), as.integer(denominator))
  if (!is.null(class)) {
    col_class <- paste(col_class, class)
  }
  # NA (the default) means "no breakpoint helper class".
  if (!is.na(gridBreakpoint)) {
    col_class <- paste(col_class, gridBreakpoint)
  }
  div(class = col_class, ...)
}
|
/R/gw-layout.R
|
no_license
|
arturochian/shinyGWorks
|
R
| false
| false
| 1,636
|
r
|
#' Create a page to utilise Groundworks grids
#'
#' Thin wrapper that delegates to gwLIBS(), which presumably injects the
#' GroundworkCSS assets around the supplied UI elements -- TODO confirm
#' against the gwLIBS() definition.
#'
#' @param ... Elements to include within the page
#' @rdname gwfluidPage
#' @export
gwfluidPage <- function(...){
  gwLIBS(...)
}
#' @rdname gwfluidPage
#' @param class additional class definitions to add.
#' @export
gwfluidRow <- function(..., class = NULL){
  # CSS class string: always "row", with any extra classes appended.
  # Plain if/else rather than ifelse(): the condition is scalar here and
  # ifelse() needlessly evaluates both branches.
  row_class <- if (is.null(class)) "row" else paste("row", class)
  div(class = row_class, ...)
}
#' Create a gwcolumn within a UI definition
#'
#' Create a gwcolumn for use within a \code{\link{gwfluidRow}}
#'
#' @param nominator Numerator of the column's fractional width (e.g. 2 for
#'   a "2/3"-wide column). Doc fixed: previously documented as "numerator",
#'   which did not match the argument name.
#' @param denominator Denominator of the fractional width (1 to 12).
#' @param ... Elements to include within the column
#' @param class additional class definitions to add.
#' @param gridBreakpoint By default, grid columns collapse into rows at the $small-tablet breakpoint. The grid breakpoint helpers allow you to target the breakpoint that the grid structure should persist through.
#' For more detail see \url{http://groundworkcss.github.io/groundwork/docs/breakpoints.htm}
#'
#' @return A gwcolumn that can be included within a
#' \code{\link{gwfluidRow}}.
#'
#'
#' @seealso \code{\link{gwfluidRow}}.
#'
#' @export
gwcolumn <- function(nominator = '', denominator = c(1:12), ..., class = NULL
                     , gridBreakpoint = c(NA, "small-tablet", "mobile")){
  gridBreakpoint <- match.arg(gridBreakpoint)
  # Base fractional-width class computed once (the old scalar-ifelse code
  # evaluated fractionalClass() twice and was harder to read).
  col_class <- fractionalClass(as.integer(nominator), as.integer(denominator))
  if (!is.null(class)) {
    col_class <- paste(col_class, class)
  }
  # NA (the default) means "no breakpoint helper class".
  if (!is.na(gridBreakpoint)) {
    col_class <- paste(col_class, gridBreakpoint)
  }
  div(class = col_class, ...)
}
|
/R_기초프로그래밍_회귀분석/02_데이터형태(170717_18).R
|
permissive
|
heechulk/Data_science_school_with_R
|
R
| false
| false
| 9,465
|
r
| ||
#read in a vector of character strings of parks visited
#output dataset with vector added 1 for visited, 0 for not
# Fuzzy-match each string in `vec` against `lookup` using agrep().
#
# Args:
#   vec    : character vector of names to match.
#   mdtol  : max.distance tolerance forwarded to agrep().
#   lookup : character vector of reference names.
# Returns a list with:
#   matched : lookup indices for inputs with exactly one hit (named by input),
#   tofix   : positions in `vec` with zero or multiple hits.
# (Name kept for callers, but the implementation now uses lapply: the old
# sapply(simplify = TRUE) collapsed the per-input hit lists into a matrix
# whenever every input happened to have the same number of hits, which
# silently corrupted the matched/tofix bookkeeping below.)
agrep_vec_sapply<-function(vec, mdtol, lookup) {
  hits <- lapply(vec, function(x) agrep(x, lookup, max.distance = mdtol))
  names(hits) <- vec
  n_hits <- vapply(hits, length, integer(1))
  md <- unlist(hits[which(n_hits == 1)])
  fx <- which(n_hits != 1)
  match_agrep <- list(matched = md, tofix = fx)
  return(match_agrep)
}
# Match a user-supplied vector of visited-park names against the global
# `npnames` lookup, trying progressively looser strategies:
#   1. direct fuzzy match, 2. append " NP", 3. abbreviate "National Park",
#   4. split names containing "and" and match each part.
# rtrn selects the output: "flag" (0/1 per npnames entry), "strings"
# (matched npnames), or "pairs" (matched index / input / npnames name).
# NOTE(review): relies on `npnames` existing in the calling environment, and
# if no fallback was needed (`allpair` never assigned) the "pairs" branch
# would fail -- TODO confirm intended usage.
match_userlist<-function(visit_vector, mdtol, rtrn) {
  trymatch1<-agrep_vec_sapply(vec=visit_vector, mdtol=mdtol, lookup = npnames)
  matched<-trymatch1$matched
  tofix<-trymatch1$tofix
  pairmatch<-cbind(matched, visit_vector[which(!visit_vector%in%visit_vector[tofix])])
  if (length(matched) != length(visit_vector)) {
    # Strategy 2: retry unmatched names with " NP" suffixed.
    trynp<-paste(visit_vector[tofix], "NP", sep=" ")
    trymatch2<-agrep_vec_sapply(vec=trynp, mdtol=mdtol, lookup = npnames)
    match2<-trymatch2$matched
    pairm2<-cbind(match2, visit_vector[which(!visit_vector%in%visit_vector[trymatch2$tofix])])
    # Strategy 3: retry the FULL vector with standard abbreviations applied.
    trynpsub<-gsub("National Park", "NP", visit_vector)
    trynpsub<-gsub("NP and Preserve", "NP & PRES", trynpsub)
    trymatch3<-agrep_vec_sapply(vec=trynpsub, mdtol=mdtol, lookup = npnames)
    match3<-trymatch3$matched
    pairm3<-cbind(match3, visit_vector[which(!visit_vector%in%visit_vector[trymatch3$tofix])])
    if (length(match2)==length(tofix)) {
      allmatch<-c(matched, match2)
      allpair<-rbind(pairmatch, pairm2)
    }
    else if (length(match3)==length(tofix)) {
      allmatch<-c(matched, match3)
      allpair<-rbind(pairmatch, pairm3)
    }
    else {
      # Neither strategy resolved everything: keep whichever did better,
      # then try splitting the remaining names on "and" (strategy 4).
      if(length(match3)>length(match2)){newmatched<-trymatch3}
      else {newmatched<-trymatch2}
      newtofix<-newmatched$tofix
      newpairm<-cbind(newmatched$matched, visit_vector[which(!visit_vector%in%visit_vector[newtofix])])
      matchsofar<-c(matched, newmatched$matched)
      pairsofar<-rbind(pairmatch, newpairm)
      for(i in 1:length(newtofix)){
        if(grepl("and", visit_vector[newtofix[i]])){
          tosplit<-gsub("National Park", "", visit_vector[newtofix[i]])
          splitlst<-strsplit(tosplit, "and")
          splitfix<-do.call(c, splitlst)
          trymatch4<-agrep_vec_sapply(vec=splitfix, mdtol=mdtol, lookup = npnames)
          matchsofar<-c(matchsofar, trymatch4$matched)
          pairi<-cbind(trymatch4$matched, rep(visit_vector[newtofix[i]], length(trymatch4$matched)))
          pairsofar<-rbind(pairsofar, pairi)
        }
        else {print("No and in name, not sure what to try")}
      }
      if(length(unique(matchsofar))>=length(visit_vector)) {
        allmatch<-matchsofar
        allpair<-pairsofar
      }
      else {print("Something went wrong here length not correct")}
    }
  }
  else {allmatch<-matched}
  if(rtrn=="flag") {
    # 0/1 indicator aligned with npnames.
    visitflag<-rep(0, length(npnames))
    visitflag[allmatch]<-1
    return(visitflag)
  }
  else if(rtrn=="strings") {
    return(npnames[allmatch])
  }
  else if(rtrn=="pairs") {
    allpairname<-cbind(allpair, npnames[as.numeric(allpair[,1])])
    return(allpairname)
  }
}
# Match park names in `find` against `src` after stripping the common
# "National Park"/"NP"/"Preserve" suffixes; first via fuzzy agrep, then via
# exact comparison for the leftovers. Returns a matrix pairing the matched
# src index, the original `find` string, and the src string.
# NOTE(review): the while-loop only terminates when nmatched reaches
# maxmatch -- the inner for-loop recomputes the same `nm2` on every pass, so
# if the exact comparisons never close the gap this loops forever; also
# `tofix<-tofix[-i]` removes by loop position rather than by matched value.
# TODO confirm intended semantics before relying on this function.
matchnames_np<-function(find, src){
  maxmatch<-min(length(find), length(src))
  nmatched<-0
  allmatch<-FALSE
  # Normalize both sides: drop "National Park"/"NP" and preserve suffixes.
  frmNP<-gsub("National Park", "", find)
  srmNP<-gsub("NP", "", src)
  frmP<-gsub("and Preserve", "", frmNP)
  srmP<-gsub("& PRES", "", srmNP)
  # Pass 1: fuzzy match with a fixed 10% tolerance.
  m1<-agrep_vec_sapply(frmP, mdtol=0.1, srmP)
  nmatched<-nmatched+length(m1$matched)
  matches<-m1$matched
  tofix<-m1$tofix
  allmatch<-(nmatched==maxmatch)
  allpair<-cbind(matches, find[which(!find%in%find[tofix])])
  othertrymeths<-1
  othertry<-0
  while(!allmatch | othertry<othertrymeths){
    nm2<-c()
    nmt2<-c()
    # Pass 2: exact equality on the normalized strings for unmatched entries.
    for(i in 1:length(m1$tofix)){
      #t2<-grep(frmP[m1$tofix[i]], srmP)
      t2<-which(frmP[m1$tofix[i]]==srmP)
      if(length(t2)>=1){
        tofix<-tofix[-i]
        nmp<-cbind(t2, find[which(find%in%find[m1$tofix[i]])])
        nmt2<-rbind(nmt2, nmp)
      }
      nm2<-c(nm2, t2)
      othertry<-othertry+1
    }
    nmatched<-nmatched+length(nm2)
    allmatch<-(nmatched==maxmatch)
    matches<-c(matches, nm2)
    allpair<-rbind(allpair, nmt2)
  }
  allpairname<-cbind(allpair, src[as.numeric(allpair[,1])])
  return(allpairname)
}
|
/parkstrmatch.R
|
no_license
|
sestaszak/FindYourPark
|
R
| false
| false
| 4,182
|
r
|
#read in a vector of character strings of parks visited
#output dataset with vector added 1 for visited, 0 for not
# Fuzzy-match each string in `vec` against `lookup` using agrep().
#
# Args:
#   vec    : character vector of names to match.
#   mdtol  : max.distance tolerance forwarded to agrep().
#   lookup : character vector of reference names.
# Returns a list with:
#   matched : lookup indices for inputs with exactly one hit (named by input),
#   tofix   : positions in `vec` with zero or multiple hits.
# (Name kept for callers, but the implementation now uses lapply: the old
# sapply(simplify = TRUE) collapsed the per-input hit lists into a matrix
# whenever every input happened to have the same number of hits, which
# silently corrupted the matched/tofix bookkeeping below.)
agrep_vec_sapply<-function(vec, mdtol, lookup) {
  hits <- lapply(vec, function(x) agrep(x, lookup, max.distance = mdtol))
  names(hits) <- vec
  n_hits <- vapply(hits, length, integer(1))
  md <- unlist(hits[which(n_hits == 1)])
  fx <- which(n_hits != 1)
  match_agrep <- list(matched = md, tofix = fx)
  return(match_agrep)
}
# Match a user-supplied vector of visited-park names against the global
# `npnames` lookup, trying progressively looser strategies:
#   1. direct fuzzy match, 2. append " NP", 3. abbreviate "National Park",
#   4. split names containing "and" and match each part.
# rtrn selects the output: "flag" (0/1 per npnames entry), "strings"
# (matched npnames), or "pairs" (matched index / input / npnames name).
# NOTE(review): relies on `npnames` existing in the calling environment, and
# if no fallback was needed (`allpair` never assigned) the "pairs" branch
# would fail -- TODO confirm intended usage.
match_userlist<-function(visit_vector, mdtol, rtrn) {
  trymatch1<-agrep_vec_sapply(vec=visit_vector, mdtol=mdtol, lookup = npnames)
  matched<-trymatch1$matched
  tofix<-trymatch1$tofix
  pairmatch<-cbind(matched, visit_vector[which(!visit_vector%in%visit_vector[tofix])])
  if (length(matched) != length(visit_vector)) {
    # Strategy 2: retry unmatched names with " NP" suffixed.
    trynp<-paste(visit_vector[tofix], "NP", sep=" ")
    trymatch2<-agrep_vec_sapply(vec=trynp, mdtol=mdtol, lookup = npnames)
    match2<-trymatch2$matched
    pairm2<-cbind(match2, visit_vector[which(!visit_vector%in%visit_vector[trymatch2$tofix])])
    # Strategy 3: retry the FULL vector with standard abbreviations applied.
    trynpsub<-gsub("National Park", "NP", visit_vector)
    trynpsub<-gsub("NP and Preserve", "NP & PRES", trynpsub)
    trymatch3<-agrep_vec_sapply(vec=trynpsub, mdtol=mdtol, lookup = npnames)
    match3<-trymatch3$matched
    pairm3<-cbind(match3, visit_vector[which(!visit_vector%in%visit_vector[trymatch3$tofix])])
    if (length(match2)==length(tofix)) {
      allmatch<-c(matched, match2)
      allpair<-rbind(pairmatch, pairm2)
    }
    else if (length(match3)==length(tofix)) {
      allmatch<-c(matched, match3)
      allpair<-rbind(pairmatch, pairm3)
    }
    else {
      # Neither strategy resolved everything: keep whichever did better,
      # then try splitting the remaining names on "and" (strategy 4).
      if(length(match3)>length(match2)){newmatched<-trymatch3}
      else {newmatched<-trymatch2}
      newtofix<-newmatched$tofix
      newpairm<-cbind(newmatched$matched, visit_vector[which(!visit_vector%in%visit_vector[newtofix])])
      matchsofar<-c(matched, newmatched$matched)
      pairsofar<-rbind(pairmatch, newpairm)
      for(i in 1:length(newtofix)){
        if(grepl("and", visit_vector[newtofix[i]])){
          tosplit<-gsub("National Park", "", visit_vector[newtofix[i]])
          splitlst<-strsplit(tosplit, "and")
          splitfix<-do.call(c, splitlst)
          trymatch4<-agrep_vec_sapply(vec=splitfix, mdtol=mdtol, lookup = npnames)
          matchsofar<-c(matchsofar, trymatch4$matched)
          pairi<-cbind(trymatch4$matched, rep(visit_vector[newtofix[i]], length(trymatch4$matched)))
          pairsofar<-rbind(pairsofar, pairi)
        }
        else {print("No and in name, not sure what to try")}
      }
      if(length(unique(matchsofar))>=length(visit_vector)) {
        allmatch<-matchsofar
        allpair<-pairsofar
      }
      else {print("Something went wrong here length not correct")}
    }
  }
  else {allmatch<-matched}
  if(rtrn=="flag") {
    # 0/1 indicator aligned with npnames.
    visitflag<-rep(0, length(npnames))
    visitflag[allmatch]<-1
    return(visitflag)
  }
  else if(rtrn=="strings") {
    return(npnames[allmatch])
  }
  else if(rtrn=="pairs") {
    allpairname<-cbind(allpair, npnames[as.numeric(allpair[,1])])
    return(allpairname)
  }
}
# Match park names in `find` against `src` after stripping the common
# "National Park"/"NP"/"Preserve" suffixes; first via fuzzy agrep, then via
# exact comparison for the leftovers. Returns a matrix pairing the matched
# src index, the original `find` string, and the src string.
# NOTE(review): the while-loop only terminates when nmatched reaches
# maxmatch -- the inner for-loop recomputes the same `nm2` on every pass, so
# if the exact comparisons never close the gap this loops forever; also
# `tofix<-tofix[-i]` removes by loop position rather than by matched value.
# TODO confirm intended semantics before relying on this function.
matchnames_np<-function(find, src){
  maxmatch<-min(length(find), length(src))
  nmatched<-0
  allmatch<-FALSE
  # Normalize both sides: drop "National Park"/"NP" and preserve suffixes.
  frmNP<-gsub("National Park", "", find)
  srmNP<-gsub("NP", "", src)
  frmP<-gsub("and Preserve", "", frmNP)
  srmP<-gsub("& PRES", "", srmNP)
  # Pass 1: fuzzy match with a fixed 10% tolerance.
  m1<-agrep_vec_sapply(frmP, mdtol=0.1, srmP)
  nmatched<-nmatched+length(m1$matched)
  matches<-m1$matched
  tofix<-m1$tofix
  allmatch<-(nmatched==maxmatch)
  allpair<-cbind(matches, find[which(!find%in%find[tofix])])
  othertrymeths<-1
  othertry<-0
  while(!allmatch | othertry<othertrymeths){
    nm2<-c()
    nmt2<-c()
    # Pass 2: exact equality on the normalized strings for unmatched entries.
    for(i in 1:length(m1$tofix)){
      #t2<-grep(frmP[m1$tofix[i]], srmP)
      t2<-which(frmP[m1$tofix[i]]==srmP)
      if(length(t2)>=1){
        tofix<-tofix[-i]
        nmp<-cbind(t2, find[which(find%in%find[m1$tofix[i]])])
        nmt2<-rbind(nmt2, nmp)
      }
      nm2<-c(nm2, t2)
      othertry<-othertry+1
    }
    nmatched<-nmatched+length(nm2)
    allmatch<-(nmatched==maxmatch)
    matches<-c(matches, nm2)
    allpair<-rbind(allpair, nmt2)
  }
  allpairname<-cbind(allpair, src[as.numeric(allpair[,1])])
  return(allpairname)
}
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
# Teaching app: visualize a bivariate Gaussian (and a regression model) and
# the conditional distribution of Y given a chosen x.
library(shinydashboard)
library(mvtnorm)
library(scatterplot3d)
library(ellipse)
library(plot3D)
# Plotting bounds for the x/y axes used throughout the app.
BOUND1<-5
BOUND2<-5
ui <- dashboardPage(
  dashboardHeader(title="InfoF422: Conditional probability", titleWidth = 500),
  dashboardSidebar(
    sidebarMenu(
      # Global controls shared by both tabs: sample size and 3D view angles.
      sliderInput("N",
                  "Number of samples:",
                  min = 1,
                  max = 1000,
                  value = 500,step=2),
      sliderInput("tdt",
                  "3D theta:",
                  min = -60,
                  max = 60,
                  value = 0,step=5),
      sliderInput("tdp",
                  "3D phi:",
                  min = 0,
                  max = 90,
                  value = 75,step=1),
      ## sliderInput("dx","X density:", min = 0.1, max = 0.3, value = 0.15,step=0.01),
      ## sliderInput("dy", "Y density:", min = 0.1, max = 0.3, value = 0.15,step=0.01),
      menuItem("Bivariate gaussian distribution", tabName = "Bivariatemixture", icon = icon("th")),
      menuItem("Regression function", tabName = "Regression", icon = icon("th")),
      menuItem("About", tabName = "about", icon = icon("question"))
    )
  ),
  dashboardBody(
    tabItems(
      # First tab content
      # Second tab content
      # Tab 1: bivariate Gaussian -- rotation/axis sliders parameterize the
      # covariance matrix; "x" picks the conditioning slice.
      tabItem(tabName = "Bivariatemixture",
              fluidRow(
                box(width=4,collapsible = TRUE,sliderInput("rot1","Rotation:", min = -3/2,max = 3/2,
                                                           value = -0.75),
                    sliderInput("ax11","Axis1:",min = 0.01,max = BOUND2,value = 3,step=0.05),
                    sliderInput("ax21","Axis2:", min = 0.01, max = BOUND2, value = 0.15,step=0.05),
                    sliderInput("x","x:", min = -BOUND2, max = BOUND2, value = 0.15,step=0.05),
                    textOutput("textB")),
                box(width=8,title = "3D joint density visualization",collapsible = TRUE,plotOutput("biPlotP"))),
              fluidRow( box(width=6,collapsible = TRUE,title = "Data",plotOutput("biPlotD")),
                        box(width=6,collapsible = TRUE,title = "Conditional distribution",plotOutput("biCond")))
      ), ## tabItem
      # Tab 2: regression function y = f(x) + noise; "ord" selects f,
      # "sdw" the conditional noise sd, "rx" the conditioning slice.
      tabItem(tabName = "Regression",
              fluidRow(box(width=4,collapsible = TRUE,
                           sliderInput("ord","Functions:", min = -3,max = 3,
                                       value = 1,step=1),
                           sliderInput("sdw","Cond sdev:", min = 0.5,max = 2.5,
                                       value = 1,step=0.1),
                           sliderInput("rx","x:", min = -BOUND2, max = BOUND2, value = 0.15,step=0.05)),
                       box(width=6,title = "3D joint density visualization",collapsible = TRUE,plotOutput("regrPlotP"))),## fluidRow
              fluidRow( box(width=6,collapsible = TRUE,title = "Data",plotOutput("regrPlotD")),
                        box(width=6,collapsible = TRUE,title = "Conditional distribution",plotOutput("regrCond")))
      ),
      tabItem(tabName = "about",
              fluidPage(
                includeHTML("about/about.condpro.html")
              )
      ) ## tabItem
    )
  )
) # ui
# Globals written from inside the render functions via <<- so several
# outputs can share the most recent dataset / eigen decomposition.
D<-NULL ## Univariate dataset
E<-NULL ## Bivariate eigenvalue matrix
server<-function(input, output,session) {
  set.seed(122)
  # f(x, ord): the regression function selected by the "ord" slider
  # (sin/cos variants for ord < 0, polynomials for ord > 0; 0 -> all zeros).
  f<-function(x,ord){
    f<-numeric(length(x))
    if (ord==-1)
      f<-sin(x)
    if (ord==-2)
      f<-cos(2*x)
    if (ord==-3)
      f<-cos(4*x)
    if (ord==1)
      f<-x
    if (ord==2)
      f<-x^2-2
    if (ord==3)
      f<--x^2+1
    f
  }
  # 3D surface of the bivariate Gaussian density; the red line marks the
  # conditioning slice x = input$x.
  output$biPlotP <- renderPlot({
    x <- seq(-BOUND2, BOUND2, by= .2)
    y <- x
    z<-array(0,dim=c(length(x),length(y)))
    #th : rotation angle of the first principal axis
    #ax1: length principal axis 1
    #ax2: length principal axis 2
    ax1<-input$ax11
    th=input$rot1
    ax2<-input$ax21
    Rot<-array(c(cos(th), sin(th), -sin(th), cos(th)),dim=c(2,2)); #rotation matrix
    A<-array(c(ax1, 0, 0, ax2),dim=c(2,2))
    # Covariance built from rotation and axis lengths: Sigma = R A R'.
    Sigma<-(Rot%*%A)%*%t(Rot)
    E<<-eigen(Sigma)
    for (i in 1:length(x)){
      for (j in 1:length(y)){
        z[i,j]<-dmvnorm(c(x[i],y[j]),sigma=Sigma)
      }
    }
    z[is.na(z)] <- 1
    op <- par(bg = "white")
    prob.z<-z
    # NOTE(review): here grDevices::trans3d (lowercase) is used with the
    # persp3D pmat, while regrPlotP below uses plot3D::trans3D -- confirm
    # both work with the matrix persp3D returns.
    surface<-persp3D(x, y, prob.z, theta = input$tdt, phi = input$tdp, expand = 0.5, col = "blue",facets=FALSE)
    lines (trans3d(x=input$x, y = seq(-BOUND2, BOUND2, by= .2), z = 0, pmat = surface), col = "red",lwd=3)
  })
  # Scatter of N samples from the bivariate Gaussian, with the covariance
  # ellipse and the conditioning slice overlaid; caches the sample in D.
  output$biPlotD <- renderPlot( {
    th=input$rot1
    Rot<-array(c(cos(th), sin(th), -sin(th), cos(th)),dim=c(2,2)); #rotation matrix
    A<-array(c(input$ax11, 0, 0, input$ax21),dim=c(2,2))
    Sigma<-(Rot%*%A)%*%t(Rot)
    D1=rmvnorm(input$N,sigma=Sigma)
    D<<-D1
    plot(D[,1],D[,2],xlim=c(-BOUND2,BOUND2),ylim=c(-BOUND2,BOUND2),xlab="x", ylab="y")
    lines(ellipse(Sigma))
    abline(v=input$x, col = "red",lwd=3)
  })
  # Conditional density of Y given X = input$x for the Gaussian, compared
  # with the marginal density of Y.
  output$biCond <- renderPlot( {
    th=input$rot1
    Rot<-array(c(cos(th), sin(th), -sin(th), cos(th)),dim=c(2,2)); #rotation matrix
    A<-array(c(input$ax11, 0, 0, input$ax21),dim=c(2,2))
    Sigma<-(Rot%*%A)%*%t(Rot)
    sigma2=sqrt(Sigma[2,2])
    sigma1=sqrt(Sigma[1,1])
    rho=Sigma[1,2]/(sigma1*sigma2)
    x=seq(-1.5*BOUND2, 1.5*BOUND2, by= .02)
    plot(x,dnorm(x,rho*sigma2*(input$x)/sigma1,sd=sigma2^2*(1-rho^2)),type="l",col="red",
         lwd=2,ylab="Conditional density")
    lines(x,dnorm(x,0,sd=sigma2^2))
    legend(x=BOUND2,y=1,legend=c("Conditional","Marginal"),lty=1,col=c("red","black"))
  })
  # Eigenvalue readout for the current covariance matrix.
  # NOTE(review): the reactive triggers reference input$rot/ax1/ax2, but the
  # sliders are ids rot1/ax11/ax21 -- this text likely never refreshes when
  # those sliders move; confirm and align the ids.
  output$textB <- renderText({
    input$rot
    input$ax1
    input$ax2
    paste("Eigen1=", E$values[1], "\n Eigen2=", E$values[2])
  })
  # 3D surface of the joint density implied by y = f(x) + N(0, sdw^2).
  output$regrPlotP <- renderPlot({
    x <- seq(-BOUND2, BOUND2, by= 0.1)
    y <- seq(-BOUND2, BOUND2, by= 0.1)
    z<-array(0,dim=c(length(x),length(y)))
    #th : rotation angle of the first principal axis
    #ax1: length principal axis 1
    #ax2: length principal axis 2
    muy<-f(x,ord=input$ord)
    for (i in 1:length(x)){
      for (j in 1:length(y)){
        z[i,j]<-dnorm(y[j],mean=muy[i],sd=input$sdw)
      }
    }
    z[is.na(z)] <- 1
    op <- par(bg = "white")
    prob.z<-z
    surface<-persp3D(x, y, prob.z, theta = input$tdt, phi =input$tdp, expand = 0.5, col = "blue",facets=FALSE)
    lines (trans3D(x=input$rx, y = seq(-BOUND2, BOUND2, by= .2), z = 0, pmat = surface), col = "red",lwd=3)
  })
  # Sampled (x, y) data from the regression model with the true f overlaid.
  output$regrPlotD <- renderPlot( {
    X=seq(-BOUND2, BOUND2,length.out=input$N)
    muy=f(X,ord=input$ord)
    Y=muy+rnorm(input$N,sd=input$sdw)
    D<<-cbind(X,Y)
    plot(D[,1],D[,2],xlim=c(-BOUND2,BOUND2),ylim=c(-BOUND2,BOUND2),
         xlab="x",ylab="y")
    lines(D[,1],muy, lwd=3)
    abline(v=input$rx, col = "red",lwd=3)
  })
  # Conditional density of Y given X = input$rx under the regression model.
  output$regrCond <- renderPlot( {
    th=input$rot1
    muy=f(input$rx,input$ord)
    x=seq(-1.5*BOUND2, 1.5*BOUND2, by= .02)
    plot(x,dnorm(x,mean=muy,sd=input$sdw),type="l",col="red",lwd=2,ylab="Conditional density")
  })
}
# Launch the app with the UI and server defined above.
shinyApp(ui, server)
|
/inst/shiny/condpro.R
|
no_license
|
REXCHE/gbcode
|
R
| false
| false
| 7,376
|
r
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shinydashboard)
library(mvtnorm)        # dmvnorm()/rmvnorm() for the bivariate gaussian tab
library(scatterplot3d)
library(ellipse)        # ellipse() draws covariance contours
library(plot3D)         # persp3D()/trans3D() for the 3D density surfaces
# Half-width of the plotting domain: panels span [-BOUND2, BOUND2] on each axis.
# NOTE(review): BOUND1 appears unused in this file -- confirm before removing.
BOUND1<-5
BOUND2<-5
# UI: shinydashboard layout with global sliders (sample size and the two
# persp3D viewing angles) plus three tabs: the bivariate-gaussian demo, the
# regression-function demo, and an About page.
ui <- dashboardPage(
  dashboardHeader(title="InfoF422: Conditional probability", titleWidth = 500),
  dashboardSidebar(
    sidebarMenu(
      # number of points drawn in the "Data" panels
      sliderInput("N",
                  "Number of samples:",
                  min = 1,
                  max = 1000,
                  value = 500,step=2),
      # viewing angles shared by both persp3D surfaces
      sliderInput("tdt",
                  "3D theta:",
                  min = -60,
                  max = 60,
                  value = 0,step=5),
      sliderInput("tdp",
                  "3D phi:",
                  min = 0,
                  max = 90,
                  value = 75,step=1),
      ## sliderInput("dx","X density:", min = 0.1, max = 0.3, value = 0.15,step=0.01),
      ## sliderInput("dy", "Y density:", min = 0.1, max = 0.3, value = 0.15,step=0.01),
      menuItem("Bivariate gaussian distribution", tabName = "Bivariatemixture", icon = icon("th")),
      menuItem("Regression function", tabName = "Regression", icon = icon("th")),
      menuItem("About", tabName = "about", icon = icon("question"))
    )
  ),
  dashboardBody(
    tabItems(
      # First tab content
      # Second tab content
      tabItem(tabName = "Bivariatemixture",
              fluidRow(
                # covariance controls: rotation of the first principal axis
                # plus the two axis lengths; "x" is the conditioning value
                # (the red slice in the plots)
                box(width=4,collapsible = TRUE,sliderInput("rot1","Rotation:", min = -3/2,max = 3/2,
                                                           value = -0.75),
                    sliderInput("ax11","Axis1:",min = 0.01,max = BOUND2,value = 3,step=0.05),
                    sliderInput("ax21","Axis2:", min = 0.01, max = BOUND2, value = 0.15,step=0.05),
                    sliderInput("x","x:", min = -BOUND2, max = BOUND2, value = 0.15,step=0.05),
                    textOutput("textB")),
                box(width=8,title = "3D joint density visualization",collapsible = TRUE,plotOutput("biPlotP"))),
              fluidRow( box(width=6,collapsible = TRUE,title = "Data",plotOutput("biPlotD")),
                        box(width=6,collapsible = TRUE,title = "Conditional distribution",plotOutput("biCond")))
      ), ## tabItem
      tabItem(tabName = "Regression",
              fluidRow(box(width=4,collapsible = TRUE,
                           # "ord" selects one of the six test functions in f();
                           # "sdw" is the conditional noise sdev; "rx" the slice
                           sliderInput("ord","Functions:", min = -3,max = 3,
                                       value = 1,step=1),
                           sliderInput("sdw","Cond sdev:", min = 0.5,max = 2.5,
                                       value = 1,step=0.1),
                           sliderInput("rx","x:", min = -BOUND2, max = BOUND2, value = 0.15,step=0.05)),
                       box(width=6,title = "3D joint density visualization",collapsible = TRUE,plotOutput("regrPlotP"))),## fluidRow
              fluidRow( box(width=6,collapsible = TRUE,title = "Data",plotOutput("regrPlotD")),
                        box(width=6,collapsible = TRUE,title = "Conditional distribution",plotOutput("regrCond")))
      ),
      tabItem(tabName = "about",
              fluidPage(
                includeHTML("about/about.condpro.html")
              )
      ) ## tabItem
    )
  )
) # ui
D <- NULL ## dataset shared between render functions (assigned with <<-)
E <- NULL ## eigen-decomposition of the covariance (set in biPlotP, read in textB)

# Server: builds the joint density surface, a sampled data cloud, and the
# conditional distribution for both the bivariate-gaussian and regression tabs.
server <- function(input, output, session) {
  set.seed(122)

  # Regression-function menu: `ord` in {-3..-1, 1..3} selects a test function.
  f <- function(x, ord) {
    f <- numeric(length(x))
    if (ord == -1)
      f <- sin(x)
    if (ord == -2)
      f <- cos(2 * x)
    if (ord == -3)
      f <- cos(4 * x)
    if (ord == 1)
      f <- x
    if (ord == 2)
      f <- x^2 - 2
    if (ord == 3)
      f <- -x^2 + 1
    f
  }

  # 3D joint density of the bivariate gaussian, with a red slice at x = input$x.
  output$biPlotP <- renderPlot({
    x <- seq(-BOUND2, BOUND2, by = .2)
    y <- x
    z <- array(0, dim = c(length(x), length(y)))
    # th : rotation angle of the first principal axis
    # ax1: length principal axis 1
    # ax2: length principal axis 2
    ax1 <- input$ax11
    th <- input$rot1
    ax2 <- input$ax21
    Rot <- array(c(cos(th), sin(th), -sin(th), cos(th)), dim = c(2, 2)) # rotation matrix
    A <- array(c(ax1, 0, 0, ax2), dim = c(2, 2))
    Sigma <- (Rot %*% A) %*% t(Rot)
    E <<- eigen(Sigma) # shared with output$textB
    for (i in 1:length(x)) {
      for (j in 1:length(y)) {
        z[i, j] <- dmvnorm(c(x[i], y[j]), sigma = Sigma)
      }
    }
    z[is.na(z)] <- 1
    op <- par(bg = "white")
    prob.z <- z
    surface <- persp3D(x, y, prob.z, theta = input$tdt, phi = input$tdp,
                       expand = 0.5, col = "blue", facets = FALSE)
    # project the conditioning line x = input$x onto the 3D axes
    lines(trans3d(x = input$x, y = seq(-BOUND2, BOUND2, by = .2), z = 0,
                  pmat = surface), col = "red", lwd = 3)
  })

  # Sampled data cloud from the same covariance, with the covariance ellipse.
  output$biPlotD <- renderPlot({
    th <- input$rot1
    Rot <- array(c(cos(th), sin(th), -sin(th), cos(th)), dim = c(2, 2)) # rotation matrix
    A <- array(c(input$ax11, 0, 0, input$ax21), dim = c(2, 2))
    Sigma <- (Rot %*% A) %*% t(Rot)
    D1 <- rmvnorm(input$N, sigma = Sigma)
    D <<- D1
    plot(D[, 1], D[, 2], xlim = c(-BOUND2, BOUND2), ylim = c(-BOUND2, BOUND2),
         xlab = "x", ylab = "y")
    lines(ellipse(Sigma))
    abline(v = input$x, col = "red", lwd = 3)
  })

  # Conditional density of Y given X = input$x vs the marginal of Y.
  output$biCond <- renderPlot({
    th <- input$rot1
    Rot <- array(c(cos(th), sin(th), -sin(th), cos(th)), dim = c(2, 2)) # rotation matrix
    A <- array(c(input$ax11, 0, 0, input$ax21), dim = c(2, 2))
    Sigma <- (Rot %*% A) %*% t(Rot)
    sigma2 <- sqrt(Sigma[2, 2])
    sigma1 <- sqrt(Sigma[1, 1])
    rho <- Sigma[1, 2] / (sigma1 * sigma2)
    x <- seq(-1.5 * BOUND2, 1.5 * BOUND2, by = .02)
    # BUG FIX: dnorm() takes a standard deviation. The conditional variance is
    # sigma2^2 * (1 - rho^2), so sd = sigma2 * sqrt(1 - rho^2); the original
    # passed the variance.
    plot(x, dnorm(x, rho * sigma2 * (input$x) / sigma1,
                  sd = sigma2 * sqrt(1 - rho^2)),
         type = "l", col = "red", lwd = 2, ylab = "Conditional density")
    # BUG FIX: the marginal sd of Y is sigma2, not sigma2^2.
    lines(x, dnorm(x, 0, sd = sigma2))
    legend(x = BOUND2, y = 1, legend = c("Conditional", "Marginal"),
           lty = 1, col = c("red", "black"))
  })

  # Eigenvalues of the current covariance matrix.
  output$textB <- renderText({
    # Reactive dependencies on the sliders that define Sigma.
    # BUG FIX: the sliders are named rot1/ax11/ax21 (not rot/ax1/ax2), so the
    # text never refreshed when they moved.
    input$rot1
    input$ax11
    input$ax21
    paste("Eigen1=", E$values[1], "\n Eigen2=", E$values[2])
  })

  # 3D joint density for the regression tab: p(x, y) with y ~ N(f(x), sdw^2).
  output$regrPlotP <- renderPlot({
    x <- seq(-BOUND2, BOUND2, by = 0.1)
    y <- seq(-BOUND2, BOUND2, by = 0.1)
    z <- array(0, dim = c(length(x), length(y)))
    # conditional mean of y at each x
    muy <- f(x, ord = input$ord)
    for (i in 1:length(x)) {
      for (j in 1:length(y)) {
        z[i, j] <- dnorm(y[j], mean = muy[i], sd = input$sdw)
      }
    }
    z[is.na(z)] <- 1
    op <- par(bg = "white")
    prob.z <- z
    surface <- persp3D(x, y, prob.z, theta = input$tdt, phi = input$tdp,
                       expand = 0.5, col = "blue", facets = FALSE)
    # trans3D (plot3D) projects through the perspective matrix returned above
    lines(trans3D(x = input$rx, y = seq(-BOUND2, BOUND2, by = .2), z = 0,
                  pmat = surface), col = "red", lwd = 3)
  })

  # Sampled (x, y) pairs around the regression function.
  output$regrPlotD <- renderPlot({
    X <- seq(-BOUND2, BOUND2, length.out = input$N)
    muy <- f(X, ord = input$ord)
    Y <- muy + rnorm(input$N, sd = input$sdw)
    D <<- cbind(X, Y)
    plot(D[, 1], D[, 2], xlim = c(-BOUND2, BOUND2), ylim = c(-BOUND2, BOUND2),
         xlab = "x", ylab = "y")
    lines(D[, 1], muy, lwd = 3)
    abline(v = input$rx, col = "red", lwd = 3)
  })

  # Conditional density of Y given X = input$rx for the regression model.
  output$regrCond <- renderPlot({
    # (removed an unused `th = input$rot1` that created a spurious reactive
    # dependency on the bivariate tab's rotation slider)
    muy <- f(input$rx, input$ord)
    x <- seq(-1.5 * BOUND2, 1.5 * BOUND2, by = .02)
    plot(x, dnorm(x, mean = muy, sd = input$sdw), type = "l", col = "red",
         lwd = 2, ylab = "Conditional density")
  })
}
shinyApp(ui, server)
|
###############################
#                             #
# TELSTRA TELECOMMUNICATIONS  #
#                             #
###############################
# Predicts fault severity (classes 0/1/2) per Telstra network record with a
# random forest and writes a Kaggle probability submission.
# ASSUMPTION: The required data files are downloaded from competition site and made available locally.
# COMPETITION SITE URL: https://www.kaggle.com/c/telstra-recruiting-network/
# Perform house-keeping
# NOTE(review): rm(list=ls()) and a hard-coded setwd() are discouraged in
# shared scripts; kept here to reproduce the original run.
rm(list=ls())
gc()
# Set working directory
setwd("C:/home/kaggle/Telstra")
# Load required Packages
library(reshape2)
library(methods)
library(caret)
library(randomForest)
# Set seed for reproducibility
set.seed(220469)
# Load Train dataset and extract the numeric part of the 'location' feature
# (raw values look like "location 123")
len <- nchar('location ')
train <- read.csv('train.csv',header=TRUE,stringsAsFactors = F)
train$location <- substr(train$location, len + 1, len + 4)
train$location <- as.numeric(gsub(' ', '', train$location))
# Load Test dataset and rearrange 'Location' feature the same way
test <- read.csv('test.csv',header=TRUE,stringsAsFactors = F) # 11171 x 2
test$location <- substr(test$location, len + 1, len + 4)
test$location <- as.numeric(gsub(' ', '', test$location))
# Load and expand 'Resource Type' feature (long format -> one column per type)
len <- nchar('resource_type ')
resource <- read.csv('resource_type.csv', header=TRUE, stringsAsFactors = F)
resource$value <- as.numeric(substr(resource$resource_type, len + 1, len + 2))
resource$resource_type <- gsub(' ', '.', resource$resource_type)
resource_df <- dcast(resource, id ~ resource_type, value.var = "value")
# Replace NAs with 0 in 'Resource Type' feature
resource_df[is.na(resource_df)] <- 0
# Load and expand 'Event Type' feature
len <- nchar('event_type ')
event <- read.csv('event_type.csv', header=TRUE, stringsAsFactors = F)
event$value <- as.numeric(substr(event$event_type, len + 1, len + 2))
event$event_type <- gsub(' ', '.', event$event_type)
event_df <- dcast(event, id ~ event_type, value.var = "value")
# Replace NAs with 0 in 'Event Type' feature
event_df[is.na(event_df)] <- 0
# Load and expand 'Severity Type' feature
len <- nchar('severity_type ')
severity <- read.csv('severity_type.csv', header=TRUE, stringsAsFactors = F) # 18552 x 2
severity$value <- as.numeric(substr(severity$severity_type, len + 1, len + 2))
severity$severity_type <- gsub(' ', '.', severity$severity_type)
severity_df <- dcast(severity, id ~ severity_type, value.var = "value") # 18552 x 6
# Replace NAs with 0 in 'Severity Type' feature
severity_df[is.na(severity_df)] <- 0
# Merge Train data with 'Resource Type', 'Event Type', 'Severity' data
train_res <- merge(train, resource_df, by="id")
train_res_evn <- merge(train_res, event_df, by="id")
train_res_evn_sev <- merge(train_res_evn, severity_df, by="id")
trainold <- train
train <- train_res_evn_sev
rm(train_res_evn_sev)
# Merge Test data with 'Resource Type', 'Event Type', 'Severity' data
test_res <- merge(test, resource_df, by="id")
test_res_evn <- merge(test_res, event_df, by="id")
test_res_evn_sev <- merge(test_res_evn, severity_df, by="id")
testold <- test
test <- test_res_evn_sev
rm(test_res_evn_sev)
# Remove ID fields (keep test ids for the submission file)
testid <- test$id
train$id <- NULL
test$id <- NULL
# Remove unused 'Event Type' features
train$event_type.17 <- NULL
train$event_type.33 <- NULL
train$event_type.4 <- NULL
train$event_type.48 <- NULL
train$event_type.52 <- NULL
test$event_type.17 <- NULL
test$event_type.33 <- NULL
test$event_type.4 <- NULL
test$event_type.48 <- NULL
test$event_type.52 <- NULL
# Extract Target from train dataset
trainpredictor <- as.factor(train$fault_severity)
# Change Target from Numeric to Alphanumeric - X0, X1, X2
levels(trainpredictor) <- make.names(levels(factor(trainpredictor)))
# Remove Target from Train dataset it got extracted already
train$fault_severity <- NULL
# Create 'Train Control' object
# NOTE(review): trainControl()/expand.grid() are caret::train() arguments.
# They are silently swallowed by randomForest()'s `...` below and have NO
# effect (neither mtry nor shrinkage is applied; no cross-validation runs).
# Either switch to caret::train(method = "rf", ...) or pass mtry= directly.
traincontrol <- trainControl(method = 'repeatedcv'
, number = 10 # No of folds for Cross-validation
, repeats = 8 # How many times want to repeat the folds
, verboseIter = FALSE
, returnResamp = 'all' # Performance measures to save
, classProbs = TRUE # For multi-class probability
)
# Create 'Tune Grid' Object
# NOTE(review): `shrinkage` is a gbm parameter, not a randomForest one.
tunegrid <- expand.grid(mtry = 5
, shrinkage = 0.01
)
# Buid Random Forest Model
rfModel <- randomForest(x = train
, y = trainpredictor
, ntree = 1200
, replace = FALSE
, do.trace = TRUE
, trControl = traincontrol
, tuneGrid = tunegrid
, importance = TRUE
)
# Predict class probabilities for Test dataset (one column per class)
ypred <- predict(rfModel, test, type="prob")
# Convert predicted probabilities to Decimal from Scientific notation
# NOTE(review): format() turns the numeric matrix into a character matrix;
# acceptable here since it is only written to CSV.
ypred <- format(ypred, scientific=FALSE)
# Create Submission file
colnames(ypred) <- paste("predict_", c(0, 1, 2), sep="")
resp <- data.frame(id = testid, ypred)
write.csv(resp, 'Submission-3-RandomForest.csv', row.names=F, quote = F)
|
/Telstra/3-RandomForest.R
|
no_license
|
socratesk/kaggle
|
R
| false
| false
| 4,965
|
r
|
###############################
# #
# TELSTRA TELECOMMUNICATIONS #
# #
###############################
# ASSUMPTION: The required data files are downloaded from competition site and made available locally.
# COMPETITION SITE URL: https://www.kaggle.com/c/telstra-recruiting-network/
# Perform house-keeping
rm(list=ls())
gc()
# Set working directory
setwd("C:/home/kaggle/Telstra")
# Load required Packages
library(reshape2)
library(methods)
library(caret)
library(randomForest)
# Set seed for reproducibility
set.seed(220469)
# Load Train dataset and rearrange 'Location' feature
len <- nchar('location ')
train <- read.csv('train.csv',header=TRUE,stringsAsFactors = F)
train$location <- substr(train$location, len + 1, len + 4)
train$location <- as.numeric(gsub(' ', '', train$location))
# Load Test dataset and rearrange 'Location' feature
test <- read.csv('test.csv',header=TRUE,stringsAsFactors = F) # 11171 x 2
test$location <- substr(test$location, len + 1, len + 4)
test$location <- as.numeric(gsub(' ', '', test$location))
# Load and expand 'Resource Type' feature
len <- nchar('resource_type ')
resource <- read.csv('resource_type.csv', header=TRUE, stringsAsFactors = F)
resource$value <- as.numeric(substr(resource$resource_type, len + 1, len + 2))
resource$resource_type <- gsub(' ', '.', resource$resource_type)
resource_df <- dcast(resource, id ~ resource_type, value.var = "value")
# Replace NAs to -1 in 'Resource Type' feature
resource_df[is.na(resource_df)] <- 0
# Load and expand 'Event Type' feature
len <- nchar('event_type ')
event <- read.csv('event_type.csv', header=TRUE, stringsAsFactors = F)
event$value <- as.numeric(substr(event$event_type, len + 1, len + 2))
event$event_type <- gsub(' ', '.', event$event_type)
event_df <- dcast(event, id ~ event_type, value.var = "value")
# Replace NAs to -1 in 'EventType' feature
event_df[is.na(event_df)] <- 0
# Load and expand 'Severity Type' feature
len <- nchar('severity_type ')
severity <- read.csv('severity_type.csv', header=TRUE, stringsAsFactors = F) # 18552 x 2
severity$value <- as.numeric(substr(severity$severity_type, len + 1, len + 2))
severity$severity_type <- gsub(' ', '.', severity$severity_type)
severity_df <- dcast(severity, id ~ severity_type, value.var = "value") # 18552 x 6
# Replace NAs to -1 in 'EventType' feature
severity_df[is.na(severity_df)] <- 0
# Merge Train data with 'Resource Type', 'Event Type', 'Severity' data
train_res <- merge(train, resource_df, by="id")
train_res_evn <- merge(train_res, event_df, by="id")
train_res_evn_sev <- merge(train_res_evn, severity_df, by="id")
trainold <- train
train <- train_res_evn_sev
rm(train_res_evn_sev)
# Merge Test data with 'Resource Type', 'Event Type', 'Severity' data
test_res <- merge(test, resource_df, by="id")
test_res_evn <- merge(test_res, event_df, by="id")
test_res_evn_sev <- merge(test_res_evn, severity_df, by="id")
testold <- test
test <- test_res_evn_sev
rm(test_res_evn_sev)
# Remove ID fields
testid <- test$id
train$id <- NULL
test$id <- NULL
# Remove unused 'Event Type' features
train$event_type.17 <- NULL
train$event_type.33 <- NULL
train$event_type.4 <- NULL
train$event_type.48 <- NULL
train$event_type.52 <- NULL
test$event_type.17 <- NULL
test$event_type.33 <- NULL
test$event_type.4 <- NULL
test$event_type.48 <- NULL
test$event_type.52 <- NULL
# Extract Target from train dataset
trainpredictor <- as.factor(train$fault_severity)
# Change Target from Numeric to Alphanumeric - X0, X1, X2
levels(trainpredictor) <- make.names(levels(factor(trainpredictor)))
# Remove Target from Train dataset it got extracted already
train$fault_severity <- NULL
# Create 'Train Control' object
traincontrol <- trainControl(method = 'repeatedcv'
, number = 10 # No of folds for Cross-validation
, repeats = 8 # How many times want to repeat the folds
, verboseIter = FALSE
, returnResamp = 'all' # Performance measures to save
, classProbs = TRUE # For multi-class probability
)
# Create 'Tune Grid' Object
tunegrid <- expand.grid(mtry = 5
, shrinkage = 0.01
)
# Buid Random Forest Model
rfModel <- randomForest(x = train
, y = trainpredictor
, ntree = 1200
, replace = FALSE
, do.trace = TRUE
, trControl = traincontrol
, tuneGrid = tunegrid
, importance = TRUE
)
# Predict probabilities for Test dataset
ypred <- predict(rfModel, test, type="prob")
# Convert predicted probabilities to Decimal from Scientific notation
ypred <- format(ypred, scientific=FALSE)
# Create Submission file
colnames(ypred) <- paste("predict_", c(0, 1, 2), sep="")
resp <- data.frame(id = testid, ypred)
write.csv(resp, 'Submission-3-RandomForest.csv', row.names=F, quote = F)
|
# cm011 class participation: writing/reading data with readr, here, and readxl.
library(tidyverse)
library(gapminder)
# paired programming
# write the gapminder data set to a CSV
write_csv(gapminder, './gapminder.csv') # the dot adds the current working directory! So tidy
View(gapminder) # look at the data set like a spreadsheet
# mean life expectancy per continent
gapminder_sum <- gapminder %>%
  group_by(continent) %>%
  summarize(ave_lifeExp = mean(lifeExp))
View(gapminder_sum)
# exporting CSV files helps in data sharing between software
# also -- incorporate version control into the whole thing (git)
# plot
gapminder_sum %>%
  ggplot(aes(x = continent, y = ave_lifeExp)) +
  geom_point() +
  theme_bw()
# bad practices
# using 'remove(list = (ls))' removes all your objects but doesn't detach any packages
# don't use remove(list = (ls))
# don't use "setwd()"
#####
# package "here"
# install.packages("here")
library(here)
# instead of copying your directory and switching all back slashes to forward slashes, use here
# write_csv(here::here(????))
# sets a working directory without slash preference...
# I don't understand why you would use this... just keep all work in one project folder?
# grabbing URL
# URL for greatest givers (XLS file from cloud):
# define URL as an object
data_url <- "http://gattonweb.uky.edu/sheather/book/docs/datasets/GreatestGivers.xls"
# read_csv can directly import URLs
# read_excel requires that ou first download the file
# set file name
file_name <- basename(data_url)
# this is the "old" way...
#download.file(url = data_url, destfile = paste('./cm011-datasets/', file_name))
# this is the "new" way...
#download.file(url = data_url, destfile = here::here("cm011-datasets", file_name))
### --- these don't work because the file was corrupted for Windows users :(
### --- download the file normally and save to folder
# read it in?
library(readxl)
# BUG FIX: trim_ws is a read_excel() argument, not a here::here() one.
# Passing it to here() appended "TRUE" as a path component and broke the path.
philanthropists <- read_excel(here::here("cm011-datasets", file_name),
                              trim_ws = TRUE)
# trim white space makes numbers numbers
# may not trim leading white space... although it's supposed to
## -------- reading in XLS
mri_file <- here::here("cm011-datasets", "Firas-MRI.xls")
mri <- read_excel(mri_file)
head(mri)
# restrict to the rectangle that actually holds data
mri <- read_excel(mri_file, range = "A1:L12")
View(mri)
# end. of. class.
# this lecture was very confusing to me. stressful. byyyyyeeee
|
/cm011.R
|
no_license
|
HJMcSorley/STAT545-participation
|
R
| false
| false
| 2,272
|
r
|
library(tidyverse)
library(gapminder)
# paired programming
# write the gapminder data set to a CSV
write_csv(gapminder, './gapminder.csv') # the dot adds the current working directory! So tidy
View(gapminder) # look at the data set like a spreadsheet
gapminder_sum <- gapminder %>%
group_by(continent) %>%
summarize(ave_lifeExp = mean(lifeExp))
View(gapminder_sum)
# exporting CSV files helps in data sharing between software
# also -- incorporate version control into the whole thing (git)
# plot
gapminder_sum %>%
ggplot(aes(x = continent, y = ave_lifeExp)) +
geom_point() +
theme_bw()
# bad practices
# using 'remove(list = (ls))' removes all your objects but doesn't detach any packages
# don't use remove(list = (ls))
# don't use "setwd()"
#####
# package "here"
# install.packages("here")
library(here)
# instead of copying your directory and switching all back slashes to forward slashes, use here
# write_csv(here::here(????))
# sets a working directory without slash preference...
# I don't understand why you would use this... just keep all work in one project folder?
# grabbing URL
# URL for greatest givers (XLS file from cloud):
# define URL as an object
data_url <- "http://gattonweb.uky.edu/sheather/book/docs/datasets/GreatestGivers.xls"
# read_csv can directly import URLs
# read_excel requires that ou first download the file
# set file name
file_name <- basename(data_url)
# this is the "old" way...
#download.file(url = data_url, destfile = paste('./cm011-datasets/', file_name))
# this is the "new" way...
#download.file(url = data_url, destfile = here::here("cm011-datasets", file_name))
### --- these don't work because the file was corrupted for Windows users :(
### --- download the file normally and save to folder
# read it in?
library(readxl)
philanthropists <- read_excel((here::here("cm011-datasets", file_name, trim_ws = TRUE)))
# trim white space makes numbers numbers
# may not trim leading white space... although it's supposed to
## -------- reading in XLS
mri_file <- here::here("cm011-datasets", "Firas-MRI.xls")
mri <- read_excel(mri_file)
head(mri)
mri <- read_excel(mri_file, range = "A1:L12")
View(mri)
# end. of. class.
# this lecture was very confusing to me. stressful. byyyyyeeee
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(countrycode)
library(plotly)
library(dplyr)
library(tidyr)
source("PopData.R") # provides `full`: life-expectancy table with FIPS codes
# Server: draws a dumbbell plot of male vs female life expectancy at birth for
# every country in the region chosen via input$region.
server <- function(input, output) {
  # Life-expectancy rows for the selected region, sorted by female value.
  # NOTE(review): `countrycode_data`/`fips104` belong to old countrycode
  # releases (replaced by `codelist`/`fips` in countrycode >= 1.0) -- confirm
  # the installed package version.
  regiondf <- reactive({
    reg <- countrycode_data[countrycode_data$region == input$region, ]
    fips <- reg$fips104
    sub <- full %>%
      filter(FIPS %in% fips) %>%
      rename(Male = E0_M, Female = E0_F) %>%
      arrange(Female)
    sub
  })
  output$dumbbell <- renderPlotly({
    regiondf() %>%
      # long format: one row per country+sex so sexes become marker groups
      gather(Sex, value, Male, Female) %>%
      # gray connector segment per country, then colored markers per sex
      # (legacy plotly NSE interface: bare column names, `group=` argument)
      plot_ly(x = value, y = NAME, mode = 'lines',
              group = NAME, showlegend = FALSE, line = list(color = 'gray'),
              hovermode = FALSE, hoverinfo = 'none') %>%
      add_trace(x = value, y = NAME, color = Sex, mode = 'markers',
                colors = c('darkred', 'navy'), marker = list(size = 10)) %>%
      layout(xaxis = list(title = 'Life expectancy at birth'),
             yaxis = list(title = ''),
             margin = list(l = 120))
  })
}
|
/server.R
|
no_license
|
ez3804/Life_Expectancy
|
R
| false
| false
| 1,631
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(countrycode)
library(plotly)
library(dplyr)
library(tidyr)
source("PopData.R")
server <- function(input, output) {
regiondf <- reactive({
reg <- countrycode_data[countrycode_data$region == input$region, ]
fips <- reg$fips104
sub <- full %>%
filter(FIPS %in% fips) %>%
rename(Male = E0_M, Female = E0_F) %>%
arrange(Female)
sub
})
output$dumbbell <- renderPlotly({
regiondf() %>%
gather(Sex, value, Male, Female) %>%
plot_ly(x = value, y = NAME, mode = 'lines',
group = NAME, showlegend = FALSE, line = list(color = 'gray'),
hovermode = FALSE, hoverinfo = 'none') %>%
add_trace(x = value, y = NAME, color = Sex, mode = 'markers',
colors = c('darkred', 'navy'), marker = list(size = 10)) %>%
layout(xaxis = list(title = 'Life expectancy at birth'),
yaxis = list(title = ''),
margin = list(l = 120))
})
}
|
# CLI: Rscript methCall_merge.R <output.rds> <treatment> <mincov> <in1.rds> [in2.rds ...]
args <- commandArgs(TRUE)
library("methylKit")
saveRDS(args, "~/arg.RDS") # debugging aid: dump the raw CLI arguments
output <- args[1]
# NOTE(review): commandArgs() yields character strings; `treatment` and
# `mincov` are never converted with as.numeric() -- confirm downstream use.
treatment <- args[2]
mincov <- args[3]
input <- args[4:length(args)] # one RDS file per methylRaw chunk
print(output)
print(class(input))
#-------------------------------------------------------------------
#' Combine multiple methylRaw objects into a methylRawList object
#'
#' Each input object is first filtered to rows with coverage >= min.cov,
#' then the filtered objects are wrapped in a methylRawList with the given
#' treatment vector attached.
#'
#' @param list.of.methylRaw a list of methylRaw objects from the methylKit package
#' @param treatment a numeric vector indicating treatments (one per object)
#' @param min.cov a number indicating a minimum coverage
#' @return a methylRawList of the coverage-filtered inputs
combine2methylRawList <- function(list.of.methylRaw, treatment, min.cov) {
  ## check if treatment has same length as number of inputs
  if (length(list.of.methylRaw) != length(treatment))
    stop("Treatment vector doesnt have the same length as list of methylRaw objects.")
  ## check if input is really of type methylRaw
  if (!all(vapply(list.of.methylRaw,
                  function(x) inherits(x, "methylRaw"),
                  logical(1))))
    stop("Input objects are not methylRaw objects.")
  ## drop rows below the minimum coverage
  ## BUG FIX: previously indexed the global `rds.objs` instead of the
  ## `list.of.methylRaw` argument, so the function only worked by accident
  ## when that global happened to exist.
  list.of.methylRaw.mincov = lapply(seq_along(list.of.methylRaw), function(i){
    # column 5 of a methylRaw's data slot is the coverage column
    which.gtmincov = which(list.of.methylRaw[[i]][[5]] >= min.cov)
    list.of.methylRaw[[i]][which.gtmincov, ]
  })
  ## merge
  mrl <- new("methylRawList", list.of.methylRaw.mincov)
  mrl@treatment <- treatment
  mrl
}
# Stitch a list of methylRaw objects (e.g. per-chromosome chunks of one
# sample) into a single position-sorted methylRaw object.
#
# list_methylRaw: list of methylRaw objects to combine
# patientid:      sample id recorded on the returned object
# cores:          number of cores used by mclapply
stich_list_of_methylRaw2methylRaw = function(list_methylRaw,
                                             patientid,
                                             cores=20){
  require(methylKit)
  require(data.table)
  require(parallel) # mclapply
  ## BUG FIX: previously re-read RDS files via an undefined global `rdsfiles`
  ## and ignored the `list_methylRaw` argument; the objects are already in
  ## memory, so convert them directly.
  my.df.list = mclapply(list_methylRaw,
                        function(x) data.table(getData(x)),
                        mc.cores=cores)
  # rbindlist is much faster than do.call("rbind", ...) for many tables
  dt <- rbindlist(my.df.list)
  dt.ordered=dt[order(chr,start,decreasing=FALSE),]
  obj=new("methylRaw", dt.ordered,
          sample.id=patientid,
          assembly='hg19', ########### TODO: do not hard-code the assembly
          context='CpG',
          resolution='base')
  return( obj )
}
# Read every per-chunk methylRaw object from disk.
rds.objs = lapply(input, readRDS)
# NOTE(review): the second parameter of stich_list_of_methylRaw2methylRaw is
# `patientid` (stored as sample.id) and the third is `cores`, yet `treatment`
# and `mincov` are passed here -- almost certainly unintended; confirm.
methRawList.obj = stich_list_of_methylRaw2methylRaw(rds.objs, treatment, mincov)
# TODOOOOOOOOOOO
saveRDS(methRawList.obj, file=output)
|
/Scripts/methCall_merge.R
|
no_license
|
katwre/makeWGBSnake
|
R
| false
| false
| 2,295
|
r
|
args <- commandArgs(TRUE)
library("methylKit")
saveRDS(args, "~/arg.RDS")
output <- args[1]
treatment <- args[2]
mincov <- args[3]
input <- args[4:length(args)]
print(output)
print(class(input))
#-------------------------------------------------------------------
#' Combine multiple methylRaw objects into a methylRawList object
#'
#' @param list.of.methylRaw a list of methylRaw objects from the methylKit package
#' @param treatment a numeric vector indicating treaments
#' @param min.cov a number indicating a minimum coverage
combine2methylRawList <- function(list.of.methylRaw, treatment, min.cov) {
## check if treatment has same length as number of inputs
if(length(list.of.methylRaw)!=length(treatment))
stop("Treatment vector doesnt have the same length as list of methylRaw objects.")
## check if input is really of type methylRaw
if(!all(sapply(list.of.methylRaw, function(x) class(x)=="methylRaw")))
stop("Input objects are not methylRaw objects.")
## remove data beyond min coverage
list.of.methylRaw.mincov = lapply(1:length(list.of.methylRaw), function(i){
#which.gtmincov = which( getData(rds.objs[[i]])$coverage >= min.cov )
which.gtmincov = which( rds.objs[[i]][[5]] >= min.cov )
rds.objs[[i]][which.gtmincov,]
})
## merge
mrl <- new("methylRawList", list.of.methylRaw.mincov)
mrl@treatment <- treatment
mrl
}
stich_list_of_methylRaw2methylRaw = function(list_methylRaw,
patientid,
cores=20){
require(methylKit)
require(data.table)
a=mclapply(rdsfiles, readRDS, mc.cores=cores)
my.df.list = mclapply(a, function(x) data.table(getData(x)), mc.cores=cores)
# adata = do.call("rbind", my.df.list) # too slow, use data.table instead
dt <- rbindlist(my.df.list)
dt.ordered=dt[order(chr,start,decreasing=FALSE),]
obj=new("methylRaw", dt.ordered,
sample.id=patientid,
assembly='hg19', ########### TODO:
context='CpG',
resolution='base')
return( obj )
}
rds.objs = lapply(input, readRDS)
methRawList.obj = stich_list_of_methylRaw2methylRaw(rds.objs, treatment, mincov)
# TODOOOOOOOOOOO
saveRDS(methRawList.obj, file=output)
|
# Visualize how candidate drugs reverse the disease gene-expression signature:
# picks the top sRGES drugs, selects one representative LINCS signature per
# drug (closest to the per-drug median RGES), and draws a ranked heatmap of
# disease vs drug expression.
# NOTE(review): relies on globals `outputFolder` and `pipelineDataFolder`
# being defined by the calling pipeline script -- confirm before running
# standalone.
library(pheatmap)
library(dplyr)
library("gplots")
library("ggplot2")
library("RColorBrewer")
lincs_drug_prediction = read.csv(paste(outputFolder, "/all_lincs_score.csv", sep=""),stringsAsFactors = F)
dz_gene_ids = read.csv(paste(outputFolder, "/dz_sig_used.csv", sep=""),stringsAsFactors = F)
# NOTE(review): this path uses paste0 without a "/" separator, unlike the two
# reads above -- works only if outputFolder ends with "/"; confirm.
dz_signature = read.csv(paste0(outputFolder,'dz_signature.csv'),stringsAsFactors = F)
dz_signature = dz_signature %>% filter(gene %in% dz_gene_ids$gene)
dz_sig = dz_signature %>% select(gene,log2FoldChange)
#drugs to visualize
sRGES = read.csv(paste(outputFolder, "/sRGES.csv", sep=""))
# choose top 20 common drugs: seen in >1 signature and not unnamed
# BRD-/SA- library compounds
top_drugs = as.character(unique(sRGES$pert_iname[sRGES$n > 1 & !sRGES$pert_iname %in% sRGES$pert_iname[grep("BRD-|SA-", sRGES$pert_iname)]][1:20]))
##########
#visualized gene reversed
#only pick the signatures from close to the median
#cell_lines = read.csv(paste("../table/", cancer, "_cell_lines.csv", sep=""))
#lincs_drug_prediction_subset = subset(lincs_drug_prediction, cell_id %in% cell_lines$LINCS, select=c("id", "RGES", "pert_iname", "pert_dose", "pert_time"))
lincs_drug_prediction$RGES = lincs_drug_prediction$cmap_score
lincs_drug_prediction_subset = lincs_drug_prediction[lincs_drug_prediction$pert_iname %in% top_drugs,]
###selecting median still sounds weird... let's keep all signatures
# per-drug median RGES, then keep the single instance closest to that median
drug_cmap_score = aggregate(RGES ~ pert_iname, lincs_drug_prediction_subset, median)
drug_instances_median = merge(lincs_drug_prediction_subset, drug_cmap_score, by = c("pert_iname"))
drug_instances_median$diff = abs(drug_instances_median$RGES.x - drug_instances_median$RGES.y) #cmap_score.y is the median
drug_instances_min_diff = aggregate(diff ~ pert_iname, drug_instances_median, min)
drug_instances_select = merge(drug_instances_median, drug_instances_min_diff, by=c("pert_iname", "diff"))
drug_instances_select = drug_instances_select[!duplicated(drug_instances_select$pert_iname), ]
sig_id_selects = drug_instances_select$id
#sig_id_selects = lincs_drug_prediction_subset$id
# load `lincs_signatures` (genes x signature ids); landmark genes by default
landmark = 1
if (landmark == 1){
load(paste0(pipelineDataFolder,"LINCS_RGES/lincs_signatures_cmpd_landmark_symbol.RData"))
}else{
load(paste0(pipelineDataFolder,"LINCS_RGES/lincs_signatures_cmpd_landmark_GSE92742.RData"))
}
# disease log2FC joined with the selected drug signatures, matched on gene
drug_dz_signature = merge(dz_sig, data.frame(gene = rownames(lincs_signatures),
lincs_signatures[, as.character(sig_id_selects)]), by="gene", suffixes='')
#########################
###
#visualize the reversed gene expression
#reorder drug dz signatures: convert every column to within-column ranks
gene_ids = drug_dz_signature$gene
drug_dz_signature_rank = drug_dz_signature[,-1]
for (i in 1:ncol(drug_dz_signature_rank)){
drug_dz_signature_rank[,i] = rank(-1 * drug_dz_signature_rank[,i] ) #highly expressed genes ranked on the top
}
gene_ids_rank <- gene_ids[order(drug_dz_signature_rank[,1])]
drug_dz_signature_rank <- drug_dz_signature_rank[order(drug_dz_signature_rank[,1]),] #order by disease expression
# order drug columns by Spearman correlation with the disease signature
col_sorted = sort(cor(drug_dz_signature_rank, method="spearman")["log2FoldChange",-1])
drug_dz_signature_rank = drug_dz_signature_rank[,c("log2FoldChange", names(col_sorted))]
# map signature-id column names (data.frame prefixes them with "X") back to drug names
drug_names = sapply(2:ncol(drug_dz_signature_rank), function(id){
lincs_drug_prediction$pert_iname[paste("X",lincs_drug_prediction$id, sep="") == names(drug_dz_signature_rank)[id]][1]
})
dz = 'case'
pdf(paste(outputFolder, "/lincs_reverse_expression.pdf", sep=""))
#colPal <- bluered(100)
colPal = rev(colorRampPalette(brewer.pal(10, "RdYlBu"))(256))
par(mar=c(13, 6, 2, 0.5))
# NOTE(review): every branch returns "black", so axiscolor is currently a
# no-op placeholder for per-label coloring.
axiscolor = sapply(c(dz, as.character(drug_names)), function(name){
if (name == dz){
"black"
}else if (name %in% ""){
"black"
}else{
"black"
}
})
# heatmap of ranks: first column is the disease, remaining columns the drugs
image(t(drug_dz_signature_rank), col=colPal, axes=F, srt=45)
axis(1, at= seq(0,1,length.out=ncol( drug_dz_signature_rank ) ), labels= FALSE)
text(x = seq(0,1,length.out=ncol( drug_dz_signature_rank ) ), c(-0.05),
labels = c( dz,as.character(drug_names)),col=axiscolor, srt = 45, pos=2,offset=0.05, xpd = TRUE, cex=0.6)
dev.off()
|
/Code/deprecatedfor_v0.1/visualize_drug_hits.R
|
no_license
|
metabdel/octad_desktop
|
R
| false
| false
| 3,987
|
r
|
#visualize reversal
# Builds a heatmap showing how candidate drugs reverse the disease gene
# expression signature (OCTAD pipeline).  Expects `outputFolder` and
# `pipelineDataFolder` to already be defined by the calling script -- TODO
# confirm against the pipeline driver.
library(pheatmap)
library(dplyr)
library("gplots")
library("ggplot2")
library("RColorBrewer")
# Load per-signature drug scores and the disease signature restricted to the
# genes actually used for scoring.
lincs_drug_prediction = read.csv(paste(outputFolder, "/all_lincs_score.csv", sep=""),stringsAsFactors = F)
dz_gene_ids = read.csv(paste(outputFolder, "/dz_sig_used.csv", sep=""),stringsAsFactors = F)
dz_signature = read.csv(paste0(outputFolder,'dz_signature.csv'),stringsAsFactors = F)
dz_signature = dz_signature %>% filter(gene %in% dz_gene_ids$gene)
dz_sig = dz_signature %>% select(gene,log2FoldChange)
#drugs to visualize
sRGES = read.csv(paste(outputFolder, "/sRGES.csv", sep=""))
#choose top common drugs
# Top 20 named compounds seen in more than one experiment; BRD-/SA- screening
# identifiers are excluded.
top_drugs = as.character(unique(sRGES$pert_iname[sRGES$n > 1 & !sRGES$pert_iname %in% sRGES$pert_iname[grep("BRD-|SA-", sRGES$pert_iname)]][1:20]))
##########
#visualized gene reversed
#only pick the signatures from close to the median
#cell_lines = read.csv(paste("../table/", cancer, "_cell_lines.csv", sep=""))
#lincs_drug_prediction_subset = subset(lincs_drug_prediction, cell_id %in% cell_lines$LINCS, select=c("id", "RGES", "pert_iname", "pert_dose", "pert_time"))
lincs_drug_prediction$RGES = lincs_drug_prediction$cmap_score
lincs_drug_prediction_subset = lincs_drug_prediction[lincs_drug_prediction$pert_iname %in% top_drugs,]
###selecting median still sounds weird... let's keep all signatures
# For each drug, keep the single signature whose RGES is closest to that
# drug's median RGES (ties resolved by dropping duplicated pert_iname rows).
drug_cmap_score = aggregate(RGES ~ pert_iname, lincs_drug_prediction_subset, median)
drug_instances_median = merge(lincs_drug_prediction_subset, drug_cmap_score, by = c("pert_iname"))
drug_instances_median$diff = abs(drug_instances_median$RGES.x - drug_instances_median$RGES.y) #cmap_score.y is the median
drug_instances_min_diff = aggregate(diff ~ pert_iname, drug_instances_median, min)
drug_instances_select = merge(drug_instances_median, drug_instances_min_diff, by=c("pert_iname", "diff"))
drug_instances_select = drug_instances_select[!duplicated(drug_instances_select$pert_iname), ]
sig_id_selects = drug_instances_select$id
#sig_id_selects = lincs_drug_prediction_subset$id
# Load the LINCS expression matrix `lincs_signatures`; landmark-gene matrix
# when landmark == 1, otherwise the GSE92742 build.
landmark = 1
if (landmark == 1){
  load(paste0(pipelineDataFolder,"LINCS_RGES/lincs_signatures_cmpd_landmark_symbol.RData"))
}else{
  load(paste0(pipelineDataFolder,"LINCS_RGES/lincs_signatures_cmpd_landmark_GSE92742.RData"))
}
drug_dz_signature = merge(dz_sig, data.frame(gene = rownames(lincs_signatures),
                                             lincs_signatures[, as.character(sig_id_selects)]), by="gene", suffixes='')
#########################
###
#visualize the reversed gene expression
#reorder drug dz signatures
gene_ids = drug_dz_signature$gene
drug_dz_signature_rank = drug_dz_signature[,-1]
# Convert each column (disease + every selected drug signature) to ranks so
# they share one colour scale.
for (i in 1:ncol(drug_dz_signature_rank)){
  drug_dz_signature_rank[,i] = rank(-1 * drug_dz_signature_rank[,i] ) #highly expressed genes ranked on the top
}
gene_ids_rank <- gene_ids[order(drug_dz_signature_rank[,1])]
drug_dz_signature_rank <- drug_dz_signature_rank[order(drug_dz_signature_rank[,1]),] #order by disease expression
# Order drug columns by Spearman correlation with the disease column.
col_sorted = sort(cor(drug_dz_signature_rank, method="spearman")["log2FoldChange",-1])
drug_dz_signature_rank = drug_dz_signature_rank[,c("log2FoldChange", names(col_sorted))]
# Recover drug names from signature ids; data.frame() prefixed the numeric
# column names with "X".
drug_names = sapply(2:ncol(drug_dz_signature_rank), function(id){
  lincs_drug_prediction$pert_iname[paste("X",lincs_drug_prediction$id, sep="") == names(drug_dz_signature_rank)[id]][1]
})
dz = 'case'
pdf(paste(outputFolder, "/lincs_reverse_expression.pdf", sep=""))
#colPal <- bluered(100)
colPal = rev(colorRampPalette(brewer.pal(10, "RdYlBu"))(256))
par(mar=c(13, 6, 2, 0.5))
# All labels are currently black; this sapply is kept as a hook for
# highlighting selected drugs later.
axiscolor = sapply(c(dz, as.character(drug_names)), function(name){
  if (name == dz){
    "black"
  }else if (name %in% ""){
    "black"
  }else{
    "black"
  }
})
image(t(drug_dz_signature_rank), col=colPal, axes=F, srt=45)
axis(1, at= seq(0,1,length.out=ncol( drug_dz_signature_rank ) ), labels= FALSE)
text(x = seq(0,1,length.out=ncol( drug_dz_signature_rank ) ), c(-0.05),
     labels = c( dz,as.character(drug_names)),col=axiscolor, srt = 45, pos=2,offset=0.05, xpd = TRUE, cex=0.6)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/susie_ss.R
\name{susie_z}
\alias{susie_z}
\title{Summary statistics version of SuSiE on z scores and correlation (or covariance) matrix.}
\usage{
susie_z(z, R, r_tol = 1e-08, L = 10,
estimate_residual_variance = TRUE, optimize_option = c("uniroot",
"EM"), prior_weights = NULL, null_weight = NULL, coverage = 0.95,
min_abs_corr = 0.5, verbose = FALSE, track_fit = FALSE, ...)
}
\arguments{
\item{z}{a p vector of z scores.}
\item{R}{a p by p symmetric and positive semidefinite matrix. It can be X'X, covariance matrix or correlation matrix.}
\item{r_tol}{tolerance level for eigen value check of positive semidefinite matrix of R.}
\item{L}{maximum number of non-zero effects.}
\item{estimate_residual_variance}{indicates whether to estimate residual variance}
\item{optimize_option}{the method to estimate V, 'uniroot' or 'EM'}
\item{prior_weights}{a p vector of prior probability that each element is non-zero.}
\item{null_weight}{probability of no effect, for each single effect model.}
\item{coverage}{coverage of credible sets. Defaults to 0.95 for a 95\% credible interval.}
\item{min_abs_corr}{minimum of absolute value of correlation allowed in a credible set.}
\item{verbose}{if TRUE outputs some progress messages.}
\item{track_fit}{add an attribute \code{trace} to output that saves current values of all iterations.}
\item{...}{further arguments to be passed to \code{\link{susie_ss}}}
}
\value{
a susie fit
}
\description{
Summary statistics version of SuSiE on z scores and correlation (or covariance) matrix.
}
|
/man/susie_z.Rd
|
permissive
|
KaiqianZhang/susieR
|
R
| false
| true
| 1,621
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/susie_ss.R
\name{susie_z}
\alias{susie_z}
\title{Summary statistics version of SuSiE on z scores and correlation (or covariance) matrix.}
\usage{
susie_z(z, R, r_tol = 1e-08, L = 10,
estimate_residual_variance = TRUE, optimize_option = c("uniroot",
"EM"), prior_weights = NULL, null_weight = NULL, coverage = 0.95,
min_abs_corr = 0.5, verbose = FALSE, track_fit = FALSE, ...)
}
\arguments{
\item{z}{a p vector of z scores.}
\item{R}{a p by p symmetric and positive semidefinite matrix. It can be X'X, covariance matrix or correlation matrix.}
\item{r_tol}{tolerance level for eigen value check of positive semidefinite matrix of R.}
\item{L}{maximum number of non-zero effects.}
\item{estimate_residual_variance}{indicates whether to estimate residual variance}
\item{optimize_option}{the method to estimate V, 'uniroot' or 'EM'}
\item{prior_weights}{a p vector of prior probability that each element is non-zero.}
\item{null_weight}{probability of no effect, for each single effect model.}
\item{coverage}{coverage of credible sets. Defaults to 0.95 for a 95\% credible interval.}
\item{min_abs_corr}{minimum of absolute value of correlation allowed in a credible set.}
\item{verbose}{if TRUE outputs some progress messages.}
\item{track_fit}{add an attribute \code{trace} to output that saves current values of all iterations.}
\item{...}{further arguments to be passed to \code{\link{susie_ss}}}
}
\value{
a susie fit
}
\description{
Summary statistics version of SuSiE on z scores and correlation (or covariance) matrix.
}
|
# Extracted example for askpass::askpass() ("Password Prompt Utility").
# askpass() opens an interactive password prompt, hence the "No test"
# markers: the example is skipped when checks run non-interactively.
library(askpass)
### Name: askpass
### Title: Password Prompt Utility
### Aliases: askpass
### ** Examples
## No test:
# Prompt user for passwd
pw <- askpass("Please enter your password")
## End(No test)
|
/data/genthat_extracted_code/askpass/examples/askpass.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 212
|
r
|
# Extracted example for askpass::askpass() ("Password Prompt Utility").
# askpass() opens an interactive password prompt, hence the "No test"
# markers: the example is skipped when checks run non-interactively.
library(askpass)
### Name: askpass
### Title: Password Prompt Utility
### Aliases: askpass
### ** Examples
## No test:
# Prompt user for passwd
pw <- askpass("Please enter your password")
## End(No test)
|
uniqueEl <- function(arg1) {
  ## Computes, for every pair of columns in a matrix, the size of the
  ## symmetric difference of the two columns treated as sets (the number of
  ## elements present in exactly one of the two columns).
  ## George Crowley
  ##
  ## Args:
  ##   arg1: a matrix; each column is interpreted as a set of values.
  ## Returns:
  ##   an ncol(arg1) x ncol(arg1) symmetric integer matrix with zeros on
  ##   the diagonal.
  if(!is.matrix(arg1)){
    stop("input argument is not a matrix")
  }
  n = ncol(arg1)
  unique.mat <- matrix(0L, ncol = n, nrow = n)
  # The measure is symmetric and zero on the diagonal, so compute only the
  # strict lower triangle and mirror it (halves the set operations; also
  # fixes the crash the old 1:n loop hit on a zero-column matrix).
  for (i in seq_len(n)){
    for (j in seq_len(i - 1L)) {
      d <- length(setdiff(union(arg1[,i], arg1[,j]),
                          intersect(arg1[,i], arg1[,j])))
      unique.mat[i, j] <- d
      unique.mat[j, i] <- d
    }
  }
  return(unique.mat)
}
|
/uniqueEl.R
|
no_license
|
NolanLabNYU/PEDF-a-pleiotropic
|
R
| false
| false
| 448
|
r
|
uniqueEl <- function(arg1) {
  ## Counts, for all pairs of columns in a matrix, how many elements occur
  ## in exactly one column of the pair (size of the set symmetric difference).
  ## George Crowley
  if (!is.matrix(arg1)) {
    stop("input argument is not a matrix")
  }
  # Size of the symmetric difference between two columns viewed as sets.
  sym.diff.size <- function(a, b) {
    length(setdiff(union(a, b), intersect(a, b)))
  }
  n.col <- ncol(arg1)
  result <- matrix(, ncol = n.col, nrow = n.col)
  for (row.idx in 1:n.col) {
    for (col.idx in 1:n.col) {
      result[row.idx, col.idx] <- sym.diff.size(arg1[, row.idx], arg1[, col.idx])
    }
  }
  return(result)
}
|
library(argparse)
library(monocle)
library(stringr)
library(dplyr)
library(Matrix)
library(tidyr)
source("custom_monocle_da_functions.R")
get_gene_vs_gRNAgroup_1Mb_DE <- function(gRNA_group, cds_obj, gene_gRNAgroup_pair_table){
  # Differential-expression test of one gRNA group against the genes paired
  # with it in gene_gRNAgroup_pair_table (genes called within 1 Mb of the
  # group's target, per the pairing table builder -- TODO confirm).
  #
  # Args:
  #   gRNA_group: name of the gRNA group to test.
  #   cds_obj: monocle CellDataSet covering all cells.
  #   gene_gRNAgroup_pair_table: data frame with columns gRNAgroup and
  #     ENSG.targetgene pairing each group with candidate genes.
  #
  # Returns: the myDifferentialGeneTest() result table, with added columns
  #   gRNA_group, yes_gRNA_rowmean and no_gRNA_rowmean (size-factor
  #   normalised mean expression in cells with / without the gRNA group).
  genes_to_test <- subset(gene_gRNAgroup_pair_table, gRNAgroup == gRNA_group)
  #print(head(genes_to_test))
  ENSG_list <- as.character(genes_to_test$ENSG.targetgene) #genes to test against gRNA group, called within 1 Mb
  gene_cds <- cds_obj[rownames(cds_obj) %in% ENSG_list, ] #wont require all the ensgs are in the list
  #print("Size_Factor" %in% colnames(pData(gene_cds)))
  #print(head(pData(gene_cds)))
  # Flag each cell by whether this gRNA group appears in its underscore-
  # delimited `gene` string; (^|_)/(_|$) anchors avoid substring matches.
  pData(gene_cds)$gRNA_detected <- grepl(paste0('(^|_)', gRNA_group, '(_|$)'), pData(gene_cds)$gene) #this should refer to the true/false input if a gRNA is detected, esp as i re-run with gRNAgroup
  print(table(pData(gene_cds)$gRNA_detected))
  print(gRNA_group)
  gene_cds <- gene_cds[, !is.na(pData(gene_cds)$gRNA_detected)]
  #print("Size_Factor" %in% colnames(pData(gene_cds)))
  #print("No gRNA detected: ")
  #print( w_NA - no_NA) #print the number of cells w/o gRNA detected
  #print(table(pData(gene_cds)$gRNA_detected))
  #print("About to start DE")
  #print(dim(gene_cds))
  #closeAllConnections()
  # DE of gene expression vs. presence/absence of the gRNA group.
  DE_results <- myDifferentialGeneTest(gene_cds, fullModelFormulaStr = "~gRNA_detected", cores=1)
  print(head(DE_results))
  #print(DE_results[1,'qval'])
  #tried to merge but i think that's confusing
  #merged_df <- merge(gene_gRNAgroup_pair_table, DE_results, by.x = "ENSG.targetgene", by.y = "id")
  #DE_results$gRNA_group <- gRNA_group
  #print(head(DE_results))
  #print(head(gene_gRNAgroup_pair_table))
  DE_results$gRNA_group <- gRNA_group
  #ENSG_list HAS THE GENE SOMEHOW
  ####EXPORT THE MEANS
  #print(table(pData(gene_cds)$gRNA_detected))
  # Per-gene mean normalised expression in the two cell populations.
  yes_gRNA_cds <- gene_cds[,pData(gene_cds)$gRNA_detected]
  no_gRNA_cds <- gene_cds[,!pData(gene_cds)$gRNA_detected]
  #print("Size_Factor" %in% colnames(pData(yes_gRNA_cds)))
  #print("Size_Factor" %in% colnames(pData(no_gRNA_cds)))
  print(yes_gRNA_cds)
  print(no_gRNA_cds)
  #print('1')
  yes_gRNA_rowmean <- Matrix::rowSums(t( t(exprs(yes_gRNA_cds)) / pData(yes_gRNA_cds)$Size_Factor) ) / ncol(yes_gRNA_cds) #have to double transpose to make sure the sizefactor will divide the right way, transpose again to get it to work
  #print('2')
  #print(no_gRNA_cds)
  no_gRNA_rowmean <- Matrix::rowSums(t( t(exprs(no_gRNA_cds)) / pData(no_gRNA_cds)$Size_Factor) ) / ncol(no_gRNA_cds)
  DE_results$yes_gRNA_rowmean <- yes_gRNA_rowmean
  DE_results$no_gRNA_rowmean <- no_gRNA_rowmean
  return(DE_results)
}
###TROUBLESHOOTING###
#source("~/Library/Group Containers/G69SCX94XU.duck/Library/Application Support/duck/Volumes/Vol10_PROJECTS/170803_CRISPRQTL/bin/custom_monocle_da_functions.R")
#test.rds <- readRDS("~/Library/Group Containers/G69SCX94XU.duck/Library/Application Support/duck/Volumes/Vol10_PROJECTS/170803_CRISPRQTL/results/171012_K1000_analysis/K1000.NOCHIM.sizefact.dispersions.exprs.rds")
#test.gene_gRNAgroup_pair_table <- read.delim("~/Library/Group Containers/G69SCX94XU.duck/Library/Application Support/duck/Volumes/Vol10_PROJECTS/170803_CRISPRQTL/results/171012_K1000_analysis/171018_gRNAxGene_association_tests/171018_gRAx2Mbgene_PAIRS.txt")
#test.gRNA_group <- "ACTB_TSS"
#undebug(get_gene_vs_gRNAgroup_1Mb_DE)
#test.result <- get_gene_vs_gRNAgroup_1Mb_DE(test.gRNA_group, test.rds, test.gene_gRNAgroup_pair_table)
###TROUBLESHOOTING END ###
# Command-line interface: positional args are the CellDataSet RDS path, the
# gRNA group name, the gene/gRNA-group pairing table (TSV), and the output
# TSV path.
parser=argparse::ArgumentParser(description = "Script to perform DE on gRNAgroups")
parser$add_argument("cds_obj")
parser$add_argument("gRNA_group")
parser$add_argument("gene_gRNAgroup_pair_table")
parser$add_argument("output")
args = parser$parse_args()
K1000_cds <- readRDS(args$cds_obj) #43000 cells
READIN.gene_gRNAgroup_pair_table <- as.data.frame(readr::read_delim(args$gene_gRNAgroup_pair_table, delim='\t'))
DE_results <- get_gene_vs_gRNAgroup_1Mb_DE(args$gRNA_group, K1000_cds, READIN.gene_gRNAgroup_pair_table)
# Plain TSV output, one row per tested gene.
write.table(DE_results, file = args$output , quote = FALSE, sep = "\t", col.names = TRUE, row.names = FALSE)
|
/get_deg.pilot.R
|
no_license
|
shendurelab/tafka-crisprQTL
|
R
| false
| false
| 4,165
|
r
|
library(argparse)
library(monocle)
library(stringr)
library(dplyr)
library(Matrix)
library(tidyr)
source("custom_monocle_da_functions.R")
get_gene_vs_gRNAgroup_1Mb_DE <- function(gRNA_group, cds_obj, gene_gRNAgroup_pair_table){
genes_to_test <- subset(gene_gRNAgroup_pair_table, gRNAgroup == gRNA_group)
#print(head(genes_to_test))
ENSG_list <- as.character(genes_to_test$ENSG.targetgene) #genes to test against gRNA group, called within 1 Mb
gene_cds <- cds_obj[rownames(cds_obj) %in% ENSG_list, ] #wont require all the ensgs are in the list
#print("Size_Factor" %in% colnames(pData(gene_cds)))
#print(head(pData(gene_cds)))
pData(gene_cds)$gRNA_detected <- grepl(paste0('(^|_)', gRNA_group, '(_|$)'), pData(gene_cds)$gene) #this should refer to the true/false input if a gRNA is detected, esp as i re-run with gRNAgroup
print(table(pData(gene_cds)$gRNA_detected))
print(gRNA_group)
gene_cds <- gene_cds[, !is.na(pData(gene_cds)$gRNA_detected)]
#print("Size_Factor" %in% colnames(pData(gene_cds)))
#print("No gRNA detected: ")
#print( w_NA - no_NA) #print the number of cells w/o gRNA detected
#print(table(pData(gene_cds)$gRNA_detected))
#print("About to start DE")
#print(dim(gene_cds))
#closeAllConnections()
DE_results <- myDifferentialGeneTest(gene_cds, fullModelFormulaStr = "~gRNA_detected", cores=1)
print(head(DE_results))
#print(DE_results[1,'qval'])
#tried to merge but i think that's confusing
#merged_df <- merge(gene_gRNAgroup_pair_table, DE_results, by.x = "ENSG.targetgene", by.y = "id")
#DE_results$gRNA_group <- gRNA_group
#print(head(DE_results))
#print(head(gene_gRNAgroup_pair_table))
DE_results$gRNA_group <- gRNA_group
#ENSG_list HAS THE GENE SOMEHOW
####EXPORT THE MEANS
#print(table(pData(gene_cds)$gRNA_detected))
yes_gRNA_cds <- gene_cds[,pData(gene_cds)$gRNA_detected]
no_gRNA_cds <- gene_cds[,!pData(gene_cds)$gRNA_detected]
#print("Size_Factor" %in% colnames(pData(yes_gRNA_cds)))
#print("Size_Factor" %in% colnames(pData(no_gRNA_cds)))
print(yes_gRNA_cds)
print(no_gRNA_cds)
#print('1')
yes_gRNA_rowmean <- Matrix::rowSums(t( t(exprs(yes_gRNA_cds)) / pData(yes_gRNA_cds)$Size_Factor) ) / ncol(yes_gRNA_cds) #have to double transpose to make sure the sizefactor will divide the right way, transpose again to get it to work
#print('2')
#print(no_gRNA_cds)
no_gRNA_rowmean <- Matrix::rowSums(t( t(exprs(no_gRNA_cds)) / pData(no_gRNA_cds)$Size_Factor) ) / ncol(no_gRNA_cds)
DE_results$yes_gRNA_rowmean <- yes_gRNA_rowmean
DE_results$no_gRNA_rowmean <- no_gRNA_rowmean
return(DE_results)
}
###TROUBLESHOOTING###
#source("~/Library/Group Containers/G69SCX94XU.duck/Library/Application Support/duck/Volumes/Vol10_PROJECTS/170803_CRISPRQTL/bin/custom_monocle_da_functions.R")
#test.rds <- readRDS("~/Library/Group Containers/G69SCX94XU.duck/Library/Application Support/duck/Volumes/Vol10_PROJECTS/170803_CRISPRQTL/results/171012_K1000_analysis/K1000.NOCHIM.sizefact.dispersions.exprs.rds")
#test.gene_gRNAgroup_pair_table <- read.delim("~/Library/Group Containers/G69SCX94XU.duck/Library/Application Support/duck/Volumes/Vol10_PROJECTS/170803_CRISPRQTL/results/171012_K1000_analysis/171018_gRNAxGene_association_tests/171018_gRAx2Mbgene_PAIRS.txt")
#test.gRNA_group <- "ACTB_TSS"
#undebug(get_gene_vs_gRNAgroup_1Mb_DE)
#test.result <- get_gene_vs_gRNAgroup_1Mb_DE(test.gRNA_group, test.rds, test.gene_gRNAgroup_pair_table)
###TROUBLESHOOTING END ###
parser=argparse::ArgumentParser(description = "Script to perform DE on gRNAgroups")
parser$add_argument("cds_obj")
parser$add_argument("gRNA_group")
parser$add_argument("gene_gRNAgroup_pair_table")
parser$add_argument("output")
args = parser$parse_args()
K1000_cds <- readRDS(args$cds_obj) #43000 cells
READIN.gene_gRNAgroup_pair_table <- as.data.frame(readr::read_delim(args$gene_gRNAgroup_pair_table, delim='\t'))
DE_results <- get_gene_vs_gRNAgroup_1Mb_DE(args$gRNA_group, K1000_cds, READIN.gene_gRNAgroup_pair_table)
write.table(DE_results, file = args$output , quote = FALSE, sep = "\t", col.names = TRUE, row.names = FALSE)
|
# Matrix construction and elementwise arithmetic examples.
(A <- matrix(1:6,nrow=2,ncol=3)) # nrow and ncol may be omitted
(B <- rbind(c(2,3,5),c(7,11,13))) # bind row vectors into a matrix
(C <- cbind(c(0,0),c(0,1),c(1,0))) # bind column vectors into a matrix
A + B - C # elementwise sum/difference of same-shaped matrices
|
/RFiles/matrix-sum2.R
|
no_license
|
marco-tn/StaticticsBasic
|
R
| false
| false
| 189
|
r
|
# Matrix construction and elementwise arithmetic examples.
(A <- matrix(1:6,nrow=2,ncol=3)) # nrow and ncol may be omitted
(B <- rbind(c(2,3,5),c(7,11,13))) # bind row vectors into a matrix
(C <- cbind(c(0,0),c(0,1),c(1,0))) # bind column vectors into a matrix
A + B - C # elementwise sum/difference of same-shaped matrices
|
context("Test memo structure")
# Unit tests for memoStructure().  `training` (a d2_session) and the mock
# API fixtures consumed by with_mock_api() are provided by the test setup.
test_that("We can warn on an invalid COP year", {
  d <- list()
  d$info$cop_year <- "1999"
  # An unsupported COP year should warn and return the input unchanged.
  expect_warning(d2 <- memoStructure(d, d2_session = training))
  expect_null(d2$memo$structure)
  expect_identical(d, d2)
})
with_mock_api({
  test_that("We can create a memo structure", {
    d <- list()
    years <- c("2021", "2022")
    # The structure contract below must hold for every supported COP year.
    for (year in years) {
      d$info$cop_year <- year
      d <- memoStructure(d, d2_session = training)
      #TODO: No idea what the message is or where it is coming from..
      #This should be silent but is not. testthat suppresses the messages
      #and cannot reproduce it on the console
      #expect_silent(d <- memoStructure(d, d2_session = training))
      expect_false(is.null(d$memo$structure))
      expect_equal(typeof(d$memo$structure), "list")
      expect_setequal(names(d$memo$structure), c("row_order", "col_order"))
      expect_true("data.frame" %in% class(d$memo$structure$row_order))
      expect_setequal(names(d$memo$structure$row_order),
                      c("ind",
                        "options",
                        "partner_chunk"))
      expect_true("data.frame" %in% class(d$memo$structure$col_order))
      expect_setequal(
        names(d$memo$structure$col_order),
        c("value",
          "name",
          "col_order",
          "id",
          "Prioritization")
      )
      expect_false(is.null(d$memo$inds))
    }
  })
})
|
/tests/testthat/test-memo-structure.R
|
permissive
|
jason-p-pickering/datapackr
|
R
| false
| false
| 1,442
|
r
|
context("Test memo structure")
test_that("We can warn on an invalid COP year", {
d <- list()
d$info$cop_year <- "1999"
expect_warning(d2 <- memoStructure(d, d2_session = training))
expect_null(d2$memo$structure)
expect_identical(d, d2)
})
with_mock_api({
test_that("We can create a memo structure", {
d <- list()
years <- c("2021", "2022")
for (year in years) {
d$info$cop_year <- year
d <- memoStructure(d, d2_session = training)
#TODO: No idea what the message is or where it is coming from..
#This should be silent but is not. testthat suppresses the messages
#and cannot reproduce it on the console
#expect_silent(d <- memoStructure(d, d2_session = training))
expect_false(is.null(d$memo$structure))
expect_equal(typeof(d$memo$structure), "list")
expect_setequal(names(d$memo$structure), c("row_order", "col_order"))
expect_true("data.frame" %in% class(d$memo$structure$row_order))
expect_setequal(names(d$memo$structure$row_order),
c("ind",
"options",
"partner_chunk"))
expect_true("data.frame" %in% class(d$memo$structure$col_order))
expect_setequal(
names(d$memo$structure$col_order),
c("value",
"name",
"col_order",
"id",
"Prioritization")
)
expect_false(is.null(d$memo$inds))
}
})
})
|
runlengthencoding <- function(x)
{
  # Run-length encode a string, e.g. "WWWB" -> "3W1B".
  #
  # Bug fix: the original body referenced the global variable `input`
  # instead of the argument `x`, so the parameter was silently ignored.
  splitx <- unlist(strsplit(x, ""))
  rlex <- rle(splitx)
  paste(with(rlex, as.vector(rbind(lengths, values))), collapse="")
}
input <- "WWWWWWWWWWWWBWWWWWWWWWWWWBBBWWWWWWWWWWWWWWWWWWWWWWWWBWWWWWWWWWWWWWW"
runlengthencoding(input)
|
/Programming Language Detection/Experiment-2/Dataset/Train/R/run-length-encoding-1.r
|
no_license
|
dlaststark/machine-learning-projects
|
R
| false
| false
| 275
|
r
|
runlengthencoding <- function(x)
{
  # Run-length encode a string, e.g. "WWWB" -> "3W1B".
  #
  # Bug fix: the original body referenced the global variable `input`
  # instead of the argument `x`, so the parameter was silently ignored.
  splitx <- unlist(strsplit(x, ""))
  rlex <- rle(splitx)
  paste(with(rlex, as.vector(rbind(lengths, values))), collapse="")
}
input <- "WWWWWWWWWWWWBWWWWWWWWWWWWBBBWWWWWWWWWWWWWWWWWWWWWWWWBWWWWWWWWWWWWWW"
runlengthencoding(input)
|
# Copyright 2018 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title
#' Determine species index from species code
#' @description
#' Determine species index from species code
#' @param sc Character, Defined as species code.
#' @return Species index.
#' May return an error code under the following conditions:
#'
#' return value condition
#' ------------ ---------
#' SI_ERR_CODE if species code is unknown
#'
#' @note
#' Species code string can be 1, 2, or 3 letters; upper/lower case
#' is ignored.
#' @rdname SIndexR_SpecMap
SIndexR_SpecMap <- function(sc)
{
  # Map each species code in `sc` to its species index via species_map();
  # returns a vector the same length as `sc` (unknown codes yield whatever
  # error code species_map() produces).
  unlist(lapply(sc, species_map))
}
#' @title
#' Remap species to recommended species, and return species index
#' @description
#' Remap species to recommended species, and return species index
#' @param sc Character, Species code.
#' @param fiz Character. Forest inventory zone: (A,B,C)=coast,
#' (D,E,F,G,H,I,J,K,L)=interior.
#'
#' @return Species index.
#' May return an error code under the following conditions:
#'
#' return value condition
#' ------------ ---------
#' SI_ERR_CODE if species code is unknown
#' SI_ERR_FIZ if FIZ code is unknown
#'
#' @note
#' Species code string can be 1, 2, or 3 letters; upper/lower case
#' is ignored. FIZ is only used where needed, such as for species
#' code "FD".
#' @rdname SIndexR_SpecRemap
SIndexR_SpecRemap <- function(sc, fiz)
{
  # Vectorised wrapper around species_remap(): remaps each species code /
  # FIZ code pair to a species index.
  #
  # A length-1 argument is recycled to match the other, so callers may pass
  # one species code with many FIZ codes or vice versa.  Mismatched lengths
  # (both > 1) are an error.
  # Idiom fix: scalar conditions now use short-circuit && instead of the
  # elementwise & operator.
  if(length(sc) == 1 && length(fiz) != 1){
    sc <- rep(sc, length(fiz))
  }
  if(length(sc) != 1 && length(fiz) == 1){
    fiz <- rep(fiz, length(sc))
  }
  if(length(sc) != length(fiz)){
    stop("sc and fiz do not have same length.")
  }
  # Element-wise remap.  USE.NAMES = FALSE keeps the result unnamed, which
  # matches the previous unlist(lapply(Map(...))) behaviour; the identity
  # lapply()s used only to build paired lists have been removed.
  return(unlist(mapply(function(s, f) species_remap(sc = s, fiz = f),
                       sc, fiz, SIMPLIFY = FALSE, USE.NAMES = FALSE)))
}
|
/R/SpeciesMapRemap.R
|
permissive
|
rjdejong14/SIndexR
|
R
| false
| false
| 2,596
|
r
|
# Copyright 2018 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#' @title
#' Determine species index from species code
#' @description
#' Determine species index from species code
#' @param sc Character, Defined as species code.
#' @return Species index.
#' May return an error code under the following conditions:
#'
#' return value condition
#' ------------ ---------
#' SI_ERR_CODE if species code is unknown
#'
#' @note
#' Species code string can be 1, 2, or 3 letters; upper/lower case
#' is ignored.
#' @rdname SIndexR_SpecMap
SIndexR_SpecMap <- function(sc)
{
  # Map each species code in `sc` to its species index via species_map();
  # returns a vector the same length as `sc` (unknown codes yield whatever
  # error code species_map() produces).
  unlist(lapply(sc, species_map))
}
#' @title
#' Remap species to recommended species, and return species index
#' @description
#' Remap species to recommended species, and return species index
#' @param sc Character, Species code.
#' @param fiz Character. Forest inventory zone: (A,B,C)=coast,
#' (D,E,F,G,H,I,J,K,L)=interior.
#'
#' @return Species index.
#' May return an error code under the following conditions:
#'
#' return value condition
#' ------------ ---------
#' SI_ERR_CODE if species code is unknown
#' SI_ERR_FIZ if FIZ code is unknown
#'
#' @note
#' Species code string can be 1, 2, or 3 letters; upper/lower case
#' is ignored. FIZ is only used where needed, such as for species
#' code "FD".
#' @rdname SIndexR_SpecRemap
SIndexR_SpecRemap <- function(sc, fiz)
{
  # Vectorised wrapper around species_remap(): remaps each species code /
  # FIZ code pair to a species index.
  #
  # A length-1 argument is recycled to match the other, so callers may pass
  # one species code with many FIZ codes or vice versa.  Mismatched lengths
  # (both > 1) are an error.
  # Idiom fix: scalar conditions now use short-circuit && instead of the
  # elementwise & operator.
  if(length(sc) == 1 && length(fiz) != 1){
    sc <- rep(sc, length(fiz))
  }
  if(length(sc) != 1 && length(fiz) == 1){
    fiz <- rep(fiz, length(sc))
  }
  if(length(sc) != length(fiz)){
    stop("sc and fiz do not have same length.")
  }
  # Element-wise remap.  USE.NAMES = FALSE keeps the result unnamed, which
  # matches the previous unlist(lapply(Map(...))) behaviour; the identity
  # lapply()s used only to build paired lists have been removed.
  return(unlist(mapply(function(s, f) species_remap(sc = s, fiz = f),
                       sc, fiz, SIMPLIFY = FALSE, USE.NAMES = FALSE)))
}
|
# sidebar -----------------------------------------------------------------
# Navigation menu for the three tabs plus the station selector shared by
# all of them.  `station_list` and `wafer_ids` are expected to be defined
# elsewhere (e.g. global.R) -- TODO confirm.
sidebar <- dashboardSidebar(
  sidebarMenu(
    menuItem(
      "Monitoring",
      tabName = "monitoring",
      icon = icon("line-chart")
    ),
    menuItem(
      "Removal Rate Tuning",
      tabName = "tuning",
      icon = icon("dashboard")
    ),
    menuItem(
      "Yield Curves",
      tabName = "yield",
      icon = icon("line-chart")
    ),
    selectInput("station", "Station", station_list, selected = "A456")
  ),
  width = 300
)
# body --------------------------------------------------------------------
body <- dashboardBody(
  tabItems(
    # Tuning tab: process-parameter sliders driving avg_removal_rate_plot.
    tabItem(tabName = "tuning",
            fluidRow(
              box(
                sliderInput(
                  "cabp",
                  "Center Air Bag Pressure",
                  min = 20,
                  max = 100,
                  value = 60
                ),
                sliderInput(
                  "pcp",
                  "Pressurized Chamber Pressure",
                  min = 20,
                  max = 100,
                  value = 60
                )
              ),
              box(
                sliderInput(
                  "slurryA",
                  "Slurry Flow Rate Line A",
                  min = 0,
                  max = 16,
                  value = 8
                ),
                sliderInput(
                  "slurryC",
                  "Slurry Flow Rate Line C",
                  min = 50,
                  max = 450,
                  value = 250
                )
              )
            ),
            fluidRow(box(plotOutput("avg_removal_rate_plot"), width = 8))
    ),
    # Monitoring tab: per-wafer time series for the selected sensor variables.
    tabItem(
      tabName = "monitoring",
      fluidRow(
        box(
          selectInput("wafer_id", label = "Wafer ID",
                      choices = wafer_ids,
                      selected = 29494154)
        ),
        box(
          selectInput("vars", label = "Variables",
                      choices = list(
                        "CENTER_AIR_BAG_PRESSURE",
                        "PRESSURIZED_CHAMBER_PRESSURE",
                        "SLURRY_FLOW_LINE_A",
                        "SLURRY_FLOW_LINE_B",
                        "SLURRY_FLOW_LINE_C",
                        "WAFER_ROTATION",
                        "STAGE_ROTATION"
                      ),
                      selected = "CENTER_AIR_BAG_PRESSURE",
                      multiple = TRUE,
                      selectize = FALSE
          )
        )),
      fluidRow(
        box(width = 8, plotOutput("ts_plot"))
      )
    ),
    # Yield tab: error-tolerance slider feeding yield_plot.
    tabItem(
      tabName = "yield",
      fluidRow(
        box(
          sliderInput(
            "abs_error",
            "Error Tolerance",
            min = 1,
            max = 40,
            value = 10
          )
        )
      ),
      fluidRow(
        box(width = 8, plotOutput("yield_plot"))
      )
    )
  )
)
# notifications -----------------------------------------------------------
# Header with a static task dropdown showing consumable wear levels.
header <- dashboardHeader(title = "Wafer Polishing Process",
                          # fluidRow(
                          # valueBox(paste0(round(dresser_life, 2) * 100, '%'), "Dresser Life Remaining", color = "red", icon = icon("exclamation-triangle"), width = 6),
                          # valueBox(paste0(round(backing_film_life, 2) * 100, '%'), "Backing Film Life Remaining", color = "green", width = 6)
                          #
                          # ),
                          dropdownMenu(
                            type = "tasks", badgeStatus = "danger",
                            taskItem(value = 94, color = "red",
                                     "Dresser Life Consumed"
                            ),
                            taskItem(value = 6, color = "green",
                                     "Backing Film Life Consumed"
                            )
                          ))
# page --------------------------------------------------------------------
# Assemble the dashboard; the final expression is the UI returned by ui.R.
dashboardPage(
  header,
  sidebar,
  body
)
|
/R/phm_dashboard2/ui.R
|
no_license
|
phm16/cdm_prediction
|
R
| false
| false
| 3,762
|
r
|
# sidebar -----------------------------------------------------------------
sidebar <- dashboardSidebar(
sidebarMenu(
menuItem(
"Monitoring",
tabName = "monitoring",
icon = icon("line-chart")
),
menuItem(
"Removal Rate Tuning",
tabName = "tuning",
icon = icon("dashboard")
),
menuItem(
"Yield Curves",
tabName = "yield",
icon = icon("line-chart")
),
selectInput("station", "Station", station_list, selected = "A456")
),
width = 300
)
# body --------------------------------------------------------------------
body <- dashboardBody(
tabItems(
tabItem(tabName = "tuning",
fluidRow(
box(
sliderInput(
"cabp",
"Center Air Bag Pressure",
min = 20,
max = 100,
value = 60
),
sliderInput(
"pcp",
"Pressurized Chamber Pressure",
min = 20,
max = 100,
value = 60
)
),
box(
sliderInput(
"slurryA",
"Slurry Flow Rate Line A",
min = 0,
max = 16,
value = 8
),
sliderInput(
"slurryC",
"Slurry Flow Rate Line C",
min = 50,
max = 450,
value = 250
)
)
),
fluidRow(box(plotOutput("avg_removal_rate_plot"), width = 8))
),
tabItem(
tabName = "monitoring",
fluidRow(
box(
selectInput("wafer_id", label = "Wafer ID",
choices = wafer_ids,
selected = 29494154)
),
box(
selectInput("vars", label = "Variables",
choices = list(
"CENTER_AIR_BAG_PRESSURE",
"PRESSURIZED_CHAMBER_PRESSURE",
"SLURRY_FLOW_LINE_A",
"SLURRY_FLOW_LINE_B",
"SLURRY_FLOW_LINE_C",
"WAFER_ROTATION",
"STAGE_ROTATION"
),
selected = "CENTER_AIR_BAG_PRESSURE",
multiple = TRUE,
selectize = FALSE
)
)),
fluidRow(
box(width = 8, plotOutput("ts_plot"))
)
),
tabItem(
tabName = "yield",
fluidRow(
box(
sliderInput(
"abs_error",
"Error Tolerance",
min = 1,
max = 40,
value = 10
)
)
),
fluidRow(
box(width = 8, plotOutput("yield_plot"))
)
)
)
)
# notifications -----------------------------------------------------------
header <- dashboardHeader(title = "Wafer Polishing Process",
# fluidRow(
# valueBox(paste0(round(dresser_life, 2) * 100, '%'), "Dresser Life Remaining", color = "red", icon = icon("exclamation-triangle"), width = 6),
# valueBox(paste0(round(backing_film_life, 2) * 100, '%'), "Backing Film Life Remaining", color = "green", width = 6)
#
# ),
dropdownMenu(
type = "tasks", badgeStatus = "danger",
taskItem(value = 94, color = "red",
"Dresser Life Consumed"
),
taskItem(value = 6, color = "green",
"Backing Film Life Consumed"
)
))
# page --------------------------------------------------------------------
dashboardPage(
header,
sidebar,
body
)
|
#loads data into tibble variable hpc
source('./LoadHouseholdPowerConsumptionData.R')
plot1 <- function(data = NULL)
{
  # Draw a red histogram of household Global Active Power to "plot1.png"
  # (480x480 px) in the working directory.
  #
  # Args:
  #   data: data frame with a Global_active_power column; when NULL it is
  #         loaded via load_data() from LoadHouseholdPowerConsumptionData.R.
  if(is.null(data)) {
    data <- load_data()
  }
  # Open Png device
  png(filename = "plot1.png",
      width = 480,
      height = 480,
      units = "px")
  # Bug fix: close the device even if hist() errors, so a failure no longer
  # leaves the PNG device open and silently capturing later plots.
  on.exit(dev.off(), add = TRUE)
  with(data, hist(Global_active_power,
                  col = "red",
                  main = "Global Active Power",
                  xlab = "Global Active Power (kilowatts)"))
}
|
/plot1.R
|
no_license
|
jb1t/ExData_Plotting1
|
R
| false
| false
| 547
|
r
|
#loads data into tibble variable hpc
source('./LoadHouseholdPowerConsumptionData.R')
# Write a red histogram of Global_active_power to "plot1.png" (480x480 px).
# When no data frame is supplied, the data set is fetched with load_data().
plot1 <- function(data = NULL)
{
  if (is.null(data)) data <- load_data()
  # Direct all plotting output to the PNG file device.
  png(filename = "plot1.png", width = 480, height = 480, units = "px")
  hist(data$Global_active_power,
       col = "red",
       main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)")
  dev.off()  # close the device, flushing the file to disk
}
|
#' @importFrom dplyr arrange
#' @importFrom dplyr do
#' @importFrom dplyr filter
#' @importFrom dplyr group_by
#' @importFrom dplyr select
#' @importFrom dplyr summarise
#' @importFrom dplyr n
#' @importFrom dplyr mutate
#' @importFrom data.table data.table
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 aes_string
#' @importFrom ggplot2 coord_flip
#' @importFrom ggplot2 coord_cartesian
#' @importFrom ggplot2 element_blank
#' @importFrom ggplot2 element_rect
#' @importFrom ggplot2 element_text
#' @importFrom ggplot2 facet_wrap
#' @importFrom ggplot2 geom_bar
#' @importFrom ggplot2 geom_boxplot
#' @importFrom ggplot2 geom_density
#' @importFrom ggplot2 geom_histogram
#' @importFrom ggplot2 geom_line
#' @importFrom ggplot2 geom_linerange
#' @importFrom ggplot2 geom_hline
#' @importFrom ggplot2 geom_vline
#' @importFrom ggplot2 geom_path
#' @importFrom ggplot2 geom_point
#' @importFrom ggplot2 geom_polygon
#' @importFrom ggplot2 geom_smooth
#' @importFrom ggplot2 geom_text
#' @importFrom ggplot2 geom_tile
#' @importFrom ggplot2 geom_violin
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 ggtitle
#' @importFrom ggplot2 guides
#' @importFrom ggplot2 guide_legend
#' @importFrom ggplot2 labs
#' @importFrom ggplot2 position_jitter
#' @importFrom ggplot2 scale_alpha_continuous
#' @importFrom ggplot2 scale_colour_gradient2
#' @importFrom ggplot2 scale_x_log10
#' @importFrom ggplot2 scale_y_log10
#' @importFrom ggplot2 scale_fill_gradientn
#' @importFrom ggplot2 scale_fill_gradient
#' @importFrom ggplot2 scale_x_discrete
#' @importFrom ggplot2 scale_x_continuous
#' @importFrom ggplot2 scale_y_discrete
#' @importFrom ggplot2 scale_y_continuous
#' @importFrom ggplot2 scale_size_manual
#' @importFrom ggplot2 scale_size
#' @importFrom ggplot2 scale_color_manual
#' @importFrom ggplot2 theme_set
#' @importFrom ggplot2 theme_bw
#' @importFrom ggplot2 theme_minimal
#' @importFrom ggplot2 theme
#' @importFrom ggplot2 xlab
#' @importFrom ggplot2 ylab
#' @importFrom ggpubr ggboxplot
#' @importFrom ggpubr ggarrange
#' @importFrom grDevices dev.off
#' @importFrom grDevices pdf
#' @importFrom graphics barplot
#' @importFrom graphics hist
#' @importFrom microbiome abundances
#' @importFrom microbiome aggregate_taxa
#' @importFrom microbiome associate
#' @importFrom microbiome baseline
#' @importFrom microbiome bimodality
#' @importFrom microbiome boxplot_abundance
#' @importFrom microbiome core
#' @importFrom microbiome core_abundance
#' @importFrom microbiome core_members
#' @importFrom microbiome coverage
#' @importFrom microbiome divergence
#' @importFrom microbiome diversities
#' @importFrom microbiome dominance
#' @importFrom microbiome evenness
#' @importFrom microbiome gktau
#' @importFrom microbiome global
#' @importFrom microbiome group_age
#' @importFrom microbiome group_bmi
#' @importFrom microbiome heat
#' @importFrom microbiome hotplot
#' @importFrom microbiome inequality
#' @importFrom microbiome intermediate_stability
#' @importFrom microbiome log_modulo_skewness
#' @importFrom microbiome low_abundance
#' @importFrom microbiome map_levels
#' @importFrom microbiome meta
#' @importFrom microbiome neat
#' @importFrom microbiome neatsort
#' @importFrom microbiome noncore_abundance
#' @importFrom microbiome noncore_members
#' @importFrom microbiome plot_composition
#' @importFrom microbiome plot_core
#' @importFrom microbiome plot_density
#' @importFrom microbiome plot_frequencies
#' @importFrom microbiome plot_landscape
#' @importFrom microbiome plot_regression
#' @importFrom microbiome plot_taxa_prevalence
#' @importFrom microbiome plot_tipping
#' @importFrom microbiome potential_analysis
#' @importFrom microbiome prevalence
#' @importFrom microbiome quiet
#' @importFrom microbiome rare_members
#' @importFrom microbiome rarity
#' @importFrom microbiome read_biom2phyloseq
#' @importFrom microbiome read_csv2phyloseq
#' @importFrom microbiome read_mothur2phyloseq
#' @importFrom microbiome read_phyloseq
#' @importFrom microbiome remove_samples
#' @importFrom microbiome remove_taxa
#' @importFrom microbiome richness
#' @importFrom microbiome taxa
#' @importFrom microbiome time_normalize
#' @importFrom microbiome time_sort
#' @importFrom microbiome top_taxa
#' @importFrom microbiome transform
#' @importFrom phyloseq distance
#' @importFrom phyloseq estimate_richness
#' @importFrom phyloseq get_taxa
#' @importFrom phyloseq get_variable
#' @importFrom phyloseq merge_phyloseq
#' @importFrom phyloseq nsamples
#' @importFrom phyloseq ntaxa
#' @importFrom phyloseq ordinate
#' @importFrom phyloseq otu_table
#' @importFrom phyloseq otu_table<-
#' @importFrom phyloseq phyloseq
#' @importFrom phyloseq psmelt
#' @importFrom phyloseq prune_samples
#' @importFrom phyloseq prune_taxa
#' @importFrom phyloseq sample_data
#' @importFrom phyloseq sample_data<-
#' @importFrom phyloseq sample_names
#' @importFrom phyloseq sample_sums
#' @importFrom phyloseq taxa_are_rows
#' @importFrom phyloseq taxa_names
#' @importFrom phyloseq tax_glom
#' @importFrom phyloseq tax_table
#' @importFrom phyloseq tax_table<-
#' @importFrom phyloseq import_biom
#' @importFrom phyloseq parse_taxonomy_default
#' @importFrom plyr ddply
#' @importFrom reshape2 melt
#' @importFrom stats aggregate
#' @importFrom stats as.dist
#' @importFrom stats coef
#' @importFrom stats cor
#' @importFrom stats cor.test
#' @importFrom stats density
#' @importFrom stats dist
#' @importFrom stats dnorm
#' @importFrom stats hclust
#' @importFrom stats kernel
#' @importFrom stats lm
#' @importFrom stats loess
#' @importFrom stats loess.control
#' @importFrom stats median
#' @importFrom stats na.fail
#' @importFrom stats na.omit
#' @importFrom stats p.adjust
#' @importFrom stats pnorm
#' @importFrom stats predict
#' @importFrom stats quantile
#' @importFrom stats rnorm
#' @importFrom stats sd
#' @importFrom stats time
#' @importFrom stats filter
#' @importFrom stats var
#' @importFrom stats frequency
#' @importFrom tidyr gather
#' @importFrom tidyr separate
#' @importFrom utils capture.output
#' @importFrom utils flush.console
#' @importFrom utils head
#' @importFrom utils read.csv
#' @importFrom utils read.table
#' @importFrom utils tail
#' @importFrom utils write.csv
#' @importFrom vegan decostand
#' @importFrom vegan fisher.alpha
#' @importFrom vegan metaMDS
#' @importFrom vegan scores
#' @importFrom vegan vegdist
#' @importFrom vegan wascores
|
/R/firstlib.R
|
no_license
|
adamsorbie/microbiomeutilities
|
R
| false
| false
| 6,564
|
r
|
#' @importFrom dplyr arrange
#' @importFrom dplyr do
#' @importFrom dplyr filter
#' @importFrom dplyr group_by
#' @importFrom dplyr select
#' @importFrom dplyr summarise
#' @importFrom dplyr n
#' @importFrom dplyr mutate
#' @importFrom data.table data.table
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 aes_string
#' @importFrom ggplot2 coord_flip
#' @importFrom ggplot2 coord_cartesian
#' @importFrom ggplot2 element_blank
#' @importFrom ggplot2 element_rect
#' @importFrom ggplot2 element_text
#' @importFrom ggplot2 facet_wrap
#' @importFrom ggplot2 geom_bar
#' @importFrom ggplot2 geom_boxplot
#' @importFrom ggplot2 geom_density
#' @importFrom ggplot2 geom_histogram
#' @importFrom ggplot2 geom_line
#' @importFrom ggplot2 geom_linerange
#' @importFrom ggplot2 geom_hline
#' @importFrom ggplot2 geom_vline
#' @importFrom ggplot2 geom_path
#' @importFrom ggplot2 geom_point
#' @importFrom ggplot2 geom_polygon
#' @importFrom ggplot2 geom_smooth
#' @importFrom ggplot2 geom_text
#' @importFrom ggplot2 geom_tile
#' @importFrom ggplot2 geom_violin
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 ggtitle
#' @importFrom ggplot2 guides
#' @importFrom ggplot2 guide_legend
#' @importFrom ggplot2 labs
#' @importFrom ggplot2 position_jitter
#' @importFrom ggplot2 scale_alpha_continuous
#' @importFrom ggplot2 scale_colour_gradient2
#' @importFrom ggplot2 scale_x_log10
#' @importFrom ggplot2 scale_y_log10
#' @importFrom ggplot2 scale_fill_gradientn
#' @importFrom ggplot2 scale_fill_gradient
#' @importFrom ggplot2 scale_x_discrete
#' @importFrom ggplot2 scale_x_continuous
#' @importFrom ggplot2 scale_y_discrete
#' @importFrom ggplot2 scale_y_continuous
#' @importFrom ggplot2 scale_size_manual
#' @importFrom ggplot2 scale_size
#' @importFrom ggplot2 scale_color_manual
#' @importFrom ggplot2 theme_set
#' @importFrom ggplot2 theme_bw
#' @importFrom ggplot2 theme_minimal
#' @importFrom ggplot2 theme
#' @importFrom ggplot2 xlab
#' @importFrom ggplot2 ylab
#' @importFrom ggpubr ggboxplot
#' @importFrom ggpubr ggarrange
#' @importFrom grDevices dev.off
#' @importFrom grDevices pdf
#' @importFrom graphics barplot
#' @importFrom graphics hist
#' @importFrom microbiome abundances
#' @importFrom microbiome aggregate_taxa
#' @importFrom microbiome associate
#' @importFrom microbiome baseline
#' @importFrom microbiome bimodality
#' @importFrom microbiome boxplot_abundance
#' @importFrom microbiome core
#' @importFrom microbiome core_abundance
#' @importFrom microbiome core_members
#' @importFrom microbiome coverage
#' @importFrom microbiome divergence
#' @importFrom microbiome diversities
#' @importFrom microbiome dominance
#' @importFrom microbiome evenness
#' @importFrom microbiome gktau
#' @importFrom microbiome global
#' @importFrom microbiome group_age
#' @importFrom microbiome group_bmi
#' @importFrom microbiome heat
#' @importFrom microbiome hotplot
#' @importFrom microbiome inequality
#' @importFrom microbiome intermediate_stability
#' @importFrom microbiome log_modulo_skewness
#' @importFrom microbiome low_abundance
#' @importFrom microbiome map_levels
#' @importFrom microbiome meta
#' @importFrom microbiome neat
#' @importFrom microbiome neatsort
#' @importFrom microbiome noncore_abundance
#' @importFrom microbiome noncore_members
#' @importFrom microbiome plot_composition
#' @importFrom microbiome plot_core
#' @importFrom microbiome plot_density
#' @importFrom microbiome plot_frequencies
#' @importFrom microbiome plot_landscape
#' @importFrom microbiome plot_regression
#' @importFrom microbiome plot_taxa_prevalence
#' @importFrom microbiome plot_tipping
#' @importFrom microbiome potential_analysis
#' @importFrom microbiome prevalence
#' @importFrom microbiome quiet
#' @importFrom microbiome rare_members
#' @importFrom microbiome rarity
#' @importFrom microbiome read_biom2phyloseq
#' @importFrom microbiome read_csv2phyloseq
#' @importFrom microbiome read_mothur2phyloseq
#' @importFrom microbiome read_phyloseq
#' @importFrom microbiome remove_samples
#' @importFrom microbiome remove_taxa
#' @importFrom microbiome richness
#' @importFrom microbiome taxa
#' @importFrom microbiome time_normalize
#' @importFrom microbiome time_sort
#' @importFrom microbiome top_taxa
#' @importFrom microbiome transform
#' @importFrom phyloseq distance
#' @importFrom phyloseq estimate_richness
#' @importFrom phyloseq get_taxa
#' @importFrom phyloseq get_variable
#' @importFrom phyloseq merge_phyloseq
#' @importFrom phyloseq nsamples
#' @importFrom phyloseq ntaxa
#' @importFrom phyloseq ordinate
#' @importFrom phyloseq otu_table
#' @importFrom phyloseq otu_table<-
#' @importFrom phyloseq phyloseq
#' @importFrom phyloseq psmelt
#' @importFrom phyloseq prune_samples
#' @importFrom phyloseq prune_taxa
#' @importFrom phyloseq sample_data
#' @importFrom phyloseq sample_data<-
#' @importFrom phyloseq sample_names
#' @importFrom phyloseq sample_sums
#' @importFrom phyloseq taxa_are_rows
#' @importFrom phyloseq taxa_names
#' @importFrom phyloseq tax_glom
#' @importFrom phyloseq tax_table
#' @importFrom phyloseq tax_table<-
#' @importFrom phyloseq import_biom
#' @importFrom phyloseq parse_taxonomy_default
#' @importFrom plyr ddply
#' @importFrom reshape2 melt
#' @importFrom stats aggregate
#' @importFrom stats as.dist
#' @importFrom stats coef
#' @importFrom stats cor
#' @importFrom stats cor.test
#' @importFrom stats density
#' @importFrom stats dist
#' @importFrom stats dnorm
#' @importFrom stats hclust
#' @importFrom stats kernel
#' @importFrom stats lm
#' @importFrom stats loess
#' @importFrom stats loess.control
#' @importFrom stats median
#' @importFrom stats na.fail
#' @importFrom stats na.omit
#' @importFrom stats p.adjust
#' @importFrom stats pnorm
#' @importFrom stats predict
#' @importFrom stats quantile
#' @importFrom stats rnorm
#' @importFrom stats sd
#' @importFrom stats time
#' @importFrom stats filter
#' @importFrom stats var
#' @importFrom stats frequency
#' @importFrom tidyr gather
#' @importFrom tidyr separate
#' @importFrom utils capture.output
#' @importFrom utils flush.console
#' @importFrom utils head
#' @importFrom utils read.csv
#' @importFrom utils read.table
#' @importFrom utils tail
#' @importFrom utils write.csv
#' @importFrom vegan decostand
#' @importFrom vegan fisher.alpha
#' @importFrom vegan metaMDS
#' @importFrom vegan scores
#' @importFrom vegan vegdist
#' @importFrom vegan wascores
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flags.R
\name{flags}
\alias{flags}
\alias{flag_numeric}
\alias{flag_integer}
\alias{flag_boolean}
\alias{flag_string}
\title{Flags for a training run}
\usage{
flags(
...,
config = Sys.getenv("R_CONFIG_ACTIVE", unset = "default"),
file = "flags.yml",
arguments = commandArgs(TRUE)
)
flag_numeric(name, default, description = NULL)
flag_integer(name, default, description = NULL)
flag_boolean(name, default, description = NULL)
flag_string(name, default, description = NULL)
}
\arguments{
\item{...}{One or more flag definitions}
\item{config}{The configuration to use. Defaults to the active configuration
for the current environment (as specified by the \code{R_CONFIG_ACTIVE}
environment variable), or \code{default} when unset.}
\item{file}{The flags YAML file to read}
\item{arguments}{The command line arguments (as a character vector) to be
parsed.}
\item{name}{Flag name}
\item{default}{Flag default value}
\item{description}{Flag description}
}
\value{
Named list of training flags
}
\description{
Define the flags (name, type, default value, description) which parameterize
a training run. Optionally read overrides of the default values from a
"flags.yml" config file and/or command line arguments.
}
\section{Config File Flags}{
Config file flags are defined in a YAML configuration file (by default
named "flags.yml"). Flags can either appear at the top-level of
the YAML or can be included in named configuration sections
(see the \href{https://github.com/rstudio/config}{config package} for
details).
}
\section{Command Line Flags}{
Command line flags should be of the form \code{--key=value} or
\verb{--key value}. The values are assumed to be valid \code{yaml} and
will be converted using \code{\link[=yaml.load]{yaml.load()}}.
}
\examples{
\dontrun{
library(tfruns)
# define flags and parse flag values from flags.yml and the command line
FLAGS <- flags(
flag_numeric('learning_rate', 0.01, 'Initial learning rate.'),
flag_integer('max_steps', 5000, 'Number of steps to run trainer.'),
flag_string('data_dir', 'MNIST-data', 'Directory for training data'),
flag_boolean('fake_data', FALSE, 'If true, use fake data for testing')
)
}
}
|
/man/flags.Rd
|
no_license
|
cran/tfruns
|
R
| false
| true
| 2,257
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flags.R
\name{flags}
\alias{flags}
\alias{flag_numeric}
\alias{flag_integer}
\alias{flag_boolean}
\alias{flag_string}
\title{Flags for a training run}
\usage{
flags(
...,
config = Sys.getenv("R_CONFIG_ACTIVE", unset = "default"),
file = "flags.yml",
arguments = commandArgs(TRUE)
)
flag_numeric(name, default, description = NULL)
flag_integer(name, default, description = NULL)
flag_boolean(name, default, description = NULL)
flag_string(name, default, description = NULL)
}
\arguments{
\item{...}{One or more flag definitions}
\item{config}{The configuration to use. Defaults to the active configuration
for the current environment (as specified by the \code{R_CONFIG_ACTIVE}
environment variable), or \code{default} when unset.}
\item{file}{The flags YAML file to read}
\item{arguments}{The command line arguments (as a character vector) to be
parsed.}
\item{name}{Flag name}
\item{default}{Flag default value}
\item{description}{Flag description}
}
\value{
Named list of training flags
}
\description{
Define the flags (name, type, default value, description) which parameterize
a training run. Optionally read overrides of the default values from a
"flags.yml" config file and/or command line arguments.
}
\section{Config File Flags}{
Config file flags are defined in a YAML configuration file (by default
named "flags.yml"). Flags can either appear at the top-level of
the YAML or can be included in named configuration sections
(see the \href{https://github.com/rstudio/config}{config package} for
details).
}
\section{Command Line Flags}{
Command line flags should be of the form \code{--key=value} or
\verb{--key value}. The values are assumed to be valid \code{yaml} and
will be converted using \code{\link[=yaml.load]{yaml.load()}}.
}
\examples{
\dontrun{
library(tfruns)
# define flags and parse flag values from flags.yml and the command line
FLAGS <- flags(
flag_numeric('learning_rate', 0.01, 'Initial learning rate.'),
flag_integer('max_steps', 5000, 'Number of steps to run trainer.'),
flag_string('data_dir', 'MNIST-data', 'Directory for training data'),
flag_boolean('fake_data', FALSE, 'If true, use fake data for testing')
)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/WindowSpec.R
\name{orderBy}
\alias{orderBy}
\alias{orderBy,WindowSpec,character-method}
\alias{orderBy,WindowSpec,Column-method}
\title{Ordering Columns in a WindowSpec}
\usage{
orderBy(x, col, ...)
\S4method{orderBy}{WindowSpec,character}(x, col, ...)
\S4method{orderBy}{WindowSpec,Column}(x, col, ...)
}
\arguments{
\item{x}{a WindowSpec}
\item{col}{a character or Column indicating an ordering column}
\item{...}{additional sorting fields}
}
\value{
A WindowSpec.
}
\description{
Defines the ordering columns in a WindowSpec.
}
\note{
orderBy(WindowSpec, character) since 2.0.0
orderBy(WindowSpec, Column) since 2.0.0
}
\examples{
\dontrun{
orderBy(ws, "col1", "col2")
orderBy(ws, df$col1, df$col2)
}
}
\seealso{
See \link{arrange} for use in sorting a SparkDataFrame
Other windowspec_method:
\code{\link{partitionBy}()},
\code{\link{rangeBetween}()},
\code{\link{rowsBetween}()}
}
\concept{windowspec_method}
|
/man/orderBy.Rd
|
no_license
|
cran/SparkR
|
R
| false
| true
| 1,016
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/WindowSpec.R
\name{orderBy}
\alias{orderBy}
\alias{orderBy,WindowSpec,character-method}
\alias{orderBy,WindowSpec,Column-method}
\title{Ordering Columns in a WindowSpec}
\usage{
orderBy(x, col, ...)
\S4method{orderBy}{WindowSpec,character}(x, col, ...)
\S4method{orderBy}{WindowSpec,Column}(x, col, ...)
}
\arguments{
\item{x}{a WindowSpec}
\item{col}{a character or Column indicating an ordering column}
\item{...}{additional sorting fields}
}
\value{
A WindowSpec.
}
\description{
Defines the ordering columns in a WindowSpec.
}
\note{
orderBy(WindowSpec, character) since 2.0.0
orderBy(WindowSpec, Column) since 2.0.0
}
\examples{
\dontrun{
orderBy(ws, "col1", "col2")
orderBy(ws, df$col1, df$col2)
}
}
\seealso{
See \link{arrange} for use in sorting a SparkDataFrame
Other windowspec_method:
\code{\link{partitionBy}()},
\code{\link{rangeBetween}()},
\code{\link{rowsBetween}()}
}
\concept{windowspec_method}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nest_by.R
\name{nest_by.}
\alias{nest_by.}
\title{Nest data.tables}
\usage{
nest_by.(.df, ..., .key = "data", .keep = FALSE)
}
\arguments{
\item{.df}{A data.frame or data.table}
\item{...}{Columns to group by. If empty nests the entire data.table.
\code{tidyselect} compatible.}
\item{.key}{Name of the new column created by nesting.}
\item{.keep}{Should the grouping columns be kept in the list column.}
}
\description{
Nest data.tables by group.
Note: \code{nest_by()} \emph{does not} return a rowwise tidytable.
}
\examples{
df <- data.table(
a = 1:5,
b = 6:10,
c = c(rep("a", 3), rep("b", 2)),
d = c(rep("a", 3), rep("b", 2))
)
df \%>\%
nest_by()
df \%>\%
nest_by(c, d)
df \%>\%
nest_by(where(is.character))
df \%>\%
nest_by(c, d, .keep = TRUE)
}
\keyword{internal}
|
/man/nest_by..Rd
|
no_license
|
cran/tidytable
|
R
| false
| true
| 872
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nest_by.R
\name{nest_by.}
\alias{nest_by.}
\title{Nest data.tables}
\usage{
nest_by.(.df, ..., .key = "data", .keep = FALSE)
}
\arguments{
\item{.df}{A data.frame or data.table}
\item{...}{Columns to group by. If empty nests the entire data.table.
\code{tidyselect} compatible.}
\item{.key}{Name of the new column created by nesting.}
\item{.keep}{Should the grouping columns be kept in the list column.}
}
\description{
Nest data.tables by group.
Note: \code{nest_by()} \emph{does not} return a rowwise tidytable.
}
\examples{
df <- data.table(
a = 1:5,
b = 6:10,
c = c(rep("a", 3), rep("b", 2)),
d = c(rep("a", 3), rep("b", 2))
)
df \%>\%
nest_by()
df \%>\%
nest_by(c, d)
df \%>\%
nest_by(where(is.character))
df \%>\%
nest_by(c, d, .keep = TRUE)
}
\keyword{internal}
|
# Ad-hoc test: converting a symmetric sparse matrix to an SFBM must produce
# the same on-disk backing file as converting its expanded dgCMatrix form.
library(bigstatsr)
library(testthat)
# NOTE(review): as_SFBM() and col_count_sym() come from bigsparser, which is
# never attached here -- presumably this runs under devtools::load_all();
# confirm, or add library(bigsparser).
spmat0 <- Matrix::rsparsematrix(1000, 1000, 0.01, symmetric = TRUE)
X <- as_SFBM(spmat0)
# Expand the symmetric storage so both triangles are stored explicitly.
spmat <- as(spmat0, "dgCMatrix")
X2 <- as_SFBM(spmat)
# The two backing files must contain identical doubles, and the column
# pointers of the SFBM must match those of the expanded matrix.
expect_identical(readBin(X$sbk, what = 1, n = 1e6),
                 readBin(X2$sbk, what = 1, n = 1e6))
expect_equal(X$p, spmat@p)
# Internal helper: per-column non-zero counts implied by symmetric storage.
col_count <- bigsparser:::col_count_sym(spmat0@p, spmat0@i)
head(col_count)
sum(col_count)
|
/tmp-tests/test-write-sym2.R
|
no_license
|
privefl/bigsparser
|
R
| false
| false
| 405
|
r
|
# Compare two SFBM construction paths for the same random sparse matrix:
# one from the symmetric-storage object, one from its general dgCMatrix form.
library(bigstatsr)
library(testthat)
# NOTE(review): this script uses bigsparser (as_SFBM, :::col_count_sym) but
# only attaches bigstatsr/testthat -- likely relies on devtools::load_all().
spmat0 <- Matrix::rsparsematrix(1000, 1000, 0.01, symmetric = TRUE)
X <- as_SFBM(spmat0)
spmat <- as(spmat0, "dgCMatrix")  # explicit storage of both triangles
X2 <- as_SFBM(spmat)
# Both conversions must serialize byte-for-byte identical values and agree
# on the column-pointer vector.
expect_identical(readBin(X$sbk, what = 1, n = 1e6),
                 readBin(X2$sbk, what = 1, n = 1e6))
expect_equal(X$p, spmat@p)
# Sanity-check the internal column-count helper on the symmetric input.
col_count <- bigsparser:::col_count_sym(spmat0@p, spmat0@i)
head(col_count)
sum(col_count)
|
# Estimate the probability that a 1000-node Barabasi-Albert random graph
# (undirected) is connected, using 200 independent replicates.
library(igraph)
connected_count <- 0
for (trial in 1:200) {
  g <- barabasi.game(1000, directed = FALSE)
  if (is.connected(g)) {
    connected_count <- connected_count + 1
  }
}
# Fraction of replicates that were connected.
connected_count / 200
|
/EE232HW1/2b_connected.R
|
no_license
|
realmichaelzyy/UCLA_EE232_Network-Graph-Flow
|
R
| false
| false
| 120
|
r
|
# Empirical connectedness rate of 200 undirected Barabasi-Albert graphs
# on 1000 vertices.
library(igraph)
n_trials <- 200
is_conn <- logical(n_trials)
for (i in seq_len(n_trials)) {
  is_conn[i] <- is.connected(barabasi.game(1000, directed = FALSE))
}
sum(is_conn) / n_trials
|
#!/usr/bin/env Rscript
# Summarise per-position module calls from swap-SAM alignments of TAL reads:
# load the per-position tables, tabulate high-confidence module/RVD calls,
# and draw logo-style plots of module, RVD, and nucleotide frequencies.
library("devtools")
library (ggplot2)
require(gtools)   # NOTE(review): require() is for optional deps; library() fails loudly
library(plyr)
#library (data.table)
#require(ggseqlogo)
# NOTE(review): hard-coded absolute path -- this script only runs on the
# author's machine; consider a command-line argument or here::here().
setwd("/media/nikolai/DATADRIVE1/BioProjects/TAL_generator/swapsams/")
list.files(pattern=".txt$") # use the pattern argument to define a common pattern for import files with regex. Here: .csv
list.filenames<-list.files(pattern=".txt$")
list.filenames <- mixedsort(list.filenames)  # natural sort: file2 before file10
print(list.filenames)
# RVD lookup table: maps a single-letter module code to an RVD name; columns
# 4:7 are used later as per-nucleotide (A/T/G/C) weights.
RVD2 = read.csv("/media/nikolai/DATADRIVE1/BioProjects/TAL_generator/RVDs/RVDs2.csv")
# Read every per-position table and column-bind them into one wide data frame.
# Bug fix: the original wrapped this call in
#   for (i in 1:length(list.filenames)) { alldata <- do.call(...) }
# which recomputed the identical cbind once per input file and ignored `i`;
# a single call produces exactly the same `alldata`.
alldata <- do.call(cbind, lapply(list.filenames, read.table, header = FALSE, sep = "\t"))
# Drop columns 4, 7, ..., 34: each 3-column input table repeats the read
# name, so every third column after the first table is redundant.
alldata[(seq(4, 35, by = 3))] <- list(NULL)
# Remaining layout: readname, then (module call, MAPQ) pairs for positions 1..12.
colnames(alldata) <- c("readname",seq(1, 12,by=0.5))
colnames(alldata)[c(3,5,7,9,11,13,15,17,19,21,23,25)] <- "MAPQ"
# Concatenate the first letter of each position's module call into one
# 12-character string per read.
alldata$string <- gsub(" ","", paste(substr(alldata[,2], 0, 1),
                                     substr(alldata[,4], 0, 1),
                                     substr(alldata[,6], 0, 1),
                                     substr(alldata[,8], 0, 1),
                                     substr(alldata[,10], 0, 1),
                                     substr(alldata[,12], 0, 1),
                                     substr(alldata[,14], 0, 1),
                                     substr(alldata[,16], 0, 1),
                                     substr(alldata[,18], 0, 1),
                                     substr(alldata[,20], 0, 1),
                                     substr(alldata[,22], 0, 1),
                                     substr(alldata[,24], 0, 1),
                                     collapse = NULL))
# '*' marks an unaligned position -> NA; MAPQ 255 ("unavailable") -> 0.
alldata[alldata == '*'] <- NA
alldata[alldata == 255] <- 0
# Per-read totals: summed MAPQ across the 12 positions, and missing-cell count.
alldata$sum <- rowSums(alldata[,c(3,5,7,9,11,13,15,17,19,21,23,25)])
alldata$missing <- rowSums(is.na(alldata))
plot(hist(alldata$missing, breaks=16))
# Reads with complete calls at all 12 positions.
hqdata <- na.omit(alldata)
# For each of the 12 positions: keep module calls with MAPQ == 40, take the
# first letter of the module name, and append one copy of every letter A-X
# ('alpha') so table() later reports all 24 levels (this pseudocount is
# subtracted again when the tables are built: table(chqN) - 1).
# chqNRVD translates module letters to RVD names via the RVD2 lookup.
alpha <- LETTERS[seq( from = 1, to = 24 )]
#beta <- as.vector(RVD2$RVD)
chq1 <- substr(alldata[which(alldata[,3]=="40"),2],0,1)
chq1 <- append(chq1, alpha)
chq1RVD <- mapvalues(chq1, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
chq2 <- substr(alldata[which(alldata[,5]=="40"),4],0,1)
chq2 <- append(chq2, alpha)
chq2RVD <- mapvalues(chq2, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
chq3 <- substr(alldata[which(alldata[,7]=="40"),6],0,1)
chq3 <- append(chq3, alpha)
chq3RVD <- mapvalues(chq3, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
chq4 <- substr(alldata[which(alldata[,9]=="40"),8],0,1)
chq4 <- append(chq4, alpha)
chq4RVD <- mapvalues(chq4, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
chq5 <- substr(alldata[which(alldata[,11]=="40"),10],0,1)
chq5 <- append(chq5, alpha)
chq5RVD <- mapvalues(chq5, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
chq6 <- substr(alldata[which(alldata[,13]=="40"),12],0,1)
chq6 <- append(chq6, alpha)
chq6RVD <- mapvalues(chq6, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
chq7 <- substr(alldata[which(alldata[,15]=="40"),14],0,1)
chq7 <- append(chq7, alpha)
chq7RVD <- mapvalues(chq7, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
chq8 <- substr(alldata[which(alldata[,17]=="40"),16],0,1)
chq8 <- append(chq8, alpha)
chq8RVD <- mapvalues(chq8, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
chq9 <- substr(alldata[which(alldata[,19]=="40"),18],0,1)
chq9 <- append(chq9, alpha)
chq9RVD <- mapvalues(chq9, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
chq10 <- substr(alldata[which(alldata[,21]=="40"),20],0,1)
chq10 <- append(chq10, alpha)
chq10RVD <- mapvalues(chq10, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
chq11 <- substr(alldata[which(alldata[,23]=="40"),22],0,1)
chq11 <- append(chq11, alpha)
chq11RVD <- mapvalues(chq11, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
chq12 <- substr(alldata[which(alldata[,25]=="40"),24],0,1)
chq12 <- append(chq12, alpha)
chq12RVD <- mapvalues(chq12, from=as.vector(RVD2$module), to=as.vector(RVD2$RVD))
# Per-position counts of module letters (t1..t12); the "- 1" removes the one
# pseudocount per letter that was added via 'alpha' above.
t1 <- table(chq1)-1
t2 <- table(chq2)-1
t3 <- table(chq3)-1
t4 <- table(chq4)-1
t5 <- table(chq5)-1
t6 <- table(chq6)-1
t7 <- table(chq7)-1
t8 <- table(chq8)-1
t9 <- table(chq9)-1
t10 <- table(chq10)-1
t11 <- table(chq11)-1
t12 <- table(chq12)-1
# Same for RVD names: rtN are counts, frtN the relative frequencies.
rt1 <- table(chq1RVD)-1
frt1 <- rt1/sum(rt1)
rt2 <- table(chq2RVD)-1
frt2 <- rt2/sum(rt2)
rt3 <- table(chq3RVD)-1
frt3 <- rt3/sum(rt3)
rt4 <- table(chq4RVD)-1
frt4 <- rt4/sum(rt4)
rt5 <- table(chq5RVD)-1
frt5 <- rt5/sum(rt5)
rt6 <- table(chq6RVD)-1
frt6 <- rt6/sum(rt6)
rt7 <- table(chq7RVD)-1
frt7 <- rt7/sum(rt7)
rt8 <- table(chq8RVD)-1
frt8 <- rt8/sum(rt8)
rt9 <- table(chq9RVD)-1
frt9 <- rt9/sum(rt9)
rt10 <- table(chq10RVD)-1
frt10 <- rt10/sum(rt10)
rt11 <- table(chq11RVD)-1
frt11 <- rt11/sum(rt11)
rt12 <- table(chq12RVD)-1
frt12 <- rt12/sum(rt12)
# Matrices: rows = module letters / RVD names, columns = positions 1..12.
combined <- cbind(t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12)
combinedRVD <- cbind(rt1,rt2,rt3,rt4,rt5,rt6,rt7,rt8,rt9,rt10,rt11,rt12)
#comb <- t(combined)
# Expected nucleotide frequency per position: weight each RVD's A/T/G/C
# affinities (RVD2 columns 4:7) by that RVD's observed frequency there.
# NOTE(review): "AA" and "HT" each appear twice in this vector, so their
# contributions are added twice -- looks like a copy/paste slip; confirm.
# NOTE(review): frt1[frt1 = rvdtocheck] passes rvdtocheck as a *named*
# argument to `[`, which ends up indexing by name like frt1[rvdtocheck];
# it works, but is almost certainly a typo for that simpler form.
nucfreq <- matrix(0, ncol = 4, nrow = 12)
colnames(nucfreq) <- c("A","T","G","C")
for (rvdtocheck in c("AA","AA","CI","CP","CT","DN","DT","EN","HA","HD","HG","HN","HS","HT","HT","KT","NN","NS","NT","RA","RD","RI","VA","YT"))
{
  #print(RVD2[RVD2$RVD==rvdtocheck,4:7])
  #print(frt1[frt1 = rvdtocheck])
  nucfreq[1,] <- nucfreq[1,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt1[frt1 = rvdtocheck])
  nucfreq[2,] <- nucfreq[2,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt2[frt2 = rvdtocheck])
  nucfreq[3,] <- nucfreq[3,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt3[frt3 = rvdtocheck])
  nucfreq[4,] <- nucfreq[4,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt4[frt4 = rvdtocheck])
  nucfreq[5,] <- nucfreq[5,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt5[frt5 = rvdtocheck])
  nucfreq[6,] <- nucfreq[6,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt6[frt6 = rvdtocheck])
  nucfreq[7,] <- nucfreq[7,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt7[frt7 = rvdtocheck])
  nucfreq[8,] <- nucfreq[8,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt8[frt8 = rvdtocheck])
  nucfreq[9,] <- nucfreq[9,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt9[frt9 = rvdtocheck])
  nucfreq[10,] <- nucfreq[10,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt10[frt10 = rvdtocheck])
  nucfreq[11,] <- nucfreq[11,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt11[frt11 = rvdtocheck])
  nucfreq[12,] <- nucfreq[12,] + as.numeric(RVD2[RVD2$RVD==rvdtocheck,4:7][1,] * frt12[frt12 = rvdtocheck])
}
# Normalise each position's row to percentages.
nucfreq <- (nucfreq/rowSums(nucfreq))*100
# Label columns by position 1..12 for plotting.
colnames(combined) <- c(1:12)
colnames(combinedRVD) <- c(1:12)
#combined <- c(rep(names(t1), t1), rep(names(t2), t2))
#table(combined)
library(devtools)
# NOTE(review): installing from GitHub inside an analysis script adds a
# network dependency and reinstalls on every run; install once, elsewhere.
install_github('kkdey/Logolas')
library("Logolas")
library(RColorBrewer)
# Colour profile for the module-letter logo.  The next two assignments are
# overwritten by the explicit vector below, so only the last one matters.
color_profile <- list("type" = "per_row","col" = RColorBrewer::brewer.pal(dim(combined)[1],name ="Spectral"))
color_profile$col[12] <- "#000000"
color_profile$col <- c(rbind(color_profile$col,color_profile$col))
color_profile$col <- c("#9E0142","#9E0142","#3288BD","#3288BD","#9E0142","#9E0142","#3288BD","#3288BD","#9E0142","#9E0142","#3288BD","#3288BD","#9E0142","#9E0142","#3288BD","#3288BD","#9E0142","#9E0142","#3288BD","#3288BD","#9E0142","#9E0142","#3288BD","#3288BD")
color_profile2 <- list("type" = "per_row","col" = colorRampPalette(brewer.pal(8,"Dark2"))(22))
# Logo of module-letter frequencies per position.
logomaker(combined,hist = FALSE,
          frame_width = 1,
          color_profile = color_profile,
          ic.scale = FALSE,
          alpha = 2,
          yscale_change=TRUE,
          xlab="position",
          col_line_split = "grey80")
# Logo of RVD frequencies per position.
logomaker(combinedRVD,hist = FALSE,
          frame_width = 1,
          color_profile = color_profile2,
          ic.scale = FALSE,
          alpha = 2,
          yscale_change=TRUE,
          xlab="position",
          col_line_split = "grey80")
#color_profile$col <- c("#9E0142","#9E0142","#3288BD","#3288BD")
# Reuse the profile with one colour per nucleotide for the A/T/G/C logo.
color_profile$col <- c("darkred","darkblue","darkgrey","darkgreen")
tnucfreq <- t(nucfreq)
colnames(tnucfreq) <- c(1:12)
logomaker(tnucfreq,hist = FALSE,
          frame_width = 1,
          color_profile = color_profile,
          ic.scale = FALSE,
          alpha = 2,
          yscale_change=TRUE,
          xlab="position",
          col_line_split = "grey80")
# Final summary: total reads vs. reads with complete calls at all positions.
print("all: ")
print(dim(alldata))
print("no missing data: ")
print(dim(hqdata))
|
/R_analysis/oldTALEscripts/postgraph.R
|
no_license
|
genome-traffic/ZFN_analysis
|
R
| false
| false
| 8,413
|
r
|
#!/usr/bin/env Rscript
library("devtools")
library (ggplot2)
require(gtools)
library(plyr)
#library (data.table)
#require(ggseqlogo)
setwd("/media/nikolai/DATADRIVE1/BioProjects/TAL_generator/swapsams/")
list.files(pattern=".txt$") # use the pattern argument to define a common pattern for import files with regex. Here: .csv
list.filenames<-list.files(pattern=".txt$")
list.filenames <- mixedsort(list.filenames)
print(list.filenames)
RVD2 = read.csv("/media/nikolai/DATADRIVE1/BioProjects/TAL_generator/RVDs/RVDs2.csv")
for (i in 1:length(list.filenames))
{
alldata <- do.call(cbind,lapply(list.filenames,read.table, header = FALSE,sep="\t"))
}
# Drop every third column starting at 4 (redundant columns from the
# per-file tables after cbind).
alldata[(seq(4, 35, by = 3))] <- list(NULL)
# Remaining layout: read name, then alternating (module, MAPQ) pairs for
# positions 1..12; the half-step names 1, 1.5, 2, ... label those pairs.
colnames(alldata) <- c("readname",seq(1, 12,by=0.5))
colnames(alldata)[c(3,5,7,9,11,13,15,17,19,21,23,25)] <- "MAPQ"
# Concatenate the first character of each module column into one
# 12-letter string per read.
alldata$string <- gsub(" ","", paste(substr(alldata[,2], 0, 1),
                                     substr(alldata[,4], 0, 1),
                                     substr(alldata[,6], 0, 1),
                                     substr(alldata[,8], 0, 1),
                                     substr(alldata[,10], 0, 1),
                                     substr(alldata[,12], 0, 1),
                                     substr(alldata[,14], 0, 1),
                                     substr(alldata[,16], 0, 1),
                                     substr(alldata[,18], 0, 1),
                                     substr(alldata[,20], 0, 1),
                                     substr(alldata[,22], 0, 1),
                                     substr(alldata[,24], 0, 1),
                                     collapse = NULL))
# "*" marks a missing call; MAPQ 255 means "unavailable" in SAM, recode to 0.
alldata[alldata == '*'] <- NA
alldata[alldata == 255] <- 0
alldata$sum <- rowSums(alldata[,c(3,5,7,9,11,13,15,17,19,21,23,25)])
alldata$missing <- rowSums(is.na(alldata))
# NOTE(review): hist() already draws; plot(hist(...)) draws the histogram twice.
plot(hist(alldata$missing, breaks=16))
# Reads with a call at every position.
hqdata <- na.omit(alldata)
# The 24 possible module letters, used as pseudo-counts below.
alpha <- LETTERS[seq( from = 1, to = 24 )]
#beta <- as.vector(RVD2$RVD)
# For each of the 12 module positions, collect the module letters of reads
# whose MAPQ at that position is 40 (uniquely mapped), then translate the
# letters to RVD names.  Column layout of `alldata`: module letter in column
# 2*pos, its MAPQ in column 2*pos + 1.  Appending `alpha` (all 24 module
# letters) adds one pseudo-count per module so every level shows up in the
# downstream table() calls; the extra count is removed there by "- 1".
# This loop replaces 36 copy-pasted statements and creates the exact same
# global variables (chq1..chq12 and chq1RVD..chq12RVD) via assign().
for (pos in 1:12) {
  mod_col  <- 2 * pos        # column holding the module letter
  qual_col <- 2 * pos + 1    # column holding the MAPQ score
  modules <- substr(alldata[which(alldata[, qual_col] == "40"), mod_col], 0, 1)
  modules <- append(modules, alpha)
  assign(paste0("chq", pos), modules)
  # Translate module letters to RVD names via the lookup table.
  assign(paste0("chq", pos, "RVD"),
         mapvalues(modules, from = as.vector(RVD2$module), to = as.vector(RVD2$RVD)))
}
# Tabulate module-letter counts (t1..t12) and RVD counts / fractions
# (rt1..rt12, frt1..frt12) for each position.  Subtracting 1 removes the
# pseudo-counts appended above; frt* are the per-position RVD frequencies.
# This loop replaces 36 copy-pasted statements and creates the same global
# variables via assign()/get().
for (pos in 1:12) {
  assign(paste0("t", pos), table(get(paste0("chq", pos))) - 1)
  rvd_counts <- table(get(paste0("chq", pos, "RVD"))) - 1
  assign(paste0("rt", pos), rvd_counts)
  assign(paste0("frt", pos), rvd_counts / sum(rvd_counts))
}
# Combine the per-position tables into matrices: rows = module letters /
# RVD names, columns = positions 1..12.
combined <- cbind(t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12)
combinedRVD <- cbind(rt1,rt2,rt3,rt4,rt5,rt6,rt7,rt8,rt9,rt10,rt11,rt12)
#comb <- t(combined)
colnames(combined) <- c(1:12)
colnames(combinedRVD) <- c(1:12)

# Expected nucleotide preference per position: weight each RVD's per-base
# binding preference (RVD2 columns 4:7 = A, T, G, C) by that RVD's observed
# frequency at the position.
nucfreq <- matrix(0, ncol = 4, nrow = 12)
colnames(nucfreq) <- c("A","T","G","C")
# BUG FIX: the original listed "AA" and "HT" twice, double-counting their
# contributions, and indexed the frequency tables with the malformed
# expression frt1[frt1 = rvdtocheck] (the argument name is ignored by `[`,
# so it only worked by accident).  unique() plus plain name-indexing gives
# the intended behaviour; the inner loop replaces 12 copy-pasted lines.
rvd_names <- unique(c("AA","CI","CP","CT","DN","DT","EN","HA","HD","HG","HN",
                      "HS","HT","KT","NN","NS","NT","RA","RD","RI","VA","YT"))
for (rvdtocheck in rvd_names) {
  base_pref <- as.numeric(RVD2[RVD2$RVD == rvdtocheck, 4:7][1, ])
  for (pos in 1:12) {
    pos_freq <- get(paste0("frt", pos))
    nucfreq[pos, ] <- nucfreq[pos, ] + base_pref * pos_freq[rvdtocheck]
  }
}
nucfreq <- (nucfreq/rowSums(nucfreq))*100  # normalise each row to percentages

library(devtools)
# Install the logo-plotting package only if it is missing, instead of
# unconditionally reinstalling from GitHub on every run.
if (!requireNamespace("Logolas", quietly = TRUE)) install_github('kkdey/Logolas')
library("Logolas")
library(RColorBrewer)
# One colour per row of `combined`; then override with alternating
# red/blue pairs so adjacent module letters are distinguishable.
color_profile <- list("type" = "per_row","col" = RColorBrewer::brewer.pal(dim(combined)[1],name ="Spectral"))
color_profile$col[12] <- "#000000"
color_profile$col <- c(rbind(color_profile$col,color_profile$col))
color_profile$col <- c("#9E0142","#9E0142","#3288BD","#3288BD","#9E0142","#9E0142","#3288BD","#3288BD","#9E0142","#9E0142","#3288BD","#3288BD","#9E0142","#9E0142","#3288BD","#3288BD","#9E0142","#9E0142","#3288BD","#3288BD","#9E0142","#9E0142","#3288BD","#3288BD")
color_profile2 <- list("type" = "per_row","col" = colorRampPalette(brewer.pal(8,"Dark2"))(22))
# Sequence logo of raw module-letter counts per position.
logomaker(combined,hist = FALSE,
          frame_width = 1,
          color_profile = color_profile,
          ic.scale = FALSE,
          alpha = 2,
          yscale_change=TRUE,
          xlab="position",
          col_line_split = "grey80")
# Sequence logo of RVD counts per position (22-colour palette).
logomaker(combinedRVD,hist = FALSE,
          frame_width = 1,
          color_profile = color_profile2,
          ic.scale = FALSE,
          alpha = 2,
          yscale_change=TRUE,
          xlab="position",
          col_line_split = "grey80")
#color_profile$col <- c("#9E0142","#9E0142","#3288BD","#3288BD")
# One colour per nucleotide row (A, T, G, C) for the expected-base logo.
color_profile$col <- c("darkred","darkblue","darkgrey","darkgreen")
# logomaker() wants rows = symbols, columns = positions, so transpose.
tnucfreq <- t(nucfreq)
colnames(tnucfreq) <- c(1:12)
logomaker(tnucfreq,hist = FALSE,
          frame_width = 1,
          color_profile = color_profile,
          ic.scale = FALSE,
          alpha = 2,
          yscale_change=TRUE,
          xlab="position",
          col_line_split = "grey80")
# Summary: total reads vs. reads with a call at every position.
print("all: ")
print(dim(alldata))
print("no missing data: ")
print(dim(hqdata))
|
# Load and left-join all measures (reaction time, cortisol, questionnaires,
# eye gaze, pupillometry, cluster assignments) into one wide data frame,
# AllDataCortAnalysis, keyed on participant ID.
# NOTE(review): merge() joins on ALL shared column names; presumably only
# CABIL_ID overlaps between these tables — confirm no other columns collide.
#set working directory and important list of relevant participants
setwd("/Users/abbiepopa/Documents/Lab/DPTB/Cortisol Analysis/cortdata from Elliot/Working")
Participants<-read.csv("PartList_DPTB.csv", na.strings=".")
#import reaction time data for TD participants
RTTD<-read.csv("DPTB_RT_9-17-14_TDKoralyOut.csv",na.strings="NaN")
RTTD<-RTTD[,c(1,9,10)]
#import reaction time data for participants with 22q
RT22q<-read.csv("DPTB_RT_9-17-14_22qKoralyOut.csv", na.strings="NaN")
RT22q<-RT22q[,c(1,9,10)]
RT<-rbind(RTTD,RT22q)
#import cortisol data
Cort<-read.csv("CORT-Data-EAB_July2014.csv")
Cort<-Cort[,c(1, 3:6)]
colnames(Cort)[1]<-"CABIL_ID"
#import spence and ABAS data
SpenceABAS<-read.csv("Clustering_Database_8-8-14.csv", na.strings="-9999")
#import overall gaze data
EyeGazeOverall<-read.csv("AllData.csv", na.strings="NA")
EyeGazeOverall<-EyeGazeOverall[,2:11]
colnames(EyeGazeOverall)[1]<-"CABIL_ID"
#import time course gaze data
EyeGazeTimeCourse<-read.csv("AllDataTC25no676.csv", na.strings="NA")
EyeGazeTimeCourse<-EyeGazeTimeCourse[,2:24]
colnames(EyeGazeTimeCourse)[1]<-"CABIL_ID"
#import overall pupilometry for kids with 22q
PupilOverall22q<-read.csv("PupilPilot_6-5-14_22q.csv", na.strings=".")
PupilOverall22q<-PupilOverall22q[,c(1:7, 9:13)]
colnames(PupilOverall22q)[1]<-"CABIL_ID"
#import overall pupilometry to kids who are TD
PupilOverallTD<-read.csv("PupilPilot_6-5-14_TD.csv", na.strings=".")
PupilOverallTD<-PupilOverallTD[,c(1:7,9:13)]
colnames(PupilOverallTD)[1]<-"CABIL_ID"
#merge Dx groups
PupilOverall<-rbind(PupilOverall22q, PupilOverallTD)
#import time course pupilometry data for kids who are TD
PupilChangeTD<-read.csv("PupilChange_6-5-14_TD.csv",na.strings=".")
PupilChangeTD<-PupilChangeTD[,1:7]
colnames(PupilChangeTD)[1]<-"CABIL_ID"
#import time course pupilometry data for kids with 22q
PupilChange22q<-read.csv("PupilChange_6-5-14_22q.csv",na.strings=".")
PupilChange22q<-PupilChange22q[,1:7]
colnames(PupilChange22q)[1]<-"CABIL_ID"
#merge TC pupil data
PupilChange<-rbind(PupilChangeTD,PupilChange22q)
#merge data set (all.x=T keeps every participant even without a match)
AllDataCortAnalysis<-merge(Participants, Cort, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, SpenceABAS, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, RT, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, EyeGazeOverall, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, EyeGazeTimeCourse, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, PupilOverall, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, PupilChange, all.x=T)
#calculate change in cort, and change in log cort
AllDataCortAnalysis$CortDelta<-AllDataCortAnalysis$PostCORT-AllDataCortAnalysis$PreCORT
# NOTE(review): log10(CortDelta + 1) is NaN for CortDelta <= -1 — confirm
# deltas that negative cannot occur in this data.
AllDataCortAnalysis$LogCortDelta<-log10(AllDataCortAnalysis$CortDelta+1)
AllDataCortAnalysis$CortLogDelta<-AllDataCortAnalysis$LogPostCort-AllDataCortAnalysis$LogPreCort
###clusters###
ClusAll<-read.csv("AllDataClus.csv")
Clus<-ClusAll[,c(2,12)]
colnames(Clus)[1]<-"CABIL_ID"
AllDataCortAnalysis<-merge(AllDataCortAnalysis, Clus, all.x=T)
### pos, neg, or no change in cort
# Classify a single cortisol change into a bin:
#   -1 : x < lower (meaningful decrease)
#    1 : x > upper (meaningful increase)
#    0 : otherwise (no meaningful change)
#   NA : x is NA
# The cut-offs were hard-coded in the original; they are now parameters with
# the same defaults, so existing callers behave identically.
BinAssigner<-function(x, lower = -0.10900, upper = 0.10430){
	if(is.na(x)){
		return(NA)
	}
	if(x < lower){
		-1
	} else if (x > upper){
		1
	} else{
		0
	}
}
# Assign each participant's cortisol-change bin.  The original used a
# row-by-row for loop; vapply over the column is equivalent and vectorised.
# as.numeric() keeps the column numeric (BinAssigner returns a logical NA
# for missing input).
AllDataCortAnalysis$CortDeltaBin <- vapply(
	AllDataCortAnalysis$CortDelta,
	function(v) as.numeric(BinAssigner(v)),
	numeric(1))
#Negative Only Graphs
# For participants whose cortisol decreased: scan every measure column
# (5:76) and, wherever it correlates (p < .05) with pre-cort, post-cort or
# cortisol change, open a new window with a standardised scatter, fitted
# line and confidence band.
# NOTE(review): quartz() is macOS-only — confirm this script never runs on
# Linux/Windows (use dev.new() for portability).
NegChange<-subset(AllDataCortAnalysis, CortDeltaBin==-1)
for(i in 5:76){
	# correlation with log pre-task cortisol
	pretest<-cor.test(NegChange$LogPreCort, NegChange[,i])
	if(pretest$p.value<0.05){
		nowData<-NegChange[which(!is.na(NegChange$LogPreCort)&!is.na(NegChange[,i])),]
		quartz()
		plot(scale(nowData$LogPreCort), scale(nowData[,i]), main=paste("r=",round(pretest$estimate, digits=2),"p=",round(pretest$p.value, digits=2),"n=",dim(nowData)[[1]]), xlab="LogPreCort", ylab=colnames(nowData)[i],col="darkblue",bg="blue", pch=25)
		abline(lm(scale(nowData[,i])~scale(nowData$LogPreCort)))
		# 95% confidence band; sort direction follows the sign of r so the
		# band tracks the fitted line.
		prd<-predict(lm(scale(nowData[,i])~scale(nowData$LogPreCort)), interval="confidence")
		lines(sort(scale(nowData$LogPreCort), decreasing=(pretest$estimate[[1]]<0)),sort(prd[,2]),col="red", lty=2)
		lines(sort(scale(nowData$LogPreCort), decreasing=(pretest$estimate[[1]]<0)),sort(prd[,3]),col="red", lty=2)
	}
	# correlation with log post-task cortisol
	posttest<-cor.test(NegChange$LogPostCort, NegChange[,i])
	if(posttest$p.value<0.05){
		nowData<-NegChange[which(!is.na(NegChange$LogPostCort)&!is.na(NegChange[,i])),]
		quartz()
		plot(scale(nowData$LogPostCort), scale(nowData[,i]), main=paste("r=",round(posttest$estimate, digits=2),"p=",round(posttest$p.value, digits=2),"n=",dim(nowData)[[1]]), xlab="LogPostCort", ylab=colnames(nowData)[i],col="darkblue",bg="blue", pch=25)
		abline(lm(scale(nowData[,i])~scale(nowData$LogPostCort)))
		prd<-predict(lm(scale(nowData[,i])~scale(nowData$LogPostCort)), interval="confidence")
		lines(sort(scale(nowData$LogPostCort), decreasing=(posttest$estimate[[1]]<0)),sort(prd[,2]),col="red", lty=2)
		lines(sort(scale(nowData$LogPostCort), decreasing=(posttest$estimate[[1]]<0)),sort(prd[,3]),col="red", lty=2)
	}
	# correlation with raw cortisol change (post - pre)
	deltatest<-cor.test(NegChange$CortDelta, NegChange[,i])
	if(deltatest$p.value<0.05){
		nowData<-NegChange[which(!is.na(NegChange$CortDelta)&!is.na(NegChange[,i])),]
		quartz()
		plot(scale(nowData$CortDelta), scale(nowData[,i]), main=paste("r=",round(deltatest$estimate, digits=2),"p=",round(deltatest$p.value, digits=2),"n=",dim(nowData)[[1]]), xlab="Change in Cort (Post-Pre)", ylab=colnames(nowData)[i],col="darkblue",bg="blue", pch=25)
		abline(lm(scale(nowData[,i])~scale(nowData$CortDelta)))
		prd<-predict(lm(scale(nowData[,i])~scale(nowData$CortDelta)), interval="confidence")
		lines(sort(scale(nowData$CortDelta), decreasing=(deltatest$estimate[[1]]<0)),sort(prd[,2]),col="red", lty=2)
		lines(sort(scale(nowData$CortDelta), decreasing=(deltatest$estimate[[1]]<0)),sort(prd[,3]),col="red", lty=2)
	}
}
#Positive Only Graphs (original comment said "Negative" — copy/paste slip;
# this section plots the positive-change bin in gold)
# Same scan as the negative-bin section, for participants whose cortisol
# increased (CortDeltaBin == 1).
PosChange<-subset(AllDataCortAnalysis, CortDeltaBin==1)
for(i in 5:76){
	# correlation with log pre-task cortisol
	pretest<-cor.test(PosChange$LogPreCort, PosChange[,i])
	if(pretest$p.value<0.05){
		nowData<-PosChange[which(!is.na(PosChange$LogPreCort)&!is.na(PosChange[,i])),]
		quartz()
		plot(scale(nowData$LogPreCort), scale(nowData[,i]), main=paste("r=",round(pretest$estimate, digits=2),"p=",round(pretest$p.value, digits=2),"n=",dim(nowData)[[1]]), xlab="LogPreCort", ylab=colnames(nowData)[i],col="darkgoldenrod4",bg="darkgoldenrod1", pch=24)
		abline(lm(scale(nowData[,i])~scale(nowData$LogPreCort)))
		prd<-predict(lm(scale(nowData[,i])~scale(nowData$LogPreCort)), interval="confidence")
		lines(sort(scale(nowData$LogPreCort), decreasing=(pretest$estimate[[1]]<0)),sort(prd[,2]),col="red", lty=2)
		lines(sort(scale(nowData$LogPreCort), decreasing=(pretest$estimate[[1]]<0)),sort(prd[,3]),col="red", lty=2)
	}
	# correlation with log post-task cortisol
	posttest<-cor.test(PosChange$LogPostCort, PosChange[,i])
	if(posttest$p.value<0.05){
		nowData<-PosChange[which(!is.na(PosChange$LogPostCort)&!is.na(PosChange[,i])),]
		quartz()
		plot(scale(nowData$LogPostCort), scale(nowData[,i]), main=paste("r=",round(posttest$estimate, digits=2),"p=",round(posttest$p.value, digits=2),"n=",dim(nowData)[[1]]), xlab="LogPostCort", ylab=colnames(nowData)[i],col="darkgoldenrod4",bg="darkgoldenrod1", pch=24)
		abline(lm(scale(nowData[,i])~scale(nowData$LogPostCort)))
		prd<-predict(lm(scale(nowData[,i])~scale(nowData$LogPostCort)), interval="confidence")
		lines(sort(scale(nowData$LogPostCort), decreasing=(posttest$estimate[[1]]<0)),sort(prd[,2]),col="red", lty=2)
		lines(sort(scale(nowData$LogPostCort), decreasing=(posttest$estimate[[1]]<0)),sort(prd[,3]),col="red", lty=2)
	}
	# correlation with raw cortisol change (post - pre)
	deltatest<-cor.test(PosChange$CortDelta, PosChange[,i])
	if(deltatest$p.value<0.05){
		nowData<-PosChange[which(!is.na(PosChange$CortDelta)&!is.na(PosChange[,i])),]
		quartz()
		plot(scale(nowData$CortDelta), scale(nowData[,i]), main=paste("r=",round(deltatest$estimate, digits=2),"p=",round(deltatest$p.value, digits=2),"n=",dim(nowData)[[1]]), xlab="Change in Cort (Post-Pre)", ylab=colnames(nowData)[i],col="darkgoldenrod4",bg="darkgoldenrod1", pch=24)
		abline(lm(scale(nowData[,i])~scale(nowData$CortDelta)))
		prd<-predict(lm(scale(nowData[,i])~scale(nowData$CortDelta)), interval="confidence")
		lines(sort(scale(nowData$CortDelta), decreasing=(deltatest$estimate[[1]]<0)),sort(prd[,2]),col="red", lty=2)
		lines(sort(scale(nowData$CortDelta), decreasing=(deltatest$estimate[[1]]<0)),sort(prd[,3]),col="red", lty=2)
	}
}
#No Change Graphs
# Same scan as the sections above, for the no-meaningful-change bin
# (CortDeltaBin == 0), plotted in gray.
NoChange<-subset(AllDataCortAnalysis, CortDeltaBin==0)
for(i in 5:76){
	# correlation with log pre-task cortisol
	pretest<-cor.test(NoChange$LogPreCort, NoChange[,i])
	if(pretest$p.value<0.05){
		nowData<-NoChange[which(!is.na(NoChange$LogPreCort)&!is.na(NoChange[,i])),]
		quartz()
		plot(scale(nowData$LogPreCort), scale(nowData[,i]), main=paste("r=",round(pretest$estimate, digits=2),"p=",round(pretest$p.value, digits=2),"n=",dim(nowData)[[1]]), xlab="LogPreCort", ylab=colnames(nowData)[i],col="darkgray",bg="lightgray", pch=22)
		abline(lm(scale(nowData[,i])~scale(nowData$LogPreCort)))
		prd<-predict(lm(scale(nowData[,i])~scale(nowData$LogPreCort)), interval="confidence")
		lines(sort(scale(nowData$LogPreCort), decreasing=(pretest$estimate[[1]]<0)),sort(prd[,2]),col="red", lty=2)
		lines(sort(scale(nowData$LogPreCort), decreasing=(pretest$estimate[[1]]<0)),sort(prd[,3]),col="red", lty=2)
	}
	# correlation with log post-task cortisol
	posttest<-cor.test(NoChange$LogPostCort, NoChange[,i])
	if(posttest$p.value<0.05){
		nowData<-NoChange[which(!is.na(NoChange$LogPostCort)&!is.na(NoChange[,i])),]
		quartz()
		plot(scale(nowData$LogPostCort), scale(nowData[,i]), main=paste("r=",round(posttest$estimate, digits=2),"p=",round(posttest$p.value, digits=2),"n=",dim(nowData)[[1]]), xlab="LogPostCort", ylab=colnames(nowData)[i],col="darkgray",bg="lightgray", pch=22)
		abline(lm(scale(nowData[,i])~scale(nowData$LogPostCort)))
		prd<-predict(lm(scale(nowData[,i])~scale(nowData$LogPostCort)), interval="confidence")
		lines(sort(scale(nowData$LogPostCort), decreasing=(posttest$estimate[[1]]<0)),sort(prd[,2]),col="red", lty=2)
		lines(sort(scale(nowData$LogPostCort), decreasing=(posttest$estimate[[1]]<0)),sort(prd[,3]),col="red", lty=2)
	}
	# correlation with raw cortisol change (post - pre)
	deltatest<-cor.test(NoChange$CortDelta, NoChange[,i])
	if(deltatest$p.value<0.05){
		nowData<-NoChange[which(!is.na(NoChange$CortDelta)&!is.na(NoChange[,i])),]
		quartz()
		plot(scale(nowData$CortDelta), scale(nowData[,i]), main=paste("r=",round(deltatest$estimate, digits=2),"p=",round(deltatest$p.value, digits=2),"n=",dim(nowData)[[1]]), xlab="Change in Cort (Post-Pre)", ylab=colnames(nowData)[i],col="darkgray",bg="lightgray", pch=22)
		abline(lm(scale(nowData[,i])~scale(nowData$CortDelta)))
		prd<-predict(lm(scale(nowData[,i])~scale(nowData$CortDelta)), interval="confidence")
		lines(sort(scale(nowData$CortDelta), decreasing=(deltatest$estimate[[1]]<0)),sort(prd[,2]),col="red", lty=2)
		lines(sort(scale(nowData$CortDelta), decreasing=(deltatest$estimate[[1]]<0)),sort(prd[,3]),col="red", lty=2)
	}
}
#Violins for Different Measures Based on ChangeInCort Bins
# For each measure, test whether it differs across the three cortisol-change
# bins and, when significant, draw a violin + boxplot per bin.
# NOTE(review): Anova() comes from the car package and ggplot() from ggplot2;
# neither is attached in this script — confirm they are loaded elsewhere
# (e.g. in the session or a sourcing script) or add the library() calls.
for (i in 5:76){
	thisfit<-Anova(lm(AllDataCortAnalysis[,i]~AllDataCortAnalysis$CortDeltaBin))
	# thisfit[1,4] is the p-value for the CortDeltaBin term
	if(thisfit[1,4]<0.05){
		quartz()
		print(ggplot(AllDataCortAnalysis, aes(factor(CortDeltaBin),AllDataCortAnalysis[,i]))+geom_violin(trim=F, aes(fill=factor(CortDeltaBin)))+scale_fill_manual(values=c("darkblue","lightgray","darkgoldenrod1"))+geom_boxplot(width=0.1,fill="grey50")+xlab("CortDeltaBin")+ylab(colnames(AllDataCortAnalysis)[i])+ggtitle(paste("p=",round(thisfit[1,4], digits=2))))
	}
}
###Follow-up Scatters to the Violins
library(stargazer)
#AngryEnd Angry
# Scatter of cortisol change vs. gaze to angry faces at trial end, drawn
# separately for the negative (blue), no-change (gray) and positive (gold)
# bins, each with its own regression line; writes an HTML n/r/p table.
nowData<-AllDataCortAnalysis[which(!is.na(AllDataCortAnalysis$CortDelta)&!is.na(AllDataCortAnalysis$PercAngryEnd25_Angry)),]
fitblue<-lm(scale(nowData[which(nowData$CortDeltaBin<0),]$PercAngryEnd25_Angry)~scale(nowData[which(nowData$CortDeltaBin<0),]$CortDelta))
bluecor<-cor.test(nowData[which(nowData$CortDeltaBin<0),]$CortDelta, nowData[which(nowData$CortDeltaBin<0),]$PercAngryEnd25_Angry)
fitgray<-lm(scale(nowData[which(nowData$CortDeltaBin==0),]$PercAngryEnd25_Angry)~scale(nowData[which(nowData$CortDeltaBin==0),]$CortDelta))
graycor<-cor.test(nowData[which(nowData$CortDeltaBin==0),]$CortDelta, nowData[which(nowData$CortDeltaBin==0),]$PercAngryEnd25_Angry)
fitgold<-lm(scale(nowData[which(nowData$CortDeltaBin>0),]$PercAngryEnd25_Angry)~scale(nowData[which(nowData$CortDeltaBin>0),]$CortDelta))
goldcor<-cor.test(nowData[which(nowData$CortDeltaBin>0),]$CortDelta, nowData[which(nowData$CortDeltaBin>0),]$PercAngryEnd25_Angry)
quartz()
plot(scale(nowData[which(nowData$CortDeltaBin<0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin<0),]$PercAngryEnd25_Angry), xlab="Change in Cort (Post-Pre)", ylab="Angry End - Angry", col="darkblue", bg="blue",
	pch=25, main="CortBins")
abline(fitblue,col="blue")
# BUG FIX (applies to the gold points below): the original indexed
# nowData$CortDeltaBi (missing the trailing "n") and only worked through
# $'s partial matching.  Also dropped xlab/ylab from the points() calls:
# they are not points() arguments and merely produced warnings.
points(scale(nowData[which(nowData$CortDeltaBin==0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin==0),]$PercAngryEnd25_Angry), col="darkgray", bg="lightgray",
	pch=22)
abline(fitgray, col="gray")
points(scale(nowData[which(nowData$CortDeltaBin>0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin>0),]$PercAngryEnd25_Angry), col="darkgoldenrod4", bg="darkgoldenrod1",
	pch=24)
abline(fitgold, col="darkgoldenrod3")
# Per-bin summary table: sample size, correlation, p-value.
regtable<-matrix(nrow=3, ncol=3)
colnames(regtable)<-c("n","r","p")
row.names(regtable)<-c("Negative Change (blue)","NoChange (gray)","Positive Change (gold)")
regtable[1,1]<-dim(nowData[which(nowData$CortDeltaBin<0),])[[1]]
regtable[2,1]<-dim(nowData[which(nowData$CortDeltaBin==0),])[[1]]
regtable[3,1]<-dim(nowData[which(nowData$CortDeltaBin>0),])[[1]]
regtable[1,2]<-round(bluecor$estimate, digits=2)
regtable[2,2]<-round(graycor$estimate, digits=2)
regtable[3,2]<-round(goldcor$estimate, digits=2)
regtable[1,3]<-round(bluecor$p.value, digits=2)
regtable[2,3]<-round(graycor$p.value, digits=2)
regtable[3,3]<-round(goldcor$p.value, digits=2)
stargazer(regtable,title="Angry End - Angry",out="AngryEndAngry.html",summary=F)
#HappyChange Neutral
# Scatter of cortisol change vs. change in gaze to neutral AOI on happy
# trials, per cortisol-change bin; writes an HTML n/r/p table.
nowData<-AllDataCortAnalysis[which(!is.na(AllDataCortAnalysis$CortDelta)&!is.na(AllDataCortAnalysis$PercHappyChange25_Neutral)),]
fitblue<-lm(scale(nowData[which(nowData$CortDeltaBin<0),]$PercHappyChange25_Neutral)~scale(nowData[which(nowData$CortDeltaBin<0),]$CortDelta))
bluecor<-cor.test(nowData[which(nowData$CortDeltaBin<0),]$CortDelta, nowData[which(nowData$CortDeltaBin<0),]$PercHappyChange25_Neutral)
fitgray<-lm(scale(nowData[which(nowData$CortDeltaBin==0),]$PercHappyChange25_Neutral)~scale(nowData[which(nowData$CortDeltaBin==0),]$CortDelta))
graycor<-cor.test(nowData[which(nowData$CortDeltaBin==0),]$CortDelta, nowData[which(nowData$CortDeltaBin==0),]$PercHappyChange25_Neutral)
fitgold<-lm(scale(nowData[which(nowData$CortDeltaBin>0),]$PercHappyChange25_Neutral)~scale(nowData[which(nowData$CortDeltaBin>0),]$CortDelta))
goldcor<-cor.test(nowData[which(nowData$CortDeltaBin>0),]$CortDelta, nowData[which(nowData$CortDeltaBin>0),]$PercHappyChange25_Neutral)
quartz()
plot(scale(nowData[which(nowData$CortDeltaBin<0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin<0),]$PercHappyChange25_Neutral), xlab="Change in Cort (Post-Pre)", ylab="Change in Neutral on Happy Trials", col="darkblue", bg="blue",
	pch=25, main="CortBins")
abline(fitblue,col="blue")
# BUG FIX (gold points below): original read nowData$CortDeltaBi (missing
# "n", worked only via $ partial matching); also dropped the invalid
# xlab/ylab arguments from the points() calls.
points(scale(nowData[which(nowData$CortDeltaBin==0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin==0),]$PercHappyChange25_Neutral), col="darkgray", bg="lightgray",
	pch=22)
abline(fitgray, col="gray")
points(scale(nowData[which(nowData$CortDeltaBin>0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin>0),]$PercHappyChange25_Neutral), col="darkgoldenrod4", bg="darkgoldenrod1",
	pch=24)
abline(fitgold, col="darkgoldenrod3")
# Per-bin summary table: sample size, correlation, p-value.
regtable<-matrix(nrow=3, ncol=3)
colnames(regtable)<-c("n","r","p")
row.names(regtable)<-c("Negative Change (blue)","NoChange (gray)","Positive Change (gold)")
regtable[1,1]<-dim(nowData[which(nowData$CortDeltaBin<0),])[[1]]
regtable[2,1]<-dim(nowData[which(nowData$CortDeltaBin==0),])[[1]]
regtable[3,1]<-dim(nowData[which(nowData$CortDeltaBin>0),])[[1]]
regtable[1,2]<-round(bluecor$estimate, digits=2)
regtable[2,2]<-round(graycor$estimate, digits=2)
regtable[3,2]<-round(goldcor$estimate, digits=2)
regtable[1,3]<-round(bluecor$p.value, digits=2)
regtable[2,3]<-round(graycor$p.value, digits=2)
regtable[3,3]<-round(goldcor$p.value, digits=2)
stargazer(regtable,title="Change in Neutral on Happy Trials",out="ChangeNeutralOnHappy.html",summary=F)
#HappyEnd Neutral
# Scatter of cortisol change vs. gaze to neutral AOI at the end of happy
# trials, per cortisol-change bin; writes an HTML n/r/p table.
nowData<-AllDataCortAnalysis[which(!is.na(AllDataCortAnalysis$CortDelta)&!is.na(AllDataCortAnalysis$PercHappyEnd25_Neutral)),]
fitblue<-lm(scale(nowData[which(nowData$CortDeltaBin<0),]$PercHappyEnd25_Neutral)~scale(nowData[which(nowData$CortDeltaBin<0),]$CortDelta))
bluecor<-cor.test(nowData[which(nowData$CortDeltaBin<0),]$CortDelta, nowData[which(nowData$CortDeltaBin<0),]$PercHappyEnd25_Neutral)
fitgray<-lm(scale(nowData[which(nowData$CortDeltaBin==0),]$PercHappyEnd25_Neutral)~scale(nowData[which(nowData$CortDeltaBin==0),]$CortDelta))
graycor<-cor.test(nowData[which(nowData$CortDeltaBin==0),]$CortDelta, nowData[which(nowData$CortDeltaBin==0),]$PercHappyEnd25_Neutral)
fitgold<-lm(scale(nowData[which(nowData$CortDeltaBin>0),]$PercHappyEnd25_Neutral)~scale(nowData[which(nowData$CortDeltaBin>0),]$CortDelta))
goldcor<-cor.test(nowData[which(nowData$CortDeltaBin>0),]$CortDelta, nowData[which(nowData$CortDeltaBin>0),]$PercHappyEnd25_Neutral)
quartz()
plot(scale(nowData[which(nowData$CortDeltaBin<0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin<0),]$PercHappyEnd25_Neutral), xlab="Change in Cort (Post-Pre)", ylab="Happy - Neutral End", col="darkblue", bg="blue",
	pch=25, main="CortBins")
abline(fitblue,col="blue")
# BUG FIX (gold points below): original read nowData$CortDeltaBi (missing
# "n", worked only via $ partial matching); also dropped the invalid
# xlab/ylab arguments from the points() calls.
points(scale(nowData[which(nowData$CortDeltaBin==0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin==0),]$PercHappyEnd25_Neutral), col="darkgray", bg="lightgray",
	pch=22)
abline(fitgray, col="gray")
points(scale(nowData[which(nowData$CortDeltaBin>0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin>0),]$PercHappyEnd25_Neutral), col="darkgoldenrod4", bg="darkgoldenrod1",
	pch=24)
abline(fitgold, col="darkgoldenrod3")
# Per-bin summary table: sample size, correlation, p-value.
regtable<-matrix(nrow=3, ncol=3)
colnames(regtable)<-c("n","r","p")
row.names(regtable)<-c("Negative Change (blue)","NoChange (gray)","Positive Change (gold)")
regtable[1,1]<-dim(nowData[which(nowData$CortDeltaBin<0),])[[1]]
regtable[2,1]<-dim(nowData[which(nowData$CortDeltaBin==0),])[[1]]
regtable[3,1]<-dim(nowData[which(nowData$CortDeltaBin>0),])[[1]]
regtable[1,2]<-round(bluecor$estimate, digits=2)
regtable[2,2]<-round(graycor$estimate, digits=2)
regtable[3,2]<-round(goldcor$estimate, digits=2)
regtable[1,3]<-round(bluecor$p.value, digits=2)
regtable[2,3]<-round(graycor$p.value, digits=2)
regtable[3,3]<-round(goldcor$p.value, digits=2)
stargazer(regtable,title="Happy - Neutral End",out="HappyNeutralEnd.html",summary=F)
#HappyEnd NonFace
# Scatter of cortisol change vs. gaze to non-face AOI at the end of happy
# trials, per cortisol-change bin; writes an HTML n/r/p table.
nowData<-AllDataCortAnalysis[which(!is.na(AllDataCortAnalysis$CortDelta)&!is.na(AllDataCortAnalysis$PercHappyEnd25_NonFace)),]
fitblue<-lm(scale(nowData[which(nowData$CortDeltaBin<0),]$PercHappyEnd25_NonFace)~scale(nowData[which(nowData$CortDeltaBin<0),]$CortDelta))
bluecor<-cor.test(nowData[which(nowData$CortDeltaBin<0),]$CortDelta, nowData[which(nowData$CortDeltaBin<0),]$PercHappyEnd25_NonFace)
fitgray<-lm(scale(nowData[which(nowData$CortDeltaBin==0),]$PercHappyEnd25_NonFace)~scale(nowData[which(nowData$CortDeltaBin==0),]$CortDelta))
graycor<-cor.test(nowData[which(nowData$CortDeltaBin==0),]$CortDelta, nowData[which(nowData$CortDeltaBin==0),]$PercHappyEnd25_NonFace)
fitgold<-lm(scale(nowData[which(nowData$CortDeltaBin>0),]$PercHappyEnd25_NonFace)~scale(nowData[which(nowData$CortDeltaBin>0),]$CortDelta))
goldcor<-cor.test(nowData[which(nowData$CortDeltaBin>0),]$CortDelta, nowData[which(nowData$CortDeltaBin>0),]$PercHappyEnd25_NonFace)
quartz()
plot(scale(nowData[which(nowData$CortDeltaBin<0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin<0),]$PercHappyEnd25_NonFace), xlab="Change in Cort (Post-Pre)", ylab="Happy - NonFace End", col="darkblue", bg="blue",
	pch=25, main="CortBins")
abline(fitblue,col="blue")
# BUG FIX (gold points below): original read nowData$CortDeltaBi (missing
# "n", worked only via $ partial matching); also dropped the invalid
# xlab/ylab arguments from the points() calls.
points(scale(nowData[which(nowData$CortDeltaBin==0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin==0),]$PercHappyEnd25_NonFace), col="darkgray", bg="lightgray",
	pch=22)
abline(fitgray, col="gray")
points(scale(nowData[which(nowData$CortDeltaBin>0),]$CortDelta), scale(nowData[which(nowData$CortDeltaBin>0),]$PercHappyEnd25_NonFace), col="darkgoldenrod4", bg="darkgoldenrod1",
	pch=24)
abline(fitgold, col="darkgoldenrod3")
# Per-bin summary table: sample size, correlation, p-value.
regtable<-matrix(nrow=3, ncol=3)
colnames(regtable)<-c("n","r","p")
row.names(regtable)<-c("Negative Change (blue)","NoChange (gray)","Positive Change (gold)")
regtable[1,1]<-dim(nowData[which(nowData$CortDeltaBin<0),])[[1]]
regtable[2,1]<-dim(nowData[which(nowData$CortDeltaBin==0),])[[1]]
regtable[3,1]<-dim(nowData[which(nowData$CortDeltaBin>0),])[[1]]
regtable[1,2]<-round(bluecor$estimate, digits=2)
regtable[2,2]<-round(graycor$estimate, digits=2)
regtable[3,2]<-round(goldcor$estimate, digits=2)
regtable[1,3]<-round(bluecor$p.value, digits=2)
regtable[2,3]<-round(graycor$p.value, digits=2)
regtable[3,3]<-round(goldcor$p.value, digits=2)
stargazer(regtable,title="Happy - NonFace End",out="HappyNonFaceEnd.html",summary=F)
### Cross-tabulate selected categorical columns against the cortisol-change
### bins and write each contingency table to an HTML file via stargazer.
for (col_idx in c(2, 80:84)) {
	col_name <- colnames(AllDataCortAnalysis)[col_idx]
	bin_table <- table(AllDataCortAnalysis[, col_idx], AllDataCortAnalysis$CortDeltaBin)
	stargazer(as.data.frame.matrix(bin_table),
		title = col_name,
		out = paste(col_name, ".html", sep = ""),
		summary = F)
}
|
/Cort Follow-Up - ChangeInCort.R
|
permissive
|
abbiepopa/DPTB_Cortisol
|
R
| false
| false
| 22,090
|
r
|
# NOTE(review): everything from here on duplicates the script above verbatim
# — this looks like a concatenation artifact in the file; confirm whether one
# copy should be removed.
# Load and left-join all measures into one wide data frame,
# AllDataCortAnalysis, keyed on participant ID.
#set working directory and important list of relevant participants
setwd("/Users/abbiepopa/Documents/Lab/DPTB/Cortisol Analysis/cortdata from Elliot/Working")
Participants<-read.csv("PartList_DPTB.csv", na.strings=".")
#import reaction time data for TD participants
RTTD<-read.csv("DPTB_RT_9-17-14_TDKoralyOut.csv",na.strings="NaN")
RTTD<-RTTD[,c(1,9,10)]
#import reaction time data for participants with 22q
RT22q<-read.csv("DPTB_RT_9-17-14_22qKoralyOut.csv", na.strings="NaN")
RT22q<-RT22q[,c(1,9,10)]
RT<-rbind(RTTD,RT22q)
#import cortisol data
Cort<-read.csv("CORT-Data-EAB_July2014.csv")
Cort<-Cort[,c(1, 3:6)]
colnames(Cort)[1]<-"CABIL_ID"
#import spence and ABAS data
SpenceABAS<-read.csv("Clustering_Database_8-8-14.csv", na.strings="-9999")
#import overall gaze data
EyeGazeOverall<-read.csv("AllData.csv", na.strings="NA")
EyeGazeOverall<-EyeGazeOverall[,2:11]
colnames(EyeGazeOverall)[1]<-"CABIL_ID"
#import time course gaze data
EyeGazeTimeCourse<-read.csv("AllDataTC25no676.csv", na.strings="NA")
EyeGazeTimeCourse<-EyeGazeTimeCourse[,2:24]
colnames(EyeGazeTimeCourse)[1]<-"CABIL_ID"
#import overall pupilometry for kids with 22q
PupilOverall22q<-read.csv("PupilPilot_6-5-14_22q.csv", na.strings=".")
PupilOverall22q<-PupilOverall22q[,c(1:7, 9:13)]
colnames(PupilOverall22q)[1]<-"CABIL_ID"
#import overall pupilometry to kids who are TD
PupilOverallTD<-read.csv("PupilPilot_6-5-14_TD.csv", na.strings=".")
PupilOverallTD<-PupilOverallTD[,c(1:7,9:13)]
colnames(PupilOverallTD)[1]<-"CABIL_ID"
#merge Dx groups
PupilOverall<-rbind(PupilOverall22q, PupilOverallTD)
#import time course pupilometry data for kids who are TD
PupilChangeTD<-read.csv("PupilChange_6-5-14_TD.csv",na.strings=".")
PupilChangeTD<-PupilChangeTD[,1:7]
colnames(PupilChangeTD)[1]<-"CABIL_ID"
#import time course pupilometry data for kids with 22q
PupilChange22q<-read.csv("PupilChange_6-5-14_22q.csv",na.strings=".")
PupilChange22q<-PupilChange22q[,1:7]
colnames(PupilChange22q)[1]<-"CABIL_ID"
#merge TC pupil data
PupilChange<-rbind(PupilChangeTD,PupilChange22q)
#merge data set (all.x=T keeps every participant even without a match)
AllDataCortAnalysis<-merge(Participants, Cort, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, SpenceABAS, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, RT, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, EyeGazeOverall, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, EyeGazeTimeCourse, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, PupilOverall, all.x=T)
AllDataCortAnalysis<-merge(AllDataCortAnalysis, PupilChange, all.x=T)
#calculate change in cort, and change in log cort
AllDataCortAnalysis$CortDelta<-AllDataCortAnalysis$PostCORT-AllDataCortAnalysis$PreCORT
AllDataCortAnalysis$LogCortDelta<-log10(AllDataCortAnalysis$CortDelta+1)
AllDataCortAnalysis$CortLogDelta<-AllDataCortAnalysis$LogPostCort-AllDataCortAnalysis$LogPreCort
###clusters###
ClusAll<-read.csv("AllDataClus.csv")
Clus<-ClusAll[,c(2,12)]
colnames(Clus)[1]<-"CABIL_ID"
AllDataCortAnalysis<-merge(AllDataCortAnalysis, Clus, all.x=T)
### pos, neg, or no change in cort
# Duplicate copy of BinAssigner (this half of the file repeats the script
# above).  Classify a single cortisol change into a bin:
#   -1 : x < lower;  1 : x > upper;  0 : otherwise;  NA : x is NA.
# The cut-offs are now parameters with the original values as defaults,
# so existing callers behave identically.
BinAssigner<-function(x, lower = -0.10900, upper = 0.10430){
	if(is.na(x)){
		return(NA)
	}
	if(x < lower){
		-1
	} else if (x > upper){
		1
	} else{
		0
	}
}
for(i in 1:dim(AllDataCortAnalysis)[[1]]){
AllDataCortAnalysis[i,c("CortDeltaBin")]<-BinAssigner(AllDataCortAnalysis[i,c("CortDelta")])
}
## Scatterplots of each measure (columns 5:76) against LogPreCort, LogPostCort
## and CortDelta, drawn only when the correlation is significant (p < .05),
## separately for each change-in-cortisol subgroup. The three original
## copy-pasted loops (negative / positive / no change) only differed by the
## subset, colour and plotting symbol, so they are collapsed into one helper.
plot_sig_correlations <- function(dat, col, bg, pch) {
  # x-variables to test, mapped to their axis labels
  xvars <- c(LogPreCort = "LogPreCort",
             LogPostCort = "LogPostCort",
             CortDelta = "Change in Cort (Post-Pre)")
  for (i in 5:76) {
    for (xv in names(xvars)) {
      ct <- cor.test(dat[, xv], dat[, i])
      if (ct$p.value < 0.05) {
        # drop rows missing either variable, then plot standardized values
        nowData <- dat[which(!is.na(dat[, xv]) & !is.na(dat[, i])), ]
        quartz()
        plot(scale(nowData[, xv]), scale(nowData[, i]),
             main = paste("r=", round(ct$estimate, digits = 2),
                          "p=", round(ct$p.value, digits = 2),
                          "n=", dim(nowData)[[1]]),
             xlab = xvars[[xv]], ylab = colnames(nowData)[i],
             col = col, bg = bg, pch = pch)
        fit <- lm(scale(nowData[, i]) ~ scale(nowData[, xv]))
        abline(fit)
        # dashed 95% confidence band around the fitted line
        prd <- predict(fit, interval = "confidence")
        ord <- sort(scale(nowData[, xv]), decreasing = (ct$estimate[[1]] < 0))
        lines(ord, sort(prd[, 2]), col = "red", lty = 2)
        lines(ord, sort(prd[, 3]), col = "red", lty = 2)
      }
    }
  }
}
#Negative change subgroup (blue triangles)
NegChange <- subset(AllDataCortAnalysis, CortDeltaBin == -1)
plot_sig_correlations(NegChange, col = "darkblue", bg = "blue", pch = 25)
#Positive change subgroup (gold triangles)
PosChange <- subset(AllDataCortAnalysis, CortDeltaBin == 1)
plot_sig_correlations(PosChange, col = "darkgoldenrod4", bg = "darkgoldenrod1", pch = 24)
#No change subgroup (gray squares)
NoChange <- subset(AllDataCortAnalysis, CortDeltaBin == 0)
plot_sig_correlations(NoChange, col = "darkgray", bg = "lightgray", pch = 22)
#Violins for Different Measures Based on ChangeInCort Bins
# For each measure column (5:76), fit measure ~ CortDeltaBin and, when the
# Anova p-value is < 0.05, draw a violin + boxplot of the measure by bin.
# NOTE(review): Anova() is presumably car::Anova() - confirm the car package
# is loaded earlier in this file.
for (i in 5:76){
  thisfit<-Anova(lm(AllDataCortAnalysis[,i]~AllDataCortAnalysis$CortDeltaBin))
  # thisfit[1,4] is the p-value entry of the first row of the Anova table
  if(thisfit[1,4]<0.05){
    quartz()  # open a new plotting device (macOS only)
    print(ggplot(AllDataCortAnalysis, aes(factor(CortDeltaBin),AllDataCortAnalysis[,i]))+geom_violin(trim=F, aes(fill=factor(CortDeltaBin)))+scale_fill_manual(values=c("darkblue","lightgray","darkgoldenrod1"))+geom_boxplot(width=0.1,fill="grey50")+xlab("CortDeltaBin")+ylab(colnames(AllDataCortAnalysis)[i])+ggtitle(paste("p=",round(thisfit[1,4], digits=2))))
  }
}
###Follow-up Scatters to the Violins
library(stargazer)
## For one eye-gaze measure: plot standardized measure vs. standardized change
## in cortisol with a separate regression line per CortDeltaBin subgroup, and
## write an n/r/p summary table to HTML via stargazer.
## The four original copy-pasted sections only differed by the measure column,
## axis/table label and output file, so they are collapsed into one helper.
## This also fixes a latent bug: the positive-change points() calls referenced
## `nowData$CortDeltaBi` (missing "n") and only worked via `$` partial matching.
plot_cort_bin_scatter <- function(measure, label, outfile) {
  nowData <- AllDataCortAnalysis[which(!is.na(AllDataCortAnalysis$CortDelta) &
                                       !is.na(AllDataCortAnalysis[, measure])), ]
  neg <- nowData[which(nowData$CortDeltaBin < 0), ]
  zer <- nowData[which(nowData$CortDeltaBin == 0), ]
  pos <- nowData[which(nowData$CortDeltaBin > 0), ]
  # per-subgroup fits on standardized variables, plus raw correlations
  fitblue <- lm(scale(neg[, measure]) ~ scale(neg$CortDelta))
  bluecor <- cor.test(neg$CortDelta, neg[, measure])
  fitgray <- lm(scale(zer[, measure]) ~ scale(zer$CortDelta))
  graycor <- cor.test(zer$CortDelta, zer[, measure])
  fitgold <- lm(scale(pos[, measure]) ~ scale(pos$CortDelta))
  goldcor <- cor.test(pos$CortDelta, pos[, measure])
  quartz()
  plot(scale(neg$CortDelta), scale(neg[, measure]),
       xlab = "Change in Cort (Post-Pre)", ylab = label,
       col = "darkblue", bg = "blue", pch = 25, main = "CortBins")
  abline(fitblue, col = "blue")
  points(scale(zer$CortDelta), scale(zer[, measure]),
         col = "darkgray", bg = "lightgray", pch = 22)
  abline(fitgray, col = "gray")
  points(scale(pos$CortDelta), scale(pos[, measure]),
         col = "darkgoldenrod4", bg = "darkgoldenrod1", pch = 24)
  abline(fitgold, col = "darkgoldenrod3")
  # n / r / p summary table, one row per subgroup
  regtable <- matrix(nrow = 3, ncol = 3)
  colnames(regtable) <- c("n", "r", "p")
  row.names(regtable) <- c("Negative Change (blue)", "NoChange (gray)", "Positive Change (gold)")
  regtable[, 1] <- c(nrow(neg), nrow(zer), nrow(pos))
  regtable[, 2] <- round(c(bluecor$estimate, graycor$estimate, goldcor$estimate), digits = 2)
  regtable[, 3] <- round(c(bluecor$p.value, graycor$p.value, goldcor$p.value), digits = 2)
  stargazer(regtable, title = label, out = outfile, summary = F)
}
#AngryEnd Angry
plot_cort_bin_scatter("PercAngryEnd25_Angry", "Angry End - Angry", "AngryEndAngry.html")
#HappyChange Neutral
plot_cort_bin_scatter("PercHappyChange25_Neutral", "Change in Neutral on Happy Trials", "ChangeNeutralOnHappy.html")
#HappyEnd Neutral
plot_cort_bin_scatter("PercHappyEnd25_Neutral", "Happy - Neutral End", "HappyNeutralEnd.html")
#HappyEnd NonFace
plot_cort_bin_scatter("PercHappyEnd25_NonFace", "Happy - NonFace End", "HappyNonFaceEnd.html")
###tables about cortbins
# Cross-tabulate selected columns (2 and 80:84) against CortDeltaBin and
# write each contingency table to "<column name>.html" via stargazer.
for(i in c(2,80:84)){
  hereitis<-table(AllDataCortAnalysis[,i],AllDataCortAnalysis$CortDeltaBin)
  stargazer(as.data.frame.matrix(hereitis), title=colnames(AllDataCortAnalysis)[i],out=paste(colnames(AllDataCortAnalysis)[i],".html",sep=""), summary=F)
}
|
# HW2: Generate patterned matrix
#
# In this exercise, you will write functions to create matrices with indicated patterns.
# Hint: You can create a zero matrix with required dimension first, then alter the matrix.
#
# 1. Write a function `matrix_1(d)` to return a d dimensional square matrix with following pattern:
# 1 0 0 0 ... 0
# 1 2 0 0 ... 0
# 1 2 3 0 ... 0
# 1 2 3 4 ... 0
# . . . . ... .
# 1 2 3 4 ... d
# 2. Write a function `matrix_2(d)` to return a d dimensional square matrix with following pattern:
# 1 0 0 0 ... 0
# 2 3 0 0 ... 0
# 4 5 6 0 ... 0
# 7 8 9 10 ... 0
# . . . . ... .
# 3. Write a function `matrix_3(d)` to return a d dimensional square matrix with following pattern:
# 1 2 3 4 ... d
# 2d 2d-1 2d-2 2d-3 ... d+1
# 2d+1 2d+2 2d+3 2d+4 ... 3d
# . . . . ... .
# . . . . ... .
# Note: d is a positive integer
## Do not modify this line! ## Your code for 1.
matrix_1 <- function(d){
  # d x d pattern: entry (i, j) equals j on and below the diagonal, 0 above.
  out <- matrix(0, d, d)
  keep <- row(out) >= col(out)
  out[keep] <- col(out)[keep]
  out
}
## Do not modify this line! ## Your code for 2.
recur <- function(x){
  # Triangular number T(x) = 1 + 2 + ... + x.
  # Closed form replaces the original O(x) recursion, which risked a
  # "too deep nesting" error for large x while computing the same value.
  x * (x + 1) / 2
}
matrix_2 <- function(d){
  # d x d pattern filling 1, 2, 3, ... row-wise into the lower triangle:
  #   1 0 0
  #   2 3 0
  #   4 5 6
  # Fill the upper triangle column-wise and transpose: column-major order
  # over upper.tri positions equals row-major order over lower.tri ones.
  mat <- matrix(0, d, d)
  # seq_len(d*(d+1)/2) replaces the original grow-in-a-loop index vector and
  # the recursive triangular-number helper call.
  mat[upper.tri(mat, diag = TRUE)] <- seq_len(d * (d + 1) / 2)
  t(mat)
}
## Do not modify this line! ## Your code for 3.
matrix_3 <- function(d){
  # d x d "serpentine" pattern counting 1..d^2 row by row:
  # odd rows run left-to-right, even rows right-to-left.
  out <- matrix(0, d, d)
  for (r in seq_len(d)) {
    rng <- ((r - 1) * d + 1):(r * d)
    out[r, ] <- if (r %% 2 == 0) rev(rng) else rng
  }
  out
}
|
/HW2/Patterned Matrix.R
|
no_license
|
lsjhome/GR5206
|
R
| false
| false
| 1,720
|
r
|
# HW2: Generate patterned matrix
#
# In this exercise, you will write functions to create matrices with indicated patterns.
# Hint: You can create a zero matrix with required dimension first, then alter the matrix.
#
# 1. Write a function `matrix_1(d)` to return a d dimensional square matrix with following pattern:
# 1 0 0 0 ... 0
# 1 2 0 0 ... 0
# 1 2 3 0 ... 0
# 1 2 3 4 ... 0
# . . . . ... .
# 1 2 3 4 ... d
# 2. Write a function `matrix_2(d)` to return a d dimensional square matrix with following pattern:
# 1 0 0 0 ... 0
# 2 3 0 0 ... 0
# 4 5 6 0 ... 0
# 7 8 9 10 ... 0
# . . . . ... .
# 3. Write a function `matrix_3(d)` to return a d dimensional square matrix with following pattern:
# 1 2 3 4 ... d
# 2d 2d-1 2d-2 2d-3 ... d+1
# 2d+1 2d+2 2d+3 2d+4 ... 3d
# . . . . ... .
# . . . . ... .
# Note: d is a positive integer
## Do not modify this line! ## Your code for 1.
matrix_1 <- function(d){
  # d x d pattern: entry (i, j) equals j on and below the diagonal, 0 above.
  out <- matrix(0, d, d)
  keep <- row(out) >= col(out)
  out[keep] <- col(out)[keep]
  out
}
## Do not modify this line! ## Your code for 2.
recur <- function(x){
  # Triangular number T(x) = 1 + 2 + ... + x.
  # Closed form replaces the original O(x) recursion, which risked a
  # "too deep nesting" error for large x while computing the same value.
  x * (x + 1) / 2
}
matrix_2 <- function(d){
  # d x d pattern filling 1, 2, 3, ... row-wise into the lower triangle:
  #   1 0 0
  #   2 3 0
  #   4 5 6
  # Fill the upper triangle column-wise and transpose: column-major order
  # over upper.tri positions equals row-major order over lower.tri ones.
  mat <- matrix(0, d, d)
  # seq_len(d*(d+1)/2) replaces the original grow-in-a-loop index vector and
  # the recursive triangular-number helper call.
  mat[upper.tri(mat, diag = TRUE)] <- seq_len(d * (d + 1) / 2)
  t(mat)
}
## Do not modify this line! ## Your code for 3.
matrix_3 <- function(d){
  # d x d "serpentine" pattern counting 1..d^2 row by row:
  # odd rows run left-to-right, even rows right-to-left.
  out <- matrix(0, d, d)
  for (r in seq_len(d)) {
    rng <- ((r - 1) * d + 1):(r * d)
    out[r, ] <- if (r %% 2 == 0) rev(rng) else rng
  }
  out
}
|
require(xgboost)
require(methods)
library(data.table)
library(plyr)
library(Hmisc)
library(lattice)
require(gridExtra)
library(fastfurious)
library(parallel)
### FUNCS
cluster_by = function(predictor.train,predictor.test,num_bids = 8,verbose=T) {
  ## Bin a numeric predictor into num_bids quantile-based clusters, using the
  ## pooled train+test distribution so both sets share the same thresholds.
  ## Level k means q[k-1] < x <= q[k] (level 1: x <= q[1]).
  ## Returns per-observation levels (1..num_bids) for train and test plus the
  ## quantile thresholds ("theresolds" name kept as-is for existing callers).
  if (verbose) {
    require(Hmisc)  # describe() is only needed for the verbose summaries
    print(describe(predictor.train))
    print(describe(predictor.test))
  }
  data = as.vector(c(predictor.train, predictor.test))
  q = as.numeric(quantile(data, probs = ((1:num_bids) / num_bids)))
  ## findInterval(..., left.open = TRUE) + 1 computes the level in O(n log k)
  ## instead of the original per-element threshold/lookup loops (O(n * m)).
  level_of = function(x) findInterval(x, q, left.open = TRUE) + 1
  tr_qty_lev = level_of(predictor.train)
  ts_qty_lev = level_of(predictor.test)
  if (verbose) {
    ## cluster cardinalities over the pooled data
    num = tabulate(level_of(data), nbins = num_bids)
    print(describe(num))
    cat(">> mapping qty_lev to data ... \n")
  }
  return( list(levels.train = tr_qty_lev , levels.test = ts_qty_lev , theresolds = q) )
}
################# FAST-FURIOUS
# Bind the fast-furious path aliases used throughout this script.
# NOTE(review): the base path is machine-specific - update before running
# on another machine.
ff.setBasePath(path = '/Users/gino/kaggle/fast-furious/gitHub/fast-furious/')
ff.bindPath(type = 'data' , sub_path = 'dataset/caterpillar-tube-pricing/competition_data')
ff.bindPath(type = 'process' , sub_path = 'data_process')
ff.bindPath(type = 'elab' , sub_path = 'dataset/caterpillar-tube-pricing/elab')
ff.bindPath(type = 'docs' , sub_path = 'dataset/caterpillar-tube-pricing/docs')
ff.bindPath(type = 'submission' , sub_path = 'dataset/caterpillar-tube-pricing')
ff.bindPath(type = 'submission_old' , sub_path = 'dataset/caterpillar-tube-pricing/submission')
ff.bindPath(type = 'best_tune' , sub_path = 'dataset/caterpillar-tube-pricing/best_tune_1')
ff.bindPath(type = 'tmp' , sub_path = 'dataset/caterpillar-tube-pricing/tmp')
# Load helper libraries; featureSelect / reg_train_predict /
# xgb_train_and_predict used below are presumably defined here - confirm.
source(paste0( ff.getPath("process") , "/FeatureSelection_Lib.R"))
source(paste0( ff.getPath("process") , "/Regression_Lib.R"))
################# SETTINGS
# When TRUE, the main loop trims each cluster's training data to 20 rows
# for a quick dry run.
DEBUG_MODE = T
################# MODELS
# caret model labels fit per cluster (note: "XGBoost" is NOT in this list,
# so the XGBoost branch of the main loop is currently unused).
reg_models = c("rlm", "pls" , "ridge" , "enet" ,
               "knn", "svmRadial", "treebag", "gbm" )
cluster_levs = 1:8 ##<<<<<<<---- :::::: <<<<<<<<<
################# DATA IN
sample_submission = as.data.frame( fread(paste(ff.getPath("data") ,
                                               "sample_submission.csv" , sep='')))
## elab
train_enc = as.data.frame( fread(paste(ff.getPath("elab") ,
                                       "train_enc.csv" , sep='')))
test_enc = as.data.frame( fread(paste(ff.getPath("elab") ,
                                      "test_enc.csv" , sep='')))
train_enc_date = as.data.frame( fread(paste(ff.getPath("elab") ,
                                            "train_enc_date.csv" , sep='')))
test_enc_date = as.data.frame( fread(paste(ff.getPath("elab") ,
                                           "test_enc_date.csv" , sep='')))
## tech props
tube_base = as.data.frame( fread(paste(ff.getPath("elab") ,
                                       "tube_base.csv" , sep='')))
bom_base = as.data.frame( fread(paste(ff.getPath("elab") ,
                                      "bom_base.csv" , sep='')))
spec_enc = as.data.frame( fread(paste(ff.getPath("elab") ,
                                      "spec_enc.csv" , sep='')))
####>>>>>>>>>> PROCESSING
## build technical feature set
# column-wise concatenation; assumes the three frames are row-aligned
# by tube - confirm upstream ordering.
tube = cbind(tube_base,bom_base)
tube = cbind(tube,spec_enc)
dim(tube) ## 180 (encoded) technical features
# [1] 21198 180
## putting quote_date in data set
head_train_set = train_enc_date
head_test_set = test_enc_date
## build train_set and test_set
train_set = merge(x = head_train_set , y = tube , by = 'tube_assembly_id' , all = F)
test_set = merge(x = head_test_set , y = tube , by = 'tube_assembly_id' , all = F)
######### feature scaling
cat(">>> Feature scaling ... \n")
# numeric columns to center/scale; fit on train+test stacked so both sets
# share the same transform
feature2scal = c(
  "quote_date" , "annual_usage" , "min_order_quantity" ,
  "diameter" , "wall" , "length" , "num_bends" , "bend_radius" ,
  "num_boss" , "num_bracket" ,
  "CP_001_weight" , "CP_002_weight" , "CP_003_weight" , "CP_004_weight" , "CP_005_weight" , "CP_006_weight" ,
  "CP_007_weight" , "CP_008_weight" , "CP_009_weight" , "CP_010_weight" , "CP_011_weight" , "CP_012_weight" ,
  "CP_014_weight" , "CP_015_weight" , "CP_016_weight" , "CP_017_weight" , "CP_018_weight" , "CP_019_weight" ,
  "CP_020_weight" , "CP_021_weight" , "CP_022_weight" , "CP_023_weight" , "CP_024_weight" , "CP_025_weight" ,
  "CP_026_weight" , "CP_027_weight" , "CP_028_weight" , "CP_029_weight" , "OTHER_weight"
)
trans.scal <- preProcess(rbind(train_set[,feature2scal],test_set[,feature2scal]),
                         method = c("center", "scale") )
print(trans.scal)
train_set[,feature2scal] = predict(trans.scal,train_set[,feature2scal])
test_set[,feature2scal] = predict(trans.scal,test_set[,feature2scal])
#########
## clustering
# quantile-bin order quantity into length(cluster_levs) clusters shared by
# train and test
cls = cluster_by(predictor.train=train_set$quantity,
                 predictor.test=test_set$quantity,
                 num_bids = length(cluster_levs),
                 verbose=T)
train_set$qty_lev = cls$levels.train
test_set$qty_lev = cls$levels.test
## grid
# one work item per (cluster, model) combination for the parallel main loop
grid = expand.grid(cluster = cluster_levs , model= reg_models)
grid$model = as.character(grid$model)
##############
## MAIN LOOP
##############
# One parallel task per (cluster, model) grid row: subset train/test to the
# cluster, drop id columns, remove zero-variance predictors, fit the model,
# and return its cross-validated RMSE plus full-length test predictions.
ptm <- proc.time()
res_list = mclapply( 1:nrow(grid) , function(i) {
  cls = grid[i,]$cluster
  model.label = grid[i,]$model
  # NOTE(review): best_prediction is not defined anywhere in this script -
  # this looks like it should be sample_submission$cost; confirm before running.
  pred = best_prediction$cost
  ###
  pid = paste('[cluster:',cls,'][model:',model.label,']',sep='')
  cat('>>> processing ',pid,'... \n')
  ## define train / test set
  train_set_cl = train_set[train_set$qty_lev == cls,]
  test_set_cl = test_set[test_set$qty_lev== cls,]
  cat(pid,'>>> train observations:',nrow(train_set_cl), '- test observations:',nrow(test_set_cl), ' \n')
  if (nrow(test_set_cl) == 0) stop('something wrong')
  # indices of this cluster's rows in the full test set, used to scatter the
  # cluster predictions back into the full-length prediction vector
  pred_cl_idx = which(test_set$qty_lev==cls)
  stopifnot ( length(pred_cl_idx) == nrow(test_set_cl) )
  ##############
  ## DATA PROC
  ##############
  ## tube_assembly_id , id
  train_set_cl[, 'tube_assembly_id'] = NULL
  test_set_cl [, 'tube_assembly_id'] = NULL
  test_set_cl [, 'id'] = NULL
  ## material_id (categorical encoding currently disabled; column dropped)
  # cat(">>> encoding material_id [",unique(c(train_set_cl$material_id ,
  #    test_set_cl$material_id)),"] [",length(unique(c(train_set_cl$material_id ,
  #    test_set_cl$material_id))),"] ... \n")
  # l = encodeCategoricalFeature (train_set_cl$material_id , test_set_cl$material_id , colname.prefix = "material_id" , asNumeric=F)
  # cat(">>> train_set before encoding:",ncol(train_set_cl)," - test_set before encoding:",ncol(test_set_cl)," ... \n")
  # train_set_cl = cbind(train_set_cl , l$traindata)
  # test_set_cl = cbind(test_set_cl , l$testdata)
  train_set_cl[, 'material_id'] = NULL
  test_set_cl [, 'material_id'] = NULL
  cat(pid,">>> train_set after encoding:",ncol(train_set_cl)," - test_set after encoding:",ncol(test_set_cl)," ... \n")
  ## y, data
  y = train_set_cl$cost
  train_set_cl[, 'cost'] = NULL
  ####### remove zero variance predictors
  # featureSelect comes from FeatureSelection_Lib.R (sourced above)
  l = featureSelect (train_set_cl,
                     test_set_cl,
                     removeOnlyZeroVariacePredictors=T, ### <<< :::: <<< ---------
                     performVarianceAnalysisOnTrainSetOnly = T ,
                     removePredictorsMakingIllConditionedSquareMatrix = F,
                     removeHighCorrelatedPredictors = F,
                     featureScaling = F)
  train_set_cl = l$traindata
  test_set_cl = l$testdata
  ####### DEBUG
  # In debug mode only the first 20 training rows are used (fast dry run)
  if (DEBUG_MODE) {
    cat(pid,">>> Debug mode ... \n")
    train_set_cl = train_set_cl[1:20,]
    y = y[1:20]
  } else {
    cat(pid,">>> Production mode ... \n")
  }
  ####### end of DEBUG
  ##############
  ## MODELING
  ##############
  ##
  rmse_xval_mod = NULL
  pred_mod = NULL
  secs_mod = NULL
  early.stop = NULL
  bestTune = NULL
  ###
  # NOTE(review): reg_models does not contain "XGBoost", so this branch is
  # currently dead code; kept for when XGBoost is re-added to the grid.
  if (model.label == "XGBoost") {
    ptm_xgb <- proc.time()
    param <- list("objective" = "reg:linear",
                  "eval_metric" = "rmse",
                  "eta" = 0.05,
                  "gamma" = 0.7,
                  "max_depth" = 20,
                  "subsample" = 0.5 , ## suggested in ESLII
                  "nthread" = 10,
                  "min_child_weight" = 1 ,
                  "colsample_bytree" = 0.5,
                  "max_delta_step" = 1)
    cat(pid,">> XGBoost Params:\n")
    print(param)
    xgb = xgb_train_and_predict (train_set = train_set_cl,
                                 y = y,
                                 test_set = test_set_cl,
                                 param = param,
                                 cv.nround = 3000 ,
                                 nfold = min(5,nrow(train_set_cl)) ,
                                 verbose=F)
    pred_mod = xgb$pred
    rmse_xval_mod = xgb$perf.cv
    early.stop = xgb$early.stop
    tm_xgb = proc.time() - ptm_xgb
    secs_mod = as.numeric(tm_xgb[3])
  } else {
    # 8-fold cross-validation repeated 3 times (caret)
    controlObject <- trainControl(method = "repeatedcv", repeats = 3, number = 8)
    l = reg_train_predict ( YtrainingSet = y ,
                            XtrainingSet = train_set_cl,
                            testSet = test_set_cl,
                            model.label = model.label,
                            controlObject = controlObject,
                            best.tuning = T)
    if ( !is.null(l$model) ) {
      rmse_xval_mod = min(l$model$results$RMSE)
      bestTune = l$model$bestTune
    } else {
      # sentinel RMSE so a failed fit sorts last in the grid summary
      rmse_xval_mod = 1000000
    }
    secs_mod = l$secs
    pred_mod = l$pred
  }
  stopifnot(sum(is.na(pred_mod)) == 0)
  stopifnot(length(pred_mod) == length(pred_cl_idx))
  # negative predicted prices are clamped to 1.5
  cat(pid,'>> number of prediction < 0:',sum(pred_mod<0),' ... repleaced with 1.5 \n')
  pred_mod = ifelse(pred_mod<0,1.5,pred_mod)
  pred[pred_cl_idx] = pred_mod
  # output
  return(list(cluster = cls,
              model.label = model.label,
              rmse_xval_mod = rmse_xval_mod,
              early.stop = early.stop,
              bestTune = bestTune,
              pred = pred,
              secs_mod = secs_mod))
},
mc.cores = nrow(grid))
####### end of parallel loop
tm = proc.time() - ptm
secs = as.numeric(tm[3])
cat(">>> MAIN LOOP >>> Time elapsed:",secs," secs. [",secs/60,"min.] [",secs/(60*60),"hours] \n")
############## end of MAIN LOOP
## write on disk
# Collect per-(cluster, model) timings, RMSEs and best tuning parameters
# from res_list into the grid data frame, then write the summary CSV.
cat(">>> writing prediction / cluster_perf on disk ... \n")
grid$secs = NA
grid$rmse = NA
grid$early.stop = NA
# union of tuning-parameter names across all fitted models, used as extra
# grid columns
allParams = unique(unlist(lapply(res_list,function(e) {
  if (! is.null(e['bestTune'])) {
    return(lapply(e['bestTune'],function(ee) {
      return(names(ee))
    }))
  }
  return(NULL)
})))
allParams = allParams[allParams != 'parameter'] ## linear regression: parameter none
grid = cbind(grid,
             setNames(
               as.data.frame(matrix(rep(NA,length(allParams)*nrow(grid)),ncol = length(allParams))),
               allParams))
# NOTE(review): this lapply is used purely for its side effects - it fills
# grid via <<- superassignment; the alist return value is unused.
alist = lapply( res_list , function(ll) {
  cls = ll$cluster
  model.label = ll$model.label
  grid[grid$cluster == cls & grid$model == model.label,]$secs <<- ll$secs_mod
  grid[grid$cluster == cls & grid$model == model.label,]$rmse <<- ll$rmse_xval_mod
  if (model.label == "XGBoost") {
    ## early.stop
    grid[grid$cluster == cls & grid$model == model.label,]$early.stop <<- ll$early.stop
  } else {
    ## sons of bestTune
    if (! is.null(ll['bestTune']) ) {
      for (parName in names(ll$bestTune)) {
        if (parName == 'parameter') next
        grid[grid$cluster == cls & grid$model == model.label,parName] <<- ll$bestTune[parName]
      }
    }
  }
  pid = paste('[cluster:',cls,'][model:',model.label,']',sep='')
  fn = paste('sub_cluster_by_qty_lev_',cls,'_mod_',model.label,'.csv',sep='')
  cat(pid,": writing prediction on disk ... \n")
  stopifnot( sum(ll$pred<0) == 0 )
  # per-model submission write currently disabled:
  # sample_submission$cost = ll$pred
  # write.csv(sample_submission,quote=FALSE,
  #          file=paste(ff.getPath("submission"),fn,sep='') ,
  #          row.names=FALSE)
})
write.csv(grid,quote=FALSE,
          file=paste(ff.getPath("submission"),'grid_GENERAL.csv',sep='') ,
          row.names=FALSE)
|
/competitions/caterpillar-tube-pricing/Predict_2_Cluster_Qty_Ensemble_ClusterSelect_Tune_All_Models.R
|
permissive
|
fxcebx/fast-furious
|
R
| false
| false
| 13,210
|
r
|
require(xgboost)
require(methods)
library(data.table)
library(plyr)
library(Hmisc)
library(lattice)
require(gridExtra)
library(fastfurious)
library(parallel)
### FUNCS
cluster_by = function(predictor.train,predictor.test,num_bids = 8,verbose=T) {
  ## Bin a numeric predictor (train + test pooled) into `num_bids`
  ## quantile-based clusters and map every observation to its cluster level.
  ##
  ## Args:
  ##   predictor.train: numeric vector of the predictor on the training set.
  ##   predictor.test:  numeric vector of the predictor on the test set.
  ##   num_bids:        number of quantile bins (clusters), default 8.
  ##   verbose:         if TRUE, print Hmisc::describe() summaries of the
  ##                    inputs and of the per-bin cardinalities.
  ##
  ## Returns: list(levels.train, levels.test, theresolds) where levels.* is
  ##   the 1..num_bids cluster level per observation and `theresolds` holds
  ##   the upper quantile threshold of each bin (element name kept as-is,
  ##   sic, for backward compatibility with existing callers).
  if (verbose) {
    require(Hmisc)  # lazy-load: only describe() needs it
    print(describe(predictor.train))
    print(describe(predictor.test))
  }
  data = as.vector(c(predictor.train,predictor.test))
  q = as.numeric(quantile(data, probs = ((1:num_bids)/num_bids)))
  ## counting cluster card: bin i holds values in (q[i-1], q[i]]
  num=rep(0,num_bids)
  for (i in 1:num_bids)
    if (i == 1) {
      num[i] = sum(data<=q[i])
    } else {
      num[i] = sum(data<=q[i] & data>q[i-1])
    }
  if (verbose) print(describe(num))
  ## mapping qty_lev on data
  ## Vectorized replacement for the original O(n * num_bids) per-observation
  ## loops: level = index of the first threshold >= value, i.e. findInterval
  ## over the half-open intervals (q[k-1], q[k]] plus one. Ties between
  ## duplicated thresholds resolve to the first matching bin, exactly as the
  ## original linear scan did.
  if (verbose) cat(">> mapping qty_lev to data ... \n")
  tr_qty_lev = findInterval(predictor.train, q, left.open = TRUE) + 1
  ts_qty_lev = findInterval(predictor.test, q, left.open = TRUE) + 1
  return( list(levels.train = tr_qty_lev , levels.test = ts_qty_lev , theresolds = q) )
}
################# FAST-FURIOUS
ff.setBasePath(path = '/Users/gino/kaggle/fast-furious/gitHub/fast-furious/')
ff.bindPath(type = 'data' , sub_path = 'dataset/caterpillar-tube-pricing/competition_data')
ff.bindPath(type = 'process' , sub_path = 'data_process')
ff.bindPath(type = 'elab' , sub_path = 'dataset/caterpillar-tube-pricing/elab')
ff.bindPath(type = 'docs' , sub_path = 'dataset/caterpillar-tube-pricing/docs')
ff.bindPath(type = 'submission' , sub_path = 'dataset/caterpillar-tube-pricing')
ff.bindPath(type = 'submission_old' , sub_path = 'dataset/caterpillar-tube-pricing/submission')
ff.bindPath(type = 'best_tune' , sub_path = 'dataset/caterpillar-tube-pricing/best_tune_1')
ff.bindPath(type = 'tmp' , sub_path = 'dataset/caterpillar-tube-pricing/tmp')
source(paste0( ff.getPath("process") , "/FeatureSelection_Lib.R"))
source(paste0( ff.getPath("process") , "/Regression_Lib.R"))
################# SETTINGS
DEBUG_MODE = T
################# MODELS
reg_models = c("rlm", "pls" , "ridge" , "enet" ,
"knn", "svmRadial", "treebag", "gbm" )
cluster_levs = 1:8 ##<<<<<<<---- :::::: <<<<<<<<<
################# DATA IN
sample_submission = as.data.frame( fread(paste(ff.getPath("data") ,
"sample_submission.csv" , sep='')))
## elab
train_enc = as.data.frame( fread(paste(ff.getPath("elab") ,
"train_enc.csv" , sep='')))
test_enc = as.data.frame( fread(paste(ff.getPath("elab") ,
"test_enc.csv" , sep='')))
train_enc_date = as.data.frame( fread(paste(ff.getPath("elab") ,
"train_enc_date.csv" , sep='')))
test_enc_date = as.data.frame( fread(paste(ff.getPath("elab") ,
"test_enc_date.csv" , sep='')))
## tech props
tube_base = as.data.frame( fread(paste(ff.getPath("elab") ,
"tube_base.csv" , sep='')))
bom_base = as.data.frame( fread(paste(ff.getPath("elab") ,
"bom_base.csv" , sep='')))
spec_enc = as.data.frame( fread(paste(ff.getPath("elab") ,
"spec_enc.csv" , sep='')))
####>>>>>>>>>> PROCESSING
## build technical feature set
tube = cbind(tube_base,bom_base)
tube = cbind(tube,spec_enc)
dim(tube) ## 180 (encoded) technical features
# [1] 21198 180
## putting quote_date in data set
head_train_set = train_enc_date
head_test_set = test_enc_date
## build train_set and test_set
train_set = merge(x = head_train_set , y = tube , by = 'tube_assembly_id' , all = F)
test_set = merge(x = head_test_set , y = tube , by = 'tube_assembly_id' , all = F)
######### feature scaling
cat(">>> Feature scaling ... \n")
feature2scal = c(
"quote_date" , "annual_usage" , "min_order_quantity" ,
"diameter" , "wall" , "length" , "num_bends" , "bend_radius" ,
"num_boss" , "num_bracket" ,
"CP_001_weight" , "CP_002_weight" , "CP_003_weight" , "CP_004_weight" , "CP_005_weight" , "CP_006_weight" ,
"CP_007_weight" , "CP_008_weight" , "CP_009_weight" , "CP_010_weight" , "CP_011_weight" , "CP_012_weight" ,
"CP_014_weight" , "CP_015_weight" , "CP_016_weight" , "CP_017_weight" , "CP_018_weight" , "CP_019_weight" ,
"CP_020_weight" , "CP_021_weight" , "CP_022_weight" , "CP_023_weight" , "CP_024_weight" , "CP_025_weight" ,
"CP_026_weight" , "CP_027_weight" , "CP_028_weight" , "CP_029_weight" , "OTHER_weight"
)
trans.scal <- preProcess(rbind(train_set[,feature2scal],test_set[,feature2scal]),
method = c("center", "scale") )
print(trans.scal)
train_set[,feature2scal] = predict(trans.scal,train_set[,feature2scal])
test_set[,feature2scal] = predict(trans.scal,test_set[,feature2scal])
#########
## clustering
cls = cluster_by(predictor.train=train_set$quantity,
predictor.test=test_set$quantity,
num_bids = length(cluster_levs),
verbose=T)
train_set$qty_lev = cls$levels.train
test_set$qty_lev = cls$levels.test
## grid
grid = expand.grid(cluster = cluster_levs , model= reg_models)
grid$model = as.character(grid$model)
##############
## MAIN LOOP
##############
ptm <- proc.time()
res_list = mclapply( 1:nrow(grid) , function(i) {
cls = grid[i,]$cluster
model.label = grid[i,]$model
pred = best_prediction$cost
###
pid = paste('[cluster:',cls,'][model:',model.label,']',sep='')
cat('>>> processing ',pid,'... \n')
## define train / test set
train_set_cl = train_set[train_set$qty_lev == cls,]
test_set_cl = test_set[test_set$qty_lev== cls,]
cat(pid,'>>> train observations:',nrow(train_set_cl), '- test observations:',nrow(test_set_cl), ' \n')
if (nrow(test_set_cl) == 0) stop('something wrong')
pred_cl_idx = which(test_set$qty_lev==cls)
stopifnot ( length(pred_cl_idx) == nrow(test_set_cl) )
##############
## DATA PROC
##############
## tube_assembly_id , id
train_set_cl[, 'tube_assembly_id'] = NULL
test_set_cl [, 'tube_assembly_id'] = NULL
test_set_cl [, 'id'] = NULL
## material_id
# cat(">>> encoding material_id [",unique(c(train_set_cl$material_id ,
# test_set_cl$material_id)),"] [",length(unique(c(train_set_cl$material_id ,
# test_set_cl$material_id))),"] ... \n")
# l = encodeCategoricalFeature (train_set_cl$material_id , test_set_cl$material_id , colname.prefix = "material_id" , asNumeric=F)
# cat(">>> train_set before encoding:",ncol(train_set_cl)," - test_set before encoding:",ncol(test_set_cl)," ... \n")
# train_set_cl = cbind(train_set_cl , l$traindata)
# test_set_cl = cbind(test_set_cl , l$testdata)
train_set_cl[, 'material_id'] = NULL
test_set_cl [, 'material_id'] = NULL
cat(pid,">>> train_set after encoding:",ncol(train_set_cl)," - test_set after encoding:",ncol(test_set_cl)," ... \n")
## y, data
y = train_set_cl$cost
train_set_cl[, 'cost'] = NULL
####### remove zero variance predictors
l = featureSelect (train_set_cl,
test_set_cl,
removeOnlyZeroVariacePredictors=T, ### <<< :::: <<< ---------
performVarianceAnalysisOnTrainSetOnly = T ,
removePredictorsMakingIllConditionedSquareMatrix = F,
removeHighCorrelatedPredictors = F,
featureScaling = F)
train_set_cl = l$traindata
test_set_cl = l$testdata
####### DEBUG
if (DEBUG_MODE) {
cat(pid,">>> Debug mode ... \n")
train_set_cl = train_set_cl[1:20,]
y = y[1:20]
} else {
cat(pid,">>> Production mode ... \n")
}
####### end of DEBUG
##############
## MODELING
##############
##
rmse_xval_mod = NULL
pred_mod = NULL
secs_mod = NULL
early.stop = NULL
bestTune = NULL
###
if (model.label == "XGBoost") {
ptm_xgb <- proc.time()
param <- list("objective" = "reg:linear",
"eval_metric" = "rmse",
"eta" = 0.05,
"gamma" = 0.7,
"max_depth" = 20,
"subsample" = 0.5 , ## suggested in ESLII
"nthread" = 10,
"min_child_weight" = 1 ,
"colsample_bytree" = 0.5,
"max_delta_step" = 1)
cat(pid,">> XGBoost Params:\n")
print(param)
xgb = xgb_train_and_predict (train_set = train_set_cl,
y = y,
test_set = test_set_cl,
param = param,
cv.nround = 3000 ,
nfold = min(5,nrow(train_set_cl)) ,
verbose=F)
pred_mod = xgb$pred
rmse_xval_mod = xgb$perf.cv
early.stop = xgb$early.stop
tm_xgb = proc.time() - ptm_xgb
secs_mod = as.numeric(tm_xgb[3])
} else {
# 8-fold repteated 3 times
controlObject <- trainControl(method = "repeatedcv", repeats = 3, number = 8)
l = reg_train_predict ( YtrainingSet = y ,
XtrainingSet = train_set_cl,
testSet = test_set_cl,
model.label = model.label,
controlObject = controlObject,
best.tuning = T)
if ( !is.null(l$model) ) {
rmse_xval_mod = min(l$model$results$RMSE)
bestTune = l$model$bestTune
} else {
rmse_xval_mod = 1000000
}
secs_mod = l$secs
pred_mod = l$pred
}
stopifnot(sum(is.na(pred_mod)) == 0)
stopifnot(length(pred_mod) == length(pred_cl_idx))
cat(pid,'>> number of prediction < 0:',sum(pred_mod<0),' ... repleaced with 1.5 \n')
pred_mod = ifelse(pred_mod<0,1.5,pred_mod)
pred[pred_cl_idx] = pred_mod
# output
return(list(cluster = cls,
model.label = model.label,
rmse_xval_mod = rmse_xval_mod,
early.stop = early.stop,
bestTune = bestTune,
pred = pred,
secs_mod = secs_mod))
},
mc.cores = nrow(grid))
####### end of parallel loop
## Report total wall-clock time of the parallel main loop.
tm = proc.time() - ptm
secs = as.numeric(tm[3])
cat(">>> MAIN LOOP >>> Time elapsed:",secs," secs. [",secs/60,"min.] [",secs/(60*60),"hours] \n")
############## end of MAIN LOOP
## write on disk
## Fold the per-(cluster, model) results in `res_list` back into `grid`
## (timing, xval RMSE, xgboost early-stop round, caret bestTune params)
## and persist the summary table as a CSV.
cat(">>> writing prediction / cluster_perf on disk ... \n")
grid$secs = NA
grid$rmse = NA
grid$early.stop = NA
## Union of all tuning-parameter names across models; each becomes a column.
## NOTE(review): `e['bestTune']` uses single-bracket indexing, which always
## returns a length-1 list, so `is.null(...)` is never TRUE -- the guard is
## ineffective. Harmless here (names() of a NULL element is NULL, which
## unlist() drops), but `e[['bestTune']]` was probably intended.
allParams = unique(unlist(lapply(res_list,function(e) {
  if (! is.null(e['bestTune'])) {
    return(lapply(e['bestTune'],function(ee) {
      return(names(ee))
    }))
  }
  return(NULL)
})))
allParams = allParams[allParams != 'parameter'] ## linear regression: parameter none
grid = cbind(grid,
             setNames(
               as.data.frame(matrix(rep(NA,length(allParams)*nrow(grid)),ncol = length(allParams))),
               allParams))
## Fill the grid rows in place: the `<<-` assignments inside the closure
## mutate `grid` in the enclosing (global) environment; `alist` itself is
## a throwaway list of NULLs.
alist = lapply( res_list , function(ll) {
  cls = ll$cluster
  model.label = ll$model.label
  grid[grid$cluster == cls & grid$model == model.label,]$secs <<- ll$secs_mod
  grid[grid$cluster == cls & grid$model == model.label,]$rmse <<- ll$rmse_xval_mod
  if (model.label == "XGBoost") {
    ## early.stop
    grid[grid$cluster == cls & grid$model == model.label,]$early.stop <<- ll$early.stop
  } else {
    ## sons of bestTune
    ## NOTE(review): same ineffective single-bracket NULL check as above;
    ## iterating over names(NULL) is a no-op, so behavior is still correct.
    if (! is.null(ll['bestTune']) ) {
      for (parName in names(ll$bestTune)) {
        if (parName == 'parameter') next
        grid[grid$cluster == cls & grid$model == model.label,parName] <<- ll$bestTune[parName]
      }
    }
  }
  pid = paste('[cluster:',cls,'][model:',model.label,']',sep='')
  fn = paste('sub_cluster_by_qty_lev_',cls,'_mod_',model.label,'.csv',sep='')
  cat(pid,": writing prediction on disk ... \n")
  stopifnot( sum(ll$pred<0) == 0 )
  ## per-cluster submission write-out currently disabled
  # sample_submission$cost = ll$pred
  # write.csv(sample_submission,quote=FALSE,
  #           file=paste(ff.getPath("submission"),fn,sep='') ,
  #           row.names=FALSE)
})
write.csv(grid,quote=FALSE,
          file=paste(ff.getPath("submission"),'grid_GENERAL.csv',sep='') ,
          row.names=FALSE)
|
library(tidyr)
library(ggplot2)
library(ggrepel)
library(cowplot)
library(scales)
library(ggpubr)
library(Category)
library(GSEABase)
library(GOstats)
library(org.Hs.eg.db)
library(knitr)
library(limma)
library(grid)
library(tidyverse)
library(ggsignif)
library(ggtext)
library(glue)
library(RColorBrewer)
library(MASS)
source("functionsToImport.R")
## --- Differentiation-outcome curation -------------------------------------
## Load per-donor/cell-type cell counts, keep D52 DA + Sert-like cells, and
## call the differentiation outcome per donor_extended (donor x pool):
## Successful when the combined DA + Sert-like cell fraction is >= 0.2.
summData <- readRDS("analysis/outputTabs/summData_donorCtype_Cells.RDS")
summData <- subset(summData, quantile!=1)
summDataTP <- subset(summData, tp=="D52")
summDataTP <- subset(summDataTP, annot=="DA" | annot=="Sert-like")
## One row per donor_extended: pooled cell fraction across the two annots.
summDataDiff <- sapply(unique(summDataTP$donor_extended), function(x) {
  tmp <- subset(summDataTP, x==donor_extended)
  data.frame(donor_id=unique(tmp$donor_id),
             donor_extended=unique(tmp$donor_extended),
             nBatches=paste0(tmp$nBatches, collapse=","),
             cfrac=sum(tmp$nCells)/unique(tmp$nTotalCells))
}, simplify=F)
summDataDiff <- do.call("rbind", summDataDiff)
rownames(summDataDiff) <- NULL
summDataDiff$outcome <- NA
mask_succ <- summDataDiff$cfrac>=0.2
summDataDiff$outcome[mask_succ] <- "Successful"
summDataDiff$outcome[!mask_succ] <- "Failed"
## outcome2: donor-level call that reconciles pool replicates.
summDataDiff$outcome2 <- NA
## outcome2 set to outcome (for those without pool replicates)
summDataDiff[match(names(which(table(summDataDiff$donor_id)==1)), summDataDiff$donor_id),]$outcome2 <-
  summDataDiff[match(names(which(table(summDataDiff$donor_id)==1)), summDataDiff$donor_id),]$outcome
## outcome2 definition for those cell-lines with pool replicates (either both succ, both fail, or discordant)
ids_dup <- names(which(table(summDataDiff$donor_id)>1))
outDisamb <- sapply(ids_dup, function(x){
  tmp <- subset(summDataDiff, donor_id==x)
  if (length(unique(tmp$outcome))==1){
    return(unique(tmp$outcome))
  } else {
    return("Discordant")
  }
}, simplify=T)
## Propagate the reconciled call to every replicate row of each dup donor.
summDataDiff[is.na(summDataDiff$outcome2),]$outcome2 <-
  unname(outDisamb[match(summDataDiff[is.na(summDataDiff$outcome2),]$donor_id, (names(outDisamb)))])
### successful or failed
dopaminergic_neurons <- "analysis/inputTabs/dopNeurons/predDopaminergic.csv"
dopaminergic_neurons <- read.csv(dopaminergic_neurons)
# predicted differentiation efficiency
dopaminergic_neurons$pred_diff_efficiency <- NA
dopaminergic_neurons[dopaminergic_neurons$model_score>=0.2,]$pred_diff_efficiency <- "Successful"
dopaminergic_neurons[dopaminergic_neurons$model_score<0.2,]$pred_diff_efficiency <- "Failed"
summDataDiff$JerberObserved <-
dopaminergic_neurons[match(summDataDiff$donor_id, dopaminergic_neurons$donor_id),]$pred_diff_efficiency
vec <- c("Successful","NA","NotAssessed","Failed")
names(vec) <- unique(summDataDiff$JerberObserved)
summDataDiff$JerberObserved <- unname(vec[summDataDiff$JerberObserved])
summDataDiff$JerberPredicted <-
dopaminergic_neurons[match(summDataDiff$donor_id, dopaminergic_neurons$donor_id),]$pred_diff_efficiency
table(summDataDiff$outcome2, summDataDiff$JerberPredicted)
# Failed Successful
# Discordant 0 6
# Failed 51 2
# Successful 1 158
summDataDiff$duplicatedPool <- FALSE
summDataDiff[summDataDiff$donor_id %in% names(outDisamb),]$duplicatedPool <- TRUE
table(sapply(unique(summDataDiff[summDataDiff$duplicatedPool==TRUE,]$donor_id),
function(x) unique(subset(summDataDiff, donor_id==x)$outcome2), simplify=T))
# Discordant Failed Successful
# 3 6 27
summData$outcome <- summDataDiff[match(summData$donor_extended, summDataDiff$donor_extended),]$outcome
summData <- summData[!summData$donor_id %in% unique(summDataDiff[summDataDiff$outcome2=="Discordant",]$donor_id),]
outcomeDetail <- sapply(split(summData$outcome, summData$donor_id), function(x) unique(x))
toAddPools <- sapply(outcomeDetail[elementNROWS(outcomeDetail)>1], function(x){
x[!is.na(x) & length(x)>1]
})
for (i in 1:length(toAddPools)){
summData[summData$donor_id %in% names(toAddPools)[i],]$outcome <- unname(toAddPools[i])
}
summData$measuredOut <- "observed"
## There is a list of 18 donors/pool that outcome was not measured as they were not present in D52.
## To take advantage of those samples (and avoid removing them), we use the JerberPredicted data to annotate the outcome.
## Note here that the agreement between those predicitions and our experimental data is very high for those with exp. data available.
outcomeDetail <- sapply(split(summData$outcome, summData$donor_id), function(x) unique(x))
missing <- outcomeDetail[is.na(outcomeDetail)]
mask_na <- is.na(summData$outcome)
summData[mask_na,]$outcome <- dopaminergic_neurons[match(summData[mask_na,]$donor_id, dopaminergic_neurons$donor_id),]$pred_diff_efficiency
summData[mask_na,]$measuredOut <- "predicted"
outcomeDetail <- sapply(split(summData$outcome, summData$donor_id), function(x) unique(x))
missing <- outcomeDetail[is.na(outcomeDetail)]
summData <- subset(summData, !is.na(outcome))
saveRDS(summData, file="analysis/outputTabs/DEsinglecell/CTfractionPerLinePerTPCurated2.RDS")
#############
### fig4A ###
#############
## We finally remove those annotations from which we cannot derive the outcome
pvaltp <- sapply(unique(summData$tp), function(x){
tmp_summData <- subset(summData, tp==x)
pvalAnnot <- sapply(unique(tmp_summData$annot), function(y){
tmp_summData2 <- subset(tmp_summData, annot==y)
model.nb = glm.nb(nCells ~ outcome + offset(log(nTotalCells)), data=tmp_summData2)
res = coef(summary(model.nb))
data.frame(annot=y,
tp=x,
pval.nb=res["outcomeSuccessful","Pr(>|z|)"],
pval.wilcox=wilcox.test(subset(tmp_summData2, outcome=="Failed")$cfrac,subset(tmp_summData2, outcome=="Successful")$cfrac)$p.value)
}, simplify=F)
pvalAnnot <- do.call("rbind", pvalAnnot)
rownames(pvalAnnot) <- NULL
pvalAnnot
}, simplify=F)
pvaltp <- do.call("rbind", pvaltp)
rownames(pvaltp) <- NULL
pvaltp$pvalAdj.nb <- p.adjust(pvaltp$pval.nb, "BH")
pvaltp$signif.nb <- pvalConverter(pvaltp$pvalAdj.nb)
pvaltp$comb <- paste0(pvaltp$annot,"-", pvaltp$tp)
cfracTab <- as.data.frame(summData %>%
group_by(tp, annot) %>%
summarise_at(.vars = c("cfrac"), .funs = mean))
cfracTab$rareClust <- FALSE
cfracTab[cfracTab$cfrac<0.02,]$rareClust <- TRUE
cfracTab$comb <- paste0(cfracTab$annot,"-", cfracTab$tp)
pvaltp$rareClust <- cfracTab[match(pvaltp$comb, cfracTab$comb),]$rareClust
fig4a <- ggplot(summData, aes(x=annot, y=cfrac, fill=factor(outcome)))+
geom_boxplot(outlier.shape=NA)+
facet_wrap(~tp, nrow = 3)+
geom_jitter(position=position_jitterdodge(jitter.width =0.2),size=0.25, alpha=0.25)+
theme_bw()+
theme(legend.position="top",
plot.title=element_text(hjust=0.5, face="bold"),
axis.text.x=element_text(angle=30, vjust=0.5, hjust=0.5, size=12),
axis.text.y=element_text(size=12),
axis.title=element_text(size=14),
legend.title=element_text(size=14),
legend.text=element_text(size=12))+
xlab("")+
ylab("Cell-type fraction")+
#ggtitle("Cell-type composition")+
# geom_text(data = pvaltp, aes(x = annot, y = 0.9,
# label = paste0("p=",formatC(pvalAdj.nb, format = "e", digits = 1)),
# face="bold"),
# size=3, inherit.aes=F)+
geom_text(data = pvaltp, aes(x = annot, y = 0.9, label = signif.nb, color=rareClust),
size=4, inherit.aes=F)+
coord_cartesian(ylim=c(0,1))+
scale_fill_manual(name="Differentiation outcome", values=c("Failed"='#E69F00', "Successful"='#56B4E9'))+
scale_y_continuous(breaks=seq(0,1,0.25))+
scale_color_manual(name=c("Rare Cluster"),values=c("FALSE"="black","TRUE"="red"))+
theme(legend.title=element_text(face="bold"))+
guides(col=FALSE)
pdf(file="figures/mainFigs/figure4A.pdf")
plot(fig4a)
dev.off()
##############
### Fig 4B ###
##############
metadata <- readRDS("analysis/outputTabs/suppData1.RDS")
valid_3tp <- sapply(unique(metadata$donor_extended), function(x){
tmp <- subset(metadata, donor_extended==x)
if (length(unique(tmp$tp))==3){
x
} else {
NA
}
}, simplify=T)
valid_3tp <- unname(valid_3tp[!is.na(valid_3tp)])
metadata <- metadata[metadata$donor_extended %in% valid_3tp,]
dirFiles <- "demuxlet/deconvolution/"
Files <- dir(dirFiles)[grepl("_sample_list.txt",dir(dirFiles))]
datalist = lapply(paste0(dirFiles,Files), function(x)read.table(x, header=F))
names(datalist) <- gsub("_sample.+","", Files)
genes_state_KO <- c("ASXL3","SNCA","CTNNB1","TCF4","CHD2","SET","GATAD2B","TBL1XR1")
names(genes_state_KO) <- c("pool11","pool12","pool13","pool14","pool16","pool17","pool20","pool21")
tmpPerPool <- sapply(unique(metadata$pool_id), function(x){
tmpPool <- subset(metadata, pool_id==x)
tmpclines <- datalist[[x]]$V1
if (any(x %in% names(genes_state_KO))){
mask <- grepl("kolf_2", tmpclines)
tmpko <- unname(genes_state_KO[match(x, names(genes_state_KO))])
tmpclines[mask] <- paste0(tmpclines[mask],"/", tmpko)
}
tmp_tp <- sapply(sort(c("D0",unique(tmpPool$tp))), function(y){
if (y=="D0"){
tmp <- data.frame(cline=tmpclines,
pool=x,
timePoint=y,
cfrac=signif(1/length(tmpclines),3))
} else {
tmpPool2 <- subset(tmpPool, tp==y)
tabNum <- table(tmpPool2$donor_id)
tmp <- data.frame(cline=tmpclines,
pool=x,
timePoint=y,
cfrac=NA)
tmp$cfrac <- signif(tabNum[match(tmp$cline, names(tabNum))]/sum(tabNum),3)
}
tmp
}, simplify=F)
tmp_tp <- do.call("rbind", tmp_tp)
rownames(tmp_tp) <- NULL
tmp_tp
}, simplify=F)
tmpPerPool <- do.call("rbind", tmpPerPool)
rownames(tmpPerPool) <- NULL
tmpPerPool$pool <- firstup(tmpPerPool$pool)
tmpPerPool$pool <- as.factor(tmpPerPool$pool)
tmpPerPool$pool <- factor(tmpPerPool$pool,
levels=levels(tmpPerPool$pool)[order(as.numeric(gsub("Pool","",levels(tmpPerPool$pool))))])
tmpPerPool$cline_expanded <- paste0(tmpPerPool$cline,"/", tmpPerPool$pool)
tmpPerPool$cfrac_log1p <- signif(log1p(tmpPerPool$cfrac),3)
ratiosTab <- sapply(unique(tmpPerPool$cline_expanded), function(z){
tmp <- subset(tmpPerPool, cline_expanded==z)
data.frame(cline_expanded=z,
pool=as.character(unique(tmp$pool)),
ratio_d11_d0=signif(subset(tmp, timePoint=="D11")$cfrac/subset(tmp, timePoint=="D0")$cfrac,3),
ratio_d30_d0=signif(subset(tmp, timePoint=="D30")$cfrac/subset(tmp, timePoint=="D0")$cfrac,3),
ratio_d52_d0=signif(subset(tmp, timePoint=="D52")$cfrac/subset(tmp, timePoint=="D0")$cfrac,3))
}, simplify=F)
ratiosTab <- do.call("rbind", ratiosTab)
rownames(ratiosTab) <- NULL
ratiosTab$pool <- firstup(ratiosTab$pool)
## do that for the three Ratios
cols <- colnames(ratiosTab)[grepl("ratio",colnames(ratiosTab))]
## outlier analysis
summData <- readRDS("tabs/summData_donorCtype_Cells.RDS")
summData <- subset(summData, quantile!=1)
summData <- sapply(unique(summData$tp), function(x){
tmp <- subset(summData, tp==x)
annotTmp <- sapply(unique(tmp$annot), function(y){
tmp2 <- subset(tmp, annot==y)
tmp2$zsco <- round((tmp2$cfrac-mean(tmp2$cfrac))/sd(tmp2$cfrac),2)
tmp2$outlier <- abs(tmp2$zsco)>2
tmp2
}, simplify=F)
annotTmp <- do.call("rbind", annotTmp)
rownames(annotTmp) <- NULL
annotTmp
}, simplify=F)
summData <- do.call("rbind", summData)
rownames(summData) <- NULL
summData$pool <- sapply(strsplit(summData$donor_extended, "/"), function(x) x[length(x)])
summData$donor_extended <- gsub("pool","Pool", summData$donor_extended)
allClust <- readRDS("analysis/outputTabs/corr_cfracBurdenprolif.RDS")
adjList <- sapply(unique(allClust$annotId), function(x){
tmp <- subset(allClust, annotId==x)
tmp <- tmp[order(tmp$logRegPval, decreasing = F),]
tmp$adjPval <- p.adjust(tmp$wilcoxPval, "BH")
tmp
}, simplify=F)
adjList_filt <- sapply(adjList, function(x){
subset(x, adjPval<0.05)
}, simplify=F)
adjList_filt <- do.call("rbind", adjList_filt)
rownames(adjList_filt) <- NULL
retain <- table(metadata$annot, metadata$tp)*100/colSums(table(metadata$annot, metadata$tp))>2
retain <- retain[,"D52"]
retain <- names(retain[retain])
adjList_filt2 <- adjList_filt[adjList_filt$annotId %in% retain,]
proliferationAndCellFraction <- function(ratio="ratio_d11_d0", timePoint="D11"){
  ## Test, per major cell-type, whether cell-line proliferation explains the
  ## cell-type fraction observed at a given time-point.
  ##
  ## Args:
  ##   ratio:     column name of the global `ratiosTab` holding the
  ##              proliferation proxy (cell-fraction ratio vs D0),
  ##              e.g. "ratio_d52_d0".
  ##   timePoint: time-point label used to slice the global `summData`.
  ##
  ## Returns: one row per major cell-type (mean cfrac > 2%) with the Pearson
  ##   correlation, linear-model p-value and effect size, BH-adjusted p-value
  ##   and a significance label (via the global helper `pvalConverter`).
  ##
  ## NOTE: reads the globals `summData` and `ratiosTab` prepared by the
  ## surrounding script.
  tmpCfrac <- subset(summData, tp==timePoint)
  tmpCfrac$prolifRate <- ratiosTab[match(tmpCfrac$donor_extended, ratiosTab$cline_expanded),ratio]
  tmpCfrac$ratioType <- ratio
  ## "major" cell-types: mean fraction above 2% at this time-point
  majorTypes <-names(which(sapply(unique(tmpCfrac$annot), function(x){
    mean(subset(summData, tp==timePoint & annot==x)$cfrac)>0.02
  })))
  annotCorr <- sapply(majorTypes, function(x){
    tmpCfrac_annot <- subset(tmpCfrac, annot==x)
    res <- lm(cfrac~prolifRate, data=tmpCfrac_annot)
    data.frame(annot=x,
               ## FIX: cor.test() has no `family` argument; the original
               ## family="pearson" was silently absorbed by `...`. `method`
               ## is the correct argument name (same effective result, since
               ## "pearson" is also the default, but now explicit and valid).
               corrPearson=unname(cor.test(tmpCfrac_annot$prolifRate, tmpCfrac_annot$cfrac, method="pearson")$estimate),
               pval_lm=summary(res)$coef["prolifRate","Pr(>|t|)"],
               effect_size_lm=summary(res)$coef["prolifRate","Estimate"],
               tp=timePoint,
               prolifRate=ratio)
  }, simplify=F)
  annotCorr <- do.call("rbind", annotCorr)
  rownames(annotCorr) <- NULL
  annotCorr$pAdj <- p.adjust(annotCorr$pval_lm, "BH")
  annotCorr$signif <- pvalConverter(annotCorr$pAdj)
  return(annotCorr)
}
prolifTable <- proliferationAndCellFraction(ratio="ratio_d52_d0", timePoint="D52")
## Round `x` downward (toward -Inf) to `n` significant digits. Vectorized.
## Zeros are mapped to zero explicitly, since log10(0) = -Inf would otherwise
## turn the scaling into NaN.
signif.floor <- function(x, n){
  ## exponent of the least significant digit that is kept
  digit_pow <- floor(log10(abs(x))) + 1 - n
  step <- 10 ^ digit_pow
  out <- floor(x / step) * step
  out[x == 0] <- 0
  out
}
## Round `x` upward (toward +Inf) to `n` significant digits. Vectorized.
## Zeros are mapped to zero explicitly, since log10(0) = -Inf would otherwise
## turn the scaling into NaN.
signif.ceiling <- function(x, n){
  ## exponent of the least significant digit that is kept
  digit_pow <- floor(log10(abs(x))) + 1 - n
  step <- 10 ^ digit_pow
  out <- ceiling(x / step) * step
  out[x == 0] <- 0
  out
}
plotD52 <- ggplot(data=prolifTable, aes(x=annot, y=tp, fill=corrPearson))+
geom_tile(col="black")+
geom_point(data=prolifTable, aes(x=annot, y=tp, size=signif, shape=signif))+
theme_bw()+
ylab("")+xlab("")+
scale_y_discrete(position = "right")+
theme(legend.position="top",
axis.ticks.y=element_blank(),
axis.text.y=element_blank(),
legend.title=element_text(size=14, face="bold"),
axis.text.x=element_text(angle=90, hjust=0.5, vjust=0.5, size=14),
plot.title=element_text(hjust=0.5, size=13, face="bold"),
legend.text=element_text(size=12))+
#ggtitle("Cell-line proliferation ~ Cell-type fraction (Day 52)")+
scale_size_manual(name="LinearReg P.adj", values=c(4,4,6,8),breaks=c("ns","*","**","***"))+
scale_shape_manual(name="LinearReg P.adj",values=c(1,16,16,16), breaks=c("ns","*","**","***"))+
scale_fill_gradientn(name="Pearson correlation",
colours = c("orange",
"white", "blue"),
limits=c(signif.floor(min(prolifTable$corrPearson),1),
signif.ceiling(max(prolifTable$corrPearson),1)),
labels=seq(signif.floor(min(prolifTable$corrPearson),1),signif.ceiling(max(prolifTable$corrPearson),1),0.2),
breaks=seq(signif.floor(min(prolifTable$corrPearson),1),signif.ceiling(max(prolifTable$corrPearson),1),0.2),
guide = guide_colourbar(barwidth = 10, nbin = 5))+
guides(size=guide_legend(nrow=3,byrow=TRUE))
pdf(file=paste0("figures/mainFigs/figure4B.pdf"))
plot(plotD52)
dev.off()
#############
### fig4C ###
#############
## DE analysis (add to scripts)
pathTODE <- "analysis/outputTabs/DEsinglecell/"
summResults <- paste0(pathTODE, list.files(path=pathTODE,
pattern = "resultsDEFailVsSucc2.+.RDS")) %>%
map(readRDS) %>%
bind_rows()
heatmap_df <- summResults
heatmap_df <- heatmap_df[,match(c("annot","timepoint","numDE","numFailedDonors","numSuccessfulDonors","numFailedCells","numSuccessfulCells"), colnames(heatmap_df))]
heatmap_df <- heatmap_df[!duplicated(heatmap_df),]
# creation of geneUniverse (add to scripts)
geneUniverse <- readRDS("analysis/outputTabs/DEsinglecell/geneUniverse_seurat.RDS")
pval_df <- rbind(enrichmentCalc(summResults, geneUniverse, geneSet="ddd"),
enrichmentCalc(summResults, geneUniverse, geneSet="cosmic"),
enrichmentCalc(summResults, geneUniverse, geneSet="ddd_dominantMOI"))
pval_df$pvalAdj <- p.adjust(pval_df$pval, "BH")
pval_df$signif <- pvalConverter(pval_df$pvalAdj)
pval_df$signif <- factor(pval_df$signif, levels=c("ns","*","**","***"))
genVec <- c("DDD","Cosmic-T1","DDD-Dominant")
names(genVec) <- unique(pval_df$geneSet)
pval_df$geneSet <- unname(genVec[pval_df$geneSet])
heatmap_df <- subset(heatmap_df, annot!="Unk2")
heatmap_df$annot <- as.factor(heatmap_df$annot)
heatmap_df$annot <- factor(heatmap_df$annot, levels=rev(levels(heatmap_df$annot)))
pval_df <- subset(pval_df, annot!="Unk2")
pval_df$annot <- as.factor(pval_df$annot)
pval_df$annot <- factor(pval_df$annot, levels=levels(pval_df$annot))
heatmap_df$annot <- as.factor(heatmap_df$annot)
heatmap_df$annot <- factor(heatmap_df$annot, levels=rev(levels(pval_df$annot)))
size_values=c("ns"=0,
"*"=4,
"**"=6,
"***"=8)
myPalette <- colorRampPalette(brewer.pal(9, "YlOrRd"))
my_breaks <- c(0,100,200,300,400,500)
fig4c <- ggplot(data=heatmap_df, aes(x=timepoint, y=annot, fill=numDE))+
geom_tile()+
theme_classic()+
theme(axis.text.x=element_text(vjust=0.5),
plot.title=element_text(hjust=0.5, face="bold"),
axis.title=element_text(size=14, face="bold"),
axis.text=element_text(size=12),
legend.text=element_text(size=12),
legend.title=element_text(size=14))+
scale_fill_gradientn(name="DE genes",
colours=myPalette(100),
limits=c(0,550),
labels=my_breaks,
breaks=my_breaks)+
xlab("Time-point")+
ylab("Cell-types")+
#ggtitle("Number of DE genes / Gene set enrichment")+
geom_point(data=pval_df, aes(x=xpos, y=annot, size=signif, col=geneSet), inherit.aes = F)+
scale_color_manual(name="Gene-Set", values=c("DDD"="cyan1","Cosmic-T1"="plum1","DDD-Dominant"="cornflowerblue"))+
theme(legend.title=element_text(face="bold"))+
scale_size_manual(name="Adj.Pval", values=size_values)+
guides(shape=F)
pdf(file="figures/mainFigs/figure4C.pdf")
plot(fig4c)
dev.off()
#############
### fig4D ###
#############
stopifnot(all(!is.na(match(summResults$geneDE, geneUniverse$symbol))))
tabCorr <- AnnotationDbi::select(org.Hs.eg.db, geneUniverse$ensembl, "ENTREZID", "ENSEMBL")
tabCorr <- tabCorr[!is.na(tabCorr$ENTREZID),]
tabCorr <- tabCorr[!duplicated(tabCorr$ENSEMBL),]
geneUniverse$entrezid <- tabCorr[match(geneUniverse$ensembl,tabCorr$ENSEMBL),]$ENTREZID
geneUniverse <- subset(geneUniverse, !is.na(entrezid))
summResults$entrezid <- geneUniverse[match(summResults$geneDE, geneUniverse$symbol),]$entrezid
summResults$comb <- paste0(summResults$timepoint,"-", summResults$annot)
summResults <- subset(summResults, !is.na(entrezid))
pval_df$comb <- paste0(pval_df$timepoint,"-", pval_df$annot)
## all DE considering all clusters together
allclusters <- unique(summResults$entrezid)
geneUniverse_allclusters <- unique(geneUniverse$entrezid)
## all DE considering only signif clusters (either ddd or cosmic)
mask_ddd <- pval_df$geneSet=="DDD"
mask_cosmic <- pval_df$geneSet=="Cosmic-T1"
allsignif_ddd <- pval_df[mask_ddd & pval_df$signif!="ns",]$comb
#allsignif_cosmic <- pval_df[mask_cosmic & pval_df$signif!="ns",]$comb
allsignif_ddd <- unique(summResults[summResults$comb %in% allsignif_ddd,]$entrezid)
#allsignif_cosmic <- unique(summResults[summResults$comb %in% allsignif_cosmic,]$entrezid)
geneUniverse_allsignif <- geneUniverse_allclusters
## list of DE for each signif cluster (ddd)
list_clustr_ddd <- sapply(pval_df[mask_ddd & pval_df$signif!="ns",]$comb, function(x){
subset(summResults, comb==x)$entrezid
}, simplify=F)
list_geneUniverse_ddd <- gsub("-.+","",pval_df[mask_ddd & pval_df$signif!="ns",]$comb)
list_geneUniverse_ddd <- sapply(list_geneUniverse_ddd, function(x){
colMatch <- match(x, colnames(geneUniverse))
unique(geneUniverse[geneUniverse[,colMatch]==TRUE,]$entrezid)
}, simplify=F)
## list of DE for each signif cluster (ddd)
list_clustr_cosmic <- sapply(pval_df[mask_cosmic & pval_df$signif!="ns",]$comb, function(x){
subset(summResults, comb==x)$entrezid
}, simplify=F)
list_geneUniverse_cosmic <- gsub("-.+","",pval_df[mask_cosmic & pval_df$signif!="ns",]$comb)
list_geneUniverse_cosmic <- sapply(list_geneUniverse_cosmic, function(x){
colMatch <- match(x, colnames(geneUniverse))
unique(geneUniverse[geneUniverse[,colMatch]==TRUE,]$entrezid)
}, simplify=F)
## GOenrichment tests
report_allclusters <- GOenrichmentAndReport(allclusters, geneUniverse_allclusters, minSize=30, maxSize=200, minCount=20, p.value=0.001, label="allDE")
##allsignif-ddd
report_allsignif_ddd <- GOenrichmentAndReport(allsignif_ddd, geneUniverse_allsignif, minSize=30, maxSize=200, minCount=20, p.value=0.001, label="allsignif")
report_allclusters$GeneSyms <- NULL
report_allsignif_ddd$GeneSyms <- NULL
##ddd
report_testClust <- sapply(1:length(list_clustr_ddd), function(x){
print(x)
labelClust= names(list_clustr_ddd)[x]
report_testClust <- GOenrichmentAndReport(list_clustr_ddd[[x]], list_geneUniverse_ddd[[x]], minSize=10, maxSize=200, minCount=7, p.value=0.001, label=labelClust)
report_testClust$GeneSyms <- NULL
report_testClust
}, simplify=F)
#saveRDS(report_testClust, file="analysis/outputTabs/DEsinglecell/report_testClust_ddd2.RDS")
#report_testClust <- readRDS("analysis/outputTabs/DEsinglecell/report_testClust_ddd2.RDS")
suppTable7 <- rbind(report_allclusters, report_allsignif_ddd, do.call("rbind", report_testClust))
write.table(suppTable7, file="suppTabs/suppTable7.txt",
quote=F, sep="\t", col.names=T, row.names=F)
addPosition <- function(report_allclusters){
  ## Append a 1-based rank column recording each GO term's row order within
  ## its enrichment report (used as the tile label in figure 4D).
  ## seq_len() handles an empty report safely, whereas 1:dim(df)[1] would
  ## yield c(1, 0) for zero rows and fail on assignment.
  report_allclusters$position <- seq_len(nrow(report_allclusters))
  return(report_allclusters)
}
report_allclusters <- addPosition(report_allclusters)
report_allsignif_ddd <- addPosition(report_allsignif_ddd)
report_testClust <- sapply(report_testClust, function(x){
addPosition(x)
}, simplify=F)
report_testClust <- do.call("rbind", report_testClust)
reportInfo <- rbind(report_allclusters, report_allsignif_ddd, report_testClust)
vecMatch <- c("axon","neuron","glial","brain",
"hindbrain","forebrain","midbrain","synapse","chromatin",
"cerebellum", "neural","cortex","neurogenesis",
"axonogenesis","nervous","hippocampus","neurotransmitter",
"dopaminergic", "axenome", "action potential","synaptic")
vecLogic <- sapply(vecMatch, function(x){
grepl(x,reportInfo$Term)
}, simplify=T)
reportInfo$neuroRelated <- rowSums(vecLogic)>0
reportInfo$top1 <- FALSE
reportInfo[match(unique(reportInfo$label), reportInfo$label),]$top1 <- TRUE
reportInfo$top2 <- FALSE
reportInfo[sort(c(match(unique(reportInfo$label), reportInfo$label), (match(unique(reportInfo$label), reportInfo$label))+1)),]$top2 <- TRUE
reportInfo$mostShared <- !is.na(match(reportInfo$Term,names(sort(table(reportInfo$Term), decreasing=T)[1:20])))
### Neuro-related ###
neuroTab <- subset(reportInfo, neuroRelated==TRUE)
neuroTab <- rbind(neuroTab, fillNA(neuroTab, list_clustr_ddd, highlight="neuroRelated"))
maxOddsNeuro <- ceiling(max(subset(reportInfo, neuroRelated==TRUE)$OddsRatio))
fig4d <- ggplot(neuroTab,
aes(y=Term, x=label, fill=OddsRatio))+
geom_tile(colour = "black")+
theme_classic()+
theme(axis.text.x=element_text(angle=90,hjust=1, vjust=0.5),
plot.title=element_text(hjust=0.5, face="bold", size=13))+
xlab("")+
ylab("")+
#ggtitle("GO:BP enrichment in critical cell-types")+
scale_fill_gradientn(name="OddsRatio",
colours=myPalette(100),
limits=c(0,maxOddsNeuro),
labels=seq(0,maxOddsNeuro,2),
breaks=seq(0,maxOddsNeuro,2),
na.value = 'grey90')+
geom_text(aes(label=position))
pdf(file="figures/mainFigs/figure4D.pdf", width = 7, height = 4)
plot(fig4d)
dev.off()
|
/figure4.R
|
no_license
|
paupuigdevall/somaticBurdenNeuro2022
|
R
| false
| false
| 24,521
|
r
|
library(tidyr)
library(ggplot2)
library(ggrepel)
library(cowplot)
library(scales)
library(ggpubr)
library(Category)
library(GSEABase)
library(GOstats)
library(org.Hs.eg.db)
library(knitr)
library(limma)
library(grid)
library(tidyverse)
library(ggsignif)
library(ggtext)
library(glue)
library(RColorBrewer)
library(MASS)
source("functionsToImport.R")
summData <- readRDS("analysis/outputTabs/summData_donorCtype_Cells.RDS")
summData <- subset(summData, quantile!=1)
summDataTP <- subset(summData, tp=="D52")
summDataTP <- subset(summDataTP, annot=="DA" | annot=="Sert-like")
summDataDiff <- sapply(unique(summDataTP$donor_extended), function(x) {
tmp <- subset(summDataTP, x==donor_extended)
data.frame(donor_id=unique(tmp$donor_id),
donor_extended=unique(tmp$donor_extended),
nBatches=paste0(tmp$nBatches, collapse=","),
cfrac=sum(tmp$nCells)/unique(tmp$nTotalCells))
}, simplify=F)
summDataDiff <- do.call("rbind", summDataDiff)
rownames(summDataDiff) <- NULL
summDataDiff$outcome <- NA
mask_succ <- summDataDiff$cfrac>=0.2
summDataDiff$outcome[mask_succ] <- "Successful"
summDataDiff$outcome[!mask_succ] <- "Failed"
summDataDiff$outcome2 <- NA
## outcome2 set to outcome (for those without pool replicates)
summDataDiff[match(names(which(table(summDataDiff$donor_id)==1)), summDataDiff$donor_id),]$outcome2 <-
summDataDiff[match(names(which(table(summDataDiff$donor_id)==1)), summDataDiff$donor_id),]$outcome
## outcome2 definition for those cell-lines with pool replicates (either both succ, both fail, or discordant)
ids_dup <- names(which(table(summDataDiff$donor_id)>1))
outDisamb <- sapply(ids_dup, function(x){
tmp <- subset(summDataDiff, donor_id==x)
if (length(unique(tmp$outcome))==1){
return(unique(tmp$outcome))
} else {
return("Discordant")
}
}, simplify=T)
summDataDiff[is.na(summDataDiff$outcome2),]$outcome2 <-
unname(outDisamb[match(summDataDiff[is.na(summDataDiff$outcome2),]$donor_id, (names(outDisamb)))])
### successful or failed
dopaminergic_neurons <- "analysis/inputTabs/dopNeurons/predDopaminergic.csv"
dopaminergic_neurons <- read.csv(dopaminergic_neurons)
# predicted differentiation efficiency
dopaminergic_neurons$pred_diff_efficiency <- NA
dopaminergic_neurons[dopaminergic_neurons$model_score>=0.2,]$pred_diff_efficiency <- "Successful"
dopaminergic_neurons[dopaminergic_neurons$model_score<0.2,]$pred_diff_efficiency <- "Failed"
summDataDiff$JerberObserved <-
dopaminergic_neurons[match(summDataDiff$donor_id, dopaminergic_neurons$donor_id),]$pred_diff_efficiency
vec <- c("Successful","NA","NotAssessed","Failed")
names(vec) <- unique(summDataDiff$JerberObserved)
summDataDiff$JerberObserved <- unname(vec[summDataDiff$JerberObserved])
summDataDiff$JerberPredicted <-
dopaminergic_neurons[match(summDataDiff$donor_id, dopaminergic_neurons$donor_id),]$pred_diff_efficiency
table(summDataDiff$outcome2, summDataDiff$JerberPredicted)
# Failed Successful
# Discordant 0 6
# Failed 51 2
# Successful 1 158
summDataDiff$duplicatedPool <- FALSE
summDataDiff[summDataDiff$donor_id %in% names(outDisamb),]$duplicatedPool <- TRUE
table(sapply(unique(summDataDiff[summDataDiff$duplicatedPool==TRUE,]$donor_id),
function(x) unique(subset(summDataDiff, donor_id==x)$outcome2), simplify=T))
# Discordant Failed Successful
# 3 6 27
summData$outcome <- summDataDiff[match(summData$donor_extended, summDataDiff$donor_extended),]$outcome
summData <- summData[!summData$donor_id %in% unique(summDataDiff[summDataDiff$outcome2=="Discordant",]$donor_id),]
outcomeDetail <- sapply(split(summData$outcome, summData$donor_id), function(x) unique(x))
## Donors measured under more than one outcome across pools: keep, for each
## such donor, the single non-NA outcome recorded (elementNROWS > 1 flags
## donors with multiple entries).
toAddPools <- sapply(outcomeDetail[elementNROWS(outcomeDetail)>1], function(x){
  x[!is.na(x) & length(x)>1]
})
## Propagate the resolved outcome back onto every row of that donor.
## seq_along() is safe when no donor needs fixing; 1:length(toAddPools)
## would yield c(1, 0) for an empty vector and error out.
for (i in seq_along(toAddPools)){
  summData[summData$donor_id %in% names(toAddPools)[i],]$outcome <- unname(toAddPools[i])
}
summData$measuredOut <- "observed"
## There is a list of 18 donors/pool that outcome was not measured as they were not present in D52.
## To take advantage of those samples (and avoid removing them), we use the JerberPredicted data to annotate the outcome.
## Note here that the agreement between those predicitions and our experimental data is very high for those with exp. data available.
outcomeDetail <- sapply(split(summData$outcome, summData$donor_id), function(x) unique(x))
missing <- outcomeDetail[is.na(outcomeDetail)]
mask_na <- is.na(summData$outcome)
summData[mask_na,]$outcome <- dopaminergic_neurons[match(summData[mask_na,]$donor_id, dopaminergic_neurons$donor_id),]$pred_diff_efficiency
summData[mask_na,]$measuredOut <- "predicted"
outcomeDetail <- sapply(split(summData$outcome, summData$donor_id), function(x) unique(x))
missing <- outcomeDetail[is.na(outcomeDetail)]
summData <- subset(summData, !is.na(outcome))
saveRDS(summData, file="analysis/outputTabs/DEsinglecell/CTfractionPerLinePerTPCurated2.RDS")
#############
### fig4A ###
#############
## We finally remove those annotations from which we cannot derive the outcome
pvaltp <- sapply(unique(summData$tp), function(x){
tmp_summData <- subset(summData, tp==x)
pvalAnnot <- sapply(unique(tmp_summData$annot), function(y){
tmp_summData2 <- subset(tmp_summData, annot==y)
model.nb = glm.nb(nCells ~ outcome + offset(log(nTotalCells)), data=tmp_summData2)
res = coef(summary(model.nb))
data.frame(annot=y,
tp=x,
pval.nb=res["outcomeSuccessful","Pr(>|z|)"],
pval.wilcox=wilcox.test(subset(tmp_summData2, outcome=="Failed")$cfrac,subset(tmp_summData2, outcome=="Successful")$cfrac)$p.value)
}, simplify=F)
pvalAnnot <- do.call("rbind", pvalAnnot)
rownames(pvalAnnot) <- NULL
pvalAnnot
}, simplify=F)
pvaltp <- do.call("rbind", pvaltp)
rownames(pvaltp) <- NULL
pvaltp$pvalAdj.nb <- p.adjust(pvaltp$pval.nb, "BH")
pvaltp$signif.nb <- pvalConverter(pvaltp$pvalAdj.nb)
pvaltp$comb <- paste0(pvaltp$annot,"-", pvaltp$tp)
cfracTab <- as.data.frame(summData %>%
group_by(tp, annot) %>%
summarise_at(.vars = c("cfrac"), .funs = mean))
cfracTab$rareClust <- FALSE
cfracTab[cfracTab$cfrac<0.02,]$rareClust <- TRUE
cfracTab$comb <- paste0(cfracTab$annot,"-", cfracTab$tp)
pvaltp$rareClust <- cfracTab[match(pvaltp$comb, cfracTab$comb),]$rareClust
fig4a <- ggplot(summData, aes(x=annot, y=cfrac, fill=factor(outcome)))+
geom_boxplot(outlier.shape=NA)+
facet_wrap(~tp, nrow = 3)+
geom_jitter(position=position_jitterdodge(jitter.width =0.2),size=0.25, alpha=0.25)+
theme_bw()+
theme(legend.position="top",
plot.title=element_text(hjust=0.5, face="bold"),
axis.text.x=element_text(angle=30, vjust=0.5, hjust=0.5, size=12),
axis.text.y=element_text(size=12),
axis.title=element_text(size=14),
legend.title=element_text(size=14),
legend.text=element_text(size=12))+
xlab("")+
ylab("Cell-type fraction")+
#ggtitle("Cell-type composition")+
# geom_text(data = pvaltp, aes(x = annot, y = 0.9,
# label = paste0("p=",formatC(pvalAdj.nb, format = "e", digits = 1)),
# face="bold"),
# size=3, inherit.aes=F)+
geom_text(data = pvaltp, aes(x = annot, y = 0.9, label = signif.nb, color=rareClust),
size=4, inherit.aes=F)+
coord_cartesian(ylim=c(0,1))+
scale_fill_manual(name="Differentiation outcome", values=c("Failed"='#E69F00', "Successful"='#56B4E9'))+
scale_y_continuous(breaks=seq(0,1,0.25))+
scale_color_manual(name=c("Rare Cluster"),values=c("FALSE"="black","TRUE"="red"))+
theme(legend.title=element_text(face="bold"))+
guides(col=FALSE)
pdf(file="figures/mainFigs/figure4A.pdf")
plot(fig4a)
dev.off()
##############
### Fig 4B ###
##############
metadata <- readRDS("analysis/outputTabs/suppData1.RDS")
valid_3tp <- sapply(unique(metadata$donor_extended), function(x){
tmp <- subset(metadata, donor_extended==x)
if (length(unique(tmp$tp))==3){
x
} else {
NA
}
}, simplify=T)
valid_3tp <- unname(valid_3tp[!is.na(valid_3tp)])
metadata <- metadata[metadata$donor_extended %in% valid_3tp,]
dirFiles <- "demuxlet/deconvolution/"
Files <- dir(dirFiles)[grepl("_sample_list.txt",dir(dirFiles))]
datalist = lapply(paste0(dirFiles,Files), function(x)read.table(x, header=F))
names(datalist) <- gsub("_sample.+","", Files)
genes_state_KO <- c("ASXL3","SNCA","CTNNB1","TCF4","CHD2","SET","GATAD2B","TBL1XR1")
names(genes_state_KO) <- c("pool11","pool12","pool13","pool14","pool16","pool17","pool20","pool21")
tmpPerPool <- sapply(unique(metadata$pool_id), function(x){
tmpPool <- subset(metadata, pool_id==x)
tmpclines <- datalist[[x]]$V1
if (any(x %in% names(genes_state_KO))){
mask <- grepl("kolf_2", tmpclines)
tmpko <- unname(genes_state_KO[match(x, names(genes_state_KO))])
tmpclines[mask] <- paste0(tmpclines[mask],"/", tmpko)
}
tmp_tp <- sapply(sort(c("D0",unique(tmpPool$tp))), function(y){
if (y=="D0"){
tmp <- data.frame(cline=tmpclines,
pool=x,
timePoint=y,
cfrac=signif(1/length(tmpclines),3))
} else {
tmpPool2 <- subset(tmpPool, tp==y)
tabNum <- table(tmpPool2$donor_id)
tmp <- data.frame(cline=tmpclines,
pool=x,
timePoint=y,
cfrac=NA)
tmp$cfrac <- signif(tabNum[match(tmp$cline, names(tabNum))]/sum(tabNum),3)
}
tmp
}, simplify=F)
tmp_tp <- do.call("rbind", tmp_tp)
rownames(tmp_tp) <- NULL
tmp_tp
}, simplify=F)
tmpPerPool <- do.call("rbind", tmpPerPool)
rownames(tmpPerPool) <- NULL
tmpPerPool$pool <- firstup(tmpPerPool$pool)
tmpPerPool$pool <- as.factor(tmpPerPool$pool)
tmpPerPool$pool <- factor(tmpPerPool$pool,
levels=levels(tmpPerPool$pool)[order(as.numeric(gsub("Pool","",levels(tmpPerPool$pool))))])
tmpPerPool$cline_expanded <- paste0(tmpPerPool$cline,"/", tmpPerPool$pool)
tmpPerPool$cfrac_log1p <- signif(log1p(tmpPerPool$cfrac),3)
ratiosTab <- sapply(unique(tmpPerPool$cline_expanded), function(z){
tmp <- subset(tmpPerPool, cline_expanded==z)
data.frame(cline_expanded=z,
pool=as.character(unique(tmp$pool)),
ratio_d11_d0=signif(subset(tmp, timePoint=="D11")$cfrac/subset(tmp, timePoint=="D0")$cfrac,3),
ratio_d30_d0=signif(subset(tmp, timePoint=="D30")$cfrac/subset(tmp, timePoint=="D0")$cfrac,3),
ratio_d52_d0=signif(subset(tmp, timePoint=="D52")$cfrac/subset(tmp, timePoint=="D0")$cfrac,3))
}, simplify=F)
ratiosTab <- do.call("rbind", ratiosTab)
rownames(ratiosTab) <- NULL
ratiosTab$pool <- firstup(ratiosTab$pool)
## do that for the three Ratios
cols <- colnames(ratiosTab)[grepl("ratio",colnames(ratiosTab))]
## outlier analysis
summData <- readRDS("tabs/summData_donorCtype_Cells.RDS")
summData <- subset(summData, quantile!=1)
summData <- sapply(unique(summData$tp), function(x){
tmp <- subset(summData, tp==x)
annotTmp <- sapply(unique(tmp$annot), function(y){
tmp2 <- subset(tmp, annot==y)
tmp2$zsco <- round((tmp2$cfrac-mean(tmp2$cfrac))/sd(tmp2$cfrac),2)
tmp2$outlier <- abs(tmp2$zsco)>2
tmp2
}, simplify=F)
annotTmp <- do.call("rbind", annotTmp)
rownames(annotTmp) <- NULL
annotTmp
}, simplify=F)
summData <- do.call("rbind", summData)
rownames(summData) <- NULL
summData$pool <- sapply(strsplit(summData$donor_extended, "/"), function(x) x[length(x)])
summData$donor_extended <- gsub("pool","Pool", summData$donor_extended)
allClust <- readRDS("analysis/outputTabs/corr_cfracBurdenprolif.RDS")
adjList <- sapply(unique(allClust$annotId), function(x){
tmp <- subset(allClust, annotId==x)
tmp <- tmp[order(tmp$logRegPval, decreasing = F),]
tmp$adjPval <- p.adjust(tmp$wilcoxPval, "BH")
tmp
}, simplify=F)
adjList_filt <- sapply(adjList, function(x){
subset(x, adjPval<0.05)
}, simplify=F)
adjList_filt <- do.call("rbind", adjList_filt)
rownames(adjList_filt) <- NULL
retain <- table(metadata$annot, metadata$tp)*100/colSums(table(metadata$annot, metadata$tp))>2
retain <- retain[,"D52"]
retain <- names(retain[retain])
adjList_filt2 <- adjList_filt[adjList_filt$annotId %in% retain,]
proliferationAndCellFraction <- function(ratio="ratio_d11_d0", timePoint="D11"){
  ## Correlate per-donor proliferation rate with cell-type fraction at one
  ## time-point.
  ##
  ## ratio:     column of `ratiosTab` holding the Dxx/D0 cell-fraction ratio
  ##            used as the proliferation proxy (e.g. "ratio_d52_d0").
  ## timePoint: time-point label matched against summData$tp (e.g. "D52").
  ##
  ## Returns a data.frame with one row per major cell-type (mean fraction
  ## > 2% at this time-point): Pearson correlation, linear-model p-value and
  ## effect size, plus BH-adjusted p-values and significance codes.
  ## NOTE(review): relies on globals summData, ratiosTab and pvalConverter()
  ## defined elsewhere in this script / functionsToImport.R.
  tmpCfrac <- subset(summData, tp==timePoint)
  tmpCfrac$prolifRate <- ratiosTab[match(tmpCfrac$donor_extended, ratiosTab$cline_expanded),ratio]
  tmpCfrac$ratioType <- ratio
  ## keep only cell-types whose mean fraction at this time-point exceeds 2%
  majorTypes <- names(which(sapply(unique(tmpCfrac$annot), function(x){
    mean(subset(summData, tp==timePoint & annot==x)$cfrac)>0.02
  })))
  annotCorr <- sapply(majorTypes, function(x){
    tmpCfrac_annot <- subset(tmpCfrac, annot==x)
    res <- lm(cfrac~prolifRate, data=tmpCfrac_annot)
    data.frame(annot=x,
               ## fixed: cor.test() takes `method`, not `family`; the stray
               ## `family=` argument was silently swallowed by `...` (the
               ## default method is "pearson", so results are unchanged)
               corrPearson=unname(cor.test(tmpCfrac_annot$prolifRate, tmpCfrac_annot$cfrac, method="pearson")$estimate),
               pval_lm=summary(res)$coef["prolifRate","Pr(>|t|)"],
               effect_size_lm=summary(res)$coef["prolifRate","Estimate"],
               tp=timePoint,
               prolifRate=ratio)
  }, simplify=F)
  annotCorr <- do.call("rbind", annotCorr)
  rownames(annotCorr) <- NULL
  ## multiple-testing correction across cell-types (Benjamini-Hochberg)
  annotCorr$pAdj <- p.adjust(annotCorr$pval_lm, "BH")
  annotCorr$signif <- pvalConverter(annotCorr$pAdj)
  return(annotCorr)
}
prolifTable <- proliferationAndCellFraction(ratio="ratio_d52_d0", timePoint="D52")
signif.floor <- function(x, n){
  ## Round x DOWN to n significant figures (vectorized over x).
  shift <- 10^(floor(log10(abs(x))) + 1 - n)
  out <- floor(x / shift) * shift
  ## log10(0) is -Inf, which propagates NaN through the division above,
  ## so exact zeros are patched back in explicitly.
  out[x == 0] <- 0
  out
}
signif.ceiling <- function(x, n){
  ## Round x UP to n significant figures (vectorized over x).
  shift <- 10^(floor(log10(abs(x))) + 1 - n)
  out <- ceiling(x / shift) * shift
  ## log10(0) is -Inf, which propagates NaN through the division above,
  ## so exact zeros are patched back in explicitly.
  out[x == 0] <- 0
  out
}
plotD52 <- ggplot(data=prolifTable, aes(x=annot, y=tp, fill=corrPearson))+
geom_tile(col="black")+
geom_point(data=prolifTable, aes(x=annot, y=tp, size=signif, shape=signif))+
theme_bw()+
ylab("")+xlab("")+
scale_y_discrete(position = "right")+
theme(legend.position="top",
axis.ticks.y=element_blank(),
axis.text.y=element_blank(),
legend.title=element_text(size=14, face="bold"),
axis.text.x=element_text(angle=90, hjust=0.5, vjust=0.5, size=14),
plot.title=element_text(hjust=0.5, size=13, face="bold"),
legend.text=element_text(size=12))+
#ggtitle("Cell-line proliferation ~ Cell-type fraction (Day 52)")+
scale_size_manual(name="LinearReg P.adj", values=c(4,4,6,8),breaks=c("ns","*","**","***"))+
scale_shape_manual(name="LinearReg P.adj",values=c(1,16,16,16), breaks=c("ns","*","**","***"))+
scale_fill_gradientn(name="Pearson correlation",
colours = c("orange",
"white", "blue"),
limits=c(signif.floor(min(prolifTable$corrPearson),1),
signif.ceiling(max(prolifTable$corrPearson),1)),
labels=seq(signif.floor(min(prolifTable$corrPearson),1),signif.ceiling(max(prolifTable$corrPearson),1),0.2),
breaks=seq(signif.floor(min(prolifTable$corrPearson),1),signif.ceiling(max(prolifTable$corrPearson),1),0.2),
guide = guide_colourbar(barwidth = 10, nbin = 5))+
guides(size=guide_legend(nrow=3,byrow=TRUE))
pdf(file=paste0("figures/mainFigs/figure4B.pdf"))
plot(plotD52)
dev.off()
#############
### fig4C ###
#############
## DE analysis (add to scripts)
pathTODE <- "analysis/outputTabs/DEsinglecell/"
summResults <- paste0(pathTODE, list.files(path=pathTODE,
pattern = "resultsDEFailVsSucc2.+.RDS")) %>%
map(readRDS) %>%
bind_rows()
heatmap_df <- summResults
heatmap_df <- heatmap_df[,match(c("annot","timepoint","numDE","numFailedDonors","numSuccessfulDonors","numFailedCells","numSuccessfulCells"), colnames(heatmap_df))]
heatmap_df <- heatmap_df[!duplicated(heatmap_df),]
# creation of geneUniverse (add to scripts)
geneUniverse <- readRDS("analysis/outputTabs/DEsinglecell/geneUniverse_seurat.RDS")
pval_df <- rbind(enrichmentCalc(summResults, geneUniverse, geneSet="ddd"),
enrichmentCalc(summResults, geneUniverse, geneSet="cosmic"),
enrichmentCalc(summResults, geneUniverse, geneSet="ddd_dominantMOI"))
pval_df$pvalAdj <- p.adjust(pval_df$pval, "BH")
pval_df$signif <- pvalConverter(pval_df$pvalAdj)
pval_df$signif <- factor(pval_df$signif, levels=c("ns","*","**","***"))
genVec <- c("DDD","Cosmic-T1","DDD-Dominant")
names(genVec) <- unique(pval_df$geneSet)
pval_df$geneSet <- unname(genVec[pval_df$geneSet])
heatmap_df <- subset(heatmap_df, annot!="Unk2")
heatmap_df$annot <- as.factor(heatmap_df$annot)
heatmap_df$annot <- factor(heatmap_df$annot, levels=rev(levels(heatmap_df$annot)))
pval_df <- subset(pval_df, annot!="Unk2")
pval_df$annot <- as.factor(pval_df$annot)
pval_df$annot <- factor(pval_df$annot, levels=levels(pval_df$annot))
heatmap_df$annot <- as.factor(heatmap_df$annot)
heatmap_df$annot <- factor(heatmap_df$annot, levels=rev(levels(pval_df$annot)))
size_values=c("ns"=0,
"*"=4,
"**"=6,
"***"=8)
myPalette <- colorRampPalette(brewer.pal(9, "YlOrRd"))
my_breaks <- c(0,100,200,300,400,500)
fig4c <- ggplot(data=heatmap_df, aes(x=timepoint, y=annot, fill=numDE))+
geom_tile()+
theme_classic()+
theme(axis.text.x=element_text(vjust=0.5),
plot.title=element_text(hjust=0.5, face="bold"),
axis.title=element_text(size=14, face="bold"),
axis.text=element_text(size=12),
legend.text=element_text(size=12),
legend.title=element_text(size=14))+
scale_fill_gradientn(name="DE genes",
colours=myPalette(100),
limits=c(0,550),
labels=my_breaks,
breaks=my_breaks)+
xlab("Time-point")+
ylab("Cell-types")+
#ggtitle("Number of DE genes / Gene set enrichment")+
geom_point(data=pval_df, aes(x=xpos, y=annot, size=signif, col=geneSet), inherit.aes = F)+
scale_color_manual(name="Gene-Set", values=c("DDD"="cyan1","Cosmic-T1"="plum1","DDD-Dominant"="cornflowerblue"))+
theme(legend.title=element_text(face="bold"))+
scale_size_manual(name="Adj.Pval", values=size_values)+
guides(shape=F)
pdf(file="figures/mainFigs/figure4C.pdf")
plot(fig4c)
dev.off()
#############
### fig4D ###
#############
stopifnot(all(!is.na(match(summResults$geneDE, geneUniverse$symbol))))
tabCorr <- AnnotationDbi::select(org.Hs.eg.db, geneUniverse$ensembl, "ENTREZID", "ENSEMBL")
tabCorr <- tabCorr[!is.na(tabCorr$ENTREZID),]
tabCorr <- tabCorr[!duplicated(tabCorr$ENSEMBL),]
geneUniverse$entrezid <- tabCorr[match(geneUniverse$ensembl,tabCorr$ENSEMBL),]$ENTREZID
geneUniverse <- subset(geneUniverse, !is.na(entrezid))
summResults$entrezid <- geneUniverse[match(summResults$geneDE, geneUniverse$symbol),]$entrezid
summResults$comb <- paste0(summResults$timepoint,"-", summResults$annot)
summResults <- subset(summResults, !is.na(entrezid))
pval_df$comb <- paste0(pval_df$timepoint,"-", pval_df$annot)
## all DE considering all clusters together
allclusters <- unique(summResults$entrezid)
geneUniverse_allclusters <- unique(geneUniverse$entrezid)
## all DE considering only signif clusters (either ddd or cosmic)
mask_ddd <- pval_df$geneSet=="DDD"
mask_cosmic <- pval_df$geneSet=="Cosmic-T1"
allsignif_ddd <- pval_df[mask_ddd & pval_df$signif!="ns",]$comb
#allsignif_cosmic <- pval_df[mask_cosmic & pval_df$signif!="ns",]$comb
allsignif_ddd <- unique(summResults[summResults$comb %in% allsignif_ddd,]$entrezid)
#allsignif_cosmic <- unique(summResults[summResults$comb %in% allsignif_cosmic,]$entrezid)
geneUniverse_allsignif <- geneUniverse_allclusters
## list of DE for each signif cluster (ddd)
list_clustr_ddd <- sapply(pval_df[mask_ddd & pval_df$signif!="ns",]$comb, function(x){
subset(summResults, comb==x)$entrezid
}, simplify=F)
list_geneUniverse_ddd <- gsub("-.+","",pval_df[mask_ddd & pval_df$signif!="ns",]$comb)
list_geneUniverse_ddd <- sapply(list_geneUniverse_ddd, function(x){
colMatch <- match(x, colnames(geneUniverse))
unique(geneUniverse[geneUniverse[,colMatch]==TRUE,]$entrezid)
}, simplify=F)
## list of DE for each signif cluster (ddd)
list_clustr_cosmic <- sapply(pval_df[mask_cosmic & pval_df$signif!="ns",]$comb, function(x){
subset(summResults, comb==x)$entrezid
}, simplify=F)
list_geneUniverse_cosmic <- gsub("-.+","",pval_df[mask_cosmic & pval_df$signif!="ns",]$comb)
list_geneUniverse_cosmic <- sapply(list_geneUniverse_cosmic, function(x){
colMatch <- match(x, colnames(geneUniverse))
unique(geneUniverse[geneUniverse[,colMatch]==TRUE,]$entrezid)
}, simplify=F)
## GOenrichment tests
report_allclusters <- GOenrichmentAndReport(allclusters, geneUniverse_allclusters, minSize=30, maxSize=200, minCount=20, p.value=0.001, label="allDE")
##allsignif-ddd
report_allsignif_ddd <- GOenrichmentAndReport(allsignif_ddd, geneUniverse_allsignif, minSize=30, maxSize=200, minCount=20, p.value=0.001, label="allsignif")
report_allclusters$GeneSyms <- NULL
report_allsignif_ddd$GeneSyms <- NULL
##ddd
## Per-cluster GO:BP enrichment: one report per significant DDD cluster.
## seq_along() avoids the 1:length() trap, which would iterate over
## c(1, 0) and error if the list of significant clusters were empty.
report_testClust <- sapply(seq_along(list_clustr_ddd), function(x){
  print(x)
  labelClust= names(list_clustr_ddd)[x]
  report_testClust <- GOenrichmentAndReport(list_clustr_ddd[[x]], list_geneUniverse_ddd[[x]], minSize=10, maxSize=200, minCount=7, p.value=0.001, label=labelClust)
  ## drop the gene-symbol column to keep the supplementary table compact
  report_testClust$GeneSyms <- NULL
  report_testClust
}, simplify=F)
#saveRDS(report_testClust, file="analysis/outputTabs/DEsinglecell/report_testClust_ddd2.RDS")
#report_testClust <- readRDS("analysis/outputTabs/DEsinglecell/report_testClust_ddd2.RDS")
suppTable7 <- rbind(report_allclusters, report_allsignif_ddd, do.call("rbind", report_testClust))
write.table(suppTable7, file="suppTabs/suppTable7.txt",
quote=F, sep="\t", col.names=T, row.names=F)
addPosition <- function(report_allclusters){
  ## Append a 1-based rank column recording each GO term's row order within
  ## its enrichment report (used as the tile label in figure 4D).
  ## seq_len() handles an empty report safely, whereas 1:dim(df)[1] would
  ## yield c(1, 0) for zero rows and fail on assignment.
  report_allclusters$position <- seq_len(nrow(report_allclusters))
  return(report_allclusters)
}
report_allclusters <- addPosition(report_allclusters)
report_allsignif_ddd <- addPosition(report_allsignif_ddd)
report_testClust <- sapply(report_testClust, function(x){
addPosition(x)
}, simplify=F)
report_testClust <- do.call("rbind", report_testClust)
reportInfo <- rbind(report_allclusters, report_allsignif_ddd, report_testClust)
vecMatch <- c("axon","neuron","glial","brain",
"hindbrain","forebrain","midbrain","synapse","chromatin",
"cerebellum", "neural","cortex","neurogenesis",
"axonogenesis","nervous","hippocampus","neurotransmitter",
"dopaminergic", "axenome", "action potential","synaptic")
vecLogic <- sapply(vecMatch, function(x){
grepl(x,reportInfo$Term)
}, simplify=T)
reportInfo$neuroRelated <- rowSums(vecLogic)>0
reportInfo$top1 <- FALSE
reportInfo[match(unique(reportInfo$label), reportInfo$label),]$top1 <- TRUE
reportInfo$top2 <- FALSE
reportInfo[sort(c(match(unique(reportInfo$label), reportInfo$label), (match(unique(reportInfo$label), reportInfo$label))+1)),]$top2 <- TRUE
reportInfo$mostShared <- !is.na(match(reportInfo$Term,names(sort(table(reportInfo$Term), decreasing=T)[1:20])))
### Neuro-related ###
neuroTab <- subset(reportInfo, neuroRelated==TRUE)
neuroTab <- rbind(neuroTab, fillNA(neuroTab, list_clustr_ddd, highlight="neuroRelated"))
maxOddsNeuro <- ceiling(max(subset(reportInfo, neuroRelated==TRUE)$OddsRatio))
fig4d <- ggplot(neuroTab,
aes(y=Term, x=label, fill=OddsRatio))+
geom_tile(colour = "black")+
theme_classic()+
theme(axis.text.x=element_text(angle=90,hjust=1, vjust=0.5),
plot.title=element_text(hjust=0.5, face="bold", size=13))+
xlab("")+
ylab("")+
#ggtitle("GO:BP enrichment in critical cell-types")+
scale_fill_gradientn(name="OddsRatio",
colours=myPalette(100),
limits=c(0,maxOddsNeuro),
labels=seq(0,maxOddsNeuro,2),
breaks=seq(0,maxOddsNeuro,2),
na.value = 'grey90')+
geom_text(aes(label=position))
pdf(file="figures/mainFigs/figure4D.pdf", width = 7, height = 4)
plot(fig4d)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BayesFunc.R
\name{chains.plotOwn}
\alias{chains.plotOwn}
\alias{mixchains.Own}
\title{Plots to assess the mixing of the Chains}
\usage{
chains.plotOwn(data, vline.red = min(data$iter),
post.mean.green = apply(data, 2, mean), ...,
title = "TracePlots of the generated Chains ")
mixchains.Own(data, moreplot = F, burnin.redline = 0, legend2 = F,
title = "TracePlots of the generated Chains ")
}
\arguments{
\item{data}{numeric vector containing the GEV block-maxima}
\item{vline.red}{draws a dashed red vertical line marking the first iteration
kept, i.e. the end of the discarded burn-in period.}
\item{post.mean.green}{draws a green dashed line representing the posterior mean
of the parameter's chain}
\item{...}{Other parameters from \code{gridExtra::grid.arrange()}}
\item{title}{Global title for the plot}
}
\value{
a grid.arrange() of ggplots.
}
\description{
Compute the ggplots for each parameter of interest in a single page.
}
\examples{
data("max_years")
fn <- function(par, data) -log_post0(par[1], par[2], par[3], data)
param <- c(mean(max_years$df$Max),log(sd(max_years$df$Max)), 0.1 )
# opt <- optim(param, fn, data = max_years$data,
# method="BFGS", hessian = TRUE)
opt <- nlm(fn, param, data = max_years$data,
hessian=T, iterlim = 1e5)
start <- opt$estimate
Sig <- solve(opt$hessian)
ev <- eigen( (2.4/sqrt(2))^2 * Sig)
varmat <- ev$vectors \%*\% diag(sqrt(ev$values)) \%*\% t(ev$vectors)
# (MH)
set.seed(100)
mh.mcmc1 <- MH_mcmc.own(start, varmat \%*\% c(.1,.3,.4))
mh.mcmc1$mean.acc_rates
chains.plotOwn(mh.mcmc1$out.chain)
# (GIBBS)
# k chains with k different starting values
set.seed(100)
gibbs.trend <- gibbs.trend.own(start, propsd = c(.5, 1.9, .15, .12),
iter = 1000)
## TracePlots
chain.mix <- cbind.data.frame(gibbs.trend$out.chain,
iter.chain = rep(1:500, 4))
mixchains.Own(chain.mix)
}
\author{
Antoine Pissoort, \email{antoine.pissoort@student.uclouvain.be}
}
|
/man/ggplotbayesfuns.Rd
|
no_license
|
proto4426/PissoortThesis
|
R
| false
| true
| 2,061
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BayesFunc.R
\name{chains.plotOwn}
\alias{chains.plotOwn}
\alias{mixchains.Own}
\title{Plots to assess the mixing of the Chains}
\usage{
chains.plotOwn(data, vline.red = min(data$iter),
post.mean.green = apply(data, 2, mean), ...,
title = "TracePlots of the generated Chains ")
mixchains.Own(data, moreplot = F, burnin.redline = 0, legend2 = F,
title = "TracePlots of the generated Chains ")
}
\arguments{
\item{data}{numeric vector containing the GEV block-maxima}
\item{vline.red}{draws a dashed red vertical line marking the first iteration
kept, i.e. the end of the discarded burn-in period.}
\item{post.mean.green}{draws a green dashed line representing the posterior mean
of the parameter's chain}
\item{...}{Other parameters from \code{gridExtra::grid.arrange()}}
\item{title}{Global title for the plot}
}
\value{
a grid.arrange() of ggplots.
}
\description{
Compute the ggplots for each parameter of interest in a single page.
}
\examples{
data("max_years")
fn <- function(par, data) -log_post0(par[1], par[2], par[3], data)
param <- c(mean(max_years$df$Max),log(sd(max_years$df$Max)), 0.1 )
# opt <- optim(param, fn, data = max_years$data,
# method="BFGS", hessian = TRUE)
opt <- nlm(fn, param, data = max_years$data,
hessian=T, iterlim = 1e5)
start <- opt$estimate
Sig <- solve(opt$hessian)
ev <- eigen( (2.4/sqrt(2))^2 * Sig)
varmat <- ev$vectors \%*\% diag(sqrt(ev$values)) \%*\% t(ev$vectors)
# (MH)
set.seed(100)
mh.mcmc1 <- MH_mcmc.own(start, varmat \%*\% c(.1,.3,.4))
mh.mcmc1$mean.acc_rates
chains.plotOwn(mh.mcmc1$out.chain)
# (GIBBS)
# k chains with k different starting values
set.seed(100)
gibbs.trend <- gibbs.trend.own(start, propsd = c(.5, 1.9, .15, .12),
iter = 1000)
## TracePlots
chain.mix <- cbind.data.frame(gibbs.trend$out.chain,
iter.chain = rep(1:500, 4))
mixchains.Own(chain.mix)
}
\author{
Antoine Pissoort, \email{antoine.pissoort@student.uclouvain.be}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_global.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe operator}
\usage{
lhs \%>\% rhs
}
\arguments{
\item{lhs}{A value or the magrittr placeholder.}
\item{rhs}{A function call using the magrittr semantics.}
}
\value{
The result of calling \code{rhs(lhs)}.
}
\description{
See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
}
\keyword{internal}
|
/man/pipe.Rd
|
permissive
|
asshah4/tardis
|
R
| false
| true
| 434
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_global.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe operator}
\usage{
lhs \%>\% rhs
}
\arguments{
\item{lhs}{A value or the magrittr placeholder.}
\item{rhs}{A function call using the magrittr semantics.}
}
\value{
The result of calling \code{rhs(lhs)}.
}
\description{
See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
}
\keyword{internal}
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
# Shiny UI: a single fluid page presenting the harp-seal tracking project.
# NOTE(review): relies on shiny, leaflet (`leafletOutput`), the magrittr
# pipe `%>%`, and `withSpinner` (presumably shinycssloaders) being loaded
# elsewhere (e.g. in global.R) -- confirm before deploying.
ui <- fluidPage(
# App title shown at the top of the page
titlePanel("Harp seals as indicators of Arctic climate change"),
br(),
# Introductory paragraph with links to the two collaborating institutions
p("In March 2019 scientists from the ",
a("Sea Mammal Research Unit",
href = "http://www.smru.st-andrews.ac.uk"),
"at the University of St Andrews and the ",
a("Marine Mammal Section",
href = "http://www.dfo-mpo.gc.ca/science/coe-cde/cemam/index-eng.html"),
"of the Canadian Department for Fisheries and Oceans deployed 10 satellite transmitters on harp seals in the Gulf of St Lawrence."),
p("These tags allow the scientists to track the seals as they migrate north to follow the seasonal sea ice retreat."),
# Map description, including the NSIDC sea-ice data attribution link
p("The interactive map below displays the latest transmitted positions of the seals (updated daily) along with the individual movement paths.
The white shading represents the current sea ice conditions in the region using data taken from ",
a("NSIDC",
href = "https://nsidc.org"),
"."),
br(),
# Interactive leaflet map (rendered server-side as output$mymap);
# a loading spinner (style 7) is displayed until the map is ready
leafletOutput("mymap", height = 600) %>% withSpinner(type = 7),
br(),
# Funding acknowledgements with links to the funders
p("This work was made possible by a UK-Canada Arctic Partnership Bursary from the ",
a("NERC Arctic Office",
href = "https://www.arctic.ac.uk"),
"and funding from the ",
a("DFO",
href = "http://www.dfo-mpo.gc.ca/index-eng.htm")),
br(),
# Partner logos; image files are served from the app's www/ directory
fluidRow(
img(src='Arctic-Office-Logo.png', align = "centre", height = 60),
img(src='DFO.png', align = "centre", height = 80),
img(src='SMRU.jpg', align = "centre", height = 60),
img(src='University-St-Andrews.png', align = "centre", height = 80)
)
)
|
/ui.R
|
no_license
|
jamesgrecian/harpMap
|
R
| false
| false
| 1,728
|
r
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
# Shiny UI: a single fluid page presenting the harp-seal tracking project.
# NOTE(review): relies on shiny, leaflet (`leafletOutput`), the magrittr
# pipe `%>%`, and `withSpinner` (presumably shinycssloaders) being loaded
# elsewhere (e.g. in global.R) -- confirm before deploying.
ui <- fluidPage(
# App title shown at the top of the page
titlePanel("Harp seals as indicators of Arctic climate change"),
br(),
# Introductory paragraph with links to the two collaborating institutions
p("In March 2019 scientists from the ",
a("Sea Mammal Research Unit",
href = "http://www.smru.st-andrews.ac.uk"),
"at the University of St Andrews and the ",
a("Marine Mammal Section",
href = "http://www.dfo-mpo.gc.ca/science/coe-cde/cemam/index-eng.html"),
"of the Canadian Department for Fisheries and Oceans deployed 10 satellite transmitters on harp seals in the Gulf of St Lawrence."),
p("These tags allow the scientists to track the seals as they migrate north to follow the seasonal sea ice retreat."),
# Map description, including the NSIDC sea-ice data attribution link
p("The interactive map below displays the latest transmitted positions of the seals (updated daily) along with the individual movement paths.
The white shading represents the current sea ice conditions in the region using data taken from ",
a("NSIDC",
href = "https://nsidc.org"),
"."),
br(),
# Interactive leaflet map (rendered server-side as output$mymap);
# a loading spinner (style 7) is displayed until the map is ready
leafletOutput("mymap", height = 600) %>% withSpinner(type = 7),
br(),
# Funding acknowledgements with links to the funders
p("This work was made possible by a UK-Canada Arctic Partnership Bursary from the ",
a("NERC Arctic Office",
href = "https://www.arctic.ac.uk"),
"and funding from the ",
a("DFO",
href = "http://www.dfo-mpo.gc.ca/index-eng.htm")),
br(),
# Partner logos; image files are served from the app's www/ directory
fluidRow(
img(src='Arctic-Office-Logo.png', align = "centre", height = 60),
img(src='DFO.png', align = "centre", height = 80),
img(src='SMRU.jpg', align = "centre", height = 60),
img(src='University-St-Andrews.png', align = "centre", height = 80)
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariance-functions.R
\name{m.multivariance}
\alias{m.multivariance}
\title{m distance multivariance}
\usage{
m.multivariance(
x,
vec = NA,
m = 2,
Nscale = TRUE,
Escale = TRUE,
squared = TRUE,
...
)
}
\arguments{
\item{x}{either a data matrix or a list of doubly centered distance matrices}
\item{vec}{if x is a matrix, then this indicates which columns are treated together as one sample; if x is a list, these are the indexes for which the multivariance is calculated. The default is all columns and all indexes, respectively.}
\item{m}{\code{=2} or \code{3} the m-multivariance will be computed.}
\item{Nscale}{if \code{TRUE} the multivariance is scaled up by the sample size (and thus it is exactly as required for the test of independence)}
\item{Escale}{if \code{TRUE} then it is scaled by the number of multivariances which are theoretically summed up (in the case of independence this yields for normalized distance matrices an estimator with expectation 1)}
\item{squared}{if \code{FALSE} it returns the actual multivariance, otherwise the squared multivariance (less computation)}
\item{...}{these are passed to \code{\link{cdms}} (which is only invoked if \code{x} is a matrix)}
}
\description{
Computes m distance multivariance.
}
\details{
m-distance multivariance is by definition the scaled sum of certain distance multivariances, and it characterizes m-dependence.
As a rough guide to interpret the value of m distance multivariance note:
\itemize{
\item Large values indicate dependence.
\item If the random variables are (m-1)-independent and \code{Nscale = TRUE}, values close to 1 and smaller indicate m-independence, larger values indicate dependence. In fact, in the case of independence the test statistic is a Gaussian quadratic form with expectation 1 and samples of it can be generated by \code{\link{resample.multivariance}}.
\item If the random variables are (m-1)-independent and \code{Nscale = FALSE}, small values (close to 0) indicate m-independence, larger values indicate dependence.
}
Since random variables are always 1-independent, the case \code{m=2} characterizes pairwise independence.
Finally note that due to numerical (in)precision the value of m-multivariance might become negative. In these cases it is set to 0. A warning is issued, if the value is negative and further than the usual (used by \code{\link[base]{all.equal}}) tolerance away from 0.
}
\examples{
x = matrix(rnorm(3*30),ncol = 3)
# the following values are identical
m.multivariance(x,m =2)
1/choose(3,2)*(multivariance(x[,c(1,2)]) +
multivariance(x[,c(1,3)]) +
multivariance(x[,c(2,3)]))
# the following values are identical
m.multivariance(x,m=3)
multivariance(x)
# the following values are identical
1/4*(3*(m.multivariance(x,m=2)) + m.multivariance(x,m=3))
total.multivariance(x, Nscale = TRUE)
1/4*(multivariance(x[,c(1,2)], Nscale = TRUE) +
multivariance(x[,c(1,3)], Nscale = TRUE) +
multivariance(x[,c(2,3)], Nscale = TRUE) + multivariance(x, Nscale = TRUE))
}
\references{
For the theoretic background see the reference [3] given on the main help page of this package: \link{multivariance-package}.
}
|
/fuzzedpackages/multivariance/man/m.multivariance.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 3,274
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariance-functions.R
\name{m.multivariance}
\alias{m.multivariance}
\title{m distance multivariance}
\usage{
m.multivariance(
x,
vec = NA,
m = 2,
Nscale = TRUE,
Escale = TRUE,
squared = TRUE,
...
)
}
\arguments{
\item{x}{either a data matrix or a list of doubly centered distance matrices}
\item{vec}{if x is a matrix, then this indicates which columns are treated together as one sample; if x is a list, these are the indexes for which the multivariance is calculated. The default is all columns and all indexes, respectively.}
\item{m}{\code{=2} or \code{3} the m-multivariance will be computed.}
\item{Nscale}{if \code{TRUE} the multivariance is scaled up by the sample size (and thus it is exactly as required for the test of independence)}
\item{Escale}{if \code{TRUE} then it is scaled by the number of multivariances which are theoretically summed up (in the case of independence this yields for normalized distance matrices an estimator with expectation 1)}
\item{squared}{if \code{FALSE} it returns the actual multivariance, otherwise the squared multivariance (less computation)}
\item{...}{these are passed to \code{\link{cdms}} (which is only invoked if \code{x} is a matrix)}
}
\description{
Computes m distance multivariance.
}
\details{
m-distance multivariance is by definition the scaled sum of certain distance multivariances, and it characterizes m-dependence.
As a rough guide to interpret the value of m distance multivariance note:
\itemize{
\item Large values indicate dependence.
\item If the random variables are (m-1)-independent and \code{Nscale = TRUE}, values close to 1 and smaller indicate m-independence, larger values indicate dependence. In fact, in the case of independence the test statistic is a Gaussian quadratic form with expectation 1 and samples of it can be generated by \code{\link{resample.multivariance}}.
\item If the random variables are (m-1)-independent and \code{Nscale = FALSE}, small values (close to 0) indicate m-independence, larger values indicate dependence.
}
Since random variables are always 1-independent, the case \code{m=2} characterizes pairwise independence.
Finally note that due to numerical (in)precision the value of m-multivariance might become negative. In these cases it is set to 0. A warning is issued, if the value is negative and further than the usual (used by \code{\link[base]{all.equal}}) tolerance away from 0.
}
\examples{
x = matrix(rnorm(3*30),ncol = 3)
# the following values are identical
m.multivariance(x,m =2)
1/choose(3,2)*(multivariance(x[,c(1,2)]) +
multivariance(x[,c(1,3)]) +
multivariance(x[,c(2,3)]))
# the following values are identical
m.multivariance(x,m=3)
multivariance(x)
# the following values are identical
1/4*(3*(m.multivariance(x,m=2)) + m.multivariance(x,m=3))
total.multivariance(x, Nscale = TRUE)
1/4*(multivariance(x[,c(1,2)], Nscale = TRUE) +
multivariance(x[,c(1,3)], Nscale = TRUE) +
multivariance(x[,c(2,3)], Nscale = TRUE) + multivariance(x, Nscale = TRUE))
}
\references{
For the theoretic background see the reference [3] given on the main help page of this package: \link{multivariance-package}.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.