blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e07068b09353061aa44e63df30b2f8b75dd3a791
|
633d4069f4db086c6d7111822e98a0fae9716f34
|
/createdata.R
|
6be43f9170a9a7937d6b2dfd065f5cdcd000957e
|
[] |
no_license
|
castillosebastian/NLP_predictor_project
|
5343801f0c2f49e32985ea087c4ac709812c8485
|
b51e450d7a949bcc01b41d29f7ac78b1af09be46
|
refs/heads/master
| 2020-04-16T01:47:30.469527
| 2019-01-11T06:21:19
| 2019-01-11T06:21:19
| 165,185,652
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,488
|
r
|
createdata.R
|
## ---- Package setup, data download, and blogs-file read ----
library(doParallel)
# Spin up a 4-worker parallel backend (consumed implicitly by caret, if at all).
registerDoParallel(makeCluster(4))
library(stringr)
library(dplyr)
library(caret)
library(tau)
library(data.table)
library(downloader)
library(knitr)
library(tm)
## Create Dir
# Create the local data directory on first run only.
if(!file.exists("./projectData")){
dir.create("./projectData")
}
## Download the dataset and unzip folder
url <- "https://d396qusza40orc.cloudfront.net/dsscapstone/dataset/Coursera-SwiftKey.zip"
# Skip the (large) download when the zip is already present; mode = "wb"
# keeps the transfer binary-safe on Windows.
if(!file.exists("./projectData/Coursera-SwiftKey.zip")){
download.file(url, destfile="./projectData/Coursera-SwiftKey.zip", mode = "wb")
}
## Check if zip has already been unzipped?
if(!file.exists("./projectData/final")){
unzip(zipfile="./projectData/Coursera-SwiftKey.zip",exdir="./projectData")
}
# Open the blogs corpus in binary mode so stray control bytes do not
# truncate readLines(); the connection is closed and dropped right after.
# NOTE(review): this absolute home path differs from ./projectData used
# above -- confirm both point at the same extraction directory.
blog_file <- file("~/R/Processing_Text_Project/projectData/final/en_US/en_US.blogs.txt", open = "rb")
blog <- readLines(blog_file, encoding= "UTF-8", warn = F)
close(blog_file)
rm(blog_file)
#head(blog)
## Normalise a character vector of raw text into lower-case sentences.
## Returns one element per sentence, with numbers, punctuation (except
## apostrophes), abbreviations and stray quotes cleaned out.
preprocdata <- function(txt) {
  # Encoding and case first: bytes that cannot be converted become spaces.
  txt <- iconv(txt, from = "UTF-8", to = "latin1", sub = " ")
  txt <- tolower(txt)
  # Ordered clean-up rules (pattern, replacement). Order matters, so they
  # are applied strictly in sequence -- identical to the original chain.
  rules <- list(
    c("([iu]n)-([a-z])", "\\1\\2"),               # join "in-"/"un-" hyphenations
    c("([0-9])(st|nd|rd|th)", "\\1"),             # strip ordinal suffixes
    c(" \\'|\\' ", " "),                          # drop dangling quotes
    c("[^a-z.' ]", " "),                          # keep letters, periods, apostrophes
    c("([abiep])\\.([cdegm])\\.", "\\1\\2"),      # collapse a.m. / e.g. style forms
    c("([a-z])\\.([a-z])", "\\1 \\2"),            # split letter.letter runs
    c("( [a-z])\\. ", "\\1 "),                    # single-letter initials
    c(" (m[rs]|mrs)\\.", " \\1 "),                # honorifics
    c(" (dr|st|rd|av|ave|blvd|ct)\\.", " \\1 "),  # street/title abbreviations
    c("\\.$", ""),                                # trailing period
    c("^ +| +$|", ""),                            # trim line edges
    c(" {2,}", " "),                              # squeeze repeated spaces
    c(" *\\. *", "\\.")                           # tighten sentence boundaries
  )
  for (rule in rules) {
    txt <- str_replace_all(txt, rule[1], rule[2])
  }
  # Split on the remaining periods and drop empty fragments so each
  # returned element is one non-empty sentence.
  sentences <- unlist(str_split(txt, "\\."))
  sentences[sentences != ""]
}
# Sentence-split and clean each of the three corpora with preprocdata().
blog <- preprocdata(blog)
# head(blog)
twitter_file <- file("~/R/Processing_Text_Project/projectData/final/en_US/en_US.twitter.txt", open = "rb")
twitter <- readLines(twitter_file, encoding= "UTF-8", warn = F)
close(twitter_file)
rm(twitter_file)
twitter <- preprocdata(twitter)
#head(twitter)
news_file <- file("~/R/Processing_Text_Project/projectData/final/en_US/en_US.news.txt", open = "rb")
news <- readLines(news_file, encoding= "UTF-8", warn = F)
close(news_file)
rm(news_file)
news <- preprocdata(news)
head(news)
# Create corpus
# Pool all three sources into one sentence vector; originals are dropped
# immediately to keep memory pressure down.
corpus <- c(blog, twitter, news)
rm(blog, twitter, news)
# Sample corpus to train our algorithm
# Fixed seed makes the 15% training sample reproducible.
set.seed(666)
sample_data <- createDataPartition(y = 1:length(corpus), p = 0.15, list = FALSE)
train <- corpus[sample_data]
rm(corpus, sample_data)
# Check for "bad" tokens
sum(str_detect(train, "www"))
# More cleaning task
# Second-pass regex cleaning: URLs, stuttered letters, orphaned
# apostrophes, single-sentence lines and lone non-"a"/"i" letters.
# The order of these replacements is deliberate -- do not reorder.
train <- train %>%
str_replace_all("www [a-z]+ [a-z]+", "") %>%
str_replace_all(" ([a-z])\\1+ |^([a-z])\\1+ | ([a-z])\\1+$|^([a-z])\\1+$", " ") %>%
str_replace_all( "([a-z])\\1{2,}", "\\1\\1") %>%
str_replace_all( "\\'+([a-z]+)\\'+", "\\1") %>%
str_replace_all( "\\'+ \\'+", " ") %>%
str_replace_all( "(\\'+ )+|( \\'+)+|^\\'+|\\'+$", " ") %>%
str_replace_all( "^[a-z]+$", "") %>%
str_replace_all( "( [^ai])+ |^([^ai] )+|( [^ai])+$", " ") %>%
str_replace_all( "^ +| +$|", "") %>%
str_replace_all( " {2,}", " ") %>%
str_replace_all( " +$|^ +", "")
train <- train[train != ""]
#head(train)
#?textcnt()
# Count 1-, 2- and 3-gram frequencies with tau::textcnt, sorted by count.
train1 <- textcnt(train, method = "string", split = "[[:space:]]", n = 1L, decreasing = T)
train2 <- textcnt(train, method = "string", split = "[[:space:]]", n = 2L, decreasing = T)
train3 <- textcnt(train, method = "string", split = "[[:space:]]", n = 3L, decreasing = T)
rm(train)
# Explore our ngrams
# NOTE(review): head(train1) is repeated three times -- presumably an
# interactive leftover; only the first call is informative.
head(train1)
head(train1)
head(train1)
## ---- Unigram table: word -> relative frequency ----
unigram_dt <- data.table(text = names(train1), as.matrix(train1))
setnames(unigram_dt, "V1", "count")
setnames(unigram_dt, "text", "n0")
tot <- sum(unigram_dt$count)
# Relative frequency of each word; counts are dropped after the division.
unigram_dt <- unigram_dt %>%
mutate(freq = round(count/tot, 7)) %>%
select(-count)
# mutate() returned a plain data.frame, so re-cast before keying.
unigram_dt <- as.data.table(unigram_dt)
setkeyv(unigram_dt, c("n0", "freq"))
saveRDS(unigram_dt, "unigram_dt.rds")
rm(tot, unigram_dt)
## ---- Bigram table: P(n0 | n1), top 5 continuations per word ----
bigram_dt <- data.table(text = names(train2), as.matrix(train2))
setnames(bigram_dt, "V1", "count")
# Split "w1 w2" into the context word n1 and the predicted word n0.
bigram_dt[, c("n1", "n0") := do.call(Map, c(f = c, strsplit(text, " ")))]
# Conditional frequency: bigram count divided by the unigram count of n1
# (train1 is a named textcnt vector, so train1[n1] looks up that count).
bigram_dt <- mutate(bigram_dt, freq = round(count/train1[n1][[1]], 7))
bigram_dt$text <- NULL
bigram_dt$count <- NULL
bigram_dt <- as.data.table(bigram_dt)
setkey(bigram_dt, n1)
# Keep only the 5 most frequent continuations for each context word.
bigram_dt <- bigram_dt[,lapply(.SD, function(x) head(x, 5)), by = key(bigram_dt)]
setkeyv(bigram_dt, c("n1", "freq", "n0"))
saveRDS(bigram_dt, "bigram_dt.rds")
rm(bigram_dt)
rm(train1)
## ---- Trigram table: P(n0 | n2 n1), pruned to count > 1, top 5 ----
trigram_dt <- data.table(text = names(train3), as.matrix(train3))
setnames(trigram_dt, "V1", "count")
# Singletons are noise at trigram level; drop them to shrink the model.
trigram_dt <- filter(trigram_dt, count > 1)
trigram_dt <- as.data.table(trigram_dt)
str(trigram_dt)
trigram_dt[, c("n2", "n1", "n0") := do.call(Map, c(f = c, strsplit(text, " ")))]
# Conditional frequency against the matching bigram count in train2.
trigram_dt <- mutate(trigram_dt, freq = round(count/train2[paste(n2, n1)][[1]], 7))
trigram_dt$text <- NULL
trigram_dt$count <- NULL
trigram_dt <- as.data.table(trigram_dt)
setkeyv(trigram_dt, c("n2", "n1"))
trigram_dt <- trigram_dt[,lapply(.SD, function(x) head(x, 5)),by = key(trigram_dt)]
setkeyv(trigram_dt, c("n2", "n1", "freq", "n0"))
saveRDS(trigram_dt, "trigram_dt.rds")
rm(trigram_dt, train2, train3)
# Load the badwords dataset from google https://code.google.com/archive/p/badwordslist/downloads
badwords <- readLines("badwords.txt", warn = F)
badwords <- tolower(badwords)
# Escape literal parentheses so the entries are safe to use as regexes.
badwords <- str_replace_all(badwords, "\\(", "\\\\(")
saveRDS(badwords, "badwords_dt.rds")
rm(badwords)
|
00e4a5030b9f5e7fa6c0d4f6f59f1575051c9177
|
58d06ff7d5c1e12e4033f2024e837b723951a7f7
|
/DT_Fraudcheck.R
|
9b8d1b73670d2b8f3dd3d208954b86b0ad4b0909
|
[] |
no_license
|
karthiknr2/Karthik
|
a7471280d31164db19afc0343cf448eebf22cb1e
|
ddca7b72fe4074992686f78e4781df07b7855dbd
|
refs/heads/master
| 2023-01-07T08:54:12.702385
| 2020-10-19T19:03:29
| 2020-10-19T19:03:29
| 257,606,334
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,262
|
r
|
DT_Fraudcheck.R
|
## ---- Fraud-check decision tree: data prep and first C5.0 fit ----
library(caret)
library(C50)
# Interactively pick the fraud-check CSV (expects a Taxable.Income column).
fc <- read.csv(file.choose())
View(fc)
# Label rule: taxable income <= 30000 is "Risky", otherwise "Good".
Risky_Good <- ifelse(fc$Taxable.Income<=30000,"Risky","Good")
fraud <- data.frame(fc,Risky_Good)
View(fraud)
hist(fraud$Taxable.Income)
# Fixed 480/120 train/test split (assumes exactly 600 rows, unshuffled).
fraud_train <- fraud[1:480,]
fraud_test <- fraud[481:600,]
table(fraud$Risky_Good)
# Boosted C5.0 with 40 trials.
# NOTE(review): Taxable.Income is a predictor and also fully determines
# the label above, so the tree can trivially reproduce the labels.
dt_model <- C5.0(Risky_Good~.,data=fraud_train,trials=40)
summary(dt_model)
plot(dt_model)
# Column 7 is assumed to be Risky_Good; drop it before predicting.
pred <- predict.C5.0(dt_model,fraud_test[-7])
table(pred)
# NOTE(review): f is a ONE-WAY table of actual labels only, so
# sum(diag(f)/sum(f)) below is not an accuracy -- diag() of a vector
# builds a diagonal matrix and the expression always evaluates to 1.
# A real accuracy needs table(actuals, pred).
f <- table(fraud_test$Risky_Good)
f
sum(diag(f)/sum(f))
plot(dt_model)
## Repeated accuracy estimate for the C5.0 classifier.
## Bug fixes vs. the original loop:
##  - f was table(actuals) only; diag() of that one-way table builds a
##    diagonal matrix, so sum(diag(f))/sum(f) was always exactly 1. The
##    confusion matrix must cross-tabulate actuals against predictions.
##  - the formula fraud_train$Risky_Good ~ . leaked the response in as a
##    predictor (`.` expanded over ALL columns); use Risky_Good ~ . so the
##    response is excluded from the right-hand side.
##  - every iteration reused the identical 1:480 / 481:600 split, so all
##    100 results were the same number; a fresh random split each pass
##    makes the repetition meaningful.
acc <- numeric(100)  # preallocate instead of growing with c()
for (i in seq_len(100)) {
  print(i)
  train_idx <- sample(seq_len(nrow(fraud)), size = 480)
  fraud_train <- fraud[train_idx, ]
  fraud_test <- fraud[-train_idx, ]
  dt_model <- C5.0(Risky_Good ~ ., data = fraud_train)
  # Column 7 holds the label; drop it from the prediction input.
  pred <- predict(dt_model, fraud_test[-7])
  # Confusion matrix: rows = actual class, columns = predicted class.
  f <- table(fraud_test$Risky_Good, pred)
  acc[i] <- sum(diag(f)) / sum(f)
}
acc
summary(acc)
## Classification tree on the same split via the `tree` package.
library(tree)
fraud_tree <- tree(Risky_Good ~ ., data = fraud_train)
plot(fraud_tree)
text(fraud_tree, pretty = 0)
# predict() on a tree classifier returns a matrix of class probabilities,
# one column per class (here "Good" and "Risky").
pred_tree <- as.data.frame(predict(fraud_tree, newdata = fraud_test))
## Bug fixes vs. the original:
##  - ifelse() was called without its `no` argument, which errors at
##    runtime as soon as the condition is FALSE;
##  - the class probability in pred_tree[, "Risky"] was compared against
##    30000 (the taxable-income cutoff copied from earlier), which is
##    meaningless for a value in [0, 1].
## Label a row "Risky" when that class's probability exceeds 0.5; the
## vectorised ifelse() also replaces the row-by-row loop.
pred_tree$final <- ifelse(pred_tree[, "Risky"] > 0.5, "Risky", "Good")
mean(pred_tree$final == fraud_test$Risky_Good)
# NOTE(review): CrossTable() comes from the gmodels package, which is not
# attached anywhere in this script -- confirm library(gmodels) is loaded.
CrossTable(fraud_test$Risky_Good, pred_tree$final)
|
43d0df33f74988baea33a4b9aaaf84218279be1e
|
89c9b90868f017924de545719f7f26a247cfb49c
|
/tests/testthat/test-splitINFO.R
|
7f3e6040bde5159e839ac4443b10413d8a320590
|
[] |
no_license
|
liujohnson118/FluidigmValidation
|
5009a815abe393a6ac9c1d66bd197f79a291eace
|
831cc1cac97e6f1968fcea0cd7807a9d964e3b16
|
refs/heads/master
| 2020-11-30T01:45:02.605092
| 2016-10-21T20:51:02
| 2016-10-21T20:51:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 872
|
r
|
test-splitINFO.R
|
context("splitINFO")
# Regression test for splitINFO(): it must error on malformed input and
# reproduce known row counts / values on the bundled sample data set.
test_that("splitINFO works", {
data("PAT82DNA130CurettageA2S9")
dataForFilterQualityFrequency<-PAT82DNA130CurettageA2S9
# Standard preprocessing: quality >= 80 and frequency >= 0.03.
filtered<-filterQualityFrequency(dataForFilterQualityFrequency,minQual=80,minFreq=0.03)
# Removing the INFO column must make splitINFO fail loudly.
expect_error(splitINFO(subset(filtered,select=-INFO)))
#splitINFO usually should occur after quality and frequency have been filtered
filtered<-splitINFO(filtered)
#Check split info processes
# A second split on data missing NUMS must also error.
expect_error(splitINFO(subset(filtered,select=-NUMS)))
# Golden values captured from a known-good run of the sample data.
expect_equal(nrow(filtered),91)
expect_equal(filtered$POS[48],133253167)
expect_equal(filtered$Frequency[91],0.0537)
#Check by computations
# Aggregate statistics tolerate small numeric drift via explicit bounds.
expect_true(abs(mean(filtered$POS)-60454964.6)<1)
expect_true(abs(sd(filtered$POS)-54260560.8)<1)
expect_true(abs(mean(filtered$Frequency)-0.07722637)<0.0001)
expect_true(abs(sd(filtered$Frequency)-0.11372661)<0.0001)
})
|
3b1bfaa5a7c484964b03fb445966b585e1fdcf23
|
81c4acf23d5db8910522cdc0caab8e6a7ba5cc31
|
/text_mining.R
|
1d67e705d04455738f15a12f1c874219654c8afc
|
[] |
no_license
|
ruhulali/R_Codes
|
ff2d12dc6450ae1da748c4df6ab51600dd48e7aa
|
e2b3b3f090e7fd8a43746ed29e750b023035b3f1
|
refs/heads/master
| 2021-06-08T06:44:39.003256
| 2021-04-23T16:21:16
| 2021-04-23T16:21:16
| 158,611,318
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,307
|
r
|
text_mining.R
|
## ---- Toy sentiment classification with RTextTools + e1071 ----
install.packages("tidytext", dependencies = T)
library(RTextTools)
library(e1071)
# Hand-labelled tweets: column 1 = text, column 2 = sentiment label.
pos_tweets = rbind(
c('I love this car', 'positive'),
c('This view is amazing', 'positive'),
c('I feel great this morning', 'positive'),
c('I am so excited about the concert', 'positive'),
c('He is my best friend', 'positive')
)
neg_tweets = rbind(
c('I do not like this car', 'negative'),
c('This view is horrible', 'negative'),
c('I feel tired this morning', 'negative'),
c('I am not looking forward to the concert', 'negative'),
c('He is my enemy', 'negative')
)
# Held-out tweets; they become rows 11-15 after the rbind below.
test_tweets = rbind(
c('feel happy this morning', 'positive'),
c('larry friend', 'positive'),
c('not like that man', 'negative'),
c('house not great', 'negative'),
c('your song annoying', 'negative')
)
tweets = rbind(pos_tweets, neg_tweets, test_tweets)
# build dtm
# NOTE(review): this assignment shadows base::matrix with a
# document-term matrix object -- consider a different name.
matrix= create_matrix(tweets[,1], language="english",
removeStopwords=FALSE, removeNumbers=TRUE,
stemWords=FALSE)
# train the model
mat = as.matrix(matrix)
# Naive Bayes on rows 1-10 (training); labels come from column 2.
classifier = naiveBayes(mat[1:10,], as.factor(tweets[1:10,2]) )
# test the validity
# Predict on rows 11-15 and compare against the true labels.
predicted = predict(classifier, mat[11:15,]); predicted
table(tweets[11:15, 2], predicted)
recall_accuracy(tweets[11:15, 2], predicted)
##################################################################
## Compare several RTextTools algorithms on the same toy tweet data.
# build the data to specify response variable, training set, testing set.
container <- create_container(matrix, as.numeric(as.factor(tweets[,2])),
trainSize=1:10, testSize=11:15, virgin=FALSE)
models <- train_models(container, algorithms=c("MAXENT", "SVM", "RF", "BAGGING", "TREE"))
results <- classify_models(container, models)
View(results)
# accuracy table (actual label vs. each algorithm's predicted label)
table(as.numeric(as.factor(tweets[11:15, 2])), results[,"FORESTS_LABEL"])
table(as.numeric(as.factor(tweets[11:15, 2])), results[,"MAXENTROPY_LABEL"])
# recall accuracy per algorithm
recall_accuracy(as.numeric(as.factor(tweets[11:15, 2])), results[,"FORESTS_LABEL"])
recall_accuracy(as.numeric(as.factor(tweets[11:15, 2])), results[,"MAXENTROPY_LABEL"])
recall_accuracy(as.numeric(as.factor(tweets[11:15, 2])), results[,"TREE_LABEL"])
recall_accuracy(as.numeric(as.factor(tweets[11:15, 2])), results[,"BAGGING_LABEL"])
recall_accuracy(as.numeric(as.factor(tweets[11:15, 2])), results[,"SVM_LABEL"])
# model summary
analytics <- create_analytics(container, results)
summary(analytics)
head(analytics@document_summary)
# Bug fix: the S4 slot is `ensemble_summary` (the same access later in
# this file uses the correct name); `@ensemble_summar` raises
# "no slot of name" at runtime.
analytics@ensemble_summary
# 4-fold cross validation; the seed is set once, so the four calls below
# draw their folds from one reproducible RNG stream.
N <- 4
set.seed(2014)
cross_validate(container,N,"MAXENT")
cross_validate(container,N,"TREE")
cross_validate(container,N,"SVM")
cross_validate(container,N,"RF")
##################################################################
## ---- Sentiment classification on the happy/sad tweet files ----
rm(list = ls())
###################
"load data"
###################
setwd("D:/NOTES/Analytics Notes/Practice/Text Mining & Web Scraping/Twitter-Sentimental-Analysis-master")
# Four plain-text files: one tweet per line, pre-sorted by sentiment.
happy = readLines("D:/NOTES/Analytics Notes/Practice/Text Mining & Web Scraping/Twitter-Sentimental-Analysis-master/happy.txt")
sad = readLines("D:/NOTES/Analytics Notes/Practice/Text Mining & Web Scraping/Twitter-Sentimental-Analysis-master/sad.txt")
happy_test = readLines("D:/NOTES/Analytics Notes/Practice/Text Mining & Web Scraping/Twitter-Sentimental-Analysis-master/happy_test.txt")
sad_test = readLines("D:/NOTES/Analytics Notes/Practice/Text Mining & Web Scraping/Twitter-Sentimental-Analysis-master/sad_test.txt")
tweet = c(happy, sad)
tweet_test= c(happy_test, sad_test)
tweet_all = c(tweet, tweet_test)
# Parallel label vectors built from the per-file line counts.
sentiment = c(rep("happy", length(happy) ),
rep("sad", length(sad)))
sentiment_test = c(rep("happy", length(happy_test) ),
rep("sad", length(sad_test)))
sentiment_all = as.factor(c(sentiment, sentiment_test))
library(RTextTools)
# naive bayes
# TF-IDF weighted document-term matrix over the combined corpus.
mat= create_matrix(tweet_all, language="english",
removeStopwords=FALSE, removeNumbers=TRUE,
stemWords=FALSE, tm::weightTfIdf)
mat = as.matrix(mat)
# NOTE(review): the hard-coded 1:160 / 161:180 split assumes exactly 160
# training and 20 test tweets -- confirm against the input files.
classifier = naiveBayes(mat[1:160,], as.factor(sentiment_all[1:160]))
predicted = predict(classifier, mat[161:180,]); predicted
table(sentiment_test, predicted)
recall_accuracy(sentiment_test, predicted)
# the other methods
mat= create_matrix(tweet_all, language="english",
removeStopwords=FALSE, removeNumbers=TRUE,
stemWords=FALSE, tm::weightTfIdf)
# NOTE(review): garbled comment in the original here -- apparently a
# reminder to consider removeSparseTerms when building the matrix.
container = create_container(mat, as.numeric(sentiment_all),
trainSize=1:160, testSize=161:180,virgin=FALSE) #????????????removeSparseTerms
models = train_models(container, algorithms=c("MAXENT",
"SVM",
#"GLMNET", "BOOSTING",
"SLDA","BAGGING",
"RF", # "NNET",
"TREE"
))
# test the model
results = classify_models(container, models)
# Double as.numeric() is redundant but harmless.
table(as.numeric(as.numeric(sentiment_all[161:180])), results[,"FORESTS_LABEL"])
recall_accuracy(as.numeric(as.numeric(sentiment_all[161:180])), results[,"FORESTS_LABEL"])
# formal tests
analytics = create_analytics(container, results)
summary(analytics)
head(analytics@algorithm_summary)
head(analytics@label_summary)
head(analytics@document_summary)
analytics@ensemble_summary # Ensemble Agreement
# Cross Validation
N=3
cross_SVM = cross_validate(container,N,"SVM")
cross_GLMNET = cross_validate(container,N,"GLMNET")
cross_MAXENT = cross_validate(container,N,"MAXENT")
##################################################################
## Corpus preparation on the RDataMining tweets sample (.rdata file).
# NOTE(review): clearing the whole workspace inside a script is aggressive;
# kept to match the original analysis flow.
rm(list = ls())
# One-time package installation; slow to re-run on every execution --
# consider guarding with requireNamespace() checks.
install.packages("plyr")
install.packages("ggplot2")
install.packages("wordcloud")
install.packages("RColorBrewer")
install.packages("tm")
install.packages("SnowballC")
install.packages("devtools")
require(devtools)
library(plyr)
library(ggplot2)
library(wordcloud)
library(RColorBrewer)
library(tm)
library(SnowballC)
# Bug fix: the original called an undefined read_data() and then used the
# tweet list under the name rdmTweets. The .rdata archive is restored with
# load(), which places the saved `rdmTweets` object into the workspace.
load("D:/NOTES/Analytics Notes/Practice/Text Mining & Web Scraping/RDataMining-Tweets-20160203.rdata")
install_url("http://www.omegahat.org/Rstem/Rstem_0.4-1.tar.gz")
install_url("http://cran.r-project.org/src/contrib/Archive/sentiment/sentiment_0.1.tar.gz")
install_url("http://cran.r-project.org/src/contrib/Archive/sentiment/sentiment_0.2.tar.gz")
# Bug fix: the data frame was assigned to `dfb` but every later use (dim,
# Corpus below) reads `df` -- use one name consistently.
df <- do.call("rbind", lapply(rdmTweets, as.data.frame))
dim(df)
library(tm)
# build a corpus, which is a collection of text documents
# VectorSource specifies that the source is character vectors.
myCorpus <- Corpus(VectorSource(df$text))
##################################################################
## ---- 20 Newsgroups: read, clean, and tokenise with the tidyverse ----
library(dplyr)
library(tidyr)
library(purrr)
library(readr)
training_folder <- "D:/NOTES/Analytics Notes/Practice/Text Mining & Web Scraping/20news-bydate/20news-bydate-train/"
# Define a function to read all files from a folder into a data frame
# Each row of the result is one line of one message; `id` is the filename.
# NOTE(review): data_frame() is deprecated in favour of tibble() -- it
# still works here, but expect deprecation warnings on modern tidyverse.
read_folder <- function(infolder) {
data_frame(file = dir(infolder, full.names = TRUE)) %>%
mutate(text = map(file, read_lines)) %>%
transmute(id = basename(file), text) %>%
unnest(text)
}
# Use unnest() and map() to apply read_folder to each subfolder
# One newsgroup per subfolder; the folder name becomes the group label.
raw_text <- data_frame(folder = dir(training_folder, full.names = TRUE)) %>%
unnest(map(folder, read_folder)) %>%
transmute(newsgroup = basename(folder), id, text)
raw_text
library(ggplot2)
# Message count per newsgroup, as a horizontal bar chart.
raw_text %>%
group_by(newsgroup) %>%
summarize(messages = n_distinct(id)) %>%
ggplot(aes(newsgroup, messages)) +
geom_col() +
coord_flip()
library(stringr)
# Drop message headers (everything before the first blank line) and
# signatures (everything from the first line starting with "--").
cleaned_text <- raw_text %>%
group_by(newsgroup, id) %>%
filter(cumsum(text == "") > 0,
cumsum(str_detect(text, "^--")) == 0) %>%
ungroup()
# Remove quoted text, "X writes:" attribution lines, and two messages
# known to contain a large amount of non-text content.
cleaned_text <- cleaned_text %>%
filter(str_detect(text, "^[^>]+[A-Za-z\\d]") | text == "",
!str_detect(text, "writes(:|\\.\\.\\.)$"),
!str_detect(text, "^In article <"),
!id %in% c(9704, 9985))
library(tidytext)
# Tokenise to one word per row; keep word-like tokens and drop stopwords.
usenet_words <- cleaned_text %>%
unnest_tokens(word, text) %>%
filter(str_detect(word, "[a-z']$"),
!word %in% stop_words$word)
usenet_words %>%
count(word, sort = TRUE)
|
940e2be1e6fd004eec103754d873475b5f390f2a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/kofdata/examples/get_time_series.Rd.R
|
f7adbb3c133709471607819008018d292550408c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 180
|
r
|
get_time_series.Rd.R
|
# Example usage extracted from the kofdata package help page.
library(kofdata)
### Name: get_time_series
### Title: Get Time Series from KOF Datenservice API
### Aliases: get_time_series
### ** Examples
# Fetches the KOF barometer series from the KOF data service (network call).
get_time_series("kofbarometer")
|
94c8b4171ac8f1510f8de1a4abe52eb571108323
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Causata/examples/BinaryPredictor.Rd.R
|
0f7784d5f66cf5b1a49d26758bdb7997d4d49cc5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,216
|
r
|
BinaryPredictor.Rd.R
|
# Example usage extracted from the Causata package help page: univariate
# screening of predictors for a binary outcome on the diamonds data.
library(Causata)
### Name: BinaryPredictor
### Title: Univariate analysis for binary classification.
### Aliases: BinaryPredictor BinaryPredictor.factor BinaryPredictor.numeric
###   BinaryPredictor.data.frame BinaryPredictor.default
###   plot.BinaryPredictor print.BinaryPredictorList
### ** Examples
library(ggplot2)
data(diamonds)
# set a dependent variable that is TRUE when the price is above $5000
dv <- diamonds$price > 5000
# convert ordered to factor
# (BinaryPredictor expects plain factors, not ordered factors)
diamonds$cut <- as.factor(as.character(diamonds$cut))
diamonds$color <- as.factor(as.character(diamonds$color))
diamonds$clarity <- as.factor(as.character(diamonds$clarity))
# evaluate diamond cut and carats, and generate a plot for each
bp.cut <- BinaryPredictor(diamonds$cut, dv)
plot(bp.cut)
bp.carat <- BinaryPredictor(diamonds$carat, dv)
plot(bp.carat)
# Evaluate all predictors, print summary to screen
# note that price does not have 100% predictive
# power since the discreatization boundary is not $5000.
# Using a sample of 10k records and 3 folds of cross validation
# for greater speed.
set.seed(98765)
idx <- sample.int(nrow(diamonds), 10000)
bpList <- BinaryPredictor(diamonds[idx, ], dv[idx], folds=3)
df.summary <- print(bpList)
|
4aeab58aa9063870617de49472cc758710273d93
|
f0a815520a23a6f7d4633374f7b4e67e463fcee3
|
/cachematrix.R
|
3d236717de7da23ff1fe164630d8ded175ce0bc6
|
[] |
no_license
|
elinborg/ProgrammingAssignment2
|
38386cb54b7735b3a7e938da755685225f3547c4
|
d2350da9f15d573fbcbafccbd8dabfdbc1157e91
|
refs/heads/master
| 2021-01-20T01:11:07.375937
| 2015-02-22T22:30:14
| 2015-02-22T22:30:14
| 31,182,403
| 0
| 0
| null | 2015-02-22T21:52:47
| 2015-02-22T21:52:47
| null |
UTF-8
|
R
| false
| false
| 1,006
|
r
|
cachematrix.R
|
## The following two functions cache the inverse of a invertible square matrix
## This is a function that provides functions that set and get
## the value of an invertible square matrix x.
## In addition, it provides functions setinv and getinv, that set and get the value of the inverse of x
## Build a cache-aware wrapper around an invertible matrix.
## Returns a list of four accessors: set/get for the matrix itself and
## setinv/getinv for its cached inverse. Assigning a new matrix via set()
## clears the cached inverse, so a stale inverse can never be returned.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # invalidate the cache on reassignment
    },
    get = function() x,
    setinv = function(invx) cached_inverse <<- invx,
    getinv = function() cached_inverse
  )
}
## This is a function that calculates the inverse of the invertible square matrix x.
## The inverse is only calculated for the first time that this function is called for the matrix x.
## Return the inverse of the matrix held in `x` (a makeCacheMatrix object).
## The inverse is computed at most once: a cache hit short-circuits with
## the stored value (announcing it via message()), a miss computes the
## inverse with solve(), stores it, and returns it. Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute, store, and return the freshly inverted matrix.
    fresh <- solve(x$get(), ...)
    x$setinv(fresh)
    return(fresh)
  }
  message("Getting cache data for the inverse")
  cached
}
|
eb31b960626d49d6eade2a3b1d3dd12e25b9eb71
|
9a12758d6038efb65e5edd8346f8a8fdc97bcdb9
|
/9.shortdatabase_joint.R
|
55ace70008586e4d749e0778abc832b09a6ba125
|
[] |
no_license
|
HaihuaWang-hub/RNASeq-rRNA-identification-approach2
|
2ec593d2b3c6bfb10c0f9870e5d243a53138992a
|
de21ac1fa9771d724e48666969bbbb2ad2f11ccb
|
refs/heads/main
| 2023-08-06T11:55:48.669260
| 2021-10-04T19:12:03
| 2021-10-04T19:12:03
| 385,419,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 794
|
r
|
9.shortdatabase_joint.R
|
## ---- Join three FASTA databases into one file ----
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
if (!require('seqinr')) install.packages('seqinr')
if (!require('ape')) install.packages('ape')
setwd("F:/linux_share/approach_2_test/database")
# Read each partial database; as.string = T keeps one string per sequence.
data1 <- read.fasta(file = "short_database_1.fasta", seqtype = "DNA", as.string = T)
data2 <- read.fasta(file = "short_database_2.fasta", seqtype = "DNA", as.string = T)
data3 <- read.fasta(file = "short_database_3.fasta", seqtype = "DNA", as.string = T)
# NOTE(review): base cbind() has no fill.with.gaps argument -- it is
# treated as an extra data column named "fill.with.gaps", which the next
# line strips back out. The argument seems borrowed from another API;
# presumably plain c(data1, data2, data3) was intended -- verify the
# resulting FASTA has the expected number of sequences.
output_0 <- cbind(data1, data2, data3, fill.with.gaps=TRUE)
output<-output_0[ , !colnames(output_0) %in% c("fill.with.gaps")]
# Write the combined sequences back out as FASTA (ape::write.dna);
# colw = 1000 keeps each sequence on a single wide line.
write.dna(output, file="short_database_joint.fasta",
format="fasta",
append = F,
nbcol = 10,
colsep = "",
colw = 1000,
)
|
f872f40bfeee4d4c33a75bfbd80da2965368eb36
|
d9464f143884bee0e4db68021665a7d17cb8dcd9
|
/ProgAssignment3/best.R
|
579082a1a6651a0b9852473a48bce35a72c3ff8d
|
[] |
no_license
|
ykarabag/datasciencecoursera
|
d96f92268281a80714b861c1fcba5f90925148d7
|
b65c35a63952bd562943a49d4e56068a2a6d403e
|
refs/heads/master
| 2021-01-10T01:09:37.935923
| 2016-04-10T21:04:18
| 2016-04-10T21:04:18
| 54,744,661
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,419
|
r
|
best.R
|
best <- function (state = character(2), outcome = character()){
  ## Return the hospital in `state` with the lowest 30-day death rate for
  ## `outcome` ("heart attack", "heart failure" or "pneumonia", any case);
  ## ties are broken by the FIRST hospital name in alphabetical order.
  ## Reads outcome-of-care-measures.csv from the working directory.
  ## Invalid inputs return the same error strings as the original
  ## implementation (callers compare against them, so they are unchanged).
  outcomes <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # Map each valid outcome label to its mortality-rate column.
  rate_cols <- c(
    "HEART ATTACK"  = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
    "HEART FAILURE" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
    "PNEUMONIA"     = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
  )
  outcomes <- outcomes[, c("Hospital.Name", "State", unname(rate_cols))]
  # Rates arrive as character; non-numeric entries ("Not Available")
  # coerce to NA -- the warning is expected, so it is suppressed.
  for (col in rate_cols) {
    outcomes[[col]] <- suppressWarnings(as.numeric(outcomes[[col]]))
  }
  ## standardize to uppercase state names and outcome labels
  validStates <- casefold(unique(outcomes$State), upper = TRUE)
  state <- casefold(state, upper = TRUE)
  outcome <- casefold(outcome, upper = TRUE)
  ## Check that state and outcome are valid
  if (!(state %in% validStates)) {
    return("ERROR: Invalid State")
  }
  if (!(outcome %in% names(rate_cols))) {
    return("ERROR: Invalid Outcome")
  }
  ## Filter to hospitals in the state with data for this outcome, then
  ## keep the name(s) attaining the minimum rate. The column lookup
  ## replaces the original's three near-identical if/else branches.
  rate <- outcomes[[rate_cols[[outcome]]]]
  keep <- !is.na(rate) & outcomes$State == state
  candidates <- outcomes$Hospital.Name[keep][rate[keep] == min(rate[keep])]
  ## Bug fixes vs. the original tie-break:
  ##  - result$Hospital.Name was applied to an atomic character vector
  ##    (the earlier [,1] extraction), which is an error in R;
  ##  - sort(..., decreasing = TRUE) would have returned the LAST name
  ##    alphabetically, contradicting the stated "first alphabetical" rule.
  sort(candidates)[1]
}
|
2148c6dc2da1506ce7ffc9c501e3d69f0f360a1c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/xpose4/examples/iwres.dist.hist.Rd.R
|
9ab2a1a0e507b0cb712b069184f7e6b85bb4fc16
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 222
|
r
|
iwres.dist.hist.Rd.R
|
# Example usage extracted from the xpose4 package help page.
library(xpose4)
### Name: iwres.dist.hist
### Title: Histogram of individual weighted residuals (IWRES), for Xpose 4
### Aliases: iwres.dist.hist
### Keywords: methods
### ** Examples
# Plots the IWRES histogram for the bundled simpraz example database.
iwres.dist.hist(simpraz.xpdb)
|
3917cf5552f944d48316fa1ba201f24ec2277844
|
01b1302af51d339f7c8827a620c4a5fb26c890f1
|
/outcome_measurement/all/cod/dhis/meta_data/extract_meta_data.R
|
10fa217ff6d73bddfad21f77bfd524e94c6bd1bb
|
[] |
no_license
|
ihmeuw/gf
|
64ab90fb5a5c49694bde1596f4b20fcf107a76e3
|
29e0c530b86867d5edd85104f4fe7dcb1ed0f1ee
|
refs/heads/develop
| 2021-08-15T02:16:59.086173
| 2021-08-03T19:52:31
| 2021-08-03T19:52:31
| 109,062,373
| 3
| 6
| null | 2019-03-21T01:48:02
| 2017-10-31T23:17:16
|
R
|
UTF-8
|
R
| false
| false
| 7,621
|
r
|
extract_meta_data.R
|
# DHIS Extraction for DRC: Meta data extractopm
# Caitlin O'Brien-Carelli
# Meta data extraction code for the DHIS Extraction tool
# Extracts the names of variables, age/sex categories, data sets, facilities
# 2/20/2020
#---------------------
# This code can be run locally or on the cluster
# Calls the extracting functions (dhis_extracting_functions.R)
# Extracts all meta data to run the extraction
#--------------------
# Acts as a single function extract_dhis_content:
# this function runs multiple functions to extract:
# --------------------
# Set up R
library(profvis)
library(data.table)
library(ggplot2)
library(dplyr)
library(stringr)
library(reshape)
library(RCurl)
library(XML)
library(profvis)
library(plyr)
library(openxlsx)
# --------------------
# detect the user
user = Sys.info()[['user']]
#----------------------
# set the working directories and source functions
# determine if the code is being run on the cluster or on home computer
# (Windows maps the shared drive to J:, the cluster mounts it at /home/j)
j = ifelse(Sys.info()[1]=='Windows', 'J:', '/home/j')
# set working directory
dir = paste0(j, '/Project/Evaluation/GF/outcome_measurement/cod/dhis_data/')
setwd(dir)
# set the output directory
out_dir = paste0(dir, '0_meta_data/')
# library for the dhisextractr package
dirx = paste0(dir, 'packages/')
# source functions from J Drive - be sure to update from most recent push
source(paste0(dir, 'code/dhis_extracting_functions.R'))
# source the prep function for the facilities
source(paste0(dir, 'code/prep_master_facilities_function.R'))
#---------------------------------------------
#'Extract content information from DHIS
#'
#' \code{extract_dhis_content} extracts content information from DHIS
#'
#' @param base_url The base url of the DHIS2 setting
#' @param userID your username in the given DHIS2 setting, as a character string
#' @param password your password for this DHIS2 setting, as a character string
#' @return Returns a list of seven tables :
#'
#' \strong{data_sets} The list of data sets as extracted by
#' \link{extract_dhis_datasets}.
#'
#' \strong{data_elements} The list of data elements as extracted by
#' \link{extract_data_elements}.
#'
#' \strong{data_elements_categories} The list of categories as extracted by
#' \link{extract_categories}.
#'
#' \strong{org_units_list} The list of organization units as extracted by
#' \link{extract_orgunits_list}.
#'
#' \strong{org_units_description} The description of organization units as extracted by
#' \link{extract_org_unit}.
#'
#' \strong{org_units_group} The list of the groups of organization units as extracted by
#' \link{extract_org_unit}.
#'
#' \strong{org_units_report} The list of reports for each organization unit as extracted by
#' \link{extract_org_unit}.
#'
#---------------------------------------------
#-----------------------
# input the country, base_url, userID, and password for the DRC DHIS2 system
# input as string variables
# SECURITY NOTE(review): these are plaintext credentials committed to
# source control. They should be moved to environment variables or a
# secrets file outside the repository, and the exposed password rotated.
country = 'drc'
base_url = 'https://snisrdc.com'
userID = 'Bethany_Huntley'
password = 'Snisrdcongo1'
#---------------------------------------------
#extract_dhis_content function
# Pull all DHIS2 metadata (data sets, data elements, categories, org
# units) needed to drive the main extraction. Relies on the dhisextractr
# helper functions sourced above (make_dhis_urls, extract_dhis_datasets,
# extract_data_elements, extract_data_elements_ds, extract_categories,
# extract_orgunits_list).
# NOTE(review): the roxygen block above promises seven tables, but this
# implementation returns four -- see the return() at the bottom.
extract_dhis_content = function(base_url, userID, password) {
print('Making DHIS urls')
urls = make_dhis_urls(base_url)
urls = data.table(urls)
#-----------------------
# extract data sets
print('Extracting Data Sets')
data_sets = extract_dhis_datasets(urls$data_sets_url, userID, password)
colnames(data_sets) = c('datasets_ID', 'datasets_name', 'datasets_url')
data_sets = data.table(data_sets)
#-----------------------
# extract data elements
print('Extracting Data Elements')
data_element_list = extract_data_elements(urls$data_elements_url, userID, password)
# Per data set, fetch the elements it contains (ddply iterates row groups).
data_elements = ddply(data_sets, .(datasets_ID, datasets_name),
function(data_sets) {
out = extract_data_elements_ds(as.character(data_sets$datasets_url),
userID, password)})
# merge the data elements and data sets
data_elements = data.table(data_elements)
data_elements[ , datasets_name:=NULL]
interim = merge(data_sets, data_elements, by='datasets_ID')
#change names of data_element_list for the merge
setnames(data_element_list, c('data_element_id', 'data_element_name', 'data_element_url' ))
# all=TRUE keeps elements that appear in only one of the two sources.
updated_data_elements = merge(interim, data_element_list, by='data_element_id', all=TRUE)
updated_data_elements = data.table(updated_data_elements)
#-----------------------
# extract categories for the data elements (age, sex, etc.)
print('Extracting Categories')
data_elements_categories = extract_categories(as.character(urls$data_elements_categories),
userID, password)
data_elements_categories = data.table(data_elements_categories)
#-----------------------
# organisational units extraction
# this extracts the list of health facilities, but not their associated geographic information
print('Extracting Organisation Units List')
org_units_list = extract_orgunits_list(as.character(urls$org_units_url),
userID, password)
colnames(org_units_list) = c('org_unit_ID', 'org_unit_name', 'org_unit_url')
org_units_list = data.table(org_units_list)
# convert factors to strings
org_units_list[ , org_unit_ID:=as.character(org_unit_ID)]
org_units_list[ , org_unit_name:=as.character(org_unit_name)]
org_units_list[ , url:=as.character(org_unit_url)]
# check for duplicate facilities
# NOTE(review): this duplicate check is printed (when run interactively)
# but its result is not stored or acted upon.
org_units_list[duplicated(org_units_list$org_unit_ID)]
org_units_list[ , org_unit_url:=NULL]
#-----------------------
# return all of the data as a data set
return(list(data_sets, updated_data_elements, data_elements_categories, org_units_list))
}
#-------------------------------------------------------------------
#------------------------
# RUN THE META DATA EXTRACTION
# the main function outputs a list item
DRC_extraction = extract_dhis_content(base_url = base_url, userID = userID, password = password)
#-----------------------------
#------------------------------
# save the data
# extract data tables from the large list of meta data
data_sets = DRC_extraction[1][[1]] # sets where the data live
updated_data_elements = DRC_extraction[2][[1]] # variable names
categories = DRC_extraction[3][[1]] # age, sex, and inventory categories
org_units = DRC_extraction[4][[1]] # health facility names
#--------------
# save all the RDS files to the J Drive
saveRDS(data_sets, paste0(out_dir, 'data_sets.rds'))
saveRDS(updated_data_elements, paste0(out_dir, 'updated_data_elements.rds'))
saveRDS(categories, paste0(out_dir, 'data_elements_categories.rds'))
saveRDS(org_units, paste0(out_dir, 'org_units.rds'))
#--------------------------
#------------------------------------------
# extract additional geographic information for health facilities
# this code extracts the dps, hz, and health area for each facility
# read in the previous geographic information from the last extraction
master = readRDS(paste0(dir, '0_meta_data/master_facilities.RDS'))
# determine if any information is missing
missing_units = org_units[!(org_unit_ID %in% master$org_unit_id)]
# if any units are missing geographic info, extract it
# if no units are missing info, no need to redo any part of the extraction
if (0 < nrow(missing_units)) source(paste0(dir, '/code/extract_facilities_meta_data.R'))
#-------------------------------------------------
# print statement to show completion
print("All metadata extracted!")
#-------------------------------------------------
|
39583bfb4aceabe249e008b34378b34b9e60d458
|
d545784864e85b543cbc837863333550fbab31f3
|
/ixp/R/ixp.R
|
3645b79cea4259c43a3098da696bc1fdc6aa9f32
|
[] |
no_license
|
phil8192/ixp
|
64ebebc667496ead3433a6a55b2868457c65340c
|
09dda001dc2240152ffcb5047f3ea7d42be44772
|
refs/heads/master
| 2021-11-11T05:49:35.800122
| 2021-10-27T16:11:19
| 2021-10-27T16:11:19
| 219,201,400
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 170
|
r
|
ixp.R
|
#' The ixp package holds ixp data and functions for loading,
#' manipulating, analysing and visualising the data.
#'
#'
#'
#' @keywords internal
"_PACKAGE"
|
c4bff690c514ae4e3cdd2b9b023383aed56d8081
|
a42d50796461308e6b30e64e4777ec1a3ad9c8fb
|
/R/pycheck.r
|
b4cd2e92d27d1180e1817d093d11500281ade56f
|
[
"MIT"
] |
permissive
|
mtalluto/mbm
|
b6de42299b1d77e4144ad0da14c2df16a8e9484f
|
65127258b7799b0fffc8ffffa10b6254051e4f39
|
refs/heads/master
| 2021-06-01T18:27:31.622299
| 2019-08-11T20:49:41
| 2019-08-12T10:48:13
| 76,379,194
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 499
|
r
|
pycheck.r
|
#' Check if necessary Python packages can be loaded
#' @examples check_python()
#' @export
check_python <- function()
{
efun <- function(module, err)
paste("Loading python module", module, "produced an error:\n", err)
modules <- c('GPy', 'numpy', 'scipy')
mTest <- sapply(modules, function(m) {
tryCatch(sp <- reticulate::import(m), warning = function(w) warning(w),
error = function(e) warning(efun(m, e)))
})
print("check_python done; if no errors/warnings you are good to go!")
}
|
1dab51f066129891e9661e3be8b606aba1434f58
|
04ca21e146db0f4e27b38475661b10be7b61a77c
|
/code_II2020/clase_7sep.R
|
2279188f25e84a6d91c9dcdd48b27f304a70d007
|
[] |
no_license
|
AdrianEnriquez4505/EST-383
|
5c2f1a67777db735ad5ecc8e584b593dc60eed6b
|
a6227a1afc3c50c2ed8beeeace508ec5dc24147f
|
refs/heads/master
| 2023-04-18T18:41:25.754012
| 2021-05-04T23:39:31
| 2021-05-04T23:39:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,591
|
r
|
clase_7sep.R
|
# Heterogéneas
Pueden contener objetos de diferentes clases
## dataframe (Marco de datos)
Un dataframe supone que las filas corresponden a observaciones y las columnas
a variables, donde las variables pueden ser de diferentes clases; numéricas,
texto, lógico, etc. La forma es muy parecida a una matriz.
Un data frame se puede generar a partir de la colección de vectores o por un
proceso de importación de datos.
sexo<-c("H","M","M","H","M")
edad<-c(20,23,24,30,22)
salario<-c(0,3000,1000,0,0)
bd1<-data.frame(sexo,edad,salario)
bd1
class(bd1)
#Nota: la indexación en un dataframe es similar a la de una matriz
bd1[,2]
bd1[3,]
bd1[,"edad"]
bd1[,"salario"]
Adicionalmente se puede usar el simbolo "$" como operador para acceder a las
variables
bd1$edad
bd1$salario
bd1$sexo
bd2<-data.frame(sexo=c("H","M","M","H","M"),
edad=c(20,23,24,30,22),
salario=c(0,3000,1000,0,0))
bd2
a<-matrix(1:20,4,5)
bd3<-as.data.frame(a) # transforma a una matriz a dataframe
bd3
dim(bd1) #
names(bd1)
names(bd1)[2]<-"EDAD"
bd1
## lista
l1<-list(bd1,bd2,bd3,"hola",seq(1,1000,0.01))
class(l1)
l2<-list(BD1=bd1,BD2=bd2,BD3=bd3,scalar="hola",vector=seq(1,1000,0.01))
# comando para conocer la estructura de un dataframe o una lista
str(l1)
str(l2)
str(bd1)
l1[[1]]
l1[[4]]
l1[[2]][1,3]
l2$vector
l2$BD2
l2$BD3[2,3]
l2$BD3$V3
l2$BD3
l2[[3]]
l3<-list(l1,l2,bd1,bd2,"hola")
str(l3)
l3[[2]][[2]]$edad
l3[[2]]$BD2$edad
# Comandos: for, if, while, (loops y condiciones)
## for: Se repite una proceso un determinado número de veces
for(i in 1:10){
print(i)
print(a)
z<-i^2
print(z)
}
# la unica regla para el iterador es que este sea un vector
ddep<-c("ch","lp","cb","or","pt","tj","sc","bn","pd")
for(j in ddep){
print(j)
}
for(j in ddep){
print(j)
for(i in seq(1,100,2)){
print(paste(j,":",i))
}
}
## if: Acciones basadas en codiciones
x<-0
if(x>2){
print("hola")
}
x<-6
if(x>2){
print("hola")
} else {
print("chau")
}
x<-2
if(x==2){
print("a")
} else if (x==3){
print("b")
} else if (x==4){
print("c")
} else if (x>4){
print("d")
} else {
print("e")
}
for(i in 1:5){
print(i)
if(i==3){
print("hola")
break
}
}
for(i in 1:5){
print(i)
if(i==3){
next
print("hola")
}
print("chau")
}
x<-0
while(x==0){
print("hola")
}
while(x<1000){
print(paste("hola",x))
x<-x+1
}
Ejemplo con la tolerancia k de series convergentes
de los recíprocos de las potencias de 2
k<-0.001
i<-0
vf<-2
ss<-(1/(2^i))
while(abs(sum(ss)-vf)>k){
i<-i+1
ss<-c(ss,(1/(2^i)))
}
|
584c0bdfb62f373f35fd2178c510413702aa450c
|
fd570307c637f9101ab25a223356ec32dacbff0a
|
/src-local/specpr/src.radtran/SRC/cnvg1g.r
|
4b12ac468071a7cdda477c2df3fd582700242700
|
[] |
no_license
|
ns-bak/tetracorder-tutorial
|
3ab4dd14950eff0d63429291c648820fb14bb4cb
|
fd07c008100f6021c293ce3c1f69584cc35de98a
|
refs/heads/master
| 2022-07-30T06:04:07.138507
| 2021-01-03T22:19:09
| 2021-01-03T22:49:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,110
|
r
|
cnvg1g.r
|
subroutine cnvg1g (sum1a, sum2a)
implicit integer*4 (i-n)
#
# iterate to converge on best grain size for one mineral
#
include "defs.h"
include "lmrefl.h"
include "convh.h"
include "../../src.specpr/common/lblg"
real*4 sum1, sum2
dmove = 1.0
iter = 0
dlast = ddflt(1)*1000.
10 dsave = ddflt(1)
call eval1g(sum1a,sum2a) # evaluate present position
write (6,20) ddflt(1), sum1a, sum2a, dmove
20 format(' d=',1pe12.5,'cm, fit=',1pe12.5,' sq(sumsq)=',
1pe12.5,' factor=',1pe12.5)
22 if (dmove < 0.1e-5) { # converged!
write (6,25)
25 format ('CONVERGED:')
write (6,20) ddflt(1), sum1a, sum2a, dmove
return
}
ddflt(1) = ddflt(1) * (1.0+dmove)
call eval1g(sum1b,sum2b) # evaluate larger grain size
if (sum2b < sum2a) { # larger is better
dlast = dsave
go to 10
}
ddflt(1) = ddflt(1) /(1.0+dmove)**2
call eval1g(sum1c,sum2c) # evaluate smaller grain size
if (sum2c < sum2a) { #smaller is better
dlast = dsave
go to 10
}
if (sum2a <= sum2b && sum2a <= sum2c) { # present pos best, contract
ddflt(1) = dsave
dmove = dmove/2.0
go to 22
}
return
end
|
8e7ed1afd4d1e03ea5a1139fb6c622da5ebfb12e
|
7c5caeca7735d7909c29ee3ed6074ad008320cf0
|
/R/duplicate.R
|
4bbbce8a4efa7a8b190c6a98f9b864ac85292da1
|
[] |
no_license
|
ncss-tech/aqp
|
8063e800ed55458cfa7e74bc7e2ef60ac3b1e6f5
|
c80591ee6fe6f4f08b9ea1a5cd011fc6d02b5c4a
|
refs/heads/master
| 2023-09-02T07:45:34.769566
| 2023-08-31T00:14:22
| 2023-08-31T00:27:14
| 54,595,349
| 47
| 12
| null | 2023-08-17T15:33:59
| 2016-03-23T21:48:50
|
R
|
UTF-8
|
R
| false
| false
| 1,292
|
r
|
duplicate.R
|
#' @title Duplicate Profiles of a SoilProfileCollection
#' @description A simple function to duplicate the contents of a \code{SoilProfileCollection} object. Old profile IDs are saved as a site-level attribute (\code{oldID}) and new IDs are generated using a numeric serial number.
#'
#' @author D.E. Beaudette
#'
#'
#' @param x a \code{SoilProfileCollection} object with 1 or more profiles
#' @param times requested number of copies
#' @param oldID site-level attribute used to store the original profile IDs
#'
#' @return a \code{SoilProfileCollection} object
#'
#' @keywords manip
#' @export
#' @examples
#'
#' # sample data
#' data('sp4')
#'
#' # promote to SPC
#' depths(sp4) <- id ~ top + bottom
#'
#' # duplicate each profile 2 times
#' d <- duplicate(sp4, times = 2)
#'
#' # graphical check
#' par(mar = c(0, 0, 3, 1))
#' plotSPC(d, color = 'Ca', width = 0.25)
#'
duplicate <- function(x, times = 3, oldID = '.oldID') {
res <- lapply(1:times, function(i) {
# local copy of original object
.x <- x
# save old ID just in case
site(.x)[[oldID]] <- profile_id(.x)
# overwrite with new ID
profile_id(.x) <- sprintf('%s-%02d', profile_id(.x), i)
# done
return(.x)
})
# safely combine list -> SPC
s <- combine(res)
}
|
1265e8aa406d1fd51b7613b3b16b8adcda0b473d
|
a9a6e636c6dd4b5c472f7c999166305d69b48632
|
/man/make_df_index.Rd
|
820fe30ea403340d04a92372991534c1660dd20a
|
[] |
no_license
|
jiqiaingwu/ActigraphyUtah
|
2fa17e62bd4758ea220ea4e0dbbe15a47792c023
|
195df9fd927ae3d9c19db00297860e175e8c310b
|
refs/heads/main
| 2023-04-13T10:19:05.751824
| 2021-04-22T21:16:07
| 2021-04-22T21:16:07
| 336,078,135
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 537
|
rd
|
make_df_index.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_df_index.R
\name{make_df_index}
\alias{make_df_index}
\title{Utility function to map table with their columns/rows in a bigger table (Map Over Multiple Inputs Simultaneously)}
\usage{
make_df_index(v)
}
\arguments{
\item{v}{import Actigraphy Raw data}
}
\value{
The lengths and values of runs of equal values in a vector.
}
\description{
Utility function to map table with their columns/rows in a bigger table (Map Over Multiple Inputs Simultaneously)
}
|
0bb29bf5520e5245a8266ca30113eed92f5fa98e
|
ef4a08ecc44c8c30a1ded2df9bf18abc72de5687
|
/man/plot.diverse_tt.Rd
|
c827af09d321542b045bcc68cacacc9482ad0ddb
|
[] |
no_license
|
pmartinezarbizu/dada2pp
|
de7008f8f1a68a55f7399b8a60ee805e8ab952d9
|
a4c70de6028cf6429cdd387ea2c6abf0776404ff
|
refs/heads/master
| 2023-07-27T21:54:29.676912
| 2023-07-18T12:11:47
| 2023-07-18T12:11:47
| 160,071,012
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 409
|
rd
|
plot.diverse_tt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.diverse_tt.r
\name{plot.diverse_tt}
\alias{plot.diverse_tt}
\title{Plot Method for divers.tt}
\usage{
\method{plot}{diverse_tt}(x, ty = "ASVs")
}
\arguments{
\item{x}{}
}
\value{
A graph with 4 boxplots
}
\description{
}
\details{
}
\examples{
}
\seealso{
\code{\link{plot.diverse_tt}}
}
\author{
Pedro Martinez Arbizu
}
|
e714cdd4c9a5a1201dd4fa48416e3a4e0aa3a6a6
|
c90dac176024e17fc2f984c2e5bf3390ee08235b
|
/Lab/Chapter 5/Chap 5.r
|
e46c81b923dc1c6dc377e0456c27473fdfc701df
|
[] |
no_license
|
HariharasudhanAS/ISLR-Exercises
|
4317851a5c6fafe0f63f4f0be3b6363ea5bb9593
|
0b6066ce81c19cefeb582703f3b3a7f3148d5af3
|
refs/heads/master
| 2020-03-31T00:26:17.718212
| 2018-12-11T11:05:53
| 2018-12-11T11:05:53
| 151,739,548
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,166
|
r
|
Chap 5.r
|
library(ISLR)
set.seed(1)
#random selection
train = sample(392,196)
# model fitting
lm.fit = lm(mpg~horsepower, data=Auto, subset=train)
summary(lm.fit)
attach(Auto)
# MSE in test set
mean((mpg-predict(lm.fit, Auto))[-train]^2)
# testing for quadratic and cubic
lm.fit2 = lm(mpg~poly(horsepower,2),data=Auto,subset=train)
mean((mpg-predict(lm.fit2, Auto))[-train]^2)
lm.fit3 = lm(mpg~poly(horsepower,3),data=Auto,subset=train)
mean((mpg-predict(lm.fit3, Auto))[-train]^2)
# Different sample
set.seed(2)
train = sample(392, 196)
lm.fit=lm(mpg~horsepower, data=Auto, subset=train)
mean((mpg-predict(lm.fit, Auto))[-train]^2)
lm.fit2 = lm(mpg~poly(horsepower,2),data=Auto,subset=train)
mean((mpg-predict(lm.fit2, Auto))[-train]^2)
lm.fit3 = lm(mpg~poly(horsepower,3),data=Auto,subset=train)
mean((mpg-predict(lm.fit3, Auto))[-train]^2)
# LOOCV
library(boot)
?cv.glm # default LOOCV
glm.fit = glm(mpg~horsepower, data=Auto)
cv.err = cv.glm(Auto, glm.fit)
cv.err$delta
library(ISLR)
set.seed(1)
#random selection
train = sample(392,196)
# model fitting
lm.fit = lm(mpg~horsepower, data=Auto, subset=train)
summary(lm.fit)
attach(Auto)
# MSE in test set
mean((mpg-predict(lm.fit, Auto))[-train]^2)
# testing for quadratic and cubic
lm.fit2 = lm(mpg~poly(horsepower,2),data=Auto,subset=train)
mean((mpg-predict(lm.fit2, Auto))[-train]^2)
lm.fit3 = lm(mpg~poly(horsepower,3),data=Auto,subset=train)
mean((mpg-predict(lm.fit3, Auto))[-train]^2)
# Different sample
set.seed(2)
train = sample(392, 196)
lm.fit=lm(mpg~horsepower, data=Auto, subset=train)
mean((mpg-predict(lm.fit, Auto))[-train]^2)
lm.fit2 = lm(mpg~poly(horsepower,2),data=Auto,subset=train)
mean((mpg-predict(lm.fit2, Auto))[-train]^2)
lm.fit3 = lm(mpg~poly(horsepower,3),data=Auto,subset=train)
mean((mpg-predict(lm.fit3, Auto))[-train]^2)
# LOOCV
library(boot)
?cv.glm # default LOOCV
glm.fit = glm(mpg~horsepower, data=Auto)
cv.err = cv.glm(Auto, glm.fit)
cv.error = rep(0,5)
for ( i in 1:5){
glm.fit=glm(mpg~poly(horsepower,i),data=Auto)
cv.error[i]=cv.glm(Auto,glm.fit)$delta[1]
}
print(cv.error)
set.seed(17)
cv.error.10 = rep(0,5)
for ( i in 1:5){
glm.fit=glm(mpg~poly(horsepower,i),data=Auto)
cv.error.10[i]=cv.glm(Auto,glm.fit, K=10)$delta[1]
}
print(cv.error.10)
# Note:
# (In principle, the computation time for LOOCV for a least squares linear
# model should be faster than for k-fold CV, due to the availability of the
# formula (5.2) for LOOCV (inflated MSE); however, unfortunately the cv.glm() function
# does not make use of this formula.)
# Bootstrap
alpha.fn = function(data, index){
X=data$X[index]
Y=data$Y[index]
return((var(Y)-cov(X,Y))/(var(X)+var(Y)-2*cov(X,Y)))
}
#Test fn
alpha.fn(Portfolio,1:100)
set.seed(1)
alpha.fn(Portfolio,sample(100,100,replace=T))
boot(Portfolio,alpha.fn,R=1000)
boot.fn=function(data,index){
return(coef(lm(mpg~horsepower, data=data, subset=index)))
}
boot.fn(Auto, sample(392,392,replace=T))
boot(Auto,boot.fn,R=1000)
boot.fn=function(data, index)
coefficients(lm(mpg~horsepower+I(horsepower^2),data=data,subset=index))
set.seed(1)
boot(Auto,boot.fn,1000)
summary(lm(mpg~horsepower+I(horsepower^2),data = Auto ) )$coef
|
cb755067a487a22f7a107e71750720be01d3e3dd
|
c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab
|
/man/gtkWindowHasToplevelFocus.Rd
|
bfa01543153027fa7d9bbd88dd288ce7b3681419
|
[] |
no_license
|
cran/RGtk2.10
|
3eb71086e637163c34e372c7c742922b079209e3
|
75aacd92d4b2db7d0942a3a6bc62105163b35c5e
|
refs/heads/master
| 2021-01-22T23:26:26.975959
| 2007-05-05T00:00:00
| 2007-05-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 631
|
rd
|
gtkWindowHasToplevelFocus.Rd
|
\alias{gtkWindowHasToplevelFocus}
\name{gtkWindowHasToplevelFocus}
\title{gtkWindowHasToplevelFocus}
\description{Returns whether the input focus is within this GtkWindow.
For real toplevel windows, this is identical to \code{\link{gtkWindowIsActive}},
but for embedded windows, like \code{\link{GtkPlug}}, the results will differ.}
\usage{gtkWindowHasToplevelFocus(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkWindow}}] a \code{\link{GtkWindow}}}}
\details{ Since 2.4}
\value{[logical] \code{TRUE} if the input focus is within this GtkWindow}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
aec61f7ced2b00e401517b7330672f3523bc97de
|
8a4ded6b300e28278a8b1f952209fd3b2ae4af5e
|
/evaluation/stats_final.r
|
e9beca633d23d40bcd8c859885cf090812fd837e
|
[] |
no_license
|
mario206/jsNaughty
|
8bb76c0f37f6d1d1e1b675d4a360d53f4c405cbb
|
a5d46d2bddedf7dbc708166b1777ac96b51056bb
|
refs/heads/master
| 2020-04-02T06:25:06.811612
| 2017-05-08T20:42:15
| 2017-05-08T20:42:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,645
|
r
|
stats_final.r
|
#From the stats.r file we want to produce 3 plots:
#1) a comparison boxplot between the three different consistency measures with and without
#hashing. The no hash and hash boxplots (three each) should be in different colors.
#2) a comparison boxplot between our best measure, jsnice, and their mixed equivalent. (also with global comparison)
#3) a scatterplot showing the complementary nature of the two methods
#Also, this should have on the wilcox and cohensd D tests for these plots.
library(ggplot2)
library(lsr)
library(car)
library(reshape2)
library(effsize)
rFromWilcox<-function(wilcoxModel, N){
z<- qnorm(wilcoxModel$p.value/2)
r<- z/ sqrt(N)
cat(wilcoxModel$data.name, "Effect Size, r = ", r)
}
#rFromWilcox(w,2*length(subset1$`Freq`))
#Saving 9.36 x 7.07 in image
#Before hash fix
stats_mix <- read.csv("~/jsnaughty/evaluation/stats_mix_final.csv", sep=";")
stats_nomix <- read.csv("~/jsnaughty/evaluation/stats_nomix_final.csv", sep=";")
#1) (white background + shades.)
# boxplot(cbind(stats_nomix[,c("hash_def_one_renaming_freqlen", "hash_def_one_renaming_lm",
# "hash_def_one_renaming_logmodel", "no_renaming_freqlen",
# "no_renaming_lm", "no_renaming_logmodel")])/stats_nomix[,c("num_loc_names")],
# las=2, ylab=paste("% names recovered -", nrow(stats_nomix), "files"))
#ggplot version with colors!
subset1 <- cbind(stats_nomix[,c("no_renaming_freqlen", "hash_def_one_renaming_freqlen",
"no_renaming_lm","hash_def_one_renaming_lm",
"no_renaming_logmodel", "hash_def_one_renaming_logmodel")])/stats_nomix[,c("num_loc_names")]
colnames(subset1) <- c("Freq", "Freq (Hash)", "LM", "LM (Hash)", "Logistic", "Logistic (Hash)")
#Want to melt the data so we have two labels (one for each of the 6), and one for colors.
b_1 <- melt(subset1)
b_1$Hashed <- b_1$variable == "Freq (Hash)" | b_1$variable == "LM (Hash)" | b_1$variable == "Logistic (Hash)"
c_plot <- ggplot(b_1, aes(x = variable, y = value)) +
geom_boxplot(aes(fill = Hashed)) +
scale_fill_manual(values = c("#dff4d8","#afd8b9")) +
theme(axis.text.x = element_text(size=20, angle = 45, hjust = 1),
axis.title = element_text(size=20),
axis.text.y = element_text(size=20),
axis.title = element_text(size=20),
panel.grid.major.y = element_line(colour = "#f1f1f1", size = 1),
panel.background = element_rect(fill = "white"),
legend.position="none") +
#ggtitle("File level accuracy with and without hashes by selection technique") +
xlab("Selection technique") +
ylab(paste("% local names recovered -", nrow(subset1), "files"))
c_plot
ggsave(c_plot, file = "~/jsnaughty/evaluation/Plots/consistencyBoxplot.pdf", height= 7.07, width = 9.36, units = "in")
ggsave(c_plot, file = "~/jsnaughty-paper/figures/consistencyBoxplot.pdf", height=7.07, width =9.36, units = "in")
#Compare the values of the hash and no hash versions assuming the hashing is better...
wilcox.test(subset1$`Freq`, subset1$`Freq (Hash)`,paired=TRUE,alt="l")
cohen.d(subset1$`Freq`, subset1$`Freq (Hash)`, paired = TRUE)
#cohen.d(subset1$`Freq`, subset1$`Freq (Hash)`) #Test -> they are equivalent. (check)
#cohensD(subset1$`Freq`, subset1$`Freq (Hash)`)
wilcox.test(subset1$`LM`, subset1$`LM (Hash)`,paired=TRUE,alt="l")
cohen.d(subset1$`LM`, subset1$`LM (Hash)`, paired = TRUE)
#cohensD(subset1$`LM`, subset1$`LM (Hash)`)
wilcox.test(subset1$`Logistic`, subset1$`Logistic (Hash)`,paired=TRUE,alt="l")
cohen.d(subset1$`Logistic`, subset1$`Logistic (Hash)`, paired=TRUE)
#cohensD(subset1$`Model`, subset1$`Model (Hash)`)
#Compare the values of three hash method (low - mid) + (mid - high)
wilcox.test(subset1$`Freq (Hash)`, subset1$`LM (Hash)`,paired=TRUE,alt="l")
cohen.d(subset1$`Freq (Hash)`, subset1$`LM (Hash)`, paired = TRUE)
#cohensD(subset1$`Freq (Hash)`, subset1$`LM (Hash)`)
wilcox.test(subset1$`LM (Hash)`, subset1$`Logistic (Hash)`,paired=TRUE,alt="l")
cohen.d(subset1$`LM (Hash)`, subset1$`Logistic (Hash)`, paired = TRUE)
#cohensD(subset1$`LM (Hash)`, subset1$`Model (Hash)`)
#2)
subset2 <- cbind(stats_mix[,c("hash_def_one_renaming_logmodel", "n2p")])/stats_mix[,c("num_loc_names")]
#Add in globals as well.
subset2 <- cbind(subset2, (cbind(stats_mix[,c("hash_def_one_renaming_logmodel_all", "n2p_all")]))/stats_mix[,c("num_names")])
colnames(subset2) <- c("Autonymix", "JSNice", "Autonymix (All)", "JSNice (All)")
subset3 <- cbind(stats_nomix[,c("hash_def_one_renaming_logmodel", "n2p")])/stats_nomix[,c("num_loc_names")]
subset3 <- cbind(subset3, (cbind(stats_nomix[,c("hash_def_one_renaming_logmodel_all", "n2p_all")]))/stats_nomix[,c("num_names")])
colnames(subset3) <- c("Autonym", "JSNice","Autonym (All)", "JSNice (All)")
subset3$file <- stats_nomix$file
subset2$file <- stats_mix$file
subset2 <- cbind(subset2[,c("file","Autonymix", "Autonymix (All)")])
subset2 <- merge(subset3, subset2, by = c("file"))
#Reorder the columns so they are plotted like we wish.
subset2 <- subset2[,c("Autonym", "Autonym (All)", "JSNice", "JSNice (All)", "Autonymix", "Autonymix (All)")]
colnames(subset2) <- c("Autonym (Local)", "Autonym (All)", "JSNice (Local)", "JSNice (All)", "Autonymix (Local)", "Autonymix (All)")
b_2 <- melt(subset2)
mix_plot <- ggplot(b_2, aes(variable, y = value))+
geom_boxplot(aes(fill = variable)) +
theme(axis.text.x = element_text(size=20, angle = 45, hjust = 1),
axis.title = element_text(size=20),
axis.text.y = element_text(size=20),
axis.title = element_text(size=20),
panel.grid.major.y = element_line(colour = "#f1f1f1", size = 1),
panel.background = element_rect(fill = "white"),
legend.position="none") +
scale_fill_manual(values = c( "#afd8b9","#f1f1f1","#ccbadd","#f1f1f1","#269441","#f1f1f1")) +
#scale_fill_brewer(palette="Greens") +
#ggtitle("File level accuracy for JSnice, Autonym, and Autonymix") +
xlab("Renaming technique") +
ylab(paste("% names recovered -", nrow(subset2), "files"))
mix_plot
ggsave(mix_plot, file = "~/jsnaughty/evaluation/Plots/jsniceAndBlendBoxplot.pdf", height= 7.07, width = 9.36, units = "in")
ggsave(mix_plot, file = "~/jsnaughty-paper/figures/jsniceAndBlendBoxplot.pdf", height= 7.07, width = 9.36, units = "in")
#Temp:
#Not significant.
#wilcox.test(subset3$Autonym, subset3$JSNice, paired=TRUE)
#cohen.d(subset3$Autonym, subset3$JSNice, paired=TRUE)
wilcox.test(subset2$`Autonym (Local)`, subset2$`JSNice (Local)`, paired=TRUE)
cohen.d(subset2$`Autonym (Local)`, subset2$`JSNice (Local)`,paired=TRUE)
wilcox.test(subset2$`Autonym (Local)`, subset2$`Autonymix (Local)`, paired=TRUE, alt="l")
cohen.d(subset2$`Autonym (Local)`, subset2$`Autonymix (Local)`,paired=TRUE)
wilcox.test(subset2$`JSNice (Local)`, subset2$`Autonymix (Local)`, paired=TRUE, alt="l")
cohen.d(subset2$`JSNice (Local)`, subset2$`Autonymix (Local)`,paired=TRUE)
wilcox.test(subset2$`JSNice (All)`, subset2$`Autonymix (All)`, paired=TRUE, alt="l")
cohen.d(subset2$`JSNice (All)`, subset2$`Autonymix (All)`,paired=TRUE)
#Remove them...
no_glb <- subset2[,c("Autonym (Local)", "JSNice (Local)", "Autonymix (Local)")]
colnames(no_glb) <- c("Autonym", "JSNice", "Autonymix")
nglb <- melt(no_glb)
mix_plot2 <- ggplot(nglb, aes(variable, y = value))+
geom_boxplot(aes(fill = variable)) +
theme(axis.text.x = element_text(size=20, angle = 45, hjust = 1),
axis.title = element_text(size=20),
axis.text.y = element_text(size=20),
axis.title = element_text(size=20),
panel.grid.major.y = element_line(colour = "#f1f1f1", size = 1),
panel.background = element_rect(fill = "white"),
legend.position="none") +
scale_fill_manual(values = c( "#afd8b9","#ccbadd","#269441")) +
#scale_fill_brewer(palette="Greens") +
#ggtitle("File level accuracy for JSnice, Autonym, and Autonymix") +
xlab("Renaming technique") +
ylab(paste("% local names recovered -", nrow(subset2), "files"))
mix_plot2
ggsave(mix_plot2, file = "~/jsnaughty/evaluation/Plots/jsniceAndBlendBoxplotLocal.pdf", height= 7.07, width = 9.36, units = "in")
ggsave(mix_plot2, file = "~/jsnaughty-paper/figures/jsniceAndBlendBoxplotLocal.pdf", height= 7.07, width = 9.36, units = "in")
#Fix them (but not consistent with JSNice reported results... which seem to match our original ones...)
globalCountMix <- read.csv("~/jsnaughty/evaluation/globalCountMix.csv", header=FALSE, sep=";")
colnames(globalCountMix) <- c("file", "num_glb_names_fixed")
stats_mix2 <- merge(stats_mix, globalCountMix, by = c("file")) #mix should be equivalent to nomix for globals.
subset2 <- cbind(stats_mix2[,c("hash_def_one_renaming_logmodel", "n2p")])/stats_mix2[,c("num_loc_names")]
nrow(stats_mix2) #should be the same.
stats_mix2$num_names_fixed <- stats_mix2$num_loc_names + stats_mix2$num_glb_names_fixed
#Add in globals as well.
subset2 <- cbind(subset2, (cbind(stats_mix2[,c("hash_def_one_renaming_logmodel", "n2p")] + stats_mix2[,c("num_glb_names_fixed")]))/stats_mix2[,c("num_names_fixed")])
colnames(subset2) <- c("Autonymix", "JSNice", "Autonymix (All)", "JSNice (All)")
stats_nomix2 <- merge(stats_nomix, globalCountMix, by = c("file")) #mix should be equivalent to nomix for globals.
stats_nomix2$num_names_fixed <- stats_nomix2$num_loc_names + stats_nomix2$num_glb_names_fixed
subset3 <- cbind(stats_nomix2[,c("hash_def_one_renaming_logmodel", "n2p")])/stats_nomix2[,c("num_loc_names")]
#Globals
subset3 <- cbind(subset3, (cbind(stats_nomix2[,c("hash_def_one_renaming_logmodel", "n2p")] + stats_nomix2[,c("num_glb_names_fixed")]))/stats_nomix2[,c("num_names_fixed")])
colnames(subset3) <- c("Autonym", "JSNice","Autonym (All)", "JSNice (All)")
subset3$file <- stats_nomix2$file
subset2$file <- stats_mix2$file
subset2 <- cbind(subset2[,c("file","Autonymix", "Autonymix (All)")])
subset2 <- merge(subset3, subset2, by = c("file"))
#Reorder the columns so they are plotted like we wish.
subset2 <- subset2[,c("Autonym", "Autonym (All)", "JSNice", "JSNice (All)", "Autonymix", "Autonymix (All)")]
colnames(subset2) <- c("Autonym (Local)", "Autonym (All)", "JSNice (Local)", "JSNice (All)", "Autonymix (Local)", "Autonymix (All)")
glb_fix <- melt(subset2)
mix_plot3 <- ggplot(glb_fix, aes(variable, y = value))+
geom_boxplot(aes(fill = variable)) +
theme(axis.text.x = element_text(size=20, angle = 45, hjust = 1),
axis.title = element_text(size=20),
axis.text.y = element_text(size=20),
axis.title = element_text(size=20),
panel.grid.major.y = element_line(colour = "#f1f1f1", size = 1),
panel.background = element_rect(fill = "white"),
legend.position="none") +
scale_fill_manual(values = c( "#afd8b9","#f1f1f1","#ccbadd","#f1f1f1","#269441","#f1f1f1")) +
#scale_fill_brewer(palette="Greens") +
#ggtitle("File level accuracy for JSnice, Autonym, and Autonymix") +
xlab("Renaming technique") +
ylab(paste("% names recovered -", nrow(subset2), "files"))
mix_plot3
ggsave(mix_plot3, file = "~/jsnaughty/evaluation/Plots/jsniceAndBlendBoxplotFixed.pdf", height= 7.07, width = 9.36, units = "in")
# Save the boxplot built above comparing JSNice with the blended model.
ggsave(mix_plot3, file = "~/jsnaughty-paper/figures/jsniceAndBlendBoxplotFixed.pdf", height= 7.07, width = 9.36, units = "in")
#3)#Bogdan's start point:
# Keep only files on which the two tools can disagree: drop files where both
# scored 0, then files where both scored 1.
non_zero <- subset3[subset3$Autonym != 0 | subset3$JSNice != 0,]
diff <- non_zero[non_zero$Autonym != 1 | non_zero$JSNice != 1,]
# Hexbin plot of per-file accuracy, Autonym vs JSNice; the unit diagonal marks
# equal performance (points above it favour JSNice, below it favour Autonym).
hex_plot <- ggplot(data=diff,aes(Autonym,JSNice))+
  geom_hex()+
  theme(axis.text.x = element_text(size=20),
        axis.title = element_text(size=20),
        axis.text.y = element_text(size=20),
        axis.title = element_text(size=20),
        legend.text = element_text(size=20),
        legend.title = element_text(size=20),
        panel.background = element_rect(fill = "white")) +
  #scale_fill_gradientn(colours=c("green","black"),name = "Frequency",na.value=NA) +
  scale_fill_gradientn(colors=c("#269441","black"),name = "Frequency",na.value=NA)+
  #scale_fill_gradientn(colors=c("#91d288","black"),name = "Frequency",na.value=NA)+
  xlab("Autonym File Accuracy")+
  ylab("JSNice File Accuracy")+
  geom_abline(slope = 1, size = 2)#+
  #ggtitle("Comparison of Autonym vs JSNice Accuracy")
# Write the plot both to the analysis folder and to the paper's figures folder.
ggsave(hex_plot, file = "~/jsnaughty/evaluation/Plots/hexComparisonPlot.pdf", height= 7.07, width = 9.36, units = "in")
ggsave(hex_plot, file = "~/jsnaughty-paper/figures/hexComparisonPlot.pdf", height= 7.07, width = 9.36, units = "in")
#Density comparison
# Reshape to long format: one row per (technique, per-file accuracy) pair.
d_in <- melt(subset3[,c("Autonym", "JSNice")])
colnames(d_in) <- c("Technique", "value")
# NOTE(review): the x-axis label below counts rows of subset2 while the data
# plotted comes from subset3 -- confirm the intended file count.
density_plot <- ggplot(data =d_in, aes(x = value, group = Technique, fill = Technique)) +
  geom_density(alpha = .3) +
  theme(axis.text.x = element_text(size=20),
        axis.title = element_text(size=20),
        axis.text.y = element_text(size=20),
        axis.title = element_text(size=20),
        legend.text = element_text(size=20),
        legend.title = element_text(size=20),
        panel.background = element_rect(fill = "white")) +
  ylab("Density") +
  xlab(paste("% names recovered -", nrow(subset2), "files")) +
  #ylab(paste("% names recovered -", nrow(stats_nomix), "files")) +
  #ggtitle("Density Plot of File Accuracy of Autonym Vs JSNice") +
  scale_fill_manual(values = c("#269441","#7647a2"))
  #scale_fill_manual(values = c("#7647a2","#00cc00"))
density_plot
# Same density plot saved to both output locations.
ggsave(density_plot, file = "~/jsnaughty/evaluation/Plots/jsniceCompareDensity.pdf", height= 7.07, width = 9.36, units = "in")
ggsave(density_plot, file = "~/jsnaughty-paper/figures/jsniceCompareDensity.pdf", height= 7.07, width = 9.36, units = "in")
|
c55a474de0e811f386850b26a65efcb07d93e5f0
|
d160549f5fcee328daaf69d93fdd59123905168b
|
/man/check_phred.Rd
|
cd1192d3a4566d273885ee04381945194093cc13
|
[] |
no_license
|
ddiez/ngstools
|
636b228128e2ceee7d4cf22fad657e2e706def16
|
9cd94b458390d44c091ff43b54784a9d10370e00
|
refs/heads/master
| 2021-01-18T23:59:43.630413
| 2017-05-02T09:36:10
| 2017-05-02T09:36:10
| 72,809,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 388
|
rd
|
check_phred.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phred.R
\name{check_phred}
\alias{check_phred}
\title{check_phred}
\usage{
check_phred(filenames, nlines = 1000)
}
\arguments{
\item{filenames}{name of fastq file(s) to check.}
\item{nlines}{number of quality lines to check.}
}
\description{
checks the Phred encoding for the quality scores in fastq files.
}
|
9a7f82c3a87e5d53270c9a079213865bfa0537e7
|
0b72304cf8e6d26211bd702dc40423bf1a2b5296
|
/man/conv.check.Rd
|
f50930e30e63a0cb91f1138d5a692c6a583ef80c
|
[] |
no_license
|
cran/GJRM
|
0ce5f2fc18661b2e4f6a9de4ab66dd07bed35b80
|
382349bb8af640694b472807a8c61a55e64760a9
|
refs/heads/master
| 2023-06-22T20:55:23.493200
| 2023-06-21T17:50:02
| 2023-06-21T17:50:02
| 97,197,872
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 541
|
rd
|
conv.check.Rd
|
\name{conv.check}
\alias{conv.check}
\title{Some convergence diagnostics}
\description{It takes a fitted model object and produces some
diagnostic information about the fitting procedure.
}
\usage{
conv.check(x)
}
\arguments{
\item{x}{\code{gjrm} object.}
}
\author{
Maintainer: Giampiero Marra \email{giampiero.marra@ucl.ac.uk}
}
\seealso{
\code{\link{gamlss}}, \code{\link{gjrm}}
}
\keyword{generalised joint regression modelling}
\keyword{diagnostics}
|
94b81ee4d062371a8283c45695bca69ba78d5665
|
434c6c56502ed849f65cecf2877d5eb21a30b44e
|
/man/short.form.Rd
|
475d38b5b4eb802014ecb2b1fec53c7d854742db
|
[] |
no_license
|
lansexiaobang/marima
|
afa077e000bceaf33594a5bdb97fd4d96144ee21
|
9985537297dd4874ef09a61e546d19ff7a019604
|
refs/heads/master
| 2021-04-06T09:05:29.895309
| 2017-01-13T23:59:20
| 2017-01-13T23:59:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,615
|
rd
|
short.form.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defining.programs.R
\name{short.form}
\alias{short.form}
\title{short.form}
\usage{
short.form(poly = NULL, name = "Lag=", leading = TRUE, tail = FALSE,
digits = 6)
}
\arguments{
\item{poly}{matrix polynomium (0-1 array as constructed by define.model,
for example, or array of reals as estimated by marima).}
\item{name}{character string used as header in output (default='lag').}
\item{leading}{TRUE/FALSE. If leading=FALSE the leading (unity matrix)
is to be left out/suppressed.}
\item{tail}{TRUE/FALSE. If TRUE and the ar/ma-model only consists
of coefficient matrice(s) where all coefficients (except the
leading unity matrix) are all zero a first order coefficient matrix
(being zero) is retained (in order to avoid a model containing only
the leading unity matrix).
If tail=TRUE and the coefficients in the first coefficient matrix
(after the leading unity matrix) are all zero, the leading unity
matrix is always retained.}
\item{digits}{the number of digits retained by short.form (default=6).}
}
\description{
Function to condensate (and/or) print out matrix
polynomium leaving out empty lag matrices and, if specified,
the leading (unity) matrix.
}
\examples{
Model<-define.model(kvar=4, ar=c(1, 2, 4), ma=c(1), reg.var=4)
short.form(Model$ar.pattern)
short.form(Model$ma.pattern)
short.form(Model$ar.pattern, leading=FALSE)
short.form(Model$ar.pattern, leading=FALSE)
#
M<-define.model(kvar=4, ma=c(1))
short.form(M$ar.pattern)
short.form(M$ar.pattern, tail=TRUE)
short.form(M$ar.pattern, leading=FALSE, tail=TRUE)
}
|
deeb042bd7cf5500185a96409d810d8e01b05627
|
c8f8ac023f0021201d25568a705bf5085e664853
|
/man/melt.mefa.Rd
|
9aea826c6e1963393a36455d54680f2fd517fba2
|
[] |
no_license
|
psolymos/mefa
|
ca7d5716229c82453a49d76334282b4c4a6f07c1
|
db3922bc72429d9f61499143f8c8d6f8f31166d3
|
refs/heads/master
| 2021-10-13T04:40:46.657736
| 2021-10-07T01:48:21
| 2021-10-07T01:48:21
| 25,432,520
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,530
|
rd
|
melt.mefa.Rd
|
\encoding{UTF-8}
\name{melt}
\alias{melt}
\alias{melt.mefa}
\title{Melt (Convert) an Object of Class 'mefa' Into 'stcs'}
\description{
The function converts a wide formatted 'mefa' object into a long formatted 'stcs' object. The resulting segment column can take various formats.
}
\usage{
melt(x, \dots)
\method{melt}{mefa}(x, segm.var = NULL, by.samp = TRUE,
raw.out = FALSE, drop.zero = FALSE, \dots)
}
\arguments{
\item{x}{an object of class 'mefa'.}
\item{segm.var}{\code{NULL} (default) or column indices of samples (if \code{by.samp = TRUE}) or taxa tables (if \code{by.samp = FALSE}). If it refers to more than one column, their interaction is taken. If \code{NULL}, the resulting segment column is filled with \code{"undefined"}.}
\item{by.samp}{logical, if \code{TRUE} (default) \code{segm.var} is taken from the samples table (\code{x$samp}), if \code{FALSE} \code{segm.var} is taken from the taxa table (\code{x$taxa}).}
\item{raw.out}{logical, whether the result should contain all the zeros that is in the crosstabulated (wide) matrix (\code{TRUE}) or not (\code{FALSE}, default). Removing the zeros is straightforward for sparse matrices. But empty samples can be retained if \code{drop.zero = FALSE}.}
\item{drop.zero}{logical, whether samples with zero total count should be leaved out (\code{TRUE}) or not (\code{FALSE}, default).}
\item{\dots}{other arguments passed to the function \code{\link{stcs}}.}
}
\details{
If a 'mefa' object has original segments and \code{segm.var = NULL}, the function returns original segments. In this case, \code{raw.out = TRUE} cannot be applied (because segments have different zero samples, so the option is meaningless). If the 'mefa' object contains only a data matrix without dimnames, rows and columns are numbered before melting.
Besides the goal to convert from wide format to long format, this 'mefa' specific implementation is also useful, if a segment is not coded along with the long formatted database, but can be found in a linked table of samples or species. In this way, a 'mefa' object with new segments can be defined easily.
If melting is done with a vector that is not part of the tables inside the 'mefa' object, it must have a class attribute to be recognised, use e.g. the \code{\link{as.factor}} function (see 'Examples').
}
\value{
An object of class 'stcs' if \code{raw.out = FALSE}, pure 'data.frame' otherwise (because the zero count flag cannot be applied).
}
\references{
\enc{S\'olymos}{Solymos} P. (2008) mefa: an R package for handling and reporting count data. \emph{Community Ecology} \bold{9}, 125--127.
\enc{S\'olymos}{Solymos} P. (2009) Processing ecological data in R with the mefa package. \emph{Journal of Statistical Software} \bold{29(8)}, 1--28. \doi{10.18637/jss.v029.i08}
\url{http://mefa.r-forge.r-project.org/}
}
\author{
\enc{P\'eter S\'olymos}{Peter Solymos}, \email{solymos@ualberta.ca}
}
\examples{
data(dol.count, dol.samp, dol.taxa)
x <- mefa(stcs(dol.count), dol.samp, dol.taxa)
## Long format with original segments
melt(x)
## Long format with undefined segment
melt(as.mefa(x, segment = FALSE))
## These two are identical
melt(x, "microhab")
melt(x, x$samp$microhab) ## this is a factor
## Using a simple vector as factor
melt(x, as.factor(rbinom(dim(x)[1], 1, 0.5)))
## Interaction
melt(x, c("method", "microhab"))
## From taxa table
melt(x, "familia", by.samp = FALSE)
## Example for non-integer data
y <- x$xtab
y[y > 0] <- y[y > 0] + 0.123
z <- mefa(y)
melt(z)
}
\keyword{ methods }
\keyword{ manip }
|
3f8118f1ba4b46f94dd5153cb26a7959e42483d0
|
dced7914ebb2d205ba856e6bf7638390b75cc43c
|
/d3donut/sin.R
|
d14aae08d2dfdcdf24f36e2f1d7929a2bbb8cf04
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
clarkfitzg/junkyard
|
b292218cef35445363691264c48639c521471264
|
a88c72d9dea984985c3677fd44137f9665064662
|
refs/heads/master
| 2023-02-16T02:48:04.448950
| 2023-01-30T15:21:57
| 2023-01-30T15:21:57
| 21,373,484
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 269
|
r
|
sin.R
|
# Generate one period of a phase-shifted sine wave, rescaled to [0, 1].
sin_row <- function(phase = 0) {
  grid <- seq(0, 2 * pi, length.out = 50)
  (1 + sin(grid - phase)) / 2
}

# Evaluate the wave at 500 evenly spaced phase offsets. vapply() returns a
# 50 x 500 matrix (one wave per column), so transpose to get one wave per row.
phases <- seq(0, 50, length.out = 500)
out <- data.frame(t(vapply(phases, sin_row, numeric(50))))

# Write the 500 x 50 table as a headerless CSV.
write.table(out, "sin.csv", sep = ",",
            col.names = FALSE, row.names = FALSE)
|
f8597ed0d174cdff7e30b80d7191c51c7a9b92a6
|
0318386625386e0b7488ca27b2a3025e98c641c5
|
/cachematrix.R
|
aeef40c4f5fc2ee54cbd374c1cf5d1a81e194039
|
[] |
no_license
|
PavithraBJ/ProgrammingAssignment2
|
a853b5eb70a4a516daca4bde2071a959f84b9292
|
e1d4ae3bb86286666982d84062fd9c135f4f2ade
|
refs/heads/master
| 2021-01-25T11:03:20.404778
| 2017-06-10T13:55:10
| 2017-06-10T13:55:10
| 93,909,814
| 0
| 0
| null | 2017-06-10T02:03:38
| 2017-06-10T02:03:38
| null |
UTF-8
|
R
| false
| false
| 1,045
|
r
|
cachematrix.R
|
## makeCacheMatrix wraps a matrix together with a cache for its inverse.
## It returns a list of four closures that share one environment:
##   set(y)          -- replace the stored matrix and clear the cached inverse
##   get()           -- return the stored matrix
##   setinv(inverse) -- store a computed inverse in the cache
##   getinv()        -- return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # A new matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinv <- function(inverse) cached_inverse <<- inverse
  getinv <- function() cached_inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve returns the inverse of the matrix held in 'x' (a cache object
## as built by makeCacheMatrix). A previously stored inverse is returned
## straight from the cache (with a message); otherwise the inverse is computed
## with solve(), stored for next time, and returned. Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute, remember, return.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
6a0c80a6ae7f0fc80d4489f17742c963d7ccc5c2
|
d74d80e33b2802b6e0f27a9ffc6020e3c65c5779
|
/_common.R
|
e6b21de404f81b1f9eb13e19f7232e9c89678a0f
|
[] |
no_license
|
espinielli/flights_in_R
|
9b3da990f4cebc859ff3d0fa81979e0754bcdf06
|
a2b0fb70b632a27a7044be61e7345458fed31244
|
refs/heads/main
| 2023-05-05T06:40:54.616341
| 2021-06-03T13:04:03
| 2021-06-03T13:04:03
| 372,894,391
| 0
| 0
| null | 2021-06-01T16:24:44
| 2021-06-01T16:24:44
| null |
UTF-8
|
R
| false
| false
| 289
|
r
|
_common.R
|
# Shared setup sourced by every chapter: fixed RNG seed, numeric display,
# and knitr chunk defaults.
set.seed(8765)
# Print numbers with three significant digits.
options(digits = 3)
knitr::opts_chunk$set(
  echo = TRUE,
  message = FALSE,
  warning = FALSE,
  cache = FALSE,
  fig.align = 'center',
  fig.width = 6,
  fig.asp = 0.618, # 1 / phi
  fig.show = "hold"
)
# Keep dplyr tibble previews short (6 rows).
options(dplyr.print_min = 6, dplyr.print_max = 6)
|
a620c0f57fbae8020b45c7e077ce7eadbe1b65d0
|
2bb0ddfce85c2cec938adebc837e66d12b340203
|
/R/gmt.r
|
379db8155658894760d14a9871336a5c5fcce584
|
[] |
no_license
|
NaNa-diamond/ActivePathways
|
0b00d00b1e078b995bc577cef27c09d23d4eb3b3
|
395c1b1f6f00e6544125f1588179a3970b69f87c
|
refs/heads/master
| 2022-06-05T01:35:56.132429
| 2020-05-02T20:52:02
| 2020-05-02T20:52:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,080
|
r
|
gmt.r
|
#' Read and Write GMT files
#'
#' Functions to read and write Gene Matrix Transposed (GMT) files and to test if
#' an object inherits from GMT
#'
#' A GMT file describes gene sets, such as pathways. GMT files are tab-delimited
#' and each row contains a term id, a term name, and all genes annotated to the
#' term.
#'
#' @format
#' A GMT object is a named list of terms, where each term is a list with the items:
#' \describe{
#' \item{id}{The term id}
#' \item{name}{The full name of the term}
#' \item{genes}{A character vector of genes annotated to this term}
#' }
#' @exportClass GMT
#' @rdname GMT
#' @name GMT
#' @aliases GMT gmt
#'
#' @param filename Location of the gmt file
#' @param gmt a GMT object
#' @param x object to test
#'
#' @return \code{read.GMT} returns a GMT object. \cr
#' \code{write.GMT} returns NULL. \cr
#' \code{is.GMT} returns TRUE if \code{x} is a GMT object, else FALSE
#'
#'
#' @examples
#' \dontrun{
#' gmt <- read.GMT('path/to/gmt.gmt')
#' gmt[1:10]
#' gmt[[1]]
#' gmt[1]$id
#' gmt[1]$genes
#' gmt[1]$name
#' gmt$`REAC:3108214`
#' write.GMT(gmt, 'filename.gmt')
#' }
NULL
#' @rdname GMT
#' @export
read.GMT <- function(filename) {
  # Each line of a GMT file is: <id>\t<name>\t<gene1>\t<gene2>...
  fields <- strsplit(readLines(filename), "\t")
  parsed <- lapply(fields, function(f) {
    list(id = f[1], name = f[2], genes = f[-(1:2)])
  })
  # The list is keyed by term id.
  names(parsed) <- vapply(fields, function(f) f[1], character(1))
  class(parsed) <- "GMT"
  parsed
}
#' @rdname GMT
#' @export
write.GMT <- function(gmt, filename) {
  if (!is.GMT(gmt)) stop("gmt is not a valid GMT object")
  # Write through an explicit connection instead of sink(): sink() redirects
  # ALL console output globally, and an error (or interrupt) inside the loop
  # would leave the session permanently redirected to the file. on.exit()
  # guarantees the connection is closed on every exit path.
  con <- file(filename, open = "wt")
  on.exit(close(con), add = TRUE)
  for (term in gmt) {
    # NOTE: the "\n" joined with sep="\t" reproduces the original format
    # exactly, including its trailing tab before each newline.
    cat(term$id, term$name, paste(term$genes, collapse = "\t"), "\n",
        sep = "\t", file = con)
  }
  invisible(NULL)
}
#' Make a background list of genes
#'
#' Returns a character vector of all genes in a GMT object
#'
#' @param gmt a \link{GMT} object
#' @return a character vector containing all genes in GMT
#' @export
#'
#' @examples
#' \dontrun{
#' makeBackground(gmt)
#' }
makeBackground <- function(gmt) {
  if (!is.GMT(gmt)) stop('gmt is not a valid GMT object')
  # Accumulate the union of every term's gene set, preserving the order in
  # which genes are first encountered (starting from the first term's genes,
  # so an empty GMT errors just as the original Reduce-based code did).
  all_genes <- gmt[[1]]$genes
  for (term in gmt) {
    all_genes <- union(all_genes, term$genes)
  }
  all_genes
}
##### Subsetting functions #####
# Treat as a list but return an object of "GMT" class
#' @export
`[.GMT` <- function(x, i) {
  # Drop the class first so the default list method is used (otherwise `[`
  # would dispatch back into this method), then restore the "GMT" class.
  x <- unclass(x)
  res <- x[i]
  class(res) <- c('GMT')
  res
}
#' @export
`[[.GMT` <- function(x, i, exact = TRUE) {
  # Extract a single term as a plain list (the GMT class is intentionally
  # not kept on a single-term result).
  x <- unclass(x)
  x[[i, exact = exact]]
}
#' @export
`$.GMT` <- function(x, i) {
  # `$` delegates to `[[` so gmt$`REAC:123` behaves like gmt[["REAC:123"]].
  x[[i]]
}
#' @export
#' @rdname GMT
is.GMT <- function(x) inherits(x, 'GMT')
# Print a GMT object: one "id - name" header per term followed by its genes,
# truncated at getOption("max.print") terms with a note about how many were cut.
#' @export
print.GMT <- function(x, ...) {
  num.lines <- min(length(x), getOption("max.print", 99999))
  num.trunc <- length(x) - num.lines
  # seq_len() handles an empty GMT safely (1:0 would index out of bounds).
  cat(sapply(x[seq_len(num.lines)], function(a) paste(a$id, "-", a$name, "\n",
      paste(a$genes, collapse=", "), '\n\n')))
  if (num.trunc == 1) {
    cat('[ reached getOption("max.print") -- omitted 1 term ]')
  } else if (num.trunc > 1) {
    # Fixed typo: "ommitted" -> "omitted", consistent with the branch above.
    cat(paste('[ reached getOption("max.print") -- omitted', num.trunc, 'terms ]'))
  }
  # Print methods conventionally return their argument invisibly.
  invisible(x)
}
|
47532b0477add3c65ac24f8667af588b3f2e34b5
|
0dcbc7478a27c36d97286072d197216303c28f2c
|
/ChinaContamination/ChinaContamination.R
|
f5cfa00a8198ab2495c6b80ffd0539bb97a2dd12
|
[] |
no_license
|
hacalf/edxDataScience
|
3160f4987c373de49b018fb2d4fab75bdb9d8256
|
8ff5e3465b2714e6c1c17f07d4a8c99731df6f75
|
refs/heads/master
| 2022-10-26T12:45:35.338732
| 2020-06-21T04:29:00
| 2020-06-21T04:29:00
| 259,980,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,048
|
r
|
ChinaContamination.R
|
#### Environment Settings
# Packages required for this project
pkgs <- c("tidyverse", "caret", "kernlab", "caTools", "ranger", "randomForest", "knitr")
# Compare against the installed package *names*: installed.packages() returns
# a matrix, so matching against the whole matrix (as before) could match any
# cell, not just package names.
missing_pkgs <- pkgs[!(pkgs %in% rownames(installed.packages()))]
# Install any package that is missing
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs, repos = "http://cran.rstudio.com")
}
# Load the required libraries
library(caret)
library(tidyverse)
library(kernlab)
library(caTools)
library(ranger)
library(randomForest)
library(knitr)
# Download the data ZIP file (only once)
if (!file.exists("PRSA2017_Data_20130301-20170228.zip")){
  download.file("https://archive.ics.uci.edu/ml/machine-learning-databases/00501/PRSA2017_Data_20130301-20170228.zip", "PRSA2017_Data_20130301-20170228.zip")
}
# Unzip the data file (only once). Plain relative paths work on every OS,
# unlike the previous Windows-only ".\\..." backslash paths.
if (!file.exists("Data")){
  unzip("PRSA2017_Data_20130301-20170228.zip", exdir = "Data")
}
# Read the 12 station csv files and stack them into one data frame.
# list.files(pattern =) takes a regular expression, not a glob, so the
# pattern is anchored as "\\.csv$" rather than "*.csv".
PRSA <- list.files(path = file.path("Data", "PRSA_Data_20130301-20170228"),
                   pattern = "\\.csv$", full.names = TRUE) %>%
  lapply(read_csv) %>%
  bind_rows
### Exploratory Data Analysis
# Structure of the data (data type, numbers of rows, number of attributes)
str(PRSA)
# Near zero variance (identify the attributes that do not give us valuable data)
nzv <- nearZeroVar(PRSA)
# Dimensions of the original data (rows, columns)
dim(PRSA)
# Remove the near-zero-variance columns and the rows with missing values,
# keeping the cleaned copy in a separate variable
data <- PRSA[,-nzv]
data<- na.omit(data)
# Dimensions of the valuable data (rows, columns)
dim(data)
# Show the data in a transposed version to see more data
glimpse(data)
# Remove the "No" variable: it is just a consecutive row counter and
# carries no information
data <- data[,-c(1)]
# Cast year, month, wd and station to factors; day and hour become integers
# (they are used for the modulo filtering further down)
data$year <- as.factor(data$year)
data$month <- as.factor(data$month)
data$day <- as.integer(data$day)
data$hour <- as.integer(data$hour)
data$wd <- as.factor(data$wd)
data$station <- as.factor(data$station)
# Analyse if the attributes PM (2.5 and 10) and CO can be cast to integer
unique(data$PM2.5)
unique(data$PM10)
unique(data$CO)
# Only the attribute CO can be cast to integer
data$CO <- as.integer(data$CO)
# Finally, combine year/month/day/hour into a single date-time column
data <- data %>% mutate(date = ISOdate(year, month, day,hour))
# Down-sample for performance: even days only and hours 0/6/12/18
# (i.e. four readings per day), keeping only the columns of interest
data <-data %>% filter(day %%2==0,hour %%6==0) %>% select(date,CO,PM2.5,PM10,SO2,NO2,O3,TEMP,PRES,DEWP,wd,WSPM,station)
# And convert the tibble to a data.frame
data<-as.data.frame(data)
# Review the previous changes in a transposed version to see more data
glimpse(data)
# Review the statistics of each attribute
summary(data)
# Review the first 6 rows
head(data)
# Histogram of Carbon Monoxide
data %>% ggplot(aes(CO)) + geom_histogram() +
  labs(title = "China Contamination", x = "Carbon Monoxide")
########### Exploratory Data Analysis ####################################
###### Pollutants over time (gather() reshapes to long form for ggplot)
# Carbon Monoxide
data_grph <- data %>%
  select(date,CO) %>%
  gather(key = "Indexes", value = "value", -date)
data_grph %>%
  ggplot(aes(x=date,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Carbon Monoxide", x = "Year",
       y = "concentration (ug/mˆ3)")
# Particulate Matter and Ozone
data_grph <- data %>%
  select(date,PM10,PM2.5,O3) %>%
  gather(key = "Indexes", value = "value", -date)
data_grph %>%
  ggplot(aes(x=date,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Particulate Matter and Ozone.", x = "Year",
       y = "concentration (ug/mˆ3)")
# Nitrogen Dioxide and Sulfur Dioxide
data_grph <- data %>%
  select(date, NO2, SO2) %>%
  gather(key = "Indexes", value = "value", -date)
data_grph %>%
  ggplot(aes(x=date,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Nitrogen Dioxide and Sulfur Dioxide", x = "Year",
       y = "concentration (ug/mˆ3)")
##### Pollutants against temperature
# Carbon Monoxide
data_grph <- data %>%
  select(TEMP,CO) %>%
  gather(key = "Indexes", value = "value", -TEMP)
data_grph %>%
  ggplot(aes(x=TEMP,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Carbon Monoxide", x = "Temperature (°C)",
       y = "concentration (ug/mˆ3)")
# Particulate Matter and Ozone
data_grph <- data %>%
  select(TEMP,PM10,PM2.5,O3) %>%
  gather(key = "Indexes", value = "value", -TEMP)
data_grph %>%
  ggplot(aes(x=TEMP,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Particulate Matter and Ozone.", x = "Temperature (°C)",
       y = "concentration (ug/mˆ3)")
# Nitrogen Dioxide and Sulfur Dioxide
data_grph <- data %>%
  select(TEMP, NO2, SO2) %>%
  gather(key = "Indexes", value = "value", -TEMP)
data_grph %>%
  ggplot(aes(x=TEMP,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Nitric Dioxide and Sulfur Dioxide", x = "Temperature (°C)",
       y = "concentration (ug/mˆ3)")
### Pollutants against atmospheric pressure
# Carbon Monoxide
data_grph <- data %>%
  select(PRES,CO) %>%
  gather(key = "Indexes", value = "value", -PRES)
data_grph %>%
  ggplot(aes(x=PRES,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Carbon Monoxide", x = "Pressure (hPa)",
       y = "concentration (ug/mˆ3)")
# Particulate Matter and Ozone
data_grph <- data %>%
  select(PRES,PM10,PM2.5,O3) %>%
  gather(key = "Indexes", value = "value", -PRES)
data_grph %>%
  ggplot(aes(x=PRES,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Particulate Matter and Ozone.", x = "Pressure (hPa)",
       y = "concentration (ug/mˆ3)")
# Nitrogen Dioxide and Sulfur Dioxide
data_grph <- data %>%
  select(PRES, NO2, SO2) %>%
  gather(key = "Indexes", value = "value", -PRES)
data_grph %>%
  ggplot(aes(x=PRES,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Nitric Dioxide and Sulfur Dioxide", x = "Pressure (hPa)",
       y = "concentration (ug/mˆ3)")
### Pollutants against wind speed
# Carbon Monoxide
data_grph <- data %>%
  select(WSPM,CO) %>%
  gather(key = "Indexes", value = "value", -WSPM)
data_grph %>%
  ggplot(aes(x=WSPM,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Carbon Monoxide", x = "Wind Speed (m/s)",
       y = "concentration (ug/mˆ3)")
# Particulate Matter and Ozone
data_grph <- data %>%
  select(WSPM,PM10,PM2.5,O3) %>%
  gather(key = "Indexes", value = "value", -WSPM)
data_grph %>%
  ggplot(aes(x=WSPM,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Particulate Matter and Ozone.", x = "Wind Speed (m/s)",
       y = "concentration (ug/mˆ3)")
# Nitrogen Dioxide and Sulfur Dioxide
data_grph <- data %>%
  select(WSPM, NO2, SO2) %>%
  gather(key = "Indexes", value = "value", -WSPM)
data_grph %>%
  ggplot(aes(x=WSPM,y=value)) +
  geom_point(aes(color = Indexes)) +
  labs(title = "Nitric Dioxide and Sulfur Dioxide", x = "Wind Speed (m/s)",
       y = "concentration (ug/mˆ3)")
### Pollutants over time, faceted by measuring station
# Carbon Monoxide
data_grph <- data %>%
  select(date,station,CO) %>%
  gather(key = "Indexes", value = "value", "CO")
data_grph %>%
  ggplot(aes(x=date,y=value)) +
  geom_point(aes(color = Indexes)) +
  facet_wrap(~ station) +
  labs(title = "Carbon Monoxide", x = "Year",
       y = "concentration (ug/mˆ3)")
# Particulate Matter and Ozone
data_grph <- data %>%
  select(date,station,PM10,PM2.5,O3) %>%
  gather(key = "Indexes", value = "value", "PM10":"O3")
data_grph %>%
  ggplot(aes(x=date,y=value)) +
  geom_point(aes(color = Indexes)) +
  facet_wrap(~ station) +
  labs(title = "Particulate Matter and Ozone.", x = "Year",
       y = "concentration (ug/mˆ3)")
# Nitrogen Dioxide and Sulfur Dioxide
data_grph <- data %>%
  select(date,station, NO2, SO2) %>%
  gather(key = "Indexes", value = "value", "NO2":"SO2")
data_grph %>%
  ggplot(aes(x=date,y=value)) +
  geom_point(aes(color = Indexes)) +
  facet_wrap(~ station) +
  labs(title = "Nitric Dioxide and Sulfur Dioxide", x = "Year",
       y = "concentration (ug/mˆ3)")
###### Models ##########################################################
# Defining training and test data sets
# sample.kind="Rounding" reproduces the pre-R-3.6 sampling behaviour.
set.seed(2000, sample.kind="Rounding")
## Hold out 10% of the rows (stratified on CO) as the test set
test_prt <- createDataPartition(data$CO, times = 1, p = 0.1, list = FALSE)
train_dts <- data[-test_prt,]
test_dts <- data[test_prt,]
dim(train_dts)
dim(test_dts)
# Observed CO values of the test set, used as ground truth for every model
real_CO <- test_dts$CO
# Root-mean-squared error between observed and predicted values.
RMSE <- function(real_data, predicted_data) {
  prediction_errors <- real_data - predicted_data
  sqrt(mean(prediction_errors^2))
}
##### Simple Average #############################
## Baseline: predict the training-set mean CO for every test row
avg <-mean(train_dts$CO)
# Calculation of the RMSE with the predicted and test set
rmse_avg <- RMSE(real_CO, avg)
# Saving the results (6 significant figures in tibble printing)
options(pillar.sigfig = 6)
models_result <- tibble(Model = "Simple Average", RMSE = rmse_avg)
models_result
##### k Nearest Neighbours (kNN) #############################
#### Identifying the best "k" by cross-validation over odd k in 1..11
set.seed(2000, sample.kind="Rounding")
knn_k <- train(CO ~ date,
               method = "knn",
               tuneGrid = data.frame(k = seq(1, 11, 2)),
               data = train_dts)
## Getting the best "k":
best_k<-knn_k$bestTune
best_k
## Plot the k's calculated
ggplot(knn_k, highlight = TRUE)
#### Training the final kNN model with the selected k
set.seed(2000, sample.kind="Rounding")
train_knn <- train(CO ~ date,
                   method = "knn",
                   tuneGrid = data.frame(k = best_k),
                   data = train_dts)
## Getting the prediction
pred_knn <- predict(train_knn, test_dts)
# Calculation of the RMSE with the predicted and test set
rmse_knn <- RMSE(real_CO, pred_knn)
# Saving the results
models_result <- bind_rows(models_result,
                           tibble(Model = "K-Nearest Neighbors", RMSE = rmse_knn))
models_result
##### Random Forest #############################
#### Training the model (ranger, 400 trees, fixed seed for reproducibility)
set.seed(2000, sample.kind="Rounding")
train_rnd <- ranger(CO ~ date,
                    data = train_dts,
                    num.trees = 400,
                    respect.unordered.factors = "order",
                    seed = 2000)
print(train_rnd)
## Getting the prediction (ranger returns a list; take $predictions)
pred_rnd <- predict(train_rnd, test_dts)$predictions
# Calculation of the RMSE with the predicted and test set
rmse_rnd <- RMSE(real_CO, pred_rnd)
# Saving the results
models_result <- bind_rows(models_result,
                           tibble(Model = "Random Forest", RMSE = rmse_rnd))
models_result
##### Neural Network #############################
#### Training the model (caret's default nnet tuning grid)
set.seed(2000, sample.kind="Rounding")
train_nnt <- train(CO ~ date,
                   method = "nnet",
                   data = train_dts)
## Getting the prediction
pred_nnt <- predict(train_nnt, test_dts)
# Calculation of the RMSE with the predicted and test set
rmse_nnt <- RMSE(real_CO, pred_nnt)
# Saving the results
models_result <- bind_rows(models_result,
                           tibble(Model = "Neural Network", RMSE = rmse_nnt))
models_result
|
e97d4bcdb1d2e85a2de49f3b056019bbfdb3b66b
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query01_query26_1344n/query01_query26_1344n.R
|
901239f1e0cbeaeefcbe0d99a7de6fb133310872
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
query01_query26_1344n.R
|
bf9d60ab8aa6c40c9b6e7173f4c2e0d2 query01_query26_1344n.qdimacs 349 581
|
d9bdf8ac51f72e4211b65f365e1c329379756c76
|
c3f6f6a26105eba04d502f745febe8556a8a59d6
|
/man/createIGraph-GraphElements-method.Rd
|
5683bc1244be4637a71cd5af5802efe5ad76669b
|
[] |
no_license
|
sandraTL/PathQuantPadigm
|
f6eb7a018e1b749058d95bc3366aeebf6ff57289
|
aa45b64935a51c425c78d64fceed66f040af7e65
|
refs/heads/master
| 2020-04-13T15:23:47.110870
| 2016-03-05T21:58:53
| 2016-03-05T21:58:53
| 40,988,816
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 415
|
rd
|
createIGraph-GraphElements-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Graph.R
\docType{methods}
\name{createIGraph,GraphElements-method}
\alias{createIGraph,GraphElements-method}
\title{Creation of object igraph with data from XML file of pathway
of interest}
\usage{
\S4method{createIGraph}{GraphElements}(object)
}
\description{
Creation of object igraph with data from XML file of pathway
of interest
}
|
e1be088293579badb95437741c3df6c9ec7f081a
|
127e43a5a0271f562a286784443b9fcccb54dc4b
|
/app-2/ui.R
|
4d9fce9ae9cbaa0fb3682d91f1e313844a39ddb5
|
[] |
no_license
|
DawnEve/shiny_GO
|
15747bb9ac2bd84b24f4b08e0bb4c857c6d064a4
|
a741bcaa27c66d8a18a7ef2812c83b87f4cbf175
|
refs/heads/master
| 2022-11-23T07:00:52.236350
| 2020-07-25T14:30:22
| 2020-07-25T14:30:22
| 279,522,557
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 380
|
r
|
ui.R
|
library(shiny)
library(dplyr)
library(broom)

# UI definition: a classic sidebar layout for comparing k-means and
# hierarchical clustering on simulated data.
shinyUI(
  pageWithSidebar(
    # Application title
    headerPanel("kmeans VS hclust"),
    sidebarPanel(
      # Number of simulated observations (200-1000, default 500)
      numericInput('n', 'Number of obs', 500 ,min=200 ,max=1000),
      # Clustering approach; the server renders the result into output$plot
      selectInput("type", "Select a clust approach:",c("kmeans","hclust"),"kmeans")
    ),
    # Main area shows the plot produced by the selected method
    mainPanel(plotOutput("plot"))
  )
)
|
22b7feeb36ebb35082bd4e94bda8be61129539f9
|
80f6ebe26244d309d5145b8ffeda94c0826186e8
|
/sim_difmedias.r
|
45a706f36f7bbf79220ffa757662c625cd666687
|
[] |
no_license
|
maibru/TestHipo
|
a9d81f29ea66431fd7a8ca396e138b25b203950d
|
f53e8241d4b24aea9533956324d3ffcfe7dc9ab8
|
refs/heads/master
| 2020-04-22T11:08:19.378554
| 2019-02-15T11:03:55
| 2019-02-15T11:03:55
| 170,329,047
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 1,162
|
r
|
sim_difmedias.r
|
# Simulate a two-sample comparison of means: draw one sample from each of two
# normal distributions (means mu1 and mu2, common standard deviation sig),
# run a pooled-variance two-sided t test, and return the t statistic together
# with the bounds of the confidence interval for the difference of means.
dif.medias <- function(mu1 = 4700, mu2 = 4900, sig = 100, n1 = 4, n2 = 4) {
  # Sample of size n1 from N(mu1, sig), with its sample mean and variance
  y1 <- rnorm(n = n1, mean = mu1, sd = sig)
  m1 <- mean(y1)
  v1 <- var(y1)
  # Sample of size n2 from N(mu2, sig), with its sample mean and variance
  y2 <- rnorm(n = n2, mean = mu2, sd = sig)
  m2 <- mean(y2)
  v2 <- var(y2)
  # Hand-computed quantities kept for illustration: difference of the sample
  # means, pooled variance, and standard error of the difference
  dif.med <- m1 - m2
  s2p <- ((n1 - 1) * v1 + (n2 - 1) * v2) / (n1 + n2 - 2)
  se.dif <- sqrt(s2p * ((1 / n1) + (1 / n2)))
  # Equal-variance, two-sided t test comparing the two samples
  res <- t.test(y1, y2, var.equal = TRUE, alternative = "two.sided")
  # Output: t statistic plus lower and upper confidence-interval limits
  c(stat.T = res$statistic, li.Inf = res$conf.int[1], li.Sup = res$conf.int[2])
}
|
f1959eb56b565e33dbf4187453d198e4c1da949c
|
06a9935261e4e50e568a6ba1f389b07f78447a2f
|
/experiment-1/analysis/06_plot_examples.R
|
f58f383f96542809099984659244b7bc6242fa4b
|
[] |
no_license
|
VanRijnLab/cold-start-afl
|
0a26364e589764c2717efaff4a3dc6e59a9468d9
|
ad0511ef556923925c1e72f780099f8bd0838271
|
refs/heads/master
| 2021-06-28T00:30:17.277456
| 2020-12-18T23:27:42
| 2020-12-18T23:27:42
| 196,563,642
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,141
|
r
|
06_plot_examples.R
|
## ----------------------------------------------------------------------------
##
## Visualise rate of forgetting development for a single fact in session 1,
## plot examples of model predictions, activation
##
## Author: Maarten van der Velde
##
## Last updated: 2020-05-13
##
## ----------------------------------------------------------------------------
library(dplyr)
library(ggplot2)
library(ggforce)
library(patchwork)
library(purrr)
library(scales)
library(tidyr)
library(extraDistr)
library(extrafont)
library(fst)
library(wesanderson)
library(data.table)
library(tikzDevice)
# font_import() # Run once to populate the R font database with the fonts on the system
loadfonts()
# Global ggplot theme for interactive/exploratory plots
theme_set(theme_light(base_size = 14) +
            theme(text = element_text(family = 'Merriweather Sans')))
# Separate, plainer theme used for figures destined for the paper
theme_paper <- theme_classic(base_size = 12) +
  theme(axis.text = element_text(colour = "black"),
        panel.grid.major.y = element_line(colour = "grey92"))
# Reorder the Darjeeling palette so conditions get the intended colours
condition_colours <- wes_palette("Darjeeling1", n = 5)
condition_colours[c(2, 4, 5)] <- condition_colours[c(4, 5, 2)]
set.seed(1)
# Session-1 response-level data with fitted rate-of-forgetting (alpha) values
lab_1_rl <- read.fst(file.path("data", "processed", "lab", "session1", "session1_rl_with_alpha_lab.fst"))
# Only look at one fact (fact_id 5), keeping subjects with at least 3 trials.
# time = minutes since the subject's first trial in the session.
buchanan <- lab_1_rl %>%
  group_by(subject) %>%
  mutate(time = (start_time - min(start_time)) / 60000) %>%
  filter(fact_id == 5) %>%
  filter(n() >= 3) %>%
  ungroup()
# The last observation per subject, i.e. the final alpha estimate
buchanan_last <- buchanan %>%
  group_by(subject) %>%
  arrange(time) %>%
  slice(n()) %>%
  ungroup()
# Plot alpha development during session for all learners ------------------
# One line per subject; the final estimate of each subject is circled.
alpha_plot <- ggplot(buchanan, aes(x = time, y = alpha)) +
  geom_line(alpha = 0.5, aes(colour = subject)) +
  geom_point(alpha = 0.5, size = 1, aes(colour = subject, fill = subject)) +
  geom_mark_ellipse(data = buchanan_last, expand = 0.02, aes(colour = subject, fill = subject)) +
  guides(colour = FALSE, fill = FALSE) +
  scale_x_continuous(breaks = seq(0, 10, 2), minor_breaks = NULL) +
  scale_y_continuous(breaks = seq(0.2, 0.8, 0.1), limits = c(0.199, 0.601), minor_breaks = NULL) +
  scale_color_viridis_d() +
  scale_fill_viridis_d() +
  labs(x = "Time (minutes)",
       y = "Rate of forgetting")
alpha_plot
# Saved twice: regular and narrow versions for different figure slots
ggsave("output/plots/alpha_plot.pdf", device = "pdf", width = 4, height = 3)
ggsave("output/plots/alpha_plot_narrow.pdf", device = "pdf", width = 3, height = 3)
# Highlight final estimate in plot ----------------------------------------
alpha_plot +
geom_mark_ellipse(data = buchanan_last, alpha = 0.1, aes(fill = as.factor(fact_id), label = "Final estimate"))
ggsave("output/plots/alpha_plot_highlight.pdf", device = "pdf", width = 4, height = 3)
# Plot distribution of final alpha estimates ------------------------------
# Boxplot + jittered points of the final estimates; the same jitter seed (1)
# is used for points and ellipses so they stay aligned.
final_alpha_dist <- ggplot(buchanan_last, aes(x = as.factor(fact_id), y = alpha)) +
  geom_boxplot(outlier.shape = NA, width = 0.75) +
  geom_point(alpha = 0.5, size = 1, aes(colour = subject, fill = subject), position = position_jitter(width = 0.2, height = 0, seed = 1)) +
  geom_mark_ellipse(data = buchanan_last, expand = 0.06, aes(colour = subject, fill = subject), position = position_jitter(width = 0.2, height = 0, seed = 1)) +
  guides(colour = FALSE, fill = FALSE) +
  scale_x_discrete(labels = c("")) +
  scale_y_continuous(breaks = seq(0.2, 0.8, 0.1), limits = c(0.199, 0.601), minor_breaks = NULL) +
  scale_colour_viridis_d() +
  scale_fill_viridis_d() +
  labs(x = "",
       y = "Rate of forgetting") +
  theme(axis.ticks.x = element_blank(),
        panel.grid.major.x = element_blank())
final_alpha_dist
ggsave("output/plots/final_alpha_dist.pdf", device = "pdf", width = 1.2, height = 2.2)
# Combine alpha development and final distribution in one plot ------------
# Same plot as above but with the y-axis stripped, so it can sit next to
# alpha_plot in a patchwork composition.
final_alpha_dist_noY <- ggplot(buchanan_last, aes(x = as.factor(fact_id), y = alpha)) +
  geom_boxplot(outlier.shape = NA, width = 0.75) +
  geom_point(alpha = 0.5, size = 1, aes(colour = subject, fill = subject), position = position_jitter(width = 0.2, height = 0, seed = 1)) +
  geom_mark_ellipse(data = buchanan_last, expand = 0.08, aes(colour = subject, fill = subject), position = position_jitter(width = 0.2, height = 0, seed = 1)) +
  guides(colour = FALSE, fill = FALSE) +
  scale_x_discrete(labels = c("")) +
  scale_y_continuous(breaks = seq(0.2, 0.8, 0.1), limits = c(0.199, 0.601), minor_breaks = NULL) +
  scale_colour_viridis_d() +
  scale_fill_viridis_d() +
  labs(x = "",
       y = "Rate of forgetting") +
  theme(axis.ticks.x = element_blank(),
        panel.grid.major.x = element_blank(),
        axis.title.y = element_blank(),
        axis.text.y = element_blank(),
        axis.ticks.y = element_blank())
alpha_plot + final_alpha_dist_noY + plot_layout(widths = c(0.8, 0.2))
ggsave("output/plots/alpha_plot_with_dist.pdf", device = "pdf", width = 3, height = 2.2)
# Plot posterior predictive distribution ----------------------------------
source("analysis/bayes_funs.R")
# Normal-Gamma prior hyperparameters for the Bayesian model.
mu_0 <- 0.3 # mean (b)
kappa_0 <- 1 # number of observations/precision (c)
a_0 <- 3 # shape of Gamma (g)
b_0 <- 0.2 # rate of Gamma (h)
prior <- tibble(mu_n = mu_0,
                kappa_n = kappa_0,
                a_n = a_0,
                b_n = b_0)
# One row per update step: row 0 = prior, row i = posterior after i observations.
buchanan_bayes <- bind_rows(prior,
                            run_bayes_model_incremental(buchanan_last$alpha)) %>%
  mutate(obs = as.factor(0:(n()-1)))
# Posterior predictive t-distribution for every update step.
post_pred <- buchanan_bayes %>%
  nest_legacy(-obs) %>%
  mutate(postpred = map(data, ~ calculate_t_distr(.$mu_n, .$kappa_n, .$a_n, .$b_n)))  %>%
  unnest_legacy(postpred)
post_pred_first <- filter(post_pred, obs == 0)
post_pred_last <- filter(post_pred, as.numeric(obs) == nrow(buchanan_bayes))
# NOTE(review): `obs` has labels 0..n-1, so `obs != nrow(buchanan_bayes)` (= n)
# is always TRUE and the final distribution is ALSO included here; it is then
# overdrawn in black below, so the figure still looks right. Likely intended:
# as.numeric(obs) != nrow(buchanan_bayes) -- confirm.
post_pred_between <- filter(post_pred, obs != 0 & obs != nrow(buchanan_bayes))
# Prior dashed grey, intermediate posteriors coloured, final posterior black
# with an arrow marking its mode.
post_pred_plot <- ggplot(post_pred_between, aes(x = x, y = y, colour = obs)) +
  geom_line(data = post_pred_first, colour = "darkgrey", lwd = 1.25, lty = 2) +
  geom_line(lwd = 0.75) +
  geom_line(data = post_pred_last, colour = "black", lwd = 2.0) +
  annotate("segment", x = tail(buchanan_bayes,1)$mu_n, xend = tail(buchanan_bayes,1)$mu_n, y = 3.75, yend = 3.25, arrow = arrow(angle = 20, length = unit(0.1, "inches"), type = "closed")) +
  annotate("text", x = tail(buchanan_bayes,1)$mu_n, y = 4, label = formatC(tail(buchanan_bayes,1)$mu_n, digits = 3)) +
  # geom_line(data = post_pred_last, colour = "white", lwd = 0.5) +
  # scale_colour_viridis_d(option = "E", direction = -1, begin = 0.25, end = 0.75) +
  scale_x_continuous(minor_breaks = NULL) +
  scale_y_continuous(minor_breaks = NULL, limits = c(0,4)) +
  scale_colour_viridis_d(option = "C", direction = -1, begin = 0.1, end = 0.9) +
  guides(colour = FALSE) +
  labs(x = "Rate of forgetting",
       y = "Density")
post_pred_plot
ggsave("output/plots/post_pred_plot.pdf", device = "pdf", width = 2.2, height = 2.2)
# Illustrate the conditions -----------------------------------------------
# Plot a few posterior predictive distributions in the same graph
lab_1_rl_final_alpha <- read.fst(file.path("data", "processed", "lab", "session1", "session1_final_alpha_lab.fst"))
# Only keep fact/subject combinations with at least 3 repetitions.
alpha <- lab_1_rl_final_alpha %>%
  filter(reps >= 3)
# Fit the Bayesian model per fact across subjects' final alphas.
fact_res <- alpha %>%
  nest_legacy(-fact_id) %>%
  mutate(res = map(data, ~ run_bayes_model(.$final_alpha))) %>%
  select(-data)
post_pred_facts <- fact_res %>%
  mutate(postpred = map(res, ~ calculate_t_distr(.$mu_n, .$kappa_n, .$a_n, .$b_n))) %>%
  unnest_legacy(postpred)
post_pred_select <- post_pred_facts %>%
  filter(fact_id %in% c(6, 21, 27))
# The mode of each distribution, used to place the prediction arrows.
predictions <- post_pred_select %>%
  group_by(fact_id) %>%
  filter(y == max(y))
ggplot(post_pred_select, aes(x = x, y = y, colour = as.factor(fact_id))) +
  geom_line(lwd = 0.75) +
  scale_x_continuous(minor_breaks = NULL) +
  scale_y_continuous(minor_breaks = NULL, limits = c(0,5)) +
  geom_segment(data = predictions, aes(x = x, xend = x, y = 4.525 + 1 * (y - mean(y)), yend = 3.9 + 1 * (y - mean(y)), colour = as.factor(fact_id)), arrow = arrow(angle = 20, length = unit(0.1, "inches"), type = "closed")) +
  scale_colour_viridis_d(option = "D", direction = -1, begin = 0.1, end = 0.9) +
  guides(colour = FALSE) +
  labs(x = "Rate of forgetting",
       y = "",
       title = "Fact") +
  theme(panel.grid = element_blank(),
        axis.ticks = element_blank(),
        axis.text = element_blank())
ggsave("output/plots/post_pred_3_1.pdf", device = "pdf", width = 3, height = 3)
# Same illustration with three other facts, styled as the "Learner" panel.
post_pred_select2 <- post_pred_facts %>%
  filter(fact_id %in% c(7, 24, 30))
predictions <- post_pred_select2 %>%
  group_by(fact_id) %>%
  filter(y == max(y))
ggplot(post_pred_select2, aes(x = x, y = y, colour = as.factor(fact_id))) +
  geom_line(lwd = 0.75) +
  scale_x_continuous(minor_breaks = NULL) +
  scale_y_continuous(minor_breaks = NULL, limits = c(0,5)) +
  geom_segment(data = predictions, aes(x = x, xend = x, y = 4.525 + 1 * (y - mean(y)), yend = 3.9 + 1 * (y - mean(y)), colour = as.factor(fact_id)), arrow = arrow(angle = 20, length = unit(0.1, "inches"), type = "closed")) +
  scale_colour_viridis_d(option = "C", direction = -1, begin = 0.1, end = 0.9) +
  guides(colour = FALSE) +
  labs(x = "Rate of forgetting",
       y = "",
       title = "Learner") +
  theme(panel.grid = element_blank(),
        axis.ticks = element_blank(),
        axis.text = element_blank())
ggsave("output/plots/post_pred_3_2.pdf", device = "pdf", width = 3, height = 3)
# Prediction illustrations for paper --------------------------------------
# Fact prediction
# Posterior predictive distribution for fact 6; rug of the observed final
# alphas below the curve, arrow at the mode.
fact_6 <- alpha %>%
  filter(fact_id == 6) %>%
  nest_legacy() %>%
  mutate(res = map(data, ~ run_bayes_model(.$final_alpha))) %>%
  mutate(postpred = map(res, ~ calculate_t_distr(.$mu_n, .$kappa_n, .$a_n, .$b_n)))
observations_6 <- fact_6 %>%
  unnest_legacy(data) %>%
  select(x = final_alpha)
prediction_dist_6 <- fact_6 %>%
  unnest_legacy(postpred)
prediction_6 <- prediction_dist_6 %>%
  filter(y == max(y))
ggplot(data = prediction_dist_6, aes(x = x, y = y)) +
  geom_segment(data = observations_6, aes(x = x, xend = x, y = -.26, yend = -.01), lwd = 0.5, colour = condition_colours[3]) +
  geom_line(lwd = 3, colour = condition_colours[3]) +
  scale_x_continuous(minor_breaks = NULL) +
  geom_segment(data = prediction_6, aes(x = x, xend = x, y = 3.91 + 1 * (y - mean(y)), yend = 3.9 + 1 * (y - mean(y))), arrow = arrow(angle = 20, length = unit(0.25, "inches"), type = "closed")) +
  coord_cartesian(xlim = c(-.1, .75), ylim = c(0, 4.1), clip = "off") +
  theme(panel.grid = element_blank(),
        panel.border = element_blank(),
        axis.ticks = element_blank(),
        axis.text = element_blank(),
        axis.title = element_blank())
ggsave("output/prediction_example_fact.pdf", device = "pdf", width = 2, height = 2)
ggsave("output/prediction_example_fact.svg", device = "svg", width = 2, height = 2)
# Learner
# Fact 27 data, shifted by -0.1 for illustration purposes, standing in for a
# "learner" distribution.
fact_27 <- alpha %>%
  filter(fact_id == 27) %>%
  mutate(final_alpha = final_alpha - 0.1) %>%
  nest_legacy() %>%
  mutate(res = map(data, ~ run_bayes_model(.$final_alpha))) %>%
  mutate(postpred = map(res, ~ calculate_t_distr(.$mu_n, .$kappa_n, .$a_n, .$b_n)))
observations_27 <- fact_27 %>%
  unnest_legacy(data) %>%
  select(x = final_alpha)
prediction_dist_27 <- fact_27 %>%
  unnest_legacy(postpred)
prediction_27 <- prediction_dist_27 %>%
  filter(y == max(y))
ggplot(data = prediction_dist_27, aes(x = x, y = y)) +
  geom_segment(data = observations_27, aes(x = x, xend = x, y = -.26, yend = -.01), lwd = 0.5, colour = condition_colours[4]) +
  geom_line(lwd = 3, colour = condition_colours[4]) +
  scale_x_continuous(minor_breaks = NULL) +
  geom_segment(data = prediction_27, aes(x = x, xend = x, y = 3.91 + 1 * (y - mean(y)), yend = 3.9 + 1 * (y - mean(y))), arrow = arrow(angle = 20, length = unit(0.25, "inches"), type = "closed")) +
  coord_cartesian(xlim = c(-.1, .75), ylim = c(0, 4.1), clip = "off") +
  theme(panel.grid = element_blank(),
        panel.border = element_blank(),
        axis.ticks = element_blank(),
        axis.text = element_blank(),
        axis.title = element_blank())
ggsave("output/prediction_example_learner.pdf", device = "pdf", width = 2, height = 2)
ggsave("output/prediction_example_learner.svg", device = "svg", width = 2, height = 2)
# Fact & Learner prediction
# Combine the two distributions via a logarithmic opinion pool; the pooled
# distribution is drawn solid over the two dashed components.
prediction_dist_6_27 <- calculate_logarithmic_pool(prediction_dist_6, prediction_dist_27)
prediction_6_27 <- prediction_dist_6_27 %>%
  filter(y == max(y))
ggplot(data = prediction_dist_6, aes(x = x, y = y)) +
  geom_segment(data = observations_6, aes(x = x, xend = x, y = -.26, yend = -.01), lwd = 0.5, colour = condition_colours[3]) +
  geom_segment(data = observations_27, aes(x = x, xend = x, y = -.52, yend = -.27), lwd = 0.5, colour = condition_colours[4]) +
  geom_line(lwd = 1, lty = 2, colour = condition_colours[3]) +
  geom_line(data = prediction_dist_27, lwd = 1, lty = 2, colour = condition_colours[4]) +
  geom_line(data = prediction_dist_6_27, lwd = 3, colour = condition_colours[5]) +
  scale_x_continuous(minor_breaks = NULL) +
  geom_segment(data = prediction_6_27, aes(x = x, xend = x, y = 3.91 + 1 * (y - mean(y)), yend = 3.9 + 1 * (y - mean(y))), arrow = arrow(angle = 20, length = unit(0.25, "inches"), type = "closed")) +
  coord_cartesian(xlim = c(-.1, .75), ylim = c(0, 4.1), clip = "off") +
  theme(panel.grid = element_blank(),
        panel.border = element_blank(),
        axis.ticks = element_blank(),
        axis.text = element_blank(),
        axis.title = element_blank())
ggsave("output/prediction_example_fact_and_learner.pdf", device = "pdf", width = 2, height = 2)
ggsave("output/prediction_example_fact_and_learner.svg", device = "svg", width = 2, height = 2)
# Make schematic overview for paper ---------------------------------------
# Tiny per-subject/per-fact alpha trajectories, saved as individual SVGs for
# assembly into the schematic figure.
example_facts <- c(9, 16, 4)
example_subjects <- c("1nw3ypbqfy2engu", "m06yozffuudcg8a", "zavk4ejxdc82y1a")
alpha_examples <- lab_1_rl %>%
  filter(fact_id %in% example_facts, subject %in% example_subjects) %>%
  group_by(subject) %>%
  mutate(time = (start_time - min(start_time)) / 60000) %>%
  group_by(subject, fact_id) %>%
  filter(n() >= 3) %>%
  ungroup()
alpha_examples_last <- alpha_examples %>%
  group_by(subject, fact_id) %>%
  arrange(time) %>%
  slice(n()) %>%
  ungroup()
alpha_examples_list <- split(alpha_examples, list(alpha_examples$subject, alpha_examples$fact_id))
for (a in alpha_examples_list) {
  subject_a <- a$subject[1]
  fact_a <- a$fact_id[1]
  # NOTE(review): the ggplot object below is neither assigned nor printed;
  # ggsave() defaults to last_plot(), which inside a loop may be stale --
  # confirm the saved SVGs contain the intended trajectories.
  ggplot(a, aes(x = start_time/60000, y = alpha)) +
    geom_line() +
    geom_point(size = .85) +
    geom_point(data = filter(alpha_examples_last, subject == subject_a, fact_id == fact_a), size = 2) +
    scale_x_continuous(limits = c(0, 10)) +
    scale_y_continuous(limits = c(0.199, 0.601)) +
    theme_paper +
    theme(panel.grid = element_blank(),
          panel.border = element_blank(),
          axis.ticks = element_blank(),
          axis.line = element_line(),
          axis.text = element_blank(),
          axis.title = element_blank())
  filename <- paste("output/alpha_example", subject_a, fact_a, ".svg", sep = "_")
  ggsave(filename, device = "svg", width = .75, height = .75)
  system(paste("sed -i 's/stroke-linecap: butt;/stroke-linecap: butt; stroke: #000000; fill: none;/g'", filename)) # Fix svg background
}
# Plot example of activation over time ------------------------------------
# Simulate the SlimStampen activation trace for three alphas: whenever the
# activation drops to the retrieval threshold (-0.8), a rehearsal (correct
# response, 4 s RT) is scheduled, which boosts subsequent activation.
source("analysis/slimstampen_model_funs.R")
# NOTE(review): this shadows base::t and the earlier `alpha` data frame.
t <- seq(0, 60000, by = 10)
alpha <- c(0.2, 0.3, 0.4)
act <- list()
r <- data.table()
for(j in seq_along(alpha)) {
  a <- alpha[j]
  fact_id <- paste0("fact", a)
  for (i in seq_along(t)) {
    activation <- calculate_activation(time = t[i], id = fact_id, factalpha = a, responses = r)
    if (activation <= -0.8) {
      r <- rbind(r, list(fact_id = fact_id, text = "question", start_time = t[i], rt = 4000, correct = TRUE, threshold = -0.8))
    }
    # Flat index into the preallocated-by-growth list: one entry per (alpha, t).
    act[[length(t)*(j-1) + i]] <- list(time = t[i], fact_id = fact_id, alpha = a, activation = activation)
  }
}
act <- rbindlist(act)
r$alpha <- as.factor(stringr::str_remove(r$fact_id, "fact"))
act$alpha <- as.factor(act$alpha)
# Facet labels rendered as plotmath expressions ("alpha = 0.2" etc.).
levels(act$alpha) <- c(expression(paste(alpha, " = 0.2")), expression(paste(alpha, " = 0.3")), expression(paste(alpha, " = 0.4")))
levels(r$alpha) <- c(expression(paste(alpha, " = 0.2")), expression(paste(alpha, " = 0.3")), expression(paste(alpha, " = 0.4")))
p <- ggplot(act, aes(x = time/1000, y = activation, colour = alpha, group = alpha)) +
  facet_grid(. ~ alpha, labeller = label_parsed) +
  geom_hline(yintercept = -.8, linetype = 2) +
  geom_line() +
  geom_linerange(data = r, aes(x = start_time/1000, ymin = -.8, ymax = 2, colour = alpha)) +
  geom_point(data = r, aes(x = start_time/1000, y = 2, colour = alpha), size = 2) +
  scale_y_continuous(limits = c(-1, 2)) +
  scale_x_continuous(limits = c(0, 60)) +
  scale_colour_brewer(type = "qual") +
  guides(colour = FALSE) +
  labs(x = "Time (s)",
       y = "Activation",
       colour = NULL) +
  theme_bw(base_size = 12) +
  theme(axis.text = element_text(colour = "black"),
        strip.background = element_blank(),
        strip.text.x = element_text(size = rel(1.5)))
p
ggsave("output/alpha_simulation.pdf", device = "pdf", width = 7, height = 2.75)
|
d15697099f52e887f6c966e8b47bdc52d39d2744
|
af8b1cfa36e31284367560dac2800456de9bb284
|
/R/distance.R
|
c11562e9fb994de0226a1f13fd029280b55fed78
|
[
"MIT"
] |
permissive
|
LudvigOlsen/rearrr
|
f07cdf8fe92647335fb5a26ffc1416162543c59a
|
40d150b440ae06507873fad20a28345c08d48cf3
|
refs/heads/master
| 2023-04-19T04:24:49.834419
| 2023-03-01T10:48:10
| 2023-03-01T10:48:10
| 259,158,437
| 24
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,429
|
r
|
distance.R
|
# __________________ #< 60cfc78f594e5611a6eaaf34a2b212ae ># __________________
# Calculate distances ####
#' @title Calculate the distance to an origin
#' @description
#' \Sexpr[results=rd, stage=render]{lifecycle::badge("experimental")}
#'
#' Calculates the distance to the specified origin with:
#' \deqn{d(P1, P2) = sqrt( (x2 - x1)^2 + (y2 - y1)^2 + (z2 - z1)^2 + ... )}
#'
#' The origin can be supplied as coordinates or as a function that returns coordinates. The
#' latter can be useful when supplying a grouped \code{data.frame} and finding the distance to e.g. the centroid
#' of each group.
#'
#' @author Ludvig Renbo Olsen, \email{r-pkgs@@ludvigolsen.dk}
#' @param cols Names of columns in \code{`data`} to measure distance in.
#' Each column is considered a dimension.
#' @param origin Coordinates of the origin to calculate distances to.
#' A scalar to use in all dimensions
#' or a \code{vector} with one scalar per dimension.
#'
#' \strong{N.B.} Ignored when \code{`origin_fn`} is not \code{NULL}.
#' @param distance_col_name Name of new column with the distances.
#' @param origin_col_name Name of new column with the origin coordinates. If \code{NULL}, no column is added.
#' @export
#' @return \code{data.frame} (\code{tibble}) with the additional columns (distances and origin coordinates).
#' @inheritParams multi_mutator_
#' @family measuring functions
#' @family distance functions
#' @examples
#' # Attach packages
#' library(rearrr)
#' library(dplyr)
#'
#' # Set seed
#' set.seed(1)
#'
#' # Create a data frame
#' df <- data.frame(
#' "x" = runif(20),
#' "y" = runif(20),
#' "g" = rep(1:4, each = 5)
#' )
#'
#' # Calculate distances in the two dimensions (x and y)
#' # With the origin at x=0.5, y=0.5
#' distance(
#' data = df,
#' cols = c("x", "y"),
#' origin = c(0.5, 0.5)
#' )
#'
#' # Calculate distances to the centroid for each group in 'g'
#' distance(
#' data = dplyr::group_by(df, g),
#' cols = c("x", "y"),
#' origin_fn = centroid
#' )
distance <- function(data,
                     cols = NULL,
                     origin = NULL,
                     origin_fn = NULL,
                     distance_col_name = ".distance",
                     origin_col_name = ".origin",
                     overwrite = FALSE) {

  # Check arguments ####
  assert_collection <- checkmate::makeAssertCollection()
  checkmate::assert_string(distance_col_name, add = assert_collection)
  checkmate::assert_string(origin_col_name, null.ok = TRUE, add = assert_collection)
  checkmate::assert_numeric(origin,
    min.len = 1,
    any.missing = FALSE,
    null.ok = TRUE,
    add = assert_collection
  )
  checkmate::assert_function(origin_fn, null.ok = TRUE, add = assert_collection)
  checkmate::reportAssertions(assert_collection)
  # The new column names must not collide with each other or with `cols`.
  check_unique_colnames_(cols, distance_col_name, origin_col_name)
  # Refuse to overwrite existing columns unless `overwrite = TRUE`.
  check_overwrite_(data = data,
                   nm = distance_col_name,
                   overwrite = overwrite)
  check_overwrite_(data = data,
                   nm = origin_col_name,
                   overwrite = overwrite)
  # End of argument checks ####

  # Delegate to the shared mutator framework, which handles grouped data
  # and applies the distance mutator once per group.
  multi_mutator_(
    data = data,
    mutate_fn = calculate_distances_mutator_method_,
    check_fn = NULL,
    cols = cols,
    overwrite = overwrite,
    force_df = TRUE,
    keep_original = TRUE,
    origin = origin,
    origin_fn = origin_fn,
    distance_col_name = distance_col_name,
    origin_col_name = origin_col_name
  )
}
calculate_distances_mutator_method_ <- function(data,
                                                grp_id,
                                                cols,
                                                overwrite,
                                                origin,
                                                origin_fn,
                                                distance_col_name,
                                                origin_col_name,
                                                ...) {
  # Internal mutator applied per group by multi_mutator_(): computes the
  # euclidean distance from each row to `origin` in the dimensions `cols`
  # and appends it (plus, optionally, the origin coordinates) to `data`.

  # Number of dimensions
  # Each column is a dimension
  num_dims <- length(cols)

  # Convert columns to list of vectors (one vector per dimension)
  dim_vectors <- as.list(data[, cols, drop = FALSE])

  # Resolve the origin: either the user-supplied coordinates or the result
  # of `origin_fn` applied to the group's dimension vectors. `allow_len_one`
  # lets a single scalar be recycled across all dimensions.
  origin <- apply_coordinate_fn_(
    dim_vectors = dim_vectors,
    coordinates = origin,
    fn = origin_fn,
    num_dims = num_dims, # reuse the local (was recomputed as length(cols))
    coordinate_name = "origin",
    fn_name = "origin_fn",
    dim_var_name = "cols",
    grp_id = grp_id,
    allow_len_one = TRUE
  )

  # Calculate distances
  # formula: sqrt( (x2 - x1)^2 + (y2 - y1)^2 + (z2 - z1)^2 )
  distances <-
    calculate_distances_(dim_vectors = dim_vectors, to = origin)

  # Add info columns
  if (!is.null(origin_col_name)) {
    data[[origin_col_name]] <- list_coordinates_(origin, cols)
  }
  data[[distance_col_name]] <- distances

  data
}
# Helper used in multiple places
calculate_distances_ <- function(dim_vectors, to) {
  # Euclidean distance: d(P1, P2) = sqrt( (x2 - x1)^2 + (y2 - y1)^2 + ... )
  # `dim_vectors`: list with one numeric vector per dimension (equal lengths).
  # `to`: origin coordinates; a single scalar is recycled across dimensions.
  # Returns one distance per data point.

  # Squared difference per dimension: (x2 - x1)^2, (y2 - y1)^2, etc.
  squared_diffs <- mapply(
    function(dim_vals, origin_val) (dim_vals - origin_val)^2,
    dim_vectors,
    to,
    SIMPLIFY = FALSE
  )

  # Sum the squared differences element-wise across dimensions and take the
  # square root. This is fully vectorized and avoids the per-point list
  # transposition of the previous purrr transpose/simplify pipeline.
  unname(sqrt(Reduce(`+`, squared_diffs)))
}
|
7c67b14af4d62ea65875f87fe8c68fe076ac3f63
|
913c7d05a4d6582d8dd539fbeadf9774dbba6296
|
/东西印度洋功能比较-箱线图.R
|
98e0753c7bcab3d8225776993c2d24e6dc95c296
|
[] |
no_license
|
KeqiangYan/Meta
|
6fe8c35030637f3c9c3a0765ad861cafc5b4882e
|
0546543f78a3df647e92f7cdb71ad5c2e98ff590
|
refs/heads/master
| 2021-01-21T21:38:01.749837
| 2017-07-27T09:06:46
| 2017-07-27T09:06:46
| 98,519,094
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 697
|
r
|
东西印度洋功能比较-箱线图.R
|
# Compare COG functional-category abundances between East and West Indian
# Ocean samples with grouped boxplots.
library(reshape2)
library(ggplot2) # FIX: ggplot() below was called without ggplot2 attached

# Read COG iBAQ abundances: rows = COG categories, columns = samples.
c<-read.table("all_COG_iBAQ.txt",header = T,sep = "\t")
Sample<-c("S08","S10","S12","S14","S16","S18","S20","S22","S24","S26","S28")
# Transpose so samples become rows, then prepend the sample labels.
c1<-data.frame(c,row.names = 1)
c2<-t(c1)
c1<-as.data.frame(c2,row.names = F)
c2<-cbind(Sample,c1)
# First selection: 7 COG categories of interest.
cc2<-c2[,c(1,3,5,7,10,12,15,17)]
cc3<-melt(cc2,id="Sample")
# 6 East + 5 West samples, repeated for each of the 7 categories.
group<-c(rep(c(rep(c("East"),6),rep(c("West"),5)),7))
c4<-cbind(cc3,group)
colnames(c4)<-c("Sample","COG","Abundance","Group")
ggplot(c4, aes(x=factor(COG), y=Abundance))+geom_boxplot(aes(fill=Group))+xlab("COG")+ylab("Abundance")
# Second selection: 8 COG categories.
cc4<-c2[,c(1,3,5,6,7,8,9,16,17)]
cc6<-melt(cc4,id="Sample")
# NOTE(review): here the split is 5 East + 6 West, unlike the 6/5 split
# above -- confirm which assignment matches the sample order.
group<-c(rep(c(rep(c("East"),5),rep(c("West"),6)),8))
c4<-cbind(cc6,group)
|
2128b1b34c5ec24b44f4ff16269d9618afb59c92
|
a69974c3e38a83fcb05e1c37b1f1becbbd682c88
|
/Optogenetics SuHGFP.R
|
c78ff4c41547e4e74e948181a431a1d823c93d82
|
[
"CC0-1.0"
] |
permissive
|
jmt86cam/CSL-sequestration
|
9317f724d01c6a676c421585f6c423030408ba05
|
f2d57c547b5ffb7e4c84c411e05337f638f7dd77
|
refs/heads/main
| 2023-04-10T02:15:16.350586
| 2023-02-05T18:13:36
| 2023-02-05T18:13:36
| 597,759,738
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,744
|
r
|
Optogenetics SuHGFP.R
|
# clear the environment:
rm(list = ls())
# clear the console: ctrl+L
# clear all the plots:
dev.off()
#### Packages for this script ####
install.packages("ggplot2")
install.packages("cowplot")
install.packages("dplyr")
install.packages("stringr")
install.packages("ggsignif")
install.packages("ggpubr")
install.packages("gridExtra")
library(ggplot2)
library(cowplot)
library(dplyr)
library(tidyverse)
library(stringr)
library(ggsignif)
library(ggpubr)
library(gridExtra)
#### end ####
#### Data analysis functions ####
## Calculate the standard error of the mean for each row of a data frame.
## `df`: first column is the distance axis (ignored); every other column is
## one measurement series (nucleus). A series containing any NA is dropped
## entirely (complete-case across series, matching the original behaviour of
## na.omit() on the transposed frame). Returns one SEM per row, unnamed.
SEMrows <- function(df){
  # Drop the distance column; work on the value columns only.
  vals <- as.matrix(df[, -1, drop = FALSE])
  # Exclude any series (column) that contains an NA anywhere.
  keep <- !apply(is.na(vals), 2, any)
  vals <- vals[, keep, drop = FALSE]
  # SEM per row: sqrt(var / n) across the retained series.
  # (Replaces the previous transpose + 1:length() growth loop.)
  unname(apply(vals, 1, function(row) sqrt(var(row) / length(row))))
}
## For each genotype data frame in a named list, compute the per-distance
## mean and SEM across nuclei, tag rows with "<genotype> (N = <n nuclei>)",
## and bind everything into one long data frame (`Name` = list element name).
MeanDF <- function(DataFrame){
  MeanList <- list()
  for (l in seq_along(DataFrame)) { # seq_along: safe for empty lists
    df <- DataFrame[[l]]
    dfmeanse <- data.frame(Distance = df[["Distance (um)"]],
                           Mean = rowMeans(df[, -1], na.rm = TRUE))
    # SEMrows() drops the distance column itself, so pass the full frame.
    dfmeanse$SEM <- SEMrows(df)
    Nnumber <- ncol(df) - 1 # number of nuclei (all columns but distance)
    dfmeanse$Genotype <- paste(names(DataFrame)[l], " (N = ", Nnumber, ")", sep = "")
    MeanList[[l]] <- dfmeanse
    names(MeanList)[l] <- names(DataFrame[l])
  }
  Output <- dplyr::bind_rows(MeanList, .id = 'Name')
  return(Output)
}
## Per-distance mean and SEM across nuclei for a single genotype data frame
## (first column = distance, remaining columns = nuclei).
NonListMeanDF <- function(df, Genotype){
  dfmeanse <- data.frame(Distance = df[["Distance (um)"]],
                         Mean = rowMeans(df[, -1], na.rm = TRUE))
  # BUG FIX: SEMrows() strips the first column internally, so it must be
  # given the full data frame (as MeanDF does). The previous SEMrows(df[,-1])
  # discarded the first nucleus column from the SEM calculation.
  dfmeanse$SEM <- SEMrows(df)
  Nnumber <- ncol(df) - 1
  dfmeanse$Genotype <- paste(Genotype, " (N = ", Nnumber, ")", sep = "")
  return(dfmeanse)
}
# Create a list of dataframes, named for each truncation, where the data contained
# is the wild type data collected alongside that truncation only.
# Column names are assumed to start with the imaging date ("<date> ...");
# `GenotypeToSep` is the genotype (e.g. wild type) whose columns get split.
SepDates <- function(DataList,GenotypeToSep){
  # First pass: collect the unique imaging dates present in each genotype.
  Dates <- list()
  for (g in 1:length(DataList)) {
    df <- DataList[[g]]
    ColNames <- colnames(df)
    ColNames <- ColNames[-1] # drop the distance column
    for (c in 1:length(ColNames)) {
      # Keep only the date token (text before the first space).
      SplitName <- str_split(ColNames[c]," ")
      SplitName <- SplitName[[1]]
      ColNames[c] <- SplitName[1]
    }
    dfDates <- unique(ColNames)
    Dates[[g]] <- dfDates
    names(Dates)[g] <- names(DataList[g])
    rm(dfDates, ColNames, SplitName)
  }
  # Second pass: for every other genotype, pull the GenotypeToSep columns
  # whose names match that genotype's imaging dates.
  data <- DataList[[GenotypeToSep]]
  dataD <- as.data.frame(data[,1]) # distance column, restored to its name
  names(dataD)[names(dataD) == "data[, 1]"] <- names(data)[1]
  SubDates <- Dates
  SubDates[[GenotypeToSep]] <- NULL
  Output <- list()
  for (c in 1:length(SubDates)) {
    GenotypeDates <- SubDates[[c]]
    dataG <- dataD
    for (d in 1:length(GenotypeDates)) {
      # grepl on full column names matches the date prefix.
      df <- data[,grepl(GenotypeDates[d], names(data))]
      dataG <- cbind(dataG, df)
      rm(df)
    }
    Output[[c]] <- dataG
    names(Output)[c] <- names(SubDates[c])
    rm(dataG,GenotypeDates)
  }
  rm(data, dataD, SubDates, Dates)
  return(Output)
}
# Create a list of dataframes, named for each truncation, where each dataframe
# has the data of the truncation and wild type data from the corresponding days
# organised into the means and SEM for all the bands imaged.
WTvsMutByDate <- function(SeparatedWTdata, AllData){
  OutputList <- list()
  for (l in seq_along(SeparatedWTdata)) { # seq_along: safe for empty lists
    Genotype <- names(SeparatedWTdata)[l]
    # Mean/SEM for the date-matched wild-type controls and for the mutant.
    WT <- NonListMeanDF(SeparatedWTdata[[l]],"Wild Type")
    Mutant <- NonListMeanDF(AllData[[Genotype]],Genotype)
    # Stack control and mutant rows for direct plotting.
    OutputList[[l]] <- rbind(WT,Mutant)
    names(OutputList)[l] <- Genotype
  }
  return(OutputList)
}
# This function will take the RelInt values and separate them into a list of
# dateframes for each truncation. The dataframes are named for their mutant
# and contain RelInt values for the truncation and wild type data taken alongside.
# Nucleus names are assumed to start with the imaging date ("<date> ...").
RelIntSepDates <- function(RelIntDataFrame, GenotypeToSep) {
  Mutants <- dplyr::filter(RelIntDataFrame, Genotype != GenotypeToSep)
  WildTypes <- dplyr::filter(RelIntDataFrame, Genotype == GenotypeToSep)
  MutantGenotypes <- unique(Mutants$Genotype)
  OutputList <- list()
  for (t in seq_along(MutantGenotypes)) {
    MutantData <- dplyr::filter(RelIntDataFrame, Genotype == MutantGenotypes[t])
    # Date token of every nucleus (text before the first space).
    Dates <- vapply(str_split(MutantData$Nucleus, " "),
                    function(parts) parts[1], character(1))
    UniqueDates <- unique(Dates)
    # Collect the control nuclei imaged on each of those dates.
    # FIX: the previous `for (d in 2:length(UniqueDates))` broke when a
    # genotype had a single imaging date (2:1 iterates c(2, 1), indexing NA).
    df <- NULL
    for (d in seq_along(UniqueDates)) {
      df <- rbind(df, WildTypes[grep(UniqueDates[d], WildTypes$Nucleus), ])
    }
    df <- rbind(df, MutantData)
    OutputList[[t]] <- df
    names(OutputList)[t] <- MutantGenotypes[t]
  }
  return(OutputList)
}
#### end ####
#### Plotting functions ####
## Define a basic theme for the plots
theme_jmt <- function(){theme(
  text = element_text(family = "sans", size = 25), # default sans-serif font, large size
  plot.title = element_text(hjust=0.5, face = 'plain'), # centre the title (plain face)
  panel.grid.major = element_line(colour = "grey80", size = 0.1), # thin light-grey major gridlines
  panel.grid.minor = element_blank(), # strip minor gridlines
  panel.background = element_blank(), # strip background
  axis.text.x = element_text(angle = 0, vjust = 0.5, hjust=0.5), # horizontal, centred x labels
  axis.line = element_line(colour = "black", size = 1.5), # add axis lines in black
  axis.ticks = element_line(colour = "black", size = 1.5),
  axis.ticks.length=unit(2,"mm")) # add tick marks in black
}
## Where required, labels are the Title, X axis and Y axis labels defined in a character string (in order)
## e.g. Labels <- c(title = "Normalised to Background value", x = "Distance (um)", y = "Normalised fluorescence")
## these are then read into the function.
## Plot mean +/- SEM fluorescence profiles (Distance/Mean/SEM columns),
## grouped and coloured by the Genotype column; SEM shown as a ribbon.
band_plot <- function(Data, Labels){
  ggplot(Data, aes(Distance,Mean,group = Genotype,col=Genotype,shape=Genotype,ymin=Mean-SEM,ymax=Mean+SEM)) + theme_jmt() +
    geom_ribbon(linetype = "dashed", fill = "grey90",alpha = 0.5, size = 0.25) + geom_line(size = 0.5) + geom_point() +
    labs(title = Labels[[1]], x = Labels[[2]], y = Labels[[3]]) +
    scale_color_brewer(palette = "Set2")
}
## Define a function to plot the relative intensities of the bands:
## violin + narrow boxplot (outliers hidden) + jittered points per genotype.
RelInt_boxviolin <- function(Data, Labels){
  ggplot(Data, aes(Genotype,RelativeBandIntensity,col=Genotype,shape=Genotype)) + theme_jmt() +
    geom_violin() + geom_boxplot(width=0.1, show.legend = FALSE,colour = "Grey60",alpha = 0.5, outlier.shape = NA) + geom_jitter() +
    labs(title = Labels[[1]], x = Labels[[2]], y = Labels[[3]]) +
    scale_color_brewer(palette = "Set2")
}
## Jittered points per genotype with a crossbar marking the group mean.
RelInt_crossbar <- function(Data, Labels){
  ggplot(Data,aes(Genotype,RelativeBandIntensity,col=Genotype,shape=Genotype)) + theme_jmt() +
    geom_jitter(position = position_jitterdodge()) +
    stat_summary(fun="mean", geom="crossbar", width=0.5) + theme(legend.key=element_blank()) +
    labs(title = Labels[[1]], x = Labels[[2]], y = Labels[[3]]) +
    scale_color_brewer(palette = "Set2")
}
#### end ####
# Set location of data and where plots will be saved to:
mainDir <- "/Users/jonathantownson/Documents/PhD/Images/Analysed image data/Optogenetics data/newBLITz in light incubator/SuHGFP with locus tag"
setwd(mainDir)
# Load the background values
DF <- read.csv("/Users/jonathantownson/Documents/PhD/Images/Analysed image data/Optogenetics data/newBLITz in light incubator/SuHGFP with locus tag/SuHGFP background values.csv", head = TRUE, sep = ",")
MamBackgroundValues <- data.frame(Nucleus=DF[,1], MamBG=rowMeans(DF[,-1]))
rm(DF)
# Create the path where the data is stored
DataPath <- "/Users/jonathantownson/Documents/PhD/Images/Analysed image data/Optogenetics data/newBLITz in light incubator/SuHGFP with locus tag/SuH"
#### Read in and normalise the data ####
# Create the folder names for the data
FolderNames <- list.dirs(DataPath, full.names = FALSE, recursive = FALSE)
# Read in all the csv files for each condition
# Each folder = one condition; each CSV = one nucleus' intensity profile
# (X = position, Y = intensity). Profiles are merged on X into one wide
# data frame per condition, columns named after the file (minus ".csv" etc.).
MamRawData <- list()
for (f in 1:length(FolderNames)){
  Path <- paste(DataPath,FolderNames[f], sep ="/")
  Files <- list.files(Path, pattern = "*.csv")
  FilePaths <- list()
  for (i in 1:length(Files)){
    FilePaths[[i]] <- paste(Path,Files[i], sep ="/")
  }
  rm(Path)
  rm(Files)
  df <- read.csv(FilePaths[[1]])
  names(df)[names(df) == 'Y'] <- str_sub(basename(FilePaths[[1]]), end=-9)
  # NOTE(review): `2:length(FilePaths)` fails if a folder contains a single
  # CSV (2:1 iterates c(2, 1), indexing out of bounds) -- confirm all
  # folders hold >= 2 files.
  for (i in 2:length(FilePaths)){
    Nextdf <- read.csv(FilePaths[[i]])
    df <- merge(df,Nextdf, by="X", all = TRUE)
    names(df)[names(df) == 'Y'] <- str_sub(basename(FilePaths[[i]]), end=-9)
    rm(Nextdf)
  }
  names(df)[names(df) == 'X'] <- "Distance (um)"
  MamRawData[[f]] <- df
  rm(df)
  rm(FilePaths)
  names(MamRawData)[f] <- FolderNames[f]
}
rm(FolderNames)
# Per-nucleus baseline subtraction: subtract the mean of the six lowest
# profile values from each column.
MamminsixSub <- list()
for (s in 1:length(MamRawData)) {
  df <- MamRawData[[s]]
  ColNames <- colnames(df)
  for (t in 2:length(ColNames)) { # skip column 1 (distance)
    # Mean of the six smallest values of this nucleus' profile.
    dfOrdered <- df[order(df[ColNames[[t]]])[1:6],]
    NormValue <- mean(dfOrdered[[ColNames[[t]]]])
    rm(dfOrdered)
    df[ColNames[[t]]] <- df[ColNames[[t]]] - NormValue
    rm(NormValue)
  }
  MamminsixSub[[s]] <- df
  names(MamminsixSub)[s] <- names(MamRawData[s])
  rm(df,ColNames)
}
# Per-nucleus background normalisation: divide each profile by the nucleus'
# separately measured background value (matched by name via grepl).
MamBgNorm <- list()
for (s in 1:length(MamRawData)) {
  df <- MamRawData[[s]]
  ColNames <- colnames(df)
  for (t in 2:length(ColNames)) { # skip column 1 (distance)
    NormValue <- MamBackgroundValues$MamBG[which(grepl(ColNames[[t]], MamBackgroundValues$Nucleus))]
    df[ColNames[[t]]] <- df[ColNames[[t]]] / NormValue
    rm(NormValue)
  }
  MamBgNorm[[s]] <- df
  names(MamBgNorm)[s] <- names(MamRawData[s])
  rm(df,ColNames)
}
# Per-distance mean/SEM summaries for each normalisation variant.
MeanMamRawData <- MeanDF(MamRawData)
MeanMamBgNorm <- MeanDF(MamBgNorm)
MeanMamMinSixSub <- MeanDF(MamminsixSub)
#### end ####
#### Calculate fold increase of band over background ####
# Band = mean of the 5 central profile values; background = mean of the 6
# lowest values per nucleus; relative intensity = band / background.
RelIntMam <- NULL
for (s in 1:length(MamRawData)) {
  df <- MamRawData[[s]]
  df$`Distance (um)` <- NULL
  Band <- colMeans(slice(df,((ceiling(nrow(df)/2)-2):(ceiling(nrow(df)/2)+2))))
  # Collect, per nucleus, its six smallest profile values.
  dfOrdered <- NULL
  for (c in 1:length(colnames(df))) {
    Column <- as.data.frame(df[,c])
    MinSix <- Column[order(Column$`df[, c]`),][1:6]
    dfOrdered <- cbind(dfOrdered,MinSix)
  }
  Background <- colMeans(dfOrdered)
  RelInt <- as.data.frame(Band/Background)
  RelInt$Genotype <- names(MamRawData[s])
  RelInt$Nucleus <- rownames(RelInt)
  RelInt <- rename(RelInt, RelativeBandIntensity = `Band/Background`)
  RelIntMam <- rbind(RelIntMam,RelInt)
  rm(RelInt, Band, Background, df)
}
## Here we will use the mean of the 5 middle values and divide by the mean of the 5 values at
## each edge of the band (10 values in total).
RelIntMamEdge <- NULL
for (s in 1:length(MamRawData)) {
  df <- MamRawData[[s]]
  df$`Distance (um)` <- NULL
  Band <- colMeans(slice(df,((ceiling(nrow(df)/2)-2):(ceiling(nrow(df)/2)+2))))
  # Background = mean of the first 5 and last 5 points of each profile.
  Background <- colMeans(rbind(slice_head(df, n=5),slice_tail(df,n=5)))
  RelInt <- as.data.frame(Band/Background)
  RelInt$Genotype <- names(MamRawData[s])
  RelInt$Nucleus <- rownames(RelInt)
  RelInt <- rename(RelInt, RelativeBandIntensity = `Band/Background`)
  RelIntMamEdge <- rbind(RelIntMamEdge,RelInt)
  rm(RelInt, Band, Background, df)
}
# Band intensity relative to the separately measured nuclear background
# (MamBackgroundValues), matched per nucleus by name.
RelIntMamBg <- NULL
for (s in seq_along(MamRawData)) {
  df <- MamRawData[[s]]
  df$`Distance (um)` <- NULL
  ColNames <- colnames(df)
  # Mean of the 5 central profile values per nucleus = band intensity.
  BandValues <- as.data.frame(colMeans(slice(df,((ceiling(nrow(df)/2)-2):(ceiling(nrow(df)/2)+2)))))
  for (t in seq_along(ColNames)) {
    Band <- BandValues[t,]
    Background <- MamBackgroundValues$MamBG[which(grepl(ColNames[[t]], MamBackgroundValues$Nucleus))]
    RelInt <- as.data.frame(Band/Background)
    RelInt$Genotype <- names(MamRawData[s])
    RelInt$Nucleus <- ColNames[[t]]
    RelInt <- rename(RelInt, RelativeBandIntensity = `Band/Background`)
    # FIX: dropped `BandSubBg` from rm() -- it is never created in this loop
    # (it belongs to the RelIntMamBg0 variant) and removing it produced a
    # "object not found" warning on every iteration.
    rm(Band, Background)
    RelIntMamBg <- rbind(RelIntMamBg,RelInt)
  }
  rm(RelInt, BandValues, df, ColNames)
}
# Fraction of band signal remaining after subtracting the slide background:
# (band - background) / band, per nucleus.
RelIntMamBg0 <- NULL
for (s in 1:length(MamRawData)) {
  df <- MamRawData[[s]]
  df$`Distance (um)` <- NULL
  ColNames <- colnames(df)
  # Band signal: mean of the 5 central rows of each nucleus profile.
  BandValues <- as.data.frame(colMeans(slice(df,((ceiling(nrow(df)/2)-2):(ceiling(nrow(df)/2)+2)))))
  for (t in 1:length(ColNames)) {
    Band <- BandValues[t,]
    # Slide background measured separately per nucleus, matched by name.
    # NOTE(review): the name is used as a regex pattern; partial matches
    # are possible if one nucleus name is a prefix of another.
    Background <- MamBackgroundValues$MamBG[which(grepl(ColNames[[t]], MamBackgroundValues$Nucleus))]
    BandSubBg <- Band - Background
    RelInt <- as.data.frame(BandSubBg/Band)
    RelInt$Genotype <- names(MamRawData[s])
    RelInt$Nucleus <- ColNames[[t]]
    RelInt <- rename(RelInt, RelativeBandIntensity = `BandSubBg/Band`)
    rm(Band, Background, BandSubBg)
    RelIntMamBg0 <- rbind(RelIntMamBg0,RelInt)
  }
  rm(RelInt, BandValues, df, ColNames)
}
# Background-subtracted band intensity: band minus the mean of the 6 lowest
# values of each nucleus profile (absolute difference, not a ratio).
RelIntMamMinSixSub <- NULL
for (s in 1:length(MamRawData)) {
  df <- MamRawData[[s]]
  df$`Distance (um)` <- NULL
  # Band signal: mean of the 5 central rows of each profile.
  Band <- colMeans(slice(df,((ceiling(nrow(df)/2)-2):(ceiling(nrow(df)/2)+2))))
  # Collect the 6 smallest values of each column as that nucleus's background.
  dfOrdered <- NULL
  for (c in 1:length(colnames(df))) {
    Column <- as.data.frame(df[,c])
    MinSix <- Column[order(Column$`df[, c]`),][1:6]
    dfOrdered <- cbind(dfOrdered,MinSix)
  }
  Background <- colMeans(dfOrdered)
  RelInt <- as.data.frame(Band-Background)
  RelInt$Genotype <- names(MamRawData[s])
  # Row names of the difference data frame are the nucleus (column) names.
  RelInt$Nucleus <- rownames(RelInt)
  RelInt <- rename(RelInt, RelativeBandIntensity = `Band - Background`)
  RelIntMamMinSixSub <- rbind(RelIntMamMinSixSub,RelInt)
  rm(RelInt, Band, Background, df,dfOrdered,c,Column,MinSix)
}
# Relative change of the band over background: (band - background)/background,
# using the 6 lowest profile values per nucleus as the background.
RelIntChangeMam <- NULL
for (s in 1:length(MamRawData)) {
  df <- MamRawData[[s]]
  df$`Distance (um)` <- NULL
  # Band signal: mean of the 5 central rows of each profile.
  Band <- colMeans(slice(df,((ceiling(nrow(df)/2)-2):(ceiling(nrow(df)/2)+2))))
  # Collect the 6 smallest values of each column as that nucleus's background.
  dfOrdered <- NULL
  for (c in 1:length(colnames(df))) {
    Column <- as.data.frame(df[,c])
    MinSix <- Column[order(Column$`df[, c]`),][1:6]
    dfOrdered <- cbind(dfOrdered,MinSix)
  }
  Background <- colMeans(dfOrdered)
  RelInt <- as.data.frame((Band-Background)/Background)
  RelInt$Genotype <- names(MamRawData[s])
  # Row names of the ratio data frame are the nucleus (column) names.
  RelInt$Nucleus <- rownames(RelInt)
  RelInt <- rename(RelInt, RelativeBandIntensity = `(Band - Background)/Background`)
  RelIntChangeMam <- rbind(RelIntChangeMam,RelInt)
  rm(RelInt, Band, Background, df, c, MinSix, dfOrdered, Column)
}
# Relative change of the band over the separately measured slide background:
# (band - background)/background, background from MamBackgroundValues.
RelIntChangeMamBg <- NULL
for (s in seq_along(MamRawData)) {
  df <- MamRawData[[s]]
  df$`Distance (um)` <- NULL
  ColNames <- colnames(df)
  # Band signal: mean of the 5 central rows of each nucleus profile.
  BandValues <- as.data.frame(colMeans(slice(df,((ceiling(nrow(df)/2)-2):(ceiling(nrow(df)/2)+2)))))
  for (t in seq_along(ColNames)) {
    Band <- BandValues[t,]
    # NOTE(review): nucleus name is used as a regex pattern; confirm names
    # cannot partially match each other.
    Background <- MamBackgroundValues$MamBG[which(grepl(ColNames[[t]], MamBackgroundValues$Nucleus))]
    RelInt <- as.data.frame((Band-Background)/Background)
    RelInt$Genotype <- names(MamRawData[s])
    RelInt$Nucleus <- ColNames[[t]]
    RelInt <- rename(RelInt, RelativeBandIntensity = `(Band - Background)/Background`)
    # Was rm(Band, Background, BandSubBg): BandSubBg is never created in this
    # loop, so the old call warned "object 'BandSubBg' not found" every pass.
    rm(Band, Background)
    RelIntChangeMamBg <- rbind(RelIntChangeMamBg,RelInt)
  }
  rm(RelInt, BandValues, df, ColNames)
}
#### end ####
#### Saved plots ####
# Produce and save the figure-4 band plots. band_plot() is presumably defined
# earlier in this file — confirm it is in scope before running this section.
setwd("/Users/jonathantownson/Documents/PhD/CSL localisation Paper/Figures for paper/Files/Fig 4")
## Create and save band plots of the bands compared to their own wt data
df <- MeanMamRawData %>% dplyr::filter(Name == "Dark Control" | Name == "2 hours")
# Fix the x-axis/legend ordering of the two conditions.
XAxisOrder <- c("Dark Control (N = 24)", "2 hours (N = 33)")
df <- df %>% mutate(Genotype = fct_relevel(Genotype, XAxisOrder))
Labs <- c(title = expression(paste("eGFP::CSL at ",italic("E(spl)-C"))), x = "Distance (μm)", y = "eGFP::CSL \nFluorescence (AU)")
band_plot(df, Labs) + theme(legend.position = "bottom", legend.title = element_blank()) +
  guides(colour = guide_legend(nrow=2, byrow = TRUE)) +
  scale_y_continuous(breaks = seq(0, 800, by = 200), limits = c(0,800))
ggsave("RawData Band plot of 2 hours and dark control CSL.jpg", device = "jpeg", dpi = "retina",
       width = 15, height = 15, units = "cm")
# Same two conditions, but plotted from the background-subtracted data.
df <- MeanMamMinSixSub %>% dplyr::filter(Name == "Dark Control" | Name == "2 hours")
XAxisOrder <- c("Dark Control (N = 24)", "2 hours (N = 33)")
df <- df %>% mutate(Genotype = fct_relevel(Genotype, XAxisOrder))
Labs <- c(title = " ", x = "Distance (μm)", y = "EGFP::CSL Fluorescence (AU)")
band_plot(df, Labs) + theme(legend.position = "bottom", legend.title = element_blank()) +
  guides(colour = guide_legend(nrow=1, byrow = TRUE)) +
  scale_y_continuous(breaks = seq(0, 800, by = 200), limits = c(0,600))
ggsave("MinSix Band plot of 2 hours and dark control CSL.jpg", device = "jpeg", dpi = "retina",
       width = 12, height = 12, units = "cm")
#### end ####
|
097266d6cada64a1d3651b6bc1e4331c4949a4fe
|
14a1e75d1ac6e4a92c8677e4421b80a7314cf986
|
/03 Visualizations/storing_io_qualitative.R
|
6f570e52c42699d10de8c2a0c55fdbe9dc46661f
|
[] |
no_license
|
syairahusna/DV_RProject3
|
52037da4420b113cafa8dda882f730311af2d01d
|
1647439cf3bff30931b1cf9d94399f66719e9309
|
refs/heads/master
| 2021-01-23T08:11:03.280291
| 2015-03-05T01:32:51
| 2015-03-05T01:32:51
| 31,343,466
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 363
|
r
|
storing_io_qualitative.R
|
# Stack two stored plots (assumes `l` is a list of two plot objects created
# earlier — confirm against the calling script) on a 6x12 viewport grid.
# Uncomment the png()/dev.off() pair to write the result to file instead of
# the active graphics device.
#png("/Users/Syairah/DataVisualization/DV_RProject3/00 Doc/categoricals_io_1.png", width = 25, height = 10, units = "in", res = 72)
#grid.newpage()
pushViewport(viewport(layout = grid.layout(6, 12)))
print(l[[1]], vp = viewport(layout.pos.row = 1:3, layout.pos.col = 1:12))
print(l[[2]], vp = viewport(layout.pos.row = 4:6, layout.pos.col = 1:12))
#dev.off()
|
ba5e5c93c6e164312fcd05161261cc825ead908f
|
0f75c0b71eb84cb36db6f826c4b0a42cab522f4f
|
/170926 R8000 barcode-1/171006 R8000-1 and -2 hiseq.R
|
9eb7fb1cb313ab7434381dc453f8eb57fe694eac
|
[] |
no_license
|
cliff-b/Raw-R-files
|
6360713213ae87bf227e4b4661ca71ed0546644f
|
5296f8e6d6b5b97c3175223418da9aafb43e6ae8
|
refs/heads/master
| 2021-05-02T07:39:30.760921
| 2018-02-08T23:43:34
| 2018-02-08T23:43:34
| 120,834,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,298
|
r
|
171006 R8000-1 and -2 hiseq.R
|
################################################# Standard libraries + viridis to make things pretty + extrafont for comic sans #################################################
library(dplyr)
library(tidyr)
library(extrafont)
library(viridis)
library(cowplot)
################################################# Read in the data and mash it together #################################################
setwd("/Users/Cliff/Documents/Kosuri Lab/NGS files/170926 R8000 barcode-1/")
# Read one sc-counts file and annotate it with its condition label plus the
# total number of reads in that library (used later for RPM normalisation).
# Input files have three unnamed columns: barcode, count, collapsed barcode.
# (Replaces four identical copy-pasted read blocks.)
read_sc_counts <- function(path, condition) {
  counts <- read.table(path, header = FALSE)
  counts <- data.frame(counts, rep(condition, nrow(counts)), rep(sum(counts$V2), nrow(counts)))
  colnames(counts) <- c("barcode", "count", "collapsed_bc", "condition", "readsinlib")
  counts
}
R8000_RNA_0 <- read_sc_counts("sc-counts_R8000_0h_RNA.txt", "RNA-0")
R8000_DNA_0 <- read_sc_counts("sc-counts_R8000_0h_DNA.txt", "DNA-0")
R8000_RNA_6 <- read_sc_counts("sc-counts_R8000_6h_RNA.txt", "RNA-6")
R8000_DNA_6 <- read_sc_counts("sc-counts_R8000_6h_DNA.txt", "DNA-6")
# One long table with all four libraries.
R8000_full_DF <- bind_rows(R8000_RNA_0, R8000_DNA_0, R8000_RNA_6, R8000_DNA_6)
# Per-condition read-count distributions: violin + narrow boxplot + mean dot.
# NOTE(review): fun.y is the pre-ggplot2-3.3 spelling of fun — update if the
# installed ggplot2 warns about it.
g.raw <- ggplot(R8000_full_DF, aes(condition, count, fill = condition)) + geom_violin() + labs(title = "171004 R8000-1+2 counts distribution", y = "Number of reads per barcode", x = "Condition") + scale_fill_brewer(palette = "Set1") +
  geom_boxplot(width = 0.025, fill = "Black") + stat_summary(fun.y = mean, geom = "point", color = "White", show.legend = FALSE)
g.raw
# Build one wide table of counts per barcode across the four libraries.
R8000_join <- full_join(R8000_RNA_0, R8000_RNA_6, by = "barcode")
R8000_join <- select(R8000_join, barcode, "RNA_0" = count.x, "RNA_6" = count.y)
R8000_join <- full_join(R8000_join, R8000_DNA_0, by = "barcode")
R8000_join <- full_join(R8000_join, R8000_DNA_6, by = "barcode")
R8000_join <- select(R8000_join, barcode, RNA_0, RNA_6, "DNA_0" = count.x, "DNA_6" = count.y)
# Barcodes missing from a library were simply not observed: count them as 0.
R8000_join$RNA_0[is.na(R8000_join$RNA_0)] <-0
R8000_join$RNA_6[is.na(R8000_join$RNA_6)] <- 0
R8000_join$DNA_0[is.na(R8000_join$DNA_0)] <-0
R8000_join$DNA_6[is.na(R8000_join$DNA_6)] <- 0
# Pairwise log-log scatter plots between the four libraries.
g.R1vR6 <- ggplot(R8000_join, aes(RNA_0, RNA_6)) + geom_point(alpha = 0.1) + scale_x_log10() + scale_y_log10() + geom_density2d() + labs(title = "171004 R8000 RNA counts at 0h vs RNA counts at 6h", x = "RNA barcode counts at 0h", y = "RNA barcode counts at 6h")
g.R1vR6
g.R1vD1 <- ggplot(R8000_join, aes(DNA_0, RNA_0)) + geom_point(alpha = 0.1) + scale_y_log10() + scale_x_log10() + labs(title = "171004 R8000 RNA counts at 0h vs DNA counts at 0h", x = "DNA barcode counts at 0h", y = "RNA barcode counts at 0h")
g.R1vD1
g.R6vD6 <- ggplot(R8000_join, aes(DNA_6, RNA_6)) + geom_point(alpha = 0.1) + scale_y_log10() + scale_x_log10() + geom_density2d() + labs(title = "171004 R8000 RNA counts at 6h vs DNA counts at 6h", x = "DNA barcode counts at 6h", y = "RNA barcode counts at 6h")
g.R6vD6
g.D1vD6 <- ggplot(R8000_join, aes(DNA_0, DNA_6)) + geom_point(alpha = 0.1) + scale_x_log10() + scale_y_log10() + labs(title = "171004 R8000 DNA counts at 0h vs DNA counts at 0h", x = "DNA barcode counts at 0h", y = "DNA barcode counts at 6h")
g.D1vD6
# Combine replicate 1 and replicate 2 count tables.
# NOTE(review): R8000_full_DF_2 is never created in this file — presumably it
# comes from the replicate-2 analysis script and must exist in the workspace.
R8000_full_DF_3 <- full_join(R8000_full_DF,R8000_full_DF_2, by = c("barcode", "condition"))
R8000_full_DF_3$count.x[is.na(R8000_full_DF_3$count.x)] <- 0
R8000_full_DF_3$count.y[is.na(R8000_full_DF_3$count.y)] <- 0
# Fill library-size NAs by carrying the last observation forward.
# NOTE(review): na.locf() is from the zoo package, which is not loaded above.
R8000_full_DF_3$readsinlib.y <- na.locf(R8000_full_DF_3$readsinlib.y)
R8000_full_DF_3$readsinlib.x <- na.locf(R8000_full_DF_3$readsinlib.x)
R8000_full_DF_3 <- mutate(R8000_full_DF_3, count = count.x + count.y, readsinlib = readsinlib.x + readsinlib.y)
# Barcode -> peptide-pair mapping; keep only barcodes whose X and Y mapping
# reads agree, then split the combined label into its components.
R8000_map <- read.table("R8000-c1.x-y.txt", skip = 1)
colnames(R8000_map) <- c("barcode", "X_peptide", "Y_peptide", "reads")
R8000_map <- filter(R8000_map, X_peptide != "<NA>", Y_peptide != "<NA>") %>%
  filter(as.character(X_peptide) == as.character(Y_peptide)) %>%
  select(-Y_peptide) %>%
  # Labels appear to follow "Xname-Xgroup+Yname-Ygroup_codon" — confirm
  # against the mapping pipeline output.
  separate(X_peptide, into = c("X_peptide", "X_codon"), sep = "_") %>%
  separate(X_peptide, into = c("X_peptide","Y_peptide"), sep = "\\+") %>%
  separate(X_peptide, into = c("X_peptide", "X_group"), sep = "-") %>%
  separate(Y_peptide, into = c("Y_peptide", "Y_group"), sep = "-")
# Reads-per-million normalisation, then attach the peptide-pair map.
R8000_full_DF_3 <- mutate(R8000_full_DF_3, "nCount" = count/readsinlib*1000000)
R8000_full_DF_3 <- inner_join(R8000_full_DF_3, R8000_map, by = c("barcode"))
# Compare normalised counts between the two biological replicates.
# NOTE(review): R8000_full_DF_2 is not created in this file — presumably from
# the replicate-2 script; confirm it is loaded first.
all_DF <- full_join(R8000_full_DF,R8000_full_DF_2, by = c("barcode","X_peptide","Y_peptide", "X_group", "Y_group", "condition"))
# Barcodes absent from one replicate get a normalised count of 0.
all_DF$nCount.x[is.na(all_DF$nCount.x)] <- 0
all_DF$nCount.y[is.na(all_DF$nCount.y)] <- 0
# Pearson correlation between replicates, rendered on the plot via plotmath.
# (These were previously defined AFTER the plot that references them, which
# errors on a fresh run; they are now computed first.)
corr <- cor(all_DF$nCount.x, all_DF$nCount.y)
corrlab <- data.frame( x = 30, y = 90, label = paste("italic(r)==",round(corr, digits = 3)))
g.all <- ggplot(all_DF, aes(nCount.x, nCount.y, color = condition)) + geom_point(alpha = 0.1) + labs (title = "170927 R8000 Biological replicates normalized counts", x = "Replicate 1 (RPM)", y = "Replicate 2 (RPM)") +
  geom_abline(slope = 1) + scale_x_continuous(limits = c(0,115)) + scale_y_continuous(limits = c(0,115)) + geom_text(data = corrlab, aes(x=x, y=y, label= label), color = "Black", parse = TRUE)
g.all
# Split the annotated counts into RNA and DNA tables and compute per-barcode
# RNA/DNA ratios (scaled x100).
R8000RNA <- filter(R8000_full_DF_3, grepl("RNA", R8000_full_DF_3$condition)) %>%
  select(X_peptide, X_group, X_codon, Y_peptide, condition, barcode, "rcount" = nCount ) %>%
  separate(condition, into = c("template", "condition"))
# Per-construct RNA summary. (Fixed: this previously grouped the undefined
# R100RNA object, copied from the R100 analysis script.)
sumRNA <- group_by(R8000RNA, X_peptide, Y_peptide, condition) %>%
  summarise(mean_RNA = mean(rcount), sd_R = sd(rcount), bcnum = n_distinct(barcode))
R8000DNA <- filter(R8000_full_DF_3, grepl("DNA", R8000_full_DF_3$condition)) %>%
  select(X_peptide, X_group, X_codon, Y_peptide, condition, barcode, "dcount" = nCount ) %>%
  separate(condition, into = c("template", "condition"))
# Per-construct DNA summary. (Fixed: previously grouped the undefined R100DNA.)
sumDNA <- group_by(R8000DNA, X_peptide, Y_peptide, condition) %>%
  summarise(mean_DNA = mean(dcount), sd_D = sd(dcount), bcnum_D = n_distinct(barcode))
# Join RNA onto DNA per barcode; a barcode present in DNA but missing from RNA
# is treated as having an RNA count (and hence ratio) of 0.
R8000RD <- right_join(R8000RNA,R8000DNA, by = c("X_peptide","Y_peptide","barcode","condition", "X_group", "X_codon")) %>%
  mutate("RNADNA" = rcount/dcount*100) %>%
  select(-template.x, -template.y) %>%
  mutate("RNADNA" = ifelse(is.na(RNADNA),0,RNADNA)) %>%
  mutate("rcount" = ifelse(is.na(rcount),0,rcount))
# Per-construct (peptide pair x condition x group) summary statistics.
sumR100RD <- group_by(R8000RD, X_peptide, Y_peptide, condition, X_group) %>%
  summarise(mean_RNA = mean(rcount), sd_R = sd(rcount), mean_DNA = mean(dcount), sd_D = sd(dcount), mean_RD = mean(RNADNA), sd_RD = sd(RNADNA), med_RD = median(RNADNA), sum_DNA = sum(dcount), sum_RNA = sum(rcount), bcnum = n_distinct(barcode), SEM = sd(RNADNA)/sqrt(n()))
# NOTE(review): summarise() keeps only the grouping variables plus the new
# summaries, so sumR100RD has no X_codon column — the three filters below
# cannot work as written. X_codon likely needs to be added to the group_by()
# above; confirm intent before changing.
sumR8000C1 <- filter(sumR100RD, X_codon == "C1")
sumR8000C2 <- filter(sumR100RD, X_codon == "C2")
sumR8000C3 <- filter(sumR100RD, X_codon == "C3")
# Wide table with one set of summary columns per codon usage (.x = C1,
# .y = C2, unsuffixed = C3).
sumR8full <- full_join(sumR8000C1,sumR8000C2, by = c("X_peptide", "Y_peptide","condition","X_group"))
sumR8full <- full_join(sumR8full, sumR8000C3, by = c("X_peptide","Y_peptide","condition","X_group"))
sumR8full$mean_RD.x[is.na(sumR8full$mean_RD.x)] <- 0
sumR8full$mean_RD.y[is.na(sumR8full$mean_RD.y)] <- 0
sumR8full$mean_RD[is.na(sumR8full$mean_RD)] <- 0
# Codon-usage comparison plots.
# NOTE(review): corr1/corr2/corr3 are each defined AFTER the plot that uses
# them, so on a fresh run each ggplot call errors on the missing label data
# frame; corr1 is also assigned twice (the first value is overwritten).
# Reorder so each corrN precedes its plot if this script is re-run from clean.
g.RDC1C2 <- ggplot(filter(sumR8full, bcnum.x >5, bcnum.y >5), aes(mean_DNA.x, mean_DNA.y, color = log(bcnum.x + bcnum.y))) + geom_point(alpha = 0.2) +geom_text(data = corr1, aes(x=x, y=y, label= label), color = "Black", parse = TRUE) +
  labs(title = "170927 R8000 codon usage 1 vs codon usage 2 mean DNA (bc >5) ", x = "Mean DNA (Codon 1)", y = "Mean DNA (Codon 2)") + scale_x_log10(limits = c(0.1,30)) + scale_y_log10(limits = c(0.1,30)) +scale_color_viridis(name = "Total number \nof barcodes" )
g.RDC1C2
corr1 <- cor(all_DF$nCount.x, all_DF$nCount.y)
corr1 <- data.frame( x = 30, y = 300, label = paste("italic(r)==",round(cor(filter(sumR8full, !is.na(mean_RD.x), !is.na(mean_RD.y))$mean_RD.x, filter(sumR8full, !is.na(mean_RD.x), !is.na(mean_RD.y))$mean_RD.y), digits = 3)))
g.RDC1C3 <- ggplot(filter(sumR8full, bcnum.x > 5, bcnum > 5), aes(mean_RD.x, mean_RD, color = condition)) + geom_point(alpha = 0.2) +geom_text(data = corr2, aes(x=x, y=y, label= label), color = "Black", parse = TRUE) +
  labs(title = "170927 R8000 codon usage 1 vs codon usage 3 mean RNA/DNA (bc>5)", x = "Mean RNA/DNA (Codon 1)", y = "Mean RNA/DNA (Codon 3)") + scale_x_log10() + scale_y_log10()
g.RDC1C3
corr2 <- data.frame( x = 30, y = 90, label = paste("italic(r)==",round(cor(filter(sumR8full, bcnum.x >5, bcnum >5)$mean_RD.x, filter(sumR8full, bcnum.x > 5, bcnum > 5)$mean_RD), digits = 3)))
g.RDC2C3 <- ggplot(filter(sumR8full, bcnum.y > 5, bcnum > 5), aes(mean_RD.y, mean_RD, color = condition)) + geom_point(alpha = 0.2) +geom_text(data = corr3, aes(x=x, y=y, label= label), color = "Black", parse = TRUE) +
  labs(title = "170927 R8000 codon usage 2 vs codon usage 3 mean RNA/DNA (bc>5)", x = "Mean RNA/DNA (Codon 2)", y = "Mean RNA/DNA (Codon 3)") + scale_x_log10() + scale_y_log10()
g.RDC2C3
corr3 <- data.frame( x = 30, y = 90, label = paste("italic(r)==",round(cor(filter(sumR8full, bcnum.y >5, bcnum >5)$mean_RD.y, filter(sumR8full, bcnum.y > 5, bcnum > 5)$mean_RD), digits = 3)))
# Compare O constructs at 6h against Mason et al. measured Tm values.
# (mason2, sumR10drob, and drobnak are built in the section further below —
# this script appears to have been run interactively, out of file order.)
sumR10comp <- filter(sumR100RD, grepl("O", X_peptide), is.na(X_group), condition == 6)
sumR10comp <- left_join(sumR10comp, mason2, by = c("X_peptide","Y_peptide"))
colrs <- c("c1" = "#FF0000","c2"="#000000")
g.mason <- ggplot(sumR10comp, aes(mean_RD, Tm)) + geom_point(aes(color="c2")) + labs(title = "171003 R8000 O and Drobnak constructs mean RNA/DNA vs reported Tm", x = "Mean RNA/DNA", y = "Reported Tm") + geom_smooth(formula = y ~ log(x), span = 9) +
  geom_point(data = sumR10drob, aes(mean_RD, Tm, color = "c1")) + scale_color_manual(name = "Data set", breaks = c("c1","c2"), values = colrs, labels = c("Drobnak","Mason")) + scale_x_log10(limits = c(40, 1001))
g.mason
sumR10drob <- filter(right_join(sumR100RD, drobnak, by = c("X_peptide", "Y_peptide")), !is.na(X_peptide), condition == 6)
g.drob <- ggplot(sumR10drob, aes(mean_RD, Tm)) + geom_point()
g.drob
# Backfill X_group for constructs whose group is encoded as a name suffix
# (these rows had no "-group" part for separate() to split on).
sumR100RD$X_group[is.na(sumR100RD$X_group) & grepl("mSN$",sumR100RD$X_peptide)] <- "mSN"
sumR100RD$X_group[is.na(sumR100RD$X_group) & grepl("Hb$",sumR100RD$X_peptide)] <- "Hb"
sumR100RD$X_group[is.na(sumR100RD$X_group) & grepl("Hq$",sumR100RD$X_peptide)] <- "Hq"
sumR100RD$X_group[is.na(sumR100RD$X_group) & grepl("bH1$",sumR100RD$X_peptide)] <- "bH1"
sumR100RD$X_group[is.na(sumR100RD$X_group) & grepl("mS$",sumR100RD$X_peptide)] <- "mS"
sumR100RD$X_group[is.na(sumR100RD$X_group) & grepl("A$",sumR100RD$X_peptide)] <- "A"
sumR100RD$X_group[is.na(sumR100RD$X_group) & grepl("SH$",sumR100RD$X_peptide)] <- "SH"
# Facet labels for the 0h / 6h induction panels.
induction_labels <- c("0" = "0 hours induction RNA/DNA", "6" = "6 hours induction RNA/DNA")
os <-filter(sumR100RD, grepl("O",X_peptide))
# Heat map of median RNA/DNA per O-construct pair, faceted by group x time.
g.sumR100RD <- ggplot(filter(sumR100RD, grepl("O",X_peptide), bcnum > 5), aes(X_peptide, Y_peptide)) + geom_tile(aes(fill=log(med_RD))) + facet_grid(X_group~condition, labeller = labeller(condition=induction_labels), scales = "free") +
  theme(text=element_text(family="Calibri"), strip.text=element_text(size = 10), strip.background=element_rect(fill = "White"), axis.text.x = element_text(angle=90,size =5),
        axis.text.y = element_text(size = 5), legend.title=element_text(size = 8), legend.title.align=0.5, legend.text=element_text(size =8)) +
  scale_fill_viridis(option="viridis", name = "Med \nRNA to DNA ", na.value="white") + labs(title="Roman's 8000 RNA/DNA median barcode ratio O constructs (bc >5)", x="X peptide", y = "Y peptide")
g.sumR100RD
# NOTE(review): R100_full_DF and R100_raw_map are not created in this file —
# they look like leftovers from the R100 analysis script; confirm before
# running these plots.
g.bccount <- ggplot(R100_full_DF, aes(count)) + geom_histogram() + labs(x = "Number of reads", y = "number of barcodes", title = "170525 R100-2-2 Hiseq Barcode counts") + scale_x_continuous(limits = c(0, 1000))
g.bccount
R100_raw_map <- full_join(R100_raw_map, R100_DNA_0, by ="barcode")
R100_raw_map$count.x[is.na(R100_raw_map$count.x)] <- 1
R100_raw_map$count.y[is.na(R100_raw_map$count.y)] <- 1
g.mapcount <- ggplot(R100_raw_map, aes(count.y, count.x)) + geom_point(alpha = 0.1) + scale_x_log10() + scale_y_log10() + labs(x = "Barcode-seq DNA barcode counts", y = "Mapping run barcode counts", title = "170526 comparison of barcode counts >=0")
g.mapcount
################################################### To compare against previously published data ###############################################
# Mason et al. measured-Tm matrix of O-peptide pairs, reshaped to long format.
mason <- read.csv("mason-measured-Tm-both.csv", header = TRUE)
mason_names <- c(paste("O",1:16, sep = ""))
# NOTE(review): the colnames assignment (16 names) and the mason[2:17]
# column selection imply different column counts in the raw csv — verify the
# file layout; one of these two lines may need adjusting.
colnames(mason) <- mason_names
rownames(mason) <- mason_names
mason <- mason[2:17]
mason <- data.frame(rownames(mason),mason)
mason2 <- gather(mason, rownames.mason.)
colnames(mason2) <- c("X_peptide","Y_peptide","Tm")
# Drobnak et al. table: duplicate with X/Y swapped so each pair appears in
# both orientations, then stack. (Fixed: the colnames() call previously ran
# BEFORE drobnak2 was created, which errors; drobnak2 is now built first.)
drobnak <- read.csv("Drobnak-JACS-Tm.csv", header = TRUE)
drobnak2 <- select(drobnak, Y_peptide,X_peptide,Tm,bcipa_Tm)
colnames(drobnak2) <- c("X_peptide","Y_peptide","Tm","bcipa_Tm")
drobnak <- rbind(drobnak,drobnak2)
|
18ab2705485dd9924bf70ec488746b7676b9eee3
|
21fd652b847972a38051ef89e25854cb0623d8d4
|
/libs/is_p_star.r
|
e7e1b3c200741e9d701dcd5f4b56350e4d66d5b9
|
[] |
no_license
|
douglask3/VCF_vs_sites
|
be73e6b3cd99ed06357b5214f7f23bb64a570fde
|
71b358a8f21fd6f338df71b97a1a04efb5463709
|
refs/heads/master
| 2021-11-26T03:30:03.582583
| 2021-11-12T16:44:05
| 2021-11-12T16:44:05
| 219,031,403
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 192
|
r
|
is_p_star.r
|
is_p_star <- function(P) {
  # Map a (scalar) p-value to its conventional significance marker:
  #   (0.1, 1] -> ' ', (0.05, 0.1] -> '.', (0.01, 0.05] -> '*',
  #   (0.001, 0.01] -> '**', <= 0.001 -> '***'
  if (P <= 0.001) {
    '***'
  } else if (P <= 0.01) {
    '**'
  } else if (P <= 0.05) {
    '*'
  } else if (P <= 0.1) {
    '.'
  } else {
    ' '
  }
}
|
97615a9b2583c6b28723a0a914c73b487a5673ec
|
36813b0527adde7f6e483df15d52b448a66a530b
|
/neural_tune.R
|
56d1cbd4608ccc41be351eaa7ceba6fcfe27b0fb
|
[] |
no_license
|
shay-lebovitz/301-3_Regression
|
18fa68a133e9cba72c87cdbd228c16894740c1c2
|
5617798be94b7f6cdec8f233cb26b2020416e706
|
refs/heads/main
| 2023-05-13T10:21:59.974448
| 2021-05-28T18:37:40
| 2021-05-28T18:37:40
| 362,631,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 721
|
r
|
neural_tune.R
|
# Load package(s) ----
library(tidyverse)
library(tidymodels)

# Load required objects ----
# Provides `rec` (recipe) and `train_folds` (resamples) used below.
load('data/setup.rda')

# Model specification ----
# Single-layer neural network for regression; tune the number of hidden
# units and the weight-decay penalty. MaxNWts raises nnet's weight cap.
nnet_spec <- mlp(
  mode = 'regression',
  hidden_units = tune('hidden_units'),
  penalty = tune('penalty')
) %>%
  set_engine('nnet', MaxNWts = 1500)

# Tuning grid ----
# Regular grid over both tuning parameters, 5 levels each.
nnet_grid <- grid_regular(parameters(nnet_spec), levels = 5)

# Workflow ----
slnn_workflow <- workflow() %>%
  add_recipe(rec) %>%
  add_model(nnet_spec)

# Tuning/fitting ----
slnn_tune <- tune_grid(slnn_workflow, resamples = train_folds, grid = nnet_grid)

# Write out results & workflow (object names are part of the saved file's
# interface — keep slnn_tune / slnn_workflow unchanged)
save(slnn_tune, slnn_workflow, file = 'data/slnn_tune.rda')
|
fa02cbd1b8d8fcc4d94ce6b47717d7f6c302b9b5
|
3a0f710ff92138256bf7fbc16849462b1244542a
|
/plot3.R
|
d7c9461a29c2089d257a4fa86a981e7048bfe991
|
[] |
no_license
|
gabrielaolivou/ExData_Plotting1
|
e19cf6f7a732beca2175aa00933f379f8f829de4
|
196d8cf6a72cb17ad054c74b8935e4da6a6abfa5
|
refs/heads/master
| 2020-04-02T05:05:53.494235
| 2018-10-22T02:59:08
| 2018-10-22T02:59:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,186
|
r
|
plot3.R
|
#Set Working Directory
setwd("C:/Users/Gabriela Olivo/Desktop/Coursera/Exploratory Data Analysis/Week 1/Project Course")
#Read Data. The raw file encodes missing values as "?", which would otherwise
#coerce the sub-metering columns to character; read them as NA instead.
HPC_data <- read.table("./data/household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".", na.strings="?")
# Filter Dates: keep only the two days of interest
HPC_data2 <- HPC_data[HPC_data$Date %in% c("1/2/2007","2/2/2007") ,]
#Format Date-Time
HPC_datetime <- strptime(paste(HPC_data2$Date, HPC_data2$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
HPC_data2 <- cbind(HPC_data2,HPC_datetime)
#Format Data Variable: ensure the three sub-metering series are numeric
HPC_data2$subMetering1 <- as.numeric(HPC_data2$Sub_metering_1)
HPC_data2$subMetering2 <- as.numeric(HPC_data2$Sub_metering_2)
HPC_data2$subMetering3 <- as.numeric(HPC_data2$Sub_metering_3)
#Plot 3: the three sub-metering series over time.
#(Fixed: previously the raw Sub_metering_* columns were plotted and the
#numeric subMetering* conversions above were never used.)
plot(HPC_data2$subMetering1~HPC_data2$HPC_datetime, type="l", xlab="", ylab="Energy sub metering")
lines(HPC_data2$subMetering2~HPC_data2$HPC_datetime, col="red")
lines(HPC_data2$subMetering3~HPC_data2$HPC_datetime, col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=1, col=c("black", "red", "blue"), cex=0.65)
#Save as PNG: copy the on-screen plot to plot3.png
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
|
824c255f1428ea5c6d60935a5c625a1c23567e8a
|
6a192ede793c1aa1c63c5dc388d87f704d4c8f02
|
/Unit6 Clustering/stocks.R
|
3443cc45773bc3de79157fd8d79a0b5664b5e793
|
[] |
no_license
|
jpalbino/-AnalyticsEdgeMITx
|
454341f8e69cc615ab7b8286ba9bbee9df66026d
|
9d6b152f5ac2ca9728e5363face0ba0e50da879c
|
refs/heads/master
| 2021-01-18T08:12:17.814468
| 2019-11-05T21:33:18
| 2019-11-05T21:33:18
| 57,457,758
| 0
| 0
| null | 2016-04-30T19:14:06
| 2016-04-30T19:14:05
| null |
UTF-8
|
R
| false
| false
| 15,551
|
r
|
stocks.R
|
#
# The Analytics Edge
# Unit6 Clustering
# PREDICTING STOCK RETURNS WITH CLUSTER-THEN-PREDICT
# Calin Uioreanu
#
#In the second lecture sequence this week, we heard about cluster-then-predict, a methodology in which you first cluster observations and then build cluster-specific prediction models. In the lecture sequence, we saw how this methodology helped improve the prediction of heart attack risk. In this assignment, we'll use cluster-then-predict to predict future stock prices using historical stock data.
#
#When selecting which stocks to invest in, investors seek to obtain good future returns. In this problem, we will first use clustering to identify clusters of stocks that have similar returns over time. Then, we'll use logistic regression to predict whether or not the stocks will have positive future returns.
#
#For this problem, we'll use StocksCluster.csv, which contains monthly stock returns from the NASDAQ stock exchange. The NASDAQ is the second-largest stock exchange in the world, and it lists many technology companies. The stock price data used in this problem was obtained from infochimps, a website providing access to many datasets.
#
#Each observation in the dataset is the monthly returns of a particular company in a particular year. The years included are 2000-2009. The companies are limited to tickers that were listed on the exchange for the entire period 2000-2009, and whose stock price never fell below $1. So, for example, one observation is for Yahoo in 2000, and another observation is for Yahoo in 2001. Our goal will be to predict whether or not the stock return in December will be positive, using the stock returns for the first 11 months of the year.
#
#This dataset contains the following variables:
#
#ReturnJan = the return for the company's stock during January (in the year of the observation).
#ReturnFeb = the return for the company's stock during February (in the year of the observation).
#ReturnMar = the return for the company's stock during March (in the year of the observation).
#ReturnApr = the return for the company's stock during April (in the year of the observation).
#ReturnMay = the return for the company's stock during May (in the year of the observation).
#ReturnJune = the return for the company's stock during June (in the year of the observation).
#ReturnJuly = the return for the company's stock during July (in the year of the observation).
#ReturnAug = the return for the company's stock during August (in the year of the observation).
#ReturnSep = the return for the company's stock during September (in the year of the observation).
#ReturnOct = the return for the company's stock during October (in the year of the observation).
#ReturnNov = the return for the company's stock during November (in the year of the observation).
#PositiveDec = whether or not the company's stock had a positive return in December (in the year of the observation). This variable takes value 1 if the return was positive, and value 0 if the return was not positive.
#For the first 11 variables, the value stored is a proportional change in stock value during that month. For instance, a value of 0.05 means the stock increased in value 5% during the month, while a value of -0.02 means the stock decreased in value 2% during the month.
# Start from a clean workspace. NOTE(review): rm(list=ls()) wipes everything
# in the session — acceptable for a standalone course script, but avoid in
# shared code.
rm(list=ls())
gc()
# Monthly NASDAQ stock returns, 2000-2009; one row per company-year.
stocks = read.csv('StocksCluster.csv');
str(stocks)
#'data.frame':	11580 obs. of  12 variables:
# $ ReturnJan  : num  0.0807 -0.0107 0.0477 -0.074 -0.031 ...
# $ ReturnFeb  : num  0.0663 0.1021 0.036 -0.0482 -0.2127 ...
# $ ReturnMar  : num  0.0329 0.1455 0.0397 0.0182 0.0915 ...
# $ ReturnApr  : num  0.1831 -0.0844 -0.1624 -0.0247 0.1893 ...
# $ ReturnMay  : num  0.13033 -0.3273 -0.14743 -0.00604 -0.15385 ...
# $ ReturnJune : num  -0.0176 -0.3593 0.0486 -0.0253 -0.1061 ...
# $ ReturnJuly : num  -0.0205 -0.0253 -0.1354 -0.094 0.3553 ...
# $ ReturnAug  : num  0.0247 0.2113 0.0334 0.0953 0.0568 ...
# $ ReturnSep  : num  -0.0204 -0.58 0 0.0567 0.0336 ...
# $ ReturnOct  : num  -0.1733 -0.2671 0.0917 -0.0963 0.0363 ...
# $ ReturnNov  : num  -0.0254 -0.1512 -0.0596 -0.0405 -0.0853 ...
# $ PositiveDec: int  0 0 0 1 1 1 1 0 0 0 ...
boxplot(stocks)
# Proportion of observations with a positive December return (~54.6%).
prop.table(table(stocks$PositiveDec))
#        0         1
#0.453886 0.546114
#What is the maximum correlation between any two return variables in the dataset?
cor(stocks)
# The correlation matrix below was pasted from the console; it is now
# commented out so the script parses (previously these raw output lines were
# uncommented and broke the file with syntax errors).
#             ReturnJan  ReturnFeb   ReturnMar   ReturnApr   ReturnMay ReturnJune   ReturnJuly    ReturnAug    ReturnSep  ReturnOct  ReturnNov PositiveDec
# ReturnJan  1.00000000  0.0667746 -0.09049680 -0.03767801 -0.04441142  0.0922383 -0.081429765 -0.022792019 -0.026437153  0.1429772  0.0676323  0.00472852
# ReturnFeb  0.06677458  1.0000000 -0.15598326 -0.19135192 -0.09552092  0.1699945 -0.061778509  0.131559786  0.043501771 -0.0873243 -0.1546583 -0.03817318
# ReturnMar  -0.09049680 -0.1559833  1.00000000  0.00972629 -0.00389279 -0.0859055  0.003374160 -0.022005400  0.076518327 -0.0119238  0.0373235  0.02240866
# ReturnApr  -0.03767801 -0.1913519  0.00972629  1.00000000  0.06382250 -0.0110278  0.080631932 -0.051756051 -0.028920972  0.0485400  0.0317618  0.09435353
# ReturnMay  -0.04441142 -0.0955209 -0.00389279  0.06382250  1.00000000 -0.0210745  0.090850264 -0.033125658  0.021962862  0.0171667  0.0480466  0.05820193
# ReturnJune  0.09223831  0.1699945 -0.08590549 -0.01102775 -0.02107454  1.0000000 -0.029152600  0.010710526  0.044747269 -0.0226360 -0.0652705  0.02340975
# ReturnJuly -0.08142976 -0.0617785  0.00337416  0.08063193  0.09085026 -0.0291526  1.000000000  0.000713756  0.068947804 -0.0547089 -0.0483738  0.07436421
# ReturnAug  -0.02279202  0.1315598 -0.02200540 -0.05175605 -0.03312566  0.0107105  0.000713756  1.000000000  0.000740714 -0.0755946 -0.1164890  0.00416697
# ReturnSep  -0.02643715  0.0435018  0.07651833 -0.02892097  0.02196286  0.0447473  0.068947804  0.000740714  1.000000000 -0.0580792 -0.0197198  0.04163029
# ReturnOct   0.14297723 -0.0873243 -0.01192376  0.04854003  0.01716673 -0.0226360 -0.054708909 -0.075594561 -0.058079236  1.0000000  0.1916728 -0.05257496
# ReturnNov   0.06763233 -0.1546583  0.03732353  0.03176184  0.04804659 -0.0652705 -0.048373837 -0.116489034 -0.019719800  0.1916728  1.0000000 -0.06234656
# PositiveDec 0.00472852 -0.0381732  0.02240866  0.09435353  0.05820193  0.0234097  0.074364210  0.004166966  0.041630286 -0.0525750 -0.0623466  1.00000000
# Largest pairwise correlation between months: ReturnNov + ReturnOct (~0.19).
# ReturnNov + ReturnOct
#Which month (from January through November) has the largest mean return across all observations in the dataset?
colMeans(stocks)
#  ReturnJan   ReturnFeb   ReturnMar   ReturnApr   ReturnMay  ReturnJune  ReturnJuly   ReturnAug   ReturnSep   ReturnOct   ReturnNov PositiveDec
# 0.01263160 -0.00760478  0.01940234  0.02630815  0.02473659  0.00593790  0.00305086  0.01619826 -0.01472077  0.00565084  0.01138744  0.54611399
#INITIAL LOGISTIC REGRESSION MODEL
# split the data into a training set and testing set, 70% in the training set and 30% in the testing set
library(caTools)
set.seed(144)
spl = sample.split(stocks$PositiveDec, SplitRatio = 0.7)
stocksTrain = subset(stocks, spl == TRUE)
stocksTest = subset(stocks, spl == FALSE)
# train a logistic regression model on all 11 monthly return predictors
StocksModel = glm(PositiveDec ~ ., data=stocksTrain, family=binomial)
# overall accuracy on the training set, using a threshold of 0.5
table(stocksTrain$PositiveDec, predict(StocksModel, type="response")>0.5)
#    FALSE TRUE
#  0   990 2689
#  1   787 3640
# accuracy = (true negatives + true positives) / n
(3640+990)/nrow(stocksTrain)
# 0.5711818 -> not great
# overall accuracy on the testing set, using a threshold of 0.5
table(stocksTest$PositiveDec, predict(StocksModel, newdata=stocksTest, type="response")>0.5)
#    FALSE TRUE
#  0   417 1160
#  1   344 1553
(1553+417)/nrow(stocksTest)
# 0.5670697
# accuracy on the test set of a baseline model that always predicts the most common outcome (PositiveDec = 1)
1897/nrow(stocksTest)
#[1] 0.5460564
#CLUSTERING STOCKS
# Remove the outcome before clustering: clusters must be built from the
# predictors only, since the outcome is unknown at prediction time.
limitedTrain = stocksTrain
limitedTrain$PositiveDec = NULL
limitedTest = stocksTest
limitedTest$PositiveDec = NULL
#In cluster-then-predict, our final goal is to predict the dependent variable, which is unknown to us at the time of prediction. Therefore, if we need to know the outcome value to perform the clustering, the methodology is no longer useful for prediction of an unknown outcome value.
#
#This is an important point that is sometimes mistakenly overlooked. If you use the outcome value to cluster, you might conclude your method strongly outperforms a non-clustering alternative. However, this is because it is using the outcome to determine the clusters, which is not valid.
#preProcess command from the caret package, which normalizes variables by subtracting by the mean and dividing by the standard deviation.
library(caret)
# Note: preProcess is fit on the TRAINING data only, then applied to both
# sets, so the test set is standardized with training-set means/sds.
preproc = preProcess(limitedTrain)
normTrain = predict(preproc, limitedTrain)
normTest = predict(preproc, limitedTest)
# Training means are exactly 0 (by construction); test means are only close.
colMeans(normTrain)
#    ReturnJan     ReturnFeb     ReturnMar     ReturnApr     ReturnMay    ReturnJune    ReturnJuly     ReturnAug     ReturnSep
# 1.330682e-17 -1.007214e-17 -8.431792e-18 -1.460048e-19 -1.000259e-18 -7.332770e-18  3.549057e-18  2.078051e-17 -6.781814e-18
#    ReturnOct     ReturnNov
#-5.161583e-18 -6.497723e-18
colMeans(normTest)
#    ReturnJan     ReturnFeb     ReturnMar     ReturnApr     ReturnMay    ReturnJune    ReturnJuly     ReturnAug     ReturnSep
#-0.0004185886 -0.0038621679  0.0058299150 -0.0363806373  0.0265120925  0.0431544402  0.0060164183 -0.0497332436  0.0293887872
#    ReturnOct     ReturnNov
# 0.0296723768  0.0171281833
# Why is the mean ReturnJan variable much closer to 0 in normTrain than in normTest?
summary(normTrain$ReturnJan)
#    Min.  1st Qu.   Median     Mean  3rd Qu.     Max.
#-4.57700 -0.48270 -0.07055  0.00000  0.35900 18.06000
summary(normTest$ReturnJan)
#     Min.   1st Qu.    Median      Mean   3rd Qu.      Max.
#-3.744000 -0.485700 -0.066860 -0.000419  0.357700  8.413000
#From mean(stocksTrain$ReturnJan) and mean(stocksTest$ReturnJan), we see that the average return in January is slightly higher in the training set than in the testing set. Since normTest was constructed by subtracting by the mean ReturnJan value from the training set, this explains why the mean value of ReturnJan is slightly negative in normTest.
# CLUSTERING STOCKS
k = 3
set.seed(144)
km = kmeans(normTrain, centers = k)
table(km$cluster)
# 1 2 3
#3157 4696 253
# we can use the flexclust package to obtain training set and testing set cluster assignments for our observations
library(flexclust)
km.kcca = as.kcca(km, normTrain)
clusterTrain = predict(km.kcca)
clusterTest = predict(km.kcca, newdata=normTest)
table(clusterTrain)
#clusterTrain
# 1 2 3
#3157 4696 253
table(clusterTest)
#clusterTest
# 1 2 3
#1298 2080 96
# Using the subset function, build data frames stocksTrain1, stocksTrain2, and stocksTrain3, containing the elements in the stocksTrain data frame assigned to clusters 1, 2, and 3, respectively (be careful to take subsets of stocksTrain, not of normTrain). Similarly build stocksTest1, stocksTest2, and stocksTest3 from the stocksTest data frame.
stocksTrain1 = subset(stocksTrain, clusterTrain == 1)
stocksTrain2 = subset(stocksTrain, clusterTrain == 2)
stocksTrain3 = subset(stocksTrain, clusterTrain == 3)
mean(stocksTrain1$PositiveDec)
#[1] 0.6024707
mean(stocksTrain2$PositiveDec)
#[1] 0.5140545
mean(stocksTrain3$PositiveDec)
#[1] 0.4387352
#CLUSTER-SPECIFIC PREDICTIONS
#Build logistic regression models StocksModel1, StocksModel2, and StocksModel3, which predict PositiveDec using all the other variables as independent variables. StocksModel1 should be trained on stocksTrain1, StocksModel2 should be trained on stocksTrain2, and StocksModel3 should be trained on stocksTrain3.
#
#Which variables have a positive sign for the coefficient in at least one of StocksModel1, StocksModel2, and StocksModel3 and a negative sign for the coefficient in at least one of StocksModel1, StocksModel2, and StocksModel3? Select all that apply.
StocksModel1 = glm(PositiveDec ~ ., data=stocksTrain1, family="binomial")
StocksModel2 = glm(PositiveDec ~ ., data=stocksTrain2, family="binomial")
StocksModel3 = glm(PositiveDec ~ ., data=stocksTrain3, family="binomial")
coefficients(StocksModel1)
#(Intercept) ReturnJan ReturnFeb ReturnMar ReturnApr ReturnMay ReturnJune ReturnJuly ReturnAug ReturnSep ReturnOct ReturnNov
# 0.17223985 0.02498357 -0.37207369 0.59554957 1.19047752 0.30420906 -0.01165375 0.19769226 0.51272941 0.58832685 -1.02253506 -0.74847186
coefficients(StocksModel2)
#(Intercept) ReturnJan ReturnFeb ReturnMar ReturnApr ReturnMay ReturnJune ReturnJuly ReturnAug ReturnSep ReturnOct ReturnNov
# 0.1029318 0.8845148 0.3176221 -0.3797811 0.4929105 0.8965492 1.5008787 0.7831487 -0.2448602 0.7368522 -0.2775631 -0.7874737
coefficients(StocksModel3)
# (Intercept) ReturnJan ReturnFeb ReturnMar ReturnApr ReturnMay ReturnJune ReturnJuly ReturnAug ReturnSep ReturnOct ReturnNov
#-0.181895809 -0.009789345 -0.046883260 0.674179495 1.281466189 0.762511555 0.329433917 0.774164370 0.982605385 0.363806823 0.782242086 -0.873752144
stocksTest1 = subset(stocksTest, clusterTest == 1)
stocksTest2 = subset(stocksTest, clusterTest == 2)
stocksTest3 = subset(stocksTest, clusterTest == 3)
PredictTest1 = predict(StocksModel1, newdata=stocksTest1, type="response")>0.5
PredictTest2 = predict(StocksModel2, newdata=stocksTest2, type="response")>0.5
PredictTest3 = predict(StocksModel3, newdata=stocksTest3, type="response")>0.5
table(stocksTest1$PositiveDec, PredictTest1)
table(stocksTest2$PositiveDec, PredictTest2)
table(stocksTest3$PositiveDec, PredictTest3)
table(stocksTest1$PositiveDec, predict(StocksModel1, newdata=stocksTest1, type="response")>0.5)
#
# FALSE TRUE
# 0 30 471
# 1 23 774
(30+774)/nrow(stocksTest1)
#0.6194145
table(stocksTest2$PositiveDec, predict(StocksModel2, newdata=stocksTest2, type="response")>0.5)
#
# FALSE TRUE
# 0 388 626
# 1 309 757
(388+757)/nrow(stocksTest2)
#0.5504808
table(stocksTest3$PositiveDec, predict(StocksModel3, newdata=stocksTest3, type="response")>0.5)
#
# FALSE TRUE
# 0 49 13
# 1 21 13
(49+13)/nrow(stocksTest3)
# 0.6458333
#To compute the overall test-set accuracy of the cluster-then-predict approach, we can combine all the test-set predictions into a single vector and all the true outcomes into a single vector:
AllPredictions = c(PredictTest1, PredictTest2, PredictTest3)
AllOutcomes = c(stocksTest1$PositiveDec, stocksTest2$PositiveDec, stocksTest3$PositiveDec)
#What is the overall test-set accuracy of the cluster-then-predict approach, again using a threshold of 0.5?
table(AllPredictions, AllOutcomes)
# AllOutcomes
#AllPredictions 0 1
# FALSE 467 353
# TRUE 1110 1544
#> (467+1544)/(467+353+1110+1544)
#[1] 0.5788716
#We see a modest improvement over the original logistic regression model. Since predicting stock returns is a notoriously hard problem, this is a good increase in accuracy. By investing in stocks for which we are more confident that they will have positive returns (by selecting the ones with higher predicted probabilities), this cluster-then-predict model can give us an edge over the original logistic regression model.
|
842c23bfa41725f7e3e762206ca54ca5e01b8136
|
9f57e0ad44b78d809c262fa0ffb659232fdb8d5e
|
/implementations/histogram.R
|
7a62600695efd3170262edad691c704a478bd9b9
|
[] |
no_license
|
abhi8893/Intensive-R
|
c3439c177776f63705546c6666960fbc020c47e8
|
e340ad775bf25d5a17435f8ea18300013195e2c7
|
refs/heads/master
| 2020-09-22T00:57:36.118504
| 2020-08-31T09:23:57
| 2020-08-31T09:23:57
| 224,994,015
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 181
|
r
|
histogram.R
|
# Implement a histogram function
# Option to provide variable bin width by specifying a list of ranges
# and the corresponding number of bins
# - as a wrapper over original hist
|
26a8bf609cd25a9241e7bccb7e05f74cc44307b1
|
91f3da7a0175133a73489f16784faac1f7b37251
|
/R-scripts/08_primary_producer_analysis.R
|
57a086739d29b50e6dd5084767d97a8e218379e3
|
[
"MIT"
] |
permissive
|
JoeyBernhardt/fatty-acids
|
7114457388dce867a636567b85ce36f73a1628f8
|
d81cc65fb9bb952f3f953f6a00ff83d1ef4b18a4
|
refs/heads/master
| 2021-07-25T08:05:03.635526
| 2020-10-11T15:28:43
| 2020-10-11T15:28:43
| 224,154,297
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,344
|
r
|
08_primary_producer_analysis.R
|
#### This script contains code used to answer the question: Do DHA, EPA and ALA concentrations in primary producers vary by ecosystem type?
#### last updated by JB October 8 2020, adapted from code written by Ryan Shipley (any mistakes are JB's fault!)
library(betareg)
library(tidyverse)
library(plotrix)
library(cowplot)
theme_set(theme_cowplot())
### The producer data here contain one observation per genus, per ecosystem, which is a genus-level average across studies, treatments etc.
producers <- read_csv("data-processed/fa-producers-finest-taxonomic-resolution-october.csv") %>%
filter(!is.na(mean_concentration))
View(producers)
# DHA ---------------------------------------------------------------------
### Are DHA concentrations different among ecosystem types?
dha <- producers %>%
filter(fa == "dha") %>%
mutate(mean_concentration = mean_concentration / 100)
dha_n <- length((dha$mean_concentration))
dha2 <- dha %>%
mutate(transformed_concentration = ((mean_concentration*(dha_n - 1)) + 0.5) / dha_n)
mod <- betareg(formula = transformed_concentration ~ ecosystem, data = dha2, link = "logit")
summary(mod)
#### look at the raw data
dha2 %>%
group_by(ecosystem) %>%
summarise_each(funs(mean, std.error), mean_concentration) %>%
ggplot(aes(x = ecosystem, y = mean)) +
geom_pointrange(aes(x = ecosystem, ymin = mean - std.error, ymax = mean + std.error))
library(emmeans)
library(multcompView)
library(lsmeans)
library(multcomp)
marginal <- emmeans(mod, ~ ecosystem)
pairwise_comps <- pairs(marginal, adjust="tukey") ### don't get the same results as Ryan :(
# EPA ---------------------------------------------------------------------
### Are epa concentrations different among ecosystem types?
epa <- producers %>%
filter(fa == "epa") %>%
mutate(mean_concentration = mean_concentration / 100)
epa_n <- length((epa$mean_concentration))
epa2 <- epa %>%
mutate(transformed_concentration = ((mean_concentration*(epa_n - 1)) + 0.5) / epa_n)
mod <- betareg(formula = transformed_concentration ~ ecosystem, data = epa2, link = "logit")
summary(mod)
epa2 %>%
group_by(ecosystem) %>%
summarise_each(funs(mean, std.error), mean_concentration) %>%
ggplot(aes(x = ecosystem, y = mean)) +
geom_pointrange(aes(x = ecosystem, ymin = mean - std.error, ymax = mean + std.error))
marginal <- emmeans(mod, ~ ecosystem)
pairwise_comps <- pairs(marginal, adjust="tukey") ### don't get the same results as Ryan :(
# ALA ---------------------------------------------------------------------
### Are ala concentrations different among ecosystem types?
ala <- producers %>%
filter(fa == "ala") %>%
mutate(mean_concentration = mean_concentration / 100)
View(ala)
ala_n <- length((ala$mean_concentration))
ala2 <- ala %>%
mutate(transformed_concentration = ((mean_concentration*(ala_n - 1)) + 0.5) / ala_n)
mod <- betareg(formula = transformed_concentration ~ ecosystem, data = ala2, link = "logit")
summary(mod)
ala2 %>%
group_by(ecosystem) %>%
summarise_each(funs(mean, std.error), mean_concentration) %>%
ggplot(aes(x = ecosystem, y = mean)) +
geom_pointrange(aes(x = ecosystem, ymin = mean - std.error, ymax = mean + std.error))
marginal <- emmeans(mod, ~ ecosystem)
pairwise_comps <- pairs(marginal, adjust="tukey") ### don't get the same results as Ryan :(
|
72412e5656460719ee069b6d402e3151a82ed6b6
|
f18c938eca55595667cb039ebdb577de72e1fe3f
|
/plot4.R
|
bfa6904a439667115be13d23b9bf6c868600dee1
|
[] |
no_license
|
srajiv007/ExData_Plotting1
|
0406d4214451b2048b9f51e7c0ba41f84f6ac0b8
|
435899216f54d082d6612792b6c633fb1dc9ba01
|
refs/heads/master
| 2021-01-17T22:23:14.833309
| 2015-11-08T13:44:44
| 2015-11-08T13:44:44
| 45,744,005
| 0
| 0
| null | 2015-11-07T16:22:43
| 2015-11-07T16:22:43
| null |
UTF-8
|
R
| false
| false
| 2,683
|
r
|
plot4.R
|
# Step 1 : Read the data from file into a data.frame
power_data<-read.csv("data/household_power_consumption.txt", header=TRUE, sep=";")
# Step 2: Define the filter dates
start_dt<-as.Date("2007-02-01", format = "%Y-%m-%d")
end_dt<-as.Date("2007-02-02", format = "%Y-%m-%d")
library(datasets)
# Step 3: Transform power data to include new DateTime column & transform existing character Date to class "Date"
power_data <- transform(power_data, DateTime=as.POSIXct(paste0(Date, " ", Time), format="%d/%m/%Y %H:%M:%S"), Date=as.Date(Date, format = "%d/%m/%Y"))
# Step 4: Define the filter criteria vector, filtering on the transformed Date feature
filter_criteria <- power_data$Date >= start_dt & power_data$Date <=end_dt
# Step 5: subset power data using criteria vector
filter_data <- power_data[filter_criteria, ]
# Step 6: Transform filtered data frame's Global_active_power, Global_reactive_power, Voltage and Sub metering features to numeric type
filter_data <- transform(filter_data, Global_active_power=as.numeric(as.character(Global_active_power)),
Global_reactive_power=as.numeric(as.character(Global_reactive_power)),
Voltage=as.numeric(as.character(Voltage)))
filter_data <- transform(filter_data, Sub_metering_1=as.numeric(as.character(Sub_metering_1)),
Sub_metering_2=as.numeric(as.character(Sub_metering_2)),
Sub_metering_3=as.numeric(as.character(Sub_metering_3)))
# Step 7: Generate plots using png device
png(filename = "plot4.png", height = 480, width = 480)
par(mfrow=c(2,2))
# Plot 1: Global_active_power vs. DateTime
with(filter_data, plot(DateTime, Global_active_power, type="l", main = "", xlab="", ylab = "Global Active Power", col = "black"))
# Plot 2: Voltage vs. DateTime
with(filter_data, plot(DateTime, Voltage, type="l", main = "", xlab="datetime", ylab = "Voltage", col = "black"))
# Plot 3: Sub metering vs. DateTime
with(filter_data, plot(DateTime, Sub_metering_1, main = "", xlab="", ylab="Energy sub metering", type = "l", col="black"))
with(filter_data, lines(DateTime, Sub_metering_2, main = "", xlab="", ylab="", type = "l", col=506))
with(filter_data, lines(DateTime, Sub_metering_3, main = "", xlab="", ylab="", type = "l", col="blue"))
legend("topright", bty="n", col=c("black", 506, "blue"), lty="solid", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
#Plot 4: Global_reactive_power vs. DateTime
with(filter_data, plot(DateTime, Global_reactive_power, type="l", main = "", xlab="datetime", ylab = "Global_reactive_power", col = "black"))
dev.off()
|
e99d448c7ce2a3a68ceafa50bcf2311645dc266e
|
aaaa3f260cb68632dd104c5333478c9617101946
|
/R/rpart.r
|
89bb10656b955fd66458baa5da7f63456562d011
|
[] |
no_license
|
Molmed/emil
|
957dd213f1ea1f56f9c346c265db0b1be04373a1
|
570e26f7e5d47aba292f884ff2fc810c4a26fbe7
|
refs/heads/master
| 2021-01-17T13:42:30.778308
| 2017-10-25T06:07:41
| 2017-10-25T06:07:41
| 19,534,115
| 3
| 0
| null | 2019-12-20T15:17:19
| 2014-05-07T12:45:14
|
R
|
UTF-8
|
R
| false
| false
| 1,141
|
r
|
rpart.r
|
#' Fit a decision tree
#'
#' @param x Data set (features).
#' @param y Response.
#' @param ... Sent to \code{\link{rpart}}.
#' @return A fitted decision tree.
#' @author Christofer \enc{Bäcklin}{Backlin}
#' @export
fit_rpart <- function(x, y, ...){
nice_require("rpart", "is needed to fit decision trees")
model <- if(inherits(y, "formula")){
rpart::rpart(formula = y, data = x, ...)
} else {
rpart::rpart(formula = y ~ ., data = x, ...)
}
model$y <- y
model
}
#' Predict using a fitted decision tree
#'
#' @param object Fitted decision tree.
#' @param x New data whose response is to be predicted.
#' @return Predictions. The exact form depends on the type of application
#' (classification or regression)
#' @author Christofer \enc{Bäcklin}{Backlin}
#' @export
predict_rpart <- function(object, x){
if(is.factor(object$y)){
# Classification
list(prediction = predict(object, x, type="class"),
probability = as.data.frame(predict(object, x, type="prob")))
} else {
# Regression
list(prediction = predict(object, x, type="vector"))
}
}
|
b39f7f62ccbaa4d997620d4d831ee534c7012bb7
|
712c71892a6edd61227e2c0c58bbc1e9b43893e4
|
/man/copyProject.Rd
|
4b5bdcadf0bd569d2aea512641b3e76069c5be3c
|
[] |
no_license
|
gelfondjal/adapr
|
130a6f665d85cdfae7730196ee57ba0a3aab9c22
|
b85114afea2ba5b70201eef955e33ca9ac2f9258
|
refs/heads/master
| 2021-01-24T10:20:14.982698
| 2020-01-28T22:56:18
| 2020-01-28T22:56:18
| 50,005,270
| 33
| 3
| null | 2018-10-18T16:09:57
| 2016-01-20T04:48:49
|
R
|
UTF-8
|
R
| false
| true
| 878
|
rd
|
copyProject.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/copyProject.R
\name{copyProject}
\alias{copyProject}
\title{Copies project to a different directory}
\usage{
copyProject(
project.id = getProject(),
targetDirectory = getProjectPublishPath(project.id),
dataCopy = TRUE,
programsOnly = FALSE,
speedCopy = TRUE
)
}
\arguments{
\item{project.id}{character string name of project}
\item{targetDirectory}{character string specifying which directory to copy to}
\item{dataCopy}{logicial specifying whether to copy data}
\item{programsOnly}{logical indicating to copy only program folder}
\item{speedCopy}{logical indicating to not copy if file name, mod time, and file size are identical in target directory.}
}
\value{
data frame
}
\description{
Copies project to a different directory
}
\details{
Defaults to copy to the publish directory
}
|
039e91c47954e386a5036d9007c4ba8fb5059cc7
|
90c8e94716f2456a8621b62cc0e798add3daf9da
|
/Data_Analysis_Project_Panel.R
|
6834e15bda989a8517a5519fbed25e812e6c8e88
|
[] |
no_license
|
alexandernt/Data-Analysis-Final-Project-
|
a7912174ca67b4426f51db58e48920640b2b88d8
|
fcc58019f343662979a0a349fcb39aaf88d6a568
|
refs/heads/master
| 2023-05-14T00:53:45.474123
| 2021-06-08T20:47:44
| 2021-06-08T20:47:44
| 372,255,498
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,536
|
r
|
Data_Analysis_Project_Panel.R
|
##Starting project.
##Installing packages & libraries
install.packages("tidyverse")
install.packages("here")
install.packages("ggplot2")
install.packages("skimr")
install.packages("janitor")
install.packages("readxl")
install.packages("rmarkdown")
library(tidyverse)
library(here)
library(ggplot2)
library(skimr)
library(janitor)
library(readxl)
library(rmarkdown)
## Rmarkdown
library(rmarkdown)
render("Test.Rmd")
##Importing first data set
dailiy_Activity <- read_excel("dailiy_Activity.xlsx")
##Cleaning data
da <- clean_names(dailiy_Activity)
##The day has 1440 minutes, according to the data sets, if u sum the sedentary minutes, plus active minutes (light, very and fairly)
## plus the time in bed, u will have the 1440 minutes
##Analyzing data
head(da)
da_selected <- da %>%
select(total_steps,
total_distance,
very_active_distance,
moderately_active_distance,
light_active_distance,
very_active_minutes,lightly_active_minutes,
fairly_active_minutes,
calories) %>%
summary()
##Graphs
##We can see a trend in light distance, is bigger than the other ones. This means that people use to do more exercise in a lightly way
ggplot(data = da) +
geom_point(mapping = aes(x = very_active_distance, y = total_distance), color = "red") +
labs(title = "Total Distance VS Very Active Distance")
ggplot(data = da) +
geom_point(mapping = aes(x = moderately_active_distance, y = total_distance), color = "green") +
labs(title = "Lightly Distance VS Very Active Distance")
ggplot(data = da) +
geom_point(mapping = aes(x = light_active_distance, y = total_distance), color = "purple") +
labs(title = "Lightly Distance VS Very Active Distance")
##Conclusion:
##If we check these 3 graphs, we will see a trend that people used to do more exercise distances in a lightly way.
##Active distances are the shortest.
##But in the nexts graphs, we can see the trend that when you spend more time doing active exercise than light, more calories you burn
ggplot(data = da, aes(x = calories, y = total_activity_minutes)) +
geom_point(aes(alpha = lightly_active_minutes), color = "red") +
geom_point(aes(alpha = very_active_minutes), color = "blue") +
geom_point(aes(alpha = fairly_active_minutes), color = "green") +
theme(axis.title.x = element_text()) +
labs(x = "Calories", y = "Total activity (mins)") +
annotate("text", x = 680, y = 530, label = "Red = Lightly active mins", color = "red") +
annotate("text", x = 645, y = 500, label = "Blue = Very active mins", color = "blue") +
annotate("text", x = 705, y = 470, label = "Green = Fairly active mins", color = "green") +
theme(legend.position='none')
##Conclusion: doing very active exercises will burn more calories. We can found a balance if you do a moderately exercise,
##and finally if you do a light exercise you will burn less calories.
##We can see, that if you do light activity u need more time to burn calories. But if you do an extensive activity, u need less time.
ggplot(data = da) +
geom_point(mapping = aes(x = calories, y = total_activity_minutes)) +
geom_smooth(mapping = aes (x = calories, y = very_active_minutes), color = "blue") +
geom_smooth(mapping = aes (x = calories, y = lightly_active_minutes), color = "red") +
labs(x = "Calories", y = "Total activity (mins)") +
theme(axis.title.x = element_text(face = "bold"), axis.title.y = element_text(face = "bold")) +
annotate("text", x = 500, y = 530, label = "Blue = Very active mins", color = "blue") +
annotate("text", x = 550, y = 500, label = "Red = Lightly active mins", color = "red")
##Calories vs total minutes // Total minutes = sedentary + active
##We can see that human spend more time in a sedentary way than an active way
ggplot(data = da) +
geom_point(mapping = aes(x = calories, y = total_minutes)) +
geom_smooth(mapping = aes (x = calories, y = sedentary_minutes), color = "red") +
geom_smooth(mapping = aes (x = calories, y = total_activity_minutes), color = "yellow") +
labs(x = "Calories", y = "Total minutes") +
annotate("text", x = 3500, y = 100, label = "Red = Sedentary active mins", color = "blue") +
annotate("text", x = 3435, y = 40, label = "Yellow = Total active mins", color = "blue")
##We've done a couple of trends on this sheet, so now we are going to merge it with other ones and keep searching for new trends.
##Importing data set
sleep_Day <- read_excel("sleep_Day.xlsx")
#Only for future datasets merges & analysis, I will change the column named as SleepDay for activity_date
##2) Cleaning the data
sl <- clean_names(sleep_Day) %>%
select(-id_len)
sld <- rename(sl, activity_date = sleep_day)
##Analyzing data
head(sld)
sld %>%
select(hour_and_minutes_asleep,
hours_and_minutos_in_bed,
total_minutes_asleep,
total_time_in_bed) %>%
summary()
##4) Graphs
ggplot(data = sld) +
geom_point(mapping = aes(x = total_time_in_bed, y = total_minutes_asleep), color = "purple") +
labs(title = "Minutes asleep vs in bed")
##We are going to merge with Inner Join, this means only the information that match, so we can analyze the data daily activity with the sleep days.
## da = daily Activity dataframe; sl = sleep Day dataframe
da_sl <- merge(da, sld, by = c("id","activity_date"))
glimpse(da_sl)
##I want to chech how many people sleep 8 or more hours per day from my dataset.
filter_8hours <- da_sl %>%
filter(total_minutes_asleep >= 480)
View(filter_8hours)
##Conclusion: I had 413 rows in the data set and 117 people slept more than 8 hours.
filter_less8hourse <- da_sl %>%
filter(total_minutes_asleep < 480)
View(filter_less8hourse)
##Conclusion_ From 413 rows data, 296 slept less than 8 hours.
##Analyzing data
filter_less8hourse %>%
summary()
filter_8hours %>%
summary()
##fl8 = filter_less8hours
select_fl8 <- filter_less8hourse %>%
select(total_steps,
total_distance,
very_active_distance,
moderately_active_distance,
light_active_distance,
very_active_minutes,
fairly_active_minutes,
lightly_active_minutes,
total_activity_minutes,
sedentary_minutes,
calories,
total_minutes_asleep,
total_time_in_bed)
View(select_fl8)
##8h = 8hours
select_8h <- filter_8hours %>%
select(
total_steps,
total_distance,
very_active_distance,
moderately_active_distance,
light_active_distance,
very_active_minutes,
fairly_active_minutes,
lightly_active_minutes,
total_activity_minutes,
sedentary_minutes,
calories,
total_minutes_asleep,
total_time_in_bed)
View(select_8h)
## Random size so we can compare means values in both data sets.
random_selectfl8 <- select_fl8 %>%
sample_n(117, replace = FALSE)
View(random_selectfl8)
##I will merge both 117 rows data sets, creating a new one only of 234
merge_8h_fl8 <- merge(select_8h, random_selectfl8, all = TRUE)
View(merge_8h_fl8)
eight_hours <- merge_8h_fl8 %>%
mutate(eight_hours_or_not = if_else(total_minutes_asleep >= 480, 'TRUE', 'FALSE'))
View(eight_hours)
##Courious data:
##8 >= hours means:
##Total time in bed: 583,2
##Total minues asleep: 543
##Calories: 2284
##Total activity minutes: 251,9
##Sedentary minutes: 610
##Graphs:
##Total steps vs total time in bed
ggplot(data = eight_hours) +
geom_point(mapping = aes(x = total_activity_minutes, y = sedentary_minutes)) +
facet_wrap(~eight_hours_or_not) +
theme(legend.position='bottom')
ggplot(data = eight_hours) +
geom_point(mapping = aes(x = very_active_distance, y = light_active_distance)) +
facet_wrap(~eight_hours_or_not) +
theme(legend.position='bottom')
ggplot(data = eight_hours) +
geom_point(mapping = aes(x = total_distance, y = total_steps, color = light_active_distance)) +
facet_wrap(~eight_hours_or_not) +
theme(legend.position='bottom')
ggplot(data = eight_hours, aes(x = total_distance, y = calories)) +
geom_smooth(mapping = aes(x = very_active_distance, y = calories), color = "red") +
theme(legend.position='none')
##We are going to see the relation between the wake up minutes vs calories and which trend has with sedentary and active column
ggplot(data = eight_hours, aes(x = sedentary_minutes, y = calories)) +
geom_point(aes(alpha = total_minutes_asleep)) +
facet_wrap(~eight_hours_or_not)
ggplot(data = da_sl, aes(x = total_activity_minutes, y = calories)) +
geom_point(aes(alpha = total_minutes_asleep))
##Now, I am going to import the weight_Info data set, clean it, analyze it, graph some trends and then merge with other data sets.
##1) Importing data set
weight_Log_Info <- read_excel("weight_Log_Info.xlsx")
##2) Cleaning and analyzing
## weight = weight_Log_Info
head(weight_Log_Info)
colnames(weight_Log_Info)
##Columns did not have names
weight_Log_Info %>%
rename(activity_date = ...3) %>%
rename(x = ...2)
weightt <- weight_Log_Info %>%
clean_names() %>%
rename(activity_date = x3)
weight <- weightt %>%
select(id,
activity_date,
weight_kg,
fat,
bmi,)
View(weight)
##Now, we are going to merge with da_Sl
da_sl_we <- merge(da_sl, weight, by = c("id", "activity_date"))
View(da_sl_we)
## Graphs
ggplot(data = da_sl_we) +
geom_point(mapping = aes(x = sedentary_minutes, y = total_activity_minutes, color = weight_kg))
ggplot(data = da_sl_we) +
geom_point(mapping = aes(x = sedentary_minutes, y = total_distance, color = weight_kg))
## There is no big difference betwee the minutes in bed, wake up and the weight
ggplot(data = da_sl_we) +
geom_point(mapping = aes(x = total_minutes_asleep, y = total_time_in_bed,color = weight_kg))
## We can see a trend when weight increment, the bmi also increment.
ggplot(data = da_sl_we) +
geom_line(mapping = aes(x = weight_kg, y = bmi, ), color = "blue") +
labs(title = "Bmi vs Weight")
##I want to analyze the data sets per hour and merge them for trends findings
##We are going to import data sets, clean it, analyze it and graph trends.
hourly_Intensities <- read_excel("hourly_Intensities.xlsx")
hourly_Calories <- read_excel("hourly_Calories.xlsx")
hourly_Steps <- read_excel("hourly_Steps.xlsx")
##Variables for understanding
## hi =hourly_Itennsities
## hc = hourly_Calories
## hs = hourly_Steps
##The time was inconsistent in data sets, so with excel I clean it, so all the columns have the same format in date & time.
##2) Cleaning
hi <- hourly_Intensities %>%
clean_names()
hc <- hourly_Calories %>%
clean_names()
hs <- hourly_Steps %>%
clean_names()
##3) Merging data
hi_hc <- merge(hi, hc, by = c("id", "activity_date"))
View(hi_hc)
hi_hc_hs <- merge(hi_hc, hs, by = c("id", "activity_date"))
View(hi_hc_hs)
##I could find that in these merges were duplicates, so we will fix it.
hi_hc_hs_nodup <- hi_hc_hs %>%
distinct(id, activity_date, .keep_all = TRUE)
View(hi_hc_hs_nodup)
##4) Graphs
ggplot(data = hi_hc_hs_nodup) +
geom_point(mapping = aes(x= step_total, y = calories)) +
labs(x = "Total Steps", y = "Calories")
ggplot(data = hi_hc_hs_nodup) +
geom_point(mapping = aes(x= step_total, y = calories, color = total_intensity))
ggplot(data = hi_hc_hs_nodup) +
geom_point(mapping = aes(x = average_intensity, y = total_intensity, color = calories))
##I am going to add the heartrate data set and merge it with hc_hi_hs_nodup
##Importing data set
##Cleaning it
hr <- heartrate %>%
select(-Time) %>%
clean_names()
View(hr)
##Analyzing data
head(hr)
## Population size = 1048575
## Confidence level = 95%
## Margin error = 3%
## Sample size = 1067
random_hr <- hr %>%
sample_n(600000, replace = FALSE)
View(random_hr)
##Merge data sets
hi_hc_hs_hr <- merge(hi_hc_hs_nodup, hr, by = c("id", "activity_date"))
View(hi_hc_hs_hr)
## Cleaning dups
hi_hc_hs_hr_nodup <- hi_hc_hs_hr %>%
distinct(id, activity_date, .keep_all = TRUE)
View(hi_hc_hs_hr_nodup)
##Graph data
ggplot(data = hi_hc_hs_hr_nodup) +
geom_point(mapping = aes(x = total_intensity, y = heartrate_value))
##Here, we can see a trend that when more steps u do, more control u have on the heartrate value
ggplot(data = hi_hc_hs_hr_nodup) +
geom_point(mapping = aes(x = heartrate_value, y = calories, color = step_total))
##The cases are almost all good, but u can see some exceptions when the intensity is lower, the calories too and the hearthrate increment.
ggplot(data = hi_hc_hs_hr_nodup) +
geom_point(mapping = aes(x = heartrate_value, y = calories, color = total_intensity))
|
4d134ad69aa76d44be88d723e24183977e069560
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/wordspace/examples/RG65.Rd.R
|
bc462ba047a9e179da18a724509af3310816d6a3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 233
|
r
|
RG65.Rd.R
|
library(wordspace)
### Name: RG65
### Title: Similarity Ratings for 65 Noun Pairs (wordspace)
### Aliases: RG65
### Keywords: datasets
### ** Examples
head(RG65, 10) # least similar pairs
tail(RG65, 10) # most similar pairs
|
f55d47241bccc4ff12bf896b2e5ab91f33c7e16f
|
8603d007f1b000f2acf61ada7e98a7aa8d9c8080
|
/R/Model Effects Plot.R
|
7bccb40110b6155573da7ea3e648fa63da794075
|
[] |
no_license
|
vsluydts/ggregplot
|
942c3b3c5eba8eb31e65870ab4e248e5cd5af228
|
f89ddb3ea5fb1a8fbf66aba923d74dddd6f61572
|
refs/heads/master
| 2020-11-25T21:06:28.980261
| 2019-12-10T03:29:06
| 2019-12-10T03:29:06
| 228,847,877
| 0
| 0
| null | 2019-12-18T13:38:48
| 2019-12-18T13:38:48
| null |
UTF-8
|
R
| false
| false
| 2,252
|
r
|
Model Effects Plot.R
|
# EfxPlotComp
Efxplot <- function(ModelList, Sig = TRUE,
ModelNames = NULL,
VarNames = NULL, VarOrder = NULL,
Intercept = TRUE,
tips = 0.2){
require(dplyr); require(ggplot2); require(INLA); require(MCMCglmm)
graphlist<-list()
if(!class(ModelList)=="list"){
ModelList <- list(ModelList)
}
for(i in 1:length(ModelList)){
model<-ModelList[[i]]
if(class(model)=="inla"){
graph<-as.data.frame(summary(model)$fixed)
colnames(graph)[which(colnames(graph)%in%c("0.025quant","0.975quant"))]<-c("Lower","Upper")
colnames(graph)[which(colnames(graph)%in%c("0.05quant","0.95quant"))]<-c("Lower","Upper")
colnames(graph)[which(colnames(graph)%in%c("mean"))]<-c("Estimate")
}
if(class(model)=="MCMCglmm"){
graph<-as.data.frame(summary(model)$solutions)
colnames(graph)[1:3]<-c("Estimate","Lower","Upper")
}
graph$Model<-i
graph$Factor<-rownames(graph)
graphlist[[i]]<-graph
}
graph<-bind_rows(graphlist)
graph$Sig <- with(graph,ifelse((Lower<0&Upper<0)|(Lower>0&Upper>0),"*",""))
graph$Model <- as.factor(graph$Model)
if(!is.null(ModelNames)){
levels(graph$Model)<-ModelNames
}
position <- ifelse(length(unique(graph$Model)) == 1, "none", "right")
if(is.null(VarOrder)) VarOrder <- rev(unique(graph$Factor))
if(is.null(VarNames)) VarNames <- VarOrder
graph$Factor <- factor(graph$Factor, levels = VarOrder)
levels(graph$Factor) <- VarNames
if(!Intercept){
VarNames <- VarNames[!str_detect(VarNames, "ntercept")]
graph <- graph %>% filter(Factor %in% VarNames)
}
graph$starloc<-NA
min<-min(graph$Lower,na.rm=T)
max<-max(graph$Upper,na.rm=T)
if(Sig==TRUE){
graph$starloc <- max+(max-min)/10
}
ggplot(as.data.frame(graph),aes(x=as.factor(Factor),y=Estimate,colour=Model))+
geom_point(position=position_dodge(w=0.5))+
geom_errorbar(position=position_dodge(w=0.5), aes(ymin = Lower, ymax = Upper), size=0.3, width=tips)+
geom_hline(aes(yintercept=0),lty=2) + labs(x=NULL) + coord_flip() +
theme(legend.position = position) +
geom_text(aes(label = Sig, y = starloc), position = position_dodge(w = 0.5))
}
|
18957949f46ee095f3c4f68e9510f31c98c25540
|
0e6fe4d2f5a8c84aedc0705581acded74e0c67f8
|
/man/solveWt.Rd
|
54cc9e0810808aa966689c574224860037b697b7
|
[] |
no_license
|
cran/iopsych
|
5b90206fe927287549e5b9ddffc60d988a7b8054
|
c23bd432035c025f77c5cc8a81d3eeacbe284404
|
refs/heads/master
| 2016-08-11T15:21:11.371180
| 2016-04-04T13:38:31
| 2016-04-04T13:38:31
| 55,406,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,125
|
rd
|
solveWt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regression.R
\name{solveWt}
\alias{solveWt}
\title{Find r given arbitrary predictor weights}
\usage{
solveWt(r_mat, y_col, x_col, wt)
}
\arguments{
\item{r_mat}{A correlation matrix.}
\item{y_col}{A vector of columns representing criterion variables.}
\item{x_col}{A vector of columns representing predictor variables.}
\item{wt}{A vector of predictor weights or a list of multiple vectors.}
}
\value{
The correlation between the weighted predictor composite and criterion.
}
\description{
Find r given arbitrary predictor weights
}
\note{
This uses a simpler, faster version of the same formula used for fuse().
}
\examples{
library(iopsych)
#Get Data
data(dls2007)
r_mat <- dls2007[1:6, 2:7]
#Get weights
unit_wt <- c(1,1,1,1)
other_wt <- c(1,2,1,.5)
wt_list <- list(unit_wt, other_wt)
#Solve
solveWt(r_mat=r_mat, y_col=6, x_col=1:4, wt=unit_wt)
solveWt(r_mat=r_mat, y_col=6, x_col=1:4, wt=other_wt)
solveWt(r_mat=r_mat, y_col=6, x_col=1:4, wt=wt_list)
}
\author{
Allen Goebl and Jeff Jones
}
|
20002b8909c793faea887d95b3e2187c4d5e85a7
|
0f7405b90fade51b2ce5218869513065f8c6ad34
|
/R/ExerciseData.R
|
49795770f9225da763271348220c123ae6754567
|
[] |
no_license
|
anhnguyendepocen/AdvancedRegression
|
4dcaf01554af22c27b51f39094149f6bbdada810
|
5bbd35c8196759dd80bea953a13faa80c8764b99
|
refs/heads/master
| 2022-03-09T23:29:17.615787
| 2019-10-05T14:08:37
| 2019-10-05T14:08:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
r
|
ExerciseData.R
|
#' Exercise Data
#'
#' Datasets used for the different exercises. Each quoted string below names
#' a packaged dataset; all of them share a single documentation page via the
#' common \code{@rdname} tag.
#' @rdname ExerciseData
"BetaExerciseData"
#' @rdname ExerciseData
"GammaExerciseData"
#' @rdname ExerciseData
"HierarchicalExerciseData"
#' @rdname ExerciseData
"LogisticExerciseData"
#' @rdname ExerciseData
"LongitudinalNormalExerciseData"
#' @rdname ExerciseData
"LongitudinalPoissonExerciseData"
#' @rdname ExerciseData
"NormalExerciseData"
#' @rdname ExerciseData
"PoissonExerciseData"
#' @rdname ExerciseData
"ZIPExerciseData"
|
401274226118fd7f8e8ee65c76a7a072ff01d0db
|
673d4c7e8f3510b650ed21014c9235ba790d97dd
|
/man/likchisq.Rd
|
4946f0b925787cca312bedb499342d209b302700
|
[] |
no_license
|
BenBarnard/likelihoodExplore
|
1ce6f64e314f68834255bc4f13cab6b04b81755d
|
e16bdc37a8758c232cfc249b217f8b6e14f95768
|
refs/heads/master
| 2021-04-09T15:42:59.071771
| 2019-11-30T21:27:19
| 2019-11-30T21:27:19
| 60,886,619
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 882
|
rd
|
likchisq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Chi-Squared.R
\name{likchisq}
\alias{likchisq}
\title{Chi-Squared Log Likelihood Function}
\usage{
likchisq(df, ncp = 0, x, log = FALSE)
}
\arguments{
\item{df}{degrees of freedom (non-negative, but can be non-integer).}
\item{ncp}{non-centrality parameter (non-negative).}
\item{x}{vector of quantiles.}
\item{log}{logical; if TRUE, probabilities p are given as log(p).}
}
\value{
A numeric scalar giving the log likelihood of the chi-squared density
for the data when df and ncp are held constant; if vectors are
supplied, a vector is returned.
}
\description{
The log likelihood of a chi-squared density with data, x, df and ncp
parameters.
}
\details{
The log likelihood is the log of a function of parameters given the data.
}
\examples{
likchisq(x = rchisq(n = 2, df = 4),
df = 4)
}
|
655a02038df1e32a5e717a9d71c673766de062b9
|
cf6e3baeac2f1a51776589f720b854193d180bdd
|
/man/perm_distribution.Rd
|
fecc693eb0e5f60f1f911159bae28f669e99e2d9
|
[] |
no_license
|
b-brune/robTests
|
fd29e83cde06e1ca74b9fa1777635844b805d466
|
e5da4923a66a1f291a9a601182564670b66c65eb
|
refs/heads/master
| 2021-11-25T17:10:02.707361
| 2018-12-20T11:06:12
| 2018-12-20T11:06:12
| 162,573,392
| 0
| 0
| null | 2021-04-20T15:23:53
| 2018-12-20T11:56:21
|
R
|
UTF-8
|
R
| false
| true
| 1,131
|
rd
|
perm_distribution.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/permDistribution.R
\name{perm_distribution}
\alias{perm_distribution}
\title{Permutation Distribution}
\usage{
perm_distribution(x, y, type, sampled = FALSE, n.rep = NULL)
}
\arguments{
\item{x}{numeric vector of observations.}
\item{y}{numeric vector of observations.}
\item{type}{desired test statistic, one of \code{"D1S1"} (default), \code{"D2S1", "D2S2", "D3S3", "D3S4"},
where "D1" to "D3" mark different estimators for the location shift and
"S1" to "S4" are estimates for the scale of the samples.}
\item{sampled}{logical value indicating whether all permutations should be
considered or only a random sample.}
\item{n.rep}{integer specifying the sample size in case of taking a random
sample to approximate the permutation distribution.}
}
\value{
Vector with permutation distribution.
}
\description{
\code{perm_distribution()} calculates the permutation distribution for
several test statistics.
}
\details{
see documentation of \code{rob_perm_statistic()} for a description of
the \code{type}-parameter
}
|
0e18f80d80ed69041e566514fa19b2c33cf2648b
|
8b8cea28523d21ccc1b5227c604f85a621b13dce
|
/Intuitive-RNA-Seq-Web/about.R
|
d560879cd3f666763892551a1484155e0590c269
|
[] |
no_license
|
ThomasKAtkins/intuitive-RNA-seq
|
ca5d4f73f3c793e0e4cdf146964b63561fe681ef
|
424663df2a46013a36cf2eb78b42662fee6bbc5c
|
refs/heads/main
| 2023-06-11T08:40:55.814057
| 2021-06-26T01:46:50
| 2021-06-26T01:46:50
| 380,347,381
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
about.R
|
# UI definition for the "About" tab: a page heading followed by a short
# project-credits paragraph. Requires shiny to be attached by the caller.
about <- tabPanel(
  "About",
  h1("About"),
  p("Intuitive RNA-Seq is a project developed for the 2021 IQBIO REU hackathon by Ashmika Behere, Stanton Li, Miguel Hernandez, and Thomas Atkins.")
)
|
a5999536d64bbe92a3e3b444ab9f0824df01f2ba
|
c4115cb5af53941e43684ccc5964f7102013e257
|
/R/get_gho.R
|
872610abd351227aa6c37875466e33451cfd0766
|
[] |
no_license
|
cran/rgho
|
f816ec332701770edbfbb225c119f75b9864d03b
|
ccb04061bc0ce8a336acfe1dcf68315d72678d9c
|
refs/heads/master
| 2023-08-29T11:58:23.535882
| 2023-08-09T12:10:02
| 2023-08-09T13:30:34
| 54,256,363
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,791
|
r
|
get_gho.R
|
#' GET a GHO URL
#'
#' Build an OData query object rooted at a GHO base URL.
#'
#' @param url The URL to retrieve, given as a character string; defaults to
#'   the \code{rgho.baseurl} option.
#'
#' @return A `ODataQuery` object.
#' @keywords internal
get_gho <- function(url = getOption("rgho.baseurl")) {
  query <- ODataQuery$new(url)
  query
}
#' @rdname man-gho
#' @export
get_gho_dimensions <- function() {
  # Query the "Dimension" collection and coerce the response to a gho object.
  build_gho(get_gho()$path("Dimension"))
}
#' @rdname man-gho
#' @export
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' This function was deprecated in favor of get_gho_values
#' @keywords internal
#'
get_gho_codes <- function(...) {
  # Backwards-compatibility shim: emit a soft deprecation warning, then
  # delegate all arguments unchanged to the replacement function.
  lifecycle::deprecate_soft("3.0.0", "get_gho_codes()", "get_gho_values()")
  get_gho_values(...)
}
#' @rdname man-gho
#' @export
get_gho_values <- function(dimension = "GHO") {
  # Fetch the list of known dimensions and validate the request against it.
  known <- get_gho_dimensions()
  return_if_message(known)
  stopifnot(dimension %in% known$Code)

  # Retrieve the values for the requested dimension, then keep only the
  # Code/Title columns while preserving the source URL attribute.
  resp <- get_gho()$path(sprintf("DIMENSION/%s/DimensionValues", dimension))
  values <- build_gho(resp)
  structure(values[c("Code", "Title")],
            url = attr(values, "url"))
}
#' @rdname man-gho
#' @export
get_gho_indicators <- function(filter = NULL) {
  # Build the "Indicator" query, optionally restricted by an OData filter.
  resp <- get_gho()$path("Indicator")
  table <- if (!is.null(filter)) {
    build_gho(resp$filter(list_to_filter(filter)))
  } else {
    build_gho(resp)
  }
  # build_gho() already tags its result with class "gho"; wrap with unique()
  # so the class vector does not end up as c("gho", "gho", "data.frame").
  structure(table,
            class = unique(c("gho", class(table))),
            url = attr(table, "url"))
}
#' Check and Build a gho Object
#'
#' @param x A ODataQuery object
#'
#' @return A \code{gho} object.
#' @keywords internal
#'
build_gho <- function(x) {
  # Probe the URL first so network/HTTP failures surface as messages rather
  # than hard errors; bail out early if the probe produced a message object.
  probe <- graceful_fail(x$url, config = c(
    httr::user_agent("https://github.com/aphp/rgho/")
  ))
  return_if_message(probe, display = TRUE, n = 2)

  # Execute the query and wrap the payload as a classed data frame, keeping
  # the OData context URL as an attribute.
  payload <- x$retrieve()
  structure(payload$value,
            class = c("gho", "data.frame"),
            url = payload$`@odata.context`)
}
|
f74e39e83118929a7b5857166743eee92868f2eb
|
77d4c68d919779238599353c2bea4b1056b59f27
|
/ExampleRunThrough/3_RunCompass_Example.R
|
bb7db5907e5c20f4b34821b8fc5521b7cee56206
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
jqliuh/flowHelpers
|
8dfaedfbf94c600292aded335edf817d4736700c
|
c8f3f715fd1bebc1d539ee63ff6e4fb744e840ec
|
refs/heads/master
| 2020-04-06T13:09:36.052480
| 2018-10-02T18:45:20
| 2018-10-02T18:45:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,655
|
r
|
3_RunCompass_Example.R
|
#!/usr/bin/env Rscript
# Run COMPASS on the example GatingSetList twice: once for CD8+ nodes, then
# once for CD4+ nodes, writing each run's output under its own directory.
library(flowHelpers)
library(flowWorkspace)
library(here)

# Shared inputs: gating-set location, RNG seed, cytokine marker names, and
# the metadata columns identifying subjects and runs.
gsPath <- "/home/malisa/uw/COMPASS/20170331_TB-ICS_ACS/GatingSetList4COMPASS"
seed <- 1
mapMarkers <- list("CD154", "IFNg", "IL4", "TNFa", "IL22", "IL17a", "IL2")
individuals <- "PATIENT ID"
uniqueruns <- "Peptide"

# ---- CD8+ run -------------------------------------------------------------
mapNodesCD8 <- c("8+/154+", "8+/IFNg+", "8+/IL4+", "8+/TNFa+", "8+/IL22+", "8+/IL17+", "8+/IL2+")
outdir <- here::here("ExampleRunThrough/output/CompassOutput/CD8")
node <- "8+"
nodeMarkerMapCD8 <- setNames(mapMarkers, mapNodesCD8)
generic.compass.wrapper(
  path = gsPath,
  seed = seed,
  outdir = outdir,
  cnode = node,
  nodemarkermap = nodeMarkerMapCD8,
  individuals = individuals,
  grouping = "QuantiferonInTubeResult",
  uniqueruns = uniqueruns,
  countFilterThreshold = 0
)

# ---- CD4+ run -------------------------------------------------------------
mapNodesCD4 <- c("4+/154+", "4+/IFNg+", "4+/IL4+", "4+/TNFa+", "4+/IL22+", "4+/IL17+", "4+/IL2+")
outdir <- here::here("ExampleRunThrough/output/CompassOutput/CD4")
node <- "4+"
nodeMarkerMapCD4 <- setNames(mapMarkers, mapNodesCD4)
generic.compass.wrapper(
  path = gsPath,
  seed = seed,
  outdir = outdir,
  cnode = node,
  nodemarkermap = nodeMarkerMapCD4,
  individuals = individuals,
  grouping = "QuantiferonInTubeResult",
  uniqueruns = uniqueruns,
  countFilterThreshold = 0
)
|
1173fcd8717136af1ba73b3e6b4d17ac664bc071
|
dcdce563dc89b47cea2bcfee8ee608c291fdc2e9
|
/R/plot.msc.R
|
23cf7306263cfaa5386791a61e38a197230c4ece
|
[] |
no_license
|
cran/CombMSC
|
cb4412f580013286fb28a69303cc8077be926476
|
b17252d5f1924a5a7122fb20c6b9e307b41c80cc
|
refs/heads/master
| 2021-01-22T05:15:50.935459
| 2019-01-04T17:23:10
| 2019-01-04T17:23:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,405
|
r
|
plot.msc.R
|
# S3 plot method for "msc" objects.
#
# Arguments:
#   x            - an msc object with components msc.List (one entry per
#                  model-selection criterion), summary.Functions, and
#                  Sum.Stats (a data frame of summary-statistic values over
#                  a grid of MSC weights).
#   sumfun.Index - which summary function to plot: a numeric index, "def"
#                  for a default layout, or "all" for one plot per function.
#   fancy        - if TRUE and there are exactly 3 MSCs, draw an interactive
#                  rgl 3-D surface instead of a lattice plot.
#   grayscale    - use a gray color ramp in the levelplot.
#   draw.Grid    - overlay a grid in the (non-grayscale) levelplot panel.
`plot.msc` <-
function(x, sumfun.Index = "def", fancy = FALSE, grayscale = FALSE, draw.Grid = TRUE, ...){
  # 3-D plotting function. Used in plot.msc only when the "fancy" option is TRUE and
  # the number of MSCs is 3, which allows this particular type of display.
  showResults <- function(fram, resp, maintitle = "", obj){
    require(rgl)  # NOTE(review): require() inside a function fails silently if rgl is absent
    x = unique(as.vector(fram[,1]))
    y = unique(as.vector(fram[,2]))
    ht <- x %o% y  # allocate a length(x) x length(y) height matrix; values overwritten below
    for(i in 1:length(x)){
      for(j in 1:length(y)){
        # Weight pairs outside the simplex (sum > 1) get height 0; otherwise
        # look up the response for this (x, y) weight combination.
        ifelse(x[i] + y[j] > 1, ht[i,j] <-0, ht[i,j] <- resp[which(fram[,1]==x[i] & fram[,2]==y[j])])
      }}
    ht <- ht * (100/max(resp))  # rescale heights to a 0-100 range
    rgl.open()
    rgl.clear("all")
    # NOTE(review): bg=lines passes the graphics::lines *function object* as an
    # argument -- looks unintended; confirm against rgl.bg()'s signature.
    rgl.bg(sphere = TRUE, color = c("black", "white"), lit = FALSE, size=.5, alpha=0.25,bg=lines)
    rgl.light()
    rgl.bbox()
    rgl.surface(100*x,100*y,ht,color="blue",alpha=.95,shininess=128,tick=FALSE)
    # Mark the minimizing weight combination with a red sphere.
    ind = which.min(resp)
    rgl.spheres(100*fram[,1][ind],(100/max(resp))*resp[ind],100*fram[,2][ind],color="red",radius=2,
                specular="red",texmipmap=T, texminfilter="linear.mipmap.linear")
    axes3d(labels = c(names(obj$msc.List)[1], names(obj$msc.List)[2], "Rank"), tick=FALSE, ntick=0, color="green")
    title3d(xlab = names(obj$msc.List)[1], zlab = names(obj$msc.List)[2], main = maintitle, color = "green")
    rgl.viewpoint(theta = 225, phi = 30)
  }
  if(fancy && length(x$msc.List)==3){
    # Interactive 3-D display: one rgl window per summary function ("all"),
    # or a single window for the chosen index.
    # NOTE(review): x$summary.Function (singular) relies on $ partial matching
    # to the summary.Functions component -- works, but fragile.
    if(sumfun.Index == "all"){
      for(i in 1:length(x$summary.Functions))showResults(fram= x$Sum.Stats,
          resp = x$Sum.Stats[,names(x$summary.Function)[i]],
          maintitle = names(x$summary.Functions)[i], obj = x)}
    else {if(sumfun.Index=="def")sumfun.Index = 1
      showResults(fram= x$Sum.Stats, resp = x$Sum.Stats[,names(x$summary.Function)[sumfun.Index]],
                  maintitle = names(x$summary.Functions)[sumfun.Index], obj = x)}}
  else{
    if(sumfun.Index == "all"){
      # One device window per summary function.
      # NOTE(review): win.graph() is Windows-only; dev.new() would be portable.
      for(i in 1:length(x$summary.Functions)){
        win.graph()
        print(plot(x, sumfun.Index=i))
      }
    }
    else{
      if(sumfun.Index == "def"){
        # Default layout: single plot for few summary functions, otherwise a
        # 3x2 trellis of the first six.
        # NOTE(review): the "< 6" branch does not print() its lattice object,
        # so nothing is drawn when called non-interactively -- confirm intent.
        if(length(x$summary.Functions) < 6) plot(x, 1)
        else{
          print(plot(x, sumfun.Index=1, grayscale = grayscale, draw.Grid = draw.Grid),
                split = c(1, 1, 3, 2), more=TRUE)
          print(plot(x, sumfun.Index=2, grayscale = grayscale, draw.Grid = draw.Grid),
                split = c(2, 1, 3, 2), more=TRUE)
          print(plot(x, sumfun.Index=3, grayscale = grayscale, draw.Grid = draw.Grid),
                split = c(3, 1, 3, 2), more=TRUE)
          print(plot(x, sumfun.Index=4, grayscale = grayscale, draw.Grid = draw.Grid),
                split = c(1, 2, 3, 2), more=TRUE)
          print(plot(x, sumfun.Index=5, grayscale = grayscale, draw.Grid = draw.Grid),
                split = c(2, 2, 3, 2), more=TRUE)
          print(plot(x, sumfun.Index=6, grayscale = grayscale, draw.Grid = draw.Grid),
                split = c(3, 2, 3, 2), more=FALSE)
        }
      }
      else{
        # Lattice display for one specific summary function; the plot type
        # depends on how many MSCs are being combined.
        require(lattice)
        obj = x
        sumfun = x$summary.Functions[sumfun.Index]
        fram <- x$Sum.Stats
        num.Mscs <- length(x$msc.List)
        if(num.Mscs==1) stop ("Something's wrong... you should have more than one MSC!")
        # Two MSCs: simple scatterplot of the statistic vs. the first weight.
        if(num.Mscs==2) {out <- xyplot(fram[,names(sumfun)] ~ fram[,1], main = paste(
                names(sumfun)), xlab = names(x$msc.List)[1],
                ylab = names(sumfun))
        }
        # Three MSCs: levelplot over the first two weights (third implied by
        # the simplex constraint); its name is drawn near the origin.
        if(num.Mscs==3) {
          mypanel <- function(x,y,z, ...){
            panel.fill(col="black")
            if(draw.Grid)panel.grid(col.line = "blue", h = 4, v = 4)
            panel.levelplot(x, y, z, background = list(alpha = 1, col = "blue"), ...)
          }
          if(!grayscale){
            out <- levelplot(fram[,names(sumfun)] ~ fram[[1]]
                + fram[[2]], xlab = paste(names(x$msc.List)[1], "Weight"),
                ylab= paste(names(x$msc.List)[2], "Weight"), zlab = names(sumfun), auto.key=TRUE
                , main = paste(names(sumfun)), bg="black", panel=mypanel,
                axis = function(side, ...) {
                  axis.default(side, ...)
                  if (side == "bottom")
                    panel.text(0, 0, lab = names(obj$msc.List)[[3]], adj = c(1, 1))})
          }
          else out <- levelplot(fram[,names(sumfun)] ~ fram[[1]]
                + fram[[2]], xlab = paste(names(x$msc.List)[1], "Weight"),
                ylab= paste(names(x$msc.List)[2], "Weight"), zlab = names(sumfun), auto.key=TRUE
                , main = paste(names(sumfun)), col.regions = gray(1:100/100),
                axis = function(side, ...) {
                  axis.default(side, ...)
                  if (side == "bottom")
                    panel.text(0, 0, lab = names(obj$msc.List)[[3]], adj = c(1, 1))})
        }
        # Four MSCs: condition on the third weight via 9 shingle intervals.
        if(num.Mscs==4) {
          ints = (1/9)*cbind(0:8, 1:9)
          dim(ints) <- c(9,2)
          Name <- shingle(fram[[3]], intervals=ints)
          out <- (levelplot(fram[[names(sumfun)]] ~ fram[[1]] * fram[[2]]
                | Name, xlab = names(x$msc.List)[1],
                ylab= names(x$msc.List)[2], zlab= names(sumfun), auto.key=TRUE,
                strip=strip.custom(var.name=names(fram)[3], shingle.intervals=ints,
                strip.levels=c(FALSE, FALSE)), region=TRUE, main = paste(names(sumfun))))
          out
        }
        if(num.Mscs>4) {print("No default method available."); return(NULL)}
        out
      }
    }
  }
}
|
cad672d98cae33c5ec82c1764885f243b7e6e238
|
89d219d3dfa744543c6ba1c1b3a99e4dcabb1442
|
/man/monthNumber.Rd
|
19da299fb02dfe9aa15565e0113e780baf618afc
|
[] |
no_license
|
pteetor/tutils
|
e2eb5d2fba238cbfe37bf3c16b90df9fa76004bb
|
fe9b936d8981f5cb9b275850908ef08adeffef4e
|
refs/heads/master
| 2022-06-17T11:34:30.590173
| 2022-06-14T02:02:15
| 2022-06-14T02:02:15
| 77,761,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 892
|
rd
|
monthNumber.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monthFunctions.R
\name{monthNumber}
\alias{monthNumber}
\alias{monthNumber.default}
\alias{monthNumber.integer}
\alias{monthNumber.numeric}
\alias{monthNumber.character}
\alias{monthNumber.Date}
\alias{monthNumber.yearmon}
\title{Convert month in character format to integer (1, ..., 12)}
\usage{
monthNumber(x, ...)
\method{monthNumber}{default}(x)
\method{monthNumber}{integer}(x)
\method{monthNumber}{numeric}(x)
\method{monthNumber}{character}(s)
\method{monthNumber}{Date}(d)
\method{monthNumber}{yearmon}(d)
}
\arguments{
\item{x}{Can be a number (1, ..., 12), name ("jan", "feb", ...),
futures month code ("F", "G", "H", ...),
Date, or yearmon.
Case is ignored for character arguments.}
}
\value{
Month number (1L, ..., 12L)
}
\description{
Convert month in character format to integer (1, ..., 12)
}
|
9e26cb43fd3bd3aebd991f2870ccdbebe09a2bea
|
0edd7703d14f0e27dad2fdc10d4d6f67ffe9f8e5
|
/man/infmat_bes.Rd
|
ac2e385cb0edaa300ce1f71be8b3092c19a9ffc4
|
[] |
no_license
|
cran/bbreg
|
e9fb514f07831a0f86c061e8d15c3c0b1b9e0ff6
|
cb22d2d4a7c66545152fc6019035ee563b95a280
|
refs/heads/master
| 2022-02-19T21:23:03.434884
| 2022-02-14T07:00:05
| 2022-02-14T07:00:05
| 249,200,976
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,335
|
rd
|
infmat_bes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EM_Bes.R
\name{infmat_bes}
\alias{infmat_bes}
\title{infmat_bes}
\usage{
infmat_bes(theta, z, x, v, link.mean, link.precision, information = FALSE)
}
\arguments{
\item{theta}{vector of parameters (all coefficients: kappa and lambda).}
\item{z}{response vector with 0 < z_i < 1.}
\item{x}{matrix containing the covariates for the mean submodel. Each column is a different covariate.}
\item{v}{matrix containing the covariates for the precision submodel. Each column is a different covariate.}
\item{link.mean}{a string containing the link function for the mean.
The possible link functions for the mean are "logit","probit", "cauchit", "cloglog".}
\item{link.precision}{a string containing the link function for the precision parameter.
The possible link functions for the precision parameter are "identity", "log", "sqrt", "inverse".}
\item{information}{optionally, a logical parameter indicating whether the Fisher's information matrix should be returned}
}
\value{
Vector of standard errors or Fisher's information matrix if the parameter 'information' is set to TRUE.
}
\description{
Function to compute standard errors based on the Fisher information matrix for the bessel regression.
This function can also provide the Fisher's information matrix.
}
|
fb1e022ef6c41856de545548a5778c1622a9212b
|
7d277b1c06f5d15d191b52736a4ad5f47473a07e
|
/appSourceEmail.R
|
4219d022b190880e438be1ad249ca9def932f080
|
[
"MIT"
] |
permissive
|
OwnYourData/app-relationship
|
a9092374014f39ef28e779aab0e2d17095550007
|
5c65cb622281dc4ea42aec5512d1fb73439072c4
|
refs/heads/master
| 2021-06-22T00:51:52.918742
| 2017-06-04T20:56:18
| 2017-06-04T20:56:18
| 58,932,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,287
|
r
|
appSourceEmail.R
|
# UI builder for the email-setup tab ("Email").
# Renders an icon column next to a form with name/email inputs for two
# people, save/cancel action buttons, and a status output placeholder.
# (Labels and help text are in German, as in the rest of the app.)
appSourceEmail <- function() {
  icon_column <- column(
    2,
    img(src = "email.png", width = "100px", style = "margin: 35px;")
  )
  form_column <- column(
    10,
    helpText("Trage hier die Namen und Emailadressen der beiden Personen ein, die den Beziehungstracker nutzen möchten."),
    textInput("name1", "Name von Person #1:"),
    textInput("email1", "Emailadresse für Person #1:"),
    hr(),
    textInput("name2", "Name von Person #2:"),
    textInput("email2", "Emailadresse für Person #2:"),
    actionButton("saveRelationEmail", "Speichern", icon("save")),
    actionButton("cancelRelationEmail", "Emailversand beenden", icon("trash")),
    br(),
    br(),
    uiOutput("relationEmailStatus")
  )
  tabPanel("Email", br(), fluidRow(icon_column, form_column))
}
|
d6dd0f2d6110812ac752ce4c4670dbf77f38bb7b
|
8156558b6b34ea0966959f59ed4df3cefbe66775
|
/S2_AnomalySaving.R
|
8bb764f26b7f915c4b8cb9ade326d6e6a6ca57a7
|
[] |
no_license
|
earthlybimo/SpatialDampedAnomalyPersistence-SDAP
|
400b5ad888fedcf7b96bec1152f56a7af5fe2783
|
8d65986560453b815daab7cdb311f8e68728f9e2
|
refs/heads/main
| 2023-04-11T01:47:24.821108
| 2021-10-07T13:59:16
| 2021-10-07T13:59:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,902
|
r
|
S2_AnomalySaving.R
|
#!/usr/bin/env Rscript
# Command-line arguments: 1. hemisphere (1=nh, 2=sh), 2. (optional) month
# number 1:12, 3. (optional) year. Missing optional args mean "all".
args = commandArgs(trailingOnly=TRUE)
### This script uses the Climatological SIP and Initial condition for each date to find the SIP anomalies. It is designed to be run as part of a batch process, with system args giving 1. Hemisphere, 2. Month no. and 3. Year. If only 2 syst args are given, all years from 1989 to 2021 are used. Currently it is established to only initialise at the start of each month, but this can be easily changed.
## First, to detect arguments !
if(length(args)==0)
  stop ("Did not get an argument in !? Try re-running with 1. Hemisphere, 2. (optional) Monthno. (1:12) and 3. (optional) year")
ccc=as.integer(args[1]) #Choice of Hemisphere
# Day-of-year for the first day of each month (non-leap-year offsets).
DOYlist=c(1,32,61,92,122,153,183,214,245,275,306,336)
if(length(args)>1) {
  mch=as.integer(args[2])
  temp=DOYlist #default is all months
  if((mch>=1)&(mch<=12)) temp=DOYlist[mch] #Will be just a single month now
  DOYlist=temp
  remove(temp,mch)}
Ylist=1989:2021
if(length(args)==3) {
  temp=as.integer(args[3])
  if((temp>=1989)&(temp<=2022)) Ylist=temp #Will be just a single year now
  remove(temp)}
# Map the numeric hemisphere choice to a directory-name prefix.
# NOTE(review): if ccc is anything other than 1 or 2, HEM stays undefined and
# the HEMPATH line below errors -- confirm callers always pass 1 or 2.
if(ccc == 1) HEM="nh"
if(ccc== 2) HEM="sh"
#Same as in every script:
MASTERPATH="~/WORK/Data/SDAP/"
HEMPATH=paste0(MASTERPATH,"/",HEM)
# Datapath1=paste0(MASTERPATH,"/OSI450/osisaf.met.no") #OSI 450 Not needed anymore
# Datapath2=paste0(MASTERPATH,"/OSI430b/osisaf.met.no")#OSI 430b
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here. Also, ldply() is used further down
# but plyr is never loaded in this file -- presumably attached elsewhere;
# verify before running standalone.
require(ncdf4)
require(spheRlab)
require(tictoc)
library(parallel) #Certain functions can also be parallelised to speed process, but optional
#Declaring some functions beforehand for ease of usage
# Threshold an array: 1 where value >= dlevel, 0 where value < dlevel.
# NA entries remain NA. The result is an array shaped like the input
# (for a plain vector, its length is used as the single dimension,
# matching the original behavior of returning a 1-d array).
binarise <- function(somearr, dlevel) {
  shape <- dim(somearr)
  if (is.null(shape)) {
    shape <- length(somearr)  # 1-d inputs carry no dim attribute
  }
  flags <- array(dim = shape)
  above <- somearr >= dlevel
  flags[above] <- 1
  flags[!above] <- 0
  flags
}
# Index of the row in df (expects $lon and $lat columns) nearest to the
# point (plat, plon) by great-circle distance.
# NOTE(review): assumes spheRlab::sl.gc.dist(..., sequential = FALSE) returns
# distances from the first coordinate to each remaining one -- confirm.
findclosestpointindf <- function(plat, plon, df) {
  dists <- sl.gc.dist(c(plon, df$lon), c(plat, df$lat), sequential = FALSE)
  which.min(dists)
}
# Attach interpolated climatological SIP values to every point of an ice-edge
# contour. For each contour point, the SIP is linearly interpolated between
# the two grid endpoints of the edge it sits on, and stored in a new
# ICprobabofnearpt field on the segment.
#
# Arguments:
#   cnt      - a spheRlab contour (as from sl.contours with
#              return.edge.info = TRUE): cnt[[1]]$segments[[i]] carries lat,
#              edge.endpoints (grid indices, one row per point) and
#              edge.relpos (relative position along the edge).
#   sip_clim - climatological SIP values indexed by grid point.
# Returns: cnt with ICprobabofnearpt added to each segment.
anomalyatIE <-function (cnt,sip_clim) { #Function to find anomalies (from sip_clim) at initial IceEdge contour (cnt)
  for(i in 1:length(cnt[[1]]$segments)){ #Now i is each segment of the edge
    ICprob=vector()
    for(j in 1:length(cnt[[1]]$segments[[i]]$lat)){ #For each point in this segment
      #This has to do with how contours are depicted in spheRlab. Essentially, they are somewhere between actual grid points, and the ratio of distance is also given.
      edgepts=cnt[[1]]$segments[[i]]$edge.endpoints[j,]
      edgesic=sip_clim[edgepts]
      wgtoffirst=cnt[[1]]$segments[[i]]$edge.relpos[j]
      # NOTE(review): despite the name "wgtoffirst", the formula below weights
      # the SECOND endpoint by wgtoffirst -- i.e. relpos appears to be the
      # fraction of the way toward the second endpoint. Confirm against
      # sl.contours' edge.relpos convention.
      sipforpt=(edgesic[[1]]*(1-wgtoffirst))+(edgesic[[2]]*(wgtoffirst)) #For properly weighing the climatologies at both points
      ICprob[j]=sipforpt
      remove(edgepts,edgesic,wgtoffirst,sipforpt)
    }
    cnt[[1]]$segments[[i]]$ICprobabofnearpt=ICprob
    remove(ICprob)
  }
  return (cnt)
}
# Load the OSI-SAF grid definition into an isolated environment, then pull
# out the pieces used below (grid object, lon/lat vectors, grid length).
gridFilename <- paste0(HEMPATH, "/Outputs/gridfile_OSISAF450", HEM, "_all_inclFlags")
gridenv <- new.env()
load(gridFilename, envir = gridenv)
grd <- gridenv$grd
lon <- grd$lon
lat <- grd$lat
grlen <- length(lon)
remove(gridFilename)
# yrs=2001; # yodi=1 #For testing
# Main loop: for each (year, init day-of-year), load the filtered climatology,
# derive SIP anomalies on the grid, apply initial-state corrections, and save.
for(yrs in Ylist){
  for(yodi in DOYlist){# Or for initialising at all days, 1:366
    Climatologyfile=Sys.glob(sprintf("%s/Outputs/Climatology/Filtered_Climatologyfor%d*_yod%03d",HEMPATH,yrs,yodi))
    if(length(Climatologyfile)==0) {warning(paste("Climafile not found for year ",yrs," day ",yodi,sep = ""));next()}
    # load() brings SIPclimaGFilt, ICsic, ICdate, yod (and possibly more)
    # into the workspace -- their definitions live in the climatology file.
    load(Climatologyfile)
    savename=sprintf("%s/Outputs/savedSIP/SIPinheritedetcfor%s",HEMPATH,ICdate)
    if(file.exists(savename)) next() #If this file already exists, don't have to redo it.
    tic("One initial step")
    SIPclima=SIPclimaGFilt #Using the gaussian filtered version. ``````````````````
    err=FALSE
    tryCatch({ #Some files give errors with the contour making so wrapping them here
      climMedcnt=sl.contours(var = SIPclima, elem=grd$elem,lon = grd$lon,lat=grd$lat,levels = c(0.5),return.edge.info = TRUE) #Make median contour
      ICcnt0=sl.contours(var = ICsic, elem=grd$elem,lon = grd$lon,lat=grd$lat,levels = c(0.15),return.edge.info = TRUE) #0.15 cntr from initcondtn
    },
    error=function(error_message){
      message("Apparently there was an error, in one of the contour making steps:::")
      message(error_message)
      message(paste("---For date: ",ICdate,sep=""))
      #next() This next doesnt work because of the way trycatch works
      err<<-TRUE #So we have to do this 'hack'. Also we have to use '<<-' and not '=' for this to work!
    }
    ) #End of tryCatch
    if(err==TRUE) next()
    ## Here, we are using a shortcut and simply taking the CLIM_sip and comparing it later in the forecasting phase, rather than taking the anomaly against median (aka CLIM - 50%) and compare against 50% later. In effect, the result is the same.
    #First inherit climaSIP to Iniital contour
    ICcnt=anomalyatIE(ICcnt0,SIPclima) #climatological SIP of nearest grid along each contour pnt is added
    # NOTE(review): ldply comes from plyr, which is not loaded in this file's
    # header -- confirm it is attached elsewhere in the pipeline.
    ICcnt_df <- ldply(ICcnt[[1]]$segments,data.frame) #Make this into a dataframe (necessary later)
    ## Second pass to median contour
    cliMedncnt_df <- ldply(climMedcnt[[1]]$segments,data.frame)
    #Make the CLIMMED contour into a dataframe, for easier dist measurements
    # For every point on the climatological-median contour, inherit the SIP
    # value of the nearest initial-edge contour point.
    climMed_inheritedSIP=array(dim=length(cliMedncnt_df$lat))
    for(i in 1:length(cliMedncnt_df$lat)){ #For every point of the CLIM-MED cntur
      nearpt=findclosestpointindf(cliMedncnt_df$lat[[i]],cliMedncnt_df$lon[[i]],ICcnt_df)
      climMed_inheritedSIP[i]=ICcnt_df$ICprobabofnearpt[[nearpt]]}
    ## Then pass to all points on the grid: (note that we are only using ocean points)
    inheritedSIP=array(dim=grlen)
    for(i in 1:grlen){ #For each gridpoint
      nearpt=findclosestpointindf(lat[[i]],lon[[i]],cliMedncnt_df)
      inheritedSIP[i]=climMed_inheritedSIP[nearpt]
    }
    ## Last, do some local corrections based on initial forecast (Initial state correction)
    sicICbin=binarise(ICsic,0.15) #Only ice or no ice in the initial date
    correctedSIP=inheritedSIP
    eps=0.05 #eps=Tolerance for correction down below.
    for(i in 1:grlen){ #For each grid point
      #If initial ice, and SIPclima is NOT >= SIPlimt, then that means NOT ice fcst, so need to correct
      if((sicICbin[i]==1)&(SIPclima[i]<correctedSIP[i])) correctedSIP[i]=SIPclima[i]-eps
      # NOTE(review): in the next two conditions "(a<b)+eps" / "(a>=b-eps)"
      # the first evaluates (a<b) THEN adds eps, making the test always
      # truthy when sicICbin[i]==1 -- likely intended "a < b + eps".
      # Confirm the precedence/parenthesization before relying on this.
      if((sicICbin[i]==1)&(SIPclima[i]<correctedSIP[i])+eps) correctedSIP[i]=correctedSIP[i]-eps
      #If NO ice, and SIPclima is>= SIPlimt, then that means ice fcst, so need to correct
      if((sicICbin[i]==0)&(SIPclima[i]>=correctedSIP[i])) correctedSIP[i]=SIPclima[i]+eps
      if((sicICbin[i]==0)&(SIPclima[i]>=correctedSIP[i]-eps)) correctedSIP[i]=correctedSIP[i]+eps
    }
    #For checking, forecast for day1 is:
    # fcst=SIPinheritedI*0;fcst[SIPclima>SIPcorrected]=1;
    # Can check mismatch with which(fcst>sicICbin) and which(fcst<sicICbin)
    ## Saving!
    # NOTE(review): row 2 of SIPreturn is never filled (stays NA) -- this is
    # a legacy 3-row layout kept for compatibility with older scripts.
    SIPreturn=array(dim=c(3,grlen)) #In some old scripts, this format was used so this is left here
    SIPreturn[1,]=inheritedSIP
    SIPreturn[3,]=correctedSIP
    Comments=paste0("First taking anomalies from IC to MP by simple nearest neighbour from MP. Then correcting points with false initial forecast, and adding a epsilon buffer to inherited corrected SIP if it does not have enough. Using epsilon: ",eps)
    save(SIPreturn,yod,Climatologyfile,ICsic,SIPclima,grd,ICdate,inheritedSIP,correctedSIP,Comments,file = savename,version = 2)
    print(paste("Saved file: ",basename(savename),sep = ""))
    remove(SIPreturn,SIPclima,climMedcnt,ICcnt,ICcnt0,ICcnt_df,cliMedncnt_df,ICsic)
    toc()
  } #Loop over other initialisation dates and years
}
|
82a811a9232d1cf3b9f375b34bb9424e19f485df
|
ceec3182ad1b8cf4007c90109f2948f0704bc3a8
|
/HW2/BLB/BLB_lin_reg_job.R
|
b6938e7c967814ea21b2ed14ecc2b1d9d338a48e
|
[
"MIT"
] |
permissive
|
jmwerner/STA-250
|
a42550ec5e8a5a1a36394525791ad75f3d019e9a
|
203b442f809117d4ed0dfaeb50ed0c2ad2d08a33
|
refs/heads/master
| 2020-12-24T11:25:46.878804
| 2013-12-12T11:18:55
| 2013-12-12T11:18:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,646
|
r
|
BLB_lin_reg_job.R
|
####################################
# Author: Jeremy Werner            #
# Assignment: STA 205 HW2          #
# Due date: November 13, 2013      #
####################################
# Bag of Little Bootstraps (BLB) worker: one job = one (subsample s,
# bootstrap replicate r) pair. Draws a size-b subsample of the big-matrix
# dataset, reweights it multinomially, fits a weighted lm, and writes the
# coefficients to output/coef_SS_RR.txt.
# NOTE(review): rm(list = ls()) in a script is an anti-pattern -- it wipes
# the caller's workspace when sourced interactively.
rm(list = ls()) #Starting with a clean workspace (for safety)
mini = FALSE
# Load packages:
library(BH)
library(bigmemory.sri)
library(bigmemory)
library(biganalytics)
#============================== Setup for running on Gauss... ==============================#
args <- commandArgs(TRUE)
cat("Command-line arguments:\n")
print(args)
###################
sim_start <- 1000 #lowest possible dataset number
s_num = 5 #number of subsamples
r_num = 50 #number of boostraps per subsample
gamma = .7
###################
# NOTE(review): in the no-argument branch, s and r are never defined, so the
# cat() of "S index"/"R index" below (and the outfile name) would error --
# confirm this branch is only used for interactive debugging.
if(length(args)==0){
  job_num = 1
  sim_num = sim_start + job_num
  set.seed(121231)
}else{
  job_num = as.numeric(args[1])
  sim_num <- sim_start + job_num
  # Find r and s indices:
  # Jobs are numbered so that s cycles fastest: job 1..5 -> r=1, etc.
  s = ((job_num-1) %% s_num) + 1
  r = ceiling(job_num/s_num)
  sim_seed = (762*s + 121231)
  #Setting seed to get the correct rows from the sample function based on the value of s
  # (every replicate r of the same subsample s must draw identical rows)
  set.seed(sim_seed)
}
cat(paste("\nAnalyzing simulation number ",sim_num,"...\n\n",sep=""))
cat(paste("S index:", s, " R index:", r, "\n\n"))
#============================== Run the simulation study ==============================#
# I/O specifications:
# NOTE(review): outpath is defined but unused -- the outfile below hard-codes
# the "output/" prefix instead.
datapath <- "/home/pdbaines/data"
outpath <- "output/"
#datapath = "/Users/jmwerner1123/Dropbox/GitHub/Invisible/STA250/HW2/BLB"
#outpath = "/Users/jmwerner1123/Dropbox/GitHub/Invisible/STA250/HW2/BLB/output"
# mini or full?
if(mini){
  rootfilename <- "blb_lin_reg_mini"
}else{
  rootfilename <- "blb_lin_reg_data"
}
# Filenames:
data_file = paste0(datapath,"/",rootfilename, ".desc")
# Attach big.matrix :
data = attach.big.matrix(data_file)
# Remaining BLB specs:
dims = dim(data)
n = dims[1] #Number of observations
d = dims[2] - 1 #Number of covariates
b = ceiling(n ^ gamma) #Size of bootstrap dataset
# Extract the subset:
# (last column of the big.matrix is the response, the rest are covariates)
subset_index = sample(1:n, size = b, replace = FALSE)
subset = data[subset_index,]
y = subset[,dims[2]] #Response
sub_data = subset[,-dims[2]]
# Reset simulation seed:
# (distinct per job so each bootstrap replicate draws different weights)
set.seed(762*sim_num + 121231) #unique seed for each job
# Bootstrap dataset:
# Multinomial counts summing to n over the b subsample rows -- these act as
# frequency weights, emulating a full-size-n bootstrap resample.
bootstrap_data_weights = rmultinom(1, n, rep(1/b,b))
# Fit lm:
# (0 + sub_data: no intercept term; sub_data enters as a matrix of covariates)
linear_model = lm(y~(0+sub_data), weights = bootstrap_data_weights)
# Output file:
outfile = paste0("output/","coef_",sprintf("%02d",s),"_",sprintf("%02d",r),".txt")
# Save estimates to file:
write.table(linear_model$coeff, file = outfile, sep = ",",
            col.names = TRUE, row.names = FALSE)
cat("Program complete, self destruct sequence initiated... \n")
q("no")
|
9c732c9c6a853d6a404b471d39d386286edc48ec
|
9706133df19d93395a748068a07ea071a03eeeaa
|
/projects/roland_garros_project/modelling/player_varying_intercept.R
|
129a32bb286d6355540f1b095019766d0efd86e4
|
[] |
no_license
|
petertea96/tennis_analytics
|
7fa2d924f6a6db8515c095dd9e34d01748c599db
|
e267a4a463d4e0b3522117b370289a6ae9b3cb27
|
refs/heads/master
| 2023-07-31T21:54:59.511368
| 2021-09-22T03:01:03
| 2021-09-22T03:01:03
| 291,520,867
| 3
| 2
| null | 2020-10-22T06:04:20
| 2020-08-30T17:42:36
|
HTML
|
UTF-8
|
R
| false
| false
| 29,009
|
r
|
player_varying_intercept.R
|
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### == PLOT MODEL 2 RESULTS (JUST PLAYER-VARYING INTERCEPTS) =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# -- Plot Player Intercepts (Wide vs T) for Model 2 (Not Model 4)
# -- Plot transformed intercept from logit scale --> probability scale
# NOTE(review): hard-coded, user-specific working directory.
setwd("/Users/petertea/tennis_analytics/projects/roland_garros_project/")
library(dplyr)
library(rstan)
options(tibble.print_max = 30)
#library(reshape2)
# -- Fit STAN Model
#library(cmdstanr)
#library(data.table)
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### ===== Data Processing Steps =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# Read ATP (men's) tracking data; empty strings are treated as NA so the
# later complete.cases() filter can drop them.
atp_data <- read.csv('./collect_data/data/atp_processed_roland_garros_tracking_data.csv',
                     stringsAsFactors = FALSE,
                     na.strings=c("","NA"))
# wta_data <- read.csv('./collect_data/data/wta_processed_roland_garros_tracking_data.csv',
#                      stringsAsFactors = FALSE)
# Build the ATP modelling frame:
#  - y codes the intended serve direction (1 = T, 2 = Wide, 3 = Body);
#    the LAST level (Body) is the reference category in the STAN model.
#  - categorical predictors are dummy-coded by hand because model.matrix()
#    was "not cooperating with factors" (original author's note below).
#  - rows with any NA in the selected columns are dropped (complete.cases).
atp_training_data <- atp_data %>%
  distinct() %>%
  mutate(
    # -- Remember, the last level is the reference level in the STAN model
    y = ifelse(intended_serve_dir == 'Body', 3,
               ifelse(intended_serve_dir == 'T', 1,
                      ifelse(intended_serve_dir == 'Wide', 2, NA))),
    is_first_serve = ifelse(serve_num == 1, 1,0) ) %>%
  select(y, server_name, returner_name,
         is_break_point, x_ball_at_serve,
         is_first_serve,
         court_side,
         is_prev_doublefault, is_prev_ace,
         serve_impact_from_center,
         server_hand, returner_hand,
         point_importance,
         returner_backhand_loc,
         prev_intended_serve_loc1,
         prev_intended_serve_loc2,
         player_hands_match,
         z_ball_at_serve,
         player_hands_match, match_id
  ) %>%
  mutate(## --model.matrix() not cooperating with factors...I'll do this manually
    prev_intended_serve_loc1T = ifelse(prev_intended_serve_loc1 == 'T',1,0),
    prev_intended_serve_loc1Wide = ifelse(prev_intended_serve_loc1 == 'Wide',1,0),
    prev_intended_serve_loc2T = ifelse(prev_intended_serve_loc2 == 'T',1,0),
    prev_intended_serve_loc2Wide = ifelse(prev_intended_serve_loc2 == 'Wide',1,0),
    is_second_serve = ifelse(is_first_serve == 1,0,1),
    court_side_ad = ifelse(court_side == 'DeuceCourt', 0,1),
    returner_backhand_locT = ifelse(returner_backhand_loc == 'T', 1,0),
    server_handL = ifelse(server_hand == 'left-handed', 1,0),
    # 11.89 m is the baseline-to-net distance; presumably this measures how
    # far inside the baseline the serve was struck -- TODO confirm.
    distance_inside_serve = 11.89 - abs(x_ball_at_serve),
    interaction_s_hand_court_side = server_handL * court_side_ad,
    interaction_ss_bh_T = is_second_serve*returner_backhand_locT
  ) %>%
  filter(complete.cases(.))
# Integer server id (1..N_1) from alphabetical factor order; the same
# ordering is used later to label the per-player posterior summaries.
atp_training_data$server_index <- as.numeric(factor(atp_training_data$server_name))
# Data list for the player-varying-intercept STAN model:
#   N   = number of serves, N_1 = number of distinct servers,
#   id_1 = server index per serve, y = outcome (1=T, 2=Wide, 3=Body),
#   K   = number of outcome categories.
atp_stan_datalist2 <- list(
  N = nrow(atp_training_data),
  N_1 = length(unique(atp_training_data$server_index)),
  id_1 = atp_training_data$server_index,
  y = atp_training_data$y,
  K = 3
)
# -- Prediction with RSTAN -----
options(mc.cores = 4) # run the MCMC chains in parallel
# Compile the model once; it is reused below for the WTA fit as well.
player_varying_intercept_model <- stan_model('./modelling/stan_files/predict_player_intercept.stan')
# 2000 iterations per chain (rstan default: 4 chains, half warmup).
fit_atp_model <- sampling(player_varying_intercept_model,
                          data = atp_stan_datalist2,
                          iter = 2000)
# Pull posterior draws from the fitted model.  player_probs is a
# draws x players x K array (K dims: 1 = T, 2 = Wide, 3 = Body, matching
# the y coding above).
extract_stan_model <- extract(fit_atp_model)
names(extract_stan_model)
dim(extract_stan_model$player_probs)
dim(extract_stan_model$player_preds)
dim(extract_stan_model$player_probs[,,1])

# Summarise the posterior draws of one serve-direction probability.
# `mat` is a draws x players matrix; returns per-player vectors:
# posterior mean, median, sd and the central 95% credible-interval bounds.
prob_summary <- function(mat) {
  list(
    mean   = colMeans(mat),
    median = apply(mat, 2, median),
    sd     = apply(mat, 2, sd),
    q025   = apply(mat, 2, quantile, probs = 0.025),
    q975   = apply(mat, 2, quantile, probs = 0.975)
  )
}

t_sum    <- prob_summary(extract_stan_model$player_probs[,,1]) # Prob(T)
wide_sum <- prob_summary(extract_stan_model$player_probs[,,2]) # Prob(Wide)
body_sum <- prob_summary(extract_stan_model$player_probs[,,3]) # Prob(Body)

# One row per server; row order matches the alphabetical factor levels
# used to build server_index above.
atp_player_df <- data.frame(
  name = levels(factor(atp_training_data$server_name)),
  median_t = t_sum$median,
  mean_t = t_sum$mean,
  sd_t = t_sum$sd,
  t_quant_2.5 = t_sum$q025,
  t_quant_97.5 = t_sum$q975,
  median_wide = wide_sum$median,
  mean_wide = wide_sum$mean,
  sd_wide = wide_sum$sd,
  wide_quant_2.5 = wide_sum$q025,
  wide_quant_97.5 = wide_sum$q975,
  median_body = body_sum$median,
  mean_body = body_sum$mean,
  sd_body = body_sum$sd,
  body_quant_2.5 = body_sum$q025,
  body_quant_97.5 = body_sum$q975,
  gender = 'Men'
)

# Attach per-server sample sizes (distinct matches and serve count).
player_num_matches <-
  atp_training_data %>%
  group_by(server_name) %>%
  summarise(num_matches = length(unique(match_id)),
            num_serves = n())

atp_player_df <- atp_player_df %>%
  left_join(player_num_matches,
            by = c('name' = 'server_name'))
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### == FITTING STAN BAYESIAN MODELS W/ WTA DATA =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
# Same pipeline as the ATP block above, applied to the women's data.
# NOTE(review): this select() additionally keeps z_scaled_point_importance,
# which the ATP version does not -- confirm the asymmetry is intentional.
wta_data <- read.csv('./collect_data/data/wta_processed_roland_garros_tracking_data.csv',
                     stringsAsFactors = FALSE)
wta_training_data <- wta_data %>%
  distinct() %>%
  mutate(
    # -- Remember, the last level is the reference level in the STAN model
    y = ifelse(intended_serve_dir == 'Body', 3,
               ifelse(intended_serve_dir == 'T', 1,
                      ifelse(intended_serve_dir == 'Wide', 2, NA))),
    is_first_serve = ifelse(serve_num == 1, 1,0) ) %>%
  select(y, server_name, returner_name,
         is_break_point, x_ball_at_serve,
         is_first_serve,
         court_side,
         is_prev_doublefault, is_prev_ace,
         serve_impact_from_center,
         server_hand, returner_hand,
         point_importance,
         z_scaled_point_importance,
         returner_backhand_loc,
         prev_intended_serve_loc1,
         prev_intended_serve_loc2,
         player_hands_match,
         z_ball_at_serve,
         player_hands_match, match_id
  ) %>%
  mutate(## --model.matrix() not cooperating with factors...I'll do this manually
    prev_intended_serve_loc1T = ifelse(prev_intended_serve_loc1 == 'T',1,0),
    prev_intended_serve_loc1Wide = ifelse(prev_intended_serve_loc1 == 'Wide',1,0),
    prev_intended_serve_loc2T = ifelse(prev_intended_serve_loc2 == 'T',1,0),
    prev_intended_serve_loc2Wide = ifelse(prev_intended_serve_loc2 == 'Wide',1,0),
    is_second_serve = ifelse(is_first_serve == 1,0,1),
    court_side_ad = ifelse(court_side == 'DeuceCourt', 0,1),
    returner_backhand_locT = ifelse(returner_backhand_loc == 'T', 1,0),
    server_handL = ifelse(server_hand == 'left-handed', 1,0),
    distance_inside_serve = 11.89 - abs(x_ball_at_serve),
    interaction_s_hand_court_side = server_handL * court_side_ad,
    interaction_ss_bh_T = is_second_serve*returner_backhand_locT
  ) %>%
  filter(complete.cases(.))
# Integer server id, ordered by alphabetical factor levels.
wta_training_data$server_index <- as.numeric(factor(wta_training_data$server_name))
# wta_training_data %>%
#   filter(server_name == 'S.WILLIAMS') %>%
#   .$y %>%
#   table()
# wta_training_data %>%
#   filter(server_name == 'O.JABEUR') %>%
#   .$y %>%
#   table()
# Same data-list shape as atp_stan_datalist2.
wta_stan_datalist2 <- list(
  N = nrow(wta_training_data),
  N_1 = length(unique(wta_training_data$server_index)),
  id_1 = wta_training_data$server_index,
  y = wta_training_data$y,
  K = 3
)
# Reuse the compiled model from the ATP section.
fit_wta_model <- sampling(player_varying_intercept_model,
                          data = wta_stan_datalist2,
                          iter = 2000)
# Posterior summaries for the WTA fit (mirrors the ATP block above).
extract_wta_stan_model <- extract(fit_wta_model)
# -- Prob(T): draws x players matrix for outcome dimension 1.
wta_probT_df <- extract_wta_stan_model$player_probs[,,1]
wta_t_medians <- apply(wta_probT_df, 2, median)
wta_t_mean <- colMeans(wta_probT_df)
wta_t_sd <- apply(wta_probT_df , 2, sd)
# Central 95% credible-interval bounds.
wta_t_2.5_quantile <- apply(wta_probT_df, 2, quantile, probs=0.025)
wta_t_97.5_quantile <- apply(wta_probT_df, 2, quantile, probs=0.975)
# -- Prob(Wide)
wta_probWide_df <- extract_wta_stan_model$player_probs[,,2]
wta_wide_medians <- apply(wta_probWide_df , 2, median)
wta_wide_mean <- colMeans(wta_probWide_df)
wta_wide_sd <- apply(wta_probWide_df , 2, sd)
wta_wide_2.5_quantile <- apply(wta_probWide_df , 2, quantile, probs=0.025)
wta_wide_97.5_quantile <- apply(wta_probWide_df , 2, quantile, probs=0.975)
# -- Prob(Body)
wta_probBody_df <- extract_wta_stan_model$player_probs[,,3]
wta_body_medians <- apply(wta_probBody_df, 2, median)
wta_body_mean <- colMeans(wta_probBody_df)
wta_body_sd <- apply(wta_probBody_df , 2, sd)
wta_body_2.5_quantile <- apply(wta_probBody_df, 2, quantile, probs=0.025)
wta_body_97.5_quantile <- apply(wta_probBody_df, 2, quantile, probs=0.975)
# One row per server, in alphabetical factor-level order.
wta_player_df <- data.frame(
  name = levels(factor(wta_training_data$server_name)),
  median_t = wta_t_medians,
  mean_t = wta_t_mean,
  sd_t = wta_t_sd,
  t_quant_2.5 = wta_t_2.5_quantile,
  t_quant_97.5 = wta_t_97.5_quantile,
  median_wide = wta_wide_medians,
  mean_wide = wta_wide_mean,
  sd_wide = wta_wide_sd,
  wide_quant_2.5 = wta_wide_2.5_quantile,
  wide_quant_97.5 = wta_wide_97.5_quantile,
  median_body = wta_body_medians,
  mean_body = wta_body_mean,
  sd_body = wta_body_sd,
  body_quant_2.5 = wta_body_2.5_quantile,
  body_quant_97.5 = wta_body_97.5_quantile,
  gender = 'Women'
)
# Attach per-server sample sizes.
wta_player_num_matches <-
  wta_training_data %>%
  group_by(server_name) %>%
  summarise(num_matches = length(unique(match_id)),
            num_serves = n())
wta_player_df <- wta_player_df %>%
  left_join(wta_player_num_matches,
            by = c('name' = 'server_name'))
# Interactive inspection of the extremes (View() opens the RStudio viewer).
wta_player_df %>%
  arrange(desc(mean_wide)) %>% View()
wta_player_df %>%
  arrange(desc(mean_body)) %>% View()
# -- Save Data: stack both tours into a single plotting frame
# (distinguished by the gender column) and persist it to CSV.
plot_data <- rbind(atp_player_df, wta_player_df)
write.csv(plot_data, './modelling/compare_intercepts_probs/plot_player_probs_dataframe.csv', row.names = FALSE)
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
### == PLOT BOTH MEN AND WOMEN PROBABILITIES =====
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###
library(ggplot2)
library(ggrepel)
# Project ggplot theme; defines peter_theme().  Hard-coded user path.
source(file = "/Users/petertea/tennis_analytics/projects/roland_garros_project/src/ggplot_theme.R")

plot_data <- read.csv('./modelling/compare_intercepts_probs/plot_player_probs_dataframe.csv')

wta_plot_data <- plot_data %>%
  filter(gender == 'Women') #%>%
#filter(num_serves>100)

# Interactive inspection of the highest P(T) servers.
wta_plot_data %>%
  arrange(desc(mean_t)) %>%
  View()

# Players highlighted and labelled on the scatter plot.
wta_labels <- c('O.JABEUR', 'K.SINIAKOVA', 'N.OSAKA',
                'M.VONDROUSOVA', 'S.SORRIBESTORMO', 'J.KONTA',
                'P.KVITOVA', 'S.KENIN', 'S.WILLIAMS',
                'I.SWIATEK')

# Repelled name label for one highlighted player.  nudge_x/nudge_y place
# each label by hand; alpha = NA is the geom_label_repel default (opaque),
# so only I.SWIATEK below overrides it.  This replaces ten copy-pasted
# geom_label_repel() calls that differed only in player name and nudges.
wta_label_layer <- function(player, nudge_x, nudge_y, alpha = NA) {
  geom_label_repel(data = wta_plot_data %>%
                     filter(name == player),
                   aes(y = mean_wide,
                       x = mean_t,
                       label = name),
                   alpha = alpha,
                   fill = '#EC7063',
                   nudge_y = nudge_y,
                   nudge_x = nudge_x,
                   fontface = 'bold',
                   size = 2.3,
                   show.legend = FALSE,
                   arrow = arrow(ends = 'last',
                                 type = 'closed',
                                 length = unit(0.015, "npc")),
                   point.padding = unit(0.5, "lines"),
                   label.padding = unit(0.25, "lines"))
}

# Posterior mean P(T) vs P(Wide) for the women's tour.  Kept as a
# top-level expression (not assigned) so the plot prints and the
# ggsave() call below can pick it up via last_plot().
ggplot(wta_plot_data,
       aes(x = mean_t, y = mean_wide)) +
  # Reference line: players above it favour Wide, below it favour T.
  geom_abline(intercept = 0, slope = 1, size = 0.25, linetype='dashed') +
  geom_point(shape = 21,
             size = 2,
             alpha = 0.75,
             fill = '#EC7063') +
  # Re-draw the highlighted players in yellow on top.
  geom_point(data = wta_plot_data %>%
               filter(name %in% wta_labels),
             aes(x = mean_t, y = mean_wide),
             fill = '#ffeda5',
             shape = 21,
             size = 2.5) +
  wta_label_layer('S.WILLIAMS',      nudge_x = 0.02,  nudge_y = 0) +
  wta_label_layer('O.JABEUR',        nudge_x = 0.0,   nudge_y = 0.01) +
  wta_label_layer('K.SINIAKOVA',     nudge_x = 0.0,   nudge_y = 0.01) +
  wta_label_layer('N.OSAKA',         nudge_x = 0.04,  nudge_y = -0.01) +
  wta_label_layer('M.VONDROUSOVA',   nudge_x = -0.02, nudge_y = 0.0) +
  wta_label_layer('S.SORRIBESTORMO', nudge_x = 0.02,  nudge_y = 0.0) +
  wta_label_layer('J.KONTA',         nudge_x = 0.02,  nudge_y = 0.0) +
  wta_label_layer('S.KENIN',         nudge_x = 0.03,  nudge_y = 0.0) +
  wta_label_layer('P.KVITOVA',       nudge_x = 0.0,   nudge_y = 0.01) +
  wta_label_layer('I.SWIATEK',       nudge_x = 0.01,  nudge_y = -0.03, alpha = 0.8) +
  labs(x = 'Mean Prob(T)',
       y = 'Mean Prob(Wide)') +
  ylim(0.2, 0.55) +
  xlim(0.2, 0.56) +
  peter_theme(family_font = 'Tahoma')

# Saves the last displayed plot.
ggsave('./modelling/plots/wta_comparing_player_probs.jpg',
       width=6, height=4,
       dpi = 280)
# -- ATP
atp_plot_data <- plot_data %>%
  filter(gender == 'Men') #%>%
#filter(num_serves>150)

# Interactive inspection of the highest P(T) servers.
atp_plot_data %>%
  arrange(desc(mean_t)) %>%
  View()

# Players highlighted and labelled on the scatter plot.
atp_labels <- c('D.SHAPOVALOV', 'D.THIEM', #'S.WAWRINKA',
                'R.FEDERER', 'N.DJOKOVIC', 'R.NADAL',
                'J.DELPOTRO', 'A.RUBLEV', 'A.ZVEREV',
                'A.MURRAY'#, 'J.SOCK' #'S.TSITSIPAS'
)

# Repelled name label for one highlighted ATP player (blue fill).
# Replaces nine copy-pasted geom_label_repel() calls that differed only
# in player name and nudge offsets.
atp_label_layer <- function(player, nudge_x, nudge_y, alpha = NA) {
  geom_label_repel(data = atp_plot_data %>%
                     filter(name == player),
                   aes(y = mean_wide,
                       x = mean_t,
                       label = name),
                   alpha = alpha,
                   fill = '#5DADE2',
                   nudge_y = nudge_y,
                   nudge_x = nudge_x,
                   fontface = 'bold',
                   size = 2.3,
                   show.legend = FALSE,
                   arrow = arrow(ends = 'last',
                                 type = 'closed',
                                 length = unit(0.015, "npc")),
                   point.padding = unit(0.5, "lines"),
                   label.padding = unit(0.25, "lines"))
}

# Posterior mean P(T) vs P(Wide) for the men's tour; top-level expression
# so the ggsave() below can pick it up via last_plot().
ggplot(data = atp_plot_data,
       aes(x = mean_t, y = mean_wide)) +
  geom_abline(intercept = 0, slope = 1, size = 0.25, linetype='dashed') +
  geom_point(shape = 21,
             size = 2,
             alpha = 0.75,
             fill = '#5DADE2') +
  geom_point(data = atp_plot_data %>%
               filter(name %in% atp_labels),
             aes(x = mean_t, y = mean_wide),
             fill = '#ffeda5',
             shape = 21,
             size = 2.5) +
  atp_label_layer('R.FEDERER',    nudge_x = 0.04,   nudge_y = 0.0) +
  atp_label_layer('R.NADAL',      nudge_x = -0.015, nudge_y = -0.035) +
  atp_label_layer('N.DJOKOVIC',   nudge_x = 0.035,  nudge_y = 0.03) +
  atp_label_layer('D.SHAPOVALOV', nudge_x = -0.04,  nudge_y = 0.023) +
  atp_label_layer('D.THIEM',      nudge_x = 0.03,   nudge_y = 0.022) +
  atp_label_layer('A.ZVEREV',     nudge_x = 0.0,    nudge_y = -0.032) +
  atp_label_layer('J.DELPOTRO',   nudge_x = 0.04,   nudge_y = -0.01) +
  atp_label_layer('A.RUBLEV',     nudge_x = 0.04,   nudge_y = 0) +
  atp_label_layer('A.MURRAY',     nudge_x = 0.04,   nudge_y = 0.00) +
  labs(x = 'Mean Prob(T)',
       y = 'Mean Prob(Wide)') +
  ylim(0.2, 0.55) +
  xlim(0.2, 0.56) +
  peter_theme(family_font = 'Tahoma')

# Saves the last displayed plot.
ggsave('./modelling/plots/atp_comparing_player_probs.jpg',
       width=6, height=4,
       dpi = 280)
# -- Everything on one plot: both tours, coloured by gender, with the
# notable players from both sections re-highlighted in yellow.
player_labels <- c(
  'O.JABEUR', 'K.SINIAKOVA', 'N.OSAKA',
  'M.VONDROUSOVA', 'S.SORRIBESTORMO', 'J.KONTA',
  'P.KVITOVA', 'S.KENIN', 'S.WILLIAMS',
  'I.SWIATEK', 'D.SHAPOVALOV', 'D.THIEM', 'S.WAWRINKA',
  'R.FEDERER', 'N.DJOKOVIC', 'R.NADAL',
  'J.DELPOTRO', 'A.RUBLEV', 'A.ZVEREV',
  'A.MURRAY'
)
ggplot(data = plot_data,
       aes(x = mean_t, y = mean_wide)) +
  # y = x reference: above favours Wide serves, below favours T serves.
  geom_abline(intercept = 0, slope = 1, size = 0.25,linetype='dashed') +
  geom_point(shape = 21,
             size = 2,
             alpha = 0.7,
             aes(fill = gender)) +
  # Highlighted players drawn on top in yellow.
  geom_point(data = plot_data %>%
               filter(name %in% player_labels),
             aes(x = mean_t, y = mean_wide),
             fill = '#ffeda5',
             shape = 21,
             size = 2.5) +
  peter_theme() +
  # Same axis limits as the single-tour plots for comparability.
  ylim(0.2, 0.55) +
  xlim(0.2, 0.56)
|
6783cf113944c84ab478eaf6cdef776df31beea3
|
2883f55324a139a3398635e1ee70e47b440040f6
|
/R/turbidity.R
|
22038c436f457d5b2da4d7e97f12c97338ee611e
|
[
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
jsta/lakeattributes
|
52f57ae2344ce25fc4eed412ae2c4cc09baa55f5
|
8b33c881401ec6e369f2b919dbca694875f641b4
|
refs/heads/master
| 2021-07-09T05:49:01.942800
| 2019-02-03T14:53:19
| 2019-02-03T14:53:19
| 97,721,674
| 0
| 0
| null | 2017-07-19T13:50:22
| 2017-07-19T13:50:22
| null |
UTF-8
|
R
| false
| false
| 603
|
r
|
turbidity.R
|
#'@title Turbidity data table
#'
#'
#'@name turbidity
#'
#'@return turbidity data.frame with all available observed turbidity data.
#'
#'\tabular{lll}{
#'Name \tab Type \tab Description\cr
#'site_id \tab character \tab Unique identifier of site\cr
#'year \tab numeric \tab Year of observation\cr
#'date \tab character \tab Date of observation (May 15th for turbidity averages)\cr
#'turbidity_ntu \tab numeric \tab Observed or estimated turbidity in NTU\cr
#'source \tab character \tab Source (Just in-situ right now)\cr
#'}
#'
#'@docType data
#'
#'@keywords datasets
#'
#'@examples
#'head(turbidity)
#'
#'@export turbidity
# roxygen2 convention for documenting a dataset: the documentation block is
# attached to this NULL placeholder rather than to a function definition.
NULL
|
feab20c1d391cfbc2e8752c2d21aa4fb7176f570
|
f02303dd25516e4a35460cf8cff5e221e036f308
|
/lta.R
|
221838f9bdc7cfc756c31af6546deb79d2bd628f
|
[
"MIT"
] |
permissive
|
IMS-Bio2Core-Facility/BIC016a-Samuel-Furse
|
4848e45334a50a05c8a1f8460e154649aef8c3ba
|
059575b039623a0cf62326b3cf4b9af4b07e5cd4
|
refs/heads/main
| 2023-04-11T18:46:52.074863
| 2021-09-10T09:04:32
| 2021-09-10T09:04:32
| 405,012,516
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,474
|
r
|
lta.R
|
# NOTE(review): clears the whole workspace; kept for the original script's
# run-from-clean behaviour, but avoid sourcing this file from a session
# with work in progress.
rm(list = ls())
# NOTE(review): globally suppresses warnings for the rest of the session;
# consider removing so real problems are not hidden.
options(warn=-1)
#### PRELIMINARIES #############################################################################################
#*Uploads the needed libraries --------------------------------------------------------------------------------
# library() (rather than require()) errors immediately when a package is
# missing instead of silently returning FALSE and failing later.
library(ggplot2)
library(data.table)
library(plotly)
library(DT)
library(R2HTML)
library(stringr)
#** Sets the number of significant digits for the output --------------------------
sig_dig = 4
#** Sets the working directory ---------------------------------------------------------------------------------
#Gets the default wd
default_wd <- getwd()
setwd("C:/Users/Furse/.../300. Data outputs/")# <--- insert here the path to the working (output) directory
new_wd <- getwd()
#Sets the input directory
inputdir <-"C:/Users/Furse/.../200. Data sheets/"# <--- insert here the path to the input directory
#### DEFINES FUNCTIONS #############################################################################################
# Count the zeroes in a vector.  NA entries are ignored: NA == 0 is NA,
# which na.rm = TRUE drops -- the same behaviour as the original
# length(which(x == 0)), since which() also skips NA.
count_zeroes <- function(x){
  sum(x == 0, na.rm = TRUE)
}
# Analysis configuration: ionisation mode (regex-escaped for grep),
# generation, and the two models being contrasted.
considered_mode <- "\\+ve"
considered_generation <- "PWD"
considered_model_1 <-"lean"
considered_model_2 <- "obese"
# Exclusion threshold (percent): rows containing more than this share of
# zeroes are excluded from the analysis.
percentage_of_zeroes <- 33
#### DATA UPLOAD ###############################################################################################################
# - Uploads the .csv files containing the information related to the F1A, PW, -ve datasets.
# - Separates the metadata from the main data
# - Picks only the considered_model_1 and considered_model_2 sets
files_names_originals <- list.files(inputdir)
# Keep only file names exactly 17 characters long -- presumably the fixed
# "MMM, TTT, GGG.csv"-style naming scheme; the substr() calls below depend
# on these fixed positions (TODO confirm against the actual file names).
files_names_originals <- files_names_originals[which(str_length(files_names_originals)==17)]
# Characters 1-3: ionisation mode; 6-8: tissue code; 11-13: generation.
modes <- unique(substr(files_names_originals, start=1, stop=3))
tissues <- unique(substr(files_names_originals, start=6, stop=8))
generations <- unique(substr(files_names_originals, start=11, stop=13))
# PRODUCES TISSUE-SPECIFIC MATRICES
# For each tissue, produces two matrices (one for each considered_model)
# in which the columns are the samples and the rows are the lipids.
# Results are published into the global environment via assign().
for(j in 1: length(tissues)){
  tissue <- tissues[j]
  # Narrow the file list to the current mode / generation / tissue.
  aa <- files_names_originals[grep(files_names_originals, pattern=considered_mode)]
  aa <- aa[grep(aa, pattern=considered_generation)]
  aa<- aa[grep(aa, pattern=tissue)]
  if(length(aa)!=0){
    # Full sheet including the metadata header rows.
    bb<- read.csv(paste0(inputdir, aa), stringsAsFactors = F)
    assign(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_metadata"), bb)
    # Main data: skip the 10 metadata rows, drop rows without an m/z value.
    cc<- read.csv(paste0(inputdir, aa), stringsAsFactors = F , skip=10 )
    cc <- cc[!is.na(cc$m.z),]
    assign(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4)), cc)
    # Split columns by model using the group labels in metadata row 4.
    cc_1 <- cc[,grep(as.vector(bb[4, ]),pattern=considered_model_1)]
    cc_2 <- cc[,grep(as.vector(bb[4, ]),pattern=considered_model_2)]
    rownames(cc_1) <- cc$Lipid.variable
    rownames(cc_2) <- cc$Lipid.variable
    # Zero count per lipid row; columns 1:2 are skipped -- presumably
    # non-sample columns (TODO confirm against the sheet layout).
    cc_1_zeroes <- apply(cc_1[, c(3:ncol(cc_1))], 1, count_zeroes)
    cc_2_zeroes <- apply(cc_2[, c(3:ncol(cc_2))], 1, count_zeroes)
    # Keep rows whose zero share is within the threshold.  Logical
    # subsetting avoids the `x[-which(cond), ]` footgun of the original
    # code: when NO row exceeded the threshold, which() was empty and
    # negative indexing silently dropped EVERY row.
    keep_1 <- cc_1_zeroes <= ncol(cc_1)*percentage_of_zeroes/100
    keep_2 <- cc_2_zeroes <= ncol(cc_2)*percentage_of_zeroes/100
    cc_1_nozeroes <- cc_1[keep_1, ]
    cc_2_nozeroes <- cc_2[keep_2, ]
    assign(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1), cc_1)
    assign(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_2), cc_2)
    assign(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_nozeroes"), cc_1_nozeroes)
    assign(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_2, "_nozeroes"), cc_2_nozeroes)
  }
}
#### A LIPIDS ###############################################################################################################
# Finds the A-type lipids for both the considered models --------------------------------------------------------------------------------------------------
# ** Considered_model_1 --------------------------------------------------------------------------------------
# Finds the A-lipids
# Creates a list (called all_tissues) in which each element is
# a list of lipids coming from the row names of
# each Tissue-specific Matrix
all_tissues <-list()
for(k in 1:length(tissues)){
  tissue <- tissues[k]
  aa <- files_names_originals[grep(files_names_originals, pattern=considered_mode)]
  aa <- aa[grep(aa, pattern=considered_generation)]
  aa<- aa[grep(aa, pattern=tissue)]
  if(length(aa)!=0){
    print(tissue)
    # Retrieve the zero-filtered matrix published by the upload loop above.
    yy <- rownames(get(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_nozeroes") ) )
    assign(paste0("xx_",k), yy)
    all_tissues[[k]] <- yy
  }
}
names(all_tissues) <- tissues
# Drop tissues with no matching file.  The == "NULL" comparison works
# because comparing a list coerces it via as.character(), which deparses
# empty (NULL) entries to the literal string "NULL".
if(length(which(all_tissues=="NULL")) !=0){
  all_tissues<-all_tissues[-which(all_tissues=="NULL")]
}
# intersects all the elements of the all_tissues list to find the A-lipids
# (lipids detected in every tissue).
gg_1<-Reduce(intersect, all_tissues)
assign(paste0 ("A_Lipids_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1), gg_1)
write.csv(gg_1, file=paste0 ("A_Lipids_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, ".csv"))
# Counts the A-lipids in each class; the class code is the first 5
# characters of the lipid name.
classes <- unique(substr(gg_1, start=1, stop =5))
classes_counts_model_1 <- matrix(ncol=1, nrow=length(classes))
rownames(classes_counts_model_1) <- classes
colnames(classes_counts_model_1) <- paste0("A_lipids_",considered_model_1)
for(i in 1:length(classes)){
  # ^ anchors the match to the start of the lipid name.
  classes_counts_model_1[i] <- length(grep(gg_1, pattern=paste0("^",classes[i])))
}
# ** Considered_model_2 --------------------------------------------------------------------------------------
# Finds the A-lipids
# Creates a list (called all_tissues) in which each element is
# a list of lipids coming from the row names of
# each Tissue-specific Matrix (same procedure as for model 1 above).
all_tissues <-list()
for(k in 1:length(tissues)){
  tissue <- tissues[k]
  aa <- files_names_originals[grep(files_names_originals, pattern=considered_mode)]
  aa <- aa[grep(aa, pattern=considered_generation)]
  aa<- aa[grep(aa, pattern=tissue)]
  if(length(aa)!=0){
    print(tissue)
    yy <- rownames(get(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_2, "_nozeroes") ) )
    assign(paste0("xx_",k), yy)
    all_tissues[[k]] <- yy
  }
}
names(all_tissues) <- tissues
# Drop tissues with no matching file (see the as.character()/"NULL"
# deparse note in the model-1 section).
if(length(which(all_tissues=="NULL")) !=0){
  all_tissues<-all_tissues[-which(all_tissues=="NULL")]
}
# intersects all the elements of the all_tissues list to find the A-lipids
gg_2<-Reduce(intersect, all_tissues)
assign(paste0 ("A_Lipids_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_2), gg_2)
write.csv(gg_2, file=paste0 ("A_Lipids_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_2, ".csv"))
# Counts the A-lipids in each class
classes <- unique(substr(gg_2, start=1, stop =5))
classes_counts_model_2 <- matrix(ncol=1, nrow=length(classes))
rownames(classes_counts_model_2) <- classes
colnames(classes_counts_model_2) <- paste0("A_lipids_",considered_model_2)
for(i in 1:length(classes)){
  classes_counts_model_2[i] <- length(grep(gg_2, pattern= paste0("^",classes[i])))
}
# Creates the A_lipids_classes_counts_tot dataframe, where the vectors classes_counts_model_1 and classes_counts_model_2 are merged
# (full outer join on class name; classes missing from one model get 0).
classes_counts_model_1 <- as.data.frame(classes_counts_model_1)
classes_counts_model_2 <- as.data.frame(classes_counts_model_2)
A_lipids_classes_counts_tot <- merge(classes_counts_model_1, classes_counts_model_2, by.x="row.names", by.y="row.names", all=T)
A_lipids_classes_counts_tot[is.na(A_lipids_classes_counts_tot)]<-0
rownames(A_lipids_classes_counts_tot) <- A_lipids_classes_counts_tot[,1]
A_lipids_classes_counts_tot <- A_lipids_classes_counts_tot[,-1]
# Add a merged "Glyc" row summing the glyceride classes (MG/DG/TG).
All_glicerids <- A_lipids_classes_counts_tot[grep(rownames(A_lipids_classes_counts_tot), pattern="DGX|MGX|TGX"),]
A_lipids_classes_counts_tot[nrow(A_lipids_classes_counts_tot)+1,] <- colSums(All_glicerids)
rownames(A_lipids_classes_counts_tot)[nrow(A_lipids_classes_counts_tot)] <- "Glyc"
assign(paste0("A_lipids_classes_counts_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), A_lipids_classes_counts_tot)
write.csv(A_lipids_classes_counts_tot, file=paste0("A_lipids_classes_counts_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"))
# Computes the Jaccard distances between models --------------------------------------------------------------------------------------------------
# This part is not needed for the computation Jaccard distances but only for printing the A_lipids_all_matrix - START
# Merges the vectors containing the A lipids for each model into the
# A_lipids_matrix: one row per lipid in the union; a column holds the
# lipid name when that model contains it, "0" otherwise.
ss <- union(gg_1, gg_2)
ss<- sort(ss)
#
A_lipids_matrix<- cbind(ss, rep(0, length(ss)), rep(0, length(ss)) )
A_lipids_matrix[,2][which(A_lipids_matrix[,1] %in% gg_1)] <- A_lipids_matrix[,1][which(A_lipids_matrix[,1] %in% gg_1)]
A_lipids_matrix[,3][which(A_lipids_matrix[,1] %in% gg_2)] <- A_lipids_matrix[,1][which(A_lipids_matrix[,1] %in% gg_2)]
#
colnames(A_lipids_matrix) <- c("rownames",considered_model_1, considered_model_2)
rownames(A_lipids_matrix) <- A_lipids_matrix[,1]
A_lipids_matrix <- A_lipids_matrix[,-1]
assign(paste0 ("A_Lipids_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), A_lipids_matrix)
write.csv(file= paste0 ("A_Lipids_all_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"), A_lipids_matrix)
# This part is not needed for the computation Jaccard distances but only for printing the A_lipids_all_matrix - END
# Unifies the classes MG, DG and TG under the class Glyc, by changing the row names of the A-lipids
gg_1_substituted <- gsub("MGXX|DGXX|TGXX", "Glyc", gg_1)
gg_2_substituted <- gsub("MGXX|DGXX|TGXX", "Glyc", gg_2)
# Merges the vectors containing the A lipids "substituted" for each model into the A_lipids_matrix
# (same construction as above, but on the Glyc-merged names; note this
# reuses and overwrites the A_lipids_matrix variable and the assign name).
ss <- union(gg_1_substituted, gg_2_substituted)
ss<- sort(ss)
#
A_lipids_matrix<- cbind(ss, rep(0, length(ss)), rep(0, length(ss)) )
A_lipids_matrix[,2][which(A_lipids_matrix[,1] %in% gg_1_substituted)] <- A_lipids_matrix[,1][which(A_lipids_matrix[,1] %in% gg_1_substituted)]
A_lipids_matrix[,3][which(A_lipids_matrix[,1] %in% gg_2_substituted)] <- A_lipids_matrix[,1][which(A_lipids_matrix[,1] %in% gg_2_substituted)]
#
colnames(A_lipids_matrix) <- c("rownames",considered_model_1, considered_model_2)
rownames(A_lipids_matrix) <- A_lipids_matrix[,1]
A_lipids_matrix <- A_lipids_matrix[,-1]
assign(paste0 ("A_Lipids_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), A_lipids_matrix)
write.csv(file= paste0 ("A_Lipids_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"), A_lipids_matrix)
# Computes the Jaccard distances between the two models, per lipid class.
require(jaccard) # third-party 'jaccard' package (CRAN)
if(considered_mode =="\\+ve"){
  # In +ve mode the individual glyceride classes are replaced by the
  # merged "Glyc" class.  !grepl() (logical keep-mask) is used instead of
  # the original rownames[-grep(...)]: with an empty grep() the negative
  # index would have silently dropped every class.
  non_glyc <- !grepl("DG|TGX|MGX", rownames(A_lipids_classes_counts_tot))
  classes <- c(rownames(A_lipids_classes_counts_tot)[non_glyc], "Glyc")
} else {
  classes <- rownames(A_lipids_classes_counts_tot)
}
# One row per class: Jaccard similarity and exact-test p-value.
Jaccard_distances <- matrix(ncol=2, nrow = length(classes))
colnames(Jaccard_distances) <- c("Distance", "Pvalue")
rownames(Jaccard_distances) <- classes
# Binary presence/absence version of the lipid-name matrix built above.
Global_jaccard_matrix <- A_lipids_matrix
Global_jaccard_matrix[which(Global_jaccard_matrix!=0)] = 1
for(i in seq_along(classes)){
  # NOTE(review): the pattern is unanchored here (unlike the "^"-anchored
  # class counting above), so a class code could match mid-name.
  hits <- grep(rownames(Global_jaccard_matrix), pattern=classes[i])
  zz<- as.matrix(Global_jaccard_matrix[hits,])
  if(length(hits)==1 ){
    # A single matching lipid: compare the two scalar indicators.
    uu<- jaccard(as.numeric(zz[1]), as.numeric(zz[2]))
    vv <-jaccard.test(as.numeric(zz[1]), as.numeric(zz[2]), method = "exact")
    Jaccard_distances[i,c(1,2)] <- c(uu,vv$pvalue)
  }
  else if (length(hits)==0) {
    # NA_real_ keeps the matrix numeric; the original c("NA","NA")
    # silently promoted the entire results matrix to character mode.
    Jaccard_distances[i,c(1,2)] <- c(NA_real_, NA_real_)
  }
  else{
    uu<- jaccard(as.numeric(zz[,1]), as.numeric(zz[,2]))
    vv <-jaccard.test(as.numeric(zz[,1]), as.numeric(zz[,2]), method = "exact")
    Jaccard_distances[i,c(1,2)] <- c(uu,vv$pvalue)
  }
}
assign(paste0 ("Jaccard_distances_A_Lipids_", "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), Jaccard_distances)
write.csv(file=paste0 ("Jaccard_distances_A_Lipids_", "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"), Jaccard_distances)
#### U LIPIDS ###############################################################################################################
# Finds the U-lipids for each tissue
# ** Considered_model_1 --------------------------------------------------------------------------------------
# Creates a list (called all_tissues_1) in which each element is
# a list of lipids coming from the row names of
# each Tissue-specific and model-specific Matrix
all_tissues_1 <-list()
for(k in 1:length(tissues)){
tissue <- tissues[k]
# Keep only the input files matching the current mode / generation / tissue;
# an empty match means there is no data for this tissue.
aa <- files_names_originals[grep(files_names_originals, pattern=considered_mode)]
aa <- aa[grep(aa, pattern=considered_generation)]
aa <- aa[grep(aa, pattern=tissue)]
if(length(aa)!=0){
# Fetch the tissue/model-specific "_nozeroes" matrix by its constructed name.
yy <- rownames(get(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_nozeroes") ) )
assign(paste0("xx_",k), yy)
all_tissues_1[[k]] <- yy
}
}
names(all_tissues_1) <- tissues
# Drop tissues with no data (their list slots stayed NULL).
# NOTE(review): comparing a list against the string "NULL" is fragile;
# sapply(all_tissues_1, is.null) is the robust test -- verify this branch
# behaves as intended on this data.
if(length(which(all_tissues_1=="NULL")) !=0){
all_tissues<-all_tissues_1[-which(all_tissues_1=="NULL")]
all_tissues_1<-all_tissues_1[-which(all_tissues_1=="NULL")]
}
# ** Considered_model_2 --------------------------------------------------------------------------------------
# Creates a list (called all_tissues_2) in which each element is
# a list of lipids coming from the row names of
# each Tissue-specific and model-specific Matrix
all_tissues_2 <-list()
for(k in 1:length(tissues)){
tissue <- tissues[k]
aa <- files_names_originals[grep(files_names_originals, pattern=considered_mode)]
aa <- aa[grep(aa, pattern=considered_generation)]
aa <- aa[grep(aa, pattern=tissue)]
if(length(aa)!=0){
yy <- rownames(get(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_2, "_nozeroes") ) )
assign(paste0("xx_",k), yy)
all_tissues_2[[k]] <- yy
}
}
names(all_tissues_2) <- tissues
if(length(which(all_tissues_2=="NULL")) !=0){
all_tissues<-all_tissues_2[-which(all_tissues_2=="NULL")]
all_tissues_2<-all_tissues_2[-which(all_tissues_2=="NULL")]
}
# ** Tissue-specific U-lipids --------------------------------------------------------------------------------------
# **** Produces the Tissue-specific U-lipids matrices --------------------------------------------------------------------------------------
# For each tissue, a U-lipid is a lipid detected in that tissue but in no
# other tissue (computed separately per model).  Each tissue is written out
# both as a raw matrix and as a "Glyc"-pooled matrix, with class-wise
# Jaccard distances between the two models.
for(o in 1:length(names(all_tissues_1))){
# NOTE(review): the index runs over all_tissues_1 but the name is read from
# all_tissues, which is only (re)assigned above when NULL entries were
# dropped -- if none were, all_tissues may be stale or undefined.  Confirm
# this should not be names(all_tissues_1)[o].
Tissue <- names(all_tissues)[o]
considered_tissue_1 <- all_tissues_1[which(names(all_tissues_1)==Tissue)]
other_tissues_1 <- all_tissues_1[-which(names(all_tissues_1)==Tissue)]
ss_1 <- Reduce(union, other_tissues_1) # Lists the lipids present in all the tissues but the considered one
tt_1 <- considered_tissue_1[[1]] # Lists the lipids present in the considered tissue
uu_1<- setdiff(tt_1,ss_1) # Lists the lipids that are in the considered tissue but not in all the others
# Same computation for the second model.
considered_tissue_2 <- all_tissues_2[which(names(all_tissues_2)==Tissue)]
other_tissues_2 <- all_tissues_2[-which(names(all_tissues_2)==Tissue)]
ss_2 <- Reduce(union, other_tissues_2)
tt_2 <- considered_tissue_2[[1]]
uu_2<- setdiff(tt_2,ss_2)
# Union of both models' U-lipids -> one row per lipid, one column per model.
vv <- union(uu_1, uu_2)
vv<- sort(vv)
#
U_lipids_matrix<- cbind(vv, rep(0, length(vv)), rep(0, length(vv)) )
U_lipids_matrix[,2][which(U_lipids_matrix[,1] %in% uu_1)] <- U_lipids_matrix[,1][which(U_lipids_matrix[,1] %in% uu_1)]
U_lipids_matrix[,3][which(U_lipids_matrix[,1] %in% uu_2)] <- U_lipids_matrix[,1][which(U_lipids_matrix[,1] %in% uu_2)]
#
colnames(U_lipids_matrix) <- c("rownames",considered_model_1, considered_model_2)
rownames(U_lipids_matrix) <- U_lipids_matrix[,1]
U_lipids_matrix <- U_lipids_matrix[,-1]
assign(paste0 ("U_Lipids_", "all_", Tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), U_lipids_matrix)
write.csv(file= paste0 ("U_Lipids_", "all_", Tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"), U_lipids_matrix)
# **** Counts the U-lipids in each class --------------------------------------------------------------------------------------
# Lipid class = first five characters of the lipid name.
classes_1 <- unique(substr(uu_1, start=1, stop =5))
classes_counts_model_1 <- matrix(ncol=1, nrow=length(classes_1))
rownames(classes_counts_model_1) <- classes_1
colnames(classes_counts_model_1) <- paste0("U_lipids_",considered_model_1)
if(length(uu_1) !=0){
# NOTE(review): unanchored grep -- a class string occurring inside a longer
# lipid name also matches (the B-lipids section anchors with "^").  Confirm.
for(i in 1:length(classes_1)){
classes_counts_model_1[i] <- length(grep(uu_1, pattern=classes_1[i]))
}
}
classes_2 <- unique(substr(uu_2, start=1, stop =5))
classes_counts_model_2 <- matrix(ncol=1, nrow=length(classes_2))
rownames(classes_counts_model_2) <- classes_2
colnames(classes_counts_model_2) <- paste0("U_lipids_",considered_model_2)
if(length(uu_2) !=0){
for(i in 1:length(classes_2)){
classes_counts_model_2[i] <- length(grep(uu_2, pattern=classes_2[i]))
}
}
# Creates the U_lipids_classes_counts_tot dataframe, where the vectors classes_counts_model_1 and classes_counts_model_2 are merged
classes_counts_model_1 <- as.data.frame(classes_counts_model_1)
classes_counts_model_2 <- as.data.frame(classes_counts_model_2)
U_lipids_classes_counts_tot <- merge(classes_counts_model_1, classes_counts_model_2, by.x="row.names", by.y="row.names", all=T)
U_lipids_classes_counts_tot[is.na(U_lipids_classes_counts_tot)]<-0
rownames(U_lipids_classes_counts_tot) <- U_lipids_classes_counts_tot[,1]
U_lipids_classes_counts_tot <- U_lipids_classes_counts_tot[,-1]
#
# Pool the glyceride classes (MG/DG/TG) into a single extra "Glyc" row.
All_glicerids <- U_lipids_classes_counts_tot[grep(rownames(U_lipids_classes_counts_tot), pattern="DGX|MGX|TGX"),]
U_lipids_classes_counts_tot[nrow(U_lipids_classes_counts_tot)+1,] <- colSums(All_glicerids)
rownames(U_lipids_classes_counts_tot)[nrow(U_lipids_classes_counts_tot)] <- "Glyc"
assign(paste0("U_lipids_classes_counts_", Tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), U_lipids_classes_counts_tot)
write.csv(U_lipids_classes_counts_tot, file=paste0("U_lipids_classes_counts_", Tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"))
# Computes the Jaccard distances between models --------------------------------------------------------------------------------------------------
# Unifies the classes MG, DG and TG under the class Glyc, by changing the row names of the A-lipids
uu_1_substituted <- gsub("MGXX|DGXX|TGXX", "Glyc", uu_1)
uu_2_substituted <- gsub("MGXX|DGXX|TGXX", "Glyc", uu_2)
# Merges the vectors containing the A lipids "substituted" for each model into the A_lipids_matrix
ss <- union(uu_1_substituted, uu_2_substituted)
ss<- sort(ss)
#
U_lipids_matrix<- cbind(ss, rep(0, length(ss)), rep(0, length(ss)) )
U_lipids_matrix[,2][which(U_lipids_matrix[,1] %in% uu_1_substituted)] <- U_lipids_matrix[,1][which(U_lipids_matrix[,1] %in% uu_1_substituted)]
U_lipids_matrix[,3][which(U_lipids_matrix[,1] %in% uu_2_substituted)] <- U_lipids_matrix[,1][which(U_lipids_matrix[,1] %in% uu_2_substituted)]
#
colnames(U_lipids_matrix) <- c("rownames",considered_model_1, considered_model_2)
rownames(U_lipids_matrix) <- U_lipids_matrix[,1]
U_lipids_matrix <- U_lipids_matrix[,-1]
assign(paste0 ("U_Lipids_", Tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), U_lipids_matrix)
write.csv(file= paste0 ("U_Lipids_", Tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"), U_lipids_matrix)
# Distances are only computed when both models actually have U-lipids here.
if(length(uu_2)!=0&length(uu_1)!=0){
# Computes the Jaccard distances
require(jaccard)
classes <- rownames(U_lipids_classes_counts_tot)
Jaccard_distances <- matrix(ncol=2, nrow = length(classes))
colnames(Jaccard_distances) <- c("Distance", "Pvalue")
rownames(Jaccard_distances) <- classes
# Presence/absence matrix: any non-zero entry -> 1.
Global_jaccard_matrix <- U_lipids_matrix
Global_jaccard_matrix[which(Global_jaccard_matrix!=0)] = 1
for(i in 1:length(classes)){
zz<- as.matrix(Global_jaccard_matrix[grep(rownames(Global_jaccard_matrix), pattern=classes[i]),])
# A single matching row arrives as a 2x1 matrix, hence the scalar indexing.
if(length(grep(rownames(Global_jaccard_matrix), pattern=classes[i]))==1 ){
uu<- jaccard(as.numeric(zz[1]), as.numeric(zz[2]))
vv <-jaccard.test(as.numeric(zz[1]), as.numeric(zz[2]), method = "exact")
Jaccard_distances[i,c(1,2)] <- c(uu,vv$pvalue)
}
else if (length(grep(rownames(Global_jaccard_matrix), pattern=classes[i]))==0) {
# NOTE(review): assigning "NA" strings coerces the matrix to character.
Jaccard_distances[i,c(1,2)] <- c("NA","NA")
}
else{
uu<- jaccard(as.numeric(zz[,1]), as.numeric(zz[,2]))
vv <-jaccard.test(as.numeric(zz[,1]), as.numeric(zz[,2]), method = "exact")
Jaccard_distances[i,c(1,2)] <- c(uu,vv$pvalue)
}
#print(zz)
}
assign(paste0 ("Jaccard_distances_U_Lipids_", Tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), Jaccard_distances)
write.csv(file=paste0 ("Jaccard_distances_U_Lipids_", Tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"), Jaccard_distances)
}
}
#### B LIPIDS ###############################################################################################################
# ** Considered_model_1 --------------------------------------------------------------------------------------
# Creates a list (called all_tissues_1) in which each element is
# a list of lipids coming from the row names of
# each Tissue-specific and model-specific Matrix
all_tissues_1 <-list()
for(k in 1:length(tissues)){
tissue <- tissues[k]
# Keep only the input files matching the current mode / generation / tissue.
aa <- files_names_originals[grep(files_names_originals, pattern=considered_mode)]
aa <- aa[grep(aa, pattern=considered_generation)]
aa <- aa[grep(aa, pattern=tissue)]
if(length(aa)!=0){
yy <- rownames(get(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_nozeroes") ) )
assign(paste0("xx_",k), yy)
all_tissues_1[[k]] <- yy
}
}
names(all_tissues_1) <- tissues
# NOTE(review): unlike the U-lipids section above, only all_tissues is
# filtered here -- all_tissues_1 itself keeps any NULL entries, which would
# break the [[name]] indexing in the pairwise loop below for missing
# tissues.  Confirm this asymmetry is intended.
if(length(which(all_tissues_1=="NULL")) !=0){
all_tissues<-all_tissues_1[-which(all_tissues_1=="NULL")]
}
# ** Considered_model_2 --------------------------------------------------------------------------------------
# Creates a list (called all_tissues_2) in which each element is
# a list of lipids coming from the row names of
# each Tissue-specific and model-specific Matrix
all_tissues_2 <-list()
for(k in 1:length(tissues)){
tissue <- tissues[k]
aa <- files_names_originals[grep(files_names_originals, pattern=considered_mode)]
aa <- aa[grep(aa, pattern=considered_generation)]
aa <- aa[grep(aa, pattern=tissue)]
if(length(aa)!=0){
yy <- rownames(get(paste0(tissue, "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_2, "_nozeroes") ) )
assign(paste0("xx_",k), yy)
all_tissues_2[[k]] <- yy
}
}
names(all_tissues_2) <- tissues
if(length(which(all_tissues_2=="NULL")) !=0){
all_tissues<-all_tissues_2[-which(all_tissues_2=="NULL")]
}
#Creates the IDs of the pairwise comparisons
possible_pairs <- combn(names(all_tissues_1), 2)
#Compares the two models for each pair of tissues
# For each tissue pair, a B-lipid is a lipid shared by both tissues (per
# model).  As in the U-lipids section: raw matrix, class counts with a
# pooled "Glyc" row, then class-wise Jaccard distances between the models.
for(d in 1:ncol(possible_pairs)){
tissues_to_compare <- possible_pairs[,d]
B_lipids_1 <- (intersect( all_tissues_1[[tissues_to_compare[1]]] , all_tissues_1[[tissues_to_compare[2]]] ))
B_lipids_2 <- (intersect( all_tissues_2[[tissues_to_compare[1]]] , all_tissues_2[[tissues_to_compare[2]]] ))
B_lipids_tot <- union(B_lipids_1, B_lipids_2)
B_lipids_matrix<- cbind(B_lipids_tot, rep(0, length(B_lipids_tot)), rep(0, length(B_lipids_tot)) )
B_lipids_matrix[,2][which(B_lipids_matrix[,1] %in% B_lipids_1)] <- B_lipids_matrix[,1][which(B_lipids_matrix[,1] %in% B_lipids_1)]
B_lipids_matrix[,3][which(B_lipids_matrix[,1] %in% B_lipids_2)] <- B_lipids_matrix[,1][which(B_lipids_matrix[,1] %in% B_lipids_2)]
#
colnames(B_lipids_matrix) <- c("rownames",considered_model_1, considered_model_2)
rownames(B_lipids_matrix) <- B_lipids_matrix[,1]
B_lipids_matrix <- B_lipids_matrix[,-1]
assign(paste0 ("B_Lipids_", "all_", tissues_to_compare[1], "_", tissues_to_compare[2], "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), B_lipids_matrix)
write.csv(file= paste0 ("B_Lipids_", "all_", tissues_to_compare[1], "_", tissues_to_compare[2], "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"), B_lipids_matrix)
# Counts the B-lipids in each classes --------------------------------------------------------------------------------------
# Considered_model_1
classes <- unique(substr(B_lipids_1, start=1, stop =5))
classes_counts_model_1 <- matrix(ncol=1, nrow=length(classes))
rownames(classes_counts_model_1) <- classes
colnames(classes_counts_model_1) <- paste0("B_lipids_",considered_model_1)
# NOTE(review): this loop is not guarded like the U-lipids one -- if a pair
# shares no lipids under this model, classes is empty, 1:length(classes)
# is c(1, 0) and the matrix assignment fails with "subscript out of bounds".
for(i in 1:length(classes)){
classes_counts_model_1[i] <- length(grep(B_lipids_1, pattern=paste0("^",classes[i])))
}
# Considered_model_2
classes <- unique(substr(B_lipids_2, start=1, stop =5))
classes_counts_model_2 <- matrix(ncol=1, nrow=length(classes))
rownames(classes_counts_model_2) <- classes
colnames(classes_counts_model_2) <- paste0("B_lipids_",considered_model_2)
for(i in 1:length(classes)){
classes_counts_model_2[i] <- length(grep(B_lipids_2, pattern=paste0("^",classes[i])))
}
#Creates the B_lipids_classes_counts_tot dataframe, where the vectors classes_counts_model_1 and classes_counts_model_2 are merged
classes_counts_model_1 <- as.data.frame(classes_counts_model_1)
classes_counts_model_2 <- as.data.frame(classes_counts_model_2)
B_lipids_classes_counts_tot <- merge(classes_counts_model_1, classes_counts_model_2, by.x="row.names", by.y="row.names", all=T)
B_lipids_classes_counts_tot[is.na(B_lipids_classes_counts_tot)]<-0
rownames(B_lipids_classes_counts_tot) <- B_lipids_classes_counts_tot[,1]
B_lipids_classes_counts_tot <- B_lipids_classes_counts_tot[,-1]
#
# Pool the glyceride classes (MG/DG/TG) into a single extra "Glyc" row.
All_glicerids <- B_lipids_classes_counts_tot[grep(rownames(B_lipids_classes_counts_tot), pattern="DGX|MGX|TGX"),]
B_lipids_classes_counts_tot[nrow(B_lipids_classes_counts_tot)+1,] <- colSums(All_glicerids)
rownames(B_lipids_classes_counts_tot)[nrow(B_lipids_classes_counts_tot)] <- "Glyc"
assign(paste0("B_lipids_classes_counts_", tissues_to_compare[1], "_", tissues_to_compare[2], "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), B_lipids_classes_counts_tot)
write.csv(B_lipids_classes_counts_tot, file=paste0("B_lipids_classes_counts_", tissues_to_compare[1], "_", tissues_to_compare[2], "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"))
# Computes the Jaccard Distances --------------------------------------------------------------------------------------
# Unifies the classes MG, DG and TG under the class Glyc, by changing the row names of the B-lipids
B_lipids_1_substituted <- gsub("MGXX|DGXX|TGXX", "Glyc", B_lipids_1)
B_lipids_2_substituted <- gsub("MGXX|DGXX|TGXX", "Glyc", B_lipids_2)
# Merges the vectors containing the A lipids "substituted" for each model into the B_lipids_matrix
vv <- union(B_lipids_1_substituted, B_lipids_2_substituted)
vv<- sort(vv)
#
B_lipids_matrix<- cbind(vv, rep(0, length(vv)), rep(0, length(vv)) )
B_lipids_matrix[,2][which(B_lipids_matrix[,1] %in% B_lipids_1_substituted)] <- B_lipids_matrix[,1][which(B_lipids_matrix[,1] %in% B_lipids_1_substituted)]
B_lipids_matrix[,3][which(B_lipids_matrix[,1] %in% B_lipids_2_substituted)] <- B_lipids_matrix[,1][which(B_lipids_matrix[,1] %in% B_lipids_2_substituted)]
#
colnames(B_lipids_matrix) <- c("rownames",considered_model_1, considered_model_2)
rownames(B_lipids_matrix) <- B_lipids_matrix[,1]
B_lipids_matrix <- B_lipids_matrix[,-1]
assign(paste0 ("B_Lipids_", tissues_to_compare[1], "_", tissues_to_compare[2], "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), B_lipids_matrix)
write.csv(file= paste0 ("B_Lipids_", tissues_to_compare[1], "_", tissues_to_compare[2], "_",considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"), B_lipids_matrix)
# Computes the Jaccard distances
require(jaccard)
classes <- rownames(B_lipids_classes_counts_tot)
Jaccard_distances <- matrix(ncol=2, nrow = length(classes))
colnames(Jaccard_distances) <- c("Distance", "Pvalue")
rownames(Jaccard_distances) <- classes
# Presence/absence matrix: any non-zero entry -> 1.
Global_jaccard_matrix <- B_lipids_matrix
Global_jaccard_matrix[which(Global_jaccard_matrix!=0)] = 1
for(i in 1:length(classes)){
zz<- as.matrix(Global_jaccard_matrix[grep(rownames(Global_jaccard_matrix), pattern=classes[i]),])
# A single matching row arrives as a 2x1 matrix, hence the scalar indexing.
if(length(grep(rownames(Global_jaccard_matrix), pattern=classes[i]))==1 ){
uu<- jaccard(as.numeric(zz[1]), as.numeric(zz[2]))
vv <-jaccard.test(as.numeric(zz[1]), as.numeric(zz[2]), method = "exact")
Jaccard_distances[i,c(1,2)] <- c(uu,vv$pvalue)
}
else if (length(grep(rownames(Global_jaccard_matrix), pattern=classes[i]))==0) {
# NOTE(review): assigning "NA" strings coerces the matrix to character.
Jaccard_distances[i,c(1,2)] <- c("NA","NA")
}
else{
uu<- jaccard(as.numeric(zz[,1]), as.numeric(zz[,2]))
vv <-jaccard.test(as.numeric(zz[,1]), as.numeric(zz[,2]), method = "exact")
Jaccard_distances[i,c(1,2)] <- c(uu,vv$pvalue)
}
#print(zz)
}
assign(paste0 ("Jaccard_distances_B_Lipids_", tissues_to_compare[1], "_", tissues_to_compare[2], "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2), Jaccard_distances)
write.csv(file=paste0 ("Jaccard_distances_B_Lipids_", tissues_to_compare[1], "_", tissues_to_compare[2], "_", considered_generation, "_", substr(considered_mode, 2,4), "_", considered_model_1, "_", considered_model_2,".csv"), Jaccard_distances)
}
# Restore the default warning behaviour (warn was presumably raised/lowered
# earlier in this script, outside the visible window).
options(warn=0)
|
1abcbd5acbccbcf403e1afc7ba68d7fa8f00aa0a
|
aee8226b9bc42b68a547b7748e3e59acc698ce9a
|
/heatmap plot_world.R
|
ef0f5b65404c081e1c6f6071a352978aa88e2b21
|
[
"CC0-1.0"
] |
permissive
|
LaiShengjie/Holiday
|
3f1d4c218a33ae501deea31216be3c5f21a82508
|
b5b5b2595c9159736abdf155d4e6b9cc7bd73ae0
|
refs/heads/main
| 2023-08-14T01:06:43.868363
| 2021-09-19T13:53:33
| 2021-09-19T13:53:33
| 316,921,040
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,045
|
r
|
heatmap plot_world.R
|
### Heatmap ####
# Image scale
# Draw a colour-bar legend matching an image() plot.
#
# Args:
#   z      - data values; used only to derive zlim when neither zlim nor
#            breaks is supplied.
#   zlim   - optional value range the scale spans.
#   col    - colour vector, one colour per interval (required).
#            FIX: the original default `col = col` was self-referential and
#            errored whenever col was omitted; the argument is now simply
#            required, which every existing caller already satisfies.
#   breaks - optional breakpoints; length must be length(col) + 1.
#   horiz  - TRUE for a horizontal bar, FALSE for a vertical one.
#   ylim, xlim - optional plot limits; default to the break range / c(0, 1).
#   ...    - forwarded to plot().
image.scale <- function(z, zlim, col,
breaks, horiz=TRUE, ylim=NULL, xlim=NULL, ...){
  if(!missing(breaks)){
    if(length(breaks) != (length(col)+1)){stop("must have one more break than colour")}
  }
  if(missing(breaks) && !missing(zlim)){
    breaks <- seq(zlim[1], zlim[2], length.out=(length(col)+1))
  }
  if(missing(breaks) && missing(zlim)){
    zlim <- range(z, na.rm=TRUE)
    # Widen the range slightly so the extreme data values fall inside the
    # scale.  (The second line deliberately reuses the already-widened
    # zlim[2], matching the original behaviour.)
    zlim[2] <- zlim[2]+(zlim[2]-zlim[1])*(1E-3)
    zlim[1] <- zlim[1]-(zlim[2]-zlim[1])*(1E-3)
    breaks <- seq(zlim[1], zlim[2], length.out=(length(col)+1))
  }
  # One rectangle (stored as its two x- or y-extents) per colour interval.
  poly <- vector(mode="list", length(col))
  for(i in seq_along(poly)){
    poly[[i]] <- c(breaks[i], breaks[i+1], breaks[i+1], breaks[i])
  }
  # Show only the axis that runs along the value direction of the bar.
  xaxt <- if(horiz) "s" else "n"
  yaxt <- if(horiz) "n" else "s"
  if(horiz){YLIM <- c(0,1); XLIM <- range(breaks)}
  if(!horiz){YLIM <- range(breaks); XLIM <- c(0,1)}
  if(missing(xlim)) xlim <- XLIM
  if(missing(ylim)) ylim <- YLIM
  plot(1,1,t="n",ylim=ylim, xlim=xlim, xaxt=xaxt, yaxt=yaxt, xaxs="i", yaxs="i", ...)
  for(i in seq_along(poly)){
    if(horiz){
      polygon(poly[[i]], c(0,0,1,1), col=col[i], border=NA)
    } else {
      polygon(c(0,0,1,1), poly[[i]], col=col[i], border=NA)
    }
  }
}
# Heatmap panel for weekly data: draws mat1 as an image with dashed quarter
# separators, week labels on the x axis and N/S hemisphere labels on y.
panel <- function(mat1, region, col, wk, panel.name){
  n_wk <- nrow(mat1)
  image(x = seq(1, n_wk), z = mat1, col = col, yaxt = "n", xaxt = "n", xlab = "")
  box()
  # one x unit per week, scaled so 365 days span the full width
  unit <- (n_wk + 1) / 365
  abline(v = unit * c(92, 183, 275), lty = 2)
  axis(1, at = seq_len(n_wk), labels = as.factor(sort(wk)))
  # spread the regions over [0, 1]; keep labels only at the latitude
  # extremes and mark them N / S
  region$order <- seq(0, 1, length.out = nrow(region))
  region$y <- round(region$y, 5)
  y_top <- max(region$y)
  y_bot <- min(region$y)
  region$order[!region$y %in% c(y_top, y_bot)] <- NA
  region$adm[region$y == y_top] <- "N"
  region$adm[region$y == y_bot] <- "S"
  axis(2, at = region$order, labels = region$adm, las = 1, cex.axis = 1.3)
  title(xlab = "Month and week", cex.lab = 1.2)
  mtext(paste(panel.name), side = 3, line = 0.1, adj = 0, cex = 1.3)
}
# World-level weekly heatmap panel: like panel() but with month-initial
# labels on the x axis instead of week numbers.
panel.world <- function(mat1, region, col, wk, panel.name){
  n_wk <- nrow(mat1)
  image(x = seq(1, n_wk), z = mat1, col = col, yaxt = "n", xaxt = "n", xlab = "")
  box()
  unit <- (n_wk + 1) / 365
  abline(v = unit * c(91, 182, 274), lty = 2)
  # ticks at (approximate) month starts, expressed in week units
  tick_at <- c(1 - 3 * unit,
               unit * c(32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335),
               53 + 3 * unit)
  axis(1, at = tick_at,
       labels = c("J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D", "J"))
  # spread the regions over [0, 1]; label only the N/S latitude extremes
  region$order <- seq(0, 1, length.out = nrow(region))
  region$y <- round(region$y, 5)
  y_top <- max(region$y)
  y_bot <- min(region$y)
  region$order[!region$y %in% c(y_top, y_bot)] <- NA
  region$adm[region$y == y_top] <- "N"
  region$adm[region$y == y_bot] <- "S"
  axis(2, at = region$order, labels = region$adm, las = 1, cex.axis = 1.3)
  title(xlab = "Month and week", cex.lab = 1.2)
  mtext(paste(panel.name), side = 3, line = 0.1, adj = 0, cex = 1.3)
}
# Weekly heatmap panel for selected countries: month-initial x axis, and --
# unlike panel.world -- every region keeps its own y-axis label (no N/S
# collapsing).
panel.iso <- function(mat1, region, col, wk, panel.name){
  n_wk <- nrow(mat1)
  image(x = seq(1, n_wk), z = mat1, col = col, yaxt = "n", xaxt = "n", xlab = "")
  box()
  unit <- (n_wk + 1) / 365
  abline(v = unit * c(91, 182, 274), lty = 2)
  tick_at <- c(1 - 3 * unit,
               unit * c(32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335),
               53 + 3 * unit)
  axis(1, at = tick_at,
       labels = c("J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D", "J"))
  region$order <- seq(0, 1, length.out = nrow(region))
  axis(2, at = region$order, labels = region$adm, las = 1, cex.axis = 1.3)
  title(xlab = "Month and week", cex.lab = 1.2)
  mtext(paste(panel.name), side = 3, line = 0.1, adj = 0, cex = 1.3)
}
# Monthly time-series heatmap panel (2010-2019): one dashed separator and
# one tick per year; N/S hemisphere labels on the y axis.
panel.month <- function(mat1, region, col, panel.name){
  image(x = seq(1, nrow(mat1)), z = mat1, col = col, yaxt = "n", xaxt = "n", xlab = "")
  box()
  year_at <- seq(0.5, 108.5, 12)
  abline(v = year_at, lty = 2)
  axis(1, at = year_at, labels = as.factor(2010:2019))
  # spread the regions over [0, 1]; label only the N/S latitude extremes
  region$order <- seq(0, 1, length.out = nrow(region))
  region$y <- round(region$y, 5)
  y_top <- max(region$y)
  y_bot <- min(region$y)
  region$order[!region$y %in% c(y_top, y_bot)] <- NA
  region$adm[region$y == y_top] <- "N"
  region$adm[region$y == y_bot] <- "S"
  axis(2, at = region$order, labels = region$adm, las = 1, cex.axis = 1.3)
  title(xlab = "Year and month", cex.lab = 1.2)
  mtext(paste(panel.name), side = 3, line = 0.1, adj = 0, cex = 1.3)
}
# Monthly air-travel heatmap panel (2010-2018): yearly separators/ticks and
# N/S hemisphere labels on the y axis.
panel.month.air <- function(mat1, region, col, panel.name){
  image(x = seq(1, nrow(mat1)), z = mat1, col = col, yaxt = "n", xaxt = "n", xlab = "")
  box()
  year_at <- seq(0.5, 96.5, 12)
  abline(v = year_at, lty = 2)
  axis(1, at = year_at, labels = as.factor(2010:2018))
  # spread the regions over [0, 1]; label only the N/S latitude extremes
  region$order <- seq(0, 1, length.out = nrow(region))
  region$y <- round(region$y, 5)
  y_top <- max(region$y)
  y_bot <- min(region$y)
  region$order[!region$y %in% c(y_top, y_bot)] <- NA
  region$adm[region$y == y_top] <- "N"
  region$adm[region$y == y_bot] <- "S"
  axis(2, at = region$order, labels = region$adm, las = 1, cex.axis = 1.3)
  title(xlab = "Year and month", cex.lab = 1.2)
  mtext(paste(panel.name), side = 3, line = 0.3, adj = 0, cex = 1.3)
}
# Air-travel seasonality heatmap panel: one 12-month cycle on the x axis
# (ticks at Jan/Apr/Jul/Oct/Jan), N/S hemisphere labels on the y axis.
panel.month.air.season <- function(mat1, region, col, panel.name){
  image(x = seq(1, nrow(mat1)), z = mat1, col = col, yaxt = "n", xaxt = "n", xlab = "")
  box()
  axis(1, at = c(0.5, 3.5, 6.5, 9.5, 12.5), labels = c("J", "A", "J", "O", "J"))
  # spread the regions over [0, 1]; label only the N/S latitude extremes
  region$order <- seq(0, 1, length.out = nrow(region))
  region$y <- round(region$y, 5)
  y_top <- max(region$y)
  y_bot <- min(region$y)
  region$order[!region$y %in% c(y_top, y_bot)] <- NA
  region$adm[region$y == y_top] <- "N"
  region$adm[region$y == y_bot] <- "S"
  axis(2, at = region$order, labels = region$adm, las = 1, cex.axis = 1.3)
  title(xlab = "Month", cex.lab = 1.2)
  mtext(paste(panel.name), side = 3, line = 0.3, adj = 0, cex = 1.3)
}
# Monthly OAG air-travel heatmap panel (2015-2019): yearly separators/ticks
# and N/S hemisphere labels on the y axis.
panel.month.air.oag <- function(mat1, region, col, panel.name){
  image(x = seq(1, nrow(mat1)), z = mat1, col = col, yaxt = "n", xaxt = "n", xlab = "")
  box()
  year_at <- seq(0.5, 48.5, 12)
  abline(v = year_at, lty = 2)
  axis(1, at = year_at, labels = as.factor(2015:2019))
  # spread the regions over [0, 1]; label only the N/S latitude extremes
  region$order <- seq(0, 1, length.out = nrow(region))
  region$y <- round(region$y, 5)
  y_top <- max(region$y)
  y_bot <- min(region$y)
  region$order[!region$y %in% c(y_top, y_bot)] <- NA
  region$adm[region$y == y_top] <- "N"
  region$adm[region$y == y_bot] <- "S"
  axis(2, at = region$order, labels = region$adm, las = 1, cex.axis = 1.3)
  title(xlab = "Year and month", cex.lab = 1.2)
  mtext(paste(panel.name), side = 3, line = 0.3, adj = 0, cex = 1.3)
}
# Holiday-vs-air-travel heatmap panel: axis labels come from the matrix
# dimnames (rows = holiday days, columns = air-travel ranks).
panel.month.air.hol <- function(mat1, col, panel.name){
  n_hol <- nrow(mat1)
  image(x = seq(1, n_hol), z = mat1, col = col, yaxt = "n", xaxt = "n", xlab = "")
  box()
  axis(1, at = seq_len(n_hol), labels = rownames(mat1))
  axis(2, at = seq(0, 1, 1 / (ncol(mat1) - 1)), labels = colnames(mat1))
  title(xlab = "Holiday (days)", ylab = "Air travel (rank)", cex.lab = 1.2)
  mtext(panel.name, side = 3, line = 0.3, adj = 0, cex = 1.3)
}
#### heatmap panel legends
# Vertical colour-bar legend labelled in days at its two extremes.
panel.legend.wk1 <- function(mat1, col, breaks){
  image.scale(mat1, col = col, breaks = breaks, horiz = FALSE,
              yaxt = "n", xaxt = "n", xlab = "", ylab = "")
  end_labels <- c("0 days", paste(max(breaks) - 1, " days", sep = ""))
  axis(4, at = c(min(breaks) + 0.5, max(breaks) - 0.5),
       labels = end_labels, cex.axis = 1.1, las = 2)
  box()
}
# Vertical colour-bar legend for a binary indicator (labels "NO" / "Yes").
panel.legend.wk2 <- function(mat1, col, breaks){
  image.scale(mat1, col = col, breaks = breaks, horiz = FALSE,
              yaxt = "n", xaxt = "n", xlab = "", ylab = "")
  axis(4, at = c(min(breaks) + 0.5, max(breaks) - 0.5),
       labels = c("NO", "Yes"), cex.axis = 1.1, las = 2)
  box()
}
# Vertical colour-bar legend for monthly air-travel ranks (labels 1..12).
panel.legend.wk1.air <- function(mat1, col, breaks){
  image.scale(mat1, col = col, breaks = breaks, horiz = FALSE,
              yaxt = "n", xaxt = "n", xlab = "", ylab = "")
  axis(4, at = c(min(breaks) + 0.5, max(breaks) - 0.5),
       labels = c("1", "12"), cex.axis = 1.1, las = 2)
  box()
}
# Vertical colour-bar legend for the holiday/air-travel panel (0 to 0.4).
panel.legend.air.hol <- function(mat1, col, breaks){
  image.scale(mat1, col = col, breaks = breaks, horiz = FALSE,
              yaxt = "n", xaxt = "n", xlab = "", ylab = "")
  axis(4, at = c(min(breaks) + 0.5, max(breaks) - 0.5),
       labels = c("0", "0.4"),
       cex.axis = 1.1, las = 2)
  box()
}
|
4cc403a5472d857d29033e327c1d575612c76d03
|
9c20dfe75c41283db15d80bef6ff60fbf4cb5255
|
/Kaplan meier plots in R.R
|
ef2c096bacd79fa021b68bbff680d121ca3114dc
|
[] |
no_license
|
kristyrobledo/CodingConundrums
|
c9eadaa313afa5e04e47ecf5e016936b9bee328b
|
7713b19a28774c75b9ae0e48b8ff7d648a40d278
|
refs/heads/master
| 2023-08-04T11:30:56.604221
| 2023-07-27T03:02:00
| 2023-07-27T03:02:00
| 237,126,082
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,142
|
r
|
Kaplan meier plots in R.R
|
## Kaplan-Meier survival analysis of the `lung` data set (survival package):
## KM plot by sex with risk table, log-rank test, Cox model annotation,
## survival quantiles and median follow-up time.

# Read in packages (installing each on first use).
if (!require(tidyverse)) {
  install.packages("tidyverse")
  library(tidyverse)
}
if (!require(survival)) {
  install.packages("survival")
  library(survival)
}
if (!require(survminer)) {
  install.packages("survminer")
  library(survminer)
}
if (!require(glue)) {
  install.packages("glue")
  library(glue)
}

## Kaplan-Meier plot ----
fit <- survfit(Surv(time, status) ~ sex, data = lung)
fit
km <- ggsurvplot(fit, data = lung,
                 xscale = "d_y",                 # show the day-scale axis in years
                 break.time.by = 365.25 / 2,     # x-axis tick every 6 months
                 xlim = c(0, 2.5 * 365.25),      # truncate display at 2.5 years
                 xlab = "Years since registration", ylab = "Proportion alive",
                 risk.table = TRUE,
                 palette = c("#ff0000", "#00008B"),
                 legend.labs = c("Females", "Males"),
                 legend = "none",
                 legend.title = " ",
                 conf.int = TRUE,
                 risk.table.col = "strata",
                 risk.table.height = 0.2,
                 tables.theme = theme_cleantable())

## Log-rank test ----
survdiff(Surv(time, status) ~ sex, data = lung)

## Cox regression ----
ph <- summary(coxph(Surv(time, status) ~ sex, data = lung))
HR <- round(ph$coefficients[2], digits = 2)
HRl <- round(ph$conf.int[3], digits = 2)
HRu <- round(ph$conf.int[4], digits = 2)
p <- signif(ph$waldtest[3], digits = 1)
stats <- glue('HR = {HR} (95% CI: {HRl}-{HRu}), p-value={p}')

# Stick the Cox regression summary onto our curve.
km$plot <- km$plot +
  annotate("text",
           x = 1.5 * 365.25, y = 0.8,
           hjust = 0,
           label = stats)
ggsave(file = "Survival.pdf", print(km))

library(patchwork)
(km$plot + km$plot) / (km$plot + km$plot)
## If you want to keep your risk tables, use arrange_ggsurvplots().

## Median survival time and other quantiles of survival ----
fit <- survfit(Surv(time, status) ~ sex, data = lung)
fit
quantile(survfit(Surv(time, status) ~ sex, data = lung))

## Survival probability at time t ----
summary(survfit(Surv(time, status) ~ sex, data = lung),
        times = c(.5 * 365.25, 365.25, 1.5 * 365.25))

## Median follow-up time (reverse KM: censoring becomes the "event") ----
table(lung$status)
# BUG FIX: the original used `lung %<>% mutate(...)`; `%<>%` belongs to
# magrittr, which library(tidyverse) does not attach (only `%>%` is
# re-exported), so that line errored.  Use an explicit assignment instead.
lung <- lung %>%
  mutate(followup = ifelse(status == 2, 0, 1))
table(lung$followup)
quantile(survfit(Surv(time, followup) ~ 1, data = lung))
|
4b7964ef4b19bdc083330e9a8969133563ed1af2
|
69ebb8938df16e190c6758f49ee1575cee79ec76
|
/pratical machine leanring/quiz4.R
|
75bb02fb19e8807136cdc01708b7e8d0760d5dec
|
[] |
no_license
|
bikash/Coursera-Practical-Machine-Learning
|
360ba206e8785ab10a41300f116225ca003b18f8
|
cc2bbe3a980ad706ad2fb097c6f4662a7efc62bd
|
refs/heads/master
| 2020-05-18T20:55:38.613912
| 2016-01-05T00:13:37
| 2016-01-05T00:13:37
| 33,836,906
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,307
|
r
|
quiz4.R
|
## Quiz 4 -- Practical Machine Learning
## Q1: RF vs GBM on the vowel data; Q2: model stacking (RF/GBM/LDA) on the
## Alzheimer data; Q3: lasso; Q4: bats() time-series forecast; Q5: SVM.

# Question 1 ----
# Load the vowel.train and vowel.test data sets, make y a factor in both,
# set the seed to 33833, fit (1) a random forest and (2) a boosted ("gbm")
# predictor, then compare test-set accuracies and the accuracy among the
# test samples where the two methods agree.
library(ElemStatLearn)
library(caret)
data(vowel.train)
data(vowel.test)
vowel.train$y <- as.factor(vowel.train$y)
vowel.test$y <- as.factor(vowel.test$y)
set.seed(33833)
fitControl <- trainControl(method = "repeatedcv",
                           number = 5,
                           repeats = 1,
                           verboseIter = TRUE)
## random forest
fitRf <- train(y ~ ., data = vowel.train, method = "rf")
## gradient boosting
# NOTE(review): this fits y on x.1 only, while the quiz (and the comment
# above) asks for all remaining variables (y ~ .) -- confirm which is meant.
fitGBM <- train(y ~ x.1, data = vowel.train, method = "gbm", trControl = fitControl)
predRf <- predict(fitRf, vowel.test)
predGBM <- predict(fitGBM, vowel.test)
# RF Accuracy: 0.6038961
confusionMatrix(predRf, vowel.test$y)$overall[1]
# GBM Accuracy: 0.530303
confusionMatrix(predGBM, vowel.test$y)$overall[1]
# Accuracy restricted to the samples where both models agree.
pred <- data.frame(predRf, predGBM, y = vowel.test$y, agree = predRf == predGBM)
head(pred)
accuracy <- sum(predRf[pred$agree] == pred$y[pred$agree]) / sum(pred$agree)
accuracy # Agreement Accuracy: 0.6569579

### Question 2 ----
# Stack RF, GBM and LDA predictions with a random forest meta-model.
library(caret)
library(gbm)
set.seed(3433)
library(AppliedPredictiveModeling)
data(AlzheimerDisease)
adData = data.frame(diagnosis, predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
set.seed(62433)
## random forest
fitRf <- train(diagnosis ~ ., data = training, method = "rf")
## gradient boosting machine
fitgbm <- train(diagnosis ~ ., data = training, method = "gbm")
## linear discriminant analysis
fitlda <- train(diagnosis ~ ., data = training, method = "lda")
## predictions on the test set
predRf <- predict(fitRf, testing)
predGBM <- predict(fitgbm, testing)
predLDA <- predict(fitlda, testing)
## combine the three predictions into the stacking data set
df_combined <- data.frame(predRf, predGBM, predLDA, diagnosis = testing$diagnosis) # training$diagnosis?
fit_combined <- train(diagnosis ~ ., data = df_combined, method = "rf")
predict_final <- predict(fit_combined, newdata = testing)
# confusion matrices
c1 <- confusionMatrix(predRf, testing$diagnosis) ## 0.7926829
c2 <- confusionMatrix(predGBM, testing$diagnosis)
c3 <- confusionMatrix(predLDA, testing$diagnosis) ## 0.7682927
c4 <- confusionMatrix(predict_final, testing$diagnosis)
print(paste(c1$overall[1], c2$overall[1], c3$overall[1], c4$overall[1]))
# Stacked Accuracy: 0.79 is better than random forests and lda
# and the same as boosting.

## Question 3 ----
# Lasso on the concrete data: which coefficient is the last set to zero as
# the penalty increases?  (See ?plot.enet.)
set.seed(3523)
library(AppliedPredictiveModeling)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
set.seed(233)
fit <- train(CompressiveStrength ~ ., data = training, method = "lasso")
fit
plot.enet(fit$finalModel, xvar = "penalty", use.color = T)
# Cement

# Question 4 ----
# Forecast the blog-visits series with bats(); how many test points fall
# inside the 95% prediction interval?
setwd("/Users/bikash/repos/Coursera-Practical-Machine-Learning/") # NOTE: machine-specific path
library(lubridate) # For year() function below
library(forecast) ## bats() function
dat = read.csv("data/gaData.csv")
training = dat[year(dat$date) < 2012,]
testing = dat[(year(dat$date)) > 2011,]
tstrain = ts(training$visitsTumblr)
fit <- bats(tstrain)
pred <- forecast(fit, level = 95, h = dim(testing)[1])
plot(pred)
# get the accuracy
accuracy(pred, testing$visitsTumblr)
predComb <- cbind(testing, data.frame(pred))
predComb$in95 <- (predComb$Lo.95 < predComb$visitsTumblr) & (predComb$visitsTumblr < predComb$Hi.95)
# Proportion of testing points whose true value lies inside the 95% PI.
prop.table(table(predComb$in95)) # 0.9617021

# Question 5 ----
# SVM regression of CompressiveStrength with default settings; report RMSE.
set.seed(3523)
library(AppliedPredictiveModeling)
data(concrete)
inTrain = createDataPartition(concrete$CompressiveStrength, p = 3/4)[[1]]
training = concrete[ inTrain,]
testing = concrete[-inTrain,]
# BUG FIX: the original called set.set(325), which is not a function;
# the quiz instructs "set the seed to 325".
set.seed(325)
library(e1071)
library(hydroGOF) ## RMSE
## SVM
model <- svm(CompressiveStrength ~ ., data = training)
print(model)
summary(model) # print summary
# predict on the test set and compute the RMSE
pred <- predict(model, testing)
rmse(pred, testing$CompressiveStrength)
### 6.715009
|
42adf673b738662bf389adb266d345fbb8a68482
|
2551bc0184e234dcc4f5b47723cc3f9039add0d2
|
/plot3.R
|
c310751c415d5fb7f1c24838bbb0f897da2b9a43
|
[] |
no_license
|
ncd10/ExData_Plotting1
|
0504de2f844367239609a6c3c06abe6986fb43dd
|
dddced0bcc3065bdaafab2f03110b9f44764e08b
|
refs/heads/master
| 2020-12-27T09:32:34.933615
| 2015-04-07T19:24:39
| 2015-04-07T19:24:39
| 33,508,995
| 0
| 0
| null | 2015-04-06T22:23:16
| 2015-04-06T22:23:16
| null |
UTF-8
|
R
| false
| false
| 1,146
|
r
|
plot3.R
|
## plot3: energy sub-metering on 2007-02-01/02, written to plot3.png.
# Path to the raw data (machine-specific; adjust to your local copy).
data_file <- "C:/Users/user/Desktop/exdata-data-household_power_consumption/household_power_consumption.txt"
# Reading in data; "?" marks missing values in this file.
household_power_consumption <- read.csv(data_file, sep = ";", na.strings = "?")
# Keep only the two specified dates (Date is still a d/m/Y string here).
data <- household_power_consumption[household_power_consumption$Date == "1/2/2007" |
                                      household_power_consumption$Date == "2/2/2007", ]
# Build a POSIXct date-time from the Date and Time columns.
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
data$day <- as.POSIXct(paste(data$Date, as.character(data$Time)))
# Use English (C locale) axis labels instead of the system default.
Sys.setlocale("LC_TIME", "C")
# Plot the three sub-metering series and write the PNG.
# FIX: the original wrapped each lines() call in par(new=...) and passed
# ylim/mar/xlab/ylab to lines(); par(new=) has no effect on lines() and the
# extra arguments only generated "not a graphical parameter" warnings, so
# both are dropped.  The drawn output is unchanged.
png(filename = "plot3.png", width = 480, height = 480)
plot(data$day, data$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(data$day, data$Sub_metering_2, col = "red")
lines(data$day, data$Sub_metering_3, col = "blue")
legend('topright', c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c(1, 2, 4), lty = 1, cex = .8)
dev.off()
|
0f2fc15cc7f13f7b1ca7ac2f5c507eba50e493b6
|
38023bae01994887a7c9280943dbce5eaa993230
|
/man/citippy_path.Rd
|
20d33bccc197c2cbe34398a1284f9d8cbf962267
|
[
"MIT"
] |
permissive
|
awconway/citippy
|
9730c3fc15fcb4a9b8183b36c15f2a53d5710897
|
64824ac6ae9e1c7b8b2b47f5aa79f3933ae17c63
|
refs/heads/master
| 2020-12-11T14:50:55.190052
| 2020-02-19T17:44:17
| 2020-02-19T17:44:17
| 233,876,620
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 405
|
rd
|
citippy_path.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/citippy_path.R
\name{citippy_path}
\alias{citippy_path}
\title{add citation to rmarkdown document that will show as tooltip on hover}
\usage{
citippy_path(ref_path)
}
\arguments{
\item{ref_path}{Path to your .bib file with bibtex entries}
}
\description{
add citation to rmarkdown document that will show as tooltip on hover
}
|
5639a8461ac17824bebacebbb60b44fe9b54806d
|
12d311d9865b976a471d45d795e4ccac94013659
|
/Scripts/Load_Data/covidPCR_load.R
|
b3ae542b22daca39ab848b8709e1e9700dc813e2
|
[] |
no_license
|
pmbusch/Analisis-COVID-MP2.5
|
cfe4bec4bc22f5aed2ba412dd38585c7e474c1e6
|
edc3d3a7c3a2e3751d38cc984fd96a3b5e661df4
|
refs/heads/master
| 2023-01-07T21:21:16.080455
| 2020-11-11T13:21:47
| 2020-11-11T13:21:47
| 281,760,233
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 883
|
r
|
covidPCR_load.R
|
### Analisis-COVID-MP2.5
## COVID data. Source: https://github.com/MinCiencia/Datos-COVID19
## Product 7: PCR tests by region
## PBH July 2020
## NOTE(review): relies on readr (read_csv), dplyr (%>%, group_by, summarise,
## select) and stringr (str_to_lower, str_replace_all) being attached by the
## calling script — confirm.
## Data download ---------
# URL for direct download from the Ministry of Science GitHub repository
url <- "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output"
# Note: the _std files come already flattened (long format)
df_pcr <- read_csv(paste(url,"producto7","PCR_std.csv", sep="/"))
# Normalize column names: lower-case with spaces replaced by underscores.
names(df_pcr) <- names(df_pcr) %>% str_to_lower() %>% str_replace_all(" ","_")
df_pcr <- df_pcr %>% na.omit() # drop rows containing NA
# Keep a copy of the full time series before aggregating totals.
df_pcr_tiempo <- df_pcr
## Total PCR tests to date -----------
# Sum the counts per region
df_pcr <- df_pcr %>%
group_by(region, codigo_region, poblacion) %>%
summarise(pcr_region=sum(numero, na.rm=T)) %>% ungroup() %>%
select(codigo_region, pcr_region)
# overall PCR total (printed as a quick sanity check)
df_pcr$pcr_region %>% sum()
rm(url)
## EoF
|
89b84958bf92e8be411a9e37f9c58fca49d74c1f
|
ff8272e4d55dcada2597638fcae9d56ff80b4fc7
|
/Rcode_SeqTrack_09Feb.R
|
f306aec9acded38cfa81543a7b9ccc32c5ddf7da
|
[] |
no_license
|
AdrianAllen1977/R-code
|
537c08dd29d29514661c7e3b35679bef46e96c54
|
1a1cd56007bfca2cae36a05537402b13b0df94b0
|
refs/heads/master
| 2023-07-20T08:25:56.300787
| 2023-07-17T18:20:42
| 2023-07-17T18:20:42
| 172,774,255
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,802
|
r
|
Rcode_SeqTrack_09Feb.R
|
## Reconstruction of transmission trees in an endemic phase of
## Mycobacterium bovis infection in cattle and badgers in NI
## Pipeline: load case metadata + SNP alignment, explore pairwise diversity,
## run adegenet's SeqTrack to infer a parsimonious transmission tree, then
## summarise mutations and badger/cattle transmission events.
## 1.) PREPARATIONS
## 1.1) load packages
library(ape)
library(adegenet)
library(igraph)
library(tcltk)
## 1.2) Set your working directory on your computer
## Import note: Check that you use slashes / (and not backslash \)
## NOTE(review): hard-coded absolute path; setwd() in a script is fragile —
## adjust per machine.
setwd("C:/Users/Assel/Desktop/ASEKE/wgs TVR Aug 2020")
## 1.3) LOAD DATA
## First look at data
cases <- read.csv("TVR302_input_SeqTrack_v1.csv")
#Add "-01-01" (date January 1st) to all of the isolates
# (only the sampling year is known, so each isolate is dated Jan 1 of its year)
cases$Date <- paste(cases$Year,"-01-01", sep = "")
#Order by date
cases <- cases[order(cases$Date),]
## Note: the samples ("cases") are sorted by sampling date: the first row is
## the oldest case and the last row is the most recent case.
## Define the columms with dates as GMT dates in POSIXct format.
SampleDate <- as.POSIXct(cases$Date, tz = "GMT") # time zone (tz) in Universal Time, Coordinated (UTC), which is GMT
## In this work we will select the sampling date as the infection date.
Date <- SampleDate
head(Date)
range(Date)
#days=how many days of the infection
# (integer days elapsed since the earliest sampled case)
days <- as.integer(difftime(Date, min(Date), unit="days"))
days
## Load the SNP sequence data (ordered from oldest to newest)
dna <- fasta2DNAbin("TVR_263clade_n=302_subset_Idonly_06102020.fasta")
#######################################################################################
## 2) EXPLORE SNP DATA
## To have an idea of the existing diversity in these sequences, we compute the
## simple pair-wise Hamming distances and plot their distribution:
D <- dist.dna(dna, model="N")
hist(D, col="grey", nclass=30,
main="Distribution of pairwise genetic distances",
xlab="Number of differing nucleotides")
## check if this remarkable polymorphism is distributed randomly across the genome
snps <- seg.sites(dna)
head(snps)
length(snps)
## There are 323 polymorphic sites in the sample. We can visualize their position, and
## try to detect hotspots of polymorphism by computing the density of SNPs as we
## move along the genome
snpposi.plot(dna, codon=FALSE)
#########################################################################################################################
## 3.) SEQTRACK ANALYSIS
## 3.1) BAsic analysis based on genomic data (without including epidemiological information or disease biology)
## Here, we use SeqTrack on the matrix of pairwise distances (distmat), indicating the labels of the cases
## (x.names=cases$label), the collection dates (x.dates=dates), the mutation rate per site per day (mu_MAP) and
## the number of nucleotides = length of analysed sequences (nbNucl):
distmat <- as.matrix(D)
nbNucl <- ncol(as.matrix(dna))
#Substitution rate for M.bovis
# 0.35 subs/genome/year over the 4,345,492-bp genome = 8.054e-8 subs/site/year;
# divided by 365 to get a per-day rate, matching the daily time scale above.
mu_mbovis <- 8.054e-8/365 ## 0.35 substitutions per genome per year (from paper section 3.5)
## 4,345,492-bp M bovis genome
#seqtrack algorithm
res <- seqTrack(distmat, x.names=cases$Id, x.dates=Date, mu=mu_mbovis, haplo.le=nbNucl)
class(res)
res
## The result res is a data.frame with the special class seqTrack, containing the
## following information:
## res$id: the indices of the cases.
## res$ances: the indices of the putative ancestors of the cases.
## res$weight: the number of mutations corresponding to the ancestries.
## res$date: the collection dates of the cases.
## res$ances.date: the collection dates of the putative ancestors.
#Assign hosts to the SeqTrack output data (B = badger, C = cattle)
res$Hosts <- cases$Host
#Create badgers dataset
res_badgers <- subset(res, Hosts == "B")
#Identify ancestor hosts: 'B' if the inferred ancestor is a badger case, else 'C'
res$ances.hosts <- ifelse(res$ances %in% res_badgers$id, 'B', 'C')
## seqTrack objects can be plotted simply using the "plot" command.
## Each sequence/case is a node of the graph, and arrows model putative ances-
## tries/transmissions. The number of mutations between ancestors and descendents
## are indicated by the color of the arrows (red = no/few mutations; light grey= many
## mutations) and the numbers in blue. Time is represented on the y axis (up: ancient;
## down: recent).
# NOTE(review): relies on the seqTrack plot method returning the igraph object,
# which is reused below for relabelling and tkplot — confirm with adegenet docs.
res_network <- plot(res, main="SeqTrack reconstruction of the transmission tree")
mtext(side=3, text="dark red: no/few mutations; light grey: many mutations")
res_network
#Rename res_network Ids for Hosts
V(res_network)$name <- res$Hosts
#Create interactive plot for SeqTrack tree (opens a Tk window)
tkplot(res_network, vertex.color="yellow", layout=layout_with_lgl,
vertex.size=7, vertex.label.cex = 0.8,
edge.label.cex = 0.7)
## Store result files ("res") on your computer
getwd() ## this command will give you the path of the default working directory
write.csv(res, "SeqTrack_outputfile_Mbovis network_10Feb_final.csv") ## this will save the "res" object in your working directory
#Examine SeqTrack output
#Create barplot with distribution of mutations between ancestors and descendants (Supplementary Figure S10)
range(res$weight, na.rm=TRUE)
barplot(table(res$weight)/sum(!is.na(res$weight)), ylab="Proportions",
xlab="Mutations between inferred ancestor and descendent", col="grey",
cex.lab = 1.3)
#Create transmissions plot -> change hosts names for Badger, Cattle
res$Hosts <- ifelse(res$Hosts == "B", 'Badger', 'Cattle')
res$ances.hosts <- ifelse(res$ances.hosts == "B", 'Badger', 'Cattle')
# Label each edge as "ancestorHost_descendantHost", e.g. "Badger_Cattle".
res$transmissions <- paste(res$ances.hosts, res$Hosts, sep = "_")
#Create barplot with transmission events (Supplementary Figure S11)
barplot(table(res$transmissions)/length(res$transmissions)*100, col="lightblue",
xlab = "Transmission from/to", ylab = "Number (%) from total",
cex.names = 0.75,
cex.lab = 1.3)
|
0e70c2af60b2cadac100ef00e13f003d1ca09c5c
|
0ac77bbd8f8a8b1dd37f0369a540234aaddbb5e9
|
/08_ShinyLab/RestrictionOfRange/server.R
|
54e4bd1a2f8c538ed08a87d3cefe831865bd41ee
|
[] |
no_license
|
civilstat/36721-F15
|
dd8199dd643d41cffd313402ca3264a802e9190d
|
cef0e1dfba17d82a6a1e4dd7143f2c1f2a629894
|
refs/heads/master
| 2021-01-10T13:38:43.183702
| 2015-10-05T22:10:12
| 2015-10-05T22:10:12
| 43,716,109
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,317
|
r
|
server.R
|
# Restriction of Range exercise
# server.R
#
# 08 Shiny Lab Session
# Jerzy Wieczorek
# 9/24/15
# 36-721 Statistical Graphics and Visualization
#
# Demonstrates how restricting the range of X attenuates the fitted slope:
# a thin regression line is drawn on the full simulated data, a thick one
# on only the slider-selected subset.
library(shiny)
library(ggplot2)
# Define server logic
shinyServer(function(input, output) {
# Create a random dataset for plotting (once per Shiny session),
# where Y = 0.5 * X + e,
# and X and e are both N(0,1)
n = 1000
X = rnorm(n)
Y = 0.5 * X + rnorm(n)
# Use a reactive function to add an indicator to this dataset,
# for whether or not each row's X value was selected by the slider
# (input$limits is the two-element slider range; re-evaluates on change).
df = reactive({
included = input$limits[1] < X & X < input$limits[2]
data.frame(X = X, Y = Y,
group = factor(included, levels = c(TRUE, FALSE),
labels = c("Included", "Excluded")))
})
# Also use a reactive function to create a subset
# with just the included rows
dfIncluded = reactive({
subset(df(), group == "Included")
})
# Create the plot, with two regression lines:
# thin line on full dataset,
# thick line on Included subset
output$plot <- renderPlot({
ggplot(df(), aes(X, Y)) +
geom_point(aes(col = group)) +
geom_smooth(method = "lm", se = FALSE) +
geom_smooth(data = dfIncluded(),
method = "lm", se = FALSE, lwd = 2)
})
})
|
14ec9100395aa35cc6a098d0a5d47584c35dac66
|
dc248480f6cfe92b936babf693386e91261de933
|
/man/sass_cache_options.Rd
|
54765a0ee09d057f4ccb77bc288abe277ce37d44
|
[
"MIT"
] |
permissive
|
rstudio/sass
|
943c07acee0696b19d63ec562bd901a722ed818e
|
4f42680340264e1655745a99cbd7e0c0481b6a01
|
refs/heads/main
| 2023-08-12T12:56:39.941402
| 2023-07-26T21:23:52
| 2023-07-26T21:23:52
| 144,749,268
| 81
| 22
|
NOASSERTION
| 2023-07-26T21:22:12
| 2018-08-14T17:02:58
|
C++
|
UTF-8
|
R
| false
| true
| 441
|
rd
|
sass_cache_options.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sass_cache.R
\name{sass_cache_options}
\alias{sass_cache_options}
\title{Caching Options for Sass (defunct)}
\usage{
sass_cache_options(cache, cache_dir)
}
\arguments{
\item{cache}{No longer used.}
\item{cache_dir}{No longer used.}
}
\description{
This function is no longer used. Please see \code{\link[=sass_cache_get]{sass_cache_get()}}.
}
\keyword{internal}
|
32ddb0b64ad2dd1663c523c683da6905e1066e5d
|
4ba57c79ec94010ade2836a669a7fd7864d5a85c
|
/02.R
|
7b7e570020ef7f96dc7b86eccda90bad5a9148ed
|
[] |
no_license
|
yansonz/R-Practice-1
|
b55211317399a04433881a4eb49b3a284ea3cd74
|
832ebd1d07a73f522b795f57048ba49c32bf23a0
|
refs/heads/master
| 2021-01-17T07:41:43.131387
| 2016-09-28T02:31:23
| 2016-09-28T02:31:23
| 67,960,773
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 866
|
r
|
02.R
|
# Evaluate the degree-5 Legendre polynomial on a grid over [-1, 1] via the
# closed-form series
#   P_n(x) = 2^-n * sum_{k=0}^{floor(n/2)} (-1)^k (2n-2k)! /
#            (k! (n-k)! (n-2k)!) * x^(n-2k)
n <- 5
x <- seq(-1, 1, by = 0.2)
# Build each term of the alternating series as a vector over the grid,
# then fold them together instead of accumulating in a for-loop.
series_terms <- lapply(0:floor(n / 2), function(k) {
  (-1)^k * factorial(2 * n - 2 * k) /
    (factorial(k) * factorial(n - k) * factorial(n - 2 * k)) *
    x^(n - 2 * k)
})
total <- Reduce(`+`, series_terms)
1 / 2^n * total  # auto-prints P_5 evaluated at each grid point
#' Evaluate the Legendre polynomial P_n at each point of `x` using the
#' closed-form series
#'   P_n(x) = 2^-n * sum_{k=0}^{floor(n/2)} (-1)^k (2n-2k)! /
#'            (k! (n-k)! (n-2k)!) * x^(n-2k)
#'
#' Improvements over the original: the inner k-loop is vectorized into a
#' single sum(), unsafe 1:length(x) indexing is replaced by seq_along(),
#' and the evaluation grid is exposed as a backward-compatible optional
#' argument (pn1(n) behaves exactly as before).
#'
#' @param n Non-negative integer degree of the polynomial.
#' @param x Numeric vector of evaluation points; defaults to the original
#'   hard-coded grid seq(-1, 1, by = 0.2).
#' @return Numeric vector of P_n(x), same length as `x`.
pn1 <- function(n, x = seq(-1, 1, by = 0.2)) {
  ks <- 0:floor(n / 2)
  result <- numeric(length(x))
  for (i in seq_along(x)) {
    # Whole alternating series for one grid point in a single vectorized sum.
    result[i] <- 1 / 2^n * sum(
      (-1)^ks * factorial(2 * n - 2 * ks) /
        (factorial(ks) * factorial(n - ks) * factorial(n - 2 * ks)) *
        x[i]^(n - 2 * ks)
    )
  }
  result
}
pn1(5)
# Evaluate the Legendre polynomial P_n on the fixed grid seq(-1, 1, by = 0.2)
# using the closed-form series
#   P_n(x) = 2^-n * sum_k (-1)^k (2n-2k)! / (k!(n-k)!(n-2k)!) * x^(n-2k).
# Behaviorally identical to the original; the k-dependent coefficients are
# hoisted out of the per-point loop and the loop itself becomes a vapply().
pn2 <- function(n) {
  grid <- seq(-1, 1, by = 0.2)
  ks <- 0:floor(n / 2)
  # Coefficients do not depend on the grid point, so compute them once.
  coeff <- (-1)^ks * factorial(2 * n - 2 * ks) /
    (factorial(ks) * factorial(n - ks) * factorial(n - 2 * ks))
  vapply(grid, function(pt) 1 / 2^n * sum(coeff * pt^(n - 2 * ks)), numeric(1))
}
pn2(5)
|
3b475fbfdde65cba3afee6048416f94ce7a67d56
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/quantreg/examples/ranks.Rd.R
|
9abe9ecbfe92890aecb35e1a6af9112a0f6f5040
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 194
|
r
|
ranks.Rd.R
|
# Auto-extracted example exercising quantreg::ranks().
library(quantreg)
### Name: ranks
### Title: Quantile Regression Ranks
### Aliases: ranks
### Keywords: regression
### ** Examples
data(stackloss)
# NOTE(review): tau = -1 appears to request the full quantile-regression
# process fit required by ranks() — confirm against quantreg's rq() docs.
ranks(rq(stack.loss ~ stack.x, tau=-1))
|
6bf681616458bd6c10bc672876e1db54ef4fc681
|
075e88689548f684bcb1d1fb38955e91f9ea587a
|
/tmp.R
|
f7cd20133d2234c1f1d6312f90756696437d3b5f
|
[] |
no_license
|
jdhoffa/ds-incubator
|
e9a09332162cc477f63fd98d0230dafa288776ea
|
064da003097ecb64c6f0a0bd2801104257f0e93e
|
refs/heads/master
| 2021-07-11T22:09:16.826301
| 2020-10-17T23:27:22
| 2020-10-17T23:27:22
| 215,499,492
| 0
| 0
| null | 2019-10-16T08:41:55
| 2019-10-16T08:41:55
| null |
UTF-8
|
R
| false
| false
| 33
|
r
|
tmp.R
|
library(random)
library(RandVar)
|
8c9f4fdd706234409a6aab0fed681f8682510097
|
a09b5d25ec0ef9c2db757b7c4333c485da222d7e
|
/R/CNVScopeserver.R
|
2555091c486fde9935d050031e658fcb4e680e0e
|
[] |
no_license
|
cran/CNVScope
|
438a4cc4facbc1b892dd228844733603a929085d
|
fe9eed310d36d96f6cd1ebcc14d9bec82b5a58ca
|
refs/heads/master
| 2022-05-07T22:30:03.560897
| 2022-03-30T22:40:08
| 2022-03-30T22:40:08
| 153,879,861
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 87,622
|
r
|
CNVScopeserver.R
|
#' Server component of the CNVScope plotly shiny application.
#'
#' Server function of the CNVScope shiny application. run with runCNVScopeShiny
#' @name CNVScopeserver
#' @keywords CNV heatmap shiny plotly
#' @import ggplot2 magrittr
#' @rawNamespace import(shiny, except = c(runExample,renderDataTable))
#' @rawNamespace import(RCurl, except = reset)
#' @rawNamespace import(data.table, except = c(melt, dcast))
#' @param session The shiny session object for the application.
#' @param input shiny server input
#' @param output shiny server output
#' @param debug enable debugging mode
#' @return None
#' @examples
#' \dontrun{
#' runCNVScopeShiny()
#' }
#' @export
#globalVariables(c("ensembl_gene_tx_data_gr","baseurl","chromosomes","downsample_factor","basefn",
# "subset_name",
# "expression_data_gr_nbl",'start2','start1','value','Var1','Var2','value1',
# 'tcga_type','census_data_gr','common_coords','myReactives',
# 'genev','delete.isolates','freq_data'),add = F)
#rawNamespace import(GenomicFeatures ,except = show)
if(getRversion() >= "2.15.1") utils::globalVariables(c("."), add=F)
CNVScopeserver<-function(session,input, output, debug=F) {
# if(requireNamespace("plotly",quietly = T)){
#
#importFrom tidyr unite
#importFrom jointseg jointSeg
#importFrom logging addHandler
#importFrom DT renderDataTable
#rawNamespace import(shinyjs, except = runExample)
#import reshape2 htmltools htmlwidgets
ensembl_gene_tx_data_gr <- if(exists("ensembl_gene_tx_data_gr")){get("ensembl_gene_tx_data_gr")} else {NULL}
baseurl <- if(exists("baseurl")){get("baseurl")} else {NULL}
adjpvaluechr <- if(exists("adjpvaluechr")){get("adjpvaluechr")} else {NULL}
basefn <- if(exists("basefn")){get("basefn")} else {NULL}
osteofn <- if(exists("osteofn")){get("osteofn")} else {NULL}
start1 <- if(exists("start1")){get("start1")} else {NULL}
start2 <- if(exists("start2")){get("start2")} else {NULL}
value <- if(exists("value")){get("value")} else {NULL}
value1 <- if(exists("value1")){get("value1")} else {NULL}
Var1 <- if(exists("Var1")){get("Var1")} else {NULL}
Var2 <- if(exists("Var2")){get("Var2")} else {NULL}
bins.seqnames <- if(exists("bins.seqnames")){get("bins.seqnames")} else {NULL}
bins.start <- if(exists("bins.start")){get("bins.start")} else {NULL}
bins.end <- if(exists("bins.end")){get("bins.end")} else {NULL}
expression_data_gr <- if(exists("expression_data_gr")){get("expression_data_gr")} else {NULL}
common_coords <- if(exists("common_coords")){get("common_coords")} else {NULL}
myReactives <- if(exists("myReactives")){get("myReactives")} else {NULL}
genev <- if(exists("genev")){get("genev")} else {NULL}
delete.isolates <- function(graph, mode = 'all') {
isolates <- which(igraph::degree(graph, mode = mode) == 0)
igraph::delete.vertices(graph, isolates)
}
freq_data <- if(exists("freq_data")){get("freq_data")} else {NULL}
#adjpvalue chr cn correlation genes_text probe visval
adjpvalue <- if(exists("adjpvalue")){get("adjpvalue")} else {NULL}
chr <- if(exists("chr")){get("chr")} else {NULL}
cn <- if(exists("cn")){get("cn")} else {NULL}
correlation <- if(exists("correlation")){get("correlation")} else {NULL}
genes_text <- if(exists("genes_text")){get("genes_text")} else {NULL}
probe <- if(exists("probe")){get("probe")} else {NULL}
visval <- if(exists("visval")){get("visval")} else {NULL}
privpolurl <- a("NCI Privacy Policy", href="https://www.cancer.gov/policies/privacy-security",target="_blank")
output$privpol <- renderUI({
tagList(privpolurl)})
downsample_factor<-NULL
subset_name<-NULL
#expression_data_gr_nbl<-NULL
tcga_type<-NULL
chrom.pairs<-NULL
printLogJs <- function(x, ...) {
shinyjs::logjs(x)
T
}
observe({
if (input$geneSearch == 0) {return()}
x<-isolate(input$geneSearch)
#browser()
if(x!=0 & isolate(input$gene_input_col)!=""& isolate(input$gene_input_row)!=""){
if(length(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)])!=0) {
colgene_loc<-paste0(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)][1]$....chromosome_name,":",
ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)][1]$....start_position,"-",
ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)][1]$....end_position)
} else {
colgene_loc<-""}
if(length(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)])!=0) {
rowgene_loc<-paste0(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)][1]$....chromosome_name,":",
ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)][1]$....start_position,"-",
ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)][1]$....end_position)
} else {
# ##browser()
rowgene_loc<-""}
updateTextInput(session,"loc_input_col",value=colgene_loc)
updateTextInput(session,"loc_input_row",value=rowgene_loc)
if(length(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)])!=0){
updateSelectInput(session,"chrom2",selected = paste0(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_row)][1]$....chromosome_name,"_"))}
if(length(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)])!=0){
updateSelectInput(session,"chrom1",selected = paste0(ensembl_gene_tx_data_gr[ensembl_gene_tx_data_gr$....external_gene_name==isolate(input$gene_input_col)][1]$....chromosome_name,"_"))}
} #end check to see if there is input in the gene search.
})
# observeEvent(input$goButton, {
# hide("minimap")
# hide("row_gene_data")
# hide("col_gene_data")
# })
# observeEvent(event_data("plotly_click"), {
# show("minimap")
# show("row_gene_data")
# show("col_gene_data")
# })
observeEvent(plotly::event_data("plotly_click"), {
#showTab(inputId = "tabs",select = T, target = "sample info")
if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit")
{
showTab(inputId="tabs",target="gain/loss frequency")
}
shinyjs::show("row_gene_data")
shinyjs::show("col_gene_data")
shiny::showTab(inputId="tabs",target="sample info")
shiny::showTab(inputId="tabs",target="COSMIC cancer gene census")
shiny::showTab(inputId="tabs",target="expression_data")
})
observeEvent(input$goButton, {
showTab(inputId = "tabs",select = T, target = "Plots")
if(isolate(input$data_source)!="linreg_osteosarcoma_CNVkit")
{
shiny::hideTab(inputId="tabs",target="gain/loss frequency")
}
if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit")
{
shiny::showTab(inputId="tabs",select = F,target="gain/loss frequency")
}
})
observeEvent(input$data_source, {
if(isolate(input$data_source)!="linreg_osteosarcoma_CNVkit")
{
shiny::hideTab(inputId="tabs",target="gain/loss frequency")
}
if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit")
{
shiny::showTab(inputId="tabs",select = F,target="gain/loss frequency")
}
if(is.null(plotly::event_data("plotly_click"))){
shiny::hideTab(inputId="tabs",target="gain/loss frequency")
shiny::hideTab(inputId="tabs",target="sample info")
shiny::hideTab(inputId="tabs",target="COSMIC cancer gene census")
shiny::hideTab(inputId="tabs",target="expression_data")
}
})
getHeight<-function()
{
return(isolate(input$heatmapHeight))
}
logging::addHandler(printLogJs)
isolate(input$goButton)
# observe({
# input$goButton
# if(!is.null(isolate(input$loc_input_row))){
# updateSelectInput(session,"chrom1",chromosomes,selected=paste0(as.character(GRanges(isolate(input$loc_input_row))@seqnames),"_"))}
# })
output$plotlyChromosomalHeatmap <- plotly::renderPlotly({
if (input$goButton == 0) {return()}
input$goButton
if(debug){browser()}
#browser()
#browser()
# if(!file.exists(
# (
# paste0(getwd(),"/matrix/linreg/",
# chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],
# "melted_downsampled_linreg.RData")
# )
# )){ return("file does not exist!");}
#if there is location data, change the chromosomes from what they were chosen.
#
#isolate(input$loc_input_row)
# observe({
# updateSelectInput(session,"chrom1",chromosomes,selected=paste0(as.character(GRanges(isolate(input$loc_input_row))@seqnames),"_"))
# })
if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit")
{
# load( url(paste0(paste0(baseurl,"matrix/linreg/unrescaled/",
# chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],
# "melted_downsampled_linreg_unrescaled.RData"))))
load(paste0(paste0(osteofn,"linreg236/unrescaled/downsample/",
chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],
"melted_downsampled_linreg_max_cap_75.RData")))
# load( url(paste0(paste0(baseurl,"matrix/linreg/unrescaled/full/",
# chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],
# "melted_full_linreg_max_cap_75.RData"))))
load( paste0(paste0(osteofn,"linreg236/unrescaled/full/",
chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],
"melted_full_linreg_max_cap_75.RData")))
downsample_factor<<-4
if(debug){browser()}
if(!exists("osteofn")){ tryCatch(bin_data<<-readRDS((url(paste0(baseurl,"bin_data_lcc236.rds")))),error = function(e) NULL)}
tryCatch(bin_data<<-readRDS((paste0(osteofn,"bin_data_lcc236.rds"))),error = function(e) NULL)
colnames(ggplotmatrix_full)<-colnames(ggplotmatrix)<-c("Var1","Var2","value","Var11","Var21","value1")
}
#
if(isolate(input$data_source)=="TCGA_SARC_SNP6")
{
load( url(paste0(paste0(baseurl,"matrix/TCGA_SARC/downsampled_factor_8/",
chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],
"melted_downsampled_TGCA_SARC_unrescaledv2.RData"))))
# load( url(paste0(paste0(baseurl,"matrix/TCGA_SARC/full/",
# chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],
# "melted_full_TGCA_SARC_unrescaled.RData"))))
downsample_factor<<-8
}
#
if(isolate(input$data_source)=="TCGA_BRCA_low_pass")
{
#
sample_name<-"BRCA_output_matrix1e6"
load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/BRCA/",
paste0(chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],"melted_downsampled_TGCA_",sample_name,"_unrescaled",".RData")
))))
ggplotmatrix_full<-ggplotmatrix
}
if(isolate(input$data_source)=="TCGA_AML_low_pass")
{
sample_name<-"AML_output_matrix1e6"
load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/AML/",
paste0(chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],"melted_downsampled_TGCA_",sample_name,"_unrescaled",".RData")
))))
ggplotmatrix_full<-ggplotmatrix
}
if(isolate(input$data_source)=="TCGA_PRAD_low_pass")
{
sample_name<-"PRAD_output_matrix1e6"
load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/PRAD/",
paste0(chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],"melted_downsampled_TGCA_",sample_name,"_unrescaled",".RData")
))))
ggplotmatrix_full<-ggplotmatrix
}
if(isolate(input$data_source)=="TCGA_NBL_low_pass")
{
#browser()
sample_name<-"NBL_output_matrix1e6"
load( paste0(paste0(basefn,"matrix/TCGA_low_pass/NBL/",
paste0(isolate(input$chrom1),isolate(input$chrom2),"nbl_sample_matched_unrescaled.RData")
)))
#browser()
# ggplotmatrix
ggplotmatrix_full<-ggplotmatrix
tryCatch(bin_data<<-readRDS((url(paste0(baseurl,"bin_data_nbl.rds")))),error = function(e) NULL)
tryCatch(bin_data<<-readRDS((paste0(basefn,"bin_data_nbl.rds"))),error = function(e) NULL)
}
if(isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset"))
{
#browser()
subset_name<<-gsub("_subset","",gsub("TCGA_NBL_","",paste0(input$data_source)))
sample_name<-"NBL_output_matrix1e6"
# load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/NBL/",subset_name,"/",
# paste0(chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],"melted_downsampled_TGCA_","NBLsample_matched","_unrescaled",subset_name,".RData")
# ))))
load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/NBL/",subset_name,"/",
paste0(isolate(input$chrom1),isolate(input$chrom2),"melted_downsampled_TGCA_","NBLsample_matched","_unrescaled",subset_name,"pos_neg.RData")
))))
if(length(bin_data$probe)==0)
{
bin_data$probe<-rownames(bin_data)
}
#browser()
# ggplotmatrix
ggplotmatrix_full<-ggplotmatrix
tryCatch(bin_data<<-readRDS((url(paste0(baseurl,"bin_data_nbl_",subset_name,".rds")))),error = function(e) NULL)
tryCatch(bin_data<<-readRDS((paste0(basefn,"bin_data_nbl_",subset_name,".rds"))),error = function(e) NULL)
input_mat<-bin_data %>% dplyr::select(-probe)
rownames(input_mat)<-bin_data$probe
#
tryCatch(expression_data_gr_nbl<<-readRDS(url(paste0(baseurl,"tcga_nbl_expression_",subset_name,"subset.rds"))),error = function(e) NULL)
tryCatch(expression_data_gr_nbl<<-readRDS(paste0(basefn,"tcga_nbl_expression_",subset_name,"subset.rds")),error = function(e) NULL)
#browser()
#server-side processing(disabled):
# tryCatch(tcga_gr<<-readRDS((url(paste0(baseurl,"tcga_gr_no_stats.rds")))),error = function(e) NULL)
# tryCatch(tcga_gr<<-readRDS((paste0(basefn,"tcga_gr_no_stats.rds"))),error = function(e) NULL)
# tryCatch(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm<<-readRDS((url(paste0(baseurl,"tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_caseid.rds")))),error = function(e) NULL)
# tryCatch(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm<<-readRDS((paste0(basefn,"tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_caseid.rds"))),error = function(e) NULL)
#
# tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset<-as.data.frame(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm)[,na.omit(match(colnames(bin_data),colnames(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm)))]
# #dim(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset)
# mcols(tcga_gr)$rowMean<-rowMeans(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset) #tcga_dfs_cbind_with_ensg[,2:ncol(tcga_dfs_cbind_with_ensg)]
# mcols(tcga_gr)$rowMeanPctl<-heatmaply::percentize(rowMeans(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset))
# mcols(tcga_gr)$rowVar<-matrixStats::rowVars(as.matrix(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset))
# mcols(tcga_gr)$rowVarPctl<-heatmaply::percentize(matrixStats::rowVars(as.matrix(tcga_dfs_cbind_with_ensg_with_ensembl_fpkm_subset)))
# mcols(tcga_gr)$SYMBOL<-mcols(tcga_gr)$....external_gene_name
# mcols(tcga_gr)$gene_type<-mcols(tcga_gr)$....gene_biotype
# expression_data_gr<<-tcga_gr
}
if(isolate(input$data_source)=="TCGA_OS_low_pass")
{
sample_name<-"OS_output_matrix1e6"
load( url(paste0(paste0(baseurl,"matrix/TCGA_low_pass/OS/",
paste0(chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom1))))],chromosomes[as.integer(gsub("_","",gsub("chr","",isolate(input$chrom2))))],"melted_downsampled_TGCA_",sample_name,"_unrescaled",".RData")
))))
ggplotmatrix_full<-ggplotmatrix
}
# colnames(ggplotmatrix)<-gsub(pattern = "(\\.)+.","",colnames(ggplotmatrix))
# colnames(ggplotmatrix_full)<-gsub(pattern = "(\\.)+.","",colnames(ggplotmatrix_full))
#browser()
ggplotmatrix$value<-signedRescale(ggplotmatrix$value,max_cap=isolate(input$max_cap))[,1]
ggplotmatrix<-dplyr::bind_cols(ggplotmatrix,reshape2::colsplit(ggplotmatrix$Var1,"_",c("chr1","start1","end1")))
ggplotmatrix<-dplyr::bind_cols(ggplotmatrix,reshape2::colsplit(ggplotmatrix$Var2,"_",c("chr2","start2","end2")))
ggplotmatrix<-ggplotmatrix[order(ggplotmatrix$start1,ggplotmatrix$start2),]
if(!is.null(ggplotmatrix)){ggplotmatrix<<-ggplotmatrix}
if(!is.null(ggplotmatrix_full)){ ggplotmatrix_full$value<-signedRescale(ggplotmatrix_full$value,max_cap=isolate(input$max_cap))[,1]}
if(!is.null(ggplotmatrix_full)){ggplotmatrix_full<<-ggplotmatrix_full}
recast_matrix<-reshape2::dcast(data=ggplotmatrix,formula=Var1 ~ Var2, var = ggplotmatrix$value) #this creates a matrix in wide format.
if(ncol(recast_matrix)!=nrow(recast_matrix))
{
rownames(recast_matrix)<-recast_matrix$Var1
recast_matrix<-recast_matrix[,2:ncol(recast_matrix)]
}
recast_matrix_full<-reshape2::dcast(data=ggplotmatrix_full,formula=Var1 ~ Var2, var = ggplotmatrix_full$value) #this creates a matrix with
if(ncol(recast_matrix_full)!=nrow(recast_matrix_full))
{
rownames(recast_matrix_full)<-recast_matrix_full$Var1
recast_matrix_full<-recast_matrix_full[,2:ncol(recast_matrix_full)]
}
#browser()
#resorting recast_matrix
if(!is.null(recast_matrix)){recast_matrix<<-recast_matrix}
if(!is.null(recast_matrix_full)){recast_matrix_full<<-recast_matrix_full}
rownames_gr<-underscored_pos_to_GRanges(rownames(recast_matrix),zeroToOneBasedStart = F,zeroToOneBasedEnd = F)
colnames_gr<-underscored_pos_to_GRanges(colnames(recast_matrix),zeroToOneBasedStart = F,zeroToOneBasedEnd = F)
rownames_gr_full<-underscored_pos_to_GRanges(rownames(recast_matrix_full),zeroToOneBasedStart = F,zeroToOneBasedEnd = F)
colnames_gr_full<-underscored_pos_to_GRanges(colnames(recast_matrix_full),zeroToOneBasedStart = F,zeroToOneBasedEnd = F)
if(!is.null(rownames_gr)){rownames_gr<<-rownames_gr}
if(!is.null(rownames_gr_full)){rownames_gr_full<<-rownames_gr_full}
if(!is.null(colnames_gr)){colnames_gr<<-colnames_gr}
if(!is.null(colnames_gr_full)){colnames_gr_full<<-colnames_gr_full}
ggplotmatrix$value1<-gsub("col genes:","row genes:",ggplotmatrix$value1)
ggplotmatrix$value1<-gsub("row_genes:","col_genes:",ggplotmatrix$value1)
rownames_ordered<-GRanges_to_underscored_pos(rownames_gr[order(rownames_gr)])
colnames_ordered<-GRanges_to_underscored_pos(colnames_gr[order(colnames_gr)])
if(debug){browser()}
recast_matrix<-recast_matrix[rownames_ordered,colnames_ordered]
block_indices_row<-jointseg::jointSeg(recast_matrix,K=10,method="RBS")$bestBkp
block_indices_col<-jointseg::jointSeg(t(recast_matrix),K=10,method="RBS")$bestBkp
block_index_labels_row<-rownames(recast_matrix)[block_indices_row]
block_index_labels_col<-colnames(recast_matrix)[block_indices_col]
# xfactor<-as.factor(ggplotmatrix$Var1)
# levels(xfactor)<-order(colnames_gr)
# yfactor<-as.factor(ggplotmatrix$Var1)
# levels(yfactor)<-order(rownames_gr)
# p <- ggplot(data = ggplotmatrix ) + #geom_tile() + theme_void()
# geom_raster(aes(x = xfactor, y = yfactor,fill=value,text=paste0("value:",value,"\nrow:",Var1,"\ncol:",Var2,"\n",value1))) + scale_x_discrete(breaks = block_index_labels_col) +
# scale_y_discrete(breaks = block_index_labels_row) + theme(axis.text.x = element_text(angle=60, hjust=1)) +
# ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + theme(legend.position="bottom",axis.title = element_blank()) + coord_flip() #+ scale_y_reverse(breaks=block_indices)
#
#browser()
#recreate input matrix, add rownames.
#browser()
options(stringsAsFactors = F)
input_mat<-bin_data %>% dplyr::select(-probe) %>% as.data.frame()
rownames(input_mat)<-bin_data$probe
#correlate input matrix
if(debug){browser()}
if(isolate(input$visval)=="Correlation" & isolate(input$data_source)!="linreg_osteosarcoma_CNVkit") {
if(isolate(input$cor_method)!="spearman - pearson"){
input_mat_cor<-cor(t(input_mat),method=isolate(input$cor_method))
} else {
input_mat_cor<-cor(t(input_mat),method="spearman")-cor(t(input_mat),method="pearson")
}
#browser()
#wide to long
input_mat_cor_flat<-input_mat_cor %>% reshape2::melt()
#grab ggplotmatrix and add correlation values.
#if(!isolate(input$genes_toggle)){ggplotmatrix$value1<-NULL}
#browser()
#ggplotmatrix_joined<- dplyr::inner_join(x=ggplotmatrix,y=input_mat_cor_flat,by=c("Var1"="Var1","Var2"="Var2"))
ggplotmatrix_joined<- data.table::merge.data.table(x=ggplotmatrix,y=input_mat_cor_flat,by.x=c("Var1","Var2"),by.y=c("Var1","Var2"),all=F)
colnames(ggplotmatrix_joined) <- ggplotmatrix_joined %>% colnames() %>%
gsub(pattern = "value.x",replacement = "linregval") %>%
gsub(pattern = "value.y",replacement = "correlation")
#convert the negative log p-values to p-values and apply two kinds of FDR correction.
#browser()
ggplotmatrix_joined$pvalue<-exp(-(abs(ggplotmatrix_joined$orig_value)))
ggplotmatrix_joined$adjpvaluechr<-p.adjust(p = ggplotmatrix_joined$pvalue,method = "fdr")
ggplotmatrix_joined$adjpvaluegenome<-p.adjust(p = ggplotmatrix_joined$pvalue,method = "fdr",
n = dim(input_mat)[1]*dim(input_mat)[2])
ggplotmatrix_joined<<-ggplotmatrix_joined
rownames_ordered<-GRanges_to_underscored_pos(rownames_gr[order(rownames_gr)])
colnames_ordered<-GRanges_to_underscored_pos(colnames_gr[order(colnames_gr)])
if(isolate(input$fdr_correction)=="chromosome_pair"){
ggplotmatrix_joined$adjpvalue<-ggplotmatrix_joined$adjpvaluechr
} else {
if(isolate(input$fdr_correction)=="genome"){
ggplotmatrix_joined$adjpvalue<-ggplotmatrix_joined$adjpvaluegenome
}
}
#browser()
ggplotmatrix_joined<<-ggplotmatrix_joined
if(isolate(input$visval)=="Correlation") {
ggplotmatrix_joined$visval<-ggplotmatrix_joined$correlation
} else {
if(isolate(input$visval)=="-log(Linear Regression P-value) * correlation sign") {
ggplotmatrix_joined$visval<-ggplotmatrix_joined$linregval
}
}
if(isolate(input$pval_filter_toggle)){
ggplotmatrix_joined$visval<-ifelse(ggplotmatrix_joined$adjpvalue<0.05,ggplotmatrix_joined$linregval,0.5)
} else {
ggplotmatrix_joined$visval<-ggplotmatrix_joined$linregval
}
if(!isolate(input$genes_toggle)){
ggplotmatrix_joined$genes_text<-rep("",nrow(ggplotmatrix_joined))
} else {
ggplotmatrix_joined$genes_text<-ggplotmatrix_joined$value1
}
#browser()
#as.integer(as.character(reshape2::colsplit(ggplotmatrix$Var2,"_",c("chr2","start2","end2"))$start2))
p <- ggplot(data = ggplotmatrix_joined ) + #geom_tile() + theme_void()
geom_tile(aes(x = as.numeric(start2),
y = as.numeric(start1),
fill=visval,text=paste0("value:",visval,"\nrow:",Var1,"\ncol:",Var2,"\n",genes_text,"\nFDR p=",adjpvalue,"\n",isolate(input$cor_method)," Correlation=",correlation)),alpha=ifelse(ggplotmatrix_joined$adjpvaluechr<0.05,1.0,0.1)) + #
scale_x_continuous(breaks = reshape2::colsplit(block_index_labels_col,"_",c("chr","start","end"))$start,labels = block_index_labels_col) +
scale_y_continuous(breaks = reshape2::colsplit(block_index_labels_row,"_",c("chr","start","end"))$start,labels = block_index_labels_row) + theme(axis.text.x = element_text(angle=60, hjust=1)) +
ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + theme(legend.position="bottom",axis.title = element_blank())
} else {
if(debug){browser()}
if(!isolate(input$genes_toggle)){
ggplotmatrix$genes_text<-rep("",nrow(ggplotmatrix))
} else {
ggplotmatrix$genes_text<-ggplotmatrix$value1
}
#browser()
ggplotmatrix$pvalue<-exp(-(abs(ggplotmatrix$value)))
ggplotmatrix$adjpvaluechr<-p.adjust(p = ggplotmatrix$pvalue,method = "fdr")
ggplotmatrix$adjpvaluegenome<-p.adjust(p = ggplotmatrix$pvalue,method = "fdr",
n = dim(input_mat)[1]*dim(input_mat)[2])
p <- ggplot(data = ggplotmatrix ) + #geom_tile() + theme_void()
geom_tile(aes(x = as.numeric(start2),
y = as.numeric(start1),
fill=value,text=paste0("value:",value,"\nrow:",Var1,"\ncol:",Var2,"\n",genes_text,"\nFDR p=",adjpvaluechr,"\n")),alpha=ifelse((ggplotmatrix$adjpvaluechr<0.05 | !input$pval_filter_toggle),1.0,0.1)) + #
scale_x_continuous(breaks = reshape2::colsplit(block_index_labels_col,"_",c("chr","start","end"))$start,labels = block_index_labels_col) +
scale_y_continuous(breaks = reshape2::colsplit(block_index_labels_row,"_",c("chr","start","end"))$start,labels = block_index_labels_row) + theme(axis.text.x = element_text(angle=60, hjust=1)) +
ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + theme(legend.position="bottom",axis.title = element_blank())
} #end instructions done IF correlation is specified.
#browser()
#+ geom_contour(binwidth = .395,aes(z=value))
### browser()
#+ coord_flip() #+ scale_y_reverse(breaks=block_indices)
#p
#lumpy_points_toggle
if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit")
{
if(exists("osteofn"))
{
tryCatch(SVs_data_in_submatrix_coords<-readRDS(paste0(osteofn,"breakpoint_gint_lcc236/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords.rds" )),error = function(e) NULL)
tryCatch(lumpy_summarized_counts<-readRDS(paste0(osteofn,"lumpy_sv_236/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror.rds" )),error = function(e) NULL)
}else {
tryCatch(SVs_data_in_submatrix_coords<-readRDS(url(paste0(baseurl,"breakpoint_gint_lcc236/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords.rds" ))),error = function(e) NULL)
tryCatch(lumpy_summarized_counts<-readRDS(url(paste0(baseurl,"lumpy_sv_236/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror.rds" ))),error = function(e) NULL)
}
}
if(isolate(input$data_source) %in% c("TCGA_AML_low_pass","TCGA_BRCA_low_pass","TCGA_OS_low_pass","TCGA_NBL_low_pass","TCGA_PRAD_low_pass"))
{
if(exists("basefn"))
{
#browser()
tryCatch(SVs_data_in_submatrix_coords<-readRDS(paste0(basefn,"breakpoint_gint/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_common_coords.rds" )),error = function(e) NULL)
tryCatch(lumpy_summarized_counts<-readRDS(paste0(basefn,"lumpy_sv/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror_TCGA_common_coords.rds" )),error = function(e) NULL)
tcga_type<<-gsub("_low_pass","",gsub("TCGA_","",isolate(input$data_source)))
tryCatch(TCGA_low_pass_sample_info<<-readRDS(paste0(basefn,"sample_info/",tcga_type,"TCGA_merged_dtv2.rds" )),error = function(e) NULL)
if(exists("TCGA_low_pass_sample_info")){TCGA_low_pass_sample_info$pos<- tidyr::unite(TCGA_low_pass_sample_info,pos,bins.seqnames,bins.start,bins.end)$pos}
} else {
tryCatch(SVs_data_in_submatrix_coords<-readRDS(url(paste0(baseurl,"breakpoint_gint/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_common_coords.rds" ))),error = function(e) NULL)
tryCatch(lumpy_summarized_counts<-readRDS(url(paste0(baseurl,"lumpy_sv/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror_TCGA_common_coords.rds" ))),error = function(e) NULL)
tcga_type<<-gsub("_low_pass","",gsub("TCGA_","",isolate(input$data_source)))
tryCatch(TCGA_low_pass_sample_info<<-readRDS(url(paste0(baseurl,"sample_info/",tcga_type,"TCGA_merged_dtv2.rds" ))),error = function(e) NULL)
if(exists("TCGA_low_pass_sample_info")){TCGA_low_pass_sample_info$pos<- tidyr::unite(TCGA_low_pass_sample_info,pos,bins.seqnames,bins.start,bins.end)$pos}
}
}
if(isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset"))
{
subset_name<<-gsub("_subset","",gsub("TCGA_NBL_","",paste0(input$data_source)))
if(exists("basefn"))
{
tryCatch(SVs_data_in_submatrix_coords<-readRDS(paste0(basefn,"breakpoint_gint/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_common_coords.rds" )),error = function(e) NULL)
tryCatch(lumpy_summarized_counts<-readRDS(paste0(basefn,"lumpy_sv/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror_TCGA_common_coords.rds" )),error = function(e) NULL)
tcga_type<<-gsub("_low_pass","",gsub("TCGA_","",isolate(input$data_source)))
tryCatch(TCGA_low_pass_sample_info<<-readRDS(paste0(basefn,"sample_info/",tcga_type,"TCGA_merged_dtv2.rds" )),error = function(e) NULL)
if(exists("TCGA_low_pass_sample_info")){TCGA_low_pass_sample_info$pos<- tidyr::unite(TCGA_low_pass_sample_info,pos,bins.seqnames,bins.start,bins.end)$pos}
} else {
tryCatch(SVs_data_in_submatrix_coords<-readRDS(url(paste0(baseurl,"breakpoint_gint/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_common_coords.rds" ))),error = function(e) NULL)
tryCatch(lumpy_summarized_counts<-readRDS(url(paste0(baseurl,"lumpy_sv/TCGA_low_pass/",isolate(input$chrom1),isolate(input$chrom2),"SVs_data_in_submatrix_coords_lumpy_mirror_TCGA_common_coords.rds" ))),error = function(e) NULL)
tcga_type<<-gsub("_low_pass","",gsub("TCGA_","",isolate(input$data_source)))
tryCatch(TCGA_low_pass_sample_info<<-readRDS(url(paste0(baseurl,"sample_info/",tcga_type,"TCGA_merged_dtv2.rds" ))),error = function(e) NULL)
if(exists("TCGA_low_pass_sample_info")){TCGA_low_pass_sample_info$pos<- tidyr::unite(TCGA_low_pass_sample_info,pos,bins.seqnames,bins.start,bins.end)$pos}
}
}
# return(lumpy_summarized_counts)
#}
#DISABLING CLIENT SIDE PROCESSING OF GenomicInteraction data.
# submat_row_gr<-underscored_pos_to_GRanges(rownames(recast_matrix))
# submat_col_gr<-underscored_pos_to_GRanges(colnames(recast_matrix))
# breakpoint_gint_full_subset<-breakpoint_gint_full[anchorOne(breakpoint_gint_full)@seqnames %in% gsub("_","",isolate(input$chrom1)) &
# anchorTwo(breakpoint_gint_full)@seqnames %in% gsub("_","",isolate(input$chrom2))]
#
# if(
# grep(paste0("\\b",unique(as.character(submat_row_gr@seqnames)),"\\b"),gsub("_","",chromosomes))>grep(paste0("\\b",unique(as.character(submat_col_gr@seqnames)),"\\b"),gsub("_","",chromosomes))
# ){
# SVs_data_in_submatrix_coords<-rebinGenomicInteractions(gint=breakpoint_gint_full_subset,
# whole_genome_matrix = NULL,
# rownames_gr = submat_col_gr,
# colnames_gr = submat_row_gr,
# rownames_mat = colnames(recast_matrix),
# colnames_mat = rownames(recast_matrix),
# method="nearest")
# } else {SVs_data_in_submatrix_coords<-rebinGenomicInteractions(gint=breakpoint_gint_full_subset,
# whole_genome_matrix = NULL,
# rownames_gr = submat_row_gr,
# colnames_gr = submat_col_gr,
# rownames_mat = rownames(recast_matrix),
# colnames_mat = colnames(recast_matrix),
# method="nearest")
# }
#END CLIENT SIDE GINT PROCESSING
# if(input$contour){
# p <- ggplot(data = ggplotmatrix, aes(x = Var2, y = Var1,fill=value,text=paste0("value:",value,"\nrow:",Var1,"\ncol:",Var2,"\n",value1)) ) + #geom_tile() + theme_void()
# geom_tile() + scale_x_discrete(breaks = block_index_labels_col) +
# scale_y_discrete(breaks = block_index_labels_row) + theme(axis.text.x = element_text(angle=60, hjust=1)) +
# ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + theme(legend.position="bottom",axis.title = element_blank()) + coord_flip() #+ scale_y_reverse(breaks=block_indices)
# }
#rep(paste0(colnames(lumpy_summarized_counts[,3:ncol(lumpy_summarized_counts)]),collapse='/n'),nrow(lumpy_summarized_counts))
#tidyr::unite(data = lumpy_summarized_counts[,3:ncol(lumpy_summarized_counts)],sep="\n")[,1]
#
if(exists("lumpy_summarized_counts") && isolate(input$lumpy_points_toggle)){
lumpy_summarized_counts$textlabel<-unlist(strsplit(x = paste0("col:",lumpy_summarized_counts$row_bin_label,"\nrow:",lumpy_summarized_counts$col_bin_label,"\ntotal SVs:",lumpy_summarized_counts$total_samples,
"\nhighest freq SV type:",lumpy_summarized_counts$highest_count_sample,lumpy_summarized_counts$highest_count_sample_count/lumpy_summarized_counts$total_samples*100,"%\n types, ranked:",lumpy_summarized_counts$concatenated_sample_names,collapse="@"),"@"))
# p<-p + geom_point(data=lumpy_summarized_counts,mapping=aes(x=as.integer(as.character(lumpy_summarized_counts$col_bin_index)),y=as.integer(as.character(lumpy_summarized_counts$row_bin_index)),
# color=lumpy_summarized_counts$highest_count_sample,size=lumpy_summarized_counts$total_samples,
# text=lumpy_summarized_counts$textlabel
#
# ))
if(is.null(lumpy_summarized_counts$start1))
{lumpy_summarized_counts<-dplyr::bind_cols(lumpy_summarized_counts,reshape2::colsplit(lumpy_summarized_counts$row_bin_label,"_",c("chr1","start1","end1")))
lumpy_summarized_counts<-dplyr::bind_cols(lumpy_summarized_counts,reshape2::colsplit(lumpy_summarized_counts$col_bin_label,"_",c("chr2","start2","end2")))
}
p<-p + geom_point(data=lumpy_summarized_counts,mapping=aes(x=as.numeric(as.character(lumpy_summarized_counts$start1)),y=as.numeric(as.character(lumpy_summarized_counts$start2)),
color=as.character(lumpy_summarized_counts$highest_count_sample),size=as.numeric(as.character(lumpy_summarized_counts$total_samples)),
text=lumpy_summarized_counts$textlabel))
}
#
if(exists("SVs_data_in_submatrix_coords") && isolate(input$plot_points_toggle))
{ SVs_data_in_submatrix_coords$col_bin_index<-as.numeric(as.character(SVs_data_in_submatrix_coords$col_bin_index))
SVs_data_in_submatrix_coords$row_bin_index<-as.numeric(as.character(SVs_data_in_submatrix_coords$row_bin_index))
if(is.null(SVs_data_in_submatrix_coords$start1))
{SVs_data_in_submatrix_coords<-dplyr::bind_cols(SVs_data_in_submatrix_coords,reshape2::colsplit(SVs_data_in_submatrix_coords$row_bin_label,"_",c("chr1","start1","end1")))
SVs_data_in_submatrix_coords<-dplyr::bind_cols(SVs_data_in_submatrix_coords,reshape2::colsplit(SVs_data_in_submatrix_coords$col_bin_label,"_",c("chr2","start2","end2")))
}
SVs_data_in_submatrix_coords$textlabel<-unlist(strsplit(x = paste0("col:",SVs_data_in_submatrix_coords$row_bin_label,"\nrow:",SVs_data_in_submatrix_coords$col_bin_label,"\ntotal SVs:",SVs_data_in_submatrix_coords$total_samples,
"\nhighest freq SV type:",SVs_data_in_submatrix_coords$highest_count_sample,SVs_data_in_submatrix_coords$highest_count_sample_count/SVs_data_in_submatrix_coords$total_samples*100,"%\n types, ranked:",
SVs_data_in_submatrix_coords$concatenated_sample_names,collapse="@"),"@"))
#print(p_with_points)
#},error = function(err) {
# print(paste("Caught & handled error: ",err))
tryCatch( highest_over_tot<-as.numeric(SVs_data_in_submatrix_coords$highest_count_sample_count/SVs_data_in_submatrix_coords$total_samples),error = function(e) NULL)
tryCatch(colorvals<-as.character(cut(highest_over_tot,breaks=unique(quantile(highest_over_tot,probs=c(0.25,0.5,0.75))))),error = function(e) NULL)
if(exists("colorvals"))
{ p_with_points<-p + geom_point(data=SVs_data_in_submatrix_coords,mapping = aes(x=as.numeric(as.character(SVs_data_in_submatrix_coords$start1)),y=as.numeric(as.character(SVs_data_in_submatrix_coords$start2)),
text=SVs_data_in_submatrix_coords$textlabel,
size=as.numeric(as.character(SVs_data_in_submatrix_coords$total_samples)),
#shape=as.character(SVs_data_in_submatrix_coords$highest_count_sample),
color= colorvals) ) + labs(color="",size="")
} else {
p_with_points<-p + geom_point(data=SVs_data_in_submatrix_coords,mapping = aes(x=as.numeric(as.character(SVs_data_in_submatrix_coords$start1)),y=as.numeric(as.character(SVs_data_in_submatrix_coords$start2)),
text=SVs_data_in_submatrix_coords$textlabel,
color="CGI SV",
size=as.numeric(as.character(SVs_data_in_submatrix_coords$total_samples))) ) + labs(size="")}
#+ scale_color_gradient(low="green",high="darkgreen")
#color=as.numeric(SVs_data_in_submatrix_coords$highest_count_sample_count/SVs_data_in_submatrix_coords$total_samples)
# + scale_colour_gradientn(colours = c("blue","white","red"),values=c(0,0.5,1))
# p_with_points<-p + geom_point(data=SVs_data_in_submatrix_coords,mapping = aes(x=as.integer(as.character(SVs_data_in_submatrix_coords$col_bin_index)),y=as.integer(as.character(SVs_data_in_submatrix_coords$row_bin_index)),
# text=tidyr::unite(data = SVs_data_in_submatrix_coords[,3:ncol(SVs_data_in_submatrix_coords)],sep="\n")[,1],
# size=as.integer(as.character(SVs_data_in_submatrix_coords$total_samples)),
# #shape=as.character(SVs_data_in_submatrix_coords$highest_count_sample),
# color=as.character(arules::discretize(as.numeric(SVs_data_in_submatrix_coords$highest_count_sample_count/SVs_data_in_submatrix_coords$total_samples),method="interval"))
# #color=as.numeric(SVs_data_in_submatrix_coords$highest_count_sample_count/SVs_data_in_submatrix_coords$total_samples)
# )) #+ scale_colour_gradientn(colours = c("blue","white","red"),values=c(0,0.5,1))
# scale_colour_gradient2()
#set the range to be specific if there are coordinates (the cell +/- 4), else choose the max range for the particular axis.
if(debug){browser()}
#check for the correct format.
plotly_output<-plotly::ggplotly(p_with_points,tooltip="text") %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=isolate(input$heatmapHeight),height=round(isolate(input$heatmapHeight)/1.25))
} else {if(exists("p"))
{
plotly_output<-plotly::ggplotly(p,tooltip="text") %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=isolate(input$heatmapHeight),height=round(isolate(input$heatmapHeight)/1.25))
}
}
#
#plotly_output<-plotly::ggplotly(p) %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=1280,height=1024)
#%>% saveWidget(title = gsub("_","",paste0(chromosomes[isolate(input$chrom1)],"-",chromosomes[isolate(input$chrom2)])),file = paste0(chromosomes[isolate(input$chrom1)],chromosomes[isolate(input$chrom2)],"transparent_tooltipv27_coord_no_flip_downsample_upward_orientation_plotly_nrsample.html"),selfcontained = T)
#
if( (!is.null(isolate(input$loc_input_row)) | !is.null(isolate(input$loc_input_col)) ) & (!isolate(input$loc_input_row)=="" | !isolate(input$loc_input_col)==""))
{
if(debug){browser()}
#acknowledgement: thanks to stackoverflow comments that made package a reality.
#find the location of the bin in terms of map coordinates for x
#store this as the xcentercoord
#do the same for y
#store as ycentercoord
rowsplit<-reshape2::colsplit(isolate(input$loc_input_row),c("\\:|\\-"),c("chr","start","end"))
columnsplit<-reshape2::colsplit(isolate(input$loc_input_col),c("\\:|\\-"),c("chr","start","end"))
xmin<-columnsplit$start
xmin<-ggplotmatrix$start2[which.min(abs(ggplotmatrix$start2-xmin))]-1e6
xmax<-columnsplit$end
xmax<-ggplotmatrix$start2[which.min(abs(ggplotmatrix$start2-xmax))]+1e6
ymin<-rowsplit$start
ymin<-ggplotmatrix$start2[which.min(abs(ggplotmatrix$start1-ymin))]-1e6
ymax<-rowsplit$end
ymax<-ggplotmatrix$start2[which.min(abs(ggplotmatrix$start1-ymax))]+1e6
xglobalmin<-min(ggplotmatrix$start2)
yglobalmin<-min(ggplotmatrix$start1)
xglobalmax<-max(ggplotmatrix$start2)
yglobalmax<-max(ggplotmatrix$start1)
#edge case-- if the xcentercoord is greater than max or less than zero, reset to zero.
#edge case-- do the same for y
if(xmin<xglobalmin){xmin<-xglobalmin}
if(ymin<yglobalmin){ymin<-yglobalmin}
#edge case-- if xmax is greater than the maximum y, then reset to max.
#edge case-- do the same for y
if(xmax>xglobalmax){xmax<-xglobalmax}
if(ymax>yglobalmax){ymax<-yglobalmax}
#ggplotly(p, dynamicTicks = T) %>% plotly::layout(xaxis=list(autorange=F, range=c(xcentercoord-4,xcentercoord+4)), yaxis=list(autorange=F, range=c(20,30)))
if(!exists("xmin")){xmin<-xglobalmin}
if(!exists("xmax")){xmax<-xglobalmax}
if(!exists("ymin")){ymin<-yglobalmin}
if(!exists("ymax")){ymax<-yglobalmax} #need to round the max and min for all.
#xmin<-floor(xmin/1e6)*1e6
if(exists("p_with_points")){
plotly_output<-plotly::ggplotly(p_with_points,tooltip="text") %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=isolate(input$heatmapHeight),height=round(isolate(input$heatmapHeight)/1.25),
xaxis=list(range=c(xmin,xmax),autorange=F), yaxis=list(range=c(ymin,ymax),autorange=F))
} else {
if(exists("p"))
{
plotly_output<-plotly::ggplotly(p,tooltip="text") %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=isolate(input$heatmapHeight),height=round(isolate(input$heatmapHeight)/1.25),xaxis=list(range=c(xmin,xmax),autorange=F), yaxis=list(range=c(ymin,ymax),autorange=F))
}
}
return(plotly_output)
} else {}
if(debug){browser()}
print(plotly_output)
if(debug){browser()}
return(plotly_output)
})
# Keep the heatmap reactive evaluating even while its tab is hidden, so the
# plot (and the module-level objects it populates via <<-) stay current.
# Use FALSE, not the reassignable shorthand F.
outputOptions(output, "plotlyChromosomalHeatmap", suspendWhenHidden = FALSE)
# Serves a pre-rendered whole-genome heatmap PNG for the selected data source
# and max-cap setting. Returns NULL (no image) for data sources that have no
# pre-rendered PNGs.
output$whole_genome_image <- renderImage({
  # Take reactive dependencies: re-render when the cap changes or the Go
  # button is pressed.
  input$whole_genome_max_cap
  input$goButton
  # Initialize to NULL so an unsupported data source falls through to the
  # is.null() guard below. The original never initialized data_prefix, so
  # is.null(data_prefix) raised "object 'data_prefix' not found" for any
  # source other than the two handled here.
  data_prefix <- NULL
  pngfn <- NULL
  if (isolate(input$data_source) == "linreg_osteosarcoma_CNVkit") {
    data_prefix <- "osteo"
    pngfn <- osteofn  # path base for the osteosarcoma dataset
  }
  if (isolate(input$data_source) == "TCGA_NBL_low_pass") {
    data_prefix <- "nbl"
    pngfn <- basefn   # path base for the TCGA NBL dataset
  }
  if (is.null(data_prefix)) {
    return(NULL)
  }
  if (debug) {
    browser()
  }
  # Point renderImage at the pre-rendered PNG matching the selected cap.
  return(list(src = paste0(
    pngfn, "whole_genome_pngs/", data_prefix,
    "_whole_genome_full_no_downsample_no_labels_rescaled_max_cap_",
    isolate(input$whole_genome_max_cap), ".png"
  )))
}, deleteFile = FALSE  # the PNGs are persistent assets, not temp files
)
# output$freq_table<-renderDataTable({
#
# return(data.table())
# })
# Accessors for objects written to the global environment (via <<-) by the
# heatmap reactive. Each returns the corresponding object when it has been
# created, or NULL when the data has not been loaded yet.
getGGplotMatrix <- function() {
  if (exists("ggplotmatrix")) ggplotmatrix else NULL
}
getGGplotMatrix_full <- function() {
  if (exists("ggplotmatrix_full")) ggplotmatrix_full else NULL
}
# Sample-level metadata table for the TCGA low-pass cohorts.
get_tcga_lp_sample_info <- function() {
  if (exists("TCGA_low_pass_sample_info")) TCGA_low_pass_sample_info else NULL
}
get_recast_matrix <- function() {
  if (exists("recast_matrix")) recast_matrix else NULL
}
get_downsample_factor <- function() {
  if (exists("downsample_factor")) downsample_factor else NULL
}
get_recast_matrix_full <- function() {
  if (exists("recast_matrix_full")) recast_matrix_full else NULL
}
get_rownames_gr <- function() {
  if (exists("rownames_gr")) rownames_gr else NULL
}
get_colnames_gr <- function() {
  if (exists("colnames_gr")) colnames_gr else NULL
}
get_rownames_gr_full <- function() {
  if (exists("rownames_gr_full")) rownames_gr_full else NULL
}
get_colnames_gr_full <- function() {
  if (exists("colnames_gr_full")) colnames_gr_full else NULL
}
# Renders a table of expression summary statistics for genes overlapping the
# genomic bins at the most recently clicked heatmap cell. Returns an empty
# data.table until a plotly click event exists.
output$expression_data<-DT::renderDataTable({
#browser()
# No click yet: nothing to show.
if(is.null(plotly::event_data("plotly_click"))){return(data.table())}
if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit")
{
# Osteosarcoma branch: recover the clicked row/column bin labels from the
# display matrix via the click's pointNumber indices (0-based, hence +1).
recast_matrix<-get_recast_matrix()
row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label.
column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label.
#row_point_gr<-underscored_pos_to_GRanges(row_label)
#column_point_gr<-underscored_pos_to_GRanges(column_label)
#row_index<-as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1
#col_index<-as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1 #row and col indices of the subset matrix.
# Map the labels into the full (non-downsampled) matrix.
row_index_full<-grep(row_label,rownames(get_recast_matrix_full()))
col_index_full<-grep(column_label,colnames(get_recast_matrix_full()))
#
#rowclick<-length(common_coords)-myReactives$currentClick$lat
#colclick<-myReactives$currentClick$lng
if(debug){browser()}
# Lazily load the osteosarcoma expression GRanges when not yet populated;
# read errors are swallowed and leave expression_data_gr unchanged.
if(is.null(expression_data_gr)){tryCatch(expression_data_gr<-readRDS(paste0(get("osteofn",.GlobalEnv),"expression_data_gr.rds")),error = function(e) NULL) }
# Overlap expression records with a 4-bin window starting at each clicked bin.
rowexpression<-as.data.table(IRanges::subsetByOverlaps(expression_data_gr,get_rownames_gr_full()[seq(from=row_index_full,to=row_index_full+3)]))
colexpression<-as.data.table(IRanges::subsetByOverlaps(expression_data_gr,get_colnames_gr_full()[seq(from=col_index_full,to=col_index_full+3)]))} else {
# NBL / NBL-subset branch: match bins by the clicked genomic start
# coordinates (event_data x/y) instead of point indices.
if(isolate(input$data_source)=="TCGA_NBL_low_pass" | isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset"))
{
if(debug){browser()}
rownames_gr_full<-get_rownames_gr_full()
colnames_gr_full<-get_colnames_gr_full()
# if(!exists("expression_data_gr_nbl")){
# Load the NBL expression GRanges (and retry once if the first read yielded
# an empty object); read errors are ignored.
tryCatch(expression_data_gr_nbl<-readRDS(paste0(get("basefn",.GlobalEnv),"tcga_nbl_expression.rds")),error = function(e) NULL)
# }
if(length(expression_data_gr_nbl)==0){
tryCatch(expression_data_gr_nbl<-readRDS(paste0(get("basefn",.GlobalEnv),"tcga_nbl_expression.rds")),error = function(e) NULL)
}
#mcols(expression_data_gr_nbl)$SYMBOL<-expression_data_gr_nbl$....external_gene_name
if(debug){browser()}
# Select the single row/column bin whose start equals the clicked y/x value.
rowexpression<-as.data.table(IRanges::subsetByOverlaps(expression_data_gr_nbl,rownames_gr_full[rownames_gr_full@ranges@start==plotly::event_data("plotly_click")[["y"]]]))
colexpression<-as.data.table(IRanges::subsetByOverlaps(expression_data_gr_nbl,colnames_gr_full[colnames_gr_full@ranges@start==plotly::event_data("plotly_click")[["x"]]]))
}
}
# Tag provenance (row vs column bin) and combine the two hit sets.
rowexpression$rowcol<-"row"
colexpression$rowcol<-"col"
comb_expression_df<-rbind(rowexpression,colexpression)
#comb_expression_df_t<-as.data.table(t(comb_expression_df))
#return(comb_expression_df_t)
# cat(file=stderr(),paste0("expression_data"))
# cat(file=stderr(),ls())
#make the rownames match for nbl
# Keep one row per gene with its summary stats, sorted by descending
# row-variance percentile so the most variable genes appear first.
outputexpression_df<-as.data.table(unique(comb_expression_df[,c("SYMBOL","seqnames","start","end","gene_type","rowMean","rowMeanPctl","rowVar","rowVarPctl")]))
outputexpression_df_sorted<-outputexpression_df[order(-outputexpression_df$rowVarPctl),]
return(as.data.table(outputexpression_df_sorted))
})
# Renders Cancer Gene Census entries overlapping the clicked heatmap cell:
# records overlapping a 4-bin window starting at the clicked row and column
# bins of the full-resolution matrix. Returns an empty table until a plotly
# click event exists.
output$census_data <- DT::renderDataTable({
  if (is.null(plotly::event_data("plotly_click"))) {
    return(data.table())
  }
  recast_matrix <- get_recast_matrix()
  # Load the census annotation only when it is not already in scope. The
  # original tested length(intersect(ls(), "census_data_gr")) != 1, but ls()
  # lists this reactive's fresh local frame, so the check never succeeded and
  # the RDS was re-read on every click; exists() implements the intended
  # caching. Read errors are swallowed (tryCatch returns NULL).
  if (!exists("census_data_gr")) {
    tryCatch(census_data_gr <- readRDS(paste0(basefn, "censushg19.rds")),
             error = function(e) NULL)
  }
  # Recover the clicked row/column bin labels from the display matrix via the
  # click's pointNumber indices (0-based, hence +1), then map the labels into
  # the full (non-downsampled) matrix.
  row_label <- rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1])) + 1]
  column_label <- colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2])) + 1]
  row_index_full <- grep(row_label, rownames(get_recast_matrix_full()))
  col_index_full <- grep(column_label, colnames(get_recast_matrix_full()))
  # Overlap census records with a 4-bin window starting at each clicked bin.
  rowcensus <- as.data.table(IRanges::subsetByOverlaps(census_data_gr, get_rownames_gr_full()[seq(from = row_index_full, to = row_index_full + 3)]))
  colcensus <- as.data.table(IRanges::subsetByOverlaps(census_data_gr, get_colnames_gr_full()[seq(from = col_index_full, to = col_index_full + 3)]))
  # Tag provenance (row vs column bin), combine, and de-duplicate.
  rowcensus$rowcol <- "row"
  colcensus$rowcol <- "col"
  comb_census_df <- rbind(rowcensus, colcensus)
  return(unique(as.data.table(comb_census_df)))
})
# output$census_data<-renderDataTable({
# row_label<-rownames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label.
# column_label<-colnames(recast_matrix)[as.integer(paste0(event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label.
# if(is.null(myReactives$currentClick)){return(data.frame())}
# #
# rowclick<-round(length(common_coords)-myReactives$currentClick$lat)
# colclick<-round(myReactives$currentClick$lng)
# rowcensus<-as.data.table(subsetByOverlaps(census_data_gr,rownames_gr[rowclick]))
# colcensus<-as.data.table(subsetByOverlaps(census_data_gr,colnames_gr[colclick]))
# rowcensus$rowcol<-"row"
# colcensus$rowcol<-"col"
# comb_expression_df<-rbind(rowcensus,colcensus)
# comb_expression_df_t<-t(comb_expression_df)
# return(comb_expression_df_t)
#
# })
# Prints the gene annotations for the clicked heatmap cell using the stored
# click coordinates in myReactives. NOTE(review): row_label / column_label
# are computed but never used here, and the local variable `output` shadows
# the Shiny output object inside this expression (harmless but confusing).
output$gene_data <-
renderPrint({
# No click yet: nothing to show.
if(is.null(plotly::event_data("plotly_click"))){return(data.table())}
row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label.
column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label.
#if(myReactives)
#
#all_input<-isolate(input)
# cat(file=stderr(),paste0("gene_data"))
# cat(file=stderr(),ls())
# Convert the stored click (lat/lng) into row/column indices; assumes
# common_coords, myReactives, and genev are defined elsewhere in the app --
# TODO confirm against the full server file.
rowclick<-length(common_coords)-myReactives$currentClick$lat
colclick<-myReactives$currentClick$lng
row_genes<-genev[rowclick]
col_genes<-genev[colclick]
#
# Concatenate the row- and column-bin gene annotations into one string.
output<-paste0("row genes:",as.character(genev[rowclick]),
"column genes:",as.character(genev[colclick]))
return(output)
})
# Gene table for the clicked heatmap ROW bin.
# Reconstructs the row bin label ("<chrom><start>_<end>", 1 Mb bins) from the
# plotly click's y coordinate and the selected chromosome (input$chrom2),
# then lists the protein-coding genes overlapping that bin.
output$row_gene_data <-
  DT::renderDataTable({
    click <- plotly::event_data("plotly_click")
    if (is.null(click)) { return(data.table()) }
    # Row labels can only be reconstructed for the sources below. For any
    # other source, return an empty table instead of erroring: the original
    # referenced `row_label` without ever defining it in that case
    # ("object 'row_label' not found").
    supported <- c("TCGA_NBL_low_pass",
                   "TCGA_NBL_stage3_subset", "TCGA_NBL_stage4_subset",
                   "TCGA_NBL_stage4s_subset", "TCGA_NBL_myc_amp_subset",
                   "TCGA_NBL_not_myc_amp_subset", "linreg_osteosarcoma_CNVkit")
    if (!(isolate(input$data_source) %in% supported)) { return(data.table()) }
    # The y click coordinate is the bin start; bins are 1e6 bp wide.
    row_label <- paste0(isolate(input$chrom2), click[["y"]], "_", click[["y"]] + 1e6 - 1)
    row_genes_merged <- IRanges::mergeByOverlaps(ensembl_gene_tx_data_gr,
                                                 underscored_pos_to_GRanges(row_label))
    # Keep protein-coding gene symbols only; the "...." column-name prefix is
    # produced by mergeByOverlaps().
    row_genes <- sort(unique(
      row_genes_merged[row_genes_merged$....gene_biotype == "protein_coding",
                       "....external_gene_name"]))
    dt <- as.data.table(row_genes)
    colnames(dt) <- "row genes"
    return(dt)
  })
# Gene table for the clicked heatmap COLUMN bin.
# Mirror of output$row_gene_data: reconstructs the column bin label from the
# click's x coordinate and input$chrom1, then lists overlapping
# protein-coding genes.
output$col_gene_data <-
  DT::renderDataTable({
    click <- plotly::event_data("plotly_click")
    if (is.null(click)) { return(data.table()) }
    # Same guard as output$row_gene_data: the original left `column_label`
    # undefined for unsupported sources and errored downstream.
    supported <- c("TCGA_NBL_low_pass",
                   "TCGA_NBL_stage3_subset", "TCGA_NBL_stage4_subset",
                   "TCGA_NBL_stage4s_subset", "TCGA_NBL_myc_amp_subset",
                   "TCGA_NBL_not_myc_amp_subset", "linreg_osteosarcoma_CNVkit")
    if (!(isolate(input$data_source) %in% supported)) { return(data.table()) }
    # The x click coordinate is the bin start; bins are 1e6 bp wide.
    column_label <- paste0(isolate(input$chrom1), click[["x"]], "_", click[["x"]] + 1e6 - 1)
    col_genes_merged <- IRanges::mergeByOverlaps(ensembl_gene_tx_data_gr,
                                                 underscored_pos_to_GRanges(column_label))
    # Protein-coding symbols only; "...." prefix comes from mergeByOverlaps().
    col_genes <- sort(unique(
      col_genes_merged[col_genes_merged$....gene_biotype == "protein_coding",
                       "....external_gene_name"]))
    dt <- as.data.table(col_genes)
    colnames(dt) <- "column genes"
    return(dt)
  })
# Interactive network of the most extreme matrix entries.
# Built on demand when the "go" button is pressed: the input$n_nodes most
# extreme (lowest and highest) values of the full matrix become weighted
# edges, isolated vertices are dropped, and nodes are coloured by the
# percentile of their weighted degree (blue = low, red = high).
output$network <- visNetwork::renderVisNetwork({
  if (input$goButton == 0) {return()}
  input$goButton  # take a reactive dependency on the button
  # Order entries by value; keep the n_nodes/2 smallest and largest; drop
  # self-pairs (Var1 == Var2).
  ggplotmatrix_filtered<-ggplotmatrix_full[order(ggplotmatrix_full$value),]
  ggplotmatrix_filtered<-ggplotmatrix_filtered[c(1:(isolate(input$n_nodes)/2),(nrow(ggplotmatrix_filtered)-(isolate(input$n_nodes)/2)):nrow(ggplotmatrix_filtered)),]
  ggplotmatrix_filtered<-ggplotmatrix_filtered[as.character(ggplotmatrix_filtered$Var1)!=as.character(ggplotmatrix_filtered$Var2),]
  # Assemble an igraph object: one vertex per bin label, one weighted edge
  # per retained matrix entry.
  vertex.attrs<-list(name = unique(c(as.character(ggplotmatrix_filtered$Var1), as.character(ggplotmatrix_filtered$Var2))))
  edges<-rbind(as.character(ggplotmatrix_filtered$Var1),as.character(ggplotmatrix_filtered$Var2))
  weights<-ggplotmatrix_filtered$value
  G <- igraph::graph.empty(n = 0, directed = T)
  G <- igraph::add.vertices(G, length(vertex.attrs$name), attr = vertex.attrs)
  G <- igraph::add.edges(G, edges,weight=weights)
  G_connected<-delete.isolates(G)  # project helper — presumably drops degree-0 vertices; confirm
  G_connected_vis<-visNetwork::toVisNetworkData(G_connected)
  G_connected_vis$edges$value<-G_connected_vis$edges$weight  # edge width ~ weight
  # Node colour: percentile of weighted degree mapped blue->white->red;
  # substr() keeps only the "#RRGGBB" part of the colour string.
  col_fun = circlize::colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))
  G_connected_vis$nodes$color<-sapply(col_fun(heatmaply::percentize(igraph::strength(G_connected))) ,function(x) substr(x,start = 1,stop = 7))
  # Report node hovers back to Shiny as input$current_node_id (consumed by
  # output$shiny_return).
  visNetwork::visNetwork(nodes = G_connected_vis$nodes,edges = G_connected_vis$edges,width = isolate(input$heatmapHeight),height = round(isolate(input$heatmapHeight)/1.25)) %>%
    visNetwork::visInteraction(hover = TRUE) %>%
    visNetwork::visEvents(hoverNode = "function(nodes) {
      Shiny.onInputChange('current_node_id', nodes);
      ;}")
})
# Table of matrix entries involving the currently hovered network node.
# The bare reference to input$current_node_id takes the reactive dependency;
# the isolate()d read below avoids a second trigger.
output$shiny_return <- DT::renderDataTable({
  input$current_node_id
  node <- isolate(input$current_node_id)
  if (is.null(node)) {
    return(data.table())
  }
  # Keep any row where the hovered node appears as either endpoint.
  hits <- ggplotmatrix$Var1 %in% node | ggplotmatrix$Var2 %in% node
  return(as.data.table(ggplotmatrix[hits, ]))
}, options = list(pageLength = 5))
# Overlaid histograms of per-sample values for the clicked row and column
# bins (in-house / NBL data sources only; TCGA cohorts fall through without
# producing a plot).
output$sample_info<-plotly::renderPlotly({
  input$sample_hist_alpha  # re-render when the transparency slider moves
  if(is.null(plotly::event_data("plotly_click"))){return(data.table())}
  # Lazily load the osteosarcoma bin-level data on first use.
  # NOTE(review): the length(...)==0 wrapper around the exists() test is
  # convoluted — confirm it ever evaluates TRUE as intended.
  if(length((!exists("bin_data")|if(exists("bin_data")){dim(bin_data)[1]==3053}))==0 & isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { tryCatch(bin_data<<-readRDS((paste0(osteofn,"bin_data_lcc236.rds"))),error = function(e) NULL) }
  if (is.null(plotly::event_data("plotly_click"))) {return("Click events appear here (double-click to clear)")}
  if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit" | isolate(input$data_source)=="TCGA_NBL_low_pass" |
     isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset")
  )
  {
    recast_matrix<-get_recast_matrix()
    # NOTE(review): is.null("recast_matrix") tests a string literal, so this
    # guard is always TRUE — presumably is.null(recast_matrix) was intended.
    if(!is.null("recast_matrix")) {
      # Bin labels of the clicked cell, from the click's point indices.
      row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label.
      column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label.
      # For NBL sources the click coordinates are genomic bin starts (1 Mb bins).
      if(isolate(input$data_source)=="TCGA_NBL_low_pass" |
         isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset"))
      {
        row_label<-paste0(isolate(input$chrom2),plotly::event_data("plotly_click")[["y"]],"_",plotly::event_data("plotly_click")[["y"]]+1e6-1)
        column_label<-paste0(isolate(input$chrom1),plotly::event_data("plotly_click")[["x"]],"_",plotly::event_data("plotly_click")[["x"]]+1e6-1)
      }
      # Backfill the probe (bin label) column if missing.
      if(length(bin_data$probe)==0)
      {
        bin_data$probe<-rownames(bin_data)
      }
      # Rows of bin_data for the clicked row/column bins.
      d<-as.data.table(bin_data[bin_data$probe %in% c(row_label,column_label),])
      if(nrow(d)==0){return("")}
      # One histogram trace per bin, overlaid with user-set transparency.
      sample_info_p <- plotly::plot_ly(alpha = isolate(input$sample_hist_alpha)) %>%
        plotly::add_histogram(x = as.numeric(d[1,]),name=d[1,"probe"]) %>%
        plotly::add_histogram(x = as.numeric(d[2,]),name=d[2,"probe"]) %>%
        plotly::layout(barmode = "overlay")
      print(sample_info_p)
      if(debug){browser()}
      return(sample_info_p)
    }
  } #end code for in-house data.
  # TCGA low-pass cohorts: fetch per-sample info (no plot is returned here).
  if(isolate(input$data_source) %in% c("TCGA_AML_low_pass","TCGA_BRCA_low_pass","TCGA_OS_low_pass","TCGA_PRAD_low_pass"))
  {
    TCGA_low_pass_sample_info<-get_tcga_lp_sample_info()
  }
})
# Scatter of per-sample values for the clicked row and column bins, one trace
# per bin, sample index on the y axis.
# Fixes vs. original: the guard tested is.null("recast_matrix") — a string
# literal, always non-NULL — instead of the object; and a leftover "#testing"
# block recomputed bin_data_colsplit and discarded it on every render.
output$sample_info_scatter<-plotly::renderPlotly({
  if (is.null(plotly::event_data("plotly_click"))) { return(plotly::plotly_empty()) }
  req(plotly::event_data("plotly_click"))
  recast_matrix <- get_recast_matrix()
  # Lazily load the osteosarcoma bin-level data on first use (same pattern as
  # output$sample_info).
  if (length((!exists("bin_data") | if (exists("bin_data")) { dim(bin_data)[1] == 3053 })) == 0 & isolate(input$data_source) == "linreg_osteosarcoma_CNVkit") { tryCatch(bin_data <<- readRDS((paste0(osteofn, "bin_data_lcc236.rds"))), error = function(e) NULL) }
  if (!is.null(recast_matrix)) {  # fixed: was !is.null("recast_matrix")
    click <- plotly::event_data("plotly_click")
    # Bin labels of the clicked cell, from the click's point indices.
    row_label <- rownames(recast_matrix)[as.integer(paste0(click[["pointNumber"]][[1]][1])) + 1]
    column_label <- colnames(recast_matrix)[as.integer(paste0(click[["pointNumber"]][[1]][2])) + 1]
    # For NBL sources the click coordinates are genomic bin starts (1 Mb bins).
    if (isolate(input$data_source) == "TCGA_NBL_low_pass" |
        isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset", "TCGA_NBL_stage4_subset", "TCGA_NBL_stage4s_subset", "TCGA_NBL_myc_amp_subset", "TCGA_NBL_not_myc_amp_subset"))
    {
      row_label <- paste0(isolate(input$chrom2), click[["y"]], "_", click[["y"]] + 1e6 - 1)
      column_label <- paste0(isolate(input$chrom1), click[["x"]], "_", click[["x"]] + 1e6 - 1)
    }
    # Backfill the probe (bin label) column if missing.
    if (length(bin_data$probe) == 0)
    {
      bin_data$probe <- rownames(bin_data)
    }
    d <- as.data.table(bin_data[bin_data$probe %in% c(row_label, column_label), ])
    if (nrow(d) == 0) { return("") }
    # One scatter trace per bin; y is simply the sample index.
    sample_info_p_scatter <- plotly::plot_ly(alpha = 0.6) %>%
      plotly::add_trace(x = as.numeric(d[1, ]), name = d[1, "probe"], y = seq(1:ncol(d))) %>%
      plotly::add_trace(x = as.numeric(d[2, ]), name = d[2, "probe"], y = seq(1:ncol(d)))
    print(sample_info_p_scatter)
    if (debug) { browser() }
    return(sample_info_p_scatter)
  }
})
# Zoomed "minimap" heatmap around the clicked cell.
# The main heatmap keeps every 4th bin; this view shows the 4x4 block of
# full-resolution bins that the clicked (downsampled) cell summarises.
output$minimap<-plotly::renderPlotly({
  req(plotly::event_data("plotly_click"))
  # Lazily load osteosarcoma bin data on first use (same pattern as
  # output$sample_info).
  if(length((!exists("bin_data")|if(exists("bin_data")){dim(bin_data)[1]==3053}))==0 & isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { tryCatch(bin_data<<-readRDS((paste0(osteofn,"bin_data_lcc236.rds"))),error = function(e) NULL) }
  recast_matrix<-get_recast_matrix()
  ggplotmatrix_full<-getGGplotMatrix_full()
  recast_matrix_full<-get_recast_matrix_full()
  # NOTE(review): is.null("...") tests string literals, so this guard is
  # always TRUE — presumably the objects themselves were meant.
  if(!is.null("recast_matrix") & !is.null("recast_matrix_full")) {
    row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label.
    column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label.
    # For NBL sources the click coordinates are genomic bin starts (1 Mb bins).
    if(isolate(input$data_source)=="TCGA_NBL_low_pass" |
       isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset"))
    {
      row_label<-paste0(isolate(input$chrom2),plotly::event_data("plotly_click")[["y"]],"_",plotly::event_data("plotly_click")[["y"]]+1e6-1)
      column_label<-paste0(isolate(input$chrom1),plotly::event_data("plotly_click")[["x"]],"_",plotly::event_data("plotly_click")[["x"]]+1e6-1)
    }
    # Backfill the probe (bin label) column if missing.
    if(length(bin_data$probe)==0)
    {
      bin_data$probe<-rownames(bin_data)
    }
    d<-as.data.table(bin_data[bin_data$probe %in% c(row_label,column_label),])
    if(nrow(d)==0){return("")}
    # The sampled matrix keeps every 4th bin, so the clicked label plus the
    # next three full-resolution labels cover the same genomic span.
    # NOTE(review): grep() here does pattern matching on bin labels — a label
    # that is a prefix of another could match the wrong row; confirm.
    row_labels_minimap<-rownames(recast_matrix_full)[grep(row_label,rownames(recast_matrix_full)):(grep(row_label,rownames(recast_matrix_full))+3)] #we subset by every fourth number along the rows and columns, hence we need n, n+1, n+2, n+3 (or n1:n2-1, the first number and all the numbers leading up to the next).
    col_labels_minimap<-colnames(recast_matrix_full)[grep(column_label,colnames(recast_matrix_full)):(grep(column_label,colnames(recast_matrix_full))+3)]
    ggplotmatrix_minimap<-ggplotmatrix_full[as.character(ggplotmatrix_full$Var1) %in% row_labels_minimap & as.character(ggplotmatrix_full$Var2) %in% col_labels_minimap, ]
    # Render the 4x4 block as a raster heatmap with hover text.
    p <- ggplot(data = ggplotmatrix_minimap ) +
      geom_raster(aes(x = Var2, y = Var1,fill=value,text=paste0("value:",value,"\nrow:",Var1,"\ncol:",Var2,"\n",value1))) + scale_x_discrete() +
      scale_y_discrete() + theme(axis.text.x = element_text(angle=60, hjust=1)) +
      ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0.5, limits = c(0, 1)) + theme(legend.position="bottom",axis.title = element_blank())
    plotly_output<-plotly::ggplotly(p,tooltip="text") %>% plotly::layout(margin=list(r=0, l=200, t=0, b=200),width=isolate(input$heatmapHeight),height=isolate(input$heatmapHeight)/1.25)
    if(debug){browser()}
    return(plotly_output)
  }
})
# Scatter plot of per-sample values: clicked row bin vs. clicked column bin,
# with a linear fit. Two code paths: in-house / NBL sources use bin_data;
# TCGA low-pass cohorts use per-sample segmentation values.
output$sample_info_scatter2<-plotly::renderPlotly({
  if(debug){browser()}
  req(plotly::event_data("plotly_click"))
  if (is.null(plotly::event_data("plotly_click"))) {return(NULL)}
  # Lazily load osteosarcoma bin data on first use.
  if(length((!exists("bin_data")|if(exists("bin_data")){dim(bin_data)[1]==3053}))==0 & isolate(input$data_source)=="linreg_osteosarcoma_CNVkit") { tryCatch(bin_data<<-readRDS((paste0(osteofn,"bin_data_lcc236.rds"))),error = function(e) NULL) }
  if(isolate(input$data_source)=="linreg_osteosarcoma_CNVkit" | isolate(input$data_source)=="TCGA_NBL_low_pass" |
     isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset"))
  {
    recast_matrix<-get_recast_matrix()
    # NOTE(review): is.null("recast_matrix") tests a string literal, so this
    # guard is always TRUE — presumably is.null(recast_matrix) was intended.
    if(!is.null("recast_matrix")) {
      # Bin labels of the clicked cell, from the click's point indices.
      row_label<-rownames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1]))+1] #correct column label.
      column_label<-colnames(recast_matrix)[as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2]))+1] #correct column label.
      # For NBL sources the click coordinates are genomic bin starts (1 Mb bins).
      if(isolate(input$data_source)=="TCGA_NBL_low_pass" |
         isolate(input$data_source) %in% c("TCGA_NBL_stage3_subset","TCGA_NBL_stage4_subset","TCGA_NBL_stage4s_subset","TCGA_NBL_myc_amp_subset","TCGA_NBL_not_myc_amp_subset"))
      {
        row_label<-paste0(isolate(input$chrom2),plotly::event_data("plotly_click")[["y"]],"_",plotly::event_data("plotly_click")[["y"]]+1e6-1)
        column_label<-paste0(isolate(input$chrom1),plotly::event_data("plotly_click")[["x"]],"_",plotly::event_data("plotly_click")[["x"]]+1e6-1)
      }
      # Backfill the probe (bin label) column if missing.
      if(length(bin_data$probe)==0)
      {
        bin_data$probe<-rownames(bin_data)
      }
      d<-as.data.table(bin_data[bin_data$probe %in% c(row_label,column_label),])
      if(nrow(d)==0){return("")}
      # Sample names: every column of d except the first (probe) column.
      d_sample_names<-names(d)[2:length(names(d))]
      # Transpose so samples become rows and the two bins become columns,
      # coercing everything back to numeric (t() produced characters).
      d_t<-as.data.frame(t(d)[2:ncol(d),])
      colnames(d_t)<-d$probe
      d_t<-as.data.frame(sapply(as.data.frame(d_t),function(x) as.numeric(as.character(x))))
      rownames(d_t)<-d_sample_names
      # Degenerate case: row and column bin are the same — duplicate the
      # column so the x/y scatter below still works.
      if(ncol(d_t)==1){d_t[,2]<-d_t[,1]
      colnames(d_t)[2]<-paste0(d$probe,"_")}
      # Scatter with per-sample hover text and a linear fit.
      sample_info_p_scatter2<-ggplot(data = d_t,aes(x=d_t[,1],y=d_t[,2])) + geom_point(aes(color=rownames(d_t),text=paste0("x: ",paste0(colnames(d_t)[1])," ", d_t[,1],"\n y:",paste0(colnames(d_t)[2])," ",d_t[,2],"\n ",rownames(d_t)))) + theme(legend.position="none") +
        xlab(paste0(colnames(d_t)[1])) + ylab(paste0(colnames(d_t)[2])) + geom_smooth(method=lm)
      print(plotly::ggplotly(sample_info_p_scatter2,tooltip=c("text")))
      return(plotly::ggplotly(sample_info_p_scatter2,tooltip=c("text")))
    }
  } #end in-house data processing
  # TCGA low-pass cohorts: plot row-bin vs column-bin segmentation values.
  if(isolate(input$data_source) %in% c("TCGA_AML_low_pass","TCGA_BRCA_low_pass","TCGA_OS_low_pass","TCGA_PRAD_low_pass"))
  {
    TCGA_low_pass_sample_info<-get_tcga_lp_sample_info()
    recast_matrix <- get_recast_matrix()
    # NOTE(review): same always-TRUE string-literal guard as above.
    if (!is.null("recast_matrix")) {
      # Labels are looked up in genomic order of the row/column ranges.
      row_label <- rownames(recast_matrix)[order(get_rownames_gr())][as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][1])) + 1]
      column_label <- colnames(recast_matrix)[order(get_colnames_gr())][as.integer(paste0(plotly::event_data("plotly_click")[["pointNumber"]][[1]][2])) + 1]
      if(isolate(input$data_source)=="TCGA_NBL_low_pass")
      {
        row_label<-paste0(isolate(input$chrom2),plotly::event_data("plotly_click")[["y"]],"_",plotly::event_data("plotly_click")[["y"]]+1e6-1)
        column_label<-paste0(isolate(input$chrom1),plotly::event_data("plotly_click")[["x"]],"_",plotly::event_data("plotly_click")[["x"]]+1e6-1)
      }
      d<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(row_label,column_label),])
      # Pick the value column: relativeCvg when present, Segment_Mean otherwise.
      if("TCGA_CNV_data_gr.....relativeCvg" %in% colnames(TCGA_low_pass_sample_info)){
        d<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(row_label,column_label),c("TCGA_CNV_data_gr.....relativeCvg","TCGA_CNV_data_gr.....sample")])
        d_row<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(row_label),c("TCGA_CNV_data_gr.....relativeCvg","TCGA_CNV_data_gr.....sample")])
        d_col<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(column_label),c("TCGA_CNV_data_gr.....relativeCvg","TCGA_CNV_data_gr.....sample")])
      # NOTE(review): this else branch re-tests the SAME relativeCvg condition
      # (always FALSE here), and the unbraced if only governs the `d<-` line —
      # d_row/d_col always run. Likely intended: test for Segment_Mean and
      # brace the whole body. Left as-is pending confirmation.
      } else { if("TCGA_CNV_data_gr.....relativeCvg" %in% colnames(TCGA_low_pass_sample_info))
        d<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(row_label,column_label),c("TCGA_CNV_data_gr.....Segment_Mean","TCGA_CNV_data_gr.....sample")])
      d_row<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(row_label),c("TCGA_CNV_data_gr.....Segment_Mean","TCGA_CNV_data_gr.....sample")])
      d_col<-as.data.table(TCGA_low_pass_sample_info[TCGA_low_pass_sample_info$pos %in% c(column_label),c("TCGA_CNV_data_gr.....Segment_Mean","TCGA_CNV_data_gr.....sample")])
      }
      if(nrow(d)==0){return("")}
      # The assignment below is the block's last expression, so its (ggplot)
      # value is what renderPlotly receives on this path.
      sample_info_p_scatter2<-ggplot(data = d_row,aes(x=unlist(d_row[,1]),y=unlist(d_col[,1]))) +
        geom_point(aes(color=unlist(d_row[,2]),shape=unlist(d_col[,2]),
                       text=paste0("row_value: ",paste0(d_row[,1]),"/n sample: ",paste0(d_row[,2]),
                                   " col_value: ", d_col[,1],"\n sample:",paste0(d_col[,2])))) + theme(legend.position="none") +
        xlab("column segmentation value") + ylab("row segmentation value") + geom_smooth(method=lm)
    }
  }
})
# Frequency table for the clicked row/column bins (osteosarcoma data).
# Fixes vs. original: the guard tested is.null("recast_matrix") — a string
# literal, always non-NULL — instead of the object; and a NULL plotly click
# previously errored on NULL[["pointNumber"]][[1]] instead of returning an
# empty table. Large commented-out debug scaffolding removed.
output$freq_table <- DT::renderDataTable({
  # Lazily load the precomputed frequency table on first use.
  if (is.null(freq_data)) {
    tryCatch(
      freq_data <- data.table::as.data.table(readRDS(paste0(osteofn, "OS_freq_data_lcc236.rds"))),
      error = function(e) NULL)
  }
  recast_matrix <- get_recast_matrix()
  click <- plotly::event_data("plotly_click")
  if (is.null(click)) { return(data.table()) }
  if (!is.null(recast_matrix)) {  # fixed: was !is.null("recast_matrix")
    # Bin labels of the clicked cell, from the click's point indices.
    row_label <- rownames(recast_matrix)[as.integer(paste0(click[["pointNumber"]][[1]][1])) + 1]
    column_label <- colnames(recast_matrix)[as.integer(paste0(click[["pointNumber"]][[1]][2])) + 1]
    # data.table filter on the bin-label ("pos") column.
    d <- as.data.table(freq_data[freq_data$pos %in% c(row_label, column_label)])
    if (is.null(d)) { return(data.table()) } else {
      return(d) }
  } else { return(data.table()) }
})
}
#}
|
dd48ba8df0ec1f0219fbfffb076323a4134bded6
|
9d3a4709b30707fd58a6e179c2719a9c0d117ccd
|
/machine-learning-ex5/R Version/ex5.R
|
59a010ab45adddcc07002c5129530169715da88e
|
[] |
no_license
|
lucariel/ML_coursera
|
0d02b150de5b09af3e4ccc4be2e44637e4850562
|
8b4c57814fcc6b0939b1eea25bd0ea29c7ca4cb1
|
refs/heads/master
| 2020-04-21T14:40:08.104809
| 2019-03-22T13:26:44
| 2019-03-22T13:26:44
| 169,642,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,471
|
r
|
ex5.R
|
# Coursera Machine Learning, Exercise 5 (R port).
# Part 1: load the water-level dataset, plot it, and build the design
# matrices with an intercept column.
library(tidyverse)
library(R.matlab)
source("linearReg.R")
source("lbfgsb3_.R")

# =========== Part 1: Loading and Visualizing Data =============
data <- readMat("ex5data1.mat")
# Training set
X <- data$X
y <- data$y
# Cross-validation set
Xval <- data$Xval
yval <- data$yval
# Test set
Xtest <- data$Xtest
ytest <- data$ytest

# Scatter plot of water-level change vs. dam outflow.
data_plot <- X %>%
  as.tibble() %>%
  ggplot(aes(V1, y)) +
  geom_point(shape = 4, color = "red") +
  xlab("Change in water level (x)") +
  ylab("Water flowing out of the damn(y)")

# Initial parameter vector (intercept, slope).
theta <- c(1, 1)

# Prepend a column of ones (x0) to the training and CV inputs.
Xn <- cbind(rep(1, length(X)), X)
Xnval <- cbind(rep(1, length(Xval)), Xval)
#=========== Part 2: Regularized Linear Regression Cost =============
# Cost of the initial theta with lambda = 1 (should print ~303.99 for the
# course dataset).
linearRegCostFunction_J(Xn, y, 1)(theta)
#=========== Part 3: Regularized Linear Regression Gradient =============
linearRegCostFunction_G(Xn, y, 1)(theta)
#=========== Part 4: Train Linear Regression =============
thetas_lg <- trainLinearReg(Xn, y, 1)
linearRegCostFunction_J(Xn, y, 1)(thetas_lg)
# Plot the fitted line over the training data.
as_tibble(X) %>% ggplot(aes(V1, y = y)) +
  geom_point(shape = 4, color = "red") +
  xlab("Change in water level (x)") +
  ylab("Water flowing out of the damn(y)") +
  geom_abline(slope = thetas_lg[2], intercept = thetas_lg[1], color = "blue")
##=========== Part 5: Learning Curve for Linear Regression =============
# Compute the learning curve ONCE and index both error vectors from it — the
# original called learningCurve() twice with identical arguments, repeating
# every per-subset training run. The leading entry of each curve is dropped,
# as before.
lc <- learningCurve(Xn, y, Xnval, yval, 0)
error_train <- lc[[1]][-1]
error_val <- lc[[2]][-1]
errorplot <- as_tibble(cbind(error_train, error_val, "m" = seq_along(error_val)))
errorplot %>% ggplot() +
  geom_line(aes(x = m, y = error_val, color = "Cross Validation")) +
  geom_line(aes(x = m, y = error_train, color = "Train")) +
  xlab("Number of training examples") +
  ylab("Error") + ggtitle("Learning Curve for linear regression") +
  scale_colour_manual("",
                      breaks = c("Cross Validation", "Train"),
                      values = c("green", "blue"))
#%% =========== Part 6: Feature Mapping for Polynomial Regression =============
# Degree of the polynomial feature expansion.
p<-8
#### Polynomial features for the training set ####
X_poly <- polyFeatures(X, p)
# Feature-normalise (scale() centres and divides by sd; returns a matrix).
X_poly <-(scale(X_poly))
#X_poly <- cbind( as.vector(matrix(1,1, dim(X_poly)[1])),X_poly)
####For Test set ########
X_poly_test<-polyFeatures(Xtest,p)
X_poly_test <- scale(X_poly_test)
#X_poly_test <- cbind( as.vector(matrix(1,1, dim(X_poly_test)[1])),X_poly_test)
####For Cross Validation set ########
Xval_poly<- polyFeatures(Xval, p)
Xval_poly<- scale(Xval_poly)
#Xval_poly <- cbind( as.vector(matrix(1,1, dim(Xval_poly)[1])),Xval_poly)
#=========== Part 7: Learning Curve for Polynomial Regression =============
# Fit the polynomial model with lambda = 1 and overlay the fit on the data.
theta_poly<-trainLinearReg(X_poly, y, 1)
# NOTE(review): X_polyn is rebuilt from UNSCALED features (plus intercept)
# while theta_poly was fit on scaled ones — confirm this is intentional.
X_polyn <- polyFeatures(X, p)
X_polyn <- cbind( as.vector(matrix(1,1, dim(X_polyn)[1])),X_polyn)
# NOTE(review): elementwise `*` recycles theta_poly DOWN COLUMNS, not across
# each row; the usual prediction is X_polyn %*% theta_poly — confirm.
y_hat<-rowSums(X_polyn*theta_poly)
data_plot+geom_smooth(aes(y = y_hat))
################Polynomial Regression Learning Curve################
# NOTE(review): lambda differs between the two calls (2 for the training
# error, 1 for the validation error); a learning curve normally uses one
# lambda for both — confirm.
error_train<-learningCurve(X_poly, y, Xval_poly, yval, 2)[[1]][-1]
error_val<-learningCurve(X_poly, y, Xval_poly, yval, 1)[[2]][-1]
errorplot<-as.tibble(cbind(error_train,error_val, "m"=seq(1:length(error_val))))
errorplot%>%ggplot()+
  geom_line(aes(x= m, y = error_val, color = "Cross Validation"))+
  geom_line(aes(x= m, y = error_train, color = "Train"))+
  xlab("Number of training examples")+
  ylab("Error")+ggtitle("Learning Curve for polynomial regression")+
  scale_colour_manual("",
                      breaks = c("Cross Validation", "Train"),
                      values = c("green", "blue"))
# =========== Part 8: Validation for Selecting Lambda =============
X_poly <- polyFeatures(X, p)
X_poly <-(scale(X_poly))
X_poly <- cbind( as.vector(matrix(1,1, dim(X_poly)[1])),X_poly)
Xval_poly<- polyFeatures(Xval, p)
Xval_poly<- scale(Xval_poly)
Xval_poly <- cbind( as.vector(matrix(1,1, dim(Xval_poly)[1])),Xval_poly)
sel_lambda<-validationCurve(X_poly, y,Xval_poly,yval)
lambdas<-sel_lambda[[1]]
error_train_l<-sel_lambda[[2]]
error_val_l<-sel_lambda[[3]]
plot_sel_lambda_data<-as.tibble(cbind(lambdas,error_train_l,error_val_l))
plot_sel_lambda_data%>%ggplot()+
geom_line(aes(x= lambdas, y = error_val_l, color = "Cross Validation"))+
geom_line(aes(x= lambdas, y = error_train_l, color = "Train"))+
xlab("Number of training examples")+
ylab("Error")+ggtitle("Lambda Selection")+
scale_colour_manual("",
breaks = c("Cross Validation", "Train"),
values = c("green", "blue"))
|
22ecb47edb968e3b678bb4a0e7a6fc20b56cb70f
|
67bfba6e24fe9156a352fb4b5b8d184269001783
|
/man/exampleGTV.Rd
|
2fd7c8c65152779b141f6cfd3a9946c84bda94e0
|
[] |
no_license
|
hsong1/GTV
|
0bd8dcc4965f6466ae0ebe0f6185d3194b59ab71
|
e215097ab1a77af2a0429b85e2a0274dcb866e06
|
refs/heads/master
| 2020-04-21T14:22:57.598506
| 2019-11-03T21:34:35
| 2019-11-03T21:34:35
| 169,633,065
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 406
|
rd
|
exampleGTV.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exampleGTV.R
\docType{data}
\name{exampleGTV}
\alias{exampleGTV}
\title{Create Example Data Set}
\format{An object of class \code{list} of length 3.}
\usage{
exampleGTV
}
\description{
A block-diagonal complete graph G with K connected components, where
each connected component is a complete graph on p/K nodes.
}
\keyword{datasets}
|
8da6f05c4e6cb52f52f4af7c4afa255270b1a77a
|
9d814a56ab7711c90907625f37c0597466795d15
|
/global.R
|
5c603508a6f930eb81de018c265de2b0a2838924
|
[] |
no_license
|
jrbautista5/Codebook
|
30d86f25dcb777fdef0cc67d0045198f209ea6e4
|
a72c8ed22f463d3459ebfe3c835d21e0e24e637f
|
refs/heads/master
| 2020-04-22T18:04:32.367724
| 2019-02-13T19:19:59
| 2019-02-13T19:19:59
| 170,564,289
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 35
|
r
|
global.R
|
# Load the cleaned payroll data once at startup; `LA` is then available to
# the rest of the app (presumably this is a Shiny global.R, sourced before
# ui.R/server.R -- confirm against the sibling files).
LA <- readRDS("clean_payroll.rds")
|
ce5b8513a923b9ebbba426a07157f6028aecb94c
|
41c984f669791f0819ef544b58036a3ec70c95d7
|
/mtcars/server.R
|
6b3713bbd818e216eceaf29c5eae8a4fbded821a
|
[] |
no_license
|
prasad81/shiny-application-and-reproducible-pitch
|
d9eb3a721231c7ee4f5973e1ad9011a9f1d9dc25
|
6a492aac36a4b5fc9dda4b2380de17f2cb0df06f
|
refs/heads/master
| 2021-01-23T04:59:29.663229
| 2017-03-27T05:20:35
| 2017-03-27T05:20:35
| 86,266,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,123
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)

# Use the built-in mtcars data set.
data <- mtcars

# Server logic: show the rows of mtcars whose mpg lies strictly inside the
# range chosen by the input$mpg slider, both as a table and as a histogram.
shinyServer(function(input, output) {

  # Reactive subset of the data for the current slider range.
  # Plain logical subsetting replaces the original eval(parse(text = ...))
  # string building, which was fragile and a well-known R anti-pattern.
  # The subset is computed once and shared by both outputs.
  filteredData <- reactive({
    data[data$mpg > input$mpg[1] & data$mpg < input$mpg[2], , drop = FALSE]
  })

  # Render the filtered rows as an interactive table.
  output$dataTable <- renderDataTable({
    filteredData()
  })

  # Render a histogram of mpg over the filtered rows.
  output$histplot <- renderPlot({
    hist(filteredData()$mpg, col = "blue", breaks = 10,
         xlab = "Miles per gallon",
         main = "Histogram of Miles per gallon of cars")
  })
})
|
1ad955fecb35136cf8c687e486332b1cc7006073
|
5f0cfcec5194f11137db76056ef2b3836ab80ff8
|
/man/solnfreq.Rd
|
2560c1d191d1f5eeea031e741aeb79e30ef868f0
|
[] |
no_license
|
JakeJing/treevo
|
54d341655f1e6ddac5ab73df38c890be557e7d17
|
3429ba37e8dc7c79cf441361d07c000f07423b6e
|
refs/heads/master
| 2021-01-12T01:20:10.296046
| 2016-10-03T01:09:15
| 2016-10-03T01:09:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 494
|
rd
|
solnfreq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summaryStatsLong.R
\name{solnfreq}
\alias{solnfreq}
\title{Calculate frequency of finding best solution in Geiger}
\usage{
solnfreq(x, tol = .Machine$double.eps^0.5)
}
\arguments{
\item{x}{A returned object from fitContinuous()}
\item{tol}{Tolerance for equality of solutions}
}
\value{
The frequency with which the best solution was found
}
\description{
This function is adapted from an internal function of the geiger package
}
|
06dc2557debcfe035b885ac430d9de58a0741fe4
|
3f0829cfe5994fac1aaa2f7542d9ccae71a6e733
|
/whisky.R
|
dab3c853a904118c6cd84434495ec128332a452f
|
[] |
no_license
|
Megha2535/Whiskey_Analysis
|
48127a4dfd9fb5beca8360bd0cc44dcf471f774a
|
d29c410505d91c4569268f79ed47873536199085
|
refs/heads/master
| 2022-12-16T13:43:04.926433
| 2020-09-10T19:10:23
| 2020-09-10T19:10:23
| 294,498,687
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 352
|
r
|
whisky.R
|
# Package setup ----
# NOTE: installing packages at the top of an analysis script is fragile;
# consider running the install.packages() calls once, outside the script.
install.packages("data.table")
library(data.table)
install.packages("tidyverse")
library(tidyverse)
install.packages("plotly")  # bug fix: was install.packages(plotly) -- the
                            # unquoted name errors ("object 'plotly' not found")
library(plotly)
library(tm)
library(wordcloud)
library(wordcloud2)
library(ggExtra)
library(DT)
library(cluster)
library(gridExtra)
library(grid)
#--------------------------------------------------------------
# Read the whisky data set.
# TODO: fread("") is a placeholder -- supply the path to the whisky CSV.
whisky <- fread("")
|
71960acb63dc9b4549d500f63c1d50d91923eadd
|
47eb9c0ee62df42354db9a74510c1c948cac441c
|
/plot1.R
|
6c53d98bee1fa3500089181fccd863084645051b
|
[] |
no_license
|
LMoller/ExData_Plotting1
|
e5949bfcc195b3e8f6e8b337c7f0550d9be012df
|
9f76075366c4501d98016d3f11da7bbac169adc1
|
refs/heads/master
| 2021-01-23T04:29:22.173189
| 2017-03-27T03:48:20
| 2017-03-27T03:48:20
| 86,201,957
| 0
| 0
| null | 2017-03-26T02:25:14
| 2017-03-26T02:25:13
| null |
UTF-8
|
R
| false
| false
| 645
|
r
|
plot1.R
|
## Plot 1 of the Exploratory Data Analysis course project: histogram of
## Global Active Power for 1-2 Feb 2007, written to plot1.png (480x480).

## Download the UCI household power consumption data and unzip it.
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", temp)
powerfile <- unzip(temp)
unlink(temp)

## Read the file. Missing values in this data set are coded as "?", so map
## them to NA at read time (otherwise numeric columns arrive as character).
powerconsumption <- read.table(powerfile, header = TRUE, sep = ";",
                               na.strings = "?")

## Keep only the two days of interest (dates are d/m/yyyy strings).
subsetpowerconsump <- powerconsumption[powerconsumption$Date %in% c("1/2/2007", "2/2/2007"), ]

## Histogram of Global Active Power.
## as.character() guards against factor columns on R < 4.0.
globalactivepower <- as.numeric(as.character(subsetpowerconsump$Global_active_power))
png("plot1.png", width = 480, height = 480)
hist(globalactivepower, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
d994fe131d8a1d857627a3b12f34e06e81088f68
|
e3ebbba983200dfd89209e107ee19ec201abc5ee
|
/cachematrix.R
|
06ace03153b977da2573dcdda31841566b041e99
|
[] |
no_license
|
sappy17/ProgrammingAssignment2
|
850519feb871dddc94a1d83d73246e11c2e7a198
|
5ce31ea2a34a2d91703ba8d83fbf32ad2aa8752c
|
refs/heads/master
| 2021-01-18T20:49:50.760008
| 2017-04-06T15:24:06
| 2017-04-06T15:24:06
| 86,992,557
| 0
| 0
| null | 2017-04-02T14:43:26
| 2017-04-02T14:43:26
| null |
UTF-8
|
R
| false
| false
| 899
|
r
|
cachematrix.R
|
## Cached computation of the inverse of a matrix.
## makeCacheMatrix() wraps a matrix so that its inverse, once computed with
## solve(), is stored and can be retrieved without being recalculated.
## A small worked example below exercises the code on a 2x2 matrix.
## Build a matrix wrapper whose inverse can be computed once and cached.
##
## Returns a list of four closures sharing one environment:
##   set(y)        store a new matrix and discard any cached inverse
##   get()         return the stored matrix
##   setInverse()  compute the inverse with solve() and cache it
##   getInverse()  return the cached inverse (NULL until setInverse is called)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # invalidate the stale cache
  }
  get <- function() {
    x
  }
  setInverse <- function() {
    cached_inverse <<- solve(x)
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Smoke test: build a cache, store a 2x2 matrix, and inspect its inverse.
cache <- makeCacheMatrix()
cache$set(matrix(1:4, 2))
cache$get()                     # echo the stored matrix
cache$setInverse()              # compute and cache the inverse
inverse_m <- cache$getInverse()
cache$get()                     # stored matrix is unchanged
inverse_m                       # echo the cached inverse
|
db8de8d66dcdc9326055ccf0ba05fe9963ac6f75
|
fe1887fbc9bc4ed192c24109ad05f1575935dc1c
|
/Chapter_4/tables_4.4_B1_cumulative_gas_emissions_field.R
|
0c3e9b7aee688d7243412cc11e9897bd4e42cf12
|
[] |
no_license
|
marenwestermann/PhD-thesis
|
6efb3cb9b68b56d380920dc27daa0de77ddadefc
|
3942cc2099eb5edaccbef08f8866dc33da28bf9a
|
refs/heads/master
| 2020-03-20T20:43:06.078371
| 2018-06-18T07:52:08
| 2018-06-18T07:52:08
| 137,702,409
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,127
|
r
|
tables_4.4_B1_cumulative_gas_emissions_field.R
|
# Cumulative gas-emission totals from the field experiment: one-way ANOVAs on
# (log-transformed) totals per treatment, unadjusted pairwise t-tests, and
# per-treatment descriptive statistics.
library(psych)  # bug fix: describeBy() is from psych and was called without loading it

# TODO: hard-coded absolute Windows path -- make this relative/portable.
total_gases <- read.csv("C:/Users/Maren/Dropbox/UQ PhD/PhD work/experiments/(3) field experiment/stats/gases total/field_exp_gases_total.csv")

## N2O (analysed on the log scale, summarised on the raw kg/ha scale)
anova_N2O <- aov(total_gases$N2O_ln ~ total_gases$treatment)
summary(anova_N2O)
pairwise.t.test(total_gases$N2O_ln, total_gases$treatment, p.adjust = "none")
describeBy(total_gases$N2O_kg_per_ha, total_gases$treatment)

## CO2
anova_CO2 <- aov(total_gases$CO2_ln ~ total_gases$treatment)
summary(anova_CO2)
pairwise.t.test(total_gases$CO2_ln, total_gases$treatment, p.adjust = "none")
describeBy(total_gases$CO2_kg_per_ha, total_gases$treatment)

## CH4 (no pre-computed log column, so log() is applied inline)
anova_CH4 <- aov(log(total_gases$CH4_kg_per_ha) ~ total_gases$treatment)
summary(anova_CH4)
pairwise.t.test(log(total_gases$CH4_kg_per_ha), total_gases$treatment, p.adjust = "none")
describeBy(total_gases$CH4_kg_per_ha, total_gases$treatment)

## CO2-equivalents (total, t/ha)
anova_CO2_eq <- aov(total_gases$CO2_eq_total_t_per_ha_ln ~ total_gases$treatment)
summary(anova_CO2_eq)
pairwise.t.test(total_gases$CO2_eq_total_t_per_ha_ln, total_gases$treatment, p.adjust = "none")
describeBy(total_gases$CO2_eq_total_t_per_ha, total_gases$treatment)
|
c3876f1fd0bf16de47b16f312f41717e77b3f71e
|
80bde6ac4965b70b26ce0f3204b70f33f3bb5890
|
/man/plotScores.ldecomp.Rd
|
fc3303669cca1d7a9bdef5295b3df0f3f889a4d2
|
[] |
no_license
|
KasperSkytte/mdatools
|
5c8abccbb2b735a5b058327714d9d354183eca9c
|
4147ee5ca5fc537ae91eee5cb2c42aed4e6b3e54
|
refs/heads/master
| 2020-07-04T13:06:41.067552
| 2019-06-19T08:44:56
| 2019-06-19T08:44:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,024
|
rd
|
plotScores.ldecomp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ldecomp.R
\name{plotScores.ldecomp}
\alias{plotScores.ldecomp}
\title{Scores plot for linear decomposition}
\usage{
\method{plotScores}{ldecomp}(obj, comp = c(1, 2), main = "Scores",
type = "p", xlab = NULL, ylab = NULL, show.labels = FALSE,
show.legend = TRUE, show.axes = TRUE, ...)
}
\arguments{
\item{obj}{object of \code{ldecomp} class.}
\item{comp}{which components to show the plot for (can be one value or vector with two values).}
\item{main}{main title for the plot}
\item{type}{type of the plot}
\item{xlab}{label for x-axis.}
\item{ylab}{label for y-axis.}
\item{show.labels}{logical, show or not labels for the plot objects}
\item{show.legend}{logical, show or not a legend on the plot.}
\item{show.axes}{logical, show or not a axes lines crossing origin (0,0)}
\item{...}{most of graphical parameters from \code{\link{mdaplot}} function can be used.}
}
\description{
Shows a plot with scores values for data objects.
}
|
cebc6f605dabdc55426f2d56e848383ec8349bb5
|
71da24fe85112d599e2af8214fdff1017c6348fc
|
/lab2/lab2_template/R/demo.R
|
3f857aacfefe4c00d12e4dab62f797f52fd43baf
|
[] |
no_license
|
jpdunc23/stat-215a-fall-2020
|
df6343c379f9744c6130da5fcc9ed2be1d4befac
|
135b38d610e60e60240c195122fc0d135f9967f7
|
refs/heads/master
| 2023-01-29T18:54:41.918578
| 2020-12-12T02:25:37
| 2020-12-12T02:25:37
| 290,987,481
| 13
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,022
|
r
|
demo.R
|
# Exploratory maps of the dialect-survey data (lab 2 demo script).
library(maps)
library(ggplot2)
library(dplyr)
# load the data
# ling_data: one respondent per row, one column per question.
# ling_location: aggregated counts on a 1x1 degree lat/long grid.
ling_data <- read.table('data/lingData.txt', header = T)
ling_location <- read.table('data/lingLocation.txt', header = T)
# question_data contains three objects: quest.mat, quest.use, all.ans
load("data/question_data.RData")
# ling_data has a column for each question, and ling_location has a column
# for each question x answer. Sorry the columns in ling_location are not usefully named,
# but it's not too tricky to figure out which is which.
# Note that you still need to clean this data (check for NA's, missing location data, etc.)
names(ling_data)
names(ling_location)
# State outline polygons for the lower 48, plus a blank theme for maps.
state_df <- map_data("state")
my_map_theme <- theme_void()
############
# Make a plot for the second person plural answers.
# You may want to join these data sets more efficiently than this.
# Keep answers 1, 2 and 9 to question 50, dropping points west of -125
# (outside the continental US).
plural_second_person <- ling_data %>%
filter(Q050 %in% c(1, 2, 9), long > -125)
# extract the answers to question 50
answers_q50 <- all.ans[['50']]
# Make the column to join on. They must be the same type.
answers_q50$Q050 <- rownames(answers_q50)
plural_second_person$Q050 <- as.character(plural_second_person$Q050)
plural_second_person <- inner_join(plural_second_person, answers_q50, by = "Q050")
# Plot!
# One point per respondent, coloured by their answer, over state outlines.
ggplot(plural_second_person) +
geom_point(aes(x = long, y = lat, color = ans),
size = 3, alpha = 0.5) +
geom_polygon(aes(x = long, y = lat, group = group),
data = state_df, colour = "black", fill = NA) +
my_map_theme
###############
# Plot the ling_location data (which lives on a grid).
# Note that this doesn't look great with
# state outlines. You can probably do better!
# Tile plot of one answer-count column (V12) on the log10 scale.
ling_location %>%
filter(Longitude > -125) %>%
ggplot() +
geom_tile(aes(x = Longitude, y = Latitude,
color = log10(V12), fill = log10(V12))) +
geom_polygon(aes(x = long, y = lat, group = group),
data = state_df, colour = "gray", fill = NA) +
my_map_theme
# maybe instead of 1 x 1 lat-long squares, plot by counties?
93aa330541845e1e608a6e0f12d9f068f9142786
|
e189d2945876e7b372d3081f4c3b4195cf443982
|
/man/PrecisionMulti.Rd
|
0285b4f84e60753d01132fdd94e420e1b5c7973d
|
[
"Apache-2.0"
] |
permissive
|
Cdk29/fastai
|
1f7a50662ed6204846975395927fce750ff65198
|
974677ad9d63fd4fa642a62583a5ae8b1610947b
|
refs/heads/master
| 2023-04-14T09:00:08.682659
| 2021-04-30T12:18:58
| 2021-04-30T12:18:58
| 324,944,638
| 0
| 1
|
Apache-2.0
| 2021-04-21T08:59:47
| 2020-12-28T07:38:23
| null |
UTF-8
|
R
| false
| true
| 551
|
rd
|
PrecisionMulti.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metric.R
\name{PrecisionMulti}
\alias{PrecisionMulti}
\title{PrecisionMulti}
\usage{
PrecisionMulti(
thresh = 0.5,
sigmoid = TRUE,
labels = NULL,
pos_label = 1,
average = "macro",
sample_weight = NULL
)
}
\arguments{
\item{thresh}{thresh}
\item{sigmoid}{sigmoid}
\item{labels}{labels}
\item{pos_label}{pos_label}
\item{average}{average}
\item{sample_weight}{sample_weight}
}
\value{
None
}
\description{
Precision for multi-label classification problems
}
|
762c32673fca262de4cfb9e6e4a47ee16e1d528c
|
34699f008ef631f3f87ca7e810d1c9e8b791078d
|
/NFL_old/projectionsCreation/mixtureModels/mixtureProjections.R
|
e4030e54c5005e8907305e1ca5032da074a35a6d
|
[] |
no_license
|
OliverCGreenwald/dfs
|
ee0943d16ed60a7e661bedfc889776dab7c04c69
|
ded1a05dcf9febcd30cb82aea19a1241f28c7fef
|
refs/heads/master
| 2021-07-07T02:14:52.966084
| 2017-10-05T03:10:09
| 2017-10-05T03:10:09
| 107,742,009
| 1
| 0
| null | 2017-10-21T01:31:09
| 2017-10-21T01:31:08
| null |
UTF-8
|
R
| false
| false
| 5,945
|
r
|
mixtureProjections.R
|
#setwd("~/Projects/DFS/")
#setwd("~/Documents/PrincetonFall16/fantasyfootball/DFS/")
####### DESCRIPTION #######
# In this file we cluster n player projections into k tiers. Offense only. Sunday only.
# The larger the TierRank, the better the player. Set kCutoffs higher if want more players
# to be ranked. Unranked players set to tier 0.
# Guidelines for setting kCutoffs and kTiers:
# - should be larger for weeks with more games b/c there are more players available
# -
# Based on code from https://github.com/hueykwik/FantasyFootball/blob/master/tiers.Rmd.
####### IMPORT LIBRARIES #########
library(mclust)
library(ggplot2)
####### SET PARAMETERS #########
# wk <- 6
# suggested params
# param.mat <- as.data.frame(matrix(data = NA, nrow = 17, ncol = 15, dimnames = list(NULL, c("Week","Num.Games","Num.QB","Num.RB","Num.WR","Num.TE","Num.Total","Tiers.QB","Tiers.RB","Tiers.WR","Tiers.TE","Cutoff.QB","Cutoff.RB","Cutoff.WR","Cutoff.TE"))))
# param.mat$Week <- 1:17
# param.mat$Num.Games <- c(13,14,14,13,12,13,13,11,11,12,12,12,13,14,13,14,16)
# for (i in 1:17) {
# temp.param <- read.csv(file = paste0("optimizationCode/data_warehouse/2016_cleaned_input/wk", i, "/offensive_players.csv"), stringsAsFactors = F)
#
# # count for each position
# param.mat$Num.QB[i] <- sum(temp.param$Position=="QB")
# param.mat$Num.RB[i] <- sum(temp.param$Position=="RB")
# param.mat$Num.WR[i] <- sum(temp.param$Position=="WR")
# param.mat$Num.TE[i] <- sum(temp.param$Position=="TE")
# param.mat$Num.Total[i] <- nrow(temp.param)
#
# # tiers for each position
# param.mat$Tiers.QB[i] <- 5
#
# # cutoff for each position
# param.mat$Cutoff.QB[i] <- param.mat$Num.Games[i]*2 + 5
# }
# number of tiers (clusters) for each position
kTiers = c(QB = 8, RB = 9, WR = 12, TE = 8)
# number of players to be considered for each position (e.g. 24 means top 24 players at that positino are ranked)
kCutoffs = c(QB = 24, RB = 40, WR = 60, TE = 24)
# iterate through these positions
pos.vec <- c("QB", "RB", "WR", "TE")
####### ITERATE THROUGH ALL WEEKS #########
# For each week: read the DFN projections, cluster each position's projected
# points into tiers with a Gaussian mixture model (mclust), plot the tiers,
# and write a TierRank column back into the cleaned optimizer input file.
for (wk in 1:17) {
print(paste0("Week: ", wk))
####### LOAD DATA #########
temp.dfn <- read.csv(file = paste0("optimizationCode/data_warehouse/dailyfantasynerd/dfn_offense_week", wk, ".csv"), stringsAsFactors = F)
####### COMPUTE TIERS #########
# Define function for computing tiers given a set of projections and number of tiers/clusters to be found
# NOTE(review): computeTiers is redefined on every loop iteration; it does
# not depend on wk and could be hoisted above the for loop.
computeTiers = function(projections, k, reverse = TRUE) { # reverse: Useful for clustering on projected points. We want highest projected points to be rank 1, so we reverse the levels.
clusters = NULL
# Retry Mclust with ever fewer clusters until a fit succeeds: on error or
# warning the handlers leave `clusters` NULL, so k is decremented and the
# loop tries again.
while (is.null(clusters)) {
tryCatch({
clusters = Mclust(projections, G = k)
n_clusters = length(unique(clusters$classification))
},
warning = function(w) { warning(w); return(NULL) },
error = function(e) { warning(e); return(NULL) })
if (!is.null(clusters)) break
k = k - 1
}
n_clusters = length(unique(clusters$classification))
tiers = factor(clusters$classification)
# Mclust's labels are arbitrary; reverse them so the best projections get
# the highest tier number when `reverse` is TRUE.
if (reverse) {
levels(tiers) = rev(levels(tiers))
levels(tiers) = n_clusters:1
} else {
levels(tiers) = 1:n_clusters
}
print(n_clusters)
return(tiers)
}
# Compute tiers for each position in pos.vec
tier.df.list <- list() # initialize list to store df for each position
for (i in 1:length(pos.vec)) {
pos = pos.vec[i]
ecr_df = temp.dfn[temp.dfn$Pos==pos,]
# Keep only the top kCutoffs[pos] players by projected fantasy points.
ecr_df <- ecr_df[order(ecr_df$Proj.FP, decreasing = T),]
ecr_df = ecr_df[1:kCutoffs[pos],] # The data is ordered from best rank to worst rank
ecr_df$TierRank = computeTiers(ecr_df$Proj.FP, kTiers[pos])
print(pos.vec[i])
ecr_df$nchar = nchar(as.character(ecr_df$Player.Name)) # For formatting later
# Calculate position rank, negative so lowest rank will be at bottom in the plot below
ecr_df$position.rank = -seq(nrow(ecr_df))
# Plotting
font = 3.5
barsize = 1.5
dotsize = 2
ceil.fp <- ecr_df$Ceil.FP
floor.fp <- ecr_df$Floor.FP
# We put Avg.Rank as y because geom_errorbar requires ymin/ymax. We then flip the coordinates.
p = ggplot(ecr_df, aes(x = position.rank, y = Proj.FP)) # we will flip these
p = p + geom_errorbar(aes(ymin = floor.fp, ymax = ceil.fp, width=0.2, colour=TierRank), size=barsize*0.8, alpha=0.4)
p = p + coord_flip() # flip x and y
p = p + geom_text(aes(label=Player.Name, colour=TierRank, y = Proj.FP), size=font)
p = p + scale_x_continuous("Ranking by DFN Projection")
p = p + ylab("DFN Projected Fantasy Points (Floor-Proj-Ceiling)")
p = p + ggtitle(paste0("Gaussian Mixture Model: Week ", wk, " Tiers (", pos, ")"))
print(p)
# change factors to char
ecr_df$TierRank <- as.character(ecr_df$TierRank)
# append to list
tier.df.list[[pos]] <- ecr_df
}
####### ADD TIER COLUMN TO CLEANED_INPUT_FILES #########
# load 2016_cleaned_input file
temp <- read.csv(file = paste0("optimizationCode/data_warehouse/2016_cleaned_input/wk", wk, "/offensive_players.csv"), stringsAsFactors = F)
# add tier rank for each position
for (i in 1:length(pos.vec)) {
# load tier df for position
tier.df.temp <- tier.df.list[[pos.vec[i]]]
# subset cleaned input file by position
temp.subset <- temp[temp$Position==pos.vec[i],]
# add TierRank
# Transfer tiers by matching on player name; unmatched players get NA.
temp.subset$TierRank <- tier.df.temp$TierRank[match(temp.subset$Name, tier.df.temp$Player.Name)]
# put subsetted data back into original df
temp[temp$Position==pos.vec[i],'TierRank'] <- as.numeric(temp.subset$TierRank)
}
# set unranked players (NAs) to 0
temp$TierRank[is.na(temp$TierRank)] <- 0
# write to file
write.csv(temp, file = paste0("optimizationCode/data_warehouse/2016_cleaned_input/wk", wk, "/offensive_players.csv"), row.names = F)
}
|
5abe9f831b708057380480b60a791165534b4d4c
|
29df0ca13daf4bf82bd806fed640f6775e48326c
|
/man/nuggetvalue.Rd
|
c900f140a6dd272e0699ad6bae75d69c822e1a0f
|
[] |
no_license
|
DiceKrigingClub/DiceKriging
|
7da1cba69f192516d3e6a98967268e372c9a3028
|
b7fa4542c36ed7c9f2da760f62ee49c6dbb92e18
|
refs/heads/master
| 2021-11-23T01:49:25.693561
| 2021-10-29T13:48:55
| 2021-10-29T13:48:55
| 130,677,010
| 7
| 2
| null | 2018-09-20T14:02:39
| 2018-04-23T10:02:05
|
R
|
UTF-8
|
R
| false
| false
| 379
|
rd
|
nuggetvalue.Rd
|
\name{nuggetvalue}
\alias{nuggetvalue}
\alias{nuggetvalue<-}
\title{Get or set the nugget value}
\description{Get or set the nugget value.}
\usage{
nuggetvalue(x)
nuggetvalue(x) <- value
}
\arguments{
\item{x}{ an object containing the covariance structure.}
\item{value}{ an optional variance value standing for the homogeneous nugget effect.}
}
\keyword{models}
|
d0f10ea88ae146b5a2f69cab11e89e02b2908087
|
f57bdf4e211ad7984fb5005883ae4d116c8f2e92
|
/R/pedParts.R
|
48dfad47f04dbaad8d8d1592b4116d7515051fdb
|
[] |
no_license
|
cran/paramlink
|
70d2e3776063411628bd5b5663a408737886d040
|
8af1f6544e37d20717829b5fbf7286fd303c68cc
|
refs/heads/master
| 2022-05-22T10:39:49.492616
| 2022-04-15T07:10:02
| 2022-04-15T07:10:02
| 17,698,214
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,972
|
r
|
pedParts.R
|
#' Pedigree subsets
#'
#' Utility functions for 'linkdat' objects, mainly for extracting various
#' pedigree information.
#'
#' @param x a \code{\link{linkdat}} object. In \code{related.pairs} possibly a
#' list of \code{linkdat} objects.
#' @param id a numerical ID label.
#' @param original.id a logical indicating whether 'id' refers to the original
#' ID label or the internal labeling.
#' @param degree a non-negative integer.
#' @param removal a non-negative integer
#' @param half a logical or NA. If TRUE (resp FALSE), only half (resp. full)
#' siblings/cousins/nephews/nieces are returned. If NA, both categories are
#' included.
#' @param relation one of the words (possibly truncated) \code{parents},
#' \code{siblings}, \code{grandparents}, \code{nephews_nieces},
#' \code{cousins}, \code{spouses}, \code{unrelated}.
#' @param interfam one of the words (possibly truncated) \code{none},
#' \code{founders} or \code{all}, specifying which interfamiliar pairs should
#' be included as unrelated in the case where \code{x} is a list of several
#' pedigrees. If \code{none}, only intrafamiliar pairs are considered; if
#' \code{founders} all interfamiliar pairs of (available) founders are
#' included; if \code{all}, all interfamiliar (available) pairs are included.
#' @param available a logical, if TRUE only pairs of available individuals are
#' returned.
#' @param ... further parameters
#'
#' @return For \code{ancestors(x,id)}, a vector containing the ID's of all
#' ancestors of the individual \code{id}. For \code{descendants(x,id)}, a
#' vector containing the ID's of all descendants (i.e. children,
#' grandchildren, a.s.o.) of individual \code{id}.
#'
#' The functions \code{cousins}, \code{grandparents}, \code{nephews_nieces},
#' \code{offspring}, \code{parents}, \code{siblings}, \code{spouses},
#' \code{unrelated}, each returns an integer vector containing the ID's of all
#' pedigree members having the specified relationship with \code{id}.
#'
#' For \code{related.pairs} a matrix with two columns. Each row gives of a
#' pair of pedigree members with the specified relation. If the input is a
#' list of multiple pedigrees, the matrix entries are characters of the form
#' 'X-Y' where X is the family ID and Y the individual ID of the person.
#'
#' For \code{leaves}, a vector of IDs containing all pedigree members without
#' children.
#'
#' @examples
#'
#' p = cbind(ID=2:9, FID=c(0,0,2,0,4,4,0,2), MID=c(0,0,3,0,5,5,0,8),
#' SEX=c(1,2,1,2,1,2,2,2), AFF=c(2,1,2,1,2,1,1,2))
#' x = linkdat(p)
#' stopifnot(setequal(spouses(x, 2), c(3,8)),
#' setequal(offspring(x, 2), c(4,9)),
#' setequal(descendants(x, 2), c(4,6,7,9)),
#' setequal(leaves(x), c(6,7,9)))
#'
#' # Creating a loop and detecting it with 'pedigreeLoops'
#' # (note that we get two loops, one for each inbred child):
#' loopx = addOffspring(x, father=4, mother=9, noffs=2)
#' lps = pedigreeLoops(loopx)
#' stopifnot(lps[[1]]$top == 2, setequal(sapply(lps, '[[', 'bottom'), 10:11))
#'
#' # We add genotypes for a single SNP marker and compute a LOD score under a dominant model.
#' loopx = setMarkers(loopx, cbind(1,c(2,1,2,1,2,1,1,2,1,1)))
#' loopx = setModel(loopx, 1)
#'
#' # Loops are automatically broken in lod():
#' LOD1 = lod(loopx, theta=0.1)
#' stopifnot(round(LOD1, 3) == 1.746)
#'
#' # Or we can break the loop manually before computing the LOD:
#' loopfree = breakLoops(loopx, loop_breaker=4)
#' LOD2 = lod(loopfree, theta=0.1)
#' stopifnot(all.equal(loopx, tieLoops(loopfree)))
#' stopifnot(all.equal(LOD1, LOD2))
#'
#' @name pedParts
NULL
#' @rdname pedParts
#' @export
offspring = function(x, id, original.id = TRUE) {
    ## Children of `id`: pedigree rows whose father column (FID, for sex 1)
    ## or mother column (MID, for sex 2) -- chosen by the sex of `id` --
    ## equals `id`. Returns original ID labels when `original.id` is TRUE,
    ## otherwise internal indices in 1..nInd.
    if (original.id) {
        id <- .internalID(x, id)
    }
    ped <- x$pedigree
    parent_col <- 1 + ped[id, "SEX"]  # sex 1 -> FID (col 2); sex 2 -> MID (col 3)
    child_rows <- ped[, parent_col] == id
    if (original.id) {
        x$orig.ids[child_rows]
    } else {
        (1:x$nInd)[child_rows]
    }
}
#' @rdname pedParts
#' @export
spouses = function(x, id, original.id = TRUE) {
    ## All pedigree members who share at least one child with `id`.
    ## The co-parent sits in the opposite parental column: 4 - SEX maps
    ## sex 1 -> MID (column 3) and sex 2 -> FID (column 2).
    iid <- if (original.id) .internalID(x, id) else id
    ped <- x$pedigree
    child_rows <- ped[, 1 + ped[iid, "SEX"]] == iid
    partners <- unique.default(ped[child_rows, 4 - ped[iid, "SEX"]])
    if (original.id) {
        return(x$orig.ids[partners])
    }
    partners
}
#' @rdname pedParts
#' @export
related.pairs = function(x, relation = c("parents", "siblings", "grandparents", "nephews_nieces",
"cousins", "spouses", "unrelated"), available = F, interfam = c("none",
"founders", "all"), ...) {
relation = match.arg(relation)
interfam = match.arg(interfam)
# Dispatch by name: calls the relation-specific sibling function
# (parents(), siblings(), ...) for each pedigree member.
func = function(...) get(relation)(...)
# List of pedigrees: recurse into each, then (for "unrelated" only)
# optionally add cross-family pairs as controlled by `interfam`.
if (is.linkdat.list(x)) {
res = do.call(rbind, lapply(x, function(xx)
related.pairs(xx, relation, available, ...)
))
if (relation == "unrelated" && interfam != "none") {
# Per family: candidate ids (restricted to available and/or founders).
avail = lapply(x, function(xx) {
ids = if (available)
xx$available else xx$orig.ids
if (interfam == "founders")
ids = intersect(ids, xx$orig.ids[xx$founders])
if (length(ids) == 0)
return(NULL)
ids
})
avail = avail[!sapply(avail, is.null)]
fampairs = data.frame(t(.comb2(length(avail)))) # enable use of lapply below
# NOTE(review): the `interfam` argument is reused here to hold the matrix
# of cross-family pairs; harmless, but a fresh name would read better.
interfam = do.call(rbind, lapply(fampairs, function(p) fast.grid(avail[p])))
res = rbind(res, interfam)
}
return(res)
}
# Single pedigree: collect (member, relative) pairs over all members,
# working in internal ids.
res = NULL
for (i in 1:x$nInd) {
rels = func(x, i, original.id = F, ...)
rels = rels[rels != i]
res = rbind(res, cbind(rep.int(i, length(rels)), rels, deparse.level = 0))
}
# Normalise each pair to (smaller, larger) internal id, then de-duplicate.
res[res[, 1] > res[, 2], ] = res[res[, 1] > res[, 2], 2:1]
res = unique(res)
if (available) {
avail = .internalID(x, x$available)
res = res[res[, 1] %in% avail & res[, 2] %in% avail, , drop = F]
}
# return matrix with original IDs
res[] = x$orig.ids[res]
res
}
#' @rdname pedParts
#' @export
unrelated = function(x, id, original.id = TRUE) {
    # An individual is related to <id> iff it descends from some member of
    # <id>'s lineage (an ancestor of <id>, or <id> itself). Everyone else in
    # the pedigree is unrelated.
    if (!original.id)
        id = x$orig.ids[id]
    lineage = c(id, ancestors(x, id))
    related = lapply(lineage, function(a) c(a, descendants(x, a, original.id = TRUE)))
    related = unique.default(unlist(related))
    result = setdiff(x$orig.ids, related)
    if (!original.id)
        result = .internalID(x, result)
    result
}
#' @rdname pedParts
#' @export
leaves = function(x) {
    # Leaves are members that never appear as a father or mother of anyone.
    ped = as.matrix(x, FALSE)
    parent_ids = ped[, c("FID", "MID")]
    .mysetdiff(ped[, "ID", drop = FALSE], parent_ids)
}
#' @rdname pedParts
#' @export
parents = function(x, id, original.id = TRUE) {
    # Parents are simply "grandparents of degree 1"; delegate accordingly.
    grandparents(x, id, degree = 1, original.id = original.id)
}
#' @rdname pedParts
#' @export
grandparents = function(x, id, degree = 2, original.id = TRUE) {
    # Climb <degree> generations: each step replaces the current set of ids
    # with their father/mother entries (columns 2:3 of the pedigree matrix).
    if (original.id)
        id = .internalID(x, id)
    ped = x$pedigree
    current = id
    for (step in seq_len(degree))
        current = ped[current, 2:3]
    if (original.id)
        return(x$orig.ids[current])
    (1:x$nInd)[current]
}
#' @rdname pedParts
#' @export
siblings = function(x, id, half = NA, original.id = TRUE) {
    # Siblings of <id>. The <half> argument selects the flavour:
    #   NA    -> all siblings (shared father OR mother)
    #   TRUE  -> half siblings only (exactly one shared parent)
    #   FALSE -> full siblings only (both parents shared)
    if (original.id)
        id = .internalID(x, id)
    ped = x$pedigree
    father = ped[id, "FID"]
    mother = ped[id, "MID"]
    # Founders (no recorded parents) have no siblings.
    if (father == 0 && mother == 0)
        return(numeric())
    share_fa = ped[, "FID"] == father
    share_mo = ped[, "MID"] == mother
    keep = if (is.na(half)) {
        share_fa | share_mo
    } else if (half) {
        xor(share_fa, share_mo)
    } else {
        share_fa & share_mo
    }
    keep[id] = FALSE  # never report <id> as its own sibling
    if (original.id)
        return(x$orig.ids[keep])
    (1:x$nInd)[keep]
}
#' @rdname pedParts
#' @export
cousins = function(x, id, degree = 1, removal = 0, half = NA, original.id = TRUE) {
    # Cousins of the given <degree> (with <removal>): climb <degree>
    # generations, take the siblings of those ancestors ("uncles/aunts"),
    # then descend degree + removal generations through their offspring.
    if (original.id)
        id = .internalID(x, id)
    anc = grandparents(x, id, degree = degree, original.id = FALSE)
    result = unique.default(unlist(lapply(anc, siblings, x = x, half = half,
        original.id = FALSE)))
    for (gen in seq_len(degree + removal)) {
        result = unique.default(unlist(lapply(result, offspring, x = x,
            original.id = FALSE)))
    }
    if (original.id)
        result = x$orig.ids[result]
    result
}
#' @rdname pedParts
#' @export
nephews_nieces = function(x, id, removal = 1, half = NA, original.id = TRUE) {
    # Nephews/nieces are "degree-0 cousins" at the given removal.
    cousins(x, id, degree = 0, removal = removal, half = half,
        original.id = original.id)
}
#' @rdname pedParts
#' @export
ancestors = function(x, id) {
    # climbs up the pedigree storing parents iteratively. (Not documented: Accepts id of length
    # > 1)
    # Accepts either a linkdat object or a raw pedigree matrix; in both cases
    # the ancestors are returned as original IDs.
    if (is.linkdat(x)) {
        p = x$pedigree
        orig_ids = x$orig.ids
        ids_int = .internalID(x, id)
    } else if (is.matrix(x) && isTRUE(all(c("ID", "FID", "MID") %in% colnames(x)))) {
        p = x
        orig_ids = p[, "ID"]
        ids_int = match(id, orig_ids)
    } else stop("x must be either a linkdat object or a matrix whose colnames include 'ID', 'FID' and 'MID'")
    # Relabel to 1..n so that ids can be used directly as row indices below.
    p = relabel(p, 1:nrow(p))
    ancest = numeric(0)
    # First generation up: the FID/MID entries of the starting individual(s).
    up1 = as.numeric(p[ids_int, c("FID", "MID")])
    up1 = up1[up1 > 0 & up1 <= nrow(p)] #NB: Avoids pedigree errors without warning! Should be caught in .checkped anyway
    up1 = up1[!duplicated.default(up1)]
    # Climb one generation at a time, accumulating into <ancest> and pruning
    # already-seen ids, until no new ancestors appear.
    while (length(up1) > 0) {
        ancest = c(ancest, up1)
        up1 = .mysetdiff(as.numeric(p[up1, c("FID", "MID")]), ancest)
    }
    # Drop the founders' 0-parent entries and duplicates, sort, then map back
    # to original IDs.
    ancest = sort.int(ancest[(ancest != 0) & !duplicated(ancest)])
    return(orig_ids[ancest])
}
#' @rdname pedParts
#' @export
descendants = function(x, id, original.id = TRUE) {
    # Breadth-first sweep down the pedigree: repeatedly collect all children
    # of the current generation until no further offspring are found.
    if (original.id)
        id = .internalID(x, id)
    ped = x$pedigree
    ids = ped[, "ID"]
    fathers = ped[, "FID"]  # renamed from F/M: F shadows the FALSE literal
    mothers = ped[, "MID"]
    found = numeric()
    current = id
    while (length(current) > 0) {
        current = ids[fathers %in% current | mothers %in% current]
        found = c(found, current)
    }
    found = sort.int(unique.default(found))
    if (original.id)
        found = x$orig.ids[found]
    found
}
#' Pairwise common ancestors
#'
#' Computes a matrix A whose entry A[i,j] is TRUE if pedigree members i and j have a common ancestor, and FALSE otherwise.
#'
#' @param x a \code{\link{linkdat}} object.
#'
#' @examples
#'
#' x = fullSibMating(3)
#' A = hasCA(x)
#' stopifnot(A[1,1], !A[1,2], all(A[3:8, 3:8]))
#'
#' @export
hasCA = function(x) {
    # Pairwise common-ancestor indicator: A[i, j] is TRUE iff members i and j
    # share a common ancestor (each founder counts as an ancestor of itself,
    # so the diagonal is TRUE for every member descending from a founder).
    #
    # The matrix is indexed directly by id, so the internal ordering must
    # coincide with the original ids 1, 2, ..., nInd.
    #
    # Fixes: T/F literals -> TRUE/FALSE (T/F are reassignable); the ordering
    # check uses seq_len() and a direct equality instead of the subtraction
    # trick `x$orig.ids - 1:x$nInd == 0`.
    if (!all(x$orig.ids == seq_len(x$nInd)))
        stop("This is currently only implemented for pedigrees with ordering 1,2,...")
    A = matrix(FALSE, ncol = x$nInd, nrow = x$nInd)
    for (i in x$founders) {
        # vector of all descendants of i, including i; every pair inside this
        # set has founder i as a common ancestor.
        desc = c(i, descendants(x, i))
        A[.my.grid(rep(list(desc), 2))] = TRUE
    }
    A
}
|
0d11e35d4abc2808a72e2508a210b2d0f906d208
|
1b690676d146556cbdc986b72cc6f1ff22c0d781
|
/Fixed.Mod.R
|
410619819ae49f18226391c374fff251c90c183f
|
[
"MIT"
] |
permissive
|
benniu720/Proteomics-Toolkit
|
d39ab86064cfb6f94dea4038040074ee9caa5e61
|
6a2802b243b76e3b0e3a5170d084f88f16cfd09a
|
refs/heads/master
| 2021-01-23T12:52:55.820128
| 2017-01-27T04:40:07
| 2017-01-27T04:40:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,865
|
r
|
Fixed.Mod.R
|
Fixed.Mod<-function(peptides, ptm1, res1, ptm2, res2, ptm3, res3, ptmfml, resfml){ ## the ptm1, res1, ..., ptm3, res3, correspond to those on the sidebar. And the ptmfml and resfml correspond to the user-input PTM in the sidebar.
  # Apply fixed post-translational modifications (PTMs) to a peptide table
  # and return, per peptide, its overall elemental composition (peptide
  # atoms plus the summed atoms of every PTM whose residue lies inside the
  # peptide's start/stop range).
  #
  # Args:
  #   peptides - data.frame with row names = peptide sequences and columns
  #              start/stop (residue positions). NOTE(review): assumed from
  #              usage below -- confirm against the caller.
  #   ptm1..ptm3 / res1..res3 - PTM names from the built-in table and their
  #              residue specifications (parsed by Res.to.Num()).
  #   ptmfml / resfml - a user-entered PTM formula and its residues.
  #
  # Returns: a list (one element per peptide) of elemental-composition
  # objects produced by form().
  source("Res.to.Num.R")
  source("form.R")
  # Elemental deltas of the built-in PTMs: one row per modification, columns
  # are atom-count changes for C, H, N, O, S, P, Br, Cl, Si, F.
  PTM<-data.frame("C"=c(0,0,0,0,0,2,0,0,0,0,0,2,3,0,0),"H"=c(0,0,0,0,1,2,1,-1,3,-3,-2,4,6,0,-2),"N"=c(0,0,0,0,0,0,1,-1,1,-1,0,0,0,0,0),"O"=c(0,1,2,3,3,1,-1,1,0,0,-1,0,0,0,1),"S"=c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), "P"=c(0,0,0,0,1,0,0,0,0,0,0,0,0,0,0),"Br"=c(0,0,0,0,0,0,0,0,0,0,0,0,0,1,0),"Cl"=c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),"Si"=c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),"F"=c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0))
  # Build the extra row for the user-entered PTM. When the formula's first
  # element is "C", 2C/3H/1N/1O are subtracted -- presumably to compensate a
  # residue backbone already counted elsewhere; TODO confirm the chemistry.
  if(is.null(resfml)){
    PTM.add<-form("")
  } else {
    if(substr(resfml,1,1)!="C"){
      PTM.add<-form(ptmfml)
    } else {
      PTM.add<-list("C"=form(ptmfml)$C-2, "H"=form(ptmfml)$H-3, "N"=form(ptmfml)$N-1, "O"=form(ptmfml)$O-1, "S"=form(ptmfml)$S, "P"=form(ptmfml)$P, "Br"=form(ptmfml)$Br, "Cl"=form(ptmfml)$Cl, "Si"=form(ptmfml)$Si, "F"=form(ptmfml)$F)}
  }
  PTM<-rbind(PTM, PTM.add)
  row.names(PTM)<-c("none","monoOxi","diOxi","triOxi","Phospho","Acetyl","Amidated","Deamidated","Ammonium-add","Ammonium-loss","Dehydrated","Dimethyl","Trimethyl","Bromo","Carbonyl",ptmfml)
  # <al>: one row per (residue position, PTM name) assignment from the UI.
  al<-data.frame("RES"=c(Res.to.Num(res1), Res.to.Num(res2), Res.to.Num(res3),Res.to.Num(resfml)), "PTM"=c(rep(ptm1, length(res1)), rep(ptm2, length(res2)), rep(ptm3, length(res3)),rep(ptmfml, length(resfml))))
  peptides$Mods<-rep("",nrow(peptides))
  # For each peptide, sum the elemental deltas of all PTMs whose residue
  # position falls within [start, stop], formatted as e.g. "C2 H4 O1 ...".
  for(i in 1:nrow(peptides)){
    id<-which(al$RES>=peptides$start[i] & al$RES<=peptides$stop[i])
    if(length(id)==0){
      next} else {
        mods<-as.character(al$PTM[id])
        combine.mods<-PTM[row.names(PTM) %in% mods,]
        cm<-0
        # Weight each matched PTM row by how many times it occurs in <mods>.
        for(s in 1:nrow(combine.mods)){
          cm0<-combine.mods[s,]*sum(mods %in% row.names(combine.mods)[s])
          cm<-cm+cm0}
        combine.mods<-paste(paste(names(cm), cm, sep=""), collapse = " ")
        peptides$Mods[i]<-combine.mods}}
  ## after this for loop, we add new column named "Mods", which essentially is the add-up of all the elemental compositions of PTMs for each peptide.
  ConvertPeptide2<-function(F){ ## F is any peptide sequence
    # NOTE(review): the parameter is named F, which shadows the FALSE
    # shorthand inside this helper -- works, but worth renaming eventually.
    x<-ConvertPeptide(F, IAA=TRUE) ## x is the result from ConvertPeptide() function
    x<-list("C"=x$C, "H"=x$H, "N"=x$N,"O"=x$O,"S"=x$S,"P"=0,"Br"=0,"Cl"=0,"Si"=0,"F"=0)
    return(x)}
  ## The ConvertPeptide2 function simply makes the original ConvertPeptide function display more details, ouput the enlongated elemental list.
  # Total composition per peptide = bare peptide atoms + summed PTM atoms.
  peptides$Overall<-rep("", nrow(peptides))
  for(h in 1:nrow(peptides)){
    x<-unlist(ConvertPeptide2(row.names(peptides)[h])) + unlist(form(peptides$Mods[h]))
    x<-paste(paste(names(x), x, sep=""), collapse = " ")
    peptides$Overall[h]<-x}
  return(lapply(peptides$Overall, form))
}
|
389a3c051431ed8c546c30d3d8da58e3561014c4
|
bb19e95b3c3d21bfd8e9db69fd8ea19979c4df0b
|
/shinyStarost/server.R
|
6f3a6adb25b72792c8227ea5e6214b5c01eb7058
|
[
"MIT"
] |
permissive
|
janrudof/APPR-2017-18
|
8e03db72b15c32d9624c88da79fc5e8a48e6afe8
|
baeb7b25da12f4aa9b448f844fe918ec108579af
|
refs/heads/master
| 2021-09-07T02:16:52.408436
| 2018-02-15T17:57:32
| 2018-02-15T17:57:32
| 110,083,621
| 0
| 0
| null | 2017-11-09T07:45:07
| 2017-11-09T07:45:07
| null |
UTF-8
|
R
| false
| false
| 1,114
|
r
|
server.R
|
library(shiny)
# Shiny server: renders a linear-regression forecast of private trips for the
# age group chosen in the UI (input$starost). NOTE(review): tabela1.zdruzena
# and the ggplot2/dplyr functions are assumed to come from global.R -- confirm.
shinyServer(function(input, output) {
  output$napovedstarost <- renderPlot({
    # Keep only private-trip rows for the selected age group; drop the
    # all-ages aggregate row ("Starost - SKUPAJ").
    tabela1.preciscena <- tabela1.zdruzena %>% filter(VRSTA_POTOVANJA == "Sli na zasebno potovanje", STAROST != "Starost - SKUPAJ", STAROST == input$starost)
    tabela1.preciscena$VRSTA_POTOVANJA <- NULL
    tabela1.preciscena$STAROST <- NULL
    # Simple linear trend of trip counts over years.
    star <- lm(data = tabela1.preciscena, SKUPAJ ~ LETO)
    # Observed data with a fitted regression line and confidence band.
    model1 <- ggplot(tabela1.preciscena) +
      aes(x = LETO, y= SKUPAJ) +
      labs(title = "Graf napovedi potovanj glede na starostno obdobje", x = "Leto", y = "Število potovanj (v 1000)") +
      geom_point(size = 3) +
      geom_smooth(method = "lm", formula = y ~ x, fullrange = TRUE, se = TRUE, size = 2) +
      theme_bw()
    # Predict 2016-2020 from the fitted model and overlay in red.
    novi.podatki1 <- data.frame(LETO = seq(2016, 2020, 1))
    napoved1 <- novi.podatki1 %>% mutate(SKUPAJ = predict(star, .))
    model1 + geom_point(data = napoved1, aes(x=LETO, y=SKUPAJ), color = "red", size=3) +
      scale_x_continuous(breaks = seq(2006, 2020, 2)) +
      theme(plot.title = element_text(lineheight=.10, face="bold", hjust = 0.5))
  })
})
|
0a420c81075d15cc35569f057eb1b130d8af2f1c
|
2968dbc3a339019de7f72489196e4c7ea68858ca
|
/man/gg_arma.Rd
|
f46182c114b7de0b15dfe811a4a41a2eefbe44a1
|
[] |
no_license
|
tidyverts/feasts
|
1266009079b445424aa2768d02e2e236358564ba
|
1ad4cfd6df3868566d79932dffc3cd337029f766
|
refs/heads/master
| 2023-06-08T14:27:31.603297
| 2023-03-22T14:21:40
| 2023-03-22T14:21:40
| 135,676,255
| 275
| 25
| null | 2023-05-29T14:37:17
| 2018-06-01T06:23:07
|
R
|
UTF-8
|
R
| false
| true
| 969
|
rd
|
gg_arma.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graphics.R
\name{gg_arma}
\alias{gg_arma}
\title{Plot characteristic ARMA roots}
\usage{
gg_arma(data)
}
\arguments{
\item{data}{A mable containing models with AR and/or MA roots.}
}
\value{
A ggplot object the characteristic roots from ARMA components.
}
\description{
Produces a plot of the inverse AR and MA roots of an ARIMA model.
Inverse roots outside the unit circle are shown in red.
}
\details{
Only models which compute ARMA roots can be visualised with this function.
That is to say, the \code{glance()} of the model contains \code{ar_roots} and \code{ma_roots}.
}
\examples{
if (requireNamespace("fable", quietly = TRUE)) {
library(fable)
library(tsibble)
library(dplyr)
tsibbledata::aus_retail \%>\%
filter(
State == "Victoria",
Industry == "Cafes, restaurants and catering services"
) \%>\%
model(ARIMA(Turnover ~ pdq(0,1,1) + PDQ(0,1,1))) \%>\%
gg_arma()
}
}
|
2014fb13be0a3bc8d4902c8b135c16bbf9016586
|
15a6602e0e0a18decf216f78c8c99775036b3590
|
/R/barplot_by_groups.R
|
9b1e9107fb262e238dd3b43607329e3cf841d16d
|
[
"MIT"
] |
permissive
|
zackarno/butteR
|
51832a78b1c3b186e2f7caf93f7da1ab3f8612ce
|
54cc596b411ddd7528b4e2d74078278dc276c575
|
refs/heads/master
| 2021-06-20T22:42:04.595204
| 2021-05-04T16:53:33
| 2021-05-04T16:53:33
| 207,075,047
| 6
| 11
|
NOASSERTION
| 2020-09-18T10:56:05
| 2019-09-08T07:05:53
|
R
|
UTF-8
|
R
| false
| false
| 2,407
|
r
|
barplot_by_groups.R
|
#' Barplot grouped binary columns
#' @param design Design object from survey or srvyr package.
#' @param list_of_variables Vector containing column names to analyze.
#' @param aggregation_level Column name to aggregate or dissagregate to OR vector of column names to dissagregate to.
#' @param binary Logical (default=TRUE) if the columns are binary or numeric.
#' @export
barplot_by_group <- function(design, list_of_variables, aggregation_level, binary = TRUE) {
  # Bar plot of survey means (with CIs) for a set of columns, optionally
  # disaggregated by one or more grouping columns.
  #
  # Fixes vs. the original: removed the unused severity_int_component_health_graphs
  # list and the no-op `design_srvy <- design_srvy` branch; preallocated the
  # per-variable summary list instead of growing it; seq_along() instead of
  # 1:length(); if/else instead of two independent `binary == TRUE/FALSE`
  # tests. Interface and output are unchanged.
  design_srvy <- srvyr::as_survey(design)
  # Apply grouping only when an aggregation level was requested.
  if (!is.null(aggregation_level)) {
    aggregate_by <- syms(aggregation_level)
    design_srvy <- design_srvy %>%
      group_by(!!!aggregate_by, .drop = FALSE)
  }
  # One survey-mean summary (with CI columns mean.stat_low/_upp) per variable.
  int_component_summary <- vector("list", length(list_of_variables))
  for (i in seq_along(list_of_variables)) {
    variable_of_interest <- list_of_variables[i]
    int_component_summary[[i]] <- design_srvy %>%
      summarise(mean.stat = survey_mean(!!sym(variable_of_interest),
                                        na.rm = TRUE, vartype = "ci")) %>%
      mutate(colname = variable_of_interest)
  }
  int_component_summaries_binded <- do.call("rbind", int_component_summary)
  # Base plot: x is either the variable name (no aggregation) or the grouping
  # column, with one bar per analyzed variable.
  if (is.null(aggregation_level)) {
    p1 <- int_component_summaries_binded %>%
      ggplot(aes(x = colname, y = mean.stat, fill = colname)) +
      colorspace::scale_fill_discrete_qualitative(guide = FALSE)
  } else {
    p1 <- int_component_summaries_binded %>%
      ggplot(aes(x = as.factor(!!sym(aggregation_level)),
                 y = mean.stat,
                 fill = colname)) +
      colorspace::scale_fill_discrete_qualitative()
  }
  if (binary) {
    # Binary indicators: y axis as percentages on [0, 1].
    p2 <- p1 +
      geom_bar(position = position_dodge(), stat = "identity", colour = "black") +
      geom_errorbar(aes(ymin = mean.stat_low, ymax = mean.stat_upp),
                    width = .2, position = position_dodge(.9)) +
      scale_y_continuous(breaks = seq(0, 1, by = 0.1),
                         labels = scales::percent_format(accuracy = 1)) +
      labs(x = aggregation_level) +
      coord_flip()
  } else {
    # Numeric columns: y axis spans the observed data range in 0.5 steps.
    range_of_data <- design_srvy$variables[, list_of_variables] %>% range()
    p2 <- p1 +
      geom_bar(position = position_dodge(), stat = "identity", colour = "black") +
      geom_errorbar(aes(ymin = mean.stat_low, ymax = mean.stat_upp),
                    width = .2, position = position_dodge(.9)) +
      scale_y_continuous(breaks = seq(min(range_of_data), max(range_of_data), by = 0.5)) +
      labs(x = aggregation_level) +
      coord_flip()
  }
  p2
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.