content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Chicago crime -- Motor Vehicle Theft dataset: exploratory analysis of mvt.csv
# (crime dates, arrest rates, and popular theft locations).
# NOTE(review): rm(list = ls()) and setwd() in a script are discouraged -- they
# wipe the caller's workspace and hard-code a machine-specific path.
rm(list = ls())
setwd('/home/harish/R/Data_Analytics_using_R')
mvt = read.csv("mvt.csv")
str(mvt)
# Convert Arrest/Domestic to numeric for tabulation.
# NOTE(review): if these columns were read as factors, as.numeric() yields the
# level codes (1/2), not 0/1 -- confirm the column types shown by str(mvt).
mvt$Arrest = as.numeric(mvt$Arrest)
mvt$Domestic = as.numeric(mvt$Domestic)
str(mvt)
summary(mvt)
# Parse "month/day/year hour:minute" timestamps into Date values,
# then derive month and weekday columns.
DateConvert = as.Date(strptime(mvt$Date, "%m/%d/%y %H:%M"))
mvt$Date = DateConvert
mvt$month = months(DateConvert)
mvt$weekday = weekdays(DateConvert)
# Month of the earliest recorded theft
mvt$month[which.min(mvt$Date)]
# Month of the latest recorded theft
mvt$month[which.max(mvt$Date)]
# Arrests vs month
table(mvt$Arrest, mvt$month)
# Visualizing crime trends: histogram of theft dates saved as a JPEG
jpeg('mvtdate2.jpg',width = 1280, height = 720)
hist(mvt$Date, breaks = 100)
dev.off()
# Distribution of theft dates split by arrest outcome
boxplot(mvt$Date~ mvt$Arrest)
# Arrest counts for selected years; the fractions below were transcribed by
# hand from the printed tables (arrest rate = arrests / total for that year).
table(mvt$Arrest, mvt$Year == '2001')
(2152)/(2152+18517)
table(mvt$Arrest, mvt$Year == '2007')
(1212)/(1212+13068)
table(mvt$Arrest, mvt$Year == '2012')
# Popular theft locations
table(mvt$LocationDescription)
sort(table(mvt$LocationDescription))
# Subset to five frequent location categories
top5 = subset(mvt, LocationDescription =="STREET" | LocationDescription =="GAS STATION" | LocationDescription =="ALLEY" | LocationDescription =="PARKING LOT/GARAGE(NON.RESID.)"| LocationDescription =="DRIVEWAY - RESIDENTIAL")
# Re-factor so the unused location levels are dropped (only the 5 remain)
top5$LocationDescription = factor(top5$LocationDescription)
str(top5$LocationDescription)
# After re-factoring: on which weekday does each location see the most thefts?
table(top5$weekday,top5$LocationDescription)
|
/Data_Analytics_using_R/chicago_crime.R
|
no_license
|
harishaaram/R-tutorial
|
R
| false
| false
| 1,433
|
r
|
# Chicago crime -- Motor Vehicle Theft dataset: exploratory analysis of mvt.csv
# (crime dates, arrest rates, and popular theft locations).
# NOTE(review): rm(list = ls()) and setwd() in a script are discouraged -- they
# wipe the caller's workspace and hard-code a machine-specific path.
rm(list = ls())
setwd('/home/harish/R/Data_Analytics_using_R')
mvt = read.csv("mvt.csv")
str(mvt)
# Convert Arrest/Domestic to numeric for tabulation.
# NOTE(review): if these columns were read as factors, as.numeric() yields the
# level codes (1/2), not 0/1 -- confirm the column types shown by str(mvt).
mvt$Arrest = as.numeric(mvt$Arrest)
mvt$Domestic = as.numeric(mvt$Domestic)
str(mvt)
summary(mvt)
# Parse "month/day/year hour:minute" timestamps into Date values,
# then derive month and weekday columns.
DateConvert = as.Date(strptime(mvt$Date, "%m/%d/%y %H:%M"))
mvt$Date = DateConvert
mvt$month = months(DateConvert)
mvt$weekday = weekdays(DateConvert)
# Month of the earliest recorded theft
mvt$month[which.min(mvt$Date)]
# Month of the latest recorded theft
mvt$month[which.max(mvt$Date)]
# Arrests vs month
table(mvt$Arrest, mvt$month)
# Visualizing crime trends: histogram of theft dates saved as a JPEG
jpeg('mvtdate2.jpg',width = 1280, height = 720)
hist(mvt$Date, breaks = 100)
dev.off()
# Distribution of theft dates split by arrest outcome
boxplot(mvt$Date~ mvt$Arrest)
# Arrest counts for selected years; the fractions below were transcribed by
# hand from the printed tables (arrest rate = arrests / total for that year).
table(mvt$Arrest, mvt$Year == '2001')
(2152)/(2152+18517)
table(mvt$Arrest, mvt$Year == '2007')
(1212)/(1212+13068)
table(mvt$Arrest, mvt$Year == '2012')
# Popular theft locations
table(mvt$LocationDescription)
sort(table(mvt$LocationDescription))
# Subset to five frequent location categories
top5 = subset(mvt, LocationDescription =="STREET" | LocationDescription =="GAS STATION" | LocationDescription =="ALLEY" | LocationDescription =="PARKING LOT/GARAGE(NON.RESID.)"| LocationDescription =="DRIVEWAY - RESIDENTIAL")
# Re-factor so the unused location levels are dropped (only the 5 remain)
top5$LocationDescription = factor(top5$LocationDescription)
str(top5$LocationDescription)
# After re-factoring: on which weekday does each location see the most thefts?
table(top5$weekday,top5$LocationDescription)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{package}
\name{knitr-package}
\alias{knitr-package}
\alias{knitr}
\title{A general-purpose tool for dynamic report generation in R}
\description{
This is an alternative tool to Sweave with a more flexible design and new
features like caching and finer control of graphics. It is not limited to LaTeX
and is ready to be customized to process other file formats. See the package
website in the references for more information and examples.
}
\note{
The pronunciation of \pkg{knitr} is similar to \emph{neater} (neater
than what?) or you can think of \emph{knitter} (but it is \emph{single t}).
The name comes from \code{knit} + \code{R} (while \code{Sweave} = \code{S}
+ \code{weave}).
}
\references{
Full documentation and demos: \url{https://yihui.name/knitr/};
FAQs: \url{http://bit.ly/knitr-faq}
}
\seealso{
The core function in this package: \code{\link{knit}}. If you are an
Sweave user, see \code{\link{Sweave2knitr}} on how to convert Sweave files
to \pkg{knitr}.
}
\author{
Yihui Xie <\url{https://yihui.name}>
}
|
/man/knitr-package.Rd
|
no_license
|
mwouts/knitr
|
R
| false
| true
| 1,130
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{package}
\name{knitr-package}
\alias{knitr-package}
\alias{knitr}
\title{A general-purpose tool for dynamic report generation in R}
\description{
This is an alternative tool to Sweave with a more flexible design and new
features like caching and finer control of graphics. It is not limited to LaTeX
and is ready to be customized to process other file formats. See the package
website in the references for more information and examples.
}
\note{
The pronunciation of \pkg{knitr} is similar to \emph{neater} (neater
than what?) or you can think of \emph{knitter} (but it is \emph{single t}).
The name comes from \code{knit} + \code{R} (while \code{Sweave} = \code{S}
+ \code{weave}).
}
\references{
Full documentation and demos: \url{https://yihui.name/knitr/};
FAQs: \url{http://bit.ly/knitr-faq}
}
\seealso{
The core function in this package: \code{\link{knit}}. If you are an
Sweave user, see \code{\link{Sweave2knitr}} on how to convert Sweave files
to \pkg{knitr}.
}
\author{
Yihui Xie <\url{https://yihui.name}>
}
|
# PulseScoreCalculator: derives a per-user, per-question-tag "pulse score" from
# assessment activity and writes one CSV of scores per country.
library('data.table')
library('dplyr')
library('tidyverse')
library('lubridate')
library('recommenderlab')
library('stringr')
#-----------------------------------------------
# 1. Read the Synthetic tag data file for assessment data
#-----------------------------------------------
# ==> Commenting for Upskill Evaluation
#test = load(file = "C:/Divya/NUS Course Materials/FYP/SampleCode/SecondDataSet/assessments_with_tags.RData")
# NOTE(review): `ASSES_PS_EVAL_GB_28` is assumed to already exist in the
# workspace (presumably from the .RData load above) -- confirm before running.
# melt() stacks every non-id column into variable/value pairs; the `value`
# column (position 10) holds the question tag text.
df = melt(ASSES_PS_EVAL_GB_28, id.vars = c("question_id","country","org_id","role_id","submission_utc_ts","no_of_trials","points_earned","masked_user_id"))
names(df)[10] = "question_tags"
# Drop melt's `variable` column, which is no longer needed.
df = within(df, rm("variable"))
# original KUSHAL question activity data set
# dt_dump = fread("C:/Divya/NUS Course Materials/FYP/SampleCode/SecondDataSet/user_assessments.csv")
# dt = as.data.frame(dt_dump)
# consider only Great Britain
gb = df %>%
filter(country == 'GB')
dim(gb)
#585144
# use data tables
dt_dump = as.data.table(gb)
#dt_dump=ASSES_PS_EVAL_GB_28
dt_dump = na.omit(dt_dump)
# Parse the submission timestamp and derive a calendar date. `:=` adds the
# columns in place; eval("name") is just a verbose way of naming the column.
dt_dump[, eval("answer_datetime"):=ymd_hms(dt_dump$submission_utc_ts)]
dt_dump[, eval("answer_date"):=as_date(dt_dump$answer_datetime)]
dt_dump[,.N, country][order(-N)]
# total question activity records in GB: 353108
#-----------------------------------------------
# Question Tag Master
#-- ---------------------------------------------
dt_questions_fullset = unique(dt_dump[,.(question_id, question_tags)]) #2630 unique questions & tag combinations
# qFreq: number of tag rows per question; tFreq: number of questions per tag.
dt_questions_fullset[,qFreq:=.N, by=question_id]
dt_questions_fullset[,tFreq:=.N, by=question_tags]
#3891 question id, with qFreq=4, tFreq=2,24,62
# Difficulty level = 1/(qFreq*tFreq) rounded to 2 dp, floored at 0.01 so very
# common question/tag combinations never round down to a zero weight.
dt_questions_fullset[,diff_level:=ifelse(round((1/qFreq) * (1/tFreq),2) <= 0,
0.01,
round((1/qFreq) * (1/tFreq),2)), by=question_tags]
dt_tag_fullset = dt_questions_fullset[,.N, by=question_tags] #379 unique tags
df_tag_fullset = as.data.frame(dt_tag_fullset)
# Bucket each tag's question count N into bands of width 10 (labels 10..2400).
partitions = seq(0,2400, 10)
bands = seq(10,2400, 10)
df_tag_fullset$qLvl <- cut(df_tag_fullset$N, breaks = partitions, labels = bands)
# Logistic steepness k per band: solves 1/(1+exp(-k*(band-1))) = 0.95, i.e. the
# penalty sigmoid reaches ~0.95 at the band's upper position.
kValues = NULL
for (x in 1:length(bands)) {
kValues[x] = round((log(0.05/0.95))/(-(bands[x]-1)), 4)
}
kSet=NULL
kSet = data.frame(bands, kValues)
# Look up each tag's k by its band label.
df_tag_fullset$k = sapply(df_tag_fullset$qLvl, function(y) kSet[kSet$bands == y, 2])
#-----------------------------------
# Penalty factor
#-----------------------------------
# For each band, precompute a penalty curve: position 1 gets 0.5, later
# positions follow a logistic ramp toward 1. qPoolScale is the curve's mean,
# used later to normalise the per-tag scores.
# NOTE(review): rbind inside the loops below grows data frames O(n^2); fine at
# this size but worth preallocating if the tag set grows.
df_tag_penalty_set = data.frame()
for(z in 1:nrow(kSet)) {
lvl = kSet[z, "bands"]
k = kSet[z, "kValues"]
penalty_set = array(0.5, dim=c(lvl, 1, 1))
for(i in 2:length(penalty_set)) {
penalty_set[i] = round(1/(1 + exp((-k)* (i-1))),4)
}
scaleValue = round(sum(penalty_set)/lvl,4)
penalty_map = data.frame(bands=rep(lvl,lvl),sid=1:lvl, penalty=penalty_set, qPoolScale=rep(scaleValue,lvl))
df_tag_penalty_set=rbind(df_tag_penalty_set,penalty_map)
}
# Expand to one row per (tag, answer-sequence position sid) with its penalty.
df_expanded_tag_set = data.frame()
for(y in 1:nrow(df_tag_fullset)) {
tag = df_tag_fullset[y, "question_tags"]
lvl = as.numeric(as.character(df_tag_fullset[y,"qLvl"]))
tag_set = cbind(question_tags=rep(tag,lvl), df_tag_penalty_set[df_tag_penalty_set$bands==lvl,])
df_expanded_tag_set=rbind(df_expanded_tag_set, tag_set)
}
#---------------------------------------------------------
# Country specific changes
#---------------------------------------------------------
country_list = unique(dt_dump[,country])
country_list = country_list[!is.na(country_list)]
country_list = country_list[country_list != ""]
file_path = "~/wls26092020/wls26092020/Data/pulse_score_GB_28.csv"
# NOTE(review): the loop variable `c` shadows base::c for non-function lookups;
# c("both") still resolves to the function, but renaming it would be clearer.
for (c in country_list) {
c = trimws(c, which = c("both"))
print(paste("Start of Processing: " , c, "...."))
# 2. Consider only the current country's data (comment said "US"; the loop is generic)
dt_cntry_full_data = unique(dt_dump[country == c])
cntDataSet = nrow(dt_cntry_full_data) #185269
print(paste("Number of unique records in ", c, ": ", cntDataSet))
# 3. Remove repeated occurrence of same question id and question tags within the
# same date for every user (keep the latest attempt per user/date/question/tag)
dt_cntry_full_data = dt_cntry_full_data[, .SD[which.max(answer_datetime)],
by=c("masked_user_id", "answer_date", "question_id","question_tags")]
#nrow(dt_cntry_full_data)
# 138113
#unique(dt_cntry_full_data[,.N, c("no_of_trials","points_earned")])[order(no_of_trials)]
#unique(dt_cntry_full_data[(points_earned == 10 | points_earned == 5),.N, c("points_earned","no_of_trials")])
# 4. Keep only correct answers: full credit (10) or partial credit (5)
dt_correct_ans = dt_cntry_full_data[points_earned == 10 | points_earned == 5,]
#nrow(dt_correct_ans) #105777 #117364
# 5. Remove repeated questions answered by users in the past.
# Consider only the most recently answered correct questions,
# with no repetition of the same question id + tag for the same user
dt_final_user_ques_set = dt_correct_ans[,
.SD[which.max(answer_date)],
by=c("masked_user_id","question_id","question_tags")][order(masked_user_id, answer_date)]
#dim(dt_final_user_ques_set) #87311 #96514
#dt_final_user_ques_set[,.N, c("masked_user_id","question_id","question_tags")][order(-N)]
# sid: per-user, per-tag answer sequence number (1 = first question answered)
dt_final_user_ques_set = dt_final_user_ques_set[,sid:=seq_along(answer_date),by=c("masked_user_id","question_tags")]
#dim(dt_final_user_ques_set) #87311 #96514
#unique(dt_final_user_ques_set[,no_of_trials])
dt_final_user_ques_set[,c("submission_utc_ts","answer_datetime"):=NULL]
#dt_final_user_ques_set[,.N,by=c("masked_user_id")][order(-N)]
# Attach the (tag, sid) penalty row; na.omit drops unmatched combinations.
df_ps_comp_set = merge(dt_final_user_ques_set,df_expanded_tag_set, by=c("question_tags","sid"), all = TRUE)
df_ps_comp_set=na.omit(df_ps_comp_set)
# Halve the penalty for partial-credit (< 10 points) answers.
df_ps_comp_set[,penalty:= ifelse(points_earned<10, round(penalty/2,4), penalty)]
#---------------------------------
# Difficulty level
#---------------------------------
#dt_question_data = dt_final_user_ques_set[,.(question_id, question_tags, points_earned, no_of_trials)]
#df_question_data = as.data.frame(dt_question_data)
#df_question_diff_level = df_question_data %>%
# group_by(question_id, question_tags) %>%
# summarise(correct_count = sum(points_earned),
# attempt_count = (sum(no_of_trials)*10),
# diff = round(1 - correct_count/attempt_count,3))
# Use the precomputed frequency-based difficulty instead of the attempt-based
# alternative commented out above.
df_question_diff_level = as.data.frame(dt_questions_fullset)
df_question_diff_level$qFreq = NULL
df_question_diff_level$tFreq = NULL
df_ps_comp_set = merge(df_ps_comp_set,df_question_diff_level, by=c("question_tags","question_id"), all = TRUE)
df_ps_comp_set=na.omit(df_ps_comp_set)
#df_ps_comp_set[,c("correct_count","attempt_count","sid"):=NULL]
df_ps_comp_set[,.N,by=c("masked_user_id","question_tags")]
#32868 #34274
# Pulse score per user+tag: mean(penalty * difficulty) over the user's answers
# for that tag, normalised by the tag's qPoolScale, rounded to 4 dp.
df_ps_comp_set[,score:=round((sum(penalty * diff_level)/.N)/qPoolScale,4),by=c("masked_user_id","question_tags")]
df_final_ps_score_set = unique(df_ps_comp_set[,list(masked_user_id,question_tags,score)])
# NOTE(review): append = TRUE with col.names = TRUE repeats the header row for
# every country appended to the same file -- confirm this is intended.
write.table(df_final_ps_score_set,file_path,append = TRUE, col.names = TRUE, sep = ",", quote = FALSE)
print(paste("Pulse score output file: ", file_path))
print(paste("End of Processing: ", c))
print(paste("----------------------------------------------"))
}
# Clean up intermediates from the workspace.
rm(df,df_expanded_tag_set,df_final_ps_score_set, df_ps_comp_set, df_question_diff_level, df_tag_fullset, df_tag_penalty_set,dt_cntry_full_data,dt_correct_ans, dt_dump,dt_final_user_ques_set)
rm(dt_tag_fullset,gb,kSet,penalty_map,tag_set,bands,c,cntDataSet,i,k,kValues,lvl,partitions,penalty_set,scaleValue,tag,x,y,z)
#-----------------------------------------------------------------
# UPSKILL RECOMMENDER
#-----------------------------------------------------------------
#===>
# df_final_ps_score_set %>%
# filter(score<=0) %>%
# count()
#
# min(df_final_ps_score_set$score)
# max(df_final_ps_score_set$score)
#
# v_users = unique(df_final_ps_score_set$masked_user_id)
# length(v_users)
# #1396 users
# df_users = data.frame(v_users, c(1:length(v_users)))
# colnames(df_users) = c("masked_user_id","user_id")
# v_tags = unique(df_final_ps_score_set$question_tags)
# length(v_tags)
# #379 tags
# df_items = data.frame(v_tags, c(1:length(v_tags)))
# colnames(df_items) = c("question_tags","item_id")
#
# total_users_cnt = length(unique(df_final_ps_score_set$masked_user_id))
# test_users_cnt = round(0.1 * total_users_cnt,0)
# train_users_cnt = total_users_cnt - test_users_cnt
# v_test_users = tail(v_users,test_users_cnt)
# v_train_users = tail(v_users,train_users_cnt)
#
# df_train_set = df_final_ps_score_set[df_final_ps_score_set$masked_user_id %in% v_train_users,]
# df_train_set = merge(df_train_set, df_users, by="masked_user_id", all = TRUE)
# df_train_set = merge(df_train_set, df_items, by="question_tags", all = TRUE)
# df_train_set = na.omit(df_train_set)
# dim(df_train_set)
# # 65725
# df_test_set = df_final_ps_score_set[df_final_ps_score_set$masked_user_id %in% v_test_users,]
# df_test_set = merge(df_test_set, df_users, by="masked_user_id", all = TRUE)
# df_test_set = merge(df_test_set, df_items, by="question_tags", all = TRUE)
# df_test_set = na.omit(df_test_set)
# dim(df_test_set)
# #2537
#
# #-----------------------------------------------------------------
# # Recosystem - Matrix Factorization
# #-----------------------------------------------------------------
# library(recosystem)
# set.seed(123)
#
# train_data = data_memory(df_train_set$user_id, df_train_set$item_id, df_train_set$score)
# test_data = data_memory(df_test_set$user_id, df_test_set$item_id)
# r = Reco()
# model = r$tune(train_data = train_data, opts = list(dim = c(10,20,30),
# lrate = c(0.1,0.2),
# costp_l1 = 0,
# costq_l1 = 0,
# nthread = 1,
# niter = 10))
# model
# r$train(train_data, opts = c(model$min, nthread = 1, niter = 10))
# pred_file = tempfile()
# pred_score = r$predict(test_data = test_data, out_memory())
#
# df_eval_test_set = cbind(df_test_set, pred_score)
# #diff between original and predicted
# df_eval_test_set$sq_err = (df_eval_test_set$score - df_eval_test_set$pred_score)**2
# total_error = sqrt(sum(df_eval_test_set$sq_err))
# # error = 0.6111
#
# recomm_tags = df_eval_test_set %>%
# arrange(desc(pred_score)) %>%
# group_by(masked_user_id) %>%
# slice(1:5)
#
# write.csv(recomm_tags, "C:/Divya/NUS Course Materials/FYP/SampleCode/SecondDataSet/output/upskill_recommendations.csv")
|
/Code/divya/PulseScoreCalculator.R
|
no_license
|
theivanai05/wls26092020
|
R
| false
| false
| 10,968
|
r
|
# PulseScoreCalculator: derives a per-user, per-question-tag "pulse score" from
# assessment activity and writes one CSV of scores per country.
library('data.table')
library('dplyr')
library('tidyverse')
library('lubridate')
library('recommenderlab')
library('stringr')
#-----------------------------------------------
# 1. Read the Synthetic tag data file for assessment data
#-----------------------------------------------
# ==> Commenting for Upskill Evaluation
#test = load(file = "C:/Divya/NUS Course Materials/FYP/SampleCode/SecondDataSet/assessments_with_tags.RData")
# NOTE(review): `ASSES_PS_EVAL_GB_28` is assumed to already exist in the
# workspace (presumably from the .RData load above) -- confirm before running.
# melt() stacks every non-id column into variable/value pairs; the `value`
# column (position 10) holds the question tag text.
df = melt(ASSES_PS_EVAL_GB_28, id.vars = c("question_id","country","org_id","role_id","submission_utc_ts","no_of_trials","points_earned","masked_user_id"))
names(df)[10] = "question_tags"
# Drop melt's `variable` column, which is no longer needed.
df = within(df, rm("variable"))
# original KUSHAL question activity data set
# dt_dump = fread("C:/Divya/NUS Course Materials/FYP/SampleCode/SecondDataSet/user_assessments.csv")
# dt = as.data.frame(dt_dump)
# consider only Great Britain
gb = df %>%
filter(country == 'GB')
dim(gb)
#585144
# use data tables
dt_dump = as.data.table(gb)
#dt_dump=ASSES_PS_EVAL_GB_28
dt_dump = na.omit(dt_dump)
# Parse the submission timestamp and derive a calendar date. `:=` adds the
# columns in place; eval("name") is just a verbose way of naming the column.
dt_dump[, eval("answer_datetime"):=ymd_hms(dt_dump$submission_utc_ts)]
dt_dump[, eval("answer_date"):=as_date(dt_dump$answer_datetime)]
dt_dump[,.N, country][order(-N)]
# total question activity records in GB: 353108
#-----------------------------------------------
# Question Tag Master
#-- ---------------------------------------------
dt_questions_fullset = unique(dt_dump[,.(question_id, question_tags)]) #2630 unique questions & tag combinations
# qFreq: number of tag rows per question; tFreq: number of questions per tag.
dt_questions_fullset[,qFreq:=.N, by=question_id]
dt_questions_fullset[,tFreq:=.N, by=question_tags]
#3891 question id, with qFreq=4, tFreq=2,24,62
# Difficulty level = 1/(qFreq*tFreq) rounded to 2 dp, floored at 0.01 so very
# common question/tag combinations never round down to a zero weight.
dt_questions_fullset[,diff_level:=ifelse(round((1/qFreq) * (1/tFreq),2) <= 0,
0.01,
round((1/qFreq) * (1/tFreq),2)), by=question_tags]
dt_tag_fullset = dt_questions_fullset[,.N, by=question_tags] #379 unique tags
df_tag_fullset = as.data.frame(dt_tag_fullset)
# Bucket each tag's question count N into bands of width 10 (labels 10..2400).
partitions = seq(0,2400, 10)
bands = seq(10,2400, 10)
df_tag_fullset$qLvl <- cut(df_tag_fullset$N, breaks = partitions, labels = bands)
# Logistic steepness k per band: solves 1/(1+exp(-k*(band-1))) = 0.95, i.e. the
# penalty sigmoid reaches ~0.95 at the band's upper position.
kValues = NULL
for (x in 1:length(bands)) {
kValues[x] = round((log(0.05/0.95))/(-(bands[x]-1)), 4)
}
kSet=NULL
kSet = data.frame(bands, kValues)
# Look up each tag's k by its band label.
df_tag_fullset$k = sapply(df_tag_fullset$qLvl, function(y) kSet[kSet$bands == y, 2])
#-----------------------------------
# Penalty factor
#-----------------------------------
# For each band, precompute a penalty curve: position 1 gets 0.5, later
# positions follow a logistic ramp toward 1. qPoolScale is the curve's mean,
# used later to normalise the per-tag scores.
# NOTE(review): rbind inside the loops below grows data frames O(n^2); fine at
# this size but worth preallocating if the tag set grows.
df_tag_penalty_set = data.frame()
for(z in 1:nrow(kSet)) {
lvl = kSet[z, "bands"]
k = kSet[z, "kValues"]
penalty_set = array(0.5, dim=c(lvl, 1, 1))
for(i in 2:length(penalty_set)) {
penalty_set[i] = round(1/(1 + exp((-k)* (i-1))),4)
}
scaleValue = round(sum(penalty_set)/lvl,4)
penalty_map = data.frame(bands=rep(lvl,lvl),sid=1:lvl, penalty=penalty_set, qPoolScale=rep(scaleValue,lvl))
df_tag_penalty_set=rbind(df_tag_penalty_set,penalty_map)
}
# Expand to one row per (tag, answer-sequence position sid) with its penalty.
df_expanded_tag_set = data.frame()
for(y in 1:nrow(df_tag_fullset)) {
tag = df_tag_fullset[y, "question_tags"]
lvl = as.numeric(as.character(df_tag_fullset[y,"qLvl"]))
tag_set = cbind(question_tags=rep(tag,lvl), df_tag_penalty_set[df_tag_penalty_set$bands==lvl,])
df_expanded_tag_set=rbind(df_expanded_tag_set, tag_set)
}
#---------------------------------------------------------
# Country specific changes
#---------------------------------------------------------
country_list = unique(dt_dump[,country])
country_list = country_list[!is.na(country_list)]
country_list = country_list[country_list != ""]
file_path = "~/wls26092020/wls26092020/Data/pulse_score_GB_28.csv"
# NOTE(review): the loop variable `c` shadows base::c for non-function lookups;
# c("both") still resolves to the function, but renaming it would be clearer.
for (c in country_list) {
c = trimws(c, which = c("both"))
print(paste("Start of Processing: " , c, "...."))
# 2. Consider only the current country's data (comment said "US"; the loop is generic)
dt_cntry_full_data = unique(dt_dump[country == c])
cntDataSet = nrow(dt_cntry_full_data) #185269
print(paste("Number of unique records in ", c, ": ", cntDataSet))
# 3. Remove repeated occurrence of same question id and question tags within the
# same date for every user (keep the latest attempt per user/date/question/tag)
dt_cntry_full_data = dt_cntry_full_data[, .SD[which.max(answer_datetime)],
by=c("masked_user_id", "answer_date", "question_id","question_tags")]
#nrow(dt_cntry_full_data)
# 138113
#unique(dt_cntry_full_data[,.N, c("no_of_trials","points_earned")])[order(no_of_trials)]
#unique(dt_cntry_full_data[(points_earned == 10 | points_earned == 5),.N, c("points_earned","no_of_trials")])
# 4. Keep only correct answers: full credit (10) or partial credit (5)
dt_correct_ans = dt_cntry_full_data[points_earned == 10 | points_earned == 5,]
#nrow(dt_correct_ans) #105777 #117364
# 5. Remove repeated questions answered by users in the past.
# Consider only the most recently answered correct questions,
# with no repetition of the same question id + tag for the same user
dt_final_user_ques_set = dt_correct_ans[,
.SD[which.max(answer_date)],
by=c("masked_user_id","question_id","question_tags")][order(masked_user_id, answer_date)]
#dim(dt_final_user_ques_set) #87311 #96514
#dt_final_user_ques_set[,.N, c("masked_user_id","question_id","question_tags")][order(-N)]
# sid: per-user, per-tag answer sequence number (1 = first question answered)
dt_final_user_ques_set = dt_final_user_ques_set[,sid:=seq_along(answer_date),by=c("masked_user_id","question_tags")]
#dim(dt_final_user_ques_set) #87311 #96514
#unique(dt_final_user_ques_set[,no_of_trials])
dt_final_user_ques_set[,c("submission_utc_ts","answer_datetime"):=NULL]
#dt_final_user_ques_set[,.N,by=c("masked_user_id")][order(-N)]
# Attach the (tag, sid) penalty row; na.omit drops unmatched combinations.
df_ps_comp_set = merge(dt_final_user_ques_set,df_expanded_tag_set, by=c("question_tags","sid"), all = TRUE)
df_ps_comp_set=na.omit(df_ps_comp_set)
# Halve the penalty for partial-credit (< 10 points) answers.
df_ps_comp_set[,penalty:= ifelse(points_earned<10, round(penalty/2,4), penalty)]
#---------------------------------
# Difficulty level
#---------------------------------
#dt_question_data = dt_final_user_ques_set[,.(question_id, question_tags, points_earned, no_of_trials)]
#df_question_data = as.data.frame(dt_question_data)
#df_question_diff_level = df_question_data %>%
# group_by(question_id, question_tags) %>%
# summarise(correct_count = sum(points_earned),
# attempt_count = (sum(no_of_trials)*10),
# diff = round(1 - correct_count/attempt_count,3))
# Use the precomputed frequency-based difficulty instead of the attempt-based
# alternative commented out above.
df_question_diff_level = as.data.frame(dt_questions_fullset)
df_question_diff_level$qFreq = NULL
df_question_diff_level$tFreq = NULL
df_ps_comp_set = merge(df_ps_comp_set,df_question_diff_level, by=c("question_tags","question_id"), all = TRUE)
df_ps_comp_set=na.omit(df_ps_comp_set)
#df_ps_comp_set[,c("correct_count","attempt_count","sid"):=NULL]
df_ps_comp_set[,.N,by=c("masked_user_id","question_tags")]
#32868 #34274
# Pulse score per user+tag: mean(penalty * difficulty) over the user's answers
# for that tag, normalised by the tag's qPoolScale, rounded to 4 dp.
df_ps_comp_set[,score:=round((sum(penalty * diff_level)/.N)/qPoolScale,4),by=c("masked_user_id","question_tags")]
df_final_ps_score_set = unique(df_ps_comp_set[,list(masked_user_id,question_tags,score)])
# NOTE(review): append = TRUE with col.names = TRUE repeats the header row for
# every country appended to the same file -- confirm this is intended.
write.table(df_final_ps_score_set,file_path,append = TRUE, col.names = TRUE, sep = ",", quote = FALSE)
print(paste("Pulse score output file: ", file_path))
print(paste("End of Processing: ", c))
print(paste("----------------------------------------------"))
}
# Clean up intermediates from the workspace.
rm(df,df_expanded_tag_set,df_final_ps_score_set, df_ps_comp_set, df_question_diff_level, df_tag_fullset, df_tag_penalty_set,dt_cntry_full_data,dt_correct_ans, dt_dump,dt_final_user_ques_set)
rm(dt_tag_fullset,gb,kSet,penalty_map,tag_set,bands,c,cntDataSet,i,k,kValues,lvl,partitions,penalty_set,scaleValue,tag,x,y,z)
#-----------------------------------------------------------------
# UPSKILL RECOMMENDER
#-----------------------------------------------------------------
#===>
# df_final_ps_score_set %>%
# filter(score<=0) %>%
# count()
#
# min(df_final_ps_score_set$score)
# max(df_final_ps_score_set$score)
#
# v_users = unique(df_final_ps_score_set$masked_user_id)
# length(v_users)
# #1396 users
# df_users = data.frame(v_users, c(1:length(v_users)))
# colnames(df_users) = c("masked_user_id","user_id")
# v_tags = unique(df_final_ps_score_set$question_tags)
# length(v_tags)
# #379 tags
# df_items = data.frame(v_tags, c(1:length(v_tags)))
# colnames(df_items) = c("question_tags","item_id")
#
# total_users_cnt = length(unique(df_final_ps_score_set$masked_user_id))
# test_users_cnt = round(0.1 * total_users_cnt,0)
# train_users_cnt = total_users_cnt - test_users_cnt
# v_test_users = tail(v_users,test_users_cnt)
# v_train_users = tail(v_users,train_users_cnt)
#
# df_train_set = df_final_ps_score_set[df_final_ps_score_set$masked_user_id %in% v_train_users,]
# df_train_set = merge(df_train_set, df_users, by="masked_user_id", all = TRUE)
# df_train_set = merge(df_train_set, df_items, by="question_tags", all = TRUE)
# df_train_set = na.omit(df_train_set)
# dim(df_train_set)
# # 65725
# df_test_set = df_final_ps_score_set[df_final_ps_score_set$masked_user_id %in% v_test_users,]
# df_test_set = merge(df_test_set, df_users, by="masked_user_id", all = TRUE)
# df_test_set = merge(df_test_set, df_items, by="question_tags", all = TRUE)
# df_test_set = na.omit(df_test_set)
# dim(df_test_set)
# #2537
#
# #-----------------------------------------------------------------
# # Recosystem - Matrix Factorization
# #-----------------------------------------------------------------
# library(recosystem)
# set.seed(123)
#
# train_data = data_memory(df_train_set$user_id, df_train_set$item_id, df_train_set$score)
# test_data = data_memory(df_test_set$user_id, df_test_set$item_id)
# r = Reco()
# model = r$tune(train_data = train_data, opts = list(dim = c(10,20,30),
# lrate = c(0.1,0.2),
# costp_l1 = 0,
# costq_l1 = 0,
# nthread = 1,
# niter = 10))
# model
# r$train(train_data, opts = c(model$min, nthread = 1, niter = 10))
# pred_file = tempfile()
# pred_score = r$predict(test_data = test_data, out_memory())
#
# df_eval_test_set = cbind(df_test_set, pred_score)
# #diff between original and predicted
# df_eval_test_set$sq_err = (df_eval_test_set$score - df_eval_test_set$pred_score)**2
# total_error = sqrt(sum(df_eval_test_set$sq_err))
# # error = 0.6111
#
# recomm_tags = df_eval_test_set %>%
# arrange(desc(pred_score)) %>%
# group_by(masked_user_id) %>%
# slice(1:5)
#
# write.csv(recomm_tags, "C:/Divya/NUS Course Materials/FYP/SampleCode/SecondDataSet/output/upskill_recommendations.csv")
|
## Script for the behavioral analyses as reported in
# "The neural computation of goal-directed behavior in complex motivational states"
# Saulin, Horn, Lotze, Kaiser, & Hein
# Expects the CSV files read below to be present in the working directory.
### load packages
library(dplyr)
library(car)
library(lme4)
library(tidyr)
library(quickpsy)
######################################################################################################
#################### read in data
induction <- read.csv("induction.csv", header = TRUE, sep = ";")
induction_prosocial_decisions <- read.csv("induction_prosocial_decisions.csv")
traits_prosocial_decisions <- read.csv("prosocial_decisions_sepcific_traits.csv", header = TRUE)
prosocial_decisions <- read.csv("prosocial_decisions.csv")
prosocial_decisions_absolute <- read.csv("prosocial_decisions_absolute.csv")
prosocial_rts_per_condition <- read.csv("prosocial_rts_per_condition.csv")
data_all_trials <- read.csv("data_all_trials.csv", header = TRUE, sep = ";")
######################################################################################################
################ motive ratings ######################################################################
# ratings in the baseline vs. motive conditions (random intercept per subject)
lmer_induction <- lmer(rating ~ cond_type + (1 | subjectID), induction)
Anova(lmer_induction)
# difference in ratings between motive conditions?
lmer_rating_condition <- lmer(rating ~ condition + (1 | subjectID), induction_prosocial_decisions)
Anova(lmer_rating_condition)
# effect of rating on prosocial decisions in motive conditions?
lmer_prosoc_rating_condition <- lmer(prosoc_decis ~ rating * condition + (1 | subjectID), induction_prosocial_decisions)
Anova(lmer_prosoc_rating_condition)
# focus on single motive conditions
# difference in rating values?
ind_pros_single <- induction_prosocial_decisions[induction_prosocial_decisions$condition == "empathy" | induction_prosocial_decisions$condition == "reciprocity", ]
lmer_rating_single <- lmer(rating ~ condition + (1 | subjectID), ind_pros_single)
Anova(lmer_rating_single)
# effect of rating on prosocial decisions in the single motive conditions?
lmer_rating_prosoc_single <- lmer(prosoc_decis ~ rating * condition + (1 | subjectID), ind_pros_single)
Anova(lmer_rating_prosoc_single)
# specificity of induction for the motive conditions regarding empathy and reciprocity?
# FIX(review): the original referenced `reb_big_reg_4conds`, which is never defined
# in this script. `traits_prosocial_decisions` is loaded above and otherwise unused,
# and is the only data set matching the trait-measure predictors used here --
# confirm against the original analysis records.
traits_motive_conds <- traits_prosocial_decisions[traits_prosocial_decisions$condition != "baseline", ]
lmer_trait_motive_conds <- lmer(scale(prosocial_decisions) ~ trait_measure_value * trait_measure_type * condition + (1 | ID), traits_motive_conds)
Anova(lmer_trait_motive_conds)
######################################################################################################
################ frequency of prosocial decisions ####################################################
# difference in the frequency of prosocial decisions between baseline and motive conditions?
lmer_prosoc_decis_baselinevsmotive <- lmer(decis ~ cond_type + (1 | subjectID), prosocial_decisions)
Anova(lmer_prosoc_decis_baselinevsmotive)
# focus on pairwise comparisons between motive conditions
# reciprocity vs empathy
lmer_recipVSemp <- lmer(decis ~ condition + (1 | subjectID), prosocial_decisions[prosocial_decisions$condition != "control" & prosocial_decisions$condition != "multi-motive", ])
Anova(lmer_recipVSemp)
# multi-motive vs empathy
lmer_multiVSemp <- lmer(decis ~ condition + (1 | subjectID), prosocial_decisions[prosocial_decisions$condition != "control" & prosocial_decisions$condition != "reciprocity", ])
Anova(lmer_multiVSemp)
# multi-motive vs reciprocity
lmer_multiVSrecip <- lmer(decis ~ condition + (1 | subjectID), prosocial_decisions[prosocial_decisions$condition != "control" & prosocial_decisions$condition != "empathy", ])
Anova(lmer_multiVSrecip)
# compute the percent change in prosocial choices in the multi-motive condition
# relative to each single motive condition, and test it against zero
prosoc_multi <- prosocial_decisions$decis[prosocial_decisions$condition == "multi-motive"]
prosoc_recip <- prosocial_decisions$decis[prosocial_decisions$condition == "reciprocity"]
prosoc_emp <- prosocial_decisions$decis[prosocial_decisions$condition == "empathy"]
reciprocity_index <- (prosoc_multi - prosoc_recip) / prosoc_recip * 100
t.test(reciprocity_index)
empathy_index <- (prosoc_multi - prosoc_emp) / prosoc_emp * 100
t.test(empathy_index)
######################################################################################################
################ reaction times ######################################################################
# difference in reaction times in the baseline condition vs the motive conditions?
lmer_RT_cond_type <- lmer(rt ~ cond_type + (1 | ID), prosocial_rts_per_condition)
Anova(lmer_RT_cond_type)
# difference between motive conditions?
lmer_RT_conditions_only_motives <- lmer(rt ~ condition + (1 | ID), prosocial_rts_per_condition[prosocial_rts_per_condition$condition != "control", ])
Anova(lmer_RT_conditions_only_motives)
######################################################################################################
################ distribution of prosocial decisions #################################################
# compare distributions of the absolute number of prosocial decisions in the different conditions
ks.test(prosocial_decisions_absolute$control, prosocial_decisions_absolute$empathy)
ks.test(prosocial_decisions_absolute$reciprocity, prosocial_decisions_absolute$empathy)
ks.test(prosocial_decisions_absolute$reciprocity, prosocial_decisions_absolute$emprecip)
ks.test(prosocial_decisions_absolute$empathy, prosocial_decisions_absolute$emprecip)
ks.test(prosocial_decisions_absolute$reciprocity, prosocial_decisions_absolute$control)
ks.test(prosocial_decisions_absolute$control, prosocial_decisions_absolute$emprecip)
####################################################################################################
################ distribution of rts ###############################################################
# mean reaction time per subject and condition
rt_per_condition_and_id <- aggregate(rt ~ ID + condition, data_all_trials, mean)
# compare distributions of the reaction times in the different conditions
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "control"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "empathy"])
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "reciprocity"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "empathy"])
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "reciprocity"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "multi-motive"])
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "empathy"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "multi-motive"])
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "reciprocity"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "control"])
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "multi-motive"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition == "control"])
####################################################################################################
################ point information and prosocial behavior ##########################################
### do the possible loss for the self or the possible gain for the partner differentially influence
### the frequency of prosocial decisions in the different conditions?
glmer_self_loss_other_gain <- glmer(resp_code ~ self_poss_loss * condition + other_poss_gain * condition + (1 | ID), family = "binomial", data_all_trials)
Anova(glmer_self_loss_other_gain)
### does the difference in point equality differentially influence the frequency of
### prosocial decisions in the different conditions?
glmer_diff_equality <- glmer(resp_code ~ diff_diff * condition + (1 | ID), family = "binomial", data_all_trials)
Anova(glmer_diff_equality)
####################################################################################################
############# psychometric functions ###############################################################
# psychometric functions for the other's possible gain
# compute one function per condition
fit_other_gain <- quickpsy(data_all_trials, other_poss_gain, resp_code, grouping = .(condition))
# compute one function per subject and condition
fit_other_gain_subj <- quickpsy(data_all_trials, other_poss_gain, resp_code, grouping = .(condition, ID))
# do the per-subject thresholds differ between conditions?
lmer_thre_other_gain <- lmer(thre ~ condition + (1 | ID), fit_other_gain_subj$thresholds)
Anova(lmer_thre_other_gain)
|
/behavioral_analyses.R
|
permissive
|
AnneSaulin/complex_motivations
|
R
| false
| false
| 8,753
|
r
|
## script for the behavioral analyses as reported in
# "The neural computation of goal-directed behavior in complex motivational states"
# Saulin, Horn, Lotze, Kaiser, & Hein
### load packages
library(dplyr)
library(car)
library(lme4)
library(tidyr)
library(quickpsy)
######################################################################################################
#################### read in data
# (file names are taken as-is; note the "sepcific" spelling below matches the
# file shipped with the data -- do not "fix" it without renaming the file)
induction <- read.csv("induction.csv", header = TRUE, sep = ";")
induction_prosocial_decisions <- read.csv("induction_prosocial_decisions.csv")
traits_prosocial_decisions <- read.csv("prosocial_decisions_sepcific_traits.csv", header = TRUE)
prosocial_decisions <- read.csv("prosocial_decisions.csv")
prosocial_decisions_absolute <- read.csv("prosocial_decisions_absolute.csv")
prosocial_rts_per_condition <- read.csv("prosocial_rts_per_condition.csv")
data_all_trials <- read.csv("data_all_trials.csv",header = TRUE, sep = ";")
######################################################################################################
################ motive ratings ######################################################################
# ratings in the baseline vs. motive conditions
lmer_induction <- lmer(rating ~ cond_type + (1 | subjectID), induction)
Anova(lmer_induction)
# difference in ratings between motive conditions?
lmer_rating_condition <- lmer(rating ~ condition + (1|subjectID),induction_prosocial_decisions )
Anova(lmer_rating_condition)
# effect of rating on prosocial decisions in motive conditions?
lmer_prosoc_rating_condition <- lmer(prosoc_decis ~ rating*condition + (1|subjectID),induction_prosocial_decisions )
Anova(lmer_prosoc_rating_condition)
# focus on single motive conditions (empathy, reciprocity only)
# difference in rating values?
ind_pros_single <- induction_prosocial_decisions[induction_prosocial_decisions$condition=="empathy" |induction_prosocial_decisions$condition=="reciprocity",]
lmer_rating_single <- lmer(rating ~ condition + (1|subjectID),ind_pros_single)
Anova(lmer_rating_single)
# effect of rating on prosocial decisions in the single motive conditions?
lmer_rating_prosoc_single <- lmer(prosoc_decis ~ rating*condition + (1|subjectID),ind_pros_single)
Anova(lmer_rating_prosoc_single)
# specificity of induction for the motive conditions regarding empathy and reciprocity?
# NOTE(review): the original passed `reb_big_reg_4conds`, a data frame that is
# never created anywhere in this script; `traits_prosocial_decisions` (read at
# the top from "prosocial_decisions_sepcific_traits.csv" and otherwise unused)
# holds the trait-specificity data, so it is used here instead.
lmer_trait_motive_conds <- lmer(scale(prosocial_decisions) ~ trait_measure_value*trait_measure_type*condition + (1|ID), traits_prosocial_decisions[traits_prosocial_decisions$condition!="baseline",])
Anova(lmer_trait_motive_conds)
######################################################################################################
################ frequency of prosocial decisions ####################################################
# difference in the frequency of prosocial decisions between baseline and motive conditions?
lmer_prosoc_decis_baselinevsmotive <- lmer(decis ~ cond_type + (1|subjectID), prosocial_decisions)
Anova(lmer_prosoc_decis_baselinevsmotive)
# focus on pairwise comparisons between the three motive conditions
# reciprocity vs empathy
lmer_recipVSemp <- lmer(decis ~ condition + (1|subjectID), prosocial_decisions[prosocial_decisions$condition!="control" & prosocial_decisions$condition!="multi-motive",])
Anova(lmer_recipVSemp)
# multi-motive vs empathy
lmer_multiVSemp <- lmer(decis ~ condition + (1|subjectID), prosocial_decisions[prosocial_decisions$condition!="control" & prosocial_decisions$condition!="reciprocity",])
Anova(lmer_multiVSemp)
# multi-motive vs reciprocity
lmer_multiVSrecip <- lmer(decis ~ condition + (1|subjectID), prosocial_decisions[prosocial_decisions$condition!="control" & prosocial_decisions$condition!="empathy",])
Anova(lmer_multiVSrecip)
# compute the percent change in prosocial choices in the multi-motive condition
# relative to each single motive condition, and test it against zero
prosoc_multi <- prosocial_decisions$decis[prosocial_decisions$condition=="multi-motive"]
prosoc_recip <- prosocial_decisions$decis[prosocial_decisions$condition=="reciprocity"]
prosoc_emp <- prosocial_decisions$decis[prosocial_decisions$condition=="empathy"]
reciprocity_index <- (prosoc_multi-prosoc_recip)/prosoc_recip*100
t.test(reciprocity_index)
empathy_index <- (prosoc_multi-prosoc_emp)/prosoc_emp*100
t.test(empathy_index)
# difference in reaction times in the baseline condition vs the motive conditions?
# (mixed model: RT ~ condition type, random intercept per participant)
lmer_RT_cond_type <- lmer(rt ~ cond_type + (1|ID), prosocial_rts_per_condition)
Anova(lmer_RT_cond_type)
# difference between motive conditions? (baseline/"control" rows excluded)
lmer_RT_conditions_only_motives <- lmer(rt ~ condition + (1|ID), prosocial_rts_per_condition[prosocial_rts_per_condition$condition!="control",])
Anova(lmer_RT_conditions_only_motives)
######################################################################################################
################ distribution of prosocial decisions #################################################
# compare distributions of the absolute number of prosocial decisions in the different conditions
# (pairwise two-sample Kolmogorov-Smirnov tests; "emprecip" presumably denotes
# the multi-motive empathy+reciprocity condition -- confirm against the data file)
ks.test(prosocial_decisions_absolute$control, prosocial_decisions_absolute$empathy)
ks.test(prosocial_decisions_absolute$reciprocity, prosocial_decisions_absolute$empathy)
ks.test(prosocial_decisions_absolute$reciprocity, prosocial_decisions_absolute$emprecip)
ks.test(prosocial_decisions_absolute$empathy, prosocial_decisions_absolute$emprecip)
ks.test(prosocial_decisions_absolute$reciprocity, prosocial_decisions_absolute$control)
ks.test(prosocial_decisions_absolute$control, prosocial_decisions_absolute$emprecip)
####################################################################################################
################ distribution of rts ###############################################################
# mean RT per participant and condition
rt_per_condition_and_id <- aggregate(rt ~ ID + condition, data_all_trials, mean)
# compare distributions of the reaction times in the different conditions
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="control"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="empathy"])
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="reciprocity"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="empathy"])
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="reciprocity"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="multi-motive"])
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="empathy"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="multi-motive"])
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="reciprocity"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="control"])
ks.test(rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="multi-motive"], rt_per_condition_and_id$rt[rt_per_condition_and_id$condition=="control"])
####################################################################################################
################ point information and prosocial behavior ##########################################
### do the possible loss for the self or the possible gain for the partner differentially influence
### the frequency of prosocial decisions in the different conditions?
# (logistic mixed model on single-trial choices, random intercept per participant)
glmer_self_loss_other_gain <- glmer(resp_code ~ self_poss_loss*condition + other_poss_gain*condition + (1|ID), family = "binomial", data_all_trials)
Anova(glmer_self_loss_other_gain)
### does the difference in point equality differentially influence the frequency of prosocial decisions in the different conditions?
glmer_diff_equality <- glmer(resp_code ~ diff_diff*condition + (1|ID), family = "binomial", data_all_trials)
Anova(glmer_diff_equality)
####################################################################################################
############# psychometric functions ###############################################################
# psychometric functions for the other's possible gain
# compute one function per condition
fit_other_gain <- quickpsy(data_all_trials, other_poss_gain, resp_code, grouping = .(condition))
# compute one function per subject and condition
fit_other_gain_subj <- quickpsy(data_all_trials, other_poss_gain, resp_code, grouping = .(condition, ID))
# do the per-subject psychometric thresholds differ between conditions?
lmer_thre_other_gain <- lmer(thre ~ condition + (1|ID), fit_other_gain_subj$thresholds)
Anova(lmer_thre_other_gain)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagnose.R
\name{appraise}
\alias{appraise}
\title{Model diagnostic plots}
\usage{
appraise(
model,
method = c("direct", "simulate", "normal"),
n_uniform = 10,
n_simulate = 50,
type = c("deviance", "pearson", "response"),
n_bins = c("sturges", "scott", "fd"),
ncol = 2,
level = 0.9,
alpha = 0.2,
...
)
}
\arguments{
\item{model}{a fitted model. Currently only class \code{"gam"}.}
\item{method}{character; method used to generate theoretical quantiles.}
\item{n_uniform}{numeric; number of times to randomize uniform quantiles
in the direct computation method (\code{method = "direct"}) for QQ plots.}
\item{n_simulate}{numeric; number of data sets to simulate from the estimated
model when using the simulation method (\code{method = "simulate"}) for QQ
plots.}
\item{type}{character; type of residuals to use. Only \code{"deviance"},
\code{"response"}, and \code{"pearson"} residuals are allowed.}
\item{n_bins}{character or numeric; either the number of bins or a string
indicating how to calculate the number of bins.}
\item{ncol}{numeric; number of columns to draw plots in. See
\code{\link[cowplot:plot_grid]{cowplot::plot_grid()}}.}
\item{level}{numeric; the coverage level for QQ plot reference intervals.
Must be strictly \verb{0 < level < 1}. Only used with \code{method = "simulate"}.}
\item{alpha}{numeric; the level of alpha transparency for the QQ plot
reference interval when \code{method = "simulate"}.}
\item{...}{arguments passed to \code{\link[cowplot:plot_grid]{cowplot::plot_grid()}}, except for \code{align}
and \code{axis}, which are set internally.}
}
\description{
Model diagnostic plots
}
\examples{
library(mgcv)
\dontshow{set.seed(2)}
## simulate some data...
dat <- gamSim(1, n = 400, dist = "normal", scale = 2)
mod <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat)
## run some basic model checks, including checking
## smoothing basis dimensions...
appraise(mod)
}
\seealso{
The plots are produced by functions \code{\link[gratia:qq_plot]{gratia::qq_plot()}},
\code{\link[gratia:residuals_linpred_plot]{gratia::residuals_linpred_plot()}}, \code{\link[gratia:residuals_hist_plot]{gratia::residuals_hist_plot()}},
and \code{\link[gratia:observed_fitted_plot]{gratia::observed_fitted_plot()}}.
}
|
/man/appraise.Rd
|
permissive
|
robinsonjj/gratia
|
R
| false
| true
| 2,335
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagnose.R
\name{appraise}
\alias{appraise}
\title{Model diagnostic plots}
\usage{
appraise(
model,
method = c("direct", "simulate", "normal"),
n_uniform = 10,
n_simulate = 50,
type = c("deviance", "pearson", "response"),
n_bins = c("sturges", "scott", "fd"),
ncol = 2,
level = 0.9,
alpha = 0.2,
...
)
}
\arguments{
\item{model}{a fitted model. Currently only class \code{"gam"}.}
\item{method}{character; method used to generate theoretical quantiles.}
\item{n_uniform}{numeric; number of times to randomize uniform quantiles
in the direct computation method (\code{method = "direct"}) for QQ plots.}
\item{n_simulate}{numeric; number of data sets to simulate from the estimated
model when using the simulation method (\code{method = "simulate"}) for QQ
plots.}
\item{type}{character; type of residuals to use. Only \code{"deviance"},
\code{"response"}, and \code{"pearson"} residuals are allowed.}
\item{n_bins}{character or numeric; either the number of bins or a string
indicating how to calculate the number of bins.}
\item{ncol}{numeric; number of columns to draw plots in. See
\code{\link[cowplot:plot_grid]{cowplot::plot_grid()}}.}
\item{level}{numeric; the coverage level for QQ plot reference intervals.
Must be strictly \verb{0 < level < 1}. Only used with \code{method = "simulate"}.}
\item{alpha}{numeric; the level of alpha transparency for the QQ plot
reference interval when \code{method = "simulate"}.}
\item{...}{arguments passed to \code{\link[cowplot:plot_grid]{cowplot::plot_grid()}}, except for \code{align}
and \code{axis}, which are set internally.}
}
\description{
Model diagnostic plots
}
\examples{
library(mgcv)
\dontshow{set.seed(2)}
## simulate some data...
dat <- gamSim(1, n = 400, dist = "normal", scale = 2)
mod <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat)
## run some basic model checks, including checking
## smoothing basis dimensions...
appraise(mod)
}
\seealso{
The plots are produced by functions \code{\link[gratia:qq_plot]{gratia::qq_plot()}},
\code{\link[gratia:residuals_linpred_plot]{gratia::residuals_linpred_plot()}}, \code{\link[gratia:residuals_hist_plot]{gratia::residuals_hist_plot()}},
and \code{\link[gratia:observed_fitted_plot]{gratia::observed_fitted_plot()}}.
}
|
test_that("`convert_input()` able to call the respective download function for a data item with the correct arguments", {
  # The mocked remote call records its arguments so we can inspect the script
  # string that convert_input() builds for the download function.
  mocked_res <- mockery::mock(list(c("A", "B")))
  # Stub out DB lookups and the remote execution so no database/host is needed.
  mockery::stub(convert_input, 'dbfile.input.check', data.frame())
  mockery::stub(convert_input, 'db.query', data.frame(id = 1))
  mockery::stub(convert_input, 'PEcAn.remote::remote.execute.R', mocked_res)
  mockery::stub(convert_input, 'purrr::map_dfr', data.frame(missing = c(FALSE), empty = c(FALSE)))
  convert_input(
    input.id = NA,
    outfolder = "test",
    formatname = NULL,
    mimetype = NULL,
    site.id = 1,
    start_date = "2011-01-01",
    end_date = "2011-12-31",
    pkg = 'PEcAn.data.atmosphere',
    fcn = 'download.AmerifluxLBL',
    con = NULL,
    host = data.frame(name = "localhost"),
    browndog = NULL,
    write = FALSE,
    lat.in = 40,
    lon.in = -88
  )
  # The first (and only) remote call must carry the fully rendered R script.
  args <- mockery::mock_args(mocked_res)
  expect_equal(
    args[[1]]$script,
    "PEcAn.data.atmosphere::download.AmerifluxLBL(lat.in=40, lon.in=-88, overwrite=FALSE, outfolder='test/', start_date='2011-01-01', end_date='2011-12-31')"
  )
})
test_that("`.get.file.deletion.commands()` able to return correct file deletion commands", {
  # The helper returns three R command strings: stash files in ./tmp, delete
  # the stash, and restore the files from the stash.
  res <- .get.file.deletion.commands(c("test"))
  expect_equal(res$move.to.tmp, "dir.create(c('./tmp'), recursive=TRUE, showWarnings=FALSE); file.rename(from=c('test'), to=c('./tmp/test'))")
  expect_equal(res$delete.tmp, "unlink(c('./tmp'), recursive=TRUE)")
  expect_equal(res$replace.from.tmp, "file.rename(from=c('./tmp/test'), to=c('test'));unlink(c('./tmp'), recursive=TRUE)")
})
|
/base/db/tests/testthat/test.convert_input.R
|
permissive
|
PecanProject/pecan
|
R
| false
| false
| 1,594
|
r
|
test_that("`convert_input()` able to call the respective download function for a data item with the correct arguments", {
  # The mocked remote call records its arguments so we can inspect the script
  # string that convert_input() builds for the download function.
  mocked_res <- mockery::mock(list(c("A", "B")))
  # Stub out DB lookups and the remote execution so no database/host is needed.
  mockery::stub(convert_input, 'dbfile.input.check', data.frame())
  mockery::stub(convert_input, 'db.query', data.frame(id = 1))
  mockery::stub(convert_input, 'PEcAn.remote::remote.execute.R', mocked_res)
  mockery::stub(convert_input, 'purrr::map_dfr', data.frame(missing = c(FALSE), empty = c(FALSE)))
  convert_input(
    input.id = NA,
    outfolder = "test",
    formatname = NULL,
    mimetype = NULL,
    site.id = 1,
    start_date = "2011-01-01",
    end_date = "2011-12-31",
    pkg = 'PEcAn.data.atmosphere',
    fcn = 'download.AmerifluxLBL',
    con = NULL,
    host = data.frame(name = "localhost"),
    browndog = NULL,
    write = FALSE,
    lat.in = 40,
    lon.in = -88
  )
  # The first (and only) remote call must carry the fully rendered R script.
  args <- mockery::mock_args(mocked_res)
  expect_equal(
    args[[1]]$script,
    "PEcAn.data.atmosphere::download.AmerifluxLBL(lat.in=40, lon.in=-88, overwrite=FALSE, outfolder='test/', start_date='2011-01-01', end_date='2011-12-31')"
  )
})
test_that("`.get.file.deletion.commands()` able to return correct file deletion commands", {
  # The helper returns three R command strings: stash files in ./tmp, delete
  # the stash, and restore the files from the stash.
  res <- .get.file.deletion.commands(c("test"))
  expect_equal(res$move.to.tmp, "dir.create(c('./tmp'), recursive=TRUE, showWarnings=FALSE); file.rename(from=c('test'), to=c('./tmp/test'))")
  expect_equal(res$delete.tmp, "unlink(c('./tmp'), recursive=TRUE)")
  expect_equal(res$replace.from.tmp, "file.rename(from=c('./tmp/test'), to=c('test'));unlink(c('./tmp'), recursive=TRUE)")
})
|
# Tsallis beta (relative) entropy of order q between an observed distribution
# (NorP) and an expected one (NorPexp). S3 generic; dispatches on the class of
# NorP (ProbaVector, AbdVector, integer, or numeric).
TsallisBeta <-
function(NorP, NorPexp = NULL, q = 1, Correction = "Best", CheckArguments = TRUE, Ps = NULL, Ns = NULL, Pexp = NULL, Nexp = NULL)
{
  UseMethod("TsallisBeta")
}
# Method for probability vectors: plug-in (uncorrected) estimator.
# NorP and NorPexp must be same-length probability vectors; lnq() is the
# deformed logarithm of order q defined elsewhere in the package.
TsallisBeta.ProbaVector <-
function(NorP, NorPexp = NULL, q = 1, Correction = "Best", CheckArguments = TRUE, Ps = NULL, Ns = NULL, Pexp = NULL, Nexp = NULL)
{
  if (CheckArguments)
    CheckentropartArguments()
  if (length(NorP) != length(NorPexp)) {
    stop("NorP and NorPexp should have the same length.")
  }
  # Per-species contribution p^q * lnq(p/p_exp, q)
  dataBeta <- NorP^q * lnq(NorP/NorPexp, q)
  # Enforce the convention 0 * log(0) = 0 for absent species.
  dataBeta[NorP == 0] <- 0
  entropy <- sum(dataBeta)
  # No bias correction was applied.
  names(entropy) <- "None"
  return (entropy)
}
# Method for abundance vectors: delegates to the bias-corrected estimator.
TsallisBeta.AbdVector <-
function(NorP, NorPexp = NULL, q = 1, Correction = "Best", CheckArguments = TRUE, Ps = NULL, Ns = NULL, Pexp = NULL, Nexp = NULL)
{
  return (bcTsallisBeta(Ns=NorP, Nexp=NorPexp, q=q, Correction=Correction, CheckArguments=CheckArguments))
}
# Method for integer (abundance) vectors. Accepts the abundances either
# positionally (NorP/NorPexp) or via the named aliases Ns/Nexp, then delegates
# to the bias-corrected estimator.
TsallisBeta.integer <-
function(NorP, NorPexp = NULL, q = 1, Correction = "Best", CheckArguments = TRUE, Ps = NULL, Ns = NULL, Pexp = NULL, Nexp = NULL)
{
  if (missing(NorP)){
    if (!missing(Ns)) {
      NorP <- Ns
    } else {
      stop("An argument NorP or Ns must be provided.")
    }
  }
  if (missing(NorPexp)){
    if (!missing(Nexp)) {
      NorPexp <- Nexp
    } else {
      stop("An argument NorPexp or Nexp must be provided.")
    }
  }
  return (bcTsallisBeta(Ns=NorP, Nexp=NorPexp, q=q, Correction=Correction, CheckArguments=CheckArguments))
}
# Method for plain numeric vectors, which may hold either probabilities
# (summing to 1) or abundances; dispatches to the matching estimator.
# Aliases: Ps/Ns for NorP, Pexp/Nexp for NorPexp.
TsallisBeta.numeric <-
function(NorP, NorPexp = NULL, q = 1, Correction = "Best", CheckArguments = TRUE, Ps = NULL, Ns = NULL, Pexp = NULL, Nexp = NULL)
{
  # Accept the observed distribution through NorP, Ps, or Ns.
  if (missing(NorP)){
    if (!missing(Ps)) {
      NorP <- Ps
    } else {
      if (!missing(Ns)) {
        NorP <- Ns
      } else {
        stop("An argument NorP or Ps or Ns must be provided.")
      }
    }
  }
  # Accept the expected distribution through NorPexp, Pexp, or Nexp.
  if (missing(NorPexp)){
    if (!missing(Pexp)) {
      NorPexp <- Pexp
    } else {
      if (!missing(Nexp)) {
        # Bug fix: was 'NorP <- Nexp', which clobbered the observed
        # distribution and left NorPexp unset.
        NorPexp <- Nexp
      } else {
        stop("An argument NorPexp or Pexp or Nexp must be provided.")
      }
    }
  }
  if (abs(sum(NorP) - 1) < length(NorP)*.Machine$double.eps) {
    # Probabilities sum to 1, allowing rounding error
    return (TsallisBeta.ProbaVector(NorP, NorPexp, q=q, CheckArguments=CheckArguments))
  } else {
    # Abundances
    return (TsallisBeta.AbdVector(NorP, NorPexp, q=q, Correction=Correction, CheckArguments=CheckArguments))
  }
}
# Bias-corrected Tsallis beta entropy from abundance vectors.
# Ns:   observed abundances; Nexp: expected abundances (same length).
# q:    entropy order. Correction: "None", "ChaoShen" or "Best".
# Coverage() and lnq() are package helpers defined elsewhere.
bcTsallisBeta <-
function(Ns, Nexp = NULL, q, Correction = "Best", CheckArguments = TRUE)
{
  if (CheckArguments)
    CheckentropartArguments()
  if (length(Ns) != length(Nexp)) {
    stop("Ns and Nexp should have the same length.")
  }
  # No correction: normalize to probabilities and use the plug-in estimator.
  if (Correction == "None") {
    return (TsallisBeta.ProbaVector(Ns/sum(Ns), Nexp/sum(Nexp), q, CheckArguments=FALSE))
  }
  # Sample coverage of the observed community
  Nrecords <- sum(Ns)
  SampleCoverage <- Coverage(Ns, CheckArguments=FALSE)
  # Sample coverage of the expected community
  Nrecordsexp <- sum(Nexp)
  SampleCoverageexp <- Coverage(Nexp, CheckArguments=FALSE)
  if (Correction == "ChaoShen" | Correction == "Best") {
    # Coverage-adjusted probabilities (Chao-Shen style), observed and expected.
    CiPsi <- SampleCoverage * Ns / Nrecords
    CPs <- SampleCoverageexp * Nexp / Nrecordsexp
    # Each term is reweighted by the inclusion probability 1-(1-p)^n.
    dataBeta <- CiPsi^q * lnq(CiPsi/CPs, q) / (1 -(1-CiPsi)^Nrecords)
    # force 0log0=0
    dataBeta[Ns == 0] <- 0
    entropy <- sum(dataBeta)
    names(entropy) <- Correction
    return (entropy)
  }
  # Unknown correction name: warn and return NA rather than guessing.
  warning("Correction was not recognized")
  return (NA)
}
|
/R/TsallisBeta.R
|
no_license
|
thalesshannonwatson/entropart
|
R
| false
| false
| 3,734
|
r
|
# Tsallis beta (relative) entropy of order q between an observed distribution
# (NorP) and an expected one (NorPexp). S3 generic; dispatches on the class of
# NorP (ProbaVector, AbdVector, integer, or numeric).
TsallisBeta <-
function(NorP, NorPexp = NULL, q = 1, Correction = "Best", CheckArguments = TRUE, Ps = NULL, Ns = NULL, Pexp = NULL, Nexp = NULL)
{
  UseMethod("TsallisBeta")
}
# Method for probability vectors: plug-in (uncorrected) estimator.
TsallisBeta.ProbaVector <-
function(NorP, NorPexp = NULL, q = 1, Correction = "Best", CheckArguments = TRUE, Ps = NULL, Ns = NULL, Pexp = NULL, Nexp = NULL)
{
  if (CheckArguments)
    CheckentropartArguments()
  if (length(NorP) != length(NorPexp)) {
    stop("NorP and NorPexp should have the same length.")
  }
  # Per-species contribution p^q * lnq(p/p_exp, q)
  dataBeta <- NorP^q * lnq(NorP/NorPexp, q)
  # Enforce the convention 0 * log(0) = 0 for absent species.
  dataBeta[NorP == 0] <- 0
  entropy <- sum(dataBeta)
  names(entropy) <- "None"
  return (entropy)
}
# Method for abundance vectors: delegates to the bias-corrected estimator.
TsallisBeta.AbdVector <-
function(NorP, NorPexp = NULL, q = 1, Correction = "Best", CheckArguments = TRUE, Ps = NULL, Ns = NULL, Pexp = NULL, Nexp = NULL)
{
  return (bcTsallisBeta(Ns=NorP, Nexp=NorPexp, q=q, Correction=Correction, CheckArguments=CheckArguments))
}
# Method for integer (abundance) vectors; accepts Ns/Nexp as named aliases.
TsallisBeta.integer <-
function(NorP, NorPexp = NULL, q = 1, Correction = "Best", CheckArguments = TRUE, Ps = NULL, Ns = NULL, Pexp = NULL, Nexp = NULL)
{
  if (missing(NorP)){
    if (!missing(Ns)) {
      NorP <- Ns
    } else {
      stop("An argument NorP or Ns must be provided.")
    }
  }
  if (missing(NorPexp)){
    if (!missing(Nexp)) {
      NorPexp <- Nexp
    } else {
      stop("An argument NorPexp or Nexp must be provided.")
    }
  }
  return (bcTsallisBeta(Ns=NorP, Nexp=NorPexp, q=q, Correction=Correction, CheckArguments=CheckArguments))
}
# Method for plain numeric vectors, which may hold either probabilities
# (summing to 1) or abundances; dispatches to the matching estimator.
# Aliases: Ps/Ns for NorP, Pexp/Nexp for NorPexp.
TsallisBeta.numeric <-
function(NorP, NorPexp = NULL, q = 1, Correction = "Best", CheckArguments = TRUE, Ps = NULL, Ns = NULL, Pexp = NULL, Nexp = NULL)
{
  # Accept the observed distribution through NorP, Ps, or Ns.
  if (missing(NorP)){
    if (!missing(Ps)) {
      NorP <- Ps
    } else {
      if (!missing(Ns)) {
        NorP <- Ns
      } else {
        stop("An argument NorP or Ps or Ns must be provided.")
      }
    }
  }
  # Accept the expected distribution through NorPexp, Pexp, or Nexp.
  if (missing(NorPexp)){
    if (!missing(Pexp)) {
      NorPexp <- Pexp
    } else {
      if (!missing(Nexp)) {
        # Bug fix: was 'NorP <- Nexp', which clobbered the observed
        # distribution and left NorPexp unset.
        NorPexp <- Nexp
      } else {
        stop("An argument NorPexp or Pexp or Nexp must be provided.")
      }
    }
  }
  if (abs(sum(NorP) - 1) < length(NorP)*.Machine$double.eps) {
    # Probabilities sum to 1, allowing rounding error
    return (TsallisBeta.ProbaVector(NorP, NorPexp, q=q, CheckArguments=CheckArguments))
  } else {
    # Abundances
    return (TsallisBeta.AbdVector(NorP, NorPexp, q=q, Correction=Correction, CheckArguments=CheckArguments))
  }
}
# Bias-corrected Tsallis beta entropy from abundance vectors.
# Ns:   observed abundances; Nexp: expected abundances (same length).
# q:    entropy order. Correction: "None", "ChaoShen" or "Best".
# Coverage() and lnq() are package helpers defined elsewhere.
bcTsallisBeta <-
function(Ns, Nexp = NULL, q, Correction = "Best", CheckArguments = TRUE)
{
  if (CheckArguments)
    CheckentropartArguments()
  if (length(Ns) != length(Nexp)) {
    stop("Ns and Nexp should have the same length.")
  }
  # No correction: normalize to probabilities and use the plug-in estimator.
  if (Correction == "None") {
    return (TsallisBeta.ProbaVector(Ns/sum(Ns), Nexp/sum(Nexp), q, CheckArguments=FALSE))
  }
  # Sample coverage of the observed community
  Nrecords <- sum(Ns)
  SampleCoverage <- Coverage(Ns, CheckArguments=FALSE)
  # Sample coverage of the expected community
  Nrecordsexp <- sum(Nexp)
  SampleCoverageexp <- Coverage(Nexp, CheckArguments=FALSE)
  if (Correction == "ChaoShen" | Correction == "Best") {
    # Coverage-adjusted probabilities (Chao-Shen style), observed and expected.
    CiPsi <- SampleCoverage * Ns / Nrecords
    CPs <- SampleCoverageexp * Nexp / Nrecordsexp
    # Each term is reweighted by the inclusion probability 1-(1-p)^n.
    dataBeta <- CiPsi^q * lnq(CiPsi/CPs, q) / (1 -(1-CiPsi)^Nrecords)
    # force 0log0=0
    dataBeta[Ns == 0] <- 0
    entropy <- sum(dataBeta)
    names(entropy) <- Correction
    return (entropy)
  }
  # Unknown correction name: warn and return NA rather than guessing.
  warning("Correction was not recognized")
  return (NA)
}
|
#' inpdfr: A package to analyse PDF Files Using Ecological Tools.
#'
#' The inpdfr package allows analysing and comparing PDF/TXT documents using both
#' classical text mining tools and those from theoretical ecology. In the latter,
#' words are considered as species and documents as communities, therefore
#' allowing analysis at the community and metacommunity levels.
#' The inpdfr package provides three categories of functions:
#' functions to analyse the word-occurrence data.frame with standard and ecological tools, and
#' functions to use inpdfr through a Gtk2 Graphical User Interface (GitHub version only).
#'
#' @docType package
#' @aliases inpdfr-package
#' @name inpdfr
NULL
|
/R/inpdfr.R
|
no_license
|
frareb/inpdfr
|
R
| false
| false
| 771
|
r
|
#' inpdfr: A package to analyse PDF Files Using Ecological Tools.
#'
#' The inpdfr package allows analysing and comparing PDF/TXT documents using both
#' classical text mining tools and those from theoretical ecology. In the latter,
#' words are considered as species and documents as communities, therefore
#' allowing analysis at the community and metacommunity levels.
#' The inpdfr package provides three categories of functions:
#' functions to analyse the word-occurrence data.frame with standard and ecological tools, and
#' functions to use inpdfr through a Gtk2 Graphical User Interface (GitHub version only).
#'
#' @docType package
#' @aliases inpdfr-package
#' @name inpdfr
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/C4.R
\docType{data}
\name{Cups}
\alias{Cups}
\title{Chapter 4 Cups}
\format{
A data frame with 32 rows and seven columns
}
\usage{
Cups
}
\description{
A manager of a manufacturing company, shift managers, cup machine operators,
and a lone statistician decided to identify which factors were most influential in keeping their cups from leaking.
Over 30 possible factors were identified, but after some thoughtful discussions the group settled on
six variables that should be tested for their effects on leaking cups. One of the six factors of interest
was which paper supplier to use. Since the company was considering changing suppliers, funds were
available to do some product testing before a purchase was made. However, each trial (each run of
production under specified factor conditions) would cost the company thousands of dollars in lost
production time, material costs, and employee costs.
The company agreed to conduct 32 tests, but wanted to test all six factors and
all corresponding two-way interactions. Fractional factorial designs are very useful
for this type of exploratory data analysis. The details of fractional factorial designs
are beyond the scope of this text. However, balanced
data are a key concept behind these designs. For example, in the Cups data, every factor has two levels
and each level has 16 observations. In addition, within the first factor level (the 16 observations
where side-seam temperature is set to 70\%), every other factor is still balanced (every other factor
has 8 observations at each level).
}
\keyword{datasets}
|
/man/Cups.Rd
|
no_license
|
minceb/Stats2Labs
|
R
| false
| true
| 1,643
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/C4.R
\docType{data}
\name{Cups}
\alias{Cups}
\title{Chapter 4 Cups}
\format{
A data frame with 32 rows and seven columns
}
\usage{
Cups
}
\description{
A manager of a manufacturing company, shift managers, cup machine operators,
and a lone statistician decided to identify which factors were most influential in keeping their cups from leaking.
Over 30 possible factors were identified, but after some thoughtful discussions the group settled on
six variables that should be tested for their effects on leaking cups. One of the six factors of interest
was which paper supplier to use. Since the company was considering changing suppliers, funds were
available to do some product testing before a purchase was made. However, each trial (each run of
production under specified factor conditions) would cost the company thousands of dollars in lost
production time, material costs, and employee costs.
The company agreed to conduct 32 tests, but wanted to test all six factors and
all corresponding two-way interactions. Fractional factorial designs are very useful
for this type of exploratory data analysis. The details of fractional factorial designs
are beyond the scope of this text. However, balanced
data are a key concept behind these designs. For example, in the Cups data, every factor has two levels
and each level has 16 observations. In addition, within the first factor level (the 16 observations
where side-seam temperature is set to 70\%), every other factor is still balanced (every other factor
has 8 observations at each level).
}
\keyword{datasets}
|
# Usage example for RHJDBC. Wrapped in if(FALSE) so it never runs when the
# file is sourced or checked -- it needs a live HiveServer2 to connect to.
if(FALSE){
  require("RHJDBC")
  # initiation: put the bundled jars on the JVM classpath and start the JVM
  cp = dir(system.file("java", package="RHJDBC"),full.names = TRUE)
  .jinit(classpath=cp)
  options( java.parameters = "-Xmx8g" ) # set jvm heap size
  drv <- JDBC("org.apache.hive.jdbc.HiveDriver")
  con <- dbConnect(drv,...)
  class(con) = "JHDBCConnection" # set the S3 class so RHJDBC methods dispatch
  dbRemoveTable(con, "tmp.mtcars")
  dbCreateTable(con, "tmp.mtcars", mtcars)
  dbWriteTable(con, "tmp.mtcars", mtcars,overwrite=FALSE)
  query = "select * from tmp.mtcars"
  data <- dbFetch(dbSendQuery(con,query))
  data
}
|
/R/example.R
|
no_license
|
lengyuyeke/RHJDBC
|
R
| false
| false
| 534
|
r
|
# Usage example for RHJDBC. Wrapped in if(FALSE) so it never runs when the
# file is sourced or checked -- it needs a live HiveServer2 to connect to.
if(FALSE){
  require("RHJDBC")
  # initiation: put the bundled jars on the JVM classpath and start the JVM
  cp = dir(system.file("java", package="RHJDBC"),full.names = TRUE)
  .jinit(classpath=cp)
  options( java.parameters = "-Xmx8g" ) # set jvm heap size
  drv <- JDBC("org.apache.hive.jdbc.HiveDriver")
  con <- dbConnect(drv,...)
  class(con) = "JHDBCConnection" # set the S3 class so RHJDBC methods dispatch
  dbRemoveTable(con, "tmp.mtcars")
  dbCreateTable(con, "tmp.mtcars", mtcars)
  dbWriteTable(con, "tmp.mtcars", mtcars,overwrite=FALSE)
  query = "select * from tmp.mtcars"
  data <- dbFetch(dbSendQuery(con,query))
  data
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): auto-generated thin wrappers around the compiled routines in
# src/; each simply forwards its arguments via .Call. Regenerate with
# Rcpp::compileAttributes() instead of editing this file.
c_D_cross_2d_box <- function(x, bbox, types, intensities, r) {
    .Call('_Kcross_c_D_cross_2d_box', PACKAGE = 'Kcross', x, bbox, types, intensities, r)
}
c_K_cross_2d_box <- function(x, bbox, types, ntypes, r, correction) {
    .Call('_Kcross_c_K_cross_2d_box', PACKAGE = 'Kcross', x, bbox, types, ntypes, r, correction)
}
c_K_2d_box <- function(x, bbox, r, correction) {
    .Call('_Kcross_c_K_2d_box', PACKAGE = 'Kcross', x, bbox, r, correction)
}
c_K_cross_2d_box_just_one_from <- function(x, bbox, types, from, ntypes, r, correction) {
    .Call('_Kcross_c_K_cross_2d_box_just_one_from', PACKAGE = 'Kcross', x, bbox, types, from, ntypes, r, correction)
}
c_K_cross_partial_box <- function(x, types, ntypes, counts, intensities, bbox, r) {
    .Call('_Kcross_c_K_cross_partial_box', PACKAGE = 'Kcross', x, types, ntypes, counts, intensities, bbox, r)
}
c_K_partial_box <- function(x, bbox, r) {
    .Call('_Kcross_c_K_partial_box', PACKAGE = 'Kcross', x, bbox, r)
}
c_bbox_disc_intersection <- function(x, bbox, r) {
    .Call('_Kcross_c_bbox_disc_intersection', PACKAGE = 'Kcross', x, bbox, r)
}
c_guan_doublesum_2d_box <- function(x, bbox, bw) {
    .Call('_Kcross_c_guan_doublesum_2d_box', PACKAGE = 'Kcross', x, bbox, bw)
}
c_iK_cross_2d_box <- function(x, bbox, types, ntypes, intensities, r, correction) {
    .Call('_Kcross_c_iK_cross_2d_box', PACKAGE = 'Kcross', x, bbox, types, ntypes, intensities, r, correction)
}
c_iK_2d_box <- function(x, bbox, intensities, r, correction) {
    .Call('_Kcross_c_iK_2d_box', PACKAGE = 'Kcross', x, bbox, intensities, r, correction)
}
c_ipcf_cross_2d_box <- function(x, bbox, ntypes, types, intensities, r, adjust, correction) {
    .Call('_Kcross_c_ipcf_cross_2d_box', PACKAGE = 'Kcross', x, bbox, ntypes, types, intensities, r, adjust, correction)
}
c_ipcf_2d_box <- function(x, bbox, intensities, bw, r, adjust, correction) {
    .Call('_Kcross_c_ipcf_2d_box', PACKAGE = 'Kcross', x, bbox, intensities, bw, r, adjust, correction)
}
c_ipcf_st_cross_2d_box <- function(x, bbox, ntypes, types, intensities, r, t, sigmas, correction) {
    .Call('_Kcross_c_ipcf_st_cross_2d_box', PACKAGE = 'Kcross', x, bbox, ntypes, types, intensities, r, t, sigmas, correction)
}
c_rho_cross_2d_box <- function(x, bbox, types, intensities, r, adjust, correction) {
    .Call('_Kcross_c_rho_cross_2d_box', PACKAGE = 'Kcross', x, bbox, types, intensities, r, adjust, correction)
}
c_rho_2d_box <- function(x, bbox, intensity, r, bw, correction, kern = 1L) {
    .Call('_Kcross_c_rho_2d_box', PACKAGE = 'Kcross', x, bbox, intensity, r, bw, correction, kern)
}
c_pcf_biv_2d_box <- function(x, n1, r, bw, bbox, correction) {
    .Call('_Kcross_c_pcf_biv_2d_box', PACKAGE = 'Kcross', x, n1, r, bw, bbox, correction)
}
c_pcf_2d_box <- function(x, r, bw, bbox, correction, kern = 1L) {
    .Call('_Kcross_c_pcf_2d_box', PACKAGE = 'Kcross', x, r, bw, bbox, correction, kern)
}
|
/R/RcppExports.R
|
no_license
|
antiphon/Kcross
|
R
| false
| false
| 3,045
|
r
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
c_D_cross_2d_box <- function(x, bbox, types, intensities, r) {
.Call('_Kcross_c_D_cross_2d_box', PACKAGE = 'Kcross', x, bbox, types, intensities, r)
}
c_K_cross_2d_box <- function(x, bbox, types, ntypes, r, correction) {
.Call('_Kcross_c_K_cross_2d_box', PACKAGE = 'Kcross', x, bbox, types, ntypes, r, correction)
}
c_K_2d_box <- function(x, bbox, r, correction) {
.Call('_Kcross_c_K_2d_box', PACKAGE = 'Kcross', x, bbox, r, correction)
}
c_K_cross_2d_box_just_one_from <- function(x, bbox, types, from, ntypes, r, correction) {
.Call('_Kcross_c_K_cross_2d_box_just_one_from', PACKAGE = 'Kcross', x, bbox, types, from, ntypes, r, correction)
}
c_K_cross_partial_box <- function(x, types, ntypes, counts, intensities, bbox, r) {
.Call('_Kcross_c_K_cross_partial_box', PACKAGE = 'Kcross', x, types, ntypes, counts, intensities, bbox, r)
}
c_K_partial_box <- function(x, bbox, r) {
.Call('_Kcross_c_K_partial_box', PACKAGE = 'Kcross', x, bbox, r)
}
c_bbox_disc_intersection <- function(x, bbox, r) {
.Call('_Kcross_c_bbox_disc_intersection', PACKAGE = 'Kcross', x, bbox, r)
}
c_guan_doublesum_2d_box <- function(x, bbox, bw) {
.Call('_Kcross_c_guan_doublesum_2d_box', PACKAGE = 'Kcross', x, bbox, bw)
}
c_iK_cross_2d_box <- function(x, bbox, types, ntypes, intensities, r, correction) {
.Call('_Kcross_c_iK_cross_2d_box', PACKAGE = 'Kcross', x, bbox, types, ntypes, intensities, r, correction)
}
c_iK_2d_box <- function(x, bbox, intensities, r, correction) {
.Call('_Kcross_c_iK_2d_box', PACKAGE = 'Kcross', x, bbox, intensities, r, correction)
}
c_ipcf_cross_2d_box <- function(x, bbox, ntypes, types, intensities, r, adjust, correction) {
.Call('_Kcross_c_ipcf_cross_2d_box', PACKAGE = 'Kcross', x, bbox, ntypes, types, intensities, r, adjust, correction)
}
c_ipcf_2d_box <- function(x, bbox, intensities, bw, r, adjust, correction) {
.Call('_Kcross_c_ipcf_2d_box', PACKAGE = 'Kcross', x, bbox, intensities, bw, r, adjust, correction)
}
c_ipcf_st_cross_2d_box <- function(x, bbox, ntypes, types, intensities, r, t, sigmas, correction) {
.Call('_Kcross_c_ipcf_st_cross_2d_box', PACKAGE = 'Kcross', x, bbox, ntypes, types, intensities, r, t, sigmas, correction)
}
c_rho_cross_2d_box <- function(x, bbox, types, intensities, r, adjust, correction) {
.Call('_Kcross_c_rho_cross_2d_box', PACKAGE = 'Kcross', x, bbox, types, intensities, r, adjust, correction)
}
c_rho_2d_box <- function(x, bbox, intensity, r, bw, correction, kern = 1L) {
.Call('_Kcross_c_rho_2d_box', PACKAGE = 'Kcross', x, bbox, intensity, r, bw, correction, kern)
}
c_pcf_biv_2d_box <- function(x, n1, r, bw, bbox, correction) {
.Call('_Kcross_c_pcf_biv_2d_box', PACKAGE = 'Kcross', x, n1, r, bw, bbox, correction)
}
c_pcf_2d_box <- function(x, r, bw, bbox, correction, kern = 1L) {
.Call('_Kcross_c_pcf_2d_box', PACKAGE = 'Kcross', x, r, bw, bbox, correction, kern)
}
|
# Train a binary (positive/negative) sentiment classifier on the expert data
# with a CNN + bidirectional GRU, evaluate it on a held-out split, and save
# the fitted model to disk.
library(keras)
library(dplyr)
library(jsonlite)
library(readr)
library(magrittr)
library(caTools)
# samples <- c("The cat sat on the mat.", "The dog ate my homework.")

# Load the labelled expert data; drop rows labelled "l"/"л" (Latin and
# Cyrillic lookalikes) so only the binary 'n'/'p' sentiments remain.
expert_data <- read_csv('Data/Train/Expert/data.csv')
expert_data %<>% filter(sentiment != "л")
expert_data %<>% filter(sentiment != "l")

# Stratified 90/10 train/test split, reproducible via the fixed seed.
set.seed(1993)
expert_train_ind <- sample.split(expert_data$sentiment, SplitRatio = 0.9)
expert_train <- expert_data[expert_train_ind,]
expert_test <- expert_data[!expert_train_ind, ]

max_features <- 500  # vocabulary size kept by the tokenizer
maxlen <- 200        # sequences padded/truncated to this length

# Fit the tokenizer on the TRAINING text only; the same tokenizer is reused
# below for the test split so word indices are consistent between splits.
expert_train_tokenizer <- text_tokenizer(num_words = max_features) %>%
  fit_text_tokenizer(expert_train$text)
expert_train_sequences <- texts_to_sequences(expert_train_tokenizer, expert_train$text)
expert_train_x <- pad_sequences(expert_train_sequences, maxlen)

expert_model <- keras_model_sequential() %>%
  # Creates dense embedding layer; outputs 3D tensor
  # with shape (batch_size, sequence_length, output_dim)
  layer_embedding(input_dim = max_features,
                  output_dim = 128,
                  input_length = maxlen
  ) %>%
  layer_conv_1d(
    filters = 64,
    kernel_size = 5,
    padding = 'valid',
    activation = 'relu',
    strides = 1
  ) %>%
  layer_max_pooling_1d(pool_size = 4) %>%
  layer_dropout(rate = 0.7) %>%
  bidirectional(layer_cudnn_gru(units = 64)) %>%
  layer_dropout(rate = 0.7) %>%
  layer_dense(units = 16, activation = 'tanh') %>%
  layer_dropout(rate = 0.7) %>%
  layer_dense(units = 4, activation = 'tanh') %>%
  layer_dropout(rate = 0.7) %>%
  layer_dense(units = 1, activation = 'sigmoid')

expert_model %>% compile(
  optimizer = "adam",
  loss = "binary_crossentropy",
  metrics = c("accuracy")
)

# Labels are converted factor codes minus one, i.e. 'n' -> 0, 'p' -> 1.
expert_history <- expert_model %>% fit(
  expert_train_x,
  (as.numeric(as.factor(expert_train$sentiment)) - 1),
  epochs = 50,
  batch_size = 128,
  validation_split = 0.1
)

# BUG FIX: the original script fit a *new* tokenizer on the test text, which
# assigns different word indices than the training tokenizer and invalidates
# the evaluation. Reuse the training tokenizer for the test sequences.
expert_test_sequences <- texts_to_sequences(expert_train_tokenizer, expert_test$text)
expert_test_x <- pad_sequences(expert_test_sequences, maxlen)

# Held-out accuracy with a 0.5 probability threshold.
# 1 l 2 n 3 p
pred <- predict(expert_model, expert_test_x)
mean(ifelse(pred > 0.5, 'p', 'n') == expert_test$sentiment)

keras::save_model_hdf5(expert_model, 'DeepLearnModel/expert_binary_model.hdf5')
|
/03_3_1_expert_deep_learn_binary.R
|
permissive
|
dongwook412/NorthKoreaReactionInRussia
|
R
| false
| false
| 2,337
|
r
|
library(keras)
library(dplyr)
library(jsonlite)
library(readr)
library(magrittr)
library(caTools)
# samples <- c("The cat sat on the mat.", "The dog ate my homework.")
expert_data <- read_csv('Data/Train/Expert/data.csv')
expert_data %<>% filter(sentiment != "л")
expert_data %<>% filter(sentiment != "l")
set.seed(1993)
expert_train_ind <- sample.split(expert_data$sentiment, SplitRatio = 0.9)
expert_train <- expert_data[expert_train_ind,]
expert_test <- expert_data[!expert_train_ind, ]
max_features <- 500
maxlen <- 200
expert_train_tokenizer <- text_tokenizer(num_words = max_features) %>%
fit_text_tokenizer(expert_train$text)
expert_train_sequences <- texts_to_sequences(expert_train_tokenizer, expert_train$text)
expert_train_x <- pad_sequences(expert_train_sequences, maxlen)
expert_model <- keras_model_sequential()%>%
# Creates dense embedding layer; outputs 3D tensor
# with shape (batch_size, sequence_length, output_dim)
layer_embedding(input_dim = max_features,
output_dim = 128,
input_length = maxlen
) %>%
layer_conv_1d(
filters = 64,
kernel_size = 5,
padding = 'valid',
activation = 'relu',
strides = 1
) %>%
layer_max_pooling_1d(pool_size = 4) %>%
layer_dropout(rate = 0.7) %>%
bidirectional(layer_cudnn_gru(units = 64)) %>%
layer_dropout(rate = 0.7) %>%
layer_dense(units = 16, activation = 'tanh') %>%
layer_dropout(rate = 0.7) %>%
layer_dense(units = 4, activation = 'tanh') %>%
layer_dropout(rate = 0.7) %>%
layer_dense(units = 1, activation = 'sigmoid')
expert_model %>% compile(
optimizer = "adam",
loss = "binary_crossentropy",
metrics = c("accuracy")
)
expert_history <- expert_model %>% fit(
expert_train_x,
(as.numeric(as.factor(expert_train$sentiment)) - 1),
epochs = 50,
batch_size = 128,
validation_split = 0.1
)
expert_test_tokenizer <- text_tokenizer(num_words = max_features) %>%
fit_text_tokenizer(expert_test$text)
expert_test_sequences <- texts_to_sequences(expert_test_tokenizer, expert_test$text)
expert_test_x <- pad_sequences(expert_test_sequences, maxlen)
pred <- predict(expert_model, expert_test_x)
# 1 l 2 n 3 p
mean(ifelse(pred > 0.5, 'p', 'n') == expert_test$sentiment)
keras::save_model_hdf5(expert_model, 'DeepLearnModel/expert_binary_model.hdf5')
|
# Simulate a 5-group scRNA-seq dataset with splatter, add increasing levels of
# simulated dropout, and write each count matrix (cells x genes plus the group
# label) to a txt file. Refactors the original copy-pasted per-level blocks
# into a single parameterized loop with identical printed deltas and identical
# output files.
library(splatter)
library(scater)

params <- newSplatParams()
params <- setParams(params, update = list(nGenes = 10000, batchCells = 2000, de.prob = 0.08, seed = 100))
sim <- splatSimulate(params, group.prob = c(0.3, 0.3, 0.3, 0.05, 0.05), method = "groups", verbose = FALSE)

# count zeros function: number of zero entries in a vector
count_zeros <- function(x) { length(which(x == 0)) }

# Mean fraction of zero entries per cell (columns are cells; 10000 genes).
zero_fraction <- function(simulation) {
  mean(apply(counts(simulation), 2, count_zeros)) / 10000
}

# Baseline zero fraction before any simulated dropout.
zero0 <- zero_fraction(sim)

# Simulate dropout at increasing severity via the logistic midpoint parameter;
# print the extra zero fraction each level introduces over the baseline.
dropout_mids <- c(0, 1, 2, 3, 6)
sims_drop <- lapply(dropout_mids, function(mid) {
  dropped <- splatter:::splatSimDropout(
    sim,
    setParams(params, update = list(dropout.type = "experiment", dropout.shape = -1, dropout.mid = mid))
  )
  print(zero_fraction(dropped) - zero0)
  dropped
})

# Write the data and label to txt files: Dropout_0 is the dropout-free
# simulation; Dropout_1..5 correspond to dropout.mid = 0, 1, 2, 3, 6.
label <- colData(sim)[3]
write.table(cbind(t(counts(sim)), label), "Dropout_0_Data.txt")
for (i in seq_along(sims_drop)) {
  write.table(cbind(t(counts(sims_drop[[i]])), label),
              paste0("Dropout_", i, "_Data.txt"))
}

# visualization
sim <- normalise(sim)
plotPCA(sim)
|
/data_generator/SIM_III.R
|
no_license
|
cyxss/DenseFly4scRNAseq
|
R
| false
| false
| 2,078
|
r
|
library(splatter)
library(scater)
params <-newSplatParams()
params<-setParams(params,update=list(nGenes=10000,batchCells=2000, de.prob=0.08,seed=100))
sim <- splatSimulate(params,group.prob=c(0.3,0.3,0.3,0.05,0.05),method="groups",verbose = FALSE)
#count zeros function
count_zeros <- function(x){length(which(x==0))}
#simulate dropout
zero0 <-mean(apply(counts(sim), 2, count_zeros))/10000
sim.drop1 <- splatter:::splatSimDropout(sim, setParams(params,update=list(dropout.type="experiment",dropout.shape=-1,dropout.mid=0)))
zeros <-apply(counts(sim.drop1), 2, count_zeros)
mean(zeros)/10000-zero0
sim.drop2 <- splatter:::splatSimDropout(sim, setParams(params,update=list(dropout.type="experiment",dropout.shape=-1,dropout.mid=1)))
zeros <-apply(counts(sim.drop2), 2, count_zeros)
mean(zeros)/10000-zero0
sim.drop3 <- splatter:::splatSimDropout(sim, setParams(params,update=list(dropout.type="experiment",dropout.shape=-1,dropout.mid=2)))
zeros <-apply(counts(sim.drop3), 2, count_zeros)
mean(zeros)/10000-zero0
sim.drop4 <- splatter:::splatSimDropout(sim, setParams(params,update=list(dropout.type="experiment",dropout.shape=-1,dropout.mid=3)))
zeros <-apply(counts(sim.drop4), 2, count_zeros)
mean(zeros)/10000-zero0
sim.drop5 <- splatter:::splatSimDropout(sim, setParams(params,update=list(dropout.type="experiment",dropout.shape=-1,dropout.mid=6)))
zeros <-apply(counts(sim.drop5), 2, count_zeros)
mean(zeros)/10000-zero0
#write the data and label to a txt file
data0 <-t(counts(sim))
data1 <-t(counts(sim.drop1))
data2 <-t(counts(sim.drop2))
data3 <-t(counts(sim.drop3))
data4 <-t(counts(sim.drop4))
data5 <-t(counts(sim.drop5))
label<-colData(sim)[3]
write.table(cbind(data0,label),"Dropout_0_Data.txt")
write.table(cbind(data1,label),"Dropout_1_Data.txt")
write.table(cbind(data2,label),"Dropout_2_Data.txt")
write.table(cbind(data3,label),"Dropout_3_Data.txt")
write.table(cbind(data4,label),"Dropout_4_Data.txt")
write.table(cbind(data5,label),"Dropout_5_Data.txt")
#visualization
sim<-normalise(sim)
plotPCA(sim)
|
# Allow convenient use of functions from other packages
#' @include Pipes.R
#' @include Error_Handling.R
NULL
#' Find the percent of missing values
#'
#' Count the percent of missing values in a vector.
#' Counts the percent of NA values for non-character vectors,
#' and the percent of NA or "" values for characters
#' (the counting rules are those of [num_missing()]).
#'
#' @param vec Vector to count missing values in.
#'
#' @return Numeric scalar containing the percent of missing values,
#'   expressed as a fraction in [0, 1].
#' @export
#'
#' @examples
#' percent_missing(c(NA))
#' percent_missing(c(1, 2, 3, 4, 5))
#' percent_missing(c(1, 2, NA, 4, 5))
#'
percent_missing <- function(vec) {
  # Delegate the per-type counting rules to num_missing() and normalize by
  # length. (Removed a stale commented-out copy of num_missing()'s body that
  # previously lived here.)
  return(num_missing(vec) / length(vec))
}
#' Find the number of missing values
#'
#' Count the number of missing values in a vector.
#' Counts the number of NA values for non-character vectors,
#' and the number of NA or "" values for characters.
#'
#' @param vec Vector to count missing values in.
#'
#' @return Numeric scalar containing the number of missing values.
#' @export
#'
#' @examples
#' num_missing(c(NA))
#' num_missing(c(1, 2, 3, 4, 5))
#' num_missing(c(1, 2, NA, 4, 5))
#'
num_missing <- function(vec) {
  # These predicates are scalars, so use short-circuiting `||` rather than the
  # vectorized `|`; this also avoids evaluating lubridate::is.Date() when one
  # of the cheaper checks already succeeded.
  if (is.numeric(vec) || is.logical(vec) || lubridate::is.Date(vec) || is.factor(vec)) {
    numMissing <- sum(is.na(vec))
  } else if (is.character(vec)) {
    # For character vectors, empty strings also count as missing.
    numMissing <- sum(is.na(vec)) + sum(vec == "", na.rm = TRUE)
  } else {
    message("Unknown vector data type: ", class(vec))
    numMissing <- sum(is.na(vec))
  }
  return(numMissing)
}
#' Replace missing values in a vector
#'
#' Replace missing values in a vector with either the mode, median,
#' or mean of the vector, removing NA values in the calculation.
#'
#' @param vec Vector in which to replace missing values.
#' @param method The method to use to determine the imputed value
#'   (one of "mode", "median", "mean"; any other value falls back to mode
#'   with a message).
#' @param with The value to impute missing values with (if specified, method will be ignored).
#' @param return_imputed Whether or not to return the value that missing elements were imputed with.
#'
#' @return Vector with missing values replaced as desired,
#'   or a list of that and the replacement (imputed) value.
#' @export
#'
#' @examples
#' replace_missing(c(1, 1, 1, 2, NA))
#' replace_missing(c(1, 1, 1, 2, NA), method = "mode") # Same as above
#' replace_missing(c(1, NA, 1, 2), method = "median")
#' replace_missing(c(1, NA, 1, 2), method = "mean")
#' replace_missing(c(1, NA, 1, 2), method = "mean", return_imputed = TRUE)
#' replace_missing(c(1, NA, 1, 2), with = 5)
#' replace_missing(c(1, NA, 1, 2), method = "mean", with = 5)
#'
replace_missing <- function(vec, method = "mode", with, return_imputed = FALSE) {
  # Find the value to replace missing values with based on the desired method,
  # ensuring that we have a numeric vector for median and mean.
  if (!missing(with)) {
    # An explicit `with` value overrides any method.
    if (!missing(method)) warning(paste0("Both the 'method' and 'with' fields were specified ",
                                         "inside the 'replace_missing' function. ",
                                         "The 'method' field will be ignored."))
    imputedVal <- with
  } else if (method == "median") {
    stop_if(!is.numeric(vec), "Can only calculate median for numeric vector.")
    imputedVal <- stats::median(vec, na.rm = TRUE)
  } else if (method == "mean") {
    stop_if(!is.numeric(vec), "Can only calculate mean for numeric vector.")
    imputedVal <- mean(vec, na.rm = TRUE)
  } else {
    if (method != "mode") message("Invalid method chosen to replace missing values. Mode will be used.")
    # Mode = most frequent value; NA entries are excluded from the tally.
    # NOTE(review): if `vec` is entirely NA the table is empty, yielding a
    # zero-length imputed value that makes the assignment below error --
    # confirm whether all-NA input should be handled explicitly.
    imputedVal <- names(utils::tail(sort(table(vec, useNA = "no")), 1))
  }
  # Convert the imputed value to the appropriate type.
  if (lubridate::is.Date(vec)) {
    imputedVal <- lubridate::as_date(imputedVal)
  } else if (is.factor(vec)) {
    imputedVal <- as.character(imputedVal)
  } else {
    imputedVal <- as.vector(imputedVal, mode = class(vec))
  }
  # Replace NA/missing values with the imputed value. These type checks are
  # scalars, so use short-circuiting `||` (the inner mask below keeps the
  # vectorized `|` on purpose).
  if (is.numeric(vec) || is.factor(vec) || is.logical(vec) || lubridate::is.Date(vec)) {
    vec[is.na(vec)] <- imputedVal
  } else if (is.character(vec)) {
    vec[(vec == "") | is.na(vec)] <- imputedVal
  } else {
    message("Unknown vector data type: ", class(vec))
    vec[is.na(vec)] <- imputedVal
  }
  # Return the vector, and the imputed value as well if desired.
  if (return_imputed) return(list(Vec = vec, ImputedVal = imputedVal))
  return(vec)
}
|
/R/Missing_Values.R
|
permissive
|
KO112/KO
|
R
| false
| false
| 4,760
|
r
|
# Allow convenient use of functions from other packages
#' @include Pipes.R
#' @include Error_Handling.R
NULL
#' Find the percent of missing values
#'
#' Count the percent of missing values in a vector.
#' Counts the percent of NA values for non-character vectors,
#' and the number of NA or "" values for characters.
#'
#' @param vec Vector to count missing values in.
#'
#' @return Numeric scalar containing the percent of missing values.
#' @export
#'
#' @examples
#' percent_missing(c(NA))
#' percent_missing(c(1, 2, 3, 4, 5))
#' percent_missing(c(1, 2, NA, 4, 5))
#'
percent_missing <- function(vec) {
# if (is.numeric(vec) | is.logical(vec) | lubridate::is.Date(vec) | is.factor(vec)) {
# numMissing <- sum(is.na(vec))
# } else if (is.character(vec)) {
# numMissing <- sum(vec == "", na.rm = TRUE) + sum(is.na(vec))
# } else {
# message("Unknown vector data type: ", class(vec))
# numMissing <- sum(is.na(vec))
# }
return(num_missing(vec) / length(vec))
}
#' Find the number of missing values
#'
#' Count the number of missing values in a vector.
#' Counts the number of NA values for non-character vectors,
#' and the number of NA or "" values for characters.
#'
#' @param vec Vector to count missing values in.
#'
#' @return Numeric scalar containing the number of missing values.
#' @export
#'
#' @examples
#' num_missing(c(NA))
#' num_missing(c(1, 2, 3, 4, 5))
#' num_missing(c(1, 2, NA, 4, 5))
#'
num_missing <- function(vec) {
if (is.numeric(vec) | is.logical(vec) | lubridate::is.Date(vec) | is.factor(vec)) {
numMissing <- sum(is.na(vec))
} else if (is.character(vec)) {
numMissing <- sum(is.na(vec)) + sum(vec == "", na.rm = TRUE)
} else {
message("Unknown vector data type: ", class(vec))
numMissing <- sum(is.na(vec))
}
return(numMissing)
}
#' Replace missing values in a vector
#'
#' Replace missing values in a vector with either the mode, median,
#' or mean of the vector, removing NA values in the calculation.
#'
#' @param vec Vector in which to replace missing values.
#' @param method The method to use to determine the imputed value.
#' @param with The value to impute missing values with (if specified, method will be ignored).
#' @param return_imputed Whether or not to return the value that missing elements were imputed with.
#'
#' @return Vector with missing values replaced as desired,
#' or a list of that and the replacement (imputed) value.
#' @export
#'
#' @examples
#' replace_missing(c(1, 1, 1, 2, NA))
#' replace_missing(c(1, 1, 1, 2, NA), method = "mode") # Same as above
#' replace_missing(c(1, NA, 1, 2), method = "median")
#' replace_missing(c(1, NA, 1, 2), method = "mean")
#' replace_missing(c(1, NA, 1, 2), method = "mean", return_imputed = TRUE)
#' replace_missing(c(1, NA, 1, 2), with = 5)
#' replace_missing(c(1, NA, 1, 2), method = "mean", with = 5)
#'
replace_missing <- function(vec, method = "mode", with, return_imputed = FALSE) {
# Find the value to replace missing values with based on the desired method,
# ensuring that we have a numeric vector for median and mean
if (!missing(with)) {
if (!missing(method)) warning(paste0("Both the 'method' and 'with' fields were specified ",
"inside the 'replace_missing' function. ",
"The 'method' field will be ignored."))
imputedVal <- with
} else if (method == "median") {
stop_if(!is.numeric(vec), "Can only calculate median for numeric vector.")
imputedVal <- stats::median(vec, na.rm = TRUE)
} else if (method == "mean") {
stop_if(!is.numeric(vec), "Can only calculate mean for numeric vector.")
imputedVal <- mean(vec, na.rm = TRUE)
} else {
if (method != "mode") message("Invalid method chosen to replace missing values. Mode will be used.")
imputedVal <- table(vec, useNA = "no") %>% sort() %>% utils::tail(1) %>% names()
}
# Convert the imputed value to the appropriate type
if (lubridate::is.Date(vec)) {
imputedVal <- lubridate::as_date(imputedVal)
} else if (is.factor(vec)) {
imputedVal <- as.character(imputedVal)
} else {
imputedVal <- as.vector(imputedVal, mode = class(vec))
}
# Replace NA/missing values with the imputed value, and return the vector
if (is.numeric(vec) | is.factor(vec) | is.logical(vec) | lubridate::is.Date(vec)) {
vec[is.na(vec)] <- imputedVal
} else if (is.character(vec)) {
vec[(vec == "") | is.na(vec)] <- imputedVal
} else {
message("Unknown vector data type: ", class(vec))
vec[is.na(vec)] <- imputedVal
}
# Return the vector, and the imputed value as well if desired
if (return_imputed) return(list(Vec = vec, ImputedVal = imputedVal))
return(vec)
}
|
##=========================================================
## File: predict_OCRI.R
## History: initially coded as: build_olk_index.R
## Author: Jianying Li
## Comment: compare different data processing parameter
## settings and compare seven models (ROC - metrics)
## produce model comparison box plots
##
## Extended from _2class_prediction_model_comparison_02.R
##=========================================================
#===============================================
# Set up os paths (pick the root for the current machine by uncommenting)
#==============================================
mac.os <- "/Users/li11/"
linux <- "~/"
windows <- "X:/"
root <- windows
#root <- linux
#root <- mac.os
##===============================================
library(caret)
##===================================
## set up working directory
## and getting the dataset
##===================================
setwd(paste (root, "/myGit/mixturemodel/reconData/para2/", sep=""))
reconFiles <- list.files (pattern = "recon_*")
## Analyze the 2nd reconstructed-data file; `parSet` is its parameter-setting
## tag, recovered by stripping the fixed filename prefix and suffix.
i = 2
reconFiles[2]
parSet <- sub (".txt", "", sub ("recon_3classes_", "", reconFiles[i]))
comFigName <- paste ("Model_comparison_", parSet, ".png", sep = "")
title <- paste ("Model comparison with ", parSet,sep = "")
comFigName <- paste (root, "/myGit/mixturemodel/modeling/model_selection/", comFigName, sep="")
data <- read.table(reconFiles[i], header=TRUE, sep = "\t")
## data cleaning: drop zero-variance columns (factors compared via their
## integer codes), then use column 1 as row IDs and remove it.
var0 <- unlist(lapply(data, function(x) 0 == var(if (is.factor(x)) as.integer(x) else x)))
dataN0 <- data[,-which(var0)]
rownames(dataN0) <- paste ("ID_", dataN0[,1], sep = "")
dataN0[,1] <- NULL
## Retain data ONLY with two classes: samples labelled "k" are held out for
## later risk scoring; the rest are used for training/testing.
## NOTE(review): the c/n/k labels appear to mean OSCC/Normal/OLK, inferred
## from the relabelling near the end of this script -- confirm.
data.2.classes <- dataN0[-which (dataN0$label == "k"),]
data.k <- dataN0[which (dataN0$label == "k"),]
dim(data.2.classes)
labels <- as.vector(data.2.classes$label)
## Rebuild the frame so `label` is a plain character vector in the last column.
## NOTE(review): column 16 is assumed to be the old label column -- confirm
## against the reconstructed-data layout.
data.2.classes <- data.2.classes[,-16]
data.2.classes <- cbind (data.2.classes, label=labels)
file2classes <- data.2.classes
file.olk <- data.k
## create data partition: stratified 70/30 train/test split on the label
set.seed(12345)
inTrainingSet <- createDataPartition(file2classes$label, p=.7, list=FALSE)
labelTrain <- file2classes[ inTrainingSet,]
labelTest <- file2classes[-inTrainingSet,]
nrow(labelTrain)
nrow(labelTest)
## Two resampling schemes are defined; only cvCtrl.2 (leave-one-out CV) is
## actually used by the model fit below.
cvCtrl <- trainControl(method = "repeatedcv", repeats = 5,
summaryFunction = twoClassSummary,
classProbs = TRUE)
cvCtrl.2 <- trainControl(method = "LOOCV",
summaryFunction = twoClassSummary,
classProbs = TRUE)
##====================================
## SVM Example: radial-kernel SVM, tuned by ROC under LOOCV
##====================================
set.seed(12345)
svmTune <- train(label ~ .,
data = labelTrain,
method = "svmRadial",
# The default grid of cost parameters go from 2^-2,
# 0.5 to 1,
# We'll fit 9 values in that sequence via the tuneLength
# argument.
tuneLength = 9,
preProc = c("center", "scale"),
metric = "ROC",
trControl = cvCtrl.2)
svmPred <- predict(svmTune, labelTest)
confusionMatrix(svmPred, labelTest$label)
## Class probabilities on the held-out test set.
svmPredProb <- predict(svmTune, labelTest , type = "prob")
boxplot(svmPredProb)
#points(svmPredProb)
str(svmPredProb)
## Predicted P(class "c") split by the true test label.
label.c.as.c <- svmPredProb$c[labelTest$label=="c"]
label.n.as.c <- svmPredProb$c[labelTest$label=="n"]
svmPredProb$c =="c"
boxplot(svmPredProb, outpch = NA)
stripchart(svmPredProb,
vertical = TRUE, method = "jitter",
pch = 21, col = "maroon", bg = "bisque",
add = TRUE)
##=============================
## Test on olk sample: score the held-out "k" samples; P(class "c")
## serves as the Oral Cancer Risk Index (OCRI)
##=============================
dim(file.olk)
svmPred.k.prob <- predict(svmTune, file.olk, type = "prob")
rownames(file.olk)
rownames(svmPred.k.prob) <- rownames(file.olk)
#write.table (svmPred.k.prob, file = "prediction_on_olk.txt", row.name = TRUE,sep="\t")
den.c <- density(svmPred.k.prob$c)
label.k.as.c <- svmPred.k.prob$c
den.n <- density(svmPred.k.prob$n)
length(which(svmPred.k.prob$c > 0.5))
length(which(svmPred.k.prob$n > 0.5))
plot( den.c)
lines( den.n, col = "red")
## OLK samples scored below / above the 0.5 threshold.
file.olk[-which(svmPred.k.prob$c >0.5),]
file.olk[which(svmPred.k.prob$c >0.5),]
##==========================
## Get three categories: risk tiers by P(class "c") --
## low <= 0.3, high >= 0.7, mid in between
##==========================
lowRisk <- file.olk[which(svmPred.k.prob$c <=0.3),]
highRisk <- file.olk[which(svmPred.k.prob$c >=0.7),]
midRisk <- file.olk[-which((svmPred.k.prob$c <=0.3) | (svmPred.k.prob$c >=0.7)),]
rownames(lowRisk)
rownames(highRisk)
rownames(midRisk)
plot(density(svmPred.k.prob$c))
plot(density(svmPred.k.prob$n))
pairs(svmPred.k.prob)
##==============================
## To get the figure 5: OCRI by group (test set + held-out OLK)
##==============================
rep ("c", length(label.c.as.c))
rep ("n", length(label.n.as.c))
rep ("k", length(label.k.as.c))
## Combine test-set and OLK probabilities into one label/probability table;
## the order of labels must match the concatenation order of `prob`.
predicted.c <- list (label = as.vector(c(rep ("n", length(label.n.as.c)), rep ("k", length(label.k.as.c)), rep ("c", length(label.c.as.c)))),
prob = as.vector(c( svmPredProb$c[labelTest$label=="n"], svmPred.k.prob$c, svmPredProb$c[labelTest$label=="c"])))
str(predicted.c)
colnames(as.data.frame(predicted.c))
## Relabel the groups with human-readable names for plotting.
predicted.c$label[predicted.c$label == "n"] <- "Normal"
predicted.c$label[predicted.c$label == "k"] <- "OLK"
predicted.c$label[predicted.c$label == "c"] <- "OSCC"
#boxplot(prob ~ label, data = as.data.frame(predicted.c), main = "Samples (by label) predicted as OSCC", ylab = "Probability", outpch = NA)
boxplot(prob ~ label, data = as.data.frame(predicted.c), ylab = "Oral Cancer Risk Index (OCRI)", outpch = NA)
stripchart(prob ~ label, data = as.data.frame(predicted.c),
vertical = TRUE, method = "jitter",
pch = 21, col = "maroon", bg = "bisque",
add = TRUE)
#mtext ("Prediction probability")
##==============================
## To get the figure 6 on
# 128141
##==============================
## Per-sample OCRI scatter, labelled with the sample IDs (prefix stripped).
svmPred.k.prob
sub ("ID_", "", rownames(svmPred.k.prob))
plot(svmPred.k.prob$c, pch = "")
text(svmPred.k.prob$c, lab = sub ("ID_", "", rownames(svmPred.k.prob)))
|
/mixturemodel/pipeLineScripts/predict_OCRI.R
|
no_license
|
ImageRecognitionMaster/myOCRI-iii
|
R
| false
| false
| 6,242
|
r
|
##=========================================================
## File: predict_OCRI.R
## History: initially coded as: build_olk_index.R
## Author: Jianying Li
## Comment: compare different data processing parameter
## settings and compare seven models (ROC - metrics)
## produce model comparison box plots
##
## Extended from _2class_prediction_model_comparison_02.R
##=========================================================
#===============================================
# Set up os paths
#==============================================
mac.os <- "/Users/li11/"
linux <- "~/"
windows <- "X:/"
root <- windows
#root <- linux
#root <- mac.os
##===============================================
library(caret)
##===================================
## set up working directory
## and getting the dataset
##===================================
setwd(paste (root, "/myGit/mixturemodel/reconData/para2/", sep=""))
reconFiles <- list.files (pattern = "recon_*")
i = 2
reconFiles[2]
parSet <- sub (".txt", "", sub ("recon_3classes_", "", reconFiles[i]))
comFigName <- paste ("Model_comparison_", parSet, ".png", sep = "")
title <- paste ("Model comparison with ", parSet,sep = "")
comFigName <- paste (root, "/myGit/mixturemodel/modeling/model_selection/", comFigName, sep="")
data <- read.table(reconFiles[i], header=TRUE, sep = "\t")
## data cleaning
var0 <- unlist(lapply(data, function(x) 0 == var(if (is.factor(x)) as.integer(x) else x)))
dataN0 <- data[,-which(var0)]
rownames(dataN0) <- paste ("ID_", dataN0[,1], sep = "")
dataN0[,1] <- NULL
## Retain data ONLY with two classes
data.2.classes <- dataN0[-which (dataN0$label == "k"),]
data.k <- dataN0[which (dataN0$label == "k"),]
dim(data.2.classes)
labels <- as.vector(data.2.classes$label)
data.2.classes <- data.2.classes[,-16]
data.2.classes <- cbind (data.2.classes, label=labels)
file2classes <- data.2.classes
file.olk <- data.k
## create data partition
## create data partition
set.seed(12345)
inTrainingSet <- createDataPartition(file2classes$label, p=.7, list=FALSE)
labelTrain <- file2classes[ inTrainingSet,]
labelTest <- file2classes[-inTrainingSet,]
nrow(labelTrain)
nrow(labelTest)
cvCtrl <- trainControl(method = "repeatedcv", repeats = 5,
summaryFunction = twoClassSummary,
classProbs = TRUE)
cvCtrl.2 <- trainControl(method = "LOOCV",
summaryFunction = twoClassSummary,
classProbs = TRUE)
##====================================
## SVM Example
##====================================
set.seed(12345)
svmTune <- train(label ~ .,
data = labelTrain,
method = "svmRadial",
# The default grid of cost parameters go from 2^-2,
# 0.5 to 1,
# We'll fit 9 values in that sequence via the tuneLength
# argument.
tuneLength = 9,
preProc = c("center", "scale"),
metric = "ROC",
trControl = cvCtrl.2)
svmPred <- predict(svmTune, labelTest)
confusionMatrix(svmPred, labelTest$label)
svmPredProb <- predict(svmTune, labelTest , type = "prob")
boxplot(svmPredProb)
#points(svmPredProb)
str(svmPredProb)
label.c.as.c <- svmPredProb$c[labelTest$label=="c"]
label.n.as.c <- svmPredProb$c[labelTest$label=="n"]
svmPredProb$c =="c"
boxplot(svmPredProb, outpch = NA)
stripchart(svmPredProb,
vertical = TRUE, method = "jitter",
pch = 21, col = "maroon", bg = "bisque",
add = TRUE)
##=============================
## Test on olk sample
##=============================
## Score the held-out OLK (leukoplakia) samples with the trained SVM and
## inspect how many fall on the cancer ("c") vs normal ("n") side.
dim(file.olk)
svmPred.k.prob <- predict(svmTune, file.olk, type = "prob")
rownames(file.olk)
rownames(svmPred.k.prob) <- rownames(file.olk)
#write.table (svmPred.k.prob, file = "prediction_on_olk.txt", row.name = TRUE,sep="\t")
den.c <- density(svmPred.k.prob$c)
label.k.as.c <- svmPred.k.prob$c
den.n <- density(svmPred.k.prob$n)
## Counts of OLK samples classified to each side at the 0.5 cutoff.
length(which(svmPred.k.prob$c > 0.5))
length(which(svmPred.k.prob$n > 0.5))
plot( den.c)
lines( den.n, col = "red")
## OLK samples below / above the 0.5 cancer-probability cutoff.
file.olk[-which(svmPred.k.prob$c >0.5),]
file.olk[which(svmPred.k.prob$c >0.5),]
##==========================
## Get three categories
##==========================
## Risk stratification of OLK samples by P(c): low <= 0.3, high >= 0.7,
## everything in between is mid risk.
lowRisk <- file.olk[which(svmPred.k.prob$c <=0.3),]
highRisk <- file.olk[which(svmPred.k.prob$c >=0.7),]
midRisk <- file.olk[-which((svmPred.k.prob$c <=0.3) | (svmPred.k.prob$c >=0.7)),]
rownames(lowRisk)
rownames(highRisk)
rownames(midRisk)
plot(density(svmPred.k.prob$c))
plot(density(svmPred.k.prob$n))
pairs(svmPred.k.prob)
##==============================
## To get the figure 5
##==============================
## Combine P(c) for test normals, OLK samples, and test cancers into one
## long vector with matching group labels, then boxplot per group.
rep ("c", length(label.c.as.c))
rep ("n", length(label.n.as.c))
rep ("k", length(label.k.as.c))
predicted.c <- list (label = as.vector(c(rep ("n", length(label.n.as.c)), rep ("k", length(label.k.as.c)), rep ("c", length(label.c.as.c)))),
prob = as.vector(c( svmPredProb$c[labelTest$label=="n"], svmPred.k.prob$c, svmPredProb$c[labelTest$label=="c"])))
str(predicted.c)
colnames(as.data.frame(predicted.c))
## Human-readable group names for the plot axis.
predicted.c$label[predicted.c$label == "n"] <- "Normal"
predicted.c$label[predicted.c$label == "k"] <- "OLK"
predicted.c$label[predicted.c$label == "c"] <- "OSCC"
#boxplot(prob ~ label, data = as.data.frame(predicted.c), main = "Samples (by label) predicted as OSCC", ylab = "Probability", outpch = NA)
boxplot(prob ~ label, data = as.data.frame(predicted.c), ylab = "Oral Cancer Risk Index (OCRI)", outpch = NA)
stripchart(prob ~ label, data = as.data.frame(predicted.c),
           vertical = TRUE, method = "jitter",
           pch = 21, col = "maroon", bg = "bisque",
           add = TRUE)
#mtext ("Prediction probability")
##==============================
## To get the figure 6 on
# 128141
##==============================
## Per-sample OCRI for the OLK set, labeled with the numeric sample ID
## (the "ID_" prefix added earlier is stripped for display).
svmPred.k.prob
sub ("ID_", "", rownames(svmPred.k.prob))
plot(svmPred.k.prob$c, pch = "")
text(svmPred.k.prob$c, lab = sub ("ID_", "", rownames(svmPred.k.prob)))
|
## This script takes the DNA database and, for each fragment, makes a
## separate FASTA file.
## All "our" (MISU) sequences are used.
## Only those external-source sequences are used which (a) belong to
## species/fragments absent in "our" data and (b) are the longest per species.
library(shipunov)
set <- read.table("_kubricks_dna.txt", sep="\t", h=TRUE, as.is=TRUE)
set <- set[order(set$FRAGMENT, set$SPECIES.NEW), ]
## do not use deselected sequences
set <- set[set$SELECT == 1 | is.na(set$SELECT), ]
## split data frame into a list, one element per DNA fragment
sets <- split(set[, c("SOURCE", "SPECIES.NEW", "SEQUENCE.ID", "SEQUENCE")], set$FRAGMENT)
for (s in names(sets)) {
  subs <- sets[[s]]
  subs.misu <- subs[subs$SOURCE == "MISU", ] # keep ours
  subs.misu.sp <- unique(sort(subs.misu$SPECIES.NEW))
  subs.genbank <- subs[subs$SOURCE != "MISU", ] # not ours
  ## FIX: spell out "SPECIES.NEW" instead of relying on `$SPECIES` partial
  ## matching (it resolved only by accident; breaks if a SPECIES* column
  ## is ever added).
  subs.genbank <- subs.genbank[!subs.genbank$SPECIES.NEW %in% subs.misu.sp, ] # remove species which we already have
  subs.genbank <- subs.genbank[order(subs.genbank$SPECIES.NEW, nchar(subs.genbank$SEQUENCE), decreasing=TRUE), ] # sort by length, longer first
  subs.genbank <- subs.genbank[!duplicated(subs.genbank$SPECIES.NEW), ] # take the first (= longest) per species
  subs <- rbind(subs.misu, subs.genbank) # join back
  subs$ID <- apply(subs[, c("SPECIES.NEW", "SEQUENCE.ID")], 1, function(.x) paste0(.x, collapse="__")) # separator between species and everything else is "__", the rest is unchanged
  subs[, c("SOURCE", "SPECIES.NEW", "SEQUENCE.ID")] <- NULL # remove columns except 2
  subs <- subs[, c("ID", "SEQUENCE")] # ID first
  Write.fasta(subs, file=paste0("20_sets/", s, ".fasta"))
}
|
/20_make_sets.r
|
no_license
|
ashipunov/Ripeline
|
R
| false
| false
| 1,548
|
r
|
## This script takes the DNA database and, for each fragment, makes a
## separate FASTA file.
## All "our" (MISU) sequences are used.
## Only those external-source sequences are used which (a) belong to
## species/fragments absent in "our" data and (b) are the longest per species.
library(shipunov)
set <- read.table("_kubricks_dna.txt", sep="\t", h=TRUE, as.is=TRUE)
set <- set[order(set$FRAGMENT, set$SPECIES.NEW), ]
## do not use deselected sequences
set <- set[set$SELECT == 1 | is.na(set$SELECT), ]
## split data frame into a list, one element per DNA fragment
sets <- split(set[, c("SOURCE", "SPECIES.NEW", "SEQUENCE.ID", "SEQUENCE")], set$FRAGMENT)
for (s in names(sets)) {
  subs <- sets[[s]]
  subs.misu <- subs[subs$SOURCE == "MISU", ] # keep ours
  subs.misu.sp <- unique(sort(subs.misu$SPECIES.NEW))
  subs.genbank <- subs[subs$SOURCE != "MISU", ] # not ours
  ## FIX: spell out "SPECIES.NEW" instead of relying on `$SPECIES` partial
  ## matching (it resolved only by accident; breaks if a SPECIES* column
  ## is ever added).
  subs.genbank <- subs.genbank[!subs.genbank$SPECIES.NEW %in% subs.misu.sp, ] # remove species which we already have
  subs.genbank <- subs.genbank[order(subs.genbank$SPECIES.NEW, nchar(subs.genbank$SEQUENCE), decreasing=TRUE), ] # sort by length, longer first
  subs.genbank <- subs.genbank[!duplicated(subs.genbank$SPECIES.NEW), ] # take the first (= longest) per species
  subs <- rbind(subs.misu, subs.genbank) # join back
  subs$ID <- apply(subs[, c("SPECIES.NEW", "SEQUENCE.ID")], 1, function(.x) paste0(.x, collapse="__")) # separator between species and everything else is "__", the rest is unchanged
  subs[, c("SOURCE", "SPECIES.NEW", "SEQUENCE.ID")] <- NULL # remove columns except 2
  subs <- subs[, c("ID", "SEQUENCE")] # ID first
  Write.fasta(subs, file=paste0("20_sets/", s, ".fasta"))
}
|
# Lab assignment 11_1 ------------------------------------------------------
# Simulated samples ---------------------------------------------------------
# Three series y1..y3 of length n: linear trend a + b1*t plus a cosine
# seasonal term b2*cos(2*pi*t/period) plus iid Gaussian noise eps.
n = 40
eps.sigma = 2
eps.a = 0
eps = rnorm(n, eps.a, eps.sigma)
# Intercepts
a1 = 1
a2 = 2
a3 = -1
# Slope / seasonal-amplitude pairs for y1, y2, y3
b1.1 = 1.5
b2.1 = 2
b1.2 = 3
b2.2 = 0.75
b1.3 = -2.1
b2.3 = -1
t = c(1:n)
y1 = a1 + b1.1*t + b2.1*cos(2*pi*t/5) + eps
y2 = a2 + b1.2*t + b2.2*cos(2*pi*t/4) + eps
y3 = a3 + b1.3*t + b2.3*cos(2*pi*t/3) + eps
# Inject two gross outliers into y3 to study the smoother's robustness.
y3[21] = -500 + eps[21]
y3[30] = 500 + eps[30]
# smoothing by the moving-average method -----------------------------------
# Moving-average smoothing of a series.
#
# For odd window width g each interior point becomes the plain mean of the
# g surrounding values. For even g a centered average is used in which the
# two outermost values of the (g+1)-point window enter with weight 1/2, so
# the window stays symmetric around the point. The first and last
# floor(g/2) values are returned unchanged.
#
# Args:
#   tseries: numeric vector with the series values.
#   g:       window width (positive integer).
# Returns:
#   numeric vector of the same length as tseries.
simple_mean_relaxation = function(tseries, g) {
  half <- floor(g / 2)
  smoothed <- tseries
  interior <- seq(half + 1, length(tseries) - half)
  if (g %% 2 == 1) {
    # Odd window: equal weights over g points.
    for (k in interior) {
      smoothed[k] <- mean(tseries[(k - half):(k + half)])
    }
  } else {
    # Even window: half weight on the two edge points, still divided by g.
    for (k in interior) {
      window <- tseries[(k - half):(k + half)]
      window[c(1, length(window))] <- window[c(1, length(window))] / 2
      smoothed[k] <- sum(window) / g
    }
  }
  return(smoothed)
}
# Smooth each series with window widths g = 3, 4, 5 and overlay the results
# on the raw series; then plot the residuals (raw minus smoothed) per g.
# y1 --------------
y1.g3 = simple_mean_relaxation(y1, 3)
y1.g4 = simple_mean_relaxation(y1, 4)
y1.g5 = simple_mean_relaxation(y1, 5)
ts.plot(ts(y1), ts(y1.g3), ts(y1.g4), ts(y1.g5),
        main = expression("Ряд" ~ y[1] ~ "и его сглаживание."),
        col = c('black', 'red', 'green', 'blue'))
legend("topleft", c("Исходные зн.", "g = 3", "g = 4", "g = 5"),
       col = c('black', 'red', 'green', 'blue'),
       bty = "n", cex=1, lty = c(1,1,1,1),
       y.intersp = 0.4, x.intersp = 0.5)
# y2 -----------
y2.g3 = simple_mean_relaxation(y2, 3)
y2.g4 = simple_mean_relaxation(y2, 4)
y2.g5 = simple_mean_relaxation(y2, 5)
ts.plot(ts(y2), ts(y2.g3), ts(y2.g4), ts(y2.g5),
        main = expression("Ряд" ~ y[2] ~ "и его сглаживание."),
        col = c('black', 'red', 'green', 'blue'))
legend("topleft", c("Исходные зн.", "g = 3", "g = 4", "g = 5"),
       col = c('black', 'red', 'green', 'blue'),
       bty = "n", cex=1, lty = c(1,1,1,1),
       y.intersp = 0.4, x.intersp = 0.5)
# y3 ----------
y3.g3 = simple_mean_relaxation(y3, 3)
y3.g4 = simple_mean_relaxation(y3, 4)
y3.g5 = simple_mean_relaxation(y3, 5)
ts.plot(ts(y3), ts(y3.g3), ts(y3.g4), ts(y3.g5),
        main = expression("Ряд" ~ y[3] ~ "и его сглаживание."),
        col = c('black', 'red', 'green', 'blue'))
legend("topleft", c("Исходные зн.", "g = 3", "g = 4", "g = 5"),
       col = c('black', 'red', 'green', 'blue'),
       bty = "n", cex=1, lty = c(1,1,1,1),
       y.intersp = 0.4, x.intersp = 0.5)
# residuals -----------------------------------------------------------------
# y1 -------
y1.e.3 = y1 - y1.g3
y1.e.4 = y1 - y1.g4
y1.e.5 = y1 - y1.g5
plot(y1.e.3 ~ t, main = expression("Диаграмма остатков" ~ y[1] ~ "для различного значения параметра g"), col = "red")
points(y1.e.4 ~ t, col = "green")
points(y1.e.5 ~ t, col = "blue")
legend("topleft", c("g = 3", "g = 4", "g = 5"), col = c('red', 'green', 'blue'),
       lty = c(1,1,1), bty = "n", pch = 1, cex=1,
       y.intersp = 0.5, x.intersp = 0.5)
# y2 -------
y2.e.3 = y2 - y2.g3
y2.e.4 = y2 - y2.g4
y2.e.5 = y2 - y2.g5
plot(y2.e.3 ~ t, main = expression("Диаграмма остатков" ~ y[2] ~ " для различного значения параметра g"), col = "red")
points(y2.e.4 ~ t, col = "green")
points(y2.e.5 ~ t, col = "blue")
legend("topleft", c("g = 3", "g = 4", "g = 5"), col = c('red', 'green', 'blue'),
       lty = c(1,1,1), bty = "n", pch = 1, cex=1,
       y.intersp = 0.5, x.intersp = 0.5)
# y3 -------
y3.e.3 = y3 - y3.g3
y3.e.4 = y3 - y3.g4
y3.e.5 = y3 - y3.g5
plot(y3.e.3 ~ t, main = expression("Диаграмма остатков" ~ y[3] ~ " для различного значения параметра g"), col = "red")
points(y3.e.4 ~ t, col = "green")
points(y3.e.5 ~ t, col = "blue")
legend("topleft", c("g = 3", "g = 4", "g = 5"), col = c('red', 'green', 'blue'),
       lty = c(1,1,1), bty = "n", pch = 1, cex=1,
       y.intersp = 0.5, x.intersp = 0.5)
|
/11_1/11_1.R
|
no_license
|
Morgolt/statistic
|
R
| false
| false
| 4,098
|
r
|
# Lab assignment 11_1 ------------------------------------------------------
# Simulated samples ---------------------------------------------------------
# Three series y1..y3 of length n: linear trend a + b1*t plus a cosine
# seasonal term b2*cos(2*pi*t/period) plus iid Gaussian noise eps.
n = 40
eps.sigma = 2
eps.a = 0
eps = rnorm(n, eps.a, eps.sigma)
# Intercepts
a1 = 1
a2 = 2
a3 = -1
# Slope / seasonal-amplitude pairs for y1, y2, y3
b1.1 = 1.5
b2.1 = 2
b1.2 = 3
b2.2 = 0.75
b1.3 = -2.1
b2.3 = -1
t = c(1:n)
y1 = a1 + b1.1*t + b2.1*cos(2*pi*t/5) + eps
y2 = a2 + b1.2*t + b2.2*cos(2*pi*t/4) + eps
y3 = a3 + b1.3*t + b2.3*cos(2*pi*t/3) + eps
# Inject two gross outliers into y3 to study the smoother's robustness.
y3[21] = -500 + eps[21]
y3[30] = 500 + eps[30]
# smoothing by the moving-average method -----------------------------------
# Moving-average smoothing of a series.
#
# For odd window width g each interior point becomes the plain mean of the
# g surrounding values. For even g a centered average is used in which the
# two outermost values of the (g+1)-point window enter with weight 1/2, so
# the window stays symmetric around the point. The first and last
# floor(g/2) values are returned unchanged.
#
# Args:
#   tseries: numeric vector with the series values.
#   g:       window width (positive integer).
# Returns:
#   numeric vector of the same length as tseries.
simple_mean_relaxation = function(tseries, g) {
  half <- floor(g / 2)
  smoothed <- tseries
  interior <- seq(half + 1, length(tseries) - half)
  if (g %% 2 == 1) {
    # Odd window: equal weights over g points.
    for (k in interior) {
      smoothed[k] <- mean(tseries[(k - half):(k + half)])
    }
  } else {
    # Even window: half weight on the two edge points, still divided by g.
    for (k in interior) {
      window <- tseries[(k - half):(k + half)]
      window[c(1, length(window))] <- window[c(1, length(window))] / 2
      smoothed[k] <- sum(window) / g
    }
  }
  return(smoothed)
}
# Smooth each series with window widths g = 3, 4, 5 and overlay the results
# on the raw series; then plot the residuals (raw minus smoothed) per g.
# y1 --------------
y1.g3 = simple_mean_relaxation(y1, 3)
y1.g4 = simple_mean_relaxation(y1, 4)
y1.g5 = simple_mean_relaxation(y1, 5)
ts.plot(ts(y1), ts(y1.g3), ts(y1.g4), ts(y1.g5),
        main = expression("Ряд" ~ y[1] ~ "и его сглаживание."),
        col = c('black', 'red', 'green', 'blue'))
legend("topleft", c("Исходные зн.", "g = 3", "g = 4", "g = 5"),
       col = c('black', 'red', 'green', 'blue'),
       bty = "n", cex=1, lty = c(1,1,1,1),
       y.intersp = 0.4, x.intersp = 0.5)
# y2 -----------
y2.g3 = simple_mean_relaxation(y2, 3)
y2.g4 = simple_mean_relaxation(y2, 4)
y2.g5 = simple_mean_relaxation(y2, 5)
ts.plot(ts(y2), ts(y2.g3), ts(y2.g4), ts(y2.g5),
        main = expression("Ряд" ~ y[2] ~ "и его сглаживание."),
        col = c('black', 'red', 'green', 'blue'))
legend("topleft", c("Исходные зн.", "g = 3", "g = 4", "g = 5"),
       col = c('black', 'red', 'green', 'blue'),
       bty = "n", cex=1, lty = c(1,1,1,1),
       y.intersp = 0.4, x.intersp = 0.5)
# y3 ----------
y3.g3 = simple_mean_relaxation(y3, 3)
y3.g4 = simple_mean_relaxation(y3, 4)
y3.g5 = simple_mean_relaxation(y3, 5)
ts.plot(ts(y3), ts(y3.g3), ts(y3.g4), ts(y3.g5),
        main = expression("Ряд" ~ y[3] ~ "и его сглаживание."),
        col = c('black', 'red', 'green', 'blue'))
legend("topleft", c("Исходные зн.", "g = 3", "g = 4", "g = 5"),
       col = c('black', 'red', 'green', 'blue'),
       bty = "n", cex=1, lty = c(1,1,1,1),
       y.intersp = 0.4, x.intersp = 0.5)
# residuals -----------------------------------------------------------------
# y1 -------
y1.e.3 = y1 - y1.g3
y1.e.4 = y1 - y1.g4
y1.e.5 = y1 - y1.g5
plot(y1.e.3 ~ t, main = expression("Диаграмма остатков" ~ y[1] ~ "для различного значения параметра g"), col = "red")
points(y1.e.4 ~ t, col = "green")
points(y1.e.5 ~ t, col = "blue")
legend("topleft", c("g = 3", "g = 4", "g = 5"), col = c('red', 'green', 'blue'),
       lty = c(1,1,1), bty = "n", pch = 1, cex=1,
       y.intersp = 0.5, x.intersp = 0.5)
# y2 -------
y2.e.3 = y2 - y2.g3
y2.e.4 = y2 - y2.g4
y2.e.5 = y2 - y2.g5
plot(y2.e.3 ~ t, main = expression("Диаграмма остатков" ~ y[2] ~ " для различного значения параметра g"), col = "red")
points(y2.e.4 ~ t, col = "green")
points(y2.e.5 ~ t, col = "blue")
legend("topleft", c("g = 3", "g = 4", "g = 5"), col = c('red', 'green', 'blue'),
       lty = c(1,1,1), bty = "n", pch = 1, cex=1,
       y.intersp = 0.5, x.intersp = 0.5)
# y3 -------
y3.e.3 = y3 - y3.g3
y3.e.4 = y3 - y3.g4
y3.e.5 = y3 - y3.g5
plot(y3.e.3 ~ t, main = expression("Диаграмма остатков" ~ y[3] ~ " для различного значения параметра g"), col = "red")
points(y3.e.4 ~ t, col = "green")
points(y3.e.5 ~ t, col = "blue")
legend("topleft", c("g = 3", "g = 4", "g = 5"), col = c('red', 'green', 'blue'),
       lty = c(1,1,1), bty = "n", pch = 1, cex=1,
       y.intersp = 0.5, x.intersp = 0.5)
|
## Exploratory Data Analysis course project, plot 3:
## energy sub-metering over 2007-02-01 .. 2007-02-02.
setwd("C:/Users/ctmun/OneDrive/Desktop/DS Explor Anal")
## FIX: the raw power-consumption file marks missing values with "?".
## Without na.strings those columns (incl. Sub_metering_1/2) are read as
## character and cannot be plotted as lines.
elecpwr <- read.table("C:/Users/ctmun/OneDrive/Desktop/DS Explor Anal/Proj I Pwr/household_power_consumption.txt",
                      header = TRUE, sep = ";", stringsAsFactors = FALSE,
                      na.strings = "?")
head(elecpwr, n=3)
class(elecpwr$Date)
## [1] "character"
library(dplyr)
## Parse the Date column (stored as dd/mm/yyyy) and keep only the two
## target days (Date == "yyyy-mm-dd" string comparison coerces correctly).
elecDate <- mutate(elecpwr, date= as.Date(Date, format="%d/%m/%Y"))
DateID1 <- "2007-02-01"
DateID2 <- "2007-02-02"
elecsub <- filter(elecDate, (date == DateID1 | date == DateID2))
dim(elecsub)
## [1] 2880 10
## Combine date and time into a single POSIXct timestamp for the x axis.
elecsubDTG <- mutate(elecsub, DTG=as.POSIXct(paste(elecsub$date, elecsub$Time), format="%Y-%m-%d %H:%M:%S"))
head(elecsubDTG, n=3)
png(filename = "Plot3.png", height = 480, width = 480)
plot(elecsubDTG$DTG, elecsubDTG$Sub_metering_1, type="l", xlab="", ylab = "Energy sub metering")
lines(elecsubDTG$DTG, elecsubDTG$Sub_metering_2, col="red")
lines(elecsubDTG$DTG, elecsubDTG$Sub_metering_3, col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col=c("black", "red", "blue"), lty=1)
dev.off()
|
/Plot3.R
|
no_license
|
TommyJM/ExData_Plotting1
|
R
| false
| false
| 1,092
|
r
|
## Exploratory Data Analysis course project, plot 3:
## energy sub-metering over 2007-02-01 .. 2007-02-02.
setwd("C:/Users/ctmun/OneDrive/Desktop/DS Explor Anal")
## FIX: the raw power-consumption file marks missing values with "?".
## Without na.strings those columns (incl. Sub_metering_1/2) are read as
## character and cannot be plotted as lines.
elecpwr <- read.table("C:/Users/ctmun/OneDrive/Desktop/DS Explor Anal/Proj I Pwr/household_power_consumption.txt",
                      header = TRUE, sep = ";", stringsAsFactors = FALSE,
                      na.strings = "?")
head(elecpwr, n=3)
class(elecpwr$Date)
## [1] "character"
library(dplyr)
## Parse the Date column (stored as dd/mm/yyyy) and keep only the two
## target days (Date == "yyyy-mm-dd" string comparison coerces correctly).
elecDate <- mutate(elecpwr, date= as.Date(Date, format="%d/%m/%Y"))
DateID1 <- "2007-02-01"
DateID2 <- "2007-02-02"
elecsub <- filter(elecDate, (date == DateID1 | date == DateID2))
dim(elecsub)
## [1] 2880 10
## Combine date and time into a single POSIXct timestamp for the x axis.
elecsubDTG <- mutate(elecsub, DTG=as.POSIXct(paste(elecsub$date, elecsub$Time), format="%Y-%m-%d %H:%M:%S"))
head(elecsubDTG, n=3)
png(filename = "Plot3.png", height = 480, width = 480)
plot(elecsubDTG$DTG, elecsubDTG$Sub_metering_1, type="l", xlab="", ylab = "Energy sub metering")
lines(elecsubDTG$DTG, elecsubDTG$Sub_metering_2, col="red")
lines(elecsubDTG$DTG, elecsubDTG$Sub_metering_3, col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col=c("black", "red", "blue"), lty=1)
dev.off()
|
## Copy an Oracle table (via RODBC/ODBC) into the local schema under a
## new name, then exercise some RODBC round-trips with USArrests.
#input the table name here
table_name <- "DS_20141028_RST_0ZV157LCHOGDG"
#input db or data link info here(either olive or analystdb)
database_name <- "analystdb"
#build strings
tbl <- paste(table_name, database_name , sep ="@")
sqlc <- paste("select * from ", tbl) #sep = "")
con <-odbcConnect("Olive11", uid="olive", pwd="olive_tr33s")
#sqlSave(con, test_table, "TEST_TABLE")
g <- sqlQuery(con, sqlc)
#d <- sqlQuery(con, "select * from TEST_TABLE")
## BUG FIX: the original called close(con) here and then kept using the
## closed connection for every statement below. The connection must stay
## open until the final close(con) at the end of the script.
#rename table to it fits the schema
ntable <- paste(table_name, "X", sep = "")# sep = "_")
try(sqlDrop(con, ntable, errors = FALSE), silent = TRUE)
sqlSave(con, g, table = ntable)
# NOTE(review): this second sqlSave writes the same data again under the
# default table name "g" -- confirm that is intended.
sqlSave(con, g, rownames = TRUE , addPK=TRUE)
## Example RODBC round-trips using the USArrests dataset.
sqlFetch(con, "USArrests", rownames = "state") # get the lot
foo <- cbind(state=row.names(USArrests), USArrests)[1:3, c(1,3)]
foo[1,2] <- 222
sqlUpdate(con, foo, "USArrests")
sqlFetch(con, "USArrests", rownames = "state", max = 5)
sqlDrop(con, "USArrests")
close(con)
|
/MN/sqlsave.R
|
no_license
|
ckeune/RWork
|
R
| false
| false
| 968
|
r
|
## Copy an Oracle table (via RODBC/ODBC) into the local schema under a
## new name, then exercise some RODBC round-trips with USArrests.
#input the table name here
table_name <- "DS_20141028_RST_0ZV157LCHOGDG"
#input db or data link info here(either olive or analystdb)
database_name <- "analystdb"
#build strings
tbl <- paste(table_name, database_name , sep ="@")
sqlc <- paste("select * from ", tbl) #sep = "")
con <-odbcConnect("Olive11", uid="olive", pwd="olive_tr33s")
#sqlSave(con, test_table, "TEST_TABLE")
g <- sqlQuery(con, sqlc)
#d <- sqlQuery(con, "select * from TEST_TABLE")
## BUG FIX: the original called close(con) here and then kept using the
## closed connection for every statement below. The connection must stay
## open until the final close(con) at the end of the script.
#rename table to it fits the schema
ntable <- paste(table_name, "X", sep = "")# sep = "_")
try(sqlDrop(con, ntable, errors = FALSE), silent = TRUE)
sqlSave(con, g, table = ntable)
# NOTE(review): this second sqlSave writes the same data again under the
# default table name "g" -- confirm that is intended.
sqlSave(con, g, rownames = TRUE , addPK=TRUE)
## Example RODBC round-trips using the USArrests dataset.
sqlFetch(con, "USArrests", rownames = "state") # get the lot
foo <- cbind(state=row.names(USArrests), USArrests)[1:3, c(1,3)]
foo[1,2] <- 222
sqlUpdate(con, foo, "USArrests")
sqlFetch(con, "USArrests", rownames = "state", max = 5)
sqlDrop(con, "USArrests")
close(con)
|
### Usage notes for these scripts:
# Almost every function or object depends on the flowCore (and often flowViz) package(s) from Bioconductor.
# To install these, run these commands in R:
# source("http://bioconductor.org/biocLite.R")
# biocLite("flowCore")
# biocLite("flowViz")
#
# The most useful script for an R newbie is 'summary.cyt'.
# It will take a flowSet (see flowCore documentation) and run some QA, gate, and get summary FL1.A (or other channel) data
# as well as experiment information like the time of day and events/µL concentration.
library(flowViz)
#########################
### Cytometer Gates ###
#########################
##################################
### Notes on cytometer changes ###
##################################
### Got new cytometer around 2011-02-22-ish (updated gates)
### Switched FL1-A and FL2-A sometime around 02-23. Was switched back relatively soon afterwards.
### FSC-A was tweaked around 2011-03-8 (early-ish March), changed gates.
### yeastGate
### Defines an SSC.A vs FSC.A gate. Includes only the yeast population
### from a flowSet
# As of 2011-10-26, it may be necessary to create a new yeastGate for diploids that truly cuts out dead cells.
# Used from 2011-02-22 to present, excludes debris/non-yeast
## Yeast population gates (SSC.A vs FSC.A polygons).
## NOTE(review): gates are assigned with <<- -- presumably so they land in
## the global environment even when this file is source()d inside a
## function; confirm before changing to <-.
yeastGate <<- polygonGate(filterId="Yeast",
	.gate=matrix(c(400000,10000000,10000000,400000,
	               10000,10000,2300000,60000),
	             ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","SSC.A"))))
oldNemyeastGate <<- polygonGate(filterId="Yeast",
	.gate=matrix(c(20000,7000000,1500000,100000,
	               10000,300000,8000000,10000),
	             ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","SSC.A"))))
NemyeastGate <<- polygonGate(filterId="Yeast",
	.gate=matrix(c(50000,7000000,1500000,50000,
	               10000,300000,4000000,10000),
	             ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","SSC.A"))))
# Used until cytometer was switched around 2011-02-22
oldyeastGate <<- polygonGate(filterId="Yeast",
	.gate=matrix(c(160000,1500000,1500000,160000, 0,0,200000,200000),
	             ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","SSC.A"))))
# Diploid Singlet gate (FSC.A vs FSC.H), used 2011-07-09 to present
# Diploids are slightly larger and have better separation between singlets/doublets
dipsingletGate <<- polygonGate(filterId="DipSingletGate",
	.gate=matrix(c(
		#x values
		7.5e5,13e5,18e5,15e5,6e5,
		#y values
		9e5,16e5,26e5,30e5,15e5),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
	)
)
oldNemdipsingletGate <<- polygonGate(filterId="DipSingletGate",
	.gate=matrix(c(
		#x values
		1e4,12e4,19.5e5,15e5,2e4,
		#y values
		9e4,10e4,26e5,30e5,15e5),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
	)
)
oldNemdipdoubletGate <<- polygonGate(filterId="DipDoubletGate",
	.gate=matrix(c(
		#x values
		2e4,20e5,30e5,35e5,25e5,8e5,
		#y values
		8e4,12.5e5,25e5,26e5,30e5,8.5e5),
		ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
	)
)
#as of 25feb2015
NemdipsingletGate <<- polygonGate(filterId="DipSingletGate",
	.gate=matrix(c(
		#x values
		1e4,8e4,20e5,15e5,18e4,
		#y values
		5e4,9.7e4,21e5,20e5,6e5),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
	)
)
NemdipdoubletGate <<- polygonGate(filterId="DipDoubletGate",
	.gate=matrix(c(
		#x values
		4.5e4,20e5,20e5,30e5,25e5,8e5,
		#y values
		5e4,8e5,25e5,26e5,35e5,8.5e5),
		ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
	)
)
# Diploid Doublet gate, used 2011-08-09 to present
# Diploids are slightly larger and have better separation between singlets/doublets
## Haploid singlet/doublet gates (FSC.A vs FSC.H polygons), one variant per
## cytometer configuration era.
hapdoubletGate <<- polygonGate(filterId="HaploidDoubletGate",
	.gate=matrix(c(
		#x values
		6.5e5,1.15e6,1.5e6,1.4e6,1.2e6,5e5,
		#y values
		5.75e5,9e5,1.3e6,1.4e6,1.5e6,6.5e5),
		ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
	)
)
oldNemhapdoubletGate <<- polygonGate(filterId="HaploidDoubletGate",
	.gate=matrix(c(
		#x values
		2e4,20e5,30e5,18e5,25e5,6.75e5,
		#y values
		8e4,12.5e5,25e5,26e5,30e5,8.5e5),
		ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
	)
)
NemhapdoubletGate <<- polygonGate(filterId="HaploidDoubletGate",
	.gate=matrix(c(
		#x values
		1e4,2e5,30e5,25e5,25e5,6.75e5,
		#y values
		2e4,5e4,20e5,30e5,5e5,8.5e5),
		ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
	)
)
# Used 2012-02-22 to present
hapsingletGate <<- polygonGate(filterId="HaploidSingletGate",
	.gate=matrix(c(
		#x values
		5e5,0.8e6,1.15e6,1e6,5e5,
		#y values
		8e5,1.05e6,1.5e6,1.8e6,1e6),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
	)
)
oldNemhapsingletGate <<- polygonGate(filterId="HaploidSingletGate",
	.gate=matrix(c(
		#x values
		1e4,6e4,19e5,15e5,2e4,
		#y values
		9e4,10e4,26e5,30e5,15e5),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
	)
)
NemhapsingletGatev2 <<- polygonGate(filterId="HaploidSingletGate",
	.gate=matrix(c(
		#x values
		1e4,5e4,8e5,10e5,2e4,
		#y values
		4e4,7e4,10e5,15e5,15e5),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
	)
)
#NF=> new flow cell installed
NF_NemhapsingletGate <<- polygonGate(filterId="HaploidSingletGate",
	.gate=matrix(c(
		#x values
		1e4,5e4,8e5,10e5,2e4,
		#y values
		2e4,5e4,7e5,10e5,10e5),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
	)
)
#Used with the new flow cell installed in the SORP cytometer in Dec 2014
NemhapsingletGate <<- polygonGate(filterId="HaploidSingletGate",
	.gate=matrix(c(
		#x values
		1e4,6.5e4,15e5,12e5,4e4,
		#y values
		4e4,8e4,16e5,18e5,4e5),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))))
####################
# Havens et al 2012#
####################
# Used in auxin paper (Havens 2012) to gate all yeast from non-yeast. Also excludes a portion
# of small-FSC.A, high-SSC.A cells (presumably dead).
auxinpaper_yeastGate <<- polygonGate(filterId="Yeast",
	.gate=matrix(c(400000,10000000,10000000,400000, 10000,10000,2300000,60000),
	             ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","SSC.A"))))
# Used in auxin paper to gate for singlets after gating with auxinpaper_yeastGate
auxinpaper_singletGate <<- polygonGate(filterId="DipSingletGate",
	.gate=matrix(c(
		#x values
		7.5e5,13e5,18e5,15e5,6e5,
		#y values
		9e5,16e5,26e5,30e5,15e5),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
	)
)
# Used in auxin paper to gate for doublets after gating with auxinpaper_yeastGate
auxinpaper_doubletGate <<- polygonGate(filterId="DipDoubletGate",
	.gate=matrix(c(
		#x values
		10e5,17e5,23e5,22e5,20e5,8e5,
		#y values
		8e5,12.5e5,17e5,20e5,22e5,8.5e5),
		ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
	)
)
# haploid singlet gate, used from 2011-02-22 to 2012-02-22
singletGate2 <<- polygonGate(filterId="Singlets",
	.gate=matrix(c(400000,3000000,3000000,160000, 620000,3500000,6000000,500000),
	             ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","FSC.H"))))
# Used until cytometer was switched around 2011-02-22
oldsingletGate <<- polygonGate(filterId="Singlets",
	.gate=matrix(c(160000,1500000,1400000,160000, 680000,5700000,6000000,750000),
	             ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","FSC.H"))))
# Used up to 2011-02-10
extraoldsingletGate <<- polygonGate(filterId="Singlets",
	.gate=matrix(c(160000,800000,800000,160000, 680000,3150000,3500000,750000),
	             ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","FSC.H"))))
## experimental gates
# mChGate - designed around data from cytometer experiment Nick did on 2012-9-19 (NB036)
# subsets cells with high FL3.A (mCherry) vs FL1.A (EYFP) output.
# Note that it was designed around cells that have both EYFP and mCherry and
# may not work for mCherry-only cells
# NOTE(review): `dimname=` below only works via partial matching of
# matrix()'s `dimnames` argument.
mChGate <<- polygonGate(filterId='mCherryGate',
	matrix(c(5e2,2e3,4e4,7e4,8e3,5e2, 2e3,2e3,1.8e4,9e4,8e4,4e3),
	       ncol=2,
	       nrow=6,
	       dimname=list(c(1,1,1,1,1,1),c("FL1.A","FL3.A")))
)
# Same motivation as the mChGate - separate EYFP+mCh cells from EYFP cells. In this case,
# subsets for EYFP-only cells. Again, may only work for this particular experiment type
EYFPGate <<- polygonGate(filterId='EYFPGate',
	matrix(c(0,4e3,3e4,3e4,3e3,0, 0,0,4e3,9e3,2e3,2e3),
	       ncol=2,
	       nrow=6,
	       dimname=list(c(1,1,1,1,1,1),c("FL1.A","FL3.A"))
	)
)
# Diploid Singlet gate, first created 2011-07-09
# Diploids are slightly larger and have better separation between singlets/doublets
dipsingletGate3 <<- polygonGate(filterId="DipSingletGate2",
	.gate=matrix(c(
		#x values
		5e5,13e5,18e5,15e5,6e5,
		#y values
		7e5,16e5,26e5,30e5,15e5),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
	)
)
# for diploids, highlights dead cells
deadGate <<- polygonGate(filterId="deadGate",
	.gate=matrix(c(7.5e5,13e5,3e6,15e5,6e5,
	               1e5,2e5,7.5e5,4e5,2e5),
	             ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","SSC.A")))
)
# for diploids, takes small-sized cells, maybe could use for excluding dead cells when combined with clustering
deadexcludeGate <<- polygonGate(filterId="deadGate",
	.gate=matrix(c(7.5e5,4e6,3e6,1.5e6,6e5,
	               0,0,7.5e5,8e5,2e5),
	             ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","SSC.A")))
)
# for diploids, is a little more generous than 'dipsingletGate' Combine with flowClust to clean it up
dipsingletGate2 <<- polygonGate(filterId="DipSingletGate",
	.gate=matrix(c(
		7.5e5,14e5,30e5,15e5,6e5,
		9e5,16e5,35e5,40e5,15e5),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))))
# excludes the big-cell subpopulation, it might be sick/weird/less comparable
# use !excludeBig to exclude them.
excludeBig <<- polygonGate(filterId="excludeBig",
	.gate=matrix(c(
		3e6,2e7,2e7,2e7,3e6,
		1e6,16e5,5e6,1e7,1e7),
		ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))))
# Rob's gate for E. coli
ecoliGate <<- polygonGate(filterId="E.coli",
	.gate=matrix(c(2e4,8e4,8e4,2e4, # x points
	               1,1,6e3,6e3), # y points
	             ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.H","SSC.H"))))
###########################
### Cytometer Scripts ###
###########################
### polygate:
### Convenience wrapper building a flowCore polygonGate from x/y vertex
### coordinates on a given pair of channels.
###
### Args:
###   x, y:     numeric vectors of equal length -- polygon vertex coordinates.
###   filterID: name of the resulting gate.
###   channels: length-2 character vector naming the x and y channels.
### Returns: a polygonGate object.
polygate <- function(x, y, filterID="newGate", channels=c("FSC.A","FSC.H")) {
	# Scalar || short-circuits; message now matches what is actually checked.
	if (length(x) != length(y) || !is.numeric(x) || !is.numeric(y)) {
		stop("x and y must be numeric coordinate vectors of the same length")
	}
	# BUG FIX: row dimnames must match the number of vertices. The original
	# hard-coded rep(NA,5), which made matrix() fail for any polygon with
	# a vertex count other than 5.
	gate <- polygonGate(filterId=filterID,
		.gate=matrix(c(x, y),
			ncol=2, nrow=length(x),
			dimnames=list(rep(NA, length(x)), channels)))
	return(gate)
}
### ploidy:
### Tries to guess the ploidy of a given flowframe.
### Uses FSC.A/FSC.H ratio.
### Diploids are typically 5um x 6um ellipsoids while
### haploids are typically 4um x 4um spheroids.
### As a result, diploids are longer and you get a larger 'area/volume' FSC.A.
### 'Width' might also be useful.
###
### Returns a length-2 character vector: c("Diploid"|"Haploid", ratio).
### NOTE(review): the 0.92 cutoff and the summary() indices below are
### empirical/position-dependent -- confirm against known strains and the
### current channel layout.
ploidy <- function(flowframe) {
	# Find FSC.A/FSC.H. This is close to 1 for diploids and close to .8 for haploids
	# Test this assumption!!!!!
	# [4,1] and [4,7] presumably pick the mean of FSC.A and FSC.H from the
	# flowFrame summary matrix -- fragile if channel order changes.
	fsca <- summary(flowframe)[4,1]
	fsch <- summary(flowframe)[4,7]
	quotient <- fsca/fsch
	if(quotient>0.92) {
		return(c("Diploid",quotient))
	} else{
		return(c("Haploid",quotient))
	}
}
### qa.gating:
### Simple QA check for a flowSet or flowFrame: flags frames whose event
### count falls below `threshold` (empty frames can break normalization,
### e.g. divide-by-zero).
###
### Args:
###   x:         a flowFrame or flowSet.
###   threshold: minimum acceptable number of events per frame (default 100).
### Returns: FALSE if all frames pass, otherwise the integer positions of
###          the failing frames.
qa.gating <- function(x, threshold=100) {
	print("Running QA...")
	if (inherits(x, "flowFrame")) {
		counts <- length(exprs(x[,1]))
	} else if (inherits(x, "flowSet")) {
		counts <- fsApply(x, length, use.exprs=TRUE)
	} else {
		# BUG FIX: the original only print()ed here and then crashed later
		# on the undefined `counts`; fail fast with a real error instead.
		stop("Input must be a flowSet or flowFrame")
	}
	# Positions of frames with fewer events than the threshold
	# (which() replaces the original grep(TRUE, ...) on a boolean vector).
	counts.failed.position <- which(counts < threshold)
	if (length(counts.failed.position) > 0) {
		print("QA resulted in 1 or more warnings.")
		return(counts.failed.position)
	} else {
		print("QA succeeded")
		return(FALSE)
	}
}
### fl1transform:
### Normalizes FL1.A values in a flowSet/flowFrame.
### transform="fscanorm": FL1.A <- FL1.A/FSC.A * 10^4, making FL1.A roughly
###   proportional to fluorescence per cell volume (the constant only keeps
###   values in a human-readable integer range).
### transform="log": simple log transform of FL1.A instead.
### transform=F (default): no transformation of the set.
### 0-valued FL1.A events are always removed first (likely artifacts that
### break some transforms); frames failing QA (no events) are left untouched.
fl1transform <- function(x, transform=F) {
	# Handle both flowFrames and flowSets.
	# NOTE(review): a bare flowFrame is always FSC.A-normalized here,
	# ignoring the `transform` argument -- behavior preserved from the
	# original; confirm whether that is intended.
	x.class <- class(x)[1]
	if (x.class == "flowFrame") {
		return(transform(x, FL1.A=FL1.A/FSC.A*10^4))
	}
	# Protect the input from the modifications
	x <- x[seq_along(x)]
	# Do QA: FALSE if every frame has >= 1 event, else the failing positions.
	qa.result <- qa.gating(x, threshold=1)
	# Remove all 0-valued fluorescence results.
	# These are very likely to be artifacts and screw up some transforms.
	print("Removing 0-valued fluorescence outliers")
	x <- Subset(x, rectangleGate(FL1.A=c(0.001, Inf)))
	# Per-object transformation selected by the `transform` argument.
	# (The base/flowCore transform() function is still found despite the
	# argument of the same name, because R skips non-function bindings
	# when resolving a call.)
	trans <- function(ff) {
		if (identical(transform, "fscanorm")) {
			transform(ff, FL1.A=FL1.A/FSC.A*10^4)
		} else if (identical(transform, "log")) {
			transform(ff, FL1.A=log(FL1.A))
		} else if (isFALSE(transform)) {
			ff  # no transformation requested
		} else {
			stop("No legitimate transform set. Use transform=\"log\" or transform=\"fscanorm\".")
		}
	}
	if (identical(qa.result, FALSE)) {
		# All frames passed QA: transform the whole set at once.
		x <- trans(x)
	} else {
		# BUG FIX: the original looped over the FAILED positions and
		# assigned trans(x) -- the transformed *whole set* -- into single
		# frame slots. Per the message below, frames with too few cells
		# must be skipped and every OTHER frame transformed individually.
		# (Also fixes `if (!qa.result)` erroring on a length>1 condition.)
		for (i in seq_along(x)) {
			if (!(i %in% qa.result)) {
				x[[i]] <- trans(x[[i]])
			}
		}
		cat(paste(
			"### Too few cells at this gating level for frame(s) \n### ",
			paste(qa.result, collapse=", "),
			".\n### These frames were not normalized.\n\n", sep=""))
	}
	return(x)
}
### flsummary:
### Get summary statistics (mean/median/sd of 'channel', timing, event
### concentration) for every frame of a flowSet. Optionally appends the
### quarter-split means (split=T) and higher moments (moments=T).
### Returns a data frame with one row per frame, keyed by filename.
flsummary <- function(flowset, channel="FL3.A", moments=F, split=F, transform=F) {
  # Number of frames (wells) in the flowSet
  n_experiment <- length(flowset)
  # Acquisition start time of each frame, in minutes of the day.
  # $BTIM keyword is "HH:MM:SS:cs" (centiseconds).
  btime_raw <- fsApply(flowset, function(x) as.numeric(unlist(strsplit(keyword(x)$`$BTIM`, split=":"))))
  btime <- apply(btime_raw, 1, function(x) x[1]*60 + x[2] + x[3]/60 + x[4]/6000)
  # Minutes elapsed since the earliest frame
  time <- btime - min(btime)
  # Acquisition time - how long it took to take the sample, in seconds
  atime <- fsApply(flowset, function(x) as.numeric(keyword(x)$`#ACQUISITIONTIMEMILLI`)/1000)
  events <- fsApply(flowset, function(x) length(x[, 1]), use.exprs=T)
  # Sample volume in uL ($VOL keyword is in nL)
  uL <- fsApply(flowset, function(x) as.integer(keyword(x)$`$VOL`)/1000)
  conc <- events/uL
  # Frames with fewer than 100 events, flagged below
  # (replaces the old element-by-element for loop)
  warnings <- which(events < 100)
  fl_mean <- fsApply(flowset, function(x) mean(x[, channel]), use.exprs=T)
  fl_median <- fsApply(flowset, function(x) median(x[, channel]), use.exprs=T)
  fl_sd <- fsApply(flowset, function(x) sd(x[, channel]), use.exprs=T)
  fl <- data.frame(fl_mean, fl_median, fl_sd)
  colnames(fl) <- paste(channel, c("mean", "median", "sd"), sep="")
  # Do we want mean fl values for data split into 4 evenly sized chunks?
  if (split == T) {
    split_table <- fsApply(flowset, splitFrame)
    split_table <- data.frame(matrix(unlist(split_table), ncol=4, byrow=T))
    colnames(split_table) <- paste("split", 1:4, sep="")
    fl <- cbind(fl, split_table)
  }
  # Do we want the variance, skewness and kurtosis as well?
  if (moments == T) {
    # Single load of the 'moments' package (was loaded twice before)
    library(moments)
    fl_var <- data.frame(fsApply(flowset, function(x) var(x[, channel]), use.exprs=T))
    fl_skew <- data.frame(fsApply(flowset, function(x) skewness(x[, channel]), use.exprs=T))
    fl_kurt <- data.frame(fsApply(flowset, function(x) kurtosis(x[, channel]), use.exprs=T))
    fl_moments <- data.frame(fl_var, fl_skew, fl_kurt)
    colnames(fl_moments) <- paste(channel, c("var", "skew", "kurt"), sep="")
    fl <- cbind(fl, fl_moments)
  }
  # Source filename (GUID keyword minus the .fcs extension)
  file <- fsApply(flowset, function(x) strsplit(keyword(x)$GUID, ".fcs")[[1]])
  colnames(file) <- "file"
  if (length(warnings) != 0) {
    warnings <- paste(warnings, collapse=", ")
    print(paste("Warning: frame(s)", warnings, "had less than 100 events in this gate."))
  }
  # Insert empty strain and treatment columns (filled in by callers)
  strain <- matrix(nrow=n_experiment)
  treatment <- matrix(nrow=n_experiment)
  # Put it all together
  flsummary <- cbind(time, btime, atime, events, conc, fl, file, strain, treatment)
  # Make rows filename keys
  rownames(flsummary) <- file
  # Rename the 'mean', 'median', and 'sd' columns to reflect transformations done or channel used.
  # 'FL1.A' = no transformation, 'FL1_FSC' = "fscanorm", 'log' = "log"
  flsummary <- renameflcols(flsummary, channel=channel, transform=transform)
  return(flsummary)
}
# Renaming helper for flsummary output, kept separate for ease of use.
# Rewrites the <channel>mean/median/sd column names with a prefix that
# records which transform produced them ("FL1_FSC" or "log").
# With transform=F the frame is returned untouched.
renameflcols <- function(x, channel="FL1.A", transform=F) {
  # No transform requested: nothing to rename
  if (transform == F) {
    return(x)
  }
  # Map the transform name to its column prefix
  if (transform == "fscanorm") {
    tname <- "FL1_FSC"
  } else if (transform == "log") {
    tname <- "log"
  } else {
    stop("invalid transform")
  }
  for (stat in c("mean", "median", "sd")) {
    hits <- grep(stat, paste(channel, colnames(x), sep=""))
    colnames(x)[hits] <- paste(tname, stat, sep="")
  }
  x
}
### summary.cyt:
### Gates a sample to all yeast, then singlets, then doublets.
### Does the work of singletsummary.cyt, doubletsummary.cyt, yeastsummary.cyt.
### Returns a list of data frames (output$yeast, output$singlets,
### output$doublets), or a single data frame when only= is set.
summary.cyt <- function(
  flowset,
  transform=F,
  channel="FL1.A",
  ploidy=F,
  moments=F,
  split=F,
  only=F) {
  # Number of experiments
  n_experiments <- length(flowset)
  # FSC.A is the normalization denominator, so "fscanorm" makes no sense
  # on channel="FSC.A" -- fall back to no transform.
  # (&& replaces the elementwise & in this scalar condition.)
  if (channel == "FSC.A" && transform == "fscanorm") {
    print("Channel FSC.A selected with no transform= setting set.")
    print("Defaulting to no transform (set transform=\"log\" for log transform)")
    transform <- F
  }
  # Transform FL1.A
  if (transform != F) {
    print(paste("Transforming FL1.A using",
                transform,
                "transform..."
                )
          )
    flowset <- fl1transform(flowset, transform=transform)
  }
  # Gate the samples with the ploidy-appropriate gate set
  if (ploidy == "haploid") {
    print("Gating with haploid gates...")
    yeast <- Subset(flowset, yeastGate)
    singlets <- Subset(yeast, hapsingletGate)
    doublets <- Subset(yeast, hapdoubletGate)
  } else if (ploidy == "diploid") {
    print("Gating with diploid gates...")
    yeast <- Subset(flowset, yeastGate)
    singlets <- Subset(yeast, dipsingletGate)
    doublets <- Subset(yeast, dipdoubletGate)
  } else {
    stop('Error: You must define ploidy="haploid" or ploidy="diploid"')
  }
  if (only == F) {
    # Summarize each subset
    print("Summarizing all yeast events...")
    yeastsum <- flsummary(yeast, channel=channel, moments=moments, split=split, transform=transform)
    print("Summarizing doublets events...")
    doubletsum <- flsummary(doublets, channel=channel, moments=moments, split=split, transform=transform)
    print("Summarizing singlets events...")
    singletsum <- flsummary(singlets, channel=channel, moments=moments, split=split, transform=transform)
    summary_list <- list(yeast=yeastsum, singlets=singletsum, doublets=doubletsum)
    return(summary_list)
  }
  # only= asked for a single subset
  if (only == "singlets") {
    print("Summarizing singlets events...")
    return(flsummary(singlets, channel=channel, moments=moments, split=split, transform=transform))
  } else if (only == "doublets") {
    print("Summarizing doublets events...")
    return(flsummary(doublets, channel=channel, moments=moments, split=split, transform=transform))
  } else if (only == "yeast") {
    print("Summarizing all yeast events...")
    return(flsummary(yeast, channel=channel, moments=moments, split=split, transform=transform))
  } else {
    # stop() now carries the message (was print() followed by a bare stop())
    stop("'only' must be 'singlets','doublets', or 'yeast'")
  }
}
### Nemsummary.cyt: same as summary.cyt but using the gates calibrated
### for the NemLab cytometer (2013). Returns a list of data frames
### (yeast/singlets/doublets) or a single data frame when only= is set.
Nemsummary.cyt <- function(
  flowset,
  transform=F,
  channel="FL1.A",
  ploidy=F,
  moments=F,
  split=F,
  only=F) {
  # Number of experiments
  n_experiments <- length(flowset)
  # FSC.A is the normalization denominator, so "fscanorm" makes no sense
  # on channel="FSC.A" -- fall back to no transform.
  # (&& replaces the elementwise & in this scalar condition, matching
  # the fix in summary.cyt.)
  if (channel == "FSC.A" && transform == "fscanorm") {
    print("Channel FSC.A selected with no transform= setting set.")
    print("Defaulting to no transform (set transform=\"log\" for log transform)")
    transform <- F
  }
  # Transform FL1.A
  if (transform != F) {
    print(paste("Transforming FL1.A using",
                transform,
                "transform..."
                )
          )
    flowset <- fl1transform(flowset, transform=transform)
  }
  # Gate the samples with the NemLab ploidy-appropriate gates
  if (ploidy == "haploid") {
    print("Gating with haploid gates...")
    yeast <- Subset(flowset, NemyeastGate)
    singlets <- Subset(yeast, NemhapsingletGate)
    doublets <- Subset(yeast, NemhapdoubletGate)
  } else if (ploidy == "diploid") {
    print("Gating with diploid gates...")
    yeast <- Subset(flowset, NemyeastGate)
    singlets <- Subset(yeast, NemdipsingletGate)
    doublets <- Subset(yeast, NemdipdoubletGate)
  } else {
    stop('Error: You must define ploidy="haploid" or ploidy="diploid"')
  }
  if (only == F) {
    # Summarize each subset
    print("Summarizing all yeast events...")
    yeastsum <- flsummary(yeast, channel=channel, moments=moments, split=split, transform=transform)
    print("Summarizing doublets events...")
    doubletsum <- flsummary(doublets, channel=channel, moments=moments, split=split, transform=transform)
    print("Summarizing singlets events...")
    singletsum <- flsummary(singlets, channel=channel, moments=moments, split=split, transform=transform)
    summary_list <- list(yeast=yeastsum, singlets=singletsum, doublets=doubletsum)
    return(summary_list)
  }
  # only= asked for a single subset
  if (only == "singlets") {
    print("Summarizing singlets events...")
    return(flsummary(singlets, channel=channel, moments=moments, split=split, transform=transform))
  } else if (only == "doublets") {
    print("Summarizing doublets events...")
    return(flsummary(doublets, channel=channel, moments=moments, split=split, transform=transform))
  } else if (only == "yeast") {
    print("Summarizing all yeast events...")
    return(flsummary(yeast, channel=channel, moments=moments, split=split, transform=transform))
  } else {
    # stop() now carries the message (was print() followed by a bare stop())
    stop("'only' must be 'singlets','doublets', or 'yeast'")
  }
}
### yeastIntSplit:
### Splits a flowSet or flowFrame by FSC.A into n equally-spaced
### intervals (nsplit defaults to 3) and reports the mean/sd of
### FSC-normalized FL1.A per interval.
### Returns a data frame with one row per interval.
yeastIntSplit <- function(flowset, nsplit=3){
  # Upper end of the FSC.A range covered by the intervals
  maxval <- 800000
  # Normalize FL1.A by FSC.A (temporary, only for this summary)
  flowset <- transform(flowset, `FL1.A` = `FL1.A`/`FSC.A`)
  returnedvalues <- matrix(nrow=nsplit, ncol=6)
  for (i in seq_len(nsplit)) {
    localmin <- floor(1 + (i-1)*maxval/nsplit)
    localmax <- ceiling(i*maxval/nsplit)
    tempgate <- rectangleGate("FSC.A"=c(localmin, localmax))
    tempSet <- Subset(flowset, tempgate)
    currentFL1.As <- exprs(tempSet[, "FL1.A"])
    returnedvalues[i, 1] <- i
    # mean() of an empty interval is NaN; leave the cell NA in that case.
    # (The old check compared the numeric mean against the string "NaN".)
    if (!is.nan(mean(currentFL1.As))) {
      returnedvalues[i, 2] <- mean(currentFL1.As)
    }
    returnedvalues[i, 3] <- sd(currentFL1.As)
    returnedvalues[i, 4] <- localmin
    returnedvalues[i, 5] <- localmax
    returnedvalues[i, 6] <- length(currentFL1.As)
  }
  finaltable <- data.frame(section=returnedvalues[, 1], mean=returnedvalues[, 2], sd=returnedvalues[, 3],
                           min=returnedvalues[, 4], max=returnedvalues[, 5], n=returnedvalues[, 6])
  return(finaltable)
}
### yeastThreePopSplit (a.k.a. yeastSampSplit):
### Splits a flowFrame by its FSC.A values into three equally-sized
### populations (Low/Mid/High) and reports mean/sd of FL1.A for each.
### Returns a data frame with one row per population.
yeastThreePopSplit <- function(flowframe){
  # Fixed: the original called flowFrame2table() (lowercase t), which
  # does not exist -- the helper defined below is flowFrame2Table().
  flowFrametable <- flowFrame2Table(flowframe)
  flowFrametable <- flowFrametable[order(flowFrametable$FSC.A), ]
  # Total event count, used to cut the sorted table into thirds
  n <- length(flowFrametable[, 1])
  firstpop <- flowFrametable[1:(floor(n/3)), ]
  secondpop <- flowFrametable[(ceiling(n/3)):(floor(n*2/3)), ]
  thirdpop <- flowFrametable[(ceiling(n*2/3)):n, ]
  poplistmeans <- c(mean(firstpop$FL1.A), mean(secondpop$FL1.A), mean(thirdpop$FL1.A))
  poplistsds <- c(sd(firstpop$FL1.A), sd(secondpop$FL1.A), sd(thirdpop$FL1.A))
  poptable <- data.frame(size=c("Low", "Mid", "High"), mean=poplistmeans, sd=poplistsds)
  return(poptable)
}
### flowFrame2Table:
### Generates a full table (data frame) of a flowFrame's expression
### values, with the frame's channel names as column labels.
flowFrame2Table <- function(flowframe) {
  # data.frame() has no 'ncol' argument; the old ncol=10 created a bogus
  # literal column named "ncol", which then broke the colnames assignment.
  flowframetable <- data.frame(exprs(flowframe))
  colnames(flowframetable) <- colnames(flowframe)
  return(flowframetable)
}
# Thalf scripts
# Most of these are junk and only work with properly-formatted data frames
# which contain values of 'time', 'mean', 'strain', and 'treatment'.
# Also assumes that data frame is sorted by time on some level
# For some reason this works even when the treatment column is full of NA values
#
# thalfall: fits a logistic decay (via iaaregress) to every
# strain+treatment combination in x and returns one row per combination
# with the half-max time, the min/max fluorescence bounds used, and the
# strain/treatment labels. minval=F means "estimate the floor from the
# fit"; a numeric minval is passed through to predict.thalf.
thalfall <- function(x,minval=F) {
#HACK
# Combine strain and treatment into a single grouping key; ",," is the
# separator so it can be split apart again below.
strain_treatment <- paste(x$strain,x$treatment,sep=",,")
all_levels <- levels(as.factor(strain_treatment))
x$strain_treatment <- strain_treatment
# NOW I KNOW THE DATA FRAME SIZE
tablelen <- length(all_levels)
thalftable <- data.frame(matrix(ncol=5,nrow=tablelen))
colnames(thalftable) <- c("thalf","min","max","strain","treatment")
for (i in seq(along=all_levels)) {
current_subset <- subset(x,strain_treatment==all_levels[i])
# Generate fit object
current_fit <- iaaregress(current_subset)
# Lower asymptote of the 4-parameter fit (note: shadows base::c locally,
# but c(...) calls still resolve to the function)
c <- current_fit$coefficients[[2]]
if (minval == F) {
# Let predict.thalf derive the floor; record the same estimate it uses
current_thalf <- predict.thalf(current_fit)
minval_used <- max(c(min(current_fit$data$mean),c))
} else {
current_thalf <- predict.thalf(current_fit,minval=minval)
minval_used <- minval
}
maxval <- current_fit$data[1,"mean"] # First fluorescence data point
thalftable[i,1] <- current_thalf
thalftable[i,2] <- minval_used
thalftable[i,3] <- maxval
# Split the ",,"-joined key back into its strain and treatment parts
thalftable[i,4:5] <- strsplit(all_levels[i],split=",,")[[1]]
}
return(thalftable)
}
# Expects a data frame with columns of mean and time values.
# Convenience wrapper: fit the logistic model, then read off the
# half-maximal time.
getthalf <- function(x, minval=0) {
  predict.thalf(iaaregress(x), minval=minval)
}
### Should make qplot.logistic into a geom, (geom_logistic?)
### This will allow grouping more easily
### qplot.logistic:
### Makes plotting time series + fit + thalf a little easier.
### Assumes that you have 'time' and 'mean' columns.
qplot.logistic <- function(timeseriesdata, minval=F) {
  # Fit the logistic model to the time series
  fitobject <- iaaregress(timeseriesdata)
  # Half-max time, with an optional caller-supplied floor
  if (minval == F) {
    thalf <- predict.thalf(fitobject)
  } else {
    thalf <- predict.thalf(fitobject, minval=minval)
  }
  # Vertical position for the annotation: 3/4 up the data range
  yvals <- timeseriesdata[, "mean"]
  textpos <- range(yvals)[1] + 0.75 * diff(range(yvals))
  # Scatter of the data, fit curve, and a labelled line at thalf
  plt <- qplot(data=timeseriesdata, time, mean) +
    geom_line(data=predict.logistic(fitobject), color="blue") +
    geom_vline(xintercept=thalf, color="red") +
    geom_text(aes(x=thalf + 0.1 * max(timeseriesdata[, "time"]), y=textpos),
              label=paste("t½ =", signif(thalf, digits=3)))
  return(plt)
}
### predict.logistic:
### Generates a data table of fitted values from a logistic fit object,
### sampled at 100 evenly-spaced time points from 0 to the last
### observed time. Column names are copied from the fit's data, so the
### result plots directly against the original series.
predict.logistic <- function(fitobject) {
  # Generate table for time vs predicted mean
  # (removed the unused 'xrange' local from the original)
  predicted <- data.frame(x=seq(0, max(fitobject$data[, 1]), length.out=100), y=NA)
  predicted[, 2] <- predict(fitobject, newdata=predicted)
  colnames(predicted) <- colnames(fitobject$data)[1:2]
  return(predicted)
}
### predict.thalf:
### Estimates t1/2 (time at half-maximal fluorescence) from a logistic
### fit model (drc::drm). Currently only built for a 4-parameter model.
### You can specify the 'min value' (e.g. from steady state data) with
### the minval argument; minval=0 (the default) derives the floor from
### the fit and the data instead, so an explicit minval of exactly 0 is
### indistinguishable from the default.
predict.thalf <- function(fitobject, minval=0) {
  coefficients <- fitobject$coefficients
  b <- coefficients[[1]]  # slope
  c <- coefficients[[2]]  # lower asymptote
  d <- coefficients[[3]]  # upper asymptote
  e <- coefficients[[4]]  # inflection point
  maxval <- fitobject$data[1, "mean"] # First fluorescence data point
  if (minval == 0) {
    # Floor: whichever is larger of the observed minimum and the fitted
    # lower asymptote
    minval <- max(c(min(fitobject$data$mean), c))
  }
  halfmax <- mean(c(maxval, minval))
  # Invert the 4-parameter logistic f(x) = c + (d-c)/(1+exp(b(x-e)))
  # at f(x) = halfmax (non-log model). The dead log-model computation
  # that was immediately overwritten has been removed.
  thalf <- log((d - c)/(halfmax - c) - 1)/b + e
  return(thalf)
}
### iaaregress:
### Does a logistic regression on a properly-formatted table with
### 'mean' and 'time' columns, using drc::drm.
### param selects the model: 3, 4 (default), or 5 parameters.
### Note: the 4-parameter fit uses the log-logistic LL.4(), i.e. x and
### e are replaced by log(x) and log(e).
iaaregress <- function(table, param=4) {
  if (param == 4) {
    # 4-parameter log-logistic model
    # f(x) = c + \frac{d-c}{(1+\exp(b(\log(x) - \log(e))))}
    regress0 <- drm(data=table, mean~time, fct = LL.4()) # log model
    # regress0 <- drm(data=table, mean~time, fct = L.4()) # non-log model
  } else if (param == 3) {
    # 3-parameter model
    # f(x) = c + \frac{d-c}{(1+\exp(b*x)}
    regress0 <- drm(data=table, mean~time, fct = L.3())
  } else if (param == 5) {
    # 5-parameter model (sometimes called the Boltzmann model)
    # f(x) = c + \frac{d-c}{(1+\exp(b(x - e)))^f}
    regress0 <- drm(data=table, mean~time, fct = L.5())
  } else {
    # The original silently fell through with 'regress0' undefined for
    # any other value; fail with a clear error instead.
    stop("param must be 3, 4, or 5")
  }
  return(regress0)
}
### Cytometer data time series normalization
### Assumes:
### 1) a data column with 'strain type' called "strain"
### 2) a data column with 'treatment type' called "treatment"
### 3) a data column with the values to normalize called "mean"
### 4) a data column for relative time passed called, "time"
### 5) a "strain" type called "W303", wherefrom the mean will be subtracted
### 6) That you want to normalize to the very first time point's "mean" value
cyt.normalize <- function(timeseries) {
# Figure out the metadata
strains <- levels(factor(timeseries[,"strain"]))
print(paste("Strains:",paste(strains,collapse=", "),sep=" "))
treatments <- levels(factor(timeseries[,"treatment"]))
# factor() turned numeric treatments into strings; convert them back
if (typeof(timeseries[,"treatment"])=="double") {
treatments <- as.double(treatments)
}
print(paste("Treatments:",paste(treatments,collapse=", "),sep=" "))
# Subtract off mean of all W303 'mean' values (autofluorescence background)
w303 <- mean(subset(timeseries,strain=="W303")$mean)
timeseries$mean <- timeseries$mean-w303
# Remove W303 values because the differences will be hugely exaggerated
# if normalized to the first point
timeseries <- subset(timeseries,strain!="W303")
# Reset rownames to sequential keys; the loop below indexes rows by name
rownames(timeseries) <- 1:length(timeseries[,1])
# Divide by first time value for each strain+treatment subset.
# NOTE: relies on the input being sorted so that current_rows[1] is the
# earliest time point of each subset (see assumption in the header).
for (i in strains) {
for (j in treatments) {
current_subset <- subset(timeseries,strain==i&treatment==j)
current_rows <- rownames(current_subset)
first_row <- timeseries[current_rows[1],]
# suppressWarnings: empty strain+treatment combinations yield NA
# row names here, which would otherwise warn on assignment
suppressWarnings(timeseries[current_rows,"mean"] <- timeseries[current_rows,"mean"]/first_row$mean)
}
}
return(timeseries)
}
# splitSet:
# Take a flowSet and, for every frame, split its events into n
# contiguous, (nearly) equally-sized pieces in acquisition order and
# report the mean fluorescence (column 3) of each piece.
# Returns a data frame with one row per frame and columns mean1..meanN.
splitSet <- function(flowset, n=4) {
  # Number of frames. (The original read length(group2), an undefined
  # global, instead of the 'flowset' argument.)
  nframes <- length(flowset)
  fltable <- data.frame(matrix(nrow=nframes, ncol=n))
  colnames(fltable) <- paste("mean", seq_len(n), sep="")
  for (i in seq_len(nframes)) {
    # Raw fluorescence data of frame i (third channel)
    fl_raw <- exprs(flowset[[i]][, 3])
    fl_length <- length(fl_raw)
    piece_size <- floor(fl_length/n)
    # Piece k covers ((k-1)*piece_size+1)..(k*piece_size); the last
    # piece absorbs the remainder so every event is counted once.
    # (Generalizes the previously hard-coded 4-way split; results are
    # identical for the default n=4.)
    lo <- (seq_len(n) - 1) * piece_size + 1
    hi <- c(seq_len(n - 1) * piece_size, fl_length)
    fltable[i, ] <- mapply(function(a, b) mean(fl_raw[a:b]), lo, hi)
  }
  return(fltable)
}
# splitFrame:
# Split one flowFrame's events (third channel, in acquisition order)
# into n contiguous, (nearly) equally-sized pieces and return a
# one-row data frame of per-piece means named mean1..meanN.
splitFrame <- function(flowframe, n=4) {
  # get the raw fluorescence data
  fl_raw <- exprs(flowframe[, 3])
  fl_length <- length(fl_raw)
  piece_size <- floor(fl_length/n)
  # Piece k covers ((k-1)*piece_size+1)..(k*piece_size); the last piece
  # absorbs the remainder. (Generalizes the previously hard-coded 4-way
  # split; identical output for the default n=4.)
  lo <- (seq_len(n) - 1) * piece_size + 1
  hi <- c(seq_len(n - 1) * piece_size, fl_length)
  fl_vec <- mapply(function(a, b) mean(fl_raw[a:b]), lo, hi)
  names(fl_vec) <- paste("mean", seq_len(n), sep="")
  fltable <- data.frame(t(fl_vec))
  return(fltable)
}
# Takes in a flowSet and checks for empty flowFrames (flowCore's Subset
# fails if there are no events in a flowFrame).
# Returns a flowSet, with empty frames removed if necessary, and posts
# a note about those which were removed.
qa.flowSet <- function(flowset_in) {
  ### TODO:
  ### Make sure all columns have . substituted for -
  # Positions of frames with zero events.
  # (which() over a typed vapply replaces the old grow-by-c() for loop.)
  empty <- vapply(seq_len(length(flowset_in)),
                  function(i) length(exprs(flowset_in[[i]][, 1])) == 0,
                  logical(1))
  pos <- which(empty)
  if (length(pos) > 0) {
    # remove empty frames.
    flowset_out <- flowset_in[seq_len(length(flowset_in))[-pos]]
    print(paste("The following frames had no events and were removed: ",paste(pos,",",sep=""),".",sep=""))
    print(paste("The flowSet is now ",length(flowset_out)," frames long."))
    return(flowset_out)
  } else {
    # unchanged flowSet
    return(flowset_in)
  }
}
#outputs data frame formatted with several parameters used for modeling.
#assumes diploids (ploidy= overridable) and only returns singlet data
#requires flowset and strain vector
# on 2012-1-3, replaced 'split' FL1/FSC values by background-subtracted, changing the order of columns. All 'modelformat' data needs to be reprocessed.
modelingFormat <- function(flowset,strain_vec,baseline="noYFP",normalize=F,ploidy="diploid") {
# Make sure strain vector includes correct baseline value
if ( sum(as.numeric(strain_vec==baseline)) == 0 ) {
stop("No baseline strain found in strain_vec (default is noYFP)")
}
# Generate data frames from which to take data
# Raw FL1.A table
raw <- summary.cyt(flowset,transform=F,only="singlets",split=T,ploidy=ploidy)
# Raw FSC.A table
# fsc <- summary.cyt(flowset,channel="FSC.A",only="singlets")
# Normalized data table
# fl1_fsc <- summary.cyt(flowset,transform="fscanorm",only="singlets",split=T)
out <- raw
# Metadata columns: strain comes from the caller, the rest are filled
# in later by downstream scripts
out$grp <- NA
out$strain <- strain_vec
out$afb <- NA
out$rep <- NA
# Rename the channel summary columns to short generic names
FL_idx <- which(colnames(out)=="FL1.Amean")
FL_idx <- c(FL_idx,which(colnames(out)=="FL1.Amedian"))
FL_idx <- c(FL_idx,which(colnames(out)=="FL1.Asd"))
colnames(out)[FL_idx] <- c("FL1.A","median","sd")
# out <- cbind(out,FSC.A=fsc$FSC.Amean)
# out <- cbind(out,FL1_FSC=fl1_fsc$FL1_FSCmean)
# out$FL1_FSC_norm <- out$FL1_FSC-mean(subset(out,strain==baseline)$FL1_FSC)
# Changed 2012-1-3: quarter-split columns renamed to _bs_ names
colnames(out)[which(colnames(out)=="split1")] <- "FL1.A_bs_1"
colnames(out)[which(colnames(out)=="split2")] <- "FL1.A_bs_2"
colnames(out)[which(colnames(out)=="split3")] <- "FL1.A_bs_3"
colnames(out)[which(colnames(out)=="split4")] <- "FL1.A_bs_4"
# out <- cbind(out,FL1.A_1=fl1_fsc[,"split1"])
# out <- cbind(out,FL1.A_2=fl1_fsc[,"split2"])
# out <- cbind(out,FL1.A_3=fl1_fsc[,"split3"])
# out <- cbind(out,FL1.A_4=fl1_fsc[,"split4"])
# out <- cbind(out,FL1.A_1=fl1_fsc[,"split1"])
# out <- cbind(out,FL1.A_2=fl1_fsc[,"split2"])
# out <- cbind(out,FL1.A_3=fl1_fsc[,"split3"])
# out <- cbind(out,FL1.A_4=fl1_fsc[,"split4"])
# background-subtract all FL1.A values
# NOTE(review): the five lines below reference columns that do not
# exist at this point ('FL13A', 'FL3.A', and the 'FL3.A_bs_*' names on
# the right-hand side; the split columns were renamed to FL1.A_bs_*
# above). This looks like a half-finished FL1->FL3 edit and will error
# or produce NULL columns at runtime -- verify against the FL1 version
# of this function before relying on it.
out$FL1.3_bs <- out$FL13A-mean(subset(out,strain==baseline)$FL3.A)
out$FL3.A_bs_1 <- out$FL3.A-mean(subset(out,strain==baseline)$FL3.A_bs_1)
out$FL3.A_bs_2 <- out$FL3.A-mean(subset(out,strain==baseline)$FL3.A_bs_2)
out$FL3.A_bs_3 <- out$FL3.A-mean(subset(out,strain==baseline)$FL3.A_bs_3)
out$FL3.A_bs_4 <- out$FL3.A-mean(subset(out,strain==baseline)$FL3.A_bs_4)
if(normalize==T) {
# NOTE(review): this ddply result is not assigned or returned, so
# normalize=T currently has no effect -- confirm intent.
ddply(out,c("strain","treatment"),transform,norm1=FL3_FSC_norm/min(FL3_FSC_norm[1:3]))
}
return(out)
}
# Produces a normalized fluorescence column 'normed'.
# Expects the background-subtracted column named by column= (default
# "FL3.Amean_bs") to exist, falling back to "FL3.A_bs".
# Three normalization methods are supported, described inline below;
# methods 2 and 3 require negative time values in the series.
addnorm <- function(frame, factor_in=c("strain","treatment"), method=1, column="FL3.Amean_bs") {
  library(plyr)
  # Locate the background-subtracted values column
  if (sum(colnames(frame) == column) == 0) {
    if (sum(colnames(frame) == "FL3.A_bs") == 0) {
      # (Message corrected: it previously named FL1.* columns while the
      # code looks for FL3.* columns.)
      stop(paste("Could not find the background-subtracted values column.",
                 "This script requires that there be a column named",
                 "FL3.Amean_bs, FL3.A_bs, or the user-defined column using",
                 "column='desired-column'"))
    } else {
      column <- "FL3.A_bs"
    }
  }
  if (method == 1) {
    # Default normalization method. Takes highest point in dataset grouped by 'factor_in' and sets it to 1,
    # divides all other values by that number. This method is default because it works regardless of
    # whether the data is a time series.
    estimate_0 <- function(x) {
      x[, "normed"] <- x[, column]/max(x[, column])
      return(x)
    }
  } else if (method == 2) {
    # Version 2 - takes the mean value of all time points which are less than 0, after grouped by 'factor_in'.
    # Sets this to the value by which all other data points in that group are divided.
    # Therefore, no value is actually '1' except by very rare chance.
    # Requires a time series with negative time values to work.
    estimate_0 <- function(x) {
      normresult <- x[, column]/mean(x[x$time < 0, column])
      x <- cbind(x, normed=normresult)
      return(x)
    }
  } else if (method == 3) {
    # Version 3 makes a fit line to all pre-zero time points and infers the y-intercept.
    # Requires a time series with negative time values to work.
    estimate_0 <- function(x) {
      prezero_points <- x[x$time < 0, ]
      prezero_fit <- lm(prezero_points[, column]~prezero_points[, "time"])
      prezero_intercept <- prezero_fit$coefficients[1] # intercept
      normresult <- x[, column]/prezero_intercept
      x <- cbind(x, normed=normresult)
      return(x)
    }
  } else {
    stop("You must define method=1, method=2, or method=3")
  }
  # Methods 2 and 3 need pre-zero time points to estimate the baseline
  # (|| replaces the elementwise | in this scalar condition)
  if (sum(frame$time < 0) == 0) {
    if (method == 2 || method == 3) {
      stop("To use methods 2 or 3, the input data frame must have negative time values for each normalized data subset")
    }
  }
  # Run the chosen estimation function within each factor_in group
  frame <- ddply(frame, factor_in, estimate_0)
  return(frame)
}
# Background subtraction: append a '<column>_bs' column holding the
# given column minus the mean signal of the baseline strain's rows.
addbs <- function(frame, column="FL3.Amean", baseline="noYFP") {
  baseline_mean <- mean(subset(frame, strain == baseline)[, column])
  new_col <- paste(column, "_bs", sep="")
  frame[, new_col] <- frame[, column] - baseline_mean
  frame
}
# Generate a data frame that's useful for plotting overlapping density
# plots with ggplot. Expects a large data frame made by exprs() on a
# bunch of flowFrames, with an extra 'exptime' column giving each
# well's acquisition time. (It shouldn't be hard to make it accept a
# flowSet and do this automatically.)
density_frame <- function(frame, param="FL1.A") {
  # Kernel density of 'param' per acquisition time point
  frame_dens <- tapply(frame[, param], frame$exptime,
                       function(x) data.frame(x=density(x)$x, y=density(x)$y))
  # Row count per group. density() yields 512 points by default, but we
  # no longer hard-code that assumption.
  group_sizes <- vapply(frame_dens, nrow, integer(1))
  group_names <- names(frame_dens)  # tapply orders these by factor level
  # Collapse the list into one data frame in a single step
  # (replaces the O(n^2) rbind-in-a-loop of the original)
  frame_dens <- do.call(rbind, frame_dens)
  rownames(frame_dens) <- NULL
  # Label each density row with its group's exptime
  frame_dens$exptime <- rep(group_names, times=group_sizes)
  # Normalize the densities within each time point
  frame_dens <- ddply(frame_dens, "exptime", transform, y_norm=y/max(y))
  return(frame_dens)
}
# Script to reprocess cytometer data following a given pattern. Uses modelingFormat for reprocessing.
# Expects as input a directory with one folder 'csvs' full of prior csvs and the matching source data in 'source'
# Outputs to same directory in 'newcsvs' folder, overwriting anything that exists there.
# NOTE(review): path construction is inconsistent -- paste(directory_in,"/csvs",...)
# does not need a trailing slash on directory_in, while
# paste(directory_in,"source/",...) and the output path do. Presumably
# directory_in must end with "/" for all branches to work; confirm.
reprocess <- function(directory_in,nick_type=T) {
# Delete 'newcsvs' if it already exists (destructive!)
unlink(paste(directory_in,"/newcsvs",sep=""),recursive=T)
# Get csv list and create fresh 'newcsv' dir
csvlist <- list.files(paste(directory_in,"/csvs",sep=""))
dir.create(paste(directory_in,"/newcsvs",sep=""))
# massive loop to reprocess each piece of data
for (i in csvlist) {
csv <- read.csv(paste(directory_in,"/csvs/",i,sep=""))
# .fcs filenames recorded in the csv's 'file' column
csv_files <- paste(csv$file,".fcs",sep="")
# Experiment name: first 3 characters of the csv name, underscores removed
expname <- gsub("_","",substr(i,1,3))
# find dir for this experiment
dirs <- list.files(paste(directory_in,"source/",sep=""))
dir <- grep(expname,dirs)
message("Reprocessing ",expname,"...")
# Read in flowSet, trim to all that match csv
fs <- read.flowSet(path=paste(directory_in,"source/",dirs[dir],sep=""),alter.names=T)
fs_exp <- sampleNames(fs)
fs_matches <- which(fs_exp %in% csv_files)
fs_trimmed <- fs[fs_matches]
####################################
# Currently uses 'modelingFormat' to reprocess data. Can substitute anything in here
####################################
newcsv <- modelingFormat(fs_trimmed,csv$strain)
# Re-add the metadata: treatment, grp, afb, rep
for (cols in c("treatment","grp","afb","rep")) {
newcsv[,cols] <- csv[,cols]
}
write.csv(newcsv,file=paste(directory_in,"newcsvs/",i,sep=""))
}
message("Finished. Files are in:")
message(directory_in,"newcsvs/")
}
# Acquisition start time of a flowFrame, in minutes of the day.
# The $BTIM keyword is "HH:MM:SS:cs" (centiseconds).
flowFrame.gettime <- function(flowframe) {
  parts <- as.numeric(unlist(strsplit(keyword(flowframe)$`$BTIM`, split=":")))
  # Weight each field into minutes: hours*60 + min + sec/60 + cs/6000
  sum(parts * c(60, 1, 1/60, 1/6000))
}
# YeastCytSummary:
# Convenience wrapper: read every .fcs file in 'inpath' into a flowSet,
# run summary.cyt with the given ploidy/only/channel settings, and
# write the result as a timestamped CSV on the Desktop.
YeastCytSummary <- function(inpath, ploidy=F, only="singlets", channel="FL1.A") {
  fs <- read.flowSet(path=inpath, alter.names=T)
  # (|| replaces the elementwise | in this scalar condition)
  if (ploidy == "diploid" || ploidy == "haploid") {
    fs_sum <- summary.cyt(fs, only=only, ploidy=ploidy, channel=channel)
  } else {
    stop('Must define ploidy= as "haploid" or "diploid"')
  }
  # Timestamped output name so repeated runs never clobber each other
  filename <- paste("summary-", format(Sys.time(), "%Y-%m-%d--%H-%M-%S"), ".csv", sep="")
  write.csv(fs_sum, paste("~/Desktop/", filename, sep=""))
  message("File was written to Desktop/", filename)
}
|
/NemCytometer_2015.R
|
permissive
|
NemLab/Rfunctions
|
R
| false
| false
| 44,619
|
r
|
### Usage notes for these scripts:
# Almost every function or object depends on the flowCore (and often flowViz) package(s) from Bioconductor.
# To install these, run these commands in R:
# source("http://bioconductor.org/biocLite.R")
# biocLite("flowCore")
# biocLite("flowViz")
#
# The most useful script for an R newbie is 'summary.cyt'.
# It will take a flowSet (see flowCore documentation) and run some QA, gate, and get summary FL1.A (or other channel) data
# as well as experiment information like the time of day and events/µL concentration.
library(flowViz)
#########################
### Cytometer Gates ###
#########################
##################################
### Notes on cytometer changes ###
##################################
### Got new cytometer around 2011-02-22-ish (updated gates)
### Switched FL1-A and FL2-A sometime around 02-23. Was switched back relatively soon afterwards.
### FSC-A was tweaked around 2011-03-8 (early-ish March), changed gates.
### yeastGate
### Defines an SSC.A vs FSC.A gate. Includes only the yeast population
### from a flowSet
# As of 2011-10-26, it may be necessary to create a new yeastGate for diploids that truly cuts out dead cells.
# These gates are intentionally global (<<-) so the summary functions
# above can reference them by name.
# Used from 2011-02-22 to present, excludes debris/non-yeast
yeastGate <<- polygonGate(filterId="Yeast",
.gate=matrix(c(400000,10000000,10000000,400000,
10000,10000,2300000,60000),
ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","SSC.A"))))
# Earlier NemLab yeast gate, kept for reprocessing older data
oldNemyeastGate <<- polygonGate(filterId="Yeast",
.gate=matrix(c(20000,7000000,1500000,100000,
10000,300000,8000000,10000),
ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","SSC.A"))))
# Current NemLab cytometer yeast gate
NemyeastGate <<- polygonGate(filterId="Yeast",
.gate=matrix(c(50000,7000000,1500000,50000,
10000,300000,4000000,10000),
ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","SSC.A"))))
# Used until cytometer was switched around 2011-02-22
oldyeastGate <<- polygonGate(filterId="Yeast",
.gate=matrix(c(160000,1500000,1500000,160000, 0,0,200000,200000),
ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","SSC.A"))))
# Diploid Singlet gate, used 2011-07-09 to present.
# Diploids are slightly larger and have better separation between singlets/doublets.
# Singlet/doublet discrimination uses FSC.H vs FSC.A.
dipsingletGate <<- polygonGate(filterId="DipSingletGate",
.gate=matrix(c(
#x values
7.5e5,13e5,18e5,15e5,6e5,
#y values
9e5,16e5,26e5,30e5,15e5),
ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
)
)
# Earlier NemLab diploid singlet gate, kept for reprocessing older data
oldNemdipsingletGate <<- polygonGate(filterId="DipSingletGate",
.gate=matrix(c(
#x values
1e4,12e4,19.5e5,15e5,2e4,
#y values
9e4,10e4,26e5,30e5,15e5),
ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
)
)
# Earlier NemLab diploid doublet gate, kept for reprocessing older data
oldNemdipdoubletGate <<- polygonGate(filterId="DipDoubletGate",
.gate=matrix(c(
#x values
2e4,20e5,30e5,35e5,25e5,8e5,
#y values
8e4,12.5e5,25e5,26e5,30e5,8.5e5),
ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
)
)
#as of 25feb2015
NemdipsingletGate <<- polygonGate(filterId="DipSingletGate",
.gate=matrix(c(
#x values
1e4,8e4,20e5,15e5,18e4,
#y values
5e4,9.7e4,21e5,20e5,6e5),
ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
)
)
# Current NemLab diploid doublet gate
NemdipdoubletGate <<- polygonGate(filterId="DipDoubletGate",
.gate=matrix(c(
#x values
4.5e4,20e5,20e5,30e5,25e5,8e5,
#y values
5e4,8e5,25e5,26e5,35e5,8.5e5),
ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
)
)
# Haploid singlet/doublet gates on FSC.A vs FSC.H.
# Diploid Doublet gate, used 2011-08-09 to present
# Diploids are slightly larger and have better separation between singlets/doublets
hapdoubletGate <<- polygonGate(filterId="HaploidDoubletGate",
       .gate=matrix(c(
               #x values
               6.5e5,1.15e6,1.5e6,1.4e6,1.2e6,5e5,
               #y values
               5.75e5,9e5,1.3e6,1.4e6,1.5e6,6.5e5),
               ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
       )
)
# Nemhauser-lab cytometer haploid doublet gate (pre-2015 coordinates)
oldNemhapdoubletGate <<- polygonGate(filterId="HaploidDoubletGate",
       .gate=matrix(c(
               #x values
               2e4,20e5,30e5,18e5,25e5,6.75e5,
               #y values
               8e4,12.5e5,25e5,26e5,30e5,8.5e5),
               ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
       )
)
# Nemhauser-lab cytometer haploid doublet gate (current coordinates)
NemhapdoubletGate <<- polygonGate(filterId="HaploidDoubletGate",
       .gate=matrix(c(
               #x values
               1e4,2e5,30e5,25e5,25e5,6.75e5,
               #y values
               2e4,5e4,20e5,30e5,5e5,8.5e5),
               ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
       )
)
# Used 2012-02-22 to present
hapsingletGate <<- polygonGate(filterId="HaploidSingletGate",
       .gate=matrix(c(
               #x values
               5e5,0.8e6,1.15e6,1e6,5e5,
               #y values
               8e5,1.05e6,1.5e6,1.8e6,1e6),
               ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
       )
)
# Nemhauser-lab cytometer haploid singlet gate (pre-2015 coordinates)
oldNemhapsingletGate <<- polygonGate(filterId="HaploidSingletGate",
       .gate=matrix(c(
               #x values
               1e4,6e4,19e5,15e5,2e4,
               #y values
               9e4,10e4,26e5,30e5,15e5),
               ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
       )
)
# Second revision of the Nemhauser-lab haploid singlet gate
NemhapsingletGatev2 <<- polygonGate(filterId="HaploidSingletGate",
       .gate=matrix(c(
               #x values
               1e4,5e4,8e5,10e5,2e4,
               #y values
               4e4,7e4,10e5,15e5,15e5),
               ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
       )
)
#NF=> new flow cell installed
NF_NemhapsingletGate <<- polygonGate(filterId="HaploidSingletGate",
       .gate=matrix(c(
               #x values
               1e4,5e4,8e5,10e5,2e4,
               #y values
               2e4,5e4,7e5,10e5,10e5),
               ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
       )
)
#Used with the new flow cell installed in the SORP cytometer in Dec 2014
NemhapsingletGate <<- polygonGate(filterId="HaploidSingletGate",
       .gate=matrix(c(
               #x values
               1e4,6.5e4,15e5,12e5,4e4,
               #y values
               4e4,8e4,16e5,18e5,4e5),
               ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))))
####################
# Havens et al 2012#
####################
# Frozen copies of the gates used for the auxin paper, kept separate so
# reanalysis of that data is reproducible even if the live gates change.
# Used in auxin paper (Havens 2012) to gate all yeast from non-yeast. Also excludes a portion
# of small-FSC.A, high-SSC.A cells (presumably dead).
auxinpaper_yeastGate <<- polygonGate(filterId="Yeast",
               .gate=matrix(c(400000,10000000,10000000,400000, 10000,10000,2300000,60000),
               ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","SSC.A"))))
# Used in auxin paper to gate for singlets after gating with auxinpaper_yeastGate
auxinpaper_singletGate <<- polygonGate(filterId="DipSingletGate",
       .gate=matrix(c(
               #x values
               7.5e5,13e5,18e5,15e5,6e5,
               #y values
               9e5,16e5,26e5,30e5,15e5),
               ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
       )
)
# Used in auxin paper to gate for doublets after gating with auxinpaper_yeastGate
auxinpaper_doubletGate <<- polygonGate(filterId="DipDoubletGate",
       .gate=matrix(c(
               #x values
               10e5,17e5,23e5,22e5,20e5,8e5,
               #y values
               8e5,12.5e5,17e5,20e5,22e5,8.5e5),
               ncol=2,nrow=6,dimnames=list(rep(NA,6),c("FSC.A","FSC.H"))
       )
)
# haploid singlet gate, used from 2011-02-22 to 2012-02-22
singletGate2 <<- polygonGate(filterId="Singlets",
               .gate=matrix(c(400000,3000000,3000000,160000, 620000,3500000,6000000,500000),
               ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","FSC.H"))))
# Used until cytometer was switched around 2011-02-22
oldsingletGate <<- polygonGate(filterId="Singlets",
               .gate=matrix(c(160000,1500000,1400000,160000, 680000,5700000,6000000,750000),
               ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","FSC.H"))))
# Used up to 2011-02-10
extraoldsingletGate <<- polygonGate(filterId="Singlets",
               .gate=matrix(c(160000,800000,800000,160000, 680000,3150000,3500000,750000),
               ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.A","FSC.H"))))
## experimental gates
# mChGate - designed around data from cytometer experiment Nick did on 2012-9-19 (NB036)
# subsets cells with high FL3.A (mCherry) vs FL1.A (EYFP) output.
# Note that it was designed around cells that have both EYFP and mCherry and
# may not work for mCherry-only cells
mChGate <<- polygonGate(filterId='mCherryGate',
       matrix(c(5e2,2e3,4e4,7e4,8e3,5e2, 2e3,2e3,1.8e4,9e4,8e4,4e3),
              ncol=2,
              nrow=6,
              # BUG FIX: argument was misspelled `dimname` and only worked
              # through R's partial argument matching; spelled out `dimnames`.
              dimnames=list(c(1,1,1,1,1,1),c("FL1.A","FL3.A")))
)
# Same motivation as the mChGate - separate EYFP+mCh cells from EYFP cells. In this case,
# subsets for EYFP-only cells. Again, may only work for this particular experiment type
EYFPGate <<- polygonGate(filterId='EYFPGate',
       matrix(c(0,4e3,3e4,3e4,3e3,0, 0,0,4e3,9e3,2e3,2e3),
              ncol=2,
              nrow=6,
              # BUG FIX: argument was misspelled `dimname` and only worked
              # through R's partial argument matching; spelled out `dimnames`.
              dimnames=list(c(1,1,1,1,1,1),c("FL1.A","FL3.A"))
       )
)
# Diploid Singlet gate, first created 2011-07-09
# Diploids are slightly larger and have better separation between singlets/doublets
dipsingletGate3 <<- polygonGate(filterId="DipSingletGate2",
       .gate=matrix(c(
               #x values
               5e5,13e5,18e5,15e5,6e5,
               #y values
               7e5,16e5,26e5,30e5,15e5),
               ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))
       )
)
# for diploids, highlights dead cells
deadGate <<- polygonGate(filterId="deadGate",
       .gate=matrix(c(7.5e5,13e5,3e6,15e5,6e5,
               1e5,2e5,7.5e5,4e5,2e5),
               ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","SSC.A")))
)
# for diploids, takes small-sized cells, maybe could use for excluding dead cells when combined with clustering
deadexcludeGate <<- polygonGate(filterId="deadGate",
       .gate=matrix(c(7.5e5,4e6,3e6,1.5e6,6e5,
               0,0,7.5e5,8e5,2e5),
               ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","SSC.A")))
)
# for diploids, is a little more generous than 'dipsingletGate' Combine with flowClust to clean it up
dipsingletGate2 <<- polygonGate(filterId="DipSingletGate",
       .gate=matrix(c(
       # x values, then y values
       7.5e5,14e5,30e5,15e5,6e5,
       9e5,16e5,35e5,40e5,15e5),
       ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))))
# excludes the big-cell subpopulation, it might be sick/weird/less comparable
# use !excludeBig to exclude them.
excludeBig <<- polygonGate(filterId="excludeBig",
       .gate=matrix(c(
       # x values, then y values
       3e6,2e7,2e7,2e7,3e6,
       1e6,16e5,5e6,1e7,1e7),
       ncol=2,nrow=5,dimnames=list(rep(NA,5),c("FSC.A","FSC.H"))))
# Rob's gate for E. coli (note: height channels FSC.H/SSC.H, not area)
ecoliGate <<- polygonGate(filterId="E.coli",
               .gate=matrix(c(2e4,8e4,8e4,2e4,   # x points
                               1,1,6e3,6e3),     # y points
               ncol=2,nrow=4,dimnames=list(c("1","1","1","1"),c("FSC.H","SSC.H"))))
###########################
### Cytometer Scripts ###
###########################
### polygate:
### Make a gate easier?
# polygate: convenience wrapper around flowCore::polygonGate.
# x, y: numeric vectors of matching length giving the polygon vertices.
# filterID: label for the resulting gate; channels: the two channels gated on.
# Returns a polygonGate object.
polygate <- function(x,y,filterID="newGate",channels=c("FSC.A","FSC.H")) {
        # Scalar `||` (was vectorized `|`) is correct inside if().
        if( length(x) != length(y) || !is.numeric(x) || !is.numeric(y)) {
                stop("x coordinate vector must be same length as y coordinate vector")
        }
        # BUG FIX: row dimnames were hard-coded to length 5; use the actual
        # number of vertices so polygons of any size are valid.
        gate <- polygonGate(filterId=filterID,
                .gate=matrix(c(x,y),
                ncol=2,nrow=length(x),dimnames=list(rep(NA,length(x)),channels)))
        return(gate)
}
### ploidy:
### Tries to guess the ploidy of a given flowframe
### Uses FSC.A/FSC.H ratio.
### Diploids are typically 5um x 6um ellipsoids while
### haploids are typically 4um x 4um spheroids
### As a result, diploids are longer and you get a larger 'area/volume' FSC.A
### 'Width' might also be useful.
ploidy <- function(flowframe) {
        # Guess the ploidy of a flowFrame from its FSC.A/FSC.H ratio.
        # Elongated diploid cells give a larger area-to-height ratio than the
        # roughly spherical haploids. Returns c(label, ratio).
        # Test this assumption!!!!!
        framestats <- summary(flowframe)
        # Row 4 of summary() is the mean; columns 1 and 7 are FSC.A and FSC.H.
        ratio <- framestats[4, 1] / framestats[4, 7]
        if (ratio > 0.92) {
                return(c("Diploid", ratio))
        }
        c("Haploid", ratio)
}
### qa.gating:
### Very simple script to check whether a flowSet or flowFrame
### contains empty values, in which case normalization may fail (divide by zero)
# qa.gating: flag frames whose event count falls below `threshold` (default
# 100). Downstream normalization can divide by zero on empty frames, so these
# positions are reported for callers to skip.
# Returns FALSE when every frame passes, otherwise the failing positions.
qa.gating <- function(x,threshold=100) {
        print("Running QA...")
        x.class <- class(x)[1]
        if(x.class=="flowFrame") {
                counts <- length(exprs(x[,1]))
        } else if(x.class=="flowSet") {
                counts <- fsApply(x,length,use.exprs=TRUE)
        } else {
                # BUG FIX: previously this branch only printed, leaving
                # `counts` undefined and causing a confusing error below.
                stop("Input must be a flowSet or flowFrame")
        }
        # Positions of all frames under the threshold
        counts.failed.position <- which(counts < threshold)
        if (length(counts.failed.position) > 0) {
                print("QA resulted in 1 or more warnings.")
                return(counts.failed.position)
        }
        print("QA succeeded")
        return(FALSE)
}
### fl1transform:
### Normalizes FL1.A values in a flowset/flowframe to FSC.A values
### Should control for (at least some) non-linearity in the values
### Also makes FL1.A proportional to fluorescence/cell volume
### Used to multiply by the mean FSC.A value, but this is probably
### statistically questionable. Now multiplies by a constant (10000)
### simply to keep the values in the integer range.
###
### If you specify transform="log", it will simply do a log transform
### to FL1.A instead.
# fl1transform: normalize FL1.A by FSC.A (transform="fscanorm"), log-transform
# it (transform="log"), or leave it untouched (transform=F).
# The 10^4 factor only keeps normalized values in a human-readable range.
# Frames that fail QA (no cells) are left untransformed.
fl1transform <- function(x,transform=F) {
        # Handle both flowFrames and flowSets
        x.class <- class(x)[1]
        if (x.class=="flowFrame") {
                # NOTE(review): for a single flowFrame the transform= argument
                # is ignored and fscanorm is always applied -- preserved as-is,
                # but worth confirming this is intentional.
                return(transform(x,FL1.A=FL1.A/FSC.A*10^4))
        }
        # Protect the input from the modifications
        x <- x[seq(along=x)]
        # QA returns FALSE when all frames have cells, else failing positions
        qa.result <- qa.gating(x,threshold=1)
        # Remove all 0-valued fluorescence results.
        # These are very likely to be artifacts and screw up some transforms
        print("Removing 0-valued fluorescence outliers")
        x <- Subset(x,rectangleGate(FL1.A=c(0.001,Inf)))
        # Transformation setup; works on a flowFrame or a whole flowSet
        trans <- function(obj) {
                if (transform == "fscanorm") {
                        transform(obj,FL1.A=FL1.A/FSC.A*10^4)
                } else if (transform == "log") {
                        transform(obj,FL1.A=log(FL1.A))
                } else if (transform == F) {
                        obj
                } else {
                        stop("No legitimate transform set. Use transform=\"log\" or transform=\"fscanorm\".")
                }
        }
        # BUG FIX: `if (!qa.result)` errors when qa.result is a position
        # vector (length > 1 condition); compare against FALSE explicitly.
        if (identical(qa.result, FALSE)) {
                x <- trans(x)
        } else {
                # BUG FIX: the old loop iterated over the *failed* frames and
                # assigned `trans(x)` (the whole set) into a single slot.
                # Transform every frame that passed QA and leave the
                # under-populated ones untouched, as the message states.
                for (i in seq(along=x)) {
                        if (!(i %in% qa.result)) {
                                x[[i]] <- trans(x[[i]])
                        }
                }
                cat(paste(
                        "### Too few cells at this gating level for frame(s) \n### ",
                        paste(qa.result,collapse=", "),
                        ".\n### These frames were not normalized.\n\n",sep=""))
        }
        return(x)
}
### flsummary:
### Get summary statistics for fluorescence, other data
# flsummary: per-frame summary statistics (timing, event counts, concentration
# and fluorescence mean/median/sd on `channel`) for a flowSet, returned as one
# data frame keyed by filename. Optional: first four moments (moments=T) and
# means of four sequential chunks (split=T).
flsummary <- function(flowset,channel="FL3.A",moments=F,split=F,transform=F) {
        # Number of cells (experiments) in the flowSet
        n_experiment <- length(flowset)
        # Initialize empty matrices/data frames to increase efficiency
        warnings <- c()
        if (moments == T) {
                library(moments)
        }
        # Get time of each frame in minutes of the day
        # $BTIM keyword is "HH:MM:SS:cs"; split and convert to fractional minutes
        btime_raw <- fsApply(flowset,function(x)as.numeric(unlist(strsplit(keyword(x)$`$BTIM`,split=":"))))
        btime <- apply(btime_raw,1,function(x)x[1]*60+x[2]+x[3]/60+x[4]/6000)
        # Minutes elapsed since the earliest frame in the set
        time <- btime-min(btime)
        # Acquisition time - how long it took to take the sample, in seconds
        atime <- fsApply(flowset,function(x)as.numeric(keyword(x)$`#ACQUISITIONTIMEMILLI`)/1000)
        events <- fsApply(flowset,function(x)length(x[,1]),use.exprs=T)
        # $VOL is in nanoliters; convert to microliters for events/uL
        uL <- fsApply(flowset,function(x)as.integer(keyword(x)$`$VOL`)/1000)
        conc <- events/uL
        # Collect positions of frames with suspiciously few events
        for (i in 1:n_experiment) {
                if (events[i] < 100) {
                        warnings <- c(warnings,i)
                }
        }
        fl_mean <- fsApply(flowset,function(x)mean(x[,channel]),use.exprs=T)
        fl_median <- fsApply(flowset,function(x)median(x[,channel]),use.exprs=T)
        fl_sd <- fsApply(flowset,function(x)sd(x[,channel]),use.exprs=T)
        fl <- data.frame(fl_mean,fl_median,fl_sd)
        colnames(fl) <- paste(channel,c("mean","median","sd"),sep="")
        # Do we want mean fl values for data split into 4 evenly sized chunks?
        if (split==T) {
                split_table <- fsApply(flowset,splitFrame)
                split_table <- data.frame(matrix(unlist(split_table),ncol=4,byrow=T))
                colnames(split_table) <- paste("split",1:4,sep="")
                fl <- cbind(fl,split_table)
        }
        # Do we want the first few moments?
        if (moments == T) {
                require(moments)
                fl_var <- data.frame(fsApply(flowset,function(x)var(x[,channel]),use.exprs=T))
                fl_skew <- data.frame(fsApply(flowset,function(x)skewness(x[,channel]),use.exprs=T))
                fl_kurt <- data.frame(fsApply(flowset,function(x)kurtosis(x[,channel]),use.exprs=T))
                fl_moments <- data.frame(fl_var,fl_skew,fl_kurt)
                colnames(fl_moments) <- paste(channel,c("var","skew","kurt"),sep="")
                fl <- cbind(fl,fl_moments)
        }
        # Frame filename with the .fcs extension stripped; used as the row key
        file <- fsApply(flowset,function(x)strsplit(keyword(x)$GUID,".fcs")[[1]])
        colnames(file) <- "file"
        if (length(warnings) != 0) {
                warnings <- paste(warnings,collapse=", ")
                print(paste("Warning: frame(s)",warnings,"had less than 100 events in this gate."))
        }
        # Insert empty strain and colony columns (filled in later by callers)
        strain=matrix(nrow=n_experiment)
        treatment=matrix(nrow=n_experiment)
        # Put it all together
        flsummary <- cbind(time,btime,atime,events,conc,fl,file,strain,treatment)
        # Make rows filename keys
        rownames(flsummary) <- file
        # Rename the 'mean', 'median', and 'sd' columns to reflect transformations done or channel used.
        # 'FL1.A' = no transformation, 'FL1_FSC' = "fsacanorm", 'log' = "log"
        flsummary <- renameflcols(flsummary,channel=channel,transform=transform)
        return(flsummary)
}
# renaming function for flsummary data, keeping it separate for ease of use
# probably slow due to for loop
# renameflcols: rename the "<channel>mean"/"<channel>median"/"<channel>sd"
# columns of an flsummary table so they reflect the transform that was
# applied ("fscanorm" -> FL1_FSC*, "log" -> log*).
# Untransformed tables pass through unchanged.
renameflcols <- function(x,channel="FL1.A",transform=F) {
        if (transform == F) {
                return(x)
        }
        if (transform == "fscanorm") {
                prefix <- "FL1_FSC"
        } else if (transform == "log") {
                prefix <- "log"
        } else {
                stop("invalid transform")
        }
        for (stat in c("mean","median","sd")) {
                # Prefixing with the channel name narrows the grep to the
                # summary-statistic columns before renaming them.
                hits <- grep(stat, paste(channel, colnames(x), sep=""))
                colnames(x)[hits] <- paste(prefix, stat, sep="")
        }
        return(x)
}
### summary.cyt:
### Gates a sample to all yeast, then singlet, then doublets
### Does the work of singletsummary.cyt,doubletsummary.cyt,yeastsummary.cyt
### Also calculates singlet to doublet ratio
### Returns a list of data frames, e.g. output$singlets, output$doublets, etc.
# summary.cyt: gate a flowSet to all yeast, then singlets and doublets, and
# summarize each subset via flsummary. Returns a list (yeast/singlets/doublets)
# or, with only=, just the requested subset's summary table.
summary.cyt <- function(
        flowset,
        transform=F,
        channel="FL1.A",
        ploidy=F,
        moments=F,
        split=F,
        only=F) {
        # Number of experiments (kept for parity with earlier revisions)
        n_experiments <- length(flowset)
        # FSC.A is the denominator of the fscanorm transform, so normalizing
        # FSC.A by itself is meaningless; fall back to no transform.
        # BUG FIX: scalar `&&` replaces the vectorized `&` in this if().
        if (channel=="FSC.A" && transform=="fscanorm") {
                print("Channel FSC.A selected with no transform= setting set.")
                print("Defaulting to no transform (set transform=\"log\" for log transform)")
                transform <- F
        }
        # Transform FL1.A if requested
        if (transform != F) {
                print(paste("Transforming FL1.A using",transform,"transform..."))
                flowset <- fl1transform(flowset,transform=transform)
        }
        # Gate the samples: whole-yeast gate first, then ploidy-specific gates
        if (ploidy=="haploid") {
                print("Gating with haploid gates...")
                yeast <- Subset(flowset,yeastGate)
                singlets <- Subset(yeast,hapsingletGate)
                doublets <- Subset(yeast,hapdoubletGate)
        } else if (ploidy=="diploid") {
                print("Gating with diploid gates...")
                yeast <- Subset(flowset,yeastGate)
                singlets <- Subset(yeast,dipsingletGate)
                # NOTE(review): dipdoubletGate is not defined in this file's
                # visible gate section -- confirm it exists at load time.
                doublets <- Subset(yeast,dipdoubletGate)
        } else {
                stop('Error: You must define ploidy="haploid" or ploidy="diploid"')
        }
        # Shared summarization step for each gated subset
        summarize <- function(subset_fs,label) {
                print(paste("Summarizing",label,"events..."))
                flsummary(subset_fs,channel=channel,moments=moments,split=split,transform=transform)
        }
        if (only==F) {
                return(list(yeast=summarize(yeast,"all yeast"),
                            singlets=summarize(singlets,"singlets"),
                            doublets=summarize(doublets,"doublets")))
        } else if (only=="singlets") {
                return(summarize(singlets,"singlets"))
        } else if (only=="doublets") {
                return(summarize(doublets,"doublets"))
        } else if (only=="yeast") {
                return(summarize(yeast,"all yeast"))
        } else {
                stop("'only' must be 'singlets','doublets', or 'yeast'")
        }
}
### summary.cyt:using gates for the NemLab cytometer (2013)
# Nemsummary.cyt: same as summary.cyt but uses the Nemhauser-lab cytometer
# gates (Nem*Gate). Returns a list (yeast/singlets/doublets) or, with only=,
# just the requested subset's summary table.
Nemsummary.cyt <- function(
        flowset,
        transform=F,
        channel="FL1.A",
        ploidy=F,
        moments=F,
        split=F,
        only=F) {
        # Number of experiments (kept for parity with earlier revisions)
        n_experiments <- length(flowset)
        # FSC.A is the denominator of the fscanorm transform, so normalizing
        # FSC.A by itself is meaningless; fall back to no transform.
        # BUG FIX: scalar `&&` replaces the vectorized `&` in this if().
        if (channel=="FSC.A" && transform=="fscanorm") {
                print("Channel FSC.A selected with no transform= setting set.")
                print("Defaulting to no transform (set transform=\"log\" for log transform)")
                transform <- F
        }
        # Transform FL1.A if requested
        if (transform != F) {
                print(paste("Transforming FL1.A using",transform,"transform..."))
                flowset <- fl1transform(flowset,transform=transform)
        }
        # Gate the samples: whole-yeast gate first, then ploidy-specific gates
        if (ploidy=="haploid") {
                print("Gating with haploid gates...")
                yeast <- Subset(flowset,NemyeastGate)
                singlets <- Subset(yeast,NemhapsingletGate)
                doublets <- Subset(yeast,NemhapdoubletGate)
        } else if (ploidy=="diploid") {
                print("Gating with diploid gates...")
                yeast <- Subset(flowset,NemyeastGate)
                singlets <- Subset(yeast,NemdipsingletGate)
                doublets <- Subset(yeast,NemdipdoubletGate)
        } else {
                stop('Error: You must define ploidy="haploid" or ploidy="diploid"')
        }
        # Shared summarization step for each gated subset
        summarize <- function(subset_fs,label) {
                print(paste("Summarizing",label,"events..."))
                flsummary(subset_fs,channel=channel,moments=moments,split=split,transform=transform)
        }
        if (only==F) {
                return(list(yeast=summarize(yeast,"all yeast"),
                            singlets=summarize(singlets,"singlets"),
                            doublets=summarize(doublets,"doublets")))
        } else if (only=="singlets") {
                return(summarize(singlets,"singlets"))
        } else if (only=="doublets") {
                return(summarize(doublets,"doublets"))
        } else if (only=="yeast") {
                return(summarize(yeast,"all yeast"))
        } else {
                stop("'only' must be 'singlets','doublets', or 'yeast'")
        }
}
### yeastIntSplit:
### Splits a flowSet or flowFrame by FSC.A
### Splits into n equally-spaced intervals
### n defaults to 3 intervals
### Returns a LIST
# yeastIntSplit: split a flowSet into `nsplit` equally-spaced FSC.A intervals
# (0..800000) and report mean/sd of FSC.A-normalized FL1.A for each interval.
# Returns a data frame with one row per interval.
yeastIntSplit <- function(flowset,nsplit=3){
        # Upper FSC.A bound of the splitting range
        maxval <- 800000
        # Normalize FL1.A by FSC.A (temporary, local to this function)
        flowset <- transform(flowset,`FL1.A` = `FL1.A`/`FSC.A`)
        returnedvalues <- matrix(nrow=nsplit,ncol=6)
        for (i in seq_len(nsplit)) {
                localmin <- floor(1+(i-1)*maxval/nsplit)
                localmax <- ceiling(i*maxval/nsplit)
                tempgate <- rectangleGate("FSC.A"=c(localmin,localmax))
                tempSet <- Subset(flowset,tempgate)
                currentFL1.As <- exprs(tempSet[,"FL1.A"])
                returnedvalues[i,1] <- i
                # BUG FIX: was `mean(...) != "NaN"`, a numeric-to-string
                # comparison that only worked through coercion; empty intervals
                # (mean == NaN) keep NA in the mean column.
                if (!is.nan(mean(currentFL1.As))) {
                        returnedvalues[i,2] <- mean(currentFL1.As)
                }
                returnedvalues[i,3] <- sd(currentFL1.As)
                returnedvalues[i,4] <- localmin
                returnedvalues[i,5] <- localmax
                returnedvalues[i,6] <- length(currentFL1.As)
        }
        finaltable <- data.frame(section=returnedvalues[,1],mean=returnedvalues[,2],sd=returnedvalues[,3],
                min=returnedvalues[,4],max=returnedvalues[,5],n=returnedvalues[,6])
        return (finaltable)
}
### yeastSampSplit
### Splits a flowFrame by its FSC.A values
### Organizes into n equally-sized populations
### n defaults to 3
### Returns a LIST of tables (kinda weird)
# yeastThreePopSplit: sort a flowFrame's events by FSC.A (cell size), split
# into three equal-sized populations, and report mean/sd of FL1.A for each.
yeastThreePopSplit <- function(flowframe){
        # BUG FIX: the helper is named flowFrame2Table (capital T); the old
        # lowercase call `flowFrame2table` failed at runtime.
        flowFrametable <- flowFrame2Table(flowframe)
        flowFrametable <- flowFrametable[order(flowFrametable$FSC.A),]
        n <- length(flowFrametable[,1])
        # NOTE(review): when n is divisible by 3 the tercile boundaries overlap
        # by one row (ceiling(n/3) == floor(n/3)); preserved as-is.
        firstpop <- flowFrametable[1:(floor(n/3)),]
        secondpop <- flowFrametable[(ceiling(n/3)):(floor(n*2/3)),]
        thirdpop <- flowFrametable[(ceiling(n*2/3)):n,]
        poplistmeans <- c(mean(firstpop$FL1.A),mean(secondpop$FL1.A),mean(thirdpop$FL1.A))
        poplistsds <- c(sd(firstpop$FL1.A),sd(secondpop$FL1.A),sd(thirdpop$FL1.A))
        poptable <- data.frame(size=c("Low","Mid","High"),mean=poplistmeans,sd=poplistsds)
        return(poptable)
}
### flowFrame2Table:
### Generates a full table (data frame) of a flowFrame's data values
### with appropriate labels
# flowFrame2Table: convert a flowFrame's expression matrix into a data frame
# carrying the frame's channel names as column labels.
flowFrame2Table <- function(flowframe) {
        # BUG FIX: data.frame(x, ncol=10) appended a literal `ncol` column,
        # which then broke the colnames assignment below; exprs() already
        # carries the right shape.
        flowframetable <- data.frame(exprs(flowframe))
        colnames(flowframetable) <- colnames(flowframe)
        return(flowframetable)
}
# Thalf scripts
# Most of these are junk and only work with properly-formatted data frames
# which contain values of 'time', 'mean', 'strain', and 'treatment'.
# Also assumes that data frame is sorted by time on some level
# For some reason this works even when the treatment column is full of NA values
# thalfall: compute t1/2 for every strain x treatment combination in a
# properly-formatted data frame (columns 'time','mean','strain','treatment',
# sorted by time). Returns one row per combination: thalf plus the min/max
# fluorescence values used for the half-maximal calculation.
thalfall <- function(x,minval=F) {
        #HACK
        # Collapse strain and treatment into one grouping key; ",," is used as
        # a separator unlikely to occur in either field.
        strain_treatment <- paste(x$strain,x$treatment,sep=",,")
        all_levels <- levels(as.factor(strain_treatment))
        x$strain_treatment <- strain_treatment
        # NOW I KNOW THE DATA FRAME SIZE
        tablelen <- length(all_levels)
        thalftable <- data.frame(matrix(ncol=5,nrow=tablelen))
        colnames(thalftable) <- c("thalf","min","max","strain","treatment")
        for (i in seq(along=all_levels)) {
                current_subset <- subset(x,strain_treatment==all_levels[i])
                # Generate fit object
                current_fit <- iaaregress(current_subset)
                # c is the fitted lower asymptote of the logistic model
                c <- current_fit$coefficients[[2]]
                if (minval == F) {
                        # No floor supplied: default to max(lowest observed mean, c),
                        # mirroring predict.thalf's internal default
                        current_thalf <- predict.thalf(current_fit)
                        minval_used <- max(c(min(current_fit$data$mean),c))
                } else {
                        current_thalf <- predict.thalf(current_fit,minval=minval)
                        minval_used <- minval
                }
                maxval <- current_fit$data[1,"mean"] # First fluorescence data point
                thalftable[i,1] <- current_thalf
                thalftable[i,2] <- minval_used
                thalftable[i,3] <- maxval
                # Split the combined key back into its strain/treatment parts
                thalftable[i,4:5] <- strsplit(all_levels[i],split=",,")[[1]]
        }
        return(thalftable)
}
# Expects a data frame with columns of mean and time values
# getthalf: fit the logistic model to a data frame with 'mean' and 'time'
# columns, then return the estimated half-maximal time point.
getthalf <- function(x,minval=0) {
        predict.thalf(iaaregress(x), minval = minval)
}
### Should make qplot.logistic into a geom, (geom_logistic?)
### This will allow grouping more easily
### qplot.logistic:
### Makes plotting time series + fit + thalf a little easier
### Assumes that you have 'time' and 'mean' columns
# qplot.logistic: plot a fluorescence time series with its logistic fit curve
# and a labeled vertical line at the estimated half-life.
# Assumes timeseriesdata has 'time' and 'mean' columns; returns a ggplot object.
qplot.logistic <- function(timeseriesdata,minval=F) {
        # Generate fit object
        fitobject <- iaaregress(timeseriesdata)
        # Calculate thalf (optionally with a caller-supplied floor value)
        if (minval==F) {
                thalf <- predict.thalf(fitobject)
        } else {
                thalf <- predict.thalf(fitobject,minval=minval)
        }
        # Calculate text position: 3/4 of the way up the observed mean range
        textpos <- range(timeseriesdata[,"mean"])[1] + 0.75*diff(range(timeseriesdata[,"mean"]))
        # Plot: raw points, fitted curve (blue), thalf marker (red), and label
        a <- qplot(data=timeseriesdata,time,mean) +
                geom_line(data=predict.logistic(fitobject),color="blue") +
                geom_vline(xintercept=thalf,color="red") +
                geom_text(aes(x=thalf+0.1*max(timeseriesdata[,"time"]),y=textpos),label=paste("t½ =",signif(thalf,digits=3)))
        return(a)
}
### predict.logistic:
### Generates a data table based on a logistic fit object
### Right now, only works w/ variables "mean" and "time",
### but could easily be changed to dynamically name
# predict.logistic: build a 100-point prediction curve over [0, max(time)]
# from a fitted model object. The returned data frame carries the same first
# two column names as the fit's data (e.g. "time" and "mean").
predict.logistic <- function(fitobject) {
        upper <- max(fitobject$data[, 1])
        curve <- data.frame(x = seq(0, upper, length.out = 100), y = NA)
        curve[, 2] <- predict(fitobject, newdata = curve)
        colnames(curve) <- colnames(fitobject$data)[1:2]
        curve
}
### thalf:
### Estimates t1/2 using a logistic fit model (drm)
### Currently only built for a 4-parameter model
### You can specify the 'min value' (e.g. from steady state data)
### by setting the minval option.
# predict.thalf: estimate the time at which fluorescence reaches halfway
# between its initial (maximal) value and a floor value.
# fitobject: a drm-style fit with $coefficients (b, c, d, e) and $data whose
#            first row of the "mean" column is the initial fluorescence.
# minval: optional floor (e.g. from steady-state data); when 0, defaults to
#         max(min(observed mean), c) where c is the fitted lower asymptote.
predict.thalf <- function(fitobject,minval=0) {
        coefficients <- fitobject$coefficients
        b <- coefficients[[1]]
        c <- coefficients[[2]]
        d <- coefficients[[3]]
        e <- coefficients[[4]]
        maxval <- fitobject$data[1,"mean"] # First fluorescence data point
        if (minval == 0) {
                minval <- max(c(min(fitobject$data$mean),c))
        }
        halfmax <- mean(c(maxval,minval))
        # BUG FIX: a log-model estimate was computed here and immediately
        # overwritten by the line below; the dead assignment is removed.
        # NOTE(review): iaaregress fits LL.4 (log-logistic) while this inverse
        # is the non-log L.4 form -- confirm which model is intended. The log
        # form would be: exp(log((d-c)/(halfmax-c) - 1)/b + log(e))
        thalf <- log( ( d - c )/( halfmax - c ) - 1 )/b + e # non-log model
        return(thalf)
}
### iaaregress:
### Does a logistic regression on a properly-formatted table: FL1.A first, Time second.
### Note: I haven't updated the function descriptions for the 'log' model
### in log model, x and e are replaced by log(x) and log(e)
# iaaregress: logistic (dose-response) regression of mean fluorescence vs time
# via drc::drm on a properly-formatted table (columns 'mean' and 'time').
# param selects the 3-, 4- (default), or 5-parameter model.
iaaregress <- function(table,param=4) {
        if (param == 4) {
                # 4-parameter log-logistic model:
                # f(x) = c + \frac{d-c}{(1+\exp(b(\log(x) - \log(e))))}
                regress0 <- drm(data=table, mean~time, fct = LL.4()) # log model
                # regress0 <- drm(data=table, mean~time, fct = L.4()) # non log model
        } else if (param == 3) {
                # 3-parameter model
                # f(x) = c + \frac{d-c}{(1+\exp(b*x)}
                regress0 <- drm(data=table, mean~time, fct = L.3())
        } else if (param == 5) {
                # 5-parameter model (sometimes called the Boltzmann model)
                # f(x) = c + \frac{d-c}{(1+\exp(b(x - e)))^f}
                regress0 <- drm(data=table, mean~time, fct = L.5())
        } else {
                # BUG FIX: unsupported values previously left regress0 undefined,
                # producing a cryptic "object not found" error.
                stop("param must be 3, 4, or 5")
        }
        return(regress0)
}
### Cytometer data time series normalization
### Assumes:
### 1) a data column with 'strain type' called "strain"
### 2) a data column with 'treatment type' called "treatment"
### 3) a data column with the values to normalize called "mean"
### 4) a data column for relative time passed called, "time"
### 5) a "strain" type called "W303", wherefrom the mean will be subtracted
### 6) That you want to normalize to the very first time point's "mean" value
# cyt.normalize: background-subtract the mean of the "W303" control strain
# from every 'mean' value, drop the W303 rows, then normalize each
# strain+treatment trajectory to its own first time point.
# See the assumptions listed in the comment block above this function.
cyt.normalize <- function(timeseries) {
        # Figure out the metadata
        strains <- levels(factor(timeseries[,"strain"]))
        print(paste("Strains:",paste(strains,collapse=", "),sep=" "))
        treatments <- levels(factor(timeseries[,"treatment"]))
        # factor() stringifies numeric treatments; convert back so the
        # treatment==j comparisons below work on the original type
        if (typeof(timeseries[,"treatment"])=="double") {
                treatments <- as.double(treatments)
        }
        print(paste("Treatments:",paste(treatments,collapse=", "),sep=" "))
        # Subtract off mean of all W303 'mean' values
        w303 <- mean(subset(timeseries,strain=="W303")$mean)
        timeseries$mean <- timeseries$mean-w303
        # Remove W303 values because the differences will be hugely exaggerated
        # if normalized to the first point
        timeseries <- subset(timeseries,strain!="W303")
        # Divide by first time value for each strain+treatment subset
        # (rownames are reset so subset rownames index back into timeseries)
        rownames(timeseries) <- 1:length(timeseries[,1])
        for (i in strains) {
                for (j in treatments) {
                        current_subset <- subset(timeseries,strain==i&treatment==j)
                        current_rows <- rownames(current_subset)
                        # First (earliest) row of this strain+treatment combination
                        first_row <- timeseries[current_rows[1],]
                        suppressWarnings(timeseries[current_rows,"mean"] <- timeseries[current_rows,"mean"]/first_row$mean)
                }
        }
        return(timeseries)
}
# flowSplit::
# Take a flowSet, split into N evenly-sized pieces M (not random, but from start to finish)
# In all M, calculate mean fluorescence for all N pieces
# Return data frame to summarize this
# n= is currently useless and all this script does is split into 4
# splitSet: for each frame of a flowSet, split the channel-3 fluorescence
# events into n consecutive (not random) chunks and return a data frame of
# per-chunk means (columns mean1..meanN, one row per frame).
splitSet <- function(flowset, n=4) {
        # BUG FIX: previously read the global `group2` instead of the
        # `flowset` argument, and ignored n beyond the chunk size.
        num_frames <- length(flowset)
        fltable <- data.frame(matrix(nrow=num_frames, ncol=n))
        colnames(fltable) <- paste("mean", 1:n, sep="")
        for (i in seq_len(num_frames)) {
                # Raw fluorescence data for this frame (channel 3)
                fl_raw <- exprs(flowset[[i]][,3])
                fl_length <- length(fl_raw)
                piece_size <- floor(fl_length/n)
                # Mean of each chunk; the last chunk absorbs the remainder
                fltable[i,] <- vapply(seq_len(n), function(k) {
                        lo <- (k - 1) * piece_size + 1
                        hi <- if (k == n) fl_length else k * piece_size
                        mean(fl_raw[lo:hi])
                }, numeric(1))
        }
        return(fltable)
}
# splitFrame: split one flowFrame's channel-3 fluorescence events into n
# consecutive chunks and return a one-row data frame of per-chunk means
# (columns mean1..meanN). Identical to the old behavior for the default n=4.
splitFrame <- function(flowframe,n=4) {
        # get the raw fluorescence data
        fl_raw <- exprs(flowframe[,3])
        fl_length <- length(fl_raw)
        # piece size; the last chunk absorbs the remainder
        piece_size <- floor(fl_length/n)
        # BUG FIX/generalization: the old body always computed exactly 4
        # chunks regardless of n; now n is honored.
        fl_vec <- vapply(seq_len(n), function(k) {
                lo <- (k - 1) * piece_size + 1
                hi <- if (k == n) fl_length else k * piece_size
                mean(fl_raw[lo:hi])
        }, numeric(1))
        names(fl_vec) <- paste("mean", seq_len(n), sep="")
        fltable <- data.frame(t(fl_vec))
        return (fltable)
}
# Takes in a flowSet and checks for empty flowFrames (flowCore's Subset fails if there's no events in a flowFrame)
# Also returns a flowSet, with such frames removed if necessary. Posts a note about those which were removed.
# qa.flowSet: drop flowFrames with zero events from a flowSet, since
# flowCore's Subset fails on empty frames. Returns the (possibly shortened)
# flowSet and reports which frame positions were removed.
qa.flowSet <- function(flowset_in) {
        ### TODO:
        ### Make sure all columns have . substituted for -
        # Collect positions of frames with zero events, for exclusion
        pos <- c()
        for (i in seq_len(length(flowset_in))) {
                if (length(exprs(flowset_in[[i]][,1]))==0) {
                        pos <- c(pos,i)
                }
        }
        if (length(pos) > 0) {
                # remove empty frames.
                flowset_out <- flowset_in[seq_len(length(flowset_in))[-pos]]
                # BUG FIX: the list of removed frames previously printed a
                # stray comma after every index, including the last.
                print(paste("The following frames had no events and were removed: ",
                        paste(pos,collapse=", "),".",sep=""))
                print(paste("The flowSet is now",length(flowset_out),"frames long."))
                return(flowset_out)
        } else {
                # unchanged flowSet
                return(flowset_in)
        }
}
#outputs data frame formatted with several parameters used for modeling.
#assumes diploids and only returns singlet data
#requires flowset and strain vector
# on 2012-1-3, replaced 'split' FL1/FSC values by background-subtracted, changing the order of columns. All 'modelformat' data needs to be reprocessed.
modelingFormat <- function(flowset,strain_vec,baseline="noYFP",normalize=F,ploidy="diploid") {
# Format a flowSet into a data frame suitable for modeling: singlet-only
# summary statistics plus background-subtracted fluorescence columns.
# Assumes diploids by default and returns singlet data only.
#
# Args:
#   flowset:    a flowCore flowSet
#   strain_vec: character vector of strain names, one per flowFrame; must
#               contain at least one entry equal to `baseline`
#   baseline:   strain whose mean fluorescence is treated as background
#   normalize:  if TRUE, attempts a per-strain/treatment normalization
#               (see NOTE below -- currently has no effect)
#   ploidy:     passed through to summary.cyt for gating
# Returns: the summary data frame with renamed fluorescence columns, empty
# metadata placeholders (grp/afb/rep), and background-subtracted columns.
# Make sure strain vector includes correct baseline value
if ( sum(as.numeric(strain_vec==baseline)) == 0 ) {
stop("No baseline strain found in strain_vec (default is noYFP)")
}
# Generate data frames from which to take data
# Raw FL1.A table
raw <- summary.cyt(flowset,transform=F,only="singlets",split=T,ploidy=ploidy)
# Raw FSC.A table
# fsc <- summary.cyt(flowset,channel="FSC.A",only="singlets")
# Normalized data table
# fl1_fsc <- summary.cyt(flowset,transform="fscanorm",only="singlets",split=T)
out <- raw
# Placeholder metadata columns, filled in later by callers (see reprocess())
out$grp <- NA
out$strain <- strain_vec
out$afb <- NA
out$rep <- NA
# Rename the FL1.A summary columns to short names: FL1.A / median / sd
FL_idx <- which(colnames(out)=="FL1.Amean")
FL_idx <- c(FL_idx,which(colnames(out)=="FL1.Amedian"))
FL_idx <- c(FL_idx,which(colnames(out)=="FL1.Asd"))
colnames(out)[FL_idx] <- c("FL1.A","median","sd")
# out <- cbind(out,FSC.A=fsc$FSC.Amean)
# out <- cbind(out,FL1_FSC=fl1_fsc$FL1_FSCmean)
# out$FL1_FSC_norm <- out$FL1_FSC-mean(subset(out,strain==baseline)$FL1_FSC)
# Changed 2012-1-3
# Rename the four time-piece columns produced by split=T
colnames(out)[which(colnames(out)=="split1")] <- "FL1.A_bs_1"
colnames(out)[which(colnames(out)=="split2")] <- "FL1.A_bs_2"
colnames(out)[which(colnames(out)=="split3")] <- "FL1.A_bs_3"
colnames(out)[which(colnames(out)=="split4")] <- "FL1.A_bs_4"
# out <- cbind(out,FL1.A_1=fl1_fsc[,"split1"])
# out <- cbind(out,FL1.A_2=fl1_fsc[,"split2"])
# out <- cbind(out,FL1.A_3=fl1_fsc[,"split3"])
# out <- cbind(out,FL1.A_4=fl1_fsc[,"split4"])
# out <- cbind(out,FL1.A_1=fl1_fsc[,"split1"])
# out <- cbind(out,FL1.A_2=fl1_fsc[,"split2"])
# out <- cbind(out,FL1.A_3=fl1_fsc[,"split3"])
# out <- cbind(out,FL1.A_4=fl1_fsc[,"split4"])
# background-subtract all FL1.A values
# NOTE(review): the five lines below reference columns that are never created
# above -- out$FL13A, out$FL3.A, and (on the RHS) the very FL3.A_bs_k columns
# being assigned. The columns created above are FL1.A and FL1.A_bs_1..4; this
# looks like a partial FL1->FL3 find/replace gone wrong and these lines will
# error or produce NA at runtime. TODO confirm intended column names before
# relying on any *_bs output from this function.
out$FL1.3_bs <- out$FL13A-mean(subset(out,strain==baseline)$FL3.A)
out$FL3.A_bs_1 <- out$FL3.A-mean(subset(out,strain==baseline)$FL3.A_bs_1)
out$FL3.A_bs_2 <- out$FL3.A-mean(subset(out,strain==baseline)$FL3.A_bs_2)
out$FL3.A_bs_3 <- out$FL3.A-mean(subset(out,strain==baseline)$FL3.A_bs_3)
out$FL3.A_bs_4 <- out$FL3.A-mean(subset(out,strain==baseline)$FL3.A_bs_4)
if(normalize==T) {
# NOTE(review): the ddply result is neither assigned nor returned, so
# normalize=T currently has no effect; it also references FL3_FSC_norm,
# which is not created above. TODO confirm intended behavior.
ddply(out,c("strain","treatment"),transform,norm1=FL3_FSC_norm/min(FL3_FSC_norm[1:3]))
}
return(out)
}
# Produces a normalized fluorescence column 'normed'
# Expects the 'FL1.A_bs' column to exist (not hard to extend to others/make it user selectable)
# Has two different methods, version 1 and version 2, described in the script
addnorm <- function(frame,factor_in=c("strain","treatment"),method=1,column="FL3.Amean_bs") {
  # Add a 'normed' column: fluorescence values normalized within groups
  # defined by `factor_in`, using one of three strategies.
  #
  # Args:
  #   frame:     data frame of background-subtracted fluorescence values
  #   factor_in: column names defining the groups to normalize within
  #   method:    1 = divide by the group maximum (default; works for any data)
  #              2 = divide by the mean of pre-zero time points (time series)
  #              3 = divide by the y-intercept of a linear fit to pre-zero
  #                  time points (time series)
  #   column:    name of the value column to normalize; falls back to
  #              "FL3.A_bs" if `column` is absent
  # Returns: `frame` with an added 'normed' column (row order follows ddply
  # grouping).
  library(plyr)
  if ( (sum(colnames(frame)==column)) == 0 ) {
    if( (sum(colnames(frame)=="FL3.A_bs")) == 0 ) {
      # BUG FIX: this message previously named FL1.* columns although the
      # code checks for FL3.* columns
      stop("Could not find the background-subtracted values column. \
This script requires that there be a column named \
FL3.Amean_bs, FL3.A_bs, or the user-defined column using\
column='desired-column'")
    } else {
      column <- "FL3.A_bs"
    }
  }
  if (method==1) {
    # Default normalization method. Takes highest point in dataset grouped by
    # 'factor_in' and sets it to 1, divides all other values by that number.
    # This method is default because it works regardless of whether the data
    # is a time series.
    estimate_0 <- function(x) {
      x[,"normed"] <- x[,column]/max(x[,column])
      return(x)
    }
  } else if (method == 2) {
    # Takes the mean value of all time points which are less than 0, after
    # grouping by 'factor_in', and divides all other data points in that
    # group by it. Therefore, no value is actually '1' except by very rare
    # chance. Requires a time series with negative time values to work.
    estimate_0 <- function(x) {
      normresult <- x[,column]/mean(x[x$time<0,column])
      x <- cbind(x,normed=normresult)
      return(x)
    }
  } else if (method == 3) {
    # Makes a fit line to all pre-zero time points and divides by the
    # inferred y-intercept. Requires a time series with negative time values.
    estimate_0 <- function(x) {
      prezero_points <- x[x$time<0,]
      prezero_fit <- lm(prezero_points[,column]~prezero_points[,"time"])
      prezero_intercept <- prezero_fit$coefficients[1] # intercept
      normresult <- x[,column]/prezero_intercept
      x <- cbind(x,normed=normresult)
      return(x)
    }
  } else {
    # BUG FIX: the message previously said "version=" (no such argument --
    # the parameter is `method`) and contained a stray closing parenthesis
    stop("You must define method=1, method=2, or method=3")
  }
  # Methods 2 and 3 require negative time values in every normalized subset
  # (scalar && / || used here: this is a single yes/no check, not vectorized)
  if (sum(frame$time<0)==0 && (method==2 || method==3)) {
    stop("To use methods 2 or 3, the input data frame must have negative time values for each normalized data subset")
  }
  # Run the chosen estimation function within each group
  frame <- ddply(frame,factor_in,estimate_0)
  return(frame)
}
addbs <- function(frame, column = "FL3.Amean", baseline = "noYFP") {
  # Append a background-subtracted copy of `column`, named "<column>_bs":
  # the background is the mean of that column over the baseline strain rows.
  bg <- mean(subset(frame, strain == baseline)[, column])
  bs_name <- paste(column, "_bs", sep = "")
  frame[, bs_name] <- frame[, column] - bg
  frame
}
# Generate a data frame that's useful for plotting overlapping density plots with ggplot.
# At the moment it's very finnicky. It expects a huge data frame as input that's made with the exprs command on a bunch
# bunch of flowFrames, with an extra column called 'exptime' signifying the time of each well's acquisition.
# It shouldn't be hard to make it accept a flowSet and do this automatically.
density_frame <- function(frame,param="FL1.A") {
  # Build a long data frame of kernel-density curves, one per acquisition
  # time ('exptime'), suitable for overlapping density plots in ggplot.
  #
  # Args:
  #   frame: data frame with a numeric `param` column and an `exptime`
  #          grouping column (e.g. built from exprs() output of flowFrames)
  #   param: name of the numeric column to estimate densities for
  # Returns: data frame with columns x, y (density curve), exptime (group),
  # and y_norm (y rescaled so each group's maximum is 1).
  #
  # FIXES vs. the original:
  #  - exptime labels are attached per group before combining, instead of
  #    rep(..., each=512), which silently mislabeled rows if density() ever
  #    returned a different number of points
  #  - the rbind loop started at index 2, which broke (2:1 runs backwards)
  #    when there was only one exptime group
  #  - group-wise normalization uses base stats::ave() instead of plyr::ddply
  # generate the per-group density curves (tapply orders groups by factor level)
  dens_list <- tapply(frame[,param], frame$exptime,
                      function(v) data.frame(x = density(v)$x, y = density(v)$y))
  grp_names <- names(dens_list)
  for (i in seq_along(dens_list)) {
    dens_list[[i]]$exptime <- grp_names[i]
  }
  frame_dens <- do.call(rbind, dens_list)
  rownames(frame_dens) <- NULL
  # normalize the curves within each exptime group to a max of 1
  frame_dens$y_norm <- ave(frame_dens$y, frame_dens$exptime,
                           FUN = function(v) v / max(v))
  return(frame_dens)
}
# Script to reprocess cytometer data following a given pattern. Uses modelFormat for reprocessing.
# Expects as input a directory with one folder 'csvs' full of prior csvs and the matching source data in 'source'
# Outputs to same directory in 'newcsvs' folder, overwriting anything that exists there.
reprocess <- function(directory_in,nick_type=T) {
# Reprocess previously exported cytometer CSVs against their raw FCS data.
# Expects `directory_in` to contain a 'csvs' folder of prior exports and a
# 'source' folder with the matching FCS experiment directories; writes the
# reprocessed output to a fresh 'newcsvs' folder (overwriting any existing
# one). Reprocessing itself is delegated to modelingFormat().
#
# Args:
#   directory_in: base directory containing csvs/ and source/
#                 NOTE(review): paths below mix paste(directory_in,"/csvs",...)
#                 with paste(directory_in,"source/",...), so the function only
#                 works if directory_in ends with a trailing "/" -- TODO confirm
#   nick_type:    NOTE(review): never used in this function -- TODO confirm
# Returns: nothing useful; called for its side effects (files written).
# Delete 'newcsvs' if it already exists
unlink(paste(directory_in,"/newcsvs",sep=""),recursive=T)
# Get csv list and create fresh 'newcsv' dir
csvlist <- list.files(paste(directory_in,"/csvs",sep=""))
dir.create(paste(directory_in,"/newcsvs",sep=""))
# massive loop to reprocess each piece of data
for (i in csvlist) {
csv <- read.csv(paste(directory_in,"/csvs/",i,sep=""))
# reconstruct the FCS file names recorded in the csv's 'file' column
csv_files <- paste(csv$file,".fcs",sep="")
# experiment name = first 3 characters of the csv name, underscores removed
expname <- gsub("_","",substr(i,1,3))
# find dir for this experiment
dirs <- list.files(paste(directory_in,"source/",sep=""))
# NOTE(review): grep may match more than one directory; only the first
# match is meaningful to read.flowSet below -- TODO confirm uniqueness
dir <- grep(expname,dirs)
message("Reprocessing ",expname,"...")
# Read in flowSet, trim to all that match csv
fs <- read.flowSet(path=paste(directory_in,"source/",dirs[dir],sep=""),alter.names=T)
fs_exp <- sampleNames(fs)
fs_matches <- which(fs_exp %in% csv_files)
fs_trimmed <- fs[fs_matches]
####################################
# Currently uses 'modelingFormat' to reprocess data. Can substitute anything in here
####################################
newcsv <- modelingFormat(fs_trimmed,csv$strain)
# Re-add the metadata: treatment, grp, afb, rep
for (cols in c("treatment","grp","afb","rep")) {
newcsv[,cols] <- csv[,cols]
}
write.csv(newcsv,file=paste(directory_in,"newcsvs/",i,sep=""))
}
message("Finished. Files are in:")
message(directory_in,"newcsvs/")
}
flowFrame.gettime <- function(flowframe) {
  # Read the FCS $BTIM keyword (acquisition begin time, colon-separated
  # fields, presumably "HH:MM:SS:cc" -- confirm against the cytometer's FCS
  # output) and convert it to minutes since midnight.
  btim <- keyword(flowframe)$`$BTIM`
  parts <- as.numeric(strsplit(btim, split = ":")[[1]])
  parts[1] * 60 + parts[2] + parts[3] / 60 + parts[4] / 6000
}
YeastCytSummary <- function(inpath,ploidy=F,only="singlets",channel="FL1.A") {
  # Read a directory of FCS files into a flowSet, summarize it with
  # summary.cyt, and write the summary as a timestamped CSV on the Desktop.
  #
  # Args:
  #   inpath:  directory containing the FCS files
  #   ploidy:  must be "haploid" or "diploid" (default F forces the caller
  #            to choose explicitly)
  #   only:    gating subset passed to summary.cyt
  #   channel: fluorescence channel passed to summary.cyt
  # Returns: nothing; called for the side effect of writing the CSV.
  #
  # Validate ploidy up front (before the slow flowSet read) so bad input
  # fails fast; %in% replaces the elementwise `|` comparison chain.
  if (!(ploidy %in% c("haploid","diploid"))) {
    stop('Must define ploidy= as "haploid" or "diploid"')
  }
  fs <- read.flowSet(path=inpath,alter.names=T)
  fs_sum <- summary.cyt(fs,only=only,ploidy=ploidy,channel=channel)
  filename <- paste0("summary-",format(Sys.time(), "%Y-%m-%d--%H-%M-%S"),".csv")
  # print(paste("~/Desktop",filename,sep=""))
  write.csv(fs_sum,paste0("~/Desktop/",filename))
  message("File was written to Desktop/",filename)
}
|
# Hierarchical clustering of mall customers on annual income + spending score.
#import data
data=read.csv("Mall_Customers.csv")
X=data[,4:5]   # columns 4:5 = annual income, spending score
#Finding no of clusters using dendrograms (Ward linkage on Euclidean distance)
dendro=hclust(dist(X,method="euclidean"),method = 'ward.D')
plot(dendro,
     main = "DendroGrams",
     xlab = "Customers",
     ylab = "Euclidean Distance")
#Build the model: cut the Ward tree into 5 clusters (chosen from the dendrogram)
hc=hclust(dist(X,method="euclidean"),method = 'ward.D')
y_hc=cutree(hc,5)
#visualising the clusters
library(cluster)
clusplot(X,
         y_hc,
         lines=0,
         shade=TRUE,
         color=TRUE,
         labels=2,
         plotchar=FALSE,
         span=TRUE,
         # BUG FIX: title previously said "KMeans Cluster Alg", but this plot
         # shows hierarchical (Ward) clustering; also fixed the "Annaul" typo
         main="Hierarchical Cluster Alg",
         xlab="Annual Income",
         ylab="SpendingScore")
|
/Part 4 - Clustering/Hierarchical Clustering/Hierarchical_Clustering.R
|
no_license
|
AchyuthReddy001/Machine-Learning
|
R
| false
| false
| 662
|
r
|
#import data
data=read.csv("Mall_Customers.csv")
X=data[,4:5]
#Finding no of clusters using dendrograms
dendro=hclust(dist(X,method="euclidean"),method = 'ward.D')
plot(dendro,
main = paste("DendroGrams"),
xlab = "Customers",
ylab = "Euclidean Distance")
#Build the model
hc=hclust(dist(X,method="euclidean"),method = 'ward.D')
y_hc=cutree(hc,5)
#visualising the clusters
library(cluster)
clusplot(X,
y_hc,
lines=0,
shade=TRUE,
color=TRUE,
labels=2,
plotchar=FALSE,
span=TRUE,
main=paste("KMeans Cluster Alg"),
xlab="Annaul Income",
ylab="SpendingScore")
|
#' @title Event Constructor
#'
#' @description Event constructor
#' @param id A character identifier
#' @param type A character indicating the event type
#' @param time A character string indicating the event timestamp
#' @param ... A list with the event attributes.
#'
#' @return The function returns an object of class \code{event}. \code{event}
#' objects are implemented as a list of two main elements: A \code{head} and
#' a \code{body}.The \code{head} contains an identifier (\code{id}), a string
#' indicating the event type (\code{type}) and a \code{POSIXct} object indicating
#' when the event occurs (\code{time}). The \code{body} containts the event attributes
#' defined by the user. By default \code{id} is generated using \pkg{uuid} and \code{time}
#' value is the result of \code{Sys.time()} function by default.
#'
#' @examples
#'
#' birth_event <- event(
#' id = 'first-id',
#' type = 'BIRTH',
#' time = '1936-11-09',
#' birth_date = '1936-11-09'
#' )
#'
#' death_event <- event(
#' id = 'second-id',
#' type = 'DEATH',
#' time = '2019-05-22',
#' death_date = '2019-05-22'
#' )
#'
#' @rdname event
#' @export
new_event <- function(id = uuid::UUIDgenerate(), type, time = Sys.time(), ...){
  # Low-level constructor: assemble an 'event' object from a fixed header
  # (id / type / time) and a free-form body holding the attributes passed
  # through `...`. No validation is performed here (see validate_event).
  header <- list(id = id, type = type, time = time)
  body <- list(...)
  structure(
    .Data = list(header = header, body = body),
    class = c("event", "list")
  )
}
#' @rdname event
#' @export
validate_event <- function(id, type, time, ... ){
  # Validate the event header fields.
  # `time` may be a character timestamp (as in the documented examples) or a
  # POSIXct value (the new_event() default of Sys.time()), so accept both.
  #
  # BUG FIX: the original checked inherits(Sys.time(), "POSIXct"), which is
  # always TRUE, so the `time` argument was never actually validated.
  valid <- c(
    is.character(id),
    is.character(type),
    is.character(time) || inherits(time, "POSIXct")
  )
  return(all(valid))
}
#' @rdname event
#' @export
event <- function(id = uuid::UUIDgenerate(), type, time, ...){
  # Public constructor: validate the header fields, then delegate to the
  # low-level new_event() constructor. stopifnot() aborts on invalid input.
  stopifnot(validate_event(id, type, time, ...))
  new_event(id, type, time, ...)
}
|
/R/event.R
|
no_license
|
cran/eventr
|
R
| false
| false
| 1,897
|
r
|
#' @title Event Constructor
#'
#' @description Event constructor
#' @param id A character identifier
#' @param type A character indicating the event type
#' @param time A character string indicating the event timestamp
#' @param ... A list with the event attributes.
#'
#' @return The function returns an object of class \code{event}. \code{event}
#' objects are implemented as a list of two main elements: A \code{head} and
#' a \code{body}.The \code{head} contains an identifier (\code{id}), a string
#' indicating the event type (\code{type}) and a \code{POSIXct} object indicating
#' when the event occurs (\code{time}). The \code{body} containts the event attributes
#' defined by the user. By default \code{id} is generated using \pkg{uuid} and \code{time}
#' value is the result of \code{Sys.time()} function by default.
#'
#' @examples
#'
#' birth_event <- event(
#' id = 'first-id',
#' type = 'BIRTH',
#' time = '1936-11-09',
#' birth_date = '1936-11-09'
#' )
#'
#' death_event <- event(
#' id = 'second-id',
#' type = 'DEATH',
#' time = '2019-05-22',
#' death_date = '2019-05-22'
#' )
#'
#' @rdname event
#' @export
new_event <- function(id = uuid::UUIDgenerate(), type, time = Sys.time(), ...){
structure(
.Data = list(
header = list(
id = id,
type = type,
time = time
),
body = list(...)
),
class = c("event", "list")
)
}
#' @rdname event
#' @export
validate_event <- function(id, type, time, ... ){
valid <- c(
is.character(id),
is.character(type),
inherits(Sys.time(), "POSIXct")
)
return(all(valid))
}
#' @rdname event
#' @export
event <- function(id = uuid::UUIDgenerate(), type, time, ...){
stopifnot(validate_event(id, type, time, ...))
.event <- new_event(id, type, time, ...)
return(.event)
}
|
# Jake Yeung
# Date of Creation: 2022-05-04
# File: ~/projects/scchic/scripts/revision_scripts/revisions_from_istbea/28-pseudotime_cubic_spline_each_gene_get_derivatives_command_args.R
#
# Fits a cubic-spline GAM of imputed chromatin signal against pseudotime for
# every region (dynamic bin) of one histone mark -- one fit per trajectory --
# and saves the per-region fit objects to an RDS file given on the command line.
rm(list=ls())
library(scchicFuncs)
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(mgcv)
library(gratia)
suppressPackageStartupMessages(library("argparse"))
# create parser object
parser <- ArgumentParser()
# specify our desired options
# by default ArgumentParser will add an help option
parser$add_argument('-mark', metavar='k4me1, k4me3, k27me3, k9me3', default = "k27me3",
help='mark: k4me1, k4me3, k27me3, or k9me3')
parser$add_argument('-outfile', metavar='OUTRDS',
help='OUTFILE')
parser$add_argument("-v", "--verbose", action="store_true", default=TRUE,
help="Print extra output [default]")
# get command line options, if help option encountered print help and exit,
# otherwise if options not found on command line then set defaults,
args <- parser$parse_args()
# print some progress messages to stderr if "quietly" wasn't requested
if ( args$verbose ) {
print("Arguments:")
print(args)
}
# NOTE(review): jratio is never used anywhere below -- TODO confirm removable
jratio <- 0.66
jmarktmp <- args$mark
outrds.tmp <- args$outfile
jmarks <- c("k4me1", "k4me3", "k27me3", "k9me3"); names(jmarks) <- jmarks
# jmarksold <- c("H3K4me1", "H3K4me3", "H3K27me3", "H3K9me3"); names(jmarksold) <- jmarks
# Load meta ----------------------------------------------------------------
# One cell-level metadata table per mark; input paths are hard-coded cluster
# locations (this script is cluster-specific)
dat.meta.lst <- lapply(jmarks, function(jmark){
# inf.meta <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/multinom_celltyping_update_ctypes_from_LDA_k4me3_cleaned_k27me3_eryths2/metadata_reannotate_from_LLmat_fix_ctypes_by_batch_dynamicbins.", jmark, ".txt")
inf.meta <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/primetime_plots/umaps_pcas_with_batch_corrections/umap_metadata_primetime.", jmark, ".2022-04-21.txt")
dat.meta <- fread(inf.meta)
})
# cell-type -> color lookup, taken from the k4me1 metadata
dat.meta.colors <- subset(dat.meta.lst$k4me1, select = c(ctype.from.LL, colcode))
ctype2col <- hash::hash(dat.meta.colors$ctype.from.LL, dat.meta.colors$colcode)
# Load LDAs: all ---------------------------------------------------------
# Load the saved LDA model per mark and keep its posterior (topics & terms)
tm.lst <- lapply(jmarks, function(jmark){
if (jmark == "k4me1"){
inf.ldaout <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_varfilt/ldaAnalysis_fripfilt_varfilt/lda_outputs.count_mat_var_filt_dynamicbins.out_dynamic_bins_new_only.varcutoff.k4me1.2022-01-28/ldaOut.count_mat_var_filt_dynamicbins.out_dynamic_bins_new_only.varcutoff.k4me1.2022-01-28.Robj"
} else if (jmark == "k4me3"){
inf.ldaout <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_k4me3_cleaned/lda_outputs.count_mat_cleaned_no3no7_dynbins_allcells.k4me3.2022-04-12/ldaOut.count_mat_cleaned_no3no7_dynbins_allcells.k4me3.2022-04-12.Robj"
} else if (jmark == "k27me3"){
# inf.ldaout <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_k27me3_clean_eryths/lda_outputs.count_mat_merged_with_old_dynbins.k27me3.2022-04-15/ldaOut.count_mat_merged_with_old_dynbins.", jmark, ".2022-04-15.Robj")
# inf.ldaout <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_k27me3_clean_eryths/lda_outputs.count_mat_new_only_dynbins.k27me3.2022-04-15/ldaOut.count_mat_new_only.", jmark, ".2022-04-15.Robj")
inf.ldaout <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_k27me3_clean_eryths/lda_outputs.count_mat_new_only_dynbins.k27me3.2022-04-15/ldaOut.count_mat_new_only_dynbins.", jmark, ".2022-04-15.Robj")
} else if (jmark == "k9me3"){
inf.ldaout <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_repressive_cleaned_from_jupyter/ldaAnalysis_fripfilt_varfilt_binfilt/lda_outputs.count_mat_cleaned_dynbins.", jmark, ".2022-02-16/ldaOut.count_mat_cleaned_dynbins.", jmark, ".2022-02-16.Robj")
}
# load() brings `out.lda` into scope from the .Robj file
load(inf.ldaout, v=T)
tm <- posterior(out.lda)
return(tm)
})
# assertthat::assert_that(nrow(dat.meta.lst$k27me3) == nrow(tm.lst$k27me3$topics))
print(lapply(tm.lst, function(x) dim(x$topics)))
# Imputed signal per mark: log2 of the topics %*% terms reconstruction,
# transposed so rows = regions and columns = cells
dat.impute.lst <- lapply(tm.lst, function(tm){
dat.impute <- log2(t(tm$topics %*% tm$terms))
})
# Load batch corrected k27me3 ---------------------------------------------------
# k27me3 uses a precomputed batch-corrected imputation instead of the raw
# LDA product loaded above
inf.impute.k27me3.bc <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/batch_effect_corrections/mat_wide_k27me3_batch_corrected.2022-04-19.rds"
dat.impute.k27me3.bc <- readRDS(inf.impute.k27me3.bc)
dat.impute.lst$k27me3 <- dat.impute.k27me3.bc
# Load trajs --------------------------------------------------------------
# Per-mark pseudotime trajectories; k27me3 again uses a batch-corrected file
# indir.traj <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/trajs/cleaned2_batch_corrected_eryth_fix"
indir.traj <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/trajs/cleaned"
dat.trajs <- lapply(jmarks, function(jmark){
inf.trajs <- file.path(indir.traj, paste0("trajs_outputs.", jmark, ".rds"))
if (jmark == "k27me3"){
inf.trajs <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/trajs/cleaned2_batch_corrected_eryth_fix/trajs_outputs_batch_corrected.k27me3.2022-04-21.rds")
}
print(inf.trajs)
readRDS(inf.trajs)
})
# Exclude basophils from the k27me3 granulocyte trajectory
dat.trajs$k27me3$Granulocytes <- subset(dat.trajs$k27me3$Granulocytes, ctype.from.LL != "Basophils")
# Fit gam for each region --------------------------------------------------
# outrds.final <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/gam_fits/gam_fits_dynamic_bins4.final.", Sys.Date(), ".rds")
# fits.lst.bymark <- parallel::mclapply(jmarks, function(jmark){
# outrds.tmp <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/gam_fits/gam_fits_dynamic_bins4.", jmark, ".", Sys.Date(), ".rds")
jregions <- rownames(dat.impute.lst[[jmarktmp]]); names(jregions) <- jregions
print("Running fits:")
jstart <- Sys.time()
# For each region: join its imputed signal onto the trajectory cells, then
# fit one cubic-spline GAM (k=4, strong gamma=10 smoothing penalty, REML)
# of signal vs pseudotime per trajectory
jfits.lst.byregion <- lapply(jregions, function(jregion){
dat.signal <- data.frame(cell = colnames(dat.impute.lst[[jmarktmp]]), signal = dat.impute.lst[[jmarktmp]][jregion, ], stringsAsFactors = FALSE)
traj.ctypes <- names(dat.trajs[[jmarktmp]])
names(traj.ctypes) <- traj.ctypes
# long table: one row per (cell, trajectory) with pseudotime and metadata
dat.trajs.long <- lapply(traj.ctypes, function(traj.ctype){
dat.trajs.sub <- dat.trajs[[jmarktmp]][[traj.ctype]] %>%
filter(is.ctype) %>%
dplyr::select(cell, ctype.ordered, ptime) %>%
mutate(traj = traj.ctype)
# colcode = ctype2col[[traj.ctype]])
}) %>%
bind_rows() %>%
left_join(., dat.signal, by = "cell") %>%
left_join(., subset(dat.meta.lst[[jmarktmp]], select = c(cell, batch, ctype.from.LL, colcode)), by = "cell") %>%
mutate(traj = as.factor(traj),
region = jregion)
dat.trajs.long.split <- split(dat.trajs.long, dat.trajs.long$traj)
jfits.split <- lapply(dat.trajs.long.split, function(jsub){
# NOTE(review): data are already split by traj, so within each jsub the
# `by = traj` factor has only one populated level -- TODO confirm this
# is intended rather than a leftover from a single joint fit
jfit.sub <- gam(formula = signal ~ s(ptime, k = 4, bs = "cs", by = traj), gamma = 10, method = "REML", data = jsub)
# jsub$pred <- predict(jfit.sub)
# derivatives(jfit.sub)
return(list(fit = jfit.sub))
})
return(jfits.split)
})
print(paste("Done for", jmarktmp))
saveRDS(jfits.lst.byregion, file = outrds.tmp)
# return(jfits.lst.byregion)
print(Sys.time() - jstart)
|
/scripts/revision_scripts/revisions_from_istbea/28-pseudotime_cubic_spline_each_gene_get_derivatives_command_args.R
|
no_license
|
jakeyeung/sortchicAllScripts
|
R
| false
| false
| 8,021
|
r
|
# Jake Yeung
# Date of Creation: 2022-05-04
# File: ~/projects/scchic/scripts/revision_scripts/revisions_from_istbea/28-pseudotime_cubic_spline_each_gene_get_derivatives_command_args.R
#
rm(list=ls())
library(scchicFuncs)
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(mgcv)
library(gratia)
suppressPackageStartupMessages(library("argparse"))
# create parser object
parser <- ArgumentParser()
# specify our desired options
# by default ArgumentParser will add an help option
parser$add_argument('-mark', metavar='k4me1, k4me3, k27me3, k9me3', default = "k27me3",
help='mark: k4me1, k4me3, k27me3, or k9me3')
parser$add_argument('-outfile', metavar='OUTRDS',
help='OUTFILE')
parser$add_argument("-v", "--verbose", action="store_true", default=TRUE,
help="Print extra output [default]")
# get command line options, if help option encountered print help and exit,
# otherwise if options not found on command line then set defaults,
args <- parser$parse_args()
# print some progress messages to stderr if "quietly" wasn't requested
if ( args$verbose ) {
print("Arguments:")
print(args)
}
jratio <- 0.66
jmarktmp <- args$mark
outrds.tmp <- args$outfile
jmarks <- c("k4me1", "k4me3", "k27me3", "k9me3"); names(jmarks) <- jmarks
# jmarksold <- c("H3K4me1", "H3K4me3", "H3K27me3", "H3K9me3"); names(jmarksold) <- jmarks
# Load meta ----------------------------------------------------------------
dat.meta.lst <- lapply(jmarks, function(jmark){
# inf.meta <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/multinom_celltyping_update_ctypes_from_LDA_k4me3_cleaned_k27me3_eryths2/metadata_reannotate_from_LLmat_fix_ctypes_by_batch_dynamicbins.", jmark, ".txt")
inf.meta <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/primetime_plots/umaps_pcas_with_batch_corrections/umap_metadata_primetime.", jmark, ".2022-04-21.txt")
dat.meta <- fread(inf.meta)
})
dat.meta.colors <- subset(dat.meta.lst$k4me1, select = c(ctype.from.LL, colcode))
ctype2col <- hash::hash(dat.meta.colors$ctype.from.LL, dat.meta.colors$colcode)
# Load LDAs: all ---------------------------------------------------------
tm.lst <- lapply(jmarks, function(jmark){
if (jmark == "k4me1"){
inf.ldaout <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_varfilt/ldaAnalysis_fripfilt_varfilt/lda_outputs.count_mat_var_filt_dynamicbins.out_dynamic_bins_new_only.varcutoff.k4me1.2022-01-28/ldaOut.count_mat_var_filt_dynamicbins.out_dynamic_bins_new_only.varcutoff.k4me1.2022-01-28.Robj"
} else if (jmark == "k4me3"){
inf.ldaout <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_k4me3_cleaned/lda_outputs.count_mat_cleaned_no3no7_dynbins_allcells.k4me3.2022-04-12/ldaOut.count_mat_cleaned_no3no7_dynbins_allcells.k4me3.2022-04-12.Robj"
} else if (jmark == "k27me3"){
# inf.ldaout <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_k27me3_clean_eryths/lda_outputs.count_mat_merged_with_old_dynbins.k27me3.2022-04-15/ldaOut.count_mat_merged_with_old_dynbins.", jmark, ".2022-04-15.Robj")
# inf.ldaout <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_k27me3_clean_eryths/lda_outputs.count_mat_new_only_dynbins.k27me3.2022-04-15/ldaOut.count_mat_new_only.", jmark, ".2022-04-15.Robj")
inf.ldaout <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_k27me3_clean_eryths/lda_outputs.count_mat_new_only_dynbins.k27me3.2022-04-15/ldaOut.count_mat_new_only_dynbins.", jmark, ".2022-04-15.Robj")
} else if (jmark == "k9me3"){
inf.ldaout <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/LDA_outputs_repressive_cleaned_from_jupyter/ldaAnalysis_fripfilt_varfilt_binfilt/lda_outputs.count_mat_cleaned_dynbins.", jmark, ".2022-02-16/ldaOut.count_mat_cleaned_dynbins.", jmark, ".2022-02-16.Robj")
}
load(inf.ldaout, v=T)
tm <- posterior(out.lda)
return(tm)
})
# assertthat::assert_that(nrow(dat.meta.lst$k27me3) == nrow(tm.lst$k27me3$topics))
print(lapply(tm.lst, function(x) dim(x$topics)))
dat.impute.lst <- lapply(tm.lst, function(tm){
dat.impute <- log2(t(tm$topics %*% tm$terms))
})
# Load batch corrected k27me3 ---------------------------------------------------
inf.impute.k27me3.bc <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/batch_effect_corrections/mat_wide_k27me3_batch_corrected.2022-04-19.rds"
dat.impute.k27me3.bc <- readRDS(inf.impute.k27me3.bc)
dat.impute.lst$k27me3 <- dat.impute.k27me3.bc
# Load trajs --------------------------------------------------------------
# indir.traj <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/trajs/cleaned2_batch_corrected_eryth_fix"
indir.traj <- "/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/trajs/cleaned"
dat.trajs <- lapply(jmarks, function(jmark){
inf.trajs <- file.path(indir.traj, paste0("trajs_outputs.", jmark, ".rds"))
if (jmark == "k27me3"){
inf.trajs <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/trajs/cleaned2_batch_corrected_eryth_fix/trajs_outputs_batch_corrected.k27me3.2022-04-21.rds")
}
print(inf.trajs)
readRDS(inf.trajs)
})
dat.trajs$k27me3$Granulocytes <- subset(dat.trajs$k27me3$Granulocytes, ctype.from.LL != "Basophils")
# Fit gam for each region --------------------------------------------------
# outrds.final <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/gam_fits/gam_fits_dynamic_bins4.final.", Sys.Date(), ".rds")
# fits.lst.bymark <- parallel::mclapply(jmarks, function(jmark){
# outrds.tmp <- paste0("/nfs/scistore12/hpcgrp/jyeung/data_from_Hubrecht/hpc_hub_oudenaarden/scChiC/new_experiments/from_jupyterhub/gam_fits/gam_fits_dynamic_bins4.", jmark, ".", Sys.Date(), ".rds")
jregions <- rownames(dat.impute.lst[[jmarktmp]]); names(jregions) <- jregions
print("Running fits:")
jstart <- Sys.time()
jfits.lst.byregion <- lapply(jregions, function(jregion){
dat.signal <- data.frame(cell = colnames(dat.impute.lst[[jmarktmp]]), signal = dat.impute.lst[[jmarktmp]][jregion, ], stringsAsFactors = FALSE)
traj.ctypes <- names(dat.trajs[[jmarktmp]])
names(traj.ctypes) <- traj.ctypes
dat.trajs.long <- lapply(traj.ctypes, function(traj.ctype){
dat.trajs.sub <- dat.trajs[[jmarktmp]][[traj.ctype]] %>%
filter(is.ctype) %>%
dplyr::select(cell, ctype.ordered, ptime) %>%
mutate(traj = traj.ctype)
# colcode = ctype2col[[traj.ctype]])
}) %>%
bind_rows() %>%
left_join(., dat.signal, by = "cell") %>%
left_join(., subset(dat.meta.lst[[jmarktmp]], select = c(cell, batch, ctype.from.LL, colcode)), by = "cell") %>%
mutate(traj = as.factor(traj),
region = jregion)
dat.trajs.long.split <- split(dat.trajs.long, dat.trajs.long$traj)
jfits.split <- lapply(dat.trajs.long.split, function(jsub){
jfit.sub <- gam(formula = signal ~ s(ptime, k = 4, bs = "cs", by = traj), gamma = 10, method = "REML", data = jsub)
# jsub$pred <- predict(jfit.sub)
# derivatives(jfit.sub)
return(list(fit = jfit.sub))
})
return(jfits.split)
})
print(paste("Done for", jmarktmp))
saveRDS(jfits.lst.byregion, file = outrds.tmp)
# return(jfits.lst.byregion)
print(Sys.time() - jstart)
|
# datacleaning.R - created by jl on 10 october 2016.
|
/processing-and-analysis/command-files/datacleaning.R
|
no_license
|
slievefoy/l-c-test
|
R
| false
| false
| 52
|
r
|
# datacleaning.R - created by jl on 10 october 2016.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all-generics.R
\name{stats}
\alias{stats}
\title{Access the stats of an object.}
\usage{
stats(x, name = NULL, ...)
}
\arguments{
\item{x}{An object containing stats}
\item{name}{(Optional); slotname.}
\item{...}{Additional arguments.}
}
\value{
A named list.
}
\description{
Access the stats of an object.
}
\examples{
###
}
|
/man/stats.Rd
|
no_license
|
DKMS-LSL/dr2s
|
R
| false
| true
| 406
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all-generics.R
\name{stats}
\alias{stats}
\title{Access the stats of an object.}
\usage{
stats(x, name = NULL, ...)
}
\arguments{
\item{x}{An object containing stats}
\item{name}{(Optional); slotname.}
\item{...}{Additional arguments.}
}
\value{
A named list.
}
\description{
Access the stats of an object.
}
\examples{
###
}
|
# Auto-generated fuzzing regression case: replay a libFuzzer-discovered input
# against esreg::G1_fun and dump the structure of whatever it returns.
testlist <- list(type = -1650614883L, z = -5.29946982737853e-169)
result <- do.call(esreg::G1_fun,testlist)
str(result)
|
/esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609893456-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 119
|
r
|
testlist <- list(type = -1650614883L, z = -5.29946982737853e-169)
result <- do.call(esreg::G1_fun,testlist)
str(result)
|
### creating area averaged line plot
# For each monthly chl-a raster: identify the sensor/product from the file
# name, parse its date, compute the area-averaged log chl-a, then plot all
# products as a time series and save the figure as a PNG.
rasterdir="/Users/heatherwelch/Dropbox/JPSS/global/eastern_pacific_rasters/"
csvdir="/Users/heatherwelch/Dropbox/JPSS/global/eastern_pacific_csvs/"#;dir.create(csvdir)
rasterlist=list.files(rasterdir,pattern = ".grd")
setwd(rasterdir)
# One row per raster; date/sensor/mean are placeholders overwritten in the
# loop. seq_along() (not 1:length()) stays safe if no rasters are found.
df=data.frame(number=seq_along(rasterlist)) %>% mutate(date=as.Date("2012-03-01"))%>% mutate(sensor=NA)%>% mutate(mean=NA)
for(i in seq_along(rasterlist)){
  name=rasterlist[i]
  ras=rasterlist[i] %>% raster()
  print(name)
  # Identify the product and parse the acquisition date from the file name
  if(grepl("erdMH1chlamday", name)){
    sensor="MODIS"
    date=substr(name,16,25) %>% as.Date()
  }
  if(grepl("nesdisVHNSQchlaMonthly", name)){
    sensor="VIIRS"
    date=substr(name,24,33) %>% as.Date()
  }
  if(grepl("AVW", name)){
    sensor="GlobColour Merged AVW"
    date=substr(name,23,32) %>% as.Date()
  }
  if(grepl("GSM", name)){
    sensor="GlobColour Merged GSM"
    date=substr(name,23,32)%>% as.Date()
  }
  if(grepl("ESACCI-OC-L3S", name)){
    sensor="OC-CCI"
    date=substr(name,15,22) %>% as.Date(format="%Y%m%d")
  }
  # Area-averaged mean of log chl-a; the 0.001 offset avoids log(0) cells
  mean=log(ras+0.001) %>% cellStats(.,stat="mean",na.rm=T)
  df$date[i]=date
  df$sensor[i]=sensor
  df$mean[i]=mean
}
df$sensor=as.factor(df$sensor)
dfAll=df
write.csv(dfAll,"/Users/heatherwelch/Dropbox/JPSS/global/eastern_pacific_csvs/All_products_4km_easternP_log.csv")
#### plotting ####
a=ggplot(dfAll,aes(x=date,y=mean)) +geom_line(aes(group=sensor,color=sensor,linetype=sensor),size=.5)+geom_point(aes(color=sensor))+
  scale_x_date(date_breaks="year",date_labels = "%Y",date_minor_breaks = "months")+
  theme(legend.position=c(.9,.9),legend.justification = c(.4,.4))+
  theme(axis.text = element_text(size=6),axis.title = element_text(size=6),legend.text=element_text(size=6),legend.title = element_text(size=6),strip.text.y = element_text(size = 6),strip.text.x = element_text(size = 6), strip.background = element_blank())+
  theme(legend.key.size = unit(.5,'lines'))+
  scale_color_manual("Product",values=c("VIIRS"="#d3ad06","MODIS"="#0066cc","OC-CCI"="red","GlobColour Merged GSM"="black","GlobColour Merged AVW"="darkgreen"))+ylab("log Chl-a (mg/m3)")+xlab("Year")+
  scale_linetype_manual("Product",values = c("VIIRS"="dashed","MODIS"="dashed","OC-CCI"="solid","GlobColour Merged GSM"="solid","GlobColour Merged AVW"="solid"))
a
datatype="easternP_chla_4km_log"
outputDir="/Users/heatherwelch/Dropbox/JPSS/plots_03.05.19/"
png(paste(outputDir,datatype,".png",sep=''),width=24,height=12,units='cm',res=400)
par(ps=10)
par(mar=c(4,4,1,1))
par(cex=1)
# BUG FIX: a bare `a` does not auto-print when this script is source()d,
# which left the PNG device empty; print() renders the ggplot explicitly.
print(a)
dev.off()
|
/code_03.05.19/eastern_pacific_chla_v2_log.R
|
no_license
|
HeatherWelch/JPSS_VIIRS
|
R
| false
| false
| 2,542
|
r
|
### creating area averaged line plot
rasterdir="/Users/heatherwelch/Dropbox/JPSS/global/eastern_pacific_rasters/"
csvdir="/Users/heatherwelch/Dropbox/JPSS/global/eastern_pacific_csvs/"#;dir.create(csvdir)
rasterlist=list.files(rasterdir,pattern = ".grd")
setwd(rasterdir)
df=data.frame(number=1:length(rasterlist)) %>% mutate(date=as.Date("2012-03-01"))%>% mutate(sensor=NA)%>% mutate(mean=NA)
for(i in 1:length(rasterlist)){
name=rasterlist[i]
ras=rasterlist[i] %>% raster()
print(name)
if(grepl("erdMH1chlamday", name)){
sensor="MODIS"
date=substr(name,16,25) %>% as.Date()
}
if(grepl("nesdisVHNSQchlaMonthly", name)){
sensor="VIIRS"
date=substr(name,24,33) %>% as.Date()
}
if(grepl("AVW", name)){
sensor="GlobColour Merged AVW"
date=substr(name,23,32) %>% as.Date()
}
if(grepl("GSM", name)){
sensor="GlobColour Merged GSM"
date=substr(name,23,32)%>% as.Date()
}
if(grepl("ESACCI-OC-L3S", name)){
sensor="OC-CCI"
date=substr(name,15,22) %>% as.Date(format="%Y%m%d")
}
mean=log(ras+0.001) %>% cellStats(.,stat="mean",na.rm=T)
df$date[i]=date
df$sensor[i]=sensor
df$mean[i]=mean
}
df$sensor=as.factor(df$sensor)
dfAll=df
write.csv(dfAll,"/Users/heatherwelch/Dropbox/JPSS/global/eastern_pacific_csvs/All_products_4km_easternP_log.csv")
#### plotting ####
a=ggplot(dfAll,aes(x=date,y=mean)) +geom_line(aes(group=sensor,color=sensor,linetype=sensor),size=.5)+geom_point(aes(color=sensor))+
scale_x_date(date_breaks="year",date_labels = "%Y",date_minor_breaks = "months")+
theme(legend.position=c(.9,.9),legend.justification = c(.4,.4))+
theme(axis.text = element_text(size=6),axis.title = element_text(size=6),legend.text=element_text(size=6),legend.title = element_text(size=6),strip.text.y = element_text(size = 6),strip.text.x = element_text(size = 6), strip.background = element_blank())+
theme(legend.key.size = unit(.5,'lines'))+
scale_color_manual("Product",values=c("VIIRS"="#d3ad06","MODIS"="#0066cc","OC-CCI"="red","GlobColour Merged GSM"="black","GlobColour Merged AVW"="darkgreen"))+ylab("log Chl-a (mg/m3)")+xlab("Year")+
scale_linetype_manual("Product",values = c("VIIRS"="dashed","MODIS"="dashed","OC-CCI"="solid","GlobColour Merged GSM"="solid","GlobColour Merged AVW"="solid"))
a
datatype="easternP_chla_4km_log"
outputDir="/Users/heatherwelch/Dropbox/JPSS/plots_03.05.19/"
png(paste(outputDir,datatype,".png",sep=''),width=24,height=12,units='cm',res=400)
par(ps=10)
par(mar=c(4,4,1,1))
par(cex=1)
a
dev.off()
|
#Bhumit Shah 1001765834
#Kaustubh Rajpathak 1001770219
#Project 1
# Pipeline: load UCI bank-marketing data, drop rows/columns with "unknown"
# values, sample 10k rows (seed 10), 80/20 split, then fit and compare
# decision trees (gini vs information gain, pruned by min xerror) and a
# Laplace-smoothed naive Bayes classifier on the target column `y`.
# NOTE(review): setwd() with an absolute path makes the script machine-specific;
# prefer relative paths or a project root helper.
setwd("C:/Users/kkr0219/Documents/Data Mining Datasets/bank-additional-full")
bankData <- read.csv(file = 'bank-additional-full.csv',header=TRUE, sep=";")
#Cleaning and pre-processing
#Removing rows with unknown values
nrow(bankData[bankData$job != "unknown" & bankData$education != "unknown" & bankData$marital != "unknown" & bankData$default != "unknown"
& bankData$housing != "unknown" & bankData$loan != "unknown" & bankData$contact != "unknown", ])
tempdata <- bankData[bankData$job != "unknown" & bankData$education != "unknown" & bankData$marital != "unknown" & bankData$default != "unknown"
& bankData$housing != "unknown" & bankData$loan != "unknown" & bankData$contact != "unknown", ]
nrow(tempdata)
#Removing columns [marital,default, housing, loan, contact]
tempdata$marital <- NULL
tempdata$default <- NULL
tempdata$housing <- NULL
tempdata$loan <- NULL
tempdata$contact <- NULL
#Testing using attribute deletion
#tempdata$campaign <- NULL
#tempdata$duration <- NULL
#tempdata$cons.price.idx <- NULL
#writing final cleaned and pre processed dataset to new file
write.csv(tempdata, "C:/Users/kkr0219/Documents/Data Mining Datasets/bank-additional-full/finalpreprocessed.csv", row.names = FALSE)
preprocessed <- read.csv(file = 'C:/Users/kkr0219/Documents/Data Mining Datasets/bank-additional-full/finalpreprocessed.csv')
View(preprocessed)
#getting sample from preprocessed data with seed value 10
set.seed(10)
sampleset <- preprocessed[sample(nrow(preprocessed), 10000),]
View(sampleset)
#using 80/20 split for training and testing
sample_size <- floor(0.8 * nrow(sampleset))
set.seed(10)
train_ind <- sample(nrow(sampleset), sample_size)
trainset <- sampleset[train_ind, ]
testset <- sampleset[-train_ind, ]
View(trainset)
View(testset)
#using rpart library to implement decision tree algorithm
library(rpart)
library(rattle) #used for printing decision trees
#generation complete decision trees by setting cp = 0
#xval is the number of cross validations
ginimodelfull <- rpart(y~., data = trainset, method = 'class', control = rpart.control(cp = 0, xval = 10))
infomodelfull <- rpart(y~., data = trainset, method = 'class', parms = list(split = 'information'), control = rpart.control(cp = 0, xval = 10))
#display complete decision trees
#fancyRpartPlot(infomodelfull, palettes = c("Greens", "Reds"), sub = "")
#fancyRpartPlot(ginimodelfull, palettes = c("Greens", "Reds"), sub = "")
#print cp information to identify optimal cp for minimal xerror
printcp(infomodelfull)
printcp(ginimodelfull)
#graph for cp values vs xerror
plotcp(infomodelfull, lty = 3, col = 2, upper = "splits")
plotcp(ginimodelfull, lty = 3, col = 2, upper = "splits")
#retrieve best cp value for pruning complete decision tree
# (cp row with the minimum cross-validated error in the cptable)
ginibestcp <- ginimodelfull$cptable[which.min(ginimodelfull$cptable[,"xerror"]),"CP"]
infobestcp <- infomodelfull$cptable[which.min(infomodelfull$cptable[,"xerror"]),"CP"]
#pruning complete decision tree
# NOTE(review): refitting with the best cp is equivalent in effect to
# prune(ginimodelfull, cp = ginibestcp) but re-runs the cross-validation.
ginimodelpruned <- rpart(y~., data = trainset, method = 'class', control = rpart.control(cp = ginibestcp, xval = 10))
infomodelpruned <- rpart(y~., data = trainset, method = 'class', parms = list(split = 'information'), control = rpart.control(cp = infobestcp, xval = 10))
#display pruned decision tree
fancyRpartPlot(infomodelpruned, palettes = c("Greens", "Reds"), sub = "")
fancyRpartPlot(ginimodelpruned, palettes = c("Greens", "Reds"), sub = "")
#print attribute importance
ginivarimp <- as.data.frame(ginimodelpruned$variable.importance)
infovarimp <- as.data.frame(infomodelpruned$variable.importance)
print(paste('Variable importance according to gini index metric'))
print(ginivarimp)
print(paste('Variable importance according to information gain metric'))
print(infovarimp)
#prediction on the basis of pruned decision tree
ginimodelpredict <- predict(ginimodelpruned, testset, type = "class")
infomodelpredict <- predict(infomodelpruned, testset, type = "class")
#using e1071 library to implement naives bayes
library(e1071)
library(caret)
#training the model
#laplace smoothing applied for improving f1 score
banknb <- naiveBayes(as.factor(y)~., trainset, laplace = 4)
banknb
#predicting on the model
# NOTE(review): 'prob' is not a documented predict.naiveBayes argument
# (class probabilities use type = "raw"); 'prob = TRUE' is likely absorbed
# by '...' and ignored — verify against e1071 docs.
banknbpredict <- predict(banknb, testset, prob = TRUE)
#retrieve confusion matrix for predicted values
# table(actual, predicted): rows are actual labels, columns predictions.
conf_matrix_info <- table(testset$y, infomodelpredict)
conf_matrix_gini <- table(testset$y, ginimodelpredict)
conf_matrix_nb <- table(testset$y, banknbpredict)
rownames(conf_matrix_gini) <- paste("Actual", rownames(conf_matrix_gini), sep = ":")
rownames(conf_matrix_info) <- paste("Actual", rownames(conf_matrix_info), sep = ":")
rownames(conf_matrix_nb) <- paste("Actual", rownames(conf_matrix_nb), sep = ":")
colnames(conf_matrix_gini) <- paste("Predicted", colnames(conf_matrix_gini), sep = ":")
colnames(conf_matrix_info) <- paste("Predicted", colnames(conf_matrix_info), sep = ":")
colnames(conf_matrix_nb) <- paste("Predicted", colnames(conf_matrix_nb), sep = ":")
print(conf_matrix_info)
print(conf_matrix_gini)
print(conf_matrix_nb)
#calculating various model statistics based on the confusion matrix
# Row 2 = Actual:yes, so [2, "Predicted:yes"] is TP, [1, "Predicted:yes"] is FP,
# and [2, "Predicted:no"] is FN; precision = TP/(TP+FP), recall = TP/(TP+FN).
giniaccuracy <- sum(diag(conf_matrix_gini)) / sum(conf_matrix_gini)
giniprecision <- conf_matrix_gini[2, "Predicted:yes"] / sum(conf_matrix_gini[1, "Predicted:yes"], conf_matrix_gini[2, "Predicted:yes"])
ginirecall <- conf_matrix_gini[2, "Predicted:yes"] / sum(conf_matrix_gini[2, "Predicted:yes"], conf_matrix_gini[2, "Predicted:no"])
ginif1score <- (2*(ginirecall * giniprecision)) / (ginirecall + giniprecision)
infoaccuracy <- sum(diag(conf_matrix_info)) / sum(conf_matrix_info)
infoprecision <- conf_matrix_info[2, "Predicted:yes"] / sum(conf_matrix_info[1, "Predicted:yes"], conf_matrix_info[2, "Predicted:yes"])
inforecall <- conf_matrix_info[2, "Predicted:yes"] / sum(conf_matrix_info[2, "Predicted:yes"], conf_matrix_info[2, "Predicted:no"])
infof1score <- (2*(inforecall * infoprecision)) / (inforecall + infoprecision)
nbaccuracy <- sum(diag(conf_matrix_nb)) / sum(conf_matrix_nb)
nbprecision <- conf_matrix_nb[2, "Predicted:yes"] / sum(conf_matrix_nb[1, "Predicted:yes"], conf_matrix_nb[2, "Predicted:yes"])
nbrecall <- conf_matrix_nb[2, "Predicted:yes"] / sum(conf_matrix_nb[2, "Predicted:yes"], conf_matrix_nb[2, "Predicted:no"])
nbf1score <- (2*(nbrecall * nbprecision)) / (nbrecall + nbprecision)
print(paste('Accuracy for gini metric test is', giniaccuracy*100, '%'))
print(paste('Precision for gini metric test is', giniprecision))
print(paste('Recall for gini metric test is', ginirecall))
print(paste('F1 score for gini metric test is', ginif1score))
print(paste('Accuracy for information metric test is', infoaccuracy*100, '%'))
print(paste('Precision for information metric test is', infoprecision))
print(paste('Recall for information metric test is', inforecall))
print(paste('F1 score for information metric test is', infof1score))
print(paste('Accuracy for naives bayes model is', nbaccuracy*100, '%'))
print(paste('Precision for naives bayes model is', nbprecision))
print(paste('Recall for naives bayes model is', nbrecall))
print(paste('F1 score for naives bayes model is', nbf1score))
|
/Decision Tree & Naive Bayes Classifiers/Project_It_1.R
|
no_license
|
kaustubh41096/Data-Mining
|
R
| false
| false
| 7,422
|
r
|
# NOTE(review): this block is a byte-for-byte duplicate of the bank-marketing
# script earlier in this dump (dataset 'content' vs 'text' columns); any fix
# applied above should be mirrored here.
#Bhumit Shah 1001765834
#Kaustubh Rajpathak 1001770219
#Project 1
setwd("C:/Users/kkr0219/Documents/Data Mining Datasets/bank-additional-full")
bankData <- read.csv(file = 'bank-additional-full.csv',header=TRUE, sep=";")
#Cleaning and pre-processing
#Removing rows with unknown values
nrow(bankData[bankData$job != "unknown" & bankData$education != "unknown" & bankData$marital != "unknown" & bankData$default != "unknown"
& bankData$housing != "unknown" & bankData$loan != "unknown" & bankData$contact != "unknown", ])
tempdata <- bankData[bankData$job != "unknown" & bankData$education != "unknown" & bankData$marital != "unknown" & bankData$default != "unknown"
& bankData$housing != "unknown" & bankData$loan != "unknown" & bankData$contact != "unknown", ]
nrow(tempdata)
#Removing columns [marital,default, housing, loan, contact]
tempdata$marital <- NULL
tempdata$default <- NULL
tempdata$housing <- NULL
tempdata$loan <- NULL
tempdata$contact <- NULL
#Testing using attribute deletion
#tempdata$campaign <- NULL
#tempdata$duration <- NULL
#tempdata$cons.price.idx <- NULL
#writing final cleaned and pre processed dataset to new file
write.csv(tempdata, "C:/Users/kkr0219/Documents/Data Mining Datasets/bank-additional-full/finalpreprocessed.csv", row.names = FALSE)
preprocessed <- read.csv(file = 'C:/Users/kkr0219/Documents/Data Mining Datasets/bank-additional-full/finalpreprocessed.csv')
View(preprocessed)
#getting sample from preprocessed data with seed value 10
set.seed(10)
sampleset <- preprocessed[sample(nrow(preprocessed), 10000),]
View(sampleset)
#using 80/20 split for training and testing
sample_size <- floor(0.8 * nrow(sampleset))
set.seed(10)
train_ind <- sample(nrow(sampleset), sample_size)
trainset <- sampleset[train_ind, ]
testset <- sampleset[-train_ind, ]
View(trainset)
View(testset)
#using rpart library to implement decision tree algorithm
library(rpart)
library(rattle) #used for printing decision trees
#generation complete decision trees by setting cp = 0
#xval is the number of cross validations
ginimodelfull <- rpart(y~., data = trainset, method = 'class', control = rpart.control(cp = 0, xval = 10))
infomodelfull <- rpart(y~., data = trainset, method = 'class', parms = list(split = 'information'), control = rpart.control(cp = 0, xval = 10))
#display complete decision trees
#fancyRpartPlot(infomodelfull, palettes = c("Greens", "Reds"), sub = "")
#fancyRpartPlot(ginimodelfull, palettes = c("Greens", "Reds"), sub = "")
#print cp information to identify optimal cp for minimal xerror
printcp(infomodelfull)
printcp(ginimodelfull)
#graph for cp values vs xerror
plotcp(infomodelfull, lty = 3, col = 2, upper = "splits")
plotcp(ginimodelfull, lty = 3, col = 2, upper = "splits")
#retrieve best cp value for pruning complete decision tree
ginibestcp <- ginimodelfull$cptable[which.min(ginimodelfull$cptable[,"xerror"]),"CP"]
infobestcp <- infomodelfull$cptable[which.min(infomodelfull$cptable[,"xerror"]),"CP"]
#pruning complete decision tree
ginimodelpruned <- rpart(y~., data = trainset, method = 'class', control = rpart.control(cp = ginibestcp, xval = 10))
infomodelpruned <- rpart(y~., data = trainset, method = 'class', parms = list(split = 'information'), control = rpart.control(cp = infobestcp, xval = 10))
#display pruned decision tree
fancyRpartPlot(infomodelpruned, palettes = c("Greens", "Reds"), sub = "")
fancyRpartPlot(ginimodelpruned, palettes = c("Greens", "Reds"), sub = "")
#print attribute importance
ginivarimp <- as.data.frame(ginimodelpruned$variable.importance)
infovarimp <- as.data.frame(infomodelpruned$variable.importance)
print(paste('Variable importance according to gini index metric'))
print(ginivarimp)
print(paste('Variable importance according to information gain metric'))
print(infovarimp)
#prediction on the basis of pruned decision tree
ginimodelpredict <- predict(ginimodelpruned, testset, type = "class")
infomodelpredict <- predict(infomodelpruned, testset, type = "class")
#using e1071 library to implement naives bayes
library(e1071)
library(caret)
#training the model
#laplace smoothing applied for improving f1 score
banknb <- naiveBayes(as.factor(y)~., trainset, laplace = 4)
banknb
#predicting on the model
banknbpredict <- predict(banknb, testset, prob = TRUE)
#retrieve confusion matrix for predicted values
conf_matrix_info <- table(testset$y, infomodelpredict)
conf_matrix_gini <- table(testset$y, ginimodelpredict)
conf_matrix_nb <- table(testset$y, banknbpredict)
rownames(conf_matrix_gini) <- paste("Actual", rownames(conf_matrix_gini), sep = ":")
rownames(conf_matrix_info) <- paste("Actual", rownames(conf_matrix_info), sep = ":")
rownames(conf_matrix_nb) <- paste("Actual", rownames(conf_matrix_nb), sep = ":")
colnames(conf_matrix_gini) <- paste("Predicted", colnames(conf_matrix_gini), sep = ":")
colnames(conf_matrix_info) <- paste("Predicted", colnames(conf_matrix_info), sep = ":")
colnames(conf_matrix_nb) <- paste("Predicted", colnames(conf_matrix_nb), sep = ":")
print(conf_matrix_info)
print(conf_matrix_gini)
print(conf_matrix_nb)
#calculating various model statistics based on the confusion matrix
giniaccuracy <- sum(diag(conf_matrix_gini)) / sum(conf_matrix_gini)
giniprecision <- conf_matrix_gini[2, "Predicted:yes"] / sum(conf_matrix_gini[1, "Predicted:yes"], conf_matrix_gini[2, "Predicted:yes"])
ginirecall <- conf_matrix_gini[2, "Predicted:yes"] / sum(conf_matrix_gini[2, "Predicted:yes"], conf_matrix_gini[2, "Predicted:no"])
ginif1score <- (2*(ginirecall * giniprecision)) / (ginirecall + giniprecision)
infoaccuracy <- sum(diag(conf_matrix_info)) / sum(conf_matrix_info)
infoprecision <- conf_matrix_info[2, "Predicted:yes"] / sum(conf_matrix_info[1, "Predicted:yes"], conf_matrix_info[2, "Predicted:yes"])
inforecall <- conf_matrix_info[2, "Predicted:yes"] / sum(conf_matrix_info[2, "Predicted:yes"], conf_matrix_info[2, "Predicted:no"])
infof1score <- (2*(inforecall * infoprecision)) / (inforecall + infoprecision)
nbaccuracy <- sum(diag(conf_matrix_nb)) / sum(conf_matrix_nb)
nbprecision <- conf_matrix_nb[2, "Predicted:yes"] / sum(conf_matrix_nb[1, "Predicted:yes"], conf_matrix_nb[2, "Predicted:yes"])
nbrecall <- conf_matrix_nb[2, "Predicted:yes"] / sum(conf_matrix_nb[2, "Predicted:yes"], conf_matrix_nb[2, "Predicted:no"])
nbf1score <- (2*(nbrecall * nbprecision)) / (nbrecall + nbprecision)
print(paste('Accuracy for gini metric test is', giniaccuracy*100, '%'))
print(paste('Precision for gini metric test is', giniprecision))
print(paste('Recall for gini metric test is', ginirecall))
print(paste('F1 score for gini metric test is', ginif1score))
print(paste('Accuracy for information metric test is', infoaccuracy*100, '%'))
print(paste('Precision for information metric test is', infoprecision))
print(paste('Recall for information metric test is', inforecall))
print(paste('F1 score for information metric test is', infof1score))
print(paste('Accuracy for naives bayes model is', nbaccuracy*100, '%'))
print(paste('Precision for naives bayes model is', nbprecision))
print(paste('Recall for naives bayes model is', nbrecall))
print(paste('F1 score for naives bayes model is', nbf1score))
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## Mean of a pollutant pooled across a set of monitor CSV files.
  ##
  ## 'directory': character(1), folder containing the monitor CSV files
  ##              (files are indexed in list.files() sorted order).
  ## 'pollutant': character(1), column to average ("sulfate" or "nitrate").
  ## 'id':        integer vector of monitor indices into the file list.
  ##
  ## Returns the mean of 'pollutant' across all selected monitors,
  ## ignoring NA values. The result is not rounded.
  ## Spell the argument out: the original 'full.name' relied on R's
  ## partial argument matching against 'full.names'.
  files <- list.files(directory, full.names = TRUE)
  ## Read all selected files first, then bind once; growing a data.frame
  ## with rbind() inside a loop is O(n^2) in copies.
  data <- do.call(rbind, lapply(files[id], read.csv))
  mean(data[, pollutant], na.rm = TRUE)
}
|
/r-programming/pollutantmean.R
|
no_license
|
tttonytian/datasciencecoursera
|
R
| false
| false
| 748
|
r
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## Pooled mean of `pollutant` over the monitor CSVs selected by `id`.
  ## `directory` is the folder holding the files; `id` indexes the sorted
  ## file listing. NA readings are dropped; the result is not rounded.
  csv_paths <- list.files(directory, full.names = TRUE)
  monitor_tables <- lapply(id, function(k) read.csv(csv_paths[k]))
  pooled <- do.call(rbind, monitor_tables)
  mean(pooled[, pollutant], na.rm = TRUE)
}
|
# Train an H2O deep-learning classifier on the airlines demo dataset and
# inspect its validation performance. Requires a running/startable local
# H2O cluster and network access to the public S3 test bucket.
library(h2o)
h2o.init()
data <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/airlines/allyears2k_headers.zip")
# 80/10/10 train/validation/test split, seeded for reproducibility.
parts <- h2o.splitFrame(data, c(0.8,0.1),seed = 69)
train <- parts[[1]]
valid <- parts[[2]]
test <- parts[[3]]
y <- "IsArrDelayed"
# Predictors: everything except the response and columns that leak the
# outcome (only known after the flight has arrived).
xWithDep <-setdiff(colnames(data),c(
  "ArrDelay","IsArrDelayed",
  "ActualElapsedTime",
  "ArrTime",
  "TailNum"
))
system.time( #17 to 18s
  m_DLR_def<-h2o.deeplearning(xWithDep, y,train,
                              validation_frame = valid,
                              model_id = "DLR_def",
                              # NOTE(review): recent h2o releases spell this
                              # argument 'variable_importances' (plural) —
                              # verify against the installed version's docs.
                              variable_importance = TRUE)
)
h2o.performance(m_DLR_def, valid = TRUE)
plot(m_DLR_def)
#h2o.varimp(m_DLR_def)
|
/Week4/R/visualization_model_exprimentation.R
|
no_license
|
mrjaypandya/Practical-Machine-Learning-on-H2O
|
R
| false
| false
| 748
|
r
|
# NOTE(review): byte-for-byte duplicate of the H2O deep-learning script above
# (dataset 'content' vs 'text' columns); keep edits in sync.
library(h2o)
h2o.init()
data <- h2o.importFile("http://h2o-public-test-data.s3.amazonaws.com/smalldata/airlines/allyears2k_headers.zip")
parts <- h2o.splitFrame(data, c(0.8,0.1),seed = 69)
train <- parts[[1]]
valid <- parts[[2]]
test <- parts[[3]]
y <- "IsArrDelayed"
xWithDep <-setdiff(colnames(data),c(
  "ArrDelay","IsArrDelayed",
  "ActualElapsedTime",
  "ArrTime",
  "TailNum"
))
system.time( #17 to 18s
  m_DLR_def<-h2o.deeplearning(xWithDep, y,train,
                              validation_frame = valid,
                              model_id = "DLR_def",
                              variable_importance = TRUE)
)
h2o.performance(m_DLR_def, valid = TRUE)
plot(m_DLR_def)
#h2o.varimp(m_DLR_def)
|
library(stringr)
library(adegenet)
library(ggplot2)
library(ggpubr)
library(MCMCglmm)
library(dplyr)
library(tidyr)
library(DESeq2)
library(reshape)
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
################
################ RNAseq
################
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
library(tximport)
###############################################################################################
###############################################################################################
######
###### DGE F1
######
###############################################################################################
###############################################################################################
###
### DEseq2-
###
# F1 generation: import salmon quants, rlog-transform counts, train a DAPC on
# home-environment samples (AAAA/HHHH) and project away-environment samples
# (AAHH/HHAA) onto the discriminant axis, then test LD1 shifts with MCMCglmm.
dir <- "~/reciprocal_t/analysis/salmon"
#list.files(dir)
samples <- read.table("~/reciprocal_t/analysis/sample_id.txt", header=FALSE)
# now point to quant files
files <- file.path(dir, samples$V1, "quant.sf")
names(files) <- samples$V1
all(file.exists(files))
#subset files to only include F1
files <- files[grep("F1", files)]
gene_tran <- read.table("/data/copepods/tonsa_transcriptome/Atonsa_gen_trans_agp_gff/Atonsa_transcript_to_gene", header=FALSE)
tx2gene <- data.frame(transcript=gene_tran$V2, gene=gene_tran$V1)
# use tximport to read in files
txi <- tximport(files, type = "salmon", tx2gene = tx2gene)
# Sample metadata parsed from the sample names: Population_Generation_Replicate,
# with Treatment = first two letters of Population and Line = last two.
f1samp <- as.data.frame(samples$V1[grep("F1", samples$V1)])
colnames(f1samp) <- c("V1")
id <- separate(data=f1samp, col=V1, sep="_", into = c("Population", "Generation", "Replicate"))
id$Treatment <- substr(id$Population, 1,2)
id$Line <- substr(id$Population, 3,4)
id$group <- paste(id$Treatment, id$Line, sep="")
sampleTable <- data.frame(
  sampleName = f1samp$V1,
  Line = id$Line,
  Treatment = id$Treatment,
  Replicate = id$Replicate)
# double check that the following agrees
rownames(sampleTable) <- colnames(txi$counts)
# setting this up, row info is each transcript/gene
# column is phenotypic data
rownames(sampleTable) <- colnames(txi$counts)
# import to DESeq2. this usese counts from tximport. from salmon. gene level
dds <- DESeqDataSetFromTximport(txi,
                                colData=sampleTable,
                                design = ~ Line + Treatment + Line:Treatment)
# remove rows where count < 10 in more than 90% of samples
keep <- apply(counts(dds), 1, function(x) {ifelse(length(which(x > 10)) > 13, TRUE, FALSE)})
dds <- dds[keep,]
nrow(dds)
#[1] 23323
# then rlog transform
rld<-rlog(dds,blind=TRUE)
#head(assay(rld))
dat=as.data.frame(assay(rld))
colnames(dat)<-colnames(dds)
####################
#discriminant function analysis
####################
ctr <-dat[,grep("AAAA|HHHH",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH|HHAA",colnames(dat))] # in away envir
# NOTE(review): F1 uses scale.=FALSE here while the F2/F3 sections use
# scale.=TRUE; pcp/scores are not used downstream (find.clusters runs its
# own PCA), so this inconsistency is cosmetic — but confirm before reuse.
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=FALSE) # note. the scale command doesnt have much impact
scores=pcp$x
#screeplot(pcp,bstick=T)
# adegenet: finding clusters (even though we know what clusters we want) - choose 7 PCs and 2 groups
clus.ctr=find.clusters(t(ctr),max.n.clus=7,
                       n.pca=7, n.clust=2) #keep 7 and 2
#rename groups so group labels are consistent across runs
clus.ctr$grp=c(rep(2, 4),
               rep(1, 4))
# discriminant function for two groups:
dp.ctr=dapc(t(ctr),clus.ctr$grp,
            n.pca=4, n.da=2) #keep 4 and 2
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4) #discriminant function for ORIGIN type expression
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt))) #skip IO11C for host b/c outlier sample in WGCNA
#must create another dataframe structure in order to plot these predicted values
dpc=data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
# Flip LD1 so the axis orientation matches the other generations' plots.
dpc$LD1 <- dpc$LD1*-1
dpc.dge.1 <- dpc
gp_means <- dpc %>% group_by(Line, Treatment) %>%
  summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
# Dot plot of individual LD1 scores with group means and arrows from the
# home-environment mean to the away-environment mean for each line.
a2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
  geom_point(size=3, lwd=2)+
  ylim(1, 2) +
  xlim(-5,5) +
  scale_color_manual(values=c("#6699CC","#CC3333"))+
  scale_fill_manual(values=c("#6699CC","#CC3333")) +
  scale_shape_manual(values=c(21, 24))+
  guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
         shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
  #stat_summary(fun="mean") +
  theme_classic()+
  ylab(" ")+
  xlab(" ") +
  ggtitle("F1") +
  theme(plot.title = element_text(hjust = 0.5),
        axis.line.y=element_blank(),
        axis.ticks.y=element_blank(),
        axis.text.y=element_blank()) +
  geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
  geom_segment(
    x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
    xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
    y=1.1, yend=1.1,
    lty=1,
    size = 1, arrow = arrow(length = unit(0.2, "inches")),
    color="black")+
  geom_segment(
    x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
    xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
    y=1.5, yend=1.5,
    size = 1, arrow = arrow(length = unit(0.2, "inches")),
    color="gray50")+
  theme(legend.position = "none")
### stats:
# Mixed model: LD1 ~ Line + Line:home with replicate as a random effect;
# 'home' = 0 for AAAA/HHHH samples, 1 for transplanted samples.
dpc.all <- dpc
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod1=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod1)
# post.mean l-95% CI u-95% CI eff.samp pMCMC
#(Intercept) -3.2919 -4.5618 -2.0473 2800 0.00286 **
#LineHH 6.6354 5.5284 7.7635 3059 < 4e-04 ***
#LineAA:home 3.5265 2.4352 4.6981 2800 < 4e-04 ***
#LineHH:home -1.9568 -3.0912 -0.9127 2836 0.00143 **
posterior.mode(mod1$VCV)
summary(mod1$Sol)
# $Sol is the posterior distribution of the fixed effect
#head(mod1$Sol)
HPDinterval(mod1$Sol)
# calculating difference in magnitudes of LineAA:home and LineHH:home using sampled sets of parameters:
awayDelta=abs(mod1$Sol[,"LineAA:home"]) -abs(mod1$Sol[,"LineHH:home"])
# 95% credible interval:
HPDinterval(awayDelta)
# Two-sided posterior "p-value": proportion of samples where the sign flips;
# if none flip, report p < 1/(number of posterior samples).
if (is.na(table(awayDelta<0)[2])) {
  cat("p <",signif(1/length(awayDelta),1))
} else {
  cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
###############################################################################################
###############################################################################################
######
###### DGE F2
######
###############################################################################################
###############################################################################################
# F2 generation: same DAPC pipeline as F1 — import salmon quants, rlog,
# train discriminant on home-environment samples, project away samples,
# then test LD1 shifts with MCMCglmm.
# now point to quant files
files <- file.path(dir, samples$V1, "quant.sf")
names(files) <- samples$V1
all(file.exists(files))
#subset files to only include F2
files <- files[grep("F2", files)]
txi <- tximport(files, type = "salmon", tx2gene = tx2gene)
# FIX(review): the original reused `f1samp` (the F1 sample frame built in the
# F1 section) here, so the F2 sample table carried F1 sample names and only
# worked if both greps returned samples in the same order. Rebuild the sample
# frame from the F2 names, exactly as the F3 section does.
f2samp <- as.data.frame(samples$V1[grep("F2", samples$V1)])
colnames(f2samp) <- c("V1")
id <- separate(data=f2samp, col=V1, sep="_", into = c("Population", "Generation", "Replicate"))
id$Treatment <- substr(id$Population, 1,2)
id$Line <- substr(id$Population, 3,4)
id$group <- paste(id$Treatment, id$Line, sep="")
sampleTable <- data.frame(
  sampleName = f2samp$V1,
  Line = id$Line,
  Treatment = id$Treatment,
  Replicate = id$Replicate)
rownames(sampleTable) <- colnames(txi$counts)
# setting this up, row info is each transcript/gene
# column is phenotypic data
rownames(sampleTable) <- colnames(txi$counts)
dds <- DESeqDataSetFromTximport(txi,
                                colData=sampleTable,
                                design = ~ Line + Treatment + Line:Treatment)
# remove rows where count < 10 in more than 90% of samples
keep <- apply(counts(dds), 1, function(x) {ifelse(length(which(x > 10)) > 13, TRUE, FALSE)})
dds <- dds[keep,]
nrow(dds)
#[1] 24881
# then rlog transform
rld<-rlog(dds,blind=TRUE)
dat=as.data.frame(assay(rld))
colnames(dat)<-colnames(dds)
####################
#discriminant function analysis
####################
ctr <-dat[,grep("AAAA|HHHH",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH|HHAA",colnames(dat))] # in away envir
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=TRUE)
scores=pcp$x
#screeplot(pcp,bstick=T)
# adegenet: finding clusters (even though we know what clusters we want) - choose 7 PCs and 2 groups
clus.ctr=find.clusters(t(ctr),max.n.clus=7,
                       n.pca=7, n.clust=2) #keep 7 and 2
#Use clus$grp to rename to in2in and off2off -
clus.ctr$grp=c(rep(2, 4),
               rep(1, 4)) #tell the DF which groups you want to cluster; in this case in2in and off2off
# discriminant function for two groups:
dp.ctr=dapc(t(ctr),clus.ctr$grp,
            n.pca=3, n.da=2) #keep 7 and 2
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4) #discriminant function for ORIGIN type expression
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt))) #skip IO11C for host b/c outlier sample in WGCNA
dpc <- data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
# Flip LD1 so the axis orientation matches the other generations' plots.
dpc$LD1 <- dpc$LD1*-1
dpc.dge.2 <- dpc
gp_means <- dpc %>% group_by(Line, Treatment) %>%
  summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
# Dot plot of individual LD1 scores with group means and home->away arrows.
b2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
  geom_point(size=3, lwd=2)+
  ylim(1, 2) +
  xlim(-5, 5) +
  scale_color_manual(values=c("#6699CC","#CC3333"))+
  scale_fill_manual(values=c("#6699CC","#CC3333")) +
  scale_shape_manual(values=c(21, 24))+
  guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
         shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
  #stat_summary(fun="mean") +
  theme_classic()+
  ylab(" ")+
  xlab(" ") +
  ggtitle("F2") +
  theme(plot.title = element_text(hjust = 0.5),
        axis.line.y=element_blank(),
        axis.ticks.y=element_blank(),
        axis.text.y=element_blank()) +
  geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
  geom_segment(
    x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
    xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
    y=1.1, yend=1.1,
    lty=1,
    size = 1, arrow = arrow(length = unit(0.2, "inches")),
    color="gray50")+
  geom_segment(
    x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
    xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
    y=1.5, yend=1.5,
    size = 1, arrow = arrow(length = unit(0.2, "inches")),
    color="gray50")+
  theme(legend.position = "none")
### stats:
# Mixed model: LD1 ~ Line + Line:home with replicate as a random effect;
# 'home' = 0 for AAAA/HHHH samples, 1 for transplanted samples.
dpc.all <- dpc
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod1=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod1)
# post.mean l-95% CI u-95% CI eff.samp pMCMC
#(Intercept) 3.3268 2.1485 4.5441 2652 0.001429 **
#LineHH -6.6198 -7.7842 -5.4205 3071 < 4e-04 ***
#LineAA:home -3.5292 -4.6578 -2.4419 2800 0.000714 ***
#LineHH:home 1.9427 0.8655 3.0950 2800 0.003571 **
posterior.mode(mod1$VCV)
# $Sol is the posterior distribution of the fixed effect
#head(mod1$Sol)
HPDinterval(mod1$Sol)
# Difference in magnitudes of the two home effects, sampled from the posterior.
awayDelta=abs(mod1$Sol[,"LineAA:home"])-abs(mod1$Sol[,"LineHH:home"])
# 95% credible interval:
HPDinterval(awayDelta)
if (is.na(table(awayDelta<0)[2])) {
  cat("p <",signif(1/length(awayDelta),1))
} else {
  cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
###############################################################################################
###############################################################################################
######
###### DGE F3
######
###############################################################################################
###############################################################################################
# now point to quant files
files <- file.path(dir, samples$V1, "quant.sf")
names(files) <- samples$V1
all(file.exists(files))
#subset files to only include F3
files <- files[grep("F3", files)]
txi <- tximport(files, type = "salmon", tx2gene = tx2gene)
names(txi)
head(txi$counts)
tx2gene <- data.frame(transcript=gene_tran$V2, gene=gene_tran$V1)
f1samp <- as.data.frame(samples$V1[grep("F3", samples$V1)])
colnames(f1samp) <- c("V1")
id <- separate(data=f1samp, col=V1, sep="_", into = c("Population", "Generation", "Replicate"))
id$Treatment <- substr(id$Population, 1,2)
id$Line <- substr(id$Population, 3,4)
id$group <- paste(id$Treatment, id$Line, sep="")
sampleTable <- data.frame(
sampleName = f1samp$V1,
Line = id$Line,
Treatment = id$Treatment,
Replicate = id$Replicate)
# double check that the following agrees
rownames(sampleTable) <- colnames(txi$counts)
dds <- DESeqDataSetFromTximport(txi,
colData=sampleTable,
design = ~ Line + Treatment + Line:Treatment)
# remove rows where count < 10 in more than 90% of samples
keep <- apply(counts(dds), 1, function(x) {ifelse(length(which(x > 10)) > 13, TRUE, FALSE)})
dds <- dds[keep,]
nrow(dds)
#[1] 24131
# then rolog transform
rld<-rlog(dds,blind=TRUE)
dat=as.data.frame(assay(rld))
colnames(dat)<-colnames(dds)
####################
# Discriminant function analysis (DAPC) on F3 expression.
# Train the discriminant axis on the two "home" groups (AAAA vs HHHH), then
# project the transplanted ("away") groups onto that axis to measure how far
# each line's expression shifts toward the other line's home profile.
####################
ctr <-dat[,grep("AAAA|HHHH",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH|HHAA",colnames(dat))] # in away envir
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=TRUE)
scores=pcp$x
# adegenet: finding clusters (even though we know what clusters we want) - choose 7 PCs and 2 groups
clus.ctr=find.clusters(t(ctr),max.n.clus=7,
             n.pca=7, n.clust=2) #keep 7 and 2
# overwrite the inferred group labels with the known groups (4 samples per group)
clus.ctr$grp=c(rep(2, 4),
        rep(1, 4)) #tell the DF which groups you want to cluster; in this case in2in and off2off
# discriminant function for two groups (4 PCs retained here, not 7):
dp.ctr=dapc(t(ctr),clus.ctr$grp,
        n.pca=4, n.da=2)
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4) #discriminant function for ORIGIN type expression
# project the away samples onto the trained discriminant axis
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt)))
# combine home coordinates and away projections for plotting
dpc <- data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
# gp = y-offset for plotting: home groups at 1.1, away groups at 1.5
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc$LD1 <- dpc$LD1*-1  # flip sign so orientation matches the other generations
dpc.dge.3 <- dpc
# write combined F1/F2/F3 DGE DAPC scores (dpc.dge.1 and dpc.dge.2 created earlier in file)
dpc_dge <- rbind(dpc.dge.1, dpc.dge.2, dpc.dge.3)
write.table(dpc_dge, "~/reciprocal_t/analysis/dpc_dge.txt", sep="\t", quote=F, row.names=F)
gp_means <- dpc %>% group_by(Line, Treatment) %>%
     summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
# F3 panel: points per replicate, large semi-transparent points at group means,
# and arrows from each line's home mean to its transplanted mean
c2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
    geom_point(size=3, lwd=2)+
    ylim(1, 2) +
    xlim(-5, 5) +
    scale_color_manual(values=c("#6699CC","#CC3333"))+
    scale_fill_manual(values=c("#6699CC","#CC3333")) +
    scale_shape_manual(values=c(21, 24))+
    guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
           shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
    #stat_summary(fun="mean") +
    theme_classic()+
    ylab(" ")+
    xlab(" ") +
    ggtitle("F3") +
    theme(plot.title = element_text(hjust = 0.5),
        axis.line.y=element_blank(),
        axis.ticks.y=element_blank(),
        axis.text.y=element_blank()) +
    geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
    geom_segment(
           x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
        y=1.1, yend=1.1,
        lty=1,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="gray50")+
    geom_segment(
           x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
        y=1.5, yend=1.5,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="gray50")+
    theme(legend.position = "none")
# stack the F1/F2/F3 DGE panels (a2, b2 created earlier in file) into one figure
pdf("~/Documents/UVM/Reciprocal_transplant/figures/dapc_RNA_gen.pdf", height = 6, width = 4)
ggarrange(a2,b2,c2, ncol=1, nrow=3, common.legend=TRUE)
dev.off()
### stats: MCMCglmm on the F3 DGE discriminant scores.
# Model: LD1 ~ Line + Line:home with replicate as a random effect.
# 'home' = 0 for samples in their native environment, 1 for transplanted samples,
# so Line:home estimates each line's expression shift when moved away from home.
dpc.all <- dpc
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
# weakly informative inverse-Wishart prior with parameter expansion on the random effect
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod1=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod1)
# post.mean l-95% CI u-95% CI eff.samp  pMCMC
#(Intercept)    -3.480   -4.805   -2.140     2800 <4e-04 ***
#LineHH          6.951    5.492    8.494     2800 <4e-04 ***
#LineAA:home     4.799    3.306    6.291     2800 <4e-04 ***
#LineHH:home    -4.555   -6.071   -3.047     2800 <4e-04 ***
posterior.mode(mod1$VCV)
HPDinterval(mod1$Sol)
# test whether the two lines shift by different magnitudes when transplanted:
# difference in absolute Line:home effects across the posterior samples
#awayDelta=abs(mod1$Sol[,"LineAA:home"])-abs(mod1$Sol[,"LineHH:home"])
awayDelta=abs(mod1$Sol[,"LineHH:home"])-abs(mod1$Sol[,"LineAA:home"])
# 95% credible interval:
HPDinterval(awayDelta)
# MCMC p-value: proportion of posterior samples crossing zero
# (if none cross, report p < 1/number of samples)
if (is.na(table(awayDelta<0)[2])) {
 cat("p <",signif(1/length(awayDelta),1))
} else {
 cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
# Line:home estimates across all three generations (pasted from summaries above):
#f1
#LineAA:home 3.5265 2.4352 4.6981 2800 < 4e-04 ***
#LineHH:home -1.9568 -3.0912 -0.9127 2836 0.00143 **
#f2
#LineAA:home -3.5292 -4.6578 -2.4419 2800 0.000714 ***
#LineHH:home 1.9427 0.8655 3.0950 2800 0.003571 **
# f3
#LineAA:home 4.799 3.306 6.291 2800 <4e-04 ***
#LineHH:home -4.555 -6.071 -3.047 2800 <4e-04 ***
# Bonferroni correction across the 6 Line:home tests (all remain significant)
p.adjust(c(4e-04,0.00143,0.000714,0.003571,4e-04,4e-04 ), method="bonferroni")
#[1] 0.002400 0.008580 0.004284 0.021426 0.002400 0.002400
######################################################################################################################
###########################################################
###########################################################
## Allele frequencies: same DAPC approach as above, but on SNP
## allele frequencies rather than expression.
###########################################################
###########################################################
######################################################################################################################
af <- read.table("~/reciprocal_t/analysis/filtered_allele_freqs.txt", header=TRUE)
dat3 <- read.table("~/reciprocal_t/analysis/filtered_variants.txt", header=TRUE)
# expected sample/column order of the allele-frequency table:
pops <- c(
"AAAA_F1_REP1", "AAAA_F1_REP2", "AAAA_F1_REP3", "AAAA_F1_REP4",
"AAAA_F2_REP1", "AAAA_F2_REP2", "AAAA_F2_REP3", "AAAA_F2_REP4",
"AAAA_F3_REP1", "AAAA_F3_REP2", "AAAA_F3_REP3", "AAAA_F3_REP4",
"AAHH_F1_REP1", "AAHH_F1_REP2", "AAHH_F1_REP3", "AAHH_F1_REP4",
"AAHH_F2_REP1", "AAHH_F2_REP2", "AAHH_F2_REP3", "AAHH_F2_REP4",
"AAHH_F3_REP1", "AAHH_F3_REP2", "AAHH_F3_REP3", "AAHH_F3_REP4",
"HHAA_F1_REP1", "HHAA_F1_REP2", "HHAA_F1_REP3", "HHAA_F1_REP4",
"HHAA_F2_REP1", "HHAA_F2_REP2", "HHAA_F2_REP3", "HHAA_F2_REP4",
"HHAA_F3_REP1", "HHAA_F3_REP2", "HHAA_F3_REP3", "HHAA_F3_REP4",
"HHHH_F1_REP1", "HHHH_F1_REP2", "HHHH_F1_REP3", "HHHH_F1_REP4",
"HHHH_F2_REP1", "HHHH_F2_REP2", "HHHH_F2_REP3", "HHHH_F2_REP4",
"HHHH_F3_REP1", "HHHH_F3_REP2", "HHHH_F3_REP3", "HHHH_F3_REP4")
# samples x SNPs matrix, SNPs labeled chrom:position
freqs <- t(af[,2:ncol(af)])
colnames(freqs) <- c(paste(dat3$Chrom, dat3$Position, sep=":"))
# NOTE(review): the DAPC sections below operate on 'dat' (SNPs x samples), not
# on the transposed 'freqs' — confirm 'freqs' is actually used downstream.
dat <- af
###############################################################################################
###############################################################################################
######
###### Allele frequencies, F1: train DAPC on home groups, project away groups.
######
###############################################################################################
###############################################################################################
ctr <-dat[,grep("AAAA_F1|HHHH_F1",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH_F1|HHAA_F1",colnames(dat))] # in away envir
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=FALSE)
scores=pcp$x
#screeplot(pcp,bstick=T)
# adegenet: finding clusters (even though we know what clusters we want) -
clus.ctr=find.clusters(t(ctr),max.n.clus=10,
             n.pca=3, n.clust=2)
# overwrite inferred labels with the known groups (4 replicates each)
clus.ctr$grp=c(rep(2, 4),
        rep(1, 4))
# generate the actual DAPC (3 PCs, 1 discriminant axis)
dp.ctr=dapc(t(ctr),clus.ctr$grp, var.loadings=TRUE, n.pca=3, n.da=1,
    var.contrib =TRUE)
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4)
# then add in the transplanted groups to the previously generated DAPC
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt)))
# combine home coordinates and away projections for plotting
dpc=data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
# gp = y-offset for plotting: home groups at 1.1, away groups at 1.5
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc$LD1 <- dpc$LD1*-1  # flip sign for consistent orientation across panels
dpc1 <- dpc  # stash F1 scores for the combined output table and stats
gp_means <- dpc %>% group_by(Line, Treatment) %>%
     summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
# F1 allele-frequency panel: replicates, group means, home->away arrows
a2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
    geom_point(size=3, lwd=2)+
    ylim(1, 2) +
    xlim(-5.5,5.5) +
    scale_color_manual(values=c("#6699CC","#CC3333"))+
    scale_fill_manual(values=c("#6699CC","#CC3333")) +
    scale_shape_manual(values=c(21, 24))+
    guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
           shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
    #stat_summary(fun="mean") +
    theme_classic()+
    ylab(" ")+
    xlab(" ") +
    ggtitle("F1") +
    theme(plot.title = element_text(hjust = 0.5),
        axis.line.y=element_blank(),
        axis.ticks.y=element_blank(),
        axis.text.y=element_blank()) +
    geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
    geom_segment(
           x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
        y=1.1, yend=1.1,
        lty=1,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="gray50")+
    geom_segment(
           x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
        y=1.5, yend=1.5,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="gray50")+
    theme(legend.position = "none")
###############################################################################################
###############################################################################################
######
###### Allele frequencies, F2: same DAPC procedure as F1 above.
######
###############################################################################################
###############################################################################################
ctr <-dat[,grep("AAAA_F2|HHHH_F2",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH_F2|HHAA_F2",colnames(dat))] # in away envir
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=FALSE)
scores=pcp$x
#screeplot(pcp,bstick=T)
# adegenet: finding clusters (even though we know what clusters we want) -
clus.ctr=find.clusters(t(ctr),max.n.clus=10,
             n.pca=3, n.clust=2)
# overwrite inferred labels with the known groups (4 replicates each)
clus.ctr$grp=c(rep(2, 4),
        rep(1, 4))
# generate the actual DAPC (3 PCs, 1 discriminant axis)
dp.ctr=dapc(t(ctr),clus.ctr$grp, var.loadings=TRUE, n.pca=3, n.da=1,
    var.contrib =TRUE)
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4)
# then add in the transplanted groups to the previously generated DAPC
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt)))
# combine home coordinates and away projections for plotting
dpc=data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc$LD1 <- dpc$LD1*-1  # flip sign for consistent orientation across panels
dpc2 <- dpc  # stash F2 scores for the combined output table and stats
gp_means <- dpc %>% group_by(Line, Treatment) %>%
     summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
# F2 allele-frequency panel
b2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
    geom_point(size=3, lwd=2)+
    ylim(1, 2) +
    xlim(-5.5,5.5) +
    scale_color_manual(values=c("#6699CC","#CC3333"))+
    scale_fill_manual(values=c("#6699CC","#CC3333")) +
    scale_shape_manual(values=c(21, 24))+
    guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
           shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
    #stat_summary(fun="mean") +
    theme_classic()+
    ylab(" ")+
    xlab(" ") +
    ggtitle("F2") +
    theme(plot.title = element_text(hjust = 0.5),
        axis.line.y=element_blank(),
        axis.ticks.y=element_blank(),
        axis.text.y=element_blank()) +
    geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
    geom_segment(
           x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
        y=1.1, yend=1.1,
        lty=1,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="gray50")+
    geom_segment(
           x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
        y=1.5, yend=1.5,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="gray50")+
    theme(legend.position = "none")
###############################################################################################
###############################################################################################
######
###### Allele frequencies, F3: same DAPC procedure as F1/F2 above.
###### Fix vs original: the dpc data.frame construction was duplicated on two
###### consecutive lines (copy-paste artifact); the duplicate is removed.
######
###############################################################################################
###############################################################################################
ctr <-dat[,grep("AAAA_F3|HHHH_F3",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH_F3|HHAA_F3",colnames(dat))] # in away envir
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=FALSE)
scores=pcp$x
#screeplot(pcp,bstick=T)
# adegenet: finding clusters (even though we know what clusters we want) -
clus.ctr=find.clusters(t(ctr),max.n.clus=10,
             n.pca=3, n.clust=2)
# overwrite inferred labels with the known groups (4 replicates each)
clus.ctr$grp=c(rep(2, 4),
        rep(1, 4))
# generate the actual DAPC (3 PCs, 1 discriminant axis)
dp.ctr=dapc(t(ctr),clus.ctr$grp, var.loadings=TRUE, n.pca=3, n.da=1,
    var.contrib =TRUE)
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4)
# then add in the transplanted groups to the previously generated DAPC
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt)))
# combine home coordinates and away projections for plotting
dpc=data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
# gp = y-offset for plotting: home groups at 1.1, away groups at 1.5
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc$LD1 <- dpc$LD1*-1  # flip sign for consistent orientation across panels
dpc3 <- dpc  # stash F3 scores
# write combined F1/F2/F3 allele-frequency DAPC scores
dpc_af <- rbind(dpc1, dpc2, dpc3)
write.table(dpc_af, "~/reciprocal_t/analysis/dpc_af.txt", sep="\t", quote=F, row.names=F)
gp_means <- dpc %>% group_by(Line, Treatment) %>%
     summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
# F3 allele-frequency panel
c2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
    geom_point(size=3, lwd=2)+
    ylim(1, 2) +
    xlim(-5.5,5.5) +
    scale_color_manual(values=c("#6699CC","#CC3333"))+
    scale_fill_manual(values=c("#6699CC","#CC3333")) +
    scale_shape_manual(values=c(21, 24))+
    guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
           shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
    #stat_summary(fun="mean") +
    theme_classic()+
    ylab(" ")+
    xlab(" ") +
    ggtitle("F3") +
    theme(plot.title = element_text(hjust = 0.5),
        axis.line.y=element_blank(),
        axis.ticks.y=element_blank(),
        axis.text.y=element_blank()) +
    geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
    geom_segment(
           x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
        y=1.1, yend=1.1,
        lty=1,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="gray50")+
    geom_segment(
           x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
        y=1.5, yend=1.5,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="black")+
    theme(legend.position = "none")
# stack the F1/F2/F3 allele-frequency panels into one figure
pdf("~/Documents/UVM/Reciprocal_transplant/figures/dapc_SNP_gen.pdf", height = 6, width = 4)
ggarrange(a2,b2,c2, ncol=1, nrow=3, common.legend=TRUE)
dev.off()
#############
### stats: MCMCglmm on the F1 allele-frequency discriminant scores (mod1).
# 'home' = 0 for native-environment samples, 1 for transplanted samples.
dpc.all <- dpc1
dpc.all$Line <- substr(row.names(dpc.all),3,4)
dpc.all$Treatment <- substr(row.names(dpc.all),1,2)
dpc.all$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
# NOTE(review): dpc1$LD1 was already sign-flipped above; this flips it back.
dpc.all$LD1 <- dpc.all$LD1*-1
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
# weakly informative prior with parameter expansion on the replicate random effect
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod1=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod1)
# post.mean l-95% CI u-95% CI eff.samp  pMCMC
#(Intercept)    4.2464   3.0894   5.4160     2800 0.000714 ***
#LineHH        -8.4911  -9.8337  -7.1083     2800  < 4e-04 ***
#LineAA:home   -2.2096  -3.6851  -0.9245     3014 0.005714 **
#LineHH:home    2.1664   0.8041   3.5429     2800 0.001429 **
posterior.mode(mod1$VCV)
HPDinterval(mod1$Sol)
# difference in magnitude of the two lines' transplant responses (posterior)
#awayDelta=abs(mod1$Sol[,"LineAA:home"])-abs(mod1$Sol[,"LineHH:home"])
awayDelta=abs(mod1$Sol[,"LineHH:home"])-abs(mod1$Sol[,"LineAA:home"])
# 95% credible interval:
HPDinterval(awayDelta)
# MCMC p-value: proportion of posterior samples crossing zero
if (is.na(table(awayDelta<0)[2])) {
 cat("p <",signif(1/length(awayDelta),1))
} else {
 cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
# F2: MCMCglmm on the F2 allele-frequency discriminant scores (mod2).
# Fix vs original: awayDelta was computed from mod1 (the F1 model) instead of
# mod2, so the reported F2 credible interval/p-value actually re-tested F1.
dpc.all <- dpc2
dpc.all$Line <- substr(row.names(dpc.all),3,4)
dpc.all$Treatment <- substr(row.names(dpc.all),1,2)
dpc.all$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc.all$LD1 <- dpc.all$LD1*-1
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
# home = 0 for native-environment samples, 1 for transplanted samples
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod2=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod2)
# post.mean l-95% CI u-95% CI eff.samp   pMCMC
# (Intercept)   -3.7183  -4.8065  -2.4644     2800 0.00143 **
# LineHH         7.4262   6.2932   8.5262     2513 < 4e-04 ***
# LineAA:home    1.8559   0.7622   2.9456     2800 0.00286 **
# LineHH:home   -2.1932  -3.3456  -1.0985     2800 0.00143 **
posterior.mode(mod2$VCV)
HPDinterval(mod2$Sol)
# difference in magnitude of the two lines' transplant responses (posterior);
# computed from mod2 (was mod1 in the original — bug).
#awayDelta=abs(mod2$Sol[,"LineAA:home"])-abs(mod2$Sol[,"LineHH:home"])
awayDelta=abs(mod2$Sol[,"LineHH:home"])-abs(mod2$Sol[,"LineAA:home"])
# 95% credible interval:
HPDinterval(awayDelta)
# MCMC p-value: proportion of posterior samples crossing zero
if (is.na(table(awayDelta<0)[2])) {
 cat("p <",signif(1/length(awayDelta),1))
} else {
 cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
## F3: MCMCglmm on the F3 allele-frequency discriminant scores (mod3).
## Fix vs original: awayDelta was computed from mod1 (the F1 model) instead of
## mod3, so the reported F3 credible interval/p-value actually re-tested F1.
dpc.all <- dpc3
dpc.all$Line <- substr(row.names(dpc.all),3,4)
dpc.all$Treatment <- substr(row.names(dpc.all),1,2)
dpc.all$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc.all$LD1 <- dpc.all$LD1*-1
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
# home = 0 for native-environment samples, 1 for transplanted samples
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod3=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod3)
# post.mean l-95% CI u-95% CI eff.samp    pMCMC
#(Intercept) -2.99296 -4.12458 -1.80411     2800 0.002143 **
#LineHH       6.00740  4.90780  7.05361     2800  < 4e-04 ***
#LineAA:home  1.20025  0.07212  2.13389     3439 0.029286 *
#LineHH:home -2.70971 -3.78055 -1.73574     2800 0.000714 ***
posterior.mode(mod3$VCV)
HPDinterval(mod3$Sol)
# difference in magnitude of the two lines' transplant responses (posterior);
# computed from mod3 (was mod1 in the original — bug).
#awayDelta=abs(mod3$Sol[,"LineAA:home"])-abs(mod3$Sol[,"LineHH:home"])
awayDelta=abs(mod3$Sol[,"LineHH:home"])-abs(mod3$Sol[,"LineAA:home"])
# 95% credible interval:
HPDinterval(awayDelta)
# MCMC p-value: proportion of posterior samples crossing zero
if (is.na(table(awayDelta<0)[2])) {
 cat("p <",signif(1/length(awayDelta),1))
} else {
 cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
# Correct for multiple testing across the 6 allele-frequency Line:home tests
# (pMCMC values pasted from the F1/F2/F3 model summaries above), using FDR.
# f1
#LineAA:home -2.2096  -3.6851  -0.9245     3014 0.005714 **
#LineHH:home  2.1664   0.8041   3.5429     2800 0.001429 **
#f2
# LineAA:home   1.8559   0.7622   2.9456     2800  0.00286 **
# LineHH:home  -2.1932  -3.3456  -1.0985     2800  0.00143 **
#F3
#LineAA:home  1.20025  0.07212  2.13389     3439 0.029286 *
#LineHH:home -2.70971 -3.78055 -1.73574     2800 0.000714 ***
p.adjust(c(0.005714,0.001429,0.00286,0.00143,0.029286,0.000714), method = "fdr")
# [1] 0.034284 0.008574 0.017160 0.008580 0.175716 0.004284
|
/scripts/11_dapc.R
|
no_license
|
rsbrennan/tonsa_reciprocal
|
R
| false
| false
| 34,374
|
r
|
library(stringr)
library(adegenet)
library(ggplot2)
library(ggpubr)
library(MCMCglmm)
library(dplyr)
library(tidyr)
library(DESeq2)
library(reshape)
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
################
################ RNAseq
################
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
########################################################################################################
library(tximport)
###############################################################################################
###############################################################################################
######
###### DGE F1: import salmon quantifications and build the DESeq2 object.
######
###############################################################################################
###############################################################################################
###
### DEseq2-
###
dir <- "~/reciprocal_t/analysis/salmon"
#list.files(dir)
samples <- read.table("~/reciprocal_t/analysis/sample_id.txt", header=FALSE)
# now point to quant files
files <- file.path(dir, samples$V1, "quant.sf")
names(files) <- samples$V1
all(file.exists(files))  # sanity check: should print TRUE
#subset files to only include F1
files <- files[grep("F1", files)]
# transcript -> gene mapping for gene-level summarization
gene_tran <- read.table("/data/copepods/tonsa_transcriptome/Atonsa_gen_trans_agp_gff/Atonsa_transcript_to_gene", header=FALSE)
tx2gene <- data.frame(transcript=gene_tran$V2, gene=gene_tran$V1)
# use tximport to read in files
txi <- tximport(files, type = "salmon", tx2gene = tx2gene)
# build sample metadata from the F1 sample names (POPULATION_GENERATION_REPLICATE)
f1samp <- as.data.frame(samples$V1[grep("F1", samples$V1)])
colnames(f1samp) <- c("V1")
id <- separate(data=f1samp, col=V1, sep="_", into = c("Population", "Generation", "Replicate"))
id$Treatment <- substr(id$Population, 1,2)  # first two letters = rearing environment
id$Line <- substr(id$Population, 3,4)       # last two letters = line of origin
id$group <- paste(id$Treatment, id$Line, sep="")
sampleTable <- data.frame(
                    sampleName = f1samp$V1,
                    Line = id$Line,
                    Treatment = id$Treatment,
                    Replicate = id$Replicate)
# double check that the following agrees
rownames(sampleTable) <- colnames(txi$counts)
# setting this up, row info is each transcript/gene
# column is phenotypic data
rownames(sampleTable) <- colnames(txi$counts)
# import to DESeq2. this usese counts from tximport. from salmon. gene level
dds <- DESeqDataSetFromTximport(txi,
                                   colData=sampleTable,
                                   design = ~ Line + Treatment + Line:Treatment)
# filter: keep genes with count > 10 in at least 14 of the 16 samples (~88%)
keep <- apply(counts(dds), 1, function(x) {ifelse(length(which(x > 10)) > 13, TRUE, FALSE)})
dds <- dds[keep,]
nrow(dds)
#[1] 23323
# then rlog transform (regularized log; blind to design) for the DAPC below
rld<-rlog(dds,blind=TRUE)
#head(assay(rld))
dat=as.data.frame(assay(rld))
colnames(dat)<-colnames(dds)
####################
# Discriminant function analysis (DAPC) on F1 expression: train on the two
# home groups, then project the transplanted groups onto the trained axis.
####################
ctr <-dat[,grep("AAAA|HHHH",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH|HHAA",colnames(dat))] # in away envir
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=FALSE) # note. the scale command doesnt have much impact
scores=pcp$x
#screeplot(pcp,bstick=T)
# adegenet: finding clusters (even though we know what clusters we want) - choose 7 PCs and 2 groups
clus.ctr=find.clusters(t(ctr),max.n.clus=7,
             n.pca=7, n.clust=2) #keep 7 and 2
# overwrite inferred labels with the known groups (4 replicates each)
clus.ctr$grp=c(rep(2, 4),
        rep(1, 4))
# discriminant function for two groups:
dp.ctr=dapc(t(ctr),clus.ctr$grp,
        n.pca=4, n.da=2) #keep 4 and 2
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4) #discriminant function for ORIGIN type expression
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt))) #skip IO11C for host b/c outlier sample in WGCNA
# combine home coordinates and away projections for plotting
dpc=data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
# gp = y-offset for plotting: home groups at 1.1, away groups at 1.5
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc$LD1 <- dpc$LD1*-1  # flip sign for consistent orientation across generations
dpc.dge.1 <- dpc  # stash F1 DGE scores for the combined output table
gp_means <- dpc %>% group_by(Line, Treatment) %>%
     summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
# F1 DGE panel: replicates, group means, home->away arrows
a2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
    geom_point(size=3, lwd=2)+
    ylim(1, 2) +
    xlim(-5,5) +
    scale_color_manual(values=c("#6699CC","#CC3333"))+
    scale_fill_manual(values=c("#6699CC","#CC3333")) +
    scale_shape_manual(values=c(21, 24))+
    guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
           shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
    #stat_summary(fun="mean") +
    theme_classic()+
    ylab(" ")+
    xlab(" ") +
    ggtitle("F1") +
    theme(plot.title = element_text(hjust = 0.5),
        axis.line.y=element_blank(),
        axis.ticks.y=element_blank(),
        axis.text.y=element_blank()) +
    geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
    geom_segment(
           x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
        y=1.1, yend=1.1,
        lty=1,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="black")+
    geom_segment(
           x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
        y=1.5, yend=1.5,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="gray50")+
    theme(legend.position = "none")
### stats: MCMCglmm on the F1 DGE discriminant scores.
# home = 0 for native-environment samples, 1 for transplanted samples.
dpc.all <- dpc
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
# weakly informative prior with parameter expansion on the replicate random effect
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod1=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod1)
# post.mean l-95% CI u-95% CI eff.samp   pMCMC
#(Intercept)   -3.2919  -4.5618  -2.0473     2800 0.00286 **
#LineHH         6.6354   5.5284   7.7635     3059 < 4e-04 ***
#LineAA:home    3.5265   2.4352   4.6981     2800 < 4e-04 ***
#LineHH:home   -1.9568  -3.0912  -0.9127     2836 0.00143 **
posterior.mode(mod1$VCV)
summary(mod1$Sol)
# $Sol is the posterior distribution of the fixed effects
#head(mod1$Sol)
HPDinterval(mod1$Sol)
# calculating difference in magnitudes of LineAA:home and LineHH:home using sampled sets of parameters:
awayDelta=abs(mod1$Sol[,"LineAA:home"]) -abs(mod1$Sol[,"LineHH:home"])
# 95% credible interval:
HPDinterval(awayDelta)
# MCMC p-value: proportion of posterior samples crossing zero
# (if none cross, report p < 1/number of samples)
if (is.na(table(awayDelta<0)[2])) {
 cat("p <",signif(1/length(awayDelta),1))
} else {
 cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
###############################################################################################
###############################################################################################
######
###### DGE F2: import salmon quantifications and build the DESeq2 object.
###### Fix vs original: the sample table was built from f1samp, which still
###### held the F1 sample names (the F1 and F3 sections both rebuild it), so
###### sampleName was mislabeled and metadata relied on incidental ordering.
###### f1samp is now re-subset to the F2 samples, mirroring the other sections.
######
###############################################################################################
###############################################################################################
# now point to quant files
files <- file.path(dir, samples$V1, "quant.sf")
names(files) <- samples$V1
all(file.exists(files))  # sanity check: should print TRUE
#subset files to only include F2
files <- files[grep("F2", files)]
txi <- tximport(files, type = "salmon", tx2gene = tx2gene)
# rebuild sample metadata from the F2 sample names (was F1 names in the original)
f1samp <- as.data.frame(samples$V1[grep("F2", samples$V1)])
colnames(f1samp) <- c("V1")
id <- separate(data=f1samp, col=V1, sep="_", into = c("Population", "Generation", "Replicate"))
id$Treatment <- substr(id$Population, 1,2)  # first two letters = rearing environment
id$Line <- substr(id$Population, 3,4)       # last two letters = line of origin
id$group <- paste(id$Treatment, id$Line, sep="")
sampleTable <- data.frame(
                    sampleName = f1samp$V1,
                    Line = id$Line,
                    Treatment = id$Treatment,
                    Replicate = id$Replicate)
rownames(sampleTable) <- colnames(txi$counts)
# setting this up, row info is each transcript/gene
# column is phenotypic data
rownames(sampleTable) <- colnames(txi$counts)
dds <- DESeqDataSetFromTximport(txi,
                                   colData=sampleTable,
                                   design = ~ Line + Treatment + Line:Treatment)
# filter: keep genes with count > 10 in at least 14 of the 16 samples (~88%)
keep <- apply(counts(dds), 1, function(x) {ifelse(length(which(x > 10)) > 13, TRUE, FALSE)})
dds <- dds[keep,]
nrow(dds)
#[1] 24881
# then rlog transform (regularized log; blind to design) for the DAPC below
rld<-rlog(dds,blind=TRUE)
dat=as.data.frame(assay(rld))
colnames(dat)<-colnames(dds)
####################
# Discriminant function analysis (DAPC) on F2 expression: train on the two
# home groups, then project the transplanted groups onto the trained axis.
####################
ctr <-dat[,grep("AAAA|HHHH",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH|HHAA",colnames(dat))] # in away envir
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=TRUE)
scores=pcp$x
#screeplot(pcp,bstick=T)
# adegenet: finding clusters (even though we know what clusters we want) - choose 7 PCs and 2 groups
clus.ctr=find.clusters(t(ctr),max.n.clus=7,
             n.pca=7, n.clust=2) #keep 7 and 2
# overwrite inferred labels with the known groups (4 replicates each)
clus.ctr$grp=c(rep(2, 4),
        rep(1, 4)) #tell the DF which groups you want to cluster; in this case in2in and off2off
# discriminant function for two groups (3 PCs retained here):
dp.ctr=dapc(t(ctr),clus.ctr$grp,
        n.pca=3, n.da=2)
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4) #discriminant function for ORIGIN type expression
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt))) #skip IO11C for host b/c outlier sample in WGCNA
# combine home coordinates and away projections for plotting
dpc <- data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
# gp = y-offset for plotting: home groups at 1.1, away groups at 1.5
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc$LD1 <- dpc$LD1*-1  # flip sign for consistent orientation across generations
dpc.dge.2 <- dpc  # stash F2 DGE scores for the combined output table
gp_means <- dpc %>% group_by(Line, Treatment) %>%
     summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
# F2 DGE panel
b2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
    geom_point(size=3, lwd=2)+
    ylim(1, 2) +
    xlim(-5, 5) +
    scale_color_manual(values=c("#6699CC","#CC3333"))+
    scale_fill_manual(values=c("#6699CC","#CC3333")) +
    scale_shape_manual(values=c(21, 24))+
    guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
           shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
    #stat_summary(fun="mean") +
    theme_classic()+
    ylab(" ")+
    xlab(" ") +
    ggtitle("F2") +
    theme(plot.title = element_text(hjust = 0.5),
        axis.line.y=element_blank(),
        axis.ticks.y=element_blank(),
        axis.text.y=element_blank()) +
    geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
    geom_segment(
           x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
        y=1.1, yend=1.1,
        lty=1,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="gray50")+
    geom_segment(
           x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
        xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
        y=1.5, yend=1.5,
        size = 1, arrow = arrow(length = unit(0.2, "inches")),
        color="gray50")+
    theme(legend.position = "none")
### stats: MCMCglmm on the F2 DGE discriminant scores.
# home = 0 for native-environment samples, 1 for transplanted samples.
# NOTE: mod1 is reused as the model name here (F2), overwriting the F1 model.
dpc.all <- dpc
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
# weakly informative prior with parameter expansion on the replicate random effect
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod1=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod1)
# post.mean l-95% CI u-95% CI eff.samp    pMCMC
#(Intercept)    3.3268   2.1485   4.5441     2652 0.001429 **
#LineHH        -6.6198  -7.7842  -5.4205     3071  < 4e-04 ***
#LineAA:home   -3.5292  -4.6578  -2.4419     2800 0.000714 ***
#LineHH:home    1.9427   0.8655   3.0950     2800 0.003571 **
posterior.mode(mod1$VCV)
# $Sol is the posterior distribution of the fixed effects
#head(mod1$Sol)
HPDinterval(mod1$Sol)
# difference in magnitude of the two lines' transplant responses (posterior)
awayDelta=abs(mod1$Sol[,"LineAA:home"])-abs(mod1$Sol[,"LineHH:home"])
# 95% credible interval:
HPDinterval(awayDelta)
# MCMC p-value: proportion of posterior samples crossing zero
if (is.na(table(awayDelta<0)[2])) {
 cat("p <",signif(1/length(awayDelta),1))
} else {
 cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
###############################################################################################
###############################################################################################
######
###### DGE F3
######
###############################################################################################
###############################################################################################
# now point to quant files
files <- file.path(dir, samples$V1, "quant.sf")
names(files) <- samples$V1
all(file.exists(files))
#subset files to only include F3
files <- files[grep("F3", files)]
txi <- tximport(files, type = "salmon", tx2gene = tx2gene)
names(txi)
head(txi$counts)
tx2gene <- data.frame(transcript=gene_tran$V2, gene=gene_tran$V1)
f1samp <- as.data.frame(samples$V1[grep("F3", samples$V1)])
colnames(f1samp) <- c("V1")
id <- separate(data=f1samp, col=V1, sep="_", into = c("Population", "Generation", "Replicate"))
id$Treatment <- substr(id$Population, 1,2)
id$Line <- substr(id$Population, 3,4)
id$group <- paste(id$Treatment, id$Line, sep="")
sampleTable <- data.frame(
sampleName = f1samp$V1,
Line = id$Line,
Treatment = id$Treatment,
Replicate = id$Replicate)
# double check that the following agrees
rownames(sampleTable) <- colnames(txi$counts)
dds <- DESeqDataSetFromTximport(txi,
colData=sampleTable,
design = ~ Line + Treatment + Line:Treatment)
# remove rows where count < 10 in more than 90% of samples
keep <- apply(counts(dds), 1, function(x) {ifelse(length(which(x > 10)) > 13, TRUE, FALSE)})
dds <- dds[keep,]
nrow(dds)
#[1] 24131
# then rolog transform
rld<-rlog(dds,blind=TRUE)
dat=as.data.frame(assay(rld))
colnames(dat)<-colnames(dds)
####################
#discriminant function analysis
####################
# Build the discriminant axis from the two home-environment groups only, then
# project the transplanted (away) samples onto that axis.
ctr <-dat[,grep("AAAA|HHHH",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH|HHAA",colnames(dat))] # in away envir
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=TRUE)
scores=pcp$x
# adegenet: finding clusters (even though we know what clusters we want) - choose 7 PCs and 2 groups
clus.ctr=find.clusters(t(ctr),max.n.clus=7,
n.pca=7, n.clust=2) #keep 7 and 2
#Use clus$grp to rename to in2in and off2off -
clus.ctr$grp=c(rep(2, 4),
rep(1, 4)) #tell the DF which groups you want to cluster; in this case in2in and off2off
# discriminant function for two groups:
# NOTE(review): the trailing comment says "keep 7" but n.pca=4 is used here --
# confirm which number of retained PCs was intended.
dp.ctr=dapc(t(ctr),clus.ctr$grp,
n.pca=4, n.da=2) #keep 7 and 2
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4) #discriminant function for ORIGIN type expression
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt))) #skip IO11C for host b/c outlier sample in WGCNA
#assign groups
# Combine fitted (home) and predicted (away) LD scores for plotting; gp sets the
# y-offset used in the dot plots and LD1 is sign-flipped for display.
dpc <- data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc$LD1 <- dpc$LD1*-1
dpc.dge.3 <- dpc
# Stack F1-F3 expression DAPC scores (dpc.dge.1 / dpc.dge.2 come from earlier
# sections of this script) and save for downstream use.
dpc_dge <- rbind(dpc.dge.1, dpc.dge.2, dpc.dge.3)
write.table(dpc_dge, "~/reciprocal_t/analysis/dpc_dge.txt", sep="\t", quote=F, row.names=F)
# Per-group (Line x Treatment) mean LD1 scores, plotted as large translucent
# points; arrows run from the home-group mean to the corresponding away-group mean.
gp_means <- dpc %>% group_by(Line, Treatment) %>%
summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
c2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
geom_point(size=3, lwd=2)+
ylim(1, 2) +
xlim(-5, 5) +
scale_color_manual(values=c("#6699CC","#CC3333"))+
scale_fill_manual(values=c("#6699CC","#CC3333")) +
scale_shape_manual(values=c(21, 24))+
guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
#stat_summary(fun="mean") +
theme_classic()+
ylab(" ")+
xlab(" ") +
ggtitle("F3") +
theme(plot.title = element_text(hjust = 0.5),
axis.line.y=element_blank(),
axis.ticks.y=element_blank(),
axis.text.y=element_blank()) +
geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
geom_segment(
x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
y=1.1, yend=1.1,
lty=1,
size = 1, arrow = arrow(length = unit(0.2, "inches")),
color="gray50")+
geom_segment(
x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
y=1.5, yend=1.5,
size = 1, arrow = arrow(length = unit(0.2, "inches")),
color="gray50")+
theme(legend.position = "none")
# Assemble the F1/F2/F3 expression panels (a2 and b2 are built in earlier
# sections of this script) into a single 3-row figure.
pdf("~/Documents/UVM/Reciprocal_transplant/figures/dapc_RNA_gen.pdf", height = 6, width = 4)
ggarrange(a2,b2,c2, ncol=1, nrow=3, common.legend=TRUE)
dev.off()
### stats:
# Bayesian mixed model on the F3 expression DAPC scores: fixed effects for line
# and the line-by-home interaction, random intercept per replicate.
dpc.all <- dpc
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
# home = 0 for samples in their native environment, 1 for transplants.
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod1=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod1)
# post.mean l-95% CI u-95% CI eff.samp pMCMC
#(Intercept) -3.480 -4.805 -2.140 2800 <4e-04 ***
#LineHH 6.951 5.492 8.494 2800 <4e-04 ***
#LineAA:home 4.799 3.306 6.291 2800 <4e-04 ***
#LineHH:home -4.555 -6.071 -3.047 2800 <4e-04 ***
posterior.mode(mod1$VCV)
HPDinterval(mod1$Sol)
# Difference in the magnitude of the plastic response between lines.
# NOTE(review): the subtraction order (HH minus AA) is flipped relative to the
# section above (AA minus HH) -- confirm the intended sign convention.
#awayDelta=abs(mod1$Sol[,"LineAA:home"])-abs(mod1$Sol[,"LineHH:home"])
awayDelta=abs(mod1$Sol[,"LineHH:home"])-abs(mod1$Sol[,"LineAA:home"])
# 95% credible interval:
HPDinterval(awayDelta)
# Posterior p-value from the sign of the difference (see comment in the F2 block).
if (is.na(table(awayDelta<0)[2])) {
cat("p <",signif(1/length(awayDelta),1))
} else {
cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
# all gens:
#f1
#LineAA:home 3.5265 2.4352 4.6981 2800 < 4e-04 ***
#LineHH:home -1.9568 -3.0912 -0.9127 2836 0.00143 **
#f2
#LineAA:home -3.5292 -4.6578 -2.4419 2800 0.000714 ***
#LineHH:home 1.9427 0.8655 3.0950 2800 0.003571 **
# f3
#LineAA:home 4.799 3.306 6.291 2800 <4e-04 ***
#LineHH:home -4.555 -6.071 -3.047 2800 <4e-04 ***
# Bonferroni correction of the six interaction pMCMC values (F1-F3, both lines).
p.adjust(c(4e-04,0.00143,0.000714,0.003571,4e-04,4e-04 ), method="bonferroni")
#[1] 0.002400 0.008580 0.004284 0.021426 0.002400 0.002400
######################################################################################################################
###########################################################
###########################################################
## allele freqs
###########################################################
###########################################################
######################################################################################################################
# Load per-sample allele frequencies and the matching variant table (one row per SNP).
af <- read.table("~/reciprocal_t/analysis/filtered_allele_freqs.txt", header=TRUE)
dat3 <- read.table("~/reciprocal_t/analysis/filtered_variants.txt", header=TRUE)
# Expected sample columns: Treatment+Line x generation x replicate.
# NOTE(review): `pops` is not used in the DAPC sections below -- presumably kept
# for reference or used later in the script; verify before removing.
pops <- c(
"AAAA_F1_REP1", "AAAA_F1_REP2", "AAAA_F1_REP3", "AAAA_F1_REP4",
"AAAA_F2_REP1", "AAAA_F2_REP2", "AAAA_F2_REP3", "AAAA_F2_REP4",
"AAAA_F3_REP1", "AAAA_F3_REP2", "AAAA_F3_REP3", "AAAA_F3_REP4",
"AAHH_F1_REP1", "AAHH_F1_REP2", "AAHH_F1_REP3", "AAHH_F1_REP4",
"AAHH_F2_REP1", "AAHH_F2_REP2", "AAHH_F2_REP3", "AAHH_F2_REP4",
"AAHH_F3_REP1", "AAHH_F3_REP2", "AAHH_F3_REP3", "AAHH_F3_REP4",
"HHAA_F1_REP1", "HHAA_F1_REP2", "HHAA_F1_REP3", "HHAA_F1_REP4",
"HHAA_F2_REP1", "HHAA_F2_REP2", "HHAA_F2_REP3", "HHAA_F2_REP4",
"HHAA_F3_REP1", "HHAA_F3_REP2", "HHAA_F3_REP3", "HHAA_F3_REP4",
"HHHH_F1_REP1", "HHHH_F1_REP2", "HHHH_F1_REP3", "HHHH_F1_REP4",
"HHHH_F2_REP1", "HHHH_F2_REP2", "HHHH_F2_REP3", "HHHH_F2_REP4",
"HHHH_F3_REP1", "HHHH_F3_REP2", "HHHH_F3_REP3", "HHHH_F3_REP4")
# Samples-by-SNP matrix with "chrom:pos" column names (not used by the DAPC
# blocks below, which operate on `dat` directly).
freqs <- t(af[,2:ncol(af)])
colnames(freqs) <- c(paste(dat3$Chrom, dat3$Position, sep=":"))
dat <- af
###############################################################################################
###############################################################################################
######
###### F1
######
###############################################################################################
###############################################################################################
# DAPC on F1 allele frequencies: fit the axis on home-environment samples,
# project the transplanted samples, then plot both with group-mean arrows.
ctr <-dat[,grep("AAAA_F1|HHHH_F1",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH_F1|HHAA_F1",colnames(dat))] # in away envir
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=FALSE)
scores=pcp$x
#screeplot(pcp,bstick=T)
# adegenet: finding clusters (even though we know what clusters we want) -
clus.ctr=find.clusters(t(ctr),max.n.clus=10,
n.pca=3, n.clust=2)
#Use clus$grp to rename groups
clus.ctr$grp=c(rep(2, 4),
rep(1, 4))
# generate the actual DAPC
dp.ctr=dapc(t(ctr),clus.ctr$grp, var.loadings=TRUE, n.pca=3, n.da=1,
var.contrib =TRUE)
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4)
# then add in the transplanted groups to the previously generated DAPC
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt)))
# create another dataframe structure in order to plot these predicted values
dpc=data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc$LD1 <- dpc$LD1*-1
dpc1 <- dpc
# Group means for the large translucent points and the home->away arrows.
gp_means <- dpc %>% group_by(Line, Treatment) %>%
summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
a2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
geom_point(size=3, lwd=2)+
ylim(1, 2) +
xlim(-5.5,5.5) +
scale_color_manual(values=c("#6699CC","#CC3333"))+
scale_fill_manual(values=c("#6699CC","#CC3333")) +
scale_shape_manual(values=c(21, 24))+
guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
#stat_summary(fun="mean") +
theme_classic()+
ylab(" ")+
xlab(" ") +
ggtitle("F1") +
theme(plot.title = element_text(hjust = 0.5),
axis.line.y=element_blank(),
axis.ticks.y=element_blank(),
axis.text.y=element_blank()) +
geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
geom_segment(
x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
y=1.1, yend=1.1,
lty=1,
size = 1, arrow = arrow(length = unit(0.2, "inches")),
color="gray50")+
geom_segment(
x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
y=1.5, yend=1.5,
size = 1, arrow = arrow(length = unit(0.2, "inches")),
color="gray50")+
theme(legend.position = "none")
###############################################################################################
###############################################################################################
######
###### F2
######
###############################################################################################
###############################################################################################
# DAPC on F2 allele frequencies -- identical pipeline to the F1 block above.
# NOTE(review): the F1/F2/F3 sections are copy-pasted; consider factoring into a
# function taking the generation label to avoid divergence (see the mod1/mod2/mod3
# mixups in the stats sections below).
ctr <-dat[,grep("AAAA_F2|HHHH_F2",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH_F2|HHAA_F2",colnames(dat))] # in away envir
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=FALSE)
scores=pcp$x
#screeplot(pcp,bstick=T)
# adegenet: finding clusters (even though we know what clusters we want) -
clus.ctr=find.clusters(t(ctr),max.n.clus=10,
n.pca=3, n.clust=2)
#Use clus$grp to rename groups
clus.ctr$grp=c(rep(2, 4),
rep(1, 4))
# generate the actual DAPC
dp.ctr=dapc(t(ctr),clus.ctr$grp, var.loadings=TRUE, n.pca=3, n.da=1,
var.contrib =TRUE)
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4)
# then add in the transplanted groups to the previously generated DAPC
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt)))
# create another dataframe structure in order to plot these predicted values
dpc=data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc$LD1 <- dpc$LD1*-1
dpc2 <- dpc
gp_means <- dpc %>% group_by(Line, Treatment) %>%
summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
b2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
geom_point(size=3, lwd=2)+
ylim(1, 2) +
xlim(-5.5,5.5) +
scale_color_manual(values=c("#6699CC","#CC3333"))+
scale_fill_manual(values=c("#6699CC","#CC3333")) +
scale_shape_manual(values=c(21, 24))+
guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
#stat_summary(fun="mean") +
theme_classic()+
ylab(" ")+
xlab(" ") +
ggtitle("F2") +
theme(plot.title = element_text(hjust = 0.5),
axis.line.y=element_blank(),
axis.ticks.y=element_blank(),
axis.text.y=element_blank()) +
geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
geom_segment(
x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
y=1.1, yend=1.1,
lty=1,
size = 1, arrow = arrow(length = unit(0.2, "inches")),
color="gray50")+
geom_segment(
x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
y=1.5, yend=1.5,
size = 1, arrow = arrow(length = unit(0.2, "inches")),
color="gray50")+
theme(legend.position = "none")
###############################################################################################
###############################################################################################
######
###### F3
######
###############################################################################################
###############################################################################################
# DAPC on F3 allele frequencies: fit the discriminant axis on the two
# home-environment groups, then project the transplanted (away) samples onto it.
ctr <-dat[,grep("AAAA_F3|HHHH_F3",colnames(dat))] # in home envir
trt <-dat[,grep("AAHH_F3|HHAA_F3",colnames(dat))] # in away envir
pcp=prcomp(t(ctr), retx=TRUE, center=TRUE, scale.=FALSE)
scores=pcp$x
#screeplot(pcp,bstick=T)
# adegenet: finding clusters (even though we know what clusters we want) -
clus.ctr=find.clusters(t(ctr),max.n.clus=10,
n.pca=3, n.clust=2)
#Use clus$grp to rename groups
clus.ctr$grp=c(rep(2, 4),
rep(1, 4))
# generate the actual DAPC
dp.ctr=dapc(t(ctr),clus.ctr$grp, var.loadings=TRUE, n.pca=3, n.da=1,
var.contrib =TRUE)
#scatter(dp.ctr,bg="white",scree.da=FALSE,legend=TRUE,solid=.4)
# then add in the transplanted groups to the previously generated DAPC
pred.trt<-predict.dapc(dp.ctr,newdata=(t(trt)))
# create another dataframe structure in order to plot these predicted values
# (FIX: an exact duplicate of the next assignment was removed -- it was a no-op repeat)
dpc=data.frame(rbind(dp.ctr$ind.coord,pred.trt$ind.scores))
dpc$Line <- substr(row.names(dpc),3,4)
dpc$Treatment <- substr(row.names(dpc),1,2)
dpc$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc$LD1 <- dpc$LD1*-1
dpc3 <- dpc
# Stack all three generations (dpc1/dpc2 come from the F1/F2 sections) and save.
dpc_af <- rbind(dpc1, dpc2, dpc3)
write.table(dpc_af, "~/reciprocal_t/analysis/dpc_af.txt", sep="\t", quote=F, row.names=F)
# Group means for the large translucent points and the home->away arrows.
gp_means <- dpc %>% group_by(Line, Treatment) %>%
summarize(mean=mean(LD1))
gp_means$gp <- c(1.1, 1.1,1.5, 1.5)
c2 <- ggplot(dpc, aes(x=LD1, y=gp, fill=Treatment, shape=Line)) +
geom_point(size=3, lwd=2)+
ylim(1, 2) +
xlim(-5.5,5.5) +
scale_color_manual(values=c("#6699CC","#CC3333"))+
scale_fill_manual(values=c("#6699CC","#CC3333")) +
scale_shape_manual(values=c(21, 24))+
guides(fill=guide_legend(override.aes=list(shape=21, size=7, fill=c("#6699CC","#CC3333")),order = 2),
shape=guide_legend(override.aes=list(shape=c(16, 17), size=c(7,5)))) +
#stat_summary(fun="mean") +
theme_classic()+
ylab(" ")+
xlab(" ") +
ggtitle("F3") +
theme(plot.title = element_text(hjust = 0.5),
axis.line.y=element_blank(),
axis.ticks.y=element_blank(),
axis.text.y=element_blank()) +
geom_point(data=gp_means, aes(x=mean, y=gp, fill=Treatment, shape=Line), size=7, alpha=0.5) +
geom_segment(
x = mean(dpc$LD1[grep( "AAAA", row.names(dpc))]),
xend = mean(dpc$LD1[grep( "HHAA", row.names(dpc))]),
y=1.1, yend=1.1,
lty=1,
size = 1, arrow = arrow(length = unit(0.2, "inches")),
color="gray50")+
geom_segment(
x = mean(dpc$LD1[grep( "HHHH", row.names(dpc))]),
xend = mean(dpc$LD1[grep( "AAHH", row.names(dpc))]),
y=1.5, yend=1.5,
size = 1, arrow = arrow(length = unit(0.2, "inches")),
# NOTE(review): this arrow is "black" while every other panel uses "gray50" --
# confirm whether the color difference is intentional.
color="black")+
theme(legend.position = "none")
# Assemble the F1/F2/F3 allele-frequency panels into one 3-row figure.
pdf("~/Documents/UVM/Reciprocal_transplant/figures/dapc_SNP_gen.pdf", height = 6, width = 4)
ggarrange(a2,b2,c2, ncol=1, nrow=3, common.legend=TRUE)
dev.off()
#############
### stats:
# Mixed model on the F1 allele-frequency DAPC scores: line + line-by-home fixed
# effects, random intercept per replicate. home = 1 for transplanted samples.
dpc.all <- dpc1
dpc.all$Line <- substr(row.names(dpc.all),3,4)
dpc.all$Treatment <- substr(row.names(dpc.all),1,2)
dpc.all$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
# NOTE(review): dpc1 already has Line/Treatment/gp and a sign-flipped LD1 from
# the DAPC section; re-flipping here means the model sees the original sign --
# confirm this is intended (same pattern repeats for F2 and F3).
dpc.all$LD1 <- dpc.all$LD1*-1
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod1=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod1)
# post.mean l-95% CI u-95% CI eff.samp pMCMC
#(Intercept) 4.2464 3.0894 5.4160 2800 0.000714 ***
#LineHH -8.4911 -9.8337 -7.1083 2800 < 4e-04 ***
#LineAA:home -2.2096 -3.6851 -0.9245 3014 0.005714 **
#LineHH:home 2.1664 0.8041 3.5429 2800 0.001429 **
posterior.mode(mod1$VCV)
HPDinterval(mod1$Sol)
# Difference in magnitude of the plastic response between lines (F1 model).
#awayDelta=abs(mod1$Sol[,"LineAA:home"])-abs(mod1$Sol[,"LineHH:home"])
awayDelta=abs(mod1$Sol[,"LineHH:home"])-abs(mod1$Sol[,"LineAA:home"])
# 95% credible interval:
HPDinterval(awayDelta)
# Posterior p-value from the sign of the difference.
if (is.na(table(awayDelta<0)[2])) {
cat("p <",signif(1/length(awayDelta),1))
} else {
cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
# F2
# Mixed model on the F2 allele-frequency DAPC scores (same structure as F1).
dpc.all <- dpc2
dpc.all$Line <- substr(row.names(dpc.all),3,4)
dpc.all$Treatment <- substr(row.names(dpc.all),1,2)
dpc.all$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc.all$LD1 <- dpc.all$LD1*-1
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
# home = 1 for transplanted samples, 0 for samples in their native environment.
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod2=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod2)
# post.mean l-95% CI u-95% CI eff.samp pMCMC
# (Intercept) -3.7183 -4.8065 -2.4644 2800 0.00143 **
# LineHH 7.4262 6.2932 8.5262 2513 < 4e-04 ***
# LineAA:home 1.8559 0.7622 2.9456 2800 0.00286 **
# LineHH:home -2.1932 -3.3456 -1.0985 2800 0.00143 **
posterior.mode(mod2$VCV)
HPDinterval(mod2$Sol)
# BUG FIX: awayDelta was previously computed from mod1$Sol (the F1 model), so
# the reported F2 contrast/p-value came from F1 posteriors. Use mod2 fitted above.
awayDelta=abs(mod2$Sol[,"LineHH:home"])-abs(mod2$Sol[,"LineAA:home"])
# 95% credible interval:
HPDinterval(awayDelta)
# Posterior p-value from the sign of the difference.
if (is.na(table(awayDelta<0)[2])) {
cat("p <",signif(1/length(awayDelta),1))
} else {
cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
## F3
# Mixed model on the F3 allele-frequency DAPC scores (same structure as F1/F2).
dpc.all <- dpc3
dpc.all$Line <- substr(row.names(dpc.all),3,4)
dpc.all$Treatment <- substr(row.names(dpc.all),1,2)
dpc.all$gp <- c(rep(1.1,4), rep(1.5,8),rep(1.1,4))
dpc.all$LD1 <- dpc.all$LD1*-1
dpc.all$rep <- substr(row.names(dpc.all), 9,13)
# home = 1 for transplanted samples, 0 for samples in their native environment.
dpc.all$home <- 1
dpc.all$home[grep("AAAA|HHHH", row.names(dpc.all))] <- 0
prior = list(R = list(V = 1, nu = 0.002), G = list(G1 = list(V=1, nu=0.002,alpha.mu=0, alpha.V=1000)))
mod3=MCMCglmm(LD1~Line+Line:home, random=~rep, prior= prior, data=dpc.all,nitt=75000, thin=25, burnin=5000)
summary(mod3)
# post.mean l-95% CI u-95% CI eff.samp pMCMC
#(Intercept) -2.99296 -4.12458 -1.80411 2800 0.002143 **
#LineHH 6.00740 4.90780 7.05361 2800 < 4e-04 ***
#LineAA:home 1.20025 0.07212 2.13389 3439 0.029286 *
#LineHH:home -2.70971 -3.78055 -1.73574 2800 0.000714 ***
posterior.mode(mod3$VCV)
HPDinterval(mod3$Sol)
# BUG FIX: awayDelta was previously computed from mod1$Sol (the F1 model), so
# the reported F3 contrast/p-value came from F1 posteriors. Use mod3 fitted above.
awayDelta=abs(mod3$Sol[,"LineHH:home"])-abs(mod3$Sol[,"LineAA:home"])
# 95% credible interval:
HPDinterval(awayDelta)
# Posterior p-value from the sign of the difference.
if (is.na(table(awayDelta<0)[2])) {
cat("p <",signif(1/length(awayDelta),1))
} else {
cat("p =",signif(table(awayDelta<0)[2]/length(awayDelta),2))
}
# correct for multiple testing:
# Interaction-term pMCMC values from the three allele-frequency models above.
# f1
#LineAA:home -2.2096 -3.6851 -0.9245 3014 0.005714 **
#LineHH:home 2.1664 0.8041 3.5429 2800 0.001429 **
#f2
# LineAA:home 1.8559 0.7622 2.9456 2800 0.00286 **
# LineHH:home -2.1932 -3.3456 -1.0985 2800 0.00143 **
#F3
#LineAA:home 1.20025 0.07212 2.13389 3439 0.029286 *
#LineHH:home -2.70971 -3.78055 -1.73574 2800 0.000714 ***
# NOTE(review): the expression analysis above corrects with "bonferroni" while
# this uses "fdr" -- confirm the difference in correction method is intentional.
p.adjust(c(0.005714,0.001429,0.00286,0.00143,0.029286,0.000714), method = "fdr")
# [1] 0.034284 0.008574 0.017160 0.008580 0.175716 0.004284
|
#### Libraries I need ####
library(tidyverse)
library(mclust)
#### Read in Data ####
# dat: raw Big Five responses; sc_dat: normalized scores, columns 2:6 are the
# five trait scores used for clustering (X1 in column 1 is the row identifier).
dat <- read_csv("big5clean_construct.csv")
sc_dat <- read_csv("normalized.csv")
#load("./Connor's files/SubClusters.RData")
#### Run Mixed Model with 4 clusters ####
# Fit a 4-component Gaussian mixture on the five trait columns.
k <- 4
mc <- Mclust(sc_dat[,2:6],k)
# Get the probs for each cluster
pr <- mc$z %>% as.data.frame()
# Get the max for each obs highest prob
pr$Max <- apply(pr, 1, max)
# Create variable for each obs best cluster
pr$ClusterID <- mc$classification
# Loop to get the second highest as well
# (second-largest membership probability and the cluster it belongs to)
for (i in 1:nrow(pr)) {
pr$Max2[i] <- sort(pr[i,1:k], TRUE)[2] %>% as.numeric()
pr$SecondClusterID[i] <- match(pr$Max2[i], pr[i,1:k]) %>% as.numeric()
}
# Create variable for the two highest probs added together
pr$toptwo <- pr$Max + pr$Max2
# Lastly merge data with the new dataframe
# (column-bind: assumes pr rows are in the same order as dat / sc_dat rows)
dat_new <- cbind(dat, pr)
sc_dat_new <- cbind(sc_dat, pr)
# Centers for the clusters
centers <- mc$parameters$mean %>%
as.data.frame()
#### Split into 4 separate dfs ####
# One data frame per best-match cluster, created via assign() as dat<i>best.
for (i in 1:k) {
name <- paste0("dat",i,"best")
temp <- filter(sc_dat_new, ClusterID == i)
assign(name, temp)
}
best_clusters <- list(dat1best,dat2best,dat3best,dat4best)
# Repeat for second best match clusters
for (i in 1:k) {
name <- paste0("dat",i,"second")
temp <- filter(sc_dat_new, SecondClusterID == i)
assign(name, temp)
}
second_clusters <- list(dat1second,dat2second,dat3second,dat4second)
#### Split each cluster into 2 or 4 df ####
# Fit a 2-component sub-mixture within each best-match cluster.
# NOTE(review): best_clusters[i] (single bracket) yields a one-element list that
# as.data.frame() flattens; best_clusters[[i]] would be the direct idiom -- verify
# column names survive unchanged before refactoring.
dat_final<- data.frame()
for (i in 1:k) {
#i <- 1
mc <- Mclust(best_clusters[i] %>%
as.data.frame() %>%
select(2:6),
2)
pr <- mc$z %>% as.data.frame()
names(pr)[1:2] <- c("Sub1Best", "Sub2Best")
pr$SubMaxBest <- apply(pr, 1, max)
pr$SubMax2Best <- apply(pr, 1, min)
pr$SubBestClusterID <- mc$classification
temp <- best_clusters[i] %>%
as.data.frame() %>%
cbind(pr)
dat_final <- rbind(dat_final, temp)
}
# Repeat for second best match cluster
# (only the identifier X1 is kept here so the merge below can re-attach rows)
dat_temp <- data.frame()
for (i in 1:k) {
#i <- 1
mc <- Mclust(second_clusters[i] %>%
as.data.frame() %>%
select(2:6),
2)
pr <- mc$z %>% as.data.frame()
names(pr)[1:2] <- c("Sub1Second", "Sub2Second")
pr$SubMaxSecond <- apply(pr, 1, max)
pr$SubMax2Second <- apply(pr, 1, min)
pr$SubSecondClusterID <- mc$classification
temp <- second_clusters[i] %>%
as.data.frame() %>%
select(X1) %>%
cbind(pr)
dat_temp <- rbind(dat_temp, temp)
}
#### Merge two dfs together ####
dat_final <- merge(dat_final, dat_temp, by = "X1")
#### Conditional Probs ####
# Joint probabilities: (best/second top-level cluster) x (best/second subcluster).
dat_final$BB <- dat_final$Max * dat_final$SubMaxBest
dat_final$BS <- dat_final$Max * dat_final$SubMax2Best
dat_final$SB <- dat_final$Max2 * dat_final$SubMaxSecond
dat_final$SS <- dat_final$Max2 * dat_final$SubMax2Second
#### Reorder Columns ####
dat_final <- dat_final %>% select(X1,EXT,EST,AGR,CSN,OPN,ClusterID,SubBestClusterID,SecondClusterID,
SubSecondClusterID,BB,BS,SB,SS,Max,SubMaxBest,SubMax2Best,Max2,
SubMaxSecond,SubMax2Second,V1,V2,V3,V4)
# Sanity-check column: BB+BS+SB+SS per row (columns 11:14 after the reorder).
# NOTE(review): merge() above sorts by X1, so dat_final rows may no longer align
# with dat rows -- assigning into dat$temp positionally looks suspect; confirm
# whether dat_final$temp was intended instead.
dat$temp <- rowSums(dat_final[11:14])
#### Save data ####
save(best_clusters, centers, dat_final, second_clusters,
file = "./Connor's files/SubClusters.RData")
|
/Connor's files/SubclusterMixedModel.R
|
no_license
|
CRGreenhalgh/Big5Clustering
|
R
| false
| false
| 3,423
|
r
|
#### Libraries I need ####
library(tidyverse)
library(mclust)
#### Read in Data ####
dat <- read_csv("big5clean_construct.csv")
sc_dat <- read_csv("normalized.csv")
#load("./Connor's files/SubClusters.RData")
#### Run Mixed Model with 4 clusters ####
k <- 4
mc <- Mclust(sc_dat[,2:6],k)
# Get the probs for each cluster
pr <- mc$z %>% as.data.frame()
# Get the max for each obs highest prob
pr$Max <- apply(pr, 1, max)
# Create variable for each obs best cluster
pr$ClusterID <- mc$classification
# Loop to get the second highest as well
for (i in 1:nrow(pr)) {
pr$Max2[i] <- sort(pr[i,1:k], TRUE)[2] %>% as.numeric()
pr$SecondClusterID[i] <- match(pr$Max2[i], pr[i,1:k]) %>% as.numeric()
}
# Create variable for the two highest probs added together
pr$toptwo <- pr$Max + pr$Max2
# Lastly merge data with the new dataframe
dat_new <- cbind(dat, pr)
sc_dat_new <- cbind(sc_dat, pr)
# Centers for the clusters
centers <- mc$parameters$mean %>%
as.data.frame()
#### Split into 4 separate dfs ####
for (i in 1:k) {
name <- paste0("dat",i,"best")
temp <- filter(sc_dat_new, ClusterID == i)
assign(name, temp)
}
best_clusters <- list(dat1best,dat2best,dat3best,dat4best)
# Repeat for second best match clusters
for (i in 1:k) {
name <- paste0("dat",i,"second")
temp <- filter(sc_dat_new, SecondClusterID == i)
assign(name, temp)
}
second_clusters <- list(dat1second,dat2second,dat3second,dat4second)
#### Split each cluster into 2 or 4 df ####
dat_final<- data.frame()
for (i in 1:k) {
#i <- 1
mc <- Mclust(best_clusters[i] %>%
as.data.frame() %>%
select(2:6),
2)
pr <- mc$z %>% as.data.frame()
names(pr)[1:2] <- c("Sub1Best", "Sub2Best")
pr$SubMaxBest <- apply(pr, 1, max)
pr$SubMax2Best <- apply(pr, 1, min)
pr$SubBestClusterID <- mc$classification
temp <- best_clusters[i] %>%
as.data.frame() %>%
cbind(pr)
dat_final <- rbind(dat_final, temp)
}
# Repeat for second best match cluster
dat_temp <- data.frame()
for (i in 1:k) {
#i <- 1
mc <- Mclust(second_clusters[i] %>%
as.data.frame() %>%
select(2:6),
2)
pr <- mc$z %>% as.data.frame()
names(pr)[1:2] <- c("Sub1Second", "Sub2Second")
pr$SubMaxSecond <- apply(pr, 1, max)
pr$SubMax2Second <- apply(pr, 1, min)
pr$SubSecondClusterID <- mc$classification
temp <- second_clusters[i] %>%
as.data.frame() %>%
select(X1) %>%
cbind(pr)
dat_temp <- rbind(dat_temp, temp)
}
#### Merge two dfs together ####
dat_final <- merge(dat_final, dat_temp, by = "X1")
#### Conditional Probs ####
dat_final$BB <- dat_final$Max * dat_final$SubMaxBest
dat_final$BS <- dat_final$Max * dat_final$SubMax2Best
dat_final$SB <- dat_final$Max2 * dat_final$SubMaxSecond
dat_final$SS <- dat_final$Max2 * dat_final$SubMax2Second
#### Reorder Columns ####
dat_final <- dat_final %>% select(X1,EXT,EST,AGR,CSN,OPN,ClusterID,SubBestClusterID,SecondClusterID,
SubSecondClusterID,BB,BS,SB,SS,Max,SubMaxBest,SubMax2Best,Max2,
SubMaxSecond,SubMax2Second,V1,V2,V3,V4)
dat$temp <- rowSums(dat_final[11:14])
#### Save data ####
save(best_clusters, centers, dat_final, second_clusters,
file = "./Connor's files/SubClusters.RData")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colorblind.R
\name{colorblind_pal}
\alias{colorblind_pal}
\alias{scale_colour_colorblind}
\alias{scale_color_colorblind}
\alias{scale_fill_colorblind}
\title{Colorblind Color Palette (Discrete) and Scales}
\usage{
colorblind_pal()
scale_colour_colorblind(...)
scale_color_colorblind(...)
scale_fill_colorblind(...)
}
\arguments{
\item{...}{Arguments passed on to \code{discrete_scale}
\describe{
\item{palette}{A palette function that when called with a single integer
argument (the number of levels in the scale) returns the values that
they should take.}
\item{breaks}{One of:
\itemize{
\item \code{NULL} for no breaks
\item \code{waiver()} for the default breaks computed by the
transformation object
\item A character vector of breaks
\item A function that takes the limits as input and returns breaks
as output
}}
\item{limits}{A character vector that defines possible values of the scale
and their order.}
\item{drop}{Should unused factor levels be omitted from the scale?
The default, \code{TRUE}, uses the levels that appear in the data;
\code{FALSE} uses all the levels in the factor.}
\item{na.translate}{Unlike continuous scales, discrete scales can easily show
missing values, and do so by default. If you want to remove missing values
from a discrete scale, specify \code{na.translate = FALSE}.}
\item{na.value}{If \code{na.translate = TRUE}, what value aesthetic
value should missing be displayed as? Does not apply to position scales
where \code{NA} is always placed at the far right.}
\item{scale_name}{The name of the scale}
\item{name}{The name of the scale. Used as the axis or legend title. If
\code{waiver()}, the default, the name of the scale is taken from the first
mapping used for that aesthetic. If \code{NULL}, the legend title will be
omitted.}
\item{labels}{One of:
\itemize{
\item \code{NULL} for no labels
\item \code{waiver()} for the default labels computed by the
transformation object
\item A character vector giving labels (must be same length as \code{breaks})
\item A function that takes the breaks as input and returns labels
as output
}}
\item{expand}{Vector of range expansion constants used to add some
padding around the data, to ensure that they are placed some distance
away from the axes. Use the convenience function \code{\link[=expand_scale]{expand_scale()}}
to generate the values for the \code{expand} argument. The defaults are to
expand the scale by 5\% on each side for continuous variables, and by
0.6 units on each side for discrete variables.}
\item{guide}{A function used to create a guide or its name. See
\code{\link[=guides]{guides()}} for more info.}
\item{position}{The position of the axis. "left" or "right" for vertical
scales, "top" or "bottom" for horizontal scales}
\item{super}{The super class to use for the constructed scale}
}}
}
\description{
An eight-color colorblind safe qualitative discrete palette.
}
\examples{
library("ggplot2")
library("scales")
show_col(colorblind_pal()(8))
p <- ggplot(mtcars) + geom_point(aes(x = wt, y = mpg,
colour = factor(gear))) + facet_wrap(~am)
p + theme_igray() + scale_colour_colorblind()
}
\references{
Chang, W. "\href{http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/#a-colorblind-friendly-palette}{Cookbook for R}"
\url{http://jfly.iam.u-tokyo.ac.jp/color}
}
\seealso{
The \pkg{dichromat} package, \code{\link[scales]{dichromat_pal}()},
and \code{\link{scale_color_tableau}()} for other colorblind palettes.
}
\concept{colour}
|
/man/colorblind.Rd
|
no_license
|
quartin/ggthemes
|
R
| false
| true
| 3,560
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colorblind.R
\name{colorblind_pal}
\alias{colorblind_pal}
\alias{scale_colour_colorblind}
\alias{scale_color_colorblind}
\alias{scale_fill_colorblind}
\title{Colorblind Color Palette (Discrete) and Scales}
\usage{
colorblind_pal()
scale_colour_colorblind(...)
scale_color_colorblind(...)
scale_fill_colorblind(...)
}
\arguments{
\item{...}{Arguments passed on to \code{discrete_scale}
\describe{
\item{palette}{A palette function that when called with a single integer
argument (the number of levels in the scale) returns the values that
they should take.}
\item{breaks}{One of:
\itemize{
\item \code{NULL} for no breaks
\item \code{waiver()} for the default breaks computed by the
transformation object
\item A character vector of breaks
\item A function that takes the limits as input and returns breaks
as output
}}
\item{limits}{A character vector that defines possible values of the scale
and their order.}
\item{drop}{Should unused factor levels be omitted from the scale?
The default, \code{TRUE}, uses the levels that appear in the data;
\code{FALSE} uses all the levels in the factor.}
\item{na.translate}{Unlike continuous scales, discrete scales can easily show
missing values, and do so by default. If you want to remove missing values
from a discrete scale, specify \code{na.translate = FALSE}.}
\item{na.value}{If \code{na.translate = TRUE}, what value aesthetic
value should missing be displayed as? Does not apply to position scales
where \code{NA} is always placed at the far right.}
\item{scale_name}{The name of the scale}
\item{name}{The name of the scale. Used as the axis or legend title. If
\code{waiver()}, the default, the name of the scale is taken from the first
mapping used for that aesthetic. If \code{NULL}, the legend title will be
omitted.}
\item{labels}{One of:
\itemize{
\item \code{NULL} for no labels
\item \code{waiver()} for the default labels computed by the
transformation object
\item A character vector giving labels (must be same length as \code{breaks})
\item A function that takes the breaks as input and returns labels
as output
}}
\item{expand}{Vector of range expansion constants used to add some
padding around the data, to ensure that they are placed some distance
away from the axes. Use the convenience function \code{\link[=expand_scale]{expand_scale()}}
to generate the values for the \code{expand} argument. The defaults are to
expand the scale by 5\% on each side for continuous variables, and by
0.6 units on each side for discrete variables.}
\item{guide}{A function used to create a guide or its name. See
\code{\link[=guides]{guides()}} for more info.}
\item{position}{The position of the axis. "left" or "right" for vertical
scales, "top" or "bottom" for horizontal scales}
\item{super}{The super class to use for the constructed scale}
}}
}
\description{
An eight-color colorblind safe qualitative discrete palette.
}
\examples{
library("ggplot2")
library("scales")
show_col(colorblind_pal()(8))
p <- ggplot(mtcars) + geom_point(aes(x = wt, y = mpg,
colour = factor(gear))) + facet_wrap(~am)
p + theme_igray() + scale_colour_colorblind()
}
\references{
Chang, W. "\href{http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/#a-colorblind-friendly-palette}{Cookbook for R}"
\url{http://jfly.iam.u-tokyo.ac.jp/color}
}
\seealso{
The \pkg{dichromat} package, \code{\link[scales]{dichromat_pal}()},
and \code{\link{scale_color_tableau}()} for other colorblind palettes.
}
\concept{colour}
|
host.dir <- "."
all.counts <- read.table("ESpresso/counttable_es.csv", header=TRUE, row.names=1, colClasses=c("character", rep("integer", 704)))
serum <- sub("ola_mES_([^_]+)_.*", "\\1", colnames(all.counts))
batch <- sub("ola_mES_[^_]+_([^_]+)_.*", "\\1", colnames(all.counts))
targets <- data.frame(Serum=serum, Batch=batch)
# Only using data from two batches.
keep <- targets$Batch %in% c("2", "3")
all.counts <- all.counts[,keep]
targets <- targets[keep,]
targets$Plate <- as.integer(factor(paste0(targets$Serum, targets$Batch)))
targets[] <- lapply(targets, factor)
targets$Serum <- factor(targets$Serum, c("lif", "2i", "a2i"))
# Removing spike-ins.
is.mouse <- grepl("^ENSMUSG", rownames(all.counts))
all.counts <- all.counts[is.mouse,]
# Setting up some grouping levels.
by.group <- targets$Serum
by.plate <- targets$Plate
# Setting up the design.
refdesign <- model.matrix(~Serum + Batch, targets)
# Running parameter estimation.
picdir <- "results_ESpresso"
|
/reference/ESpresso.R
|
no_license
|
jaymgrayson/PlateEffects2016
|
R
| false
| false
| 973
|
r
|
host.dir <- "."
all.counts <- read.table("ESpresso/counttable_es.csv", header=TRUE, row.names=1, colClasses=c("character", rep("integer", 704)))
serum <- sub("ola_mES_([^_]+)_.*", "\\1", colnames(all.counts))
batch <- sub("ola_mES_[^_]+_([^_]+)_.*", "\\1", colnames(all.counts))
targets <- data.frame(Serum=serum, Batch=batch)
# Only using data from two batches.
keep <- targets$Batch %in% c("2", "3")
all.counts <- all.counts[,keep]
targets <- targets[keep,]
targets$Plate <- as.integer(factor(paste0(targets$Serum, targets$Batch)))
targets[] <- lapply(targets, factor)
targets$Serum <- factor(targets$Serum, c("lif", "2i", "a2i"))
# Removing spike-ins.
is.mouse <- grepl("^ENSMUSG", rownames(all.counts))
all.counts <- all.counts[is.mouse,]
# Setting up some grouping levels.
by.group <- targets$Serum
by.plate <- targets$Plate
# Setting up the design.
refdesign <- model.matrix(~Serum + Batch, targets)
# Running parameter estimation.
picdir <- "results_ESpresso"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lightsail_operations.R
\name{lightsail_detach_instances_from_load_balancer}
\alias{lightsail_detach_instances_from_load_balancer}
\title{Detaches the specified instances from a Lightsail load balancer}
\usage{
lightsail_detach_instances_from_load_balancer(loadBalancerName,
instanceNames)
}
\arguments{
\item{loadBalancerName}{[required] The name of the Lightsail load balancer.}
\item{instanceNames}{[required] An array of strings containing the names of the instances you want to
detach from the load balancer.}
}
\description{
Detaches the specified instances from a Lightsail load balancer.
This operation waits until the instances are no longer needed before
they are detached from the load balancer.
The \verb{detach instances from load balancer} operation supports tag-based
access control via resource tags applied to the resource identified by
\verb{load balancer name}. For more information, see the Lightsail Dev Guide.
}
\section{Request syntax}{
\preformatted{svc$detach_instances_from_load_balancer(
loadBalancerName = "string",
instanceNames = list(
"string"
)
)
}
}
\keyword{internal}
|
/paws/man/lightsail_detach_instances_from_load_balancer.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 1,195
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lightsail_operations.R
\name{lightsail_detach_instances_from_load_balancer}
\alias{lightsail_detach_instances_from_load_balancer}
\title{Detaches the specified instances from a Lightsail load balancer}
\usage{
lightsail_detach_instances_from_load_balancer(loadBalancerName,
instanceNames)
}
\arguments{
\item{loadBalancerName}{[required] The name of the Lightsail load balancer.}
\item{instanceNames}{[required] An array of strings containing the names of the instances you want to
detach from the load balancer.}
}
\description{
Detaches the specified instances from a Lightsail load balancer.
This operation waits until the instances are no longer needed before
they are detached from the load balancer.
The \verb{detach instances from load balancer} operation supports tag-based
access control via resource tags applied to the resource identified by
\verb{load balancer name}. For more information, see the Lightsail Dev Guide.
}
\section{Request syntax}{
\preformatted{svc$detach_instances_from_load_balancer(
loadBalancerName = "string",
instanceNames = list(
"string"
)
)
}
}
\keyword{internal}
|
## Matrix inversion is usually a costly computation and there may
## be some benefit to caching the inverse of a matrix rather than
## compute it repeatedly
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned
## by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if (!is.null(inv)) {
message("getting cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat, ...)
x$setInverse(inv)
inv
}
|
/cachematrix.R
|
no_license
|
alfredleung/ProgrammingAssignment2
|
R
| false
| false
| 959
|
r
|
## Matrix inversion is usually a costly computation and there may
## be some benefit to caching the inverse of a matrix rather than
## compute it repeatedly
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## This function computes the inverse of the special "matrix" returned
## by makeCacheMatrix above.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if (!is.null(inv)) {
message("getting cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat, ...)
x$setInverse(inv)
inv
}
|
# Convert the wealth rating to a factor
donors$wealth_rating <- factor(donors$wealth_rating, levels = c(0,1,2,3), labels = c('Unknown','Low','Medium','High'))
# Use relevel() to change reference category (i.e.)
donors$wealth_rating <- relevel(donors$wealth_rating, ref = 'Medium')
# See how our factor coding impacts the model
summary(glm(donated ~ wealth_rating, data = donors, family = 'binomial'))
|
/ml/scripts/transform_examples/factorRecode.R
|
no_license
|
pickle-donut/RScripts
|
R
| false
| false
| 402
|
r
|
# Convert the wealth rating to a factor
donors$wealth_rating <- factor(donors$wealth_rating, levels = c(0,1,2,3), labels = c('Unknown','Low','Medium','High'))
# Use relevel() to change reference category (i.e.)
donors$wealth_rating <- relevel(donors$wealth_rating, ref = 'Medium')
# See how our factor coding impacts the model
summary(glm(donated ~ wealth_rating, data = donors, family = 'binomial'))
|
source("load_data.R")
plot1 <- paste(getwd(),"/plots/plot1.png", sep="")
if(!file.exists(plot1)){
png(plot1, width = 480, height = 480)
hist(as.numeric(as.character(ranged_power$Global_active_power)), main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "red")
dev.off()
} else {
hist(as.numeric(as.character(ranged_power$Global_active_power)), main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "red")
}
|
/plot1.R
|
no_license
|
mountain-lion/exdata-030
|
R
| false
| false
| 518
|
r
|
source("load_data.R")
plot1 <- paste(getwd(),"/plots/plot1.png", sep="")
if(!file.exists(plot1)){
png(plot1, width = 480, height = 480)
hist(as.numeric(as.character(ranged_power$Global_active_power)), main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "red")
dev.off()
} else {
hist(as.numeric(as.character(ranged_power$Global_active_power)), main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "red")
}
|
#!/usr/bin/env Rscript
# detect script being run by snakemake
# if so, make a mock commandArgs function
if ('snakemake' %in% ls()) {
logfile <- snakemake@log[[1]]
con <- file(logfile, 'w')
sink(con, type='output')
sink(con, type='message')
commandArgs <- function(...) unlist(c(
snakemake@input[1], snakemake@output[1]
))
cat('Got command line arguments from snakemake:\n')
print(commandArgs())
}
args <- commandArgs(trailingOnly=TRUE)
if (length(args) != 2) {
stop("usage: make_cosmic_indel_table.R in_cosmic.csv out.csv")
}
cosmic.csv <- args[1]
outcsv <- args[2]
if (file.exists(outcsv))
stop(paste('output file', outcsv, 'already exists, please delete it first'))
library(scan2)
# N.B.
# Unlike COSMIC SBS, the indel database has no " - " values
cosmic <- fread(cosmic.csv, header=T, stringsAsFactors=F)
head(cosmic)
id83 <- levels(id83(c()))
# In the COSMIC database, ID83 channel names are formatted differently
# from SigProfilerMatrixGenerator (spmgr). E.g.:
# COSMIC
# [1] "DEL_C_1_0" "DEL_C_1_1" "DEL_C_1_2" "DEL_C_1_3" "DEL_C_1_4"
# [6] "DEL_C_1_5+" ...
# SPMGR
# [1] "1:Del:C:0" "1:Del:C:1" "1:Del:C:2" "1:Del:C:3" "1:Del:C:4" "1:Del:C:5"
# This function translates COSMIC names -> SPMGR names
id83.cosmic.to.spmgr <- function(x) {
sapply(strsplit(sub('MH', 'M', gsub('\\+', '', gsub('_', ':', sub('repeats', 'R', sub('DEL', 'Del', sub('INS', 'Ins', x)))))), ':'), function(elts) paste(elts[3], elts[1], elts[2], elts[4], sep=':'))
}
cosmic[[1]] <- id83.cosmic.to.spmgr(cosmic[[1]])
colnames(cosmic)[1] <- 'MutType'
setkey(cosmic, MutType)
cosmic <- cosmic[id83]
head(cosmic)
fwrite(cosmic, file=outcsv)
if ('snakemake' %in% ls()) {
sink()
}
|
/snakemake/scripts/make_cosmic_indel_table.R
|
no_license
|
parklab/luquette-glia-analysis
|
R
| false
| false
| 1,732
|
r
|
#!/usr/bin/env Rscript
# detect script being run by snakemake
# if so, make a mock commandArgs function
if ('snakemake' %in% ls()) {
logfile <- snakemake@log[[1]]
con <- file(logfile, 'w')
sink(con, type='output')
sink(con, type='message')
commandArgs <- function(...) unlist(c(
snakemake@input[1], snakemake@output[1]
))
cat('Got command line arguments from snakemake:\n')
print(commandArgs())
}
args <- commandArgs(trailingOnly=TRUE)
if (length(args) != 2) {
stop("usage: make_cosmic_indel_table.R in_cosmic.csv out.csv")
}
cosmic.csv <- args[1]
outcsv <- args[2]
if (file.exists(outcsv))
stop(paste('output file', outcsv, 'already exists, please delete it first'))
library(scan2)
# N.B.
# Unlike COSMIC SBS, the indel database has no " - " values
cosmic <- fread(cosmic.csv, header=T, stringsAsFactors=F)
head(cosmic)
id83 <- levels(id83(c()))
# In the COSMIC database, ID83 channel names are formatted differently
# from SigProfilerMatrixGenerator (spmgr). E.g.:
# COSMIC
# [1] "DEL_C_1_0" "DEL_C_1_1" "DEL_C_1_2" "DEL_C_1_3" "DEL_C_1_4"
# [6] "DEL_C_1_5+" ...
# SPMGR
# [1] "1:Del:C:0" "1:Del:C:1" "1:Del:C:2" "1:Del:C:3" "1:Del:C:4" "1:Del:C:5"
# This function translates COSMIC names -> SPMGR names
id83.cosmic.to.spmgr <- function(x) {
sapply(strsplit(sub('MH', 'M', gsub('\\+', '', gsub('_', ':', sub('repeats', 'R', sub('DEL', 'Del', sub('INS', 'Ins', x)))))), ':'), function(elts) paste(elts[3], elts[1], elts[2], elts[4], sep=':'))
}
cosmic[[1]] <- id83.cosmic.to.spmgr(cosmic[[1]])
colnames(cosmic)[1] <- 'MutType'
setkey(cosmic, MutType)
cosmic <- cosmic[id83]
head(cosmic)
fwrite(cosmic, file=outcsv)
if ('snakemake' %in% ls()) {
sink()
}
|
library(plotly)
library(ggplot2)
score <- read.csv("../data/학생별과목별성적_국영수_new.csv", header=T)
score
p <- ggplot(data = score,
aes(x = 이름, y = 점수, col = 과목)) +
geom_point()
ggplotly(p)
###########################################################################
|
/R/source/20191224_quiz_interactive_graph.R
|
no_license
|
Jade2290/bigdata_class
|
R
| false
| false
| 329
|
r
|
library(plotly)
library(ggplot2)
score <- read.csv("../data/학생별과목별성적_국영수_new.csv", header=T)
score
p <- ggplot(data = score,
aes(x = 이름, y = 점수, col = 과목)) +
geom_point()
ggplotly(p)
###########################################################################
|
# function for lasso feature selection
# Author: Jason Zhao
#Usage:
# 1. Load the function by: source("path/to/this/script")
# 2. call the function by: featureSelectionLasso(yourDataframeWithoutLabel, LabelVector)
#Note:
# dataFrame is a m*n dataframe
# IF m represents features and n represents samples, keep default
# If n represents features and m represents samples, set rowIsFeature to FALSE when calling the function
#package
require(glmnet)
featureSelectionLasso <- function(dataFrame, targetVec, nfolds = 3, alpha = 1, rowIsFeature = TRUE){
#transpose the data
if (rowIsFeature == TRUE){
x <- t(as.matrix(dataFrame))
}
else{
x <- as.matrix(dataFrame)
}
#make target variable to factor
targetVec <- as.factor(targetVec)
#run lasso
fit <- cv.glmnet(x, targetVec, alpha = alpha, family = "binomial", type.measure = "auc", nfolds = nfolds)
#select the features index with weights not zero
tmp_vec <- as.vector((coef(fit, s="lambda.min") != 0))
#get the selected features
if (rowIsFeature == TRUE){
featureSelected <- rownames(dataFrame)[tmp_vec]
}
else{
featureSelected <- colnames(dataFrame)[tmp_vec]
}
#return
return(featureSelected)
}
|
/featureSelectionLasso.R
|
no_license
|
jasonzhao0307/R_lib_jason
|
R
| false
| false
| 1,168
|
r
|
# function for lasso feature selection
# Author: Jason Zhao
#Usage:
# 1. Load the function by: source("path/to/this/script")
# 2. call the function by: featureSelectionLasso(yourDataframeWithoutLabel, LabelVector)
#Note:
# dataFrame is a m*n dataframe
# IF m represents features and n represents samples, keep default
# If n represents features and m represents samples, set rowIsFeature to FALSE when calling the function
#package
require(glmnet)
featureSelectionLasso <- function(dataFrame, targetVec, nfolds = 3, alpha = 1, rowIsFeature = TRUE){
#transpose the data
if (rowIsFeature == TRUE){
x <- t(as.matrix(dataFrame))
}
else{
x <- as.matrix(dataFrame)
}
#make target variable to factor
targetVec <- as.factor(targetVec)
#run lasso
fit <- cv.glmnet(x, targetVec, alpha = alpha, family = "binomial", type.measure = "auc", nfolds = nfolds)
#select the features index with weights not zero
tmp_vec <- as.vector((coef(fit, s="lambda.min") != 0))
#get the selected features
if (rowIsFeature == TRUE){
featureSelected <- rownames(dataFrame)[tmp_vec]
}
else{
featureSelected <- colnames(dataFrame)[tmp_vec]
}
#return
return(featureSelected)
}
|
% Auto-generated documentation for function plot.pltdTable
% 2021-06-02 11:12:19
\name{plot.pltdTable}
\alias{plot.pltdTable}
\title{Display a \code{pltdtable} Object on a Graphics Device }
\description{
Display a \code{pltdTable} object on a graphics device. This is an alias
for \code{print.pltdTable}. This is an internal function, not intended to
be called by package users.
}
\usage{
\method{plot}{pltdTable}(x, ...)
}
\arguments{
\item{x}{A \code{pltdTable} object, representing a table.
}
\item{...}{Additional arguments passed to \code{print.pltdTable}.
}
}
\value{
\code{x}, invisibly.
}
\seealso{
\code{\link{print.pltdTable}}
}
\keyword{internal}
|
/man/plot.pltdTable.Rd
|
no_license
|
rrprf/tablesgg
|
R
| false
| false
| 671
|
rd
|
% Auto-generated documentation for function plot.pltdTable
% 2021-06-02 11:12:19
\name{plot.pltdTable}
\alias{plot.pltdTable}
\title{Display a \code{pltdtable} Object on a Graphics Device }
\description{
Display a \code{pltdTable} object on a graphics device. This is an alias
for \code{print.pltdTable}. This is an internal function, not intended to
be called by package users.
}
\usage{
\method{plot}{pltdTable}(x, ...)
}
\arguments{
\item{x}{A \code{pltdTable} object, representing a table.
}
\item{...}{Additional arguments passed to \code{print.pltdTable}.
}
}
\value{
\code{x}, invisibly.
}
\seealso{
\code{\link{print.pltdTable}}
}
\keyword{internal}
|
#######################
#gating_LTEE_GAP1_Variants.R
#
#started: 01/07/2016
#modified: 1/25/2019
#
#author1: G Avecilla, S Lauer, D Gresham
#author2: N Brandt
######################
######################
#This script is specific for analyzing the data obtained in LTEE_GAP1_Variants in Gln,
#For any other purpose, the script must be modified accordingly.
###########################################################################################################################
#This script is intended to read in .fcs files and perform manual gating for i) single cells, ii) debris and iii) fluorescence
#
#Gating is performed with untransformed data
#
#Individual gates are saved in a file gates.Rdata for use with the Gresham Lab Flow Cytometry Analysis.Rmd pipeline
###########################################################################################################################
##To be run the first time if packages are not installed.
#source("http://bioconductor.org/biocLite.R")
#biocLite("flowViz")
#biocLite("flowCore")
#Load libraries
library(flowCore)
library(flowViz)
library(ggcyto)
library(ggforce)
#Read in the data
#Set working directory to the folder in which you have stored your .fcs files
#Read in all the fcs files in the directory.
#working directory
dir = '.'
#file location
path.data = "/Users/Brandt/Google Drive/MiniStatRun_10_2018/"
#path.data = "/Users/nathanbrandt/Google Drive/MiniStatRun_10_2018/"
#set name of run to create gates for
list.folders <- c("LTEE_mCitrine_GAP1_Variants_T00",
"LTEE_mCitrine_GAP1_Variants_T06",
"LTEE_mCitrine_GAP1_Variants_T07",
"LTEE_mCitrine_GAP1_Variants_T08.3",
#"LTEE_mCitrine_GAP1_Variants_T11.1",
"LTEE_mCitrine_GAP1_Variants_T11.2",
#"LTEE_mCitrine_GAP1_Variants_T13.1",
"LTEE_mCitrine_GAP1_Variants_T13.2",
"LTEE_mCitrine_GAP1_Variants_T14",
"LTEE_mCitrine_GAP1_Variants_T15",
"LTEE_mCitrine_GAP1_Variants_T18",
"LTEE_mCitrine_GAP1_Variants_T22",
"LTEE_mCitrine_GAP1_Variants_T25",
"LTEE_mCitrine_GAP1_Variants_T27",
"LTEE_mCitrine_GAP1_Variants_T29",
"LTEE_mCitrine_GAP1_Variants_T34")
name <- list.folders[1]
#load sample sheet
sample.sheet <- read.csv(paste(path.data,"samplesheet_",name,".csv", sep=""))
#read in fcs files in order presented in sample sheet (based on well identifier)
files <- paste(path.data,name,"/",sort(factor(list.files(paste(path.data,name,"/", sep=""),full.names=FALSE), levels = paste(sample.sheet$Well,".fcs",sep="" ), ordered=TRUE)),sep="")
flowData <- read.ncdfFlowSet(files=files, pattern=".fcs", alter.names = TRUE)
sample.ind <- which(paste(sample.sheet$Well,".fcs", sep="") %in% sampleNames(flowData))
sample.sheet <- sample.sheet[sample.ind,]
sample.sheet <- sample.sheet[order(sample.sheet$Well),]
#rename sample name of flow set to make it easier to identify
sampleNames(flowData) <- paste(gsub(" ","_",sample.sheet$Strain),"_",sub(" ","_",sample.sheet$Well), sep="")
#set copy number controls
zerocopy <- 1
onecopy <- 3
twocopy <- 4
##############################
#1. Generate gate for singlet cells####
#this gate is defined on the basis of the relationship between forward scatter height and area
#******Please note you may need to adjust the x and y plot limits to properly visualize your data
plot(flowData[[zerocopy]], c('FSC.H','FSC.A'), xlim=c(0,3e6), ylim=c(0,3e6),smooth=T)
singlet.gate <- locator(100, type='l', col='red')
gm.1 <- matrix(,length(singlet.gate$x),2)
colnames(gm.1) <- c('FSC.H','FSC.A')
gm.1[,1] <- singlet.gate$x
gm.1[,2] <- singlet.gate$y
pg.singlets <- polygonGate(filterId="singlets",.gate=gm.1)
ggcyto(flowData[zerocopy], aes(x = `FSC.H`, y = `FSC.A`)) + geom_hex(bins = 512) + geom_gate(pg.singlets)
#Look at the gating on the controls
ggcyto(flowData[c(zerocopy,onecopy,twocopy)], aes(x = `FSC.H`, y = `FSC.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,3e6) + geom_gate(pg.singlets) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = 1)
#test that the singlet gate looks reasonable for All samples
for(i in 1:round(length(flowData)/4,0)){
plot <- ggcyto(flowData, aes(x = `FSC.H`, y = `FSC.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,3e6) + geom_gate(pg.singlets) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = i)
print(plot)
}
#Filter out the doublets
flowData.singlets <- Subset(flowData,pg.singlets)
##############################
#2. Generate Gate for debris based on forward scatter and side scatter. ####
#This needs to be done separately for each media condition.
#******Please note you may need to adjust the x and y plot limits to properly visualize your data
plot(flowData.singlets[[zerocopy]], c('FSC.A','SSC.A'), xlim=c(0,3e6), ylim=c(0,1e6),smooth=T)
debris.gate <- locator(100, type='l', col='red')
gm.2 <- matrix(,length(debris.gate$x),2)
colnames(gm.2) <- c('FSC.A','SSC.A')
gm.2[,1] <- debris.gate$x
gm.2[,2] <- debris.gate$y
pg.nondebris <- polygonGate(filterId="nonDebris",.gate=gm.2)
#Look at the gating on the controls
ggcyto(flowData.singlets[c(zerocopy,onecopy,twocopy)], aes(x = `FSC.A`, y = `SSC.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,1e6) + geom_gate(pg.nondebris) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = 1)
#test that the singlet gate looks reasonable for All samples
for(i in 1:round(length(flowData)/4,0)){
plot <- ggcyto(flowData.singlets, aes(x = `FSC.A`, y = `SSC.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,1e6) + geom_gate(pg.nondebris) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = i)
print(plot)
}
#Filter out the debris
flowData.nondebris <- Subset(flowData.singlets,pg.nondebris)
#############################
#3. FLUORESCENCE####
####Generate gates for 0, 1, 2, and 3+ copies####
#******Please note you may need to adjust the x and y plot limits to properly visualize your data
##Plot the control sample that has non-fluorescing cells (0 copy)
plot(flowData.nondebris[[zerocopy]], c('FSC.A','FL1.A'), xlim=c(0,3e6), ylim=c(0,5e4),smooth=T)
#ggcyto(flowData[zerocopy], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512)
zero.gate <- locator(100, type='l', col='red')
gm.3 <- matrix(,length(zero.gate$x),2)
colnames(gm.3) <- c('FSC.A','FL1.A')
gm.3[,1] <- zero.gate$x
gm.3[,2] <- zero.gate$y
fl1gate.0 <- polygonGate(filterId="zeroFL1",.gate=gm.3)
#Look at the gating on the controls
ggcyto(flowData.nondebris[c(zerocopy,onecopy,twocopy)], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,5e4) + geom_gate(fl1gate.0) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = 1)
for(i in 1:round(length(flowData)/4,0)){
plot <- ggcyto(flowData, aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,5e4) + geom_gate(fl1gate.0) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = i)
print(plot)
}
##Draw a new gate for the one copy include the gate for zero copies
plot(flowData.nondebris[[onecopy]], c('FSC.A','FL1.A'), xlim=c(0,3e6), ylim=c(0,5e5),smooth=T)
polygon(zero.gate)
one.gate <- locator(100, type='l', col='blue')
gm.4 <- matrix(,length(one.gate$x),2)
colnames(gm.4) <- c('FSC.A','FL1.A')
gm.4[,1] <- one.gate$x
gm.4[,2] <- one.gate$y
fl1gate.1 <- polygonGate(filterId="oneCopyFL1",.gate=gm.4)
##Overlay and check the new gate
ggcyto(flowData.nondebris[onecopy], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + geom_gate(fl1gate.0) + geom_gate(fl1gate.1)
ggcyto(flowData.nondebris[c(onecopy,twocopy)], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + geom_gate(fl1gate.0) + geom_gate(fl1gate.1)
##Plot the control sample that has 2 copies along with the one and zero copy gates and draw a new gate for two copy
plot(flowData.nondebris[[twocopy]], c('FSC.A','FL1.A'), xlim=c(0,2e6), ylim=c(0,5e5),smooth=T)
polygon(zero.gate)
polygon(one.gate)
two.gate <- locator(100, type='l', col='green')
gm.5 <- matrix(,length(two.gate$x),2)
colnames(gm.5) <- c('FSC.A','FL1.A')
gm.5[,1] <- two.gate$x
gm.5[,2] <- two.gate$y
fl1gate.2 <- polygonGate(filterId="twoCopyFL1",.gate=gm.5)
##Overlay and check the new gate
ggcyto(flowData.nondebris[twocopy], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + geom_gate(fl1gate.0) + geom_gate(fl1gate.1) + geom_gate(fl1gate.2)
##Plot the control sample that has 2 copies along with the two, one, and zero copy gates and draw a new gate for more then 2 copies
plot(flowData.nondebris[[twocopy]], c('FSC.A','FL1.A'), xlim=c(0,3e6), ylim=c(0,1e6), smooth=T)
polygon(zero.gate)
polygon(one.gate)
polygon(two.gate)
three.gate <- locator(10, type='l', col='purple')
gm.6 <- matrix(,length(three.gate$x),2)
colnames(gm.6) <- c('FSC.A','FL1.A')
gm.6[,1] <- three.gate$x
gm.6[,2] <- three.gate$y
fl1gate.3 <- polygonGate(filterId="2plusCopyFL1",.gate=gm.6)
#Look at the gating on the controls
ggcyto(flowData.nondebris[c(zerocopy,onecopy,twocopy)], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,3e6) + geom_gate(fl1gate.0) + geom_gate(fl1gate.1) + geom_gate(fl1gate.2) + geom_gate(fl1gate.3) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = 1)
##Check how the gates look on all the samples
for(i in 1:round(length(flowData)/4,0)){
plot <- ggcyto(flowData, aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,3e6) + geom_gate(fl1gate.0) + geom_gate(fl1gate.1) + geom_gate(fl1gate.2) + geom_gate(fl1gate.3) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = 1)
print(plot)
}
#Save the gate information to an R data file
rm(list=c("flowData"))
save(pg.singlets, pg.nondebris, fl1gate.0, fl1gate.1, fl1gate.2, fl1gate.3, file=paste(name,"_gates_",Sys.Date(),".Rdata",sep=""))
|
/gating_LTEE_GAP1_VAR.R
|
no_license
|
brandtn/Flow_Cyto_Analysis
|
R
| false
| false
| 9,894
|
r
|
#######################
#gating_LTEE_GAP1_Variants.R
#
#started: 01/07/2016
#modified: 1/25/2019
#
#author1: G Avecilla, S Lauer, D Gresham
#author2: N Brandt
######################
######################
#This script is specific for analyzing the data obtained in LTEE_GAP1_Variants in Gln,
#For any other purpose, the script must be modified accordingly.
###########################################################################################################################
#This script is intended to read in .fcs files and perform manual gating for i) single cells, ii) debris and iii) fluorescence
#
#Gating is performed with untransformed data
#
#Individual gates are saved in a file gates.Rdata for use with the Gresham Lab Flow Cytometry Analysis.Rmd pipeline
###########################################################################################################################
##To be run the first time if packages are not installed.
#source("http://bioconductor.org/biocLite.R")
#biocLite("flowViz")
#biocLite("flowCore")
#Load libraries
library(flowCore)
library(flowViz)
library(ggcyto)
library(ggforce)
#Read in the data
#Set working directory to the folder in which you have stored your .fcs files
#Read in all the fcs files in the directory.
#working directory
dir = '.'
#file location
path.data = "/Users/Brandt/Google Drive/MiniStatRun_10_2018/"
#path.data = "/Users/nathanbrandt/Google Drive/MiniStatRun_10_2018/"
#set name of run to create gates for
list.folders <- c("LTEE_mCitrine_GAP1_Variants_T00",
"LTEE_mCitrine_GAP1_Variants_T06",
"LTEE_mCitrine_GAP1_Variants_T07",
"LTEE_mCitrine_GAP1_Variants_T08.3",
#"LTEE_mCitrine_GAP1_Variants_T11.1",
"LTEE_mCitrine_GAP1_Variants_T11.2",
#"LTEE_mCitrine_GAP1_Variants_T13.1",
"LTEE_mCitrine_GAP1_Variants_T13.2",
"LTEE_mCitrine_GAP1_Variants_T14",
"LTEE_mCitrine_GAP1_Variants_T15",
"LTEE_mCitrine_GAP1_Variants_T18",
"LTEE_mCitrine_GAP1_Variants_T22",
"LTEE_mCitrine_GAP1_Variants_T25",
"LTEE_mCitrine_GAP1_Variants_T27",
"LTEE_mCitrine_GAP1_Variants_T29",
"LTEE_mCitrine_GAP1_Variants_T34")
name <- list.folders[1]
#load sample sheet
sample.sheet <- read.csv(paste(path.data,"samplesheet_",name,".csv", sep=""))
#read in fcs files in order presented in sample sheet (based on well identifier)
files <- paste(path.data,name,"/",sort(factor(list.files(paste(path.data,name,"/", sep=""),full.names=FALSE), levels = paste(sample.sheet$Well,".fcs",sep="" ), ordered=TRUE)),sep="")
flowData <- read.ncdfFlowSet(files=files, pattern=".fcs", alter.names = TRUE)
sample.ind <- which(paste(sample.sheet$Well,".fcs", sep="") %in% sampleNames(flowData))
sample.sheet <- sample.sheet[sample.ind,]
sample.sheet <- sample.sheet[order(sample.sheet$Well),]
#rename sample name of flow set to make it easier to identify
sampleNames(flowData) <- paste(gsub(" ","_",sample.sheet$Strain),"_",sub(" ","_",sample.sheet$Well), sep="")
#set copy number controls
zerocopy <- 1
onecopy <- 3
twocopy <- 4
##############################
#1. Generate gate for singlet cells####
#this gate is defined on the basis of the relationship between forward scatter height and area
#******Please note you may need to adjust the x and y plot limits to properly visualize your data
plot(flowData[[zerocopy]], c('FSC.H','FSC.A'), xlim=c(0,3e6), ylim=c(0,3e6), smooth=TRUE)
# Interactively outline the singlet population (up to 100 clicked vertices).
singlet.gate <- locator(100, type='l', col='red')
# Assemble the clicked vertices into a two-column matrix for polygonGate().
gm.1 <- matrix(, length(singlet.gate$x), 2)
colnames(gm.1) <- c('FSC.H','FSC.A')
gm.1[,1] <- singlet.gate$x
gm.1[,2] <- singlet.gate$y
pg.singlets <- polygonGate(filterId="singlets", .gate=gm.1)
ggcyto(flowData[zerocopy], aes(x = `FSC.H`, y = `FSC.A`)) + geom_hex(bins = 512) + geom_gate(pg.singlets)
#Look at the gating on the controls
ggcyto(flowData[c(zerocopy,onecopy,twocopy)], aes(x = `FSC.H`, y = `FSC.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,3e6) + geom_gate(pg.singlets) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = 1)
# Check the singlet gate on ALL samples, 4 panels per page.
# BUGFIX: seq_len(ceiling(...)) replaces 1:round(...,0). round() can drop the
# final partial page (e.g. 10 samples -> round(2.5) = 2 pages, not 3), and
# with fewer than 2 samples 1:round(...) evaluates to c(1, 0) and requests an
# invalid page 0.
for(i in seq_len(ceiling(length(flowData)/4))){
  plot <- ggcyto(flowData, aes(x = `FSC.H`, y = `FSC.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,3e6) + geom_gate(pg.singlets) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = i)
  print(plot)
}
#Filter out the doublets
flowData.singlets <- Subset(flowData, pg.singlets)
##############################
#2. Generate Gate for debris based on forward scatter and side scatter. ####
#This needs to be done separately for each media condition.
#******Please note you may need to adjust the x and y plot limits to properly visualize your data
plot(flowData.singlets[[zerocopy]], c('FSC.A','SSC.A'), xlim=c(0,3e6), ylim=c(0,1e6), smooth=TRUE)
# Interactively outline the non-debris population.
debris.gate <- locator(100, type='l', col='red')
gm.2 <- matrix(, length(debris.gate$x), 2)
colnames(gm.2) <- c('FSC.A','SSC.A')
gm.2[,1] <- debris.gate$x
gm.2[,2] <- debris.gate$y
pg.nondebris <- polygonGate(filterId="nonDebris", .gate=gm.2)
#Look at the gating on the controls
ggcyto(flowData.singlets[c(zerocopy,onecopy,twocopy)], aes(x = `FSC.A`, y = `SSC.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,1e6) + geom_gate(pg.nondebris) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = 1)
# Check the debris gate on ALL samples, 4 panels per page.
# BUGFIX: seq_len(ceiling(...)) replaces 1:round(...,0); round() can drop the
# final partial page and 1:round(...) misbehaves for fewer than 2 samples.
for(i in seq_len(ceiling(length(flowData)/4))){
  plot <- ggcyto(flowData.singlets, aes(x = `FSC.A`, y = `SSC.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,1e6) + geom_gate(pg.nondebris) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = i)
  print(plot)
}
#Filter out the debris
flowData.nondebris <- Subset(flowData.singlets, pg.nondebris)
#############################
#3. FLUORESCENCE####
####Generate gates for 0, 1, 2, and 3+ copies####
#******Please note you may need to adjust the x and y plot limits to properly visualize your data
##Plot the control sample that has non-fluorescing cells (0 copy)
plot(flowData.nondebris[[zerocopy]], c('FSC.A','FL1.A'), xlim=c(0,3e6), ylim=c(0,5e4), smooth=TRUE)
#ggcyto(flowData[zerocopy], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512)
# Interactively draw the zero-copy (non-fluorescing) gate.
zero.gate <- locator(100, type='l', col='red')
gm.3 <- matrix(, length(zero.gate$x), 2)
colnames(gm.3) <- c('FSC.A','FL1.A')
gm.3[,1] <- zero.gate$x
gm.3[,2] <- zero.gate$y
fl1gate.0 <- polygonGate(filterId="zeroFL1", .gate=gm.3)
#Look at the gating on the controls
ggcyto(flowData.nondebris[c(zerocopy,onecopy,twocopy)], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,5e4) + geom_gate(fl1gate.0) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = 1)
# Page through all samples, 4 per page.
# BUGFIX: seq_len(ceiling(...)) replaces 1:round(...,0); round() can drop the
# final partial page and 1:round(...) misbehaves for fewer than 2 samples.
# NOTE(review): this loop plots the unfiltered flowData although the gate was
# drawn on flowData.nondebris -- presumably intentional (view on all events),
# but confirm.
for(i in seq_len(ceiling(length(flowData)/4))){
  plot <- ggcyto(flowData, aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,5e4) + geom_gate(fl1gate.0) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = i)
  print(plot)
}
##Draw a new gate for the one copy include the gate for zero copies
plot(flowData.nondebris[[onecopy]], c('FSC.A','FL1.A'), xlim=c(0,3e6), ylim=c(0,5e5), smooth=TRUE)
polygon(zero.gate)
one.gate <- locator(100, type='l', col='blue')
gm.4 <- matrix(, length(one.gate$x), 2)
colnames(gm.4) <- c('FSC.A','FL1.A')
gm.4[,1] <- one.gate$x
gm.4[,2] <- one.gate$y
fl1gate.1 <- polygonGate(filterId="oneCopyFL1", .gate=gm.4)
##Overlay and check the new gate
ggcyto(flowData.nondebris[onecopy], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + geom_gate(fl1gate.0) + geom_gate(fl1gate.1)
ggcyto(flowData.nondebris[c(onecopy,twocopy)], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + geom_gate(fl1gate.0) + geom_gate(fl1gate.1)
##Plot the control sample that has 2 copies along with the one and zero copy gates and draw a new gate for two copy
plot(flowData.nondebris[[twocopy]], c('FSC.A','FL1.A'), xlim=c(0,2e6), ylim=c(0,5e5), smooth=TRUE)
polygon(zero.gate)
polygon(one.gate)
two.gate <- locator(100, type='l', col='green')
gm.5 <- matrix(, length(two.gate$x), 2)
colnames(gm.5) <- c('FSC.A','FL1.A')
gm.5[,1] <- two.gate$x
gm.5[,2] <- two.gate$y
fl1gate.2 <- polygonGate(filterId="twoCopyFL1", .gate=gm.5)
##Overlay and check the new gate
ggcyto(flowData.nondebris[twocopy], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + geom_gate(fl1gate.0) + geom_gate(fl1gate.1) + geom_gate(fl1gate.2)
##Plot the control sample that has 2 copies along with the two, one, and zero copy gates and draw a new gate for more then 2 copies
plot(flowData.nondebris[[twocopy]], c('FSC.A','FL1.A'), xlim=c(0,3e6), ylim=c(0,1e6), smooth=TRUE)
polygon(zero.gate)
polygon(one.gate)
polygon(two.gate)
# NOTE(review): only 10 vertices allowed here vs 100 for the other gates --
# increase if the 2+ copy region needs a finer outline.
three.gate <- locator(10, type='l', col='purple')
gm.6 <- matrix(, length(three.gate$x), 2)
colnames(gm.6) <- c('FSC.A','FL1.A')
gm.6[,1] <- three.gate$x
gm.6[,2] <- three.gate$y
fl1gate.3 <- polygonGate(filterId="2plusCopyFL1", .gate=gm.6)
#Look at the gating on the controls
ggcyto(flowData.nondebris[c(zerocopy,onecopy,twocopy)], aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,3e6) + geom_gate(fl1gate.0) + geom_gate(fl1gate.1) + geom_gate(fl1gate.2) + geom_gate(fl1gate.3) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = 1)
##Check how the gates look on all the samples
# BUGFIX: the original loop passed page = 1 on every iteration, so the loop
# variable i was never used and only the first page was ever drawn; it also
# used 1:round(...,0), replaced by seq_len(ceiling(...)) as in the sections
# above.
for(i in seq_len(ceiling(length(flowData)/4))){
  plot <- ggcyto(flowData, aes(x = `FSC.A`, y = `FL1.A`)) + geom_hex(bins = 512) + xlim(0,3e6) + ylim(0,3e6) + geom_gate(fl1gate.0) + geom_gate(fl1gate.1) + geom_gate(fl1gate.2) + geom_gate(fl1gate.3) + facet_wrap_paginate(~name, ncol = 2, nrow = 2, page = i)
  print(plot)
}
#Save the gate information to an R data file
rm(list=c("flowData"))
save(pg.singlets, pg.nondebris, fl1gate.0, fl1gate.1, fl1gate.2, fl1gate.3, file=paste(name,"_gates_",Sys.Date(),".Rdata",sep=""))
|
#' read_Haarlem
#'
#' Read station pressure observations recorded at the Haarlem station.
#'
#' @param infile input file name
#'
#' @keywords util
#' @export
read_Haarlem <- function(infile){
  # Raw table: 54 metadata lines are skipped, columns are comma separated.
  rawdata <- read.table(infile, sep=',', skip=54, header=TRUE,
                        stringsAsFactors=FALSE)
  # Decompose the YYYYMMDD integer into its date components.
  datestamp <- rawdata$YYYYMMDD
  obs.year <- floor(datestamp/10000)
  obs.month <- floor((datestamp%%10000)/100)
  obs.day <- datestamp%%100
  # Pressure digits: characters 1-2 and 3-4 of P are the first two pressure
  # components; a 5th digit (if present) is scaled by 3 for the third
  # component. Padding with '-' makes a missing 5th digit yield NA.
  p.chr <- as.character(rawdata$P)
  p.first <- as.numeric(substr(p.chr, 1, 2))
  p.second <- as.numeric(substr(p.chr, 3, 4))
  p.third <- as.numeric(substr(paste0(p.chr, '-'), 5, 5))*3
  ## assemble the standard output format
  out <- data.frame(Station='Haarlem',
                    Year=obs.year,
                    Month=obs.month,
                    Day=obs.day,
                    Time=c('08:00', '13:00', '22:00')[as.numeric(rawdata$M)],
                    P.1=p.first,
                    P.2=p.second,
                    P.3=p.third,
                    P.units='English inches',
                    TA=floor(rawdata$T/10),
                    TA.units='F',
                    stringsAsFactors=FALSE)
  print("Only use integer temperatures as digit after decimal point is unclear")
  return(out)
}
|
/R/read_Haarlem.R
|
no_license
|
jonasbhend/pressurehelper
|
R
| false
| false
| 1,103
|
r
|
#' read_Haarlem
#'
#' read station pressure at Haarlem station
#'
#' @param infile input file name
#'
#' @keywords util
#' @export
read_Haarlem <- function(infile){
# read in data
# 54 metadata lines are skipped; the remaining table is comma separated.
rawdata <- read.table(infile, sep=',', skip=54, header=TRUE, stringsAsFactors=FALSE)
## convert to output format
# YYYYMMDD is split arithmetically into Year/Month/Day; M (1..3) selects the
# observation time; P is split digit-wise into three pressure components (a
# missing 5th digit yields NA in P.3); TA keeps only the integer part of
# T/10 because the decimal digit is considered unreliable (see print below).
out <- data.frame(Station='Haarlem',
Year=floor(rawdata$YYYYMMDD/10000),
Month=floor((rawdata$YYYYMMDD%%10000)/100),
Day=rawdata$YYYYMMDD%%100,
Time=c('08:00', '13:00', '22:00')[as.numeric(rawdata$M)],
P.1=as.numeric(substr(as.character(rawdata$P), 1, 2)),
P.2=as.numeric(substr(as.character(rawdata$P), 3, 4)),
P.3=as.numeric(substr(paste0(as.character(rawdata$P), '-'), 5, 5))*3,
P.units='English inches',
TA=floor(rawdata$T/10),
TA.units='F',
stringsAsFactors=FALSE)
print("Only use integer temperatures as digit after decimal point is unclear")
return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rPsuedoWishart.R
\name{rPsuedoWishart}
\alias{rPsuedoWishart}
\title{Random Pseudo-Wishart Matrix}
\usage{
rPsuedoWishart(n, df, Sigma, covariance = FALSE, simplify = "array")
}
\arguments{
\item{n}{integer: the number of replications.}
\item{df}{numeric parameter, \dQuote{degrees of freedom}.}
\item{Sigma}{positive definite (\eqn{p\times p}{p * p}) \dQuote{scale}
matrix, the matrix parameter of the distribution.}
\item{covariance}{logical on whether a covariance matrix should be generated}
\item{simplify}{logical or character string; should the result be
simplified to a vector, matrix or higher dimensional array if
possible? For \code{sapply} it must be named and not abbreviated.
The default value, \code{TRUE}, returns a vector or matrix if appropriate,
whereas if \code{simplify = "array"} the result may be an
\code{\link{array}} of \dQuote{rank}
(\eqn{=}\code{length(dim(.))}) one higher than the result
of \code{FUN(X[[i]])}.}
}
\value{
A numeric array of dimension \code{p * p * n}, where each array is a positive semidefinite matrix, a realization of the Wishart distribution W_p(Sigma, df)
}
\description{
Generate \code{n} random matrices, distributed according to the Wishart distribution with parameters \code{Sigma} and \code{df}, W_p(Sigma, df).
}
\details{
If X_1, ..., X_m is a sample of m independent multivariate Gaussians with mean vector 0, and covariance matrix Sigma,
the distribution of M = X'X is W_p(Sigma, m).
}
\examples{
rPsuedoWishart(2, 5, diag(1, 20))
}
\references{
Diaz-Garcia, Jose A, Ramon Gutierrez Jaimez, and Kanti V Mardia. 1997. “Wishart and Pseudo-Wishart Distributions and Some Applications to Shape Theory.” Journal of Multivariate Analysis 63 (1): 73–87. doi:10.1006/jmva.1997.1689.
}
|
/man/rPsuedoWishart.Rd
|
no_license
|
BenBarnard/rWishart
|
R
| false
| true
| 1,860
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rPsuedoWishart.R
\name{rPsuedoWishart}
\alias{rPsuedoWishart}
\title{Random Pseudo-Wishart Matrix}
\usage{
rPsuedoWishart(n, df, Sigma, covariance = FALSE, simplify = "array")
}
\arguments{
\item{n}{integer: the number of replications.}
\item{df}{numeric parameter, \dQuote{degrees of freedom}.}
\item{Sigma}{positive definite (\eqn{p\times p}{p * p}) \dQuote{scale}
matrix, the matrix parameter of the distribution.}
\item{covariance}{logical on whether a covariance matrix should be generated}
\item{simplify}{logical or character string; should the result be
simplified to a vector, matrix or higher dimensional array if
possible? For \code{sapply} it must be named and not abbreviated.
The default value, \code{TRUE}, returns a vector or matrix if appropriate,
whereas if \code{simplify = "array"} the result may be an
\code{\link{array}} of \dQuote{rank}
(\eqn{=}\code{length(dim(.))}) one higher than the result
of \code{FUN(X[[i]])}.}
}
\value{
A numeric array of dimension \code{p * p * n}, where each array is a positive semidefinite matrix, a realization of the Wishart distribution W_p(Sigma, df)
}
\description{
Generate \code{n} random matrices, distributed according to the Wishart distribution with parameters \code{Sigma} and \code{df}, W_p(Sigma, df).
}
\details{
If X_1, ..., X_m is a sample of m independent multivariate Gaussians with mean vector 0, and covariance matrix Sigma,
the distribution of M = X'X is W_p(Sigma, m).
}
\examples{
rPsuedoWishart(2, 5, diag(1, 20))
}
\references{
Diaz-Garcia, Jose A, Ramon Gutierrez Jaimez, and Kanti V Mardia. 1997. “Wishart and Pseudo-Wishart Distributions and Some Applications to Shape Theory.” Journal of Multivariate Analysis 63 (1): 73–87. doi:10.1006/jmva.1997.1689.
}
|
# copy_webapps_templates.r
# INSTRUCTIONS
# Run this script one time only. This will download the WebApps'
# template files to your local repository. You will then edit the files using
# RStudio. When editing, save, commit, and push all files normally and they will
# automatically be updated on the WebApp every hour (XX:00) every day.
# To edit the equations displayed on the WebApp's goals page, you will edit
# conf/goals.Rmd. The equations are written in a format similar to LaTeX; use
# current equations as a syntax guide. To view the rendered equations, press the
# 'Knit HTML' button when working with goals.Rmd.
# Learn more about LaTeX syntax at https://en.wikibooks.org/wiki/LaTeX/Mathematics.
# Learn about .Rmd formatting at http://shiny.rstudio.com/articles/rmarkdown.html
# To edit the text content that is displayed on your WebApp, edit only the text
# headers and descriptions; all other information is automatically rendered with
# the most recent content in rgn_labels.csv, layers.csv, goals.Rmd, and
# scores.csv and your changes will be overwritten. Do not change any of the
# formatting or spacing.
# install packages to render goals.Rmd
devtools::install_github("rstudio/rmarkdown")

# setup for copying WebApp template files
library(httr)
dir_gh = '~/github/ohibc/webapps_templates'
url_loc = 'https://raw.githubusercontent.com/OHI-Science/ohi-webapps/master/results'

# create a webapps_templates folder (no-op if it already exists)
dir.create(dir_gh, recursive=T, showWarnings=F)

# template files to fetch; each one renders a WebApp page
template_files = c(
  'regions.brew.md', # ohi-science.org/ohibc/regions -> renders with layers/rgn_labels.csv
  'layers.brew.md',  # ohi-science.org/ohibc/layers -> renders with layers.csv
  'goals.brew.md',   # ohi-science.org/ohibc/goals -> renders with conf/goals.Rmd
  'scores.brew.md'   # ohi-science.org/ohibc/scores -> renders with scores.csv
)

# download each template and write the raw response bytes to the local copy
for (tmpl in template_files){
  src = file.path(url_loc, tmpl)
  dest = file.path(dir_gh, tmpl)
  writeBin(httr::content(GET(src)), dest)
}
|
/copy_webapps_templates.r
|
no_license
|
eblondel/ohibc
|
R
| false
| false
| 2,019
|
r
|
# copy_webapps_templates.r
# INSTRUCTIONS
# Run this script one time only. This will download the WebApps'
# template files to your local repository. You will then edit the files using
# RStudio. When editing, save, commit, and push all files normally and they will
# automatically be updated on the WebApp every hour (XX:00) every day.
# To edit the equations displayed on the WebApp's goals page, you will edit
# conf/goals.Rmd. The equations are written in a format similar to LaTex; use
# current equations as a syntax guide. To view the rendered equations, press the
# 'Knit HTML' button when working with goals.Rmd.
# Learn more about LaTex syntax at https://en.wikibooks.org/wiki/LaTeX/Mathematics.
# Learn about .Rmd formatting at http://shiny.rstudio.com/articles/rmarkdown.html
# To edit the text content that is displayed on your WebApp, edit only the text
# headers and descriptions; all other information is automatically rendered with
# the most recent content in rgn_labels.csv, layers.csv, goals.Rmd, and
# scores.csv and your changes will be overwritten. Do not change any of the
# formatting or spacing.
# install packages to render goals.Rmd
devtools::install_github("rstudio/rmarkdown")
# setup for copying WebApp template files
library(httr)
dir_gh = '~/github/ohibc/webapps_templates'
url_loc = 'https://raw.githubusercontent.com/OHI-Science/ohi-webapps/master/results'
# create a webapps_templates folder
# (showWarnings=F makes this a silent no-op if the folder already exists)
dir.create(dir_gh, recursive=T, showWarnings=F)
# download template files
for (f in c('regions.brew.md', # ohi-science.org/ohibc/regions -> renders with layers/rgn_labels.csv
'layers.brew.md', # ohi-science.org/ohibc/layers -> renders with layers.csv
'goals.brew.md', # ohi-science.org/ohibc/goals -> renders with conf/goals.Rmd
'scores.brew.md' # ohi-science.org/ohibc/scores -> renders with scores.csv
)){
url_in = file.path(url_loc, f)
f_out = file.path(dir_gh, f)
# GET the raw file and write the response body bytes to the local path
writeBin(httr::content(GET(url_in)), f_out)
}
|
## Plot 3
# Read the pre-filtered household power consumption data (expects columns
# Sub_metering_1..3 at 1-minute resolution over two days -- TODO confirm
# hhpc.txt layout).
hhPc <- read.table("hhpc.txt", header = TRUE)

# Construct Plot 3: the three sub-metering series as overlaid line plots.
plot(hhPc$Sub_metering_1, type = "l", col = "black", ann = FALSE, xaxt = "n")
lines(hhPc$Sub_metering_2, type = "l", col = "red")
lines(hhPc$Sub_metering_3, type = "l", col = "blue")
# Custom x axis: one tick per day start (1440 one-minute observations/day).
axis(side = 1, at = c(0, 1440, 2880), labels = c("Thu", "Fri", "Sat"))
title(ylab = "Energy sub metering")
# BUGFIX: legend() was called twice (the second call overdrew the first) and
# used pch = "---"/"--", of which only the first character is drawn; a single
# call with lty = 1 shows proper line symbols.
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Copy the screen device to a png file, then close the png device.
# BUGFIX: dev.off(3) hard-coded the device number (breaks if other devices
# are open); after dev.copy() the png device is current, so a plain
# dev.off() closes it. Stray pasted console output (`png`, `3`) removed.
dev.copy(png, file = "plot3.png", width = 480, height = 480)
dev.off()
|
/plot3.R
|
no_license
|
JacGu/Exploratory-Data-Analysis
|
R
| false
| false
| 681
|
r
|
## Plot 3
# Read Data
hhPc<-read.table("hhpc.txt",header=TRUE)
# Construct Plot 3
# Overlay the three sub-metering series; the x axis is drawn manually below.
plot(hhPc$Sub_metering_1,type="l",col="black",ann=FALSE,xaxt="n")
lines(hhPc$Sub_metering_2,type="l",col="red")
lines(hhPc$Sub_metering_3,type="l",col="blue")
# One tick per day start (1440 one-minute observations per day).
axis(side=1,at=c(0,1440,2880),labels=c("Thu","Fri","Sat"))
title(ylab="Energy sub metering")
# NOTE(review): legend() is called twice and the second call overdraws the
# first; pch="---"/"--" only draws the first character -- lty=1 would be the
# conventional way to show line symbols here.
legend("topright",pch="---",col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
legend("topright",pch="--",col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.copy(png,file="plot3.png",width=480,height=480)
# Copy to a png file
# NOTE(review): the bare `png` and `3` lines below look like pasted console
# output, and dev.off(3) hard-codes the device number (fragile if other
# devices are open) -- a plain dev.off() after dev.copy() would suffice.
png
3
dev.off(3)
|
#Multispecies dependent double-observer model (MDAM)
#Authors: Jessie Golding and Danielle Fagre
#Code for analyzing Danielle Fagre's pilot season (summer 2015)
#songbird data from the National Bison Range
###Master code###
#Set working directory
#CHANGE when changing users
# NOTE(review): a hard-coded user path is fragile; consider here::here() or
# an RStudio project so collaborators need not edit this line.
setwd("C:/Users/jessie.golding/Documents/GitHub/MDAM")
#Load packages needed for MDAM
source("run_loadpackages.R")
#Load pilot season data
#Read data from csv file using format file nbr.mdam.R (which cuts down the number of species to 4)
source("nbr.mdam.R")
#Output is dataframe "mdam" with all data from 2015 Pilot Season
# Fit the model; run_MDAM.R is expected to leave its results in `out`,
# which is saved below -- TODO confirm run_MDAM.R defines `out`.
source("run_MDAM.R")
save(out,file="pilot")
###Code for loading the saved output later###
load("pilot")
|
/master.R
|
no_license
|
jgoldfinch/MDAM
|
R
| false
| false
| 716
|
r
|
#Multispecies dependent double-observer model (MDAM)
#Authors: Jessie Golding and Danielle Fagre
#Code for analyzing Danielle Fagre's pilot season (summer 2015)
#songbird data from the National Bison Range
###Master code###
#Set working directory
#CHANGE when changing users
# NOTE(review): hard-coded user path; collaborators must edit this line.
setwd("C:/Users/jessie.golding/Documents/GitHub/MDAM")
#Load packages needed for MDAM
source("run_loadpackages.R")
#Load pilot season data
#Read data from csv file using format file nbr.mdam.R (which cuts down the number of species to 4)
source("nbr.mdam.R")
#Output is dataframe "mdam" with all data from 2015 Pilot Season
# Fit the model; the saved object `out` is expected to be created by
# run_MDAM.R -- TODO confirm.
source("run_MDAM.R")
save(out,file="pilot")
###Code for loading the saved output later###
load("pilot")
|
#=============================================================================#
# Author: Guido Espana & Yutong Yao & Alex Perkins
#=============================================================================#
# user input ---------------
#=============================================================================#
# NOTE(review): clearing the workspace and calling setwd() inside a script is
# fragile when the script is sourced from another session -- kept as-is.
rm(list = ls())
library(RColorBrewer)
library(tidyverse)
library(randomForest)
library(grDevices)
library(here)
library(mgcv)
setwd(here())
# Fix the RNG seed so the stochastic model fits below are reproducible.
set.seed(123)
#=============================================================================#
# Economic parameters ---------------
#=============================================================================#
# Econ data are based on Flasche2016
# Public payer
# Perspective/country selectors; only "PublicPayer" is handled below.
perspective = "PublicPayer"
country = "Brazil"
econ = list()
# Per-dose vaccine cost and per-test cost (USD).
c.vax = 23
econ$c.test = 10
if(perspective == "PublicPayer"){
if(country == "Brazil"){
# Cost per ambulatory case / hospitalization / death, plus GDP per capita.
econ$c.amb = 60
econ$c.hosp = 200
econ$c.death = 0
econ$c.vax = c.vax
econ$gdp = 8649
}else if (country == "Philippines"){
econ$c.amb = 20
econ$c.hosp = 400
econ$c.death = 0
econ$c.vax = c.vax
econ$gdp = 2951
}else{
print("Country not found")
}
}
# DALY weights: 0.545 disability weight over 4 (ambulatory) or 14 (hospital)
# days expressed as a fraction of a year; a death counts one full DALY/year.
econ$d.amb = 0.545 * 4/365
econ$d.hosp = 0.545 * 14/365
econ$d.death = 1
econ$d.vax = 0
# Probability of death given hospitalization and annual discounting rates.
p.death = 0.005
discounting.rate = 0.03
discounting.daly.rate = 0.03
# Willingness-to-pay threshold (3x GDP per capita) and figure-saving switch.
daly.threshold = 3*econ$gdp
save.fig = T
#=============================================================================#
# Functions ---------------
#=============================================================================#
# Draw a vertical color-bar legend with tick labels on the right side.
# myColStrip_in : one-column matrix of bin midpoints (drawn with image()).
# myColBreaks_in/myColBar_in : break points and corresponding colors.
# myTicks_in    : tick positions in data units.
# plot_line     : if TRUE, draw a horizontal reference line at the 2nd tick.
# myBarLims_in  : value range spanned by the full bar (for tick placement).
# label_in      : axis label; ... is forwarded to mtext().
# NOTE(review): not called in the visible portion of this script -- possibly
# kept for other figure scripts; confirm before removing.
plot_color_bar = function(myColStrip_in, myColBreaks_in,myColBar_in, myTicks_in, plot_line = TRUE,myBarLims_in, label_in = "",...){
image(t(myColStrip_in),
col = myColBar_in, breaks = myColBreaks_in,
xlim = c(0.0,1.0), axes = F)
# Linearly map tick values (data units) onto the strip's user y coordinates.
ylims = par("usr")[c(3,4)]
slope_y = diff(ylims) / diff(myBarLims_in)
b_y = ylims[2] - slope_y * myBarLims_in[2]
myBarTicks = myTicks_in * slope_y + b_y
myBarTickLabels = myTicks_in
# First axis call draws tick marks only; second draws the labels inset.
axis(4,labels = rep("",length(myBarTicks)), at = myBarTicks,cex.axis = 0.8,tck = -0.1)
axis(4, at = myBarTicks, labels = myBarTickLabels, cex.axis = 0.8,line = -1.0,lwd = 0)
if(plot_line){
abline(h = myBarTicks[2], col = "black", lwd = 2)
}
mtext(label_in, side = 4, outer = F, lwd = 2,...)
}
# Draw a horizontal color-bar legend with tick labels underneath.
# Same arguments as the vertical variant, without the reference line:
# myColStrip_in is the strip of bin midpoints, myColBreaks_in/myColBar_in the
# breaks and colors, myTicks_in the tick values, myBarLims_in the value range
# of the bar, label_in the axis label; ... is forwarded to mtext().
# NOTE(review): not called in the visible portion of this script.
plot_color_bar_horizontal = function(myColStrip_in, myColBreaks_in,myColBar_in, myTicks_in,
myBarLims_in,
label_in = "",...){
image(myColStrip_in,
col = myColBar_in, breaks = myColBreaks_in,
xlim = c(0,1.0), ylim = c(0,1.0),axes = F, add = F)
# Linearly map tick values (data units) onto the strip's user x coordinates.
xlims = par("usr")[c(1,2)]
slope_x = diff(xlims) / diff(myBarLims_in)
b_x = xlims[2] - slope_x * myBarLims_in[2]
myBarTicks = myTicks_in * slope_x + b_x
myBarTickLabels = myTicks_in
# First axis call draws tick marks only; second draws the labels inset.
axis(1,labels = rep("",length(myBarTicks)), at = myBarTicks,cex.axis = 0.8,tck = -0.5)
axis(1, at = myBarTicks, labels = myBarTickLabels, cex.axis = 0.8,line = -1.0,lwd = 0)
mtext(label_in, side = 1, outer = F, lwd = 2,...)
}
# Compute cost-effectiveness summaries for a test-then-vaccinate strategy.
#
# dis_table.in      : data frame, one row per year, with columns DisAverted,
#                     HospAverted, DaysLostDeathAverted, Vaccinated, Tested.
# econ.in           : list of unit costs (c.amb, c.hosp, c.death) and DALY
#                     weights (d.amb, d.hosp, d.death, d.vax).
# p.death.in        : probability of death given hospitalization.
# disc.rate.in      : annual discount rate for costs.
# disc.daly.in      : annual discount rate for DALYs.
# daly.threshold.in : willingness to pay per DALY averted.
#
# Returns list(threshold.cost, net.treat.cost, net.daly.factor,
# death.daly.factor). When nobody was tested the three factor fields are per
# vaccinated individual, otherwise they are totals; threshold.cost is always
# per vaccinated individual.
calculate_costs_public_health = function(dis_table.in, econ.in, p.death.in,
                                         disc.rate.in, disc.daly.in, daly.threshold.in)
{
  n.years = nrow(dis_table.in)
  # Year-by-year discount factors (first year undiscounted).
  disc.rate = 1/((1+disc.rate.in)^(0:(n.years - 1)))
  disc.daly = 1/((1+disc.daly.in)^(0:(n.years - 1)))
  # Disease cases in dis_table.in do not include hospitalizations, so no
  # subtraction is needed before costing ambulatory cases.
  cost.treatment = sum(
    ( (dis_table.in$DisAverted) * econ.in$c.amb +
        (dis_table.in$HospAverted) * econ.in$c.hosp +
        (dis_table.in$HospAverted) * p.death.in * econ.in$c.death
    ) * disc.rate
  )
  # BUGFIX: these DALY weights previously read the *global* `econ`
  # (econ$d.amb / econ$d.hosp) instead of the econ.in argument, silently
  # ignoring the DALY weights the caller passed in.
  daly.treat = sum(
    ((dis_table.in$DisAverted ) * econ.in$d.amb +
       (dis_table.in$HospAverted) * econ.in$d.hosp
    ) * disc.daly
  )
  daly.death = sum(
    ((dis_table.in$DaysLostDeathAverted) / 365) * econ.in$d.death * disc.daly
  )
  daly.vax = sum(dis_table.in$Vaccinated * econ.in$d.vax * disc.daly)
  net.daly = daly.treat - daly.vax
  N.vaccinated = sum(dis_table.in$Vaccinated)
  N.tested = sum(dis_table.in$Tested)
  # Break-even price per vaccinated person: averted treatment costs plus
  # monetized net DALYs (identical in both branches, so computed once).
  threshold.cost = (cost.treatment + net.daly * daly.threshold.in ) / N.vaccinated
  if(N.tested <= 0){
    # No testing: report everything per vaccinated individual.
    return(list(threshold.cost = threshold.cost,
                net.treat.cost = cost.treatment / N.vaccinated,
                net.daly.factor = net.daly / N.vaccinated,
                death.daly.factor = daly.death / N.vaccinated))
  }else{
    # Testing scenario: report totals (callers rescale them themselves).
    return(list(threshold.cost = threshold.cost,
                net.treat.cost = cost.treatment,
                net.daly.factor = net.daly,
                death.daly.factor = daly.death))
  }
}
# Reverse the row order of a matrix, then transpose it (equivalent to
# rotating the matrix 90 degrees clockwise).
# NOTE(review): not called in the visible portion of this script.
flip.matrix = function(x.in){
  reversed_rows = x.in[nrow(x.in):1, ]
  t(reversed_rows)
}
# Thin wrapper around image(): draws the transposed matrix with both axes
# suppressed; extra arguments (e.g. col, breaks) are forwarded to image().
# NOTE(review): not called in the visible portion of this script.
image.fxn = function(x.in, ...){
image(t(x.in), xaxt = 'n', yaxt = 'n', ...)
}
#=============================================================================#
# Read data and set economic variables ---------------
#=============================================================================#
# Load the per-scenario simulation summaries (one list element per
# sensitivity/specificity sweep) and flatten the first 100 years of each
# into one training row for the emulators fitted below.
summary_list = readRDS('../data/output/20190218_output/summary_routine_vaccination_life_exp_All_sweep_files_test.RDS')
# Pre-allocate every column written in the loop. CONSISTENCY FIX: PE9 and
# death.daly.factor were previously omitted here and grown implicitly on
# first assignment inside the loop.
threshold_cost_train_table_test = data.frame(
  Specificity = rep(0,length(summary_list)),
  Sensitivity = 0, SP9 = 0, PE9 = 0, net.daly.factor = 0,
  net.treat.cost = 0, threshold.cost = 0, Prop.vax = 0, Tested = 0,
  death.daly.factor = 0)
# seq_along() instead of 1:length() so an empty list yields zero iterations.
for(ff in seq_along(summary_list)){
  summary_tmp = summary_list[[ff]] %>% filter(Year <= 100)
  # Scenario descriptors are taken from the first row of each summary.
  threshold_cost_train_table_test$Specificity[ff] = summary_tmp$Specificity[1]
  threshold_cost_train_table_test$Sensitivity[ff] = summary_tmp$Sensitivity[1]
  threshold_cost_train_table_test$SP9[ff] = summary_tmp$SP9Prevax[1]
  threshold_cost_train_table_test$PE9[ff] = summary_tmp$PE9[1]
  threshold_cost_train_table_test$Prop.vax[ff] = sum(summary_tmp$Vaccinated) / sum(summary_tmp$Tested)
  threshold_cost_train_table_test$Tested[ff] = sum(summary_tmp$Tested)
  # Economic outcomes discounted over the horizon, using a 1x GDP threshold.
  threshold.cost.test = calculate_costs_public_health(
    summary_tmp, econ.in = econ, p.death.in = p.death,
    disc.rate.in = discounting.rate, disc.daly.in = discounting.daly.rate,
    daly.threshold.in = econ$gdp)
  threshold_cost_train_table_test$net.treat.cost[ff] = threshold.cost.test$net.treat.cost
  threshold_cost_train_table_test$net.daly.factor[ff] = threshold.cost.test$net.daly.factor
  threshold_cost_train_table_test$death.daly.factor[ff] = threshold.cost.test$death.daly.factor
  threshold_cost_train_table_test$threshold.cost[ff] = threshold.cost.test$threshold.cost
  # Lightweight progress indicator, every 100 scenarios.
  if(ff%%100 == 0){cat("\r:",ff)}
}
#=============================================================================#
# Model fitting ---------
#=============================================================================#
# Fit one emulator per outcome (treatment cost, net DALYs, proportion
# vaccinated, death DALYs) as a function of Specificity, Sensitivity and SP9.
# m selects the emulator family; only the "gam" branch runs as written.
m = "gam"
if(m == "randomforest"){
model_cost_treat_test = randomForest(net.treat.cost ~ (
Specificity + Sensitivity + SP9)^3,
data = threshold_cost_train_table_test,
ntree = 1000, importance = T)
model_daly_test = randomForest(net.daly.factor ~ (
Specificity + Sensitivity + SP9)^3,
data = threshold_cost_train_table_test,
ntree = 1000, importance = T)
model_propvax_test = randomForest(Prop.vax ~ (
Specificity + Sensitivity + SP9)^3,
data = threshold_cost_train_table_test,
ntree = 1000, importance = T)
model_death = randomForest(death.daly.factor ~ (
Specificity + Sensitivity + SP9)^3,
data = threshold_cost_train_table_test,
ntree = 1000, importance = T)
} else if (m == "gam"){
# Gaussian-process smooths over the 3-D input space (mgcv, bs = "gp").
model_cost_treat_test = gam(net.treat.cost ~ s(
Specificity,Sensitivity,SP9, bs = "gp"),
data = threshold_cost_train_table_test,
family = "gaussian")
model_propvax_test = gam(Prop.vax ~ s(
Specificity,Sensitivity,SP9, bs = "gp"),
data = threshold_cost_train_table_test,
family = "gaussian")
model_daly_test = gam(net.daly.factor ~ s(
Specificity, Sensitivity, SP9, bs = "gp"),
data = threshold_cost_train_table_test,
family = "gaussian"
)
model_death = gam(death.daly.factor ~ s(
Specificity, Sensitivity, SP9, bs = "gp"),
data = threshold_cost_train_table_test,
family = "gaussian"
)
}
# Mean number tested per scenario; used below to scale totals per test/dose.
N.tested = mean(threshold_cost_train_table_test$Tested)
#=============================================================================#
# Plot heatmaps of Threshold cost of the test when vaccine is $69 v2.0---------
#=============================================================================#
# Sensitivity/specificity grid at which the emulators are evaluated.
sensitivity_array = seq(from = 0, by = 0.02, to = 1.0)
specificity_array = seq(from = 0, by = 0.02, to = 1.0)
cost_effective_matrix_test_screening = matrix(
0,nrow = length(sensitivity_array),
ncol = length(specificity_array))
# Two-color map: values below 0 (grey) vs above 0 (green).
n.breaks = 2
# NOTE(review): vax.cost.default and myColBreaksTicks appear unused below --
# confirm before deleting.
vax.cost.default = 70
gdp.vec = c(1,3)
daly.thresholds = econ$gdp*gdp.vec
lb = -1;ub = 1
myColBar = c("#E0E0E0FF", "#00FF00FF")
myColBreaks = seq(lb,ub,by = (ub - lb)/n.breaks)
myColBreaksTicks = c(-1,0,1)
myColStrip = as.matrix((myColBreaks + diff(myColBreaks)[1] / 2)[-length(myColBreaks)])
if(save.fig == T){
jpeg(sprintf('../figures/supplement_figure_S18_%s_cost_test_%d_vax_%d_%s_30y.jpeg',
perspective,econ$c.test,econ$c.vax,country),
width=6,height=2.5,units='in',res=400)
}
# Panel grid: 2 rows (GDP thresholds) x 5 columns (PE9 values).
layout(
matrix(1:10,2,5,byrow = T),
widths = rep(2,5),heights = c(2,2)
)
par(mar = c(0.5,0.2,0.1,0.5), oma = c(2.5,2.5,1,1))
sensitivity.specificity.grid = expand.grid(Specificity = specificity_array,
Sensitivity = sensitivity_array)
for(dd in 1:length(daly.thresholds)){
daly.threshold.tmp = daly.thresholds[dd]
print(daly.thresholds[dd])
for(SP9_tmp in c(0.1,0.3,0.5,0.7,0.9)){
print(SP9_tmp)
test_data = sensitivity.specificity.grid
test_data$SP9 = SP9_tmp
# Fraction of tested individuals who get vaccinated: true positives among
# the SP9 seropositive plus false positives among the seronegative.
prop.vax = SP9_tmp * sensitivity.specificity.grid$Sensitivity + (1- sensitivity.specificity.grid$Specificity)*(1-SP9_tmp)
# Net benefit per vaccinated person: averted treatment cost plus monetized
# DALYs, minus vaccine and testing costs, scaled by number vaccinated.
prediction.grid = (predict(model_cost_treat_test,test_data,predict.all = F) +
predict(model_daly_test, test_data,predict.all = F) * daly.threshold.tmp +
predict(model_death, test_data,predict.all = F) * daly.threshold.tmp -
econ$c.vax * (N.tested*prop.vax) -
(econ$c.test*N.tested)
) / ( N.tested * prop.vax)
# Reshape the prediction vector (Specificity varies fastest in the
# expand.grid order, matching the inner sp loop) into the matrix.
k = 1
for(sen in 1:length(sensitivity_array)){
for(sp in 1:length(specificity_array)){
cost_effective_matrix_test_screening[sen,sp] = prediction.grid[k]
k = k + 1
}
}
print(max(cost_effective_matrix_test_screening))
#correct bounds
# Clamp to [lb, ub] so image() bins never receive out-of-range values.
cost_matrix = cost_effective_matrix_test_screening
cost_matrix[cost_matrix < lb] = lb
cost_matrix[cost_matrix > ub] = ub
image(
specificity_array,sensitivity_array, t(cost_matrix),
col = myColBar,
breaks = seq(lb,ub,by = (ub - lb)/n.breaks),axes = F,xlim = c(0,1),ylim = c(0,1)
)
breaks.array = c(-1,0,1)
# Draw only the zero (break-even) contour; the other levels get lwd 0.
# NOTE(review): the unnamed vector after lwd= is matched positionally to
# contour()'s nlevels argument and is effectively ignored since levels are
# given explicitly -- confirm intent.
contour(
x=specificity_array,
y=sensitivity_array,
z=t(cost_matrix),
level=breaks.array,
lwd= c(rep(0.0,length(which(breaks.array <0))), 1, rep(0.0,length(which(breaks.array > 0)))),
c(rep(0.1,n.breaks/2),2,rep(0.1,n.breaks/2)),
add=T,drawlabels=F
)
# Right-margin row label (GDP multiple) on the last column only.
if(SP9_tmp == 0.9){
mtext(text = sprintf("%.0fx GDP",gdp.vec[dd]), side = 4, line = 0, cex = 0.6)
}
# Column titles (PE9 value) on the first row only.
if(dd == 1){
sp9str = sprintf("%.1f",SP9_tmp)
mtext(text = bquote(PE[9] ~ " = " ~ .(sp9str)), side = 3, line = 0,cex = 0.7)
}
# y axis only on the first column; ticks and labels drawn separately.
if(SP9_tmp == 0.1){
axis(2,labels = rep("",6), at = seq(from=0,to=1.0,by=0.2),cex.axis = 0.8,tck = -0.03)
axis(2,at = seq(from=0,to=0.8,by=0.2),cex.axis = 0.7,line = -0.7,lwd = 0)
}else{
axis(2,labels = F,tick = F)
}
box()
# x axis only on the bottom row.
if(dd == 2){
axis(1,labels = rep("",6), at = seq(from=0,to=1.0,by=0.2),cex.axis = 0.8,tck = -0.03)
axis(1,at = seq(from=0,to=0.8,by=0.2),cex.axis = 0.7,line = -0.7,lwd = 0)
# axis(1,labels = T,cex.axis = 0.8)
}else{
axis(1,labels = F,tick = F)
}
}
}
mtext(text = "Specificity", side = 1, line = 1, cex = 0.8,outer = T)
mtext(text = "Sensitivity", side = 2, line = 1, cex = 0.8,outer = T)
if(save.fig == T){dev.off()}
|
/analysis/scripts/supplement_figure_S18_absolute_cost_brazil_30y.R
|
no_license
|
confunguido/public_health_assessment_dengue_vaccine
|
R
| false
| false
| 12,186
|
r
|
#=============================================================================#
# Author: Guido Espana & Yutong Yao & Alex Perkins
#=============================================================================#
# user input ---------------
#=============================================================================#
rm(list = ls())
library(RColorBrewer)
library(tidyverse)
library(randomForest)
library(grDevices)
library(here)
library(mgcv)
setwd(here())
set.seed(123)
#=============================================================================#
# Economic parameters ---------------
#=============================================================================#
# Econ data are based on Flasche2016
# Public payer
perspective = "PublicPayer"
country = "Brazil"
econ = list()
c.vax = 23         # vaccine cost per vaccinated person (USD)
econ$c.test = 10   # cost of one serostatus screening test (USD)
# Country-specific unit costs (USD) and GDP per capita under the chosen
# perspective; only the public-payer perspective is implemented here.
if(perspective == "PublicPayer"){
  if(country == "Brazil"){
    econ$c.amb = 60      # cost of an ambulatory dengue episode
    econ$c.hosp = 200    # cost of a hospitalized dengue episode
    econ$c.death = 0     # direct cost attributed to a dengue death
    econ$c.vax = c.vax
    econ$gdp = 8649
  }else if (country == "Philippines"){
    econ$c.amb = 20
    econ$c.hosp = 400
    econ$c.death = 0
    econ$c.vax = c.vax
    econ$gdp = 2951
  }else{
    print("Country not found")
  }
}
# Disability weights: 0.545 applied over 4-day (ambulatory) and 14-day
# (hospitalized) episodes, expressed as fractions of a year
econ$d.amb = 0.545 * 4/365
econ$d.hosp = 0.545 * 14/365
econ$d.death = 1
econ$d.vax = 0
p.death = 0.005                 # probability of death given hospitalization
discounting.rate = 0.03         # annual discount rate for costs
discounting.daly.rate = 0.03    # annual discount rate for DALYs
# NOTE(review): daly.threshold (3x GDP) appears unused below; the plotting
# section builds its own daly.thresholds vector -- confirm.
daly.threshold = 3*econ$gdp
save.fig = T
## Draw a vertical color-bar legend for an image plot.
## myColStrip_in: one-column matrix of midpoint values for the color cells.
## myColBreaks_in / myColBar_in: breaks and colors, as passed to image().
## myTicks_in: tick values in data units; myBarLims_in: data-range of the bar,
## used to map ticks linearly onto the plotted user coordinates.
## plot_line: if TRUE, draw a horizontal reference line at the second tick.
## label_in and ... are forwarded to mtext() on the right axis (side 4).
plot_color_bar = function(myColStrip_in, myColBreaks_in,myColBar_in, myTicks_in, plot_line = TRUE,myBarLims_in, label_in = "",...){
  image(t(myColStrip_in),
        col = myColBar_in, breaks = myColBreaks_in,
        xlim = c(0.0,1.0), axes = F)
  # Map tick positions from data units into the device's user y-coordinates
  ylims = par("usr")[c(3,4)]
  slope_y = diff(ylims) / diff(myBarLims_in)
  b_y = ylims[2] - slope_y * myBarLims_in[2]
  myBarTicks = myTicks_in * slope_y + b_y
  myBarTickLabels = myTicks_in
  # First call draws the tick marks, second overlays labels without a line
  axis(4,labels = rep("",length(myBarTicks)), at = myBarTicks,cex.axis = 0.8,tck = -0.1)
  axis(4, at = myBarTicks, labels = myBarTickLabels, cex.axis = 0.8,line = -1.0,lwd = 0)
  if(plot_line){
    abline(h = myBarTicks[2], col = "black", lwd = 2)
  }
  mtext(label_in, side = 4, outer = F, lwd = 2,...)
}
## Horizontal counterpart of plot_color_bar(): draws a color-bar legend along
## the bottom axis (side 1).  Arguments mirror plot_color_bar(); ticks in
## myTicks_in are mapped linearly from the data range myBarLims_in onto the
## plotted user x-coordinates.  label_in and ... are forwarded to mtext().
plot_color_bar_horizontal = function(myColStrip_in, myColBreaks_in,myColBar_in, myTicks_in,
                                     myBarLims_in,
                                     label_in = "",...){
  image(myColStrip_in,
        col = myColBar_in, breaks = myColBreaks_in,
        xlim = c(0,1.0), ylim = c(0,1.0),axes = F, add = F)
  # Map tick positions from data units into the device's user x-coordinates
  xlims = par("usr")[c(1,2)]
  slope_x = diff(xlims) / diff(myBarLims_in)
  b_x = xlims[2] - slope_x * myBarLims_in[2]
  myBarTicks = myTicks_in * slope_x + b_x
  myBarTickLabels = myTicks_in
  # First call draws the tick marks, second overlays labels without a line
  axis(1,labels = rep("",length(myBarTicks)), at = myBarTicks,cex.axis = 0.8,tck = -0.5)
  axis(1, at = myBarTicks, labels = myBarTickLabels, cex.axis = 0.8,line = -1.0,lwd = 0)
  mtext(label_in, side = 1, outer = F, lwd = 2,...)
}
## Compute discounted cost-effectiveness summaries of a vaccination strategy.
##
## Args:
##   dis_table.in: one row per year with columns DisAverted, HospAverted,
##                 DaysLostDeathAverted, Vaccinated and Tested.
##   econ.in: list of unit costs (c.amb, c.hosp, c.death) and disability
##            weights (d.amb, d.hosp, d.death, d.vax).
##   p.death.in: probability of death given hospitalization.
##   disc.rate.in / disc.daly.in: annual discount rates for costs / DALYs
##                                (year 1 is undiscounted).
##   daly.threshold.in: monetary value assigned to one DALY averted.
##
## Returns a list with threshold.cost (threshold vaccine price per vaccinee)
## plus treatment-cost, net-DALY and death-DALY components.  When nobody was
## tested (N.tested <= 0) the components are normalized per vaccinated
## person; otherwise they are returned as totals.
calculate_costs_public_health = function(dis_table.in, econ.in, p.death.in,
                                         disc.rate.in, disc.daly.in, daly.threshold.in)
{
  n.years = nrow(dis_table.in)
  disc.rate = 1/((1+disc.rate.in)^(0:(n.years - 1)))
  disc.daly = 1/((1+disc.daly.in)^(0:(n.years - 1)))
  # Disease cases in the dataframe dis_table.in do not include hospitalizations...
  # no need to substract
  cost.treatment = sum(
    ( (dis_table.in$DisAverted) * econ.in$c.amb +
        (dis_table.in$HospAverted) * econ.in$c.hosp +
        (dis_table.in$HospAverted) * p.death.in * econ.in$c.death
    ) * disc.rate
  )
  # BUG FIX: the original read econ$d.amb / econ$d.hosp from the global
  # environment instead of the econ.in argument; use econ.in consistently.
  daly.treat = sum(
    ((dis_table.in$DisAverted ) * econ.in$d.amb +
       (dis_table.in$HospAverted) * econ.in$d.hosp
    ) * disc.daly
  )
  daly.death = sum(
    ((dis_table.in$DaysLostDeathAverted) / 365) * econ.in$d.death * disc.daly
  )
  daly.vax = sum(dis_table.in$Vaccinated * econ.in$d.vax * disc.daly)
  net.daly = daly.treat - daly.vax
  N.vaccinated = sum(dis_table.in$Vaccinated)
  N.tested = sum(dis_table.in$Tested)
  # NOTE(review): daly.death is returned as a component but is not part of
  # threshold.cost here; callers monetize and add it in separately.
  threshold.cost = (cost.treatment + net.daly * daly.threshold.in) / N.vaccinated
  if(N.tested <= 0){
    return(list(threshold.cost = threshold.cost, net.treat.cost = cost.treatment / N.vaccinated,
                net.daly.factor = net.daly / N.vaccinated, death.daly.factor = daly.death/N.vaccinated))
  }else{
    return(list(threshold.cost = threshold.cost, net.treat.cost = cost.treatment,
                net.daly.factor = net.daly, death.daly.factor = daly.death))
  }
}
## Reverse the row order of a matrix and transpose the result.
## rev(seq_len(nrow(x.in))) is identical to nrow(x.in):1 for nrow >= 1, but
## also handles 0-row matrices (nrow(x):1 would wrongly yield c(0, 1) and
## trigger a subscript-out-of-bounds error).
flip.matrix = function(x.in){
  return(t(x.in[rev(seq_len(nrow(x.in))), ]))
}
## Heat-map wrapper around image(): transpose the input so its rows run along
## the vertical axis, suppress both default axes, and forward any extra
## graphical parameters (col, breaks, ...) to image().
image.fxn = function(x.in, ...){
  transposed = t(x.in)
  image(transposed, xaxt = 'n', yaxt = 'n', ...)
}
#=============================================================================#
# Read data and set economic variables ---------------
#=============================================================================#
# Each element of summary_list is a per-simulation data frame of yearly
# outcomes for one (Specificity, Sensitivity, SP9) sweep combination.
summary_list = readRDS('../data/output/20190218_output/summary_routine_vaccination_life_exp_All_sweep_files_test.RDS')
# Training table for the emulators fitted in the next section
threshold_cost_train_table_test = data.frame(
  Specificity = rep(0,length(summary_list)),
  Sensitivity = 0, SP9 = 0, net.daly.factor = 0,
  net.treat.cost = 0,threshold.cost = 0, Prop.vax = 0, Tested = 0)
for(ff in 1:length(summary_list)){
  # NOTE(review): horizon filter is Year <= 100 while the output filename
  # says "30y" -- confirm the intended time horizon.
  summary_tmp = summary_list[[ff]] %>% filter(Year <= 100)
  threshold_cost_train_table_test$Specificity[ff] = summary_tmp$Specificity[1]
  threshold_cost_train_table_test$Sensitivity[ff] = summary_tmp$Sensitivity[1]
  threshold_cost_train_table_test$SP9[ff] = summary_tmp$SP9Prevax[1]
  # PE9 and death.daly.factor columns are created on first assignment below
  threshold_cost_train_table_test$PE9[ff] = summary_tmp$PE9[1]
  threshold_cost_train_table_test$Prop.vax[ff] = sum(summary_tmp$Vaccinated) / sum(summary_tmp$Tested)
  threshold_cost_train_table_test$Tested[ff] = sum(summary_tmp$Tested)
  # Economic summaries computed at a willingness-to-pay of 1x GDP per DALY
  threshold.cost.test = calculate_costs_public_health(
    summary_tmp, econ.in = econ, p.death.in = p.death,
    disc.rate.in = discounting.rate, disc.daly.in = discounting.daly.rate,
    daly.threshold.in = econ$gdp)
  threshold_cost_train_table_test$net.treat.cost[ff] = threshold.cost.test$net.treat.cost
  threshold_cost_train_table_test$net.daly.factor[ff] = threshold.cost.test$net.daly.factor
  threshold_cost_train_table_test$death.daly.factor[ff] = threshold.cost.test$death.daly.factor
  threshold_cost_train_table_test$threshold.cost[ff] = threshold.cost.test$threshold.cost
  if(ff%%100 == 0){cat("\r:",ff)}   # lightweight progress indicator
}
#=============================================================================#
# Model fitting ---------
#=============================================================================#
# Fit emulators of each economic component as a function of the test
# characteristics (Specificity, Sensitivity, SP9).  m selects the emulator
# family; "gam" is used here, the randomForest branch is kept for comparison.
m = "gam"
if(m == "randomforest"){
  model_cost_treat_test = randomForest(net.treat.cost ~ (
    Specificity + Sensitivity + SP9)^3,
    data = threshold_cost_train_table_test,
    ntree = 1000, importance = T)
  model_daly_test = randomForest(net.daly.factor ~ (
    Specificity + Sensitivity + SP9)^3,
    data = threshold_cost_train_table_test,
    ntree = 1000, importance = T)
  model_propvax_test = randomForest(Prop.vax ~ (
    Specificity + Sensitivity + SP9)^3,
    data = threshold_cost_train_table_test,
    ntree = 1000, importance = T)
  model_death = randomForest(death.daly.factor ~ (
    Specificity + Sensitivity + SP9)^3,
    data = threshold_cost_train_table_test,
    ntree = 1000, importance = T)
} else if (m == "gam"){
  # Gaussian-process smooths over the three test characteristics
  model_cost_treat_test = gam(net.treat.cost ~ s(
    Specificity,Sensitivity,SP9, bs = "gp"),
    data = threshold_cost_train_table_test,
    family = "gaussian")
  model_propvax_test = gam(Prop.vax ~ s(
    Specificity,Sensitivity,SP9, bs = "gp"),
    data = threshold_cost_train_table_test,
    family = "gaussian")
  model_daly_test = gam(net.daly.factor ~ s(
    Specificity, Sensitivity, SP9, bs = "gp"),
    data = threshold_cost_train_table_test,
    family = "gaussian"
  )
  model_death = gam(death.daly.factor ~ s(
    Specificity, Sensitivity, SP9, bs = "gp"),
    data = threshold_cost_train_table_test,
    family = "gaussian"
  )
}
# Mean number of tested individuals per simulation; used to scale predictions
N.tested = mean(threshold_cost_train_table_test$Tested)
#=============================================================================#
# Plot heatmaps of Threshold cost of the test when vaccine is $69 v2.0---------
# NOTE(review): the "$69" in this title does not match econ$c.vax (c.vax = 23)
# or vax.cost.default (70) defined in this file -- confirm the intended price.
#=============================================================================#
# Grid of test characteristics explored (2% steps over [0, 1])
sensitivity_array = seq(from = 0, by = 0.02, to = 1.0)
specificity_array = seq(from = 0, by = 0.02, to = 1.0)
cost_effective_matrix_test_screening = matrix(
  0,nrow = length(sensitivity_array),
  ncol = length(specificity_array))
n.breaks = 2
vax.cost.default = 70  # NOTE(review): defined but not used below -- confirm
gdp.vec = c(1,3)       # willingness-to-pay multipliers (1x and 3x GDP per DALY)
daly.thresholds = econ$gdp*gdp.vec
lb = -1;ub = 1         # clipping bounds for the plotted threshold cost
# Two-color scale: grey = below break-even, green = above break-even
myColBar = c("#E0E0E0FF", "#00FF00FF")
myColBreaks = seq(lb,ub,by = (ub - lb)/n.breaks)
myColBreaksTicks = c(-1,0,1)
myColStrip = as.matrix((myColBreaks + diff(myColBreaks)[1] / 2)[-length(myColBreaks)])
if(save.fig == T){
  jpeg(sprintf('../figures/supplement_figure_S18_%s_cost_test_%d_vax_%d_%s_30y.jpeg',
               perspective,econ$c.test,econ$c.vax,country),
       width=6,height=2.5,units='in',res=400)
}
# Panel layout: 2 rows (DALY thresholds) x 5 columns (seroprevalence values)
layout(
  matrix(1:10,2,5,byrow = T),
  widths = rep(2,5),heights = c(2,2)
)
par(mar = c(0.5,0.2,0.1,0.5), oma = c(2.5,2.5,1,1))
# expand.grid varies Specificity fastest; the k-index loop below relies on
# this ordering when unpacking predictions into the matrix.
sensitivity.specificity.grid = expand.grid(Specificity = specificity_array,
                                           Sensitivity = sensitivity_array)
for(dd in 1:length(daly.thresholds)){
  daly.threshold.tmp = daly.thresholds[dd]
  print(daly.thresholds[dd])
  for(SP9_tmp in c(0.1,0.3,0.5,0.7,0.9)){
    print(SP9_tmp)
    test_data = sensitivity.specificity.grid
    test_data$SP9 = SP9_tmp
    # Fraction of tested individuals who get vaccinated: true positives among
    # seropositives plus false positives among seronegatives
    prop.vax = SP9_tmp * sensitivity.specificity.grid$Sensitivity + (1- sensitivity.specificity.grid$Specificity)*(1-SP9_tmp)
    # Net benefit per vaccinated person: averted treatment costs plus
    # monetized DALYs, minus vaccine and screening-test costs
    prediction.grid = (predict(model_cost_treat_test,test_data,predict.all = F) +
                         predict(model_daly_test, test_data,predict.all = F) * daly.threshold.tmp +
                         predict(model_death, test_data,predict.all = F) * daly.threshold.tmp -
                         econ$c.vax * (N.tested*prop.vax) -
                         (econ$c.test*N.tested)
    ) / ( N.tested * prop.vax)
    k = 1
    for(sen in 1:length(sensitivity_array)){
      for(sp in 1:length(specificity_array)){
        cost_effective_matrix_test_screening[sen,sp] = prediction.grid[k]
        k = k + 1
      }
    }
    print(max(cost_effective_matrix_test_screening))
    # Clip values to [lb, ub] so the two-color scale covers the whole range
    cost_matrix = cost_effective_matrix_test_screening
    cost_matrix[cost_matrix < lb] = lb
    cost_matrix[cost_matrix > ub] = ub
    image(
      specificity_array,sensitivity_array, t(cost_matrix),
      col = myColBar,
      breaks = seq(lb,ub,by = (ub - lb)/n.breaks),axes = F,xlim = c(0,1),ylim = c(0,1)
    )
    breaks.array = c(-1,0,1)
    # Draw only the break-even (0) contour; the other levels get lwd = 0.
    # NOTE(review): the unnamed vector argument below is matched positionally
    # by contour() (to nlevels, which is ignored since levels is supplied) --
    # probably a leftover; confirm it can be removed.
    contour(
      x=specificity_array,
      y=sensitivity_array,
      z=t(cost_matrix),
      level=breaks.array,
      lwd= c(rep(0.0,length(which(breaks.array <0))), 1, rep(0.0,length(which(breaks.array > 0)))),
      c(rep(0.1,n.breaks/2),2,rep(0.1,n.breaks/2)),
      add=T,drawlabels=F
    )
    if(SP9_tmp == 0.9){
      mtext(text = sprintf("%.0fx GDP",gdp.vec[dd]), side = 4, line = 0, cex = 0.6)
    }
    if(dd == 1){
      # NOTE(review): panel label says PE[9] but the looped variable is SP9
      # (pre-vaccination seroprevalence) -- confirm these coincide by design.
      sp9str = sprintf("%.1f",SP9_tmp)
      mtext(text = bquote(PE[9] ~ " = " ~ .(sp9str)), side = 3, line = 0,cex = 0.7)
    }
    if(SP9_tmp == 0.1){
      axis(2,labels = rep("",6), at = seq(from=0,to=1.0,by=0.2),cex.axis = 0.8,tck = -0.03)
      axis(2,at = seq(from=0,to=0.8,by=0.2),cex.axis = 0.7,line = -0.7,lwd = 0)
    }else{
      axis(2,labels = F,tick = F)
    }
    box()
    if(dd == 2){
      axis(1,labels = rep("",6), at = seq(from=0,to=1.0,by=0.2),cex.axis = 0.8,tck = -0.03)
      axis(1,at = seq(from=0,to=0.8,by=0.2),cex.axis = 0.7,line = -0.7,lwd = 0)
      # axis(1,labels = T,cex.axis = 0.8)
    }else{
      axis(1,labels = F,tick = F)
    }
  }
}
mtext(text = "Specificity", side = 1, line = 1, cex = 0.8,outer = T)
mtext(text = "Sensitivity", side = 2, line = 1, cex = 0.8,outer = T)
if(save.fig == T){dev.off()}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nnmf.R
\name{nnmf}
\alias{nnmf}
\title{Fast non-negative matrix factorization by ANLS with MSE loss}
\usage{
nnmf(
A,
k = NULL,
max.iter = 1000,
rel.tol = 0.001,
n.threads = 0,
verbose = TRUE,
trace = 5
)
}
\arguments{
\item{A}{A matrix to be factorized. If sparse, will be converted to dense}
\item{k}{Decomposition rank, integer (required)}
\item{max.iter}{Maximum number of alternating NNLS solutions for H and W, integer (default 1000)}
\item{rel.tol}{Stop criterion, defined as the relative tolerance between two successive iterations: |e2-e1|/avg(e1,e2). (default 1e-3)}
\item{n.threads}{Number of threads/CPUs to use. Default to 0 (all cores).}
\item{verbose}{boolean, give updates every trace iterations}
\item{trace}{An integer specifying a multiple of iterations at which MSE error should be calculated and checked for convergence. To check error every iteration, specify 1. To avoid checking error at all, specify trace > max.iter (default is 5, and is generally an efficient and effective value)}
}
\value{
A list of W and H matrices
}
\description{
Fast non-negative matrix factorization by alternating least squares with sequential coordinate descent against mean squared error loss.
}
|
/man/nnmf.Rd
|
no_license
|
ttriche/scNMF
|
R
| false
| true
| 1,298
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nnmf.R
\name{nnmf}
\alias{nnmf}
\title{Fast non-negative matrix factorization by ANLS with MSE loss}
\usage{
nnmf(
A,
k = NULL,
max.iter = 1000,
rel.tol = 0.001,
n.threads = 0,
verbose = TRUE,
trace = 5
)
}
\arguments{
\item{A}{A matrix to be factorized. If sparse, will be converted to dense}
\item{k}{Decomposition rank, integer (required)}
\item{max.iter}{Maximum number of alternating NNLS solutions for H and W, integer (default 1000)}
\item{rel.tol}{Stop criterion, defined as the relative tolerance between two successive iterations: |e2-e1|/avg(e1,e2). (default 1e-3)}
\item{n.threads}{Number of threads/CPUs to use. Default to 0 (all cores).}
\item{verbose}{boolean, give updates every trace iterations}
\item{trace}{An integer specifying a multiple of iterations at which MSE error should be calculated and checked for convergence. To check error every iteration, specify 1. To avoid checking error at all, specify trace > max.iter (default is 5, and is generally an efficient and effective value)}
}
\value{
A list of W and H matrices
}
\description{
Fast non-negative matrix factorization by alternating least squares with sequential coordinate descent against mean squared error loss.
}
|
# Factors
## Run the following lines and explore the outputs.
## The data set we want to convert into a factor
data <- rep(c(1, 2, 3), 4)
data
## Convert data into factors
fdata <- factor(data)
fdata
## We want to display values as Roman numerals
roman_fdata <- factor(data, labels=c("I", "II", "III"))
roman_fdata
## To convert the default factor fdata to Roman numerals, we use the assignment form of the levels function
levels(fdata) <- c("I", "II", "III")
fdata
#-------------
## Changing and reordering factors
test.results <- rep(c("+", "-"), times=c(150, 30))
test.results
ftest.results <- factor(test.results)
ftest.results
levels(ftest.results)
## Change the label names.  Note: replacement follows the current
## (alphabetical) level order, so "-" becomes "Negative" and "+" "Positive".
levels(ftest.results) <- c("Negative", "Positive")
ftest.results
levels(ftest.results)
## Plot a bar chart using qplot
library(ggplot2)
qplot(ftest.results, geom = "bar")
## Change the x-axis label
qplot(ftest.results, geom = "bar") + xlab("test outcome")
## Add a y-axis label
qplot(ftest.results, geom = "bar") + xlab("test outcome") + ylab("Count")
## Add a title
qplot(ftest.results, geom = "bar") + xlab("test outcome") + ylab("Count") + ggtitle("PCR results")
#------------
## Factors can take only predefined values; values outside the supplied
## levels ("E" and "F" here) become NA
letter.vec <- factor(LETTERS[1:6],
                     levels = LETTERS[1:4])
letter.vec
levels(letter.vec)
#-------------
## load iris dataset
data(iris)
class(iris)
class(iris$Sepal.Width)
## Distribution of Sepal.Width by Species
qplot(x = Species, y = Sepal.Width, data = iris, geom = "boxplot", fill = Species)
## Reorder the boxplots into ascending order
## First convert Species into a factor
iris$Species <- factor(iris$Species)
## Next change the order of levels
iris$Species <- factor(iris$Species, levels=c("setosa", "versicolor", "virginica"))
qplot(x = Species, y = Sepal.Width, data = iris, geom = "boxplot", fill = Species)
## If you want to change the labels (observe the difference with the previous command
## iris$Species <- factor(iris$Species, levels=c("setosa", "versicolor", "virginica")))
iris$Species <- factor(iris$Species, labels=c("SE", "VE", "VI")) # We change labels not the order (earlier we changed the order, not the label names)
#-------------
# Now let's work with the diamonds dataset in ggplot2
## load data
data("diamonds")
## class type
class(diamonds)
## First rows of the data (head() shows 6 by default)
head(diamonds)
## To read the description file of the diamonds dataset
?diamonds
## plot cut variable. ## cut variable represents the quality of the cut as
## Fair, Good, Very Good, Premium, Ideal.
table(diamonds$cut)
qplot(diamonds$cut, geom = "bar")
## Observe in the above output the default order
## is not alphabetical. Why?
class(diamonds$cut)
# The class type is both ordered and factor. Hence, you
## do not need to manually set the orders.
|
/public/problems/factor_lab.R
|
no_license
|
statisticsmart/Rprogramming
|
R
| false
| false
| 2,779
|
r
|
# Factors
## Run the following lines and explore the outputs.
## The data set we want to convert into a factor
data <- rep(c(1, 2, 3), 4)
data
## Convert data into factors
fdata <- factor(data)
fdata
## We want to display values as Roman numerals
roman_fdata <- factor(data, labels=c("I", "II", "III"))
roman_fdata
## To convert the default factor fdata to Roman numerals, we use the assignment form of the levels function
levels(fdata) <- c("I", "II", "III")
fdata
#-------------
## Changing and reordering factors
test.results <- rep(c("+", "-"), times=c(150, 30))
test.results
ftest.results <- factor(test.results)
ftest.results
levels(ftest.results)
## Change the label names.  Note: replacement follows the current
## (alphabetical) level order, so "-" becomes "Negative" and "+" "Positive".
levels(ftest.results) <- c("Negative", "Positive")
ftest.results
levels(ftest.results)
## Plot a bar chart using qplot
library(ggplot2)
qplot(ftest.results, geom = "bar")
## Change the x-axis label
qplot(ftest.results, geom = "bar") + xlab("test outcome")
## Add a y-axis label
qplot(ftest.results, geom = "bar") + xlab("test outcome") + ylab("Count")
## Add a title
qplot(ftest.results, geom = "bar") + xlab("test outcome") + ylab("Count") + ggtitle("PCR results")
#------------
## Factors can take only predefined values; values outside the supplied
## levels ("E" and "F" here) become NA
letter.vec <- factor(LETTERS[1:6],
                     levels = LETTERS[1:4])
letter.vec
levels(letter.vec)
#-------------
## load iris dataset
data(iris)
class(iris)
class(iris$Sepal.Width)
## Distribution of Sepal.Width by Species
qplot(x = Species, y = Sepal.Width, data = iris, geom = "boxplot", fill = Species)
## Reorder the boxplots into ascending order
## First convert Species into a factor
iris$Species <- factor(iris$Species)
## Next change the order of levels
iris$Species <- factor(iris$Species, levels=c("setosa", "versicolor", "virginica"))
qplot(x = Species, y = Sepal.Width, data = iris, geom = "boxplot", fill = Species)
## If you want to change the labels (observe the difference with the previous command
## iris$Species <- factor(iris$Species, levels=c("setosa", "versicolor", "virginica")))
iris$Species <- factor(iris$Species, labels=c("SE", "VE", "VI")) # We change labels not the order (earlier we changed the order, not the label names)
#-------------
# Now let's work with the diamonds dataset in ggplot2
## load data
data("diamonds")
## class type
class(diamonds)
## First rows of the data (head() shows 6 by default)
head(diamonds)
## To read the description file of the diamonds dataset
?diamonds
## plot cut variable. ## cut variable represents the quality of the cut as
## Fair, Good, Very Good, Premium, Ideal.
table(diamonds$cut)
qplot(diamonds$cut, geom = "bar")
## Observe in the above output the default order
## is not alphabetical. Why?
class(diamonds$cut)
# The class type is both ordered and factor. Hence, you
## do not need to manually set the orders.
|
\name{structure.diagram}
\alias{structure.diagram}
\alias{structure.graph}
\alias{structure.sem}
\alias{lavaan.diagram}
\alias{sem.diagram}
\alias{sem.graph}
\title{Draw a structural equation model specified by two measurement models and a structural model}
\description{Graphic presentations of structural equation models are a very useful way to conceptualize sem and confirmatory factor models. Given a measurement model on x (xmodel) and on y (ymodel) as well as a path model connecting x and y (phi), draw the graph. If the ymodel is not specified, just draw the measurement model (xmodel + phi). If the Rx or Ry matrices are specified, show the correlations between the x variables, or y variables.
Perhaps even more usefully, the function returns a model appropriate for running directly in the \emph{sem package} written by John Fox or the \emph{lavaan} package by Yves Rosseel. For this option to work directly, it is necessary to specify that errors=TRUE.
Input can be specified as matrices or the output from \code{\link{fa}}, factanal, or a rotation package such as \emph{GPArotation}.
For symbolic graphs, the input matrices can be character strings or mixtures of character strings and numeric vectors.
As an option, for those without Rgraphviz installed, \code{\link{structure.sem}} will just create the sem model and skip the graph. (This functionality is now included in \code{\link{structure.diagram}}.)
structure.diagram will draw the diagram without using Rgraphviz and is probably the preferred option. structure.graph will be removed eventually.
\code{\link{lavaan.diagram}} will draw either cfa or sem results from the lavaan package. It has been tested for cfa, sem and mimic type output. It takes the output object from \emph{lavaan} and then calls \code{\link{structure.diagram}}.
}
\usage{
structure.diagram(fx, Phi=NULL,fy=NULL,labels=NULL,cut=.3,errors=FALSE,simple=TRUE,
regression=FALSE,lr=TRUE,Rx=NULL,Ry=NULL,digits=1,e.size=.1,
main="Structural model", ...)
structure.graph(fx, Phi = NULL,fy = NULL, out.file = NULL, labels = NULL, cut = 0.3,
errors=TRUE, simple=TRUE,regression=FALSE, size = c(8, 6),
node.font = c("Helvetica", 14), edge.font = c("Helvetica", 10),
rank.direction = c("RL", "TB", "LR", "BT"), digits = 1,
title = "Structural model", ...)
structure.sem(fx, Phi = NULL, fy = NULL,out.file = NULL, labels = NULL,
cut = 0.3, errors=TRUE, simple=TRUE,regression=FALSE)
lavaan.diagram(fit,main,e.size=.1,...)
sem.diagram(fit,main="A SEM from the sem package",...)
sem.graph(fit,out.file=NULL,main= "A SEM from the sem package",...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{fx}{a factor model on the x variables. }
\item{Phi}{A matrix of directed relationships. Lower diagonal values are drawn. If the upper diagonal values match the lower diagonal, two headed arrows are drawn. For a single, directed path, just the value may be specified. }
\item{fy}{a factor model on the y variables (can be empty) }
\item{Rx}{The correlation matrix among the x variables}
\item{Ry}{The correlation matrix among the y variables}
\item{out.file}{name a file to send dot language instructions. }
\item{labels}{variable labels if not specified as colnames for the matrices}
\item{cut}{Draw paths for values > cut }
\item{fit}{The output from a lavaan cfa or sem}
\item{errors}{draw an error term for observed variables }
\item{simple}{Just draw one path per x or y variable }
\item{regression}{Draw a regression diagram (observed variables cause Y)}
\item{lr}{Direction of diagram is from left to right (lr=TRUE, default) or from bottom to top (lr=FALSE) }
\item{e.size}{size of the ellipses in structure.diagram}
\item{main}{main title of diagram}
\item{size}{page size of graphic }
\item{node.font}{ font type for graph }
\item{edge.font}{font type for graph }
\item{rank.direction}{ Which direction should the graph be oriented }
\item{digits}{Number of digits to draw}
\item{title}{ Title of graphic }
\item{\dots}{ other options to pass to Rgraphviz }
}
\details{
The recommended function is structure.diagram which does not use Rgraphviz but which does not produce dot code either.
All three structure function return a matrix of commands suitable for using in the sem or lavaan packages. (Specify errors=TRUE to get code that will run directly in the sem package.)
The structure.graph output can be directed to an output file for post processing using the dot graphic language but requires that Rgraphviz is installed.
lavaan.diagram will create sem, cfa, or mimic diagrams depending upon the lavaan input.
sem.diagram and sem.graph convert the output from a simple CFA done with the sem package and draw them using structure.diagram or structure.graph.
lavaan.diagram converts the output (fit) from a simple CFA done with the lavaan package and draws them using structure.diagram.
The figure is organized to show the appropriate paths between:
The correlations between the X variables (if Rx is specified) \cr
The X variables and their latent factors (if fx is specified) \cr
The latent X and the latent Y (if Phi is specified) \cr
The latent Y and the observed Y (if fy is specified) \cr
The correlations between the Y variables (if Ry is specified)\cr
A confirmatory factor model would specify just fx and Phi, a structural model would include fx, Phi, and fy. The raw correlations could be shown by just including Rx and Ry.
\code{\link{lavaan.diagram}} may be called from the \code{\link{diagram}} function which also will call \code{\link{fa.diagram}}, \code{\link{omega.diagram}} or \code{\link{iclust.diagram}}, depending upon the class of the fit.
Other diagram functions include \code{\link{fa.diagram}}, \code{\link{omega.diagram}}. All of these functions use the various dia functions such as \code{\link{dia.rect}}, \code{\link{dia.ellipse}}, \code{\link{dia.arrow}}, \code{\link{dia.curve}}, \code{\link{dia.curved.arrow}}, and \code{\link{dia.shape}}.
}
\value{
\item{sem}{(invisible) a model matrix (partially) ready for input to John Fox's sem package. It is of class ``mod" for prettier output. }
\item{lavaan}{(invisible) A model specification for the lavaan package.}
\item{dotfile}{If out.file is specified, a dot language file suitable for using in a dot graphics program such as graphviz or Omnigraffle.}
A graphic structural diagram in the graphics window
}
\author{William Revelle}
\seealso{ \code{\link{fa.graph}}, \code{\link{omega.graph}}, \code{\link{sim.structural}} to create artificial data sets with particular structural properties.}
\examples{
#A set of measurement and structural models
#First set up the various matrices
fx <- matrix(c(.9,.8,.7,rep(0,9), .6,.7,-.8,rep(0,9),.5,.6,.4),ncol=3)
fy <- matrix(c(.9,.8,.6,rep(0,4),.6,.8,-.7),ncol=2)
Phi <- matrix(c(1,.35,0,0,0,
.35,1,.5,0,0,
0,.5, 1,0,0,
.7,-.6, 0, 1,0,
.0, 0, .4,0,1 ),ncol=5,byrow=TRUE)
#now draw a number of models
f1 <- structure.diagram(fx,main = "A measurement model for x")
f2 <- structure.diagram(fx,Phi, main = "A measurement model for x")
f3 <- structure.diagram(fy=fy, main = "A measurement model for y")
f4 <- structure.diagram(fx,Phi,fy,main="A structural path diagram")
f5 <- structure.diagram(fx,Phi,fy,main="A structural path diagram",errors=TRUE)
#a mimic model
fy <- matrix(c(.9,.8,.6,rep(0,4),.6,.8,-.7),ncol=2)
fx <- matrix(c(.6,.5,0,.4),ncol=2)
mimic <- structure.diagram(fx,fy=fy,simple=FALSE,errors=TRUE, main="A mimic diagram")
fy <- matrix(c(rep(.9,8),rep(0,16),rep(.8,8)),ncol=2)
structure.diagram(fx,fy=fy)
#symbolic input
X2 <- matrix(c("a",0,0,"b","e1",0,0,"e2"),ncol=4)
colnames(X2) <- c("X1","X2","E1","E2")
phi2 <- diag(1,4,4)
phi2[2,1] <- phi2[1,2] <- "r"
f2 <- structure.diagram(X2,Phi=phi2,errors=FALSE,main="A symbolic model")
#symbolic input with error
X2 <- matrix(c("a",0,0,"b"),ncol=2)
colnames(X2) <- c("X1","X2")
phi2 <- diag(1,2,2)
phi2[2,1] <- phi2[1,2] <- "r"
f3 <- structure.diagram(X2,Phi=phi2,main="an alternative representation",e.size=.4)
#and yet another one
X6 <- matrix(c("a","b","c",rep(0,6),"d","e","f"),nrow=6)
colnames(X6) <- c("L1","L2")
rownames(X6) <- c("x1","x2","x3","x4","x5","x6")
Y3 <- matrix(c("u","w","z"),ncol=1)
colnames(Y3) <- "Y"
rownames(Y3) <- c("y1","y2","y3")
phi21 <- matrix(c(1,0,"r1",0,1,"r2",0,0,1),ncol=3)
colnames(phi21) <- rownames(phi21) <- c("L1","L2","Y")
f4 <- structure.diagram(X6,phi21,Y3)
###the following example is not run but is included to show how to work with lavaan
\donttest{
library(lavaan)
mod.1 <- 'A =~ A1 + A2 + A3 + A4 + A5
C =~ C1 + C2 + C3 + C4 + C5
E =~ E1 +E2 + E3 + E4 +E5'
fit.1 <- sem(mod.1,psychTools::bfi[complete.cases(psychTools::bfi),],std.lv=TRUE)
lavaan.diagram(fit.1)
#compare with
f3 <- fa(psychTools::bfi[complete.cases(psychTools::bfi),1:15],3)
fa.diagram(f3)
mod.3 <- 'A =~ A1 + A2 + A3 + A4 + A5
C =~ C1 + C2 + C3 + C4 + C5
E =~ E1 +E2 + E3 + E4 +E5
A ~ age + gender
C ~ age + gender
E ~ age + gender'
fit.3 <- sem(mod.3,psychTools::bfi[complete.cases(psychTools::bfi),],std.lv=TRUE)
lavaan.diagram(fit.3, cut=0,simple=FALSE,main="mimic model")
}
# and finally, a regression model
X7 <- matrix(c("a","b","c","d","e","f"),nrow=6)
f5 <- structure.diagram(X7,regression=TRUE,main = "Regression model")
#and a really messy regression model
x8 <- c("b1","b2","b3")
r8 <- matrix(c(1,"r12","r13","r12",1,"r23","r13","r23",1),ncol=3)
f6<- structure.diagram(x8,Phi=r8,regression=TRUE,main="Regression model")
}
\keyword{multivariate }
\keyword{hplot }
|
/man/structure.diagram.Rd
|
no_license
|
cran/psych
|
R
| false
| false
| 9,740
|
rd
|
\name{structure.diagram}
\alias{structure.diagram}
\alias{structure.graph}
\alias{structure.sem}
\alias{lavaan.diagram}
\alias{sem.diagram}
\alias{sem.graph}
\title{Draw a structural equation model specified by two measurement models and a structural model}
\description{Graphic presentations of structural equation models are a very useful way to conceptualize sem and confirmatory factor models. Given a measurement model on x (xmodel) and on y (ymodel) as well as a path model connecting x and y (phi), draw the graph. If the ymodel is not specified, just draw the measurement model (xmodel + phi). If the Rx or Ry matrices are specified, show the correlations between the x variables, or y variables.
Perhaps even more usefully, the function returns a model appropriate for running directly in the \emph{sem package} written by John Fox or the \emph{lavaan} package by Yves Rosseel.  For this option to work directly, it is necessary to specify that errors=TRUE.
Input can be specified as matrices or the output from \code{\link{fa}}, factanal, or a rotation package such as \emph{GPArotation}.
For symbolic graphs, the input matrices can be character strings or mixtures of character strings and numeric vectors.
As an option, for those without Rgraphviz installed, \code{\link{structure.sem}} will just create the sem model and skip the graph. (This functionality is now included in \code{\link{structure.diagram}}.)
structure.diagram will draw the diagram without using Rgraphviz and is probably the preferred option. structure.graph will be removed eventually.
\code{\link{lavaan.diagram}} will draw either cfa or sem results from the lavaan package. It has been tested for cfa, sem and mimic type output. It takes the output object from \emph{lavaan} and then calls \code{\link{structure.diagram}}.
}
\usage{
structure.diagram(fx, Phi=NULL,fy=NULL,labels=NULL,cut=.3,errors=FALSE,simple=TRUE,
regression=FALSE,lr=TRUE,Rx=NULL,Ry=NULL,digits=1,e.size=.1,
main="Structural model", ...)
structure.graph(fx, Phi = NULL,fy = NULL, out.file = NULL, labels = NULL, cut = 0.3,
errors=TRUE, simple=TRUE,regression=FALSE, size = c(8, 6),
node.font = c("Helvetica", 14), edge.font = c("Helvetica", 10),
rank.direction = c("RL", "TB", "LR", "BT"), digits = 1,
title = "Structural model", ...)
structure.sem(fx, Phi = NULL, fy = NULL,out.file = NULL, labels = NULL,
cut = 0.3, errors=TRUE, simple=TRUE,regression=FALSE)
lavaan.diagram(fit,main,e.size=.1,...)
sem.diagram(fit,main="A SEM from the sem package",...)
sem.graph(fit,out.file=NULL,main= "A SEM from the sem package",...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{fx}{a factor model on the x variables. }
\item{Phi}{A matrix of directed relationships. Lower diagonal values are drawn. If the upper diagonal values match the lower diagonal, two headed arrows are drawn. For a single, directed path, just the value may be specified. }
\item{fy}{a factor model on the y variables (can be empty) }
\item{Rx}{The correlation matrix among the x variables}
\item{Ry}{The correlation matrix among the y variables}
\item{out.file}{name a file to send dot language instructions. }
\item{labels}{variable labels if not specified as colnames for the matrices}
\item{cut}{Draw paths for values > cut }
\item{fit}{The output from a lavaan cfa or sem}
 \item{errors}{draw an error term for observed variables }
\item{simple}{Just draw one path per x or y variable }
\item{regression}{Draw a regression diagram (observed variables cause Y)}
\item{lr}{Direction of diagram is from left to right (lr=TRUE, default) or from bottom to top (lr=FALSE) }
\item{e.size}{size of the ellipses in structure.diagram}
\item{main}{main title of diagram}
\item{size}{page size of graphic }
\item{node.font}{ font type for graph }
\item{edge.font}{font type for graph }
\item{rank.direction}{ Which direction should the graph be oriented }
\item{digits}{Number of digits to draw}
\item{title}{ Title of graphic }
\item{\dots}{ other options to pass to Rgraphviz }
}
\details{
The recommended function is structure.diagram which does not use Rgraphviz but which does not produce dot code either.
All three structure function return a matrix of commands suitable for using in the sem or lavaan packages. (Specify errors=TRUE to get code that will run directly in the sem package.)
The structure.graph output can be directed to an output file for post processing using the dot graphic language but requires that Rgraphviz is installed.
lavaan.diagram will create sem, cfa, or mimic diagrams depending upon the lavaan input.
sem.diagram and sem.graph convert the output from a simple CFA done with the sem package and draw them using structure.diagram or structure.graph.
lavaan.diagram converts the output (fit) from a simple CFA done with the lavaan package and draws them using structure.diagram.
The figure is organized to show the appropriate paths between:
The correlations between the X variables (if Rx is specified) \cr
The X variables and their latent factors (if fx is specified) \cr
The latent X and the latent Y (if Phi is specified) \cr
The latent Y and the observed Y (if fy is specified) \cr
The correlations between the Y variables (if Ry is specified)\cr
A confirmatory factor model would specify just fx and Phi, a structural model would include fx, Phi, and fy. The raw correlations could be shown by just including Rx and Ry.
\code{\link{lavaan.diagram}} may be called from the \code{\link{diagram}} function which also will call \code{\link{fa.diagram}}, \code{\link{omega.diagram}} or \code{\link{iclust.diagram}}, depending upon the class of the fit.
Other diagram functions include \code{\link{fa.diagram}}, \code{\link{omega.diagram}}. All of these functions use the various dia functions such as \code{\link{dia.rect}}, \code{\link{dia.ellipse}}, \code{\link{dia.arrow}}, \code{\link{dia.curve}}, \code{\link{dia.curved.arrow}}, and \code{\link{dia.shape}}.
}
\value{
\item{sem}{(invisible) a model matrix (partially) ready for input to John Fox's sem package. It is of class ``mod" for prettier output. }
\item{lavaan}{(invisible) A model specification for the lavaan package.}
\item{dotfile}{If out.file is specified, a dot language file suitable for using in a dot graphics program such as graphviz or Omnigraffle.}
A graphic structural diagram in the graphics window
}
\author{William Revelle}
\seealso{ \code{\link{fa.graph}}, \code{\link{omega.graph}}, \code{\link{sim.structural}} to create artificial data sets with particular structural properties.}
\examples{
#A set of measurement and structural models
#First set up the various matrices
fx <- matrix(c(.9,.8,.7,rep(0,9), .6,.7,-.8,rep(0,9),.5,.6,.4),ncol=3)
fy <- matrix(c(.9,.8,.6,rep(0,4),.6,.8,-.7),ncol=2)
Phi <- matrix(c(1,.35,0,0,0,
.35,1,.5,0,0,
0,.5, 1,0,0,
.7,-.6, 0, 1,0,
.0, 0, .4,0,1 ),ncol=5,byrow=TRUE)
#now draw a number of models
f1 <- structure.diagram(fx,main = "A measurement model for x")
f2 <- structure.diagram(fx,Phi, main = "A measurement model for x")
f3 <- structure.diagram(fy=fy, main = "A measurement model for y")
f4 <- structure.diagram(fx,Phi,fy,main="A structural path diagram")
f5 <- structure.diagram(fx,Phi,fy,main="A structural path diagram",errors=TRUE)
#a mimic model
fy <- matrix(c(.9,.8,.6,rep(0,4),.6,.8,-.7),ncol=2)
fx <- matrix(c(.6,.5,0,.4),ncol=2)
mimic <- structure.diagram(fx,fy=fy,simple=FALSE,errors=TRUE, main="A mimic diagram")
fy <- matrix(c(rep(.9,8),rep(0,16),rep(.8,8)),ncol=2)
structure.diagram(fx,fy=fy)
#symbolic input
X2 <- matrix(c("a",0,0,"b","e1",0,0,"e2"),ncol=4)
colnames(X2) <- c("X1","X2","E1","E2")
phi2 <- diag(1,4,4)
phi2[2,1] <- phi2[1,2] <- "r"
f2 <- structure.diagram(X2,Phi=phi2,errors=FALSE,main="A symbolic model")
#symbolic input with error
X2 <- matrix(c("a",0,0,"b"),ncol=2)
colnames(X2) <- c("X1","X2")
phi2 <- diag(1,2,2)
phi2[2,1] <- phi2[1,2] <- "r"
f3 <- structure.diagram(X2,Phi=phi2,main="an alternative representation",e.size=.4)
#and yet another one
X6 <- matrix(c("a","b","c",rep(0,6),"d","e","f"),nrow=6)
colnames(X6) <- c("L1","L2")
rownames(X6) <- c("x1","x2","x3","x4","x5","x6")
Y3 <- matrix(c("u","w","z"),ncol=1)
colnames(Y3) <- "Y"
rownames(Y3) <- c("y1","y2","y3")
phi21 <- matrix(c(1,0,"r1",0,1,"r2",0,0,1),ncol=3)
colnames(phi21) <- rownames(phi21) <- c("L1","L2","Y")
f4 <- structure.diagram(X6,phi21,Y3)
###the following example is not run but is included to show how to work with lavaan
\donttest{
library(lavaan)
mod.1 <- 'A =~ A1 + A2 + A3 + A4 + A5
C =~ C1 + C2 + C3 + C4 + C5
E =~ E1 +E2 + E3 + E4 +E5'
fit.1 <- sem(mod.1,psychTools::bfi[complete.cases(psychTools::bfi),],std.lv=TRUE)
lavaan.diagram(fit.1)
#compare with
f3 <- fa(psychTools::bfi[complete.cases(psychTools::bfi),1:15],3)
fa.diagram(f3)
mod.3 <- 'A =~ A1 + A2 + A3 + A4 + A5
C =~ C1 + C2 + C3 + C4 + C5
E =~ E1 +E2 + E3 + E4 +E5
A ~ age + gender
C ~ age + gender
E ~ age + gender'
fit.3 <- sem(mod.3,psychTools::bfi[complete.cases(psychTools::bfi),],std.lv=TRUE)
lavaan.diagram(fit.3, cut=0,simple=FALSE,main="mimic model")
}
# and finally, a regression model
X7 <- matrix(c("a","b","c","d","e","f"),nrow=6)
f5 <- structure.diagram(X7,regression=TRUE,main = "Regression model")
#and a really messy regression model
x8 <- c("b1","b2","b3")
r8 <- matrix(c(1,"r12","r13","r12",1,"r23","r13","r23",1),ncol=3)
f6<- structure.diagram(x8,Phi=r8,regression=TRUE,main="Regression model")
}
\keyword{multivariate }
\keyword{hplot }
|
#written June 6, 2012
#modified March 14, 2023
#note that lower.tri and upper.tri return the matrix in a different order
"lowerUpper" <-
function(lower,upper=NULL,diff=FALSE) {
if(is.null(upper)) {upper <- lower #return two from one
upper[lower.tri(upper)] <- t(upper)[lower.tri(t(upper))]
lower <- t(lower)
lower[lower.tri(lower)] <- t(lower)[lower.tri(lower)]
result <- list(lower=lower,upper=upper)
} else {
if(nrow(lower) !=ncol(lower)) {stop("lower matrix must be square")}
if(nrow(upper) !=ncol(upper)) {stop("upper matrix must be square")}
if(nrow(lower) !=ncol(upper)) {stop("lower and upper matrices must have the same dimensions")}
result <- lower
colnames(result) <- colnames(upper)
rownames(result) <-rownames(lower)
if(diff) upper <- lower - upper
result [lower.tri(result)] <- upper[lower.tri(upper)]
result <- t(result)
diag(result) <- NA}
return(result)}
#revised Oct 6, 2013 to pick up row names and column names from the two matrices
|
/R/lowerUpper.R
|
no_license
|
cran/psych
|
R
| false
| false
| 980
|
r
|
#written June 6, 2012
#modified March 14, 2023
#note that lower.tri and upper.tri return the matrix in a different order
"lowerUpper" <-
function(lower,upper=NULL,diff=FALSE) {
if(is.null(upper)) {upper <- lower #return two from one
upper[lower.tri(upper)] <- t(upper)[lower.tri(t(upper))]
lower <- t(lower)
lower[lower.tri(lower)] <- t(lower)[lower.tri(lower)]
result <- list(lower=lower,upper=upper)
} else {
if(nrow(lower) !=ncol(lower)) {stop("lower matrix must be square")}
if(nrow(upper) !=ncol(upper)) {stop("upper matrix must be square")}
if(nrow(lower) !=ncol(upper)) {stop("lower and upper matrices must have the same dimensions")}
result <- lower
colnames(result) <- colnames(upper)
rownames(result) <-rownames(lower)
if(diff) upper <- lower - upper
result [lower.tri(result)] <- upper[lower.tri(upper)]
result <- t(result)
diag(result) <- NA}
return(result)}
#revised Oct 6, 2013 to pick up row names and column names from the two matrices
|
test_that("get_par_list() returns the expected list", {
unknown_pars <- list(list(par_name = "par_beta", min = 0),
list(par_name = "inv_phi", min = 0, type = "meas_par",
par_trans = "inv"))
actual <- get_par_list(unknown_pars)
expected <- list(list(par_name = "par_beta", par_trans = "exp"),
list(par_name = "phi", par_trans = c("exp", "inv")))
expect_equal(actual, expected)
})
test_that("transform_pars() returns the expected text", {
unknown_pars <- list(list(par_name = "par_beta", min = 0),
list(par_name = "inv_phi", min = 0))
actual_text <- transform_pars(unknown_pars)
expected_text <- paste(
" pars[[1]] <- exp(pars[[1]])",
" pars[[2]] <- exp(pars[[2]])", sep = "\n")
expect_equal(actual_text, expected_text)
unknown_pars <- list(list(par_name = "par_beta"),
list(par_name = "inv_phi"))
actual_text <- transform_pars(unknown_pars)
expected_text <- paste(
" pars[[1]] <- pars[[1]]",
" pars[[2]] <- pars[[2]]", sep = "\n")
expect_equal(actual_text, expected_text)
unknown_pars <- list(list(par_name = "par_beta"),
list(par_name = "inv_phi", min = 0))
expected_text <- paste(
" pars[[1]] <- pars[[1]]",
" pars[[2]] <- exp(pars[[2]])", sep = "\n")
actual_text <- transform_pars(unknown_pars)
expect_equal(actual_text, expected_text)
# Expit
unknown_pars <- list(list(par_name = "par_beta", min = 0),
list(par_name = "par_rho", min = 0, max = 1),
list(par_name = "inv_phi", min = 0))
actual_text <- transform_pars(unknown_pars)
expected_text <- paste(
" pars[[1]] <- exp(pars[[1]])",
" pars[[2]] <- expit(pars[[2]])",
" pars[[3]] <- exp(pars[[3]])",
sep = "\n")
expect_equal(actual_text, expected_text)
})
# assign_pars_text ()-------------------------------------------------------------
test_that("assign_pars_text() returns the expected text", {
unk_constants <- list(list(par_name = "par_beta", min = 0),
list(par_name = "par_rho", min = 0, max = 1))
actual_text <- assign_pars_text(unk_constants)
expected_text <- paste(' ds_inputs$consts["par_beta"] <- pars[[1]]',
' ds_inputs$consts["par_rho"] <- pars[[2]]', sep = "\n")
expect_equal(actual_text, expected_text)
})
# get_model_run_text() ---------------------------------------------------------
test_that("get_model_run_text returns the expected text", {
actual_text <- get_model_run_text("rk4")
expected_text <- paste(
" readsdr_env <- list2env(as.list(ds_inputs$consts))",
" ds_inputs$stocks <- purrr::map_dbl(ds_inputs$stocks, function(x) {",
" eval(parse(text = x), envir = readsdr_env)",
" })",
' o <- sd_simulate(ds_inputs, integ_method = "rk4")',
" o_df <- data.frame(o)",
sep = "\n")
expect_equal(actual_text, expected_text)
})
# get_meas_model_text() --------------------------------------------------------
test_that("get_meas_model_text() returns the expected text", {
# As-is measurement
meas_data_mdl <- list(list(formula = "y ~ poisson(C)",
measurements = 1:10))
n_consts <- 1
unknown_pars <- list(list(par_name = "par_beta", min = 0))
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars,
FALSE)
expected_text <- paste(
'sim_data_1 <- dplyr::filter(o_df, time - trunc(time) == 0)',
"loglik_1 <- sum(dpois(data_1, lambda = sim_data_1[, 'C'] + 1e-05, log = TRUE))",
"loglik <- loglik_1",
"loglik", sep = "\n")
expect_equal(actual_text, expected_text)
# Net flow
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
n_consts <- 1
unknown_pars <- list(list(par_name = "par_beta", min = 0),
list(par_name = "inv_phi", min = 0, par_trans = "inv"))
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars,
FALSE)
expected_text <- paste(
'sim_data_1 <- sd_net_change(o_df, "C")',
"loglik_1 <- sum(dnbinom(data_1, mu = sim_data_1[, 'value'] + 1e-05, size = 1/pars[[2]], log = TRUE))",
"loglik <- loglik_1",
"loglik",
sep = "\n")
expect_equal(actual_text, expected_text)
# Reflect log-lik
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars, TRUE)
expected_text <- paste(
'sim_data_1 <- sd_net_change(o_df, "C")',
"loglik_1 <- sum(dnbinom(data_1, mu = sim_data_1[, 'value'] + 1e-05, size = 1/pars[[2]], log = TRUE))",
"loglik <- loglik_1",
"-loglik",
sep = "\n")
})
test_that("get_meas_model_text() handles a known par in the measurement model", {
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), 10)",
measurements = 1:10))
n_consts <- 1
unknown_pars <- list(list(par_name = "par_beta", min = 0))
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars,
FALSE)
expected_text <- paste(
'sim_data_1 <- sd_net_change(o_df, "C")',
"loglik_1 <- sum(dnbinom(data_1, mu = sim_data_1[, 'value'] + 1e-05, size = 10, log = TRUE))",
"loglik <- loglik_1",
"loglik",
sep = "\n")
expect_equal(actual_text, expected_text)
})
test_that("get_meas_model_text() handles fixed pars", {
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
n_consts <- 1
unknown_pars <- list(list(par_name = "par_beta", min = 0))
supplied_pars <- "phi"
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars,
FALSE, supplied_pars)
expected_text <- paste(
'sim_data_1 <- sd_net_change(o_df, "C")',
"loglik_1 <- sum(dnbinom(data_1, mu = sim_data_1[, 'value'] + 1e-05, size = phi, log = TRUE))",
"loglik <- loglik_1",
"loglik",
sep = "\n")
expect_equal(actual_text, expected_text)
})
test_that("get_meas_model_text() handles multiple measurements", {
n_consts <- 1
meas_data_mdl <- list(list(formula = "y_A ~ neg_binomial_2(net_flow(C_A), phi)",
measurements = 1:10),
list(formula = "y_B ~ neg_binomial_2(net_flow(C_B), phi)",
measurements = 10:20))
unknown_pars <- list(list(par_name = "par_beta", min = 0),
list(par_name = "inv_phi", min = 0, par_trans = "inv"))
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars,
FALSE)
expected_text <- paste(
'sim_data_1 <- sd_net_change(o_df, "C_A")',
"loglik_1 <- sum(dnbinom(data_1, mu = sim_data_1[, 'value'] + 1e-05, size = 1/pars[[2]], log = TRUE))",
'sim_data_2 <- sd_net_change(o_df, "C_B")',
"loglik_2 <- sum(dnbinom(data_2, mu = sim_data_2[, 'value'] + 1e-05, size = 1/pars[[2]], log = TRUE))",
'loglik <- loglik_1 + loglik_2',
'loglik', sep = "\n")
expect_equal(actual_text, expected_text)
})
# sd_loglik_fun() --------------------------------------------------------------
test_that("sd_loglik_fun() returns the expected object", {
filepath <- system.file("models/", "SEIR.stmx", package = "readsdr")
unknown_pars <- list(list(par_name = "par_beta", min = 0))
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl, neg_log = FALSE,
start_time = 0, stop_time = 10, timestep = 1/32)
actual_val <- fun_obj$fun(c(1, 0.1))
expected_val <- -32.47283
expect_equal(actual_val, expected_val, tolerance = 1e-4)
actual_list <- fun_obj$par_list
expected_list <- list(list(par_name = "par_beta", par_trans = "exp"),
list(par_name = "phi", par_trans = c("exp", "inv")))
expect_equal(actual_list, expected_list)
# Test negative loglik
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl,
neg_log = TRUE, start_time = 0, stop_time = 10,
timestep = 1/32)
actual_val <- fun_obj$fun(c(1, 0.1))
expected_val <- 32.47283
expect_equal(actual_val, expected_val, tolerance = 1e-4)
})
# Multiple meas
test_that("sd_loglik_fun() handles multiple measurements", {
filepath <- system.file("models/", "SEIR_age.stmx", package = "readsdr")
unknown_pars <- list(list(par_name = "k_AA", min = 0),
list(par_name = "par_rho", min = 0, max = 1))
meas_data_mdl <- list(list(formula = "y_A ~ neg_binomial_2(net_flow(C_A), phi)",
measurements = 1:10),
list(formula = "y_B ~ neg_binomial_2(net_flow(C_B), phi)",
measurements = 11:20),
list(formula = "y_C ~ neg_binomial_2(net_flow(C_C), phi)",
measurements = 21:30),
list(formula = "y_D ~ neg_binomial_2(net_flow(C_D), phi)",
measurements = 31:40))
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl, neg_log = FALSE,
start_time = 0, stop_time = 10, timestep = 1/32)
actual_val <- fun_obj$fun(c(4, 0.5,0.1))
expected_val <- -1168.551
expect_equal(actual_val, expected_val, tolerance = 1e-4)
})
# Fixed pars
test_that("sd_loglik_fun() handles fixed pars", {
filepath <- system.file("models/", "SEIR.stmx", package = "readsdr")
unknown_pars <- list(list(par_name = "par_beta", min = 0))
supplied_pars <- c("par_rho", "I0", "phi")
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl, neg_log = FALSE,
supplied_pars = supplied_pars, start_time = 0,
stop_time = 10, timestep = 1/32)
actual_val <- fun_obj$fun(c(1), list(par_rho = 0.75,
I0 = 1,
inv_phi = exp(0.1)))
expected_val <- -32.47283
expect_equal(actual_val, expected_val, tolerance = 1e-4)
actual_list <- fun_obj$par_list
expected_list <- list(list(par_name = "par_beta", par_trans = "exp"),
list(par_name = "phi", par_trans = c("exp", "inv")))
})
test_that("sd_loglik_fun() overrides sim params", {
filepath <- system.file("models/", "SEIR.stmx", package = "readsdr")
unknown_pars <- list(list(par_name = "par_beta", min = 0))
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl,
start_time = 0, stop_time = 10, timestep = 1/128)
expect_obj <- list(start = 0,
stop = 10,
dt = 1/128)
actual_obj <- fun_obj$ds_inputs$sim_params
expect_equal(actual_obj, expect_obj)
})
test_that("sd_loglik_fun() overrides consts", {
filepath <- system.file("models/", "SEIR.stmx", package = "readsdr")
unknown_pars <- list(list(par_name = "par_beta", min = 0))
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
N_val <- 5234
const_list <- list(N = N_val)
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl,
start_time = 0, stop_time = 10, timestep = 1/128,
const_list = const_list)
expect_equal(fun_obj$ds_inputs$consts[["N"]], N_val)
})
|
/tests/testthat/test-loglik_fun.R
|
permissive
|
jandraor/readsdr
|
R
| false
| false
| 12,110
|
r
|
test_that("get_par_list() returns the expected list", {
unknown_pars <- list(list(par_name = "par_beta", min = 0),
list(par_name = "inv_phi", min = 0, type = "meas_par",
par_trans = "inv"))
actual <- get_par_list(unknown_pars)
expected <- list(list(par_name = "par_beta", par_trans = "exp"),
list(par_name = "phi", par_trans = c("exp", "inv")))
expect_equal(actual, expected)
})
test_that("transform_pars() returns the expected text", {
unknown_pars <- list(list(par_name = "par_beta", min = 0),
list(par_name = "inv_phi", min = 0))
actual_text <- transform_pars(unknown_pars)
expected_text <- paste(
" pars[[1]] <- exp(pars[[1]])",
" pars[[2]] <- exp(pars[[2]])", sep = "\n")
expect_equal(actual_text, expected_text)
unknown_pars <- list(list(par_name = "par_beta"),
list(par_name = "inv_phi"))
actual_text <- transform_pars(unknown_pars)
expected_text <- paste(
" pars[[1]] <- pars[[1]]",
" pars[[2]] <- pars[[2]]", sep = "\n")
expect_equal(actual_text, expected_text)
unknown_pars <- list(list(par_name = "par_beta"),
list(par_name = "inv_phi", min = 0))
expected_text <- paste(
" pars[[1]] <- pars[[1]]",
" pars[[2]] <- exp(pars[[2]])", sep = "\n")
actual_text <- transform_pars(unknown_pars)
expect_equal(actual_text, expected_text)
# Expit
unknown_pars <- list(list(par_name = "par_beta", min = 0),
list(par_name = "par_rho", min = 0, max = 1),
list(par_name = "inv_phi", min = 0))
actual_text <- transform_pars(unknown_pars)
expected_text <- paste(
" pars[[1]] <- exp(pars[[1]])",
" pars[[2]] <- expit(pars[[2]])",
" pars[[3]] <- exp(pars[[3]])",
sep = "\n")
expect_equal(actual_text, expected_text)
})
# assign_pars_text ()-------------------------------------------------------------
test_that("assign_pars_text() returns the expected text", {
unk_constants <- list(list(par_name = "par_beta", min = 0),
list(par_name = "par_rho", min = 0, max = 1))
actual_text <- assign_pars_text(unk_constants)
expected_text <- paste(' ds_inputs$consts["par_beta"] <- pars[[1]]',
' ds_inputs$consts["par_rho"] <- pars[[2]]', sep = "\n")
expect_equal(actual_text, expected_text)
})
# get_model_run_text() ---------------------------------------------------------
test_that("get_model_run_text returns the expected text", {
actual_text <- get_model_run_text("rk4")
expected_text <- paste(
" readsdr_env <- list2env(as.list(ds_inputs$consts))",
" ds_inputs$stocks <- purrr::map_dbl(ds_inputs$stocks, function(x) {",
" eval(parse(text = x), envir = readsdr_env)",
" })",
' o <- sd_simulate(ds_inputs, integ_method = "rk4")',
" o_df <- data.frame(o)",
sep = "\n")
expect_equal(actual_text, expected_text)
})
# get_meas_model_text() --------------------------------------------------------
test_that("get_meas_model_text() returns the expected text", {
# As-is measurement
meas_data_mdl <- list(list(formula = "y ~ poisson(C)",
measurements = 1:10))
n_consts <- 1
unknown_pars <- list(list(par_name = "par_beta", min = 0))
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars,
FALSE)
expected_text <- paste(
'sim_data_1 <- dplyr::filter(o_df, time - trunc(time) == 0)',
"loglik_1 <- sum(dpois(data_1, lambda = sim_data_1[, 'C'] + 1e-05, log = TRUE))",
"loglik <- loglik_1",
"loglik", sep = "\n")
expect_equal(actual_text, expected_text)
# Net flow
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
n_consts <- 1
unknown_pars <- list(list(par_name = "par_beta", min = 0),
list(par_name = "inv_phi", min = 0, par_trans = "inv"))
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars,
FALSE)
expected_text <- paste(
'sim_data_1 <- sd_net_change(o_df, "C")',
"loglik_1 <- sum(dnbinom(data_1, mu = sim_data_1[, 'value'] + 1e-05, size = 1/pars[[2]], log = TRUE))",
"loglik <- loglik_1",
"loglik",
sep = "\n")
expect_equal(actual_text, expected_text)
# Reflect log-lik
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars, TRUE)
expected_text <- paste(
'sim_data_1 <- sd_net_change(o_df, "C")',
"loglik_1 <- sum(dnbinom(data_1, mu = sim_data_1[, 'value'] + 1e-05, size = 1/pars[[2]], log = TRUE))",
"loglik <- loglik_1",
"-loglik",
sep = "\n")
})
test_that("get_meas_model_text() handles a known par in the measurement model", {
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), 10)",
measurements = 1:10))
n_consts <- 1
unknown_pars <- list(list(par_name = "par_beta", min = 0))
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars,
FALSE)
expected_text <- paste(
'sim_data_1 <- sd_net_change(o_df, "C")',
"loglik_1 <- sum(dnbinom(data_1, mu = sim_data_1[, 'value'] + 1e-05, size = 10, log = TRUE))",
"loglik <- loglik_1",
"loglik",
sep = "\n")
expect_equal(actual_text, expected_text)
})
test_that("get_meas_model_text() handles fixed pars", {
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
n_consts <- 1
unknown_pars <- list(list(par_name = "par_beta", min = 0))
supplied_pars <- "phi"
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars,
FALSE, supplied_pars)
expected_text <- paste(
'sim_data_1 <- sd_net_change(o_df, "C")',
"loglik_1 <- sum(dnbinom(data_1, mu = sim_data_1[, 'value'] + 1e-05, size = phi, log = TRUE))",
"loglik <- loglik_1",
"loglik",
sep = "\n")
expect_equal(actual_text, expected_text)
})
test_that("get_meas_model_text() handles multiple measurements", {
n_consts <- 1
meas_data_mdl <- list(list(formula = "y_A ~ neg_binomial_2(net_flow(C_A), phi)",
measurements = 1:10),
list(formula = "y_B ~ neg_binomial_2(net_flow(C_B), phi)",
measurements = 10:20))
unknown_pars <- list(list(par_name = "par_beta", min = 0),
list(par_name = "inv_phi", min = 0, par_trans = "inv"))
actual_text <- get_meas_model_text(meas_data_mdl, n_consts, unknown_pars,
FALSE)
expected_text <- paste(
'sim_data_1 <- sd_net_change(o_df, "C_A")',
"loglik_1 <- sum(dnbinom(data_1, mu = sim_data_1[, 'value'] + 1e-05, size = 1/pars[[2]], log = TRUE))",
'sim_data_2 <- sd_net_change(o_df, "C_B")',
"loglik_2 <- sum(dnbinom(data_2, mu = sim_data_2[, 'value'] + 1e-05, size = 1/pars[[2]], log = TRUE))",
'loglik <- loglik_1 + loglik_2',
'loglik', sep = "\n")
expect_equal(actual_text, expected_text)
})
# sd_loglik_fun() --------------------------------------------------------------
test_that("sd_loglik_fun() returns the expected object", {
filepath <- system.file("models/", "SEIR.stmx", package = "readsdr")
unknown_pars <- list(list(par_name = "par_beta", min = 0))
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl, neg_log = FALSE,
start_time = 0, stop_time = 10, timestep = 1/32)
actual_val <- fun_obj$fun(c(1, 0.1))
expected_val <- -32.47283
expect_equal(actual_val, expected_val, tolerance = 1e-4)
actual_list <- fun_obj$par_list
expected_list <- list(list(par_name = "par_beta", par_trans = "exp"),
list(par_name = "phi", par_trans = c("exp", "inv")))
expect_equal(actual_list, expected_list)
# Test negative loglik
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl,
neg_log = TRUE, start_time = 0, stop_time = 10,
timestep = 1/32)
actual_val <- fun_obj$fun(c(1, 0.1))
expected_val <- 32.47283
expect_equal(actual_val, expected_val, tolerance = 1e-4)
})
# Multiple meas
test_that("sd_loglik_fun() handles multiple measurements", {
filepath <- system.file("models/", "SEIR_age.stmx", package = "readsdr")
unknown_pars <- list(list(par_name = "k_AA", min = 0),
list(par_name = "par_rho", min = 0, max = 1))
meas_data_mdl <- list(list(formula = "y_A ~ neg_binomial_2(net_flow(C_A), phi)",
measurements = 1:10),
list(formula = "y_B ~ neg_binomial_2(net_flow(C_B), phi)",
measurements = 11:20),
list(formula = "y_C ~ neg_binomial_2(net_flow(C_C), phi)",
measurements = 21:30),
list(formula = "y_D ~ neg_binomial_2(net_flow(C_D), phi)",
measurements = 31:40))
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl, neg_log = FALSE,
start_time = 0, stop_time = 10, timestep = 1/32)
actual_val <- fun_obj$fun(c(4, 0.5,0.1))
expected_val <- -1168.551
expect_equal(actual_val, expected_val, tolerance = 1e-4)
})
# Fixed pars
test_that("sd_loglik_fun() handles fixed pars", {
filepath <- system.file("models/", "SEIR.stmx", package = "readsdr")
unknown_pars <- list(list(par_name = "par_beta", min = 0))
supplied_pars <- c("par_rho", "I0", "phi")
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl, neg_log = FALSE,
supplied_pars = supplied_pars, start_time = 0,
stop_time = 10, timestep = 1/32)
actual_val <- fun_obj$fun(c(1), list(par_rho = 0.75,
I0 = 1,
inv_phi = exp(0.1)))
expected_val <- -32.47283
expect_equal(actual_val, expected_val, tolerance = 1e-4)
actual_list <- fun_obj$par_list
expected_list <- list(list(par_name = "par_beta", par_trans = "exp"),
list(par_name = "phi", par_trans = c("exp", "inv")))
})
test_that("sd_loglik_fun() overrides sim params", {
filepath <- system.file("models/", "SEIR.stmx", package = "readsdr")
unknown_pars <- list(list(par_name = "par_beta", min = 0))
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl,
start_time = 0, stop_time = 10, timestep = 1/128)
expect_obj <- list(start = 0,
stop = 10,
dt = 1/128)
actual_obj <- fun_obj$ds_inputs$sim_params
expect_equal(actual_obj, expect_obj)
})
test_that("sd_loglik_fun() overrides consts", {
filepath <- system.file("models/", "SEIR.stmx", package = "readsdr")
unknown_pars <- list(list(par_name = "par_beta", min = 0))
meas_data_mdl <- list(list(formula = "y ~ neg_binomial_2(net_flow(C), phi)",
measurements = 1:10))
N_val <- 5234
const_list <- list(N = N_val)
fun_obj <- sd_loglik_fun(filepath, unknown_pars, meas_data_mdl,
start_time = 0, stop_time = 10, timestep = 1/128,
const_list = const_list)
expect_equal(fun_obj$ds_inputs$consts[["N"]], N_val)
})
|
# Query Ensembl (human genes, US-East mirror) for all genes on chromosome 20,
# including their OMIM morbid-map annotations, then find genes <= 100 kb long
# that overlap the region chr20:5,000,000-5,300,000.
library(biomaRt)
human.mart <- useMart(biomart = "ENSEMBL_MART_ENSEMBL",
                      dataset = "hsapiens_gene_ensembl",
                      host = "useast.ensembl.org")
# Inspect which OMIM-related attributes the mart exposes (names start "mim")
listAttributes(human.mart)[grep("^mim", listAttributes(human.mart)$name), ]
# Pull coordinates, biotype and OMIM annotation for every chromosome-20 gene
dat <- getBM(attributes = c("hgnc_symbol", "chromosome_name", "start_position",
                            "end_position", "band", "strand", "gene_biotype", "ensembl_gene_id",
                            "mim_morbid_accession", "mim_morbid_description"),
             filters = c("chromosome_name"),
             values = 20,
             mart = human.mart,
             uniqueRows = T)
library(GenomicRanges)
# Build a GRanges object from the biomaRt result, carrying the annotation
# columns along with the ranges
gr1 = with(dat, GRanges(chromosome_name,
                        IRanges(start=start_position, end=end_position, names = hgnc_symbol,
                                band=band, gene_biotype=gene_biotype,
                                ensembl_gene_id=ensembl_gene_id,
                                mim_morbid_accession=mim_morbid_accession,
                                mim_morbid_description=mim_morbid_description),
                        strand=strand))
# Keep only genes no longer than 100 kb
gr1.new <- gr1[BiocGenerics::width(gr1) <= 100000]
gr1.new
# Target region chr20:5,000,000-5,300,000
gr2 <- GRanges(seqnames = 20,
               ranges = IRanges(start = 5000000, end = 5300000))
# Genes overlapping the region at all vs. genes fully contained in it
partial.overlaps <- findOverlaps(query = gr1.new, subject = gr2, type = "any")
findOverlaps(query = gr1.new, subject = gr2, type = "within")
final.answer <- gr1.new[queryHits(partial.overlaps)]
# Restrict to genes that carry an OMIM morbid annotation
final.answer[!is.na(final.answer$mim_morbid_accession)]
## The only OMIM gene is PCNA.  The disease is ATLD2
## (ATAXIA-TELANGIECTASIA-LIKE DISORDER 2), which affects excision repair,
## so it makes sense that this DNA-replication gene is involved!
|
/R/Genome Analysis Class/part3_2.R
|
no_license
|
BJWiley233/Practical-Computer-Concepts-Files
|
R
| false
| false
| 1,763
|
r
|
# Retrieve gene annotations for human chromosome 20 from Ensembl and
# intersect OMIM disease genes with a region of interest.
library(biomaRt)

# Connect to the human gene dataset on the US-East Ensembl mirror.
human.mart <- useMart(biomart = "ENSEMBL_MART_ENSEMBL",
                      dataset = "hsapiens_gene_ensembl",
                      host = "useast.ensembl.org")

# Show which OMIM ("mim*") attributes this mart exposes.
listAttributes(human.mart)[grep("^mim", listAttributes(human.mart)$name), ]

# Gene coordinates plus OMIM morbid annotations for chromosome 20.
dat <- getBM(attributes = c("hgnc_symbol", "chromosome_name", "start_position",
                            "end_position", "band", "strand", "gene_biotype", "ensembl_gene_id",
                            "mim_morbid_accession", "mim_morbid_description"),
             filters = c("chromosome_name"),
             values = 20,
             mart = human.mart,
             uniqueRows = TRUE)  # spell out TRUE; `T` is reassignable

library(GenomicRanges)
# Build a GRanges object, carrying the annotation columns as metadata.
gr1 <- with(dat, GRanges(chromosome_name,
                         IRanges(start=start_position, end=end_position, names = hgnc_symbol,
                                 band=band, gene_biotype=gene_biotype,
                                 ensembl_gene_id=ensembl_gene_id,
                                 mim_morbid_accession=mim_morbid_accession,
                                 mim_morbid_description=mim_morbid_description),
                         strand=strand))

# Keep genes no longer than 100 kb.
gr1.new <- gr1[BiocGenerics::width(gr1) <= 100000]
gr1.new

# Region of interest: chr20:5,000,000-5,300,000.
gr2 <- GRanges(seqnames = 20,
               ranges = IRanges(start = 5000000, end = 5300000))

# Genes overlapping the region at all, vs. fully contained within it.
partial.overlaps <- findOverlaps(query = gr1.new, subject = gr2, type = "any")
findOverlaps(query = gr1.new, subject = gr2, type = "within")

# Overlapping genes that carry an OMIM morbid annotation.
final.answer <- gr1.new[queryHits(partial.overlaps)]
final.answer[!is.na(final.answer$mim_morbid_accession)]
## The only OMIM gene is PCNA. The disease is ATLD2
## for ATAXIA-TELANGIECTASIA-LIKE DISORDER 2
## which effect excision repair so it makes sense that it
## is involved with gene in DNA replication!
|
#' Add a GRN to a dynwrap object
#'
#' @inheritParams common_param
#' @param regulatory_network A data frame consisting of three columns: `"regulator"`, `"target"`, `"strength"`.
#' @param regulatory_network_sc Optionally, a data frame consisting of four columns: `"cell_id"`, `"regulator"`, `"target"`, `"strength"`.
#' @param regulators The feature ids of the regulators.
#' @param targets The feature ids of the targets.
#' @param ... Extra arguments to be saved in the model.
#'
#' @return A dynwrap object with the regulatory network added.
#'
#' @export
add_regulatory_network <- function(dataset, regulatory_network, regulatory_network_sc = NULL, regulators = NULL, targets = NULL, ...) {
  # check regulatory network
  assert_that(
    is.data.frame(regulatory_network),
    regulatory_network %has_names% c("regulator", "target", "strength"),
    is.character(regulatory_network$regulator) || is.factor(regulatory_network$regulator),
    is.character(regulatory_network$target) || is.factor(regulatory_network$target),
    is.numeric(regulatory_network$strength),
    !is.null(regulators),
    !is.null(targets),
    all(regulatory_network$regulator %in% regulators),
    all(regulatory_network$target %in% targets)
  )
  # normalise regulator/target columns to factors with the full level sets,
  # so downstream code can rely on consistent levels
  if (!is.factor(regulatory_network$regulator)) {
    regulatory_network$regulator <- factor(regulatory_network$regulator, regulators)
  }
  if (!is.factor(regulatory_network$target)) {
    regulatory_network$target <- factor(regulatory_network$target, targets)
  }

  # check sc regulatory network, but only when one was supplied:
  # the argument defaults to NULL, yet the original code unconditionally
  # asserted is.data.frame(regulatory_network_sc), which made this
  # optional argument effectively mandatory (it errored on the default)
  if (!is.null(regulatory_network_sc)) {
    cell_ids <- dataset$cell_ids
    assert_that(
      is.data.frame(regulatory_network_sc),
      regulatory_network_sc %has_names% c("cell_id", "regulator", "target", "strength"),
      is.character(regulatory_network_sc$cell_id) || is.factor(regulatory_network_sc$cell_id),
      is.character(regulatory_network_sc$regulator) || is.factor(regulatory_network_sc$regulator),
      is.character(regulatory_network_sc$target) || is.factor(regulatory_network_sc$target),
      is.numeric(regulatory_network_sc$strength),
      !is.null(dataset$cell_ids),
      all(regulatory_network_sc$cell_id %in% dataset$cell_ids),
      all(regulatory_network_sc$regulator %in% regulators),
      all(regulatory_network_sc$target %in% targets)
    )
    if (!is.factor(regulatory_network_sc$cell_id)) {
      regulatory_network_sc$cell_id <- factor(regulatory_network_sc$cell_id, cell_ids)
    }
    if (!is.factor(regulatory_network_sc$regulator)) {
      regulatory_network_sc$regulator <- factor(regulatory_network_sc$regulator, regulators)
    }
    if (!is.factor(regulatory_network_sc$target)) {
      regulatory_network_sc$target <- factor(regulatory_network_sc$target, targets)
    }
  }

  # store the network(s) on the dataset; keeping the assignment as the last
  # expression preserves the original's invisible return
  dataset <- dataset %>% extend_with(
    "dynwrap::with_regulatory_network",
    regulatory_network = regulatory_network,
    regulatory_network_sc = regulatory_network_sc,
    regulators = regulators,
    targets = targets,
    ...
  )
}
|
/R/add_regulatory_network.R
|
permissive
|
dynverse/dynwrap
|
R
| false
| false
| 2,898
|
r
|
#' Add a GRN to a dynwrap object
#'
#' @inheritParams common_param
#' @param regulatory_network A data frame consisting of three columns: `"regulator"`, `"target"`, `"strength"`.
#' @param regulatory_network_sc Optionally, a data frame consisting of four columns: `"cell_id"`, `"regulator"`, `"target"`, `"strength"`.
#' @param regulators The feature ids of the regulators.
#' @param targets The feature ids of the targets.
#' @param ... Extra arguments to be saved in the model.
#'
#' @return A dynwrap object with the regulatory network added.
#'
#' @export
add_regulatory_network <- function(dataset, regulatory_network, regulatory_network_sc = NULL, regulators = NULL, targets = NULL, ...) {
  # check regulatory network
  assert_that(
    is.data.frame(regulatory_network),
    regulatory_network %has_names% c("regulator", "target", "strength"),
    is.character(regulatory_network$regulator) || is.factor(regulatory_network$regulator),
    is.character(regulatory_network$target) || is.factor(regulatory_network$target),
    is.numeric(regulatory_network$strength),
    !is.null(regulators),
    !is.null(targets),
    all(regulatory_network$regulator %in% regulators),
    all(regulatory_network$target %in% targets)
  )
  # normalise regulator/target columns to factors with the full level sets,
  # so downstream code can rely on consistent levels
  if (!is.factor(regulatory_network$regulator)) {
    regulatory_network$regulator <- factor(regulatory_network$regulator, regulators)
  }
  if (!is.factor(regulatory_network$target)) {
    regulatory_network$target <- factor(regulatory_network$target, targets)
  }

  # check sc regulatory network, but only when one was supplied:
  # the argument defaults to NULL, yet the original code unconditionally
  # asserted is.data.frame(regulatory_network_sc), which made this
  # optional argument effectively mandatory (it errored on the default)
  if (!is.null(regulatory_network_sc)) {
    cell_ids <- dataset$cell_ids
    assert_that(
      is.data.frame(regulatory_network_sc),
      regulatory_network_sc %has_names% c("cell_id", "regulator", "target", "strength"),
      is.character(regulatory_network_sc$cell_id) || is.factor(regulatory_network_sc$cell_id),
      is.character(regulatory_network_sc$regulator) || is.factor(regulatory_network_sc$regulator),
      is.character(regulatory_network_sc$target) || is.factor(regulatory_network_sc$target),
      is.numeric(regulatory_network_sc$strength),
      !is.null(dataset$cell_ids),
      all(regulatory_network_sc$cell_id %in% dataset$cell_ids),
      all(regulatory_network_sc$regulator %in% regulators),
      all(regulatory_network_sc$target %in% targets)
    )
    if (!is.factor(regulatory_network_sc$cell_id)) {
      regulatory_network_sc$cell_id <- factor(regulatory_network_sc$cell_id, cell_ids)
    }
    if (!is.factor(regulatory_network_sc$regulator)) {
      regulatory_network_sc$regulator <- factor(regulatory_network_sc$regulator, regulators)
    }
    if (!is.factor(regulatory_network_sc$target)) {
      regulatory_network_sc$target <- factor(regulatory_network_sc$target, targets)
    }
  }

  # store the network(s) on the dataset; keeping the assignment as the last
  # expression preserves the original's invisible return
  dataset <- dataset %>% extend_with(
    "dynwrap::with_regulatory_network",
    regulatory_network = regulatory_network,
    regulatory_network_sc = regulatory_network_sc,
    regulators = regulators,
    targets = targets,
    ...
  )
}
|
# Multiple linear regression on the fuel-efficiency data set:
# fit a full model, use best-subset selection to pick a one-variable
# model, then compare in-sample and hold-out accuracy.
setwd("~/Desktop/ESCP/R Class")

# createDataPartition() comes from caret; the original never loaded it.
library(caret)
# NOTE(review): mape(), rmse() and me() are not base R either -- load the
# accuracy-metrics package used in this course (e.g. Metrics) here; confirm
# which one provides all three.

Fueleff <- read.csv("1-FuelEfficiency.csv")
head(Fueleff)
plot(GPM ~ WT, Fueleff)

# Full model on all columns (this first fit still includes column 1).
model_1 <- lm(GPM ~ ., data = Fueleff)
summary(model_1)

# Drop the first column and refit on the remaining predictors.
Fueleff <- Fueleff[-1]
model_1 <- lm(GPM ~ ., data = Fueleff)
summary(model_1)
cor(Fueleff)

library(leaps)
X <- Fueleff[, 2:7]
Y <- Fueleff[, 1]
# Best-subset selection: keep the two best models of each size
# (Mallows' Cp among the reported criteria).
output <- summary(regsubsets(X, Y, nbest = 2, nvmax = ncol(X)))
tab <- cbind(output$which, output$rsq, output$adjr2, output$cp)

# Simple one-predictor model suggested by the subset search.
model_2 <- lm(GPM ~ WT, data = Fueleff)
summary(model_2)

# In-sample accuracy of both models on the full data.
mape(Fueleff$GPM, predict(model_1, Fueleff))
mape(Fueleff$GPM, predict(model_2, Fueleff))
rmse(Fueleff$GPM, predict(model_1, Fueleff))
rmse(Fueleff$GPM, predict(model_2, Fueleff))
me(Fueleff$GPM, predict(model_1, Fueleff))
me(Fueleff$GPM, predict(model_2, Fueleff))

# Hold-out evaluation: 2/3 train, 1/3 test.
ind <- createDataPartition(Fueleff$GPM, p = 2/3, list = FALSE)
training_data_set <- Fueleff[ind, ]
testing_data_set <- Fueleff[-ind, ]

model_11 <- lm(GPM ~ ., data = training_data_set)
# Compare test-set actuals with test-set predictions; the original passed
# the full-data actuals (Fueleff$GPM), a length mismatch against the
# test-set predictions.
mape(testing_data_set$GPM, predict(model_11, testing_data_set))
rmse(testing_data_set$GPM, predict(model_11, testing_data_set))
me(testing_data_set$GPM, predict(model_11, testing_data_set))

model_22 <- lm(GPM ~ WT, data = training_data_set)
# Evaluate model_22 here; the original accidentally re-evaluated model_11.
mape(testing_data_set$GPM, predict(model_22, testing_data_set))
rmse(testing_data_set$GPM, predict(model_22, testing_data_set))
me(testing_data_set$GPM, predict(model_22, testing_data_set))
|
/Multiple Linear Regression Class.R
|
no_license
|
Shivanandrai/R-Code-
|
R
| false
| false
| 1,471
|
r
|
# Multiple linear regression on the fuel-efficiency data set:
# fit a full model, use best-subset selection to pick a one-variable
# model, then compare in-sample and hold-out accuracy.
setwd("~/Desktop/ESCP/R Class")

# createDataPartition() comes from caret; the original never loaded it.
library(caret)
# NOTE(review): mape(), rmse() and me() are not base R either -- load the
# accuracy-metrics package used in this course (e.g. Metrics) here; confirm
# which one provides all three.

Fueleff <- read.csv("1-FuelEfficiency.csv")
head(Fueleff)
plot(GPM ~ WT, Fueleff)

# Full model on all columns (this first fit still includes column 1).
model_1 <- lm(GPM ~ ., data = Fueleff)
summary(model_1)

# Drop the first column and refit on the remaining predictors.
Fueleff <- Fueleff[-1]
model_1 <- lm(GPM ~ ., data = Fueleff)
summary(model_1)
cor(Fueleff)

library(leaps)
X <- Fueleff[, 2:7]
Y <- Fueleff[, 1]
# Best-subset selection: keep the two best models of each size
# (Mallows' Cp among the reported criteria).
output <- summary(regsubsets(X, Y, nbest = 2, nvmax = ncol(X)))
tab <- cbind(output$which, output$rsq, output$adjr2, output$cp)

# Simple one-predictor model suggested by the subset search.
model_2 <- lm(GPM ~ WT, data = Fueleff)
summary(model_2)

# In-sample accuracy of both models on the full data.
mape(Fueleff$GPM, predict(model_1, Fueleff))
mape(Fueleff$GPM, predict(model_2, Fueleff))
rmse(Fueleff$GPM, predict(model_1, Fueleff))
rmse(Fueleff$GPM, predict(model_2, Fueleff))
me(Fueleff$GPM, predict(model_1, Fueleff))
me(Fueleff$GPM, predict(model_2, Fueleff))

# Hold-out evaluation: 2/3 train, 1/3 test.
ind <- createDataPartition(Fueleff$GPM, p = 2/3, list = FALSE)
training_data_set <- Fueleff[ind, ]
testing_data_set <- Fueleff[-ind, ]

model_11 <- lm(GPM ~ ., data = training_data_set)
# Compare test-set actuals with test-set predictions; the original passed
# the full-data actuals (Fueleff$GPM), a length mismatch against the
# test-set predictions.
mape(testing_data_set$GPM, predict(model_11, testing_data_set))
rmse(testing_data_set$GPM, predict(model_11, testing_data_set))
me(testing_data_set$GPM, predict(model_11, testing_data_set))

model_22 <- lm(GPM ~ WT, data = training_data_set)
# Evaluate model_22 here; the original accidentally re-evaluated model_11.
mape(testing_data_set$GPM, predict(model_22, testing_data_set))
rmse(testing_data_set$GPM, predict(model_22, testing_data_set))
me(testing_data_set$GPM, predict(model_22, testing_data_set))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blbglm.R
\name{split_data_glm}
\alias{split_data_glm}
\title{Split data into m parts of approximately equal sizes}
\usage{
split_data_glm(data, m)
}
\arguments{
\item{data}{data frame}
\item{m}{numeric, split data into m parts of approximately equal sizes}
}
\description{
Split data into m parts of approximately equal sizes.
}
|
/man/split_data_glm.Rd
|
permissive
|
XiaoqiNa/blblm
|
R
| false
| true
| 404
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blbglm.R
\name{split_data_glm}
\alias{split_data_glm}
\title{Split data into m parts of approximately equal sizes}
\usage{
split_data_glm(data, m)
}
\arguments{
\item{data}{data frame}
\item{m}{numeric, split data into m parts of approximately equal sizes}
}
\description{
Split data into m parts of approximately equal sizes.
}
|
# run_analysis.R: build a tidy summary of the UCI HAR (smartphone activity)
# data set -- download/unzip the raw files, keep only mean()/std() features,
# merge train and test sets, relabel activities, and write per-subject,
# per-activity means.

# Load required packages. library() stops immediately if a package is
# missing; the original used sapply(packages, require, ...) and silently
# discarded the FALSE return values, letting later calls fail obscurely.
packages <- c("data.table", "reshape2")
invisible(lapply(packages, library, character.only = TRUE, quietly = TRUE))

path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, file.path(path, "dataFiles.zip"))
unzip(zipfile = "dataFiles.zip")

# Load activity labels and the feature table
activityLabels <- fread(file.path(path, "UCI HAR Dataset/activity_labels.txt")
                        , col.names = c("classLabels", "activityName"))
features <- fread(file.path(path, "UCI HAR Dataset/features.txt")
                  , col.names = c("index", "featureNames"))
# Keep only the mean() and std() measurements; strip "()" from the names
featuresWanted <- grep("(mean|std)\\(\\)", features[, featureNames])
measurements <- features[featuresWanted, featureNames]
measurements <- gsub('[()]', '', measurements)

# Load train datasets (measurements + activity + subject columns)
train <- fread(file.path(path, "UCI HAR Dataset/train/X_train.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(train, colnames(train), measurements)
trainActivities <- fread(file.path(path, "UCI HAR Dataset/train/Y_train.txt")
                         , col.names = c("Activity"))
trainSubjects <- fread(file.path(path, "UCI HAR Dataset/train/subject_train.txt")
                       , col.names = c("SubjectNum"))
train <- cbind(trainSubjects, trainActivities, train)

# Load test datasets (same layout as train)
test <- fread(file.path(path, "UCI HAR Dataset/test/X_test.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(test, colnames(test), measurements)
testActivities <- fread(file.path(path, "UCI HAR Dataset/test/Y_test.txt")
                        , col.names = c("Activity"))
testSubjects <- fread(file.path(path, "UCI HAR Dataset/test/subject_test.txt")
                      , col.names = c("SubjectNum"))
test <- cbind(testSubjects, testActivities, test)

# Merge datasets
combined <- rbind(train, test)

# Convert numeric class labels to descriptive activity names
combined[["Activity"]] <- factor(combined[, Activity]
                                 , levels = activityLabels[["classLabels"]]
                                 , labels = activityLabels[["activityName"]])
combined[["SubjectNum"]] <- as.factor(combined[, SubjectNum])

# Melt then cast: one mean per subject/activity/variable combination
combined <- reshape2::melt(data = combined, id = c("SubjectNum", "Activity"))
combined <- reshape2::dcast(data = combined, SubjectNum + Activity ~ variable, fun.aggregate = mean)
data.table::fwrite(x = combined, file = "tidy_data.txt", quote = FALSE)
|
/run_analysis.R
|
no_license
|
nashr00/Coursera-Project-Getting-and-Cleaning-Data
|
R
| false
| false
| 2,495
|
r
|
# run_analysis.R: build a tidy summary of the UCI HAR (smartphone activity)
# data set -- download/unzip the raw files, keep only mean()/std() features,
# merge train and test sets, relabel activities, and write per-subject,
# per-activity means.

# Load required packages. library() stops immediately if a package is
# missing; the original used sapply(packages, require, ...) and silently
# discarded the FALSE return values, letting later calls fail obscurely.
packages <- c("data.table", "reshape2")
invisible(lapply(packages, library, character.only = TRUE, quietly = TRUE))

path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, file.path(path, "dataFiles.zip"))
unzip(zipfile = "dataFiles.zip")

# Load activity labels and the feature table
activityLabels <- fread(file.path(path, "UCI HAR Dataset/activity_labels.txt")
                        , col.names = c("classLabels", "activityName"))
features <- fread(file.path(path, "UCI HAR Dataset/features.txt")
                  , col.names = c("index", "featureNames"))
# Keep only the mean() and std() measurements; strip "()" from the names
featuresWanted <- grep("(mean|std)\\(\\)", features[, featureNames])
measurements <- features[featuresWanted, featureNames]
measurements <- gsub('[()]', '', measurements)

# Load train datasets (measurements + activity + subject columns)
train <- fread(file.path(path, "UCI HAR Dataset/train/X_train.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(train, colnames(train), measurements)
trainActivities <- fread(file.path(path, "UCI HAR Dataset/train/Y_train.txt")
                         , col.names = c("Activity"))
trainSubjects <- fread(file.path(path, "UCI HAR Dataset/train/subject_train.txt")
                       , col.names = c("SubjectNum"))
train <- cbind(trainSubjects, trainActivities, train)

# Load test datasets (same layout as train)
test <- fread(file.path(path, "UCI HAR Dataset/test/X_test.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(test, colnames(test), measurements)
testActivities <- fread(file.path(path, "UCI HAR Dataset/test/Y_test.txt")
                        , col.names = c("Activity"))
testSubjects <- fread(file.path(path, "UCI HAR Dataset/test/subject_test.txt")
                      , col.names = c("SubjectNum"))
test <- cbind(testSubjects, testActivities, test)

# Merge datasets
combined <- rbind(train, test)

# Convert numeric class labels to descriptive activity names
combined[["Activity"]] <- factor(combined[, Activity]
                                 , levels = activityLabels[["classLabels"]]
                                 , labels = activityLabels[["activityName"]])
combined[["SubjectNum"]] <- as.factor(combined[, SubjectNum])

# Melt then cast: one mean per subject/activity/variable combination
combined <- reshape2::melt(data = combined, id = c("SubjectNum", "Activity"))
combined <- reshape2::dcast(data = combined, SubjectNum + Activity ~ variable, fun.aggregate = mean)
data.table::fwrite(x = combined, file = "tidy_data.txt", quote = FALSE)
|
# Exercise 2: working with data frames
# Create a vector of 100 employees ("Employee 1", "Employee 2", ... "Employee 100")
employees <- paste("Employee", 1:100)
# Create a vector of 100 random salaries for the year 2017 (40000-50000)
salary_2017 <- runif(100, 40000, 50000)
# Create a vector of 100 annual salary adjustments between -5000 and 10000
# (a negative number represents a salary decrease)
salary_adjustments <- runif(100, -5000, 10000)
# Combine the 3 vectors into a data frame; keep the names as characters
salaries <- data.frame(employees, salary_2017, salary_adjustments, stringsAsFactors = FALSE)
# 2018 salary = 2017 salary plus the adjustment
salaries$salary_2018 <- salaries$salary_2017 + salaries$salary_adjustments
# TRUE when the adjustment is positive (the person got a raise)
salaries$got_raise <- salaries$salary_adjustments > 0
View(salaries)

### Retrieve values from the data frame to answer the questions below,
### as specifically as possible (single cell rather than whole row).

# What was the 2018 salary of Employee 57
salaries[57, "salary_2018"]
# How many employees got a raise? (logical TRUEs count as 1 in sum())
sum(salaries$got_raise)
# What was the dollar value of the highest raise?
max(salaries$salary_adjustments)
# What was the "name" of the employee who received the highest raise?
salaries[salaries$salary_adjustments == max(salaries$salary_adjustments), "employees"]
# What was the largest decrease in salaries between the two years?
min(salaries$salary_adjustments)
# What was the name of the employee who received the largest decrease?
salaries[salaries$salary_adjustments == min(salaries$salary_adjustments), "employees"]
# What was the average salary change?
# NOTE: the column is `salary_adjustments`; the original wrote
# `salary_adjustment` and only worked via `$` partial matching.
mean(salaries$salary_adjustments)
# For people who did not get a raise, how much did they lose on average?
mean(salaries[!salaries$got_raise, ]$salary_adjustments)

## Consider: do the above averages match what you expected them to be based on
## how you generated the salaries?

# Write a .csv file of the salary data to the working directory
write.csv(salaries, file = "salaries.csv")
|
/chapter-10-exercises/exercise-2/exercise.R
|
permissive
|
shi-lin-li/book-exercises
|
R
| false
| false
| 2,625
|
r
|
# Exercise 2: working with data frames
# Create a vector of 100 employees ("Employee 1", "Employee 2", ... "Employee 100")
employees <- paste("Employee", 1:100)
# Create a vector of 100 random salaries for the year 2017 (40000-50000)
salary_2017 <- runif(100, 40000, 50000)
# Create a vector of 100 annual salary adjustments between -5000 and 10000
# (a negative number represents a salary decrease)
salary_adjustments <- runif(100, -5000, 10000)
# Combine the 3 vectors into a data frame; keep the names as characters
salaries <- data.frame(employees, salary_2017, salary_adjustments, stringsAsFactors = FALSE)
# 2018 salary = 2017 salary plus the adjustment
salaries$salary_2018 <- salaries$salary_2017 + salaries$salary_adjustments
# TRUE when the adjustment is positive (the person got a raise)
salaries$got_raise <- salaries$salary_adjustments > 0
View(salaries)

### Retrieve values from the data frame to answer the questions below,
### as specifically as possible (single cell rather than whole row).

# What was the 2018 salary of Employee 57
salaries[57, "salary_2018"]
# How many employees got a raise? (logical TRUEs count as 1 in sum())
sum(salaries$got_raise)
# What was the dollar value of the highest raise?
max(salaries$salary_adjustments)
# What was the "name" of the employee who received the highest raise?
salaries[salaries$salary_adjustments == max(salaries$salary_adjustments), "employees"]
# What was the largest decrease in salaries between the two years?
min(salaries$salary_adjustments)
# What was the name of the employee who received the largest decrease?
salaries[salaries$salary_adjustments == min(salaries$salary_adjustments), "employees"]
# What was the average salary change?
# NOTE: the column is `salary_adjustments`; the original wrote
# `salary_adjustment` and only worked via `$` partial matching.
mean(salaries$salary_adjustments)
# For people who did not get a raise, how much did they lose on average?
mean(salaries[!salaries$got_raise, ]$salary_adjustments)

## Consider: do the above averages match what you expected them to be based on
## how you generated the salaries?

# Write a .csv file of the salary data to the working directory
write.csv(salaries, file = "salaries.csv")
|
filterRegroup <- function(DF.input = NULL) {
  # Restrict to deposit rows with a positive amount whose account code
  # falls in the 4000 series (strictly between 3999.9999 and 5000).
  DF.output <- DF.input
  DF.output <- DF.output[DF.output[["Amount"]] > 0, ]
  is.revenue.code <- (3999.9999 < DF.output[, "AccountCode"] & DF.output[, "AccountCode"] < 5000)
  DF.output <- DF.output[is.revenue.code, ]

  # Assign each row a grouping label: rows with a "major" account code keep
  # their account name; all other rows are grouped by main contact type.
  DF.output[, "depositItem.group"] <- character(length = nrow(DF.output))
  DF.output[, "char.AccountName"] <- as.character(DF.output[, "AccountName"])
  DF.output[, "char.ContactTypeMain"] <- as.character(DF.output[, "ContactTypeMain"])
  majorAccountCodes <- c(4001, 4002, 4005, 4013)
  is.majorAccountCodes <- DF.output[, "AccountCode"] %in% majorAccountCodes
  DF.output[is.majorAccountCodes, "depositItem.group"] <- DF.output[is.majorAccountCodes, "char.AccountName"]
  DF.output[!is.majorAccountCodes, "depositItem.group"] <- DF.output[!is.majorAccountCodes, "char.ContactTypeMain"]
  DF.output[, "depositItem.group"] <- as.factor(DF.output[, "depositItem.group"])

  # Drop the temporary character helper columns before returning.
  DF.output <- DF.output[, setdiff(colnames(DF.output), c("char.AccountName", "char.ContactTypeMain"))]
  return(DF.output)
}
|
/projects/DataForGood/2016-Youville/code/filterRegroup.R
|
no_license
|
paradisepilot/statistics
|
R
| false
| false
| 1,245
|
r
|
filterRegroup <- function(DF.input = NULL) {
  # Restrict to deposit rows with a positive amount whose account code
  # falls in the 4000 series (strictly between 3999.9999 and 5000).
  DF.output <- DF.input
  DF.output <- DF.output[DF.output[["Amount"]] > 0, ]
  is.revenue.code <- (3999.9999 < DF.output[, "AccountCode"] & DF.output[, "AccountCode"] < 5000)
  DF.output <- DF.output[is.revenue.code, ]

  # Assign each row a grouping label: rows with a "major" account code keep
  # their account name; all other rows are grouped by main contact type.
  DF.output[, "depositItem.group"] <- character(length = nrow(DF.output))
  DF.output[, "char.AccountName"] <- as.character(DF.output[, "AccountName"])
  DF.output[, "char.ContactTypeMain"] <- as.character(DF.output[, "ContactTypeMain"])
  majorAccountCodes <- c(4001, 4002, 4005, 4013)
  is.majorAccountCodes <- DF.output[, "AccountCode"] %in% majorAccountCodes
  DF.output[is.majorAccountCodes, "depositItem.group"] <- DF.output[is.majorAccountCodes, "char.AccountName"]
  DF.output[!is.majorAccountCodes, "depositItem.group"] <- DF.output[!is.majorAccountCodes, "char.ContactTypeMain"]
  DF.output[, "depositItem.group"] <- as.factor(DF.output[, "depositItem.group"])

  # Drop the temporary character helper columns before returning.
  DF.output <- DF.output[, setdiff(colnames(DF.output), c("char.AccountName", "char.ContactTypeMain"))]
  return(DF.output)
}
|
\name{msBP.Gibbs}
\alias{msBP.Gibbs}
\title{
Gibbs sampling for density estimation for msBP model
}
\description{
Gibbs sampling for Markov chain Monte Carlo sampling from the posterior distribution of an msBP model.
}
\usage{
msBP.Gibbs(x, a, b, g0 = "normal", g0par=c(0,1), mcmc,
grid = list(n.points=40, low=0.001, upp=0.999), state=NULL, hyper,
printing=0, maxScale=5, ...)
}
\arguments{
\item{x}{
the observed sample
}
\item{a}{
scalar a parameter
}
\item{b}{
scalar b parameter
}
\item{g0}{
prior guess for the density of \code{x}. Currently only "normal", "unif", "gamma", and "empirical" are supported. From version 1.1 random paramters are also allowed (only with \code{g0="normal"}).
}
\item{g0par}{
additional scalar parameters for \code{g0}. If "normal" corresponds to mean and standard deviation, if "uniform" to upper and lower bounds, if "gamma" to shape and rate parameters. If "empirical" this value is not used. From version 1.1 random paramters are also allowed (only with \code{g0="normal"}).
}
\item{mcmc}{a list giving the MCMC parameters. It must include the
following integers: \code{nb} giving the number of burn-in iterations, \code{nrep} giving
the total number of iterations (including \code{nb}), and \code{ndisplay} giving
the multiple of iterations to be displayed on screen while the \code{C++} routine is running (a message will be printed every \code{ndisplay} iterations).}
\item{grid}{
a list giving the parameters for plotting the posterior mean density over a finite grid. It must include the following values: \code{low} and \code{upp} giving the lower and upper bound respectively of the grid and \code{n.points}, an integer giving the number of points of the grid}
\item{state}{a list giving the current value of the parameters. This list is used if the current analysis is the continuation of a
previous analysis or if we want to start the MCMC algorithm from some particular value of the parameters.}
\item{hyper}{a list containing the values of the hyperparameters for \code{a} and \code{b} or for the parameters of the prior guess (only if \code{g0="normal"}) . It must contains \code{hyperprior}, a list of three logical values determining if hyperpriors for \code{a}, \code{b} and \code{g0} are used (TRUE) or if \code{a}, \code{b}, or \code{g0} are fixed (FALSE), and \code{hyperpar} a list containing the hyperparameters for the hyperprior distributions: \code{beta}, \code{gamma}, \code{delta}, \code{lambda}, \code{mu0}, \code{kappa0}, \code{alpha0}, and \code{beta0}. See details. \code{gridB} is a grid of values for which the prior (and posterior) for \code{b} is evaluated with a Griddy Gibbs approach (Ritter and Tanner, 1992). See details.}
\item{printing}{Vector of integers if the internal C++ function need to print what is doing}
\item{maxScale}{maximum scale of the binary trees.}
\item{...}{additional arguments.}
}
\details{
Before calling the proper C++ subroutine, the function centers the sample on an initial guess for the density of the data. If \code{g0 = 'empirical'} the data are transformed so that the expectation of the msBP prior is centered on the kernel density estimate of \code{x}.
The algorithm consists of two primary steps: (i) allocate each observation
to a multiscale cluster, conditionally on the values of the weights (see also \code{msBP.postCluster});
(ii) update the weights, conditionally on the cluster allocations.
All the procedure is written in C++ and additional R scripts are used to pre- and post-process the data and the output.
If \code{hyper$hyperpriors$a} or \code{hyper$hyperpriors$b} is true, additional hyperpriors for \code{a} and \code{b} are assumed. Specifically the algorithm implements \eqn{a \sim Ga(\beta,\gamma)}{a ~ Ga(\beta,\gamma)} and \eqn{b \sim Ga(\delta, \lambda)}{b ~ Ga(\delta, \lambda)}.
For the former parameter the full conditional posterior distribution is available in closed form, i.e.
\deqn{ a | - \sim Ga\left(\beta + 2^{s'+1} - 1, \gamma - \sum_{s=0}^{s'} \sum_{h=1}^{2^s} \log(1-S_{s,h}) \right),}{%
a | - ~ Ga(\beta + 2^{s'+1} - 1, \gamma - \sum_{s=0}^{s'} \sum_{h=1}^{2^s} log(1-S_{s,h}) ),
}
while for the latter its full conditional posterior is proportional to
\deqn{
\frac{b^{\delta-1}}{B(b,b)^{2^{s+1}-1}} \exp \left\{b \left(
\sum_{s=0}^{s'} \sum_{h=1}^{2^s} \log\{R_{s,h} (1 - R_{s,h} )\} - \lambda\right) \right\},
}{
b^{\delta-1}/B(b,b)^{2^{s+1}-1} exp ( b ( \sum_{s=0}^{s'} \sum_{h=1}^{2^s} log(R_{s,h} (1 - R_{s,h} )) - \lambda ) ,
}
where \eqn{s'} is the maximum occupied scale and \eqn{B(p, q)} is the Beta function. To sample
from the latter distribution, a griddy Gibbs approach over the grid defined by \code{hyper$hyperpar$gridB} is used. See Ritter and Tanner (1992).
From Version 1.1, if \code{hyper$hyperpriors$base=TRUE} and \code{g0="normal"} additional hyperpriors for the parameter of the centering normal density are assumed. Specifically the model is
\deqn{
y = \Phi(x; \mu, \sigma^2)
}{
y = \Phi(x; \mu, \sigma^2)
}
\deqn{
(\mu, \sigma^2) \sim N(\mu; \mu_0, \kappa_0\sigma^2)\mbox{I-Ga}(\sigma^2; \alpha_0, \beta_0)
}{
(\mu, \sigma^2) ~ N(\mu; \mu_0, \kappa_0\sigma^2)I-Ga(\sigma^2; \alpha_0, \beta_0)
}
and an additional step simulating the values of \eqn{\mu} and \eqn{\sigma^2} from their conditional posterior distribution is added to the Gibbs sampler of Canale and Dunson (2016). Specifically, a Metropolis-Hastings step with proposal equal to the prior is implemented.
}
\value{
A list containing
%mcmcsamples=list(dens=postDens, a=res$postA, b=res$postB, scale=scale, S=postS, R=postR, weights=postW, s=posts, h = posth),
%postmean = list(a=mean(res$postA), b=mean(res$postB), S=postMeanS, R=postMeanR, weights=postMeanW, scales=postMeanScale)
\item{density}{A list containing \code{postMeanDens}, the posterior mean density estimate evaluated over \code{xDens} and \code{postLowDens} and \code{postUppDens}, the lower and upper pointwise 95\% credible bands,}
\item{mcmc}{A list containing the MCMC chains: \code{dens} is a matrix (\code{nrep}-\code{nb}) times \code{n.grid}, \code{a} and \code{b} are the vectors with the MCMC chains for the two parameters (if \code{hyperprior} was TRUE), \code{scale} is a matix where each column is a MCMC chain of the total mass for each scale, \code{R} and \code{S}, are matrices where each column in the \code{tree2vec} form of the corresponding trees, \code{weights} is a matrix where each column is the \code{tree2vec} form of the corresponding tree of weights, \code{s} and \code{h} are matrices where each column is the MCMC chain for the node labels for a subject.}
\item{postmean}{A list containing posterior means over the MCMC samples of \code{a}, \code{b}, and of all binary trees}
\item{fit}{A list containing the LPML, mean and median of the log CPO.}
}
\references{
Canale, A. and Dunson, D. B. (2016), "Multiscale Bernstein polynomials for densities", Statistica Sinica, 26
Ritter C., Tanner M. (1992). "Facilitating the Gibbs Sampler: the Gibbs Stopper and the Griddy-Gibbs Sampler." Journal of the American Statistical Association, 87, 861-868.
}
\seealso{
\code{\link{msBP.postCluster}}
}
\examples{
\dontrun{
data(galaxy)
galaxy <- data.frame(galaxy)
speeds <- galaxy$speed/1000
set.seed(1)
#with fixed g0 and random a, b
fit.msbp.1 <- msBP.Gibbs(speeds, a = 10, b = 5, g0 = "empirical",
mcmc=list(nrep = 10000, nb = 5000, ndisplay = 1000),
hyper=list(hyperprior=list(a = TRUE, b = TRUE, g0 = FALSE),
hyperpar=list(beta=5,gamma = 1,delta = 1,lambda = 1)),
printing = 0, maxS = 7, grid = list(n.points = 150, low = 5, upp = 38))
#with random a, b and hyperparameters of g0
fit.msbp.2 <- msBP.Gibbs(speeds, a = 10, b=5, g0 = "normal",
mcmc=list(nrep = 10000, nb = 5000, ndisplay = 1000),
hyper=list(hyperprior = list(a = TRUE, b = TRUE, g0 = TRUE),
hyperpar = list(beta = 50, gamma = 5, delta = 10, lambda = 1,
gridB = seq(0, 20, length = 30),
mu0 = 21, kappa0 = 0.1, alpha0 = 1, beta0 = 20)),
	printing = 0, maxS = 7, grid = list(n.points = 150, low = 5, upp = 38))
hist(speeds, prob=TRUE,br=10, ylim=c(0,0.23), main="", col='grey')
points(fit.msbp.1$density$postMeanDens~fit.msbp.1$density$xDens, ty='l', lwd=2)
points(fit.msbp.1$density$postUppDens~fit.msbp.1$density$xDens, ty='l',lty=2, lwd=2)
points(fit.msbp.1$density$postLowDens~fit.msbp.1$density$xDens, ty='l',lty=2, lwd=2)
hist(speeds, prob=TRUE,br=10, ylim=c(0,0.23), main="", col='grey')
points(fit.msbp.2$density$postMeanDens~fit.msbp.2$density$xDens, ty='l', lwd=2)
points(fit.msbp.2$density$postUppDens~fit.msbp.2$density$xDens, ty='l',lty=2, lwd=2)
points(fit.msbp.2$density$postLowDens~fit.msbp.2$density$xDens, ty='l',lty=2, lwd=2)
}
}
|
/man/msBP.Gibbs.Rd
|
no_license
|
david-dunson/msBP
|
R
| false
| false
| 8,742
|
rd
|
\name{msBP.Gibbs}
\alias{msBP.Gibbs}
\title{
Gibbs sampling for density estimation for msBP model
}
\description{
Gibbs sampling for Markov chain Monte Carlo sampling from the posterior distribution of an msBP model.
}
\usage{
msBP.Gibbs(x, a, b, g0 = "normal", g0par=c(0,1), mcmc,
grid = list(n.points=40, low=0.001, upp=0.999), state=NULL, hyper,
printing=0, maxScale=5, ...)
}
\arguments{
\item{x}{
the observed sample
}
\item{a}{
scalar a parameter
}
\item{b}{
scalar b parameter
}
\item{g0}{
prior guess for the density of \code{x}. Currently only "normal", "unif", "gamma", and "empirical" are supported. From version 1.1 random parameters are also allowed (only with \code{g0="normal"}).
}
\item{g0par}{
additional scalar parameters for \code{g0}. If "normal" corresponds to mean and standard deviation, if "uniform" to upper and lower bounds, if "gamma" to shape and rate parameters. If "empirical" this value is not used. From version 1.1 random parameters are also allowed (only with \code{g0="normal"}).
}
\item{mcmc}{a list giving the MCMC parameters. It must include the
following integers: \code{nb} giving the number of burn-in iterations, \code{nrep} giving
the total number of iterations (including \code{nb}), and \code{ndisplay} giving
the multiple of iterations to be displayed on screen while the \code{C++} routine is running (a message will be printed every \code{ndisplay} iterations).}
\item{grid}{
a list giving the parameters for plotting the posterior mean density over a finite grid. It must include the following values: \code{low} and \code{upp} giving the lower and upper bound respectively of the grid and \code{n.points}, an integer giving the number of points of the grid}
\item{state}{a list giving the current value of the parameters. This list is used if the current analysis is the continuation of a
previous analysis or if we want to start the MCMC algorithm from some particular value of the parameters.}
\item{hyper}{a list containing the values of the hyperparameters for \code{a} and \code{b} or for the parameters of the prior guess (only if \code{g0="normal"}) . It must contains \code{hyperprior}, a list of three logical values determining if hyperpriors for \code{a}, \code{b} and \code{g0} are used (TRUE) or if \code{a}, \code{b}, or \code{g0} are fixed (FALSE), and \code{hyperpar} a list containing the hyperparameters for the hyperprior distributions: \code{beta}, \code{gamma}, \code{delta}, \code{lambda}, \code{mu0}, \code{kappa0}, \code{alpha0}, and \code{beta0}. See details. \code{gridB} is a grid of values for which the prior (and posterior) for \code{b} is evaluated with a Griddy Gibbs approach (Ritter and Tanner, 1992). See details.}
\item{printing}{Vector of integers if the internal C++ function need to print what is doing}
\item{maxScale}{maximum scale of the binary trees.}
\item{...}{additional arguments.}
}
\details{
Before calling the proper C++ subroutine the function centers the sample on an initial guess for the density of the data. If \code{g0 = 'empirical'} the data are transformed so that the expectation of the msBP prior is centered on the kernel density estimate of \code{x}.
The algorithm consists of two primary steps: (i) allocate each observation
to a multiscale cluster, conditionally on the values of the weights (see also \code{msBP.postCluster});
(ii) update the weights, conditionally on the cluster allocations.
All the procedure is written in C++ and additional R scripts are used to pre- and post-process the data and the output.
If \code{hyper$hyperpriors$a} or \code{hyper$hyperpriors$b} is true, additional hyperpriors for \code{a} and \code{b} are assumed. Specifically the algorithm implements \eqn{a \sim Ga(\beta,\gamma)}{a ~ Ga(\beta,\gamma)} and \eqn{b \sim Ga(\delta, \lambda)}{b ~ Ga(\delta, \lambda)}.
For the former parameter the full conditional posterior distribution is available in closed form, i.e.
\deqn{ a | - \sim Ga\left(\beta + 2^{s'+1} - 1, \gamma - \sum_{s=0}^{s'} \sum_{h=1}^{2^s} \log(1-S_{s,h}) \right),}{%
a | - ~ Ga(\beta + 2^{s'+1} - 1, \gamma - \sum_{s=0}^{s'} \sum_{h=1}^{2^s} log(1-S_{s,h}) ),
}
while for the latter its full conditional posterior is proportional to
\deqn{
\frac{b^{\delta-1}}{B(b,b)^{2^{s+1}-1}} \exp \left\{b \left(
\sum_{s=0}^{s'} \sum_{h=1}^{2^s} \log\{R_{s,h} (1 - R_{s,h} )\} - \lambda\right) \right\},
}{
b^{\delta-1}/B(b,b)^{2^{s+1}-1} exp ( b ( \sum_{s=0}^{s'} \sum_{h=1}^{2^s} log(R_{s,h} (1 - R_{s,h} )) - \lambda ) ,
}
where \eqn{s'} is the maximum occupied scale and \eqn{B(p, q)} is the Beta function. To sample
from the latter distribution, a griddy Gibbs approach over the grid defined by \code{hyper$hyperpar$gridB} is used. See Ritter and Tanner (1992).
From Version 1.1, if \code{hyper$hyperpriors$base=TRUE} and \code{g0="normal"} additional hyperpriors for the parameter of the centering normal density are assumed. Specifically the model is
\deqn{
y = \Phi(x; \mu, \sigma^2)
}{
y = \Phi(x; \mu, \sigma^2)
}
\deqn{
(\mu, \sigma^2) \sim N(\mu; \mu_0, \kappa_0\sigma^2)\mbox{I-Ga}(\sigma^2; \alpha_0, \beta_0)
}{
(\mu, \sigma^2) ~ N(\mu; \mu_0, \kappa_0\sigma^2)I-Ga(\sigma^2; \alpha_0, \beta_0)
}
and an addtional step simulating the values of \eqn{\mu} and \eqn{\sigma^2} from their conditional posterior distribution is added to the Gibbs sampler of Canale and Dunson (2016). Specifically, a Metropolis-Hastings step with proposal equal to the prior is implemented.
}
\value{
A list containing
%mcmcsamples=list(dens=postDens, a=res$postA, b=res$postB, scale=scale, S=postS, R=postR, weights=postW, s=posts, h = posth),
%postmean = list(a=mean(res$postA), b=mean(res$postB), S=postMeanS, R=postMeanR, weights=postMeanW, scales=postMeanScale)
\item{density}{A list containing \code{postMeanDens}, the posterior mean density estimate evaluated over \code{xDens} and \code{postLowDens} and \code{postUppDens}, the lower and upper pointwise 95\% credible bands,}
\item{mcmc}{A list containing the MCMC chains: \code{dens} is a matrix (\code{nrep}-\code{nb}) times \code{n.grid}, \code{a} and \code{b} are the vectors with the MCMC chains for the two parameters (if \code{hyperprior} was TRUE), \code{scale} is a matrix where each column is a MCMC chain of the total mass for each scale, \code{R} and \code{S} are matrices where each column is the \code{tree2vec} form of the corresponding trees, \code{weights} is a matrix where each column is the \code{tree2vec} form of the corresponding tree of weights, \code{s} and \code{h} are matrices where each column is the MCMC chain for the node labels for a subject.}
\item{postmean}{A list containing posterior means over the MCMC samples of \code{a}, \code{b}, and of all binary trees}
\item{fit}{A list containing the LPML, mean and median of the log CPO.}
}
\references{
Canale, A. and Dunson, D. B. (2016), "Multiscale Bernstein polynomials for densities", Statistica Sinica, 26
Ritter C., Tanner M. (1992). "Facilitating the Gibbs Sampler: the Gibbs Stopper and the Griddy-Gibbs Sampler." Journal of the American Statistical Association, 87, 861-868.
}
\seealso{
\code{\link{msBP.postCluster}}
}
\examples{
\dontrun{
data(galaxy)
galaxy <- data.frame(galaxy)
speeds <- galaxy$speed/1000
set.seed(1)
#with fixed g0 and random a, b
fit.msbp.1 <- msBP.Gibbs(speeds, a = 10, b = 5, g0 = "empirical",
mcmc=list(nrep = 10000, nb = 5000, ndisplay = 1000),
hyper=list(hyperprior=list(a = TRUE, b = TRUE, g0 = FALSE),
hyperpar=list(beta=5,gamma = 1,delta = 1,lambda = 1)),
printing = 0, maxS = 7, grid = list(n.points = 150, low = 5, upp = 38))
#with random a, b and hyperparameters of g0
fit.msbp.2 <- msBP.Gibbs(speeds, a = 10, b=5, g0 = "normal",
mcmc=list(nrep = 10000, nb = 5000, ndisplay = 1000),
hyper=list(hyperprior = list(a = TRUE, b = TRUE, g0 = TRUE),
hyperpar = list(beta = 50, gamma = 5, delta = 10, lambda = 1,
gridB = seq(0, 20, length = 30),
mu0 = 21, kappa0 = 0.1, alpha0 = 1, beta0 = 20)),
	printing = 0, maxS = 7, grid = list(n.points = 150, low = 5, upp = 38))
hist(speeds, prob=TRUE,br=10, ylim=c(0,0.23), main="", col='grey')
points(fit.msbp.1$density$postMeanDens~fit.msbp.1$density$xDens, ty='l', lwd=2)
points(fit.msbp.1$density$postUppDens~fit.msbp.1$density$xDens, ty='l',lty=2, lwd=2)
points(fit.msbp.1$density$postLowDens~fit.msbp.1$density$xDens, ty='l',lty=2, lwd=2)
hist(speeds, prob=TRUE,br=10, ylim=c(0,0.23), main="", col='grey')
points(fit.msbp.2$density$postMeanDens~fit.msbp.2$density$xDens, ty='l', lwd=2)
points(fit.msbp.2$density$postUppDens~fit.msbp.2$density$xDens, ty='l',lty=2, lwd=2)
points(fit.msbp.2$density$postLowDens~fit.msbp.2$density$xDens, ty='l',lty=2, lwd=2)
}
}
|
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of SkeletonCompartiveEffectStudy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Synthesize positive controls
#'
#' @details
#' This function will synthesize positive controls based on the negative controls. The simulated outcomes
#' will be added to the cohort table.
#'
#' @param connectionDetails    An object of type \code{connectionDetails} as created using the
#'                             \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#'                             DatabaseConnector package.
#' @param cdmDatabaseSchema    Schema name where your patient-level data in OMOP CDM format resides.
#'                             Note that for SQL Server, this should include both the database and
#'                             schema name, for example 'cdm_data.dbo'.
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#'                             write privileges in this schema. Note that for SQL Server, this should
#'                             include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable          The name of the table that will be created in the work database schema.
#'                             This table will hold the exposure and outcome cohorts used in this
#'                             study.
#' @param oracleTempSchema     Should be used in Oracle to specify a schema where the user has write
#'                             privileges for storing temporary tables.
#' @param outputFolder         Name of local folder to place results; make sure to use forward slashes
#'                             (/)
#' @param maxCores             How many parallel cores should be used? If more cores are made available
#'                             this can speed up the analyses.
#'
#' @export
synthesizePositiveControls <- function(connectionDetails,
                                       cdmDatabaseSchema,
                                       cohortDatabaseSchema,
                                       cohortTable = "cohort",
                                       oracleTempSchema,
                                       outputFolder,
                                       maxCores = 1) {
  # Set to TRUE if you don't want to use synthetic positive controls in your study:
  skipSynthesis <- FALSE
  if (!skipSynthesis) {
    synthesisFolder <- file.path(outputFolder, "positiveControlSynthesis")
    if (!file.exists(synthesisFolder))
      dir.create(synthesisFolder)
    synthesisSummaryFile <- file.path(outputFolder, "SynthesisSummary.csv")
    # Only run the (expensive) signal-injection step once; on reruns the summary
    # file written at the end of this branch is reused.
    if (!file.exists(synthesisSummaryFile)) {
      # The exposure-outcome pairs to inject signals for come from the packaged
      # negative control definitions.
      pathToCsv <- system.file("settings", "NegativeControls.csv", package = "SkeletonCompartiveEffectStudy")
      negativeControls <- read.csv(pathToCsv)
      exposureOutcomePairs <- data.frame(exposureId = negativeControls$targetId,
                                         outcomeId = negativeControls$outcomeId)
      exposureOutcomePairs <- unique(exposureOutcomePairs)
      prior = Cyclops::createPrior("laplace", exclude = 0, useCrossValidation = TRUE)
      control = Cyclops::createControl(cvType = "auto",
                                       startingVariance = 0.01,
                                       noiseLevel = "quiet",
                                       cvRepetitions = 1,
                                       threads = min(c(10, maxCores)))
      # Covariates used by the outcome model that drives the signal injection.
      covariateSettings <- FeatureExtraction::createCovariateSettings(useDemographicsAgeGroup = TRUE,
                                                                      useDemographicsGender = TRUE,
                                                                      useDemographicsIndexYear = TRUE,
                                                                      useDemographicsIndexMonth = TRUE,
                                                                      useConditionGroupEraLongTerm = TRUE,
                                                                      useDrugGroupEraLongTerm = TRUE,
                                                                      useProcedureOccurrenceLongTerm = TRUE,
                                                                      useMeasurementLongTerm = TRUE,
                                                                      useObservationLongTerm = TRUE,
                                                                      useCharlsonIndex = TRUE,
                                                                      useDcsi = TRUE,
                                                                      useChads2Vasc = TRUE,
                                                                      longTermStartDays = 365,
                                                                      endDays = 0)
      # Inject simulated outcomes into the cohort table; new outcome cohort IDs
      # start at outputIdOffset above the originals.
      result <- MethodEvaluation::injectSignals(connectionDetails,
                                                cdmDatabaseSchema = cdmDatabaseSchema,
                                                oracleTempSchema = oracleTempSchema,
                                                exposureDatabaseSchema = cohortDatabaseSchema,
                                                exposureTable = cohortTable,
                                                outcomeDatabaseSchema = cohortDatabaseSchema,
                                                outcomeTable = cohortTable,
                                                outputDatabaseSchema = cohortDatabaseSchema,
                                                outputTable = cohortTable,
                                                createOutputTable = FALSE,
                                                outputIdOffset = 10000,
                                                exposureOutcomePairs = exposureOutcomePairs,
                                                firstExposureOnly = TRUE,
                                                firstOutcomeOnly = TRUE,
                                                removePeopleWithPriorOutcomes = TRUE,
                                                modelType = "survival",
                                                washoutPeriod = 183,
                                                riskWindowStart = 0,
                                                riskWindowEnd = 30,
                                                addExposureDaysToEnd = TRUE,
                                                effectSizes = c(1.5, 2, 4),
                                                precision = 0.01,
                                                prior = prior,
                                                control = control,
                                                maxSubjectsForModel = 250000,
                                                minOutcomeCountForModel = 50,
                                                minOutcomeCountForInjection = 25,
                                                workFolder = synthesisFolder,
                                                modelThreads = max(1, round(maxCores/8)),
                                                generationThreads = min(6, maxCores),
                                                covariateSettings = covariateSettings)
      write.csv(result, synthesisSummaryFile, row.names = FALSE)
    }
    OhdsiRTools::logTrace("Merging positive with negative controls ")
    pathToCsv <- system.file("settings", "NegativeControls.csv", package = "SkeletonCompartiveEffectStudy")
    negativeControls <- read.csv(pathToCsv)
    synthesisSummary <- read.csv(synthesisSummaryFile)
    synthesisSummary$targetId <- synthesisSummary$exposureId
    synthesisSummary <- merge(synthesisSummary, negativeControls)
    # Keep only outcomes where the injection achieved a non-zero true effect size.
    synthesisSummary <- synthesisSummary[synthesisSummary$trueEffectSize != 0, ]
    synthesisSummary$outcomeName <- paste0(synthesisSummary$OutcomeName, ", RR=", synthesisSummary$targetEffectSize)
    synthesisSummary$oldOutcomeId <- synthesisSummary$outcomeId
    synthesisSummary$outcomeId <- synthesisSummary$newOutcomeId
  }
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "SkeletonCompartiveEffectStudy")
  negativeControls <- read.csv(pathToCsv)
  # Negative controls have a true effect size of 1 (no effect) by definition.
  negativeControls$targetEffectSize <- 1
  negativeControls$trueEffectSize <- 1
  negativeControls$trueEffectSizeFirstExposure <- 1
  negativeControls$oldOutcomeId <- negativeControls$outcomeId
  if (skipSynthesis) {
    allControls <- negativeControls
  } else {
    allControls <- rbind(negativeControls, synthesisSummary[, names(negativeControls)])
  }
  write.csv(allControls, file.path(outputFolder, "AllControls.csv"), row.names = FALSE)
}
|
/SkeletonCompartiveEffectStudy/R/SynthesizePositiveControls.R
|
no_license
|
NEONKID/StudyProtocolSandbox
|
R
| false
| false
| 9,233
|
r
|
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of SkeletonCompartiveEffectStudy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Synthesize positive controls
#'
#' @details
#' This function will synthesize positive controls based on the negative controls. The simulated outcomes
#' will be added to the cohort table.
#'
#' @param connectionDetails    An object of type \code{connectionDetails} as created using the
#'                             \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#'                             DatabaseConnector package.
#' @param cdmDatabaseSchema    Schema name where your patient-level data in OMOP CDM format resides.
#'                             Note that for SQL Server, this should include both the database and
#'                             schema name, for example 'cdm_data.dbo'.
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#'                             write privileges in this schema. Note that for SQL Server, this should
#'                             include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable          The name of the table that will be created in the work database schema.
#'                             This table will hold the exposure and outcome cohorts used in this
#'                             study.
#' @param oracleTempSchema     Should be used in Oracle to specify a schema where the user has write
#'                             privileges for storing temporary tables.
#' @param outputFolder         Name of local folder to place results; make sure to use forward slashes
#'                             (/)
#' @param maxCores             How many parallel cores should be used? If more cores are made available
#'                             this can speed up the analyses.
#'
#' @export
synthesizePositiveControls <- function(connectionDetails,
                                       cdmDatabaseSchema,
                                       cohortDatabaseSchema,
                                       cohortTable = "cohort",
                                       oracleTempSchema,
                                       outputFolder,
                                       maxCores = 1) {
  # Set to TRUE if you don't want to use synthetic positive controls in your study:
  skipSynthesis <- FALSE
  if (!skipSynthesis) {
    synthesisFolder <- file.path(outputFolder, "positiveControlSynthesis")
    if (!file.exists(synthesisFolder))
      dir.create(synthesisFolder)
    synthesisSummaryFile <- file.path(outputFolder, "SynthesisSummary.csv")
    # Only run the (expensive) signal-injection step once; on reruns the summary
    # file written at the end of this branch is reused.
    if (!file.exists(synthesisSummaryFile)) {
      # The exposure-outcome pairs to inject signals for come from the packaged
      # negative control definitions.
      pathToCsv <- system.file("settings", "NegativeControls.csv", package = "SkeletonCompartiveEffectStudy")
      negativeControls <- read.csv(pathToCsv)
      exposureOutcomePairs <- data.frame(exposureId = negativeControls$targetId,
                                         outcomeId = negativeControls$outcomeId)
      exposureOutcomePairs <- unique(exposureOutcomePairs)
      prior = Cyclops::createPrior("laplace", exclude = 0, useCrossValidation = TRUE)
      control = Cyclops::createControl(cvType = "auto",
                                       startingVariance = 0.01,
                                       noiseLevel = "quiet",
                                       cvRepetitions = 1,
                                       threads = min(c(10, maxCores)))
      # Covariates used by the outcome model that drives the signal injection.
      covariateSettings <- FeatureExtraction::createCovariateSettings(useDemographicsAgeGroup = TRUE,
                                                                      useDemographicsGender = TRUE,
                                                                      useDemographicsIndexYear = TRUE,
                                                                      useDemographicsIndexMonth = TRUE,
                                                                      useConditionGroupEraLongTerm = TRUE,
                                                                      useDrugGroupEraLongTerm = TRUE,
                                                                      useProcedureOccurrenceLongTerm = TRUE,
                                                                      useMeasurementLongTerm = TRUE,
                                                                      useObservationLongTerm = TRUE,
                                                                      useCharlsonIndex = TRUE,
                                                                      useDcsi = TRUE,
                                                                      useChads2Vasc = TRUE,
                                                                      longTermStartDays = 365,
                                                                      endDays = 0)
      # Inject simulated outcomes into the cohort table; new outcome cohort IDs
      # start at outputIdOffset above the originals.
      result <- MethodEvaluation::injectSignals(connectionDetails,
                                                cdmDatabaseSchema = cdmDatabaseSchema,
                                                oracleTempSchema = oracleTempSchema,
                                                exposureDatabaseSchema = cohortDatabaseSchema,
                                                exposureTable = cohortTable,
                                                outcomeDatabaseSchema = cohortDatabaseSchema,
                                                outcomeTable = cohortTable,
                                                outputDatabaseSchema = cohortDatabaseSchema,
                                                outputTable = cohortTable,
                                                createOutputTable = FALSE,
                                                outputIdOffset = 10000,
                                                exposureOutcomePairs = exposureOutcomePairs,
                                                firstExposureOnly = TRUE,
                                                firstOutcomeOnly = TRUE,
                                                removePeopleWithPriorOutcomes = TRUE,
                                                modelType = "survival",
                                                washoutPeriod = 183,
                                                riskWindowStart = 0,
                                                riskWindowEnd = 30,
                                                addExposureDaysToEnd = TRUE,
                                                effectSizes = c(1.5, 2, 4),
                                                precision = 0.01,
                                                prior = prior,
                                                control = control,
                                                maxSubjectsForModel = 250000,
                                                minOutcomeCountForModel = 50,
                                                minOutcomeCountForInjection = 25,
                                                workFolder = synthesisFolder,
                                                modelThreads = max(1, round(maxCores/8)),
                                                generationThreads = min(6, maxCores),
                                                covariateSettings = covariateSettings)
      write.csv(result, synthesisSummaryFile, row.names = FALSE)
    }
    OhdsiRTools::logTrace("Merging positive with negative controls ")
    pathToCsv <- system.file("settings", "NegativeControls.csv", package = "SkeletonCompartiveEffectStudy")
    negativeControls <- read.csv(pathToCsv)
    synthesisSummary <- read.csv(synthesisSummaryFile)
    synthesisSummary$targetId <- synthesisSummary$exposureId
    synthesisSummary <- merge(synthesisSummary, negativeControls)
    # Keep only outcomes where the injection achieved a non-zero true effect size.
    synthesisSummary <- synthesisSummary[synthesisSummary$trueEffectSize != 0, ]
    synthesisSummary$outcomeName <- paste0(synthesisSummary$OutcomeName, ", RR=", synthesisSummary$targetEffectSize)
    synthesisSummary$oldOutcomeId <- synthesisSummary$outcomeId
    synthesisSummary$outcomeId <- synthesisSummary$newOutcomeId
  }
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "SkeletonCompartiveEffectStudy")
  negativeControls <- read.csv(pathToCsv)
  # Negative controls have a true effect size of 1 (no effect) by definition.
  negativeControls$targetEffectSize <- 1
  negativeControls$trueEffectSize <- 1
  negativeControls$trueEffectSizeFirstExposure <- 1
  negativeControls$oldOutcomeId <- negativeControls$outcomeId
  if (skipSynthesis) {
    allControls <- negativeControls
  } else {
    allControls <- rbind(negativeControls, synthesisSummary[, names(negativeControls)])
  }
  write.csv(allControls, file.path(outputFolder, "AllControls.csv"), row.names = FALSE)
}
|
# Shiny UI: a shinydashboard layout. The sidebar takes a CSV upload and plot
# configuration; the body renders an interactive ggiraph plot.
shinyUI(
	dashboardPage(
		dashboardHeader(title = "Interactive plotting"),
		dashboardSidebar(
			# CSV file upload; the header checkbox controls how it is read in.
			fileInput(inputId = "Ifile",
			          label = "Insert File",
			          accept = ".csv"),
			checkboxInput("Iheader", "Header", TRUE),
			### plot options
			selectInput(inputId = "Iplottype",
			            label = "Plot type",
			            choices = c("Barplot", "Scatterplot", "Histogram")),
			# uiOutput("Oxval1"),
			# uiOutput("Oyval1"),
			# Placeholder filled server-side with plot-type-specific controls.
			uiOutput("Oplotui"),
			fluidRow(column(width = 5, offset = 3, actionButton("Igoplt", "Plot")))
		),
		dashboardBody(
			# Interactive (hover/click-enabled) plot output.
			ggiraphOutput("Oplot")
		)
	)
)
|
/shiny/drill_down/app2/ui.R
|
no_license
|
askhari139/R_Learning
|
R
| false
| false
| 789
|
r
|
# Shiny UI: a shinydashboard layout. The sidebar takes a CSV upload and plot
# configuration; the body renders an interactive ggiraph plot.
shinyUI(
	dashboardPage(
		dashboardHeader(title = "Interactive plotting"),
		dashboardSidebar(
			# CSV file upload; the header checkbox controls how it is read in.
			fileInput(inputId = "Ifile",
			          label = "Insert File",
			          accept = ".csv"),
			checkboxInput("Iheader", "Header", TRUE),
			### plot options
			selectInput(inputId = "Iplottype",
			            label = "Plot type",
			            choices = c("Barplot", "Scatterplot", "Histogram")),
			# uiOutput("Oxval1"),
			# uiOutput("Oyval1"),
			# Placeholder filled server-side with plot-type-specific controls.
			uiOutput("Oplotui"),
			fluidRow(column(width = 5, offset = 3, actionButton("Igoplt", "Plot")))
		),
		dashboardBody(
			# Interactive (hover/click-enabled) plot output.
			ggiraphOutput("Oplot")
		)
	)
)
|
# run_analysis: merge the UCI HAR test and train datasets, keep only the
# mean/std measurements, and write a tidy per-subject/per-activity summary.
# NOTE: plyr is loaded BEFORE dplyr so plyr does not mask dplyr verbs
# (mutate, summarise, arrange, ...); the original order was a masking hazard.
library(plyr)
library(dplyr)
library(data.table)

### MERGE the training and the test sets to create one data set.
# Import the list of all features that were collected per subject.
features <- fread("./Dataset/features.txt",
                  sep = " ",
                  col.names = c("featID", "feat"))
# Import the list of activities (numeric ID -> label lookup).
activity <- fread("./Dataset/activity_labels.txt",
                  col.names = c("ActID", "Activity"))

### TEST DATA --------------------------------------------------------------
# Subject IDs used in the test dataset.
test_subjects <- fread("./Dataset/test/subject_test.txt",
                       sep = " ", col.names = c("SID"))
# Activity label associated with each test observation.
test_activity <- fread("./Dataset/test/Y_test.txt", sep = " ", col.names = c("ActID"))
test_activity <- left_join(test_activity, activity, by = "ActID")
# Test measurements.
test_data <- fread("./Dataset/test/X_test.txt", sep = " ")
# Name the columns after the features; make.unique() disambiguates duplicate
# feature names so column selection is unambiguous.
colnames(test_data) <- make.unique(features[[2]])
# Keep only columns with the string "mean" or "std" in the column name.
test_data <- test_data %>% select(matches("mean|std"))
# Attach subject and activity, then drop the numeric activity ID (label kept).
test_data <- cbind(test_subjects, test_activity, test_data)
test_data <- subset(test_data, select = -c(ActID))

### TRAIN DATA -------------------------------------------------------------
# Mirror of the test-data steps above, applied to the training set.
train_subjects <- fread("./Dataset/train/subject_train.txt", sep = " ", col.names = c("SID"))
train_activity <- fread("./Dataset/train/Y_train.txt", sep = " ", col.names = c("ActID"))
train_activity <- left_join(train_activity, activity, by = "ActID")
# Train measurements (comment fixed: this is the training set, not the test set).
train_data <- fread("./Dataset/train/X_train.txt", sep = " ")
colnames(train_data) <- make.unique(features[[2]])
train_data <- train_data %>% select(matches("mean|std"))
train_data <- cbind(train_subjects, train_activity, train_data)
train_data <- subset(train_data, select = -c(ActID))

### Combine test and train data sets into one -------------------------------
mergedf <- rbind(test_data, train_data)
# Average every retained measurement per subject and activity.
# summarise_each()/funs() are deprecated; across() is the current idiom.
mergesummary <- mergedf %>%
  group_by(SID, Activity) %>%
  summarise(across(everything(), mean), .groups = "drop")
# Write the tidy summary to file (argument spelled out fully: no partial
# matching of row.name -> row.names).
write.table(mergesummary, file = "mergesummary.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
Scott-Purvis/R_Cleaning_Project
|
R
| false
| false
| 3,000
|
r
|
# run_analysis: merge the UCI HAR test and train datasets, keep only the
# mean/std measurements, and write a tidy per-subject/per-activity summary.
# NOTE: plyr is loaded BEFORE dplyr so plyr does not mask dplyr verbs
# (mutate, summarise, arrange, ...); the original order was a masking hazard.
library(plyr)
library(dplyr)
library(data.table)

### MERGE the training and the test sets to create one data set.
# Import the list of all features that were collected per subject.
features <- fread("./Dataset/features.txt",
                  sep = " ",
                  col.names = c("featID", "feat"))
# Import the list of activities (numeric ID -> label lookup).
activity <- fread("./Dataset/activity_labels.txt",
                  col.names = c("ActID", "Activity"))

### TEST DATA --------------------------------------------------------------
# Subject IDs used in the test dataset.
test_subjects <- fread("./Dataset/test/subject_test.txt",
                       sep = " ", col.names = c("SID"))
# Activity label associated with each test observation.
test_activity <- fread("./Dataset/test/Y_test.txt", sep = " ", col.names = c("ActID"))
test_activity <- left_join(test_activity, activity, by = "ActID")
# Test measurements.
test_data <- fread("./Dataset/test/X_test.txt", sep = " ")
# Name the columns after the features; make.unique() disambiguates duplicate
# feature names so column selection is unambiguous.
colnames(test_data) <- make.unique(features[[2]])
# Keep only columns with the string "mean" or "std" in the column name.
test_data <- test_data %>% select(matches("mean|std"))
# Attach subject and activity, then drop the numeric activity ID (label kept).
test_data <- cbind(test_subjects, test_activity, test_data)
test_data <- subset(test_data, select = -c(ActID))

### TRAIN DATA -------------------------------------------------------------
# Mirror of the test-data steps above, applied to the training set.
train_subjects <- fread("./Dataset/train/subject_train.txt", sep = " ", col.names = c("SID"))
train_activity <- fread("./Dataset/train/Y_train.txt", sep = " ", col.names = c("ActID"))
train_activity <- left_join(train_activity, activity, by = "ActID")
# Train measurements (comment fixed: this is the training set, not the test set).
train_data <- fread("./Dataset/train/X_train.txt", sep = " ")
colnames(train_data) <- make.unique(features[[2]])
train_data <- train_data %>% select(matches("mean|std"))
train_data <- cbind(train_subjects, train_activity, train_data)
train_data <- subset(train_data, select = -c(ActID))

### Combine test and train data sets into one -------------------------------
mergedf <- rbind(test_data, train_data)
# Average every retained measurement per subject and activity.
# summarise_each()/funs() are deprecated; across() is the current idiom.
mergesummary <- mergedf %>%
  group_by(SID, Activity) %>%
  summarise(across(everything(), mean), .groups = "drop")
# Write the tidy summary to file (argument spelled out fully: no partial
# matching of row.name -> row.names).
write.table(mergesummary, file = "mergesummary.txt", row.names = FALSE)
|
\docType{package}
\name{dxR-package}
\alias{dxR}
\alias{dxR-package}
\title{DNAnexus R Client Library}
\description{
dxR is an R extension containing API wrapper functions
for interacting with the new DNAnexus platform.
}
\details{
\tabular{ll}{ Package: \tab dxR\cr Type: \tab Package\cr
Version: \tab 0.167.0\cr License: \tab Apache License (==
2.0)\cr }
}
\author{
Katherine Lai
Maintainer: Katherine Lai <klai@dnanexus.com>
}
|
/src/R/dxR/man/dxR-package.Rd
|
permissive
|
vhuarui/dx-toolkit
|
R
| false
| false
| 446
|
rd
|
\docType{package}
\name{dxR-package}
\alias{dxR}
\alias{dxR-package}
\title{DNAnexus R Client Library}
\description{
dxR is an R extension containing API wrapper functions
for interacting with the new DNAnexus platform.
}
\details{
\tabular{ll}{ Package: \tab dxR\cr Type: \tab Package\cr
Version: \tab 0.167.0\cr License: \tab Apache License (==
2.0)\cr }
}
\author{
Katherine Lai
Maintainer: Katherine Lai <klai@dnanexus.com>
}
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# Unit tests for the generated `Pet` model: one test_that() block per model
# property. All expectations are commented-out placeholders; replace
# "EXPECTED_RESULT" with real fixture values and uncomment to activate them.
context("Test Pet")
model.instance <- Pet$new()
test_that("id", {
  # tests for the property `id` (integer)
  # uncomment below to test the property
  #expect_equal(model.instance$`id`, "EXPECTED_RESULT")
})
test_that("category", {
  # tests for the property `category` (Category)
  # uncomment below to test the property
  #expect_equal(model.instance$`category`, "EXPECTED_RESULT")
})
test_that("name", {
  # tests for the property `name` (character)
  # uncomment below to test the property
  #expect_equal(model.instance$`name`, "EXPECTED_RESULT")
})
test_that("photoUrls", {
  # tests for the property `photoUrls` (array[character])
  # uncomment below to test the property
  #expect_equal(model.instance$`photoUrls`, "EXPECTED_RESULT")
})
test_that("tags", {
  # tests for the property `tags` (array[Tag])
  # uncomment below to test the property
  #expect_equal(model.instance$`tags`, "EXPECTED_RESULT")
})
test_that("status", {
  # tests for the property `status` (character)
  # pet status in the store
  # uncomment below to test the property
  #expect_equal(model.instance$`status`, "EXPECTED_RESULT")
})
|
/tests/testthat/test_pet.R
|
no_license
|
wing328/petstore-r-client
|
R
| false
| false
| 1,250
|
r
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# Unit tests for the generated `Pet` model: one test_that() block per model
# property. All expectations are commented-out placeholders; replace
# "EXPECTED_RESULT" with real fixture values and uncomment to activate them.
context("Test Pet")
model.instance <- Pet$new()
test_that("id", {
  # tests for the property `id` (integer)
  # uncomment below to test the property
  #expect_equal(model.instance$`id`, "EXPECTED_RESULT")
})
test_that("category", {
  # tests for the property `category` (Category)
  # uncomment below to test the property
  #expect_equal(model.instance$`category`, "EXPECTED_RESULT")
})
test_that("name", {
  # tests for the property `name` (character)
  # uncomment below to test the property
  #expect_equal(model.instance$`name`, "EXPECTED_RESULT")
})
test_that("photoUrls", {
  # tests for the property `photoUrls` (array[character])
  # uncomment below to test the property
  #expect_equal(model.instance$`photoUrls`, "EXPECTED_RESULT")
})
test_that("tags", {
  # tests for the property `tags` (array[Tag])
  # uncomment below to test the property
  #expect_equal(model.instance$`tags`, "EXPECTED_RESULT")
})
test_that("status", {
  # tests for the property `status` (character)
  # pet status in the store
  # uncomment below to test the property
  #expect_equal(model.instance$`status`, "EXPECTED_RESULT")
})
|
# This file contains all the code needed to parse and print various sections of your CV
# from data. Feel free to tweak it as you desire!
#' Create a CV_Printer object.
#'
#' @param data_location Path of the spreadsheets holding all your data. This can
#'   be either a URL to a google sheet with multiple sheets containing the four
#'   data types or a path to a folder containing four `.csv`s with the necessary
#'   data.
#' @param pdf_mode Is the output being rendered into a pdf? Aka do links need to
#'   be stripped?
#' @param sheet_is_publicly_readable If you're using google sheets for data, is
#'   the sheet publicly available? (Makes authorization easier.)
#' @param cache_data If set to true when data is read in it will be saved to an
#'   `.rds` object so it doesn't need to be repeatedly pulled from google
#'   sheets. This is also nice when you have non-public sheets that don't play
#'   nice with authentication during the knit process.
#' @return A new `CV_Printer` object: a list carrying the loaded data tables
#'   plus rendering settings (`pdf_mode`, `links`, `cache_data`).
create_CV_object <- function(data_location,
pdf_mode = FALSE,
sheet_is_publicly_readable = TRUE,
cache_data = TRUE) {
# Seed the CV list with settings, then attach the four data tables.
cv <- list(
pdf_mode = pdf_mode,
links = c(),
cache_data = cache_data
) %>%
load_data(data_location, sheet_is_publicly_readable)
# Pull a 4-digit year out of a free-form date string. Missing years map to
# "ten years from now" so open-ended ("Current") entries sort first.
extract_year <- function(dates){
date_year <- stringr::str_extract(dates, "(20|19)[0-9]{2}")
date_year[is.na(date_year)] <- lubridate::year(lubridate::ymd(Sys.Date())) + 10
date_year
}
# Turn free-form "<month> <year>"-style strings into Date objects, defaulting
# the month to January (month "1") when none is present.
parse_dates <- function(dates){
date_month <- stringr::str_extract(dates, "(\\w+|\\d+)(?=(\\s|\\/|-)(20|19)[0-9]{2})")
date_month[is.na(date_month)] <- "1"
paste("1", date_month, extract_year(dates), sep = "-") %>%
lubridate::dmy()
}
# Clean up entries dataframe to format we need it for printing:
# merge description_* columns into one markdown bullet list, derive
# start/end years, and build a human-readable timeline string.
cv$entries_data %<>%
tidyr::unite(
tidyr::starts_with('description'),
col = "description_bullets",
sep = "\n- ",
na.rm = TRUE
) %>%
dplyr::mutate(
description_bullets = ifelse(description_bullets != "", paste0("- ", description_bullets), ""),
start = ifelse(start == "NULL", NA, start),
end = ifelse(end == "NULL", NA, end),
start_year = extract_year(start),
end_year = extract_year(end),
no_start = is.na(start),
has_start = !no_start,
no_end = is.na(end),
has_end = !no_end,
timeline = dplyr::case_when(
no_start & no_end ~ "N/A",
no_start & has_end ~ as.character(end),
has_start & no_end ~ paste("Current", "-", start),
TRUE ~ paste(end, "-", start)
)
) %>%
# Newest entries first; any remaining NAs become the literal string 'N/A'.
dplyr::arrange(desc(parse_dates(end))) %>%
dplyr::mutate_all(~ ifelse(is.na(.), 'N/A', .))
cv
}
# Load CV data from cache, Google Sheets, or local CSVs.
#
# Resolution order:
#   1. A local cache file ("ddcv_cache.rds"), if present and cv$cache_data is TRUE.
#   2. A Google Sheets document, when data_location points at docs.google.com.
#   3. Otherwise, four CSV files in the data_location folder.
# After a fresh (non-cached) read, the four data tables are written to the
# cache so subsequent knits can skip the network round trip.
#
# @param cv Partially-built CV list (must carry `cache_data`).
# @param data_location URL of a Google Sheet or path to a folder of CSVs.
# @param sheet_is_publicly_readable If TRUE, skip Google authentication.
# @return The cv list, invisibly, with entries_data, skills, text_blocks and
#   contact_info attached.
load_data <- function(cv, data_location, sheet_is_publicly_readable){
  cache_loc <- "ddcv_cache.rds"
  has_cached_data <- fs::file_exists(cache_loc)
  is_google_sheets_location <- stringr::str_detect(data_location, "docs\\.google\\.com")
  # Scalar conditions: use short-circuiting && rather than element-wise &.
  if (has_cached_data && cv$cache_data) {
    cv <- c(cv, readr::read_rds(cache_loc))
  } else if (is_google_sheets_location) {
    if (sheet_is_publicly_readable) {
      # This tells google sheets to not try and authenticate. Note that this
      # will only work if your sheet has sharing set to "anyone with link can
      # view".
      googlesheets4::gs4_deauth()
    } else {
      # Private sheet: designate a project-specific OAuth cache so we can
      # render the Rmd non-interactively without auth prompts.
      options(gargle_oauth_cache = ".secrets")
    }
    read_gsheet <- function(sheet_id){
      googlesheets4::read_sheet(data_location, sheet = sheet_id, skip = 1, col_types = "c")
    }
    cv$entries_data <- read_gsheet(sheet_id = "entries")
    cv$skills <- read_gsheet(sheet_id = "language_skills")
    cv$text_blocks <- read_gsheet(sheet_id = "text_blocks")
    cv$contact_info <- read_gsheet(sheet_id = "contact_info")
  } else {
    # Want to go old-school with csvs? One file per table.
    cv$entries_data <- readr::read_csv(paste0(data_location, "entries.csv"), skip = 1)
    cv$skills <- readr::read_csv(paste0(data_location, "language_skills.csv"), skip = 1)
    cv$text_blocks <- readr::read_csv(paste0(data_location, "text_blocks.csv"), skip = 1)
    cv$contact_info <- readr::read_csv(paste0(data_location, "contact_info.csv"), skip = 1)
  }
  if (cv$cache_data && !has_cached_data) {
    # Make sure we only cache the data and not settings etc.
    readr::write_rds(
      list(
        entries_data = cv$entries_data,
        skills = cv$skills,
        text_blocks = cv$text_blocks,
        contact_info = cv$contact_info
      ),
      cache_loc
    )
    cat(glue::glue("CV data is cached at {cache_loc}.\n"))
  }
  invisible(cv)
}
# Remove links from a text block and add to internal list.
#
# In pdf mode, markdown links "[title](dest)" cannot be clicked, so each link
# is replaced by its title followed by a numbered superscript; the destination
# URL is appended to cv$links so print_links() can list it later. Outside pdf
# mode the text is returned unchanged.
#
# @param cv CV object (its $links vector may grow).
# @param text A character string possibly containing markdown links.
# @return A list with the (possibly updated) `cv` and the rewritten `text`.
sanitize_links <- function(cv, text){
if(cv$pdf_mode){
# Lookbehind/lookahead regexes pull out "[title](" and "](dest)" parts.
link_titles <- stringr::str_extract_all(text, '(?<=\\[).+?(?=\\]\\()')[[1]]
link_destinations <- stringr::str_extract_all(text, '(?<=\\]\\().+?(?=\\))')[[1]]
n_links <- length(cv$links)
n_new_links <- length(link_titles)
if(n_new_links > 0){
# add links to links array
cv$links <- c(cv$links, link_destinations)
# Build map of link destination to superscript. Numbering continues from
# the links already collected so superscripts are unique document-wide.
link_superscript_mappings <- purrr::set_names(
paste0("<sup>", (1:n_new_links) + n_links, "</sup>"),
paste0("(", link_destinations, ")")
)
# Replace the link destination and remove square brackets for title
text <- text %>%
stringr::str_replace_all(stringr::fixed(link_superscript_mappings)) %>%
stringr::str_replace_all('\\[(.+?)\\](?=<sup>)', "\\1")
}
}
list(cv = cv, text = text)
}
#' @description Take a position data frame and the section id desired and prints the section to markdown.
#' @param section_id ID of the entries section to be printed as encoded by the `section` column of the `entries` table
#' @param glue_template Glue template for one entry, or "default" for the
#'   built-in layout (title / location / institution / timeline / bullets).
#' @return The cv object, invisibly, with any newly collected links.
print_section <- function(cv, section_id, glue_template = "default"){
  if (glue_template == "default") {
    glue_template <- "
### {title}
{loc}
{institution}
{timeline}
{description_bullets}
\n\n\n"
  }
  section_data <- dplyr::filter(cv$entries_data, section == section_id)
  # Take entire entries data frame and removes the links in descending order
  # so links for the same position are right next to each other in number.
  # seq_len() (rather than 1:nrow) is a no-op for an empty section, and
  # returning `cv` directly avoids referencing a loop-local variable that is
  # undefined when the loop body never ran.
  for (i in seq_len(nrow(section_data))) {
    for (col in c('title', 'description_bullets')) {
      strip_res <- sanitize_links(cv, section_data[i, col])
      section_data[i, col] <- strip_res$text
      cv <- strip_res$cv
    }
  }
  print(glue::glue_data(section_data, glue_template))
  invisible(cv)
}
#' @description Prints out text block identified by a given label.
#' @param label ID of the text block to print as encoded in `label` column of `text_blocks` table.
#' @return The cv object, invisibly, with any newly collected links.
print_text_block <- function(cv, label){
  # Look up the requested block's text by its label, strip links for pdf
  # output, then print it.
  matching_block <- dplyr::filter(cv$text_blocks, loc == label)
  block_text <- dplyr::pull(matching_block, text)
  sanitized <- sanitize_links(cv, block_text)
  cat(sanitized$text)
  invisible(sanitized$cv)
}
#' @description Construct a bar chart of skills
#' @param out_of The relative maximum for skills. Used to set what a fully filled in skill bar is.
#' @param bar_color Fill color of the completed part of each bar.
#' @param bar_background Color of the unfilled remainder of each bar.
#' @param glue_template HTML glue template for one bar, or "default" for the
#'   built-in CSS-gradient div.
#' @return The cv object, invisibly.
print_skill_bars <- function(cv, out_of = 5, bar_color = "#969696", bar_background = "#d9d9d9", glue_template = "default"){
if(glue_template == "default"){
# Each skill renders as a div whose background gradient switches color at
# width_percent, visually filling the bar to the skill's level.
glue_template <- "
<div
class = 'skill-bar'
style = \"background:linear-gradient(to right,
{bar_color} {width_percent}%,
{bar_background} {width_percent}% 100%);\"
>{skill}</div>"
}
# Convert each skill level to a fill percentage, then print one div per row.
cv$skills %>%
dplyr::mutate(width_percent = round(100*as.numeric(level)/out_of)) %>%
glue::glue_data(glue_template) %>%
print()
invisible(cv)
}
#' @description List of all links in document labeled by their superscript integer.
#' @return The cv object, invisibly. Prints nothing when no links were
#'   collected by sanitize_links().
print_links <- function(cv) {
n_links <- length(cv$links)
if (n_links > 0) {
# Section header in pagedown/datadrivencv markdown format.
cat("
Links {data-icon=link}
--------------------------------------------------------------------------------
<br>
")
# Numbered list: index n matches the <sup>n</sup> inserted into the text.
purrr::walk2(cv$links, 1:n_links, function(link, index) {
print(glue::glue('{index}. {link}'))
})
}
invisible(cv)
}
#' @description Contact information section with icons
#' @return The cv object, invisibly.
print_contact_info <- function(cv){
  # One markdown bullet per contact row, each prefixed with its Font Awesome
  # icon.
  contact_lines <- glue::glue_data(
    cv$contact_info,
    "- <i class='fa fa-{icon}'></i> {contact}"
  )
  print(contact_lines)
  invisible(cv)
}
|
/CV_printing_functions.R
|
no_license
|
nuhorchak/resume
|
R
| false
| false
| 8,725
|
r
|
# This file contains all the code needed to parse and print various sections of your CV
# from data. Feel free to tweak it as you desire!
#' Create a CV_Printer object.
#'
#' @param data_location Path of the spreadsheets holding all your data. This can
#' be either a URL to a google sheet with multiple sheets containing the four
#' data types or a path to a folder containing four `.csv`s with the neccesary
#' data.
#' @param source_location Where is the code to build your CV hosted?
#' @param pdf_mode Is the output being rendered into a pdf? Aka do links need to
#' be stripped?
#' @param sheet_is_publicly_readable If you're using google sheets for data, is
#' the sheet publicly available? (Makes authorization easier.)
#' @param cache_data If set to true when data is read in it will be saved to an
#' `.rds` object so it doesn't need to be repeatedly pulled from google
#' sheets. This is also nice when you have non-public sheets that don't play
#' nice with authentication during the knit process.
#' @return A new `CV_Printer` object.
create_CV_object <- function(data_location,
pdf_mode = FALSE,
sheet_is_publicly_readable = TRUE,
cache_data = TRUE) {
cv <- list(
pdf_mode = pdf_mode,
links = c(),
cache_data = cache_data
) %>%
load_data(data_location, sheet_is_publicly_readable)
extract_year <- function(dates){
date_year <- stringr::str_extract(dates, "(20|19)[0-9]{2}")
date_year[is.na(date_year)] <- lubridate::year(lubridate::ymd(Sys.Date())) + 10
date_year
}
parse_dates <- function(dates){
date_month <- stringr::str_extract(dates, "(\\w+|\\d+)(?=(\\s|\\/|-)(20|19)[0-9]{2})")
date_month[is.na(date_month)] <- "1"
paste("1", date_month, extract_year(dates), sep = "-") %>%
lubridate::dmy()
}
# Clean up entries dataframe to format we need it for printing
cv$entries_data %<>%
tidyr::unite(
tidyr::starts_with('description'),
col = "description_bullets",
sep = "\n- ",
na.rm = TRUE
) %>%
dplyr::mutate(
description_bullets = ifelse(description_bullets != "", paste0("- ", description_bullets), ""),
start = ifelse(start == "NULL", NA, start),
end = ifelse(end == "NULL", NA, end),
start_year = extract_year(start),
end_year = extract_year(end),
no_start = is.na(start),
has_start = !no_start,
no_end = is.na(end),
has_end = !no_end,
timeline = dplyr::case_when(
no_start & no_end ~ "N/A",
no_start & has_end ~ as.character(end),
has_start & no_end ~ paste("Current", "-", start),
TRUE ~ paste(end, "-", start)
)
) %>%
dplyr::arrange(desc(parse_dates(end))) %>%
dplyr::mutate_all(~ ifelse(is.na(.), 'N/A', .))
cv
}
# Load CV data from cache, Google Sheets, or local CSVs.
#
# Resolution order:
#   1. A local cache file ("ddcv_cache.rds"), if present and cv$cache_data is TRUE.
#   2. A Google Sheets document, when data_location points at docs.google.com.
#   3. Otherwise, four CSV files in the data_location folder.
# After a fresh (non-cached) read, the four data tables are written to the
# cache so subsequent knits can skip the network round trip.
#
# @param cv Partially-built CV list (must carry `cache_data`).
# @param data_location URL of a Google Sheet or path to a folder of CSVs.
# @param sheet_is_publicly_readable If TRUE, skip Google authentication.
# @return The cv list, invisibly, with entries_data, skills, text_blocks and
#   contact_info attached.
load_data <- function(cv, data_location, sheet_is_publicly_readable){
  cache_loc <- "ddcv_cache.rds"
  has_cached_data <- fs::file_exists(cache_loc)
  is_google_sheets_location <- stringr::str_detect(data_location, "docs\\.google\\.com")
  # Scalar conditions: use short-circuiting && rather than element-wise &.
  if (has_cached_data && cv$cache_data) {
    cv <- c(cv, readr::read_rds(cache_loc))
  } else if (is_google_sheets_location) {
    if (sheet_is_publicly_readable) {
      # This tells google sheets to not try and authenticate. Note that this
      # will only work if your sheet has sharing set to "anyone with link can
      # view".
      googlesheets4::gs4_deauth()
    } else {
      # Private sheet: designate a project-specific OAuth cache so we can
      # render the Rmd non-interactively without auth prompts.
      options(gargle_oauth_cache = ".secrets")
    }
    read_gsheet <- function(sheet_id){
      googlesheets4::read_sheet(data_location, sheet = sheet_id, skip = 1, col_types = "c")
    }
    cv$entries_data <- read_gsheet(sheet_id = "entries")
    cv$skills <- read_gsheet(sheet_id = "language_skills")
    cv$text_blocks <- read_gsheet(sheet_id = "text_blocks")
    cv$contact_info <- read_gsheet(sheet_id = "contact_info")
  } else {
    # Want to go old-school with csvs? One file per table.
    cv$entries_data <- readr::read_csv(paste0(data_location, "entries.csv"), skip = 1)
    cv$skills <- readr::read_csv(paste0(data_location, "language_skills.csv"), skip = 1)
    cv$text_blocks <- readr::read_csv(paste0(data_location, "text_blocks.csv"), skip = 1)
    cv$contact_info <- readr::read_csv(paste0(data_location, "contact_info.csv"), skip = 1)
  }
  if (cv$cache_data && !has_cached_data) {
    # Make sure we only cache the data and not settings etc.
    readr::write_rds(
      list(
        entries_data = cv$entries_data,
        skills = cv$skills,
        text_blocks = cv$text_blocks,
        contact_info = cv$contact_info
      ),
      cache_loc
    )
    cat(glue::glue("CV data is cached at {cache_loc}.\n"))
  }
  invisible(cv)
}
}
# Remove links from a text block and add to internal list
sanitize_links <- function(cv, text){
if(cv$pdf_mode){
link_titles <- stringr::str_extract_all(text, '(?<=\\[).+?(?=\\]\\()')[[1]]
link_destinations <- stringr::str_extract_all(text, '(?<=\\]\\().+?(?=\\))')[[1]]
n_links <- length(cv$links)
n_new_links <- length(link_titles)
if(n_new_links > 0){
# add links to links array
cv$links <- c(cv$links, link_destinations)
# Build map of link destination to superscript
link_superscript_mappings <- purrr::set_names(
paste0("<sup>", (1:n_new_links) + n_links, "</sup>"),
paste0("(", link_destinations, ")")
)
# Replace the link destination and remove square brackets for title
text <- text %>%
stringr::str_replace_all(stringr::fixed(link_superscript_mappings)) %>%
stringr::str_replace_all('\\[(.+?)\\](?=<sup>)', "\\1")
}
}
list(cv = cv, text = text)
}
#' @description Take a position data frame and the section id desired and prints the section to markdown.
#' @param section_id ID of the entries section to be printed as encoded by the `section` column of the `entries` table
#' @param glue_template Glue template for one entry, or "default" for the
#'   built-in layout (title / location / institution / timeline / bullets).
#' @return The cv object, invisibly, with any newly collected links.
print_section <- function(cv, section_id, glue_template = "default"){
  if (glue_template == "default") {
    glue_template <- "
### {title}
{loc}
{institution}
{timeline}
{description_bullets}
\n\n\n"
  }
  section_data <- dplyr::filter(cv$entries_data, section == section_id)
  # Take entire entries data frame and removes the links in descending order
  # so links for the same position are right next to each other in number.
  # seq_len() (rather than 1:nrow) is a no-op for an empty section, and
  # returning `cv` directly avoids referencing a loop-local variable that is
  # undefined when the loop body never ran.
  for (i in seq_len(nrow(section_data))) {
    for (col in c('title', 'description_bullets')) {
      strip_res <- sanitize_links(cv, section_data[i, col])
      section_data[i, col] <- strip_res$text
      cv <- strip_res$cv
    }
  }
  print(glue::glue_data(section_data, glue_template))
  invisible(cv)
}
#' @description Prints out text block identified by a given label.
#' @param label ID of the text block to print as encoded in `label` column of `text_blocks` table.
print_text_block <- function(cv, label){
text_block <- dplyr::filter(cv$text_blocks, loc == label) %>%
dplyr::pull(text)
strip_res <- sanitize_links(cv, text_block)
cat(strip_res$text)
invisible(strip_res$cv)
}
#' @description Construct a bar chart of skills
#' @param out_of The relative maximum for skills. Used to set what a fully filled in skill bar is.
print_skill_bars <- function(cv, out_of = 5, bar_color = "#969696", bar_background = "#d9d9d9", glue_template = "default"){
if(glue_template == "default"){
glue_template <- "
<div
class = 'skill-bar'
style = \"background:linear-gradient(to right,
{bar_color} {width_percent}%,
{bar_background} {width_percent}% 100%);\"
>{skill}</div>"
}
cv$skills %>%
dplyr::mutate(width_percent = round(100*as.numeric(level)/out_of)) %>%
glue::glue_data(glue_template) %>%
print()
invisible(cv)
}
#' @description List of all links in document labeled by their superscript integer.
print_links <- function(cv) {
n_links <- length(cv$links)
if (n_links > 0) {
cat("
Links {data-icon=link}
--------------------------------------------------------------------------------
<br>
")
purrr::walk2(cv$links, 1:n_links, function(link, index) {
print(glue::glue('{index}. {link}'))
})
}
invisible(cv)
}
#' @description Contact information section with icons
print_contact_info <- function(cv){
glue::glue_data(
cv$contact_info,
"- <i class='fa fa-{icon}'></i> {contact}"
) %>% print()
invisible(cv)
}
|
library(leaflet)
library(htmltools)
library(shiny)
# Shiny server: renders a single leaflet map of American Kestrel sightings
# (Jan 1-21, 2015) from a local eBird CSV extract.
shinyServer(function(input,output){
output$mymap<-renderLeaflet({
# NOTE(review): the CSV is re-read on every render of the map output;
# consider hoisting the read outside renderLeaflet if performance matters.
birds<-read.csv("BirdTestData.csv", header = TRUE)
birds$COMMON.NAME<-as.character(birds$COMMON.NAME)
birds$LOCALITY<-as.character(birds$LOCALITY)
# Dates in the file are in month/day/4-digit-year form.
birds$OBSERVATION.DATE<-as.Date(birds$OBSERVATION.DATE,"%m/%d/%Y")
# Hard-coded species and date window for this demo map.
bird_1<-subset(birds,COMMON.NAME =="American Kestrel" & OBSERVATION.DATE > "2015-01-01" & OBSERVATION.DATE < "2015-01-21")
# Circle marker per sighting; popup shows the (HTML-escaped) locality name.
bird_map<-leaflet(bird_1) %>% addTiles()%>% addCircleMarkers(~bird_1$LONGITUDE, ~bird_1$LATITUDE,weight=1,
radius = ~2, popup = ~htmlEscape(bird_1$LOCALITY))
bird_map
}
)
})
|
/server.R
|
no_license
|
cincysam6/eBird-Data-Explorer
|
R
| false
| false
| 737
|
r
|
library(leaflet)
library(htmltools)
library(shiny)
shinyServer(function(input,output){
output$mymap<-renderLeaflet({
birds<-read.csv("BirdTestData.csv", header = TRUE)
birds$COMMON.NAME<-as.character(birds$COMMON.NAME)
birds$LOCALITY<-as.character(birds$LOCALITY)
birds$OBSERVATION.DATE<-as.Date(birds$OBSERVATION.DATE,"%m/%d/%Y")
bird_1<-subset(birds,COMMON.NAME =="American Kestrel" & OBSERVATION.DATE > "2015-01-01" & OBSERVATION.DATE < "2015-01-21")
bird_map<-leaflet(bird_1) %>% addTiles()%>% addCircleMarkers(~bird_1$LONGITUDE, ~bird_1$LATITUDE,weight=1,
radius = ~2, popup = ~htmlEscape(bird_1$LOCALITY))
bird_map
}
)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PsFunctions.R
\name{stratifyByPs}
\alias{stratifyByPs}
\title{Stratify persons by propensity score}
\usage{
stratifyByPs(data, numberOfStrata = 5, stratificationColumns = c())
}
\arguments{
\item{data}{A data frame with the three columns described below}
\item{numberOfStrata}{How many strata? The boundaries of the strata are automatically
defined to contain equal numbers of treated persons.}
\item{stratificationColumns}{Names of one or more columns in the \code{data} data.frame on which
subjects should also be stratified in addition to stratification on
propensity score.}
}
\value{
Returns a data frame with the same columns as the input data plus one extra column: stratumId.
}
\description{
\code{stratifyByPs} uses the provided propensity scores to stratify persons. Additional
stratification variables for stratifications can also be used.
}
\details{
The data frame should have the following three columns: \tabular{lll}{ \verb{rowId} \tab(integer)
\tab A unique identifier for each row (e.g. the person ID) \cr \verb{treatment} \tab(integer) \tab
Column indicating whether the person is in the treated (1) or comparator\cr \tab \tab (0) group \cr
\verb{propensityScore} \tab(real) \tab Propensity score \cr }
}
\examples{
rowId <- 1:200
treatment <- rep(0:1, each = 100)
propensityScore <- c(runif(100, min = 0, max = 1), runif(100, min = 0, max = 1))
data <- data.frame(rowId = rowId, treatment = treatment, propensityScore = propensityScore)
result <- stratifyByPs(data, 5)
}
|
/man/stratifyByPs.Rd
|
permissive
|
Mbaroudi/CohortMethod
|
R
| false
| true
| 1,573
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PsFunctions.R
\name{stratifyByPs}
\alias{stratifyByPs}
\title{Stratify persons by propensity score}
\usage{
stratifyByPs(data, numberOfStrata = 5, stratificationColumns = c())
}
\arguments{
\item{data}{A data frame with the three columns described below}
\item{numberOfStrata}{How many strata? The boundaries of the strata are automatically
defined to contain equal numbers of treated persons.}
\item{stratificationColumns}{Names of one or more columns in the \code{data} data.frame on which
subjects should also be stratified in addition to stratification on
propensity score.}
}
\value{
Returns a data frame with the same columns as the input data plus one extra column: stratumId.
}
\description{
\code{stratifyByPs} uses the provided propensity scores to stratify persons. Additional
stratification variables for stratifications can also be used.
}
\details{
The data frame should have the following three columns: \tabular{lll}{ \verb{rowId} \tab(integer)
\tab A unique identifier for each row (e.g. the person ID) \cr \verb{treatment} \tab(integer) \tab
Column indicating whether the person is in the treated (1) or comparator\cr \tab \tab (0) group \cr
\verb{propensityScore} \tab(real) \tab Propensity score \cr }
}
\examples{
rowId <- 1:200
treatment <- rep(0:1, each = 100)
propensityScore <- c(runif(100, min = 0, max = 1), runif(100, min = 0, max = 1))
data <- data.frame(rowId = rowId, treatment = treatment, propensityScore = propensityScore)
result <- stratifyByPs(data, 5)
}
|
# Auto-generated fuzz/regression input (valgrind test case) for
# DLMtool::LBSPRgen. The argument values are deliberately extreme/degenerate
# (denormals, huge negatives, empty vectors) to probe for memory errors.
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = -1.68533735424651e+308, SL95 = 2.1224816047267e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
# Invoke with the fuzzed argument list and print the result structure.
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615828464-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 487
|
r
|
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = -1.68533735424651e+308, SL95 = 2.1224816047267e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
## -- Pre Processing --
# Each m_*_transform / m_*_impute helper appends one caret::preProcess method
# name to the model spec's `pre_processors` list and returns the updated spec,
# so they can be chained with %>%.
m_no_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "none"))
m_center_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "center"))
m_scale_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "scale"))
# Standardize = center then scale.
m_standardize_transform <- function(m) m %>% m_center_transform() %>% m_scale_transform()
m_range_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "range"))
m_boxcox_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "BoxCox"))
m_yeojohnson_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "YeoJohnson"))
m_pca_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "pca"))
# ICA additionally requires the number of components, recorded in
# pre_processor_args under the "ica" key.
m_ica_transform <- function(m, n.comp) {
m %>%
m_set(pre_processors = c(m$pre_processors, "ica")) %>%
m_set(pre_processor_args = append(m$pre_processor_args, list(ica = list(n.comp = n.comp))))
}
m_spatial_sign_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "spatialSign"))
# Missing-value imputation strategies (also caret::preProcess methods).
m_knn_impute <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "knnImpute"))
m_median_impute <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "medianImpute"))
m_bag_impute <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "bagImpute"))
# Register a lazily-computed `pre_process_param` on the model spec: a fitted
# caret::preProcess object built from the (engineered) training predictors.
# Returns NULL when no real pre-processors are configured; errors from
# caret::preProcess are captured via purrr::safely rather than thrown.
m_pre_process_param <- function(m) {
build <- function(training_data, seed, response = NULL, pre_processors = NULL, pre_processor_args = NULL, engineered_data = NULL) {
if (length(pre_processors) == 0) {
return (NULL)
}
# "none" entries are placeholders; drop them before fitting.
pre_processors <- pre_processors %>% purrr::discard(. == "none")
if (length(pre_processors) == 0) {
return (NULL)
}
if (!is.null(pre_processor_args)) {
pre_processor_args <- pre_processor_args %>% flatten()
}
# Fall back to the raw training data when no engineering step ran.
if (is.null(engineered_data)) {
engineered_data <- training_data
}
divide <- m_divide(engineered_data, response)
# Seed for reproducibility of stochastic methods (e.g. ICA, bagImpute).
set.seed(seed)
pre_process_args <- append(list(x = divide$x, method = pre_processors), pre_processor_args)
safely(function() do.call(caret::preProcess, pre_process_args))()
}
m %>%
m_seed() %>%
m_split() %>%
m_engineer() %>%
m_default(pre_process_param = build)
}
# Apply a fitted caret pre-processing object to `data`, leaving the response
# column(s) untouched. When no pre-processor was fitted (NULL), the data is
# returned unchanged.
m_pre_process <- function(data, response, pre_process_param) {
  if (is.null(pre_process_param)) {
    return(data)
  }
  parts <- m_divide(data, response)
  transformed_x <- as_tibble(predict(pre_process_param, newdata = as.data.frame(parts$x)))
  bind_cols(transformed_x, parts$y)
}
# Register a lazily-computed `pre_processed_data` on the model spec: the
# engineered training data with the fitted pre-processing applied.
# Yields NULL when no pre-processor exists or when fitting it failed
# (pre_process_param is a purrr::safely result with $result/$error).
m_pre_processed_data <- function(m) {
build <- function(training_data, response = NULL, engineered_data = NULL, pre_process_param = NULL) {
if (is.null(pre_process_param)) {
return(NULL)
}
if (!is.null(pre_process_param$error)) {
return(NULL)
}
# Fall back to the raw training data when no engineering step ran.
if (is.null(engineered_data)) {
engineered_data <- training_data
}
m_pre_process(engineered_data, response, pre_process_param$result)
}
m %>%
m_split() %>%
m_engineer() %>%
m_pre_process_param() %>%
m_default(pre_processed_data = build)
}
|
/Lib/m_pre_processing.R
|
no_license
|
matthew-coad/Climate
|
R
| false
| false
| 3,339
|
r
|
## -- Pre Processing --
m_no_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "none"))
m_center_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "center"))
m_scale_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "scale"))
m_standardize_transform <- function(m) m %>% m_center_transform() %>% m_scale_transform()
m_range_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "range"))
m_boxcox_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "BoxCox"))
m_yeojohnson_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "YeoJohnson"))
m_pca_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "pca"))
m_ica_transform <- function(m, n.comp) {
m %>%
m_set(pre_processors = c(m$pre_processors, "ica")) %>%
m_set(pre_processor_args = append(m$pre_processor_args, list(ica = list(n.comp = n.comp))))
}
m_spatial_sign_transform <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "spatialSign"))
m_knn_impute <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "knnImpute"))
m_median_impute <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "medianImpute"))
m_bag_impute <- function(m) m %>% m_set(pre_processors = c(m$pre_processors, "bagImpute"))
m_pre_process_param <- function(m) {
build <- function(training_data, seed, response = NULL, pre_processors = NULL, pre_processor_args = NULL, engineered_data = NULL) {
if (length(pre_processors) == 0) {
return (NULL)
}
pre_processors <- pre_processors %>% purrr::discard(. == "none")
if (length(pre_processors) == 0) {
return (NULL)
}
if (!is.null(pre_processor_args)) {
pre_processor_args <- pre_processor_args %>% flatten()
}
if (is.null(engineered_data)) {
engineered_data <- training_data
}
divide <- m_divide(engineered_data, response)
set.seed(seed)
pre_process_args <- append(list(x = divide$x, method = pre_processors), pre_processor_args)
safely(function() do.call(caret::preProcess, pre_process_args))()
}
m %>%
m_seed() %>%
m_split() %>%
m_engineer() %>%
m_default(pre_process_param = build)
}
m_pre_process <- function(data, response, pre_process_param) {
if (is.null(pre_process_param)) {
return(data)
}
divide <- m_divide(data, response)
x <- predict(pre_process_param, newdata = as.data.frame(divide$x)) %>% as_tibble()
y <- divide$y
bind_cols(x, y)
}
m_pre_processed_data <- function(m) {
build <- function(training_data, response = NULL, engineered_data = NULL, pre_process_param = NULL) {
if (is.null(pre_process_param)) {
return(NULL)
}
if (!is.null(pre_process_param$error)) {
return(NULL)
}
if (is.null(engineered_data)) {
engineered_data <- training_data
}
m_pre_process(engineered_data, response, pre_process_param$result)
}
m %>%
m_split() %>%
m_engineer() %>%
m_pre_process_param() %>%
m_default(pre_processed_data = build)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DE.R
\name{format_de_results}
\alias{format_de_results}
\title{Minimal formatting of de results}
\usage{
format_de_results(dds, txi, contrast, keep_stats = TRUE, add_mean_dds = FALSE)
}
\arguments{
\item{dds}{The DESeqDataSet object returned by deseq2_analysis.}
\item{txi}{The txi object returned by the import_kallisto function.}
\item{contrast}{A vector describing the contrasts in the c("<comp_group>",
"<comp1>", "<comp2>") format.}
\item{keep_stats}{Keep baseMean, lfcSE and stat values in the results?
Default: \code{TRUE}.}
\item{add_mean_dds}{Add the mean DESeq normalization value for each group of
the comparison. Default: \code{FALSE}}
}
\value{
A data.frame with the id, ensembl_gene, symbol, entrez_id,
transcript_type, log2FoldChange, pvalue, padj columns.
}
\description{
This function will call DESeq2::results on the dds object and add the
information from txi$anno using a full join. This means that the id that
were removed in the dds production step by filtering the rows with a small
number of counts will be re-included in the results. In this case, the
log2FoldChange, the pvalue and the padj columns will all be NA.
}
\examples{
txi <- get_demo_txi()
design <- get_demo_design()
dds <- deseq2_analysis(txi, design, ~ group)
de_res <- format_de_results(dds, txi, c("group", "A", "B"))
}
|
/man/format_de_results.Rd
|
no_license
|
CharlesJB/rnaseq
|
R
| false
| true
| 1,396
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DE.R
\name{format_de_results}
\alias{format_de_results}
\title{Minimal formatting of de results}
\usage{
format_de_results(dds, txi, contrast, keep_stats = TRUE, add_mean_dds = FALSE)
}
\arguments{
\item{dds}{The DESeqDataSet object returned by deseq2_analysis.}
\item{txi}{The txi object returned by the import_kallisto function.}
\item{contrast}{A vector describing the contrasts in the c("<comp_group>",
"<comp1>", "<comp2>") format.}
\item{keep_stats}{Keep baseMean, lfcSE and stat values in the results?
Default: \code{TRUE}.}
\item{add_mean_dds}{Add the mean DESeq normalization value for each group of
the comparison. Default: \code{FALSE}}
}
\value{
A data.frame with the id, ensembl_gene, symbol, entrez_id,
transcript_type, log2FoldChange, pvalue, padj columns.
}
\description{
This function will call DESeq2::results on the dds object and add the
information from txi$anno using a full join. This means that the id that
were removed in the dds production step by filtering the rows with a small
number of counts will be re-included in the results. In this case, the
log2FoldChange, the pvalue and the padj columns will all be NA.
}
\examples{
txi <- get_demo_txi()
design <- get_demo_design()
dds <- deseq2_analysis(txi, design, ~ group)
de_res <- format_de_results(dds, txi, c("group", "A", "B"))
}
|
\name{frNN}
\alias{frNN}
\alias{frnn}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Find the Fixed Radius Nearest Neighbors}
\description{
This function uses a kd-tree to find the fixed radius nearest neighbors
(including distances) fast.
}
\usage{
frNN(x, eps, sort = TRUE, search = "kdtree", bucketSize = 10,
splitRule = "suggest", approx = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ a data matrix or a dist object.}
\item{eps}{ neighbors radius. }
\item{search}{ nearest neighbor search strategy (one of "kdtree"
or "linear", "dist").}
\item{sort}{ sort the neighbors by distance? }
\item{bucketSize}{ max size of the kd-tree leafs. }
\item{splitRule}{ rule to split the kd-tree. One of "STD",
"MIDPT", "FAIR", "SL_MIDPT", "SL_FAIR" or "SUGGEST"
(SL stands for sliding). "SUGGEST" uses ANNs best guess.}
\item{approx}{ use approximate nearest neighbors. All NN up to a distance of
a factor of 1+\code{approx} eps may be used. Some actual NN may be
omitted leading to spurious clusters and noise points.
However, the algorithm will enjoy a significant speedup. }
}
\details{
For details on the parameters see \code{\link{kNN}}.
Note: self-matches are not returned!
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
A list with the following components:
\item{dist }{a matrix with distances. }
\item{id }{a matrix with ids. }
\item{eps }{ eps used. }
%% ...
}
\seealso{
\code{\link{kNN}} for k nearest neighbor search.
}
\references{
David M. Mount and Sunil Arya (2010). ANN: A Library for Approximate Nearest Neighbor Searching, \url{https://www.cs.umd.edu/~mount/ANN/}.
}
\author{
Michael Hahsler
}
\examples{
data(iris)
# Find fixed radius nearest neighbors for each point
nn <- frNN(iris[,-5], eps=.5)
# Number of neighbors
hist(sapply(nn$id, length),
xlab = "k", main="Number of Neighbors",
sub = paste("Neighborhood size eps =", nn$eps))
# Explore neighbors of point i = 10
i <- 10
nn$id[[i]]
nn$dist[[i]]
plot(iris[,-5], col = ifelse(1:nrow(iris) \%in\% nn$id[[i]], "red", "black"))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{model}
|
/man/frNN.Rd
|
no_license
|
augustoqm/dbscan
|
R
| false
| false
| 2,292
|
rd
|
\name{frNN}
\alias{frNN}
\alias{frnn}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Find the Fixed Radius Nearest Neighbors}
\description{
This function uses a kd-tree to find the fixed radius nearest neighbors
(including distances) fast.
}
\usage{
frNN(x, eps, sort = TRUE, search = "kdtree", bucketSize = 10,
splitRule = "suggest", approx = 0)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ a data matrix or a dist object.}
\item{eps}{ neighbors radius. }
\item{search}{ nearest neighbor search strategy (one of "kdtree"
or "linear", "dist").}
\item{sort}{ sort the neighbors by distance? }
\item{bucketSize}{ max size of the kd-tree leaves. }
\item{splitRule}{ rule to split the kd-tree. One of "STD",
"MIDPT", "FAIR", "SL_MIDPT", "SL_FAIR" or "SUGGEST"
(SL stands for sliding). "SUGGEST" uses ANNs best guess.}
\item{approx}{ use approximate nearest neighbors. All NN up to a distance of
a factor of 1+\code{approx} eps may be used. Some actual NN may be
omitted leading to spurious clusters and noise points.
However, the algorithm will enjoy a significant speedup. }
}
\details{
For details on the parameters see \code{\link{kNN}}.
Note: self-matches are not returned!
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
A list with the following components:
\item{dist }{a matrix with distances. }
\item{id }{a matrix with ids. }
\item{eps }{ eps used. }
%% ...
}
\seealso{
\code{\link{kNN}} for k nearest neighbor search.
}
\references{
David M. Mount and Sunil Arya (2010). ANN: A Library for Approximate Nearest Neighbor Searching, \url{https://www.cs.umd.edu/~mount/ANN/}.
}
\author{
Michael Hahsler
}
\examples{
data(iris)
# Find fixed radius nearest neighbors for each point
nn <- frNN(iris[,-5], eps=.5)
# Number of neighbors
hist(sapply(nn$id, length),
xlab = "k", main="Number of Neighbors",
sub = paste("Neighborhood size eps =", nn$eps))
# Explore neighbors of point i = 10
i <- 10
nn$id[[i]]
nn$dist[[i]]
plot(iris[,-5], col = ifelse(1:nrow(iris) \%in\% nn$id[[i]], "red", "black"))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{model}
|
\name{apclusterDemo}
\alias{apclusterDemo}
\title{Affinity Propagation Demo}
\description{
Runs affinity propagation demo for randomly generated data set
according to Frey and Dueck
}
\usage{
apclusterDemo(l=100, d=2, seed=NA, ...)
}
\arguments{
\item{l}{number of data points to be generated}
\item{d}{dimension of data to be created}
\item{seed}{for reproducibility, the seed of the random number
generator can be set to a fixed value; if \code{NA},
the seed remains unchanged}
\item{...}{all other arguments are passed on to
\code{\link{apcluster}}}
}
\details{\code{apclusterDemo} creates \code{l} \code{d}-dimensional
data points that are uniformly distributed in \eqn{[0,1]^d}. Affinity
propagation is executed for this data set with default parameters.
Alternative settings can be passed to \code{\link{apcluster}} with
additional arguments. After completion of affinity propagation,
the results are shown and the performance measures are plotted.
This function corresponds to the demo function in the original
Matlab code of Frey and Dueck. We warn the user, however, that
uniformly distributed data are not necessarily ideal for demonstrating
clustering, as there can never be real clusters in uniformly
distributed data - all clusters found must be random artefacts.
}
\value{
Upon successful completion, the function returns an invisible list
with three components. The first is the data set that has been
created, the second is the similarity matrix, and the third is an
\code{\linkS4class{APResult}} object with the clustering results (see
examples below).
}
\author{Ulrich Bodenhofer, Andreas Kothmeier & Johannes Palme
\email{apcluster@bioinf.jku.at}}
\references{\url{http://www.bioinf.jku.at/software/apcluster}
Frey, B. J. and Dueck, D. (2007) Clustering by passing messages
between data points. \emph{Science} \bold{315}, 972-976.
DOI: \href{http://dx.doi.org/10.1126/science.1136800}{10.1126/science.1136800}.
Bodenhofer, U., Kothmeier, A., and Hochreiter, S. (2011)
APCluster: an R package for affinity propagation clustering.
\emph{Bioinformatics} \bold{27}, 2463-2464.
DOI: \href{http://dx.doi.org/10.1093/bioinformatics/btr406}{10.1093/bioinformatics/btr406}.
}
\seealso{\code{\linkS4class{APResult}}, \code{\link{plot-methods}},
\code{\link{apcluster}}, \code{\link{apclusterL}}}
\examples{
## create random data set and run affinity propagation
apd <- apclusterDemo()
## plot clustering result along with data set
plot(apd[[3]], apd[[1]])
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{cluster}
|
/fuzzedpackages/apcluster/man/apclusterDemo.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 2,649
|
rd
|
\name{apclusterDemo}
\alias{apclusterDemo}
\title{Affinity Propagation Demo}
\description{
Runs affinity propagation demo for randomly generated data set
according to Frey and Dueck
}
\usage{
apclusterDemo(l=100, d=2, seed=NA, ...)
}
\arguments{
\item{l}{number of data points to be generated}
\item{d}{dimension of data to be created}
\item{seed}{for reproducibility, the seed of the random number
generator can be set to a fixed value; if \code{NA},
the seed remains unchanged}
\item{...}{all other arguments are passed on to
\code{\link{apcluster}}}
}
\details{\code{apclusterDemo} creates \code{l} \code{d}-dimensional
data points that are uniformly distributed in \eqn{[0,1]^d}. Affinity
propagation is executed for this data set with default parameters.
Alternative settings can be passed to \code{\link{apcluster}} with
additional arguments. After completion of affinity propagation,
the results are shown and the performance measures are plotted.
This function corresponds to the demo function in the original
Matlab code of Frey and Dueck. We warn the user, however, that
uniformly distributed data are not necessarily ideal for demonstrating
clustering, as there can never be real clusters in uniformly
distributed data - all clusters found must be random artefacts.
}
\value{
Upon successful completion, the function returns an invisible list
with three components. The first is the data set that has been
created, the second is the similarity matrix, and the third is an
\code{\linkS4class{APResult}} object with the clustering results (see
examples below).
}
\author{Ulrich Bodenhofer, Andreas Kothmeier & Johannes Palme
\email{apcluster@bioinf.jku.at}}
\references{\url{http://www.bioinf.jku.at/software/apcluster}
Frey, B. J. and Dueck, D. (2007) Clustering by passing messages
between data points. \emph{Science} \bold{315}, 972-976.
DOI: \href{http://dx.doi.org/10.1126/science.1136800}{10.1126/science.1136800}.
Bodenhofer, U., Kothmeier, A., and Hochreiter, S. (2011)
APCluster: an R package for affinity propagation clustering.
\emph{Bioinformatics} \bold{27}, 2463-2464.
DOI: \href{http://dx.doi.org/10.1093/bioinformatics/btr406}{10.1093/bioinformatics/btr406}.
}
\seealso{\code{\linkS4class{APResult}}, \code{\link{plot-methods}},
\code{\link{apcluster}}, \code{\link{apclusterL}}}
\examples{
## create random data set and run affinity propagation
apd <- apclusterDemo()
## plot clustering result along with data set
plot(apd[[3]], apd[[1]])
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{cluster}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{dm3_HindIII_10000}
\alias{dm3_HindIII_10000}
\title{Genomic features for dm3 genome and HindIII restriction enzyme at 10 Kbp}
\format{A data frame with 13047 rows and 5 variables:
\describe{
\item{chr:}{chromosome}
\item{map:}{mappability as computed by gem}
\item{res:}{restriction enzyme density per 1 Kbp computed by Biostrings::matchPattern()}
\item{cg:}{cg content as computed by bedtools}
\item{bin:}{genomic bin with the format chromosome:start_position}
\item{pos:}{start position of the genomic bin}
}}
\usage{
dm3_HindIII_10000
}
\description{
A \code{data.frame} containing the mappability, restriction
enzyme density and CG proportion of the dm3 genome and
HindIII restriction enzyme in 10 Kbp bins
}
\keyword{datasets}
|
/man/dm3_HindIII_10000.Rd
|
no_license
|
4DGenome/hicfeatures
|
R
| false
| true
| 857
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{dm3_HindIII_10000}
\alias{dm3_HindIII_10000}
\title{Genomic features for dm3 genome and HindIII restriction enzyme at 10 Kbp}
\format{A data frame with 13047 rows and 5 variables:
\describe{
\item{chr:}{chromosome}
\item{map:}{mappability as computed by gem}
\item{res:}{restriction enzyme density per 1 Kbp computed by Biostrings::matchPattern()}
\item{cg:}{cg content as computed by bedtools}
\item{bin:}{genomic bin with the format chromosome:start_position}
\item{pos:}{start position of the genomic bin}
}}
\usage{
dm3_HindIII_10000
}
\description{
A \code{data.frame} containing the mappability, restriction
enzyme density and CG proportion of the dm3 genome and
HindIII restriction enzyme in 10 Kbp bins
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plumber.R
\name{cr_plumber_pubsub}
\alias{cr_plumber_pubsub}
\title{Plumber - Pub/Sub parser}
\usage{
cr_plumber_pubsub(message = NULL, pass_f = function(x) x)
}
\arguments{
\item{message}{The pubsub message}
\item{pass_f}{An R function that will work with the data parsed out of the pubsub \code{message$data} field.}
}
\description{
A function to use in plumber scripts to accept Pub/Sub messages
}
\details{
This function is intended to be used within \link[plumber]{plumb} API scripts. It needs to be annotated with a \code{@post} URL route and a \code{@param message The pubsub message} as per the plumber documentation.
\code{pass_f} should be a function you create that accepts one argument, the data from the pubsub \code{message$data} field. It is unencoded for you.
The Docker container for the API will need to include \code{googleCloudRunner} installed in its R environment to run this function. This is available in the public \code{gcr.io/gcer-public/cloudrunner} image.
}
\examples{
\dontrun{
# within a plumber api.R script:
# example function echos back pubsub message
pub <- function(x){
paste("Echo:", x)
}
#' Receive pub/sub message
#' @post /pubsub
#' @param message a pub/sub message
function(message=NULL){
googleCloudRunner::cr_plumber_pubsub(message, pub)
}
}
}
\seealso{
\href{https://cloud.google.com/run/docs/tutorials/pubsub}{Google Pub/Sub tutorial for Cloud Run}
Other Cloud Run functions:
\code{\link{cr_run_get}()},
\code{\link{cr_run_list}()},
\code{\link{cr_run}()}
}
\concept{Cloud Run functions}
|
/man/cr_plumber_pubsub.Rd
|
no_license
|
heoa/googleCloudRunner
|
R
| false
| true
| 1,630
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plumber.R
\name{cr_plumber_pubsub}
\alias{cr_plumber_pubsub}
\title{Plumber - Pub/Sub parser}
\usage{
cr_plumber_pubsub(message = NULL, pass_f = function(x) x)
}
\arguments{
\item{message}{The pubsub message}
\item{pass_f}{An R function that will work with the data parsed out of the pubsub \code{message$data} field.}
}
\description{
A function to use in plumber scripts to accept Pub/Sub messages
}
\details{
This function is intended to be used within \link[plumber]{plumb} API scripts. It needs to be annotated with a \code{@post} URL route and a \code{@param message The pubsub message} as per the plumber documentation.
\code{pass_f} should be a function you create that accepts one argument, the data from the pubsub \code{message$data} field. It is unencoded for you.
The Docker container for the API will need to include \code{googleCloudRunner} installed in its R environment to run this function. This is available in the public \code{gcr.io/gcer-public/cloudrunner} image.
}
\examples{
\dontrun{
# within a plumber api.R script:
# example function echos back pubsub message
pub <- function(x){
paste("Echo:", x)
}
#' Receive pub/sub message
#' @post /pubsub
#' @param message a pub/sub message
function(message=NULL){
googleCloudRunner::cr_plumber_pubsub(message, pub)
}
}
}
\seealso{
\href{https://cloud.google.com/run/docs/tutorials/pubsub}{Google Pub/Sub tutorial for Cloud Run}
Other Cloud Run functions:
\code{\link{cr_run_get}()},
\code{\link{cr_run_list}()},
\code{\link{cr_run}()}
}
\concept{Cloud Run functions}
|
# Logistic-regression MAP estimate of the effect of age on Titanic survival,
# with a standard normal prior on the single coefficient (exercise 3.2).
titanic <- read.csv("C:/Users/Sareh/Documents/GitHub/sds383d/data/titanic.csv")
titanic <- titanic[!is.na(titanic$Age), ] # remove missing age rows
# Design matrix: a single column of ages (no intercept term)
x <- matrix(titanic$Age, nrow(titanic), 1)
# Binary response: 1 = survived, 0 = did not
y <- as.numeric(titanic$Survived == "Yes")
# Log posterior for logistic regression with a N(0, I) prior on beta:
#   log p(beta | X, y) = -0.5 * ||beta||^2
#                        + sum_i [ y_i * log p_i + (1 - y_i) * log(1 - p_i) ],
# where p_i = plogis(x_i' beta).
#
# beta: coefficient vector (scalar when optimized with method = "Brent")
# X:    n x p design matrix
# y:    length-n 0/1 response vector
# Returns the (scalar) log posterior density, up to an additive constant.
#
# Uses plogis(eta, log.p = TRUE) for log(p) and plogis(-eta, log.p = TRUE)
# for log(1 - p) so that extreme linear predictors do not underflow to
# log(0) = -Inf (the original log(1/(1+exp(-eta))) form produced NaN there).
log.posterior.function <- function(beta, X, y) {
  eta <- X %*% beta
  -0.5 * sum(beta^2) + sum(y * plogis(eta, log.p = TRUE)) +
    sum((1 - y) * plogis(-eta, log.p = TRUE))
}
# MAP estimate: minimize the negative log posterior over the single
# coefficient using Brent's method, bracketed on [-1, 1]
map <- optim(0, function(beta) -log.posterior.function(beta, x, y), method = "Brent", lower = -1, upper = 1)
map$par
## the optimal value of beta is -0.011
### Problem 3.3
# note: -1:1 evaluates to c(-1, 0, 1) (unary minus binds tighter than `:`)
Betarange = -1:1
|
/Section 3/exercise 3.2&3.3.R
|
no_license
|
Sarehkch/Statistical-Modeling-II
|
R
| false
| false
| 610
|
r
|
titanic <- read.csv("C:/Users/Sareh/Documents/GitHub/sds383d/data/titanic.csv")
titanic <- titanic[!is.na(titanic$Age), ] # remove missing age rows
x <- matrix(titanic$Age, nrow(titanic), 1)
y <- as.numeric(titanic$Survived == "Yes")
# Log posterior for logistic regression with a N(0, I) prior on beta:
#   log p(beta | X, y) = -0.5 * ||beta||^2
#                        + sum_i [ y_i * log p_i + (1 - y_i) * log(1 - p_i) ],
# where p_i = plogis(x_i' beta).
#
# beta: coefficient vector (scalar when optimized with method = "Brent")
# X:    n x p design matrix
# y:    length-n 0/1 response vector
# Returns the (scalar) log posterior density, up to an additive constant.
#
# Uses plogis(eta, log.p = TRUE) for log(p) and plogis(-eta, log.p = TRUE)
# for log(1 - p) so that extreme linear predictors do not underflow to
# log(0) = -Inf (the original log(1/(1+exp(-eta))) form produced NaN there).
log.posterior.function <- function(beta, X, y) {
  eta <- X %*% beta
  -0.5 * sum(beta^2) + sum(y * plogis(eta, log.p = TRUE)) +
    sum((1 - y) * plogis(-eta, log.p = TRUE))
}
map <- optim(0, function(beta) -log.posterior.function(beta, x, y), method = "Brent", lower = -1, upper = 1)
map$par
## the optimal value of beta ia -0.011
### Problem 3.3
Betarange = -1:1
|
# Functions for smoothing or dis-aggregating data over map regions.
# m is a map object
# z is a named vector
# res is resolution of sampling grid
# span is kernel parameter (larger = smoother)
# span = Inf is a special case which invokes cubic spline kernel.
# span is scaled by the map size, and is independent of res.
# result is a frame
# Smooth region-level totals z over map m onto a regular sampling grid.
# Returns a data frame of grid coordinates (x, y) and a smoothed value z,
# expressed per unit of map area (totals are divided by the grid cell area
# at the end). Regions absent from z, or with z = NA, are dropped; every
# remaining region must contain at least one grid point or the function stops.
smooth.map <- function(m, z, res = 50, span = 1/10, averages = FALSE,
                       type = c("smooth", "interp"), merge = FALSE) {
  #if(is.data.frame(z)) z = as.named.vector(z)
  if(averages) {
    # turn averages into sums
    z = z * area.map(m, names(z), sqmi=FALSE)
  }
  # sampling grid
  xlim <- range(m$x, na.rm = TRUE)
  ylim <- range(m$y, na.rm = TRUE)
  # n equally spaced cell centers between start and end
  midpoints <- function(start, end, n) {
    inc <- (end - start)/n
    seq(start + inc/2, end - inc/2, len = n)
  }
  # 2*res is an assumption about aspect ratio (usually true)
  if(length(res) == 1) res = c(2*res, res)
  xs <- midpoints(xlim[1], xlim[2], res[1])
  ys <- midpoints(ylim[1], ylim[2], res[2])
  x <- expand.grid(x = xs, y = ys)
  if(FALSE) {
    # add centroids to the sample points
    xc = apply.polygon(m[c("x", "y")], centroid.polygon)
    # convert m into a matrix
    xc <- t(array(unlist(xc), c(2, length(xc))))
    xc = data.frame(x = xc[, 1], y = xc[, 2])
    x = rbind(x, xc)
  }
  # kernel bandwidth: span is scaled by half the map's bounding-box diagonal,
  # so the amount of smoothing is independent of the grid resolution
  radius = sqrt(diff(xlim)^2 + diff(ylim)^2)/2
  lambda = 1/(span*radius)^2
  #cat("lambda = ", lambda, "\n")
  cell.area = diff(xs[1:2])*diff(ys[1:2])
  # region containing each grid point (NA for points off the map)
  r <- factor(map.where(m, x))
  if(merge) {
    # merge regions
    # merge[r] is the parent of region r
    # regions with merge[r] = NA are considered absent from the map
    # (no sample points will be placed there)
    # this can be slow on complex maps
    merge <- names(z)
    merge <- merge[match.map(m, merge)]
    names(merge) <- m$names
    levels(r) <- merge[levels(r)]
  }
  # remove points not on the map
  i <- !is.na(r)
  x <- x[i, ]
  r <- r[i]
  # keep the full on-map grid for output; x/r may shrink further below
  xo = x
  if(TRUE) {
    # kludge - drop regions with no samples
    n = table(r)
    bad = (n == 0)
    newlevels = levels(r)
    newlevels[bad] = NA
    levels(r) = newlevels
  }
  # put z in canonical order, and drop values which are not in the map
  z = z[levels(r)]
  # remove regions not named in z, or where z is NA
  bad = is.na(z)
  z = z[!bad]
  newlevels = levels(r)
  newlevels[bad] = NA
  levels(r) = newlevels
  i <- !is.na(r)
  x <- x[i, ]
  r <- r[i]
  # do all regions have sample points?
  n = table(r)
  if(any(n == 0)) stop(paste(paste(names(n)[n == 0], collapse = ", "), "have no sample points"))
  type <- match.arg(type)
  if(FALSE) {
    # code for these is in 315/util.r
    # most time is spent here
    # w <- switch(type,
    #   mass = gp.smoother(x, x, r, lambda),
    #   smooth = kr.smoother(x, x, r, lambda))
    # z = drop(z %*% w)
    # cbind(x, z = z)
  } else {
    # "smooth" spreads each region total with a plain kernel smoother;
    # "interp" uses the GP-style whitened smoother instead
    if(type == "smooth") {
      z = kernel.smooth(x, z, xo, lambda, r)
    } else {
      z = gp.smooth(x, z, xo, lambda, r)
    }
    # convert totals to a per-area density
    z = z/cell.area
    cbind(xo, z = z)
  }
}
gp.smooth <- function(x, z, xo, lambda, r) {
  # Predict region measurements z (taken at locations x, grouped by region r)
  # at new locations xo, Gaussian-process style: whiten z against the
  # region-by-region kernel matrix, then kernel-smooth the whitened values
  # without the region-size normalization.
  region.kernel <- kernel.region.region(x, r, lambda)
  whitened.z <- solve(region.kernel, z)
  kernel.smooth(x, whitened.z, xo, lambda, r, normalize = FALSE)
}
# Kernel-smooth per-region measurements z from sample locations x onto new
# locations xo, delegating the heavy lifting to the compiled C_kernel_smooth
# routine. region maps each row of x to the index of the measurement in z
# it belongs to; when normalize is TRUE each region's value is first divided
# by its number of sample points.
kernel.smooth <- function(x, z, xo, lambda, region = NULL, normalize = TRUE) {
  # predict a function measured at locations x to new locations xo
  # promote bare vectors to one-column matrices
  if(!is.matrix(x)) dim(x) <- c(length(x), 1)
  if(!is.matrix(xo)) dim(xo) <- c(length(xo), 1)
  n = nrow(x)
  # default: every sample point is its own region
  if(is.null(region)) region = 1:n
  if(length(region) < n) stop("region must have same length as x")
  region = as.integer(region)
  if(any(is.na(region))) stop("region has NAs")
  if(max(region) > length(z)) stop("not enough measurements for the number of regions")
  no = nrow(xo)
  if(normalize) {
    # divide by region sizes
    z = as.double(z/as.numeric(table(region)))
  }
  # coordinate matrices are transposed so each point's coordinates are
  # contiguous when flattened for the C routine; result comes back in zo
  .C(C_kernel_smooth,
     as.integer(n), as.integer(ncol(x)),
     as.double(t(x)), z, as.integer(region),
     as.integer(no), as.double(t(xo)), zo = double(no),
     as.double(lambda), as.integer(normalize))$zo
}
# Build the nr x nr matrix of kernel values aggregated between every pair of
# regions, via the compiled C_kernel_region_region routine. x holds the
# sample locations, region the region index of each row, lambda the kernel
# bandwidth parameter.
kernel.region.region <- function(x, region, lambda) {
  # promote a bare vector to a one-column matrix
  if(!is.matrix(x)) dim(x) <- c(length(x), 1)
  region = as.integer(region)
  nr = max(region)
  krr = .C(C_kernel_region_region,
           as.integer(nrow(x)), as.integer(ncol(x)),
           as.double(t(x)),
           region, as.double(lambda), as.integer(nr), krr = double(nr*nr))$krr
  # reshape the flat result into the square region-by-region matrix
  dim(krr) = c(nr, nr)
  krr
}
# Build the nr x no matrix of kernel values between each region (rows of x
# grouped by region) and each query location (rows of z), via the compiled
# C_kernel_region_x routine.
kernel.region.x <- function(x, region, z, lambda) {
  # promote bare vectors to one-column matrices
  if(!is.matrix(x)) dim(x) <- c(length(x), 1)
  if(!is.matrix(z)) dim(z) <- c(length(z), 1)
  region = as.integer(region)
  nr = max(region)
  no = nrow(z)
  krx = .C(C_kernel_region_x,
           as.integer(nrow(x)), as.integer(ncol(x)),
           as.double(t(x)), region, as.integer(no), as.double(t(z)),
           as.double(lambda), as.integer(nr), krx = double(nr*no))$krx
  # reshape the flat result into a region-by-query matrix
  dim(krx) = c(nr, no)
  krx
}
|
/R/smooth.r
|
no_license
|
meensrinivasan/maps
|
R
| false
| false
| 4,935
|
r
|
# Functions for smoothing or dis-aggregating data over map regions.
# m is a map object
# z is a named vector
# res is resolution of sampling grid
# span is kernel parameter (larger = smoother)
# span = Inf is a special case which invokes cubic spline kernel.
# span is scaled by the map size, and is independent of res.
# result is a frame
smooth.map <- function(m, z, res = 50, span = 1/10, averages = FALSE,
type = c("smooth", "interp"), merge = FALSE) {
#if(is.data.frame(z)) z = as.named.vector(z)
if(averages) {
# turn averages into sums
z = z * area.map(m, names(z), sqmi=FALSE)
}
# sampling grid
xlim <- range(m$x, na.rm = TRUE)
ylim <- range(m$y, na.rm = TRUE)
midpoints <- function(start, end, n) {
inc <- (end - start)/n
seq(start + inc/2, end - inc/2, len = n)
}
# 2*res is an assumption about aspect ratio (usually true)
if(length(res) == 1) res = c(2*res, res)
xs <- midpoints(xlim[1], xlim[2], res[1])
ys <- midpoints(ylim[1], ylim[2], res[2])
x <- expand.grid(x = xs, y = ys)
if(FALSE) {
# add centroids to the sample points
xc = apply.polygon(m[c("x", "y")], centroid.polygon)
# convert m into a matrix
xc <- t(array(unlist(xc), c(2, length(xc))))
xc = data.frame(x = xc[, 1], y = xc[, 2])
x = rbind(x, xc)
}
radius = sqrt(diff(xlim)^2 + diff(ylim)^2)/2
lambda = 1/(span*radius)^2
#cat("lambda = ", lambda, "\n")
cell.area = diff(xs[1:2])*diff(ys[1:2])
r <- factor(map.where(m, x))
if(merge) {
# merge regions
# merge[r] is the parent of region r
# regions with merge[r] = NA are considered absent from the map
# (no sample points will be placed there)
# this can be slow on complex maps
merge <- names(z)
merge <- merge[match.map(m, merge)]
names(merge) <- m$names
levels(r) <- merge[levels(r)]
}
# remove points not on the map
i <- !is.na(r)
x <- x[i, ]
r <- r[i]
xo = x
if(TRUE) {
# kludge - drop regions with no samples
n = table(r)
bad = (n == 0)
newlevels = levels(r)
newlevels[bad] = NA
levels(r) = newlevels
}
# put z in canonical order, and drop values which are not in the map
z = z[levels(r)]
# remove regions not named in z, or where z is NA
bad = is.na(z)
z = z[!bad]
newlevels = levels(r)
newlevels[bad] = NA
levels(r) = newlevels
i <- !is.na(r)
x <- x[i, ]
r <- r[i]
# do all regions have sample points?
n = table(r)
if(any(n == 0)) stop(paste(paste(names(n)[n == 0], collapse = ", "), "have no sample points"))
type <- match.arg(type)
if(FALSE) {
# code for these is in 315/util.r
# most time is spent here
# w <- switch(type,
# mass = gp.smoother(x, x, r, lambda),
# smooth = kr.smoother(x, x, r, lambda))
# z = drop(z %*% w)
# cbind(x, z = z)
} else {
if(type == "smooth") {
z = kernel.smooth(x, z, xo, lambda, r)
} else {
z = gp.smooth(x, z, xo, lambda, r)
}
z = z/cell.area
cbind(xo, z = z)
}
}
gp.smooth <- function(x, z, xo, lambda, r) {
# predict a function measured at locations x to new locations xo
krr = kernel.region.region(x, r, lambda)
white.z = solve(krr, z)
kernel.smooth(x, white.z, xo, lambda, r, normalize = FALSE)
}
kernel.smooth <- function(x, z, xo, lambda, region = NULL, normalize = TRUE) {
# predict a function measured at locations x to new locations xo
if(!is.matrix(x)) dim(x) <- c(length(x), 1)
if(!is.matrix(xo)) dim(xo) <- c(length(xo), 1)
n = nrow(x)
if(is.null(region)) region = 1:n
if(length(region) < n) stop("region must have same length as x")
region = as.integer(region)
if(any(is.na(region))) stop("region has NAs")
if(max(region) > length(z)) stop("not enough measurements for the number of regions")
no = nrow(xo)
if(normalize) {
# divide by region sizes
z = as.double(z/as.numeric(table(region)))
}
.C(C_kernel_smooth,
as.integer(n), as.integer(ncol(x)),
as.double(t(x)), z, as.integer(region),
as.integer(no), as.double(t(xo)), zo = double(no),
as.double(lambda), as.integer(normalize))$zo
}
kernel.region.region <- function(x, region, lambda) {
if(!is.matrix(x)) dim(x) <- c(length(x), 1)
region = as.integer(region)
nr = max(region)
krr = .C(C_kernel_region_region,
as.integer(nrow(x)), as.integer(ncol(x)),
as.double(t(x)),
region, as.double(lambda), as.integer(nr), krr = double(nr*nr))$krr
dim(krr) = c(nr, nr)
krr
}
kernel.region.x <- function(x, region, z, lambda) {
if(!is.matrix(x)) dim(x) <- c(length(x), 1)
if(!is.matrix(z)) dim(z) <- c(length(z), 1)
region = as.integer(region)
nr = max(region)
no = nrow(z)
krx = .C(C_kernel_region_x,
as.integer(nrow(x)), as.integer(ncol(x)),
as.double(t(x)), region, as.integer(no), as.double(t(z)),
as.double(lambda), as.integer(nr), krx = double(nr*no))$krx
dim(krx) = c(nr, no)
krx
}
|
# Tutorial script: handling missing data (NA) in R.
# 1. Removing missing values
# The na.strings argument marks empty strings as NA on read
data <- read.csv("/cloud/project/missing-data.csv", na.strings = "")
# na.omit drops every row that contains an NA
data.limpia <- na.omit(data)
# Querying specific values with is.na
is.na(data[4,2])
is.na(data[4,1])
is.na(data$Income)
# 2. Selective removal of missing values
# Drop NA rows only for the Income variable
data.income.limpio <- data[!is.na(data$Income),]
# complete.cases() returns a logical vector indicating whether each
# row is free of NA values
complete.cases(data)
data.limpia2 <- data[complete.cases(data),]
# Cleaning zero values:
# Convert the 0s in Income to NA
data$Income[data$Income == 0] <- NA
# Checking again: any remaining match is NA, meaning no value is 0
data$Income[data$Income == 0]
# Handling NA values in statistical summaries
# Applying some statistical functions to a data-frame column that
# contains an NA makes the result NA as well
mean(data$Income)
sd(data$Income)
sum(data$Income)
# The na.rm argument avoids this
mean(data$Income, na.rm=TRUE)
sd(data$Income, na.rm=TRUE)
sum(data$Income, na.rm=TRUE)
|
/01-missing-data.R
|
no_license
|
arturocuya-upc/upc-cc50-lab-2021-09-22
|
R
| false
| false
| 1,256
|
r
|
# 1. Eliminar datos sin valor
# El parámetro na.strings rellena los valores NA
data <- read.csv("/cloud/project/missing-data.csv", na.strings = "")
# na.omit elimina las filas que contengan un string NA
data.limpia <- na.omit(data)
# Consultando valores específicos con is.na
is.na(data[4,2])
is.na(data[4,1])
is.na(data$Income)
# 2. Limpieza selectiva de datos sin valor
# Eliminar NAs solo de la variable Income
data.income.limpio <- data[!is.na(data$Income),]
# complete.cases() devuelve un vector booleano que representa si cada
# fila contiene o no algún valor NA
complete.cases(data)
data.limpia2 <- data[complete.cases(data),]
# Limpiar valores 0:
# Convertir los 0 de Income en NA
data$Income[data$Income == 0] <- NA
# Al verificar de nuevo, vemos que todos los valores son NA, lo que
# significa que ningún valor es 0
data$Income[data$Income == 0]
# Limpiar valores NA para métricas estadísticas
# Al aplicar algunas funciones estadísticas a una columna de un dataframe,
# si esta contiene algún NA, el resultado de la función será NA
mean(data$Income)
sd(data$Income)
sum(data$Income)
# Podemos usar el argumento na.rm para evitar esto
mean(data$Income, na.rm=TRUE)
sd(data$Income, na.rm=TRUE)
sum(data$Income, na.rm=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/openfair.R
\name{openfair_tef_tc_diff_lm}
\alias{openfair_tef_tc_diff_lm}
\title{Run an OpenFAIR simulation at the TEF/TC/DIFF/LM levels}
\usage{
openfair_tef_tc_diff_lm(tef, tc, diff, lm, n = 10^4, verbose = FALSE)
}
\arguments{
\item{tef}{Parameters for TEF simulation}
\item{tc}{Parameters for TC simulation}
\item{diff}{Parameters for DIFF simulation}
\item{lm}{Parameters for LM simulation}
\item{n}{Number of iterations to run.}
\item{verbose}{Whether to print progress indicators.}
}
\value{
Dataframe of scenario name, threat_event count, loss_event count,
mean TC and DIFF exceedance, and ALE samples.
}
\description{
Run an OpenFAIR model with parameters provided for TEF, TC, DIFF, and
LM sampling. If there are multiple controls provided for the scenario, the
arithmetic mean (average) is taken across samples for all controls to get
the effective control strength for each threat event.
}
\examples{
data(mc_quantitative_scenarios)
params <- mc_quantitative_scenarios$scenario[[1]]$parameters
openfair_tef_tc_diff_lm(params$tef, params$tc, params$diff, params$lm, 10)
}
\seealso{
Other OpenFAIR helpers:
\code{\link{compare_tef_vuln}()},
\code{\link{get_mean_control_strength}()},
\code{\link{sample_diff}()},
\code{\link{sample_lef}()},
\code{\link{sample_lm}()},
\code{\link{sample_tc}()},
\code{\link{sample_vuln}()},
\code{\link{select_loss_opportunities}()}
}
\concept{OpenFAIR helpers}
|
/man/openfair_tef_tc_diff_lm.Rd
|
permissive
|
davidski/evaluator
|
R
| false
| true
| 1,489
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/openfair.R
\name{openfair_tef_tc_diff_lm}
\alias{openfair_tef_tc_diff_lm}
\title{Run an OpenFAIR simulation at the TEF/TC/DIFF/LM levels}
\usage{
openfair_tef_tc_diff_lm(tef, tc, diff, lm, n = 10^4, verbose = FALSE)
}
\arguments{
\item{tef}{Parameters for TEF simulation}
\item{tc}{Parameters for TC simulation}
\item{diff}{Parameters for DIFF simulation}
\item{lm}{Parameters for LM simulation}
\item{n}{Number of iterations to run.}
\item{verbose}{Whether to print progress indicators.}
}
\value{
Dataframe of scenario name, threat_event count, loss_event count,
mean TC and DIFF exceedance, and ALE samples.
}
\description{
Run an OpenFAIR model with parameters provided for TEF, TC, DIFF, and
LM sampling. If there are multiple controls provided for the scenario, the
arithmetic mean (average) is taken across samples for all controls to get
the effective control strength for each threat event.
}
\examples{
data(mc_quantitative_scenarios)
params <- mc_quantitative_scenarios$scenario[[1]]$parameters
openfair_tef_tc_diff_lm(params$tef, params$tc, params$diff, params$lm, 10)
}
\seealso{
Other OpenFAIR helpers:
\code{\link{compare_tef_vuln}()},
\code{\link{get_mean_control_strength}()},
\code{\link{sample_diff}()},
\code{\link{sample_lef}()},
\code{\link{sample_lm}()},
\code{\link{sample_tc}()},
\code{\link{sample_vuln}()},
\code{\link{select_loss_opportunities}()}
}
\concept{OpenFAIR helpers}
|
#' Cleans separate Fama-French data frames and adds them to global environment after
#' splitting the set into thirds, quintiles and deciles.
#'
#' The input's name (captured via \code{deparse(substitute(data))}) determines
#' both the date handling ("Dly" = daily, "Ann" = annual, otherwise monthly)
#' and the names of the three objects/files produced (\code{<name>3},
#' \code{<name>5}, \code{<name>10}).
#'
#' @param data A data set whose first column holds dates and whose remaining
#'   columns hold the 30/40/30, quintile and decile portfolio series.
#' @param spr Calculates high-minus-low spreads for relevant data by default.
#' @return Invisibly returns \code{NULL}; called for its side effects of
#'   assigning the three split data frames into the global environment and
#'   saving them under \code{cleanData/<name>.Rdata}.
cleanSubFF <- function(data, spr=TRUE) {
  # Checks for cleanData folder
  if (!file.exists("cleanData")){
    dir.create(file.path(getwd(), "cleanData"))
  }
  # Cleans divisions
  prefix <- deparse(substitute(data))
  row.names(data) <- NULL
  data <- numValues(data)  # project helper — presumably coerces columns to numeric; confirm
  # Fixes dates (to end of period)
  if (grepl("Dly", prefix)) {
    # daily data: dates parse as-is
    data[,1] <- ymd(data[,1])
  } else if (grepl("Ann", prefix)) {
    # annual data: stamp each year with December 31
    for (i in seq_len(nrow(data))) {
      data[i,1] <- paste0(data[i,1], "-12-31")
    }
    data[,1] <- ymd(data[,1])
  } else {
    # monthly data: parse yyyymm as the 5th, then roll forward to month end
    for (i in seq_len(nrow(data))) {
      data[i,1] <- paste0(data[i,1], "05")
    }
    data[,1] <- ymd(data[,1])
    for (i in seq_len(nrow(data))) {
      day(data[i,1]) <- 1
      month(data[i,1]) <- month(data[i,1]) + 1
      day(data[i,1]) <- day(data[i,1]) - 1
    }
  }
  # Names final sheets
  sheet1S <- paste0(prefix, "3")
  sheet2S <- paste0(prefix, "5")
  sheet3S <- paste0(prefix, "10")
  # Splits up by columns: thirds, quintiles, deciles (date column kept in each)
  sheet1 <- data[,1:4]
  sheet2 <- data[,c(1,5:9)]
  sheet3 <- data[,c(1,10:19)]
  # Adds in high-minus-low spreads
  if (spr) {
    sheet1$Spread <- sheet1$Hi30 - sheet1$Lo30
    sheet2$Spread <- sheet2$Hi20 - sheet2$Lo20
    sheet3$Spread <- sheet3$Hi10 - sheet3$Lo10
  }
  # Assigns variable names in the global environment
  assign(sheet1S, sheet1, envir = globalenv())
  assign(sheet2S, sheet2, envir = globalenv())
  assign(sheet3S, sheet3, envir = globalenv())
  # Exports to clean folder; restore the working directory even if a save fails
  old.wd <- setwd(file.path(getwd(), "cleanData"))
  on.exit(setwd(old.wd), add = TRUE)
  # Remove stale output files before saving. Bug fix: the original checked and
  # unlinked the bare name (e.g. "xxx3") while saving "xxx3.Rdata", so stale
  # files were never found or removed.
  for (s in c(sheet1S, sheet2S, sheet3S)) {
    f <- paste0(s, ".Rdata")
    if (file.exists(f)) {
      unlink(f)
    }
  }
  # NOTE(review): save() stores the objects under their local names
  # sheet1/sheet2/sheet3 (as in the original), so loading "xxx3.Rdata" yields
  # an object called `sheet1`, not `xxx3` — confirm this is intended.
  save(sheet1, file=paste0(sheet1S, ".Rdata"))
  save(sheet2, file=paste0(sheet2S, ".Rdata"))
  save(sheet3, file=paste0(sheet3S, ".Rdata"))
  invisible(NULL)
}
|
/R/cleanSubFF.R
|
no_license
|
jrs9/FFAQR
|
R
| false
| false
| 2,049
|
r
|
#' Cleans separate Fama-French data frames and adds them to global environment after
#' splitting the set into thirds, quintiles and deciles.
#'
#' @param data A data set.
#' @param spr Calculates spreads for relevant data by default.
cleanSubFF <- function(data, spr=TRUE) {
# Checks for cleanData folder
if (!file.exists("cleanData")){
dir.create(file.path(getwd(), "cleanData"))
}
# Cleans divisions
prefix <- deparse(substitute(data))
row.names(data) <- NULL
data <- numValues(data)
# Fixes dates (to end of period)
if (grepl("Dly", prefix) == TRUE) {
data[,1] <- ymd(data[,1])
} else if (grepl("Ann", prefix) == TRUE) {
for (i in 1:nrow(data)) {
data[i,1] <- paste0(data[i,1], "-12-31")
}
data[,1] <- ymd(data[,1])
} else {
for (i in 1:nrow(data)) {
data[i,1] <- paste0(data[i,1], "05")
}
data[,1] <- ymd(data[,1])
for (i in 1:nrow(data)) {
day(data[i,1]) <- 1
month(data[i,1]) <- month(data[i,1]) + 1
day(data[i,1]) <- day(data[i,1]) - 1
}
}
# Names final sheets
sheet1S <- paste0(prefix, "3")
sheet2S <- paste0(prefix, "5")
sheet3S <- paste0(prefix, "10")
# Splits up by columns
sheet1 <- data[,1:4]
sheet2 <- data[,c(1,5:9)]
sheet3 <- data[,c(1,10:19)]
# Adds in spreads
if (spr == TRUE){
sheet1$Spread <- sheet1$Hi30 - sheet1$Lo30
sheet2$Spread <- sheet2$Hi20 - sheet2$Lo20
sheet3$Spread <- sheet3$Hi10 - sheet3$Lo10
}
# Assigns variable names
assign(sheet1S, sheet1, envir = globalenv())
assign(sheet2S, sheet2, envir = globalenv())
assign(sheet3S, sheet3, envir = globalenv())
# Exports to clean folder
setwd(file.path(getwd(), "cleanData"))
if (file.exists(sheet1S)) {
unlink(sheet1S)
}
save(sheet1, file=paste0(sheet1S, ".Rdata"))
if (file.exists(sheet2S)) {
unlink(sheet2S)
}
save(sheet2, file=paste0(sheet2S, ".Rdata"))
if (file.exists(sheet3S)) {
unlink(sheet3S)
}
save(sheet3, file=paste0(sheet3S, ".Rdata"))
# Restores directory
setwd("..")
}
|
# Multiple linear regression on the HBAT customer-satisfaction data:
# manual backward variable selection, residual/influence diagnostics,
# polynomial terms on centered data, and predictions for new observations.
library(car)
library(lmtest)
library(nortest)
library(tseries)
library(moments)
library(gvlma)
library(perturb)
library(leaps)
library(MASS)
load("hbat.RData")
# remember, X19 is overall customer satisfaction with the company and independent variables measure different perceptions on the service
# provided
Full<-lm(X19~X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 + X15 + X16 + X17 + X18, data=hbat)
summary(Full)
attributes(Full)
# We implement a do-it-yourself method for variable selection as a first approximation to see what is going
# on when fitting a multiple linear regression model
#We are dropping out non significant variables, one by one, as the model changes with every adjustment we make
# We first drop out the one with the highest p-value, X15, and so on
back1=update(Full,.~.-X15, hbat)
summary(back1)
back1=update(back1,.~.-X18, hbat)
summary(back1)
back1=update(back1,.~.-X10, hbat)
summary(back1)
back1=update(back1,.~.-X8, hbat)
summary(back1)
back1=update(back1,.~.-X14, hbat)
summary(back1)
back1=update(back1,.~.-X13, hbat)
summary(back1)
back1=update(back1,.~.-X17, hbat)
summary(back1)
back1=update(back1,.~.-X16, hbat)
summary(back1)
#So, deletion sequence has been X15, X18, X10, X8, X14, X13, X17 and X16, using the function update()
#Our final model is now stored at object back1, we rename it to lm.1
lm.1=back1
# Assume now that we want to test if parameters \beta_X6 (point estimation = 0.37) and \beta_X9 ( point estimation =0.31)
# can be assumed to be equal. We fit a model with this hypothesis:
lm.3=lm(X19~I(X6+X9)+X7+X11+X12, hbat) #restricted model
#and compare both models
anova(lm.3,lm.1)
# p-value allows us to make the simplification, but this is for illustration purposes only, so I continue with model lm.1
summary(lm.1)
#confidence intervals for beta parameters
confint(lm.1)
#Diagnostics plots
par(mfrow=c(3,2))
plot(lm.1, which=c(1:6), ask=F)
par(mfrow=c(1,1))
#More residual plots versus each term and versus fitted values. Also computes a
#curvature test for each of the plots by adding a quadratic term and testing the
#quadratic to be zero. This is Tukey's test for nonadditivity when plotting against
#fitted values. We are interested in high p-values. Package car.
residualPlots(lm.1)
# For instance, if we want to see if there is a quadratic dependence on, say, X9 (added
# to the model)
residualPlots(lm.1, terms=~X9^2)
#it doesn't seem so
#Influential Observations
# built-in function, several measures of influence: cov ratio, cook's distance, hat values
InF=influence.measures(lm.1)
summary(InF)
# and residuals that weren't print out before
# load package "car"
influencePlot(lm.1, labels=NULL,id.method="identify")
# or
influencePlot(lm.1, labels=NULL,id.method="noteworthy")
# From InF we get the numbers of the potential influence observations
obsi=which(apply(InF$is.inf,1,any))
# and we put together all the influence measures we are interested in
cbind(res=rstandard(lm.1)[obsi], InF$infmat[obsi,8:10])
#more Influence plots
infIndexPlot(lm.1)
# Plots available for multiple linear regression (that are not available in simple linear regression)
#Partial residual plots (or added variable plots): one for each independent variable. It provides information
# about the nature of the marginal relationship for each regressor under consideration. They can also be used
# to explore for influence observations (package car)
avPlots(lm.1)
#Component + residual plots: they are an extension of partial residual plots, and a good way to
# see if the predictors have a linear relationship to the dependent variable. They may suggest a
# transformation of the predictor, typically, sqrt(x), 1/x, log(x).
crPlots(lm.1)
#Residual Analysis
boxplot(lm.1$residuals, outline=T)
# package "nortest" and "tseries"
cvm.test(lm.1$residuals)
jarque.bera.test(lm.1$residuals)
# Lilliefors test
lillie.test(lm.1$residuals)
#kurtosis and skewness for residuals
anscombe.test(lm.1$residuals)
agostino.test(hbat[,6])
#It doesn't seem lack of normality is a problem
#Outlier test (Bonferroni, package car)
#default parameters: cutoff=0.05, n.max=10
outlierTest(lm.1, cutoff=0.05)
#Constant/Non constant variance (homoscedasticity), package "lmtest"
# Breusch-Pagan test
bptest(lm.1)
# Goldfeld-Quandt Test
gqtest(lm.1)
#Score test for non-constant error variance (package car). Null hypothesis
#of constant error variance. Alternative: the error variance changes with the fitted values.
ncvTest(lm.1)
ncvTest(lm.1, var.formula=~hbat$X12)
#Spread-level plot: suggests a power transformation if there is a problem with non-constant variance. What we want is
#the lowess fit showed to be flat, not sloped.
slp(lm.1)
#Durbin Watson test, to test for residuals independence (autocorrelation of residuals):
#package lmtest
dwtest(lm.1, alternative="two.sided")
#Box-Ljung test
Box.test(residuals(lm.1))
# The p-values are actually low, we may have a problem with independence of residuals.
#General test for errors specification in the model, here we are interested in high p-values.
# tests if powers of the fitted values should be included in the model (that includes powers of the explanatory variables and interaction terms)
resettest(lm.1,type="fitted")
## tests if powers of the explanatory variables should be included in the model, low p-values is a "yes" to this question
resettest(lm.1, power=2:3,type="regressor")
## it seems we need to add some powers of explanatory variables, we will see this later on.
# Harvey-Collier test for linearity. Small p-values suggest that the true relationship is not linear but concave or convex.
harvtest(lm.1)
## Global Diagnostic measure, package gvlma
gvlma(lm.1)
#it compares the Global Stat value with
qchisq(0.95,4)
# As it is less, although one of the hypothesis is not satisfied, it is globally accepted
plot(gvlma(lm.1))
# We could transform variable X12 to log(X12) and check out the gvlma test
lm.1log12=lm(X19~X6+X9+X7+X11+log(X12), data=hbat)
gvlma(lm.1log12)
##Including polynomial terms
### It can happen when including polynomial terms that we introduce correlations between
### the variables (that could cause multicollinearity issues.) As a precautionary
### measure, when including polynomial terms we will prefer to center the data:
hbats=data.frame(scale(hbat,scale=F))
lm.2s=lm(X19~X6+X7+X12+X9+X11+I(X7^2), data=hbats)
lm.1s=lm(X19~X6+X7+X12+X9+X11, data=hbats)
anova(lm.1s,lm.2s)
# we prefer model lm.1s
lm.3s=lm(X19~X6+X7+X12+X9+X11+I(X7^2)+I(X12^2), data=hbats)
anova(lm.1s,lm.3s)
#we prefer model lm.3s, there are no outliers, normality is not rejected
outlierTest(lm.3s)
lillie.test(residuals(lm.3s))
## p-value in Durbin-Watson test is a little bit improved
dwtest(lm.3s,alternative="two.sided")
#some influential observations
influencePlot(lm.3s, id.method="noteworthy")
#Global diagnosis
gvlma(lm.3s)
#predictions
#first, we create new data frame with three new data points (made up "on the fly")
newlm.1=data.frame(X6=c(5.3,6,9.1),X7=c(3,4,5), X9=c(3,6,7), X11=c(3,4,8), X12=c(3,6,8))
# prediction with model lm.1
# confidence interval of the mean response (robust to non normality of the errors)
predict(lm.1, newlm.1, interval="confidence")
#prediction intervals for new observations (much less robust to non normality of errors)
predict(lm.1, newlm.1, interval="prediction")
#Predictions with model lm.3s
newlm.3s=data.frame(scale(newlm.1,center=c(7.81,3.6,6.918, 5.805, 5.123)))
predict(lm.3s, newlm.3s, interval="prediction")
predict(lm.3s, newlm.3s, interval="confidence")
|
/RegressionModels.R
|
no_license
|
Linika30/DataAnalysis
|
R
| false
| false
| 7,540
|
r
|
# Multiple linear regression on the HBAT customer-satisfaction data:
# manual backward variable selection, residual/influence diagnostics,
# polynomial terms on centered data, and predictions for new observations.
library(car)
library(lmtest)
library(nortest)
library(tseries)
library(moments)
library(gvlma)
library(perturb)
library(leaps)
library(MASS)
load("hbat.RData")
# remember, X19 is overall customer satisfaction with the company and independent variables measure different perceptions on the service
# provided
Full<-lm(X19~X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 + X15 + X16 + X17 + X18, data=hbat)
summary(Full)
attributes(Full)
# We implement a do-it-yourself method for variable selection as a first approximation to see what is going
# on when fitting a multiple linear regression model
#We are dropping out non significant variables, one by one, as the model changes with every adjustment we make
# We first drop out the one with the highest p-value, X15, and so on
back1=update(Full,.~.-X15, hbat)
summary(back1)
back1=update(back1,.~.-X18, hbat)
summary(back1)
back1=update(back1,.~.-X10, hbat)
summary(back1)
back1=update(back1,.~.-X8, hbat)
summary(back1)
back1=update(back1,.~.-X14, hbat)
summary(back1)
back1=update(back1,.~.-X13, hbat)
summary(back1)
back1=update(back1,.~.-X17, hbat)
summary(back1)
back1=update(back1,.~.-X16, hbat)
summary(back1)
#So, deletion sequence has been X15, X18, X10, X8, X14, X13, X17 and X16, using the function update()
#Our final model is now stored at object back1, we rename it to lm.1
lm.1=back1
# Assume now that we want to test if parameters \beta_X6 (point estimation = 0.37) and \beta_X9 ( point estimation =0.31)
# can be assumed to be equal. We fit a model with this hypothesis:
lm.3=lm(X19~I(X6+X9)+X7+X11+X12, hbat) #restricted model
#and compare both models
anova(lm.3,lm.1)
# p-value allows us to make the simplification, but this is for illustration purposes only, so I continue with model lm.1
summary(lm.1)
#confidence intervals for beta parameters
confint(lm.1)
#Diagnostics plots
par(mfrow=c(3,2))
plot(lm.1, which=c(1:6), ask=F)
par(mfrow=c(1,1))
#More residual plots versus each term and versus fitted values. Also computes a
#curvature test for each of the plots by adding a quadratic term and testing the
#quadratic to be zero. This is Tukey's test for nonadditivity when plotting against
#fitted values. We are interested in high p-values. Package car.
residualPlots(lm.1)
# For instance, if we want to see if there is a quadratic dependence on, say, X9 (added
# to the model)
residualPlots(lm.1, terms=~X9^2)
#it doesn't seem so
#Influential Observations
# built-in function, several measures of influence: cov ratio, cook's distance, hat values
InF=influence.measures(lm.1)
summary(InF)
# and residuals that weren't print out before
# load package "car"
influencePlot(lm.1, labels=NULL,id.method="identify")
# or
influencePlot(lm.1, labels=NULL,id.method="noteworthy")
# From InF we get the numbers of the potential influence observations
obsi=which(apply(InF$is.inf,1,any))
# and we put together all the influence measures we are interested in
cbind(res=rstandard(lm.1)[obsi], InF$infmat[obsi,8:10])
#more Influence plots
infIndexPlot(lm.1)
# Plots available for multiple linear regression (that are not available in simple linear regression)
#Partial residual plots (or added variable plots): one for each independent variable. It provides information
# about the nature of the marginal relationship for each regressor under consideration. They can also be used
# to explore for influence observations (package car)
avPlots(lm.1)
#Component + residual plots: they are an extension of partial residual plots, and a good way to
# see if the predictors have a linear relationship to the dependent variable. They may suggest a
# transformation of the predictor, typically, sqrt(x), 1/x, log(x).
crPlots(lm.1)
#Residual Analysis
boxplot(lm.1$residuals, outline=T)
# package "nortest" and "tseries"
cvm.test(lm.1$residuals)
jarque.bera.test(lm.1$residuals)
# Lilliefors test
lillie.test(lm.1$residuals)
#kurtosis and skewness for residuals
anscombe.test(lm.1$residuals)
agostino.test(hbat[,6])
#It doesn't seem lack of normality is a problem
#Outlier test (Bonferroni, package car)
#default parameters: cutoff=0.05, n.max=10
outlierTest(lm.1, cutoff=0.05)
#Constant/Non constant variance (homoscedasticity), package "lmtest"
# Breusch-Pagan test
bptest(lm.1)
# Goldfeld-Quandt Test
gqtest(lm.1)
#Score test for non-constant error variance (package car). Null hypothesis
#of constant error variance. Alternative: the error variance changes with the fitted values.
ncvTest(lm.1)
ncvTest(lm.1, var.formula=~hbat$X12)
#Spread-level plot: suggests a power transformation if there is a problem with non-constant variance. What we want is
#the lowess fit showed to be flat, not sloped.
slp(lm.1)
#Durbin Watson test, to test for residuals independence (autocorrelation of residuals):
#package lmtest
dwtest(lm.1, alternative="two.sided")
#Box-Ljung test
Box.test(residuals(lm.1))
# The p-values are actually low, we may have a problem with independence of residuals.
#General test for errors specification in the model, here we are interested in high p-values.
# tests if powers of the fitted values should be included in the model (that includes powers of the explanatory variables and interaction terms)
resettest(lm.1,type="fitted")
## tests if powers of the explanatory variables should be included in the model, low p-values is a "yes" to this question
resettest(lm.1, power=2:3,type="regressor")
## it seems we need to add some powers of explanatory variables, we will see this later on.
# Harvey-Collier test for linearity. Small p-values suggest that the true relationship is not linear but concave or convex.
harvtest(lm.1)
## Global Diagnostic measure, package gvlma
gvlma(lm.1)
#it compares the Global Stat value with
qchisq(0.95,4)
# As it is less, although one of the hypothesis is not satisfied, it is globally accepted
plot(gvlma(lm.1))
# We could transform variable X12 to log(X12) and check out the gvlma test
lm.1log12=lm(X19~X6+X9+X7+X11+log(X12), data=hbat)
gvlma(lm.1log12)
##Including polynomial terms
### It can happen when including polynomial terms that we introduce correlations between
### the variables (that could cause multicollinearity issues.) As a precautionary
### measure, when including polynomial terms we will prefer to center the data:
hbats=data.frame(scale(hbat,scale=F))
lm.2s=lm(X19~X6+X7+X12+X9+X11+I(X7^2), data=hbats)
lm.1s=lm(X19~X6+X7+X12+X9+X11, data=hbats)
anova(lm.1s,lm.2s)
# we prefer model lm.1s
lm.3s=lm(X19~X6+X7+X12+X9+X11+I(X7^2)+I(X12^2), data=hbats)
anova(lm.1s,lm.3s)
#we prefer model lm.3s, there are no outliers, normality is not rejected
outlierTest(lm.3s)
lillie.test(residuals(lm.3s))
## p-value in Durbin-Watson test is a little bit improved
dwtest(lm.3s,alternative="two.sided")
#some influential observations
influencePlot(lm.3s, id.method="noteworthy")
#Global diagnosis
gvlma(lm.3s)
#predictions
#first, we create new data frame with three new data points (made up "on the fly")
newlm.1=data.frame(X6=c(5.3,6,9.1),X7=c(3,4,5), X9=c(3,6,7), X11=c(3,4,8), X12=c(3,6,8))
# prediction with model lm.1
# confidence interval of the mean response (robust to non normality of the errors)
predict(lm.1, newlm.1, interval="confidence")
#prediction intervals for new observations (much less robust to non normality of errors)
predict(lm.1, newlm.1, interval="prediction")
#Predictions with model lm.3s
newlm.3s=data.frame(scale(newlm.1,center=c(7.81,3.6,6.918, 5.805, 5.123)))
predict(lm.3s, newlm.3s, interval="prediction")
predict(lm.3s, newlm.3s, interval="confidence")
|
#' Add graph object to a graph series object
#' @description Add a graph object to an extant graph series object for
#' storage of multiple graphs across a sequential or temporal one-dimensional
#' array.
#' @param graph a graph object to add to the graph series object.
#' @param graph_series a graph series object to which the graph object will be
#' added.
#' @return a graph series object of type \code{dgr_graph_1D}.
#' @examples
#' \dontrun{
#' # Create three graphs (using \code{pipeR} for speed)
#' # and create a graph series using those graphs
#' library(magrittr)
#'
#' graph_1 <- create_graph() %>%
#' add_node("a") %>% add_node("b") %>% add_node("c") %>%
#' add_edge("a", "c") %>% add_edge("a", "b") %>% add_edge("b", "c")
#'
#' graph_2 <- graph_1 %>%
#' add_node("d") %>% add_edge("d", "c")
#'
#' graph_3 <- graph_2 %>%
#' add_node("e") %>% add_edge("e", "b")
#'
#' # Create an empty graph series
#' series <- create_series(series_type = "sequential")
#'
#' # Add graphs to the graph series
#' series <- graph_1 %>% add_to_series(series)
#' series <- graph_2 %>% add_to_series(series)
#' series <- graph_3 %>% add_to_series(series)
#' }
#' @export add_to_series
add_to_series <- function(graph,
                          graph_series){

  # Add `graph` to `graph_series`, appending it to the series' list of
  # graphs. For a 'temporal' series the graph must carry a valid
  # 'graph_time' (and optionally 'graph_tz'); for a 'sequential' series
  # the graph is appended unconditionally.
  # Returns the updated graph series object.

  # Get the series type
  series_type <- graph_series$series_type

  # Stop function if graph is not valid; inherits() is preferred over a
  # direct class() comparison, which breaks when an object carries more
  # than one class
  if (!inherits(graph, "dgr_graph")){
    stop("The supplied graph object is not valid.")
  }

  # Stop function if graph series type is not valid
  if (!(series_type %in% c("sequential", "temporal"))){
    stop("The graph series type is neither 'sequential' nor 'temporal'")
  }

  # If graph series type is 'sequential', append the graph to the series
  if (series_type == "sequential"){
    graph_series$graphs[[length(graph_series$graphs) + 1]] <- graph
    return(graph_series)
  }

  # For a graph series with a temporal type, determine if 'graph_time' and,
  # optionally, a 'graph_tz' value is provided
  if (series_type == "temporal"){

    is_time_provided <- !is.null(graph$graph_time)
    is_tz_provided <- !is.null(graph$graph_tz)

    # Stop function if no time information available in a graph to be
    # added to a graph series of the 'temporal' type
    if (!is_time_provided){
      stop("No time information is provided in this graph object.")
    } else {

      # If time zone not provided, automatically provide the "GMT" time zone
      if (!is_tz_provided){
        graph$graph_tz <- "GMT"
      }

      # Accept 'YYYY-MM-DD', 'YYYY-MM-DD HH:MM' or 'YYYY-MM-DD HH:MM:SS'
      is_time_in_correct_format <-
        grepl("^[0-9]{4}-[0-9]{2}-[0-9]{2}$",
              graph$graph_time) |
        grepl("^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$",
              graph$graph_time) |
        grepl("^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}$",
              graph$graph_time)

      # The time zone must be a recognized Olson/IANA name
      is_tz_in_correct_format <- graph$graph_tz %in% OlsonNames()

      if (!is_time_in_correct_format){
        stop("The time provided in this graph object is not in the correct format.")
      }

      if (!is_tz_in_correct_format){
        stop("The time zone provided in this graph object is not in the correct format.")
      }

      # Both checks passed: append the graph (with its normalized tz)
      if (is_time_in_correct_format && is_tz_in_correct_format){
        graph_series$graphs[[length(graph_series$graphs) + 1]] <- graph
        return(graph_series)
      }
    }
  }
}
|
/R/add_to_series.R
|
no_license
|
UweBlock/DiagrammeR
|
R
| false
| false
| 3,495
|
r
|
#' Add graph object to a graph series object
#' @description Add a graph object to an extant graph series object for
#' storage of multiple graphs across a sequential or temporal one-dimensional
#' array.
#' @param graph a graph object to add to the graph series object.
#' @param graph_series a graph series object to which the graph object will be
#' added.
#' @return a graph series object of type \code{dgr_graph_1D}.
#' @examples
#' \dontrun{
#' # Create three graphs (using \code{pipeR} for speed)
#' # and create a graph series using those graphs
#' library(magrittr)
#'
#' graph_1 <- create_graph() %>%
#' add_node("a") %>% add_node("b") %>% add_node("c") %>%
#' add_edge("a", "c") %>% add_edge("a", "b") %>% add_edge("b", "c")
#'
#' graph_2 <- graph_1 %>%
#' add_node("d") %>% add_edge("d", "c")
#'
#' graph_3 <- graph_2 %>%
#' add_node("e") %>% add_edge("e", "b")
#'
#' # Create an empty graph series
#' series <- create_series(series_type = "sequential")
#'
#' # Add graphs to the graph series
#' series <- graph_1 %>% add_to_series(series)
#' series <- graph_2 %>% add_to_series(series)
#' series <- graph_3 %>% add_to_series(series)
#' }
#' @export add_to_series
add_to_series <- function(graph,
                          graph_series){

  # Add `graph` to `graph_series`, appending it to the series' list of
  # graphs. For a 'temporal' series the graph must carry a valid
  # 'graph_time' (and optionally 'graph_tz'); for a 'sequential' series
  # the graph is appended unconditionally.
  # Returns the updated graph series object.

  # Get the series type
  series_type <- graph_series$series_type

  # Stop function if graph is not valid; inherits() is preferred over a
  # direct class() comparison, which breaks when an object carries more
  # than one class
  if (!inherits(graph, "dgr_graph")){
    stop("The supplied graph object is not valid.")
  }

  # Stop function if graph series type is not valid
  if (!(series_type %in% c("sequential", "temporal"))){
    stop("The graph series type is neither 'sequential' nor 'temporal'")
  }

  # If graph series type is 'sequential', append the graph to the series
  if (series_type == "sequential"){
    graph_series$graphs[[length(graph_series$graphs) + 1]] <- graph
    return(graph_series)
  }

  # For a graph series with a temporal type, determine if 'graph_time' and,
  # optionally, a 'graph_tz' value is provided
  if (series_type == "temporal"){

    is_time_provided <- !is.null(graph$graph_time)
    is_tz_provided <- !is.null(graph$graph_tz)

    # Stop function if no time information available in a graph to be
    # added to a graph series of the 'temporal' type
    if (!is_time_provided){
      stop("No time information is provided in this graph object.")
    } else {

      # If time zone not provided, automatically provide the "GMT" time zone
      if (!is_tz_provided){
        graph$graph_tz <- "GMT"
      }

      # Accept 'YYYY-MM-DD', 'YYYY-MM-DD HH:MM' or 'YYYY-MM-DD HH:MM:SS'
      is_time_in_correct_format <-
        grepl("^[0-9]{4}-[0-9]{2}-[0-9]{2}$",
              graph$graph_time) |
        grepl("^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$",
              graph$graph_time) |
        grepl("^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}$",
              graph$graph_time)

      # The time zone must be a recognized Olson/IANA name
      is_tz_in_correct_format <- graph$graph_tz %in% OlsonNames()

      if (!is_time_in_correct_format){
        stop("The time provided in this graph object is not in the correct format.")
      }

      if (!is_tz_in_correct_format){
        stop("The time zone provided in this graph object is not in the correct format.")
      }

      # Both checks passed: append the graph (with its normalized tz)
      if (is_time_in_correct_format && is_tz_in_correct_format){
        graph_series$graphs[[length(graph_series$graphs) + 1]] <- graph
        return(graph_series)
      }
    }
  }
}
|
# Auto-generated fuzz/regression fixture: call the internal CNull sampling
# routine with m = NULL, repetitions = 0 and a fixed 5x7 numeric matrix,
# then print the structure of whatever is returned.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.32784507357642e-308, 7.00600001791186e-310, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# do.call() spreads the named list entries as arguments to the function
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
# Display the structure of the result for inspection
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615832663-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 362
|
r
|
# Auto-generated fuzz/regression fixture: call the internal CNull sampling
# routine with m = NULL, repetitions = 0 and a fixed 5x7 numeric matrix,
# then print the structure of whatever is returned.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.32784507357642e-308, 7.00600001791186e-310, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# do.call() spreads the named list entries as arguments to the function
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
# Display the structure of the result for inspection
str(result)
|
#' Plot score tracks
#'
#'
#' @param dame GRanges object containing a region of interest, or detected with
#' find_dames
#' @param window Number of CpG sites outside (up or down-stream) of the DAME
#' should be plotted. Default = 0.
#' @param positions Number of bp sites outside (up or down-stream) of the DAME
#' should be plotted. Default = 0.
#' @param derASM SummarizedExperiment object obtained from calc_derivedasm
#' (Filtering should be done by the user)
#' @param ASM SummarizedExperiment object obtained from calc_asm (Filtering
#' should be done by the user)
#' @param colvec Vector of colors (mainly useful for the SNP plot, because I add
#' it with cowplot, so I don't export a ggplot, optional)
#' @param plotSNP whether to add the SNP track, only if derASM is specified.
#' Default = FALSE
#'
#' @return Plot
#'
#' @importFrom GenomicRanges GRanges
#' @importFrom IRanges IRanges
#' @importFrom BiocGenerics start
#' @importFrom BiocGenerics end
#' @importFrom GenomeInfoDb seqnames
#' @importFrom SummarizedExperiment assay
#' @importFrom SummarizedExperiment colData
#' @importFrom S4Vectors queryHits
#' @importFrom BiocGenerics start<-
#' @importFrom BiocGenerics end<-
#' @import ggplot2
#'
#' @examples
#' library(GenomicRanges)
#' DAME <- GRanges(19, IRanges(306443,310272))
#' data('readtuples_output')
#' ASM <- calc_asm(readtuples_output)
#' SummarizedExperiment::colData(ASM)$group <- c(rep('CRC',3),rep('NORM',2))
#' SummarizedExperiment::colData(ASM)$samples <- colnames(ASM)
#' dame_track(dame = DAME,
#' ASM = ASM)
#'
#' @export
dame_track <- function(dame, window = 0, positions = 0, derASM = NULL,
    ASM = NULL, colvec = NULL, plotSNP = FALSE) {
    # Plot ASM score tracks over a region of interest (`dame`).
    # Fixes vs. original: all.equal() results are wrapped in isTRUE() --
    # all.equal() returns a character vector describing differences on
    # mismatch, and `!` on a character vector is itself an error, so the
    # intended stop() message was never reached. Scalar branch conditions
    # now use && instead of elementwise &. Interface is unchanged.

    # Widen the region of interest by `positions` bp on each side
    res_dame <- dame
    start(res_dame) <- start(dame) - positions
    end(res_dame) <- end(dame) + positions
    if (!is.null(derASM)) {
        # Sample names must agree between colData() and the column names
        if (!isTRUE(all.equal(colData(derASM)$samples, colnames(derASM)))) {
            stop("Sample names in colData() and colnames are different")
        }
        ASMsnp <- assay(derASM, "der.ASM")
        SNP <- assay(derASM, "snp.table")
        # Per-allele methylation rate = methylated reads / coverage
        ref <- assay(derASM, "ref.meth")/assay(derASM, "ref.cov")
        alt <- assay(derASM, "alt.meth")/assay(derASM, "alt.cov")
        snpgr <- SummarizedExperiment::rowRanges(derASM)
        over <- GenomicRanges::findOverlaps(snpgr, res_dame)
        if (window != 0) {
            # Extend the overlap hit indices by `window` positions on each
            # side. NOTE(review): indices are not clamped to
            # [1, length(snpgr)]; a window wider than the flanking region
            # will index out of range -- confirm with callers.
            win <- c(seq(from = (queryHits(over)[1] - window),
                to = (queryHits(over)[1] - 1), by = 1), queryHits(over),
                seq(from = (utils::tail(queryHits(over), n = 1) +
                  1), to = (utils::tail(queryHits(over), n = 1) +
                  window), by = 1))
        } else {
            win <- queryHits(over)
        }
        # ASMsnp: reshape to long format for ggplot faceting
        subASMsnp <- as.data.frame(ASMsnp[win, ])
        subref <- as.data.frame(ref[win, ])
        subalt <- as.data.frame(alt[win, ])
        subASMsnp$pos <- subref$pos <- subalt$pos <- start(snpgr)[win]
        subASMsnp_long <- reshape2::melt(subASMsnp, id.vars = "pos",
            measure.vars = colnames(ASMsnp))
        subASMsnp_long$score <- "ASMsnp"
        # Marginal methylation per allele
        subref_long <- reshape2::melt(subref, id.vars = "pos",
            measure.vars = colnames(ASMsnp))
        subref_long$score <- "REF:meth"
        subalt_long <- reshape2::melt(subalt, id.vars = "pos",
            measure.vars = colnames(ASMsnp))
        subalt_long$score <- "ALT:meth"
        # SNP table: positions without a SNP become the literal "none"
        subSNP <- SNP[win, ]
        subSNP[is.na(subSNP)] <- "none"
        subSNP <- data.frame(subSNP, stringsAsFactors = FALSE)
        subSNP$pos <- start(snpgr)[win]
        subSNP_long <- reshape2::melt(subSNP, id.vars = "pos",
            measure.vars = colnames(ASMsnp), value.name = "snp.pos")
        # Rebuild SNP labels as "chr<leading digits>:<trailing digits>"
        loc <- as.integer(stringr::str_extract(subSNP_long$snp.pos,
            "[0-9]+$"))
        chrom <- as.integer(stringr::str_extract(subSNP_long$snp.pos,
            "^[0-9]+"))
        subSNP_long$snp.pos <- ifelse(subSNP_long$snp.pos ==
            "none", "none", sprintf("chr%s:%s", chrom, loc))
    }
    if (!is.null(ASM)) {
        # Same name-consistency check as above, same isTRUE() fix
        if (!isTRUE(all.equal(colData(ASM)$samples, colnames(ASM)))) {
            stop("Sample names in colData() and colnames are different")
        }
        # Fraction of reads with at least one methylated position in the tuple
        meth <- (assay(ASM, "MM") + assay(ASM, "MU") + assay(ASM,
            "UM"))/assay(ASM, "cov")
        ASMtuple <- assay(ASM, "asm")
        tupgr <- SummarizedExperiment::rowRanges(ASM)
        # ASMtuple: overlap hits, optionally extended by `window`
        over <- GenomicRanges::findOverlaps(tupgr, res_dame)
        if (window != 0) {
            win <- c(seq(from = (queryHits(over)[1] - window),
                to = (queryHits(over)[1] - 1), by = 1), queryHits(over),
                seq(from = (utils::tail(queryHits(over), n = 1) +
                  1), to = (utils::tail(queryHits(over), n = 1) +
                  window), by = 1))
        } else {
            win <- queryHits(over)
        }
        subASMtuple <- as.data.frame(ASMtuple[win, ])
        submeth <- as.data.frame(meth[win, ])
        subASMtuple$pos <- submeth$pos <- tupgr$midpt[win]
        subASMtuple_long <- reshape2::melt(subASMtuple, id.vars = "pos",
            measure.vars = colnames(ASMtuple))
        subASMtuple_long$score <- "ASMtuple"
        # Marginal methylation
        submeth_long <- reshape2::melt(submeth, id.vars = "pos",
            measure.vars = colnames(meth))
        submeth_long$score <- "meth"
    }
    # Assemble the long data frame depending on which scores were supplied
    if (!is.null(derASM) && !is.null(ASM)) {
        message("Using both scores")
        full_long <- rbind(subASMtuple_long, submeth_long, subASMsnp_long,
            subref_long, subalt_long)
        full_long$score <- factor(full_long$score, levels = c("ASMtuple",
            "meth", "ASMsnp", "REF:meth", "ALT:meth"))
        full_long$group <- colData(derASM)$group[match(full_long$variable,
            colData(derASM)$samples)]
    }
    if (is.null(ASM) && !is.null(derASM)) {
        message("Using ASMsnp score")
        full_long <- rbind(subASMsnp_long, subref_long, subalt_long)
        full_long$score <- factor(full_long$score, levels = c("ASMsnp",
            "REF:meth", "ALT:meth"))
        full_long$group <- colData(derASM)$group[match(full_long$variable,
            colData(derASM)$samples)]
    }
    if (is.null(derASM) && !is.null(ASM)) {
        message("Using ASMtuple score")
        full_long <- rbind(subASMtuple_long, submeth_long)
        full_long$group <- colData(ASM)$group[match(full_long$variable,
            colData(ASM)$samples)]
    }
    # Shaded rectangle marking the original (unexpanded) DAME region
    forect <- data.frame(xmin = start(dame), xmax = end(dame),
        ymin = 0, ymax = Inf)
    # Score tracks: one facet per score, lines + points per sample
    m1 <- ggplot(data = full_long) + geom_line(mapping = aes_(x = ~pos,
        y = ~value, group = ~variable, color = ~group), alpha = 0.5) +
        geom_point(mapping = aes_(x = ~pos, y = ~value, group = ~variable,
            color = ~group)) + geom_rect(data = forect, aes_(xmin = ~xmin,
        xmax = ~xmax, ymin = ~ymin, ymax = ~ymax), alpha = 0.1) +
        facet_grid(score ~ ., scales = "free_y") + theme_bw() +
        xlab("position") + ggtitle("Scores")
    # Keep the x-range so the SNP track aligns with the score tracks
    cord <- ggplot_build(m1)$layout$panel_scales_x[[1]]$range$range
    # SNP track. NOTE(review): subSNP_long only exists when derASM is
    # supplied; plotSNP = TRUE without derASM will fail -- matches the
    # documented contract ("only if derASM is specified").
    if (plotSNP) {
        m2 <- ggplot(data = subSNP_long) +
            geom_point(aes_(x = ~pos, y = 1, group = ~variable,
                            color = ~snp.pos), shape = 8, size = 1,
                        fill = "white", stroke = 1) +
            facet_grid(variable ~ .) +
            theme_bw() +
            theme(panel.spacing = unit(0, "lines"),
                axis.text.y = element_blank(), axis.ticks.y = element_blank(),
                panel.grid.major = element_blank(),
                panel.grid.minor = element_blank()) +
            coord_cartesian(xlim = cord) +
            ylab("") +
            ggtitle("SNPs - for ASMsnp") +
            xlab("position")
        if (!is.null(colvec)) {
            m2 <- m2 + scale_color_manual(values = colvec)
            m1 <- m1 + scale_color_manual(values = colvec)
        }
        # Stack scores (3/4 height) above the SNP track (1/4 height)
        p <- cowplot::plot_grid(m1, m2, ncol = 1, nrow = 2, rel_heights = c(3,
            1), align = "v")
    } else {
        if (!is.null(colvec)) {
            p <- m1 + scale_color_manual(values = colvec)
        } else {
            p <- m1
        }
    }
    return(p)
}
|
/R/score_tracks.R
|
permissive
|
JianpingQuan/DAMEfinder
|
R
| false
| false
| 8,640
|
r
|
#' Plot score tracks
#'
#'
#' @param dame GRanges object containing a region of interest, or detected with
#' find_dames
#' @param window Number of CpG sites outside (up or down-stream) of the DAME
#' should be plotted. Default = 0.
#' @param positions Number of bp sites outside (up or down-stream) of the DAME
#' should be plotted. Default = 0.
#' @param derASM SummarizedExperiment object obtained from calc_derivedasm
#' (Filtering should be done by the user)
#' @param ASM SummarizedExperiment object obtained from calc_asm (Filtering
#' should be done by the user)
#' @param colvec Vector of colors (mainly useful for the SNP plot, because I add
#' it with cowplot, so I don't export a ggplot, optional)
#' @param plotSNP whether to add the SNP track, only if derASM is specified.
#' Default = FALSE
#'
#' @return Plot
#'
#' @importFrom GenomicRanges GRanges
#' @importFrom IRanges IRanges
#' @importFrom BiocGenerics start
#' @importFrom BiocGenerics end
#' @importFrom GenomeInfoDb seqnames
#' @importFrom SummarizedExperiment assay
#' @importFrom SummarizedExperiment colData
#' @importFrom S4Vectors queryHits
#' @importFrom BiocGenerics start<-
#' @importFrom BiocGenerics end<-
#' @import ggplot2
#'
#' @examples
#' library(GenomicRanges)
#' DAME <- GRanges(19, IRanges(306443,310272))
#' data('readtuples_output')
#' ASM <- calc_asm(readtuples_output)
#' SummarizedExperiment::colData(ASM)$group <- c(rep('CRC',3),rep('NORM',2))
#' SummarizedExperiment::colData(ASM)$samples <- colnames(ASM)
#' dame_track(dame = DAME,
#' ASM = ASM)
#'
#' @export
# Plot ASM scores (tuple-based and/or SNP-based) across a DAME region.
# For each requested score container the function extracts the tuples/SNPs
# overlapping the (optionally extended) region, reshapes them to long format
# and draws one ggplot2 facet per score, with an optional extra panel showing
# the SNP identities used for the allele-specific scores.
#
# Arguments:
#   dame      GRanges of length 1 with the region to plot.
#   window    number of extra tuples/SNPs to add on each side of the region.
#   positions number of extra base pairs to add on each side of the region.
#   derASM    SummarizedExperiment with SNP-based ASM (assays der.ASM,
#             snp.table, ref.meth/ref.cov, alt.meth/alt.cov); optional.
#   ASM       SummarizedExperiment with tuple-based ASM (assays asm, MM, MU,
#             UM, cov); optional. At least one of derASM/ASM must be given.
#   colvec    optional vector of colors for the sample groups.
#   plotSNP   if TRUE, add a panel with the SNPs backing the ASMsnp score.
# Returns: a ggplot object (or a cowplot grid when plotSNP = TRUE).
dame_track <- function(dame, window = 0, positions = 0, derASM = NULL, 
    ASM = NULL, colvec = NULL, plotSNP = FALSE) {
    # Extend the region by 'positions' bp on each side before overlap search
    res_dame <- dame
    start(res_dame) <- start(dame) - positions
    end(res_dame) <- end(dame) + positions
    if (!is.null(derASM)) {
        # all.equal() returns a character vector (not FALSE) on mismatch, so
        # the result must be wrapped in isTRUE() before negation; a bare
        # !all.equal(...) errors with "invalid argument type" on mismatch.
        if (!isTRUE(all.equal(colData(derASM)$samples, colnames(derASM)))) {
            stop("Sample names in colData() and colnames are different")
        }
        ASMsnp <- assay(derASM, "der.ASM")
        SNP <- assay(derASM, "snp.table")
        # Per-allele marginal methylation = methylated reads / coverage
        ref <- assay(derASM, "ref.meth")/assay(derASM, "ref.cov")
        alt <- assay(derASM, "alt.meth")/assay(derASM, "alt.cov")
        snpgr <- SummarizedExperiment::rowRanges(derASM)
        over <- GenomicRanges::findOverlaps(snpgr, res_dame)
        # Optionally pad the hit index with 'window' extra sites on each side
        if (window != 0) {
            win <- c(seq(from = (queryHits(over)[1] - window), 
                to = (queryHits(over)[1] - 1), by = 1), queryHits(over), 
                seq(from = (utils::tail(queryHits(over), n = 1) + 
                  1), to = (utils::tail(queryHits(over), n = 1) + 
                  window), by = 1))
        } else {
            win <- queryHits(over)
        }
        # ASMsnp score, long format
        subASMsnp <- as.data.frame(ASMsnp[win, ])
        subref <- as.data.frame(ref[win, ])
        subalt <- as.data.frame(alt[win, ])
        subASMsnp$pos <- subref$pos <- subalt$pos <- start(snpgr)[win]
        subASMsnp_long <- reshape2::melt(subASMsnp, id.vars = "pos", 
            measure.vars = colnames(ASMsnp))
        subASMsnp_long$score <- "ASMsnp"
        # marginal methylation per allele, long format
        subref_long <- reshape2::melt(subref, id.vars = "pos", 
            measure.vars = colnames(ASMsnp))
        subref_long$score <- "REF:meth"
        subalt_long <- reshape2::melt(subalt, id.vars = "pos", 
            measure.vars = colnames(ASMsnp))
        subalt_long$score <- "ALT:meth"
        # SNP table; missing SNPs are labeled "none"
        subSNP <- SNP[win, ]
        subSNP[is.na(subSNP)] <- "none"
        subSNP <- data.frame(subSNP, stringsAsFactors = FALSE)
        subSNP$pos <- start(snpgr)[win]
        subSNP_long <- reshape2::melt(subSNP, id.vars = "pos", 
            measure.vars = colnames(ASMsnp), value.name = "snp.pos")
        # Reformat "<chrom>...<loc>" SNP identifiers as "chr<chrom>:<loc>"
        loc <- as.integer(stringr::str_extract(subSNP_long$snp.pos, 
            "[0-9]+$"))
        chrom <- as.integer(stringr::str_extract(subSNP_long$snp.pos, 
            "^[0-9]+"))
        subSNP_long$snp.pos <- ifelse(subSNP_long$snp.pos == 
            "none", "none", sprintf("chr%s:%s", chrom, loc))
    }
    if (!is.null(ASM)) {
        # Same isTRUE() wrapping as above: guard must not choke on the
        # character return value of all.equal() when names differ.
        if (!isTRUE(all.equal(colData(ASM)$samples, colnames(ASM)))) {
            stop("Sample names in colData() and colnames are different")
        }
        # Marginal methylation of a tuple = reads methylated at either
        # position (MM + MU + UM) over total coverage
        meth <- (assay(ASM, "MM") + assay(ASM, "MU") + assay(ASM, 
            "UM"))/assay(ASM, "cov")
        ASMtuple <- assay(ASM, "asm")
        tupgr <- SummarizedExperiment::rowRanges(ASM)
        # ASMtuple: tuples overlapping the (extended) region
        over <- GenomicRanges::findOverlaps(tupgr, res_dame)
        if (window != 0) {
            win <- c(seq(from = (queryHits(over)[1] - window), 
                to = (queryHits(over)[1] - 1), by = 1), queryHits(over), 
                seq(from = (utils::tail(queryHits(over), n = 1) + 
                  1), to = (utils::tail(queryHits(over), n = 1) + 
                  window), by = 1))
        } else {
            win <- queryHits(over)
        }
        subASMtuple <- as.data.frame(ASMtuple[win, ])
        submeth <- as.data.frame(meth[win, ])
        # Tuples are plotted at their midpoint coordinate
        subASMtuple$pos <- submeth$pos <- tupgr$midpt[win]
        subASMtuple_long <- reshape2::melt(subASMtuple, id.vars = "pos", 
            measure.vars = colnames(ASMtuple))
        subASMtuple_long$score <- "ASMtuple"
        # Marginal methylation, long format
        submeth_long <- reshape2::melt(submeth, id.vars = "pos", 
            measure.vars = colnames(meth))
        submeth_long$score <- "meth"
    }
    # Combine the long tables depending on which score containers were given;
    # the factor levels fix the top-to-bottom facet order.
    if ((!is.null(derASM)) & (!is.null(ASM))) {
        message("Using both scores")
        full_long <- rbind(subASMtuple_long, submeth_long, subASMsnp_long, 
            subref_long, subalt_long)
        full_long$score <- factor(full_long$score, levels = c("ASMtuple", 
            "meth", "ASMsnp", "REF:meth", "ALT:meth"))
        full_long$group <- colData(derASM)$group[match(full_long$variable, 
            colData(derASM)$samples)]
    }
    if (is.null(ASM) & !is.null(derASM)) {
        message("Using ASMsnp score")
        full_long <- rbind(subASMsnp_long, subref_long, subalt_long)
        full_long$score <- factor(full_long$score, levels = c("ASMsnp", 
            "REF:meth", "ALT:meth"))
        full_long$group <- colData(derASM)$group[match(full_long$variable, 
            colData(derASM)$samples)]
    }
    if (is.null(derASM) & !is.null(ASM)) {
        message("Using ASMtuple score")
        full_long <- rbind(subASMtuple_long, submeth_long)
        full_long$group <- colData(ASM)$group[match(full_long$variable, 
            colData(ASM)$samples)]
    }
    # Shaded rectangle marking the original (unextended) DAME
    forect <- data.frame(xmin = start(dame), xmax = end(dame), 
        ymin = 0, ymax = Inf)
    # Score tracks: one facet per score, lines + points per sample
    m1 <- ggplot(data = full_long) + geom_line(mapping = aes_(x = ~pos, 
        y = ~value, group = ~variable, color = ~group), alpha = 0.5) + 
        geom_point(mapping = aes_(x = ~pos, y = ~value, group = ~variable, 
            color = ~group)) + geom_rect(data = forect, aes_(xmin = ~xmin, 
        xmax = ~xmax, ymin = ~ymin, ymax = ~ymax), alpha = 0.1) + 
        facet_grid(score ~ ., scales = "free_y") + theme_bw() + 
        xlab("position") + ggtitle("Scores")
    # Record the x range of the score panel so the SNP panel lines up
    cord <- ggplot_build(m1)$layout$panel_scales_x[[1]]$range$range
    # Optional SNP panel beneath the score tracks
    if (plotSNP) {
        m2 <- ggplot(data = subSNP_long) + 
            geom_point(aes_(x = ~pos, y = 1, group = ~variable, 
                        color = ~snp.pos), shape = 8, size = 1, 
                        fill = "white", stroke = 1) + 
            facet_grid(variable ~ .) + 
            theme_bw() + 
            theme(panel.spacing = unit(0, "lines"), 
                axis.text.y = element_blank(), axis.ticks.y = element_blank(), 
                panel.grid.major = element_blank(), 
                panel.grid.minor = element_blank()) + 
            coord_cartesian(xlim = cord) + 
            ylab("") + 
            ggtitle("SNPs - for ASMsnp") + 
            xlab("position")
        if (!is.null(colvec)) {
            m2 <- m2 + scale_color_manual(values = colvec)
            m1 <- m1 + scale_color_manual(values = colvec)
        }
        p <- cowplot::plot_grid(m1, m2, ncol = 1, nrow = 2, rel_heights = c(3, 
            1), align = "v")
    } else {
        if (!is.null(colvec)) {
            p <- m1 + scale_color_manual(values = colvec)
        } else {
            p <- m1
        }
    }
    return(p)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/output.R
\name{write_flags}
\alias{write_flags}
\title{Add quality flags to a data file in Station Exchange Format version 0.2.0}
\usage{
write_flags(infile, qcfile, outpath, note = "")
}
\arguments{
\item{infile}{Character string giving the path of the SEF file.}
\item{qcfile}{Character string giving the path of the file with
the quality flags as produced with the QC tests.
This file must have 6 (8) tab-separated columns
for daily (sub-daily) data: variable code, year, month, day, (hour),
(minute), value, semicolon(';')-separated failed tests.}
\item{outpath}{Character string giving the output path.}
\item{note}{Character string to be added to the end of the name of the
output file.
It will be separated from the rest of the name by an underscore.
Blanks will be also replaced by underscores.}
}
\description{
Add quality flags to a data file in Station Exchange Format version 0.2.0
}
\note{
The data will be converted to the standard units adopted by the qc
(see \link{Variables}). An exception is made for cloud cover (oktas
will not be converted).
}
\author{
Yuri Brugnara
}
|
/man/write_flags.Rd
|
permissive
|
csaybar/dataresqc
|
R
| false
| true
| 1,171
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/output.R
\name{write_flags}
\alias{write_flags}
\title{Add quality flags to a data file in Station Exchange Format version 0.2.0}
\usage{
write_flags(infile, qcfile, outpath, note = "")
}
\arguments{
\item{infile}{Character string giving the path of the SEF file.}
\item{qcfile}{Character string giving the path of the file with
the quality flags as produced with the QC tests.
This file must have 6 (8) tab-separated columns
for daily (sub-daily) data: variable code, year, month, day, (hour),
(minute), value, semicolon(';')-separated failed tests.}
\item{outpath}{Character string giving the output path.}
\item{note}{Character string to be added to the end of the name of the
output file.
It will be separated from the rest of the name by an underscore.
Blanks will be also replaced by underscores.}
}
\description{
Add quality flags to a data file in Station Exchange Format version 0.2.0
}
\note{
The data will be converted to the standard units adopted by the qc
(see \link{Variables}). An exception is made for cloud cover (oktas
will not be converted).
}
\author{
Yuri Brugnara
}
|
#---------------------------------------------------------------------------------------------------------
# labeledHeatmap.R
#---------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------
#
# .reverseRows = function(Matrix)
#
#--------------------------------------------------------------------------
#
.reverseRows = function(Matrix)
{
  # Return 'Matrix' with its rows in reverse order, preserving matrix shape.
  Matrix[rev(seq_len(nrow(Matrix))), , drop = FALSE];
}
.extend = function(x, n)
{
  # Recycle the values of 'x' to produce a vector of exactly length 'n'.
  rep(x, length.out = n);
}
# Adapt a numeric index to a subset
# Aim: if 'index' is a numeric index of special entries of a vector,
# create a new index that references 'subset' elements of the vector
.restrictIndex = function(index, subset)
{
  # Translate 'index' (positions in the full vector) into positions within
  # 'subset'; entries of 'index' absent from 'subset' are dropped.
  match(index, subset)[index %in% subset];
}
#--------------------------------------------------------------------------
#
# labeledHeatmap
#
#--------------------------------------------------------------------------
# This function plots a heatmap of the specified matrix
# and labels the x and y axes with the given labels.
# It is assumed that the number of entries in xLabels and yLabels is consistent
# with the dimensions in.
# If colorLabels==TRUE, the labels are not printed and instead interpreted as colors --
# -- a simple symbol with the appropriate color is printed instead of the label.
# The x,yLabels are expected to have the form "..color" as in "MEgrey" or "PCturquoise".
# xSymbol, ySymbols are additional markers that can be placed next to color labels
# Plot a heatmap of 'Matrix' with row/column labels, optional module-color
# strips (labels of the form "..color", e.g. "MEgrey"), optional text overlay,
# separator lines, and a color legend (drawn by .heatmapWithLegend).
# Returns (invisibly) the layout/position information from .heatmapWithLegend.
labeledHeatmap = function ( 
  Matrix, 
  xLabels, yLabels = NULL, 
  xSymbols = NULL, ySymbols = NULL, 
  colorLabels = NULL, 
  xColorLabels = FALSE, yColorLabels = FALSE, 
  checkColorsValid = TRUE,
  invertColors = FALSE, 
  setStdMargins = TRUE, 
  xLabelsPosition = "bottom",
  xLabelsAngle = 45,
  xLabelsAdj = 1,
  yLabelsPosition = "left",
  xColorWidth = 2*strheight("M"),
  yColorWidth = 2*strwidth("M"),
  xColorOffset = strheight("M")/3, 
  yColorOffset = strwidth("M")/3,
  # Content of heatmap
  colorMatrix = NULL,
  colors = NULL, 
  naColor = "grey",
  textMatrix = NULL, cex.text = NULL, 
  textAdj = c(0.5, 0.5),
  # labeling of rows and columns
  cex.lab = NULL, 
  cex.lab.x = cex.lab,
  cex.lab.y = cex.lab,
  colors.lab.x = 1,
  colors.lab.y = 1,
  font.lab.x = 1,
  font.lab.y = 1,
  bg.lab.x = NULL, 
  bg.lab.y = NULL,
  x.adj.lab.y = 1,
  plotLegend = TRUE, 
  keepLegendSpace = plotLegend,
  legendLabel = "",
  cex.legendLabel = 1,
  # Separator line specification                   
  verticalSeparator.x = NULL,  
  verticalSeparator.col = 1,  
  verticalSeparator.lty = 1,
  verticalSeparator.lwd = 1,
  verticalSeparator.ext = 0,
  verticalSeparator.interval = 0,
  horizontalSeparator.y = NULL,
  horizontalSeparator.col = 1,  
  horizontalSeparator.lty = 1,
  horizontalSeparator.lwd = 1,
  horizontalSeparator.ext = 0,
  horizontalSeparator.interval = 0,
  # optional restrictions on which rows and columns to actually show
  showRows = NULL,
  showCols = NULL,
  # Other arguments...
  ... ) 
{
  textFnc = match.fun("text");
  # 'colorLabels', if given, overrides both axis-specific color-label flags
  if (!is.null(colorLabels)) {xColorLabels = colorLabels; yColorLabels = colorLabels; }
  # Square matrix with only xLabels given: reuse them for the y axis
  if (is.null(yLabels) & (!is.null(xLabels)) & (dim(Matrix)[1]==dim(Matrix)[2])) 
    yLabels = xLabels;
  nCols = ncol(Matrix);
  nRows = nrow(Matrix);
  # Sanity checks: label vectors must match matrix dimensions
  if (length(xLabels)!=nCols)
    stop("Length of 'xLabels' must equal the number of columns in 'Matrix.'");
  if (length(yLabels)!=nRows)
    stop("Length of 'yLabels' must equal the number of rows in 'Matrix.'");
  # By default show all rows and columns
  if (is.null(showRows)) showRows = c(1:nRows);
  if (is.null(showCols)) showCols = c(1:nCols);
  nShowCols = length(showCols);
  nShowRows = length(showRows);
  if (nShowCols==0) stop("'showCols' is empty.");
  if (nShowRows==0) stop("'showRows' is empty.");
  # A label is treated as a color label if characters 3..end name a valid
  # R color (the "..color" convention, e.g. "MEblue" -> "blue")
  if (checkColorsValid)
  {
    xValidColors = !is.na(match(substring(xLabels, 3), colors()));
    yValidColors = !is.na(match(substring(yLabels, 3), colors()));
  } else {
    xValidColors = rep(TRUE, length(xLabels));
    yValidColors = rep(TRUE, length(yLabels));
  }
  # Logical masks (restricted to shown rows/cols) of color vs. text labels
  if (sum(xValidColors)>0) xColorLabInd = xValidColors[showCols]
  if (sum(!xValidColors)>0) xTextLabInd = !xValidColors[showCols]
  if (sum(yValidColors)>0) yColorLabInd = yValidColors[showRows]
  if (sum(!yValidColors)>0) yTextLabInd = !yValidColors[showRows]
  if (setStdMargins)
  {
    # Color labels need less margin space than text labels
    if (xColorLabels & yColorLabels)
    {
      par(mar=c(2,2,3,5)+0.2);
    } else {
      par(mar = c(7,7,3,5)+0.2);
    }
  }
  xLabels.show = xLabels[showCols];
  yLabels.show = yLabels[showRows];
  if (!is.null(xSymbols)) 
  {
    if (length(xSymbols)!=nCols) 
      stop("When 'xSymbols' are given, their length must equal the number of columns in 'Matrix.'");
    xSymbols.show = xSymbols[showCols];
  } else
    xSymbols.show = NULL;
  if (!is.null(ySymbols)) 
  {
    if (length(ySymbols)!=nRows) 
      stop("When 'ySymbols' are given, their length must equal the number of rows in 'Matrix.'");
    ySymbols.show = ySymbols[showRows];
  } else
    ySymbols.show = NULL;
  # Resolve label-position keywords: xLabPos 1=bottom 2=top, yLabPos 1=left 2=right
  xLabPos = charmatch(xLabelsPosition, c("bottom", "top"));
  if (is.na(xLabPos))
    stop("Argument 'xLabelsPosition' must be (a unique abbreviation of) 'bottom', 'top'");
  yLabPos = charmatch(yLabelsPosition, c("left", "right"));
  if (is.na(yLabPos))
    stop("Argument 'yLabelsPosition' must be (a unique abbreviation of) 'left', 'right'");
  if (is.null(colors)) colors = heat.colors(30);
  if (invertColors) colors = rev(colors);
  # Draw the heatmap + legend; labPos carries cell/axis positions for all
  # subsequent label and separator placement
  labPos = .heatmapWithLegend(Matrix[showRows, showCols, drop = FALSE], 
                  signed = FALSE, colorMatrix = colorMatrix, colors = colors, naColor = naColor, 
                  cex.legendAxis = cex.lab, plotLegend = plotLegend, keepLegendSpace = keepLegendSpace, 
                  legendLabel = legendLabel, cex.legendLabel = cex.legendLabel, ...)
  plotbox = labPos$box;
  xmin = plotbox[1]; xmax = plotbox[2]; ymin = plotbox[3]; yrange = plotbox[4]-ymin;
  ymax = plotbox[4]; xrange = xmax - xmin;
  # The positions below are for showCols/showRows-restricted data
  xLeft = labPos$xLeft;
  xRight = labPos$xRight;
  yTop = labPos$yTop;
  yBot = labPos$yBot;
  xspacing = labPos$xMid[2] - labPos$xMid[1];
  yspacing = abs(labPos$yMid[2] - labPos$yMid[1]);
  # Per-column/row label offsets, recycled then restricted to shown entries
  offsetx = .extend(xColorOffset, nCols)[showCols]
  offsety = .extend(yColorOffset, nRows)[showRows]
  xColW = xColorWidth;
  yColW = yColorWidth;
  # Additional angle-dependent offsets for x axis labels
  textOffsetY = strheight("M") * cos(xLabelsAngle/180 * pi);
  # Leave room for the color strip if any color labels are present
  if (any(xValidColors)) offsetx = offsetx + xColW;
  if (any(yValidColors)) offsety = offsety + yColW;
  # Create the background for column and row labels.
  # Margin widths converted from inches to user coordinates.
  extension.left = par("mai")[2] *  # left margin width in inches
           par("cxy")[1] / par("cin")[1]   # character size in user coordinates/character size in inches
  extension.right = par("mai")[4] *  # right margin width in inches
           par("cxy")[1] / par("cin")[1]   # character size in user coordinates/character size in inches
  extension.bottom = par("mai")[1] * 
           par("cxy")[2] / par("cin")[2]-   # character size in user coordinates/character size in inches
           offsetx
  extension.top = par("mai")[3] * 
           par("cxy")[2] / par("cin")[2]-   # character size in user coordinates/character size in inches
           offsetx
  figureBox = par("usr");
  figXrange = figureBox[2] - figureBox[1];
  figYrange = figureBox[4] - figureBox[3];
  # Background rectangles behind x-axis labels (drawn as slanted polygons
  # so they follow the label angle into the margin)
  if (!is.null(bg.lab.x))
  {
    bg.lab.x = .extend(bg.lab.x, nCols)[showCols];
    if (xLabPos==1)
    {
      y0 = ymin;
      ext = extension.bottom;
      sign = 1;
    } else {
      y0 = ymax;
      ext = extension.top;
      sign = -1;
    }
    figureDims = par("pin");
    angle = xLabelsAngle/180*pi;
    # Aspect-ratio correction so the polygon slant matches the text angle
    ratio = figureDims[1]/figureDims[2] * figYrange/figXrange;
    ext.x = -sign * ext * 1/tan(angle)/ratio;
    ext.y = sign * ext * sign(sin(angle))
    #offset = (sum(xValidColors)>0) * xColW + offsetx + textOffsetY;
    offset = offsetx + textOffsetY;
    for (cc in 1:nShowCols)
      polygon(x = c(xLeft[cc], xLeft[cc], xLeft[cc] + ext.x, xRight[cc] + ext.x, xRight[cc], xRight[cc]),
              y = c(y0, y0-sign*offset[cc], y0-sign*offset[cc] - ext.y, y0-sign*offset[cc] - ext.y, 
                    y0-sign*offset[cc], y0),
              border = bg.lab.x[cc], col = bg.lab.x[cc], xpd = TRUE);
  }
  # Background rectangles behind y-axis labels
  if (!is.null(bg.lab.y))
  {
    bg.lab.y = .extend(bg.lab.y, nRows)
    # Rows are drawn bottom-up, so the background vector must be reversed
    reverseRows = TRUE;
    if (reverseRows) bg.lab.y = rev(bg.lab.y);
    bg.lab.y = bg.lab.y[showRows];
    if (yLabPos==1)
    {
      xl = xmin-extension.left;
      xr = xmin;
    } else {
      xl = xmax;
      xr = xmax + extension.right;
    }
    for (r in 1:nShowRows)
      rect(xl, yBot[r], xr, yTop[r],
           col = bg.lab.y[r], border = bg.lab.y[r], xpd = TRUE);
  }
  colors.lab.x = .extend(colors.lab.x, nCols)[showCols];
  font.lab.x = .extend(font.lab.x, nCols)[showCols];
  # Write out labels
  # Text x-axis labels (those not recognized as colors)
  if (sum(!xValidColors)>0)
  {
    xLabYPos = if(xLabPos==1) ymin - offsetx- textOffsetY else ymax + offsetx + textOffsetY;
    if (is.null(cex.lab)) cex.lab = 1;
    mapply(textFnc, x = labPos$xMid[xTextLabInd], 
                    y = xLabYPos, labels = xLabels.show[xTextLabInd], 
                    col = colors.lab.x[xTextLabInd],
                    font = font.lab.x[xTextLabInd],
           MoreArgs = list(srt = xLabelsAngle, 
                    adj = xLabelsAdj, xpd = TRUE, cex = cex.lab.x));
  }
  # Color-strip x-axis labels plus optional symbols next to them
  if (sum(xValidColors)>0)
  {
    baseY = if (xLabPos==1) ymin-offsetx else ymax + offsetx;
    deltaY = if (xLabPos==1) xColW else -xColW;
    rect(xleft = labPos$xMid[xColorLabInd] - xspacing/2, ybottom = baseY[xColorLabInd],
         xright = labPos$xMid[xColorLabInd] + xspacing/2, ytop = baseY[xColorLabInd] + deltaY,
         density = -1,  col = substring(xLabels.show[xColorLabInd], 3), 
         border = substring(xLabels.show[xColorLabInd], 3), xpd = TRUE)
    if (!is.null(xSymbols))
      mapply(textFnc, x = labPos$xMid[xColorLabInd], 
             y = baseY[xColorLabInd] -textOffsetY - sign(deltaY)* strwidth("M")/3, 
             labels = xSymbols.show[xColorLabInd],
             col = colors.lab.x[xColorLabInd],
             font = font.lab.x[xColorLabInd],
             MoreArgs = list( adj = xLabelsAdj, 
                    xpd = TRUE, srt = xLabelsAngle, cex = cex.lab.x));
  }
  x.adj.lab.y = .extend(x.adj.lab.y, nRows)[showRows]
  # Width of the margin available for the y labels, in user coordinates
  if (yLabPos==1)
  {
     marginWidth = par("mai")[2] / par("pin")[1] * xrange
  } else {
     marginWidth = par("mai")[4] / par("pin")[1] * xrange
  }
  xSpaceForYLabels = marginWidth-2*strwidth("M")/3 - ifelse(yValidColors[showRows], yColW, 0);
  xPosOfYLabels.relative = xSpaceForYLabels * (1-x.adj.lab.y) + offsety
  colors.lab.y = .extend(colors.lab.y, nRows)[showRows];
  font.lab.y = .extend(font.lab.y, nRows)[showRows];
  # Text y-axis labels
  if (sum(!yValidColors)>0)
  {
    if (is.null(cex.lab)) cex.lab = 1;
    if (yLabPos==1)
    {
      x = xmin - strwidth("M")/3 - xPosOfYLabels.relative[yTextLabInd]
      adj = x.adj.lab.y[yTextLabInd]
    } else {
      x = xmax + strwidth("M")/3 + xPosOfYLabels.relative[yTextLabInd];
      adj = 1-x.adj.lab.y[yTextLabInd];
    }
    mapply(textFnc, y = labPos$yMid[yTextLabInd], labels = yLabels.show[yTextLabInd],
              adj = lapply(adj, c, 0.5), 
              x = x,
              col = colors.lab.y[yTextLabInd],
              font = font.lab.y[yTextLabInd],
           MoreArgs = list(srt = 0, xpd = TRUE, cex = cex.lab.y));
  }
  # Color-strip y-axis labels plus optional symbols
  if (sum(yValidColors)>0)
  {
    if (yLabPos==1)
    {
      xl = xmin-offsety;
      xr = xmin-offsety + yColW;
      xtext = xmin - strwidth("M")/3 - xPosOfYLabels.relative[yColorLabInd];
      adj = x.adj.lab.y[yColorLabInd]
    } else {
      xl = xmax + offsety - yColW;
      xr = xmax + offsety;
      # NOTE(review): this right-side branch anchors xtext at xmin, while the
      # analogous text-label branch above uses xmax here — looks like it
      # should be xmax; confirm intended placement before changing.
      xtext = xmin + strwidth("M")/3 + xPosOfYLabels.relative[yColorLabInd]
      adj = 1-x.adj.lab.y[yColorLabInd];
    }
    rect(xleft = xl[yColorLabInd], ybottom = rev(labPos$yMid[yColorLabInd]) - yspacing/2, 
         xright = xr[yColorLabInd], ytop = rev(labPos$yMid[yColorLabInd]) + yspacing/2,
         density = -1,  col = substring(rev(yLabels.show[yColorLabInd]), 3), 
         border = substring(rev(yLabels.show[yColorLabInd]), 3), xpd = TRUE)
    #for (i in yColorLabInd)
    #{
    #  lines(c(xmin- offsetx, xmin- offsetx+yColW), y = rep(labPos$yMid[i] - yspacing/2, 2), col = i, xpd = TRUE)
    #  lines(c(xmin- offsetx, xmin- offsetx+yColW), y = rep(labPos$yMid[i] + yspacing/2, 2), col = i, xpd = TRUE)
    #}
    if (!is.null(ySymbols))
      mapply(textFnc, y = labPos$yMid[yColorLabInd], labels = ySymbols.show[yColorLabInd],
                adj = lapply(adj, c, 0.5), 
                x = xtext, col = colors.lab.y[yColorLabInd],
                font = font.lab.y[yColorLabInd],
             MoreArgs = list(srt = 0, xpd = TRUE, cex = cex.lab.y));
  }
  # Draw separator lines, if requested
  # A separator at 0 is valid (left of the first shown column), hence the
  # extended index vectors below.
  showCols.ext = c(if (1 %in% showCols) 0 else NULL, showCols);
  showCols.shift = if (0 %in% showCols.ext) 1 else 0;
  if (length(verticalSeparator.x) > 0)
  {
    if (any(verticalSeparator.x < 0 | verticalSeparator.x > nCols))
      stop("If given. 'verticalSeparator.x' must all be between 0 and the number of columns.");
    colSepShowIndex = which(verticalSeparator.x %in% showCols.ext);
    verticalSeparator.x.show = .restrictIndex(verticalSeparator.x, showCols.ext)-showCols.shift;
  } else if (verticalSeparator.interval > 0)
  {
    # Regular separators every 'verticalSeparator.interval' shown columns
    verticalSeparator.x.show = verticalSeparator.x = 
         seq(from = verticalSeparator.interval, by = verticalSeparator.interval, 
             length.out = floor(length(showCols)/verticalSeparator.interval));
    colSepShowIndex = 1:length(verticalSeparator.x);
  } else 
    verticalSeparator.x.show = NULL;
  if (length(verticalSeparator.x.show) > 0)
  {
    nLines = length(verticalSeparator.x);
    vs.col = .extend(verticalSeparator.col, nLines)[colSepShowIndex];
    vs.lty = .extend(verticalSeparator.lty, nLines)[colSepShowIndex];
    vs.lwd = .extend(verticalSeparator.lwd, nLines)[colSepShowIndex];
    vs.ext = .extend(verticalSeparator.ext, nLines)[colSepShowIndex];
    x.lines = ifelse(verticalSeparator.x.show>0, labPos$xRight[verticalSeparator.x.show], labPos$xLeft[1]);
    nLines.show = length(verticalSeparator.x.show);
    for (l in 1:nLines.show)
      lines(rep(x.lines[l], 2), c(ymin, ymax), col = vs.col[l], lty = vs.lty[l], lwd = vs.lwd[l]);
    # Extend separators into the label margin, following the label angle
    angle = xLabelsAngle/180*pi;
    if (angle==0) angle = pi/2;
    if (xLabelsPosition =="bottom")
    {
      sign = 1;
      y0 = ymin;
      ext = extension.bottom;
    } else {
      sign = -1;
      y0 = ymax;
      ext = extension.top;
    }
    figureDims = par("pin");
    ratio = figureDims[1]/figureDims[2] * figYrange/figXrange;
    ext.x = -sign * ext * 1/tan(angle)/ratio;
    ext.y = sign * ext * sign(sin(angle))
    #offset = (sum(xValidColors)>0) * xColW + offsetx + textOffsetY;
    offset = offsetx + textOffsetY;
    for (l in 1:nLines.show)
      lines(c(x.lines[l], x.lines[l], x.lines[l] + vs.ext[l] * ext.x[l]), 
            c(y0, y0-sign*offset[l], y0-sign*offset[l] - vs.ext[l] * ext.y[l]), 
            col = vs.col[l], lty = vs.lty[l], lwd = vs.lwd[l], xpd = TRUE);
  }
  # Horizontal separators, mirroring the vertical logic (rows are reversed
  # because the heatmap is drawn with row 1 at the top)
  showRows.ext = c(if (1 %in% showRows) 0 else NULL, showRows);
  showRows.shift = if (0 %in% showRows.ext) 1 else 0;
  if (length(horizontalSeparator.y) >0)
  {
    if (any(horizontalSeparator.y < 0 | horizontalSeparator.y > nRows))
      stop("If given. 'horizontalSeparator.y' must all be between 0 and the number of rows.");
    rowSepShowIndex = which( horizontalSeparator.y %in% showRows.ext);
    horizontalSeparator.y.show = .restrictIndex(horizontalSeparator.y, showRows.ext)-showRows.shift;
  } else if (horizontalSeparator.interval > 0)
  {
    horizontalSeparator.y.show = horizontalSeparator.y = 
         seq(from = horizontalSeparator.interval, by = horizontalSeparator.interval, 
             length.out = floor(length(showRows)/horizontalSeparator.interval));
    rowSepShowIndex = 1:length(horizontalSeparator.y);
  } else 
    horizontalSeparator.y.show = NULL;
  if (length(horizontalSeparator.y.show) > 0)
  {
    reverseRows = TRUE;
    if (reverseRows)
    {
       horizontalSeparator.y.show = nShowRows - horizontalSeparator.y.show+1;
       y.lines = ifelse( horizontalSeparator.y.show <=nShowRows, 
                         labPos$yBot[horizontalSeparator.y.show], labPos$yTop[nShowRows]);
    } else {
       y.lines = ifelse( horizontalSeparator.y.show > 0, labPos$yBot[horizontalSeparator.y.show], labPos$yTop[1]);
    }
    nLines = length(horizontalSeparator.y);
    vs.col = .extend(horizontalSeparator.col, nLines)[rowSepShowIndex];
    vs.lty = .extend(horizontalSeparator.lty, nLines)[rowSepShowIndex];
    vs.lwd = .extend(horizontalSeparator.lwd, nLines)[rowSepShowIndex];
    vs.ext = .extend(horizontalSeparator.ext, nLines)[rowSepShowIndex];
    nLines.show = length(horizontalSeparator.y.show);
    for (l in 1:nLines.show)
    {
      if (yLabPos==1)
      {
        xl = xmin-vs.ext[l]*extension.left;
        xr = xmax;
      } else {
        xl = xmin;
        xr = xmax + vs.ext[l]*extension.right;
      }
      lines(c(xl, xr), rep(y.lines[l], 2),  
            col = vs.col[l], lty = vs.lty[l], lwd = vs.lwd[l], xpd = TRUE);
    }
  }
  # Overlay per-cell text, if given
  if (!is.null(textMatrix))
  {
    if (is.null(cex.text)) cex.text = par("cex");
    # Allow a plain vector of matching total length
    if (is.null(dim(textMatrix)))
      if (length(textMatrix)==prod(dim(Matrix))) dim(textMatrix)=dim(Matrix);
    if (!isTRUE(all.equal(dim(textMatrix), dim(Matrix))))
      stop("labeledHeatmap: textMatrix was given, but has dimensions incompatible with Matrix.");
    for (rw in 1:nShowRows)
      for (cl in 1:nShowCols)
      {
        text(labPos$xMid[cl], labPos$yMid[rw],
             as.character(textMatrix[showRows[rw],showCols[cl]]), xpd = TRUE, cex = cex.text, adj = textAdj);
      }
  }
  # Draw (empty) axes so downstream axis() calls have something to attach to
  axis(1, labels = FALSE, tick = FALSE)
  axis(2, labels = FALSE, tick = FALSE)
  axis(3, labels = FALSE, tick = FALSE)
  axis(4, labels = FALSE, tick = FALSE)
  invisible(labPos)
}
#===================================================================================================
#
# multi-page labeled heatmap
#
#===================================================================================================
# Plot a labeled heatmap split across multiple pages when the matrix is too
# large for a single plot. Rows and columns are partitioned into chunks and
# each (column-chunk, row-chunk) combination is rendered via labeledHeatmap()
# with a common color scale. Pages advance down the row chunks first, then
# across the column chunks.
labeledHeatmap.multiPage = function(
   # Input data and ornaments
   Matrix, 
   xLabels, yLabels = NULL, 
   xSymbols = NULL, ySymbols = NULL, 
   textMatrix = NULL, 
   # Paging options
   rowsPerPage = NULL, maxRowsPerPage = 20, 
   colsPerPage = NULL, maxColsPerPage = 10, 
   addPageNumberToMain = TRUE, 
   # Further arguments to labeledHeatmap
   zlim = NULL,
   signed = TRUE,
   main = "", 
   ...)
{
  nRows.total = nrow(Matrix);
  nCols.total = ncol(Matrix);
  # Partition rows/columns into per-page index sets unless supplied.
  if (is.null(rowsPerPage))
    rowsPerPage = allocateJobs(nRows.total, ceiling(nRows.total/maxRowsPerPage));
  if (is.null(colsPerPage))
    colsPerPage = allocateJobs(nCols.total, ceiling(nCols.total/maxColsPerPage));
  nPages.rows = length(rowsPerPage);
  nPages.cols = length(colsPerPage);
  # Common color scale across all pages; symmetric about zero if 'signed'.
  if (is.null(zlim))
  {
    zlim = range(Matrix, na.rm = TRUE);
    if (signed) zlim = c(-max(abs(zlim)), max(abs(zlim)));
  }
  multiPage = (nPages.cols > 1 | nPages.rows > 1);
  page = 0;
  for (pageCol in 1:nPages.cols) for (pageRow in 1:nPages.rows)
  {
    page = page + 1;
    # Append the page number to the title only when there is more than one page.
    title = main;
    if (addPageNumberToMain & multiPage) title = spaste(main, "(page ", page, ")");
    labeledHeatmap(Matrix = Matrix, 
                   xLabels = xLabels, xSymbols = xSymbols,
                   yLabels = yLabels, ySymbols = ySymbols,
                   textMatrix = textMatrix,
                   zlim = zlim, main = title, 
                   showRows = rowsPerPage[[pageRow]], showCols = colsPerPage[[pageCol]],
                   ...);
  }
}
|
/R/labeledHeatmap.R
|
no_license
|
cran/WGCNA
|
R
| false
| false
| 20,188
|
r
|
#---------------------------------------------------------------------------------------------------------
# labeledHeatmap.R
#---------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------
#
# .reverseRows = function(Matrix)
#
#--------------------------------------------------------------------------
#
.reverseRows = function(Matrix)
{
ind = seq(from=dim(Matrix)[1], to=1, by=-1);
Matrix[ind,, drop = FALSE];
#Matrix
}
.extend = function(x, n)
{
nRep = ceiling(n/length(x));
rep(x, nRep)[1:n];
}
# Adapt a numeric index to a subset
# Aim: if 'index' is a numeric index of special entries of a vector,
# create a new index that references 'subset' elements of the vector
.restrictIndex = function(index, subset)
{
out = match(index, subset);
out[!is.na(out)];
}
#--------------------------------------------------------------------------
#
# labeledHeatmap
#
#--------------------------------------------------------------------------
# This function plots a heatmap of the specified matrix
# and labels the x and y axes wit the given labels.
# It is assumed that the number of entries in xLabels and yLabels is consistent
# with the dimensions in.
# If colorLabels==TRUE, the labels are not printed and instead interpreted as colors --
# -- a simple symbol with the appropriate color is printed instead of the label.
# The x,yLabels are expected to have the form "..color" as in "MEgrey" or "PCturquoise".
# xSymbol, ySymbols are additional markers that can be placed next to color labels
labeledHeatmap = function (
Matrix,
xLabels, yLabels = NULL,
xSymbols = NULL, ySymbols = NULL,
colorLabels = NULL,
xColorLabels = FALSE, yColorLabels = FALSE,
checkColorsValid = TRUE,
invertColors = FALSE,
setStdMargins = TRUE,
xLabelsPosition = "bottom",
xLabelsAngle = 45,
xLabelsAdj = 1,
yLabelsPosition = "left",
xColorWidth = 2*strheight("M"),
yColorWidth = 2*strwidth("M"),
xColorOffset = strheight("M")/3,
yColorOffset = strwidth("M")/3,
# Content of heatmap
colorMatrix = NULL,
colors = NULL,
naColor = "grey",
textMatrix = NULL, cex.text = NULL,
textAdj = c(0.5, 0.5),
# labeling of rows and columns
cex.lab = NULL,
cex.lab.x = cex.lab,
cex.lab.y = cex.lab,
colors.lab.x = 1,
colors.lab.y = 1,
font.lab.x = 1,
font.lab.y = 1,
bg.lab.x = NULL,
bg.lab.y = NULL,
x.adj.lab.y = 1,
plotLegend = TRUE,
keepLegendSpace = plotLegend,
legendLabel = "",
cex.legendLabel = 1,
# Separator line specification
verticalSeparator.x = NULL,
verticalSeparator.col = 1,
verticalSeparator.lty = 1,
verticalSeparator.lwd = 1,
verticalSeparator.ext = 0,
verticalSeparator.interval = 0,
horizontalSeparator.y = NULL,
horizontalSeparator.col = 1,
horizontalSeparator.lty = 1,
horizontalSeparator.lwd = 1,
horizontalSeparator.ext = 0,
horizontalSeparator.interval = 0,
# optional restrictions on which rows and columns to actually show
showRows = NULL,
showCols = NULL,
# Other arguments...
... )
{
textFnc = match.fun("text");
if (!is.null(colorLabels)) {xColorLabels = colorLabels; yColorLabels = colorLabels; }
if (is.null(yLabels) & (!is.null(xLabels)) & (dim(Matrix)[1]==dim(Matrix)[2]))
yLabels = xLabels;
nCols = ncol(Matrix);
nRows = nrow(Matrix);
if (length(xLabels)!=nCols)
stop("Length of 'xLabels' must equal the number of columns in 'Matrix.'");
if (length(yLabels)!=nRows)
stop("Length of 'yLabels' must equal the number of rows in 'Matrix.'");
if (is.null(showRows)) showRows = c(1:nRows);
if (is.null(showCols)) showCols = c(1:nCols);
nShowCols = length(showCols);
nShowRows = length(showRows);
if (nShowCols==0) stop("'showCols' is empty.");
if (nShowRows==0) stop("'showRows' is empty.");
if (checkColorsValid)
{
xValidColors = !is.na(match(substring(xLabels, 3), colors()));
yValidColors = !is.na(match(substring(yLabels, 3), colors()));
} else {
xValidColors = rep(TRUE, length(xLabels));
yValidColors = rep(TRUE, length(yLabels));
}
if (sum(xValidColors)>0) xColorLabInd = xValidColors[showCols]
if (sum(!xValidColors)>0) xTextLabInd = !xValidColors[showCols]
if (sum(yValidColors)>0) yColorLabInd = yValidColors[showRows]
if (sum(!yValidColors)>0) yTextLabInd = !yValidColors[showRows]
if (setStdMargins)
{
if (xColorLabels & yColorLabels)
{
par(mar=c(2,2,3,5)+0.2);
} else {
par(mar = c(7,7,3,5)+0.2);
}
}
xLabels.show = xLabels[showCols];
yLabels.show = yLabels[showRows];
if (!is.null(xSymbols))
{
if (length(xSymbols)!=nCols)
stop("When 'xSymbols' are given, their length must equal the number of columns in 'Matrix.'");
xSymbols.show = xSymbols[showCols];
} else
xSymbols.show = NULL;
if (!is.null(ySymbols))
{
if (length(ySymbols)!=nRows)
stop("When 'ySymbols' are given, their length must equal the number of rows in 'Matrix.'");
ySymbols.show = ySymbols[showRows];
} else
ySymbols.show = NULL;
xLabPos = charmatch(xLabelsPosition, c("bottom", "top"));
if (is.na(xLabPos))
stop("Argument 'xLabelsPosition' must be (a unique abbreviation of) 'bottom', 'top'");
yLabPos = charmatch(yLabelsPosition, c("left", "right"));
if (is.na(yLabPos))
stop("Argument 'yLabelsPosition' must be (a unique abbreviation of) 'left', 'right'");
if (is.null(colors)) colors = heat.colors(30);
if (invertColors) colors = rev(colors);
labPos = .heatmapWithLegend(Matrix[showRows, showCols, drop = FALSE],
signed = FALSE, colorMatrix = colorMatrix, colors = colors, naColor = naColor,
cex.legendAxis = cex.lab, plotLegend = plotLegend, keepLegendSpace = keepLegendSpace,
legendLabel = legendLabel, cex.legendLabel = cex.legendLabel, ...)
plotbox = labPos$box;
xmin = plotbox[1]; xmax = plotbox[2]; ymin = plotbox[3]; yrange = plotbox[4]-ymin;
ymax = plotbox[4]; xrange = xmax - xmin;
# The positions below are for showCols/showRows-restriceted data
xLeft = labPos$xLeft;
xRight = labPos$xRight;
yTop = labPos$yTop;
yBot = labPos$yBot;
xspacing = labPos$xMid[2] - labPos$xMid[1];
yspacing = abs(labPos$yMid[2] - labPos$yMid[1]);
offsetx = .extend(xColorOffset, nCols)[showCols]
offsety = .extend(yColorOffset, nRows)[showRows]
xColW = xColorWidth;
yColW = yColorWidth;
# Additional angle-dependent offsets for x axis labels
textOffsetY = strheight("M") * cos(xLabelsAngle/180 * pi);
if (any(xValidColors)) offsetx = offsetx + xColW;
if (any(yValidColors)) offsety = offsety + yColW;
# Create the background for column and row labels.
extension.left = par("mai")[2] * # left margin width in inches
par("cxy")[1] / par("cin")[1] # character size in user corrdinates/character size in inches
extension.right = par("mai")[4] * # right margin width in inches
par("cxy")[1] / par("cin")[1] # character size in user corrdinates/character size in inches
extension.bottom = par("mai")[1] *
par("cxy")[2] / par("cin")[2]- # character size in user corrdinates/character size in inches
offsetx
extension.top = par("mai")[3] *
par("cxy")[2] / par("cin")[2]- # character size in user corrdinates/character size in inches
offsetx
figureBox = par("usr");
figXrange = figureBox[2] - figureBox[1];
figYrange = figureBox[4] - figureBox[3];
if (!is.null(bg.lab.x))
{
bg.lab.x = .extend(bg.lab.x, nCols)[showCols];
if (xLabPos==1)
{
y0 = ymin;
ext = extension.bottom;
sign = 1;
} else {
y0 = ymax;
ext = extension.top;
sign = -1;
}
figureDims = par("pin");
angle = xLabelsAngle/180*pi;
ratio = figureDims[1]/figureDims[2] * figYrange/figXrange;
ext.x = -sign * ext * 1/tan(angle)/ratio;
ext.y = sign * ext * sign(sin(angle))
#offset = (sum(xValidColors)>0) * xColW + offsetx + textOffsetY;
offset = offsetx + textOffsetY;
for (cc in 1:nShowCols)
polygon(x = c(xLeft[cc], xLeft[cc], xLeft[cc] + ext.x, xRight[cc] + ext.x, xRight[cc], xRight[cc]),
y = c(y0, y0-sign*offset[cc], y0-sign*offset[cc] - ext.y, y0-sign*offset[cc] - ext.y,
y0-sign*offset[cc], y0),
border = bg.lab.x[cc], col = bg.lab.x[cc], xpd = TRUE);
}
if (!is.null(bg.lab.y))
{
bg.lab.y = .extend(bg.lab.y, nRows)
reverseRows = TRUE;
if (reverseRows) bg.lab.y = rev(bg.lab.y);
bg.lab.y = bg.lab.y[showRows];
if (yLabPos==1)
{
xl = xmin-extension.left;
xr = xmin;
} else {
xl = xmax;
xr = xmax + extension.right;
}
for (r in 1:nShowRows)
rect(xl, yBot[r], xr, yTop[r],
col = bg.lab.y[r], border = bg.lab.y[r], xpd = TRUE);
}
colors.lab.x = .extend(colors.lab.x, nCols)[showCols];
font.lab.x = .extend(font.lab.x, nCols)[showCols];
# Write out labels
if (sum(!xValidColors)>0)
{
xLabYPos = if(xLabPos==1) ymin - offsetx- textOffsetY else ymax + offsetx + textOffsetY;
if (is.null(cex.lab)) cex.lab = 1;
mapply(textFnc, x = labPos$xMid[xTextLabInd],
y = xLabYPos, labels = xLabels.show[xTextLabInd],
col = colors.lab.x[xTextLabInd],
font = font.lab.x[xTextLabInd],
MoreArgs = list(srt = xLabelsAngle,
adj = xLabelsAdj, xpd = TRUE, cex = cex.lab.x));
}
if (sum(xValidColors)>0)
{
baseY = if (xLabPos==1) ymin-offsetx else ymax + offsetx;
deltaY = if (xLabPos==1) xColW else -xColW;
rect(xleft = labPos$xMid[xColorLabInd] - xspacing/2, ybottom = baseY[xColorLabInd],
xright = labPos$xMid[xColorLabInd] + xspacing/2, ytop = baseY[xColorLabInd] + deltaY,
density = -1, col = substring(xLabels.show[xColorLabInd], 3),
border = substring(xLabels.show[xColorLabInd], 3), xpd = TRUE)
if (!is.null(xSymbols))
mapply(textFnc, x = labPos$xMid[xColorLabInd],
y = baseY[xColorLabInd] -textOffsetY - sign(deltaY)* strwidth("M")/3,
labels = xSymbols.show[xColorLabInd],
col = colors.lab.x[xColorLabInd],
font = font.lab.x[xColorLabInd],
MoreArgs = list( adj = xLabelsAdj,
xpd = TRUE, srt = xLabelsAngle, cex = cex.lab.x));
}
x.adj.lab.y = .extend(x.adj.lab.y, nRows)[showRows]
if (yLabPos==1)
{
marginWidth = par("mai")[2] / par("pin")[1] * xrange
} else {
marginWidth = par("mai")[4] / par("pin")[1] * xrange
}
xSpaceForYLabels = marginWidth-2*strwidth("M")/3 - ifelse(yValidColors[showRows], yColW, 0);
xPosOfYLabels.relative = xSpaceForYLabels * (1-x.adj.lab.y) + offsety
colors.lab.y = .extend(colors.lab.y, nRows)[showRows];
font.lab.y = .extend(font.lab.y, nRows)[showRows];
if (sum(!yValidColors)>0)
{
if (is.null(cex.lab)) cex.lab = 1;
if (yLabPos==1)
{
x = xmin - strwidth("M")/3 - xPosOfYLabels.relative[yTextLabInd]
adj = x.adj.lab.y[yTextLabInd]
} else {
x = xmax + strwidth("M")/3 + xPosOfYLabels.relative[yTextLabInd];
adj = 1-x.adj.lab.y[yTextLabInd];
}
mapply(textFnc, y = labPos$yMid[yTextLabInd], labels = yLabels.show[yTextLabInd],
adj = lapply(adj, c, 0.5),
x = x,
col = colors.lab.y[yTextLabInd],
font = font.lab.y[yTextLabInd],
MoreArgs = list(srt = 0, xpd = TRUE, cex = cex.lab.y));
}
if (sum(yValidColors)>0)
{
if (yLabPos==1)
{
xl = xmin-offsety;
xr = xmin-offsety + yColW;
xtext = xmin - strwidth("M")/3 - xPosOfYLabels.relative[yColorLabInd];
adj = x.adj.lab.y[yColorLabInd]
} else {
xl = xmax + offsety - yColW;
xr = xmax + offsety;
xtext = xmin + strwidth("M")/3 + xPosOfYLabels.relative[yColorLabInd]
adj = 1-x.adj.lab.y[yColorLabInd];
}
rect(xleft = xl[yColorLabInd], ybottom = rev(labPos$yMid[yColorLabInd]) - yspacing/2,
xright = xr[yColorLabInd], ytop = rev(labPos$yMid[yColorLabInd]) + yspacing/2,
density = -1, col = substring(rev(yLabels.show[yColorLabInd]), 3),
border = substring(rev(yLabels.show[yColorLabInd]), 3), xpd = TRUE)
#for (i in yColorLabInd)
#{
# lines(c(xmin- offsetx, xmin- offsetx+yColW), y = rep(labPos$yMid[i] - yspacing/2, 2), col = i, xpd = TRUE)
# lines(c(xmin- offsetx, xmin- offsetx+yColW), y = rep(labPos$yMid[i] + yspacing/2, 2), col = i, xpd = TRUE)
#}
if (!is.null(ySymbols))
mapply(textFnc, y = labPos$yMid[yColorLabInd], labels = ySymbols.show[yColorLabInd],
adj = lapply(adj, c, 0.5),
x = xtext, col = colors.lab.y[yColorLabInd],
font = font.lab.y[yColorLabInd],
MoreArgs = list(srt = 0, xpd = TRUE, cex = cex.lab.y));
}
# Draw separator lines, if requested
showCols.ext = c(if (1 %in% showCols) 0 else NULL, showCols);
showCols.shift = if (0 %in% showCols.ext) 1 else 0;
if (length(verticalSeparator.x) > 0)
{
if (any(verticalSeparator.x < 0 | verticalSeparator.x > nCols))
stop("If given. 'verticalSeparator.x' must all be between 0 and the number of columns.");
colSepShowIndex = which(verticalSeparator.x %in% showCols.ext);
verticalSeparator.x.show = .restrictIndex(verticalSeparator.x, showCols.ext)-showCols.shift;
} else if (verticalSeparator.interval > 0)
{
verticalSeparator.x.show = verticalSeparator.x =
seq(from = verticalSeparator.interval, by = verticalSeparator.interval,
length.out = floor(length(showCols)/verticalSeparator.interval));
colSepShowIndex = 1:length(verticalSeparator.x);
} else
verticalSeparator.x.show = NULL;
if (length(verticalSeparator.x.show) > 0)
{
nLines = length(verticalSeparator.x);
vs.col = .extend(verticalSeparator.col, nLines)[colSepShowIndex];
vs.lty = .extend(verticalSeparator.lty, nLines)[colSepShowIndex];
vs.lwd = .extend(verticalSeparator.lwd, nLines)[colSepShowIndex];
vs.ext = .extend(verticalSeparator.ext, nLines)[colSepShowIndex];
x.lines = ifelse(verticalSeparator.x.show>0, labPos$xRight[verticalSeparator.x.show], labPos$xLeft[1]);
nLines.show = length(verticalSeparator.x.show);
for (l in 1:nLines.show)
lines(rep(x.lines[l], 2), c(ymin, ymax), col = vs.col[l], lty = vs.lty[l], lwd = vs.lwd[l]);
angle = xLabelsAngle/180*pi;
if (angle==0) angle = pi/2;
if (xLabelsPosition =="bottom")
{
sign = 1;
y0 = ymin;
ext = extension.bottom;
} else {
sign = -1;
y0 = ymax;
ext = extension.top;
}
figureDims = par("pin");
ratio = figureDims[1]/figureDims[2] * figYrange/figXrange;
ext.x = -sign * ext * 1/tan(angle)/ratio;
ext.y = sign * ext * sign(sin(angle))
#offset = (sum(xValidColors)>0) * xColW + offsetx + textOffsetY;
offset = offsetx + textOffsetY;
for (l in 1:nLines.show)
lines(c(x.lines[l], x.lines[l], x.lines[l] + vs.ext[l] * ext.x[l]),
c(y0, y0-sign*offset[l], y0-sign*offset[l] - vs.ext[l] * ext.y[l]),
col = vs.col[l], lty = vs.lty[l], lwd = vs.lwd[l], xpd = TRUE);
}
showRows.ext = c(if (1 %in% showRows) 0 else NULL, showRows);
showRows.shift = if (0 %in% showRows.ext) 1 else 0;
if (length(horizontalSeparator.y) >0)
{
if (any(horizontalSeparator.y < 0 | horizontalSeparator.y > nRows))
stop("If given. 'horizontalSeparator.y' must all be between 0 and the number of rows.");
rowSepShowIndex = which( horizontalSeparator.y %in% showRows.ext);
horizontalSeparator.y.show = .restrictIndex(horizontalSeparator.y, showRows.ext)-showRows.shift;
} else if (horizontalSeparator.interval > 0)
{
horizontalSeparator.y.show = horizontalSeparator.y =
seq(from = horizontalSeparator.interval, by = horizontalSeparator.interval,
length.out = floor(length(showRows)/horizontalSeparator.interval));
rowSepShowIndex = 1:length(horizontalSeparator.y);
} else
horizontalSeparator.y.show = NULL;
if (length(horizontalSeparator.y.show) > 0)
{
reverseRows = TRUE;
if (reverseRows)
{
horizontalSeparator.y.show = nShowRows - horizontalSeparator.y.show+1;
y.lines = ifelse( horizontalSeparator.y.show <=nShowRows,
labPos$yBot[horizontalSeparator.y.show], labPos$yTop[nShowRows]);
} else {
y.lines = ifelse( horizontalSeparator.y.show > 0, labPos$yBot[horizontalSeparator.y.show], labPos$yTop[1]);
}
nLines = length(horizontalSeparator.y);
vs.col = .extend(horizontalSeparator.col, nLines)[rowSepShowIndex];
vs.lty = .extend(horizontalSeparator.lty, nLines)[rowSepShowIndex];
vs.lwd = .extend(horizontalSeparator.lwd, nLines)[rowSepShowIndex];
vs.ext = .extend(horizontalSeparator.ext, nLines)[rowSepShowIndex];
nLines.show = length(horizontalSeparator.y.show);
for (l in 1:nLines.show)
{
if (yLabPos==1)
{
xl = xmin-vs.ext[l]*extension.left;
xr = xmax;
} else {
xl = xmin;
xr = xmax + vs.ext[l]*extension.right;
}
lines(c(xl, xr), rep(y.lines[l], 2),
col = vs.col[l], lty = vs.lty[l], lwd = vs.lwd[l], xpd = TRUE);
}
}
if (!is.null(textMatrix))
{
if (is.null(cex.text)) cex.text = par("cex");
if (is.null(dim(textMatrix)))
if (length(textMatrix)==prod(dim(Matrix))) dim(textMatrix)=dim(Matrix);
if (!isTRUE(all.equal(dim(textMatrix), dim(Matrix))))
stop("labeledHeatmap: textMatrix was given, but has dimensions incompatible with Matrix.");
for (rw in 1:nShowRows)
for (cl in 1:nShowCols)
{
text(labPos$xMid[cl], labPos$yMid[rw],
as.character(textMatrix[showRows[rw],showCols[cl]]), xpd = TRUE, cex = cex.text, adj = textAdj);
}
}
axis(1, labels = FALSE, tick = FALSE)
axis(2, labels = FALSE, tick = FALSE)
axis(3, labels = FALSE, tick = FALSE)
axis(4, labels = FALSE, tick = FALSE)
invisible(labPos)
}
#===================================================================================================
#
# multi-page labeled heatmap
#
#===================================================================================================
# Plot a labeled heatmap of 'Matrix' split across multiple pages when the
# matrix is too large to fit legibly on a single device. Rows and columns are
# partitioned into per-page chunks and labeledHeatmap() is called once per
# (row-chunk, column-chunk) combination, all sharing a common color scale.
#
# Arguments:
#   Matrix                 numeric matrix to be plotted.
#   xLabels, yLabels       column and row labels (passed to labeledHeatmap).
#   xSymbols, ySymbols     optional short symbols for columns/rows.
#   textMatrix             optional matrix of text printed in each cell.
#   rowsPerPage, colsPerPage  optional explicit lists of row/column index
#                          vectors, one list element per page; when NULL they
#                          are derived from maxRowsPerPage / maxColsPerPage
#                          via allocateJobs().
#   maxRowsPerPage, maxColsPerPage  page-size limits used when the per-page
#                          index lists are not given.
#   addPageNumberToMain    if TRUE and there is more than one page, append
#                          "(page k)" to the title of each page.
#   zlim                   color scale limits; when NULL, computed from the
#                          data (made symmetric about zero when 'signed' is
#                          TRUE) so that all pages use the same scale.
#   signed                 logical: should the color scale be symmetric about 0?
#   main                   plot title.
#   ...                    further arguments passed to labeledHeatmap().
#
# Value: invisible NULL; called for its plotting side effect.
labeledHeatmap.multiPage = function(
   # Input data and ornaments
   Matrix,
   xLabels, yLabels = NULL,
   xSymbols = NULL, ySymbols = NULL,
   textMatrix = NULL,
   # Paging options
   rowsPerPage = NULL, maxRowsPerPage = 20,
   colsPerPage = NULL, maxColsPerPage = 10,
   addPageNumberToMain = TRUE,
   # Further arguments to labeledHeatmap
   zlim = NULL,
   signed = TRUE,
   main = "",
   ...)
{
  nr = nrow(Matrix);
  nc = ncol(Matrix);

  # Derive the per-page row/column index sets unless supplied by the caller.
  if (is.null(rowsPerPage))
  {
    nPages.rows = ceiling(nr/maxRowsPerPage);
    rowsPerPage = allocateJobs(nr, nPages.rows);
  } else
    nPages.rows = length(rowsPerPage);

  if (is.null(colsPerPage))
  {
    nPages.cols = ceiling(nc/maxColsPerPage);
    colsPerPage = allocateJobs(nc, nPages.cols);
  } else
    nPages.cols = length(colsPerPage);

  # Use a common color scale across all pages so they are visually comparable.
  if (is.null(zlim))
  {
    zlim = range(Matrix, na.rm = TRUE)
    if (signed) zlim = c(-max(abs(zlim)), max(abs(zlim)));
  }

  page = 1;
  # Scalar condition: use the short-circuiting operator, not the vectorized '|'.
  multiPage = (nPages.cols > 1 || nPages.rows > 1)

  # seq_len() instead of 1:n (safe even though nPages.* >= 1 here by construction).
  for (page.col in seq_len(nPages.cols)) for (page.row in seq_len(nPages.rows))
  {
    rows = rowsPerPage[[page.row]];
    cols = colsPerPage[[page.col]];
    main.1 = main;
    # '&&' rather than '&': both operands are scalars and we want short-circuiting.
    if (addPageNumberToMain && multiPage) main.1 = spaste(main, "(page ", page, ")");
    labeledHeatmap(Matrix = Matrix,
                 xLabels = xLabels, xSymbols = xSymbols,
                 yLabels = yLabels, ySymbols = ySymbols,
                 textMatrix = textMatrix,
                 zlim = zlim, main = main.1,
                 showRows = rows, showCols = cols,
                 ...);
    page = page + 1;
  }
}
|
# WARNING(review): hard-coded, machine-specific working directory; this script
# only runs on one machine. Prefer launching R from the project directory, or
# passing the data path to the functions explicitly, instead of setwd().
setwd("C://Users//varun//Downloads")
library(dplyr)
# Find the hospital in a given state with the lowest 30-day death (mortality)
# rate for a given outcome, reading the data from
# "outcome-of-care-measures.csv" in the current working directory.
#
# Arguments:
#   state    two-letter state abbreviation (must occur in the data's State column).
#   outcome  one of "heart attack", "heart failure", "pneumonia".
#
# Value: the name (character scalar) of the best-performing hospital; ties on
# the lowest rate are broken alphabetically by hospital name.
#
# Errors: stops with "invalid state" / "invalid outcome" on bad arguments.
best <- function(state, outcome) {
  ## Read outcome data (all columns as character; rates are converted below).
  outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  ## Check that state and outcome are valid
  if (!(state %in% unique(outcome_data$State))) {
    stop("invalid state")
  }
  outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!(outcome %in% outcomes)) {
    stop("invalid outcome")
  }

  ## Locate the 30-day mortality column for the requested outcome: among the
  ## columns whose names match the mortality-rate prefix, pick the one that
  ## contains the outcome keyword (mirrors the original grep/ends_with logic).
  patt <- "^Hospital.30.Day.Death..Mortality"
  patt_out <- switch(outcome,
                     "heart attack"  = "attack",
                     "heart failure" = "failure",
                     "pneumonia"     = "pneumonia")
  mortality_cols <- grep(patt, names(outcome_data), ignore.case = TRUE, value = TRUE)
  rate_col <- grep(patt_out, mortality_cols, ignore.case = TRUE, value = TRUE)[1]

  ## Restrict to the requested state and drop hospitals with no reported rate.
  ## suppressWarnings: non-numeric entries (e.g. "Not Available") coerce to NA
  ## by design, so the coercion warning is expected noise.
  state_data <- outcome_data[outcome_data$State == state, , drop = FALSE]
  rate <- suppressWarnings(as.numeric(state_data[[rate_col]]))
  keep <- !is.na(rate)
  hospital <- state_data$Hospital.Name[keep]
  rate <- rate[keep]

  ## Return hospital name in that state with lowest 30-day death rate.
  ## order(rate, hospital) selects the minimum rate and breaks ties
  ## alphabetically by hospital name. (BUG FIX: the original used
  ## rank(ties.method = "max") and then filtered rank == 1; when the minimum
  ## rate was tied, no row received rank 1 and the function returned an
  ## empty result.)
  hospital[order(rate, hospital)[1]]
}
#a<- read.csv("outcome-of-care-measures.csv", colClasses = "character")
#
#b<-split(filter(a,a$State=="NY"),a$Hospital.Name,drop = TRUE)
#outcome<- "pneumonia"
#patt= paste("^Hospital.30.Day.Death..Mortality",sep = "")
#patt
#filter(a,a$State=="MD")%>%select(contains(a,outcome,ignore.case = T))->temp
#temp%>%select(grep(pattern =patt ,names(temp),ignore.case = T))
#str(contain)
|
/best.R
|
no_license
|
varun15chaturvedi/ProgrammingAssignment2
|
R
| false
| false
| 1,649
|
r
|
# WARNING(review): hard-coded, machine-specific working directory; this script
# only runs on one machine. Prefer launching R from the project directory, or
# passing the data path to the functions explicitly, instead of setwd().
setwd("C://Users//varun//Downloads")
library(dplyr)
# Find the hospital in a given state with the lowest 30-day death (mortality)
# rate for a given outcome, reading the data from
# "outcome-of-care-measures.csv" in the current working directory.
#
# Arguments:
#   state    two-letter state abbreviation (must occur in the data's State column).
#   outcome  one of "heart attack", "heart failure", "pneumonia".
#
# Value: the name (character scalar) of the best-performing hospital; ties on
# the lowest rate are broken alphabetically by hospital name.
#
# Errors: stops with "invalid state" / "invalid outcome" on bad arguments.
best <- function(state, outcome) {
  ## Read outcome data (all columns as character; rates are converted below).
  outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")

  ## Check that state and outcome are valid
  if (!(state %in% unique(outcome_data$State))) {
    stop("invalid state")
  }
  outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!(outcome %in% outcomes)) {
    stop("invalid outcome")
  }

  ## Locate the 30-day mortality column for the requested outcome: among the
  ## columns whose names match the mortality-rate prefix, pick the one that
  ## contains the outcome keyword (mirrors the original grep/ends_with logic).
  patt <- "^Hospital.30.Day.Death..Mortality"
  patt_out <- switch(outcome,
                     "heart attack"  = "attack",
                     "heart failure" = "failure",
                     "pneumonia"     = "pneumonia")
  mortality_cols <- grep(patt, names(outcome_data), ignore.case = TRUE, value = TRUE)
  rate_col <- grep(patt_out, mortality_cols, ignore.case = TRUE, value = TRUE)[1]

  ## Restrict to the requested state and drop hospitals with no reported rate.
  ## suppressWarnings: non-numeric entries (e.g. "Not Available") coerce to NA
  ## by design, so the coercion warning is expected noise.
  state_data <- outcome_data[outcome_data$State == state, , drop = FALSE]
  rate <- suppressWarnings(as.numeric(state_data[[rate_col]]))
  keep <- !is.na(rate)
  hospital <- state_data$Hospital.Name[keep]
  rate <- rate[keep]

  ## Return hospital name in that state with lowest 30-day death rate.
  ## order(rate, hospital) selects the minimum rate and breaks ties
  ## alphabetically by hospital name. (BUG FIX: the original used
  ## rank(ties.method = "max") and then filtered rank == 1; when the minimum
  ## rate was tied, no row received rank 1 and the function returned an
  ## empty result.)
  hospital[order(rate, hospital)[1]]
}
#a<- read.csv("outcome-of-care-measures.csv", colClasses = "character")
#
#b<-split(filter(a,a$State=="NY"),a$Hospital.Name,drop = TRUE)
#outcome<- "pneumonia"
#patt= paste("^Hospital.30.Day.Death..Mortality",sep = "")
#patt
#filter(a,a$State=="MD")%>%select(contains(a,outcome,ignore.case = T))->temp
#temp%>%select(grep(pattern =patt ,names(temp),ignore.case = T))
#str(contain)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggurvplot_arguments.R
\name{ggsurvplot_arguments}
\alias{ggsurvplot_arguments}
\title{ggsurvplot Argument Descriptions}
\arguments{
\item{fit}{an object of class survfit.}
\item{data}{a dataset used to fit survival curves. If not supplied then data
will be extracted from 'fit' object.}
\item{fun}{an arbitrary function defining a transformation of the survival
curve. Often used transformations can be specified with a character
argument: "event" plots cumulative events (f(y) = 1-y), "cumhaz" plots the
cumulative hazard function (f(y) = -log(y)), and "pct" for survival
probability in percentage.}
\item{surv.scale}{scale transformation of survival curves. Allowed values are
"default" or "percent".}
\item{xscale}{numeric or character value specifying x-axis scale. \itemize{
\item If numeric, the value is used to divide the labels on the x axis. For
example, a value of 365.25 will give labels in years instead of the original
days. \item If character, allowed options include one of c("d_m", "d_y",
"m_d", "m_y", "y_d", "y_m"), where d = days, m = months and y = years. For
example, xscale = "d_m" will transform labels from days to months; xscale =
"m_y", will transform labels from months to years.}}
\item{color}{color to be used for the survival curves. \itemize{ \item If the
number of strata/group (n.strata) = 1, the expected value is the color name.
For example color = "blue". \item If n.strata > 1, the expected value is the
grouping variable name. By default, survival curves are colored by strata
using the argument color = "strata", but you can also color survival curves
by any other grouping variables used to fit the survival curves. In this
case, it's possible to specify a custom color palette by using the argument
palette.}}
\item{palette}{the color palette to be used. Allowed values include "hue" for
the default hue color scale; "grey" for grey color palettes; brewer palettes
e.g. "RdBu", "Blues", ...; or custom color palette e.g. c("blue", "red").
See details section for more information. Can be also a numeric vector of
length(groups); in this case a basic color palette is created using the
function \link[grDevices]{palette}.}
\item{linetype}{line types. Allowed values includes i) "strata" for changing
linetypes by strata (i.e. groups); ii) a numeric vector (e.g., c(1, 2)) or a
character vector c("solid", "dashed").}
\item{break.time.by}{numeric value controlling time axis breaks. Default value
is NULL.}
\item{break.x.by}{alias of break.time.by. Numeric value controlling x axis
breaks. Default value is NULL.}
\item{break.y.by}{same as break.x.by but for y axis.}
\item{conf.int}{logical value. If TRUE, plots confidence interval.}
\item{conf.int.fill}{fill color to be used for confidence interval.}
\item{conf.int.style}{confidence interval style. Allowed values include
c("ribbon", "step").}
\item{censor}{logical value. If TRUE, censors will be drawn.}
\item{censor.shape}{character or numeric value specifying the point shape of
censors. Default value is "+" (3), a sensible choice is "|" (124).}
\item{censor.size}{numveric value specifying the point size of censors.
Default is 4.5.}
\item{pval}{logical value, a numeric or a string. If logical and TRUE, the
p-value is added on the plot. If numeric, then the computed p-value is
substituted with the one passed with this parameter. If character, then the
customized string appears on the plot. See examples - Example 3.}
\item{pval.size}{numeric value specifying the p-value text size. Default is 5.}
\item{pval.coord}{numeric vector, of length 2, specifying the x and y
coordinates of the p-value. Default values are NULL.}
\item{title, xlab, ylab}{main title and axis labels}
\item{xlim, ylim}{x and y axis limits e.g. xlim = c(0, 1000), ylim = c(0, 1).}
\item{axes.offset}{logical value. Default is TRUE. If FALSE, set the plot axes
to start at the origin.}
\item{legend}{character specifying legend position. Allowed values are one of
c("top", "bottom", "left", "right", "none"). Default is "top" side position.
to remove the legend use legend = "none". Legend position can be also
specified using a numeric vector c(x, y); see details section.}
\item{legend.title}{legend title.}
\item{legend.labs}{character vector specifying legend labels. Used to replace
the names of the strata from the fit. Should be given in the same order as
those strata.}
\item{risk.table}{Allowed values include: \itemize{ \item TRUE or FALSE
specifying whether to show or not the risk table. Default is FALSE. \item
"absolute" or "percentage": to show the \bold{absolute number} and the
\bold{percentage} of subjects at risk by time, respectively. Use i)
"abs_pct" to show both absolute number and percentage. ii) "nrisk_cumcensor"
and "nrisk_cumevents" to show the number at risk and, the cumulative number
of censoring and events, respectively. }}
\item{risk.table.title}{The title to be used for the risk table.}
\item{risk.table.pos}{character vector specifying the risk table position.
Allowed options are one of c("out", "in") indicating 'outside' or 'inside'
the main plot, respectively. Default value is "out".}
\item{risk.table.col}{same as tables.col but for risk table only.}
\item{risk.table.fontsize, fontsize}{font size to be used for the risk table
and the cumulative events table.}
\item{risk.table.y.text}{logical. Default is TRUE. If FALSE, risk table y axis
tick labels will be hidden.}
\item{risk.table.y.text.col}{logical. Default value is FALSE. If TRUE, risk
table tick labels will be colored by strata.}
\item{tables.height}{numeric value (in [0 - 1]) specifying the general height
of all tables under the main survival plot.}
\item{tables.y.text}{logical. Default is TRUE. If FALSE, the y axis tick
labels of tables will be hidden.}
\item{tables.y.text.col}{logical. Default value is FALSE. If TRUE, tables tick
labels will be colored by strata.}
\item{tables.col}{color to be used for all tables under the main plot. Default
value is "black". If you want to color by strata (i.e. groups), use
tables.col = "strata".}
\item{tables.theme}{function, ggplot2 theme name. Default value is
\link{theme_survminer}. Allowed values include ggplot2 official themes: see
\code{\link[ggplot2]{theme}}.}
\item{risk.table.height}{the height of the risk table on the grid. Increase
the value when you have many strata. Default is 0.25. Ignored when
risk.table = FALSE.}
\item{surv.plot.height}{the height of the survival plot on the grid. Default
is 0.75. Ignored when risk.table = FALSE. \code{1-risk.table.height -
ncensor.plot.height} when \code{risk.table = TRUE} and \code{ncensor.plot =
TRUE}}
\item{ncensor.plot}{logical value. If TRUE, the number of censored subjects at
time t is plotted. Default is FALSE. Ignored when cumcensor = TRUE.}
\item{ncensor.plot.title}{The title to be used for the censor plot. Used when
\code{ncensor.plot = TRUE}.}
\item{ncensor.plot.height}{The height of the censor plot. Used when
\code{ncensor.plot = TRUE}.}
\item{cumevents}{logical value specifying whether to show or not the table of
the cumulative number of events. Default is FALSE.}
\item{cumevents.title}{The title to be used for the cumulative events table.}
\item{cumevents.col}{same as tables.col but for the cumulative events table
only.}
\item{cumevents.y.text}{logical. Default is TRUE. If FALSE, the y axis tick
labels of the cumulative events table will be hidden.}
\item{cumevents.y.text.col}{logical. Default value is FALSE. If TRUE, the y
tick labels of the cumulative events will be colored by strata.}
\item{cumevents.height}{the height of the cumulative events table on the grid.
Default is 0.25. Ignored when cumevents = FALSE.}
\item{cumcensor}{logical value specifying whether to show or not the table of
the cumulative number of censoring. Default is FALSE.}
\item{cumcensor.title}{The title to be used for the cumcensor table.}
\item{cumcensor.col}{same as tables.col but for cumcensor table only.}
\item{cumcensor.y.text}{logical. Default is TRUE. If FALSE, the y axis tick
labels of the cumcensor table will be hidden.}
\item{cumcensor.y.text.col}{logical. Default value is FALSE. If TRUE, the y
tick labels of the cumcensor will be colored by strata.}
\item{cumcensor.height}{the height of the cumcensor table on the grid. Default
is 0.25. Ignored when cumcensor = FALSE.}
\item{surv.median.line}{character vector for drawing a horizontal/vertical
line at median survival. Allowed values include one of c("none", "hv", "h",
"v"). v: vertical, h:horizontal.}
\item{ggtheme}{function, ggplot2 theme name. Default value is
\link{theme_survminer}. Allowed values include ggplot2 official themes: see
\code{\link[ggplot2]{theme}}.}
\item{...}{other arguments to be passed i) to ggplot2 geom_*() functions such
as linetype, size, ii) or to the function \link[ggpubr]{ggpar}() for
customizing the plots. See details section.}
\item{log.rank.weights}{The name for the type of weights to be used in
computing the p-value for log-rank test. By default \code{survdiff} is used
to calculate regular log-rank test (with weights == 1). A user can specify
\code{"1", "n", "sqrtN", "S1", "S2", "FH"} to use weights specified in
\link[survMisc]{comp}, so that weight correspond to the test as : 1 -
log-rank, n - Gehan-Breslow (generalized Wilcoxon), sqrtN - Tarone-Ware, S1
- Peto-Peto's modified survival estimate, S2 - modified Peto-Peto (by
Andersen), FH - Fleming-Harrington(p=1, q=1).}
\item{pval.method}{whether to add a text with the test name used for
calculating the pvalue, that corresponds to survival curves' comparison -
used only when \code{pval=TRUE}}
\item{pval.method.size}{the same as \code{pval.size} but for displaying
\code{log.rank.weights} name}
\item{pval.method.coord}{the same as \code{pval.coord} but for displaying
\code{log.rank.weights} name}
}
\description{
ggsurvplot Argument Descriptions
}
|
/man/ggsurvplot_arguments.Rd
|
no_license
|
woodhaha/survminer
|
R
| false
| true
| 9,969
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggurvplot_arguments.R
\name{ggsurvplot_arguments}
\alias{ggsurvplot_arguments}
\title{ggsurvplot Argument Descriptions}
\arguments{
\item{fit}{an object of class survfit.}
\item{data}{a dataset used to fit survival curves. If not supplied then data
will be extracted from 'fit' object.}
\item{fun}{an arbitrary function defining a transformation of the survival
curve. Often used transformations can be specified with a character
argument: "event" plots cumulative events (f(y) = 1-y), "cumhaz" plots the
cumulative hazard function (f(y) = -log(y)), and "pct" for survival
probability in percentage.}
\item{surv.scale}{scale transformation of survival curves. Allowed values are
"default" or "percent".}
\item{xscale}{numeric or character value specifying x-axis scale. \itemize{
\item If numeric, the value is used to divide the labels on the x axis. For
example, a value of 365.25 will give labels in years instead of the original
days. \item If character, allowed options include one of c("d_m", "d_y",
"m_d", "m_y", "y_d", "y_m"), where d = days, m = months and y = years. For
example, xscale = "d_m" will transform labels from days to months; xscale =
"m_y", will transform labels from months to years.}}
\item{color}{color to be used for the survival curves. \itemize{ \item If the
number of strata/group (n.strata) = 1, the expected value is the color name.
For example color = "blue". \item If n.strata > 1, the expected value is the
grouping variable name. By default, survival curves are colored by strata
using the argument color = "strata", but you can also color survival curves
by any other grouping variables used to fit the survival curves. In this
case, it's possible to specify a custom color palette by using the argument
palette.}}
\item{palette}{the color palette to be used. Allowed values include "hue" for
the default hue color scale; "grey" for grey color palettes; brewer palettes
e.g. "RdBu", "Blues", ...; or custom color palette e.g. c("blue", "red").
See details section for more information. Can be also a numeric vector of
length(groups); in this case a basic color palette is created using the
function \link[grDevices]{palette}.}
\item{linetype}{line types. Allowed values includes i) "strata" for changing
linetypes by strata (i.e. groups); ii) a numeric vector (e.g., c(1, 2)) or a
character vector c("solid", "dashed").}
\item{break.time.by}{numeric value controlling time axis breaks. Default value
is NULL.}
\item{break.x.by}{alias of break.time.by. Numeric value controlling x axis
breaks. Default value is NULL.}
\item{break.y.by}{same as break.x.by but for y axis.}
\item{conf.int}{logical value. If TRUE, plots confidence interval.}
\item{conf.int.fill}{fill color to be used for confidence interval.}
\item{conf.int.style}{confidence interval style. Allowed values include
c("ribbon", "step").}
\item{censor}{logical value. If TRUE, censors will be drawn.}
\item{censor.shape}{character or numeric value specifying the point shape of
censors. Default value is "+" (3), a sensible choice is "|" (124).}
\item{censor.size}{numeric value specifying the point size of censors.
  Default is 4.5.}
\item{pval}{logical value, a numeric or a string. If logical and TRUE, the
  p-value is added on the plot. If numeric, then the computed p-value is
  replaced with the one passed via this parameter. If character, then the
  customized string appears on the plot. See examples - Example 3.}
\item{pval.size}{numeric value specifying the p-value text size. Default is 5.}
\item{pval.coord}{numeric vector, of length 2, specifying the x and y
coordinates of the p-value. Default values are NULL.}
\item{title, xlab, ylab}{main title and axis labels}
\item{xlim, ylim}{x and y axis limits e.g. xlim = c(0, 1000), ylim = c(0, 1).}
\item{axes.offset}{logical value. Default is TRUE. If FALSE, set the plot axes
to start at the origin.}
\item{legend}{character specifying legend position. Allowed values are one of
c("top", "bottom", "left", "right", "none"). Default is "top" side position.
to remove the legend use legend = "none". Legend position can be also
specified using a numeric vector c(x, y); see details section.}
\item{legend.title}{legend title.}
\item{legend.labs}{character vector specifying legend labels. Used to replace
the names of the strata from the fit. Should be given in the same order as
those strata.}
\item{risk.table}{Allowed values include: \itemize{ \item TRUE or FALSE
specifying whether to show or not the risk table. Default is FALSE. \item
"absolute" or "percentage": to show the \bold{absolute number} and the
\bold{percentage} of subjects at risk by time, respectively. Use i)
"abs_pct" to show both absolute number and percentage. ii) "nrisk_cumcensor"
and "nrisk_cumevents" to show the number at risk and, the cumulative number
of censoring and events, respectively. }}
\item{risk.table.title}{The title to be used for the risk table.}
\item{risk.table.pos}{character vector specifying the risk table position.
Allowed options are one of c("out", "in") indicating 'outside' or 'inside'
the main plot, respectively. Default value is "out".}
\item{risk.table.col}{same as tables.col but for risk table only.}
\item{risk.table.fontsize, fontsize}{font size to be used for the risk table
and the cumulative events table.}
\item{risk.table.y.text}{logical. Default is TRUE. If FALSE, risk table y axis
tick labels will be hidden.}
\item{risk.table.y.text.col}{logical. Default value is FALSE. If TRUE, risk
table tick labels will be colored by strata.}
\item{tables.height}{numeric value (in [0 - 1]) specifying the general height
of all tables under the main survival plot.}
\item{tables.y.text}{logical. Default is TRUE. If FALSE, the y axis tick
labels of tables will be hidden.}
\item{tables.y.text.col}{logical. Default value is FALSE. If TRUE, tables tick
labels will be colored by strata.}
\item{tables.col}{color to be used for all tables under the main plot. Default
value is "black". If you want to color by strata (i.e. groups), use
tables.col = "strata".}
\item{tables.theme}{function, ggplot2 theme name. Default value is
\link{theme_survminer}. Allowed values include ggplot2 official themes: see
\code{\link[ggplot2]{theme}}.}
\item{risk.table.height}{the height of the risk table on the grid. Increase
the value when you have many strata. Default is 0.25. Ignored when
risk.table = FALSE.}
\item{surv.plot.height}{the height of the survival plot on the grid. Default
is 0.75. Ignored when risk.table = FALSE. \code{1-risk.table.height -
ncensor.plot.height} when \code{risk.table = TRUE} and \code{ncensor.plot =
TRUE}}
\item{ncensor.plot}{logical value. If TRUE, the number of censored subjects at
time t is plotted. Default is FALSE. Ignored when cumcensor = TRUE.}
\item{ncensor.plot.title}{The title to be used for the censor plot. Used when
\code{ncensor.plot = TRUE}.}
\item{ncensor.plot.height}{The height of the censor plot. Used when
\code{ncensor.plot = TRUE}.}
\item{cumevents}{logical value specifying whether to show or not the table of
the cumulative number of events. Default is FALSE.}
\item{cumevents.title}{The title to be used for the cumulative events table.}
\item{cumevents.col}{same as tables.col but for the cumulative events table
only.}
\item{cumevents.y.text}{logical. Default is TRUE. If FALSE, the y axis tick
labels of the cumulative events table will be hidden.}
\item{cumevents.y.text.col}{logical. Default value is FALSE. If TRUE, the y
tick labels of the cumulative events will be colored by strata.}
\item{cumevents.height}{the height of the cumulative events table on the grid.
Default is 0.25. Ignored when cumevents = FALSE.}
\item{cumcensor}{logical value specifying whether to show or not the table of
the cumulative number of censoring. Default is FALSE.}
\item{cumcensor.title}{The title to be used for the cumcensor table.}
\item{cumcensor.col}{same as tables.col but for cumcensor table only.}
\item{cumcensor.y.text}{logical. Default is TRUE. If FALSE, the y axis tick
labels of the cumcensor table will be hidden.}
\item{cumcensor.y.text.col}{logical. Default value is FALSE. If TRUE, the y
tick labels of the cumcensor will be colored by strata.}
\item{cumcensor.height}{the height of the cumcensor table on the grid. Default
is 0.25. Ignored when cumcensor = FALSE.}
\item{surv.median.line}{character vector for drawing a horizontal/vertical
line at median survival. Allowed values include one of c("none", "hv", "h",
"v"). v: vertical, h:horizontal.}
\item{ggtheme}{function, ggplot2 theme name. Default value is
\link{theme_survminer}. Allowed values include ggplot2 official themes: see
\code{\link[ggplot2]{theme}}.}
\item{...}{other arguments to be passed i) to ggplot2 geom_*() functions such
as linetype, size, ii) or to the function \link[ggpubr]{ggpar}() for
customizing the plots. See details section.}
\item{log.rank.weights}{The name for the type of weights to be used in
computing the p-value for log-rank test. By default \code{survdiff} is used
to calculate regular log-rank test (with weights == 1). A user can specify
\code{"1", "n", "sqrtN", "S1", "S2", "FH"} to use weights specified in
\link[survMisc]{comp}, so that weight correspond to the test as : 1 -
log-rank, n - Gehan-Breslow (generalized Wilcoxon), sqrtN - Tarone-Ware, S1
- Peto-Peto's modified survival estimate, S2 - modified Peto-Peto (by
Andersen), FH - Fleming-Harrington(p=1, q=1).}
\item{pval.method}{whether to add a text with the test name used for
calculating the pvalue, that corresponds to survival curves' comparison -
used only when \code{pval=TRUE}}
\item{pval.method.size}{the same as \code{pval.size} but for displaying
\code{log.rank.weights} name}
\item{pval.method.coord}{the same as \code{pval.coord} but for displaying
\code{log.rank.weights} name}
}
\description{
ggsurvplot Argument Descriptions
}
|
library(ggplot2)
library(dplyr)
options(na.action = "na.exclude")
# Helper functions ----------------------------------------------------------
# Remove the seasonal (monthly) component from a series.
# Fits a one-way month-effect linear model and returns the residuals
# re-centred at the overall mean, i.e. the deseasonalised values on the
# original scale. NA handling follows the global na.action option set above.
deseas <- function(var, month) {
  seasonal_fit <- lm(var ~ factor(month))
  overall_level <- mean(var, na.rm = TRUE)
  resid(seasonal_fit) + overall_level
}
# Explore multiple cities ----------------------------------------------------
tx <- read.csv("../data/tx-house-sales.csv")
# We know from our exploration of Houston data that many of the series
# have strong seasonal components. It's a good idea to check that's true for
# all cities. We'll start with sales.
qplot(date, sales, data = tx, geom = "line", group = city)
# Hmmmm. Problem! There's a large variation in the number of sales between
# cities. The seasonal pattern does look pretty constant though.
# First possible solution, just remove the seasonal effect as we did for a
# single city, but applied to multiple cities (using model as a tool)
tx_city <- tx %>% group_by(city)
tx = tx_city %>% mutate(sales_ds = deseas(sales, month))
qplot(date, sales_ds, data = tx, geom = "line", group = city)
# It works, but we don't gain much insight into what's going on.
# Let's fit the models, and actually look at them this time
models = tx %>% group_by(city) %>% do(model=lm(sales ~ factor(month), data=.))
models[[1, 'model']]
coef(models[[1, 'model']])
# To extract the coefficients, we want to go from a list to data frame
# Notice how plyr remembers the city names that we originally broke the
# models up by.
(models %>% group_by(city) %>% do(data.frame(t(coef(.[[1, 'model']])), check.names=FALSE)))[1:5, 1:3]
# Two problems with the model:
# * Coefficients aren't comparable, because of varying sizes
# Solution: log-transform to convert to ratios
# * Coefficients not in useful form for plotting
# Solution: create data frame ourselves
qplot(date, log10(sales), data = tx, geom = "line", group = city)
models2 <- tx_city %>% do(model=lm(log10(sales) ~ factor(month), data = .))
coef2 <- models2 %>% group_by(city) %>% do((function(row) {
mod = row[['model']][[1]]
data.frame(
month = 1:12,
effect = c(0, coef(mod)[-1]),
intercept = coef(mod)[1])
})(.))
# Pretty consistent pattern, although there are few outliers
qplot(month, effect, data = coef2, group = city, geom = "line")
# More interpretable if we back-transform - can now interpret as ratios
qplot(month, 10 ^ effect, data = coef2, group = city, geom = "line")
# What are the outliers?
qplot(month, 10 ^ effect, data = coef2, geom = "line") + facet_wrap(~ city)
# They are small cities. Hmmmmm
# Have a look at the distributions
qplot(effect, data = coef2, binwidth = 0.05) + facet_wrap(~ month)
# Single model ----------------------------------------------------
mod <- lm(log10(sales) ~ city + factor(month), data = tx)
tx$sales2 <- 10 ^ resid(mod)
qplot(date, sales2, data = tx, geom = "line", group = city)
# Now we're starting to get somewhere! Can see general pattern, although
# there are a few outliers. Look at cities individually to identify:
last_plot() + facet_wrap(~ city)
# Some problem cities:
# * Bryan-College station: has different seasonal pattern (Texas A&M?)
# * Similarly with San Marcos (a lot of missing data)
# * Palestine: massive increase beginning 2007
# Can resolve seasonal problems by fitting separate seasonal pattern to each
# city (Challenge: how is this different to the indivudal models we fit
# before?) But probably better to use more sophisticated model (e.g. mixed
# effects) model.
mod2 <- lm(log10(sales) ~ city:factor(month), data = tx)
tx$sales3 <- 10 ^ resid(mod2)
qplot(date, sales3, data = tx, geom = "line") + facet_wrap(~ city)
# Further exploration
qplot(date, sales2, data = tx, geom = "line", group = city, alpha = I(0.2))
last_plot() + geom_smooth(aes(group = 1))
# Could smooth individual cities - again just using model as a tool
library(mgcv)
# Smooth a series against date using a GAM (mgcv::gam with a spline term)
# and return the fitted values, i.e. a nonparametric trend estimate.
# NOTE(review): this masks base::smooth; assumes mgcv is attached (see
# library(mgcv) above) and that 'date' is numeric-like — confirm.
smooth <- function(var, date) {
  predict(gam(var ~ s(date)))
}
tx <- tx %>% group_by(city) %>% mutate(sales2_sm = as.vector(smooth(sales2, date)))
qplot(date, sales2_sm, data = tx, geom = "line", group = city)
# Another approach -----------------------------------------------------------
# Essence of most cities is seasonal term plus long term smooth trend. We
# could fit this model to each city, and then look for model which don't
# fit well.
library(splines)
models3 <- tx %>% group_by(city) %>% do(model=lm(log10(sales) ~ factor(month) + ns(date, 3), data = .))
# Extract rsquared from each model
# Extract the R-squared from a one-element list holding a fitted lm model,
# returned as a named numeric vector (name "rsq").
rsq <- function(mod) {
  fit <- mod[[1]]
  c(rsq = summary(fit)$r.squared)
}
quality <- models3 %>% group_by(city) %>% summarise(rsq=rsq(model))
qplot(rsq, city, data = quality)
qplot(rsq, reorder(city, rsq), data = quality)
quality$poor <- quality$rsq < 0.7
tx2 <- inner_join(tx, quality, by = "city")
# The cities don't look particularly different
# Plot each city's (log) sales, colouring poorly-fitting cities.
# FIX: opts() was removed from ggplot2 long ago; theme() is the supported API.
qplot(date, log10(sales), data = tx2, geom = "line", colour = poor) +
  facet_wrap(~ city) + theme(legend.position = "none")
# But we should probably look at the residuals & predictions
mfit <- models3 %>% do((function(mod) {
data.frame(resid = resid(mod$model), pred = predict(mod$model))
})(.))
tx2 <- cbind(tx2, mfit)
# Per-city model predictions and residuals, highlighting poorly-fitting
# cities. FIX: replaced the long-removed ggplot2 opts() with theme().
qplot(date, pred, data = tx2, geom = "line", colour = poor) +
  facet_wrap(~ city) + theme(legend.position = "none")
qplot(date, resid, data = tx2, geom = "line", colour = poor) +
  facet_wrap(~ city) + theme(legend.position = "none")
|
/R/tx-explore-all-dplyr.r
|
permissive
|
pssguy/plyrToDplyr
|
R
| false
| false
| 5,383
|
r
|
library(ggplot2)
library(dplyr)
options(na.action = "na.exclude")
# Helper functions ----------------------------------------------------------
# Deseasonalise a monthly series: subtract the fitted month effects
# (residuals of var ~ factor(month)) and add back the overall mean so the
# result stays on the original scale. NAs follow the global na.action.
deseas <- function(var, month) {
  month_model <- lm(var ~ factor(month))
  resid(month_model) + mean(var, na.rm = TRUE)
}
# Explore multiple cities ----------------------------------------------------
tx <- read.csv("../data/tx-house-sales.csv")
# We know from our exploration of Houston data that many of the series
# have strong seasonal components. It's a good idea to check that's true for
# all cities. We'll start with sales.
qplot(date, sales, data = tx, geom = "line", group = city)
# Hmmmm. Problem! There's a large variation in the number of sales between
# cities. The seasonal pattern does look pretty constant though.
# First possible solution, just remove the seasonal effect as we did for a
# single city, but applied to multiple cities (using model as a tool)
tx_city <- tx %>% group_by(city)
tx = tx_city %>% mutate(sales_ds = deseas(sales, month))
qplot(date, sales_ds, data = tx, geom = "line", group = city)
# It works, but we don't gain much insight into what's going on.
# Let's fit the models, and actually look at them this time
models = tx %>% group_by(city) %>% do(model=lm(sales ~ factor(month), data=.))
models[[1, 'model']]
coef(models[[1, 'model']])
# To extract the coefficients, we want to go from a list to data frame
# Notice how plyr remembers the city names that we originally broke the
# models up by.
(models %>% group_by(city) %>% do(data.frame(t(coef(.[[1, 'model']])), check.names=FALSE)))[1:5, 1:3]
# Two problems with the model:
# * Coefficients aren't comparable, because of varying sizes
# Solution: log-transform to convert to ratios
# * Coefficients not in useful form for plotting
# Solution: create data frame ourselves
qplot(date, log10(sales), data = tx, geom = "line", group = city)
models2 <- tx_city %>% do(model=lm(log10(sales) ~ factor(month), data = .))
coef2 <- models2 %>% group_by(city) %>% do((function(row) {
mod = row[['model']][[1]]
data.frame(
month = 1:12,
effect = c(0, coef(mod)[-1]),
intercept = coef(mod)[1])
})(.))
# Pretty consistent pattern, although there are few outliers
qplot(month, effect, data = coef2, group = city, geom = "line")
# More interpretable if we back-transform - can now interpret as ratios
qplot(month, 10 ^ effect, data = coef2, group = city, geom = "line")
# What are the outliers?
qplot(month, 10 ^ effect, data = coef2, geom = "line") + facet_wrap(~ city)
# They are small cities. Hmmmmm
# Have a look at the distributions
qplot(effect, data = coef2, binwidth = 0.05) + facet_wrap(~ month)
# Single model ----------------------------------------------------
mod <- lm(log10(sales) ~ city + factor(month), data = tx)
tx$sales2 <- 10 ^ resid(mod)
qplot(date, sales2, data = tx, geom = "line", group = city)
# Now we're starting to get somewhere! Can see general pattern, although
# there are a few outliers. Look at cities individually to identify:
last_plot() + facet_wrap(~ city)
# Some problem cities:
# * Bryan-College station: has different seasonal pattern (Texas A&M?)
# * Similarly with San Marcos (a lot of missing data)
# * Palestine: massive increase beginning 2007
# Can resolve seasonal problems by fitting separate seasonal pattern to each
# city (Challenge: how is this different to the indivudal models we fit
# before?) But probably better to use more sophisticated model (e.g. mixed
# effects) model.
mod2 <- lm(log10(sales) ~ city:factor(month), data = tx)
tx$sales3 <- 10 ^ resid(mod2)
qplot(date, sales3, data = tx, geom = "line") + facet_wrap(~ city)
# Further exploration
qplot(date, sales2, data = tx, geom = "line", group = city, alpha = I(0.2))
last_plot() + geom_smooth(aes(group = 1))
# Could smooth individual cities - again just using model as a tool
library(mgcv)
# Return GAM-smoothed fitted values of 'var' as a function of 'date'
# (mgcv::gam with a spline term) — a nonparametric long-term trend.
# NOTE(review): masks base::smooth; relies on library(mgcv) being
# attached earlier in the script — confirm.
smooth <- function(var, date) {
  predict(gam(var ~ s(date)))
}
tx <- tx %>% group_by(city) %>% mutate(sales2_sm = as.vector(smooth(sales2, date)))
qplot(date, sales2_sm, data = tx, geom = "line", group = city)
# Another approach -----------------------------------------------------------
# Essence of most cities is seasonal term plus long term smooth trend. We
# could fit this model to each city, and then look for model which don't
# fit well.
library(splines)
models3 <- tx %>% group_by(city) %>% do(model=lm(log10(sales) ~ factor(month) + ns(date, 3), data = .))
# Extract rsquared from each model
# Pull the R-squared out of a length-one list containing a fitted lm,
# returning it as a named numeric vector c(rsq = ...).
rsq <- function(mod) {
  model_summary <- summary(mod[[1]])
  c(rsq = model_summary$r.squared)
}
quality <- models3 %>% group_by(city) %>% summarise(rsq=rsq(model))
qplot(rsq, city, data = quality)
qplot(rsq, reorder(city, rsq), data = quality)
quality$poor <- quality$rsq < 0.7
tx2 <- inner_join(tx, quality, by = "city")
# The cities don't look particularly different
# Per-city (log) sales coloured by fit quality.
# FIX: opts() no longer exists in ggplot2; use theme() instead.
qplot(date, log10(sales), data = tx2, geom = "line", colour = poor) +
  facet_wrap(~ city) + theme(legend.position = "none")
# But we should probably look at the residuals & predictions
mfit <- models3 %>% do((function(mod) {
data.frame(resid = resid(mod$model), pred = predict(mod$model))
})(.))
tx2 <- cbind(tx2, mfit)
# Faceted prediction and residual plots per city, coloured by fit quality.
# FIX: ggplot2's opts() was removed; theme() is the current replacement.
qplot(date, pred, data = tx2, geom = "line", colour = poor) +
  facet_wrap(~ city) + theme(legend.position = "none")
qplot(date, resid, data = tx2, geom = "line", colour = poor) +
  facet_wrap(~ city) + theme(legend.position = "none")
|
library(dplyr)
library(Seurat)
library(reticulate)
library(sctransform)
library(SeuratData)
library(cowplot)
library(ggplot2)
use_virtualenv(".virtualenvs/UMAP")
#Path to output from cellranger
cell_ranger_path_drug
cell_ranger_path_veh
# Load the Cell Ranger filtered count matrices for each condition.
drug.data <- Read10X(data.dir = paste0(cell_ranger_path_drug, "/outs/filtered_feature_bc_matrix"))
# BUG FIX: was 'cell_ranger_path_ve' (undefined); the vehicle path variable
# declared above is 'cell_ranger_path_veh'.
veh.data <- Read10X(data.dir = paste0(cell_ranger_path_veh, "/outs/filtered_feature_bc_matrix"))
drug <- CreateSeuratObject(counts = drug.data, project = "PROJECT_NAME", min.cells = 3, min.features = 200)
drug #14419 features, 3950 samples
drug[["percent.mt"]] <- PercentageFeatureSet(drug, pattern = "^mt-")
VlnPlot(drug, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
drug <- subset(drug, subset = nFeature_RNA > 200 & percent.mt < 5)
veh <- CreateSeuratObject(counts = veh.data, project = "PROJECT_NAME", min.cells = 3, min.features = 200)
veh #14028 features, 4077 samples
veh[["percent.mt"]] <- PercentageFeatureSet(veh, pattern = "^mt-")
VlnPlot(veh, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
veh <- subset(veh, subset = nFeature_RNA > 200 & percent.mt < 5)
# Identify the 10 most highly variable genes
top10_drug <- head(VariableFeatures(drug), 10)
top10_veh <- head(VariableFeatures(veh), 10)
# run sctransform
drug %>%
SCTransform(vars.to.regress = "percent.mt", verbose = FALSE) %>%
RunPCA(verbose = FALSE) %>%
RunUMAP(dims = 1:30, verbose = FALSE) %>%
FindNeighbors(dims = 1:30, verbose = FALSE) %>%
FindClusters(verbose = FALSE) -> drug
DimPlot(label = TRUE) + NoLegend()
veh %>%
SCTransform(vars.to.regress = "percent.mt", verbose = FALSE) %>%
RunPCA(verbose = FALSE) %>%
RunUMAP(dims = 1:30, verbose = FALSE) %>%
FindNeighbors(dims = 1:30, verbose = FALSE) %>%
FindClusters(verbose = FALSE) -> veh
#Load scMatch annotations ('annot' object) generated from the scMatch_analysis.R script
load("annot_veh.Rdata")
load("annot_drug.Rdata")
# Tidy the scMatch annotation table for the vehicle sample: drop unused
# columns and strip the "-1" barcode suffix so cell IDs match Seurat names.
annot_veh$top.sample <- NULL
annot_veh$top.correlation.score <- NULL
# BUG FIX: 'LL_38_annot' is never defined in this script; the annotation
# object loaded from annot_veh.Rdata is 'annot_veh'.
annot_veh$cell <- gsub("-1", "", annot_veh$cell)
# Map each Seurat cell to its scMatch cell-type call by barcode.
temp_names <- names(veh$SCT_snn_res.0.8)
veh$cell_types_scMatch <- annot_veh$cell.type[match(temp_names, annot_veh$cell)]
veh$cell_types_scMatch2 <- veh$cell_types_scMatch
veh$cell_types_scMatch2 <- gsub(", fetal liver derived", "", veh$cell_types_scMatch2)
veh$cell_types_scMatch2 <- gsub(", placenta derived", "", veh$cell_types_scMatch2)
veh$cell_types_scMatch2 <- gsub("- alternatively activated", "", veh$cell_types_scMatch2)
DimPlot(veh, label = TRUE, group.by = 'cell_types_scMatch2')
DimPlot(veh, label = FALSE, group.by = 'cell_types_scMatch2')
# Tidy the scMatch annotation table for the drug sample: drop unused
# columns and strip the "-1" barcode suffix so cell IDs match Seurat names.
annot_drug$top.sample <- NULL
annot_drug$top.correlation.score <- NULL
# BUG FIX: 'LL_30_annot' is never defined in this script; the annotation
# object loaded from annot_drug.Rdata is 'annot_drug'.
annot_drug$cell <- gsub("-1", "", annot_drug$cell)
# Map each Seurat cell to its scMatch cell-type call by barcode.
temp_names <- names(drug$SCT_snn_res.0.8)
drug$cell_types_scMatch <- annot_drug$cell.type[match(temp_names, annot_drug$cell)]
drug$cell_types_scMatch2 <- drug$cell_types_scMatch
drug$cell_types_scMatch2 <- gsub(", fetal liver derived", "", drug$cell_types_scMatch2)
drug$cell_types_scMatch2 <- gsub(", placenta derived", "", drug$cell_types_scMatch2)
drug$cell_types_scMatch2 <- gsub("- alternatively activated", "", drug$cell_types_scMatch2)
DimPlot(drug, label = TRUE, group.by = 'cell_types_scMatch2')
DimPlot(drug, label = FALSE, group.by = 'cell_types_scMatch2')
###Combining plots
combined.integrated <- RunPCA(combined.integrated, verbose = FALSE)
combined.integrated <- RunUMAP(combined.integrated, dims = 1:30)
DimPlot(veh, label = TRUE)
options(future.globals.maxSize = 4000 * 1024^2)
drug <- CreateSeuratObject(counts = drug.data, project = "drug", min.cells = 3, min.features = 200)
drug
drug[["percent.mt"]] <- PercentageFeatureSet(drug, pattern = "^mt-")
VlnPlot(drug, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
drug <- subset(drug, subset = nFeature_RNA > 200 & percent.mt < 5)
veh <- CreateSeuratObject(counts = veh.data, project = "veh", min.cells = 3, min.features = 200)
veh
veh[["percent.mt"]] <- PercentageFeatureSet(veh, pattern = "^mt-")
VlnPlot(veh, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
veh <- subset(veh, subset = nFeature_RNA > 200 & percent.mt < 5)
##Combination
combined <- merge(veh, y = drug, add.cell.ids = c("veh", "drug"), project = "PROJECT_NAME")
combined.list <- SplitObject(combined)
for (i in 1:length(combined.list)) {
combined.list[[i]] <- SCTransform(combined.list[[i]], verbose = FALSE)
}
genes_veh <- rownames(veh)
genes_drug <- rownames(drug)
genes_veh[genes_veh %in% genes_drug]
combined.features <- SelectIntegrationFeatures(object.list = combined.list, nfeatures = 3000)
combined.list <- PrepSCTIntegration(object.list = combined.list, anchor.features = combined.features,
verbose = FALSE)
combined.anchors <- FindIntegrationAnchors(object.list = combined.list, normalization.method = "SCT",
anchor.features = combined.features, verbose = FALSE)
combined.integrated <- IntegrateData(anchorset = combined.anchors, normalization.method = "SCT",
verbose = FALSE)
combined.integrated <- RunPCA(combined.integrated, verbose = FALSE)
combined.integrated <- RunUMAP(combined.integrated, dims = 1:30)
plots <- DimPlot(combined.integrated, combine = FALSE)
plots <- lapply(X = plots, FUN = function(x) x + theme(legend.position = "top") + guides(color = guide_legend(nrow = 3,
byrow = TRUE, override.aes = list(size = 3))))
CombinePlots(plots)
veh_cells <- WhichCells(object=combined.integrated, ident="veh")
drug_cells <- WhichCells(object=combined.integrated, ident="drug")
combined.integrated %>%
FindNeighbors(dims = 1:30, verbose = FALSE) %>%
FindClusters(verbose = FALSE, resolution=1.1) -> combined.integrated2 #1.1
#DimPlot(label=TRUE)
DimPlot(combined.integrated2, label = TRUE)
markers <- FindAllMarkers(combined.integrated2, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
##Adding the annotations from scMatch to the combined plot
DimPlot(combined.integrated2, reduction = "umap", split.by = "orig.ident")
scMatch_annot <- rbind(annot_drug, annot_veh)
combined.integrated2$integrated_snn_res.1.1
temp_names <- names(combined.integrated2$cell_types_scMatch)
combined.integrated2$cell_types_scMatch <- scMatch_annot$cell.type[match(temp_names, scMatch_annot$cell)]
combined.integrated2$cell_types_scMatch2 <- combined.integrated2$cell_types_scMatch
combined.integrated2$cell_types_scMatch2 <- gsub(", fetal liver derived", "", combined.integrated2$cell_types_scMatch2)
combined.integrated2$cell_types_scMatch2 <- gsub(", placenta derived", "", combined.integrated2$cell_types_scMatch2)
combined.integrated2$cell_types_scMatch2 <- gsub(" - alternatively activated", "", combined.integrated2$cell_types_scMatch2)
combined.integrated2$orig.ident <- factor(combined.integrated2$orig.ident, levels=c("veh", "drug"))
DimPlot(combined.integrated2, reduction = "umap", group.by = "cell_types_scMatch2",
split.by='orig.ident')
DimPlot(combined.integrated2, reduction = "umap", group.by = "orig.ident",
split.by = "cell_types_scMatch2")
which_no_na <- names(combined.integrated2$cell_types_scMatch2)[!is.na(combined.integrated2$cell_types_scMatch2)]
combined.integrated_no_na <- subset(combined.integrated2, cells=which_no_na)
DimPlot(combined.integrated_no_na, reduction = "umap", group.by = "orig.ident",
split.by = "cell_types_scMatch2")
#Calculation of cell proportions
cell_proportions <- vector()
for(cluster in 0:14){
cluster_cells <- WhichCells(object=combined.integrated2, ident=cluster)
n_veh <- sum(cluster_cells %in% veh_cells)
n_drug <- sum(cluster_cells %in% drug_cells)
p_veh <- (n_veh/length(cluster_cells))*100
p_drug <- (n_drug/length(cluster_cells))*100
cell_proportions <- rbind(cell_proportions,
c(cluster, length(cluster_cells), n_veh, n_drug, p_veh, p_drug))
}
colnames(cell_proportions) <- c("Cluster",
"Total_cells", "N_veh", "N_drug", "P_veh", "P_drug")
cell_proportions <- as.data.frame(cell_proportions)
significant_genes$P_veh <- cell_proportions$P_veh[match(significant_genes$cluster, cell_proportions$Cluster)]
significant_genes$P_drug <- cell_proportions$P_drug[match(significant_genes$cluster, cell_proportions$Cluster)]
|
/Seurat_pipeline_sctransform.R
|
no_license
|
JohnstoneKats/SingleCellAnalysis
|
R
| false
| false
| 8,512
|
r
|
library(dplyr)
library(Seurat)
library(reticulate)
library(sctransform)
library(SeuratData)
library(cowplot)
library(ggplot2)
use_virtualenv(".virtualenvs/UMAP")
#Path to output from cellranger
cell_ranger_path_drug
cell_ranger_path_veh
# Read the filtered feature-barcode matrices from the Cell Ranger output.
drug.data <- Read10X(data.dir = paste0(cell_ranger_path_drug, "/outs/filtered_feature_bc_matrix"))
# BUG FIX: 'cell_ranger_path_ve' does not exist — corrected to
# 'cell_ranger_path_veh', the vehicle-sample path declared above.
veh.data <- Read10X(data.dir = paste0(cell_ranger_path_veh, "/outs/filtered_feature_bc_matrix"))
drug <- CreateSeuratObject(counts = drug.data, project = "PROJECT_NAME", min.cells = 3, min.features = 200)
drug #14419 features, 3950 samples
drug[["percent.mt"]] <- PercentageFeatureSet(drug, pattern = "^mt-")
VlnPlot(drug, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
drug <- subset(drug, subset = nFeature_RNA > 200 & percent.mt < 5)
veh <- CreateSeuratObject(counts = veh.data, project = "PROJECT_NAME", min.cells = 3, min.features = 200)
veh #14028 features, 4077 samples
veh[["percent.mt"]] <- PercentageFeatureSet(veh, pattern = "^mt-")
VlnPlot(veh, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
veh <- subset(veh, subset = nFeature_RNA > 200 & percent.mt < 5)
# Identify the 10 most highly variable genes
top10_drug <- head(VariableFeatures(drug), 10)
top10_veh <- head(VariableFeatures(veh), 10)
# run sctransform
drug %>%
SCTransform(vars.to.regress = "percent.mt", verbose = FALSE) %>%
RunPCA(verbose = FALSE) %>%
RunUMAP(dims = 1:30, verbose = FALSE) %>%
FindNeighbors(dims = 1:30, verbose = FALSE) %>%
FindClusters(verbose = FALSE) -> drug
DimPlot(label = TRUE) + NoLegend()
veh %>%
SCTransform(vars.to.regress = "percent.mt", verbose = FALSE) %>%
RunPCA(verbose = FALSE) %>%
RunUMAP(dims = 1:30, verbose = FALSE) %>%
FindNeighbors(dims = 1:30, verbose = FALSE) %>%
FindClusters(verbose = FALSE) -> veh
#Load scMatch annotations ('annot' object) generated from the scMatch_analysis.R script
load("annot_veh.Rdata")
load("annot_drug.Rdata")
# Prepare the vehicle scMatch annotations: remove unused columns and strip
# the "-1" barcode suffix so barcodes line up with Seurat cell names.
annot_veh$top.sample <- NULL
annot_veh$top.correlation.score <- NULL
# BUG FIX: replaced undefined 'LL_38_annot' with 'annot_veh', the object
# loaded from annot_veh.Rdata.
annot_veh$cell <- gsub("-1", "", annot_veh$cell)
# Attach the matching scMatch cell-type labels to the Seurat object.
temp_names <- names(veh$SCT_snn_res.0.8)
veh$cell_types_scMatch <- annot_veh$cell.type[match(temp_names, annot_veh$cell)]
veh$cell_types_scMatch2 <- veh$cell_types_scMatch
veh$cell_types_scMatch2 <- gsub(", fetal liver derived", "", veh$cell_types_scMatch2)
veh$cell_types_scMatch2 <- gsub(", placenta derived", "", veh$cell_types_scMatch2)
veh$cell_types_scMatch2 <- gsub("- alternatively activated", "", veh$cell_types_scMatch2)
DimPlot(veh, label = TRUE, group.by = 'cell_types_scMatch2')
DimPlot(veh, label = FALSE, group.by = 'cell_types_scMatch2')
# Prepare the drug scMatch annotations: remove unused columns and strip
# the "-1" barcode suffix so barcodes line up with Seurat cell names.
annot_drug$top.sample <- NULL
annot_drug$top.correlation.score <- NULL
# BUG FIX: replaced undefined 'LL_30_annot' with 'annot_drug', the object
# loaded from annot_drug.Rdata.
annot_drug$cell <- gsub("-1", "", annot_drug$cell)
# Attach the matching scMatch cell-type labels to the Seurat object.
temp_names <- names(drug$SCT_snn_res.0.8)
drug$cell_types_scMatch <- annot_drug$cell.type[match(temp_names, annot_drug$cell)]
drug$cell_types_scMatch2 <- drug$cell_types_scMatch
drug$cell_types_scMatch2 <- gsub(", fetal liver derived", "", drug$cell_types_scMatch2)
drug$cell_types_scMatch2 <- gsub(", placenta derived", "", drug$cell_types_scMatch2)
drug$cell_types_scMatch2 <- gsub("- alternatively activated", "", drug$cell_types_scMatch2)
DimPlot(drug, label = TRUE, group.by = 'cell_types_scMatch2')
DimPlot(drug, label = FALSE, group.by = 'cell_types_scMatch2')
###Combining plots
combined.integrated <- RunPCA(combined.integrated, verbose = FALSE)
combined.integrated <- RunUMAP(combined.integrated, dims = 1:30)
DimPlot(veh, label = TRUE)
options(future.globals.maxSize = 4000 * 1024^2)
drug <- CreateSeuratObject(counts = drug.data, project = "drug", min.cells = 3, min.features = 200)
drug
# --- QC for the drug-treated sample ---------------------------------------
# Percent mitochondrial reads per cell; "^mt-" matches lowercase (mouse-style)
# mitochondrial gene symbols -- TODO confirm species/gene naming.
drug[["percent.mt"]] <- PercentageFeatureSet(drug, pattern = "^mt-")
# Visual QC of gene counts, UMI counts, and mito fraction per cell.
VlnPlot(drug, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
# Keep cells with >200 detected genes and <5% mitochondrial reads.
drug <- subset(drug, subset = nFeature_RNA > 200 & percent.mt < 5)
# --- QC for the vehicle (control) sample, same thresholds -----------------
veh <- CreateSeuratObject(counts = veh.data, project = "veh", min.cells = 3, min.features = 200)
veh
veh[["percent.mt"]] <- PercentageFeatureSet(veh, pattern = "^mt-")
VlnPlot(veh, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
veh <- subset(veh, subset = nFeature_RNA > 200 & percent.mt < 5)
## Combination: merge the two samples, then integrate with SCTransform
# Cell barcodes are prefixed "veh_"/"drug_" so origins stay distinguishable.
combined <- merge(veh, y = drug, add.cell.ids = c("veh", "drug"), project = "PROJECT_NAME")
# NOTE(review): SplitObject without split.by splits on the active identity --
# presumably orig.ident here; confirm.
combined.list <- SplitObject(combined)
# Normalize each sample separately with SCTransform before integration.
for (i in 1:length(combined.list)) {
  combined.list[[i]] <- SCTransform(combined.list[[i]], verbose = FALSE)
}
# NOTE(review): this intersection of gene names is computed but never stored
# or used downstream -- looks like leftover interactive exploration.
genes_veh <- rownames(veh)
genes_drug <- rownames(drug)
genes_veh[genes_veh %in% genes_drug]
# Standard SCT integration workflow: shared features -> prep -> anchors -> integrate.
combined.features <- SelectIntegrationFeatures(object.list = combined.list, nfeatures = 3000)
combined.list <- PrepSCTIntegration(object.list = combined.list, anchor.features = combined.features,
                                    verbose = FALSE)
combined.anchors <- FindIntegrationAnchors(object.list = combined.list, normalization.method = "SCT",
                                           anchor.features = combined.features, verbose = FALSE)
combined.integrated <- IntegrateData(anchorset = combined.anchors, normalization.method = "SCT",
                                     verbose = FALSE)
# Dimensionality reduction on the integrated assay.
combined.integrated <- RunPCA(combined.integrated, verbose = FALSE)
combined.integrated <- RunUMAP(combined.integrated, dims = 1:30)
plots <- DimPlot(combined.integrated, combine = FALSE)
plots <- lapply(X = plots, FUN = function(x) x + theme(legend.position = "top") + guides(color = guide_legend(nrow = 3,
                                                                                                             byrow = TRUE, override.aes = list(size = 3))))
CombinePlots(plots)
# Barcode sets per condition (used later for per-cluster proportions).
veh_cells <- WhichCells(object=combined.integrated, ident="veh")
drug_cells <- WhichCells(object=combined.integrated, ident="drug")
# Graph-based clustering at resolution 1.1 on the first 30 PCs.
combined.integrated %>%
  FindNeighbors(dims = 1:30, verbose = FALSE) %>%
  FindClusters(verbose = FALSE, resolution=1.1) -> combined.integrated2 #1.1
#DimPlot(label=TRUE)
DimPlot(combined.integrated2, label = TRUE)
# Positive marker genes for every cluster.
markers <- FindAllMarkers(combined.integrated2, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
##Adding the annotations from scMatch to the combined plot
DimPlot(combined.integrated2, reduction = "umap", split.by = "orig.ident")
# annot_drug / annot_veh come from earlier in the file (scMatch output).
scMatch_annot <- rbind(annot_drug, annot_veh)
combined.integrated2$integrated_snn_res.1.1
# NOTE(review): cell_types_scMatch is read here before it is assigned on the
# next line; names() will be NULL unless the column was set earlier -- verify.
temp_names <- names(combined.integrated2$cell_types_scMatch)
combined.integrated2$cell_types_scMatch <- scMatch_annot$cell.type[match(temp_names, scMatch_annot$cell)]
# Tidy the scMatch labels by stripping tissue/state suffixes.
combined.integrated2$cell_types_scMatch2 <- combined.integrated2$cell_types_scMatch
combined.integrated2$cell_types_scMatch2 <- gsub(", fetal liver derived", "", combined.integrated2$cell_types_scMatch2)
combined.integrated2$cell_types_scMatch2 <- gsub(", placenta derived", "", combined.integrated2$cell_types_scMatch2)
combined.integrated2$cell_types_scMatch2 <- gsub(" - alternatively activated", "", combined.integrated2$cell_types_scMatch2)
# Fix the panel order so vehicle is plotted before drug.
combined.integrated2$orig.ident <- factor(combined.integrated2$orig.ident, levels=c("veh", "drug"))
DimPlot(combined.integrated2, reduction = "umap", group.by = "cell_types_scMatch2",
        split.by='orig.ident')
DimPlot(combined.integrated2, reduction = "umap", group.by = "orig.ident",
        split.by = "cell_types_scMatch2")
# Drop cells with no scMatch annotation before the faceted plot.
which_no_na <- names(combined.integrated2$cell_types_scMatch2)[!is.na(combined.integrated2$cell_types_scMatch2)]
combined.integrated_no_na <- subset(combined.integrated2, cells=which_no_na)
DimPlot(combined.integrated_no_na, reduction = "umap", group.by = "orig.ident",
        split.by = "cell_types_scMatch2")
#Calculation of cell proportions
# Per cluster: total cells plus count and percentage from each condition.
# NOTE(review): cluster ids 0:14 are hard-coded -- breaks silently if the
# resolution yields a different number of clusters; verify against levels().
cell_proportions <- vector()
for(cluster in 0:14){
  cluster_cells <- WhichCells(object=combined.integrated2, ident=cluster)
  n_veh <- sum(cluster_cells %in% veh_cells)
  n_drug <- sum(cluster_cells %in% drug_cells)
  p_veh <- (n_veh/length(cluster_cells))*100
  p_drug <- (n_drug/length(cluster_cells))*100
  cell_proportions <- rbind(cell_proportions,
                            c(cluster, length(cluster_cells), n_veh, n_drug, p_veh, p_drug))
}
colnames(cell_proportions) <- c("Cluster",
                                "Total_cells", "N_veh", "N_drug", "P_veh", "P_drug")
cell_proportions <- as.data.frame(cell_proportions)
# Attach the per-cluster condition percentages to the marker results.
# significant_genes is defined elsewhere in this file.
significant_genes$P_veh <- cell_proportions$P_veh[match(significant_genes$cluster, cell_proportions$Cluster)]
significant_genes$P_drug <- cell_proportions$P_drug[match(significant_genes$cluster, cell_proportions$Cluster)]
|
#' @title Decompose portfolio variance risk into factor/residual risk
#'
#' @description Decompose portfolio variance risk into factor/residual risk
#'
#'
#' @param object fit object of class \code{tsfm}, or \code{ffm}.
#' @param weights a vector of weights of the assets in the portfolio. Default is NULL,
#' in which case an equal weights will be used.
#' @param factor.cov optional user specified factor covariance matrix with
#' named columns; defaults to the sample covariance matrix.
#' @param use an optional character string giving a method for computing
#' covariances in the presence of missing values. This must be (an
#' abbreviation of) one of the strings "everything", "all.obs",
#' "complete.obs", "na.or.complete", or "pairwise.complete.obs". Default is
#' "pairwise.complete.obs".
#' @param ... optional arguments passed to \code{\link[stats]{cov}}.
#'
#' @return A vector containing: percent factor contribution to risk
#' portfolio volatility risk, factor volatility risk and
#' residual/specific volatility risk
#'
#' @author Douglas Martin, Lingjie Yi
#'
#'
#' @seealso \code{\link{fitTsfm}}, \code{\link{fitFfm}}
#' for the different factor model fitting functions.
#'
#' \code{\link{portSdDecomp}} for portfolio factor model standard deviation decomposition.
#' \code{\link{portVaRDecomp}} for portfolio factor model VaR decomposition.
#' \code{\link{portEsDecomp}} for portfolio factor model ES decomposition.
#'
#'
#' @examples
#' # Time Series Factor Model
#'
#' # load data
#' data(managers, package = 'PerformanceAnalytics')
#'
#' fit.macro <- fitTsfm(asset.names = colnames(managers[,(1:6)]),
#' factor.names = colnames(managers[,(7:9)]),
#' rf.name = colnames(managers[,10]),
#' data = managers)
#'
#' decomp <- portVolDecomp(fit.macro)
#'
#' decomp
#'
#' # Fundamental Factor Model
#' data("stocks145scores6")
#' dat = stocks145scores6
#' dat$DATE = zoo::as.yearmon(dat$DATE)
#' dat = dat[dat$DATE >=zoo::as.yearmon("2008-01-01") & dat$DATE <= zoo::as.yearmon("2012-12-31"),]
#'
#'
#' # Load long-only GMV weights for the return data
#' data("wtsStocks145GmvLo")
#' wtsStocks145GmvLo = round(wtsStocks145GmvLo,5)
#'
#' # fit a fundamental factor model
#' fit.cross <- fitFfm(data = dat,
#' exposure.vars = c("SECTOR","ROE","BP","PM12M1M","SIZE","ANNVOL1M",
#' "EP"),date.var = "DATE", ret.var = "RETURN", asset.var = "TICKER",
#' fit.method="WLS", z.score = "crossSection")
#'
#' decomp = portVolDecomp(fit.cross)
#' # get the factor contributions of risk
#' decomp
#'
#' @export
portVolDecomp <- function(object, ...){
  # Validate the fitted-model class, then dispatch to the matching method.
  supported <- inherits(object, c("tsfm", "ffm"))
  if (!supported) {
    stop("Invalid argument: Object should be of class 'tsfm', or 'ffm'.")
  }
  UseMethod("portVolDecomp")
}
#' @rdname portVolDecomp
#' @method portVolDecomp tsfm
#' @export
portVolDecomp.tsfm <- function(object, weights = NULL, factor.cov,
                               use="pairwise.complete.obs", ...) {
  # Split portfolio variance for a fitted time-series factor model into a
  # systematic piece (w'B cov(F) B'w) and a specific piece (w'D w), where D
  # carries the squared residual standard deviations on its diagonal.
  exposures <- object$beta
  exposures[is.na(exposures)] <- 0   # missing loadings contribute no risk
  num.assets <- nrow(exposures)
  asset.ids <- object$asset.names
  # Equal weighting by default; user-supplied weights must be named and of
  # the right length, and are re-aligned to the fit's asset ordering.
  if (is.null(weights)) {
    weights <- rep(1 / num.assets, num.assets)
  } else {
    if (length(weights) != num.assets) {
      stop("Invalid argument: incorrect number of weights")
    }
    if (is.null(names(weights))) {
      stop("Invalid argument: names of weights vector should match with asset names")
    }
    weights <- weights[asset.ids]
  }
  # Factor covariance: sample covariance of the fitted factor series unless
  # the caller supplies a K x K matrix.
  factor.mat <- as.matrix(object$data[, object$factor.names])
  if (missing(factor.cov)) {
    factor.cov <- cov(factor.mat, use = use, ...)
  } else {
    if (!identical(dim(factor.cov), as.integer(c(ncol(factor.mat), ncol(factor.mat))))) {
      stop("Dimensions of user specified factor covariance matrix are not
           compatible with the number of factors in the fitTsfm object")
    }
  }
  exposures <- as.matrix(exposures)
  port.expo <- t(weights) %*% exposures            # 1 x K portfolio exposures
  factorVol <- port.expo %*% factor.cov %*% t(port.expo)
  resid.cov <- diag(object$resid.sd^2)             # diagonal residual covariance
  residVol <- t(weights) %*% resid.cov %*% weights
  totalVol <- factorVol + residVol
  list("Percent Factor Contribution to Risk" = factorVol / totalVol,
       "Portfolio Volatility Risk" = totalVol,
       "Factor Volatility Risk" = factorVol,
       "Residual Volatility Risk" = residVol)
}
#' @rdname portVolDecomp
#' @method portVolDecomp ffm
#' @export
portVolDecomp.ffm <- function(object, weights = NULL, factor.cov, ...) {
  # Decompose portfolio variance for a fitted fundamental factor model into a
  # factor component (w'B cov(F) B'w) and a residual component (w'D w).
  # Defensive class check (dispatch via the generic already guarantees this).
  if (!inherits(object, "ffm")) {
    stop("Invalid argument: object should be of class 'ffm'.")
  }
  # get parameters from the factor model fit
  # (exposure-variable bookkeeping and TP were computed here before but never
  # used, so they have been removed)
  beta = object$beta
  n.assets = nrow(beta)
  asset.names <- unique(object$data[[object$asset.var]])
  # check if there is weight input
  if(is.null(weights)){
    weights = rep(1/n.assets, n.assets)
  }else{
    # check if number of weight parameters matches
    if(n.assets != length(weights)){
      stop("Invalid argument: incorrect number of weights")
    }
    if(!is.null(names(weights))){
      # re-align the user-supplied weights with the fit's asset ordering
      weights = weights[asset.names]
    }else{
      stop("Invalid argument: names of weights vector should match with asset names")
    }
  }
  # get cov(F): K x K -- default to the covariance estimated by fitFfm
  if (missing(factor.cov)) {
    factor.cov = object$factor.cov
  } else {
    if (!identical(dim(factor.cov), dim(object$factor.cov))) {
      stop("Dimensions of user specified factor covariance matrix are not
         compatible with the number of factors (including dummies) in the
         fitFfm object")
    }
  }
  # portfolio exposures x = w'B (1 x K)
  x = weights %*% beta
  factorVol = x %*% factor.cov %*% t(x)
  # residual covariance is diagonal with the fitted residual variances
  D <- diag(object$resid.var)
  residVol = t(weights) %*% D %*% weights
  totalVol = factorVol + residVol
  percentFactorVol = factorVol/totalVol
  output = list("Percent Factor Contribution to Risk" = percentFactorVol,
                "Portfolio Volatility Risk" = totalVol,
                "Factor Volatility Risk" = factorVol,
                "Residual Volatility Risk" = residVol)
  return(output)
}
|
/R/portVolDecomp.R
|
no_license
|
FoeinLove/FactorAnalytics
|
R
| false
| false
| 6,624
|
r
|
#' @title Decompose portfolio variance risk into factor/residual risk
#'
#' @description Decompose portfolio variance risk into factor/residual risk
#'
#'
#' @param object fit object of class \code{tsfm}, or \code{ffm}.
#' @param weights a vector of weights of the assets in the portfolio. Default is NULL,
#' in which case an equal weights will be used.
#' @param factor.cov optional user specified factor covariance matrix with
#' named columns; defaults to the sample covariance matrix.
#' @param use an optional character string giving a method for computing
#' covariances in the presence of missing values. This must be (an
#' abbreviation of) one of the strings "everything", "all.obs",
#' "complete.obs", "na.or.complete", or "pairwise.complete.obs". Default is
#' "pairwise.complete.obs".
#' @param ... optional arguments passed to \code{\link[stats]{cov}}.
#'
#' @return A vector containing: percent factor contribution to risk
#' portfolio volatility risk, factor volatility risk and
#' residual/specific volatility risk
#'
#' @author Douglas Martin, Lingjie Yi
#'
#'
#' @seealso \code{\link{fitTsfm}}, \code{\link{fitFfm}}
#' for the different factor model fitting functions.
#'
#' \code{\link{portSdDecomp}} for portfolio factor model standard deviation decomposition.
#' \code{\link{portVaRDecomp}} for portfolio factor model VaR decomposition.
#' \code{\link{portEsDecomp}} for portfolio factor model ES decomposition.
#'
#'
#' @examples
#' # Time Series Factor Model
#'
#' # load data
#' data(managers, package = 'PerformanceAnalytics')
#'
#' fit.macro <- fitTsfm(asset.names = colnames(managers[,(1:6)]),
#' factor.names = colnames(managers[,(7:9)]),
#' rf.name = colnames(managers[,10]),
#' data = managers)
#'
#' decomp <- portVolDecomp(fit.macro)
#'
#' decomp
#'
#' # Fundamental Factor Model
#' data("stocks145scores6")
#' dat = stocks145scores6
#' dat$DATE = zoo::as.yearmon(dat$DATE)
#' dat = dat[dat$DATE >=zoo::as.yearmon("2008-01-01") & dat$DATE <= zoo::as.yearmon("2012-12-31"),]
#'
#'
#' # Load long-only GMV weights for the return data
#' data("wtsStocks145GmvLo")
#' wtsStocks145GmvLo = round(wtsStocks145GmvLo,5)
#'
#' # fit a fundamental factor model
#' fit.cross <- fitFfm(data = dat,
#' exposure.vars = c("SECTOR","ROE","BP","PM12M1M","SIZE","ANNVOL1M",
#' "EP"),date.var = "DATE", ret.var = "RETURN", asset.var = "TICKER",
#' fit.method="WLS", z.score = "crossSection")
#'
#' decomp = portVolDecomp(fit.cross)
#' # get the factor contributions of risk
#' decomp
#'
#' @export
portVolDecomp <- function(object, ...){
# check input object validity
if (!inherits(object, c("tsfm", "ffm"))) {
stop("Invalid argument: Object should be of class 'tsfm', or 'ffm'.")
}
UseMethod("portVolDecomp")
}
#' @rdname portVolDecomp
#' @method portVolDecomp tsfm
#' @export
portVolDecomp.tsfm <- function(object, weights = NULL, factor.cov,
use="pairwise.complete.obs", ...) {
# get beta.star: 1 x (K+1)
beta <- object$beta
beta[is.na(beta)] <- 0
n.assets = nrow(beta)
asset.names <- object$asset.names
# check if there is weight input
if(is.null(weights)){
weights = rep(1/n.assets, n.assets)
}else{
# check if number of weight parameter matches
if(n.assets != length(weights)){
stop("Invalid argument: incorrect number of weights")
}
if(!is.null(names(weights))){
weights = weights[asset.names]
}else{
stop("Invalid argument: names of weights vector should match with asset names")
}
}
# get cov(F): K x K
factor <- as.matrix(object$data[, object$factor.names])
if (missing(factor.cov)) {
factor.cov = cov(factor, use=use, ...)
} else {
if (!identical(dim(factor.cov), as.integer(c(ncol(factor), ncol(factor))))) {
stop("Dimensions of user specified factor covariance matrix are not
compatible with the number of factors in the fitTsfm object")
}
}
beta = as.matrix(beta)
x = t(weights) %*% beta
factorVol = x %*% factor.cov %*% t(x)
D <- diag(object$resid.sd^2)
residVol = t(weights) %*% D %*% weights
totalVol = factorVol + residVol
percentFactorVol = factorVol/totalVol
output = list("Percent Factor Contribution to Risk" = percentFactorVol,
"Portfolio Volatility Risk" = totalVol,
"Factor Volatility Risk" = factorVol,
"Residual Volatility Risk" = residVol)
return(output)
}
#' @rdname portVolDecomp
#' @method portVolDecomp ffm
#' @export
portVolDecomp.ffm <- function(object, weights = NULL, factor.cov, ...) {
if (!inherits(object, "ffm")) {
stop("Invalid argument: object should be of class'ffm'.")
}
which.numeric <- sapply(object$data[,object$exposure.vars,drop=FALSE], is.numeric)
exposures.num <- object$exposure.vars[which.numeric]
exposures.char <- object$exposure.vars[!which.numeric]
# get parameter from the factor model fit
beta = object$beta
n.assets = nrow(beta)
asset.names <- unique(object$data[[object$asset.var]])
TP = length(object$time.periods)
# check if there is weight input
if(is.null(weights)){
weights = rep(1/n.assets, n.assets)
}else{
# check if number of weight parameter matches
if(n.assets != length(weights)){
stop("Invalid argument: incorrect number of weights")
}
if(!is.null(names(weights))){
weights = weights[asset.names]
}else{
stop("Invalid argument: names of weights vector should match with asset names")
}
}
#calculate x = t(w) * B
# get cov(F): K x K
if (missing(factor.cov)) {
factor.cov = object$factor.cov
} else {
if (!identical(dim(factor.cov), dim(object$factor.cov))) {
stop("Dimensions of user specified factor covariance matrix are not
compatible with the number of factors (including dummies) in the
fitFfm object")
}
}
x = weights %*% beta
factorVol = x %*% factor.cov %*% t(x)
D <- diag(object$resid.var)
residVol = t(weights) %*% D %*% weights
totalVol = factorVol + residVol
percentFactorVol = factorVol/totalVol
output = list("Percent Factor Contribution to Risk" = percentFactorVol,
"Portfolio Volatility Risk" = totalVol,
"Factor Volatility Risk" = factorVol,
"Residual Volatility Risk" = residVol)
return(output)
}
|
# Benchmark script: time caret's xgbTree random-search tuning with an
# 11-worker fork cluster, and record the elapsed time plus machine metadata.
workers <- 11
prefix <- "DIY 3.4 GHz Intel Core i7 6core"
# Spin up a fork-based cluster only when more than one worker is requested.
if (workers > 1) {
  library(doParallel)
  cl <- makeForkCluster(nnodes = workers)
  registerDoParallel(cl)
}
library(caret)
library(xgboost)
library(lubridate)
library(sessioninfo)
# Random suffix so repeated runs write distinct result files.
# (Drawn before set.seed so it differs across runs.)
rand_int <- sample.int(10000, 1)
set.seed(598)
# Simulated two-class data with 100 extra noise predictors.
dat <- twoClassSim(2000, noiseVars = 100)
# 10-fold CV repeated 5 times; "random" search over the grid below.
ctrl <- trainControl(method = "repeatedcv",
                     repeats = 5,
                     search = "random")
set.seed(7257)
len <- 25
# 25 randomly drawn xgboost hyperparameter candidates.
grid <-
  data.frame(
    nrounds = sample(1:1000, size = len, replace = TRUE),
    max_depth = sample(1:10, replace = TRUE, size = len),
    eta = runif(len, min = .001, max = .6),
    gamma = runif(len, min = 0, max = 10),
    colsample_bytree = runif(len, min = .3, max = .7),
    min_child_weight = sample(0:20, size = len, replace = TRUE),
    subsample = runif(len, min = .25, max = 1)
  )
set.seed(2098)
# nthread = 1 keeps xgboost single-threaded so parallelism comes only from
# the foreach cluster (the quantity being benchmarked).
mod <- train(
  Class ~ .,
  data = dat,
  method = "xgbTree",
  trControl = ctrl,
  tuneGrid = grid,
  nthread = 1
)
# NOTE(review): cl only exists when workers > 1; this line would error for a
# single-worker configuration of the same template.
stopCluster(cl)
# One-row result: elapsed minutes plus run metadata for later aggregation.
res <-
  data.frame(
    time = as.vector(mod$times$everything[3]) / 60,
    os = Sys.info()[['sysname']],
    R = R.version.string,
    when = now(),
    workers = workers,
    setting = prefix,
    method = "fork",
    task = "xgbTree"
  )
file <- paste(prefix, workers, rand_int, sep = "_")
save(res, file = paste0(file, ".RData"))
session_info()
q("no")
|
/Ubuntu/2016 DIY 3.4 GHz Intel Core i7 6core/xgb_fork_11.R
|
no_license
|
minghao2016/par-caret-bench
|
R
| false
| false
| 1,349
|
r
|
workers <- 11
prefix <- "DIY 3.4 GHz Intel Core i7 6core"
if (workers > 1) {
library(doParallel)
cl <- makeForkCluster(nnodes = workers)
registerDoParallel(cl)
}
library(caret)
library(xgboost)
library(lubridate)
library(sessioninfo)
rand_int <- sample.int(10000, 1)
set.seed(598)
dat <- twoClassSim(2000, noiseVars = 100)
ctrl <- trainControl(method = "repeatedcv",
repeats = 5,
search = "random")
set.seed(7257)
len <- 25
grid <-
data.frame(
nrounds = sample(1:1000, size = len, replace = TRUE),
max_depth = sample(1:10, replace = TRUE, size = len),
eta = runif(len, min = .001, max = .6),
gamma = runif(len, min = 0, max = 10),
colsample_bytree = runif(len, min = .3, max = .7),
min_child_weight = sample(0:20, size = len, replace = TRUE),
subsample = runif(len, min = .25, max = 1)
)
set.seed(2098)
mod <- train(
Class ~ .,
data = dat,
method = "xgbTree",
trControl = ctrl,
tuneGrid = grid,
nthread = 1
)
stopCluster(cl)
res <-
data.frame(
time = as.vector(mod$times$everything[3]) / 60,
os = Sys.info()[['sysname']],
R = R.version.string,
when = now(),
workers = workers,
setting = prefix,
method = "fork",
task = "xgbTree"
)
file <- paste(prefix, workers, rand_int, sep = "_")
save(res, file = paste0(file, ".RData"))
session_info()
q("no")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ppeldata.R
\docType{data}
\name{manifesto}
\alias{manifesto}
\title{Manifesto Project Main Dataset of Party Preferences}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 4456 rows and 175 columns.}
\source{
\href{https://manifesto-project.wzb.eu/datasets}{Manifesto Project}
}
\usage{
data(manifesto)
}
\description{
Cross-country panel data (1920-2018).
}
\details{
See the codebook here: \url{https://github.com/vladtarko/ppeldata/tree/master/codebooks}.
}
\examples{
data(manifesto)
}
\references{
Volkens, Andrea / Krause, Werner / Lehmann, Pola / Matthieß, Theres / Merz,
Nicolas / Regel, Sven / Weßels, Bernhard (2019):
The Manifesto Data Collection. Manifesto Project (MRG / CMP / MARPOR). Version 2019a.
Berlin: Wissenschaftszentrum Berlin für Sozialforschung (WZB).
\url{https://doi.org/10.25522/manifesto.mpds.2019a}
Budge, Ian / Klingemann, Hans-Dieter / Volkens, Andrea / Bara, Judith with Tanenbaum,
Eric / Fording, Richard C. / Hearl, Derek J. / Kim, Hee Min / McDonald, Michael / Mendez,
Silvia (2001): Mapping Policy Preferences. Estimates for Parties, Electors, and Governments
1945-1998. Oxford: Oxford University Press.
Klingemann, Hans-Dieter / Volkens, Andrea / Bara, Judith / Budge, Ian / McDonald, Michael (2006):
Mapping Policy Preferences II. Estimates for Parties, Electors, and Governments in Eastern Europe,
the European Union and the OECD, 1990-2003. Oxford: Oxford University Press.
}
\keyword{datasets}
|
/man/manifesto.Rd
|
no_license
|
vladtarko/ppeldata
|
R
| false
| true
| 1,573
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ppeldata.R
\docType{data}
\name{manifesto}
\alias{manifesto}
\title{Manifesto Project Main Dataset of Party Preferences}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 4456 rows and 175 columns.}
\source{
\href{https://manifesto-project.wzb.eu/datasets}{Manifesto Project}
}
\usage{
data(manifesto)
}
\description{
Cross-country panel data (1920-2018).
}
\details{
See the codebook here: \url{https://github.com/vladtarko/ppeldata/tree/master/codebooks}.
}
\examples{
data(manifesto)
}
\references{
Volkens, Andrea / Krause, Werner / Lehmann, Pola / Matthieß, Theres / Merz,
Nicolas / Regel, Sven / Weßels, Bernhard (2019):
The Manifesto Data Collection. Manifesto Project (MRG / CMP / MARPOR). Version 2019a.
Berlin: Wissenschaftszentrum Berlin für Sozialforschung (WZB).
\url{https://doi.org/10.25522/manifesto.mpds.2019a}
Budge, Ian / Klingemann, Hans-Dieter / Volkens, Andrea / Bara, Judith with Tanenbaum,
Eric / Fording, Richard C. / Hearl, Derek J. / Kim, Hee Min / McDonald, Michael / Mendez,
Silvia (2001): Mapping Policy Preferences. Estimates for Parties, Electors, and Governments
1945-1998. Oxford: Oxford University Press.
Klingemann, Hans-Dieter / Volkens, Andrea / Bara, Judith / Budge, Ian / McDonald, Michael (2006):
Mapping Policy Preferences II. Estimates for Parties, Electors, and Governments in Eastern Europe,
the European Union and the OECD, 1990-2003. Oxford: Oxford University Press.
}
\keyword{datasets}
|
###############################################################################
#
#
#
#
#
#
###############################################################################
#' Fit a shape-constrained spline model by MCMC with an annealing ladder.
#'
#' @param x Numeric predictor vector; rescaled internally to [0, 1].
#' @param y Response vector or matrix, one row per element of x.
#' @param anal_type One of 'normal', 'negbin', or 'binomial'.
#' @param isIncrease Logical; is the fitted curve increasing at the boundary?
#' @param mEXTREMA Maximum number of interior extrema allowed (1 or 2).
#' @param nsamps Total number of MCMC samples (must exceed nburn).
#' @param nburn Number of burn-in samples.
#' @param a_list Annealing temperature ladder; smallest value must be 1.
#' @return The raw sampler output of the internal .LXsample_linear_2 routine.
lxfit <- function(x,y, anal_type = 'normal', isIncrease = TRUE,mEXTREMA = 2,
                  nsamps = 50000,
                  nburn = 10000,
                  a_list = c(1,1,1,1.15,1.3,1.7,2,2.6,2.0,3.5,5,9,12,24,30,50)){
  # make sure the y variable is a matrix
  t_y = as.matrix(y,ncol=ncol(y))
  # center the x variable to be between 0 and 1
  t_x = matrix((x-min(x))/(max(x)-min(x)),ncol=1 )
  if (nrow(t_x) != nrow(t_y)){
    stop("X and Y vectors must have the same number of rows.")
  }
  # map the analysis name onto the sampler's integer code
  if (anal_type == 'normal'){
    analysis = 1
  }else if (anal_type == 'negbin'){
    analysis = 2
  }else if (anal_type == 'binomial'){
    analysis = 3
  }else {
    # FIX: previously interpolated the undefined variable 'analy_type', which
    # raised "object not found" instead of this informative message.
    stop(sprintf("Only Normal 'normal,' Negative Binomial 'negbin', and binomial 'binomial'
                 models are supported at this time. User supplied %s", anal_type))
  }
  # error check the annealing schedule: make sure 1 is the smallest value and
  # the ladder is sorted
  a_list = sort(a_list)
  if (a_list[1] != 1){
    stop("The minimum value in the annealing list must be 1")
  }
  if (length(a_list)==1){
    stop("Annealing list must have more than one element")
  }
  if (nsamps < nburn){
    stop("Number of samples must be greater than the number of burnin samples.")
  }
  if (nsamps < 0){
    stop("Number of samples must be greater than zero.")
  }
  if (nburn < 0){
    # message now matches the actual behavior (nburn is clamped to one)
    warning("Burn in samples less than zero. Setting burn in samples to one.")
    nburn = 1;
  }
  # translate the shape constraint into the sampler's (M, AXTREMA) encoding:
  # M is a large signed slope bound, AXTREMA the allowed number of extrema
  if (mEXTREMA == 2){
    if (isIncrease == TRUE){
      M = 100
    }else{
      M= -100
    }
    AXTREMA = 2
  }
  if (mEXTREMA == 1)
  {
    if (isIncrease == TRUE){
      M = -100
    }else{
      M= 100
    }
    AXTREMA = 1
  }
  return (.LXsample_linear_2(t_x,t_y,a_list,nsamps,nburn,analysis,M,AXTREMA))
}
##############################################################
# Calculate Posterior model probabilities for two changepoints
# nburn - is the number of burnin samples to ignore
# nsamps - is the maximum sample - needs to be less than
# the number of samples used in the fit
##############################################################
##############################################################
# Posterior model probabilities for the two-changepoint model.
# Classifies each retained draw (nburn:nsamps) of the changepoint
# pair (sa1, sa2) into one of five curve shapes and reports the
# empirical frequency of each shape.
##############################################################
calculateProbs_2 <- function(fit,nburn,nsamps){
  a1 = fit$sa1
  a2 = fit$sa2
  if (nsamps > length(a2)){
    stop("Number of samples (nsamps) must be less than or equal to the total number of
         samples")
  }
  keep <- nburn:nsamps
  s1 <- a1[keep]
  s2 <- a2[keep]
  # interval membership of each changepoint: strictly inside (-0.5, 0.5)?
  mid1 <- s1 > -0.5 & s1 < 0.5
  mid2 <- s2 > -0.5 & s2 < 0.5
  # the five categories are mutually exclusive, so one additive encoding
  # per draw reproduces the original sequential updates exactly
  SHAPES <- 1 +
    4 * (mid1 & mid2) +                                              # 's' shaped
    2 * ((s1 <= -0.5 & s2 >= 0.5) | (s1 >= 0.5 & s2 <= -0.5)) +      # monotone decreasing
    3 * ((s1 <= -0.5 & s2 <= -0.5) | (s1 >= 0.5 & s2 >= 0.5)) +      # monotone increasing
    1 * ((mid1 & s2 <= -0.5) | (mid2 & s1 <= -0.5))                  # J-shaped
  pr1 = mean(SHAPES == 5) #S SHAPED
  pr2 = mean(SHAPES == 3) #Monotone Decreasing
  pr3 = mean(SHAPES == 4) #Monotone Increasing
  pr4 = mean(SHAPES == 2) #J shaped
  pr5 = 1-pr1-pr2-pr3-pr4 # n- shaped
  return (list(pr1=pr1,pr2=pr2,pr3=pr3,pr4=pr4,pr5=pr5,SHAPES = SHAPES))
}
#####################################################
#####################################################
##############################################################
# Calculate Posterior model probabilities for two changepoints
# nburn - is the number of burnin samples to ignore
# nsamps - is the maximum sample - needs to be less than
# the number of samples used in the fit
##############################################################
calculateProbs_1 <- function(fit,nburn,nsamps){
  # Posterior shape probabilities for the single-changepoint model, computed
  # from the retained draws nburn:nsamps of the changepoint location sa1.
  draws <- fit$sa1[nburn:nsamps]
  p.inc <- mean(draws < -0.5)   # monotone increasing
  p.dec <- mean(draws > 0.5)    # monotone decreasing
  list(pr1 = p.inc, pr2 = p.dec, pr3 = 1 - p.inc - p.dec)  # remainder: n-shaped
}
#####################################################
#####################################################
# Print the Bayes factor comparing two composite shape hypotheses HA and HB.
# HA and HB are 0/1 indicator vectors over the available shape categories
# (5 categories when out$EXTREMA == 2, otherwise 3). Posterior shape
# probabilities come from calculateProbs_2 / calculateProbs_1 evaluated on
# the MCMC window out$mcmcP; prior probabilities are the hard-coded shape
# priors of the model. Output is printed with cat(); nothing is returned.
bayesFactor_2 <- function(HA,HB, out){
  if (out$EXTREMA == 2){
    # prior probabilities of the five shapes under the two-extrema model
    priorprobs <- c(0.3146,
                    0.0963,
                    0.0963,
                    0.2464,
                    0.2464)
    postProbs <- calculateProbs_2(out,out$mcmcP[1],out$mcmcP[2])
    pprobs <- c(postProbs$pr1,postProbs$pr2,postProbs$pr3,postProbs$pr4,postProbs$pr5)
    # display names depend on the direction constraint of the fit
    if (out$isIncreasing){
      testNames <- c("'~'-shaped",
                     "Monotone Decreasing",
                     "Monotone Increasing",
                     "J-shaped",
                     "n-shaped")
    }else{
      testNames <- c("Inverse '~'-shaped",
                     "Monotone Increasing",
                     "Monotone Decreasing",
                     "n-shaped",
                     "J-shaped")
    }
    # validate the hypothesis indicator vectors (typos in these messages fixed)
    if (length(HA) != 5){
      stop("Hypothesis must have 5 options")
    }
    if (length(HB) != 5){
      stop("Hypothesis must have 5 options")
    }
    if (sum(HA) >= length(HA)){
      stop("Hypothesis HA must have fewer groups than the total number of available hypotheses.")
    }
    if (sum(HB) >= length(HB)){
      stop("Hypothesis HB must have fewer groups than the total number of available hypotheses.")
    }
  }else{ #When there is just one extremum
    # prior probabilities of the three shapes under the one-extremum model
    priorprobs <- c(0.22,
                    0.22,
                    0.56)
    postProbs <- calculateProbs_1(out,out$mcmcP[1],out$mcmcP[2])
    pprobs <- c(postProbs$pr1,postProbs$pr2,postProbs$pr3)
    if (length(HA) != 3){
      stop("Hypothesis must have 3 options")
    }
    if (length(HB) != 3){
      stop("Hypothesis must have 3 options")
    }
    if (out$isIncreasing){
      testNames <- c("Monotone Decreasing",
                     "Monotone Increasing",
                     "J-shaped")
    }else{
      testNames <- c("Monotone Increasing",
                     "Monotone Decreasing",
                     "n-shaped")
    }
  }
  ###############################################################
  # Pretty-print the two hypotheses and the resulting Bayes factor
  ###############################################################
  cat('\nBayes Factor for\ncomparing Hypotheses:\n\tHA:\n')
  for (ii in seq_along(HA)){
    if (HA[ii] > 0){
      cat(sprintf("\t\t%s\n",testNames[ii]))
    }
  }
  cat("\t\t-VS-\n\tHB:\n")
  for (ii in seq_along(HB)){
    if (HB[ii] > 0){
      cat(sprintf("\t\t%s\n",testNames[ii]))
    }
  }
  cat("------------------------------------------------------------------------------\n")
  # BF(A vs B) = [P(HA | data)/P(HB | data)] * [P(HB)/P(HA)]
  temp = (sum(HB*priorprobs)/sum(HA*priorprobs))
  tempb = (sum(HA*pprobs)/sum(HB*pprobs))
  cat(sprintf("Bayes Factor: %1.3f \n",as.numeric(temp*tempb)))
  cat("------------------------------------------------------------------------------\n\n\n")
  cat("------------------------------------------------------------------------------\n")
  cat("Bayes Factor\tStrength of evidence\n")
  cat("-------------------------------------------------------------------------------\n")
  cat("1 to 3.2 not worth more than a bare mention\n")
  cat("3.2 to 10 positive\n")
  cat("10 to 31.6 strong\n")
  cat("31.6 to 100 very strong\n")
  cat(">100 decisive\n")
  cat("\tBased upon H. Jeffreys (1961).")
}
##############################################################
##############################################################
# Calculate Posterior model probabilities for two changepoints
# nburn - is the number of burnin samples to ignore
# nsamps - is the maximum sample - needs to be less than
# the number of samples used in the fit
##############################################################
##############################################################
# Posterior model probabilities for two changepoints, refined by
# inspecting the sampled spline coefficients: a draw whose base
# shape contains an entirely flat region (three consecutive zero
# betas) is downgraded to the simpler shape it implies.
# nburn  - number of burn-in samples to ignore
# nsamps - last sample to use (must not exceed the fit's length)
##############################################################
hcalculateProbs_2 <- function(fit,nburn,nsamps){
  sa1 = fit$sa1
  sa2 = fit$sa2
  if (nsamps > length(sa2)){
    stop("Number of samples (nsamps) must be less than or equal to the total number of
         samples")
  }
  # FIX: was rep(0, nburn:nsamps), which errors with "invalid 'times'
  # argument" whenever nsamps > nburn. Allocate one slot per draw; burn-in
  # entries are dropped below before the probabilities are computed.
  SHAPES = rep(0, nsamps)
  for (ii in nburn:nsamps) {
    betas = fit$beta_sample[[ii]]
    knots = c(fit$model_sample[[ii]][1,])
    betas = betas[2:length(betas)]          # drop the intercept coefficient
    flat = rep(0,length(knots)-1)
    n = length(betas)
    # decide 'base shape' from the two sampled changepoints (same encoding
    # as calculateProbs_2; the five cases are mutually exclusive)
    shape = 1; #'n' shape
    shape = ((sa1[ii] > -.5)*(sa2[ii]> -.5)*(sa1[ii] < 0.5)*(sa2[ii]< 0.5))*4 + shape; #s shaped
    shape = ((sa1[ii] <= -0.5)*(sa2[ii] >= 0.5)+(sa1[ii] >= 0.5)*(sa2[ii] <= -0.5))*2 + shape# Monotone Decreasing
    shape = ((sa1[ii] <= -0.5)*(sa2[ii] <= -0.5)+(sa1[ii] >= 0.5)*(sa2[ii] >= 0.5))*3 + shape # Monotone Increasing
    shape = ((sa1[ii] > -.5)*(sa1[ii]< 0.5)*(sa2[ii] <= -0.5)+(sa2[ii] > -.5)*(sa2[ii]< 0.5)*(sa1[ii] <= -0.5))*1 +shape#J-shaped
    # count zero coefficients contributing to each inter-knot region
    flat[1] = (betas[1] == 0) + (betas[2] == 0)
    flat[2] = (betas[2] == 0)
    flat[length(flat)] = (betas[n] == 0) + (betas[n-1] == 0)
    flat[length(flat)-1] = (betas[n-1] == 0)
    if (length(flat) > 2){
      # NOTE(review): this accumulation loop runs over 3:(n-2) and writes to
      # flat[jj-2]..flat[jj]; for small n the range is descending and index 0
      # writes are silent no-ops -- behavior preserved as-is, verify intent.
      for (jj in 3:(n-2)){
        flat[jj-2] = flat[jj-2] + (betas[jj] == 0)
        flat[jj-1] = flat[jj-1] + (betas[jj] == 0)
        flat[jj] = flat[jj] + (betas[jj] == 0)
      }
    }
    # locate which inter-knot interval each changepoint falls into
    intLoc1 = sum(sa1[ii] > knots-0.5)
    intLoc2 = sum(sa2[ii] > knots-0.5)
    temp = sort(c(intLoc1,intLoc2)+1)
    temp = c(1,temp,length(flat)+2)
    flat = c(1,( flat == 3),1) #if there are three flat betas the region is flat
    # different 'checks' depending on the starting shape of the curve
    if (shape == 5){
      left = (prod(flat[temp[1]:temp[2]]) ==1)
      center = (prod(flat[temp[2]:temp[3]]) ==1)
      right = (prod(flat[temp[3]:temp[4]]) ==1)
      if (center){
        shape = 4
      }else if (left){
        if (right){shape = 2}else{shape=3}
      }else if(right){
        shape = 1
      }
    }else if(shape == 1 ){
      temp = unique(temp)
      left = (prod(flat[temp[1]:temp[2]]) ==1)
      right = (prod(flat[temp[2]:temp[3]]) ==1)
      if (left){
        shape = 3
      }else if(right){
        shape = 4
      }
    }else if(shape == 2){
      temp = unique(temp)
      left = (prod(flat[temp[1]:temp[2]]) ==1)
      right = (prod(flat[temp[2]:temp[3]]) ==1)
      if (left){
        shape=3
      }else if(right){
        shape = 2
      }
    }
    SHAPES[ii] = shape
  }
  # FIX: restrict to the retained draws so burn-in placeholders do not
  # deflate the probabilities (mirrors calculateProbs_2's behavior).
  SHAPES <- SHAPES[nburn:nsamps]
  pr1 = mean(SHAPES == 5) #S SHAPED
  pr2 = mean(SHAPES == 3) #Monotone Decreasing
  pr3 = mean(SHAPES == 4) #Monotone Increasing
  pr4 = mean(SHAPES == 2) #J shaped
  pr5 = 1-pr1-pr2-pr3-pr4 # n- shaped
  return (list(pr1=pr1,pr2=pr2,pr3=pr3,pr4=pr4,pr5=pr5,SHAPES = SHAPES))
}
|
/R/samplerfunctions.R
|
no_license
|
david-dunson/lxsplines
|
R
| false
| false
| 11,199
|
r
|
###############################################################################
#
#
#
#
#
#
###############################################################################
lxfit <- function(x,y, anal_type = 'normal', isIncrease = TRUE,mEXTREMA = 2,
nsamps = 50000,
nburn = 10000,
a_list = c(1,1,1,1.15,1.3,1.7,2,2.6,2.0,3.5,5,9,12,24,30,50)){
#make sure the y variable is a matrix
t_y = as.matrix(y,ncol=ncol(y))
#center the x variable to be between 0 and 1
t_x = matrix((x-min(x))/(max(x)-min(x)),ncol=1 )
if (nrow(t_x) != nrow(t_y)){
stop("X and Y vectors must have the same number of rows.")
}
if (anal_type == 'normal'){
analysis = 1
}else if (anal_type == 'negbin'){
analysis = 2
}else if (anal_type == 'binomial'){
analysis = 3
}else {
stop(sprintf("Only Normal 'normal,' Negative Binomial 'negbin', and binomial 'binomial'
models are supported at this time. User supplied %s",analy_type))
}
#error check the JK quantity making sure 1 is in the list and
#it is sorted
a_list = sort(a_list)
if (a_list[1] != 1){
stop("The minimum value in the anealling list must be 1")
}
if (length(a_list)==1){
stop("Anealling list must have more than one element")
}
if (nsamps < nburn){
stop("Number of samples must be greater than the number of burnin samples.")
}
if (nsamps < 0){
stop("Number of samples must be greater than zero.")
}
if (nburn < 0){
warning("Burn in samples less than zero. Setting burn in samples to zero.")
nburn = 1;
}
if (mEXTREMA == 2){
if (isIncrease == TRUE){
M = 100
}else{
M= -100
}
AXTREMA = 2
}
if (mEXTREMA == 1)
{
if (isIncrease == TRUE){
M = -100
}else{
M= 100
}
AXTREMA = 1
}
return (.LXsample_linear_2(t_x,t_y,a_list,nsamps,nburn,analysis,M,AXTREMA))
}
##############################################################
# Calculate posterior model probabilities for two changepoints.
# Each retained MCMC draw of the two slope-change parameters
# (fit$sa1, fit$sa2) is classified into one of five curve shapes;
# the returned probabilities are the empirical frequencies.
#   fit    - fit object carrying numeric vectors sa1 and sa2
#   nburn  - index of the first sample to use (burn-in is ignored)
#   nsamps - index of the last sample to use; must be <= the number
#            of stored samples
# Returns a list with pr1..pr5 (S-shaped, monotone decreasing,
# monotone increasing, J-shaped, n-shaped) and SHAPES, the per-draw
# classification codes.
##############################################################
calculateProbs_2 <- function(fit, nburn, nsamps){
  sa1 = fit$sa1
  sa2 = fit$sa2
  if (nsamps > length(sa2)){
    stop("Number of samples (nsamps) must be less than or equal to the total number of
         samples")
  }
  # Post burn-in draws. The shape rules below are purely elementwise,
  # so the whole classification is vectorized instead of looping over
  # draws one at a time.
  a1 = sa1[nburn:nsamps]
  a2 = sa2[nburn:nsamps]
  # Base code 1 = 'n' shape; the parameter regions below are mutually
  # exclusive, so at most one increment fires per draw.
  SHAPES = rep(1, length(a1))
  SHAPES = SHAPES + 4 * ((a1 > -0.5) & (a1 < 0.5) & (a2 > -0.5) & (a2 < 0.5))           # 5: S-shaped
  SHAPES = SHAPES + 2 * (((a1 <= -0.5) & (a2 >= 0.5)) | ((a1 >= 0.5) & (a2 <= -0.5)))   # 3: monotone decreasing
  SHAPES = SHAPES + 3 * (((a1 <= -0.5) & (a2 <= -0.5)) | ((a1 >= 0.5) & (a2 >= 0.5)))   # 4: monotone increasing
  SHAPES = SHAPES + 1 * (((a1 > -0.5) & (a1 < 0.5) & (a2 <= -0.5)) |
                         ((a2 > -0.5) & (a2 < 0.5) & (a1 <= -0.5)))                     # 2: J-shaped
  pr1 = mean(SHAPES == 5)           # S-shaped
  pr2 = mean(SHAPES == 3)           # monotone decreasing
  pr3 = mean(SHAPES == 4)           # monotone increasing
  pr4 = mean(SHAPES == 2)           # J-shaped
  pr5 = 1 - pr1 - pr2 - pr3 - pr4   # n-shaped (remaining mass)
  return(list(pr1 = pr1, pr2 = pr2, pr3 = pr3, pr4 = pr4, pr5 = pr5, SHAPES = SHAPES))
}
#####################################################
#####################################################
##############################################################
# Posterior model probabilities for a single changepoint.
#   fit    - fit object carrying the numeric vector sa1
#   nburn  - first sample to keep (earlier burn-in is ignored)
#   nsamps - last sample to keep
# Returns pr1 (monotone increasing), pr2 (monotone decreasing)
# and pr3 (n-shaped, the remaining mass).
##############################################################
calculateProbs_1 <- function(fit, nburn, nsamps){
  # Retained post burn-in draws of the slope-change parameter.
  draws <- fit$sa1[nburn:nsamps]
  # Empirical frequency of each region of the parameter space.
  p_below <- mean(draws < -0.5)  # monotone increasing
  p_above <- mean(draws > 0.5)   # monotone decreasing
  list(pr1 = p_below,
       pr2 = p_above,
       pr3 = 1 - p_below - p_above)  # n-shaped
}
#####################################################
#####################################################
##############################################################
# Print a Bayes factor comparing two composite shape hypotheses.
#   HA, HB - 0/1 indicator vectors selecting which shape categories
#            belong to each hypothesis; must have length 5 when
#            out$EXTREMA == 2 and length 3 otherwise.
#   out    - fit object carrying EXTREMA, isIncreasing and, in
#            out$mcmcP, c(nburn, nsamps) forwarded to the posterior
#            probability calculation.
# Side effects only: the hypothesis comparison, the Bayes factor and
# a Jeffreys (1961) evidence scale are written to the console.
# NOTE(review): the user-facing messages spell "Hyptohesis"; runtime
# strings are deliberately left untouched in this documentation pass.
##############################################################
bayesFactor_2 <- function(HA,HB, out){
  if (out$EXTREMA == 2){
    # Prior probabilities of the five shape categories
    # (S, decreasing, increasing, J, n). Hard-coded constants;
    # presumably derived from the prior on (sa1, sa2) -- TODO confirm.
    priorprobs <- c(0.3146,
                    0.0963,
                    0.0963,
                    0.2464,
                    0.2464)
    # Posterior category frequencies from the two-changepoint sampler.
    postProbs <- calculateProbs_2(out,out$mcmcP[1],out$mcmcP[2])
    pprobs <- c(postProbs$pr1,postProbs$pr2,postProbs$pr3,postProbs$pr4,postProbs$pr5)
    # Category labels flip depending on the direction of the extrema.
    if (out$isIncreasing){
      testNames <- c("'~'-shaped",
                     "Monotone Decreasing",
                     "Monotone Increasing",
                     "J-shaped",
                     "n-shaped")
    }else{
      testNames <- c("Inverse '~'-shaped",
                     "Monotone Increasing",
                     "Monotone Decreasing",
                     "n-shaped",
                     "J-shaped")
    }
    # Each hypothesis must select a strict, non-empty subset of the
    # five categories.
    if (length(HA) != 5){
      stop("Hyptohesis must have 5 options")
    }
    if (length(HB) != 5){
      stop("Hyptohesis must have 5 options")
    }
    if (sum(HA) >= length(HA)){
      stop("Hypothesis HA must have fewer groups than the total number of available hyptoheses.")
    }
    if (sum(HB) >= length(HB)){
      stop("Hypothesis HB must have fewer groups than the total number of available hyptoheses.")
    }
  }else{ #When there is just one extrema
    # Three-category prior (increasing, decreasing, n-shaped).
    priorprobs <- c(0.22,
                    0.22,
                    0.56)
    postProbs <- calculateProbs_1(out,out$mcmcP[1],out$mcmcP[2])
    pprobs <- c(postProbs$pr1,postProbs$pr2,postProbs$pr3)
    if (length(HA) != 3){
      stop("Hyptohesis must have 3 options")
    }
    if (length(HB) != 3){
      stop("Hyptohesis must have 3 options")
    }
    if (out$isIncreasing){
      testNames <- c("Monotone Decreasing",
                     "Monotone Increasing",
                     "J-shaped")
    }else{
      testNames <- c("Monotone Increasing",
                     "Monotone Decreasing",
                     "n-shaped")
    }
  }
  ###############################################################
  ###############################################################
  # Echo the two composite hypotheses by listing the category
  # names each indicator vector selects.
  cat('\nBayes Factor for\ncomparing Hypotheses:\n\tHA:\n')
  for (ii in 1:length(HA)){
    if (HA[ii] > 0){
      cat(sprintf("\t\t%s\n",testNames[ii]))
    }
  }
  cat("\t\t-VS-\n\tHB:\n")
  for (ii in 1:length(HB)){
    if (HB[ii] > 0){
      cat(sprintf("\t\t%s\n",testNames[ii]))
    }
  }
  cat("------------------------------------------------------------------------------\n")
  # Bayes factor for HA vs HB as (posterior odds) / (prior odds):
  # temp  = prior mass of HB / prior mass of HA,
  # tempb = posterior mass of HA / posterior mass of HB.
  temp = (sum(HB*priorprobs)/sum(HA*priorprobs))
  tempb = (sum(HA*pprobs)/sum(HB*pprobs))
  cat(sprintf("Bayes Factor: %1.3f \n",as.numeric(temp*tempb))) #*(sum(HB*priorprobs)/sum(HA*priorprobs)))
  cat("------------------------------------------------------------------------------\n\n\n")
  # Reference table translating Bayes-factor magnitude into strength
  # of evidence, after H. Jeffreys (1961).
  cat("------------------------------------------------------------------------------\n")
  cat("Bayes Factor\tStrength of evidence\n")
  cat("-------------------------------------------------------------------------------\n")
  cat("1 to 3.2 not worth more than a bare mention\n")
  cat("3.2 to 10 positive\n")
  cat("10 to 31.6 strong\n")
  cat("31.6 to 100 very strong\n")
  cat(">100 decisive\n")
  cat("\tBased upon H. Jeffreys (1961).")
}
##############################################################
# Calculate posterior model probabilities for two changepoints,
# additionally collapsing a draw's shape category when whole spline
# regions are flat (all betas touching the region are zero).
#   fit    - fit object carrying sa1, sa2, beta_sample (list of
#            coefficient vectors) and model_sample (list whose first
#            matrix row holds the knot locations)
#   nburn  - first sample to use; earlier burn-in draws are ignored
#   nsamps - last sample to use; must be <= the number of samples
# Returns pr1..pr5 (S, decreasing, increasing, J, n) plus SHAPES,
# the per-draw codes for the retained (post burn-in) draws.
##############################################################
hcalculateProbs_2 <- function(fit,nburn,nsamps){
  sa1 = fit$sa1
  sa2 = fit$sa2
  if (nsamps > length(sa2)){
    stop("Number of samples (nsamps) must be less than or equal to the total number of
         samples")
  }
  # BUG FIX: the original allocated rep(0, nburn:nsamps); rep() with a
  # vector 'times' argument of the wrong length errors whenever
  # nburn < nsamps. Allocate one slot per stored draw and fill the
  # positions nburn:nsamps below.
  SHAPES = rep(0, nsamps)
  for (ii in nburn:nsamps) {
    betas = fit$beta_sample[[ii]]
    knots = c(fit$model_sample[[ii]][1,])
    betas = betas[2:length(betas)]   # drop the leading (intercept) coefficient
    flat = rep(0,length(knots)-1)    # per-region count of zero betas
    n = length(betas)
    # Decide the 'base shape' from the two slope-change draws; the
    # parameter regions are mutually exclusive, so exactly one term fires.
    shape = 1; #'n' shape
    shape = ((sa1[ii] > -.5)*(sa2[ii]> -.5)*(sa1[ii] < 0.5)*(sa2[ii]< 0.5))*4 + shape; #s shaped
    shape = ((sa1[ii] <= -0.5)*(sa2[ii] >= 0.5)+(sa1[ii] >= 0.5)*(sa2[ii] <= -0.5))*2 + shape# Monotone Decreasing
    shape = ((sa1[ii] <= -0.5)*(sa2[ii] <= -0.5)+(sa1[ii] >= 0.5)*(sa2[ii] >= 0.5))*3 + shape # Monotone Increasing
    shape = ((sa1[ii] > -.5)*(sa1[ii]< 0.5)*(sa2[ii] <= -0.5)+(sa2[ii] > -.5)*(sa2[ii]< 0.5)*(sa1[ii] <= -0.5))*1 +shape#J-shaped
    # Count zero betas adjacent to the first and last regions.
    flat[1] = (betas[1] == 0) + (betas[2] == 0)
    flat[2] = (betas[2] == 0)
    flat[length(flat)] = (betas[n] == 0) + (betas[n-1] == 0)
    flat[length(flat)-1] = (betas[n-1] == 0)
    # BUG FIX: guard the interior sweep so 3:(n-2) cannot run backwards.
    # For n < 5 the original iterated jj = 3, 2, (1), double-counting
    # edge betas and writing through flat[0] / negative indices.
    if (length(flat) > 2 && n >= 5){
      for (jj in 3:(n-2)){
        flat[jj-2] = flat[jj-2] + (betas[jj] == 0)
        flat[jj-1] = flat[jj-1] + (betas[jj] == 0)
        flat[jj] = flat[jj] + (betas[jj] == 0)
      }
    }
    # Locate which region each changepoint draw falls in (draws are
    # compared against the knots shifted by -0.5).
    intLoc1 = sum(sa1[ii] > knots-0.5)
    intLoc2 = sum(sa2[ii] > knots-0.5)
    temp = sort(c(intLoc1,intLoc2)+1)
    temp = c(1,temp,length(flat)+2)
    flat = c(1,( flat == 3),1) #if there are three flat betas the region is flat; pad with sentinels
    # Collapse the base shape when the segment left of, between, or
    # right of the changepoints is entirely flat. Different 'checks'
    # apply depending on the starting shape of the curve.
    if (shape == 5){
      left = (prod(flat[temp[1]:temp[2]]) ==1)
      center = (prod(flat[temp[2]:temp[3]]) ==1)
      right = (prod(flat[temp[3]:temp[4]]) ==1)
      if (center){
        shape = 4
      }else if (left){
        if (right){shape = 2}else{shape=3}
      }else if(right){
        shape = 1
      }
    }else if(shape == 1 ){
      temp = unique(temp)
      left = (prod(flat[temp[1]:temp[2]]) ==1)
      right = (prod(flat[temp[2]:temp[3]]) ==1)
      if (left){
        shape = 3
      }else if(right){
        shape = 4
      }
    }else if(shape == 2){
      temp = unique(temp)
      left = (prod(flat[temp[1]:temp[2]]) ==1)
      right = (prod(flat[temp[2]:temp[3]]) ==1)
      if (left){
        shape=3
      }else if(right){
        shape = 2
      }
    }
    SHAPES[ii] = shape
  }
  # Keep only the post burn-in classifications before averaging, so the
  # unfilled burn-in slots (zeros) cannot dilute the probabilities.
  SHAPES = SHAPES[nburn:nsamps]
  pr1 = mean(SHAPES == 5) #S SHAPED
  pr2 = mean(SHAPES == 3) #Monotone Decreasing
  pr3 = mean(SHAPES == 4) #Monotone Increasing
  pr4 = mean(SHAPES == 2) #J shaped
  pr5 = 1-pr1-pr2-pr3-pr4 # n- shaped
  return (list(pr1=pr1,pr2=pr2,pr3=pr3,pr4=pr4,pr5=pr5,SHAPES = SHAPES))
}
|
# Exploratory analysis of the EPA National Emissions Inventory (NEI).
# Question 5: how have emissions from motor vehicle sources changed
# from 1999 to 2008 in Baltimore City (fips == "24510")?
# dataset archive
dataFile <- "exdata_data_NEI_data.zip"
# BUG FIX: dataUrl was referenced below but never defined, so the
# download failed whenever the archive was missing. This is the
# standard course download location for the NEI archive.
dataUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
# Download the archive once, if it is not already present.
if (!file.exists(dataFile)) {
  download.file(dataUrl, dataFile, mode = "wb")
}
# BUG FIX: the .rds files live inside the archive; extract them if
# they have not been unpacked yet.
if (!file.exists("summarySCC_PM25.rds")) {
  unzip(dataFile)
}
library(dplyr)
library(ggplot2)  # NOTE(review): loaded but unused (plot below uses base barplot)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Question #5:
#How have emissions from motor vehicle sources changed from 1999-2008 in
#Baltimore City?
# Identify motor-vehicle source classification codes by matching
# "vehicle" (case-insensitive) in the SCC level-two description.
vehicles <- grepl("vehicle", SCC$SCC.Level.Two, ignore.case = TRUE)
vehiclesSCC <- SCC[vehicles, ]$SCC
vehiclesNEI <- NEI[NEI$SCC %in% vehiclesSCC, ]
# Restrict to Baltimore City and total the emissions per year.
Baltimore_City <- filter(vehiclesNEI, fips == "24510")
BCM_by_year <- group_by(Baltimore_City, year)
BCM_Emission_by_year <- summarize(BCM_by_year, Emissions = sum(Emissions))
#creating plot5
png("plot5.png", height = 480, width = 480)
barplot(
  BCM_Emission_by_year$Emissions,
  names.arg = BCM_Emission_by_year$year,
  xlab = "Year",
  ylab = "PM2.5 Emissions (Tons)",
  main = "Emissions from Motor Vehicle Sources from 1999 to 2008 in Baltimore City"
)
dev.off()
|
/plot5.R
|
no_license
|
Walfare123/EDA-Week-4-Course-Project
|
R
| false
| false
| 1,057
|
r
|
# Exploratory analysis of the EPA National Emissions Inventory (NEI).
# Question 5: how have emissions from motor vehicle sources changed
# from 1999 to 2008 in Baltimore City (fips == "24510")?
# dataset archive
dataFile <- "exdata_data_NEI_data.zip"
# BUG FIX: dataUrl was referenced below but never defined, so the
# download failed whenever the archive was missing. This is the
# standard course download location for the NEI archive.
dataUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
# Download the archive once, if it is not already present.
if (!file.exists(dataFile)) {
  download.file(dataUrl, dataFile, mode = "wb")
}
# BUG FIX: the .rds files live inside the archive; extract them if
# they have not been unpacked yet.
if (!file.exists("summarySCC_PM25.rds")) {
  unzip(dataFile)
}
library(dplyr)
library(ggplot2)  # NOTE(review): loaded but unused (plot below uses base barplot)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#Question #5:
#How have emissions from motor vehicle sources changed from 1999-2008 in
#Baltimore City?
# Identify motor-vehicle source classification codes by matching
# "vehicle" (case-insensitive) in the SCC level-two description.
vehicles <- grepl("vehicle", SCC$SCC.Level.Two, ignore.case = TRUE)
vehiclesSCC <- SCC[vehicles, ]$SCC
vehiclesNEI <- NEI[NEI$SCC %in% vehiclesSCC, ]
# Restrict to Baltimore City and total the emissions per year.
Baltimore_City <- filter(vehiclesNEI, fips == "24510")
BCM_by_year <- group_by(Baltimore_City, year)
BCM_Emission_by_year <- summarize(BCM_by_year, Emissions = sum(Emissions))
#creating plot5
png("plot5.png", height = 480, width = 480)
barplot(
  BCM_Emission_by_year$Emissions,
  names.arg = BCM_Emission_by_year$year,
  xlab = "Year",
  ylab = "PM2.5 Emissions (Tons)",
  main = "Emissions from Motor Vehicle Sources from 1999 to 2008 in Baltimore City"
)
dev.off()
|
# Auto-generated fuzz-regression input for multivariance:::match_rows.
# A is a 1x3 double matrix holding extreme (subnormal-range) values;
# B is a 1x1 zero matrix.
testlist <- list(A = structure(c(1.38997190089722e-309, 3.81575932257023e-236, 3.81571422914747e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
# Invoke the (unexported) function with the crafted argument list.
result <- do.call(multivariance:::match_rows,testlist)
# Dump the result structure for the harness / valgrind run to inspect.
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613125915-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 226
|
r
|
# Auto-generated fuzz-regression input for multivariance:::match_rows.
# A is a 1x3 double matrix holding extreme (subnormal-range) values;
# B is a 1x1 zero matrix.
testlist <- list(A = structure(c(1.38997190089722e-309, 3.81575932257023e-236, 3.81571422914747e-236), .Dim = c(1L, 3L)), B = structure(0, .Dim = c(1L, 1L)))
# Invoke the (unexported) function with the crafted argument list.
result <- do.call(multivariance:::match_rows,testlist)
# Dump the result structure for the harness / valgrind run to inspect.
str(result)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_sites.R
\name{get_sites}
\alias{get_sites}
\title{Return Site Information.}
\usage{
get_sites(sitename, altmin, altmax, loc, gpid)
}
\arguments{
\item{sitename}{A character string representing the full or partial site name.}
\item{altmin}{Minimum site altitude (in m).}
\item{altmax}{Maximum site altitude (in m).}
\item{loc}{A numeric vector c(lonW, latS, lonE, latN) representing the bounding box within which to search for sites. The convention here is to use negative values for longitudes west of Greenwich and latitudes south of the equator.}
\item{gpid}{A character string or numeric value, must correspond to a valid geopolitical identity in the Neotoma Database. Use get.tables('GeoPoliticalUnits') for a list of acceptable values, or link here: http://api.neotomadb.org/apdx/geopol.htm}
}
\description{
Deprecated, see \code{get_site}
}
\author{
Simon J. Goring \email{simon.j.goring@gmail.com}
}
\references{
Neotoma Project Website: http://www.neotomadb.org
API Reference: http://api.neotomadb.org/doc/resources/contacts
}
\keyword{IO}
\keyword{connection}
|
/man/get_sites.Rd
|
no_license
|
Nitin-Joshi-perk/neotoma
|
R
| false
| false
| 1,168
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_sites.R
\name{get_sites}
\alias{get_sites}
\title{Return Site Information.}
\usage{
get_sites(sitename, altmin, altmax, loc, gpid)
}
\arguments{
\item{sitename}{A character string representing the full or partial site name.}
\item{altmin}{Minimum site altitude (in m).}
\item{altmax}{Maximum site altitude (in m).}
\item{loc}{A numeric vector c(lonW, latS, lonE, latN) representing the bounding box within which to search for sites. The convention here is to use negative values for longitudes west of Greenwich and latitudes south of the equator.}
\item{gpid}{A character string or numeric value, must correspond to a valid geopolitical identity in the Neotoma Database. Use get.tables('GeoPoliticalUnits') for a list of acceptable values, or link here: http://api.neotomadb.org/apdx/geopol.htm}
}
\description{
Deprecated, see \code{get_site}
}
\author{
Simon J. Goring \email{simon.j.goring@gmail.com}
}
\references{
Neotoma Project Website: http://www.neotomadb.org
API Reference: http://api.neotomadb.org/doc/resources/contacts
}
\keyword{IO}
\keyword{connection}
|
## TODO: when it's get stable, define billing object
## PrivilegesEnum <- setSingleEnum("Privileges", c(""))
## Billing
## to make it simple to update, return a list, not an object, because no action defined an this object
## Reference class mirroring an API billing-group resource. Fields map
## 1:1 onto the JSON payload; no actions are defined on this object.
Billing <- setRefClass("Billing", contains = "Item",
                       fields = list(id = "characterORNULL",
                                     name = "characterORNULL",
                                     owner = "characterORNULL",
                                     privileges = "listORNULL",
                                     type = "characterORNULL",
                                     pending = "logicalORNULL",
                                     disabled = "logicalORNULL",
                                     active = "logicalORNULL",
                                     balance = "listORNULL",
                                     project_breakdown = "listORNULL",
                                     total_spending = "listORNULL"), ## 1.1
                       methods = list(
                           initialize = function(id = NULL,
                                                 name = NULL,
                                                 owner = NULL, privileges = list(),
                                                 type = NULL, pending = NULL,
                                                 disabled = NULL, active = NULL, balance = list(),
                                                 project_breakdown = list(),
                                                 total_spending = list(), ...){
                               id <<- id
                               name <<- name
                               owner <<- owner
                               privileges <<- privileges
                               type <<- type
                               ## BUG FIX: 'pending' was accepted as an argument but
                               ## never stored, leaving the field uninitialized.
                               pending <<- pending
                               disabled <<- disabled
                               active <<- active
                               balance <<- balance
                               ## for breakdown
                               project_breakdown <<- project_breakdown
                               total_spending <<- total_spending
                               callSuper(...)
                           },
                           show = function(){
                               ## Pretty-print the fields (now includes 'pending',
                               ## which the original display omitted).
                               .showFields(.self, "== Billing ==",
                                           values = c("id", "href", "name",
                                                      "owner", "privileges", "type",
                                                      "pending",
                                                      "disabled", "active", "balance",
                                                      "project_breakdown",
                                                      "total_spending"))
                           }
                       ))
## Convert one parsed API list entry into a Billing object.
.asBilling <- function(x){
    Billing(id = x$id,
            href = x$href,
            name = x$name,
            owner = x$owner,
            privileges = x$privileges,
            type = x$type,
            ## BUG FIX: forward the 'pending' flag, which was silently dropped.
            pending = x$pending,
            disabled = x$disabled,
            active = x$active,
            balance = x$balance,
            response = response(x),
            ## for breakdown
            project_breakdown = x$project_breakdown,
            total_spending = x$total_spending)
}
# Homogeneous list container holding zero or more Billing objects.
BillingList <- setListClass("Billing", contains = "Item0")
# Convert a parsed API page into a BillingList: each entry of x$items
# becomes a Billing object; the page href and the raw HTTP response
# are carried over onto the container.
.asBillingList <- function(x){
    obj <- BillingList(lapply(x$items, .asBilling))
    obj@href <- x$href
    obj@response <- response(x)
    obj
}
|
/R/Billing-class.R
|
permissive
|
teamcgc/sevenbridges
|
R
| false
| false
| 3,346
|
r
|
## TODO: when it's get stable, define billing object
## PrivilegesEnum <- setSingleEnum("Privileges", c(""))
## Billing
## to make it simple to update, return a list, not an object, because no action defined an this object
## Reference class mirroring an API billing-group resource. Fields map
## 1:1 onto the JSON payload; no actions are defined on this object.
Billing <- setRefClass("Billing", contains = "Item",
                       fields = list(id = "characterORNULL",
                                     name = "characterORNULL",
                                     owner = "characterORNULL",
                                     privileges = "listORNULL",
                                     type = "characterORNULL",
                                     pending = "logicalORNULL",
                                     disabled = "logicalORNULL",
                                     active = "logicalORNULL",
                                     balance = "listORNULL",
                                     project_breakdown = "listORNULL",
                                     total_spending = "listORNULL"), ## 1.1
                       methods = list(
                           initialize = function(id = NULL,
                                                 name = NULL,
                                                 owner = NULL, privileges = list(),
                                                 type = NULL, pending = NULL,
                                                 disabled = NULL, active = NULL, balance = list(),
                                                 project_breakdown = list(),
                                                 total_spending = list(), ...){
                               id <<- id
                               name <<- name
                               owner <<- owner
                               privileges <<- privileges
                               type <<- type
                               ## BUG FIX: 'pending' was accepted as an argument but
                               ## never stored, leaving the field uninitialized.
                               pending <<- pending
                               disabled <<- disabled
                               active <<- active
                               balance <<- balance
                               ## for breakdown
                               project_breakdown <<- project_breakdown
                               total_spending <<- total_spending
                               callSuper(...)
                           },
                           show = function(){
                               ## Pretty-print the fields (now includes 'pending',
                               ## which the original display omitted).
                               .showFields(.self, "== Billing ==",
                                           values = c("id", "href", "name",
                                                      "owner", "privileges", "type",
                                                      "pending",
                                                      "disabled", "active", "balance",
                                                      "project_breakdown",
                                                      "total_spending"))
                           }
                       ))
## Convert one parsed API list entry into a Billing object.
.asBilling <- function(x){
    Billing(id = x$id,
            href = x$href,
            name = x$name,
            owner = x$owner,
            privileges = x$privileges,
            type = x$type,
            ## BUG FIX: forward the 'pending' flag, which was silently dropped.
            pending = x$pending,
            disabled = x$disabled,
            active = x$active,
            balance = x$balance,
            response = response(x),
            ## for breakdown
            project_breakdown = x$project_breakdown,
            total_spending = x$total_spending)
}
# Homogeneous list container holding zero or more Billing objects.
BillingList <- setListClass("Billing", contains = "Item0")
# Convert a parsed API page into a BillingList: each entry of x$items
# becomes a Billing object; the page href and the raw HTTP response
# are carried over onto the container.
.asBillingList <- function(x){
    obj <- BillingList(lapply(x$items, .asBilling))
    obj@href <- x$href
    obj@response <- response(x)
    obj
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.