blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
617db0953d2221bc518b97298420881a35769a4e
b4eacbe6efcedd50ddb3b4974006a02aeefff9d9
/cachematrix.R
4a0a1bd01c1b225cdcf12b843f615c2024ea3af8
[]
no_license
kelly-tidwell/ProgrammingAssignment2
061c3d901e79def620ddebc82e0be75e252ea6b3
220139c635ca8f17d0e41c771c8cc4fb29519f35
refs/heads/master
2021-01-20T21:44:40.590358
2015-02-22T17:27:21
2015-02-22T17:27:21
31,049,610
0
0
null
2015-02-20T03:33:52
2015-02-20T03:33:51
null
UTF-8
R
false
false
1,577
r
cachematrix.R
## makeCacheMatrix creates a special "matrix" object that can cache its
## inverse. It returns a list of four functions:
##   1. set:    set the value of the matrix (and clear the cached inverse)
##   2. get:    get the value of the matrix
##   3. setinv: store the inverse in the cache
##   4. getinv: retrieve the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  xinv <- NULL  # cached inverse; NULL until computed
  set <- function(y) {
    # '<<-' assigns in the enclosing (closure) environment, so the cached
    # state stays private to this object rather than leaking to the caller
    x <<- y
    xinv <<- NULL  # invalidate the cache whenever the matrix changes
  }
  # Bug fix: original had 'get<function() x' (a '<' comparison, which
  # errors at runtime) where assignment 'get <- function() x' was intended.
  get <- function() x                    # return the stored matrix
  setinv <- function(inv) xinv <<- inv   # store the computed inverse
  getinv <- function() xinv              # return the cached inverse (or NULL)
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix. If the inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## cached via setinv(), and returned.
##
## x   : object created by makeCacheMatrix
## ... : additional arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  m <- x$getinv()  # NULL when the inverse has not been computed yet
  # Bug fix: original tested 'lis.null(m)' (undefined function) where the
  # cache-hit test '!is.null(m)' was intended.
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()        # fetch the underlying matrix
  m <- solve(data, ...)  # compute the inverse
  x$setinv(m)            # cache it for subsequent calls
  m  # Return a matrix that is the inverse of 'x'
}
4a5cef0bafff7c76111fe9db398be34882b76b76
9e8e728f8d616a50313ff81d52d49e634435f49f
/src/GLFMR/f_p_1.R
33b08bae21ff2ef577de84a2bb12c7bf3276e521
[]
no_license
tartaruszen/GLFM
abb5ee055bf9942ad0762eaca56dd63e5853a578
7c4c14785f9c68263f0a224e535b659618926c28
refs/heads/master
2020-12-02T22:40:35.500264
2017-07-03T21:45:22
2017-07-03T21:45:22
null
0
0
null
null
null
null
UTF-8
R
false
false
143
r
f_p_1.R
## f_p_1: transformation used by the GLFM code.
## Computes log(exp(w * (x - mu) - 1)), which for real inputs simplifies
## to w * (x - mu) - 1. NOTE(review): the parenthesization looks
## suspicious — a softplus-style log(exp(w * (x - mu)) - 1) may have been
## intended; the original behavior is preserved here pending confirmation
## against the GLFM reference implementation.
##
## x  : numeric input value(s)
## mu : numeric location parameter
## w  : numeric scaling factor; must be non-zero
## Returns the transformed numeric vector.
f_p_1 <- function(x, mu, w) {
  if (w == 0) {
    stop('scaling factor should never be 0')
  }
  # Bug fix: the original left the result as the value of an assignment
  # (returned invisibly); make the return value explicit and visible.
  log(exp(w * (x - mu) - 1))
}
e5d8925caade7ff87dde7c01a5b3c967973ec805
6c7368ddcd2bd20aee6b7eb4d56fd7e2291ae3d2
/postgre-new/RCode/thesis_figures.R
701ac9aa4e7895889f42a1c5e60e798b6101ea6f
[]
no_license
tectronics/hackystat-ui-trajectory
77e6b04a8165810285f8a7b9532571019d0f23aa
f138117dae63ae5df4912d11f36d1470ef1995c3
refs/heads/master
2018-01-11T15:16:27.040944
2014-10-08T16:04:28
2014-10-08T16:04:28
46,141,008
0
0
null
null
null
null
UTF-8
R
false
false
15,018
r
thesis_figures.R
## # Author: seninp # modified: 12-2013 ## ## get connection require(RMySQL) require(ggplot2) require(scales) require(gridExtra) require(Cairo) # require(splines) require(MASS) library(zoo) # # connect to DB # con <- dbConnect(MySQL(), user="postgre", password="postgre", dbname="postgre2", host="localhost") # # qery DB function # commits_summary = function(start,end){ res <- dbGetQuery(con, paste("select count(distinct(c.commit_hash)) freq, ", "WEEK(c.utc_time,1) week from `change` c where ", "c.utc_time between \"",start,"\" AND \"", end,"\" group by week",sep="")) res } # # get dataframe of weekly commits # dat=data.frame(week=c(1:53)) for(interval in c(1996:2013)){ yearly_commits = commits_summary(paste(interval,"-01-01",sep=""), paste(interval,"-12-30",sep="")) tmp = data.frame(week=c(1:53)) tmp = merge(tmp,yearly_commits,all.x=T) dat = cbind(dat,tmp$freq) } # # massage it # names(dat) = c("week",paste(c(1996:2013))) dm = melt(dat,id.var="week") # # plot weekly averages # p_commits <- ggplot(dm, aes(factor(week), value)) + geom_boxplot() + scale_x_discrete("Week of the year") + scale_y_continuous("Commits") + ggtitle("Weekly commits, PostgreSQL") p_commits sprintf("%f", pi) y1996=commits_summary("1996-01-01","1997-01-01") y1997=commits_summary("1996-01-01","1997-01-01") y1998=commits_summary("1996-01-01","1997-01-01") y1999=commits_summary("1996-01-01","1997-01-01") y1996=commits_summary("1996-01-01","1997-01-01") y1996=commits_summary("1996-01-01","1997-01-01") # get the numbers of commits over time c_query <- dbGetQuery(con, paste("select count(distinct(c.commit_hash)) freq, ", "DATE_FORMAT(c.utc_time, \"%Y-%m-%d\") cdate from `change` c group by cdate;")) commits = data.frame(date=as.POSIXlt(c_query[,2]),commits=c_query[,1]) pc <- ggplot(data=commits, aes(x=date, y=commits)) + geom_line() pc x<-zoo(commits$commits, commits$date); x_smooth <- rollmean(x, 60, fill = list(mean(commits$commits), NULL, mean(commits$commits))) commits$smooth <- coredata(x_smooth) 
p_speed <- ggplot(commits, aes(date, smooth)) + geom_line() + ggtitle("Commits, smoothed") + theme(axis.text.y=element_blank()) p_speed # get the numbers of commits over time c_query <- dbGetQuery(con, paste("select count(distinct(c.commit_hash)) freq, ", "YEARWEEK(c.utc_time) cdate from `change` c group by cdate;")) commits = data.frame(date=c_query[,2],commits=c_query[,1]) pc <- ggplot(data=commits, aes(x=date, y=commits)) + geom_line() pc # paste("select sum(",type,") af, DATE_FORMAT(c.author_date, \"%Y-%m-%d\") adate ", # "from `change` c group by adate",sep="")) res } # make a timeseries out of the data # dat = read.csv("data/commit_fest.csv",header=F) d_fest <- data.frame(start=as.Date(dat[,2]), end=as.Date(dat[,3]), y=c(rep(c(-0.2,-0.2),8)), col=rep(c("blue","red"),8)) dat = read.csv("data/releases_wiki",header=F) dat=dat[3:21,] d_release <- data.frame(release=dat[,1],date=as.Date(dat[,2]), desc=dat[,3]) cf_summary = function(type,start,end){ res <- dbGetQuery(con, paste("select sum(",type,") af, DATE_FORMAT(c.author_date, \"%Y-%m-%d\") adate ", "from `change` c where c.`author_date` between \"",start,"\" AND \"", end,"\" group by adate",sep="")) # paste("select sum(",type,") af, DATE_FORMAT(c.author_date, \"%Y-%m-%d\") adate ", # "from `change` c group by adate",sep="")) res } # commits_per_day = function(start,end){ res <- dbGetQuery(con, paste("select count(distinct(id)) af, DATE_FORMAT(c.author_date, \"%Y-%m-%d\") adate ", "from `change` c where c.`author_date` between \"",start,"\" AND \"", end,"\" group by adate",sep="")) res } # # # total commits commits = commits_per_day("1996-07-01","2013-05-01") commits=data.frame(date=as.Date(commits$adate),value=commits$af) (p_commits <- ggplot(commits, aes(x=date, y=value)) + geom_line() + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=50), col="red") + ggtitle("Commits per day, PostgreSQL") + theme(legend.position = "none", 
axis.text.x = element_text(angle = 45, hjust = 1))) # # plot a smoothed version (p_commits_s <- ggplot(data, aes(x=date, y=value)) + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=12), col="red") + ggtitle("Commits per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1)) + stat_smooth(method = "lm", formula = y ~ ns(x,70))) # Cairo(width = 800, height = 450, file="figures/release_commits.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units = "px", dpi = 82) print(arrangeGrob(p_commits, p_commits_s, ncol=1, heights=c(2/3,1/3))) dev.off() # # # added files added_files=cf_summary("added_files","1996-07-01","2013-05-01") added_files=data.frame(date=as.Date(added_files$adate),value=added_files$af) (p_added_files <- ggplot(added_files, aes(x=date, y=value)) + geom_line() + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=150), col="red") + ggtitle("Added files per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1))) # # plot a smoothed version (p_added_files_s <- ggplot(added_files, aes(x=date, y=value)) + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=9), col="red") + ggtitle("Added files per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1)) + stat_smooth(method = "lm", formula = y ~ ns(x,70))) # Cairo(width = 800, height = 450, file="figures/release_added_files.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units = "px", dpi = 82) print(arrangeGrob(p_added_files, p_added_files_s, ncol=1, heights=c(2/3,1/3))) dev.off() # # # edited files edited_files=cf_summary("edited_files","1996-07-01","2013-05-01") 
edited_files=data.frame(date=as.Date(edited_files$adate),value=edited_files$af) (p_edited_files <- ggplot(edited_files, aes(x=date, y=value)) + geom_line() + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=2000), col="red") + ggtitle("Edited files per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1))) # # plot a smoothed version (p_edited_files_s <- ggplot(edited_files, aes(x=date, y=value)) + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=70), col="red") + ggtitle("Edited files per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1)) + stat_smooth(method = "lm", formula = y ~ ns(x,70))) # Cairo(width = 800, height = 450, file="figures/release_edited_files.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units = "px", dpi = 82) print(arrangeGrob(p_edited_files, p_edited_files_s, ncol=1, heights=c(2/3,1/3))) dev.off() # # # deleted files deleted_files=cf_summary("removed_files","1996-07-01","2013-05-01") deleted_files=data.frame(date=as.Date(deleted_files$adate),value=deleted_files$af) (p_deleted_files <- ggplot(deleted_files, aes(x=date, y=value)) + geom_line() + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=180), col="red") + ggtitle("Deleted files per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1))) # # plot a smoothed version (p_deleted_files_s <- ggplot(deleted_files, aes(x=date, y=value)) + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=5), col="red") + ggtitle("Deleted files per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle 
= 45, hjust = 1)) + stat_smooth(method = "lm", formula = y ~ ns(x,50))) # Cairo(width = 800, height = 450, file="figures/release_deleted_files.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units = "px", dpi = 82) print(arrangeGrob(p_deleted_files, p_deleted_files_s, ncol=1, heights=c(2/3,1/3))) dev.off() Cairo(width = 800, height = 150, file="figures/release_commits_smoothed.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units = "px", dpi = 82) print(arrangeGrob(p_commits_s, ncol=1)) dev.off() Cairo(width = 800, height = 450, file="figures/release_files_smoothed.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units = "px", dpi = 82) print(arrangeGrob(p_added_files_s, p_edited_files_s, p_deleted_files_s, ncol=1)) dev.off() # # # LOC Business # # # # # added LOC added_lines=cf_summary("added_lines","1996-07-01","2013-05-01") #added_lines$af[added_lines$af>(mean(added_lines$af)+2*sd(added_lines$af))] = 2*sd(added_lines$af) added_lines=data.frame(date=as.Date(added_lines$adate),value=added_lines$af) (p_added_lines <- ggplot(added_lines, aes(x=date, y=value)) + geom_line() + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=150000), col="red") + ggtitle("Added lines per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1))) # # plot a smoothed version (p_added_lines_s <- ggplot(added_lines, aes(x=date, y=value)) + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=4000), col="red") + ggtitle("Added lines per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1)) + stat_smooth(method = "lm", formula = y ~ ns(x,70))) # Cairo(width = 800, height = 450, file="figures/release_added_lines.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units 
= "px", dpi = 82) print(arrangeGrob(p_added_lines, p_added_lines_s, ncol=1, heights=c(2/3,1/3))) dev.off() # # # edited lines edited_lines=cf_summary("edited_lines","1996-07-01","2013-05-01") edited_lines=data.frame(date=as.Date(edited_lines$adate),value=edited_lines$af) (p_edited_lines <- ggplot(edited_lines, aes(x=date, y=value)) + geom_line() + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=150000), col="red") + ggtitle("Edited lines per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1))) # # plot a smoothed version (p_edited_lines_s <- ggplot(edited_lines, aes(x=date, y=value)) + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=2000), col="red") + ggtitle("Edited lines per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1)) + stat_smooth(method = "lm", formula = y ~ ns(x,50))) # Cairo(width = 800, height = 450, file="figures/release_edited_lines.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units = "px", dpi = 82) print(arrangeGrob(p_edited_lines, p_edited_lines_s, ncol=1, heights=c(2/3,1/3))) dev.off() # # # deleted lines deleted_lines=cf_summary("removed_lines","1996-07-01","2013-05-01") deleted_lines$af[deleted_lines$af>(mean(deleted_lines$af)+2*sd(deleted_lines$af))] = 2*sd(deleted_lines$af) deleted_lines=data.frame(date=as.Date(deleted_lines$adate),value=deleted_lines$af) (p_deleted_lines <- ggplot(deleted_lines, aes(x=date, y=value)) + geom_line() + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=8000), col="red") + ggtitle("Deleted lines per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1))) # # plot a smoothed version (p_deleted_lines_s <- 
ggplot(deleted_lines, aes(x=date, y=value)) + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=700), col="red") + ggtitle("Deleted lines per day, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1)) + stat_smooth(method = "lm", formula = y ~ ns(x,110))) # Cairo(width = 800, height = 450, file="figures/release_deleted_lines.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units = "px", dpi = 82) print(arrangeGrob(p_deleted_lines, p_deleted_lines_s, ncol=1, heights=c(2/3,1/3))) dev.off() # # # churn lines churn=cf_summary("added_lines+edited_lines+removed_lines","1996-07-01","2013-05-01") churn$af[churn$af>(mean(churn$af)+2*sd(churn$af))] = 2*sd(churn$af) churn=data.frame(date=as.Date(churn$adate),value=churn$af) (p_churn <- ggplot(churn, aes(x=date, y=value)) + geom_line() + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=18000), col="red") + ggtitle("Daily churn, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1))) # # plot a smoothed version (p_churn_s <- ggplot(churn, aes(x=date, y=value)) + scale_x_date(labels=date_format("%Y"), breaks=date_breaks("years")) + geom_segment(data=d_release, aes(x=date,y=0,xend=date,yend=2700), col="red") + ggtitle("Daily churn, PostgreSQL") + theme(legend.position = "none", axis.text.x = element_text(angle = 45, hjust = 1)) + stat_smooth(method = "lm", formula = y ~ ns(x,110))) # Cairo(width = 800, height = 450, file="figures/release_churn.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units = "px", dpi = 82) print(arrangeGrob(p_churn, p_churn_s, ncol=1, heights=c(2/3,1/3))) dev.off() Cairo(width = 800, height = 150, file="figures/release_churn_smoothed.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units = "px", dpi = 
82) print(arrangeGrob(p_churn_s, ncol=1)) dev.off() Cairo(width = 800, height = 450, file="figures/release_lines_smoothed.png", type="png", pointsize=9, bg = "transparent", canvas = "white", units = "px", dpi = 82) print(arrangeGrob(p_added_lines_s, p_edited_lines_s, p_deleted_lines_s, ncol=1)) dev.off()
9338758fda127e37063e77d19b1099280d71512b
8fafea88c3ea03f1b63dd4b2a74663d57577a3a0
/scripts/03 data_processing.R
f03ecbd7341797806b212914e1629fdf8920dccc
[]
no_license
smodestas/Seimo_rinkimai_2020
0bb6f2c01a008b191f48b93b9b4786a94e9e7476
40e88dfeb917d59840dd60d71c769adaa5896f44
refs/heads/main
2023-02-12T09:46:10.070595
2021-01-09T12:07:24
2021-01-09T12:07:24
322,553,570
0
0
null
null
null
null
UTF-8
R
false
false
10,324
r
03 data_processing.R
rm(list = objects()) ### Loading data #### vienmandat <- readRDS("./raw-data/srapped-data/vienmandat.RDS") daugiamandat <- readRDS("./raw-data/srapped-data/daugiamandat.RDS") bendras <- readRDS("./raw-data/srapped-data/bendras.RDS") #### Cleaning data #### vienmandat$Kandidatas <- vienmandat$Kandidatas %>% tolower %>% toTitleCase vienmandat[,c(3:7,10:15)] <- vienmandat[,c(3:7,10:15)] %>% mutate_at(.vars = vars(everything()), .funs = ~ str_replace(., pattern = ",", replacement = ".") %>% as.numeric) daugiamandat[,c(3:7,11:15)] <- daugiamandat[,c(3:7,11:15)] %>% mutate_at(.vars = vars(everything()), .funs = ~ str_replace(., pattern = ",", replacement = ".") %>% as.numeric) vienmandat <- vienmandat %>% mutate(Apygardos_nr = str_extract(Apygarda,"[:alnum:]{1,3}[.]") %>% str_remove("[:punct:]") %>% as.numeric, Apylinkes_nr = str_extract(apylinke,"[:alnum:]{1,3}[.]") %>% str_remove("[:punct:]") %>% as.numeric) %>% mutate_at(.vars = vars(Apygarda,apylinke), .funs = ~ str_replace(., pattern = "[:alnum:]{1,3}[.]", replacement = "") %>% str_squish) daugiamandat <- daugiamandat %>% mutate(Apygardos_nr = str_extract(Apygarda,"[:alnum:]{1,3}[.]") %>% str_remove("[:punct:]") %>% as.numeric, Apylinkes_nr = str_extract(apylinke,"[:alnum:]{1,3}[.]") %>% str_remove("[:punct:]") %>% as.numeric) %>% mutate_at(.vars = vars(Apygarda,apylinke), .funs = ~ str_replace(., pattern = "[:alnum:]{1,3}[.]", replacement = "") %>% str_squish) vienmandat <- vienmandat %>% replace_na(list(apylinkėse = 0, paštu_apylinkeje = 0, iš_viso_apylinkeje = 0, nuo_galiojančių_biuletenių_apylinkeje = 0, nuo_dalyvavusių_rinkėjų_apylinkeje = 0, nuo_dalyvavusių_rinkėjų_apylinkeje = 0, nuo_galiojančių_biuletenių_apygardoje = 0)) daugiamandat$party_color <- ifelse(daugiamandat$Pavadinimas == "Tėvynės sąjunga – Lietuvos krikščionys demokratai","green4", ifelse(daugiamandat$Pavadinimas == "Lietuvos valstiečių ir žaliųjų sąjunga", "chartreuse1", ifelse(daugiamandat$Pavadinimas == "Lietuvos socialdemokratų partija", 
"red", ifelse(daugiamandat$Pavadinimas == "Lietuvos Respublikos liberalų sąjūdis","orange", ifelse(daugiamandat$Pavadinimas == "Lietuvos lenkų rinkimų akcija - Krikščioniškų šeimų sąjunga","red4", ifelse(daugiamandat$Pavadinimas == "Darbo partija", "navy", ifelse(daugiamandat$Pavadinimas == "Laisvės partija", "maroon1", ifelse(daugiamandat$Pavadinimas == "Lietuvos socialdemokratų darbo partija","tomato","grey")))))))) vienmandat$party_color <- ifelse(vienmandat$Iškėlė == "Tėvynės sąjunga – Lietuvos krikščionys demokratai","green4", ifelse(vienmandat$Iškėlė == "Lietuvos valstiečių ir žaliųjų sąjunga", "chartreuse1", ifelse(vienmandat$Iškėlė == "Lietuvos socialdemokratų partija", "red", ifelse(vienmandat$Iškėlė == "Lietuvos Respublikos liberalų sąjūdis","orange", ifelse(vienmandat$Iškėlė == "Lietuvos lenkų rinkimų akcija - Krikščioniškų šeimų sąjunga","red4", ifelse(vienmandat$Iškėlė == "Darbo partija", "navy", ifelse(vienmandat$Iškėlė == "Laisvės partija", "maroon1", ifelse(vienmandat$Iškėlė == "Lietuvos socialdemokratų darbo partija","tomato","grey")))))))) vienmandat$apylinke <- str_replace_all(vienmandat$apylinke,".\\s",".") daugiamandat$apylinke <- str_replace_all(daugiamandat$apylinke,".\\s",".") vienmandat$Apygarda <- str_replace(vienmandat$Apygarda, "–","-") daugiamandat$Apygarda <- str_replace(daugiamandat$Apygarda, "–","-") bendras <- bendras %>% select(-`VRK suteiktas Nr.`,-`Partija, koalicija`) bendras$Pavadinimas <- bendras$Pavadinimas %>% tolower %>% toTitleCase names(bendras) <- c("partija", "apylinkėse", "paštu", "iš_viso" ,"proc_nuo_balsavusiu", "mandatai") daugiamandat <- daugiamandat %>% select(-Pirmumobalsai_apygardoje,-Pirmumobalsai_apylinkeje) #### Saving data #### saveRDS(bendras, file = "./processed-data/bendras.RDS") saveRDS(daugiamandat, file = "./processed-data/daugiamandat.RDS") saveRDS(vienmandat, file = "./processed-data/vienmandat.RDS") #### Merging election data with spatial data #### #### reading shapefile zemelapis_apylinkiu <- 
st_read("./raw-data/Apylinkiu_ribos_2020/Apylinkės_2020.shp") zemelapis_apygardu <- st_read("./raw-data/Apygardu_ribos_2020/Apygardos_2020.shp") zemelapis_apylinkiu_df <- st_transform(zemelapis_apylinkiu, '+proj=longlat +datum=WGS84') zemelapis_apygardu_df <- st_transform(zemelapis_apygardu, '+proj=longlat +datum=WGS84') names(zemelapis_apylinkiu_df)[1:5] <- c("APL_ID","APL_NUM","APL_PAV","APG_NUM","APG_PAV") names(zemelapis_apygardu_df)[3:4] <- c("APG_NUM","APG_PAV") zemelapis_apylinkiu_df <- zemelapis_apylinkiu_df %>% arrange(APG_NUM,APL_NUM) zemelapis_apygardu_df <- zemelapis_apygardu_df %>% arrange(APG_NUM) zemelapis_apylinkiu_df$APL_PAV <- str_replace_all(zemelapis_apylinkiu_df$APL_PAV,".\\s",".") rm(zemelapis_apylinkiu,zemelapis_apygardu) #### merging map data with election data #### zemelapis_apylinkiu_df_vnmd <- zemelapis_apylinkiu_df %>% left_join(vienmandat %>% select(Kandidatas,Iškėlė, iš_viso_apylinkeje, nuo_dalyvavusių_rinkėjų_apylinkeje, Apylinkes_nr, apylinke, Apygardos_nr,party_color), by = c("APG_NUM" = "Apygardos_nr", "APL_NUM" = "Apylinkes_nr", "APL_PAV" = "apylinke")) zemelapis_apygardu_df_vnmd <- zemelapis_apygardu_df %>% left_join(vienmandat %>% select(Kandidatas,Iškėlė, iš_viso_apygardoje, nuo_dalyvavusių_rinkėjų_apygardoje, Apygardos_nr, party_color), by = c("APG_NUM" = "Apygardos_nr")) zemelapis_apylinkiu_df_dgmd <- zemelapis_apylinkiu_df %>% left_join(daugiamandat %>% select(Pavadinimas,iš_viso_apylinkeje, nuo_galiojančių_biuletenių, Apygarda, Apygardos_nr, apylinke, Apylinkes_nr, party_color), by = c("APG_NUM" = "Apygardos_nr", "APL_NUM" = "Apylinkes_nr", "APL_PAV" = "apylinke")) daugiamandat$Apygardos_nr <- daugiamandat$Apygardos_nr %>% as.integer zemelapis_apygardu_df_dgmd <- zemelapis_apygardu_df %>% inner_join(daugiamandat %>% select(Pavadinimas,iš_viso_apygardoje, nuo_dalyvavusių_rinkėjų, Apygarda, Apygardos_nr, party_color), by = c("APG_NUM" = "Apygardos_nr")) names(zemelapis_apygardu_df_dgmd)[6:7] <- 
c("is_viso_apygardoje","nuo_dalyvavusiu_rinkeju") #### Saving spatial data #### saveRDS(zemelapis_apygardu_df_dgmd, "processed-data/zemelapis_apygardu_df_dgmd.RDS") saveRDS(zemelapis_apylinkiu_df_dgmd, "processed-data/zemelapis_apylinkiu_df_dgmd.RDS") saveRDS(zemelapis_apygardu_df_vnmd, "processed-data/zemelapis_apygardu_df_vnmd.RDS") saveRDS(zemelapis_apylinkiu_df_vnmd, "processed-data/zemelapis_apylinkiu_df_vnmd.RDS")
a3ec320e5ad19c6f3271252b9bd6dc7eda31a615
9e9af9030b571b5c030d6fcfe23ee87be95ced01
/_scratch/analyze_authorized_officers.R
e1f00bf37bf2728ebb7cfb0fd8310caba9be7d02
[]
no_license
monkeycycle/police-compensation
fec34d16ffc3e45b2f9abeff412c5a4da907ecdc
55527e2005d66f63f64225ee7c0207c8f2bc5085
refs/heads/main
2023-07-12T22:25:13.995619
2021-08-22T05:10:41
2021-08-22T05:10:41
383,266,415
0
0
null
null
null
null
UTF-8
R
false
false
2,160
r
analyze_authorized_officers.R
# Derive authorized-vs-actual sworn officer counts from the WPS annual
# reports. NOTE(review): relies on the global `wps_annual_reports` and on
# dplyr/tidyr being attached elsewhere in the project — confirm upstream.

# Year-by-year comparison: difference between actual and authorized
# headcount, plus year-over-year percent changes. lag() assumes the rows
# are already ordered by year — TODO confirm upstream ordering.
wps_authorized_actual_officers <- wps_annual_reports %>%
  select(year_date, total_authorized_police_members, sworn_total_actual) %>%
  mutate(
    auth_actual_diff = sworn_total_actual - total_authorized_police_members,
    pct_change_authorized = round((total_authorized_police_members/lag(total_authorized_police_members) - 1) * 100, 2),
    pct_change_actual = round((sworn_total_actual/lag(sworn_total_actual) - 1) * 100, 2)
  ) %>%
  select(
    year_date,
    total_authorized_police_members,
    pct_change_authorized,
    sworn_total_actual,
    pct_change_actual,
    auth_actual_diff,
  )

# Long ("tall") layout for plotting: one row per year/series, with the
# series name as a factor ordered authorized-then-actual.
wps_authorized_actual_officers_count_tall <- wps_authorized_actual_officers %>%
  select(
    year_date,
    total_authorized_police_members,
    sworn_total_actual,
  ) %>%
  rename(
    year_date = year_date,
    authorized = total_authorized_police_members,
    actual = sworn_total_actual,
  ) %>%
  pivot_longer(
    -year_date
  ) %>%
  mutate (
    name = factor(name, levels=c(
      "authorized",
      "actual"
    )),
    value = as.numeric(value)
  )

# Headline figures for the authorized series: latest value, min/max,
# spread, and how far the latest year sits relative to the historical peak.
wps_authorized_officers_final_year_date = wps_authorized_actual_officers %>%
  arrange(desc(year_date)) %>%
  head(1) %>%
  select(total_authorized_police_members) %>%
  pull()
wps_authorized_officers_min = min(wps_authorized_actual_officers$total_authorized_police_members)
wps_authorized_officers_max = max(wps_authorized_actual_officers$total_authorized_police_members)
wps_authorized_officers_min_max_diff = wps_authorized_officers_max - wps_authorized_officers_min
wps_authorized_officers_max_final_diff = wps_authorized_officers_final_year_date - wps_authorized_officers_max

# Same headline figures for the actual (sworn) series.
wps_actual_officers_final_year_date = wps_authorized_actual_officers %>%
  arrange(desc(year_date)) %>%
  head(1) %>%
  select(sworn_total_actual) %>%
  pull()
wps_actual_officers_min = min(wps_authorized_actual_officers$sworn_total_actual)
wps_actual_officers_max = max(wps_authorized_actual_officers$sworn_total_actual)
wps_actual_officers_min_max_diff = wps_actual_officers_max - wps_actual_officers_min
wps_actual_officers_max_final_diff = wps_actual_officers_final_year_date - wps_actual_officers_max
0a324bc21fd67fbe1c82df4ea91a2cb1709f5469
54b4976030ae6a42e10282c8f41609ef266721c9
/man/ecd.asymp_stats.Rd
5580269977df279393a6b89f0754b99ad7f062ad
[]
no_license
cran/ecd
b1be437b407e20c34d65bcf7dbee467a9556b4c1
18f3650d6dff442ee46ed7fed108f35c4a4199b9
refs/heads/master
2022-05-18T20:24:56.375378
2022-05-09T20:10:02
2022-05-09T20:10:02
48,670,406
0
0
null
null
null
null
UTF-8
R
false
true
863
rd
ecd.asymp_stats.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ecd-asymp-stats-method.R \name{ecd.asymp_stats} \alias{ecd.asymp_stats} \alias{ecd.asymp_kurtosis} \title{Compute asymptotic statistics of an ecd object} \usage{ ecd.asymp_stats(object, q) ecd.asymp_kurtosis(object, q) } \arguments{ \item{object}{an object of ecd class with quantile} \item{q}{numeric vector of quantiles} } \value{ a list of stats list, or a vector of kurtosis } \description{ The main API for asymptotic statistics. It follows the same definition of moments, except the integral of PDF is limited to a range of quantile. That is to truncate the tails. The asymptotic kurtosis is also called truncated kurtosis. } \examples{ \dontrun{ d <- ecd(1,1, with.quantile=TRUE) q <- 0.01 ecd.asymp_stats(d,q) ecd.asymp_kurtosis(d,q) } } \keyword{statistics}
a0bc6acaa5dc214a9ef83a8335cab40d540265a5
807c010451d6298f80ac5fefb91574e89f198f82
/R/SOGL_final_new.R
4eaaf11db1419d2d9f21170ad18d826e4fb7b938
[]
no_license
cran/MAMA
ebe954d0a31189b61f7071b650256f10318c9887
97d3cea2f6ac7269f618cce47931eefecfa7bfae
refs/heads/master
2020-12-24T18:04:14.838773
2013-01-28T00:00:00
2013-01-28T00:00:00
17,680,395
2
0
null
null
null
null
UTF-8
R
false
false
8,715
r
SOGL_final_new.R
# SOGL (Similarity of Ordered Gene Lists) meta-analysis functions.
# NOTE(review): these functions operate on a project-declared "MetaArray" S4
# class via the accessors GEDM(), clinical() and datanames(), and on helpers
# fold.change() / meta.test() defined elsewhere in the package.

# Rank the genes of every study by a chosen statistic.
# data    : MetaArray object (one expression matrix per study).
# varname : name of the binary clinical variable defining the two groups.
# test    : "FCH" = fold change, "T" = meta t-test statistic.
# Returns a character matrix: one column per study, rows = gene names ordered
# by decreasing statistic.
# NOTE(review): if test is neither "FCH" nor "T", `ord` is never assigned and
# the final rownames(ord) line errors — TODO confirm callers only pass these.
computeOrdering <- function(data, varname, test) {
  if (test == "FCH") {
    ord <- fold.change(data,varname)
    for (i in 1:ncol(ord)) {
      ord[,i]<- rownames(ord)[order(ord[,i], decreasing = TRUE)]
    }
  }
  if (test == "T"){
    ord<-meta.test(data, varname)$test
    for (i in 1:ncol(ord)) {
      ord[,i]<- rownames(ord)[order(ord[,i], decreasing = TRUE)]
    }
  }
  if (test == "") {}
  rownames(ord) <- NULL
  return(ord)
}

# Reverse an ordering matrix top-to-bottom (bottom-ranked genes first),
# so the same machinery can score the down-regulated end of the lists.
flip <- function(order) {
  order<-order[nrow(order):1,]
  return(order)
}

# For the top-n positions, count how many genes are shared by ALL columns of
# the ordering within the first k ranks, cumulatively for k = 1..n.
# Returns an integer vector of length n of cumulative common-gene counts.
commonGenes<-function(ord,n) {
  # worst (largest) rank of each first-column gene across the other studies
  r<-apply(ord[,-1], 2,function(x) match(ord[,1],x))
  r<-apply(r,1,max)
  or<-pmax(r[1:n],1:n)
  tmp <- table(or)
  x <- integer(n)
  x[as.integer(names(tmp))] <- tmp
  x <- cumsum(x)
  return(x[1:n])
}

# Preliminary similarity score: exponentially down-weighted sum of the
# cumulative common-gene counts. The weight exp(-alpha*k) falls below
# min.weight at k = -log(min.weight)/alpha, which caps the list depth n.
# two.sided = TRUE also scores the flipped (bottom) end of the lists.
prelimScore<-function(ordering, alpha, min.weight = 1e-05, two.sided = TRUE) {
  n<- -log(min.weight)/alpha
  comm.dir <- commonGenes(ordering,n)
  if (two.sided) {
    comm.flip <- commonGenes(flip(ordering),n)
    cg <- comm.dir + comm.flip
  } else {cg <- comm.dir}
  w <- exp(-alpha*c(1:length(cg)))
  pS<- sum(w * cg)
  #pS<- weighted.mean(cg, w)
  return(pS)
}

###
# Build B random label permutations and B subsample index sets for one study.
# id           : 0/1 class-label vector of the study.
# B            : number of permutations / subsamples.
# sample.ratio : fraction of each class kept in a subsample.
# Returns list(yperm = B x length(id) matrix of permuted labels,
#              ysubs = B x (n1+n2) matrix of subsampled column indices).
preparePermutations<-function(id, B, sample.ratio = 0.8) {
  n1 <- floor(sum(id) * sample.ratio)
  n2 <- floor(sum(1 - id) * sample.ratio)
  per<- function ( v, m) {
    x <- sort(unique(v))
    M <- matrix(NA, m, length(v))
    #M[1, ] <- v  # first row was originally the unpermuted labels - do I need it??
    for (i in 2:m) {M[i, ] <- sample(v)}
    return(M[-1,])
  }
  yperm <- per(id, B + 1)
  ysubs <- matrix(nrow = B, ncol = (n1 + n2))
  for (i in 1:B) {
    # indices assume columns are sorted: class-1 samples first, then class-0
    x <- sample(1:(sum(id)), n1)
    y <- sample((sum(id) + 1):(length(id)), n2)
    ysubs[i, ] <- c(x, y)
  }
  return(list(yperm = yperm, ysubs = ysubs))
}

# Null / resampling distributions of the similarity score.
# For every alpha, computes the score on B permuted-label datasets ("random"),
# B subsampled datasets ("subsample"), and empirical CIs of the common-gene
# curves ("empirical"). Returns list(random, empirical.ci, subsample); slots
# not requested in `which` stay NULL.
RandomScore<-function(data, varname, B, alpha, test, which=c("random", "empirical", "subsample"), two.sided = TRUE){
  N=length(clinical(data))
  n<-nrow(GEDM(data)[[1]])
  random = NULL
  empirical.ci = NULL
  subsample = NULL
  classlab<-list()
  for (i in 1:N) classlab[[i]]<-as.numeric(clinical(data)[[i]][,varname])-1
  classlab<-sapply(classlab, function(x) 1-x)
  # p has one column per study; row 1 = permuted labels, row 2 = subsample indices
  p<-sapply(classlab, preparePermutations, B)
  if ("random" %in% which) {
    pp<-p[1,]
    # score of the j-th permuted dataset, for every alpha
    Score<- function(j, data, varname, pp, test, two.sided) {
      prData<-prepareData(j, data, varname, pp, 1)
      ordering <- computeOrdering(prData, varname, test)
      comm <- commonGenes( ordering,n)
      if (two.sided) {
        comm2<- commonGenes( flip(ordering),n)
        cg <- comm + comm2
      } else cg <- comm
      SC<-sapply(as.list(alpha), function(x,n,cg) {sum(exp(-x*c(1:n))*cg)}, n,cg)
      return(SC)
    }
    random<-sapply(1:B,Score, data, varname, pp, test, two.sided)
  }
  if ("empirical" %in% which) {
    pp<-p[1,]
    # common-gene curves (top and bottom of the lists) for one permutation
    Empirical<-function(j, data, varname, p, test) {
      prData<-prepareData(j, data, varname, p, 1)
      ordering <- computeOrdering(prData, varname, test)
      cg<-commonGenes(ordering, n)
      cg2<-commonGenes(flip(ordering),n)
      res<-list(top=cg, bottom=cg2)
      return(res)
    }
    res<-sapply(1:B,Empirical, data, varname, pp, test)
    top<- t(matrix(unlist(res["top",]), nrow = B, byrow = TRUE))
    bottom<- t(matrix(unlist(res["bottom", ]), nrow = B, byrow = TRUE))
    # pointwise 95% empirical CI (plus median) over the B permutations
    top.ci<- t(apply(top, 1, quantile, probs = c(0.025, 0.5, 0.975)))
    bottom.ci<- t(apply(bottom, 1, quantile, probs = c(0.025, 0.5, 0.975)))
    empirical.ci <- list(top = top.ci, bottom = bottom.ci)
  }
  if ("subsample" %in% which) {
    pp<-p[2,]
    # score of the j-th subsampled dataset, for every alpha
    Subsample<- function(j, data, varname, pp, test, two.sided) {
      preData<-prepareData(j, data, varname, pp, 2)
      ordering <- computeOrdering(preData, varname, test)
      comm <- commonGenes( ordering,n)
      if (two.sided) {
        comm2<- commonGenes( flip(ordering),n)
        cg <- comm + comm2
      } else cg <- comm
      subSC<-sapply(as.list(alpha), function(x,n,cg) {sum(exp(-x*c(1:n))*cg)}, n,cg)
      return(subSC)
    }
    subsample<-sapply(1:B,Subsample, data, varname, pp, test, two.sided)
  }
  return(res=list(random = random, empirical.ci = empirical.ci, subsample = subsample))
}

# Build the j-th resampled MetaArray.
# type 1: replace the clinical class labels with the j-th permutation.
# type 2: keep labels but subset samples (columns) to the j-th subsample.
prepareData<-function(j, data, varname , p, type) {
  if (type == 1) {
    cl<-clinical(data)
    for (i in 1:length(cl)) {
      cl[[i]][,varname]<-p[[i]][j,]+1
    }
    perData<- new ("MetaArray", GEDM = GEDM(data), clinical = cl, datanames = datanames(data))
  }
  if (type == 2) {
    dumExpr<-list()
    dumClin<-list()
    for (i in 1:length(GEDM(data))) {
      dumExpr[[i]]<-GEDM(data)[[i]][,p[[i]][j,]]
      dum<- as.data.frame(clinical(data)[[i]][p[[i]][j,],])
      # single-column clinical frames lose their names when subset; restore them
      if (dim(dum)[2] == 1) {colnames(dum) <- varname
        rownames(dum) <- colnames(dumExpr[[i]])
      }
      dumClin[[i]]<- dum
    }
    perData<- new ("MetaArray", GEDM = dumExpr, clinical = dumClin, datanames = datanames(data))
  }
  return(perData)
}

##
# Compute candidate alpha values such that the exponential weight drops below
# <min.weight> at rank <n>. By default at most 2500 genes are scored.
computeAlpha <- function(n = NULL, min.weight = 1e-05, ngenes) {
  if (is.null(n)) {
    n <- c(100, 150, 200, 300, 400, 500, 750, 1000, 1500, 2000, 2500)
    alpha <- -log(min.weight)/n
  } else {
    alpha <- -log(min.weight)/n
  }
  # drop alphas whose implied list depth exceeds the number of genes
  select <- n <= ngenes
  alpha <- alpha[select]
  return(alpha)
}

# pAUC computation following the OrderedList package;
# select alpha as the value that best separates subsampled from random scores.
selectAlpha <- function (alpha, subsample, random){
  pAUC <- numeric(length(alpha))
  B <- dim(subsample)[2]
  # labels: 0 = random (null) score, 1 = subsample (observed-like) score
  y <- c(rep(0, B), rep(1, B))
  # empirical ROC curve of score A against label B, evaluated at FPR x
  pauc <- function(x, A, B) {
    n <- length(A)
    o <- order(A, decreasing = TRUE)
    r <- c(0, cumsum(B[o[-n]]))
    t <- (0:(n - 1)) - r
    r <- r/sum(B)
    t <- t/sum(1 - B)
    roc <- numeric(length(x))
    for (i in 1:length(x)) {
      z <- which(t <= x[i])
      z <- z[length(z)]
      roc[i] <- r[z]
    }
    return(roc)
  }
  for (i in 1:length(alpha)) {
    X <- c(random[i,], subsample[i,])
    # partial AUC up to FPR = 0.1
    pAUC[i] <- integrate(pauc, 0, 0.1, A = X, B = y, stop.on.error = FALSE)$value
  }
  x=list(alpha = alpha[which.max(pAUC)], pAUC=pAUC)
  return(x)
}

# Significance of the score: empirical p-value of the observed score against
# B scores of column-wise shuffled (random) rankings.
sigScore <- function (ranking, alpha, B, min.weight = 1e-05, two.sided = TRUE) {
  s <- prelimScore(ranking, alpha, min.weight, two.sided )
  # random rankings and their score
  rs <- numeric(B)
  rrank <- ranking
  for (i in 1:B) {
    rrank<-apply(rrank, 2, function(x) sample(x))
    rs[i]<- prelimScore(rrank, alpha, min.weight, two.sided)
  }
  # empirical probability
  return (sum(rs>=s)/B)
}

# Select the genes that carry <percent> of the total weighted score: take the
# smallest list depth y reaching that fraction, then intersect the top-y (and
# bottom-y) genes across all studies.
selectGenes<-function(ordering, alpha, percent, min.weight = 1e-05, two.sided = TRUE) {
  n<- -log(min.weight)/alpha
  comm.dir <- commonGenes(ordering,n)
  if (two.sided) {
    comm.flip <- commonGenes(flip(ordering),n)
    cg <- comm.dir + comm.flip} else cg<- comm.dir
  w <- exp(-alpha*c(1:length(cg)))
  pS <- w * cg
  # small tolerance guards against floating-point shortfall at exactly <percent>
  y <- min(which(cumsum(pS) >= percent * sum(pS) - (1e-05)))
  y1 <- colIntersect(ordering[1:y,])
  y2 <- colIntersect(flip(ordering)[1:y,])
  genes<- sort(c(y1,y2))
  return(genes)
}

# Intersection of all columns of a character matrix (assumes >= 2 columns).
colIntersect <- function(x) {
  N<-ncol(x)
  dum<-intersect(x[,1], x[,2])
  if (N >= 3) {
    for (i in 3:N) {
      dum<-intersect(dum,x[,i])
    }
  }
  return(dum)
}

# Wrapper function: full SOGL analysis pipeline.
# Orders genes, tunes alpha via pAUC, computes the similarity score, its
# empirical significance and the selected common genes.
# Returns a list of class "SOGLresult".
performSOGL <- function(data, varname, test, B, which=c("score", "empirical"), min.weight = 1e-05, two.sided = TRUE, percent = 0.95){
  cat("Processing data...")
  # all studies must share an identical gene (row) ordering
  if (!all(sapply(1:(length(GEDM(data))-1), function(x) all(rownames(GEDM(data)[[x]])==rownames(GEDM(data)[[x+1]]))))) stop("The gene expression data matrices have not equal rownames")
  all.genes<-rownames(GEDM(data)[[1]])
  ordering<- computeOrdering(data, varname, test)
  A<-computeAlpha(ngenes=nrow(GEDM(data)[[1]]))
  cat("Tuning alpha..")
  if ("empirical" %in% which) sampl <- c("random", "empirical", "subsample") else sampl <- c("random", "subsample")
  sampling<-RandomScore(data, varname, B, A, test, which=sampl)
  a<-selectAlpha(A, sampling$subsample, sampling$random)
  score<-prelimScore(ordering, a$alpha)
  n<- -log(min.weight)/a$alpha
  comm.dir <- commonGenes(ordering,n)
  if (two.sided) {
    comm.flip <- commonGenes(flip(ordering),n)
    cg <- list(top = comm.dir, bottom = comm.flip)
  } else cg <- list(top = comm.dir, bottom = NULL)
  cat("Significance and genes...")
  sig<-sigScore(ordering, a$alpha, B)
  genes<-selectGenes(ordering, a$alpha, percent)
  alph.pos<-match(a$alpha, A)
  res <- list(ordering = ordering, alpha.selected = a$alpha, alpha.considered = A, pAUC=a$pAUC, random = sampling$random[alph.pos,], subsample = sampling$subsample[alph.pos,], emp.ci = sampling$empirical.ci, common.genes = cg, score = score, significance = sig, genes = genes, all.genes=all.genes)
  class(res)<-"SOGLresult"
  return(res)
}
6c665ffc065b2954e9a176464c1ba8fb27ae73a9
b5f1967395871f81b8c4555ef196a97ced9c2f68
/man/preprocess.Rd
9a4f61a20b505252589c75bf8be749a27218288e
[]
no_license
GeertsManon/rKOMICS
03cf12b3abc7bbf478df442cb28e19bd14f7ef65
a51234afd2df5646bc751297c384856004bff6f9
refs/heads/main
2023-08-05T09:28:15.819882
2021-09-12T17:41:34
2021-09-12T17:41:34
384,369,959
0
0
null
null
null
null
UTF-8
R
false
true
2,521
rd
preprocess.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocess.R
\name{preprocess}
\alias{preprocess}
\title{Filtering of minicircle sequences}
\usage{
preprocess(files, groups, circ = TRUE, min = 500, max = 1500, writeDNA = TRUE)
}
\arguments{
\item{files}{a character vector containing the fasta file names in the format sampleA.minicircles.fasta, sampleB.minicircles.fasta,... (output of KOMICS).}

\item{groups}{a factor specifying to which group (e.g. species) the samples belong to. It should have the same length as the list of files.}

\item{circ}{a logical parameter. By default non-circularized minicircle sequences will be excluded. If interested in non-circularized sequences as well, set the parameter to FALSE.}

\item{min}{a minimum value for the minicircle sequences length. Default value is set to 500.}

\item{max}{a maximum value for the minicircle sequences length. Default value is set to 1500.}

\item{writeDNA}{a logical parameter. By default filtered minicircle sequences will be written in fasta format to the current working directory. Set to FALSE if only interested in other output values like plots and summary.}
}
\value{
\item{samples}{the sample names (based on the input files).}
\item{N_MC}{a table containing the sample name, which group it belongs to and the number of minicircle sequences (N_MC) before and after filtering.}
\item{plot}{a barplot visualizing the number of minicircle sequences per sample before and after filtering.}
\item{summary}{the total number of minicircle sequences before and after filtering.}
}
\description{
Assembling minicircle sequences with KOMICS generates individual fasta files (one per sample). The preprocess function allows you to filter the minicircle sequences based on sequence length (as the size of minicircular kDNA is species-specific and variable) and circularization success. The function will write filtered individual fasta files in the current working directory.
} \examples{ require(ggplot2) data(exData) ### setwd("") ### run function table(exData$species) pre <- preprocess(files = system.file("extdata", exData$fastafiles, package="rKOMICS"), groups = exData$species, circ = TRUE, min = 500, max = 1200, writeDNA = FALSE) pre$summary ### visualize results barplot(pre$N_MC[,"beforefiltering"], names.arg = pre$N_MC[,1], las=2, cex.names=0.4) ### alter plot pre$plot + labs(caption = paste0('N of MC sequences before and after filtering, ', Sys.Date())) }
3c681c896606ce3a6d6948aea071c1dd5d24552b
ef5f8872e4724a4be1cab2c248423bb9bbc646f1
/inst/doc/basic-dutils.R
3897578a2b2b018a055c348f9950dff845dbea61
[]
no_license
cran/mets
90b853434d5ef8b8128bce8dbd83d64d1d9ab37a
da16c5483bd64fb9dba0be3d502b98f2017c08d9
refs/heads/master
2023-01-28T20:46:30.039893
2023-01-17T08:40:07
2023-01-17T08:40:07
17,697,439
0
1
null
null
null
null
UTF-8
R
false
false
7,669
r
basic-dutils.R
# Auto-generated (purled) R code from the mets package vignette "basic-dutils".
# Each "## ----" marker delimits one vignette chunk; the statements demonstrate
# the d* data-utility verbs (dmean, dkeep, dcut, ...) on the melanoma dataset.

## ---- include = FALSE---------------------------------------------------------
# knitr chunk defaults for the rendered vignette
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
library(mets)

## -----------------------------------------------------------------------------
library(mets)
data(melanoma)

## -----------------------------------------------------------------------------
is.data.frame(melanoma)

## -----------------------------------------------------------------------------
# grouped means via formula interface
dmean(melanoma,~thick+I(log(thick)))

## -----------------------------------------------------------------------------
# "|" introduces a subset condition
dmean(melanoma,~thick+I(log(thick))|I(days>500))

## -----------------------------------------------------------------------------
dmean(melanoma,thick+I(log(thick))~sex|I(days>500))

## -----------------------------------------------------------------------------
dmean(melanoma,thick+I(log(thick))~I(dcut(days)))

## -----------------------------------------------------------------------------
# wildcard variable selection
dmean(melanoma,"s*"+"*a*"~sex|I(days>500))

## -----------------------------------------------------------------------------
# renaming, removing and keeping columns
melanoma=drename(melanoma,tykkelse~thick)
names(melanoma)

## -----------------------------------------------------------------------------
data(melanoma)
melanoma=drm(melanoma,~thick+sex)
names(melanoma)

## -----------------------------------------------------------------------------
data(melanoma)
melanoma=ddrop(melanoma,~thick+sex)
names(melanoma)

## -----------------------------------------------------------------------------
data(melanoma)
melanoma=dkeep(melanoma,~thick+sex+status+days)
names(melanoma)

## -----------------------------------------------------------------------------
data(melanoma)
ddrop(melanoma) <- ~thick+sex
names(melanoma)

## -----------------------------------------------------------------------------
data(melanoma)
names(melanoma)
melanoma=dkeep(melanoma,~days+status+.)
names(melanoma)

## -----------------------------------------------------------------------------
# structure, listing and summaries
data(melanoma)
dstr(melanoma)

## -----------------------------------------------------------------------------
dlist(melanoma)

## -----------------------------------------------------------------------------
dlist(melanoma, ~.|sex==1)

## -----------------------------------------------------------------------------
dlist(melanoma, ~ulc+days+thick+sex|sex==1)

## -----------------------------------------------------------------------------
dsummary(melanoma)

## -----------------------------------------------------------------------------
dsummary(melanoma,~thick+status+sex)

## -----------------------------------------------------------------------------
dsummary(melanoma,thick+days+status~sex)

## -----------------------------------------------------------------------------
dsummary(melanoma,thick+days+status~sex|thick<97)

## -----------------------------------------------------------------------------
dsummary(melanoma,thick+status~+1|sex==1)

## -----------------------------------------------------------------------------
dsummary(melanoma,~thick+status|sex==1)

## -----------------------------------------------------------------------------
dsummary(melanoma,thick+days+status~sex|I(thick<97 & sex==1))

## -----------------------------------------------------------------------------
# tabulation
dtable(melanoma,~status+sex)

## -----------------------------------------------------------------------------
dtable(melanoma,~status+sex+ulc,level=2)

## -----------------------------------------------------------------------------
dtable(melanoma,~status+sex+ulc,level=1)

## -----------------------------------------------------------------------------
dtable(melanoma,~status+sex+ulc+dcut(days)+I(days>300),level=1)

## -----------------------------------------------------------------------------
# sorting
data(melanoma)
mel= dsort(melanoma,~days)
dsort(melanoma) <- ~days
head(mel)

## -----------------------------------------------------------------------------
dsort(melanoma) <- ~days-status
head(melanoma)

## -----------------------------------------------------------------------------
# transformations
data(melanoma)
melanoma= transform(melanoma, thick2=thick^2, lthick=log(thick) )
dhead(melanoma)

## -----------------------------------------------------------------------------
melanoma=dtransform(melanoma,ll=thick*1.05^ulc,sex==1)
melanoma=dtransform(melanoma,ll=thick,sex!=1)
dmean(melanoma,ll~sex+ulc)

## -----------------------------------------------------------------------------
# categorizing continuous variables
melanoma=dcut(melanoma,~thick,breaks=c(0,200,500,800,2000))

## -----------------------------------------------------------------------------
dlevels(melanoma)

## -----------------------------------------------------------------------------
dtable(melanoma,~thickcat.0)

## -----------------------------------------------------------------------------
dcut(melanoma,breaks=c(0,200,500,800,2000)) <- gr.thick1~thick
dlevels(melanoma)

## -----------------------------------------------------------------------------
dcut(melanoma) <- ~ thick # new variable is thickcat.4
dlevels(melanoma)

## -----------------------------------------------------------------------------
data(melanoma)
dcut(melanoma,breaks=2) <- ~ thick # new variable is thick.2
dlevels(melanoma)

## -----------------------------------------------------------------------------
data(melanoma)
mela= dcut(melanoma,thickcat4+dayscat4~thick+days,breaks=4)
dlevels(mela)

## -----------------------------------------------------------------------------
data(melanoma)
dcut(melanoma,breaks=4) <- thickcat4+dayscat4~thick+days
dlevels(melanoma)

## -----------------------------------------------------------------------------
# base-R equivalents of dcut
melanoma$gthick = cut(melanoma$thick,breaks=c(0,200,500,800,2000))
melanoma$gthick = cut(melanoma$thick,breaks=quantile(melanoma$thick),include.lowest=TRUE)

## -----------------------------------------------------------------------------
data(melanoma)
dcut(melanoma,breaks=4) <- thickcat4~thick
dlevels(melanoma)

## -----------------------------------------------------------------------------
# factor releveling
dtable(melanoma,~thickcat4)
melanoma = drelevel(melanoma,~thickcat4,ref="(194,356]")
dlevels(melanoma)

## -----------------------------------------------------------------------------
melanoma = drelevel(melanoma,~thickcat4,ref=2)
dlevels(melanoma)

## -----------------------------------------------------------------------------
melanoma = drelevel(melanoma,~thickcat4,newlevels=1:3)
dlevels(melanoma)

## -----------------------------------------------------------------------------
dkeep(melanoma) <- ~thick+thickcat4
melanoma = drelevel(melanoma,gthick2~thickcat4,newlevels=list(1:2,3:4))
dlevels(melanoma)

## -----------------------------------------------------------------------------
dfactor(melanoma,levels=c(3,1,2,4)) <- thickcat4.2~thickcat4
dlevel(melanoma,~ "thickcat4*")
dtable(melanoma,~thickcat4+thickcat4.2)

## -----------------------------------------------------------------------------
melanoma=drelevel(melanoma,gthick3~thickcat4,newlevels=list(group1.2=1:2,group3.4=3:4))
dlevels(melanoma)

## -----------------------------------------------------------------------------
# factor labels and conversion back to numeric
data(melanoma)
melanoma = dfactor(melanoma,~status, labels=c("malignant-melanoma","censoring","dead-other"))
melanoma = dfactor(melanoma,sexl~sex,labels=c("females","males"))
dtable(melanoma,~sexl+status.f)

## -----------------------------------------------------------------------------
melanoma = dnumeric(melanoma,~sexl)
dstr(melanoma,"sex*")
dtable(melanoma,~'sex*',level=2)

## -----------------------------------------------------------------------------
sessionInfo()
b5d9d5351f098b00dbae5bb6f6f307395dcff4cd
5a3bfe0e6d0ca6beef6940cc3986edf571481388
/Project 5 -- Grupo Bimbo Predicting Demand/Cozart/XGBoost Running Sum Feature.R
908199a1c2ad4b5981a474c719f2a23430bd3e1a
[]
no_license
SathishPaloju/Data_Science_Bootcamp_Projects
8c45c5480b34681cd8660a8bcb5b111d7903558a
cd3f63516d2303623ac7f70059ea2b0459dade9c
refs/heads/master
2021-12-10T20:38:08.407939
2016-09-18T17:49:19
2016-09-18T17:49:19
null
0
0
null
null
null
null
UTF-8
R
false
false
2,492
r
XGBoost Running Sum Feature.R
# Grupo Bimbo demand prediction: XGBoost depth sweep with a running-sum
# (cumulative count of client/product pairs) feature. Trains on weeks < 9,
# validates on week 9, reports RMSLE per tree depth to Slack and CSV.

# NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd("C:/Users/Hayes/Desktop/BDS 005/Projects/Project 5 test")
library(hash)
library(data.table)
library(Metrics)
library(foreach)
library(dplyr)
library(xgboost)
library(doParallel)
registerDoParallel(2)
library(slackr)
library(Matrix)
# SECURITY(review): a live Slack API token is committed in plain text here.
# It should be revoked and loaded from an environment variable instead.
slackr_setup(channel = "@hcozart", username = "i_bimbobot", icon_emoji = "",
             incoming_webhook_url = "datasqawd.slack.com",
             api_token = "xoxb-51493476675-ZkuheKfwDSdeEtTok0NRPcG6", echo = FALSE)
train <- fread("train_with_mde_feature.csv")
# make count column for cumulative sum
train[, count := 1]
# order by week so the running sum accumulates chronologically
train = train[order(Semana)]
# create running sum per client/product pair
train[, Cum.Sum := cumsum(count), by=list(Cliente_ID,Producto_ID)]
# remove pred columns 11 and 12 (positional drop — fragile if schema changes)
train <- select(train,c(-11,-12))
# xgboost model: week 9 held out for validation
test <- train[Semana == 9, ]
train <- train[Semana < 9, ]
test[is.na(test)] <- 0
train[is.na(train)] <- 0
train.y <- train$Demanda_uni_equil
test.y <- test$Demanda_uni_equil
test$Demanda_uni_equil <- NULL
# NOTE(review): memory.size()/memory.limit() are Windows-only no-ops elsewhere
memory.size()
memory.limit(size = 20000)
train.model <- sparse.model.matrix(Demanda_uni_equil ~ ., data = train)
gc()
dtrain <- xgb.DMatrix(data = train.model, label = train.y)
watchlist <- list(train=dtrain)
# free the dense copies before the training loop
rm(train.model,train)
gc()
# candidate max_depth values to sweep
depths = c(21, 26, 27, 28, 29, 30)
results = as.numeric(1:length(depths))
for (depth_cv in 1:length(depths)) {
  set.seed(1234)
  param <- list(
    objective = "reg:linear",
    booster = "gbtree",
    eval_metric = "rmse",
    eta = 0.2,
    max_depth = depths[depth_cv]
  )
  clf <- xgb.train(
    params = param,
    data = dtrain,
    nrounds = 10,
    verbose = 1,
    watchlist = watchlist,
    maximize = FALSE
  )
  # placeholder response so the test design matrix has the same columns
  test$Demanda_uni_equil <- -1
  test.model <- sparse.model.matrix(Demanda_uni_equil ~ ., data = test)
  gc()
  preds <- predict(clf, test.model)
  test.y <- as.numeric(test.y)
  gc()
  # clamp negatives: rmsle cannot take log of values <= -1
  preds[preds < 0] = 0.1
  result = rmsle(test.y, preds)
  results[depth_cv] = result
  message = paste0("Hey Hayes ;), for depth ", depths[depth_cv], ", your rmsle was: ", result)
  slackr(message)
  # write partial results after every depth so progress survives a crash
  to_csv = data.frame(nrounds = depths, results = results)
  write.csv(to_csv, "results_with_lag_feature_weightpieces_cumsum.csv", row.names = F)
}
738be50fa5b1b28d40fd10de6463a3b3b205594e
defbf410c07e93cb4c40c32c7aae6efc91064fd0
/histogram.R
7e434e2a7402828b0f193c37985c84adbfabadca
[]
no_license
yuutingzzz/Visualizations
f78f528c2e774ffa7b85bae71d704f5a4b85069d
087531f132acaf95f0e05b2cfacf24ba1ab829b2
refs/heads/master
2022-11-09T01:17:53.841824
2020-06-24T14:26:02
2020-06-24T14:26:02
274,526,190
0
0
null
null
null
null
UTF-8
R
false
false
246
r
histogram.R
library(dslabs) data(heights) m_heights <- filter(heights,sex == "Male") head(m_heights) p <- m_heights %>% ggplot(aes(x=height)) p + geom_histogram(binwidth=1,fill="grey",col="black") + xlab("Male heights in inches") + ggtitle("Histogram")
d2fb07a0c77cf96e9c852bb6d525d758d34d1ec7
307bf2687171022411c18c7a563775639c031ab1
/R/formats.R
8105f10b6eee7346102d4580078491df0ec170c9
[]
no_license
jeremyrcoyle/skimr
bcf3d5809a27bfceffd487b7fce64d8e98f10fc6
30ba9086967cbc33df64c970d3b1639ff0542bcf
refs/heads/master
2021-05-04T22:33:23.139921
2018-02-02T21:39:50
2018-02-02T21:39:50
120,045,111
2
1
null
2018-02-03T00:15:19
2018-02-03T00:15:19
null
UTF-8
R
false
false
4,203
r
formats.R
#' @include skimr-package.R
NULL

#' Change the formatting options for printed skim objects
#'
#' Formats are dispatched according to the type of value returned by the
#' "skimmer," i.e. summary function. One special formatting "type" exists for
#' the names of the returned vector. The names are used to assign the levels for
#' statistics that have more than one value. Counts and quantiles are common
#' cases.
#'
#' When a vector is named, the name and the value are combined into a single
#' formatted value. To deal with excessively long names for factor levels,
#' only the first three characters of the name are returned by default. This
#' can be changed by setting a new value for `max_char` within the
#' `.levels` type.
#'
#' Skim uses [`format()`] to convert the numeric values returned by the summary
#' functions into displayed values. The default options are a subset of options
#' available in that function.
#'
#' @param ... Named arguments that contain named lists specifying formats to
#'   apply.
#' @param append Whether the provided options should be in addition to the
#'   defaults already in `skim`. Default is `TRUE`.
#' @return When setting formatting options, `invisible(NULL)`. When looking up
#'   values, a list of option-value pairs.
#' @examples
#' # Format numbers to have more digits
#' skim_format(numeric = list(digits = 3))
#'
#' # Show the values for the formats
#' show_formats()
#'
#' # Show 4-character names in factor levels
#' skim_format(.levels = list(max_char = 4))
#'
#' # Reset to the defaults
#' skim_format_defaults()
#' @export
skim_format <- function(..., append = TRUE) {
  # delegate to the shared option store under the "formats" environment
  skim_options(list(...), env = "formats", append = append)
}

#' @describeIn skim_format Use the default formatting options within skim
#' @export
skim_format_defaults <- function() {
  # overwrite the live option store with the package defaults
  assign("formats", .formats, envir = options)
}

#' @describeIn skim_format Show formatting options currently used, by data type.
#'   For each data type, options are returned as a list of option-value pairs.
#' @param which A character vector. One or more of the classes whose formatting
#'   options you wish to display.
#' @export
show_formats <- function(which = NULL) {
  show_options(which, "formats")
}

# Default per-type formatting options. Dot-prefixed entries (.levels,
# .align_decimal) are meta options rather than data types.
.formats <- list(
  .levels = list(max_char = 3, max_levels = 4),
  .align_decimal = TRUE,
  numeric = list(digits = 2, nsmall = 2, drop0trailing = TRUE),
  integer = list(drop0trailing = TRUE),
  character = list(width = 8),
  date = list(format = "%Y-%m-%d"),
  posixct = list(format = "%Y-%m-%d"),
  logical = list(),
  asis = list(),
  difftime = list(),
  spark = NULL
)

# Set the default formatting options at load time
options$formats <- .formats

#' Internal functions for generating formatted versions of summary
#' statistics. Generally speaking, formats are dispatched according to the
#' value that is returned by the "skimmer," i.e. formatting function.
#'
#' The existence of levels makes this a little more complicated. We check for
#' them by looking at a vector's length and create the formatted values using
#' the vector's names. If the vector is only length one, we don't care whether
#' or not it's named.
#'
#' @param x A vector of computed statistics to format.
#' @return A length-one character vector that contains a formatted version of
#'   the statistic. This is the version that should be ready for printing.
#' @noRd
get_formatted <- function(x) {
  formats <- get_formats(class(x))
  if (length(x) > 1) {
    # multi-valued statistic (e.g. counts, quantiles): format each element
    # recursively and prefix it with a truncated level name ("lev: value")
    formatted <- purrr::map(x, get_formatted)
    trimmed <- substr(names(x), 1, options$formats$.levels$max_char)
    paste(trimmed, trimws(formatted), sep = ": ")
  } else if (is.null(formats)) {
    # type with NULL options (e.g. spark): pass through untouched
    x
  } else {
    do.call(format, c(x = list(unname(x)), formats))
  }
}

#' Get the formatting options of a particular type (Internal)
#'
#' @param type A length-one character vector
#' @return A list of formatting options
#' @noRd
get_formats <- function(type) {
  low <- tolower(type)
  # first class in the vector that has a registered format wins
  id <- purrr::detect_index(low, ~.x %in% names(options$formats))
  if (id) {
    options$formats[[low[id]]]
  } else {
    warning("Skimr does not know how to format type: ",
            paste(type, collapse = ", "), ". Leaving as is.")
    list()
  }
}
f144d183b27dacdd6731fede710b16baf4734900
684ab6df320ca26f9dfcd6471742645dd688fbc2
/R/functions.R
7973cce460415ecb06ef57b29dd557f4d6fe7c62
[]
no_license
Josh-Myers/MyersMisc
0861ca1c9f67e2683d2e69fa1ce85ccc736dbb55
92ad735817931c2c9a8015029c0fc60767a0e568
refs/heads/master
2020-04-08T14:47:39.955675
2018-11-29T02:49:53
2018-11-29T02:49:53
159,449,218
0
0
null
null
null
null
UTF-8
R
false
false
34,525
r
functions.R
# package functions

#' MyCalPlot function
#'
#' My hack of rms::val.prob (validate.plot.default) so its calibration plot
#' matches the styling of my 2-panel plots. Computes validation statistics
#' (Dxy, C, R2, D, U, Q, Brier, calibration intercept/slope, Emax, Eavg,
#' Spiegelhalter Z) for predicted probabilities `p` (or logits) against binary
#' outcome `y`, and optionally draws the calibration curve with an ideal line,
#' logistic recalibration, lowess smooth and a risk-distribution rug.
#' Returns the named statistics vector (invisibly usable; also plotted).
#' NOTE(review): relies on rms/Hmisc internals (lrm.fit, cut2, val.probg) —
#' these must be attached by the package.
#' @keywords Calibration curves
#' @export
#' @examples
#' MyCalPlot()
MyCalPlot <- function (p, y, logit, group, weights = rep(1, length(y)), normwt = FALSE,
                       pl = TRUE, smooth = TRUE, logistic.cal = TRUE, xlab = "Predicted Probability",
                       ylab = "Actual Probability", lim = c(0, 1), m, g, cuts, emax.lim = c(0, 1),
                       legendloc = lim[1] + c(0.55 * diff(lim), 0.27 * diff(lim)),
                       statloc = c(0, 0.99), riskdist = "calibrated", cex = 0.7, mkh = 0.02,
                       connect.group = FALSE, connect.smooth = TRUE, g.group = 4,
                       evaluate = 100, nmin = 0)
{
  # accept either probabilities or logits and derive the other
  if (missing(p)) p <- plogis(logit) else logit <- qlogis(p)
  if (length(p) != length(y)) stop("lengths of p or logit and y do not agree")
  names(p) <- names(y) <- names(logit) <- NULL
  # Spiegelhalter's Z test of calibration
  Spi <- function(p, y) {
    z <- sum((y - p) * (1 - 2 * p))/sqrt(sum((1 - 2 * p) * (1 - 2 * p) * p * (1 - p)))
    P <- 2 * pnorm(-abs(z))
    c(Z = z, P = P)
  }
  if (!missing(group)) {
    if (length(group) == 1 && is.logical(group) && group) group <- rep("", length(y))
    if (!is.factor(group)) group <- if (is.logical(group) || is.character(group)) as.factor(group) else cut2(group, g = g.group)
    names(group) <- NULL
    nma <- !(is.na(p + y + weights) | is.na(group))
    ng <- length(levels(group))
  } else {
    nma <- !is.na(p + y + weights)
    ng <- 0
  }
  # drop incomplete observations
  logit <- logit[nma]
  y <- y[nma]
  p <- p[nma]
  # grouped analysis delegates entirely to val.probg
  if (ng > 0) {
    group <- group[nma]
    weights <- weights[nma]
    return(val.probg(p, y, group, evaluate, weights, normwt, nmin))
  }
  # degenerate case: a single distinct predicted probability
  if (length(unique(p)) == 1) {
    P <- mean(y)
    Intc <- qlogis(P)
    n <- length(y)
    D <- -1/n
    L01 <- -2 * sum(y * logit - logb(1 + exp(logit)), na.rm = TRUE)
    L.cal <- -2 * sum(y * Intc - logb(1 + exp(Intc)), na.rm = TRUE)
    U.chisq <- L01 - L.cal
    U.p <- 1 - pchisq(U.chisq, 1)
    U <- (U.chisq - 1)/n
    Q <- D - U
    spi <- unname(Spi(p, y))
    stats <- c(0, 0.5, 0, D, 0, 1, U, U.chisq, U.p, Q, mean((y - p[1])^2), Intc, 0, rep(abs(p[1] - P), 2), spi)
    names(stats) <- c("Dxy", "C (ROC)", "R2", "D", "D:Chi-sq", "D:p", "U", "U:Chi-sq", "U:p", "Q", "Brier", "Intercept", "Slope", "Emax", "Eavg", "S:z", "S:p")
    return(stats)
  }
  # exclude infinite logits (p of exactly 0 or 1) from logistic calibration
  i <- !is.infinite(logit)
  nm <- sum(!i)
  if (nm > 0) warning(paste(nm, "observations deleted from logistic calibration due to probs. of 0 or 1"))
  # f.fixed: model with slope fixed at 1 (discrimination); f.recal: recalibrated
  f.fixed <- lrm.fit(logit[i], y[i], initial = c(0, 1), maxit = 1L)
  f.recal <- lrm.fit(logit[i], y[i])
  stats <- f.fixed$stats
  n <- stats["Obs"]
  # maximum absolute calibration error over a fine probability grid
  predprob <- seq(emax.lim[1], emax.lim[2], by = 5e-04)
  lt <- f.recal$coef[1] + f.recal$coef[2] * qlogis(predprob)
  calp <- plogis(lt)
  emax <- max(abs(predprob - calp))
  # nonparametric (lowess) calibration curve and average absolute error
  Sm <- lowess(p, y, iter = 0)
  cal.smooth <- approx(Sm, xout = p, ties = mean)$y
  eavg <- mean(abs(p - cal.smooth))
  if (pl) {
    # empty canvas, then ideal 45-degree line
    plot(0.5, 0.5, xlim = lim, ylim = lim, type = "n", xlab = xlab, ylab = ylab)
    abline(0, 1, lty = 2)
    lt <- 2
    leg <- "Ideal"
    marks <- -1
    if (logistic.cal) {
      lt <- c(lt, 1)
      leg <- c(leg, "Logistic calibration")
      marks <- c(marks, -1)
    }
    if (smooth) {
      if (connect.smooth) {
        lines(Sm, lty = 1)
        lt <- c(lt, 3)
        marks <- c(marks, -1)
      } else {
        points(Sm)
        lt <- c(lt, 0)
        marks <- c(marks, 1)
      }
      leg <- c(leg, "Actual")
    }
    # optional grouped (binned) observed proportions
    if (!missing(m) | !missing(g) | !missing(cuts)) {
      if (!missing(m)) q <- cut2(p, m = m, levels.mean = TRUE, digits = 7)
      else if (!missing(g)) q <- cut2(p, g = g, levels.mean = TRUE, digits = 7)
      else if (!missing(cuts)) q <- cut2(p, cuts = cuts, levels.mean = TRUE, digits = 7)
      means <- as.numeric(levels(q))
      prop <- tapply(y, q, function(x) mean(x, na.rm = TRUE))
      points(means, prop, pch = 2)
      if (connect.group) {
        lines(means, prop)
        lt <- c(lt, 1)
      } else lt <- c(lt, 0)
      leg <- c(leg, "Grouped observations")
      marks <- c(marks, 2)
    }
  }
  # assemble validation statistics
  lr <- stats["Model L.R."]
  p.lr <- stats["P"]
  D <- (lr - 1)/n
  L01 <- -2 * sum(y * logit - logb(1 + exp(logit)), na.rm = TRUE)
  U.chisq <- L01 - f.recal$deviance[2]
  p.U <- 1 - pchisq(U.chisq, 2)
  U <- (U.chisq - 2)/n
  Q <- D - U
  Dxy <- stats["Dxy"]
  C <- stats["C"]
  R2 <- stats["R2"]
  B <- mean((p - y)^2)
  spi <- unname(Spi(p, y))
  stats <- c(Dxy, C, R2, D, lr, p.lr, U, U.chisq, p.U, Q, B, f.recal$coef, emax, spi)
  names(stats) <- c("Dxy", "C (ROC)", "R2", "D", "D:Chi-sq", "D:p", "U", "U:Chi-sq", "U:p", "Q", "Brier", "Intercept", "Slope", "Emax", "S:z", "S:p")
  stats <- c(stats, c(Eavg = eavg))
  if (pl) {
    # draw the logistic recalibration curve on the probability scale
    logit <- seq(-7, 7, length = 200)
    prob <- plogis(logit)
    pred.prob <- f.recal$coef[1] + f.recal$coef[2] * logit
    pred.prob <- plogis(pred.prob)
    if (logistic.cal) lines(prob, pred.prob, lty = 1)
    lp <- legendloc
    if (!is.logical(lp)) {
      if (!is.list(lp)) lp <- list(x = lp[1], y = lp[2])
      legend(lp, leg, lty = c(2,1), pch = marks, cex = cex, bty = "n")
    }
    # optional printed statistics inside the plot
    if (!is.logical(statloc)) {
      dostats <- c(1, 2, 3, 4, 7, 10, 11, 12, 13, 14, 15, 16)
      leg <- format(names(stats)[dostats])
      leg <- paste(leg, ":", format(stats[dostats]), sep = "")
      if (!is.list(statloc)) statloc <- list(x = statloc[1], y = statloc[2])
      text(statloc, paste(format(names(stats[dostats])), collapse = "\n"), adj = c(0, 1), cex = cex)
      text(statloc$x + 0.225 * diff(lim), statloc$y, paste(format(round(stats[dostats], 3)), collapse = "\n"), adj = c(1, 1), cex = cex)
    }
    # rug of (optionally calibrated) predicted risks along the x-axis
    if (is.character(riskdist)) {
      if (riskdist == "calibrated") {
        x <- f.recal$coef[1] + f.recal$coef[2] * qlogis(p)
        x <- plogis(x)
        x[p == 0] <- 0
        x[p == 1] <- 1
      } else x <- p
      bins <- seq(lim[1], lim[2], length = 101)
      x <- x[x >= lim[1] & x <= lim[2]]
      f <- table(cut(x, bins))
      j <- f > 0
      bins <- (bins[-101])[j]
      f <- f[j]
      f <- lim[1] + 0.15 * diff(lim) * f/max(f)
      segments(bins, 0, bins, f)
    }
  }
  stats
}

#' MyValPlot function
#'
#' My hack of the rms calibrate-plot method - set line type and legend etc.
#' Plots apparent and bias-corrected calibration curves from a `calibrate`
#' result matrix `x` (columns "predy", "calibrated.orig",
#' "calibrated.corrected") plus the ideal line, and prints/annotates mean
#' absolute and squared calibration error. Invisibly returns the vector of
#' calibration errors at the predicted values (or NULL).
#' @keywords Validation plot
#' @export
#' @examples
#' MyValPlot()
MyValPlot <- function (x, xlab, ylab, xlim, ylim, legend = TRUE, subtitles = TRUE, scat1d.opts = NULL, ...)
{
  at <- attributes(x)
  # default axis labels depend on whether the model was logistic ("lr")
  if (missing(ylab))
    ylab <- if (at$model == "lr") "Actual Probability" else paste("Observed", at$yvar.name)
  if (missing(xlab)) {
    if (at$model == "lr") {
      xlab <- paste("Predicted Pr{", at$yvar.name, sep = "")
      if (at$non.slopes == 1) {
        xlab <- if (at$lev.name == "TRUE") paste(xlab, "}", sep = "") else paste(xlab, "=", at$lev.name, "}", sep = "")
      } else xlab <- paste(xlab, ">=", at$lev.name, "}", sep = "")
    } else xlab <- paste("Predicted", at$yvar.name)
  }
  p <- x[, "predy"]
  p.app <- x[, "calibrated.orig"]
  p.cal <- x[, "calibrated.corrected"]
  if (missing(xlim) & missing(ylim))
    xlim <- ylim <- range(c(p, p.app, p.cal), na.rm = TRUE)
  else {
    if (missing(xlim)) xlim <- range(p)
    if (missing(ylim)) ylim <- range(c(p.app, p.cal, na.rm = TRUE))
  }
  plot(p, p.app, xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab, type = "n", ...)
  predicted <- at$predicted
  err <- NULL
  if (length(predicted)) {
    # calibration error at the observed predicted values
    s <- !is.na(p + p.cal)
    err <- predicted - approx(p[s], p.cal[s], xout = predicted, ties = mean)$y
    cat("\nn=", n <- length(err), " Mean absolute error=", round(mae <- mean(abs(err), na.rm = TRUE), 3), " Mean squared error=", round(mean(err^2, na.rm = TRUE), 5), "\n0.9 Quantile of absolute error=", round(quantile(abs(err), 0.9, na.rm = TRUE), 3), "\n\n", sep = "")
    if (subtitles) title(sub = paste("Mean absolute error=", round(mae, 3), " n=", n, sep = ""), cex = 0.65, adj = 1)
    # rug of predicted values (Hmisc::scat1d)
    do.call("scat1d", c(list(x = predicted), scat1d.opts))
  }
  lines(p, p.app, lty = 3)
  lines(p, p.cal, lty = 1)
  abline(a = 0, b = 1, lty = 2)
  if (subtitles) title(sub = paste("B=", at$B, "repetitions,", at$method), adj = 0)
  if (!(is.logical(legend) && !legend)) {
    # default legend position in the lower-right region
    if (is.logical(legend)) legend <- list(x = xlim[1] + 0.55 * diff(xlim), y = ylim[1] + 0.32 * diff(ylim))
    legend(legend, c("Ideal", "Apparent", "Bias-corrected"), lty = c(2, 3, 1), bty = "n")
  }
  invisible(err)
}

#' my val.prb ci.2 function
#' My hack of Steyerberg's cal.plot function - which is an adaptation of the val.prob function from rms
#' The
main thing was to get all hist bins poining up to match plot(calibrate) - I had 2 panel cal plot - needed to match the bottom histograms #' couldn't do that with val.prob because no way to control space under hist plot - could do that with this function #' changed degree to 1 to match val.prob which is what I really wanted to use in the first place #' how to install the package #' library(githubinstall) #' githubinstall("CalibrationCurves") #' library(CalibrationCurves) #' @keywords Calibration curves #' @export #' @examples #' My.val.prob.ci.2() My.val.prob.ci.2 <- function (p, y, logit, group, weights = rep(1, length(y)), normwt = F, pl = T, smooth = c("loess", "rcs", F), CL.smooth = "fill", CL.BT = F, lty.smooth = 1, col.smooth = "black", lwd.smooth = 1, nr.knots = 5, logistic.cal = F, lty.log = 1, col.log = "black", lwd.log = 1, xlab = "Predicted Probability", ylab = "Observed proportion", xlim = c(-0.02, 1), ylim = c(-0.15, 1), m, g, cuts, emax.lim = c(0, 1), legendloc = c(0.5, 0.27), statloc = c(0, 0.85), dostats = T, cl.level = 0.95, method.ci = "pepe", roundstats = 2, riskdist = "predicted", cex = 0.75, cex.leg = 0.75, connect.group = F, connect.smooth = T, g.group = 4, evaluate = 100, nmin = 0, d0lab = "0", d1lab = "1", cex.d01 = 0.7, dist.label = 0.04, line.bins = -0.05, dist.label2 = 0.03, cutoff, las = 1, length.seg = 1, y.intersp = 1, lty.ideal = 1, col.ideal = "grey", lwd.ideal = 1, ...) 
{ if (smooth[1] == F) { smooth <- "F" } smooth <- match.arg(smooth) if (!missing(p)) if (any(!(p >= 0 | p <= 1))) { stop("Probabilities can not be > 1 or < 0.") } if (missing(p)) p <- 1/(1 + exp(-logit)) else logit <- log(p/(1 - p)) if (!all(c(0, 1) %in% y)) { stop("The vector with the binary outcome can only contain the values 0 and 1.") } if (length(p) != length(y)) stop("lengths of p or logit and y do not agree") names(p) <- names(y) <- names(logit) <- NULL if (!missing(group)) { if (length(group) == 1 && is.logical(group) && group) group <- rep("", length(y)) if (!is.factor(group)) group <- if (is.logical(group) || is.character(group)) as.factor(group) else cut2(group, g = g.group) names(group) <- NULL nma <- !(is.na(p + y + weights) | is.na(group)) ng <- length(levels(group)) } else { nma <- !is.na(p + y + weights) ng <- 0 } logit <- logit[nma] y <- y[nma] p <- p[nma] if (ng > 0) { group <- group[nma] weights <- weights[nma] return(val.probg(p, y, group, evaluate, weights, normwt, nmin)) } y <- y[order(p)] logit <- logit[order(p)] p <- p[order(p)] if (length(p) > 5000 & smooth == "loess") { warning("Number of observations > 5000, RCS is recommended.", immediate. = T) } if (length(p) > 1000 & CL.BT == T) { warning("Number of observations is > 1000, this could take a while...", immediate. 
= T) } if (length(unique(p)) == 1) { P <- mean(y) Intc <- log(P/(1 - P)) n <- length(y) D <- -1/n L01 <- -2 * sum(y * logit - log(1 + exp(logit)), na.rm = T) L.cal <- -2 * sum(y * Intc - log(1 + exp(Intc)), na.rm = T) U.chisq <- L01 - L.cal U.p <- 1 - pchisq(U.chisq, 1) U <- (U.chisq - 1)/n Q <- D - U stats <- c(0, 0.5, 0, D, 0, 1, U, U.chisq, U.p, Q, mean((y - p[1])^2), Intc, 0, rep(abs(p[1] - P), 2)) names(stats) <- c("Dxy", "C (ROC)", "R2", "D", "D:Chi-sq", "D:p", "U", "U:Chi-sq", "U:p", "Q", "Brier", "Intercept", "Slope", "Emax", "Eavg", "ECI") return(stats) } i <- !is.infinite(logit) nm <- sum(!i) if (nm > 0) warning(paste(nm, "observations deleted from logistic calibration due to probs. of 0 or 1")) i.2 <- i f.or <- lrm(y[i] ~ logit[i]) f <- lrm.fit(logit[i], y[i]) cl.slope <- confint(f, level = cl.level)[2, ] f2 <- lrm.fit(offset = logit[i], y = y[i]) cl.interc <- confint(f2, level = cl.level) stats <- f$stats cl.auc <- ci.auc(y, p, cl.level, method.ci) n <- stats["Obs"] predprob <- seq(emax.lim[1], emax.lim[2], by = 5e-04) lt <- f$coef[1] + f$coef[2] * log(predprob/(1 - predprob)) calp <- 1/(1 + exp(-lt)) emax <- max(abs(predprob - calp)) if (pl) { plot(0.5, 0.5, xlim = xlim, ylim = ylim, type = "n", xlab = xlab, ylab = ylab, las = las, ...) 
clip(0, 1, 0, 1) abline(0, 1, lty = lty.ideal, col = col.ideal, lwd = lwd.ideal) do.call("clip", as.list(par()$usr)) lt <- lty.ideal lw.d <- lwd.ideal all.col <- col.ideal leg <- "Ideal" marks <- -1 if (logistic.cal) { lt <- c(lt, lty.log) lw.d <- c(lw.d, lwd.log) all.col <- c(all.col, col.log) leg <- c(leg, "Logistic calibration") marks <- c(marks, -1) } if (smooth != "F") { all.col <- c(all.col, col.smooth) } if (smooth == "loess") { Sm <- loess(y ~ p, degree = 1) Sm <- data.frame(Sm$x, Sm$fitted) Sm.01 <- Sm if (connect.smooth == T & CL.smooth != "fill") { clip(0, 1, 0, 1) lines(Sm, lty = lty.smooth, lwd = lwd.smooth, col = col.smooth) do.call("clip", as.list(par()$usr)) lt <- c(lt, lty.smooth) lw.d <- c(lw.d, lwd.smooth) marks <- c(marks, -1) } else if (connect.smooth == F & CL.smooth != "fill") { clip(0, 1, 0, 1) points(Sm, col = col.smooth) do.call("clip", as.list(par()$usr)) lt <- c(lt, 0) lw.d <- c(lw.d, 1) marks <- c(marks, 1) } if (CL.smooth == T | CL.smooth == "fill") { to.pred <- seq(min(p), max(p), length = 200) if (CL.BT == T) { cat("Bootstrap samples are being generated.\n\n\n") res.BT <- replicate(2000, BT.samples(y, p, to.pred)) CL.BT <- apply(res.BT, 1, quantile, c(0.025, 0.975)) colnames(CL.BT) <- to.pred if (CL.smooth == "fill") { clip(0, 1, 0, 1) polygon(x = c(to.pred, rev(to.pred)), y = c(CL.BT[2, ], rev(CL.BT[1, ])), col = rgb(177, 177, 177, 177, maxColorValue = 255), border = NA) if (connect.smooth == T) { lines(Sm, lty = lty.smooth, lwd = lwd.smooth, col = col.smooth) lt <- c(lt, lty.smooth) lw.d <- c(lw.d, lwd.smooth) marks <- c(marks, -1) } else if (connect.smooth == F) { points(Sm, col = col.smooth) lt <- c(lt, 0) lw.d <- c(lw.d, 1) marks <- c(marks, 1) } do.call("clip", as.list(par()$usr)) leg <- c(leg, "Actual") } else { clip(0, 1, 0, 1) lines(to.pred, CL.BT[1, ], lty = 2, lwd = 1, col = col.smooth) clip(0, 1, 0, 1) lines(to.pred, CL.BT[2, ], lty = 2, lwd = 1, col = col.smooth) do.call("clip", as.list(par()$usr)) leg <- c(leg, 
"Actual", "CL flexible") lt <- c(lt, 2) lw.d <- c(lw.d, 1) all.col <- c(all.col, col.smooth) marks <- c(marks, -1) } } else { Sm.0 <- loess(y ~ p, degree = 2) cl.loess <- predict(Sm.0, type = "fitted", se = T) clip(0, 1, 0, 1) if (CL.smooth == "fill") { polygon(x = c(Sm.0$x, rev(Sm.0$x)), y = c(cl.loess$fit + cl.loess$se.fit * 1.96, rev(cl.loess$fit - cl.loess$se.fit * 1.96)), col = rgb(177, 177, 177, 177, maxColorValue = 255), border = NA) if (connect.smooth == T) { lines(Sm, lty = lty.smooth, lwd = lwd.smooth, col = col.smooth) lt <- c(lt, lty.smooth) lw.d <- c(lw.d, lwd.smooth) marks <- c(marks, -1) } else if (connect.smooth == F) { points(Sm, col = col.smooth) lt <- c(lt, 0) lw.d <- c(lw.d, 1) marks <- c(marks, 1) } do.call("clip", as.list(par()$usr)) leg <- c(leg, "Actual") } else { lines(Sm.0$x, cl.loess$fit + cl.loess$se.fit * 1.96, lty = 2, lwd = 1, col = col.smooth) lines(Sm.0$x, cl.loess$fit - cl.loess$se.fit * 1.96, lty = 2, lwd = 1, col = col.smooth) do.call("clip", as.list(par()$usr)) leg <- c(leg, "Actual", "CL flexible") lt <- c(lt, 2) lw.d <- c(lw.d, 1) all.col <- c(all.col, col.smooth) marks <- c(marks, -1) } } } else { leg <- c(leg, "Actual") } cal.smooth <- approx(Sm.01, xout = p)$y eavg <- mean(abs(p - cal.smooth)) ECI <- mean((p - cal.smooth)^2) * 100 } if (smooth == "rcs") { par(lwd = lwd.smooth, bty = "n", col = col.smooth) if (!is.numeric(nr.knots)) { stop("Nr.knots must be numeric.") } if (nr.knots == 5) { tryCatch(rcspline.plot(p, y, model = "logistic", nk = 5, show = "prob", statloc = "none", add = T, showknots = F, xrange = c(min(na.omit(p)), max(na.omit(p))), lty = lty.smooth), error = function(e) { warning("The number of knots led to estimation problems, nk will be set to 4.", immediate. 
= T) tryCatch(rcspline.plot(p, y, model = "logistic", nk = 4, show = "prob", statloc = "none", add = T, showknots = F, xrange = c(min(na.omit(p)), max(na.omit(p))), lty = lty.smooth), error = function(e) { warning("Nk 4 also led to estimation problems, nk will be set to 3.", immediate. = T) rcspline.plot(p, y, model = "logistic", nk = 3, show = "prob", statloc = "none", add = T, showknots = F, xrange = c(min(na.omit(p)), max(na.omit(p))), lty = lty.smooth) }) }) } else if (nr.knots == 4) { tryCatch(rcspline.plot(p, y, model = "logistic", nk = 4, show = "prob", statloc = "none", add = T, showknots = F, xrange = c(min(na.omit(p)), max(na.omit(p))), lty = lty.smooth), error = function(e) { warning("The number of knots led to estimation problems, nk will be set to 3.", immediate. = T) rcspline.plot(p, y, model = "logistic", nk = 3, show = "prob", statloc = "none", add = T, showknots = F, xrange = c(min(na.omit(p)), max(na.omit(p))), lty = lty.smooth) }) } else if (nr.knots == 3) { tryCatch(rcspline.plot(p, y, model = "logistic", nk = 3, show = "prob", statloc = "none", add = T, showknots = F, xrange = c(min(na.omit(p)), max(na.omit(p))), lty = lty.smooth), error = function(e) { stop("Nk = 3 led to estimation problems.") }) } else { stop(paste("Number of knots = ", nr.knots, sep = "", ", only 5 >= nk >=3 is allowed.")) } par(lwd = 1, bty = "o", col = "black") leg <- c(leg, "Flexible calibration (RCS)", "CL flexible") lt <- c(lt, lty.smooth, 2) lw.d <- c(lw.d, rep(lwd.smooth, 2)) all.col <- c(all.col, col.smooth) marks <- c(marks, -1, -1) } if (!missing(m) | !missing(g) | !missing(cuts)) { if (!missing(m)) q <- cut2(p, m = m, levels.mean = T, digits = 7) else if (!missing(g)) q <- cut2(p, g = g, levels.mean = T, digits = 7) else if (!missing(cuts)) q <- cut2(p, cuts = cuts, levels.mean = T, digits = 7) means <- as.single(levels(q)) prop <- tapply(y, q, function(x) mean(x, na.rm = T)) points(means, prop, pch = 2, cex = 1) ng <- tapply(y, q, length) og <- tapply(y, q, sum) 
ob <- og/ng se.ob <- sqrt(ob * (1 - ob)/ng) g <- length(as.single(levels(q))) for (i in 1:g) lines(c(means[i], means[i]), c(prop[i], min(1, prop[i] + 1.96 * se.ob[i])), type = "l") for (i in 1:g) lines(c(means[i], means[i]), c(prop[i], max(0, prop[i] - 1.96 * se.ob[i])), type = "l") if (connect.group) { lines(means, prop) lt <- c(lt, 1) lw.d <- c(lw.d, 1) } else lt <- c(lt, 0) lw.d <- c(lw.d, 0) leg <- c(leg, "Grouped observations") marks <- c(marks, 2) } } lr <- stats["Model L.R."] p.lr <- stats["P"] D <- (lr - 1)/n L01 <- -2 * sum(y * logit - logb(1 + exp(logit)), na.rm = TRUE) U.chisq <- L01 - f$deviance[2] p.U <- 1 - pchisq(U.chisq, 2) U <- (U.chisq - 2)/n Q <- D - U Dxy <- stats["Dxy"] C <- stats["C"] R2 <- stats["R2"] B <- sum((p - y)^2)/n Bmax <- mean(y) * (1 - mean(y))^2 + (1 - mean(y)) * mean(y)^2 Bscaled <- 1 - B/Bmax stats <- c(Dxy, C, R2, D, lr, p.lr, U, U.chisq, p.U, Q, B, f2$coef[1], f$coef[2], emax, Bscaled) names(stats) <- c("Dxy", "C (ROC)", "R2", "D", "D:Chi-sq", "D:p", "U", "U:Chi-sq", "U:p", "Q", "Brier", "Intercept", "Slope", "Emax", "Brier scaled") if (smooth == "loess") stats <- c(stats, c(Eavg = eavg), c(ECI = ECI)) if (!missing(cutoff)) { arrows(x0 = cutoff, y0 = 0.1, x1 = cutoff, y1 = -0.025, length = 0.15) } if (pl) { if (min(p) > plogis(-7) | max(p) < plogis(7)) { lrm.fit.1 <- lrm(y[i.2] ~ qlogis(p[i.2])) if (logistic.cal) lines(p[i.2], plogis(lrm.fit.1$linear.predictors), lwd = lwd.log, lty = lty.log, col = col.log) } else { logit <- seq(-7, 7, length = 200) prob <- 1/(1 + exp(-logit)) pred.prob <- f$coef[1] + f$coef[2] * logit pred.prob <- 1/(1 + exp(-pred.prob)) if (logistic.cal) lines(prob, pred.prob, lty = lty.log, lwd = lwd.log, col = col.log) } lp <- legendloc if (!is.logical(lp)) { if (!is.list(lp)) lp <- list(x = lp[1], y = lp[2]) legend(lp, leg, lty = lt, pch = marks, cex = cex.leg, bty = "n", lwd = lw.d, col = all.col, y.intersp = y.intersp) } if (!is.logical(statloc)) { if (dostats[1] == T) { stats.2 <- paste("Calibration\n", 
"...intercept: ", sprintf(paste("%.", roundstats, "f", sep = ""), stats["Intercept"]), " (", sprintf(paste("%.", roundstats, "f", sep = ""), cl.interc[1]), " to ", sprintf(paste("%.", roundstats, "f", sep = ""), cl.interc[2]), ")", "\n", "...slope: ", sprintf(paste("%.", roundstats, "f", sep = ""), stats["Slope"]), " (", sprintf(paste("%.", roundstats, "f", sep = ""), cl.slope[1]), " to ", sprintf(paste("%.", roundstats, "f", sep = ""), cl.slope[2]), ")", "\n", "Discrimination\n", "...c-statistic: ", sprintf(paste("%.", roundstats, "f", sep = ""), stats["C (ROC)"]), " (", sprintf(paste("%.", roundstats, "f", sep = ""), cl.auc[2]), " to ", sprintf(paste("%.", roundstats, "f", sep = ""), cl.auc[3]), ")", sep = "") text(statloc[1], statloc[2], stats.2, pos = 4, cex = cex) } else { dostats <- dostats leg <- format(names(stats)[dostats]) leg <- paste(leg, ":", format(stats[dostats], digits = roundstats), sep = "") if (!is.list(statloc)) statloc <- list(x = statloc[1], y = statloc[2]) text(statloc, paste(format(names(stats[dostats])), collapse = "\n"), adj = 0, cex = cex) text(statloc$x + (xlim[2] - xlim[1])/3, statloc$y, paste(format(round(stats[dostats], digits = roundstats)), collapse = "\n"), adj = 1, cex = cex) } } if (is.character(riskdist)) { if (riskdist == "calibrated") { x <- f$coef[1] + f$coef[2] * log(p/(1 - p)) x <- 1/(1 + exp(-x)) x[p == 0] <- 0 x[p == 1] <- 1 } else x <- p bins <- seq(0, min(1, max(xlim)), length = 101) x <- x[x >= 0 & x <= 1] f0 <- table(cut(x[y == 0], bins)) f1 <- table(cut(x[y == 1], bins)) j0 <- f0 > 0 j1 <- f1 > 0 bins0 <- (bins[-101])[j0] bins1 <- (bins[-101])[j1] f0 <- f0[j0] f1 <- f1[j1] maxf <- max(f0, f1) f0 <- (0.1 * f0)/maxf f1 <- (0.1 * f1)/maxf segments(bins1, line.bins, bins1, length.seg * f1 + line.bins) segments(bins0, line.bins, bins0, length.seg * f0 + line.bins) lines(c(min(bins0, bins1) - 0.01, max(bins0, bins1) + 0.01), c(line.bins, line.bins)) text(max(bins0, bins1) + dist.label, line.bins + dist.label2, d1lab, cex = 
cex.d01) text(max(bins0, bins1) + dist.label, line.bins - dist.label2, d0lab, cex = cex.d01) } } if (dostats == T) { cat(paste("\n\n A ", cl.level * 100, "% confidence interval is given for the calibration intercept, calibration slope and c-statistic. \n\n", sep = "")) } stats } #' My plot.xmean.ordinaly from rms #' removed the xlab and ylab defaults from plot.xmean.ordinaly - so I could set my own #' @keywords Ordinal plots #' @export #' @examples #' My.plot.xmean.ordinaly() My.plot.xmean.ordinaly = function (x, data, subset, na.action, subn = TRUE, cr = FALSE, topcats = 1, cex.points = 0.75, ...) { X <- match.call(expand.dots = FALSE) X$subn <- X$cr <- X$topcats <- X$cex.points <- X$... <- NULL if (missing(na.action)) X$na.action <- na.keep Terms <- if (missing(data)) terms(x) else terms(x, data = data) X$formula <- Terms X[[1]] <- as.name("model.frame") X <- eval.parent(X) resp <- attr(Terms, "response") if (resp == 0) stop("must have a response variable") nx <- ncol(X) - 1 Y <- X[[resp]] nam <- as.character(attr(Terms, "variables")) nam <- nam[-1] dopl <- function(x, y, cr, xname, yname) { s <- !is.na(unclass(Y) + x) y <- y[s] x <- x[s] n <- length(x) f <- lrm.fit(x, y) fy <- f$freq/n ns <- length(fy) - 1 k <- ns + 1 intcept <- f$coef[1:ns] xb <- f$linear.predictors - intcept[1] xb <- sapply(intcept, "+", xb) P <- 1/(1 + exp(-xb)) P <- matrix(P, ncol = ns) P <- cbind(1, P) - cbind(P, 0) xmean.y <- tapply(x, y, mean) xp <- x * P/n xmean.y.po <- apply(xp, 2, sum)/fy yy <- 1:length(fy) rr <- c(xmean.y, xmean.y.po) if (cr) { u <- cr.setup(y) s <- u$subs yc <- u$y xc <- x[s] cohort <- u$cohort xcohort <- matrix(0, nrow = length(xc), ncol = length(levels(cohort)) - 1) xcohort[col(xcohort) == unclass(cohort) - 1] <- 1 cof <- lrm.fit(cbind(xcohort, xc), yc)$coefficients cumprob <- rep(1, n) for (j in 1:k) { P[, j] <- cumprob * (if (j == k) 1 else plogis(cof[1] + (if (j > 1) cof[j] else 0) + cof[k] * x)) cumprob <- cumprob - P[, j] } xp <- x * P/n xmean.y.cr <- 
apply(xp, 2, sum)/fy rr <- c(rr, xmean.y.cr) } plot(yy, xmean.y, type = "b", ylim = range(rr), axes = FALSE, ...) mgp.axis(1, at = yy, labels = names(fy)) mgp.axis(2) lines(yy, xmean.y.po, lty = 2, ...) if (cr) points(yy, xmean.y.cr, pch = "C", cex = cex.points) if (subn) title(sub = paste("n=", n, sep = ""), adj = 0) } for (i in 1:nx) { x <- X[[resp + i]] if (is.factor(x)) { f <- table(x) ncat <- length(f) if (ncat < 2) { warning(paste("predictor", nam[resp + i], "only has one level and is ignored")) next } nc <- min(ncat - 1, topcats) cats <- (names(f)[order(-f)])[1:nc] for (wcat in cats) { xx <- 1 * (x == wcat) xname <- paste(nam[resp + i], wcat, sep = "=") dopl(xx, Y, cr, xname, nam[resp]) } } else dopl(x, Y, cr, nam[resp + i], nam[resp]) } invisible() }
bffa4429138d0bbda5699474fbe3be0fcbf4f11f
9f1c221750fcd80b1a534752098ff5617115c395
/tests/testthat/test_readTweetData.R
6f210193aa38675034d02eaf3ab6054aeb93628c
[]
no_license
AlfonsoRReyes/CreatingPackages
857118d5e74700ab316ffa4a29b380eeeb9b455f
f9723df4f121be53bf97923fe1328edcb72c066a
refs/heads/master
2021-01-11T20:36:02.710934
2017-04-18T19:14:01
2017-04-18T19:14:01
79,151,941
0
0
null
null
null
null
UTF-8
R
false
false
704
r
test_readTweetData.R
context("readTweetData tests") test_that("readTweetData properties data.frame", { # compare dimensions, names, class, size DF in bytes, size attributes in bytes baseLoc <- system.file(package="ismbTweetAnalysis") extPath <- file.path(baseLoc, "extdata") ismb <- readTweetData(file.path(extPath, "ismb.txt"), "ismb") ismb2014 <- readTweetData(file.path(extPath, "ismb2014.txt"), "ismb2014") expect_equal(dim(ismb2014), c(243, 5)) expect_equal(names(ismb2014), c("text", "created", "id", "screenName", "hashSearch")) expect_equal(class(ismb2014), "data.frame") expect_equal(as.double(object.size(ismb2014)), 55560) expect_equal(as.double(object.size(attributes(ismb2014))), 1872) })
5c1cb8532de2d5f1c2dff6d62a90024e0b5c3a84
22b31c323642436b77685c2b54fb423fd655e547
/sequence.R
985f859fcbbd2beb055fb9cbc0503ee060aca00b
[]
no_license
SamikshaAg/R-Programming
e35a3b9bb5490fcf11a05be30c2eafca03e6c35c
9508588fef7290b623901460c46027806ffc9ee5
refs/heads/master
2020-01-23T21:44:27.017232
2016-12-04T14:21:36
2016-12-04T14:21:36
74,695,073
0
0
null
null
null
null
UTF-8
R
false
false
681
r
sequence.R
#sequence from 1 to 20 my_seq1<-1:20 #sequence from 1 to 20 using seq() function my_seq2<-seq(1,20) #sequence from 1 to 20 incremented by 0.5 my_seq3<-seq(1,20,by=0.5) #sequence of 30 numbers between 5 and 10 my_seq4<-seq(5,10,length=30) #sequence of numbers with length as the length of along.with argument my_seq5<-seq(along.with=my_seq4) #alternative function to the above type my_seq6<-seq_along(my_seq4) #replicate function my_seq7<-rep(0,times=10) #replicating each element of the given vector 10 times my_seq8<-rep(c(0,1,2),each=10) #printing all the sequence variables my_seq1 my_seq2 my_seq3 my_seq4 my_seq5 my_seq6 my_seq7 my_seq8
a6b6040ac82bc35d6ea000d2c037bb87246bb801
ca0b298e9cb0407238a28418d9419f284eb4011a
/R/func_demo.R
7ba830973de59d65ae56f71e90a1068e82dde550
[ "MIT" ]
permissive
Angelovici-Lab/BioTools
be28a84acd7fde6b3516886ece495a623a230148
c6d6e9278f19331ed991658e0673ba37e538e782
refs/heads/master
2023-06-15T06:08:50.120854
2021-07-16T22:23:49
2021-07-16T22:23:49
283,844,000
0
0
null
null
null
null
UTF-8
R
false
false
132
r
func_demo.R
#' Demo #' #' @description This is a demo function. #' @keywords Demo #' @export #' demo <- function(){ return("Demo") }
420fe8ca2ad859f6342dfbd8add8dfa45caf82e2
883a4a0c1eae84485e1d38e1635fcae6ecca1772
/nCompiler/R/NF.R
feb7f6d471ddee51debff9241d6fe93f11c870fe
[ "BSD-3-Clause" ]
permissive
nimble-dev/nCompiler
6d3a64d55d1ee3df07775e156064bb9b3d2e7df2
392aabaf28806827c7aa7b0b47f535456878bd69
refs/heads/master
2022-10-28T13:58:45.873095
2022-10-05T20:14:58
2022-10-05T20:14:58
174,240,931
56
7
BSD-3-Clause
2022-05-07T00:25:21
2019-03-07T00:15:35
R
UTF-8
R
false
false
5,630
r
NF.R
nFunctionLabelMaker <- labelFunctionCreator('nFun') ## nFunction represents a pure function, not a class, although it ## may sometimes be implemented as a class. ## In particular, when derivatives are enabled, a nFunction wil be ## implemented as a class, with the CppAD tape as member data. ## Argument types and/or passing semantics can be provided in the argument ## fields and function body or indicated in separate arguments. ## This almost allows a pure R function to be simply modified ("decorated") ## to be a nFunction, with the exception that explicit return() statements ## are required. ## nClass represents a class with R fields/methods and C fields/methods, ## all explicitly defined. ## A method in a nClass is a nFunction. ## The old.nCompiler approach of smartly determining a class definition from ## evaluated setup code is a special case that will continue to be supported. ## It will create a nClass. ## nFunctionClass inherits from function. ## Hence an object can be used as a function but also has slots (accessed with @) ## for internal content. nFunctionClass <- setClass( Class = "nFunction", contains = "function", slots = list( internals = "ANY" ## An NF_InternalsClass. , originalCode = "ANY" ) ) #' Create a nFunction. #' #' Create a nFunction, which can be compiled via C++ using \link{nCompile_nFunction} or \code{nCompile} (TBD) #' #' @param fun R function to be turned into a nFunction #' @param name An internal name for the nFunction. If \code{NA}, an internal name will be generated. #' @param argTypes List of argument types declarations. An alternative is to provide argument types as part of default values in \code{fun}. See details below. #' @param refArgs Character vector of names of arguments that should be passed by reference instead of by value. An alternative is to indicate pass-by-reference as part of a type declaration in \code{refArgs} or in default value(s) of \code{fun}. See details below. 
#' @param returnType A type declaration for the type returned by \code{fun}. An alternative is to provide this information with a \code{returnType()} statement in the body of \code{fun}. #' @param enableDerivs Allows derviatives to be obtained automatically. Currently disabled. #' @param check If \code{TRUE}, \code{fun} will be checked for errors (including anything that cannot be compiled. (This is currently disabled.) #' @param returnCallable If \code{TRUE}, return a \code{nFunction} object that can be used as a funtion (because it is a function). If \code{FALSE} (only for advanced debugging), return the internal information of the \code{nFunction}. #' @param where Environment to be used as the closure of the returned \code{nFunction}. #' @return An object of class \code{nFunction}, which inherits from class \code{function}. #' @details A \code{nFunction} is a special kind of R function that can be compiled by automatic generation of C++. See (TBD) for information about writing \code{nFunctions}. See (TBD) for information about type declarations. #' #' @seealso \cite{\link{NFinternals}} for access to the internal information of a \code{nFunction} (for advanced use only). #' #' @examples #' \donttest{ #' rawfoo <- function(a = 5, b) { #' b <- b + a #' return(a) ## explicit use of return() is necessary #' } #' foo <- nFunction( #' fun = rawfoo, #' argTypes = list(a = "numericScalar()", #' b = "ref(integerVector())"), #' # First alternative is to provide arguments in \code{rawfoo} as #' # function(a = numericScalar(5), b = ref(integerVector())) #' # Second alternative is to use b = integerVector() and provide #' # refArgs = "b". 
#' returnType = "numericScalar()" #' # Alternative would be to include "returnType(numericVector())" #' # in \code{rawfoo} #' ) #' } #' @export nFunction <- function(fun, name = NA, argTypes = list(), refArgs = character(), blockRefArgs = character(), returnType = NULL, enableDerivs = list(), check = get_nOption('check_nFunction'), returnCallable = TRUE, where = parent.frame(), ... ) { ## Provide a default label is one is needed. if(is.na(name)) name <- nFunctionLabelMaker() ## Create internals that will be used for compilation. internals <- NF_InternalsClass$new(fun, name = name, argTypes = argTypes, refArgs = refArgs, blockRefArgs = blockRefArgs, returnType = returnType, enableDerivs = enableDerivs, check = check, where = where) ## Return a callable function. ## This will be modified: ## 1. to provide pass-by-reference behavior ## as requested for any arguments. ## 2. with any returnType() statement removed. if(returnCallable) { modifiedFun <- internals$getFunction() nFunctionClass(modifiedFun, internals = internals) } else internals } # Provenance of names # # nFunction creates a name either from the user or from a label maker. # This name goes in the NF_InternalsClass object, where it is also copied to uniqueName # # The NF_InternalsClass object makes a cpp_code_name by pasting the name to a unique ID separated by "_" #
3c118ad6b9b5285ae698deabf733727fe77e50c9
84696ddd7b2e7f2b735d6196331407b276d9babc
/toplotbullbear.R
33fb919eab83c366f842e46ca5f279e872fb75db
[]
no_license
phoebeshih/crawlfutures
f59f13432e8b3dafb2956d3e70924c2d53030847
cf6e00d3e406e6448b7a6e730525fb0bc4b46fc4
refs/heads/master
2021-06-05T23:24:49.898453
2016-06-12T13:39:19
2016-06-12T13:39:19
null
0
0
null
null
null
null
UTF-8
R
false
false
1,986
r
toplotbullbear.R
getwd() #大台 unclosed_TXF <- read.csv(file="unclosed_TXF.csv",head=TRUE,sep=",") #小台 unclosed_MXF <- read.csv(file="unclosed_MXF.csv",head=TRUE,sep=",") foreign<-c('foreign_bear','foreign_bull') dealer<-c('dealer_bear','dealer_bull') trust<-c('trust_bear','trust_bull') cols <- c('darkseagreen','pink') columns<-foreign dataframe<-unclosed_TXF plotname<-'futures:TX' #加權指數 taiex <- read.csv(file="taiex.csv",head=TRUE,sep=",") names(taiex) ylims<-c(min(taiex[,'price']), max(taiex[,'price'])) x<-c(1:nrow(taiex)) y<-taiex[, 'price'] plot(x, y, type="l", xaxt="n", xlab="time", ylab="price", ylim=ylims2) axis(1, at=x, labels=taiex[, 'date']) #description legend(x=1, y=max(taiex[,'price']), bty="n" , legend=c('taiex','amount', columns) , col=c('black','grey', cols) , lwd=c(1,1,1,1,1)) #大台 futures_TX <- read.csv(file="futures_TX.csv",head=TRUE,sep=",") names(futures_TX) par(new=T) y<-futures_TX[,'price'] plot(x, y, type="l", axes=F, ylab="", xlab="", col="green", lwd=1, ylim=ylims) ##成交量 par(new=T) y<-futures_TX[,'amount'] plot(x, y, type="h", axes=F, ylab="", xlab="", col="dimgrey", lwd=1) #小台 futures_MTX <- read.csv(file="futures_MTX.csv",head=TRUE,sep=",") names(futures_MTX) par(new=T) y<-futures_MTX[,'price'] plot(x, y, type="l", axes=F, ylab="", xlab="", col="cornflowerblue", lwd=1, lty=2, ylim=ylims) ##成交量 par(new=T) y<-futures_MTX[,'amount'] plot(x, y, type="h", axes=F, ylab="", xlab="", col="grey", lwd=2) maxs<-c() len<-length(columns) for(i in 1:len){ tmp <- max(dataframe[,columns[i]]) maxs<-c(maxs, tmp) } ylims<-c(0,max(maxs)) plot_unclosed<-function(name, dataframe, columns, colors){ for(i in 1:len){ y<-dataframe[, columns[i]] par(new=T) plot(x, y, type="l", axes=F, ylab="", xlab="", col=colors[i], main=name, ylim=ylims) par(new=T) plot(x, y, type="h", axes=F, ylab="", xlab="", col=colors[i], ylim=ylims) } } plot_unclosed(plotname, dataframe, columns, cols)
1d25b4eb5cf894334281ff7be996e4ecbb62f560
5a08e607367a964680b4740a6f64587eb7c7020a
/IlluminaEPICtools/R/getGenes.R
f9abbf2cf33b087b3ee28e76b104e98e42b8ba45
[]
no_license
qiongmeng-m/EpiPsychosis_IGF2
1b409ca334de0bab68f3f7b6822d289a1c647744
82fc709aa8e9406ae138aafe2fb13f79c658d54a
refs/heads/master
2021-10-18T22:00:57.756578
2019-02-14T16:58:56
2019-02-14T16:58:56
null
0
0
null
null
null
null
UTF-8
R
false
false
452
r
getGenes.R
#' get granges of genes #' #' @param gFile (char) path to gene definition file #' @return (GRanges) #' @import GenomicRanges #' @export getGenes <- function(gFile) { dat <- read.delim(gFile,sep="\t",h=T,as.is=T) cList <- c(paste("chr",1:22,sep=""),"chrX","chrY") dat <- subset(dat,chrom %in% cList) gene_GR <- GRanges(dat$chrom,IRanges(dat$txStart,dat$txEnd), strand=dat$strand) gene_GR$name <- dat$name gene_GR$name2 <- dat$name2 gene_GR }
6fe5432ef662ee69038a756fefc5ccc6507d0d1a
fdedcc4fb558790169c100efd3a396614e815067
/man/plot.population.Rd
55612ddfc419ff3144c2d0843ee6d16753e86cad
[]
no_license
cran/MoBPS
b7f7fdbde92a190c800185e2dd005d514d81f1d3
c2aeedfcba8ebf563cc64e4d4a5d2d5f186e95e1
refs/heads/master
2021-11-22T18:48:13.109306
2021-11-09T15:50:18
2021-11-09T15:50:18
251,009,278
0
0
null
null
null
null
UTF-8
R
false
true
781
rd
plot.population.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.population.R \name{plot.population} \alias{plot.population} \title{Plot Population} \usage{ \method{plot}{population}(x, type = "bve", gen = NULL, database = NULL, cohorts = NULL, ...) } \arguments{ \item{x}{Population-list} \item{type}{Default "bve" - bv.development, alt: "kinship" - kinship.development(), "pca" - get.pca()} \item{gen}{generations to consider} \item{database}{groups to consider} \item{cohorts}{cohorts to consider} \item{...}{remaining stuff} } \value{ Summary of the population list including number of individuals, genone length and trait overview } \description{ Basic plot of the population list } \examples{ data(ex_pop) plot(ex_pop) }
be1a5fce7829bf137ee63b7ac90921281908d415
3a7dc9233fbf64759b5234a18f45a66f8e1cfd96
/man/swap_counts_from_feature.Rd
30fbdfb74f7b180665834ba049fbd6224e61c35e
[ "MIT" ]
permissive
whtns/seuratTools
2b3328ce9cf7f3dcdddd03786a8baf87d8e2d646
39b6cf4e73f9fa8a3f1a85330cc0bcbf9c302297
refs/heads/master
2023-06-23T04:33:46.584459
2023-06-22T22:50:58
2023-06-22T22:50:58
179,151,711
9
0
NOASSERTION
2021-08-17T20:20:43
2019-04-02T20:14:56
R
UTF-8
R
false
true
313
rd
swap_counts_from_feature.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{swap_counts_from_feature} \alias{swap_counts_from_feature} \title{Swap counts from Feature} \usage{ swap_counts_from_feature(cds, featureType) } \arguments{ \item{featureType}{} } \description{ Swap counts from Feature }
2f6dae29b632d8db0d2cb5a7d4f0084ae6fce0d8
608369a849cf5244ac51d9180d142bceeba0a7ba
/man/qsavem.Rd
93f0ae8794e6265ef88d83f1aa36954b9508b4b5
[]
no_license
traversc/qs
437d72b46b184d99e01e90d7cd00f3e5aaa155e5
792ae169ba0959568c69c18598d90faba33be623
refs/heads/master
2023-04-11T14:53:37.867427
2023-03-31T18:00:55
2023-03-31T18:00:55
163,551,739
361
30
null
2021-11-12T17:30:47
2018-12-30T00:54:43
C
UTF-8
R
false
true
1,318
rd
qsavem.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/qsavem-load.R \name{qsavem} \alias{qsavem} \title{qsavem} \usage{ qsavem(...) } \arguments{ \item{...}{Objects to serialize. Named arguments will be passed to \code{\link[=qsave]{qsave()}} during saving. Un-named arguments will be saved. A named \code{file} argument is required.} } \description{ Saves (serializes) multiple objects to disk. } \details{ This function extends \code{\link[=qsave]{qsave()}} to replicate the functionality of \code{\link[base:save]{base::save()}} to save multiple objects. Read them back with \code{\link[=qload]{qload()}}. } \examples{ x1 <- data.frame(int = sample(1e3, replace=TRUE), num = rnorm(1e3), char = sample(starnames$`IAU Name`, 1e3, replace=TRUE), stringsAsFactors = FALSE) x2 <- data.frame(int = sample(1e3, replace=TRUE), num = rnorm(1e3), char = sample(starnames$`IAU Name`, 1e3, replace=TRUE), stringsAsFactors = FALSE) myfile <- tempfile() qsavem(x1, x2, file=myfile) rm(x1, x2) qload(myfile) exists('x1') && exists('x2') # returns true # qs support multithreading qsavem(x1, x2, file=myfile, nthreads=2) rm(x1, x2) qload(myfile, nthreads=2) exists('x1') && exists('x2') # returns true }
d91865bf977d78bf8dc7a9bb54462c9a09466724
7c21f28832d7cdf51205140203bbab5b47a254da
/scripts/get_figures.R
89c26513f09dc013d14188046009ba0a26a90dd4
[]
no_license
mariafiruleva/bat_paper
23d22e51d95accf952bdc730262e97856fade63d
7dc77efc7892e003cc383cf5bf0644478301fbc3
refs/heads/main
2023-04-08T06:24:24.512375
2021-04-16T14:59:33
2021-04-16T14:59:33
358,629,273
0
0
null
null
null
null
UTF-8
R
false
false
18,598
r
get_figures.R
## get_figures.R -- command-line figure generation for the scRNA-seq paper.
## Writes, under article_plots/: the genotype-split UMAP cluster plot,
## per-cluster cell-fraction histograms, split violin plots for selected
## genes, the dyno trajectory plot, and per-pathway / per-gene UMAP panels.
## Assumes --data loads a Seurat object `whole.integrated` and --traj loads
## `obj`, `model` and `dimred` (trajectory objects) -- TODO confirm the
## object names stored in the .rda files match.
suppressMessages(library(Seurat))
suppressMessages(library(ggplot2))
suppressMessages(library(reshape2))
suppressMessages(library(RColorBrewer))
suppressMessages(library(Matrix))
suppressMessages(library(dplyr))
suppressMessages(library(grid))
suppressMessages(library(gridExtra))
suppressMessages(library(plyr))
suppressMessages(library(MASS))
suppressMessages(library(argparse))
suppressMessages(library(dyno))
## Provides geom_split_violin() used by plot_vln() below.
source('scripts/funs_figures.R')

parser <- ArgumentParser(description = 'Get scRNA-seq related figures from the paper')
parser$add_argument('--data', type = "character", help = 'Path to seurat rda object')
parser$add_argument('--traj', type = "character", help = 'Path to trajectory rda object')
parser$add_argument('--pws', type = "character", help = 'Path to pathways file in json format')
parser$add_argument('--target_genes', type = "character", help = 'Path to txt file with gene names')

## SET VARIABLES
arguments <- parser$parse_args()
print(arguments)
## load() pulls the serialized objects into the global environment.
load(arguments$data)
load(arguments$traj)
## Palette generator interpolating the 9-colour Set1 brewer palette.
getPalette.1 <- colorRampPalette(brewer.pal(9, "Set1"))

## UMAP: clusters, split by genotype
## Renumber/merge the res-1.0 Seurat clusters into 13 custom clusters
## (e.g. original 5/6/14 collapse into custom cluster 10).
whole.integrated$custom_clusters <- whole.integrated$integrated_snn_res.1
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '0', '0', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '11', '1', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '2', '2', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '9', '3', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '1', '4', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '8', '5', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '7', '6', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '12', '7', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '13', '8', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '10', '9', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '5' | whole.integrated$integrated_snn_res.1 == '6' | whole.integrated$integrated_snn_res.1 == '14', '10', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '4' | whole.integrated$integrated_snn_res.1 == '16', '11', whole.integrated$custom_clusters)
whole.integrated$custom_clusters <- ifelse(whole.integrated$integrated_snn_res.1 == '3' | whole.integrated$integrated_snn_res.1 == '15', '12', whole.integrated$custom_clusters)
sorted_labels <- 0:12
whole.integrated$custom_clusters <- factor(x = whole.integrated$custom_clusters, levels = sorted_labels)
Idents(whole.integrated) <- 'custom_clusters'
## Legend labels; expression() allows italic gene symbols and superscripts.
new_labels <- c('0 Neutrophils', expression(~1~Myeloid~cells~italic(Zeb2)^{"hi"}), expression(~2~Monocytes~italic(Ly6C)^{"low"}), expression(~3~Monocytes~italic(Ly6C)^{"int"}), expression(~4~Monocytes~italic(Ly6C)^{"hi"}), '5 Matrix Macrophages', '6 Macrophages M2-like', expression(7~Macrophages~italic(Lpl)^{"hi"}), expression(8~Macrophages~italic(Plin2)^{"hi"}), '9 Dendritic cells', '10 T cells', '11 B cells', '12 NK cells')
## Fix panel order: WT first, KO second.
whole.integrated$genotype <- factor(x = whole.integrated$genotype, levels = c("WT", "KO"))
plt <- DimPlot(whole.integrated, split.by='genotype', pt.size=0.25)+
  scale_color_manual(values=getPalette.1(length(unique(whole.integrated$custom_clusters))), labels = new_labels)+
  theme_bw(base_size=11)+
  theme(legend.text.align = 0, legend.key.size=unit(0.2, "in"), aspect.ratio = 1, plot.margin=grid::unit(c(0,0,0.2,0), "in"), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks.x=element_blank(), axis.ticks.y=element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
  guides(fill = guide_legend(ncol = 4, override.aes = list(color = c('black'))))+
  scale_fill_continuous(guide="legend",breaks=seq(0.2,0.8,by=0.1))
## LabelClusters returns the annotated plot; being the most recent plot it
## is what the argument-less ggsave() calls below pick up.
LabelClusters(plt, id = "ident", size=5, fontface = 'bold', repel=F)
ggsave("article_plots/clustering_total.png", width = 8, height = 4, dpi=600, units='in')
ggsave("article_plots/clustering_total.svg", width = 11, height = 5, dpi=600, units='in', device = 'svg')

## histogram: cells per cluster per genotype
## Percentages are per genotype (cells in cluster / total cells of that
## genotype), so WT and KO bars are comparable despite unequal cell counts.
df <- cbind(as.character(whole.integrated$genotype), as.character(whole.integrated$custom_clusters)) %>% as.data.frame() %>% magrittr::set_colnames(c('genotype', 'cluster'))
df$cluster <- factor(x = df$cluster, levels = as.character(sort(as.numeric(levels(df$cluster)))))
df$genotype <- factor(x = df$genotype, levels = c("WT", "KO"))
p <- df %>% group_by(cluster, genotype) %>% dplyr::summarise(n = n())
per_gt <- table(whole.integrated$genotype) %>% as.data.frame() %>% magrittr::set_colnames(c('genotype', 'count'))
p <- p %>% group_by(genotype) %>% mutate(total = per_gt$count[match(genotype, per_gt$genotype)]) %>% group_by(cluster, add=TRUE) %>% mutate(per=round(100*n/total,2))
## The geom_segment arrows mark clusters highlighted in the paper figure.
hist_plot <- ggplot(p,aes(x=cluster,y=per, fill=genotype))+
  geom_bar(stat = 'identity', position=position_dodge(width = 0.5), width = 0.45) +
  theme_bw(base_size=11)+
  scale_fill_brewer(palette="Set1", direction = -1)+
  theme(legend.text.align = 0, legend.key.size=unit(0.2, "in"), aspect.ratio = 1, legend.position = 'top', legend.title = element_blank(), plot.margin=grid::unit(c(0,0,0,0), "in"))+
  geom_segment(aes(x = 2, y = 8, xend = 2, yend = 6), arrow = arrow(length = unit(0.3, "cm")), lineend='round')+
  geom_segment(aes(x = 6, y = 13, xend = 6, yend = 11), arrow = arrow(length = unit(0.3, "cm")), lineend='round')+
  geom_segment(aes(x = 7, y = 13, xend = 7, yend = 11), arrow = arrow(length = unit(0.3, "cm")), lineend='round')+
  geom_segment(aes(x = 8, y = 10, xend = 8, yend = 8), arrow = arrow(length = unit(0.3, "cm")), lineend='round')+
  geom_segment(aes(x = 9, y = 10, xend = 9, yend = 8), arrow = arrow(length = unit(0.3, "cm")), lineend='round')+
  ylab('cells per clutser, %')
ggsave(plot = hist_plot, filename = "article_plots/corrected_hist.png", width = 4, height = 4, dpi=600, units='in')
ggsave(plot = hist_plot, filename = "article_plots/corrected_hist.svg", width = 4, height = 4, dpi=600, units='in')

## Violin plots
## Pull the SCT expression values for a (case-insensitive) gene set.
## NOTE(review): `av` is assigned but never used here; the function simply
## returns the matching rows of the SCT data slot.
get_expr <- function(object, gene_set, slot='data', assay='SCT') {
  av <- numeric(ncol(object))
  zz <- which(tolower(rownames(GetAssayData(object, slot = slot, assay = assay))) %in% tolower(gene_set))
  object@assays$SCT@data[zz, ]
}
## Split (WT vs KO) violin plot of one gene across the myeloid clusters
## 2-8; saves article_plots/violins/<gene>_vln.png as a side effect.
plot_vln <- function(object, gene_set, reduction="umap", assay='SCT', slot='data') {
  red <- cbind(get_expr(object, gene_set)) %>% as.data.frame() %>% magrittr::set_colnames(c('expression'))
  red$genotype <- ifelse(rownames(red) %in% names(object$genotype[object$genotype == 'WT']), 'WT', 'KO')
  red$genotype <- factor(x = red$genotype, levels = c("WT", "KO"))
  red$cluster <- object$custom_clusters
  ## Restrict to the monocyte/macrophage clusters shown in the figure.
  target_clusters <- c(2, 4, 3, 6, 5, 7, 8)
  red <- red %>% dplyr::filter(cluster %in% target_clusters)
  ## The plot is not assigned; ggsave() below saves the last plot built.
  ggplot(data=red, aes(x=cluster, y=expression, fill=genotype)) +
    theme_bw(base_size = 8) +
    theme(panel.spacing = unit(0, "lines"), legend.position = 'none', legend.title = element_blank(), aspect.ratio = 0.2, axis.ticks = element_blank(), axis.line = element_blank(), plot.title = element_text(hjust = 0.5, face = "italic", size=6, margin=margin(0,0,0,0)), plot.margin=grid::unit(c(0,0,0,0), "in")) +
    geom_split_violin(scale="width", alpha=0.7) +
    scale_fill_brewer(palette='Set1', guide=guide_legend(ncol=2), direction=-1) +
    ylab(NULL) + xlab(NULL)+ggtitle(gene_set)
  if (!dir.exists('article_plots/violins')) { dir.create('article_plots/violins', recursive = T) }
  ggsave(sprintf("article_plots/violins/%s_vln.png", gene_set), width = 3, height = 0.8, dpi=600, units='in')
}
sapply(c('Plin2', 'Lpl', 'Cd36', 'Trem2', 'Fabp4', 'Fabp5'), function(x) plot_vln(whole.integrated, x))

## Trajectory plot
## Remap dyno grouping ids to the custom cluster numbering used above.
obj$cluster$grouping <- as.factor(obj$cluster$grouping)
obj$cluster$grouping <- ifelse(obj$cluster$grouping == 1, '4', ifelse(obj$cluster$grouping == 9, '3', ifelse(obj$cluster$grouping == 8, '5', ifelse(obj$cluster$grouping == 7, '6', ifelse(obj$cluster$grouping == 12, '7', ifelse(obj$cluster$grouping == 13, '8', '2'))))))
obj$cluster$grouping <- as.factor(obj$cluster$grouping)
new_traj_labels <- c(expression(~2~Monocytes~italic(Ly6C)^{"low"}), expression(~3~Monocytes~italic(Ly6C)^{"int"}), expression(~4~Monocytes~italic(Ly6C)^{"hi"}), '5 Matrix Macrophages', '6 Macrophages M2-like', expression(7~Macrophages~italic(Lpl)^{"hi"}), expression(8~Macrophages~italic(Plin2)^{"hi"}))
## Palette slice [3:9] keeps colours consistent with clusters 2-8 of the
## UMAP plot. NOTE(review): trailing comma after label_milestones relies on
## plot_dimred() tolerating an empty `...` argument -- confirm.
traj <- plot_dimred( model, dimred = dimred, expression_source = obj$cluster$expression, color_density = "grouping", alpha_cells = 0.7, size_cells = 1, size_trajectory = 1.5, grouping = obj$cluster$grouping, label_milestones = F, )+
  scale_fill_manual(values=getPalette.1(length(unique(whole.integrated$custom_clusters)))[3:9], labels = NULL)+
  scale_color_manual(values=getPalette.1(length(unique(whole.integrated$custom_clusters)))[3:9],labels = new_traj_labels)+
  guides(fill=FALSE, color = guide_legend(override.aes = list(size=3)))+
  theme_void(base_size = 11)+
  theme(aspect.ratio = 1, legend.title = element_blank(), legend.text.align = 0, legend.key.size=unit(0.2, "in"), plot.margin=grid::unit(c(0,0,0.2,0), "in"), legend.text=element_text(size=11))
ggsave(plot = traj, filename = "article_plots/trajectory.png", width = 8, height = 6, dpi=600, units='in')
ggsave(plot = traj, filename = "article_plots/trajectory.svg", width = 8, height = 6, dpi=600, units='in', device = 'svg')

## PATHWAY
## Mean z-scored log2 expression of a gene set per cell ("pathway score").
get_pw_expr <- function(object, gene_set, slot='data', assay='SCT') {
  av <- numeric(ncol(object))
  zz <- which(tolower(rownames(GetAssayData(object, slot = slot, assay = assay))) %in% tolower(gene_set))
  geneExp <- as.matrix(log2(object@assays$SCT@data[zz, ] + 1))
  ## Scale per gene (rows) by double-transposing around scale().
  geneExp <- t(scale(t(geneExp)))
  geneExp[is.nan(geneExp)] <- 0
  av <- av + colSums(geneExp) / length(gene_set)
  av
}
## UMAP coloured by pathway score, faceted WT/KO; macs=T zooms to the
## macrophage region. Saves article_plots/macs_pw/<pw_name>.png.
plot_target_pw <- function(object, gene_set, pw_name, reduction="umap", assay='SCT', slot='data', macs=F) {
  red <- cbind(get_pw_expr(object, gene_set), object@reductions[['umap']]@cell.embeddings) %>% as.data.frame() %>% magrittr::set_colnames(c('expression', paste0(reduction, 1), paste0(reduction, 2)))
  red$genotype <- ifelse(rownames(red) %in% names(object$genotype[object$genotype == 'WT']), 'WT', 'KO')
  red$genotype <- factor(x = red$genotype, levels = c('WT', 'KO'))
  ## Custom rescaler clamps scores outside [-1, 1] to the colour extremes.
  p <- ggplot(red, aes_string(x=paste0(reduction, 1), y=paste0(reduction, 2), color="expression"))+
    geom_point(size=0.15)+
    theme_bw(base_size=8) +
    facet_grid(.~genotype)+
    scale_color_gradientn(colours=c("darkblue", "blue", "grey", "red", "darkred"), breaks = c(0, floor(max(red$expression) * 100) / 100), rescaler = function(x, from) {
      res <- numeric(length(x))
      res[x >= 1 & !is.na(x)] <- 1
      res[x < 1 & !is.na(x)] <- (x[x < 1 & !is.na(x)] + 1) / 2
      res[x < -1 & !is.na(x)] <- 0
      res[is.na(x)] <- NA
      res
    })+
    theme(legend.background = element_blank(), legend.key = element_blank(), legend.title = element_blank(), aspect.ratio = 1, axis.ticks = element_blank(), axis.line = element_blank(), plot.title = element_text(hjust = 0.5), plot.margin=grid::unit(c(0,0,0,0), "in"), axis.title.x = element_blank(), axis.title.y = element_blank(), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks.x = element_blank(), axis.ticks.y = element_blank())+
    ggtitle(gsub('_', ' ', pw_name))
  if (macs) { p <- p+ xlim(NA, 5)+ ylim(0, NA) }
  if (!dir.exists('article_plots/macs_pw')) { dir.create('article_plots/macs_pw', recursive = T) }
  ggsave(plot=p, filename=sprintf("article_plots/macs_pw/%s.png", pw_name), width = 4.25, height = 2, dpi=600, units='in')
}
## Named gene sets, one list element per pathway.
pws <- jsonlite::fromJSON(arguments$pws)

## zoom macs: pathways
plot_target_pw(whole.integrated, pws$KEGG_GLYCEROLIPID_METABOLISM, 'KEGG_GLYCEROLIPID_METABOLISM', macs = T)
plot_target_pw(whole.integrated, pws$KEGG_GLYCEROPHOSPHOLIPID_METABOLISM, 'KEGG_GLYCEROPHOSPHOLIPID_METABOLISM', macs = T)
plot_target_pw(whole.integrated, pws$PID_LYSOPHOSPHOLIPID_PATHWAY, 'PID_LYSOPHOSPHOLIPID_PATHWAY', macs = T)
plot_target_pw(whole.integrated, pws$HALLMARK_FATTY_ACID_METABOLISM, 'HALLMARK_FATTY_ACID_METABOLISM', macs = T)

## zoom macs : genes
## UMAP coloured by a single gene's expression, faceted WT/KO; saves
## article_plots/macs_genes/<gene>.png.
plot_target_gene <- function(object, gene, reduction="umap", assay='SCT', slot='data', macs = F) {
  data <- GetAssayData(object, slot = slot, assay = assay)
  red <- object@reductions[[reduction]]@cell.embeddings
  red <- as.data.frame(red)
  colnames(red) <- paste0(reduction, 1:ncol(red))
  genes_indexes <- which(rownames(data) %in% gene[[1]])
  expression_signaling <- data[genes_indexes,]
  red$expression <- expression_signaling
  red$genotype <- ifelse(rownames(red) %in% names(object$genotype[object$genotype == 'WT']), 'WT', 'KO')
  red$genotype <- factor(x = red$genotype, levels = c('WT', 'KO'))
  red$cluster <- object$integrated_snn_res.0.2
  ## Only 0 and the floored maximum are shown on the colourbar.
  onlyBreak <- floor(max(red$expression))
  p <- ggplot(red, aes_string(x=paste0(reduction, 1), y=paste0(reduction, 2), color="expression"))+
    geom_point(size=0.1)+
    theme_bw(base_size=11) +
    facet_grid(.~genotype)+
    scale_color_gradientn(colours=c("grey", "red", "red3"),breaks=c(0, onlyBreak), guide = guide_colorbar(frame.colour = "black", ticks.colour = "black"))+
    theme(legend.background = element_blank(), legend.key = element_blank(), legend.title = element_blank(), aspect.ratio = 1, axis.ticks = element_blank(), axis.line = element_blank(), plot.title = element_text(hjust = 0.5, face = "italic"), plot.margin=grid::unit(c(0,0,0.2,0), "in"), axis.title.x = element_blank(), axis.title.y = element_blank(), axis.ticks.x = element_blank(), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks.y = element_blank())+
    ggtitle(gene)
  if (macs) { p <- p+ xlim(NA, 5)+ ylim(0, NA) }
  if (!dir.exists('article_plots/macs_genes')) { dir.create('article_plots/macs_genes', recursive = T) }
  ggsave(sprintf("article_plots/macs_genes/%s.png", gene), width = 4.25, height = 2.2, dpi=600, units='in')
}
sapply(c('Mrc1', 'Clec10a', 'Mki67', 'Ccna2', 'Top2a', 'Cd86', 'Cd80', 'Cd68', 'Tlr2', 'Tlr4', 'Cxcl10'), function(x) plot_target_gene(whole.integrated, x, macs = T))

## all genes
## Like plot_target_gene() but without genotype faceting or zoom; writes
## <out_dir>/<gene>.png for each marker.
plot_markers <- function(object, gene, out_dir, reduction="umap", assay='SCT', slot='data') {
  data <- GetAssayData(object, slot = slot, assay = assay)
  red <- object@reductions[[reduction]]@cell.embeddings
  red <- as.data.frame(red)
  colnames(red) <- paste0(reduction, 1:ncol(red))
  genes_indexes <- which(rownames(data) %in% gene[[1]])
  expression_signaling <- data[genes_indexes,]
  red$expression <- expression_signaling
  onlyBreak <- floor(max(red$expression))
  ggplot(red, aes_string(x=paste0(reduction, 1), y=paste0(reduction, 2), color="expression"))+
    geom_point(size=0.05)+
    theme_bw(base_size=11) +
    scale_color_gradientn(colours=c("grey", "red", "red3"),breaks=c(0, onlyBreak), guide = guide_colorbar(frame.colour = "black", ticks.colour = "black"))+
    theme(legend.background = element_blank(), legend.key = element_blank(), legend.title = element_blank(), aspect.ratio = 1, axis.ticks = element_blank(), axis.line = element_blank(), plot.title = element_text(hjust = 0.5, face = "italic"), plot.margin=grid::unit(c(0,0,0.2,0), "in"), axis.title.x = element_blank(), axis.title.y = element_blank(), axis.ticks.x = element_blank(), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks.y = element_blank())+
    ggtitle(gene)
  if (!dir.exists(out_dir)) { dir.create(out_dir, recursive = T) }
  ## Argument-less ggsave() saves the plot built just above.
  ggsave(sprintf("%s/%s.png", out_dir, gene), width = 4.25, height = 2.2, dpi=600, units='in')
}
## One marker gene name per line in the input file (column V1).
top5_markers <- data.table::fread(arguments$target_genes, header = F)
sapply(top5_markers$V1, function(x) plot_markers(whole.integrated, x, 'article_plots/marker_genes'))
f9a0cfe154604b71227d48da1cb3c66f88d6bda1
8982143e6eff27f3cd338ced46ef545425af1176
/iteration/map-functions.R
37a0dcf0a33b73b0d17cf7aa567b337d53562d8d
[]
no_license
uroszivanovic/R-for-Data-Science
825f493769e3646de15ebced983a4ffa2aee1d45
46ccf93a1a3afd958941461b32eea8ae78319267
refs/heads/master
2022-06-20T18:42:24.850635
2020-05-06T16:46:49
2020-05-06T16:46:49
257,034,087
0
0
null
null
null
null
UTF-8
R
false
false
843
r
map-functions.R
library(tidyverse) #The output is always a list: map(c(TRUE, FALSE, TRUE), ~ !.) map(c("Hello", "World"), str_to_upper) map(1:5, ~ rnorm(.)) map(c(-0.5, 0, 1), ~ rnorm(1, mean = .)) map(1:5, runif) #The others - outputs are always vectors: df <- tibble( a = rnorm(10), b = rnorm(10), c = rnorm(10), d = rnorm(10) ) map_dbl(df, mean) map_dbl(df, median) map_dbl(df, sd) df %>% map_dbl(mean) df %>% map_dbl(median) df %>% map_dbl(sd) map_int(iris, function(x) length(unique(x))) map_chr(nycflights13::flights, typeof) map_lgl(diamonds, is.factor) #the main difference between general map and other map functions (where outputs are vectors): map(-2:2, rnorm, n = 5) map_dbl(-2:2, rnorm, n = 5)# returning an error !!! #To return a double vector, we could use map() followed by flatten_dbl(): flatten_dbl(map(-2:2, rnorm, n = 5))
991ad6e5f21a55c95e9a31e8fc56d9635a7a32d5
6d6bc8a91bc134ec51b04dce1cf31c91986eddbc
/R/getCategoryFrequenciesForest.R
330c5731d475500e12383d3f49f6154315836c92
[]
no_license
adibender/rFtools
09e2b2f0b567dcfe95c1b65a917cc50544e598f5
d4cba8b8b65ee6dfc24d2aedd944697840c51690
refs/heads/master
2020-03-21T10:03:18.445005
2018-06-23T20:47:50
2018-06-23T20:47:50
138,431,381
1
0
null
null
null
null
UTF-8
R
false
false
736
r
getCategoryFrequenciesForest.R
getCategoryFrequenciesForest <- function(rFobject, X) { ## workaround to transform data.frame with factors into numeric matrix X <- matrix(as.numeric(as.matrix(X)), ncol = ncol(X)) splitInfoForest <- getSplitInformationForest(rfObject = rFobject, X = X) nodeObservationsForest <- lapply(splitInfoForest, function(z) z[[1]]) splitVariablesForest<- lapply(splitInfoForest, function(z) z[[3]]) cF <- lapply(1:length(nodeObservationsForest), function(z) getCategoryFrequenciesTree( nodeObservationsTree = nodeObservationsForest[[z]], X = X, splitVariablesTree = splitVariablesForest[[z]])) class(cF) <- c("CategoryFrequenciesForest") return(cF) }
f4a1425d162a62f62415f83ea02c44002ccf8900
d385bdb791ab267ac2411cc245c696a631ae2ad7
/Plot2.R
3af6d27899f19d896243c965501cd0ea3b25550f
[]
no_license
Patti-West/ExData_Plotting1
4081aa734112b9e3809e88ccf1f2cc853c8f1c77
300530c1918033ad6ced05a1d321c122af5ab2c8
refs/heads/master
2021-01-21T08:51:22.439371
2015-04-12T22:54:03
2015-04-12T22:54:03
33,836,893
0
0
null
2015-04-12T22:43:21
2015-04-12T22:43:21
null
UTF-8
R
false
false
567
r
Plot2.R
#Same as before - reading and preparing the data set setwd("~/Coursera") power_all <- read.table("~/Coursera/household_power_consumption.txt", header=TRUE, sep=";", na.strings="?") power_all$Date1 <-strptime(power_all$Date, "%d/%m/%Y") power <- subset(power_all,Date1=="2007-02-01"|Date1=="2007-02-02") power$Date2 <-strptime(paste(power$Date,power$Time),"%d/%m/%Y%H:%M:%S") rm(power_all) #plot 2 png('plot2.png',width = 480, height = 480, units = "px") plot(power$Date2,power$Global_active_power,xlab="",ylab="Global Active Power (kilowatts)",type="l") dev.off()
5d2c3ce2a192c3e016752d0c158d7354f4d3a8b4
9df052ed249d1be60bfeb83d9b76865ba14c6278
/R/PlotGlobalSens.R
2444ced170aec574069cb6083482dddd419bd131
[]
no_license
cran/capm
f68e7be0391f3044eab96dfdefe767f589c762c5
44f662f34d8bb935a9a56243c8c953a13e876773
refs/heads/master
2021-01-17T07:51:28.863562
2019-10-24T15:50:05
2019-10-24T15:50:05
17,694,936
0
0
null
null
null
null
UTF-8
R
false
false
3,672
r
PlotGlobalSens.R
#' Plot results of GlobalSens function
#' @description Plot results of \code{\link{CalculateGlobalSens}} function.
#' @param global.out output from \code{\link{CalculateGlobalSens}} function.
#' @param legend.label string with the name for the legend.
#' @param x.label string with the name for the x axis.
#' @param y.label string with the name for the y axis.
#' @param qt.label string with the name for the envelope calculated using the quantiles 0.05 and 0.95.
#' @param sd.label string with the name for the envelope calculated using the mean +- standard deviation ranges.
#' @param inner.color any valid specification of a color for the inner envelope.
#' @param outer.color any valid specification of a color for the outer envelope.
#' @details Font size of saved plots is usually different to the font size seen in graphic browsers. Before changing font sizes, see the final result in saved (or preview) plots.
#'
#' Other details of the plot can be modified using appropriate functions from \code{ggplot2} package.
#' @references Baquero, O. S., Marconcin, S., Rocha, A., & Garcia, R. D. C. M. (2018). Companion animal demography and population management in Pinhais, Brazil. Preventive Veterinary Medicine.
#'
#' \url{http://oswaldosantos.github.io/capm}
#' @seealso \link[deSolve]{plot.deSolve}.
#' @export
#' @examples
#' ## IASA model
#'
#' ## Parameters and intial conditions.
#' data(dogs)
#' dogs_iasa <- GetDataIASA(dogs,
#'                          destination.label = "Pinhais",
#'                          total.estimate = 50444)
#'
#' # Solve for point estimates.
#' solve_iasa_pt <- SolveIASA(pars = dogs_iasa$pars,
#'                            init = dogs_iasa$init,
#'                            time = 0:15,
#'                            alpha.owned = TRUE,
#'                            method = 'rk4')
#'
#' ## Set ranges 10 % greater and lesser than the
#' ## point estimates.
#' rg_solve_iasa <- SetRanges(pars = dogs_iasa$pars)
#'
#' ## Calculate golobal sensitivity of combined parameters.
#' ## To calculate global sensitivity to each parameter, set
#' ## all as FALSE.
#' glob_all_solve_iasa <- CalculateGlobalSens(
#'   model.out = solve_iasa_pt,
#'   ranges = rg_solve_iasa,
#'   sensv = "n2", all = TRUE)
#' PlotGlobalSens(glob_all_solve_iasa)
#'
PlotGlobalSens <- function(global.out = NULL, x.label = 'Time',
                           y.label = 'Population',
                           legend.label = 'Sensitivity range',
                           qt.label = 'Qt 0.05 - 0.95',
                           sd.label = 'mean +- sd ',
                           inner.color = "DarkRed",
                           outer.color = "LightBlue") {
  # Workaround to the "no visible binding for global variable" note.
  x <- Mean <- Min <- Max <- Sd <- q05 <- q95 <- NULL

  # Base envelope plot shared by both cases: outer ribbon spans the
  # 0.05-0.95 quantiles, inner ribbon spans mean +- sd, line is the mean.
  # (Previously this whole chain was duplicated in both if/else branches.)
  plt <- ggplot(global.out, aes(x = x, y = Mean)) +
    geom_ribbon(aes(ymin = q05, ymax = q95, fill = outer.color)) +
    geom_ribbon(aes(ymin = Mean - Sd, ymax = Mean + Sd, fill = inner.color)) +
    geom_line()

  # When sensitivity was computed per parameter, the last column of
  # global.out is "param": facet so each parameter gets its own panel.
  if (colnames(global.out)[length(global.out)] == 'param') {
    plt <- plt + facet_wrap( ~ param)
  }

  plt +
    xlab(x.label) + ylab(y.label) +
    scale_fill_manual(
      name = legend.label,
      values = c(inner.color, outer.color),
      labels = c(sd.label, qt.label)) +
    theme_minimal() +
    theme(legend.position = 'top')
}
be33d6620ae5ad1ecc046ffd2f2c7f0b9ef5726f
ce2435ac0d405cc80cfaddc02bb709ea7491a5d5
/Rprogramming/ReadingLinesOfaTextFile.R
f21a3aa0fea979b081c8bf5371f2dc37c2a7f3b2
[ "CC0-1.0" ]
permissive
pauEscarcia/BigData-Zacatecas
b9e4014ee1242522c04a46a8fd40badd809cfe7c
6ed59608d4583f8d0bdb5caa55c80f41a1c3844a
refs/heads/master
2021-01-10T05:25:26.429723
2016-03-14T03:18:03
2016-03-14T03:18:03
43,478,578
0
1
null
null
null
null
UTF-8
R
false
false
96
r
ReadingLinesOfaTextFile.R
#open connection to gz-compressed text file con <- gzfile("words.gz") x <- readlines(con,10) x
47c794753c5dcb1d28016844ad4af8b9e9215e0c
75db022357f0aaff30d419c13eafb9dddfce885a
/inst/Assessments/LFA41Assessment/5a.figureLengthFreqs.r
73e2c2a781152894e112c21dfc0667d273d67ad2
[]
no_license
LobsterScience/bio.lobster
d4c553f0f55f561bb9f9cd4fac52c585e9cd16f8
b2af955291cb70c2d994e58fd99d68c6d7907181
refs/heads/master
2023-09-01T00:12:23.064363
2023-08-23T16:34:12
2023-08-23T16:34:12
60,636,005
11
5
null
2017-01-20T14:35:09
2016-06-07T18:18:28
R
UTF-8
R
false
false
6,654
r
5a.figureLengthFreqs.r
# figureLengthFreqs -- per-year length-frequency figures and derived
# size-structure indicators for the LFA 41 lobster assessment.
# Section 1: one histogram per survey-year plus indicator time series.
# Section 2: histograms pooled into five-year groups, last year isolated.
require(bio.lobster)
la()

m = 0
fp = file.path(project.datadirectory('bio.lobster'),'analysis','lfa41Assessment')

# Survey length-frequency input files (one per survey series).
a = c(file.path(fp,'LengthFrequenciesLFA41polygonSummerRV.rdata'),
      file.path(fp,'LengthFrequenciesLFA41NEFSCspringrestratified.rdata'),
      file.path(fp,'LengthFrequenciesLFA41NEFSCfallrestratified.rdata'),
      file.path(fp,'LengthFrequenciesLFA41dfogeorges.rdata'))

### hard coded long term median values ---need to change with updates
lens = c(111,110,111,108,108,106,106,114,106,108)

for(i in seq_along(a)) {          # seq_along() instead of 1:length()
  out = c()
  load(a[i])                      # loads data frame `aa`
  if(grepl('NEFSC',a[i])) {
    aa <- aa[which(aa$FLEN>49),]  # NEFSC surveys: drop lengths <= 49 mm
  }
  yll = max(aa$n.yst)
  af = aggregate(ObsLobs~yr,data=aa,FUN=sum)
  names(af) = c('x','y')
  h = split(aa,f=aa$yr)
  for(j in seq_along(h)) {
    g = h[[j]]
    g$ff = round(g$FLEN/3)*3      # 3 mm carapace-length bins
    y = unique(g$yr)
    u = aggregate(n.yst~ff,data=g,FUN=sum)
    fn = paste(strsplit(strsplit(a[i],"/")[[1]][grep("Length",strsplit(a[i],"/")[[1]])],"\\.")[[1]][1],y,'pdf',sep=".")
    nn = sum(g$ObsLobs)
    pdf(file.path(project.figuredirectory('bio.lobster'),fn))
    plot(u$ff,u$n.yst,lwd=3,xlab='Carapace Length',ylab = 'Stratified Mean Number',type='h',ylim=c(0,yll))
    legend('topleft',bty='n',pch="", legend=c(y,paste('N=',nn,sep=" ")),cex=2)
    dev.off()
    # Size-structure indicators from the (scaled-up) length composition.
    lm = median(rep(g$FLEN,times=g$n.yst*1000))
    ll = quantile(rep(g$FLEN,times=g$n.yst*1000),0.25)
    lu = quantile(rep(g$FLEN,times=g$n.yst*1000),0.75)
    lmax = quantile(rep(g$FLEN,times=g$n.yst*1000),0.95)
    aS = with(subset(g,FLEN<lens[i]),sum(n.yst))
    aL = with(subset(g,FLEN>=lens[i]),sum(n.yst))
    u = subset(u,n.yst>0)$n.yst
    u = u / sum(u)
    m = m+1
    print(m)
    # Shannon equitability of the length composition.
    Eh = -1*(sum(u*log(u))) / log(length(u))
    out = rbind(out,c(y,lm,ll,lu,aS,aL,lmax,Eh,nn))
  }
  out = as.data.frame(out)
  names(out) = c('yr','medL','medLlower','medLupper','smallCatch','largeCatch','upper95','ShannonEquitability','ObsLobs')

  # Time series of the 95th percentile length.
  fn = paste('max95',strsplit(strsplit(a[i],"/")[[1]][grep("Length",strsplit(a[i],"/")[[1]])],"\\.")[[1]][1],'pdf',sep=".")
  # NOTE(review): `g` here is the last year's subset from the loop above;
  # confirm this per-year sample count is the intended value.
  nn = sum(g$ObsLobs)
  pdf(file.path(project.figuredirectory('bio.lobster'),fn))
  plot(out$yr,out$upper95,lwd=1,xlab='Year',ylab = 'Maximum Length (mm)',type='b',ylim=c(115,195),pch=16)
  lines(out$yr,runmed(out$upper95,k=3,endrule='median'),col='salmon',lwd=2)
  dev.off()

  # Time series of Shannon equitability (NAs carried forward one year).
  fn = paste('shannon',strsplit(strsplit(a[i],"/")[[1]][grep("Length",strsplit(a[i],"/")[[1]])],"\\.")[[1]][1],'pdf',sep=".")
  nn = sum(g$ObsLobs)
  oo = out[,c('yr','ShannonEquitability')]
  ii = which(is.na(oo$ShannonEquitability))
  oo$ShannonEquitability[ii] <- oo$ShannonEquitability[ii-1]
  pdf(file.path(project.figuredirectory('bio.lobster'),fn))
  plot(oo$yr,oo$ShannonEquitability,lwd=1,xlab='Year',ylab = 'Shannon Equitability',type='b',pch=16,ylim=c(0.60,1))
  lines(oo$yr,runmed(oo$ShannonEquitability,k=3,endrule='median'),col='salmon',lwd=2)
  dev.off()

  save(out,file=file.path(fp,paste('medianL',strsplit(strsplit(a[i],"/")[[1]][grep("Length",strsplit(a[i],"/")[[1]])],"\\.")[[1]][1],'rdata',sep=".")))
  write.csv(out,file=file.path(fp,'indicators',paste('medianL',strsplit(strsplit(a[i],"/")[[1]][grep("Length",strsplit(a[i],"/")[[1]])],"\\.")[[1]][1],'csv',sep=".")))

  # Plotting options for figure.stratified.analysis().
  p=list()
  p$add.reference.lines = FALSE
  p$time.series.start.year = min(aa$yr)
  p$time.series.end.year = max(aa$yr)
  p$metric = 'medianL'                 # weights
  p$measure = 'stratified.mean'        # or 'stratified.total'
  p$figure.title = ""
  p$reference.measure = 'median'       # mean, geomean
  p$file.name = paste('medianL',strsplit(strsplit(a[i],"/")[[1]][grep("Length",strsplit(a[i],"/")[[1]])],"\\.")[[1]][1],'png',sep=".")
  print(p$file.name)
  p$y.maximum = NULL                   # if ymax is too high for one year
  p$show.truncated.numbers = FALSE     # show cut-off values when using ymax
  p$ylim = c(60,155)
  p$legend = FALSE
  p$running.median = TRUE
  p$running.length = 3
  p$running.mean = FALSE               # can only have rmedian or rmean
  p$error.polygon = TRUE
  p$error.bars = FALSE
  p$ylim2 = c(0,500)
  figure.stratified.analysis(x=out,out.dir = 'bio.lobster', x2 = af, p=p,sampleSizes=TRUE)
}

### Length freqs divided into five-year breaks, with the last year
### isolated for comparison.
require(bio.lobster)
la()
a = c(file.path(fp,'LengthFrequenciesLFA41polygonSummerRV.rdata'),
      file.path(fp,'LengthFrequenciesLFA41NEFSCspringrestratified.rdata'),
      file.path(fp,'LengthFrequenciesLFA41NEFSCfallrestratified.rdata'),
      file.path(fp,'LengthFrequenciesLFA41dfogeorges.rdata'))

for(i in seq_along(a)) {
  load(a[i])
  if(grepl('NEFSC',a[i])) {
    aa <- aa[which(aa$FLEN>49),]
    print(i)
  }
  af = aggregate(ObsLobs~yr,data=aa,FUN=sum)
  names(af) = c('x','y')
  YG = 5                      # year grouping
  y = unique(aa$yr)
  yLL = length(y)-1
  yLm = yLL %% YG             # remainder years, merged into the first group
  yLr = yLL %/% YG
  # Early remainder years join the first group; the last year is its own
  # group for comparison.
  yLw = c(rep(1,yLm),rep(1:yLr,each = YG),yLr+1)
  grps = data.frame(yr = y,ry = yLw)
  aa = merge(aa,grps,by='yr',all.x=TRUE)
  aa$ff = round(aa$FLEN/3)*3
  yll = max(aggregate(n.yst~ff+ry,data=aa,FUN=mean)$n.yst)
  yll = 1                     # override: distributions are scaled to max 1 below
  h = split(aa,f=aa$ry)
  for(j in seq_along(h)) {
    g = h[[j]]
    y = unique(g$yr)
    u = aggregate(n.yst~ff,data=g,FUN=mean)
    u$n.yst = u$n.yst / max(u$n.yst)
    fn = paste(strsplit(strsplit(a[i],"/")[[1]][grep("Length",strsplit(a[i],"/")[[1]])],"\\.")[[1]][1],min(y),max(y),'pdf',sep=".")
    nn = sum(g$ObsLobs)
    pdf(file.path(project.figuredirectory('bio.lobster'),fn))
    plot(u$ff,u$n.yst,lwd=3,xlab='Carapace Length',ylab = 'Scaled Stratified Mean Number',type='h',ylim=c(0,yll))
    abline(v=82.5,lty=2,col='red',lwd=3)
    legend('topleft',bty='n',pch="", legend=c(paste(min(y),max(y),sep="-"),paste('N=',nn,sep=" ")),cex=1.5)
    dev.off()
    print(fn)
  }
}
1daaf275cc202275b758b028c2c75dcd620a0e1b
e88ce2c592127610f5879cc4c005f13b84f6dfd2
/platforms/affymetrix.R
f61c7001363873316981bcba0b30e07b4a7321e7
[]
no_license
hfogle/glds_microarrays
e693e40e210cfb62f5d4b351f9c2178c004d39ce
81647bada89a53bc17d502106c079758203af0f0
refs/heads/main
2023-07-18T20:48:47.173342
2021-09-20T16:20:42
2021-09-20T16:20:42
362,676,392
0
1
null
null
null
null
UTF-8
R
false
false
13,534
r
affymetrix.R
# GeneLab Affymetrix microarray processing pipeline:
#   raw CEL import -> QA -> RMA normalization -> probe annotation ->
#   limma DGE -> output tables and metadata export.
# Expects `opt` (parsed options), `targets` (ISA sample tables) and
# `organism_table` to exist in the calling environment.
# NOTE(review): large commented-out legacy blocks were removed; recover
# them from version control if needed.

cat("\nAffymetrix pipeline selected\n")

### Import Raw Data
cat("Extracted runsheet files: ", opt$files)
str(opt$files)
workdir <- opt$out
raw <- oligo::read.celfiles(opt$files)
cat("\nAffymetrix platform subtype: ", class(raw), "\n")

### Copy Raw Files to output directory
dir.create(file.path(workdir, "Processed_Data"), showWarnings = FALSE)
dir.create(file.path(workdir, "Processed_Data", opt$glds), showWarnings = FALSE)
dir.create(file.path(workdir, "Processed_Data", opt$glds, "00-RawData"), showWarnings = FALSE)
file.copy(from = opt$files,
          to = file.path(workdir, "Processed_Data", opt$glds, "00-RawData"),
          overwrite = FALSE, recursive = FALSE, copy.mode = FALSE)

### Create checksum file for the raw inputs
checksums <- tools::md5sum(opt$files)
names(checksums) <- basename(opt$files)
write.table(checksums,
            file.path(workdir, "Processed_Data", opt$glds, "00-RawData", "md5sum.txt"),
            quote = FALSE)

### Generate Raw Data QA HTML Report
if (isTRUE(opt$reports)) {
  rmarkdown::render("qa_summary_raw.Rmd", "html_document",
                    output_file = "raw_qa",
                    output_dir = file.path(workdir, "Processed_Data", opt$glds, "00-RawData"))
}

### Background correction and normalization
# inherits() instead of `class(raw) == ...`, which misbehaves when an
# object carries more than one class.
if (inherits(raw, "ExonFeatureSet") || inherits(raw, "GeneFeatureSet")) {
  data <- oligo::rma(raw, target = "core", background = TRUE, normalize = TRUE)
  data.bgonly <- oligo::rma(raw, target = "core", background = TRUE, normalize = FALSE)
  cat("RMA background correction and quantile normalization performed with gene level summarization.\n")
}
if (inherits(raw, "ExpressionFeatureSet")) {
  data <- oligo::rma(raw, normalize = TRUE, background = TRUE)
  data.bgonly <- oligo::rma(raw, normalize = FALSE, background = TRUE)
  cat("RMA background correction and quantile normalization performed.\n")
}

# FIX: create the output directory *before* rendering a report into it;
# the original rendered first and created the directory afterwards.
dir.create(file.path(workdir, "Processed_Data", opt$glds, "01-NormalizedData"), showWarnings = FALSE)

### Generate Normalized Data QA HTML Report
if (isTRUE(opt$reports)) {
  rmarkdown::render("qa_summary_normalized.Rmd", "html_document",
                    output_file = "normalized_qa",
                    output_dir = file.path(workdir, "Processed_Data", opt$glds, "01-NormalizedData"))
}

### Write out the expression values
setwd(file.path(workdir, "Processed_Data", opt$glds, "01-NormalizedData"))
expression <- data.frame(Biobase::exprs(data))
write.table(expression, "normalized.txt", quote = FALSE, append = FALSE,
            sep = "\t", col.names = NA)

### Import probe annotation database
# FIX: the original condition was `length(opt$probe >= 1)` — the length
# of a logical vector — instead of the intended `length(opt$probe) >= 1`.
if (length(opt$probe) >= 1) {
  options(connectionObserver = NULL)
  database <- sub('\\.annotation.tar.gz$', '', basename(opt$probe))
  cat("\nLoading local probe annotation database: ", database, "\n")
  if (!require(database, character.only = TRUE)) {
    BiocManager::install(database, ask = FALSE)
  }
  install.packages(opt$probe, repos = NULL, verbose = FALSE, quiet = TRUE)
  library(database, character.only = TRUE)
} else {
  # Derive the Bioconductor annotation package name from the platform id,
  # e.g. "pd.hg.u133.plus.2" -> "hgu133plus2.db".
  package <- raw@annotation
  package <- gsub("pd.", "", package)
  package <- gsub(".v1", "transcriptcluster", package)
  package <- gsub("[.]", "", package)
  package <- paste0(package, ".db")
  database <- package
  cat("\nSearch for package: ", database)
  if (!require(database, character.only = TRUE)) {
    BiocManager::install(database, ask = FALSE)
  }
  library(database, character.only = TRUE)
}

# Look the annotation-db object up by name once; get() replaces the
# original eval(parse(text = database)) anti-pattern.
annot_db <- get(database)
keytype <- "PROBEID"
keys <- rownames(expression)

### Map assay database annotations (each extra lookup is best-effort)
annotation <- data.frame(REFSEQ = mapIds(annot_db, keys = keys, keytype = keytype,
                                         column = "REFSEQ", multiVals = "first"))
try(annotation$ENSEMBL <- mapIds(annot_db, keys = keys, keytype = keytype,
                                 column = "ENSEMBL", multiVals = "first"))
try(annotation$SYMBOL <- mapIds(annot_db, keys = keys, keytype = keytype,
                                column = "SYMBOL", multiVals = "first"))
try(annotation$GENENAME <- mapIds(annot_db, keys = keys, keytype = keytype,
                                  column = "GENENAME", multiVals = "first"))
try(annotation$ENTREZID <- mapIds(annot_db, keys = keys, keytype = keytype,
                                  column = "ENTREZID", multiVals = "first"))
try(annotation$TAIR <- mapIds(annot_db, keys = keys, keytype = keytype,
                              column = "TAIR", multiVals = "first"))
try(annotation$GOSLIM_IDS <- mapIds(annot_db, keys = keys, keytype = keytype,
                                    column = "GO", multiVals = "first"))

### Map STRING protein-interaction ids onto the annotation table
try({
  string_db <- STRINGdb::STRINGdb$new(
    version = "11",
    species = organism_table$taxon[organism_table$species == opt$species],
    score_threshold = 0)
  string_map <- string_db$map(annotation, "ENTREZID",
                              removeUnmappedRows = TRUE, takeFirst = TRUE)
  string_cols <- string_map[, c("ENTREZID", "STRING_id")]
  string_cols <- string_cols[!duplicated(string_cols$ENTREZID), ]
  annotation <- dplyr::left_join(annotation, string_cols, by = "ENTREZID")
  rm(string_map, string_db)
})
rm(keytype, keys)

### Generate normalized annotated expression text file
cat("\nGenerating normalized-annotated.txt file\n")
setwd(file.path(workdir, "Processed_Data", opt$glds, "01-NormalizedData"))
expression <- cbind(annotation, expression)
write.table(expression, "normalized-annotated.txt", quote = FALSE,
            append = FALSE, row.names = FALSE, sep = "\t")
write.table(annotation, "probe_annotations.txt", quote = FALSE,
            append = FALSE, row.names = FALSE, sep = "\t")

### Rename assay samples with ISAtab sample names; sort metadata tables
### into assay order
index <- sapply(targets$t1$SampleName, function(x) { grep(x, sampleNames(data)) })
sampleNames(data) <- targets$t1$SampleName[order(index)]
targets$t1 <- targets$t1[order(index), ]
targets$t2 <- targets$t2[order(index), ]
targets$t3 <- targets$t3[order(index), ]
rm(index)

### Annotate the expression set object and save as a file
cat("\nGenerating normalized-annotated.rda file\n")
setwd(file.path(workdir, "Processed_Data", opt$glds, "01-NormalizedData"))
fData(data) <- annotation
save(data, file = "normalized-annotated.rda")
fData(data.bgonly) <- annotation

### Gene level estimation by maximum interquartile range
# (the nsFilter-based aggregation is currently disabled; data passes
# through unfiltered)
cat("\nPerforming gene level estimation by max interquartile range")
data.filt <- data

### Basic linear model fit
cat("\nConstructing linear model\n")
library(limma)
group__ <- factor(targets$t3$Group, levels = unique(targets$t3$Group))
design <- model.matrix(~ 0 + group__)
colnames(design) <- gsub("group__", "", colnames(design))  # strip design name prefix
fit <- lmFit(data.filt, design)
if (is.fullrank(design) == FALSE) {
  cat("The following groups are non estimable:", nonEstimable(design))
}
fit.groups <- colnames(fit$design)[which(fit$assign == 1)]
fit.index <- which(levels(group__) %in% fit.groups)
fit.group.names <- unique(targets$t2$Group)

### Create contrast model (all pairwise group comparisons, both directions)
cat("\nCreating contrast model\n")
combos <- combn(fit.groups, 2)
combos.names <- combn(fit.group.names, 2)
contrasts <- c(paste(combos[1, ], combos[2, ], sep = "-"),
               paste(combos[2, ], combos[1, ], sep = "-"))
contrast.names <- c(paste(combos.names[1, ], combos.names[2, ], sep = "v"),
                    paste(combos.names[2, ], combos.names[1, ], sep = "v"))
cont.matrix <- makeContrasts(contrasts = contrasts, levels = design)
contrast.fit <- contrasts.fit(fit, cont.matrix)
contrast.fit <- eBayes(contrast.fit)
results <- decideTests(contrast.fit, method = "separate", adjust.method = "BH",
                       p.value = 0.05, lfc = 0.5)  # FDR .05
rm(combos, combos.names, cont.matrix)

### Construct DGE output tables
cat("Building DGE tables\n")
dir.create(file.path(workdir, "Processed_Data", opt$glds, "02-Limma_DGE"), showWarnings = FALSE)
setwd(file.path(workdir, "Processed_Data", opt$glds, "02-Limma_DGE"))
output_table <- fit$genes
reduced_output_table <- fit$genes
cat("\nDim of fit$genes: ", dim(output_table), "\n")
expr <- as.data.frame(data.filt@assayData$exprs)
cat("\nDim of expr: ", dim(expr), "\n")
cat("\nDim of data.filt.exprs: ", dim(data.filt@assayData$exprs), "\n")
output_table <- cbind(output_table, expr)
reduced_output_table <- cbind(reduced_output_table, expr)

# All-sample mean / stdev and the overall F-test p-value
output_table$All.mean <- fit$Amean
reduced_output_table$All.mean <- fit$Amean
output_table$All.stdev <- contrast.fit$s2.post
reduced_output_table$All.stdev <- contrast.fit$s2.post
output_table$F.p.value <- contrast.fit$F.p.value
reduced_output_table$F.p.value <- contrast.fit$F.p.value

uu <- unique(targets$t2$Group)

# Per-group means
group_means <- fit$coefficients
colnames(group_means) <- paste0("Group.Mean_", uu)
output_table <- cbind(output_table, group_means)
reduced_output_table <- cbind(reduced_output_table, group_means)
rm(group_means)

# Per-group standard deviations
group_stdev <- fit$stdev.unscaled * fit$coefficients
colnames(group_stdev) <- paste0("Group.Stdev_", uu)
output_table <- cbind(output_table, group_stdev)
reduced_output_table <- cbind(reduced_output_table, group_stdev)
rm(group_stdev)

# Per-contrast statistics
for (i in seq_along(contrasts)) {
  top <- topTable(contrast.fit, coef = i, number = Inf,
                  genelist = contrast.fit$genes$ID,
                  adjust.method = "BH", sort.by = "none")
  table <- top[, c(1, 4, 5)]  # Log2fc, P.value, Adj.p.value columns
  colnames(table) <- c("Log2fc", "P.value", "Adj.p.value")
  table.reduced <- table
  table$Updown <- sign(top$logFC)
  table$Sig.1 <- top$adj.P.Val <= 0.1
  table$Sig.05 <- top$adj.P.Val <= 0.05
  table$Log2_P.value <- log2(top$P.Value)        # for volcano plot
  table$Log2_Adj.p.value <- log2(top$adj.P.Val)  # for volcano plot
  colnames(table.reduced) <- paste(colnames(table.reduced), contrast.names[i], sep = "_")
  colnames(table) <- paste(colnames(table), contrast.names[i], sep = "_")
  output_table <- cbind(output_table, table)
  reduced_output_table <- cbind(reduced_output_table, table.reduced)
}
rm(i, top, table, table.reduced)

### Export DGE output data tables
setwd(file.path(workdir, "Processed_Data", opt$glds, "02-Limma_DGE"))
write.csv(reduced_output_table, "differential_expression.csv", row.names = FALSE)
write.csv(output_table, "visualization_output_table.csv", row.names = FALSE)
contrast.output <- contrast.fit$contrasts
row.names(contrast.output) <- uu
contrast.order <- order(match(contrasts, colnames(contrast.fit$contrasts)))
colnames(contrast.output) <- contrast.names
write.csv(contrast.output, "contrasts.csv")
rm(uu, group__, fit.index, fit.groups, fit.group.names, contrasts, contrast.names)

### Export metadata files
dir.create(file.path(workdir, "Processed_Data", opt$glds, "Metadata"), showWarnings = FALSE)
path <- file.path(workdir, "Processed_Data", opt$glds, "Metadata")
setwd(path)
file.copy(from = opt$isa, to = file.path(path, basename(opt$isa)),
          overwrite = FALSE, recursive = FALSE, copy.mode = FALSE)
try(file.copy(from = Sys.glob(opt$probe), to = file.path(path, basename(opt$probe)),
              overwrite = FALSE, recursive = FALSE, copy.mode = FALSE))
file.copy(from = opt$runsheet, to = file.path(path, basename(opt$runsheet)),
          overwrite = FALSE, recursive = FALSE, copy.mode = FALSE)
rm(path)
cat("All data files have been written to: ", file.path(workdir, "Processed_Data", opt$glds))
6fdac0c6ba96cd4570a1fb334cf7038d0055df47
a62e487b9fbd390a38519750d5054b042527c92e
/R/KH.Algorithm.R
dd24cff338c15394956aaa3fcd56d0e86292a4e2
[]
no_license
BimaAdi/MetaOpt2
dcfd5a62184ff8bcf1ab0e82371bd82eca3c4a62
640c7f3bb0e6c03fc20143b7064f06257f8f6b7e
refs/heads/master
2020-04-20T14:36:46.807556
2019-03-27T04:46:38
2019-03-27T04:46:38
168,904,438
1
0
null
null
null
null
UTF-8
R
false
false
8,122
r
KH.Algorithm.R
# Krill-Herd Algorithm (KH)
# Metaheuristic optimizer: a population of "krill" moves under three
# forces (induced motion, foraging, random diffusion), plus genetic
# crossover/mutation operators.
#
# KH(): validates arguments, builds the initial random population and
# delegates to engineKH(). Returns the best position found.
KH <- function(FUN, optimType="MIN", numVar, numPopulation=40, maxIter=500,
               rangeVar, maxMotionInduced=0.01,
               inertiaWeightOfMotionInduced=0.01, epsilon=1e-05,
               foragingSpeed=0.02, inertiaWeightOfForagingSpeed=0.01,
               maxDifussionSpeed=0.01, constantSpace=1, mu=0.1){
  # Validation
  if(numPopulation < 1){
    stop("numPopulation must greater than 0")
  }
  if(maxIter < 0){
    stop("maxIter must greater than or equal to 0")
  }
  if(inertiaWeightOfMotionInduced < 0 | inertiaWeightOfMotionInduced > 1){
    stop("inertiaWeightOfMotionInduced must between 0 and 1")
  }
  if(inertiaWeightOfForagingSpeed < 0 | inertiaWeightOfForagingSpeed > 1){
    stop("inertiaWeightOfForagingSpeed must between 0 and 1")
  }
  if(maxMotionInduced < 0 | maxMotionInduced > 1){
    stop("maxMotionInduced must between 0 and 1")
  }
  if(foragingSpeed < 0 | foragingSpeed > 1){
    stop("foragingSpeed must between 0 and 1")
  }
  if(maxDifussionSpeed < 0 | maxDifussionSpeed > 1){
    stop("maxDifussionSpeed must between 0 and 1")
  }
  if(constantSpace < 0 | constantSpace > 2){
    stop("constantSpace must between 0 and 2")
  }
  if(mu < 0 | mu > 1){
    stop("mu must between 0 and 1")
  }

  # Calculate the dimension of the problem if not specified by user.
  dimension <- ncol(rangeVar)

  # Parse rangeVar to lowerBound and upperBound.
  lowerBound <- rangeVar[1,]
  upperBound <- rangeVar[2,]

  # If user defines the same bounds for every dimension.
  if(dimension==1){
    dimension <- numVar
  }

  ## Convert optimType to numerical form:
  ## 1 for minimization and -1 for maximization.
  if(optimType == "MAX") optimType <- -1 else optimType <- 1

  # If user gives a single lb/ub, repeat it for every dimension.
  if(length(lowerBound)==1 & length(upperBound)==1){
    lowerBound <- rep(lowerBound, dimension)
    upperBound <- rep(upperBound, dimension)
  }

  # Generate the initial candidate solutions.
  candidateSolution <- generateRandom(numPopulation, dimension, lowerBound, upperBound)

  bestPos <- engineKH(FUN, optimType, maxIter, lowerBound, upperBound,
                      candidateSolution, maxMotionInduced,
                      inertiaWeightOfMotionInduced, epsilon, foragingSpeed,
                      inertiaWeightOfForagingSpeed, maxDifussionSpeed,
                      constantSpace, mu)
  return(bestPos)
}

# engineKH(): the main optimization loop. FIX: the iteration loops now
# use seq_len() — the original `for(t in 1:maxIter)` executed twice when
# maxIter == 0, a value the validation explicitly allows.
engineKH <- function(FUN, optimType, maxIter, lowerBound, upperBound,
                     candidateSolution, maxMotionInduced,
                     inertiaWeightOfMotionInduced, epsilon, foragingSpeed,
                     inertiaWeightOfForagingSpeed, maxDifussionSpeed,
                     constantSpace, mu){
  numVar <- ncol(candidateSolution)
  numPopulation <- nrow(candidateSolution)
  N <- matrix(rep(0, numPopulation * numVar), ncol = numVar)  # induced motion
  f <- matrix(rep(0, numPopulation * numVar), ncol = numVar)  # foraging motion
  gbest <- calcBest(FUN, -1*optimType, candidateSolution)
  progressbar <- txtProgressBar(min = 0, max = maxIter, style = 3)
  for(t in seq_len(maxIter)){
    CSFitness <- calcFitness(FUN, optimType, candidateSolution)
    best <- candidateSolution[order(CSFitness)[1], ]
    worst <- candidateSolution[order(CSFitness)[numPopulation], ]
    bestFitness <- calcFitness(FUN, optimType, matrix(best, ncol = numVar))
    worstFitness <- calcFitness(FUN, optimType, matrix(worst, ncol = numVar))
    gbest <- calcBest(FUN, -1*optimType, rbind(candidateSolution, gbest))

    # Motion induced by other krill within the sensing distance.
    # NOTE(review): the KH paper defines d_s = (1/5N) * sum of distances;
    # `1/5*ncol(...)` multiplies instead — confirm against the reference.
    sensingDistance <- 1/5*ncol(candidateSolution)*colSums(as.matrix(dist(candidateSolution)))
    isIncludeSD <- as.matrix(dist(candidateSolution, diag = TRUE, upper = TRUE)) < sensingDistance
    alpha <- c()
    for(index in seq_len(numPopulation)){
      X <- apply(as.matrix(candidateSolution[isIncludeSD[index,],]), c(1),
                 function(x, y){ xijKH(y, x, epsilon) },
                 y=candidateSolution[index,])
      K <- sapply(CSFitness[isIncludeSD[index,]],
                  function(x, y){ kijKH(y, x, bestFitness, worstFitness) },
                  y=CSFitness[index])
      if(numVar == 1){
        alphaLocal <- sum(X * K)
      }else{
        alphaLocal <- colSums(t(X) * K)
      }
      # Attraction toward the current best krill.
      Cbest <- 2*(runif(1)+t/maxIter)
      X <- xijKH(candidateSolution[index,], best, epsilon)
      K <- kijKH(CSFitness[index], bestFitness, bestFitness, worstFitness)
      alphaTarget <- Cbest*K*X
      alpha <- rbind(alpha, alphaLocal + alphaTarget)
    }
    N <- maxMotionInduced * alpha + inertiaWeightOfMotionInduced * N

    # Foraging motion toward the "food" centroid (fitness-weighted mean).
    if(numVar == 1){
      Xfood <- sum(candidateSolution * 1 / CSFitness) / sum(1/CSFitness)
      if(is.nan(Xfood)){
        Xfood <- 0
      }
    }else{
      Xfood <- colSums(candidateSolution * 1 / CSFitness) / sum(1/CSFitness)
    }
    xFoodFitness <- calcFitness(FUN, optimType, matrix(Xfood, ncol = numVar))
    Cfood <- 2*(1-t/maxIter)
    Kifood <- sapply(CSFitness, function(x){
      kijKH(x, xFoodFitness, bestFitness, worstFitness)
    })
    Xifood <- apply(candidateSolution, c(1), function(x){
      xijKH(x, Xfood, epsilon)
    })
    Kibest <- sapply(CSFitness, function(x){
      kijKH(x, bestFitness, bestFitness, worstFitness)
    })
    Xibest <- apply(candidateSolution, c(1), function(x){
      xijKH(x, best, epsilon)
    })
    if(numVar == 1){
      betaFood <- Cfood*Kifood*Xifood
      betaBest <- Xibest * Kibest
    }else{
      betaFood <- t(Cfood*Kifood*Xifood)
      betaBest <- t(Xibest) * Kibest
    }
    beta <- betaFood + betaBest
    f <- foragingSpeed*beta + inertiaWeightOfForagingSpeed*f

    # Physical (random) diffusion, decaying over iterations.
    D <- maxDifussionSpeed * (1 - t/maxIter)*runif(1, min = -1, max = 1)

    # Total motion and position update.
    TotalMotion <- N + f + D
    deltaT <- constantSpace*sum(upperBound - lowerBound)
    candidateSolution <- candidateSolution + deltaT * TotalMotion

    # Genetic operators ----
    # Crossover: each gene is replaced by the same gene from a random
    # krill with probability Cr (smaller for fitter krill).
    CSFitness <- calcFitness(FUN, optimType, candidateSolution)
    best <- candidateSolution[order(CSFitness)[1], ]
    worst <- candidateSolution[order(CSFitness)[numPopulation], ]
    bestFitness <- calcFitness(FUN, optimType, matrix(best, ncol = numVar))
    worstFitness <- calcFitness(FUN, optimType, matrix(worst, ncol = numVar))
    gbest <- calcBest(FUN, -1*optimType, rbind(candidateSolution, gbest))
    Kibest <- sapply(CSFitness, function(x){
      kijKH(x, bestFitness, bestFitness, worstFitness)
    })
    randomMatrix <- matrix(runif(numVar * numPopulation), ncol = numVar)
    Cr <- 0.2 * Kibest
    prob <- randomMatrix < Cr
    if(!all(prob == FALSE)){
      Xrm <- sapply(col(candidateSolution)[prob], function(x){
        choosen <- sample.int(numPopulation, 1)
        return(candidateSolution[choosen, x])
      })
      candidateSolution[prob] <- Xrm
    }

    # Mutation: each gene mutates toward gbest plus a scaled difference
    # of two random krill, with probability Mu.
    CSFitness <- calcFitness(FUN, optimType, candidateSolution)
    best <- candidateSolution[order(CSFitness)[1], ]
    worst <- candidateSolution[order(CSFitness)[numPopulation], ]
    bestFitness <- calcFitness(FUN, optimType, matrix(best, ncol = numVar))
    worstFitness <- calcFitness(FUN, optimType, matrix(worst, ncol = numVar))
    gbest <- calcBest(FUN, -1*optimType, rbind(candidateSolution, gbest))
    Kibest <- sapply(CSFitness, function(x){
      kijKH(x, bestFitness, bestFitness, worstFitness)
    })
    randomMatrix <- matrix(runif(numVar * numPopulation), ncol = numVar)
    Mu <- 0.05 * Kibest
    prob <- randomMatrix < Mu
    if(!all(prob == FALSE)){
      Xgbest <- sapply(col(candidateSolution)[prob], function(x){
        P <- sample.int(numPopulation, 1)
        Q <- sample.int(numPopulation, 1)
        return(gbest[x] + mu * (candidateSolution[P, x] - candidateSolution[Q, x]))
      })
      candidateSolution[prob] <- Xgbest
    }
    setTxtProgressBar(progressbar, t)
  }
  close(progressbar)
  gbest <- calcBest(FUN, -1*optimType, rbind(candidateSolution, gbest))
  return(gbest)
}

# Normalized direction from krill i toward j (epsilon avoids div-by-zero).
xijKH <- function(i, j, epsilon){
  (j - i)/(dist(rbind(j, i)) + epsilon)
}

# Fitness difference scaled by the best-worst spread; 0 when the
# population has no spread (guards against division by zero).
kijKH <- function(i, j, best, worst){
  if(worst == best){
    0
  }else{
    (i - j)/(worst - best)
  }
}
40b595b8b6861d4be0fadf804121fe838b4717d5
e5bb0581bf88dfd4ff986dd760571c1528ea72a7
/Shiny/Functions/filters.R
45c1ff02bd20b28a7897971d650926549b69b2d9
[]
no_license
deslion/EyeTrackingProject
6b9805a8361cf24268c2841acd7749abec3ad73f
246771b25c8bc3d0e981219142545b586c85adf1
refs/heads/master
2021-01-15T20:53:25.696030
2016-03-29T13:42:52
2016-03-29T13:42:52
52,740,006
0
1
null
2016-02-28T19:47:43
2016-02-28T19:47:43
null
UTF-8
R
false
false
6,911
r
filters.R
# Filter / event-detector construction and gap-artifact marking for
# eye-tracking sample streams.
# Marker codes used throughout: 1 = ok, 2 = gap, 3 = artifact.

# Build a FilterEventDetector S4 object wrapping a filter function and
# its settings.
createFilter <- function(id, name, description, fun, settings) {
  filter <- new(Class = "FilterEventDetector",
                id = id,
                name = name,
                fun = fun,
                description = description,
                settings = settings)
  return(filter)
}

# noFilter: pass-through filter, marks every sample "ok" in one group.
# INDEPENDENT OF ETRAN CLASSES.
noFilter <- function(t, x, y, settings) {
  okMarker <- 1; gapMarker <- 2; artMarker <- 3
  markers <- rep(okMarker, length(t))
  groups <- rep(1, length(t))
  res <- list(t = t, x = x, y = y, eventMarkers = markers, eventGroups = groups)
  return(res)
}

# standardFilter: marks (0,0) samples as gaps and off-screen samples as
# artifacts; optionally interpolates across gap/artifact runs.
# INDEPENDENT OF ETRAN CLASSES.
standardFilter <- function(t, x, y, settings) {
  okMarker <- 1; gapMarker <- 2; artMarker <- 3
  screenRes <- settings$screenResolution
  interpolate <- settings$interpolate
  # (0,0) samples are tracker dropouts (gaps).
  markers1 <- ifelse(x == 0 & y == 0, gapMarker, okMarker)
  # Samples beyond the screen resolution are artifacts (only when a
  # resolution is supplied).
  if (!is.na(screenRes)[1]) {
    markers2 <- ifelse(x > screenRes[1] | y > screenRes[2], artMarker, okMarker)
    markers1[which(markers1 == okMarker)] <- markers2[which(markers1 == okMarker)]
  }
  markers <- markers1
  if (interpolate) {
    gapMarkers <- ifelse(markers != okMarker, gapMarker, okMarker)
    gapMarks <- data.frame(firstGap = gapMarkers[-length(gapMarkers)],
                           secondGap = gapMarkers[-1])
    transitions <- apply(gapMarks, MARGIN = 1,
                         function(x) {if (x[2] != x[1]) {1} else {0}})
    group <- c(1, cumsum(transitions) + 1)
    data <- data.frame(t, x, y, group)
    lastGroup <- group[length(group)]
    gapGroups <- unique(group[which(gapMarkers == gapMarker)])
    if (length(gapGroups) != 0) {
      # NOTE(review): each anonymous-function call below modifies its own
      # local copy of `data` and implicitly returns only `newY` (the value
      # of its last assignment); the rbind/rownames reassembly that follows
      # looks fragile — verify the interpolation path against test data.
      data2 <- lapply(gapGroups, FUN = function(x) {
        if (x == 1) {
          # Leading gap: back-fill with first sample after the gap.
          smpCnt <- nrow(data[data$group == x,])
          samplesAfterGap <- data[data$group == x + 1,]
          firstSampleAfterGap <- samplesAfterGap[1,]
          newX <- rep(firstSampleAfterGap$x, smpCnt)
          newY <- rep(firstSampleAfterGap$y, smpCnt)
        }
        if (x == lastGroup) {
          # Trailing gap: forward-fill with last sample before the gap.
          smpCnt <- nrow(data[data$group == x,])
          samplesBeforeGap <- data[data$group == x - 1,]
          lastSampleBeforeGap <- samplesBeforeGap[nrow(samplesBeforeGap),]
          newX <- rep(lastSampleBeforeGap$x, smpCnt)
          newY <- rep(lastSampleBeforeGap$y, smpCnt)
        }
        if (x != 1 && x != lastGroup) {
          # Interior gap: linear interpolation between surrounding samples.
          samplesAfterGap <- data[data$group == x + 1,]
          firstSampleAfterGap <- samplesAfterGap[1,]
          samplesBeforeGap <- data[data$group == x - 1,]
          lastSampleBeforeGap <- samplesBeforeGap[nrow(samplesBeforeGap),]
          ts <- data[data$group == x,1]
          ts2 <- c(lastSampleBeforeGap$t, ts, firstSampleAfterGap$t)
          dts <- (ts2[-1]-ts2[-length(ts2)])/(firstSampleAfterGap$t-lastSampleBeforeGap$t)
          dPos <- list(dx = firstSampleAfterGap$x-lastSampleBeforeGap$x,
                       dy = firstSampleAfterGap$y-lastSampleBeforeGap$y)
          newX <- lastSampleBeforeGap$x+dPos$dx*cumsum(dts)[1:length(ts)]
          newY <- lastSampleBeforeGap$y+dPos$dy*cumsum(dts)[1:length(ts)]
        }
        data[data$group == x,2] <- newX
        data[data$group == x,3] <- newY
      })
      filteredData <- do.call("rbind", data2)
      data[rownames(filteredData),] <- filteredData
      t <- data$t; x <- data$x; y <- data$y
    }
  }
  # Regroup consecutive runs of identical markers.
  markersDF <- cbind(markers[-length(markers)], markers[-1])
  transitions <- apply(markersDF, MARGIN = 1,
                       function(x) {if (x[2] != x[1]) {1} else {0}})
  groups <- c(1, cumsum(transitions) + 1)
  res <- list(t = t, x = x, y = y, eventMarkers = markers, eventGroups = groups)
  return(res)
}

## CORE FILTER ##
# Applies settings$subfun to the left and/or right eye samples of a
# DataRecord, storing the resulting FilterEventMarkers and (optionally)
# the interpolated coordinates back onto the record.
coreFilter <- function(DataRecord, settings) {
  interpolate <- ifelse(length(settings$interpolate) != 0, settings$interpolate, FALSE)
  filter <- settings$subfun
  t <- DataRecord@eyesDataObject@time@time
  if (DataRecord@eyesDataObject@conditions@conditions$eye == "left") {
    leftX <- DataRecord@eyesDataObject@leftEyeSamples@eyeData$porx
    leftY <- DataRecord@eyesDataObject@leftEyeSamples@eyeData$pory
    res <- filter(t, leftX, leftY, settings)
    filterEventMarkers <- new(Class = "FilterEventMarkers",
                              markers = res$eventMarkers,
                              groups = res$eventGroups,
                              eventClass = "FilterEvent")
    if (interpolate) {
      DataRecord@eyesDataObject@leftEyeSamples@eyeData$porx <- res$x
      DataRecord@eyesDataObject@leftEyeSamples@eyeData$pory <- res$y
    }
    DataRecord@eyesDataObject@leftEventsMarkers$filterMarkers <- filterEventMarkers
  }
  if (DataRecord@eyesDataObject@conditions@conditions$eye == "right") {
    rightX <- DataRecord@eyesDataObject@rightEyeSamples@eyeData$porx
    rightY <- DataRecord@eyesDataObject@rightEyeSamples@eyeData$pory
    res <- filter(t, rightX, rightY, settings)
    if (interpolate) {
      DataRecord@eyesDataObject@rightEyeSamples@eyeData$porx <- res$x
      DataRecord@eyesDataObject@rightEyeSamples@eyeData$pory <- res$y
    }
    filterEventMarkers <- new(Class = "FilterEventMarkers",
                              markers = res$eventMarkers,
                              groups = res$eventGroups,
                              eventClass = "FilterEvent")
    DataRecord@eyesDataObject@rightEventsMarkers$filterMarkers <- filterEventMarkers
  }
  if (DataRecord@eyesDataObject@conditions@conditions$eye == "both") {
    leftX <- DataRecord@eyesDataObject@leftEyeSamples@eyeData$porx
    leftY <- DataRecord@eyesDataObject@leftEyeSamples@eyeData$pory
    rightX <- DataRecord@eyesDataObject@rightEyeSamples@eyeData$porx
    rightY <- DataRecord@eyesDataObject@rightEyeSamples@eyeData$pory
    resLeft <- filter(t, leftX, leftY, settings)
    resRight <- filter(t, rightX, rightY, settings)
    if (interpolate) {
      DataRecord@eyesDataObject@leftEyeSamples@eyeData$porx <- resLeft$x
      # FIX: was written to the non-existent slot name "$port".
      DataRecord@eyesDataObject@leftEyeSamples@eyeData$pory <- resLeft$y
      DataRecord@eyesDataObject@rightEyeSamples@eyeData$porx <- resRight$x
      DataRecord@eyesDataObject@rightEyeSamples@eyeData$pory <- resRight$y
    }
    leftFilterEventMarkers <- new(Class = "FilterEventMarkers",
                                  markers = resLeft$eventMarkers,
                                  groups = resLeft$eventGroups,
                                  eventClass = "FilterEvent")
    rightFilterEventMarkers <- new(Class = "FilterEventMarkers",
                                   markers = resRight$eventMarkers,
                                   groups = resRight$eventGroups,
                                   eventClass = "FilterEvent")
    DataRecord@eyesDataObject@leftEventsMarkers$filterMarkers <- leftFilterEventMarkers
    # FIX: the right-eye markers were assigned into leftEventsMarkers,
    # clobbering the left markers and never storing the right ones.
    DataRecord@eyesDataObject@rightEventsMarkers$filterMarkers <- rightFilterEventMarkers
  }
  return(DataRecord)
}
fd6858be1d3fa470d6844363ddf66725fb0f943a
d51f182411fb25af96960abb1959826992609654
/plot3.R
eb96347efbc40c96fb095da70c37f5a98fd6857a
[]
no_license
mihir080/Coursera-_Exploratory-Data-Analysis
600a65b59852d4cbe374d23ac72cb57a3142041a
38441b1cd63d48af19f4ed8b0ab943f37f775f27
refs/heads/master
2022-07-24T23:32:56.633494
2020-05-22T15:29:17
2020-05-22T15:29:17
266,133,323
0
0
null
null
null
null
UTF-8
R
false
false
925
r
plot3.R
## plot3.R -- energy sub-metering (three channels) over 1-2 Feb 2007,
## rendered as overlaid line plots to plot3.png.

##Reading the data into R (fields are semicolon-separated)
power <- read.table("consumption.txt", header = TRUE, sep = ";")

## subsetting the data required: keep only the two target days
power2 <- power[as.character(power$Date) %in% c("1/2/2007", "2/2/2007"), ]

## Join the date and time variables of the data
power2$datetime <- paste(power2$Date, power2$Time)
## converting date time to required format (day/month/year hour:min:sec)
power2$datetime <- strptime(power2$datetime, "%d/%m/%Y %H:%M:%S")

##opening required png file
png("plot3.png", width = 480, height = 480, units = "px")

## BUG FIX: the original used attach(power2) with no matching detach(),
## leaving the data frame on the search path after the script ran.
## with() scopes the column references instead.
with(power2, {
  plot(datetime, as.numeric(as.character(Sub_metering_1)), type = "l",
       xlab = "", ylab = "Energy sub metering", ylim = c(0, 40))
  lines(datetime, as.numeric(as.character(Sub_metering_2)), col = "red")
  lines(datetime, as.numeric(as.character(Sub_metering_3)), col = "blue")
})
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

dev.off()
b9e583b66ddca957c205ff231ac0d86a0d4622cd
4f1672ab0833585fd3c29917a267850439d1dc66
/TimeSeriesAnalysis/FunctionFitting_GWAS_SimpleGP.R
3e8234598f8834b715203deee30736a204b17af3
[]
no_license
ter56/Cornell_SMB_MKK3andTimeSeriesAnalysis
69de38a8c0a9f4fdd7c8e054d0ed09103a6e7da4
19260c2fe7a7f4faa44c5f566b1797c439c91ff1
refs/heads/main
2023-08-29T03:14:01.893885
2021-10-25T19:09:22
2021-10-25T19:09:22
373,516,268
0
0
null
null
null
null
UTF-8
R
false
false
106,552
r
FunctionFitting_GWAS_SimpleGP.R
# Package setup (duplicate library() calls for patchwork/knitr removed)
library(Hmisc); library(drc); library(ggplot2); library(readxl); library(reshape2)
library(patchwork); library(rrBLUP); library(plyr); library(dplyr)
library(knitr); library(tidyr); library(fda); library(magic)
library(stringr); library(LDcorSV)
library(ggtext); library(ggh4x)
library(GAPIT3)

# Work relative to the RStudio project root so source()/load() paths resolve
setwd(rprojroot::find_rstudio_root_file())
source('TimeSeriesAnalysis/Functions/FPCA_function.R')
source('TimeSeriesAnalysis/Functions/pca_fun.R')
source('TimeSeriesAnalysis/Functions/pca_score.R')
source('TimeSeriesAnalysis/Functions/tuning_nointer.R')

# Annotate a GAPIT GWAS result table: sort by p-value, add QQ-plot expected
# -log10 percentiles, p-value rank, Benjamini-Hochberg-style FDR-adjusted
# p-values (n * p / rank), -log10(p) and an ordinal genome-position index;
# returned sorted by chromosome then position.
DF_OperationsV2 = function(dataframe){
  dataframe = dataframe[order(dataframe$P.value),]
  dataframe$logPercentileQQplot = -log10(c(1:length(dataframe$SNP))/length(dataframe$SNP))
  dataframe$rank = c(1:length(dataframe$SNP))
  dataframe$FDRPval = length(dataframe$SNP)*dataframe$P.value/dataframe$rank
  dataframe = dataframe[order(dataframe$Chromosome, as.numeric(dataframe$Position)),]
  dataframe$log10PVal = -log10(dataframe$P.value)
  dataframe$ordinal = c(1:length(dataframe$SNP))
  return(dataframe)
}
#Operations to be performed on GAPIToutput$GWAS so that homeMadeManhattanLDPruned works

# Pretty-print the top-n SNPs (by p-value rank) of an annotated GWAS table
TableOutput = function(GWAS.sum.dataframe, n = 20){
  GWAS.sum.dataframe[order(GWAS.sum.dataframe$rank),
                     c('SNP','Chromosome','Position','P.value','maf','FDRPval')][1:n,] %>%
    mutate(maf = as.numeric(as.character(maf))) %>%
    kable(digits = c(0,1,0,8,2,6), align = 'lcccrr')}

# Upper-triangular LD (r2) heatmap for a marker genotype matrix `df`,
# relabelling markers via `markerList` (SNP -> "Chr:Position")
ld_heatmap=function(df, markerList){
  ld <- as.matrix(round(df,0))
  # Recode alternative genotype encodings onto the 0/1/2 scale expected by
  # LD.Measures.  BUG FIX: the original condition `c(-1,3,4) %in% ld`
  # produced a length-3 logical -- an error in if() since R 4.2, and before
  # that only its first element (the -1 check) was ever consulted.
  if(any(c(-1,3,4) %in% ld)){
    ld[which(ld==3)]=2
    ld[which(ld==4)]=2
    ld[which(ld== -1)]=0
  }
  LD <- LD.Measures(donnees=ld, na.presence=F)
  #LD$loc1=as.character(LD$loc1); LD$loc2=as.character(LD$loc2)
  # Pack the pairwise r2 values into an upper-triangular matrix with a
  # unit diagonal; the lower triangle is blanked before melting
  r2 <- matrix(0, nrow=ncol(df), ncol=ncol(df))
  r2[lower.tri(r2, diag=FALSE)] <- LD$r2
  r2 <- t(r2)
  r2 <- as.data.frame(round(r2, 5))
  diag(r2) <- 1
  r2[lower.tri(r2)] = NA
  rownames(r2)=colnames(df); colnames(r2)=rownames(r2)
  r_2=melt(as.matrix(r2), na.rm=T)
  # Relabel markers as "Chr:000Position" (zero-padded so labels sort correctly)
  r_2 = r_2 %>% mutate(ChrPos = mapvalues(as.character(Var1),
from = as.character(markerList$SNP), to = paste0(markerList$Chromosome,':', str_pad(as.character(markerList$Position),pad = "0",width = 9,side = 'left')))) graphic = ggplot(r_2, aes(Var2, ChrPos, fill = value))+ geom_tile(color = "white")+ scale_fill_gradient2(low = "blue", high = "red", mid = "white", midpoint = 0.5, limit = c(0,1), space = "Lab", name="r2") + theme_classic() #+ ggtitle(paste("LD r2 from",colnames(r2)[1],"-", colnames(r2)[length(colnames(r2))], sep=" " )) return(graphic) } # Loading the BLUES from all lines and GAPIT functions ##### setwd(rprojroot::find_rstudio_root_file()) load('PhenotypeData/ProcessedData/2020/GGS2020_BLUE_summary_allTP.RData') load('PhenotypeData/ProcessedData/2019and2020Combined/PM111_GE3_1920_BLUE.RData') load('PhenotypeData/ProcessedData/2019and2020Combined/PM47_GE3_1920_BLUE.RData') load('PhenotypeData/ProcessedData/2019and2020Combined/PM6_GE3_1920_BLUE.RData') load('GenotypeData/myGD_LDpruned_w_KASP.RData') load('GenotypeData/myGM_LDpruned_w_KASP.RData') # Bind BLUEs Together and change results that are above 1 to 1 as noted in script below##### All.BluesGE31920 = all_BLUE %>% mutate(time = PM_date-5) %>% filter(TP %in% c('TP2','TP3','TP5', 'TP7')) %>% select(taxa, GE3, TP, time) %>% rbind(.,PM111_GE3_1920_BLUE %>% mutate(TP = 'TP6', time = 105), PM47_GE3_1920_BLUE %>% mutate(TP = 'TP4', time = 42), PM6_GE3_1920_BLUE %>% mutate(TP = 'TP1', time = 0)) %>% arrange(taxa, TP) %>% mutate(GE3 = ifelse(GE3>0,ifelse(GE3<1,GE3,1),0)) All.BluesGE31920 %>% group_by(taxa) %>% dplyr::tally() %>% select(n) %>%table() GE3GT4Obs = All.BluesGE31920 %>% group_by(taxa) %>% dplyr::tally() %>% filter(n>4) %>% select(taxa) %>% mutate(taxa = unique(taxa)) %>% ungroup() # Fit normal logistic models with 3 parameters for all the lines that we can #### # Three parameter logistic function is used as upper bound is fixed - ie at 100% germ: GE3_3P_logFits = All.BluesGE31920 %>% filter(taxa %in% GE3GT4Obs$taxa & taxa %nin% 
c('Chevallier_D10',"G_31_5","P_16_1",'P_29_5')) %>% arrange(taxa, TP) %>% group_by(taxa) %>% group_modify(~ broom::tidy(drm(GE3~time, data = .x, fct=LL.4(fixed = c(NA, NA, 1, NA), names = c('Rate','Lower','Upper','Centering'))))) %>% do(add_row(., taxa = .$taxa[1],term = 'TimeTo90',curve ='Derived', estimate = as.double(exp(log((1-.[2,4])/(0.90-.[2,4])-1)/.[1,4]+log(.[3,4]))))%>% add_row(., taxa = .$taxa[1],term = 'TimeTo95',curve ='Derived', estimate = as.double(exp(log((1-.[2,4])/(0.95-.[2,4])-1)/.[1,4]+log(.[3,4])))) %>% add_row(., taxa = .$taxa[1],term = 'rTimeTo95',curve ='Derived', estimate = as.double(.[3,4]*exp(log((1-0.95)/0.95)/.[1,4]))) %>% add_row(., taxa = .$taxa[1],term = 'rTimeTo90',curve ='Derived', estimate = as.double(.[3,4]*exp(log((1-0.90)/0.90)/.[1,4]))) ) %>% ungroup() %>% mutate(estimate = as.numeric(ifelse(is.nan(estimate),0, estimate))) GE3_3P_logFits %>% ggplot(aes( x= estimate))+geom_histogram()+facet_wrap(vars(term), scales = 'free') GE3logTaxatoFilter = GE3_3P_logFits %>% filter(term == 'TimeTo90' & estimate >165 | term == 'Centering' & estimate>150 | (term == 'Rate' & (estimate > 0 | estimate < -13))) GE3_3P_logFits %>% group_by(term) %>% summarize(mean = mean(estimate), stand.dev = sd(estimate), median = median(estimate)) GE3_3P_logFits %>% filter(taxa %nin% GE3logTaxatoFilter$taxa) %>%group_by(term) %>% summarize(mean = mean(estimate), stand.dev = sd(estimate), median = median(estimate), max = max(estimate), min = min(estimate)) unique(GE3logTaxatoFilter$taxa) #filters these lines, as well as the problematic lines above: 27 lines removed. 
# The four taxa that were hand-excluded from the logistic fits (printed for the record).
c('Chevallier_D10',"G_31_5","P_16_1",'P_29_5')
GE3_3P_logFits %>% filter(taxa %nin% GE3logTaxatoFilter$taxa) %>% ggplot(aes( x= estimate))+geom_histogram()+facet_wrap(vars(term), scales = 'free')
# Reconstruct each taxon's fitted 3-parameter logistic curve on a 0-150 day grid.
GE3fittedCurves = data.frame()
for (i in unique(GE3_3P_logFits$taxa)){
  time = seq(0,150,3)
  tmp = GE3_3P_logFits %>% filter(taxa ==i)
  # estimate[1]=Rate, estimate[2]=Lower, estimate[3]=Centering (Upper fixed at 1).
  y = tmp$estimate[2]+(1-tmp$estimate[2])/(1+exp(tmp$estimate[1]*(log(time)-log(tmp$estimate[3]))))
  GE3fittedCurves = rbind(GE3fittedCurves, data.frame(taxa = i, time = time, GE3estimate = y))
}
FacetsParams = GE3_3P_logFits %>% filter(taxa %nin% GE3logTaxatoFilter$taxa) %>% ggplot(aes( x= estimate))+geom_histogram()+facet_wrap(vars(term), scales = 'free')
GE3_3P_logFits %>% filter(taxa %nin% GE3logTaxatoFilter$taxa) %>% group_by(taxa) %>% #can we color based on the time?
  ggplot(aes( x= estimate))+geom_histogram()+facet_wrap(vars(term), scales = 'free')
#Now we have our variables - lets run the MLMM models on them and see if we need to filter things out.
# Wide table: one row per taxon, one column per parameter; extreme derived
# times are set to NA so they do not distort the GWAS phenotypes.
GE3_3P_logFits.W = GE3_3P_logFits %>% filter(taxa %nin% GE3logTaxatoFilter$taxa) %>% select(term, taxa, estimate) %>% pivot_wider(names_from = 'term', values_from = 'estimate')%>% mutate(rTimeTo90 = ifelse(rTimeTo90>200,NA,rTimeTo90), rTimeTo95 = ifelse(rTimeTo95>250,NA,rTimeTo95))
dim(GE3_3P_logFits.W) #n = 465
# pairs() panel helpers: correlation text in one triangle, scatter in the other.
panel.cor <- function(x, y){usr <- par("usr"); on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r <- round(cor(x, y, use = 'complete.obs'), digits=2)
  txt <- paste0("R = ", r)
  cex.cor <- 0.8/strwidth(txt)
  text(0.5, 0.5, txt, cex = 2)
} ; upper.panel<-function(x, y){ points(x,y, pch = 19) }# Customize upper panel
pairs(GE3_3P_logFits.W[,2:8], #just another way to look at the data.
      lower.panel = upper.panel, upper.panel = panel.cor)
# GE3 Logistic GWAS ####
# One MLMM GWAS (GAPIT, 2 PCs, MAF > 0.05) per logistic parameter / derived trait;
# each result is summarised with DF_OperationsV2.
GE3Lower.mlmm = GAPIT(Y = as.data.frame(GE3_3P_logFits.W[,c('taxa','Lower')]),GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE3Lower.mlmm.s = DF_OperationsV2(GE3Lower.mlmm$GWAS)
GE3Rate.mlmm = GAPIT(Y = as.data.frame(GE3_3P_logFits.W[,c('taxa','Rate')]),GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE3Rate.mlmm.s = DF_OperationsV2(GE3Rate.mlmm$GWAS)
GE3Center.mlmm = GAPIT(Y = as.data.frame(GE3_3P_logFits.W[,c('taxa','Centering')]),GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE3Center.mlmm.s = DF_OperationsV2(GE3Center.mlmm$GWAS)
GE3Timeto90.mlmm = GAPIT(Y = as.data.frame(GE3_3P_logFits.W[,c('taxa','TimeTo90')]),GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE3Timeto90.mlmm.s = DF_OperationsV2(GE3Timeto90.mlmm$GWAS)
GE3Timeto95.mlmm = GAPIT(Y = as.data.frame(GE3_3P_logFits.W[,c('taxa','TimeTo95')]),GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE3Timeto95.mlmm.s = DF_OperationsV2(GE3Timeto95.mlmm$GWAS)
# rTimeTo traits carry NAs for extreme values; drop them before GAPIT.
GE3rTimeto90.mlmm = GE3_3P_logFits.W %>% filter(rTimeTo90 < 200) %>% select(taxa,rTimeTo90)%>%data.frame()%>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE3rTimeto90.mlmm.s = DF_OperationsV2(GE3rTimeto90.mlmm$GWAS)
GE3rTimeto95.mlmm = GE3_3P_logFits.W %>% filter(rTimeTo95 < 250) %>% select(taxa,rTimeTo95)%>%data.frame()%>% GAPIT(Y =.,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE3rTimeto95.mlmm.s = DF_OperationsV2(GE3rTimeto95.mlmm$GWAS)
# Collect all GE3 logistic GWAS summaries in one named list.
GE3LogisticFits1920.GWAS.S = list(GE3Rate = GE3Rate.mlmm.s, GE3Lower = GE3Lower.mlmm.s, GE3Center=GE3Center.mlmm.s, GE3Timeto90 =GE3Timeto90.mlmm.s, GE3Timeto95= GE3Timeto95.mlmm.s, GE3rTimeto90= GE3rTimeto90.mlmm.s, GE3rTimeto95 =GE3rTimeto95.mlmm.s)
# get markers GE3 logistic GWAS #####
# Top-n markers (by P, MAF > 0.06) from one GWAS summary, labelled with trait/model.
top_n_markers = function(GWAS.sum.df, trait, ModelingType, n = 40){
  GWAS.sum.df %>% arrange(P.value) %>% filter(maf > 0.06) %>% slice_head(n = n) %>% mutate(trait = trait, maf = round(as.numeric(as.character(maf)),2), ModelType = ModelingType)
}
GE3LogisticFits1920.GWAS.S.Traits = c('Rate','Lower','Centering','TimeTo90','TimeTo95','rTimeTo90','rTimeTo95')
GE3LogisticTopMarkers = data.frame()
counter = 1
# `counter` pairs each GWAS list element with its trait label.
for (i in GE3LogisticFits1920.GWAS.S){
  GE3LogisticTopMarkers = rbind(GE3LogisticTopMarkers, top_n_markers(i, trait = GE3LogisticFits1920.GWAS.S.Traits[counter], ModelingType = 'GE3Logistic'))
  counter = counter + 1
}
# Axis positions for Manhattan plots: chromosome midpoints (breaks) and
# cumulative chromosome boundaries (ChromLines), in ordinal SNP units.
SNPperChr = table(GE3LogisticFits1920.GWAS.S$GE3Rate$Chromosome)
MiddleOfChr = SNPperChr/2
breaks = c(); breaks[1]= MiddleOfChr[1]; ChromLines = c();ChromLines[1] = SNPperChr[1]
for(i in 2:length(SNPperChr)){
  # NOTE: `1:i-1` parses as (1:i)-1, i.e. 0:(i-1); index 0 is dropped so this
  # sums chromosomes 1..(i-1) as intended, but the precedence is fragile.
  breaks[i] = sum(SNPperChr[1:i-1])+MiddleOfChr[i]
  ChromLines[i] = sum(SNPperChr[1:i])
}
# 9228 is the ordinal position of the qds1 snp.
# ~10771 is the ordinal for the position of the Sd2 regions
# Manhattan-style plot of the top GE3 logistic MTAs; red lines mark the
# AlaAT (qsd1) and MKK3 (SD2) candidate-gene positions.
GE3LogisticTopMarkers %>% ggplot(aes(x = ordinal, y = log10PVal))+
  geom_point(aes(shape = ModelType, color = trait), size =2.5) +
  geom_vline(xintercept = ChromLines)+
  geom_vline(xintercept = c(9228,10771), color = 'red')+
  annotate(geom = 'text', x = 9228, y = 22, label = 'AlaAt')+
  annotate(geom = 'text', x = 10771, y = 18, label = 'MKK3')+
  scale_x_continuous(label = c("1H","2H", "3H", "4H", "5H", "6H", "7H", "UN"), breaks = breaks)+
  ylab('-log(p)')+xlab(NULL)+
  geom_hline(yintercept = 4)+ ylim(0,30)+
  theme_bw()+labs(title = 'GE3 Logistic MTA, MAF>0.06', color = 'Parameter',shape = 'Model Type')
GE3LogisticTopMarkers %>% filter(P.value <5e-5 & maf >0.07) %>% arrange(trait,Chromosome, Position, P.value)
# De-duplicated significant marker list (one row per SNP) for the LD heatmap.
GE3Marker_List = GE3LogisticTopMarkers %>% filter(P.value <1e-4) %>% arrange(Chromosome, Position, P.value) %>% group_by(SNP) %>% filter(row_number()==1)
GE3Marker_List2 = GE3LogisticTopMarkers %>% filter(P.value <1e-4 & maf >0.07) %>% arrange(trait, Chromosome, Position) %>% select(SNP, Chromosome, Position, P.value, maf,trait)
myGD20_prune %>% select(GE3Marker_List$SNP) %>% ld_heatmap(.,GE3Marker_List)

# Repeat analysis with only the lines whose lower asymptote is < 0.8 #########
FiltGE3Lower.mlmm =GE3_3P_logFits.W %>% filter(Lower<0.8) %>% select(taxa, Lower) %>% as.data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGE3Lower.mlmm.s = DF_OperationsV2(FiltGE3Lower.mlmm$GWAS)
FiltGE3Rate.mlmm = GE3_3P_logFits.W %>% filter(Lower<0.8) %>% select(taxa, Rate) %>% as.data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGE3Rate.mlmm.s = DF_OperationsV2(FiltGE3Rate.mlmm$GWAS)
FiltGE3Center.mlmm = GE3_3P_logFits.W %>% filter(Lower<0.8) %>% select(taxa, Centering) %>% as.data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGE3Center.mlmm.s = DF_OperationsV2(FiltGE3Center.mlmm$GWAS)
FiltGE3Timeto90.mlmm = GE3_3P_logFits.W %>% filter(Lower<0.8) %>% select(taxa, TimeTo90) %>% as.data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
# FIX: summary previously used the UNfiltered GE3Timeto90.mlmm result (copy-paste).
FiltGE3Timeto90.mlmm.s = DF_OperationsV2(FiltGE3Timeto90.mlmm$GWAS)
FiltGE3Timeto95.mlmm = GE3_3P_logFits.W %>% filter(Lower<0.8) %>% select(taxa, TimeTo95) %>% as.data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
# FIX: summary previously used FiltGE3Timeto90.mlmm (wrong trait, copy-paste).
FiltGE3Timeto95.mlmm.s = DF_OperationsV2(FiltGE3Timeto95.mlmm$GWAS)
FiltGE3rTimeto90.mlmm = GE3_3P_logFits.W %>% filter(rTimeTo90 < 200 & Lower < 0.8) %>% select(taxa,rTimeTo90)%>%data.frame()%>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGE3rTimeto90.mlmm.s = DF_OperationsV2(FiltGE3rTimeto90.mlmm$GWAS)
FiltGE3rTimeto95.mlmm = GE3_3P_logFits.W %>% filter(rTimeTo95 < 250 & Lower < 0.8) %>% select(taxa,rTimeTo95)%>%data.frame()%>% GAPIT(Y =.,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGE3rTimeto95.mlmm.s = DF_OperationsV2(FiltGE3rTimeto95.mlmm$GWAS)
FiltGE3LogisticFits1920.GWAS.S = list(fGE3Rate = FiltGE3Rate.mlmm.s, fGE3Lower = FiltGE3Lower.mlmm.s, fGE3Center=FiltGE3Center.mlmm.s, fGE3Timeto90 =FiltGE3Timeto90.mlmm.s, fGE3Timeto95= FiltGE3Timeto95.mlmm.s, fGE3rTimeto90= FiltGE3rTimeto90.mlmm.s, fGE3rTimeto95 =FiltGE3rTimeto95.mlmm.s)
filtGE3LogisticFits1920.GWAS.S.Traits = c('Rate','Lower','Centering','TimeTo90','TimeTo95','rTimeTo90','rTimeTo95')
filtGE3LogisticTopMarkers = data.frame()
counter = 1
# Collect top markers from each filtered-GE3 logistic GWAS; `counter`
# (initialised just above) indexes the matching trait label.
for (i in FiltGE3LogisticFits1920.GWAS.S){
  filtGE3LogisticTopMarkers = rbind(filtGE3LogisticTopMarkers, top_n_markers(i, trait = filtGE3LogisticFits1920.GWAS.S.Traits[counter], ModelingType = 'FiltGE3Logistic'))
  counter = counter + 1
}
filtGE3LogisticTopMarkers %>% filter(maf >0.07 &P.value<5e-5) %>% arrange(trait)%>% select(!c(nobs,effect, logPercentileQQplot, rank, FDRPval, ordinal))
# GE3 FPCA ##############
# Functional PCA needs a complete series: keep only taxa with all 7 timepoints.
GE3.7Obs = All.BluesGE31920 %>% group_by(taxa) %>% dplyr::tally() %>% filter(n==7) %>% select(taxa)
All.BluesGE31920.FPCAInput = All.BluesGE31920 %>% filter(taxa %in% GE3.7Obs$taxa)
GE31920.FPCA = FPCA_function(dfTaxaTraitTime = All.BluesGE31920.FPCAInput[,c('taxa','time','GE3')],
                             Trait = 'GE3', #Trait name must be entered as a character ie Trait = 'GE3'
                             NumKnots = 1, # NumKnots is the number of interior knots to be fitted
                             order = 3, # Order is the degree of the polynomial to be fit to the data.
                             NumObsevationsPerLine = 7)
GE31920.FPCA$EstimatedMeanPlot
GE31920.FPCA$PCs_withTaxa %>% select(FPC1, FPC2, FPC3) %>% pivot_longer(cols = starts_with('FPC'), names_to = 'FPC') %>% ggplot(aes(x = value))+geom_histogram()+facet_wrap(facets =vars(FPC),nrow = 1, scales = 'free')
GE31920.FPCA$RecoveredCurvePlot
GE31920.FPCA$phi.fun.plot
GE31920.FPCA$v1
# First time each FPCA-recovered curve crosses 0.95 / 0.90 germination.
VectorofTimeto95 = as.data.frame(GE31920.FPCA$RecoveredCurves) %>% mutate(time = GE31920.FPCA$phi.fun.df$time) %>% pivot_longer(cols = 1:dim(GE31920.FPCA$PCs_withTaxa)[1],names_to = 'taxa') %>% group_by(taxa) %>% mutate(test = value>0.95) %>% filter(test ==TRUE) %>% arrange(time) %>%slice_head()%>%ungroup() %>% mutate(TimeTo95 = time)
VectorofTimeto90 = as.data.frame(GE31920.FPCA$RecoveredCurves) %>% mutate(time = GE31920.FPCA$phi.fun.df$time) %>% pivot_longer(cols = 1:dim(GE31920.FPCA$PCs_withTaxa)[1],names_to = 'taxa') %>% group_by(taxa) %>% mutate(test = value>0.90) %>% filter(test ==TRUE) %>% arrange(time) %>%slice_head()%>%ungroup() %>% mutate(TimeTo90 = time)
GE31920FPCA.ouputs.longer = GE31920.FPCA$PCs_withTaxa %>% merge(.,VectorofTimeto90, by = 'taxa') %>% select(!c(time,value,test)) %>% merge(.,VectorofTimeto95, by = 'taxa') %>% select(!c(time,value,test)) %>%pivot_longer(cols = c(2:7))
GE31920FPCA.ouputs.longer %>% ggplot(aes(x = value))+geom_histogram()+facet_wrap(facets =vars(name), scales = 'free')
GE31920FPCA.ouputs.longer %>%pivot_wider(names_from = 'name',values_from = 'value') %>% select(!c(taxa,FPC4)) %>% pairs(., lower.panel = upper.panel, upper.panel = panel.cor)
# MLMM GWAS on the first three FPC scores and the derived time-to-threshold traits.
GE31920PC1.mlmm = GAPIT(Y = GE31920.FPCA$PCs_withTaxa[,c('taxa','FPC1')],GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE31920PC1.mlmm.s = DF_OperationsV2(GE31920PC1.mlmm$GWAS)
GE31920PC2.mlmm = GAPIT(Y = GE31920.FPCA$PCs_withTaxa[,c('taxa','FPC2')],GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE31920PC2.mlmm.s = DF_OperationsV2(GE31920PC2.mlmm$GWAS)
GE31920PC3.mlmm = GAPIT(Y = GE31920.FPCA$PCs_withTaxa[,c('taxa','FPC3')],GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE31920PC3.mlmm.s = DF_OperationsV2(GE31920PC3.mlmm$GWAS)
GE31920FPCAtimeto95.mlmm = GAPIT(Y = data.frame(VectorofTimeto95[,c('taxa','TimeTo95')]) ,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE31920FPCAtimeto95.mlmm.s = DF_OperationsV2(GE31920FPCAtimeto95.mlmm$GWAS)
TableOutput(GE31920FPCAtimeto95.mlmm.s)
GE31920FPCAtimeto90.mlmm = GAPIT(Y = data.frame(VectorofTimeto90[,c('taxa','TimeTo90')]) ,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GE31920FPCAtimeto90.mlmm.s = DF_OperationsV2(GE31920FPCAtimeto90.mlmm$GWAS)
TableOutput(GE31920FPCAtimeto90.mlmm.s)
GE3FPCA1920.GWAS.S = list(GE3FPCA_PC1 = GE31920PC1.mlmm.s, GE3FPCA_PC2 = GE31920PC2.mlmm.s, GE3FPCA_PC3 = GE31920PC3.mlmm.s, GE3FPCA_Timeto90 = GE31920FPCAtimeto90.mlmm.s, GE3FPCA_Timeto95 = GE31920FPCAtimeto95.mlmm.s)
GE3FPCA1920.GWAS.S.traits = c('FPC1','FPC2','FPC3','TimeTo90','TimeTo95')
GE3FPCATopMarkers = data.frame()
counter = 1
for (i in GE3FPCA1920.GWAS.S){
  GE3FPCATopMarkers = rbind(GE3FPCATopMarkers, top_n_markers(i, trait = GE3FPCA1920.GWAS.S.traits[counter], ModelingType = 'GE3FPCA'))
  counter = counter + 1
}
# Manhattan plot of the top GE3 FPCA MTAs (same annotation scheme as the logistic plot).
GE3FPCATopMarkers %>% ggplot(aes(x = ordinal, y = log10PVal))+ geom_point(aes(shape = ModelType, color = trait), size =2.5) + geom_vline(xintercept = ChromLines)+ geom_vline(xintercept = c(9228,10771), color = 'red')+ annotate(geom = 'text', x = 9228, y = 22, label = 'AlaAt')+ annotate(geom = 'text', x = 10771, y = 18, label = 'MKK3')+ scale_x_continuous(label = c("1H","2H", "3H", "4H", "5H", "6H", "7H", "UN"), breaks = breaks)+ ylab('-log(p)')+xlab(NULL)+ geom_hline(yintercept = 4)+ ylim(0,30)+ theme_bw()+labs(title = 'GE3 FPCA MTA, MAF>0.06', color = 'Parameter',shape = 'Model Type')
GE3FPCAMarker_List = GE3FPCATopMarkers %>% filter(P.value <1e-4) %>% arrange(Chromosome, Position, P.value) %>% group_by(SNP) %>% filter(row_number()==1)
myGD20_prune %>% select(GE3FPCAMarker_List$SNP) %>% ld_heatmap(.,GE3FPCAMarker_List)
# GI3 Logistic ###############
load('PhenotypeData/ProcessedData/2020/GGS2020_BLUE_summary_allTP.RData')
load('PhenotypeData/ProcessedData/2019and2020Combined/PM111_GI3_1920_BLUE.RData')
load('PhenotypeData/ProcessedData/2019and2020Combined/PM47_GI3_1920_BLUE.RData')
load('PhenotypeData/ProcessedData/2019and2020Combined/PM6_GI3_1920_BLUE.RData')
# Merge 2020 and 2019 data
all1920GIBlues = PM6_GI3_1920_BLUE %>% mutate(TP = 'TP1') %>% rbind(.,(PM47_GI3_1920_BLUE %>% mutate(TP = 'TP4'))) %>% rbind(.,(PM111_GI3_1920_BLUE %>% mutate(TP = 'TP6'))) %>% merge(., all_BLUE, by = c('taxa','TP'), all = TRUE) %>% mutate(GI3 = ifelse(is.na(GI3),GI3scale, GI3), time = PM_date - 5) %>% filter(!is.na(GE3)) %>% select(!c(GE3,GE5,GI3scale,GI5scale))#there are some errant lines that only appeared in 2019 data that must be removed.
dim(all1920GIBlues)
all1920GIBlues %>% ggplot(aes(x = time, y = GI3, group = taxa))+geom_point()
# These make the loop break if all 7 timepoints are used
all1920GIBlues %>% filter(taxa %in%c('G_31_5', 'Megs_song','NY18120B_4','NY18125B_1','Oderbrucker', 'P_2_5','P_26_3','P_36_6', 'SB193R_1','SG514R_4','SN873_3', 'ST1431R_1'))
all1920GIBlues %>% arrange(taxa, TP) %>% group_by(taxa) %>% dplyr::tally() %>% select(n) %>% table()
GI3GT5Obs = all1920GIBlues %>% group_by(taxa) %>% dplyr::tally() %>% filter(n>4) %>% select(taxa) #GT = Greater than
# Four-parameter log-logistic fits (Upper free here, unlike the GE3 fits) plus
# derived time-to-GI-threshold (TimeTo5.0/5.6) and delta-GI terms per taxon.
GI3_4P_logFits = all1920GIBlues %>% filter(taxa %in% GI3GT5Obs$taxa & taxa %nin% c('G_31_5', 'Megs_song','NY18120B_4','NY18125B_1','Oderbrucker', 'P_2_5','P_26_3','P_36_6', 'SB193R_1','SG514R_4','SN873_3', 'ST1431R_1')) %>% arrange(taxa, TP) %>% group_by(taxa) %>% group_modify(~ broom::tidy( drm(GI3~time, data = .x, fct =LL.4(names = c('Rate','Lower','Upper','Centering'))))) %>% do(add_row(., taxa = .$taxa[1],term = 'TimeTo5.0',curve ='Derived', estimate = ifelse(.[2,4] > 5.0, 0,as.double((((.[3,4]-.[2,4])/(5.0-.[2,4])-1)^.[1,4])*.[4,4])))%>% add_row(., taxa = .$taxa[1],term = 'TimeTo5.6',curve ='Derived', estimate = ifelse(.[2,4] > 5.6, 0,as.double((((.[3,4]-.[2,4])/(5.6-.[2,4])-1)^.[1,4])*.[4,4])))%>% add_row(., taxa = .$taxa[1],term = 'DeltaGI95',curve ='Derived', estimate = as.double(.[4,4]*exp(log((1-0.95)/0.95)/.[1,4]))) %>% add_row(., taxa = .$taxa[1],term = 'DeltaGI90',curve ='Derived', estimate = as.double(.[4,4]*exp(log((1-0.90)/0.90)/.[1,4]))) ) %>% ungroup() %>% mutate(estimate = ifelse(is.nan(estimate),NA,estimate)) # We give any models that fail to reach a GI of that level a NA at this point.
GI3_4P_logFits %>% ggplot(aes( x= estimate))+geom_histogram()+facet_wrap(vars(term), scales = 'free')
# Flag taxa with implausible GI3 parameter estimates for removal.
GI3_4P_logFits %>% filter(!(term == 'Lower' & estimate > 10 | term == 'Rate' &(estimate > 0 | estimate < -15) | term == 'Centering' & estimate > 150)) %>% ggplot(aes( x= estimate))+geom_histogram()+facet_wrap(vars(term), scales = 'free')
GI3logTaxatoFilter = GI3_4P_logFits %>% filter((term == 'Lower' & estimate > 10 | term == 'Rate' &(estimate > 0 | estimate < -15) | term == 'Centering' & estimate > 150)) %>% select(taxa) %>% unique()
# Reconstruct each taxon's fitted 4-parameter logistic GI3 curve on a 0-150 day grid.
GI3fittedCurves = data.frame()
for (i in unique(GI3_4P_logFits$taxa)){
  time = seq(0,150,3)
  tmp = GI3_4P_logFits %>% filter(taxa ==i)
  # estimate[1]=Rate, estimate[2]=Lower, estimate[3]=Upper, estimate[4]=Centering.
  y = tmp$estimate[2]+(tmp$estimate[3]-tmp$estimate[2])/(1+exp(tmp$estimate[1]*(log(time)-log(tmp$estimate[4]))))
  GI3fittedCurves = rbind(GI3fittedCurves, data.frame(taxa = i, time = time, GI3estimate = y))
}
GI3fittedCurves %>% ggplot(aes(x = time, y = GI3estimate, group = taxa))+ geom_line() +labs(title = 'GI3 Logistic Fits')
GI3fittedCurves %>% filter(taxa %nin% GI3logTaxatoFilter$taxa) %>% ggplot(aes(x = time, y = GI3estimate, group = taxa))+ geom_line() +labs(title = 'GI3 Logistic Fits')
GI3_4P_logFits %>% filter(taxa %nin% GI3logTaxatoFilter$taxa) %>% ggplot(aes( x= estimate))+geom_histogram()+facet_wrap(vars(term), scales = 'free')
GI3_4P_logFits %>% filter(taxa %nin% GI3logTaxatoFilter$taxa) %>% filter(term %in% c('Centering','Upper','Lower','Rate')) %>% ggplot(aes( x= estimate))+geom_histogram()+facet_wrap(vars(term), scales = 'free')
# Wide table: one row per taxon; extreme derived deltas set to NA.
GI3_4P_logFits.w = GI3_4P_logFits %>% filter(taxa %nin% GI3logTaxatoFilter$taxa) %>% select(taxa, term, estimate) %>% pivot_wider(names_from = 'term', values_from = 'estimate')%>% mutate(DeltaGI = Upper-Lower, DeltaGI90 = ifelse(DeltaGI90>150,NA,DeltaGI90), DeltaGI95 = ifelse(DeltaGI95>200,NA,DeltaGI95))
# One MLMM GWAS per GI3 logistic parameter / derived trait.
GI3Lower.mlmm = GAPIT(Y = as.data.frame(GI3_4P_logFits.w[,c('taxa','Lower')]),GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GI3Lower.mlmm.s = DF_OperationsV2(GI3Lower.mlmm$GWAS)
GI3Rate.mlmm = GAPIT(Y = as.data.frame(GI3_4P_logFits.w[,c('taxa','Rate')]),GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GI3Rate.mlmm.s = DF_OperationsV2(GI3Rate.mlmm$GWAS)
GI3Center.mlmm = GAPIT(Y = as.data.frame(GI3_4P_logFits.w[,c('taxa','Centering')]),GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GI3Center.mlmm.s = DF_OperationsV2(GI3Center.mlmm$GWAS)
GI3Upper.mlmm = GAPIT(Y = as.data.frame(GI3_4P_logFits.w[,c('taxa','Upper')]),GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GI3Upper.mlmm.s = DF_OperationsV2(GI3Upper.mlmm$GWAS)
GI3DeltaGI.mlmm = GAPIT(Y = as.data.frame(GI3_4P_logFits.w[,c('taxa','DeltaGI')]),GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GI3DeltaGI.mlmm.s = DF_OperationsV2(GI3DeltaGI.mlmm$GWAS)
# Derived traits carry NAs for extreme values; drop them before GAPIT.
GI3DeltaGI90.mlmm = GI3_4P_logFits.w %>% select(taxa, DeltaGI90) %>% filter(DeltaGI90<150)%>% data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GI3DeltaGI90.mlmm.s = DF_OperationsV2(GI3DeltaGI90.mlmm$GWAS)
TableOutput(GI3DeltaGI90.mlmm.s)
GI3DeltaGI95.mlmm = GI3_4P_logFits.w %>% select(taxa, DeltaGI95) %>% filter(DeltaGI95<200)%>% data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GI3DeltaGI95.mlmm.s = DF_OperationsV2(GI3DeltaGI95.mlmm$GWAS)
TableOutput(GI3DeltaGI95.mlmm.s)
GI3Timeto5.0.mlmm = GI3_4P_logFits.w %>% select(taxa, TimeTo5.0) %>% filter(TimeTo5.0<250) %>% data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
GI3Timeto5.0.mlmm.s = DF_OperationsV2(GI3Timeto5.0.mlmm$GWAS)
TableOutput(GI3Timeto5.0.mlmm.s)
GI3LogisticFits1920.GWAS.S = list(GI3Rate = GI3Rate.mlmm.s, GI3Lower = GI3Lower.mlmm.s, GI3Center=GI3Center.mlmm.s, GI3Upper = GI3Upper.mlmm.s, GI3DeltaGI= GI3DeltaGI.mlmm.s, GI3DeltaGI90 =GI3DeltaGI90.mlmm.s, GI3DeltaGI95 = GI3DeltaGI95.mlmm.s, GI3Timeto5.0 = GI3Timeto5.0.mlmm.s)
GI3LogisticFits1920.GWAS.S.Traits = c('Rate','Lower','Centering','Upper', 'DeltaGI','DeltaGI90','DeltaGI95', 'TimeTo5.0')
GI3LogisticTopMarkers = data.frame()
counter = 1
for (i in GI3LogisticFits1920.GWAS.S){
  GI3LogisticTopMarkers = rbind(GI3LogisticTopMarkers, top_n_markers(i, trait = GI3LogisticFits1920.GWAS.S.Traits[counter], ModelingType = 'GI3Logistic'))
  counter = counter + 1
}
# Manhattan plot of the top GI3 logistic MTAs.
GI3LogisticTopMarkers %>% ggplot(aes(x = ordinal, y = log10PVal))+ geom_point(aes(shape = ModelType, color = trait), size =2.5) + geom_vline(xintercept = ChromLines)+ geom_vline(xintercept = c(9228,10771), color = 'red')+ annotate(geom = 'text', x = 9228, y = 22, label = 'AlaAt')+ annotate(geom = 'text', x = 10771, y = 18, label = 'MKK3')+ scale_x_continuous(label = c("1H","2H", "3H", "4H", "5H", "6H", "7H", "UN"), breaks = breaks)+ ylab('-log(p)')+xlab(NULL)+ geom_hline(yintercept = 4)+ ylim(0,30)+ theme_bw()+labs(title = 'GI3 Logistic MTA, MAF>0.06', color = 'Parameter',shape = 'Model Type')
GI3LogisticTopMarkers %>% filter(P.value <1e-4) %>% arrange(Chromosome, Position, P.value) %>% group_by(SNP) %>% filter(row_number()==1 | row_number()==n())
GI3Marker_List = GI3LogisticTopMarkers %>% filter(P.value <1e-4) %>% arrange(Chromosome, Position, P.value) %>% group_by(SNP) %>% filter(row_number()==1)
myGD20_prune %>% select(GI3Marker_List$SNP) %>% ld_heatmap(.,GI3Marker_List)
pairs(GI3_4P_logFits.w[,2:5], lower.panel = upper.panel, upper.panel = panel.cor)
# GI3 GWA on parameters after filtering for GI>5 #####
# Repeat every GI3 GWAS restricted to taxa whose lower asymptote is < 5.
FiltGI3Lower.mlmm =GI3_4P_logFits.w %>% filter(Lower<5) %>% select(taxa, Lower) %>% as.data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGI3Lower.mlmm.s = DF_OperationsV2(FiltGI3Lower.mlmm$GWAS)
FiltGI3Rate.mlmm = GI3_4P_logFits.w %>% filter(Lower<5) %>% select(taxa, Rate) %>% as.data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGI3Rate.mlmm.s = DF_OperationsV2(FiltGI3Rate.mlmm$GWAS)
FiltGI3Center.mlmm = GI3_4P_logFits.w %>% filter(Lower<5) %>% select(taxa, Centering) %>% as.data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGI3Center.mlmm.s = DF_OperationsV2(FiltGI3Center.mlmm$GWAS)
FiltGI3Upper.mlmm = GI3_4P_logFits.w %>% filter(Lower<5) %>% select(taxa, Upper) %>% as.data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGI3Upper.mlmm.s = DF_OperationsV2(FiltGI3Upper.mlmm$GWAS)
FiltGI3DeltaGI.mlmm = GI3_4P_logFits.w %>% filter(Lower<5) %>% select(taxa, DeltaGI) %>% as.data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGI3DeltaGI.mlmm.s = DF_OperationsV2(FiltGI3DeltaGI.mlmm$GWAS)
FiltGI3DeltaGI90.mlmm = GI3_4P_logFits.w %>% filter(Lower<5) %>% select(taxa, DeltaGI90) %>% filter(DeltaGI90<150)%>% data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGI3DeltaGI90.mlmm.s = DF_OperationsV2(FiltGI3DeltaGI90.mlmm$GWAS)
FiltGI3DeltaGI95.mlmm = GI3_4P_logFits.w %>% filter(Lower<5) %>% select(taxa, DeltaGI95) %>% filter(DeltaGI95<200)%>% data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGI3DeltaGI95.mlmm.s = DF_OperationsV2(FiltGI3DeltaGI95.mlmm$GWAS)
FiltGI3Timeto5.0.mlmm = GI3_4P_logFits.w %>% filter(Lower<5) %>% select(taxa, TimeTo5.0) %>% filter(TimeTo5.0<250) %>% data.frame() %>% GAPIT(Y = .,GD=myGD20_prune, GM=myGM20_prune,PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
FiltGI3Timeto5.0.mlmm.s = DF_OperationsV2(FiltGI3Timeto5.0.mlmm$GWAS)
# NOTE(review): list names fGI3DetlaGI90/fGI3DetlaGI95 are misspelled ("Detla");
# kept as-is since other code may reference these names.
FiltGI3LogisticFits1920.GWAS.S = list(fGI3Rate = FiltGI3Rate.mlmm.s, fGI3Lower = FiltGI3Lower.mlmm.s, fGI3Center=FiltGI3Center.mlmm.s, fGI3Upper = FiltGI3Upper.mlmm.s, fGI3DeltaGI = FiltGI3DeltaGI.mlmm.s, fGI3DetlaGI90 = FiltGI3DeltaGI90.mlmm.s, fGI3DetlaGI95 = FiltGI3DeltaGI95.mlmm.s, fGI3TimeTo5.0 = FiltGI3Timeto5.0.mlmm.s)
filtGI3LogisticFits1920.GWAS.S.Traits = c('Rate','Lower','Centering','Upper', 'DeltaGI','DeltaGI90','DeltaGI95','TimeTo5.0')
filtGI3LogisticTopMarkers = data.frame()
counter = 1
for (i in FiltGI3LogisticFits1920.GWAS.S){
  filtGI3LogisticTopMarkers = rbind(filtGI3LogisticTopMarkers, top_n_markers(i, trait = filtGI3LogisticFits1920.GWAS.S.Traits[counter], ModelingType = 'FiltGI3Logistic'))
  counter = counter + 1
}
filtGI3LogisticTopMarkers %>% filter(maf >0.07 &P.value<5e-5) %>% arrange(trait)%>% select(!c(nobs,effect, logPercentileQQplot, rank, FDRPval, ordinal))
# GI3 FPCA #################################################
all1920GIBlues$time
# FPCA needs complete series: keep only taxa with all 7 timepoints.
GI3.7Obs = all1920GIBlues %>% group_by(taxa) %>% dplyr::tally() %>% filter(n==7) %>% select(taxa)
all1920GIBlues.FPCAInput = all1920GIBlues %>% filter(taxa %in% GI3.7Obs$taxa)
GI31920.FPCA = FPCA_function(dfTaxaTraitTime = all1920GIBlues.FPCAInput[,c('taxa','time','GI3')],
                             Trait = 'GI3', #Trait name must be entered as a character ie Trait = 'GI3'
                             NumKnots = 1, # NumKnots is the number of interior knots to be fitted
                             order = 3, # Order is the degree of the polynomial to be fit to the data.
                             NumObsevationsPerLine = 7)
GI31920.FPCA$EstimatedMeanPlot
GI31920.FPCA$PCs_withTaxa %>% select(FPC1, FPC2, FPC3) %>% pivot_longer(cols = starts_with('FPC'), names_to = 'FPC') %>% ggplot(aes(x = value))+geom_histogram()+facet_wrap(facets =vars(FPC),nrow = 1, scales = 'free')
GI31920.FPCA$RecoveredCurvePlot
# First time each recovered GI3 curve crosses 5.0 / 5.6.
VectorofTimeto5.0 = as.data.frame(GI31920.FPCA$RecoveredCurves) %>% mutate(time = GI31920.FPCA$phi.fun.df$time) %>% pivot_longer(cols = 1:dim(GI31920.FPCA$PCs_withTaxa)[1],names_to = 'taxa') %>% group_by(taxa) %>% mutate(test = value>5.0) %>% filter(test ==TRUE) %>% arrange(time) %>%slice_head()%>%ungroup() %>% mutate(TimeTo5.0 = time)
hist(VectorofTimeto5.0$TimeTo5.0)
VectorofTimeto5.6 = as.data.frame(GI31920.FPCA$RecoveredCurves) %>% mutate(time = GI31920.FPCA$phi.fun.df$time) %>% pivot_longer(cols = 1:dim(GI31920.FPCA$PCs_withTaxa)[1],names_to = 'taxa') %>% group_by(taxa) %>% mutate(test = value>5.6) %>% filter(test ==TRUE) %>% arrange(time) %>%slice_head()%>%ungroup() %>% mutate(TimeTo5.6 = time)
hist(VectorofTimeto5.6$TimeTo5.6)
GI31920FPCA.ouputs.longer = GI31920.FPCA$PCs_withTaxa %>% merge(.,VectorofTimeto5.0, by = 'taxa',all.x = TRUE) %>% select(!c(time,value,test)) %>% merge(.,VectorofTimeto5.6, by = 'taxa',all.x = TRUE) %>% select(!c(time,value,test)) %>%pivot_longer(cols = c(2:7))
GI31920FPCA.ouputs.longer %>% ggplot(aes(x = value))+ geom_histogram()+facet_wrap(facets =vars(name), scales = 'free')
GI31920FPCA.ouputs.longer %>%pivot_wider(names_from = 'name',values_from = 'value') %>% select(!c(taxa,FPC4)) %>% pairs(., lower.panel = upper.panel, upper.panel = panel.cor)
GI31920.FPCA$v1
GI31920PC1.mlmm = GAPIT(Y = GI31920.FPCA$PCs_withTaxa[,c('taxa','FPC1')],GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05)
# --- GWAS on GI FPCA outputs ----------------------------------------------
# Run MLMM GWAS (GAPIT) on each GI functional principal component score and
# on the FPCA-derived time-to-threshold values, then post-process each result
# with DF_OperationsV2() (adds the log10PVal / FDRPval / ordinal / rank
# columns used downstream).
GI31920PC1.mlmm.s = DF_OperationsV2(GI31920PC1.mlmm$GWAS)
GI31920PC2.mlmm = GAPIT(Y = GI31920.FPCA$PCs_withTaxa[,c('taxa','FPC2')],GD=myGD20_prune, GM=myGM20_prune,
                        PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F,
                        file.output=F,SNP.MAF = 0.05)
GI31920PC2.mlmm.s = DF_OperationsV2(GI31920PC2.mlmm$GWAS)
GI31920PC3.mlmm = GAPIT(Y = GI31920.FPCA$PCs_withTaxa[,c('taxa','FPC3')],GD=myGD20_prune, GM=myGM20_prune,
                        PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F,
                        file.output=F,SNP.MAF = 0.05)
GI31920PC3.mlmm.s = DF_OperationsV2(GI31920PC3.mlmm$GWAS)
# GWAS on the FPCA-recovered time to reach GI = 5.0 and GI = 5.6.
GI31920FPCAtimeto5.0.mlmm = GAPIT(Y = data.frame(VectorofTimeto5.0[,c('taxa','TimeTo5.0')]) ,GD=myGD20_prune, GM=myGM20_prune,
                                  PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F,
                                  file.output=F,SNP.MAF = 0.05)
GI31920FPCAtimeto5.0.mlmm.s = DF_OperationsV2(GI31920FPCAtimeto5.0.mlmm$GWAS)
TableOutput(GI31920FPCAtimeto5.0.mlmm.s)
GI31920FPCAtimeto5.6.mlmm = GAPIT(Y = data.frame(VectorofTimeto5.6[,c('taxa','TimeTo5.6')]) ,GD=myGD20_prune, GM=myGM20_prune,
                                  PCA.total = 2, Geno.View.output=F, model="MLMM", Major.allele.zero = F,
                                  file.output=F,SNP.MAF = 0.05)
GI31920FPCAtimeto5.6.mlmm.s = DF_OperationsV2(GI31920FPCAtimeto5.6.mlmm$GWAS)
TableOutput(GI31920FPCAtimeto5.6.mlmm.s)
# Collect the five GI-FPCA GWAS result tables; element order parallels the
# trait-name vector defined just below.
GI3FPCA1920.GWAS.S = list(GI3FPCA_PC1 = GI31920PC1.mlmm.s,
                          GI3FPCA_PC2 = GI31920PC2.mlmm.s,
                          GI3FPCA_PC3 = GI31920PC3.mlmm.s,
                          GI3FPCA_Timeto5.0 = GI31920FPCAtimeto5.0.mlmm.s,
                          GI3FPCA_Timeto5.6 = GI31920FPCAtimeto5.6.mlmm.s)
GI3FPCA1920.GWAS.S.traits = c('FPC1','FPC2','FPC3','TimeTo5.0','TimeTo5.6')
# Pull the top markers from each result table, labelling each batch with the
# trait it came from (counter walks the trait-name vector in step with the list).
GI3FPCATopMarkers = data.frame()
counter = 1
for (i in GI3FPCA1920.GWAS.S){
  GI3FPCATopMarkers = rbind(GI3FPCATopMarkers,
                            top_n_markers(i, trait = GI3FPCA1920.GWAS.S.traits[counter],
                                          ModelingType = 'GI3FPCA'))
  counter = counter + 1
}
# Manhattan-style plot of the top GI-FPCA markers; red lines/labels mark the
# AlaAt and MKK3 candidate positions on the ordinal SNP axis.
GI3FPCATopMarkers %>%
  ggplot(aes(x = ordinal, y = log10PVal))+
  geom_point(aes(shape = ModelType, color = trait), size =2.5) +
  geom_vline(xintercept = ChromLines)+
  geom_vline(xintercept = c(9228,10771), color = 'red')+
  annotate(geom = 'text', x = 9228, y = 22, label = 'AlaAt')+
  annotate(geom = 'text', x = 10771, y = 18, label = 'MKK3')+
  scale_x_continuous(label = c("1H","2H", "3H", "4H", "5H", "6H", "7H", "UN"), breaks = breaks)+
  ylab('-log(p)')+xlab(NULL)+
  geom_hline(yintercept = 4)+
  ylim(0,30)+
  theme_bw()+labs(title = 'GI3 FPCA MTA, MAF>0.06', color = 'Parameter',shape = 'Model Type')
# Inspect markers below p = 1e-4 (best and worst hit per SNP), then keep the
# single best row per SNP as the marker list for the LD heatmap.
GI3FPCATopMarkers %>% filter(P.value <1e-4) %>%
  arrange(Chromosome, Position, P.value) %>%
  group_by(SNP) %>% filter(row_number()==1 | row_number()==n())
GI3FPCAMarker_List = GI3FPCATopMarkers %>% filter(P.value <1e-4) %>%
  arrange(Chromosome, Position, P.value) %>%
  group_by(SNP) %>% filter(row_number()==1)
# LD heatmap among the selected GI-FPCA markers.
myGD20_prune %>% select(GI3FPCAMarker_List$SNP) %>%
  ld_heatmap(.,GI3FPCAMarker_List)
# Correlation between TP1 to 7 values for GI and GE and the time series values predicted for those times #####
# 0 14 28 42 63 105 154 ARE THE TIMES THAT things were measured,so we need close to that...
# --- Agreement between recovered curves and observed BLUEs ----------------
# The FPCA evaluation grid does not land exactly on the measured days
# (0, 14, 28, 42, 63, 105, 154), so the nearest grid times are matched and
# relabelled before correlating fitted vs observed values at each time point.
GE31920.FPCA$RecoveredCurves %>% as.data.frame() %>%
  mutate(time = round(GE31920.FPCA$phi.fun.df$time,2)) %>%
  pivot_longer(cols = 1:484, values_to = 'GE3', names_to = 'taxa') %>%
  select(taxa,time,GE3) %>%
  filter(time %in% c(0,13.91,27.81,42.23,62.84,105.07,154)) %>%
  mutate(time = mapvalues(time, from =c(0,13.91,27.81,42.23,62.84,105.07,154),to = c(0,14,28,42,63,105,154))) %>%
  rename(fGE = GE3) %>%
  merge(All.BluesGE31920.FPCAInput, by=c('taxa','time')) %>%
  group_by(time) %>%summarise(correlation = cor(fGE,GE3))
GI31920.FPCA$RecoveredCurves %>% as.data.frame() %>%
  mutate(time = round(GI31920.FPCA$phi.fun.df$time,2)) %>%
  pivot_longer(cols = 1:484, values_to = 'GI3', names_to = 'taxa') %>%
  select(taxa,time,GI3) %>%
  filter(time %in% c(0,13.91,27.81,42.23,62.84,105.07,154)) %>%
  mutate(time = mapvalues(time, from =c(0,13.91,27.81,42.23,62.84,105.07,154),to = c(0,14,28,42,63,105,154))) %>%
  rename(fGI = GI3) %>%
  merge(all1920GIBlues.FPCAInput, by=c('taxa','time')) %>%
  group_by(time) %>%summarise(correlation = cor(fGI,GI3))
# Evaluate the 3-parameter logistic GE fits at the measured times:
# y = lower + (1 - lower) / (1 + exp(rate * (log(t) - log(center)))).
GE3logEstimatesForComparison = data.frame()
for (i in unique(GE3_3P_logFits$taxa)){
  time =c(0,14,28,42,63,105,154)
  tmp = GE3_3P_logFits %>% filter(taxa ==i)
  y = tmp$estimate[2]+(1-tmp$estimate[2])/(1+exp(tmp$estimate[1]*(log(time)-log(tmp$estimate[3]))))
  GE3logEstimatesForComparison = rbind(GE3logEstimatesForComparison,
                                       data.frame(taxa = i, time = time, GE3estimate = y))
}
GE3logEstimatesForComparison %>%
  merge(.,All.BluesGE31920 %>%
          filter(taxa %in% GE3GT4Obs$taxa & taxa %nin% c('Chevallier_D10',"G_31_5","P_16_1",'P_29_5')),
        by = c('taxa','time')) %>%
  group_by(time) %>% summarise(correlation = cor(GE3, GE3estimate))
# Evaluate the 4-parameter logistic GI fits at the measured times:
# y = lower + (upper - lower) / (1 + exp(rate * (log(t) - log(center)))).
GI3logEstimatesForComparison = data.frame()
# BUG FIX: this loop previously iterated over unique(GE3_3P_logFits$taxa)
# while pulling parameters from GI3_4P_logFits, so any taxa present only in
# the GE fit table produced all-NA estimate rows. Iterate the GI fit taxa.
for (i in unique(GI3_4P_logFits$taxa)){
  time =c(0,14,28,42,63,105,154)
  tmp = GI3_4P_logFits %>% filter(taxa ==i)
  y = tmp$estimate[2]+(tmp$estimate[3]-tmp$estimate[2])/(1+exp(tmp$estimate[1]*(log(time)-log(tmp$estimate[4]))))
  GI3logEstimatesForComparison = rbind(GI3logEstimatesForComparison,
                                       data.frame(taxa = i, time = time, GI3estimate = y))
}
GI3logEstimatesForComparison %>%
  merge(.,all1920GIBlues %>%
          filter(taxa %in% GI3GT5Obs$taxa & taxa %nin% c('G_31_5', 'Megs_song','NY18120B_4','NY18125B_1','Oderbrucker',
                                                         'P_2_5','P_26_3','P_36_6',
                                                         'SB193R_1','SG514R_4','SN873_3','ST1431R_1')),
        by = c('taxa','time')) %>%
  group_by(time) %>% summarise(correlation = cor(GI3, GI3estimate))
# Sum total of markers #########
# Combined Manhattan plot of top markers from all four functional-modelling
# GWAS sets (GI/GE x FPCA/logistic), MAF > 0.07.
rbind(GI3FPCATopMarkers, GE3FPCATopMarkers, GI3LogisticTopMarkers, GE3LogisticTopMarkers) %>%
  filter(maf >0.07) %>%
  mutate(ModelTypeParam = paste0(ModelType,trait)) %>%
  ggplot(aes(x = ordinal, y = log10PVal, alpha = maf))+
  geom_point(aes(shape = ModelType, color = trait), size =2.5) +
  geom_vline(xintercept = ChromLines)+
  geom_vline(xintercept = c(9228,10771), color = 'red')+
  annotate(geom = 'text', x = 9228, y = 22, label = 'AlaAt')+
  annotate(geom = 'text', x = 10771, y = 18, label = 'MKK3')+
  scale_x_continuous(label = c("1H","2H", "3H", "4H", "5H", "6H", "7H", "UN"), breaks = breaks)+
  ylab('-log(p)')+xlab(NULL)+
  geom_hline(yintercept = 4)+
  ylim(0,30)+
  theme_bw()+labs(title = 'All MTA, MAF>0.07', color = 'Parameter',shape = 'Model Type')
# Significant markers across all functional models (stricter MAF and p cutoffs).
SigMarkers = rbind(GI3FPCATopMarkers, GE3FPCATopMarkers, GI3LogisticTopMarkers, GE3LogisticTopMarkers) %>%
  filter(maf >0.1 & P.value < 5e-5) %>%
  arrange(Chromosome, Position)
SigMarkers %>% arrange(Chromosome, Position, P.value) %>%select(!c(nobs,effect, logPercentileQQplot, rank, FDRPval, ordinal))
u = SigMarkers %>% select(SNP) %>% unique()
# Single Time point GWAS for Comparison #####
# All.BluesGE31920 is input
GE31920.TP1.mlmm = All.BluesGE31920 %>% filter(TP =='TP1') %>% select(taxa, GE3) %>%data.frame() %>%
  GAPIT(.,GD=myGD20_prune, GM=myGM20_prune,
        PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F,
        file.output=F,SNP.MAF = 0.05)
GE31920.TP1.mlmm.s = DF_OperationsV2(GE31920.TP1.mlmm$GWAS)
GE31920.TP2.mlmm = All.BluesGE31920 %>% filter(TP =='TP2') %>% select(taxa, GE3) %>%data.frame() %>%
GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GE31920.TP2.mlmm.s = DF_OperationsV2(GE31920.TP2.mlmm$GWAS) GE31920.TP3.mlmm = All.BluesGE31920 %>% filter(TP =='TP3') %>% select(taxa, GE3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GE31920.TP3.mlmm.s = DF_OperationsV2(GE31920.TP3.mlmm$GWAS) GE31920.TP4.mlmm = All.BluesGE31920 %>% filter(TP =='TP4') %>% select(taxa, GE3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GE31920.TP4.mlmm.s = DF_OperationsV2(GE31920.TP4.mlmm$GWAS) GE31920.TP5.mlmm = All.BluesGE31920 %>% filter(TP =='TP5') %>% select(taxa, GE3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GE31920.TP5.mlmm.s = DF_OperationsV2(GE31920.TP5.mlmm$GWAS) GE31920.TP6.mlmm = All.BluesGE31920 %>% filter(TP =='TP6') %>% select(taxa, GE3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GE31920.TP6.mlmm.s = DF_OperationsV2(GE31920.TP6.mlmm$GWAS) GE31920.TP7.mlmm = All.BluesGE31920 %>% filter(TP =='TP7') %>% select(taxa, GE3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GE31920.TP7.mlmm.s = DF_OperationsV2(GE31920.TP7.mlmm$GWAS) GE31920PerTP.GWAS.S = list( TP1GE31920 = GE31920.TP1.mlmm.s, TP2GE31920 = GE31920.TP2.mlmm.s, TP3GE31920 = GE31920.TP3.mlmm.s, TP4GE31920 = GE31920.TP4.mlmm.s, TP5GE31920 = GE31920.TP5.mlmm.s, TP6GE31920 = GE31920.TP6.mlmm.s, TP7GE31920 = GE31920.TP7.mlmm.s ) 
GE3perTP.GWAS.traits = c('TP1 GE3','TP2 GE3','TP3 GE3','TP4 GE3','TP5 GE3','TP6 GE3','TP7 GE3') GE3perTPTopMarkers = data.frame() counter = 1 for (i in GE31920PerTP.GWAS.S){ GE3perTPTopMarkers = rbind(GE3perTPTopMarkers, top_n_markers(i, trait = GE3perTP.GWAS.traits[counter], ModelingType = 'GE3 per Time Point')) counter = counter + 1 } GE3perTPTopMarkers %>% filter(maf >0.07) %>% mutate(ModelTypeParam = paste0(ModelType,trait)) %>% ggplot(aes(x = ordinal, y = log10PVal, alpha = maf))+ geom_point(aes(shape = ModelType, color = trait), size =2.5) + geom_vline(xintercept = ChromLines)+ geom_vline(xintercept = c(9228,10771), color = 'red')+ annotate(geom = 'text', x = 9228, y = 22, label = 'AlaAt')+ annotate(geom = 'text', x = 10771, y = 18, label = 'MKK3')+ scale_x_continuous(label = c("1H","2H", "3H", "4H", "5H", "6H", "7H", "UN"), breaks = breaks)+ ylab('-log(p)')+xlab(NULL)+ geom_hline(yintercept = 4)+ ylim(0,30)+ theme_bw()+labs(title = 'All MTA, MAF>0.07', color = 'Parameter',shape = 'Model Type') # all1920GIBlues GI3 is input GI31920.TP1.mlmm = all1920GIBlues %>% filter(TP =='TP1') %>% select(taxa, GI3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GI31920.TP1.mlmm.s = DF_OperationsV2(GI31920.TP1.mlmm$GWAS) GI31920.TP2.mlmm = all1920GIBlues %>% filter(TP =='TP2') %>% select(taxa, GI3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GI31920.TP2.mlmm.s = DF_OperationsV2(GI31920.TP2.mlmm$GWAS) GI31920.TP3.mlmm = all1920GIBlues %>% filter(TP =='TP3') %>% select(taxa, GI3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GI31920.TP3.mlmm.s = DF_OperationsV2(GI31920.TP3.mlmm$GWAS) GI31920.TP4.mlmm = all1920GIBlues %>% filter(TP 
=='TP4') %>% select(taxa, GI3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GI31920.TP4.mlmm.s = DF_OperationsV2(GI31920.TP4.mlmm$GWAS) GI31920.TP5.mlmm = all1920GIBlues %>% filter(TP =='TP5') %>% select(taxa, GI3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GI31920.TP5.mlmm.s = DF_OperationsV2(GI31920.TP5.mlmm$GWAS) GI31920.TP6.mlmm = all1920GIBlues %>% filter(TP =='TP6') %>% select(taxa, GI3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GI31920.TP6.mlmm.s = DF_OperationsV2(GI31920.TP6.mlmm$GWAS) GI31920.TP7.mlmm = all1920GIBlues %>% filter(TP =='TP7') %>% select(taxa, GI3) %>%data.frame() %>% GAPIT(.,GD=myGD20_prune, GM=myGM20_prune, PCA.total = 2,Geno.View.output=F, model="MLMM", Major.allele.zero = F, file.output=F,SNP.MAF = 0.05) GI31920.TP7.mlmm.s = DF_OperationsV2(GI31920.TP7.mlmm$GWAS) GI31920PerTP.GWAS.S = list( TP1GI31920 = GI31920.TP1.mlmm.s, TP2GI31920 = GI31920.TP2.mlmm.s, TP3GI31920 = GI31920.TP3.mlmm.s, TP4GI31920 = GI31920.TP4.mlmm.s, TP5GI31920 = GI31920.TP5.mlmm.s, TP6GI31920 = GI31920.TP6.mlmm.s, TP7GI31920 = GI31920.TP7.mlmm.s ) GI3perTP.GWAS.traits = c('TP1 GI3','TP2 GI3','TP3 GI3','TP4 GI3','TP5 GI3','TP6 GI3','TP7 GI3') GI3perTPTopMarkers = data.frame() counter = 1 for (i in GI31920PerTP.GWAS.S){ GI3perTPTopMarkers = rbind(GI3perTPTopMarkers, top_n_markers(i, trait = GI3perTP.GWAS.traits[counter], ModelingType = 'GI3 per Time Point')) counter = counter + 1 } GI3perTPTopMarkers %>% filter(maf >0.07) %>% mutate(ModelTypeParam = paste0(ModelType,trait)) %>% ggplot(aes(x = ordinal, y = log10PVal, alpha = maf))+ geom_point(aes(shape = ModelType, color = trait), size =2.5) + geom_vline(xintercept = 
ChromLines)+ geom_vline(xintercept = c(9228,10771), color = 'red')+ annotate(geom = 'text', x = 9228, y = 22, label = 'AlaAt')+ annotate(geom = 'text', x = 10771, y = 18, label = 'MKK3')+ scale_x_continuous(label = c("1H","2H", "3H", "4H", "5H", "6H", "7H", "UN"), breaks = breaks)+ ylab('-log(p)')+xlab(NULL)+ geom_hline(yintercept = 4)+ ylim(0,30)+ theme_bw()+labs(title = 'All MTA, MAF>0.07', color = 'Parameter',shape = 'Model Type') setwd(rprojroot::find_rstudio_root_file()) # To save all the outputs uncomment and change the dirrectory to where you would like it # setwd('TimeSeriesAnalysis/Output/') # save(GI3FPCA1920.GWAS.S, file = 'GI3FPCA1920.GWAS.S.RData') # save(GE3FPCA1920.GWAS.S, file = 'GE3FPCA1920.GWAS.S.RData') # save(GI3LogisticFits1920.GWAS.S, file = 'GI3LogisticFits1920.GWAS.S.RData') # save(GE3LogisticFits1920.GWAS.S, file = 'GE3LogisticFits1920.GWAS.S.RData') # save(GI31920PerTP.GWAS.S, file = 'GI31920PerTP.GWAS.S.RData') # save(GE31920PerTP.GWAS.S, file = 'GE31920PerTP.GWAS.S.RData') # save(FiltGE3LogisticFits1920.GWAS.S, file ='FiltGE3LogisticFits1920.GWAS.S.RData') # save(FiltGI3LogisticFits1920.GWAS.S, file = 'FiltGI3LogisticFits1920.GWAS.S.RData') # setwd(rprojroot::find_rstudio_root_file()) ############################## ### Genomic Prediction ####### setwd(rprojroot::find_rstudio_root_file()) load('SpringBarley/GenotypeData/myGD_LDpruned_w_KASP.RData') load('SpringBarley/PhenotypeData/ProcessedData/2020/GGS2020_BLUE_summary_allTP.RData') # We are interested in the GP power within our # breeding program so the JIC and naked barleys were excluded PopulationKey = read.csv(file = 'SpringBarley/GenotypeData/PopulationKey.csv') %>% select(!X) CULines =PopulationKey %>% filter(Population %in% c("check","C1G","C2G","C1P","base")) CUTP1Germs = all_BLUE %>% filter(TP == "TP1" & taxa %in% CULines$taxa &taxa %nin% c('P_5_3','P_8_6')) myGD20_pruneCU = myGD20_prune %>% filter(taxa %in% CULines$taxa) MAFList = data.frame() counter = 2 MarkerNames = 
colnames(myGD20_pruneCU)
# Allele frequency per marker: genotypes are coded 0/1/2 (converted to
# -1/0/1 downstream via "[,-1]-1"), so sum(col) / (2 * n individuals) is the
# frequency of the counted allele.
# NOTE: replaces a hard-coded divisor of 422 (the CU population size at the
# time of writing) with nrow(myGD20_pruneCU) so the code survives population
# changes, and vectorises the old rbind()-per-marker loop (O(n^2) growth over
# thousands of markers). The MAFList/counter initialisation above is now
# redundant but harmless.
MAFList = data.frame(MarkerName = MarkerNames[-1],
                     maf = unname(vapply(myGD20_pruneCU[,-1],
                                         function(col) sum(as.numeric(col)),
                                         numeric(1))) / (2 * nrow(myGD20_pruneCU)))
MAFList =MAFList %>% mutate(filt95 = maf<0.95, filt90 = maf<0.9)
# Keep the taxa column (leading TRUE) plus markers passing the 95% filter.
myGD20_pruneCUfilt = myGD20_pruneCU[,which(c(TRUE,MAFList$filt95))]
myGD20_pruneCUfilt[1:5,1:5]
#takes 91 out to drop mkk3
Mkk3taxa = myGD20_pruneCUfilt %>% select(taxa, MKK3_E165Q) %>% filter(MKK3_E165Q==0) %>% select(taxa)
dim(myGD20_pruneCUfilt)
myGM20_pruneCU = myGM20_prune[which(MAFList$filt95),]

#' Repeated random-split cross-validated genomic prediction (RR-BLUP).
#'
#' @param Phenotype  full numeric vector of phenotypes.
#' @param myGD       n taxa x N markers data frame, -1/0/1 coded, no 'taxa' column.
#' @param numfolds   number of random train/test splits to run (default 50).
#' @param datasplit  training fraction as a decimal (e.g. 0.8).
#' @param trait_name label recorded in the output.
#' @return data frame of observed/predicted pairs with the per-fold accuracy.
FoldCVGP = function(Phenotype, myGD, numfolds=50,datasplit, trait_name){
  num_entries = length(Phenotype)
  Training_size = round(datasplit*num_entries,0)
  start_time <- Sys.time()
  set.seed(1)  # reproducible fold draws
  # Preallocate one slot per fold instead of growing the result with rbind().
  folds = vector("list", numfolds)
  for (i in seq_len(numfolds)){
    trainingSet = sort(sample(1:num_entries,Training_size))
    testingSet = setdiff(1:num_entries,trainingSet)
    y_train = Phenotype[trainingSet]
    y_test = Phenotype[testingSet]
    marker_train = myGD[trainingSet,]
    marker_test = myGD[testingSet,]
    # Fit marker effects by ridge-regression BLUP (rrBLUP::mixed.solve) and
    # predict the held-out lines from their marker scores.
    trained.Model = mixed.solve(y_train, Z = marker_train, K = NULL, SE =FALSE)
    PredictedPheno = as.matrix(marker_test) %*% as.matrix(trained.Model$u)
    print(cor(y_test,PredictedPheno, use = 'complete.obs'))
    folds[[i]] = data.frame(TruePheno = y_test, PredPheno = PredictedPheno,
                            fold = i, trait = trait_name,
                            correlation = cor(y_test,PredictedPheno, use = 'complete.obs'))
  }
  end_time <- Sys.time()
  print(end_time-start_time)
  do.call(rbind, folds)
}

#' As FoldCVGP(), but each fold first subsamples the population down to
#' sample_size lines so accuracies are comparable to a smaller population.
#'
#' @param Phenotype1  full numeric vector of phenotypes.
#' @param myGD1       n taxa x N markers data frame, -1/0/1 coded, no 'taxa' column.
#' @param numfolds    number of random subsample+split repetitions (default 50).
#' @param datasplit   training fraction as a decimal (e.g. 0.8).
#' @param trait_name  label recorded in the output.
#' @param sample_size number of lines to subsample before each train/test split.
#' @return data frame of observed/predicted pairs with the per-fold accuracy.
FoldCVGPSamePopSize = function(Phenotype1, myGD1, numfolds=50,datasplit, trait_name, sample_size){
  set.seed(1)  # reproducible subsample + fold draws
  num_entries = length(Phenotype1)
  Training_size = round(datasplit*sample_size,0)
  start_time <- Sys.time()
  # Preallocate one slot per fold instead of growing the result with rbind().
  folds = vector("list", numfolds)
  for (i in seq_len(numfolds)){
    subsampled = sort(sample(1:num_entries, sample_size))
    Phenotype = Phenotype1[subsampled]
    myGD = myGD1[subsampled,]
    trainingSet = sort(sample(1:sample_size,Training_size))
    testingSet = setdiff(1:sample_size,trainingSet)
    y_train = Phenotype[trainingSet]
    y_test = Phenotype[testingSet]
    marker_train = myGD[trainingSet,]
    marker_test = myGD[testingSet,]
    trained.Model = mixed.solve(y_train, Z = marker_train, K = NULL, SE =FALSE)
    PredictedPheno = as.matrix(marker_test) %*% as.matrix(trained.Model$u)
    print(cor(y_test,PredictedPheno, use = 'complete.obs'))
    folds[[i]] = data.frame(TruePheno = y_test, PredPheno = PredictedPheno,
                            fold = i, trait = trait_name,
                            correlation = cor(y_test,PredictedPheno, use = 'complete.obs'))
  }
  end_time <- Sys.time()
  print(end_time-start_time)
  do.call(rbind, folds)
}

# PHS genomic prediction in the CU material: full population, MKK3 N-allele
# carriers removed, and the full population subsampled to the no-MKK3 size.
GPPHSCU = rbind(FoldCVGP((PHS.blues %>% filter(taxa %in% CULines$taxa &taxa %nin% c('P_5_3','P_8_6')))$PHS,
                         myGD = myGD20_pruneCUfilt[,-1]-1, #convert to correct format
                         datasplit = .8, trait_name = 'perTP_PHS_withVND'),
                FoldCVGP((PHS.blues %>% filter(taxa %in% CULines$taxa & taxa %nin% c('P_5_3','P_8_6') & taxa %in% Mkk3taxa$taxa))$PHS,
                         myGD = (myGD20_pruneCUfilt %>% filter(taxa %in% Mkk3taxa$taxa))[,-1]-1, #convert to correct format
                         datasplit = .8, trait_name = 'perTP_PHS_noVND'),
                FoldCVGPSamePopSize((PHS.blues %>% filter(taxa %in% CULines$taxa &taxa %nin% c('P_5_3','P_8_6')))$PHS,
                                    myGD = myGD20_pruneCUfilt[,-1]-1, #convert to correct format
                                    datasplit = .8, trait_name = 'perTP_PHS_withVNDSamePop',
                                    sample_size = length((PHS.blues %>% filter(taxa %in% CULines$taxa & taxa %nin% c('P_5_3','P_8_6') & taxa %in% Mkk3taxa$taxa))$PHS)))
GPPHSCU %>%
group_by(trait) %>% summarise(mean(correlation)) # GP Per TP GP using all the 1920BLUES.##### # All.BluesGE31920 # contians the GE3 # all1920GIBlues # contains the GI3 all_BLUE.w =All.BluesGE31920 %>% select(!time) %>% mutate(trait = 'GE3') %>% pivot_wider(names_from =c( 'TP', 'trait'), values_from = 'GE3') %>% join(all1920GIBlues %>% select(!c('PM_date','time','date')) %>% mutate(trait = 'GI3scale') %>% pivot_wider(names_from =c( 'TP', 'trait'), values_from = 'GI3' )) %>% filter(taxa %in% CULines$taxa &taxa %nin% c('P_5_3','P_8_6')) PhenoNames = gsub(gsub(x = colnames(all_BLUE.w),pattern = '_', replacement = ''),pattern = 'scale',replacement = '') GPPerTP = data.frame() counter = 2 for (col in all_BLUE.w[,-1]){ temp = FoldCVGP(col, myGD20_pruneCUfilt[,-1]-1, #convert to correct format datasplit = .8, trait_name = paste0('perTP_',PhenoNames[counter],'_withVND')) temp2 = FoldCVGP(col[which(all_BLUE.w$taxa %in% Mkk3taxa$taxa)], (myGD20_pruneCUfilt %>% filter(taxa %in% Mkk3taxa$taxa & taxa %in% all_BLUE.w$taxa))[,-1]-1, datasplit = .8, trait_name = paste0('perTP_',PhenoNames[counter],'_noVND')) temp3 = FoldCVGPSamePopSize(col, myGD20_pruneCUfilt[,-1]-1, #convert to correct format datasplit = .8, trait_name = paste0('perTP_',PhenoNames[counter],'_withVNDSamePop'), sample_size = length(col[which(all_BLUE.w$taxa %in% Mkk3taxa$taxa)])) GPPerTP = rbind(GPPerTP,temp,temp2,temp3) counter = counter+1 } # GP GE3 Logistic fits ##### GPGElogfits = data.frame() GE3_3P_logFits.WCU = GE3_3P_logFits.W %>% filter(taxa %in% CULines$taxa &taxa %nin% c('P_5_3','P_8_6')) missingGE3Plog = myGD20_pruneCUfilt$taxa %in% GE3_3P_logFits.WCU$taxa counter = 2 for( col in GE3_3P_logFits.WCU[,-1]){ temp = FoldCVGP(col, myGD20_pruneCUfilt[which(missingGE3Plog),-1]-1, #convert to correct format datasplit = .8, trait_name =paste0('GELogistic_', colnames(GE3_3P_logFits.WCU)[counter],'_withVND')) temp2 = FoldCVGP(col[which(GE3_3P_logFits.WCU$taxa %in% Mkk3taxa$taxa)], 
(myGD20_pruneCUfilt[which(missingGE3Plog),] %>% filter(taxa %in% Mkk3taxa$taxa))[,-1]-1, #convert to correct format datasplit = .8, trait_name =paste0('GELogistic_', colnames(GE3_3P_logFits.WCU)[counter],'_noVND')) temp3 = FoldCVGPSamePopSize(col, myGD20_pruneCUfilt[which(missingGE3Plog),-1]-1, #convert to correct format datasplit = .8, trait_name =paste0('GELogistic_', colnames(GE3_3P_logFits.WCU)[counter],'_withVNDSamePop'), sample_size = length(col[which(GE3_3P_logFits.WCU$taxa %in% Mkk3taxa$taxa)])) GPGElogfits =rbind(GPGElogfits,temp,temp2, temp3) counter = counter+1 } # GP GE3 FPCA ##### GPGEFPCA =data.frame() GE3.FPCA.WCU = GE31920FPCA.ouputs.longer %>% filter(taxa %in% CULines$taxa &taxa %nin% c('P_5_3','P_8_6'))%>% mutate(name = mapvalues(name, from = c('TimeTo90','TimeTo95'), to = c('fTimeTo90', 'fTimeTo95')))%>% filter(name != 'FPC4') %>% pivot_wider(names_from = 'name', values_from = 'value',) missingGE3FPCA= myGD20_pruneCUfilt$taxa %in% GE3.FPCA.WCU$taxa counter = 2 for( col in GE3.FPCA.WCU[,-1]){ temp = FoldCVGP(col, myGD20_pruneCUfilt[which(missingGE3FPCA),-1]-1, #convert to correct format datasplit = .8, trait_name =paste0('GEFPCA_',colnames(GE3.FPCA.WCU)[counter],'_withVND')) temp2 = FoldCVGP(col[which(GE3.FPCA.WCU$taxa %in% Mkk3taxa$taxa)], (myGD20_pruneCUfilt[which(missingGE3FPCA),] %>% filter(taxa %in% Mkk3taxa$taxa))[,-1]-1, #convert to correct format datasplit = .8, trait_name =paste0('GEFPCA_',colnames(GE3.FPCA.WCU)[counter],'_noVND')) temp3 = FoldCVGPSamePopSize(col, myGD20_pruneCUfilt[which(missingGE3FPCA),-1]-1, #convert to correct format datasplit = .8, trait_name = paste0('GEFPCA_',colnames(GE3.FPCA.WCU)[counter],'_withVNDSamePop'), sample_size = length(col[which(GE3.FPCA.WCU$taxa %in% Mkk3taxa$taxa)])) GPGEFPCA =rbind(GPGEFPCA,temp,temp2,temp3) counter = counter+1 } # GP GI3 Logistic #### GPGIlogfits = data.frame() GI3_4P_logFits.WCU = GI3_4P_logFits.w %>% filter(taxa %in% CULines$taxa &taxa %nin% c('P_5_3','P_8_6'))%>% mutate(TimeTo5.0 
= as.numeric(ifelse(TimeTo5.0<250,TimeTo5.0,NA))) %>% select(!TimeTo5.6) missingGI4Plog = myGD20_pruneCUfilt$taxa %in% GI3_4P_logFits.WCU$taxa counter = 2 for( col in GI3_4P_logFits.WCU[,-1]){ temp = FoldCVGP(col, myGD20_pruneCUfilt[which(missingGI4Plog),-1]-1, #convert to correct format datasplit = .8, trait_name =paste0('GILogistic_', colnames(GI3_4P_logFits.WCU)[counter],'_withVND')) temp2 = FoldCVGP(col[which(GI3_4P_logFits.WCU$taxa %in% Mkk3taxa$taxa)], (myGD20_pruneCUfilt[which(missingGI4Plog),] %>% filter(taxa %in% Mkk3taxa$taxa))[,-1]-1, #convert to correct format datasplit = .8, trait_name =paste0('GILogistic_', colnames(GI3_4P_logFits.WCU)[counter],'_noVND')) temp3 = FoldCVGPSamePopSize(col, myGD20_pruneCUfilt[which(missingGI4Plog),-1]-1, #convert to correct format datasplit = .8, trait_name = paste0('GILogistic_', colnames(GI3_4P_logFits.WCU)[counter],'_withVNDSamePop'), sample_size = length(col[which(GI3_4P_logFits.WCU$taxa %in% Mkk3taxa$taxa)])) GPGIlogfits =rbind(GPGIlogfits,temp,temp2, temp3) counter = counter+1 } # GP GI3 FPCA #### GPGIFPCA = data.frame() GI3.FPCA.WCU = GI31920FPCA.ouputs.longer %>% filter(taxa %in% CULines$taxa &taxa %nin% c('P_5_3','P_8_6')) %>% mutate(name = mapvalues(name, from = c('TimeTo5.0','TimeTo5.6'), to = c('fTimeTo5.0','fTimeTo5.6'))) %>% pivot_wider(values_from = value, names_from = name) %>% select(!FPC4) missingGI3FPCA= myGD20_pruneCUfilt$taxa %in% GI3.FPCA.WCU$taxa counter = 2 for( col in GI3.FPCA.WCU[,-1]){ temp = FoldCVGP(col, myGD20_pruneCUfilt[which(missingGI3FPCA),-1]-1, #convert to correct format datasplit = .8, trait_name =paste0('GIFPCA_', colnames(GI3.FPCA.WCU)[counter],'_withVND')) temp2 = FoldCVGP(col[which(GI3.FPCA.WCU$taxa %in% Mkk3taxa$taxa)], (myGD20_pruneCUfilt[which(missingGI3FPCA),] %>% filter(taxa %in% Mkk3taxa$taxa))[,-1]-1, #convert to correct format datasplit = .8, trait_name =paste0('GIFPCA_', colnames(GI3.FPCA.WCU)[counter],'_noVND')) temp3 = FoldCVGPSamePopSize(col, 
myGD20_pruneCUfilt[which(missingGI3FPCA),-1]-1, #convert to correct format datasplit = .8, trait_name = paste0('GIFPCA_', colnames(GI3.FPCA.WCU)[counter],'_withVNDSamePop'), sample_size = length(col[which(GI3.FPCA.WCU$taxa %in% Mkk3taxa$taxa)])) GPGIFPCA =rbind(GPGIFPCA,temp, temp2, temp3) counter = counter+1 } ################################ # setwd('GWAS_OUTPUTS/') # load("GE31920PerTP.GWAS.S.RData") # load("GE3FPCA1920.GWAS.S.RData") # load("GE3LogisticFits1920.GWAS.S.RData") # load("GI31920PerTP.GWAS.S.RData") # load("GI3FPCA1920.GWAS.S.RData") # load("GI3LogisticFits1920.GWAS.S.RData") # setwd(rprojroot::find_rstudio_root_file()) ### PLOTS ###### ### Figure 1 GE Recovered curve plots for FPCA and Logistic, and parameters histograms ######### GElogPcounts = GE3_3P_logFits.W %>% pivot_longer(cols = 2:8) %>% filter(!is.na(value))%>% group_by(name) %>% summarise(n = n()) %>% mutate(FacetLabel = paste0(name,': n=', n)) %>% merge(.,GE3_3P_logFits.W %>% pivot_longer(cols = 2:8),by = 'name') %>% mutate(ModelType = 'Logistic', ParamFiltered = taxa %in% (GE3_3P_logFits.W %>% filter(Lower>0.8))$taxa) GEfpcaPcounts = GE31920FPCA.ouputs.longer %>% group_by(name) %>% filter(!(is.na(value)))%>%summarise(n = n()) %>% mutate(FacetLabel = paste0(name,': n=', n)) %>% merge(.,GE31920FPCA.ouputs.longer, by ='name') %>% mutate(ModelType = 'FPCA') GEfpcaRecoveredCurves = GE31920.FPCA$RecoveredCurves %>% as.data.frame() %>% mutate(time = GE31920.FPCA$phi.fun.df$time) %>% pivot_longer(cols = 1:484, values_to = 'GE3', names_to = 'taxa') %>% select(taxa,time,GE3) %>% mutate(ModelType = 'FPCA fits') %>% ggplot(aes(x = time, y = GE3, group = taxa))+geom_line(color ='darkgrey')+ facet_grid(cols = vars(ModelType))+ geom_point(data = All.BluesGE31920%>% filter(taxa %nin% GE3logTaxatoFilter$taxa), color ='blue', size =.9)+ theme_bw(base_size = 8) +xlab('Time')+ylab('GE') GElogisticRecoveredCurves = GE3fittedCurves %>% filter(taxa %nin% GE3logTaxatoFilter$taxa) %>% mutate(ModelType = 
'Logistic fits', ParamFiltered = taxa %in% (GE3_3P_logFits.W %>% filter(Lower>0.8))$taxa) %>% ggplot(aes(x = time, y = GE3estimate, group = taxa))+geom_line(color = 'darkgrey')+ facet_grid(cols = vars(ModelType)) + geom_point(data = All.BluesGE31920%>% filter(taxa %nin% GE3logTaxatoFilter$taxa),inherit.aes = F, aes(x = time, y = GE3), color ='blue', size =.9)+ theme_bw(base_size = 8) +xlab('Time')+ylab('GE') GECurveandParamPlot = GEfpcaRecoveredCurves+ ggplot(GEfpcaPcounts %>% mutate(FacetLabel= mapvalues(FacetLabel, from = c('TimeTo90: n=484','TimeTo95: n=484'),to = c('fTimeTo90: n=484','fTimeTo95: n=484'))), aes(x = value))+geom_histogram()+facet_wrap(vars(FacetLabel),scales = 'free')+theme_bw(base_size = 8)+ labs(subtitle = 'FPCA FPCs and derived values')+ GElogisticRecoveredCurves+ ggplot(GElogPcounts,aes(x = value))+geom_histogram()+ facet_wrap(vars(FacetLabel), scales = 'free')+ theme_bw(base_size = 8)+theme(legend.position = 'none')+ labs(subtitle = 'Logistic parameter and derived values')+ plot_layout(design = c('AABBB \n CCDDD'), heights = c(2,3))+ plot_annotation(tag_levels = 'a') & theme(plot.tag.position = c(0, 1), plot.tag = element_text(size = 8, hjust = 0, vjust = 0)) GECurveandParamPlot setwd(rprojroot::find_rstudio_root_file()) pdf('Figure1_GECurveAndParameters.pdf',width =8,height = 6) GECurveandParamPlot dev.off() ### figure 2 GE Functional Manhattan Plot ##### GEfunctionalModelingManhattan = rbind(GE3FPCATopMarkers,GE3LogisticTopMarkers, filtGE3LogisticTopMarkers) %>% filter(maf >0.07) %>% mutate(ModelTypeParam = paste0(ModelType,trait), ModelType = mapvalues(ModelType, from = c('GE3Logistic','GE3FPCA','FiltGE3Logistic'), to=c('Logistic','FPCA','Filtered Logistic'))) %>% mutate(trait = factor(trait, levels = c('FPC1','FPC2','FPC3','TimeTo90','TimeTo95','Centering', 'Lower','Rate','rTimeTo90','rTimeTo95'))) %>% ggplot(aes(x = ordinal, y = log10PVal, shape = ModelType))+ geom_point(aes(color = trait), size =2.5) + geom_vline(xintercept = 
ChromLines)+ geom_vline(xintercept = c(9228,10771), color = 'red')+ annotate(geom = 'text', x = 9228, y = 22, label = 'AlaAT1')+ annotate(geom = 'text', x = 10771, y = 18, label = 'MKK3')+ scale_x_continuous(label = c("1H","2H", "3H", "4H", "5H", "6H", "7H", "UN"), breaks = breaks)+ ylab('-log(p-value)')+xlab('Chromosome')+ geom_hline(yintercept = -log10(5e-5))+ ylim(0,30)+ theme_bw()+labs(shape = 'Time series\nmodel',color = 'Parameter') + guides(color = guide_legend(order=2), shape = guide_legend(order=1)) pdf('Figure2_GEFunctionalGWAresults.pdf', 8,5.5) GEfunctionalModelingManhattan dev.off() ### Figure 3 GI recovered curves and parameters plotted for FPCA and logistic curves ####### GIlogPcounts = GI3_4P_logFits.w %>% select(!c(TimeTo5.6)) %>% mutate(TimeTo5.0 = as.numeric(ifelse(TimeTo5.0<250,TimeTo5.0,NA)))%>% pivot_longer(cols = 2:9) %>% group_by(name)%>% filter(!is.na(value)) %>% summarise(n = n())%>% mutate(FacetLabel = paste0(name,': n=', n)) %>% merge(.,GI3_4P_logFits.w %>% select(!c(TimeTo5.6)) %>% mutate(TimeTo5.0 = as.numeric(ifelse(TimeTo5.0<250,TimeTo5.0,NA)))%>% pivot_longer(cols = 2:9), by = 'name') GIfpcaPcounts = GI31920FPCA.ouputs.longer %>% group_by(name) %>% filter(!(is.na(value)))%>%summarise(n = n()) %>% mutate(FacetLabel= mapvalues(paste0(name,': n=', n), from = c('TimeTo5.0: n=478','TimeTo5.6: n=398'), to = c('fTimeTo5.0: n=478','fTimeTo5.6: n=398'))) %>% merge(.,GI31920FPCA.ouputs.longer, by ='name') %>% mutate(ModelType = 'FPCA') GIfpcaRecoveredCurves = GI31920.FPCA$RecoveredCurves %>% as.data.frame() %>% mutate(time = GI31920.FPCA$phi.fun.df$time) %>% pivot_longer(cols = 1:484, values_to = 'GI3', names_to = 'taxa') %>% select(taxa,time,GI3) %>% mutate(ModelType = 'FPCA fits') %>% ggplot(aes(x = time, y = GI3, group = taxa))+geom_line(color ='darkgrey')+ facet_grid(cols = vars(ModelType))+ geom_point(data = all1920GIBlues %>% filter(taxa %nin% GI3logTaxatoFilter$taxa), color ='blue', size = 0.9)+ theme_bw(base_size = 8) 
+xlab('Time')+ylab('GI') GIlogisticRecoveredCurves = GI3fittedCurves %>% filter(taxa %nin% GI3logTaxatoFilter$taxa) %>% mutate(ModelType = 'Logistic fits') %>% ggplot(aes(x = time, y = GI3estimate, group = taxa))+geom_line(color ='darkgrey')+ facet_grid(cols = vars(ModelType)) + geom_point(data = all1920GIBlues%>% filter(taxa %nin% GI3logTaxatoFilter$taxa), inherit.aes = F, aes(x = time, y = GI3), color ='blue', size = 0.9)+ theme_bw(base_size = 8) +xlab('Time')+ylab('GI') GICurveandParamPlot = GIfpcaRecoveredCurves+ ggplot(GIfpcaPcounts,aes(x = value))+geom_histogram()+facet_wrap(vars(FacetLabel),scales = 'free')+theme_bw(base_size = 8)+ labs(subtitle = 'FPCA FPCs and derived values')+ GIlogisticRecoveredCurves+ ggplot(GIlogPcounts,aes(x = value))+geom_histogram()+facet_wrap(vars(FacetLabel), scales = 'free')+theme_bw(base_size = 8)+ labs(subtitle = 'Logistic parameter and derived values')+ plot_layout(design = c('AABBB \n CCDDD'), heights = c(2,3))+ plot_annotation(tag_levels = 'a') & theme(plot.tag.position = c(0, 1), plot.tag = element_text(size = 8, hjust = 0, vjust = 0)) GICurveandParamPlot pdf('Figure3_GICurvesAndParameters.pdf', width = 8,height = 6) GICurveandParamPlot dev.off() ### Figure 4 GI Functional Manhattan ##### GIfunctionalModelingManhattan = rbind(GI3FPCATopMarkers, GI3LogisticTopMarkers, filtGI3LogisticTopMarkers) %>% filter(maf >0.07) %>% mutate(ModelTypeParam = paste0(ModelType,trait), ModelType = mapvalues(ModelType, from = c('GI3Logistic','GI3FPCA','FiltGI3Logistic' ), to=c('Logistic','FPCA', 'Filtered Logistic'))) %>% mutate(trait = factor(trait, levels = c('FPC1','FPC2','FPC3','TimeTo5.0','TimeTo5.6','Centering', 'Lower','Rate','Upper','DeltaGI','DeltaGI90','DeltaGI95'))) %>% ggplot(aes(x = ordinal, y = log10PVal, shape = ModelType))+ geom_point(aes(color = trait), size =2.5) + geom_vline(xintercept = ChromLines)+ geom_vline(xintercept = c(9228,10771), color = 'red')+ annotate(geom = 'text', x = 9228, y = 22, label = 'AlaAT1')+ 
annotate(geom = 'text', x = 10771, y = 18, label = 'MKK3')+ scale_x_continuous(label = c("1H","2H", "3H", "4H", "5H", "6H", "7H", "UN"), breaks = breaks)+ ylab('-log(p-value)')+xlab('Chromosome')+ geom_hline(yintercept = -log10(5e-5))+ ylim(0,30)+ theme_bw()+labs(shape = 'Time series\nmodel',color = 'Parameter') + guides(color = guide_legend(order=2), shape = guide_legend(order=1)) pdf('Figure4_GIFunctionalGWAresults.pdf', 8.00,6.00) GIfunctionalModelingManhattan dev.off() ### Figure 5 Results of GP on all parameters based on training sets ####### pdf('Figure5_SimpleGP.pdf', 8.00,6.00) rbind(GPGEFPCA %>% mutate(trait = gsub(pattern = 'GE', replacement = '',x =trait)), GPGElogfits%>% mutate(trait = gsub(pattern = 'GE', replacement = '',x =trait)), GPPerTP %>% filter(substr(trait,10,12)=='GE3')%>% mutate(trait = gsub(pattern = '3_', replacement = '_',x = trait))) %>% group_by(trait, fold) %>% summarize('Prediction accuracy' = mean(correlation)) %>% ungroup(trait, fold) %>% separate(trait, sep = '_', into =c('xFacet', 'xMarker', 'MKK3included')) %>% mutate(yfacetLabel = 'GE') %>% mutate(xMarkerf = factor(xMarker,levels = c('FPC1','FPC2', 'FPC3','fTimeTo90','fTimeTo95','Centering','Lower','Rate', 'TimeTo90', 'TimeTo95','rTimeTo90','rTimeTo95','TP1GE','TP2GE','TP3GE','TP4GE','TP5GE','TP6GE','TP7GE' )), Model = 'Model framework', xFacet = mapvalues(xFacet, from ='perTP', to = 'per Time Point'), MKK3included = factor(MKK3included, levels = c('withVND','noVND', 'withVNDSamePop'))) %>% ggplot(aes(x = `Prediction accuracy`, y = xMarkerf, fill = MKK3included))+ geom_boxplot()+ facet_nested(Model+xFacet~yfacetLabel, scales = 'free',space = 'free_y')+ theme_bw()+ylab(NULL)+scale_y_discrete(limits=rev)+ xlim(-.2,1) + geom_vline(xintercept = 0)+ scale_fill_manual(values = c('#66c2a5','#fc8d62','#8da0cb'),name ='MKK3 allele\nexcluded', labels = c('None-full',expression(MKK3[N^{"*"}]), 'None-STPS'))+ theme(legend.text.align = 0)+ rbind(GPGIlogfits%>% mutate(trait = gsub(pattern = 
'GI', replacement = '',x =trait)), GPGIFPCA %>% mutate(trait = gsub(pattern = 'GI', replacement = '',x =trait)), GPPerTP%>%filter(substr(trait,10,12)=='GI3') %>% mutate(trait = gsub(pattern = '3_', replacement = '_',x = trait)), GPPHSCU) %>% group_by(trait, fold) %>% summarize('Prediction accuracy' = mean(correlation)) %>% ungroup(trait, fold) %>% separate(trait, sep = '_', into =c('xFacet', 'xMarker', 'MKK3included')) %>% mutate(xMarker = mapvalues(xMarker, from =c('Delta','Delta90','Delta95'), to = c('DeltaGI', 'DeltaGI90','DeltaGI95')), yfacetLabel = 'GI', xMarkerf = factor(xMarker, levels = c('FPC1','FPC2', 'FPC3','fTimeTo5.0','fTimeTo5.6','Centering','Lower','Rate', 'Upper', 'TimeTo5.0','DeltaGI90','DeltaGI95','DeltaGI','TP1GI','TP2GI','TP3GI','TP4GI', 'TP5GI','TP6GI','TP7GI','PHS' )), Model = 'Model framework', xFacet = mapvalues(xFacet, from ='perTP', to = 'per Time Point'), MKK3included = factor(MKK3included, levels = c('withVND', 'noVND','withVNDSamePop'))) %>% arrange(MKK3included) %>% ggplot(aes(x = `Prediction accuracy`, y = xMarkerf, fill = MKK3included))+ geom_boxplot()+ facet_nested(Model+xFacet~yfacetLabel, scales = 'free',space = 'free_y')+ theme_bw()+ ylab(NULL)+scale_y_discrete(limits=rev)+ xlim(-.2,1) + geom_vline(xintercept = 0)+ scale_fill_manual(values = c('#66c2a5','#fc8d62','#8da0cb'),name ='MKK3 allele\nexcluded', labels = c('None-full',expression(MKK3[N^{"*"}]), 'None-STPS'))+ theme(legend.text.align = 0)+ plot_layout(ncol = 2, guides = 'collect')+plot_annotation(tag_levels = 'a') dev.off() ### Figure 6 Leave one TP out cross validation ###### setwd(rprojroot::find_rstudio_root_file()) load("Output/FPCA_GEGI_predictions.RData") load("Output/Logistic_GEGI_predictions.RData") rectangles = FPCA_GEGI_predictions %>% select(TP, Trait, Dropped) %>% unique()%>% mutate(Top = 5, Bottom = -5, TraitLabel = paste0('Trait: ', Trait), TP = Dropped) %>% filter(Dropped !='None') %>% mutate(alpha = 0.05) FPCALogisticPrediction = Logistic_GEGI_predictions 
%>% mutate(TraitLabel = paste0('Model: Logistic; Trait: ', Trait)) %>% rbind(FPCA_GEGI_predictions %>% mutate(TraitLabel = paste0('Model: FPCA; Trait: ', Trait))) %>% mutate(TraitLabel = factor(TraitLabel,levels = c('Model: FPCA; Trait: GE', 'Model: Logistic; Trait: GE', 'Model: FPCA; Trait: GI', 'Model: Logistic; Trait: GI')), MethodLabel = factor(mapvalues(Method, from = c('TrainingSetCorrelations','GPP','GPT'), to = c('Training Set\nAccuracy', 'Testing Set\nGPP Accuracy', 'Testing Set\nGPT Accuracy')), levels = c('Training Set\nAccuracy', 'Testing Set\nGPT Accuracy','Testing Set\nGPP Accuracy'), ordered = TRUE), TPModel = paste0(TP,':',Model)) Rectangles2 = rbind(rectangles %>% mutate(TraitLabel = paste0('Model: Logistic; Trait: ', Trait), Model = 'Logistic'), rectangles %>% mutate(TraitLabel = paste0('Model: FPCA; Trait: ', Trait), Model = 'FPCA')) %>% mutate(TraitLabel = factor(TraitLabel,levels = c('Model: FPCA; Trait: GE', 'Model: Logistic; Trait: GE', 'Model: FPCA; Trait: GI', 'Model: Logistic; Trait: GI')), TPModel = paste0(TP,':',Model)) library(ggh4x) pdf('Figure6_LeaveOneTPoutGP.pdf', 10,8) FPCALogisticPrediction %>% mutate('TP Dropped' = 'Time point masked') %>% ggplot(aes(x = TP, y = correlation, color = MethodLabel))+ geom_boxplot(outlier.size = 0.5) + facet_nested(`TP Dropped`+Dropped ~ TraitLabel)+ scale_color_manual(values = c('#e41a1c','#377eb8','#4daf4a'))+ geom_hline(yintercept = 0, color = 'black') + xlab('Time point of prediction')+ labs(color = 'Prediction \nMethod') + geom_tile(data = Rectangles2 %>% mutate('TP Dropped' = 'Time point masked'), inherit.aes = FALSE, aes(x = TP, fill = NA,y = 0), alpha = 0.05, height = 2, width = 1, fill = 'lightgrey') + theme_bw(base_size = 10) + ylab('Prediction accuracy') dev.off() ### Figure 7 Time To thresholds vs PHS and other things ###### setwd(rprojroot::find_rstudio_root_file()) load("PhenotypeData/ProcessedData/PHS_BLUEs_GGS1920.RData") PopulationKey = read.csv(file = 
'GenotypeData/PopulationKey.csv') %>% select(!X) CULines =PopulationKey %>% filter(Population %in% c("check","C1G","C2G","C1P","base")) #PHS with ftimeto5.0 and ftimeto95 in ONLY THE CU LINES PHS.blues %>% merge(., all_BLUE %>% select(TP, taxa, GE3,GI3scale)%>%filter(TP =='TP1'), by = 'taxa')%>% rename('GE TP1' =GE3, 'GI TP1' = GI3scale) %>% merge(.,VectorofTimeto95[c('taxa',"TimeTo95")], by = 'taxa') %>% merge(.,VectorofTimeto5.0[c('taxa','TimeTo5.0')], by = 'taxa') %>% merge(., myGD20_prune[c('taxa','MKK3_E165Q','JHI-Hv50k-2016-367342', 'AlaAT_L214F')], by = 'taxa',all.x = TRUE) %>% filter(MKK3_E165Q != 1 & `JHI-Hv50k-2016-367342` != 1)%>% mutate(SD2 = paste0(round(`JHI-Hv50k-2016-367342`),round(MKK3_E165Q)), AlaAT = round(AlaAT_L214F), AlaATcode = mapvalues(AlaAT,from =c(0,2,1), to = c('N','D','Het')), MKK3_E165Q = paste(MKK3_E165Q), SD2Code= mapvalues(SD2, from = c('00','20','22'), to= c('Dormant','Non-Dormant','Very \n Non-Dormant'))) %>% rename(fTimeTo95 = TimeTo95, fTimeTo5.0 = TimeTo5.0) %>% filter(taxa %in% CULines$taxa) %>% gatherpairs(PHS, 'GE TP1', 'GI TP1',fTimeTo95,fTimeTo5.0) %>% ggplot(aes(x = .xvalue, y = .yvalue, color = SD2Code))+ geom_point()+ scale_colour_manual(values = c('#66c2a5','#fc8d62','#8da0cb'),name = "*HvMKK3* \nallele", labels = expression(MKK3[D],MKK3[N],MKK3[N^{"*"}]))+theme_bw() + theme(legend.title = element_markdown())+ facet_grid(cols = vars(.xkey), rows = vars(.ykey), scales = 'free',switch = "y")+ xlab('Values')+ylab('Values') #lets try with ggally to get what I want... #This is mostly what I want expect for the markdown problems with the labeller. 
pdf('Figure7_PHScorrelations.pdf', 10.00,6.00) PHS.blues %>% merge(., all_BLUE %>% select(TP, taxa, GE3,GI3scale)%>%filter(TP =='TP1'), by = 'taxa')%>% rename('GETP1' =GE3, 'GITP1' = GI3scale) %>% merge(.,VectorofTimeto95[c('taxa',"TimeTo95")], by = 'taxa') %>% merge(.,VectorofTimeto5.0[c('taxa','TimeTo5.0')], by = 'taxa') %>% merge(., myGD20_prune[c('taxa','MKK3_E165Q','JHI-Hv50k-2016-367342', 'AlaAT_L214F')], by = 'taxa',all.x = TRUE) %>% filter(MKK3_E165Q != 1 & `JHI-Hv50k-2016-367342` != 1)%>% mutate(SD2 = paste0(round(`JHI-Hv50k-2016-367342`),round(MKK3_E165Q)), MKK3_E165Q = paste(MKK3_E165Q), SD2Code= mapvalues(SD2, from = c('00','20','22'), to =c('MKK3 D','MKK3 N','MKK3 N*'))) %>% rename(fTimeTo95 = TimeTo95, fTimeTo5.0 = TimeTo5.0) %>% filter(taxa %in% CULines$taxa) %>% ggpairs(data = ., columns = c('PHS', 'GETP1', 'GITP1','fTimeTo95','fTimeTo5.0'), ggplot2::aes(color = SD2Code))+theme_bw()+theme(axis.text = element_text(size = 6)) dev.off() ### Sup Table 1 and 2 significant per TP and Functional Markers for the tables ###### # per TP rbind(GI3perTPTopMarkers,GE3perTPTopMarkers) %>% filter(maf > 0.07 &P.value<5e-5) %>% mutate(log10PVal = round(log10PVal,2)) %>% select(ModelType,trait, SNP,Chromosome, Position,maf, log10PVal) #for functional analysis SigMarkers = rbind(GI3FPCATopMarkers, GE3FPCATopMarkers, GI3LogisticTopMarkers, GE3LogisticTopMarkers, filtGE3LogisticTopMarkers, filtGI3LogisticTopMarkers) %>% filter(maf >0.07 & P.value < 5e-5) %>% arrange(Chromosome, Position) View(SigMarkers %>% select(ModelType,trait,SNP, Chromosome, Position,maf, P.value)) SigMarkers %>% arrange(ModelType, trait, Chromosome, Position, P.value) %>% mutate(log10PVal = round(log10PVal,2)) %>% select(ModelType,trait, SNP,Chromosome, Position, maf, log10PVal)%>% merge(., rbind( GE3_3P_logFits.W %>% pivot_longer(cols = 2:8) %>% filter(!is.na(value))%>% group_by(name) %>% summarise(n = n()) %>% mutate(ModelType = 'GE3Logistic') %>% rename(trait = name), GE3_3P_logFits.W %>% 
filter(Lower<0.8) %>% pivot_longer(cols = 2:8) %>% filter(!is.na(value))%>% group_by(name) %>% summarise(n = n()) %>% mutate(ModelType = 'FiltGE3Logistic') %>% rename(trait = name), GE31920FPCA.ouputs.longer %>% group_by(name) %>% filter(!(is.na(value)))%>%summarise(n = n()) %>% mutate(ModelType = 'GE3FPCA') %>% rename(trait = name), GI3_4P_logFits.w %>% mutate(TimeTo5.0 = as.numeric(ifelse(TimeTo5.0<250,TimeTo5.0,NA)))%>% pivot_longer(cols = 2:10) %>% group_by(name)%>% filter(!is.na(value)) %>% summarise(n = n())%>%mutate(ModelType = 'GI3Logistic') %>% rename(trait = name), GI3_4P_logFits.w %>% filter(Lower<5.0) %>% mutate(TimeTo5.0 = as.numeric(ifelse(TimeTo5.0<250,TimeTo5.0,NA)))%>% pivot_longer(cols = 2:10) %>% group_by(name)%>% filter(!is.na(value)) %>% summarise(n = n())%>% mutate(ModelType = 'FiltGI3Logistic') %>% rename(trait = name), GI31920FPCA.ouputs.longer %>% group_by(name) %>% filter(!(is.na(value)))%>%summarise(n = n()) %>% rename(trait = name) %>% mutate(ModelType = 'GI3FPCA')), by = c('ModelType','trait'),all.X =TRUE)%>% mutate(ModelType = mapvalues(ModelType, from=c("GI3Logistic","GI3FPCA","GE3Logistic","FiltGE3Logistic","FiltGI3Logistic","GE3FPCA"), to=c("GILogistic","GIFPCA","GELogistic","GELogisticFilt","GILogisticFilt","GEFPCA")))%>% rename(Chr = Chromosome, Trait = trait, '-log(p-value)' = log10PVal) %>% mutate('Marker Region' = ifelse(Chr==5 & Position>585246000 & Position < 600000000, 'SD2', ifelse(Chr==5 &Position >442000000 & Position <443000000,'SD1',NA)), 'Gene Candidate' = mapvalues(`Marker Region`, from = c('SD1','SD2'), to = c('HvAlaAT1','HvMKK3'))) %>% tail(.,17) uniqueMarkers = SigMarkers %>% select(SNP) %>% mutate(SNP = as.character(SNP)) %>% unique() ModelList = c(rep("GIFPCA",5), rep("GEFPCA",5), rep("GILogistic",8), rep("GELogistic",7), rep("GELogisticFilt",7), rep("GILogisticFilt",8)) TraitList = c(GI3FPCA1920.GWAS.S.traits,GE3FPCA1920.GWAS.S.traits,GI3LogisticFits1920.GWAS.S.Traits, 
GE3LogisticFits1920.GWAS.S.Traits,filtGE3LogisticFits1920.GWAS.S.Traits,filtGI3LogisticFits1920.GWAS.S.Traits) AllPvals = data.frame() Countout = 1 counter = 1 for (i in c(GI3FPCA1920.GWAS.S,GE3FPCA1920.GWAS.S,GI3LogisticFits1920.GWAS.S,GE3LogisticFits1920.GWAS.S, FiltGE3LogisticFits1920.GWAS.S,FiltGI3LogisticFits1920.GWAS.S)){ print(head(i)) AllPvals = rbind(i %>% mutate(SNP = as.character(SNP))%>% filter(SNP %in% uniqueMarkers$SNP) %>% select(SNP,Chromosome, Position,maf, log10PVal) %>% mutate(ModelType =ModelList[counter], Trait = TraitList[counter]) ,AllPvals) counter =counter+1 } AllPvals %>% colnames() AllPvals %>%select(!maf)%>%pivot_wider(names_from = c(ModelType,Trait), names_sep = '_', values_from = log10PVal) ### Sup figure 1 GE FPCA models FPC effects coloring #### jpeg(filename = 'GEFPCscolored.jpg', 700,350, res = 120) GE31920.FPCA$RecoveredCurves %>% as.data.frame() %>% mutate(time = GE31920.FPCA$phi.fun.df$time) %>% pivot_longer(cols = 1:484, values_to = 'GE3', names_to = 'taxa') %>% select(taxa,time,GE3) %>% mutate(ModelType = 'FPC1') %>% merge(GE31920.FPCA$PCs_withTaxa, all.x = TRUE, by = 'taxa') %>% ggplot(aes(x = time, y = GE3, group = taxa))+geom_line(aes(color = FPC1))+ facet_grid(cols = vars(ModelType))+ # labs(subtitle = 'FPCA fits')+ # geom_point(data = All.BluesGE31920%>% filter(taxa %nin% GE3logTaxatoFilter$taxa), color ='blue', size =.9)+ theme_bw(base_size = 8) +xlab('Time')+ylab('GE') + GE31920.FPCA$RecoveredCurves %>% as.data.frame() %>% mutate(time = GE31920.FPCA$phi.fun.df$time) %>% pivot_longer(cols = 1:484, values_to = 'GE3', names_to = 'taxa') %>% select(taxa,time,GE3) %>% mutate(ModelType = 'FPC2') %>% merge(GE31920.FPCA$PCs_withTaxa, all.x = TRUE, by = 'taxa') %>% ggplot(aes(x = time, y = GE3, group = taxa))+geom_line(aes(color = FPC2))+ facet_grid(cols = vars(ModelType))+ theme_bw(base_size = 8) +xlab('Time')+ylab('GE') dev.off() ### Sup figure 2 GI FPCA models FPC effects coloring ##### jpeg(filename = 'GIFPCscolored.jpg', 
700,350, res = 120) GI31920.FPCA$RecoveredCurves %>% as.data.frame() %>% mutate(time = GI31920.FPCA$phi.fun.df$time) %>% pivot_longer(cols = 1:484, values_to = 'GI3', names_to = 'taxa') %>% select(taxa,time,GI3) %>% mutate(ModelType = 'FPC1') %>% merge(GI31920.FPCA$PCs_withTaxa, all.x = TRUE, by = 'taxa') %>% ggplot(aes(x = time, y = GI3, group = taxa))+geom_line(aes(color = FPC1))+ facet_grid(cols = vars(ModelType))+ theme_bw(base_size = 8) +xlab('Time')+ylab('GI') + GI31920.FPCA$RecoveredCurves %>% as.data.frame() %>% mutate(time = GI31920.FPCA$phi.fun.df$time) %>% pivot_longer(cols = 1:484, values_to = 'GI3', names_to = 'taxa') %>% select(taxa,time,GI3) %>% mutate(ModelType = 'FPC2') %>% merge(GI31920.FPCA$PCs_withTaxa, all.x = TRUE, by = 'taxa') %>% ggplot(aes(x = time, y = GI3, group = taxa))+geom_line(aes(color = FPC2))+ facet_grid(cols = vars(ModelType))+ theme_bw(base_size = 8) +xlab('Time')+ylab('GI') dev.off() ### Sup Figure 3 per time point Manhattan plot ##### jpeg('perTPModeling.jpg', 800,400, res = 120) rbind(GI3perTPTopMarkers,GE3perTPTopMarkers) %>% mutate(ModelType = mapvalues(ModelType, from = c('GE3 per Time Point','GI3 per Time Point'), to =c('GE','GI') )) %>% filter(maf >0.07) %>% mutate(ModelTypeParam = paste0(ModelType,trait), trait = substr(trait,1,3)) %>% ggplot(aes(x = ordinal, y = log10PVal, shape = ModelType))+ geom_point(aes(color = trait), size =2.5) + geom_vline(xintercept = ChromLines)+ geom_vline(xintercept = c(9228,10771), color = 'red')+ annotate(geom = 'text', x = 9228, y = 22, label = 'AlaAt')+ annotate(geom = 'text', x = 10771, y = 18, label = 'MKK3')+ scale_x_continuous(label = c("1H","2H", "3H", "4H", "5H", "6H", "7H", "UN"), breaks = breaks)+ ylab('-log(p-value)')+xlab('Chromosome')+ geom_hline(yintercept = -log10(5e-5))+ theme_bw()+labs(color = 'Time point',shape = 'Trait')+ guides(color = guide_legend(order=2), shape = guide_legend(order=1)) dev.off() ### Sup Figure 4 GE Logistic Parameters correlations and colored by MKK3 
status ######### jpeg(filename = 'GELogParamsScaterplot.jpg',700,500,res= 120 ) merge(GE3_3P_logFits.W%>%select(taxa, Lower,Rate, Centering) %>% pivot_longer(.,names_to = 'Xnames',values_to = 'Xvalues', cols = c(Lower,Rate,Centering)), GE3_3P_logFits.W%>%select(taxa, Lower,Rate, Centering), by = 'taxa')%>% pivot_longer(.,names_to = 'Ynames',values_to = 'Yvalues', cols = c(Lower,Rate,Centering)) %>% merge(., myGD20_prune[c('taxa','MKK3_E165Q','JHI-Hv50k-2016-367342')], by = 'taxa',all.x = TRUE) %>% filter(MKK3_E165Q != 1 & `JHI-Hv50k-2016-367342` != 1)%>% mutate(SD2 = paste0(round(`JHI-Hv50k-2016-367342`),round(MKK3_E165Q)), MKK3_E165Q = paste(MKK3_E165Q), SD2Code= mapvalues(SD2, from = c('00','20','22'), to = c('D','ND','VND'))) %>% ggplot(aes(x = Xvalues, y = Yvalues, color = SD2Code))+geom_point()+ facet_grid(rows = vars(Ynames), cols = vars(Xnames), scales = 'free')+theme_bw() + scale_colour_manual(values = c('#66c2a5','#fc8d62','#8da0cb'),name = "*HvMKK3* \nallele", labels = expression(MKK3[D],MKK3[N],MKK3[N^{"*"}]))+ theme(legend.title = element_markdown())+ xlab('Values')+ylab('Values') dev.off() # set dplyr functions select <- dplyr::select; rename <- dplyr::rename; mutate <- dplyr::mutate; summarize <- dplyr::summarize; arrange <- dplyr::arrange; slice <- dplyr::slice; filter <- dplyr::filter; recode<-dplyr::recode # remove obs for setosa setwd(rprojroot::find_rstudio_root_file()) jpeg(filename = 'GELogParamsGGallyPlot.jpg',700,500,res= 120 ) GE3_3P_logFits.W%>%select(taxa, Lower,Rate, Centering) %>% merge(., myGD20_prune[c('taxa','MKK3_E165Q','JHI-Hv50k-2016-367342')], by = 'taxa',all.x = TRUE) %>% filter(MKK3_E165Q != 1 & `JHI-Hv50k-2016-367342` != 1)%>% mutate(SD2 = paste0(round(`JHI-Hv50k-2016-367342`),round(MKK3_E165Q)), MKK3_E165Q = paste(MKK3_E165Q), SD2Code= mapvalues(SD2, from = c('00','20','22'), to = c('MKK3 D','MKK3 N','MKK3 N*'))) %>% ggpairs(data = ., columns = c('Lower','Rate','Centering'), ggplot2::aes(color = SD2Code))+theme_bw() # 
scale_color_manual(values=c('MKK3<sub>D</sub>'="#66c2a5",'MKK3<sub>N</sub>'='#fc8d62','MKK3<sub>N*</sub>'='#8da0cb',"Overall Corr"="black")) dev.off() ### Sup Figure 5 Time to 95 and 90 GE logistic models png histograms KS tests ###### testing = data.frame(taxa = myGD20_prune$taxa, Chr6mk = paste0(round(myGD20_prune$`JHI-Hv50k-2016-408912`,0)), SDHaplo = paste0(round(myGD20_prune$AlaAT_L214F,0), round(myGD20_prune$`JHI-Hv50k-2016-367342`,0), myGD20_prune$MKK3_E165Q)) %>% merge(VectorofTimeto95[c('taxa','TimeTo95')], by = 'taxa',all = TRUE) %>% rename(fTimeTo95 = TimeTo95) %>% merge(GE3_3P_logFits.W[c('taxa','TimeTo95')],by ='taxa', all = TRUE) %>% merge(VectorofTimeto90[c('taxa','TimeTo90')], by = 'taxa',all = TRUE) %>% rename(fTimeTo90 = TimeTo90) %>% merge(GE3_3P_logFits.W[c('taxa','TimeTo90')],by ='taxa', all = TRUE) cor(as.numeric(testing$Chr6mk), testing$TimeTo90, use = 'complete.obs') testing %>% select(TimeTo90, fTimeTo90, TimeTo95, fTimeTo95) %>% pairs(.,lower.panel = upper.panel,upper.panel = panel.cor) timetoDormbreaksum = testing %>% pivot_longer(cols =c(TimeTo90, fTimeTo90, TimeTo95, fTimeTo95)) %>% group_by(name) %>% filter(!is.na(value)) %>% summarize(Mean = mean(value), ' 0th Percentile' = quantile(value)[1], ' 25th Percentile' = quantile(value)[2], ' 50th Percentile, \n Median' = quantile(value)[3], ' 75th Percentile' = quantile(value)[4], '100th Percentile' = range(value)[2]) %>% pivot_longer(cols = !name, names_to = 'Statistic') %>% mutate(linetype = ifelse(Statistic == 'Mean','solid','dashed')) jpeg('TimeToDormancyBreakHist.jpg', 700,500, res = 120) testing %>% pivot_longer(cols =c(TimeTo90, fTimeTo90, TimeTo95, fTimeTo95)) %>% filter(value<100) %>% ggplot(aes(x= value)) +facet_wrap(vars(name))+geom_histogram()+ geom_vline(data = timetoDormbreaksum, aes(xintercept = value, color = Statistic, linetype =linetype), size = 1.6) + xlim(0, 100) + guides(linetype = FALSE) + theme_bw() +xlab('Estimated time to threshold') dev.off() ks.test(x = 
testing$TimeTo90, y = testing$fTimeTo90, alternative = c('two.sided')) ks.test(x = testing$TimeTo95, y = testing$fTimeTo95, alternative = c('two.sided')) t.test(x = testing$TimeTo90, y = testing$fTimeTo90, alternative = c('two.sided')) t.test(x = testing$TimeTo95, y = testing$fTimeTo95, alternative = c('two.sided')) ### Sup Figure 6 GI Logistic parameters Correlations and colored by MKK3 Status ###### jpeg(filename = 'GILogParamsScaterplot.jpg',700,500,res= 120 ) merge(GI3_4P_logFits.w%>%select(taxa, Lower,Rate, Centering, Upper) %>% pivot_longer(.,names_to = 'Xnames',values_to = 'Xvalues', cols = c(Lower,Rate,Centering,Upper)), GI3_4P_logFits.w%>%select(taxa, Lower,Rate, Centering, Upper), by = 'taxa')%>% pivot_longer(.,names_to = 'Ynames',values_to = 'Yvalues', cols = c(Lower,Rate,Centering,Upper)) %>% merge(., myGD20_prune[c('taxa','MKK3_E165Q','JHI-Hv50k-2016-367342')], by = 'taxa',all.x = TRUE) %>% filter(MKK3_E165Q != 1 & `JHI-Hv50k-2016-367342` != 1)%>% mutate(SD2 = paste0(round(`JHI-Hv50k-2016-367342`),round(MKK3_E165Q)), MKK3_E165Q = paste(MKK3_E165Q), SD2Code= mapvalues(SD2, from = c('00','20','22'), to= c('Dormant','Non-Dormant','Very \n Non-Dormant'))) %>% ggplot(aes(x = Xvalues, y = Yvalues, color = SD2Code))+geom_point()+ facet_grid(rows = vars(Ynames), cols = vars(Xnames), scales = 'free')+theme_bw() + scale_colour_manual(values = c('#66c2a5','#fc8d62','#8da0cb'),name = "*HvMKK3* \nallele", labels = expression(MKK3[D],MKK3[N],MKK3[N^{"*"}]))+ theme(legend.title = element_markdown())+ xlab('Values')+ylab('Values') dev.off() jpeg(filename = 'GILogParamsGGallyPlot.jpg',700,500,res= 120 ) data.frame(taxa = GI3_4P_logFits.w$taxa, Rate = as.vector(GI3_4P_logFits.w$Rate), Upper = as.vector(GI3_4P_logFits.w$Upper), Centering = as.vector(GI3_4P_logFits.w$Centering), Lower = as.vector(GI3_4P_logFits.w$Lower)) %>% merge(., myGD20_prune[c('taxa','MKK3_E165Q','JHI-Hv50k-2016-367342')], by = 'taxa',all.x = TRUE) %>% filter(MKK3_E165Q != 1 & `JHI-Hv50k-2016-367342` 
!= 1)%>% mutate(SD2 = paste0(round(`JHI-Hv50k-2016-367342`),round(MKK3_E165Q)), MKK3_E165Q = paste(MKK3_E165Q), SD2Code= mapvalues(SD2, from = c('00','20','22'), to =c('MKK3 D','MKK3 N','MKK3 N*'))) %>% ggpairs(data = ., columns = c('Lower','Rate','Centering','Upper'), ggplot2::aes(color = SD2Code))+theme_bw() # scale_color_manual(values=c('MKK3<sub>D</sub>'="#66c2a5",'MKK3<sub>N</sub>'='#fc8d62','MKK3<sub>N/*</sub>'='#8da0cb',"Overall Corr"="black")) dev.off() ### Sup Figure 7 Some examples of poor GI fits ##### jpeg('PoorGILogFits.jpg', 500,400, res = 120) GI3fittedCurves %>% filter(taxa %nin% GI3logTaxatoFilter$taxa) %>% mutate(ModelType = 'Logistic fits with rate < -10') %>% filter(taxa %in% (GI3_4P_logFits.w %>% filter(Rate < -10))$taxa)%>% ggplot(aes(x = time, y = GI3estimate, group = taxa, color = taxa))+geom_line(size = 1.5)+ facet_grid(cols = vars(ModelType)) + geom_point(data = all1920GIBlues%>% filter(taxa %nin% GI3logTaxatoFilter$taxa)%>% filter(taxa %in% (GI3_4P_logFits.w %>% filter(Rate < -10))$taxa), inherit.aes = F, aes(x = time, y = GI3,color = taxa), size = 1.5)+ theme_bw(base_size = 10 ) +xlab('Time')+ylab('GI') +labs(color = 'Line') dev.off() ### Distributions of fTimeto5.0 vs TimeTo5.0 jpeg('TimeTo5thresholdsfpcavslogsitic.jpg', 500,400, res = 120) join(VectorofTimeto5.0 %>% select(taxa, TimeTo5.0) %>% rename(fTimeTo5.0 = TimeTo5.0), GI3_4P_logFits %>% filter(term == 'TimeTo5.0') %>% select(taxa, estimate) %>% filter(estimate<250) %>% rename(TimeTo5.0 = estimate)) %>% pivot_longer(cols = !taxa) %>% ggplot(aes(x = value))+geom_histogram()+ facet_wrap(vars(name), ncol = 1)+ geom_vline(data = join(VectorofTimeto5.0 %>% select(taxa, TimeTo5.0) %>% rename(fTimeTo5.0 = TimeTo5.0), GI3_4P_logFits %>% filter(term == 'TimeTo5.0') %>% select(taxa, estimate) %>% filter(estimate<250) %>% rename(TimeTo5.0 = estimate)) %>% pivot_longer(cols = !taxa)%>% filter(!is.na(value)) %>% group_by(name) %>% summarize(Mean = mean(value), ' 0th Percentile' = 
quantile(value)[1], ' 25th Percentile' = quantile(value)[2], ' 50th Percentile;\nMedian' = quantile(value)[3], ' 75th Percentile' = quantile(value)[4]) %>% pivot_longer(cols = !name, names_to = 'Statistic') %>% mutate(linetype = ifelse(Statistic == 'Mean','solid','dashed')), aes(xintercept = value, color = Statistic), size = 1.6)+ xlab('Time to threshold') +theme_bw() dev.off() ### LD between things. ############# myGM20_prune %>% filter(Chromosome==6 & Position>472000000 & Position<474000000) ld = myGD20_prune %>% select(AlaAT_L214F,MKK3_E165Q, `JHI-Hv50k-2016-367342`,`JHI-Hv50k-2016-408912`, `JHI-Hv50k-2016-408918`,`JHI-Hv50k-2016-408820`, `JHI-Hv50k-2016-408472`) markerList = myGM20_prune %>% filter(SNP %in% c('AlaAT_L214F','MKK3_E165Q', 'JHI-Hv50k-2016-367342','JHI-Hv50k-2016-408912', 'JHI-Hv50k-2016-408918','JHI-Hv50k-2016-408820', 'JHI-Hv50k-2016-408472')) df = ld ld_heatmap=function(df){ ld <- as.matrix(round(df,0)) if(c(-1,3,4) %in% ld){ ld[which(ld==3)]=2 ld[which(ld==4)]=2 ld[which(ld== -1)]=0 } LD <- LD.Measures(donnees=ld, na.presence=F) #LD$loc1=as.character(LD$loc1); LD$loc2=as.character(LD$loc2) r2 <- matrix(0, nrow=ncol(df), ncol=ncol(df)) r2[lower.tri(r2, diag=FALSE)] <- LD$r2 r2 <- t(r2) r2 <- as.data.frame(round(r2, 5)) diag(r2) <- 1 r2[lower.tri(r2)] = NA rownames(r2)=colnames(df); colnames(r2)=rownames(r2) r_2=melt(as.matrix(r2), na.rm=T) r_2 graphic = ggplot(r_2, aes(Var2, Var1, fill = value))+ geom_tile(color = "white")+ scale_fill_gradient2(low = "blue", high = "red", mid = "white", midpoint = 0.5, limit = c(0,1), space = "Lab", name="r2") + theme_classic() + geom_text(aes(label = value))+theme_bw()#+ ggtitle(paste("LD r2 from",colnames(r2)[1],"-", colnames(r2)[length(colnames(r2))], sep=" " )) return(graphic) } ld_heatmap(ld) + labs(title = 'LD between SD1, SD2, and Chromosome 6H Markers')+ xlab('')+ylab('') +theme(axis.text.x = element_blank()) ### Random Correlations ########### merge(VectorofTimeto5.0[c('taxa','TimeTo5.0')], 
VectorofTimeto5.6[c('taxa','TimeTo5.6')], by ='taxa', all.x = TRUE) %>% select(TimeTo5.0,TimeTo5.6) %>% cor(.,use = 'complete.obs') cor(GI3_4P_logFits.w$DeltaGI90,GI3_4P_logFits.w$DeltaGI95, use = 'complete.obs') merge(GE3_3P_logFits.W[c('taxa','TimeTo90')], VectorofTimeto90[c('taxa','TimeTo90')], by = 'taxa', all = TRUE) %>% select(TimeTo90.x,TimeTo90.y) %>% cor(use = 'complete.obs', method = 'spearman') merge(GE3_3P_logFits.W[c('taxa','TimeTo90')], VectorofTimeto90[c('taxa','TimeTo90')], by = 'taxa', all = TRUE) %>% select(TimeTo90.x,TimeTo90.y) %>% plot() merge(GE3_3P_logFits.W[c('taxa','TimeTo95')], VectorofTimeto95[c('taxa','TimeTo95')], by = 'taxa', all = TRUE) %>% select(TimeTo95.x,TimeTo95.y) %>% cor(use = 'complete.obs', method = 'spearman') merge(GE3_3P_logFits.W[c('taxa','TimeTo95')], VectorofTimeto95[c('taxa','TimeTo95')], by = 'taxa', all = TRUE) %>% select(TimeTo95.x,TimeTo95.y) %>% plot() cor(myGD20_prune$`JHI-Hv50k-2016-367342`, myGD20_prune$`JHI-Hv50k-2016-366325`) GI3_4P_logFits.w %>% select(taxa,TimeTo5.0) %>% mutate(TimeTo5.0 = ifelse(TimeTo5.0 >250,NA,TimeTo5.0))%>% merge(VectorofTimeto5.0, by = 'taxa') %>% select(TimeTo5.0.x, TimeTo5.0.y) %>% cor(.,use = 'complete.obs') GI3_4P_logFits.w %>% select(taxa,TimeTo5.0) %>% mutate(TimeTo5.0 = ifelse(TimeTo5.0 >250,NA,TimeTo5.0))%>% merge(VectorofTimeto5.0, by = 'taxa') %>% select(TimeTo5.0.x, TimeTo5.0.y) %>% plot() ks.test(x = GI3_4P_logFits.w %>% select(TimeTo5.0) %>% mutate(TimeTo5.0 = ifelse(TimeTo5.0 >250,NA,TimeTo5.0)), y = VectorofTimeto5.0$TimeTo5.0, alternative = c('two.sided'))
f8bfc5a38c8dd70c4b317f4b41fe6941c12d81ee
ca49614ccb369ba523b8fe0aad57e1856d946bc6
/distances/random_insertions_telomeric_distances.R
3538142c20d8e34c6ed46352a6f3c59395f067f3
[]
no_license
lebmih/LAIR
25a0020eaaffcbe53ed192d3fa8e5e1766713576
547ae2067df31b37a9b101dc08288f98c87dc1b8
refs/heads/master
2022-12-06T17:30:47.833588
2020-09-01T19:14:53
2020-09-01T19:14:53
177,459,885
1
0
null
null
null
null
UTF-8
R
false
false
23,039
r
random_insertions_telomeric_distances.R
## Working directory setwd("~/Lab/LongBCRs/MethodPaper/RunTables") ## Libraries x <- c('tidyr', 'dplyr', 'ggplot2', 'ggrepel', 'reshape2', 'stringr', 'Biostrings', 'ggridges', 'ggpubr', 'cowplot') lapply(x, require, character.only = TRUE) rm(x) ### Randomize the insertions ## Plan ## We need to take the insertions table and change the chromosomal coordinate to ## random position inside the same chromosome ## Then we need to calculate overlaps for our true data and for 1000 generations ## of random data ## For the start let's just plot the random insertions # Replace the insert.genomic.center with random number # between 1 and chromosome length degl <- read.table(file = 'degl_expanded_310719.txt', sep = '\t', header = TRUE, stringsAsFactors = FALSE) inserts <- left_join(inserts, select(chrlen, chromosome, Length.bp), by = 'chromosome') telomeres.length <- 10000 ## Generating the artificial insertions dataset inserts.generic <- data.frame() for (j in seq(1:100)){ inserts.generic.temp <- inserts rand_position <- c() for (i in seq_along(inserts.generic.temp$chromosome)){ rand_position <- c(rand_position, sample(1:inserts.generic.temp$Length.bp[i],1)) } inserts.generic.temp$insert.genomic.center <- rand_position inserts.generic.temp$insert.genomic.coord.s.start <- inserts.generic.temp$insert.genomic.center - inserts.generic.temp$insert.length/2 inserts.generic.temp$insert.genomic.coord.s.end <- inserts.generic.temp$insert.genomic.center + inserts.generic.temp$insert.length/2 inserts.generic <- bind_rows(inserts.generic, inserts.generic.temp) if(j %in% seq(0,100,10)){ print(paste(j,"out of 100 cycles finished")) } } rm(i, j, rand_position, inserts.generic.temp) #inserts.generic$telomeric.distance <- mapply(min, # inserts.generic$insert.genomic.coord.s.start - telomeres.length, # inserts.generic$Length.bp - telomeres.length - inserts.generic$insert.genomic.coord.s.end) #for (j in seq(1:1000)){ # inserts1 <- inserts # rand_position <- c() # for (i in 
seq_along(inserts1$chromosome)){ # rand_position <- c(rand_position, sample(1:inserts1$Length.bp[i],1)) # } # inserts1$insert.genomic.center <- rand_position # inserts1$insert.genomic.coord.s.start <- inserts1$insert.genomic.center - inserts1$insert.length/2 # inserts1$insert.genomic.coord.s.end <- inserts1$insert.genomic.center + inserts1$insert.length/2 # inserts1$telomeric.distance <- mapply(min, inserts1$insert.genomic.coord.s.start - telomeres.length, # inserts1$Length.bp - telomeres.length - inserts1$insert.genomic.coord.s.end) # telomeric.distance <- c(telomeric.distance, inserts1$telomeric.distance) #} #inserts.vdjch1 <- filter(inserts, insert.type == 'VDJ-CH1') #inserts.vdj <- filter(inserts, insert.type == 'V-DJ') #inserts.vch1 <- filter(inserts, insert.type == 'V-CH1') #teldistanceTotal <- data.frame(inserts = 'Total', telomeric.distance = inserts$telomeric.distance) #teldistanceVDJCH1 <- data.frame(inserts = 'VDJ-CH1', telomeric.distance = inserts.vdjch1$telomeric.distance) #teldistanceVDJ <- data.frame(inserts = 'V-DJ', telomeric.distance = inserts.vdj$telomeric.distance) #teldistanceVCH1 <- data.frame(inserts = 'V-CH1', telomeric.distance = inserts.vch1$telomeric.distance) #teldistanceGenerated <- data.frame(inserts = 'Generated', telomeric.distance = inserts.generic$telomeric.distance) #teldistance <- bind_rows(teldistanceVDJCH1, teldistanceVDJ, # teldistanceVCH1, teldistanceGenerated) ## 280719 #teldistance <- bind_rows(teldistanceVDJCH1, teldistanceVDJ, teldistanceGenerated) inserts.generic$insert.type <- 'Generated' inserts.all <- bind_rows(inserts, inserts.generic) inserts.all$insert.type[is.na(inserts.all$insert.type)] <- 'V-CH1' inserts.all$telomeric.distance <- mapply(min, inserts.all$insert.genomic.coord.s.start - telomeres.length, inserts.all$Length.bp - telomeres.length - inserts.all$insert.genomic.coord.s.end) inserts.all$telomeric.distance.Mbp <- inserts.all$telomeric.distance/1000000 # H0: Median Telomeric Distance of random insertions 
equals to that of real insertions wilcox.test(inserts.all$telomeric.distance[inserts.all$insert.type == 'Generated'], inserts.all$telomeric.distance[inserts.all$insert.type == 'VDJ-CH1'], mu = 0, alternative = "greater", paired = F, correct = F, exact = T) telomeric_dist_boxplot <- ggplot(data = inserts.all)+ geom_boxplot(aes(x = insert.type, y = telomeric.distance.Mbp), alpha = 0.2)+ theme_classic()+ #xlab("")+ #scale_x_discrete(labels = c('Random' = 'Random\nInsertions', 'Real' = 'Donors\nInsertions'))+ ylab("Distance of insertion donor\nto the closest telomere (Mb)")+ theme(axis.text.x = element_text(size = 16, angle = 0, vjust = 0.6))+ xlab("Insert type")+ annotate('text', x = 2, y = 130, label = 'p = 0.86', size = 4)+ annotate('text', x = 3, y = 130, label = 'p < 2.2e-16', size = 4) #annotate('text', x = 4, y = 130, label = 'p < 2.2e-16', size = 4) telomeric_dist_boxplot telomeric_dist_density <- ggplot(data = inserts.all)+ geom_density(aes(x = telomeric.distance.Mbp, y = ..density.., fill = insert.type), alpha = 0.2)+ labs(x = "Distance to the closest telomere, Mbp", fill = 'Insert type')+ theme_classic() telomeric_dist_density ## Plots saving ggsave(paste('telomeric_dist_boxplot_', format(Sys.time(), "%d%m%y_%H%M"), '.pdf', sep = ''), width = 8, height = 5, device = "pdf", plot = telomeric_dist_boxplot) ggsave(paste('telomeric_dist_density_', format(Sys.time(), "%d%m%y_%H%M"), '.pdf', sep = ''), width = 8, height = 5, device = "pdf", plot = telomeric_dist_density) delete_the_plots_from_R_memory <- TRUE if(delete_the_plots_from_R_memory == TRUE){rm(telomeric_dist_boxplot, telomeric_dist_boxplot)} ## 20.02.19 gaps <- read.table(file = 'hg19chrGaps.txt', sep = '\t', header = TRUE) ## 21.02.19 ## Let's find the distance between rdna and the inserts rdna <- read.table(file = 'rdna_bed.txt', sep = '\t', header = TRUE) unique(rdna$feature) # drop score as we don't need it rdna <- select(rdna, - score) ## Calculating the center of rDNA regions rdna$chrom.center <- 
(rdna$chrom.end + rdna$chrom.start)/2 rdna <- filter(rdna, !str_detect(chromosome, '_')) rdna <- filter(rdna, !str_detect(chromosome, 'chrM')) unique(rdna$chromosome) rm(gaps) dist.to.closest.rdna <- c() for (i in seq_along(inserts.all$run.id)){ rdna.in.this.chrom <- rdna$chrom.center[rdna$chromosome == inserts.all$chromosome[i]] rdna.dist <- min(abs(rdna.in.this.chrom - inserts.all$insert.genomic.center[i])) dist.to.closest.rdna <- c(dist.to.closest.rdna, rdna.dist) } inserts.all$rdna.distance <- dist.to.closest.rdna inserts.all$rdna.dist.mpb <- inserts.all$rdna.distance / 1000000 rm(rdna.in.this.chrom, dist.to.closest.rdna, i, rdna.dist) ## Testing the statistical significance inserts.all$insert.type %>% unique() -> unique.insert.types uni.ins.types.n <- length(unique.insert.types) wilcox.matrix <- matrix(data = NA, nrow = uni.ins.types.n, ncol = uni.ins.types.n, dimnames = list(unique.insert.types, unique.insert.types)) for(i in colnames(wilcox.matrix)){ for(j in rownames(wilcox.matrix)){ wilcox.test(inserts.all$rdna.distance[inserts.all$insert.type == i], inserts.all$rdna.distance[inserts.all$insert.type == j], mu = 0, alternative = "two.sided", paired = F, correct = F, exact = T)$p.value %>% format(digits = 2, scientific = TRUE) -> wilcox.matrix[i,j] } } rm(i,j) ## Plotting the distance to rDNA rdna_dist_boxplot <- ggplot(data = inserts.all)+ geom_boxplot(aes(x = insert.type, y = rdna.dist.mpb), alpha = 0.2)+ theme_classic()+ #xlab("")+ #scale_x_discrete(labels = c('Random' = 'Random\nInsertions', 'Real' = 'Donors\nInsertions'))+ ylim(0, 5)+ ylab("Distance of insertion donor\nto the closest rDNA (Mb)")+ xlab("Insert type")+ theme(axis.text.x = element_text(size = 16, angle = 0, vjust = 0.6))+ annotate('text', x = "V-DJ", y = 5, label = ifelse(as.numeric(wilcox.matrix["V-DJ","Generated"])<=0.05, wilcox.matrix["V-DJ","Generated"], NA), size = 4)+ annotate('text', x = "VDJ-CH1", y = 5, label = ifelse(as.numeric(wilcox.matrix["VDJ-CH1","Generated"])<=0.05, 
wilcox.matrix["VDJ-CH1","Generated"], NA), size = 4) #annotate('text', x = 4, y = 10, label = 'p = 6.51e-12', size = 5) rdna_dist_boxplot rdna_dist_density <- ggplot(data = inserts.all)+ geom_density_ridges(aes(x = rdna.dist.mpb, y = insert.type, fill = insert.type), alpha = 0.2)+ labs(x = "Distance to the closest rDNA, Mbp", fill = 'Insert type')+ theme_classic()+ xlim(0, 1.2e+01) rdna_dist_density ## Saving the plots ggsave(paste('rdna_dist_density_', format(Sys.time(), "%d%m%y_%H%M"), '.pdf', sep = ''), width = 8, height = 5, device = "pdf", plot = rdna_dist_density) ggsave(paste('rdna_dist_boxplot_', format(Sys.time(), "%d%m%y_%H%M"), '.pdf', sep = ''), width = 8, height = 5, device = "pdf", plot = rdna_dist_boxplot) if(delete_the_plots_from_R_memory == TRUE){rm(rdna_dist_density, rdna_dist_boxplot)} rm(wilcox.matrix) ################################### ## 30072019 ## Calculating the distance to the closest genes dist.to.closest.agtotal <- c() overlap.closest.agtotal <- c() count.overlap <- function(subject.starts, subject.ends, query.start, query.end){ subject.starts <- as.numeric(subject.starts) subject.ends <- as.numeric(subject.ends) query.start <- as.numeric(query.start) query.end <- as.numeric(query.end) if(sum(subject.starts > subject.ends, na.rm = TRUE)){ stop("Some of the subject ranges you provided are flipped") } if(query.start > query.end){ stop("Query range you provided is flipped") } count.overlap <- sum(query.start >= subject.starts & query.start <= subject.ends| query.end >= subject.starts & query.end <= subject.ends| query.start <= subject.starts & query.end >= subject.ends) return(count.overlap) } for (i in seq(nrow(inserts.all))){ agtotal.in.this.chrom.start <- agtotal$start[agtotal$chrom == inserts.all$chromosome[i]] agtotal.in.this.chrom.end <- agtotal$end[agtotal$chrom == inserts.all$chromosome[i]] agtotal.in.this.chrom.center <- (agtotal.in.this.chrom.start + agtotal.in.this.chrom.end)/2 agtotal.dist <- 
min(abs(agtotal.in.this.chrom.center - inserts.all$insert.genomic.center[i])) agtotal.overlap <- count.overlap(agtotal.in.this.chrom.start, agtotal.in.this.chrom.end, inserts.all$insert.genomic.coord.s.start[i], inserts.all$insert.genomic.coord.s.end[i]) dist.to.closest.agtotal <- c(dist.to.closest.agtotal, agtotal.dist) overlap.closest.agtotal <- c(overlap.closest.agtotal, agtotal.overlap) if(i %in% seq(0,nrow(inserts.all),10000)){ paste(round(i*100/nrow(inserts.all)), "% of the job is done", sep = "") %>% print() } } inserts.all$transcribed.dist <- dist.to.closest.agtotal inserts.all$transcribed.overlap <- overlap.closest.agtotal inserts.all$transcribed.dist.mpb <- inserts.all$transcribed.dist / 1000000 rm(dist.to.closest.degl, overlap.closest.degl, degl.in.this.chrom.center, degl.in.this.chrom.start, degl.in.this.chrom.end, degl.overlap, degl.dist) # P-VALUES COMPUTATION wilcox.matrix <- matrix(data = NA, nrow = uni.ins.types.n, ncol = uni.ins.types.n, dimnames = list(unique.insert.types, unique.insert.types)) for(i in colnames(wilcox.matrix)){ for(j in rownames(wilcox.matrix)){ wilcox.test(inserts.all$transcribed.dist[inserts.all$insert.type == i], inserts.all$transcribed.dist[inserts.all$insert.type == j], mu = 0, alternative = "two.sided", paired = F, correct = F, exact = T)$p.value %>% format(digits = 2, scientific = TRUE) -> wilcox.matrix[i,j] } } rm(i,j) melt(wilcox.matrix) %>% mutate(value = as.numeric(levels(value))[value]) %>% filter(Var1 == 'Generated', Var2 != 'Generated') -> wilcox.matrix wilcox.matrix$significance <- "ns" wilcox.matrix$significance[wilcox.matrix$value <= 0.05] <- "*" wilcox.matrix$significance[wilcox.matrix$value <= 0.01] <- "**" wilcox.matrix$significance[wilcox.matrix$value <= 0.001] <- "***" wilcox.matrix$significance[wilcox.matrix$value <= 0.0001] <- "****" genes_dist_boxplot <- ggplot(data = inserts.all)+ geom_boxplot(aes(x = insert.type, y = transcribed.dist), alpha = 0.2)+ theme_classic()+ #xlab("")+ #scale_x_discrete(labels 
= c('Random' = 'Random\nInsertions', 'Real' = 'Donors\nInsertions'))+ ylim(0, 75000)+ ylab("Distance of insertion donor\nto the closest transcribed locus (bp)")+ xlab("Insert type")+ theme(axis.text.x = element_text(size = 16, angle = 0, vjust = 0.6))+ annotate('text', x = "V-DJ", y = 75000, label = ifelse(wilcox.matrix$significance[wilcox.matrix$Var2 == 'V-DJ'] != 'ns', paste(wilcox.matrix$significance[wilcox.matrix$Var2 == 'V-DJ'], "\np = ",wilcox.matrix$value[wilcox.matrix$Var2 == 'V-DJ'], sep =""), NA), size = 4)+ annotate('text', x = "VDJ-CH1", y = 75000, label = ifelse(wilcox.matrix$significance[wilcox.matrix$Var2 == 'VDJ-CH1'] != 'ns', paste(wilcox.matrix$significance[wilcox.matrix$Var2 == 'VDJ-CH1'], "\np = ",wilcox.matrix$value[wilcox.matrix$Var2 == 'VDJ-CH1'], sep =""), NA), size = 4) #annotate('text', x = 4, y = 10, label = 'p = 6.51e-12', size = 5) genes_dist_boxplot ggsave(paste('transcribed_dist_', format(Sys.time(), "%d%m%y_%H%M"), '.pdf', sep = ''), width = 8, height = 5, device = "pdf", plot = genes_dist_boxplot) median(inserts.all$degldist[inserts.all$insert.type == 'Generated']) mean(inserts.all$degldist[inserts.all$insert.type == 'V-DJ']) median(inserts.all$degldist[inserts.all$insert.type == 'VDJ-CH1']) ggplot(data = inserts.all)+ geom_density_ridges(aes(x = degldist, y = insert.type, fill = insert.type), alpha = 0.3)+ xlim(0,100000)+ theme_classic()+ xlab('Distance to the closest gene, bp')+ ylab('Insert type') ggplot(data = inserts.all)+ geom_violin(aes(y = degldist, x = insert.type, fill = insert.type), alpha = 0.3, adjust = 0.5)+ ylim(0,100000)+ theme_classic()+ ylab('Distance to the closest gene, bp')+ xlab('Insert type') closest.degl.names <- c() degl %>% mutate(center = (start + end)/2) -> degl for (i in seq(nrow(inserts.all))){ degl %>% filter(chrom == inserts.all$chromosome[i]) -> degl.temp degl.dist <- min(abs(degl.temp$center - inserts.all$insert.genomic.center[i])) closest.degl.id <- which(abs(degl.temp$center - 
inserts.all$insert.genomic.center[i]) == degl.dist)[1] closest.degl.name <- degl.temp$Name[closest.degl.id] closest.degl.names <- c(closest.degl.names, closest.degl.name) #nbcs.closest.degl <- c(nbcs.closest.degl, arow$NBCs) #cbvsnbcs.closest.degl <- c(cbvsnbcs.closest.degl, arow$CBs_vsN) if(i %in% seq(0,100000,1000)){ print(i) } } inserts.all$closestdegl <- closest.degl.names inserts.all %>% rename('closestdegl' = 'Name') -> inserts.all inserts.all %>% left_join(select(degl, -c('start','end','chrom','center')), by = 'Name') -> inserts.all rm(closest.degl.name, closest.degl.names, degl.temp, degl.dist, closest.degl.id, i) d <- ggplot(data = inserts.all)+ geom_boxplot(aes(x = insert.type, y = NBCs), alpha = 0.2)+ #geom_boxplot(aes(x = insert.type, y = CBs), alpha = 0.2)+ theme_classic()+ xlab("")+ ylab("")+ scale_x_discrete(labels = c())+ ylim(-10, 10)+ #ylab("Expression difference\nof the closest gene, log2fold")+ #xlab("Insert type")+ theme(axis.text.x = element_text(size = 10, angle = 0, vjust = 0.6)) #annotate('text', x = 2, y = 1, label = paste('p = ',p.GenvsVDJ), size = 4)+ #annotate('text', x = 3, y = 1, label = paste('p = ',p.GenvsVDJCH1), size = 4) #annotate('text', x = 4, y = 10, label = 'p = 6.51e-12', size = 5) d d14 <- ggplot(data = inserts.all)+ geom_boxplot(aes(x = insert.type, y = BMPCs_vsN), alpha = 0.2)+ #geom_boxplot(aes(x = insert.type, y = CBs), alpha = 0.2)+ theme_classic()+ #xlab("")+ #scale_x_discrete(labels = c('Random' = 'Random\nInsertions', 'Real' = 'Donors\nInsertions'))+ ylim(-10, 10)+ #ylab("Expression difference\nof the closest gene, log2fold")+ #xlab("Insert type")+ #theme(axis.text.x = element_text(size = 16, angle = 0, vjust = 0.6)) #annotate('text', x = 2, y = 1, label = paste('p = ',p.GenvsVDJ), size = 4)+ #annotate('text', x = 3, y = 1, label = paste('p = ',p.GenvsVDJCH1), size = 4) #annotate('text', x = 4, y = 10, label = 'p = 6.51e-12', size = 5) d14 col_to_test <- c('NBCs','CBs','CCs','MBCs','prePBs','PBs','EPCs','BMPCs', 
'CBs_vsN','CCs_vsN','MBCs_vsN','prePBs_vsN','PBs_vsN','EPCs_vsN','BMPCs_vsN') wilcox_genvsvdj <- c() wilcox_genvsvdjch1 <- c() wilcox_vdjvsvdjch1 <- c() for(i in col_to_test){ wilcox_genvsvdjch1 <- c(wilcox_genvsvdjch1, wilcox.test(inserts.all[inserts.all$insert.type == 'Generated',i], inserts.all[inserts.all$insert.type == 'VDJ-CH1',i], mu = 0, alternative = "two.sided", paired = F, correct = F, exact = T)$p.value %>% format(digits = 2, scientific = TRUE)) wilcox_genvsvdj <- c(wilcox_genvsvdj, wilcox.test(inserts.all[inserts.all$insert.type == 'Generated',i], inserts.all[inserts.all$insert.type == 'V-DJ',i], mu = 0, alternative = "two.sided", paired = F, correct = F, exact = T)$p.value %>% format(digits = 2, scientific = TRUE)) wilcox_vdjvsvdjch1 <- c(wilcox_vdjvsvdjch1, wilcox.test(inserts.all[inserts.all$insert.type == 'V-DJ',i], inserts.all[inserts.all$insert.type == 'VDJ-CH1',i], mu = 0, alternative = "two.sided", paired = F, correct = F, exact = T)$p.value %>% format(digits = 2, scientific = TRUE)) } wilcox.matrix <- data.frame(population = col_to_test, gen.vs.vdj = wilcox_genvsvdj, gen.vs.vdj.sign = "ns", #as.numeric(wilcox_genvsvdj) <= 0.05, gen.vs.vdjch1 = wilcox_genvsvdjch1, gen.vs.vdjch1.sign = "ns", #as.numeric(wilcox_genvsvdjch1) <= 0.05, vdj.vs.vdjch1 = wilcox_vdjvsvdjch1, vdj.vs.vdjch1.sign = "ns", stringsAsFactors = FALSE) #as.numeric(wilcox_vdjvsvdjch1) <= 0.05) wilcox.matrix$gen.vs.vdjch1 <- as.numeric(as.character(wilcox.matrix$gen.vs.vdjch1)) wilcox.matrix$gen.vs.vdj <- as.numeric(as.character(wilcox.matrix$gen.vs.vdj)) wilcox.matrix$vdj.vs.vdjch1 <- as.numeric(as.character(wilcox.matrix$vdj.vs.vdjch1)) wilcox.matrix$gen.vs.vdjch1.sign[wilcox.matrix$gen.vs.vdjch1 <= 0.05] <- "*" wilcox.matrix$gen.vs.vdjch1.sign[wilcox.matrix$gen.vs.vdjch1 <= 0.01] <- "**" wilcox.matrix$gen.vs.vdjch1.sign[wilcox.matrix$gen.vs.vdjch1 <= 0.001] <- "***" wilcox.matrix$gen.vs.vdjch1.sign[wilcox.matrix$gen.vs.vdjch1 <= 0.0001] <- "****" 
wilcox.matrix$gen.vs.vdj.sign[wilcox.matrix$gen.vs.vdj <= 0.05] <- "*" wilcox.matrix$gen.vs.vdj.sign[wilcox.matrix$gen.vs.vdj <= 0.01] <- "**" wilcox.matrix$gen.vs.vdj.sign[wilcox.matrix$gen.vs.vdj <= 0.001] <- "***" wilcox.matrix$gen.vs.vdj.sign[wilcox.matrix$gen.vs.vdj <= 0.0001] <- "****" wilcox.matrix$vdj.vs.vdjch1.sign[wilcox.matrix$vdj.vs.vdjch1 <= 0.05] <- "*" wilcox.matrix$vdj.vs.vdjch1.sign[wilcox.matrix$vdj.vs.vdjch1 <= 0.01] <- "**" wilcox.matrix$vdj.vs.vdjch1.sign[wilcox.matrix$vdj.vs.vdjch1 <= 0.001] <- "***" wilcox.matrix$vdj.vs.vdjch1.sign[wilcox.matrix$vdj.vs.vdjch1 <= 0.0001] <- "****" ggarrange(d, d1, d2, d3, d4, d5, d6, d7, labels = c('NBCs','CBs','CCs','MBCs','prePBs','PBs','EPCs','BMPCs'), ncol = 4, nrow = 2) inserts.all.melted <- melt(inserts.all, id = c('run.id','sample.id','insert.id.s.', 'ins.id','contig.id','Name', 'insert.type'), measure.vars = col_to_test) inserts.all.melted %>% rename("variable" = "population", "value" = "expression") -> inserts.all.melted wilcox.matrix %>% select(-c(vdj.vs.vdjch1,vdj.vs.vdjch1.sign, gen.vs.vdj, gen.vs.vdjch1)) %>% rename("gen.vs.vdj.sign" = "V-DJ", "gen.vs.vdjch1.sign" = "VDJ-CH1") %>% melt(id = c('population')) %>% rename("variable" = "insert.type", "value" = "significance") -> wilcox.matrix.molten inserts.all.melted1 <- left_join(inserts.all.melted, wilcox.matrix.molten, by = c("insert.type","population")) inserts.all.melted1 <- transform(inserts.all.melted1, population=factor(population, levels = levels(inserts.all.melted$population))) diff_exp_closest_gene <- ggplot(data = inserts.all.melted1)+ geom_boxplot(aes(x = insert.type, y = expression, fill = insert.type), alpha = 0.2, outlier.shape = NA)+ geom_text(aes(x = insert.type, y = 4.5,label = significance), data = distinct(inserts.all.melted1, population, insert.type, .keep_all = TRUE)%>% filter(significance != 'ns'))+ theme_classic()+ xlab("")+ ylim(-5, 5)+ ylab("Expression difference\nof the closest gene, log2fold")+ xlab("Insert type")+ 
facet_wrap( ~ population, ncol = 4) diff_exp_closest_gene ggsave(paste('proximal_expression_boxplots_', format(Sys.time(), "%d%m%y_%H%M"), '.pdf', sep = ''), width = 12, height = 6.75, device = "pdf", plot = diff_exp_closest_gene) if(delete_the_plots_from_R_memory == TRUE){rm(diff_exp_closest_gene)} rm(wilcox.matrix)
669665f3ca29d0ce8295c19837cd6f815d96945a
a3a86cbf369626f205863425cbf1a373ab8a55f9
/man/discard_short_fixations.Rd
e43f80737f4732595dc1838ce1bd94a112807b77
[ "MIT" ]
permissive
chandms/ARETT-R-Package
77bc0efb20e08cc345b06714eab9ca91ad4e02e6
cb1c45947b29dd989e0109e60c67af2bac5d0968
refs/heads/master
2023-09-02T01:24:19.471989
2021-03-24T09:17:11
2021-03-24T09:17:11
null
0
0
null
null
null
null
UTF-8
R
false
true
1,459
rd
discard_short_fixations.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/discard_short_fixations.R \name{discard_short_fixations} \alias{discard_short_fixations} \title{Discard Short Fixations} \format{ Input data frame columns \describe{ \item{eventIndex}{Index of the event in which the gaze point lies} \item{eventType}{Type of the current event} \item{eventDuration}{Duration of the current event} \item{fixation_x}{X coordinate of the current fixation} \item{fixation_y}{Y coordinate of the current fixation} \item{fixation_z}{Z coordinate of the current fixation} } } \usage{ discard_short_fixations(data, min_duration = 60, reclassify_saccade = FALSE) } \arguments{ \item{data}{Data frame of the eye tracking data we want to process} \item{min_duration}{Minimum duration of a fixation} \item{reclassify_saccade}{Reclassify discarded fixations as saccade instead of a gap} } \value{ The input data frame with the fixation classifications which are too short discarded } \description{ Discard fixations which have a duration shorter than the specified minimum duration. } \details{ This function removes all fixations which are shorter than the specified minimum duration and reclassifies them as gaps. If \code{reclassify_saccade} is specified they are classified as saccade instead of a gap. This post processing is only necessary after the I-VT filter as both I-DT as well as I-AOI inherently include a minimum fixation duration. }
5981e5ad51865cdf53fecaa89d8866b308dbf061
3071df15b012f0fb792b13b6592213699c7bece2
/src/main/player_data_finder.R
c170106cf0be58c67628986ac0d65b9c49ef4f3e
[]
no_license
day536/fantasy
fb3303803f3d6a5c60a621872ec6b50776339fc3
801d8ad27f25ee5d5875d06a296a49c75efe852e
refs/heads/master
2022-09-05T13:47:34.402724
2020-05-27T18:43:06
2020-05-27T18:43:06
null
0
0
null
null
null
null
UTF-8
R
false
false
2,649
r
player_data_finder.R
# Dynasty Fantasy Football Predictions # Willis Day and Pat McHugh # May 7, 2020 library(httr) library(jsonlite) #library(dplyr) #library(tidyverse) lids <- c("460932538890711040") years <- c(2020) lid <- lids[1] year <- years[1] league <- GET(paste("https://api.sleeper.app/v1/league/", lids[1], sep="")) leagueList <- fromJSON(content(league,as="text")) getOwnerList <- function(lid){ owners = GET(paste("https://api.sleeper.app/v1/league/", lid, "/users", sep="")) ownerList <- fromJSON(content(owners,as="text")) return (ownerList) } getRosterList <- function(lid){ rosters = GET(paste("https://api.sleeper.app/v1/league/", lid, "/rosters", sep="")) rosterList <- fromJSON(content(rosters,as="text")) return (rosterList) } getPlayerList <- function(lid){ players = GET("https://api.sleeper.app/v1/players/nfl") playerList <- fromJSON(content(players,as="text")) return (playerList) } getRosterDf <- function(rosterList, playerDf){ rosterDf <- data.frame(owner_id=rosterList$owner_id) starters <- do.call(rbind, rosterList$starters) nStarters <- ncol(starters) fullRosters <- do.call(rbind, rosterList$players) rosterSize <- ncol(fullRosters) nReserves <- rosterSize - nStarters rostersWithNames <- matrix(nrow=nrow(rosterDf), ncol=rosterSize*2) for (i in 1:nrow(rosterDf)){ reserves <- setdiff(fullRosters[i,], starters[i,]) roster <- c(starters[i,], reserves) length(roster) <- rosterSize rosterNames <- unname(unlist(sapply(roster, function(x){playerVec[which(names(playerVec) == x)]}))) length(rosterNames) <- rosterSize newRow <- c(rbind(roster, rosterNames)) rostersWithNames[i,] <- newRow } rosterDf <- cbind(rosterDf, rostersWithNames) idColNames <- c(paste("starter_", 1:nStarters, sep=""), paste("reserve_", 1:nReserves, sep="")) nameColNames <- c(paste("starter_", 1:nStarters, "_name", sep=""), paste("reserve_", 1:nReserves, "_name", sep="")) colnames(rosterDf) <- c("owner_id", c(rbind(idColNames, nameColNames))) return (rosterDf) } ownerList <- getOwnerList(lid) rosterList <- 
getRosterList(lid) playerList <- getPlayerList(lid) ownerDf <- data.frame(owner_id=ownerList$user_id, owner_name=ownerList$display_name, owner_index=1:length(ownerList$user_id), stringsAsFactors = F) playerVec <- sapply(playerList, function(x){if (exists("full_name", where=x)){return (x[["full_name"]])} else {return (x[["player_id"]])}}) playerDf <- data.frame(player_id=names(playerList), player_name=unlist(unname(playerVec))) rosterDf <- getRosterDf(rosterList, playerDf) ownersAndRosters <- merge(ownerDf, rosterDf, by="owner_id")
0e7991f515486e8ba4055ec19917f1eeece719c8
9d8b743d802fb2a067ea4b1c12b44f4d9685418b
/man/data-dataContour.Rd
8406b6b51bcc2c23f881daa0cac269394682c4be
[]
no_license
cran/RcmdrPlugin.KMggplot2
24bee4662164a40a502101b4aef4e0aaa500e23d
a7397a49cf3ceab2bfc33fff8812e3c31b9e030d
refs/heads/master
2021-01-14T02:35:35.657819
2019-09-17T06:10:02
2019-09-17T06:10:02
17,693,102
0
1
null
null
null
null
UTF-8
R
false
true
831
rd
data-dataContour.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-aaa.r \docType{data} \name{dataContour} \alias{dataContour} \title{A Dummy Data for Contour Plots} \format{A data frame with 10000 observations on the following 3 variables. \describe{ \item{\code{x}}{a x-axis variable} \item{\code{y}}{a y-axis variable} \item{\code{z}}{density} }} \description{ A Dummy Data for Contour Plots } \examples{ # This data is generated by the code below: # set.seed(5435678) # dataContour <- data.frame( # x = (zx <- (rep(1:100, 100) - 5) / 2), # y = (zy <- (rep(1:100, each=100) - 5) / 2), # z = (z <- zx*zy) # ) # save(dataContour, file = "dataContour.RData") # try(data(dataContour, package = "RcmdrPlugin.KMggplot2")) } \seealso{ contour-class } \keyword{data}
3d23f08f415947f3af5970364bcd62a0ae782956
76361d555c85f527a77ffd6d03778dc406877aab
/module2/dataframes.R
cdec8baad8fab53a1470b046300d2381c3151558
[]
no_license
DanielleQuinn/AAFC_Workshop
8eeceb4d61c602f5a20a4b9bef0100e2258175f5
f19a7172db820b8fa91a0c08e3bf5d3908ed7524
refs/heads/master
2021-02-08T18:30:22.836172
2020-11-26T21:00:37
2020-11-26T21:00:37
244,183,976
1
0
null
null
null
null
UTF-8
R
false
false
4,411
r
dataframes.R
# ---- Project Description ---- # This project is used for ... # It was last updated by ... on ... R.version.string # It uses R version 4.0.2 # ---- Load packages ---- library(lubridate) library(stringr) library(dplyr) # Note: If an error is produced saying there is no such package # it means you need to install the package using install.packages() # ---- Importing Data ---- # List the files & folders that are available in your project list.files() # List the files and folders available in the "data" folder list.files("data") # Comma Separated Values (.csv) data <- read.csv("data/fish.csv") # Tab Delimited Values (.txt) taxonomy <- read.delim("data/taxonomy.txt") # ---- Exploring Data Frames ---- View(data) # View data in a new tab View(taxonomy) dim(data) # Number of rows and columns nrow(data) # Number of rows ncol(data) # Number of columns head(data) # Display the first six rows tail(data) # Display the last six rows names(data) # Display the names of each column summary(data) # Summarise each column str(data) # Display the structure of the object glimpse(data) # Display the structure of the object using {dplyr} # ---- Factors ---- ########################################################################## ## By default ... # ## R < 4.0 character columns in imported data are treated as FACTORS # ## R >= 4.0 character columns in imported data are treated as CHARACTERS # ########################################################################## # Factors are variables that have levels / categories / groups class(data$habitat) # If R < 4.0, this will be a factor. If R >= 4.0, this will be a character. # Step One: Do we want to treat this variable as a factor or a character? # Step Two: Do we need to change it? class(data$habitat) # Step Three: If so... ## Change a character column to a factor class(data$habitat) data$habitat <- as.factor(data$habitat) class(data$habitat) levels(data$habitat) # What levels (categories) do we have? 
str(data) # How does this change the structure? ## Change a factor column to a character class(data$habitat) data$habitat <- as.character(data$habitat) class(data$habitat) # WARNING: What happens if you tried to switch from a factor to a number? test_sites <- as.factor(c(34, 34, 35, 35, 36, 36)) # Create a new object to test this out on test_sites # This is what it looks like class(test_sites) # It is a factor levels(test_sites) # It has three levels as.numeric(test_sites) # Why do you think this happens? # If you use R >= 4.0 you are less likely to run into trouble with factors! # ---- Joining Data ---- # How might we want to combine these data sets? head(data) head(taxonomy) # Demonstration Using Test Data test_survey1 <- data.frame(person = c("A", "B", "C", "D"), colour = c("red", "blue", "green", "blue")) test_survey2 <- data.frame(person = c("A", "B", "C", "E", "F", "G"), animal = c("dog", "dog", "cat", "horse", "dog", "cat")) test_survey1 test_survey2 # Functions in the _join(x, y) family add columns from y to x, matching rows based on the key(s) # left_join(x, y) : keeps all rows that appear in x left_join(test_survey1, test_survey2) # inner_join(x, y) : keeps all rows that appear in BOTH x and y inner_join(test_survey1, test_survey2) # full_join(x, y) : keeps all rows that apear in EITHER x or y full_join(test_survey1, test_survey2) ## Which join do we want to use to combine our two data frames? head(data) head(taxonomy) left_join(data, taxonomy) # Make this change permanent data <- left_join(data, taxonomy) # ---- Converting Cases ---- # Is there anything we might want to do to clean up the scientific names? 
data$scientific_name # Convert to lower case str_to_lower(data$scientific_name) # Convert to title case str_to_title(data$scientific_name) # Convert to sentence case str_to_sentence(data$scientific_name) # Make the change permanent by overwriting the existing column head(data) # Still in upper case data$scientific_name <- str_to_sentence(data$scientific_name) head(data) # Converted to title case # ---- Write (Save) Changes to a New File ---- write.csv(data, "data/clean_fish.csv", row.names = FALSE)
e1cba04534ee24f686b8597fb4c91b07f9494fac
75ec20adb7fc7f8df4bfc2a45a8d45a3da69d857
/scripts/explore/chl_oc_cci/climatology_map.R
43d1fb1e4f76a9f140533f20e00df0670116772d
[]
no_license
dreanod/redseachl
a824ebfe8334b07f5ed7030aeaef486b44b262a0
e0e89f4210199667740faddb66b85ee3270c4ae3
refs/heads/master
2016-09-05T14:47:11.006842
2015-05-07T05:53:32
2015-05-07T05:53:32
25,761,215
0
0
null
null
null
null
UTF-8
R
false
false
3,470
r
climatology_map.R
library(raster) library(scales) library(ggplot2) library(RColorBrewer) FILES <- list.files('data/chl_oc_cci/clean/', pattern='*.grd', full.names=TRUE) outputDir <- 'derived/EDA/chl_oc_cci/climatology_maps' dir.create(outputDir, recursive=TRUE) m <- as.matrix(raster(FILES[1])) m <- array(NA, c(dim(m), length(FILES))) for (i in 1:length(FILES)) { f <- FILES[i] print(f) r <- raster(f) m[,,i] <- as.matrix(r) } clim <- apply(m, c(1,2), mean, na.rm=TRUE) r <- raster(FILES[1]) r[,] <- clim df <- as.data.frame(r , xy=TRUE) names(df) <- c('long', 'lat', 'chl') load('data/common/shapefiles/red_sea_outside.Rdata') p <- ggplot() p <- p + geom_tile(aes(x=long, y=lat, fill=chl), data=df) p <- p + scale_fill_gradientn(colours=rev(brewer.pal(7, 'Spectral')), limits=c(0.05, 10), trans='log', oob=squish, name='chl (mg/m^3)', breaks=c(0.1, 1, 10), labels=as.character(c(0.1, 1, 10))) p <- p + coord_cartesian() p <- p + geom_polygon(aes(x=long, y=lat, group=group), red.sea.outside, fill='white', contour='black') p <- p + ggtitle('yearly CHL average (OC-CCI 1997-2012)') fn <- paste(outputDir, '/yearly_average.png', sep='') ggsave(fn, plot=p) # seasonal climatologies (summer/winter) date_from_filename <- function(f) { b <- basename(f) b <- strsplit(b, '[.]')[[1]] b <- b[1] return(as.Date(b)) } m <- as.matrix(raster(FILES[1])) m_winter <- array(NA, c(dim(m), length(FILES))) m_summer <- array(NA, c(dim(m), length(FILES))) for (i in 1:length(FILES)) { f <- FILES[i] print(f) r <- raster(f) d <- date_from_filename(f) month <- as.numeric(format(d, '%m')) if (month > 3 & month < 10) { m_summer[,,i] <- as.matrix(r) } else { m_winter[,,i] <- as.matrix(r) } } r_summer <- raster(FILES[1]) clim_summer <- apply(m_summer, c(1,2), mean, na.rm=TRUE) r_summer[,] <- clim_summer r_winter <- raster(FILES[1]) clim_winter <- apply(m_winter, c(1,2), mean, na.rm=TRUE) r_winter[,] <- clim_winter df <- as.data.frame(r_winter, xy=TRUE) names(df) <- c('long', 'lat', 'chl') p <- ggplot() p <- p + 
geom_tile(aes(x=long, y=lat, fill=chl), data=df) p <- p + scale_fill_gradientn(colours=rev(brewer.pal(7, 'Spectral')), limits=c(0.05, 10), trans='log', oob=squish, name='chl (mg/m^3)', breaks=c(0.1, 1, 10), labels=as.character(c(0.1, 1, 10))) p <- p + coord_cartesian() p <- p + geom_polygon(aes(x=long, y=lat, group=group), red.sea.outside, fill='white', contour='black') p <- p + ggtitle('winter CHL average (OC-CCI 1997-2012)') fn <- paste(outputDir, '/winter_average.png', sep='') ggsave(fn, plot=p) df <- as.data.frame(r_summer, xy=TRUE) names(df) <- c('long', 'lat', 'chl') p <- ggplot() p <- p + geom_tile(aes(x=long, y=lat, fill=chl), data=df) p <- p + scale_fill_gradientn(colours=rev(brewer.pal(7, 'Spectral')), limits=c(0.05, 10), trans='log', oob=squish, name='chl (mg/m^3)', breaks=c(0.1, 1, 10), labels=as.character(c(0.1, 1, 10))) p <- p + coord_cartesian() p <- p + geom_polygon(aes(x=long, y=lat, group=group), red.sea.outside, fill='white', contour='black') p <- p + ggtitle('summer CHL average (OC-CCI 1997-2012)') fn <- paste(outputDir, '/summer_average.png', sep='') ggsave(fn, plot=p)
b9bda8d2f54b4f92c60a0a131d1a5172fa9437ec
0cc572af25bec5e671a1d085435d3e088b6cb1cc
/decision+tree+assignment.R
9a1e4e342a76e42b54703b8617ec97ca31e511b6
[]
no_license
Nikanksha/Hypothesis-Testing
89d1260997f9b3027cdff2452d6d3fe981868467
e23e56252914c3808e1204ccdb121a04c97de27a
refs/heads/master
2022-09-21T19:42:49.964818
2020-06-04T12:00:05
2020-06-04T12:00:05
269,346,706
0
0
null
null
null
null
UTF-8
R
false
false
1,398
r
decision+tree+assignment.R
data("iris") install.packages("caret") install.packages("C50") library(caret) library(C50) inTraininglocal<-createDataPartition(iris$Species,p=.70,list = F) training<-iris[inTraininglocal,] testing<-iris[-inTraininglocal,] #model building model<-C5.0(training$Species~.,data=training) #generate the model summary summary(model) #predict for test data pred<-predict.C5.0(model,testing[,-5]) a<-table(testing$Species,pred) sum(diag(a))/sum(a) #company data set Company_Data install.packages("caret") install.packages("C50") library(caret) library(C50) inTraininglocal<-createDataPartition(Company_Data$Sales,p=.70,list = F) training<-iris[inTraininglocal,] testing<-iris[-inTraininglocal,] #model building model<-C5.0(training$Species~.,data=training) #generate the model summary summary(model) #predict for test data pred<-predict.C5.0(model,testing[,-5]) a<-table(testing$Species,pred) sum(diag(a))/sum(a)") install.packages("caret") install.packages("C50") library(caret) library(C50) inTraininglocal<-createDataPartition(iris$Species,p=.70,list = F) training<-iris[inTraininglocal,] testing<-iris[-inTraininglocal,] #model building model<-C5.0(training$Species~.,data=training) #generate the model summary summary(model) #predict for test data pred<-predict.C5.0(model,testing[,-5]) a<-table(testing$Species,pred) sum(diag(a))/sum(a)
18f0b7c983fa42c0ce25817b617da50581e6ea90
bc66cb62f1e35fc0db76661eb8abe1b4ffa05f19
/man/write_errors.Rd
61aed68aa2680dbd23ce86814d2ba24e2ebf253a
[ "MIT" ]
permissive
bms63/timber
c4767ecdb97b762ecded50a6b36bd3584acbd8b6
5df97d7db4089f1ae405f202c43f17a4bf0d4434
refs/heads/main
2023-06-16T11:12:24.530880
2021-07-12T15:08:40
2021-07-12T15:08:40
null
0
0
null
null
null
null
UTF-8
R
false
true
455
rd
write_errors.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/writer.R \name{write_errors} \alias{write_errors} \title{Format errors attribute for writing} \usage{ write_errors() } \value{ A formatted vector of errors } \description{ Format errors attribute for writing } \examples{ scriptPath <- tempfile() logDir <- tempdir() writeLines("print('hello timber')", con = scriptPath) log_remove() log_config(scriptPath) write_errors() }
39bf821506c19a2640c9fd6954bfa1e8af72d1ed
4d86fab1be703b41c9972b0ce1c2cba6326cbf93
/MechaCarChallenge.R
073dfbd8896cac2bb8f3cdce132df268e8210d86
[]
no_license
Bominkkwon/MechaCar_Statistical_Analysis
1eb77a3a82dd399ab213de949ab81c77262984ac
e5278bfd6f5006893fa379235b5d5174e0f5dda8
refs/heads/main
2023-04-25T16:31:51.432341
2021-05-31T03:32:03
2021-05-31T03:32:03
372,085,786
0
0
null
null
null
null
UTF-8
R
false
false
1,203
r
MechaCarChallenge.R
library(dplyr) library(tidyverse) MechaCar_mpg_table <- read.csv(file='MechaCar_mpg.csv') Suspension_Coil_table <- read.csv(file='Suspension_Coil.csv') head(MechaCar_mpg_table) lm(formula = mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD, data = MechaCar_mpg_table) summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,MechaCar_mpg_table)) total_summary <- data.frame( Mean=mean(Suspension_Coil_table$PSI), Median=median(Suspension_Coil_table$PSI), Variance=var(Suspension_Coil_table$PSI), SD=sd(Suspension_Coil_table$PSI)) show(total_summary) lot_summary <- Suspension_Coil_table %>% group_by(Manufacturing_Lot) %>% summarize(Mean=mean(PSI), Median=median(PSI),Variance=var(PSI),SD=sd(PSI), .groups = 'keep') t.test(x=Suspension_Coil_table$PSI,mu=1500) lot1_subset <- subset(Suspension_Coil_table, Manufacturing_Lot=='Lot1') t.test(x=lot1_subset$PSI, mu=1500) lot2_subset <- subset(Suspension_Coil_table, Manufacturing_Lot=='Lot2') t.test(x=lot2_subset$PSI, mu=1500) lot3_subset <- subset(Suspension_Coil_table, Manufacturing_Lot=='Lot3') t.test(x=lot3_subset$PSI, mu=1500)
0cd8662edfb3feabaa99cc7eca11cd1c400b331b
23f456b19715dfaae4fa452c4f5bacbf9425d837
/man/heatMatrix.Rd
8e877f2d5ac8f6d8e8ca5b6bab5675716cad6020
[]
no_license
al2na/genomation
7a1c3a3aae5c8bfc74e32ee3a9151d03b28d57ca
2b701f676c2bc532bbf392ac6dd97c3f869b5f15
refs/heads/master
2020-04-05T23:45:41.324386
2015-08-25T11:01:12
2015-08-25T11:01:12
1,983,197
1
1
null
null
null
null
UTF-8
R
false
false
5,542
rd
heatMatrix.Rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/plotMatrix.R \name{heatMatrix} \alias{heatMatrix} \title{Draw a heatmap of a given ScoreMatrix object} \usage{ heatMatrix(mat, grid = FALSE, col = NULL, xcoords = NULL, group = NULL, group.col = NULL, order = FALSE, user.order = FALSE, winsorize = c(0, 100), clustfun = NULL, main = "", legend.name = NULL, cex.legend = 1, xlab = NULL, cex.main = 1, cex.lab = 1, cex.axis = 1, newpage = TRUE) } \arguments{ \item{mat}{a \code{ScoreMatrix} object} \item{grid}{if TRUE, grid graphics will be used. if FALSE, base graphics will be used on the top level, so users can use par(mfrow) or par(mfcol) prior to calling the function. Default:FALSE} \item{col}{a vector of colors, such as the ones created by heat.colors(10). If NULL (which is default), jet color scheme (common in matlab plots) will be used.} \item{xcoords}{a vector of numbers showing relative positions of the bases or windows. It must match the number of columns in the \code{ScoreMatrix}. Alternatively, it could be a numeric vector of two elements. Such as c(0,100) showing the relative start and end coordinates of the first and last column of the \code{ScoreMatrix} object.} \item{group}{a list of vectors of row numbers or a factor. This grouping is used for rowside colors of the heatmap. If it is a list, each element of the list must be a vector of row numbers. Names of the elements of the list will be used as names of groups. If \code{group} is a factor , it's length must match the number of rows of the matrix, and factor levels will be used as the names of the groups in the plot.} \item{group.col}{a vector of color names to be used at the rowside colors if \code{group} argument is given or \code{clustfun} function is given.} \item{order}{Logical indicating if the rows should be ordered or not (Default:FALSE). If \code{order=TRUE} the matrix will be ordered with rowSums(mat) values in descending order. 
If \code{group} argument is provided, first the groups will be ordered in descending order of sums of rows then, everything within the clusters will be ordered by sums of rows. If \code{clustfun} is given then rows within clusters will be order in descending order of sums of rows.} \item{user.order}{a numerical vector indicating the order of groups/clusters (it works only when \code{group} or \code{clustfun} argument is given).} \item{winsorize}{Numeric vector of two, defaults to c(0,100). This vector determines the upper and lower percentile values to limit the extreme values. For example, c(0,99) will limit the values to only 99th percentile, everything above the 99 percentile will be equalized to the value of 99th percentile.This is useful for visualization of matrices that have outliers.} \item{clustfun}{a function for clustering rows of \code{mat} that returns a vector of integers indicating the cluster to which each point is allocated (a vector of cluster membership), e.g. k-means algorithm with 3 centers: function(x) kmeans(x, centers=3)$cluster. 
By default FALSE.} \item{main}{a character string for the plot title} \item{legend.name}{a character label plotted next to the legend} \item{cex.legend}{A numerical value giving the amount by which legend axis marks should be magnified relative to the default} \item{xlab}{label a character string for x-axis of the heatmap} \item{cex.main}{A numerical value giving the amount by which plot title should be magnified} \item{cex.lab}{A numerical value giving the amount by which axis labels (including 'legend.name') should be magnified relative to the default.} \item{cex.axis}{A numerical value giving the amount by which axis marks should be magnified relative to the default} \item{newpage}{logical indicating if \code{grid.newpage()} function should be invoked if \code{grid=TRUE}.} } \value{ returns clustering result invisibly, if clustfun is definied } \description{ The function makes a heatmap out of given \code{ScoreMatrix} object. If desired it can use clustering using given clustering function (e.g. k-means) and plot cluster color codes as a sidebar. In addition, user can define groups of rows using 'group' argument. 
} \examples{ # data(cage) # data(promoters) # scores1=ScoreMatrix(target=cage,windows=promoters,strand.aware=TRUE, # weight.col="tpm") # set.seed(1000) # heatMatrix(mat=scores1,legend.name="tpm",winsorize=c(0,99),xlab="region around TSS", # xcoords=-1000:1000, # cex.legend=0.8,main="CAGE clusters on promoters",cex.lab=1, # cex.axis=0.9,grid=FALSE) ## examples using clustering functions ## k-means # cl1 <- function(x) kmeans(x, centers=3)$cluster # set.seed(1000) # heatMatrix(mat=scores1,legend.name="tpm",winsorize=c(0,99),xlab="region around TSS", # xcoords=-1000:1000,clustfun=cl1, # cex.legend=0.8,main="CAGE clusters on promoters",cex.lab=1, # cex.axis=0.9,grid=FALSE, # user.order=c(1,3,2)) ## hierarchical clustering # cl2 <- function(x) cutree(hclust(dist(x), method="complete"), k=3) # set.seed(1000) # heatMatrix(mat=scores1,legend.name="tpm",winsorize=c(0,99),xlab="region around TSS", # xcoords=-1000:1000,clustfun=cl2, # cex.legend=0.8,main="CAGE clusters on promoters",cex.lab=1, # cex.axis=0.9,grid=FALSE) # # }
46acea4577cc8351071b196ec5e3b2f67f94ed84
050edfa53f5ec7d76b2321c552266e0f60e4db92
/man/diamond.Rd
a5ecb035f90df5fd31caa07b69eda5d3864a7392
[]
no_license
placeboo/subgraph
e1ab54fabda52ed4243fdc5cdc2a348b2da6d41c
37036807aa7bd75aeab90fe224fdd44c126fb3f9
refs/heads/master
2021-10-27T15:54:59.877512
2019-04-18T08:08:57
2019-04-18T08:08:57
107,905,890
0
1
null
null
null
null
UTF-8
R
false
true
452
rd
diamond.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diamond.R \name{diamond} \alias{diamond} \title{Diamond graphs List all possible diamond graphs based on given four nodes} \usage{ diamond(x) } \arguments{ \item{x}{The vector representing nodes} } \value{ A matrix listing edges of Diamond graphs } \description{ Diamond graphs List all possible diamond graphs based on given four nodes } \examples{ diamond(c(1:4)) }
ddab6a12f0e0f1d248b642ae0ec663a9a0a42711
a31a28a10f58460aebc21891f552250004c43582
/rscripts/Scripts_page180.R
95cd36da2b1b1f49177b97a6868d3730f14d0db3
[]
no_license
suvaries/racademy
540809825659cb8105d3ae01587c37dedd1ad151
482d4a622661934984b864a924743b5baace6964
refs/heads/master
2020-03-19T09:50:38.507466
2018-06-05T20:50:24
2018-06-05T20:50:24
null
0
0
null
null
null
null
UTF-8
R
false
false
1,039
r
Scripts_page180.R
# getYahoo: download historical prices for a list of tickers from
# Yahoo Finance via quantmod::getSymbols.
#
# tickerList : character vector of ticker symbols
# from, end  : date range as "YYYY-MM-DD" strings
# pricetype  : which OHLCV column to return -- "Open", "High", "Low",
#              "AdjClose"; any other value falls back to Close,
#              matching the original if/else chain.
#
# Returns a data.frame with one column per ticker.
getYahoo <- function(tickerList, from = "2017-01-01", end = "2018-01-01",
                     pricetype = "AdjClose") {
  library(quantmod)

  # Map the requested price type onto its OHLCV column index.
  # switch() with a trailing unnamed default replaces the if/else ladder.
  type <- switch(pricetype,
                 AdjClose = 6,
                 Open     = 1,
                 High     = 2,
                 Low      = 3,
                 4)  # default: Close

  # Preallocate instead of growing the list inside the loop
  data <- vector("list", length(tickerList))
  for (i in seq_along(tickerList)) {
    data[[i]] <- getSymbols(tickerList[i], src = "yahoo",
                            auto.assign = FALSE, warnings = FALSE,
                            from = from, to = end)[, type]
  }

  data <- as.data.frame(data)
  colnames(data) <- tickerList
  return(data)
}

## TEST ##
tickerList <- c("^GSPC", "MSFT", "SBUX", "XOM", "AMZN")
pricetype <- "AdjClose"
from <- "2015-01-01"
end <- "2016-01-01"

data <- getYahoo(tickerList, from, end, pricetype)
55329f7ab9aeb7b2df35285dd515d7e249ab774a
b6fe639016db185ea6dc74c65e7aee63d62699c8
/plot5.r
973e179121a841c01af2c143916d94426c18e6b6
[]
no_license
aluuu/ExData_CourseProject2
580ff072d54a39834ee965e2bffe3218e08997d4
c5c984ac79b51072ee651b4a944e870df49c8fa0
refs/heads/master
2020-12-25T14:23:48.576273
2016-09-03T09:34:54
2016-09-03T09:34:54
67,282,490
0
0
null
null
null
null
UTF-8
R
false
false
443
r
plot5.r
# Plot 5: change in PM2.5 emissions from motor vehicle (ON-ROAD) sources
# in Baltimore City (fips == "24510") over time.
# Expects load_data.r to provide `NEI` and attach dplyr (the original
# script already relied on %>% coming from there).

source("load_data.r")
library(ggplot2)

# Restrict to Baltimore City, then to on-road (motor vehicle) sources
bcNEI <- subset(NEI, fips=="24510")
bcMotorNEI <- subset(bcNEI, type=="ON-ROAD")

# Total emissions per year.
# group_by_() is deprecated in dplyr; use group_by() with the bare column.
emissions <- group_by(bcMotorNEI, year) %>%
  summarise(total_emission = sum(Emissions, na.rm = TRUE))

qplot(year, total_emission, data=emissions, geom="line") +
  ggtitle("Changes of emissions from PM2.5 from motor vehicle sources") +
  ylab("Total PM2.5 emission (t)") +
  xlab("Year")

ggsave("plot5.png")
68b2cdacd86a95ffcdb814efdbac1fe2ec3fce74
7bd4158d7a2b701f3d33c1335ef62635b6f8f05a
/tests/testthat/test-gen_sta_xml.R
0f802f70e94c356cc8488e0227a123faec5a9659
[]
no_license
SticsRPacks/SticsRFiles
f7ceccf81a805cd87939021b76e1d174b5d54c52
ed3ef394244f3e7a44093dd3c3424ee710282020
refs/heads/main
2023-08-30T21:41:20.020763
2023-07-28T08:44:29
2023-07-28T08:44:29
187,986,787
2
3
null
2023-07-28T08:44:30
2019-05-22T07:32:34
R
UTF-8
R
false
false
831
r
test-gen_sta_xml.R
# Tests for gen_sta_xml(): generating a station XML file for the
# latest supported STICS version.

library(SticsRFiles)

# Resolve the most recent STICS version the package supports
stics_version <- get_stics_versions_compat()$latest_version
version_num <- get_version_num()

context("Creating an xml station file to latest version")

workspace_path <- get_examples_path("xml", stics_version = stics_version)

# Download the example parameter workbook and load the sheets we need:
# "Station" is the valid input, "Ini" is deliberately the wrong one.
xl_path <- download_usm_xl(file = "inputs_stics_example.xlsx")
sta_param_df <- read_params_table(file = xl_path, sheet_name = "Station")
ini_param_df <- read_params_table(file = xl_path, sheet_name = "Ini")

# Write the generated XML into a dedicated temporary directory
out_dir <- file.path(tempdir(), "sta_xml")
if (!dir.exists(out_dir)) {
  dir.create(out_dir)
}

gen_sta_xml(out_dir = out_dir, param_df = sta_param_df)

test_that("Create a xml station file", {
  # A station file must have been produced from the Station sheet
  expect_true(file.exists(file.path(out_dir, "climatex_sta.xml")))
})

test_that("Create a xml station file", {
  # Feeding an Ini parameter table to the station generator must fail
  expect_error(gen_sta_xml(out_dir = out_dir, param_df = ini_param_df))
})
33b3fc37ccc32dcae0754318946e8fe0dce4f057
1d91fb9f180131ac09491227c960f5f3ea58eaed
/man/ineq_eta_dag.Rd
96dd3f43bb8ea8eca9101ab934e17448e605b16e
[]
no_license
alysonvanraalte/LifeIneq
f1616547978ac1783b872badf85f6e4b2de9b67b
78e4b84dec2de0c669b41c341f91250325fad2f0
refs/heads/master
2023-06-07T22:40:03.627355
2023-06-06T08:20:40
2023-06-06T08:20:40
224,405,912
5
5
null
2023-04-26T13:10:10
2019-11-27T10:34:02
R
UTF-8
R
false
true
1,582
rd
ineq_eta_dag.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/indices.R \name{ineq_eta_dag} \alias{ineq_eta_dag} \title{ineq_eta_dag} \usage{ ineq_eta_dag(age, dx, lx, ex, ax, check = TRUE) } \arguments{ \item{age}{numeric. vector of lower age bounds.} \item{dx}{numeric. vector of the lifetable death distribution.} \item{lx}{numeric. vector of the lifetable survivorship.} \item{ex}{numeric. vector of remaining life expectancy.} \item{ax}{numeric. vector of the average time spent in the age interval of those dying within the interval.} \item{check}{logical. Shall we perform basic checks on input vectors? Default TRUE} } \description{ Calculate a lifetable column for the average age at death lost at death of a population. } \details{ This quantity is not featured in the literature to our knowledge, but we've added it in order to make an age-at-death version of \eqn{e^\dagger} (which is a shortfall metric), for the sake of completeness. We also don't know what to call this measure, so we chose \code{eta_dag} to make the association with \code{edag} clear. } \examples{ data(LT) # A vector containing the conditional mean age-at-death lost at death of a population eaaddag = ineq_eta_dag(age=LT$Age,dx=LT$dx,lx=LT$lx,ex=LT$ex,ax=LT$ax) # The aad-dag from birth eaaddag[1] # The aad-dag conditional upon survival to age 10 eaaddag[11] \dontrun{ plot(0:110, eaaddag, type='l') } } \seealso{ \code{MortalityLaws::\link[MortalityLaws]{MortalityLaw}} \code{ungroup::\link[ungroup]{pclm}} \code{MortalitySmooth::\link[MortalitySmooth]{Mort1Dsmooth}} }
b986fe7765485be0ab18c5a9d67154e459064470
6ec0b79efbab7bf2f445bb237917b8c0dc8bdba1
/plot1.R
e5821469cb397d61c833235d613ab124bf28df4e
[]
no_license
MMADave/Exploratory_Data_Plotting
7db76a98fa3da495c95188e5c3911233d6ea7975
2eed0c309799001229e1d20eb3a59a1de21339ad
refs/heads/master
2021-01-18T11:14:56.924422
2014-07-13T02:01:21
2014-07-13T02:01:21
null
0
0
null
null
null
null
UTF-8
R
false
false
368
r
plot1.R
# Plot 1: histogram of Global Active Power for 1/2/2007 and 2/2/2007.
# NOTE(review): rows are pre-filtered with the Windows `findstr` command
# and the plot opens via windows(), so this script is Windows-only.

# Read only the two target days; findstr keeps lines whose date starts
# with 1/2/2007 or 2/2/2007, which drops the header row as a side effect.
power_data <- read.table(
  pipe('findstr /B /R ^[1-2]/2/2007 household_power_consumption.txt'),
  header = FALSE,
  sep = ';'
)

# Recover the column names from the file's first (header) row
header_row <- read.table('household_power_consumption.txt',
                         header = TRUE, sep = ";", nrows = 1)
colnames(power_data) <- names(header_row)

windows()
hist(power_data$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     col = "red")

# Copy the on-screen device into a PNG file, then close it
dev.copy(png, file = "plot1.png")
dev.off()
2ac12cbdcc60bb8a08d2a74e3692e76157c1bc3a
04a3b346735b9366fedc0deab1d9c08b25647e85
/demo_associations.r
8a244c56a1511b683060dbfd2ee514b961845c2f
[]
no_license
edisona/amcat.r
ee4abccd95040c82a8660320df9838aaf913f88e
3e2fe2e029c6fdb44c191cbe48dc264f7d9ad202
refs/heads/master
2020-05-17T12:21:18.828489
2013-12-16T09:00:42
2013-12-16T09:00:42
32,724,312
1
0
null
null
null
null
UTF-8
R
false
false
2,294
r
demo_associations.r
# Demo: computing term/code associations (conditional probabilities)
# against an AmCAT server, both from ad-hoc queries and from a codebook.

source('amcatr.r')
source('amcat_getdata.r')
source('codebook_lib.r')
source('associations_lib.r')

##### LOG IN TO AMCAT #####
conn = amcat.connect('http://amcat-dev.labs.vu.nl') # AmCAT prompts for your login credentials

#### SELECT DATA ####
articlesets = c(3321,3322) # enter all article sets here: c(articleset1, articleset2, ...)
meta = amcat.getMeta(conn, articlesets, media=c(999,77,4,6), from_date='2012-01-01', to_date='2013-01-01')

#### QUICK ANALYSIS ####
# Each query string is "Label# term term ..." (labels/terms are Dutch country names)
queries = c('Frankrijk# frankrijk franse* fransman', 'Duitsland# duitsland duitse*', 'Spanje# spanje spanjaard* spaanse')
hits = amcat.getHits(conn, queries, articlesets)
hits = hits[hits$id %in% meta$id,]  # keep only hits in the selected article metadata
associations.conProb(hits, meta, byMedium=F, byDateinterval=NA, calc_type='cp')
associations.conProb(hits, meta, byMedium=T, byDateinterval=NA, calc_type='cp')
associations.conProb(hits, meta, byMedium=F, byDateinterval='month', calc_type='cp')

##### CODEBOOK BASED ANALYSIS #####
codebook = amcat.getCodebook(conn, codebook_id=206)
codebook
hits = amcat.getHits(conn, codebook$queries, articlesets)
hits = hits[hits$id %in% meta$id,]

##### Use codebook hierarchy to aggregate data #####
codebook.getChildren(codebook$hierarchy, 'Political parties') # show children of code
codebook.aggCode(hits, codebook$hierarchy, 'Political parties') # aggregate hits: sum of code and children of code
codebook.aggAllCodes(hits, codebook$hierarchy, codes=c()) # aggregate hits based on ontology. If codes parameter is an empty vector, all objects are aggregated.
hits = codebook.appendAggHits(hits, codebook$hierarchy)

##### Calculate Conditional Probability #####
codes = c('Political parties','')
only_from = c('CDA','VVD','PvdA','PVV'); only_to = c('Economy') # can be used to limit combination to be calculated. If empty, all combinations of codes are calculated.
# unaggregated, cp = zero since economy has no query (and thereby no hits) of its own
associations.conProb(hits, meta, codes, variable='hits', only_from=only_from, only_to=only_to, byMedium=F, byDateinterval=NA, calc_type='cp')
# aggregated, economy contains hits of its codebook children
associations.conProb(hits, meta, codes, variable='agg_hits', only_from=only_from, only_to=only_to, byMedium=F, byDateinterval=NA, calc_type='cp')
78f846ad24d94a0596e69e431cb7a5789645e889
f3119ef84a7d4a15f8347ba073c291607edae5ac
/Stegen and Hurlbert 2009 community assembly.r
61195598f67e9a232141f9e66e1cb3462a2b6c2c
[]
no_license
bendmorris/fia_spatial_diversity
167e7b37b1eeb0d96737c88e05932dab0f44f998
6cff0170cdd7a8d1eaa2419b1b50df20376f51de
refs/heads/master
2020-12-30T10:36:34.284563
2013-09-16T13:35:06
2013-09-16T13:35:06
8,338,407
0
0
null
null
null
null
UTF-8
R
false
false
11,623
r
Stegen and Hurlbert 2009 community assembly.r
## code for simulating 'metacommunities' and assembling local communities
## and calculating taxonomic, functional and phylogenetic beta diversity
## in order to evaluate the performance of different beta diversity metrics

## this version evolves traits and assigns spatial locations based on the assumed trait-space correlation
## which is itself a function of dispersal limitation and environmental filtering

## this version has two traits, two environmental variables and two spatial dimensions

## this version evaluates the variance partitioning across 11 niche and dispersal breadths, but only one env.-space correlation

## NOTE(review): everything from the metacommunity simulation onward runs
## inside the double for-loop over (niche.breadth, disp.breadth), so bare
## expressions such as dim(meta), var.cov and phylo.dist[1:5,1:5] do NOT
## autoprint inside the loop -- they only have an effect at top level.

date()

library(ape)
library(geiger)
library(apTreeshape)
library(picante)
library(lattice)

individuals = 10000 # number of individuals in a local community
communities = 20 # number of local communities
meta.rich = 500 # metacommunity richness

## start parameters to manipulate

es.sd = 0.9 ## standard deviation for the environment-space correlation
rsq.desired=0.5 ## desired space-environment r.sq

#for (niche.breadth in c(10^seq(0,1,0.5))) { # variance of gaussian niche curve
#for (disp.breadth in c(10^seq(-4,1,0.5))) { # variance of gaussian dispersal kernel

###########################
# Variables to manipulate #
###########################
param_values = c(10^-9, 10^-4, 10^1)
for (niche.breadth in param_values) {
for (disp.breadth in param_values) {

meta.abun = rlnorm(meta.rich, meanlog = 1, sdlog = 1) ## "global" species abundance distribution

## end parameters to manipulate

# accumulators for beta-diversity comparisons (23 columns each)
phylo.comp = matrix(c(0),nrow=0,ncol=23)
trait.comp = matrix(c(0),nrow=0,ncol=23)
taxa.comp = matrix(c(0),nrow=0,ncol=23)

## start simulated metacommunity, which is the overall phylogeny
##windows(21,14)
##par(mfrow=c(2,3))

my.d = 0 #death rate - when set to zero = pure birth model
my.b = 0.1 #birth rate

tree1 = birthdeath.tree(b=my.b, d=my.d, taxa.stop=(meta.rich+1), return.all.extinct=FALSE) #generate random ultrametric trees with given richness
tree2 = as.treeshape(tree1) #intermediate step to estimate tree imbalance
meta = rescaleTree(tree1, 1) #standardise tree root-to-tip length
Ic = colless(tree2,norm = "yule") # tree imbalance
PD = sum(meta$edge.length) # alpha phylodiversity
gamma = gammaStat(meta) # related to stemminess

# drop the extra (meta.rich+1)-th tip "s501" from the cophenetic distance matrix
meta = cophenetic(meta)/2;
rownames(meta)[rownames(meta)=="s501"]="del"
colnames(meta)[colnames(meta)=="s501"]="del"
meta = subset(meta, select = -del)
meta = subset(t(meta), select = -del)
meta = t(meta); dim(meta); range(as.numeric(rownames(meta)));

##########################
# PHYLOGENY
##########################
meta = as.phylo(hclust(as.dist(meta)))
phylo.dist = round(cophenetic(meta)/2,4); phylo.dist[1:5,1:5];
##plot(meta,typ="fan",show.tip.label = F,main="Metacommunity Phylogeny")
## end simulation of metacommunity, which is now meta

## start assigning environmental conditions and spatial locations to communities
##windows(14,7); par(mfrow=c(1,2))

# first spatial and environmental axes
env.opt.one = matrix(c(runif(communities, min=-10, max=10)),ncol=1,nrow=communities) # randomly selecting the environmental optima for each trait for each local community
for (i in 1:ncol(env.opt.one)) {env.opt.one[,i] = (env.opt.one[,i]-mean(env.opt.one[,i]))/sd(env.opt.one[,i])} # standardizing environmental optima into z-scores

# rejection-sample spatial positions until the space~environment r-squared
# is within 0.05 of the desired value (up to 1000 attempts)
for (loop in 1:1000) {
comm.space.one = rnorm(communities, mean = env.opt.one, sd = es.sd); comm.space.one = (comm.space.one-mean(comm.space.one))/sd(comm.space.one); comm.space.one = matrix(c(comm.space.one),ncol=1) # standardizing community spatial locations into z-scores
space.env.r.sq = round(summary(lm(comm.space.one~env.opt.one[,1]))$r.sq,2)
if ( abs(space.env.r.sq - rsq.desired) < 0.05 ) {break} else{}
}
plot(comm.space.one~env.opt.one[,1],main="1st axes Space-Environment Correlation",ylab="Local Spatial Position",xlab="Local Environmental Value"); legend(min(env.opt.one[,1]),max(comm.space.one),legend = paste("R.sq = ",space.env.r.sq,sep=""),box.lty = 0)
space.env.cov.one = cov(comm.space.one,env.opt.one[,1])

# second spatial and environmental axes (same rejection-sampling scheme)
env.opt.two = matrix(c(runif(communities, min=-10, max=10)),ncol=1,nrow=communities) # randomly selecting the environmental optima for each trait for each local community
for (i in 1:ncol(env.opt.two)) {env.opt.two[,i] = (env.opt.two[,i]-mean(env.opt.two[,i]))/sd(env.opt.two[,i])} # standardizing environmental optima into z-scores

for (loop in 1:1000) {
comm.space.two = rnorm(communities, mean = env.opt.two, sd = es.sd); comm.space.two = (comm.space.two-mean(comm.space.two))/sd(comm.space.two); comm.space.two = matrix(c(comm.space.two),ncol=1) # standardizing community spatial locations into z-scores
space.env.r.sq = round(summary(lm(comm.space.two~env.opt.two[,1]))$r.sq,2)
if ( abs(space.env.r.sq - rsq.desired) < 0.05 ) {break} else{}
}
plot(comm.space.two~env.opt.two[,1],main="2nd axes Space-Environment Correlation",ylab="Local Spatial Position",xlab="Local Environmental Value"); legend(min(env.opt.two[,1]),max(comm.space.two),legend = paste("R.sq = ",space.env.r.sq,sep=""),box.lty = 0)
space.env.cov.two = cov(comm.space.two,env.opt.two[,1])

###################################################
# SITE X SPATIAL COORDINATES MATRIX
###################################################
site.xy = data.frame(x = comm.space.one, y = comm.space.two)
## end assigning environmental conditions and spatial locations to communities

## start calculation of the trait-space covariance
trait.space.cov.1.1 = (exp(-disp.breadth) + space.env.cov.one*exp(-niche.breadth))/3 ## the covariance between traits and space along the first axes
trait.space.cov.2.2 = (exp(-disp.breadth) + space.env.cov.two*exp(-niche.breadth))/3 ## the covariance between traits and space along the second axes
trait.space.cov.1.2 = ( exp(-disp.breadth) )/3 ## the covariance between first trait and second space axes
trait.space.cov.2.1 = ( exp(-disp.breadth) )/3 ## the covariance between second trait and first space axes
trait.trait.cov = 0 ## covariance between traits
space.space.cov = 0 ## covariance between spatial dimensions
all.var = 1 ## variance of space and traits
## end calculation of the trait-space covariance

## start trait/space evolution, brownian motion
traits = 4 # number of space + trait dimensions

# 4x4 variance-covariance matrix: rows/cols are (trait1, trait2, space1, space2)
var.cov = matrix(c(
all.var,trait.trait.cov,trait.space.cov.1.1,trait.space.cov.1.2,
trait.trait.cov,all.var,trait.space.cov.2.1,trait.space.cov.2.2,
trait.space.cov.1.1,trait.space.cov.1.2,all.var,space.space.cov,
trait.space.cov.2.1,trait.space.cov.2.2,space.space.cov,all.var)
,ncol=traits,nrow=traits); var.cov

##for (loop in 1:1000) {
trait.brown = sim.char(meta,var.cov) ## matrix of traits
t.s.matrix = matrix(c(trait.brown),nrow=meta.rich); rownames(t.s.matrix) = rownames(trait.brown)
for (i in 1:ncol(t.s.matrix)) {t.s.matrix[,i] = (t.s.matrix[,i]-mean(t.s.matrix[,i]))/sd(t.s.matrix[,i])} # standardizing traits into z-scores
cov(t.s.matrix)-var.cov
##real.trait.space.cov = cov(t.s.matrix[,1],t.s.matrix[,2])
##if ( abs(real.trait.space.cov - trait.space.cov) < 0.05 ) {break} else{}
##}

# rows 1..communities are the local sites ("c1".."c20"); remaining rows are species
trait.matrix = rbind(cbind(env.opt.one,env.opt.two),as.matrix(t.s.matrix[,c(1,2)],ncol=2))
for (i in 1:communities) {rownames(trait.matrix)[i] = paste("c",i,sep="") }
trait.matrix = as.matrix(dist(trait.matrix,upper=T,diag=T)); trait.matrix = trait.matrix/max(trait.matrix); trait.matrix[1:5,1:5]; dim(trait.matrix) # distance matrix made with the environmental optima

space.matrix = rbind(cbind(comm.space.one,comm.space.two),as.matrix(t.s.matrix[,c(3,4)],ncol=2))
for (i in 1:communities) {rownames(space.matrix)[i] = paste("c",i,sep="") }
space.matrix = as.matrix(dist(space.matrix,upper=T,diag=T)); space.matrix = space.matrix/max(space.matrix); space.matrix[1:5,1:5]; dim(space.matrix) # distance matrix made with the environmental optima

#K.trait = Kcalc(trait.brown,meta) ## trait phylogenetic signal
#K.trait
##trait.dendro = as.phylo(hclust(as.dist(trait.matrix)))
##space.dendro = as.phylo(hclust(as.dist(space.matrix)))
##trait.dendro.fsor = as.phylo(hclust(as.dist(trait.matrix[(communities+1):nrow(trait.matrix),(communities+1):ncol(trait.matrix)])))
##plot(trait.dendro,typ="fan",show.tip.label = F,cex=0.5,main="Metacommunity Trait Dendrogram")
##plot(space.dendro,typ="fan",show.tip.label = F,cex=0.5,main="Metacommunity Space Dendrogram")
## end trait/space evolution, brownian motion

## start community assembly, abundance and environment
species = rownames(trait.matrix)[(communities+1):nrow(trait.matrix)]
pres.ab = matrix(c(0),ncol=meta.rich,nrow=communities)
rich = numeric()

# assemble each local community by sampling individuals with probability
# proportional to (global abundance) x (environmental filter) x (dispersal kernel)
for(i in 1:nrow(pres.ab)) {
##local.species = as.numeric(sample(species,local.rich,replace=F,prob=c(exp(-((trait.matrix[(communities+1):nrow(trait.matrix),i])^2)/(2*niche.breadth))/(sqrt(2*niche.breadth*pi))))) # niche breadth multiplied by 10 to get more rare species
##rel.abun = sample(local.species,individuals,replace=T,prob=c(exp(-((trait.matrix[c(local.species+communities),i])^2)/(2*niche.breadth))/(sqrt(2*niche.breadth*pi))))
# THE PROBLEM IS HERE: the expression passed to prob is just a matrix full of 0's,
# resulting in "too few positive probabilities"
rel.abun = as.numeric(sample(species,individuals,replace=T,
prob=c(
meta.abun ## probability from the global abundance distribution
*exp(-((trait.matrix[c(communities+1):nrow(trait.matrix),i])^2)/(2*niche.breadth))/(sqrt(2*niche.breadth*pi)) ## probability from environmental filtering
*exp(-((space.matrix[c(communities+1):nrow(space.matrix),i])^2)/(2*disp.breadth))/(sqrt(2*disp.breadth*pi)) ## probability from spatial dispersal limitation
)))
# tally sampled individuals per species and convert to relative abundances
rel.abun = as.data.frame(rel.abun)
rel.abun = cbind(rel.abun,1)
rel.abun = tapply(rel.abun[,2],rel.abun[,1],FUN=sum)
rel.abun = cbind(as.numeric(names(rel.abun)),rel.abun)
rel.abun[,2] = rel.abun[,2]/sum(rel.abun[,2])
rich[i] = nrow(rel.abun)
pres.ab[i,c(rel.abun[,1])] = rel.abun[,2]
}
pres.ab[1:5,1:5]
##hist(log(rel.abun[,2]),breaks=20,xlab="Relative Abundance",ylab="# of Species in Local Community",main="Example Local SAD")

coms = cbind(trait.matrix[1:communities,1],space.matrix[1:communities,1],pres.ab)
colnames(coms) = c("Env","Spatial",c(1:meta.rich))
coms[1:5,1:5]

###################################################
# SITE X SPECIES MATRIX IN EITHER REL.ABUN OR COMS
###################################################
## end community assembly, abundance and environment
}
}
546bd900a0f93fb0196ed0d3920930c1c19acc3d
dc4e4365c7c5d6a94ea0616c12d1a59ccffa12e9
/man/mrExec.Rd
c0fd8b9de78c61f1d10d3685e583b1afe9a14d5b
[ "BSD-3-Clause" ]
permissive
hafen/datadr
582321d9a5628fb2facd1623fb4048a2ac1575bf
b4f7e7a1d9a09ac51b0c456f35177d61626d73c3
refs/heads/master
2020-12-29T03:06:38.926328
2018-08-20T23:59:20
2018-08-20T23:59:20
20,773,966
2
0
null
null
null
null
UTF-8
R
false
true
3,021
rd
mrExec.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mapreduce.R \name{mrExec} \alias{mrExec} \title{Execute a MapReduce Job} \usage{ mrExec(data, setup = NULL, map = NULL, reduce = NULL, output = NULL, overwrite = FALSE, control = NULL, params = NULL, packages = NULL, verbose = TRUE) } \arguments{ \item{data}{a ddo/ddf object, or list of ddo/ddf objects} \item{setup}{an expression of R code (created using the R command \code{expression}) to be run before map and reduce} \item{map}{an R expression that is evaluated during the map stage. For each task, this expression is executed multiple times (see details).} \item{reduce}{a vector of R expressions with names pre, reduce, and post that is evaluated during the reduce stage. For example \code{reduce = expression(pre = {...}, reduce = {...}, post = {...})}. reduce is optional, and if not specified the map output key-value pairs will be the result. If it is not specified, then a default identity reduce is performed. Setting it to 0 will skip the reduce altogether.} \item{output}{a "kvConnection" object indicating where the output data should reside (see \code{\link{localDiskConn}}, \code{\link{hdfsConn}}). If \code{NULL} (default), output will be an in-memory "ddo" object. If a character string, it will be treated as a path to be passed to the same type of connection as \code{data} - relative paths will be relative to the working directory of that back end.} \item{overwrite}{logical; should existing output location be overwritten? 
(also can specify \code{overwrite = "backup"} to move the existing output to _bak)} \item{control}{parameters specifying how the backend should handle things (most-likely parameters to \code{rhwatch} in RHIPE) - see \code{\link{rhipeControl}} and \code{\link{localDiskControl}}} \item{params}{a named list of objects external to the input data that are needed in the map or reduce phases} \item{packages}{a vector of R package names that contain functions used in \code{fn} (most should be taken care of automatically such that this is rarely necessary to specify)} \item{verbose}{logical - print messages about what is being done} } \value{ "ddo" object - to keep it simple. It is up to the user to update or cast as "ddf" if that is the desired result. } \description{ Execute a MapReduce job } \examples{ # compute min and max Sepal Length by species for iris data # using a random partitioning of it as input d <- divide(iris, by = rrDiv(20)) mapExp <- expression({ lapply(map.values, function(r) { by(r, r$Species, function(x) { collect( as.character(x$Species[1]), range(x$Sepal.Length, na.rm = TRUE) ) }) }) }) reduceExp <- expression( pre = { rng <- c(Inf, -Inf) }, reduce = { rx <- unlist(reduce.values) rng <- c(min(rng[1], rx, na.rm = TRUE), max(rng[2], rx, na.rm = TRUE)) }, post = { collect(reduce.key, rng) }) res <- mrExec(d, map = mapExp, reduce = reduceExp) as.list(res) } \author{ Ryan Hafen }
a05cc3a876f5124cc78259365d758aa3df7bdb4e
b6ea5f9d0d597d7ee146bac059d7f644740247a6
/R/enhancerToGene.R
86e19fb1d100d427ed01d72e9afd764cb97ad97d
[]
no_license
aertslab/ScoMAP
5315406086f98298534a613bfce1faec02ef8177
f81bc49a6eeb6ba17f04cf7e43837126b9b7fb77
refs/heads/master
2021-10-07T18:07:29.783024
2021-10-04T07:58:14
2021-10-04T07:58:14
243,273,940
20
3
null
null
null
null
UTF-8
R
false
false
28,257
r
enhancerToGene.R
#' getSearchSpace #' #' Get regions per gene in which to look for potential enhancers to be linked #' @param txdb Txdb object matching with the genome assembly used for the analysis #' @param org.db Org.Db objet for the corresponding species #' @param genes Genes for which enhancer-to-gene links want to be inferred #' @param extend Space around the TSS that must be considered (first value, upstream; second, downstream TSS). #' In addition, intronic regions in the gene will be considered. Default=c(50000, 50000) #' #' @return Genomic ranges object containing the regions considered for each gene (as metadata) #' #' @examples #' searchSpace <- getSearchSpace(txdb, org.db, rownames(DGEM), extend=c(50000, 50000)) #' #' @import AnnotationDbi #' @import GenomicRanges #' #' @export getSearchSpace <- function(txdb, org.db, genes, extend=c(50000, 50000)) { # Check up if(!is(txdb,'TxDb')){ stop('txdb has to be an object of class TxDb') } if(!is(org.db,'OrgDb')){ stop('org.db has to be an object of class OrgDb') } # Get search space around TSS # Genes to ensemble dict if (taxonomyId(org.db) == 9606){ ENS2SYMBOL <- AnnotationDbi::select(org.db, keys = genes, columns="ENTREZID", keytype="SYMBOL") } else { ENS2SYMBOL <- AnnotationDbi::select(org.db, keys = genes, columns="ENSEMBL", keytype="SYMBOL") } if (sum(is.na(ENS2SYMBOL[,2])) > 0){ENS2SYMBOL <- ENS2SYMBOL[-which(is.na(ENS2SYMBOL[,2])),]} # Select genes in the list filter_list <- list() filter_list[['GENEID']] <- ENS2SYMBOL[,2] # Take promoter coordinates for the specific genes TSS <- promoters(txdb, upstream =extend[1], downstream=extend[2], filter=filter_list, columns=c("GENEID")) # Annotate to symbol ENS2SYMBOL_VECTOR <- as.vector(ENS2SYMBOL[,1]) names(ENS2SYMBOL_VECTOR) <- ENS2SYMBOL[,2] elementMetadata(TSS)$SYMBOL <- ENS2SYMBOL_VECTOR[unlist(as.vector(elementMetadata(TSS)$GENEID))] elementMetadata(TSS) <- elementMetadata(TSS)[ , -which(colnames(elementMetadata(TSS)) %in% c('GENEID', 'width'))] 
colnames(elementMetadata(TSS)) <- 'SYMBOL' elementMetadata(TSS)$RegionType <- rep('Extended Promoter', length(unlist(as.vector(elementMetadata(TSS)$SYMBOL)))) # Get introns introns <- intronicParts(txdb, linked.to.single.gene.only=FALSE) # Get Ens to symbol name if (taxonomyId(org.db) == 9606){ ENS2SYMBOLL <- AnnotationDbi::select(org.db, keys = unlist(as.vector(elementMetadata(introns)$gene_id)), columns="SYMBOL", keytype="ENTREZID") } else { ENS2SYMBOLL <- AnnotationDbi::select(org.db, keys = unlist(as.vector(elementMetadata(introns)$gene_id)), columns="SYMBOL", keytype="ENSEMBL") } if (sum(is.na(ENS2SYMBOLL[,1])) > 0){ENS2SYMBOLL <- ENS2SYMBOLL[-which(is.na(ENS2SYMBOLL[,1])),]} ENS2SYMBOLL_VECTOR <- as.vector(ENS2SYMBOLL[,2]) names(ENS2SYMBOLL_VECTOR) <- ENS2SYMBOLL[,1] introns <- S4Vectors::expand(introns, "gene_id") elementMetadata(introns)$SYMBOL <- ENS2SYMBOLL_VECTOR[unlist(as.vector(elementMetadata(introns)$gene_id))] elementMetadata(introns) <- elementMetadata(introns)[ , -which(colnames(elementMetadata(introns)) %in% c('gene_id', 'tx_name', 'tx_id'))] colnames(elementMetadata(introns)) <- 'SYMBOL' # Subset for selected genes introns <- introns[which(elementMetadata(introns)$SYMBOL %in% genes),] elementMetadata(introns)$RegionType <- rep('Intron', length(unlist(as.vector(elementMetadata(introns)$SYMBOL)))) # Merge regions searchSpace <- c(TSS, introns) names(searchSpace) <- NULL return(searchSpace) } #' enhancerToGene #' #' Link enhancers to target genes #' @param VM_RNA_mat Matrix containing genes as rows, virtual cells as columns and gene expression as values (see getVirtualFeatureMatrix()) #' @param VM_RNA_mat Matrix containing regions as rows, virtual cells as columns and cisTopic's region accessibility probabilities as values (recommended, see getVirtualFeatureMatrix()) #' @param searchSpace Search space GenomicRanges object as obtained from getSearchSpace() #' @param method Whether to use pearson correlation between region accessibility and gene 
expression ('Correlation') #' or a random forest ('RF') model to infer enhancer-to-gene links. #' @param minoverlap Minimum overlap between the candidate regulatory regions and the search space. Default: 0.4. #' @param nCores How many cores to use if method='RF'. Default: 1 #' @param nTrees How many trees to use per RF model if method='RF'. Default: 1000 #' #' @return A list containing a data frame for each gene, in which the RF importance (if method='RF') or the #' correlation values (if method='Correlation') are given. #' #' @examples #' RF_links <- enhancerToGene(VM_DGEM, VM_PRTM, searchSpace, method='RF') #' Cor_links <- enhancerToGene(VM_DGEM, VM_PRTM, searchSpace, method='Correlation') #' #' @import parallel #' @import doSNOW #' @import Matrix #' @importFrom plyr llply #' #' @export enhancerToGene <- function(VM_RNA_mat, VM_ATAC_mat, searchSpace, method, minOverlap=0.4, nCores = 1, nTrees=1000 ){ #Get region Granges regions <- rownames(VM_ATAC_mat) GR_regions <- .regionVectorToGenomicRanges(regions) region2gene <- .getOverlapRegionsFromGR_regions(GR_regions, searchSpace) region2gene_split <- split(region2gene, region2gene$gene) region2gene_list <- llply(region2gene_split, function(x) unique(as.vector(unlist(x[,1])))) region2gene_list <- region2gene_list[names(region2gene_list) %in% rownames(VM_RNA_mat)] region2gene_list <- llply(region2gene_list, function(x) x[x %in% rownames(VM_ATAC_mat)]) gene_names <- names(region2gene_list) region2gene_list <- llply(1:length(region2gene_list), function (i) rbind(VM_RNA_mat[(names(region2gene_list)[i]),,drop=FALSE], VM_ATAC_mat[region2gene_list[[i]],])) names(region2gene_list) <- gene_names if(method == 'RF'){ if(! 
"GENIE3" %in% installed.packages()){ stop('Please, install GENIE3: \n BiocManager::install("GENIE3")') } else { require(GENIE3) } if (nCores > 1){ cl <- makeCluster(nCores, type = "SOCK") registerDoSNOW(cl) clusterEvalQ(cl, library(GENIE3)) clusterExport(cl, c('region2gene_list'), envir=environment()) opts <- list(preschedule=TRUE) clusterSetRNGStream(cl, 123) output <- llply(region2gene_list, function(x) as.data.frame(tryCatch(GENIE3(as.matrix(x), treeMethod = "RF", K = "sqrt", nTrees = nTrees, regulators = rownames(x)[-1], targets = rownames(x)[1], nCores = 1, verbose = FALSE), error=function(e) NULL), .parallel = TRUE, .paropts = list(.options.snow=opts), .inform=FALSE)) } else { output <- llply(region2gene_list, function(x) as.data.frame(tryCatch(GENIE3(as.matrix(x), treeMethod = "RF", K = "sqrt", nTrees = nTrees, regulators = rownames(x)[-1], targets = rownames(x)[1], nCores = 1, verbose = FALSE), error=function(e) NULL), .parallel = FALSE, .inform=FALSE, .progress = "text")) } if (sum(sapply(output, is.null)) > 0){ output <- output[-which(sapply(output, is.null))] } if (0 %in% sapply(output, nrow)){ output <- output[-which(sapply(output, nrow) == 0)] } output <- lapply(output, function (x) {colnames(x) <- 'RF_importance'; x}) } else if (method == 'Correlation'){ output <- lapply(region2gene_list, function (x) t(as.data.frame(t(apply(x[2:nrow(x),,drop=FALSE], 1 , cor , y = x[1,]))))) output <- lapply(output, function (x) {colnames(x) <- 'Correlation'; x}) } return(output) } .regionVectorToGenomicRanges <- function(regionVector){ chr <- sapply(strsplit(regionVector, split = ":"), "[", 1) coord <- sapply(strsplit(regionVector, split = ":"), "[", 2) start <- as.numeric(sapply(strsplit(coord, split = "-"), "[", 1)) end <- as.numeric(sapply(strsplit(coord, split = "-"), "[", 2)) dataframe <- as.data.frame(cbind(chr, start, end)) colnames(dataframe) <- c('seqnames', 'start', 'end') rownames(dataframe) <- regionVector Gr <- makeGRangesFromDataFrame(dataframe, 
keep.extra.columns = TRUE) return(Gr) } .getOverlapRegionsFromGR_regions <- function( GR_regions, searchSpace, minOverlap=0.4, overlapping=TRUE, ...) { dbRegionsOverlap <- findOverlaps(searchSpace, GR_regions, type='any', select="all", ignore.strand=TRUE) if(minOverlap>0){ overlaps <- pintersect(searchSpace[queryHits(dbRegionsOverlap)], GR_regions[subjectHits(dbRegionsOverlap)]) percentOverlapGR_regions <- width(overlaps) / width(searchSpace[queryHits(dbRegionsOverlap)]) percentOverlapRegions <- width(overlaps) / width(GR_regions[subjectHits(dbRegionsOverlap)]) maxOverlap <- apply(cbind(percentOverlapGR_regions, percentOverlapRegions), 1, max) dbRegionsOverlap <- dbRegionsOverlap[which(maxOverlap > minOverlap)] maxOverlap <- maxOverlap[which(maxOverlap > minOverlap)] } selectedRegions <- searchSpace[queryHits(dbRegionsOverlap)] symbol <- as.data.frame(selectedRegions)$SYMBOL selectedRegions <- paste(as.vector(seqnames(selectedRegions)), ':', as.vector(start(selectedRegions)), '-', as.vector(end(selectedRegions)), sep='') selectedGR_regions <- names(GR_regions[subjectHits(dbRegionsOverlap)]) selectedMapped <- data.frame(selectedGR_regions, selectedRegions, symbol, maxOverlap, row.names=NULL) colnames(selectedMapped) <- c('dataRegion', 'searchSpace', 'gene', 'maxOverlap') return(selectedMapped) } #' plotLinks #' #' Plot enhancer-to-gene links (To be enhanced) #' @param RF_links Data frames list containing RF scores for each region in each gene as returned by enhancerToGene(). #' @param Cor_links Data frames list containing correlation scores for each region in each gene as returned by enhancerToGene(). If both RF and correlation links are provided, the height of the links will represent the RF importance and the color whether the correlation is positive or negative. 
#' If only RF is provided, links will be colored black; and if only correlation links are provided the height of the link will indicate the absolute correlation value and the color whether it is positive or negative.
#' @param annot Annotation data frame, as required by cicero (Pliner et al., 2019)
#' @param txdb Txdb object matching with the genome assembly used for the analysis
#' @param org.db Org.Db object for the corresponding species
#' @param gene Gene for which enhancer-to-gene links wants to be plotted
#' @param chr Chromosome name of the genomic window that wants to be plotted
#' @param start Start position of the genomic window that wants to be plotted
#' @param end End position of the genomic window that wants to be plotted
#' @param cutoff Value below which links will not be shown. Default: 0.
#'
#' @examples
#' plotLinks(RF_links, dm6_annot, TxDb.Dmelanogaster.UCSC.dm6.ensGene, org.Dm.eg.db, gene='dac', 'chr2L', 16470000, 16490000)
#'
#' @import AnnotationDbi
#' @import GenomicRanges
#'
#' @export
plotLinks <- function(RF_links = NULL,
                      Cor_links = NULL,
                      annot,
                      txdb,
                      org.db,
                      gene,
                      chr,
                      start,
                      end,
                      cutoff = 0){
  # Check up: cicero is only required for plotting, so it is loaded on demand
  if (!"cicero" %in% installed.packages()) {
    stop('Please, install cicero: \n BiocManager::install("cicero")')
  } else {
    require(cicero)
  }
  if (!is(txdb, 'TxDb')) {
    stop('txdb has to be an object of class TxDb')
  }
  if (!is(org.db, 'OrgDb')) {
    stop('org.db has to be an object of class OrgDb')
  }
  # Map the gene symbol to the TxDb gene identifier
  # (ENTREZ for human, ENSEMBL otherwise)
  if (taxonomyId(org.db) == 9606) {
    ENS2SYMBOL <- AnnotationDbi::select(org.db, keys = gene, columns = "ENTREZID", keytype = "SYMBOL")
  } else {
    ENS2SYMBOL <- AnnotationDbi::select(org.db, keys = gene, columns = "ENSEMBL", keytype = "SYMBOL")
  }
  if (sum(is.na(ENS2SYMBOL[, 2])) > 0) {
    ENS2SYMBOL <- ENS2SYMBOL[-which(is.na(ENS2SYMBOL[, 2])), ]
  }
  filter_list <- list()
  filter_list[['GENEID']] <- ENS2SYMBOL[, 2]
  # TSS as a zero-width anchor, formatted 'chr_start_end' as cicero expects
  TSS <- promoters(txdb, upstream = 0, downstream = 0, filter = filter_list, columns = c("GENEID"))
  TSS <- paste(seqnames(TSS), start(TSS), end(TSS), sep = '_')
  if (!is.null(RF_links)) {
    # Link heights come from the RF importances
    linksgene <- RF_links[[gene]]
    # BUGFIX: both separators must be replaced. Previously the second gsub()
    # was applied to rownames(linksgene) again, discarding the ':' replacement
    # and producing peak names cicero cannot match.
    regions <- gsub('-', '_', gsub(':', '_', rownames(linksgene)))
    conns <- as.data.frame(cbind(regions, rep(TSS, length(regions)), linksgene[, 1], rep('black', length(regions))))
    if (!is.null(Cor_links)) {
      # Color encodes the correlation sign (red negative, green positive)
      corlinksgene <- as.vector(unlist(Cor_links[[gene]]))
      mypal <- colorRampPalette(c('#FF0000', '#228B22'))(10)
      color <- .map2color(corlinksgene, mypal)
      conns <- as.data.frame(cbind(regions, rep(TSS, length(regions)), linksgene[, 1], color))
    }
    names(conns) <- c('Peak1', 'Peak2', 'coaccess', 'color')
    conns[, 3] <- as.numeric(as.vector(unlist(conns[, 3])))
    cicero::plot_connections(conns, chr, start, end,
                             alpha_by_coaccess = TRUE,
                             gene_model = annot,
                             connection_color = 'color',
                             connection_color_legend = FALSE,
                             gene_model_color = 'blue',
                             coaccess_cutoff = cutoff,
                             connection_width = .5,
                             collapseTranscripts = "longest",
                             peak_color = "black")
  } else if (!is.null(Cor_links)) {
    # Correlation only: height is the absolute value, color the sign
    linksgene <- Cor_links[[gene]]
    # BUGFIX: see note above -- replace both ':' and '-'
    regions <- gsub('-', '_', gsub(':', '_', rownames(linksgene)))
    mypal <- colorRampPalette(c('#FF0000', '#228B22'))(10)
    color <- .map2color(linksgene, mypal)
    linksgene <- abs(linksgene)
    conns <- as.data.frame(cbind(regions, rep(TSS, length(regions)), linksgene[, 1], color))
    names(conns) <- c('Peak1', 'Peak2', 'coaccess', 'color')
    conns[, 3] <- as.numeric(as.vector(unlist(conns[, 3])))
    cicero::plot_connections(conns, chr, start, end,
                             alpha_by_coaccess = TRUE,
                             gene_model = annot,
                             connection_color = 'color',
                             connection_color_legend = FALSE,
                             gene_model_color = 'blue',
                             coaccess_cutoff = cutoff,
                             connection_width = .5,
                             collapseTranscripts = "longest",
                             peak_color = "black")
  }
}

#' pruneLinks
#'
#' Prune enhancer-to-gene links
#' @param RF_links Data frames list containing RF scores for each region in each gene as returned by enhancerToGene().
#' @param Cor_links Data frames list containing correlation scores for each region in each gene as returned by enhancerToGene(). If both RF and correlation links are provided, the height of the links will represent the RF importance and the color whether the correlation is positive or negative. If only RF is provided, links will be colored black; and if only correlation links are provided the height of the link will indicate the absolute correlation value and the color whether it is positive or negative.
#' @param cor_prob Probability threshold on the fitted distribution above which positive and negative correlation will be taken.
#' @param cor_thr A vector containing the lower and upper thresholds for the correlation links.
#'
#' @return A list containing slots with the pruned RF links ('RF_links') and/or correlation links ('Cor_links')
#' @examples
#' pruneLinks(RF_links, Cor_links)
#'
#' @import plyr
#'
#' @export
pruneLinks <- function(RF_links = NULL, Cor_links = NULL, cor_prob = 0.01, cor_thr = NULL){
  if (is.null(RF_links) & is.null(Cor_links)) {
    stop('Please, provide at least either RF or correlation links.')
  }
  # BUGFIX: initialize both temporaries so the is.null() checks further down
  # do not fail with "object not found" when only one input is provided.
  RF_links_tmp <- NULL
  Cor_links_tmp <- NULL
  if (!is.null(RF_links)) {
    if (!"Binarize" %in% installed.packages()) {
      stop('Please, install Binarize: \n install.packages("Binarize")')
    } else {
      require(Binarize)
    }
    RF_links_tmp <- llply(RF_links, as.matrix)
    RF_links_tmp <- llply(RF_links_tmp, function(x) x[complete.cases(x), ])
    # BASC binarization needs more than 2 values; keep small genes aside
    RF_links_2_enh <- RF_links_tmp[lengths(RF_links_tmp) <= 2]
    RF_links_tmp <- RF_links_tmp[lengths(RF_links_tmp) > 2]
    # Keep only regions at or above the per-gene BASC threshold
    RF_links_tmp <- llply(RF_links_tmp, function(x) x[which(x >= binarize.BASC(x)@threshold)])
    if (length(RF_links_2_enh) > 0) {
      RF_links_tmp <- c(RF_links_tmp, RF_links_2_enh)
    }
    RF_links_tmp <- llply(RF_links_tmp, as.data.frame)
    RF_links_tmp <- llply(RF_links_tmp, function(x) {colnames(x) <- 'RF_importance'; x})
  }
  if (!is.null(Cor_links)) {
    if (!"fitdistrplus" %in% installed.packages()) {
      stop('Please, install fitdistrplus: \n install.packages("fitdistrplus")')
    } else {
      require(fitdistrplus)
    }
    Cor_links_tmp <- llply(Cor_links, as.matrix)
    # Fit a normal distribution to all correlations and keep both tails
    fit <- suppressWarnings(fitdistrplus::fitdist(unlist(Cor_links_tmp)[!is.na(unlist(Cor_links_tmp))], "norm", method = 'mme'))
    cutofflow <- as.numeric(unlist(quantile(fit, probs = cor_prob))[1])
    cutoffup <- as.numeric(unlist(quantile(fit, probs = 1 - cor_prob))[1])
    if (!is.null(cor_thr)) {
      # Explicit thresholds override the fitted quantiles
      cutofflow <- cor_thr[1]
      cutoffup <- cor_thr[2]
    }
    print(paste0('Low cutoff: ', cutofflow, '; Upper cutoff:', cutoffup))
    Cor_links_tmp <- llply(Cor_links_tmp, function(x) x[which(x > cutoffup | x < cutofflow), ])
    Cor_links_tmp <- llply(Cor_links_tmp, as.data.frame)
    Cor_links_tmp <- llply(Cor_links_tmp, function(x) {colnames(x) <- 'Correlation'; x})
  }
  if (!is.null(RF_links) & !is.null(Cor_links)) {
    # When both are given, a region is kept in BOTH outputs if it passed
    # either pruning criterion for that gene.
    RF_links_tmp <- RF_links_tmp[names(RF_links_tmp) %in% names(Cor_links_tmp)]
    Cor_links_tmp <- Cor_links_tmp[names(RF_links_tmp)]
    RF_thr_enh <- llply(RF_links_tmp, rownames)
    Cor_links_enh <- llply(Cor_links_tmp, rownames)
    thr_enh <- llply(seq_along(RF_thr_enh), function(i) c(RF_thr_enh[[i]], Cor_links_enh[[i]]))
    RF_links <- RF_links[names(RF_links_tmp)]
    RF_links_tmp <- llply(seq_along(thr_enh), function(i) RF_links[[i]][rownames(RF_links[[i]]) %in% thr_enh[[i]], , drop = FALSE])
    names(RF_links_tmp) <- names(RF_links)
    Cor_links <- Cor_links[names(Cor_links_tmp)]
    Cor_links_tmp <- llply(seq_along(thr_enh), function(i) Cor_links[[i]][rownames(Cor_links[[i]]) %in% thr_enh[[i]], , drop = FALSE])
    names(Cor_links_tmp) <- names(Cor_links)
  }
  prunedLinks <- list()
  if (!is.null(RF_links_tmp)) {
    prunedLinks[['RF_links']] <- lapply(RF_links_tmp, as.data.frame)
    # Drop genes left without any link after pruning
    if (sum(as.vector(unlist(lapply(prunedLinks[['RF_links']], nrow))) == 0) > 0) {
      prunedLinks[['RF_links']] <- prunedLinks[['RF_links']][-which(as.vector(unlist(lapply(prunedLinks[['RF_links']], nrow))) == 0)]
    }
  }
  if (!is.null(Cor_links_tmp)) {
    prunedLinks[['Cor_links']] <- lapply(Cor_links_tmp, as.data.frame)
    if (sum(as.vector(unlist(lapply(prunedLinks[['Cor_links']], nrow))) == 0) > 0) {
      prunedLinks[['Cor_links']] <- prunedLinks[['Cor_links']][-which(as.vector(unlist(lapply(prunedLinks[['Cor_links']], nrow))) == 0)]
    }
  }
  return(prunedLinks)
}

#' exportBB
#'
#' Export links to bigInteract format for visualization in UCSC.
#' @param RF_links Data frames list containing RF scores for each region in each gene as returned by enhancerToGene().
#' @param Cor_links Data frames list containing correlation scores for each region in each gene as returned by enhancerToGene(). If both RF and correlation links are provided, the height of the links will represent the RF importance and the color whether the correlation is positive or negative. If only RF is provided, links will be colored black; and if only correlation links are provided the height of the link will indicate the absolute correlation value and the color whether it is positive or negative.
#' @param txdb Txdb object matching with the genome assembly used for the analysis
#' @param org.db Org.Db object for the corresponding species
#' @param standardized Whether link scores (RF or correlation based) should be standardized per gene.
#' This is recommended for visualization in UCSC, as scores must be between 0-1000. Default=TRUE.
#' @param save_path Path to save bb file.
#'
#' @return A dataframe containing links in bigInteract format. For more information, please visit
#' https://genome.ucsc.edu/goldenPath/help/interact.html. Scores values will
#' depend on whether RF links and or correlation links are provided, if both are provided
#' RF importances will be used for the score and correlations will be used for the color.
#'
#' @examples
#' exportBB(RF_links, Cor_links)
#'
#' @import plyr
#' @import AnnotationDbi
#' @import data.table
#'
#' @export
exportBB <- function(RF_links = NULL, Cor_links = NULL, txdb, org.db, standardized = TRUE, save_path = NULL){
  # Check up
  if (!is(txdb, 'TxDb')) {
    stop('txdb has to be an object of class TxDb')
  }
  if (!is(org.db, 'OrgDb')) {
    stop('org.db has to be an object of class OrgDb')
  }
  # Fail early with a clear message instead of erroring later on 'genes'
  if (is.null(RF_links) & is.null(Cor_links)) {
    stop('Please, provide at least either correlation or RF links.')
  }
  if (!is.null(RF_links)) {
    genes <- names(RF_links)
    # Drop genes without any link
    if (sum(as.vector(unlist(lapply(RF_links, nrow))) == 0) > 0) {
      RF_links <- RF_links[-which(as.vector(unlist(lapply(RF_links, nrow))) == 0)]
    }
  } else if (!is.null(Cor_links)) {
    genes <- names(Cor_links)
    if (sum(as.vector(unlist(lapply(Cor_links, nrow))) == 0) > 0) {
      Cor_links <- Cor_links[-which(as.vector(unlist(lapply(Cor_links, nrow))) == 0)]
    }
  }
  # Map gene symbols to the TxDb identifiers (ENTREZ for human, ENSEMBL otherwise)
  if (taxonomyId(org.db) == 9606) {
    ENS2SYMBOL <- AnnotationDbi::select(org.db, keys = genes, columns = "ENTREZID", keytype = "SYMBOL")
  } else {
    ENS2SYMBOL <- AnnotationDbi::select(org.db, keys = genes, columns = "ENSEMBL", keytype = "SYMBOL")
  }
  if (sum(is.na(ENS2SYMBOL[, 2])) > 0) {
    ENS2SYMBOL <- ENS2SYMBOL[-which(is.na(ENS2SYMBOL[, 2])), ]
  }
  filter_list <- list()
  filter_list[['GENEID']] <- ENS2SYMBOL[, 2]
  # Zero-width promoters give the TSS coordinates; keep one TSS per gene,
  # formatted 'chr:start-end'.
  TSS <- promoters(txdb, upstream = 0, downstream = 0, filter = filter_list, columns = c("GENEID"))
  ENS2SYMBOL_VECTOR <- as.vector(ENS2SYMBOL[, 1])
  names(ENS2SYMBOL_VECTOR) <- ENS2SYMBOL[, 2]
  elementMetadata(TSS)$GENEID <- ENS2SYMBOL_VECTOR[unlist(as.vector(elementMetadata(TSS)$GENEID))]
  TSS <- as.data.frame(TSS)
  TSS <- split(TSS, TSS$GENEID)
  TSS <- llply(TSS, function(x) x[1, ])
  TSS <- llply(TSS, function(x) paste0(x[, 1], ':', x[, 2], '-', x[, 3]))
  if (!is.null(RF_links) & !is.null(Cor_links)) {
    # Score from RF importance, color from correlation sign
    RF_links <- RF_links[names(RF_links) %in% names(TSS)]
    RF_links <- RF_links[names(RF_links) %in% names(Cor_links)]
    if (standardized == TRUE) {
      # Min-max scale the importances per gene into [0, 1]
      RF_links <- lapply(RF_links, function(x) {x[, 1] <- (as.numeric(x[, 1]) - min(as.numeric(x[, 1]))) / (max(as.numeric(x[, 1])) - min(as.numeric(x[, 1]))); x})
      # A gene with a single link yields 0/0 (NaN); give it the maximal score
      RF_links <- lapply(RF_links, function(x) {x[x[, 1] == 'NaN', 1] <- 1; x})
    }
    Cor_links <- Cor_links[names(RF_links)]
    TSS <- TSS[names(RF_links)]
    BB <- as.data.frame(data.table::rbindlist(llply(seq_along(RF_links), function(i)
      cbind(rownames(RF_links[[i]]), RF_links[[i]], Cor_links[[i]],
            rep(names(RF_links)[i], nrow(RF_links[[i]])),
            rep(TSS[[i]], nrow(RF_links[[i]]))))))
    BB[, 3] <- as.numeric(as.vector(unlist(BB[, 3])))
    color <- BB[!is.na(BB[, 3]), 3]
    mypal <- colorRampPalette(c('#FF0000', 'grey', '#228B22'))(10)
    BB[!is.na(BB[, 3]), 3] <- .map2color(color, mypal, limits = c(-1, 1))
    if (sum(is.na(BB[, 3])) > 0) {
      BB[which(is.na(BB[, 3])), 3] <- 'black'
    }
  } else if (!is.null(RF_links) & is.null(Cor_links)) {
    # RF only: black links
    RF_links <- RF_links[names(RF_links) %in% names(TSS)]
    if (standardized == TRUE) {
      RF_links <- lapply(RF_links, function(x) {x[, 1] <- (as.numeric(x[, 1]) - min(as.numeric(x[, 1]))) / (max(as.numeric(x[, 1])) - min(as.numeric(x[, 1]))); x})
      RF_links <- lapply(RF_links, function(x) {x[x[, 1] == 'NaN', 1] <- 1; x})
    }
    TSS <- TSS[names(RF_links)]
    BB <- as.data.frame(data.table::rbindlist(llply(seq_along(RF_links), function(i)
      cbind(rownames(RF_links[[i]]), RF_links[[i]],
            rep('black', nrow(RF_links[[i]])),
            rep(names(RF_links)[i], nrow(RF_links[[i]])),
            rep(TSS[[i]], nrow(RF_links[[i]]))))))
  } else if (is.null(RF_links) & !is.null(Cor_links)) {
    # Correlation only: score from |correlation|, color from its sign
    Cor_links <- Cor_links[names(Cor_links) %in% names(TSS)]
    if (standardized == TRUE) {
      Cor_links <- lapply(Cor_links, function(x) {x[, 1] <- (abs(as.numeric(x[, 1])) - abs(min(as.numeric(x[, 1])))) / (max(abs(as.numeric(x[, 1]))) - min(abs(as.numeric(x[, 1])))); x})
      Cor_links <- lapply(Cor_links, function(x) {x[x[, 1] == 'NaN', 1] <- 1; x})
    }
    TSS <- TSS[names(Cor_links)]
    BB <- as.data.frame(data.table::rbindlist(llply(seq_along(Cor_links), function(i)
      as.data.frame(cbind(rownames(Cor_links[[i]]), Cor_links[[i]], Cor_links[[i]],
                          rep(names(Cor_links)[i], nrow(Cor_links[[i]])),
                          rep(TSS[[i]], nrow(Cor_links[[i]])))))))
    if (sum(is.na(BB[, 2])) > 0) {
      BB <- BB[-which(is.na(BB[, 2])), ]
    }
    BB[, 3] <- as.numeric(as.vector(unlist(BB[, 3])))
    color <- BB[!is.na(BB[, 3]), 3]
    mypal <- colorRampPalette(c('#FF0000', 'grey', '#228B22'))(10)
    BB[!is.na(BB[, 3]), 3] <- .map2color(color, mypal, limits = c(-1, 1))
    if (sum(is.na(BB[, 3])) > 0) {
      BB[which(is.na(BB[, 3])), 3] <- 'black'
    }
  }
  BB[, 2] <- abs(as.numeric(as.vector(unlist(BB[, 2]))))
  colnames(BB) <- c('Enhancer', 'Score', 'Color', 'Gene', 'TSS')
  BB <- as.matrix(BB)
  # Anchor each link at the midpoint of the enhancer and the midpoint of the TSS
  Enhancer_seqnames <- sapply(strsplit(BB[, 1], split = ":"), "[", 1)
  Enhancer_coord <- sapply(strsplit(BB[, 1], split = ":"), "[", 2)
  Enhancer_start <- round((as.numeric(sapply(strsplit(Enhancer_coord, split = "-"), "[", 1)) + as.numeric(sapply(strsplit(Enhancer_coord, split = "-"), "[", 2))) / 2)
  Enhancer_end <- Enhancer_start + 1
  TSS_seqnames <- sapply(strsplit(BB[, 5], split = ":"), "[", 1)
  TSS_coord <- sapply(strsplit(BB[, 5], split = ":"), "[", 2)
  TSS_start <- round((as.numeric(sapply(strsplit(TSS_coord, split = "-"), "[", 1)) + as.numeric(sapply(strsplit(TSS_coord, split = "-"), "[", 2))) / 2)
  TSS_end <- TSS_start + 1
  TSS_name <- paste0(TSS_seqnames, ':', TSS_start, '-', TSS_end)
  # bigInteract score is 0-1000; 'value' is kept on a 0-10 scale
  BB <- cbind(Enhancer_seqnames, Enhancer_start, TSS_end, BB[, 'Gene'],
              round(as.numeric(BB[, 'Score']) * 1000), round(as.numeric(BB[, 'Score']) * 10),
              BB[, 'Gene'], BB[, 'Color'],
              Enhancer_seqnames, Enhancer_start, Enhancer_end, BB[, 'Enhancer'], rep('.', nrow(BB)),
              TSS_seqnames, TSS_start, TSS_end, TSS_name, rep('.', nrow(BB)))
  # Ensure chromStart < chromEnd by swapping where needed
  BB[which(as.numeric(BB[, 3]) < as.numeric(BB[, 2])), c(2, 3)] <- BB[which(as.numeric(BB[, 3]) < as.numeric(BB[, 2])), c(3, 2)]
  BB <- as.data.frame(BB)
  colnames(BB) <- c('chrom', 'chromStart', 'chromEnd', 'name', 'score', 'value', 'exp', 'color',
                    'sourceChrom', 'sourceStart', 'sourceEnd', 'sourceName', 'sourceStrand',
                    'targetChrom', 'targetStart', 'targetEnd', 'targetName', 'targetStrand')
  # Drop interchromosomal links (source and target chromosomes differ).
  # A stray no-op statement ('BB$targetName') that preceded this check was removed.
  if (sum(as.vector(unlist(BB[, 9])) != as.vector(unlist(BB[, 14]))) > 0) {
    BB <- BB[-which(as.vector(unlist(BB[, 9])) != as.vector(unlist(BB[, 14]))), ]
  }
  # BUGFIX: the guard previously tested is.null(file) -- 'file' is the base R
  # function and is never NULL -- so write.table() was attempted even when no
  # save_path was given (failing on file = NULL).
  if (!is.null(save_path)) {
    write.table(BB, file = save_path, row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t", eol = "\n")
  }
  return(BB)
}

# Helper function: map numeric values to palette colors by binning the value
# range (or the explicit limits) into length(pal) equal-width intervals.
.map2color <- function(x, pal, limits = NULL){
  if (is.null(limits)) limits <- range(x)
  pal[findInterval(x, seq(limits[1], limits[2], length.out = length(pal) + 1), all.inside = TRUE)]
}
46aa63771ce2e707810135b14992980c5754f0a2
a9de5afbb7e169f668224305858821765d70a072
/pkg/R/stringdist.R
2061b2ed98e387e9892b670893a14fe237529e65
[]
no_license
rsaporta/stringdist
05f5a1db5f97a30497035d99d6ed8179c56f35ff
771e5ec46e83e739d087962f9d9792ce5d94acdb
refs/heads/master
2020-12-25T00:39:23.282374
2013-07-19T14:20:39
2013-07-19T14:20:39
null
0
0
null
null
null
null
UTF-8
R
false
false
12,471
r
stringdist.R
#' A package for string distance calculation #' #' @name stringdist-package #' @docType package #' @useDynLib stringdist {} #' Compute distance metrics between strings #' #' @section Details: #' \code{stringdist} computes pairwise string distances between elements of \code{character} vectors \code{a} and \code{b}, #' where the vector with less elements is recycled. \code{stringdistmatrix} computes the string distance matrix with rows according to #' \code{a} and columns according to \code{b}. #' #' #' Currently, the following distance metrics are supported: #' \tabular{ll}{ #' \code{osa} \tab Optimal string aligment, (restricted Damerau-Levenshtein distance).\cr #' \code{lv} \tab Levenshtein distance (as in R's native \code{\link[utils]{adist}}).\cr #' \code{dl} \tab Full Damerau-Levenshtein distance.\cr #' \code{hamming} \tab Hamming distance (\code{a} and \code{b} must have same nr of characters).\cr #' \code{lcs} \tab Longest common substring distance.\cr #' \code{qgram} \tab \eqn{q}-gram distance. \cr #' \code{cosine} \tab cosine distance between \eqn{q}-gram profiles \cr #' \code{jaccard} \tab Jaccard distance between \eqn{q}-gram profiles \cr #' \code{jw} \tab Jaro, or Jaro-Winker distance. #' } #' The \bold{Hamming distance} (\code{hamming}) counts the number of character substitutions that turns #' \code{b} into \code{a}. If \code{a} and \code{b} have different number of characters \code{Inf} is #' returned. #' #' The \bold{Levenshtein distance} (\code{lv}) counts the number of deletions, insertions and substitutions necessary #' to turn \code{b} into \code{a}. This method is equivalent to \code{R}'s native \code{\link[utils]{adist}} function. #' The computation is aborted when \code{maxDist} is exceeded, in which case \code{Inf} is returned. #' #' The \bold{Optimal String Alignment distance} (\code{osa}) is like the Levenshtein distance but also #' allows transposition of adjacent characters. 
Here, each substring may be edited only once so a #' character cannot be transposed twice. #' The computation is aborted when \code{maxDist} is exceeded, in which case \code{Inf} is returned. #' #' The \bold{full Damerau-Levensthein distance} (\code{dl}) allows for multiple transpositions. #' The computation is aborted when \code{maxDist} is exceeded, in which case \code{Inf} is returned. #' #' The \bold{longest common substring} is defined as the longest string that can be obtained by pairing characters #' from \code{a} and \code{b} while keeping the order of characters intact. The lcs-distance is defined as the #' number of unpaired characters. The distance is equivalent to the edit distance allowing only deletions and #' insertions, each with weight one. #' The computation is aborted when \code{maxDist} is exceeded, in which case \code{Inf} is returned. #' #' A \bold{\eqn{q}-gram} is a subsequence of \eqn{q} \emph{consecutive} characters of a string. If \eqn{x} (\eqn{y}) is the vector of counts #' of \eqn{q}-gram occurrences in \code{a} (\code{b}), the \bold{\eqn{q}-gram distance} is given by the sum over #' the absolute differences \eqn{|x_i-y_i|}. #' The computation is aborted when \code{q} is is larger than the length of any of the strings. In that case \code{Inf} is returned. #' #' The \bold{cosine distance} is computed as \eqn{1-x\cdot y/(\|x\|\|y\|)}, where \eqn{x} and \eqn{y} were defined above. #' #' Let \eqn{X} be the set of unique \eqn{q}-grams in \code{a} and \eqn{Y} the set of unique \eqn{q}-grams in \code{b}. #' The \bold{Jaccard distance} is given by \eqn{1-|X\cap Y|/|X\cup Y|}. #' #' The \bold{Jaro distance} (\code{method=jw}, \code{p=0}), is a number between 0 (exact match) and 1 (completely dissimilar) measuring #' dissimilarity between strings. #' It is defined to be 0 when both strings have length 0, and 1 when there are no character matches between \code{a} and \code{b}. 
#' Otherwise, the Jaro distance is defined as \eqn{1-(1/3)(m/|a| + m/|b| + (m-t)/m)}. Here,\eqn{|a|} indicates the number of #' characters in \code{a}, \eqn{m} is the number of #' character matches and \eqn{t} the number of transpositions of matching characters. #' A character \eqn{c} of \code{a} \emph{matches} a character from \code{b} when #' \eqn{c} occurs in \code{b}, and the index of \eqn{c} in \code{a} differs less than \eqn{\max(|a|,|b|)/2 -1} (where we use integer division). #' Two matching characters are transposed when they are matched but they occur in different order in string \code{a} and \code{b}. #' #' The \bold{Jaro-Winkler distance} (\code{method=jw}, \code{0<p<=0.25}) adds a correction term to the Jaro-distance. It is defined as \eqn{d - l*p*d}, where #' \eqn{d} is the Jaro-distance. Here, \eqn{l} is obtained by counting, from the start of the input strings, after how many #' characters the first character mismatch between the two strings occurs, with a maximum of four. The factor \eqn{p} #' is a penalty factor, which in the work of Winkler is often chosen \eqn{0.1}. #' #' @section Encoding issues: #' Input strings are re-encoded to \code{utf8} an then to \code{integer} #' vectors prior to the distance calculation (since the underlying \code{C}-code expects unsigned ints). #' This double conversion is necessary as it seems the only way to #' reliably convert (possibly multibyte) characters to integers on all systems #' supported by \code{R}. #' (\code{R}'s native \code{\link[utils]{adist}} function does this as well). #' See \code{\link[base]{Encoding}} for further details. #' #' @section Paralellization: #' The \code{stringdistmatrix} function uses \code{\link[parallel]{makeCluster}} to generate a cluster and compute the #' distance matrix in parallel. As the cluster is local, the \code{ncores} parameter should not be larger than the number #' of cores on your machine. 
Use \code{\link[parallel]{detectCores}} to check the number of cores available. Alternatively, #' you can create a cluster by yourself, using \code{\link[parallel]{makeCluster}} and pass that to \code{stringdistmatrix}. #' #' @references #' \itemize{ #' \item{ #' R.W. Hamming (1950). Error detecting and Error Correcting codes, The Bell System Technical Journal 29, 147-160 #' } #' \item{ #' V.I. Levenshtein. (1960). Binary codes capable of correcting deletions, insertions, and reversals. Soviet Physics Doklady 10 707-711. #' } #' \item{ #' F.J. Damerau (1964) A technique for computer detection and correction of spelling errors. Communications of the ACM 7 171-176. #' } #' \item{ #' An extensive overview of (online) string matching algorithms is given by G. Navarro (2001). #' A guided tour to approximate string matching, ACM Computing Surveys 33 31-88. #' } #' \item{ #' Many algorithms are available in pseudocode from wikipedia: http://en.wikipedia.org/wiki/Damerau-Levenshtein_distance. #' } #' \item{The code for the full Damerau-Levenshtein distance was adapted from Nick Logan's public github repository: #' \url{https://github.com/ugexe/Text--Levenshtein--Damerau--XS/blob/master/damerau-int.c}. #' } #' #' \item{ #' A good reference for qgram distances is E. Ukkonen (1992), Approximate string matching with q-grams and maximal matches. #' Theoretical Computer Science, 92, 191-211. #' } #' #' \item{Wikipedia \code{http://en.wikipedia.org/wiki/Jaro\%E2\%80\%93Winkler_distance} describes the Jaro-Winker #' distance used in this package. Unfortunately, there seems to be no single #' definition for the Jaro distance in literature. For example Cohen, Ravikumar and Fienberg (Proceeedings of IIWEB03, Vol 47, 2003) #' report a different matching window for characters in strings \code{a} and \code{b}. #' } #' #' #'} #' #' #' #' @param a R object (target); will be converted by \code{as.character}. #' @param b R object (source); will be converted by \code{as.character}. 
#' @param method Method for distance calculation. The default is \code{"osa"} (see details). #' @param weight The penalty for deletion, insertion, substitution and transposition, in that order. #' Weights must be positive and not exceed 1. \code{weight[4]} is ignored when \code{method='lv'} and \code{weight} is #' ignored completely when \code{method='hamming'}, \code{'qgram'}, \code{'cosine'}, \code{'Jaccard'}, \code{'lcs'} or \code{'jw'}. #' @param maxDist Maximum string distance for edit-like distances, in some cases computation is stopped when \code{maxDist} is reached. #' \code{maxDist=Inf} means calculation goes on untill the distance is computed. Ignored for \code{method='qgram'}, \code{'cosine'}, \code{'jaccard'} and #' \code{method='jw'}. #' @param q size of the \eqn{q}-gram, must be nonnegative. Ignored for all but \code{method='qgram'}, \code{'jaccard'} or \code{'cosine'}. #' @param p penalty factor for Jaro-Winkler distance. The valid range for \code{p} is \code{0<= p <= 0.25}. #' If \code{p=0} (default), the Jaro-distance is returned. Ignored for all methods except \code{'jw'}. #' #' #' #' @return For \code{stringdist}, a vector with string distances of size \code{max(length(a),length(b))}. #' For \code{stringdistmatrix}, a \code{length(a)xlength(b)} \code{matrix}. The returned distance is #' nonnegative if it can be computed, \code{NA} if any of the two argument strings is \code{NA} and \code{Inf} #' when it cannot be computed or \code{maxDist} is exceeded. See details for the meaning of \code{Inf} for the various algorithms. 
#'
#'
#' @example ../examples/stringdist.R
#' @export
stringdist <- function(a, b,
    method=c("osa","lv","dl","hamming","lcs", "qgram","cosine","jaccard", "jw"),
    weight=c(d=1,i=1,s=1,t=1),
    maxDist=Inf, q=1, p=0){
  # Coerce both inputs to character before any length checks.
  a <- as.character(a)
  b <- as.character(b)
  # Empty input on either side: nothing to compare, return a 0-length numeric.
  if (length(a) == 0 || length(b) == 0){
    return(numeric(0))
  }
  # Warn when the shorter vector does not recycle evenly over the longer one.
  if ( max(length(a),length(b)) %% min(length(a),length(b)) != 0 ){
    warning(RECYCLEWARNING)
  }
  method <- match.arg(method)
  # Convert strings to lists of integer code points for the C routines.
  a <- char2int(a)
  b <- char2int(b)
  # Parameter sanity checks: weights in (0,1], q >= 0, 0 <= p <= 0.25.
  stopifnot(
    all(is.finite(weight)),
    all(weight > 0),
    all(weight <=1),
    q >= 0,
    p <= 0.25,
    p >= 0
  )
  # NOTE(review): arguments are deliberately passed as (b, a) here, binding
  # do_dist's first parameter to 'b' -- presumably matching the target/source
  # semantics documented above; verify against the C routines.
  do_dist(b,a,method,weight,maxDist,q,p)
}
#' @param ncores number of cores to use. If \code{ncores>1}, a local cluster is
#' created using \code{\link[parallel]{makeCluster}}. Parallelisation is over \code{b}, so
#' the speed gain by parallelisation is highest when \code{b} has less elements than \code{a}.
#' @param cluster (optional) a custom cluster, created with
#' \code{\link[parallel]{makeCluster}}. If \code{cluster} is not \code{NULL},
#' \code{ncores} is ignored.
#'
#'
#'
#' @rdname stringdist
#' @export
stringdistmatrix <- function(a, b,
    method = c("osa","lv","dl","hamming","lcs","qgram","cosine","jaccard", "jw"),
    weight = c(d=1,i=1,s=1,t=1),
    maxDist = Inf,
    q = 1,
    p = 0,
    ncores = 1,
    cluster = NULL
  ){
  a <- as.character(a)
  b <- as.character(b)
  if (length(a) == 0 || length(b) == 0){
    return(numeric(0))
  }
  method <- match.arg(method)
  # Same parameter checks as stringdist().
  stopifnot(
    all(is.finite(weight)),
    all(weight > 0),
    all(weight <= 1),
    q >= 0,
    p <= 0.25,
    p >= 0
  )
  a <- char2int(a)
  # Wrap each element of b in its own list so that (par)Sapply iterates
  # over the elements of b, producing one matrix column per element.
  b <- lapply(char2int(b), list)
  if (ncores == 1){
    # BUGFIX: 'p' was previously omitted from this call, so the sequential
    # branch failed for method = "jw" (do_dist() has no default for p).
    x <- sapply(b, do_dist, a, method, weight, maxDist, q, p)
  } else {
    if (is.null(cluster)){
      cl <- makeCluster(ncores)
      # Ensure a locally created cluster is stopped even if the
      # computation below throws an error.
      on.exit(stopCluster(cl), add = TRUE)
    } else {
      stopifnot(inherits(cluster, 'cluster'))
      cl <- cluster
    }
    # BUGFIX: this call previously dispatched on 'cluster', which is NULL
    # when the cluster was created locally above; always dispatch on 'cl'.
    x <- parSapply(cl, b, do_dist, a, method, weight, maxDist, q, p)
  }
  as.matrix(x)
}

# Convert a character vector to a list of integer vectors of UTF-8 code
# points (one vector per input string).
char2int <- function(x){
  # For some OS's enc2utf8 has unexpected behavior for NA's,
  # see https://bugs.r-project.org/bugzilla3/show_bug.cgi?id=15201.
  # This is fixed for R >= 2.15.3.
  # i <- !is.na(x)
  # x[i] <- enc2utf8(x[i])
  lapply(enc2utf8(x), utf8ToInt)
}

# Dispatch one distance computation to the matching compiled routine.
# NB: callers pass the arguments swapped as (b, a); the C code expects
# this order.
do_dist <- function(a, b, method, weight, maxDist, q, p){
  # The C routines interpret maxDist == 0 as "no limit".
  if (maxDist == Inf) maxDist <- 0L;
  switch(method,
    osa     = .Call('R_osa' , a, b, as.double(weight), as.double(maxDist)),
    lv      = .Call('R_lv'  , a, b, as.double(weight), as.double(maxDist)),
    dl      = .Call('R_dl'  , a, b, as.double(weight), as.double(maxDist)),
    hamming = .Call('R_hm'  , a, b, as.integer(maxDist)),
    lcs     = .Call('R_lcs' , a, b, as.integer(maxDist)),
    qgram   = .Call('R_qgram_tree', a, b, as.integer(q), 0L),
    cosine  = .Call('R_qgram_tree', a, b, as.integer(q), 1L),
    jaccard = .Call('R_qgram_tree', a, b, as.integer(q), 2L),
    jw      = .Call('R_jw'  , a, b, as.double(p))
  )
}
72369e5a66276fda9ffa4e82afd1481580d55aed
6ff74dfb7753a81505ac7da6b8192dc9e423a941
/run_analysis.R
b04984014ef20a8da079789dd04e976a8e0720cf
[]
no_license
emasnou/GetData_prj
40777e8a27a8c03891fb34caec2cbff8a2840199
ff86a90c329647726e0938c2ca9dbe68754e347b
refs/heads/master
2021-01-16T17:45:41.858563
2015-07-26T14:08:51
2015-07-26T14:08:51
39,720,867
0
0
null
null
null
null
UTF-8
R
false
false
5,468
r
run_analysis.R
# run_analysis.R
# Coursera "Getting and Cleaning Data" course script.
# Builds a tidy dataset from the UCI HAR (smartphone activity) raw files.
# Load functions source for some utilities.
# NOTE: DataExists(), UnzipFile() and WriteSpecs() used below are
# presumably defined in src/functions.R -- not visible from this file.
source ("src/functions.R")
library(dplyr)
# Initialize environment.
# clean: CleanEnv()
# Write system specs into doc directory.
WriteSpecs()
# Initial values for variables: archive name and download URL.
dataset.filename = "getdata_projectfiles_UCI HAR Dataset.zip"
dataset.url = "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Dataset paths once uncompressed.
dataset.variables.labels="data/UCI HAR Dataset/features.txt"
dataset.activity.names="data/UCI HAR Dataset/activity_labels.txt"
dataset.train = "data/UCI HAR Dataset/train/X_train.txt"
dataset.train.labels = "data/UCI HAR Dataset/train/y_train.txt"
dataset.train.subjects = "data/UCI HAR Dataset/train/subject_train.txt"
dataset.test = "data/UCI HAR Dataset/test/X_test.txt"
dataset.test.labels = "data/UCI HAR Dataset/test/y_test.txt"
dataset.test.subjects = "data/UCI HAR Dataset/test/subject_test.txt"
# Check if the data file exists and download it if it is missing.
DataExists (dataset.filename, dataset.url)
UnzipFile(dataset.filename)
##############################
### DATA PREPARATION
##############################
# Variable (feature) names shared by all datasets; sep="" splits on any
# whitespace.
DF.colNames = read.delim(dataset.variables.labels, header=FALSE, sep="", stringsAsFactors=FALSE)
# Activity id -> name lookup table.
DF.activity.names = read.delim(dataset.activity.names, header=FALSE, sep="", stringsAsFactors=FALSE)
## Train dataset: activity codes, subject ids and the measurements.
train.activity = read.delim(dataset.train.labels,header=FALSE, sep="")
train.subjects = read.delim(dataset.train.subjects,header=FALSE, sep="")
colnames(train.subjects)= c("Subjects")
# Measurements matrix, one column per feature name from features.txt.
DF.train = read.delim(dataset.train, header = FALSE, sep = "", dec = ".", fill=FALSE, col.names=DF.colNames[,2])
# merge activity and subjects into main DF.
# id with activity name and add them to DF.train.
# BUGFIX: merge() re-sorts its result by the join key, so the looked-up
# activity names no longer lined up row-for-row with the measurements.
# Use match() instead, which preserves the original row order.
tmp = data.frame(id = train.activity[, 1],
                 Activity = DF.activity.names[match(train.activity[, 1],
                                                    DF.activity.names[, 1]), 2])
# Attach activity name and subject id columns to the training data.
DF.train = cbind(DF.train, tmp[,2],train.subjects)
colnames(DF.train)[562]= c("Activity")
DF.train = mutate(DF.train,Data.Type="TRAIN")
## Test dataset.
test.activity = read.delim(dataset.test.labels,header=FALSE, sep="")
test.subjects = read.delim(dataset.test.subjects,header=FALSE, sep="")
# BUGFIX: this line previously renamed train.subjects (copy-paste slip);
# it must label the *test* subjects column.
colnames(test.subjects)= c("Subjects")
DF.test = read.delim(dataset.test, header = FALSE, sep = "", dec = ".", fill=FALSE, col.names=DF.colNames[,2])
# Row-order-preserving lookup of activity names for the test set
# (same fix as for the training set above).
tmp = data.frame(id = test.activity[, 1],
                 Activity = DF.activity.names[match(test.activity[, 1],
                                                    DF.activity.names[, 1]), 2])
DF.test = cbind(DF.test, tmp[,2],test.subjects)
colnames(DF.test)[562]= c("Activity")
colnames(DF.test)[563]= c("Subjects")
DF.test = mutate(DF.test,Data.Type="TEST")
# Clean variables not used forward.
rm(list=c("DF.activity.names", "DF.colNames", "test.activity","test.subjects", "tmp","train.activity", "train.subjects"))
##############################################################
# Merges the training and the test sets to create one data set.
##############################################################
DF.total = rbind(DF.train,DF.test)
# remove former datasets.
rm (list=c("DF.train","DF.test"))
#############################################################
# Extracts only the measurements on the mean and
# standard deviation for each measurement.
#############################################################
print (" Mean for all columns")
# BUGFIX: bare apply() results are discarded when this script is
# source()d; wrap them in print() so the summaries are actually shown,
# matching the print() labels above them.
print (apply(DF.total[1:561],2,mean))
print (" Standard deviation for all columns")
print (apply(DF.total[1:561],2,sd))
#############################################################
# Uses descriptive activity names to name the activities
# in the data set
#############################################################
# This action has already been done in the data-preparation phase
# for both datasets.
#
# tmp = merge(x=DF.activity.names,y=test.activity)
# colnames(tmp)= c( "id","Activity")
#############################################################
# Appropriately labels the data set with descriptive variable names.
#############################################################
# This action has already been done in the data-preparation phase
# for both datasets.
#
# colnames(train.subjects)= c("Subjects")
# DF.test = read.delim(dataset.test,
#           header = FALSE,
#           sep = "",
#           dec = ".",
#           fill=FALSE,
#           col.names=DF.colNames[,2])
print ("Final Data set")
print (DF.total)
##############################################################
# From the data set in step 4, creates a second,
# independent tidy data set with the average of each variable
# for each activity and each subject.
###############################################################
# Group by activity and subject, then compute mean and sd of every
# feature column. Columns 562:564 are Activity, Subjects and Data.Type,
# so the negative index keeps only the 561 measurement columns.
# NOTE(review): summarise_each_()/funs() are deprecated dplyr APIs;
# the modern equivalent is summarise(across(...)), but switching would
# change the output column naming scheme, so it is left as-is here.
DF.new = group_by(DF.total,Activity,Subjects)
DF.new = summarise_each_(DF.new, funs(mean,sd), names(DF.new)[-(562:564)])
print (DF.new)
# Write the tidy summary table to disk.
write.table(DF.new,file = "data/Output.txt",append = FALSE,row.names = FALSE)
44fd39ccbfbc4ad6992feb08489b6f91c006859c
fd108a4f1c91f1a8ea3b2850d485be9f7f1c3451
/mm.R
22eb8dd84ff7f2a09251e7328fb5be8036abfa06
[]
no_license
mauriciodamiao/Econometry
d499675d5b8dae70f0b7004b2096ae88c678caa2
e09c7b8c94bbf0ce1d951fad96cf0ec761c6de18
refs/heads/master
2022-12-09T20:52:49.014426
2020-09-15T02:02:55
2020-09-15T02:02:55
null
0
0
null
null
null
null
UTF-8
R
false
false
341
r
mm.R
# Method-of-moments toy example: pick (intercept, slope) so that the mean
# of the fitted values matches the mean of the observed y.
y = c( 1, 4, 3, 5, 7)
x = c(0.5, 1, 2, 11, 2)
df = data.frame(y, x)
pars = c(1, 2)  # starting values: intercept = 1, slope = 2

# Objective function: |mean(y_hat) - mean(y)| for a linear predictor.
#
# @param data data.frame whose first column is y and second column is x.
# @param pars numeric(2): c(intercept, slope).
# @return absolute difference between the mean fitted value and mean(y).
otim = function(data, pars){
  # BUGFIX: the body previously read the global 'df' instead of the
  # 'data' argument, silently ignoring whatever data frame was passed in.
  y_hat = pars[1] + pars[2]*data[,2]
  y_hat_med = mean(y_hat)
  y_med = mean(data[,1])
  dist = abs(y_hat_med - y_med)
  return(dist)
}
otim(df, pars)

library(optimx)  # NOTE(review): loaded but unused; base stats::optim is called below.
# optim() passes the parameter vector as fn's first *positional* argument.
# Because 'data' is matched by name here, that vector falls through to
# 'pars' -- this works, but it relies on R's argument matching rules; a
# fn(pars, data) signature would be more conventional.
optim(par = pars, fn = otim, data=df)
e1f0c7389861b9c218dd04a01314a7594ad975cd
1a96e268494d1dbf1163783bec528ef26aabcd59
/R/workbench.R
a79fb69d4681a2082e62843a943748157eb3b6e1
[]
no_license
jm3594/crtpower
22c22f841461c0b900fb7f912c733219a6156f88
3be0bc079aae24824addd31347cd647d6e067afa
refs/heads/master
2020-07-20T22:13:48.925757
2017-06-30T20:35:50
2017-06-30T20:35:50
73,311,539
0
0
null
null
null
null
UTF-8
R
false
false
780
r
workbench.R
# delete this before packagizing
# Scratch script exercising the package's sample-size/power functions.
# NOTE: crtpower.2mean(), crtpower.2prop() and crtpower.2r.test() are
# presumably defined in the package R/ sources -- not visible from here,
# so argument semantics below are inferred from the variable names only.

# testing continuous outcome
alpha <- 0.05
power <- 0.2233
m <- 3
n <- 100
nsd <- 0
cv <- NULL
icc <- 0.01
varw <- 0.99
varb <- NULL
d <- 0.5*sqrt(varw)  # effect size expressed as half a within-cluster SD
test <- crtpower.2mean(alpha, power, m, n, nsd, cv, d, icc, varw, varb)
# Vectorized wrapper; NOTE(review): created but never used below.
testfun <- Vectorize(crtpower.2mean)
#----------------------------------------------------------
# testing binary outcome
m <- 20
n <- 20
cv <- 0
p1 <- 0.10
p2 <- NULL
icc <- 0.001
alpha <- 0.05
power <- 0.80
pooled <- FALSE
crtpower.2prop(alpha, power, m, n, cv, p1, p2, icc, pooled)
#-----------------------------------------------------------
# testing count outcome
# power <- NULL here, so the call presumably solves for power -- confirm
# against the function's documentation.
m <- 28
n <- 424
cv <- 0
r1 <- 0.0148
d <- 0.0044
icc <- 0
alpha <- 0.05
power <- NULL
crtpower.2r.test(alpha, power, m, n, cv, r1, d, icc)
b821ed04f89b74fa55e5875c3ee2b82f4a4e7e09
b83c6415f71f85245fdbb7b788f8ca09a46e39ab
/tests/testthat/test_hello.R
c2c12a5d7e846cb36bf90419a1e1e79c88f2812a
[]
no_license
ahorawzy/wzytry
06bccf918d8855bf6799b9deeb7c90ae33b43f28
3dc49734c8272b9ab102854fae12068dc7ffbedb
refs/heads/master
2020-03-18T14:00:38.943579
2018-05-26T13:19:59
2018-05-26T13:19:59
134,824,044
0
0
null
null
null
null
UTF-8
R
false
false
125
r
test_hello.R
library(wzytry)

# NOTE(review): context() is deprecated in testthat 3e (the file name is
# used as the context instead); kept for compatibility with older testthat.
context("test for hello.R")

test_that("hello function can say hello", {
  # expect_is() is deprecated; expect_type() checks typeof() directly,
  # which for a plain character vector is equivalent here.
  expect_type(hello(), "character")
})
37a194f6556e1c11a32c50deaf6dcfa653edd04b
b404a06211d0702b8b4ed40d9a8b05ba3009f02e
/R/PlayerSeasonStat.r
8f97375aa4c4349c04d5742aaf69c0208d0f557a
[]
no_license
saiemgilani/cfbd-api-R
2b94b8fbeff9462f3eeeee467f932bb2b22c4432
84535ae89b8b08eb4f63a7f136d62948a3f34def
refs/heads/master
2023-03-05T17:49:10.113757
2021-02-15T15:24:20
2021-02-15T15:24:20
339,117,428
0
0
null
null
null
null
UTF-8
R
false
false
5,189
r
PlayerSeasonStat.r
# College Football Data API
#
# This is an API for accessing all sorts of college football data. It currently has a wide array of data ranging from play by play to player statistics to game scores and more.
#
# OpenAPI spec version: 2.3.5
# Contact: admin@collegefootballdata.com
# Generated by: https://github.com/swagger-api/swagger-codegen.git


#' PlayerSeasonStat Class
#'
#' Value object describing one season-level statistic for a player.
#'
#' @field season Season year (numeric scalar).
#' @field playerId Player identifier (numeric scalar).
#' @field player Player name (character scalar).
#' @field team Team name (character scalar).
#' @field conference Conference name (character scalar).
#' @field category Statistic category (character scalar).
#' @field statType Statistic type (character scalar).
#' @field stat Statistic value (type not validated by the generator).
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
PlayerSeasonStat <- R6::R6Class(
  'PlayerSeasonStat',
  public = list(
    `season` = NULL,
    `playerId` = NULL,
    `player` = NULL,
    `team` = NULL,
    `conference` = NULL,
    `category` = NULL,
    `statType` = NULL,
    `stat` = NULL,
    # Construct a new instance. Every argument is optional; supplied
    # arguments are checked to be length-1 values of the expected type
    # (except `stat`, which is accepted as-is).
    initialize = function(`season`, `playerId`, `player`, `team`, `conference`, `category`, `statType`, `stat`){
      if (!missing(`season`)) {
        stopifnot(is.numeric(`season`), length(`season`) == 1)
        self$`season` <- `season`
      }
      if (!missing(`playerId`)) {
        stopifnot(is.numeric(`playerId`), length(`playerId`) == 1)
        self$`playerId` <- `playerId`
      }
      if (!missing(`player`)) {
        stopifnot(is.character(`player`), length(`player`) == 1)
        self$`player` <- `player`
      }
      if (!missing(`team`)) {
        stopifnot(is.character(`team`), length(`team`) == 1)
        self$`team` <- `team`
      }
      if (!missing(`conference`)) {
        stopifnot(is.character(`conference`), length(`conference`) == 1)
        self$`conference` <- `conference`
      }
      if (!missing(`category`)) {
        stopifnot(is.character(`category`), length(`category`) == 1)
        self$`category` <- `category`
      }
      if (!missing(`statType`)) {
        stopifnot(is.character(`statType`), length(`statType`) == 1)
        self$`statType` <- `statType`
      }
      if (!missing(`stat`)) {
        self$`stat` <- `stat`
      }
    },
    # Return the non-NULL fields as a named list, ready for JSON encoding.
    toJSON = function() {
      PlayerSeasonStatObject <- list()
      if (!is.null(self$`season`)) {
        PlayerSeasonStatObject[['season']] <- self$`season`
      }
      if (!is.null(self$`playerId`)) {
        PlayerSeasonStatObject[['playerId']] <- self$`playerId`
      }
      if (!is.null(self$`player`)) {
        PlayerSeasonStatObject[['player']] <- self$`player`
      }
      if (!is.null(self$`team`)) {
        PlayerSeasonStatObject[['team']] <- self$`team`
      }
      if (!is.null(self$`conference`)) {
        PlayerSeasonStatObject[['conference']] <- self$`conference`
      }
      if (!is.null(self$`category`)) {
        PlayerSeasonStatObject[['category']] <- self$`category`
      }
      if (!is.null(self$`statType`)) {
        PlayerSeasonStatObject[['statType']] <- self$`statType`
      }
      if (!is.null(self$`stat`)) {
        PlayerSeasonStatObject[['stat']] <- self$`stat`
      }
      PlayerSeasonStatObject
    },
    # Populate the fields from a parsed JSON string.
    fromJSON = function(PlayerSeasonStatJson) {
      PlayerSeasonStatObject <- jsonlite::fromJSON(PlayerSeasonStatJson)
      if (!is.null(PlayerSeasonStatObject$`season`)) {
        self$`season` <- PlayerSeasonStatObject$`season`
      }
      if (!is.null(PlayerSeasonStatObject$`playerId`)) {
        self$`playerId` <- PlayerSeasonStatObject$`playerId`
      }
      if (!is.null(PlayerSeasonStatObject$`player`)) {
        self$`player` <- PlayerSeasonStatObject$`player`
      }
      if (!is.null(PlayerSeasonStatObject$`team`)) {
        self$`team` <- PlayerSeasonStatObject$`team`
      }
      if (!is.null(PlayerSeasonStatObject$`conference`)) {
        self$`conference` <- PlayerSeasonStatObject$`conference`
      }
      if (!is.null(PlayerSeasonStatObject$`category`)) {
        self$`category` <- PlayerSeasonStatObject$`category`
      }
      if (!is.null(PlayerSeasonStatObject$`statType`)) {
        self$`statType` <- PlayerSeasonStatObject$`statType`
      }
      if (!is.null(PlayerSeasonStatObject$`stat`)) {
        self$`stat` <- PlayerSeasonStatObject$`stat`
      }
    },
    # Serialize the object to a JSON string.
    toJSONString = function() {
      # BUGFIX: the generated sprintf() template interpolated character
      # fields with a bare %s (no quoting or escaping), producing invalid
      # JSON, and returned character(0) whenever any field was NULL
      # (sprintf with a zero-length argument). Delegating to jsonlite
      # quotes and escapes correctly and simply omits unset fields.
      as.character(jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE))
    },
    # Populate the fields from a JSON string (same effect as fromJSON).
    fromJSONString = function(PlayerSeasonStatJson) {
      PlayerSeasonStatObject <- jsonlite::fromJSON(PlayerSeasonStatJson)
      self$`season` <- PlayerSeasonStatObject$`season`
      self$`playerId` <- PlayerSeasonStatObject$`playerId`
      self$`player` <- PlayerSeasonStatObject$`player`
      self$`team` <- PlayerSeasonStatObject$`team`
      self$`conference` <- PlayerSeasonStatObject$`conference`
      self$`category` <- PlayerSeasonStatObject$`category`
      self$`statType` <- PlayerSeasonStatObject$`statType`
      self$`stat` <- PlayerSeasonStatObject$`stat`
    }
  )
)
6e763c4d6be667063d9b80d7c89e334ee2cb4462
05ff20b5652c78649f3ee7ffcebe013466dd8697
/man/ENstepQ.Rd
6a4f429809c21b94732933df1d75206b9d2143c4
[]
no_license
cran/epanet2toolkit
b28a457c5f9b24756130489a5f6d34fd7e48f4d1
493228edea328d641c116a96fcfc1cadd671bc8f
refs/heads/master
2023-02-24T17:36:50.821558
2023-02-09T08:50:07
2023-02-09T08:50:07
107,667,516
0
0
null
null
null
null
UTF-8
R
false
true
318
rd
ENstepQ.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/quality.r \name{ENstepQ} \alias{ENstepQ} \title{Advances WQ simulation one water quality time step.} \usage{ ENstepQ() } \value{ time remaining in the overall simulation } \description{ Advances WQ simulation one water quality time step. }
d6927ab5cfc4bdc790ffc6155a865554a31f3dd9
53d7e351e21cc70ae0f2b746dbfbd8e2eec22566
/man/umxPath.Rd
c3b8469ca67038d83fcf1e423fbead2c4bb3ca48
[]
no_license
tbates/umx
eaa122285241fc00444846581225756be319299d
12b1d8a43c84cc810b24244fda1a681f7a3eb813
refs/heads/master
2023-08-31T14:58:18.941189
2023-08-31T09:52:02
2023-08-31T09:52:02
5,418,108
38
25
null
2023-09-12T21:09:45
2012-08-14T20:18:01
R
UTF-8
R
false
true
8,513
rd
umxPath.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/build_run_modify.R \name{umxPath} \alias{umxPath} \title{Easier (and powerful) specification of paths in SEM.} \usage{ umxPath( from = NULL, to = NULL, with = NULL, var = NULL, cov = NULL, means = NULL, v1m0 = NULL, v.m. = NULL, v0m0 = NULL, v.m0 = NULL, v0m. = NULL, fixedAt = NULL, freeAt = NULL, firstAt = NULL, unique.bivariate = NULL, unique.pairs = NULL, fromEach = NULL, forms = NULL, Cholesky = NULL, defn = NULL, connect = c("single", "all.pairs", "all.bivariate", "unique.pairs", "unique.bivariate"), arrows = 1, free = TRUE, values = NA, labels = NA, lbound = NA, ubound = NA, hasMeans = NULL ) } \arguments{ \item{from}{One or more source variables e.g "A" or c("A","B")} \item{to}{One or more target variables for one-headed paths, e.g "A" or c("A","B").} \item{with}{2-headed path <--> from 'from' to 'with'.} \item{var}{Equivalent to setting 'from' and 'arrows' = 2. nb: from, to, and with must be left empty.} \item{cov}{Convenience to allow 2 variables to covary (equivalent to 'from' and 'with'). nb: leave from, to, etc. empty} \item{means}{equivalent to "from = 'one', to = x. nb: from, to, with and var must be left empty (their default).} \item{v1m0}{variance of 1 and mean of zero in one call.} \item{v.m.}{variance and mean, both free.} \item{v0m0}{variance and mean, both fixed at zero.} \item{v.m0}{variance free, mean fixed at zero.} \item{v0m.}{variance fixed at 0, mean free.} \item{fixedAt}{Equivalent to setting "free = FALSE, values = fixedAt"} \item{freeAt}{Equivalent to setting "free = TRUE, values = freeAt"} \item{firstAt}{First path is fixed at this value (free is ignored: warning if other than a single TRUE)} \item{unique.bivariate}{equivalent to setting from, and "connect = "unique.bivariate", arrows = 2". 
nb: from, to, and with must be left empty (their default)} \item{unique.pairs}{equivalent to setting "connect = "unique.pairs", arrows = 2" (don't use from, to, or with)} \item{fromEach}{Like all.bivariate, but with one head arrows. 'to' can be set.} \item{forms}{Build a formative variable. 'from' variables form the latent. Latent variance is fixed at 0. Loading of path 1 is fixed at 1. unique.bivariate between 'from' variables.} \item{Cholesky}{Treat \strong{Cholesky} variables as latent and \strong{to} as measured, and connect as in an ACE model.} \item{defn}{Implements a definition variable as a latent with zero variance & mean and labeled 'data.defVar'} \item{connect}{as in mxPath - nb: from and to must also be set.} \item{arrows}{as in mxPath - nb: from and to must also be set.} \item{free}{whether the value is free to be optimised} \item{values}{default value list} \item{labels}{labels for each path} \item{lbound}{lower bounds for each path value} \item{ubound}{upper bounds for each path value} \item{hasMeans}{Used in 'forms' case to know whether the data have means or not.} } \value{ \itemize{ \item 1 or more \code{\link[=mxPath]{mxPath()}}s } } \description{ This function is used to easily and compactly specify paths in models. In addition to \code{from} and \code{to}, it adds specialised parameters for variances (var), two headed paths (with) and means (mean). There are also new terms to describe fixing values: \code{fixedAt} and \code{fixFirst}. To give a couple of the most common, time-saving examples: \itemize{ \item \code{umxPath("A", with = "B", fixedAt = 1)} \item \code{umxPath(var = c("A", "B"), fixedAt = 1)} \item \code{umxPath(v.m. 
= manifests)} \item \code{umxPath(v1m0 = latents)} \item \code{umxPath(v1m0 = latents)} \item \code{umxPath(means = manifests)} \item \code{umxPath(fromEach = c('A',"B","C"), to = c("y1","y2"))} \item \code{umxPath(unique.bivariate = c('A',"B","C"))} \item \code{umxPath("A", to = c("B","C","D"), firstAt = 1)} } } \details{ \code{umxPath} introduces the following new words to your path-defining vocabulary: \code{with}, \code{var}, \code{cov}, \code{means}, \code{v1m0}, \code{v0m0}, \code{v.m0}, \code{v.m}, \code{fixedAt}, \code{freeAt}, \code{firstAt}, \code{unique.bivariate}, \code{unique.pairs}, \code{fromEach}, \code{Cholesky}, \code{defn}, \code{forms}. \code{with} creates covariances (2-headed paths): \code{umxPath(A, with = B)} Specify a variance for A with \code{umxPath(var = "A")}. Of course you can use vectors anywhere: \code{umxPath(var = c('N','E', 'O'))} To specify a mean, you just say: \code{umxPath(mean = "A")}, which is equivalent to \code{mxPath(from = "one", to = "A")}. To fix a path at a value, you can say: \code{umxPath(var = "A", fixedAt = 1)} The common task of creating a variable with variance fixed at 1 and mean at 0 is done thus: \code{umxPath(v1m0 = "A")} For free variance and means use: \code{umxPath(v.m. 
= "A")} \code{umxPath} exposes \code{unique.bivariate} and \code{unique.pairs}, So to create paths A<->A, B<->B, and A->B, you would say: \code{umxPath(unique.pairs = c('A',"B"))} To create paths A<->B, B<->C, and A<->C, you would say: \code{umxPath(unique.bivariate = c('A',"B","C"))} Creates one-headed arrows on the all.bivariate pattern \code{umxPath(fromEach = c('A',"B","C"))} Setting up a latent trait, you can scale with a fixed first path thus: \code{umxPath("A", to = c("B","C","D"), firstAt = 1)} To create Cholesky-pattern connections: \verb{umxPath(Cholesky = c("A1", "A2"), to c("var1", "var2"))} } \examples{ # ========================================== # = Examples of each path type, and option = # ========================================== umxPath("A", to = "B") # One-headed path from A to B umxPath("A", to = "B", fixedAt = 1) # same, with value fixed @1 umxPath("A", to = c("B", "C"), fixedAt = 1:2) # same, with more than 1 value umxPath("A", to = c("B","C"), firstAt = 1) # Fix only the first path, others free umxPath(var = "A") # Give a variance to A umxPath(var = "A", fixedAt = 1) # Give A variance, fixed at 1 umxPath(means = c("A","B")) # Create a means model for A: from = "one", to = "A" umxPath(v1m0 = "A") # Give "A" variance and a mean, fixed at 1 and 0 respectively umxPath(v.m. = "A") # Give "A" variance and a mean, leaving both free. umxPath(v0m0 = "W", label = c(NA, "data.W")) umxPath("A", with = "B") # using with: same as "to = B, arrows = 2" umxPath("A", with = "B", fixedAt = .5) # 2-head path fixed at .5 umxPath("A", with = c("B", "C"), firstAt = 1) # first covariance fixed at 1 umxPath(cov = c("A", "B")) # Covariance A <-> B umxPath(defn = "mpg") # create latent called def_mpg, with 0 mean * var, and label = "data.mpg" umxPath(fromEach = c('a','b'), to = c('c','d')) # a->c, a<->d, b<->c, b<->d umxPath(unique.bivariate = c('a','b','c')) # bivariate paths a<->b, a<->c, b<->c etc. 
umxPath(unique.pairs = letters[1:3]) # all distinct pairs: a<->a, a<->b, a<->c, b<->b, etc. umxPath(Cholesky = c("A1","A2"), to = c("m1", "m2")) # Cholesky \dontrun{ # A worked example data(demoOneFactor) manifests = names(demoOneFactor) m1 = umxRAM("One Factor", data = demoOneFactor, type= "cov", umxPath("G", to = manifests), umxPath(var = manifests), umxPath(var = "G", fixedAt = 1.0) ) umxSummary(m1, std = TRUE) require(umx) # ==================== # = Cholesky example = # ==================== # ====================================================================== # = 3-factor Cholesky (A component of a 5-variable 3-factor ACE model) = # ====================================================================== latents = paste0("A", 1:3) manifests = names(demoOneFactor) m1 = umxRAM("Chol", data = demoOneFactor, type = "cov", umxPath(Cholesky = latents, to = manifests), umxPath(var = manifests), umxPath(var = latents, fixedAt = 1) ) plot(m1, splines= FALSE) # ========================================================= # = Definition variable example.Not much use at present, = # = as def vars are not readily used in RAM models... = # = Working on something rational and intuitive. = # ========================================================= data(mtcars) m1 = umxRAM("manifest", data = mtcars, umxPath(v.m. = "mpg"), umxPath(defn = "mpg") ) } } \references{ \itemize{ \item \url{https://tbates.github.io} } } \seealso{ \itemize{ \item \code{\link[=mxPath]{mxPath()}} } Other Core Model Building Functions: \code{\link{umxMatrix}()}, \code{\link{umxModify}()}, \code{\link{umxRAM}()}, \code{\link{umxSuperModel}()}, \code{\link{umx}} } \concept{Core Model Building Functions}
25f849ebfbd5df5276505f5d56e20f75cb4c2c43
0886403758e751713d9c3031d7a9329addcbfd06
/EDA.R
859d56357be95dfbb3b7cfcff906881952d704f2
[]
no_license
antortjim/test_datrik
e3787d0fed2387fc418fa2df753854f18c7ff295
820604bb6e1d7af8ef54d0d6cac8de7614df6ec3
refs/heads/master
2020-04-03T03:51:33.517086
2018-10-28T20:00:46
2018-10-28T20:00:46
154,996,873
0
0
null
null
null
null
UTF-8
R
false
false
25,137
r
EDA.R
######################################################################
## EDA for technical test for Junior Data Scientist
## Author: Antonio Ortega
## Date: 28-10-2018
## This script is compatible with a knitr Rnw file
## *save and *load chunks save and load variables
## stored as RData files, useful during compilation of Rnw files
######################################################################

## ---- load_libraries ----
library(ggplot2)
library(viridis)
library(dplyr)
library(magrittr)
library(waffle)
library(tidyr)
library(tibble)
library(ade4)
library(data.table)
library(stringr)
library(FactoMineR)
library(cowplot)
library(pheatmap)
library(kableExtra)
# library(MASS)
# library(scales)

# Output directories used throughout the script.
plot_dir <- "plots"
output_data <- "proc_data"
rdata_dir <- "RData"
tables_dir <- "tables"

## ---- load_data ----
# Read the raw data and reorder columns: first 4 as-is, the middle
# feature columns alphabetically sorted, and the label Y last.
datos <- read.table("datos.csv", sep = ",", header=T, stringsAsFactors = F)
datos <- datos[, c(colnames(datos)[1:4], colnames(datos)[5:(ncol(datos)-1)] %>% sort,"Y")]
# Summarize the column-name prefixes (feature groups) into a LaTeX table.
write(x = kable(x = datos %>% colnames %>% strsplit(., split = "_") %>% lapply(., function(x) x[[1]]) %>% unlist %>% table %>% t, format = "latex", digits = 2), file = file.path(tables_dir, "data_summary.tex") )
# Parse the upper bound of the "[lo-hi)" age bracket into an integer.
datos$edad_integer <- str_match(string = datos$edad, pattern = "\\[\\d{2}-(\\d{2})\\)") %>% .[, 2] %>% as.integer
# Rows with a known Y form the training set; rows with NA Y are the test set.
train_set <- datos[!is.na(datos["Y"]),]
x_train <- train_set %>% select(-Y)
y_train <- select(train_set, Y) %>% mutate(Y=as.factor(Y))
test_set <- datos[is.na(datos["Y"]),]
x_test <- test_set %>% select(-Y)

## ---- load_data_save ----
datasets <- list(x_train = x_train, y_train = y_train, x_test = x_test)
save("datasets", file = file.path("RData", "datasets.RData"))

## ---- load_data_load ----
load(file = "RData/datasets.RData")
x_train <- datasets$x_train
x_test <- datasets$x_test
y_train <- datasets$y_train

## ---- plots1 ----
race <- as.character(x_train$raza)
race <- table(race)
# Decrease the counts to make them compatible with waffle()
# Counts are not important, just the proportion.
# NOTE(review): %any% operators bind tighter than /, so `%>% sort` here
# applies to max(race) (a scalar, making sort a no-op), not to the whole
# expression -- confirm the intent was just 200*race/max(race).
race <- 200*race/(max(race)) %>% sort
races <- names(race)
race <- race %>% as.numeric
names(race) <- races
race <- race %>% sort %>% rev
# Customize colors for more clear visualization
# politically incorrect :-)
race_colors <- c(
  "Caucasian"="orange",
  "AfricanAmerican"="black",
  "Hispanic"="olivedrab",
  "Other"="gray",
  "Asian"="yellow"
)[names(race)]
p1 <- waffle(race, rows = 25, flip = T, colors = race_colors)
ggsave(p1, filename = file.path(plot_dir, "race_waffle.png"))
p2 <- ggplot(data=x_train, mapping=aes(x = edad, y=..count../1e3))
p2 <- p2 + geom_bar() + labs(y = "kCounts", x="Age") + ggtitle("Age distribution")
ggsave(p2, filename = file.path(plot_dir, "age_histogram.png"))
p3 <- plot_grid(ncol=2, p1 + theme(legend.position = "top"), p2, labels = "AUTO")
ggsave(p3, filename = file.path(plot_dir, "visualize_categories.png"), width=14)

## ---- preprocess_function ----
# Preprocess the raw train/test feature tables into numeric/factor
# data.frames suitable for modelling.
#
# @param x_train data.frame of raw training features.
# @param x_test  data.frame of raw test features (encoded with the
#                categories/statistics learned from the training set only).
# @param etiquettes if TRUE, also one-hot encode the etiqueta_1..3 columns.
# @return named list(train = data.frame, test = data.frame).
preprocess_data <- function(x_train, x_test, etiquettes=FALSE) {
  # Every feature is preprocessed in groups according to their type (nominal, ordinal, etc)
  # Train and test sets have separate pieces of code for "hygiene",
  # even if it implies code cluttering
  # The result for each group is a list with the processed data for the training and test set respectively,
  # in data.frame format
  if(etiquettes) {
    ## Preprocess etiquettes
    print("Processing etiquettes")
    train_etiquettes <- x_train[, c("etiqueta_1", "etiqueta_2", "etiqueta_3")]
    test_etiquettes <- x_test[, c("etiqueta_1", "etiqueta_2", "etiqueta_3")]
    rownames(train_etiquettes) <- x_train$identificador
    rownames(test_etiquettes) <- x_test$identificador
    # Extract a vector storing all the etiquetas occurring in the TRAINING SET ONLY
    unique_etiquettes <- train_etiquettes %>% unlist %>% unique %>% sort
    # Encode the etiquetas in one-hot (count) format.
    # Initialize a template vector of etiqueta counts, all set to 0.
    etiquettes_template <- rep(0, length(unique_etiquettes))
    names(etiquettes_template) <- unique_etiquettes
    # For both train and test set
    etiquettes_proc <- lapply(
      list(train = train_etiquettes, test = test_etiquettes),
      function(y) {
        # For every row, apply the same function
        res <- y %>% as.matrix %>% apply(., 1, function(x) {
          # Count how many times each etiqueta appears
          # This returns a table with the counts of the etiquetas appearing in this individual
          # but not the ones not appearing (which obv should be 0)
          local_count <- table(x)
          # Start from the all-zeros template.
          et <- etiquettes_template
          # Drop any etiquette that's not in the training set. Makes sense when analyzing test set
          local_count <- local_count[names(local_count) %in% names(et)]
          # Set the counts of the found etiquetas to the right count
          et[names(local_count)] <- local_count
          return(et)
        }) %>% t %>% unlist
        # Format the colnames of the etiquetas so they start with the et_ prefix
        colnames(res) <- paste0("et_", colnames(res))
        # Make the data structure a data.frame of factors
        res <- res %>% apply(.,2,as.factor) %>% as.data.frame
        return(res)
      })
  }
  ## Preprocess nominals
  print("Processing nominals")
  # Drop nominal 4
  # Make one-hot encoding using acm.disjonctif
  train_nominals_one_hot <- x_train %>% select(nominal_1:nominal_3) %>% acm.disjonctif() %>% apply(.,2,as.factor) %>% as.data.frame
  test_nominals_one_hot <- x_test %>% select(nominal_1:nominal_3) %>% acm.disjonctif() %>% apply(.,2,as.factor) %>% as.data.frame
  # Drop the nominal categories not present in the training set,
  # and add all-zero columns for training categories missing in test.
  missing_nominal_test <- colnames(train_nominals_one_hot)[!colnames(train_nominals_one_hot) %in% colnames(test_nominals_one_hot)]
  missing_test <- as.data.frame(matrix(0, nrow=nrow(x_test), ncol=length(missing_nominal_test)))
  colnames(missing_test) <- missing_nominal_test
  test_nominals_one_hot <- cbind(test_nominals_one_hot, missing_test)[colnames(train_nominals_one_hot)]
  # Make the list
  nominals_proc <- list(train = train_nominals_one_hot, test = test_nominals_one_hot)
  ## Preprocess drugs (farmacos)
  print("Processing drugs")
  # Select the farmaco features
  train_drugs <- x_train[, grep(pattern = "farmaco", x = colnames(x_train))]
  test_drugs <- x_test[, grep(pattern = "farmaco", x = colnames(x_test))]
  # Drop drugs with no variability (non-informative)
  train_drugs <- train_drugs[,train_drugs %>% apply(.,2,function(x) length(unique(x))) > 1]
  test_drugs <- test_drugs[, colnames(train_drugs)]
  # Replace the dosage strings with ordered integers and make the list.
  drugs_proc <- list(train = train_drugs, test = test_drugs) %>% lapply(function(x) {
    x[x=="No"] <- "-1"
    x[x=="Down"] <- "0"
    x[x=="Steady"] <- "1"
    x[x=="Up"] <- "2"
    x <- x %>% apply(.,2, as.integer) %>% as.data.frame
  })
  ## Preprocess ordinal
  print("Processing ordinals")
  # Select the features
  train_ordinal <- x_train %>% select(ordinal_1:ordinal_2)
  test_ordinal <- x_test %>% select(ordinal_1:ordinal_2)
  # Replace the ordered category strings with integers and make the list.
  ordinals_proc <- lapply(list(train = train_ordinal, test = test_ordinal), function(x) {
    x[x=="None"] <- "0"
    x[x=="Norm"] <- "1"
    x[x==">7" | x==">200"] <- "2"
    x[x==">8" | x==">300"] <- "3"
    x <- x %>% apply(.,2,as.integer) %>% as.data.frame
    x
  })
  ## Preprocess binary
  print("Processing binaries")
  # Select the binary features and make them factors of 0 and 1.
  # NOTE(review): apply() coerces the data.frame to a matrix, so the
  # as.factor() inside is undone; the final apply/as.data.frame output
  # should be checked for the intended factor columns.
  train_binary <- x_train %>% select(binary_1:binary_3) %>% apply(.,2, function(x) as.factor(as.integer(as.factor(x)) - 1)) %>% as.data.frame
  test_binary <- x_test %>% select(binary_1:binary_3) %>% apply(.,2, function(x) as.factor(as.integer(as.factor(x)) - 1)) %>% as.data.frame
  # Make the list
  binary_proc <- list(train = train_binary, test = test_binary)
  ## Preprocess counter
  print("Processing counters")
  # Just make the list (no need to modify them :))
  counter_proc <- list(
    train = x_train %>% select(counter_1:counter_7),
    test = x_test %>% select(counter_1:counter_7)
  )
  ## Preprocess race
  print("Processing race")
  # Make the list while one-hot encoding
  # the same way as nominals
  race_proc <- list(
    train = x_train %>% select(raza) %>% acm.disjonctif() %>% apply(.,2,as.factor) %>% as.data.frame,
    test = x_test %>% select(raza) %>% acm.disjonctif() %>% apply(.,2,as.factor) %>% as.data.frame
  )
  ## Preprocess sex
  print("Processing sex")
  # Select the sex feature and process them like binaries
  sex_proc <- list(
    train = x_train %>% select(sexo) %>% apply(.,2,function(x) as.factor(as.integer(as.factor(x)) - 1)) %>% as.data.frame,
    test = x_test %>% select(sexo) %>% apply(.,2,function(x) as.factor(as.integer(as.factor(x)) - 1)) %>% as.data.frame
  )
  ## Preprocess age
  print("Processing age")
  # Select the age feature in integer format
  train_age <- x_train$edad_integer
  test_age <- x_test$edad_integer
  # Compute the mean age in the TRAINING SET ONLY
  train_age_mean <- mean(train_age, na.rm = T)
  # Impute missing ages using this mean on both sets
  train_age[is.na(train_age)] <- train_age_mean
  test_age[is.na(test_age)] <- train_age_mean
  # Make the list
  age_proc <- list(train = data.frame(edad_integer = train_age), test = data.frame(edad_integer = test_age))
  ## CBIND all the features for each dataset returning a single data.frame
  ## Optionally include etiquetas (huge amount of one-hot columns)
  ## Make a list for each dataset
  datasets <- c("train", "test")
  if(etiquettes) {
    processed_datasets <- lapply(datasets, function(x) {
      cbind(race_proc[[x]], sex_proc[[x]], age_proc[[x]], nominals_proc[[x]], counter_proc[[x]], drugs_proc[[x]], ordinals_proc[[x]], binary_proc[[x]], etiquettes_proc[[x]])
    })
  } else {
    processed_datasets <- lapply(datasets, function(x) {
      cbind(race_proc[[x]], sex_proc[[x]], age_proc[[x]], nominals_proc[[x]], counter_proc[[x]], drugs_proc[[x]], ordinals_proc[[x]], binary_proc[[x]])
    })
  }
  # Name the list and return it
  names(processed_datasets) <- datasets
  return(processed_datasets)
}

## Preprocess using the function above without and with etiquetas
## Store the index vectors of quantitative and qualitative features
## ---- preprocess ----
processed_datasets <- preprocess_data(x_train, x_test, etiquettes = FALSE)
proc_x_train <- processed_datasets$train
proc_x_test <- processed_datasets$test
# Column-name prefixes identifying quantitative vs qualitative features.
quanti <- c("farmaco", "ordinal", "edad_integer", "counter")
quali <- c("raza", "binary", "nominal", "sexo")
quanti_index <- lapply(quanti, function(x) grep(x = colnames(proc_x_train), pattern = x)) %>% unlist
quali_index <- lapply(quali, function(x) grep(x = colnames(proc_x_train), pattern = x)) %>% unlist
## ---- preprocess_etiqueta ----
processed_datasets <- preprocess_data(x_train, x_test, etiquettes = TRUE)
quanti_index_full <- lapply(quanti, function(x) grep(x = colnames(processed_datasets$train), pattern = x)) %>% unlist
quali_index_full <- lapply(c(quali, "et"), function(x) grep(x = colnames(processed_datasets$train), pattern = x)) %>% unlist
## Save the processed datasets with etiquetas to csv files here
write.table(x = processed_datasets$train, file = file.path(output_data, "x_train.csv"), sep = ",", row.names = x_train$identificador, quote = F, col.names = T)
write.table(x = processed_datasets$test, file = file.path(output_data, "x_test.csv"), sep = ",", row.names = x_test$identificador, quote = F, col.names = T)
## ---- preprocess_save ----
# rm(processed_datasets)
# Save all objects to an RData file
save(list = c("quanti", "quali", "quanti_index",
"quali_index", "quanti_index_full", "quali_index_full", "proc_x_train", "proc_x_test", "processed_datasets"), file = file.path(rdata_dir, "preprocessed.RData")) ## ---- preprocess_load ---- # Load them load(file = file.path(rdata_dir, "preprocessed.RData")) ## ---- PCA ---- # Compute a PCA of the quantitative features in the training set # with centering and scaling to give equal importance to all features res.pca <- prcomp(x = proc_x_train[, quanti_index], center = T, scale. = T) # Store in data.frame data for variance captured by each PC pca_barplot_data <- data.frame(PC = 1:length(res.pca$sdev), var = res.pca$sdev**2) # Store in data.frame the PCS, the label and the etiquetas (which are not present in the proc_x_train) pca_data <- cbind(x_train[, grep(pattern = "et", x = colnames(x_train), invert = T)], as.data.frame(res.pca$x), y_train) ## ---- PCA_visualization ---- # Create barplots showing the variance captured by each PC # and the cumulative variance p0 <- plot_grid(ncol = 2, rel_widths = c(1,1), ggplot(data = pca_barplot_data, aes(x=PC, y=var)) + geom_bar(stat="identity") + labs(y = "Variance"), ggplot(data = pca_barplot_data, aes(x=PC, y=cumsum(var)/sum(var))) + geom_bar(stat="identity") + labs(y = "Fraction cumulative variance") ) # Create PCA plots facetting and coloring with different categories/label p1 <- ggplot( data = pca_data, mapping = aes(x = PC1, y = PC2, col = raza) ) + geom_point(size=.1) + guides(col=F) + ggtitle("Raza") p2 <- ggplot( data = pca_data, mapping = aes(x = PC1, y = PC2, col = sexo) ) + geom_point(size=.1) + guides(col=F) + ggtitle("Sexo") p3 <- ggplot( data = pca_data, mapping = aes(x = PC1, y = PC2, col = edad_integer) ) + geom_point(size=.1) + guides(col=F) + ggtitle("Edad") p4 <- ggplot( data = pca_data, mapping = aes(x = PC1, y = PC2, col = Y) ) + geom_point(size=.1) + guides(col=F) + ggtitle("Y") p <- plot_grid(ncol=4, p1,p2,p3,p4, labels="AUTO") ggsave( filename = file.path(plot_dir, "PCA_multicategory.png"), plot = p, 
width=20 ) ## ---- MCA_functions ---- # Utility functions to prepare data for MCA and handle its output prepare_projection_data <- function(x, y, proc_x, quali_index, quanti_index) { res.mca <- MCA( X = proc_x, ncp = ncp, quanti.sup = quanti_index, graph = F ) cats <- apply(proc_x[,quali_index], 2, function(x) nlevels(as.factor(x))) mca_df <- extract_mca_data(proc_x, x, cats, res.mca) mca_df$obs$Y <- y$Y sup_proj_data <- cbind(proc_x[, quanti_index], mca_df$obs %>% select(`Dim 1`:`Dim 5`)) colnames(sup_proj_data)[colnames(sup_proj_data) %>% grep(pattern = "Dim")] <- paste0("MCA_", 1:ncp) return(list(mca = res.mca, vars = mca_df$vars, obs = mca_df$obs, proj=sup_proj_data)) } extract_mca_data <- function(proc_data, x, cats, mca) { mca_vars_df <- data.frame(mca$var$coord, Variable = rep(names(cats), cats)) mca_vars_df$Type <- mca_vars_df$Variable %>% as.character %>% gsub(pattern = "\\.", replacement = "_", x = .) %>% strsplit(x = ., split = "_") %>% lapply(function(x) x[[1]]) %>% unlist mca_obs_df <- cbind(mca$ind$coord, edad_integer = x[,"edad_integer"], x[, lapply(quali, function(y) grep(x = colnames(x), pattern = y)) %>% unlist] ) return(list(vars = mca_vars_df, obs = mca_obs_df)) } make_mca_plots <- function(train, test, point_size, prefix="") { p1 <- ggplot(data=train$vars, aes(x = Dim.1, y = Dim.2, col = Type)) p1 <- p1 + geom_hline(yintercept = 0, colour = "gray70") p1 <- p1 + geom_vline(xintercept = 0, colour = "gray70") p1 <- p1 + geom_point(size=2) p1 <- p1 + ggtitle("MCA plot of variables using R package FactoMineR") p1 <- p1 + scale_color_viridis(discrete = T) p2 <- ggplot(data = train$obs, aes(x = `Dim 1`, y = `Dim 2`, col = raza)) p2 <- p2 + geom_point(size=point_size) + facet_wrap(~raza) p2 <- p2 + ggtitle("MCA plot facetted by race") p3 <- ggplot(data = train$obs, aes(x = `Dim 1`, y = `Dim 2`)) p3 <- p3 + geom_point(size=point_size) + facet_wrap(~Y) p3 <- p3 + ggtitle("MCA plot facetted by Y") p4 <- ggplot(data = test$obs, aes(x = `Dim 1`, y = `Dim 
2`)) p4 <- p4 + geom_point(size=point_size) + ggtitle("MCA plot of the test set") p5 <- ggplot(data = train$obs, aes(x = `Dim 1`, y = `Dim 2`, col = raza)) p5 <- p5 + geom_point(size=point_size) + facet_wrap(~Y) p5 <- p5 + ggtitle("MCA plot facetted by Y") p6 <- ggplot(data = train$obs, aes(x = `Dim 1`, y = `Dim 2`, col = as.factor(edad_integer))) p6 <- p6 + geom_point(size=point_size) + facet_wrap(~Y) p6 <- p6 + ggtitle("MCA plot facetted by Y") p6 <- p6 + scale_color_discrete() + guides(col = guide_legend(title = "Edad")) p7 <- plot_grid(p3, p4, ncol=2) # Contribution plot # Extract the contributions frm the mca object mca_contribution <- train$mca$var$contrib[,1] # Compute the variable type for nice coloring in barplot type <- strsplit( gsub(x = names(mca_contribution), pattern = "\\.", replacement = "_"), split = "_") %>% lapply(., function(x) x[[1]]) %>% unlist # Make dataframe mca_contribution_df <- data.frame(variable = names(mca_contribution), type = type, contrib = mca_contribution, stringsAsFactors = F) %>% arrange(-contrib) # Sort features by their contribution mca_contribution_df$variable <- factor(mca_contribution_df$variable, levels = mca_contribution_df$variable) p8 <- ggplot(mca_contribution_df %>% head(30), aes(x = variable, y = contrib, fill = type)) + geom_bar(stat="identity") + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + scale_fill_viridis(discrete = T) ggsave(width=7, height=7, plot = p1, filename = file.path(plot_dir, paste0(prefix, "mca_variables.png"))) ggsave(width=7, height=7, plot = p2, filename = file.path(plot_dir, paste0(prefix, "mca_obs_race_facet.png"))) ggsave(width=7, height=7, plot = p3, filename = file.path(plot_dir, paste0(prefix, "mca_obs_Y_facet.png"))) ggsave(width=7, height=7, plot = p4, filename = file.path(plot_dir, paste0(prefix, "mca_obs_test.png"))) ggsave(width=7, height=7, plot = p5, filename = file.path(plot_dir, paste0(prefix, "mca_obs_Y_facet_raza_col.png"))) ggsave(width=7, height=7, plot = p6, 
filename = file.path(plot_dir, paste0(prefix, "mca_obs_Y_facet_edad_col.png"))) ggsave(width=10, height=7, plot = p7, filename = file.path(plot_dir, paste0(prefix, "mca_obs_Y_facet_all.png"))) ggsave(width=14, height=7, plot = p8, filename = file.path(plot_dir, paste0(prefix, "mca_contrib.png"))) return(list(p1,p2,p3,p4,p5,p6,p7)) } ## ---- MCA ---- # Perform MCA as implemented in FactoMineR with five dimensions as output # both with and without etiquetas # DANGER! THIS CHUNK CAN TAKE SEVERAL MINUTES TO RUN, # SPECIALLY THE PART WITH ETIQUETAS (many more features) ncp <- 5 # Without etiquetas train_sup_proj_data <- prepare_projection_data(x_train, y_train, proc_x_train, quali_index, quanti_index) unique_cols <- apply(proc_x_test, 2, function(x) length(unique(x)) == 1) %>% which quanti_index_mca <- lapply(quanti, function(x) grep(x = colnames(proc_x_test[,-unique_cols]), pattern = x)) %>% unlist quali_index_mca <- lapply(quali, function(x) grep(x = colnames(proc_x_test[,-unique_cols]), pattern = x)) %>% unlist test_sup_proj_data <- prepare_projection_data(x_test, NULL, proc_x_test[, -unique_cols], quali_index_mca, quanti_index_mca) # With etiquetas train_sup_proj_data_full <- prepare_projection_data(x_train, y_train, processed_datasets$train, quali_index_full, quanti_index_full) unique_cols <- apply(processed_datasets$test, 2, function(x) length(unique(x)) == 1) %>% which quanti_index_full_mca <- lapply(quanti, function(x) grep(x = colnames(processed_datasets$test[,-unique_cols]), pattern = x)) %>% unlist quali_index_full_mca <- lapply(c("et", quali), function(x) grep(x = colnames(processed_datasets$test[,-unique_cols]), pattern = x)) %>% unlist test_sup_proj_data_full <- prepare_projection_data(x_test, NULL, processed_datasets$test[, -unique_cols], quali_index_full_mca, quanti_index_full_mca) ## ---- MCA_save ---- save(list = c("train_sup_proj_data", "test_sup_proj_data", "train_sup_proj_data_full", "test_sup_proj_data_full"), file = file.path(rdata_dir, 
"mca_dfs.RData")) ## ---- MCA_load ---- load(file.path(rdata_dir, "mca_dfs.RData")) ## ---- MCA_visualization ---- # Visualize the MC results and the contribution of each variable # to the found dimensions point_size <- .5 # MCA results plots plots_without <- make_mca_plots(train_sup_proj_data, test_sup_proj_data, point_size) plots_with <- make_mca_plots(train_sup_proj_data_full, test_sup_proj_data_full, point_size, prefix = "etiqueta_") # Combine some plots for easier integration in latex ggsave(filename = file.path(plot_dir, "mca_variables_combined.png"), plot = plot_grid(plots_without[[1]], plots_with[[1]], labels="AUTO"), width = 14 ) ggsave(filename = file.path(plot_dir, "mca_obs_Y_facet_all_combined.png"), plot = plot_grid(plots_without[[7]], plots_with[[7]], nrow = 2, labels="AUTO"), width = 14, height = 14 ) ## ---- supervised_projections ---- # lda_res <- lda(formula = Y ~ ., data = cbind(train_sup_proj_data_full$proj, Y = y_train$Y)) # prop.lda <- lda_res$svd^2/sum(lda_res$svd^2) # # # extra <- train_sup_proj_data_full$proj[, !(colnames(train_sup_proj_data_full$proj) %in% colnames(test_sup_proj_data_full$proj))] # # extra <- extra %>% apply(.,2,function(x) rep(0, length(x))) # # # # test_data_lda <- cbind(test_sup_proj_data_full$proj, extra)[colnames(train_sup_proj_data_full$proj)] # # plda <- predict(object = lda_res, # newdata = train_sup_proj_data_full$proj) # # dataset <- data.frame(Y = y_train$Y, lda = plda$x) # ggplot(dataset) + geom_point(aes(lda.LD1, lda.LD2, colour = Y, shape = Y), size = 2.5) + # labs(x = paste("LD1 (", percent(prop.lda[1]), ")", sep=""), # y = paste("LD2 (", percent(prop.lda[2]), ")", sep="")) ## lda <- plotRLDF( ## t(sup_proj_data), ## labels.y = y_train$Y, ## trend = TRUE, robust = TRUE ## ) ## str(lda) ## Perform PLSDA # plsda <- DiscriMiner::plsDA(variables = scale(sup_proj_data), # group = y_train$Y, # autosel = FALSE, comps = 2) # # # Lots of output, we are interested in the components # summary(plsda) # # 
qplot(data=as.data.frame(plsda$components), x=t1, y=t2, geom=c("point"), color=y_train$Y) ## ---- heatmap ---- # Select only some individuals because pheatmap cannot handle to many individuals idx <- sample(x = 1:nrow(train_sup_proj_data$proj),size = 1e3) heatmap_data <- train_sup_proj_data$proj[idx, ] %>% as.matrix heatmap <- pheatmap(heatmap_data, annotation_row = cbind(select(x_train[idx, ], raza, sexo, edad_integer), Y = y_train[idx,"Y"]), scale="row", filename = file.path(plot_dir, "heatmap.png")) # Export counts of categories in counter variables to a table to be integrated in the latex report write(x = kable(x = train_sup_proj_data$proj[,train_sup_proj_data$proj %>% colnames %>% grep(pattern = "counter", x = .)] %>% lapply(., function(x) length(table(x))) %>% unlist %>% sort %>% rev), file = file.path(tables_dir, "counters_overview.tex")) ## ---- heatmap_save ---- save(list = c("heatmap"), file = file.path(rdata_dir, "heatmap.RData")) ## ---- heatmap_load ---- load(file.path(rdata_dir, "heatmap.RData")) ## ---- export_data ---- # Save processed datasets without one-hot encoding data but with mca features write.table(x = train_sup_proj_data$proj, file = file.path(output_data, "x_train_mca.csv"), sep = ",", row.names = x_train$identificador, quote = F, col.names = T) write.table(x = test_sup_proj_data$proj, file = file.path(output_data, "x_test_mca.csv"), sep = ",", row.names = x_test$identificador, quote = F, col.names = T) # Save processed datasets with one-hot encoding data (full) and the mca features found with them too write.table(x = train_sup_proj_data_full$proj, file = file.path(output_data, "x_train_mca_full.csv"), sep = ",", row.names = x_train$identificador, quote = F, col.names = T) write.table(x = test_sup_proj_data_full$proj, file = file.path(output_data, "x_test_mca_full.csv"), sep = ",", row.names = x_test$identificador, quote = F, col.names = T) # Save label to a separate file write.table(x = y_train, file = file.path(output_data, 
"y_train.csv"), sep = ",", row.names = x_train$identificador, quote = F, col.names = T) ## ---- session_info ---- sessionInfo()
38f2031ecf12d0f7918953f080be5d6568e50730
f67901840f79345b380f6b99909df061e91924c3
/man/Ypop.Rd
b700756d5577fb02d4e2550bf97f0c5a7303b0eb
[]
no_license
gpapadog/Interference
234aeffb0e1215d1d658b5fde5d5f63c80609a6c
06683213bc5282a64274978df4b476735c8802b3
refs/heads/master
2022-11-13T13:50:04.596318
2022-10-26T16:50:31
2022-10-26T16:50:33
100,611,280
9
2
null
2018-10-11T15:52:51
2017-08-17T14:18:24
R
UTF-8
R
false
true
1,336
rd
Ypop.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Ypop_function.R \name{Ypop} \alias{Ypop} \title{Estimates and asymptotic variance of the population average potential outcome for known or estimated propensity score model.} \usage{ Ypop( ygroup, ps = c("true", "estimated"), scores = NULL, dta = NULL, use = "everything" ) } \arguments{ \item{ygroup}{An array including the group average potential outcome estimates where the dimensions correspond to group, individual treatment and value of alpha.} \item{ps}{String. Can take values 'true', or 'estimated' for known or estimated propensity score. Defaults to 'true'.} \item{scores}{A matrix with rows corresponding to the parameters of the propensity score model and columns for groups. Includes the score of the propensity score evaluated for the variables of each group. Can be left NULL when ps is set to 'true'.} \item{dta}{The data set including the variable neigh. Defaults to NULL. Can be left NULL when the true propensity score is used.} \item{use}{Whether the data with missing values will be used for the estimation of the variance. Argument for cov() function. Defaults to 'everything'.} } \description{ Estimates and asymptotic variance of the population average potential outcome for known or estimated propensity score model. }
dcc1698ab95f9cea2f9269239ea6c21dbbdfb585
4160831a7bd3950b2356158285f68f9f66715d5a
/learning_R.r
cb1893edf33398679aad8ed8a79ca11b231b8b3f
[]
no_license
yash-17/mlr
c08e47f171874ee95519caf2dc11a09389f7d364
805b9f1f20bbe3f63a4fd34e52a679ff2af4c705
refs/heads/master
2021-01-01T03:34:38.904368
2016-04-28T18:48:45
2016-04-28T18:48:45
56,141,138
0
0
null
null
null
null
UTF-8
R
false
false
952
r
learning_R.r
# Beginner R tutorial script.
# To run a file, cd to the directory and run:
#   Rscript <filename.r>
# The '#' symbol starts a comment.
# The '<-' symbol is used for assignment.
# Two statements, print() and cat(), can be used to print; the difference is
# that print() appends "\n" automatically while cat() does not.
a <- 42
A <- a * 2 # R is case sensitive: 'a' and 'A' are distinct variables
print(a)
cat(A, "\n") # 84 is concatenated with "\n"
if(A>a) # true, 84 > 42
{
  cat(A, ">", a, "\n")
}

# Functions in R: Square returns its argument squared
Square <- function(x)
{
  return(x^2)
}
Square(4)          # auto-printed at top level when run via Rscript
print(Square(4))
print(Square(x=4)) # same thing, using a named argument

# Counts down from x to 0, printing each value with a one-second pause
countdown <- function(x){
  while(x>0){
    print(x)
    x <- x-1;
    Sys.sleep(1) # sleeps for some time (1 second)
  }
  print(x) # finally prints 0 after the loop exits
}
countdown(6)

# User input: read an integer from the console, re-prompting (recursively)
# until the input consists only of digits
readInt <- function(){
  n <- readline("Enter an integer: ")
  if(!grepl("^[0-9]+$",n)){ # check the input looks like a non-negative integer
    return(readInt())
  }
  return(as.integer(n))
}
if(interactive()){
  print(readInt()) # This doesn't run if the session is not interactive.
}
# To make the session interactive, enter the R shell and run source("filename.r")
63be361ee618c624b7e040a01e16b4e5ca663637
ec3ce7311c62dc411f02e26f34d87ee9d90e4040
/utils/get_combined_linelist.R
e55219230504cf25c0416d536e2c6fb79b99ef45
[ "MIT" ]
permissive
irtyamine/CovidGlobalNow
c3f21799f5bd28ee00d79ef0d6d4b8bc04819fce
26fa769b1691a7b1705c4c80ab2bfe76f85d9cc0
refs/heads/master
2022-04-13T00:01:03.944158
2020-04-04T11:59:07
2020-04-04T11:59:07
null
0
0
null
null
null
null
UTF-8
R
false
false
886
r
get_combined_linelist.R
library(magrittr)  # provides %>% used below

#' Get a combined linelist based on multiple countries data
#'
#' Fetches the international linelist for each country of interest via
#' \code{NCoVUtils::get_international_linelist()}, row-binds the results into
#' a single data frame, and drops rows with a missing \code{report_delay}.
#'
#' @return A data frame combining all per-country linelists, with no missing
#'   values in \code{report_delay}.
get_combined_linelist <- function() {
  # Countries whose linelists are combined.
  # BUGFIX: the original chain requested "Autria", a typo for "Austria",
  # so Austria's linelist was never actually retrieved.
  countries <- c(
    "Germany", "Italy", "France", "Spain", "Austria",
    "Netherlands", "Belgium", "United States", "Canada", "Australia"
  )

  # Fetch each country's linelist, stack them, and keep only rows where
  # the reporting delay is known.
  countries %>%
    lapply(NCoVUtils::get_international_linelist) %>%
    dplyr::bind_rows() %>%
    tidyr::drop_na(report_delay)
}
40a7f13e7e56930e9fe50af46f05c9a3b1481629
c2165d9ec108bbb0d8fe2f291d135215d0792c22
/parse_brokers.R
6f5cd0072e27d2d7eff2ef6ff5cbcd316a278b9f
[ "Giftware" ]
permissive
mcarlo/MICSP
cd12e18c4672ccf21dc14a66fa7c14b864966f25
6d82394b70031ae6c87e9a7fec710b7d3df16fe0
refs/heads/master
2021-01-10T18:46:42.724698
2014-07-25T23:29:44
2014-07-25T23:29:44
null
0
0
null
null
null
null
UTF-8
R
false
false
2,722
r
parse_brokers.R
# This script reads a file listing mutual funds' brokerage platforms and creates
# a data-base like file, appropriate for querying or using in an Excel pivot
# table
oldwd <- getwd()
setwd("~/GSpending/platform_tickers") # restored at the bottom of the script
# read MStar file. Column names are tickers. Row 1 cells are fund names.
# all other rows are Broker names carrying that fund
funds_platforms <- read.csv("~/GSpending/platform_tickers/funds_platforms.csv")
tickers <- colnames(funds_platforms)
funds <- t(funds_platforms[1, ])       # first row = fund names, one per ticker
fundTable <- cbind(tickers, funds)     # lookup table: ticker -> fund name
numFunds <- length(tickers)
# Replace slash characters with dashes (slashes would be unsafe in file names)
reformatted <- unlist(lapply(funds_platforms[-1, ], function(x) gsub("/", "-", x)))
# Begin "transposing" matrix, with brokers as row names and tickers for columns
numRows <- length(funds_platforms[,1]) - 1
platforms <- matrix(reformatted, nrow = numRows, ncol = numFunds)
system.time(brokers <- unique(as.vector(platforms)))
brokerList <- sort(unique(brokers)) # find all unique broker names
brokerList <- brokerList[2:length(brokerList)] # remove blank (sorts first)
numBrokers <- length(brokerList)
# set up matrix for cross referencing brokers and funds (0 = not carried)
brokerMatrix <- matrix(rep(0, numFunds * numBrokers),
                       nrow = numBrokers, ncol = numFunds)
rownames(brokerMatrix) <- brokerList
colnames(brokerMatrix) <- tickers
# Turn 0s into 1s wherever a ticker is on that broker's platform
# NOTE(review): `ticker` is assigned but never used inside this loop.
for (j in 1:numFunds){
  ticker <- tickers[j]
  brokerMatrix[ , j] <- 1 * (rownames(brokerMatrix) %in% platforms[, j])
}

# Write a tab-separated "<broker>_list.txt" file listing every fund (name and
# ticker, sorted by ticker) available on the given broker's platform.
# Reads brokerMatrix/fundTable from the global environment.
write_tickers <- function(broker){
  # select only tickers on that broker's platform (cell == 1)
  tickerList <- colnames(brokerMatrix)[brokerMatrix[broker,] == 1]
  fundList <- as.vector(fundTable[fundTable[, 1] %in% tickerList, 2])
  numTickers <- length(tickerList)
  # want to write a "list file," consisting of "Name"[tab]"Ticker"
  unsortedList <- matrix(cbind(fundList, tickerList), nrow = numTickers, ncol = 2)
  listFile <- matrix(unsortedList[order(unsortedList[,2]), ], nrow = numTickers, ncol = 2)
  # name the output file "<broker>_list.txt"
  nextFile <- paste(broker, "_list.txt", sep = "")
  write.table(listFile, file = nextFile, sep = "\t", row.names = F, col.names=F, quote = F)
}

fileSeq <- 1:numBrokers
# Generate one list file per broker (side effects only)
genFiles <- function(){
  for (i in fileSeq){
    nextBroker <- brokerList[i]
    write_tickers(nextBroker)
  }
}
system.time(genFiles()) # takes under a second
setwd(oldwd)

# Ad-hoc exploration notes left by the author:
#brokerMatrix[16:20, 1:10] # Citi #19
#brokerMatrix[150:153,1] # SunAmerica #153
#rownames(brokerMatrix)[154:180] # WFA 176:179
#trickyRows <- c(19, 153, 176:179)
#rownames(brokerMatrix)[trickyRows]
b39fdfe3831bd4ba196ff01ac39d620821191e16
4b48647555feaac4cbb9bb4864db20e6e40a8980
/R/geom-sidehistogram.r
d2f0607eb1639e48ab8e36e53fcd000fe3ed90d1
[ "MIT" ]
permissive
seifudd/ggside
8d9fdca5b042f9528c5dc4ef5ce0d7f64537f730
442c83db4cca57bc9cc962be563fbd7df0463d86
refs/heads/master
2023-07-12T20:29:20.936007
2021-08-16T19:30:55
2021-08-16T19:30:55
null
0
0
null
null
null
null
UTF-8
R
false
false
2,863
r
geom-sidehistogram.r
#' Side Histograms
#'
#' The [xside] and [yside] variants of \link[ggplot2]{geom_histogram} is
#' [geom_xsidehistogram] and [geom_ysidehistogram]. These variants both inherit
#' from \link[ggplot2]{geom_histogram} and only differ on where they plot
#' data relative to main panels.
#'
#' @section Aesthetics:
#' `geom_*sidehistogram` uses the same aesthetics as [geom_*sidebar()]
#'
#' @inheritParams ggplot2::geom_histogram
#'
#' @aliases geom_*sidehistogram
#' @return XLayer or YLayer object to be added to a ggplot object
#' @examples
#'
#' p <-ggplot(iris, aes(Sepal.Width, Sepal.Length, color = Species, fill = Species)) +
#'   geom_point()
#'
#' #sidehistogram
#' p +
#'   geom_xsidehistogram(binwidth = 0.1) +
#'   geom_ysidehistogram(binwidth = 0.1)
#' p +
#'   geom_xsidehistogram(aes(y = after_stat(density)), binwidth = 0.1) +
#'   geom_ysidehistogram(aes(x = after_stat(density)), binwidth = 0.1)
#' @export
geom_xsidehistogram <- function(mapping = NULL, data = NULL,
                                stat = "bin", position = "stack",
                                ...,
                                binwidth = NULL,
                                bins = NULL,
                                na.rm = FALSE,
                                orientation = "x",
                                show.legend = NA,
                                inherit.aes = TRUE) {
  # Fill in default aesthetics for the chosen stat/orientation
  # (default_stat_aes is a package-internal helper -- defined elsewhere).
  mapping <- default_stat_aes(mapping, stat, orientation)
  # Build a standard ggplot2 layer, but with the ggside geom and layer class
  # so the panel is drawn beside the main plot rather than inside it.
  l <- layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomXsidebar,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      binwidth = binwidth,
      bins = bins,
      na.rm = na.rm,
      orientation = orientation,
      pad = FALSE, # matches geom_histogram's default (no zero-count padding)
      ...
    ),
    layer_class = XLayer
  )
  # Tag the layer so ggside's ggplot_add machinery can recognize it
  structure(l, class = c("ggside_layer",class(l)))
}

#' @rdname geom_xsidehistogram
#' @aliases geom_ysidehistogram
#' @export
geom_ysidehistogram <- function(mapping = NULL, data = NULL,
                                stat = "bin", position = "stack",
                                ...,
                                binwidth = NULL,
                                bins = NULL,
                                na.rm = FALSE,
                                orientation = "y",
                                show.legend = NA,
                                inherit.aes = TRUE) {
  # Identical to geom_xsidehistogram except for the y-side geom, layer
  # class, and default orientation.
  mapping <- default_stat_aes(mapping, stat, orientation)
  l <- layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomYsidebar,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      binwidth = binwidth,
      bins = bins,
      na.rm = na.rm,
      orientation = orientation,
      pad = FALSE, # matches geom_histogram's default (no zero-count padding)
      ...
    ),
    layer_class = YLayer
  )
  structure(l, class = c("ggside_layer",class(l)))
}
3f10c0aad3a52612c2a156522855f3fb36c7a75d
cad42fa7ce02225eb60eceff6042e9afc56f0fae
/confidence_intervals/outbred.pruned_JZ.R
d86150fa5a92656b60d18e74e68abaeb22108ca6
[]
no_license
jzou1115/MegaAnalysis_CFW_Code
dd27c111206c7fd92666ab274f1471316374ff45
972fb6bcb98389df8cb39286b65319a1be1bc01e
refs/heads/master
2021-01-02T12:00:01.353300
2020-09-01T23:52:26
2020-09-01T23:52:26
239,609,518
0
0
null
null
null
null
UTF-8
R
false
false
4,345
r
outbred.pruned_JZ.R
#' Simulate the positional confidence interval of a QTL.
#'
#' For the QTL at (chr, bp) this repeatedly (a) samples a nearby correlated
#' SNP, (b) simulates a phenotype in which that SNP is causal (permuted
#' lead-SNP residuals plus the SNP's dosage effect), and (c) re-scans the
#' window to record where the strongest association lands relative to the
#' assumed causal SNP. The distribution of `bp.delta` over the simulations
#' characterises the mapping resolution around this QTL.
#'
#' @param chr chromosome of the QTL being simulated
#' @param bp  base-pair position of the QTL (presumably the lead SNP -- the
#'            original author's notes also flag this as unconfirmed)
#' @param mm  model object providing: `per.chromosome` flag, `mm.chr`/`mm.all`
#'            adjustments (each with `ids`, `y`/`y.original` and, for mixed
#'            models, a rotation `multiplier`), and a `mixed.model` flag
#' @param dosages list with `$ids`, `$map` (with `$bp`) and `$dosages`
#'            (dosage matrix; orientation inferred from the t() calls below --
#'            TODO confirm SNPs x samples on input)
#' @param n.sim number of causal-SNP draws to simulate
#' @param window full width of the scan window around bp (base pairs)
#' @param out.dir accepted but never referenced inside this function
#' @param mc.cores number of cores; passed through to mclapply below
#' @return an 8 x (n.sim+1) matrix (the transposed per-simulation table), or
#'         NULL if `bp` cannot be found in the map
simulate.qtl.confidence.interval <- function( chr, bp, mm, dosages, n.sim=1000, window=5.0e6, out.dir="./", mc.cores=48 ) {

  did = dosages$ids
  map = dosages$map

  # Pick the phenotype/genotype adjustment: chromosome-specific (e.g. for
  # leave-one-chromosome-out corrections) or a single shared one.
  if ( mm$per.chromosome ) { # use chromosome-specific adjustments
    mm.use = mm$mm.chr[[chr]]
    cat("Using per.chromosome\n")
  } else {
    mm.use = mm$mm.all # use a common adjustment
  }

  # Align the dosage columns with the samples/ids present in the adjustment.
  use = match( mm.use$ids, did, nomatch=0)
  D = dosages$dosages
  D = D[,use]

  # Restrict to SNPs inside the full search window around the QTL.
  snps.subset2 = which( abs(map$bp -bp) < window ) # outer subset defining the search window
  D.qtl = D[snps.subset2,]
  map.qtl = map[snps.subset2,]
  bp.idx = which( map.qtl$bp == bp )
  if ( length(bp.idx) == 0 ) {
    warning("ERROR coordinate" ,chr, bp , "not found\n")
    return(NULL);
  }
  n = nrow(D.qtl)

  # Correlate every SNP's dosage with the phenotype, and fit the lead SNP to
  # obtain residuals for the simulations below.
  if ( mm$mixed.model == TRUE ) {
    # Rotate dosages into the mixed-model whitened space before correlating.
    D.qtl = t(mm.use$multiplier) %*% t(D.qtl) # dimension nsub (rows) * nsnps (cols)
    r = cor( D.qtl, mm.use$y )
    lm.snp = lm( mm.use$y ~ D.qtl[,bp.idx],y=TRUE)
  } else {
    D.qtl = t(D.qtl)
    r = cor( D.qtl, mm.use$y.original )
    lm.snp = lm( mm.use$y.original ~ D.qtl[,bp.idx], y=TRUE)
  }
  r2 = r*r
  # cor() returns NA for zero-variance (monomorphic) SNPs; treat as no signal.
  r2 = ifelse(is.na(r2), 0, r2)

  phen.r2 = r2[bp.idx]
  phen = lm.snp$y
  phen.resid = resid(lm.snp) # empirical residuals after removing the lead-SNP effect

  # Candidate causal SNPs: within HALF the window and correlated with y.
  # NOTE(review): window/2 here vs window above -- presumably so the rescan
  # window fully contains each sampled causal SNP; confirm intent.
  snps.subset1 = which( abs(map.qtl$bp -bp) < window/2 & r2>0 )
  # Draw n.sim causal-SNP positions (with replacement) to simulate from.
  snps.sample = sort(sample(snps.subset1, n.sim, replace=TRUE)) # the snp sites to simulate from

  var.snp = apply( D.qtl, 2, var, na.rm=TRUE ) # variability of the dosages at each snp
  # Effect size scaled so each simulated causal SNP reproduces the lead SNP's
  # correlation with the phenotype.
  snps.beta = r[bp.idx]*sqrt(var(phen))/sqrt(var.snp)
  # Always include the observed lead SNP itself as the first simulation.
  snps.sample = c( bp.idx, snps.sample )

  # One simulation per sampled SNP, run in parallel. Note mc.cores IS used
  # here (it is a named argument consumed by mclapply, not the inner function).
  sims = mclapply(snps.sample, function( snp, snps.beta, var.snp, phen.resid, D.qtl, map.qtl, target.r2 ) {
    # NOTE(review): target.r2 (bound to phen.r2) is never used in this body.
    # y = sample(phen.resid) + snps.beta[snp]*D.qtl[snp,]
    # Simulate a phenotype with SNP `snp` causal: randomly permuted empirical
    # residuals plus the SNP's dosage effect (not a bootstrap of samples).
    y = sample(phen.resid) + snps.beta[snp]*D.qtl[,snp]

    # Re-scan the whole window against the simulated phenotype.
    nsub = length(y)
    r = cor( D.qtl, y )
    r2 = r*r
    r2 = ifelse(is.na(r2), 0, r2)
    F = (r2*(nsub-2))/(1-r2+1.0e-20) # epsilon guards the division when r2 == 1
    logP = -pf(F, 1, nsub-2, lower.tail = FALSE, log.p = TRUE)/log(10)
    logP[snp] = -logP[snp] # exclude the causal snp (negated so it cannot win which.max)

    # Location of the strongest non-causal association for this simulation.
    w.max = which.max( logP )
    bp.max = map.qtl$bp[w.max]
    # Signed distance between the simulated causal SNP and the top hit.
    bp.delta = bp.max-map.qtl$bp[snp]

    return( c(snp, map.qtl$bp[snp], logP[snp], map.qtl$bp[w.max], logP[w.max], bp.delta, snps.beta[snp], var.snp[snp] ))
  }, snps.beta, var.snp, phen.resid, D.qtl, map.qtl, phen.r2, mc.cores=mc.cores )

  sims = do.call( "rbind", sims )
  colnames(sims) = c("snp", "bp", "snp.logP", "max.bp", "max.logP", "bp.delta", "snp.beta", "snp.var")
  return(t(sims))
}
9a320b69f9345ad728d986b2f60ce6880897965b
146fb76ac1029e350e44775c95026c5ef13fb39a
/project v1.R
90b0f2984539e7c5671bb7a0e67557905923dc9e
[]
no_license
lwang130/Profitable-Loan-Prediction-using-statistical-method-and-machine-learning
c667ba61cd1bd5978766561af6f33d094436c101
29af7b898440331eaf7c4985d0566f52e6772e0d
refs/heads/main
2023-03-31T07:22:15.798566
2021-04-08T03:09:24
2021-04-08T03:09:24
355,748,166
0
0
null
null
null
null
UTF-8
R
false
false
18,399
r
project v1.R
##############################################################################
# Profitable-loan prediction: train on 2019 applications, score 2020.
#
# Pipeline: pre-process -> baseline models (lm / LASSO / tree) ->
# forward stepwise variable selection by LOOCV -> refit -> tree ensembles ->
# compare test MSEs -> approve 2020 applicants predicted to be profitable.
#
# Fixes relative to the original script:
#   * mse_lasso2 predicted with s = min(lasso$lambda): the WRONG model's
#     lambda path, and the smallest (least penalised) lambda rather than the
#     CV-minimising one the comments described. Both predictions now use
#     lambda.min of their own cv.glmnet fit.
#   * The MSE summary table's row labels were out of order relative to the
#     value vector ("Linear Regression (new)" was paired with mse_lasso).
#   * rpart() was given a classification loss matrix for a regression
#     (anova) tree, where it is meaningless; dropped.
#   * `lm <- glm(...)` shadowed stats::lm; renamed to lm_fit.
#   * The ~250 hand-enumerated forward-selection formulas (whose candidate
#     lists dropped/duplicated variables inconsistently across steps) are
#     replaced by a programmatic forward selection. cv.glm defaults to
#     K = n (LOOCV), which is deterministic, so selection is reproducible.
##############################################################################

# Load packages as needed
library(tidyverse)
library(randomForest)
library(rpart.plot)
library(boot)
library(glmnet)
library(gbm)
library(rpart)
library(chron)
library(knitr)

# ---- Helper functions ------------------------------------------------------

#' Leave-one-out cross-validated prediction error (MSE) of a glm fit of
#' formula `f` on `data`. cv.glm's default K = n gives deterministic LOOCV.
cv_fun <- function(f, data) {
  fit <- glm(f, data = data)
  cv.glm(data, fit)$delta[1]
}

#' Mean squared prediction error on held-out data.
test_mse <- function(predicted, actual) {
  mean((predicted - actual)^2)
}

#' Forward stepwise selection by LOOCV error.
#'
#' At each step, adds the candidate predictor whose inclusion gives the
#' lowest LOOCV MSE; the best of the resulting nested models M1..Mp
#' (again by LOOCV) is returned as a formula.
#'
#' @param response name of the response column (character)
#' @param candidates character vector of candidate predictor names
#' @param data data frame used for the cross-validated fits
#' @return the selected model formula
forward_select <- function(response, candidates, data) {
  selected <- character(0)
  remaining <- candidates
  step_models <- vector("list", length(candidates))
  for (k in seq_along(candidates)) {
    step_cv <- vapply(
      remaining,
      function(v) cv_fun(reformulate(c(selected, v), response = response), data),
      numeric(1)
    )
    best_var <- remaining[which.min(step_cv)]
    selected <- c(selected, best_var)
    remaining <- setdiff(remaining, best_var)
    step_models[[k]] <- reformulate(selected, response = response)
  }
  model_cv <- vapply(step_models, cv_fun, numeric(1), data = data)
  step_models[[which.min(model_cv)]]
}

# ---- Pre-processing --------------------------------------------------------

loan_data <- read_csv("application2019.csv", col_types = cols())

# Explore the data
head(loan_data)
names(loan_data)                           # check the variable names
sum(duplicated(select(loan_data, id))) == 0  # check for duplicate loans
nrow(loan_data)                            # check the number of loans

# Keep complete cases; drop identifiers that must not enter the models.
loan_data$statecode <- factor(loan_data$statecode)
loan_data <- loan_data %>%
  select(-id, -name, -SSN, -date) %>%
  filter(complete.cases(.))

# 80/20 train/test split (row-wise setdiff keeps the held-out rows).
set.seed(1)
train_data <- sample_frac(loan_data, 0.8)
test_data <- setdiff(loan_data, train_data)

# Full-model formula: predict the amount actually repaid.
f <- formula(amt_paid ~ .)

# ---- Baseline models -------------------------------------------------------

# Multiple linear regression (glm with gaussian default == lm).
lm_fit <- glm(f, data = train_data)
mse_lm <- test_mse(predict(lm_fit, newdata = test_data), test_data$amt_paid)

# LASSO regression; predict at lambda.min, the lambda minimising the mean
# cross-validated error.
lasso <- cv.glmnet(x = model.matrix(f, data = train_data), y = train_data$amt_paid)
mse_lasso <- test_mse(
  predict(lasso, newx = model.matrix(f, data = test_data), s = lasso$lambda.min),
  test_data$amt_paid
)

# Regression tree (anova method; no loss matrix -- that is a classification
# concept and does not apply here).
tree <- rpart(f, data = train_data, cp = 0.001)
mse_tree <- test_mse(predict(tree, newdata = test_data), test_data$amt_paid)
prp(tree, extra = 1, box.palette = "auto")

# ---- Variable selection ----------------------------------------------------

# All columns except the response are candidates (matches the original
# 14-variable enumeration). Selection runs on the full data, as before.
predictors <- setdiff(names(loan_data), "amt_paid")
f2 <- forward_select("amt_paid", predictors, loan_data)
f2

# ---- Refit with the selected variables -------------------------------------

lm2 <- glm(f2, data = train_data)
mse_lm2 <- test_mse(predict(lm2, newdata = test_data), test_data$amt_paid)

lasso2 <- cv.glmnet(x = model.matrix(f2, data = train_data), y = train_data$amt_paid)
mse_lasso2 <- test_mse(
  predict(lasso2, newx = model.matrix(f2, data = test_data), s = lasso2$lambda.min),
  test_data$amt_paid
)

# ---- Tree ensembles --------------------------------------------------------

# Bagging: mtry = number of predictors (all variables tried at every split).
bag <- randomForest(f, data = train_data, ntree = 1000,
                    mtry = length(predictors), importance = TRUE)
mse_bag <- test_mse(predict(bag, test_data), test_data$amt_paid)
varImpPlot(bag)

# Random forest: small mtry decorrelates the trees.
rf <- randomForest(f, data = train_data, ntree = 1000, mtry = 2, importance = TRUE)
mse_rf <- test_mse(predict(rf, test_data), test_data$amt_paid)
varImpPlot(rf)

# Gradient boosting; n.trees stated explicitly at predict time so the full
# ensemble is used (predict.gbm otherwise guesses with a warning).
boost <- gbm(f, data = train_data, distribution = "gaussian",
             n.trees = 100, interaction.depth = 20)
mse_boost <- test_mse(predict(boost, test_data, n.trees = 100), test_data$amt_paid)
summary(boost)

# ---- Conclusions -----------------------------------------------------------

# Test-set MSE per model, labels aligned with the value order.
mse_table <- matrix(
  c(mse_lm, mse_lasso, mse_lm2, mse_lasso2, mse_tree, mse_bag, mse_rf, mse_boost),
  ncol = 1,
  dimnames = list(
    c("Linear Regression", "LASSO", "Linear Regression (new)", "LASSO (new)",
      "Decision tree", "Bagging", "Random forest", "Boosting"),
    "MSE"
  )
)
kable(mse_table, row.names = TRUE)

# ---- Score the 2020 applications -------------------------------------------

loan_data_2020 <- read_csv("application2020.csv", col_types = cols())

# model.matrix(f2, ...) needs the response column to exist; add a placeholder.
loan_data_2020 <- mutate(loan_data_2020, amt_paid = 0)
loan_data_2020 <- mutate(
  loan_data_2020,
  predicted_amt_paid = as.numeric(
    predict(lasso2, newx = model.matrix(f2, data = loan_data_2020), s = lasso2$lambda.min)
  ),
  predicted_profit = predicted_amt_paid - loan_amt,
  profit = amt_due - loan_amt
)

# Approve an applicant when the predicted repayment exceeds the loan amount.
loan_data_2020 <- mutate(loan_data_2020, approve = as.numeric(predicted_profit > 0))
predicted_profit <- sum(loan_data_2020$profit[loan_data_2020$approve == 1])
predicted_profit

# Export the final approval decisions.
write.csv(select(loan_data_2020, id, name, approve),
          file = "Project_Result.csv", row.names = FALSE)
16a64d7c95cc4073134084e7c9c1f8ade4a50cda
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/rgr/examples/map.eda7.Rd.R
777e0db38f3b42855c6e6fdb7bf6e40e89911a7c
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
963
r
map.eda7.Rd.R
library(rgr)

### Name: map.eda7
### Title: Plot a Symbol Map of Data Based on the Tukey Boxplot
### Aliases: map.eda7
### Keywords: hplot

### ** Examples

## Make test data available
data(kola.o)

## Columns are referenced explicitly via kola.o$... instead of the original
## attach()/detach() pair: attach() masks the search path and risks silent
## name clashes, and a plotting error would have skipped the detach().

## Plot a default symbol map
map.eda7(kola.o$UTME, kola.o$UTMN, kola.o$Cu)

## Plot with logarithmically scaled boxplot fences and more
## appropriate axis labelling
map.eda7(kola.o$UTME/1000, kola.o$UTMN/1000, kola.o$Cu, logz = TRUE,
  xlab = "Kola Project UTM Eastings (km)",
  ylab = "Kola Project UTM Northings (km)")

## Plot a grey-scale equivalent of the above map
map.eda7(kola.o$UTME/1000, kola.o$UTMN/1000, kola.o$Cu, logz = TRUE,
  ifgrey = TRUE,
  xlab = "Kola Project UTM Eastings (km)",
  ylab = "Kola Project UTM Northings (km)")

## Plot the same map with an alternate colour scheme
map.eda7(kola.o$UTME/1000, kola.o$UTMN/1000, kola.o$Cu, logz = TRUE,
  xlab = "Kola Project UTM Eastings (km)",
  ylab = "Kola Project UTM Northings (km)",
  symcolr = c(27, 24, 22, 12, 5, 3, 36))
c08dffd48d9536bbab51e57d91759970eb39e262
767da5bac2d9433f88074f91a44f516352bd5224
/reactome_analysis_worker/reactome_analysis_worker/resources/r_code/padog_analyser.R
0c9e379a0eac50ceb16a110bf0c53de45b31c1f5
[]
no_license
reactome/gsa-backend
dc4447f2dbb892bb6172dbd09fb5540868781962
281bb6ef6efa0153babe0399d5bbe846ed576c7f
refs/heads/master
2023-08-31T00:08:27.892065
2023-06-04T12:23:38
2023-06-04T12:23:38
198,663,462
3
1
null
2023-08-21T19:38:03
2019-07-24T15:34:36
Python
UTF-8
R
false
false
6,264
r
padog_analyser.R
#' Function to convert (for discrete values) and normalise (if set) the data.
#'
#' This function evaluates two GLOBAL variables as parameters:
#' \code{edger.norm.function} (normalisation method for discrete/count data)
#' and \code{continuous.norm.function} (for continuous data).
#' NOTE(review): both are free variables here -- they must be defined in the
#' calling environment before this function runs.
#'
#' @param expression.data data.frame containing the expression values
#' @param sample.data data.frame with all sample annotations (one sample per row)
#' @param design model.matrix specifying the experimental design
#' @param data.type type of data submitted; count-like types are
#'        "rnaseq_counts" and "proteomics_sc" (spectral counts), everything
#'        else is treated as continuous intensities
#' @return a limma EList (count data is voom-transformed; continuous data is
#'         wrapped and, unless "none", quantile/etc. normalised)
prepareData <- function(expression.data, sample.data, design, data.type) {
  if (data.type %in% c("rnaseq_counts", "proteomics_sc")) {
    # Count data: build a DGEList, normalise library sizes, then voom so the
    # downstream linear modelling can treat the values as continuous.
    expression.data <- DGEList(counts=expression.data, samples = sample.data, group = sample.data$analysis_group)
    # normalize using edgeR's function
    expression.data <- calcNormFactors(expression.data, method = edger.norm.function)
    expression.data <- voom(expression.data, design=design, plot = FALSE)
  } else {
    if (continuous.norm.function == "none") {
      warning("Not performing normalization for proteomics int data")
      # Wrap the raw intensities as a ready-to-use EList without normalising.
      expression.data <- new("EList", list(E = expression.data, targets = sample.data))
    } else {
      # create the EList object
      expression.data <- new("EListRaw", list(E = expression.data, targets = sample.data))
      # normalize between arrays with the configured method
      expression.data <- normalizeBetweenArrays(expression.data, method = continuous.norm.function)
    }
  }

  return(expression.data)
}

#' "Public" function called by the analyzer to load the required libraries.
#' This is mainly put in a separate function to be able to quickly see if
#' required libraries are not available on the system.
load_libraries <- function() {
  suppressPackageStartupMessages(library(PADOG))
  suppressPackageStartupMessages(library(edgeR))
  suppressPackageStartupMessages(library(limma))
}

#' Main function to process the dataset
#'
#' Runs a PADOG gene-set analysis comparing two sample groups and returns the
#' per-pathway statistics with worker-standard column names.
#'
#' @param expression.data data.frame containing the expression values
#' @param sample.data data.frame with all sample annotations (one sample per row)
#' @param design model.matrix specifying the experimental design
#' @param gene.indices a named list with each gene set as entry (and its id as name) and the index of the member
#'        genes based on the expression.data data.frame as values
#' @param data.type type of data submitted (ie. rnaseq_counts, proteomics_sc, proteomics_int)
#' @param analysis.group.1 name of the first coefficient to test based on the experimental design. Must correspond
#'        to a colname in the \code{design}
#' @param analysis.group.2 name of the second coefficient to test based on the experimental design
#' @return data.frame with columns Pathway, FDR, PValue, NGenes, MeanAbsT0, MeanWeightT0
process <- function(expression.data, sample.data, design, gene.indices, data.type, analysis.group.1, analysis.group.2) {
  # prepare (convert + normalise) the data
  expression.data <- prepareData(expression.data, sample.data, design, data.type)

  # padog requires the samples in a "control" and a "disease" group
  padog_group <- rep(NA, ncol(expression.data))
  padog_group[design[, analysis.group.1] == 1] <- "c"
  padog_group[design[, analysis.group.2] == 1] <- "d"

  # remove all samples that are not in the two treatment groups
  samples_to_keep <- padog_group %in% c("c", "d")
  expression.data <- expression.data[, samples_to_keep]
  padog_group <- padog_group[samples_to_keep]
  sample.data <- sample.data[samples_to_keep, ]

  # convert the gene.indices (row numbers) back to gene identifiers
  gene_identifier_set <- lapply(gene.indices, function(gene_ids) {
    rownames(expression.data)[gene_ids]
  })

  # paired analysis: enabled when a sample-grouping column is configured.
  # NOTE(review): sample.groups is a free (global) variable -- it must be set
  # by the calling environment.
  is_paired <- FALSE
  sample_block <- NULL

  if (nchar(sample.groups) > 0) {
    if (!sample.groups %in% colnames(sample.data)) {
      stop("Error: Failed to find defined sample.groups '", sample.groups, "' in the sample metadata. ",
           "In the ReactomeGSA R package, this must also be specified as an 'additional_factor'")
    }

    is_paired <- TRUE
    sample_block <- sample.data[, sample.groups]
  }

  # check padog's requirements up front to produce actionable error messages
  found_genes <- length(unique(as.numeric(unlist(gene.indices))))

  if (found_genes <= 10) {
    stop("Error: PADOG requires more than 10 proteins/genes to be found in the gene sets. Only ", found_genes, " identifiers be mapped to Reactome's pathways")
  }

  # there must be at least 3 samples per group
  if (any(table(padog_group) < 3)) {
    stop("Error: PADOG requires at least 3 samples per group.")
  }

  # run PADOG
  padog_result <- padog(
    esetm = as.matrix(expression.data$E),
    group = padog_group,
    paired = is_paired,
    block = sample_block,
    gslist = gene_identifier_set,
    NI = 1000,   # number of iterations
    plots = FALSE,
    Nmin = 0)    # minimum gene set size

  # rename PADOG's columns to the worker's standard result schema
  colnames(padog_result) <- plyr::mapvalues(
    from = c("ID", "Size", "meanAbsT0", "padog0", "PmeanAbsT", "Ppadog"),
    to = c("Pathway", "NGenes", "MeanAbsT0", "MeanWeightT0", "PValue", "FDR"),
    x = colnames(padog_result))

  return(padog_result[, c("Pathway", "FDR", "PValue", "NGenes", "MeanAbsT0", "MeanWeightT0")])
}

#' Per-gene differential expression (fold changes) between the two groups.
#'
#' Fits a limma linear model with a -1/+1 contrast between the two design
#' coefficients and returns the FDR-adjusted topTable for all genes.
#'
#' @param expression.data data.frame containing the expression values
#' @param sample.data data.frame with all sample annotations (one sample per row)
#' @param design model.matrix specifying the experimental design
#' @param data.type type of data submitted (ie. rnaseq_counts, proteomics_sc, proteomics_int)
#' @param analysis.group.1 design coefficient used as the baseline (-1)
#' @param analysis.group.2 design coefficient used as the comparison (+1)
#' @return limma topTable with an "Identifier" column moved to the front
get_gene_fc <- function(expression.data, sample.data, design, data.type, analysis.group.1, analysis.group.2) {
  # prepare the data
  expression.data <- prepareData(expression.data, sample.data, design, data.type)

  # create the contrast vector (group2 minus group1)
  contrasts <- rep(0, ncol(design))
  contrasts[which(colnames(design) == analysis.group.1)] <- -1
  contrasts[which(colnames(design) == analysis.group.2)] <- 1

  # create the fit
  fit <- lmFit(expression.data, design)
  cont.fit <- contrasts.fit(fit, contrasts)
  fit1 <- eBayes(cont.fit)

  result <- na.omit(topTable(fit1, adjust="fdr", number="all"))
  result$Identifier <- rownames(result)

  # move "Identifier" to the first column.
  # NOTE(review): 1:ncol(result)-1 parses as (1:ncol(result)) - 1, i.e.
  # 0:(ncol-1); the 0 index is dropped so this selects columns 1..ncol-1 --
  # every original column except the just-appended Identifier. It works, but
  # only by accident of R's indexing rules.
  col_order <- c("Identifier", colnames(result)[1:ncol(result)-1])
  return(result[, col_order])
}
62df2b43f582e8210a0aeb325273ab2ed9fb6ef6
09f9794d4cdce27142bc3d248bc9872ee2c7f650
/Parciales/PPunto2.R
5aa87ac52e46ac193395eb967c833e34f4f4a53e
[]
no_license
tsrf195/Analisis_Numerico
de2f132101df28434df4527375d32cd127b38aa9
52847e78a9294a48dcc8b92692b20455f7fa4613
refs/heads/main
2023-04-24T17:32:56.122033
2021-05-13T05:49:57
2021-05-13T05:49:57
335,025,792
0
0
null
null
null
null
UTF-8
R
false
false
328
r
PPunto2.R
# PPunto2.R -- find the positive root of f(x) = x^2 - cos(x) - 1 with the
# secant method.
#
# Fixes relative to the original script:
#   * The two starting values were initialised with f(1) and f(0) -- function
#     VALUES (both negative), not x-values -- so the iteration accidentally
#     converged to the NEGATIVE root and the result was flipped with VS*-1.
#     We now start from x-values in [1, 2], matching the plotted interval.
#   * The previous iterate was never updated inside the loop, so the update
#     was not a true secant step; both points now advance each iteration.
#   * Nothing was reported when the iteration budget ran out; a warning is
#     now emitted.

#' Secant-method root finder.
#'
#' @param f univariate function whose root is sought
#' @param x0,x1 two starting points near the root
#' @param tol relative convergence tolerance on successive iterates
#' @param max_iter iteration budget
#' @return the approximated root
secant_root <- function(f, x0, x1, tol = 1e-9, max_iter = 100) {
  for (iter in seq_len(max_iter)) {
    fx0 <- f(x0)
    fx1 <- f(x1)
    denom <- fx1 - fx0
    if (denom == 0) {
      stop("Secant method failed: flat secant (f(x0) == f(x1))")
    }
    x2 <- x1 - fx1 * (x1 - x0) / denom
    # Relative change between successive iterates as the stopping rule.
    if (abs(x2 - x1) / abs(x2) < tol) {
      cat("Iteracion =", iter, "Raiz =", x2, "Tolerancia =", tol, "\n")
      return(x2)
    }
    # Advance BOTH points: this is what makes it the secant method.
    x0 <- x1
    x1 <- x2
  }
  warning("Secant method did not converge within ", max_iter, " iterations")
  x1
}

f <- function(x) {
  x^2 - cos(x) - 1
}
plot(f, from = 1, to = 2)  # the positive root lies in [1, 2]

raiz <- secant_root(f, 1, 2, tol = 10^-9, max_iter = 100)
0813650580de6b5cb14310b245eeed2a09c7cffc
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
/codeml_files/newick_trees_processed/5927_8/rinput.R
663afb2284c26577ac90d682014499ace88957fd
[]
no_license
DaniBoo/cyanobacteria_project
6a816bb0ccf285842b61bfd3612c176f5877a1fb
be08ff723284b0c38f9c758d3e250c664bbfbf3b
refs/heads/master
2021-01-25T05:28:00.686474
2013-03-23T15:09:39
2013-03-23T15:09:39
null
0
0
null
null
null
null
UTF-8
R
false
false
135
r
rinput.R
# Read a Newick tree, strip its root, and write the unrooted topology back out.
library(ape)

rooted_tree <- read.tree("5927_8.txt")
unrooted_tree <- unroot(rooted_tree)
write.tree(unrooted_tree, file = "5927_8_unrooted.txt")
bb5ef4bbde2aac473eb707215dd1bd80e6ba49ab
fbcdb1aae40c7296f5b5edfc2bc59a5669496df2
/R/datavisualization/example4.R
8159bf1f812cfd86a7697a6d8506b414394605c8
[]
no_license
Fr4nc3/code-hints
b9db1e6b4697fc6932d92e5977a8fbc8d2a8d591
931575a9bd7f843fca006c6616754b49161dcb50
refs/heads/master
2023-02-05T23:07:24.085366
2022-08-11T13:00:59
2022-08-11T13:00:59
52,673,727
5
3
null
2023-01-31T22:44:42
2016-02-27T15:37:48
HTML
UTF-8
R
false
false
7,856
r
example4.R
##########################################
# Francia Riesco
# 02/12/2018
# #4
##########################################
# The data on the next two pages is from a Canadian 1970 census which collected
# information about specific occupations. Data collected was used to develop a
# regression model to predict prestige for all occupations. Use R to calculate
# the quantities and generate the visual summaries requested below.

# (1) Save the data to excel and read into R for analysis.
getwd()
setwd("")
# NOTE(review): setwd("") is a no-op at best and machine-dependent; prefer
# running from the project directory or using here::here().
# Workaround for an rJava load error on this author's Mac.
# NOTE(review): this path is machine-specific and will fail elsewhere.
dyn.load('/Library/Java/JavaVirtualMachines/jdk1.8.0_151.jdk/Contents/Home/jre/lib/server/libjvm.dylib') # problem with my rJava library in mac
library(xlsx) # to read the xls
data <- read.xlsx("hw4.xlsx", 1)
summary(data)

# (2) To get a sense of the data, generate a scatterplot to examine the
# association between prestige score and years of education. Briefly describe
# the form, direction, and strength of the association between the variables.
# Calculate the correlation.
x<-data$Education.Level # independent variable
y<-data$Prestige.Score  # dependent variable
xlabel <- "Education Level (years)"
ylabel <- "Prestige Score"
par(mfrow=c(1, 1)) # need 1x1 layout
library(ggplot2)
# NOTE(review): mapping module-level vectors x/y inside aes() works here but
# is fragile; mapping the data columns directly would be safer.
ggplot(data, aes(x=x, y=y)) + geom_point()+ labs(title="Scatterplot of Education Level vs Prestige Score",
                                                 x=xlabel,
                                                 y=ylabel )+ geom_smooth(method=lm, se=TRUE)
cor(y,x)
cor.test(y,x)

# (3) Perform a simple linear regression. Generate a residual plot. Assess
# whether the model assumptions are met. Are there any outliers or influence
# points? If so, identify them by ID and comment on the effect of each on the
# regression.
lm(y~x)
m<-lm(y~x)
summary(m)
# residual plot
plot(x,resid(m), axes=TRUE, frame.plot=TRUE, xlab = xlabel, ylab="residuals")
abline(h=0)
model = lm(formula = y ~ x)   # NOTE(review): identical refit of m
summary(model)
abline(model, lwd = 2, col = "red")
# Pull out the residual and fitted values from the model so that we can plot them.
# NOTE(review): these assignments shadow the base functions resid() and
# fitted() for the rest of the session.
resid = resid(model)
fitted = fitted(model)
# Plot the residuals to check the assumptions.
plot(x = x, y = resid, main = "Residuals vs Education Level (Predictor X)")
abline(h = 0)
plot(x = fitted, y = resid, main = "Residuals vs Predicted Score (yhat)")
abline(h = 0)
hist(x = resid, main = "Residuals", breaks = 20)
plot(density(x = resid), main = "Residuals")
df<- data.frame(resid = resid)
# NOTE(review): aes(df$resid) bypasses the data argument; aes(resid) is the
# idiomatic form.
ggplot(data=df, aes(df$resid)) +
  geom_histogram(aes(y =..density..),
                 #breaks=seq(),
                 bins = 10,
                 col="red",
                 fill="green",
                 alpha=.2) +
  geom_density(col=2) +
  labs(title="Residuals Histogram", x="Residuals", y="Counts")
# Outlier/influence identification adapted from the example at
# https://stats.stackexchange.com/questions/117873/how-to-detect-outliers-from-residual-plot
# Identifies the top-n points by Cook's distance, leverage (hat values) and
# studentized residuals, and highlights them on the scatterplot.
library(car)
(outs <- influencePlot(m))
n <- 2  # NOTE(review): shadows any earlier n; number of extreme points to flag
Cooksdist <- as.numeric(tail(row.names(outs[order(outs$CookD), ]), n))
Lev <- as.numeric(tail(row.names(outs[order(outs$Hat), ]), n))
StdRes <- as.numeric(tail(row.names(outs[order(outs$StudRes), ]), n))
outs
df <- data.frame(x=x, y=y)
plot(df$x, df$y, xlab = xlabel, ylab = ylabel)
abline(m, col = "blue")
points(df$x[Cooksdist], df$y[Cooksdist], col = "red", pch = 0, lwd = 15)
points(df$x[Lev], df$y[Lev], col = "blue", pch = 25, lwd = 8)
points(df$x[StdRes], df$y[StdRes], col = "green", pch = 20, lwd = 5)
# NOTE(review): paste("", sep ="x", "") evaluates to "" -- the labels are
# empty, so this text() call draws nothing visible.
text(df$x[as.numeric(row.names(outs))], df$y[as.numeric(row.names(outs))], labels = paste("", sep ="x", ""), pos = 1)
# If there were outliers in the plots, we could sort the residuals to find which observations had
#sort(resid)

# (4) Calculate the least squares regression equation that predicts prestige
# from education, income and percentage of women. Formally test whether the
# set of these predictors are associated with prestige at the alpha = 0.05 level.

# Upper-panel function for pairs(): prints the absolute correlation, scaled
# so stronger correlations render larger.
panel.cor <- function(x, y, digits=2, prefix="", cex.cor, ...)
{
  usr <- par("usr")
  on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r <- abs(cor(x, y, use="complete.obs"))
  txt <- format(c(r, 0.123456789), digits=digits)[1]
  txt <- paste(prefix, txt, sep="")
  if(missing(cex.cor)) cex.cor <- 0.8/strwidth(txt)
  text(0.5, 0.5, txt, cex = cex.cor * (1 + r) / 2)
}

# Diagonal-panel function for pairs(): draws a small histogram of each variable.
panel.hist <- function(x, ...)
{
  usr <- par("usr")
  on.exit(par(usr))
  par(usr = c(usr[1:2], 0, 1.5) )
  h <- hist(x, plot = FALSE)
  breaks <- h$breaks
  nB <- length(breaks)
  y <- h$counts
  y <- y/max(y)
  rect(breaks[-nB], 0, breaks[-1], y, col="white", ...)
}

# Lower-panel function for pairs(): scatterplot with a fitted regression line.
panel.lm <- function (x, y, col = par("col"), bg = NA, pch = par("pch"),
                      cex = 1, col.smooth = "black", ...)
{
  points(x, y, pch = pch, col = col, bg = bg, cex = cex)
  abline(stats::lm(y ~ x), col = col.smooth, ...)
}

# Multiple regression: prestige ~ education + income + % women.
m<-lm(data$Prestige.Score~data$Education.Level+data$Income+data$Percent.of.Workforce.that.are.Women)
summary(m)
anova(m)
x<-data$Education.Level # independent variable
y<-data$Prestige.Score  # dependent variable
# NOTE(review): `data$Income+data` and `data$Education.Level+data` add the
# whole data frame to a column -- almost certainly a typo for the bare
# columns. `df` is not used afterwards (pairs/cor below use `data`), so the
# bug is currently harmless but should be fixed.
df <- data.frame(
  income = data$Income+data,
  prestige.score = data$Prestige.Score,
  education = data$Education.Level+data,
  women = data$Percent.of.Workforce.that.are.Women
)
pairs(data,upper.panel=panel.cor, diag.panel=panel.hist, lower.panel=panel.lm)
pairs(data)
cor(data)

# (5) If the overall model was significant, summarize the information about the
# contribution of each variable separately at the same significance level as
# used for the overall model (no need to do a formal 5-step procedure for each
# one, just comment on the results of the tests). Provide interpretations for
# any estimates that were significant. Calculate 95% confidence intervals where
# appropriate.
summary(lm(data$Prestige.Score~data$Education.Level)) confint(lm(data$Prestige.Score~data$Education.Level), level = 0.95) summary(lm(data$Prestige.Score~data$Income)) confint(lm(data$Prestige.Score~data$Income), level = 0.95) summary(lm(data$Prestige.Score~data$Percent.of.Workforce.that.are.Women)) confint(lm(data$Prestige.Score~data$Percent.of.Workforce.that.are.Women), level = 0.95) # (6) Generate a residual plot showing the fitted values from the regression against the residuals. Is the fit of the model reasonable? # Are there any outliers or influence points? confint(m, level = 0.95) par(mfrow=c(1, 1)) # need 1x1layout plot(fitted(m),resid(m), axes=TRUE, main="Fitted Values vs Residuals", frame.plot=TRUE, xlab = "fitted values", ylab="residuals") abline(h=0) #Checking Normality of residuals hist(resid(m)) plot(m, pch=16, which=1) df <- data.frame(resid = resid(m)) ggplot(data=df, aes(df$resid))+ geom_histogram(aes(y =..density..), col="red", fill="green", alpha = .2) + geom_density(col=2) + labs(title="Histogram for Residuals Multiple Regression") + labs(x="Residuals", y="Count") #exclude women percentage # m<-lm(data$Prestige.Score~data$Education.Level+data$Income) # summary(m) # anova(m) # plot(m, pch=16, which=1) # REFERENCES # [] http://www.r-tutor.com/elementary-statistics/simple-linear-regression/residual-plot # [] https://www.statmethods.net/stats/regression.html # [] https://www.statmethods.net/stats/rdiagnostics.html # [] http://r-statistics.co/Linear-Regression.html # [] https://rpubs.com/FelipeRego/MultipleLinearRegressionInRFirstSteps # Version 1.1.383 – © 2009-2017 RStudio, Inc. # Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/604.4.7 (KHTML, like Gecko) # R version 3.4.2 (2017-09-28) -- "Short Summer" # Copyright (C) 2017 The R Foundation for Statistical Computing # Platform: x86_64-apple-darwin15.6.0 (64-bit)
1bb418770d68b2e176ade7f80df6b4353ff404c9
f09d0478b9c1a61bd77d1f77d6638a01042fda91
/apt-R-celBatch2binary.R
2e7136d6e1918d743f80a5dc5ea98deaf713b23b
[]
no_license
antoniofabio/aptUtils
1bce3dca59386f84f6ded6e692ef09af2a243e33
6ce546ec30a4b505c84cac59f5d38c08280ddf05
refs/heads/master
2020-05-17T11:13:40.272588
2012-07-17T08:43:00
2012-07-17T08:43:00
5,079,359
1
0
null
null
null
null
UTF-8
R
false
false
1,997
r
apt-R-celBatch2binary.R
#!/usr/bin/env Rscript if(!suppressPackageStartupMessages(require("optparse", quietly=TRUE))) { stop("the 'optparse' package is needed in order to run this script") } option_list <- list(make_option(c("-d", "--cdf"), help="CDF file name"), make_option(c("-o", "--output-file"), default="X.bin", help="output RData file name [default: X.bin]"), make_option(c("-a", "--annot"), default="probesets.txt", help="probesets annotation file"), make_option(c("-p", "--progress"), action="store_true", default=FALSE, help="show progress [default: FALSE]")) parser <- OptionParser(usage="%prog [options] cel-files", option_list=option_list) arguments <- parse_args(parser, positional_arguments = TRUE) opt <- arguments$options verbose <- opt$progress cdfFile <- opt$cdf celFiles <- arguments$args if(length(celFiles)==0) { print_help(parser) quit("no") } stopifnot(!is.null(opt$cdf)) tmpOutDir <- tempdir() tmpTxtFiles <- file.path(tmpOutDir, gsub("(.)\\.CEL$", "\\1.TXT", basename(celFiles))) tmpBinFiles <- file.path(tmpOutDir, gsub("(.)\\.CEL$", "\\1.BIN", basename(celFiles))) for(i in seq_along(celFiles)) { if(verbose) { cat('.') } cmd <- sprintf("apt-cel-extract -d %s -v 0 --pm-only -o %s %s", cdfFile, tmpTxtFiles[i], celFiles[i]) message("shell command: `", cmd, "`") system(cmd) y <- read.delim(tmpTxtFiles[i], sep="\t", colClasses=c(rep("NULL", 7), "integer"))[[1]] if(i>1) { unlink(tmpTxtFiles[i]) } writeBin(y, tmpBinFiles[i], size=2, endian="little") } if(verbose) { cat('\n') } system(sprintf("cat %s > %s", paste(tmpBinFiles, collapse=" "), opt$`output-file`)) d <- read.delim(tmpTxtFiles[1], sep="\t", as.is=TRUE) unlink(tmpTxtFiles[1]) probeset_id <- unique(d$probeset_id) probeset_len <- tapply(d$probeset_id, d$probeset_id, length) df <- data.frame(probeset_id=probeset_id, probeset_len=probeset_len) write.csv(df, file=opt$annot, row.names=FALSE)
f4417800adef004d53fea827262d1549e5bff5de
2ce1b58d561f8cce10c40d4dda42d1fc7d6083e1
/202004271753test/GSE93157.R
b2bb3f716a83448b47930538c9ad4eac6cd912d3
[]
no_license
AncientDragon/Git_R
40e577ce6782bf2dd61a3150b8ecacf691394153
95e65c6b77999a7411b52846ebd86ac16136274e
refs/heads/master
2021-07-21T23:03:21.288539
2020-07-24T09:50:15
2020-07-24T09:50:15
197,338,596
0
0
null
null
null
null
UTF-8
R
false
false
1,645
r
GSE93157.R
library(openxlsx) test_93157 = read.xlsx("GSE93157.xlsx") test_93157_train_sub = sample(nrow(test_93157), 9 / 10 * nrow(test_93157)) test_93157_train_data = test_93157[test_93157_train_sub,] test_93157_test_data = test_93157[-test_93157_train_sub,] library(pROC) library(e1071) test_93157_train_data$group = as.factor(test_93157_train_data$group) test_93157_test_data$group = as.factor(test_93157_test_data$group) test_93157_svm = svm( formula = group ~ DEFB1 + C4BPA + IL2 + LAMP1 + CAMP + IL17B + FCGR3A + KIT + POU2F2 + IFNL2 + BLK, data = test_93157_train_data, type = 'C', kernel = 'radial' ) test_93157_pre_svm = predict(test_93157_svm, newdata = test_93157_test_data) test_93157_obs_p_svm = data.frame(prob = test_93157_pre_svm, obs = test_93157_test_data$group) test_93157_table = table(test_93157_test_data$group, test_93157_pre_svm, dnn = c("real", "pre")) test_93157_svm_roc = roc(test_93157_test_data$group, as.numeric(test_93157_pre_svm)) #test_93157_pre_svm = predict(test_93157_svm, newdata = test_93157) #test_93157_obs_p_svm = data.frame(prob = test_93157_pre_svm, obs = test_93157$group) #test_93157_table = table(test_93157$group, test_93157_pre_svm, dnn = c("real", "pre")) #test_93157_svm_roc = roc(test_93157$group, as.numeric(test_93157_pre_svm)) plot( test_93157_svm_roc, grid = c(0.05, 0.05), grid.col = c("black", "green"), print.auc = TRUE, auc.polygon = TRUE, max.auc.polygon = TRUE, auc.polygon.col = "yellow", print.thres = TRUE, main = 'GSE93157SVM模型ROC曲线 kernel = radial' ) test_93157_pre_svm test_93157_table
e4700243933259df744f9d9015b7419dc44f63e7
c85471f60e9d5c462de6c60c880d05898ec81411
/cache/gdatascience|tidytuesday|video_games.R
7df7118701cfce38a618a1ae056cd5cd400499d7
[ "CC-BY-4.0", "MIT" ]
permissive
a-rosenberg/github-content-scraper
2416d644ea58403beacba33349ee127e4eb42afe
ed3340610a20bb3bd569f5e19db56008365e7ffa
refs/heads/master
2020-09-06T08:34:58.186945
2019-11-15T05:14:37
2019-11-15T05:14:37
220,376,154
0
0
null
null
null
null
UTF-8
R
false
false
3,804
r
gdatascience|tidytuesday|video_games.R
## ----setup, include=FALSE------------------------------------------------ knitr::opts_chunk$set(echo = TRUE) ## ------------------------------------------------------------------------ library(tidyverse) library(lubridate) theme_set(theme_light()) video_games <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-07-30/video_games.csv") %>% mutate(game = str_to_title(game), release_date = str_replace(release_date, "8 Oct, 2014", "Oct 8, 2014"), release_date = mdy(release_date), release_year = year(release_date), release_month = month(release_date, label = TRUE), owners = as.numeric(gsub(",","",str_extract(owners,"[0-9]+(,[0-9]+)*"))), owners = if_else(owners == 0, 1, owners), revenue = owners * price) %>% select(-number) ## ------------------------------------------------------------------------ video_games %>% arrange(desc(owners)) %>% select(game, release_date, price, owners, revenue) %>% top_n(7, owners) ## ------------------------------------------------------------------------ paste0(sum(is.na(video_games$price)), " out of ", nrow(video_games), " = ", round(100*sum(is.na(video_games$price))/nrow(video_games), 2), "%") ## ------------------------------------------------------------------------ video_games %>% filter(is.na(price)) %>% group_by(release_year) %>% summarise(n = n()) %>% ggplot(aes(release_year, n)) + geom_col() ## ------------------------------------------------------------------------ video_games %>% ggplot(aes(release_date, price)) + geom_point(alpha = 0.25) + geom_smooth(method = "lm") ## ------------------------------------------------------------------------ video_games %>% group_by(release_year) %>% summarise(avg_price = mean(price, na.rm = TRUE)) %>% ggplot(aes(release_year, avg_price)) + geom_col() + geom_smooth(method = "lm") ## ------------------------------------------------------------------------ avg_video_game_revenue <- mean(video_games$revenue, na.rm = TRUE) video_games %>% 
group_by(release_year, release_month) %>% summarise(avg_revenue = mean(revenue, na.rm = TRUE)) %>% ggplot(aes(release_month, avg_revenue, fill = as.factor(release_month))) + geom_col() + facet_wrap(~release_year, scales = "free_y") + geom_hline(yintercept = avg_video_game_revenue, linetype = 2) ## ------------------------------------------------------------------------ video_games %>% arrange(desc(revenue)) %>% select(game, release_date, price, owners, revenue) ## ------------------------------------------------------------------------ video_games %>% ggplot(aes(release_date, revenue)) + geom_point(alpha = 0.25) + geom_smooth(method = "lm") ## ------------------------------------------------------------------------ video_games %>% group_by(release_year) %>% summarise(total_revenue = sum(revenue, na.rm = TRUE)) %>% ggplot(aes(release_year, total_revenue, fill = as.factor(release_year))) + geom_col(show.legend = FALSE) + scale_y_continuous(labels = scales::dollar_format(scale = 0.000000001, suffix = "B")) + labs(x = "Release year", y = "Total revenue (in billions)", title = "2017 PC video games generated over $4 billion", subtitle = "Based on price multiplied by the number of owners", caption = "Designer: Tony Galvan @gdatascience1 | Source: Steam Spy") ## ------------------------------------------------------------------------ video_games %>% tidytext::unnest_tokens(tbl = ., output = word, input = game) %>% count(word, sort = TRUE) %>% filter(is.na(as.numeric(word))) %>% anti_join(get_stopwords()) %>% filter(n > 100) %>% na.omit() %>% wordcloud2::wordcloud2(shape = "cardiod")
5f24df7d723cf6713f27f74d52dff335fdc8a2c0
cf46b40f288b0bbcef1c3d2834adc23f83cce27c
/trump_score_full/trump-score.R
43b649ce46df6230b7f0c2731b7298f7ae3acc1d
[]
no_license
jgreen4919/trump_scores
4191a51792e525c38fd50fefface57d4461a7dcb
1c41eb43066c4a08958e0fe76211ecb7155d412a
refs/heads/master
2021-05-13T23:16:07.636906
2018-02-24T15:03:47
2018-02-24T15:03:47
116,511,505
0
0
null
null
null
null
UTF-8
R
false
false
575
r
trump-score.R
library(rvest) library(tidyverse) h <- read_html('https://projects.fivethirtyeight.com/congress-trump-score/house/') trump_score <- h %>% html_nodes('table.member-list') %>% map(html_table, fill = TRUE) %>% set_names(c('Senate', 'House')) %>% map(set_names, c('member', 'member_short', 'party', 'district', 'trump_score', 'trump_margin', 'predicted_score', 'trump_plus_minus')) %>% map(mutate_all, na_if, '—') %>% map_df(mutate_at, vars(trump_score:trump_plus_minus), parse_number, .id = 'chamber')
12801012d79c8a1a3b1ba353e6940896fdd2ca57
23617caf7005786cd089b7f73f7da604b5ae3d6f
/R/get-difftime.R
ecc62beac76d8d00bda9dcf26288499e5436c602
[]
no_license
poissonconsulting/lexr
bedc665d09be4856a92a982d3cd912af5975a025
8d6cdd07af61a2ef7b8bf635500ecccb52700e92
refs/heads/master
2021-07-19T13:11:13.160924
2021-02-16T20:37:35
2021-02-16T20:37:35
48,457,451
1
0
null
null
null
null
UTF-8
R
false
false
916
r
get-difftime.R
#' Get Difftime #' #' @param object The object to get the difftime for. #' @return A difftime object #' @export get_difftime <- function(object) { UseMethod("get_difftime", object) } get_difftime.POSIXct <- function(object) { datetimes <- unique(object) datetimes %<>% sort() n <- length(datetimes) difftimes <- difftime(datetimes[2:n], datetimes[1:(n-1)]) min(difftimes, na.rm = TRUE) } #' @export get_difftime.lex_data <- function(object) { datetimes <- unique(object$detection$DateTimeDetection) datetimes %<>% sort() n <- length(datetimes) difftimes <- difftime(datetimes[2:n], datetimes[1:(n-1)]) min(difftimes, na.rm = TRUE) } #' @export get_difftime.detect_data <- function(object) { difftime(object$interval$DateTime[2], object$interval$DateTime[1]) } #' @export get_difftime.analysis_data <- function(object) { difftime(object$period$DateTime[2], object$period$DateTime[1]) }
55e0799e4e7bf468f56dc0fe18058fc5cfe2862a
de76bcdc1b54d52d8a9308cd4ae34290adb3754c
/R/read_ecco.R
522e7f95e228551e66a4464f0f581dfc612ef4c4
[]
no_license
COMHIS/estc
d0e3f249e23d76140f50cc5040e067cc2207a615
44a1befe88e4cfe2909d40364a246d946485e7cc
refs/heads/master
2022-05-01T05:46:41.347913
2022-04-02T11:27:48
2022-04-02T11:27:48
107,229,188
5
3
null
null
null
null
UTF-8
R
false
false
1,431
r
read_ecco.R
#' @title Read ECCO #' @description Read ECCO data dump. #' @param version Version number 1: ECCOI; 2: ECCOII #' @return ECCO data.frame #' @export #' @details Assumes that the working directory includes the source data file. #' @author Leo Lahti \email{leo.lahti@@iki.fi} #' @references See citation("estc") #' @examples \dontrun{ecco <- read_ecco(version = 2)} #' @keywords utilities read_ecco <- function (version = 2) { # ECCOI if (version == 1) { f <- "ecco.csv.gz" message(paste("Reading file", f)) ecco <- read.csv(f) # Old dump CSV } else if (version == 2) { f <- "ecco2.json.gz" message(paste("Reading file", f)) ecco <- fromJSON(file = f, method = "C") # Ignore column 11 "containsGraphicOfType" which is hierarchical ecco <- as.data.frame(t(sapply(ecco, function (x) {x[setdiff(names(x), "containsGraphicOfType")]})), stringsAsFactors = FALSE) ecco <- as.data.frame(sapply(ecco, function (x) {unlist(x)})) } # Polish ecco ID ecco$id <- as.character(ecco$ESTCID) ecco$documentID <- as.character(ecco$documentID) ecco$ESTCID <- as.character(ecco$ESTCID) ecco$totalPages <- as.numeric(as.character(ecco$totalPages)) # Remove leading zeroes (T012345 -> T12345) ecco$id <- sapply(ecco$id, function (x) {paste(substr(x, 1, 1), gsub("^0+", "", substr(x, 2, nchar(x))), sep = "")}, USE.NAMES = FALSE) message("ECCO read successfully.") ecco }
9f863380d044555144333a8f0f1a98addc2184af
0ad8f87c779f07840c408aaf70f14a150d62294b
/Problem_1.R
e301aaa37ccac0556cc0cfc3ef825e628f221b51
[]
no_license
feb-uni-sofia/homework-1-r-basics-toddtsvetanov
de2dbf1e2d9c0747ae0893e5c2d2df6f7d2846e7
7b0288418aa31b883aab36bc8dfafa10ea17075d
refs/heads/master
2021-04-15T13:31:25.306974
2018-04-02T14:20:52
2018-04-02T14:20:52
126,696,924
0
0
null
null
null
null
UTF-8
R
false
false
318
r
Problem_1.R
# a) x <- c(4,1,1,4) # b) y <- c(1,4) # c) The result is like this because R Studio repeats "y" as many times as it takes # to fill "x" (it repeats it). x-y # d) s <- c(x,y) # e) rep(s,10) length(rep(s,10)) # f) rep(s, each = 3) # g) seq(7,21) # i) 7:21 # ii) # h) length(seq(7,21))
f02828796b60b2f4704c3f2870811f8ed1693010
dd33784cfb1b3159e49e714814b959e434f2a38d
/scripts/revisions_1/bs_posteriorPredictive.R
a6f04b4fe64a65d2c1608eaa920fe72c6f447756
[]
no_license
Nian-Jingqing/blind_spot_psychophy
1780ed74cc4210041639eaad716cd1d713a4e84a
8de1a80accccef881a5e8ac522c0d5e8bccfd354
refs/heads/master
2022-01-14T11:26:34.237158
2019-03-03T17:34:15
2019-03-03T17:34:15
null
0
0
null
null
null
null
UTF-8
R
false
false
7,661
r
bs_posteriorPredictive.R
if(1==0){ source('revisions_1/bs_load_stan.R') postPredAll = NULL for(exp in c("EEG","Control","Inset4b",'Inset4a')){ if(exp=='EEG'){ stanfit = factorialFit.exp1 data = dat.exp1 } if(exp=='Control'){ stanfit = factorialFit.exp2 data = dat.exp2 } if(exp=='Horizontal'){ stanfit = factorialFit.exp3 data = dat.exp3 } if(exp=='Inset4b'){ stanfit = factorialFit.exp4b data = dat.exp4b #data = subset(data,!(data$subject%in%c('Inset4b.35','Inset4b.38','Inset4b.39','Inset4b.40','Inset4b.41'))) # I did not yet put the dmoinant eye / handedness } if(exp=='Inset4a'){ stanfit = factorialFit.exp4a data = dat.exp4a #data = subset(data,!(data$subject%in%c('Inset4a.35','Inset4a.38','Inset4a.39','Inset4a.40','Inset4a.41'))) # I did not yet put the dmoinant eye / handedness } if (exp =='EEG' | exp == 'Inset4a'){ formRan = ~1+stimLLoc + stimRLoc +oneBack }else{ formRan = ~1+stimLLoc*controlTrial + controlTrial*stimRLoc +oneBack } formFix = ~0+handedness+dominantEye #current.na.action <- options('na.action') #options(na.action='na.pass') ranX <- model.matrix(formRan, data) fixX <- model.matrix(formFix, data) #options(na.action=current.na.action) subjectIndexVector = factor(data$subject) options(warn=2) postPred = bs_posteriorPredictive(stanfit,ranX=ranX,fixX = fixX,subjectIndexVector = subjectIndexVector) if(exp=='Control' || exp == 'Inset4b'){ postPredSub = subset(postPred,postPred$controlTrial3 == 0) } if(exp=='EEG' | exp =='Inset4a'){ postPredSub = postPred } if(exp=='Horizontal' ){ postPredSub = subset(postPred,postPred$controlTrial1 == 1) } postPredCum = ddply(postPredSub,.(stimLLoc,stimRLoc,subject,postPredIteration,type),summarise,answer=mean(answer)) #levels(postPredCum$subject) <- levels(data$subject) postPredAll = rbind(postPredAll,cbind(postPredCum,experiment=exp)) } tmp = subset(dat.uni,dat.uni$controlTrial==1 & dat.uni$experiment!='Horizontal') # there is a strange bug in horizontal where the subjects are mixed up. 
ggplot(subset(postPredAll,postPredAll$type =='sameSubjectPP'),aes(x=interaction(stimLLoc,stimRLoc),y=answer))+ geom_violin(scale='width')+ geom_point(data = ddply(tmp,.(stimLLoc,stimRLoc,subject),summarise,answer=mean(answer)))+facet_wrap(~subject)+tBE()+ theme(strip.background = element_blank(), strip.text.x = element_blank()) ggsave(file='../export/SupplementaryAa_postPred.pdf',useDingbats=F,width=6,height=4,scale=1.5) postPredCumNewSubject = ddply(subset(postPredAll,postPredAll$type =='newSubjectPP'),.(subject,postPredIteration,experiment,stimLLoc,stimRLoc),summarize,answer = mean(answer)) # We do not want inset 4a in this plot, it has the reverse bias ppSubjectDiff = ddply(subset(postPredAll,postPredAll$type =='newSubjectPP' & experiment!='Inset4a'),.(subject,postPredIteration),summarize,answerDiff = mean(answer[stimLLoc==0 & stimRLoc==1] - answer[stimLLoc==1 & stimRLoc==0])) options(warn=1) subjectDiff = ddply(tmp,.(subject,experiment),summarise,answerDiff=mean(answer[stimLLoc==0 & stimRLoc==1]) - mean(answer[stimLLoc==1 & stimRLoc==0]),.inform = T) subjectDiff = subset(subjectDiff,subjectDiff$experiment != 'Inset4a') ggplot(subjectDiff,aes(x=100*answerDiff))+ geom_histogram(aes(y=..density..),bins=30)+ geom_density(color='red',data=ppSubjectDiff,size=1)+tBE(20) + geom_density(size=1)+geom_point(aes(y=-0.003),position=position_jitter(height=0.001))+ xlab('mean Blind Spot Effect [%]') ggsave(file='../export/SupplementaryAb_blindspotEffect.pdf',useDingbats=F,width=6,height=2,scale=1.5) } bs_posteriorPredictive = function(stanfit,ranX,fixX,subjectIndexVector,nIter = 100){ #browser() nSub = length(unique(subjectIndexVector)) check_param_in_model = function(param,model){ return(length(grep(param,colnames(model),fixed=T))>0) } library(ggmcmc) S = ggs(stanfit) #browser() subjectLevels = levels(subjectIndexVector) #subjectIndexVector = factor(subjectIndexVector) #get rid of old levels #levels(subjectIndexVector) = 1:length(unique(subjectIndexVector)) #convert them to 
1:N #subjectIndexVector = as.numeric(subjectIndexVector) # make them numerical (which they shouldbe already) #parmList = unique(S$Parameter) #fit@par_dims # beta = fixed effects # sigma_u = random-variances # L_u = random-correlations (half of matrix # z_u = ? # u = subjRanef predOut = predNewSub = NULL S$InteractionChainIter = (S$Chain-1)*max(S$Iteration) + S$Iteration iter_list = sample.int(max(S$InteractionChainIter),nIter) for (it in 1:nIter){ nTrials = length(ranX[,1]) show(sprintf('iteration %i',it)) it_idx = iter_list[it] k = S[S$InteractionChainIter==it_idx,] beta = k$value[grep('beta',k$Parameter)] n_beta_fix = length(grep('beta_fix',k$Parameter)) assertthat::are_equal(dim(fixX)[2],n_beta_fix) assertthat::are_equal(dim(ranX)[2],beta-n_beta_fix) # beta[c(1:3,5:20)] = 0 sigma = k$value[grep('sigma.u',k$Parameter)] u = k$value[grep('^u.',k$Parameter)]%>%matrix(ncol=length(grep('sigma.u',k$Parameter))) covmat= k$value[grep('L.u',k$Parameter)]%>%matrix(nrow=length(grep('sigma.u',k$Parameter))) covmat = (covmat)%*%t(covmat) #sigma[1:length(sigma)] = 0.01 #browser() # browser() # This is pulling new subjects out of the estimated random effects matrix, i.e. measuring nSub new subjects ran = mnormt::rmnorm(n=nSub,mean=beta[1:(length(beta)-n_beta_fix)],varcov=diag(sigma) %*% covmat %*%diag(sigma)) # This is taking the estimates of the subjects values, i.e. measuring the same subjects again ran.sub = t(t(u)+beta[1:(length(beta)-n_beta_fix)]) fix = beta[(length(beta)-n_beta_fix+1):length(beta)] # This is a bit ugly because I did not know how to use adply with an additional loop-index (which one needs to tile down the fullModelMatrix. 
# Thus the head/tail combo #ran = cbind(ran,1:nSub) #ran.sub = cbind(ran.sub,1:nSub) get_pred = function(ran,fix,ranX,fixX){ pred = NULL for(rI in 1:dim(ran)[1]){ X = ranX[subjectIndexVector == subjectLevels[rI],] fX = fixX[subjectIndexVector == subjectLevels[rI],] #browser() Xb = ran[rI,]%*%t(X) + fix %*%t(fX) sim = sapply(plogis(Xb),function(p)rbinom(1,1,p)) pred = rbind(pred,cbind(data.frame(answer=sim,trial=1:dim(X)[1]),X,fX,subject=subjectLevels[rI])) } #pred = adply(ran,1,function(x){ #X=ranX[subjectIndexVector==tail(x,1),] #fX=fixX[subjectIndexVector==tail(x,1),] #Simulate data #return(dat)}, #.id='subject',.inform=F) return(pred) } pred = get_pred(ran,fix,ranX,fixX) # for now we don't want PostPred for 'new'-subjects pred.sub = get_pred(ran.sub,fix,ranX,fixX) #browser() #pred$subject = factor(subjectIndexVector[pred$subject]) #pred.sub$subject = factor(subjectIndexVector[pred.sub$subject]) predOut = rbind(predOut,cbind(postPredIteration = it,pred.sub)) predNewSub = rbind(predNewSub,cbind(postPredIteration = it,pred)) } #levels(predOut$subject) = subjectLevels #levels(predNewSub$subject) = subjectLevels return(rbind(cbind(type = 'sameSubjectPP',predOut), cbind(type= 'newSubjectPP', predNewSub))) }
d1086596999939094891b42bd4cd72bb03b3b101
7b091599fdcdfa598f0063be29bc80cbc75ea3ce
/man/loadSampleModels.Rd
d6d666537f575c5344d91a72de78d47c3191edaa
[]
no_license
beeva-jorgezaldivar/plumberModel
1bdd0ab8f82d1c8f7e6b9f887805f1db6f5cc0f8
1dbc5d81d294cb01786f3594936c03195ce1947f
refs/heads/master
2020-03-19T21:26:12.951174
2018-09-11T09:59:17
2018-09-11T09:59:17
136,940,402
5
2
null
null
null
null
UTF-8
R
false
true
288
rd
loadSampleModels.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{loadSampleModels} \alias{loadSampleModels} \title{Obtengo el directorio en el que estan los modelos} \usage{ loadSampleModels() } \description{ Obtengo el directorio en el que estan los modelos }
5082542388c8dfd07f5a25a60f17e875cd7657e0
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/seroincidence/examples/salmonellaSSIParams4.Rd.R
fdb3b99134e5b3c1068f00f194c6e0f0bdb3e01b
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
282
r
salmonellaSSIParams4.Rd.R
library(seroincidence) ### Name: salmonellaSSIParams4 ### Title: Salmonella SSI Response Parameters Data for Model 4 ### Aliases: salmonellaSSIParams4 ### ** Examples # Show first rows of every dataframe contained in salmonellaSSIParams4 lapply(salmonellaSSIParams4, head)
fdc8532f85d49a0136a966a91600aa0802237055
6470ce550c26c7cd13245dab8b84623534e78655
/第11章 地理空间型图表/图11-11-5 网格分面示意图.R
a1ba6c8d7c66e75485e6720be8b3e17de8f8f389
[]
no_license
EasyChart/Beautiful-Visualization-with-R
0d73ed4ee1e1855e33048330294335fbad6d2a25
27990b9349b697ec4336d3e72bae5f3a08d5f5ea
refs/heads/master
2023-06-10T07:36:29.289034
2023-06-05T03:48:59
2023-06-05T03:48:59
189,740,776
687
446
null
2020-02-26T08:07:21
2019-06-01T14:14:10
PostScript
GB18030
R
false
false
2,508
r
图11-11-5 网格分面示意图.R
#EasyCharts团队出品, #如需使用与深入学习,请联系微信:EasyCharts library(geofacet) library(ggplot2) library(reshape2) library(plyr) #(b)中国--------------------------------------------------------------------- Griddata<-read.csv("China_Grid.csv",stringsAsFactors=TRUE) sdata<-read.csv("Province_data.csv",stringsAsFactors=TRUE) colnames(sdata)<-c("code","value") mydata<-join(x=Griddata,y=sdata,by=c("code")) ggplot(mydata, aes(x=value,fill=code)) + geom_density(alpha=1,colour="black",size=0.25)+ facet_geo(~ code,grid=Griddata)+ theme_light()+ theme(legend.position = "none", panel.grid.minor =element_blank()) #cairo_pdf(file="ChinaGrid6.pdf",width=6.52,height=6) #showtext.begin() ggplot(mydata, aes(x=value,fill=code)) + geom_density(alpha=1,colour="black",size=0.25)+ facet_geo(~ code,grid=Griddata)+ theme_light()+ theme(panel.background=element_blank(), panel.border =element_blank(), legend.position = "none", panel.grid =element_blank(), strip.placement = "bottom", strip.background=element_blank(), strip.text=element_text(colour="black")) #showtext.end() #dev.off() #Province_data.csv的构造------------------------------------------------------------ for (i in 1 :nrow(mydata)) { x<- rnorm(mean=runif(1)*10, 100) if (i==1){ sdata<-as.data.frame(x) colnames(sdata)[i]<-as.character(mydata[i,4]) } else{ sdata<-cbind(sdata,x) colnames(sdata)[i]<-as.character(mydata[i,4]) } } sdata<-melt(sdata) colnames(sdata)<-c("code","value") write.csv(sdata,"Province_data.csv",row.names = FALSE) #-(a) 美国------------------------------------------------------------------------ mydata <- us_state_grid1 mydata$col[mydata$code == "WI"] <- 7 grid_preview(my_grid) for (i in 1 :nrow(mydata)) { x<- rnorm(mean=runif(1)*10, 100) if (i==1){ sdata<-as.data.frame(x) colnames(sdata)[i]<-as.character(mydata[i,4]) } else{ sdata<-cbind(sdata,x) colnames(sdata)[i]<-as.character(mydata[i,4]) } } sdata<-melt(sdata) colnames(sdata)<-c("name","value") mydata_new<-join(x=mydata,y=sdata,by=c("name")) 
#cairo_pdf(file="USAGrid5.pdf",width=6.52,height=6) #showtext.begin() ggplot(mydata_new, aes(x=value,fill=code)) + geom_density(alpha=1,colour="black",size=0.25)+ facet_geo(~ code,grid=mydata)+#, scales='free' theme_light()+ ylim(0,0.6)+ theme(legend.position = "none", panel.grid.minor =element_blank()) #showtext.end() #dev.off()
ab613674245520cb5acd5236f68ff79ce179dc2c
1f67b4cc04795c4a9bf1962a18cfcac78c35974d
/R/statismoModelFromRepresenter.r
ada83e949471fb0103176bad8a652a07140ee8db
[]
no_license
Celli119/RvtkStatismo
4eebcc84c2b390badb9c227691e1a606da236917
73ad9fe5881854aef9f76a47077a0d304bb6546b
refs/heads/master
2021-01-16T17:36:21.335226
2016-03-09T06:59:41
2016-03-09T06:59:41
53,198,379
0
0
null
2016-03-05T11:34:59
2016-03-05T11:34:58
null
UTF-8
R
false
false
1,827
r
statismoModelFromRepresenter.r
#' generate model from a representer using gaussian kernels #' #' generate model from a representer using gaussian kernels #' #' @param representer mesh3d or matrix used as representer #' @param kernel a list containing numeric vectors of length 2. Except the first entry of this list may be of length 2 and is then interpreted as Multiscale Bspline kernel. For a Gaussian Kernel, the first entry specifies the bandwidth and the second the scaling. For a Multiscale kernel, the additional 3rd entry sets the number of levels. #' @param ncomp integer: number of PCs to approximate #' @param nystroem number of samples to compute Nystroem approximation of eigenvectors #' @param combine character determining how to combine the kernels: "sum" or "product" are supported. #' @param isoScale standard deviation of isotropic scaling. #' @param centroid specify the center of scaling. If NULL, the centroid will be used. #' @return returns a shape model of class \code{\link{pPCA}} #' @examples #' require(Rvcg) #' data(humface) #' hummodel <- statismoModelFromRepresenter(humface) #' \dontrun{ #' require(rgl) #' for (i in 1:5) wire3d(DrawSample(hummodel),col=i) #' } #' @export statismoModelFromRepresenter <- function(representer,kernel=list(c(100,70)),ncomp=10,nystroem=500,combine="sum",isoScale=0, centroid=NULL) { representer <- dataset2representer(representer) center <- as.vector(representer$vb[1:3,]) pp <- new("pPCA") pp@sigma <- 0 pp@PCA$center <- center pp@PCA$sdev <- 1 pp@representer <- representer pp@PCA$rotation <- matrix(0,length(pp@PCA$center),1) centroid <- apply(GetDomainPoints(pp),2,mean) out <- statismoGPmodel(pp,useEmpiric=FALSE,kernel=kernel,ncomp=ncomp,nystroem=nystroem,combine = combine,combineEmp=0,isoScale=isoScale,centroid=centroid) return(out) }
91f10a707eedc226adff26abf3cb5bf7c3ebb31b
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
/FDA_Pesticide_Glossary/arsenic_acid_arsenic.R
d854de324fddd02514c539408284ae2d400188af
[ "MIT" ]
permissive
andrewdefries/andrewdefries.github.io
026aad7bd35d29d60d9746039dd7a516ad6c215f
d84f2c21f06c40b7ec49512a4fb13b4246f92209
refs/heads/master
2016-09-06T01:44:48.290950
2015-05-01T17:19:42
2015-05-01T17:19:42
17,783,203
0
1
null
null
null
null
UTF-8
R
false
false
276
r
arsenic_acid_arsenic.R
library("knitr") library("rgl") #knit("arsenic_acid_arsenic.Rmd") #markdownToHTML('arsenic_acid_arsenic.md', 'arsenic_acid_arsenic.html', options=c("use_xhml")) #system("pandoc -s arsenic_acid_arsenic.html -o arsenic_acid_arsenic.pdf") knit2html('arsenic_acid_arsenic.Rmd')
dd612dd5f2b5eeb975e7270d4d5f49c431344b76
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/dendextend/examples/color_branches.Rd.R
8d9e0b44a9ef92ed22024add348c7d9e6c9c00aa
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
4,582
r
color_branches.Rd.R
library(dendextend) ### Name: color_branches ### Title: Color tree's branches according to sub-clusters ### Aliases: color_branches colour_branches branches_color ### ** Examples ## Not run: ##D par(mfrow = c(1,2)) ##D dend <- USArrests %>% dist %>% hclust(method = "ave") %>% as.dendrogram ##D d1=color_branches(dend, k = 5, col = c(3,1,1,4,1)) ##D plot(d1) # selective coloring of branches :) ##D d2=color_branches(d1,5) ##D plot(d2) ##D ##D par(mfrow = c(1,2)) ##D d1=color_branches(dend,5, col = c(3,1,1,4,1),groupLabels=TRUE) ##D plot(d1) # selective coloring of branches :) ##D d2=color_branches(d1,5,groupLabels=TRUE) ##D plot(d2) ##D ##D par(mfrow = c(1,3)) ##D d5=color_branches(dend,5) ##D plot(d5) ##D d5g=color_branches(dend,5,groupLabels=TRUE) ##D plot(d5g) ##D d5gr=color_branches(dend,5,groupLabels=as.roman) ##D plot(d5gr) ##D ##D par(mfrow = c(1,1)) ##D ##D # messy - but interesting: ##D dend_override=color_branches(dend,2,groupLabels=as.roman) ##D dend_override=color_branches(dend_override,4,groupLabels=as.roman) ##D dend_override=color_branches(dend_override,7,groupLabels=as.roman) ##D plot(dend_override) ##D ##D d5=color_branches(dend=dend[[1]],k=5) ##D ##D ##D library(dendextend) ##D data(iris, envir = environment()) ##D d_iris <- dist(iris[,-5]) ##D hc_iris <- hclust(d_iris) ##D dend_iris <- as.dendrogram(hc_iris) ##D dend_iris=color_branches(dend_iris,k=3) ##D ##D library(colorspace) ##D labels_colors(dend_iris) <- ##D rainbow_hcl(3)[sort_levels_values( ##D as.numeric(iris[,5])[order.dendrogram(dend_iris)] ##D )] ##D ##D plot(dend_iris, ##D main = "Clustered Iris dataset", ##D sub = "labels are colored based on the true cluster") ##D ##D ##D ##D # cutree(dend_iris,k=3, order_clusters_as_data=FALSE, ##D # try_cutree_hclust=FALSE) ##D # cutree(dend_iris,k=3, order_clusters_as_data=FALSE) ##D ##D library(colorspace) ##D ##D data(iris, envir = environment()) ##D d_iris <- dist(iris[,-5]) ##D hc_iris <- hclust(d_iris) ##D labels(hc_iris) # no labels, because 
"iris" has no row names ##D dend_iris <- as.dendrogram(hc_iris) ##D is.integer(labels(dend_iris)) # this could cause problems... ##D ##D iris_species <- rev(levels(iris[,5])) ##D dend_iris <- color_branches(dend_iris,k=3, groupLabels=iris_species) ##D is.character(labels(dend_iris)) # labels are no longer "integer" ##D ##D # have the labels match the real classification of the flowers: ##D labels_colors(dend_iris) <- ##D rainbow_hcl(3)[sort_levels_values( ##D as.numeric(iris[,5])[order.dendrogram(dend_iris)] ##D )] ##D ##D # We'll add the flower type ##D labels(dend_iris) <- paste(as.character(iris[,5])[order.dendrogram(dend_iris)], ##D "(",labels(dend_iris),")", ##D sep = "") ##D ##D dend_iris <- hang.dendrogram(dend_iris,hang_height=0.1) ##D ##D # reduce the size of the labels: ##D dend_iris <- assign_values_to_leaves_nodePar(dend_iris, 0.5, "lab.cex") ##D ##D par(mar = c(3,3,3,7)) ##D plot(dend_iris, ##D main = "Clustered Iris dataset ##D (the labels give the true flower species)", ##D horiz = TRUE, nodePar = list(cex = .007)) ##D legend("topleft", legend = iris_species, fill = rainbow_hcl(3)) ##D a= dend_iris[[1]] ##D dend_iris1 <- color_branches(a,k = 3) ##D plot(dend_iris1) ##D ##D # str(dendrapply(d2, unclass)) ##D # unclass(d1) ##D ##D c(1:5) %>% # take some data ##D dist %>% # calculate a distance matrix, ##D # on it compute hierarchical clustering using the "average" method, ##D hclust(method = "single") %>% ##D as.dendrogram %>% color_branches(k=3) %>% plot # nice, returns the tree as is... 
##D ##D ##D # Example of the "clusters" parameter ##D par(mfrow =c(1,2)) ##D dend <- c(1:5) %>% dist %>% hclust %>% as.dendrogram ##D dend %>% color_branches(k=3) %>% plot ##D dend %>% color_branches(clusters=c(1,1,2,2,3)) %>% plot ##D ##D ##D # another example, based on the question here: ##D # https://stackoverflow.com/q/45432271/256662 ##D ##D ##D library(cluster) ##D set.seed(999) ##D iris2 <- iris[sample(x = 1:150,size = 50,replace = F),] ##D clust <- diana(iris2) ##D dend <- as.dendrogram(clust) ##D ##D temp_col <- c("red", "blue", "green")[as.numeric(iris2$Species)] ##D temp_col <- temp_col[order.dendrogram(dend)] ##D temp_col <- factor(temp_col, unique(temp_col)) ##D ##D library(dendextend) ##D dend %>% color_branches(clusters = as.numeric(temp_col), col = levels(temp_col)) %>% ##D set("labels_colors", as.character(temp_col)) %>% ##D plot ##D ##D ##D ## End(Not run)
eb1b45a01309810d23cea95fec249758e3f84e14
27d0548026ed38f8e721aa1ec33ef091327a7f29
/HEADER FILE.R
d345416378471d38e38f2d0d6151b4c1eb2fc531
[]
no_license
MaxPaurat/Mobile-Data-for-Public-Health
c7a6dee61592f994ad0733748b07b6ef8d3d9c69
b14aca1dac1ce84c04cbaea916026f0f48caae2d
refs/heads/master
2020-03-12T00:00:27.848846
2018-04-20T09:35:05
2018-04-20T09:35:05
130,337,534
0
0
null
null
null
null
UTF-8
R
false
false
44
r
HEADER FILE.R
#Header file source("Read in Data.r")
27b3ac656ad0d5688f777d9ff3aaa03b1934be6c
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/fMultivar/examples/utils-density2d.Rd.R
d8934eca5aa3419001eafe8c2f4c264b513fe0de
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
312
r
utils-density2d.Rd.R
library(fMultivar) ### Name: utils-density2d ### Title: Bivariate Density Tools ### Aliases: density2d hist2d ### Keywords: math ### ** Examples ## hist2d - # Normal Random Numbers: set.seed(4711) X <- rnorm2d(40000) # 2D Histogram Plot: Z <- hist2d(X) image(Z) contour(Z, add=TRUE)
313bfaff358bef7e0c2b64c83519046c8af32ae6
21b7ca342de321bf77d917ded9dbac7507c75f12
/R/webchannel.R
848920d4e7444b077a13df519897a860c4bfe75d
[ "MIT" ]
permissive
altairwei/wiztimebook
d1cc6434866a84822f01e5fbde6b475bd196b060
20eed6dad993368d2c0c109b63b9e00261f400c3
refs/heads/master
2023-05-03T20:04:12.891724
2021-05-27T12:35:37
2021-05-27T12:35:37
250,298,842
0
1
null
null
null
null
UTF-8
R
false
false
734
r
webchannel.R
#' Connect to WizNote public APIs #' @export connect_to_wiznote <- function(global_rv) { baseUrl <- "ws://localhost:8848" message(paste0("Connecting to WebSocket server at ", baseUrl)) socket <- websocket::WebSocket$new(baseUrl, autoConnect = FALSE) socket$onClose(function(event) { message("web channel closed") }) socket$onError(function(event) { message(paste0("web channel error:", event$message)) }) socket$onOpen(function(event) { message("WebSocket connected, setting up QWebChannel.") rwebchannel::QWebChannel$new(socket, function(channel) { message("QWebChannel setup finished.") global_rv$WizExplorerApp <- channel$objects[["WizExplorerApp"]] }) }) socket$connect() }
72d035467b1ab359543fa7a706357a7e3a357f50
ae2454d6bcb4570d693f05ce0a9b77a1fcb2d24c
/inst/shiny/GUI/server.R
8c316b1d76fd31fa678ac3307c77e4ed69608e72
[]
no_license
abbassix/adca2d89
4e902a5c200bb0e0e7be8f6861a7fdfb56396c8c
4d9bc05a1160a9114801d4c98d4afd46f62725a2
refs/heads/master
2022-04-16T08:33:42.243727
2020-02-25T07:21:30
2020-02-25T07:21:30
null
0
0
null
null
null
null
UTF-8
R
false
false
616
r
server.R
# Define server logic to summarize and view selected dataset ---- server <- function(input, output) { # Generate a summary of the dataset ---- output$summary <- renderPrint({ #dataset <- datasetInput() if (input$amazon_url == "") { cat("Please enter the address of the product on amazon.it\nin the given box on sidebar.") } else { asin = sentimeter::get_asin(input$amazon_url) cat(paste("ASIN:", asin)) cat('\n') cat(paste("Number of reviews:", sentimeter::number_of_reviews(asin))) sentimeter::sentiment_analysis(sentimeter::scrape_reviews(asin, 1)) } }) }
135369d1cd9f7e4a97cf3bbfa9dbb20ac3bcae0c
42504ca37ab0dee26b121e8fae4f4b13d98482e6
/cachematrix.R
992e60de5d35ed9c01d948989d51bad736f55ad8
[]
no_license
nikolaypugachyov/ProgrammingAssignment2
66a9d07bb83d9d2b674c603f779f75c03a24e9ec
80a52c19aa5df39289476e63252ae656d8414b32
refs/heads/master
2020-05-03T12:22:07.075679
2019-03-31T00:09:13
2019-03-31T00:09:13
178,623,890
0
0
null
2019-03-31T00:10:52
2019-03-31T00:01:23
R
UTF-8
R
false
false
1,607
r
cachematrix.R
## Put comments here that give an overall description of what your ## functions do ## Write a short comment describing this function ## My comments: ## The function below creates a special matrix in a few steps ## 1. set the value of the matrix via `set` ## 2. get the value of the matrix via `get` ## 3. set the value of the inverse via `setinv` ## 4. get the value of the inverse via `getinv` makeCacheMatrix <- function(x = matrix()) { m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x setinv <- function(solve) m <<- solve getinv <- function() m list(set = set, get = get, setinv = setinv, getinv = getinv) } ## Write a short comment describing this function ## My comments: ## This function calculates the inverse of the special matrix created with the ## function above. ## 1. It checks if the inverse has already been claculated ## 2. If yes, it gets the inverse from the cache skipping the calculation ## 3. If no, it calculates the inverse and stores it in the cache via setinv cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' m <- x$getinv() if (!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- solve(data, ...) x$setinv(m) m } ## Quick check for lazy checkers ;) # set.seed(1) # mat <- matrix(sample(1:10, 100, replace = TRUE), 10, 10) # inv <- solve(mat) # x <- makeCacheMatrix(mat) # cacheSolve(x) ## Thank you! Bye!
d72816961260a1733efeac0f9360a4c4a157db99
1f3c33ff4573e8d00421be9c1064286c7c16bdd7
/old FUNCTIONS/GROW_FP3.R
665286c9bb93cf6cbf79e2c604a6ec5b388f3be3
[]
no_license
mccannecology/model
bcddb18189a49507d10a49f063c99a9d0e529e49
3ab691c6069b174d97230719ee40f36e6129a0bf
refs/heads/master
2021-05-28T08:39:34.460524
2015-03-18T15:38:43
2015-03-18T15:38:43
17,213,665
0
0
null
null
null
null
UTF-8
R
false
false
1,372
r
GROW_FP3.R
####################################### # Growth function # # Compatible w/ new LIST structure # # Compatible w/ SAV component # # # # Identical to Scheffer et al. 2003 # # # # INPUTS: # # x1... LIST[[i]]$FPmatrix # # x2... LIST[[i+1]]$FPmatrix # # x3... LIST[[i]]$FPALLmatrix # # n... numbFPspecies # # x4... LIST[[i]]$TOTALP # # x5... LIST[[i]]$TOTALN # # # # Created: MJ McCann 3/23/2014 # # Updated: 6/20/2014 # # No cap @ 100 g/m2 # ####################################### GROW_FP3 <- function(x1,x2,x3,n,x4,x5) { for (j in 1:height) { # loop over all rows (height) for (k in 1:width) { # loop over all columns (width) if (x1[j,k] > 0) { x2[j,k] <- x1[j,k] + # initial biomass ((x1[j,k]*speciesmatrix$maxrgr[n+1]) * # new growth (x5/(x5+speciesmatrix$halfsatN[n+1])) * # nitrogen limitation (1/(1+lightlimitation_FP*x3[j,k])) - # biomass limitation (lossFP*x1[j,k])) # biomass loss } } } return(x2) }
e9293851927d73cefc54144ce0caa249412d43af
9783a80282dd55fb5cec3f4feade68281cf0eb5f
/cachematrix.R
fce310d38aac9da35ac4739a597aa9ea26fbabe5
[]
no_license
mcohen01/ProgrammingAssignment2
b76e5334700ca4d9c010d68c52f8320a994f8312
e69f3f4fe2068d9aa67244b9b71abd4602367a04
refs/heads/master
2021-01-14T11:10:51.023582
2014-10-21T04:53:58
2014-10-21T04:53:58
null
0
0
null
null
null
null
UTF-8
R
false
false
1,176
r
cachematrix.R
## Matrix inversion is usually a costly computation. ## These functions together cache the inverse of a ## matrix rather than computing it repeatedly. ## minimal "caching" api using R's lexical scoping rules makeCacheMatrix <- function(x = matrix()) { inverse <- NULL set <- function(y) { x <<- y inverse <<- NULL } get <- function() x setinverse <- function(inverse) inverse <<- inverse getinverse <- function() inverse list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## compute the inverse of the matrix if not already computed cacheSolve <- function(x, ...) { inverse <- x$getinverse() if(!is.null(inverse)) { message("getting cached data") return(inverse) } data <- x$get() inverse <- solve(data, ...) x$setinverse(inverse) inverse } ## test code as specified in https://class.coursera.org/rprog-008/forum/thread?thread_id=174 # amatrix = makeCacheMatrix(matrix(c(1,2,3,4), nrow=2, ncol=2)) # amatrix$get() # cacheSolve(amatrix) # amatrix$getinverse() # amatrix$set(matrix(c(0,5,99,66), nrow=2, ncol=2)) # cacheSolve(amatrix) # amatrix$get() # amatrix$getinverse()
5af02f042366cb8c51d4906f44acdfa9e29046d2
6647079130c5fd10007246b095b26e5c36272538
/cachematrix.R
a8359c91761fb3ffae6ad59f5c8f296b01de9142
[]
no_license
cdaustin/ProgrammingAssignment2
164a22748b59e06e985dc61bcd49f0f5e584eb2a
f84c4fe9c14f67cf0829b89b2975d2a4a45b14ba
refs/heads/master
2021-01-14T14:18:24.780318
2016-01-23T18:13:28
2016-01-23T18:13:28
49,979,865
0
0
null
2016-01-19T20:10:41
2016-01-19T20:10:40
null
UTF-8
R
false
false
1,137
r
cachematrix.R
## Put comments here that give an overall description of what your ## functions do ## Write a short comment describing this function makeCacheMatrix <- function(x = matrix()) { m<-NULL ## sets to NULL as a placeholder for a future value set<-function(y){ x<<-y m<<-NULL ## defines a function to set the vector x to a new vector y, and resets m to NULL } get<-function() x ## returns the vector x setmatrix<-function(solve) m<<- solve ## sets the inverse of the matrix getmatrix<-function() m ## returns the inverse list(set=set, get=get, setmatrix=setmatrix, getmatrix=getmatrix) ## returns the 'special vector' containing all of the functions just defined } ## Write a short comment describing this function cacheSolve <- function(x, ...) { m<-x$getmatrix() if(!is.null(m)){ message("getting cached data") return(m) } matrix<-x$get() m<-solve(matrix, ...) x$setmatrix(m) m ## Return a matrix that is the inverse of 'x' }
d7e7a4a209c39d63409a721498a26c40df95a791
bf716a5869e0f669b3f8324dcc1a995c1213a1f1
/2_process/src/process_and_style.R
a419c30cd2ff65ad3c38d47367e5fb466be9d38b
[]
no_license
slevin75/ds-pipelines-targets-2
832779b2282d45e73eb1326e7a8ecf1f4b5c0213
11a11fc34bb7be102dd5328dbcbff2558b3ea9cc
refs/heads/main
2023-08-22T18:20:11.431461
2021-10-18T16:33:05
2021-10-18T16:33:05
416,356,593
0
0
null
2021-10-18T14:24:31
2021-10-12T13:50:18
R
UTF-8
R
false
false
634
r
process_and_style.R
process_data <- function(nwis_data){ nwis_data_clean <- rename(nwis_data, water_temperature = X_00010_00000) %>% select(-agency_cd, -X_00010_00000_cd, -tz_cd) return(nwis_data_clean) } annotate_data <- function(site_data_clean, site_filename,fileout){ site_info <- read_csv(site_filename) annotated_data <- left_join(site_data_clean, site_info, by = "site_no") %>% select(station_name = station_nm, site_no, dateTime, water_temperature, latitude = dec_lat_va, longitude = dec_long_va) mutate(annotated_data,station_name=as.factor(station_name)) write_csv(annotated_data, file = fileout) return(fileout) }
b15e97a454b842fca911195c621cef49301d0731
812bf2b9d2e315d50ffb38e1ebfb6a4c6eaddf6a
/DE_analysis/Book_flowork.R
d46ca7a1c7778652c7a352640a2a5b7923da5ef0
[ "MIT" ]
permissive
bnetlab/DE_and_Functional_Analysis_of_Geneset
6487352284a62e1725699d09042cb589a0f60cc5
34a0d8af5e169befc5f303d2679f84ad0f84caf8
refs/heads/master
2020-06-12T22:31:56.825165
2020-02-26T17:36:42
2020-02-26T17:36:42
194,448,229
0
0
null
null
null
null
UTF-8
R
false
false
7,723
r
Book_flowork.R
#tutrial Libro #obtiene todos lo datos de ENA cut - f11 samples_at_ENA . txt | xargs wget # obterner muestras espacificas for ACC_NR in ERR458493 ERR458494 ERR458495 ERR458496 ERR458497 ERR458498ERR458499; do egrep ${ACC_NR} ERP004763_sample_mapping.tsv | cut -f 11 PRJEB5348.txt | xargs wget; done #fastqce analises ../FastQC/fastqc ERR458506.fastq.gz --extract -o fastqc_results/ #to see all the results together need run multiqc in the directory with all fastqc results multiqc -d fastqc_results/ -o QC # see the results report firefox QC/multiqc_report.html #Qaligment # Download genome sequence of S. cerevisiae from UCSC wget http://hgdownload.soe.ucsc.edu/goldenPath/sacCer3/bigZips/sacCer3.2 bit #convert from bit to fasta ../UCSC_tools/twoBitToFa sacCer3.2bit sacCer3.fa #download the gtf and bed file #download the table with all field from UCSU and process # Confirm the head head -n1 sacCer3_allfields.gtf cut -f 2- sacCer3_allfields.gtf | sed '1d' |\ ../UCSC_tools/genePredToGtf file stdin sacCer3_Refseq.gtf head -n1 sacCer3_Refseq.gtf #Generated the genomic Index REF_DIR=~/Data_RNA-seq/Referece_Genome runSTAR=~/Data_RNA-seq/STAR-2.7.1a/bin/Linux_x86_64_static/STAR ${runSTAR} --runMode genomeGenerate --genomeDir STARindex/ --genomeFastaFiles ${REF_DIR}/sacCer3.fa --sjdbGTFfile ${REF_DIR}/sacCer3.gtf --sjdbOverhang 49 --runThreadN 2 #Aligment #defines the files fastq routes # This step has to be done for each individual FASTQ file FILES=`ls -m Data_raw/WT/*fastq.gz| sed 's/ //g'` FILES=`echo $FILES|sed 's/ //g'` ${runSTAR} --genomeDir STARindex --readFilesIn $FILES --readFilesCommand zcat --outFileNamePrefix alignment_STAR/WT_1_ --outFilterMultimapNmax 1 --outReadsUnmapped Fastx --outSAMtype BAM SortedByCoordinate --twopassMode Basic --runThreadN 2 #BAM file indexing Most downstream applications will require a .BAM.BAI file together with every BAM file to quickly access the BAM files without having to load them into memory samtools index 
alignment_STAR/WT_6_Aligned.sortedByCoord.out.bam # a traves del archivo log.final se puede revisar la calidad del alimiento the unique reads is the more importan t #to see the aligment quality we can used samtools flagstat WT_1_Aligned.sortedByCoord.out.bam #use the sam tool to see how many reads were mapped #RSeQC you can see the number of reads mapped bam_stat.py -i WT_1_Aligned.sortedByCoord.out.bam #and analize the quantity of red mappes (unique ) samtools flagstat WT_1_Aligned.sortedByCoord.out.bam > flagstat_WT_1.txt #See the gene aligment distibution read_distribution.py -r /home/edianfranco/Data_RNA-seq/Referece_Genome/sacCer3.bed -i alignment_STAR/SNF2_1_Aligned.sortedByCoord.out.bam #Genes Body Gene body coverage To assess possible 3’ or 5’ biases, geneBody_coverage.py -i alignment_STAR/WT_1_Aligned.sortedByCoord.out.bam -r /home/edianfranco/Data_RNA-seq/Referece_Genome/sacCer3.bed -o GeneBody_coverage_WT_1 #Gene-based read counting###### # Quality control with QoRTs #count the read for run the program for FASTQ in Data_raw/SNF2/ERR458500*gz; do zcat $FASTQ | wc -l ; done | paste -sd+ | bc | awk '{print $1/4}' #run the prgram java -Xmx4g -jar hartleys-QoRTs-099881f/QoRTs.jar QC --singleEnded --seqReadCt 1885330 --generatePdfReport alignment_STAR/WT_1_Aligned.sortedByCoord.out.bam /home/edianfranco/Data_RNA-seq/Referece_Genome/sacCer3.gtf ./QoRTs_output_WT_1 #Read Quantification #for this we use the tool subread to make a raw count of reads subread-1.6.4-source/bin/featureCounts -a /home/edianfranco/Data_RNA-seq/Referece_Genome/sacCer3_Refseq.gtf -o features_count_results.txt alignment_STAR/*bam #summary.txt a summa aboyt teh process and result content the coutn #count the exons subread-1.6.4-source/bin/featureCounts -a /home/edianfranco/Data_RNA-seq/Referece_Genome/sacCer3_Refseq.gtf -f -t exon -O -o features_counts_exons.txt alignment_STAR/*bam #remove repetitive exons sort -k2,2n -k3,3n features_counts_exons.txt | uniq > 
features_counts_exons_unique.txt ##################R###################### # read data from count_feature to meake de DE analises library("magrittr") read.counts2<- read.table("/home/edianfranco/Data_RNA-seq/features_count_results.txt", header = TRUE) row.names(read.counts)<-read.counts$Geneid read.counts<- read.counts[,-c(1:6)] orig_names <- names(read.counts ) names(read.counts ) <- c("SNF2 _1", "SNF2 _2", "SNF2 _3", "SNF2 _4", "SNF2 _5", "SNF2 _6","SNF2 _7","WT_1", "WT_2", "WT_3", "WT_4", "WT_5","WT_6","WT_7") #names(read.counts) <- gsub(".*(SNF2|WT)(_[0 -9]+) .*", "\\1\\2",orig_names) automatic name #Now that we have the read counts, we also need some information about the samples, which will be stored in colData sample_info <- data.frame(condition = gsub("_[0 -9]+", "", names (read.counts)),row.names = names (read.counts)) library(DESeq2) #generate the DESeqDataSet DESeq_dataset<-DESeqDataSetFromMatrix(countData = read.counts, colData = sample_info, design = ~ condition) #Dataframe verfication DESeq_dataset %>% head assay(DESeq_dataset) %>% head rowRanges(DESeq_dataset) %>% head #test what counts () returns counts(DESeq_dataset)%>%str # remove genes without any counts DESeq_dataset<-DESeq_dataset[rowSums(counts(DESeq_dataset))>0,] colSums(counts(DESeq_dataset)) # should be the same as colSums ( readcounts ) # calculate the size factor and add it to the data set DESeq_dataset<-estimateSizeFactors(DESeq_dataset) sizeFactors(DESeq_dataset) #normalized count.sf_nomarlized<-counts(DESeq_dataset, normalized= TRUE) #log transformation log.norm.count<-log2(count.sf_nomarlized + 1) #plot the results par(mfrow=c(2,1)) #to plot the following two images underneath each other # first , boxplots of non - transformed read counts (one per sample ) boxplot(count.sf_nomarlized, notch= TRUE, main= "Unstrafomed read counts", ylab="Read counst") # box plots of log2 - transformed read counts boxplot(log.norm.count, notch= TRUE, main= "log2-trnsformed read counts", ylab=" log2 (Read 
counst)") #Transformation of read counts including variance shrinkage # obtain regularized log - transformed values DESeq_dS_rlog<-rlog(DESeq_dataset,blind = TRUE) rlog_norm_count<-assay(DESeq_dS_rlog) # mean -sd plot for rlog - transformed data library(vsn) library(ggplot2) msd_plot<-meanSdPlot(rlog_norm_count,ranks = FALSE,plot =FALSE) msd_plot$gg+ggtitle("rlog-tranformed read counts") + ylab ("Standard desviation") #####Differential Gene Expression Analysis####### #1-DESeq2 workflow #DGE analysis is performed on the raw data str (colData(DESeq_dataset)$condition) # set WT as the first -level - factor colData (DESeq_dataset)$condition <- relevel(colData(DESeq_dataset)$condition , "WT") # sequencing depth normalization between the samples DESeq_dataset_<-DESeq(DESeq_dataset) # gene - wise dispersion estimates across all samples DESeq_dataset_<-estimateSizeFactors(DESeq_dataset_) # this fits a negative binomial GLM andapplies Wald statistics to each gene DESeq_dataset_<-nbinomWaldTest(DESeq_dataset_) #The results() function lets you extract the base means across samples DGE_results<-results(DESeq_dataset_,independentFiltering = TRUE,alpha = 0.5) #the DESeqResult object can basically be handled like a data . frame head (DGE_results) table (DGE_results$padj<0.05) rownames(subset(DGE_results, padj<0.05)) #Exploratory plots following DGE analysis hist (DGE_results$pvalue,col = " grey ", border = " white ", xlab = "", ylab = "", main = " frequencies of p- values ") plotMA (DGE_results, alpha = 0.05 , main = "WT vs. SNF2 mutants ", ylim = c( -4 ,4))
3f4e3c2f5b967a58aa7e2ed9b4a85787f3d570c8
fef7fd51ef8890e8704cd2e5c8e6295a9515273a
/R/is_vector.r
eadc1011fb87f395aea00223741f3ea1184838c7
[]
no_license
tomasgreif/mondrianr
a82d41550e2124032b5efd3f2fb5cb439af647dc
c806272ff41eec5e863f5c572a1b94680176726a
refs/heads/master
2020-04-05T15:42:53.371365
2014-03-28T13:10:58
2014-03-28T13:10:58
15,005,183
1
2
null
2013-12-16T09:16:14
2013-12-07T12:21:39
R
UTF-8
R
false
false
1,258
r
is_vector.r
#' Check if argument is vector with given properties #' #' Checks if argument is vector with given length and mode. Used to control arguments. Returns error when object #' does not match given properties. #' #' @param vector Input object. #' @param length Required length of vector. #' @param mode Required mode of vector #' @examples #' is_vector(c(1:9),9,'numeric') #' is_vector('a') #' is_vector(TRUE,mode='logical') #' #' \dontrun{ #' is_vector(TRUE,mode='character') #' is_vector(c('a','b'),length=3) #' is_vector(iris)} #' @export is_vector <- function(vector, length=1, mode='character') { if(!is.vector(vector)) { stop('Argument "', deparse(substitute(vector)),'" is not vector. Argument expects ', mode, ' vector of length ', length ,', not ', mode(vector),'.') } else if (is.vector(vector) & length(vector) != length) { stop('Incorrect length of argument "',deparse(substitute(vector)), '". Expected length is ', length, ' not ', length(vector),'.') } else if (!all(is.na(vector)) & !is.na(mode) & mode != mode(vector)) { stop('Incorrect vector mode for "',deparse(substitute(vector)) ,'". Expected mode is ', mode, ' not ', mode(vector) ,'.') } return(invisible(TRUE)) }